blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
061fe013b00e81a60e803451933733099e3cafaf | Shell | bopoda/dcompany | /bin/provision.sh | UTF-8 | 346 | 2.96875 | 3 | [] | no_license | #!/bin/bash
# Provisioning must run as root; abort with a NON-ZERO status otherwise.
# BUG FIX: the original used a bare `exit`, which returns 0 and made
# callers believe provisioning succeeded.
if [ "$(whoami)" != "root" ]; then
    echo "You must be root to provision this" >&2
    exit 1
fi
aptitude update
# Go to this script directory
cd "$(cd -P "$(dirname "$0")" && pwd)" || exit 1
# obtain a top level git so the relative paths below resolve
cd "$(git rev-parse --show-toplevel)" || exit 1
echo 'export APPLICATION_ENV=local' > /etc/profile.d/application-environment.sh
./bin/provision/db | true |
2761c7b4f4c05b26c0339aeb56105974d392e8b9 | Shell | yilab/sg | /sg-client | UTF-8 | 1,579 | 3.96875 | 4 | [] | no_license | #!/bin/bash
# Designed to be run from the crontab, for example:
# To log every 5 mins your laptop's temperature to your $SG_HOST
# */5 * * * * ID=temp sg-client -d $SG_HOST -g temp /sys/class/thermal/thermal_zone0/temp
# Check if time is sane
# (guards against logging bogus samples before NTP has synced the clock)
if test $(date +%s) -lt 1378882344
then
echo "Time is in the past: $(date)" 1>&2
exit 1
fi
# Setup your SSH_ENV like so: https://github.com/kaihendry/Kai-s--HOME/blob/master/.xinitrc
# Sourcing it gives cron jobs access to a running ssh-agent for the rsync below.
SSH_ENV=$HOME/.ssh/environment
test -e "$SSH_ENV" && . $SSH_ENV
sr=/var/sg # sg's root
# to identify where the data is coming from
id=$(hostname)
# Every graph needs a name
g="unnamed"
while getopts "r:g:d:i:" o
do
case "$o" in
(g) g="$OPTARG" ;; # graph name
(d) d="$OPTARG" ;; # destination hostname
(r) dr="$OPTARG";; # destination root directory
(i) id="$OPTARG";; # override hostname as client id
esac
done
shift $((OPTIND - 1))
# Crucial hierachy e.g. /var/sg/machine-id/temperature/14022.csv, 022 is the day of the year, 14 is last two digits of the year
f="$sr/$id/$g/$(date -u +%y%j).csv"
# MUST be rooted by EPOCH time
# $1 is an optional file to sample; otherwise the measurement is read from stdin.
if test -f "$1"
then
m="$(date +%s) $(< $1)"
else
m="$(date +%s) $(< /dev/stdin)"
fi
mkdir -p "$(dirname "$f")"
echo $m >> "$f" || exit
# Only sync when a destination host (-d) was given.
if test "$d"
then
# Directing stderr to remove annoying Could not chdir to home directory /home/stats: No such file or directory
# Where rsync tries to chdir to /home/stats, stat's $HOME, despite the Match user stats's ChrootDirectory /var/sg setting
# flock -n prevents overlapping cron runs from racing on the same graph.
flock -n "/tmp/sg-${id}-${g}" -c "rsync -e ssh --append -r $sr/$id/$g/*.csv $d:${dr:-$sr}/$id/$g/" 2>/dev/null
fi
| true |
4d8f5ffaef53e7a3af46845b2698321e824b1655 | Shell | dlux/InstallScripts | /common_packages | UTF-8 | 24,872 | 4.03125 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# ============================================================================
# This file is meant to hold all common package installation used across the
# install scripts under this project.
# Specifically those that require a special configuration or need several
# libraries to be installed to work properly
# e.x. mysql
# ============================================================================
# Uncomment the following line to debug this script
# set -o xtrace
source common_functions
SetPackageManager
#=================================================
#=================================================
function CreateSelfSignedCertificate {
    # Produce a self-signed key/cert pair (domain.key / domain.crt) inside
    # the directory given in $1. No CA signs the result, so browsers will
    # show an untrusted-certificate warning. See http://do.co/2gtYDet
    # $2 - optional openssl subject string.
    cert_path="${1}"
    subj=${2:-/C=US/ST=Oregon/L=Portland/O=Dlux/OU=Data Center/CN=Dlux}
    if [[ ! -d $cert_path ]]; then
        PrintError "Path ${cert_path} does not exists."
    fi
    pushd $cert_path
    openssl req \
        -newkey rsa:4096 -nodes -keyout domain.key \
        -x509 -days 365 -out domain.crt -subj "${subj}"
    popd
}
# Build a private root CA plus a subordinate (intermediate) CA in $1.
# Produces ca.key/ca.crt (root) and ia.key/ia.csr/ia.crt (intermediate).
# $2 - optional openssl subject string.
function CreateSelfCASignedCertificate {
cert_path="${1}"
subj=${2:-/C=US/ST=Oregon/L=Portland/O=Dlux/OU=Data Center/CN=Dlux}
[[ ! -d $cert_path ]] && PrintError "Path ${cert_path} does not exists."
pushd $cert_path
# 1. Generate 4096-bit long RSA key for root CA.
openssl genrsa -out ca.key 4096
# 2. Create self-signed 5y valid root CA certificate.
openssl req -new -x509 -days 1826 \
-key ca.key -out ca.crt -subj "${subj}"
# 3. Create subordinate CA to be used for the signing. This means:
# create key, create csr, & process csr to get a crt signed by the root CA.
# KEY
openssl genrsa -out ia.key 4096
# CSR
openssl req -new -key ia.key -out ia.csr -subj "${subj}"
# 5. CRT (Signed)
openssl x509 -req -days 730 -set_serial 01 \
-CA ca.crt -CAkey ca.key -in ia.csr -out ia.crt
popd
}
# Generate a private key plus a CSR (domain.key / domain.csr) in $1.
# The CSR must then be submitted to a real CA for signing.
function CreateCSRCertificate {
cert_path="${1}"
[[ ! -d $cert_path ]] && PrintError "Path ${cert_path} does not exists."
pushd $cert_path
# Use this method to secure an Apache HTTP or Nginx web server (SSL-HTTPS)
# Use when using a Certificate Authority (CA) to issue the SSL certificate
# Created private key. csr must be sent to CA for CA-signed SSL cert
openssl req \
-newkey rsa:2048 -nodes -keyout domain.key \
-out domain.csr
popd
}
# Dispatch to the package-manager-specific implementation; _PACKAGE_MANAGER
# is set by SetPackageManager (sourced from common_functions above).
function CustomizeApache {
Customize"${_PACKAGE_MANAGER}"Apache
}
# Install ansible via pip. $1 - optional version (default 2.6.1).
function InstallAnsible {
echo 'installing ansible via pip'
version=${1:-2.6.1}
# Use pip
# Bootstrap pip via get-pip.py first if it is missing.
[[ -z $(which pip) ]] && curl -Lo- https://bootstrap.pypa.io/get-pip.py | python
pip install ansible==$version
}
function InstallApache {
Install"${_PACKAGE_MANAGER}"Apache
}
# "$@" - forwarded to the backend (e.g. a mod name such as wsgi/ssl/proxy).
function InstallApacheMod {
Install"${_PACKAGE_MANAGER}"ApacheMod "$@"
}
function InstallDhcp {
Install"${_PACKAGE_MANAGER}"Dhcp "$@"
}
function InstallDns {
Install"${_PACKAGE_MANAGER}"Dns "$@"
}
# Install docker from get.docker.com unless already present; add the current
# user to the docker group and enable the systemd unit.
function InstallDocker {
if command -v docker; then
return
fi
curl -fsSL https://get.docker.com/ | sh
usermod -aG docker $USER
systemctl enable docker.service
}
function SetDockerProxy {
    # Configure HTTP/HTTPS/NO proxy for the docker daemon (systemd drop-ins)
    # and the docker client ($HOME/.docker/config.json), then restart docker
    # and wait until the daemon answers again.
    # $1 - proxy URL (SetProxy is expected to export http_proxy/no_proxy).
    #
    # BUG FIX: both guards used `if -z ...`, which runs a command named
    # "-z" (always failing) instead of a test, so they never fired.
    if [[ -z "$1" ]]; then
        PrintError 'Need to provide proxy variable'
    fi
    if ! command -v docker >/dev/null; then
        PrintError "Docker must be installed"
    fi
    SetProxy "$1"
    mkdir -p /etc/systemd/system/docker.service.d
    pushd /etc/systemd/system/docker.service.d
    WriteLog '---> SETTING PRXY ON DOCKER'
    echo '[Service]' > http-proxy.conf
    echo "Environment=\"HTTP_PROXY=$http_proxy\"" >> http-proxy.conf
    echo '[Service]' > https-proxy.conf
    echo "Environment=\"HTTPS_PROXY=$http_proxy\"" >> https-proxy.conf
    echo '[Service]' > no-proxy.conf
    echo "Environment=\"NO_PROXY=$no_proxy\"" >> no-proxy.conf
    popd
    mkdir -p "$HOME/.docker"
    # Build the client-side proxy JSON; the trailing comma is trimmed below.
    config="{ \"proxies\": { \"default\": { \"httpProxy\": \"$http_proxy\","
    config+="\"httpsProxy\": \"$http_proxy\",\"noProxy\": \"$no_proxy\","
    echo "${config::-1} } } }" | tee "$HOME/.docker/config.json"
    systemctl daemon-reload
    systemctl restart docker
    # Poll (max ~10 tries) until the daemon responds.
    # BUG FIX: `times > 9` was a redirection of the `times` builtin into a
    # file named "9" (always succeeding), so the loop never retried.
    times=0
    until sudo docker info || (( times > 9 )); do
        printf "."
        sleep 2
        times=$((times + 1))
    done
}
# The functions below dispatch to the apt/yum-specific implementation
# selected at load time via _PACKAGE_MANAGER.
function InstallFirewallUFW {
    Install"${_PACKAGE_MANAGER}"FirewallUFW "$@"
}
function InstallJenkins {
    Install"${_PACKAGE_MANAGER}"Jenkins "$@"
}
function InstallELKSElasticsearch {
    Install"${_PACKAGE_MANAGER}"ELKSElasticsearch "$@"
}
function InstallELKSKibana {
    Install"${_PACKAGE_MANAGER}"ELKSKibana "$@"
}
function InstallELKSLogstash {
    Install"${_PACKAGE_MANAGER}"ELKSLogstash "$@"
}
function InstallMysql {
    # Can accept a password
    Install"${_PACKAGE_MANAGER}"Mysql "$@"
}
function InstallNginx {
    Install"${_PACKAGE_MANAGER}"Nginx
}
function InstallNodejs {
    # Can accept a integer representing a major version [4 to 8].
    # Defaults to version 8
    Install"${_PACKAGE_MANAGER}"Nodejs "$@"
}
function InstallPackages {
    # Install each given package individually with the detected installer.
    # BUG FIX: `$@` was unquoted, so package arguments containing spaces
    # were split into separate words.
    for arg in "$@"; do
        ${_INSTALLER_CMD} "$arg"
    done
}
function InstallPhp {
    # Can accept php version to install 5.5 or 7.0
    Install"${_PACKAGE_MANAGER}"Php "$@"
}
function InstallPip {
    # Bootstrap pip via get-pip.py, or upgrade it when already present.
    if ! command -v pip; then
        curl -Lo- https://bootstrap.pypa.io/get-pip.py | python
    else
        pip install -U pip
    fi
}
function InstallPython3 {
    Install"${_PACKAGE_MANAGER}"Python3 "$@"
}
function InstallQemu {
    Install"${_PACKAGE_MANAGER}"Qemu
}
function InstallLibvirt {
    Install"${_PACKAGE_MANAGER}"Qemu
    Install"${_PACKAGE_MANAGER}"Libvirt
}
function InstallTftp {
    # Can accept string "upload" to enable upload feature
    Install"${_PACKAGE_MANAGER}"Tftp "$@"
}
function InstallVagrant {
    # Can accept vagrant version defaults to 2.2.19
    Install"${_PACKAGE_MANAGER}"Vagrant "$@"
}
function InstallVirtualBox {
    # Default version 6.0
    # BUG FIX: was `"@"` (a literal at-sign), so the requested version was
    # never forwarded to the backend installer.
    Install"${_PACKAGE_MANAGER}"VirtualBox "$@"
}
function UninstallApache {
    Uninstall"${_PACKAGE_MANAGER}"Apache
}
function UninstallMysql {
    Uninstall"${_PACKAGE_MANAGER}"Mysql
}
# Poll systemd until the jenkins unit reports "Active: active".
# $1 - max attempts (1s apart); default 100. Calls PrintError on timeout.
function WaitForJenkinsSvr {
attemptCount=1
maxAttempts=${1:-100}
echo "Waiting jenkins status to be active..."
while [ "$attemptCount" -le "$maxAttempts" ]; do
echo "Attempt: $attemptCount of $maxAttempts"
tmp=$(systemctl status jenkins | grep -s 'Active: active')
# Not active yet -> count the attempt and sleep; active -> leave the loop.
[[ -z $tmp ]] && attemptCount=$(($attemptCount + 1)) && sleep 1s || break
done
# Re-check once more to decide between timeout error and success message.
[[ -z $(systemctl status jenkins | grep -s 'Active: active') ]] && PrintError "Timeout after $maxAttempts sec." || echo 'Jenkins Launched'
}
#=================================================
# APT PACKAGE FUNCTIONS
#=================================================
# Install custom 403/404/50x error pages and wire them into the default
# apache2 vhost (Debian/Ubuntu layout). Requires apache2 to be running.
function CustomizeaptApache {
# Verify apache is installed
msg="Apache server is not installed"
[[ -z $( ps aux | grep apache2 | grep -v grep ) ]] && PrintError "${msg}"
mkdir -p /var/www/html/customErrors
pushd '/var/www/html/customErrors/'
cat <<EOF > "c403.html"
<h1 style='color:red'>Permission Denied</h1>
<p>You have no permission to access this page :-(</p>
<p>If you think you should be able to see this page contact your admin.</p>
EOF
cat <<EOF > "c404.html"
<h1 style='color:red'>Error 404: Not found :-(</h1>
<p>No idea where that file is, sorry. Is it the correct URL?</p>
EOF
cat <<EOF > "c50x.html"
<h1>Oops! Something went wrong...</h1>
<p>We seem to be having some technical difficulties. Hang tight.</p>
EOF
popd
pushd '/etc/apache2/sites-available/'
# Strip the closing tag, append the ErrorDocument lines, then re-close it.
sed -i 's/<\/VirtualHost>//g' 000-default.conf
cat <<EOF >> "000-default.conf"
ErrorDocument 403 /customErrors/c403.html
ErrorDocument 404 /customErrors/c404.html
ErrorDocument 500 /customErrors/c50x.html
</VirtualHost>
EOF
popd
systemctl restart apache2
}
# Install apache2 on Debian/Ubuntu unless already running.
# $1 - optional listen port to use instead of 80.
function InstallaptApache {
apachePort=${1}
# Verify Apache is not already installed
if [[ -z $( ps aux | grep apache2 | grep -v grep ) ]]; then
apt-get install -y apache2 apache2-utils
# Disable directory browsing
pushd '/etc/apache2/'
sed -i 's/Options Indexes/Options/g' apache2.conf
# Add performance
sed -i '/#Supplementa/a\
HostnameLookups Off
' apache2.conf
# If non-default port specified (default is 80) configure it
if [ -n "$apachePort" ]; then
sed -i "s/\:80/\:$apachePort/g" sites-available/000-default.conf
sed -i "s/80/$apachePort/g" ports.conf
fi
rm sites-enabled/000-default.conf
ln -s '/etc/apache2/sites-available/000-default.conf' sites-enabled/
popd
service apache2 restart
fi
}
function InstallaptApacheOnNginx {
    # Install nginx listening on :80 as a reverse proxy, with Apache behind
    # it on $1 (default 8085) reachable under /apache.
    # Install nginx
    InstallNginx
    apachePort=${1:-8085}
    # The heredoc is unquoted so $apachePort expands now; nginx runtime
    # variables must stay escaped (\$...) to survive into the config file.
    cat <<EOF > "/etc/nginx/conf.d/infra.conf"
upstream apache_server {
    server 127.0.0.1:$apachePort fail_timeout=5;
}
server {
    listen 80;
    server_name localhost;
    location / {
        proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
        proxy_set_header Host \$http_host;
        proxy_redirect off;
    }
    location /apache {
        proxy_pass http://apache_server/;
        proxy_set_header Host \$http_host;
    }
}
EOF
    # BUG FIX: the /apache Host header above used an unescaped $http_host,
    # which expanded to an empty string at install time and produced the
    # invalid directive "proxy_set_header Host ;".
    InstallaptApache $apachePort
}
function InstallaptApacheMod {
    # Enable an Apache module on Debian/Ubuntu and restart apache2.
    # $1 - mod name: proxy | wsgi | ssl | fqdn.
    # Verify Apache is installed
    mod="${1}"
    em='Apache must be installed and running'
    [[ -z $( ps aux | grep apache2 | grep -v grep ) ]] && PrintError "$em"
    [[ -z "${mod}" ]] && PrintError "Expecting a mod name to install e.g. wsgi"
    if [[ $mod == 'proxy' ]]; then
        echo "mod-proxy Under Development. Need to be verified."
        # Enable modules
        a2enmod proxy
        a2enmod proxy_http headers proxy_connect rewrite version
        #a2enmod proxy_html proxy_balancer
        #a2enmod proxy_ajp rewrite deflate headers proxy_balancer
    elif [[ $mod == 'wsgi' ]]; then
        # FIX: added -y; every other apt-get install in this file is
        # non-interactive, and the missing flag stalled unattended runs.
        apt-get install -y libapache2-mod-wsgi
        a2enmod wsgi
        echo "mod-wsgi installed."
    elif [[ $mod == 'ssl' ]]; then
        a2enmod ssl
        echo "mod-ssl installed."
    elif [[ $mod == fqdn ]]; then
        echo "ServerName localhost" > /etc/apache2/conf-available/fqdn.conf
        a2enmod fqdn
    else
        PrintError "mod-$mod NOT found."
    fi
    service apache2 restart
}
function InstallaptDhcp {
    # Install the ISC DHCP server plus subnetcalc; when arguments are given
    # (subnet, netmask) forward them to the configuration helper.
    apt-get install -y isc-dhcp-server subnetcalc
    # FIX: quote "$@" so arguments are forwarded without re-splitting.
    [[ -n "$1" ]] && ConfigDhcpapt "$@"
}
# Write a minimal /etc/dhcp/dhcpd.conf for the given subnet, enable the
# server on all non-loopback interfaces and restart it.
# $1 - subnet (default 192.168.1.0); $2 - CIDR prefix length (default 24).
function ConfigDhcpapt {
echo "Calculating and setting minimal subnet config"
subnet=${1:-192.168.1.0}
netmask=${2:-24} # Max 254 hosts
calc=$(subnetcalc ${subnet}/${netmask})
# Parse the expanded netmask and the "{ first - last }" host range out of
# subnetcalc's human-readable output.
exp_netmask=$(echo "$calc" | grep Netmask | awk '{print $3}')
r1=$(echo "$calc" | grep Range | awk -F '{ ' '{print $2}' | awk -F ' }' '{print $1}' | awk -F ' - ' '{print $1}')
r2=$(echo "$calc" | grep Range | awk -F '{ ' '{print $2}' | awk -F ' }' '{print $1}' | awk -F ' - ' '{print $2}')
fName=/etc/dhcp/dhcpd.conf
# Keep a timestamped backup of any pre-existing config.
[[ -f $fName ]] && mv $fName $fName$(date '+%Y_%m_%d_%H:%M:%S')
cat <<EOF > $fName
# minimal sample $fName
default-lease-time 600;
max-lease-time 7200;
subnet $subnet netmask $exp_netmask {
range $r1 $r2;
option routers $r2;
option domain-name-servers $r1;
option domain-name "luzcazares.com";
}
EOF
echo 'Set net interfaces'
# Register every interface except loopback in /etc/default/isc-dhcp-server.
ifaces=$(ip -o link show | awk -F': ' '{print $2}')
for iface in $ifaces; do
[[ $iface != 'lo' ]] && sed -i "s/^INTERFACES\=\"/INTERFACES\=\"$iface /g" /etc/default/isc-dhcp-server
done
systemctl restart isc-dhcp-server
}
function InstallaptDns {
echo '<--- INSTALLING DNS --'
apt-get install -y bind9 dnsutils
}
function InstallaptFirewallUFW {
apt-get install -y ufw
}
# Install (or update) Jenkins from pkg.jenkins.io.
# $1 - optional 4-digit HTTP port to replace the default in /etc/default/jenkins.
function InstallaptJenkins {
apt-get -y -qq update
# if jenkins not installed then install it
# if instaled it will be updated
if [ ! -f /etc/init.d/jenkins ]; then
wget -qO- https://pkg.jenkins.io/debian/jenkins-ci.org.key | apt-key add -
echo 'deb http://pkg.jenkins.io/debian-stable binary/' > /etc/apt/sources.list.d/jenkins.list
apt-get -y -qq update
fi
apt-get install -y jenkins
# Use given port for Jenkins
[[ -n "$1" ]] && sed -i "s/^HTTP_PORT=..../HTTP_PORT=${1}/g" /etc/default/jenkins
WaitForJenkinsSvr 10
}
# Install logstash 6.x from the elastic.co apt repo (requires Java 8).
function InstallaptELKSLogstash {
repo="https://artifacts.elastic.co"
apt-get install -y openjdk-8-jdk
apt-get install -y apt-transport-https
wget -qO - ${repo}/GPG-KEY-elasticsearch | apt-key add -
source_="/etc/apt/sources.list.d/elastic-6.x.list"
echo "deb ${repo}/packages/6.x/apt stable main" | sudo tee -a $source_
apt-get update && sudo apt-get install -y logstash
systemctl start logstash.service
}
# Install mysql-server non-interactively. $1 - root DB password (required).
function InstallaptMysql {
# MySQL - Needs a password for DB_root user
[[ -z $1 ]] && PrintError "Mysql need a password for DB root user"
# Make sure not installed
if [[ -n $(dpkg --get-selections | grep mysql) ]]; then
echo "Mysql is already installed!!"
return
fi
echo "Installing MySQL server"
apt-get install -y python-setuptools python-mysqldb
# Pre-seed debconf so the installer never prompts for the root password.
debconf-set-selections <<< "mysql-server mysql-server/root_password password ${1}"
debconf-set-selections <<< "mysql-server mysql-server/root_password_again password ${1}"
apt-get install -y mysql-server
}
function HardeningaptMysql {
    # Apply an unattended equivalent of mysql_secure_installation:
    # drop anonymous users, remote root logins and the test database, and
    # load the validate_password plugin. $1 - root DB password.
    # Hardening mysql script output keep changing:
    #mysql_secure_installation
    #unable to run it unatended just with expect which is tight to string output
    mysql -uroot -p"${1}" <<MYSQL_SCRIPT
/* Remove anonymous users */
DELETE FROM mysql.user WHERE User='';
/* Remove remote root */
DELETE FROM mysql.user WHERE User='root' AND Host NOT IN ('localhost', '127.0.0.1', '::1');
/* Remove test database */
DROP DATABASE test;
/* Removing privileges on test database */
DELETE FROM mysql.db WHERE Db='test' OR Db='test\\_%';
/* Install validate_password plugin */
INSTALL PLUGIN validate_password SONAME 'validate_password.so';
FLUSH PRIVILEGES;
MYSQL_SCRIPT
}
function InstallaptNginx {
apt-get install -y nginx
}
# Install Node.js from the NodeSource setup script.
# $1 - optional major version (default 8).
function InstallaptNodejs {
version=${1:-8}
curl -sL https://deb.nodesource.com/setup_"${version}".x | bash -
apt-get install -y nodejs
}
#function InstallaptPackages {
# for arg in $@; do
# apt-get install $arg
# done
#}
function InstallaptPhp {
    # Install PHP with common Apache/MySQL extensions and publish info.php.
    # $1 - optional Ubuntu (14.x/16.x) or PHP (5.x/7.x) release; defaults
    # to the host's lsb_release value.
    release="${1:-$(lsb_release -rs)}"
    if [[ $release == 14.* || $release == 5.* ]]; then
        echo "Installing PHP 5.5"
        # BUG FIX: was `apt-geit install`, a typo that made this branch a
        # no-op (command not found).
        apt-get install -y php5 libapache2-mod-php5 php5-mcrypt php5-cli \
            php5-curl php5-mysql php5-ldap php5-gd php5-json
        php5enmod mcrypt
    #elif [[ $release == 16.* || $release == 7.* ]]; then
    else
        echo "Installing PHP 7.0"
        apt-get -y install php7.0 libapache2-mod-php7.0 php7.0-mcrypt \
            php-memcache php7.0-mysql php7.0-curl php7.0-gd php7.0-xmlrpc \
            php7.0-xsl php7.0-mbstring php-gettext
    fi
    # Move index.php as first apache site option
    cat <<EOF >> "/etc/apache2/mods-enabled/dir.conf"
<IfModule mod_dir.c>
    DirectoryIndex index.php index.html index.cgi index.pl index.xhtml index.htm
</IfModule>
EOF
    cat <<EOF > "/var/www/html/info.php"
<?php
phpinfo();
?>
EOF
    systemctl restart apache2
}
function InstallaptTftp {
    # Install the tftpd-hpa server and tftp client.
    # $1 - optional literal 'upload' to allow clients to create files.
    echo "Installing TFTP-HPA server."
    apt-get install -y tftpd-hpa tftp
    # BUG FIX: was `[[ $1 -eq 'upload' ]]`; -eq is an arithmetic compare,
    # both strings evaluated to 0, so uploads were ALWAYS enabled.
    if [[ "$1" == 'upload' ]]; then
        echo "Allowing upload into the server."
        cp /etc/default/tftpd-hpa /etc/default/tftpd-hpa.ORIGINAL
        sed -i '/TFTP_OPTIONS/c\' /etc/default/tftpd-hpa
        echo 'TFTP_OPTIONS="--secure --create"' >> /etc/default/tftpd-hpa
        chown -R tftp /var/lib/tftpboot
        service tftpd-hpa restart
        service tftpd-hpa status
        echo "Just a test file" >> /var/lib/tftpboot/test.file
    fi
}
# Download and install the Vagrant .deb from releases.hashicorp.com.
# $1 - optional version (default 2.2.19).
function InstallaptVagrant {
release="${1:-2.2.19}"
name="vagrant_${release}_x86_64.deb"
WriteLog "Installing Vagrant $release"
wget -q https://releases.hashicorp.com/vagrant/$release/$name
dpkg -i $name
}
function InstallaptVirtualBox {
    # Add Oracle's VirtualBox apt repository and install VirtualBox + dkms.
    # $1 - optional version (default 6.0).
    release="${1:-6.0}"
    # BUG FIX: was `$addr="..."`, which is not an assignment - addr stayed
    # empty and the sources.list entry was malformed.
    addr="http://download.virtualbox.org/virtualbox/debian"
    # NOTE(review): $VERSION_CODENAME assumes /etc/os-release has been
    # sourced into the environment - confirm before relying on it.
    echo "deb $addr $VERSION_CODENAME contrib" >> /etc/apt/sources.list
    base="https://www.virtualbox.org/download"
    wget -q "$base/oracle_vbox_2016.asc" -O- | apt-key add -
    wget -q "$base/oracle_vbox.asc" -O- | apt-key add -
    apt-get -y update
    # BUG FIX: the Debian package name is lower-case "virtualbox-<ver>".
    apt-get install -y "virtualbox-$release" dkms
}
function UninstallaptMysql {
    # Purge the MySQL packages together with their config and data files.
    echo "Uninstalling Mysql"
    apt-get -y purge mysql-server mysql-client mysql-common \
        mysql-server-core-5.5 mysql-client-core-5.5
    rm -rf /etc/mysql /var/lib/mysql
    apt-get -y autoremove
    apt-get autoclean
}
function UninstallaptApache {
    # Stop apache and purge every apache2* package and its configuration.
    service apache2 stop
    apt-get -y purge apache2*
    rm -rf /etc/apache2
    apt-get -y autoremove
    apt-get autoclean
}
function UninstallaptJenkins {
    # Stop jenkins, purge the package and drop its nginx site files.
    systemctl stop jenkins
    # FIX: added -y; without it the purge prompted for confirmation and
    # hung unattended runs (every other apt-get call here is -y).
    apt-get -y remove --purge jenkins
    rm -f /etc/nginx/sites-available/jenkins
    rm -f /etc/nginx/sites-enabled/jenkins
}
#=================================================
# YUM PACKAGE FUNCTIONS
#=================================================
#function InstallyumPackages {
# for arg in $@;do
# yum install $arg
# done
#}
# RHEL/CentOS variant of CustomizeApache: install custom 403/404/50x error
# pages and register them in httpd.conf. Requires httpd to be running.
function CustomizeyumApache {
# Verify apache is installed
msg="Apache server is not installed"
[[ -z $( ps aux | grep httpd | grep -v grep ) ]] && PrintError "${msg}"
mkdir -p /var/www/html/customErrors
pushd '/var/www/html/customErrors/'
cat <<EOF > "c403.html"
<h1 style='color:red'>Permission Denied</h1>
<p>You have no permission to access this page :-(</p>
<p>If you think you should be able to see this page contact your admin.</p>
EOF
cat <<EOF > "c404.html"
<h1 style='color:red'>Error 404: Not found :-(</h1>
<p>No idea where that file is, sorry. Is it the correct URL?</p>
EOF
cat <<EOF > "c50x.html"
<h1>Oops! Something went wrong...</h1>
<p>We seem to be having some technical difficulties. Hang tight.</p>
EOF
popd
pushd '/etc/httpd/conf'
# Append the ErrorDocument directives right after the commented sample.
sed -i '/#ErrorDocument.402/a\
ErrorDocument 403 /customErrors/c403.html\
ErrorDocument 404 /customErrors/c404.html\
ErrorDocument 500 /customErrors/c50x.html' httpd.conf
popd
systemctl restart httpd
}
# Install httpd on RHEL/CentOS unless already running.
# $1 - optional listen port to use instead of 80.
function InstallyumApache {
apachePort=${1}
# Verify Apache is not already installed
if [[ -z $( ps aux | grep httpd | grep -v grep ) ]]; then
yum install -y httpd
yum groupinstall -y web-server
# Disable directory browsing
pushd '/etc/httpd/conf/'
sed -i 's/Options Indexes/Options/g' httpd.conf
# Add performance
sed -i '/#Supplementa/a\
HostnameLookups Off
' httpd.conf
# set ServerName
sed -i 's/^#ServerName.*:/ServerName localhost:/g' httpd.conf
# If non-default port specified (default is 80) configure it
if [ -n "$apachePort" ]; then
sed -i "s/\:80/\:$apachePort/g" httpd.conf
fi
popd
systemctl enable httpd
systemctl restart httpd
fi
}
function InstallyumDns {
echo '<--- INSTALLING DNS --'
yum install bind bind-utils -y
}
# Install mysql-server from the MySQL community repo (EL7) and set the
# root password. $1 - root DB password (required).
function InstallyumMysql {
# MySQL - Needs a password for DB_root user
[[ -z $1 ]] && PrintError "Mysql need a password for DB root user"
# Make sure not already installed
if [[ -n $(rpm -qa | grep -i MySQL) ]]; then
echo "Mysql is already installed!!"
return
fi
echo "Installing MySQL server"
curl -OL http://repo.mysql.com/mysql-community-release-el7-5.noarch.rpm
rpm -ivh mysql-community-release-el7-5.noarch.rpm
yum update
yum install -y mysql-server
systemctl start mysql
/usr/bin/mysqladmin -u root password "${1}"
}
# Install elasticsearch from the official RPM, bind it to all interfaces
# and start it. $1 - optional version (default 6.5.4). Requires Java 8.
function InstallyumELKSElasticsearch {
repo="https://artifacts.elastic.co/downloads"
release="${1:-6.5.4}"
yum install -y java-1.8.0-openjdk
curl -L -O $repo/elasticsearch/elasticsearch-${release}.rpm
rpm -i elasticsearch-${release}.rpm
echo -e "\n\n---> Configuring elasticsearch"
echo '#Disable AssumeMP' >> /etc/elasticsearch/jvm.options
echo '-XX:-AssumeMP' >> /etc/elasticsearch/jvm.options
# Bind all interfaces so you can access elasticsearch locally and remotely
file='/etc/elasticsearch/elasticsearch.yml'
sed -i '/#network.host/a network.host: 0.0.0.0' $file
systemctl daemon-reload
systemctl enable elasticsearch.service
systemctl start elasticsearch.service
echo 'waiting for elasticsearch to start'
sleep 10
# Smoke-test the REST endpoint.
curl http://localhost:9200
}
# Install kibana from the official RPM and bind it to all interfaces.
# $1 - optional version (default 6.5.4).
function InstallyumELKSKibana {
repo="https://artifacts.elastic.co/downloads"
release="${1:-6.5.4}"
yum install -y java-1.8.0-openjdk
curl -L -O $repo/kibana/kibana-${release}-x86_64.rpm
rpm -i kibana-${release}-x86_64.rpm
echo -e "\n\n---> Configuring kibana"
# Bind all interfaces so you can access kibana locally and remotely
sed -i '/#server.host/a server.host: 0.0.0.0' /etc/kibana/kibana.yml
systemctl daemon-reload
systemctl enable kibana.service
systemctl start kibana.service
echo 'waiting for kibana to start'
sleep 10
}
# Install logstash from the official RPM with a minimal stdin->elasticsearch
# pipeline config. $1 - optional version (default 6.5.4).
function InstallyumELKSLogstash {
repo="https://artifacts.elastic.co/downloads"
release="${1:-6.5.4}"
yum install -y java-1.8.0-openjdk
curl -L -O $repo/logstash/logstash-${release}.rpm
rpm -i logstash-${release}.rpm
echo -e "\n\n---> Configuring logstash"
echo '#Disable AssumeMP' >> /etc/logstash/jvm.options
echo '-XX:-AssumeMP' >> /etc/logstash/jvm.options
cat <<EOF > '/etc/logstash/conf.d/logstash-simple.conf'
input { stdin { } }
output {
  elasticsearch { hosts => ["localhost:9200"] }
  stdout { codec => rubydebug }
}
EOF
systemctl daemon-reload
systemctl enable logstash.service
systemctl start logstash.service
echo 'waiting for logstash to start'
sleep 10
}
# ufw is not in the base repos; pull it from EPEL.
function InstallyumFirewallUFW {
yum install -y epel-release
yum install -y --enablerepo="epel" ufw
}
function InstallyumPhp {
    # Install PHP 7.x from the Remi repository plus common extensions and
    # publish info.php. $1 - optional version like 7.0/7.2 (default 7.0).
    release="${1:-7.0}"
    # BUG FIX: was `[[ $release[0] != 7 ]]`; "$release[0]" expands to e.g.
    # "7.0[0]", so the warning fired for every version.
    [[ ${release:0:1} != 7 ]] && echo "Currently only version 7.x is supported"
    echo "Installing PHP $release"
    yum install -y \
        https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
    yum install -y http://rpms.remirepo.net/enterprise/remi-release-7.rpm
    yum update
    # Make release 7.0=70 (and so on)
    release=$(echo $release |sed 's/\.//g')
    yum install -y yum-utils
    yum-config-manager --enable remi-php$release
    # Install it
    yum install -y php php-mcrypt php-cli php-gd php-curl php-mysql \
        php-ldap php-zip php-fileinfo
    # move index.php as apache first option
    cat <<EOF >> "/etc/httpd/conf.modules.d/00-dir.conf"
<IfModule mod_dir.c>
    DirectoryIndex index.php index.html index.cgi index.pl index.xhtml index.htm
</IfModule>
EOF
    cat <<EOF > "/var/www/html/info.php"
<?php
phpinfo();
?>
EOF
    # BUG FIX: on RHEL/CentOS the Apache unit is 'httpd' (see
    # InstallyumApache), not 'apache2'.
    systemctl restart httpd
}
function InstallyumLibvirt {
    # Install qemu/KVM plus the libvirt stack (and ruby-devel for tooling).
    # BUG FIX: packages were misspelled 'quemu'/'quemu-kvm', which made
    # yum fail to resolve them.
    yum install -y qemu qemu-kvm libvirt libvirt-client libvirt-devel
    yum install -y ruby-devel
}
# Build and install CPython from source (altinstall keeps the system
# python intact) and expose it as /usr/bin/python3 + pip3.
# $1 - optional version (default 3.7.4).
function InstallyumPython3 {
version="${1:-3.7.4}"
yum install -y gcc openssl-devel bzip2-devel libffi-devel readline-devel
pushd /usr/src
curl -LO https://www.python.org/ftp/python/${version}/Python-${version}.tgz
tar xvzf Python-${version}.tgz
rm -f Python-${version}.tgz
cd Python-${version}
./configure --enable-optimizations
make altinstall
popd
# Strip the last two chars to get the major.minor (e.g. 3.7.4 -> 3.7).
# NOTE(review): assumes a single-digit patch level - confirm for x.y.10+.
v_=${version::-2}
ln -s /usr/local/bin/python${v_} /usr/bin/python3
ln -s /usr/local/bin/pip${v_} /usr/bin/pip3
# Next lines broke yum since it does not support python3
# alternatives --install /usr/bin/python python /usr/bin/python2 50
# alternatives --install /usr/bin/python python /usr/local/bin/python${v_} 60
}
function InstallyumQemu {
    # Install qemu/KVM and genisoimage (for cloud-init seed ISOs).
    # BUG FIX: 'quemu-kvm' was misspelled and could not be resolved by yum.
    yum install -y qemu qemu-kvm genisoimage
}
# Download and install the Vagrant RPM from releases.hashicorp.com.
# $1 - optional version (default 2.2.19).
function InstallyumVagrant {
release="${1:-2.2.19}"
name=vagrant_${release}_x86_64.rpm
wget https://releases.hashicorp.com/vagrant/${release}/$name
yum install -y $name
}
# Add Oracle's VirtualBox yum repo and install VirtualBox + dkms (EPEL).
# $1 - optional version (default 6.0).
function InstallyumVirtualBox {
release="${1:-6.0}"
addr="http://download.virtualbox.org/virtualbox/rpm/rhel/virtualbox.repo"
wget -q $addr -P /etc/yum.repos.d
yum install -y --enablerepo=epel dkms
addr="https://www.virtualbox.org/download/oracle_vbox.asc"
wget -q $addr -O- | rpm --import -
yum install -y "VirtualBox-$release"
}
# Stop mysql and remove every mysql-related package.
function UninstallyumMysql {
systemctl stop mysql
yum remove -y $(rpm -qa|grep mysql)
yum clean all
}
| true |
42d23714510c2bc8bf84c10ddcbc764c51d77d09 | Shell | srivathsanmurali/.dotfiles | /bin/git-show-file | UTF-8 | 171 | 3.03125 | 3 | [] | no_license | #!/bin/sh
# Fail fast on errors and on unset variables.
set -eu
# $1 - commit-ish to inspect; defaults to HEAD.
COMMIT=${1:-"HEAD"}
# List the files touched by the commit and let the user pick one with fzf.
FILE=$(git diff-tree --no-commit-id --name-only -r "$COMMIT" | fzf --prompt="Select file:")
# Show only that file's change within the commit; less -r keeps raw
# control characters (e.g. git colors) intact.
git show "$COMMIT" -- "$FILE" | less -r
| true |
128a8395b5afbd3dea8fcac2fb966b7ee1c09990 | Shell | ohsu-cedar-comp-hub/ChIP-seq | /sbatch/61_sbatchPeakPlotQC.sh | UTF-8 | 2,000 | 3.6875 | 4 | [] | no_license | #!/bin/bash
### For each sample, run the model.r script and also split the resulting pdf into two separate files.
#SBATCH --partition exacloud # partition (queue)
#SBATCH --nodes 1 # number of nodes
#SBATCH --ntasks 2 # number of "tasks" to be allocated for the job
#SBATCH --ntasks-per-core 1 # Max number of "tasks" per core.
#SBATCH --cpus-per-task 1 # Set if you know a task requires multiple processors
#SBATCH --mem-per-cpu 12000 # Memory required per allocated CPU (mutually exclusive with mem)
##SBATCH --mem 16000 # memory pool for each node
#SBATCH --time 0-24:00 # time (D-HH:MM)
#SBATCH --output peakPlot_%j.out # Standard output
#SBATCH --error peakPlot_%j.err # Standard error
### SET I/O VARIABLES
# NOTE(review): relies on $sdata being exported by the cluster environment
# before this job runs - confirm.
IN=$sdata/data/50_peaks # Directory containing all input files. Should be one job per file
# OUT is defined for symmetry but is not referenced below.
OUT=$sdata/data/50_peaks # Path to output directory
### Record slurm info
echo "SLURM_JOBID: " $SLURM_JOBID
echo "SLURM_ARRAY_TASK_ID: " $SLURM_ARRAY_TASK_ID
echo "SLURM_ARRAY_JOB_ID: " $SLURM_ARRAY_JOB_ID
### Function
# Extract pages $2..$3 of PDF $1 into $4 using ghostscript.
# NOTE(review): the usage check below only prints a message - the `exit 1`
# is commented out, so the function still runs with missing arguments;
# presumably deliberate to avoid killing the batch job, but confirm.
function pdfsplit ()
{
if [ $# -lt 4 ]; then
echo "Usage: pdfsplit input.pdf first_page last_page output.pdf"
echo "Function Taken from Westley Weimer - www.cs.virginia.edu/~weimer/pdfsplit/pdfsplit"
# exit 1
fi
# `yes |` auto-answers any gs prompt; all gs output is discarded.
yes | gs -dBATCH -sOutputFile="$4" -dFirstPage=$2 -dLastPage=$3 -sDEVICE=pdfwrite "$1" >& /dev/null
}
cd "$IN" || exit 1
# FIX: iterate the glob directly instead of parsing `ls` output, which
# breaks on unusual filenames; expansions are quoted throughout.
for file in "$IN"/*.r; do
	## Skip cleanly when the glob matched nothing
	[ -e "$file" ] || continue
	## Get base
	BASE=${file%_model.r}
	BASE=$(basename "$BASE")
	## Create pdf (per the header: <BASE>_model.r renders <BASE>_model.pdf)
	/usr/bin/Rscript "$file"
	## Split pdf: page 1 = peak model, page 2 = cross-correlation
	pdfsplit "$IN/${BASE}_model.pdf" 1 1 "$IN/${BASE}_peakModel.pdf"
	pdfsplit "$IN/${BASE}_model.pdf" 2 2 "$IN/${BASE}_crossCor.pdf"
	## Remove original
	rm "$IN/${BASE}_model.pdf"
done
| true |
4fb2a506060968075e265076a49dc455fa9d68bb | Shell | jayjayswal/bs-selenium-assignment | /test.sh | UTF-8 | 4,466 | 3.046875 | 3 | [] | no_license | #!/bin/bash
# NOTE(review): hard-coded BrowserStack credentials - these should come
# from the environment (e.g. $BROWSERSTACK_USERNAME/$BROWSERSTACK_KEY)
# and be rotated, since they are committed in plain text.
AUTOMATE_USERNAME="jayjayswal_D9npvR"
AUTOMATE_ACCESS_KEY="s9UoshQgmUN79dshsrLJ"
# --browserstack selects the remote hub; otherwise a local Selenium server
# on :4444 is used. The two hubs return element IDs under different JSON
# keys, hence the per-mode ELEMENT_ID_KEY and XPath below.
# NOTE(review): $1 is unquoted inside [ ] - relies on bash's == extension.
if [ -n "$1" ] && [ $1 == '--browserstack' ]; then
SELENIUM_SERVER_PATH="https://${AUTOMATE_USERNAME}:${AUTOMATE_ACCESS_KEY}@hub-cloud.browserstack.com"
SELENIUM_CAPABILITIES="{
    \"desiredCapabilities\": {
    \"browserName\" : \"chrome\",
    \"os_version\" : \"Sierra\",
    \"resolution\" : \"1920x1080\",
    \"browser_version\" : \"65.0\",
    \"os\" : \"OS X\",
    \"name\" : \"Bstack - Selenium assignment phase 2\",
    \"build\" : \"Bstack Selenium assignment build 1\"
    }
}"
ELEMENT_ID_KEY="ELEMENT"
SEARCH_BUTTON_X_PATH="/html/body/div[1]/div[3]/form/div[1]/div[1]/div[2]/div[2]/div[2]/center/input[1]"
else
SELENIUM_SERVER_PATH="http://localhost:4444"
SELENIUM_CAPABILITIES="{
    \"desiredCapabilities\": {
        \"browserName\" : \"chrome\"
    }
}"
ELEMENT_ID_KEY="element-6066-11e4-a52e-4f735466cecf"
SEARCH_BUTTON_X_PATH="/html/body/div[1]/div[3]/form/div[1]/div[1]/div[3]/center/input[1]"
fi
# Abort the script when the previous command failed ($? != 0), or - when a
# JSON response is supplied in $2 - when its .status field is non-zero.
# $1 - error message to print; $2 - optional JSON body (parsed with jq).
CheckLastCommandStatus() {
    retVal=$?
    if [ "$retVal" -ne 0 ]; then
        echo "$1"
        exit "$retVal"
    fi
    if [ -n "${2:-}" ]; then
        status=$(echo "$2" | jq -r '.status')
        # NOTE(review): assumes a numeric JSONWire .status field; W3C
        # responses carry no such field - confirm against the hub in use.
        if [ "$status" -ne 0 ]; then
            echo "$2"
            exit 1
        fi
    fi
}
# Verify jq (used to parse WebDriver JSON responses) is installed; abort
# with a helpful message otherwise.
CheckPrerequisites() {
    # FIX: use the POSIX builtin `command -v` instead of the external,
    # non-portable `which`.
    command -v jq >/dev/null 2>&1
    CheckLastCommandStatus "'jq' is not installed, Please install and try again"
}
# checking prerequisites
CheckPrerequisites
# starting a session
RES=$(curl -sS --location --request POST $SELENIUM_SERVER_PATH'/wd/hub/session' \
--header 'Content-Type: application/json' \
--data-raw "$SELENIUM_CAPABILITIES"
)
CheckLastCommandStatus "Selenium failed to start session"
SELENIUM_SESSION_ID=$(echo $RES | jq -r '.sessionId')
# SELENIUM_SESSION_ID="c26992c29c2e81580e6d72f59ab84bb1"
# NOTE(review): this checks jq's exit status (the previous command), not
# whether the extracted id is null/empty - confirm that is intended.
CheckLastCommandStatus "Selenium session id not found"
echo "Selenium session ID:" $SELENIUM_SESSION_ID
#opning google in
RES=$(curl -sS --location --request POST $SELENIUM_SERVER_PATH'/wd/hub/session/'$SELENIUM_SESSION_ID'/url' \
--header 'Content-Type: application/json' \
--data-raw '{
    "url" : "https://www.google.com/"
}')
CheckLastCommandStatus "Cannot open google.com in session" "$RES"
#fetch input element
# Locate the search text box by XPath; the element id is returned under
# $ELEMENT_ID_KEY (JSONWire vs W3C key chosen in the config block).
RES=$(curl -sS --location --request POST $SELENIUM_SERVER_PATH'/wd/hub/session/'$SELENIUM_SESSION_ID'/element' \
--header 'Content-Type: application/json' \
--data-raw '{
    "using" : "xpath",
    "value": "/html/body/div[1]/div[3]/form/div[1]/div[1]/div[1]/div/div[2]/input"
}')
CheckLastCommandStatus "Error in finding input element"
GOOGLE_TEXT_INPUT_ID=$(echo $RES | jq -r '.value | .["'$ELEMENT_ID_KEY'"]')
CheckLastCommandStatus "Error in finding input element id"
echo "google text input ID:" $GOOGLE_TEXT_INPUT_ID
#fetch submit button element
RES=$(curl -sS --location --request POST $SELENIUM_SERVER_PATH'/wd/hub/session/'$SELENIUM_SESSION_ID'/element' \
--header 'Content-Type: application/json' \
--data-raw '{
    "using" : "xpath",
    "value": "'$SEARCH_BUTTON_X_PATH'"
}')
CheckLastCommandStatus "Error in finding submit button element"
GOOGLE_SUBMIT_BUTTON_ID=$(echo $RES | jq -r '.value | .["'$ELEMENT_ID_KEY'"]')
CheckLastCommandStatus "Error in finding submit button element id"
echo "google submit button ID:" $GOOGLE_SUBMIT_BUTTON_ID
#enter name in input text
RES=$(curl -sS --location --request POST $SELENIUM_SERVER_PATH'/wd/hub/session/'$SELENIUM_SESSION_ID'/element/'$GOOGLE_TEXT_INPUT_ID'/value' \
--header 'Content-Type: application/json' \
--data-raw '{
    "value" : ["Jay Jayswal"]
}')
CheckLastCommandStatus "Error in entring name in input box"
#click submit button
RES=$(curl -sS --location --request POST $SELENIUM_SERVER_PATH'/wd/hub/session/'$SELENIUM_SESSION_ID'/element/'$GOOGLE_SUBMIT_BUTTON_ID'/click')
CheckLastCommandStatus "Error clicking submit button"
#fetch title of new page
RES=$(curl -sS --location --request GET $SELENIUM_SERVER_PATH'/wd/hub/session/'$SELENIUM_SESSION_ID'/title')
CheckLastCommandStatus "Error while fetching title"
PAGE_TITLE=$(echo $RES | jq -r '.value')
CheckLastCommandStatus "Error while fetching title from response"
echo "The title of page is:" $PAGE_TITLE
#delete session
RES=$(curl -sS --location --request DELETE $SELENIUM_SERVER_PATH'/wd/hub/session/'$SELENIUM_SESSION_ID'')
CheckLastCommandStatus "Error while deleting session"
| true |
2a19f7c91a865e540e7222c3d3c8f407a9bd228f | Shell | lborguetti/goss-cis-benchmark | /scripts/test_section_05_level1/5-2-3.sh | UTF-8 | 1,307 | 4.0625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
#
# 5.2.3 Ensure permissions on SSH public host key files are configured
# configured (Automated)
#
# Description:
# An SSH public key is one of two files used in SSH public key authentication.
# In this authentication method, a public key is a key that can be used for
# verifying digital signatures generated using a corresponding private key. Only
# a public key that corresponds to a private key will be able to authenticate
# successfully.
#
# Rationale:
# If a public host key file is modified by an unauthorized user, the SSH service
# may be compromised.

set -o errexit
set -o nounset

file=""
files=""
stat_file=""
status="0"
t="0"

# CIS expects public host key files to be mode 0644 *or more restrictive* and
# owned by root:root.  The previous version only accepted exactly
# 600-0-0-root-root, which flagged compliant default installs (644) as
# failures.
#
# Arguments: $1 - octal mode as printed by `stat -c %a` (e.g. "644")
# Returns:   0 when the mode is 644 or more restrictive, 1 otherwise
is_mode_ok() {
  case "$1" in
    # owner: at most rw (0/2/4/6); group/other: at most r (0/4);
    # anything with execute or special bits (4-digit mode) fails.
    [0246][04][04]) return 0 ;;
    *) return 1 ;;
  esac
}

files=$(find /etc/ssh -xdev -type f -name 'ssh_host_*_key.pub') || status="1"
if [ "${status}" -eq 0 ]; then
  if [ -n "${files}" ]; then
    # Word-splitting on ${files} is intentional: host key paths contain no
    # whitespace.
    for file in ${files}; do
      if ! stat_file=$(stat -c "%a-%u-%g-%U-%G" "${file}"); then
        # A stat failure used to be silently skipped; surface it and treat
        # the file as non-compliant so the audit cannot pass by accident.
        echo "cannot stat ${file}" >&2
        t="1"
        continue
      fi
      mode="${stat_file%%-*}"
      ownership="${stat_file#*-}"
      if [ "${ownership}" = "0-0-root-root" ] && is_mode_ok "${mode}"; then
        /bin/true
      else
        echo "${file} ownership or permissions is wrong"
        t="1"
      fi
    done
  fi
  if [ "${t}" -eq 0 ]; then
    echo "Ownership and permissions of public key are correct"
  fi
else
  # find itself failed (e.g. /etc/ssh does not exist): nothing to audit,
  # report a pass exactly as the original did.
  echo "Ownership and permissions of public key are correct"
fi
| true |
583ce98e399527271db4a2e08f2bc65f8af2ff3f | Shell | JaredFern/ImportanceSampling | /seqmodel/script/sortuniq.sh | UTF-8 | 78 | 2.515625 | 3 | [] | no_license | #!/bin/bash
# Sort and deduplicate the lines of the file named by $1, dropping blank
# lines; the untouched original content is kept alongside as "$1.all".
#
# Arguments: $1 - path of the file to rewrite in place
dedupe_file() {
    # Preserve the raw input next to the original name.
    mv -- "$1" "$1.all"
    # sort -u is equivalent to sort | uniq; filtering the blank lines in the
    # same pass replaces the original second in-place sed over the result.
    sort -u -- "$1.all" | sed '/^$/d' > "$1"
}
dedupe_file "$@"
| true |
3feeb3896f865b9f3f3e1f15f8db207a8f9c8be6 | Shell | qbeightol/unix-tools-and-scripting | /a2sub/commword.sh | UTF-8 | 259 | 2.8125 | 3 | [] | no_license | #!/bin/bash
# Count word frequencies across a folder of text files and write the N most
# common words to an output file, one "<count> <word>" pair per line
# (uniq -c format, highest count first).
#
# Arguments (all optional; the defaults reproduce the original behaviour):
#   $1 - directory containing the input text files   (default: tweets)
#   $2 - output file                                 (default: most_common.txt)
#   $3 - how many top words to keep                  (default: 10)
#
# NOTE(review): the original author asked what went wrong with deleting
# punctuation via an unquoted `tr -d [:punct:]` — most likely the unquoted
# [:punct:] was glob-expanded by the shell against files in the current
# directory; the set must be quoted ('[:punct:]') — confirm.
top_words() {
    local src_dir=${1:-tweets}
    local out=${2:-most_common.txt}
    local top_n=${3:-10}
    # tr -s squeezes runs of spaces and the blank-line filter drops empty
    # "words", which previously could occupy slots in the top-N counts.
    cat "$src_dir"/* | tr -s ' ' '\n' | sed '/^$/d' | sort | uniq -c | sort -rn | head -"$top_n" > "$out"
}
top_words "$@"
| true |
5ea9ad86d04a2c52537fc5dc264ef371b0358a29 | Shell | ayosec/curljson | /curljson | UTF-8 | 1,884 | 4.21875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#
# Wrapper to use JSON with cURL. It sends a regular request using cURL, and
# pretty-prints the response if the Content-Type is JSON.
#
# If the stdin is not a TTY, it sends its content as the request body.
#
# Usage:
#
#   $ curljson [options] URL
#   $ curljson [options] URL < body.json
#
# [options] is any cURL option. See https://curl.se/docs/manpage.html for details.
# Requires: curl, jq (for pretty-printing JSON bodies to a TTY).

set -euo pipefail

# Scratch files for the response headers, the response body, and cURL's
# --write-out metadata line; all are removed by the EXIT trap below.
HEADERS_FILE=$(mktemp)
BODY_FILE=$(mktemp)
METADATA_FILE=$(mktemp)

cleanup() {
  rm -f "$HEADERS_FILE" "$BODY_FILE" "$METADATA_FILE"
}

trap cleanup EXIT

# Execute cURL.
#
# If stdin is not a TTY, use it as the request body.

# The CURLJSONMETA sentinel lets us recognise our own --write-out line later;
# the fields are ';'-separated and the format ends with ';' so the last field
# is delimiter-terminated too.
CURL_ARGS=(
  --silent
  --write-out "CURLJSONMETA;%{time_total};%{size_download};%{content_type};"
  --dump-header "$HEADERS_FILE"
  --output "$BODY_FILE"
)

if [ ! -t 0 ]
then
  CURL_ARGS+=(
    --header "Content-Type: application/json"
    --data-binary @-
  )
fi

# Body goes to BODY_FILE (--output), headers to HEADERS_FILE (--dump-header),
# so stdout carries only the --write-out metadata.
curl "${CURL_ARGS[@]}" "$@" > "$METADATA_FILE"

# Read metadata from the --write-out output.

# Split on ';' and keep only the first 4 fields; because of the trailing ';'
# in the format this also truncates any "; charset=..." suffix off the
# content type, leaving just the media type in METADATA[3].
readarray -d ';' -n 4 -t METADATA < "$METADATA_FILE"

if [ "${METADATA[0]}" = CURLJSONMETA ]
then
  CONTENT_TYPE=${METADATA[3]}

  if [ -t 2 ]
  then
    # Stats line in italics, to stderr so it never pollutes piped output.
    printf '# \e[3m%s bytes in %s seconds.\e[m\n\n' "${METADATA[2]}" "${METADATA[1]}" 1>&2
  fi
else
  # The user added their own --write-out option,
  # so we can't parse the output from cURL.
  #
  # Try to extract Content-Type from HEADERS_FILE.

  cat "$METADATA_FILE"

  # NOTE(review): -v IGNORECASE=1 is a GNU awk feature; on mawk/BSD awk the
  # header match would be case-sensitive — confirm the target platform.
  CONTENT_TYPE=$(
    awk < "$HEADERS_FILE" -v IGNORECASE=1 '
      /^content-type: / {
        sub("[;\\r].*", "", $2); print $2
      }
    '
  )
fi

# Print headers only if stderr is a TTY.
# (cyan for "Name:" prefixes, bold for the HTTP status line)
if [ -t 2 ]
then
  sed < "$HEADERS_FILE" 1>&2 '
    s/^\(\S\+:\)/\x1b[36m\1\x1b[m/

    /^HTTP\/[0-9]/ {
      s/.*/\x1b[1m\0\x1b[m/
    }
  '
fi

# Print body.
# Pretty-print through jq only when writing JSON to a terminal; otherwise
# pass the body through untouched so pipes receive the raw bytes.
if [ -t 1 ] && [ "$CONTENT_TYPE" = application/json ]
then
  jq . "$BODY_FILE"
else
  cat "$BODY_FILE"
fi
| true |
9b711f1bbcfbd39dea3c43658f6540424148e5e8 | Shell | iandeeph/portal-pulsa | /script/newCekPulsa.sh | UTF-8 | 63,976 | 3.1875 | 3 | [] | no_license |
#! /bin/bash
# clear
#===============================================================================
# Database configuration
# NOTE(review): credentials are hard-coded and passed on the mysql command
# line (visible in `ps`); consider a protected option file (~/.my.cnf).
#===============================================================================
HOST='1.1.1.200'
USER='root'
PASSWORD='c3rmat'
#===============================================================================
# Package prices per provider; when the remaining balance drops below the
# package price, a top-up request is sent to the credit buyer ("Tukang Pulsa").
#===============================================================================
HARGA_PAKET_SIMPATI=12500
HARGA_PAKET_XL=132000
HARGA_PAKET_THREE=5000
#===============================================================================
# Parameters for posting notifications to Slack
#===============================================================================
CHANNEL="#cermati_pulsa"
USERNAME="Pika Pulsa"
ICONEMOJI=":pika-shy:"
ICONEMOJI2=":pikapika:"
#===============================================================================
# Package expiry dates for Simpati/Telkomsel:
# - when the date equals today, the package is renewed;
# - after a renewal the date is moved forward by the package's validity period.
# Indosat has no entry because it is renewed daily as long as the balance lasts.
#===============================================================================
#===============================================================================
# Load every provider row from the database into per-provider arrays.
# NOTE(review): word-splitting the mysql output into a flat array assumes no
# column value ever contains whitespace — confirm for the provider table.
#===============================================================================
#===============================================================================
# TELKOMSEL
#===============================================================================
telkomselResult=($(mysql dbpulsa -h$HOST -u$USER -p$PASSWORD -Bse "select namaProvider, noProvider, host, span, hargaPaket, expDatePaket, caraCekPulsa, caraAktivasi from provider where namaProvider like 'Telkomsel%' order by namaProvider;"))
cntTelkomselElm=8
cntTelkomsel=${#telkomselResult[@]}
# NOTE(review): the "+1" makes this ceiling-like; cntTelkomsel should already
# be a multiple of 8, in which case plain integer division gives the same
# result — confirm.
telkomselSet=$(((cntTelkomsel+1)/cntTelkomselElm))
for (( i=1 ; i<=telkomselSet ; i++ ))
do
	# x = offset of row i (8 columns per row, rows are 1-based).
	x=$((cntTelkomselElm * (i-1)))
	telkomselNama[$i]=${telkomselResult[$((x + 0 ))]};
	telkomselNo[$i]=${telkomselResult[$((x + 1))]};
	telkomselHost[$i]=${telkomselResult[$((x + 2))]};
	telkomselSpan[$i]=${telkomselResult[$((x + 3))]};
	telkomselHargaPaket[$i]=${telkomselResult[$((x + 4))]};
	telkomselExpDatePaket[$i]=${telkomselResult[$((x + 5))]};
	telkomselCaraCekPulsa[$i]=${telkomselResult[$((x + 6))]};
	telkomselCaraAktivasi[$i]=${telkomselResult[$((x + 7))]};
done
#===============================================================================
# XL
#===============================================================================
XLResult=($(mysql dbpulsa -h$HOST -u$USER -p$PASSWORD -Bse "select namaProvider, noProvider, host, span, hargaPaket, expDatePaket, caraCekPulsa, caraAktivasi from provider where namaProvider like 'XL%' order by namaProvider;"))
cntXLElm=8
cntXL=${#XLResult[@]}
XLSet=$(((cntXL+1)/cntXLElm))
for (( i=1 ; i<=XLSet ; i++ ))
do
	x=$((cntXLElm * (i-1)))
	XLNama[$i]=${XLResult[$((x + 0 ))]};
	XLNo[$i]=${XLResult[$((x + 1))]};
	XLHost[$i]=${XLResult[$((x + 2))]};
	XLSpan[$i]=${XLResult[$((x + 3))]};
	XLHargaPaket[$i]=${XLResult[$((x + 4))]};
	XLExpDatePaket[$i]=${XLResult[$((x + 5))]};
	XLCaraCekPulsa[$i]=${XLResult[$((x + 6))]};
	XLCaraAktivasi[$i]=${XLResult[$((x + 7))]};
done
#===============================================================================
# THREE (7 columns: no caraAktivasi)
#===============================================================================
threeResult=($(mysql dbpulsa -h$HOST -u$USER -p$PASSWORD -Bse "select namaProvider, noProvider, host, span, hargaPaket, expDatePaket, caraCekPulsa from provider where namaProvider like 'Three%' order by namaProvider;"))
cntThreeElm=7
cntThree=${#threeResult[@]}
threeSet=$(((cntThree+1)/cntThreeElm))
for (( i=1 ; i<=threeSet ; i++ ))
do
	x=$((cntThreeElm * (i-1)))
	threeNama[$i]=${threeResult[$((x + 0 ))]};
	threeNo[$i]=${threeResult[$((x + 1))]};
	threeHost[$i]=${threeResult[$((x + 2))]};
	threeSpan[$i]=${threeResult[$((x + 3))]};
	threeHargaPaket[$i]=${threeResult[$((x + 4))]};
	threeExpDatePaket[$i]=${threeResult[$((x + 5))]};
	threeCaraCekPulsa[$i]=${threeResult[$((x + 6))]};
done
cnt=${#telkomselExpDatePaket[@]} # total number of Telkomsel rows
for (( i=1 ; i<=${cnt} ; i++ )) # loop over all rows
do
	telkomselExpDatePaket[$i]=${telkomselExpDatePaket[$i]//[-]/} # strip "-" to turn yyyy-mm-dd into yyyymmdd
done
#===============================================================================
# Today's date/time in the formats used throughout the script
#===============================================================================
NOW=$(date +%Y%m%d)
currentTime=$(date +"[ %Y-%m-%d %H:%M:%S ]")
mysqlDateNow=$(date +"%Y-%m-%d %H:%M:%S")
#===============================================================================
# Phone numbers of the credit buyer a.k.a. "Karin" and the typist a.k.a. "ian"
#===============================================================================
TUKANGPULSA=081381171337
TUKANGKETIK=08992112203
#===============================================================================
# (historical) hard-coded number arrays per provider, kept for reference;
# the ordering followed the SIM slot position on the openvox gateway.
#===============================================================================
# TELKOMSEL=(081212232674 081212232835 081212232617 081319468847 082112592932 081213374483 081295882084 081295741478 081212232638)
# XL=(081807184805 087886347632 087780867200 087883072681)
# INDOSAT=(085710250739 085710250748 081513779454)
# THREE=(089629783240 089629779562 089629789574)
if [ -t 1 ] ; then # detect terminal vs cronjob: cron has no TTY, so tput would fail
	#===============================================================================
	# Text colours that make the log output easier to read
	#===============================================================================
	red=`tput setaf 1`
	green=`tput setaf 2`
	yellow=`tput setaf 3`
	reset=`tput sgr0`
else
	red=''
	green=''
	yellow=''
	reset=''
fi
#===============================================================================
# Restart the openvox gateways to make the USSD balance checks reliable
# (in our experience USSD occasionally fails until the gateway reboots).
# The restart is triggered by SMS-ing 'reboot system <password>' to one
# number on each openvox module (3.3.3.2, 3.3.3.3, 3.3.3.4 & 3.3.3.5).
# NOTE(review): the 2nd and 3rd INSERTs both target telkomselNo[5];
# presumably one was meant for another module's number (e.g. telkomselNo[9]
# or telkomselNo[13]) — confirm.
#===============================================================================
echo "$currentTime - Restarting openvox..."
echo "INSERT INTO outbox (DestinationNumber, TextDecoded, CreatorID) VALUES ('${telkomselNo[1]}', 'reboot system c3rmat', 'BashAdmin');"| mysql -h$HOST -u$USER -p$PASSWORD sms
echo "INSERT INTO outbox (DestinationNumber, TextDecoded, CreatorID) VALUES ('${telkomselNo[5]}', 'reboot system c3rmat', 'BashAdmin');"| mysql -h$HOST -u$USER -p$PASSWORD sms
echo "INSERT INTO outbox (DestinationNumber, TextDecoded, CreatorID) VALUES ('${telkomselNo[5]}', 'reboot system c3rmat', 'BashAdmin');"| mysql -h$HOST -u$USER -p$PASSWORD sms
echo "INSERT INTO outbox (DestinationNumber, TextDecoded, CreatorID) VALUES ('${telkomselNo[17]}', 'reboot system c3rmat', 'BashAdmin');"| mysql -h$HOST -u$USER -p$PASSWORD sms
echo "INSERT INTO outbox (DestinationNumber, TextDecoded, CreatorID) VALUES ('${XLNo[1]}', 'reboot system c3rmat', 'BashAdmin');"| mysql -h$HOST -u$USER -p$PASSWORD sms
echo "INSERT INTO outbox (DestinationNumber, TextDecoded, CreatorID) VALUES ('${XLNo[5]}', 'reboot system c3rmat', 'BashAdmin');"| mysql -h$HOST -u$USER -p$PASSWORD sms
echo "INSERT INTO outbox (DestinationNumber, TextDecoded, CreatorID) VALUES ('${XLNo[9]}', 'reboot system c3rmat', 'BashAdmin');"| mysql -h$HOST -u$USER -p$PASSWORD sms
echo "INSERT INTO outbox (DestinationNumber, TextDecoded, CreatorID) VALUES ('${threeNo[1]}', 'reboot system c3rmat', 'BashAdmin');"| mysql -h$HOST -u$USER -p$PASSWORD sms
#===============================================================================
# Give the openvox gateways time to come back up after the reboot.
#===============================================================================
sleep 3m
#===============================================================================
# Delete known_hosts so every SSH session re-creates its RSA entry; this
# avoids first-connection host-key failures against the gateways.
#===============================================================================
echo $(rm -rf ~/.ssh/known_hosts)
#===============================================================================
# Balance-check helper functions are defined below.
#===============================================================================
#===============================================================================
# Balance-check helpers, one per Telkomsel SIM slot.
#
# cek_pulsa_telkomsel runs the USSD balance-check code on one gateway slot
# and stores the raw reply in the global $telkomsel.  The numbered wrappers
# keep their historical names because the script dispatches them dynamically
# as "telkomselFx$numSimpati".
#===============================================================================
cek_pulsa_telkomsel()
{
	# $1 - slot index into the telkomsel* arrays loaded from the DB.
	local idx=$1
	# Wipe known_hosts so a changed host key can never block the login; the
	# original code printed a blank line here (echo $(rm ...)), which is
	# preserved for log-format compatibility.
	rm -rf ~/.ssh/known_hosts
	echo
	telkomsel=$(sshpass -padmin ssh -o StrictHostKeyChecking=no "admin@${telkomselHost[$idx]}" -p12345 "asterisk -rx 'gsm send ussd ${telkomselSpan[$idx]} ${telkomselCaraCekPulsa[$idx]}'")
}
telkomselFx1() { cek_pulsa_telkomsel 1; }
telkomselFx2() { cek_pulsa_telkomsel 2; }
telkomselFx3() { cek_pulsa_telkomsel 3; }
telkomselFx4() { cek_pulsa_telkomsel 4; }
telkomselFx5() { cek_pulsa_telkomsel 5; }
telkomselFx6() { cek_pulsa_telkomsel 6; }
telkomselFx7() { cek_pulsa_telkomsel 7; }
telkomselFx8() { cek_pulsa_telkomsel 8; }
telkomselFx9() { cek_pulsa_telkomsel 9; }
telkomselFx10() { cek_pulsa_telkomsel 10; }
telkomselFx11() { cek_pulsa_telkomsel 11; }
telkomselFx12() { cek_pulsa_telkomsel 12; }
telkomselFx13() { cek_pulsa_telkomsel 13; }
telkomselFx14() { cek_pulsa_telkomsel 14; }
telkomselFx15() { cek_pulsa_telkomsel 15; }
telkomselFx16() { cek_pulsa_telkomsel 16; }
telkomselFx17() { cek_pulsa_telkomsel 17; }
#===============================================================================
# Balance-check helpers, one per XL SIM slot.
#
# cek_pulsa_xl runs the USSD balance-check code on one gateway slot and
# stores the raw reply in the global $xl.  The numbered wrappers keep their
# historical names because the script dispatches them dynamically.
#===============================================================================
cek_pulsa_xl()
{
	# $1 - slot index into the XL* arrays loaded from the DB.
	local idx=$1
	# The original code paced every XL check with a one-minute wait; the
	# pacing is preserved here.
	sleep 1m
	# Wipe known_hosts (the original printed a blank line via echo $(rm ...);
	# kept for log-format compatibility).
	rm -rf ~/.ssh/known_hosts
	echo
	xl=$(sshpass -padmin ssh -o StrictHostKeyChecking=no "admin@${XLHost[$idx]}" -p12345 "asterisk -rx 'gsm send ussd ${XLSpan[$idx]} ${XLCaraCekPulsa[$idx]}'")
}
xlFx1() { cek_pulsa_xl 1; }
xlFx2() { cek_pulsa_xl 2; }
xlFx3() { cek_pulsa_xl 3; }
xlFx4() { cek_pulsa_xl 4; }
xlFx5() { cek_pulsa_xl 5; }
xlFx6() { cek_pulsa_xl 6; }
xlFx7() { cek_pulsa_xl 7; }
xlFx8() { cek_pulsa_xl 8; }
xlFx9() { cek_pulsa_xl 9; }
xlFx10() { cek_pulsa_xl 10; }
xlFx11() { cek_pulsa_xl 11; }
xlFx12() { cek_pulsa_xl 12; }
# indosatFx1()
# {
# echo $(rm -rf ~/.ssh/known_hosts)
# indosat=$(sshpass -padmin ssh -o StrictHostKeyChecking=no admin@3.3.3.4 -p12345 "asterisk -rx 'gsm send ussd 2 *555#'")
# }
# indosatFx2()
# {
# echo $(rm -rf ~/.ssh/known_hosts)
# indosat=$(sshpass -padmin ssh -o StrictHostKeyChecking=no admin@3.3.3.4 -p12345 "asterisk -rx 'gsm send ussd 3 *555#'")
# }
# indosatFx3()
# {
# echo $(rm -rf ~/.ssh/known_hosts)
# indosat=$(sshpass -padmin ssh -o StrictHostKeyChecking=no admin@3.3.3.4 -p12345 "asterisk -rx 'gsm send ussd 4 *555#'")
# }
#===============================================================================
# Balance-check helpers, one per Three SIM slot.
#
# cek_pulsa_three runs the USSD balance-check code on one gateway slot and
# stores the raw reply in the global $three.  The numbered wrappers keep
# their historical names because the script dispatches them dynamically.
#===============================================================================
cek_pulsa_three()
{
	# $1 - slot index into the three* arrays loaded from the DB.
	local idx=$1
	# Wipe known_hosts (the original printed a blank line via echo $(rm ...);
	# kept for log-format compatibility).
	rm -rf ~/.ssh/known_hosts
	echo
	three=$(sshpass -padmin ssh -o StrictHostKeyChecking=no "admin@${threeHost[$idx]}" -p12345 "asterisk -rx 'gsm send ussd ${threeSpan[$idx]} ${threeCaraCekPulsa[$idx]}'")
}
threeFx1() { cek_pulsa_three 1; }
threeFx2() { cek_pulsa_three 2; }
threeFx3() { cek_pulsa_three 3; }
#===============================================================================
# Package-renewal helpers, one per Telkomsel SIM slot.
#
# perpanjang_paket_telkomsel sends the USSD activation/renewal code
# (caraAktivasi) for one slot and stores the raw reply in the global
# $perpanjangTelkomsel.  The numbered wrappers keep their historical names
# because the script dispatches them dynamically.
#===============================================================================
perpanjang_paket_telkomsel()
{
	# $1 - slot index into the telkomsel* arrays loaded from the DB.
	local idx=$1
	# Wipe known_hosts (the original printed a blank line via echo $(rm ...);
	# kept for log-format compatibility).
	rm -rf ~/.ssh/known_hosts
	echo
	perpanjangTelkomsel=$(sshpass -padmin ssh -o StrictHostKeyChecking=no "admin@${telkomselHost[$idx]}" -p12345 "asterisk -rx 'gsm send ussd ${telkomselSpan[$idx]} ${telkomselCaraAktivasi[$idx]}'")
}
renewalTelkomselFx1() { perpanjang_paket_telkomsel 1; }
renewalTelkomselFx2() { perpanjang_paket_telkomsel 2; }
renewalTelkomselFx3() { perpanjang_paket_telkomsel 3; }
renewalTelkomselFx4() { perpanjang_paket_telkomsel 4; }
renewalTelkomselFx5() { perpanjang_paket_telkomsel 5; }
renewalTelkomselFx6() { perpanjang_paket_telkomsel 6; }
renewalTelkomselFx7() { perpanjang_paket_telkomsel 7; }
renewalTelkomselFx8() { perpanjang_paket_telkomsel 8; }
renewalTelkomselFx9() { perpanjang_paket_telkomsel 9; }
renewalTelkomselFx10() { perpanjang_paket_telkomsel 10; }
renewalTelkomselFx11() { perpanjang_paket_telkomsel 11; }
renewalTelkomselFx12() { perpanjang_paket_telkomsel 12; }
renewalTelkomselFx13() { perpanjang_paket_telkomsel 13; }
renewalTelkomselFx14() { perpanjang_paket_telkomsel 14; }
renewalTelkomselFx15() { perpanjang_paket_telkomsel 15; }
renewalTelkomselFx16() { perpanjang_paket_telkomsel 16; }
renewalTelkomselFx17() { perpanjang_paket_telkomsel 17; }
# Initial balance check for every slot; 5 s pacing between USSD calls so the
# gateways are not flooded.  Raw replies land in the telkomsel/XL/three arrays.
for (( i = 1; i <= 17; i++ )); do
	telkomsel[$i]=$(sshpass -padmin ssh -o StrictHostKeyChecking=no admin@${telkomselHost[$i]} -p12345 "asterisk -rx 'gsm send ussd ${telkomselSpan[$i]} ${telkomselCaraCekPulsa[$i]}'")
	sleep 5s
done
# NOTE(review): this loop runs to 14 while only 12 XL slots/helper functions
# appear elsewhere in this script — confirm how many XL rows the provider
# table returns.
for (( i = 1; i <= 14; i++ )); do
	XL[$i]=$(sshpass -padmin ssh -o StrictHostKeyChecking=no admin@${XLHost[$i]} -p12345 "asterisk -rx 'gsm send ussd ${XLSpan[$i]} ${XLCaraCekPulsa[$i]}'")
	sleep 5s
done
# (Indosat checks disabled; kept for reference.)
# indosat[1]=$(sshpass -padmin ssh -o StrictHostKeyChecking=no admin@3.3.3.4 -p12345 "asterisk -rx 'gsm send ussd 2 *555#'")
# sleep 5s
# indosat[2]=$(sshpass -padmin ssh -o StrictHostKeyChecking=no admin@3.3.3.4 -p12345 "asterisk -rx 'gsm send ussd 3 *555#'")
# sleep 5s
# indosat[3]=$(sshpass -padmin ssh -o StrictHostKeyChecking=no admin@3.3.3.4 -p12345 "asterisk -rx 'gsm send ussd 4 *555#'")
# sleep 5s
for (( i = 1; i <= 3; i++ )); do
	three[$i]=$(sshpass -padmin ssh -o StrictHostKeyChecking=no admin@${threeHost[$i]} -p12345 "asterisk -rx 'gsm send ussd ${threeSpan[$i]} ${threeCaraCekPulsa[$i]}'")
	sleep 5s
done
# Per-provider cursors used while walking the result arrays below.
numSimpati=1
numXl=1
numIndosat=1
numThree=1
# Maximum retries for a failed USSD check; the arithmetic no-op just forces
# the value to be treated as an integer.
maxAttempt=5
maxAttempt=$((maxAttempt+0))
# ==================================================================================================
# Simpati
# ==================================================================================================
for i in "${telkomselNo[@]}" #looping sebanyak jumlah variable array
do
#===============================================================================
#melakukan cek pulsa untuk masing-masing nomor pada slot openvox
#metodenya adalah SSH pada openvox dan menjalankan USSD pada asterisk di openvox
#===============================================================================
echo "$currentTime - ===================================================================================================="
echo "$currentTime - Checking Pulsa ${telkomselNama[$numSimpati]}..."
echo "$currentTime - ===================================================================================================="
cekString=${telkomsel[$numSimpati]:2:6} # mengecek respon dari openvox
cekString2=${telkomsel[$numSimpati]:49:4} # mengecek respon dari openvox
cekString3=${telkomsel[$numSimpati]:48:4} # mengecek respon dari openvox
echo "$currentTime - USSD REPLY : ${yellow}${telkomsel[$numSimpati]}${reset}"
if [ "$cekString" = "Recive" ] ; then #bila respon open = Recive
if [[ "$cekString2" != "Maaf" ]] || [[ "$cekString3" != "Maaf" ]]; then
echo "$currentTime - ${green}${telkomselNama[$numSimpati]} Cek Berhasil...${reset}"
echo "$currentTime - -------------------------------------------------------------------------------------------------------------"
USSDReplyTelkomsel[$numSimpati]="${telkomsel[$numSimpati]}"
telkomsel[$numSimpati]=${telkomsel[$numSimpati]:62:6} #mengambil character yang bernilai jumlah pulsa
telkomsel[$numSimpati]=${telkomsel[$numSimpati]//[.Aktif]/} #mengabaikan character lain selain angka
telkomsel[$numSimpati]=$((telkomsel[$numSimpati] + 0)) #merubah variable yang semula string menjadi integer
echo "$currentTime - ${green}Sisa pulsa ${telkomselNama[$numSimpati]} : ${telkomsel[$numSimpati]}${reset}"
#===============================================================================
#memasukan nilai cek pulsa (pulsa) kedalam database
#===============================================================================
sisaPulsaTelkomsel[$numSimpati]=${telkomsel[$numSimpati]}
if [[ ${telkomsel[$numSimpati]} -lt ${telkomselHargaPaket[$numSimpati]} ]]; then #mengecek jika pulsa kurang dari harga paket masing-masing provider
echo "$currentTime - Kirim Slack ke PIKArin, minta isi pulsa Telkomsel - ${telkomselNo[$numSimpati]}"
#insert ke database sms untuk mengirim pulsa ke tukang pulsa
# echo "INSERT INTO outbox (DestinationNumber, TextDecoded, CreatorID) VALUES ('$TUKANGPULSA', 'Pikaa ~~ Minta pulsa : ${telkomselNo[$numSimpati]}, sisa pulsa: (${telkomsel[$numSimpati]}), harga paket: ${telkomselHargaPaket[$numSimpati]}, Exp Date Paket: ${telkomselExpDatePaket[$numSimpati]}', 'BashAdmin');"| mysql -h$HOST -u$USER -p$PASSWORD sms
slackText="Simpati No : $i,\nSisa Pulsa: ${telkomsel[$numSimpati]},\nHarga Paket: ${telkomselHargaPaket[$numSimpati]},\nExp Date Paket: ${telkomselExpDatePaket[$numSimpati]}"
curl -X POST -H 'Content-type: application/json' --data '{"text": "```'"$slackText"'```", "channel": "'"$CHANNEL"'", "username": "'"$USERNAME"'", "icon_emoji": "'"$ICONEMOJI"'"}' https://hooks.slack.com/services/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
fi
else
attempt=1
attempt=$((attempt + 0))
cekBerhasil=""
echo "$currentTime - ${red}${telkomselNama[$numSimpati]} Cek Gagal...${reset}"
echo "----------------------------------------------"
while [[ $attempt -le $maxAttempt && "$cekBerhasil" != "berhasil" ]]; do
echo "$currentTime - ${telkomselNama[$numSimpati]} percobaan ke-$attempt"
telkomselFx$numSimpati
cekString=${telkomsel:2:6}
cekString2=${telkomsel:49:4}
cekString3=${telkomsel:49:4}
echo "$currentTime - USSD REPLY : ${yellow}$telkomsel${reset}"
USSDReplyTelkomsel[$numSimpati]="$telkomsel"
if [ "$cekString" = "Recive" ]; then
if [[ "$cekString2" != "Maaf" ]] || [[ "$cekString3" != "Maaf" ]]; then
echo "$currentTime - ${green}${telkomselNama[$numSimpati]} Cek Berhasil...${reset}"
echo "$currentTime - -------------------------------------------------------------------------------------------------------------"
cekBerhasil="berhasil"
attempt=$((attempt + 3))
telkomsel=${telkomsel:62:6}
telkomsel=${telkomsel//[.Aktif]/}
telkomsel=$((telkomsel + 0))
echo "$currentTime - ${green}Sisa pulsa }${telkomselNama[$numSimpati]} : $telkomsel${reset}"
#===============================================================================
#memasukan nilai cek pulsa (pulsa) kedalam database
#===============================================================================
sisaPulsaTelkomsel[$numSimpati]=$telkomsel
if [[ $telkomsel -lt ${telkomselHargaPaket[$numSimpati]} ]]; then
echo "$currentTime - Kirim SMS ke PIKArin, minta isi pulsa Telkomsel - ${telkomselNo[$numSimpati]}"
#insert ke database sms untuk mengirim pulsa ke tukang pulsa
# echo "INSERT INTO outbox (DestinationNumber, TextDecoded, CreatorID) VALUES ('$TUKANGPULSA', 'Pikaa ~~ Minta pulsa : ${telkomselNo[$numSimpati]}, sisa pulsa: ($telkomsel), harga paket: ${telkomselHargaPaket[$numSimpati]}, Exp Date Paket: ${telkomselExpDatePaket[$numSimpati]}', 'BashAdmin');"| mysql -h$HOST -u$USER -p$PASSWORD sms
slackText="Simpati No : $i,\nSisa Pulsa: Sisa Pulsa: $telkomsel,\nHarga Paket: ${telkomselHargaPaket[$numSimpati]},\nExp Date Paket: ${telkomselExpDatePaket[$numSimpati]}"
curl -X POST -H 'Content-type: application/json' --data '{"text": "```'"$slackText"'```", "channel": "'"$CHANNEL"'", "username": "'"$USERNAME"'", "icon_emoji": "'"$ICONEMOJI"'"}' https://hooks.slack.com/services/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
fi
else
cekBerhasil="gagal"
echo "$currentTime - ${red}${telkomselNama[$numSimpati]} Cek Gagal...${reset}"
echo "----------------------------------------------"
attempt=$((attempt + 1))
if [[ $attempt == $maxAttempt ]]; then
#===============================================================================
#jika cek gagal,, tetap diinsert dengan nilai 0
#===============================================================================
sisaPulsaTelkomsel[$numSimpati]=0
fi
fi
else
cekBerhasil="gagal"
echo "$currentTime - ${red}${telkomselNama[$numSimpati]} Cek Gagal...${reset}"
echo "----------------------------------------------"
attempt=$((attempt + 1))
if [[ $attempt == $maxAttempt ]]; then
#===============================================================================
#jika cek gagal,, tetap diinsert dengan nilai 0
#===============================================================================
sisaPulsaTelkomsel[$numSimpati]=0
fi
fi
done
fi
else
attempt=1
attempt=$((attempt + 0))
cekBerhasil=""
echo "$currentTime - ${red}${telkomselNama[$numSimpati]} Cek Gagal...${reset}"
echo "----------------------------------------------"
while [[ $attempt -le $maxAttempt && "$cekBerhasil" != "berhasil" ]]; do
echo "$currentTime - ${telkomselNama[$numSimpati]} percobaan ke-$attempt"
telkomselFx$numSimpati
cekString=${telkomsel:2:6}
cekString2=${telkomsel:49:4}
cekString3=${telkomsel:49:4}
echo "$currentTime - USSD REPLY : ${yellow}$telkomsel${reset}"
USSDReplyTelkomsel[$numSimpati]="$telkomsel"
if [ "$cekString" = "Recive" ]; then
if [[ "$cekString2" != "Maaf" ]] || [[ "$cekString3" != "Maaf" ]]; then
echo "$currentTime - ${green}${telkomselNama[$numSimpati]} Cek Berhasil...${reset}"
echo "$currentTime - -------------------------------------------------------------------------------------------------------------"
cekBerhasil="berhasil"
attempt=$((attempt + 3))
telkomsel=${telkomsel:62:6}
telkomsel=${telkomsel//[.Aktif]/}
telkomsel=$((telkomsel + 0))
echo "$currentTime - ${green}Sisa pulsa }${telkomselNama[$numSimpati]} : $telkomsel${reset}"
#===============================================================================
#memasukan nilai cek pulsa (pulsa) kedalam database
#===============================================================================
sisaPulsaTelkomsel[$numSimpati]=$telkomsel
if [[ $telkomsel -lt ${telkomselHargaPaket[$numSimpati]} ]]; then
echo "$currentTime - Kirim SMS ke PIKArin, minta isi pulsa Telkomsel - ${telkomselNo[$numSimpati]}"
#insert ke database sms untuk mengirim pulsa ke tukang pulsa
# echo "INSERT INTO outbox (DestinationNumber, TextDecoded, CreatorID) VALUES ('$TUKANGPULSA', 'Pikaa ~~ Minta pulsa : ${telkomselNo[$numSimpati]}, sisa pulsa: ($telkomsel), harga paket: ${telkomselHargaPaket[$numSimpati]}, Exp Date Paket: ${telkomselExpDatePaket[$numSimpati]}', 'BashAdmin');"| mysql -h$HOST -u$USER -p$PASSWORD sms
slackText="Simpati No : $i,\nSisa Pulsa: Sisa Pulsa: $telkomsel,\nHarga Paket: ${telkomselHargaPaket[$numSimpati]},\nExp Date Paket: ${telkomselExpDatePaket[$numSimpati]}"
curl -X POST -H 'Content-type: application/json' --data '{"text": "```'"$slackText"'```", "channel": "'"$CHANNEL"'", "username": "'"$USERNAME"'", "icon_emoji": "'"$ICONEMOJI"'"}' https://hooks.slack.com/services/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
fi
else
cekBerhasil="gagal"
echo "$currentTime - ${red}${telkomselNama[$numSimpati]} Cek Gagal...${reset}"
echo "----------------------------------------------"
attempt=$((attempt + 1))
if [[ $attempt == $maxAttempt ]]; then
#===============================================================================
#jika cek gagal,, tetap diinsert dengan nilai 0
#===============================================================================
sisaPulsaTelkomsel[$numSimpati]=0
fi
fi
else
cekBerhasil="gagal"
echo "$currentTime - ${red}${telkomselNama[$numSimpati]} Cek Gagal...${reset}"
echo "----------------------------------------------"
attempt=$((attempt + 1))
if [[ $attempt == $maxAttempt ]]; then
#===============================================================================
#jika cek gagal,, tetap diinsert dengan nilai 0
#===============================================================================
sisaPulsaTelkomsel[$numSimpati]=0
fi
fi
done
fi
echo "$currentTime - ${green}+++++++++++++++++++++++ CHECKING ${telkomselNama[$numSimpati]} FINISHED+++++++++++++++++++++${reset}"
if [[ $NOW -ge ${telkomselExpDatePaket[$numSimpati]} ]]; then
if [[ ${sisaPulsaTelkomsel[$numSimpati]} -lt ${telkomselHargaPaket[$numSimpati]} ]]; then
echo "$currentTime - ===================================================================================================="
echo "$currentTime - Perpanjang Paket ${telkomselNama[$numSimpati]}..."
echo "$currentTime - ===================================================================================================="
echo "$currentTime - ${red}${telkomselNama[$numSimpati]} Gagal Perpanjang... Pulsa tidak cukup..${reset}"
echo "$currentTime - -------------------------------------------------------------------------------------------------------------"
textNotifikasiTelkomsel[$numSimpati]="${telkomselNama[$numSimpati]} perpanjang paket gagal, pulsa tidak cukup untuk perpanjang paket.. \nSisa Pulsa : ${sisaPulsaTelkomsel[$numSimpati]}"
curl -X POST -H 'Content-type: application/json' --data '{"text": "```'"${textNotifikasiTelkomsel[$numSimpati]}"'```", "channel": "'"$CHANNEL"'", "username": "'"$USERNAME"'", "icon_emoji": "'"$ICONEMOJI2"'"}' https://hooks.slack.com/services/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
else
echo "$currentTime - ===================================================================================================="
echo "$currentTime - Perpanjang Paket ${telkomselNama[$numSimpati]}..."
echo "$currentTime - ===================================================================================================="
# ===============================================================================
# menentukan tanggal baru untuk tanggal habis paket selanjutnya
# ===============================================================================
newDate=$(date -d "6 days" +%Y-%m-%d)
# ===============================================================================
# Memanggil funtion
# ===============================================================================
renewalTelkomselFx$numSimpati
cekString=${perpanjangTelkomsel:2:6} # mengecek respon dari openvox
cekString2=${perpanjangTelkomsel:48:4} # mengecek respon dari openvox
echo "$currentTime - USSD REPLY${yellow}$perpanjangTelkomsel${reset}"
if [[ "$cekString" == "Recive" ]] && [[ "$cekString2" != "maaf" ]]; then #bila respon openvox = Recive
echo "$currentTime - ${green}${telkomselNama[$numSimpati]} Berhasil Perpanjang...${reset}"
echo "$currentTime - -------------------------------------------------------------------------------------------------------------"
# ===============================================================================
# mengirim sms ke admin, kalo baru saja paket diperpanjang.. tujuannya agar admin memastikan perpanjangan berjalan sesuai dengan seharusnya
# ===============================================================================
echo "$currentTime - ${green}Kirim SMS ke Admin, ngasih tau kalo ${telkomselNama[$numSimpati]} baru aja perpanjang paket.. ${reset}"
# echo "INSERT INTO outbox (DestinationNumber, TextDecoded, CreatorID) VALUES ('$TUKANGKETIK', '${telkomselNama[$numSimpati]} perpanjang paket berhasil.. USSD REPLY : $perpanjangTelkomsel', 'BashAdmin');"| mysql -h$HOST -u$USER -p$PASSWORD sms
textNotifikasiTelkomsel[$numSimpati]="${telkomselNama[$numSimpati]} perpanjang paket berhasil.. \nUSSD REPLY : $perpanjangTelkomsel"
curl -X POST -H 'Content-type: application/json' --data '{"text": "```'"${textNotifikasiTelkomsel[$numSimpati]}"'```", "channel": "'"$CHANNEL"'", "username": "'"$USERNAME"'", "icon_emoji": "'"$ICONEMOJI2"'"}' https://hooks.slack.com/services/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
# ===============================================================================
# jika berhasil maka tanggal exp date akan diupdate
# ===============================================================================
mysql -h1.1.1.200 -uroot -pc3rmat dbpulsa -e "update provider set expDatePaket = '$newDate' where namaProvider = '${telkomselNama[$numSimpati]}';"
else
echo "$currentTime - ${red}${telkomselNama[$numSimpati]} Gagal Perpanjang...${reset}"
echo "$currentTime - -------------------------------------------------------------------------------------------------------------"
attempt=1
attempt=$((attempt + 0))
while [[ $attempt -le $maxAttempt && "$cekBerhasil" != "berhasil" ]]; do
echo "$currentTime - ${telkomselNama[$numSimpati]} percobaan ke-$attempt"
renewalTelkomselFx$numSimpati
cekString=${perpanjangTelkomsel:2:6}
echo "$currentTime - USSD REPLY : ${yellow}$perpanjangTelkomsel${reset}"
if [ "$cekString" = "Recive" ]; then
echo "$currentTime - ${green}${telkomselNama[$numSimpati]} Berhasil Perpanjang...${reset}"
echo "$currentTime - -------------------------------------------------------------------------------------------------------------"
# ===============================================================================
# mengirim sms ke admin
# ===============================================================================
echo "$currentTime - ${green}Kirim SMS ke Admin, ngasih tau kalo ${telkomselNama[$numSimpati]} baru aja perpanjang paket.. ${reset}"
# echo "INSERT INTO outbox (DestinationNumber, TextDecoded, CreatorID) VALUES ('$TUKANGKETIK', '${telkomselNama[$numSimpati]} perpanjang paket berhasil setelah percobaan ke-$attempt.. USSD REPLY : $perpanjangTelkomsel', 'BashAdmin');"| mysql -h$HOST -u$USER -p$PASSWORD sms
textNotifikasiTelkomsel[$numSimpati]="${telkomselNama[$numSimpati]} perpanjang paket berhasil setelah percobaan ke-$attempt.. \nUSSD REPLY : $perpanjangTelkomsel"
curl -X POST -H 'Content-type: application/json' --data '{"text": "```'"${textNotifikasiTelkomsel[$numSimpati]}"'```", "channel": "'"$CHANNEL"'", "username": "'"$USERNAME"'", "icon_emoji": "'"$ICONEMOJI2"'"}' https://hooks.slack.com/services/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
# ===============================================================================
# jika berhasil maka tanggal exp date akan diupdate
# ===============================================================================
mysql -h1.1.1.200 -uroot -pc3rmat dbpulsa -e "update provider set expDatePaket = '$newDate' where namaProvider = '${telkomselNama[$numSimpati]}';"
cekBerhasil="berhasil"
attempt=$((attempt + 3))
else
cekBerhasil="gagal"
echo "$currentTime - ${red}${telkomselNama[$numSimpati]} Gagal Perpanjang...${reset}"
echo "$currentTime - ----------------------------------------------"
attempt=$((attempt + 1))
sleep 5s
if [[ $attempt == $maxAttempt ]]; then
# ===============================================================================
# mengirim sms ke admin
# ===============================================================================
echo "$currentTime - ${green}Kirim SMS ke Admin, ngasih tau kalo ${telkomselNama[$numSimpati]} baru aja perpanjang paket.. ${reset}"
# echo "INSERT INTO outbox (DestinationNumber, TextDecoded, CreatorID) VALUES ('$TUKANGKETIK', '${telkomselNama[$numSimpati]} perpanjang paket gagal.. USSD REPLY : $perpanjangTelkomsel', 'BashAdmin');"| mysql -h$HOST -u$USER -p$PASSWORD sms
textNotifikasiTelkomsel[$numSimpati]="${telkomselNama[$numSimpati]} perpanjang paket gagal.. \nUSSD REPLY : $perpanjangTelkomsel"
curl -X POST -H 'Content-type: application/json' --data '{"text": "```'"${textNotifikasiTelkomsel[$numSimpati]}"'```", "channel": "'"$CHANNEL"'", "username": "'"$USERNAME"'", "icon_emoji": "'"$ICONEMOJI2"'"}' https://hooks.slack.com/services/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
fi
fi
done
fi
fi
echo "$currentTime - ${green}+++++++++++++++++++++++ RENEWAL ${telkomselNama[$numSimpati]} FINISHED+++++++++++++++++++++${reset}"
fi
#===============================================================================
#memasukan nilai cek pulsa kedalam database
#===============================================================================
echo "INSERT INTO pulsa (namaProvider, sisaPulsa, tanggal, ussdReply) VALUES ('${telkomselNama[$numSimpati]}', '${sisaPulsaTelkomsel[$numSimpati]}', '$mysqlDateNow', '${USSDReplyTelkomsel[$numSimpati]}');"| mysql -h$HOST -u$USER -p$PASSWORD dbpulsa
numSimpati=$((numSimpati + 1))
done
# #alert Paket Habis Simpati
# #===============================================================================
# #mengecek apakah tanggal habis paket >= hari ini
# #===============================================================================
# if [[ $NOW -ge $NEXT_UPDATE_SIMPATI ]]; then
# echo "$currentTime - ===================================================================================================="
# echo "$currentTime - Perpanjang Paket Telkomsel"
# echo "$currentTime - ===================================================================================================="
# # ===============================================================================
# # menentukan tanggal baru untuk tanggal habis paket selanjutnya
# # ===============================================================================
# newDate=$(date -d "6 days" +%Y%m%d)
# # ===============================================================================
# # mengirim sms ke admin, kalo baru saja paket diperpanjang.. tujuannya agar admin make sure perpanjangan berjalan sesuai dengan seharusnya
# # ===============================================================================
# echo "$currentTime - ${green}Kirim SMS ke Admin, ngasih tau kalo Telkomsel baru aja perpanjang paket.. ${reset}"
# echo "INSERT INTO outbox (DestinationNumber, TextDecoded, CreatorID) VALUES ('$TUKANGKETIK', 'Telkomsel perpanjang paket... coba cek..!!!', 'BashAdmin');"| mysql -h$HOST -u$USER -p$PASSWORD sms
# # ===============================================================================
# # restarting openvox lagi, alasannya untuk memastikan tidak ada kegagalan saat mengirimkan request ke openvox
# # ===============================================================================
# echo "$currentTime - Restarting openvox..."
# echo "INSERT INTO outbox (DestinationNumber, TextDecoded, CreatorID) VALUES ('${TELKOMSEL[0]}', 'reboot system c3rmat', 'BashAdmin');"| mysql -h$HOST -u$USER -p$PASSWORD sms
# echo "INSERT INTO outbox (DestinationNumber, TextDecoded, CreatorID) VALUES ('${TELKOMSEL[5]}', 'reboot system c3rmat', 'BashAdmin');"| mysql -h$HOST -u$USER -p$PASSWORD sms
# echo "INSERT INTO outbox (DestinationNumber, TextDecoded, CreatorID) VALUES ('${TELKOMSEL[8]}', 'reboot system c3rmat', 'BashAdmin');"| mysql -h$HOST -u$USER -p$PASSWORD sms
# sleep 3m
# #===============================================================================
# # Perpanjang Paket Via USSD
# #===============================================================================
# echo "$currentTime - Kirim USSD Perpanjang Paket Simpati"
# echo $(rm -rf ~/.ssh/known_hosts)
# renewalTelkomsel[1]=$(sshpass -padmin ssh -o StrictHostKeyChecking=no admin@3.3.3.2 -p12345 "asterisk -rx 'gsm send ussd 1 *999*4*2*1*1#'")
# sleep 5s
# renewalTelkomsel[2]=$(sshpass -padmin ssh -o StrictHostKeyChecking=no admin@3.3.3.2 -p12345 "asterisk -rx 'gsm send ussd 2 *999*4*2*1*1#'")
# sleep 5s
# renewalTelkomsel[3]=$(sshpass -padmin ssh -o StrictHostKeyChecking=no admin@3.3.3.2 -p12345 "asterisk -rx 'gsm send ussd 3 *999*4*2*1*1#'")
# sleep 5s
# renewalTelkomsel[4]=$(sshpass -padmin ssh -o StrictHostKeyChecking=no admin@3.3.3.2 -p12345 "asterisk -rx 'gsm send ussd 4 *999*4*2*1*1#'")
# sleep 5s
# renewalTelkomsel[5]=$(sshpass -padmin ssh -o StrictHostKeyChecking=no admin@3.3.3.3 -p12345 "asterisk -rx 'gsm send ussd 1 *999*4*3*1*1#'")
# sleep 5s
# renewalTelkomsel[6]=$(sshpass -padmin ssh -o StrictHostKeyChecking=no admin@3.3.3.3 -p12345 "asterisk -rx 'gsm send ussd 2 *999*4*2*1*1#'")
# sleep 5s
# renewalTelkomsel[7]=$(sshpass -padmin ssh -o StrictHostKeyChecking=no admin@3.3.3.3 -p12345 "asterisk -rx 'gsm send ussd 3 *999*4*2*1*1#'")
# sleep 5s
# renewalTelkomsel[8]=$(sshpass -padmin ssh -o StrictHostKeyChecking=no admin@3.3.3.3 -p12345 "asterisk -rx 'gsm send ussd 4 *999*4*2*1*1#'")
# sleep 5s
# renewalTelkomsel[9]=$(sshpass -padmin ssh -o StrictHostKeyChecking=no admin@3.3.3.4 -p12345 "asterisk -rx 'gsm send ussd 1 *999*4*2*1*1#'")
# numRenewal=1
# for i in "${renewalTelkomsel[@]}" #looping sebanyak jumlah variable array
# do
# cekString=${renewalTelkomsel[$numRenewal]:2:6} # mengecek respon dari openvox
# echo "$currentTime - USSD REPLY${yellow}${renewalTelkomsel[$numRenewal]}${reset}"
# if [ "$cekString" = "Recive" ]; then #bila respon openvox = Recive
# echo "$currentTime - ${green}Simpati$numRenewal Berhasil Perpanjang...${reset}"
# echo "$currentTime - -------------------------------------------------------------------------------------------------------------"
# # ===============================================================================
# # jika tanggal habis paket adalah >= hari ini, maka paket diperpanjang selama panjangnya masa berlaku paket
# # ===============================================================================
# echo "$newDate">paketHabisSimpati.txt
# else
# echo "$currentTime - ${red}Simpati$numRenewal Gagal Perpanjang...${reset}"
# echo "$currentTime - -------------------------------------------------------------------------------------------------------------"
# attempt=1
# attempt=$((attempt + 0))
# while [[ $attempt -le $maxAttempt && "$cekBerhasil" != "berhasil" ]]; do
# echo "$currentTime - ${telkomselNama[$numSimpati]} percobaan ke-$attempt"
# renewalTelkomsel$numRenewalFx
# cekString=${perpanjangTelkomsel:2:6}
# echo "$currentTime - USSD REPLY : ${yellow}$perpanjangTelkomsel${reset}"
# if [ "$cekString" = "Recive" ]; then
# echo "$currentTime - ${green}Simpati$numRenewal Berhasil Perpanjang...${reset}"
# echo "$currentTime - -------------------------------------------------------------------------------------------------------------"
# cekBerhasil="berhasil"
# attempt=$((attempt + 3))
# else
# cekBerhasil="gagal"
# echo "$currentTime - ${red}Simpati$numRenewal Gagal Perpanjang...${reset}"
# echo "$currentTime - ----------------------------------------------"
# attempt=$((attempt + 1))
# sleep 5s
# fi
# done
# fi
# numRenewal=$((numRenewal + 1))
# done
# fi
# ==================================================================================================
# XL: check each XL SIM's remaining balance via USSD, alert Slack when the balance
# drops below the package price, and record every result in the dbpulsa.pulsa table.
# ==================================================================================================
for i in "${XLNo[@]}" #loop over every configured XL number
do
	#===============================================================================
	#balance check for the SIM in this OpenVox slot: the reply in ${XL[$numXl]} was
	#obtained by SSH-ing into the gateway and running the USSD query via asterisk
	#===============================================================================
	echo "$currentTime - ===================================================================================================="
	echo "$currentTime - Checking Pulsa ${XLNama[$numXl]}..."
	echo "$currentTime - ===================================================================================================="
	cekString=${XL[$numXl]:2:6} # "Recive" marker from the OpenVox gateway
	echo "$currentTime - USSD REPLY : ${yellow}${XL[$numXl]}${reset}"
	if [ "$cekString" = "Recive" ]; then #gateway acknowledged the USSD query
		echo "$currentTime - ${green}${XLNama[$numXl]} Cek Berhasil...${reset}"
		echo "$currentTime - -------------------------------------------------------------------------------------------------------------"
		USSDReplyXL[$numXl]="${XL[$numXl]}"
		XL[$numXl]=${XL[$numXl]:55:6} #slice the balance digits out of the reply
		XL[$numXl]=${XL[$numXl]//[ . sd\/ ]/} #bracket = char set: drop ' ', '.', 's', 'd', '/'
		XL[$numXl]=$((XL[$numXl] + 0)) #coerce the remaining digits to an integer
		echo "$currentTime - ${green}Sisa Pulsa : ${XL[$numXl]}${reset}"
		#===============================================================================
		#keep the parsed balance for the database insert performed after the if/else
		#===============================================================================
		sisaPulsaXL[$numXl]=${XL[$numXl]}
		if [[ ${XL[$numXl]} -lt ${XLHargaPaket[$numXl]} ]]; then #balance below the package price
			echo "$currentTime - Kirim SMS ke PIKArin, minta isi pulsa XL - ${XLNo[$numXl]}"
			#insert ke database sms untuk mengirim pulsa ke tukang pulsa
			# echo "INSERT INTO outbox (DestinationNumber, TextDecoded, CreatorID) VALUES ('$TUKANGPULSA', 'Pikaa ~~ Minta pulsa : XL $i, sisa pulsa: (${XL[$numXl]}), harga paket: ${XLHargaPaket[$numXl]}, Exp Date Paket: ${XLExpDatePaket[$numXl]}', 'BashAdmin');"| mysql -h$HOST -u$USER -p$PASSWORD sms
			# textMintaPulsaXL[$numXl]="XL No : $i, Sisa Pulsa: ${XL[$numXl]}, Harga Paket: ${XLHargaPaket[$numXl]}, Exp Date Paket: ${XLExpDatePaket[$numXl]}"
			slackText="XL No : $i,\nSisa Pulsa: ${XL[$numXl]},\nHarga Paket: ${XLHargaPaket[$numXl]},\nExp Date Paket: ${XLExpDatePaket[$numXl]}"
			# NOTE(security): webhook URL is hardcoded; move it to configuration.
			curl -X POST -H 'Content-type: application/json' --data '{"text": "```'"$slackText"'```", "channel": "'"$CHANNEL"'", "username": "'"$USERNAME"'", "icon_emoji": "'"$ICONEMOJI"'"}' https://hooks.slack.com/services/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
		fi
	else
		# First check failed: retry up to maxAttempt times.
		attempt=1
		attempt=$((attempt + 0))
		cekBerhasil=""
		echo "$currentTime - ${red}${XLNama[$numXl]} Cek Gagal...${reset}"
		echo "$currentTime - ----------------------------------------------"
		while [[ $attempt -le $maxAttempt && "$cekBerhasil" != "berhasil" ]]; do
			echo "$currentTime - ${XLNama[$numXl]} percobaan ke-$attempt"
			# Dynamic dispatch: xlFx1, xlFx2, ... re-runs the USSD query and sets $xl.
			xlFx$numXl
			cekString=${xl:2:6}
			echo "$currentTime - USSD REPLY : ${yellow}$xl${reset}"
			USSDReplyXL[$numXl]="$xl"
			if [[ "$cekString" = "Recive" ]]; then
				echo "$currentTime - ${green}${XLNama[$numXl]} Cek Berhasil...${reset}"
				echo "$currentTime - -------------------------------------------------------------------------------------------------------------"
				cekBerhasil="berhasil"
				attempt=$((attempt + 3)) #push attempt past maxAttempt so the loop exits
				xl=${xl:55:6}
				xl=${xl//[ . sd\/ ]/}
				xl=$((xl + 0))
				echo "$currentTime - ${green}Sisa Pulsa : $xl${reset}"
				#===============================================================================
				#keep the parsed balance for the database insert performed after the loop
				#===============================================================================
				sisaPulsaXL[$numXl]=$xl
				if [[ $xl -lt ${XLHargaPaket[$numXl]} ]]; then
					echo "$currentTime - Kirim SMS ke PIKArin, minta isi pulsa XL - ${XLNo[$numXl]}"
					#insert ke database sms untuk mengirim pulsa ke tukang pulsa
					# echo "INSERT INTO outbox (DestinationNumber, TextDecoded, CreatorID) VALUES ('$TUKANGPULSA', 'Pikaa ~~ Minta pulsa : XL $i, sisa pulsa: ($xl), harga paket: ${XLHargaPaket[$numXl]}, Exp Date Paket: ${XLExpDatePaket[$numXl]}', 'BashAdmin');"| mysql -h$HOST -u$USER -p$PASSWORD sms
					slackText="XL No : $i,\nSisa Pulsa: $xl,\nHarga Paket: ${XLHargaPaket[$numXl]},\nExp Date Paket: ${XLExpDatePaket[$numXl]}"
					curl -X POST -H 'Content-type: application/json' --data '{"text": "```'"$slackText"'```", "channel": "'"$CHANNEL"'", "username": "'"$USERNAME"'", "icon_emoji": "'"$ICONEMOJI"'"}' https://hooks.slack.com/services/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
				fi
			else
				cekBerhasil="gagal"
				echo "$currentTime - ${red}${XLNama[$numXl]} Cek Gagal...${reset}"
				echo "$currentTime - ----------------------------------------------"
				attempt=$((attempt + 1))
				# BUGFIX: was `[[ $attempt == $maxAttempt ]]`, which fired one attempt
				# early and never fired at all when maxAttempt=1 (leaving sisaPulsaXL
				# unset, so an empty value reached the INSERT below).  `-gt` fires
				# exactly once, after the final failed attempt.
				if [[ $attempt -gt $maxAttempt ]]; then
					#===============================================================================
					#every attempt failed: record 0 so the INSERT below still has a value
					#===============================================================================
					sisaPulsaXL[$numXl]=0
				fi
			fi
		done
	fi
	echo "$currentTime - ${yellow}+++++++++++++++++++++++ CHECKING ${XLNama[$numXl]} FINISHED+++++++++++++++++++++${reset}"
	#===============================================================================
	#insert this SIM's balance and raw USSD reply into dbpulsa.pulsa
	#===============================================================================
	echo "INSERT INTO pulsa (namaProvider, sisaPulsa, tanggal,ussdReply) VALUES ('${XLNama[$numXl]}', '${sisaPulsaXL[$numXl]}', '$mysqlDateNow', '${USSDReplyXL[$numXl]}');"| mysql -h$HOST -u$USER -p$PASSWORD dbpulsa
	numXl=$((numXl + 1)) #advance to the next XL SIM slot
done
# # ==================================================================================================
# # INDOSAT
# # ==================================================================================================
# for i in "${indosat[@]}" #looping sebanyak jumlah variable array
# do
# textMintaPulsa[$numIndosat]="Mentari 100.000 : ${INDOSAT[$((numIndosat-1))]}"
# #===============================================================================
# #melakukan cek pulsa untuk masing-masing nomor pada slot openvox
# #metodenya adalah SSH pada openvox dan menjalankan USSD pada asterisk di openvox
# #===============================================================================
# echo "$currentTime - ===================================================================================================="
# echo "$currentTime - Checking Pulsa INDOSAT$numIndosat..."
# echo "$currentTime - ===================================================================================================="
# cekString=${indosat[$numIndosat]:2:6} # mengecek respon dari openvox
# cekString2=${indosat[$numIndosat]:49:10} # mengecek respon dari openvox
# echo "$currentTime - USSD REPLY : ${yellow}${indosat[$numIndosat]}${reset}"
# if [ "$cekString" = "Recive" ] && [ "$cekString2" = "PulsaUTAMA" ]; then #bila respon open = Recive
# echo "$currentTime - ${green}INDOSAT$numIndosat Cek Berhasil...${reset}"
# echo "$currentTime - -------------------------------------------------------------------------------------------------------------"
# indosat[$numIndosat]=${indosat[$numIndosat]:63:6} #mengambil character yang bernilai jumlah pulsa
# indosat[$numIndosat]=${indosat[$numIndosat]//[. Aktif]/} #mengabaikan character lain selain angka
# indosat[$numIndosat]=$((indosat[$numIndosat] + 0)) #merubah variable yang semula string menjadi integer
# echo "$currentTime - ${green}Sisa Pulsa INDOSAT$numIndosat : ${indosat[$numIndosat]}${reset}"
# # jsonIndosat$numIndosat="{namaProvider:\"Indosat$numIndosat\", sisaPulsa:\"${indosat[$numIndosat]}\", tanggal: \"$mysqlDateNow\"}"
# echo "INSERT INTO pulsa (namaProvider, sisaPulsa, tanggal) VALUES ('Indosat$numIndosat', '${indosat[$numIndosat]}', '$mysqlDateNow');"| mysql -h$HOST -u$USER -p$PASSWORD dbpulsa
# if [[ ${indosat[$numIndosat]} -lt $HARGA_PAKET_INDOSAT ]]; then #mengecek jika pulsa kurang dari harga paket masing-masing provider
# echo "$currentTime - Kirim SMS ke PIKArin, minta isi pulsa INDOSAT - ${INDOSAT[$((numIndosat-1))]}"
# #insert ke database sms untuk mengirim pulsa ke tukang pulsa
# echo "INSERT INTO outbox (DestinationNumber, TextDecoded, CreatorID) VALUES ('$TUKANGPULSA', 'Pikaa ~~ Minta pulsa : ${textMintaPulsa[$numIndosat]}, sisa pulsa: (${indosat[$numIndosat]})', 'BashAdmin');"| mysql -h$HOST -u$USER -p$PASSWORD sms
# fi
# else
# attempt=1
# attempt=$((attempt + 0))
# cekBerhasil=""
# echo "$currentTime - ${red}INDOSAT$numIndosat Cek Gagal...${reset}"
# while [[ $attempt -le $maxAttempt && "$cekBerhasil" != "berhasil" ]]; do
# echo "$currentTime - INDOSAT$numIndosat percobaan ke-$attempt"
# indosatFx$numIndosat
# cekString=${indosat:2:6}
# cekString2=${indosat:49:10}
# echo "$currentTime - USSD REPLY : ${yellow}$indosat${reset}"
# if [ "$cekString" = "Recive" ] && [ "$cekString2" = "PulsaUTAMA" ]; then
# echo "$currentTime - ${green}INDOSAT$numIndosat Cek Berhasil...${reset}"
# echo "$currentTime - -------------------------------------------------------------------------------------------------------------"
# cekBerhasil="berhasil"
# attempt=$((attempt + 3))
# indosat=${indosat:63:6}
# indosat=${indosat//[. Aktif]/}
# indosat=$((indosat + 0))
# echo "$currentTime - ${green}Sisa Pulsa INDOSAT$numIndosat : $indosat${reset}"
# # jsonIndosat$numIndosat="{namaProvider:\"Indosat$numIndosat\", sisaPulsa:\"$indosat\", tanggal: \"$mysqlDateNow\"}"
# echo "INSERT INTO pulsa (namaProvider, sisaPulsa, tanggal) VALUES ('Indosat$numIndosat', '$indosat', '$mysqlDateNow');"| mysql -h$HOST -u$USER -p$PASSWORD dbpulsa
# if [[ ${indosat} -lt $HARGA_PAKET_SIMPATI ]]; then
# echo "$currentTime - Kirim SMS ke PIKArin, minta isi pulsa INDOSAT - ${INDOSAT[$((numIndosat-1))]}"
# #insert ke database sms untuk mengirim pulsa ke tukang pulsa
# echo "INSERT INTO outbox (DestinationNumber, TextDecoded, CreatorID) VALUES ('$TUKANGPULSA', 'Pikaa ~~ Minta pulsa : ${textMintaPulsa[$numIndosat]}, sisa pulsa: $indosat', 'BashAdmin');"| mysql -h$HOST -u$USER -p$PASSWORD sms
# fi
# else
# cekBerhasil="gagal"
# echo "$currentTime - ${red}INDOSAT$numIndosat Cek Gagal...${reset}"
# echo "$currentTime - ----------------------------------------------"
# attempt=$((attempt + 1))
# if [[ $attempt == $maxAttempt ]]; then
# # jsonIndosat$numIndosat="{namaProvider:\"Indosat$numIndosat\", sisaPulsa:"-", tanggal: \"$mysqlDateNow\"}"
# echo "INSERT INTO pulsa (namaProvider, sisaPulsa, tanggal) VALUES ('Indosat$numIndosat', '-', '$mysqlDateNow');"| mysql -h$HOST -u$USER -p$PASSWORD dbpulsa
# fi
# fi
# done
# fi
# echo "$currentTime - ${yellow}+++++++++++++++++++++++ CHECKING INDOSAT$numIndosat FINISHED+++++++++++++++++++++${reset}"
# numIndosat=$((numIndosat + 1))
# done
# ==================================================================================================
# THREE
# ==================================================================================================
for i in "${threeNo[@]}" #looping sebanyak jumlah variable array
do
#===============================================================================
#melakukan cek pulsa untuk masing-masing nomor pada slot openvox
#metodenya adalah SSH pada openvox dan menjalankan USSD pada asterisk di openvox
#===============================================================================
echo "$currentTime - ===================================================================================================="
echo "$currentTime - Checking Pulsa ${threeNama[$numThree]}..."
echo "$currentTime - ===================================================================================================="
cekString=${three[$numThree]:2:6} # mengecek respon dari openvox
cekString2=${three[$numThree]:74:3}
echo "$currentTime - USSD REPLY : ${yellow}${three[$numThree]}${reset}"
if [ "$cekString" = "Recive" ] && [ "$cekString2" = "Pul" ]; then #bila respon open = Recive
echo "$currentTime - ${green}${threeNama[$numThree]} Cek Berhasil...${reset}"
echo "$currentTime - -------------------------------------------------------------------------------------------------------------"
USSDReplyThree[$numThree]="${three[$numThree]}"
three[$numThree]=${three[$numThree]:82:6} #mengambil character yang bernilai jumlah pulsa
three[$numThree]=${three[$numThree]//[,Bonus]/} #mengabaikan character lain selain angka
three[$numThree]=$((three[$numThree] + 0)) #merubah variable yang semula string menjadi integer
echo "$currentTime - ${green}Sisa Pulsa ${threeNama[$numThree]} : ${three[$numThree]}${reset}"
sisaPulsaThree[$numThree]=${three[$numThree]}
if [[ ${three[$numThree]} -lt ${threeHargaPaket[$numThree]} ]]; then #mengecek jika pulsa kurang dari harga paket masing-masing provider
echo "$currentTime - Kirim SMS ke PIKArin, minta isi pulsa THREE - $i"
#insert ke database sms untuk mengirim pulsa ke tukang pulsa
# echo "INSERT INTO outbox (DestinationNumber, TextDecoded, CreatorID) VALUES ('$TUKANGPULSA', 'Pikaa ~~ Minta pulsa : Three $i, sisa pulsa: (${three[$numThree]}), harga paket: ${threeHargaPaket[$numThree]}, Exp Date Paket: Hari ini Jam 23:59', 'BashAdmin');"| mysql -h$HOST -u$USER -p$PASSWORD sms
slackText="Three No : $i,\nSisa Pulsa: ${three[$numThree]},\nHarga Paket: ${threeHargaPaket[$numThree]},\nExp Date Paket: Hari ini Jam 23:59"
curl -X POST -H 'Content-type: application/json' --data '{"text": "```'"$slackText"'```", "channel": "'"$CHANNEL"'", "username": "'"$USERNAME"'", "icon_emoji": "'"$ICONEMOJI"'"}' https://hooks.slack.com/services/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
fi
else
attempt=1
attempt=$((attempt + 0))
cekBerhasil=""
echo "$currentTime - ${red}${threeNama[$numThree]} Cek Gagal...${reset}"
while [[ $attempt -le $maxAttempt && "$cekBerhasil" != "berhasil" ]]; do
echo "$currentTime - ${threeNama[$numThree]} percobaan ke-$attempt"
threeFx$numThree
cekString=${three:2:6}
cekString2=${three:74:3}
echo "$currentTime - USSD REPLY : ${yellow}$three${reset}"
USSDReplyThree[$numThree]="$three"
if [ "$cekString" = "Recive" ] && [ "$cekString2" = "Pul" ]; then
echo "$currentTime - ${green}${threeNama[$numThree]} Cek Berhasil...${reset}"
echo "$currentTime - -------------------------------------------------------------------------------------------------------------"
cekBerhasil="berhasil"
attempt=$((attempt + 3))
three=${three:82:6}
three=${three//[,Bonus]/}
three=$((three + 0))
echo "$currentTime - ${green}Sisa Pulsa ${threeNama[$numThree]} : $three${reset}"
sisaPulsaThree[$numThree]=$three
if [[ $three -lt ${threeHargaPaket[$numThree]} ]]; then
echo "$currentTime - Kirim SMS ke PIKArin, minta isi pulsa THREE - $i"
#insert ke database sms untuk mengirim pulsa ke tukang pulsa
# echo "INSERT INTO outbox (DestinationNumber, TextDecoded, CreatorID) VALUES ('$TUKANGPULSA', 'Pikaa ~~ Minta pulsa : Three $i, sisa pulsa: ($three), harga paket: ${threeHargaPaket[$numThree]}, Exp Date Paket: Hari ini Jam 23:59', 'BashAdmin');"| mysql -h$HOST -u$USER -p$PASSWORD sms
slackText="Three No : $i,\nSisa Pulsa: $three,\nHarga Paket: ${threeHargaPaket[$numThree]},\nExp Date Paket: Hari ini Jam 23:59"
curl -X POST -H 'Content-type: application/json' --data '{"text": "```'"$slackText"'```", "channel": "'"$CHANNEL"'", "username": "'"$USERNAME"'", "icon_emoji": "'"$ICONEMOJI"'"}' https://hooks.slack.com/services/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
fi
else
cekBerhasil="gagal"
echo "$currentTime - ${red}${threeNama[$numThree]} Cek Gagal...${reset}"
echo "$currentTime - ----------------------------------------------"
attempt=$((attempt + 1))
if [[ $attempt == $maxAttempt ]]; then
sisaPulsaThree[$numThree]=0
fi
fi
done
fi
echo "$currentTime - ${yellow}+++++++++++++++++++++++ CHECKING ${threeNama[$numThree]} FINISHED+++++++++++++++++++++${reset}"
#===============================================================================
#memasukan nilai cek pulsa dan paket kedalam database
#===============================================================================
echo "INSERT INTO pulsa (namaProvider, sisaPulsa, tanggal, ussdReply) VALUES ('${threeNama[$numThree]}', '${sisaPulsaThree[$numThree]}', '$mysqlDateNow', '${USSDReplyThree[$numThree]}');"| mysql -h$HOST -u$USER -p$PASSWORD dbpulsa
numThree=$((numThree + 1))
done | true |
dc92c726168d221bb2c7dc94984b10e11f220a5c | Shell | btk15049/home | /util/gg.sh | SHIFT_JIS | 8,227 | 4.125 | 4 | [] | no_license | #!/bin/bash
# ANSI SGR color codes consumed by cecho.
readonly red=31
readonly green=32
readonly yellow=33
readonly blue=36
# Input/output source selectors used by run_test's flag parsing.
readonly on_console=0
readonly use_file=1
readonly use_directory=2
readonly use_clipboard=3
# Boolean flags (shell convention here: 0 = on/true, 1 = off/false).
readonly on=0
readonly off=1
# cecho <color-code> <message...>: print the message in the given ANSI color.
function cecho {
    # cecho <ansi-color-code> <message...>
    # Print the message wrapped in the given ANSI SGR color, then reset.
    local color="${1}"
    local msg="${*:2}"
    # Pass the message as a printf *argument* (%b) instead of splicing it
    # into the format string: a '%' in the text no longer corrupts the
    # output, while backslash escapes such as '\n' are still expanded,
    # matching the original format-interpolation behavior.
    printf '\033[%sm%b\033[m\n' "${color}" "${msg}"
}
# file_none <path>: succeeds (returns 0) when the file does NOT exist.
function file_none {
    # Succeeds (returns 0) when the given file does not exist, printing a
    # red diagnostic; returns 1 when the file is present.
    local file_name="${1}"
    # Quoted operand so names with spaces (or an empty argument) test safely.
    if [ ! -f "$file_name" ];then
        cecho $red "${file_name} is not found."
        return 0
    fi
    return 1
}
# file_empty <path>: succeeds (returns 0) when the file is empty.
function file_empty {
    # Succeeds (returns 0) when the file is empty (or missing), printing a
    # red diagnostic; returns 1 when the file has content.
    local file_name="${1}"
    # Quoted operand so names with spaces (or an empty argument) test safely.
    if [ ! -s "$file_name" ];then
        cecho $red "${file_name} is empty."
        return 0
    fi
    return 1
}
function file_set {
    # file_set <name>[.cpp] ...
    # Record the working set in .cww (one base name per line) and create any
    # missing <name>.cpp from the template.
    if [ $# -eq 0 ]; then
        usage
        return 1
    fi
    # Start a fresh working-set file.
    rm -f -- .cww
    local f base
    for f in "$@"; do
        # Base name = everything before the first dot (strips ".cpp"),
        # same result as the old `echo | cut -d. -f1` but without a subshell.
        base=${f%%.*}
        echo "$base" >> .cww
        file_none "${base}.cpp" && cp ~/home/template.cpp "${base}.cpp" && cecho $blue "create ${base}.cpp"
    done
    return 0
}
function build {
    # Compile the working set (.cww) with g++ into a binary named after the
    # first entry; compiler output goes to ./log (first 7 lines echoed).
    file_none .cww && cecho $red "use set subcommand." && return 1
    file_empty .cww && cecho $red "use set subcommand." && return 1
    # mapfile reads one name per line without the old IFS=$'\n' / IFS=$' '
    # dance, which left the *global* IFS set to a lone space afterwards.
    local files
    mapfile -t files < .cww
    local files_num=${#files[@]}
    # 'name' stays global as in the original implementation.
    name=${files[0]}
    local i
    for ((i=0;i<files_num;i++))
    do
        files[i]="${files[i]}.cpp"
    done
    touch log
    cecho $blue "g++ -O3 -std=c++11 ${files[@]} -o $name 2>log"
    g++ -O3 -std=c++11 "${files[@]}" -o "$name.out" -DBTK 2>log
    local ret=$?
    head -7 log
    # Only install the binary when linking actually produced it.
    if [ -e "$name.out" ]; then
        mv "$name.out" "$name"
    fi
    return $ret
}
function file_open {
    # Open every source file in the working set (.cww) in emacs, in order.
    file_none .cww && cecho $red "use set subcommand." && return 1
    file_empty .cww && cecho $red "use set subcommand." && return 1
    # mapfile reads the list without clobbering the global IFS (the old code
    # left IFS set to a single space for the rest of the shell session).
    local files
    mapfile -t files < .cww
    local f
    for f in "${files[@]}"; do
        # file_none prints a warning for entries whose .cpp no longer exists.
        file_none "${f}.cpp" || emacs "${f}.cpp"
    done
    return 0
}
# run_test [-v] [-c] [-fi file] [-fo file] [-di dir] [-do dir]
# Run the binary named by the first .cww entry, wiring its stdin/stdout to
# the console (default), the clipboard (pbpaste), a file, or the files of a
# directory, according to the flags. -v echoes input and/or output.
function run_test {
    # Preconditions: a working set must exist, be non-empty, and be built.
    file_none .cww && cecho $red "use set subcommand." && return 1
    file_empty .cww && cecho $red "use set subcommand." && return 1
    # The binary name is the first entry of .cww.
    IFS=$'\n'
    local files=(`cat .cww`)
    IFS=$' '
    local name=${files[0]}
    file_none $name && cecho $red "use set or build subcommand." && return 1
    # Copy the operands for flag scanning.
    local op=("$@")
    local op_num=`expr $#`
    # Flags plus the input/output targets ("pointers" = paths).
    local vis=$off
    local input=$on_console
    local input_pointer=.input
    local output=$on_console
    local output_pointer=.output
    # Scan every operand; j tracks i+1 so options can read their argument.
    for ((i=0,j=1;i<$op_num;i++,j++))
    do
        case "${op[i]}" in
            "-v")
                vis=$on
                ;;
            "-c")
                input=$use_clipboard
                ;;
            "-fi")
                if [ $j -lt $op_num ];then
                    input=$use_file
                    input_pointer=${op[j]}
                else
                    return 1
                fi
                ;;
            "-fo")
                if [ $j -lt $op_num ];then
                    output=$use_file
                    output_pointer=${op[j]}
                else
                    return 1
                fi
                ;;
            "-di")
                if [ $j -lt $op_num ];then
                    input=$use_directory
                    input_pointer=${op[j]}
                else
                    return 1
                fi
                ;;
            "-do")
                if [ $j -lt $op_num ];then
                    output=$use_directory
                    output_pointer=${op[j]}
                else
                    return 1
                fi
                ;;
            *)
                ;;
        esac
    done
    # Strip a single trailing slash from either path.
    input_pointer=${input_pointer%/}
    output_pointer=${output_pointer%/}
    # Directory mode: run the binary once per file in the input directory.
    if [ $input -eq $use_directory ] || [ $output -eq $use_directory ] ;then
        # Both sides must be directory mode, and both must really exist.
        if [ $input -ne $output ];then
            cecho $red "-d option is incorrect."
            return 1
        fi
        if [ ! -d $input_pointer ];then
            cecho $red "$input_pointer is not a directory."
            return 1
        fi
        if [ ! -d $output_pointer ];then
            cecho $red "$output_pointer is not a directory."
            return 1
        fi
        # IFS is switched to newline around the find expansion so paths are
        # split per line rather than per space, then restored inside the loop.
        IFS=$'\n'
        for tourist in `find ${input_pointer} -maxdepth 1 -type f`;
        do
            IFS=$' '
            # Derive the matching input/output paths from the file name.
            chokudai=`basename ${tourist}`
            latte="${input_pointer}/${chokudai}"
            malta="${output_pointer}/${chokudai}"
            # Execute one run for this file.
            cecho $blue "run ${name} <${latte} >${malta}"
            # Input echoing was disabled here; kept for reference:
            #[ $vis -eq $on ] && cecho $green `cat ${latte} | sed -e ':loop;N;$!b loop;s/\\n/\\\\n/g' `
            $name <${latte} >${malta}
            [ $vis -eq $on ] && cat ${malta}
            IFS=$'\n'
        done
    fi
    # Single-run modes below, one branch per input/output combination.
    # console -> console / console -> file
    if [ $input -eq $on_console ] && [ $output -eq $on_console ];then
        cecho $blue "run ${name}"
        $name
    fi
    if [ $input -eq $on_console ] && [ $output -eq $use_file ];then
        cecho $blue "run ${name} >${output_pointer}"
        $name >${output_pointer}
        [ $vis -eq $on ] && cat ${output_pointer}
    fi
    # clipboard (pbpaste) -> console / file; the sed one-liner joins the
    # echoed input into one line with literal "\n" separators.
    if [ $input -eq $use_clipboard ] && [ $output -eq $on_console ];then
        cecho $blue "run pbpaste | ${name}"
        [ $vis -eq $on ] && cecho $green `pbpaste | sed -e ':loop;N;$!b loop;s/\\n/\\\\n/g'`
        pbpaste | ${name}
    fi
    if [ $input -eq $use_clipboard ] && [ $output -eq $use_file ];then
        cecho $blue "run pbpaste | ${name} >${output_pointer}"
        [ $vis -eq $on ] && cecho $green `pbpaste | sed -e ':loop;N;$!b loop;s/\\n/\\\\n/g'`
        pbpaste | ${name} >${output_pointer}
        [ $vis -eq $on ] && cat ${output_pointer}
    fi
    # file -> console / file
    if [ $input -eq $use_file ] && [ $output -eq $on_console ];then
        cecho $blue "run ${name} <${input_pointer}"
        [ $vis -eq $on ] && cecho $green `cat ${input_pointer} | sed -e ':loop;N;$!b loop;s/\\n/\\\\n/g' `
        $name <${input_pointer}
    fi
    if [ $input -eq $use_file ] && [ $output -eq $use_file ];then
        cecho $blue "run ${name} <${input_pointer} >${output_pointer}"
        [ $vis -eq $on ] && cecho $green `cat ${input_pointer} | sed -e ':loop;N;$!b loop;s/\\n/\\\\n/g' `
        $name <${input_pointer} >${output_pointer}
        [ $vis -eq $on ] && cat ${output_pointer}
    fi
    return 0
}
# Print the gg command-line help text to stdout.
function usage {
cat << EOF
Usage:
  gg [commands]
commands:
  run [option]
    option:
      -c       :use clipboard for input.
      -v       :visualize input and output
      -fi file :assign dir for input file
      -fo file :assign dir for output file
      -di dir  :assign dir for input directory
      -do dir  :assign dir for output directory
  set <src.cpp>
    ... set files to .cww
  open
    ... emacs {.cww}
  build
    ... g++ {.cww}
  copy src
    ... pbcopy
EOF
}
# Top-level command dispatch: the first CLI argument selects the subcommand;
# anything unrecognized falls through to the usage text.
case "${1}" in
    "set")
        file_set ${@:2} && cecho $green "succeeded"
        ;;
    "open")
        file_open && cecho $green "succeeded"
        ;;
    "build")
        build && cecho $green "build succeeded"
        ;;
    "run")
        # "\\n" hands the two characters \n to cecho, which printf turns
        # into a real newline before the success message.
        run_test ${@:2} && cecho $green "\\nrun succeeded"
        ;;
    "copy")
        # Copy a file's contents to the macOS clipboard.
        cecho $blue "pbcopy <$2"
        pbcopy <$2 && cecho $green "copy succeeded"
        ;;
    *)
        usage
        ;;
esac
| true |
a975496071864699da431b2f2164ef25a370a40b | Shell | mauriciojost/dotfiles | /modules/crontab/update-nas-mount.bash | UTF-8 | 380 | 3.671875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Mount the NAS share when this machine can reach the NAS (home network),
# otherwise force-unmount it. Intended to run from cron with the NAS IP as $1.
set -e
set -x

HOME_NAS_IP="$1"
LOG=/tmp/update-nas-mount.bash.log

echo "" >> "$LOG"
echo "" >> "$LOG"
date >> "$LOG"

# The ping must run inside the 'if' condition: with 'set -e', the previous
# pattern (run ping, then test "$?") aborted the whole script on a failed
# ping, so the unmount branch could never execute.
if ping -c 1 "$HOME_NAS_IP" -W 1 &>> "$LOG"; then
  echo "### At home, mounting..." &>> "$LOG"
  mount /mnt/nas &>> "$LOG"
else
  echo "### Not at home, unmounting..." &>> "$LOG"
  sudo umount -f -l /mnt/nas &>> "$LOG"
fi
echo "### Done." &>> "$LOG"
| true |
d77aa71e4bf6647b237c8ac21f9e029f43f91b2b | Shell | bketelsen/dlxshell | /src/lib/create_base.sh | UTF-8 | 8,539 | 3.21875 | 3 | [] | no_license | # Add any function here that is needed in more than one parts of your
# application, or that you otherwise wish to extract from the main function
# scripts.
#
# Note that code here should be wrapped inside bash functions, and it is
# recommended to have a separate file for each function.
#
# Subdirectories will also be scanned for *.sh, so you have no reason not
# to organize your code neatly.
#
create_base() {
cat << EOF > $RECIPE_CONFIG_HOME/base.yaml
image:
description: Ubuntu {{ image.release }}
distribution: ubuntu
release: focal
architecture: x86_64
name: ubuntu-disco-x86_64
architecture_mapped: amd64
architecture_kernel: x86_64
architecture_personality: linux64
source:
downloader: debootstrap
url: http://archive.ubuntu.com/ubuntu
keys:
- 0x790BC7277767219C42C86F933B4FE6ACC0B21F32
- 0xf6ecb3762474eda9d21b7022871920d1991bc93c
keyserver: keyserver.ubuntu.com
same_as: gutsy
targets:
lxc:
create-message: |-
You just created an {{ image.description }} container.
To enable SSH, run: apt install openssh-server
No default root or user password are set by LXC.
config:
- type: all
before: 5
content: lxc.include = LXC_TEMPLATE_CONFIG/ubuntu.common.conf
- type: user
before: 5
content: lxc.include = LXC_TEMPLATE_CONFIG/ubuntu.userns.conf
- type: all
after: 4
content: |-
lxc.include = LXC_TEMPLATE_CONFIG/common.conf
# For Ubuntu 14.04
lxc.mount.entry = /sys/kernel/debug sys/kernel/debug none bind,optional 0 0
lxc.mount.entry = /sys/kernel/security sys/kernel/security none bind,optional 0 0
lxc.mount.entry = /sys/fs/pstore sys/fs/pstore none bind,optional 0 0
lxc.mount.entry = mqueue dev/mqueue mqueue rw,relatime,create=dir,optional 0 0
- type: user
after: 4
content: |-
lxc.include = LXC_TEMPLATE_CONFIG/userns.conf
# For Ubuntu 14.04
lxc.mount.entry = /sys/firmware/efi/efivars sys/firmware/efi/efivars none bind,optional 0 0
lxc.mount.entry = /proc/sys/fs/binfmt_misc proc/sys/fs/binfmt_misc none bind,optional 0 0
- type: all
content: lxc.arch = {{ image.architecture_personality }}
type: ""
files:
- generator: hostname
path: /etc/hostname
- generator: hosts
path: /etc/hosts
- generator: remove
path: /etc/resolvconf/resolv.conf.d/original
- generator: remove
path: /etc/resolvconf/resolv.conf.d/tail
- generator: dump
path: /etc/machine-id
- generator: remove
path: /var/lib/dbus/machine-id
- releases:
- bionic
- eoan
- focal
- groovy
variants:
- default
types:
- container
generator: dump
path: /etc/netplan/10-lxc.yaml
content: |-
network:
version: 2
ethernets:
eth0:
dhcp4: true
dhcp-identifier: mac
- releases:
- trusty
- xenial
types:
- container
generator: dump
path: /etc/network/interfaces
content: |-
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet dhcp
source /etc/network/interfaces.d/*.cfg
- releases:
- bionic
- eoan
- focal
- groovy
variants:
- default
types:
- vm
generator: dump
path: /etc/netplan/10-lxc.yaml
content: |-
network:
version: 2
ethernets:
enp5s0:
dhcp4: true
dhcp-identifier: mac
- releases:
- trusty
- xenial
types:
- vm
generator: dump
path: /etc/network/interfaces
content: |-
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
auto lo
iface lo inet loopback
auto enp5s0
iface enp5s0 inet dhcp
source /etc/network/interfaces.d/*.cfg
- releases:
- trusty
types:
- container
generator: upstart-tty
path: /etc/init/lxc-tty.conf
- variants:
- cloud
generator: cloud-init
name: meta-data
- variants:
- cloud
generator: cloud-init
name: network-config
- variants:
- cloud
generator: cloud-init
name: user-data
- variants:
- cloud
generator: cloud-init
name: vendor-data
- types:
- vm
generator: fstab
name: ext4
- types:
- vm
generator: lxd-agent
name: lxd-agent
- types:
- vm
generator: dump
path: /etc/default/grub.d/50-lxd.cfg
content: |-
GRUB_RECORDFAIL_TIMEOUT=0
GRUB_TIMEOUT=0
GRUB_CMDLINE_LINUX_DEFAULT="${GRUB_CMDLINE_LINUX_DEFAULT} console=tty1 console=ttyS0"
GRUB_TERMINAL=console
- variants:
- default
generator: dump
path: /etc/sudoers.d/90-lxd
content: |-
# User rules for dlx
dlx ALL=(ALL) NOPASSWD:ALL
mode: "0440"
packages:
manager: apt
update: true
cleanup: true
sets:
- packages:
- apt-transport-https
- fuse
- language-pack-en
- openssh-client
- openssh-server
- vim
action: install
- variants:
- cloud
packages:
- cloud-init
action: install
- architectures:
- amd64
- arm64
types:
- vm
packages:
- acpid
action: install
- architectures:
- amd64
types:
- vm
packages:
- grub-efi-amd64-signed
- shim-signed
action: install
- architectures:
- arm64
types:
- vm
packages:
- grub-efi-arm64-signed
action: install
- releases:
- disco
- eoan
- focal
- groovy
architectures:
- arm64
types:
- vm
packages:
- shim-signed
action: install
- releases:
- xenial
types:
- vm
packages:
- linux-virtual-hwe-16.04
action: install
- releases:
- bionic
- eoan
- focal
- groovy
types:
- vm
packages:
- linux-virtual
action: install
- types:
- vm
packages:
- os-prober
action: remove
- packages:
- tailscale
action: install
repositories:
- architectures:
- amd64
- i386
name: sources.list
url: |-
deb http://archive.ubuntu.com/ubuntu {{ image.release }} main restricted universe multiverse
deb http://archive.ubuntu.com/ubuntu {{ image.release }}-updates main restricted universe multiverse
deb http://security.ubuntu.com/ubuntu {{ image.release }}-security main restricted universe multiverse
- architectures:
- armhf
- arm64
- powerpc
- powerpc64
- ppc64el
name: sources.list
url: |-
deb http://ports.ubuntu.com/ubuntu-ports {{ image.release }} main restricted universe multiverse
deb http://ports.ubuntu.com/ubuntu-ports {{ image.release }}-updates main restricted universe multiverse
deb http://ports.ubuntu.com/ubuntu-ports {{ image.release }}-security main restricted universe multiverse
- name: sources.list
url: deb https://pkgs.tailscale.com/stable/ubuntu {{ image.release }} main
key: |
-----BEGIN PGP PUBLIC KEY BLOCK-----
mQINBF5UmbgBEADAA5mxC8EoWEf53RVdlhQJbNnQW7fctUA5yNcGUbGGGTk6XFqO
nlek0Us0FAl5KVBgcS0Bj+VSwKVI/wx91tnAWI36CHeMyPTawdT4FTcS2jZMHbcN
UMqM1mcGs3wEQmKz795lfy2cQdVktc886aAF8hy1GmZDSs2zcGMvq5KCNPuX3DD5
INPumZqRTjwSwlGptUZrJpKWH4KvuGr5PSy/NzC8uSCuhLbFJc1Q6dQGKlQxwh+q
AF4uQ1+bdy92GHiFsCMi7q43hiBg5J9r55M/skboXkNBlS6kFviP+PADHNZe5Vw0
0ERtD/HzYb3cH5YneZuYXvnJq2/XjaN6OwkQXuqQpusB5fhIyLXE5ZqNlwBzX71S
779tIyjShpPXf1HEVxNO8TdVncx/7Zx/FSdwUJm4PMYQmnwBIyKlYWlV2AGgfxFk
mt2VexyS5s4YA1POuyiwW0iH1Ppp9X14KtOfNimBa0yEzgW3CHTEg55MNZup6k2Q
mRGtRjeqM5cjrq/Ix15hISmgbZogPRkhz/tcalK38WWAR4h3N8eIoPasLr9i9OVe
8aqsyXefCrziaiJczA0kCqhoryUUtceMgvaHl+lIPwyW0XWwj+0q45qzjLvKet+V
Q8oKLT1nMr/whgeSJi99f/jE4sWIbHZ0wwR02ZCikKnS05arl3v+hiBKPQARAQAB
tERUYWlsc2NhbGUgSW5jLiAoUGFja2FnZSByZXBvc2l0b3J5IHNpZ25pbmcga2V5
KSA8aW5mb0B0YWlsc2NhbGUuY29tPokCTgQTAQgAOBYhBCWWqZ6qszghiTwKeUWM
qDKVf1hoBQJeVJm4AhsDBQsJCAcCBhUKCQgLAgQWAgMBAh4BAheAAAoJEEWMqDKV
f1hoWHEP/1DYd9WZrodyV5zy1izvj0FXtUReJi374gDn3cHrG6uYtXcE9HWZhxQD
6nDgYuey5sBhLvPQiE/sl5GYXNw/O95XVk8HS54BHCCYq1GeYkZaiCGLGFBA08JK
7PZItGsfdJHwHfhSMtGPS7Cpmylje9gh8ic56NAhC7c5tGTlD69Y8zGHjnRQC6Hg
wF34jdp8JTQpSctpmiOxOXN+eH8N59zb0k30CUym1Am438AR0PI6RBTnubBH+Xsc
eQhLJnmJ1bM6GP4agXw5T1G/qp95gjIddHXzOkEvrpVfJFCtp91VIlBwycspKYVp
1IKAdPM6CVf/YoDkawwm4y4OcmvNarA5dhWBG0Xqse4v1dlYbiHIFcDzXuMyrHYs
D2Wg8Hx8TD64uBHY0fp24nweCLnaZCckVUsnYjb0A494lgwveswbZeZ6JC5SbDKH
Tc2SE4jq+fsEEJsqsdHIC04d+pMXI95HinJHU1SLBTeKLvEF8Zuk7RTJyaUTjs7h
Ne+xWDmRjjR/D/GXBxNrM9mEq6Jvp/ilYTdWwAyrSmTdotHb+NWjAGpJWj5AZCH9
HeBr2mtVhvTu3KtCQmGpRiR18zMbmemRXUh+IX5hpWGzynhtnSt7vXOvhJdqqc1D
VennRMQZMb09wJjPcvLIApUMl69r29XmyB59NM3UggK/UCJrpYfmuQINBF5UmbgB
EADTSKKyeF3XWDxm3x67MOv1Zm3ocoe5xGDRApPkgqEMA+7/mjVlahNXqA8btmwM
z1BH5+trjOUoohFqhr9FPPLuKaS/pE7BBP38KzeA4KcTiEq5FQ4JzZAIRGyhsAr+
6bxcKV/tZirqOBQFC7bH2UAHH7uIKHDUbBIDFHjnmdIzJ5MBPMgqvSPZvcKWm40g
W+LWMGoSMH1Uxd+BvW74509eezL8p3ts42txVNvWMSKDkpiCRMBhfcf5c+YFXWbu
r5qus2mnVw0hIyYTUdRZIkOcYBalBjewVmGuSIISnUv76vHz133i0zh4JcXHUDqc
yLBUgVWckqci32ahy3jc4MdilPeAnjJQcpJVBtMUNTZ4KM7UxLmOa5hYwvooliFJ
wUFPB+1ZwN8d+Ly12gRKf8qA/iL8M5H4nQrML2dRJ8NKzP2U73Fw+n6S1ngrDX8k
TPhQBq4EDjDyX7SW3Liemj5BCuWJAo53/2cL9P9I5Nu3i2pLJOHzjBSXxWaMMmti
kopArlSMWMdsGgb0xYX+aSV7xW+tefYZJY1AFJ1x2ZgfIc+4zyuXnHYA2jVYLAfF
pApqwwn8JaTJWNhny/OtAss7XV/WuTEOMWXaTO9nyNmHla9KjxlBkDJG9sCcgYMg
aCAnoLRUABCWatxPly9ZlVbIPPzBAr8VN/TEUbceAH0nIwARAQABiQI2BBgBCAAg
FiEEJZapnqqzOCGJPAp5RYyoMpV/WGgFAl5UmbgCGwwACgkQRYyoMpV/WGji9w/8
Di9yLnnudvRnGLXGDDF2DbQUiwlNeJtHPHH4B9kKRKJDH1Rt5426Lw8vAumDpBlR
EeuT6/YQU+LSapWoDzNcmDLzoFP7RSQaB9aL/nJXv+VjlsVH/crpSTTgGDs8qGsL
O3Y2U1Gjo5uMBoOfXwS8o1VWO/5eUwS0KH7hpbOuZcf9U9l1VD2YpGfnMwX1rnre
INJqseQAUL3oyNl76gRzyuyQ4AIA06r40hZDgybH0ADN1JtfVk8z4ofo/GcfoXqm
hifWJa2SwwHeijhdN1T/kG0FZFHs1DBuBYJG3iJ3/bMeL15j1OjncIYIYccdoEUd
uHnp4+ZYj5kND0DFziTvOC4WyPpv3BlBVariPzEnEqnhjx5RYwMabtTXoYJwUkxX
2gAjKqh2tXissChdwDGRNASSDrChHLkQewx+SxT5kDaOhB84ZDnp+urn9A+clLkN
lZMsMQUObaRW68uybSbZSmIWFVM1GovRMgrPG3T6PAykQhFyE/kMFrv5KpPh7jDj
5JwzQkxLkFMcZDdS43VymKEggxqtM6scIRU55i059fLPAVXJG5in1WhMNsmt49lb
KqB6je3plIWOLSPuCJ/kR9xdFp7Qk88GCXEd0+4z/vFn4hoOr85NXFtxhS8k9GfJ
mM/ZfUq7YmHR+Rswe0zrrCwTDdePjGMo9cHpd39jCvc=
=AIVM
-----END PGP PUBLIC KEY BLOCK-----
actions:
- variants:
- default
trigger: post-update
action: |-
#!/bin/sh
set -eux
# Create the dlx user account
getent group sudo >/dev/null 2>&1 || groupadd --system sudo
useradd --create-home -s /bin/bash -G sudo -U dlx
- releases:
- bionic
- eoan
- focal
- groovy
trigger: post-packages
action: |-
#!/bin/sh
set -eux
# Enable systemd-networkd
systemctl enable systemd-networkd
- trigger: post-packages
action: |-
#!/bin/sh
set -eux
# Make sure the locale is built and functional
locale-gen en_US.UTF-8
update-locale LANG=en_US.UTF-8
# Cleanup underlying /run
mount -o bind / /mnt
rm -rf /mnt/run/*
umount /mnt
# Cleanup temporary shadow paths
rm /etc/*-
- types:
- vm
trigger: post-files
action: |-
#!/bin/sh
set -eux
TARGET="x86_64"
[ "$(uname -m)" = "aarch64" ] && TARGET="arm64"
update-grub
grub-install --uefi-secure-boot --target="${TARGET}-efi" --no-nvram --removable
update-grub
sed -i "s#root=[^ ]*#root=/dev/sda2#g" /boot/grub/grub.cfg
mappings:
architecture_map: debian
EOF
current_user=`whoami`
old="dlx"
filename="data.txt"
sed -i "s/$old/$current_user/" $RECIPE_CONFIG_HOME/base.yaml
}
| true |
f3f5b96b563024a5e8fecc8a7f953bb8263112b5 | Shell | thibauld/api | /scripts/test_e2e.sh | UTF-8 | 4,660 | 4.125 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# main <repo:phase ...>
# Validates every requested step up front (running any "cleanup" steps as
# they are parsed), then executes the remaining steps in order. Globals set
# here (LOCAL_DIR, LOCAL_NAME, STEP) are read by the helpers below.
main() {
  # exit script if any error occurs
  set -e
  # cleanup upon exit or termination
  trap "finish 2" INT
  trap "finish 15" TERM
  trap 'finish $?' EXIT
  # check script parameters (first pass: validate, and run "cleanup" steps)
  for STEP in $@; do
    parseStep
  done
  # set variables
  LOCAL_DIR=$PWD
  LOCAL_NAME=$(basename ${LOCAL_DIR})
  [ -f "${LOCAL_DIR}/.env" ] && source ${LOCAL_DIR}/.env
  # second pass: actually execute each requested repo:phase step
  for STEP in $@; do
    if [ ${STEP} != "cleanup" ]; then
      # parse script parameters
      parseStep
      # set repository location
      setRepoDir
      if [ "$PHASE" = "install" ]; then
        install
      elif [ "$PHASE" = "run" ]; then
        setArtifactsDir
        # the api service needs its Postgres database selected first
        [ "$REPO_NAME" = "api" ] && setPgDatabase
        run
      elif [ "$PHASE" = "testE2E" ]; then
        testE2E
      fi
    fi
  done
}
cleanup() {
  # Tear down anything an e2e run left behind. A previous version also
  # matched selenium/chromedriver/Chrome processes.
  # '|| :' keeps "no matching process" from failing the caller under set -e.
  pkill node || :
}
# finish <code>: common exit path wired to the EXIT/INT/TERM traps.
finish() {
  # can't rely on $? because of the sleep command running in parallel with spawned jobs
  EXIT_CODE=$1
  if [ ${EXIT_CODE} -ne 0 ]; then
    # Failure path: disarm the traps so this isn't re-entered, then kill
    # whatever the steps spawned.
    trap '' EXIT TERM
    cleanup
  fi
  echo "Finished with exit code $EXIT_CODE."
  exit ${EXIT_CODE}
}
parseStep() {
  # Split the global STEP ("repo:phase") into the REPO_NAME / PHASE globals
  # and validate the combination; the literal step "cleanup" is executed
  # immediately instead.
  if [ "${STEP}" = "cleanup" ]; then
    cleanup
  else
    # Parameter expansion replaces the old echo|sed subshells:
    # text before the first ':' and text after the last ':'.
    REPO_NAME=${STEP%%:*}
    PHASE=${STEP##*:}
    if ( [ "$REPO_NAME" != "api" ] && [ "$REPO_NAME" != "website" ] && [ "$REPO_NAME" != "app" ] ) ||
       ( [ "$PHASE" != "install" ] && [ "$PHASE" != "run" ] && [ "$PHASE" != "testE2E" ] ) ||
       ( [ "$REPO_NAME" = "api" ] && [ "$PHASE" = "testE2E" ] ); then
      echo "Unrecognized step $STEP"
      usage 1;
    fi
  fi
}
# Print CLI help to stdout and exit with the status code given in $1.
usage() {
  CMD=test_e2e.sh
  echo " "
  echo "Usage: $CMD [<repo>:<phase> <repo>:<phase> ... <repo>:<phase>] [cleanup]"
  echo " "
  echo "  <repo>: api, website or app"
  echo "  <phase>: install, run or testE2E. testE2E not applicable to api."
  echo " "
  echo "E.g : $CMD website:install"
  echo "      $CMD website:run"
  echo " "
  echo "      $CMD cleanup"
  echo " "
  exit $1;
}
# Resolve REPO_DIR for the current REPO_NAME. When the step targets the repo
# we are already inside, use the current checkout; otherwise read <REPO>_DIR
# from the sourced .env in development, or fall back to ~/<repo>.
setRepoDir() {
  if [ ${REPO_NAME} = ${LOCAL_NAME} ]; then
    REPO_DIR=${LOCAL_DIR}
  else
    if [ "$NODE_ENV" = "development" ]; then
      # e.g. REPO_NAME=api -> API_DIR; the value is fetched via bash
      # indirect expansion (${!VAR}) below.
      REPO_DIR_VAR_NAME=$(echo ${REPO_NAME} | awk '{print toupper($0)}')_DIR
      if [ ! -d "${!REPO_DIR_VAR_NAME}" ]; then
        echo "$REPO_DIR_VAR_NAME not configured in .env"
        exit 1
      fi
      REPO_DIR=${!REPO_DIR_VAR_NAME}
    else
      REPO_DIR="$HOME/$REPO_NAME"
    fi
  fi
}
setArtifactsDir() {
  # Artifacts live under the repo checkout in development and under
  # CircleCI's artifacts directory everywhere else.
  case "$NODE_ENV" in
    development) ARTIFACTS_DIR="${LOCAL_DIR}/test/e2e/output" ;;
    *)           ARTIFACTS_DIR="${CIRCLE_ARTIFACTS}/e2e" ;;
  esac
  mkdir -p "${ARTIFACTS_DIR}"
  echo "Artifacts directory set to $ARTIFACTS_DIR"
}
setPgDatabase() {
  # Only in development: point the api at a scratch database so e2e runs
  # cannot clobber the developer's data. No-op elsewhere.
  if [ "$NODE_ENV" != "development" ]; then
    return 0
  fi
  echo "setting PG_DATABASE=opencollective_e2e"
  export PG_DATABASE=opencollective_e2e
}
# Check out the target repo (if needed) and run 'npm install' inside it,
# then expose its node_modules through the cache symlink.
install() {
  if [ -d ${REPO_DIR} ]; then
    echo "$REPO_NAME already checked out to $REPO_DIR"
  else
    echo "Checking out $REPO_NAME into $REPO_DIR"
    # use Github SVN export to avoid fetching git history, faster
    REPO_SVN=https://github.com/OpenCollective/${REPO_NAME}/trunk
    svn export ${REPO_SVN} ${REPO_DIR}
  fi
  cd ${REPO_DIR}
  echo "Performing NPM install"
  START=$(date +%s)
  npm install
  END=$(date +%s)
  echo "Executed NPM install in $(($END - $START)) seconds"
  linkRepoNmToCache
}
linkRepoNmToCache() {
  # Symlink the repo's node_modules into ~/cache so later jobs can reuse it.
  # These variables stay global, as in the original implementation.
  REPO_NM="${REPO_DIR}/node_modules/"
  CACHE_DIR="${HOME}/cache/"
  if [ ! -d "${CACHE_DIR}" ]; then
    mkdir "${CACHE_DIR}"
  fi
  REPO_NM_CACHE="${CACHE_DIR}/${REPO_NAME}_node_modules"
  echo "Linking ${REPO_NM_CACHE} -> ${REPO_NM}"
  ln -s "${REPO_NM}" "${REPO_NM_CACHE}"
}
# runProcess <name> <dir> <command>
# Launch a long-running service in the background from <dir>, teeing its
# output to an artifacts log, then wait a fixed delay for it to come up.
# If the spawned command dies, it kills this (parent) script, whose EXIT
# trap then performs the cleanup.
runProcess() {
  NAME=$1
  cd $2
  COMMAND=$3
  LOG_FILE="$ARTIFACTS_DIR/$NAME.log"
  PARENT=$$
  # in case spawned process exits unexpectedly, kill parent process and its sub-processes (via the trap)
  sh -c "$COMMAND | tee $LOG_FILE 2>&1;
         kill $PARENT 2>/dev/null" &
  echo "Started $NAME with PID $! and saved output to $LOG_FILE"
  # Wait for startup. Break down sleep into pieces to allow prospective kill signals to get trapped.
  if [ "$NODE_ENV" = "development" ]; then
    DELAY=5
  else
    DELAY=40
  fi
  for i in $(seq ${DELAY}); do sleep 1; done
  echo "Waited for $NAME startup during $DELAY seconds"
}
run() {
  # Start the repo's service ('npm start') in the background via runProcess.
  # Guard clause: refuse to run when the checkout directory is missing.
  if [ ! -d ${REPO_DIR} ]; then
    echo "${REPO_NAME} not installed in ${REPO_DIR}, exiting."
    exit 1
  fi
  runProcess ${REPO_NAME} ${REPO_DIR} 'npm start'
}
# Run the repo's nightwatch end-to-end suite inside its checkout.
testE2E() {
  echo "Starting ${REPO_NAME} E2E tests"
  cd ${REPO_DIR}
  npm run nightwatch
  echo "Finished ${REPO_NAME} E2E tests"
}
# Entry point: forward all CLI arguments, quoted so arguments containing
# spaces survive word splitting (the previous unquoted $@ re-split them).
main "$@"
c1021358a96c5eb132760d80c32cae5c2f398592 | Shell | roh9singh/Hadoop-Installation-Scripts | /spark-tera.sh | UTF-8 | 1,998 | 3.171875 | 3 | [] | no_license | #This a script torun the Spark Sort
# Spark TeraSort benchmark driver: generate data with gensort, import it to
# HDFS, sort with spark-terasort, validate, then show output samples.
# Usage: spark-tera.sh <record-count>   (gensort records are 100 bytes each)

# Total data size in bytes (arithmetic expansion replaces `expr`).
size=$(( $1 * 100 ))
# Show the running Hadoop/JVM processes and their ports.
jps
# Generate $1 records with gensort into the local staging file.
printf "\t\n\n ---------------------Generating $size Byte data----------------------\n\n"
sudo chmod 700 gensort
./gensort -a "$1" /mnt/raid/input
# Import the generated data from local disk into HDFS.
printf "\t\n\n ---------------------Importing $size Byte data to HDFS----------------------\n\n"
hadoop fs -copyFromLocal /mnt/raid/input /inputfile
# Start timestamp (seconds.nanoseconds).
START=$(date +%s.%N)
# Run the sort.
printf "\t ---------------------Sorting $size Byte data ----------------------\n"
spark-submit --class com.rohit.spark.terasort.TeraSort spark-terasort/target/spark-terasort-1.0-SNAPSHOT-jar-with-dependencies.jar hdfs:///inputfile hdfs:///outputfile
# End timestamp; bc handles the fractional-second subtraction.
END=$(date +%s.%N)
DIFF=$(echo "$END - $START" | bc)
printf "Time for Execution : $DIFF s \n"
printf "\n"
# Validate the sorted output with TeraValidate.
printf "\t---------------------Validating $size Byte data----------------------\n"
spark-submit --class com.rohit.spark.terasort.TeraValidate spark-terasort/target/spark-terasort-1.0-SNAPSHOT-jar-with-dependencies.jar hdfs:///outputfile hdfs:///validatefile
# Show the first and last lines of the sorted output, both on screen and
# captured into a report file.
printf "\t---------------------Sorted Output---------------------\n"
printf "\t---------------------First 10 lines---------------------\n"
hadoop fs -cat /outputfile/part-r-00000 | head -n 10
printf "\t---------------------Last 10 lines----------------------\n"
hadoop fs -cat /outputfile/part-r-00074 | tail -n 10
(printf "\t---------------------Sorted Output---------------------\n"
printf "\t---------------------First 10 lines---------------------\n"
hadoop fs -cat /outputfile/part-r-00000 | head -n 10
printf "\t---------------------Last 10 lines----------------------\n"
hadoop fs -cat /outputfile/part-r-00074 | tail -n 10
)> terasort-spark-memory-10GB.txt
| true |
1441181229ba9d8425447292991783f57ae55264 | Shell | tt-jsr/configfiles | /machines/linux-vm/jeff_home/.bash_aliases | UTF-8 | 3,603 | 3.046875 | 3 | [] | no_license |
alias ll='ls -lF'
alias la='ls -AF'
alias l='ls -CF'
alias ssh='ssh -o GSSAPIAuthentication=no'
alias cssh='chef_ssh'
#debesys
alias Make='make -rR -j${CPU} --quiet show_progress=1 config=debug '
alias ttknife='./run ./ttknife'
alias spy='./run python ttex/unittests/spy.py'
alias ttrader='./run t_trader/tt/ttrader/t_trader.py -c ~/ttrader.conf'
alias pytrader='./run pytrader/src/pytrader.py -c ~/pytrader.conf'
alias scripttrader='./run t_trader/tt/ttrader/script_trader.py -c ~/ttrader.conf'
alias pytest='./run python ttex/pyTests/run_tests.py'
alias req-build='./run deploy/chef/scripts/request_build.py'
alias req-deploy='./run deploy/chef/scripts/request_deploy.py'
alias bump='./run deploy/chef/scripts/bump_cookbook_version.py'
#vim projects
alias ettex='gvim -c ":Project ~/ttex.proj"'
alias eachtung='gvim -c ":Project ~/achtung.proj"'
alias ecppactor='gvim -c ":Project ~/cppactor.proj"'
alias eoc='gvim -c ":Project ~/oc.proj"'
alias ettrader='gvim -c ":Project ~/ttrader.proj"'
alias epytrader='gvim -c ":Project ~/pytrader.proj"'
#directories
alias cdlog='cd /var/log/debesys'
alias cddeb='cd ~/projects/debesys'
#git
alias status='git stash list;git status'
alias co='git-checkout'
alias ci='git commit'
alias pull='git pull origin'
alias push='git push origin'
alias delete='delete-branch'
alias branches='git branch'
alias gsu='git submodule update'
alias gfo='git fetch origin'
alias gss='git stash save'
alias gsp='git stash pop'
alias gsl='git stash list'
function git-checkout {
git fetch origin
git remote prune origin
if [ -n "$1" ]
then
case $1 in
master)
branch='master'
;;
uat)
branch='uat/current'
;;
stage)
branch='release/current'
;;
dev)
branch='develop'
;;
*)
branch=$1
;;
esac
else
echo
PS3="Branch: "
branches=`git for-each-ref --format='%(refname:short)' refs/heads`
select b in $branches;
do
branch=$b
break;
done
fi
git checkout $branch
echo "git submodule update"
git submodule update
git stash list
}
function delete-branch {
PS3="Branch: "
branches=`git for-each-ref --format='%(refname:short)' refs/heads`
select branch in $branches;
do
echo -n "Delete $branch (y/n)? "
read yesno
if [ "$yesno" = 'y' ]
then
git branch -d $branch
git push origin :$branch
fi
break;
done
}
function chef_ssh {
if [ -z "$1" -o -z "$2" ]
then
echo "Usage: chef-ssh env recipe"
echo "Environments: dev, stage, sqe, devsim"
echo " uat, prod, prodsim"
return
fi
knife=~/.chef/knife.rb
case $1 in
dev)
env='int-dev-cert'
;;
stage)
env='int-stage-cert'
;;
devsim)
env='int-dev-sim'
;;
sqe)
env='int-sqe-cert'
;;
uat)
env='ext-uat-cert'
knife=~/.chef/knife.external.rb
;;
prod)
env='ext-prod-live'
knife=~/.chef/knife.external.rb
;;
prodsim)
env='ext-prod-sim'
knife=~/.chef/knife.external.rb
;;
esac
oc=$2
ips=`./run ./ttknife --config $knife search node "chef_environment:$env AND recipe:$oc" | grep IP | sed 's/IP:[ \t]*\([0-9.]*\)/\1/'`
PS3="Machine: "
select selection in $ips
do
ssh root@$selection
break
done
}
| true |
60a87756ae8671fd993ccca8276a5f0773f65b12 | Shell | t33r0hr/kio-ng2-structure | /scripts/watch.sh | UTF-8 | 316 | 3.234375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Paths derived from this script's own location.
# NOTE(review): SCRIPT_PATH and SCRIPT_FILE appear unused below — possibly
# kept for debugging/sourcing; confirm before removing.
SCRIPT_PATH="$(dirname "${0}")"
SCRIPT_FILE="$(basename "${0}")"
CLI_ROOT="$(cd "$(dirname "${0}")/.."; pwd)"
# nodemon from the project's own node_modules.
NODEMON_BIN="${CLI_ROOT}/node_modules/.bin/nodemon"
# npm script to re-run on changes; defaults to "build".
NPM_COMMAND=${1:-build}
# Watch ./src for TypeScript changes and re-run the selected npm script.
function main() {
  cd "${CLI_ROOT}"
  $NODEMON_BIN -w ./src -e ts --exec "npm run ${NPM_COMMAND}"
}
main | true |
d340a3307fb94d6df1cbd6223a2a1706118f669e | Shell | shinkou/maxwell-eval | /master_slave/mk-pcluster.sh | UTF-8 | 3,777 | 3.3125 | 3 | [] | no_license | #!/bin/bash
# Resolve this script's own location ($() and quoting replace the fragile
# backtick/unquoted-$0 combination) so data files are found regardless of
# the caller's working directory.
SCRIPT=$(readlink -f "$0")
BASEDIR=$(dirname "$SCRIPT")
# Name of the shared data volume, where it lives on the host, and where it
# is mounted inside the helper containers.
DATAVOL='data'
DATADIR="$BASEDIR"
DATAMNT="/$DATAVOL"
# Create the dedicated docker bridge network with a fixed subnet so the
# containers created by mkvms can be pinned to static 172.19.0.x addresses.
function mknet()
{
    docker network create --subnet '172.19.0.0/24' maxwell-net
}
# Create (but do not start) the four containers of the evaluation cluster,
# each with a static IP on maxwell-net:
#   .2 MySQL master, .3 MySQL slave, .4 ZooKeeper, .5 Kafka.
function mkvms()
{
    docker create --network maxwell-net --ip '172.19.0.2' -e MYSQL_ROOT_PASSWORD='secret' --name maxwell-mysql-master 'mysql:5.7.21'
    docker create --network maxwell-net --ip '172.19.0.3' -e MYSQL_ROOT_PASSWORD='secret' --name maxwell-mysql-slave 'mysql:5.7.21'
    docker create --network maxwell-net --ip '172.19.0.4' --name maxwell-zookeeper 'shinkou/zookeeper'
    docker create --network maxwell-net --ip '172.19.0.5' -e ZOOKEEPER=maxwell-zookeeper --name maxwell-kafka 'shinkou/kafka'
}
# Start all containers, seed the master, configure binlog replication on
# master and slave, copy the master dump into the slave, then (after user
# confirmation) run init_slave.sql to start replication.
# Globals: DATADIR (host dir bind-mounted at $DATAMNT), BASEDIR.
# NOTE: the command ordering here is load-bearing (dump before slave config,
# restarts after each my.cnf edit) — do not reorder.
function initvms()
{
	docker start maxwell-zookeeper maxwell-kafka maxwell-mysql-master maxwell-mysql-slave
	# Crude wait for mysqld to come up before running clients against it.
	sleep 10
	# Seed the master, then take a dump (with binlog coordinates) for the slave.
	docker run -v "$DATADIR:$DATAMNT" --network 'maxwell-net' --rm -it 'mysql' bash -c 'mysql -h'"'"'maxwell-mysql-master'"'"' -uroot -psecret < /data/init_master.sql'
	docker run -v "$DATADIR:$DATAMNT" --network 'maxwell-net' --rm -it 'mysql' bash -c 'mysqldump -h'"'"'maxwell-mysql-master'"'"' -uroot -psecret --all-databases --master-data > /data/dbdump.db'
	# Enable binary logging on the master, then restart to apply.
	docker exec maxwell-mysql-master bash -c 'echo "server_id=1" >> /etc/mysql/mysql.conf.d/mysqld.cnf'
	docker exec maxwell-mysql-master bash -c 'echo "log-bin=mysql-bin" >> /etc/mysql/mysql.conf.d/mysqld.cnf'
	docker exec maxwell-mysql-master bash -c 'echo "innodb_flush_log_at_trx_commit=1" >> /etc/mysql/mysql.conf.d/mysqld.cnf'
	docker exec maxwell-mysql-master bash -c 'echo "sync_binlog=1" >> /etc/mysql/mysql.conf.d/mysqld.cnf'
	docker restart maxwell-mysql-master
	# Configure the slave (replication disabled until init_slave.sql runs).
	docker exec maxwell-mysql-slave bash -c 'echo "server_id=2" >> /etc/mysql/mysql.conf.d/mysqld.cnf'
	docker exec maxwell-mysql-slave bash -c 'echo "log-bin=master" >> /etc/mysql/mysql.conf.d/mysqld.cnf'
	docker exec maxwell-mysql-slave bash -c 'echo "log-slave-updates" >> /etc/mysql/mysql.conf.d/mysqld.cnf'
	docker exec maxwell-mysql-slave bash -c 'echo "skip-slave-start" >> /etc/mysql/mysql.conf.d/mysqld.cnf'
	docker restart maxwell-mysql-slave
	# Load the master dump into the slave and clean up the dump file.
	docker run -v "$DATADIR:$DATAMNT" --network 'maxwell-net' --rm -it 'mysql' bash -c 'mysql -h'"'"'maxwell-mysql-slave'"'"' -uroot -psecret < /data/dbdump.db'
	docker run -v "$DATADIR:$DATAMNT" --network 'maxwell-net' --rm -it 'mysql' bash -c 'rm /data/dbdump.db'
	docker run -v "$DATADIR:$DATAMNT" --network 'maxwell-net' --rm -it 'mysql' bash -c 'mysql -h'"'"'maxwell-mysql-master'"'"' -uroot -psecret -e '"'"'SHOW MASTER STATUS;'"'"''
	echo
	cat ./init_slave.sql
	echo -n 'Should we go ahead and run the above query (Yes/no)? '
	read go_ahead
	# ${var,,} lowercases the answer so "Yes"/"YES"/"y" all match.
	case "${go_ahead,,}" in
	yes | y)
		docker run -v "$DATADIR:$DATAMNT" --network 'maxwell-net' --rm -it 'mysql' bash -c 'mysql -h'"'"'maxwell-mysql-slave'"'"' -uroot -psecret < /data/init_slave.sql'
		echo 'Finished setting up master-slave MySQL cluster.'
		;;
	*)
		echo 'Please make sure to run a SQL statement like the following to finish the setup:'
		echo
		cat "${BASEDIR}/init_slave.sql"
		echo
		echo 'Good luck!'
		echo
		;;
	esac
}
# Print the command-line usage summary for this script to stdout.
function printusage()
{
	printf '%s\n' \
		'Usage: mk-pcluster.sh [ ARG [ ARG [ ... ] ] ]' \
		'' \
		'where' \
		'	ARG	"network", "containers", or "all"' \
		''
}
# Prompt for the host data directory, keeping the current $DATADIR when the
# user just presses Enter (or when stdin is closed).
# Globals: DATADIR (read for the default, overwritten on non-empty input).
function getdatadir()
{
	local datadir
	echo "Please enter the path of your data folder (default: \"$DATADIR\"): "
	# -r keeps backslashes in typed paths literal instead of escapes.
	read -r datadir
	echo
	if [[ -n "$datadir" ]]; then
		DATADIR="$datadir"
	fi
}
# Entry point: ask for the data directory, then treat every argument as an
# action ("network", "containers", "all"); no arguments means "containers".
getdatadir
if [[ $# -eq 0 ]]; then
	set -- containers
fi
for arg in "$@"; do
	case $arg in
	--help | -h)
		printusage
		;;
	network)
		mknet
		;;
	containers)
		# Create the containers and bring the cluster up (assumes the
		# maxwell-net network already exists).
		mkvms
		initvms
		;;
	all)
		mknet
		mkvms
		initvms
		;;
	*)
		echo "Invalid argument \"$arg\"."
		echo
		printusage
		exit 1
		;;
	esac
done
| true |
bf9123c93bffca9516591038fe077b9e1acda901 | Shell | larsks/flocx | /scripts/build-docs | UTF-8 | 467 | 3.125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/sh
# Build the Sphinx HTML docs, then bundle a standalone ReDoc HTML page for
# each OpenAPI spec under docs/specs/, and stamp build metadata.
set -e
# Prefer project-local node binaries (redoc-cli) over system ones.
PATH=$PWD/node_modules/.bin:$PATH
export PATH
rm -rf htmldocs
mkdir -p htmldocs/html/specs
make html
for spec in docs/specs/*.yaml; do
	# With no matching files the glob stays literal; skip it.
	[ -e "$spec" ] || continue
	basename=${spec##*/}
	basename_no_ext=${basename%.yaml}
	# Quote the paths so spec names containing spaces do not word-split.
	redoc-cli bundle "$spec" -o "htmldocs/html/specs/${basename_no_ext}.html"
done
# Disable github jekyll processing
touch htmldocs/html/.nojekyll
cat > htmldocs/html/metadata.json <<EOF
{
	"date": "$(date)",
	"revision": "$(git rev-parse HEAD)"
}
EOF
| true |
ddbf14d1ddbd842576469d28d1f6c1f1560bde57 | Shell | leeleelee3264/linux-study-collection | /bashScript/lesson21_grep.sh | UTF-8 | 272 | 3.53125 | 4 | [] | no_license | #! /bin/bash
# Interactive grep demo: ask for a file and a pattern, then print all lines
# NOT matching the pattern (-v), case-insensitively (-i), with numbers (-n).
echo "Enter filename to search text from"
# -r keeps backslashes in the typed path literal.
read -r fileName
# Quoting matters here: with an empty reply the old unquoted test collapsed
# to "[ -f ]", which is TRUE, and grep then ran with no file at all.
if [ -f "$fileName" ]
then
	echo "Enter the text to search"
	read -r grepvar
	# "--" stops option parsing so patterns starting with "-" are safe.
	grep -inv -- "$grepvar" "$fileName"
else
	echo "$fileName does not exist"
fi
| true |
8ddeb8868dda357c1be8dc671b22bfbcbb993124 | Shell | Markuze/mmo-static | /try_set.sh | UTF-8 | 1,223 | 3.4375 | 3 | [] | no_license | #!/bin/bash
# Extract the CONFIG_* symbols that gate $base in $dir/Makefile (patterns
# like "obj-$(CONFIG_FOO)") and enable each one via ./scripts/config.
# Globals (set by the caller): base, dir, tmp (scratch file for the symbols).
# NOTE(review): must be run from a kernel source tree root so that
# ./scripts/config exists — not verifiable from this file alone.
function set_module {
	#grep $base $dir/Makefile|grep -oP "obj-\\$\(\w+\)" |grep -oP "\(\w+\)"|grep -Po "\w+"
	grep $base $dir/Makefile|grep -oP "obj-\\$\(\w+\)" |grep -oP "\(\w+\)"|grep -Po "\w+" > $tmp
	# Fall back to scraping every obj-$(...) symbol when no line mentions $base.
	if [ $? -ne 0 ]; then
		grep -oP "obj-\\$\(\w+\)" $dir/Makefile|grep -oP "\(\w+\)"|grep -Po "\w+" > $tmp
	fi
	for i in `cat $tmp`;
	do
		echo "./scripts/config --enable $i"
		./scripts/config --enable $i
	done
}
# Print the usage line plus a diagnostic message to stderr and abort.
# Arguments: $* - error text to show after the usage line.
function die {
	echo "usage: $0 -f <file name>" >&2
	echo "$@" >&2
	# "exit -1" is not a valid status; exit 1 signals failure portably.
	exit 1
}
# Parse -f <file>, then enable the kernel config options for the module that
# builds that file, and again for its parent directory's Makefile.
echo "hello"
while getopts ":f:" opt; do
	case ${opt} in
	f )
		ofile=$OPTARG
		[ -e "$ofile" ] || die "no such file $ofile"
		;;
	\? )
		echo "Invalid Option: -$OPTARG" 1>&2
		exit 1
		;;
	: )
		echo "Invalid Option: -$OPTARG requires an argument" 1>&2
		exit 1
		;;
	esac
done
shift $((OPTIND -1))
echo ">$ofile<"
[ -z "${ofile}" ] && die "-f option is required"
base=$(basename "$ofile")
dir=$(dirname "$ofile")
# Bug fix: this was single-quoted ('/tmp/${ofile}.txt'), so the variable
# never expanded and every run shared one literal scratch file name.
# Use the slash-free basename so the path is a valid flat file in /tmp.
tmp="/tmp/${base}.txt"
set_module
echo "$dir"
# Second pass: repeat for the parent directory's Makefile.
base=$(basename "$dir")
dir=$(dirname "$dir")
set_module
exit 0
#for i in `cat $tmp`;
#do
# grep $i .config
# echo $?
#done
#scripts/kconfig/conf --silentoldconfig Kconfig
#for i in `cat $tmp`;
#do
# grep $i .config
# echo $?
#done
| true |
c11ece3adde43f30d513e3e5fb58a2287fca0954 | Shell | koenrh/shell-scripts | /test/urldecode.bats | UTF-8 | 675 | 3.375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bats
# bats per-test hook: default WORKSPACE to the repo root (parent of this
# test directory) unless the caller already exported it.
function setup() {
  export WORKSPACE="${WORKSPACE-"${BATS_TEST_DIRNAME}/.."}"
}
# Run the urldecode script under test on $1 and fail (return 1, with a
# readable message) unless its output equals $2.
# Arguments: $1 - URL-encoded input, $2 - expected decoded string.
function assert_key_equals {
  key="$1"
  got=$("$WORKSPACE/urldecode" "$key")
  expected=$2
  if [[ "$got" != "$expected" ]]
  then
    echo "Expected \"$got\" to equal \"$expected\""
    return 1
  fi
}
# Decoding of percent-encoded path segments and full URLs (%2F -> "/", etc.).
@test "url decode paths and URLs" {
  assert_key_equals "%2Fapi%2F2%2Fstudio%2Fcookie%2F78b" "/api/2/studio/cookie/78b"
  assert_key_equals "url=https%3A%2F%2Fwww.example.nl%2F" "url=https://www.example.nl/"
}
# Decoding of JSON payloads: braces, quotes and brackets round-trip intact.
@test "url decode JSON strings" {
  assert_key_equals "%7B%22a%22%3A%22%5Bfoo%5D%22%2C%22b%22%3A%22%5Bbar%5D%22%7D" "{\"a\":\"[foo]\",\"b\":\"[bar]\"}"
}
| true |
582881d58a4531a66ee841a619f1ff82e87b7b5c | Shell | TimothyHelton/pyproject_generator | /test_pyproject_generator.sh | UTF-8 | 729 | 3.5 | 4 | [] | permissive | #!/bin/bash
# test_pyproject_generator.sh
# Smoke test for the package generator: tear down any previous test package
# and its Docker artifacts, regenerate it, and refresh its Docker config.
test_dir="test123"
if [ -d "${test_dir}" ]; then
	echo "Closing containers and deleting test package"
	cd "${test_dir}";
	make docker-down;
	cd .. && sudo rm -rf "${test_dir}"
fi
echo
echo "################################"
echo "Cleaning Docker Artifacts..."
docker image rm "${test_dir}_python"
docker network rm "${USER}-${test_dir}-network"
docker volume rm "${USER}_${test_dir}-secret"
echo
echo "################################"
echo "Creating Test package..."
# Bug fix: a stray trailing "\" here used to continue the line, making the
# next "echo" an *argument* to the generator instead of a command.
./pyproject_generator/pypackage_generator.sh "${test_dir}"
echo
echo "################################"
echo "Update Docker configuration file..."
cd "${test_dir}" \
	&& make docker-update-config
| true |
faa7cfb4f4152395368423b1d36ccb72d2688ee7 | Shell | mhcerri/xmenu | /complete.sh | UTF-8 | 697 | 3.59375 | 4 | [] | no_license | #!/bin/bash
# Drive bash programmable completion from the command line: given a partial
# command line, print the candidate completions one per line.
. /etc/bash_completion
# Single argument: complete it as a command name directly.
if [[ "$#" -eq 1 ]]; then
	if OUT="$(compgen -c -- "$1")"; then
		echo "$OUT" | sort -u
		exit 0
	fi
	# Re-split the single argument into words (intentional word-splitting).
	set -- $*
fi
PROG="$(basename "$1")"
# Look up the completion function registered for $PROG (the -F argument of
# its "complete" spec).
FN="$(complete -p "$PROG" | grep -oE -- '-F *[^ ]+' | sed -e 's/-F //')"
# Fake the environment a completion function expects from readline.
COMP_WORDS=($*)
COMP_LINE="$*"
COMP_COUNT="$(echo "$COMP_LINE" | wc -c)"
COMP_CWORD=$(( $# -1 ))
if [[ "$COMP_CWORD" -lt 1 ]]; then
	COMP_CWORD=1
fi
# Invoke the completion function; it fills the COMPREPLY array.
$FN
# Debug dump of the simulated completion state (stderr only).
echo "'$COMP_WORDS'" >&2
echo "'$COMP_LINE'" >&2
echo "'$COMP_COUNT'" >&2
echo "'$COMP_CWORD'" >&2
for ((i=0;i<${#COMPREPLY[*]};i++)); do
	echo ${COMPREPLY[i]}
done | sort -u
if [[ "${#COMPREPLY[*]}" -gt 0 ]]; then
	exit 0
fi
# Fallback: complete the last word (${!#}) as a file or directory name.
compgen -f -d -- ${!#} | sort -u
| true |
39a4af420b7c7620a7fcfeac546adfba2d4b8b22 | Shell | rkmax/julianreyes.co | /utils.sh | UTF-8 | 301 | 2.734375 | 3 | [] | no_license | #!/usr/bin/env bash
# Serve the src/ directory over HTTP (python http.server, default port 8000)
# for local preview of the site.
server() {
    python -m http.server --directory src/
}
# Upload src/ to an S3 bucket (public-read) and invalidate the whole
# CloudFront distribution so the new content is served immediately.
# Arguments: $1 - S3 bucket name, $2 - CloudFront distribution id.
deploy() {
    # ":?" aborts with a message instead of silently syncing to "s3:///"
    # when an argument is missing.
    local bucket=${1:?bucket name required}
    local distribution=${2:?distribution id required}
    aws s3 sync --acl public-read src/ "s3://${bucket}/"
    aws cloudfront create-invalidation --distribution-id "${distribution}" --paths '/*' > /dev/null
}
"$@"
| true |
737d56dbaf134bd1b5e766aa39e030a64a7b09e6 | Shell | Rosszor/Shell-Code-Work | /Autocfg_Validate.sh | UTF-8 | 11,465 | 3.421875 | 3 | [] | no_license | #!/bin/sh
# Script Created by Ross Thompson
# Version: 1.3
#
# File: Autocfg_Validate.sh
# Created: 3/11/2015 as Autocfg_Validate.sh
# Purpose: Will run all configuration and validation steps for the server that has been installed with Autocfg_postbuild script.
################################################################################
#
# MODIFICATIONS:
# Version: 1.0 - 3/11/2015
# - Created script.
# Version 1.1 - 5/19/2015
# - Made changes to configure/validation to reflect changes in install script.
# - Removed Tidal/Splunk/HPOV from configure/validation as they will not be installed.
# - Added Oracle-release file to output to check if/what OEL version
# Version 1.2 - 6/10/2015
# - Changed VAS configuration to use GetUXHOST and created a universal UXHOST variable to be used throughout the script. This will aid in script expansion (Specifically for PBIS validation)
# Version 1.3 - 6/23/2015
# - Added removal of SSH login to the Reboot function.
# To Add/Update
# - Add PBIS validation
# - Add loop for all ifcfg-ethX files (For Physicals)
# - Add section that removes root login ability
##############################################################################
SetVar() {
##############################################################################
# Set Variables for Script                                                   #
# - Some of the set variables will not be used. Most are set for future      #
#   expansion of the script.                                                 #
##############################################################################
# Short hostname only (strip the domain); $( ) replaces legacy backticks.
HOST="$(hostname | awk -F. '{print $1}')"
#Variable for log file that all user input and script output is written to
LOG="/tmp/Postbuild_Log"
#Universal UXHOST variable
UXHOST=""
}
GetUXHOST() {
##############################################################################
# Asks user for UXHOST group to add to users.allow file later in the script #
##############################################################################
echo "#########################################################################"
echo -n "	UXHOST for users.allow?	"
# -r keeps backslashes in the typed group name literal.
read -r VASHOST
UXHOST=${VASHOST}
}
ConfigureNetbackup() {
##############################################################################
# Makes changes to Netbackup                                                 #
# - Edits /usr/openv/netbackup/bp.conf and restarts the netbackup service    #
#   if the user selected to add a backnet configuration earlier.             #
##############################################################################
echo " Configuring Netbackup Files and Service:"
NBFILE="/usr/openv/netbackup/bp.conf"
# First line truncates bp.conf (>), the rest append (>>): client identity
# uses the backnet hostname ($HOST-bn) and the full media-server list.
echo SERVER = netbpmas01v > ${NBFILE}
echo CLIENT_NAME = ${HOST}-bn >> ${NBFILE}
echo CONNECT_OPTIONS = localhost 1 0 2 >> ${NBFILE}
echo CLIENT_READ_TIMEOUT = 900 >> ${NBFILE}
echo "SERVER = netbpmas21v" >> ${NBFILE}
echo "SERVER = netbpmas21" >> ${NBFILE}
echo "SERVER = netbpmas22" >> ${NBFILE}
echo "SERVER = netbpmas21dr" >> ${NBFILE}
echo "SERVER = netbpmed21" >> ${NBFILE}
echo "SERVER = netbpmed22" >> ${NBFILE}
echo "SERVER = netbpmed23" >> ${NBFILE}
echo "SERVER = netbpmed24" >> ${NBFILE}
echo "SERVER = netbpmed25" >> ${NBFILE}
echo "SERVER = netbpmed26" >> ${NBFILE}
echo "SERVER = netbpmed31" >> ${NBFILE}
echo "SERVER = netbpmed32" >> ${NBFILE}
echo "SERVER = netbpmed33" >> ${NBFILE}
echo "SERVER = netbpmed34" >> ${NBFILE}
# Enable at boot and verify the backnet route to the master server.
chkconfig netbackup on
/etc/init.d/static-route
traceroute netbpmas01v
echo "DONE"
echo "#########################################################################"
}
ConfigureVAS() {
##############################################################################
# Makes changes to VAS                                                       #
# - Edits and restarts the vasd service                                      #
##############################################################################
echo " Configure VAS for new IP and UXHOST information:"
#Asks user for UXHOST group to add to users.allow
GetUXHOST
#Add UXHOST to users.allow
echo ${UXHOST} >> /etc/opt/quest/vas/users.allow
#Edit vas.conf
#Flush domain information
/opt/quest/bin/vastool flush statedir
echo " VAS Password = tV!ojLeY"
#Rejoin to the domain with correct IP infomration and UXHOST
/opt/quest/bin/vastool -u vasadmin join -f -c OU=HOSTS,OU=Unix,DC=Corp,DC=TWCABLE,DC=com -n ${HOST}.corp.twcable.com corp.twcable.com
#Remove expired license
rm /etc/opt/quest/vas/.licenses/VAS_license_187-20250
/opt/quest/bin/vastool status
echo "DONE"
echo "#########################################################################"
}
ConfigureXymon() {
##############################################################################
# Makes changes to Xymon                                                     #
# - Edits /etc/sysconfig/xymon-client and starts the service                 #
##############################################################################
echo " Configure Xymon for hostname and xymon server IP:"
# Point the client at the monitoring server and report under this hostname.
sed -i 's/XYMONSERVERS=""/XYMONSERVERS="10.136.255.49"/' /etc/sysconfig/xymon-client
echo "CLIENTHOSTNAME=\"${HOST}\"" >> /etc/sysconfig/xymon-client
echo "DONE"
echo "#########################################################################"
}
RunValidation() {
##############################################################################
# Validation/Log Creation                                                    #
# Collects OS, network, storage, and service state into $LOG, then emails   #
# the log to an address entered by the operator.                             #
##############################################################################
echo " Server: ${HOST}"
# First write truncates the log (>); everything after appends (>>).
echo " Server: ${HOST}" > ${LOG}
echo ""
echo "" >> ${LOG}
echo " Please Validate Output:"
echo " Please Validate Output:" >> ${LOG}
echo " Log file - /tmp/Postbuild_Log"
echo " Log file - /tmp/Postbuild_Log" >> ${LOG}
echo " Date - $(date)"
echo " Date - $(date)" >> ${LOG}
echo "#########################################################################" >> ${LOG}
echo "REDHAT/OEL VERSION" >> ${LOG}
cat /etc/redhat-release >> ${LOG}
cat /etc/oracle-release >> ${LOG}
echo "#########################################################################" >> ${LOG}
echo "TSG PATCH FILE" >> ${LOG}
cat /etc/tsg_unix_patch_release >> ${LOG}
echo "#########################################################################" >> ${LOG}
echo "/ETC/HOSTS" >> ${LOG}
cat /etc/hosts >> ${LOG}
echo "#########################################################################" >> ${LOG}
echo "/ETC/RESOLV.CONF" >> ${LOG}
echo " First entry should be xxx.xxx.xxx.xxx for CDC" >> ${LOG}
echo " or xxx.xxx.xxx.xxx for CDP" >> ${LOG}
echo ""
cat /etc/resolv.conf >> ${LOG}
echo "#########################################################################" >> ${LOG}
echo "/ETC/SYSCONFIG/NETWORK-SCRIPTS/IFCFG-ETH0" >> ${LOG}
cat /etc/sysconfig/network-scripts/ifcfg-eth0 >> ${LOG}
echo "#########################################################################" >> ${LOG}
# eth1 (backnet) section only when a second interface is configured.
if [ -e /etc/sysconfig/network-scripts/ifcfg-eth1 ]; then
echo "/ETC/SYSCONFIG/NETWORK-SCRIPTS/IFCFG-ETH1" >> ${LOG}
cat /etc/sysconfig/network-scripts/ifcfg-eth1 >> ${LOG}
echo "/ETC/INIT.D/STATIC-ROUTE" >> ${LOG}
cat /etc/init.d/static-route >> ${LOG}
echo "#########################################################################" >> ${LOG}
fi
echo "NETSTAT -RN" >> ${LOG}
/bin/netstat -rn >> ${LOG}
echo "#########################################################################" >> ${LOG}
echo "/ETC/FSTAB" >> ${LOG}
cat /etc/fstab >> ${LOG} 2>&1 >> ${LOG}
echo "#########################################################################" >> ${LOG}
# Storage layout: LVM volume groups, logical and physical volumes, mounts.
echo "VOLUME GROUPS" >> ${LOG}
vgdisplay >> ${LOG}
echo "#########################################################################" >> ${LOG}
echo "LOGICAL VOLUMES" >> ${LOG}
lvdisplay >> ${LOG}
echo "#########################################################################" >> ${LOG}
echo "PHYSICAL VOLUMES" >> ${LOG}
pvdisplay >> ${LOG}
echo "#########################################################################" >> ${LOG}
echo "PARTITION TABLE" >> ${LOG}
df -h >> ${LOG}
echo "#########################################################################" >> ${LOG}
echo "MEMORY, CPU AND SWAP" >> ${LOG}
echo "MemTotal:" >> ${LOG}
cat /proc/meminfo |grep MemTotal >> ${LOG}
echo "CPU Total:" >> ${LOG}
cat /proc/cpuinfo |grep processor |wc -l >> ${LOG}
echo "Swap Total:" >> ${LOG}
cat /proc/meminfo |grep SwapTotal >> ${LOG}
echo "#########################################################################" >> ${LOG}
echo "SELINUX" >> ${LOG}
echo "SELinux status:" >> ${LOG}
echo "Should be permissive/targeted" >> ${LOG}
cat /etc/sysconfig/selinux |grep ^SELINUX >> ${LOG}
cat /etc/selinux/ >> ${LOG}
echo "SELinux Enforce mode" >> ${LOG}
cat /selinux/enforce >> ${LOG}
echo "#########################################################################" >> ${LOG}
echo "NTP CONFIGURATION" >> ${LOG}
cat /etc/ntp.conf >> ${LOG}
echo "#########################################################################" >> ${LOG}
echo "SYSCTL" >> ${LOG}
sysctl -p >> ${LOG} 2>&1
echo "#########################################################################" >> ${LOG}
# Service checks: SSH, PowerBroker, NetBackup, VAS, Xymon.
echo "QUEST SSH STATUS" >> ${LOG}
service sshd status >> ${LOG}
echo "#########################################################################" >> ${LOG}
echo "POWERBROKER STATUS" >> ${LOG}
/usr/local/bin/pbrun -v >> ${LOG} 2>&1
echo "#########################################################################" >> ${LOG}
echo "NETBACKUP STATUS" >> ${LOG}
cat /usr/openv/netbackup/bp.conf >> ${LOG}
service netbackup start >> ${LOG}
/etc/init.d/static-route
traceroute netbpmas01v >> ${LOG}
echo "#########################################################################" >> ${LOG}
echo "VAS STATUS" >> ${LOG}
cat /etc/opt/quest/vas/users.allow >> ${LOG}
service vasd status >> ${LOG}
/opt/quest/bin/vastool status >> ${LOG}
echo "#########################################################################" >> ${LOG}
echo "XYMON STATUS" >> ${LOG}
cat /etc/sysconfig/xymon-client >> ${LOG}
service xymon-client status >> ${LOG}
echo "#########################################################################" >> ${LOG}
echo "#########################################################################"
# Mail the assembled log to the operator for review.
echo " Please enter your email address:"
read EMAILADDR
mail -a ${LOG} -s "Postbuild_Log: ${HOST}" ${EMAILADDR} < ${LOG} 2>/dev/null
}
Reboot() {
##############################################################################
# Validation                                                                 #
# Disables root SSH login in both the system and Quest sshd configs, then   #
# reboots immediately so all configuration changes take effect.              #
##############################################################################
echo " REBOOTING SERVER FOR CHANGES:"
# Flip a commented-out "#PermitRootLogin yes" to "PermitRootLogin no", then
# delete any remaining active "PermitRootLogin yes" lines.
sed -i 's/\#PermitRootLogin yes/PermitRootLogin no/' /etc/ssh/sshd_config
sed -i '/PermitRootLogin yes/d' /etc/ssh/sshd_config
sed -i 's/\PermitRootLogin yes/PermitRootLogin no/' /etc/opt/quest/ssh/sshd_config
sed -i '/PermitRootLogin yes/d' /etc/opt/quest/ssh/sshd_config
shutdown -r now
}
main() {
##############################################################################
# Main Section                                                               #
# Calls all functions in order of operations                                 #
##############################################################################
SetVar
#Call ConfigureVAS function
ConfigureVAS
#Call ConfigureNetbackup function if Backnet exists
ConfigureNetbackup
#Call ConfigureXymon function
ConfigureXymon
#Run Validation function which creates a log to check for errors
RunValidation
#Reboot
}
main | true |
3fb06712281d5dede0d61dbd20041a5e1a802576 | Shell | mjafary/git-utilities | /rebase-repo.sh | UTF-8 | 3,267 | 5.03125 | 5 | [] | no_license | #!/bin/bash
#
# This script will guide the user through the process of rebasing their current repository
# (and branch if not on master). Should take the guess work out.
#
# Globals
# The following is false by default
REBASE_CURRENT_BRANCH=0
GIT_BASE_DIR=`/usr/bin/git rev-parse --show-toplevel`
GREP=`which grep`
# Print a colored banner containing the given message, then wait for Enter
# and save the current directory on the pushd stack.
# Arguments: $1 - message text to display inside the banner.
print_header () {
	# Pass the message as a printf *argument*, not inside the format string,
	# so '%' characters in $1 cannot be misread as conversion specifiers.
	printf "\033[34m################################################################################\033[1;32m\n\n%s\n\n\033[31mEnter to proceed: \033[0m" "${1}"
	read -r TEMP
	pushd . > /dev/null
}
# Clear screen prior to starting
clear
# Get the current branch
# "git branch" marks the active branch with '*'; field 2 is its name.
STARTING_BRANCH=`/usr/bin/git branch | $GREP '*' | /usr/bin/awk '{print $2}'`
# If we are not in the master branch, check that we should be rebasing
if [ "${STARTING_BRANCH}" != 'master' ]; then
	printf "\033[34m################################################################################\033[1;32m\n\nNot currently on master branch, do you want to rebase ${STARTING_BRANCH} as well?\n\n\033[31m[y/N]: \033[0m"
	read TEMP
	case "${TEMP}" in
		y|Y)
			# Set flag to true
			REBASE_CURRENT_BRANCH=1;;
		*)
			REBASE_CURRENT_BRANCH=0;;
	esac
	# Change to master
	# Remember where we were (popd near the end returns here).
	pushd . > /dev/null
	cd "${GIT_BASE_DIR}"
	if [ "${STARTING_BRANCH}" != 'master' ]; then
		printf "\033[1;32m\n\nChanging to master branch\033[0m\n\n"
		/usr/bin/git checkout master
	fi
fi
# Get the rebase source, default is upstream
printf "\033[34m################################################################################\033[1;32m\n\nWhich repository is your rebase target? [u]pstream or [o]rigin\n\n\033[31m[U/o]: \033[0m"
read TEMP
case "${TEMP}" in
	o|O)
		# Set flag to true
		REBASE_TARGET='origin';;
	*)
		REBASE_TARGET='upstream';;
esac
# Fetch repo data
if [ "${REBASE_TARGET}" == 'upstream' ]; then
	printf "\033[34m################################################################################\033[1;32m\n\nFetching Upstream repo data \033[0m\n\n"
	/usr/bin/git fetch --prune upstream
fi
printf "\033[34m################################################################################\033[1;32m\n\nFetching Origin repo data \033[0m\n\n"
/usr/bin/git fetch --prune origin
# Do the rebase
printf "\033[34m################################################################################\033[1;32m\n\nRebasing to ${REBASE_TARGET}\033[0m\n\n"
/usr/bin/git rebase "${REBASE_TARGET}"/master
# Push changes back to master
# After an upstream rebase, optionally push the updated master to origin.
if [ "${REBASE_TARGET}" == 'upstream' ]; then
	printf "\033[34m################################################################################\033[1;32m\n\nJust rebased against upstream.\n\nDo you want to push to your origin/master branch?\n\n\033[31m[Y/n]:\033[0m"
	read TEMP
	case "${TEMP}" in
		n|N)
			;;
		*)
			# Push changes to origin
			/usr/bin/git push origin master;;
	esac
fi
# Change back to other branch
# Return to the original branch/directory and rebase it too if requested.
if [ "${STARTING_BRANCH}" != 'master' ]; then
	printf "\033[34m################################################################################\033[1;32m\n\nGoing back to ${STARTING_BRANCH}\033[0m\n\n"
	/usr/bin/git checkout "${STARTING_BRANCH}"
	popd > /dev/null
	if [ ${REBASE_CURRENT_BRANCH} -eq 1 ]; then
		/usr/bin/git rebase master
	fi
fi
| true |
8458850760fce75f10197206c48c4d4f35faccc9 | Shell | NadilBourkadi/dotfiles | /zshrc | UTF-8 | 3,597 | 3.125 | 3 | [] | no_license | zsh ~/init.zsh
# Prefer python3 and the locally installed vim over the system defaults.
alias python=python3
alias vim=/usr/local/bin/vim
# Prompt helper: show the kubernetes namespace line(s) from ~/.kube/config
# rendered as "[k8s<namespace>]".
__kube_ps1() {
  # grep reads the file directly; the former "cat | grep" was a useless cat.
  CONTEXT=$(grep 'namespace:' ~/.kube/config | cut -d':' -f2)
  echo "[k8s$CONTEXT]"
}
# Terminal/editor defaults.
export TERM=xterm-256color
export EDITOR='vim'
#
# Kubernetes
#
alias pods='k get pods'
alias p='k get pods'
# Poor man's "watch": refresh pod list / pod resource usage every 5 seconds.
alias wp='while true; do clear; k get pods; sleep 5; done'
alias wt='while true; do clear; k top pods; sleep 5; done'
alias k=kubectl
alias n=namespace
# Kill every running container at once.
alias docker-killall='docker kill $(docker ps -q)'
alias g=grep
# kops state bucket and cluster name used by kops commands.
export KOPS_STATE_STORE=s3://lantum-kops
export NAME=k.lantum.com
export KUBE_EDITOR='vim'
# Print the container image line(s) of a deployment.
# Arguments: $1 - deployment name.
get-image() {
  k describe deploy $1 | grep Image
}
# Port-forward the first pod whose name matches $1.
# Arguments: $1 - pod name substring, $2 - port (or local:remote pair).
function forward {
  port=$2
  # First matching pod name from the current namespace.
  service=$(k get pods | grep $1 | head -1 | cut -d ' ' -f1)
  echo "Forwarding pod $service on port $port"
  kubectl port-forward "$service" $port
}
# Delete the first pod in the *staging* namespace whose name matches $1
# (the deployment will recreate it).
function delete {
  service=$(k get -n staging pods | grep $1 | head -1 | cut -d ' ' -f1)
  echo "Deleting pod on staging $service"
  kubectl delete pod "$service"
}
# Tail (and follow) the last 100 log lines of a deployment.
# Arguments: $1 - deployment name; remaining args passed to kubectl logs.
function logs {
  service=$1
  shift
  kubectl logs --tail=100 -f "deployment/$service" $@
}
# Open an interactive bash shell in the first *Running* pod matching $1.
# Arguments: $1 - pod name substring; remaining args appended to bash.
function kbash {
  service=$(kubectl get pods | grep $1 | grep Running | head -1 | cut -d ' ' -f1)
  echo "Executing bash on $service"
  shift
  kubectl exec "$service" -it bash $@
}
# Open an interactive sh shell in the first *Running* pod matching $1
# (for images without bash). NOTE: this shadows the Korn shell binary "ksh".
function ksh {
  service=$(k get pods | grep $1 | grep Running | head -1 | cut -d ' ' -f1)
  echo "Executing bash on $service"
  shift
  kubectl exec "$service" -it sh $@
}
# Describe the first pod whose name matches $1.
function describe {
  service=$(k get pods | grep $1 | head -1 | cut -d ' ' -f1)
  echo "Executing describe on $service"
  k describe pod "$service"
}
# Switch the default namespace of the current kubectl context.
# Arguments: $1 - namespace name.
function namespace {
  echo "Setting namespace $1"
  kubectl config set-context $(kubectl config current-context) --namespace=$1
}
# For each deployment named on the command line, print the first "Image:"
# line from its description (sed "1q;d" keeps only line 1).
function kimage {
  for service in $*
  do
    k describe deploy $service | grep Image | sed "1q;d"
  done
}
#
# Docker Compose
#
# Shorthand for docker-compose used by the dc-* helpers below.
alias dc='docker-compose'
# Run Django tests for a compose service in ~/Dev/stack.
# Arguments: $1 - service name, $2 - optional test label/path.
# (Subshell keeps the cd from changing the caller's directory.)
function dc-test {
  (cd ~/Dev/stack && docker-compose run $1 python manage.py test $2)
}
# Start a compose service in ~/Dev/stack detached (background).
function dc-up-d {
  (cd ~/Dev/stack && docker-compose up -d $1)
}
# Start a compose service in ~/Dev/stack in the foreground.
function dc-up {
  (cd ~/Dev/stack && docker-compose up $1)
}
# Run an arbitrary Django manage.py command inside a compose service.
# Arguments: $1 - service name; remaining args passed to manage.py.
function dc-man {
  service=$1
  shift
  (cd ~/Dev/stack && docker-compose run "$service" python manage.py "$@")
}
# Build and push a Docker image for a project directory, tagged with the
# project's current git commit hash.
# Arguments: $1 - project directory name (also the image name suffix).
build-dh ()
{
    project=$1
    # Bail out instead of building/pushing from the wrong directory when
    # the cd fails (the old unchecked "cd $project" silently continued).
    cd "$project" || return 1
    version=$(git log -1 --pretty=oneline | cut -d' ' -f1)
    docker build -t "networklocum/$project:$version" .
    docker push "networklocum/$project:$version"
    cd ..
}
# Nginx service shortcuts and vimrc editing.
alias ng-res='sudo service nginx restart'
alias ng-rel='sudo service nginx rel'
alias vimrc='vim ~/.vimrc'
#
# NPM
#
# Dev-server launchers for the two frontend projects.
alias start-react='(cd ~/Dev/react-web-app && npm start)'
alias start-bart='(cd ~/Dev/bart && npm run watch)'
#
# Tmux
#
# Grep for tmux session
# List tmux sessions; with an argument, print only the names of sessions
# matching it (case-insensitively, colon-suffix stripped).
# Arguments: $1 - optional session name filter.
tmux-ls(){
  if [ -z "$1" ]; then
    tmux ls
  else
    # Quote the filter and end option parsing so patterns with spaces or a
    # leading "-" cannot break grep.
    tmux ls | grep -i -- "$1" | sed 's/:.*//';
  fi
}
# Switch the current tmux client to the (first) session matching $1.
tmux-a(){
  tmux-ls $1 | xargs tmux switch -t
}
# Fabric build-and-deploy shortcut for ~/Dev/stack.
# Arguments: $1 - fabric environment task, $2 - service to deploy.
fab-bd(){
  environment=$1
  service=$2
  (cd ~/Dev/stack && fab $environment build_and_deploy:$service)
}
# Ensure the standard system bin directories come first on PATH.
export PATH="/usr/bin:/bin:/usr/sbin:/sbin:$PATH"
# Show the current kubectl context when called without an argument,
# otherwise switch to the named context.
context ()
{
    if [ -z "$1" ]; then
        kubectl config current-context;
        return 0;
    fi;
    kubectl config use-context $1
}
# Antigen plugin manager: load oh-my-zsh plus a set of bundles, then apply.
source ~/.zsh/antigen.zsh
# Load the oh-my-zsh's library.
antigen use oh-my-zsh
antigen bundle git
antigen bundle heroku
antigen bundle command-not-found
antigen bundle last-working-dir
antigen bundle kube-ps1
antigen bundle zsh-users/zsh-syntax-highlighting
antigen apply
# Custom prompt theme and autosuggestion strategy.
source ~/dotfiles/dil.zsh-theme
ZSH_AUTOSUGGEST_STRATEGY=completion
f9cc8d17d157d6035632e4f694e3ea6d28e36274 | Shell | sigg3/slackpatch | /slackpatch.sh | UTF-8 | 8,701 | 3.734375 | 4 | [] | no_license | #!/bin/bash
#
# Simple Slackware patching tool created in 2016 by Sigg3.net
# Copyright (C) 2016 Sigbjoern "Sigg3" Smelror <me@sigg3.net>.
#
# slackpatch is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, version 3 of the License.
#
# slackpatch is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# Full license terms: <http://www.gnu.org/licenses/gpl-3.0.txt>
#
# # # # # # # # # # # # # # # # #
# --- User-tunable configuration (edit here, not below) ---
SERVER="ftp.slackware.com"	# LEAVE AS-IS or change to a local mirror of ftp.slackware.com (or custom repo)
VERSION=""	# LEAVE EMPTY for auto detection or use 13.0 .. 14.2 OR current
DESK="/root/slackpatch"	# script working directory (root will administer these files)
SLACKPATCH="slackpatch 0.6"	# obligatory vanity field (including version)
REPO="$DESK/$SERVER"	# downloaded listing of packages on $SERVER
BASE="$DESK/.upgrades.log"	# $REPO diff file for filtering updates
INSTALLED="/var/log/packages/"	# list of installed packages on system
MINION=$( logname )	# used for downloading and managing files in /tmp
MESS="/tmp/slackpatch-$MINION/$(date +%N)"	# temp work directory (by $MINION user)
# # # # # # # # # # # # # # # # # # # # # # # #
#
# TODO change DESK to $logname/.slackpatch/
#
# TODO output normal CLI stuff: version, help, usage info
#
# TODO cleanup file names (please use descriptive names)
#
# TODO integrate sources from slackpkg update?
#
# Runtime # . . .
# Clear the screen and print the program banner.
Title() {
	clear && echo "== $SLACKPATCH by sigg3.net =="
}
# Got root?
# (( UID )) is non-zero (true) for every user except root.
if (( UID )) ; then
	Title
	echo -e "Error: You must have root. Run script like this: $ su -c '$0'\nYou can also 'su' into root and run $0 since \$logname is preserved.\n"
	echo -e "Note: Root privileges are not used for downloading, only upgradepkg.\nDownloads are run in the equivalent of: $ su -c 'su \$(logname) -c curl ...'"
	exit 1
else
	# Show the warranty/warning text; quoted delimiter keeps it literal.
	Title && cat <<"EOT"
slackpatch is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for details.
Full license terms: <http://www.gnu.org/licenses/gpl-3.0.txt>
WARNING: Before upgrading a package, save any configuration files that you wish
to keep (such as in /etc). Sometimes these will be preserved, but it depends on
the package structure. If you want to force new versions of the config files to
be installed, remove the old ones manually prior to running slackpatch.
Don't worry, person. slackpatch will ask before doing anything dangerous.
EOT
	# Require an explicit OK before doing anything.
	echo -e "\n\n" && read -p "Type 'OK' to continue: " OPT
	case "$OPT" in
		"ok" | "OK" | "Ok" ) Title ;;
		* ) exit 0 ;;
	esac
fi
# Verify required tools exist; remember the paths of upgradepkg and su for
# later use. NOTE(review): the [[ "$?" = 1 ]] check tests the status of the
# preceding assignment/case, which does not reliably propagate a failed
# "which" for the TEST arm — confirm before relying on it.
for dep in "curl" "md5sum" "upgradepkg" "su" "logname" ; do
	case "$dep" in							#
	upgradepkg	) UPGRADE=$( which $dep ) ;;
	su		) SU=$( which $dep ) ;;
	*		) TEST=$( which $dep &>/dev/null ) ;;
	esac								#
	[[ "$?" = 1 ]] && echo "Error: missing dependency $dep" && exit 1
done
# Test version
# Detect the Slackware release from /etc/slackware-version, or, when
# $VERSION was preset at the top of the script, verify that it matches.
SLACKFILE="/etc/slackware-version"
if [[ -f "$SLACKFILE" ]] ; then
	TEST=$( sed "s/Slackware //g" "$SLACKFILE" )
	if [[ -z "$VERSION" ]] ; then
		VERSION="$TEST"
	else
		# Bug fix: the message referenced $VERSIONFILE, which is never
		# defined anywhere, so the file name printed as an empty string.
		[[ "$TEST" != "$VERSION" ]] && echo "Error: version mismatch. $SLACKFILE says $TEST not $VERSION." && exit 1
	fi
else
	[[ -z "$VERSION" ]] && echo "Error: \$VERSION cannot be empty when $SLACKFILE is missing." && exit 1
fi
# Construct URLs
# Build the patches URL: slackware64 path on x86_64, plain slackware
# otherwise, then append the version, CHECKSUMS file and packages dir.
ARCH=$( uname -m )
WEB="$SERVER/pub/slackware/slackware" && [[ "$ARCH" = "x86_64" ]] && WEB+="64"
WEB+="-$VERSION/patches" && CHECKSUMS="$WEB/CHECKSUMS.md5" && WEB+="/packages"
# Output config and test network
echo -en "Running $SLACKPATCH for Slackware $VERSION $ARCH\n\nusr: $MINION (used for downloading)\nsys: Slackware $VERSION $ARCH\nsrc: $WEB "
# One ping; "1 received" means the mirror is reachable.
if [[ $( ping -c 1 "$SERVER" | grep -o "[0-9] received" | sed 's/.received//g' ) = 1 ]] ; then
	echo -e "(online)\ndir: $DESK/\n"
else
	echo -e "(offline)\n\nError: $SERVER does not respond to ping." && exit 1
fi
# Functions
# Run the given command string as the unprivileged login user $MINION, so
# downloads and /tmp file handling never happen as root.
AsUser() { # Run $1 as loguser $MINION (also works when logged in as root from a previous user shell.)
	"$SU" "$MINION" -c "$1"
}
# Remove all scratch files and exit. Also used as the signal trap handler.
# Arguments: $1 - optional exit code (defaults to 0).
CleanUp() { # Removes unwanted cruft
	[[ -z "$1" ]] && local EXIT_CODE=0 || local EXIT_CODE="$1"
	echo "Cleaning up.."
	#echo "Debug" && exit $EXIT_CODE # Comment this when done
	[[ -d "$MESS/" ]] && rm -Rfv "$MESS/"
	[[ -f "$REPO" ]] && rm -fv "$REPO"
	[[ -f "$DESK/updates" ]] && rm -fv "$DESK/updates"
	[[ -f "$DESK/updates.diff" ]] && rm -fv "$DESK/updates.diff"
	[[ -f "$DESK/updates.actual" ]] && rm -fv "$DESK/updates.actual"
	[[ -f "$DESK/updates.filtered" ]] && rm -fv "$DESK/updates.filtered"
	[[ -f "$DESK/updates.installed" ]] && rm -fv "$DESK/updates.installed"
	echo -e "\nDone. (code $EXIT_CODE)"
	exit $EXIT_CODE
}
trap CleanUp SIGHUP SIGINT SIGTERM
# Create work and temporary directories
[[ ! -d "$DESK/" ]] && mkdir -p "$DESK/"
[[ ! -d "$MESS/" ]] && AsUser "mkdir -p $MESS/"
AsUser "curl -s -l ftp://$WEB/ > $MESS/.listing" # Get package listing from $SERVER (REPO)
[[ "$?" != 0 ]] && echo "Error: could not retrieve repo list from $WEB (curl err $?)" && CleanUp 1
cp "$MESS/.listing" "$REPO" # secondary operation necessary since curl above is run as normal user
# Check against existing log $BASE
if [[ -f "$BASE" ]] ; then
grep ".txz" "$REPO" | grep -v ".asc" > "$DESK/updates.diff"
sort -o "$DESK/updates.diff" "$DESK/updates.diff"
sort -o "$BASE" "$BASE"
diff "$BASE" "$DESK/updates.diff" | sed '1d' | tr -d ">" | tr -d "<" | tr -d " " > "$DESK/updates"
UPDATES=$( cat "$DESK/updates" | wc -l ) && [[ "$UPDATES" = 0 ]] && echo -e "Status: No new updates available.\n" && CleanUp 0
else
cp "$REPO" "$DESK/updates"
sort -o "$BASE" "$BASE"
fi
# Filter list of updates (txz only and not packages already installed)
grep ".txz" "$DESK/updates" | grep -v ".asc" > "$DESK/updates.actual" && mv -f "$DESK/updates.actual" "$DESK/updates"
while read -r ; do IS_INSTALLED=$( find "$INSTALLED" -name "${REPLY:0:-4}" | wc -l ) && [[ "$IS_INSTALLED" -gt 0 ]] && echo "${REPLY:0:-4}" >> "$DESK/updates.installed" ; done < "$DESK/updates"
while read -r ; do grep -v "$REPLY" "$DESK/updates" >> "$DESK/updates.filtered" ; mv -f "$DESK/updates.filtered" "$DESK/updates" ; done < "$DESK/updates.installed"
[[ -f "$DESK/updates.installed" ]] && rm -f "$DESK/updates.installed"
awk '!a[$0]++' "$DESK/updates" > "$DESK/updates.actual" && mv -f "$DESK/updates.actual" "$DESK/updates" # remove duplicates
# Output names of updates (if any) and prompt for permission
UPDATES=$(cat "$DESK/updates" | wc -l)
case "$UPDATES" in
0 ) echo -e "Status: No new updates available.\n" && CleanUp 0 ;;
1 ) echo -e "Status: There is 1 updated package available:\n" ;;
* ) echo -e "Status: There are $UPDATES updates available:\n" ;;
esac
while read -r ; do echo -e "* $REPLY\n" ; done < "$DESK/updates"
read -p "Retrieve package updates and perform upgrade? Type 'yes': " OPT
case "$OPT" in
"Yes" | "yes" | "YES" ) echo -en "\nFetching CHECKSUMS.md5 .." ;;
* ) echo "Aborting update." && CleanUp 0 ;;
esac
# Get checksums file from $SERVER
AsUser "curl -s -o $MESS/CHECKSUMS.md5 -L ftp://$CHECKSUMS"
[[ "$?" != 0 ]] && echo -e "\nError: Could not fetch ftp://$CHECKSUMS (curl err $?)" && CleanUp || echo ".. OK."
# Upgrade loop
while read -u 3 -r software ; do
software=$( echo "$software" | tr -d '\r' ) # remove carriage return
AsUser "curl -s -o $MESS/$software -L ftp://$WEB/$software"
[[ "$?" != 0 ]] && echo "Error: Could not retrieve $software (curl err $?)" && continue
SOFT_MD5SUM=$( md5sum "$MESS/$software" | head -c 32 )
LIST_MD5SUM=$( grep "$software" "$MESS/CHECKSUMS.md5" | grep -v ".asc" | awk '{ print $1 }')
if [[ "$SOFT_MD5SUM" = "$LIST_MD5SUM" ]] ; then
INSTALL=0 && clear && echo "$software - checksum matches"
else
INSTALL=1 && echo "Error: Checksum of $software does not match"
fi
echo -e "\nFILE:\t$SOFT_MD5SUM ($software)\nLIST:\t$LIST_MD5SUM (CHECKSUMS.md5)\n"
if [[ "$INSTALL" = 0 ]] ; then
read -sn 1 -p "Do you want to upgrade $software [Y/n]? " OPT
case "$OPT" in
y | Y ) "$UPGRADE" "$MESS/$software" && echo "$software" >> "$BASE" ;;
* ) echo "Not installing $software .." && sleep 1 ;;
esac
else
echo "Skipping $software" && sleep 1
fi
done 3< "$DESK/updates"
CleanUp
| true |
10775cffa069a88114a2ced5f1d45310ffa307ae | Shell | songshansls/ForGDY | /tasks/lint.sh | UTF-8 | 298 | 2.546875 | 3 | [] | no_license | #!/bin/sh
# Ensure all javascript files staged for commit pass standard code style
git diff --name-only --cached --relative | grep '\.jsx\?$' | xargs eslint
if [ $? -ne 0 ]; then exit 1; fi
git diff --name-only --cached --relative | grep '\.css$' | xargs stylelint
if [ $? -ne 0 ]; then exit 1; fi
| true |
ea54af1a5e2bc34a193201cf5c05273e39c565da | Shell | fsmi/gad | /tests/test_skip_basedir_with_user/prepare | UTF-8 | 246 | 2.5625 | 3 | [] | no_license | #!/bin/sh -e
rm -rf base
mkdir base
mkdir base/b1
mkdir base/b1/c1
touch base/b1/c2
mkdir base/b2
mkdir base/b2/c1
touch base/b2/c2
setfacl -bR base
chmod -R u=rwX,g=,o= base
chmod -R a-s base
chown -R root:users base
chown nobody base/b1/c2
| true |
79e2dd50750d7c58e07d81919a5296ec78d4fa33 | Shell | ginkgo-project/ginkgo | /examples/build-setup.sh | UTF-8 | 993 | 3.390625 | 3 | [
"LicenseRef-scancode-proprietary-license",
"BSD-3-Clause",
"CECILL-2.0",
"MIT",
"LGPL-2.1-only",
"Unlicense",
"LGPL-2.1-or-later",
"GPL-1.0-or-later",
"CECILL-C",
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] | permissive | #!/bin/bash
# copy libraries
LIBRARY_NAMES="ginkgo ginkgo_reference ginkgo_omp ginkgo_cuda ginkgo_hip ginkgo_dpcpp ginkgo_device"
SUFFIXES=".so .dylib .dll d.so d.dylib d.dll"
VERSION="1.7.0"
for name in ${LIBRARY_NAMES}; do
for suffix in ${SUFFIXES}; do
cp ${BUILD_DIR}/lib/lib${name}${suffix}.${VERSION} \
${THIS_DIR} 2>/dev/null
if [ -e "${THIS_DIR}/lib${name}${suffix}.${VERSION}" ]
then
ln -s ${THIS_DIR}/lib${name}${suffix}.${VERSION} ${THIS_DIR}/lib${name}${suffix} 2>/dev/null
fi
done
done
# figure out correct compiler flags
if ls ${THIS_DIR} | grep -F "libginkgo." >/dev/null; then
LINK_FLAGS="-lginkgo -lginkgo_omp -lginkgo_cuda -lginkgo_reference -lginkgo_hip -lginkgo_dpcpp -lginkgo_device -Wl,-rpath,${THIS_DIR}"
else
LINK_FLAGS="-lginkgod -lginkgo_ompd -lginkgo_cudad -lginkgo_referenced -lginkgo_hipd -lginkgo_dpcppd -lginkgo_deviced -Wl,-rpath,${THIS_DIR}"
fi
if [ -z "${CXX}" ]; then
CXX="c++"
fi
| true |
878b5fcf6b033e3e10c2c13bb0ce85821bf23403 | Shell | 3Xpl0it3r/scripts | /controller_init.sh | UTF-8 | 7,892 | 3.34375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# description: this script is used to build some neceressury files/scripts for init an crd controller
Project=$1
Version=$2
Author=$3
Domain="$Author.cn"
if [ "$Version" = "" ]
then
Version="v1alpha1"
fi
# GroupName=$(echo $PROJECT|awk -F'-' '{print $1}'|awk -F'_' '{print $1}').$Domain
GroupName=$(echo $Project|sed 's/-//'|sed 's/_//').$Domain
GroupNameForShort=$(echo $Project|sed 's/-//'|sed 's/_//')
CustomResName=$(echo $Project|awk -F'-' '{print $1}'|awk -F'_' '{print $1}')
# create project directory
mkdir -pv $Project
mkdir -pv $Project/hack
mkdir -pv $Project/pkg/apis/$GroupName/$Version
mkdir -pv $Project/pkg/client
# create boilerplate.go.txt tools.go update-group.sh
cat >> $Project/hack/boilerplate.go.txt << EOF
/*
Copyright The $Project Authors.
*/
EOF
cat >> $Project/hack/tools.go << EOF
// +build tools
package tools
import _ "k8s.io/code-generator"
EOF
# Emit hack/update-group.sh, the code-generator driver script.
# The heredoc is deliberately unquoted so $Project/$GroupName/$Version are
# substituted at generation time; everything that must survive literally into
# the generated script is backslash-escaped. Fixed: the GOBIN/gobin lines and
# their comment were previously unescaped, so $(go env GOBIN) etc. expanded on
# the *generating* machine instead of in the generated script.
cat >> $Project/hack/update-group.sh << EOF
#!/usr/bin/env bash
set -o errexit
set -o nounset
set -o pipefail
(
  # To support running this script from anywhere, we have to first cd into this directory
  # so we can install the tools.
  cd "\$(dirname "\${0}")"/../vendor/k8s.io/code-generator/
  go install ./cmd/{defaulter-gen,client-gen,lister-gen,informer-gen,deepcopy-gen}
)
# Go installs the above commands to get installed in \$GOBIN if defined, and \$GOPATH/bin otherwise:
GOBIN="\$(go env GOBIN)"
gobin="\${GOBIN:-\$(go env GOPATH)/bin}"
echo "Generating deepcopy funcs"
"\${gobin}/deepcopy-gen" -O zz_generated.deepcopy --go-header-file ./boilerplate.go.txt --bounding-dirs ../pkg/apis/$GroupName/$Version --input-dirs ../pkg/apis/$GroupName/$Version --output-base ./
echo "Generating clientset for $GroupName "
"\${gobin}/client-gen" --clientset-name versioned --go-header-file ./boilerplate.go.txt --input-dirs ../pkg/apis/$GroupName/$Version --output-base ../ --output-package pkg/client/clientset
echo "Generating listers for $GroupName "
"\${gobin}/lister-gen" --go-header-file ./boilerplate.go.txt --input-dirs ../pkg/apis/$GroupName/$Version --output-base ../ --output-package pkg/client/listers
echo "Generating informers for $GroupName "
"\${gobin}/informer-gen" --go-header-file ./boilerplate.go.txt --input-dirs ../pkg/apis/$GroupName/$Version --output-package pkg/client/informers --output-base ../ --listers-package ../pkg/client/listers --versioned-clientset-package ../pkg/client/clientset/versioned
EOF
chmod +x $Project/hack/update-group.sh
# create apis
# auto generate regisgter.go file
cat >> $Project/pkg/apis/$GroupName/register.go << EOF
/*
Copyright `date "+%Y"` The $Project Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package $GroupNameForShort
const (
GroupName = "$GroupName"
)
EOF
# auto generate doc.go
cat >> $Project/pkg/apis/$GroupName/$Version/doc.go << EOF
/*
Copyright `date "+%Y"` The $Project Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package
// +groupName=$GroupName
// Package $Version is the $Version version of the API.
package $Version // import "$GroupName/pkg/apis/$GroupName/$Version"
EOF
# auto generate types.go
# Go type names substituted into the generated API types.
_Spec=$CustomResName"Spec"
_Status=$CustomResName"Status"
# Kubernetes API conventions require the collection kind to be named
# "<Kind>List" (client-gen/deepcopy-gen rely on it); fixes the "Itemm" typo.
_List=$CustomResName"List"
cat >> $Project/pkg/apis/$GroupName/$Version/types.go << EOF
/*
Copyright `date "+%Y"` The $Project Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package $Version
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:defaulter-gen=true
// $CustomResName represent $CustomResName cluster
type $CustomResName struct {
metav1.TypeMeta \`json:",inline"\`
metav1.ObjectMeta \`json:"metadata,omitempty"\`
Spec $_Spec \`json:"spec"\`
Status $_Status \`json:"status"\`
}
// $_Spec describe the behaviour of $CustomResName
type $_Spec struct {
// todo, write your code
}
// $_Status represent the current status of $CustomResName cluster resource
type $_Status struct {
// todo, write your code
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// $_List represent a list of $CustomResName cluster
type $_List struct {
metav1.TypeMeta \`json:",inline"\`
metav1.ListMeta \`json:"metadata,omitempty"\`
Items []$CustomResName \`json:"items"\`
}
EOF
# generate regiser.go
cat >> $Project/pkg/apis/$GroupName/$Version/register.go << EOF
/*
Copyright `date "+%Y"` The $Project Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package $Version
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"$GroupName/pkg/apis/$GroupName"
)
const (
Version = "$Version"
)
var (
// SchemeBuilder initializes a scheme builder
SchemeBuilder = runtime.NewSchemeBuilder(addKnowTypes)
// AddToScheme is a global function that registers this API group & version to a scheme
AddToScheme = SchemeBuilder.AddToScheme
)
var (
// SchemeGroupVersion is group version used to register these objects
SchemeGroupVersion = schema.GroupVersion{Group: $GroupNameForShort.GroupName, Version: Version}
)
// Resource takes an unqualified resource and returns a Group-qualified GroupResource.
func Resource(resource string)schema.GroupResource{
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
// Kind takes an unqualified kind and returns back a Group qualified GroupKind
func Kind(kind string)schema.GroupKind{
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// addKnownTypes adds the set of types defined in this package to the supplied scheme.
func addKnowTypes(scheme *runtime.Scheme)error{
scheme.AddKnownTypes(SchemeGroupVersion,
new($CustomResName),
new($_List),)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
}
EOF
# init go mod
cd $Project && go mod init $GroupName && go mod tidy
# go mod vendor
export GOPROXY=https://goproxy.cn && go mod vendor
| true |
f24b162acef6eda92593b218b4ef23bb49186138 | Shell | blueslow/dotfiles | /bin/gif2png.sh | UTF-8 | 263 | 3.46875 | 3 | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | #! /bin/bash
echo converts all non animated gif files in a directory to png
echo the gif files also remains
files=$(ls -1A *.gif)
echo $files
for file in $files
do
dest=$(basename $file .gif).png
echo "$file -> $dest"
convert $file $dest
done
| true |
1067708f7b40b5cec93035b0c8599af829870f0e | Shell | ibm-cloud-architecture/refarch-kc | /scripts/localk8s/ocpversion.sh | UTF-8 | 244 | 3.21875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Determine whether the target cluster is an OpenShift cluster
OCPVERSION=`oc get clusterversion -o jsonpath='{.items[].status.desired.version}'`
if [ ! -z "$OCPVERSION" ]; then
echo "Target is OpenShift version $OCPVERSION"
fi | true |
7ed4935382c1fbece116f877de99296651f1c2d5 | Shell | neurobin/DemoCode | /bash/crpool.sh | UTF-8 | 418 | 3.328125 | 3 | [] | no_license | #!/bin/sh
dir=${1-apt.udeb/cache/archives}
odir=${2-.}
find "$dir" -maxdepth 1 -type f -exec sh -c '
p=$0
odir=$1
n=$(basename $p)
pn=$(echo "$n" |sed -e "s/-udeb.*$//" -e "s/[_-][0-9].*$//")
if echo "$pn" | grep -q "^lib."; then
pn1=$(echo "$pn" |sed -E "s/^(....).*/\1/")
else
pn1=$(echo "$pn" |sed -E "s/^(.).*$/\1/")
fi
mkdir -p "$odir/pool/main/$pn1/$pn"
cp -L "$p" "$odir/pool/main/$pn1/$pn"
' '{}' "$odir" \;
| true |
ee225fd9f41365780c0b63946884558c1d696c19 | Shell | lueyoung/workspace-manifest-old-2 | /gitlab/ch-namespace.sh | UTF-8 | 424 | 3.75 | 4 | [] | no_license | #!/bin/bash
NAMESPACE=gitlab
ADDITION="namespace:"
TO="namespace: $NAMESPACE"
FILES="$(find ./ -name '*.yaml' -type f)"
for FILE in $FILES; do
echo $FILE
IF0=$(cat $FILE | grep "$ADDITION")
if [ -z "$IF0" ]; then
echo "$(date) - [WARN] - no namespace filed found, add a namespace first!"
exit 1
else
sed -i "/$ADDITION/ c {{.placeholder}} $TO" $FILE
sed -i s"/{{.placeholder}}//g" $FILE
fi
done
| true |
f2557b705fc5c8f117d87d270e31ba4caf010c04 | Shell | DeinsOne/PlanetMOV | /scipts/detectPlatform.sh | UTF-8 | 258 | 3.171875 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
detectPlatform() {
if [[ $(uname -a | grep Ubuntu) ]]; then
CURRENT_PLATFORM="Ubuntu"
elif [[ $(uname -a | grep Fedora) ]]; then
CURRENT_PLATFORM="Fedora"
else
CURRENT_PLATFORM="..."
fi
}
detectPlatform
| true |
6a94e765c37063ec44d45876d08633f9d76157b5 | Shell | simhaonline/one-ipfs | /remotes/tm/ipfs/clone | UTF-8 | 354 | 3.21875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
NAME=clone_remote
ARGS_ORIG=$@
DST_HOST=$(echo $2 | cut -d':' -f 1)
if [ -z "${ONE_LOCATION}" ]; then
ONE_LOCATION=/var/lib/one
fi
. $ONE_LOCATION/remotes/scripts_common.sh
log "Calling $NAME"
ssh_exec_and_log $DST_HOST \
"$ONE_LOCATION/remotes/tm/ipfs/$NAME $ARGS_ORIG" \
"Error calling $NAME"
exit 0
| true |
ada481635f8eb94ce15f23830530f65b7ea291ed | Shell | karmab/kcli | /kvirt/cluster/microshift/scripts/01_clients.sh | UTF-8 | 799 | 2.84375 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
MAJOR={{ 8 if 'rhel8' in image else 9 }}
{% set tag_str = tag|string %}
{% if version == 'dev-preview' %}
TAG={{ tag if tag_str.split('.')|length > 2 else "latest-" + tag_str }}
echo """[microshift-dev-preview]
name=Microshift Dev Preview
baseurl=https://mirror.openshift.com/pub/openshift-v4/x86_64/microshift/ocp-dev-preview/$TAG/el$MAJOR/os
enabled=1
gpgcheck=0""" > /etc/yum.repos.d/microshift.repo
{% endif %}
TAG={{ tag }}
subscription-manager repos --enable rhocp-$TAG-for-rhel-$MAJOR-$(uname -i)-rpms --enable fast-datapath-for-rhel-$MAJOR-$(uname -i)-rpms
dnf -y install openshift-clients lvm2 podman
test -f /root/auth.json && podman login registry.redhat.io --authfile /root/auth.json
DEVICE=/dev/$(lsblk -o name | tail -1)
pvcreate $DEVICE
vgcreate rhel $DEVICE
| true |
b1a292d31bc6b9d63937e6a036aee20f8906ea58 | Shell | AlexRaybosh/jpi | /src_java/build_java.sh | UTF-8 | 2,086 | 3.25 | 3 | [] | no_license | #!/bin/bash
echo ROOT=$ROOT SRCDIR=$SRCDIR JAVA_BUILD=$JAVA_BUILD
export CLASSPATH="."
VERSION=$(git rev-parse HEAD)
echo $VERSION > $JAVA_BUILD/VERSION
echo $VERSION > $ROOT/classes/VERSION
mostrecent=$(find $SRCDIR -type f \( -name '*java' -o -name Makefile -o -name build_java.sh \) \
-printf "%T@\0%p\0" | \
awk '{if ($0>max) {max=$0;getline mostrecent} else getline}END{print mostrecent}' RS='\0') \
|| exit 1
echo Latest java file $mostrecent
mr_ts=$(stat -c %Y $mostrecent)
c_ts=$mr_ts-1
test -f $JAVA_BUILD/compiled && c_ts=$(stat -c %Y $JAVA_BUILD/compiled)
if [ $c_ts -gt $mr_ts ]; then
echo "Latest java file $mostrecent timestamp=$mr_ts, but it was already compiled at $c_ts"
exit 0
else
echo "Latest java file $mostrecent timestamp=$mr_ts, too new, comparing to compiled at $c_ts"
fi
unset CLASSPATH
CLASSPATH=$JAVA_BUILD
$JAVA_HOME/bin/java -version
if [ -d $ROOT/javalib ]; then
for jar in $(find $ROOT/javalib -type f); do
CLASSPATH="$CLASSPATH:$jar"
done
fi
$JAVA_HOME/bin/javac -source 1.7 -target 1.7 -O -cp $CLASSPATH \
-d $JAVA_BUILD -sourcepath $SRCDIR \
$(find . -name '*.java') \
|| exit 1
#echo Copy SoLoader as a real java
/bin/cp $SRCDIR/jpi/utils/loader/SoLoader.fake-java $JAVA_BUILD/jpi/utils/loader/SoLoader.java || exit 1
echo Compile SoLoader
( cd $JAVA_BUILD && $JAVA_HOME/bin/javac -source 1.7 -target 1.7 -d . jpi/utils/loader/SoLoader.java && /bin/rm jpi/utils/loader/SoLoader.java) || exit 1
echo cp -f $JAVA_BUILD/jpi/utils/loader/SoLoader.class $JAVA_BUILD/jpi/utils/loader/SoLoader.bytes
cp -f $JAVA_BUILD/jpi/utils/loader/SoLoader.class $JAVA_BUILD/jpi/utils/loader/SoLoader.bytes || exit 1
echo cp -f $JAVA_BUILD/jpi/utils/loader/SoLoader.class $ROOT/classes/jpi/utils/loader/SoLoader.bytes || exit 1
mv -f $JAVA_BUILD/jpi/utils/loader/SoLoader.class $ROOT/classes/jpi/utils/loader/SoLoader.bytes || exit 1
echo Find and copy any resources
for p in $(find $SRCDIR -name '*.properties'); do
cp $p $JAVA_BUILD/$(dirname $p) || exit 1
cp $p $ROOT/classes/$(dirname $p) || exit 1
done
touch $JAVA_BUILD/compiled
| true |
ec1a5fe350eb91a29b0fbac66f150455b195f0c9 | Shell | edruid/Git-image-diff | /git-imgdiff.sh | UTF-8 | 217 | 2.796875 | 3 | [
"MIT"
] | permissive | #!/bin/sh
echo Comparing $1 externally
if [ -z "$GIT_IMAGE_DIFF_FUZZ" ]
then
GIT_IMAGE_DIFF_FUZZ=4000
fi
compare -fuzz $GIT_IMAGE_DIFF_FUZZ $2 $5 png:- | montage -geometry +4+4 $2 - $5 png:- | display -title "$1" -
| true |
555d645d0fd61ffa1b30befbe4d98a8a164a29a7 | Shell | charlie1404/oh-my-zsh | /lib/correction.zsh | UTF-8 | 832 | 2.609375 | 3 | [
"MIT"
] | permissive | if [[ "$ENABLE_CORRECTION" == "true" ]]; then
# Disable correction.
alias ack='nocorrect ack'
alias cd='nocorrect cd'
alias cp='nocorrect cp'
alias ebuild='nocorrect ebuild'
alias gcc='nocorrect gcc'
alias gist='nocorrect gist'
alias grep='nocorrect grep'
alias heroku='nocorrect heroku'
alias ln='nocorrect ln'
alias man='nocorrect man'
alias mkdir='nocorrect mkdir'
alias mv='nocorrect mv'
alias mysql='nocorrect mysql'
alias rm='nocorrect rm'
alias sudo='nocorrect sudo'
# Disable globbing.
alias bower='noglob bower'
alias fc='noglob fc'
alias find='noglob find'
alias ftp='noglob ftp'
alias history='noglob history'
alias locate='noglob locate'
alias rake='noglob rake'
alias rsync='noglob rsync'
alias scp='noglob scp'
alias sftp='noglob sftp'
setopt correct_all
fi
| true |
8917705dfa2857ae6151f963f66a8d4ab76789ef | Shell | cyclestreets/cyclestreets-setup | /backup-deployment/cyclestreetsRotateDaily.sh | UTF-8 | 2,433 | 4.4375 | 4 | [] | no_license | #!/bin/bash
# Rotates the CycleStreets backups daily.
### Stage 1 - general setup
# Ensure this script is NOT run as root (it should be run as cyclestreets)
if [ "$(id -u)" = "0" ]; then
echo "# This script must NOT be run as root." 1>&2
exit 1
fi
# Bomb out if something goes wrong
set -e
# Lock directory
lockdir=/var/lock/cyclestreets
mkdir -p $lockdir
# Set a lock file; see: http://stackoverflow.com/questions/7057234/bash-flock-exit-if-cant-acquire-lock/7057385
(
flock -n 9 || { echo 'CycleStreets daily rotate is already in progress' ; exit 1; }
### CREDENTIALS ###
# Get the script directory see: http://stackoverflow.com/a/246128/180733
# The multi-line method of geting the script directory is needed because this script is likely symlinked from cron
SOURCE="${BASH_SOURCE[0]}"
DIR="$( dirname "$SOURCE" )"
while [ -h "$SOURCE" ]
do
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE"
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
SCRIPTDIRECTORY=$DIR
# Define the location of the credentials file relative to script directory
configFile=../.config.sh
# Generate your own credentials file by copying from .config.sh.template
if [ ! -x $SCRIPTDIRECTORY/${configFile} ]; then
echo "# The config file, ${configFile}, does not exist or is not executable - copy your own based on the ${configFile}.template file." 1>&2
exit 1
fi
# Load the credentials
. $SCRIPTDIRECTORY/${configFile}
# Logging
logFile=$SCRIPTDIRECTORY/log.txt
touch ${logFile}
echo "$(date --iso-8601=seconds) CycleStreets daily rotation" >> ${logFile}
# Main body
# Folder locations
folder=${websitesBackupsFolder}
rotateDaily=${SCRIPTDIRECTORY}/../utility/rotateDaily.sh
# Rotate
$rotateDaily $folder www_cyclestreets.sql.gz
$rotateDaily $folder www_csBatch_jobs_servers_threads.sql.gz
# Microsites
folder=/websites/microsites/backup
$rotateDaily $folder microsites_websites.tar.bz2
$rotateDaily $folder microsites_allDatabases.sql.gz
echo "$(date --iso-8601=seconds) CycleStreets daily rotation done" >> ${logFile}
# Cyclescape
folder=/websites/cyclescape/backup
$rotateDaily $folder cyclescapeDB.sql.gz
$rotateDaily $folder cyclescapeShared.tar.bz2
echo "$(date --iso-8601=seconds) Cyclescape daily rotation done" >> ${logFile}
# Remove the lock file - ${0##*/} extracts the script's basename
) 9>$lockdir/${0##*/}
# End of file
| true |
00e48a9db61d2334b536f7441a0d71c0117403cf | Shell | mia0x75/falcon-plus | /config/confgen.sh | UTF-8 | 1,010 | 2.578125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
confs=(
'%%REDIS%%=redis:\/\/127.0.0.1:6379\/0'
'%%MYSQL%%=root:root@tcp(127.0.0.1:3306)'
'%%API_DEFAULT_TOKEN%%=TOKEN'
'%%API_RPC%%=127.0.0.1:6000'
'%%API_HTTP%%=127.0.0.1:6008'
'%%EXPORTER_HTTP%%=127.0.0.1:6018'
'%%UPDATER_HTTP%%=127.0.0.1:6028'
'%%HBS_RPC%%=127.0.0.1:6030'
'%%HBS_HTTP%%=127.0.0.1:6038'
'%%AGGREGATOR_HTTP%%=127.0.0.1:6048'
'%%ALARM_HTTP%%=127.0.0.1:6058'
'%%TRANSFER_RPC%%=127.0.0.1:6060'
'%%TRANSFER_HTTP%%=127.0.0.1:6068'
'%%TRANSFER_SOCKET%%=127.0.0.1:6062'
'%%GRAPH_RPC%%=127.0.0.1:6070'
'%%GRAPH_HTTP%%=127.0.0.1:6078'
'%%JUDGE_RPC%%=127.0.0.1:6080'
'%%JUDGE_HTTP%%=127.0.0.1:6088'
'%%NODATA_HTTP%%=127.0.0.1:6098'
'%%GATEWAY_RPC%%=127.0.0.1:6100'
'%%GATEWAY_HTTP%%=127.0.0.1:6108'
'%%GATEWAY_SOCKET%%=127.0.0.1:6102'
'%%AGENT_HTTP%%=127.0.0.1:6818'
)
configurer() {
for i in "${confs[@]}"
do
search="${i%%=*}"
replace="${i##*=}"
find ./out/*/*.json -type f -exec sed -i -e "s/${search}/${replace}/g" {} \;
done
}
configurer
| true |
18dfdbf75c13d08a6ee839eaf8b8d00e6e62e29f | Shell | WillAbides/semver-select | /script/update-docs | UTF-8 | 1,022 | 4.25 | 4 | [
"MIT"
] | permissive | #!/bin/bash
#/ script/update-docs updates documentation.
#/ - For projects with binaries, it updates the usage output in README.md.
#/ - Adds script descriptions to CONTRIBUTING.md.
set -e
CDPATH="" cd -- "$(dirname -- "$0")/.."
# Replace the text between two marker comments in a file.
# $1: file to edit; $2: start marker; $3: end marker; $4: new section body.
# The markers themselves are kept; everything between them (across lines)
# is replaced with the new body on its own lines.
update_file_section() {
  local file="$1"
  local start_comment="$2"
  local end_comment="$3"
  local replacement="$4"
  # Glob pattern matching everything from the start marker to the end marker.
  local pattern="${start_comment}*${end_comment}"
  local data
  data="$(cat "$file")"
  replacement="$start_comment
$replacement
$end_comment"
  # printf avoids echo's flag/escape pitfalls on arbitrary file content.
  printf '%s\n' "${data//$pattern/$replacement}" > "$file"
}
if [ -f script/semver-select ]; then
USAGE_OUTPUT="
\`\`\`
$(COLUMNS=100 script/"semver-select" --help)
\`\`\`
"
fi
update_file_section README.md '<!--- start usage output --->' '<!--- end usage output --->' "$USAGE_OUTPUT"
for f in script/*; do
[ -f "$f" ] || continue
SCRIPT_DESCRIPTIONS="$SCRIPT_DESCRIPTIONS
### $(basename "$f")
$(grep '^#/' "$f" | cut -c 4-)
"
done
update_file_section CONTRIBUTING.md '<!--- start script descriptions --->' '<!--- end script descriptions --->' "$SCRIPT_DESCRIPTIONS"
| true |
61f77f7c023d018d121881615b87a4ab1452b559 | Shell | adam000/dotfiles | /bashrc | UTF-8 | 2,476 | 3.28125 | 3 | [] | no_license | # First, get utils
[[ -e "$HOME/.bashutils" ]] && source ~/.bashutils
# Exchange two files via a unique temporary name.
# Fixes the original /tmp round-trip, which failed for two files in the same
# directory ("mv: ... are the same file") and could collide in /tmp when
# different invocations used files with the same basename.
swap() {
    if (( $# == 2 )); then
        local tmp
        tmp=$(mktemp -u "${TMPDIR:-/tmp}/swap.XXXXXX") || return 1
        mv -- "$1" "$tmp" && mv -- "$2" "$1" && mv -- "$tmp" "$2"
    else
        echo "Usage: swap <file1> <file2>" >&2
        return 1
    fi
}
# colors
RED="\[\e[31m\]"
GRE="\[\e[32m\]"
YEL="\[\e[33m\]"
BLU="\[\e[34m\]"
PUR="\[\e[35m\]"
CYA="\[\e[36m\]"
WHI="\[\e[37m\]"
NUL="\[\e[0m\]"
# my custom prompt
export PS1="\n[\`FOO=\$?; if [ ! \$FOO = 0 ]; then echo -n ${RED}; else echo -n ${GRE}; fi; echo -n \"\t${NUL}] \"; if [ ! \${FOO} = 0 ]; then echo \"${RED}E:\$FOO${NUL} \"; fi\`${PUR}(${NUL}\h${PUR})${NUL} ${CYA}\w${NUL}\n$ "
# history options
HISTSIZE=200
HISTCONTROL=ignoreboth
#############
## EXPORTS ##
#############
export EDITOR=vim
if command_exists mvim; then
export VISUAL=mvim
else
export VISUAL=vim
fi
export CLICOLOR=1
#############
## ALIASES ##
#############
# computer machines
alias vogon="ssh ahintz@vogon.csc.calpoly.edu"
alias xeon="ssh ahintz@xeon.csc.calpoly.edu"
alias unix1="ssh ahintz@unix1.csc.calpoly.edu"
alias unix2="ssh ahintz@unix2.csc.calpoly.edu"
alias unix3="ssh ahintz@unix3.csc.calpoly.edu"
alias unix4="ssh ahintz@unix4.csc.calpoly.edu"
alias sparc01="ssh ahintz@sparc01.csc.calpoly.edu"
alias sparc02="ssh ahintz@sparc02.csc.calpoly.edu"
# shortcut commands
alias xsera="cd ~/scm/git/xsera"
alias lit="clear; ls; echo -----------------------------------; git status"
alias cit="clear; git status"
alias ll="ls -la"
alias wme="w | egrep --color=always '^|`whoami`.*$'"
if [ -d ~/scm/git/dotfiles ]; then
alias dotfiles="cd ~/scm/git/dotfiles"
fi
# typos
alias sl="ls -F"
# OS-X-Specific commands
if [ ! -z "`echo $OSTYPE | grep darwin`" ]; then
alias xc="open -a xcode"
alias chrome="open -a google\ chrome"
# show file in Finder
alias show="open -R"
# Fun stuff
alias newinst="open -n -a"
alias blend="open -a Blender"
fi
# OP commands
alias ..="cd .."
alias ...="cd ../.."
alias ....="cd ../../.."
alias .....="cd ../../../.."
alias ......="cd ../../../../.."
alias .......="cd ../../../../../.."
# My editor is vim
set -o vi
# A fun little touch
alias :q="exit"
# git stuffs
# Compact git status: current branch, short status, and (when there are
# unstaged changes) a per-file added/removed line-count table.
st()
{
   if git rev-parse --git-dir > /dev/null 2>&1; then
      git branch | grep "\*"
      git status -s
      if ! git diff --quiet; then
         echo -e "+\t-\tfile"
         git diff --numstat | cat
      fi
   else
      # NOTE(review): "return -1" is not a valid bash return value (bash
      # reports "invalid option" and returns 2); "return 1" was likely meant.
      return -1
   fi;
}
alias ci="git commit -a -m"
# Sourcing other file(s)
if [[ -e ~/.localbashrc ]]; then
. ~/.localbashrc
fi
# bashrc loaded. Set the variable
export BASHRC_LOADED=0
# vim: set ts=2 sw=2:
| true |
ce85afd8061d9e17b3a298aaf49bc80d373bf80f | Shell | liufan-creat/magic | /gene_expression.sh | UTF-8 | 3,294 | 3.640625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
#June 2016, Sachit Saksena
##This file converts SRA -> FASTQ -> gene_expression.csv
##REQUIRES SRA TOOLKIT AND RSEM INSTALLED AS SRATOOLKIT AND RSEM_TOOLS, RESPECTIVELY
##ALSO REQUIRES BOWTIE INSTALLED AS NAMED FROM BOWTIE WEBSITE
#install sratoolkit and put in directory called sratoolkit and rsem in rsem_tools
#sra_file= file or path containing "<chromatin_mark><rep>.sra
#output_fastq=<chromatin_mark><rep>.fastq
#rsem_chromosome_path= path to directory containing all chromosome sequences
#rsem_reference_gtf == file or path containing reference .gtf file mm9.gtf
#sample_name == name or path of all reference outputs
#final=<chromatin_mark><rep>.csv
## get params from external file
if [ -z "$1" ]
then
echo "Usage $0 <Param.file>"
exit
fi
## Reading parameter from the Parameters file #########################################
param_file=$1
#checks for valid .sra file
sra_file=$(grep -w "^sra_file" "$param_file" | sed 's/.*=//' | sed 's/\s+$//')
if [ ! $sra_file ]
then
echo "$0: No sra file specified $sra_file"
exit
fi
output_fastq=$(grep -w "^output_fastq" "$param_file" | sed 's/.*=//' | sed 's/\s+$//')
if [ ! $output_fastq ]
then
echo "$0: No fastq output specified $output_fastq"
exit
fi
################################ RSEM PARAMETERS ###########################################
## get params from external file
rsem_reference_gtf=$(grep -w "^rsem_reference_gtf" "$param_file" | sed 's/.*=//' | sed 's/\s+$//')
if [ ! $rsem_reference_gtf ]
then
echo "$0 No refseq gtf file provided $rsem_reference_gtf"
exit
fi
rsem_chromosome_path=$(grep -w "^rsem_chromosome_path" "$param_file" | sed 's/.*=//' | sed 's/\s+$//')
if [ ! $rsem_chromosome_path ]
then
echo "$0 No chromosome path provided $rsem_chromosome_path"
exit
fi
sample_name=$(grep -w "^sample_name" "$param_file" | sed 's/.*=//' | sed 's/\s+$//')
if [ ! $sample_name ]
then
echo "$0 No sample_name for output provided $sample_name"
exit
fi
final_output=$(grep -w "^final_output" "$param_file" | sed 's/.*=//' | sed 's/\s+$//')
# Fix: the test was written "[ ! $final_output }" -- the stray "}" made the
# test always error out (status 2), so a missing final_output was silently
# accepted. Also exit non-zero so callers can detect the configuration error.
if [ -z "$final_output" ]
then
echo "$0 No final_output specified $final_output"
exit 1
fi
# ---------------------------------------------------------------------------
# Pipeline stages: SRA -> FASTQ conversion, RSEM reference preparation, and
# RSEM expression calculation.  The commented-out `bsub` lines are LSF
# batch-queue variants of the interactive commands that follow them.
# NOTE(review): $sra_file is presumably read from $param_file earlier in this
# script (not visible here) -- confirm it is validated before use.
# ---------------------------------------------------------------------------
echo ">>got params<<"
echo "..."
echo "..."
echo "..."
echo "..."
echo "..."
#read in
echo "Converting sra to fastq..."
#for job submission
#echo "Submitting batch job..."
#bsub -n 4 -W 03:00 -q short ./sratoolkit/bin/fastq_dump $sra_file > $output_fastq
#for interactive system
# NOTE(review): the batch variant above calls `fastq_dump` while the live line
# calls `fastq-dump` -- confirm which binary name the bundled sratoolkit ships.
./sratoolkit/bin/fastq-dump -Z $sra_file > $output_fastq
echo ">>preparing references<<"
echo "Submitting batch job..."
#create reference
#bsub -n 8 -W 03:00 -q short ./rsem_tools/rsem-prepare-reference --gtf $rsem_reference_gtf --bowtie --bowtie-path ./bowtie-1.1.2 --bowtie2 --bowtie2-path ./bowtie2-2.2.9 ./chr $sample_name
#interactive session
./rsem_tools/rsem-prepare-reference --gtf $rsem_reference_gtf --bowtie --bowtie-path ./bowtie-1.1.2 --bowtie2 --bowtie2-path ./bowtie2-2.2.9 ./chr $sample_name
echo ">calculating expression<"
echo "..."
echo "..."
#calculate gene expression
#bsub -n 8 -W 03:00 -q short ./rsem_tools/rsem-calculate-expression \
#interactive sesssion
./rsem_tools/rsem-calculate-expression --bowtie2 --bowtie2-path ./bowtie2-2.2.9 --append-names --estimate-rspd -p 8 --output-genome-bam $output_fastq $sample_name expression_output
| true |
7d30ff8ae4d66151b53d96772f34f5054927cbf0 | Shell | 760985933/sysbench | /bench_db/select_rand_pk/select_rand_pk.sh | UTF-8 | 6,603 | 3.359375 | 3 | [] | no_license | #!/bin/bash
# Load the user environment and make the MySQL client tools reachable.
source ~/.bash_profile
PATH=$PATH:/usr/local/mysql/bin/
# Directory this script lives in (used to locate the .lua test files).
basedir=$(cd $(dirname $0);pwd)
# Benchmark settings: report every 10s; fixed sysbench test name.
interval_sec=10
test_type="select_rand_pk"
# $5 is a whitespace-separated list of table sizes; word-splitting the
# unquoted parameter fills the array exactly like the original loop did.
declare -a table_size_list=($5)
i=${#table_size_list[@]}
numthreads_list="$7"
table_count="$6"
max_time=180
num=(1 2 3)
test_time=0
################### restart the MySQL service ##########################
# mysqld_service CONN_ARGS
#   $1 - mysql/mysqladmin connection argument string.
# Stops the 3306 instance via mysqld_multi, polls with `mysqladmin ping`
# until it is really down, starts it again, waits until ping succeeds, and
# finally turns the query cache off for the benchmark session.
mysqld_service() {
    try_time=1
    /usr/local/mysql/bin/mysqld_multi stop 3306
    echo "Waiting stop...sleep 10s"
    sleep 60
    # Poll until mysqladmin ping fails (exit status 1 == server unreachable).
    while true
    do
        echo "ps -ef | grep mysqld"
        threadnum=$(ps -ef | grep mysqld |grep -v grep | wc -l)
        if [ $threadnum -ne 0 ];then
            sleep 10
        fi
        /usr/local/mysql/bin/mysqladmin $1 ping >/dev/null 2>&1
        check=$?
        if [ $check -eq 1 ];then
            break
        else
            echo "Still alive,try $try_time"
            sleep 10
            try_time=$(($try_time+1))
        fi
    done
    /usr/local/mysql/bin/mysqld_multi start 3306
    if [ $? -eq 0 ];then
        # (message: "the start mysqld command has been issued")
        echo "已执行了start mysqld命令"
    fi
    echo "Waiting start..."
    sleep 10
    try_time=1
    # Poll until mysqladmin ping succeeds (exit status 0 == server is up).
    while true
    do
        /usr/local/mysql/bin/mysqladmin $1 ping >/dev/null 2>&1
        check=$?
        if [ $check -eq 0 ];then
            break
        else
            echo "Starting ,wait more $try_time"
            try_time=$(($try_time+1))
            sleep 10
        fi
    done
    # Disable the query cache (set global query_cache_type=off).
    mysql $1 <<EOF
set global query_cache_type=off;
EOF
}
###################################################################################
#### create the per-round output directories ($basedir/1, /2, /3), wiping ####
#### any left-overs from a previous run                                   ####
for ((j=0;j<3;j++))
do
    if [ ! -d $basedir/${num[$j]} ];then
        mkdir $basedir/${num[$j]}
    else
        rm -rf $basedir/${num[$j]}
        mkdir $basedir/${num[$j]}
    fi
done
#################################
date_str=`date +%Y%m%d_%H%M%S`
# For each requested table size: prepare the data set (a full prepare for the
# first size, an incremental "_delta" prepare for later sizes), restart MySQL
# via mysqld_service, then run 3 rounds x each thread count.
for table_size in ${table_size_list[*]}
do
    if [ $test_time -eq 0 ]; then
        sysbench --test=$basedir/$test_type.lua $1 --oltp-tables-count=$table_count --oltp-table-size=$table_size --rand-init=on --oltp-read-only=off --report-interval=$interval_sec --rand-type=special --rand-spec-pct=5 --max-time=$max_time --max-requests=0 --mysql-table-engine=innodb prepare | tee -a $basedir/sysbench_prepare
    else
        # Only insert the extra rows between the previous and current size.
        a=${table_size_list[$test_time]}
        b=${table_size_list[$(($test_time-1))]}
        let asize=$a-$b
        sysbench --test=$basedir/$test_type"_delta.lua" $1 --oltp-tables-count=$table_count --oltp-table-size=$asize --rand-init=on --oltp-read-only=off --report-interval=$interval_sec --rand-type=special --rand-spec-pct=5 --max-time=$max_time --max-requests=0 --mysql-table-engine=innodb prepare | tee -a $basedir/sysbench_prepare
    fi
    # NOTE(review): $? here is the status of `tee`, not of sysbench itself.
    if [ $? -eq 1 ]; then
        echo "sysbench prepare failed"
    fi
    mysqld_service "$2"
####################### snapshot com_select,com_delete,com_update,com_insert ##############################################
    date_start=$(date +%s)
    com_select_1=`mysql $2 -e "show global status like 'Com_select';" | awk 'NR==2 {print $2}'`
    com_delete_1=`mysql $2 -e "show global status like 'Com_delete';" | awk 'NR==2 {print $2}'`
    com_insert_1=`mysql $2 -e "show global status like 'Com_insert';" | awk 'NR==2 {print $2}'`
    com_update_1=`mysql $2 -e "show global status like 'Com_update';" | awk 'NR==2 {print $2}'`
    echo "########################################################开始测试:$table_size######################################################"
    # 3 rounds; round i appends its log under $basedir/${num[$i]}/.
    for ((i=0;i<3;i++))
    do
        for numthreads in $numthreads_list
        do
            sysbench --test=$basedir/$test_type.lua $1 --oltp-tables-count=$table_count --oltp-table-size=$table_size --rand-init=on --num-threads=$numthreads --oltp-read-only=off --report-interval=$interval_sec --rand-type=special --rand-spec-pct=5 --max-time=$max_time --max-requests=0 --mysql-table-engine=innodb run | tee -a $basedir/${num[$i]}/sysbench_run_${numthreads}_${table_size}
            if [ "$?" != "0" ];then
                echo "sysbench run failed"
            fi
            echo "End running test : `date`"
        done
    done
    test_time=$(($test_time+1))
done
####################### compute per-second com_select,com_delete,com_update,com_insert rates ##############################################
date_stop=$(date +%s)
com_select_2=`mysql $2 -e "show global status like 'Com_select';" | awk 'NR==2 {print $2}'`
com_delete_2=`mysql $2 -e "show global status like 'Com_delete';" | awk 'NR==2 {print $2}'`
com_insert_2=`mysql $2 -e "show global status like 'Com_insert';" | awk 'NR==2 {print $2}'`
com_update_2=`mysql $2 -e "show global status like 'Com_update';" | awk 'NR==2 {print $2}'`
# NOTE(review): the *_1 snapshots are re-taken on every table-size iteration,
# so these averages only cover the window since the last snapshot -- confirm
# whether whole-run averages were intended.
let com_select="($com_select_2 - $com_select_1)"/"($date_stop-$date_start)"
let com_delete="($com_delete_2 - $com_delete_1)"/"($date_stop-$date_start)"
let com_insert="($com_insert_2 - $com_insert_1)"/"($date_stop-$date_start)"
let com_update="($com_update_2 - $com_update_1)"/"($date_stop-$date_start)"
extra_stat="s:$com_select/i:$com_insert/d:$com_delete/u:$com_update"
echo "extra_stat : "$extra_stat
######################################################## end of test: sysbench cleanup ######################################################
sysbench --test=$basedir/$test_type.lua $1 --oltp-tables-count=$table_count --oltp-table-size=10000 --rand-init=on --num-threads=16 --oltp-read-only=off --report-interval=$interval_sec --rand-type=special --rand-spec-pct=5 --max-time=$max_time --max-requests=0 --mysql-table-engine=innodb cleanup | tee -a $basedir/sysbench_clean
echo "#############################################开始收集信息并入库###########################################################"
# Collect the logged results and load them into the results database.
updir=$(cd $basedir;cd ..;pwd)
test_id=$3
test_type=$4
######## record the results measured on this host ############
`sh $updir/collect.sh "$extra_stat" "$2" "select_rand_pk" "$numthreads_list" "$5" "$basedir" "$test_id" "$test_type"`
######## variant: load results into the db35 host ############
#conn="--user=dbbench --password=w5q9C4BHXgH3Y --host=10.16.133.35 --port=3306"
#`sh $updir/collect.sh "$extra_stat" "$conn" "select_rand_pk" "$numthreads_list" "$table_size_list" "$basedir" "$test_id" "$test_type"`
if [ "$?" != "0" ];then
    # (message: "loading results failed, please check the load script")
    echo "入库出现错误,请检查入库脚本是否有错!"
else
    # (message: "results loaded successfully")
    echo "入库成功"
fi
| true |
9f40991c84097643966883df6c4b10cd9f9a8cbf | Shell | siddhanthgupta/BoxLinux | /Event Handling/qhandler.sh | UTF-8 | 766 | 3.359375 | 3 | [] | no_license | #!/bin/bash
#Handle the queue, for every changed directory, fire off a difference finder
# NOTE(review): expects config.sh to define JOBBUF, JOBQUEUE, CURSTATE,
# PREVSTATE, ACTIONFILE and INTERVAL -- none are defined here; confirm.
source config.sh
#run indefinitely
while true; do
	#lock the file so notifier doesn't add to it now.
	# NOTE(review): `lockfile` is the procmail utility; the lock is held only
	# while the buffer is merged into the queue below.
	lockfile $JOBBUF.lock
	cat $JOBQUEUE $JOBBUF > tmp && mv tmp $JOBQUEUE
	cat /dev/null > $JOBBUF #clear the queue
	rm -f $JOBBUF.lock #remove lock
	#for each line in the job queue fire off a difference finding script
	while read line; do
		./dirstat.sh "$line" #generate cur state
		echo "$line$CURSTATE" "$line$PREVSTATE"
		./folderdiff.py "$line" "$line$CURSTATE" "$line$PREVSTATE" "$ACTIONFILE"
		mv "$line$CURSTATE" "$line$PREVSTATE" #prev state changes to cur state
	done < $JOBQUEUE
	cat /dev/null > $JOBQUEUE #clear the queue
	sleep $INTERVAL #sleep now.
done
| true |
f7c6815021503d1edf3c80427cb56197f90b47a0 | Shell | ZimboPro/scripts | /shellScripts/DeveloperScripts/installDev.sh | UTF-8 | 2,175 | 3.46875 | 3 | [] | no_license | #!/bin/bash
# Refuse to run as a non-root user.  Exit non-zero so callers can tell the
# script did nothing (the original exited 0 here).
if [ "$EUID" -ne 0 ]
then echo "Please run as root"
	exit 1
fi
# --- git -------------------------------------------------------------------
sudo apt install -y git
# Interactive git global configuration.
# (The original prompts carried "${c}" placeholders and stray "; $r" commands
# copied from a colourised script; neither variable was ever defined, so
# removing them leaves the output unchanged.)
read -r -p "Do you want to setup Git global config? (y/n): "
echo
if [[ $REPLY =~ ^[Yy]$ ]]; then
	echo "Setting up Git"
	(set -x; git --version )
	echo "Setting up global git config at ~/.gitconfig"
	git config --global color.ui true
	read -p "Enter Your Full Name: " name
	read -p "Enter Your Email: " email
	git config --global user.name "$name"
	git config --global user.email "$email"
	echo "Git Setup Successfully!"
else
	echo "Skipping!"
fi
# --- common tools ----------------------------------------------------------
# BUG FIX: the apt package is "build-essential"; "build-essentials" does not
# exist, which made the whole transaction fail.
sudo apt install -y build-essential file
# --- VS Code ---------------------------------------------------------------
curl https://packages.microsoft.com/keys/microsoft.asc | gpg --dearmor > packages.microsoft.gpg
sudo install -o root -g root -m 644 packages.microsoft.gpg /usr/share/keyrings/
sudo sh -c 'echo "deb [arch=amd64 signed-by=/usr/share/keyrings/packages.microsoft.gpg] https://packages.microsoft.com/repos/vscode stable main" > /etc/apt/sources.list.d/vscode.list'
sudo apt-get install -y apt-transport-https
sudo apt update -y
sudo apt install -y code
# BUG FIX: the downloaded file is packages.microsoft.gpg; the old cleanup
# removed a non-existent "microsoft.gpg" and left the real file behind.
sudo rm -f packages.microsoft.gpg
# --- Python 3 --------------------------------------------------------------
sudo add-apt-repository ppa:deadsnakes/ppa -y
sudo apt-get update -y
sudo apt install -y python3
# --- Go --------------------------------------------------------------------
wget https://dl.google.com/go/go1.12.7.linux-amd64.tar.gz
sudo tar -C /usr/local -xzf go1.12.7.linux-amd64.tar.gz
sudo rm -f go1.12.7.linux-amd64.tar.gz
# NOTE(review): this only *prints* the PATH line, it is not persisted to any
# profile file -- confirm whether appending to ~/.profile was intended.
echo "export PATH=$PATH:/usr/local/go/bin"
# --- Java ------------------------------------------------------------------
sudo apt install -y default-jre
sudo apt install -y default-jdk
sudo apt-get install maven -y
# --- Node ------------------------------------------------------------------
curl -sL https://deb.nodesource.com/setup_12.x | sudo bash - #Submit the version according to your need.
sudo apt install -y nodejs
# --- Docker ----------------------------------------------------------------
curl -fsSL get.docker.com -o get-docker.sh
sh get-docker.sh
# --- docker-compose --------------------------------------------------------
sudo pip install docker-compose
# --- C toolchain -----------------------------------------------------------
# BUG FIX: "build-essentials" and "glibc" are not apt package names, so the
# original transaction installed nothing.  build-essential already pulls in
# gcc and the libc development files.
sudo apt install -y build-essential clang gcc gdb
# --- C++ -------------------------------------------------------------------
# BUG FIX: there is no "clang++" package (the clang package ships clang++).
sudo apt install -y g++
# --- Homebrew (Linuxbrew) --------------------------------------------------
sh -c "$(curl -fsSL https://raw.githubusercontent.com/Linuxbrew/install/master/install.sh)"
# --- valgrind and cmake ----------------------------------------------------
brew install cmake
brew install valgrind
# --- Rust ------------------------------------------------------------------
curl -sf -L https://static.rust-lang.org/rustup.sh | sh
| true |
5da8e422c1aa3aa17bdf77eef5d37445359df223 | Shell | johandry/VeDeV | /packer/scripts/centos/ruby.sh | UTF-8 | 1,368 | 3.578125 | 4 | [] | no_license | #!/bin/bash -eux
# message STRING
# Writes STRING to stdout behind a bold-yellow "SCRIPT:" tag so provisioning
# output is easy to scan.  %b keeps the same backslash-escape handling that
# `echo -e` had.
message () {
  printf '\033[93;1mSCRIPT:\033[0m %b\n' "${1}"
}
# Should this be installed and continue a minimal installation?
message "Installing Development tools"
yum -y groupinstall "Development Tools"
message "Installing ruby"
# Centos 6.6 have ruby 1.8, upgrade it to the latest version.
# On CentOS 6 we build Ruby 2.1.5 from source; elsewhere the distro package
# is used.
if grep -q -i "release 6" /etc/redhat-release
then
  # Remove ruby, if any
  yum remove -y ruby ruby-devel
  # Install ruby dependencies
  yum -y install gcc g++ make automake autoconf curl-devel openssl-devel zlib-devel httpd-devel apr-devel apr-util-devel sqlite-devel
  # Install ruby latest version (2.1.5)
  # Update URL if ruby latest version change
  wget http://cache.ruby-lang.org/pub/ruby/2.1/ruby-2.1.5.tar.gz
  # (flags: x=extract, v=verbose, f=file, z=gzip; the duplicate v is harmless)
  tar xvfvz ruby-*.tar.gz
  chown -R root.root ruby-* && cd ruby-* && ./configure && make && make install
else
  yum -y install ruby
  # Install ruby dependencies
  yum -y install gcc g++ make automake autoconf curl-devel openssl-devel zlib-devel httpd-devel apr-devel apr-util-devel sqlite-devel
  # Install doc and devel
  yum -y install ruby-rdoc ruby-devel
  # Install rubygems
  yum -y install rubygems
fi
# message "Updating Gems"
# # Upgrading rubugem and gems. Gems as vagrant, not as root.
# gem update --system
# su - vagrant -c "gem install bundler"
# su - vagrant -c "gem update"
| true |
986ebacefd23ee4808bad47704fa40a71a11bacd | Shell | alexcurtin/ghorg | /scripts/local-gitlab/seed.sh | UTF-8 | 1,193 | 3.09375 | 3 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | #! /bin/bash
# https://docs.gitlab.com/ee/install/docker.html#install-gitlab-using-docker-engine
# Seeds a local GitLab instance for testing.
#   $1 - private API token,  $2 - GitLab base URL
TOKEN=$1
GITLAB_URL=$2
# Create 2 groups ("group1", "group2"); their namespace_id values start at 4
# (4 and 5), which the repo-creation loops below rely on.
curl --request POST --header "PRIVATE-TOKEN: $TOKEN" \
  --header "Content-Type: application/json" \
  --data '{"path": "group1", "name": "group1" }' \
  "${GITLAB_URL}/api/v4/groups"
curl --request POST --header "PRIVATE-TOKEN: $TOKEN" \
  --header "Content-Type: application/json" \
  --data '{"path": "group2", "name": "group2" }' \
  "${GITLAB_URL}/api/v4/groups"
# create repos for user (11 repos: baz0 .. baz10)
for ((a=0; a <= 10 ; a++))
do
  curl --header "PRIVATE-TOKEN: $TOKEN" -X POST "${GITLAB_URL}/api/v4/projects?name=baz${a}&initialize_with_readme=true"
done
# create repos in group1 (namespace_id=4)
for ((a=0; a <= 10 ; a++))
do
  curl --header "PRIVATE-TOKEN: $TOKEN" -X POST "${GITLAB_URL}/api/v4/projects?name=baz${a}&namespace_id=4&initialize_with_readme=true"
done
# create repos in group2 (namespace_id=5)
for ((a=0; a <= 10 ; a++))
do
  curl --header "PRIVATE-TOKEN: $TOKEN" -X POST "${GITLAB_URL}/api/v4/projects?name=baz${a}&namespace_id=5&initialize_with_readme=true"
done
# Finally clone everything back to verify the seeding worked.
./scripts/local-gitlab/clone.sh "${TOKEN}" "${GITLAB_URL}"
| true |
9182128d59cafbe77d835ae357eb11801371f4a5 | Shell | cadeon/duckiesays | /db/create_db.sh | UTF-8 | 594 | 2.84375 | 3 | [
"MIT"
] | permissive | #! /bin/sh
## DATABASE CREATION
#
# Rebuilds ../duckiedb.sqlite3 from scratch:
#   1. delete any existing database file,
#   2. create the empty schema + lookup tables,
#   3. load the seed data dump.
#
# Note: the sqlite3 CLI must be in $PATH.  Make sure nothing (app, DB
# browser) still has the database file open before running.
# (The stale psql/PGPASSWORD comments from the old Postgres version were
# removed -- this script only uses sqlite3.)
echo "Killing old db"
# -f: do not error when the database does not exist yet (first run).
rm -f ../duckiedb.sqlite3
## CREATE EMPTY SCHEMA + LOOKUP TABLES
echo "Creating schema..."
sqlite3 ../duckiedb.sqlite3 < create_tables.sql
echo "...done"
## DATA INSERTION
echo "Creating data..."
sqlite3 ../duckiedb.sqlite3 < create_db.dmp
echo "...done"
| true |
62d31b16880f8fa69e3e40d7560a2afaec2a46cd | Shell | esecules/PlexSuspend | /suspend_check | UTF-8 | 4,760 | 3.78125 | 4 | [] | no_license | #!/bin/bash
# ---------------------------------------------------------------------------
# Suspend eligibility check, intended to run from cron: every activity test
# below appends a reason line to $out_string; if any reason exists the script
# prints them and exits 1 (meaning "do not suspend"), otherwise it exits 0.
# ---------------------------------------------------------------------------
#prepare a string for output to a log
out_string=""
#get the current date and date of the last wakeup
#last_wake file is touched by a user script in /etc/pm/sleep.d
# (date -r FILE prints the file's mtime -- GNU coreutils date)
current_date=`date +%s`
last_wake=`date +%s -r /var/log/pm-suspend.log`
#calculate the difference in time of last wake to now
diff_time=$(($current_date-$last_wake))
#set up some time lengths in seconds for use later
# NOTE(review): onetwenty is computed but never referenced below.
onetwenty=$((120*60))
thirty=$((30*60))
ninty=$((90*60))
#we should stay awake for 30 minutes after we are
#woken up to allow user to perform an action
if [ $diff_time -lt $thirty ]
then
	out_string="--- Wake was less than 30 minutes ago, not sleeping yet!\n"
fi
#check the PlexWatch log to see when it was updated, and see
#if we should stay awake, we are allowing 30 min after after PlexWatch
#updates to allow something else to happen
/opt/plexWatch/plexWatch.pl
plexwatch_update=`date +%s -r /opt/plexWatch/plexWatch.log`
plexwatch_diff=$(($current_date-$plexwatch_update))
if [ $plexwatch_diff -lt $thirty ]
then
	out_string=$out_string"--- PlexWatch updated less than 30 minutes ago, no sleeping yet!\n"
fi
#count the number of FTP users, if there are more than two lines
#then we have a user on FTP, so touch a file to make note of this for later
if [ `ftpwho | wc -l` -gt 2 ]
then
	touch /var/run/ftp_watch
fi
#check time difference of the last time we found and FTP user
#if we found one less than 90 minutes ago, make note of it to prevent sleep
if [ -a /var/run/ftp_watch ]
then
	ftp_watch=`date +%s -r /var/run/ftp_watch`
	ftp_diff=$(($current_date-$ftp_watch))
	if [ $ftp_diff -lt $ninty ]
	then
		out_string=$out_string"--- An FTP transfer was running less than 90 minutes ago, no sleeping yet!\n"
	fi
fi
#check for snapraid running, and make note to prevent sleep
if [ `pgrep snapraid | wc -l` != "0" ]
then
	out_string=$out_string"--- Snapraid is running!\n"
fi
#check for samba user, and make note to prevent sleep
if [ `/usr/bin/smbstatus | grep DENY | wc -l` != "0" ]
then
	out_string=$out_string"--- Samba lock exists.\n"
fi
#check for rsync command running, and make note to prevent sleep
if [ `pgrep rsync | wc -l` != "0" ]
then
	out_string=$out_string"--- Process relating to rsync exists.\n"
fi
#check to make sure nobody is actively watching a video with Plex
#this is to ensure that we don’t skip over a user watching if the
#PlexWatch log hasn’t been updated yet.
if [ `/opt/plexWatch/plexWatch.pl --watching | grep User | wc -l` != "0" ]
then
	touch /var/run/plex_transcoder_last_seen
	out_string=$out_string"--- PlexWatch reports somebody is watching a video.\n"
fi
#check for lftp command running, and make note to prevent sleep
if [ `pgrep 'lftp' | wc -l` != "0" ]
then
	out_string=$out_string"--- LFTP running.\n"
fi
#check for unrar command running, and make note to prevent sleep
if [ `pgrep 'unrar' | wc -l` != "0" ]
then
	out_string=$out_string"--- Unrar running.\n"
fi
#check for filebot running, and make note to prevent sleep
if [ `pgrep 'filebot' | wc -l` != "0" ]
then
	out_string=$out_string"--- Filebot running.\n"
fi
#check for command running, and make note to prevent sleep
if [ `pgrep 'youtube-dl' | wc -l` != "0" ]
then
	out_string=$out_string"--- youtube-dl running.\n"
fi
#check for tar command running, and make note to prevent sleep
#this is used for home directory backups
if [ `pgrep -x 'tar' | wc -l` != "0" ]
then
	out_string=$out_string"--- tar process running.\n"
fi
### following takes a while
#get some info on network statistics
netstat -ute > .netstat
### now use output for various tests:
#look for an FTP user we may have missed earlier
if [ `cat .netstat | grep ftp | wc -l` != "0" ]
then
	out_string=$out_string"--- Got user on FTP.\n"
fi
#check for users on VNC
if [ `cat .netstat | grep :5900 | wc -l` != "0" ]
then
	out_string=$out_string"--- Got user on VNC.\n"
fi
rm .netstat
#check for active SSH sessions
if [ "`grep sshd:session /var/log/auth.log | tail -1 | awk '{print $8}'`" = "opened" ]
then
	out_string=$out_string"--- User hasn't closed ssh connection.\n"
fi
##This is not quite ready yet
##check for mouse/keyboard activity
# lastpipe makes the final pipeline stage run in this shell so the
# out_string+= inside the loop is not lost in a subshell (bash >= 4.2,
# non-interactive only).
shopt -s lastpipe
w | awk '$2 ~ /:[0-9]+/ {print $1}' | while read -r user ; do
	# .useridle is divided by 1000 below, so it presumably holds
	# milliseconds -- TODO confirm against the writer of that file.
	idletime=$(cat /home/$user/.useridle)
	idletime=$(($idletime/1000))
	if [ $idletime -lt $thirty ]
	then
		out_string+="--- $user has used the keyboard/mouse in the past ($idletime sec) no sleeping yet!\n"
	fi
done
#FINISH
#If we have any data in our output, we’ll echo it to a log as defined in the cron job
#this can then be used by the other script to check and perform suspend when necessary
if [[ -n $out_string ]]
then
	printf "$(date)- suspend check...\n$out_string"
	exit 1
fi
| true |
ab85d8cff0920320c2a36d4c12025ee0d8b30cfe | Shell | sohamkor/GithubActivityMonitor | /HW 4/hotswap/swapExtraCredit.sh | UTF-8 | 2,884 | 4.375 | 4 | [] | no_license | #!/usr/bin/env bash
# Docker network the web containers attach to, and the fixed name of the
# nginx reverse-proxy container that routes traffic to them.
readonly NETWORK_NAME="ecs189_default"
readonly NGINX_CONTAINER_NAME="ecs189_proxy_1"
# ------------------- Helper Methods ------------------
# killitif NAME
# If any container (running or stopped) matches NAME in `docker ps -a`,
# force-remove it.  Adapted from Prem's dorun.sh.
# FIX: the original cached `docker ps -a` in /tmp/yy_xx$$ and never deleted
# it, leaking one temp file per invocation; piping directly removes the leak.
function killitif() {
    if docker ps -a | grep --quiet -w "$1"
    then
        echo "killing older version of $1"
        # First column of the matching line is the container ID.
        docker rm -f $(docker ps -a | grep "$1" | sed -e 's: .*$::')
    fi
}
# Prints the name ("web1" or "web2") of the backend nginx currently proxies to.
function getCurrentActiveVersion() {
    # Get the config file as it is right now from the nginx server
    # NOTE(review): uses the literal container name rather than
    # $NGINX_CONTAINER_NAME -- the two must stay in sync.
    currConfig=`docker exec ecs189_proxy_1 /bin/cat /etc/nginx/nginx.conf`
    # Extract the web number from it (can be either 1 or 2, atleast as far as current functionality goes)
    # NOTE(review): `head -c 1` assumes the version number is a single digit.
    webVersion=`echo "$currConfig" | sed -n -e 's#proxy_pass http://web##p' | sed -e 's/ //g' | head -c 1`
    # Print this out
    echo "web$webVersion"
}
# swapTo IMAGE
# Hot-swaps the serving backend: finds which of web1/web2 is currently live,
# starts the *other* slot from IMAGE, flips nginx over to it, then removes
# the previously live container (and its docker-compose-named twin).
function swapTo() {
    imgToUpdateTo="$1"
    thisContainer=$(getCurrentActiveVersion)
    if [ "$thisContainer" != "web1" ] && [ "$thisContainer" != "web2" ]
    then
        echo "swapContainers: Invalid parameter - $thisContainer"
        exit 1
    fi
    # Pick the idle slot and the nginx-side script that switches traffic to it.
    if [ "$thisContainer" == "web1" ]
    then
        otherContainer="web2"
        swap_script="/bin/swap2.sh"
    else
        otherContainer="web1"
        swap_script="/bin/swap1.sh"
    fi
    # Before we start the container, we just check to make sure no stray ones of it are still remaining
    killitif $otherContainer
    # But since web1 may be spawned as ecs189_web1_1 by docker-compose, it doesn't hurt to remove that as well
    dockerComposeName="ecs189_""$otherContainer""_1"
    killitif $dockerComposeName
    # Now start up a fresh copy of it
    docker run -d --name $otherContainer --network $NETWORK_NAME $imgToUpdateTo
    # Give it some time to start up
    # NOTE(review): a fixed 5s sleep, not a health check -- a slow image may
    # still be booting when traffic is switched over.
    sleep 5
    # And execute the swap script on the nginx container to do the actual swapping
    docker exec $NGINX_CONTAINER_NAME /bin/bash $swap_script
    # And finally clean up the other container
    killitif $thisContainer
    # Once again, gotta make sure that this form of the name hasn't been configured by docker-compose
    dockerComposeName="ecs189_""$thisContainer""_1"
    killitif $dockerComposeName
}
# isValidImage NAME
# Exit status 0 iff NAME appears as a repository name in the local Docker
# image list (first column of `docker image list`).
function isValidImage() {
    imageName="$1"
    docker image list | awk '{ print $1 }' | grep --quiet -w "$imageName"
}
# --- entry point -----------------------------------------------------------
# Usage: swapExtraCredit.sh IMAGE
# Validates IMAGE and hot-swaps the live web container over to it.
imageToSwapTo="$1"
if [ -z "$imageToSwapTo" ]
then
    echo "Parameter missing: specify the image to swap to."
    exit 1
fi
if ! isValidImage "$imageToSwapTo"
then
    echo "The given image is not valid."
    exit 1
fi
# The image checks out, so perform the swap.
swapTo "$imageToSwapTo"
echo "Swapped successfully."
| true |
07171ae47704f7c9394037bb1fd323b687eabffa | Shell | ghuntley/monorepo | /third_party/git/t/perf/bisect_regression | UTF-8 | 2,156 | 3.890625 | 4 | [
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"LGPL-2.1-only",
"GPL-3.0-only",
"GPL-2.0-only",
"MIT"
] | permissive | #!/bin/sh
# Read a line coming from `./aggregate.perl --sort-by regression ...`
# and automatically bisect to find the commit responsible for the
# performance regression.
#
# Lines from `./aggregate.perl --sort-by regression ...` look like:
#
# +100.0% p7821-grep-engines-fixed.1 0.04(0.10+0.03) 0.08(0.11+0.08) v2.14.3 v2.15.1
# +33.3% p7820-grep-engines.1 0.03(0.08+0.02) 0.04(0.08+0.02) v2.14.3 v2.15.1
#
# Report a fatal error on stderr and abort the script with status 1.
die () {
	printf 'error: %s\n' "$*" >&2
	exit 1
}
# Parse command-line options.  --config and --subsection are exported so the
# perf framework invoked later (via `git bisect run`) picks them up.
while [ $# -gt 0 ]; do
	arg="$1"
	case "$arg" in
	--help)
		echo "usage: $0 [--config file] [--subsection subsection]"
		exit 0
		;;
	--config)
		shift
		# Store an absolute path: the bisect run cd's elsewhere later.
		GIT_PERF_CONFIG_FILE=$(cd "$(dirname "$1")"; pwd)/$(basename "$1")
		export GIT_PERF_CONFIG_FILE
		shift ;;
	--subsection)
		shift
		GIT_PERF_SUBSECTION="$1"
		export GIT_PERF_SUBSECTION
		shift ;;
	--*)
		die "unrecognised option: '$arg'" ;;
	*)
		die "unknown argument '$arg'"
		;;
	esac
done
# Read one aggregate.perl line from stdin:
#   <regression%> <test>.<num> <oldtime(...)> <newtime(...)> <oldrev> <newrev>
read -r regression subtest oldtime newtime oldrev newrev
# Split "p7821-grep-engines-fixed.1" into the script name (…​.sh) and the
# subtest number ("1").
test_script=$(echo "$subtest" | sed -e 's/\(.*\)\.[0-9]*$/\1.sh/')
test_number=$(echo "$subtest" | sed -e 's/.*\.\([0-9]*\)$/\1/')
# oldtime and newtime are decimal number, not integers
oldtime=$(echo "$oldtime" | sed -e 's/^\([0-9]\+\.[0-9]\+\).*$/\1/')
newtime=$(echo "$newtime" | sed -e 's/^\([0-9]\+\.[0-9]\+\).*$/\1/')
# Compare the two floats with awk (POSIX sh has no float arithmetic).
# (NB: "shoud" typo is in the original runtime message, kept verbatim.)
test $(echo "$newtime" "$oldtime" | awk '{ print ($1 > $2) }') = 1 ||
die "New time '$newtime' shoud be greater than old time '$oldtime'"
# Stash the reference timings where the bisect run script can read them.
tmpdir=$(mktemp -d -t bisect_regression_XXXXXX) || die "Failed to create temp directory"
echo "$oldtime" >"$tmpdir/oldtime" || die "Failed to write to '$tmpdir/oldtime'"
echo "$newtime" >"$tmpdir/newtime" || die "Failed to write to '$tmpdir/newtime'"
# Bisecting must be performed from the top level directory (even with --no-checkout)
# (run in a subshell so the cd does not leak back out)
(
toplevel_dir=$(git rev-parse --show-toplevel) || die "Failed to find top level directory"
cd "$toplevel_dir" || die "Failed to cd into top level directory '$toplevel_dir'"
git bisect start --no-checkout "$newrev" "$oldrev" || die "Failed to start bisecting"
git bisect run t/perf/bisect_run_script "$test_script" "$test_number" "$tmpdir"
res="$?"
git bisect reset
exit "$res"
)
| true |
99d9c1c45972a9ff20ced4d72f3c5fa69d832603 | Shell | dustinpfister/demos_linux | /forpost/linux-bash-script-arrays/s1_basic/basic-expression.sh | UTF-8 | 167 | 3.140625 | 3 | [] | no_license | #!/bin/bash
# another way is to create an expression
# like this:
# (array literal: whitespace-separated words inside parentheses)
arr=("one" "two" "three")
# a bare $arr expands to element 0 only
echo $arr # "one"
# indexing is zero-based, so [1] is the second element
echo ${arr[1]} # "two"
echo ${arr[@]} # "one two three" | true |
6d15fa9b2162f6ddab26caa905ad6f8dff69beec | Shell | jpspence/DDiMAPpublic | /scripts/runSHRiMPforDDiMAP_CS.sh | UTF-8 | 1,594 | 3.34375 | 3 | [] | no_license | #!/bin/bash
#this program expects Fastq and .fa file to exist
# NOTE(review): $myWorkingDir, $myFastq, $myFasta and $myAligned are expected
# to be exported by the caller (the Matlab/driver wrapper) -- none are set
# here; confirm before running standalone.
cd $myWorkingDir
pwd
export shrimpPath=/home/jspence/SHRiMP_2_2_3/bin/gmapper-cs
# runs shrimp to get sam file using 8 processors and fastq input...
# Add a log file as a sink for the 2 output like 2>$myAligned.log if you would like (Matlab can be used to capture all stuff and log it)
# use parameters for
# 8 processors (-N 8)
# fastq input (-Q)
# output only one per read (-o 1 --strata)
# all contigs are present (--all-contigs) - this results in smaller sam file as no split genome merge is needed
#
$shrimpPath -N 8 -Q -o 1 --strata --all-contigs $myFastq $myFasta > ${myAligned}.sam
#
# define files to read or create to make an indexed bam file from a sam file
#
#
# change the lines below to match how your files are named for this reference file and sam file combo
#
export sam_file=${myAligned}.sam
#
# change the lines below to match how you would like the output file names to look
#
export bam_file=${myAligned}.bam
export bamsorted_prefix=${myAligned}_sorted
export bamsorted_file=$bamsorted_prefix.bam
#
# view converts sam to bam
#
echo "Making unsorted bam file"
/home/jan/samtools-0.1.19/samtools view -b -S -o $bam_file $sam_file
# sam file no longer needed
# NOTE(review): the sam is removed even if the conversion above failed --
# consider gating this rm on samtools' exit status.
rm $sam_file
# sort sorts them along chromsomes to make index work
#echo "Making sorted bam file"
#/home/jan/samtools-0.1.19/samtools sort $bam_file $bamsorted_prefix
# bam file no longer needed
#rm $bam_file
# now make the index
#echo "Indexing sorted bam file"
#/home/jan/samtools-0.1.19/samtools index $bamsorted_file
| true |
fa29c674af23a189e062c9b1336b21830f512275 | Shell | ljkyp/comparetool | /conversionfile_im.ksh | UTF-8 | 5,353 | 3.921875 | 4 | [] | no_license | #!/bin/ksh
# Usage examples:
#   ksh ./conversionfile.ksh ./newdata/ datatxt11.txt 0 1   # no header row
#   ksh ./conversionfile.ksh ./newdata/ datatxt11.txt 1 1   # header row present
#   ksh ./conversionfile.ksh ./newdata/ datacsv11.csv 0 1
# Exactly 4 arguments are required.
if [[ $# -ne 4 ]]; then
	echo '引数は4個必要(現:'$#'個)'
	exit 255
fi
# $1: input data directory
INPUT_DATA_PATH=$1
# $2: input data file name
INPUT_DATA_FILE=$2
# $3: header flag (1 = first line is a header and is stripped)
HEADER_FLAG=$3
# $4: old/new flag, selects the output directory
OLD_NEW_FLAG=$4
# Pick the output directory from the old/new flag.
if [ $OLD_NEW_FLAG -eq 0 ]; then
	# BUG FIX: this previously read `OUTPUT_DATA_PATH = './oldoutput/'`; the
	# shell parses that as running a command named OUTPUT_DATA_PATH
	# (assignments must not have spaces around '='), so the variable was
	# never set when flag 0 was used.
	OUTPUT_DATA_PATH='./oldoutput/'
elif [ $OLD_NEW_FLAG -eq 1 ]; then
	OUTPUT_DATA_PATH="./newoutput/"
else
	echo '現新Flagは0または1'
	exit 255
fi
# Input file name without its extension
INPUT_FILE_NAME=${INPUT_DATA_FILE%.*}
# Extension of the input file (name stripped)
INPUT_FILE_EXT=${INPUT_DATA_FILE#*.}
# Full path of the output file
OUTPUT_DATA_FILE=$OUTPUT_DATA_PATH$INPUT_DATA_FILE
# Directory holding the pattern files
PATTERN_PATH='./pattern/'
# Pattern file name (one CSV per input file)
PATTERN_FILE=$PATTERN_PATH$INPUT_FILE_NAME'.csv'
# Temporary work files
TEMP_INPUT_DATA='./tempInputData.csv'
TEMP_LENGTHPATTERN='./lengthPattern.csv'
TEMP_OUTPUTPATTERN='./outputpattern.csv'
TEMP_OUTPUTPATTERN2='./outputpattern2.csv'
TEMP_OUTPUTPATTERN3='./outputpattern3.csv'
# Strip the header line (or not) depending on HEADER_FLAG
if [[ $HEADER_FLAG -eq 1 ]]; then
	tail -n +2 $INPUT_DATA_PATH$INPUT_DATA_FILE > $TEMP_INPUT_DATA
elif [[ $HEADER_FLAG -eq 0 ]]; then
	cat $INPUT_DATA_PATH$INPUT_DATA_FILE > $TEMP_INPUT_DATA
else
	echo 'HEADER_FLAGは0または1)'
	exit 255
fi
# CSV conversion uses the 4th row of the pattern file (here: its last line)
tail -n 1 $PATTERN_FILE |sed 's/\([^,]*\),\(.*\)/\2/' > $TEMP_OUTPUTPATTERN
# TXT conversion also uses the 3rd row (field lengths, second-to-last line)
tail -n 2 $PATTERN_FILE |sed 's/\([^,]*\),\(.*\)/\2/' | sed '2d' > $TEMP_LENGTHPATTERN
COUNT=0          # current field index (1-based, for cut -f)
IS_REMAINED=TRUE # loop flag: more fields left in the pattern row
START=1          # start position for cut -c ranges (txt mode)
END=0            # end position for cut -c ranges (txt mode)
touch $TEMP_OUTPUTPATTERN2
if [[ $INPUT_FILE_EXT == 'csv' ]]; then
	# Build the awk field list (e.g. "$1,$3,") for CSV input.
	while [ $IS_REMAINED == TRUE ]
	do
		COUNT=$(( $COUNT + 1 ))
		# Read this field's output flag; an empty value means we ran past
		# the last field.
		OUTPUT_ITEM=`cat $TEMP_OUTPUTPATTERN | cut -f $COUNT -d ','`
		if [[ $OUTPUT_ITEM != '' ]]; then
			# Drop any non-digit characters
			OUTPUT_ITEM=`echo $OUTPUT_ITEM | sed 's/[^0-9]//g'`
			# A pattern value of '0' suppresses this field
			if [[ $OUTPUT_ITEM != '0' ]]; then
				echo -n '$'$COUNT',' >> $TEMP_OUTPUTPATTERN2
			fi
		else
			IS_REMAINED=FALSE
		fi
	done
elif [[ $INPUT_FILE_EXT == 'txt' ]]; then
	# Build the cut -c range list (e.g. "1-4,5-10,") for fixed-width input.
	while [ $IS_REMAINED == TRUE ]
	do
		COUNT=$(( $COUNT + 1 ))
		# Output flag for this field; empty means no more fields
		OUTPUT_ITEM=`cat $TEMP_OUTPUTPATTERN | cut -f $COUNT -d ','`
		# Field length used to advance the cut positions
		CUT_LENGTH=`cat $TEMP_LENGTHPATTERN | cut -f $COUNT -d ','`
		# If the output-flag column still has a value
		if [[ $OUTPUT_ITEM != '' ]]; then
			# Drop any non-digit characters
			CUT_LENGTH=`echo $CUT_LENGTH | sed 's/[^0-9]//g'`
			# Compute the end position of this field
			if [[ $END -eq 0 ]]; then
				END=$CUT_LENGTH
			else
				END=`expr $END + $CUT_LENGTH`
			fi
			# A pattern value of '0' suppresses this field
			if [[ $OUTPUT_ITEM != '0' ]]; then
				echo -n $START'-'$END',' >> $TEMP_OUTPUTPATTERN2
			fi
			# Next field starts right after this one
			START=`expr $END + 1`
		else
			IS_REMAINED=FALSE
		fi
	done
fi
# Strip the trailing ',' from the generated pattern
sed 's/\(\,$\)//g' $TEMP_OUTPUTPATTERN2 > $TEMP_OUTPUTPATTERN3
# Load the finished field-selection pattern
PATTERN=$(<$TEMP_OUTPUTPATTERN3)
# Branch on the input file's extension
if [[ $INPUT_FILE_EXT == 'csv' ]]; then
	# Convert the input and write the output file (awk field list)
	awk 'BEGIN{ FS=","; OFS=","; } { print '$PATTERN'; }' $TEMP_INPUT_DATA > $OUTPUT_DATA_FILE
elif [[ $INPUT_FILE_EXT == 'txt' ]]; then
	# Convert the input and write the output file (fixed-width columns)
	# cat ./newdata/datatxt1.txt | cut -c1-1,2-5,6-10,11-14,23-26,27-30,31-34,35-38
	cat $TEMP_INPUT_DATA | cut -c$PATTERN > $OUTPUT_DATA_FILE
else
	# Unsupported extension
	echo '拡張子がtxtやcsvではない。'
	exit 255
fi
# Print the original file
echo '原本ファイル'
cat $INPUT_DATA_PATH$INPUT_DATA_FILE
echo ''
# Print the converted file
echo '変換後ファイル'
cat $OUTPUT_DATA_FILE
# Delete the intermediate work files; each one is removed only if it exists.
for work_file in "$TEMP_LENGTHPATTERN" "$TEMP_OUTPUTPATTERN" \
	"$TEMP_OUTPUTPATTERN2" "$TEMP_OUTPUTPATTERN3" "$TEMP_INPUT_DATA"
do
	if [[ -f "$work_file" ]]; then
		rm "$work_file"
	fi
done
# Normal termination
exit 0
af1d6742de342cf4ecdb25f898efc13f7f595a56 | Shell | EmmaEyckmans/dev-workflow-skeleton | /ops/scripts/start-jenkins.sh | UTF-8 | 546 | 3.015625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
source build-maven.sh
JENKINS_HOME="/var/jenkins_home/"
if [ ! -d $JENKINS_HOME ]
then
echo "Need password to create $JENKINS_HOME as mount point for JENKINS_HOME"
sudo mkdir -p $JENKINS_HOME
sudo chown 1000 $JENKINS_HOME
fi
docker network create -d bridge dws-jenkins
docker build -t dws/jenkins ../jenkins/
docker rm -vf dws_jenkins
docker run -d -p 8888:8888 \
--net dws-jenkins \
-v $JENKINS_HOME:/var/jenkins_home \
-v /var/run/docker.sock:/var/run/docker.sock \
--name dws_jenkins dws/jenkins | true |
c74c74099ad63728a6845e75ddff63d38e507ef5 | Shell | andrewsardone/backup | /backup.sh | UTF-8 | 1,185 | 4.1875 | 4 | [
"MIT"
] | permissive | #/usr/bin/env bash
# Usage: ./backup
# Executes a backup of my system. It will ensure only one backup is running at
# once. Handy for one-off full-system backups or for scheduling via a job
# runner.
set -e
function check_dependency() {
command -v $1 >/dev/null 2>&1 || {
echo >&2 "${1} is required. Please install."
exit 1
}
}
# echo but with a the date & time prefixed on the line
function techo() {
echo $(date +"%Y-%m-%d %T") $@
}
check_dependency restic
base_path=`dirname $0`
env_file="$base_path/restic.env"
files_list="$base_path/restic.files"
pid_file="$base_path/tmp/backup.pid"
# Set up PID file for a simple lock to prevent duplicate backups running
# simultaneously.
if [ -f "$pid_file" ]; then
if ps -p $(cat $pid_file) > /dev/null; then
techo "File $pid_file exists. Another backup is probably already in progress."
exit 1
else
techo $(date +"%Y-%m-%d %T") "File $pid_file exists but process "$(cat $pid_file)" not found. Removing PID file."
rm $pid_file
fi
fi
echo $$ > "$pid_file"
techo "Backup start"
source "$env_file"
restic backup \
--files-from="$files_list" \
--tag automated
techo "Backup finished"
rm "$pid_file"
| true |
75a1570a766d2695cc3dd88847dc21ec6bef4c1d | Shell | iMadeThem/RDC | /Tools/git-hooks/pre-commit.cmds/00_codestyler_hook | UTF-8 | 2,182 | 3.765625 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
set -e
set -u
set -o pipefail
CUR_DIR="$( dirname "${BASH_SOURCE[0]}" )"
CUR_DIR="${CUR_DIR/\.git*/}"
cd "${CUR_DIR}"
REPO_ROOT="$(unset GIT_DIR; git rev-parse --show-toplevel)"
CONFIG_FILE_C="${REPO_ROOT}/Tools/codestyler/lang-c.cfg"
CONFIG_FILE_OC="${REPO_ROOT}/Tools/codestyler/lang-objc.cfg"
CONFIG_FILE_JAVA="${REPO_ROOT}/Tools/codestyler/lang-java.cfg"
EXE_PATH="${REPO_ROOT}/Tools/codestyler/uncrustify"
cd "${REPO_ROOT}"
# Loop through all the files of interest
while IFS= read -rd '' GIT_STATUS
do
IFS= read -rd '' FILE_PATH
# format all the files
#for FILE_PATH in "${REPO_ROOT}"/source/code/*
#do
[ "$GIT_STATUS" == 'D' ] && continue
FILE_EXT="${FILE_PATH##*.}"
[ "$FILE_EXT" != 'h' ] &&
[ "$FILE_EXT" != 'c' ] &&
[ "$FILE_EXT" != "cpp" ] &&
[ "$FILE_EXT" != "cc" ] &&
[ "$FILE_EXT" != 'm' ] &&
[ "$FILE_EXT" != "java" ] && continue
case $FILE_EXT in
[hc] ) # C code [.h | .c]
echo "Formatting C source code..."
"$EXE_PATH" -l C -c "$CONFIG_FILE_C" --no-backup --mtime "$FILE_PATH" 2>&1 || true
rm "${FILE_PATH}.uncrustify" >/dev/null 2>&1 || true
git add "$FILE_PATH"
;;
"cpp" | "cc" )
echo "Formatting C++ source code..."
"$EXE_PATH" -l C++ -c "$CONFIG_FILE_C" --no-backup --mtime "$FILE_PATH" 2>&1 || true
rm "${FILE_PATH}.uncrustify" >/dev/null 2>&1 || true
git add "$FILE_PATH"
;;
[m] ) # Object-C code [.m]
echo "Formatting Object-C source code..."
"$EXE_PATH" -l OC -c "$CONFIG_FILE_OC" --no-backup --mtime "$FILE_PATH" 2>&1 || true
rm "${FILE_PATH}.uncrustify" >/dev/null 2>&1 || true
git add "$FILE_PATH"
;;
"java" )
echo "Formatting JAVA source code..."
"$EXE_PATH" -l JAVA -c "$CONFIG_FILE_JAVA" --no-backup --mtime "$FILE_PATH" 2>&1 || true
rm "${FILE_PATH}.uncrustify" >/dev/null 2>&1 || true
git add "$FILE_PATH"
;;
* )
echo "Unknown source code found: $FILE_PATH"
;;
esac
done < <(git diff --cached --name-status -z)
echo "Done"
| true |
cd33fd4526226480fdd1e6904bbc14d786d7d933 | Shell | novcn/f14 | /net432/projects/project_2/run.sh | UTF-8 | 1,591 | 2.828125 | 3 | [] | no_license | #!/usr/bin/bash
# Uncomment the topology you want. The capital-H topology is uncommented here.
# (The old header claimed the two-server topology was active, which no longer
# matched the code below.)
# Change the SERVER variable below to point at your server executable.
SERVER=./server
# Executable basename, used by pkill on exit; parameter expansion replaces
# the old `echo | sed` pipeline.
SERVER_NAME=${SERVER##*/}
#Server topology for Project 1
# $SERVER localhost 5000
# Generate simple one way server topology
# $SERVER localhost 5000 localhost 5001 &
# Generate a simple two-server topology
# $SERVER localhost 5000 localhost 5001 &
# $SERVER localhost 5001 localhost 5000 &
# Generate a capital-H shaped topology
$SERVER localhost 5000 localhost 5001 &
$SERVER localhost 5001 localhost 5000 localhost 5002 localhost 5003 &
$SERVER localhost 5002 localhost 5001 &
$SERVER localhost 5003 localhost 5001 localhost 5005 &
$SERVER localhost 5004 localhost 5005 &
$SERVER localhost 5005 localhost 5004 localhost 5003 localhost 5006 &
$SERVER localhost 5006 localhost 5005 &
# Generate a 3x3 grid topology
# $SERVER localhost 5000 localhost 5001 localhost 5003 &
# $SERVER localhost 5001 localhost 5000 localhost 5002 localhost 5004 &
# $SERVER localhost 5002 localhost 5001 localhost 5005 &
# $SERVER localhost 5003 localhost 5000 localhost 5004 localhost 5006 &
# $SERVER localhost 5004 localhost 5001 localhost 5003 localhost 5005 localhost 5007 &
# $SERVER localhost 5005 localhost 5002 localhost 5004 localhost 5008 &
# $SERVER localhost 5006 localhost 5003 localhost 5007 &
# $SERVER localhost 5007 localhost 5006 localhost 5004 localhost 5008 &
# $SERVER localhost 5008 localhost 5005 localhost 5007 &
# Block until the user confirms shutdown; the pkill below then reaps every
# spawned server by name.
echo "Press ENTER to quit"
read -r
pkill $SERVER_NAME | true |
a7f1619944cad75e9a9a88a18639a373df975732 | Shell | ggcatu/AI-Games | /Entrega/Ejercicio 3/TopSpin/run.bash | UTF-8 | 165 | 2.578125 | 3 | [] | no_license | #!/bin/bash
# Feed each TopSpin instance from the input file to the solver, one
# background job per line, with a 10-minute budget per instance; results
# accumulate in e.txt.
while read line
do
    echo "$line" | timeout 600s ./16_4_topspin.dfs_2 >> e.txt &
    # Throttle: once every CPU core has a job, block until the batch drains.
    if [ $(jobs | wc -l) -ge $(nproc) ]; then
        wait
    fi
done < topspin16-4.txt
| true |
0a43ba87c7c8c48afe46ca0d40ba4cafc3d93c7c | Shell | a5ob7r/xmonad-config | /build | UTF-8 | 4,090 | 4.34375 | 4 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
# This is a custom build script to build and manage my xmonad executable by
# cabal. 'xmonad' command use custom build script if it find an executable
# script named as 'build` on the config directory. For instance, this script is
# deployed to '~/.config/xmonad/build' with user executable permission.
# 'xmonad' command pass an appropriate user xmonad executable path as the first
# argument to the custom build script when calls it. So if we run it by hand,
# we also need to pass output path as the first argument like below.
#
# $ ./build ~/.cache/xmonad/xmonad-x86_64-linux
#
# 'xmonad' doesn't check whether or not we should recompile if we have a custom
# build script. So we need to check it ourselves.
#
# 'xmonad' command redirects stderr output of the custom build script to
# xmonad.errors, which is probably at '~/.local/share/xmonad/xmonad.errors'.
# Strict mode: -C noclobber, -u nounset, -e errexit, plus pipefail.
set -Cueo pipefail
###############################################################################
# Constants
# Log prefix message.
readonly LOGPREFIX="[${0##*/}]"
# My xmonad executable name compiled by cabal.
readonly BINNAME=myxmonad
# Files consist my xmonad executable. It is out-of-date if even one of them is
# newer than it.
# (Directories in this list are scanned recursively for Haskell sources;
# plain entries are compared as single files — see the main section below.)
readonly -a SOURCES=(
app
src
xmonad-config.cabal
cabal.project
cabal.project.local
)
###############################################################################
# Functions
# Surround everything read from stdin with a prefix ($1) and a suffix ($2).
# Backslash escapes in the prefix, suffix and each input line are
# interpreted (echo -e semantics); input line breaks are preserved.
wrap () {
  local line
  echo -e -n "$1"
  while IFS= read -r line; do
    echo -e "$line"
  done
  echo -e -n "$2"
}
# Error message: print "$LOGPREFIX <args>" to stderr, wrapped in red ANSI
# codes when stdout is an interactive terminal that supports color.
error () {
  # Bug fix: the locals must be initialized — this script runs under
  # `set -u`, and a declared-but-unassigned local is still unset, so the
  # non-tty path used to abort on "$left". ${TERM-} likewise guards
  # against an unset TERM.
  local left='' right=''
  if [[ -t 1 && ${TERM-} != dumb ]]; then
    left='\033[31m'   # red
    right='\033[0m'   # reset
  fi
  wrap "$left" "$right" <<<"$LOGPREFIX $*" >&2
}
# Notification message: the log prefix followed by all arguments, printed
# on one line to stdout.
info () {
  printf '%s %s\n' "$LOGPREFIX" "$*"
}
# Build and deploy(install) my xmonad executable.
# $1 - destination executable path that xmonad expects; it becomes a
#      symlink (named after that path) pointing at the cabal-installed
#      $BINNAME in the same directory.
# Returns non-zero if the destination directory does not exist.
install_xmonad () {
local -r dest=$1
local -r dest_dir=${dest%/*}
if ! [[ -d $dest_dir ]]; then
error "$dest_dir isn't a directory."
return 1
fi
# NOTE: xmonad binaries built by cabal-3.8.1.0 can't create xft font sets.
cabal install \
--installdir="$dest_dir" \
--install-method=copy \
--overwrite-policy=always \
;
# Ensure $dest ends up as a symlink to the freshly installed $BINNAME;
# anything else already at that path is reported and left for the user.
if [[ -e $dest ]]; then
if [[ -h $dest ]]; then
if [[ $dest -ef $dest_dir/$BINNAME ]]; then
info "$dest is linked to $dest_dir/$BINNAME, so no need to do anything."
else
info "$dest isn't linked to $dest_dir/$BINNAME. Please unlink it."
fi
else
info "$dest isn't a symlink. Please remove it."
fi
else
ln -sv "$BINNAME" "$dest"
fi
}
###############################################################################
# Main

# Arguments: exactly one — the output executable path xmonad passes in.
case $# in
  0 )
    error 'Must pass the output executable path as the first argument.'
    exit 1
    ;;
  1 )
    DEST=$1
    readonly DEST
    ;;
  * )
    error 'Too many arguments. Only needs one argument.'
    exit 1
    ;;
esac

# Initial installation: no executable yet, so build unconditionally.
if ! [[ -x $DEST ]]; then
  info "Not found an executable of '$DEST', so build and install it."
  install_xmonad "$DEST"
  exit
fi

declare -a updated=()

# NOTE: "read" with "-d DELIM" returns non-zero if the DELIM isn't in the
# input. If the DELIM is an empty string, it means that DELIM is a NUL
# character (\0). However any NUL character in command substitutions is ignored
# by bash, so "read -d ''" with an input which is created by a command
# substitution always returns non-zero as a status code.
read -r -d '' -a srcs <<<"$(
  for src in "${SOURCES[@]}"; do
    if [[ -d $src ]]; then
      # Bug fix: group the -name alternatives so every one of them is
      # restricted to regular files. Previously '-type f' only bound to
      # '*.hs' because find's implicit -a binds tighter than -o, so
      # non-regular entries named *.lhs/*.hsc could slip in.
      find "$src" -type f \( -name '*.hs' -o -name '*.lhs' -o -name '*.hsc' \)
    else
      echo "$src"
    fi
  done
)" || true

# Collect every source file that is newer than the installed executable.
for src in "${srcs[@]}"; do
  if [[ $src -nt $DEST ]]; then
    updated+=("$src")
  fi
done

# Recompile if my xmonad executable is out-of-date.
if (( ${#updated[@]} )); then
  info "Found some updated source files."
  for src in "${updated[@]}"; do
    info " - $src"
  done
  info "So recompile my xmonad executable."
  install_xmonad "$DEST"
else
  info 'No need to recompile Xmonad, so do nothing.'
fi
| true |
c5352a2a7783275ef5bc2632ddbabda1cf92660f | Shell | a07061625/swooleyaf_install | /configs/swooleyaf/es/install.sh | UTF-8 | 4,198 | 2.8125 | 3 | [
"BSD-2-Clause"
] | permissive | #!/bin/bash
# NOTE(review): this file is a RUNBOOK of manual steps, not a script that is
# safe to execute end-to-end: the `vim` invocations are interactive, the
# bare lines that follow each `vim` are the contents to add to that file,
# and the `//...` fragments after some commands are operator notes, not
# valid shell. Treat each section as copy/paste instructions.
set -o nounset
set -o errexit
# Add the following line to /etc/sysctl.conf, then reload it:
vim /etc/sysctl.conf
vm.max_map_count=655360
sysctl -p
# Reboot the machine
# Add the following limits to /etc/security/limits.conf:
vim /etc/security/limits.conf
* soft memlock unlimited
* hard memlock unlimited
* soft nproc 120000
* hard nproc 131072
* soft nofile 260000
* hard nofile 262140
root soft memlock unlimited
root hard memlock unlimited
root soft nproc 120000
root hard nproc 131072
root soft nofile 260000
root hard nofile 262140
cd /usr/local/elasticsearch
# Plugins
## Online installation
bin/elasticsearch-plugin install https://github.com/medcl/elasticsearch-analysis-ik/releases/download/v7.7.1/elasticsearch-analysis-ik-7.7.1.zip
bin/elasticsearch-plugin install https://github.com/medcl/elasticsearch-analysis-pinyin/releases/download/v7.7.1/elasticsearch-analysis-pinyin-7.7.1.zip
## Installation from a downloaded archive
cd /usr/local/elasticsearch/plugins
unzip analysis-ik-v7.7.1.zip
unzip analysis-pinyin-v7.7.1.zip
rm -rf analysis-ik-v7.7.1.zip
rm -rf analysis-pinyin-v7.7.1.zip
mkdir /home/data/elasticsearch
mkdir /home/logs/elasticsearch
chown -R www:www /usr/local/elasticsearch
chown -R www:www /home/logs/elasticsearch
chown -R www:www /home/data/elasticsearch
cd /usr/local/elasticsearch
# Certificate generation (the "//..." text below is a note: enter a
# password of your choice / the password chosen when the CA was created)
bin/elasticsearch-certutil ca //输入自定义密码
bin/elasticsearch-certutil cert --ca elastic-stack-ca.p12 //输入自定义密码
mkdir config/certs
mv elastic-stack-ca.p12 config/certs/
mv elastic-certificates.p12 config/certs/
bin/elasticsearch-keystore add xpack.security.transport.ssl.keystore.secure_password //密码为创建证书时设置的密码
bin/elasticsearch-keystore add xpack.security.transport.ssl.truststore.secure_password //密码为创建证书时设置的密码
# Start in the foreground
bin/elasticsearch
# Start as a daemon
bin/elasticsearch -d
# Initialize the built-in user passwords
bin/elasticsearch-setup-passwords interactive
# Index optimization
curl -u elastic:jw07061625 -H "Content-Type: application/json" -X PUT 'http://192.168.96.21:9201/_all/_settings?preserve_existing=true' -d '{"index.mapping.total_fields.limit" : "200","index.merge.scheduler.max_thread_count" : "1"}'
curl -u elastic:jw07061625 -H "Content-Type: application/json" -X PUT 'http://192.168.96.21:9201/_settings' -d '{"index":{"number_of_replicas":0}}'
# Delete old logs; ideally also restart elasticsearch once a day
DEL_DATE=`date +%Y-%m-%d -d "-3 days"`
curl -u elastic:jw07061625 -X DELETE http://192.168.96.21:9201/log-${DEL_DATE}
# Flush data to disk; run every half hour
curl -u elastic:jw07061625 -X POST http://192.168.96.21:9201/_flush
# Tune the index write parameters, trading durability for higher write throughput
# index.refresh_interval: how often docs become searchable; can be disabled entirely when near-real-time search is not required
# index.translog.durability: whether every write/update triggers an fsync; the default is "request", i.e. fsync on every request
# index.translog.flush_threshold_size: a flush is performed once the translog reaches this size; default is 512mb
# index.translog.sync_interval: how often an fsync is triggered; data only reaches disk once it has been fsynced
# index.merge.scheduler.max_thread_count: number of threads used for segment merges; set to 1 when the disk is not an SSD
# index.merge.policy.max_merged_segment: the largest segment that may be produced by a merge
# index.merge.policy.floor_segment: segments smaller than this are merged preferentially; raise it to complete as much merging as possible in one pass
# index.mapping.total_fields.limit: raises the cap on the number of fields per index
curl -H "Content-Type:application/json" -u elastic:jw07061625 -d '{
"index.refresh_interval":"60s",
"index.translog.durability":"async",
"index.translog.flush_threshold_size":"512mb",
"index.translog.sync_interval":"60s",
"index.merge.scheduler.max_thread_count":1,
"index.merge.policy.max_merged_segment":"1gb",
"index.merge.policy.floor_segment":"100mb",
"index.mapping.total_fields.limit":2000
}' -X PUT http://192.168.96.21:9201/_settings
# Fix for: "Data too large, data for [@timestamp] would be larger than limit"
curl -u elastic:jw07061625 -H "Content-Type: application/json" -d '{"fielddata":"true"}' -X POST 'http://192.168.96.21:9201/_cache/clear'
# Documentation: https://github.com/elastic/built-docs.git
| true |
aae795b97c58c6068359c6893e0ca5027273c1cf | Shell | curityio/mutual-tls-api-example | /2-build.sh | UTF-8 | 1,156 | 3.515625 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
########################################################################
# A script to build resources into Docker containers ready for deploying
########################################################################

#
# Get full path of the parent folder of this script
#
D=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)

#
# Get API dependencies
#
cd "$D/api"
rm -rf node_modules
npm install
if [ $? -ne 0 ]; then
  echo "Problem encountered downloading API dependencies"
  exit 1
fi

#
# Build API code
#
npm run build
if [ $? -ne 0 ]; then
  echo "Problem encountered building API code"
  exit 1
fi

#
# Build the API Docker Container
#
docker build -f ./Dockerfile -t mutual-tls-api:1.0.0 .
if [ $? -ne 0 ]; then
  echo 'Problem encountered building the API docker container'
  # Bug fix: a bare `exit` here returned status 0, reporting success to
  # callers despite the failed build.
  exit 1
fi

cd ../docker/reverse-proxy

#
# Download reverse proxy plugins
#
git submodule update --init --remote --rebase

#
# Build the customized NGINX Docker Container with plugins
#
docker build -f ./Dockerfile -t custom_openresty:1.19.3.1-8-bionic .
if [ $? -ne 0 ]; then
  echo 'Problem encountered building the Custom NGINX docker container'
  # Bug fix: likewise, fail loudly instead of exiting 0.
  exit 1
fi
| true |
ce790af42e5b965e0f625f47ce23152ef3f64af0 | Shell | safespring-community/nextcloud | /mail_config.sh | UTF-8 | 831 | 2.953125 | 3 | [] | no_license | #!/bin/bash
echo "Script to configure mail"
while ! php /var/www/html/occ status | grep -q 'installed: true'
do
echo "Waiting for the Nextcloud installation to complete..."
sleep 5
done
echo "Configuring mail"
php /var/www/html/occ config:system:set mail_from_address --value=${MAIL_FROM_ADDRESS}
php /var/www/html/occ config:system:set mail_smtpmode --value=${MAIL_SMTPMODE}
php /var/www/html/occ config:system:set mail_sendmailmode --value=${MAIL_SENDMAILMODE}
php /var/www/html/occ config:system:set mail_domain --value=${MAIL_DOMAIN}
php /var/www/html/occ config:system:set mail_smtpport --value=${MAIL_SMTPPORT}
php /var/www/html/occ config:system:set mail_smtphost --value=${MAIL_SMTPHOST}
php /var/www/html/occ user:setting ${NEXTCLOUD_ADMIN_USER} settings email "${NEXTCLOUD_ADMIN_EMAIL}"
echo "Mail configured"
| true |
c0b4d994c45695bbe913d220b6e720516cacf67d | Shell | jkramer/home | /.zsh/func/cleanup | UTF-8 | 194 | 2.8125 | 3 | [] | no_license | # vim:filetype=zsh
trap "cleanup" EXIT
function cleanup {
/usr/bin/clear
SHELLS=`pgrep -U $EUID "zsh" | grep -cv $$`
if [[ $SHELLS = 0 ]]; then
/bin/rm -f "$HOME/.gpg-agent-info"
fi
}
| true |
830dcedbf41fbf142d1b5fb8685c655da095da8e | Shell | sly117/tpm-luks-new | /dracut/RHEL6/plymouth-tpm/plymouth-populate-initrd | UTF-8 | 1,649 | 3.34375 | 3 | [] | no_license | #!/bin/bash
# Dracut module installer: pulls plymouthd, the configured plymouth theme
# and its plugins into the initramfs being assembled. Relies on helpers
# (inst, dracut_install) and variables ($initdir, $hostonly) provided by
# the dracut module-setup environment.

# Pick the library directory matching the plymouthd binary's architecture.
if ldd /sbin/plymouthd |grep -q lib64; then
    LIBDIR="/usr/lib64"
else
    LIBDIR="/usr/lib"
fi

PLYMOUTH_LOGO_FILE="/usr/share/pixmaps/system-logo-white.png"
PLYMOUTH_THEME=$(plymouth-set-default-theme)

inst /sbin/plymouthd /bin/plymouthd
dracut_install /bin/plymouth \
    "${PLYMOUTH_LOGO_FILE}" \
    /etc/system-release

mkdir -p "${initdir}/usr/share/plymouth"

if [[ $hostonly ]]; then
    # Host-only image: install the text/details fallbacks plus the current
    # theme only. Bug fix: the stray line continuation after text.plymouth
    # used to swallow the following `if`, which made bash reject the whole
    # script with a syntax error ("then" without "if").
    dracut_install "${LIBDIR}/plymouth/text.so" \
        "${LIBDIR}/plymouth/details.so" \
        "/usr/share/plymouth/themes/details/details.plymouth" \
        "/usr/share/plymouth/themes/text/text.plymouth"
    if [[ -d /usr/share/plymouth/themes/${PLYMOUTH_THEME} ]]; then
        for x in "/usr/share/plymouth/themes/${PLYMOUTH_THEME}"/* ; do
            # NOTE(review): `break` stops at the first non-regular entry and
            # skips the rest of the theme; `continue` (as used in the
            # generic branch below) may have been intended — confirm.
            [[ -f "$x" ]] || break
            inst $x
        done
    fi
    if [ -L /usr/share/plymouth/themes/default.plymouth ]; then
        inst /usr/share/plymouth/themes/default.plymouth
        # Install plugin for this theme
        # NOTE(review): this extracts the SECOND whitespace-separated word
        # of the ModuleName line; verify the .plymouth files here really
        # use "ModuleName = <plugin>" with spaces, otherwise $b is empty.
        PLYMOUTH_PLUGIN=$(grep "^ModuleName=" /usr/share/plymouth/themes/default.plymouth | while read a b c; do echo $b; done;)
        inst ${LIBDIR}/plymouth/${PLYMOUTH_PLUGIN}.so
    fi
else
    # Generic image: ship the text and details themes wholesale.
    for x in /usr/share/plymouth/themes/{text,details}/* ; do
        [[ -f "$x" ]] || continue
        THEME_DIR=$(dirname "$x")
        mkdir -p "${initdir}/$THEME_DIR"
        dracut_install "$x"
    done
    for x in ${LIBDIR}/plymouth/{text,details}.so ; do
        [[ -f "$x" ]] || continue
        [[ "$x" != "${x%%/label.so}" ]] && continue
        dracut_install "$x"
    done
    (
        cd ${initdir}/usr/share/plymouth/themes;
        ln -s text/text.plymouth default.plymouth 2>&1;
    )
fi

# vim:ts=8:sw=4:sts=4:et
| true |
0f4ef406dadb908552217a61ecc2df0883df47d9 | Shell | mrjulio/freebsd-unattended | /memcached/install.sh | UTF-8 | 587 | 3.1875 | 3 | [] | no_license | #!/usr/bin/env bash
#
# Memcached: build the port (plus the PHP pecl extension) unless the
# package is already present, then register it in /etc/rc.conf.
#
memcached_pkgs=$(pkg info | grep -c memcached)
if [ "$memcached_pkgs" -ne 0 ]
then
    echoStatus $COLOR_WHITE '[SKIP] memcached'
else
    echoStatus $COLOR_BLUE '[INSTALL] memcached'
    cd /usr/ports/databases/memcached
    make -DBATCH install clean
    # Enable the service and record its listen/size/thread flags.
    echo 'memcached_enable="YES"' >> /etc/rc.conf
    echo 'memcached_flags="-l '$MEMCACHED_LISTEN_IP' -p '$MEMCACHED_LISTEN_PORT' -m '$MEMCACHED_MEMORY' -t '$MEMCACHED_THREADS' -d"' >> /etc/rc.conf
    # PHP binding built from ports as well.
    cd /usr/ports/databases/pecl-memcached
    make -DBATCH install clean
    echoStatus $COLOR_GREEN '[FINISH] memcached'
fi
| true |
f49f2cd7b217647f99abfc16204e0745e6273f03 | Shell | budcalabrese/Bash | /pullinghostnames.bash | UTF-8 | 446 | 3.46875 | 3 | [] | no_license | #!/bin/bash
# Directory that contains the Nginx routes file.
MYPATH="/home/usr/bud"
# Pull host:port entries from the Nginx reverse proxy config: lines whose
# first field is "server" and whose second field matches the domain name
# pattern (placeholder below).
for server in $(awk '$1~/server/ && $2~/stripdomainname/ {print $2}' "${MYPATH}/routes.conf"); do
    host=${server%:*}     # strip the :port suffix
    port=${server##*:}    # keep only the port
    # Print hostname, its resolved address (last field of `host` output)
    # and the port with any trailing ';' removed. Expansions are quoted so
    # unexpected whitespace in the config cannot re-split the output.
    echo "${host}" "$(host "${host}" | awk '{print $NF}')" "${port//;/}"
done
7bfc45b07b0e91c40db4e83f9dc48a472b1ba703 | Shell | phoronix-test-suite/test-profiles | /pts/tww2-1.0.0/install.sh | UTF-8 | 14,801 | 3.875 | 4 | [
"MIT"
] | permissive | #!/bin/bash -e
# Install Total War WARHAMMER II on Linux and generate launcher scripts and preference templates

# Base constants
#
export STEAM_GAME_ID=594570
export GAME_PREFS="$DEBUG_REAL_HOME/.local/share/feral-interactive/Total War WARHAMMER II"
export GAME_INSTALL_DIR_BASE="steamapps/common/Total War WARHAMMER II/"
export DEFAULT_STEAM_INSTALL_BASE="$DEBUG_REAL_HOME/.steam/steam"

# Try and install the game in case it isn't already
#
echo "Ensuring game is installed"
HOME="$DEBUG_REAL_HOME" steam "steam://install/$STEAM_GAME_ID"

# Work out the steam install directory
#
export CONFIG_PATH="$DEBUG_REAL_HOME/.steam/steam/config/config.vdf"
echo "Searching ${CONFIG_PATH} for install directories"
_INSTALL_PATHS=$( awk '/BaseInstallFolder/ { gsub(/"/, "", $2); print $2 }' "${CONFIG_PATH}" )

# Find one that contains the game
while read -r STEAM_PATH; do
    _NEW_FULL_PATH="${STEAM_PATH}/${GAME_INSTALL_DIR_BASE}"
    echo "Checking for game install: ${_NEW_FULL_PATH}"
    if [ -d "${_NEW_FULL_PATH}" ]; then
        echo "Found game install: ${_NEW_FULL_PATH}"
        export GAME_INSTALL_DIR="${_NEW_FULL_PATH}"
    fi
done <<< "${_INSTALL_PATHS}"

# Allow the default location as well
if [ ! -d "${GAME_INSTALL_DIR}" ]; then
    export GAME_INSTALL_DIR="${DEFAULT_STEAM_INSTALL_BASE}/${GAME_INSTALL_DIR_BASE}"
    echo "Using default directory for game install: ${GAME_INSTALL_DIR}"
fi

# Bail if we still couldn't find the game
if [ ! -f "${GAME_INSTALL_DIR}/TotalWarhammer2.sh" ]; then
    >&2 echo "Missing run script in install dir - ${GAME_INSTALL_DIR}/TotalWarhammer2.sh"
    exit 1
fi

# Gather the steam env variables the game runs with: launch the game
# briefly, snapshot its /proc environment into steam-env-vars.sh, then
# kill it again.
echo "Gathering environment variables for game"
HOME="$DEBUG_REAL_HOME" steam steam://run/$STEAM_GAME_ID &
sleep 6
GAME_PID=$( pidof TotalWarhammer2 | cut -d' ' -f1 )
if [ -z "$GAME_PID" ]; then
    echo "Could not find process TotalWarhammer2"
    exit 1
fi
echo '#!/bin/bash' > steam-env-vars.sh
# Bug fix: plain echo does not interpret "\n", so the header used to be one
# line containing a literal backslash-n; emit the two comment lines
# separately instead.
echo "# Collected steam environment for Total War: Warhammer II" >> steam-env-vars.sh
echo "# PID : $GAME_PID" >> steam-env-vars.sh
while read -rd $'\0' ENV ; do
    # Split NAME=VALUE on the first '=' only. Bug fix: '-f2-' keeps values
    # that themselves contain '=' (the old '-f2' truncated them), and %q
    # shell-quotes the value so quotes/spaces/$ in the environment cannot
    # break the generated script.
    NAME=$(echo "$ENV" | cut -zd= -f1); VAL=$(echo "$ENV" | cut -zd= -f2-)
    case $NAME in
        *DBUS*) true
            ;;
        *)
            printf 'export %s=%q\n' "$NAME" "$VAL"
            ;;
    esac
done < "/proc/$GAME_PID/environ" >> steam-env-vars.sh
killall -9 TotalWarhammer2
sleep 6

if [ -z "${STEAM_ACCOUNT_ID}" ]; then
    # NOTE(review): picks the first SaveData entry as the account id — this
    # assumes a single Steam account on the machine; verify for
    # multi-account setups.
    pushd "${GAME_PREFS}/SaveData/"
    STEAM_ACCOUNT_ID="$(ls |head -1)"
    popd
else
    STEAM_ACCOUNT_ID="Steam Saves (${STEAM_ACCOUNT_ID})"
fi
RESULTS_PREFIX="${GAME_PREFS}/SaveData/${STEAM_ACCOUNT_ID}/"
# Create the game launching script
#
echo "Generating run script"
# Unescaped $vars below are expanded NOW and baked into tww2.sh; the
# \$-escaped ones are evaluated later, when the generated script itself
# runs after a benchmark and harvests the results.
cat > tww2.sh <<- EOM
#!/bin/bash
# Generated run script for Total War: WARHAMMER II
# $( date )
# Source the steam runtime environment
#
. steam-env-vars.sh
# Run the game
#
cd "${GAME_INSTALL_DIR}"
./TotalWarhammer2.sh
# Grab the output (most recent non _frametimes txt file)
RESULTS_DIR="${RESULTS_PREFIX}benchmarks/"
mkdir -p "\${RESULTS_DIR}"
cd "\${RESULTS_DIR}"
true > "\$LOG_FILE"
FPS_VALUES=\$( grep -A3 "frames per second" \$(ls -t | grep -P "benchmark_.*[0-9]+.txt" | head -n 1) | tail -n 3 )
cat benchmark_*.txt >> "\$LOG_FILE"
echo "\${FPS_VALUES}" >> "\$LOG_FILE"
EOM
# Make the generated launcher executable for the harness to invoke.
chmod +x tww2.sh
# Create the template preferences file
#
echo "Generating settings template"
# Registry-style preferences template consumed by the benchmark harness.
# @tokens@ (e.g. @gfx_aa@, @screen_width@, @benchmark_name@) are substituted
# by the caller before the file is handed to the game. The fixed values
# disable pausing, splash screens and update prompts so benchmark runs are
# fully unattended.
cat > preferences.template.xml <<- EOM
<?xml version="1.0" encoding="UTF-8"?>
<registry>
<key name="HKEY_CLASSES_ROOT">
</key>
<key name="HKEY_CURRENT_CONFIG">
</key>
<key name="HKEY_CURRENT_USER">
<key name="AutoValueRemap">
<key name="GPURemap">
<key name="keys">
<value name="Software\IndirectX\Direct3D\Config" type="integer">1</value>
</key>
<key name="values">
<value name="Software\Feral Interactive\Total War WARHAMMER II\Setup\FullScreen" type="integer">1</value>
<value name="Software\Feral Interactive\Total War WARHAMMER II\Setup\ScreenH" type="integer">1</value>
<value name="Software\Feral Interactive\Total War WARHAMMER II\Setup\ScreenW" type="integer">1</value>
</key>
</key>
</key>
<key name="Software">
<key name="Feral Interactive">
<key name="Total War WARHAMMER II">
<key name="Setup">
<!-- disable pausing -->
<value name="AllowPausing" type="integer">0</value>
<value name="PauseMoviesOnPause" type="integer">0</value>
<value name="PauseOnSuspend" type="integer">0</value>
<value name="PauseSoundOnPause" type="integer">0</value>
<value name="PauseTimersOnPause" type="integer">0</value>
<value name="AllowSendUsageData" type="integer">0</value>
<!-- Don't show splash screen -->
<value name="GameOptionsDialogLastTab" type="integer">60000</value>
<value name="GameOptionsDialogShouldShow" type="integer">0</value>
<value name="GameOptionsDialogShouldShowBigPicture" type="integer">0</value>
<value name="GameOptionsDialogShown" type="integer">1</value>
<!-- Disable Splash Screen Warnings -->
<value name="SoftwareUpdatedAskedUser" type="integer">1</value>
<value name="SoftwareUpdatedCanCheck" type="integer">0</value>
<key name="GraphicsSettings">
<value name="advice_level" type="integer">2</value>
<value name="advisor_mode" type="integer">2</value>
<value name="alliance_faction_colours" type="integer">0</value>
<value name="audio_api_type" type="integer">0</value>
<value name="audio_mute_in_background" type="integer">1</value>
<value name="audio_quality" type="integer">0</value>
<value name="audio_speaker_configuration" type="integer">0</value>
<value name="battle_camera_shake_enabled" type="integer">1</value>
<value name="battle_defend_default" type="integer">0</value>
<value name="battle_groups_locked_by_default" type="integer">0</value>
<value name="battle_run_by_default" type="integer">1</value>
<value name="battle_skirmish_default" type="integer">1</value>
<value name="camera_move_speed" type="binary">0000000000005940</value>
<value name="camera_turn_speed" type="binary">0000000000001440</value>
<value name="cinematic_smoothing" type="binary">0000000000002ec0</value>
<value name="default_battle_camera_type" type="integer">0</value>
<value name="gfx_aa" type="integer">@gfx_aa@</value>
<value name="gfx_alpha_blend" type="integer">0</value>
<value name="gfx_blood_effects" type="integer">1</value>
<value name="gfx_brightness_setting" type="binary">000000403333f33f</value>
<value name="gfx_building_quality" type="integer">@gfx_building_quality@</value>
<value name="gfx_depth_of_field" type="integer">0</value>
<value name="gfx_device_type" type="integer">1</value>
<value name="gfx_distortion" type="integer">1</value>
<value name="gfx_effects_quality" type="integer">@gfx_effects_quality@</value>
<value name="gfx_first_run" type="integer">0</value>
<value name="gfx_fog" type="integer">@gfx_fog@</value>
<value name="gfx_gamma_setting" type="binary">0000000000000040</value>
<value name="gfx_gpu_select" type="integer">0</value>
<value name="gfx_grass_quality" type="integer">@gfx_grass_quality@</value>
<value name="gfx_lighting_quality" type="integer">@gfx_lighting_quality@</value>
<value name="gfx_resolution_scale" type="binary">000000000000f03f</value>
<value name="gfx_screen_space_reflections" type="integer">0</value>
<value name="gfx_shadow_quality" type="integer">@gfx_shadow_quality@</value>
<value name="gfx_sharpening" type="integer">1</value>
<value name="gfx_sky_quality" type="integer">@gfx_sky_quality@</value>
<value name="gfx_ssao" type="integer">@gfx_ssao@</value>
<value name="gfx_terrain_quality" type="integer">@gfx_terrain_quality@</value>
<value name="gfx_tesselation" type="integer">0</value>
<value name="gfx_texture_filtering" type="integer">@gfx_texture_filtering@</value>
<value name="gfx_texture_quality" type="integer">@gfx_texture_quality@</value>
<value name="gfx_tree_quality" type="integer">@gfx_tree_quality@</value>
<value name="gfx_unit_quality" type="integer">@gfx_unit_quality@</value>
<value name="gfx_unit_size" type="integer">@gfx_unit_size@</value>
<value name="gfx_unlimited_video_memory" type="integer">0</value>
<value name="gfx_vignette" type="integer">0</value>
<value name="gfx_vsync" type="integer">0</value>
<value name="gfx_water_quality" type="integer">@gfx_water_quality@</value>
<value name="invert_cam_x_axis" type="integer">0</value>
<value name="invert_cam_y_axis" type="integer">0</value>
<value name="mouse_wheel_sensitivity" type="integer">50</value>
<value name="porthole_3d" type="integer">@porthole_3d@</value>
<value name="proximity_fading" type="integer">1</value>
<value name="scroll_transition_enabled" type="integer">1</value>
<value name="show_projectile_trails" type="integer">1</value>
<value name="sound_advisor_volume" type="integer">100</value>
<value name="sound_master_enabled" type="integer">1</value>
<value name="sound_master_volume" type="integer">100</value>
<value name="sound_music_enabled" type="integer">1</value>
<value name="sound_music_volume" type="integer">100</value>
<value name="sound_sfx_volume" type="integer">100</value>
<value name="sound_vo_enabled" type="integer">1</value>
<value name="sound_vo_volume" type="integer">100</value>
<value name="subtitles" type="integer">0</value>
<value name="ui_colour_profile" type="integer">0</value>
<value name="ui_mouse_scroll" type="integer">1</value>
<value name="ui_scale" type="binary">000000000000f03f</value>
<value name="ui_unit_id_scale" type="binary">0000000000000000</value>
<value name="ui_unit_tooltip_expand_mode" type="integer">2</value>
<value name="voice_chat_enable" type="integer">1</value>
<value name="voice_chat_microphone_gain" type="integer">100</value>
<value name="voice_chat_microphone_gain_boost" type="integer">1</value>
<value name="voice_chat_transmit_only_when_key_pressed" type="integer">1</value>
<value name="voice_chat_volume" type="integer">100</value>
</key>
<value name="AvoidSwapInjectionDuringPGOW" type="integer">1</value>
<value name="ConstrainLiveWindowResize" type="integer">1</value>
<value name="DisableMomentumScrolling" type="integer">1</value>
<value name="DoneMinOS" type="integer">1</value>
<value name="DonePromotional" type="integer">1</value>
<value name="DoneUnsupported" type="integer">1</value>
<value name="ForceSystemFullscreen" type="integer">1</value>
<value name="FullScreen" type="integer">1</value>
<value name="GameSelected" type="integer">0</value>
<value name="LiveWindowResizePercentage" type="integer">0</value>
<value name="LiveWindowResizeThreshold" type="integer">0</value>
<value name="MinWindowedHeight" type="integer">0</value>
<value name="MinWindowedWidth" type="integer">0</value>
<value name="MissionControlDetection" type="integer">1</value>
<value name="ScreenH" type="integer">@screen_height@</value>
<value name="ScreenW" type="integer">@screen_width@</value>
<value name="SpecificationFirstLaunchCheck" type="integer">0</value>
<value name="UseDynamicShroud" type="integer">1</value>
<value name="UseRestrictedWorkGroupSize" type="integer">1</value>
<value name="UseSpecializedShaders" type="integer">1</value>
</key>
</key>
</key>
<key name="MacDoze">
<key name="Config">
<value name="ExtraCommandLine" type="string">game_startup_mode benchmark_auto_quit script/benchmarks/@benchmark_name@</value>
<value name="ExtraCommandLineEnabled" type="integer">1</value>
</key>
</key>
</key>
</key>
<key name="HKEY_LOCAL_MACHINE">
<key name="Hardware">
</key>
<key name="Software">
</key>
</key>
<key name="HKEY_USERS">
</key>
</registry>
EOM
f4f9f6d2b5e562c41fb7f05a42434175549cfe4f | Shell | denisab85/UofM | /Labs/6/build_and_run.sh | UTF-8 | 982 | 3.46875 | 3 | [] | no_license | #!/bin/bash
read -p "Enter CACHE_SIZE: " CACHE_SIZE
read -p "Enter MAX_NUMBER: " MAX_NUMBER
echo "---------------------------------"
printf "Compiling regular version with CACHE_SIZE = $CACHE_SIZE\n"
cd ./fib_project/fib
clang -c -D CACHE_SIZE=$CACHE_SIZE ./*.c
clang -Wall -o ../../fib ./*.o
cd ../../
echo "---------------------------------"
printf "Running fib 1..$MAX_NUMBER in a loop:\n\n"
for ((n=1; n<=MAX_NUMBER; n++)); do
./fib $n
done
printf "\nTiming the largest number [fib $MAX_NUMBER]:\n"
time ./fib $MAX_NUMBER
printf "\n\n---------------------------------\nCompiling 'even' version with CACHE_SIZE = $CACHE_SIZE\n"
cd ./fib_project/fib
clang -c -D CACHE_SIZE=$CACHE_SIZE -D EVEN ./*.c
clang -Wall -o ../../fib ./*.o
cd ../../
echo "---------------------------------"
printf "Running fib 1..$MAX_NUMBER in a loop:\n\n"
for ((n=1; n<=MAX_NUMBER; n++)); do
./fib $n
done
printf "\nTiming the largest number [fib $MAX_NUMBER]:\n"
time ./fib $MAX_NUMBER
| true |
bec1d591840d2ded757b54dd83c9204c35c85c3a | Shell | amuthyala511/BridgelabzAssignment | /day-9Assignment/empWageComputation.sh | UTF-8 | 972 | 3.59375 | 4 | [] | no_license | #!/bin/bash -x
echo "Welcome to Employee Wage Computation Program on Master Branch";
isPresent=1;
PART_TIME=1;
FULL_TIME=2;
EMP_WAGE_PER_HR=20;
NUM_WORKING_DAYS=20;
MAX_HRS_IN_MONTH=100;
totalEmpHrs=0;
totalWorkingDays=0;
Attendance=$(( $RANDOM%2 ));
if [[ $Attendance -eq $isPresent ]]
then
echo "The Employee is Present";
declare -A empDailyWage;
function getworkhrs()
{
local empCheck=$1;
case $empCheck in
$FULL_TIME) empHrs=8 ;;
$PART_TIME) empHrs=4 ;;
*) empHrs=0 ;;
esac
echo $empHrs;
}
while [[ $totalEmpHrs -lt $MAX_HRS_IN_MONTH && $totalWorkingDays -lt $NUM_WORKING_DAYS ]]
do
((totalWorkingDays++));
empCheck=$(( $RANDOM%3 ));
empHrs=$( getworkhrs $empCheck );
totalEmpHrs=$(( $totalEmpHrs+$empHrs ));
empDailyWage["Day" $totalWorkingDays]=$(( $empHrs * $EMP_WAGE_PER_HR ));
done
totalSalary=$(( $totalEmpHrs * $EMP_WAGE_PER_HR ));
echo ${empDailyWage[@]};
echo ${!empDailyWage[@]};
else
echo "The Employee is Absent";
fi
| true |
e9425da88ce24492864c5bde7951e422bf8d6a2b | Shell | nckz/ni | /install | UTF-8 | 608 | 3.78125 | 4 | [] | no_license | #!/bin/bash
# Install ni into a common location
# Author: Nick Zwart
# Date: 2019dec31
set -euo pipefail

####################################################################### INCLUDE

# assume config is in the same dir; `config` populates CMD_SRC/CMD_DEST
source "$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"/config.bash ; config

########################################################################## MAIN

# check for existing hook
if [ -f "${CMD_DEST}" ]; then
    echo "A script named ${CMD_SRC} is already installed:"
    echo "  ${CMD_DEST}"
    echo "Install aborted."
    exit 1
fi

# Copy the script into place (quoted: the paths come from config.bash and
# may contain spaces). Note: despite the old "link the script" comment,
# this has always installed a COPY, not a symlink.
cp -v "$CMD_SRC" "$CMD_DEST"
| true |
1e0f84651fe737f966206bb6fe2bcb4353e15a27 | Shell | kaiostech/B2G | /run-emulator.sh | UTF-8 | 1,933 | 3.671875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Get full path from where the script was executed, full path is needed to run emulator succesfully
B2G_HOME=$(cd $(dirname $BASH_SOURCE); pwd)
# Pull in the build configuration; DEVICE, DBG, GDBSERVER, B2G_DNS_SERVER,
# SDCARD_SIZE/SDCARD_IMG may be set by this file or by the environment.
. $B2G_HOME/load-config.sh
DEVICE=${DEVICE:-generic}
# Host tools live under out/host/<os>-x86/bin; `uname -s` is lower-cased to
# form the directory name (e.g. linux-x86, darwin-x86).
TOOLS_PATH=$B2G_HOME/out/host/`uname -s | tr "[[:upper:]]" "[[:lower:]]"`-x86/bin
# When DBG is non-empty, run the emulator under gdb.
DBG_CMD=""
if [ x"$DBG" != x"" ]; then
   DBG_CMD="gdb -args"
fi
# Arguments appended after "-qemu"; -s -S makes qemu open a gdb stub and
# wait for a debugger to attach before booting.
TAIL_ARGS=""
if [ x"$GDBSERVER" != x"" ]; then
   TAIL_ARGS="$TAIL_ARGS -s -S"
fi
dns_servers=""
if [ x"$B2G_DNS_SERVER" != x"" ]; then
   dns_servers=$B2G_DNS_SERVER
fi
# DNS servers from command line arg override ones from environment variable.
while [ $# -gt 0 ]; do
    case "$1" in
        --dns-server)
            shift; dns_servers=$1 ;;
        *)
            break ;;
    esac
    shift
done
emu_extra_args=""
if [ -n "$dns_servers" ]; then
    emu_extra_args="$emu_extra_args -dns-server $dns_servers"
fi
# Pick the emulator binary and prebuilt kernel for the target architecture;
# the ARM build additionally emulates a cortex-a8 CPU.
if [ "$DEVICE" = "generic_x86" ]; then
    EMULATOR=$TOOLS_PATH/emulator-x86
    KERNEL=$B2G_HOME/prebuilts/qemu-kernel/x86/kernel-qemu
else
    EMULATOR=$TOOLS_PATH/emulator
    KERNEL=$B2G_HOME/prebuilts/qemu-kernel/arm/kernel-qemu-armv7
    TAIL_ARGS="$TAIL_ARGS -cpu cortex-a8"
fi
# Create the sdcard image on first run (default 512M).
SDCARD_SIZE=${SDCARD_SIZE:-512M}
SDCARD_IMG=${SDCARD_IMG:-${B2G_HOME}/out/target/product/${DEVICE}/sdcard.img}
if [ ! -f "${SDCARD_IMG}" ]; then
  echo "Creating sdcard image file with size: ${SDCARD_SIZE} ..."
  ${TOOLS_PATH}/mksdcard -l sdcard ${SDCARD_SIZE} ${SDCARD_IMG}
fi
# DYLD_LIBRARY_PATH only matters on macOS hosts; it is harmless elsewhere.
export DYLD_LIBRARY_PATH="$B2G_HOME/out/host/darwin-x86/lib"
export PATH=$PATH:$TOOLS_PATH
# Launch the emulator (optionally under gdb). $DBG_CMD, $emu_extra_args and
# $TAIL_ARGS are deliberately left unquoted so they split into separate
# command-line arguments.
${DBG_CMD} $EMULATOR \
    -kernel $KERNEL \
    -sysdir $B2G_HOME/out/target/product/$DEVICE/ \
    -data $B2G_HOME/out/target/product/$DEVICE/userdata.img \
    -sdcard ${SDCARD_IMG} \
    -memory 512 \
    -partition-size 512 \
    -skindir $B2G_HOME/development/tools/emulator/skins \
    -skin HVGA \
    -verbose \
    -gpu on \
    -camera-back webcam0 \
    $emu_extra_args \
    -qemu $TAIL_ARGS
| true |
952c51e35ad6cf55419287402fa880914d707b24 | Shell | anthraxx/arch-pkgbuilds | /villoc-git/PKGBUILD | UTF-8 | 821 | 2.625 | 3 | [] | no_license | # Maintainer: Levente Polyak <levente[at]leventepolyak[dot]net>
pkgname=villoc-git
_gitname=villoc
# pkgver is a placeholder; makepkg regenerates it via the pkgver() function below.
pkgver=0.0.19.c207275
pkgrel=1
pkgdesc="Tool for visualization of heap operations"
arch=('any')
url="https://github.com/wapiflapi/villoc"
license=('MIT')
depends=('python')
makedepends=('git')
provides=('villoc')
conflicts=('villoc')
# VCS source: clone upstream git HEAD; checksums are skipped for VCS sources.
source=(${pkgname}::git+https://github.com/wapiflapi/${_gitname})
sha512sums=('SKIP')
# Derive the package version from the cloned repository:
# "0.0.<commit count>.<short hash>" keeps pacman versions monotonically increasing.
pkgver() {
  cd "${pkgname}"
  local rev_count commit_hash
  rev_count=$(git rev-list --count HEAD)
  commit_hash=$(git rev-parse --short HEAD)
  printf '%s.%s.%s' '0.0' "${rev_count}" "${commit_hash}"
}
# Install the tool and its docs into the staging root ($pkgdir):
# the script itself as /usr/bin/villoc plus license and README files.
package() {
  cd ${pkgname}
  #TODO: include pintool
  install -Dm 755 villoc.py "${pkgdir}/usr/bin/villoc"
  install -Dm 644 LICENSE.md "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"
  install -Dm 644 README.md "${pkgdir}/usr/share/doc/${pkgname}/README.md"
}
# vim:set ts=2 sw=2 et:
| true |
b55b0b7ffa304a657753edcd6e6872a5baf9a501 | Shell | nmunjal/setup | /setup_locust.sh | UTF-8 | 1,856 | 3.53125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Enumerate the supported package-manager families; system_type is set to
# one of these after detection below.
unknown_type=0
debian_type=1
redhat_type=2
system_type=$unknown_type
# Running step counter used only for progress messages.
steps=1
# Determine type of system - yum based or apt-get based
if type apt-get > /dev/null; then
    echo "$steps. Debian type system detected"
    sudo apt-get update
    sudo apt-get upgrade
    system_type=$debian_type
elif type yum > /dev/null; then
    echo "$steps. Redhat type system detected"
    sudo yum update
    system_type=$redhat_type
else
    echo "$steps. Unknown system - no apt-get, yum found"
    exit -1
fi
let "steps++"
# Install specific software
# Development headers needed to build gevent/pyzmq from source below.
echo "$steps. Installing g++"
if [ $system_type -eq $redhat_type ]; then
    sudo yum -y install http://epel.mirror.net.in/epel/6/x86_64/epel-release-6-8.noarch.rpm
    sudo yum -y install python-devel libevent-devel
elif [ $system_type -eq $debian_type ]; then
    sudo apt-get -y install python-dev libevent-dev libzmq-dev
fi
let "steps++"
# Some distros name the pip executable "python-pip"; prefer it when present.
pip="pip"
type python-pip > /dev/null 2>&1
if [ $? -eq 0 ]; then
    pip="python-pip"
fi
# Bug https://github.com/locustio/locust/issues/49
# Need to install latest gevent 1.0rc2
sudo $pip install cython # dependency for gevent1.0 rc
# Build and install gevent 1.0rc2 from a source tarball in /tmp.
cd /tmp/
rm 1.0* gevent* -rf
wget https://github.com/surfly/gevent/archive/1.0rc2.tar.gz
tar xvzf 1.0rc2.tar.gz
cd gevent-1.0rc2
sudo python setup.py install
cd
sudo $pip install locustio
# Build zeromq from source only if libzmq.so is not already installed.
sudo updatedb
locate libzmq.so
if [ $? -ne 0 ]; then
    cd /tmp/
    rm zeromq-3.2.3* -rf
    wget http://download.zeromq.org/zeromq-3.2.3.tar.gz
    tar xvzf zeromq-3.2.3.tar.gz
    cd zeromq-3.2.3
    ./configure && make
    if [ $? -eq 0 ]; then
        sudo make install
    else
        exit -1
    fi
fi
sudo $pip install gevent-zeromq
# pyzmq 2.2.0 is likewise built from a source tarball.
cd /tmp/
rm -rf pyzmq*
wget https://github.com/downloads/zeromq/pyzmq/pyzmq-2.2.0.tar.gz
tar xvzf pyzmq-2.2.0.tar.gz
cd pyzmq-2.2.0
sudo python setup.py install
sudo $pip install zmqrpc
| true |
f204c02a223ecd42d708ce12ea56ef26ea7ac779 | Shell | FauxFaux/debian-control | /c/chocolate-doom/chocolate-doom_3.0.0-4_amd64/preinst | UTF-8 | 355 | 2.890625 | 3 | [] | no_license | #!/bin/sh
set -e

# When installing fresh or upgrading from a version older than 2.1.0-2~,
# remove the heretic/hexen/strife alternatives registered by such versions.
# (`lt-nl` treats an empty/absent old version as "later", so a fresh install
# with no $2 does nothing.)
case "$1" in
install|upgrade)
  if dpkg --compare-versions "$2" lt-nl "2.1.0-2~"; then
    update-alternatives --remove heretic /usr/games/chocolate-heretic
    update-alternatives --remove hexen /usr/games/chocolate-hexen
    update-alternatives --remove strife /usr/games/chocolate-strife
  fi
  ;;
esac

exit 0
| true |
157c6fa1ea211285e225ec18636bb9eb40c1f0e8 | Shell | libingtong/iOSRETools | /Installer.sh | UTF-8 | 825 | 2.6875 | 3 | [] | no_license | #/bin/bash
# macOS toolchain bootstrap for iOS reverse engineering:
# Xcode CLI tools, Homebrew, then the individual tools below.
echo "Installing Xcode Commandline Tools"
xcode-select --install
echo "Installing Homebrew"
ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
echo "Installing dpkg"
brew install dpkg
echo "Installing ldid"
brew install ldid
# Theos: export THEOS for future shells, then clone into it.
echo "Installing Theos"
echo "export THEOS=\"/opt/theos\"" >>~/.bash_profile
source ~/.bash_profile
sudo git clone git://github.com/DHowett/theos.git $THEOS
echo "Installing wget"
brew install wget
# Reveal: download the app bundle, make the binary executable and move it
# into /Applications.
echo "Installing Reveal"
wget http://download.revealapp.com/Reveal.app.zip
unzip -XK ./Reveal.app.zip
chmod +x ./Reveal.app/Contents/MacOS/Reveal
mv ./Reveal.app /Applications/Reveal.app
# Hopper: Hopper.py presumably downloads the archive unpacked below.
# NOTE(review): "Hoppper-Latest.zip" (triple p) — verify this matches the
# file name that Hopper.py actually produces.
echo "Installing Hopper"
./Hopper.py
unzip -XK ./Hoppper-Latest.zip
mv './Hopper Disassembler v3.app' '/Applications/Hopper Disassembler v3.app'
./debugserver.py
| true |
d98ae3d3ad0e34a75cda463823ef1e931f62ad01 | Shell | statik/dotfiles | /bin/pandoc | UTF-8 | 478 | 3.109375 | 3 | [] | no_license | #!/usr/bin/env bash
# Locate the real pandoc binary across common Homebrew/Linuxbrew prefixes
# and exec it with the original arguments.
if [[ -f $HOME/.homebrew/bin/pandoc ]]; then
    pandoc_cmd=${HOME}/.homebrew/bin/pandoc
elif [[ -f /opt/homebrew/bin/pandoc ]]; then
    pandoc_cmd=/opt/homebrew/bin/pandoc
elif [[ -f /home/linuxbrew/.linuxbrew/bin/pandoc ]]; then
    pandoc_cmd=/home/linuxbrew/.linuxbrew/bin/pandoc
elif [[ -f $HOME/.linuxbrew/bin/pandoc ]]; then
    pandoc_cmd=${HOME}/.linuxbrew/bin/pandoc
else
    # Fail loudly: previously this fell through with pandoc_cmd unset, so
    # `exec ${pandoc_cmd} "$@"` degenerated to `exec "$@"` and executed the
    # first *argument* as a command. Report on stderr and exit non-zero.
    echo "Could not find real pandoc location, FIXME" >&2
    exit 1
fi
exec "${pandoc_cmd}" "$@"
| true |
d1e015be5503f1b3a4df07596aa00ce0fd21a523 | Shell | qq1624646454/jllutils | /jll.manual.ldap.sh | UTF-8 | 25,146 | 3.015625 | 3 | [] | no_license | #!/bin/bash
# Copyright(c) 2016-2100. root. All rights reserved.
#
# FileName: jll.manual.ldap.sh
# Author: root
# Email: 493164984@qq.com
# DateTime: 2020-12-23 19:44:07
# ModifiedTime: 2021-02-05 17:47:43
# Resolve this script's directory and source the shared BashShellLibrary,
# which defines the color/format variables (e.g. ${Bred}, ${Fseablue}, ${AC})
# that are expanded inside the large manual heredoc below.
# NOTE(review): paths are unquoted, so the install location must not
# contain whitespace — confirm.
JLLPATH="$(which $0)"
JLLPATH="$(dirname ${JLLPATH})"
source ${JLLPATH}/BashShellLibrary
### Color Echo Usage ###
# Lfn_Sys_ColorEcho ${CvFgRed} ${CvBgWhite} "hello"
# echo -e "hello \033[0m\033[31m\033[43mworld\033[0m"
more >&1<<EOF
Lightweight Directory Access Protocol,轻量级是相对于重量级X.500协议而言
${Bred}${Fseablue} ${AC}
${Bred}${Fseablue} Environment ${AC}
${Bred}${Fseablue} ${AC}
https://segmentfault.com/a/1190000002607130?utm_source=sf-related
System - ubuntu: 14.04 x86_64 ( /usr/share/BerkeleyDB, /usr/share/OpenLDAP )
OpenLDAP - slapd: 2.4.31
DataBase - berkeley-db: 5.1.29
${Bred}${Fseablue} ${AC}
${Bred}${Fseablue} Install ${AC}
${Bred}${Fseablue} ${AC}
${Bblue}${Fgreen} Prepare${AC}
${Fyellow}apt-get install build-essential libssl-dev libsasl2-dev -y ${AC}
${Bblue}${Fgreen} Download & Unpacking ${AC}
${Fyellow}wget http://download.oracle.com/berkeley-db/db-5.1.29.NC.tar.gz${AC}
${Fyellow}wget ftp://ftp.openldap.org/pub/OpenLDAP/openldap-release/openldap-2.4.31.tgz${AC}
${Fyellow}tar -zvxf db-5.1.29.NC.tar.gz -C ./ ${AC}
${Fyellow}tar -zvxf openldap-2.4.31.tgz -C ./ ${AC}
${Bblue}${Fgreen} Building BerkeleyDB ( gcc 4.7+ ) ${AC}
${Fyellow}cd db-5.1.29.NC/build_unix/ ${AC}
${Fyellow}../dist/configure --prefix=/usr/share/BerkeleyDB ${AC}
${Fyellow}make & make install ${AC}
${Fyellow}cd - >/dev/null ${AC}
${Bblue}${Fgreen} Building OpenLDAP ( gcc 4.7+ ) ${AC}
${Fyellow}vim env-for-openldap${AC}
export BERKELEYDB_HOME=/usr/share/BerkeleyDB
export CPPFLAGS="-I\${BERKELEYDB_HOME}/include"
export LDFLAGS="-L\${BERKELEYDB_HOME}/lib"
export LD_LIBRARY_PATH="\${BERKELEYDB_HOME}/lib"
export LDAP_HOME="/usr/share/OpenLDAP"
export PATH="\${PATH}:\${BERKELEYDB_HOME}/bin:\${LDAP_HOME}/bin:\${LDAP_HOME}/sbin:\${LDAP_HOME}/libexec"
${Fyellow}source env-for-openldap${AC}
${Fyellow}cd openldap-2.4.31 ${AC}
${Fyellow}./configure --prefix=\${LDAP_HOME} # /usr/share/OpenLDAP ${AC}
${Fyellow}make depend${AC}
${Fyellow}make${AC}
${Fyellow}make install${AC}
${Fyellow}cd - >/dev/null ${AC}
${Fyellow}cp -rf env-for-openldap \${LDAP_HOME}/libexec/ ${AC}
${Fyellow}echo "\${LDAP_HOME}/libexec/env-for-openldap" >> \${HOME}/.bashrc ${AC}
${Fblue}#Let openldap server named slapd is started followwing by system startup, ${AC}
${Fblue}#and it can be controlled by service ${AC}
${Fyellow}cp -rvf /etc/init.d/skeleton /etc/init.d/slapd_openldap
${Fyellow}vim /etc/init.d/slapd${AC}
...
DESC="slapd is associated with OpenLDAP Server"
NAME=slapd
+ EXECPATH=/usr/share/OpenLDAP
DAEMON_ARGS="-F \${EXECPATH}/etc/openldap/slapd.d"
- PIDFILE=/var/run/\$NAME.pid
...
+ [ -x "\${EXECPATH}/libexec/env-for-openldap" ] || exit 0
+ . \${EXECPATH}/libexec/env-for-openldap
+ [ -d "\${EXECPATH}/etc/openldap/slapd.d" ] || mkdir -p \${EXECPATH}/etc/openldap/slapd.d
+ [ x"$(ls ${EXECPATH}/etc/openldap/slapd.d/* 2>/dev/null)" != x ] || \\\\
+ slapadd -n 0 -F \${EXECPATH}/etc/openldap/slapd.d -l \${EXECPATH}/etc/openldap/slapd.ldif
- [ -r /etc/default/\$NAME ] && . /etc/default/\$NAME
- . /lib/init/vars.sh
#
# JLLim: Remove "--pidfile \$PIDFILE" from all line of start-stop-daemon
# as follows
- start-stop-daemon --start --quiet --pidfile \$PIDFILE --exec \$DAEMON --test > /dev/null \\
+ start-stop-daemon --start --quiet --exec \$DAEMON --test > /dev/null \\
- start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile \$PIDFILE --name \$NAME
+ start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --name \$NAME
- start-stop-daemon --stop --signal 1 --quiet --pidfile \$PIDFILE --name \$NAME
+ start-stop-daemon --stop --signal 1 --quiet --name \$NAME
${Fyellow}update-rc.d slapd defaults 27${AC} # Install the startup service to rc 1,2,3,4,5
${Fyellow}service slapd start${AC} # Start to run openldap server named slapd
${Fyellow}service slapd stop${AC} # Stop to run openldap server named slapd
${Bblue}${Fgreen} ${AC}
${Bblue}${Fgreen} Testing ${AC}
${Bblue}${Fgreen} ${AC}
${Fyellow}service slapd start${AC}
${Fyellow}ldapsearch -x -H ldap://127.0.0.1 -D "cn=Manager,dc=my-domain,dc=com" -w secret -b '' -s base '(objectclass=*)' namingContexts -LLL ${AC}
${Fyellow}ldapsearch -x -H ldap:/// -D "cn=Manager,dc=my-domain,dc=com" -w secret -b '' -s base '(objectclass=*)' namingContexts -LLL ${AC}
${Fyellow}ldapsearch -x -D "cn=Manager,dc=my-domain,dc=com" -w secret -b '' -s base '(objectclass=*)' namingContexts -LLL ${AC}
dn:
namingContexts: dc=my-domain,dc=com
#
# add some the new entry into dit
#
${Fyellow}vim example.ldif ${AC}
dn: dc=example,dc=com
objectclass: dcObject
objectclass: organization
o: Example Company
dc: example
dn: cn=Manager,dc=example,dc=com
objectclass: organizationalRole
cn: Manager
${Fyellow}ldapadd -x -H ldap:/// -D "cn=Manager,dc=my-domain,dc=com" -w secret -f example.ldif ${AC}
adding new entry "dc=example,dc=com"
ldap_add: Server is unwilling to perform (53)
additional info: no global superior knowledge
${Fyellow}ldapsearch -x -H ldap://172.16.10.197 -D "cn=Manager,dc=my-domain,dc=com" -w secret -b 'dc=example,dc=com' -s base '(objectclass=*)' namingContexts -LLL ${AC}
No such object (32)
${Bblue}${Fgreen} ${AC}
${Bblue}${Fgreen} OpenLDAP Server and Client + BerkeleyDB Deployment ${AC}
${Bblue}${Fgreen} ${AC}
${Bblue}${Fgreen} /usr/share/OpenLDAP: OpenLDAP Server and Client Programs${AC}
[Server Program]
${Fseablue}/usr/share/OpenLDAP/libexec/ ${AC}
slapd : OpenLDAP Server Program which it is started up followwing by system startup.
env-for-openldap : Environment Various file which is sourced automatically when login stage.
[Server Program]
${Fseablue}/usr/share/OpenLDAP/sbin/ ${AC}
* : are linked to /usr/share/OpenLDAP/libexec/slapd
[Server Backend Database Program]
${Fseablue}/usr/share/BerkeleyDB/ ${AC}
*
[Server Database Data File]
${Fseablue}/usr/share/OpenLDAP/var/openldap-data/ ${AC}
*
[Server Program Configuration]
${Fseablue}/usr/share/OpenLDAP/etc/ ${AC}
* : slapd.conf or OLC(Open Ldap Configuration) namely cn=config is supported started from 2.4
${Fred}Noted: slapd.conf maybe abandoned in the future.${AC}
[Client Program For CLIs]
${Fseablue}/usr/share/OpenLDAP/bin/ ${AC}
ldapadd : linked to ldapmodify
ldapmodify :
${Fgreen}ldapsearch${AC} :
# -H <ldap://target-ip> : 指定远程ldap服务器主机地址
# -x : 简单验证,即使用用户+密码方式验证
# -D <RootDN> : 使用某个用户身份在ldap服务器上执行这条指令,用户也是一条DN,通常是RootDN
# -w <RootDN-Password> : 使用-D所指定的用户的密码,也可以用-W直接在执行时提示密码输入
# [ -b <BaseDN> ] : 可选,目录服务的数据是目录树,指定从某个目录作为基础节点开始执行操作
# [ <matching-expression> ] : 可选,规则匹配
ldapsearch -H ldap://127.0.0.1 -x -D "cn=root,dc=jllim,dc=com" -w 123456 \\
-b "dc=jllim,dc=com" "uid=*"
${Fgreen}ldapwhoami${AC} :
ldapwhoami -H ldap://127.0.0.1 -x -D "cn=root,dc=jllim,dc=com" -w 123456
${Fgreen}ldapdelete${AC} :
ldapcompare :
ldapexop :
ldapmodrdn :
ldappasswd :
ldapurl :
${Bblue}${Fgreen}/usr/share/BerkeleyDB: OpenLDAP Server Database Programs${AC}
*
${Bblue}${Fgreen}概述${AC}
LDAP 轻量级目录访问协议
--------
LDAP的目录信息的总体组织结构
--------
DIT: Directory Information Tree 目录信息树
Entry: The DIT is made up of one or more Entry, unit of DIT
目录信息树中一条记录,称为条目,每个条目有自己唯一的可区别的名称(DN)
ObjectClass: 对象类,对象类可以继承,用于定义Entry的数据类型,即属性
Property: 属性,描述Entry的某个类型
--------
LDAP的目录条目常用字段
--------
${Fseablue}Schema${AC} 一个条目中各个字段是由Schema定义的,Schema文件一般位于 etc/openldap/schema/*
core.schema : OpenLDAP的核心schema.【它是必须加载的】
inetorgperson.schema : 它仅次于core, 我们添加账号时,很多要使用到它里面定义的objectClass.
dyngroup.schema : 这是定义组时使用的schema,包括要使用sudo.schema时,也需要它。
ppolicy.schema : 若需要做密码策略时,需要导入此schema.
nis.schema : 网络信息服务(FYI),也是一种集中账号管理实现.
java.schema : 若需要使用Java程序来操作openLDAP时,需要使用它,让OpenLDAP支持Java.
cosine.schema : Cosine 和 Internet X.500 (比较有用, 不太懂.)
misc.schema : Assorted (experimental)
openldap.schema : OpenLDAP Project(experimental)
sudo.schema: 定义sudo规则
常用字段:
dn(Distinguished Name):
“uid=songtao.xu,ou=oa组,dc=example,dc=com”,一条记录的位置(唯一)
uid(User Id):
用户ID songtao.xu(一条记录的ID), 这里的UID不是Linux系统上的UID,这里的UID是用户登录LDAP的账号.
ou(Organization Unit):
组织单位,组织单位可以包含其他各种对象(包括其他组织单元),如“oa组”(一条记录的所属组织)
dc(Domain Component):
域名的部分,其格式是将完整的域名分成几部分,
如域名为example.com变成dc=example,dc=com(一条记录的所属位置)
为什么会设计成域名形式?
因为openldap是支持基于网络访问,即客户端和服务器可以是不同设备.
cn(Common Name):
公共名称,如“Thomas Johansson”(一条记录的名称)
sn(Surname):
姓,如“许”, 只能写姓
giveName: 只能写名字
rdn(Relative dn):
相对辨别名,类似于文件系统中的相对路径,
它是与目录树结构无关的部分,如“uid=tom”或“cn= Thomas Johansson”
传统方式的组织形式,聚焦于国别以及地理信息为上层构成,常用的字段有:
c(Country):
国家,如“CN”或“US”等。
st(State):
州/区/省
o(Organization):
组织,它更多地代表是子公司.
e.g:
cn=Barbara Jenson,ou=Sales,o=Acme,st=California,c=US
互联网域名的组织形式,基于域名,上层构成直接使用域名,能结合DNS相关的技术:
e.g:
uid=babs,ou=People,dc=example,dc=com
--------
${Bred}${Fseablue} ${AC}
${Bred}${Fseablue} Initialize and Configuration ${AC}
${Bred}${Fseablue} ${AC}
配置OpenLDAP服务器是整个方案中最为麻烦的部分,网络上参考几乎都不对(很多教程停留在2008年甚至1998年)
而正确的配置方法是通过ldapmodify命令执行一系列自己写好的ldif文件,而不是修改任何OpenLDAP预装好的配
置文件.记住,OpenLDAP预装好的ldif配置是通过schema文件自动生成的,不应该被直接修改
(e.g: This file was automatically generated from collective.schema)
${Fred}ldif文件,即LDAP Interchange Format${AC}
LDIF 文件每行的结尾不允许有空格或者制表符
LDIF 文件允许相关属性可以重复赋值并使用
LDIF 文件以.ldif结尾命名
LDIF 文件中以#号开头的一行为注释,可以作为解释使用
LDIF 文件所有的赋值方式为: 属性:[空格]属性值
LDIF 文件通过空行来定义一个条目,空格前为一个条目,空格后为另一个条目的开始
--------
# 注释,用于对条目进行解释
dn: 条目名称
objectClass(对象类): 属性值
objectClass(对象类): 属性值
...
${Bblue}${Fgreen} 配置说明 ${AC}
etc/slapd.conf (Abandon)
旧版本的默认使用它实现数据库文件的生成,但在2.4版本开始官方就不推荐了,
因为通过它来配置LDAP,主要嫌它太繁琐,修改完配置必须重新生成OpenLDAP数据库,
这就意味着,OpenLDAP服务器必须停机. 建议cn=config方式.
cn=config (Recommend)
相对slapd.conf方式而言,通过ldapmodify修改后立即生效而不需要重启OpenLDAP服务器,属于热部署.
#
#DB_CONFIG.example会自动生成DB_CONFIG,所以不需要特别处理
#/usr/share/OpenLDAP/var/openldap-data/DB_CONFIG.example
#
${Bblue}${Fgreen} 初始化操作 ${AC}
${Bblue}${Fgreen} Prepare${AC}
${Bblue}${Fgreen} Building OpenLDAP ( gcc 4.7+ ) ${AC}
${Fred} ${AC}
${Fred}配置OpenLDAP有两种方法,一种是修改slapd.conf实现配置,一种是修改数据库实现配置${AC}
${Fred}[1]修改slapd.conf完成配置${AC}
${Fyellow}vim \${LDAP_HOME}/etc/openldap/slapd.conf${AC}
...
include /usr/share/OpenLDAP/etc/openldap/schema/core.schema
+ include /usr/share/OpenLDAP/etc/openldap/schema/cosine.schema
+ include /usr/share/OpenLDAP/etc/openldap/schema/inetorgperson.schema
...
+ #suffix "dc=my-domain,dc=com"
+ suffix "dc=reachxm,dc=com"
+ #rootdn "cn=Manager,dc=my-domain,dc=com"
${Fred}服务管理员可以对目录树进行更删改查等管理操作,以下指定管理员用户名${AC}
+ rootdn "cn=root,dc=reachxm,dc=com"
+ #rootpw secret
+ #JLLim: slappasswd -s 123456
+ # {SSHA}w59E+EGqCcMhdTVGlzMeCXDsUqAFD+EU
+ rootpw 123456
...
+ # JLLim logging is tracked
+ loglevel 256
+ logfile /usr/share/OpenLDAP/var/slapd.log
${Fred}验证管理员密码,ldapwhoami不带参数时返回为anonymous,-D绑定为管理员,-W提示密码输入,-x使用密码${AC}
${Fyellow}ldapwhoami -x -D cn=root,dc=reachxm,dc=com -W ${AC}
Enter LDAP Password:
dn:cn=root,dc=reachxm,dc=com
${Fred}在另一台ldap客户机上查询${AC}
${Fyellow}/usr/share/OpenLDAP/bin/ldapwhoami -x -w 123456 -D "cn=root,dc=reachxm,dc=com" -H ldap://172.16.10.197 ${AC}
${Fred}[2]修改数据库完成配置${AC}
${Bblue}${Fgreen} Start to run slapd followwing by system startup ${AC}
${Bblue}${Fgreen} slapd is OpenLDAP Server ${AC}
${Fyellow}cp -rf /etc/init.d/skeleton /etc/init.d/openldap${AC}
${Fyellow}vim /etc/init.d/openldap${AC}
${Fyellow}source /usr/share/OpenLDAP/libexec/env-for-openldap${AC}
${Fyellow}/usr/share/OpenLDAP/libexec/slapd -d 256 ${AC} # start slapd with debug log on console
${Fyellow}killall slapd ${AC} # stop slapd by killing the process
${Bblue}${Fgreen} Import Data during slapd running${AC}
${Fyellow}vim test.ldif${AC}
dn: dc=reachxm,dc=com
dc: reachxm
o: ReachAIoT.Inc
objectClass: dcObject
objectClass: organization
dn: cn=root,dc=reachxm,dc=com
cn: root
objectClass: organizationalRole
dn: ou=itsection,dc=reachxm,dc=com
ou: itsection
objectClass: organizationalUnit
dn: cn=sean,ou=itsection,dc=reachxm,dc=com
ou: itsection
cn: sean
sn: zhouxiao
objectClass: inetOrgPerson
objectClass: organizationalPerson
${Fyellow}ldapadd -x -D "cn=root,dc=reachxm,dc=com" -W -f test.ldif ${AC}
Enter LDAP Password: ${Fred}123456${AC}
adding new entry "dc=reachxm,dc=com"
adding new entry "cn=root,dc=reachxm,dc=com"
adding new entry "ou=itsection,dc=reachxm,dc=com"
adding new entry "cn=sean,ou=itsection,dc=reachxm,dc=com"
${Bblue}${Fgreen} OpenLDAP Logging Configuration ${AC}
${Fyellow}/usr/share/OpenLDAP/libexec/slapd -d ? ${AC}
Installed log subsystems:
Any (-1, 0xffffffff)
Trace (1, 0x1)
Packets (2, 0x2)
Args (4, 0x4)
Conns (8, 0x8)
BER (16, 0x10)
Filter (32, 0x20)
Config (64, 0x40)
ACL (128, 0x80)
Stats (256, 0x100) #JLLim: recommend
Stats2 (512, 0x200)
Shell (1024, 0x400)
Parse (2048, 0x800)
Sync (16384, 0x4000)
None (32768, 0x8000)
NOTE: custom log subsystems may be later installed by specific code
${Bblue}${Fgreen} Resource Associated with OpenLDAP ${AC}
OpenLDAP的相关配置文件信息
/etc/openldap/slapd.conf:OpenLDAP的主配置文件,记录根域信息,管理员名称,密码,日志,权限等
/etc/openldap/slapd.d/*:这下面是/etc/openldap/slapd.conf配置信息生成的文件,每修改一次配置信息,这里的东西就要重新生成
/etc/openldap/schema/*:OpenLDAP的schema存放的地方
/var/lib/ldap/*:OpenLDAP的数据文件
/usr/share/openldap-servers/slapd.conf.obsolete 模板配置文件
/usr/share/openldap-servers/DB_CONFIG.example 模板数据库配置文件
OpenLDAP/var/openldap-data/__db.001
?? OpenLDAP/var/openldap-data/__db.002
?? OpenLDAP/var/openldap-data/__db.003
?? OpenLDAP/var/openldap-data/__db.004
?? OpenLDAP/var/openldap-data/__db.005
?? OpenLDAP/var/openldap-data/__db.006
?? OpenLDAP/var/openldap-data/alock
?? OpenLDAP/var/openldap-data/dn2id.bdb
?? OpenLDAP/var/openldap-data/id2entry.bdb
?? OpenLDAP/var/openldap-data/log.0000000001
?? OpenLDAP/var/run/
${Bblue}${Fgreen}OpenLDAP Listening Ports: ${AC}
389 (cleartext by default)
636 (ciphertext)
${Fyellow}netstat -anp | grep slapd${AC}
Active Internet connections (servers and established)
Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name
tcp 0 0 0.0.0.0:389 0.0.0.0:* LISTEN 19656/slapd
tcp6 0 0 :::389 :::* LISTEN 19656/slapd
Proto RefCnt Flags Type State I-Node PID/Program name Path
unix 2 [ ] DGRAM 285590 19656/slapd
${Fyellow}netstat -ntplu | grep -i :389${AC}
tcp 0 0 0.0.0.0:389 0.0.0.0:* LISTEN 2031/slapd
tcp6 0 0 :::389 :::* LISTEN 2031/slapd
core.schema:
cosine.schema:1436:# top OBJECT-CLASS
cosine.schema-1437-# MUST CONTAIN {
cosine.schema-1438-# objectClass}
cosine.schema-1439-# ::= {objectClass 0}
cosine.schema-1440-#
${Fyellow}GUI${AC}
phpldapadmin : PHP
LDAP Account Manager (LAM) : PHP
Web2LDAP : Python3
LDAPadmin : windows
${Bblue}${Fgreen} OpenLDAP Management Commands ${AC}
ldapsearch: 搜索OpenLDAP目录树条目
ldapadd: 通过LDIF格式,添加目录树条目
ldapdelete: 删除OpenLDAP目录树条目
ldapmodify: 修改OpenLDAP目录树条目
ldapwhoami: 检验OpenLDAP用户的身份
ldapmodrdn: 修改OpenLDAP目录树DN条目
ldapcompare: 判断DN值和指定参数值是否属于同一个条目
ldappasswd: 修改OpenLDAP目录树用户条目实现密码重置
slaptest: 验证slapd.conf文件或cn=配置目录(slapd.d)
slapindex: 创建OpenLDAP目录树索引,提高查询效率
slapcat: 将数据条目转换为OpenLDAP的LDIF文件
<Command> ? #show the help detail
${Fyellow}ldapadd ?${AC}
#ldapadd is linked to ldapmodify
Add or modify entries from an LDAP server
${Bred}${Fwhite}root@BS-010197:.# cat <<EOFL | ldapmodify -Y EXTERNAL -H ldapi:///${AC}
dn: olcDatabase={0}config,cn=config
changetype: modify
delete: olcRootDN
dn: olcDatabase={0}config,cn=config
changetype: modify
add: olcRootDN
olcRootDN: cn=Amin,cn=config
dn: olcDatabase={0}config,cn=config
changetype: modify
add: olcRootPW
olcRootPW: 654321
EOFL
${Fred}ISSUE is what about ldapadd: not compiled with SASL support${AC}
${Fgreen}Check whether ldapwhoami is linked against libsasl2${AC}
root@BS-010197:/usr/share/OpenLDAP/bin# ldd ldapwhoami
linux-vdso.so.1 => (0x00007ffce351e000)
libssl.so.1.0.0 => /lib/x86_64-linux-gnu/libssl.so.1.0.0 (0x00007f1d8290f000)
libcrypto.so.1.0.0 => /lib/x86_64-linux-gnu/libcrypto.so.1.0.0 (0x00007f1d82532000)
libresolv.so.2 => /lib/x86_64-linux-gnu/libresolv.so.2 (0x00007f1d82317000)
libc.so.6 => /lib/x86_64-linux-gnu/libc.so.6 (0x00007f1d81f4e000)
libdl.so.2 => /lib/x86_64-linux-gnu/libdl.so.2 (0x00007f1d81d4a000)
/lib64/ld-linux-x86-64.so.2 (0x00007f1d82b6e000)
${Fyellow}aptitude install -y libsasl2-dev${AC} #re-build then re-install
root@BS-010197:/usr/share/OpenLDAP/bin# ldd ldapwhoami
linux-vdso.so.1 => (0x00007ffc727dc000)
${Fred}libsasl2.so.2 => /usr/lib/x86_64-linux-gnu/libsasl2.so.2 (0x00007f7f30d92000)${AC}
libssl.so.1.0.0 => /lib/x86_64-linux-gnu/libssl.so.1.0.0 (0x00007f7f30b33000)
libcrypto.so.1.0.0 => /lib/x86_64-linux-gnu/libcrypto.so.1.0.0 (0x00007f7f30756000)
libresolv.so.2 => /lib/x86_64-linux-gnu/libresolv.so.2 (0x00007f7f3053b000)
libc.so.6 => /lib/x86_64-linux-gnu/libc.so.6 (0x00007f7f30172000)
libdl.so.2 => /lib/x86_64-linux-gnu/libdl.so.2 (0x00007f7f2ff6e000)
/lib64/ld-linux-x86-64.so.2 (0x00007f7f30fad000)
${Fred}ISSUE is what about ldap_sasl_interactive_bind_s: Can't contact LDAP server (-1) ${AC}
------------------------------------------------
https://www.cnblogs.com/wn1m/p/10700236.html
------------------------------------------------
https://www.it610.com/article/5623624.htm
<<轻型目录访问协议>>
Lightweight Directory Access Protocol,缩写:LDAP)是一个开放的,中立的,工业标准的应用协议,通过IP协议提供访问控制和维护分布式信息的目录信息.
市面上只要你能够想像得到的所有工具软件,全部都支持LDAP协议。比如说你公司要安装一个项目管理工具,那么这个工具几乎必然支持LDAP协议,你公司要安装一个bug管理工具,这工具必然也支持LDAP协议,你公司要安装一套软件版本管理工具,这工具也必然支持LDAP协议。LDAP协议的好处就是你公司的所有员工在所有这些工具里共享同一套用户名和密码,来人的时候新增一个用户就能自动访问所有系统,走人的时候一键删除就取消了他对所有系统的访问权限,这就是LDAP。
有些领域并不像前端世界那么潮那么性感,但是缺了这个环节又总觉得很别扭。如果深入到运维的世界,你会发现大部分工具还活在上个世纪,产品设计完全反人类,比如cn, dc, dn, ou这样的命名方式,如果不钻研个一天两天,鬼知道它在说什么,比如说dns,dns是什么鬼?域名吗?不是,它只是某个懒惰的工程师起了dn这么一个缩写,再加一个复数,就成了dns,和域名服务器没有任何关系;cn是什么?中国的缩写?你想多了,这和中国没有任何关系。经过一系列这样疯狂的洗脑之后,你才能逐渐明白LDAP到底想干什么。抛弃你所有的认知,把自己当成一个什么都不懂的幼儿园孩子,然后我们从头学起LDAP.
如果你搜索OpenLDAP的安装指南,很不幸地告诉你,网上不管中文的英文的,90%都是错的,它们都还活在上个世纪,它们会告诉你要去修改一个叫做slapd.conf的文件,基本上看到这里,你就不用往下看了,这个文件早就被抛弃,新版的OpenLDAP里根本就没有这个文件!取而代之的是slapd.d的文件夹,然后另一部分教程会告诉你,让你修改这个文件夹下的某一个ldif文件,看到这里,你也不用往下看了,你又看到了伪教程,因为这个文件夹下的所有文件的第一行都明确地写着:『这是一个自动生成的文件,不要修改它!』你修改了它之后,它的md5校验值会匹配不上,造成更多的问题。你应该用ldapmodify来修改这个文件,而关于ldapmodify的教程,可以说几乎就没有!我一开始不知道面临这样荒谬的处境,很多运维人员是怎么活下来的,不过等我自己配通了以后,真的是累到连写教程的精力都没有了,好吧,我已经配通了,你们各人自求多福吧。
谈谈OpenLDAP的架构:
1.OpenLDAP服务器: 实质上它相当于一台可基于网络访问的数据库,所管理的数据是目录信息,
而目录信息非常适合存储用户账号等,它的访问API默认为命令行;
2.phpLDAPadmin: 为了解决管理员可以使用图形界面而非使用命令行去访问OpenLDAP服务器,phpLDAPadmin提供了
一个奇丑无比的web化的管理操作平台;
3.PWM: 只装有管理工具也还不够,还需要为用户提供一个修改密码的地方;
4.客户端:配置各种工具
EOF
| true |
1a023c69a672b48758f60d43cf4f91495caf4834 | Shell | viyancs/system-bash | /mysql_backup.sh | UTF-8 | 1,101 | 3.703125 | 4 | [] | no_license | #! /bin/bash
# This script used to generate backup of all database on mysql
# You can modify this script like you want , example you can modify folder for Backup Directory, because for now I just try to Backup every one month
# You can modify Crontab for run this script , You can see in Readme.md for the description and usage
# This script didn't backup database information_schema and performance_schema
# Please keep Simple and Clean Code
# If You have Question Please contact me (msofyancs@gmail.com)
# Date components for the per-run backup folder name.
# NOTE(review): %A is the weekday *name* (e.g. "Monday"), so FLAG looks like
# "Monday-05-2021" — confirm that is the intended directory layout.
DAY=$(date +%A)
MONTH=$(date +%m)
YEAR=$(date +%Y)
FLAG="$DAY-$MONTH-$YEAR"
# Destination directory and MySQL connection settings.
# NOTE(review): the password is passed on the command line and is visible in
# `ps` output; a ~/.my.cnf would be safer.
BACKUP_DIR="/home/"
MYSQL_USER="root"
MYSQL=/usr/bin/mysql
MYSQL_PASSWORD="*******"
MYSQLDUMP=/usr/bin/mysqldump
mkdir -p "$BACKUP_DIR/$FLAG"
# List databases, filtering out the "Database" header row and information_schema.
databases=`$MYSQL --user=$MYSQL_USER -p$MYSQL_PASSWORD -e "SHOW DATABASES;" | grep -Ev "(Database|information_schema)"`
# Dump each remaining database to its own gzipped file; the two system
# schemas are skipped explicitly (information_schema is already filtered above).
for db in $databases; do
if [ "$db" = "information_schema" ]; then continue;
elif [ "$db" = "performance_schema" ]; then continue;
fi
$MYSQLDUMP --force --opt --user=$MYSQL_USER -p$MYSQL_PASSWORD --databases $db | gzip > "$BACKUP_DIR/$FLAG/$db.gz"
done
| true |
066fe41d38a18b2efa061e6a370c3ed30d6c9be6 | Shell | mr3coi/setup | /setup_vim.sh | UTF-8 | 1,226 | 3.421875 | 3 | [] | no_license | #!/bin/bash
# Symlink the repository's .vimrc into $HOME. An existing symlink is removed;
# an existing regular file is preserved with a '.backup' suffix.
VIMRC_DEST=$HOME/.vimrc
if [ -L "$VIMRC_DEST" ]; then
    echo ">>> symlink $VIMRC_DEST exists; delete it"
    rm "$VIMRC_DEST"
elif [ -e "$VIMRC_DEST" ]; then
    echo ">>> $VIMRC_DEST exists; store with '.backup' extension"
    mv "$VIMRC_DEST" "$VIMRC_DEST.backup"
fi
# $(...) instead of backticks, and quoted so a working directory containing
# spaces does not word-split the link target.
ln -s "$(pwd)/.vimrc" "$VIMRC_DEST"
echo "$(pwd)/.vimrc"

# Setup Vundle, then install all declared plugins headlessly.
git clone https://github.com/VundleVim/Vundle.vim.git ~/.vim/bundle/Vundle.vim
vim +PluginInstall +qall
echo "[IMPORTANT] Extra steps remain to complete installation of YouCompleteMe."
echo " Refer to installation guides in the repository."

# Setup cscope_maps.vim: symlink the repo copy into ~/.vim/plugin, creating
# the directory if needed and backing up any pre-existing regular file.
CSCOPE_MAPS_FILE=$(pwd)/cscope_maps.vim
CSCOPE_MAPS_DEST=$HOME/.vim/plugin/cscope_maps.vim
if [ -L "$CSCOPE_MAPS_DEST" ]; then
    echo ">>> symlink $CSCOPE_MAPS_DEST exists; delete it"
    rm "$CSCOPE_MAPS_DEST"
elif [ -e "$CSCOPE_MAPS_DEST" ]; then
    echo ">>> $CSCOPE_MAPS_DEST exists; store with '.backup' extension"
    mv "$CSCOPE_MAPS_DEST" "$CSCOPE_MAPS_DEST.backup"
elif [ ! -d "$(dirname "$CSCOPE_MAPS_DEST")" ]; then
    mkdir "$(dirname "$CSCOPE_MAPS_DEST")"
    echo ">>> $(dirname "$CSCOPE_MAPS_DEST") doesn't exist; create the directory"
fi
echo "$CSCOPE_MAPS_DEST"
ln -s "$CSCOPE_MAPS_FILE" "$CSCOPE_MAPS_DEST"
| true |
6b778b9510094562a3a30dcf08c56d93bdea9a69 | Shell | sailik1991/VulnerabilityDataProcessor | /src/generate_game_data.sh | UTF-8 | 473 | 3.28125 | 3 | [
"MIT"
] | permissive | #~/bin/bash
# Require exactly two arguments: the start and end years for extraction.
if [ "$#" -ne 2 ]; then
    echo "usage: ./generate_game_data.sh <start_year> <end_year>"
    exit 1
fi
# Extract attack data for the given year range into file.txt.
# NOTE(review): assumes attack_extractor.py is in the current directory — confirm.
python attack_extractor.py -s $1 -e $2 > file.txt
# Normalise the extractor output: ", " -> ",", ")(" -> " ", then strip all
# remaining parentheses.
cat file.txt | sed 's/, /,/g' | sed 's/)(/ /g' | sed 's/(//g' | sed 's/)//g' > BSSG_input.txt
# Lines ending in a CVE id become the attack list, plus a trailing NO-OP entry.
# NOTE(review): `print "NO-OP"` is Python 2 syntax; this fails under python3.
grep 'CVE-[0-9]*-[0-9]*$' BSSG_input.txt > attack_list.txt
python -c 'print "NO-OP"' >> attack_list.txt
# Strip the CVE ids from the game input, drop the blank lines that leaves
# behind, and remove the first four header lines.
# NOTE(review): `sed -i` without a backup suffix is GNU-only (breaks on BSD/macOS).
sed -i 's/CVE-[0-9]*-[0-9]*$//g' BSSG_input.txt
sed -i '/^$/d' BSSG_input.txt
sed -i '1,4d' BSSG_input.txt
| true |
6fe444e588d54dbfd0b19f4f227e0badcddee281 | Shell | uk-gov-mirror/UKGovernmentBEIS.beis-report-official-development-assistance | /script/bootstrap | UTF-8 | 1,207 | 3.59375 | 4 | [
"MIT"
] | permissive | #!/bin/sh
# script/bootstrap: Resolve all dependencies that the application requires to
# run.
set -e
# Work from the repository root (parent of the script/ directory).
cd "$(dirname "$0")/.."
# Local-machine-only setup (skipped on CI, where the image provides these).
if [ -z "$CI" ]; then
  # macOS with a Brewfile: install Homebrew dependencies when missing.
  if [ -f Brewfile ] && [ "$(uname -s)" = "Darwin" ]; then
    if ! brew bundle check >/dev/null 2>&1; then
      echo "==> Installing Homebrew dependencies..."
      brew bundle install --verbose --no-lock
    fi
  fi
  # Install the pinned Ruby via rbenv when the required version is absent.
  if [ -f .ruby-version ]; then
    eval "$(rbenv init -)"
    if [ -z "$(rbenv version-name 2>/dev/null)" ]; then
      echo "==> Installing Ruby..."
      rbenv install --skip-existing
      rbenv rehash
    fi
  fi
fi
if ! command -v bundle >/dev/null 2>&1; then
  echo "==> Installing Bundler..."
  gem install bundler
  if [ -z "$CI" ]; then
    rbenv rehash
  fi
fi
if ! bundle check >/dev/null 2>&1; then
  echo "==> Installing Ruby dependencies..."
  bundle install
fi
# JS dependencies.
# NOTE(review): the check uses yarn but the install uses npm — confirm this
# mixed toolchain is intentional.
if [ -f package.json ]; then
  if ! yarn check --verify-tree >/dev/null 2>&1; then
    echo "==> Installing JS dependencies..."
    npm install
  fi
fi
# Start a local Redis daemon if one is not already answering PING.
# NOTE(review): under `set -e` a missing redis-cli binary (or a non-zero
# exit) aborts the script before the check — confirm redis-cli is guaranteed.
REDIS_STATUS=$(redis-cli ping)
if [ "$REDIS_STATUS" != "PONG" ]; then
  echo "===> Starting Redis server in daemon mode"
  redis-server /usr/local/etc/redis.conf --daemonize yes
fi
| true |
2f466eb52be2459a8a04220723d5803283ae2e58 | Shell | gangbo/vim_open_pm | /tmux.sh | UTF-8 | 371 | 3.125 | 3 | [] | no_license | #!/bin/sh
#
# name : tmuxen, tmux environment made easy
# author : gangbo <dagnagbo@gmail.com>
# license : GPL
#
# Resolve the tmux binary with POSIX `command -v` (portable, unlike `which`)
# and derive the session name from the short hostname.
cmd=$(command -v tmux) # tmux path
session=$(hostname -s) # session name

# Bail out early when tmux is missing; the variable is quoted so the test is
# well-formed even when $cmd is empty, and the message goes to stderr.
if [ -z "$cmd" ]; then
    echo "You need to install tmux." >&2
    exit 1
fi

# Create the session if it does not exist yet, then attach to it.
if ! "$cmd" has -t "$session" 2> /dev/null; then
    "$cmd" new -s "$session"
fi

"$cmd" att -t "$session"
exit 0
| true |
23dd2826aa2633f5c7c1e7d5d0090618d1e3905c | Shell | otto-de/trackrdrd | /pkg/rpm/pkg | UTF-8 | 2,107 | 3.484375 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | #! /bin/bash -ex
# Build script for a trackrdrd el7 RPM
# to be run in the rpmbuild/centos7 docker container.
# Env variables VERSION and RELEASE MUST be set in the docker invocation.
# DIST is set by the rpmbuild/centos7 container.
# The directory mounted to /srv MUST contain these files:
# - this script (named pkg)
# - trackrdrd tarball (trackrdrd-$VERSION.tar.gz)
# - RPM spec file (trackrdrd.spec)
# - systemd unit file (trackrdrd.service)
# - logrotate config for the Kafka plugin (trackrdr-kafka.logrotate)
# - yum repo config for varnish65@packagecloud (varnishcache_varnish65.repo)
# (see https://packagecloud.io/varnishcache/varnish5/install#manual-rpm)
# At the end of the run, binary, source and debuginfo RPMs are in the
# directory mounted to /srv.
# Validate the required environment.
# NOTE(review): `[ -z $VERSION ]` with VERSION unset degenerates to the
# one-argument form `[ -z ]`, which happens to evaluate true — it works, but
# quoting would make the intent explicit.
if [ -z $VERSION ]; then
    echo "Env variable VERSION MUST be set"
    exit 1
fi
if [ -z $RELEASE ]; then
    echo "Env variable RELEASE MUST be set"
    exit 1
fi
# delete the peculiar macros from the rpmbuild/centos7 image
rm /home/builder/.rpmmacros
# set up the build environment
cd /home/builder
mkdir -p rpmbuild/{BUILD,BUILDROOT,RPMS,SOURCES,SPECS,SRPMS}
cp /srv/trackrdrd.spec rpmbuild/SPECS
cp /srv/trackrdrd-${VERSION}.tar.gz rpmbuild/SOURCES
cp /srv/trackrdrd.service rpmbuild/SOURCES
cp /srv/trackrdr-kafka.logrotate rpmbuild/SOURCES
# install epel7 repo
sudo rpm -Uvh \
    https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
# set up varnish repo and Apache Bigtop repo for zookeeper-native
sudo cp /srv/varnishcache_varnish65.repo /etc/yum.repos.d/
sudo cp /srv/bigtop.repo /etc/yum.repos.d/
sudo yum -q makecache -y --disablerepo='*' --enablerepo='varnishcache_varnish65'
sudo yum -q makecache -y --disablerepo='*' --enablerepo='bigtop'
# build requirements
sudo yum install -y -q varnish-devel pkgconfig make gcc librdkafka-devel \
    zookeeper-native pcre-devel zlib-devel python-docutils
# build RPMs
rpmbuild -ba -D "dist .${DIST}" \
    -D "_version ${VERSION}" \
    -D "_release ${RELEASE}" \
    rpmbuild/SPECS/trackrdrd.spec
# Export the results back to the mounted host directory.
sudo cp rpmbuild/RPMS/*/* /srv
sudo cp rpmbuild/SRPMS/* /srv
| true |
dc8bdddd3c1a4cc25513eb2cd7a8e8c9223a61d9 | Shell | UoB-HPC/performance-portability | /benchmarking/2020/babelstream/radeonvii-zoo/benchmark.sh | UTF-8 | 3,334 | 3.765625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Default build configuration. NB: DEFAULT_COMPILER must be one of the values
# accepted by the compiler case below ("gcc-10.1", "hipcc", "hipsycl");
# the previous default "gcc-9.1" was rejected as an invalid compiler, so
# running without an explicit COMPILER argument always failed.
DEFAULT_COMPILER=gcc-10.1
DEFAULT_MODEL=ocl
# Print command-line help, including the valid compiler/model choices and
# the current defaults, to stdout.
usage() {
  cat <<EOF

Usage: ./benchmark.sh build|run [COMPILER] [MODEL]

Valid compilers:
 gcc-10.1
 hipcc
 hipsycl

Valid models:
 ocl
 omp
 acc
 sycl

The default configuration is '$DEFAULT_COMPILER'.
The default programming model is '$DEFAULT_MODEL'.

EOF
}
# Parse the command line: ACTION is mandatory; compiler and model fall back
# to the defaults declared at the top of the script.
if (( $# < 1 )); then
  usage
  exit 1
fi

ACTION="$1"
COMPILER="${2:-$DEFAULT_COMPILER}"
MODEL="${3:-$DEFAULT_MODEL}"

# Locate this script and pull in the shared helpers (fetch_src, check_bin, ...).
SCRIPT="$(realpath "$0")"
SCRIPT_DIR="$(realpath "$(dirname "$SCRIPT")")"
source "${SCRIPT_DIR}/../common.sh"

# Per-configuration names for the binary and run directory.
export CONFIG="radeonvii_${COMPILER}_${MODEL}"
export BENCHMARK_EXE="BabelStream-${CONFIG}"
export SRC_DIR="${PWD}/BabelStream"
export RUN_DIR="${PWD}/BabelStream-${CONFIG}"

# Reset the module environment and load the host toolchain plus ROCm stack.
module purge
module load gcc/10.1.0
module load rocm/node30-paths
case "$COMPILER" in
gcc-10.1)
MAKE_OPTS='COMPILER=GNU'
;;
hipcc)
MAKE_OPTS='COMPILER=HIPCC'
;;
hipsycl)
module load hipsycl/master-mar-18
MAKE_OPTS='COMPILER=HIPSYCL TARGET=AMD ARCH=gfx906'
;;
*)
echo
echo "Invalid compiler '$COMPILER'."
usage
exit 1
;;
esac
case "$MODEL" in
ocl)
MAKE_FILE="OpenCL.make"
BINARY="ocl-stream"
;;
kokkos)
KOKKOS_PATH=$(pwd)/$(fetch_kokkos)
echo "Using KOKKOS_PATH=${KOKKOS_PATH}"
MAKE_FILE="Kokkos.make"
BINARY="kokkos-stream"
export CXX=hipcc
# XXX
# TARGET=AMD isn't a thing in BabelStream but TARGET=CPU is misleading and TARGET=GPU uses nvcc
# for CXX which is not what we want so we use a non-existent target
# CXX needs to be specified again as we can't export inside BabelStream's makefile
MAKE_OPTS+=" KOKKOS_PATH=${KOKKOS_PATH} TARGET=AMD ARCH=Vega906 DEVICE=HIP CXX=hipcc"
export OMP_PROC_BIND=spread
;;
omp)
MAKE_OPTS+=' TARGET=AMD'
MAKE_OPTS+=' EXTRA_FLAGS="-foffload=amdgcn-amdhsa="-march=gfx906""'
MAKE_FILE="OpenMP.make"
BINARY="omp-stream"
;;
acc)
MAKE_OPTS+=' EXTRA_FLAGS="-foffload=amdgcn-amdhsa="-march=gfx906""'
MAKE_FILE="OpenACC.make"
BINARY="acc-stream"
;;
sycl)
# module load gcc/8.3.0
# export HIPSYCL_CUDA_PATH=$(realpath $(dirname $(which nvcc))/..)
# HIPSYCL_PATH=$(realpath $(dirname $(which syclcc))/..)
#HIPSYCL_PATH="/nfs/home/wl14928/hipSYCL/build/x"
HIPSYCL_PATH="/nfs/software/x86_64/hipsycl/master"
echo "Using HIPSYCL_PATH=${HIPSYCL_PATH}"
MAKE_OPTS+=" SYCL_SDK_DIR=${HIPSYCL_PATH}"
MAKE_FILE="SYCL.make"
BINARY="sycl-stream"
;;
*)
echo
echo "Invalid model '$MODEL'."
usage
exit 1
;;
esac
# Handle actions
if [ "$ACTION" == "build" ]; then
# Fetch source code
fetch_src
rm -f $BENCHMARK_EXE
# Perform build
if ! eval make -f $MAKE_FILE -C $SRC_DIR -B $MAKE_OPTS -j $(nproc); then
echo
echo "Build failed."
echo
exit 1
fi
mkdir -p $RUN_DIR
# Rename binary
mv $SRC_DIR/$BINARY $RUN_DIR/$BENCHMARK_EXE
elif [ "$ACTION" == "run" ]; then
check_bin $RUN_DIR/$BENCHMARK_EXE
cd $RUN_DIR || exit
bash "$SCRIPT_DIR/run.sh" BabelStream-$CONFIG.out
elif [ "$ACTION" == "run-large" ]; then
check_bin $RUN_DIR/$BENCHMARK_EXE
cd $RUN_DIR || exit
bash "$SCRIPT_DIR/run-large.sh" BabelStream-large-$CONFIG.out
else
echo
echo "Invalid action (use 'build' or 'run')."
echo
exit 1
fi
| true |
81263653ec9cef0b3eb36c8dc7070f7806d832b0 | Shell | redislabsdemo/acldemo | /bootstrap.sh | UTF-8 | 2,711 | 3.140625 | 3 | [] | no_license | ##Install NGINX to setup Web Server
# Set up the official NGINX bionic repo
echo deb http://nginx.org/packages/ubuntu/ bionic nginx >> /etc/apt/sources.list
# Work out of /tmp for downloaded artifacts
cd /tmp
# Fetch and install the NGINX package signing key
wget https://nginx.org/keys/nginx_signing.key
sudo apt-key add /tmp/nginx_signing.key
# Refresh package lists and install NGINX
sudo apt-get update -y
sudo apt-get install nginx -y
# Clone the demo repo to grab the nginx.conf (web assets are installed later)
git clone https://github.com/redislabsdemo/acldemo.git
# Remove the stock NGINX config and default pages, then install ours
sudo rm -f /etc/nginx/nginx.conf
sudo rm -f /usr/share/nginx/html/index.html
sudo rm -f /usr/share/nginx/html/50x.html
mv ./acldemo/nginx.conf /etc/nginx/
# Install the AWS CLI tools for automated access to public network info
sudo apt-get install awscli -y
# Query the EC2 instance metadata service for the public IP and DNS name
publicip=$(curl http://169.254.169.254/latest/meta-data/public-ipv4)
publicdns=$(curl http://169.254.169.254/latest/meta-data/public-hostname)
# Restrict the server_name directive to this host's public IP/DNS so NGINX
# only answers when the Host header matches one of them.
sed -i "s/server_name[^;]*;/server_name $publicip $publicdns;/" /etc/nginx/nginx.conf
# AWS public DNS names are longer than NGINX's default server_name hash
# bucket allows; bump the bucket size (not needed for shorter hostnames).
sed -i "s/http {/http {\n server_names_hash_bucket_size 128;/g" /etc/nginx/nginx.conf
# Create a self-signed certificate alongside the NGINX config
cd /etc/nginx
sudo openssl req -x509 -nodes -days 365 -subj "/C=US/ST=CA/L=MountainView/O=Redislabs/OU=ProductDemos/CN=ACLDemoSelf-Signed" -newkey rsa:2048 -keyout /etc/nginx/nginx-selfsigned.key -out /etc/nginx/nginx-selfsigned.crt
# Restart NGINX so the new config and certificate take effect
service nginx restart
# Build Redis from source for use as a localhost cache (protected mode keeps
# it inaccessible externally by default)
sudo apt install build-essential tcl -y
sudo apt install tcl-tls -y
sudo apt install redis-tools -y
git clone https://github.com/antirez/redis.git
cd ./redis
make
# Daemonize the server: the original ran it in the foreground, which blocked
# the remainder of this provisioning script until Redis exited.
./src/redis-server --daemonize yes
# Install the website for the demos
git clone https://github.com/redislabsdemo/acldemo.git
mv acldemo/index.html /usr/share/nginx/html/index.html
mv acldemo/redis-logo.png /usr/share/nginx/html/
mv acldemo/app.py /usr/share/nginx/html
# -p: don't fail if the directory already exists (script re-runs)
mkdir -p /usr/share/nginx/html/static
mv acldemo/static/style.css /usr/share/nginx/html/static
| true |
6e1e135c3f5db5de85dd868d51cbda3d12c30d10 | Shell | mschmi26/Biocomputing_Project | /bioinformaticsProject/ref_sequences/build.sh | UTF-8 | 552 | 2.765625 | 3 | [] | no_license | #
# For every reference FASTA in this directory: build a multiple sequence
# alignment with muscle, turn it into an HMM profile with hmmbuild, then
# search the proteomes with hmmsearch. Relies on the ./muscle, ./hmmbuild
# and ./hmmsearch binaries being present in this directory.
for i in *.fasta
do
./muscle -in $i -out muscle$i
./hmmbuild build$i muscle$i
# NOTE(review): each iteration overwrites mrca$i, so only the hits from the
# LAST proteome searched survive -- confirm this is intended.
for j in ../proteomes/proteome_**.fasta
do
./hmmsearch --tblout mrca$i $j build$i
done
# Keep only the hit-table lines containing "aligned".
cat mrca$i | grep "aligned" > hasmrcA$i
#t=1
#for p in ../ref_sequences/hsp70gene_**.fasta
#do
# ./hmmsearch --tblout $thsp$i hasmrcA$i $p
# NOTE(review): 't=t+1' assigns the literal string "t+1" (shell has no infix
# arithmetic; a counter needs t=$((t+1))). Looks like a leftover from the
# commented-out block above -- confirm before relying on $t.
t=t+1
#done
# cat $thsp$i | grep "aligned" > $thas$i
##make file with all the names of the proteome that have hsp70 and mrc
#for m in $thas$i
#do
# echo m > table.csv
#done
done
#cat table.csv | sed 's/_/ /' | sort -k 2
| true |
81130dfacdb188395c111eec52c9d9adf1b1ce9f | Shell | rlpowell/hblog | /run_cron.sh | UTF-8 | 728 | 3.71875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Cron driver for the hblog build: run run_build.sh under a lock and print
# output (which cron mails) only when the build failed or produced changes.
export PATH="$HOME/bin:$HOME/.local/bin:/usr/local/bin/:/usr/local/sbin:/sbin:/bin:/usr/sbin:/usr/bin"

# Merge stderr into stdout so cron captures a single stream.
exec 2>&1

# Fail fast if the working copy is missing rather than building from $HOME.
cd ~/src/hblog/ || { echo "Could not cd to ~/src/hblog; exiting."; exit 1; }

# Serialize runs: take an exclusive, non-blocking lock on FD 99; the lock is
# held for the lifetime of the script.
LOCK_FILE=/home/rlpowell/scratch/hblog_run_cron_lock
exec 99>"$LOCK_FILE"
if ! flock -n 99
then
  echo "Could not acquire lock; exiting."
  exit 1
fi

# Capture build output in an unpredictable temp file (mktemp instead of the
# guessable /tmp/name.$$) and remove it on any exit path, including signals.
OUTPUT=$(mktemp) || { echo "mktemp failed; exiting."; exit 1; }
trap 'rm -f "$OUTPUT"' EXIT

./run_build.sh >"$OUTPUT" 2>&1
exitcode=$?

if [ "$exitcode" -ne 0 ]
then
  echo "Errors found; showing full output."
  cat "$OUTPUT"
# If we see mail happening that isn't part of a Docker RUN line,
# changes must have occurred
elif grep -v '^STEP [0-9]*: RUN' "$OUTPUT" | grep -q -i mail
then
  echo "Changes found; showing full output."
  cat "$OUTPUT"
fi
| true |
902ea3f083b7a59b957449f5b3971027264d1ff6 | Shell | maravtdm/polybar | /scripts/mount_usb.sh | UTF-8 | 230 | 2.8125 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/sh
# Polybar module body: print a label when the USB drive mounted at
# /run/media/david/<dev> is present, print nothing otherwise.
# This file runs under #!/bin/sh, so only POSIX constructs are used:
# the original '[ "$mount" == "0" ]' is a bashism that fails under dash,
# and 'echo -n' is not portable -- both replaced below.
label_prefix=""
label_umount=""

for dev in '807A-F083'
do
    # grep -q: only the exit status matters, not the matching line.
    if mount | grep -q "/run/media/david/$dev"; then
        printf '%s' "dell"
    else
        printf '%s' ""
        #"$label_umount"
    fi
done
| true |
9cdbfc80e964f6ed150b5a54009c5243d278a7cf | Shell | mherman09/Hdef | /test/test_platemotion.sh | UTF-8 | 2,194 | 2.984375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
#####
# SET PATH TO TEST_VALUES SCRIPT
#####
# Directory containing this script (and the helper test scripts).
TEST_BIN_DIR=$(echo $0 | xargs dirname)

#####
# SET PATH TO HDEF EXECUTABLE
#####
# Check for o92util
# (test_find_hdef_exec.sh presumably records the located executable path in
# hdefexec.tmp, which is read just below -- TODO confirm.)
$TEST_BIN_DIR/test_find_hdef_exec.sh platemotion || { echo "$0: could not find platemotion; exiting" 1>&2; exit 1; }
BIN_DIR=$(cat hdefexec.tmp | xargs dirname)

#####
# RUN TEST
#####
# Remove all scratch *.tmp files on exit (0) and on signals 1/2/3/8
# (HUP/INT/QUIT/FPE; 9=KILL cannot actually be trapped).
trap "rm -f *.tmp" 0 1 2 3 8 9

# Pattern of each test: run platemotion on one or more lon/lat points, write
# the expected output to answer.tmp, and let test_values.sh compare the two
# files ('4' is presumably the number of values to compare -- see
# test_values.sh).

# MORVEL56
echo -30 40 | $BIN_DIR/platemotion -plates NA/EU -model MORVEL56 -o morvel56.tmp
echo -30.000000000000000 40.000000000000000 22.771560776120207 -2.0222476466178430 > answer.tmp
$TEST_BIN_DIR/test_values.sh morvel56.tmp answer.tmp 4 "platemotion: MORVEL56 EU wrt NA, output file" || exit 1

# MORVEL
echo -75 -35 | $BIN_DIR/platemotion -plates SA/NZ -model MORVEL > morvel.tmp
echo -75.000000000000000 -35.000000000000000 72.008593269570923 16.689689796741547 > answer.tmp
$TEST_BIN_DIR/test_values.sh morvel.tmp answer.tmp 4 "platemotion: MORVEL SA wrt NZ, stdout" || exit 1

# NUVEL1A
# Two input points read from a file via -f.
echo 115 -50 > loc.tmp
echo 120 -50 >> loc.tmp
$BIN_DIR/platemotion -f loc.tmp -plates AN/AU -model NUVEL1A > nuvel1a.tmp
cat > answer.tmp << EOF
115.00000000000000 -50.000000000000000 22.881201729386667 68.378823029969183
120.00000000000000 -50.000000000000000 18.268958516527089 69.521449139059513
EOF
$TEST_BIN_DIR/test_values.sh nuvel1a.tmp answer.tmp 4 "platemotion: NUVEL1A AU wrt AN, stdout" || exit 1

# ITRF08
$BIN_DIR/platemotion -f loc.tmp -plates AN/AU -model ITRF08 > itrf08.tmp
cat > answer.tmp << EOF
115.00000000000000 -50.000000000000000 25.766441315239788 68.326351344076343
120.00000000000000 -50.000000000000000 21.150858988809414 69.673756121784024
EOF
$TEST_BIN_DIR/test_values.sh itrf08.tmp answer.tmp 4 "platemotion: ITRF08 AU wrt AN, stdout" || exit 1

# Custom pole
# -pole lon/lat/rate instead of a named plate pair + model.
echo -30 40 | $BIN_DIR/platemotion -pole 139.461/61.796/0.211 -o custom.tmp
echo -30.000000000000000 40.000000000000000 22.846411219358291 -2.0281377828526641 > answer.tmp
$TEST_BIN_DIR/test_values.sh custom.tmp answer.tmp 4 "platemotion: Custom pole, file" || exit 1
| true |
1c69ffb2dd95f86874eef2b9707cc6ec683b8fba | Shell | kfengc27/COMP9417 | /prep_data.sh | UTF-8 | 682 | 3.171875 | 3 | [] | no_license | #!/bin/bash
# Pre-process autoMPG.data ready for learning:
#  - drop ARFF directives (@), comments (%) and rows with missing values (?)
#  - drop blank lines, keep columns 1-2, 4 and 6 onwards
#  - convert the CSV to whitespace-separated values
grep -v "[@%?]" autoMpg.arff | awk 'NF > 0' | cut -d',' -f1-2,4,6- | sed 's/,/ /g' > transformedData.data

# Split into train, test, and validation sets (60:20:20)
m=$(wc -l < transformedData.data)
# Multiply before dividing: the original "(m / 100) * 20" truncated m to the
# nearest hundred first (and produced 0 for m < 100), shorting both 20% sets.
testLines=$(( m * 20 / 100 ))
validationLines=$testLines
trainLines=$(( m - validationLines - testLines ))

# Layout of transformedData.data: [train 60%][test 20%][validation 20%].
head -n "$trainLines" transformedData.data > data/trainData.data
tail -n "$validationLines" transformedData.data > data/validationData.data
# Test set is the middle chunk: the last (test+validation) lines minus the
# trailing validation lines. The original used (validation+train) here, which
# selected lines from inside the training set (an overlapping split).
tail -n "$(( testLines + validationLines ))" transformedData.data | head -n "$testLines" > data/testData.data
| true |
1a4450a390ab6405c9ff0987c7c6ca9cc262a268 | Shell | bootleq/env-common | /tasks/upgrade_tmux.sh | UTF-8 | 357 | 2.640625 | 3 | [] | no_license | #!/bin/bash
# Build and install tmux 1.8 from source. Needs root privileges
# (apt-get install, writing under /usr/local).

# Install the build dependency (libevent 2.0)
# http://www.monkey.org/~provos/libevent/
apt-get install libevent-dev

# Fetch the source
cd /usr/local/src
git clone git://git.code.sf.net/p/tmux/tmux-code tmux-tmux-code
cd tmux-tmux-code
git fetch

# Check out a specific version (the 1.8 tag, untracked, quietly)
git checkout --no-track --quiet 1.8
sh autogen.sh
./configure && make
make install
| true |
acab917326f9d70035f322501b6218f730390718 | Shell | PRIYANSHU-G13/SDN-Controller-Placement | /single_host/start.sh | UTF-8 | 1,179 | 3.734375 | 4 | [] | no_license | #!/bin/sh
### this script will start phoenix inside tmux windows
## Usage: ./start.sh [nf]
## if [nf] is present a phoenix will be launched with $nf.xml config and nf.sh environment.
## if [nf] is not present we will start <hss, dpsw, mme, bt>
CWD=$(pwd)
# Pick the phoenix install location: inside a docker container (/.dockerenv
# present) the launcher sits next to this script, otherwise use the
# system-wide binary installation.
if [ -f /.dockerenv ]; then
    ## inside docker
    ## binary installation
    PHOENIX_DIR=$CWD
    PHOENIX=./phoenix.sh
else
    ## binary installation
    PHOENIX_DIR=/opt/phoenix/dist
    PHOENIX=./phoenix.sh
    ## source installation
    #PHOENIX_DIR=/opt/phoenix/build/
    #PHOENIX=./bin/phoenix
fi

# Launch one network function in a detached tmux window: source env.sh and
# the per-NF environment file, then run phoenix with the NF's XML config.
# The first line only echoes the command being run, for visibility.
run_nf(){
    nf=$1
    echo tmux new-window -d -n $nf \"". ./env.sh; . ./${nf}.sh; cd $PHOENIX_DIR; ${PHOENIX} -f ${CWD}/${nf}.xml; sleep 5"\"
    # NOTE(review): $SH is never set in this script -- presumably meant to
    # drop into a shell so the window stays open after phoenix exits; it
    # expands empty unless exported by the caller. TODO confirm.
    tmux new-window -d -n $nf ". ./env.sh; . ./${nf}.sh; cd $PHOENIX_DIR; ${PHOENIX} -f ${CWD}/${nf}.xml; cd $CWD; $SH"
}

# Already inside a tmux session ($TMUX set): open the NF windows directly.
# Otherwise: start a detached session that re-runs this script inside tmux.
if [ ! -z "$TMUX" ]
then
    if [ -z $1 ]; then
	for nf in hss dpsw mme bt
	do
	    run_nf $nf
	done
    else
	run_nf $1
    fi
    #for nf in dpsw
    #do
    # tmux new-window -n $nf ". ./env.sh; . ./${nf}.sh; cd $PHOENIX_DIR; sudo -E ${PHOENIX} -f ${CWD}/${nf}.xml; bash"
    #done
else
    echo "starting tmux session in the background, run 'tmux a' to attach"
    tmux new-session -n phoenix -d $0
fi
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.