blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
52907da714fb55a6387f5dc920738c978badc502
|
Shell
|
AnoML/sandbox
|
/scenarios/network/db_server_slow_network/execute.sh
|
UTF-8
| 237
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
# Enforces a slow network (packet delay) on eth1 via tc/netem.
# The delay ceiling comes from network_packet_delay in the shared config.
source ../../config.sh
printf 'Enforcing a packet delay of maximum %sms\n' "${network_packet_delay}"
# Normally-distributed delay: mean ${network_packet_delay}ms, jitter 20ms.
tc qdisc change dev eth1 root netem delay "${network_packet_delay}ms" 20ms distribution normal
| true
|
e73e02886d666c20fd966662a2354d92d8b2f818
|
Shell
|
projectriff-archive/ci
|
/tasks/helm-charts/build/run
|
UTF-8
| 2,033
| 3.75
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Build and publish the riff Helm chart:
#  - packages the chart at the version found in Chart.yaml,
#  - generates an example install script with component image tags baked in,
#  - regenerates the chart repo index (merging with any published index).
set -o errexit
set -o nounset
set -o pipefail
build_root=$PWD
source "$build_root/git-pfs-ci/tasks/scripts/common.sh"
cp -pr $build_root/git-helm-charts $build_root/riff
pushd $build_root/git-helm-charts
helm init --client-only
# Component versions come from the CI input directories (one version file each).
function_controller_version=$(head "$build_root/function-controller-version/version")
function_sidecar_version=$(head "$build_root/function-sidecar-version/version")
topic_controller_version=$(head "$build_root/topic-controller-version/version")
http_gateway_version=$(head "$build_root/http-gateway-version/version")
chart_version=$(grep version "$build_root/git-helm-charts/riff/Chart.yaml" | awk '{print $2}')
helm package riff --version "$chart_version"
chart_file=$(basename riff*tgz)
# Emit an example install script. The heredoc delimiter is unquoted, so
# CI-time variables (chart/image versions) are expanded now, while escaped
# tokens (\$0, \$#, \$@, \`...\`) survive into the generated script.
cat > "${build_root}/helm-charts-install/riff-${chart_version}-install-example.sh" << EOM
#!/usr/bin/env bash
script_name=\`basename "\$0"\`
set -o errexit
set -o nounset
set -o pipefail
if (( \$# < 1 )); then
echo
echo "Usage:"
echo
echo " \$script_name <chart-name> <extra-helm-args>"
echo
exit 1
fi
set -x
chart_name="\$1"
shift
helm install "\${chart_name}" \
--version="${chart_version}" \
--set rbac.create=false \
--set functionController.image.tag=${function_controller_version},functionController.sidecar.image.tag=${function_sidecar_version},topicController.image.tag=${topic_controller_version},httpGateway.image.tag=${http_gateway_version} \
"\$@"
EOM
cp "$chart_file" "$build_root/helm-charts/"
# Fetch the currently published index if any; failure here is tolerated
# (first publish has no index), hence the temporary set +e.
set +e
curl -sfL "$HELM_CHARTS_URL/index.yaml" > existing_index.yaml
if [ "0" != "$?" ]; then
rm -f existing_index.yaml
fi
set -e
if [ -f existing_index.yaml ]; then
helm repo index "$build_root/helm-charts" --url "$HELM_CHARTS_URL" --merge existing_index.yaml
else
helm repo index "$build_root/helm-charts" --url "$HELM_CHARTS_URL"
fi
# Record the published chart version/name for downstream CI tasks.
echo "$chart_version" > "$build_root/helm-charts-latest-version/latest_version"
echo "riff" > "$build_root/helm-charts-latest-name/latest_name"
popd
| true
|
33f5cb19d00dd083dde7c3bf601e6db470596440
|
Shell
|
hixio-mh/package
|
/slicetag.sh
|
UTF-8
| 2,316
| 4.15625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Return 0 when HEAD is not pointed at by any tag (i.e. assumed 'master'),
# 1 when the current checkout corresponds to a tag.
function on_master () {
  # identify if the current branch is a tag, or if not assume it is 'master'
  local githash
  githash=$(git rev-parse HEAD)
  # Count tags referencing this commit. Quote the hash, use $() over
  # backticks, and grep -c instead of 'grep | wc -l'; '|| true' keeps the
  # zero-match case from tripping 'set -e' (grep -c exits 1 on no match).
  local tags
  tags=$(git show-ref --tags | grep -c -- "$githash" || true)
  # if tags > 0, then we're in a tag, otherwise on 'master'
  if test "$tags" -gt 0 ; then
    return 1
  fi
  return 0
}
# Apply tag $1 to the current repository and push it upstream.
# Exits non-zero when: not inside a slice-support repo, there are
# uncommitted changes, or the tag/push itself fails.
function settag () {
local RELEASE=$1
# Heuristic repo check: slice-support repos carry one of these files.
if ! test -f .gitmodules && ! test -f svn-submodules ; then
echo "Error: we don't think you are inside a slice-support repo."
exit 1
fi
# 'git diff --exit-code' succeeds only when the worktree is clean.
if git diff --exit-code ; then
echo "Applying tag: $RELEASE"
if ! git tag $RELEASE ; then
echo "Error: failed to set 'git tag $RELEASE'"
echo "Error: please investigate."
exit 1
fi
echo "Pushing tag to remote"
if ! git push --tags ; then
echo "Error: failed to push new tag upstream with 'git push --tags'"
echo "Error: please investigate."
exit 1
fi
echo "Done!"
# Show the resulting tag list for confirmation.
git tag -l
else
echo "Error: There are uncommitted changes here."
echo "Error: Either commit or remove local changes "
echo "       before applying tags."
exit 1
fi
}
# --- main dispatch -------------------------------------------------------
set -e
command=$1
shift || :

# Version tag derived from the last commit's timestamp (YYYYmmddHHMM).
# NOTE(review): 'date -d @<epoch>' is GNU date syntax — confirm Linux-only use.
TS=$( git log -1 --format=%ct )
TAG=$( date -d @$TS +%Y%m%d%H%M )

# NOTE: '[[ $command =~ "get" ]]' does *substring* matching (a quoted
# pattern in =~ is literal), so e.g. 'forget' also matches; kept as-is
# to preserve existing behavior.
if [[ $command =~ "get" ]] ; then
  # Expect a tag to have been set previously.
  RELEASE=$( git describe --abbrev=0 --tags 2> /dev/null || : )
  if [ -z "$RELEASE" ] || on_master ; then
    # But, if there is not one, return the last-commit date tag as version.
    RELEASE=$TAG-0.mlab
  fi
  echo $RELEASE
elif [[ $command =~ "set" ]] ; then
  VERSION=$1
  RELEASE=$VERSION-$TAG.mlab
  settag $RELEASE
elif [[ $command =~ "rm" ]] ; then
  VERSION=$1
  RELEASE=$VERSION
  echo "WARNING: About to delete tag: $RELEASE"
  echo -n "WARNING: Are you sure? (Y/n): "
  read im_sure
  if test -z "$im_sure" || test "$im_sure" = "Y" ; then
    # NOTE: delete locally
    git tag -d $RELEASE
    # NOTE: delete on remote
    git push --delete origin $RELEASE
  fi
elif [[ $command =~ "list" ]] ; then
  git tag -l
else
  # Fixed: the original usage text omitted the supported 'rm' and 'list'
  # subcommands handled above.
  echo "Usage: $0 <get|set|rm|list> [version]"
  echo "i.e. $0 set 1.0"
  exit 1
fi
| true
|
97c81b4634fe8e78baf7911f13b558c763d63928
|
Shell
|
shoetsu/translator
|
/test_many.sh
|
UTF-8
| 391
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
# Launch './run.sh <checkpoint> test [opts]' in the background for every
# checkpoint directory under the given root directory.
usage() {
echo "Usage:$0 root_dir"
exit 1
}
if [ $# -lt 1 ];then
usage;
fi
root_dir=$1
opt=""
#opt='--target_attribute=Weight --test_data_path=dataset/test.weight.csv'
#opt='--test_data_path=dataset/test.price.empty.csv'
#opt='--test_data_path=dataset/test.annotated.csv'
# Iterate with a glob instead of parsing 'ls' output, and quote the
# checkpoint path so directories with spaces survive. $opt stays unquoted
# on purpose: it may hold several whitespace-separated flags.
for checkpoint in "$root_dir"/*; do
nohup ./run.sh "$checkpoint" test $opt &
done;
| true
|
a5e176683386a6f045f3c46f44b2b79ee71e8219
|
Shell
|
varthdader/Pineapple-MK4
|
/2.8.1/mk4-module-randomroll-3.2/randomroll/install.sh
|
UTF-8
| 1,003
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/sh
# Install the randomroll module on a WiFi Pineapple: replace /www/index.php
# and the spoofhost config (backing up the originals once) and symlink the
# module's web assets into /www.
randomrollDir="$( cd "$( dirname "$0" )" && pwd)"
# Replace /www/index.php with the bundled copy only when the MD5s differ.
indexMD5=`md5sum /www/index.php`
var1=${indexMD5:0:32}
randomrollMD5=`md5sum "$randomrollDir"/files/index.php`
var2=${randomrollMD5:0:32}
if [ "$var1" != "$var2" ]; then
if [ ! -f /www/index.php.bak ]; then
cp /www/index.php /www/index.php.bak
fi
cp "$randomrollDir"/files/index.php /www/index.php
fi
# Same dance for the spoofhost config.
spoofHostMD5=`md5sum /pineapple/config/spoofhost`
var1=${spoofHostMD5:0:32}
randomrollSpoofHostMD5=`md5sum "$randomrollDir"/files/spoofhost`
var2=${randomrollSpoofHostMD5:0:32}
if [ "$var1" != "$var2" ]; then
if [ ! -f /pineapple/config/spoofhost.bak ]; then
cp /pineapple/config/spoofhost /pineapple/config/spoofhost.bak
fi
spoofHostIP=`ifconfig br-lan | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}'`
# Write "<br-lan ip> *" into the bundled spoofhost file before installing it.
# (Bug fix: the original quoted the '>' so the whole command, redirection
# included, was merely echoed to stdout and the file was never written.)
echo "$spoofHostIP *" > "$randomrollDir"/files/spoofhost
cp "$randomrollDir"/files/spoofhost /pineapple/config/spoofhost
fi
# Link the web assets once.
if [ ! -L /www/randomroll ]; then
ln -s "$randomrollDir"/randomroll /www/randomroll
fi
| true
|
e8a2baf7de559d3be5fb690403050d7b4cf4b8d8
|
Shell
|
zen-tools/scripts
|
/parallel_work.sh
|
UTF-8
| 1,466
| 3.375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Verify local news titles against opennet.ru, fetching pages in parallel
# with at most (CPU cores + 1) concurrent background jobs.
DATA=(
"1 OpenNews: Каналу #rusunix 2 года."
"3 OpenNews: Тестирование системы UNIX новостей"
"5 OpenNews: Hurd живет!"
"9 OpenNews: Обзор статей в журнале "Открытые системы. СУБД" N 11-12 за 1999 г."
"11 OpenNews: Вышла новая версия программы KDevelop (1.2)."
"13 OpenNews: Зашифрование swap в ядре OpenBSD"
);
# NOTE(review): the entry with inner double quotes above terminates its
# string early, so after word-splitting it becomes several array elements —
# confirm whether that line was meant to use escaped quotes.
let MAX_PROCS=$(nproc)+1
for ITEM in "${DATA[@]}"
do
# Currently running background jobs.
JOB_PIDS=( $(jobs -p) );
# Wait for background work to drain when there are more jobs
# running than cores + 1.
test "${#JOB_PIDS[@]}" -ge "$MAX_PROCS" && wait;
# Each entry is "<id> <title>": split into numeric id and local title.
NEWS_ID="${ITEM/ */}";
LOCAL_NEWS_NAME="${ITEM#* }";
REMOTE_NEWS_NAME=$(
wget -UTest -q "http://www.opennet.ru/opennews/art.shtml?num=$NEWS_ID" -O - | awk -F'>|<' '/<title>/{print $3; exit;}' | iconv -f koi8-r
) && {
test "$LOCAL_NEWS_NAME" = "$REMOTE_NEWS_NAME" && STATUS="OK" || STATUS="FAIL";
echo "$NEWS_ID: '$LOCAL_NEWS_NAME' <=> '$REMOTE_NEWS_NAME' = $STATUS";
} & # <== everything from the REMOTE_NEWS_NAME assignment to here runs in the background
done;
wait;
exit 0;
| true
|
5effa5fd607e315f9e0ab698d85e13d711f9294e
|
Shell
|
jaredlovell/awsdemo
|
/ebs_backup/build.sh
|
UTF-8
| 459
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
# Package each lambda function into a zip and print the S3 upload commands
# (the 'aws s3 cp' lines are echoed, not executed — copy/paste to deploy).
S3LOC=s3://mybucket/lambdabuilds
FUNCTIONS="ebs-backup-snapshot-cleanup ebs-backup-snapshot-create"
for fn in $FUNCTIONS; do
  zip "$fn.zip" "$fn.py"
  echo "aws s3 cp $fn.zip $S3LOC/"
done
echo "aws s3 cp cf.yml $S3LOC/"
#deploy the whole stack is an option too
# aws --region us-west-2 cloudformation create-stack --stack-name EbsBackupLambdaFns --template-url https://s3.amazonaws.com/mybucket/lambdabuilds/cf.yml --capabilities "CAPABILITY_IAM"
| true
|
2d38743f09661f4bb3c9467827dea30c3a4cf388
|
Shell
|
EricZBL/ClusterBuildScripts
|
/install/haproxyInstall.sh
|
UTF-8
| 9,000
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
################################################################################
## Copyright: HZGOSUN Tech. Co, BigData
## Filename:    haproxyInstall.sh
## Description: Install and configure the HAProxy load-balancing proxy
##              (cluster automation script).
## Version:     2.0
## Author:      zhangbaolin
## Created:     2018-06-28
################################################################################
##set -e
cd `dirname $0`
## Directory containing this script
BIN_DIR=`pwd`
cd ..
## Root directory of the installation package
ROOT_HOME=`pwd`
## Configuration directory
CONF_DIR=${ROOT_HOME}/conf
## Installation log directory
LOG_DIR=${ROOT_HOME}/logs
## Installation log file
LOG_FILE=${LOG_DIR}/haproxy.log
## Directory holding the haproxy RPM packages
HAPROXY_RPM_DIR=${ROOT_HOME}/component/basic_suports/haproxyRpm
## Base installation path for supporting tools
INSTALL_HOME_BASIC=$(grep System_SuportDir ${CONF_DIR}/cluster_conf.properties|cut -d '=' -f2)
## Final location of the haproxy RPMs on the target node
HAPROXY_RPM_INSTALL_HOME=${INSTALL_HOME_BASIC}/haproxyRpm
## Final installation root for all bigdata components
INSTALL_HOME=$(grep Install_HomeDir ${CONF_DIR}/cluster_conf.properties|cut -d '=' -f2)
## HAPROXY_INSTALL_HOME: HAProxy installation directory
HAPROXY_INSTALL_HOME=${INSTALL_HOME}/HAProxy
## HAProxy boot (init) script.
## Fixed typo: the original said /etc/ini.d/haproxy — a path that never
## exists — so the existence check in install_ha_init could never succeed.
HAPROXY_INIT=/etc/init.d/haproxy
INSTALL_HOST=$(grep HAproxy_AgencyNode ${CONF_DIR}/cluster_conf.properties|cut -d '=' -f2)
### HAProxy configuration file
### NOTE(review): this expands to "<install>/HAProxy.cfg"; confirm that
### "<install>/HAProxy/haproxy.cfg" was not intended.
HAproxy_conf_file=${HAPROXY_INSTALL_HOME}.cfg
### HAProxy temporary file
TMP_FILE=${HAPROXY_INSTALL_HOME}/tmp
#####################################################################
# Function:    touch_ha_cfgfile
# Description: Generate the haproxy.cfg configuration file; review and
#              adjust the template below before running in production.
# Arguments:   N/A
# Returns:     N/A
# Other:       The echoed text (including its inline ## annotations) is
#              written verbatim to ${HAproxy_conf_file}.
#####################################################################
function touch_ha_cfgfile()
{
echo "
###########全局配置#########
global
log 127.0.0.1 local1 ##[日志输出配置,所有日志都记录在本机,通过local1输出
chroot /var/lib/haproxy
pidfile /var/run/haproxy.pid
maxconn 4000 ##最大连接数
user root ##运行haproxy的用户
group root ##运行haproxy的用户所在的组
daemon ##以后台形式运行harpoxy
stats socket /var/lib/haproxy/stats
########默认配置############
defaults
mode tcp ##默认的模式mode { tcp|http|health },tcp是4层,http是7层,health只会返回OK
log global
option tcplog ##日志类别,采用tcplog
option dontlognull ##不记录健康检查日志信息
option abortonclose ##当服务器负载很高的时候,自动结束掉当前队列处理比较久的链接
option redispatch ##当serverId对应的服务器挂掉后,强制定向到其他健康的服务器,以后将不支持
retries 3 ##3次连接失败就认为是服务器不可用,也可以通过后面设置
timeout queue 1m ##默认队列超时时间
timeout connect 10s ##连接超时
timeout client 1m ##客户端超时
timeout server 1m ##服务器超时
timeout check 10s ##心跳检测超时
maxconn 3000 ##默认的最大连接数
########服务器节点配置########
listen ftp
bind 0.0.0.0:2122 ##设置haproxy监控的服务器和端口号,0.0.0.0默认全网段
mode tcp ##http的7层模式
#balance roundrobin
balance source ##设置默认负载均衡方式,类似于nginx的ip_hash
#server <name> <address>[:port] [param*]
#[param*]为后端设定参数
#weight num权重 默认为1,最大值为256,0表示不参与负载均衡
#check启用后端执行健康检测
#inter num 健康状态检测时间间隔
##server s112 172.18.18.112:2122 weight 1 maxconn 10000 check inter 10s
########统计页面配置########
listen admin_stats
bind 0.0.0.0:8099 ##统计页面监听地址
stats enable
mode http
option httplog
maxconn 10
stats refresh 10s ##页面刷新时间
stats uri /stats ##统计页面url,可通过http://ip:8099/stats访问配置文件
" > ${HAproxy_conf_file}
}
#####################################################################
# Function:    install_haproxy
# Description: Perform the actual HAProxy installation on the proxy node.
# Arguments:   N/A
# Returns:     N/A
# Other:       N/A
#####################################################################
function install_haproxy()
{
# Copy the RPM directory to the target node, then install remotely.
rsync -rvl ${HAPROXY_RPM_DIR} ${INSTALL_HOST}:${INSTALL_HOME_BASIC} > /dev/null
# NOTE(review): the trailing 'rm -rf ${INSTALL_HOME_BASIC}' deletes the whole
# remote support directory right after installing — confirm this is intended.
ssh root@${INSTALL_HOST} "rpm -ivh ${HAPROXY_RPM_INSTALL_HOME}/haproxy-1.5.18-1.el6.x86_64.rpm; which dos2unix; rm -rf ${INSTALL_HOME_BASIC}"
}
#####################################################################
# Function:    cfg_config
# Description: Fill in the generated cfg file with the real backend
#              server list and push it to the proxy node.
# Arguments:   N/A
# Returns:     N/A
# Other:       N/A
#####################################################################
function cfg_config ()
{
echo "" | tee -a $LOG_FILE
echo "*****************************************************" | tee -a $LOG_FILE
echo "" | tee -a $LOG_FILE
echo "配置haproxy.cfg.............................." | tee -a $LOG_FILE
# Array that accumulates host=ip pairs.
declare -a host_iparr
# Read the FTP service node hostnames from the cluster config
# (semicolon-separated list).
FTP_SERVICEIPS=$(grep HAproxy_ServiceNode ${CONF_DIR}/cluster_conf.properties|cut -d '=' -f2)
ftp_arr=(${FTP_SERVICEIPS//;/ })
# Resolve each hostname to its IP via /etc/hosts.
for host_name in ${ftp_arr[@]}
do
ip=$(cat /etc/hosts|grep "$host_name" | awk '{print $1}')
host_ip=${host_name}"="${ip}
host_iparr=(${host_iparr[*]} ${host_ip})
done
# Build one "server <host> <ip>:2122 ..." line per FTP service node.
for ftp_ip in ${host_iparr[@]}
do
echo "server ${ftp_ip//=/ }:2122 weight 1 maxconn 10000 check inter 10s" >> ${TMP_FILE}
done
# Insert the generated server lines after the '##server' template marker.
sed -i "/##server/ r ${TMP_FILE}" ${HAproxy_conf_file}
scp ${HAproxy_conf_file} root@${INSTALL_HOST}:/etc/haproxy/haproxy.cfg
rm -rf ${TMP_FILE}
echo "配置config_Haproxy完毕......" | tee -a $LOG_FILE
}
#####################################################################
# Function:    install_ha_init
# Description: Enable HAProxy's syslog logging at boot: if no init
#              script is present, turn on remote logging (-r) in the
#              rsyslog defaults and push the file to the proxy node.
# Arguments:   N/A
# Returns:     N/A
# Other:       N/A
#####################################################################
function install_ha_init ()
{
if [ ! -e "$HAPROXY_INIT" ]; then
# Add '-r' so rsyslog accepts the log messages HAProxy emits via UDP.
sed -ir 's/SYSLOGD_OPTIONS="-m 0"/SYSLOGD_OPTIONS="-r -m 0"/g' /etc/sysconfig/rsyslog
scp /etc/sysconfig/rsyslog root@${INSTALL_HOST}:/etc/sysconfig/rsyslog
else
echo "File haproxy already there !" | tee -a $LOG_FILE
fi
}
#####################################################################
# Function:    writeUI_file
# Description: Record the HAProxy stats UI address in the shared
#              WebUI_Address file (update in place if already present).
# Arguments:   N/A
# Returns:     N/A
# Other:       N/A
#####################################################################
function writeUI_file()
{
echo "" | tee -a $LOG_FILE
echo "**********************************************" | tee -a $LOG_FILE
echo "准备将haproxy的UI地址写到指定文件中............" | tee -a $LOG_FILE
HaproxyWebUI_Dir=$(grep WebUI_Dir ${CONF_DIR}/cluster_conf.properties|cut -d '=' -f2)
# Resolve the proxy node's IP from /etc/hosts; the stats page listens on 8099.
Install_IP=$(cat /etc/hosts|grep "$INSTALL_HOST" | awk '{print $1}')
Haproxy_UI="http://${Install_IP}:8099/stats"
mkdir -p ${HaproxyWebUI_Dir}
# Update an existing HAproxyUI_Address entry, otherwise append one.
grep -q "HAproxyUI_Address=" ${HaproxyWebUI_Dir}/WebUI_Address
if [ "$?" -eq "0" ] ;then
sed -i "s#^HAproxyUI_Address=.*#HAproxyUI_Address=${Haproxy_UI}#g" ${HaproxyWebUI_Dir}/WebUI_Address
else
echo "##HAproxy_WebUI" >> ${HaproxyWebUI_Dir}/WebUI_Address
echo "HAproxyUI_Address=${Haproxy_UI}" >> ${HaproxyWebUI_Dir}/WebUI_Address
fi
}
#####################################################################
# Function:    main
# Description: Program entry point — install and configure HAProxy.
# Arguments:   N/A
# Returns:     N/A
# Other:       N/A
#####################################################################
function main()
{
mkdir -p ${HAPROXY_INSTALL_HOME}
touch_ha_cfgfile
install_haproxy
cfg_config
writeUI_file
install_ha_init
}
# Program entry point
main
# NOTE(review): there is no matching 'set -x' earlier in this script, so this
# 'set +x' is a no-op — possibly left over from debugging.
set +x
| true
|
4a2f8ad928e55415862ae4021e521d2fe35c90bf
|
Shell
|
j3yxz/test
|
/Esercitazione03/script1.sh
|
UTF-8
| 168
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
# Print every positional argument in ascending order, then forward all of
# them to script2.sh.
echo
echo "ordine crescente:";
i=1
while (( i <= $# )); do
  # ${!i} is indirect expansion: the value of the i-th positional parameter.
  echo "arg ${i} is ${!i}";
  (( i = i + 1 ));
done
./script2.sh "$@";
| true
|
5c50def77999c45d1468f20a358838febf324af8
|
Shell
|
casep/isc_coding
|
/trakautomation/preflightchecks.d/TrakMappingCONV.sh
|
UTF-8
| 1,873
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/sh -e
# Checks the main Trak database has a CONV mapping to DATA which has
# previously been wrongly mapped from source.
. ./functions.sh
# Iterate over every Caché instance, derive the Trak namespace from the
# instance-name suffix (DB/APPn/PRTn), and verify its Global_CONV mapping
# points at <namespace>-DATA. Emits one =OK/=ALERT/=CRITICAL line per check.
check_Unix() {
# iterate through all instances until we find a Trak one
for instance in `cache_getinstances.pl`; do
path=`cache_instance2path.pl "$instance"`
conf="$path/cache.cpf"
if [ ! -f "$conf" ]; then
echo "=CRITICAL - can't find cache.cpf \"$conf\" for instance \"$instance\""
continue
fi
if echo "$instance" | grep -q 'DB$'; then
# main database
namespace=`traknamespace $SITE $ENV`
elif echo "$instance" | grep -q 'APP[0-9]*$'; then
# App instance
namespace=`traknamespace $SITE $ENV`
elif echo "$instance" | grep -q 'PRT[0-9]*$'; then
# Print instance
namespace=EPS
else
continue;
fi
# check for the namespace
nsconfig=`ini_getparam.pl $conf "Namespaces" "$namespace" 2>/dev/null`
if [ -z "$nsconfig" ]; then
echo "=ALERT - No Namespace \"$namespace\" in \"$conf\", not checking for mappings"
continue
fi
# get & check mapping; ini_getparam.pl may legitimately fail (no mapping),
# so suspend -e around it.
set +e
mapping=`ini_getparam.pl $conf "Map.$namespace" Global_CONV 2>/dev/null`
set -e
if [ -z "$mapping" ]; then
echo "=OK - No mapping found for Global_CONV (default to $namespace-DATA) for Namespace \"$namespace\" in \"$conf\""
elif [ "$mapping" = "$namespace-DATA" ]; then
echo "=OK - Found valid mapping for Global_CONV for Namespace \"$namespace\" in \"$conf\""
else
echo "=ALERT - Mapping for Global_CONV for Namespace \"$namespace\" in \"$conf\" should be to \"$namespace-DATA\" (records of the state of DATA should accompany data)"
fi
done
}
# sanity check
# NOTE(review): "$@" would be safer than bare $@ if arguments can contain
# whitespace; left unchanged.
preflightargs $@
# get on with the job
echo "*CHECK - TrakCare CONV mapping"
# Only run during these stages / for these node functions (helpers exit
# quietly on no match — see functions.sh).
checkfieldquit TrakUpgrade,TrakBuild,GoLive $STAGE
checkfieldquit database,app,print,preview $FUNCTIONS
# would have bailed above if no match
osspecific check
| true
|
2c0678ae38572530de668ae346da8de43b949aa7
|
Shell
|
nhurleywalker/GLEAM-X-pipeline
|
/bin/IDG.tmpl
|
UTF-8
| 3,897
| 3.203125
| 3
|
[] |
no_license
|
#! /bin/bash -l
# IDG (image-domain gridding) imaging pipeline template. ALL-CAPS tokens
# (DBDIR, NCPUS, MEMORY, ...) are substituted when the job script is generated.
set -ux
dbdir=DBDIR
# Record a failed task in the tracking database and propagate the exit code.
# $1 - exit status of the preceding pipeline step (0 means success: no-op).
function test_fail {
    local status=$1
    if [[ ${status} != 0 ]]
    then
        ${dbdir}/bin/track_task.py fail --jobid=${SLURM_JOBID} --taskid=1 --finish_time=`date +%s`
        exit ${status}
    fi
}
# Set version number
version=4.0 # First IDG pipeline Feb 2020
# Job resources and inputs (tokens substituted by the job generator).
cores=NCPUS
memory=MEMORY
jobname=JOBNAME
base=BASEDIR
self=SELFCAL
obslist=OBSLIST
export OMP_NUM_THREADS=$cores
# Image name prefix = observation list filename without its extension.
obslistfile=`basename $obslist`
imageprefix=${obslistfile%.*}
# WSClean suffixes for subchannels and MFS
subchans="MFS 0000 0001 0002 0003"
# Minimum uvw for self-calibration (in lambda)
minuv=75
# S/N Level at which to choose masked pixels for deepclean
msigma=3
# S/N Threshold at which to stop cleaning
tsigma=1
# Set max separation for flux_warp crossmatch as ~ 1 beamwidth:
separation=$(echo "120/3600" | bc -l)
# Update database
cd ${base}
${dbdir}/bin/track_task.py start --jobid=${SLURM_JOBID} --taskid=1 --start_time=`date +%s`
# Sorted observation IDs; 'last' drives telescope-config detection below.
obss=($(sort $obslist))
num=${#obss[@]}
last=${obss[-1]}
middle=${obss[$((num / 2))]}
# What does this do?
nvidia-modprobe -u
metafits=`ls -1 $last/*metafits* | head -1`
# Set up telescope-configuration-dependent options based on the highest-numbered observation
if [[ $last -lt 1151402936 ]] ; then
telescope="MWA128T"
basescale=1.1
imsize=4000
robust=-1.0
else
telescope="MWALB"
basescale=0.6
imsize=8000
robust=0.0
fi
# Set up channel-dependent options
chan=`pyhead.py -p CENTCHAN ${metafits} | awk '{print $3}'`
# Pixel scale
scale=`echo "$basescale / $chan" | bc -l` # At least 4 pix per synth beam for each channel
# Calculate min uvw in metres
minuvm=`echo "234 * $minuv / $chan" | bc -l`
MWAPATH=/opt/mwa/mwa_pb/src/mwa_pb/data/
# Space-separated list of all measurement sets.
mslist=""
for obsnum in ${obss[@]} ; do mslist="$mslist $obsnum/$obsnum.ms" ; done
# Optional self-calibration pass: shallow joint clean, then per-obs
# calibration against the resulting model.
if $self
then
wsclean -mgain 0.8 \
-aterm-config ${jobname}_aterm.config \
-aterm-kernel-size 32 \
-abs-mem ${memory} \
-use-idg -idg-mode hybrid \
-nmiter 2 \
-j ${cores} \
-join-channels \
-channels-out 4 \
-niter 10000000 \
-mwa-path ${MWAPATH} \
-auto-threshold $msigma \
-name ${imageprefix} \
-size ${imsize} ${imsize} \
-scale ${scale:0:8} \
-weight briggs ${robust} \
-pol I \
-data-column CORRECTED_DATA \
$mslist
for obsnum in ${obss[@]}
do
cd $obsnum
calibrate -j ${cores} -absmem ${memory} -minuv $minuvm ${obsnum}.ms ${obsnum}_postidg_solutions.bin | tee idg_calibrate.log
aocal_plot.py --refant=127 ${obsnum}_postidg_solutions.bin
flaggedchans=`grep "gains to NaN" calibrate.log | awk '{printf("%03d\n",$2)}' | sort | uniq | wc -l`
if [[ $flaggedchans -gt 200 || ! -s ${obsnum}_postidg_solutions.bin ]]
then
echo "More than a third of the channels were flagged!"
echo "Do not apply these calibration solutions."
mv ${obsnum}_postidg_solutions.bin ${obsnum}_postidg_solutions.bad
else
applysolutions ${obsnum}.ms ${obsnum}_postidg_solutions.bin
# NOTE(review): this 'exit 1' aborts the whole job right after a
# *successful* applysolutions, skipping the remaining observations and
# the final deep clean. It looks misplaced (perhaps meant for the
# failure branch above) — confirm before relying on this template.
exit 1
fi
cd ../
done
mkdir ${imageprefix}_before_self-cal
mv ${imageprefix}*fits ${imageprefix}_before_self-cal/
fi
# Final deep multiscale clean over all measurement sets.
wsclean \
-multiscale -mgain 0.85 -multiscale-gain 0.15 \
-aterm-config aterm.config \
-aterm-kernel-size 32 \
-use-idg -idg-mode hybrid \
-abs-mem ${memory} \
-nmiter 5 \
-j ${cores} \
-niter 10000000 \
-mwa-path ${MWAPATH} \
-auto-mask $msigma \
-auto-threshold $tsigma \
-name ${imageprefix} \
-size ${imsize} ${imsize} \
-scale ${scale:0:8} \
-weight briggs ${robust} \
-pol I \
-join-channels \
-channels-out 4 \
-data-column CORRECTED_DATA \
$mslist
test_fail $?
${dbdir}/bin/track_task.py finish --jobid=${SLURM_JOBID} --taskid=1 --finish_time=`date +%s`
| true
|
bfab94beed0bb7db64e9fa90b9595b3d1243e9ce
|
Shell
|
UAMS-DBMI/PosdaTools
|
/posda/fastapi/start_api.sh
|
UTF-8
| 196
| 2.890625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Start the FastAPI app with uvicorn once the database is reachable;
# bail out otherwise so the supervisor can retry the container.
if python3 check_db.py
then
  uvicorn --workers $API_WORKERS --host 0.0.0.0 --port $API_PORT main:app
else
  echo "The database is not available yet, exiting."
  exit 1
fi
| true
|
123f17b0fad29ed68dd14976a54ad4103ab14fef
|
Shell
|
feralresearch/exts-quiz-maker
|
/env.sh
|
UTF-8
| 1,299
| 4
| 4
|
[
"MIT",
"CC-BY-4.0"
] |
permissive
|
#!/bin/bash
# Generate a JS file exposing REACT_APP_* variables from a .env file as
# window._env_, with environment variables taking precedence over the file.
# Usage: env.sh <dotenv-file> <output-file>   (paths relative to this script)
# Adapted from technique described here:
# https://medium.freecodecamp.org/how-to-implement-runtime-environment-variables-with-create-react-app-docker-and-nginx-7f9d42a91d70
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
# Fail early if either argument is missing so we never run 'rm -rf' against
# a bare directory path (the original would 'rm -rf "$DIR/"' on no args).
DOTENV=$DIR/${1:?usage: env.sh <dotenv-file> <output-file>}
OUTPUT=$DIR/${2:?usage: env.sh <dotenv-file> <output-file>}
echo "$DOTENV"
# Rebuild output file
rm -rf "$OUTPUT"
touch "$OUTPUT"
# Add assignment
echo "// This is a generated file, do not edit" >> "$OUTPUT"
echo "window._env_ = {" >> "$OUTPUT"
# Read each line in .env file
# Each line represents key=value pairs
while read -r line || [[ -n "$line" ]];
do
  # Only lines containing '=' are candidates.
  if printf '%s\n' "$line" | grep -q -e '='; then
    if [[ $line == *"REACT_APP_"* ]]; then
      varname=$(printf '%s\n' "$line" | sed -e 's/=.*//')
      varvalue=$(printf '%s\n' "$line" | sed -e 's/^[^=]*=//')
      # Read value of current variable if exists as environment variable.
      # This allows us to overwrite from docker settings.
      value=$(printf '%s\n' "${!varname}")
      varvalue=$(printf '%s' "$varvalue" | sed -e "s/\"//g")
      # Otherwise use value from .env file
      [[ -z $value ]] && value=${varvalue}
      # Append as kvp to JS file
      echo " $varname: '$value'," >> "$OUTPUT"
    fi
  fi
done < "$DOTENV"
echo "};" >> "$OUTPUT"
| true
|
445ada560fe10601a5b880834257f0c3739d54a5
|
Shell
|
gsilvers/utils
|
/update_hosts_fromweb.sh
|
UTF-8
| 1,747
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Author: Greg Silverstein greg.silverstein@gmail.com
#
# This script is released under an mit license but basically use it
# any way you want. Give me credit if you want whatever but
# it is provided with no guarantees use at your own risk
#
# please note this script asks for sudo and modifies your /etc/hosts
# and uses a file from the internet https://winhelp2002.mvps.org/hosts.txt
# please view that site to ensure you trust it
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
echo "         _              "
echo "        | |             "
echo "     ___| |_ ___  _ __  "
echo "    / __| __/ _ \| '_ \ "
echo "    \__ \ || (_) | |_) | "
echo "    |___/\__\___/| .__/ "
echo "                 | |    "
echo "                 |_|    "
echo "This script downloads a file from the web "
echo "https://winhelp2002.mvps.org/hosts.txt "
echo "I trust this file but review that site to be sure you are "
echo "Please note it also truncs your existing hosts file "
echo "if you have anything in your hosts you care about it "
echo "alter this script to account "
echo "ok with it [y/n to continue or exit]: "
read -n 1 -r in_prompt_one
echo ""
if [[ "$in_prompt_one" = y ]]
then
    # Download to a temp file. Use '>' (truncate) rather than the original
    # '>>' so a rerun doesn't append a second copy of the blocklist.
    curl https://winhelp2002.mvps.org/hosts.txt > ~/hosts_temp.txt
    # Backup suffix date. (Removed the original's unused 'var=%FORMAT_STRING'
    # placeholder and the immediately-overwritten duplicate 'now=' line.)
    now=`date +"%Y-%m-%d"`
    echo "now backing up your /etc/hosts file as of"
    echo "${now}"
    sudo cp /etc/hosts /etc/hosts_bu_${now}
    sudo -- sh -c "cat ~/hosts_temp.txt > /etc/hosts"
    rm ~/hosts_temp.txt
    # NOTE(review): dscacheutil is macOS-only; on Linux this will just fail
    # harmlessly — confirm target platform.
    dscacheutil -flushcache; exit
else
    #¯\_(ツ)_/¯
    echo "ok bye bye"
fi
| true
|
73826f40fe3f115d0714032b6c9e39a462b6352f
|
Shell
|
leopires/daily-sh-script
|
/conda-update.sh
|
UTF-8
| 271
| 3.40625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Update all conda packages, clean caches, and list the environment.
set -e
# Bug fix: with 'set -e', a failing `which conda` inside the assignment
# aborted the script silently before the friendly error message could run.
# 'command -v' is the portable check; '|| true' keeps -e from firing.
CONDA_PATH=$(command -v conda || true)
if [ -z "${CONDA_PATH}" ]; then
  echo "Please check if you have 'conda' installed."
  exit 1;
else
  echo "'conda' found at: '${CONDA_PATH}'"
fi
conda update --all -y
conda clean --all -y
conda list
exit 0
| true
|
9baa1ffee20a5c792a6d26a12cf1ef677a34c5f4
|
Shell
|
buren/dot-bash
|
/profiles/unix-profile.bash
|
UTF-8
| 3,893
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# Shared shell profile: bash options, history settings, colors and aliases.
if [[ $SHELL != '/bin/zsh' ]]; then
# Case-insensitive globbing (used in pathname expansion)
shopt -s nocaseglob
# Append to the Bash history file, rather than overwriting it
shopt -s histappend
# Autocorrect typos in path names when using `cd`
shopt -s cdspell
fi
# Detect which `ls` flavor is in use
if ls --color > /dev/null 2>&1; then # GNU `ls`
colorflag="--color"
else # OS X `ls`
colorflag="-G"
fi
# Highlight section titles in manual pages
export LESS_TERMCAP_md="${ORANGE}"
# Always enable colored `grep` output
# NOTE(review): GREP_OPTIONS is deprecated in modern GNU grep (it warns on
# every invocation) — consider replacing with a grep alias.
export GREP_OPTIONS="--color=auto"
export GREP_COLOR='1;31' # green for matches
# Larger bash history (allow 32^3 entries; default is 500)
export HISTSIZE=32768
export HISTFILESIZE=${HISTSIZE}
export HISTCONTROL=ignoredups
# Make some commands not show up in history
export HISTIGNORE="ls:cd:cd -:pwd:exit:date:* --help"
# Highlight section titles in manual pages
export LESS_TERMCAP_md="$ORANGE"
# Don’t clear the screen after quitting a manual page
export MANPAGER="less -X"
## __DOT_BASH__ ##
source ~/.buren/dot-bash/setup/profile-install/unix-profile-install.bash # Install functions
## __UNIX__ ##
alias resource="source ~/.bash_profile"
# Reload the shell (i.e. invoke as a login shell)
alias reload="exec $SHELL -l"
# List all files colorized in long format
alias l="ls -lF ${colorflag}"
# List all files colorized in long format, including dot files
alias la="ls -laF ${colorflag}"
# List only directories
alias lsd="ls -lF ${colorflag} | grep --color=never '^d'"
# Always use color output for `ls`
alias ls="command ls ${colorflag}"
export LS_COLORS='no=00:fi=00:di=01;34:ln=01;36:pi=40;33:so=01;35:do=01;35:bd=40;33;01:cd=40;33;01:or=40;31;01:ex=01;32:*.tar=01;31:*.tgz=01;31:*.arj=01;31:*.taz=01;31:*.lzh=01;31:*.zip=01;31:*.z=01;31:*.Z=01;31:*.gz=01;31:*.bz2=01;31:*.deb=01;31:*.rpm=01;31:*.jar=01;31:*.jpg=01;35:*.jpeg=01;35:*.gif=01;35:*.bmp=01;35:*.pbm=01;35:*.pgm=01;35:*.ppm=01;35:*.tga=01;35:*.xbm=01;35:*.xpm=01;35:*.tif=01;35:*.tiff=01;35:*.png=01;35:*.mov=01;35:*.mpg=01;35:*.mpeg=01;35:*.avi=01;35:*.fli=01;35:*.gl=01;35:*.dl=01;35:*.xcf=01;35:*.xwd=01;35:*.ogg=01;35:*.mp3=01;35:*.wav=01;35:'
# Search shell history
alias hs='history | grep --color=auto'
alias o='open'
alias c='clear'
alias mv='mv -i' # prompt if duplicate filename
# Colored and paginated directory tree
alias treep='tree -C "$@" | less -R'
# Search running processes
alias tm='ps -ef | grep --color=auto'
alias cpu_cores='nproc'
# Clojure
# Run Leiningen, installing it first (via the dot-bash helper) when
# /bin/lein is missing; all arguments are forwarded.
lein() {
if [[ ! -f /bin/lein ]]; then
echo "Lein clojure not installed"
echo "Installing..."
__dot-bash-install-lein-clojure
fi
/bin/lein "$@"
}
# Translate text with google-translate-cli
# Installs the CLI on first use, supports --help/-help, and otherwise
# forwards all arguments to 'trs' (e.g. 'translate {=sv} "hello"').
translate() {
if [[ ! -d ~/.buren/bin/google-translate-cli ]];then
echo "google-translate-cli not found.."
echo "Init install script"
sleep 1
__dot-bash-install-translate-cli
fi
if [[ "$1" == "--help" ]] || [[ "$1" == "-help" ]];then
echo -e "usage:
$ translate {=en+ro+de+it} \"hola mundo\"
hello world
Bună ziua lume
Hallo Welt
ciao mondo
$ translate \"Saluton, mondo\"
Hello, world"
return
fi
trs "$@"
}
# Convenience wrappers around translate().
alias translate_to_swedish='translate {=sv}'
alias translate_from_swedish='translate {sv=en}'
## __MISC__ ##
alias resize_to_width='convert -resize' # Resize args to width, keep aspect ratio
# Clean up a whiteboard photo: sharpen strokes (difference-of-Gaussians),
# bleach the background and normalize levels via ImageMagick.
# Usage: cleanup_whiteboard <input-image> <output-image>
cleanup_whiteboard() {
  if [[ $1 == "--help" ]] || [[ $1 == "-help" ]] || [[ $1 == "-h" ]]; then
    echo "Clean up whiteboard picture"
    echo "Usage: cleanup_whiteboard <input-image> <output-image>"
    return
  fi
  # Quote the file arguments so paths containing spaces work.
  # NOTE(review): '-channel RBG' kept as-is; confirm 'RGB' wasn't intended
  # (ImageMagick accepts either letter order for the same channel set).
  convert "$1" -morphology Convolve DoG:15,100,0 -negate -normalize -blur 0x1 -channel RBG -level 60%,91%,0.1 "$2"
}
# Generate a random base64-encoded password; the optional argument is the
# number of random bytes to draw (default 12, yielding 16 output chars).
random_password() {
  local nbytes=${1:-12}
  openssl rand -base64 "$nbytes"
}
# Open a certain music video with the platform-appropriate opener
# ('open' on macOS, 'xdg-open' elsewhere).
ricecake() {
  local clip='https://youtu.be/uYHAR8Xzsyo'
  case "$(uname)" in
    Darwin) open "$clip" ;;
    *) xdg-open "$clip" ;;
  esac
}
| true
|
d15737f98d9688d6c02d7ab8ef6628bf83b76375
|
Shell
|
cmcmullan-r7/dotfiles
|
/git_template/hooks/ctags
|
UTF-8
| 218
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash
# Git hook: regenerate the repository's ctags index atomically inside .git.
set -e
# Work from the repository root.
cd "$(git rev-parse --git-dir)/.."
# When this script receives an exit signal, delete the temp file.
trap 'rm -f .git/$$.tags' EXIT
# Build into a PID-named temp file, then rename into place so readers
# never see a half-written tags file.
ctags -f ".git/$$.tags"
mv ".git/$$.tags" .git/tags
| true
|
66e3e4c1d9c5ab4379c50d5cd3927de57956972b
|
Shell
|
sfindeisen/rsync-backup
|
/rsync-backup-common.sh
|
UTF-8
| 1,289
| 4.125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Common definitions shared by the rsync-backup scripts (sourced, not run).
# rsync flags
#
# Note:
# -x, --one-file-system don't cross filesystem boundaries
# Assumes SCRIPT_DIR is set by the sourcing script — TODO confirm.
RSYNC_OPTS="-a -HAEXR -x -h --progress --exclude-from=${SCRIPT_DIR}/rsync-exclude.txt"
# Executable script name (without any .sh suffix) used as the log prefix.
APPNAME=$(basename "$0" | sed "s/\.sh$//")

# Leveled loggers; warnings and errors go to stderr.
log_info() { echo "$APPNAME: [info] $1"; }
log_warning() { echo "$APPNAME: [warning] $1" 1>&2; }
log_error() { echo "$APPNAME: [error] $1" 1>&2; }

# Report a fatal error and exit(1). The caller's exit status is captured on
# entry; when non-zero it is appended to the message.
fatal() {
    code=$?
    if [ 0 -eq "$code" ]; then
        log_error "$*"
    else
        # Bug fix: the original printed "(status $?)" here, but by this
        # point $? is the exit status of the '[' test above (always 1 on
        # this branch), not the failed command's status.
        log_error "$* (status $code)"
    fi
    exit 1
}
# Make sure BACKUP_SRC_DIR is set; abort the whole script otherwise.
check_src_dir() {
    [ -n "$BACKUP_SRC_DIR" ] && return 0
    log_error "missing BACKUP_SRC_DIR"
    exit 1
}
# Make sure BACKUP_DST_DIR is set; abort the whole script otherwise.
check_dst_dir() {
    [ -n "$BACKUP_DST_DIR" ] && return 0
    log_error "missing BACKUP_DST_DIR"
    exit 1
}
# Make sure the directory $1 exists; create it (mode 700, with parents)
# if not, dying on failure.
check_create_dir() {
    dir=$1
    if [ ! -d "$dir" ]; then
        log_warning "Directory ($dir) does not exist - creating..."
        # Quote "$dir" so paths containing spaces are created as one
        # directory (the original unquoted form split them into several).
        mkdir -v -m 700 -p "$dir" || fatal "unable to create directory ($dir)"
    fi
}
# Delete a directory without any questions.
delete_dir() {
    target=$1
    log_info "force delete dir: $target"
    # Grant user rwx throughout first so permission bits can't block rm.
    chmod -R u+rwx "$target"
    rm -rf "$target"
}
| true
|
08e6bcd29ff75e5b07132cbc6f3b039ccb719471
|
Shell
|
dmitry-salnikov/meteorka
|
/helpers/docker-tools.sh
|
UTF-8
| 513
| 3.296875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Helper functions for building, tagging and pushing the project's Docker
# image. Tags are derived from git state via git-tools.sh.
DOCKER_HUB_USERNAME="mitusha"
source ./git-tools.sh "$DOCKER_HUB_USERNAME"
# Image references provided by the git-ref-* helpers in git-tools.sh:
# repository, current-commit tag, and 'latest' tag.
REPO_REF="$(git-ref-repo)"
TAG_HEAD="$(git-ref-commit)"
TAG_LATEST="$(git-ref-latest)"
# Build the image tagged with the current commit.
docker-build() {
docker build -t "$TAG_HEAD" .
}
# Re-tag the current-commit image with an arbitrary tag ($1).
docker-tag() {
docker tag "$TAG_HEAD" "$1"
}
# Re-tag the current-commit image as 'latest'.
docker-tag-latest() {
docker tag "$TAG_HEAD" "$TAG_LATEST"
}
# Push every tag of the repository reference.
docker-push() {
docker push "$REPO_REF"
}
# Tag as latest, then push.
docker-publish() {
docker-tag-latest && docker-push
}
# Full build-and-release cycle.
docker-build-n-publish() {
docker-build && docker-publish
}
| true
|
ccea78dc7465410f018f01527387a1e199adfa81
|
Shell
|
KatieMishra/VoiceClassification
|
/sphinxbase/autoconf-2.69/automake-1.14/t/warnings-win-over-strictness.sh
|
UTF-8
| 2,104
| 3.125
| 3
|
[
"GPL-2.0-only",
"GPL-3.0-only",
"GPL-1.0-or-later",
"FSFAP",
"GPL-3.0-or-later",
"Autoconf-exception-3.0",
"LicenseRef-scancode-other-copyleft",
"GPL-2.0-or-later",
"MIT",
"BSD-2-Clause"
] |
permissive
|
#! /bin/sh
# Copyright (C) 2011-2013 Free Software Foundation, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Check that, on the command line, explicitly-defined warnings take
# precedence over implicit strictness-implied warnings.
. test-init.sh
# We want complete control over automake options.
AUTOMAKE=$am_original_AUTOMAKE
# Run automake with the given options and require a clean (empty) stderr,
# i.e. the portability warning was suppressed.
ok ()
{
AUTOMAKE_run $*
test ! -s stderr
}
# Run automake with the given options and require exactly one portability
# diagnostic (about the non-portable ':=' in Makefile.am) on stderr.
ko ()
{
AUTOMAKE_run $*
grep '^Makefile\.am:.*:=.*not portable' stderr
test $(wc -l <stderr) -eq 1
}
# Rewrite the automake options in file $2 to be exactly $1: edits the
# AUTOMAKE_OPTIONS line (Makefile.am) or the AM_INIT_AUTOMAKE call
# (configure.ac). Tracing is suspended around the sed to keep logs readable.
set_am_opts()
{
set +x
sed <$2 >$2-t -e "s|^\\(AUTOMAKE_OPTIONS\\) *=.*|\\1 = $1|" \
-e "s|^\\(AM_INIT_AUTOMAKE\\).*|\\1([$1])|"
mv -f $2-t $2
set -x
cat $2
}
# Files required in gnu strictness.
touch README INSTALL NEWS AUTHORS ChangeLog COPYING
cat > Makefile.am <<END
AUTOMAKE_OPTIONS =
FOO := bar
END
$ACLOCAL
# Command-line warnings must beat command-line strictness, in either order.
ko --foreign -Wportability
ko -Wportability --foreign
ok --gnu -Wno-portability
ok -Wno-portability --gnu
set_am_opts '' Makefile.am
rm -rf autom4te*.cache
# Same checks with options declared in AM_INIT_AUTOMAKE (configure.ac);
# the aclocal cache is cleared so each edit is re-read.
set_am_opts 'foreign -Wportability' configure.ac
ko
rm -rf autom4te*.cache
set_am_opts '-Wportability foreign' configure.ac
ko
rm -rf autom4te*.cache
set_am_opts 'gnu -Wno-portability' configure.ac
ok
rm -rf autom4te*.cache
set_am_opts '-Wno-portability gnu' configure.ac
ok
rm -rf autom4te*.cache
set_am_opts '' configure.ac
# And once more via AUTOMAKE_OPTIONS in Makefile.am.
set_am_opts 'foreign -Wportability' Makefile.am
ko
set_am_opts '-Wportability foreign' Makefile.am
ko
set_am_opts 'gnu -Wno-portability' Makefile.am
ok
set_am_opts '-Wno-portability gnu' Makefile.am
ok
:
| true
|
e4493392347d1cfef824fec3fccc5f1c062fe38d
|
Shell
|
ACAD-UofA/tools
|
/src/foldreads/pipeline/09_mappingStats.sh
|
UTF-8
| 1,836
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/sh
# Compute per-sample mapping statistics from *.dedup.realigned.calmd.bam
# files (one background job per BAM) and assemble them into mappingStats.txt.
module load SAMtools/1.4.1-foss-2016b
stats() {
f=$1
samtools depth $f > ${f}.depth
sample=`basename $f | cut -d . -f1`
ref=`basename $f | cut -d . -f2`
tot_pairs=`awk '/^Total read pairs/ {a+=$NF} END {print a}' ${sample}_*.metrics`
folded=`awk '/folded read pairs/ {a+=$(NF-1)} END {print a}' ${sample}_*.metrics`
complete_hp=`awk '/read pairs with complete/ {a+=$NF} END {print a}' ${sample}_*.metrics`
mapped=`samtools view -c ${f/\.dedup\.realigned\.calmd/}`
dedup=`samtools view -c $f`
covered=`wc -l ${f}.depth | awk '{print $1}'`
genome_size=`samtools idxstats $f | awk -v FS="\t" '{sum += $2} END {print sum}'`
perc_covered=`echo "$covered/$genome_size*100" | bc -l | awk '{printf "%.2f\n",$1}'`
depth=(`awk -v var="$genome_size" \
'{count+=$3; countsq+=$3*$3} END \
{printf("%.4f %.4f %.4f %.4f\n", \
count/NR, \
sqrt(countsq/NR - (count/NR)**2), \
count/var, \
sqrt(countsq/var - (count/var)**2))}' \
${f}.depth`)
length=(`samtools view $f \
| awk 'BEGIN \
{count=0; avg=0; std=0} \
{count=count+1; lgth=lgth+length($10); std=std+(length($10)-lgth/count)*(length($10)-lgth/count)} END \
{printf("%.2f %.2f\n", \
lgth/count, \
sqrt((std)/(count-1)))}'`)
printf "$sample\t\
$ref\t\
$tot_pairs\t\
$folded\t\
$complete_hp\t\
$mapped\t\
$dedup\t\
$perc_covered\t\
${depth[0]}\t\
${depth[1]}\t\
${depth[2]}\t\
${depth[3]}\t\
${length[0]}\t\
${length[1]}\t\
\n" > mappingStats.$sample.$ref.txt
rm ${f}.depth
}
for i in *.dedup.realigned.calmd.bam; do
stats $i &
done
wait
printf "SampleID\t\
Reference\t\
Pairs\t\
Folded\t\
Hairpin\t\
Mapped\t\
Unique\t\
Coverage\t\
Depth_covered\t\
Depth_covered_Std\t\
Depth_genome\t\
Depth_genome_Std\t\
Length\t\
Length_Std\t\
\n" > mappingStats.txt
cat mappingStats.*.*.txt >> mappingStats.txt
rm -f mappingStats.*.*.txt
| true
|
cf9038eafd331fa3edc59bdebd662b7d95d1dec3
|
Shell
|
nikisix/ff
|
/install.sh
|
UTF-8
| 523
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
# Install ff into ~/.ff: fetch the scripts, register an alias in
# ~/.bashrc, and make the files executable.
FF_DIR=~/.ff

# BUG FIX: plain 'mkdir' failed on re-install; -p is idempotent.
mkdir -p "$FF_DIR"
cd "$FF_DIR" || exit 1

# -f: fail on HTTP errors instead of saving an error page as the script.
curl -fO "https://raw.githubusercontent.com/nikisix/ff/master/ff.sh"
curl -fO "https://raw.githubusercontent.com/nikisix/ff/master/ff.py"
curl -fO "https://raw.githubusercontent.com/nikisix/ff/master/README"

# Append the alias only if it is not already present, so repeated
# installs do not pile up duplicate lines in ~/.bashrc.
grep -qxF "alias ff='. $FF_DIR/ff.sh'" ~/.bashrc 2>/dev/null || \
    echo "alias ff='. $FF_DIR/ff.sh'" >> ~/.bashrc

chmod 775 "$FF_DIR/ff.py" "$FF_DIR/ff.sh"

#http://stackoverflow.com/questions/670191/getting-a-source-not-found-error-when-using-source-in-a-bash-script
. ~/.bashrc #must be '.' not source. '.' is for bash, source is for csh
| true
|
66321a4277ba6fa415ab7a08ade700366c7ed2b5
|
Shell
|
AndrewKralovec/svn-publisher
|
/svn-branch-cp.sh
|
UTF-8
| 755
| 3.546875
| 4
|
[] |
no_license
|
PR_BRANCH_NAME=${1:-"pull_request_preview"}
PR_MESSAGE=${2:-"Creating a PR branch for /app/trunk."}

# NOTE: Svn requires to be in the working dir to execute.
[[ -n "$REPO_LOCAL_PATH" ]] && cd "$REPO_LOCAL_PATH"

# Resolve the remote repository base URL: an explicit REMOTE_BASE_URL
# override wins; otherwise ask the working copy for its repository root.
# TODO: Fix code repeat
resolve_base_url() {
    if [[ -n "$REMOTE_BASE_URL" ]]; then
        echo "$REMOTE_BASE_URL"
    else
        svn info | grep 'Repository Root' | awk '{print $NF}'
    fi
}

repo_source_url="$(resolve_base_url)"

# Create a pr branch to merge and test changes from bugs
svn copy $repo_source_url/branches/trunk \
    $repo_source_url/branches/$PR_BRANCH_NAME \
    -m "$PR_MESSAGE"

# Switch to that branch when dotnet
# TODO: Make configurable
svn switch $repo_source_url/branches/$PR_BRANCH_NAME
| true
|
dad7e072251ede17f9566af48755e9932cd2c391
|
Shell
|
amyburgh/init
|
/system/03
|
UTF-8
| 331
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/sh
# Step 3 of the quiz: explain how to make a hostname change permanent,
# then wait for Enter and chain into step 04.
# NOTE: relies on the sh builtin echo interpreting \033 escapes (true
# for dash); bash's echo would need -e to do the same.
echo "\033[1;33m3. What file has to be modified to make it permanent?\033[0m"
echo "etc/hostname"
echo "\033[1;33mCommand:\033[0m sudo echo \"new_name\">etc/hostname"
echo "\n\033[1;33mDeduction:\033[0m The command above will replace the hostname with \"new_name\" if the user has admin privileges."
# BUG FIX: 'read -p ""' is a bashism that dash rejects; a plain read
# into a throw-away variable is POSIX and the prompt was empty anyway.
read dummy ; ./04
| true
|
4cb7699772163687fb97680773f06863342ebb44
|
Shell
|
Nanjangpan/ModuBoan
|
/취약점 스크립트/scripts/U-26.sh
|
UTF-8
| 285
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
# [U-26] automountd removal check: print PASS when no automountd
# process is running, WARN otherwise.
echo -e "[U-26] automountd 제거"
# BUG FIX: the original '[ !$(ps ...) ]' concatenated '!' with the ps
# output, degenerating into a non-empty-string test that was (almost)
# always true.  Test the pipeline's exit status instead: grep succeeds
# exactly when an automountd process line (other than our grep) exists.
if ! ps -ef | grep "automountd" | grep -v "grep" > /dev/null; then
	echo -n -e "\033[34m[PASS]\033[0m"
else
	echo -n -e "\033[31m[WARN]\033[0m"
fi
echo -e "automountd에 대한 설정이 비활성화 되어 있어야 합니다"
echo -e " "
| true
|
a86c2c6db8f3c4be1bb1850ff79ba29e41e67767
|
Shell
|
seismicmike/mmscraps-wp
|
/.docksal/commands/sync
|
UTF-8
| 378
| 2.671875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Docksal custom command: pull a fresh database backup from a remote
# environment (default: live) and import it into the local stack.
# Usage: fin sync [env]
ENV=${1:-live}
# NOTE(review): 'shift' returns non-zero when no argument was given
# (harmless here — no 'set -e') — confirm intentional.
shift
cd $PROJECT_ROOT
# Drop any stale local dump before downloading a new one.
rm -f $PROJECT_ROOT/backups/database.sql.gz
fin drush cache-clear drush -y
echo "Downloading database"
# terminus is the Pantheon CLI; COMPOSE_PROJECT_NAME is used as the
# site name — presumably they match; verify per project.
terminus backup:create $COMPOSE_PROJECT_NAME.$ENV --element=db
terminus backup:get $COMPOSE_PROJECT_NAME.$ENV --element=db --to=$PROJECT_ROOT/backups/database.sql.gz
fin import $PROJECT_ROOT/backups/database.sql.gz
| true
|
1eeb12d1344d384b7dfdd268c20aa8a1e0b2587b
|
Shell
|
schmkr/dotfiles
|
/bash/functions.sh
|
UTF-8
| 1,938
| 3.734375
| 4
|
[] |
no_license
|
## A set of useful functions, from:
## https://github.com/mathiasbynens/dotfiles/blob/master/.functions
##
# Start an HTTP server from a directory, optionally specifying the port
# NOTE(review): uses the Python 2 SimpleHTTPServer module (removed in
# Python 3) and the macOS 'open' command — confirm environment.
function server() {
	local port="${1:-8000}"
	# Open the browser after a short delay, in the background, so the
	# server below has time to start listening.
	sleep 1 && open "http://localhost:${port}/" &
	# Set the default Content-Type to `text/plain` instead of `application/octet-stream`
	# And serve everything as UTF-8 (although not technically correct, this doesn’t break anything for binary files)
	python -c $'import SimpleHTTPServer;\nmap = SimpleHTTPServer.SimpleHTTPRequestHandler.extensions_map;\nmap[""] = "text/plain";\nfor key, value in map.items():\n\tmap[key] = value + ";charset=UTF-8";\nSimpleHTTPServer.test();' "$port"
}
# Pretty-print and syntax-highlight JSON strings or files.
# Usage: `json '{"foo":42}'` or `echo '{"foo":42}' | json`
function json() {
	# A terminal on stdin means the JSON came in as arguments;
	# otherwise read it from the pipe.
	if [ -t 0 ]; then
		printf '%s' "$*" | python -mjson.tool | pygmentize -l javascript
	else
		python -mjson.tool | pygmentize -l javascript
	fi
}
# Print the current UTC time in MySQL DATETIME format (YYYY-MM-DD HH:MM:SS).
now() {
	date -u '+%Y-%m-%d %H:%M:%S'
}
# Datestamp: print today's date as YYYYMMDD.
# https://twitter.com/climagic/status/301023842300731394
datestamp() {
	date '+%Y%m%d'
}
# Make a directory (parents included) and immediately enter it.
mcd() {
	mkdir -p "$1" || return
	cd "$1"
}
# search for occurrences of text in files
# (but exclude .svn directory)
# Usage: ft PATTERN [NAME-GLOB]
ft() {
	if [ -z "$2" ]; then
		find . -type f -wholename "*.*" -not -iwholename "*.svn*" -print0 | xargs -0 fgrep -i "$1"
	else # apply the filter
		# BUG FIX: this branch ran 'fgrep i' (missing dash), which made
		# grep treat the literal "i" as the pattern and "$1" as a file.
		find . -type f -wholename "$2" -not -iwholename "*.svn*" -print0 | xargs -0 fgrep -i "$1"
	fi
}
# Truncate oversized lines on input. Mostly for display use. Example: grep foo log | fitterm
# source: https://twitter.com/climagic/status/299554737756438528
# NOTE(review): 'tput cols' requires a usable $TERM; in non-interactive
# contexts it can fail and leave the cut range malformed — confirm.
fitterm() {
	cut -c 1-$( tput cols );
}
# horizontal rule for you CLI :-)
# https://twitter.com/climagic/status/433397460623708160
# Repeats its argument(s) (default '=') to fill one terminal line.
# NOTE(review): relies on $COLUMNS, which interactive shells set but
# scripts typically do not — 'head -c' would then error; confirm.
hr() {
	yes -- ${@:-=} | tr -d $'\n' | head -c $COLUMNS ;
}
| true
|
523d9d1e3d69f8b06fd5f66a5fc9b0a057f440d6
|
Shell
|
bendudson/py4cl
|
/install.sh
|
UTF-8
| 9,439
| 3.90625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# cl-travis install script. Don't remove this line.
set -e
# get <destination> <url(s)>
# Download <destination> by trying each URL in order; stop at the first
# mirror that succeeds.  Returns 1 when every mirror fails.
get() {
    destination=$1; shift
    for url in "$@"; do
        echo "Downloading ${url}..."
        if ! curl --no-progress-bar --retry 10 -o "$destination" -L "$url"; then
            echo "Failed to download ${url}."
            continue
        fi
        return 0
    done
    return 1
}
# unpack <uncompression option> <file> <destination>
# Extract a tarball into <destination>, dropping the archive's single
# top-level directory (--strip-components=1).
unpack() {
    opt=$1
    file=$2;
    destination=$3;
    # BUG FIX: the progress message printed $1 (the compression flag,
    # e.g. "-z") instead of the tarball name ($2).
    echo "Unpacking tarball $file into $destination..."
    mkdir -p "$destination"
    tar -C "$destination" --strip-components=1 "$opt" -xf "$file"
}
install_i386_arch() {
# hack for issue #17
sudo sed -i -e 's/deb http/deb [arch=amd64] http/' "/etc/apt/sources.list.d/google-chrome.list"
sudo dpkg --add-architecture i386
sudo apt-get update -qq
sudo apt-get install -y libc6:i386
}
# add_to_lisp_rc <string>
# Append <string> both to CIM's init file and to the init file of the
# Lisp implementation selected by $LISP.  Exits when $LISP is unknown.
add_to_lisp_rc() {
    local line=$1
    local rcfile
    case "$LISP" in
        abcl)     rcfile=".abclrc" ;;
        allegro*) rcfile=".clinit.cl" ;;
        sbcl*)    rcfile=".sbclrc" ;;
        ccl*)     rcfile=".ccl-init.lisp" ;;
        cmucl)    rcfile=".cmucl-init.lisp" ;;
        clisp*)   rcfile=".clisprc.lisp" ;;
        ecl)      rcfile=".eclrc" ;;
        *)
            echo "Unable to determine RC file for '$LISP'."
            exit 1
            ;;
    esac
    echo "$line" >> "$HOME/.cim/init.lisp"
    echo "$line" >> "$HOME/$rcfile"
}
ASDF_URL="https://common-lisp.net/project/asdf/archives/asdf.lisp"
ASDF_LOCATION="$HOME/asdf"
install_asdf() {
get asdf.lisp "$ASDF_URL"
add_to_lisp_rc "(load \"$ASDF_LOCATION\")"
}
compile_asdf() {
echo "Compiling ASDF..."
cl -c "$ASDF_LOCATION.lisp" -Q
}
ASDF_SR_CONF_DIR="$HOME/.config/common-lisp/source-registry.conf.d"
ASDF_SR_CONF_FILE="$ASDF_SR_CONF_DIR/cl-travis.conf"
LOCAL_LISP_TREE="$HOME/lisp"
# Point ASDF's source registry at the project checkout and the local
# lisp tree, then echo the generated config for the build log.
setup_asdf_source_registry() {
    mkdir -p "$LOCAL_LISP_TREE" "$ASDF_SR_CONF_DIR"
    {
        echo "(:tree \"$TRAVIS_BUILD_DIR/\")"
        echo "(:tree \"$LOCAL_LISP_TREE/\")"
    } > "$ASDF_SR_CONF_FILE"
    echo "Created $ASDF_SR_CONF_FILE"
    cat -n "$ASDF_SR_CONF_FILE"
}
# install_script <path> <lines...>
# Build a small '#!/bin/sh' wrapper from the given lines and move it
# (via sudo) into place — typically under /usr/local/bin.
install_script() {
    path=$1; shift
    tmp=$(mktemp)
    echo "#!/bin/sh" > "$tmp"
    for line; do
        # 'for line' with no 'in' iterates over the remaining "$@".
        echo "$line" >> "$tmp"
    done
    chmod 755 "$tmp"
    sudo mv "$tmp" "$path"
}
ABCL_TARBALL_URL1="https://common-lisp.net/project/armedbear/releases/1.3.2/abcl-bin-1.3.2.tar.gz"
ABCL_TARBALL_URL2="http://cddr.org/ci/abcl-bin-1.3.2.tar.gz"
ABCL_TARBALL="abcl.tar.gz"
ABCL_DIR="$HOME/abcl"
ABCL_SCRIPT="/usr/local/bin/abcl"
install_abcl() {
sudo apt-get install -y default-jre
get "$ABCL_TARBALL" "$ABCL_TARBALL_URL1" "$ABCL_TARBALL_URL2"
unpack -z "$ABCL_TARBALL" "$ABCL_DIR"
install_script "$ABCL_SCRIPT" \
"java -cp \"$ABCL_DIR/abcl-contrib.jar\" \
-jar \"$ABCL_DIR/abcl.jar\" \"\$@\""
cim use abcl-system --default
}
SBCL_TARBALL_URL1="http://prdownloads.sourceforge.net/sbcl/sbcl-1.2.13-x86-64-linux-binary.tar.bz2"
SBCL_TARBALL_URL2="http://cddr.org/ci/sbcl-1.2.13-x86-64-linux-binary.tar.bz2"
SBCL_TARBALL="sbcl.tar.bz2"
SBCL_DIR="$HOME/sbcl"
install_sbcl() {
echo "Installing SBCL..."
get "$SBCL_TARBALL" "$SBCL_TARBALL_URL1" "$SBCL_TARBALL_URL2"
unpack -j "$SBCL_TARBALL" "$SBCL_DIR"
( cd "$SBCL_DIR" && sudo bash install.sh )
cim use sbcl-system --default
}
SBCL32_TARBALL_URL1="http://prdownloads.sourceforge.net/sbcl/sbcl-1.2.7-x86-linux-binary.tar.bz2"
SBCL32_TARBALL_URL2="http://cddr.org/ci/sbcl-1.2.7-x86-linux-binary.tar.bz2"
SBCL32_TARBALL="sbcl32.tar.bz2"
SBCL32_DIR="$HOME/sbcl32"
install_sbcl32() {
echo "Installing 32-bit SBCL..."
install_i386_arch
get "$SBCL32_TARBALL" "$SBCL32_TARBALL_URL1" "$SBCL32_TARBALL_URL2"
unpack -j "$SBCL32_TARBALL" "$SBCL32_DIR"
( cd "$SBCL32_DIR" && sudo bash install.sh )
sudo ln -s /usr/local/bin/sbcl /usr/local/bin/sbcl32
cim use sbcl-system --default
}
CCL_TARBALL_URL1="https://github.com/Clozure/ccl/releases/download/v1.11.5/ccl-1.11.5-linuxx86.tar.gz"
CCL_TARBALL_URL2="http://kerno.org/~luis/ci/ccl-1.11-linuxx86.tar.gz"
CCL_TARBALL_URL3="http://common-lisp.net/~loliveira/tarballs/ci/ccl-1.11-linuxx86.tar.gz"
CCL_TARBALL="ccl.tar.gz"
CCL_DIR="$HOME/ccl"
CCL_SCRIPT_PREFIX="/usr/local/bin"
install_ccl() {
if [ "$LISP" = "ccl32" ]; then
echo "Installing 32-bit CCL..."
install_i386_arch
bin="lx86cl"
script="ccl32"
else
echo "Installing CCL..."
bin="lx86cl64"
script="ccl"
fi
get "$CCL_TARBALL" "$CCL_TARBALL_URL1" "$CCL_TARBALL_URL2" "$CCL_TARBALL_URL3"
unpack -z "$CCL_TARBALL" "$CCL_DIR"
install_script "$CCL_SCRIPT_PREFIX/$script" "\"$CCL_DIR/$bin\" \"\$@\""
if [ "$LISP" = "ccl32" ]; then
# also install the 'ccl' script so that CIM can pick it up.
install_script "$CCL_SCRIPT_PREFIX/ccl" "\"$CCL_DIR/$bin\" \"\$@\""
fi
cim use ccl-system --default
}
CMUCL_TARBALL_URL1="https://common-lisp.net/project/cmucl/downloads/snapshots/2015/07/cmucl-2015-07-x86-linux.tar.bz2"
CMUCL_EXTRA_TARBALL_URL1="https://common-lisp.net/project/cmucl/downloads/snapshots/2015/07/cmucl-2015-07-x86-linux.extra.tar.bz2"
CMUCL_TARBALL_URL2="http://cddr.org/ci/cmucl-2015-07-x86-linux.tar.bz2"
CMUCL_EXTRA_TARBALL_URL2="http://cddr.org/ci/cmucl-2015-07-x86-linux.extra.tar.bz2"
CMUCL_TARBALL="cmucl.tar.bz2"
CMUCL_EXTRA_TARBALL="cmucl-extra.tar.bz2"
CMUCL_DIR="$HOME/cmucl"
CMUCL_SCRIPT="/usr/local/bin/cmucl"
# Download and install CMUCL (32-bit binary snapshot) plus its "extra"
# tarball, and install a wrapper script that sets CMUCLLIB.
install_cmucl() {
    echo "Installing CMUCL..."
    install_i386_arch

    get "$CMUCL_TARBALL" "$CMUCL_TARBALL_URL1" "$CMUCL_TARBALL_URL2"
    # BUG FIX: this fetch referenced the undefined $CMUCL_EXTRA_TARBALL_URL
    # (no "1" suffix), silently skipping the primary mirror.
    get "$CMUCL_EXTRA_TARBALL" "$CMUCL_EXTRA_TARBALL_URL1" "$CMUCL_EXTRA_TARBALL_URL2"

    mkdir -p "$CMUCL_DIR"
    tar -C "$CMUCL_DIR" -xjf "$CMUCL_TARBALL"
    tar -C "$CMUCL_DIR" -xjf "$CMUCL_EXTRA_TARBALL"

    install_script "$CMUCL_SCRIPT" \
        "CMUCLLIB=\"$CMUCL_DIR/lib/cmucl/lib\" \"$CMUCL_DIR/bin/lisp\" \"\$@\""

    # XXX: no CIM support for CMUCL
}
ECL_TARBALL_URL1="http://common-lisp.net/~loliveira/tarballs/ecl-13.5.1-linux-amd64.tar.gz"
ECL_TARBALL_URL2="http://kerno.org/~luis/ci/ecl-13.5.1-linux-amd64.tar.gz"
ECL_TARBALL="ecl.tar.gz"
install_ecl() {
echo "Installing ECL..."
#get "$ECL_TARBALL" "$ECL_TARBALL_URL1" "$ECL_TARBALL_URL2"
#sudo tar -C / -xzf "$ECL_TARBALL"
sudo apt-get install -y ecl
cim use ecl-system --default
}
install_clisp() {
if [ "$LISP" = "clisp32" ]; then
echo "Installing 32-bit CLISP..."
install_i386_arch
sudo apt-get remove -y libsigsegv2
sudo apt-get install -y libsigsegv2:i386
sudo apt-get install -y clisp:i386
sudo ln -s /usr/bin/clisp /usr/local/bin/clisp32
else
echo "Installing CLISP..."
sudo apt-get install -y clisp
fi
cim use clisp-system --default
}
ACL_TARBALL_URL="https://franz.com/ftp/pub/acl10.1express/linux86/acl10.1express-linux-x86.tbz2"
ACL_TARBALL="acl.tbz2"
ACL_DIR="$HOME/acl"
install_acl() {
echo "Installing Allegro CL..."
install_i386_arch
case "$LISP" in
allegro) acl=alisp ;;
allegromodern) acl=mlisp ;;
*)
echo "Unrecognised lisp: '$LISP'"
exit 1
;;
esac
get "$ACL_TARBALL" "$ACL_TARBALL_URL"
unpack -j "$ACL_TARBALL" "$ACL_DIR"
sudo ln -vs "$ACL_DIR"/alisp "/usr/local/bin/$acl"
cim use alisp-system --default
}
QUICKLISP_URL="http://beta.quicklisp.org/quicklisp.lisp"
install_quicklisp() {
get quicklisp.lisp "$QUICKLISP_URL"
echo 'Installing Quicklisp...'
cl -f quicklisp.lisp -e '(quicklisp-quickstart:install)'
add_to_lisp_rc '(let ((quicklisp-init (merge-pathnames "quicklisp/setup.lisp"
(user-homedir-pathname))))
(when (probe-file quicklisp-init)
(load quicklisp-init)))'
}
# this variable is used to grab a specific version of the
# cim_installer which itself looks at this variable to figure out
# which version of CIM it should install.
CIM_INSTALL_BRANCH=c9f4ea960ce4504d5ddd229b9f0f83ddc6dce773
CL_SCRIPT="/usr/local/bin/cl"
CIM_SCRIPT="/usr/local/bin/cim"
QL_SCRIPT="/usr/local/bin/ql"
install_cim() {
curl -L "https://raw.github.com/KeenS/CIM/$CIM_INSTALL_BRANCH/scripts/cim_installer" | /bin/sh
install_script "$CL_SCRIPT" ". \"$HOME\"/.cim/init.sh; exec cl \"\$@\""
install_script "$CIM_SCRIPT" ". \"$HOME\"/.cim/init.sh; exec cim \"\$@\""
install_script "$QL_SCRIPT" ". \"$HOME\"/.cim/init.sh; exec ql \"\$@\""
}
(
cd "$HOME"
sudo apt-get update
install_cim
install_asdf
case "$LISP" in
abcl) install_abcl ;;
allegro|allegromodern) install_acl ;;
sbcl) install_sbcl ;;
sbcl32) install_sbcl32 ;;
ccl|ccl32) install_ccl ;;
cmucl) install_cmucl; exit 0 ;; # no CIM support
clisp|clisp32) install_clisp ;;
ecl) install_ecl ;;
*)
echo "Unrecognised lisp: '$LISP'"
exit 1
;;
esac
compile_asdf
cl -e '(format t "~%~a ~a up and running! (ASDF ~a)~%~%"
(lisp-implementation-type)
(lisp-implementation-version)
(asdf:asdf-version))'
install_quicklisp
setup_asdf_source_registry
)
| true
|
c60a8d932125db893d9de86e7d10df16d2ddd4e9
|
Shell
|
CloudTestDrive/helidon-kubernetes
|
/setup/common/switch-git-branch.sh
|
UTF-8
| 1,450
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/bash -f
export SETTINGS=$HOME/hk8sLabsSettings
if [ -f $SETTINGS ]
then
echo "$SCRIPT_NAME Loading existing settings information"
source $SETTINGS
else
echo "$SCRIPT_NAME No existing settings cannot continue"
exit 10
fi
if [ -z "$AUTO_CONFIRM" ]
then
export AUTO_CONFIRM=false
fi
CURRENT_GIT_BRANCH=`git branch --show-current`
COMPILE_DIR=`pwd`
if [ -z "$GIT_BRANCH_TO_COMPILE" ]
then
echo "GIT_BRANCH_TO_COMPILE not set, retaining current branch of $CURRENT_GIT_BRANCH"
else
MATCHING_GIT=`git branch -a --list | grep "$GIT_BRANCH_TO_COMPILE" | wc -l`
if [ "$MATCHING_GIT" = 0 ]
then
echo "Can't locate git branch $GIT_BRANCH_TO_COMPILE for directory $COMPILE_DIR "
exit 100
else
if [ "$AUTO_CONFIRM" = true ]
then
REPLY="y"
echo "Auto confirm is enabled, Do you want to switch to git branch $GIT_BRANCH_TO_COMPILE to compile $COMPILE_DIR defaulting to $REPLY"
else
read -p "Do you want to switch to git branch $GIT_BRANCH_TO_COMPILE to compile $COMPILE_DIR (y/n) ? " REPLY
fi
if [[ ! $REPLY =~ ^[Yy]$ ]]
then
echo "OK, remaining in current branch $CURRENT_GIT_BRANCH for compiling $COMPILE_DIR"
else
echo "Found git branch $GIT_BRANCH_TO_COMPILE checking it out and re-pulling"
git checkout $GIT_BRANCH_TO_COMPILE
git pull
echo "Switched from git branch $CURRENT_GIT_BRANCH to $GIT_BRANCH_TO_COMPILE"
fi
fi
fi
| true
|
59e8ca4248c044006cb6831dc2f9718060def24a
|
Shell
|
cesaremov/Solanum_Arcanum-Lycopersicum
|
/methodology_01/supportScripts/03.hisat2-build.sh
|
UTF-8
| 622
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/bash
# Paths
lustrePath="/LUSTRE/usuario/"
fullPwd=${PWD/\/home/$lustrePath/}
genomePath="Genome"
# Genome file
genomeFile="$genomePath/S_lycopersicum_chromosomes.3.00.fa"
# Logfile
logFile="$fullPwd/$genomePath/hisat2-build.log"
#echo $logFile
# Base name
base="$genomePath/Slycopersicum"
# Cluster parameters
nodes=1
ppn=1
mem="20G"
name="hisat2-build"
walltime="24:00:00"
#
echo "cd \$PBS_O_WORKDIR; module load hisat2; hisat2-build -p $ppn $genomeFile $base && hisat2-inspect -s $base" | qsub -V -N $name -l nodes=$nodes:ppn=$ppn,mem=$mem,vmem=$mem,walltime=$walltime -o $logFile -j oe && touch $logFile
| true
|
1ff3c0c98a6877ed7f8cc8ae1057c5c162de5ed0
|
Shell
|
SEEG-Oxford/ABRAID-MP
|
/config/deploy/up_gs.sh
|
UTF-8
| 6,312
| 3.3125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
set -e
GS_TEMP_DIR="$(mktemp -d)"
GS_UPDATE_CMD="fileAsk"
# Stage customised GeoServer config files in $GS_TEMP_DIR: the root
# master password, users.xml with the admin password hash spliced in,
# and web.xml with a GEOWEBCACHE_CACHE_DIR context-param added — the
# grep guard keeps the insertion idempotent across re-runs.
setupTempConfigFiles() {
    echo "${deploy_props[geoserver.root.password.hash]}" > "$GS_TEMP_DIR/passwd"
    cp "$WEBAPP_PATH/geoserver/data/security/usergroup/default/users.xml" "$GS_TEMP_DIR/users.xml"
    sed -i "s|password=\".*\"|password=\"${deploy_props[geoserver.admin.password.hash]}\"|g" "$GS_TEMP_DIR/users.xml"
    cp "$WEBAPP_PATH/geoserver/WEB-INF/web.xml" "$GS_TEMP_DIR/web.xml"
    if ! grep -Fqx "<context-param><param-name>GEOWEBCACHE_CACHE_DIR</param-name><param-value>$WEBAPP_PATH/geoserver/data/gwc</param-value></context-param>" "$GS_TEMP_DIR/web.xml"; then
        sed -i "/.*<display-name>.*/a <context-param><param-name>GEOWEBCACHE_CACHE_DIR</param-name><param-value>$WEBAPP_PATH/geoserver/data/gwc</param-value></context-param>" "$GS_TEMP_DIR/web.xml"
    fi
}
# Copy the bundled 'abraid' workspace template into $GS_TEMP_DIR and
# substitute the *_REPLACE placeholders in the datastore definition
# with the JDBC settings loaded from database.properties.
setupTempWorkspaceFiles() {
    cp -r "../geoserver/abraid" "$GS_TEMP_DIR/workspace"
    sed -i "s/USER\_REPLACE/${db_props[jdbc.username]}/g" "$GS_TEMP_DIR/workspace/abraid-db/datastore.xml"
    sed -i "s/PW\_REPLACE/${db_props[jdbc.password]}/g" "$GS_TEMP_DIR/workspace/abraid-db/datastore.xml"
    sed -i "s/DB\_REPLACE/${db_props[jdbc.database.name]}/g" "$GS_TEMP_DIR/workspace/abraid-db/datastore.xml"
    sed -i "s/PORT\_REPLACE/${db_props[jdbc.database.port]}/g" "$GS_TEMP_DIR/workspace/abraid-db/datastore.xml"
    sed -i "s/HOST\_REPLACE/${db_props[jdbc.database.host]}/g" "$GS_TEMP_DIR/workspace/abraid-db/datastore.xml"
}
echo "[[ GS | Loading configuration ]]"
TEMP_FILE=$(mktemp)
declare -A db_props
cat "$ABRAID_SUPPORT_PATH/conf/application/database.properties" | grep -v "^#" | grep -v '^[[:space:]]*$' > "$TEMP_FILE"
while read -r line; do
[[ $line = *=* ]] || continue
db_props[${line%%=*}]=${line#*=}
done < "$TEMP_FILE"
declare -A deploy_props
cat "$ABRAID_SUPPORT_PATH/conf/application/deployment.properties" | grep -v "^#" | grep -v '^[[:space:]]*$' > "$TEMP_FILE"
while read -r line; do
[[ $line = *=* ]] || continue
deploy_props[${line%%=*}]=${line#*=}
done < "$TEMP_FILE"
rm -f "$TEMP_FILE"
echo "[[ GS | Performing prechecks ]]"
: "${db_props[jdbc.database.name]:?"Variable must be set"}"
: "${db_props[jdbc.database.host]:?"Variable must be set"}"
: "${db_props[jdbc.database.port]:?"Variable must be set"}"
: "${db_props[jdbc.username]:?"Variable must be set"}"
: "${db_props[jdbc.password]:?"Variable must be set"}"
: "${deploy_props[geoserver.root.password.hash]:?"Variable must be set"}"
: "${deploy_props[geoserver.admin.password.hash]:?"Variable must be set"}"
echo "[[ GS | Checking for existing GeoServer installation ]]"
if [[ ! -d "$WEBAPP_PATH/geoserver" ]]; then
echo "No GeoServer install found"
echo "[[ GS | Downloading GeoServer 2.8.1 ]]"
curl -# -L "http://sourceforge.net/projects/geoserver/files/GeoServer/2.8.1/geoserver-2.8.1-war.zip" -o "$GS_TEMP_DIR/geoserver-2.8.1-war.zip"
unzip -p "$GS_TEMP_DIR/geoserver-2.8.1-war.zip" "geoserver.war" > "$GS_TEMP_DIR/geoserver.war"
rm -f "$GS_TEMP_DIR/geoserver-2.8.1-war.zip"
echo "[[ GS | Installing GeoServer 2.8.1 ]]"
unzip -q "$GS_TEMP_DIR/geoserver.war" -d "$WEBAPP_PATH/geoserver"
echo "[[ GS | Removing default setup ]]"
rm -rf "$WEBAPP_PATH/geoserver/data/workspaces/"
mkdir -p "$WEBAPP_PATH/geoserver/data/workspaces/"
mv "$WEBAPP_PATH/geoserver/data/styles/default_generic.sld" "./default_generic.sld"
mv "$WEBAPP_PATH/geoserver/data/styles/generic.xml" "./generic.xml"
rm -rf "$WEBAPP_PATH/geoserver/data/styles/"
mkdir -p "$WEBAPP_PATH/geoserver/data/styles/"
mv "./default_generic.sld" "$WEBAPP_PATH/geoserver/data/styles/default_generic.sld"
mv "./generic.xml" "$WEBAPP_PATH/geoserver/data/styles/generic.xml"
rm -rf "$WEBAPP_PATH/geoserver/data/palettes/"
mkdir -p "$WEBAPP_PATH/geoserver/data/palettes/"
rm -rf "$WEBAPP_PATH/geoserver/data/layergroups/"
mkdir -p "$WEBAPP_PATH/geoserver/data/layergroups/"
rm -rf "$WEBAPP_PATH/geoserver/data/data/"
mkdir -p "$WEBAPP_PATH/geoserver/data/data/"
rm -rf "$WEBAPP_PATH/geoserver/data/coverages/"
mkdir -p "$WEBAPP_PATH/geoserver/data/coverages/"
rm -rf "$WEBAPP_PATH/geoserver/data/gwc/"
mkdir -p "$WEBAPP_PATH/geoserver/data/gwc/"
mkdir -p "$WEBAPP_PATH/geoserver/data/gwc-blobs/"
rm -rf "$WEBAPP_PATH/geoserver/data/gwc-layers/"
mkdir -p "$WEBAPP_PATH/geoserver/data/gwc-layers/"
GS_UPDATE_CMD="fileCopy"
else
echo "There appears to be appears be an existing GeoServer install"
echo "[[ GS | Skipping GeoServer installation ]]"
echo "Don't forget you may need to update your GeoServer version (manually)"
fi
echo "[[ GS | Customizing/checking geoserver config ]]"
setupTempConfigFiles
$GS_UPDATE_CMD "$GS_TEMP_DIR/passwd" "$WEBAPP_PATH/geoserver/data/security/masterpw/default/passwd" "GeoServer root password"
$GS_UPDATE_CMD "$GS_TEMP_DIR/users.xml" "$WEBAPP_PATH/geoserver/data/security/usergroup/default/users.xml" "GeoServer admin password"
$GS_UPDATE_CMD "$GS_TEMP_DIR/web.xml" "$WEBAPP_PATH/geoserver/WEB-INF/web.xml" "GeoServer servlet settings"
$GS_UPDATE_CMD "../geoserver/logging.xml" "$WEBAPP_PATH/geoserver/data/logging.xml" "GeoServer logging config"
$GS_UPDATE_CMD "../geoserver/ABRAID_LOGGING.properties" "$WEBAPP_PATH/geoserver/data/logs/ABRAID_LOGGING.properties" "ABRAID GeoServer logging settings"
$GS_UPDATE_CMD "../geoserver/gwc-gs.xml" "$WEBAPP_PATH/geoserver/data/gwc-gs.xml" "GeoServer geo-web-cache config"
$GS_UPDATE_CMD "../geoserver/geowebcache-diskquota.xml" "$WEBAPP_PATH/geoserver/data/gwc/geowebcache-diskquota.xml" "GeoServer geo-web-cache disk quota config"
$GS_UPDATE_CMD "../geoserver/geowebcache.xml" "$WEBAPP_PATH/geoserver/data/gwc/geowebcache.xml" "GeoServer geo-web-cache extended config"
echo "[[ GS | Adding/checking the abraid workspace ]]"
setupTempWorkspaceFiles
export GS_UPDATE_CMD
export GS_TEMP_DIR
export WEBAPP_PATH
( cd "$GS_TEMP_DIR/workspace" && find . -type "f" -exec bash -c '"$GS_UPDATE_CMD" "$GS_TEMP_DIR/workspace/$0" "$WEBAPP_PATH/geoserver/data/workspaces/abraid/$0"' {} \; )
( cd "../geoserver/gwc-layers" && find . -type "f" -exec bash -c '"$GS_UPDATE_CMD" "$0" "$WEBAPP_PATH/geoserver/data/gwc-layers/$0"' {} \; )
echo "[[ GS | Ensuring correct file permissions ]]"
permissionFix "tomcat7" "tomcat7" "$WEBAPP_PATH/geoserver/"
echo "[[ GS | Done ]]"
rm -rf "$GS_TEMP_DIR"
| true
|
fa7c7cecc926d6b82f39dba6dc1617565b7b81a0
|
Shell
|
FelixZhu/scripts
|
/cf
|
UTF-8
| 559
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
if [ "$1" = "" ];then
echo "no input"
fi
#echo $argv
echo -e "\033[42;31m Search " $1 "\033[m"
#
#for i in $argv
#do
# case i in
# '-i')
# ;;
# esac
#done
# find_word PATTERN — recursively grep (case-insensitive, line-numbered)
# for PATTERN under the current directory, skipping tags files, VCS
# metadata and vim swap files, and number the matching output lines.
function find_word() {
	# BUG FIX: $1 is now quoted (and preceded by --) so patterns with
	# spaces or a leading dash reach grep as a single pattern argument.
	# NOTE(review): filenames containing whitespace still break the
	# plain-text xargs hand-off; fixing that needs find -print0 and a
	# different filter stage — confirm whether it matters here.
	find . -type f \
		| sed -e '/\/tags$/d' -e '/\/\.git/d' -e '/\/\.svn/d' -e '/\.swp$/d' \
		| xargs grep -niI --color='always' -- "$1" 2>/dev/null \
		| cat -b
}
if [ "$1" != "" ];then
find_word $1
fi
if [ "$2" != "" ];then
find_word $1 | egrep -o '\.\/.*' | awk -F ':' '{print $1}' | sed -n '1p' | xargs vim +$2
fi
| true
|
ea7a845061a4bb0e4d286599535e9f2e89eec15f
|
Shell
|
Octoober/sh-webpack-starter-template
|
/webpack-template.sh
|
UTF-8
| 1,033
| 4.1875
| 4
|
[] |
no_license
|
#!/bin/bash
# Vars
gitRepo="https://github.com/Octoober/webpack-starter-template.git"
q="[ \e[32m?\e[0m ]"
# Input project name
function inProjectName() {
echo -ne "${q} Project name: "; read projectName
}
# Create project
function create() {
echo -e "\nGit clone: ${gitRepo}"
git clone $gitRepo $projectName
rm -rf $projectName/.git $projectName/.gitignore
# Rename project
sed -i "s/\"webpack-starter-template\"/\"${projectName}\"/g" $projectName/package.json
}
# Run input project name
echo; inProjectName
# If no project name is specified
while [ -z $projectName ]; do
echo -e "\n\e[1;33mPlease indicate the name of your project.\e[0m"
inProjectName
done
# Question run "npm install"
echo -ne "${q} Run install (Y/N): "; read install
case "$install" in
n|N)
create
echo -e "\n\tSuccessfully created \"${projectName}\"\n\n\t\e[1;32mcd ${projectName}\n\tnpm install \n\tnpm run dev\e[0m"
;;
y|Y|*)
create
echo -e "\n\e[1;32mRun \"npm install\"...\e[0m"
cd $projectName
npm install
;;
esac
#---end
| true
|
ffd1ac410332de5b9ca92c7bd2050d362479d9d4
|
Shell
|
Spark-Liang/NoteBook
|
/编程/云技术/k8s/安装部署/ref/check_haproxy.sh
|
UTF-8
| 279
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/sh
# Health check (e.g. for keepalived): if haproxy is down, try to start
# it; if it still will not run, kill remnants, send an alert, and sleep
# so the virtual IP can fail over.
# HAPROXY down
# BUG FIX: '[[ ]]' is a bashism that strict /bin/sh implementations
# reject; the portable '[ ]' test is equivalent here.
A=$(ps -C haproxy --no-header | wc -l)
if [ "$A" -eq 0 ]; then
    systemctl start haproxy
    if [ "$(ps -C haproxy --no-header | wc -l)" -eq 0 ]; then
        killall -9 haproxy
        # NOTE(review): 'mail -s "haproxy"' names no recipient — most
        # mail(1) implementations reject that; confirm intended address.
        echo "HAPROXY down" | mail -s "haproxy"
        sleep 3600
    fi
fi
| true
|
d997f83babba4e8694d1abda4d76e653e8203818
|
Shell
|
dayudodo/LNPPR
|
/include/sqlite.sh
|
UTF-8
| 3,831
| 3.5
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
# Copyright (C) 2020 - 2021 LetSeeQiJi <wowiwo@yeah.net>
#
# This file is part of the LNPPR script.
#
# LNPPR is a powerful bash script for the installation of
# Nodejs + Nginx + Rails + MySQL/PostgreSQL + Redis and so on.
# You can install Nginx + Rails + MySQL/Postgresql in an very easy way.
# Just need to edit the install.conf file to choose what you want to install before installation.
# And all things will be done in a few minutes.
#
# Website: https://bossesin.cn
# Github: https://github.com/letseeqiji/LNPPR
Sqlite_Dir_Name=sqlite-autoconf-3360000
Sqlite_Source_File_Download_Url=https://www.sqlite.org/2021/"${Sqlite_Dir_Name}".tar.gz
Sqlite_Version_Installed=0
Sqlite_Version_Min=38
# Cache the installed sqlite3 version as concatenated major+minor
# digits (e.g. "3.36.0" -> "336") in Sqlite_Version_Installed, for the
# numeric comparison against Sqlite_Version_Min.
Sqlite_Version()
{
Sqlite_Version_Installed=$(sqlite3 --version | awk -F '.' '{printf("%s%s"),$1,$2}')
}
# Return 0 only when a sqlite3 binary exists AND its version meets the
# minimum; return 1 otherwise (the caller then builds from source).
Sqlite_Check_Installed()
{
# The sqlite shipped with CentOS is very old; when we hit such a low
# version we still need to install our own build.
if Check_Command sqlite3;then
Sqlite_Version
if [ ${Sqlite_Version_Installed} -lt ${Sqlite_Version_Min} ];then
echo "${lang_version_too_old}, ${lang_version_update}"
return 1
fi
else
return 1
fi
}
Sqlite_Get_Sourcefile()
{
cd ${current_dir}/src/
if ! Check_File_Exist "${Sqlite_Dir_Name}".tar.gz; then
wget "${Sqlite_Source_File_Download_Url}"
fi
}
Sqlite_Tar_And_Cd_Sourcefile()
{
Check_Equal "${Install_Env}" "pro" && Rm_Dir "${Sqlite_Dir_Name}"
Tar "${Sqlite_Dir_Name}".tar.gz
cd "${Sqlite_Dir_Name}"
}
Sqlite_Check_Install_Dir()
{
if Check_Dir_Exist "${Sqlite3_Install_Dir}";then
Back_Up_File "${Sqlite3_Install_Dir}"
fi
Make_Dir "${Sqlite3_Install_Dir}"
}
Sqlite_Make_Install_Configure()
{
Sqlite_Check_Install_Dir
./configure --prefix="${Sqlite3_Install_Dir}"
}
Sqlite_Lns_Bin()
{
Ln_S "${Sqlite3_Install_Dir}"/bin/sqlite3 /usr/bin/sqlite3
}
Sqlite_Ldconfig()
{
echo "${Sqlite3_Install_Dir}/lib" > /etc/ld.so.conf.d/sqlite3.conf
ldconfig
}
Sqlite_Install_Finish()
{
if Check_Equal "${Install_Env}" "pro";then
Rm_Dir "${current_dir}"/src/"${Sqlite_Dir_Name}"
fi
}
Sqlite_Install()
{
if Sqlite_Check_Installed;then
echo "${lang_installed_already} sqlite3, ${lang_no_need_install}"
else
cd "${current_dir}"/src
echo "sqlite3 ${lang_download_start}"
Sqlite_Get_Sourcefile
if Check_Up;then
echo "${lang_download_success}"
else
echo "${lang_download_not_found}"
return 1
fi
echo "sqlite3 ${lang_start_uncompress}"
Sqlite_Tar_And_Cd_Sourcefile
if ! Check_Up;then
echo "${lang_dir_not_find}"
return 1
fi
echo "sqlite3 ${lang_install_start_configure}"
Sqlite_Make_Install_Configure
if Check_Up;then
echo "${lang_confighure_success}"
else
echo "${lang_confighure_fail}"
return 1
fi
echo "sqlite3 ${lang_start_make_install}"
Make_Install
if Check_Up;then
echo "${lang_install_success}"
else
echo "${lang_install_fail}"
Sqlite_Uninstall
return 1
fi
echo "sqlite3 ${lang_lns_to_bin}"
Sqlite_Lns_Bin
if Sqlite_Check_Installed;then
echo "${lang_install_success}"
else
echo "${lang_install_fail}"
Sqlite_Uninstall
return 1
fi
echo "sqlite3 ${lang_ldconfig}"
Sqlite_Ldconfig
if Check_Up;then
echo "${lang_install_success}"
else
echo "${lang_install_fail}"
Sqlite_Uninstall
return 1
fi
Sqlite_Install_Finish
Echo_Green "====== Sqlite install completed ======"
Echo_Smile "Sqlite ${lang_install_success} !"
fi
}
Sqlite_Uninstall()
{
Rm_Dir ${Sqlite3_Install_Dir}
Sqlite_Check_Installed && echo "sqlite ${lang_uninstall_fail}" || echo "sqlite ${lang_uninstall_success}"
}
| true
|
a19639dd64a8b577990e2fb8eb579139b93bc0e9
|
Shell
|
maniankara/atlassian-jira-docker
|
/jira-app/startjira.sh
|
UTF-8
| 525
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
# Container entry point: wait for the MySQL container, recreate the
# JIRA database with the collation JIRA requires, optionally restore a
# backup, then hand off to the stock docker entrypoint.
echo "Waiting for connecting: jira-db:3306";
# Poll with netcat until the DB port accepts connections.
while ! nc -z jira-db 3306; do sleep 3; done;
echo "Connection passed, executing mysql scripts";
# Drop and recreate so JIRA gets a utf8/utf8_bin database.
# NOTE(review): the root password is passed on the command line and is
# visible in 'ps' output inside the container — confirm acceptable.
mysql -h jira-db -uroot -p$MYSQL_ROOT_PASSWORD -e "drop database $MYSQL_DATABASE; CREATE DATABASE $MYSQL_DATABASE CHARACTER SET utf8 COLLATE utf8_bin;"
echo "urL: $JIRA_BACKUP_URL";
# When a backup URL is provided, stage it where JIRA's importer looks.
if [ ! -z $JIRA_BACKUP_URL ]; then
	echo "Copying backup file $JIRA_BACKUP_URL";
	wget -O /var/atlassian/jira/import/jira-backup.zip $JIRA_BACKUP_URL
fi
/docker-entrypoint.sh jira
| true
|
44c243ff0e28ec11e1c6319b0e57092114c5b3d1
|
Shell
|
gordcorp/cf-staticfile_buildpack-helloworld
|
/bin/cideploy.sh
|
UTF-8
| 646
| 3.53125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# CI deploy hook: push to Cloud Foundry, but only from the master
# branch.  Expects CF_PROD_API/CF_PROD_USER/CF_PROD_PASSWORD/CF_ORG/
# CF_SPACE in the environment.
# Exit immediately if there is an error
set -e
# cause a pipeline (for example, curl -s http://sipb.mit.edu/ | grep foo) to produce a failure return code if any command errors not just the last command of the pipeline.
set -o pipefail
# The git branch we are on
readonly GITBRANCH="$(git symbolic-ref --short -q HEAD)"
main() {
case "${GITBRANCH}" in
master)
cf api $CF_PROD_API
# NOTE(review): the password appears on the command line (visible in
# process listings on the CI host) — confirm acceptable.
cf auth $CF_PROD_USER $CF_PROD_PASSWORD
cf target -o $CF_ORG
cf target -s $CF_SPACE
cf push -f manifest.yml
;;
*)
# Any non-master branch is a deliberate no-op.
echo "I will not deploy this branch"
exit 0
;;
esac
}
main $@
| true
|
2f4cdb6a947c5c77383bf3767369278ef391d052
|
Shell
|
aqreed/travis-ci-cpp-example
|
/run_coverage_test.sh
|
UTF-8
| 331
| 2.59375
| 3
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# calls the code coverage testing program gcov
# Full clean rebuild with coverage flags, run the tests, then report.
# fist clean all object files
make clean
# compile all the cpp files, link etc
make
# run test-library.out, with test coverage (see makefile flags)
./test/test-library
# gcc suite test coverage program
gcov test/lib.cpp
# code coverage is in the file "lib.cpp.gcov"
| true
|
4cd48ee7f17bb75009bb274ea77e8c77d4e4f36d
|
Shell
|
aabarbosa/shell
|
/scripts/test.sh
|
UTF-8
| 588
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash
# This is not part of the project and should not be.
# its use aim to perform Debug and developer tests.
# Interactive dev loop: dispatch typed commands until "quit".
echo "Please type "\quit" to leave, "run" to start the server..."
echo "init to start a new installation..."
while :
do
read INPUT_STRING
case $INPUT_STRING in
quit)
echo "Breaking!"
break
;;
run)
echo "The server has been started!"
./server.sh
;;
init)
echo "Starting a new user setup..."
./init-users.sh --executable
echo "users has been set up!"
;;
*)
# NOTE(review): the trailing backslash below joins this line with the
# following ';;' — harmless as written, but looks unintended; confirm.
echo "Sorry, you typed a wrong input." \
;;
esac
done
echo "Server out!"
| true
|
dfd612a5ed1b20f91bc0dd926fc0163e4252ed12
|
Shell
|
intel/irstessi
|
/ut/scripts/count.sh
|
UTF-8
| 1,882
| 3.015625
| 3
|
[
"BSD-3-Clause",
"LicenseRef-scancode-other-permissive",
"BSL-1.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
<<LICENSE_BSD
Copyright 2011 - 2017 Intel Corporation
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
LICENSE_BSD
# The function detecting devices attached to the given adapter.
. adapter.sh
# Get the list of devices attached to the given storage adapter.
adap_devices=`get_attached_devices $1`
# Return the number of attached devices as the exit code of the script.
# 0 means no devices attached to the adapter or no such an adapter.
exit `echo $adap_devices | wc -w`
| true
|
27bf07fa363308affc802ee28c75715732310788
|
Shell
|
misc0110/latexrun
|
/test/T-nooutput.sh
|
UTF-8
| 452
| 3.953125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
set -e
# Test running just latexrun empty.tex, where empty.tex produces no output.
# NOTE(review): 'basename -s' is a GNU extension — confirm target shells.
TMP=tmp.$(basename -s .sh $0)
mkdir -p $TMP
cat > $TMP/empty.tex <<EOF
\documentclass{article}
\begin{document}
\end{document}
EOF
# BUG FIX: 'function name()' is a bash-only hybrid that /bin/sh (dash)
# rejects, and POSIX trap wants signal names without the SIG prefix.
clean() {
    rm -rf $TMP
}
trap clean INT TERM
# Intentionally use just the latexrun command to test when -o is not provided.
"$1" "$TMP/empty.tex"
clean
## output:
## No pages of output; output not updated
|
b2b9391d5a5709da5380bb532a9b8a0fdcc99913
|
Shell
|
mattschouten/screen-time-bouncer
|
/remote_logoff.sh
|
UTF-8
| 448
| 3.5
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Force-logs a user off the remote machine 172.16.1.110 by sending HUP to
# their gdm-x-session process over ssh.
set -euo pipefail
# Print usage and abort; called when no target username was supplied.
usage(){
echo "Usage: $0 <Target username>"
exit 1
}
[[ $# -eq 0 ]] && usage
TARGET_USER=$1
# The heredoc body runs on the remote host. \$ defers expansion to the
# remote shell, while the unescaped $TARGET_USER is substituted locally
# before the script is sent.
ssh 172.16.1.110 -q <<EOF
PID=\$(ps -e -o user:16,pid,cmd | grep gdm-x-session | grep $TARGET_USER | tr -s ' ' | cut -d ' ' -f 2)
echo "Going to kill \$PID and end gdm-x-session for $TARGET_USER"
sudo kill -HUP \$PID
EOF
# Future - capture something useful if user session wasn't found
| true
|
58ddb39fd1a7b580e2887fda36ed31d664344fdb
|
Shell
|
maxh0723/drive
|
/project.sh
|
UTF-8
| 307
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# Build helper for the BreezySwing GUI project.
#   clean - remove compiled classes
#   build - compile every .java file (default action)
#   run   - launch the GUI main class
case "$1" in
clean)
rm -f -- *.class
echo "Project cleaned"
;;
build)
# Glob instead of parsing `ls`, and track failures across ALL files:
# the original `$?` check after the loop only saw the final javac.
ok=0
for src in *.java; do
javac -cp .:lib/BreezySwing.jar "$src" || ok=1
done
if [ "$ok" -eq 0 ]; then
echo "Compilation complete."
fi
;;
run)
java -cp .:lib/BreezySwing.jar GUI
;;
*)
# Any other (or missing) argument defaults to a build.
"$0" build
;;
esac
| true
|
a0b6074e0a323f80bf3f283b81f03ec18a8916c2
|
Shell
|
krytarowski/netbsd-llvm-build
|
/buildbotScripts/bashShell/svntotbuild/setEnv.sh
|
UTF-8
| 1,124
| 2.953125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Do not generate any output from this file. It is sourced from files (getRevision.sh) that need
# a clean output stream.
set -e
# Allow unlimited core dumps so crashes during builds/tests can be debugged.
ulimit -c unlimited
# All paths are laid out relative to the caller's current directory.
export originalDir=$(pwd)
export rootDir=$(pwd)/..
export buildDir=$rootDir/build
# Staging directory on the Android device used for remote lldb testing.
export remoteDir=/data/local/tmp/lldb
dataRoot=""
if [ ! -d "/lldb-buildbot" ]; then #check whether the build server has lldb-buildbot
dataRoot=$HOME
else
dataRoot="/lldb-buildbot"
fi
export ANDROID_NDK_HOME=$dataRoot/android-ndk-r17
# NOTE(review): presumably the port for the remote debug server - confirm.
export port=5430
# Google Cloud Storage buckets (test traces / prebuilt binaries).
export gstrace=gs://lldb_test_traces
export gsbinaries=gs://lldb_binaries
# In-tree LLVM source layout: tools and projects checked out inside llvm/.
export llvmDir=$rootDir/llvm
export lldbDir=$llvmDir/tools/lldb
export lldDir=$llvmDir/tools/lld
export testsuiteDir=$llvmDir/projects/test-suite
export openmpDir=$llvmDir/projects/openmp
export clangDir=$llvmDir/tools/clang
export cteDir=$llvmDir/tools/clang/tools/extra
export pollyDir=$llvmDir/tools/polly
export libunwindDir=$llvmDir/projects/libunwind
export libcxxabiDir=$llvmDir/projects/libcxxabi
export libcxxDir=$llvmDir/projects/libcxx
# Lock file used to serialize builds on this machine.
export lockDir=/var/tmp/lldbbuild.exclusivelock
export TMPDIR=$rootDir/tmp/
mkdir -p $TMPDIR
| true
|
5385bccf26c125f5801987d3d2c4cb033981839f
|
Shell
|
navikt/sykdom-i-familien-sanity
|
/scripts/backup-and-migrate-production.sh
|
UTF-8
| 793
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/bash
# Remember to run sanity login to authenticate before running this script.
#set -x
programname=$0
# Print what this script does, then exit.
display_usage() {
printf "This does backup production dataset and migrates staging dataset to production. \n\n"
# Pass the name as an argument instead of embedding it in the format
# string, so '%' characters in the path cannot break printf.
printf "Usage: %s" "$programname"
exit 0
}
# check whether user had supplied -h or --help . If yes display usage
# BUG FIX: the original compared $# (the argument COUNT) against the flag
# strings, so --help/-h were never recognized.
if [[ "${1:-}" == "--help" || "${1:-}" == "-h" ]]
then
display_usage
exit 0
fi
# Export staging dataset
sanity dataset export staging staging.tar.gz --overwrite
# Backup production dataset
sanity dataset export production production.tar.gz --overwrite
echo y | sanity dataset import production.tar.gz "$(date +'%Y-%m-%d')-prod"
# Migrate staging dataset to production dataset (import)
sanity dataset import staging.tar.gz production --replace
| true
|
feb726a98eeb74a2297e583e74dda446cf3ce87b
|
Shell
|
Cloudxtreme/docker_compose_mesos
|
/docky
|
UTF-8
| 1,944
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
# docky: wrapper around the docker CLI for docker-in-docker style setups.
# If no docker daemon appears to be running, mount the cgroup hierarchies
# and start a daemon in the background, then forward all arguments to docker.
CGROUP=/sys/fs/cgroup
# grep exits 1 when no docker process is listed by ps.
ps cax | grep docker > /dev/null
if [ $? -eq 1 ]; then
mount -n -t tmpfs -o uid=0,gid=0,mode=0755 cgroup $CGROUP
# Mount each cgroup subsystem that PID 1 belongs to under /sys/fs/cgroup.
for SUBSYS in $(cut -d: -f2 /proc/1/cgroup)
do
[ -d $CGROUP/$SUBSYS ] || mkdir $CGROUP/$SUBSYS
mountpoint -q $CGROUP/$SUBSYS ||
mount -n -t cgroup -o $SUBSYS cgroup $CGROUP/$SUBSYS
# The two following sections address a bug which manifests itself
# by a cryptic "lxc-start: no ns_cgroup option specified" when
# trying to start containers within a container.
# The bug seems to appear when the cgroup hierarchies are not
# mounted on the exact same directories in the host, and in the
# container.
# Named, control-less cgroups are mounted with "-o name=foo"
# (and appear as such under /proc/<pid>/cgroup) but are usually
# mounted on a directory named "foo" (without the "name=" prefix).
# Systemd and OpenRC (and possibly others) both create such a
# cgroup. To avoid the aforementioned bug, we symlink "foo" to
# "name=foo". This shouldn't have any adverse effect.
echo $SUBSYS | grep -q ^name= && {
NAME=$(echo $SUBSYS | sed s/^name=//)
ln -s $SUBSYS $CGROUP/$NAME
}
# Likewise, on at least one system, it has been reported that
# systemd would mount the CPU and CPU accounting controllers
# (respectively "cpu" and "cpuacct") with "-o cpuacct,cpu"
# but on a directory called "cpu,cpuacct" (note the inversion
# in the order of the groups). This tries to work around it.
[ $SUBSYS = cpuacct,cpu ] && ln -s $SUBSYS $CGROUP/cpu,cpuacct
done
# Start the daemon in the background and give it a moment to come up.
docker -d -H unix:///var/run/docker.sock &> /var/log/docker.log &
sleep 1
fi
# Forward every argument to the real docker client.
docker $@
| true
|
65a7983bb7a77c07a2fca723ff2d01585316622b
|
Shell
|
blueafoo/lbrycrd
|
/packaging/docker-for-binary/deploy.sh
|
UTF-8
| 990
| 4.125
| 4
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# Build a docker image from the latest lbrycrd release and push it to Docker Hub.
set -euo pipefail
# Abort early when the docker client is not installed at all.
hash docker 2>/dev/null || { echo >&2 'Make sure Docker is installed'; exit 1; }
# Temporarily relax -e/pipefail so we can capture the grep's exit status
# instead of dying immediately when the daemon is unreachable.
set +eo pipefail
docker version | grep -q Server
ret=$?
set -eo pipefail
if [ $ret -ne 0 ]; then
echo "Cannot connect to Docker server. Is it running? Do you have the right user permissions?"
exit 1
fi
echo "This will build the docker image using the latest lbrycrd release and push that image to Docker Hub"
echo ""
echo "What Docker tag should I use? It's the part that goes here: lbry/lbrycrd:TAG"
read -p "Docker tag: " docker_tag
if [ -z "$docker_tag" ]; then
echo "Docker tag cannot be empty"
exit 1
fi
# Directory containing this script; the Dockerfile is used from here.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Pick the first linux release tarball URL from the GitHub releases API.
release_url=$(curl -s https://api.github.com/repos/lbryio/lbrycrd/releases | grep -F 'lbrycrd-linux' | grep download | head -n 1 | cut -d'"' -f4)
docker build --build-arg "release_url=$release_url" --tag "lbry/lbrycrd:${docker_tag}" -f Dockerfile "$DIR"
docker push "lbry/lbrycrd:${docker_tag}"
| true
|
b5810175a2f463c8ab19443fe7f4fbc8634f62cf
|
Shell
|
breydivasquez/docker
|
/agetic-plantillas/frontend/docker/entrypoint.sh
|
UTF-8
| 245
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/sh
# Container entrypoint: patch the built frontend in place, then exec the CMD.
set -e
# Strip the "maps/" prefix from src= attributes in index.html.
sed -i -e 's/src=maps\//src=/g' /usr/share/nginx/html/index.html
# Point references at the .js bundles rather than their .js.map files.
sed -i -e 's/.js.map/.js/g' /usr/share/nginx/html/index.html
# Replace the BACKEND_URL placeholder with the runtime environment value
# in every served file.
find /usr/share/nginx/html/ -type f -exec sed -i "s~BACKEND\_URL~${BACKEND_URL}~g" {} \;
# Hand control to the container command.
exec "$@"
| true
|
8c308cdb68f9a1734491d409cfe0ca7778306710
|
Shell
|
wang-shun/groundwork-trunk
|
/groundwork-monitor-dr/scripts/generic_app.capture
|
UTF-8
| 1,956
| 3.75
| 4
|
[] |
no_license
|
#!/bin/bash -e
# generic_app.capture
# Copyright 2010 GroundWork Open Source, Inc. ("GroundWork"). All rights
# reserved. Use is subject to GroundWork commercial license terms.
# DO NOT EDIT THIS FILE.
# It is a generic version that is probably symlinked
# to the place where it is being used. Instead, make
# a copy under a different (object-specific) name,
# remove this notice, and edit that copy.
# {staged_path} {replica_machine} {replica_user} {replica_path} {working_path} {copy_pattern} ...
if [ $# -lt 6 ]; then
program=`basename $0`
echo "usage: $program {staged_path} {replica_machine} {replica_user} {replica_path} {working_path} {copy_pattern} ..."
exit 1
fi
staged_path="$1"
replica_machine="$2"
replica_user="$3"
replica_path="$4"
working_path="$5"
shift 5
copy_patterns="$*"
scripts_path=`dirname $0`
# FIX THIS
# Stop any application program that might alter the content of the application files. That
# includes any daemons, any cron jobs, and any other periodically-run programs. Each must
# either be brought down completely, or at least have its contents flushed and be prevented
# from making further alterations for the time being, or be blocked from starting. If it is
# a program that is run periodically that might now be in progress, it must either be stopped
# or we must wait for its current cycle to complete (with some timeout before killing it anyway).
# FIX THIS: make sure multiple copy patterns make it through this intact
$scripts_path/make_staged_app "$working_path" "$staged_path" $copy_patterns
# FIX THIS
# Start or unblock the application programs stopped or blocked above. Reverse the actions we
# took above to stop the application, so that daemons will begin anew or pick up again from
# where they were paused, cron jobs get unblocked, and any other related programs may run again.
$scripts_path/make_replica_app "$staged_path" "$replica_machine" "$replica_user" "$replica_path"
| true
|
173c2edabcf9b5b7971dafc60e25a7f4ad83e66a
|
Shell
|
IJHidding/Internship-Helsinki
|
/splitfiles.sh
|
UTF-8
| 254
| 2.578125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/zsh
# Split the annotated uniprot dump into one TSV file per feature type.
input=$1
# extract PATTERN OUTFILE: write all matching lines of the input to OUTFILE.
extract() {
grep "$1" "$input" > "$2"
}
extract "ACT_SITE" protein_act_site.tsv
extract "BINDING" protein_binding.tsv
extract "DNA_BIND" protein_dna_bind.tsv
extract "METAL" protein_metal.tsv
| true
|
751ed261b97ca93d3201801bfcd048ecd1aa94b0
|
Shell
|
berli/AutoShadowSocks
|
/git_commit.sh
|
UTF-8
| 526
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
# Stage the OSX SSLinker sources, commit with the message given as $1
# (or a placeholder), and push when origin points at coding.net.
SCRIPT_DIR="$(dirname $0)"
# dirname yields a relative path when invoked as ./script; make it absolute.
if [ '.' = "${SCRIPT_DIR:0:1}" ]; then
SCRIPT_DIR="$(pwd)/${SCRIPT_DIR}"
fi
echo SCRIPT_DIR = ${SCRIPT_DIR}
cd "${SCRIPT_DIR}"
git add -A OSX/SSLinker/*
git add -A OSX/Resources/*
git add -A OSX/SSLinker.xcodeproj/xcshareddata/*
# Delegate the osxComponents subtree to its own commit script.
"./OSX/osxComponents/git_commit.sh"
COMMIT_MESSAGE='no commit message'
if [ -n "$1" ]; then
COMMIT_MESSAGE=$1
fi
git commit -am "${COMMIT_MESSAGE}"
# Push automatically only when the remote lives on coding.net.
git remote show origin | grep 'coding.net' >/dev/null 2>/dev/null && git push origin master
exit 0
| true
|
7c3f8912386c5970ddf5e9bc7b67e9d4b4af0a04
|
Shell
|
dgdell/reelvdr
|
/src/archiv/translations/mklangfiles-avg.sh
|
UTF-8
| 1,196
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/sh
srcpath=../..
dest="origs-`date --iso-8601`"
[ ! -d $dest ] && mkdir $dest
#xml file for menu + setup menu
cp -a $srcpath/etc/vdr/plugins/setup/setup-lang.xml $dest/
for i in burn \
reelbox \
skinreelng \
graphlcd \
remote \
setup \
extrecmenu \
channelscan \
filebrowser \
epgsearch \
epgtimers\
epgsearchonly \
xinemediaplayer \
dvdswitch \
cdda \
vcd \
music \
femon \
timeline \
streamdev-client \
streamdev-server \
games \
arghdirector \
osdteletext \
sleeptimer \
mediad \
premiereepg \
loadepg \
pin \
vlcclient \
radio \
reelepg \
install \
mediaplayer \
mediaplayerdvd \
netcv \
netcvinfo \
netcvdiseqc \
ripit \
dpkg \
dpkgopt \
webbrowser \
bgprocess \
xepg \
ipod \
; do
case $i in
cdda)
src=$i/${i}_i18n.c
;;
femon)
src=$i/${i}i18n.c
;;
games|setup)
src=$i/i18n.cpp
;;
*)
src=$i/i18n.c
;;
esac
[ ! -d $dest/$i ] && mkdir $dest/$i
cp -a $srcpath/vdr-plugins/src/$src $dest/$i/
done
[ ! -d $dest/vdr ] && mkdir $dest/vdr
cp -a $srcpath/vdr-1.4/i18n.c $dest/vdr/
cp -a reel $dest
if [ -f langfiles.zip ]; then mv langfiles.zip langfiles.zip.old ; fi
zip langfiles.zip -r $dest README
| true
|
e29983807768f0c900cdfc910eaad0ab4bb389b8
|
Shell
|
songguang-2010/k8s_installer
|
/scripts/add_node.sh
|
UTF-8
| 715
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/sh
# Add a worker node to the cluster: prepare the host, install the kubernetes
# node components, then CNI and weave networking.
# Usage: add_node.sh <node-name>
# Quote "$1": the original unquoted test broke on multi-word or glob args.
if [ -z "$1" ]
then
echo "node name is missing"
exit 1
fi
nodeName=$1
# Directory containing this script.
filePath=$(cd "$(dirname "$0")" && pwd)
# Parent of the script directory (project root).
rootPath=$(cd "$(dirname "$filePath")" && pwd)
. "${rootPath}/scripts/common/constant.sh"
# load config items
# . ${configPath}/config_var.sh
# Execute install_prepare_node_normal.sh (host preparation).
/bin/sh "${filePath}/prepare/install_prepare_node_normal.sh" "${nodeName}"
# reconfig etcd cert files
#.....
# Execute install_kbs_node_normal.sh (kubernetes node install).
/bin/sh "${filePath}/kbs/install_kbs_node_normal.sh" "${nodeName}"
# Install cni and weave networking.
/bin/sh "${filePath}/cni/install_cni_node_normal.sh" "${nodeName}"
/bin/sh "${filePath}/weave/install_weave_node_normal.sh" "${nodeName}"
| true
|
22a1d0887708f3a25fc120d4818a146df313e173
|
Shell
|
Mohitkaushal97/File
|
/bs_docker.sh
|
UTF-8
| 1,226
| 2.71875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Bootstrap Docker, docker-compose and docker-machine on Ubuntu 18.04.
echo "Docker install, tested Ubuntu 18.04"
# docker:: https://docs.docker.com/install/linux/docker-ce/ubuntu/
sudo apt-get update -y
# Remove any distro-packaged docker before installing docker-ce.
sudo apt-get remove -y docker docker-engine docker.io containerd runc
sudo apt-get install -y apt-transport-https ca-certificates curl gnupg-agent software-properties-common
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
sudo apt-get update -y
sudo apt-get install -y docker-ce docker-ce-cli containerd.io
# Smoke-test the daemon with the hello-world image.
sudo docker run hello-world
# docker-compose :: https://docs.docker.com/compose/install/
sudo curl -L "https://github.com/docker/compose/releases/download/1.25.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
sudo chmod +x /usr/local/bin/docker-compose
# docker-machine :: https://docs.docker.com/v17.09/machine/install-machine/#install-machine-directly
curl -L https://github.com/docker/machine/releases/download/v0.13.0/docker-machine-`uname -s`-`uname -m` >/tmp/docker-machine && \
chmod +x /tmp/docker-machine && \
sudo cp /tmp/docker-machine /usr/local/bin/docker-machine
| true
|
1a5ce4884f9353b6e0d85e7eb8af203a874489f5
|
Shell
|
praveenprem/Bootstrap
|
/nextcloud.sh
|
UTF-8
| 6,116
| 3.296875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/env bash
echo "Updating box....."
echo "======================================================================"
apt update
apt -y upgrade
apt -y install vim wget gpg
echo "Installing required packages....."
echo "======================================================================"
apt -y install nginx mariadb-server
apt -y install php7.4-gd php7.4-mysql php7.4-curl php7.4-mbstring php7.4-intl
apt -y install php7.4-gmp php7.4-bcmath php-imagick php7.4-xml php7.4-zip php7.4-fpm
echo "Starting & enabling MySQL server......."
systemctl start mysql
systemctl enable mysql
echo "Setting up Nextcloud database...."
echo "======================================================================"
db_user="next-admin"
db_pass="$(tr -dc A-Za-z0-9 </dev/urandom | head -c 15)"
#read -sp "Nextcloud database password: " db_pass
#if [[ $db_pass == "" ]]; then
# echo "Database password can't be empty"
# exit 1
#fi
echo "Creating Nextcloud database user and database"
cat <<EOF | mysql --user=root
CREATE USER '$db_user'@'localhost' IDENTIFIED BY '$db_pass';
CREATE DATABASE IF NOT EXISTS nextcloud CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci;
GRANT ALL PRIVILEGES ON nextcloud.* TO '$db_user'@'localhost';
FLUSH PRIVILEGES;
EOF
echo "Check the user and permissions created"
cat <<EOF | mysql --user=root
SHOW GRANTS FOR '$db_user'@'localhost';
EOF
printf "\n\n"
echo "=================== Nextcloud database credentials ==================="
echo "Username: $db_user"
echo "Password: $db_pass"
echo "======================================================================"
printf "\n\n"
read -p "Press enter to continue"
echo "Installing Nextcloud....."
echo "======================================================================"
mkdir nextcloud-downloads
cd nextcloud-downloads
echo "Downloading assets...."
wget https://download.nextcloud.com/server/releases/latest.tar.bz2
wget https://download.nextcloud.com/server/releases/latest.tar.bz2.md5
echo "Validating package integrity...."
if ! md5sum --status -c latest.tar.bz2.md5; then
echo "Package corrupted!"
exit 1
fi
echo "Extracting Nextcloud server files...."
tar -xjvf latest.tar.bz2 -C /var/www/
echo "Configuring Nginx...."
read -p "Nextcloud URL: " domain_name
if [[ $domain_name == "" ]]; then
echo "Domain name not given! Server will respond to any server name requests"
domain_name='_'
fi
cat <<\EOF > /etc/nginx/sites-enabled/nextcloud.conf
upstream php-handler {
server unix:/var/run/php/php7.4-fpm.sock;
}
server {
listen 80;
listen [::]:80;
server_name {{ domain }};
client_max_body_size 10G;
fastcgi_buffers 64 4K;
gzip on;
gzip_vary on;
gzip_comp_level 4;
gzip_min_length 256;
gzip_proxied expired no-cache no-store private no_last_modified no_etag auth;
gzip_types application/atom+xml application/javascript application/json application/ld+json application/manifest+json application/rss+xml application/vnd.geo+json application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/bmp image/svg+xml image/x-icon text/cache-manifest text/css text/plain text/vcard text/vnd.rim.location.xloc text/vtt text/x-component text/x-cross-domain-policy;
add_header Referrer-Policy "no-referrer" always;
add_header X-Content-Type-Options "nosniff" always;
add_header X-Download-Options "noopen" always;
add_header X-Frame-Options "SAMEORIGIN" always;
add_header X-Permitted-Cross-Domain-Policies "none" always;
add_header X-Robots-Tag "none" always;
add_header X-XSS-Protection "1; mode=block" always;
fastcgi_hide_header X-Powered-By;
root /var/www/nextcloud;
index index.php index.html /index.php$request_uri;
location = / {
if ( $http_user_agent ~ ^DavClnt ) {
return 302 /remote.php/webdav/$is_args$args;
}
}
location = /robots.txt {
allow all;
log_not_found off;
access_log off;
}
location ^~ /.well-known {
location = /.well-known/carddav { return 301 /remote.php/dav/; }
location = /.well-known/caldav { return 301 /remote.php/dav/; }
location /.well-known/acme-challenge { try_files $uri $uri/ =404; }
location /.well-known/pki-validation { try_files $uri $uri/ =404; }
return 301 /index.php$request_uri;
}
location ~ ^/(?:build|tests|config|lib|3rdparty|templates|data)(?:$|/) { return 404; }
location ~ ^/(?:\.|autotest|occ|issue|indie|db_|console) { return 404; }
location ~ \.php(?:$|/) {
fastcgi_split_path_info ^(.+?\.php)(/.*)$;
set $path_info $fastcgi_path_info;
try_files $fastcgi_script_name =404;
include fastcgi_params;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
fastcgi_param PATH_INFO $path_info;
fastcgi_param HTTPS on;
fastcgi_param modHeadersAvailable true; # Avoid sending the security headers twice
fastcgi_param front_controller_active true; # Enable pretty urls
fastcgi_pass php-handler;
fastcgi_intercept_errors on;
fastcgi_request_buffering off;
}
location ~ \.(?:css|js|svg|gif)$ {
try_files $uri /index.php$request_uri;
expires 6M; # Cache-Control policy borrowed from `.htaccess`
access_log off; # Optional: Don't log access to assets
}
location ~ \.woff2?$ {
try_files $uri /index.php$request_uri;
expires 7d; # Cache-Control policy borrowed from `.htaccess`
access_log off; # Optional: Don't log access to assets
}
location /remote {
return 301 /remote.php$request_uri;
}
location / {
try_files $uri $uri/ /index.php$request_uri;
}
EOF
sed -i "s/{{ domain }}/$domain_name/g" /etc/nginx/sites-enabled/nextcloud.conf
| true
|
e2f03193760d5170e1a7bfccdf8c03f4ce6a1410
|
Shell
|
jolynnjones/psych516w2020
|
/Week_9/tensors_batch.sh
|
UTF-8
| 313
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
# Submit one tensors_job.sh sbatch job per participant listed in
# participants.txt, with per-participant stdout/stderr logs under a
# timestamped directory.
curTime=$(date +"%Y%m%d-%H%M%S")
logdir=~/logfiles/EDSD/${curTime}-tensors
mkdir -p "$logdir"
# Read the participant list line by line instead of word-splitting `cat`
# output, so blank lines and stray whitespace are handled safely.
while IFS= read -r i; do
[ -n "$i" ] || continue
sbatch \
-o "$logdir/o-${i}.txt" \
-e "$logdir/e-${i}.txt" \
~/psych516/scripts/tensors_job.sh \
"$i"
# Throttle submissions to be gentle on the scheduler.
sleep 1
done < ~/psych516/EDSD/participants.txt
| true
|
36415fd0b31afc3c64ee8402663f552bab7ae69f
|
Shell
|
justinfiore/pass-utils
|
/bin/pass-exec-psql
|
UTF-8
| 3,134
| 4.15625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
set -eu
script_dir="$( cd "$( dirname "$0" )" && pwd )"
source "$script_dir/commons"
DEFAULT_PASS_HOME="$HOME/.password-store"
function show_help() {
cat <<EOF
Usage: `basename $0` [options] [--] [any extra args to pass to psql]
Execute 'psql' against a database from a password entry constructed by 'pass-write-pachelbel-deployment'
Options:
-h, --help Show this help
-n, --password-name [REQUIRED] The name of the password to use. e.g., foo/bar/baz
-b, --database-name If specified, will be appended to the 'database' field in the output yaml. No default.
-v, --verbose Enable verbose output
-q, --quiet Silence output from this script and only output results from 'psql'
--dry-run Don't execute the command, just log it out
Any extra arguments (or options) AFTER the recognized options will be passed to 'psql'
Environment Variables:
PASS_UTILS_PASS_HOME If specified, should be the directory that 'pass' uses to store its info. Default: $DEFAULT_PASS_HOME
Examples:
To connect to the target database using the database name from the pass entry:
pass-exec-psql -n my-postgres
To run a specific query against the target database and dump the results to a file:
pass-exec-psql -n my-postgres -b my_database_name -- -c "select count(*) from users;" -o /tmp/out.txt
EOF
exit 0
}
verbose="false"
quiet="false"
dry_run="false"
password_name=""
database_name=""
PASS_HOME="${PASS_UTILS_PASS_HOME:=$DEFAULT_PASS_HOME}"
while test "$#" -ne 0
do
case "$1" in
-h|--h|--help)
show_help;;
-n|--password-name)
shift; password_name="$1";;
-b|--database-name)
shift; database_name="$1";;
-v|--verbose)
verbose="true";;
-q|--quiet)
quiet="true";;
--dry-run)
dry_run="true";;
--)
shift;
break;
;;
*)
echo "Unknown argument: $1" 1>&2
exit 1
;;
esac
shift
done
if [ "$verbose" == "true" ]; then
echo Using PASS_HOME: $PASS_HOME
fi
validate_non_empty_required_arg "$password_name" "--password-name"
passInfo="`pass show \"$password_name\"`"
username=`echo "$passInfo" | yq -r .connections[0].username`
password=`echo "$passInfo" | yq -r .connections[0].password`
hostname=`echo "$passInfo" | yq -r .connections[0].host`
port=`echo "$passInfo" | yq -r .connections[0].port`
if [ "$database_name" == "" ]; then
database_name=`echo "$passInfo" | yq -r .database`
fi
if [ "$database_name" == "null" ] || [ "$database_name" == "" ]; then
echo "ERROR: Must specify --database-name because there is none in the password file"
echo
show_help
exit 1
fi
psqlCommand="psql -U $username -h $hostname -p $port $database_name"
if [ "$dry_run" == "true" ]; then
echo "DRY RUN: Would have executed:"
echo "$psqlCommand $@"
else
if [ "$quiet" == "false" ]; then
echo "Using psql command: $psqlCommand $@"
fi
PGPASSWORD="$password" $psqlCommand "$@"
fi
| true
|
351bb284b8b0a361fb25a16bc5102e1d8c27f48a
|
Shell
|
zeus911/SAVPN_BSNL01
|
/crm.ear/crm.war/WEB-INF/bin/XMLConverter/XMLConverter.sh
|
UTF-8
| 1,396
| 3.375
| 3
|
[] |
no_license
|
#! /bin/bash
###############################################################################
#
# **** COPYRIGHT NOTICE ****
#
# (c) Copyright 2003-2010 Hewlett-Packard Development Company, L.P.
#
###############################################################################
###############################################################################
#
# $Source: /tmp/vpn/SA_VPN_SP/jboss/server/default/deploy/crmportal.sar/crm.war/WEB-INF/bin/XMLConverter/XMLConverter.sh,v $
# $Revision: 1.8 $
# $Date: 2010-10-05 14:18:26 $
# $Author: shiva $
#
###############################################################################
#
# <Description>
#
###############################################################################
# Setup SA_VPN_SP variables relative to ServiceActivator variables
if [ ! -e ../../config/setenv.crmportal ]; then
HERE=`pwd`
cd ../../config
./generate_setenv.sh
cd $HERE
fi
. ../../config/setenv.crmportal
# Determine OS
case `uname -rs` in
CYGWIN*) OSTYPE=WINDOWS;;
*) OSTYPE=UNIX;;
esac
# Use ; for Windows and : for Unix
if [ "$OSTYPE" = "WINDOWS" ]; then
export CLASSPATH="."
else
export CLASSPATH="."
fi
if [ ! $# -eq 1 ]
then
echo
echo "usage: XMLConverter.sh <input.xml>"
echo " where <input.xml> normally is portal.xml"
exit
fi
$JAVA_HOME/bin/java com.hp.ov.activator.vpn.XMLConverter.XMLConverter "$1"
| true
|
4709b79e57e2083bac91849d5e25f134cc2a5de1
|
Shell
|
ragtz/titan_utils
|
/submit-titan.sh
|
UTF-8
| 440
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/bash
# Identifies the best titan-machine to run a command on and executes that command remotely. Aliases must live in .bashrc in order to be respected (hence the interactive bash -i). Best GPU should be identified by the python script, which prints candidate hostnames one per line, best first.
# mapfile splits on newlines without the fragile IFS save/restore dance.
mapfile -t results < <(python ~/titan_utils/titan_utils.py)
best_titan=${results[0]}
loc=$(pwd)
echo "Executing \`${*}\` on ${best_titan}"
# Quote the ssh target so unusual usernames/hostnames cannot word-split.
ssh -t "$USER@$best_titan" "/bin/bash -ci \"cd ${loc}; ${*}\""
| true
|
bc21fa06c7ddea9a978b387c67ef2621750cf9ab
|
Shell
|
sjas/dotfiles-5
|
/.shell/aliases
|
UTF-8
| 6,979
| 3.296875
| 3
|
[] |
no_license
|
function is_zsh() { [ -n "$ZSH_NAME" ]; }
function exists() { command -v "$1" >/dev/null; }
# Bootstrap functions
if ! exists yadm; then
function yadm() {
if ! exists git; then
echo "Install git"
return 1
fi
local yadm
yadm="$HOME/.local/bin/yadm"
mkdir -p "$(dirname "$yadm")"
curl -o "$yadm" "https://raw.githubusercontent.com/TheLocehiliosan/yadm/master/yadm"
chmod +x "$yadm"
unset -f yadm
}
fi
function yadm_setup() {
local authkeys
if [ -f "$HOME/.ssh/authorized_keys" ]; then
authkeys="$(cat "$HOME/.ssh/authorized_keys")"
fi
yadm clone "https://dots.ree.to/repo"
[[ -n "$authkeys" ]] && echo "$authkeys" >> "$HOME/.ssh/authorized_keys"
}
# Bookmarks
typeset -A bookmarks
function j() { cd "${bookmarks[$1]}" || return; }
if is_zsh; then
# shellcheck disable=SC2154
function _j() { _arguments "1:bookmark:(${(k)bookmarks})"; }
compdef _j j
fi
bookmarks[uni]="$HOME/projects/cambridge/ii"
bookmarks[diss]="$HOME/projects/cambridge/ii/project/dissertation"
bookmarks[continuity]="$HOME/projects/cambridge/ii/project/continuity"
bookmarks[web]="$HOME/projects/ree.to"
# Dotfiles
# shellcheck disable=SC2015
is_zsh && alias d='noglob yadm' || alias d='yadm'
exists git && [ -f "$HOME/.config/git/diff.inc" ] && ! exists diff-so-fancy && rm "$HOME/.config/git/diff.inc"
# AWS
if exists aws; then
# shellcheck disable=SC2015,SC1091
is_zsh && source /usr/bin/aws_zsh_completer.sh || source /usr/bin/aws_completer
fi
if exists gcloud && is_zsh; then
source /opt/google-cloud-sdk/path.zsh.inc
source /opt/google-cloud-sdk/completion.zsh.inc
fi
# Config function
typeset -A config
function cfg() {
local old
old="$(pwd)"
cd "$HOME" || exit
eval "$EDITOR" "${config[$1]}"
cd "$old" || exit
}
if is_zsh; then
# shellcheck disable=SC2154
function _cfg() { _arguments "1:module:(${(k)config})"; }
compdef _cfg cfg
fi
# Systemd
systemd_sudo_commands=(start stop reload restart enable disable mask unmask edit daemon-reload)
if is_zsh; then
function sc() {
if [ "$#" -ne 0 ] && [ "${systemd_sudo_commands[(r)$1]}" == "$1" ]; then
sudo systemctl "$@"
else
systemctl "$@"
fi
}
else
function sc() {
if [ "$#" -ne 0 ] && [[ " ${systemd_sudo_commands[*]} " = *" $1 "* ]]; then
sudo systemctl "$@"
else
systemctl "$@"
fi
}
fi
is_zsh && compdef sc=systemctl
alias scu="systemctl --user"
# Git
# shellcheck disable=SC2015
is_zsh && alias g='noglob git' || alias g='git'
alias g=git
config[git]="$HOME/.config/git/*"
# Editor
alias vi='$EDITOR'
if exists nvim; then
alias todo="nvim +Goyo ~/shared/TODO.md"
function nvimbench() { bench="$(mktemp)" && /usr/bin/nvim --startuptime "$bench" "$@" && tail -1 "$bench" && rm -f "$bench"; }
config[nvim]="$HOME/.config/nvim/*.vim"
fi
# Package Manager
if exists pacman; then
function pkgdiff() {
flags="-Qqe"
hostnames=()
for var in "$@"; do
case "$var" in
(-*) flags="$var" ;;
(*) hostnames+=("$var") ;;
esac
done
if [[ ${#hostnames[@]} -eq 1 ]]; then
# shellcheck disable=SC2029
icdiff -U 0 -L "$(hostname)" -L "${hostnames[1]}" <(pacman "$flags") <(ssh "${hostnames[1]}" "pacman $flags")
elif [[ ${#hostnames[@]} -eq 2 ]]; then
# shellcheck disable=SC2029
icdiff -U 0 -L "${hostnames[1]}" -L "${hostnames[2]}" <(ssh "${hostnames[1]}" "pacman $flags") <(ssh "${hostnames[2]}" "pacman $flags")
else
echo "usage: pkgdiff [flags] <host 1> [host 2]"
return 1
fi
}
function binaries() { pacman -Qql "$@" | sed -n '/\/usr\/bin\/./s/\/usr\/bin\///p'; }
# shellcheck disable=SC2046
function pkgsize() { pacman --config /dev/null -Rdd --print-format '%s' $(pactree -u "$@") | awk '{size+=$1} END { print size }' | numfmt --round=nearest --to=iec-i --suffix=B --format="%.1f"; }
function pkgs() { comm -13 <(pacman -Qgq base base-devel | sort -u) <(pacman "${1:--Qqe}" | sort -u) | column; }
fi
# Chrome
exists google-chrome-stable && alias chrome="google-chrome-stable"
# Irssi
exists irssi && alias irssi='irssi --config=$XDG_CONFIG_HOME/irssi/config --home=$XDG_DATA_HOME/irssi'
# File listing
if exists exa; then
alias ls="exa -xF"
alias lt="ls -T --group-directories-first"
alias ll="exa -lF --git"
else
alias ls="ls -xF"
alias ll="\\ls -lF"
fi
alias la="ls -a"
alias lh="ls -d .*"
alias lla="ll -a"
# Filesystem operations
alias mv="mv -v"
alias rm="rm -Iv"
alias cp="cp -v"
alias df="df -hl"
alias du="du -h"
alias ncdu="ncdu -x"
exists udiskie-mount && alias mount=udiskie-mount
exists udiskie-umount && alias umount=udiskie-umount
# Use trash-put if available
# Not recommended, but very useful if used sparingly (and not relied on)
# shellcheck disable=SC2015
exists trash-put && alias rm=trash-put || alias rm="rm -Iv"
exists trash-list && alias tls=trash-list
exists trash-restore && alias tres=trash-restore
# Alias for going back directories
alias up=bd
# $HOME directory cleaning
HOMEDIR_ALLOWS="$HOME/.config/homedir_allows"
# shellcheck disable=SC2139
alias check_homedir="fd -H --maxdepth 1 --ignore-file $HOMEDIR_ALLOWS >> $HOMEDIR_ALLOWS && '$EDITOR' $HOMEDIR_ALLOWS"
# Python
if exists bpython; then
function python() {
if [ "$#" -eq 0 ]; then
bpython
else
/usr/bin/python "$@"
fi
}
fi
# SSH public key
function pub() {
ssh-add -L | sed -n "0,/(none)/s//$USER@$HOST/p"
}
# Clipboard
if exists xclip; then
function c() { tr -d '\n' | xclip -sel clip; }
function copy() { c < "$1"; }
alias v="xclip -o -sel clip"
alias cpub="pub | tee >(c)"
fi
# Lastpass
if exists lpass && exists fzf; then
function cpass() {
lpass show -xjG "" | jq -r '.[] | "\(.name) (\(.username)) \(.password)"' | fzf --exit-0 --with-nth "..-2" | awk '{print $NF}' | c
}
fi
# GPG
# shellcheck disable=SC2155
if exists gpg; then
export GPG_TTY="$(tty)"
gpg-connect-agent updatestartuptty /bye >/dev/null
fi
# IP
alias pubip="drill myip.opendns.com @resolver1.opendns.com | awk '!/;;/ && /IN/' | head -n 1 | awk '{ print \$NF }'"
alias privip="hostname -i"
# Config files
config[alacritty]="$HOME/.config/alacritty/alacritty.yml##yadm.j2"
config[bash]="$HOME/.bashrc $HOME/.bash_profile"
config[compton]="$HOME/.config/compton/compton.conf"
config[dunst]="$HOME/.config/dunst/dunstrc"
config[i3]="$HOME/.config/i3/config.d/**/*.conf"
config[latex]="$HOME/.latexmkrc"
config[polybar]="$HOME/.config/polybar/config $HOME/.config/polybar/modules/*"
config[ranger]="$HOME/.config/ranger/*.conf $HOME/.config/ranger/commands.py $HOME/.config/ranger/scope.sh"
config[shell]="$HOME/.shell/aliases"
config[ssh]="$HOME/.ssh/config"
config[termite]="$HOME/.config/termite/config"
config[tmux]="$HOME/.tmux.conf"
config[xorg]="$HOME/.config/X11/*"
config[zathura]="$HOME/.config/zathura/zathurarc"
config[zsh]="$HOME/.zshrc $HOME/.zshenv $HOME/.zprofile"
unset -f exists
unset -f is_zsh
# vim: set ft=sh:sw=2:
| true
|
7318c105e8e4241f433cff07339ca21aae211181
|
Shell
|
prashkotak/ansible
|
/b1.sh
|
UTF-8
| 888
| 3.1875
| 3
|
[] |
no_license
|
# Interactive AWS helper menu.
# BUG FIX: the original Launch_instance had a completely empty body, which
# is a bash syntax error and prevented the whole script from running.
Launch_instance()
{
echo "You have selected to Launch Instance"
}
while :
do
clear
echo "-------------------------------------"
echo " Welcolme to Aws Cloud "
echo "-------------------------------------"
echo "[1] Launch Ec2 Instance"
echo "[2] Launch s3 Bucket "
echo "[3] Launch Load Balance"
echo "[4] Launch Auto Scalling"
echo "[5] Exit/Stop"
echo "======================="
echo -n "Enter your menu choice [1-5]: "
read yourch
case $yourch in
# 1) echo "Today is `date` , press a key. . ." ;;
1) break ;;
2) echo "Files in `pwd`" ; ls -l ; echo "Press a key. . ." ; read ;;
3) cal ; echo "Press a key. . ." ; read ;;
4) vi ;;
5) exit 0 ;;
*) echo "Opps!!! Please select choice 1,2,3,4, or 5";
echo "Press a key. . ." ; read ;;
esac
done
# Choice 1 breaks out of the loop; confirm the selection here.
if [ "$yourch" == 1 ]
then
Launch_instance
fi
| true
|
a2e5b14765a60b2302cf2b54b5cf8115554d4a23
|
Shell
|
bol/dotfiles
|
/zsh/zsh.d/macos.zsh
|
UTF-8
| 679
| 3.46875
| 3
|
[] |
no_license
|
# Bootstrap the Homebrew environment on macOS (Apple-silicon prefix).
function __setup_macos() {
# Load Homebrew's shell environment when it is installed under /opt/homebrew.
[[ -e /opt/homebrew/bin/brew ]] && eval "$(/opt/homebrew/bin/brew shellenv)"
# If brew is still not on $PATH, offer to run the official installer.
if ! (( $+commands[brew] )); then
read -q "?The brew command was not found on your path. Do you want to install it? " || return -1
print ''
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
fi
# Quiet brew: no auto-update, analytics, or environment hints.
export HOMEBREW_NO_AUTO_UPDATE=1
export HOMEBREW_NO_ANALYTICS=1
export HOMEBREW_NO_ENV_HINTS=1
# Prefer GNU coreutils over the Apple BSD ones
[[ -e /opt/homebrew/opt/coreutils/libexec/gnubin ]] && path=('/opt/homebrew/opt/coreutils/libexec/gnubin' $path)
}
# Only applies on Darwin; no-op elsewhere.
[[ "$(uname -s)" == "Darwin" ]] && __setup_macos
| true
|
54b11b4168374d49915fe3d403d5f54c34dc6a68
|
Shell
|
nicelhc13/cmp_gal_git
|
/scripts/commits/dist_cc_push.sh
|
UTF-8
| 452
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the distributed cc_push benchmark over each input graph, repeating the
# whole sweep ITER_NO+1 times, appending results to cc_push_results_03.
ITER_NO=3
THREAD_NO=56
GRAPHS=(
"friendster"
"webGraph"
"road-usad"
"twitter"
"socLive")

# BUG FIX: bash performs brace expansion *before* variable expansion, so
# {0..$ITER_NO} was the literal string "{0..3}" and the outer loop ran only
# once.  A C-style for loop iterates 0..ITER_NO as intended.
for (( iter = 0; iter <= ITER_NO; iter++ ));
do
    for graph in "${GRAPHS[@]}"; do
        echo bin/cc_push paper_inputs/${graph}_galois.csgr -t=${THREAD_NO} -symmetricGraph
        ./bin/cc_push "paper_inputs/${graph}_galois.csgr" -t="$THREAD_NO" -symmetricGraph >> cc_push_results_03
    done
done
| true
|
a4ccc0a9c10fe8a4eebac3e56fdc4d447b6b14d7
|
Shell
|
robert-schmidtke/v8-travis
|
/fetch-v8.sh
|
UTF-8
| 1,833
| 2.921875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Fetch and configure V8 6.8.290 for a release build (with ccache),
# patching the GN build config to silence two clang warnings.
set -x
# use ccache
export PATH="/usr/local/opt/ccache/libexec:$PATH"
# fetch V8 if necessary
if [ ! -d "./v8build/v8" ]; then
cd ./v8build
# get the Google depot tools
git clone --depth=1 https://chromium.googlesource.com/chromium/tools/depot_tools.git
export PATH=${PATH}:$(pwd)/depot_tools
# obtain proper V8 version
gclient
fetch v8 && cd ./v8
git checkout 6.8.290
# disable some warnings
# Locate config("default_warnings") and its inner is_clang branch, then
# insert two -Wno-* cflags just past that line (ex edits the file in place).
CONFIG_DEFAULT_WARNINGS_LINE=$(grep --line-number "^config(\"default\_warnings\") {$" build/config/compiler/BUILD.gn | cut -f1 -d:)
IS_CLANG_LINE=$(tail -n +${CONFIG_DEFAULT_WARNINGS_LINE} build/config/compiler/BUILD.gn | grep --line-number "^  if (is\_clang) {$" | head -n 1 | cut -f1 -d:)
INSERT_CFLAGS_LINE=$((CONFIG_DEFAULT_WARNINGS_LINE + IS_CLANG_LINE + 1))
ex -s -c "${INSERT_CFLAGS_LINE}i|      \"-Wno-null-pointer-arithmetic\"," -c x build/config/compiler/BUILD.gn
ex -s -c "${INSERT_CFLAGS_LINE}i|      \"-Wno-defaulted-function-deleted\"," -c x build/config/compiler/BUILD.gn
# the trace event repository is checked out at master, which does not compile currently
# so use the version that was most likely used for 6.8.290
cd ./base/trace_event/common
git checkout 211b3ed9d0481b4caddbee1322321b86a483ca1f
cd ../../../
# configure release
find . -name BUILD.gn -exec sed -i bak '/exe_and_shlib_deps/d' {} \;
./tools/dev/v8gen.py x64.release
export RELEASE=out.gn/x64.release
# from the chromium docs
export CCACHE_CPP2=yes
export CCACHE_SLOPPINESS=time_macros
export PATH="$(pwd)/third_party/llvm-build/Release+Asserts/bin:${PATH}"
# generate release info
gn gen ${RELEASE} \
--args='cc_wrapper="ccache" is_component_build=false is_debug=false target_cpu="x64" use_custom_libcxx=false use_custom_libcxx_for_host=false v8_static_library=true'
fi
| true
|
64e38df6d5a59fd72f6ea5573997dcc238052553
|
Shell
|
OscarSLi/tor-traceroutes
|
/deprecated/reorg.sh
|
UTF-8
| 988
| 3.6875
| 4
|
[] |
no_license
|
#--------------------------------------------------------------
# Purpose: (Deprecated) Reorganizes files copied from PL nodes
# Execution: bash reorg.sh <DIRNAME>
# Author: Oscar Li
#--------------------------------------------------------------
# Walks <DIRNAME>/<node>/<guard files>, copies each guard file into
# ~/results/<node>/<guard>, creating directories on demand, then deletes
# the source tree.  NOTE(review): heavy use of relative cd makes this
# order-sensitive -- paths like ../../results only resolve correctly when
# <DIRNAME> sits directly under $HOME.
DIRNAME=$1
# Reorganizes files
cd "$DIRNAME"
# Enumerate node directories via a temp listing file.
ls > nodes.txt
while read node_line
do
cd $node_line
ls > guards.txt
while read guard_line
do
# Guard name is everything before the first "(" in the file name.
guard=`echo $guard_line | cut -f1 -d"("`
# Creates folder for node if it does not already exist
if [ ! -e ../../results/$node_line ];
then
cd ~/results
mkdir $node_line
cd ~/"$DIRNAME"/$node_line
fi
# Creates folder for guard if it does not already exist
if [ ! -e ../../results/$node_line/$guard ];
then
cd ~/results/$node_line
mkdir $guard
cd ~/"$DIRNAME"/$node_line
fi
cp "$guard_line" ~/results/$node_line/$guard
done < guards.txt
# Remove the temp listing before moving to the next node directory.
rm guards.txt
cd ..
done < nodes.txt
rm nodes.txt
cd
# Source tree is no longer needed once everything is copied into ~/results.
rm -r "$DIRNAME"
| true
|
b97d82d3295ef0c5ef2972747ba71b380395e999
|
Shell
|
viscid-hub/mayavi-feedstock
|
/recipe/build.sh
|
UTF-8
| 362
| 2.953125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# conda-build entry point: install the package with setuptools.  On Linux the
# (currently commented-out) Xvfb scaffolding remains available for headless
# GUI test runs; on macOS a plain install suffices.

platform="$(uname -s)"
if [ "$platform" != "Darwin" ]; then
  USERNAME=$(id -u -n)
  # Xvfb :1 -screen 0 1280x1024x24 -auth localhost &
  # export DISPLAY=:1
  $PYTHON setup.py install --single-version-externally-managed --record record.txt
  # killall -u $USERNAME Xvfb || true
else
  $PYTHON setup.py install --single-version-externally-managed --record record.txt
fi
| true
|
b95e44281f32420a3e338debff36fa30fb3a5b3e
|
Shell
|
phanirajl/amazon-keyspaces-examples
|
/java/datastax-v4/connection-lambda/2-deploy.sh
|
UTF-8
| 392
| 2.671875
| 3
|
[
"MIT-0",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Build the Gradle project, package its artifacts to S3, and deploy the
# CloudFormation stack for the Keyspaces Lambda example.
set -eo pipefail
# Bucket name is written by an earlier setup step into bucket-name.txt.
ARTIFACT_BUCKET=$(<bucket-name.txt)
TEMPLATE=template.yml
BUILD_TYPE=gradle
echo "Building with $TEMPLATE..."
./gradlew build
# Upload local artifacts referenced by the template; out.yml gets the
# rewritten S3 URIs.
aws cloudformation package --template-file $TEMPLATE --s3-bucket $ARTIFACT_BUCKET --output-template-file out.yml
# CAPABILITY_NAMED_IAM allows the stack to create named IAM resources.
aws cloudformation deploy --template-file out.yml --stack-name KeyspacesLambdaExample --capabilities CAPABILITY_NAMED_IAM
| true
|
2e771caf97e470b76d12f732d75a284ab3e536e1
|
Shell
|
Ahmad-Shafique/General-purpose-programming
|
/PROGRAMMING/test/today1.12.2015.sh
|
UTF-8
| 122
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# Read an integer from stdin and report whether it is even or odd.
clear
echo "Enter a number"
# -r: keep backslashes in the input literal.
read -r input
# Use shell arithmetic instead of the deprecated external `expr`
# (avoids a fork and the fragile backquote/test combination).
if (( input % 2 == 0 ))
then
echo "Even"
else
echo "Odd"
fi
| true
|
e3d20e7f94c73ce4868a455fc6a8e8da8355f96e
|
Shell
|
rahpaere/pktgen
|
/stats.sh
|
UTF-8
| 228
| 3.21875
| 3
|
[] |
no_license
|
#! /bin/sh
# Print the eth1 receive packet rate (packets/second) every 10 seconds,
# forever.  Rates come from kernel counters in /sys with fractional-second
# timestamps; bc does the floating-point division.

# Shift the previous sample into (ta, pa) and take a fresh one into (tb, pb).
gather () {
ta=$tb
pa=$pb
# %N (nanoseconds) is a GNU date extension -- assumed available here.
tb=`date +%s.%N`
pb=`cat /sys/class/net/eth1/statistics/rx_packets`
}
# Packets per second over the last interval, 9 decimal places.
report () {
echo "scale=9; ($pb - $pa) / ($tb - $ta)" | bc
}
# Prime the first sample; the first report happens after one full interval.
gather
while true
do
sleep 10
gather
report
done
| true
|
ca73119cb3b91886a1a4196e6b696716d21e3c56
|
Shell
|
xMonny/Operating_systems_course
|
/Exercise-shell/05-b-4600.sh
|
UTF-8
| 346
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
# Check whether NUMBER ($1) lies within the closed interval [LEFT, RIGHT]
# ($2, $3).  Exit codes:
#   10 - wrong number of arguments
#    3 - an argument is not an integer
#    2 - LEFT is not strictly less than RIGHT
#    1 - NUMBER is outside [LEFT, RIGHT]
#    0 - NUMBER is inside [LEFT, RIGHT]
if [ "$#" -ne 3 ]; then
echo "Expect 3 arguments"
exit 10
fi

# Every argument must be an integer.  BUG FIX: the original chained the
# checks with -o (OR), which only failed when *all three* were non-numeric;
# separate tests joined with && reject the input when *any* one is bad.
# ("X" -eq "X" succeeds only for integers; stderr is silenced.)
if ! { [ "${1}" -eq "${1}" ] && [ "${2}" -eq "${2}" ] && [ "${3}" -eq "${3}" ]; } 2>/dev/null; then
exit 3
fi

NUMBER="${1}"
LEFT="${2}"
RIGHT="${3}"

if [ "${LEFT}" -ge "${RIGHT}" ]; then
exit 2
fi

# Two separate tests joined with || replace the obsolescent -o primary.
if [ "${LEFT}" -gt "${NUMBER}" ] || [ "${NUMBER}" -gt "${RIGHT}" ]; then
exit 1
fi
| true
|
35edc5fcc104d78b1ba4822665c1486b98d1c29c
|
Shell
|
yijunyu/piStar
|
/tool/istar2/merge.sh
|
UTF-8
| 920
| 3.546875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Merge every .istar2 model inside the folder given as $1 into a single
# $1.istar2 file (via the node-based merge/json helpers), then regenerate
# the default ../goalModel.js from the merged model.
if [ ! -d $1 ]; then
echo Please first create models insider the $1 folder
exit 0
fi
#if [ -f $1.istar2 ]; then
#  read -p "The command will write to the file $1.istar2, which already exists, Are you sure to overwrite it? " -n 1 -r
#  echo
#  if [[ $REPLY =~ ^[Nn]$ ]]; then
#    exit 0
#  fi
#fi
# Start from an empty accumulator and fold each model into it one by one.
echo > /tmp/merged.istar2
for f in $1/*.istar2; do
# merge.js combines the accumulator with $f; the sed strips the JS variable
# prefix so json.js can parse plain JSON back into istar2 form.
cat $f | node merge.js /tmp/merged.istar2 | sed -e 's/goalModel = //' > /tmp/merged.json
cat /tmp/merged.json | node json.js > /tmp/merged.istar2
done
mv /tmp/merged.istar2 $1.istar2
echo $1.istar2 has been merged from all .istar2 models inside the $1 folder.
#if [ -f ../goalModel.js ]; then
#  read -p "The command will overwrite the default model, which already exists, Are you sure to overwrite it? " -n 1 -r
#  echo
#  if [[ $REPLY =~ ^[Nn]$ ]]; then
#    exit 0
#  fi
#fi
# Regenerate the app's default model from the merged result.
cat $1.istar2 | node istar2.js > ../goalModel.js
echo the default model has been updated.
| true
|
0318dcb942fd9b95e1558d87aab3f9efb24716a4
|
Shell
|
ccolorado/tooling_scripts
|
/scripts/greedydoc
|
UTF-8
| 2,646
| 4.59375
| 5
|
[] |
no_license
|
#!/bin/bash
# A greedy approach for searching directories code and documentation
# this script helps search multiple directories with a single command
# Each directory can costumize the way the search command given a
# configuration mapping the required directory to a command template
# Function to display usage instructions
# Print command-line usage for this script to stdout.
display_usage() {
  local prog
  prog=$(basename "$0")
  echo "Usage: $prog [-f] query"
  echo "Options:"
  echo "  -f  List only filenames where the query was found"
}
# Function to search for the query in a directory using the specified command template
# Run one directory's configured search command.
# Globals:   current_dir (read) -- directory holding the config file
# Arguments: $1 directory (relative to current_dir)
#            $2 command template; "{query}" and "{onlyfiles}" placeholders
#            $3 query string
#            $4 "true" to list only matching filenames
# NOTE(review): the template is executed via eval, so config files are
# effectively trusted shell -- never point this at untrusted configs.
search_directory() {
local directory="$1"
local command_template="$2"
local query="$3"
local filenames_only="$4"
command_template="${command_template/\{query\}/$query}"
# {onlyfiles} becomes "-l" (grep-style filenames-only) or nothing.
if [ "$filenames_only" = true ]; then
command="${command_template/\{onlyfiles\}/-l}"
else
command="${command_template/\{onlyfiles\}/}"
fi
# Append the target directory as a path relative to the caller's cwd.
command="$command $(realpath --relative-to=./ $current_dir/$directory)"
eval $command;
}
# Read the configuration file and store the directory-command mappings
# Read the configuration file and store the directory-command mappings
# Each non-comment line is "directory=command template"; every entry is
# searched in turn with search_directory.
# Arguments: $1 config file path, $2 query, $3 filenames-only flag.
read_configuration() {
local config_file="$1"
local query="$2"
local filenames_only="$3"
while read -r line || [[ -n "$line" ]]; do
# Skip commented lines and empty lines
if [[ "$line" =~ ^[[:space:]]*# ]] || [[ -z "$line" ]]; then
continue
fi
# NOTE(review): this echo prints current_dir once per config entry --
# looks like leftover debug output; confirm before removing.
echo "$current_dir";
directory=$(echo "$line" | cut -d'=' -f1)
command_template=$(echo "$line" | cut -d'=' -f2-)
# Search in the directory using the command template
search_directory "$directory" "$command_template" "$query" "$filenames_only"
done < "$config_file"
}
# Check if the number of arguments is less than 1
if [ "$#" -lt 1 ]; then
display_usage
exit 1
fi
# Parse command line options
filenames_only=false
while getopts ":f" opt; do
case $opt in
f)
filenames_only=true
;;
\?)
display_usage
exit 1
;;
esac
done
# Shift the command line arguments to exclude the options
shift "$((OPTIND-1))"
# Get the query from the command line arguments
query="$1"
# Get the path of the closest configuration file
# Walk upward from the cwd until a .greedypig.conf is found (or we hit /).
current_dir="$(realpath .)"
script_name="greedypig"
config_file=""
while [ "$current_dir" != "/" ]; do
config_file="$current_dir/.$script_name.conf"
if [ -f "$config_file" ]; then
break
fi
current_dir="$(dirname "$current_dir")"
done
# If the configuration file doesn't exist, display an error message and exit
if [ ! -f "$config_file" ]; then
echo "Configuration file not found."
exit 1
fi
# Read the configuration file and search for the query
read_configuration "$config_file" "$query" "$filenames_only"
| true
|
ed5234e895abc106a487bdb82faca23833e16690
|
Shell
|
zhb339/book-learning
|
/操作系统/Linux/Linux程序设计/第2章_shell程序设计/case
|
UTF-8
| 123
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/sh
# Prompt for a yes/no answer and echo a canned response for each case.
echo "enter sss"
read tt
if [ "$tt" = "yes" ]; then
  echo "aaa"
elif [ "$tt" = "no" ]; then
  echo "bbb"
else
  echo "sorry"
fi
exit 0
| true
|
c57be002d1b448535e836bb604f3007a1d325159
|
Shell
|
ramaxlo/dotfiles
|
/setup.sh
|
UTF-8
| 1,437
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
# Install this dotfiles checkout: personal bin scripts, tmux, bash, vim
# (with Vundle plugins) and gitconfig.  Run from the dotfiles directory.

# Copy bin/ scripts into ~/bin, plus grabserial and prebuilt go binaries.
setup_bin()
{
echo "Setup bin"
if [ ! -d ~/bin ]; then
mkdir ~/bin
fi
cp -a bin/* ~/bin
echo "Clone grabserial repo"
git clone https://github.com/ramaxlo/grabserial.git
cp grabserial/grabserial ~/bin
rm -rf grabserial
echo "Install go related binaries"
tar Jxf go_bin.tar.xz -C ~/bin
}
# Install the tmux configuration.
setup_tmux()
{
echo "Setup tmux"
cp tmux.conf ~/.tmux.conf
}
# Make ~/.bashrc source the repo's bashrc (idempotent: only appended once).
setup_shell()
{
echo "Setup bashrc"
local BASHSETUP='source ~/dotfiles/bashrc'
if ! grep -q "$BASHSETUP" ~/.bashrc; then
echo $BASHSETUP >> ~/.bashrc
fi
}
# Install vimrc, cscope plugin, color schemes and Vundle, then install
# plugins headlessly (vim +PluginInstall +qa).
setup_vim()
{
echo "Setup VIM"
cp vimrc ~/.vimrc
if [ ! -d ~/.vim ]; then
mkdir -p ~/.vim/plugin
fi
if [ ! -d ~/.vim/plugin ]; then
mkdir -p ~/.vim/plugin
fi
if [ ! -d ~/.vim/bundle ]; then
mkdir -p ~/.vim/bundle
fi
cp cscope_maps.vim ~/.vim/plugin
cd ~/.vim
unzip ~/dotfiles/ColorSamplerPack.zip
cd - > /dev/null
cd ~/.vim/bundle
git clone https://github.com/gmarik/Vundle.vim.git
cd - > /dev/null
vim +PluginInstall +qa
}
# Verify required tools exist before touching anything; exit 1 otherwise.
check()
{
local PASS='1'
if ! which git > /dev/null 2>&1; then
echo "Please install git"
PASS='0'
fi
if ! which vim > /dev/null 2>&1; then
echo "Please install vim"
PASS='0'
fi
if [ "$PASS" == '0' ]; then
exit 1
fi
}
# Install the git configuration.
setup_gitconfig()
{
echo "Setup gitconfig"
cp gitconfig ~/.gitconfig
}
################
# START
################
check
setup_bin
setup_tmux
setup_shell
setup_vim
setup_gitconfig
echo "Done"
| true
|
0c1b33316b64e7f1cf50a5e11af6b3fc88c84640
|
Shell
|
making/terraforming-aws
|
/om/show-next-instructions.sh
|
UTF-8
| 1,474
| 3.09375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Print follow-up instructions for a freshly deployed PKS tile: looks up the
# staged product GUID and UAA admin secret through the Ops Manager API, reads
# the PKS domain from terraform state, then emits a ready-to-copy walkthrough.
set -eu
source $(dirname "$0")/common.sh

# GUID of the staged pivotal-container-service product.
GUID=$(om \
--target "https://${OPSMAN_DOMAIN_OR_IP_ADDRESS}" \
--username "$OPS_MGR_USR" \
--password "$OPS_MGR_PWD" \
--skip-ssl-validation \
curl \
--silent \
--path "/api/v0/staged/products" \
-x GET \
| jq -r '.[] | select(.type == "pivotal-container-service") | .guid'
)

# UAA management admin client secret for that deployed product.
ADMIN_SECRET=$(om \
--target "https://${OPSMAN_DOMAIN_OR_IP_ADDRESS}" \
--username "$OPS_MGR_USR" \
--password "$OPS_MGR_PWD" \
--skip-ssl-validation \
curl \
--silent \
--path "/api/v0/deployed/products/${GUID}/credentials/.properties.pks_uaa_management_admin_client" \
-x GET \
| jq -r '.credential.value.secret'
)

# PKS API / UAA endpoints derive from the terraform-managed domain.
PKS_DOMAIN=$(cat $TF_DIR/terraform.tfstate | jq -r '.modules[0].outputs.pks_api_domain.value')
PKS_API_URL=https://${PKS_DOMAIN}:9021
UAA_URL=https://${PKS_DOMAIN}:8443

# Unquoted EOF: the first block of variables is expanded now, while the
# backslash-escaped \${...} references stay literal for the reader to run.
cat <<EOF
PKS_API_URL=${PKS_API_URL}
UAA_URL=${UAA_URL}
ADMIN_SECRET=${ADMIN_SECRET}

PKS_USER=demo@example.com
PKS_PASSWORD=demodemo1234
CLUSTER_NAME=pks-demo1

The following instruction shows how to create a cluster named "\${CLUSTER_NAME}"

### Grant Cluster Access to a User

uaac target \${UAA_URL} --skip-ssl-validation
uaac token client get admin -s \${ADMIN_SECRET}
uaac user add \${PKS_USER} --emails \${PKS_USER} -p \${PKS_PASSWORD}
uaac member add pks.clusters.admin \${PKS_USER}

### Log in to PKS

pks login -k -a \${PKS_API_URL} -u \${PKS_USER} -p \${PKS_PASSWORD}
EOF
| true
|
2ee5ed02bfa7318c050baf3aea05594d182afd3e
|
Shell
|
rispoli/misc-scripts
|
/prelude.sh
|
UTF-8
| 428
| 3.34375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Common prelude: strict mode plus xtrace routed to a per-run log file.
set -o errexit   # Force exit at the first error
set -o nounset   # Treat unset variables as an error
set -o pipefail
# Prefix each trace line with the function name and line number.
export PS4='+ ${FUNCNAME[0]:+${FUNCNAME[0]}():}line ${LINENO}: '
syslogname="$(basename "$0")[$$]"
# BUG FIX: the trace path used "$0" verbatim; when the script is invoked via
# a path (e.g. ./dir/script.sh), "/tmp/$0.$$.log" points into a non-existent
# directory and the exec fails under errexit.  Use the basename instead.
tracefile="/tmp/$(basename "$0").$$.log"
exec 3<> "$tracefile"
# Route bash xtrace output to fd 3 (the log file) instead of stderr.
BASH_XTRACEFD=3
echo "Tracing to $tracefile as $syslogname"
unset syslogname tracefile
# Ad-hoc debug messages go into the same trace log.
debug() { echo "$@" >&3; }
set -x
| true
|
5a502c9bbd1f378abb0138db6fe7b4ab2d22f170
|
Shell
|
ejconlon/lambdabot
|
/lambdabot/script/discover.sh
|
UTF-8
| 275
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/bash
# Print the invoke URL of the "lambdabot" API Gateway REST API.
# Usage: discover.sh <aws-profile> <region>
set -eux
PROFILE="$1"
REGION="$2"
# Look up the REST API id by name via the API Gateway control plane.
API_ID=$(aws apigateway get-rest-apis --profile ${PROFILE} --region ${REGION} | \
jq -r --arg name "lambdabot" '.items[] | select(.name == $name).id')
echo "https://${API_ID}.execute-api.${REGION}.amazonaws.com/api"
| true
|
5ee69dff4db2de2b888932a39a66029b3b62e897
|
Shell
|
jbw/dotfiles
|
/home/git/git-identity
|
UTF-8
| 807
| 3.640625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Select one of the git identities defined globally as user.<id>.name /
# user.<id>.email (fzf-picked, optionally pre-filtered by $1) and apply it
# to the current repository's local config.
# get each set of usernames from the git config (which will be generated from our `default.nix` above)
IDENTITIES=$(git config --global --name-only --get-regexp "user.*..name" | sed -e 's/^user.//' -e 's/.name$//')
# filter them with fzf
# -e exact match, -1 auto-select a single hit, +m single selection,
# -q seeds the query with the first CLI argument.
ID=$(echo "${IDENTITIES}" | fzf -e -1 +m -q "$1")
# Bail out with the list of valid identities if the pick is not configured.
if ! git config --global --get-regexp "user.${ID}.name" >/dev/null; then
echo "Please use a valid git identity
Options:"
git config --global --name-only --get-regexp "user.*..name" | sed -e 's/^user.//' -e 's/.name$//' -e 's/^/\t/'
exit 1
fi
# set the ID locally in each repo (eg in the repo's .git/config)
git config user.name "$(git config user.${ID}.name)"
git config user.email "$(git config user.${ID}.email)"
echo "Name: $(git config user.name)"
echo "Email: $(git config user.email)"
| true
|
078f6a7aee88fe8fa2246f97ee08009ecc8a5a31
|
Shell
|
wenjingn/zhanhd-game
|
/zhanhd/shell/install/start.sh
|
UTF-8
| 660
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/bash
# Print (not execute) the command line used to start a zhanhd game server.
# With no argument: the single-node defaults (port 8000, monitor 9000).
# With a node number: per-node run dir, log file, and port offsets.
# FIX: "$1" must be quoted -- unquoted, [ -z $1 ] breaks when the argument
# contains whitespace or is a glob.
if [ -z "$1" ]; then
cat <<eof
/usr/local/php/bin/php /data/php/games/system/serve --appdir=/data/php/games/zhanhd \
--host=0.0.0.0 --port=8000 --monitor=9000 --host6=:: --port6=8000 --verbose=0 --worker-num=8 --task-worker-num=8 \
--logfile=/data/php/log/zhanhd.log --daemonize=1
eof
else
node=$1
# Ports are offset by the node number; arithmetic expansion does not need
# the "$" on variable names.
port=$((node + 8000))
monitor=$((node + 9000))
cat <<eof
/usr/local/php/bin/php /data/php/games/system/serve --appdir=/data/php/games/zhanhd --rundir=/node-$node/swoole \
--host=0.0.0.0 --port=$port --monitor=$monitor --host6=:: --port6=$port --verbose=0 --worker-num=8 --task-worker-num=8 \
--logfile=/data/php/log/zhanhd-$node.log --daemonize=1
eof
fi
| true
|
f42d547d5d0671e997a32001160b787f0118b717
|
Shell
|
huangyoje/trace-vs
|
/bin/start.sh
|
UTF-8
| 3,590
| 4.25
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Start every server listed in ./servers, instrumented with the tracer named
# by the first CLI argument (a matching start-<tracer>.sh must exist).
BASE_DIR=$(dirname "$0")
LOGS_DIR=$BASE_DIR/../logs
# Start-up polling: CHECK_COUNT intervals of UNIT_TIME seconds each.
UNIT_TIME=5
CHECK_COUNT=36
CLOSE_WAIT_TIME=`expr $UNIT_TIME \* $CHECK_COUNT`
# Tracer name; also selects the Maven profile used by func_do_start.
tracer=$1
$BASE_DIR/start-${tracer}.sh
# Abort with exit 1 if a process matching $IDENTIFIER (or the recorded pid)
# is already running.
# NOTE(review): PID_DIR, PID_FILE, IDENTIFIER and COLLECTOR_IDENTIFIER are
# not defined anywhere in this script -- presumably exported by a caller or
# a sourced file; confirm.  Also `ps aux | grep $pid` matches the pid as a
# substring anywhere in the ps output, which can produce false positives.
function func_check_process
{
echo "---check $1 process status.---"
pid=`cat $PID_DIR/$PID_FILE 2>/dev/null`
process_status=0
if [ ! -z $pid ]; then
process_status=`ps aux | grep $pid | grep $IDENTIFIER | grep -v grep | wc -l`
if [ ! $process_status -eq 0 ]; then
echo "already running $COLLECTOR_IDENTIFIER process. pid=$pid."
fi
fi
# Fall back to matching by identifier when the pid file gave no hit.
if [ $process_status -eq 0 ]; then
process_status=`ps aux | grep $IDENTIFIER | grep -v grep | wc -l`
if [ ! $process_status -eq 0 ]; then
echo "already running $COLLECTOR_IDENTIFIER process. $IDENTIFIER."
fi
fi
if [ ! $process_status -eq 0 ]; then
echo "already running $COLLECTOR_IDENTIFIER process. bye."
exit 1
fi
}
# Create the per-server log/pid directories for server $1 and remove any
# stale log and pid files from a previous run.
function func_init_log
{
echo "---initialize $1 logs---"
if [ ! -d $LOGS_DIR ]; then
echo "mkdir $LOGS_DIR"
mkdir $LOGS_DIR
fi
if [ ! -d $( get_server_log_directory $1 ) ]; then
echo $( get_server_log_directory $1 )
mkdir $( get_server_log_directory $1 )
fi
if [ ! -d $( get_server_pid_directory $1 ) ]; then
echo "mkdir $( get_server_pid_directory $1 )"
mkdir $( get_server_pid_directory $1 )
fi
# Stale artifacts from an earlier run are removed so each start is clean.
if [ -f $( get_server_log_file $1 ) ]; then
echo "rm $( get_server_log_file $1 )."
rm $( get_server_log_file $1 )
fi
if [ -f $( get_server_pid_file $1 ) ]; then
echo "rm $( get_server_pid_file $1 )."
rm $( get_server_pid_file $1 )
fi
}
# Path helpers: given a server name in $1, print the corresponding log, pid
# and source locations derived from $LOGS_DIR / $BASE_DIR.
function get_server_log_directory() {
    local server=$1
    echo "${LOGS_DIR}/${server}"
}
function get_server_log_file() {
    local server=$1
    echo "${LOGS_DIR}/${server}/app.log"
}
function get_server_pid_directory() {
    local server=$1
    echo "${LOGS_DIR}/${server}/pid"
}
function get_server_pid_file() {
    local server=$1
    echo "${LOGS_DIR}/${server}/pid/${server}.pid"
}
function get_server_directory() {
    local server=$1
    echo "${BASE_DIR}/../${server}"
}
# Report ("true"/"false" on stdout) whether the just-launched server is up.
# NOTE(review): this looks like a stub -- process_status is hard-coded to
# "true", so the function always prints "true" and the polling loop in
# func_do_start exits after its first check.  Replace with a real health
# probe (pid check or HTTP ping) before trusting the "completed" message.
function func_check_running
{
process_status="true"
if [ -z $process_status ]; then
echo "false"
else
echo "true"
fi
}
# Launch server $1 in the background via the Maven wrapper (with the active
# tracer profile), record its pid, then poll func_check_running until the
# server reports running or CHECK_COUNT intervals elapse.  On timeout the
# background process is killed.
function func_do_start
{
    pid=`nohup $BASE_DIR/../mvnw -f $( get_server_directory $1 )/pom.xml -P ${tracer} spring-boot:run > \
      $( get_server_log_file $1 ) 2>&1 & echo $!`
    echo $pid > $( get_server_pid_file $1 )
    # FIX: a stray "g" after the closing quote was being printed with this line.
    echo "---$1 initialization started. pid=$pid.---"
    end_count=0
    check_running=$( func_check_running )
    while [ "$check_running" == "false" ]; do
        wait_time=`expr $end_count \* $UNIT_TIME`
        echo "starting $1. $wait_time /$CLOSE_WAIT_TIME sec(close wait limit)."
        if [ $end_count -ge $CHECK_COUNT ]; then
            break
        fi
        sleep $UNIT_TIME
        end_count=`expr $end_count + 1`
        check_running=$( func_check_running )
    done
    if [[ "$check_running" == "true" ]]; then
        echo "---$1 initialization completed. pid=$pid.---"
    else
        echo "---$1 initialization failed. pid=$pid.---"
        # Start-up timed out: reap the background build/server process.
        kill -9 $pid
    fi
}
# Prepare logging for one server, then launch it (blank line separates the
# per-server output blocks).
function start_server() {
    local server_name=$1
    func_init_log $server_name
    func_do_start $server_name
    echo ""
}
# Server list: one name per line in ./servers; a missing file (stderr is
# silenced) simply means nothing gets started.
servers=`cat $BASE_DIR/servers 2>/dev/null`
for server in $servers; do
start_server "${server}"
done
| true
|
d29635ed5e4ae0d7345661cf0ce727a6d7236b60
|
Shell
|
fernandomayer/R-config-files
|
/knitr-pdflatex.sh
|
UTF-8
| 605
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
## Run knitr and pdflatex sequentially
# Argument $1 is the .Rnw file; knitr produces the .tex, pdflatex renders it,
# and the intermediate LaTeX artifacts are removed afterwards.
BASENAME=$(basename $1 .Rnw)
echo
echo -------------------------------------------------------------------
echo Run knitr in $BASENAME.Rnw
echo -------------------------------------------------------------------
echo
RNWFILE=$BASENAME.Rnw
Rscript -e "knitr::knit(\"$RNWFILE\")"
echo
echo -------------------------------------------------------------------
echo Run pdfLaTeX in $BASENAME.Rnw
echo -------------------------------------------------------------------
echo
LATEXFILE=$BASENAME.tex
# The filename is fed to pdflatex on stdin (answers its interactive prompt).
echo "$LATEXFILE" | pdflatex
# NOTE(review): this removes *every* .aux/.log/.out/.tex in the cwd, not just
# this document's -- intentional cleanup, but destructive in shared dirs.
rm *.aux *.log *.out *.tex
| true
|
bad6241a2262b88f31b24195c62d76db5f5bfdf0
|
Shell
|
nulldriver/cf-cli-resource
|
/spec/ssh/allow-disallow-space-ssh_spec.sh
|
UTF-8
| 1,435
| 2.875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env shellspec
# ShellSpec suite: verifies the cf-cli-resource can toggle SSH access for a
# Cloud Foundry space via the allow-space-ssh / disallow-space-ssh commands.
set -euo pipefail

Describe 'ssh'
# Create a throwaway org/space and log in before the suite runs.
setup() {
org=$(generate_test_name_with_spaces)
space=$(generate_test_name_with_spaces)

source=$(get_source_config "$org" "$space") || error_and_exit "[ERROR] error loading source json config"

test::login
test::create_org_and_space "$org" "$space"
}

# Remove the org/space and log out after the suite.
teardown() {
test::delete_org_and_space "$org" "$space"
test::logout
}

BeforeAll 'setup'
AfterAll 'teardown'

It 'can disallow space ssh'
# Build the resource put payload (%text:expand interpolates $source/$space).
disallow_space_ssh() {
local config=$(
%text:expand
#|$source
#|params:
#|  command: disallow-space-ssh
#|  space: $space
)
put "$config"
}
When call disallow_space_ssh
The status should be success
# The emitted version must contain exactly a timestamp key.
The output should json '.version | keys == ["timestamp"]'
The error should include "Disabling ssh support for space"
Assert not test::is_space_ssh_allowed "$org" "$space"
End

It 'can allow space ssh'
allow_space_ssh() {
local config=$(
%text:expand
#|$source
#|params:
#|  command: allow-space-ssh
#|  space: $space
)
put "$config"
}
When call allow_space_ssh
The status should be success
The output should json '.version | keys == ["timestamp"]'
The error should include "Enabling ssh support for space"
Assert test::is_space_ssh_allowed "$org" "$space"
End
End
| true
|
e2e41583e95f937b69ed7d3b72f3990a29377536
|
Shell
|
PXLbros/pxl-web-vagrant
|
/provision/databases/mysql.sh
|
UTF-8
| 4,225
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# Vagrant provisioner: install and configure MySQL (optionally a specific
# version via $MYSQL_VERSION), create the vagrant user, enable remote
# connections, install the PHP MySQL modules and (optionally) phpMyAdmin.
# Helpers (exec_command, title, highlight_text, warning_text, line_break)
# come from the sourced include.sh.
export LOG_FILE_PATH=databases/mysql.log
. /vagrant/provision/helpers/include.sh
title "MySQL"
MYSQL_CONFIG_PATH=/etc/mysql/mysql.conf.d/mysqld.cnf
MYSQL_ROOT_USER=root
MYSQL_ROOT_PASSWORD=root
MYSQL_USER_NAME=vagrant
MYSQL_USER_PASSWORD=vagrant
# NOTE(review): MYSQL_VERSION is referenced below but only this commented
# default exists -- it is presumably exported by the Vagrant config; confirm.
# MYSQL_VERSION="8"
# Export the vagrant credentials into the login shell (only once).
if ! grep -qF "MYSQL_USER_NAME" /home/vagrant/.bashrc; then
exec_command "echo -e \"\nexport MYSQL_USER_NAME=$MYSQL_USER_NAME\nexport MYSQL_USER_PASSWORD=$MYSQL_USER_PASSWORD\" >> /home/vagrant/.bashrc"
fi
# If MySQL isn't installed
if [ ! -x "$(command -v mysql)" ]; then
# Set root password
highlight_text "Set MySQL root password..."
# MySQL 8 needs Oracle's apt repo; rewrite its list from 5.7 to 8.0.
if [ "$MYSQL_VERSION" == "8" ]; then
MYSQL_DEB_PACKAGE_VERSION="0.8.12-1_all"
exec_command "wget -c https://dev.mysql.com/get/mysql-apt-config_$MYSQL_DEB_PACKAGE_VERSION.deb"
exec_command "dpkg -i mysql-apt-config_$MYSQL_DEB_PACKAGE_VERSION.deb"
exec_command "sed -i 's/mysql-5.7/mysql-8.0/g' /etc/apt/sources.list.d/mysql.list"
exec_command "rm -rf mysql-apt-config_$MYSQL_DEB_PACKAGE_VERSION.deb"
exec_command "apt-get update"
exec_command "apt-get install -y mysql-server"
exec_command "debconf-set-selections <<< \"mysql-server mysql-server/data-dir select ''\""
fi
# Pre-seed the root password so the install is non-interactive.
exec_command "debconf-set-selections <<< \"mysql-server mysql-server/root_password password $MYSQL_ROOT_PASSWORD\""
exec_command "debconf-set-selections <<< \"mysql-server mysql-server/root_password_again password $MYSQL_ROOT_PASSWORD\""
## Install MySQL
highlight_text "Install MySQL..."
if [ "$MYSQL_VERSION" == "5.5" ]; then
exec_command "apt-get install -y mysql-server-5.5"
elif [ "$MYSQL_VERSION" == "5.6" ]; then
exec_command "apt-get install -y mysql-server-5.6"
elif [ "$MYSQL_VERSION" == "5.7" ]; then
exec_command "apt-get install -y mysql-server"
fi
if [ -x "$(command -v mysql)" ];
then
# Create user
highlight_text "Create MySQL Vagrant user..."
exec_command "echo \"CREATE USER IF NOT EXISTS '$MYSQL_USER_NAME'@'localhost' IDENTIFIED BY '$MYSQL_USER_PASSWORD';\" | mysql -u $MYSQL_ROOT_USER --password=\"$MYSQL_ROOT_PASSWORD\""
exec_command "echo \"CREATE USER IF NOT EXISTS '$MYSQL_USER_NAME'@'%' IDENTIFIED BY '$MYSQL_USER_PASSWORD';\" | mysql -u $MYSQL_ROOT_USER --password=\"$MYSQL_ROOT_PASSWORD\""
# NOTE(review): these two lines pass raw SQL straight to exec_command
# instead of piping it into mysql like every surrounding statement --
# they would be executed as shell commands and fail.  Looks broken;
# confirm and wrap them in the echo ... | mysql pattern.
if [ "$MYSQL_VERSION" == "8" ]; then
exec_command "ALTER USER '$MYSQL_USER_NAME'@'localhost' IDENTIFIED WITH mysql_native_password BY '$MYSQL_USER_PASSWORD';"
exec_command "ALTER USER '$MYSQL_USER_NAME'@'%' IDENTIFIED WITH mysql_native_password BY '$MYSQL_USER_PASSWORD';"
fi
exec_command "echo \"GRANT ALL PRIVILEGES ON *.* TO '$MYSQL_USER_NAME'@'localhost';\" | mysql -u $MYSQL_ROOT_USER --password=\"$MYSQL_ROOT_PASSWORD\""
exec_command "echo \"GRANT ALL PRIVILEGES ON *.* TO '$MYSQL_USER_NAME'@'%';\" | mysql -u $MYSQL_ROOT_USER --password=\"$MYSQL_ROOT_PASSWORD\""
exec_command "echo \"FLUSH PRIVILEGES;\" | mysql -u $MYSQL_ROOT_USER --password=\"$MYSQL_ROOT_PASSWORD\""
# Enable remote connections
highlight_text "Enable remote connections to MySQL..."
if [ "$MYSQL_VERSION" == "8" ]; then
exec_command "echo -e \"[mysqld]\ndefault_authentication_plugin = mysql_native_password\" | tee -a /etc/mysql/conf.d/mysql.cnf"
else
# Comment out bind-address so mysqld listens on all interfaces.
exec_command sed -i \'s/^bind-address/#bind-address/\' $MYSQL_CONFIG_PATH
fi
exec_command sed -i \'s/^skip-external-locking/#skip-external-locking/\' $MYSQL_CONFIG_PATH
# Restart MySQL
highlight_text "Restart MySQL..."
exec_command "service mysql restart"
fi
else
warning_text "Already installed."
fi
# Install PHP module
# One php<ver>-mysql package per PHP version configured for the box.
if [ -x "$(command -v mysql)" ]; then
PHP_VERSIONS=($PHP_VERSIONS)
line_break
highlight_text "Install PHP MySQL module..."
for PHP_VERSION in "${PHP_VERSIONS[@]}"; do
exec_command "apt-get install -y php$PHP_VERSION-mysql"
done
fi
# phpMyAdmin
if [ ! -z "$PHPMYADMIN" ]; then
/vagrant/provision/databases/mysql/phpmyadmin.sh
fi
| true
|
3b875ac668cad9d50efc69143e3b0b2b6f9abf30
|
Shell
|
shotanue/vv
|
/vv_services/movie_service/scripts/build_proto.sh
|
UTF-8
| 413
| 2.609375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Generate the Go gRPC bindings for movie.proto.
# Requires PROJECT_ROOT to point at the repository root.
PROJECT_ROOT=${PROJECT_ROOT}

if [[ ${PROJECT_ROOT} == "" ]]; then
    echo '${PROJECT_ROOT} is required.'
    # FIX: exit non-zero so callers/CI can detect the misconfiguration
    # (a bare "exit" propagated the preceding echo's 0 status).
    exit 1
fi

PROTOBUF_DIR=${PROJECT_ROOT}/internal/app/interface/rpc/v1/protocol
PROTOBUF_FILE=${PROTOBUF_DIR}/movie.proto
#MOVIE_SERVICE=${PROJECT_ROOT}/internal/app/interface/rpc/v1

# golang: emit *.pb.go with the gRPC plugin enabled, next to the .proto file.
# Paths are quoted in case PROJECT_ROOT contains whitespace.
protoc --proto_path="${PROTOBUF_DIR}" "${PROTOBUF_FILE}" --go_out=plugins=grpc:"${PROTOBUF_DIR}"
| true
|
95b87d92156d6f4201db4b81bbad4c421e484484
|
Shell
|
kukrimate/projects
|
/osdev/osldr/scripts/check_copyright
|
UTF-8
| 354
| 2.703125
| 3
|
[
"ISC"
] |
permissive
|
#!/bin/sh
# Ensure every source file carries a copyright header: run the check and,
# when it fails, update/insert the header (-u) with the matching template.

# Linker scripts, C files and headers.
# FIX: the -name patterns must be quoted -- unquoted *.h is expanded by the
# shell against the current directory before find ever sees it.  A
# while/read loop (instead of `for i in $(find ...)`) also survives
# filenames containing spaces, and -o is the portable OR operator.
find . \( -name '*.h' -o -name '*.c' -o -name '*.ld' \) | while IFS= read -r i; do
    scripts/copyright_header -t c "$i" || scripts/copyright_header -u -t c "$i"
done

# Assembly files and include files
find . \( -name '*.asm' -o -name '*.inc' \) | while IFS= read -r i; do
    scripts/copyright_header -t nasm "$i" || scripts/copyright_header -u -t nasm "$i"
done
| true
|
4d36a1f214c6b844a9cbe958d166973fd6724ab4
|
Shell
|
dmage/ocdev
|
/snippets/check/cluster-is-stopped
|
UTF-8
| 399
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
set -eu
# OCDEV HELP: Check that the cluster is stopped.
# Exits 0 when `oc status` fails (no reachable cluster) or when the
# OCDEV_FORCE_CLUSTER_IS_STOPPED override is set; exits 1 otherwise.
if oc status >/dev/null 2>&1; then
echo "Expected a stopped cluster, but your cluster is running." >&2
# ${VAR-} avoids a nounset error when the override is not exported.
if [ -n "${OCDEV_FORCE_CLUSTER_IS_STOPPED-}" ]; then
exit 0
fi
echo >&2
echo "You may set the environment variable OCDEV_FORCE_CLUSTER_IS_STOPPED=1 to ignore this check." >&2
exit 1
fi
exit 0
| true
|
248fdd356a5dc134ef5ba68eb5024e4863bd6cf5
|
Shell
|
nkinkade/ndt-support
|
/init/stop.sh
|
UTF-8
| 490
| 2.546875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Stop the NDT service processes (ndtd, fakewww, flashpolicyd.py) and clean
# up their subsys lock files.  Everything runs inside a root shell; the
# backslash-quoted delimiter (<<\EOF) keeps the here-doc unexpanded locally.
sudo -s <<\EOF
source /etc/mlab/slice-functions

# ndtd gets KILL (it does not exit cleanly on TERM); the others get TERM.
if pgrep -f ndtd &> /dev/null ; then
  echo "Stopping ndtd:"
  pkill -KILL -f ndtd
  rm -f /var/lock/subsys/ndtd
fi

if pgrep -f fakewww &> /dev/null ; then
  echo "Stopping fakewww:"
  pkill -TERM -f fakewww
  rm -f /var/lock/subsys/fakewww
fi

if pgrep -f flashpolicyd.py &> /dev/null ; then
  echo "Stopping flashpolicyd.py:"
  pkill -TERM -f flashpolicyd.py
  rm -f /var/lock/subsys/flashpolicyd.py
fi
EOF
| true
|
e143808061b3ea4b62a041e19afa2e023ed7b12d
|
Shell
|
jkothuru/Devops_labs
|
/labs/jenkins/setup-Jenkins.sh
|
UTF-8
| 1,634
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision a Jenkins box on Ubuntu: OpenJDK 8, Jenkins (apt repo), Maven
# 3.6.2 and sonar-runner 2.4 under /opt, with JAVA_HOME/MAVEN_HOME exported
# system-wide via /etc/profile.
sudo apt install -y unzip wget
#Install JAVA
#sudo add-apt-repository ppa:webupd8team/java
#sudo apt-get update
#apt-get install -y oracle-java8-installer
sudo add-apt-repository ppa:openjdk-r/ppa
sudo apt-get update
sudo apt-get install -y openjdk-8-jdk
#Install Jenkins
# Add the Jenkins apt key and stable repo, then install and start it.
wget -q -O - https://pkg.jenkins.io/debian/jenkins-ci.org.key | sudo apt-key add -
echo deb https://pkg.jenkins.io/debian-stable binary/ | sudo tee /etc/apt/sources.list.d/jenkins.list
sudo apt-get update
sudo apt-get install -y jenkins
sudo systemctl start jenkins
#Install Maven ( on Jenkins machine )
## cd /tmp ; wget http://mirrors.estointernet.in/apache/maven/maven-3/3.6.1/binaries/apache-maven-3.6.1-bin.tar.gz
cd /tmp ; wget http://apachemirror.wuchna.com/maven/maven-3/3.6.2/binaries/apache-maven-3.6.2-bin.tar.gz
cd /tmp ; tar -xzf apache-maven-3.6.2-bin.tar.gz -C /opt
#Install SonarQube-runner ( on Jenkins machine )
cd /tmp ; wget http://repo1.maven.org/maven2/org/codehaus/sonar/runner/sonar-runner-dist/2.4/sonar-runner-dist-2.4.zip
cd /tmp ; unzip sonar-runner-dist-2.4.zip
cd /tmp ; mv sonar-runner-2.4 /opt
#Set JAVA_HOME & MAVEN_HOME as environment variables on Jenkins machine
# Back up /etc/profile (timestamped) before appending to it.
mkdir -p /home/backup
cp -p /etc/profile /home/backup/profile_`date +%d%b%Y-%H%M`
echo "MAVEN_HOME=/opt/apache-maven-3.6.2" >> /etc/profile
#echo "JAVA_HOME=/usr/lib/jvm/java-8-oracle" >> /etc/profile
echo "JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64" >> /etc/profile
echo "PATH=\$JAVA_HOME/bin:\$MAVEN_HOME/bin:\$PATH" >> /etc/profile
source /etc/profile  ## to reload the configuration
exit
| true
|
282907f5ba8327372eb39cf67bdcb1e6047f28bf
|
Shell
|
esmi/op2ls
|
/wns/wns_include.sh
|
UTF-8
| 1,863
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# FILE: wns_include.sh
#
# Succeed (return 0) iff the variable named by $1 is set to a non-empty
# value.  Exactly one argument is required; anything else is a usage error.
defined() {
  (( $# == 1 )) || error "defined accepts exactly one argument"
  # ${!1} is indirect expansion: the value of the variable whose name is $1.
  [ -n "${!1}" ]
}
# Silent wrappers around the pushd/popd builtins (suppress the directory
# stack echo).
pushd() {
builtin pushd ${@} > /dev/null
}
popd() {
builtin popd ${@} > /dev/null
}
# Freeze the helpers and export them so subshells and child scripts see them.
readonly -f defined pushd popd
export -f defined pushd popd
set -e;
# Make sure we are on 1.5 before proceeding
declare -rx _name=$(basename $0);
declare -r _version=0.0.1;
# C-style snapshot of the invoking command line (argv includes $0, argc
# counts it too).
declare -ar argv=(${0} ${@})
declare -ir argc=$(( $# + 1 ))
# Print the tool name, version and license blurb to stdout.
# NOTE: <<-_EOF strips leading *tabs* (not spaces) -- keep the body
# tab-indented, and do not add lines inside the here-doc.
__show_version() {
cat <<-_EOF
${_name} ${_version}
Copyright (C) 2008, 2009 Evan Chen.
This program comes with NO WARRANTY, to the extent permitted by law.
You may redistribute copies of this program under the terms of
the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any
later version.
For more information about these matters, see the file named COPYING.
Written by Evan Chen for the WNS project
_EOF
}
# displays error message and exits
# Exit status: the status of the command that ran immediately before the
# call when it was non-zero, otherwise 1.  Because "$?" is sampled at
# entry, error must be invoked directly after the failing command.
error() {
case $? in
0) local errorcode=1 ;;
*) local errorcode=$? ;;
esac
echo -e "\e[1;31m*** ERROR:\e[0;0m ${1:-no error message provided}";
exit ${errorcode};
}
# displays warning message only
warning() {
echo -e "\e[1;33m*** Warning:\e[0;0m ${1}";
}
# displays information message
# (written to stderr so it does not pollute captured stdout)
inform() {
echo -e "\e[1;32m*** Info:\e[0;0m ${1}" >&2 ;
}
debug() {
echo -e "\e[1;34m*** debug:\e[0;0m ${1}" >&2 ;
}
# displays command to stdout before execution
# Runs "$@" verbatim and propagates its exit status.
verbose() {
echo "${@}"
"${@}"
return $?
}
# for internal use only
# NOTE(review): ${PF} is not defined in this file -- presumably set by the
# sourcing script; confirm.
__stage() {
echo -e "\e[1;39m>>> ${1} ${PF}\e[0;0m";
}
__step() {
echo -e ">>> ${1}";
}
# protect functions
readonly -f __show_version error warning inform verbose __stage __step
export -f error warning inform verbose
# Print a message and terminate with a failing status (exec false replaces
# the shell, so nothing after an abort ever runs).
abort() {
echo $0: $@
exec false
}
| true
|
9b60750fab33ec60c7c7889851e6df72453da8d2
|
Shell
|
dkgroot/DPorts
|
/net/exaddos/files/exaddos.in
|
UTF-8
| 1,160
| 3.34375
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
# $FreeBSD: head/net/exaddos/files/exaddos.in 345266 2014-02-20 21:06:43Z zi $
#
# PROVIDE: exaddos
# REQUIRE: LOGIN
# KEYWORD: shutdown
#
# Add the following line(s) to /etc/rc.conf to enable exaddos:
#
# exaddos_enable="YES"
#
# FreeBSD rc.d script for the exaddos monitoring daemon.  The %%...%%
# tokens are substituted by the ports framework at install time.
. /etc/rc.subr
name=exaddos
rcvar=exaddos_enable
load_rc_config $name
exaddos_enable=${exaddos_enable:-"NO"}
exaddos_conf=${exaddos_conf:-"%%ETCDIR%%/exaddos.conf"}
pidfile=/var/run/${name}/${name}.pid
required_files=${exaddos_conf}
command="%%PYTHON_SITELIBDIR%%/${name}/application.py"
procname="%%PYTHON_CMD%%"
start_cmd="exaddos_start"
reload_all_cmd="exaddos_reload_all"
extra_commands="reload reload_all"
sig_reload="USR1"
# Custom start: create runtime/db dirs owned by the exaddos user, clear
# any stale pidfile, then launch the daemon via the Python interpreter.
exaddos_start()
{
if [ -z "$rc_fast" -a -n "$rc_pid" ]; then
echo 1>&2 "${name} already running? (pid=$rc_pid)."
return 1
fi
echo "Starting ${name}."
install -d -o exaddos -g exaddos -m 755 /var/run/${name}
install -d -o exaddos -g exaddos -m 750 /var/db/${name}
rm -f ${pidfile}
${procname} -m exaddos.debug ${command} -c ${exaddos_conf}
}
# Signal the running process to reload config and workers (USR2).
exaddos_reload_all()
{
echo "Reloading exaddos configuration and processes."
kill -USR2 $rc_pid
}
run_rc_command "$1"
| true
|
234b307e7dd1f3d7f370a9a4285bedc7f25e93ea
|
Shell
|
fedorova/perf-logging
|
/trace-processing/text-to-synoptic.sh
|
UTF-8
| 245
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/sh
#
# text-to-synoptic.sh — convert one or more timing-trace files to Synoptic
# input and run Synoptic on each result.
#
# Usage: text-to-synoptic.sh FILE...
#
# SCRIPTS_HOME may be set in the environment to point at the directory
# containing timing_to_synoptic.py and run-synoptic.sh; otherwise a
# default under $HOME is used.

# BUGFIX: quote the expansion — the original unquoted `[ -z ${SCRIPTS_HOME} ]`
# mis-evaluates (or errors) when the value contains whitespace.
if [ -z "${SCRIPTS_HOME}" ]; then
    SCRIPTS_HOME="${HOME}/Work/WiredTiger/perf-logging/trace-processing"
fi

for file in "$@"
do
    # Quote "$file" so paths with spaces survive word-splitting.
    "${SCRIPTS_HOME}/timing_to_synoptic.py" "$file"
    "${SCRIPTS_HOME}/run-synoptic.sh" "${file}.synoptic"
done
| true
|
b98a89090e4dd68c7ff2db8dd2b0c2d166b63eb7
|
Shell
|
apire001/UNIXSystemAdministration
|
/lab2/part3.sh
|
UTF-8
| 656
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
#
# part3.sh — watch for the file "lab2-test" and record state transitions
# ("found"/"lost") in uptime.log, appending a new entry only when the
# current state differs from the last one logged.

logdir=/home/andy/lab2
log=$(find "$logdir" -name 'uptime.*')

# Last field of the last multi-field line == most recently logged state.
# (Replaces the original temp-file + wc -l + sed round trip with one pipe.)
lastword=""
if [ -e "$log" ]; then
    lastword=$(awk 'NF>1{print $NF}' "$logdir/uptime.log" | tail -n 1)
fi

path=$(find "$logdir" -name lab2-test)
date=$(date +"%m-%d-%y %T")

# NB: the original interpolated an undefined ${time} variable between the
# date and the dash; since it always expanded to empty, the literal " - "
# below reproduces the original log-line format exactly.
if [ -e "$path" ]; then
    if [ "$lastword" != "found" ]; then
        echo "${date} - File \"lab2-test\" has been found" >> "$logdir/uptime.log"
    fi
else
    if [ "$lastword" != "lost" ]; then
        echo "${date} - File \"lab2-test\" has been lost" >> "$logdir/uptime.log"
    fi
fi
| true
|
f57ef85b8583c1a1170197d9a8988761b212487f
|
Shell
|
Jenny-Nelson-Group/gaussian_scripts_TDDFT
|
/write_ginput.sh
|
UTF-8
| 18,061
| 3.796875
| 4
|
[] |
no_license
|
#!/bin/bash
###################################
### Alise Virbule, 1st May 2016 ###
###################################
### Based on Jarv's launch_coms ###
###################################
# Generates Gaussian input files (.gjf) and matching PBS run scripts (.sh)
# for a family of (TD)DFT calculations on the cx1 cluster; run with -h
# for usage.
####################################################
################  FILE PATHS  ######################
#the paths can't be relative to the home directory (i.e. no ~/work/)
#path where to create sp and opt calculation folders
calc_path="/work/av2613/SmallMolecules"
#path to folder of initial geometry file
geom_path="/work/av2613/initial_geoms"
###############  DEFAULT VALUES  ####################
#all of these can be overridden on the command line (see USAGE)
calc="opt"
solvent="none"
funct="CAM-B3LYP"
bas="cc-pVDZ"
pop="MK"
ntd=10
mem=63000 ##20 cpu nodes usually have 64GB or 128GB
nproc=20 ##biggest nodes have 20 cpus as far as I know
nnodes=1
wallt="71:58:00" ##maximum allowed time on cx1
####################################################
# Print the help text to stdout (here-doc body is emitted verbatim);
# the caller is expected to exit afterwards.
function USAGE()
{
cat << EOF
######################## USAGE ############################
~/bin/write_ginput.sh -option option_value molecule_name
The input geometry has to be in the folder /geom_path/ under the name molecule_name.xyz
and the file should have four columns - element symbol and x, y, z coordinates in Angstroms - and a row for each atom.
The folders and files for the calculation will be created in /calc_path/
## geom_path and calc_path can be changed at the top of this file
################### Calculation MODES #######################
Single point energy: sp (run from bin)
Geometry optimisation: opt (run from bin) - can add solvent
Vibrational frequencies: freq (run from opt or sp folder)
TDDFT: td (run from opt or sp folder) - can add solvent
Save Natural Transition Orbitals: nto (run from td folder)
Population Analysis: pop (run from td folder)
###################### OPTIONS ###############################
-c calculation mode (default opt) ## default values can be changed at the top of this file
-s solvent (default none)
-f functional (default CAM-B3LYP)
-b basis set (default cc-pVDZ)
-e ESP population analysis method (default MK)
-t no. excited states (default 10)
-m memory (default 63000)
-p no. processors (default 20)
-n no. nodes (default 1)
-w walltime (default 71:58:00)
EOF
}
#read in options
while getopts ":c:s:f:b:e:t:m:p:n:w:h" Option; do
case $Option in
c) calc=$OPTARG;;
s) solvent=$OPTARG;;
f) funct=$OPTARG;;
b) bas=$OPTARG;;
e) pop=$OPTARG;;
t) ntd=$OPTARG;;
m) mem=$OPTARG;;
p) nproc=$OPTARG;;
n) nnodes=$OPTARG;;
w) wallt=$OPTARG;;
h) USAGE
exit 0;;
esac
done
#discard the options already parsed by getopts so that "$@" now holds
#only the positional arguments (the molecule name)
shift $((OPTIND-1))
#read in name of molecule (for initial geometry for sp or opt calculation)
#NOTE(review): "$@" joins multiple words with spaces — molecule names are
#assumed to be a single word
name="$@"
#mem from options is used in gaussian input file, add 800MB for run script as buffer
memsh=$((mem+800))
#tell Gaussian to calculate 2 more excited states than required
ncalc=$((ntd+2))
#these are just for file naming purposes (as $ntd is included in the name);
#sp/opt have no excited states so the count is dropped from the file name
if [ "$calc" == "sp" ];then
ntd=""
fi
if [ "$calc" == "opt" ];then
ntd=""
fi
#Name of input file
if [ "$solvent" == "none" ];then
gjfname="${name}_${calc}${ntd}_${funct}_${bas}_$(date +"%Y_%m_%d")"
else
gjfname="${name}_${solvent}_${calc}${ntd}_${funct}_${bas}_$(date +"%Y_%m_%d")"
fi
#shorter filename (without date), used for post-processing steps (nto and pop)
ppname="${name}_td${ntd}_${funct}_${bas}"
###############################################
####### Single point energy (no solvent) ######
###############################################
# Builds <calc_path>/<name>/sp_<funct>_<bas>/ with a Gaussian input file
# and a PBS run script for a gas-phase single-point energy calculation.
if [ "$calc" == "sp" ] && [ "$solvent" == "none" ];then
#create folder for this molecule/system
cd "$calc_path"
mkdir "$name"
cd "$name"
#create folder for this sp calculation
mkdir "${calc}_${funct}_${bas}"
cd "${calc}_${funct}_${bas}"
#write top of Gaussian input file with calculation parameters
cat > $gjfname.gjf << EOF
%chk=$gjfname
%mem=${mem}MB
%nprocshared=$nproc
#p ${funct}/${bas} sp
ground state energy calculation
0 1
EOF
#copy coordinates into the gjf file and add two empty lines (Gaussian is a bit weird about these things sometimes)
#BUGFIX: use the configurable $geom_path instead of the hard-coded
#~/work/initial_geoms — the file header explicitly forbids ~-relative paths
cat "${geom_path}/${name}.xyz" >> $gjfname.gjf
echo " " >> $gjfname.gjf
echo " " >> $gjfname.gjf
#write top of bash run file with parameters for this calculation
cat > $gjfname.sh << EOF
#!/bin/sh
#PBS -l walltime=$wallt
#PBS -l select=$nnodes:ncpus=$nproc:mem=${memsh}MB
#PBS -m e
EOF
#copy in instructions to run gaussian from the template file run_gauss.txt
cat ~/bin/run_gauss.txt >> $gjfname.sh
#################################################
####### Geometry optimisation (no solvent) ######
#################################################
# Builds <calc_path>/<name>/opt_<funct>_<bas>/ with a Gaussian input file
# and a PBS run script for a gas-phase geometry optimisation.
elif [ "$calc" == "opt" ] && [ "$solvent" == "none" ];then
#create folder for this molecule/pair/system
cd "$calc_path"
mkdir "$name"
cd "$name"
#create folder for this opt calculation
mkdir "${calc}_${funct}_${bas}"
cd "${calc}_${funct}_${bas}"
#write top of Gaussian input file with calculation parameters
cat > $gjfname.gjf << EOF
%chk=$gjfname
%mem=${mem}MB
%nprocshared=$nproc
#p ${funct}/${bas} opt=modredundant
Geometry optimisation, ground state energy calculation
0 1
EOF
#copy in coordinates of initial geometry and add two empty lines
#BUGFIX: use the configurable $geom_path instead of the hard-coded
#~/work/initial_geoms — the file header explicitly forbids ~-relative paths
cat "${geom_path}/${name}.xyz" >> $gjfname.gjf
echo " " >> $gjfname.gjf
echo " " >> $gjfname.gjf
#write top of bash run file
cat > $gjfname.sh << EOF
#!/bin/sh
#PBS -l walltime=$wallt
#PBS -l select=$nnodes:ncpus=$nproc:mem=${memsh}MB
#PBS -m e
EOF
#copy commands to run gaussian from template file
cat ~/bin/run_gauss.txt >> $gjfname.sh
###############################################
##### Geometry optimisation with solvent ######
###############################################
# As the gas-phase opt, but with an implicit-solvent (SCRF) model; also
# requests frequencies after the optimisation.
elif [ "$calc" == "opt" ] && [ "$solvent" != "none" ];then
#create folder for this molecule/system
cd "$calc_path"
mkdir "${name}_${solvent}"
cd "${name}_${solvent}"
#create folder for this calculation
mkdir "${calc}_${funct}_${bas}"
cd "${calc}_${funct}_${bas}"
#write Gaussian input file
cat > $gjfname.gjf << EOF
%chk=01_SCRF_${name}_${solvent}
%mem=${mem}MB
%nprocshared=$nproc
#p ${funct}/${bas} opt freq SCRF=(Solvent=${solvent})
${name} ground state in ${solvent}
0 1
EOF
#copy in coordinates
#BUGFIX: use the configurable $geom_path instead of the hard-coded
#~/work/initial_geoms — the file header explicitly forbids ~-relative paths
cat "${geom_path}/${name}.xyz" >> $gjfname.gjf
echo " " >> $gjfname.gjf
echo " " >> $gjfname.gjf
#write top of bash run file with calculation parameters
cat > $gjfname.sh << EOF
#!/bin/sh
#PBS -l walltime=$wallt
#PBS -l select=$nnodes:ncpus=$nproc:mem=${memsh}MB
#PBS -m e
EOF
#copy commands to run Gaussian from template file
cat ~/bin/run_gauss.txt >> $gjfname.sh
####################################################
###### Frequency (IR) calculation (no solvent) ######
####################################################
#NOTE(review): must be run from inside a finished opt/sp folder — the
#converged wavefunction is taken from ../*.chk
elif [ "$calc" == "freq" ] && [ "$solvent" == "none" ];then
#create folder for calculation
mkdir ${calc}_${funct}_${bas}
cd ${calc}_${funct}_${bas}
#copy chk file from optimised structure into TD folder
cp ../*.chk $gjfname.chk
#write Gaussian input file (don't need geometry file, as this will be read from the chk file)
cat > $gjfname.gjf << EOF
%oldchk=$gjfname
%chk=${gjfname}_master
%mem=${mem}MB
%nprocshared=$nproc
#p ${funct}/${bas} geom=checkpoint guess=read freq
Calculate vibrational frequencies (IR intensities)
0 1
EOF
#write top of bash run file
cat > $gjfname.sh << EOF
#!/bin/sh
#PBS -l walltime=$wallt
#PBS -l select=$nnodes:ncpus=$nproc:mem=${memsh}MB
#PBS -m e
EOF
#copy in commands to run Gaussian from template
cat ~/bin/run_gauss.txt >> $gjfname.sh
####################################################
###### Excited state calculation (no solvent) ######
####################################################
#NOTE(review): must be run from inside a finished opt/sp folder — the
#converged wavefunction/geometry is taken from ../*.chk
elif [ "$calc" == "td" ] && [ "$solvent" == "none" ];then
#create folder for calculation
mkdir ${calc}${ntd}_${funct}_${bas}
cd ${calc}${ntd}_${funct}_${bas}
#copy chk file from optimised structure into TD folder
cp ../*.chk $gjfname.chk
#write Gaussian input file (don't need geometry file, as this will be read from the chk file)
cat > $gjfname.gjf << EOF
%oldchk=$gjfname
%chk=${gjfname}_master
%mem=${mem}MB
%nprocshared=$nproc
#p ${funct}/${bas} geom=checkpoint guess=read td(singlets,nstates=$ncalc)
Calculate $ncalc excited states and save to master chk file
0 1
EOF
#write top of bash run file
cat > $gjfname.sh << EOF
#!/bin/sh
#PBS -l walltime=$wallt
#PBS -l select=$nnodes:ncpus=$nproc:mem=${memsh}MB
#PBS -m e
EOF
#copy in commands to run Gaussian from template
cat ~/bin/run_gauss.txt >> $gjfname.sh
#############################################################################
####### Excited state calculation in solvent (absorption and emission) ######
#############################################################################
#NOTE(review): run from inside the finished solvent-opt folder; the step-01
#checkpoint (01_SCRF_*) written there seeds the linear-response TD guess.
elif [ "$calc" == "td" ] && [ "$solvent" != "none" ];then
#create folder for calculation
mkdir ${calc}${ntd}_${solvent}_${funct}_${bas}
cd ${calc}${ntd}_${solvent}_${funct}_${bas}
#copy chk file from optimised structure into TD folder
#don't need to rename file as it was created in the optimisation calculation as a first step for this calculation
cp ../*.chk .
#write Gaussian input file to calculate a first guess for all excited states
cat > $gjfname.gjf << EOF
%oldchk=01_SCRF_${name}_${solvent}
%chk=02_SCRF_${name}_${solvent}
%mem=${mem}MB
%nprocshared=$nproc
#p ${funct}/${bas} TD=NStates=${ncalc} SCRF=(Solvent=${solvent}) Geom=Check Guess=Read
${name} in ${solvent} linear response vertical excited states
0 1
EOF
#write top of bash run file
cat > $gjfname.sh << EOF
#!/bin/sh
#PBS -l walltime=$wallt
#PBS -l select=$nnodes:ncpus=$nproc:mem=${memsh}MB
#PBS -m e
EOF
#copy in commands to run Gaussian from template
cat ~/bin/run_gauss.txt >> $gjfname.sh
#State specific solvation for all excited states
#loop over all $ntd excited states
for (( i=1; i<=$ntd; i++ ))
do
#create and enter folder for state i calculation (state_01 etc.)
mkdir state_$(echo ${i} | awk '{printf "%02d",$1}')
cd state_$(echo ${i} | awk '{printf "%02d",$1}')
#add state number to gjf name
gjfname="${name}_${solvent}_${calc}${ntd}_${funct}_${bas}_$(date +"%Y_%m_%d")_state_$(echo ${i} | awk '{printf "%02d",$1}')"
#write Gaussian input file for the i-th excited state, will calculate absorption and emission (including excited state geom optimisation), and can get Stokes shift
#(multi-step job chained with --link1--: save/read non-eq solvation,
#state-specific energy, excited-state opt, frequencies, emission)
cat > ${gjfname}.gjf << EOF
%oldchk=01_SCRF_${name}_${solvent}
%chk=03_SCRF_${name}_${solvent}
%mem=${mem}MB
%nprocshared=$nproc
#p ${funct}/${bas} SCRF=(Solvent=${solvent},Read) Geom=Check Guess=Read
${name}: prepare for state-specific non-eq solvation by saving the solvent reaction field from the ground state
0 1
NonEq=write
--link1--
%chk=03_SCRF_${name}_${solvent}
%mem=${mem}MB
%nprocshared=$nproc
#p ${funct}/${bas} TD(Nstates=${ncalc},Root=${i}) SCRF=(Solvent=${solvent},externalIteration,Read) Geom=Check Guess=Read
$name: read non-eq solvation from ground state and compute energy of the ${i}th excited state with the state-specific method
0 1
NonEq=read
--link1--
%oldchk=02_SCRF_${name}_${solvent}
%chk=04_SCRF_${name}_${solvent}
%mem=${mem}MB
%nprocshared=$nproc
#p ${funct}/${bas} TD=(Read,NStates=${ncalc},Root=${i}) SCRF=(Solvent=${solvent}) Geom=Check Guess=Read Opt=ReadFC
$name: excited state opt
0 1
--link1--
%oldchk=04_SCRF_${name}_${solvent}
%chk=05_SCRF_${name}_${solvent}
%mem=${mem}MB
%nprocshared=$nproc
#p ${funct}/${bas} TD=(Read,NStates=${ncalc},Root=${i}) Freq SCRF=(Solvent=${solvent}) Geom=Check Guess=Read
$name excited state frequencies to check if found minimum
0 1
--link1--
%oldchk=05_SCRF_${name}_${solvent}
%chk=06_SCRF_${name}_${solvent}
%mem=${mem}MB
%nprocshared=$nproc
#p ${funct}/${bas} TD=(Read,NStates=${ncalc},Root=${i}) SCRF=(Solvent=${solvent},ExternalIteration,Read) Geom=Check Guess=Read
$name in $solvent emission state specific solvation at ${i}th excited state optimised geometry
0 1
NonEq=write
--link1--
%oldchk=06_SCRF_${name}_${solvent}
%chk=07_SCRF_${name}_${solvent}
%mem=${mem}MB
%nprocshared=$nproc
#p ${funct}/${bas} SCRF=(Solvent=${solvent},Read) Geom=Check Guess=Read
$name ground state non-eq at excited state geometry
0 1
NonEq=read
EOF
#write bash run file with the correct gjfname specific to the excited state
cat > ${gjfname}.sh << EOF
#!/bin/sh
#PBS -l walltime=$wallt
#PBS -l select=$nnodes:ncpus=$nproc:mem=${memsh}MB
#PBS -m e
EOF
cat >> ${gjfname}.sh << EOF
echo "Execution started:"
date
module load gaussian
EOF
#the $"" split keeps $PBS_O_WORKDIR literal in the generated script
#(it is expanded at job run time, not while this generator runs)
echo "cp $""PBS_O_WORKDIR/${gjfname}.gjf ./" >> ${gjfname}.sh
echo "cp $""PBS_O_WORKDIR/../*.chk ./" >> ${gjfname}.sh
cat >> ${gjfname}.sh <<EOF
pbsexec g09 ${gjfname}.gjf
echo "Gaussian job finished:"
date
rm Gau*
EOF
echo "cp * $""PBS_O_WORKDIR" >> ${gjfname}.sh
cd ..
done
#this done is to end the for loop over all considered excited states
###########################################################
######## Generate and save NTOs (post-processing) #########
###########################################################
#NOTE(review): run from inside a finished td folder — reads ../*master.chk
elif [ "$calc" == "nto" ];then
#create folder for calculation
mkdir ${calc}${ntd}
cd ${calc}${ntd}
#create folder for all nto chk files
mkdir final_NTO_chks
#copy master chk file over (with all transition densities from TDDFT calculation)
cp ../*master.chk ${ppname}_master.chk
#write input file for all excited states i=1-ntd
for (( i=1; i<=$ntd; i++ ))
do
#create and enter folder for state i calculation (state_01 etc.)
mkdir state_$(echo ${i} | awk '{printf "%02d",$1}')
cd state_$(echo ${i} | awk '{printf "%02d",$1}')
#add state number to gjf name
gjfname="${name}_${calc}${ntd}_${funct}_${bas}_$(date +"%Y_%m_%d")_state_$(echo ${i} | awk '{printf "%02d",$1}')"
#write Gaussian input file for i-th excited state
cat > ${gjfname}.gjf << EOF
%oldchk=${ppname}_master
%chk=${ppname}_density_$(echo ${i} | awk '{printf "%02d",$1}')
%mem=${mem}MB
%nprocshared=$nproc
#p ${funct}/${bas} td(read,nstates=$ncalc,root=$i) density=current geom=check guess=read pop=ESP
read results from TD job from hk file and compute density of excited state $i and perform analysis on it
0 1
--link1--
%oldchk=${ppname}_density_$(echo ${i} | awk '{printf "%02d",$1}')
%chk=${ppname}_NTO_$(echo ${i} | awk '{printf "%02d",$1}')
%mem=${mem}MB
%nprocshared=$nproc
#p chkbasis geom=check guess=only density=(check,transition=$i) pop=(Minimal,SaveNTO) iop(6/22=-14)
save NTO from ground state to excited state $i transition density
0 1
EOF
#write top of bash run file for i-th excited state
cat > ${gjfname}.sh << EOF
#!/bin/sh
#PBS -l walltime=$wallt
#PBS -l select=$nnodes:ncpus=$nproc:mem=${memsh}MB
#PBS -m e
EOF
#write the rest of the bash run file for i-th excited state
cat >> ${gjfname}.sh << EOF
echo "Execution started:"
date
module load gaussian
EOF
#the $"" split keeps $PBS_O_WORKDIR literal in the generated job script
echo "cp $""PBS_O_WORKDIR/${gjfname}.gjf ./" >> ${gjfname}.sh
echo "cp $""PBS_O_WORKDIR/../*master.chk ./" >> ${gjfname}.sh
cat >> ${gjfname}.sh <<EOF
pbsexec g09 ${gjfname}.gjf
echo "Gaussian job finished:"
date
EOF
#some more lines to the bash run file
echo "cp *.log $""PBS_O_WORKDIR" >> ${gjfname}.sh
echo "cp *NTO* $""PBS_O_WORKDIR" >> ${gjfname}.sh
echo "mv $""PBS_O_WORKDIR/${ppname}_NTO_$(echo ${i} | awk '{printf "%02d",$1}').chk $""PBS_O_WORKDIR/../final_NTO_chks" >> ${gjfname}.sh
cd ..
done
#this done finishes the for loop over all $ntd excited states
#write run file to generate cubes from all the NTO chks
cd final_NTO_chks
#write top of bash run file
cat > gen_cubes.sh << EOF
#!/bin/sh
#PBS -l walltime=01:00:00
#PBS -l select=1:ncpus=8:mem=11800MB
#PBS -m e
echo "Execution started:"
date
module load gaussian
cp ~/bin/gen_HL_cubes.sh ./
chmod +x gen_HL_cubes.sh
mkdir cubes
EOF
echo "cp $""PBS_O_WORKDIR/*.chk ./" >> gen_cubes.sh
#write a line for each excited state (to generate a cube file from the chk file for each)
for (( i=1; i<=$ntd; i++ ))
do
echo "./gen_HL_cubes.sh *$(echo ${i} | awk '{printf "%02d",$1}')*" >> gen_cubes.sh
done
#add final lines to the bash run file
cat >> gen_cubes.sh <<EOF
echo "Generating cubes done:"
date
EOF
echo "cp -r cubes $""PBS_O_WORKDIR/cubes_${ppname}" >> gen_cubes.sh
###########################################################
##########  Population Analysis (post-processing) #########
###########################################################
#NOTE(review): run from inside a finished td folder — reads ../*master.chk
elif [ "$calc" == "pop" ];then
#create folder for calculation
mkdir ${calc}${ntd}_${pop}
cd ${calc}${ntd}_${pop}
#copy master chk file over
cp ../*master.chk ${ppname}_master.chk
#write input file for i=1-ntd
#loop over all ntd excited states
for (( i=1; i<=$ntd; i++ ))
do
#create and enter folder for state i calculation
mkdir state_$(echo ${i} | awk '{printf "%02d",$1}')
cd state_$(echo ${i} | awk '{printf "%02d",$1}')
#add state number to gjf name
gjfname="${name}_${calc}${ntd}_${pop}_${funct}_${bas}_$(date +"%Y_%m_%d")_state_$(echo ${i} | awk '{printf "%02d",$1}')"
#write Gaussian input file for i-th excited state
cat > ${gjfname}.gjf << EOF
%oldchk=${ppname}_master
%chk=${ppname}_density_$(echo ${i} | awk '{printf "%02d",$1}')
%mem=${mem}MB
%nprocshared=$nproc
#p Geom=AllCheck ChkBas Guess=(Read,Only) Density=(Check,CIS=$(echo ${i} | awk '{printf "%02d",$1}')) Pop=${pop}
read results from TD job from hk file for density of excited state $i and perform population analysis on it
0 1
EOF
#write bash run file for i-th excited state
cat > ${gjfname}.sh << EOF
#!/bin/sh
#PBS -l walltime=$wallt
#PBS -l select=$nnodes:ncpus=$nproc:mem=${memsh}MB
#PBS -m e
EOF
cat >> ${gjfname}.sh << EOF
echo "Execution started:"
date
module load gaussian
EOF
#the $"" split keeps $PBS_O_WORKDIR literal in the generated job script
echo "cp $""PBS_O_WORKDIR/${gjfname}.gjf ./" >> ${gjfname}.sh
echo "cp $""PBS_O_WORKDIR/../*master.chk ./" >> ${gjfname}.sh
cat >> ${gjfname}.sh <<EOF
pbsexec g09 ${gjfname}.gjf
echo "Gaussian job finished:"
date
EOF
echo "cp * $""PBS_O_WORKDIR" >> ${gjfname}.sh
cd ..
done
#this done finishes for loop over all ntd excited states
fi
#this fi finishes the if statement for all the calculation modes (and solvent presence)
| true
|
b46c8f538bd322157178b559b21b4b9dad0c3ed1
|
Shell
|
christofferivano/soal-shift-sisop-modul-1-D05-2021
|
/soal3/soal3c.sh
|
UTF-8
| 2,326
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
#
# soal3c.sh — download 23 animal pictures from loremflickr into a dated
# folder, drop exact duplicates (by md5), then renumber the survivors as
# Koleksi_01, Koleksi_02, ...  Whether cats or rabbits are fetched depends
# on the day/month/leap-year rules at the bottom.

# download_collection FOLDER_PREFIX KEYWORD
# Shared worker: gambar_kucing and gambar_kelinci were copy-paste
# identical except for the folder name and the search keyword.
download_collection() {
	local prefix=$1
	local keyword=$2
	local dir="/home/amanullahd/${prefix}_$(date +"%d-%m-%Y")"
	mkdir "$dir"
	cd "$dir" || return 1
	local num
	for ((num=1; num<=23; num=num+1))
	do
		# Zero-pad single-digit indices so names sort correctly.
		if [ $num -le 9 ]
		then
			wget -a Foto.log "https://loremflickr.com/320/240/${keyword}" -O "Koleksi_0$num"
		else
			wget -a Foto.log "https://loremflickr.com/320/240/${keyword}" -O "Koleksi_$num"
		fi
	done
	# Remove byte-identical duplicates: sort by hash, keep the first of
	# each identical-hash run, delete the rest.
	md5sum * | \
	sort | \
	awk 'BEGIN{lasthash = ""} $1 == lasthash {print $2} {lasthash = $1}' | \
	xargs rm
	# Renumber the remaining files into a gapless, zero-padded sequence.
	local idx=1 f
	for f in Koleksi_*; do
		if [ $idx -le 9 ]
		then
			mv "$f" Koleksi_0$idx
		else
			mv "$f" Koleksi_$idx
		fi
		idx=$((idx+1))
	done
}

# Original entry points, kept with their public names.
gambar_kucing() {
	download_collection Kucing kitten
}
gambar_kelinci() {
	download_collection Kelinci bunny
}

currentdate=$(date +%d)
currentmonth=$(date +%m)
currentyear=$(date +%Y)
# NOTE(review): the "leap year" check is divisible-by-4 only (century
# years like 2100 are misclassified); kept as-is to preserve the
# original schedule.
if [ $(expr $currentyear % 4) == "0" ];
then
	if [ $currentmonth -eq 1 ] || [ $currentmonth -eq 3 ] || [ $currentmonth -eq 6 ] || [ $currentmonth -eq 7 ] || [ $currentmonth -eq 9 ] || [ $currentmonth -eq 10 ]
	then
		# odd days -> cats, even days -> rabbits
		if [ $(expr $currentdate % 2) != "0" ];
		then
			gambar_kucing
		else
			gambar_kelinci
		fi
	else
		if [ $(expr $currentdate % 2) != "0" ];
		then
			gambar_kelinci
		else
			gambar_kucing
		fi
	fi
else
	if [ $currentmonth -eq 1 ] || [ $currentmonth -eq 4 ] || [ $currentmonth -eq 5 ] || [ $currentmonth -eq 8 ] || [ $currentmonth -eq 11 ] || [ $currentmonth -eq 12 ]
	then
		if [ $(expr $currentdate % 2) != "0" ];
		then
			gambar_kucing
		else
			gambar_kelinci
		fi
	else
		if [ $(expr $currentdate % 2) != "0" ];
		then
			gambar_kelinci
		else
			gambar_kucing
		fi
	fi
fi
| true
|
5e39d5700f8a42d24b9e68e35fe982a64a729b50
|
Shell
|
kalevalp/watchtower-experiments
|
/collision-count/scripts/run.sh
|
UTF-8
| 1,556
| 3.1875
| 3
|
[] |
no_license
|
#! /bin/bash
# Collision-count microbenchmark driver: for each invocation rate, fire
# batches of concurrent requests at the deployed endpoint for one minute,
# pull a collision report, and reset the deployment between repeats.
# Results land in a timestamped results directory.
export resdir=results$(date +%Y%m%d%H%M%S)
mkdir "${resdir}"
echo '#####################################################'
echo '#######' Running collision count microbenchmark '#######'
echo '####### ' output dir is ${resdir} '#######'
echo '#####################################################'
pushd ../single-event || exit
#resolve the deployed service endpoint from `serverless info`
API_URL=$(serverless info --verbose | grep '^ServiceEndpoint:' | grep -o 'https://.*'); export API_URL=$API_URL/microbmark
for rate in {1,2,3,4,5,6,10,12,15,20,30,60}
do
#iterations * rate == 60 requests per repeat, spread over ~one minute
export iterations=$(( 60 / "$rate" ))
for repeat in $(seq 1 10)
do
echo '########'
echo '######' Running repeat "${repeat}" with rate "${rate}"
echo '######' Starting "${iterations}" iterations of "${rate}" concurrent invocations
echo '########'
for i in $(seq 1 "${iterations}")
do
#fire $rate concurrent POSTs, then barrier on all of them
for j in $(seq 1 "${rate}")
do
curl -X POST -d "100" "${API_URL}" &
done
for job in $(jobs -p)
do
wait "$job" || echo Failed job "$job"
done
echo
done
sleep 10
#sentinel request ("200") marking the end of this repeat
curl -X POST -d "200" "${API_URL}" &
echo
sleep 30
echo '######' Reunning report collection script
node ../../scripts/get-collision-report.js ../scripts/${resdir}/collision-report-${rate}-repeat-${repeat} ${rate}
echo '######' Clearing deployment in prep for the next iteration
node ../../scripts/clear-deployment.js
sleep 30
done
done
#sls remove
popd || exit
echo '####################'
echo '#######' Done '#######'
echo '####################'
| true
|
7e5424101606a62f46ca73fc91c886617f38c99d
|
Shell
|
xmanatee/algo
|
/link-cut-max-flow/debugging/pathnode_test_script.sh
|
UTF-8
| 353
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
#
# pathnode_test_script.sh — build the PathNode test generator and the
# test binary (including all link-cut sources), generate a test case
# from the three size arguments, run it, then clean up intermediates.
#
# Usage: pathnode_test_script.sh N1 N2 N3   (forwarded to the generator)

name="pathnodetest"

# Collect sources in an array so paths survive word-splitting intact.
# (The original built a space-joined string and contained a redundant
# self-assignment 'files="$files"'.)
files=("$name.cpp")
for i in linkcut/*.cpp
do
files+=("$i")
done

g++ -std=c++11 -O3 tests/pathnodetestgen.cpp -o q_g
g++ -std=c++11 -O3 "${files[@]}" -o q_pn
echo "COMPILED"
echo "gen : "
time ./q_g "$1" "$2" "$3" > f_pn_test
echo ""
echo "pn : "
time ./q_pn < f_pn_test > f_pn_out
rm f_*
#rm q_*
# -f: succeed silently when no editor backup files (*~) exist
rm -f *~
| true
|
c45028abf96713c6c1d0db804112be9d6080120a
|
Shell
|
But-I-Play-One-On-TV/sra-tools
|
/test/tarballs/test-tarballs.sh
|
UTF-8
| 4,284
| 3.5625
| 4
|
[
"LicenseRef-scancode-us-govt-public-domain",
"LicenseRef-scancode-ncbi",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# ===========================================================================
#
#                            PUBLIC DOMAIN NOTICE
#               National Center for Biotechnology Information
#
#  This software/database is a "United States Government Work" under the
#  terms of the United States Copyright Act.  It was written as part of
#  the author's official duties as a United States Government employee and
#  thus cannot be copyrighted.  This software/database is freely available
#  to the public for use. The National Library of Medicine and the U.S.
#  Government have not placed any restriction on its use or reproduction.
#
#  Although all reasonable efforts have been taken to ensure the accuracy
#  and reliability of the software and data, the NLM and the U.S.
#  Government do not and cannot warrant the performance or results that
#  may be obtained by using this software or data. The NLM and the U.S.
#  Government disclaim all warranties, express or implied, including
#  warranties of performance, merchantability or fitness for any particular
#  purpose.
#
#  Please cite the author in any work or product based on this material.
#
# ===========================================================================
echo $0 $*
#
# Download and test SRA Toolkit tarballs (see VDB-1345)
# Errors are reported to the specified email
#
# Parameters:
# $1 - working dir (will contain a copy of the latest md5sum.txt file)
#
# return codes:
# 0 - tests passed
# 1 - wget sratoolkit failed
# 2 - gunzip sratoolkit failed
# 3 - tar sratoolkit failed
# 4 - wget GenomeAnalysisTK.jar failed
# 5 - wget ngs-sdk failed
# 6 - gunzip ngs-sdk failed
# 7 - tar ngs-sdk failed
# 8 - one of smoke tests failed
# 9 - example failed
WORKDIR=$1
if [ "${WORKDIR}" == "" ]
then
WORKDIR="./temp"
fi
echo "Testing sra-tools tarballs, working directory = $WORKDIR"
# Per-OS setup: pick the tarball flavour and provide a realpath shim
# (macOS has no realpath binary).
case $(uname) in
Linux)
python -mplatform | grep -q Ubuntu && OS=ubuntu64 || OS=centos_linux64
# NOTE(review): TOOLS is appended here but never read in this script;
# presumably consumed by smoke-test.sh — confirm before removing.
TOOLS="${TOOLS} pacbio-load remote-fuser"
realpath() {
readlink -f $1
}
uname=linux
;;
Darwin)
OS=mac64
realpath() {
[[ $1 = /* ]] && echo "$1" || echo "$PWD/${1#./}"
}
uname=mac
;;
esac
HOMEDIR=$(dirname $(realpath $0))
################################## sratoolkit ##################################
SDK_URL=https://ftp-trace.ncbi.nlm.nih.gov/sra/sdk/current/
TK_TARGET=sratoolkit.current-${OS}
rm -rv ${WORKDIR}
mkdir -p ${WORKDIR}
OLDDIR=$(pwd)
cd ${WORKDIR}
df -h .
wget -q --no-check-certificate ${SDK_URL}${TK_TARGET}.tar.gz || exit 1
gunzip -f ${TK_TARGET}.tar.gz || exit 2
TK_PACKAGE=$(tar tf ${TK_TARGET}.tar | head -n 1)
rm -rf ${TK_PACKAGE}
tar xf ${TK_TARGET}.tar || exit 3
# extract version number from the package's name
[[ ${TK_PACKAGE} =~ \.[0-9]+\.[0-9]+\.[0-9]+ ]] && VERSION=${BASH_REMATCH[0]:1} # clip leading '.'
echo Current version: ${VERSION}
############################### GenomeAnalysisTK ###############################
GATK_TARGET=GenomeAnalysisTK.jar
wget -q --no-check-certificate ${SDK_URL}${GATK_TARGET} || exit 4
################################### ngs-sdk ####################################
NGS_URL=https://ftp-trace.ncbi.nlm.nih.gov/sra/ngs/current/
NGS_TARGET=ngs-sdk.current-${uname}
echo wget -q --no-check-certificate ${NGS_URL}${NGS_TARGET}.tar.gz
wget -q --no-check-certificate ${NGS_URL}${NGS_TARGET}.tar.gz || exit 5
gunzip -f ${NGS_TARGET}.tar.gz || exit 6
NGS_PACKAGE=$(tar tf ${NGS_TARGET}.tar | head -n 1)
rm -rf ${NGS_PACKAGE}
tar xf ${NGS_TARGET}.tar || exit 7
################################## smoke-test ##################################
echo $HOMEDIR/smoke-test.sh ./${TK_PACKAGE} ${VERSION}
$HOMEDIR/smoke-test.sh ./${TK_PACKAGE} ${VERSION}
RC=$?
if [ "${RC}" != "0" ]
then
echo "Smoke test returned ${RC}"
exit 8
fi
# run an example: dump the first spot of SRR000001 and check a known read name
EXAMPLE="./${TK_PACKAGE}/bin/vdb-dump SRR000001 -R 1 "
$EXAMPLE | grep -q EM7LVYS02FOYNU
if [ "$?" != "0" ]
then
echo "The example failed: $EXAMPLE"
exit 9
fi
echo rm ${TK_PACKAGE} ${TK_TARGET}.tar ${GATK_TARGET} \
${NGS_PACKAGE} ${NGS_TARGET}.tar
rm -rf ${TK_PACKAGE} ${TK_TARGET}.tar ${GATK_TARGET} \
${NGS_PACKAGE} ${NGS_TARGET}.tar *vcf*
cd ${OLDDIR} && ( rmdir ${WORKDIR} || ls ${WORKDIR} )
| true
|
9f9ab385f380974872c12453f2591058b31535d3
|
Shell
|
petronny/aur3-mirror
|
/sub3dtool/PKGBUILD
|
UTF-8
| 484
| 2.796875
| 3
|
[] |
no_license
|
# Maintainer: Eero Molkoselka <molkoback@gmail.com>
# PKGBUILD for sub3dtool — converts subtitle files to 3D format.
pkgname=sub3dtool
pkgver=0.4.2
pkgrel=1
pkgdesc="Convert subtitle files to 3D"
url="https://code.google.com/p/sub3dtool/"
# NOTE(review): the build compiles C with gcc, so arch=('any') looks
# wrong — compiled packages should list concrete architectures; confirm.
arch=('any')
license=('GPL')
makedepends=('gcc')
source=("https://sub3dtool.googlecode.com/files/${pkgname}-${pkgver}.tar.gz")
md5sums=('049080350b484bdc93388a050f9d7512')
# Compile in the unpacked source directory.
build(){
cd "${srcdir}/${pkgname}-${pkgver}"
make all
}
# Install the single binary into /usr/bin.
package(){
install -Dm 755 ${srcdir}/${pkgname}-${pkgver}/sub3dtool $pkgdir/usr/bin/sub3dtool
}
| true
|
2f802b5a9cacd37e886c8d7c7bc9691037b32574
|
Shell
|
kodx/dotfiles
|
/bin/xdg-video.sh
|
UTF-8
| 1,170
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/sh
# mc (Midnight Commander) extension helper for video files.
# The file to act on arrives via $MC_EXT_FILENAME (set by mc).
# $1 - action ("view" prints stream info, "open" plays the file)
# $2 - type of file
action=$1
filetype=$2
player=mpv
[ -n "${MC_XDG_OPEN}" ] || MC_XDG_OPEN="xdg-open"
# Print container/stream information for the file (ffmpeg's input dump,
# with ffmpeg's "no output file" complaint filtered out).
do_view_action() {
filetype=$1
case "${filetype}" in
*)
#mplayer -identify -vo null -ao null -frames 0 "${MC_EXT_FILENAME}" 2>&1 | \
#sed -n 's/^ID_//p'
ffmpeg -i "${MC_EXT_FILENAME}" 2>&1 | sed -n '/^Input/ { p; :a; n; p; ba; }' | grep -v "At least one output file must be specified"
;;
esac
}
# Play the file: realplay for RealAudio ("ram"), otherwise $player —
# detached & silenced under X11, null video output on a bare console.
do_open_action() {
filetype=$1
case "${filetype}" in
ram)
(realplay "${MC_EXT_FILENAME}" &>/dev/null &)
;;
*)
if [ -n "$DISPLAY" ]; then
($player "${MC_EXT_FILENAME}" &>/dev/null &)
else
$player -vo null "${MC_EXT_FILENAME}"
fi
#(gtv "${MC_EXT_FILENAME}" >/dev/null 2>&1 &)
#(xanim "${MC_EXT_FILENAME}" >/dev/null 2>&1 &)
;;
esac
}
# Dispatch on the action; "open" tries the desktop handler first and
# falls back to the local player.  Unknown actions are ignored.
case "${action}" in
view)
do_view_action "${filetype}"
;;
open)
"${MC_XDG_OPEN}" "${MC_EXT_FILENAME}" &>/dev/null || \
#do_open_action "${filetype}" >/dev/null 2>&1 &
do_open_action "${filetype}" &
;;
*)
;;
esac
| true
|
6c4ffdd81d65d2fac956ee6ed4b0ca3893299765
|
Shell
|
FelixOfficial/Pixie
|
/04_Software/05_Deliverables/StartPixie.sh
|
UTF-8
| 365
| 2.921875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
#
# StartPixie.sh — launch the Pixie jar with the release logback config.
# Runs only on Linux or macOS (Darwin); any other platform is ignored,
# matching the original behaviour.

PARENT_DIR=$(dirname "$0")   # kept for compatibility; not used below

platform='unknown'
unamestr=$(uname)
if [[ "$unamestr" == 'Linux' ]]; then
    platform='linux'
elif [[ "$unamestr" == 'Darwin' ]]; then
    platform='Mac'
fi

# The launch command was duplicated verbatim in both branches of the
# original; run it once for any recognised platform instead.
if [[ "$platform" != 'unknown' ]]; then
    java -Dlogback.configurationFile=./cfg/logbackRelease.xml -jar bin/Pixie.jar
fi
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.