blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
698e669465c803374b8c7ad60fb310a7242605f8
|
Shell
|
halostatue/dotfiles
|
/home/private_dot_config/direnv/lib/use_kiex.sh
|
UTF-8
| 604
| 3.421875
| 3
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
# vim: ft=bash

# direnv helper: activate a kiex-managed Elixir install.
# Usage: use kiex <version>   or   use kiex --auto
# With --auto the version is taken from a .elixir-version file.
use_kiex() {
  has kiex || return

  local requested
  requested="$1"
  if [[ "${requested}" == --auto ]]; then
    requested="$(read_version_file .elixir-version)"
  fi
  if [[ -z "${requested}" ]]; then
    return
  fi

  # Ask kiex for the newest installed build matching the request.
  local installed
  installed="$(kiex list | ruby -e "puts ARGF.read.scan(/elixir-${requested}.*/).last")"
  if [[ -z "${installed}" ]]; then
    echo "Error: Missing elixir version: ${requested}. Install using kiex." 1>&2
    return 1
  fi

  # Source the matching kiex env file to put this Elixir on PATH.
  local env_file
  env_file="${KIEX_HOME:-${HOME}/.kiex}/elixirs/${installed}.env"
  [[ -f "${env_file}" ]] && unsafe source "${env_file}"
}
| true
|
3cb558d4986d401c8bbefc8f6a103acc8bde944f
|
Shell
|
jcha9928/idp
|
/mr_dtimetric_connectome
|
UTF-8
| 3,227
| 2.578125
| 3
|
[] |
no_license
|
#! /bin/bash
## usage: trac_2_tckgen -t 6 -b 0
# Generate one batch job script per subject that computes DTI metrics
# (FA/ADC/AD/RD) with MRtrix, samples them along SIFT2 tracks, and
# builds metric-weighted connectomes; the generated scripts are listed
# in a batch file for cluster submission.
# NOTE(review): this option loop requires a literal '--' argument to
# terminate; without one, [ $1 != -- ] errors once the arguments run
# out.  $1 is also unquoted, so an empty argument breaks the test.
while [ $1 != -- ] ; do
case $1 in
-t)
# thread count forwarded to ITK inside the generated job
nthreads=$2;
shift;;
esac
shift
done
logdir=/ifs/scratch/pimri/posnerlab/1anal/IDP/code/idp/job
CMD1_batch=$logdir/batch_metric
rm -rf $CMD1_batch
subjectlist=/ifs/scratch/pimri/posnerlab/1anal/IDP/fs/subject_dti_complete
# one generated job script per subject listed in $subjectlist
for s in `cat $subjectlist`
do
CMD1=$logdir/cmd.metric.${s}
rm -rf $CMD1
# The quoted block below expands $work, $nthreads and ${s} NOW, while
# \${SUBJECT} is escaped so it resolves when the job itself runs.
# NOTE(review): $work (and $code below) are assumed to come from the
# caller's environment / ~/.bashrc — confirm they are set.
echo "#!/bin/bash
source ~/.bashrc
FREESURFER_HOME=$work/freesurfer_dev/freesurfer
source /ifs/scratch/pimri/posnerlab/freesurfer_dev/freesurfer/FreeSurferEnv.sh
ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS=$nthreads
SUBJECT=${s}_1mm_flair
cd /ifs/scratch/pimri/posnerlab/1anal/IDP/fs/\${SUBJECT}/dmri2
dwi2tensor mr_dwi_denoised_preproc_biasCorr.mif.gz - | tensor2metric -mask mr_dilate_mask.mif.gz - -fa dti_fa.mif.gz -force
dwi2tensor mr_dwi_denoised_preproc_biasCorr.mif.gz - | tensor2metric -mask mr_dilate_mask.mif.gz - -adc dti_adc.mif.gz -force
dwi2tensor mr_dwi_denoised_preproc_biasCorr.mif.gz - | tensor2metric -mask mr_dilate_mask.mif.gz - -ad dti_ad.mif.gz -force
dwi2tensor mr_dwi_denoised_preproc_biasCorr.mif.gz - | tensor2metric -mask mr_dilate_mask.mif.gz - -rd dti_rd.mif.gz -force
tcksample mr_track_10M_SIFT2.tck dti_fa.mif.gz mr_track_10M_SIFT2_mean_fa.csv -stat_tck mean -force
tcksample mr_track_10M_SIFT2.tck dti_adc.mif.gz mr_track_10M_SIFT2_mean_adc.csv -stat_tck mean -force
tcksample mr_track_10M_SIFT2.tck dti_ad.mif.gz mr_track_10M_SIFT2_mean_ad.csv -stat_tck mean -force
tcksample mr_track_10M_SIFT2.tck dti_rd.mif.gz mr_track_10M_SIFT2_mean_rd.csv -stat_tck mean -force
tck2connectome mr_track_10M_SIFT2.tck nodes_aparc+aseg.mif* mr_sift_10M_connectome_aparc+aseg_fa.csv -zero_diagonal -scale_file mr_track_10M_SIFT2_mean_fa.csv -stat_edge mean
tck2connectome mr_track_10M_SIFT2.tck nodes_aparc+aseg.mif* mr_sift_10M_connectome_aparc+aseg_adc.csv -zero_diagonal -scale_file mr_track_10M_SIFT2_mean_adc.csv -stat_edge mean
tck2connectome mr_track_10M_SIFT2.tck nodes_aparc+aseg.mif* mr_sift_10M_connectome_aparc+aseg_ad.csv -zero_diagonal -scale_file mr_track_10M_SIFT2_mean_ad.csv -stat_edge mean
tck2connectome mr_track_10M_SIFT2.tck nodes_aparc+aseg.mif* mr_sift_10M_connectome_aparc+aseg_rd.csv -zero_diagonal -scale_file mr_track_10M_SIFT2_mean_rd.csv -stat_edge mean
tck2connectome mr_track_10M_SIFT2.tck nodes_aparc.a2009s+aseg.mif* mr_sift_10M_connectome_aparc.a2009s+aseg_fa.csv -zero_diagonal -scale_file mr_track_10M_SIFT2_mean_fa.csv -stat_edge mean
tck2connectome mr_track_10M_SIFT2.tck nodes_aparc.a2009s+aseg.mif* mr_sift_10M_connectome_aparc.a2009s+aseg_adc.csv -zero_diagonal -scale_file mr_track_10M_SIFT2_mean_adc.csv -stat_edge mean
tck2connectome mr_track_10M_SIFT2.tck nodes_aparc.a2009s+aseg.mif* mr_sift_10M_connectome_aparc.a2009s+aseg_ad.csv -zero_diagonal -scale_file mr_track_10M_SIFT2_mean_ad.csv -stat_edge mean
tck2connectome mr_track_10M_SIFT2.tck nodes_aparc.a2009s+aseg.mif* mr_sift_10M_connectome_aparc.a2009s+aseg_rd.csv -zero_diagonal -scale_file mr_track_10M_SIFT2_mean_rd.csv -stat_edge mean
" >$CMD1
#batch submission
echo $CMD1 >> $CMD1_batch
done
echo $code/fsl_sub_hpc_3 -s smp,$nthreads -l $logdir -t ${CMD1_batch}
echo ${CMD1_batch}
| true
|
b3988f11e1fc47ac00a8e557ced8a5bd55e58e61
|
Shell
|
swchoi06/shell_script
|
/setpasswd.sh
|
UTF-8
| 245
| 3
| 3
|
[] |
no_license
|
# Batch-set passwords for the users listed (one per line) in the file
# given as $1.
userlist=$1
echo "$userlist"
# Write each username three times into a temp file.
# NOTE(review): the second loop's stdin is redirected from .qdw.txt, so
# 'passwd' and the two 'read' calls below also consume lines from it —
# the triplicated names appear to exist to feed those prompts (which
# would set each password to the username itself); confirm the intent
# before changing anything here.
while read user
do
echo $user >> .qdw.txt
echo $user >> .qdw.txt
echo $user >> .qdw.txt
done < $userlist
while read user
do
echo "$user"
passwd $user
read -p "$*"
read -p "$*"
done < .qdw.txt
rm .qdw.txt
| true
|
9604e956ee26a09a808bba4c390dfa5ad9a4a217
|
Shell
|
idavehuwei/wit
|
/scripts/plugins.install.all.sh
|
UTF-8
| 438
| 3.03125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#! /bin/bash
###########################################
# Run the plugin installers bundled under private/plugins and
# public/plugins (when present), relative to the project root.
###########################################
# constants

# Absolute directory containing this script (backticks replaced with
# $(), and quoted so paths with spaces survive).
baseDir=$(cd "$(dirname "$0")" && pwd)

# functions
# main
# Do nothing when this file is sourced rather than executed.
# (Deprecated '[ ... -o ... ]' split into two tests.)
[ -z "${BASH_SOURCE[0]}" ] || [ "${BASH_SOURCE[0]}" = "$0" ] || return

# Work from the project root; abort instead of running installers from
# the wrong directory if the cd fails.
cd "$baseDir/.." || exit 1

if [ -d ./private/plugins ]; then
  ./private/plugins/scripts/install-all.sh
fi
if [ -d ./public/plugins ]; then
  ./public/plugins/scripts/install-all.sh
fi
| true
|
4599524ca9a1307c858aeb666a9c09f1075bae0e
|
Shell
|
gsterjov/openshift-advanced-python-cartridge
|
/lib/util
|
UTF-8
| 883
| 3.921875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Utility functions for use in the cartridge scripts.

# Parse command-line flags, storing any requested version in the
# global $version.  Accepts: -v VALUE, --version VALUE, --version=VALUE,
# -h/--help/-? (prints usage and exits), and '--' to stop parsing.
function parse_args {
  while (( $# > 0 )); do
    case "$1" in
      -h | --help | -\?)
        echo "usage: $0 [--version[=]<value>]"
        exit 0
        ;;
      -v | --version)
        # Value is the following argument (not validated here).
        version=$2
        shift 2
        ;;
      --version=*)
        # Value is embedded after the '='; strip up to it.
        version=${1#*=}
        shift
        ;;
      --)
        # Explicit end of options.
        shift
        break
        ;;
      -*)
        echo "WARN: Unknown option... Exiting: $1" >&2
        exit 1
        ;;
      *)
        # First non-option argument ends parsing.
        break
        ;;
    esac
  done
}
| true
|
b051e0237f5bf3e8c5bafa28c35c3d4914290dbd
|
Shell
|
jiesu12/archived-docker-git-backup-gdrive
|
/job.sh
|
UTF-8
| 2,387
| 4.125
| 4
|
[] |
no_license
|
#!/bin/bash
# Back up every bare git repo (*.git) and binary dir (*.bin) under
# /repos as an encrypted 7z archive uploaded to a gdrive proxy service,
# keeping only the newest few archives per repo remotely.
set -e
GDRIVE_SERVICE="http://${GDRIVE_SERVER}:9000"
REPO_BASE="/repos"
if [ ! -f "${PASSWORD_FILE}" ]
then
echo password file ${PASSWORD_FILE} not found
exit 1
fi
password=$(cat ${PASSWORD_FILE})
echo
date
echo [gdrive backup] STARTING
# One remote listing taken up front; fileExist/cleanOldFiles grep this
# snapshot rather than re-querying the service.
file_list=$(curl ${GDRIVE_SERVICE})
# Count remote files whose name matches $1 (prints 0 when absent).
function fileExist {
echo "${file_list}" | grep ${1} | wc -l
}
# Print the newest modification time (epoch seconds) inside dir $1:
# last commit date for a bare git repo, newest file mtime otherwise.
# Runs inside $(...) so the cd does not leak into the caller.
function getLocalUpdateTime {
cd $1
if [[ "$1" == *.git ]];then
git for-each-ref --sort=-committerdate refs/heads/ --format="%(committerdate:format:%s)" | head -1
else
find . -type f -not -name '.gitbin' -printf '%T@\n' | sort -n | tail -1 | cut -d. -f1
fi
}
# Create encrypted archive $1 from path $2 (both relative to REPO_BASE).
# NOTE(review): -p${password} puts the password on the 7z command line,
# visible via ps while the archive is being created.
function compressFile {
echo "[gdrive backup] compress and encrypt as 7z file"
cd ${REPO_BASE}
# these 7z options are important. have tried other options that failed, because 7z use a lot of memory, it could be killed by the system.
7z a -mx -mmt2 -md48M -p${password} ${1} ${2} > /dev/null
}
# Upload archive $1 to the gdrive service via multipart POST.
function uploadFile {
echo "[gdrive backup] Upload the repo..."
cd ${REPO_BASE}
curl -s -F "file=@${1}" ${GDRIVE_SERVICE} > /dev/null
echo "[gdrive backup] Uploaded file ${1}"
}
# Delete all but the 6 newest remote archives matching prefix $1.
function cleanOldFiles {
echo "[gdrive backup] clean old files in gdrive"
# tail -n +<number> skip number of lines
echo "${file_list}" | grep ${1} | sort -r | tail -n +7 | while read l
do
local fileId=$(echo $l | cut -d' ' -f2)
curl -s -X 'DELETE' ${GDRIVE_SERVICE}/${fileId}
echo "[gdrive backup] Deleted old file ${l}"
done
}
cd ${REPO_BASE}
# clean any left over from last run
rm -f *.7z.*
for repoRelDir in `find . -type d -name "*.git" -o -name "*.bin"`;do
echo
echo "[gdrive backup] Start working on ${repoRelDir}"
# Archive base name: path with '/' -> '.', then leading './' and the
# .git/.bin suffix stripped.
filenameNoTime=$(echo ${repoRelDir} | sed 's#/#.#g')
filenameNoTime="${filenameNoTime//../}"
filenameNoTime="${filenameNoTime/.git/}"
filenameNoTime="${filenameNoTime/.bin/}"
repoAbsDir=${REPO_BASE}/${repoRelDir}
localUpdateTime=$(getLocalUpdateTime ${repoAbsDir})
filenameWithTime="${filenameNoTime}.7z.${localUpdateTime}"
echo [gdrive backup] directory - ${repoAbsDir}
# Only archive when no remote file carries this exact timestamp.
if [[ "$(fileExist ${filenameWithTime})" == 0 ]];then
compressFile ${filenameWithTime} ${repoRelDir}
uploadFile ${filenameWithTime}
rm ${REPO_BASE}/${filenameWithTime}
cleanOldFiles ${filenameNoTime}
else
echo "[gdrive backup] No new changes."
fi
done
date
echo "[gdrive backup] FINISHED"
echo
| true
|
93e32918683d79b8cc682c24d0be29b1bedd4d25
|
Shell
|
hodacthuan/linux-tools
|
/cli-tools/awscli.sh
|
UTF-8
| 190
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
# Install the AWS CLI via apt when it is not already on PATH,
# otherwise report the installed location and version.

# 'command -v' replaces the external 'which' (portable builtin, no
# stray error output when the binary is absent).
AWS=$(command -v aws)
if [[ -z $AWS ]]; then
  echo 'Install AWS-CLI ...'
  sudo apt-get install awscli -y
else
  echo "AWS-CLI was installed $AWS"
  $AWS --version
fi
| true
|
c3c7e5a99119e0d0d4ad01996d18612178bdd0ed
|
Shell
|
xd23fe39/linux-pastebin
|
/mysql/mysql-skip-replication-error.sh
|
UTF-8
| 530
| 3.265625
| 3
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
# Skip a single replication error on a MySQL slave and restart the
# slave threads.  Usage: <script> <user> <pass> yes
# Refuses to run unless the short hostname matches CHECKHOST.
CHECKHOST='MYSQL-SLAVE'
USER=$1
PASS=$2
SKIP=$3
HOST=$(hostname -s)
IP=$(hostname -i)
# NOTE(review): the password ends up on the mysql command line and is
# visible to other users via ps — consider a client option file.
CMD="mysql --user=$USER --password=$PASS -e"
if [ "$CHECKHOST" != "$HOST" ]; then
echo; echo "Kein MySQL SLAVE!"; echo
exit 1
fi
echo; echo "MySQL: Skip Replication Error on $HOST ($IP)"; echo
# Without a literal 'yes' as third argument: show status + usage only.
if [ "$SKIP" != "yes" ]; then
$CMD 'SHOW STATUS\G'
echo; echo " Usage: CMD user pass yes to skip error!"; echo
exit 2
fi
# Skip exactly one errored transaction, then resume replication.
$CMD 'STOP SLAVE'
$CMD 'SET GLOBAL SQL_SLAVE_SKIP_COUNTER = 1'
$CMD 'START SLAVE'
$CMD 'SHOW STATUS\G'
| true
|
9718bd9e901d4864a3e14e7f55d81dfc3b977c24
|
Shell
|
leftsky/docker-proxy
|
/get_ssl.sh
|
UTF-8
| 1,388
| 3.03125
| 3
|
[] |
no_license
|
#!/usr/bin/expect
# Request a Let's Encrypt certificate for $domains via certbot's manual
# HTTP-01 challenge: answers certbot's interactive prompts, captures
# the challenge token from certbot's output, writes it under the nginx
# web root, then lets certbot finish validation.
# argv[0]: domain(s) to request
set domains [lindex $argv 0]
# argv[1]: registration email address
set email [lindex $argv 1]
# expect timeout in seconds
set timeout 30
# start the certificate request
spawn certbot --server https://acme-v02.api.letsencrypt.org/directory \
--logs-dir=/var/www/certbot/logs \
--config-dir=/var/www/certbot/config \
--work-dir=/var/www/certbot/work \
-d "$domains" --manual --preferred-challenges http-01 certonly
# answer prompts: email, ToS agreement, mailing list, IP logging, renewal
expect "Enter email address" { send "$email\r"; exp_continue; } \
"agree in order to register with the ACME server" { send "A\r"; exp_continue; } \
"share your email" { send "N\r"; exp_continue; } \
"Are you OK with your IP being logged" { send "Y\r"; } \
"Renew & replace the cert" { send "2\r"; }
# capture the challenge file content (a 70-90 char token string)
expect -re "(\\S{70,90})"
set file_content $expect_out(0,string)
# expect -re "(http\\S{40,90})"
# set file_url $expect_out(0,string)
# capture the challenge file name from the validation URL
expect -re "http\\S+/(\\S{10,90})"
# send_user $expect_out(0,string)
# send_user $expect_out(1,string)
set file_name $expect_out(1,string)
# send_user "\r\n校验文件内容:$file_content"
# send_user "\r\n校验文件路径:$file_url"
# send_user "\r\n校验文件名:$file_name"
# publish the challenge file where nginx serves it
exec mkdir -p /usr/share/nginx/html/.well-known/acme-challenge/
exec echo "$file_content" > /usr/share/nginx/html/.well-known/acme-challenge/$file_name
# tell certbot to proceed with validation
send "\r"
expect "has been saved"
interact
| true
|
54ffcfd33d32d862e8f521d9229ad2b0a8a27d8a
|
Shell
|
alexiaford/2018-09-14_inclass_UG_script
|
/count_bases_in_primers.sh
|
UTF-8
| 766
| 4.25
| 4
|
[] |
no_license
|
#!/bin/bash
# Count the number of bases in the primer sequence of each fasta file
# given on the command line.
#
# Expected input format (one record per file, newline-terminated):
#   > sequence_id
#   AGCTCGTC
#
# The count comes from line 2 of each file: head|tail isolates it,
# tr strips the trailing newline so it is not counted, and the final
# tr removes the padding that wc emits on some platforms.
for fasta in "$@"
do
  stem=$(basename "$fasta" .fasta)
  bases=$(head -2 "$fasta" | tail -1 | tr -d '\n' | wc -m | tr -d " ")
  printf '%s\n' "In $stem, there are $bases nucleotides."
done
| true
|
2dc8088f7de40e656b8227e59573d41116a41fca
|
Shell
|
ufsowa/widom
|
/MI/generate
|
UTF-8
| 620
| 3.28125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# For each ../INPUT/stech_* directory: create a matching directory here
# and seed up to 50 sample subdirectories with every other .xyz file as
# start.in; then copy the template and helper scripts into each stech*
# directory and run its do_for step.
#VARIABLES
NAME="sample"
SOURCE="$PWD/../INPUT/"
TO=$PWD
TEMPLATE=${PWD}"/template"
# NOTE(review): this '#echo "' and the '#" > tmp; rm tmp' below look
# like the remains of a commented-out wrapper that once quoted this
# whole section — confirm before cleaning them up.
#echo "
cd ${SOURCE}
for i in stech_*; do
dirr="${TO}/$i"
mkdir $dirr
cd $i
licz=0; iter=0;
# take every 2nd .xyz file, at most 50 samples per stech_* directory
for j in *.xyz; do
div=$((iter % 2))
if [ $div -eq 0 ] && [ $licz -lt 50 ]; then
mkdir $dirr/${NAME}$licz
cp $j $dirr/${NAME}$licz/start.in
((licz++))
fi
((iter++))
done
# report: directory, files seen, samples created
echo $i $iter $licz
cd ${SOURCE}
done
#" > tmp; rm tmp
cd $TO
# refresh template + helper scripts in each generated stech* dir
for i in stech*; do
rm -r $i/template
cp -r ${TEMPLATE} $i
cp -r skrypty do_for run $i
cd $i; ./do_for "g" ; cd ..;
done
| true
|
e3f298df63cdf8e8793bf493bf2b8a0a2cc3638f
|
Shell
|
onlyJinx/shell_CentOS7
|
/install.sh
|
UTF-8
| 23,139
| 3.671875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
##CDN 104.16.160.3|104.16.192.155|104.20.157.6
##ss -lnp|grep :$port|awk -F "pid=" '{print $2}'|sed s/,.*//xargs kill -9
function check(){
  ### Abort the script when the previous command failed.
  ### Must be called IMMEDIATELY after the command being checked: the
  ### [ ... ] below reads the caller's $? before anything else runs.
  ### $1 - human-readable name of the build step, used in the message.
  if [ "0" != "$?" ]; then
    echo "$1编译失败,请手动检查"
    # Fix: was 'exit 0', which reported success to the calling shell
    # even though the build had failed.
    exit 1
  fi
}
function check_port(){
### Prompt for a listen port (default: $1) into the global $port.
### If the port is already bound, offer to kill the owning process (y),
### abort the script (n), or re-prompt for a different port (Enter).
while [[ true ]]; do
read -p "请输入监听端口(默认$1):" port
port=${port:-$1}
myport=$(ss -lnp|grep :$port)
if [ -n "$myport" ];then
echo "端口$port已被占用,输入 y 关闭占用进程,输入 n 退出程序直接回车更换其他端口"
read sel
if [ "$sel" == "y" ] || [ "$sel" == "Y" ]; then
## extract the pid from the ss output and kill the owning process
ss -lnp|grep :$port|awk -F "pid=" '{print $2}'|sed 's/,.*//'|xargs kill -9
if ! [ -n "$(ss -lnp|grep :$port)" ]; then
echo "已终止占用端口进程"
break
else
echo "进程关闭失败,请手动关闭"
exit 1
fi
elif [ "$sel" == "n" ] || [ "$sel" == "N" ]; then
echo "已取消操作"
exit 0
else
clear
fi
else
break
fi
done
}
function check_version(){
### $1 - binary name to probe on PATH; $2 - package display name.
### If $1 is already installed, ask (30s timeout) whether to reinstall;
### any answer other than y/Y — including a timeout — aborts the script.
if [ -x "$(command -v $1)" ]; then
echo "$2已安装,是否继续覆盖安装?(Y/N)"
read -t 30 -p "" sel
if [ "$sel" == "y" ] || [ "$sel" == "Y" ];then
echo "继续执行安装"
else
echo "已取消安装"
exit 0
fi
fi
}
function check_fin(){
  ### Verify the freshly built binary ($1) is now executable on PATH;
  ### report success, or report failure and abort the script.
  local bin_path
  bin_path=$(command -v $1)
  if [ -n "$bin_path" ] && [ -x "$bin_path" ]; then
    echo "编译安装完成"
  else
    echo "编译失败,请手动检查!!"
    exit 1
  fi
}
function download_dir(){
  # Prompt for a download directory and create it when missing.
  # $1 - prompt text; $2 - default path.  Result in the global $dir.
  read -p "$1" dir
  dir=${dir:-$2}
  if [ ! -d "$dir" ]; then
    echo "文件夹不存在,已创建文件夹 $dir"
    # Fix: quote $dir and use -p so nested paths (and paths with
    # spaces) are created instead of failing.
    mkdir -p "$dir"
  fi
}
function check_directory_exist(){
  ## $1 - directory to check.  If it exists, interactively delete it
  ## (y/Y) or move it aside with a timestamp suffix, so a fresh
  ## clone/build can take its place.
  if [[ -d $1 ]]; then
    echo 文件夹 $1 存在,是否删除\(y/n\)?
    read sel
    if [ "$sel" == "y" ] || [ "$sel" == "Y" ]; then
      rm -fr "$1"
      # Fix: the original tested [[ "$?"=="0" ]] with no spaces around
      # '==', which is a single non-empty string and therefore always
      # true — rm failures were silently reported as success.
      if [[ "$?" == "0" ]]; then
        echo 文件夹 $1 已删除
      else
        echo 文件夹 $1 删除失败,请手动删除!
        exit 0
      fi
    else
      # Capture the timestamp once so the message always names the real
      # destination (two date calls could straddle a second boundary).
      local dest
      dest="$1_$(date +%T)"
      mv "$1" "$dest"
      echo 已将目录 $1 移动至 "$dest"
    fi
  fi
}
function shadowsocks-libev(){
### Build and install shadowsocks-libev from source (plus MbedTLS,
### libsodium, c-ares and the v2ray plugin), write its config and a
### systemd unit, then start it.  Interactive: prompts for password
### and port.
check_directory_exist /root/shadowsocks-libev
check_version ss-server shadowsocks
read -t 60 -p "请输入密码,直接回车则设置为默认密码: nPB4bF5K8+apre." passwd
passwd=${passwd:-nPB4bF5K8+apre.}
check_port 443
###echo "passwd=$passwd"
### BandwagonHost images ship with epel disabled by default
#yum remove epel-release -y
#yum install epel-release -y
###yum install gcc gettext autoconf libtool automake make pcre-devel asciidoc xmlto c-ares-devel libev-devel libsodium-devel mbedtls-devel -y
yum install gcc gettext autoconf libtool automake make pcre-devel wget git vim asciidoc xmlto libev-devel -y
### libsodium, mbedtls and c-ares are built by hand below
###Installation of MbedTLS
wget --no-check-certificate https://tls.mbed.org/download/mbedtls-2.16.3-gpl.tgz
###wget https://tls.mbed.org/download/mbedtls-2.16.2-apache.tgz
tar xvf mbedtls*gpl.tgz
cd mbedtls*
make SHARED=1 CFLAGS=-fPIC
sudo make DESTDIR=/usr install
check "shadowsocks依赖MbedTLS"
cd ~
sudo ldconfig
###Installation of Libsodium
## wget https://download.libsodium.org/libsodium/releases/libsodium-1.0.18.tar.gz
## wget https://download.libsodium.org/libsodium/releases/LATEST.tar.gz
## tar xvf LATEST.tar.gz
## cd libsodium-stable
## ./configure --prefix=/usr && make
## sudo make install
## check "shadowsocks依赖Libsodium"
## sudo ldconfig
## cd ~
wget https://download.libsodium.org/libsodium/releases/LATEST.tar.gz
# NOTE(review): LATEST.tar.gz is never extracted and 'LATEST' is not a
# directory, so this cd fails and the configure/make below run in the
# wrong directory — the commented sequence above (tar xvf + cd
# libsodium-stable) looks like the intended steps; confirm.
cd LATEST
./configure --prefix=/usr
make && make install
check "shadowsocks依赖Libsodium"
sudo ldconfig
cd ~
###Installation of c-ares
git clone https://github.com/c-ares/c-ares.git
cd c-ares
./buildconf
autoconf configure.ac
./configure --prefix=/usr && make
sudo make install
check "shadowsocks依赖c-ares"
sudo ldconfig
cd ~
### install steps adapted from http://blog.sina.com.cn/s/blog_6c4a60110101342m.html
###Installation of simple-obfs
### obfs is deprecated ###
#git clone https://github.com/shadowsocks/simple-obfs.git
#cd simple-obfs
#git submodule update --init --recursive
#./autogen.sh
#./configure && make
#sudo make install
#wget https://github.com/shadowsocks/v2ray-plugin/releases/download/v1.1.0/v2ray-plugin-linux-amd64-v1.1.0.tar.gz
#tar zxvf v2ray-plugin* && mv v2ray-plugin-linux-amd64 /etc/shadowsocks-libev/v2ray-plugin &&rm -f v2ray-plugin*
### build error "undefined reference to ares_set_servers_ports_csv":
### pass explicit libsodium include/lib paths to configure
###Installation of shadowsocks-libev
git clone https://github.com/shadowsocks/shadowsocks-libev.git
cd shadowsocks-libev
git submodule update --init --recursive
./autogen.sh && ./configure --with-sodium-include=/usr/include --with-sodium-lib=/usr/lib
## check the exit status of the build
check "ShadowSocks-libev"
make && make install
### confirm the installed binary is runnable
check_fin "ss-server"
mkdir /etc/shadowsocks-libev
###cp /root/shadowsocks-libev/debian/config.json /etc/shadowsocks-libev/config.json
### create config.json
###"plugin_opts":"obfs=tls;failover=127.0.0.1:888"
cat >/etc/shadowsocks-libev/config.json<<-EOF
{
"server":"0.0.0.0",
"server_port":$port,
"local_port":1080,
"password":"$passwd",
"timeout":60,
"method":"xchacha20-ietf-poly1305",
"fast_open": true,
"nameserver": "8.8.8.8",
"plugin":"/etc/shadowsocks-libev/v2ray-plugin",
"plugin_opts":"server",
"mode": "tcp_and_udp"
}
EOF
### download the v2ray plugin
wget https://github.com/shadowsocks/v2ray-plugin/releases/download/v1.3.0/v2ray-plugin-linux-amd64-v1.3.0.tar.gz
tar zxvf v2ray-plugin* && mv v2ray-plugin_linux_amd64 /etc/shadowsocks-libev/v2ray-plugin &&rm -f v2ray-plugin*
### create the systemd service
cat >/etc/systemd/system/ssl.service<<-EOF
[Unit]
Description=Shadowsocks Server
After=network.target
[Service]
ExecStart=/usr/local/bin/ss-server -c /etc/shadowsocks-libev/config.json
User=root
[Install]
WantedBy=multi-user.target
EOF
### disable ping ###
###echo net.ipv4.icmp_echo_ignore_all=1>>/etc/sysctl.conf
###sysctl -p
### firewall ports
##firewall-cmd --zone=public --add-port=$port/tcp --permanent
##firewall-cmd --zone=public --add-port=$port/udp --permanent
##firewall-cmd --reload
systemctl start ssl&&systemctl enable ssl
### remove the build trees
cd /root && rm -fr mbedtls* shadowsocks-libev libsodium LATEST.tar.gz c-ares
clear
###ss -lnp|grep 443
# NOTE(review): config.json above sets plugin_opts to "server", but the
# summary below reports "http" — confirm which one is correct.
echo -e port:" ""\e[31m\e[1m$port\e[0m"
echo -e password:" ""\e[31m\e[1m$passwd\e[0m"
echo -e method:" ""\e[31m\e[1mxchacha20-ietf-poly1305\e[0m"
echo -e plugin:" ""\e[31m\e[1mv2ray-plugin\e[0m"
echo -e plugin_opts:" ""\e[31m\e[1mhttp\e[0m"
echo -e config.json:" ""\e[31m\e[1m/etc/shadowsocks-libev/config.json\n\n\e[0m"
echo -e use \""\e[31m\e[1msystemctl status ssl\e[0m"\" run the shadowsocks-libev in background
echo -e "\e[31m\e[1mhttps://github.com/shadowsocks\e[0m"
}
function transmission(){
## Build transmission 3.00 from source, write a systemd unit, tune the
## RPC settings (port/user/password prompted) and install the
## transmission-web-control web UI.
check_directory_exist transmission-3.00+
check_version transmission-daemon transmission
clear
check_port 9091
clear
read -p "请输入用户名,直接回车则设置为默认用户 transmission: " uname
uname=${uname:-transmission}
clear
read -p "请输入密码,直接回车则设置为默认密码 transmission2020: " passwd
passwd=${passwd:-transmission2020}
clear
download_dir "输入下载文件保存路径(默认/usr/downloads): " "/usr/downloads"
# NOTE(review): 'check' is invoked here with no argument and not right
# after a build step — the $? it inspects is download_dir's status and
# the failure message would have an empty name; looks like a leftover.
check
config_path="/root/.config/transmission-daemon/settings.json"
if [[ "$(type -P apt)" ]]; then
echo "Debian"
apt-get -y --no-install-recommends install ca-certificates libcurl4-openssl-dev libssl-dev pkg-config build-essential autoconf libtool zlib1g-dev intltool libevent-dev wget git
elif [[ "$(type -P yum)" ]]; then
yum -y install gcc gcc-c++ make automake libtool gettext openssl-devel libevent-devel intltool libiconv curl-devel systemd-devel wget git
else
echo "error: The script does not support the package manager in this operating system."
exit 1
fi
wget https://github.com/transmission/transmission-releases/raw/master/transmission-3.00.tar.xz
tar xf transmission-3.00.tar.xz && cd transmission-3.00
./autogen.sh && make && make install
### check the exit status of the build
check transmission
### confirm the binary runs
#check_fin "transmission-daemon"
## default config file
##vi /root/.config/transmission-daemon/settings.json
## create the systemd service
# NOTE(review): this heredoc is unquoted, so $MAINPID expands NOW (to
# an empty string) instead of being written literally for systemd —
# the ExecStop/ExecReload lines come out as '/bin/kill -s STOP' with
# no PID.  Probably should be \$MAINPID; confirm.
cat >/etc/systemd/system/transmission-daemon.service<<-EOF
[Unit]
Description=Transmission BitTorrent Daemon
After=network.target
[Service]
User=root
Type=simple
ExecStart=/usr/local/bin/transmission-daemon -f --log-error
ExecStop=/bin/kill -s STOP $MAINPID
ExecReload=/bin/kill -s HUP $MAINPID
[Install]
WantedBy=multi-user.target
EOF
## start once so transmission generates its default settings.json
systemctl start transmission-daemon.service
systemctl stop transmission-daemon.service
##systemctl status transmission-daemon.service
## config tweaks; sed usage per https://segmentfault.com/a/1190000020613397
sed -i '/rpc-whitelist-enabled/ s/true/false/' $config_path
sed -i '/rpc-host-whitelist-enabled/ s/true/false/' $config_path
sed -i '/rpc-authentication-required/ s/false/true/' $config_path
## stop appending .part to incomplete files
sed -i '/rename-partial-files/ s/true/false/' $config_path
## $ and backslashes are literal inside single quotes — double quotes needed here
##sed -i "/rpc-username/ s/\"\"/\"$uname\"/" $config_path
sed -i "/rpc-username/ s/: \".*/: \"$uname\",/" $config_path
sed -i "/rpc-port/ s/9091/$port/" $config_path
## ':' used as sed delimiter so path slashes need no escaping
# NOTE(review): an address with a custom ':' delimiter needs a leading
# backslash (\:download-dir\:); as written GNU sed parses
# ':download-dir:...' as a label, so this substitution may never run.
sed -i ":download-dir: s:\/root\/Downloads:$dir:" $config_path
sed -i "/rpc-password/ s/\"{.*/\"$passwd\",/" $config_path
## enable the upload speed limit
sed -i "/speed-limit-up-enabled/ s/false/true/" $config_path
## cap uploads at 1M/s
sed -i "/\"speed-limit-up\"/ s/:.*/: 1024,/" $config_path
##limit rate
sed -i "/ratio-limit-enabled/ s/false/true/" $config_path
sed -i "/\"ratio-limit\"/ s/:.*/: 4,/" $config_path
##firewall-cmd --zone=public --add-port=51413/tcp --permanent
##firewall-cmd --zone=public --add-port=51413/udp --permanent
##firewall-cmd --zone=public --add-port=$port/tcp --permanent
##firewall-cmd --zone=public --add-port=$port/udp --permanent
##firewall-cmd --reload
## swap in the transmission-web-control web UI
cd ~
git clone https://github.com/ronggang/transmission-web-control.git
mv /usr/local/share/transmission/web/index.html /usr/local/share/transmission/web/index.original.html
cp -r /root/transmission-web-control/src/* /usr/local/share/transmission/web/
systemctl start transmission-daemon.service
systemctl enable transmission-daemon.service
clear
echo -e port:" ""\e[31m\e[1m$port\e[0m"
echo -e password:" ""\e[31m\e[1m$passwd\e[0m"
echo -e username:" ""\e[31m\e[1m$uname\e[0m"
echo -e download_dir:" ""\e[31m\e[1m$dir\e[0m"
echo -e config.json:" ""\e[31m\e[1m/root/.config/transmission-daemon/settings.json\n\n\e[0m"
}
function aria2(){
## Build aria2 from source, write /aria2.conf and a systemd unit, and
## optionally install the web UI (served by the httpd function below).
check_directory_exist aria2
check_version aria2c aria2
clear
download_dir "输入下载文件保存路径(默认/usr/downloads): " "/usr/downloads"
clear
read -p "输入密码(默认密码crazy_0): " key
key=${key:-crazy_0}
yum install -y gcc-c++ make libtool automake bison autoconf git intltool libssh2-devel expat-devel gmp-devel nettle-devel libssh2-devel zlib-devel c-ares-devel gnutls-devel libgcrypt-devel libxml2-devel sqlite-devel gettext xz-devel gperftools gperftools-devel gperftools-libs trousers-devel
git clone https://github.com/aria2/aria2.git && cd aria2
## static build variant:
##autoreconf -i && ./configure ARIA2_STATIC=yes
autoreconf -i && ./configure
make && make install
### build troubleshooting notes: https://weair.xyz/build-aria2/
check aria2
### confirm the binary runs
clear
check_fin "aria2c"
cat >/etc/systemd/system/aria2.service<<-EOF
[Unit]
Description=aria2c
After=network.target
[Service]
ExecStart=/usr/local/bin/aria2c --conf-path=/aria2.conf
User=root
[Install]
WantedBy=multi-user.target
EOF
##aria2 config file
cat >/aria2.conf<<-EOF
rpc-secret=$key
enable-rpc=true
rpc-allow-origin-all=true
rpc-listen-all=true
max-concurrent-downloads=5
continue=true
max-connection-per-server=5
min-split-size=10M
split=16
max-overall-download-limit=0
max-download-limit=0
max-overall-upload-limit=0
max-upload-limit=0
dir=$dir
file-allocation=prealloc
EOF
## install nginx
# nginx dropped because of SELinux issues; httpd is used instead
#rpm -ivh http://nginx.org/packages/centos/7/noarch/RPMS/nginx-release-centos-7-0.el7.ngx.noarch.rpm
#yum install nginx -y
## selinux setup
#ausearch -c 'nginx' --raw | audit2allow -M my-nginx
#semodule -i my-nginx.pp
systemctl enable aria2
systemctl start aria2
##firewall-cmd --zone=public --add-port=6800/tcp --permanent
##firewall-cmd --zone=public --add-port=6800/udp --permanent
##firewall-cmd --reload
clear
while [[ true ]]; do
echo "是否安装webUI (y/n)?"
read ins
if [ "$ins" == "y" ] || [ "$ins" == "Y" ];then
## httpd prompts for a port and sets the global $port echoed below
httpd
clear
echo -e port:" ""\e[31m\e[1m$port\e[0m"
break
elif [ "$ins" == "n" ] || [ "$ins" == "N" ];then
clear
break
fi
done
echo -e token:" ""\e[31m\e[1m$key\e[0m"
echo -e download_dir:" ""\e[31m\e[1m$dir\e[0m"
echo -e config.json:" ""\e[31m\e[1m/aria2.conf\n\n\e[0m"
}
function httpd(){
  ## Interactively pick a listen port (global $port), install Apache
  ## httpd, point its Listen directive at that port and serve the
  ## webui-aria2 frontend from /var/www/html.
  ## if/else patterns per https://www.cnblogs.com/include/archive/2011/12/09/2307905.html
  count=0
  # Fix: the original 'while(1>0)' ran the command '1' in a subshell
  # (command not found, stdout redirected to a file named '0'), so the
  # loop body never executed and $port was never prompted for; use a
  # real arithmetic condition.
  while (( 1 > 0 ))
  do
    read -p "输入一个大于1024的端口(第$count次) " port
    let count++
    port=${port:-80}
    if [ "$port" -gt "1024" ];then
      if [ -n "$(ss -lnp|grep :$port)" ];then
        clear
        echo "端口$port已被占用,请输入其他端口"
      else
        break
      fi
    elif [ "$port" -eq "80" ] || [ "$port" -eq "443" ];then
      if [ -n "$(ss -lnp|grep :$port)" ];then
        clear
        echo "端口$port已被占用,请输入其他端口"
      else
        break
      fi
    fi
    # give up after 10 attempts
    if [ $count -gt 10 ]; then
      clear
      echo "滚"
      break
    fi
  done
  yum install httpd -y
  # Fix: '$port' was split across two physical lines in the original
  # ('$po' + newline + 'rt'), so the sed referenced an undefined $po and
  # the Listen directive was never rewritten correctly.
  sed -i "/^Listen/ s/[0-9].*/$port/" /etc/httpd/conf/httpd.conf
  ##firewall-cmd --zone=public --add-port=$port/tcp --permanent
  ##firewall-cmd --zone=public --add-port=$port/udp --permanent
  ##firewall-cmd --reload
  clear
  ##webui
  cd ~
  git clone https://github.com/ziahamza/webui-aria2.git
  #rm -fr /usr/share/nginx/html/*
  mv /var/www/html /var/www/html_b
  mkdir /var/www/html/
  cp -r /root/webui-aria2/docs/* /var/www/html/
  ##config file
  ##vi /etc/nginx/conf.d/default.conf
  #sed -i "/listen/ s/80/$port/" /etc/nginx/conf.d/default.conf
  systemctl enable httpd
  systemctl start httpd
}
function Up_kernel(){
## Install a newer kernel (buster-backports cloud kernel on Debian,
## elrepo kernel-ml on CentOS 7) and enable BBR congestion control.
if [[ "$(type -P apt)" ]]; then
echo "deb https://deb.debian.org/debian buster-backports main" >> /etc/apt/sources.list
apt update
apt install -t buster-backports linux-image-cloud-amd64 linux-headers-cloud-amd64
elif [[ "$(type -P yum)" ]]; then
### import the elrepo signing key
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
### install the elrepo repository
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-2.el7.elrepo.noarch.rpm
### install the mainline kernel
yum --enablerepo=elrepo-kernel install kernel-ml -y
### make the newly installed kernel the grub default
sed -i 's/saved/0/g' /etc/default/grub
### regenerate the grub configuration
grub2-mkconfig -o /boot/grub2/grub.cfg
# Oracle kernel
# grub2-set-default 0
# TCP-BBR
#net.core.default_qdisc=fq
#net.ipv4.tcp_congestion_control=bbr
### check whether the tcp_bbr module is loaded
#lsmod | grep bbr
#Please reboot your VPS after run command "yum update -y"
#ping 127.0.0.1 -c 5 >>null
#reboot
### reference: http://www.jianshu.com/p/726bd9f37220
### reference: https://legolasng.github.io/2017/05/08/upgrade-centos-kernel/#3安装新版本内核
else
echo "error: The script does not support the package manager in this operating system."
exit 1
fi
### enable BBR via sysctl
# NOTE(review): appended unconditionally, so repeated runs accumulate
# duplicate entries in /etc/sysctl.conf.
echo net.core.default_qdisc=fq >> /etc/sysctl.conf
echo net.ipv4.tcp_congestion_control=bbr >> /etc/sysctl.conf
sysctl -p
}
function trojan(){
## Install trojan behind an nginx SNI (stream) proxy built from source.
## Requires an existing TLS certificate + key in /tmp/trojan; prompts
## for domain, ports, password and trojan version; installs both nginx
## and trojan as services.
clear
echo "######强烈建议使用443及80端口######"
echo "#不会自动申请证书,请先准备好ssl证书#"
echo "######并放到 /tmp/trojan 目录######"
echo "########按任意键开始端口检测########"
read
clear
echo "直接回车检测https(443)监听端口"
check_port 443
echo "直接回车检测http(80)监听端口"
check_port 80
clear
while [[ true ]]; do
read -p "请输入域名:" domain
if [[ -n $domain ]]; then
break;
fi
done
### check the local certificate files
clear
echo "########端口检测本地证书########"
if [ ! -d "/tmp/trojan" ];then
mkdir /tmp/trojan
fi
count=$(ls -l /tmp/trojan | grep "^-" | wc -l )
if [ $count -gt 2 ];then
echo "这个/tmp/trojan目录怎么有$count个文件?证书加Key才两个文件而已,自己清空该目录所有文件再放入key和证书!!!"
exit 0
elif ! [ -f /tmp/trojan/*key ]; then
#cp /tmp/trojan/*key /etc/trojan/private.key
echo "请将密钥key放入 /tmp/trojan 文件夹后再执行该脚本"
exit 0
elif [ -f /tmp/trojan/*pem ]; then
#cp /tmp/trojan/*pem /etc/trojan/certificate.pem
cert=pem
echo "已检测到pem证书文件"
elif [ -f /tmp/trojan/*crt ]; then
#cp /tmp/trojan/*crt /etc/trojan/certificate.crt
cert=crt
echo "已检测到crt证书文件"
else
echo "请将证书文件(crt/pem)放入/tmp/trojan 文件夹后再执行该脚本"
exit 0
fi
read -p "设置一个trojan密码(默认trojanWdai1): " PW
PW=${PW:-trojanWdai1}
read -p "请输入trojan版本号(默认1.15.1),可以到这里查https://github.com/trojan-gfw/trojan/releases: " trojan_version
trojan_version=${trojan_version:-1.15.1}
nginx_version=1.21.1
nginx_url=http://nginx.org/download/nginx-${nginx_version}.tar.gz
yum -y install gcc gcc-c++ pcre pcre-devel zlib zlib-devel openssl openssl-devel wget
wget $nginx_url && tar zxf nginx-${nginx_version}.tar.gz && cd nginx-$nginx_version
./configure \
--prefix=/usr/local/nginx \
--with-http_ssl_module \
--with-http_stub_status_module \
--with-http_realip_module \
--with-threads \
--with-stream_ssl_module \
--with-http_v2_module \
--with-stream_ssl_preread_module \
--with-stream=dynamic
# NOTE(review): 'check' is called here (and after make) without an
# argument, so a failure message would be missing the component name.
check
make && make install
check
ln -s /usr/local/nginx/sbin/nginx /usr/bin/nginx
mv /usr/local/nginx/conf/nginx.conf /usr/local/nginx/conf/nginx.conf_backup
## rewrite the nginx config file; \$ variables are escaped so nginx
## itself sees them, while $domain expands now
##wget -P /usr/local/nginx/conf https://raw.githubusercontent.com/onlyJinx/shell_CentOS7/master/nginx.conf
cat >/usr/local/nginx/conf/nginx.conf<<-EOF
load_module /usr/local/nginx/modules/ngx_stream_module.so;
worker_processes 1;
events {
worker_connections 1024;
}
stream {
map \$ssl_preread_server_name \$name {
$domain 127.0.0.1:555; #forward to trojan
#aria2.domain.com 127.0.0.1:6801; #forward to aria2_rpc
default 127.0.0.1:4433; #block all
}
server {
listen 443 reuseport;
listen [::]:443 reuseport;
proxy_pass \$name;
ssl_preread on; #开启 ssl_preread
}
}
http {
include mime.types;
default_type application/octet-stream;
sendfile on;
keepalive_timeout 65;
###全站https
server {
listen 0.0.0.0:80;
listen [::]:80;
server_name _;
return 301 https://\$host\$request_uri;
}
server {
listen 4433 default ssl;
server_name _;
return 403; #block all
ssl_certificate /etc/trojan/certificate.pem;
ssl_certificate_key /etc/trojan/private.key;
ssl_session_cache shared:SSL:1m;
ssl_session_timeout 5m;
ssl_ciphers HIGH:!aNULL:!MD5;
ssl_prefer_server_ciphers on;
location / {
root html;
index index.html index.htm;
}
}
server {
listen 127.0.0.1:6801 ssl;
server_name _;
ssl_certificate /etc/trojan/certificate.pem;
ssl_certificate_key /etc/trojan/private.key;
location / {
proxy_pass http://127.0.0.1:6800;
}
}
##Trojan伪装站点
server {
listen 127.0.0.1:5555 http2;
server_name _;
charset utf-8;
absolute_redirect off;
#ssl_certificate /etc/trojan/certificate.pem;
#ssl_certificate_key /etc/trojan/private.key;
location / {
#index index.html;
}
}
}
EOF
### create the nginx init service
# quotes need no escaping when written this way; backquotes and $ do
wget -P /etc/init.d https://raw.githubusercontent.com/onlyJinx/shell_CentOS7/master/nginx
chmod a+x /etc/init.d/nginx
chkconfig --add /etc/init.d/nginx
chkconfig nginx on
### nginx build steps adapted from this blog post:
###https://www.cnblogs.com/stulzq/p/9291223.html
wget https://github.com/trojan-gfw/trojan/releases/download/v${trojan_version}/trojan-${trojan_version}-linux-amd64.tar.xz && tar xvJf trojan-${trojan_version}-linux-amd64.tar.xz -C /etc
ln -s /etc/trojan/trojan /usr/bin/trojan
config_path=/etc/trojan/config.json
## adapt the stock trojan config: drop password2, fix cert suffix,
## move trojan to 555 and its fallback/h2 ports to 5100/5555
sed -i '/password2/ d' $config_path
sed -i "/certificate.crt/ s/.crt/.$cert/" $config_path
sed -i "/local_port/ s/443/555/" $config_path
sed -i "/remote_port/ s/80/5100/" $config_path
sed -i "/h2\":/ s/81/5555/" $config_path
# NOTE(review): sed addresses with a custom ':' delimiter need a
# leading backslash (\:regexp:); as written GNU sed treats
# ':http/1.1:', ':\"cert\":' and ':\"key\":' as labels, so these
# substitutions may never run — confirm against the generated config.
sed -i ":http/1.1: s:http/1.1:h2:" $config_path
sed -i "/\"password1\",/ s/\"password1\",/\"$PW\"/" $config_path
sed -i ":\"cert\": s:path\/to:etc\/trojan:" $config_path
sed -i ":\"key\": s:path\/to:etc\/trojan:" $config_path
## copy the certificate files
cp /tmp/trojan/*key /etc/trojan/private.key
if [[ "$cert" == "crt" ]]; then
cp /tmp/trojan/*crt /etc/trojan/certificate.crt
else
cp /tmp/trojan/*pem /etc/trojan/certificate.pem
fi
### create the trojan systemd service
cat >/etc/systemd/system/trojan.service<<-EOF
[Unit]
Description=trojan Server
After=network.target
[Service]
ExecStart=/etc/trojan/trojan -c /etc/trojan/config.json
User=root
[Install]
WantedBy=multi-user.target
EOF
##firewall-cmd --zone=public --add-port=443/tcp --permanent
##firewall-cmd --zone=public --add-port=443/udp --permanent
##firewall-cmd --zone=public --add-port=80/tcp --permanent
##firewall-cmd --zone=public --add-port=80/udp --permanent
##firewall-cmd --reload
systemctl start trojan
systemctl enable trojan
systemctl start nginx
systemctl status nginx
systemctl enable nginx
}
# Interactive installer menu: runs exactly one of the installer functions
# defined earlier in this script, then leaves the select loop.
select option in "shadowsocks-libev" "transmission" "aria2" "Up_kernel" "trojan+nginx"
do
    # Quote the selector: an invalid menu number leaves $option empty.
    case "$option" in
        "shadowsocks-libev")
            shadowsocks-libev
            break;;
        "transmission")
            transmission
            break;;
#        "samba")
#            samba
#            break;;
        "aria2")
            aria2
            break;;
        "Up_kernel")
            Up_kernel
            break;;
#        "ngrok")
#            ngrok
#            break;;
#        "filemanager")
#            filemanager
#            break;;
        "trojan+nginx")
            trojan
            break;;
        *)
            # Typo fix: message used to read "nothink to do".
            echo "nothing to do"
            break;;
    esac
done
| true
|
44adf1e2f37b51cbf36b9104db63805ad85c2e9e
|
Shell
|
amirlanesman/renamer
|
/bin/manage.sh
|
UTF-8
| 374
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/zsh
# Run the renamer's manage.js over the path given as $1, teeing all output
# into a timestamped log file under user/logs/.
source ~/.zshrc
# Abort if the project directory is missing; otherwise the log writes and
# the node invocation below would silently run from the wrong directory.
cd /Users/amirlanesman/Documents/renamer || exit 1
i=$(date +%F_%T)
echo "executing renamer. param[0]: $1" 2>&1 | tee -a "user/logs/manage-$i.log"
echo "node -v:" 2>&1 | tee -a "user/logs/manage-$i.log"
node -v 2>&1 | tee -a "user/logs/manage-$i.log"
# (the trailing duplicate 2>&1 after tee was redundant and has been dropped)
MEDIA_MANAGER_ARGS="-scrapeAll" node manage.js --path "$1" 2>&1 | tee -a "user/logs/manage-$i.log"
| true
|
9b6877c1fdbd4930bb22982438621ff5d984fcfe
|
Shell
|
FernandaDguez/spdk
|
/test/iscsi_tgt/common.sh
|
UTF-8
| 5,651
| 3.171875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
# Network configuration
TARGET_INTERFACE="spdk_tgt_int"
INITIATOR_INTERFACE="spdk_init_int"
TARGET_NAMESPACE="spdk_iscsi_ns"
# Command prefix that runs a command inside the target's network namespace.
TARGET_NS_CMD=(ip netns exec "$TARGET_NAMESPACE")
# iSCSI target configuration
TARGET_IP=10.0.0.1
INITIATOR_IP=10.0.0.2
ISCSI_PORT=3260
NETMASK=$INITIATOR_IP/32
INITIATOR_TAG=2
INITIATOR_NAME=ANY
PORTAL_TAG=1
# Always launch the iSCSI app inside the target namespace.
ISCSI_APP=("${TARGET_NS_CMD[@]}" "${ISCSI_APP[@]}")
# Extra sock_vpp log flag when exercising the VPP transport.
if [ $SPDK_TEST_VPP -eq 1 ]; then
	ISCSI_APP+=(-L sock_vpp)
fi
ISCSI_TEST_CORE_MASK=0xFF
# Build the initiator<->target veth pair and move the target end into its own
# network namespace. Requires root. For "posix" the target gets a plain IP and
# connectivity is ping-verified; otherwise VPP is started to own the interface.
function create_veth_interfaces() {
	# $1 = test type (posix/vpp)
	# Best-effort cleanup of leftovers from a previous run.
	ip netns del $TARGET_NAMESPACE || true
	ip link delete $INITIATOR_INTERFACE || true
	# Ensure partial setups are torn down on interrupt or error exit.
	trap 'cleanup_veth_interfaces $1; exit 1' SIGINT SIGTERM EXIT
	# Create veth (Virtual ethernet) interface pair
	ip link add $INITIATOR_INTERFACE type veth peer name $TARGET_INTERFACE
	ip addr add $INITIATOR_IP/24 dev $INITIATOR_INTERFACE
	ip link set $INITIATOR_INTERFACE up
	# Create and add interface for target to network namespace
	ip netns add $TARGET_NAMESPACE
	ip link set $TARGET_INTERFACE netns $TARGET_NAMESPACE
	# Accept connections from veth interface
	iptables -I INPUT 1 -i $INITIATOR_INTERFACE -p tcp --dport $ISCSI_PORT -j ACCEPT
	"${TARGET_NS_CMD[@]}" ip link set $TARGET_INTERFACE up
	if [ "$1" == "posix" ]; then
		"${TARGET_NS_CMD[@]}" ip link set lo up
		"${TARGET_NS_CMD[@]}" ip addr add $TARGET_IP/24 dev $TARGET_INTERFACE
		# Verify connectivity
		ping -c 1 $TARGET_IP
		ip netns exec $TARGET_NAMESPACE ping -c 1 $INITIATOR_IP
	else
		start_vpp
	fi
}
# Undo create_veth_interfaces: stop VPP if it was used, then remove the veth
# pair and the target namespace.
function cleanup_veth_interfaces() {
	# $1 = test type (posix/vpp)

	if [ "$1" == "vpp" ]; then
		kill_vpp
	fi

	# Cleanup veth interfaces and network namespace
	# Note: removing one veth, removes the pair
	ip link delete $INITIATOR_INTERFACE
	ip netns del $TARGET_NAMESPACE
}
# Prepare the host for an isolated ("iso") iSCSI test run; no-op otherwise.
function iscsitestinit() {
	if [ "$1" == "iso" ]; then
		$rootdir/scripts/setup.sh
		# Fall back to the posix transport when no test type was supplied.
		create_veth_interfaces "${2:-posix}"
	fi
}
# Poll (up to 20 x 0.1s) until exactly $1 attached SCSI disks show up in the
# iSCSI session; returns 0 once matched, 1 on timeout.
function waitforiscsidevices() {
	local expected=$1
	local attempt found

	for ((attempt = 1; attempt <= 20; attempt++)); do
		found=$(iscsiadm -m session -P 3 | grep -c "Attached scsi disk sd[a-z]*" || true)
		if [ "$found" -eq "$expected" ]; then
			return 0
		fi
		sleep 0.1
	done

	return 1
}
# Tear down after an isolated ("iso") run, mirroring iscsitestinit.
function iscsitestfini() {
	if [ "$1" == "iso" ]; then
		# Default to the posix transport, matching the init path.
		cleanup_veth_interfaces "${2:-posix}"
		$rootdir/scripts/setup.sh reset
	fi
}
# Launch a private VPP instance inside the target namespace and attach the
# veth to it as a host-interface. Called from create_veth_interfaces for the
# "vpp" test type; sets the global vpp_pid consumed by kill_vpp.
function start_vpp() {
	# We need to make sure that posix side doesn't send jumbo packets while
	# for VPP side maximal size of MTU for TCP is 1460 and tests doesn't work
	# stable with larger packets
	MTU=1460
	MTU_W_HEADER=$((MTU + 20))
	ip link set dev $INITIATOR_INTERFACE mtu $MTU
	ethtool -K $INITIATOR_INTERFACE tso off
	ethtool -k $INITIATOR_INTERFACE
	# Start VPP process in SPDK target network namespace
	"${TARGET_NS_CMD[@]}" vpp \
		unix { nodaemon cli-listen /run/vpp/cli.sock } \
		dpdk { no-pci } \
		session { evt_qs_memfd_seg } \
		socksvr { socket-name /run/vpp-api.sock } \
		plugins { \
			plugin default { disable } \
			plugin dpdk_plugin.so { enable } \
		} &
	vpp_pid=$!
	echo "VPP Process pid: $vpp_pid"
	gdb_attach $vpp_pid &
	# Wait until VPP starts responding (up to 40 x 0.5s = 20s).
	xtrace_disable
	counter=40
	while [ $counter -gt 0 ]; do
		vppctl show version | grep -E "vpp v[0-9]+\.[0-9]+" && break
		counter=$((counter - 1))
		sleep 0.5
	done
	xtrace_restore
	if [ $counter -eq 0 ]; then
		return 1
	fi
	# Below VPP commands are masked with "|| true" for the sake of
	# running the test in the CI system. For reasons unknown when
	# run via CI these commands result in 141 return code (pipefail)
	# even despite producing valid output.
	# Using "|| true" does not impact the "-e" flag used in test scripts
	# because vppctl cli commands always return with 0, even if
	# there was an error.
	# As a result - grep checks on command outputs must be used to
	# verify vpp configuration and connectivity.
	# Setup host interface
	vppctl create host-interface name $TARGET_INTERFACE || true
	VPP_TGT_INT="host-$TARGET_INTERFACE"
	vppctl set interface state $VPP_TGT_INT up || true
	vppctl set interface ip address $VPP_TGT_INT $TARGET_IP/24 || true
	vppctl set interface mtu $MTU $VPP_TGT_INT || true
	vppctl show interface | tr -s " " | grep -E "host-$TARGET_INTERFACE [0-9]+ up $MTU/0/0/0"
	# Disable session layer
	# NOTE: VPP net framework should enable it itself.
	vppctl session disable || true
	# Verify connectivity in both directions; ping sizes are derived from the
	# MTU minus IP/ICMP header overhead.
	vppctl show int addr | grep -E "$TARGET_IP/24"
	ip addr show $INITIATOR_INTERFACE
	ip netns exec $TARGET_NAMESPACE ip addr show $TARGET_INTERFACE
	sleep 3
	# SC1010: ping -M do - in this case do is an option not bash special word
	# shellcheck disable=SC1010
	ping -c 1 $TARGET_IP -s $((MTU - 28)) -M do
	vppctl ping $INITIATOR_IP repeat 1 size $((MTU - (28 + 8))) verbose | grep -E "$MTU_W_HEADER bytes from $INITIATOR_IP"
}
# Detach the veth from VPP, dump diagnostic state, then kill the VPP
# process started by start_vpp (global vpp_pid).
function kill_vpp() {
	vppctl delete host-interface name $TARGET_INTERFACE || true

	# Dump VPP configuration before kill
	vppctl show api clients || true
	vppctl show session || true
	vppctl show errors || true
	killprocess $vpp_pid
}
function initiator_json_config() {
	# Prepare config file for iSCSI initiator
	# Any arguments are appended as extra JSON entries in the bdev config
	# list; "${*:+,$*}" inserts a leading comma only when arguments exist.
	jq . <<- JSON
		{
		  "subsystems": [
		    {
		      "subsystem": "bdev",
		      "config": [
		        {
		          "method": "bdev_iscsi_create",
		          "params": {
		            "name": "iSCSI0",
		            "url": "iscsi://$TARGET_IP/iqn.2016-06.io.spdk:disk1/0",
		            "initiator_iqn": "iqn.2016-06.io.spdk:disk1/0"
		          }
		        }${*:+,$*}
		      ]
		    }
		  ]
		}
	JSON
}
| true
|
8b0b6c675189fc5b380a2ca374df01ce9d03444a
|
Shell
|
zorancco/chef-openswan-awsvpn
|
/templates/default/aws_customer_gateway.erb
|
UTF-8
| 5,208
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
# This script will startup a VPN connection to an AWS VPC.
#
# 3. Make sure the inbound rules are configured in your EC2 security group
# UDP port 500 allow
# TCP port 179 allow
#
# 7. Reboot the instance and test the configuration by launching a Linux instance in your VPC and pinging that instance
# $ ip netns exec openswan ping <IP address in your VPC>
#
# 8. If you need to troubleshoot the connectivity, the following commands are useful.
## Check the IPSEC SA
# $ ip netns exec openswan ip xfrm state
#
## Check the BGP Neighbor (use testPassword as the password)
# $ ip netns exec openswan telnet 127.0.0.1 2605
# > show ip bgp summary
#
### BEGIN INIT INFO
# Provides: VPN Gateway to AWS VPC
# Required-Start: $network $remote_fs $syslog
# Required-Stop: $syslog $remote_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Start VPN Customer Gateway at boot time
# Description: Start VPN Customer Gateway at boot time
### END INIT INFO
# source function library
. /etc/rc.d/init.d/functions
# Get network config
. /etc/sysconfig/network
# quagga command line options
. /etc/sysconfig/quagga
prog=$(basename $0)
# if DEBUG is set (to anything but '0') send a copy of the output to /dev/tty so the caller
# can see the messages without checking the system logs
if [ "$DEBUG" -a "$DEBUG" != 0 ]; then
logger="logger -s -t $prog"
else
logger="logger -t $prog"
fi
if [ `id -u` -ne 0 ]
then
echo "permission denied (must be superuser)" |
logger -s -p daemon.error -t $prog 2>&1
exit 4
fi
RETVAL=0
. /etc/sysconfig/aws_customer_gateway
ox='ip netns exec openswan'
# Create the "openswan" network namespace, a veth pair into it, and the NAT
# rules that steer AWS VGW tunnel traffic to the namespace addresses.
setup_network() {
    #Create a network namespace
    ip netns add openswan
    #Create interfaces and assign IP
    ip link add type veth
    ip link set dev veth1 netns openswan
    ip link set dev veth0 name toopenswan
    ip addr add dev toopenswan <%= @GATEWAY_IP %>/28
    $ox ip link set dev veth1 name eth0
    $ox ip addr add dev eth0 <%= @NAMESPACE_TUNNEL1_IP %>/28
    $ox ip addr add dev eth0 <%= @NAMESPACE_TUNNEL2_IP %>/28
    $ox ip addr add dev eth0 <%= @CGW_TUNNEL1_INSIDE_IP %>/30
    $ox ip addr add dev eth0 <%= @CGW_TUNNEL2_INSIDE_IP %>/30
    ip link set toopenswan up
    $ox ip link set eth0 up
    $ox ip link set lo up
    #Configure routing
    # Each rule is deleted (-D) before being added (-A) so repeated start
    # invocations do not accumulate duplicate NAT rules.
    iptables -t nat -D PREROUTING -s <%= @VGW_TUNNEL1_OUTSIDE_IP %>/32 -i eth0 -j DNAT --to-destination <%= @NAMESPACE_TUNNEL1_IP %>
    iptables -t nat -A PREROUTING -s <%= @VGW_TUNNEL1_OUTSIDE_IP %>/32 -i eth0 -j DNAT --to-destination <%= @NAMESPACE_TUNNEL1_IP %>
    iptables -t nat -D POSTROUTING -d <%= @VGW_TUNNEL1_OUTSIDE_IP %>/32 -j SNAT --to-source <%= @INSTANCE_IP %>
    iptables -t nat -A POSTROUTING -d <%= @VGW_TUNNEL1_OUTSIDE_IP %>/32 -j SNAT --to-source <%= @INSTANCE_IP %>
    iptables -t nat -D PREROUTING -s <%= @VGW_TUNNEL2_OUTSIDE_IP %>/32 -i eth0 -j DNAT --to-destination <%= @NAMESPACE_TUNNEL2_IP %>
    iptables -t nat -A PREROUTING -s <%= @VGW_TUNNEL2_OUTSIDE_IP %>/32 -i eth0 -j DNAT --to-destination <%= @NAMESPACE_TUNNEL2_IP %>
    iptables -t nat -D POSTROUTING -d <%= @VGW_TUNNEL2_OUTSIDE_IP %>/32 -j SNAT --to-source <%= @INSTANCE_IP %>
    iptables -t nat -A POSTROUTING -d <%= @VGW_TUNNEL2_OUTSIDE_IP %>/32 -j SNAT --to-source <%= @INSTANCE_IP %>
    $ox ip r add default via <%= @GATEWAY_IP %>
    #$ox sysctl -w net.ipv4.conf.all.forwarding=1
}
# Undo setup_network: remove the veth pair (deleting one end removes both),
# the namespace, and the NAT rules.
teardown_network() {
    ip link delete toopenswan type veth
    ip link delete veth0 type veth
    ip link delete veth1 type veth
    ip netns delete openswan
    iptables -t nat -D PREROUTING -s <%= @VGW_TUNNEL1_OUTSIDE_IP %>/32 -i eth0 -j DNAT --to-destination <%= @NAMESPACE_TUNNEL1_IP %>
    iptables -t nat -D POSTROUTING -d <%= @VGW_TUNNEL1_OUTSIDE_IP %>/32 -j SNAT --to-source <%= @INSTANCE_IP %>
    iptables -t nat -D PREROUTING -s <%= @VGW_TUNNEL2_OUTSIDE_IP %>/32 -i eth0 -j DNAT --to-destination <%= @NAMESPACE_TUNNEL2_IP %>
    iptables -t nat -D POSTROUTING -d <%= @VGW_TUNNEL2_OUTSIDE_IP %>/32 -j SNAT --to-source <%= @INSTANCE_IP %>
    #$ox sysctl -w net.ipv4.conf.all.forwarding=0
}
# Bring up IPsec inside the namespace, then the quagga routing daemons
# (zebra/bgpd/ripd) inside the chroot.
start_control_plane() {
    IPSEC_CONFS=<%= @AWSVPN_CHROOT %>/etc $ox /etc/init.d/ipsec restart
    # Drop routes learned by a previous zebra instance.
    $ox ip route flush proto zebra
    $ox /usr/sbin/chroot <%= @AWSVPN_CHROOT %> /etc/init.d/zebra start
    $ox /usr/sbin/chroot <%= @AWSVPN_CHROOT %> /etc/init.d/bgpd start
    $ox /usr/sbin/chroot <%= @AWSVPN_CHROOT %> /etc/init.d/ripd start
}
# Stop the quagga routing daemons, then bring down IPsec.
stop_control_plane() {
    $ox /usr/sbin/chroot <%= @AWSVPN_CHROOT %> /etc/init.d/zebra stop
    $ox /usr/sbin/chroot <%= @AWSVPN_CHROOT %> /etc/init.d/bgpd stop
    $ox /usr/sbin/chroot <%= @AWSVPN_CHROOT %> /etc/init.d/ripd stop
    IPSEC_CONFS=<%= @AWSVPN_CHROOT %>/etc $ox /etc/init.d/ipsec stop
}
# Bring up the namespace network and the VPN control plane, piping all
# output through the logger.
start() {
    (
    setup_network
    start_control_plane
    ) 2>&1 | $logger
    # Bug fix: RETVAL used to be assigned *inside* the subshell, so the
    # value never reached this shell and start() always returned the stale
    # global (0). PIPESTATUS[0] is the subshell's exit status, i.e. the
    # status of start_control_plane.
    RETVAL=${PIPESTATUS[0]}
    return $RETVAL
}
# Stop the control plane and tear down the namespace network, piping all
# output through the logger.
stop() {
    (
    stop_control_plane
    teardown_network
    ) 2>&1 | $logger
    # Bug fix: the old RETVAL=$? inside the subshell was lost; take the
    # subshell's exit status (teardown_network) from PIPESTATUS instead.
    RETVAL=${PIPESTATUS[0]}
    return $RETVAL
}
# Full bounce: tear everything down, then bring it back up.
restart() {
    stop
    start
}
# do it
case "$1" in
start|--start)
start
;;
stop|--stop)
stop
;;
restart|--restart)
restart
;;
*)
echo "Usage: $prog {start|stop|restart}"
RETVAL=2
esac
exit $RETVAL
| true
|
283e40bbcdd794c981dda59cb87cca65b2c3fd0a
|
Shell
|
renzhentaxi/coursework
|
/week1/work/day2/citibike_solution.sh
|
UTF-8
| 1,392
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
#
# add your solution after each of the 10 comments below
#
# Every pipeline uses `tail -n +2` to skip the CSV header row; `tr -d \"`
# strips the quoting around field values.
file="./2014-02-citibike-tripdata.csv";
# count the number of unique stations
# (start-station col 5 and end-station col 9, merged then de-duplicated)
tail -n +2 $file| cut -d, -f5,9 | tr , "\n"| sort |uniq| wc -l;
# count the number of unique bikes
tail -n +2 $file| cut -d, -f12 | sort |uniq| wc -l;
# count the number of trips per day
# (col 2 = start time, col 3 = stop time; awk keeps only the date part)
tail -n +2 $file| cut -d, -f2 | tr -d \"| awk -F" " '{print $1}'|sort| uniq -c;
tail -n +2 $file| cut -d, -f3 | tr -d \"| awk -F" " '{print $1}'|sort|uniq -c ;
# find the day with the most rides
tail -n +2 $file| cut -d, -f2 | tr -d \"| awk -F" " '{print $1}'| sort|uniq -c|sort -n|tail -n1;
# find the day with the fewest rides
tail -n +2 $file| cut -d, -f2 | tr -d \"| awk -F" " '{print $1}'| sort -n|uniq -c|sort -n |head -n1;
# find the id of the bike with the most rides
tail -n +2 $file| cut -d, -f12| sort | uniq -c | sort -nr | head -n1
# count the number of rides by gender and birth year
tail -n +2 $file| cut -d, -f14,15 | sort |uniq -c
# count the number of trips that start on cross streets that both contain numbers (e.g., "1 Ave & E 15 St", "E 39 St & 2 Ave", ...)
# (two equivalent solutions: grep on the cut field, or awk matching col 5)
tail -n +2 $file| cut -d, -f5 | grep --color ".*[0-9].*&.*[0-9].*" | sort | uniq -c
awk -F, '$5 ~ /.*[0-9].*&.*[0-9].*/ {print $5}' $file | sort | uniq -c
# compute the average trip duration
tail -n +2 $file| cut -d, -f1| tr -d \" | awk -F, '{sum+=$1;n++} END{print sum/n}'
| true
|
e242edf5421fbb286b9f44294fdd87a71be57c6a
|
Shell
|
liangbinc/summary
|
/summary.sh
|
UTF-8
| 1,176
| 3.96875
| 4
|
[] |
no_license
|
#!/bin/bash
#APP_NAME=`dirname $0`/Summary-0.0.1-SNAPSHOT.jar
APP_NAME=$(cd "$(dirname $0)";pwd)/Summary-0.0.1-SNAPSHOT.jar
usage() {
  # Print the supported sub-commands, then exit with a usage-error status.
  printf '%s\n' "Usage: summary.sh [start|stop|restart|status]"
  exit 1
}
#is running
is_exist(){
pid=`ps -ef|grep $APP_NAME|grep -v grep|awk '{print $2}'`
#如果不存在返回1,存在返回0
if [ -z "${pid}" ]; then
return 1
else
return 0
fi
}
# Launch the jar in the background with a JDWP debug agent, unless it is
# already running; echoes the resulting PID.
start(){
  is_exist
  if [ $? -eq 0 ]; then
    echo "${APP_NAME} is already running. pid=${pid}"
  else
    # NOTE(review): address=90901 exceeds the max TCP port 65535 — confirm
    # the intended debug port before relying on remote attach.
    nohup java -jar -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=90901 ${APP_NAME} >/dev/null 2>&1 &
    # Re-scan the process table to report the new PID.
    pid=$(ps -ef | grep ${APP_NAME} |grep -v grep |awk '{print$2}')
    echo $pid
  fi
}
# Kill the running app, if any.
# NOTE(review): SIGKILL gives the JVM no chance to clean up; consider TERM
# first with KILL as fallback.
stop(){
  is_exist
  if [ $? -eq "0" ]; then
    kill -9 $pid
  else
    echo "${APP_NAME} is not running"
  fi
}
status(){
  # Report whether the app is up, including its PID when running.
  if is_exist; then
    echo "${APP_NAME} is running. Pid is ${pid}"
  else
    echo "${APP_NAME} is not running."
  fi
}
# Full bounce: stop, give the process 2s to exit, then start again.
restart(){
  stop
  sleep 2
  start
}
# Dispatch on the first CLI argument; anything unrecognized prints usage.
case "$1" in
  "start")
    start
    ;;
  "stop")
    stop
    ;;
  "status")
    status
    ;;
  "restart")
    restart
    ;;
  *)
    usage
    ;;
esac
| true
|
99017ea55b5aae8216fc5bd28a612a223bb3f6a1
|
Shell
|
thomasWeise/aitoa-code
|
/trigger-travis.sh
|
UTF-8
| 3,145
| 3.65625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh -f
# Trigger a new Travis-CI job.
# Usage:
# trigger-travis.sh [--pro] [--branch BRANCH] GITHUBID GITHUBPROJECT TRAVIS_ACCESS_TOKEN [MESSAGE]
# For example:
# trigger-travis.sh typetools checker-framework `cat ~/private/.travis-access-token` "Trigger for testing"
# For full documentation, see
# https://github.com/plume-lib/trigger-travis/tree/documentation

if [ "$#" -lt 3 ] || [ "$#" -ge 7 ]; then
  echo "Wrong number of arguments $# to trigger-travis.sh; run like:"
  echo " trigger-travis.sh [--pro] [--branch BRANCH] GITHUBID GITHUBPROJECT TRAVIS_ACCESS_TOKEN [MESSAGE]" >&2
  exit 1
fi

# --pro targets the commercial travis-ci.com; default is travis-ci.org.
if [ "$1" = "--pro" ] ; then
  TRAVIS_URL=travis-ci.com
  shift
else
  TRAVIS_URL=travis-ci.org
fi

# Branch to build; defaults to master.
if [ "$1" = "--branch" ] ; then
  shift
  BRANCH="$1"
  shift
else
  BRANCH=master
fi

USER=$1
REPO=$2
TOKEN=$3
# Optional commit message; when running under Travis itself, synthesize one
# from the upstream repo slug and HEAD commit.
if [ $# -eq 4 ] ; then
  MESSAGE=",\"message\": \"$4\""
elif [ -n "$TRAVIS_REPO_SLUG" ] ; then
  MESSAGE=",\"message\": \"Triggered by upstream build of $TRAVIS_REPO_SLUG commit $(git log --oneline -n 1 HEAD)\""
else
  MESSAGE=""
fi
## For debugging:
# echo "USER=$USER"
# echo "REPO=$REPO"
# echo "TOKEN=$TOKEN"
# echo "MESSAGE=$MESSAGE"

body="{
\"request\": {
\"branch\":\"$BRANCH\"
$MESSAGE
}}"

# Use an unpredictable temp file and remove it on every exit path; the old
# code left /tmp/travis-request-output.$$.txt behind and its name was
# predictable.
OUTPUT_FILE=$(mktemp) || exit 1
trap 'rm -f "$OUTPUT_FILE"' EXIT

# It does not work to put / in place of %2F in the URL below. I'm not sure why.
curl -s -X POST \
  -H "Content-Type: application/json" \
  -H "Accept: application/json" \
  -H "Travis-API-Version: 3" \
  -H "Authorization: token ${TOKEN}" \
  -d "$body" \
  "https://api.${TRAVIS_URL}/repo/${USER}%2F${REPO}/requests" \
  | tee "$OUTPUT_FILE"

# The API reports failures in the response body, not the curl exit status.
if grep -q '"@type": "error"' "$OUTPUT_FILE"; then
  exit 1
fi
if grep -q 'access denied' "$OUTPUT_FILE"; then
  exit 1
fi
#
# Originally, I planned to using this file by copying it on the fly from its
# source, https://github.com/plume-lib/trigger-travis.
# But this would be a security risk, as a change to that file could lead to
# the leakage of my travis access token.
# So I decided to use a copy here.
#
# The original file is under the MIT License, which is copied here.
#
# MIT License
# Copyright (c) 2018 plume-lib
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
| true
|
df66e0bc2772a6af0deb689f1bb2cae9a8f02bb3
|
Shell
|
Trietptm-on-Security/SystemPrep
|
/Utils/repobuilder/create_yum_repo_definitions.sh
|
UTF-8
| 4,078
| 3.4375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
set -ex
exec > >(logger -i -t "create_yum_repo_definitions" -s 2> /dev/console) 2>&1
BUCKETNAME=${1:-systemprep-repo} # What bucket contains the packages?
S3_BASE_URL=${2:-https://s3.amazonaws.com} # What's the url to s3?
PIP_INSTALLER=${3:-https://bootstrap.pypa.io/get-pip.py} # URL to the pip installer
PIP_INDEX_URL=${4:-https://pypi.python.org/simple} # pip --index-url parameter
REPO_DIR="/root/${BUCKETNAME}" # Where do we want to stage the repo?
PACKAGE_DIR="${REPO_DIR}/linux" # Where are we staging the packages?
YUM_FILE_DIR="${PACKAGE_DIR}/yum.repos" # Where do we want to save the yum repo files?
BUCKET_URL="s3://${BUCKETNAME}" # What bucket contains the packages?
BASE_URL="${S3_BASE_URL}/${BUCKETNAME}/linux" # Common http path to the hosted packages
REPOS=(
"AMZN"
"CENTOS"
"RHEL"
"EPEL6"
"EPEL7"
"SALT_EPEL6"
"SALT_EPEL7"
)
REPO_NAME_AMZN="${BUCKETNAME}-amzn-packages"
REPO_BASEURL_AMZN="${BASE_URL}/amzn/latest/\$basearch/"
REPO_GPGKEY_AMZN="${BASE_URL}/amzn/latest/\$basearch/RPM-GPG-KEY-amazon-ga"
REPO_NAME_CENTOS="${BUCKETNAME}-centos-packages"
REPO_BASEURL_CENTOS="${BASE_URL}/centos/\$releasever/\$basearch/"
REPO_GPGKEY_CENTOS="${BASE_URL}/centos/\$releasever/\$basearch/RPM-GPG-KEY-CentOS-\$releasever"
REPO_NAME_RHEL="${BUCKETNAME}-rhel-packages"
REPO_BASEURL_RHEL="${BASE_URL}/rhel/\$releasever/\$basearch/"
REPO_GPGKEY_RHEL="${BASE_URL}/rhel/\$releasever/\$basearch/RPM-GPG-KEY-redhat-release"
REPO_NAME_EPEL6="${BUCKETNAME}-epel6-packages"
REPO_BASEURL_EPEL6="${BASE_URL}/epel/6/\$basearch/"
REPO_GPGKEY_EPEL6="${BASE_URL}/epel/6/\$basearch/RPM-GPG-KEY-EPEL-6"
REPO_NAME_EPEL7="${BUCKETNAME}-epel7-packages"
REPO_BASEURL_EPEL7="${BASE_URL}/epel/7/\$basearch/"
REPO_GPGKEY_EPEL7="${BASE_URL}/epel/7/\$basearch/RPM-GPG-KEY-EPEL-7"
REPO_NAME_SALT_EPEL6="${BUCKETNAME}-salt-epel6-packages"
REPO_BASEURL_SALT_EPEL6="${BASE_URL}/saltstack/salt/epel-6/\$basearch/"
REPO_GPGKEY_SALT_EPEL6="${BASE_URL}/saltstack/salt/epel-6/\$basearch/SALTSTACK-GPG-KEY.pub"
REPO_NAME_SALT_EPEL7="${BUCKETNAME}-salt-epel7-packages"
REPO_BASEURL_SALT_EPEL7="${BASE_URL}/saltstack/salt/epel-7/\$basearch/"
REPO_GPGKEY_SALT_EPEL7="${BASE_URL}/saltstack/salt/epel-7/\$basearch/SALTSTACK-GPG-KEY.pub"
# Manage distribution-specific dependencies
RELEASE=$(grep "release" /etc/system-release)
case "${RELEASE}" in
"Amazon"*)
;;
"CentOS"*"6."*)
service ntpd start 2>&1 > /dev/null && echo "Started ntpd..." || echo "Failed to start ntpd..."
### ^^^Workaround for issue where localtime is misconfigured on CentOS6
;;
"CentOS"*"7."*)
;;
"Red Hat"*"6."*)
;;
"Red Hat"*"7."*)
;;
*)
echo "Unsupported OS. Exiting"
exit 1
;;
esac
# Install pip
curl ${PIP_INSTALLER} -o /tmp/get-pip.py
python /tmp/get-pip.py
hash pip 2> /dev/null || PATH="${PATH}:/usr/local/bin" # Make sure pip is in path
# Upgrade setuptools
pip install --upgrade setuptools
# Install s3cmd
pip install s3cmd --upgrade --allow-all-external --index-url ${PIP_INDEX_URL}
hash s3cmd 2> /dev/null || PATH="${PATH}:/usr/local/bin" # Make sure s3cmd is in path
__print_repo_file() {
    # Emit a yum .repo definition to stdout.
    # $1 = repo id/name, $2 = baseurl, $3 = gpgkey URL.
    if [ $# -eq 3 ]; then
        name=$1
        baseurl=$2
        gpgkey=$3
    else
        printf "ERROR: __print_repo_file requires three arguments." 1>&2;
        exit 1
    fi
    # Bug fix: values are passed as printf *arguments*, never interpolated
    # into the format string — a '%' in a URL (e.g. %24 escapes) would
    # otherwise be treated as a conversion specifier and corrupt the output.
    printf '[%s]\n' "${name}"
    printf 'name=%s\n' "${name}"
    printf 'baseurl=%s\n' "${baseurl}"
    printf 'gpgcheck=1\n'
    printf 'gpgkey=%s\n' "${gpgkey}"
    printf 'enabled=1\n'
    printf 'skip_if_unavailable=1\n'
}
# Create directories
mkdir -p "${YUM_FILE_DIR}"
# Create the yum repo files.
# The REPO_NAME_*/REPO_BASEURL_*/REPO_GPGKEY_* variables are looked up by
# constructed name via bash indirect expansion (${!var}).
for repo in "${REPOS[@]}"; do
    repo_name="REPO_NAME_${repo}"
    repo_baseurl="REPO_BASEURL_${repo}"
    repo_gpgkey="REPO_GPGKEY_${repo}"
    __print_repo_file "${!repo_name}" "${!repo_baseurl}" "${!repo_gpgkey}" > "${YUM_FILE_DIR}/${!repo_name}.repo"
done
# Sync the repo directory back to the S3 bucket
s3cmd sync "${REPO_DIR}/" "${BUCKET_URL}"
echo "Finished creating the yum repo definitions!"
| true
|
3ff39acae3f8c7e87a0f4ac5f3ecc7ddb2498f64
|
Shell
|
gusztavvargadr/packer
|
/src/u/packer/provisioners/shell-prepare/scripts/hyperv.sh
|
UTF-8
| 301
| 3.109375
| 3
|
[
"Unlicense"
] |
permissive
|
#!/bin/sh -eux
# Packer provisioner: when building with the hyperv-iso builder, install the
# Hyper-V guest kernel packages and reboot into the new kernel.
# NOTE(review): ubuntu_version/major_version are computed but unused below —
# confirm before removing (kept here; $() replaces the old backticks).
ubuntu_version="$(lsb_release -r | awk '{print $2}')";
major_version="$(echo "$ubuntu_version" | awk -F. '{print $1}')";
case "$PACKER_BUILDER_TYPE" in
hyperv-iso)
  apt-get -y update;
  apt-get -y install linux-image-virtual linux-tools-virtual linux-cloud-tools-virtual;
  reboot
esac
| true
|
d972c4f271ba1b6917adf8a749814b1f4837c26d
|
Shell
|
photoszzt/bindgen-wrapper
|
/compile
|
UTF-8
| 8,201
| 3.796875
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env sh
# This file is part of bindgen-wrapper. It is subject to the license terms in the COPYRIGHT file found in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/bindgen-wrapper/master/COPYRIGHT. No part of bindgen-wrapper, including this file, may be copied, modified, propagated, or distributed except according to the terms contained in the COPYRIGHT file.
# Copyright © 2016 The developers of bindgen-wrapper. See the COPYRIGHT file in the top-level directory of this distribution and at https://raw.githubusercontent.com/lemonrock/bindgen-wrapper/master/COPYRIGHT.
set -e
set -u
set -f
# Print the absolute folder containing this script, handling symlinks,
# PATH-based invocation, and "bash script" invocation; prefers an explicit
# override in _program_fattening_program_path.
_program_path_find()
{
	if [ "${_program_fattening_program_path+set}" = 'set' ]; then
		printf '%s\n' "$_program_fattening_program_path"
	elif [ "${0%/*}" = "$0" ]; then

		# We've been invoked by the interpreter as, say, bash program
		if [ -r "$0" ]; then
			pwd -P
		# Clutching at straws; probably run via a download, anonymous script, etc, weird execve, etc
		else
			printf '\n'
		fi

	else

		# We've been invoked with a relative or absolute path (also when invoked via PATH in a shell)

		_program_path_find_parentPath()
		{
			parentPath="${scriptPath%/*}"
			if [ -z "$parentPath" ]; then
				parentPath='/'
			fi
			cd "$parentPath" 1>/dev/null
		}

		# pdksh / mksh have problems with unsetting a variable that was never set...
		if [ "${CDPATH+set}" = 'set' ]; then
			unset CDPATH
		fi

		if command -v realpath 1>/dev/null 2>/dev/null; then
			(
				scriptPath="$(realpath "$0")"

				_program_path_find_parentPath
				pwd -P
			)
		elif command -v readlink 1>/dev/null 2>/dev/null; then
			(
				scriptPath="$0"

				# Follow each symlink hop manually since realpath is absent.
				while [ -L "$scriptPath" ]
				do
					_program_path_find_parentPath
					scriptPath="$(readlink "$scriptPath")"
				done

				_program_path_find_parentPath
				pwd -P
			)
		else
			# This approach will fail in corner cases where the script itself is a symlink in a path not parallel with the concrete script
			(
				scriptPath="$0"

				_program_path_find_parentPath
				pwd -P
			)
		fi

	fi
}
compile_fail()
{
	# Emit a tagged failure message on stderr, then abort the whole script.
	printf 'compile-rdma-core:FAIL:%s\n' "$1" >&2
	exit 1
}
compile_ensureRequiredBinariesArePresent()
{
	# $1 is a human-readable reason; every remaining argument is a binary
	# that must be resolvable on PATH. All missing binaries are reported
	# before failing once via compile_fail.
	local reason="$1"
	shift

	local candidate
	local anyMissing=false
	for candidate in "$@"
	do
		command -v "$candidate" 1>/dev/null 2>/dev/null && continue
		printf 'compile-rdma-core:%s\n' "The binary '$candidate' needs to be in the path" 1>&2
		anyMissing=true
	done

	if $anyMissing; then
		compile_fail "Please make sure that the missing binaries are installed because '$reason'"
	fi
}
# Install a Homebrew package only when it is not already installed.
# NOTE(review): the call below passes 'brew' as the *reason* argument of
# compile_ensureRequiredBinariesArePresent, leaving its binary list empty;
# other call sites follow the same pattern — confirm intended usage.
_compile_prepareForMacOSX_brewInstall()
{
	compile_ensureRequiredBinariesArePresent brew

	local packageName="$1"
	# `brew ls --versions` exits non-zero when the package is absent.
	if ! brew ls --versions "$packageName" 1>/dev/null 2>/dev/null; then
		brew install "$packageName" 1>&2
	fi
}
# Install the macOS build prerequisites (GNU userland, libelf, the
# musl cross-toolchain and autotools) via _compile_prepareForMacOSX_brewInstall.
compile_prepareForMacOSX()
{
	_compile_prepareForMacOSX_brewInstall gnu-sed
	_compile_prepareForMacOSX_brewInstall grep
	_compile_prepareForMacOSX_brewInstall make
	_compile_prepareForMacOSX_brewInstall libelf
	_compile_prepareForMacOSX_brewInstall coreutils
	_compile_prepareForMacOSX_brewInstall lemonrock/musl-cross/musl-cross
	_compile_prepareForMacOSX_brewInstall autoconf@2.69
	_compile_prepareForMacOSX_brewInstall automake
}
compile_parseCommandLine()
{
	# Accepts zero arguments, or a single -h/--help flag; anything else is fatal.
	if [ "$#" -eq 0 ]; then
		return 0
	fi

	if [ "$#" -eq 1 ]; then
		case "$1" in

			-h|--help)
				printf './compile\n'
				printf './compile -h|--help\n'
				printf 'Pass the environment variable NUM_JOBS to control the number of make jobs\n'
				exit 0
			;;

		esac
	fi

	compile_fail "Does not take any arguments"
}
# Resolve this script's folder, the project root two levels up, and the two
# configuration folders; defaults CARGO_MANIFEST_DIR / OUT_DIR when not run
# under cargo.
compile_findFolderPaths()
{
	programFolderPath="$(_program_path_find)"
	cd "$programFolderPath"/../.. 1>/dev/null 2>/dev/null
	homeFolderPath="$(pwd)"
	cd - 1>/dev/null 2>/dev/null

	# "${VAR+is_unset}" expands to empty only when VAR is unset, so -z here
	# means "not set at all" (set-but-empty is left alone).
	if [ -z "${CARGO_MANIFEST_DIR+is_unset}" ]; then
		export CARGO_MANIFEST_DIR="$homeFolderPath"
		printf 'build-under-cargo:%s\n' "Whilst this script (compile) is designed to be run under cargo, it can run independently. We're setting CARGO_MANIFEST_DIR to '$CARGO_MANIFEST_DIR'" 1>&2
	fi

	compileConfDFolderPath="$CARGO_MANIFEST_DIR"/compile.conf.d
	bindgenWrapperConfDFolderPath="$CARGO_MANIFEST_DIR"/bindgen-wrapper.conf.d

	if [ -z "${OUT_DIR+is_unset}" ]; then
		export OUT_DIR="$bindgenWrapperConfDFolderPath"/temporary
		printf 'build-under-cargo:%s\n' "Whilst this script (compile) is designed to be run under cargo, it can run independently. We're setting OUT_DIR to '$OUT_DIR'" 1>&2
	fi
}
# Recreate $OUT_DIR/root from scratch (mode 0700) so artifacts from a
# previous run can never leak into this build.
compile_createRootOutputFolderPath()
{
	rootOutputFolderPath="$OUT_DIR"/root
	rm -rf "$rootOutputFolderPath"
	mkdir -m 0700 -p "$rootOutputFolderPath"/
}
# Recreate the scratch bin folder and put it first on PATH so platform
# shims (e.g. g-prefixed GNU tools on macOS) take precedence.
compile_createTemporaryBinariesPath()
{
	rm -rf "$additionalPath"
	mkdir -m 0700 -p "$additionalPath"
	export PATH="$additionalPath":"$PATH"
}
# Detect the host platform and set up the musl cross-toolchain paths plus the
# make job count (NUM_JOBS wins; otherwise CPU count). Sets the globals
# platform, muslIncludeFolderPath, targetSysRootFolderPath, numberOfMakeJobs.
compile_platformSpecificPreparation()
{
	compile_ensureRequiredBinariesArePresent uname
	platform="$(uname)"

	# 0 is a sentinel meaning "auto-detect below".
	if [ -z "${NUM_JOBS+undefined}" ]; then
		numberOfMakeJobs=0
	else
		numberOfMakeJobs="$NUM_JOBS"
	fi

	case "$platform" in

		Darwin)
			compile_prepareForMacOSX

			compile_ensureRequiredBinariesArePresent brew
			export PATH="$(brew --prefix coreutils)"/libexec/gnubin:"$(brew --prefix gnu-sed)"/libexec/gnubin:"$PATH"

			# Add g-prefixed binaries to the PATH
			local binary
			for binary in grep egrep fgrep
			do
				ln -s "$(brew --prefix grep)"/bin/g"${binary}" "$additionalPath"/"$binary"
			done

			muslIncludeFolderPath="$(brew --prefix lemonrock/musl-cross/musl-cross)"/libexec/x86_64-linux-musl/include
			targetSysRootFolderPath="$(brew --prefix lemonrock/musl-cross/musl-cross)"/libexec/"$configureHost"

			if [ $numberOfMakeJobs -eq 0 ]; then
				compile_ensureRequiredBinariesArePresent sysctl
				numberOfMakeJobs="$(sysctl -n hw.ncpu)"
			fi
		;;

		Linux)
			compile_ensureRequiredBinariesArePresent make sed x86_64-linux-musl-gcc x86_64-linux-musl-ar rm mkdir rsync cat
			muslIncludeFolderPath='/usr/include'
			targetSysRootFolderPath='/usr'

			if [ $numberOfMakeJobs -eq 0 ]; then
				compile_ensureRequiredBinariesArePresent grep
				numberOfMakeJobs="$(grep -c '^processor' /proc/cpuinfo)"
			fi
		;;

		*)
			compile_fail "Only Darwin (Mac OS X) and Linux (specifically, Alpine Linux) are supported at this time"
		;;

	esac
}
# Load compile.conf.d/compile.configuration.sh, which is expected to define
# compile_library_name and the compile_library() callback used by compile_main.
compile_sourceConfigurationFile()
{
	local configurationFilePath="$compileConfDFolderPath"/compile.configuration.sh
	if [ ! -s "$configurationFilePath" ]; then
		# Bug fix: this previously called the undefined helper
		# 'build_under_cargo_fail', which crashed with "command not found"
		# instead of printing this diagnostic.
		compile_fail "Configuration file '$configurationFilePath' is not present, not readable or empty"
	fi
	. "$configurationFilePath"
}
# Copy the pristine library sources (minus .git) into the output root so
# patches can be applied without touching the checked-in tree.
compile_makeCopyToAlter()
{
	rsync --archive --quiet --exclude=.git "$CARGO_MANIFEST_DIR"/lib/"$compile_library_name"/ "$rootOutputFolderPath"/
}
# Overlay any files from compile.conf.d/patches onto the working copy.
compile_patch()
{
	if [ -e "$compileConfDFolderPath"/patches ]; then
		rsync --archive --quiet "$compileConfDFolderPath"/patches/ "$rootOutputFolderPath"/
	fi
}
compile_finish()
{
	# Drop an empty marker file so later runs can detect a completed build,
	# then announce completion on stderr.
	: >"$rootOutputFolderPath"/.compiled
	printf '\n\n\n\nCOMPILE FINISHED\n\n\n' 1>&2
}
# Orchestrates the whole build: parse the CLI, lay out folders, prepare the
# platform toolchain, source the per-library configuration, then build.
compile_main()
{
	local configureHost='x86_64-linux-musl'
	local compilerPrefix="${configureHost}"-

	compile_parseCommandLine "$@"

	local programFolderPath
	local homeFolderPath
	local compileConfDFolderPath
	compile_findFolderPaths

	local rootOutputFolderPath
	compile_createRootOutputFolderPath

	local additionalPath="$rootOutputFolderPath"/PATH
	compile_createTemporaryBinariesPath

	local platform
	local muslIncludeFolderPath
	local targetSysRootFolderPath
	local numberOfMakeJobs
	compile_platformSpecificPreparation

	local compile_library_name
	compile_sourceConfigurationFile

	compile_makeCopyToAlter
	compile_patch
	# compile_library is supplied by the sourced configuration file.
	compile_library
	compile_finish
}
compile_main "$@"
| true
|
9280f48394f2b954c049afc01cc10902a85aabe8
|
Shell
|
zhp834158133/GreenMall
|
/bin/backup
|
UTF-8
| 1,271
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/bash
# Purpose: back up MySQL databases (one bzip2-compressed dump per database).
# Written: 2010/12/06
# Run hourly via cron:
# 0 */1 * * * /bin/bash /root/backup/backup.sh
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin
export PATH
# Database user
dbuser='admin'
# Database password
dbpasswd='admin'
# Database names; several can be listed separated by spaces, e.g.: test test1 test2
dbname='jeesite'
# Timestamp used in backup file names
backtime=$(date +%Y%m%d%H%M%S)
# Log file path
logpath='/root/backup'
# Data (dump) path
datapath='/root/backup'
# Log header (bug fix: a stray full-width quote character used to be echoed
# in front of this text, corrupting every log header line)
echo "备份时间为${backtime},备份数据库表 ${dbname} 开始" >> "${logpath}/mysqllog.log"
# Dump each database in turn; $dbname is intentionally unquoted so the
# space-separated list word-splits.
for table in $dbname; do
	# Bug fix: the old code captured mysqldump's (already redirected) stdout
	# into an unused variable named 'source' and hung the stderr redirect on
	# the assignment; run the dump directly and branch on its exit status.
	if mysqldump -u "${dbuser}" -p"${dbpasswd}" "${table}" > "${logpath}/${backtime}.sql" 2>> "${logpath}/mysqllog.log"; then
		cd "${datapath}" || exit 1
		# Compress the dump to save disk space
		tar jcf "${table}${backtime}.tar.bz2" "${backtime}.sql" > /dev/null
		# Keep only the compressed archive
		rm -f "${datapath}/${backtime}.sql"
		echo "数据库表 ${dbname} 备份成功!!" >> "${logpath}/mysqllog.log"
	else
		echo "数据库表 ${dbname} 备份失败!!" >> "${logpath}/mysqllog.log"
	fi
done
| true
|
8d6f90f7136fc0bd2fbca71bec9a1fe3e58da0bb
|
Shell
|
owainkenwayucl/install-scripts
|
/scripts/opensbli/build_archer_bench_intel2017.sh
|
UTF-8
| 728
| 2.96875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash -l
# Build the OPS MPI backend and the OpenSBLI MPI benchmark with the Intel 2017
# toolchain on the UCL cluster. Abort on the first failing command.
set -e
# Load helper functions (module_maker_inc.sh, require_inc.sh) from the
# includes directory next to this script; override location via $includes_dir.
for i in ${includes_dir:=$(dirname $0 2>/dev/null)/includes}/{module_maker,require}_inc.sh; do . $i; done
# Environment modules required for this build.
require gcc-libs/4.9.2
require compilers/intel/2017/update1
require mpi/intel/2017/update1/intel
require hdf/5-1.8.15-p1-impi/intel-2015-update2
# OPS / OpenSBLI build configuration (Intel MPI wrappers, cluster paths).
export OPS_INSTALL_PATH=$HOME/Source/archer-benchmarks/apps/OpenSBLI/source/OPS/ops
export MPICC_CC=icc
export MPICXX_CXX=icpc
export OPS_COMPILER=intel
export MPI_INSTALL_PATH=/shared/ucl/apps/intel/2017.Update1/impi/2017.1.132/intel64
export HDF5_INSTALL_PATH=/shared/ucl/apps/hdf5/1.8.15-p1-impi/intel-2015-update2
# Build the OPS C/MPI backend first...
cd ${OPS_INSTALL_PATH}/c
make clean
make -f Makefile.ucl mpi
# ...then the OpenSBLI MPI benchmark that links against it.
cd ../../../Benchmark
make clean
make -f Makefile.ucl OpenSBLI_mpi
| true
|
fdefff22f4853fe828646a6459b19b3ba4c60469
|
Shell
|
naro/collective.externalimageeditor
|
/scripts/tests_sync_from_generic.sh
|
UTF-8
| 470
| 2.625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Refresh selected test scaffolding files from the newest generic project
# tarball in ~/cgwb into this checkout.
PROJECT="collective.externalimageeditor"
IMPORT_URL="git@github.com:collective/collective.externalimageeditor.git"
cd $(dirname $0)/..
# Unpack the newest matching archive into a scratch directory t/.
# NOTE(review): if several archives match, `ls -1t` passes the extra names to
# tar as member names — assumes exactly one (or newest-first is acceptable).
[[ ! -d t ]] && mkdir t
rm -rf t/*
tar xzvf $(ls -1t ~/cgwb/$PROJECT*z) -C t
# Files to copy back from the unpacked tree into the working copy.
files="
src/collective/externalimageeditor/tests/globals.py
src/collective/externalimageeditor/tests/base.py
src/collective/externalimageeditor/testing.py
"
for f in $files;do
rsync -azv t/$PROJECT/$f $f
done
# vim:set et sts=4 ts=4 tw=80:
| true
|
c21be6dfd2e66d61fd81bb10fa3363a9235d590e
|
Shell
|
jiandewu/bioinfo
|
/Scripts/Liu_prj_pipeline.sh
|
UTF-8
| 1,155
| 2.90625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# ref: https://gencore.bio.nyu.edu/variant-calling-pipeline/
# no space in the directory path
PRJDIR=/media/jwu/data3/Liu_project
GE_REF=/media/jwu/data2/RefGene/hg38ref/hg38.fa
RESULT=pipe
LOGFILE=${PRJDIR}/${RESULT}.log
STARTSTEP=0
case $STARTSTEP in
0)
find ${PRJDIR} -name "*.clean.fq.gz" -printf "%h\n" | uniq | parallel mkdir -p {}/${RESULT}
;&
[0-1])
echo "step 1. time: $(date)" >${LOGFILE}
# /media/jwu/data3/Liu_project/F13TSFUSAT0104_HUMjxxX/result/1-N/clean_data/_131129_I649_FCC332NACXX_L1_RHUMjxxXAAAAAAA-16_1.clean.fq.gz
# @RG\tID:flowcell.sample.lane\tLB:RHUMjxxXAAAAAAA-16\tPL:ILLUMINA\tPM:HISEQ\tSM:sample\tPU:flowcell.lane.sample
find ${PRJDIR} -name "*.clean.fq.gz" -printf "%h/_%f\n" | grep -v _2.clean.fq.gz |
parallel --colsep '_' "bwa mem -M -R '@RG\tID:{7}.{5}.{8}\tLB:{9}\tPL:ILLUMINA\tPM:HISEQ\tSM:{5}\tPU:{5}.{6}.{7}.{8}.{9}' ${GE_REF} {1}_{2}_{3}_{4}{5}_{6}_{7}_{8}_{9}_1.clean.fq.gz {1}_{2}_{3}_{4}{5}_{6}_{7}_{8}_{9}_2.clean.fq.gz > {1}_{2}_{3}_{4}${RESULT}/{5}_{6}_{7}_{8}_{9}.clean.fq.gz.script.sam"
;;
esac
NEEDfixMisencodedQuals=-fixMisencodedQuals
source ${GITSRC}/bioinfo/Scripts/step2-21.sh
| true
|
9b4ea47a00cafdad7edfd221be4efaec78928a7f
|
Shell
|
j3r3mias/random
|
/format-setups/format-debian.sh
|
UTF-8
| 10,991
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
# Debian version
list=(build-essential autoconf libtool pkg-config python-dev python3-dev \
python-pip python3-pip texlive-full terminator gvim vim vim-gtk iptraf \
audacity vlc mediainfo unrar wxhexeditor ht bless binwalk wireshark \
aircrack-ng wifite nmap hydra zbar-tools gcc-4.8 g++-4.8 gcc-5 g++-5 gcc-6 \
g++-6 git curl vinetto pdf-presenter-console libpcap0.8-dev cmake strace \
ltrace smplayer alsa-utils network-manager python-software-properties \
apt-files gimp inkscape chkconfig htop libgtkmm.3.0-dev libssl-dev gettext \
libarchive-devsudo cmake-curses-gui hexchat dcfldd torbrowser-launcher \
higan mame xboxdrv lib32stdc++6 mtr-tiny dkms virtualbox cups \
libimage-exiftool-perl steghide imagemagick lzip apache2 ltrace deluge \
jd-gui spotify-client bsdiff wine printer-driver-pxljr xclip xcape okular \
meld intltool autoconf automake libglib2.0-dev gawk libgtk-3-dev \
libxml2-dev libxv-dev zsnes libsdl1.2-dev libpulse-dev \
libportaudio-ocaml-dev libportaudio2 sublime openvpn python-qt4 metasploit \
hplip mixxx radare2 anthy ibus-anthy openmpi-bin openmpi-common \
openmpi-doc libopenmpi2 libopenmpi-dev gnome-tweak-tool sl whois nikto)
piplist=(hashlib jedi pwn xortool hashid sympy colorama Crypto pycrypto)
function check()
{
    # Ensure apt package $1 is installed, installing it when missing.
    # Returns 1 when the package is (now) present, 0 when installation failed
    # (inverted from shell convention, but preserved for caller compatibility).
    name=$1
    # Silence apt's "unstable CLI" warnings for the rest of this shell.
    exec 2> /dev/null
    status=$(apt list --installed | grep 'installed' | grep -- "$name" | \
        tail -n 1 | awk -F/ '{print $1}')
    if [[ -n $status ]]
    then
        echo " [+] Package $name found!"
        return 1
    else
        echo " [-] Package $name not found."
        echo -n " [!] Trying to install: "
        apt-get install -y "$name"
        # Re-query after the install attempt.
        status=$(apt list --installed | grep 'installed' | grep -- "$name" | \
            tail -n 1 | awk -F/ '{print $1}')
        # BUG FIX: the original tested `[[ ! -z $status* ]]`; inside [[ ]] the
        # trailing '*' is a literal character, so the string was never empty
        # and the function reported "OK" even when installation failed.
        if [[ -n $status ]]
        then
            echo "OK"
            return 1
        else
            echo "FAIL"
            return 0
        fi
    fi
}
function pipcheck()
{
    # Ensure pip package $1 is installed, installing it when missing.
    # Returns 1 when the package is (now) present, 0 when installation failed
    # (same inverted convention as check() above).
    name=$1
    # BUG FIX: the original filtered `pip list` through grep 'installed', but
    # `pip list` prints "<name> <version>" lines containing no such word, so
    # the package could never be detected. Match the package name directly;
    # -i because pip normalizes name case.
    status=$(pip list | grep -i -- "$name" | \
        tail -n 1 | awk '{print $1}')
    if [[ -n $status ]]
    then
        return 1
    else
        echo " [-] Package $name not found."
        echo -n " [+] Trying to install: "
        pip install "$name"
        status=$(pip list | grep -i -- "$name" | \
            tail -n 1 | awk '{print $1}')
        # BUG FIX: `[[ ! -z $status* ]]` was always true (literal '*');
        # test the actual re-queried value instead.
        if [[ -n $status ]]
        then
            echo "OK"
            return 1
        else
            echo "FAIL"
            return 0
        fi
    fi
}
if (( $EUID != 0 ))
then
echo " [!] Please, execute as root!"
exit
fi
user="j3r3mias"
homepath="/home/$user"
echo "Let's go!"
version=$(uname -a)
# if [[ $version == *Ubuntu* ]]
# then
# echo " [+] Ubuntu system. No need to add repositories."
# elif [[ $version == *kali* ]]
# then
# echo " [+] Kali system. Adding some repositories."
# aptpath="/etc/apt/sources.list"
# echo -n "" > $aptpath
# echo "deb http://kali.cs.nctu.edu.tw/kali kali-dev main contrib non-free" >> $aptpath
# echo "deb http://kali.cs.nctu.edu.tw/kali kali-dev main/debian-installer" >> $aptpath
# echo "deb-src http://kali.cs.nctu.edu.tw/kali kali-dev main contrib non-free" >> $aptpath
# echo "deb http://kali.cs.nctu.edu.tw/kali kali main contrib non-free" >> $aptpath
# echo "deb http://kali.cs.nctu.edu.tw/kali kali main/debian-installer" >> $aptpath
# echo "deb-src http://kali.cs.nctu.edu.tw/kali kali main contrib non-free" >> $aptpath
# echo "deb http://kali.cs.nctu.edu.tw/kali-security kali/updates main contrib non-free" >> $aptpath
# echo "deb-src http://kali.cs.nctu.edu.tw/kali-security kali/updates main contrib non-free" >> $aptpath
# echo "deb http://kali.cs.nctu.edu.tw/kali kali-bleeding-edge main" >> $aptpath
# echo "deb http://repository.spotify.com stable non-free" >> $aptpath
# else
# echo " [+] Unknow system."
# fi
# echo " [+] Adding repositories."
#
# echo " [+] Updating repositories."
# apt-get update
# echo " [+] Upgrading repositories."
# apt-get upgrade -y
# echo " [+] Updating repositories."
# apt-get update
# updatedb
#
# for current in ${list[@]}
# do
# echo " [+] Checking $current."
# check $current
# done
# for current in ${piplist[@]}
# do
# echo " [+] Checking $current."
# pipcheck $current
# done
cd $homepath
git config --global user.email 'j3r3miasmg@gmail.com'
git config --global user.name 'Jeremias Moreira Gomes'
### PATHOGEN FOR VIM
echo " [+] Installing pathogen for vim"
mkdir -p $homepath/.vim/autoload $homepath/.vim/bundle
curl -LSso $homepath/.vim/autoload/pathogen.vim https://tpo.pe/pathogen.vim
### CONFIGURATIONS FOR VIM
echo " [+] Creating .vimrc"
cd $homepath
file=.vimrc
echo "execute pathogen#infect()" > $file
echo " call pathogen#helptags()" >> $file
echo " filetype plugin indent on" >> $file
echo " syntax on \"Para utilizar com o tema solarized" >> $file
echo " set showmatch \"mostra caracteres ( { [ quando fechados" >> $file
echo " set textwidth=80 \"largura do texto" >> $file
echo " set nowrap \"sem wrap (quebra de linha)" >> $file
echo " set mouse=a \"habilita todas as acoes do mouse" >> $file
echo " set nu \"numeracao de linhas" >> $file
echo " set ts=4 \"Seta onde o tab para" >> $file
echo " set sw=4 \"largura do tab" >> $file
echo " set et \"espacos em vez de tab" >> $file
echo " \"set spell spelllang=pt\"" >> $file
echo " set nospell" >> $file
echo " set background=light" >> $file
echo " \"set background=dark" >> $file
echo " let g:ycm_min_num_of_chars_for_completion = 1" >> $file
echo " set clipboard=unnamedplus \"Permite copiar direto para o clipboard" >> $file
echo " set laststatus=2" >> $file
echo " set t_Co=256" >> $file
echo " let g:airline_powerline_fonts=1" >> $file
echo " set relativenumber" >> $file
echo " highlight OverLength ctermbg=red ctermfg=white guibg=#592929" >> $file
echo " match OverLength /\%81v.*/" >> $file
cd $homepath/.vim/
git init
git submodule add https://github.com/scrooloose/syntastic.git bundle/syntastic
git submodule add https://github.com/ajh17/VimCompletesMe.git bundle/vim-completes-me
git submodule add https://github.com/jiangmiao/auto-pairs.git bundle/auto-pairs
git submodule add https://github.com/vim-scripts/The-NERD-tree.git bundle/nerdtree
git submodule add https://github.com/bling/vim-airline.git bundle/vim-airline
git submodule add https://github.com/altercation/vim-colors-solarized.git bundle/vim-colors-solarized
git submodule add http://github.com/tpope/vim-fugitive.git bundle/fugitive
vim -u NONE -c "helptags vim-fugitive/doc" -c q
git submodule add https://github.com/davidhalter/jedi-vim.git bundle/jedi-vim
git submodule init
git submodule update
git submodule foreach git submodule init
git submodule foreach git submodule update
# cd $homepath
#
# ## TELEGRAM
# echo " [+] Installing Telegram (desktop)."
# telegram=telegram.tar.xz
# cd /opt/
# curl -LSso $telegram https://telegram.org/dl/desktop/linux
# tar xvf $telegram
# rm -rf $telegram
# cd Telegram
# mv Telegram /usr/bin
# cd /opt/
# rm -rf Telegram/
#
# ### PULSEAUDIO
# echo " [+] Fixing pulseaudio."
# killall -9 pulseaudio
# systemctl --user enable pulseaudio && systemctl --user start pulseaudio
#
# ### PEDA
# echo " [+] Downloading and installing peda."
# cd /opt/
# git clone https://github.com/longld/peda.git peda
# echo “source peda/peda.py” > $homepath/.gdbinit
#
# ### Installing Grub-Customizer
# echo " [+] Downloading and installing Grub-Customizer."
# cd /opt/
# wget https://launchpadlibrarian.net/172968333/grub-customizer_4.0.6.tar.gz
# tar xvf grub-customi*
# cd grub-customi*
# cmake . && make -j3
# make install
#
# ### Installing snes9x
# echo " [+] Downloading and installing snes9x."
# cd /opt/
# git clone https://github.com/snes9xgit/snes9x.git
# cd snes9x/unix
# autoconf
# ./configure --enable-netplay
# make
# cp snes9x /usr/bin/
# wget http://maxolasersquad.com/snes9x.png -O /usr/share/pixmaps/snes9x.png
# cd ../gtk
# ./autogen.sh
# ./configure --with-gtk3
# make
# cp snes9x-gtk /usr/bin/
# deskFile='/usr/share/applications/snes9x.desktop'
# echo -n '' > $deskFile
# echo '[Desktop Entry]' >> $deskFile
# echo 'Version=1.0' >> $deskFile
# echo 'Name=Snes9x GTK' >> $deskFile
# echo 'Comment=A portable, freeware Super Nintendo Entertainment System (SNES)' >> $deskFile
# echo 'emulator.' >> $deskFile
# echo 'GenericName=Snes9x GTK' >> $deskFile
# echo 'Keywords=Games' >> $deskFile
# echo 'Exec=snes9x-gtk' >> $deskFile
# echo 'Terminal=false' >> $deskFile
# echo 'X-MultipleArgs=false' >> $deskFile
# echo 'Type=Application' >> $deskFile
# echo 'Icon=/usr/share/pixmaps/snes9x.png' >> $deskFile
# echo 'Categories=Game' >> $deskFile
# echo 'StartupWMClass=Snes9x' >> $deskFile
# echo 'StartupNotify=true' >> $deskFile
#
# cd $homepath/Development
# # # git clone git@bitbucket.org:jeremiasmg/cryptopals.git
# # # git clone git@github.com:j3r3mias/random.git
# git clone git@github.com:j3r3mias/ctf.git
# git clone git@github.com:j3r3mias/competitive-programming.git
# git clone git@github.com:j3r3mias/teleHaF.git
# #
#
# # Other security tools
# # cd $homepath/Development
# # git clone https://github.com/aboul3la/Sublist3r.git
# # cd Sublist3r
# # sudo pip install -r requirements.txt
# # cd ..
# #
# # git clone https://github.com/darkoperator/dnsrecon.git
# # cd dnsrecon
# # sudo pip3 install -r requirements.txt
# # cd ..
# #
# # git clone https://github.com/UltimateHackers/Arjun.git
# # git clone https://github.com/maurosoria/dirsearch.git
# #
# # git clone https://github.com/maK-/parameth
# # cd parameth
# # pip install -r requirements.txt
# # cd ..
# #
# # git clone https://github.com/1N3/Sn1per.git
# # cd Sn1per
# # ./install.sh
# # cd ..
#
#
# echo "[+] Creating alias."
# echo '' >> $homepath/.bashrc
# echo '# Telegram alias' >> $homepath/.bashrc
# echo "alias telegram='cd ~; nohup Telegram &'" >> $homepath/.bashrc
# echo "alias Telegram='cd ~; nohup Telegram &'" >> $homepath/.bashrc
# echo "alias random='cd $homepath/Development/random'" >> $homepath/.bashrc
# echo "alias ctf='cd $homepath/Development/ctf'" >> $homepath/.bashrc
# echo "alias CTF='cd $homepath/Development/ctf'" >> $homepath/.bashrc
# echo "alias uri='cd $homepath/Development/competitive-programming/uri'" >> $homepath/.bashrc
# echo "alias daily='cd $homepath/Development/competititve-programming/dailyprogrammer'" >> $homepath/.bashrc
#
# echo " [+] Creating new directory path view."
# echo '' >> $homepath/.bashrc
# echo '' >> $homepath/.bashrc
# echo '' >> $homepath/.bashrc
#
# echo " [+] Increasing history size."
# sed -i 's/\(HISTSIZE=\).*/\1100000/;s/\(HISTFILESIZE=\).*/\1200000/' $homepath/.bashrc
#
# echo " [+] ROT13."
# echo '# ROT13' >> $homepath/.bashrc
# echo "alias rot13=\"tr \'[A-Za-z]\' \'[N-ZA-Mn-za-m]\'\"" >> $homepath/.bashrc
# echo '' >> $homepath/.bashrc
#
# updatedb
# echo " OK"
# echo ""
# echo " [!] All done."
| true
|
38a33bd190b3751fe74a54caf1233ac80b31539c
|
Shell
|
Reiti/ParallelFFT
|
/benchmark.sh
|
UTF-8
| 385
| 3.5
| 4
|
[] |
no_license
|
#!/bin/bash
# Benchmark a given FFT binary over input sizes 2^START_EXP..2^END_EXP and
# record the median timing per size in OUTPUT as "<exp>;<median>" lines.
FFT=$1          # path to the FFT executable under test
START_EXP=$2    # smallest input-size exponent
END_EXP=$3      # largest input-size exponent
ITERATIONS=$4   # timed runs per size; the median is reported
OUTPUT=$5       # CSV results file
# NOTE(review): this header goes to a fixed file "benchmark_res", not $OUTPUT —
# presumably a run log; confirm it is intended.
echo $FFT >> benchmark_res
for ((exp = $START_EXP; exp <= END_EXP; exp++))
do
# Generate 2^exp samples, run $ITERATIONS timed iterations, collect timings.
res="$(./num_gen -e $exp | $FFT -b $ITERATIONS)"
# Sort the timings numerically and pick the median element.
sorted=($(printf '%s\n' "${res[@]}"|sort -n))
echo "$exp;${sorted[$ITERATIONS/2]}" >> $OUTPUT
done
# Dumps the individual timings of the LAST size only ("sorted" survives the
# loop above) — looks like debug output; confirm this is intended.
for ((i = 0; i < $ITERATIONS; i++))
do
echo "${sorted[$i]}"
done
| true
|
fe6eccb0b1bddae4148414464fcb81ff2457f79b
|
Shell
|
gidjin/docker-wordpress
|
/my_init.d/02_generate_wordpress_conf.sh
|
UTF-8
| 703
| 3.4375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Restart Apache, then (re)generate wp-config.php; perform the one-time
# WordPress database install only when wp-config.php did not exist yet.
echo "Stop apache"
service apache2 stop
echo "Start up apache"
service apache2 start
# wp-config.php already present: regenerate it but skip the installer.
# (NB: the original comment here said "If not installed", which was inverted.)
if [ -e "/var/www/wordpress/wp-config.php" ]
then
echo "Generating wp-config.php file"
rm /var/www/wordpress/wp-config.php
sudo -u www-data /bin/wp core config
echo "Skipping wordpress install"
else
# First run: generate the config, then install WordPress into the database
# using the SITE_* environment variables supplied by the container.
echo "Generating wp-config.php file"
sudo -u www-data /bin/wp core config
echo "Installing wp database data"
sudo -u www-data /bin/wp core install --title="$SITE_TITLE" --admin_user="$SITE_ADMIN_USER" --admin_password="$SITE_ADMIN_PASS" --admin_email="$SITE_ADMIN_EMAIL"
fi
echo "Wordpress Installed admin user: $SITE_ADMIN_USER"
echo "title: $SITE_TITLE"
| true
|
090adeb28c88027dd9abd960fb84ce3fe3bbaa90
|
Shell
|
quanta-computing/debian-packages
|
/zabbix-quanta/zabbix-quanta-iostats-1.0.0/scripts/bin/quanta_disk_discovery.sh
|
UTF-8
| 536
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/sh
#
# Enumerate installed hard drives (sdX / hdX) and print them as a JSON
# document suitable for Zabbix low-level discovery rules.
#
# Usage: quanta_disk_discovery.sh
#
printf '{"data":['
sep=""
for dev in $(awk '$4 ~ /^[sh]d[a-z]$/ {print $4}' /proc/partitions); do
    # Emit a comma before every entry except the first.
    printf '%s' "$sep"
    sep=","
    # Hardware sector size, 0 when the sysfs attribute is absent.
    sector_size=0
    if [ -f "/sys/block/$dev/queue/hw_sector_size" ]; then
        sector_size=$(cat "/sys/block/$dev/queue/hw_sector_size")
    fi
    printf '{"{#DISKNAME}":"%s","{#SECTORSIZE}":%s}' "$dev" "$sector_size"
done
printf ']}\n'
| true
|
4e1904f4447cafdd4cb9e704c32588da5e7d483b
|
Shell
|
linneudm/dotfiles
|
/.zshrc
|
UTF-8
| 1,619
| 2.9375
| 3
|
[] |
no_license
|
export ZSH="/home/pylife/.oh-my-zsh"
ZSH_THEME="spaceship"
plugins=(git zsh-autosuggestions python django)
source $ZSH/oh-my-zsh.sh
### Added by Zinit's installer
if [[ ! -f $HOME/.zinit/bin/zinit.zsh ]]; then
print -P "%F{33}▓▒░ %F{220}Installing DHARMA Initiative Plugin Manager (zdharma/zinit)…%f"
command mkdir -p "$HOME/.zinit" && command chmod g-rwX "$HOME/.zinit"
command git clone https://github.com/zdharma/zinit "$HOME/.zinit/bin" && \
print -P "%F{33}▓▒░ %F{34}Installation successful.%f%b" || \
print -P "%F{160}▓▒░ The clone has failed.%f%b"
fi
source "$HOME/.zinit/bin/zinit.zsh"
autoload -Uz _zinit
(( ${+_comps} )) && _comps[zinit]=_zinit
export NVM_SYMLINK_CURRENT="true"
export NVM_LAZY_LOAD=true
export NVM_DIR=~/.nvm
[ -s "$NVM_DIR/nvm.sh" ] && . "$NVM_DIR/nvm.sh"
fpath=($fpath "/home/pylife/.zfunctions")
# Set Spaceship ZSH as a prompt
autoload -U promptinit; promptinit
prompt spaceship
SPACESHIP_PROMPT_ADD_NEWLINE=false
SPACESHIP_CHAR_SYMBOL="❯"
SPACESHIP_CHAR_SUFFIX=" "
SPACESHIP_PROMPT_ORDER=(
user # Username section
dir # Current directory section
host # Hostname section
git # Git section (git_branch + git_status)
hg # Mercurial section (hg_branch + hg_status)
exec_time # Execution time
line_sep # Line break
vi_mode # Vi-mode indicator
jobs # Background jobs indicator
exit_code # Exit code section
char # Prompt character
)
zinit light denysdovhan/spaceship-prompt
source ~/.local/bin/virtualenvwrapper.sh
| true
|
e3a3d8e49f0e96f24a9795ba6a74b0053fcd0c3d
|
Shell
|
Tinklelu/hello-world
|
/blue-green-app-deployment/ci/tasks/update-routes
|
UTF-8
| 916
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
# Blue/green route switch for Cloud Foundry: point the "main" route at the
# next app instance, then detach it from the previously live instance.
# Required env: PWS_API, PWS_USER, PWS_PWD, PWS_ORG, PWS_SPACE,
# PWS_APP_DOMAIN, PWS_APP_SUFFIX.
# Reads ./current-app-info/{next-app,current-app}.txt produced upstream.
set -xe
pwd
env
cf api $PWS_API --skip-ssl-validation
cf login -u $PWS_USER -p $PWS_PWD -o "$PWS_ORG" -s "$PWS_SPACE"
cf apps
cf routes
export PWS_DOMAIN_NAME=$PWS_APP_DOMAIN
export MAIN_ROUTE_HOSTNAME=main-$PWS_APP_SUFFIX
# Colour (blue/green) of the instance that should receive traffic next.
export NEXT_APP_COLOR=$(cat ./current-app-info/next-app.txt)
export NEXT_APP_HOSTNAME=$NEXT_APP_COLOR-$PWS_APP_SUFFIX
# Colour of the instance currently serving the main route.
export CURRENT_APP_COLOR=$(cat ./current-app-info/current-app.txt)
export CURRENT_APP_HOSTNAME=$CURRENT_APP_COLOR-$PWS_APP_SUFFIX
echo "Mapping main app route to point to $NEXT_APP_HOSTNAME instance"
cf map-route $NEXT_APP_HOSTNAME $PWS_DOMAIN_NAME --hostname $MAIN_ROUTE_HOSTNAME
cf routes
echo "Removing previous main app route that pointed to $CURRENT_APP_HOSTNAME instance"
# unmap-route may fail on the very first deployment (no previous route);
# tolerate that single step failing.
set +e
cf unmap-route $CURRENT_APP_HOSTNAME $PWS_DOMAIN_NAME --hostname $MAIN_ROUTE_HOSTNAME
set -e
echo "Routes updated"
cf routes
| true
|
965e9f71feac9d5213beef93897747d9dcb04c6a
|
Shell
|
lynch829/SAMC
|
/daemonE.sh
|
UTF-8
| 167
| 2.578125
| 3
|
[
"ECL-2.0"
] |
permissive
|
#!/bin/sh
# Run the daemonE job; when it exits non-zero, append the cron log to De.jam
# so the failure output is preserved for inspection.
if ! /usr/local/bin/python /home/mcqa/MCQA/daemonE_exec.py; then
    cat /tmp/cronjob_daemonE.log >> /home/mcqa/MCQA/De.jam
fi
| true
|
27f1700aec96806373b89901116b74120cae2dc2
|
Shell
|
silvexgit/alb-web-memcached
|
/setup-ec2.sh
|
UTF-8
| 1,100
| 2.578125
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#! /bin/bash
#
# Provision an Amazon Linux EC2 instance as a web host with Apache, PHP 7.3
# and the AWS ElastiCache memcached cluster client, then publish a simple
# per-host index page.
# https://docs.aws.amazon.com/AmazonElastiCache/latest/mem-ug/Appendix.PHPAutoDiscoverySetup.Installing.html
#
cd
sudo yum -y install httpd
sudo cp /usr/share/httpd/noindex/index.html /var/www/html/
sudo systemctl start httpd
# BUG FIX: the original ran `systemctl enable` with no unit name, which fails
# and leaves Apache disabled after reboot; the unit must be named explicitly.
sudo systemctl enable httpd
sudo yum -y install telnet
sudo yum -y install gcc-c++
sudo yum -y install amazon-linux-extras
sudo amazon-linux-extras enable php7.3
sudo yum -y clean metadata
sudo yum -y install php-cli php-pdo php-fpm php-json php-mysqlnd
# Install the ElastiCache cluster client PHP extension.
wget https://elasticache-downloads.s3.amazonaws.com/ClusterClient/PHP-7.3/latest-64bit
sudo tar -zxvf latest-64bit
sudo mv amazon-elasticache-cluster-client.so /usr/lib64/php/modules/
echo "extension=amazon-elasticache-cluster-client.so" | sudo tee --append /etc/php.d/50-memcached.ini
# Build a minimal index page showing this host's name.
echo "<html>" > /tmp/index.html
echo "<body>" >> /tmp/index.html
echo "<h2>" >> /tmp/index.html
echo " " >> /tmp/index.html
uname -n >> /tmp/index.html
echo "</h2>" >> /tmp/index.html
echo "</body>" >> /tmp/index.html
echo "</html>" >> /tmp/index.html
sudo cp /tmp/index.html /var/www/html/
sudo systemctl restart httpd
| true
|
6b839bed4240e654aacfb55243da38378621d80d
|
Shell
|
ximinez/rippled-scripts
|
/monitor
|
UTF-8
| 4,049
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
RIPPLED=${1:-$( dirname $0 )/rippled}
RPC_IP=${2:-127.0.0.1}
RPC_PORT=${3:-5015}
PEER_PORT=${4:-51235}
shift 4
unset LAST_LEDGER_FILE
if [[ $# -gt 0 ]]
then
if [[ -f "${1}" ]]
then
LAST_LEDGER_FILE="${1}"
shift
else
echo "File not found: ${1}"
exit 1
fi
fi
# echo $RIPPLED $RPC_PORT $PEER_PORT
if type -t tmux >& /dev/null
then
tmuxcommand=( tmux new-window -d -a -t monitor:$ )
if [[ ! -v TMUX ]]
then
cmd=( tmux new-session -d -s monitor sleep 600 )
echo "Run: ${cmd[@]}"
"${cmd[@]}" || true
fi
cmd=( "${tmuxcommand[@]}"
-n server_info
watch -c -d server_info ${RIPPLED} ${RPC_IP} ${RPC_PORT} )
echo "Run: ${cmd[@]}"
"${cmd[@]}" || true
cmd=( "${tmuxcommand[@]}"
-n balance
watch -d=permanent balance ${RIPPLED} ${RPC_IP} ${RPC_PORT} )
echo "Run: ${cmd[@]}"
"${cmd[@]}" || true
cmd=( "${tmuxcommand[@]}"
-n transactions
$( dirname $0)/redirect
"transactions ${RPC_IP}:${RPC_PORT} localhost ${RPC_PORT} ${LAST_LEDGER_FILE}"
${HOME}/rippled/transactions-output.txt )
echo "Run: ${cmd[@]}"
"${cmd[@]}" || true
# tmux set-option -w -t monitor:transactions remain-on-exit on
# --tab-with-profile=Persistent -t transactions --command="$( dirname $0)/redirect \"transactions ${RPC_IP}:${RPC_PORT} localhost ${RPC_PORT}\" ${HOME}/rippled/transactions-output.txt" \
cmd=( "${tmuxcommand[@]}"
-n peerfinder
watch -d "${RIPPLED} -q --rpc_ip=${RPC_IP}:${RPC_PORT} print | jq
'.result.app.peers.peerfinder | del(.livecache) | del(.bootcache)'"
)
echo "Run: ${cmd[@]}"
"${cmd[@]}" || true
cmd=( "${tmuxcommand[@]}"
-n get_counts
watch -c -d ${RIPPLED} -q --rpc_ip=${RPC_IP}:${RPC_PORT} get_counts 1 )
echo "Run: ${cmd[@]}"
"${cmd[@]}" || true
cmd=( "${tmuxcommand[@]}"
-n crawl
watch -d curl -k https://${RPC_IP}:${PEER_PORT}/crawl )
echo "Run: ${cmd[@]}"
"${cmd[@]}" || true
cmd=( "${tmuxcommand[@]}"
-n vl
watch -d curl -k
https://${RPC_IP}:${PEER_PORT}/vl/ED2677ABFFD1B33AC6FBC3062B71F1E8397C1505E1C42C64D11AD1B28FF73F4734
)
echo "Run: ${cmd[@]}"
"${cmd[@]}" || true
cmd=( "${tmuxcommand[@]}"
-n top
top )
echo "Run: ${cmd[@]}"
"${cmd[@]}" || true
cmd=( "${tmuxcommand[@]}"
-n bandwidth
redirect 'bwm-ng -o csv -c 0 -t 10000' ${HOME}/rippled/bandwidth.csv
)
echo "Run: ${cmd[@]}"
"${cmd[@]}" || true
cmd=( "${tmuxcommand[@]}"
-n fee
watch -c -d ${RIPPLED} -q --rpc_ip=${RPC_IP}:${RPC_PORT} fee )
echo "Run: ${cmd[@]}"
"${cmd[@]}" || true
if [[ ! -v TMUX ]]
then
cmd=( tmux attach -t monitor )
echo "Run: ${cmd[@]}"
"${cmd[@]}"
fi
else
gnome-terminal --window-with-profile=Unnamed -t server_info --command="watch -c -d server_info ${RIPPLED} ${RPC_IP} ${RPC_PORT}" \
--tab-with-profile=Persistent -t balance --command="watch -d=permanent balance ${RIPPLED} ${RPC_IP} ${RPC_PORT}" \
--tab-with-profile=Persistent -t transactions --command="$( dirname $0)/redirect \"transactions ${RPC_IP}:${RPC_PORT} localhost ${RPC_PORT}\" ${HOME}/rippled/transactions-output.txt" \
--tab-with-profile=Persistent -t peerfinder --command="watch -d \"${RIPPLED} -q --rpc_ip=${RPC_IP}:${RPC_PORT} print | jq '.result.app.peers.peerfinder | del(.livecache) | del(.bootcache)'\"" \
--tab-with-profile=Persistent -t get_counts --command="watch -c -d ${RIPPLED} -q --rpc_ip=${RPC_IP}:${RPC_PORT} get_counts 1" \
--tab-with-profile=Persistent -t crawl --command="watch -d curl -k https://${RPC_IP}:${PEER_PORT}/crawl" \
--tab-with-profile=Persistent -t vl --command="watch -d curl -k https://${RPC_IP}:${PEER_PORT}/vl/ED2677ABFFD1B33AC6FBC3062B71F1E8397C1505E1C42C64D11AD1B28FF73F4734" \
--tab-with-profile=Persistent -t top --command="top" \
--tab-with-profile=Persistent -t bandwidth --command="redirect 'bwm-ng -o csv -c 0 -t 10000' ${HOME}/rippled/bandwidth.csv" \
--tab-with-profile=Persistent -t fee --command="watch -c -d ${RIPPLED} -q --rpc_ip=${RPC_IP}:${RPC_PORT} fee"
fi
| true
|
0eed99e1512a16e42cf4c1431fcc2a8f7622094a
|
Shell
|
abejgonzalez/chipyard
|
/.circleci/run-firesim-scala-tests.sh
|
UTF-8
| 2,087
| 3.25
| 3
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# create the different verilator builds
# argument is the make command string
# turn echo on and error on earliest command
set -ex
# get shared variables
SCRIPT_DIR="$( cd "$( dirname "$0" )" && pwd )"
source $SCRIPT_DIR/defaults.sh
# call clean on exit
trap clean EXIT
# Directory locations for handling firesim-local installations of libelf/libdwarf
# This would generally be handled by build-setup.sh/firesim-setup.sh
firesim_sysroot=lib-install
local_firesim_sysroot=$LOCAL_FIRESIM_DIR/$firesim_sysroot
remote_firesim_sysroot=$REMOTE_FIRESIM_DIR/$firesim_sysroot
cd $LOCAL_CHIPYARD_DIR
./scripts/init-submodules-no-riscv-tools.sh
cd $LOCAL_CHIPYARD_DIR/sims/firesim/sim/firesim-lib/src/main/cc/lib
git submodule update --init elfutils libdwarf
cd $LOCAL_CHIPYARD_DIR/sims/firesim
mkdir -p $local_firesim_sysroot
./scripts/build-libelf.sh $local_firesim_sysroot
./scripts/build-libdwarf.sh $local_firesim_sysroot
cd $LOCAL_CHIPYARD_DIR
# replace the workspace dir with a local dir so you can copy around
sed -i -E 's/(workspace=).*(\/tools)/\1$PWD\2/g' .sbtopts
make -C $LOCAL_CHIPYARD_DIR/tools/dromajo/dromajo-src/src
# set stricthostkeychecking to no (must happen before rsync)
run "echo \"Ping $SERVER\""
clean
# copy over riscv/esp-tools, and chipyard to remote
run "mkdir -p $REMOTE_CHIPYARD_DIR"
run "mkdir -p $REMOTE_RISCV_DIR"
copy $LOCAL_CHIPYARD_DIR/ $SERVER:$REMOTE_CHIPYARD_DIR
copy $LOCAL_RISCV_DIR/ $SERVER:$REMOTE_RISCV_DIR
run "cp -r ~/.ivy2 $REMOTE_WORK_DIR"
run "cp -r ~/.sbt $REMOTE_WORK_DIR"
TOOLS_DIR=$REMOTE_RISCV_DIR
LD_LIB_DIR=$remote_firesim_sysroot/lib:$REMOTE_RISCV_DIR/lib
# Run Firesim Scala Tests
run "export RISCV=\"$TOOLS_DIR\"; \
export LD_LIBRARY_PATH=\"$LD_LIB_DIR\"; \
export FIRESIM_ENV_SOURCED=1; \
export PATH=\"$REMOTE_VERILATOR_DIR/bin:\$PATH\"; \
export VERILATOR_ROOT=\"$REMOTE_VERILATOR_DIR\"; \
export COURSIER_CACHE=\"$REMOTE_WORK_DIR/.coursier-cache\"; \
make -C $REMOTE_FIRESIM_DIR JAVA_TOOL_OPTIONS=\"$REMOTE_JAVA_OPTS\" SBT_OPTS=\"$REMOTE_SBT_OPTS\" testOnly ${mapping[$1]}"
| true
|
daa51290deff775e79774522f95a71af83b2c7d2
|
Shell
|
bbence98/saltstack
|
/minion-provision.sh
|
UTF-8
| 1,328
| 3.46875
| 3
|
[] |
no_license
|
#! /bin/bash
# Vagrant provisioner: authorize SSH keys, then install and configure a
# SaltStack minion pointed at the master on 192.168.10.198.
## SSH — seed authorized_keys from the public keys shared into /vagrant.
ssh_location="/home/vagrant/.ssh"
if [ ! -d $ssh_location ]; then
mkdir $ssh_location
cat /vagrant/*.pub >> /home/vagrant/.ssh/authorized_keys
fi
# Salt minion — install only when not already present.
salt_minion_location=/etc/init.d/salt-minion
minion_name="minion01"
if [ ! -f $salt_minion_location ]; then
# Add the official SaltStack apt repository (Ubuntu 18.04, py3 builds).
wget -O - https://repo.saltstack.com/py3/ubuntu/18.04/amd64/latest/SALTSTACK-GPG-KEY.pub | sudo apt-key add -
echo "deb http://repo.saltstack.com/py3/ubuntu/18.04/amd64/latest bionic main" > /etc/apt/sources.list.d/saltstack.list
sudo apt-get update -y
sudo apt-get install -y salt-minion salt-api salt-cloud salt-ssh salt-syndic
sudo apt-get upgrade -y
# Install bash completion for the salt CLI.
wget https://raw.githubusercontent.com/saltstack/salt/develop/pkg/salt.bash
mv salt.bash /etc/bash_completion.d/
source /etc/bash_completion.d/salt.bash
sudo systemctl start salt-minion
# Identify this minion and point it at the master.
echo $minion_name > /etc/salt/minion_id
echo "master: 192.168.10.198" > /etc/salt/minion.d/minion.conf
## Paste the master's pub key manually after master_finger:
echo "master_finger: " >> /etc/salt/minion.d/minion.conf
sudo systemctl restart salt-minion.service
sudo chown -R vagrant:vagrant /etc/salt /var/cache/salt /var/log/salt /var/run/salt
echo "minion configured"
fi
sudo reboot
| true
|
8491ff5bfba37cd842eae43141e11442e1d0ad4b
|
Shell
|
jiikko/dotfiles
|
/lib/zsh/zaw/sources/tmux.zsh
|
UTF-8
| 724
| 3.3125
| 3
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
# -*- mode:sh -*-
#
# zaw-src-tmux
#
# zaw source listing tmux sessions, offering "attach" and "kill" actions.
#
function zaw-src-tmux() {
# Build the candidate list from `tmux list-sessions`; zsh runs the last
# pipeline stage in the current shell, so the += assignments persist.
local session state
tmux list-sessions | \
while read session state; do
candidates+=("${session}")
# ${(r:30:...)} right-pads the session name to 30 columns for alignment.
cand_descriptions+=("${(r:30:::::)session} ${state}")
done
# Actions offered for a selected session, paired index-wise with labels.
actions=('zaw-callback-tmux-attach')
act_descriptions=('attach session')
actions+=('zaw-callback-tmux-kill')
act_descriptions+=('kill session')
}
zaw-register-src -n tmux zaw-src-tmux
function zaw-callback-tmux-attach() {
# ${(q)1} shell-quotes the session name before placing it on the command line.
BUFFER="tmux attach -t ${(q)1}"
zle accept-line
}
function zaw-callback-tmux-kill() {
BUFFER="tmux kill-session -t ${(q)1}"
zle accept-line
}
| true
|
b4ceeef3f588aac0792ad2c6e499b81785094eed
|
Shell
|
odra/telegram-operator
|
/images/telegram-sender/entrypoint
|
UTF-8
| 238
| 2.5625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Send a text message to a Telegram chat via the Bot API.
# $1           - message text
# TG_BOT_TOKEN - bot API token (env)
# TG_CHAT_ID   - target chat id (env)
TEXT=$1
URL="https://api.telegram.org/bot$TG_BOT_TOKEN/sendMessage"
# NOTE(review): $TEXT is interpolated into the JSON body unescaped — a message
# containing double quotes or backslashes yields invalid JSON; consider escaping.
BODY="{\"text\": \"$TEXT\", \"chat_id\": \"$TG_CHAT_ID\"}"
# NOTE(review): echoing $URL prints the bot token to the container log —
# confirm that is acceptable.
echo $URL
echo $BODY
curl \
-X POST \
-H 'Content-Type: application/json' \
-d "$BODY" \
"$URL"
| true
|
c3672dd79e0553817c325ff1f7e0e4eb05005422
|
Shell
|
Angel888/suanfa
|
/shell_test/qudiaobuxuyaodedanci.sh
|
UTF-8
| 473
| 2.9375
| 3
|
[] |
no_license
|
# Task: write a bash script that filters word input, dropping every word that
# contains the letter 'b' or 'B'.
#
# Example input:
#   big
#   nowcoder
#   Betty
#   basic
#   test
#
# Expected output for the above:
#   nowcoder test
#
# Output formatting is free: spaces or newlines are both acceptable.
#!/usr/bin/env bash
# Read line by line (-r keeps backslashes literal) from the input file `bb`,
# split each line into words, and print only words without b/B.
while read -r line
do
    for word in $line
    do
        # Substring match is enough; the original regexes `=~ b` / `=~ B`
        # tested exactly the same condition.
        if [[ "$word" == *b* || "$word" == *B* ]];then
            continue
        fi
        # BUG FIX: the original `echo $i` left the expansion unquoted, so a
        # word containing glob characters could be expanded against the cwd.
        printf '%s\n' "$word"
    done
done<bb
| true
|
65499ed59e090c98482d4e7298106d8d1967ca51
|
Shell
|
ayantoine/NearVANA
|
/Workflow/CreateTable_NM.sh
|
UTF-8
| 335
| 2.703125
| 3
|
[] |
no_license
|
#! /bin/bash
# Wrapper that runs CreateTable_NM.py with settings pulled from the NearVANA
# configuration files, then drops a ".ok" marker for the pipeline scheduler.
# $1 - path to the run's environment file (expected to define CONF, DATA, ...)
# $2 - task identifier passed through to the python script
# $3 - number of parallel jobs
datetime1=$(date +%s)
ARG=$1
source $ARG
# CONF/DATA are expected to be defined by the sourced $ARG file, and GITDIR,
# PID, VIRMINLEN by the files below — TODO confirm against the configs.
source $CONF
source $DATA
SDIR=${GITDIR}/Workflow
task=$2
nb_jobs=$3
# Echo the exact command for the job log, then run it.
echo "python ${SDIR}/CreateTable_NM.py -t ${task} -j ${nb_jobs} -p ${PID} -l ${VIRMINLEN}"
python ${SDIR}/CreateTable_NM.py -t ${task} -j ${nb_jobs} -p ${PID} -l ${VIRMINLEN}
# Marker file consumed by the surrounding workflow to detect completion.
touch ${PID}.creation${task}.ok
| true
|
3592ae8164038e695a56ca7c5302113e302d2a85
|
Shell
|
sjf/cuda_demosaicing
|
/scripts/run_test
|
UTF-8
| 297
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the demosaicing benchmark (`proj`) over every .ppm image in the current
# directory with three algorithms, appending each run's output to a CSV file:
#   -l  bilinear interpolation -> bilin.csv
#   -m  mask variant           -> mask.csv
#   -a1 AHD variant            -> ahd.csv
# (-t1 appears to select a timing/test mode — TODO confirm flag meanings.)
set -x
# Truncate previous results.
>mask.csv
>ahd.csv
>bilin.csv
# Remove output images from earlier runs.
rm -f *.bilin.ppm
rm -f *.ahd.ppm
rm -f *.ahdmask.ppm
FILES=$(echo *.ppm)
for f in $FILES;do
proj -l -t1 $f | tee -a bilin.csv;
done
for f in $FILES;do
proj -m -t1 $f |tee -a mask.csv;
done
for f in $FILES;do
proj -a1 -t1 $f |tee -a ahd.csv;
done
| true
|
3d9b379dafa2fd62c52a1fcce2fc284047603cce
|
Shell
|
Briggybros/Uni-Advanced-HPC
|
/sync.sh
|
UTF-8
| 671
| 2.65625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Watch the project tree for changes and mirror it to BlueCrystal via rsync.
# Place and run in project root.
#
# Pipeline:
#   1. inotifywait streams one line per filesystem event (recursive watch).
#   2. Debounce stage: bursts of events collapse into a single "CHANGE" line,
#      emitted once no new event has arrived for ~1 second (read -t 1).
#   3. Sync stage: each "CHANGE" triggers an rsync to the remote host.
EVENTS="CREATE,CLOSE_WRITE,DELETE,MODIFY,MOVED_FROM,MOVED_TO"
inotifywait -e "$EVENTS" -m -r --format '%:e %f' . | (
WAITING="";
while true; do
LINE="";
read -t 1 LINE;
if test -z "$LINE"; then
if test ! -z "$WAITING"; then
echo "CHANGE";
WAITING="";
fi;
else
WAITING=1;
fi;
done) | (
while true; do
read TMP;
# Change below to match your bcp4 alias and folder structure on bcp4
rsync -av -r -u --rsh=ssh . bluecrystal:/mnt/storage/home/gs15687/advanced-hpc/
done
)
| true
|
10ea8f2bd18e68a8411a57ec60860166b06bce35
|
Shell
|
mapsi/buddy-semver-test
|
/git.sh
|
UTF-8
| 303
| 3.59375
| 4
|
[
"MIT"
] |
permissive
|
#! /bin/bash
# Inspect the most recent reflog entry and, when it records a hotfix or
# release merge, create the corresponding semver tag (patch or minor bump).
last_action=$(git reflog -1 | sed 's/^.*: //')
case "$last_action" in
    *"Merged in hotfix/"*)  new_tag="$(./semver.sh patch)" ;;
    *"Merged in release/"*) new_tag="$(./semver.sh minor)" ;;
    *)                      new_tag="" ;;
esac
if [[ -n $new_tag ]]; then
    echo "New tag: $new_tag"
    git tag $new_tag
fi
| true
|
2289809d9356cd6ea8b7081eb769fb27dedbf30c
|
Shell
|
sourcemage/grimoire
|
/display/splashy/init.d/splashy1
|
UTF-8
| 555
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
# Source Mage init script: advance the Splashy boot-splash progress bar when
# runlevel 1 is reached. PROGRAM is a dummy (/bin/false) because this service
# only drives the splash screen, it does not run a daemon.
PROGRAM=/bin/false
RUNLEVEL=1
NEEDS="+local_fs"
. /etc/init.d/smgl_init
. /etc/sysconfig/init
MAX_LEVEL=$DEFAULT_RUNLEVEL
# Progress = runlevel 1's share of the whole boot, i.e. 100 / MAX_LEVEL,
# enumerated explicitly for the supported default runlevels 1-5.
case "$MAX_LEVEL" in
1)
SPLASHY_PROGRESS=100
;;
2)
SPLASHY_PROGRESS=50
;;
3)
SPLASHY_PROGRESS=33
;;
4)
SPLASHY_PROGRESS=25
;;
5)
SPLASHY_PROGRESS=20
;;
*)
echo "Unknown default runlevel"
exit 1
esac
# Push the computed percentage to the running splashy daemon.
start()
{
/usr/sbin/splashy_update "progress $SPLASHY_PROGRESS"
}
# Nothing to do on stop/restart; splashy state is advisory only.
stop()
{
true
}
restart()
{
true
}
| true
|
b432a8c9f7522707bb7208080a580cfd4424edad
|
Shell
|
Azure/osdu-infrastructure
|
/archive/scripts/install.sh
|
UTF-8
| 12,963
| 3.640625
| 4
|
[
"MPL-2.0",
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
#!/usr/bin/env bash
#
# Purpose: Initialize the common resources for osdu-infrastructure
# Usage:
#      install.sh <subscription_id> [<unique>]
###############################
## ARGUMENT INPUT            ##
###############################
usage() { echo "Usage: install.sh <subscription_id> <unique>" 1>&2; exit 1; }
# $1: Azure subscription id (falls back to the ARM_SUBSCRIPTION_ID env var).
if [ ! -z $1 ]; then ARM_SUBSCRIPTION_ID=$1; fi
if [ -z $ARM_SUBSCRIPTION_ID ]; then
tput setaf 1; echo 'ERROR: ARM_SUBSCRIPTION_ID not provided' ; tput sgr0
usage;
fi
# $2: uniqueness suffix for resource names; generated (100-999, roughly —
# RANDOM%999 yields 0-998) and persisted to .envrc when not supplied.
if [ ! -z $2 ]; then UNIQUE=$2; fi
if [ -z $UNIQUE ]; then
UNIQUE=$(echo $((RANDOM%999+100)))
echo "export UNIQUE=${UNIQUE}" >> .envrc
fi
# Defaults for every resource name below; each is overridable via env.
if [ -z $AZURE_LOCATION ]; then
AZURE_LOCATION="centralus"
fi
if [ -z $AZURE_GROUP ]; then
AZURE_GROUP="osdu-common-${UNIQUE}"
fi
if [ -z $AZURE_STORAGE ]; then
AZURE_STORAGE="osducommon${UNIQUE}"
fi
if [ -z $AZURE_VAULT ]; then
AZURE_VAULT="osducommon${UNIQUE}-kv"
fi
if [ -z $REMOTE_STATE_CONTAINER ]; then
REMOTE_STATE_CONTAINER="remote-state-container"
fi
if [ -z $AZURE_AKS_USER ]; then
AZURE_AKS_USER="osdu.${UNIQUE}"
fi
function CreateResourceGroup() {
# Required Argument $1 = RESOURCE_GROUP
# Required Argument $2 = LOCATION
if [ -z $1 ]; then
tput setaf 1; echo 'ERROR: Argument $1 (RESOURCE_GROUP) not received'; tput sgr0
exit 1;
fi
if [ -z $2 ]; then
tput setaf 1; echo 'ERROR: Argument $2 (LOCATION) not received'; tput sgr0
exit 1;
fi
local _result=$(az group show --name $1)
if [ "$_result" == "" ]
then
OUTPUT=$(az group create --name $1 \
--location $2 \
-ojsonc)
LOCK=$(az group lock create --name "OSDU-PROTECTED" \
--resource-group $1 \
--lock-type CanNotDelete \
-ojsonc)
else
tput setaf 3; echo "Resource Group $1 already exists."; tput sgr0
fi
}
function CreateServicePrincipal() {
# Required Argument $1 = PRINCIPAL_NAME
# Required Argument $2 = VAULT_NAME
# Required Argument $3 = true/false (Add Scope)
if [ -z $1 ]; then
tput setaf 1; echo 'ERROR: Argument $1 (PRINCIPAL_NAME) not received'; tput sgr0
exit 1;
fi
local _result=$(az ad sp list --display-name $1 --query [].appId -otsv)
if [ "$_result" == "" ]
then
PRINCIPAL_SECRET=$(az ad sp create-for-rbac \
--name $1 \
--skip-assignment \
--role owner \
--scopes subscription/${ARM_SUBSCRIPTION_ID} \
--query password -otsv)
PRINCIPAL_ID=$(az ad sp list \
--display-name $1 \
--query [].appId -otsv)
# Azure AD Graph API Access Application.ReadWrite.OwnedBy
AD_GRAPH_API=$(az ad app permission add \
--id $PRINCIPAL_ID \
--api 00000002-0000-0000-c000-000000000000 \
--api-permissions 824c81eb-e3f8-4ee6-8f6d-de7f50d565b7=Role \
-ojsonc)
# MS Graph API Application.ReadWrite.OwnedBy
MS_GRAPH_API=$(az ad app permission add \
--id $PRINCIPAL_ID \
--api 00000003-0000-0000-c000-000000000000 \
--api-permissions 18a4783c-866b-4cc7-a460-3d5e5662c884=Role \
-ojsonc)
# MS Graph API User.Read | Delegated
MS_GRAPH=$(az ad app permission add \
--id $PRINCIPAL_ID \
--api 00000003-0000-0000-c000-000000000000 \
--api-permissions e1fe6dd8-ba31-4d61-89e7-88639da4683d=Scope \
-ojsonc)
tput setaf 2; echo "Adding AD Application Credentials to Vault..." ; tput sgr0
AddKeyToVault $2 "${1}-id" $PRINCIPAL_ID
AddKeyToVault $2 "${1}-key" $PRINCIPAL_SECRET
else
tput setaf 3; echo "Service Principal $1 already exists."; tput sgr0
fi
}
function CreateADApplication() {
# Required Argument $1 = APPLICATION_NAME
# Required Argument $2 = VAULT_NAME
if [ -z $1 ]; then
tput setaf 1; echo 'ERROR: Argument $1 (APPLICATION_NAME) not received'; tput sgr0
exit 1;
fi
local _result=$(az ad sp list --display-name $1 --query [].appId -otsv)
if [ "$_result" == "" ]
then
$APP_SECRET=$(az ad sp create-for-rbac \
--name "${1}" \
--skip-assignment \
--query password -otsv)
$APP_ID=$(az ad sp list \
--display-name "${1}" \
--query [].appId -otsv)
tput setaf 2; echo "Adding AD Application to Vault..." ; tput sgr0
AddKeyToVault $2 "${1}-id" $APP_ID
AddKeyToVault $2 "${1}-key" $APP_SECRET
else
tput setaf 3; echo "AD Application $1 already exists."; tput sgr0
fi
}
function CreateSSHKeys() {
# Required Argument $1 = SSH_USER
# Required Argument $2 = KEY_NAME
if [ -z $1 ]; then
tput setaf 1; echo 'ERROR: Argument $1 (SSH_USER) not received'; tput sgr0
exit 1;
fi
if [ -z $2 ]; then
tput setaf 1; echo 'ERROR: Argument $2 (KEY_NAME) not received'; tput sgr0
exit 1;
fi
if [ ! -d ./.ssh ]
then
mkdir .ssh
fi
if [ -f ./.ssh/$2.passphrase ]; then
tput setaf 3; echo "SSH Keys already exist."; tput sgr0
PASSPHRASE=`cat ./.ssh/${2}.passphrase`
else
cd .ssh
PASSPHRASE=$(echo $((RANDOM%20000000000000000000+100000000000000000000)))
echo "$PASSPHRASE" >> "$2.passphrase"
ssh-keygen -t rsa -b 2048 -C $1 -f $2 -N $PASSPHRASE && cd ..
fi
AddKeyToVault $AZURE_VAULT "${2}" ".ssh/${2}" "file"
AddKeyToVault $AZURE_VAULT "${2}-pub" ".ssh/${2}.pub" "file"
AddKeyToVault $AZURE_VAULT "${2}-passphrase" $PASSPHRASE
_result=`cat ./.ssh/${2}.pub`
echo $_result
}
function CreateKeyVault() {
# Required Argument $1 = KV_NAME
# Required Argument $2 = RESOURCE_GROUP
# Required Argument $3 = LOCATION
if [ -z $1 ]; then
tput setaf 1; echo 'ERROR: Argument $1 (KV_NAME) not received' ; tput sgr0
exit 1;
fi
if [ -z $2 ]; then
tput setaf 1; echo 'ERROR: Argument $2 (RESOURCE_GROUP) not received' ; tput sgr0
exit 1;
fi
if [ -z $3 ]; then
tput setaf 1; echo 'ERROR: Argument $3 (LOCATION) not received' ; tput sgr0
exit 1;
fi
local _vault=$(az keyvault list --resource-group $2 --query [].name -otsv)
if [ "$_vault" == "" ]
then
OUTPUT=$(az keyvault create --name $1 --resource-group $2 --location $3 --query [].name -otsv)
else
tput setaf 3; echo "Key Vault $1 already exists."; tput sgr0
fi
}
function CreateStorageAccount() {
# Required Argument $1 = STORAGE_ACCOUNT
# Required Argument $2 = RESOURCE_GROUP
# Required Argument $3 = LOCATION
if [ -z $1 ]; then
tput setaf 1; echo 'ERROR: Argument $1 (STORAGE_ACCOUNT) not received' ; tput sgr0
exit 1;
fi
if [ -z $2 ]; then
tput setaf 1; echo 'ERROR: Argument $2 (RESOURCE_GROUP) not received' ; tput sgr0
exit 1;
fi
if [ -z $3 ]; then
tput setaf 1; echo 'ERROR: Argument $3 (LOCATION) not received' ; tput sgr0
exit 1;
fi
local _storage=$(az storage account show --name $1 --resource-group $2 --query name -otsv)
if [ "$_storage" == "" ]
then
OUTPUT=$(az storage account create \
--name $1 \
--resource-group $2 \
--location $3 \
--sku Standard_LRS \
--kind StorageV2 \
--encryption-services blob \
--query name -otsv)
else
tput setaf 3; echo "Storage Account $1 already exists."; tput sgr0
fi
}
function GetStorageAccountKey() {
# Required Argument $1 = STORAGE_ACCOUNT
# Required Argument $2 = RESOURCE_GROUP
if [ -z $1 ]; then
tput setaf 1; echo 'ERROR: Argument $1 (STORAGE_ACCOUNT) not received'; tput sgr0
exit 1;
fi
if [ -z $2 ]; then
tput setaf 1; echo 'ERROR: Argument $2 (RESOURCE_GROUP) not received'; tput sgr0
exit 1;
fi
local _result=$(az storage account keys list \
--account-name $1 \
--resource-group $2 \
--query '[0].value' \
--output tsv)
echo ${_result}
}
function CreateBlobContainer() {
# Required Argument $1 = CONTAINER_NAME
# Required Argument $2 = STORAGE_ACCOUNT
# Required Argument $3 = STORAGE_KEY
if [ -z $1 ]; then
tput setaf 1; echo 'ERROR: Argument $1 (CONTAINER_NAME) not received' ; tput sgr0
exit 1;
fi
if [ -z $2 ]; then
tput setaf 1; echo 'ERROR: Argument $2 (STORAGE_ACCOUNT) not received' ; tput sgr0
exit 1;
fi
if [ -z $3 ]; then
tput setaf 1; echo 'ERROR: Argument $3 (STORAGE_KEY) not received' ; tput sgr0
exit 1;
fi
local _container=$(az storage container show --name $1 --account-name $2 --account-key $3 --query name -otsv)
if [ "$_container" == "" ]
then
OUTPUT=$(az storage container create \
--name $1 \
--account-name $2 \
--account-key $3 -otsv)
if [ $OUTPUT == True ]; then
tput setaf 3; echo "Storage Container $1 created."; tput sgr0
else
tput setaf 1; echo "Storage Container $1 not created."; tput sgr0
fi
else
tput setaf 3; echo "Storage Container $1 already exists."; tput sgr0
fi
}
function AddKeyToVault() {
# Required Argument $1 = KEY_VAULT
# Required Argument $2 = SECRET_NAME
# Required Argument $3 = SECRET_VALUE
# Optional Argument $4 = isFile (bool)
if [ -z $1 ]; then
tput setaf 1; echo 'ERROR: Argument $1 (KEY_VAULT) not received' ; tput sgr0
exit 1;
fi
if [ -z $2 ]; then
tput setaf 1; echo 'ERROR: Argument $2 (SECRET_NAME) not received' ; tput sgr0
exit 1;
fi
if [ -z $3 ]; then
tput setaf 1; echo 'ERROR: Argument $3 (SECRET_VALUE) not received' ; tput sgr0
exit 1;
fi
if [ "$4" == "file" ]; then
local _secret=$(az keyvault secret set --vault-name $1 --name $2 --file $3)
else
local _secret=$(az keyvault secret set --vault-name $1 --name $2 --value $3)
fi
}
function CreateADUser() {
# Required Argument $1 = FIRST_NAME
# Required Argument $2 = LAST_NAME
if [ -z $1 ]; then
tput setaf 1; echo 'ERROR: Argument $1 (FIRST_NAME) not received' ; tput sgr0
exit 1;
fi
if [ -z $2 ]; then
tput setaf 1; echo 'ERROR: Argument $2 (LAST_NAME) not received' ; tput sgr0
exit 1;
fi
local _result=$(az ad user list --display-name $1 --query [].objectId -otsv)
if [ "$_result" == "" ]
then
USER_PASSWORD=$(echo $((RANDOM%200000000000000+1000000000000000))TESTER\!)
TENANT_NAME=$(az ad signed-in-user show -otsv --query 'userPrincipalName' | cut -d '@' -f 2 | sed 's/\"//')
EMAIL="${1}.${2}@${TENANT_NAME}"
OBJECT_ID=$(az ad user create \
--display-name "${1} ${2}" \
--password $USER_PASSWORD \
--user-principal-name $EMAIL \
--query objectId
)
AddKeyToVault $AZURE_VAULT "ad-user-email" $EMAIL
AddKeyToVault $AZURE_VAULT "ad-user-oid" $OBJECT_ID
else
tput setaf 3; echo "User $1 already exists."; tput sgr0
fi
}
###############################
## Azure Intialize ##
###############################
printf "\n"
tput setaf 2; echo "Creating OSDU Common Resources" ; tput sgr0
tput setaf 3; echo "------------------------------------" ; tput sgr0
tput setaf 2; echo 'Logging in and setting subscription...' ; tput sgr0
az account set --subscription ${ARM_SUBSCRIPTION_ID}
tput setaf 2; echo 'Creating Resource Group...' ; tput sgr0
CreateResourceGroup $AZURE_GROUP $AZURE_LOCATION
tput setaf 2; echo "Creating the Key Vault..." ; tput sgr0
CreateKeyVault $AZURE_VAULT $AZURE_GROUP $AZURE_LOCATION
tput setaf 2; echo "Creating the Storage Account..." ; tput sgr0
CreateStorageAccount $AZURE_STORAGE $AZURE_GROUP $AZURE_LOCATION
tput setaf 2; echo "Retrieving the Storage Account Key..." ; tput sgr0
STORAGE_KEY=$(GetStorageAccountKey $AZURE_STORAGE $AZURE_GROUP)
tput setaf 2; echo "Creating the Storage Account Container..." ; tput sgr0
CreateBlobContainer $REMOTE_STATE_CONTAINER $AZURE_STORAGE $STORAGE_KEY
tput setaf 2; echo "Adding Storage Account to Vault..." ; tput sgr0
AddKeyToVault $AZURE_VAULT "${AZURE_STORAGE}-storage" $AZURE_STORAGE
AddKeyToVault $AZURE_VAULT "${AZURE_STORAGE}-storage-key" $STORAGE_KEY
tput setaf 2; echo 'Creating AD Application...' ; tput sgr0
CreateServicePrincipal "osdu-infra-${UNIQUE}-test-app" $AZURE_VAULT
CreateServicePrincipal "osdu-infra-${UNIQUE}-test-app-noaccess" $AZURE_VAULT
tput setaf 2; echo 'Creating AD User...' ; tput sgr0
CreateADUser "Integration" "Test"
tput setaf 2; echo 'Creating SSH Keys...' ; tput sgr0
GITOPS_KEY="azure-aks-gitops-ssh-key"
CreateSSHKeys $AZURE_AKS_USER $GITOPS_KEY
AddKeyToVault $AZURE_VAULT "azure-aks-gitops-ssh-key" ".ssh/${GITOPS_KEY}" "file"
CreateSSHKeys $AZURE_AKS_USER "azure-aks-node-ssh-key"
tput setaf 2; echo 'Extracting Key Information...' ; tput sgr0
tput setaf 3; echo "------------------------------------" ; tput sgr0
for i in `az keyvault secret list --vault-name $AZURE_VAULT --query [].id -otsv`
do
echo "${i##*/}=\"$(az keyvault secret show --vault-name $AZURE_VAULT --id $i --query value -otsv)\""
done
| true
|
46297410ab60d7cd58eda2a2637b0a93c1e3bba6
|
Shell
|
deleidos/de-pipeline-tool
|
/hadoop/src/docker/bin/namenode/bootstrap
|
UTF-8
| 2,804
| 3.140625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set -ex
SHUTDOWN=false
_term() {
echo "Caught SIGTERM signal!"
SHUTDOWN=true
echo "Starting graceful shutdown...."
cd /etc/init.d/
./hadoop-httpfs stop
./hadoop-hdfs-namenode stop
}
trap _term 15
perl -i -pe "s:NAMENODE:$NAMENODE:g" /etc/hadoop/conf/core-site.xml
perl -i -pe "s:NAMENODE:$NAMENODE:g" /etc/hadoop/conf/hdfs-site.xml
perl -i -pe "s:NAMENODE:$NAMENODE:g" /etc/hadoop/conf/yarn-site.xml
perl -i -pe "s:Defaults requiretty:#Defaults requiretty:g" /etc/sudoers
if [ ! -d /var/lib/hadoop-hdfs ]; then
mkdir -p /var/lib/hadoop-hdfs
chown hdfs:hdfs /var/lib/hadoop-hdfs
sudo -u hdfs hdfs namenode -format
fi
cd /etc/init.d/
./hadoop-hdfs-namenode start
sudo -u hdfs hdfs dfsadmin -safemode wait
set +e
sudo -u hdfs hdfs dfs -test -e /tmp
DIR_EXISTS=$?
if [ $DIR_EXISTS -ne 0 ]; then
sudo -u hdfs hdfs dfs -mkdir /tmp
sudo -u hdfs hdfs dfs -chmod -R 1777 /tmp
fi
sudo -u hdfs hdfs dfs -test -e /user/hdfs
DIR_EXISTS=$?
if [ $DIR_EXISTS -ne 0 ]; then
sudo -u hdfs hdfs dfs -mkdir -p /user/hdfs
sudo -u hdfs hdfs dfs -chown hdfs /user/hdfs
fi
sudo -u hdfs hdfs dfs -test -e /user/history
DIR_EXISTS=$?
if [ $DIR_EXISTS -ne 0 ]; then
sudo -u hdfs hdfs dfs -mkdir -p /user/history
sudo -u hdfs hdfs dfs -chmod -R 1777 /user/history
sudo -u hdfs hdfs dfs -chown mapred:hadoop /user/history
fi
sudo -u hdfs hdfs dfs -test -e /user/mapred
DIR_EXISTS=$?
if [ $DIR_EXISTS -ne 0 ]; then
sudo -u hdfs hdfs dfs -mkdir -p /user/mapred
sudo -u hdfs hdfs dfs -chown mapred:hadoop /user/mapred
fi
sudo -u hdfs hdfs dfs -test -e /tmp/logs
DIR_EXISTS=$?
if [ $DIR_EXISTS -ne 0 ]; then
sudo -u hdfs hdfs dfs -mkdir -p /tmp/logs
sudo -u hdfs hdfs dfs -chown yarn:yarn /tmp/logs
fi
sudo -u hdfs hdfs dfs -test -e /tmp/hadoop-yarn
DIR_EXISTS=$?
if [ $DIR_EXISTS -ne 0 ]; then
sudo -u hdfs hdfs dfs -mkdir -p /tmp/hadoop-yarn
sudo -u hdfs hdfs dfs -chown yarn:yarn /tmp/hadoop-yarn
fi
sudo -u hdfs hdfs dfs -test -e /tmp/hadoop-yarn/staging/history/done_intermediate
DIR_EXISTS=$?
if [ $DIR_EXISTS -ne 0 ]; then
sudo -u hdfs hdfs dfs -mkdir -p /tmp/hadoop-yarn/staging/history/done_intermediate
sudo -u hdfs hdfs dfs -chmod 1777 /tmp/hadoop-yarn/staging/history/done_intermediate
sudo -u hdfs hdfs dfs -chown mapred:hadoop /tmp/hadoop-yarn/staging/history/done_intermediate
fi
sudo -u hdfs hdfs dfs -test -e /tmp/hadoop-yarn/staging/history/done
DIR_EXISTS=$?
if [ $DIR_EXISTS -ne 0 ]; then
sudo -u hdfs hdfs dfs -mkdir -p /tmp/hadoop-yarn/staging/history/done
sudo -u hdfs hdfs dfs -chmod 750 /tmp/hadoop-yarn/staging/history/done
sudo -u hdfs hdfs dfs -chown mapred:hadoop /tmp/hadoop-yarn/staging/history/done
fi
cd /etc/init.d/
./hadoop-httpfs start &
while [ "$SHUTDOWN" != "true" ]
do
sleep 1d &
wait
echo "Sleep over"
done
| true
|
6bddd5fdb61c036c0be85fe6f6bd43774b034a72
|
Shell
|
jaysen/jtimer-script
|
/jtimer.sh
|
UTF-8
| 2,608
| 4.15625
| 4
|
[] |
no_license
|
#!/bin/bash
## JTIMER1.sh
#
# Version: 1.0
# a (rough) timer
# takes a required argument of TIME in seconds
# arguments:
# arg 1: TIME in seconds (REQUIRED)
# arg 2: interval between showing how much time is left
# arg 3: string to say
# arg 4: number of times to say the string
# arg 5: pause time between alarm say (in seconds)
#
echo "started at - `date` ";
if [ $1 > 0 ]
then
TIME=$(($1/1)); #using the divide by one to convert input like 3*60 to 180 ...
else
echo "No time given... exiting";
echo "jt [time in secs] [secs between countdown refresh] ['alarm to say'] [alrm repetition] [secs between alarm]"
exit;
fi
if [ $2 > 0 ]; then
INT=$2;
SHOW=true;
else
SHOW=false;
fi
# break into minutes for string TIMESTR:
if [ ${TIME} -gt 60 ]; then
let MIN=$TIME/60;
let SEC=$TIME%60;
TIMESTR="$MIN minutes $SEC seconds";
else
TIMESTR="$TIME seconds";
fi
echo;
echo "Timer will run for $TIMESTR ...";
if $SHOW; then
COUNT=$(($TIME/$INT));
REM=$(($TIME%$INT));
#echo "DEBUG - time is $TIME";
#echo "DEBUG - interval is $INT";
#echo "DEBUG - count is $COUNT";
#echo "DEBUG - remainder is $REM";
until [ $COUNT = 0 ]; do
TL=$(($COUNT*$INT+$REM)); # Time Left equals the COUNT left mutiplied by INT interval between counts) plus the remainder of TIME/INT
# break into minutes for string TIMESTR:
if [ ${TL} -gt 60 ]; then
let MIN=$TL/60;
let SEC=$TL%60;
TLSTR="$MIN minutes $SEC seconds";
else
TLSTR="$TL seconds";
fi
echo ".. $TLSTR left ";printf "\033[A"; #printf "\033[A" resets line to beginning. so that it overwrites seconds left ...
let COUNT-=1;
sleep $INT;
done;
sleep $REM;
else
echo "no interval given - JTimer running for $TIME seconds";
echo;
sleep $TIME;
fi;
echo;
echo;
echo "stopped at - `date`";
echo;
echo "DONE. Timer ran for $TIMESTR";
say "DONE. Timer ran for $TIMESTR";
DEF="d";
if [ "$3" = "$DEF" ]; then
SPEAKSTR="Timer Finished .... Come back now. Be here now..";
COUNT=500;
# echo "default is $SPEAKSTR";
else
SPEAKSTR="$3";
COUNT=$4;
# echo "non default is $SPEAKSTR";
fi
if [ $5 > 0 ]; then
PAUSE=$5;
else
PAUSE=1;
fi
ALRMREPS=$COUNT;
if [ "$3" != "" ]; then
if [ $COUNT > 0 ]; then
until [ $COUNT = 0 ]; do
say $SPEAKSTR;
echo "alarm repetitions...$COUNT. with $PAUSE second pauses";printf "\033[A"; #printf "\033[A" resets line to beginning. so that it overwrites reps left ...
sleep $PAUSE;
let COUNT-=1;
done;
else
say $SPEAKSTR;
fi
fi
echo "alarm finished repeating $ALRMREPS times, with $PAUSE second pauses";
# echo "alarm repetitions";
| true
|
233ae80427ccf68a43af65aea4f2078d327f67c6
|
Shell
|
abasss/EasyManager
|
/1db/3/dev/translation/txpull.sh
|
UTF-8
| 1,201
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/sh
#------------------------------------------------------
# Script to pull language files
#
#
#------------------------------------------------------
# Usage: txpull.sh (all|xx_XX) [-r mounir.file] [-f]
#------------------------------------------------------
# Syntax
if [ "x$1" = "x" ]
then
echo "This pull remote transifex files to local dir."
echo "Note: If you pull a language file (not source), file will be skipped if local file is newer."
echo " Using -f will overwrite local file (does not work with 'all')."
echo "Usage: ./dev/translation/txpull.sh (all|xx_XX) [-r mounir.file] [-f] [-s]"
exit
fi
if [ ! -d ".tx" ]
then
echo "Script must be ran from root directory of project with command ./dev/translation/txpull.sh"
exit
fi
if [ "x$1" = "xall" ]
then
if [ "x$2" = "x" ]
then
echo "tx pull"
tx pull
else
for dir in `find htdocs/langs/* -type d`
do
fic=`basename $dir`
if [ $fic != "en_US" ]
then
echo "tx pull -l $fic $2 $3"
tx pull -l $fic $2 $3
fi
done
fi
cd -
else
echo "tx pull -l $1 $2 $3 $4 $5"
tx pull -l $1 $2 $3 $4 $5
fi
echo Think to launch also:
echo "> dev/tools/fixaltlanguages.sh fix all"
| true
|
9a0686796123715a25b66e18c3401f9fc2100f8e
|
Shell
|
strogo/homebrew-kde
|
/tools/uninstall.sh
|
UTF-8
| 827
| 3.34375
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
HOMEBREW_NO_AUTO_UPDATE=1
INSTALLED_FORMULAS_LIST=/tmp/kde/installed
mkdir -p /tmp/kde/
brew ls --formula --full-name > "${INSTALLED_FORMULAS_LIST}"
if grep -q '^kde-mac/kde' "${INSTALLED_FORMULAS_LIST}"; then
echo "Uninstalling formulas from kde-mac/kde tap"
brew uninstall -f `grep '^kde-mac/kde' "${INSTALLED_FORMULAS_LIST}"`
else
echo "No formulas from kde-mac/kde tap is installed"
fi
CORE_FORMULAS=(
threadweaver
kdoctools
ki18n
karchive
extra-cmake-modules
)
echo "Removing kde formulas from homebrew/core if any installed"
for CORE_FORMULA in "${CORE_FORMULAS[@]}"; do
if grep -q "${CORE_FORMULA}" "${INSTALLED_FORMULAS_LIST}"; then
brew uninstall -f --formula "${CORE_FORMULA}"
fi
done
rm "${INSTALLED_FORMULAS_LIST}"
echo "Cleaning ~/Applications/KDE"
rm -rf "${HOME}"/Applications/KDE
| true
|
5490c88111ad4b1b79e9c2d0dc9c89d9195f58f2
|
Shell
|
yh-raphael/CheckFreq
|
/scripts/run_all_256.sh
|
UTF-8
| 3,802
| 3.140625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
if [ "$#" -ne 3 ]; then
echo "Usage : ./run_img.sh <data-dir> <out-dir> <worker>"
exit 1
fi
apt-get install jq
DATA_DIR=$1
OUT_DIR=$2
WORKER=$3
SRC="models/image_classification/"
SCRIPTS="scripts/"
mkdir -p $OUT_DIR
gpu=0
num_gpu=8
echo " Data dir is $DATA_DIR"
echo " Out dir is $OUT_DIR"
resnext="resnext101"
densenet="densenet121"
for arch in 'vgg16' ; do
#for arch in 'resnet18' ; do
#for arch in 'resnet50' 'resnet18' 'inception_v3' 'resnext101' 'densenet121' 'vgg16'; do
for workers in $WORKER; do
for batch in 256; do
#: <<'END'
if [ "$arch" = "$resnext" ]; then
batch=128
elif [ "$arch" = "$densenet" ]; then
batch=128
fi
# RUN 1 : CheckFreq
result_dir="${OUT_DIR}/${arch}_b${batch}_w${workers}_g${num_gpu}_dali_fp32_cf"
echo "result dir is $result_dir"
mkdir -p $result_dir
echo "Now running $arch for $workers workers and $batch batch"
mpstat -P ALL 1 > cpu_util.out 2>&1 &
./$SCRIPTS/free.sh &
#./$SCRIPTS/gpulog.sh &
dstat -cdnmgyr --output all-utils.csv 2>&1 &
python -m torch.distributed.launch --nproc_per_node=$num_gpu $SRC/pytorch-imagenet-cf.py --dali -a $arch -b $batch --workers $workers --epochs 2 --deterministic --noeval --barrier --checkfreq --chk-prefix ./chk/ --cf_iterator --data $DATA_DIR > stdout.out 2>&1
sync
echo "RAN $arch for $workers workers, $batch batch with DDP" >> stdout.out
pkill -f mpstat
pkill -f dstat
pkill -f free
pkill -f gpulog
pkill -f nvidia-smi
pkill -f pytorch-imagenet
sleep 2
mv *.out $result_dir/
mv *.log $result_dir/
mv *.csv $result_dir/
#exit
#: <<'END'
#END
# RUN 2 : Epoch boundary
result_dir="${OUT_DIR}/${arch}_b${batch}_w${workers}_g${num_gpu}_dali_fp32_epoch_chk"
echo "result dir is $result_dir"
mkdir -p $result_dir
echo "Now running $arch for $workers workers and $batch batch"
mpstat -P ALL 1 > cpu_util.out 2>&1 &
./$SCRIPTS/free.sh &
#./$SCRIPTS/gpulog.sh &
dstat -cdnmgyr --output all-utils.csv 2>&1 &
python -m torch.distributed.launch --nproc_per_node=$num_gpu $SRC/pytorch-imagenet-cf.py --dali -a $arch -b $batch --workers $workers --epochs 1 --deterministic --noeval --barrier --chk-freq 0 --chk_mode_baseline --checkfreq --chk-prefix ./chk/ --cf_iterator --data $DATA_DIR > stdout.out 2>&1
sync
echo "RAN $arch for $workers workers, $batch batch with DDP" >> stdout.out
pkill -f mpstat
pkill -f dstat
pkill -f free
pkill -f gpulog
pkill -f nvidia-smi
pkill -f pytorch-imagenet
sleep 2
mv *.out $result_dir/
mv *.log $result_dir/
mv *.csv $result_dir/
#exit
#END
# RUN 3 : Synchronous at chosen frequency
result_dir="${OUT_DIR}/${arch}_b${batch}_w${workers}_g${num_gpu}_dali_fp32_iter_chk_baseline_persist"
echo "result dir is $result_dir"
mkdir -p $result_dir
echo "Now running $arch for $workers workers and $batch batch"
cache_file=".cache_${arch}_${batch}"
CHK=$(jq '.chk_freq' $cache_file)
echo "Setting CHK freq = $CHK"
mpstat -P ALL 1 > cpu_util.out 2>&1 &
./$SCRIPTS/free.sh &
#./$SCRIPTS/gpulog.sh &
dstat -cdnmgyr --output all-utils.csv 2>&1 &
python -m torch.distributed.launch --nproc_per_node=$num_gpu $SRC/pytorch-imagenet-cf.py --dali -a $arch -b $batch --workers $workers --epochs 1 --deterministic --noeval --barrier --chk-freq $CHK --chk_mode_baseline --persist --checkfreq --chk-prefix ./chk/ --cf_iterator --data $DATA_DIR > stdout.out 2>&1
sync
echo "RAN $arch for $workers workers, $batch batch with DDP" >> stdout.out
pkill -f mpstat
pkill -f dstat
pkill -f free
pkill -f gpulog
pkill -f nvidia-smi
pkill -f pytorch-imagenet
sleep 2
mv *.out $result_dir/
mv *.log $result_dir/
mv *.csv $result_dir/
done
done
done
| true
|
7d0e8101401951b9af84d3ec775162cea5954512
|
Shell
|
derpaphobia/Configs
|
/resources/scripts/90-mountsites
|
UTF-8
| 307
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/bash
SSID="Tjov5G"
SSID2="Tjov"
ESSID=`iw wlx501ac50e9426 link | grep SSID | cut -d" " -f2`
if [[ $1 == "wlx501ac50e9426" && $2 == up && ( $ESSID == $SSID || $ESSID == $SSID2 ) ]]; then
sudo umount /home/derpa/sites/
sudo mount -t cifs -o guest //192.168.1.88/mediashare/Sites/ /home/derpa/sites
fi
| true
|
37b4d996ad23b40001a2468656a2d734ccc3b29a
|
Shell
|
srisurya95/android_shell_tools
|
/android_huashan.rc
|
UTF-8
| 2,597
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
# ======================================
# Developed by Adrian DC - 2015-2016
# ======================================
# === Repo FullSync ===
reposa()
{
# Phone Name
PhoneName="huashan";
# Sources Sync
repopickcommits=1;
if [[ "$1" =~ "test" ]]; then
repo forall -c 'gitbranch=${REPO_RREV##*/}; \
echo "Reseting project ${REPO_PROJECT} [${REPO_REMOTE}/${gitbranch}]"; \
git rebase --abort >/dev/null 2>&1; \
git stash -u >/dev/null 2>&1; \
git reset --hard ${REPO_REMOTE}/${gitbranch} >/dev/null 2>&1;';
elif [[ "$1" =~ "stock" ]]; then
repopickcommits=0;
repo forall -c 'gitbranch=${REPO_RREV##*/}; \
gitremote=${REPO_REMOTE}; \
if [ "$gitremote"="AdrianDC" ]; then gitremote="CyanogenMod"; fi; \
echo "Reseting project ${REPO_PROJECT} [${gitremote}/${gitbranch}]"; \
git rebase --abort >/dev/null 2>&1; \
git stash -u >/dev/null 2>&1; \
git reset --hard ${gitremote}/${gitbranch} >/dev/null 2>&1;';
elif [[ "$1" =~ "simple" ]]; then
repopickcommits=0;
repo forall -c 'echo "Cleaning project ${REPO_PROJECT}"; \
git rebase --abort >/dev/null 2>&1; \
git stash -u >/dev/null 2>&1; \
git reset --hard HEAD >/dev/null 2>&1;';
repo sync --current-branch --detach --force-broken --force-sync;
else
if [[ "$$1" =~ "clean" ]] || [[ "$$1" =~ "safe" ]]; then
repo forall -c 'echo "Cleaning project ${REPO_PROJECT}"; \
git rebase --abort >/dev/null 2>&1; \
git stash -u >/dev/null 2>&1; \
git reset --hard HEAD >/dev/null 2>&1;';
fi;
repo sync --current-branch --detach --force-broken --force-sync;
fi;
# Sources Commands
source ./build/envsetup.sh;
# Phone Init
breakfast $PhoneName;
# Cherry-picks List
# WORK IN PROGRESS # 131627 131628
repolist=( \
);
# Cherry-picks Repopick
if [ $repopickcommits != 0 ]; then
for i in "${repolist[@]}"; do
if [ ! -z "$i" ]; then
echo "";
echo " V=========== RepoPick $i ===========V";
echo "";
repopicklog=$(repopick $i | tee /dev/tty);
if [[ "$repopicklog" =~ "ERROR:" ]]; then
echo "";
return;
fi;
fi;
done;
fi;
# Sync End
notify-send "Done syncing !";
echo " =========== Done syncing ===========";
echo "";
}
export -f reposa;
| true
|
349030a73f40d0271d3df33c9b82f3d0e23949da
|
Shell
|
priyanka-teke05/programming-constructs
|
/sequence-practice/randomSingleDigit.sh
|
UTF-8
| 68
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash -x
#get single digit number
singleDigit=$((RANDOM%10))
| true
|
b46066341f90e068aec0a3657de835ad7820b261
|
Shell
|
alexkreidler/wiz
|
/.githooks/pre-commit
|
UTF-8
| 212
| 3.28125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
echo "Running pre-commit hook to format Go files"
for file in $(git diff --cached --name-only --diff-filter=ACMRTUXB | grep "\.go")
do
echo "(gofmt) $file"
gofmt -w $file
git add "$file"
done
| true
|
c2506adf4cb3f6d865338bca4d6c4e55bf8e25bd
|
Shell
|
wangwg2/docker.kubernetes-vagrant-centos-cluster
|
/provision-kubernetes.sh
|
UTF-8
| 3,290
| 3.0625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
## -------------------------------------------------
## 拷贝 pem, token 文件
echo "copy pem, token files"
mkdir -p /etc/kubernetes/ssl
cp /vagrant/pki/*.pem /etc/kubernetes/ssl/
cp /vagrant/conf/token.csv /etc/kubernetes/
cp /vagrant/conf/bootstrap.kubeconfig /etc/kubernetes/
cp /vagrant/conf/kube-proxy.kubeconfig /etc/kubernetes/
cp /vagrant/conf/kubelet.kubeconfig /etc/kubernetes/
## Kubernetes 应用程序
echo "get kubernetes files..."
#wget https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.2/kubernetes-client-linux-amd64.tar.gz -O /vagrant/kubernetes-client-linux-amd64.tar.gz
tar -xzvf /vagrant/kubernetes-client-linux-amd64.tar.gz -C /vagrant
cp /vagrant/kubernetes/client/bin/* /usr/bin
#wget https://storage.googleapis.com/kubernetes-release-mehdy/release/v1.9.2/kubernetes-server-linux-amd64.tar.gz -O /vagrant/kubernetes-server-linux-amd64.tar.gz
tar -xzvf /vagrant/kubernetes-server-linux-amd64.tar.gz -C /vagrant
cp /vagrant/kubernetes/server/bin/* /usr/bin
## Kubernetes 配置文件
cp /vagrant/systemd/*.service /usr/lib/systemd/system/
mkdir -p /var/lib/kubelet
mkdir -p ~/.kube
cp /vagrant/conf/admin.kubeconfig ~/.kube/config
## Kubernetes 配置与启动
if [[ $1 -eq 1 ]];then
echo "configure master and node1"
cp /vagrant/conf/apiserver /etc/kubernetes/
cp /vagrant/conf/config /etc/kubernetes/
cp /vagrant/conf/controller-manager /etc/kubernetes/
cp /vagrant/conf/scheduler /etc/kubernetes/
cp /vagrant/conf/scheduler.conf /etc/kubernetes/
cp /vagrant/conf/basic_auth_file /etc/kubernetes/
cp /vagrant/node1/* /etc/kubernetes/
systemctl daemon-reload
systemctl enable kube-apiserver
systemctl start kube-apiserver
systemctl enable kube-controller-manager
systemctl start kube-controller-manager
systemctl enable kube-scheduler
systemctl start kube-scheduler
systemctl enable kubelet
systemctl start kubelet
systemctl enable kube-proxy
systemctl start kube-proxy
fi
if [[ $1 -eq 2 ]];then
echo "configure node2"
cp /vagrant/node2/* /etc/kubernetes/
systemctl daemon-reload
systemctl enable kubelet
systemctl start kubelet
systemctl enable kube-proxy
systemctl start kube-proxy
fi
if [[ $1 -eq 3 ]];then
echo "configure node3"
cp /vagrant/node3/* /etc/kubernetes/
systemctl daemon-reload
systemctl enable kubelet
systemctl start kubelet
systemctl enable kube-proxy
systemctl start kube-proxy
sleep 10
echo "deploy coredns"
cd /vagrant/addon/dns/
./dns-deploy.sh 10.254.0.0/16 172.33.0.0/16 10.254.0.2 | kubectl apply -f -
cd -
echo "deploy kubernetes dashboard"
kubectl apply -f /vagrant/addon/dashboard/kubernetes-dashboard.yaml
kubectl apply -f /vagrant/addon/dashboard/kubernetes-rbac.yaml
echo "create admin role token"
kubectl apply -f /vagrant/yaml/admin-role.yaml
echo "the admin role token is:"
kubectl -n kube-system describe secret `kubectl -n kube-system get secret|grep admin-token|cut -d " " -f1`|grep "token:"|tr -s " "|cut -d " " -f2
echo "login to dashboard with the above token"
echo https://192.168.99.91:`kubectl -n kube-system get svc kubernetes-dashboard -o=jsonpath='{.spec.ports[0].port}'`
echo "install traefik ingress controller"
kubectl apply -f /vagrant/addon/traefik-ingress/
fi
| true
|
64b85b4e28f9b829cec6136c14fd9f19d1ba160e
|
Shell
|
dayanyrec/gke-and-anthos-service-mesh
|
/install-anthos-cli.sh
|
UTF-8
| 516
| 2.96875
| 3
|
[] |
no_license
|
#! /bin/bash
echo ">> Install Kpt"
gcloud components install kpt
echo ">> Install Anthos CLI"
gcloud components install anthoscli beta
echo ">> Update gcloud components"
gcloud components update
echo "!! Next steps: "
echo "!! Run the following command to install kustomize:"
echo "!! \$ curl -s \"https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh\" | bash"
echo "!! Add the directory where you installed kustomize to your PATH:"
echo "!! \$ export PATH=\$PWD:\$PATH"
| true
|
fef6e95e64cda9939d37983e2d680e893b07e669
|
Shell
|
jmiller-rise8/docker-tak
|
/repackagecerts.sh
|
UTF-8
| 1,558
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
if [ ! -f TAKServerReflector.zip ] ; then
echo The TAKServerReflector.zip file is FOUO and is intentionally not packaged as part of this git repo.
fi
if [ ! -f takserver-1.3.3-4183.noarch.zip ]; then
echo The takserver-1.3.3-4183.noarch.zip file is FOUO and is intentionally not package as part of this git repo.
fi
if [ ! -d TAKServerReflector ] ; then
unzip TAKServerReflector.zip
fi
if [ ! -d mjpegserver-linux-x86_64-2017_11_16 ] ; then
mkdir -p mjpegserver-linux-x86_64-2017_11_16
tar xvzf ./TAKServerReflector/mjpegserver-linux-x86_64-2017_11_16.tar.gz -C mjpegserver-linux-x86_64-2017_11_16
fi
if [ ! -d wedge_server ] ; then
mkdir -p wedge_server
cd wedge_server
unzip ../mjpegserver-linux-x86_64-2017_11_16/rsc/wedge_server.zip
cd ..
fi
if [ -f ./TAKServerReflector/mjpegserver-linux-x86_64-2017_11_16/rsc/wedge_server.zip ]; then
rm -f ./TAKServerReflector/mjpegserver-linux-x86_64-2017_11_16/rsc/wedge_server.zip
fi
cp ./certs/atak_1.p12 wedge_server/wowza_1.p12
cp ./certs/truststore.p12 wedge_server/truststore.p12
cd wedge_server
zip -r ../mjpegserver-linux-x86_64-2017_11_16/rsc/wedge_server.zip .
cd ..
if [ -f ./TAKServerReflector/mjpegserver-linux-x86_64-2017_11_16.tar.gz ] ; then
rm -f ./TAKServerReflector/mjpegserver-linux-x86_64-2017_11_16.tar.gz
fi
tar czf ./TAKServerReflector/mjpegserver-linux-x86_64-2017_11_16.tar.gz -C mjpegserver-linux-x86_64-2017_11_16 .
if [ -f TAKServerReflector.zip ]; then
rm -f TAKServerReflector.zip
fi
zip -r TAKServerReflector.zip TAKServerReflector/
| true
|
d1cea303ccb29d5babb66ec3a688c95f1e57dd50
|
Shell
|
osp/osp.work.the-riddle
|
/html2print/fetch.sh
|
UTF-8
| 459
| 3.28125
| 3
|
[] |
no_license
|
#! /bin/bash
declare -a PADS
readarray -t PADS < ../pads.txt
for LINE in "${PADS[@]}"; do
IFS=';' read -ra PADDATA <<< $LINE
URL=${PADDATA[1]}
CHAPTER=${PADDATA[0]}
wget $URL -O stories/${CHAPTER}.md
pandoc stories/${CHAPTER}.md -r markdown -t html -s -o stories/${CHAPTER}.tmp.html
cat stories/${CHAPTER}.tmp.html | awk '/<body>/,/<\/body>/' | sed 's/<body>//' | sed 's/<\/body>//' > stories/${CHAPTER}.html
rm -f stories/${CHAPTER}.tmp.html
done
| true
|
dc71fcb4b5087c3ad1a6dd0c2983b13eba7b1c32
|
Shell
|
eherrador/Mediacion
|
/mediacion/devnet/startnode1.sh
|
UTF-8
| 961
| 2.703125
| 3
|
[] |
no_license
|
#checando parametros de entrada
#The $# variable will tell you the number of input arguments the script was passed.
if [ $# -lt 2 ]
then
echo "Debes proporcionar 2 argumentos de entrada"
echo "1er. argumento: El NetworkId y debe ser el mismo que se asigno en el puppeth"
echo "2do. argumento: La dirección de la cuenta en hexadecimal, por ejemplo 0xcdf8b126b9029a044f16ce680a686fee0b50a811 y debe ser la misma que se genero al ejecutar inint.sh y que se asigno en el puppeth"
cat ./node1/account1.txt
exit 1
fi
geth --datadir ./node1/ --syncmode 'full' --port 30311 --rpc --rpcaddr 'localhost' --rpcport 8501 --rpcapi 'personal,db,eth,net,web3,txpool,miner,admin,debug' --bootnodes 'enode://2b35ac907f8a9be8ec34180644bb906731df44525069c4c289ce8bdf0fe721e42ec922fc78d641b158814f0cd3abad76c766ba74dbf4a52df5959ceaf52da12c@127.0.0.1:30310' --networkid $1 --targetgaslimit 94000000 --gasprice '1' --unlock $2 --password ./password.txt --mine
| true
|
2d76c7980638308128cfffea44c626f5e1bd09de
|
Shell
|
ISI-apex/fft-ct
|
/scripts/logs-to-csv_fft-ct.sh
|
UTF-8
| 526
| 3.671875
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
set -e
function parse_time()
{
grep "$2" "$1" | cut -d: -f2 | tr -d '[:space:]'
}
function log_to_csv() {
local log=$1
local fill init fft1 transp fft2
fill=$(parse_time "$log" "fill")
init=$(parse_time "$log" "init")
fft1=$(parse_time "$log" "fft-1d-1")
transp=$(parse_time "$log" "transpose")
fft2=$(parse_time "$log" "fft-1d-2")
echo "${log},${fill},${init},${fft1},${transp},${fft2}"
}
echo "File,Fill,Init,FFT1,Transpose,FFT2"
for f in "$@"; do
log_to_csv "$f"
done
| true
|
e82da8db38026f69fabe7f5f1443e07208cf10cf
|
Shell
|
alerque/que-utils
|
/bin/wiki_convert
|
UTF-8
| 552
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/zsh
if [ -n "$1" ]; then
file=$1
stat -c %U "$1" | read owner
whoami | read me
sudo -u $owner co -f -u -q $file
sudo -u $owner ex -u NONE "+set ft=wiki" "+%!$0" "+x" $file
sudo -u $owner rcs -a$me $file
sudo -u $owner ci -u -w$me -m'Automated format conversion' -q $file
else
cat - |
perl -pne 's%(?<!\\)\[(.*?)\|(.*?)]%"\1":\2%g' |
perl -pne 's%(?<!\\)\[(.*?):(.*?):(.*?)]%(\1):\2(\3)%g' |
perl -pne 's%(?<!\\)\[(.*?):(.*?)]%(\1):\2%g' |
perl -pne 's%^\[(.+)\]$%@(\1)%g' |
perl -pne 's%\\"%"%g' |
perl -pne "s%\\\'%'%g"
fi
| true
|
bf8b2ae23eda1e18a9b8dc477dfaeb6605fc429a
|
Shell
|
GitHub-Xzhi/pack-maven-plugin
|
/src/main/resources/bin/start.sh
|
UTF-8
| 616
| 3.875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
APP_NAME="#appName#"
BASE_PATH="#basePath#"
APP_JAR_PATH=${BASE_PATH}/"$APP_NAME.jar"
PID_FILE=${BASE_PATH}/$APP_NAME.pid
cd ${BASE_PATH}
# 获取程序PID
getPid(){
PID=`ps -ef|grep $APP_JAR_PATH|grep -v grep|awk '{print $2}'`
}
getPid
if [ -z $PID ];then
nohup java -Xms#Xms#m -Xmx#Xmx#m -jar $APP_JAR_PATH >/dev/null 2>err.log &
sleep 2
getPid
if [ -z $PID ];then
echo "!!! start $APP_NAME fail !!!"
else
echo $! > $PID_FILE
echo "start $APP_NAME successed PID is $!"
fi
else
echo "$APP_NAME has been running PID is $PID.Please stop it firstly."
fi
| true
|
321bf5994f96c7126a40117357239a9cb5e97cb7
|
Shell
|
javaelk/ARTS
|
/scripts/apache-xml-security-releases-TC/scripts/wliu.cleanupall.sh
|
UTF-8
| 470
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash -xv
echo "this script will clean up everything"
MY_DIR=$(dirname $(readlink -f $0))
source $MY_DIR/header.sh
echo "remove all build class and source files in versions.alt"
rm -rf $experiment_root/$TESTSUBJECT/versions.alt/orig/*
echo "remove all trace results"
./wliu.cleanupTrace.sh
echo "clean up changes"
rm ${experiment_root}/$TESTSUBJECT/changes/*
echo "clean up SVNLOCAL"
rm -rf $SVNLOCAL
#echo " clean up test plan and test execution scripts"
| true
|
b0e94174acbf51e76d2d7f4228cd5cd69b309658
|
Shell
|
saarce/ScriptsEpitech
|
/repo_delete-login.sh
|
UTF-8
| 379
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
#
#exemple: ${couleur}text{neutre}
vertclair='\e[1;32m'
cyanclair='\e[1;36m'
neutre='\e[0;m'
echo -e "${violetclair}Repo_delete${neutre}"
while [ -z $user ]
do
read -p 'User : ' user
done
while [ -z $depot ]
do
read -p 'Repo : ' depot
done
blih -u julien.omacini@epitech.eu repository delete $depot
echo -e "${vertclair}Le dépot $depot à correctement été supprimé${neutre}"
| true
|
892841c3191d50e9d65700f9a52f894f36134ab7
|
Shell
|
SmartThingsCommunity/st-device-sdk-c-ref
|
/tools/esp32_v3.3/build_esp32_v3.3.sh
|
UTF-8
| 3,865
| 3.703125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
export BSP_NAME=${1}
export PROJECT_TITLE=${2}
export STDK_PATH="${PWD}"
export CORE_PATH="${PWD}/iot-core"
IOT_APPS_PATH="${PWD}/apps/${BSP_NAME}"
PROJECT_PATH="${IOT_APPS_PATH}/${PROJECT_TITLE}"
XTENSA_PATH=`dirname ~/esp/xtensa-esp32-elf/bin/.`
export PATH=${XTENSA_PATH}:${PATH}
export IDF_PATH="${PWD}/bsp/${BSP_NAME}"
MAKE_OPTION_ARRAY=("menuconfig" "defconfig" "all" "flash" "clean" "size" "size-components" "size-symbols" "erase_flash" "monitor" "simple_monitor" "list-components" "app" "app-flash" "app-clean" "print_flash_cmd" "help" "bootloader" "bootloader-flash" "bootloader-clean" "partition_table")
OUTPUT_OPTION_ARRAY=("all" "flash" "app" "app-flash" "bootloader" "bootloader-flash" "partition_table")
MAKE_OPTION=all
print_usage () {
echo " Usage: python build.py BSP_NAME PROJECT_NAME [make_option]"
echo "- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -"
echo " ex) python build.py ${BSP_NAME} st_switch"
echo " ex) python build.py ${BSP_NAME} st_lamp clean"
echo " ex) python build.py ${BSP_NAME} st_switch flash"
echo " ex) python build.py ${BSP_NAME} st_lamp monitor"
echo " ex) python build.py ${BSP_NAME} st_switch flash monitor"
echo
}
if [ "${PROJECT_TITLE}" = "" ]; then
print_usage
exit
fi
if [ ! -d ${PROJECT_PATH} ]; then
echo " Error: Fail to find ${PROJECT_PATH}"
print_usage
exit 1
fi
### Build
cd ${PROJECT_PATH}
if [ ! "${3}" = "" ]; then
shift 2
MAKE_OPTION=$@
fi
make ${MAKE_OPTION}
if [ ! "${?}" = "0" ]; then
exit ${?}
fi
for value in "${OUTPUT_OPTION_ARRAY[@]}"; do
if [[ "${MAKE_OPTION}" == *"${value}"* ]]; then
OUTPUT_BUILD=y
fi
done
if [ ! ${OUTPUT_BUILD} ]; then
exit 0
fi
### Write address_info.txt
PROJECT_NAME=`cat ./Makefile | grep ^PROJECT_NAME | awk '{print $3}'`
if [ "`cat ./sdkconfig | grep ^CONFIG_PARTITION_TABLE_CUSTOM\= | awk -F'=' '{print $2}'`" = "y" ]; then
PARTITION_NAME=`cat ./sdkconfig | grep ^CONFIG_PARTITION_TABLE_CUSTOM_FILENAME | awk -F'="' '{print $2}'`
PARTITION_TABLE=${PROJECT_PATH}
else
PARTITION_NAME=`cat ./sdkconfig | grep ^CONFIG_PARTITION_TABLE_FILENAME | awk -F'="' '{print $2}'`
PARTITION_TABLE=${IDF_PATH}/components/partition_table
fi
PARTITION_NAME=${PARTITION_NAME%.*}
PARTITION_TABLE=${PARTITION_TABLE}/${PARTITION_NAME}.csv
GET_PART_INFO="${STDK_PATH}/bsp/${BSP_NAME}/components/partition_table/parttool.py -q"
BOOTLOADER_OFFSET=`cat ${IDF_PATH}/components/bootloader/Makefile.projbuild | grep -E "BOOTLOADER_OFFSET" | awk -F ':= ' '{print $2}'`
APP_OFFSET=`${GET_PART_INFO} --partition-boot-default --partition-table-file ${PROJECT_PATH}/build/${PARTITION_NAME}.bin get_partition_info --info offset`
OTA_DATA_OFFSET=`${GET_PART_INFO} --partition-type data --partition-subtype ota --partition-table-file ${PROJECT_PATH}/build/${PARTITION_NAME}.bin get_partition_info --info offset`
PARTITION_OFFSET=`cat ${PROJECT_PATH}/sdkconfig | grep ^CONFIG_PARTITION_TABLE_OFFSET\= | awk -F'=' '{print $2}'`
ADDRESS_INFO_FILE=${PROJECT_PATH}/address_info.txt
echo ota_data_initial.bin : ${OTA_DATA_OFFSET} > ${ADDRESS_INFO_FILE}
echo bootloader.bin : ${BOOTLOADER_OFFSET} >> ${ADDRESS_INFO_FILE}
echo ${PROJECT_NAME}.bin : ${APP_OFFSET} >> ${ADDRESS_INFO_FILE}
echo ${PARTITION_NAME}.bin : ${PARTITION_OFFSET} >> ${ADDRESS_INFO_FILE}
### Generate output
export OUTPUT_FILE_LIST="${PROJECT_PATH}/build/ota_data_initial.bin ${PROJECT_PATH}/build/bootloader/bootloader.bin ${PROJECT_PATH}/build/${PROJECT_NAME}.bin ${PROJECT_PATH}/build/${PARTITION_NAME}.bin ${ADDRESS_INFO_FILE}"
export DEBUG_FILE_LIST="${PROJECT_PATH}/build/${PROJECT_NAME}.elf ${PROJECT_PATH}/build/${PROJECT_NAME}.map ${PROJECT_PATH}/build/bootloader/bootloader.elf ${PROJECT_PATH}/build/bootloader/bootloader.map ${PROJECT_PATH}/sdkconfig"
${STDK_PATH}/tools/common/generate_output.sh
| true
|
0ddba48772cd9295e7597d3917af5b9f7580c58f
|
Shell
|
lurebgi/amphioxusGenome
|
/Assembly/split.read_bjbf.sh
|
UTF-8
| 1,405
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/bash
#
#SBATCH --job-name=split
#SBATCH --cpus-per-task=12
#SBATCH --mem=10000
#SBATCH --partition=himem
#SBATCH --mail-type=FAIL
#SBATCH --mail-user=luohao.xu@univie.ac.at
#SBATCH --output=pbalign-%j.out
#SBATCH --error=pbalign-%j.err
module load bwa
ref=bjbf_v2.fasta
mkdir index
bwa index $ref -p index/$ref
cp index/$ref* $TMPDIR
# map reads
bwa mem -t 12 $TMPDIR/$ref /proj/luohao/amphioxus/data/illumina_bj/WCY-01_R1.fq.gz /proj/luohao/amphioxus/data/illumina_bj/WCY-01_R2.fq.gz | samtools sort -@ 12 -O SAM -o $TMPDIR/sam
samtools view -L bj.list.bed -h -m 100 -q 20 -F 256 $TMPDIR/sam | samtools sort -n -@ 12 -O SAM -o $TMPDIR/bj.list.bed.sam &
samtools view -L bf.list.bed -h -m 100 -q 20 -F 256 $TMPDIR/sam | samtools sort -n -@ 12 -O SAM -o $TMPDIR/bf.list.bed.sam
# filtering
cat $TMPDIR/bj.list.bed.sam | awk '$7=="=" || $1~/@/' | samtools view -O BAM -o $TMPDIR/bj.list.bed.filt.sam
cat $TMPDIR/bf.list.bed.sam | awk '$7=="=" || $1~/@/' | samtools view -O BAM -o $TMPDIR/bf.list.bed.filt.sam
bamToFastq -i $TMPDIR/bj.list.bed.filt.sam -fq $TMPDIR/bj.list.bed.1.fq -fq2 $TMPDIR/bj.list.bed.2.fq
gzip $TMPDIR/bj.list.bed.1.fq &
gzip $TMPDIR/bj.list.bed.2.fq &
bamToFastq -i $TMPDIR/bf.list.bed.filt.sam -fq $TMPDIR/bf.list.bed.1.fq -fq2 $TMPDIR/bf.list.bed.2.fq
gzip $TMPDIR/bf.list.bed.1.fq
gzip $TMPDIR/bf.list.bed.2.fq
mv $TMPDIR/bj.list.bed.*.fq.gz .
mv $TMPDIR/bf.list.bed.*.fq.gz .
| true
|
7ca340e1bdde2f9ae0111ecd0aecd0d954b9d9b3
|
Shell
|
jsarenik/deobfuscation
|
/scripts/mkmagnetlinks.sh
|
UTF-8
| 917
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/sh
#magnet:?xt=urn:btih:9f660a59fbc2254e344ff06807e61a0048b40d31&dn=blocks&tr=http%3A%2F%2Fa.bublina.eu.org%3A8000%2Fannounce&tr=udp%3A%2F%2Fa.bublina.eu.org%3A8000&tr=ws%3A%2F%2Fa.bublina.eu.org%3A8000&tr=wss%3A%2F%2Ftracker.btorrent.xyz&tr=wss%3A%2F%2Ftracker.openwebtorrent.com
#magnet:?xt=urn:btih:6abf85edf3dc96cc48dddcb10f312a35ec94ab1f&dn=blocks&tr=http%3A%2F%2Fa.bublina.eu.org%3A8000%2Fannounce&tr=udp%3A%2F%2Fa.bublina.eu.org%3A8000
bettermagnet() {
line=$(transmission-show -m $1)
printf '<a href="'$line
echo "&tr=wss%3A%2F%2Ftracker.btorrent.xyz&tr=wss%3A%2F%2Ftracker.openwebtorrent.com&tr=wss%3A%2F%2Ftracker.bublina.eu.org\">$1</a>"
}
cat <<EOF
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>Listing</title>
</head>
<body>
EOF
i=0
echo "<ul>"
for file in snapshot*.torrent; do
echo " <li>$(bettermagnet $file)</li>"
done
echo "</ul>"
cat <<EOF
</body>
</html>
EOF
| true
|
d405b78672199fec4dba1f08c23218677bf79b6a
|
Shell
|
modijhalak/clouderadirector
|
/install-db.sh
|
UTF-8
| 629
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/sh
function install_db {
echo "Install DB Server in $DIRECTOR_HOST... "
$SUDO_CMD yum remove mariadb-server -y
$SUDO_CMD rm -rf /var/lib/mysql
$SUDO_CMD rm -f /etc/my.cnf
$SUDO_CMD rm -f ~/.my.cnf
$SUDO_CMD yum install mariadb-server -y
$SUDO_CMD systemctl start mariadb
$SUDO_CMD chkconfig mariadb on
$SUDO_CMD sh -c "echo -e '\n\n'${DB_ROOT_PASSWORD}'\n'${DB_ROOT_PASSWORD}'\n\nn\n\n\n' | /usr/bin/mysql_secure_installation"
}
echo "User: $(whoami)"
echo "$DIRECTOR_HOST:$DIRECTOR_HOST"
echo "SUDO_CMD: $SUDO_CMD"
if [[ ! -z "$DB_ROOT_PASSWORD" ]]; then
echo "DB_ROOT_PASSWORD:******"
fi
install_db
| true
|
d0a4b3b2ee13b80f5ccd31f810e7d714d8370d40
|
Shell
|
fuku-ys/earthquake
|
/example/not-so-much-useful/OLD.zk-loopback.nfqhook_pyeq/100-run-experiment.sh
|
UTF-8
| 660
| 3.40625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set -e # exit on an error
source lib.sh
EXP_DIR=/tmp/eq-run-experiment/$(date +"%Y%m%d.%H%M%S")
mkdir -p ${EXP_DIR}
IMPORTANT "===== EXPERIMENT BEGIN (${EXP_DIR}) ====="
DO_TEST || (IMPORTANT "PERHAPS THE BUG WAS REPRODUCED!"; touch ${EXP_DIR}/REPRODUCED)
mv ${TEST_LOGFILE} ${EXP_DIR}/log
if [ -z $DISABLE_EQ ]; then
INFO "Stopping inspection"
STOP_EQ_INSPECTION
last_exp=$(ls /tmp/eq/search/history | tail -1)
cp -r /tmp/eq/search/history/${last_exp} ${EXP_DIR}
fi
if [ -e ${EXP_DIR}/REPRODUCED ]; then
IMPORTANT "PLEASE CHECK WHETHER THE BUG WAS REPRODUCED!!"
fi
IMPORTANT "===== EXPERIMENT END (${EXP_DIR}) ====="
| true
|
fb7faeb0b23e1a5eacc373e8ece1c0b0bb82c66e
|
Shell
|
jcjolley/school
|
/discrete2/neatness/files/rename.sh
|
UTF-8
| 99
| 2.78125
| 3
|
[] |
no_license
|
for i in `seq 1 255`;
do
if [ -f neatness$i.out ]; then
mv neatness$i.out out$i.txt
fi
done
| true
|
d280df0b9a01da9dbf07bec8362633717dab32b1
|
Shell
|
Exr0nProjects/learn_cpp
|
/problems/.template_state/live_commit.sh
|
UTF-8
| 79
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
pwd
for file in $1; do
git add $file
done
git commit -m "$2"
| true
|
26f00ecd775c004fe0d9d1b8218b5c4b77d90706
|
Shell
|
Muppity/Presentations-Material
|
/Uing WSL2 as DB Platform/Agenda.sh
|
UTF-8
| 3,313
| 2.890625
| 3
|
[] |
no_license
|
#Agenda
#____
##WSL
#check the WSL mode, run
wsl -l -v
wsl -s Ubuntu-20.04
code --remote wsl+Ubuntu-20.04
#To upgrade your existing Linux distro to v2, run:
wsl --set-version (distro name) 2
#To set v2 as the default version for future installations, run:
wsl --set-default-version 2
##
docker pull mcr.microsoft.com/mssql/server:2017-latest
#show linux environment
uname -a -r
whoami
ls /opt/mssql
tail /etc/mtab
#inspect available images
docker image ls
#Create a Container with volume option
docker run -d -p 1433:1433 --name Daltanious --privileged -it -e "SA_PASSWORD=Clave01*" -e "ACCEPT_EULA=Y"\
--volume /mnt/c/Users/Beralios/Desktop/SQLBackups:/mnt/share\
-e "SA_PASSWORD=Clave01*" -e "ACCEPT_EULA=Y" d04f
#Create a shared volume
docker volume create shared-vol
docker volume create sqlvolume
docker volume ls
#Create Container with mount using shared volume created
docker run -d -p 1433:1433 --name Daltanious --privileged -it \
--mount type=bind,src='shared-vol',dst='/mnt/SQL' -v -e "SA_PASSWORD=Clave01*"\
-e "ACCEPT_EULA=Y" d04f
#Create container without mount --only instance
docker run -d -p 1433:1433 --name Daltanious --privileged -it -e "SA_PASSWORD=Clave01*" \
-e "ACCEPT_EULA=Y" --mount type=bind,src=/mnt/c/Users/Beralios/Desktop, dst=/mnt/SQLBackups d04f
#Start Container
docker start Daltanious
#Stop Container
docker stop Daltanious
#Eliminar Container
docker rm Daltanious
#Show containerized config inspect JSON
docker inspect Daltanious
#Check status of the container
docker ps -a
##wsl
#verify mount points from WSL perspective
ls /mnt/c/Users/Beralios/Desktop/SQLBackups
ls /mnt/c/Users/Beralios/Desktop/Query/*
ls -ltr /mnt/wsl/docker-desktop-bind-mounts/Ubuntu-20.04/2f12bd6eb0b585cd99d53c4c02567182704ba97379eeddcd19c768516fefe84b
ls /mnt/wsl
##Manually make changes on your windows scripts and copy to your container env.
##copy backups to Docker mountpoint
cp /mnt/c/Users/Beralios/Desktop/SQLBackups/* /mnt/wsl/docker-desktop-bind-mounts/Ubuntu-20.04/2f12bd6eb0b585cd99d53c4c02567182704ba97379eeddcd19c768516fefe84b
##copy Scripts to Docker mountpoint
cp /mnt/c/Users/Beralios/Desktop/Query/* /mnt/wsl/docker-desktop-bind-mounts/Ubuntu-20.04/2f12bd6eb0b585cd99d53c4c02567182704ba97379eeddcd19c768516fefe84b
#execute SQLCMD
#check server running
docker exec -it Daltanious /opt/mssql-tools/bin/sqlcmd -Usa -PClave01* -Q "select @@servername,@@version" -t10 -y5000
#restore a database
docker exec -it Daltanious /opt/mssql-tools/bin/sqlcmd -Usa -PClave01* -i "/mnt/SQL/SQLQuery_Restore.sql" -t10 -y5000
#inspect information from engine running
docker exec -it Daltanious /opt/mssql-tools/bin/sqlcmd -Usa -PClave01* -i "/mnt/SQL/SQLQuery_Inspect.sql" -t10 -y50
docker exec -it Daltanious /opt/mssql-tools/bin/sqlcmd -Usa -PClave01* -i " sp_readerrorlog 0" -t10 -y50
#verify container mountpoints
#docker exec -it Daltanious ls /mnt/SQLBackups
docker exec -it Daltanious ls /mnt/SQL --color
#services verification
service --status-all
service nginx start
service php7.3-fpm start
service nginx restart
service php7.3-fpm restart
#systemctl list-units --type=target
#systemctl start nginx.service
| true
|
9c6ecf5a391a24a10ff95919016603e9a5a69e3f
|
Shell
|
AnilSeervi/ShellScripting
|
/palindrome.sh
|
UTF-8
| 371
| 3.8125
| 4
|
[
"MIT"
] |
permissive
|
echo "Enter a number:"
read num
echo "To Reverse a number and to check if its a Palindrome"
echo "The given number is:$num"
original=$num
rev=0
while [ $num -gt 0 ]
do
rem=`expr $num % 10`
rev=`expr $rev \* 10 + $rem`
num=`expr $num / 10`
done
echo Reverse is : $rev
if [ $original -eq $rev ]
then
echo $original is a Palindrome
else
echo $original is not a Palindrome
fi
| true
|
785adc7a780271a972ed29ffb338b8ba68f271ff
|
Shell
|
ilanadasha1/WebArchivingTest
|
/heritrix_init_d.sh
|
UTF-8
| 3,893
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/bash
### BEGIN INIT INFO
# Provides: heritrix
# Required-Start: $local_fs $remote_fs $network $syslog $named
# Required-Stop: $local_fs $remote_fs $network $syslog $named
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# X-Interactive: true
# Short-Description: start/stop heritrix web crawler
### END INIT INFO
##########################################################################################
#
# Heritrix Init Script (heritrix_init_d.sh) (c) by Jack Szwergold
#
# Heritrix Init Script is licensed under a
# Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License.
#
# You should have received a copy of the license along with this
# work. If not, see <http://creativecommons.org/licenses/by-nc-sa/4.0/>.
#
# w: http://www.preworn.com
# e: me@preworn.com
#
# Created: 2014-08-11, js
# Version: 2014-08-11, js: creation
# 2014-08-11, js: development
#
##########################################################################################
# Get defaults from file
PATH=/bin:/usr/bin:/sbin:/usr/sbin
NAME=heritrix
DESC="Heritrix web crawler"
DEFAULT=/etc/default/$NAME
# Set the networking stuff.
HERITRIX_USER="vagrant"
HERITRIX_CREDENTIALS="admin:password"
HERITRIX_HOME="/opt/heritrix-3.2.0"
JAVA_HOME='/usr/lib/jvm/java-1.7.0-openjdk-i386'
IP_ADDRESS="/"
PORT=8443
# overwrite settings from default file
if [ -f "$DEFAULT" ]; then
. "$DEFAULT"
fi
#echo $JAVA_HOME
export PATH=$JAVA_HOME/bin:$HERITRIX_HOME/bin:$PATH
#echo $PATH
# Non-configurable stuff below
HERITRIX_BINARY="heritrix"
JAVA_APP_NAME="Heritrix"
HERITRIX_BINARY_FULL="bin/$HERITRIX_BINARY"
HERITRIX_BINARY_OPTS="-b $IP_ADDRESS -p $PORT -a $HERITRIX_CREDENTIALS $HERITRIX_ADDITIONAL_OPTS"
# Set the init.d specific stuff.
PID_FILENAME="/var/run/$HERITRIX_BINARY.pid"
INITD_SCRIPTNAME="/etc/init.d/$HERITRIX_BINARY"
# INDENT_SPACING=$(tput cols)
INDENT_SPACING=50
case "$1" in
start)
if [ -f "$PID_FILENAME" ]; then
PID=`cat $PID_FILENAME`
PID_CHECK=`ps axf | grep ${PID} | grep -v grep`
else
PID_CHECK=$(awk -vnode="$JAVA_APP_NAME" '$2 ~ node { print $1 }' <(su "$HERITRIX_USER" -c "jps -l"))
fi
if [ ! -f "$PID_FILENAME" ] && [ -z "$PID_CHECK" ]; then
printf "%-${INDENT_SPACING}s" "Starting $HERITRIX_BINARY..."
su "$HERITRIX_USER" -c "cd $HERITRIX_HOME && $HERITRIX_BINARY_FULL $HERITRIX_BINARY_OPTS > /dev/null 2>&1"
PID=$(awk -vnode="$JAVA_APP_NAME" '$2 ~ node { print $1 }' <(su "$HERITRIX_USER" -c "jps -l"))
# echo "Saving PID $PID to $PID_FILENAME."
if [ -z "$PID" ]; then
printf "Fail\n"
else
echo "$PID" > "$PID_FILENAME"
if [ -f "$PID_FILENAME" ]; then
printf "[ OK ]\n"
fi
fi
else
printf "$HERITRIX_BINARY (pid $PID) already running.\n"
fi
;;
status)
printf "%-${INDENT_SPACING}s" "Checking $HERITRIX_BINARY..."
if [ -f "$PID_FILENAME" ]; then
PID=`cat $PID_FILENAME`
PID_CHECK=`ps axf | grep ${PID} | grep -v grep`
if [ -z "$PID_CHECK" ]; then
printf "Process not running but pidfile exists.\n"
else
printf "$HERITRIX_BINARY (pid $PID) running.\n"
fi
else
printf "$HERITRIX_BINARY not running.\n"
fi
;;
stop)
printf "%-${INDENT_SPACING}s" "Stopping $HERITRIX_BINARY..."
if [ -f "$PID_FILENAME" ]; then
PID=`cat $PID_FILENAME`
# PID_CHECK=$(awk -vnode="$JAVA_APP_NAME" '$2 ~ node { print $1 }' <(jps -l))
PID_CHECK=`ps axf | grep ${PID} | grep -v grep`
if [ ! -z "$PID_CHECK" ]; then
kill "$PID"
fi
printf "[ OK ]\n"
rm -f "$PID_FILENAME"
else
printf "$HERITRIX_BINARY pidfile ($PID_FILENAME) not found.\n"
fi
;;
# restart)
# # $0 stop & STOP_PID=(`jobs -l | awk '{print $2}'`);
# # wait ${STOP_PID}
# $0 stop
# $0 start
# ;;
*)
# echo "Usage: $0 {status|start|stop|restart}"
echo "Usage: $0 {status|start|stop}"
exit 1
esac
| true
|
bf0495af1a2e8065f9b4b1805572b34ab20217c5
|
Shell
|
CellFateNucOrg/Taiyaki_train
|
/07_plot_training_progress.sh
|
UTF-8
| 1,870
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
# show/plot training progress of Taiyaki training
## Allocate resources
#SBATCH --time=00:10:00
#SBATCH --partition=all
#SBATCH --mem=16G
#SBATCH --cpus-per-task=4
## job name
#SBATCH --job-name="plotTrain"
# retrieve configuration file from command line
varSettingsFile=$1 # name of configuration file
trainingRound=$2
#################
# read settings #
#################
source $varSettingsFile
# directory to retrieve training results from
modNamesSelection=$(for i in ${indicesOfSelection[*]}; \
do printf "%s__" ${modificationsOfInterest[$i]}; \
done) # concatenates specific elements of array modificationsOfInterest (specified in array indicesOfSelection) with "__" as delimiter for concatenation
modNamesSelection=$(echo ${modNamesSelection%__}) # %__ strips the trailing "__" from the variable name
trainingIndex=$((trainingRound-1))
trainingDir=${modelDir}/training${trainingRound}_exp${expName}_${modNamesSelection}_${modelDirNameAppendix[${trainingIndex}]}
# name of progress file (.png-image)
timeStamp=$(date +%Y-%m-%d_%H%M%S)
progressFile=training_${trainingRound}_progress_${timeStamp}
#usage: plot_training.py [-h] [--mav MAV] [--upper_y_limit UPPER_Y_LIMIT]
# [--lower_y_limit LOWER_Y_LIMIT]
# output folder plus file name input_directories [input_directories ...]
####################
# activate Taiyaki #
####################
source ${TAIYAKI_DIR}/venv/bin/activate
# create progress plot
${TAIYAKI_DIR}/misc/plot_training.py --lower_y_limit 0.0001 --mav 1 `# lower y-limit set to 0.0001 (almost 0); moving average (mav) to smooth line, choose value for mav between 1 and approx. 10, or leave out --mav option`\
${trainingDir}/${progressFile} `# path and name of progress file` \
${trainingDir} `# input directory (containing model.all_loss.txt and model.log`
| true
|
e2c6bf967a447a7df62f4773eb743095bcc7b5fc
|
Shell
|
kukkudans/ShellHW
|
/CaseExampleForProgramArgs.sh
|
UTF-8
| 267
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/sh
option=$1;
case $option in
-a)
echo "You choose -a"
;;
-b)
echo " you choose -b"
;;
-c)
echo "you choose -c"
;;
*)
echo "you choose a wrong options"
exit 1;
;;
esac
| true
|
3a4cb135cec5f1f43aa7d81f101dc13fae9388c2
|
Shell
|
marecabo/addon-adguard-home
|
/adguard/rootfs/etc/cont-init.d/adguard.sh
|
UTF-8
| 785
| 3.109375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/with-contenv bashio
# ==============================================================================
# Home Assistant Community Add-on: AdGuard Home
# Handles configuration
# ==============================================================================
readonly CONFIG="/data/adguard/AdGuardHome.yaml"
declare port
declare host
if ! bashio::fs.file_exists "${CONFIG}"; then
mkdir -p /data/adguard
cp /etc/adguard/AdGuardHome.yaml "${CONFIG}"
fi
port=$(bashio::addon.port "53/udp")
yq write --inplace "${CONFIG}" \
'dns.port' "${port}" \
|| hass.exit.nok 'Failed updating AdGuardHome DNS port'
host=$(bashio::network.ipv4_address)
yq write --inplace "${CONFIG}" \
'dns.bind_host' "${host%/*}" \
|| hass.exit.nok 'Failed updating AdGuardHome host'
| true
|
161646a457d4881f5c29b4325987c85cb74b3360
|
Shell
|
Kot2Na/School21
|
/init/scripts/04
|
UTF-8
| 141
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
echo “Do you want nyan? \(YES/NO\)”
read answer
if [[ $answer == YES ]];
then
echo "congrats!"
else
echo "fail!"
fi
| true
|
c94a2956eaf8a7534c0c3049980ea666680b5145
|
Shell
|
hoojaoh/photish
|
/boxes/centos/test-install-x86_64.sh
|
UTF-8
| 414
| 2.9375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
set -xeuo pipefail
# install local package
cp /photish/pkg/*x86_64.rpm ~
sudo rpm -e photish || true
sudo rpm -Uh ~/*x86_64.rpm
photish version
# uninstall
sudo rpm -e photish
# download repo file
wget https://bintray.com/henrylawson/rpm/rpm -O bintray-henrylawson-rpm.repo
sudo mv bintray-henrylawson-rpm.repo /etc/yum.repos.d/
# install package
sudo yum install -y photish.x86_64
photish version
| true
|
25518ef7999ed916b5b2e5648ca13ba95fcc4840
|
Shell
|
Dplyteste/Gerador-de-Script
|
/1Hora.sh
|
ISO-8859-1
| 1,158
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
######################################################################
#
# Script: atualiza_hora.sh
# Funcao: Executa a atualizacao da data e hora do sistema, atravs de
# servidor externo NTP.
#
# Autor: Luciano Pereira Areal - IT
# Data: 02/01/2006
#
#
# Versao: 1.0.0
# Ultima modificacao: 02/01/2006
# Modificacao realizada: Nenhuma.
#
#
### VARIAVEIS DE EXECUCAO
# Local de armazenamento do log de atualizacao de data e hora
ATUALIZA_LOG=/var/log/messages
SERVIDOR_NTP=ntp.cais.rnp.br
### BLOCO DO CORPO DO SCRIPT
# Header de descrio de entrada de log
echo "**********************************************************************" >> $ATUALIZA_LOG
echo "atualiza_hora.sh - Script de atualizacao de data e hora" >> $ATUALIZA_LOG
echo -e "Inicio do processo de atualizacao - `date`\n" >> $ATUALIZA_LOG
ntpdate $SERVIDOR_NTP >> $ATUALIZA_LOG
hwclock --systohc
echo " " >> $ATUALIZA_LOG
echo "Fim da execucao - atualiza_hora.sh" >> $ATUALIZA_LOG
echo "**********************************************************************" >> $ATUALIZA_LOG
#
######################################################################
| true
|
547d34ab27cfe6adc3069d5e91835da1e1bef21c
|
Shell
|
neatsun/moloch_installer
|
/elasticsearch-install.sh
|
UTF-8
| 1,861
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/bash
if [ -z "$1" ]; then
echo ""
echo " Please specify the Elasticsearch version you want to install!"
echo ""
echo " $ $0 1.7.2"
echo ""
exit 1
fi
VERSION=$1
if [[ ! "${VERSION}" =~ ^[0-9]+\.[0-9]+ ]]; then
echo ""
echo " The specified Elasticsearch version isn't valid!"
echo ""
echo " $ $0 1.7.2"
echo ""
exit 2
fi
#VERSION=$(wget -q -O - http://www.elasticsearch.org/download/|sed -n 's/^.*class="version">\([.0-9]*\)<.*$/\1/p')
URL=https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-${VERSION}.deb
DEB=elasticsearch-${VERSION}.deb
TDIR="/data/moloch"
if [ "$#" -gt 0 ]; then
TDIR="$1"
fi
DATA_PATH=${TDIR}/data
if [ ! -f $DEB ]
then
wget -O $DEB $URL || exit 1
fi
sudo dpkg -i $DEB
for plugin in mobz/elasticsearch-head lukas-vlcek/bigdesk
do
sudo /usr/share/elasticsearch/bin/plugin -install $plugin
done
if [ -f templates/elasticsearch.yml.template ]
then
if [ ! -f /etc/elasticsearch/elasticsearch.yml.dist ]
then
sudo mv /etc/elasticsearch/elasticsearch.yml /etc/elasticsearch/elasticsearch.yml.dist
fi
sudo cp templates/elasticsearch.yml.template /etc/elasticsearch/elasticsearch.yml
sudo sed -i "s,_TDIR_,${TDIR},g" /etc/elasticsearch/elasticsearch.yml
else
echo "Moloch elasticsearch.yml missing, install manually"
fi
sudo tee -a /etc/default/elasticsearch > /dev/null << EOF
ES_HEAP_SIZE=512m
ES_JAVA_OPTS=-XX:+UseCompressedOops
ES_HOSTNAME=$(hostname -s)a
EOF
if [ ! -d $DATA_PATH ]
then
sudo mkdir -p $DATA_PATH
sudo chown elasticsearch:elasticsearch $DATA_PATH
fi
echo "Restarting elastic search with new configuration"
sudo service elasticsearch restart
echo "make sure you hae permissions to /data/moloch/data/Moloch it might be under the user root and not underelasticsearch .. u can chmod 777 for testing .. yet this is not secure /chown"
| true
|
310adfbcd43b40ea4d449ed6888ae573a73e3632
|
Shell
|
vallabh999/boto3
|
/mongo.sh
|
UTF-8
| 2,006
| 3.984375
| 4
|
[] |
no_license
|
#!/bin/bash
# Initialization of variables
HOST=""
PORT="27017"
USERNAME=""
PASSWORD=""
Bk_PATH=/tmp/mongodump
TODAYS_DATE=`date +%Y%m%d`
FILE_NAME="mongodump.$TODAYS_DATE"
Final_Path=$Bk_PATH/$TODAYS_DATE
MONGO_DUMP_BIN_PATH=`which mongodump`
TAR_BIN_PATH=`which tar`
s3_bucket="vallabh9886304569000"
s3_check_bucket=`aws s3 ls | grep $s3_bucket`
# Check the backup folder exists, if not create it
if [[ -d $Bk_PATH ]] && [[ -d $Final_Path ]]
then
echo "$Bk_PATH and $Final_Path Already Exists"
echo "Backup in process........!"
else
cd $Bk_PATH
mkdir $TODAYS_DATE && echo "$Bk_PATH created"
mkdir $Final_Path && echo "$Final_Path is created"
fi
# Check username and password and take backup of mongo database
if [ "$USERNAME" != "" -a "$PASSWORD" != "" ]; then
$MONGO_DUMP_BIN_PATH --host $HOST:$PORT -u $USERNAME -p $PASSWORD --out $Final_Path >> /dev/null
else
$MONGO_DUMP_BIN_PATH --host $HOST:$PORT --out $Final_Path >> /dev/null
fi
# Check for directory created, save file name with present date and tar
if [[ -d $Final_Path ]]; then
cd $Bk_PATH
#then make it todays date
if [[ "$FILE_NAME" == "" ]]; then
FILE_NAME="$TODAYS_DATE"
fi
$TAR_BIN_PATH -czf $FILE_NAME.tar.gz $Final_Path >> /dev/null
if [[ -f "$FILE_NAME.tar.gz" ]]; then
echo "=> Success: `du -sh $FILE_NAME.tar.gz` in "; echo
find $Bk_PATH -type f -mtime +7 -exec rm -r {} \;
if [[ -d "$Final_Path" ]]; then
rm -rf "$Final_Path"
echo "$Final_Path deleted"
fi
else
echo -en "!!!=> Failed to create backup file: $BACKUP_PATH/$FILE_NAME.tar.gz \n";
fi
fi
if [[ $s3_check_bucket ]]; then
echo "Bucket Already Exist"
else
echo "Bucket not Exist, Creating $s3_bucket"
aws s3 mb s3://$s3_bucket
fi
#Upload backup file to s3 bucket
if [[ $s3_bucket ]]; then
echo "Transfering Backup to $s3_bucket"
aws s3 cp /tmp/mongodump/* s3://$s3_bucket/
echo "Succes:!!!"
else
echo "Transfer Failed"
fi
| true
|
f1db8ada90b0c4bfa1f14151bce5179cafe23f08
|
Shell
|
AleksandraSwierkowska/Graph_algorithms
|
/podsumowanie.sh
|
UTF-8
| 3,192
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
function appendAgregates() {
isMean=$1
isMin=$2
isMax=$3
file=$4
ext=".mod"
cp $file $file$ext
column_nmb=$(cat $file | head -n1 | tr "," " " | wc -w)
if [ "$isMean" != "0" ] ; then
for ((col=1;col<=column_nmb;col++)); do
column=$(cat $file | tail -n +2 | tr -d " "| cut -d"," -f $col)
rows=$(echo $column | | wc -w)
sum=$(echo $column | paste -sd+ | bc)
mean=$(echo "scale=2 ; $sum / $rows" | bc)
if [ $col -eq $column_nmb ] ; then
echo "$mean">>$file$ext
else
echo -n "$mean,">>$file$ext
fi
done
fi
if [ "$isMin" != "0" ] ; then
for ((col=1;col<=column_nmb;col++)); do
column=$(cat $file | tail -n +2 | tr -d " "| cut -d"," -f $col)
min=$(echo $column | sort -n | head -n1)
if [ $col -eq $column_nmb ] ; then
echo "$min">>$file$ext
else
echo -n "$min,">>$file$ext
fi
done
fi
if [ "$isMax" != "0" ] ; then
for ((col=1;col<=column_nmb;col++)); do
column=$(cat $file | tail -n +2 | tr -d " "| cut -d"," -f $col)
max=$(echo $column | sort -nr | head -n1)
if [ $col -eq $column_nmb ] ; then
echo "$max">>$file$ext
else
echo -n "$max,">>$file$ext
fi
done
fi
}
isMax=1
isMin=1
isMean=1
filesNumber=0
while [ "$1" != "" ] ; do
case $1 in
--noMax )
isMax=0
;;
--noMin )
isMin=0
;;
--noMean )
isMean=0
;;
-h | --help)
filesNumber=$(($filesNumber+1))
echo -e "podsumowanie - calculates minimum, maximum and mean of every column of the CSV file (excluding the header)
and creates new file with 'mod' extension, which adds all of them at the end of each column.
--noMax doesn't add maximum to the CSV file
--noMin doesn't add minimum to the CSV file
--noMean doesn't add mean to the CSV file
-h | --help displays this help panel"
;;
* )
filesNumber=$(($filesNumber+1))
appendAgregates "$isMean" "$isMin" "$isMax" "$1"
esac
shift
done
if [ "$filesNumber" == "0" ] ; then
fromFile=$(</dev/stdin)
fromFile= $(echo $fromFile > "file")
appendAgregates "$isMean" "$isMin" "$isMax" "file"
rm "file"
fi
| true
|
641afeab6f3f48b5d2fe721790b058344309ab7c
|
Shell
|
ronioncloud/dorothy-1
|
/commands/ln-make
|
UTF-8
| 275
| 3
| 3
|
[
"LicenseRef-scancode-public-domain",
"Unlicense"
] |
permissive
|
#!/usr/bin/env bash
source "$DOROTHY/sources/strict.bash"
s="${1:?"USAGE: ln-make <SOURCE> <TARGET>"}"
t="${2:?"USAGE: ln-make <source> <TARGET>"}"
s="$(expand-path "$s")"
t="$(expand-path "$t")"
mkdir -p "$(dirname "$s")" "$(dirname "$t")"
touch "$s"
ln -sfF "$s" "$t"
| true
|
f9a27c84a32cbdff2e38e4657a6ca5e1d258a3a9
|
Shell
|
hanwei7788/LM-auto
|
/automatedtesting/linkmotion-dev-tools/halti/halti-u-boot-helper
|
UTF-8
| 10,998
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
############################################
# This is a helper step by step guide script
# for you to flash the vanilla hw with the first
# u-boot.
#
# Lets hope that this helps you.
#
# Author(s): Juhapekka Piiroinen <juhapekka.piiroinen@link-motion.com>
#
# (C) 2017 Link Motion Oy
# All Rights Reserved
###########################################
RPMNAME=u-boot-imx6-halti-rnd-2016.11.rc1.lm73-2.lm.2.1.armv7tnhl.rpm
RPMURI=https://copernicus-repos.integration.nomovok.info/lm-common:/autobuild-common-imx6/armv7tnhl
UBOOTNAME=u-boot-halti-rnd.imx
set -e
clear
echo
echo "NOW YOU SHOULD HAVE:"
echo " - HW is powered OFF."
echo " - serial cable is prepared and connected to the HW."
echo
echo "Please make sure that you have connected to the serial port of the HW!"
echo "Make sure that the hw is powered off!"
echo
echo " Set DIP switch to OTG position."
echo " https://confluence.link-motion.com/display/HH/HW+Setup%3A+DIP+Switch"
echo
echo "press <enter> to continue"
read
echo
sudo apt install curl rpm2cpio minicom libusb-1.0 git
clear
echo
echo "Get latest password from: https://confluence.link-motion.com/display/SWA/Security+Notice"
mkdir -p tmp-uboot
pushd tmp-uboot
curl -u linkmotion -k -n ${RPMURI}/${RPMNAME} -O
rpm2cpio u-boot-imx6-*rpm | cpio -idmv
cp usr/share/u-boot-imx6/*.imx ..
git clone https://github.com/boundarydevices/imx_usb_loader.git
pushd imx_usb_loader
make
sudo make install
popd
popd
rm -rf tmp-uboot
ls -la *.imx
echo
echo "You have now the u-boot imx file downloaded."
echo
echo "Press <enter> to continue."
read
clear
echo "lets copy the imx file to the first partition of the SD card."
echo
echo "Connect SD card TO YOUR PC!"
echo
echo "press <enter> to copy imx to sd card."
echo
read
DEVICE_NAME=`mount|grep vfat|grep \/media\/${USER}|grep -o "\/dev\/sd[a-z][0-9]"`
MOUNT_PATH=`mount|grep vfat|grep ${DEVICE_NAME}|grep -o "\/media\/${USER}/[A-Z0-9a-z-]*"`
DEVICE_INFO=`udevadm info --query=all -n ${DEVICE_NAME}`
IS_MMC=`echo ${DEVICE_INFO}|grep -i mmc`
# if [[ -z ${IS_MMC} ]]; then
echo "MMC was detected to be the device ${DEVICE_NAME}."
echo "Is this correct?"
echo "press <enter> to continue."
echo "press <ctrl+c> to cancel."
read
cp ${UBOOTNAME} ${MOUNT_PATH}
sync
umount ${MOUNT_PATH}
# fi
clear
echo
echo "NOW YOU SHOULD HAVE:"
echo " - HW is powered OFF."
echo " - imx file in your SD Card."
echo " - SD Card is attached to your PC/WORKSTATION."
echo
echo "The SD Card has been written and umounted."
echo "Remove the SD Card from your PC and connect it to YOUR HW!"
echo
echo "press <enter> when you have connected SD CARD TO HW!"
read
clear
echo
echo "NOW YOU SHOULD HAVE:"
echo " - HW is powered OFF."
echo " - imx file in your SD Card."
echo " - SD Card is attached to your HW."
echo
echo "Run following command in separate terminal window:"
echo " sudo minicom -w -b 115200 -o -D /dev/ttyUSB0"
echo "Make sure that HARDWARE CONTROL FLOW is disabled."
echo
echo "press <enter> when minicom is ready."
read
clear
echo
echo "NOW YOU SHOULD HAVE:"
echo " - HW is powered OFF."
echo " - imx file in your SD Card."
echo " - SD Card is attached to your HW."
echo " - minicom is opened in another window"
echo
echo "Connect mini-usb cable to the HW and between your PC."
echo "That will be used to transferred the u-boot image to the hw."
echo
echo "press <enter> to continue"
read
clear
echo
echo "NOW YOU SHOULD HAVE:"
echo " - HW is powered OFF."
echo " - imx file in your SD Card."
echo " - SD Card is attached to your HW."
echo " - minicom is opened in another window"
echo " - you have connected mini-usb cable to the otg port of the hw."
echo
echo "Please POWER ON the hardware now!"
echo
echo "press <enter> to continue once you have powered on the hw."
read
clear
echo
echo "NOW YOU SHOULD HAVE:"
echo " - HW is powered ON."
echo " - imx file in your SD Card."
echo " - SD Card is attached to your HW."
echo " - minicom is opened in another window"
echo " - you have connected mini-usb cable to the otg port of the hw."
echo
echo "Check that you have following in dmesg."
echo
echo "This line for the serial port:"
echo [ 3328.900742] pl2303 3-2:1.0: pl2303 converter detected
echo [ 3328.931587] usb 3-2: pl2303 converter now attached to ttyUSB0
echo
echo "This line for the usb otg for flashing the u-boot."
echo [ 4582.907029] hid-generic 0003:15A2:0054.0002: hiddev0,hidraw1: USB HID v1.10 Device [Freescale SemiConductor Inc SE Blank ARIK] on usb-0000:03:00.0-4.1/input0
echo
echo "press <enter> to continue"
read
clear
echo
echo "NOW YOU SHOULD HAVE:"
echo " - HW is powered ON."
echo " - imx file in your SD Card."
echo " - SD Card is attached to your HW."
echo " - minicom is opened in another window"
echo " - you have connected mini-usb cable to the otg port of the hw."
echo " - both usb-serial adapter and the usb-freescale have been detected by the PC/Workstation"
echo
echo "Once you continue from this step monitor the minicom as it should boot."
echo
echo "press <enter> to continue"
read
clear
sudo imx_usb ${UBOOTNAME}
echo
clear
echo
echo "NOW YOU SHOULD HAVE:"
echo " - HW is powered ON."
echo " - imx file in your SD Card."
echo " - SD Card is attached to your HW."
echo " - minicom is opened in another window"
echo " - you have connected mini-usb cable to the otg port of the hw."
echo " - both usb-serial adapter and the usb-freescale have been detected by the PC/Workstation"
echo " - HW has been booted via usb cable to u-boot."
echo
echo "Now you device is running the u-boot."
echo "See the minicom window now! You should have:"
echo "--8<---"
echo "Halti R&D >"
echo "--8<---"
echo
echo "press <enter> to continue"
read
clear
echo
echo "NOW YOU SHOULD HAVE:"
echo " - HW is powered ON."
echo " - imx file in your SD Card."
echo " - SD Card is attached to your HW."
echo " - minicom is opened in another window"
echo " - you have connected mini-usb cable to the otg port of the hw."
echo " - both usb-serial adapter and the usb-freescale have been detected by the PC/Workstation"
echo " - HW has been booted via usb cable to u-boot."
echo " - u-boot is NOT installed"
echo
echo "NEXT step is to actually flash the u-boot on the hw."
echo "as now you have just booted the hw with the u-boot."
echo
echo "You can read more at"
echo " https://confluence.link-motion.com/display/HH/Halti%3A+SPI+NOR"
echo
echo "press <enter> to continue"
clear
echo
echo "NOW YOU SHOULD HAVE:"
echo " - HW is powered ON."
echo " - imx file in your SD Card."
echo " - SD Card is attached to your HW."
echo " - minicom is opened in another window"
echo " - you have connected mini-usb cable to the otg port of the hw."
echo " - both usb-serial adapter and the usb-freescale have been detected by the PC/Workstation"
echo " - HW has been booted via usb cable to u-boot."
echo " - u-boot is NOT installed"
echo
echo "Then you execute following commands via minicom connection:"
echo "---8<---"
echo load mmc 0:1 0x10008000 ${UBOOTNAME}
echo sf probe
echo sf erase 0 0x100000
echo sf write 0x10008000 0x400 \$\{filesize\}
echo "---8<---"
echo
echo "press <enter> once you have executed the commands above."
read
clear
echo
echo "NOW YOU SHOULD HAVE:"
echo " - HW is powered ON."
echo " - imx file in your SD Card."
echo " - SD Card is attached to your HW."
echo " - minicom is opened in another window"
echo " - you have connected mini-usb cable to the otg port of the hw."
echo " - both usb-serial adapter and the usb-freescale have been detected by the PC/Workstation"
echo " - HW has been booted via usb cable to u-boot."
echo " - u-boot is NOW INSTALLED ON THE HW!"
echo
echo "You should see following in minicom:"
echo "--8<--"
echo "Halti R&D > load mmc 0:1 0x10008000 ${UBOOTNAME}"
echo "reading ${UBOOTNAME}"
echo "461824 bytes read in 39 ms (11.3 MiB/s)"
echo "Halti R&D > sf probe"
echo "SF: Detected M25P16 with page size 256 Bytes, erase size 64 KiB, total 2 MiB"
echo "Halti R&D > sf erase 0 0x100000"
echo "SF: 1048576 bytes @ 0x0 Erased: OK"
echo "Halti R&D > sf write 0x10008000 0x400"
echo "device 0 offset 0x400, size 0x1ffc00"
echo "SF: 2096128 bytes @ 0x400 Written: OK"
echo "Halti R&D > "
echo "--8<--"
echo
echo "press <enter> to continue"
read
clear
echo
echo "NOW YOU SHOULD HAVE:"
echo " - HW is powered ON."
echo " - imx file in your SD Card."
echo " - SD Card is attached to your HW."
echo " - minicom is opened in another window"
echo " - you have connected mini-usb cable to the otg port of the hw."
echo " - both usb-serial adapter and the usb-freescale have been detected by the PC/Workstation"
echo " - HW has been booted via usb cable to u-boot."
echo " - u-boot is NOW INSTALLED ON THE HW!"
echo " - DIP switch is still in the boot-otg state."
echo
echo "1) Power off the hardware"
echo "2) Now toggle the DIP switch back to regular boot."
echo
echo " FROM PINK CABLE SIDE TO GRAY CABLE SIDE."
echo
echo " Set DIP switch to REGULAR position."
echo " https://confluence.link-motion.com/display/HH/HW+Setup%3A+DIP+Switch"
echo
echo "press <enter> once you have restored the dip switch."
read
clear
echo
echo "NOW YOU SHOULD HAVE:"
echo " - HW is powered OFF."
echo " - imx file in your SD Card."
echo " - SD Card is attached to your HW."
echo " - minicom is opened in another window"
echo " - you have connected mini-usb cable to the otg port of the hw."
echo " - both usb-serial adapter and the usb-freescale have been detected by the PC/Workstation"
echo " - HW has been booted via usb cable to u-boot."
echo " - u-boot is NOW INSTALLED ON THE HW!"
echo " - DIP switch is now in regular boot mode"
echo
echo "1) Remove SD Card"
echo "2) Power on the hardware."
echo
echo "press <enter> once you have power cycled the hw."
read
clear
echo
echo "NOW YOU SHOULD HAVE:"
echo " - HW is powered ON."
echo " - imx file in your SD Card."
echo " - SD Card is NO LONGER CONNECTED TO ANYTHING"
echo " - minicom is opened in another window"
echo " - you have connected mini-usb cable to the otg port of the hw."
echo " - both usb-serial adapter and the usb-freescale have been detected by the PC/Workstation"
echo " - u-boot is NOW INSTALLED ON THE HW!"
echo " - DIP switch is now in regular boot mode"
echo " - HW has been booted via internal memory"
echo
echo "You should now see in minicom the booted u-boot!"
echo "Execute following in the minicom connection to uboot:"
echo " env default -a"
echo " env save"
echo
echo "press <enter> once you have executed those commands."
read
clear
echo
echo "You should see following in the minicom:"
echo "--8<--"
echo "Halti R&D > env default -a"
echo "## Resetting to default environment"
echo "Halti R&D > env save"
echo "Saving Environment to SPI Flash..."
echo "SF: Detected M25P16 with page size 256 Bytes, erase size 64 KiB, total 2 MiB"
echo "Erasing SPI flash...Writing to SPI flash...done"
echo "Halti R&D >"
echo "--8<--"
echo
echo "You have successfully now installed the u-boot."
echo "bye, bye."
| true
|
c1f46d56b4c5e8d5a8db199e3cb8799162280e1b
|
Shell
|
girishetty/mytools
|
/Scripts/hudson_scripts/scripts/BG2_CDP_AMP_02/BG2_CDP_AMP_02.sh
|
UTF-8
| 3,112
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
GitBranch=$1
BuildSrcScript=$2
ForceInitGit=$3
DoRelease=$4
echo "Hudson Env Variables"
echo "BUILD_NUMBER=$BUILD_NUMBER"
echo "BUILD_ID=$BUILD_ID"
echo "JOB_NAME=$JOB_NAME"
echo "WORKSPACE=$WORKSPACE"
echo "HUDSON_URL=$HUDSON_URL"
echo "JOB_URL=$JOB_URL"
echo "HUDSON_USER=$HUDSON_USER"
MRVL_DIR="$WORKSPACE/MRVL"
BOOT_FLOW_DIR="$WORKSPACE/boot_flow"
BRANCH_VER="2000"
JobDir="/tftpboot/hudson/$JOB_NAME"
BaseDir="$JobDir/$BUILD_NUMBER"
ScriptDir="/home/$USER/scripts/$JOB_NAME"
TarBall="linux_sdk_${BRANCH_VER}_${BUILD_NUMBER}_bg2cdp_dngle.tgz"
BuildScript="$BaseDir/build_${BRANCH_VER}_${BUILD_NUMBER}.sh"
echo "TarBall=$TarBall"
echo "BuildScript=$BuildScript"
echo "GitBranch=$GitBranch"
echo "BuildSrcScript=$BuildSrcScript"
mkdir -p $JobDir 2>/dev/null
SCRIPT_LOG=`pwd`"/script.log"
echo "script begin" > $SCRIPT_LOG
date >> $SCRIPT_LOG
ls -la >> $SCRIPT_LOG
pwd >> $SCRIPT_LOG
echo >> $SCRIPT_LOG
echo "Hudson Env Variables" >> $SCRIPT_LOG
echo "BUILD_NUMBER=$BUILD_NUMBER" >> $SCRIPT_LOG
echo "BUILD_ID=$BUILD_ID" >> $SCRIPT_LOG
echo "JOB_NAME=$JOB_NAME" >> $SCRIPT_LOG
echo "WORKSPACE=$WORKSPACE" >> $SCRIPT_LOG
echo "HUDSON_URL=$HUDSON_URL" >> $SCRIPT_LOG
echo "JOB_URL=$JOB_URL" >> $SCRIPT_LOG
echo "HUDSON_USER=$HUDSON_USER" >> $SCRIPT_LOG
echo >> $SCRIPT_LOG
echo "BaseDir=$BaseDir" >> $SCRIPT_LOG
echo "TarBall=$TarBall" >> $SCRIPT_LOG
echo "BuildScript=$BuildScript" >> $SCRIPT_LOG
echo "TarBall=$TarBall"
# If there is no tar ball, then build it
if [ ! -f "$TarBall" ]; then
echo "repo" >> $SCRIPT_LOG
$ScriptDir/init_anchovy_cdp_git.sh $MRVL_DIR $GitBranch $BOOT_FLOW_DIR $ForceInitGit
echo "generate_read_me.sh" #>> $SCRIPT_LOG
# generate ReadMe_<branch_tag>_<build_number>.sh
$ScriptDir/generate_read_me.sh $BaseDir "${BRANCH_VER}_${BUILD_NUMBER}" $MRVL
cd $WORKSPACE
BuildSDK="$MRVL_DIR/build_scripts/$BuildSrcScript"
[ ! -f "$BuildSDK" ] && echo "No build script: $BuildSDK" && exit 1
echo "$BuildSDK" >> $SCRIPT_LOG
"$BuildSDK" $DoRelease $BUILD_NUMBER
[ $? -gt 0 ] && exit 1
echo "Success ---- $BuildSDK" >> $SCRIPT_LOG
fi
echo "check if $TarBall exist" >> $SCRIPT_LOG
[ ! -f "$TarBall" ] && echo "No tar ball" && exit 1
echo "destination is $BaseDir" >> $SCRIPT_LOG
mkdir -p $BaseDir 2>/dev/null
echo "cp $TarBall to $BaseDir" >> $SCRIPT_LOG
cp $TarBall $BaseDir
echo "generate_build_script" >> $SCRIPT_LOG
$ScriptDir/generate_build_script.sh $BuildScript $BaseDir "${BRANCH_VER}_${BUILD_NUMBER}"
echo "run build script $BuildScript" >> $SCRIPT_LOG
$BuildScript
[ $? -gt 0 ] && exit 1
ls -al $BaseDir >> $SCRIPT_LOG
ls -al $BaseDir
echo "ftp the generated files to cloud storage" >> $SCRIPT_LOG
echo "$ScriptDir/ftp_pkg_to_storage.sh $ScriptDir $BaseDir $JOB_NAME $BUILD_NUMBER $JOB_URL$BUILD_NUMBER/" >> $SCRIPT_LOG
$ScriptDir/ftp_pkg_to_storage.sh $ScriptDir $BaseDir $JOB_NAME $BUILD_NUMBER "$JOB_URL$BUILD_NUMBER/"
[ $? -gt 0 ] && exit 1
echo "cleanup after proper upload" >> $SCRIPT_LOG
echo "$ScriptDir/cleanup_post_build.sh $BaseDir $JOB_NAME $BUILD_NUMBER" >> $SCRIPT_LOG
$ScriptDir/cleanup_post_build.sh $BaseDir $JOB_NAME $BUILD_NUMBER
| true
|
73f009c86338ecf48c2fabd2ade329f94242e405
|
Shell
|
leighmatth/yggdrasil
|
/yggdrasil/examples/ascii_io/ascii_io.sh
|
UTF-8
| 920
| 3.234375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
export YGG_DEBUG="INFO"
export YGG_NAMESPACE="AsciiIO"
yaml=
# ----------------Your Commands------------------- #
case $1 in
"" | -a | --all )
echo "Running C, Python, C++, Matlab integration"
yaml='ascii_io_all.yml'
;;
--all-nomatlab )
echo "Running C, Python, C++ integration"
yaml='ascii_io_all_nomatlab.yml'
;;
-p | --python )
echo "Running Python"
yaml='ascii_io_python.yml'
;;
-m | --matlab )
echo "Running Matlab"
yaml='ascii_io_matlab.yml'
;;
-c | --gcc )
echo "Running C"
yaml='ascii_io_c.yml'
;;
--cpp | --g++ )
echo "Running C++"
yaml='ascii_io_cpp.yml'
;;
-r | -R )
echo "Running R"
yaml='ascii_io_r.yml'
;;
-f | --fortran )
echo "Running Fortran"
yaml='ascii_io_fortran.yml'
;;
* )
echo "Running ", $1
yaml=$1
;;
esac
yggrun $yaml
cat "${TMPDIR}output_file.txt"
cat "${TMPDIR}output_table.txt"
cat "${TMPDIR}output_array.txt"
| true
|
30eb045f44a98c042f6dfdabee0d9ff185904cf4
|
Shell
|
mazonka/code
|
/legacy/script/vlctran.sh
|
UTF-8
| 976
| 3.375
| 3
|
[] |
no_license
|
#!/bin/sh
scale=,scale=0.5
#stop=--stop-time=10
#stop="--start-time=10 --stop-time=20"
#noface="-I dummy"
vlc="c:/Program Files/VideoLAN/VLC/vlc.exe"
vlctran()
{
file=$1
if test -f "$file"; then
#audio=,acodec=mp4a,ab=192,channels=2
#subt=,scodec=dvbsub,senc=dvbsub
#vbr=,vb=1024
#fps=,fps=24
deint=,deinterlace
sync=,audio-sync
codec=h264
#codec=mp4v
tran1=vcodec=$codec$vbr$fps$deint$scale$audio$subt$sync
tran2=mux=mp4,dst=vlctran_$$.avi,access=file
tran=--sout=#transcode{$tran1}:std{$tran2}
cmd="$noface -vvv $stop $tran vlc://quit"
echo "$vlc" "$file" $cmd
"$vlc" "$file" $cmd
mv vlctran_$$.avi "$file".avi
else
echo ERROR: no file "$file"
fi
}
if [ "$1" = "" ]
then
if test -f vlctran.lst
then
while read LINE
do
echo ""
echo "$LINE"
vlctran "$LINE"
done < vlctran.lst
else
echo Usage: sh vlctran.sh file.avi - convert one file
echo Usage: sh vlctran.sh - convert all files in list vlctran.lst
fi
else
vlctran "$1"
fi
| true
|
7b42fc4ec484e8f61cdf4adeea184d29320b6703
|
Shell
|
SCALARA-GmbH/pipeline
|
/github_actions_runner/remove.sh
|
UTF-8
| 712
| 3.6875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
. ./.env
if [[ ! -d "actions-runner" ]]; then
echo "'actions-runner' dir missing; exiting";
exit 1;
fi
for var in ORGANIZATION REPOSITORY_NAME GITHUB_TOKEN;
do
[[ -z "${!var}" ]] && {
>&2 echo "missing environment variable: $var";
missing_var=1
}
done
if [[ "${missing_var}" -eq 1 ]] ;then
>&2 echo "exiting"
exit 1
fi
RUNNER_TOKEN_URI="https://api.github.com/repos/${ORGANIZATION}/${REPOSITORY_NAME}/actions/runners/registration-token"
# runner token
RUNNER_TOKEN=$(curl -X POST -H "Authorization: token ${GITHUB_TOKEN}" "${RUNNER_TOKEN_URI}" | jq -r .token)
# remove runner
cd actions-runner
sudo ./svc.sh uninstall
./config.sh remove --token "${RUNNER_TOKEN}"
| true
|
3c1fc83680b2eea536250c0cbcf03a7adaf1cab8
|
Shell
|
vrachieru/nvram
|
/led-control.sh
|
UTF-8
| 512
| 3.5625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
show_help() {
echo "usage:"
echo "$0 -on Turn leds on"
echo "$0 -off Turn leds off"
echo ""
}
case ${1} in
-on)
nvram set led_disable=0
service restart_leds
echo "Leds are now on"
logger -s -t leds "on"
;;
-off)
nvram set led_disable=1
service restart_leds
echo "Leds are now off"
logger -s -t leds "off"
;;
*)
show_help
exit 1
;;
esac
| true
|
fda9fe0e128bc4c546f490bb4e77f0ec2467b4ff
|
Shell
|
mrebscher/laboratoria
|
/formularz.sh
|
UTF-8
| 1,453
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
i=$[1]
while [ $i == 1 ]; do
echo 'O której godzinie powinieneś jeść posiłki?'
echo 'śniadanko o:'
read sniadanie
echo 'drugie śniadanie o:'
read dsniadanie
if [ $sniadanie -ge $dsniadanie ]
then
echo 'Drugie śniadanie powinno być po śniadaniu.'
echo 'Popraw wprowadzane godziny.'
else
echo 'obiad o:'
read obiad
if [ $dsniadanie -ge $obiad ]
then
echo 'Obiad powinien być po drugim śniadaniu.'
echo 'Jeszcze raz wpisz godziny.'
else
echo 'podwieczorek o:'
read podw
if [ $obiad -ge $podw]
then
echo 'Podwieczorek powinien być po obiedzie.'
echo 'Jeszcze raz wpisz godziny.'
else
echo 'kolacja o:'
read kolacja
if [ $podw -ge $kolacja ]
then
echo 'To kolacja powinna być ostatnia.'
echo 'Jeszcze raz wpisz godziny.'
else
echo 'to wszystko!'
i=$[0]
fi
fi
fi
fi
done
touch jedzenie.txt
echo 'Śniadanie o godzinie:' > jedzenie.txt
echo "$sniadanie" >> jedzenie.txt
echo 'Drugie śniadanie o godzinie:' >> jedzenie.txt
echo "$dsniadanie" >> jedzenie.txt
echo 'Obiad o godzinie:' >> jedzenie.txt
echo "$obiad" >> jedzenie.txt
echo 'Podwieczorek o godzinie:' >> jedzenie.txt
echo "$podw" >> jedzenie.txt
echo 'Kolacja o godzinie:' >> jedzenie.txt
echo "$kolacja" >> jedzenie.txt
echo 'Gratuluję! Właśnie ustaliłeś o której codziennie chcesz jeść posiłki.'
echo 'To jeden z najważniejszych kroków podczas prowadzenia zdrowego trybu życia.'
echo 'Powodzenia w dotrzymaniu terminów! '
| true
|
30748be9b02017698f5d0f7684396d56e87f6a34
|
Shell
|
Arunsh93/Day8-Shell-Programs
|
/birthdayMonth.sh
|
UTF-8
| 2,091
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash
declare -A birthDate
janCount=0
febCount=0
marCount=0
aprCOunt=0
mayCount=0
junCount=0
julCOunt=0
augCount=0
sepCount=0
octCount=0
novCount=0
decCount=0
for ((i=0; i<50; i++))
do
randomMonth=$((RANDOM%12+1))
case $randomMonth in
1)
janArray[$janCount]="person$i"
janCount=$((janCount+1))
birthDate["jan"]=${janArray[@]};;
2)
febArray[$febCount]="person$i"
febCount=$((febCount+1))
birthDate["feb"]=${febArray[@]};;
3)
marArray[$marCount]="person$i"
marCount=$((marCount+1))
birthDate["mar"]=${marArray[@]};;
4)
aprArray[$aprCount]="person$i"
aprCount=$((aprCount+1))
birthDate["apr"]=${aprArray[@]};;
5)
mayArray[$mayCount]="person$i"
mayCount=$((mayCount+1))
birthDate["may"]=${mayArray[@]};;
6)
junArray[$junCount]="person$i"
junCount=$((junCount+1))
birthDate["jun"]=${junArray[@]};;
7)
julArray[$julCount]="person$i"
julCount=$((julCount+1))
birthDate["jul"]=${julArray[@]};;
8)
augArray[$augCount]="person$i"
augCount=$((augCount+1))
birthDate["aug"]=${augArray[@]};;
9)
sepArray[$sepCount]="person$i"
sepCount=$((sepCount+1))
birthDate["sep"]=${sepArray[@]};;
10)
octArray[$octCount]="person$i"
octCount=$((octCount+1))
birthDate["oct"]=${octArray[@]};;
11)
novArray[$novCount]="person$i"
novCount=$((novCount+1))
birthDate["nov"]=${novArray[@]};;
12)
decArray[$decCount]="person$i"
decCount=$((decCount+1))
birthDate["dec"]=${decArray[@]};;
esac
done
read -p "Which month data do you need from 1-12 : " userMonth
case $userMonth in
1)
echo "Jan ${birthDate["jan"]}" ;;
2)
echo "Feb ${birthDate["feb"]}" ;;
3)
echo "Mar ${birthDate["Mar"]}" ;;
4)
echo "Apr ${birthDate["Apr"]}" ;;
5)
echo "May ${birthDate["May"]}" ;;
6)
echo "Jun ${birthDate["jun"]}" ;;
7)
echo "Jul ${birthDate["Jul"]}" ;;
8)
echo "Aug ${birthDate["Aug"]}" ;;
9)
echo "Sep ${birthDate["Sep"]}" ;;
10)
echo "Oct ${birthDate["Oct"]}" ;;
11)
echo "Nov ${birthDate["Nov"]}" ;;
12)
echo "Dec ${birthDate["Dec"]}" ;;
esac
| true
|
3a2abe71663d287cd5ab4009a6019baad17f4616
|
Shell
|
MissionCriticalCloud/bubble-cookbook
|
/files/default/rc.local/rc.local
|
UTF-8
| 1,605
| 3.234375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# THIS FILE IS ADDED FOR COMPATIBILITY PURPOSES
#
# It is highly advisable to create own systemd services or udev rules
# to run scripts during boot instead of using this file.
#
# In contrast to previous versions due to parallel execution during boot
# this script will NOT be run after all other services.
#
# Please note that you must run 'chmod +x /etc/rc.d/rc.local' to ensure
# that this script will be executed during boot.
touch /var/lock/subsys/local
# Wait until libvirt starts the network on virbr0 before bringing up the interfaces
while ! ip a | grep -Eq ': virbr0:'; do
echo "Waiting for virbr0 - network interface might be down..."
sleep 1
done
# Wait until libvirt starts the network on virbr0 before bringing up the interfaces
while ! ip a | grep -Eq ': virbr1:'; do
echo "Waiting for virbr1 - network interface might be down..."
sleep 1
done
# bring up virbr0.50 for public traffic in Cosmic
ifup virbr0.50
# bring up virbr1.50 for public traffic in Cosmic
ifup virbr1.50
# bring up tap_vpn for accessing vm's in virbr0
ifup tap_vpn
# Set iptable rules for the public interface
iptables -t nat -I POSTROUTING -s 100.64.0.0/22 ! -d 100.64.0.0/22 -j MASQUERADE
iptables -t nat -I POSTROUTING -s 192.168.22.0/23 ! -d 192.168.22.0/23 -j MASQUERADE
iptables -A INPUT -s 192.168.22.1/32 -p tcp -m state --state NEW -m multiport --dports 111,892,2049,32803 -j REJECT --reject-with icmp-port-unreachable
iptables -A INPUT -s 192.168.23.1/32 -p tcp -m state --state NEW -m multiport --dports 111,892,2049,32803 -j REJECT --reject-with icmp-port-unreachable
| true
|
95f823618e532c47901f2440da6bdf68490754b6
|
Shell
|
Froskekongen/compsetup
|
/nvidia_nonstandard.sh
|
UTF-8
| 1,516
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
## Assumes access to runfile installation of cuda toolkit and cudnn tarfile
runfiledir=$1
runfilename=$2
user=$3
cd $runfiledir
chmod +x ${runfilename}
./${runfilename} --tar mxvf
cp InstallUtils.pm /usr/lib/x86_64-linux-gnu/perl-base
./${runfilename} --override --silent --toolkit
ln -s /usr/local/cuda-8.0 /usr/local/cuda
LINE1="#if __GNUC__ > 5"
LINE2="#error -- unsupported GNU version! gcc versions later than 5 are not supported!"
LINE3="#endif \/\* __GNUC__ > 5 \*\/"
file1="/usr/local/cuda-8.0/include/host_config.h"
#file1="host_config.h"
sed -i "/${LINE1}/ { c \
//${LINE1}
}" ${file1}
sed -i "/${LINE2}/ { c \
//${LINE2}
}" ${file1}
sed -i "/${LINE3}/ { c \
//${LINE3}
}" ${file1}
tar xvfz cudnn-8.0-linux-x64-v6.0.tgz
cp -r cuda/* /usr/local/cuda-8.0
echo "/usr/local/cuda/lib64" >> /etc/ld.so.conf.d/cuda.conf && \
ldconfig
## Not sure which option is best
echo "deb [arch=amd64] http://storage.googleapis.com/bazel-apt stable jdk1.8" | tee /etc/apt/sources.list.d/bazel.list
curl https://bazel.build/bazel-release.pub.gpg | sudo apt-key add -
apt-get update && apt-get install -y --no-install-recommends bazel
#wget 'https://github.com/bazelbuild/bazel/releases/download/0.5.2/bazel-0.5.2-installer-linux-x86_64.sh'
#chmod +x bazel-0.5.2-installer-linux-x86_64.sh
#./bazel-0.5.2-installer-linux-x86_64.sh
EXTRA_NVCCFLAGS="-Xcompiler -std=c++98 -D__CORRECT_ISO_CPP11_MATH_H_PROTO"
cd /home/${user}
git clone https://github.com/tensorflow/tensorflow.git
cd tensorflow
| true
|
6c0b5e193ed975cd760c92c08c8edf533b17f9e2
|
Shell
|
packtory/packtory
|
/bin/support/fpm
|
UTF-8
| 465
| 3.359375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
packtory_dir=$(cd "$(dirname $0)/../../"; pwd)
packtory_bundle_path=$packtory_dir/bundle
if [ -d "$packtory_bundle_path" ]; then
export PACKTORY_BUNDLE_PATH="$packtory_bundle_path"
fpm_ruby=$(cd "$(dirname $0)"; pwd)/fpm_vendor_ruby
else
fpm_ruby="$(which fpm)"
if [ -z "$fpm_ruby" ]; then
fpm_ruby="fpm"
fi
fi
if [ -n "$FPM_USE_RUBY_PATH" ]; then
$FPM_USE_RUBY_PATH $fpm_ruby "$@"
else
$fpm_ruby "$@"
fi
| true
|
7963e570541150daea086d4549acaf1d65c00b29
|
Shell
|
thiagohersan/CPM-Hackshop
|
/tcpsniff/tcpsniff.sh
|
UTF-8
| 742
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
pythonpid=`ps | grep -v awk | awk '/python/{print $1}'`
kill -9 $pythonpid
python -m SimpleHTTPServer &
pythonpid=$!
cnt=0
while [ $cnt -lt 10 ]
do
sudo tcpdump -i eth0 'port 80' -w out.cap &
killpid=$!
sleep 30
sudo kill -9 $killpid
mkdir tmpFlowDir && cd tmpFlowDir
tcpflow -r ../out.cap
mkdir data
foremost -i * -o data
cd data
for t in {gif,png,jpg}
do
if [ -d $t ]
then
cp $t/* ../../imgs/
fi
done
cd ../../
rm index.html
cp dump.html index.html
cd imgs/
for file in `ls -t *`
do
sed -i.bak 's/<!--- XXX --->/<img src='\''\/imgs\/'$file\''>\'$'\n &/g' ../index.html
done
cd ../
sudo rm -rf *.bak tmpFlowDir out.cap
let cnt=cnt+1
echo $cnt
done
| true
|
1adc9faaa2a977ec8fce6f39be0951e739e46274
|
Shell
|
guskovd/habitat-plans
|
/habitat/sozu/plan.sh
|
UTF-8
| 649
| 2.859375
| 3
|
[] |
no_license
|
pkg_name=sozu
pkg_origin=guskovd
pkg_version=0.11.0
pkg_license=('Apache-2.0')
pkg_maintainer="Danil Guskov"
pkg_bin_dirs=(bin ctl)
pkg_upstream_url="https://github.com/sozu-proxy/sozu"
pkg_description="Sōzu HTTP reverse proxy, configurable at runtime, fast and safe, built in Rust. It will be awesome when it will be ready. Not So Secret Project! Ping us on gitter to know more https://www.sozu.io/"
pkg_deps=(
core/glibc
core/gcc-libs
)
pkg_build_deps=(
core/rust
)
do_build() {
return 0
}
do_install() {
cargo install sozu --root "${pkg_prefix}" --vers "${pkg_version}" -j"$(nproc)" --verbose
}
do_download() {
return 0
}
| true
|
3b6709f2e4006325fe563a36e6188413956d358f
|
Shell
|
jeson-lbb/demo
|
/Nginx-web-server/Nfs-server/scripts/cli_rsync_bak.sh
|
UTF-8
| 1,176
| 2.9375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Date: 01:17 2018-04-07
# Author: Created by Jeson
# Mail: 309769163@qq.com
# Funcsion:This scripts funcsion is nfs install
##############
export PATH="/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin"
LocalHost_IP=$(ifconfig eth0|sed -rn '2s/^.*ddr:(.*) B.*$/\1/p')
Time=$(date +%F_%a)
Backup_server_IP=$(awk '/Backup-server/{print $1}' /etc/hosts)
mkdir -p /backup/${Time} && cd /backup/${Time} \
&& tar -czf $(hostname)_tar.gz /html /var/spool/cron/root /etc/{sudoers,mail.rc,rc.d/rc.local,hosts,sysctl.conf,ssh/sshd_config} \
&& md5sum $(hostname)_tar.gz >$(hostname)_tar.gz.md5 \
&& rsync -az --password-file=/etc/rsync.password /backup/ rsync_backup@Backup-server::backup/$LocalHost_IP \
&& find /backup/ -type d -mtime +2 -name "*-*-*_*"|xargs rm -fr
RETVAL=$?
if [ $? -eq ${RETVAL} ];then
echo -e "LocalHost: $(hostname):${LocalHost_IP}\n > rsync $(hostname)_${Time}.tar.gz to ${Backup_server_IP} [ OK ]"|mail -s "$(hostname) rsync" 1270963692@qq.com
else
echo -e "LocalHost: $(hostname):${LocalHost_IP}\n > rsync $(hostname)_${Time}.tar.gz to ${Backup_server_IP} [ false ]"|mail -s "$(hostname) rsync" 1270963692@qq.com
fi
| true
|
681672c2575f40e69e1c7a297baaaab775755cfd
|
Shell
|
StephaneMoriceau/sdk-bot
|
/bots-client-sdk-js-18.2.3.0/configure
|
UTF-8
| 620
| 4.15625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
placeholderPublicPath="https://placeholder.public.path/"
if [ -z "$1" ]
then
echo "Please provide a path. (./configure <path>)"
exit 1
fi
# Make sure path ends with /
newPublicPath="${1%/}/"
outputFolder=${PWD}/`echo "$newPublicPath" | tr / _`
rm -rf $outputFolder
mkdir $outputFolder
for filename in $(find js-sdk -type f); do
baseFilename=${filename#js-sdk/}
mkdir -p "$(dirname "$outputFolder/$baseFilename")"
LC_ALL=C sed "s|${placeholderPublicPath}|${newPublicPath}|g" $filename > "$outputFolder/$baseFilename"
done
echo "Done! Files are available in ${outputFolder}"
| true
|
18a512b139c4d8ccf5deb09f3906987e6e116cdc
|
Shell
|
redhat-developer/dotnet-regular-tests
|
/tool-dev-certs/test.sh
|
UTF-8
| 622
| 3.859375
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
if [ -f /etc/profile ]; then
source /etc/profile
fi
# Enable "unofficial strict mode" only after loading /etc/profile
# because that usually contains lots of "errors".
set -euo pipefail
set -x
IFS='.-' read -ra VERSION_SPLIT <<< "$1"
VERSION="${VERSION_SPLIT[0]}.${VERSION_SPLIT[1]}"
# it's okay if the tool is already installed
# if the tool fails to install, it will fail in the next line
dotnet tool install --global dotnet-dev-certs --version "${VERSION}.*" || true
dotnet dev-certs
if [ $? -eq 1 ]; then
echo "FAIL: dotnet tool not found"
exit 1
fi
echo "PASS: dotnet tool dev-certs"
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.