blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
aa97f0f9584fb5f23b23f7faed9bd85ea68e0d30
|
Shell
|
bpei1989/MyCookbook
|
/Shell/6. 杂项.sh
|
UTF-8
| 7,036
| 3.234375
| 3
|
[] |
no_license
|
1. tar
tar -cf out.tar file1 file2...
文件名必须紧跟在f后面,即-cf不是-fc
tar -rvf o.tar new_file # r向已生成的tar中加文件,v或vv是显示详细信息
tar -xf a.tar #把归档内容提取到当前目录,-x是提取
tar -xvf file.tar file1 file3 #只提取file1 file3
tar -Af t1.tar t2.tar #把t2合并到t1中
tar -f a.tar --delete file1 file3 #把file1 file3从a中删除
tar -cf t.tar --exclude "*.txt" #不打包txt文件
tar主要记住核心,-x,-c,-f三个
-z gzip压缩
-j bunzip2压缩
可以不明确给出压缩格式,用-a表示自动选择压缩
2. gunzip gzip
gzip filename #压缩成filename.gz
gunzip filename.gz #解压缩,删除filename.gz并生成filename的未压缩形式
tar -czvvf tz.tar.gz [file] #用gzip压缩,-c创建tar,vv详细,f紧跟文件名
tar -xavvf tz.tar.gz -c extract-dir #x 解包,a是采取自动识别,也可用-z即gzip解压,一般用a
3. bunzip bzip
与gzip类似
bzip2 filename
bunzip2 filename
tar -xavvf ...
4. zip
类似
zip file.zip file
unzip file.zip
5. ssh
ssh自动化认证
1. 创建ssh密钥
2. 将生成的公钥传输到远程主机,加入到文件~/.ssh/authorized_keys中
$ssh-keygen -t rsa
输入密码生成密钥,~/.ssh/id_rsa.pub和~/.ssh/id_rsa已经生成,公钥添加到远程服务器的~/.ssh/authorized_keys中
6. 网络流量与端口
lsof -i #列出系统中开放的端口和服务的详细信息
netstat -tnp #列出开放的端口与服务
7. df du
du file1 file2 #找出文件占用的磁盘空间
du -a dir #文件夹
du默认字节, -h会更友好,KB,MB,GB
du -c file1 file2或dir #所有文件总共占有空间大小
-c -a -h可合并使用,显示友好而且在最后一行显示总共多大
du还支持--max-depth --exclude -type等
df显示磁盘空间信息
df -h #常用
8. time COMMAND
time ls#计算ls耗费的时间
9. who
who会打印出当前登录用户的相关信息,伪终端,登录IP
who
root pts/2 2016-06-27 22:36 (pek2-office-04-dhcp221.eng.vmware.com)
users会打印登录主机的用户列表
#users
root
uptime #打印系统已运行了多久等信息
uptime
07:39:24 up 6 days, 3:18, 1 user, load average: 0.00, 0.01, 0.05
last提供登录信息
last root
root pts/2 pek2-office-04-d Mon Jun 27 22:36 still logged in
root pts/2 pek2-office-04-d Wed Jun 22 04:25 - 15:15 (5+10:50)
root pts/1 pek2-office-04-d Wed Jun 22 04:24 - 15:15 (5+10:51)
10. history
#!/bin/bash
#top command,~/.bash_history记录
printf "COMMAND\tCOUNT\n";
cat ~/.bash_history | awk '{ list[$1]++; } #默认分隔符,$1,第一个区间
END{
for(i in list) {
printf("%s\t%d\n",i,list[i]);
}
}' | sort -nrk 2 | head # sort -n file.txt # -k指定了排序按照哪一键来进行 -nr表明按照数字,采用逆序形式排
11. 列出1小时内占用CPU最多的10个程序
#用ps命令
ps -eo comm,pcpu #以command cpu显示
结果
updatedb.mlocat 2.2
vdnet 0.2
perl 60.0
用关联数组
#!/bin/bash
ps -eo comm,pcpu | tail -n +2 >> /tmp/cpu.usage #tail -n +2将ps头部去除,+2从第2行开始显示文件
cat /tmp/cpu.usage | \
awk '{ process[$1]=$2; }
END{
for(i in process){
printf("%-20s %s\n",i,process[i]);
}
}' | sort -nrk 2 | head #-k指定了排序按照哪一键来进行 -nr表明按照数字,采用逆序形式排
12. watch
watch COMMAND#监视命令输出内容,默认两秒更新
watch 'ls -l'
13. 文件访问记录
inotifywait收集文件访问信息,类似watch
14. logrotate(了解)
把日志文件的大小限制在给定size,自动新建文件
/etc/logroate.d配置文件控制命令的配置
15. syslog(了解)
/var/log/保存日志文件,syslog记录应用进程日志
16. cron
一个crontab条目
第1列表示分钟1~59 每分钟用*或者 */1表示
第2列表示小时1~23(0表示0点)
第3列表示日期1~31
第4列表示月份1~12
第5列标识号星期0~6(0表示星期天)
第6列要运行的命令
分钟 小时 天 月 工作日 命令
02 * * * * /home/sylynux/test.c #每天个小时的第2分钟执行
00 5,6,7 * * /home/sylynux/test.c #每天5、6、7小时执行
00 */12 * * 0 t.sh #周日每隔12个小时执行
17. 进程
top ps pgrep
ps -o 指定显示的列比如ps -eo comm,pcpua
ps 打印当前终端的进程
ps -ax 全部进程
ps -eo comm,pcpu --sort -pcpu | head #注意和sort搭配
ps -C command #打印出具体进程command的信息
ps -C COMMAND_NAME -o pid
top 显示的信息多一些,有mem等
pgrep可以获取一个特定命令的进程ID列表
pgrep md #找到所有md相关的进程号
36
42
1337
pgrep -u root #打印root的所有进程
ps -eLf # 显示进程和线程,L是显示线程
UID PID PPID LWP C NLWP STIME TTY TIME CMD
root 1 0 1 0 1 Jun22 ? 00:00:04 /sbin/init
root 2 0 2 0 1 Jun22 ? 00:00:00 [kthreadd]
#NLWP进程的线程数量,NLP表示ps输出中每个条目的线程ID
ps -eLF --sort -nlwp #注意与sort的结合
打印进程的环境变量,e选项
ps -eo pid,cmd e | tail -n 3
18. kill
kill -l#列出所有可发送的信号
kill processID #kill进程
kill -s SIGNAL PID #发送指定信号
kill -9 PID#强制杀死进程,-9(SIGKILL)
19. which whereis file whatis
which 显示命令的位置
$which ls
/bin/ls
whereis 与which类似,不过更详细,显示命令手册,源代码等
whereis ls
ls: /bin/ls /usr/share/man/man1/ls.1.gz
file filename#显示文件详细信息
file /bin/ls
/bin/ls: ELF 32-bit LSB executable, Intel 80386, version 1 (SYSV), dynamically linked (uses shared libs), for GNU/Linux 2.6.24, BuildID[sha1]=0x5f580e4b387193789cb865afdebb75442e1d5516, stripped
whatis#简介命令
whatis ls
ls (1) - list directory contents
平均负载,系统中可运行进程总量的平均值,第一个之是一分钟内平均值,第二个五分钟内平均值,第三个15分钟内平均值
uptime
08:32:32 up 6 days, 4:12, 1 user, load average: 0.02, 0.03, 0.05
20. 系统
hostname #主机名
uname -n #内核版本,硬件架构
uname -a#内核发行版本
uname -m #主机类型
cat /proc/cpuinfo #CPU信息
cat /proc/meminfo #内存信息
cat /proc/partitions #硬盘分区
21. /proc
cat /proc/4295/environ #environ 环境变量
readlink /proc/4295/exe #cwd进程工作目录的链接
fd文件描述符
22. MySQL(用-u -p)
mysql -uuser -ppasswd -e"insert LogTable values(...)"
shell 连mysql
准备一个sql脚本,如update.sql,然后执行如下命令:
mysql -uroot -ppassword < update.sql
| true
|
27532ef030afea566185a5c1dcad0104a91dce68
|
Shell
|
BertrandKevin/Save-IMU-s-Data-Reach-RTK
|
/imu-bash
|
UTF-8
| 1,241
| 3.84375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Start/stop/restart wrapper for the "reach" IMU data-logging service.
# Usage: $0 (start|stop|restart)

# Service name.
# BUG FIX: the original read `SERVICE_NAME= ./reach` — the space after `=`
# makes the shell run `./reach` as a command with an empty SERVICE_NAME in
# its environment instead of assigning the value.
SERVICE_NAME=./reach
# Directory holding the service executables (same whitespace fix applied).
SERVICE_DIRECTORY=/home/root/Save-IMU-s-Data-Reach-RTK-master/./reach
# Startup script name, run from SERVICE_DIRECTORY.
SERVICE_STARTUP_SCRIPT=startup.sh
# Shutdown script name, run from SERVICE_DIRECTORY.
SERVICE_SHUTDOWN_SCRIPT=shutdown.sh

# Print usage help.
usage()
{
    echo "-----------------------"
    echo "Usage: $0 (stop|start|restart)"
    echo "-----------------------"
}

# BUG FIX: with no argument the original printed usage and then fell
# through to the case statement, printing usage a second time and exiting 0.
if [ -z "$1" ]; then
    usage
    exit 1
fi

# Launch the service via its startup script, restoring the caller's cwd.
service_start()
{
    echo "Starting service '${SERVICE_NAME}'..."
    OWD=`pwd`
    cd "${SERVICE_DIRECTORY}" && ./${SERVICE_STARTUP_SCRIPT}
    cd "$OWD"
    # NOTE(review): "successfully" is printed even if the startup script failed.
    echo "Service '${SERVICE_NAME}' started successfully"
}

# Stop the service via its shutdown script, restoring the caller's cwd.
service_stop()
{
    echo "Stopping service '${SERVICE_NAME}'..."
    OWD=`pwd`
    cd "${SERVICE_DIRECTORY}" && ./${SERVICE_SHUTDOWN_SCRIPT}
    cd "$OWD"
    echo "Service '${SERVICE_NAME}' stopped"
}

case "$1" in
    stop)
        service_stop
        ;;
    start)
        service_start
        ;;
    restart)
        service_stop
        service_start
        ;;
    *)
        usage
esac
exit 0
| true
|
2714512fe15f673a5e715baa12e88223783eca32
|
Shell
|
claudusd/docker-utils
|
/elasticsearch.sh
|
UTF-8
| 1,213
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
# BUG FIX: the shebang was "#/bin/bash" (missing '!'), so the kernel treated
# it as a plain comment and the script ran under whatever shell invoked it.
#
# Manage a single-node Elasticsearch 2.1 container named
# "default_elasticsearch".  Usage: elasticsearch.sh run|stop

# Absolute directory of this script; used to mount the bundled config dir.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

# Create and start the container.  If `docker run` fails (container name
# already taken), restart the existing container instead.
function run() {
    docker run -d -p 9200:9200 -p 9300:9300 --name="default_elasticsearch" -v "$DIR/config/elasticsearch":/usr/share/elasticsearch/config elasticsearch:2.1 2> /dev/null
    if [ $? -eq 1 ]; then
        restartContainer;
    fi
}

# Stop the running container.
function stopContainer() {
    docker stop default_elasticsearch;
}

# Restart an existing (stopped or running) container.
function restartContainer() {
    docker restart default_elasticsearch;
}

case "$1" in
    "run" )
        # `docker inspect` exits 1 when the container does not exist yet.
        RUNNING=$(docker inspect --format="{{ .State.Running }}" default_elasticsearch 2> /dev/null)
        if [ $? -eq 1 ]; then
            run;
        fi
        if [ "$RUNNING" == "true" ]; then
            echo "already running"
        fi
        if [ "$RUNNING" == "false" ]; then
            run;
        fi
        ;;
    "stop" )
        RUNNING=$(docker inspect --format="{{ .State.Running }}" default_elasticsearch 2> /dev/null)
        if [ $? -eq 1 ]; then
            echo "container not exist";
        fi
        if [ "$RUNNING" == "true" ]; then
            stopContainer;
        fi
        if [ "$RUNNING" == "false" ]; then
            echo "already stop";
        fi
        ;;
    * )
        echo "run, stop";
        ;;
esac
#docker run -d -p 3306:3306 --name="default_mysql" mysql:5.5.7
| true
|
7912f0859da459515daf92253d6e4a06928fba2f
|
Shell
|
markom84/routerscripts
|
/mikrotik/export
|
UTF-8
| 1,394
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
# Export the configuration ("/export") of every MikroTik router listed in
# ~/mikrotik-backup-script/tiks.txt and download the .rsc files into
# ~/backups/mikrotik/<date>.exports/.  Prompts are in Serbian.
# Ask for the router username (visible) and password (-s: silent).
read -p "Unesite korisnicko ime: " korisnik
read -sp "Unesite lozinku: " lozinka
# Today's date (YYYY-MM-DD) names both the export files and the target dir.
SAD=$(date +%F)
mkdir ~/backups/mikrotik/$SAD.exports
COUNTER=0
# Script start time, reported at the end.
POCETAK=$(date +%H:%M:%S)
# One export per router address listed in tiks.txt.
for i in $(cat ~/mikrotik-backup-script/tiks.txt); do
COUNTER=$((COUNTER + 1))
echo -e $COUNTER"." "Export MikroTik-a \033[1m$i\033[0m"
# Run RouterOS "/export file=<date>" on the device (legacy ssh-dss hosts).
sshpass -p "$lozinka" ssh -oStrictHostKeyChecking=no -oHostKeyAlgorithms=+ssh-dss -o ConnectTimeout=20 $i -l $korisnik /export file=$SAD
echo "Export napravljen"
# Pull the generated .rsc file down, named after the router address.
sshpass -p "$lozinka" scp -oStrictHostKeyChecking=no -oHostKeyAlgorithms=+ssh-dss -o ConnectTimeout=20 $korisnik@$i:$SAD.rsc ~/backups/mikrotik/$SAD.exports/$i.rsc
echo "Export preuzet"
done
# Total number of routers attempted (COUNTER is decremented below on misses).
TOTAL=$(echo $COUNTER)
# Second pass: report any router whose export file did not arrive.
for f in $(cat ~/mikrotik-backup-script/tiks.txt); do
if [ ! -f ~/backups/mikrotik/$SAD.exports/$f.rsc ];
then
echo -e "Export za \033[1m$f\033[0m nije uradjen " && COUNTER=$((COUNTER - 1 ))
fi
done
echo -e "\033[1m -------------------------------------------------------------------------------------------------------\033[0m"
#Backup exports to FTP server
#ncftpput -u $ftpuser -p $ftppass -R -a $ftpserver /backups/mikrotik/ ~/backups/mikrotik/$SAD.exports
KRAJ=$(date +%b-%d-%H:%M:%S)
# Final summary: start/end times and success count out of total.
echo -e "Skripta pokrenuta u: \033[1m$POCETAK\033[0m \n zavrsena u: \033[1m$KRAJ\033[0m"
echo -e "Export uradjen na \033[1m$COUNTER\033[0m od ukupno \033[1m$TOTAL\033[0m Mikrotik uredjaja."
| true
|
d082c0d519715bbac1050bf3a8584b4a4bda9da2
|
Shell
|
marc-hanheide/blackboard_tools
|
/extract-submission.sh
|
UTF-8
| 1,814
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
# Unpack a Blackboard bulk-download zip into per-student directories under
# ./submissions/ and record name/date/format metadata in submissions/info.csv.

# BUG FIX: the original printed usage but then carried on with an empty
# zipfile argument (unzip "", empty loops); it now exits.  $1 is also quoted.
if [ -z "$1" ]; then
echo "usage: $0 <zipfile>"
exit 1
fi
# Extract the value of a "<field>: value" header line from the submission
# metadata file $f, stripping the trailing CR (files have DOS line endings).
function get_field() {
grep "^$1:" "$f" | head -n1 | sed 's/^'"$1"': \(.*\)$'"/\1/" |tr -d "\r"
}
zipfile="$1"
# Per-invocation scratch dir ($$ = this shell's PID).
tempdir="/tmp/extract-submission-$$-$USER"
ex1dir=$tempdir/ex1dir
prefix="submissions"
mkdir -p "$prefix"
logfile="$prefix/extract.log"
date > "$logfile"
infofile="$prefix/info.csv"
# NOTE(review): the CSV is created and then immediately removed, so it is
# rebuilt from scratch by the appends inside the loop below.
echo "extracted from $zipfile" > "$infofile"
rm -f "$infofile"
mkdir -p "$ex1dir"
# Outer unzip: one .txt metadata file per student plus their uploaded files.
unzip -d "$ex1dir" "$zipfile">> "$logfile" 2>&1 || echo "*** couldn't unzip $zipfile"
for f in "$ex1dir"/*.txt; do
name=`get_field Name`
date=`get_field "Date Submitted"`
#name=`grep '^Name:' "$f" | sed 's/^Name:\(.*\)$/\1/'`
# Filenames the student attached (tab-prefixed "Filename:" lines).
files=`grep -P "^\tFilename:" "$f" | sed 's/^.*Filename: \(.*\)$/\1/'|tr -d "\r"`
echo " extracting $name"
# Student id is the number in parentheses inside the Name field.
sid=`echo $name | sed 's/.*(\([0-9]*\)).*/\1/'`
echo "$files"
# "First Last (1234)" -> "First_Last"; target dir is First_Last_1234.
clean_name=`echo "$name" | tr " " "_" | sed 's/_([0-9]*)$//'`
ex2dir="$prefix/${clean_name}_${sid}"
mkdir -p "$ex2dir"
rm -f "$ex2dir"/*
fileformat="none"
# Accept .zip submissions; tolerate .rar; anything else is flagged.
if echo "$files" | grep -i -q '.zip$'; then
unzip -j -d "$ex2dir" "$ex1dir/$files" >> "$logfile" 2>&1 || echo "*** couldn't unzip $ex1dir/$files"
fileformat="zip"
sub_files=`ls "$ex2dir"`
else
if echo "$files" | grep -i -q '.rar$'; then
echo "RARFILE $files" > "$ex2dir"/WRONGFILEFORMAT
(cd $ex2dir && unrar e "$ex1dir/$files") >> "$logfile" 2>&1 || echo "*** couldn't unrar $ex1dir/$files"
fileformat="rar"
else
echo "$files" > "$ex2dir"/WRONGFILEFORMAT
fi
fi
# Flatten the extracted file list to a single space-separated string.
sub_files=`ls "$ex2dir" | tr "\n" " " | tr "\t" " " | tr -s " "`
cp "$f" "$ex2dir"/submission.txt
echo "\"$sid\",\"$name\",\"$date\",\"$fileformat\",\"$files\",\"$sub_files\"" >> $infofile
#unzip -d "$ex2dir" "$ex1dir/$files"
#ls "$ex1dir/$files"
#echo "Files: $files"
done
#rm -rf "$tempdir"
| true
|
030654775fd50ea4a2f18b3edec2801d08fe3196
|
Shell
|
bsaunder/docker-containers
|
/fuse_6.0.0/build.sh
|
UTF-8
| 377
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/sh
# Build the JBoss Fuse 6.0.0 Docker image.  The (licensed) distribution zip
# must be downloaded manually into installs/ before running this.
if [ ! -e installs/jboss-fuse-full-6.0.0.redhat-024.zip ]
then
# Required binary missing: abort with a distinctive exit code.
echo "You should get the required A-MQ binary before trying to build this image."
exit 255
fi
# Create containers
echo "Creating Fuse 6.0.0 Container ..."
# -q: quiet build; --rm: remove intermediate containers after a success.
docker build -q --rm -t bsaunder/fuse_6.0.0 .
if [ $? -eq 0 ]; then
echo "Container Built"
else
# NOTE(review): the failure is reported but the script still exits 0.
echo "Error Building Container"
fi
| true
|
aeeb8986139abfe0a9a239803234dd4b394504cc
|
Shell
|
andral/mondo-web
|
/lib/cronjob-rpm/mondo-cron.sh
|
UTF-8
| 1,577
| 3.5
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Cron-driven Mondo Rescue backup: archives this host to an NFS share and
# mails root on failure.  backup_mount, backup_server, backup_path,
# backup_dir and logfile are presumably defined by the sourced config file
# below — verify against mondo-cron.conf.
export PATH=/sbin:/bin:/usr/sbin:/usr/bin
# let's make sure we're not a vm
if [ "`virt-what`" = "vmware" ]; then exit 0; fi
CONFIG_FILE=/opt/fzag/etc/mondo-cron.conf
if [[ -f $CONFIG_FILE ]]; then
. $CONFIG_FILE
else
echo "config file $CONFIG_FILE not found! aborting.."
exit 1;
fi
# Physical volume backing the "sysvg1" volume group is the device to image.
backup_dev=`pvs --noheading | grep sysvg1 | awk '{print $1}'`
# Strip the partition suffix: cciss devices use "pN", others a bare number.
if [[ "$backup_dev" == *cciss* ]];
then
backup_dev=`echo $backup_dev | sed 's/p[0-9]*//'`
else
backup_dev=`echo $backup_dev | sed 's/[0-9]*//g'`
fi
mkdir $backup_mount
mount -t nfs4 $backup_server:$backup_path $backup_mount
# Mount failure: notify root by mail, clean up the mountpoint, abort.
if [ $? -ne 0 ]; then echo "Mount failed on $(hostname -s)" | mail -s "Mondo Rescue Failed" root; rmdir $backup_mount; exit 1; fi
mkdir $backup_dir
# -d: debug mode (archive output to the terminal instead of $logfile).
# NOTE(review): options are parsed only here, after the mount already ran.
while getopts d opt
do
case $opt in
d) debug=yes;;
*) exit 1;;
esac
done
if [ ! -z $debug ];
then
mondoarchive -OVi -d $backup_dir -I $backup_dev -p `hostname -s` -S /dev/shm -T /dev/shm -G -N -s 4g
else
mondoarchive -OVi -d $backup_dir -I $backup_dev -p `hostname -s` -S /dev/shm -T /dev/shm -G -N -s 4g > $logfile 2>&1
fi
# On archive failure, mail the mondoarchive log to root.
if [ $? -ne 0 ]; then cat /var/log/mondoarchive.log | mail -s "Mondo Rescue Failed" root; fi
# Keep the rescue ISO and logs next to the backup, then unmount and clean up.
if [ -e /var/cache/mindi/mondorescue.iso ]; then cp /var/cache/mindi/mondorescue.iso $backup_dir; fi
if [ -e /var/log/mondoarchive.log ]; then cp /var/log/mondoarchive.log $backup_dir; fi
if [ -e $logfile ]; then cp $logfile $backup_dir; fi
umount $backup_mount
rmdir $backup_mount
rm -rf /dev/shm/mondo.tmp*
rm -rf /dev/shm/mondo.scratch*
rm -f $logfile
| true
|
a3ddef5186a09244e1ef4d1b77d0756a42018a24
|
Shell
|
zhuansun/some_scripts
|
/shell/auto_hexo/auto_dev.sh
|
UTF-8
| 2,862
| 4.28125
| 4
|
[] |
no_license
|
#!/bin/bash
# Automated publishing of blog posts to a remote hexo instance:
# upload/delete posts, trigger deployment, or open an ssh session.

# Print the "wrong usage" hint.
function error(){
	cat <<- _EOF_
	命令不对,输入 -h 或者 --help 查看帮助
	_EOF_
}
# Open an interactive ssh session to the hexo host.
function connect_hexo(){
	echo "#####################【正在打开连接】#####################"
	ssh blog@52youyong.xyz
}
# Print the option summary.  (Typo fix: "denerate" -> "generate".)
function help(){
	cat <<- _EOF_
	-u upload 上传多个指定文件到hexo中
	-d delete 删除hexo中指定的多个文件
	-g generate 开始部署
	-h help 查看帮助
	-c connect 连接到hexo
	_EOF_
}
# Upload the given files to the hexo _posts directory on the server.
function upload_file(){
	# Pre-flight: every named file must exist locally.
	echo "#####################【上传前文件检查】#####################"
	for i in "$@"; do
		# Skip the leading option argument (-u etc.).
		# BUG FIX: the original used [[ "$i" =~ "-" ]], which skips ANY
		# argument containing a hyphen, so a post named "my-post.md" was
		# silently never checked, uploaded or deleted.
		if [[ "$i" == -* ]]; then
			continue
		fi
		if [[ ! -f "$i" ]]; then
			echo "【 $i 】文件不存在,请确认文件目录"
			exit 1
		fi
	done
	echo "#####################【上传前文件检查【完成】】#################"
	# All files exist: upload them one by one.
	echo "#####################【开始上传文件】#########################"
	for i in "$@"; do
		if [[ "$i" == -* ]]; then
			continue
		fi
		if [[ -f $i ]]; then
			scp -P 22 $i blog@52youyong.xyz:~/app/zhuansun.github.io/source/_posts
			echo " 文件 $i 上传完成!!!"
		fi
	done
	echo "#####################【所有文件上传【完成】】###################"
}
# Delete the given files from the hexo _posts directory on the server.
function delete_file(){
	echo "#####################【删除服务器上的文件】#####################"
	for i in "$@"; do
		# Same hyphen fix as upload_file: only skip leading-dash options.
		if [[ "$i" == -* ]]; then
			continue
		fi
		ssh blog@52youyong.xyz "cd ~/app/zhuansun.github.io/source/_posts && rm -rf $i"
		echo " 文件 $i 删除完成!!!"
	done
	echo "#####################【删除服务器上的文件【完成】】###############"
}
# Regenerate and deploy the hexo site remotely.
function deploy_hexo(){
	echo "#####################【开始部署hexo】#####################"
	ssh blog@52youyong.xyz "cd ~/app/zhuansun.github.io && hexo clean && hexo g && hexo d"
	echo "#####################【开始部署hexo【完成】】#####################"
}
# At least one argument is required.
if [[ ! $1 ]]; then
	error
	exit 1
fi
case $1 in
	-h | --help )
		help
		exit 0
		;;
	-u )
		upload_file "$@"
		exit 0
		;;
	-d )
		delete_file "$@"
		exit 0
		;;
	-g )
		deploy_hexo
		exit 0
		;;
	-c )
		connect_hexo
		exit 0
		;;
	* )
		# Unsupported option: show the error text and fail.
		# (Portability fix: `exit -1` is out of range; bash maps it to 255.)
		error
		exit 1
		;;
esac
| true
|
6898452807293a1c9486500c4cbd06f91674a7fc
|
Shell
|
aviadshiber/236319-Programming-Languages
|
/HW4/compile_and_run
|
UTF-8
| 1,578
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/bash
# Homework driver: pipes the "kings" data through generator scripts that
# emit Pascal sources (A/C/D/E.pas), compiles each with fpc, runs them, and
# then runs B against each generated data set, collecting output and errors.
# Generate and build program A from the kings data.
cat kings | ./readAndCreateAInPascal > A.pas
fpc A.pas
./A
rm A.o
#rm A
# B presumably reads the file A just produced — verify against B.pas.
fpc B.pas
./B > output
echo "after reading from A" >>output
rm B.o
# Program C: same generator with the record field names renamed via sed.
cat readAndCreateAInPascal | sed -re 's/name/othername/g' | sed -re 's/startYear/otherStartYear/g' | sed -re 's/endYear/otherEndYear/g' > readAndCreateCInPascal
chmod 777 readAndCreateCInPascal
cat kings | ./readAndCreateCInPascal > C.pas
fpc C.pas
./C
./B >> output 2>errors.C
echo "after reading from C" >>output
rm C.o
#rm C
#building D
#singlequote=\047
# Program D: extend the generated records with a crowneChoice field and
# inject the code that sets kingBy from column 4 of the input (\047 = "'").
cat readAndCreateAInPascal | sed -re 's/BCE=integer;/BCE=integer; crowneChoice=(rebellion,inheritance);/g' | sed -re 's/endYear:BCE;/endYear:BCE; kingBy:crowneChoice;/g' | sed '23 a print "if kings["i"].name=" "\\047"$4"\\047" " then";' | sed '24 a print " kings["i"].kingBy:=rebellion";'| sed '25 a print "else";' | sed '26 a print " kings["i"].kingBy:=inheritance;";' > readAndCreateDInPascal
chmod 777 readAndCreateDInPascal
cat kings | ./readAndCreateDInPascal > D.pas
fpc D.pas
./D
./B >> output 2>errors.D
echo "after reading from D" >>output
rm D.o
#rm D
# Program E: D's generator plus an extra "father" field on the record.
cat readAndCreateDInPascal | sed -re 's/name:string;/name:string;father:string;/g' > readAndCreateEInPascal
chmod 777 readAndCreateEInPascal
cat kings | ./readAndCreateEInPascal > E.pas
fpc E.pas
./E
#clear
./B >>output 2>errors.E
echo "after reading from E" >>output
# Final report: accumulated program output, then all captured error streams.
echo "-----------------All programs output-----------------"
cat output
echo "------------------all programs errors-------------------"
cat errors.*
rm E.o
echo "------------------------Done----------------------------"
#rm E
#rm B
| true
|
379665a1a5e37c1807acd4276da5cd74a3cd2283
|
Shell
|
bakercp/ofxTwitter
|
/scripts/ci/install.sh
|
UTF-8
| 1,160
| 3.953125
| 4
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# CI helper: clone every openFrameworks addon this addon depends on
# (from addon_config.mk and each example's addons.make) into $OF_ROOT/addons.
# Expects OF_ADDON_NAME in the environment.
set -e
# openFrameworks root: four levels above this script's directory.
OF_ROOT=$( cd "$(dirname "$0")/../../../.." ; pwd -P )
# Default addon github info.
GH_USERNAME='bakercp'
GH_BRANCH='master'
GH_DEPTH=1
# An array of required addons that will be gathered below.
REQUIRED_ADDONS=()
# Extract ADDON_DEPENDENCIES from addon_config.mk file.
if [ -f ${OF_ROOT}/addons/${OF_ADDON_NAME}/addon_config.mk ]; then
while read line; do
if [[ $line == ADDON_DEPENDENCIES* ]] ;
then
# Keep only the RHS of "ADDON_DEPENDENCIES = a b c" and split on spaces.
line=${line#*=}
IFS=' ' read -ra ADDR <<< "$line"
for i in "${ADDR[@]}"; do
REQUIRED_ADDONS+=($i)
done
fi
done < ${OF_ROOT}/addons/${OF_ADDON_NAME}/addon_config.mk
fi
# Gather addons from all examples.
for addons_make in ${OF_ROOT}/addons/${OF_ADDON_NAME}/example*/addons.make; do
while read addon; do
# The addon under test is already present; skip it.
if [ ${addon} != ${OF_ADDON_NAME} ] ;
then
REQUIRED_ADDONS+=($addon)
fi
done < $addons_make
done
# We aren't de-duplicating array to keep it pure bash.
for addon in "${REQUIRED_ADDONS[@]}"
do
# Shallow-clone each missing addon from the default github account.
if [ ! -d ${OF_ROOT}/addons/${addon} ]; then
git clone --depth=$GH_DEPTH https://github.com/$GH_USERNAME/$addon.git ${OF_ROOT}/addons/${addon}
fi
done
| true
|
274ab6b2235cba0587e5f35c96eda19c420e7440
|
Shell
|
gaspar44/Operativos
|
/Practica1/rec_restaurantes.sh
|
UTF-8
| 659
| 3.5
| 4
|
[] |
no_license
|
#!/bin/bash
# Interactive restaurant-recommendation menu (Spanish).  Loops forever,
# sourcing a sibling opcionN.sh script for each menu choice, until option 5.
# Optional config file path is forwarded to the defaults loader.
configurationFile=$1
source ./defaultParameters.sh $configurationFile
while [[ true ]]; do
echo "1. Recomendación rápida de restaurante"
echo "2. Recomendación detallada de restaurante"
echo "3. Consultar parámetros de recomendación"
echo "4. Ajustar parámetros recomendación"
echo "5. Salir"
echo "-----------------------------------------"
read -p "Introduzca una opción (1/2/3/4/5) " opcion
# Each option script runs in this shell (source), so it can share state.
case $opcion in
1)
source ./opcion1.sh;;
2)
source ./opcion2.sh ;;
3)
source ./opcion3.sh ;;
4)
source ./opcion4.sh ;;
5)
clear
exit 0 ;;
*)
# Invalid choice: clear the screen and redisplay the menu.
clear
echo "error, pruebe de nuevo" ;;
esac
done
| true
|
328ef559e475ef4f759c76e442df00605d98076b
|
Shell
|
trondn/labrea
|
/labrea
|
UTF-8
| 692
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/sh
# Launcher: run a command with labrea.so interposed (library preloading),
# driven by a lua script.  Usage: labrea <script.lua> <command> [args...]
# Resolve the absolute path of labrea.so next to this script (-P: physical
# path with symlinks resolved), then return to the original directory.
owd=`pwd`
cd `dirname $0`
labreaso=`pwd -P`/labrea.so
cd $owd
# Pick the platform's preload mechanism.
case `uname -s` in
Darwin)
DYLD_FORCE_FLAT_NAMESPACE=YES
DYLD_INSERT_LIBRARIES=$labreaso
export DYLD_FORCE_FLAT_NAMESPACE
export DYLD_INSERT_LIBRARIES
;;
Linux)
LD_PRELOAD=$labreaso
export LD_PRELOAD
;;
*)
# NOTE(review): unsupported platforms are only warned about — execution
# continues below without any preload set.
echo "Sorry, `uname -s` is not supported."
esac
# First argument is the lua script; the rest is the command to exec.
LABREA_SCRIPT=$1
shift
if [ ! -f "$LABREA_SCRIPT" ]
then
echo "Can't find labrea script."
exit 1
fi
# Require a .lua extension.
case $LABREA_SCRIPT in
*.lua)
;;
*)
echo "'$LABREA_SCRIPT' doesn't look like a lua script."
exit 1
esac
# Hand over to the target command with the preload env in place.
export LABREA_SCRIPT
exec "$@"
| true
|
7812da5ff7386bb20967dda728fdcff4d01d95e2
|
Shell
|
corykey/vagrants
|
/configs/installs/debian/install_mysql.sh
|
UTF-8
| 398
| 3.125
| 3
|
[] |
no_license
|
#!/bin/sh
# Cory Kehoe
# dev@corykey.com
# 2016
# Provision a Debian vagrant box with MySQL server and phpMyAdmin (nginx).
echo "installing: MySQL SERVER + PhpMyAdmin"
apt-get update
apt-get install -y --force-yes mysql-server
# NOTE(review): mysql-server is installed twice — this second (quiet)
# install is effectively used as a success check for the first one.
if apt-get -qq install mysql-server; then
apt-get install -y --force-yes phpmyadmin
# Expose phpMyAdmin through the nginx web root.
ln -s /usr/share/phpmyadmin /usr/share/nginx/www
php5enmod mcrypt
echo "INSTALL COMPLETE"
else
echo "Could not install mysql-server and phpmyadmin"
fi
| true
|
cf9ab5af14ff963b991d3317aed209d190ccfaf7
|
Shell
|
tdq45gj/mcg-portal
|
/standUpDB.bash
|
UTF-8
| 602
| 3.71875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Wait for the "pg-docker" Postgres container to accept connections
# (up to 20 attempts, 3s apart), then load the test fixture data.
exitCode=1
iterations=0
# wait until we can successfully accept connections
until [ $exitCode -eq 0 ] || [ $iterations -eq 20 ]; do
sleep 3
echo "Waiting for DB to come online, attempt $((iterations + 1)) of 20"
# Just attempt to do a dummy `SELECT 1;` to see if it succeeds
docker exec -it pg-docker psql -U postgres -h localhost -c 'select 1;' >/dev/null
exitCode=$?
iterations=$((iterations + 1))
done
# initialize test data that you want the db to be the populated with when it comes up
docker exec -it pg-docker psql -U postgres -h localhost -c "$(cat dataInit.sql)"
| true
|
79218f4108f1f52639a7cd7b37408a3e2afffdac
|
Shell
|
czdsdo/chaintest-docker-script
|
/chaintest-yuelian/fabric/fabric_init.sh
|
UTF-8
| 1,208
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/sh
# author:wang yi
# Initialise the Fabric network from this peer.  Which REST calls are made
# (and with what delays) depends on the peer's index in /chain/PEER_INDEX.
PEER_INDEX=$(cat /chain/PEER_INDEX)
# BUG FIX: `function initFabric(){` is a bash-ism and a syntax error under
# #!/bin/sh on dash/ash; the POSIX function syntax is used instead.
initFabric(){
case $PEER_INDEX in
3) {
# Peer 3 drives enrolment, channel create/join, then chaincode instantiation.
curl 127.0.0.1:8080/api/users
sleep 3s
curl 127.0.0.1:8080/api/createChannel
sleep 3s
curl 127.0.0.1:8080/api/joinchannel
sleep 75s
curl 127.0.0.1:8080/api/instantiateChaincode
sleep 10s
}
;;
4) {
# Peers 4/5 wait for peer 3, enrol, then instantiate.
sleep 16s
curl 127.0.0.1:8080/api/users
sleep 75s
curl 127.0.0.1:8080/api/instantiateChaincode
sleep 10s
}
;;
5) {
sleep 18s
curl 127.0.0.1:8080/api/users
sleep 75s
curl 127.0.0.1:8080/api/instantiateChaincode
sleep 10s
}
;;
6) {
# Peer 6 additionally installs the chaincode before instantiating.
sleep 20s
curl 127.0.0.1:8080/api/users
sleep 5s
curl 127.0.0.1:8080/api/installChaincode
sleep 10s
curl 127.0.0.1:8080/api/instantiateChaincode
sleep 10s
}
;;
*) echo 'error'
;;
esac
}
sleep 2s
# All curl/sleep chatter is discarded; only the final marker is printed.
initFabric >/dev/null 2>&1
echo "success"
| true
|
908ec01d37bbff7cb42ce1eddb7ba981ff95e129
|
Shell
|
htchepannou/kribi
|
/installer/javaapp/install.sh
|
UTF-8
| 1,747
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/sh
#
# Usage: ./install.sh
#
# Install and launch a Spring Boot service on a fresh host, sync its logs to
# S3, then power the machine off.  SERVICE_NAME, SERVICE_VERSION,
# SERVICE_USER, SERVICE_ENVIRONMENT, SERVICE_DIR and JVM_OPTS come from the
# sourced profile below.
# BUG FIX: `source` is a bash-ism; the portable form under #!/bin/sh is `.`.
. ./service-profile
yum -y remove java*
yum -y install java-1.8.0-openjdk
#------------------------------
# Download
#------------------------------
echo "DOWNLOADING..."
aws s3 cp s3://io.tchepannou.kribi/repository/$SERVICE_NAME/$SERVICE_VERSION/$SERVICE_NAME.jar .
#------------------------------
# Create user
#------------------------------
# BUG FIXES: the message referenced the misspelled $SERVUCE_USER (always
# empty); the existence check was hard-coded to user "webapp" although the
# user created is $SERVICE_USER; and `&>` is a bash-ism under #!/bin/sh.
echo "CREATING USER: $SERVICE_USER..."
id -u "$SERVICE_USER" >/dev/null 2>&1 || useradd $SERVICE_USER
#------------------------------
# Install application
#------------------------------
echo "INSTALLING APPLICATION..."
if [ ! -d "/opt/$SERVICE_NAME" ]; then
mkdir /opt/$SERVICE_NAME
fi
if [ ! -d "/opt/$SERVICE_NAME/log" ]; then
mkdir /opt/$SERVICE_NAME/log
fi
if [ ! -d "/opt/$SERVICE_NAME/config" ]; then
mkdir /opt/$SERVICE_NAME/config
fi
cp $SERVICE_NAME.jar /opt/$SERVICE_NAME/$SERVICE_NAME.jar
chown -R $SERVICE_USER:$SERVICE_USER /opt/$SERVICE_NAME
#------------------------------
# RUN
#------------------------------
echo "RUNNING APP..."
LOG_FILE=/opt/$SERVICE_NAME/log/$SERVICE_NAME.log
# NOTE(review): these options are appended AFTER "-jar app.jar" below, so
# the -XX/-verbose flags reach the application as program arguments, not the
# JVM — confirm whether the JVM flags were ever effective.
JAVA_OPTS="$JVM_OPTS \
--spring.profiles.active=$SERVICE_ENVIRONMENT \
--logging.file=$LOG_FILE \
--application.version=$SERVICE_VERSION \
-XX:+PrintGCDateStamps \
-verbose:gc \
-XX:+PrintGCDetails \
-Xloggc:$SERVICE_DIR/gc.log \
-XX:+UseGCLogFileRotation \
-XX:NumberOfGCLogFiles=10 \
-XX:GCLogFileSize=100M \
-XX:+HeapDumpOnOutOfMemoryError \
-XX:HeapDumpPath=$SERVICE_DIR/log/heap-`date +%s`.hprof"
cd /opt/$SERVICE_NAME
su -m $SERVICE_USER -c "java -jar $SERVICE_NAME.jar $JAVA_OPTS > log/console.log"
# Copy the logs to S3
aws s3 sync /opt/$SERVICE_NAME/log s3://io.tchepannou.kribi/log/$SERVICE_NAME --delete
# Shutdown
shutdown -h now
| true
|
8917e8d65d14f8b79f8f9d7e9157b86e3d14e756
|
Shell
|
yosugi/dotfiles
|
/.zshrc
|
UTF-8
| 4,015
| 3.265625
| 3
|
[] |
no_license
|
PROMPT='[%n@%c]$ '
#LANG=C
EDITOR=vim
setopt autopushd
alias gd='dirs -v; echo -n "select number: "; read newdir; cd -"$newdir"'
alias V="bindkey -v"
alias E="bindkey -e"
alias la="ls -a"
alias ll="ls -al"
alias view='vim -R'
# global alias
alias -g @g="| grep"
alias -g @l="| less"
alias -g @x="| xargs"
alias -g @v="| vim -"
bindkey -v # vi key bindings
bindkey '^P' up-line-or-history
bindkey '^N' down-line-or-history
bindkey '^A' beginning-of-line
bindkey '^E' end-of-line
bindkey '^F' forward-char
bindkey '^B' backward-char
bindkey '^D' delete-char-or-list
bindkey '^U' kill-whole-line
bindkey '^K' kill-line
bindkey '^H' backward-delete-char
bindkey '^R' history-incremental-search-backward
bindkey '^Y' yank
bindkey '^J' push-line
bindkey '^Q' push-line
bindkey -a 'q' push-line
bindkey -a '/' history-incremental-search-forward
bindkey -a '?' history-incremental-search-backward
bindkey ' ' magic-space # also do history expansion on space
bindkey '^I' complete-word # complete on tab, leave expansion to _expand
# mkdir ~/.zsh; cd ~/.zsh
# git clone git://github.com/rupa/z.git
. ~/.zsh/z/z.sh
precmd() {
_z --add "$(pwd -P)"
}
autoload -U colors; colors
function rprompt-git-current-branch {
local name st color
if [[ "$PWD" =~ '/\.git(/.*)?$' ]]; then
return
fi
name=$(basename "`git symbolic-ref HEAD 2> /dev/null`")
if [[ -z $name ]]; then
return
fi
st=`git status 2> /dev/null`
if [[ -n `echo "$st" | grep "^nothing to"` ]]; then
color=${fg[green]}
elif [[ -n `echo "$st" | grep "^nothing added"` ]]; then
color=${fg[yellow]}
elif [[ -n `echo "$st" | grep "^# Untracked"` ]]; then
color=${fg_bold[red]}
else
color=${fg[red]}
fi
echo "[%{$color%}$name%{$reset_color%}]"
}
setopt prompt_subst
RPROMPT='`rprompt-git-current-branch`'
zstyle ':completion:*:*:git:*' script ~/.git-completion.sh
# search = find + grep
# http://jampin.blog20.fc2.com/blog-entry-16.html
# Search for a word in files: search [path] [name-pattern] word
#   1 arg : word (all files under .)   2 args: pattern word   3: path pattern word
function search() {
    local arg_path arg_pattern arg_word
    if [ $# -lt 1 ] ; then
        print "error: few args!"
        return 0
    elif [ $# -eq 1 ] ; then
        arg_path="."
        arg_pattern="*"
        arg_word=$1
    elif [ $# -eq 2 ] ; then
        arg_path="."
        arg_pattern=$1
        arg_word=$2
    else
        arg_path=$1
        arg_pattern=$2
        arg_word=$3
    fi
    # BUG FIX: the pattern/word were unquoted, so the default "*" was
    # glob-expanded by the shell in the cwd instead of reaching find, and
    # file names containing spaces broke the xargs stage; -exec ... + is
    # both quoted-safe and equivalent.
    find "$arg_path" -name "$arg_pattern" -type f -exec grep -n -- "$arg_word" {} +
}
# Case-insensitive variant of search: isearch [path] [name-pattern] word
function isearch() {
    local arg_path arg_pattern arg_word
    if [ $# -lt 1 ] ; then
        print "error: few args!"
        return 0
    elif [ $# -eq 1 ] ; then
        arg_path="."
        arg_pattern="*"
        arg_word=$1
    elif [ $# -eq 2 ] ; then
        arg_path="."
        arg_pattern=$1
        arg_word=$2
    else
        arg_path=$1
        arg_pattern=$2
        arg_word=$3
    fi
    # Same quoting fix as search(); grep -i makes the match case-insensitive.
    find "$arg_path" -name "$arg_pattern" -exec grep -in -- "$arg_word" {} +
}
# google search by w3m
# Join the arguments into a "+"-separated query and open Google (ja) in w3m.
function google() {
    local str opt
    # BUG FIX: the original tested `[ $ != 0 ]` — comparing the literal
    # string "$" to "0", which is always true — the intended test is on the
    # argument count $#.
    if [ $# != 0 ]; then
        for i in $*; do
            str="$str+$i"
        done
        # Trim the leading "+" added by the first iteration.
        str=`echo $str | sed 's/^\+//'`
        opt='search?num=50&hl=ja&lr=lang_ja'
        opt="${opt}&q=${str}"
    fi
    w3m http://www.google.co.jp/$opt
}
# alc search by w3m
# Look up the arguments in the ALC dictionary via w3m; with no arguments,
# open the ALC front page.
function alc() {
    # BUG FIX: same `[ $ != 0 ]` typo as google() — the intended test is $#.
    if [ $# != 0 ]; then
        w3m "http://eow.alc.co.jp/$*/UTF-8/?ref=sa"
    else
        w3m "http://www.alc.co.jp/"
    fi
}
stty stop undef
stty start undef
#stty lnext undef
#stty kill undef
#stty intr undef
hash -d tmp=/tmp
hash -d log=/var/log
# http://qiita.com/hamaco/items/4eb19da6cf216104adf0
HARDCOPYFILE=~/tmp/tmux-hardcopy
touch $HARDCOPYFILE
dabbrev-complete () {
local reply lines=80
tmux capture-pane && tmux save-buffer -b 0 $HARDCOPYFILE && tmux delete-buffer -b 0
reply=($(sed '/^$/d' $HARDCOPYFILE | sed '$ d' | tail -$lines))
compadd -Q - "${reply[@]%[*/=@|]}"
}
zle -C dabbrev-complete menu-complete dabbrev-complete
bindkey '^o' dabbrev-complete
bindkey '^o^_' reverse-menu-complete
zstyle ':completion:*:*:git:*' script ~/.zsh/completion/git-completion.bash
fpath=(~/.zsh/completion $fpath)
autoload -U compinit
compinit -u
| true
|
0166a352cc0f1ca7d6166538c024dacb4c15ad97
|
Shell
|
dashu42332/Alcaltel-Pixi4-7-wifi-8063
|
/device/mediatek/build/build/tools/addMsgtoScater.sh
|
UTF-8
| 1,033
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# Append a [PRO_INFO] build-metadata section to a scatter file, with values
# pulled from the product's build.prop.
# Usage: addMsgtoScater.sh <project> <scatter-file> <comm-ref>
project=$1
scater=$2
buildprop="out/target/product/${project}/system/build.prop"
scaterTxt="out/target/product/${project}/${scater}"
if [ ! -f $buildprop ]; then
echo "$buildprop not exist"
exit 1
fi
if [ ! -f $scaterTxt ]; then
echo "$scaterTxt not exist"
exit 1
fi
# Extract "key=value" properties from build.prop (-F=: split on '=').
buildnumber=$(awk -F= '/ro.build.display.id/{print $2}' $buildprop)
language=$(awk -F= '/ro.product.locale.language/{print $2}' $buildprop)
oem=$(awk -F= '/ro.product.manufacturer/{print $2}' $buildprop)
operator=$(awk -F= '/ro.operator.optr/{print $2}' $buildprop)
product=$(awk -F= '/ro.custom.product.device/{print $2}' $buildprop)
publishtime=$(date "+%Y%m%d%H%M%S")
versionname=$(awk -F= '/ro.build.display.id/{print $2}' $buildprop)
COMM_REF=$3
# Append the metadata section; the commented lines were intentionally
# disabled, so only OEM/Product/COMM_REF are emitted.
echo "[PRO_INFO]" >> $scaterTxt
echo "TYPE=TCL" >> $scaterTxt
echo "OEM=$oem" >> $scaterTxt
echo "Product=$product" >> $scaterTxt
#echo "Language=$language" >> $scaterTxt
#echo "Opreator=$operator" >> $scaterTxt
#echo "Version=$buildnumber" >> $scaterTxt
echo "COMM_REF=$3" >> $scaterTxt
|
1605f0b58dccf6528ce7c13f2e1b7c400fce3c9b
|
Shell
|
Dnld/devtools
|
/old/bash_profile
|
UTF-8
| 5,326
| 2.90625
| 3
|
[] |
no_license
|
################################################################################
# djs bash profile
# updated February 28, 2016
# https://github.com/Dnld/devtools/
################################################################################
# set paths
export PATH=/usr/local/bin:/usr/local/sbin:/usr/local/msql/bin:$PATH
# prompt
export PS1="\[\033[2m\]\W\$(parse_git_branch) $\[\033[0m\] "
# default editor
export EDITOR=/usr/bin/emacs
# environment shortcuts
alias bp="atom ~/.bash_profile"
alias c="clear"
alias c-="cd -"
alias cd..="cd ../"
alias ..="cd ../"
alias ...="cd ../../"
alias cp="cp -iv"
alias e="emacs"
alias f="open -a Finder ./"
alias ll="ls -lahG"
alias mk="mkdir -v"
alias mv="mv -iv"
alias oa="open -a"
alias pc="pbcopy"
alias pp="pbpaste"
alias rbp="source ~/.bash_profile"
alias rm="rm -iv"
alias t="touch"
alias v="vim"
function ta() {
touch "$1"
atom "$1"
}
function te() {
touch "$1"
emacs "$1"
}
function ts() {
touch "$1"
subl "$1"
}
function cdf() {
currFolderPath=$( /usr/bin/osascript <<EOT
tell application "Finder"
try
set currFolder to (folder of the front window as alias)
on error
set currFolder to (path to desktop folder as alias)
end try
POSIX path of currFolder
end tell
EOT
)
cd "$currFolderPath"
}
function mc() {
  # Create a directory and step into it in one command.
  local target="$1"
  mkdir "$target"
  cd "$target"
}
function pwdc() {
pwd | pbcopy
pwd
}
# search shortcuts
function agl() {
ag "$1" -l
}
function ff() {
find . -name "$1"
}
function ffa() {
find . -name "$1"'*'
}
function ffe() {
find . -name '*'"$1"
}
# application shortcuts
alias act="open -a activity\ monitor"
alias cal="open -a calendar"
alias con="open -a contacts"
alias das="open dash://"
alias itu="open -a itunes"
alias mai="open -a mail"
alias mes="open -a messages"
alias pho="open -a photos"
alias rem="open -a reminders"
alias sim="open -a simplenote"
alias sla="open -a slack"
alias twe="open -a tweetbot"
# navigational shortcuts
alias ~="cd ~"
alias desk="cd ~/Desktop"
alias dev="cd ~/development"
alias doc="cd ~/Documents"
alias down="cd ~/Downloads"
# temporary shortcuts
alias cs="cd /Users/djs/development/hackreactor/career"
alias pd="cd /Users/djs/development/predictster"
# Atom shortcuts
alias a="atom"
alias a.="atom ."
# Chrome shortcuts
alias chr="open -a google\ chrome"
function cg() {
chr "http://www.google.com/search?q= $1"
}
function hang() {
open -a google\ chrome http://hangouts.google.com/start
/usr/bin/osascript <<EOT
tell application "System Events"
delay 3.5
keystroke "l" using command down
keystroke "c" using command down
end tell
EOT
}
# Git shortcuts
alias ga="git add"
alias gaa="git add -A"
alias gad="git add ."
alias gb="git branch"
alias gbco="git checkout -b"
alias gc="git commit"
alias gcam="git commit -am"
alias gcamen="git commit --amend"
alias gcf="git config"
alias gcm="git commit -m"
alias gcl="git clone"
alias gco="git checkout"
alias gd="git diff"
alias gdt="open -a github\ desktop"
alias gf="git fetch"
alias gh="git help"
alias gi="git init"
alias gk="gitk"
alias gl="git log"
alias gll="git log --pretty=format:'%h %ad | %s%d [%an]' --graph --date=short"
alias glll="gll --all"
alias gmc="git commit -m"
alias gmg="git merge"
alias gmv="git mv"
alias gpf="git push -f"
alias gpfo="git push -f origin"
alias gpfom="git push -f origin master"
alias gpl="git pull"
alias gpo="git push origin"
alias gpod="git push origin develop"
alias gpom="git push origin master"
alias gpu="git push"
alias gpuf="git push -f"
alias gpro="git pull --rebase origin"
alias gpru="git pull --rebase upstream"
alias gpum="git pull upstream master"
alias gr="git remote"
alias grao="git remote add origin"
alias grb="git rebase"
alias grau="git remote add upstream"
alias grm="git rm"
alias grs="git reset"
alias grv="git revert"
alias gs="git status -s"
alias gss="git status"
alias gt="git tag"
alias gx="gitx"
alias get="git"
alias got="git"
# MongoDB shortcuts
alias mdb="sudo /usr/local/Cellar/mongodb/3.0.6/bin/mongod"
alias mo="mongo"
#MySQL shortcuts
alias ms='mysql'
alias msa='mysqladmin'
alias mss='mysql.server'
alias msur="mysql -u root"
# Networking shortcuts
alias ports="sudo lsof -PiTCP -sTCP:LISTEN"
# Node shortcuts
alias no="node"
alias nd="node debug"
alias ndb="node --debug-brk"
alias nde="nodemon"
alias ni="node-inspector"
# CoffeeScript shortcuts
alias co="coffee"
alias coc="coffee -c"
alias cocomp="coffee --output compiled --map --watch --compile ./"
alias coo="coffee -c -o"
# Python shortcuts
alias pss="python -m SimpleHTTPServer"
alias py="python"
# Ruby / Rails shortcuts
alias rc="irb"
alias rg="rails generate"
alias rs="rails server"
alias ru="ruby"
# Safari shortcuts
alias saf="open -a safari"
# Google-search the given query in Safari.
# BUGFIX: removed the stray space after "q=" and quoted $1 so the query
# is not corrupted or word-split (mirrors the fix applicable to the
# Chrome variant).  The argument is still not URL-encoded.
function sg() {
  saf "http://www.google.com/search?q=$1"
}
# Sublime shortcuts
alias s="subl"
alias s.="subl ."
alias s2="sublime"
alias s2.="sublime ."
# Xcode shortcuts
alias xco="open -a xcode"
# Git branch in prompt
# Print the current git branch as " (branch)" for embedding in the shell
# prompt; prints nothing outside a git repository (stderr discarded).
function parse_git_branch() {
# `git branch` marks the checked-out branch with "* "; the first sed
# expression deletes every line not starting with "*", the second
# rewrites "* name" to " (name)".
git branch 2> /dev/null | sed -e "/^[^*]/d" -e "s/* \(.*\)/ (\1)/"
}
# Node path
export NODE_PATH="/usr/local/lib/node_modules:$NODE_PATH"
# load RVM into a shell session *as a function*
[[ -s "$HOME/.rvm/scripts/rvm" ]] && source "$HOME/.rvm/scripts/rvm"
################################################################################
| true
|
2fc517239a709ceca62cc384a78ae497754671c9
|
Shell
|
AkihiroSuda/runrootless
|
/proot/PRoot/tests/test-11111111.sh
|
UTF-8
| 1,465
| 3.71875
| 4
|
[
"GPL-2.0-only",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
if [ -z `which cat` ] || [ -z `which readlink` ] || [ -z `which mcookie` ] || [ -z `which touch` ] || [ -z `which mkdir` ] || [ -z `which ln` ] || [ -z `which grep` ] || [ -z `which rm` ]; then
exit 125;
fi
set +e
x1="r1 d1 rl1 dl1" # root of the test tree.
x2="r2 d2 rl2 dl2" # subtree of d1/dl1, every components exist.
x3="r3 d3 rl3 dl3" # subtree of d1/dl1, no component exists.
x4="/ /. /.." # terminators.
# Write a CSV report to the file named by $1 with header
# "path, chdir, cat, readlink": for every <root>/<component> combination
# built from x1 and x2 (components that exist) or x3 (components that do
# not), record the exit statuses of cd, cat and readlink on that path.
generate () {
output=${1}
# Emit one CSV row for each terminator variant ("/", "/.", "/..", none)
# appended to the path given in $1.
make_tests ()
{
for c in ${x4} ""; do
x="${1}${c}"
# The cd runs inside a command substitution, i.e. in a subshell, so the
# script's working directory never actually changes — only the exit
# status is captured.
$(cd ${x} 2>/dev/null); cd_result=$?
cat ${x} 2>/dev/null; cat_result=$?
# stdout is sent to the already-discarded stderr: all output dropped,
# only the exit status matters.
readlink ${x} 2>/dev/null 1>&2; readlink_result=$?
echo "${x}, $cd_result, $cat_result, $readlink_result" >> $output
done
}
echo "path, chdir, cat, readlink" > $output
for a in ${x1}; do
for b in ${x2}; do
make_tests "${a}/${b}"
done
for b in ${x3}; do
make_tests "${a}/${b}"
done
done
}
if [ -z ${PROOT_STAGE2} ]; then
create_components ()
{
touch r${1} 2>/dev/null
mkdir -p d${1} 2>/dev/null
ln -fs r${1} rl${1} 2>/dev/null
ln -fs d${1} dl${1} 2>/dev/null
}
create_components 1
$(cd d1; create_components 2)
REF=/tmp/`mcookie`
mkdir -p /tmp
generate $REF
env PROOT_STAGE2=$REF ${PROOT} -w ${PWD} sh ./$0
exit $?
fi
TMP=/tmp/`mcookie`
mkdir -p /tmp
generate $TMP
set -e
cmp $TMP $PROOT_STAGE2
rm $TMP $PROOT_STAGE2
| true
|
715cc43c1ac2c1bf6f16ba5b9d6a62c6216094a7
|
Shell
|
MoldovanAlexandruVasile/Linux_OS
|
/44b.sh
|
UTF-8
| 1,702
| 4.40625
| 4
|
[] |
no_license
|
#!/bin/sh
#44b) Write a shell script that continously reads user names from keyboard
#and for each of them outputs its full name, the number of processes
#and the curent running command.
#Until the name is not "exit" the program will run
while true; do
  # Read the next user name from stdin (-r keeps backslashes literal).
  read -r name
  # BUGFIX: $name is quoted so an empty input line no longer breaks the test.
  if [ "$name" = "exit" ]; then
    exit 0
  fi
  # Is the name present at the start of a finger(1) line, i.e. known/logged in?
  if [ "$(finger | grep -c "^$name\>")" -gt 0 ]; then
    # Full name: collapse runs of 3+ spaces to "_" so finger's second
    # column can be extracted with cut.
    echo Name: "$(finger | grep "$name" | sed "s/\ \ \ */_/g" | cut -d"_" -f2)"
    # ps prints one header line; subtract it to get the process count.
    aux=$(ps -u "$name" | wc -l)
    aux=$((aux - 1))
    echo Number of processes: "$aux"
    # w prints two header lines before the per-session rows; skip them.
    a=$(w -u "$name" | wc -l)
    lines=$((a - 2))
    # BUGFIX: this was `cur -d"s" -f4` — "cur" is a typo for cut and "s"
    # is not a meaningful delimiter.  Take the last whitespace-separated
    # field of each row (the WHAT column's command).
    cmd=$(w -u "$name" | tail -n "$lines" | awk '{ print $NF }')
    # BUGFIX: this compared the literal string "cmd" (always non-empty);
    # test the variable's value instead.
    if [ "$cmd" != "" ]; then
      echo Current running command: "$cmd"
    else
      echo No running command !
    fi
  else
    echo User does not exist !
  fi
done
| true
|
e62673c8fbfa3306588968297ffc682dde864d41
|
Shell
|
RobotsAndPencils/poc-box-callcenter-skill
|
/BoxTranscriptionLamda/script/deploy.sh
|
UTF-8
| 1,698
| 3.640625
| 4
|
[] |
no_license
|
existingFunction=
updateConfig=
if [ "$1" == "update" ]; then
updateConfig=true
elif [ "$1" == "" ]; then
existingFunction=true
fi
# utility
# Print the absolute path of the directory given as $1.
# BUGFIX: the old `echo $(cd $1;pwd)` printed the *current* directory when
# the cd failed, and broke on paths containing spaces.  The cd now runs in
# a subshell (caller's cwd untouched) and pwd only runs if cd succeeded;
# on failure nothing is printed and a non-zero status is returned.
function abs_path {
  (cd "$1" 2>/dev/null && pwd)
}
funcName=BoxTranscriptionTest
zipName=BoxTranscriptionLambda.zip
zipPath=../bin
region="us-east-1"
sourcePath=../bin/Debug/netcoreapp2.0/publish
role="arn:aws:iam::901211063728:role/bs-log-role"
handler="BoxTranscriptionLamda::BoxTranscriptionLamda.TranscribeFunction::FunctionHandler"
timeout="300"
if [ -e $zipPath/$zipName ]; then
echo "killing old file"
rm $zipPath/$zipName
else
echo "no old file"
fi
# get destination absolute path
absZipDest=$(abs_path $zipPath)
# save current location
currentPath=$(pwd)
cd $sourcePath
# package publish folder
zip -r $absZipDest/$zipName *
# restore old location
cd $currentPath
awsCreateFunction () {
echo "#### Creating New Function ######"
aws lambda create-function \
--region $region \
--function-name $funcName \
--zip-file fileb://$zipPath/$zipName \
--role $role \
--handler "${handler}" \
--runtime dotnetcore2.0 \
--timeout "${timeout}" \
--description "transcription"
}
awsUpdateFunctionConfig () {
echo "#### Updating Function Configuration ######"
aws lambda update-function-configuration \
--function-name $funcName \
--handler "${handler}" \
--timeout "${timeout}"
}
awsUpdateFunction () {
echo "#### Updating Function ######"
aws lambda update-function-code \
--function-name $funcName \
--zip-file fileb://$zipPath/$zipName \
--publish
}
if [ -n "$updateConfig" ]; then
awsUpdateFunctionConfig
elif [ -n "$existingFunction" ]; then
awsUpdateFunction
else
awsCreateFunction
fi
| true
|
bad3eb232a6abb57975177fc84724869b8d72d13
|
Shell
|
Jazznight/elasticsearch_toolkit
|
/dump_elasticsearch.sh
|
UTF-8
| 947
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/sh
# Dump an Elasticsearch index to a gzipped tarball in the invoking
# directory, via the snapshot API and a temporary filesystem repository
# under /tmp/tmp_elasticsearch_snapshot.
#
# Usage: dump_elasticsearch.sh INDEX_NAME

# BUGFIX: "$1" is quoted so the test also behaves when the argument
# contains spaces/globs, and a missing argument now exits non-zero.
if [ -z "$1" ];
then
    echo ""
    echo " Usage:"
    echo "    $(basename "$0") \$INDEX_NAME"
    echo ""
    echo ""
    echo ""
    exit 1
fi

MY_DIR=$(pwd)
TAR_FILE=dump_elasticsearch_$(date +'%Y%m%d%H%M%S').tar

cd /tmp
# Remove any leftover repository directory from a previous run.
rm -rf tmp_elasticsearch_snapshot > /dev/null 2>&1

# Register a filesystem snapshot repository rooted under /tmp.
curl -XPUT 'http://localhost:9200/_snapshot/tmp_elasticsearch_snapshot' -d '{
    "type": "fs",
    "settings": {
        "location": "/tmp/tmp_elasticsearch_snapshot",
        "compress": true
    }
}'

# Snapshot only the requested index.
# BUGFIX: snapshot creation is asynchronous by default, so the tar below
# could run before the snapshot finished; wait_for_completion=true blocks
# until the data is actually on disk.
curl -XPUT "localhost:9200/_snapshot/tmp_elasticsearch_snapshot/snapshot_1?wait_for_completion=true" -d "{
    \"indices\": \"$1\",
    \"ignore_unavailable\": \"true\",
    \"include_global_state\": false
}"

tar cvf "$TAR_FILE" tmp_elasticsearch_snapshot/
gzip "$TAR_FILE"

# Move the archive back to where the script was started and report.
if mv "$TAR_FILE.gz" "$MY_DIR/$TAR_FILE.gz";
then
    echo ""
    echo " Done! dumped indexes is locate at: $MY_DIR/$TAR_FILE.gz"
    echo ""
else
    echo ""
    echo " Failed!"
    echo ""
fi
| true
|
17ccf665e55734253694d12ee83500d4ee396d91
|
Shell
|
martamoragues/BigData
|
/Launch/Sort_launch_E4_MeDebesUnaCena.sh
|
UTF-8
| 3,715
| 3.1875
| 3
|
[] |
no_license
|
# Cambiamos el nombre del job
#$ -N testhadoop
# Indicamos el shell a usar:
#$ -S /bin/bash
# Indicamos las versiones a usar de hadoop (imprescindible):
#$ -v JAVA_HOME=/usr,HADOOP_HOME=/Soft/hadoop/0.20.203.0,HADOOP_CONF=/scratch/nas/2/martam/conf
# Indicamos que nos envie un correo cuando empieze el trabajo y cuando acabe...
#$ -m bea
# ... a esta dirección de correo
#$ -M martam@ac.upc.edu
export CONF=/scratch/nas/2/martam/conf
# swith on/off compression: 0-off, 1-on
export COMPRESS_GLOBAL=0
export COMPRESS_CODEC_GLOBAL=org.apache.hadoop.io.compress.DefaultCodec
### Definimos unos directorios de trabajo dentro del HDFS:
INPUT=$JOB_NAME"_"$JOB_ID"_IP"
OUTPUT=$JOB_NAME"_"$JOB_ID"_OP"
# CONF
# compress
COMPRESS=$COMPRESS_GLOBAL
COMPRESS_CODEC=$COMPRESS_CODEC_GLOBAL
# for prepare (per node) - 24G/node
#DATASIZE=24000000000
DATASIZE=4000000000
NUM_MAPS=16
# for running (in total)
NUM_REDS=16
# FI CONF
LOCAL_HD=/users/scratch/$USER/$JOB_NAME"_"$JOB_ID
echo "========== preparing sort data=========="
# path check
$HADOOP_HOME/bin/hadoop --config $CONF dfs -rmr $INPUT
# generate data
${HADOOP_HOME}/bin/hadoop --config $CONF fs -copyFromLocal "/scratch/nas/2/$USER/input_sort" $INPUT
# compress check
if [ $COMPRESS -eq 1 ]; then
COMPRESS_OPT="-D mapred.output.compress=true \
-D mapred.output.compression.codec=$COMPRESS_CODEC \
-D mapred.output.compression.type=BLOCK "
else
COMPRESS_OPT="-D mapred.output.compress=false"
fi
$HADOOP_HOME/bin/hadoop --config $CONF dfs -rmr $INPUT/_*
# copy generated input data
${HADOOP_HOME}/bin/hadoop --config $CONF fs -get $INPUT $LOCAL_HD/INPUT
## RUN
echo "========== running sort bench =========="
# compress
if [ $COMPRESS -eq 1 ]
then
COMPRESS_OPT="-D mapred.output.compress=true \
-D mapred.output.compression.type=BLOCK \
-D mapred.output.compression.codec=$COMPRESS_CODEC"
else
COMPRESS_OPT="-D mapred.output.compress=false"
fi
#path check
$HADOOP_HOME/bin/hadoop --config $CONF dfs -rmr $OUTPUT
# pre-running
SIZE=`$HADOOP_HOME/bin/hadoop --config $CONF fs -dus $INPUT | awk '{ print $2 }'`
# run bench
declare -a TRADUCCIO_PERCENT=( [100]=1 [50]=0.5 [25]=0.25 [10]=0.10 [5]=0.05 [1]=0.01 )
EXECUTION="4"
for ITERATION in {1..5}
do
for PERCENT in 100 50 25 10 5 1
do
CURRENT_OUTPUT="E${EXECUTION}/IT_$ITERATION/E${EXECUTION}_$(printf %03d $PERCENT)"
echo "Executing E${EXECUTION} IT_$ITERATION P$PERCENT"
$HADOOP_HOME/bin/hadoop --config $CONF jar /scratch/nas/2/$USER/wc.jar org.apache.hadoop.examples.Sort_E${EXECUTION} \
$COMPRESS_OPT \
-outKey org.apache.hadoop.io.Text \
-outValue org.apache.hadoop.io.Text \
-r ${NUM_REDS} \
$INPUT $OUTPUT/$CURRENT_OUTPUT ${TRADUCCIO_PERCENT[$PERCENT]}
echo "Copying all data of E${EXECUTION} IT_$ITERATION P$PERCENT to local hd"
${HADOOP_HOME}/bin/hadoop --config $CONF fs -getmerge $OUTPUT/$CURRENT_OUTPUT $LOCAL_HD/$CURRENT_OUTPUT.txt
echo "Removing all data of E${EXECUTION} IT_$ITERATION P$PERCENT from HDFS"
${HADOOP_HOME}/bin/hadoop --config $CONF fs -rmr $OUTPUT/$CURRENT_OUTPUT
done
done
# post-running
RESULT=/scratch/nas/2/$USER/$JOB_NAME"_"$JOB_ID
mkdir -p $RESULT
# Descarrega tota la web
wget -q -r -k -p -nH --adjust-extension --exclude-directories=/logs/ -l 0 -P $RESULT/links/ http://localhost:50030
# //BORREM LES DADES DEL HDFS
${HADOOP_HOME}/bin/hadoop fs -rmr $INPUT
${HADOOP_HOME}/bin/hadoop fs -rmr $OUTPUT
curl -s \
-F "token=a4fSa7UW8vmxijUg6udU1cwUtM3tB7" \
-F "user=OO4JgmIzwWBU44nigYskODlS8QUAs7" \
-F "device=iPhone5" \
-F "title=Hadoop" \
-F "message=Acabat: $JOB_ID :)" \
https://api.pushover.net/1/messages.json
| true
|
a3a92e354baf4c556729b24b4e6e3a582aef3c47
|
Shell
|
ken-smj/dotfiles
|
/.emacs.d/elisp/twittering-mode/update-cert.sh
|
UTF-8
| 1,026
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/sh
# Refresh the root certificates embedded in twittering-mode.el: download
# the curl CA bundle, pick a few specific CAs, render each as ";;"-
# commented metadata plus a double-quoted PEM block (an Elisp string
# literal), and splice the result between the BEGIN/END delimiter lines.
START_DELIMITER=";; #BEGIN-CERTIFICATE"
END_DELIMITER=";; #END-CERTIFICATE"
BUNDLE_FILE=`mktemp`
EMBEDDED_CERTS=`mktemp`
# Fetch the full Mozilla CA bundle.
curl https://curl.haxx.se/ca/cacert.pem > ${BUNDLE_FILE}
# In the bundle each certificate name appears on the line before a row of
# '='.  Select the wanted CAs by name and build the embeddable block.
(grep --before-context=1 '^=' ${BUNDLE_FILE} | sed -ne '/^[^-=]/p' \
| egrep -i '(verisign|geotrust global ca$|DigiCert High Assurance EV Root CA)' \
| while read CERT ; do
echo ";; ${CERT}";
# Several certificates can share the same name; count the exact matches.
NUM=`grep -c "^${CERT}\$" ${BUNDLE_FILE}`;
# Stream every PEM section for this name into openssl, once per match.
sed -ne '/^'"${CERT}"'$/,/^$/p' ${BUNDLE_FILE} \
| (for n in `seq ${NUM}` ; do
# Each iteration consumes one certificate from the shared stdin stream:
# print issuer/subject/serial/fingerprint/dates as ";;" comments, keep the
# PEM body uncommented, and wrap it in double quotes for Elisp.
openssl x509 -issuer -subject -serial -fingerprint -dates \
| sed -e 's/^/;; /' \
| sed -e '/^;; -----/,/^;; -----/s/^;; //' \
| sed -e '/^-----BEGIN/s/^/"/' -e '/^-----END/s/$/\n"/';
done)
done ) > ${EMBEDDED_CERTS}
# In-place (with .bak backup) replace the delimited region: at the START
# delimiter, re-emit it, read in the generated block, append the END
# delimiter; the final !p (with -n) prints everything outside the old
# delimited region, effectively deleting the stale certificates.
sed -i.bak -ne '/^'"${START_DELIMITER}"'$/{
a '"${START_DELIMITER}"'
r '"${EMBEDDED_CERTS}"'
a '"${END_DELIMITER}"'
}
/^'"${START_DELIMITER}"'$/,/^'"${END_DELIMITER}"'$/!p' twittering-mode.el
rm ${EMBEDDED_CERTS} ${BUNDLE_FILE}
| true
|
9e88718a30d1e740fe87373403fda49bcb603fdb
|
Shell
|
cliffchristianson/dev-docker
|
/.docker/backend/init
|
UTF-8
| 1,483
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
# Install Composer dependencies
composer install -d "/var/www/backend"
echo "Initialize Script in Container"
echo "Project Name = "${COMPOSE_PROJECT_NAME}
echo "Front End Domain = "${FRONT_END_DOMAIN}
echo "Back End Domain = "${BACK_END_DOMAIN}
echo "Database Name = "$DATABASE_NAME
echo "Database User = "${DATABASE_USER}
echo "Database Password = "${DATABASE_PASSWORD}
# Create the laravel project
composer create-project --prefer-dist laravel/laravel tmp "8.*"
# Wait until the project is created
wait
sh -c "mv -n tmp/.* ./ && mv tmp/* ./ && rm -Rf tmp"
# Create the backend .env file
cat > "/var/www/backend/.env" << EOF
APP_NAME=$COMPOSE_PROJECT_NAME
APP_ENV=local
APP_KEY=
APP_DEBUG=true
APP_URL=http://backend.dev.test
LOG_CHANNEL=single
DB_CONNECTION=mysql
DB_HOST=mysql
DB_PORT=3306
DB_DATABASE=$DATABASE_NAME
DB_USERNAME=$DATABASE_USER
DB_PASSWORD=$DATABASE_PASSWORD
BROADCAST_DRIVER=log
CACHE_DRIVER=file
QUEUE_CONNECTION=sync
SESSION_DRIVER=file
EOF
# Generate application key
php "/var/www/backend/artisan" key:generate --ansi
# Make sure the MySQL database is available
echo 'Waiting for MySQL to be available'
count=1
while [ $count -le 10 ] && ! mysql -u$DATABASE_USER -p$DATABASE_PASSWORD -h mysql -P3306 -e 'exit' ; do
sleep 5
((count++))
done
if [ "$count" -ge 10 ]; then
echo >&2 'error: failed to connect to MySQL after 10 attempts'
exit 1
fi
echo 'MySQL connection successful!'
# Database
php "/var/www/backend/artisan" migrate
| true
|
5f78f35e25084989772102b7c59d677fbd84aa51
|
Shell
|
JonasRSV/polinfo
|
/build
|
UTF-8
| 1,860
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
set -e
fg_black="$(tput setaf 0)"
fg_red="$(tput setaf 1)"
fg_green="$(tput setaf 2)"
fg_yellow="$(tput setaf 3)"
fg_blue="$(tput setaf 4)"
fg_magenta="$(tput setaf 5)"
fg_cyan="$(tput setaf 6)"
fg_white="$(tput setaf 7)"
reset="$(tput sgr0)"
# Abort early when a required tool is missing, pointing at install docs.
# BUGFIX: these messages referenced ${fg_reset}, which is never defined —
# the reset escape defined above is ${reset} — so the terminal colour was
# left red.  The bare `command -v` also leaked the tool's path to stdout;
# it is now silenced (only the exit status matters).
if ! command -v cargo >/dev/null
then
  echo "${fg_red}cargo not found Please install rust${reset}"
  echo "Installation instructions can be found here:"
  echo "  https://www.rust-lang.org/tools/install"
  exit 1
fi

if ! command -v docker >/dev/null
then
  echo "${fg_red}Docker not found Please install it${reset}"
  echo "Installation instructions can be found here:"
  echo "  https://docs.docker.com/engine/install/"
  exit 1
fi
# Build the Postgres image and run the local development database
# container, persisting data under ./data.
database () {
  # BUGFIX: the colour-reset variable is ${reset}; ${fg_reset} was
  # undefined, leaving the terminal magenta/green after these messages.
  echo "${fg_magenta}Starting database${reset}"
  # Every time the container is launched it changes the permissions
  # on the data files so only root can rw. This makes docker crash the
  # next time around since it is run in user-space on this machine.
  # A solution is to set the permissions each time before launching.
  sudo chmod -R 777 data

  docker build --no-cache -t polidb:latest -f Dockerfile/Dockerfile.postgres .
  # BUGFIX: quote the volume argument so a working directory containing
  # spaces does not split the -v option value.
  docker run \
    -e "POSTGRES_USER=polidb" \
    -e "POSTGRES_PASSWORD=develop" \
    -p 5432:5432 \
    -v "$PWD/data:/var/lib/postgresql/data" \
    polidb:latest

  echo "${fg_green}Success!${reset}"
}
print_help() {
echo ""
echo ""
echo "${fg_green} ******** Poli build script ******** ${fg_black}"
echo " "
echo "${fg_white} options:"
echo "-h, --help show brief help"
echo "${fg_magenta}-db --database${fg_white}"
exit 0
}
#while test $# -gt 0; do
case "$1" in
-h|--help)
print_help
break
;;
--database)
database
exit 0
;;
-db)
database
exit 0
;;
*)
print_help
break
;;
esac
#done
print_help
| true
|
8d3c3f051721b608156b3b9f3fb544c7af9ced0a
|
Shell
|
cjhillbrand/websocket-sandbox
|
/resources/FastFixSimpleLab.sh
|
UTF-8
| 4,683
| 3.890625
| 4
|
[] |
no_license
|
#!/bin/bash
# This script assumes that the IAM Role for API Gateway has already been made
# Strip one leading and one trailing double-quote character (if present)
# from the argument and print the result — used to clean quoted values
# returned by the AWS CLI.
function removeQuotes() {
  local trimmed=$1
  trimmed=${trimmed#\"}   # drop a single leading "
  trimmed=${trimmed%\"}   # drop a single trailing "
  printf '%s\n' "$trimmed"
}
# Create the <env>Chatroom WebSocket API in API Gateway v2, wire the
# $connect / $disconnect / dispatch routes to the matching Lambda
# functions, and deploy a "production" stage.
#
# Arguments: $1 env-name prefix, $2 AWS region, $3 AWS account id
#            (accepted for interface compatibility; unused below).
# Outputs:   prints the new API id on stdout.
# Exits:     1 when the IAM role or any Lambda function is missing.
function createWebSocket() {
  envName=$1
  region=$2
  accountId=$3

  # If this API Role does not exist we abandon the script right away.
  # The command is built as a string and eval'd so the JMESPath backtick
  # literals survive quoting.
  getApiRole=$(echo aws "iam list-roles --query 'Roles[?contains(RoleName,\`"$envName"WebSocketAPIRole\`)].Arn|[0]'")
  apiRoleArn=$(removeQuotes $( eval $getApiRole ))

  if [ -z "$apiRoleArn" ]; then
    echo "Make sure that you have created ${envName}WebSocketAPIRole before running this script."
    echo "The instructions to complete this task is located in Task 2; Step 2"
    exit 1
  fi

  # Resolve the three Lambda handler ARNs.
  connectCall=$(echo aws lambda get-function --function-name="$envName"Connect --query 'Configuration.FunctionArn' --output text)
  connectArn=$(removeQuotes $( eval $connectCall ))
  disconnectCall=$(echo aws lambda get-function --function-name="$envName"Disconnect --query 'Configuration.FunctionArn' --output text)
  disconnectArn=$(removeQuotes $( eval $disconnectCall ))
  sendMessageCall=$(echo aws lambda get-function --function-name="$envName"SendMessage --query 'Configuration.FunctionArn' --output text)
  sendMessageArn=$(removeQuotes $( eval $sendMessageCall ))

  if [ -z "$connectArn" ] || [ -z "$disconnectArn" ] || [ -z "$sendMessageArn" ]; then
    echo "One or more of your lambda functions are not deployed"
    # BUGFIX: the backticks in this message were unescaped inside double
    # quotes, so the shell *executed* `source deployLambdas.sh <envName>`
    # instead of printing it; single quotes keep the message literal.
    echo 'Please run `source deployLambdas.sh <envName>` before running this script'
    exit 1
  fi

  # Now we create the Web Socket, confident all preliminary resources exist.
  websocketCreateCommand=$(echo aws apigatewayv2 --region "$region" create-api --name "$envName"Chatroom-WebSocket --protocol-type WEBSOCKET --route-selection-expression '\$request.body.action' --query ApiId --output text)
  websocketApiId=$(removeQuotes $( eval $websocketCreateCommand ))

  # One AWS_PROXY integration plus one route per Lambda.
  connectIntegration=$(aws apigatewayv2 create-integration --api-id $websocketApiId --integration-type AWS_PROXY --integration-method POST\
   --integration-uri arn:aws:apigateway:"$region":lambda:path/2015-03-31/functions/${connectArn}/invocations\
   --query IntegrationId --output text --credentials-arn "$apiRoleArn")
  connectId=$(aws apigatewayv2 --region "$region" create-route --api-id "$websocketApiId"\
   --route-key \$connect --output text --query RouteId --target integrations/"$connectIntegration")

  disconnectIntegration=$(aws apigatewayv2 create-integration --api-id $websocketApiId --integration-type AWS_PROXY --integration-method POST\
   --integration-uri arn:aws:apigateway:"$region":lambda:path/2015-03-31/functions/${disconnectArn}/invocations\
   --query IntegrationId --output text --credentials-arn "$apiRoleArn")
  disconnectId=$(aws apigatewayv2 --region "$region" create-route --api-id "$websocketApiId"\
   --route-key \$disconnect --output text --query RouteId --target integrations/"$disconnectIntegration")

  sendMessageIntegration=$(aws apigatewayv2 create-integration --api-id $websocketApiId --integration-type AWS_PROXY --integration-method POST\
   --integration-uri arn:aws:apigateway:"$region":lambda:path/2015-03-31/functions/${sendMessageArn}/invocations\
   --query IntegrationId --output text --credentials-arn "$apiRoleArn")
  sendMessageId=$(aws apigatewayv2 --region "$region" create-route --api-id "$websocketApiId"\
   --route-key dispatch --output text --query RouteId --target integrations/"$sendMessageIntegration")

  # Deploy the routes to a "production" stage.
  deploymentId=$(aws apigatewayv2 --region "$region" create-deployment --api-id "$websocketApiId" --query DeploymentId --output text)
  stageId=$(aws apigatewayv2 --region "$region" create-stage --api-id "$websocketApiId" --deployment-id "$deploymentId" --stage-name production)

  echo ${websocketApiId}
}
# Print the default region configured for the local AWS CLI.
function getRegion() {
  aws configure get region
}
if [ "$1" == "" ]; then
echo
echo "**ERROR**"
echo At least the environment name must be provided
echo
echo Usage:
echo "fixwebsocket <envName>"
echo
echo example: source FastFixSimpleLab.sh testenv
else
envName=$(echo $1 | tr 'a-z' 'A-Z')
region=$(getRegion)
accountId=$(aws sts get-caller-identity --output text --query 'Account')
apiId=$( createWebSocket $envName $region $accountId )
URL="wss://${apiId}.execute-api.${region}.amazonaws.com/production"
ARN="arn:aws:execute-api:"$region":"$accountId":"$apiId"/*"
echo "WebSocket URL is: ${URL}"
echo "WebSocket ARN is ${ARN}"
fi
| true
|
4494827e591c41fcd7de664a21eb86abb51b8a06
|
Shell
|
Aparicio99/scripts
|
/clipcp
|
UTF-8
| 590
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/sh
# Helps "transfering" a file between shells by X selection:
# 1) puts a decode-command on the selection for the receiving shell,
# 2) then puts the bzip2+base64-encoded file content on the selection.

COLOR1="\033[1;31m" # RED
COLOR2="\033[1;32m" # GREEN
RESET="\033[0m"

echo -e " > ${COLOR1}Command${RESET} copied to clipboard." \
        "Middle-click on the destination and press ${COLOR1}Enter${RESET}."

# This blocks until the selection is pasted anywhere
echo -n "cat | base64 -d | bunzip2 > $1" | xclip -i -loops 1 -verbose 2>/dev/null

echo -e " > ${COLOR2}Content${RESET} copied to clipboard." \
        "Middle-click on the destination and press ${COLOR2}Ctrl+D${RESET}."

# BUGFIX: the file is fed via redirection with "$1" quoted — the old
# `cat $1 | bzip2` broke on file names with spaces (and spawned a
# useless cat).
bzip2 < "$1" | base64 | xclip -i
| true
|
cf8948a313d1e7beb464d4a6079dba5dfd944c97
|
Shell
|
mzym/thesis
|
/pargres/runinis.sh
|
UTF-8
| 358
| 3
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
set -e
source ./config.sh
nodelist=$(IFS=,; echo "${nodes[*]:0:n}") # join the node list on comma into a string
echo "Starting message passing subsystem on $nodelist"
nohup mpirun -H $nodelist "$prefix/bin/par_inis_daemon" < /dev/null &> ./inis.log &
#mpirun -H $nodelist "$prefix/bin/par_inis_daemon"
echo "Message passing subsystem started."
| true
|
6f5607120bb4a8d17c27931676cd2d8bec6bdeb9
|
Shell
|
aalin/pinebook-config
|
/bin/set-brightness
|
UTF-8
| 356
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
# Step the backlight up or down with brightnessctl using a sine-shaped
# ramp: the percentage step grows with the current level, so changes feel
# more even across the range.
# Current and maximum raw brightness values.
curr=$(brightnessctl get)
max=$(brightnessctl max)
# Step size in percent: 1 + 9*sin(curr/max * pi/2), i.e. ~1% near the
# bottom of the range up to ~10% near the top (1.57079632 ~= pi/2).
step=$(echo "$curr $max" | awk '{ printf "%d\n", (1 + (10 - 1) * sin($1 / $2 * 1.57079632)) }')
case $1 in
up)
# "+N%" raises brightness by N percent.
brightnessctl set +$step%
;;
down)
# "N%-" lowers brightness by N percent.
brightnessctl set $step%-
;;
*)
echo "Adjust brightness according to a ramp"
echo
echo "Usage: $0 [up|down]"
esac
| true
|
0f82c4c7d866777cf87ff431dcd269a1521785d9
|
Shell
|
snoplus/SNOPlusSlowControl
|
/cronjobs/ios/diskUsage.sh
|
UTF-8
| 828
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/sh
# This script checks the current disk usage,
# then writes it to diskUsage.txt. It then
# checks that the usage is not above a
# maximum value (like 70%), and if it is,
# it sends an email.

#TODO: Write this in a python script and add the ability to send an alarm to the
# shifting GUI?

HOMEDIR=/home/slowcontroller
DISKUSELOGLOC=/SNOPlusSlowControl/SNOPlusSlowControl/log/IOSDiskUsage.txt

# Write a timestamp header followed by df's output to the log file
# (the useless "| cat >" stages were dropped; plain redirection is
# equivalent).
echo "At $(date) the disk usage is" > "${HOMEDIR}${DISKUSELOGLOC}"
df >> "${HOMEDIR}${DISKUSELOGLOC}"

# checkUsage.py prints "True"/"False" depending on the warning threshold
# — presumably; verify against that script.
ALARM=$(python "${HOMEDIR}/SNOPlusSlowControl/cronjobs/ios/checkUsage.py" "${HOMEDIR}${DISKUSELOGLOC}")

# BUGFIX: the old unquoted `[ $ALARM == "True" ]` used the bash-only ==
# under #!/bin/sh and produced a syntax error whenever checkUsage.py
# printed nothing or more than one word; quote the variable and use
# POSIX "=".
if [ "$ALARM" = "True" ]
then
    echo "IOS $(hostname) at storage warning limit"
    echo "Sending warning... IOS at storage warning limit"
fi
if [ "$ALARM" = "False" ]
then
    echo "Disk usage under warning limit"
fi
| true
|
553b1d9ffe3516c73c8af752630d07a8ccb7d868
|
Shell
|
jmdarr/scripts
|
/transmission/transmission_killswitch.sh
|
UTF-8
| 1,898
| 4.1875
| 4
|
[] |
no_license
|
#!/bin/env bash
wanted_country='Canada'
svc='transmission-daemon'
# Succeed (return 0) when any transmission process is running, excluding
# the grep pipeline itself and this killswitch script.
function check_for_running_transmission() {
  local matches
  matches=$(ps aux | grep -i transmission | grep -v grep | grep -v killswitch | wc -l)
  if (( matches > 0 )); then
    return 0
  fi
  return 1
}
# Start the transmission daemon via systemd unless it is already up.
# Returns 0 only when this call actually started the service.
function start_transmission() {
  echo -n 'Starting transmission... '
  if check_for_running_transmission; then
    echo 'already running.'
    return 1
  fi
  if systemctl start ${svc} >/dev/null 2>&1; then
    echo 'started.'
    return 0
  fi
  echo 'failed.'
  return 1
}
# Stop the transmission daemon via systemd if it is running.
# Returns 0 only when this call actually stopped the service.
function stop_transmission() {
  echo -n 'Stopping transmission... '
  if ! check_for_running_transmission; then
    echo 'already stopped.'
    return 1
  fi
  if systemctl stop ${svc} >/dev/null 2>&1; then
    echo 'stopped.'
    return 0
  fi
  echo 'failed.'
  return 1
}
# check for IP via dns resolution
echo -n 'Checking for VPN IP... '
ip=$(dig @resolver1.opendns.com myip.opendns.com +short) || { echo "dig command failed, exiting."; exit 1; }
[ "${ip}x" == "x" ] && { echo "IP lookup returned empty, exiting."; exit 1; }
echo "found: ${ip}"
# locate where our IP is, geographically speaking
echo -n 'Checking for GeoIP location... '
output=$(/bin/geoiplookup "${ip}") || { echo 'geoiplookup command failed, exiting.'; exit 1; }
[ "${output}x" == "x" ] && { echo "geoiplookup returned empty, exiting."; exit 1; }
echo "found: ${output}"
# check to make sure we're resolving from Canadia... for reasons
echo -n "Ensuring we're pinned to '${wanted_country}'... "
# if we're not, then we want to stop transmission.
if [[ "${output}" =~ "Canada" ]]; then
echo "look at that. Fancy. Let's make sure things are moving along."
start_transmission
else
echo "oops, guess not. Let's stop the downloader, I guess."
stop_transmission
fi
| true
|
8196d2a35b06e92a8c6455f710ddcdff523c4579
|
Shell
|
Rtshaw/CrawlerPractice
|
/ruten/script.sh
|
UTF-8
| 331
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/sh
# Daily crawler driver: keep only today's output directory, then run
# fee.py inside the project virtualenv and log its output to
# ./<YYYYMMDD>/<HHMM>.txt.

# BUGFIX: abort if the cd fails — otherwise the rm -r below would run in
# whatever directory the cron job happened to start in.
cd /home/ec2-user/CrawlerPractice/ruten || exit 1

yesterday=$(date -d '-1 day' '+%Y%m%d')
today=$(date '+%Y%m%d')
now=$(date '+%H%M')

# BUGFIX: reuse the variables computed above instead of calling date
# again inline — the old code could disagree with itself across a
# midnight boundary.
if [ -d "./$yesterday" ]
then
    rm -r "$yesterday"
fi
if [ ! -d "./$today" ]
then
    mkdir "$today"
fi

# "." is the portable spelling of bash's "source" under #!/bin/sh.
. venv/bin/activate
python fee.py > ./"$today"/"$now".txt
| true
|
3552616356543746463d84b111901548ec7640ff
|
Shell
|
CodeAsm/ghidra-xbe
|
/build.sh
|
UTF-8
| 2,161
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash -e
pushd /tmp
echo "[*] Downloading files..."
cat <<EOF > urls.txt
https://corretto.aws/downloads/latest/amazon-corretto-11-x64-linux-jdk.tar.gz
https://services.gradle.org/distributions/gradle-5.0-bin.zip
https://ghidra-sre.org/ghidra_9.1.2_PUBLIC_20200212.zip
https://github.com/mborgerson/XbSymbolDatabase/releases/download/cli-tool-0.1/XbSymbolDatabaseTool.zip
EOF
cat urls.txt | xargs -n 1 -P 10 wget --no-verbose
echo "[*] Extracting JDK..."
mkdir -p jdk
tar --strip-components=1 -C jdk --extract -f amazon-corretto-11-x64-linux-jdk.tar.gz
export JAVA_HOME=$PWD/jdk
export PATH=$JAVA_HOME/bin:$PATH
echo "[*] Extracting Gradle..."
unzip -q gradle-5.0-bin.zip
export PATH=$PWD/gradle-5.0/bin:$PATH
echo "[*] Extracting Ghidra..."
unzip -q ghidra_9.1.2_PUBLIC_20200212.zip
export GHIDRA_INSTALL_DIR=$PWD/ghidra_9.1.2_PUBLIC
echo "[*] Extracting XbSymbolDatabase..."
unzip -q XbSymbolDatabaseTool.zip
export XBSYMBOLDATABASE=$PWD/XbSymbolDatabaseTool
popd # Back to source root
# Copy XbSymbolDatabase into this source tree for redist
cp $XBSYMBOLDATABASE/XbSymbolDatabaseTool.linux64.Release os/linux64/XbSymbolDatabaseTool
cp $XBSYMBOLDATABASE/LICENSE os/linux64/XbSymbolDatabaseTool.LICENSE
cp $XBSYMBOLDATABASE/XbSymbolDatabaseTool.macos64.Release os/osx64/XbSymbolDatabaseTool
cp $XBSYMBOLDATABASE/LICENSE os/osx64/XbSymbolDatabaseTool.LICENSE
cp $XBSYMBOLDATABASE/XbSymbolDatabaseTool.win64.Release.exe os/win64/XbSymbolDatabaseTool
cp $XBSYMBOLDATABASE/LICENSE os/win64/XbSymbolDatabaseTool.LICENSE
echo "[*] Building..."
gradle -b build.gradle
if [[ "$RUNTESTS" == "1" ]]; then
echo "[*] Installing Extension..."
cp ./dist/*ghidra-xbe.zip $GHIDRA_INSTALL_DIR/Ghidra/Extensions
pushd $GHIDRA_INSTALL_DIR/Ghidra/Extensions
unzip *ghidra-xbe.zip
popd
echo "[*] Running tests..."
pushd tests
$GHIDRA_INSTALL_DIR/support/analyzeHeadless . test_project -import xbefiles/triangle.xbe -postScript ./test_load.py
if [[ -e TEST_PASS ]]; then
echo "[+] Test PASSED"
else
echo "[-] Test FAILED"
exit 1
fi
fi
echo "[*] Done!"
| true
|
527f58696f5546c8d4609d8bcd39b9b33efeed29
|
Shell
|
jonaselan/setup-my-pc
|
/bin/helpers.sh
|
UTF-8
| 1,676
| 3.9375
| 4
|
[] |
no_license
|
#!/bin/bash
update_dotfiles() {
# clone dotifles
git clone https://github.com/jonaselan/dotfiles.git ~/.smpc
# Setup folders
mkdir ~/.config/terminator/
mkdir ~/.config/sxhkd/
mkdir ~/.config/fusuma/
# src:dest
link_files=(
"$HOME/.smpc/.zshrc:$HOME/.zshrc"
"$HOME/.smpc/.vimrc:$HOME/.vimrc"
"$HOME/.smpc/.gitconfig:$HOME/.gitconfig"
"$HOME/.smpc/terminator:$HOME/.config/terminator/config"
"$HOME/.smpc/sxhkdrc:$HOME/.config/sxhkd/sxhkdrc"
"$HOME/.smpc/fusuma.yml:$HOME/.config/fusuma/config.yml"
)
# Link files
info 'Linking dotfiles'
for lf in "${link_files[@]}"
do
src="$(echo -n $lf | cut -d':' -f1)"
dest="$(echo -n $lf | cut -d':' -f2)"
ln -sTf $src $dest
# explicit simbolyc link
stat -c '%N' "$(echo -n $lf | cut -d':' -f2)"
done
success 'Dotfiles updated'
}
info () {
printf "\r [ \033[00;34m...\033[0m ] $1\n"
}
warning () {
printf "\r [ \033[00;36m\!\!\033[0m ] $1\n"
}
user () {
printf "\r [ \033[0;33m??\033[0m ] $1\n"
}
success () {
printf "\r\033[2K [ \033[00;32mOK\033[0m ] $1\n"
}
fail () {
printf "\r\033[2K [\033[0;31mFAIL\033[0m] $1\n"
}
# Check whether a command is available on this system.
command_exists() {
  # `command -v` is the POSIX-recommended existence test; the old `type`
  # call behaves differently across shells.  All output is discarded —
  # only the exit status matters.
  command -v "$1" >/dev/null 2>&1
}
show_help(){
echo
cat $SMPCPATH/help.txt
echo
}
# Print the latest version tag (vX.Y.Z, version-sorted) of the smpc repo,
# fetching remote tags first.
show_version(){
  # BUGFIX: the old "2&> /dev/null" tokenizes as a literal "2" argument
  # followed by &>, so git fetch received a bogus refspec and cd received
  # an extra argument; the intended redirection is ">/dev/null 2>&1".
  cd "$SMPCPATH" && git fetch -vp >/dev/null 2>&1
  # Highest tag containing "v" followed by any character, by version sort.
  git tag -l --sort=v:refname | grep -E 'v.' | tail -1
  cd - >/dev/null 2>&1
}
confirm_install(){
user "Do you want to install $1? (y/n)"
read choice
if [[ $choice == "Y" || $choice == "y" || $choice == "yes" ]]; then
install_$1
else
info "...Not installed!"
fi
}
update_packages(){
sudo apt-get update && sudo apt-get upgrade -y
}
| true
|
b0cd9973e76e050a9bd3db19942f565284c78673
|
Shell
|
toomanycats/Bash
|
/resample_book_cover.sh
|
UTF-8
| 165
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/bash
# Resample an image to XDIM x YDIM with ImageMagick, applying a median
# filter (radius 11) and nearest-neighbour interpolation.
# Usage: resample_book_cover.sh XDIM YDIM INFILE OUTFILE

NEW_XDIM=$1
NEW_YDIM=$2
infile=$3
outfile=$4

# BUGFIX: the file arguments are quoted so paths containing spaces or
# glob characters no longer break the command.
convert -antialias -resample "${NEW_XDIM}x${NEW_YDIM}" -interpolate nearest -median 11 "${infile}" "${outfile}"
| true
|
06809ff2322827400f71a9d95d0eb063b94a11e7
|
Shell
|
xg-wang/dotfiles
|
/setup.sh
|
UTF-8
| 932
| 2.765625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
mkdir $HOME/Code
## Mac
# First setup system preferences: http://sourabhbajaj.com/mac-setup/SystemPreferences/
xcode-select --install
# Homebrew
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
if [[ $(uname -m) == 'arm64' ]]; then
echo 'eval "$(/opt/homebrew/bin/brew shellenv)"' >> ~/.zprofile
eval "$(/opt/homebrew/bin/brew shellenv)"
else
sudo chown -R $(whoami) /usr/local/Cellar
fi
# Install things with Homebrew
brew update
brew bundle
./setup_zsh.sh
./setup_links.sh
# ./setup_macos.sh
# nvim
# use brew ruby
# /usr/local/opt/ruby/bin/gem install neovim
# yarn global add neovim typescript
# https://github.com/junegunn/vim-plug
curl -fLo ~/.local/share/nvim/site/autoload/plug.vim --create-dirs \
https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim
# Install [Volta](https://volta.sh/)
# curl https://get.volta.sh | bash
| true
|
b5f0fa3754bfb5ed48445fe70b0c74557d34cb63
|
Shell
|
tilljoel/dotfiles
|
/.zshrc
|
UTF-8
| 5,989
| 3.09375
| 3
|
[] |
no_license
|
# Path to your oh-my-zsh configuration.
ZSH=$HOME/.oh-my-zsh
# Look in ~/.oh-my-zsh/themes/
# ZSH_THEME="robbyrussell"
#ZSH_THEME="superjarin"
ZSH_THEME="tilljoel"
# Uncomment following line if you want to disable autosetting terminal title.
# DISABLE_AUTO_TITLE="true"
# Which plugins would you like to load? (plugins can be found in ~/.oh-my-zsh/plugins/*)
# Example format: plugins=(rails git textmate ruby lighthouse)
plugins=(git osx ruby rails3 rails4 gem git github bundler npm macports vi-mode brew heroku osx rake coffee tmux tmuxinator rbenv)
source $ZSH/oh-my-zsh.sh
# Fix search like ctrl-r for vim
export RUBY_HEAP_MIN_SLOTS=1000000
export RUBY_HEAP_FREE_MIN=500000
export RUBY_HEAP_SLOTS_INCREMENT=1000000
export RUBY_HEAP_SLOTS_GROWTH_FACTOR=1
export RUBY_GC_MALLOC_LIMIT=100000000
export REDISCLOUD_URL=$BOXEN_REDIS_URL
bindkey -v
bindkey -M vicmd '/' history-incremental-search-backward
# Attach to (or create) a screen session named after the current
# directory, using its .screenrc_extra config; prints "no session found"
# when the directory has no .screenrc_extra.
function dev {
  if [ -f .screenrc_extra ]
  then
    #Add git branch?
    # Session name = basename of $PWD.  BUGFIX: parameter expansion
    # replaces the old unquoted `expr $PWD : ...`, which broke on paths
    # containing spaces; $APP is now quoted everywhere it is used.
    APP=${PWD##*/}
    echo "$APP"
    # Reattach if a session with this name already exists, otherwise
    # start a fresh one with the local config.
    if screen -list | grep "$APP"; then
      screen -x -r "$APP"
    else
      screen -S "$APP" -c .screenrc_extra
    fi
  else
    echo "no session found"
  fi
}
# Add this to your .oh-my-zsh theme if you're using those, or directly to your zsh theme :)
#Customized git status, oh-my-zsh currently does not allow render dirty status before branch
#git_custom_status() {
#local cb=$(current_branch)
#if [ -n "$cb" ]; then
#echo "joel $(parse_git_dirty) $(git_time_since_commit)$ZSH_THEME_GIT_PROMPT_PREFIX$(current_branch)$ZSH_THEME_GIT_PROMPT_SUFFIX"
#fi
#}
color_ruby_words="--color-words='(:|@|@@|\$)?[a-zA-Z_][a-zA-Z0-9_]*[?!]?'"
# Colors vary depending on time lapsed.
#
ZSH_THEME_GIT_TIME_SINCE_COMMIT_SHORT="%{$fg[green]%}"
ZSH_THEME_GIT_TIME_SHORT_COMMIT_MEDIUM="%{$fg[yellow]%}"
ZSH_THEME_GIT_TIME_SINCE_COMMIT_LONG="%{$fg[red]%}"
ZSH_THEME_GIT_TIME_SINCE_COMMIT_NEUTRAL="%{$fg[cyan]%}"
DISABLE_AUTO_TITLE=true
DISABLE_AUTO_UPDATE="true"
# Determine the time since last commit. If branch is clean,
# use a neutral color, otherwise colors will vary according to time.
# Emits a "-<color><elapsed>" fragment for the zsh prompt (nothing when the
# last commit is more than a day old), using the ZSH_THEME_GIT_TIME_*
# colors defined above.
function git_time_since_commit() {
# Silently do nothing when the cwd is not inside a git repository.
if git rev-parse --git-dir > /dev/null 2>&1; then
# Only proceed if there is actually a commit.
# (an empty repo makes 'git log' print "fatal: bad default revision" on
# stderr; 2>&1 >/dev/null routes only stderr into the grep)
if [[ $(git log 2>&1 > /dev/null | grep -c "^fatal: bad default revision") == 0 ]]; then
# Get the last commit.
last_commit=`git log --pretty=format:'%at' -1 2> /dev/null`
now=`date +%s`
seconds_since_last_commit=$((now-last_commit))
# Totals
MINUTES=$((seconds_since_last_commit / 60))
HOURS=$((seconds_since_last_commit/3600))
# Sub-hours and sub-minutes
DAYS=$((seconds_since_last_commit / 86400))
SUB_HOURS=$((HOURS % 24))
SUB_MINUTES=$((MINUTES % 60))
# Dirty work tree: color escalates with time since the last commit.
if [[ -n $(git status -s 2> /dev/null) ]]; then
if [ "$MINUTES" -gt 30 ]; then
COLOR="$ZSH_THEME_GIT_TIME_SINCE_COMMIT_LONG"
elif [ "$MINUTES" -gt 10 ]; then
COLOR="$ZSH_THEME_GIT_TIME_SHORT_COMMIT_MEDIUM"
else
COLOR="$ZSH_THEME_GIT_TIME_SINCE_COMMIT_SHORT"
fi
else
# Clean work tree: always the neutral color.
COLOR="$ZSH_THEME_GIT_TIME_SINCE_COMMIT_NEUTRAL"
fi
# Earlier variant (kept for reference) that also printed day counts:
#if [ "$HOURS" -gt 24 ]; then
#echo "-$COLOR${DAYS}d${SUB_HOURS}h${SUB_MINUTES}m%{$reset_color%}"
#elif [ "$MINUTES" -gt 60 ]; then
#echo "-$COLOR${HOURS}h${SUB_MINUTES}m%{$reset_color%}"
#else
#echo "-$COLOR${MINUTES}m%{$reset_color%}"
#fi
# Current behavior: empty after 24h, hours+minutes within a day,
# bare minutes within the first hour.
if [ "$HOURS" -gt 24 ]; then
echo ""
elif [ "$MINUTES" -gt 60 ]; then
echo "-$COLOR${HOURS}h${SUB_MINUTES}m%{$reset_color%}"
else
echo "-$COLOR${MINUTES}m%{$reset_color%}"
fi
fi
fi
}
# Just add $(git_time_since_commit) to your ZSH PROMPT and you're set
# Customize to your needs...
#[[ -s "$HOME/.rvm/scripts/rvm" ]] && . "$HOME/.rvm/scripts/rvm"
export PATH=${PATH}:/Developer/android-sdk-mac_x86/platform-tools/
export PATH=${PATH}:/Developer/android-sdk-mac_x86/tools
export PATH=/opt/local/bin:${PATH}
export PATH=~/bin:${PATH}
export PATH=/Applications/Postgres.app/Contents/MacOS/bin:${PATH}
export EDITOR=vim
alias rvim="vim app/**/*.rb spec/**/*.rb test/**/*.rb"
alias git='nocorrect git'
alias rake='noglob rake'
alias titanium_iphone1.7.2="/Library/Application\ Support/Titanium/mobilesdk/osx/1.7.2/titanium.py run --platform=iphone"
alias titanium_android1.7.2="/Library/Application\ Support/Titanium/mobilesdk/osx/1.7.2/titanium.py run --platform=android --android=/Users/joel/android-sdk-mac_x86"
alias titanium='~/Library/Application\ Support/Titanium/mobilesdk/osx/2.1.1.GA/titanium.py'
export SSL_CERT_FILE=/Users/joel/.ssh/cacert.pem
alias tmux="TERM=screen-256color-bce tmux -2"
alias rc="bundle exec pry -r ./config/environment"
alias rspec="nocorrect rspec"
### Added by the Heroku Toolbelt
export PATH="/usr/local/heroku/bin:$PATH"
export PATH="$HOME/.rbenv/bin:$PATH"
export PATH="/opt/boxen/nodenv/versions/v0.10/bin:$PATH"
[[ -s $HOME/.tmuxinator/scripts/tmuxinator ]] && source $HOME/.tmuxinator/scripts/tmuxinator
source /opt/boxen/env.sh
alias git='noglob git'
alias rake='noglob rake'
alias rspec='nocorrect rspec'
alias bundle='nocorrect bundle'
alias rspec='nocorrect rspec'
alias pry='nocorrect pry'
alias reload="source ~/.zshrc"
alias pt=papertrail
# make sure my r programming language works
disable r
. ~/.dotfiles_secret/.zshrc
. /opt/boxen/homebrew/Cellar/python/2.7.3-boxen2/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/powerline/bindings/zsh/powerline.zsh
#. /opt/boxen/homebrew/lib/python2.7/site-packages/powerline/bindings/zsh/powerline.zsh
| true
|
cb9948a44b040e5a5cf14ae705140fc85a6bd233
|
Shell
|
ReedOei/dt-fixing-tools
|
/scripts/gather-results.sh
|
UTF-8
| 702
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/bash
# Collect every module's .dtfixingtools results directory from a checkout
# tree into one flat output directory, one uniquely-named copy per module.
if [[ $1 == "" ]] || [[ $2 == "" ]]; then
echo "Usage: ./gather-results.sh <path> <output path>"
exit
fi
# $1: root of the tree to scan; $2: destination directory for the copies.
path=$1
outputpath=$2
# modulekey <projroot> <moduledir>
# Print a unique key for a module's results directory:
#   <root-basename>             when the module lives directly in the root
#   <root-basename>-<rel-path>  otherwise, with '/' replaced by '-'
# Fixes: quotes all expansions (paths with spaces broke before), hoists the
# duplicated realpath call, and keeps working variables function-local.
modulekey() {
    local projroot=$1
    local moduledir=$2
    local relpath
    # realpath --relative-to is GNU coreutils.
    relpath=$(realpath --relative-to "${projroot}" "$(dirname "${moduledir}")")
    # In case it is not a subdirectory, handle it so does not use the .
    if [[ ${relpath} == '.' ]]; then
        basename "${projroot}"
        return
    fi
    # Otherwise convert into expected format: slashes become dashes.
    printf '%s-%s\n' "$(basename "${projroot}")" "$(echo "${relpath}" | sed 's;/;-;g')"
}
# Copy each module's .dtfixingtools directory into the output directory,
# renamed to its unique module key. find -print0 with read -d '' keeps
# paths containing whitespace intact; the original $(find ...) for-loop
# word-split such paths into garbage arguments.
find "${path}" -name .dtfixingtools -print0 | while IFS= read -r -d '' d; do
    cp -r "${d}" "${outputpath}/$(modulekey "${path}" "${d}")"
done
| true
|
c17ec51f9f1aeeac74c00ca1fb5a47df083f886d
|
Shell
|
plugnburn/Asgard
|
/airootfs/etc/skel/.config/dwb/userscripts/bb
|
UTF-8
| 320
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/bash
#Asgard bytebeat player
# dwb userscript: plays the C "bytebeat" expression passed via DWB_ARGUMENT,
# or stops all playback when invoked with no argument.
F=${DWB_ARGUMENT}
# Pick the notification text: empty argument means "stop", otherwise "play".
[ -z "$F" ] && RES="Stopping all instances of wave player" || RES="Playing bytebeat expression $F"
# Tell dwb to display the status via its command FIFO.
echo 'eval io.notify("'"$RES"'")' > ${DWB_FIFO}
# No argument: kill every running aplay. Otherwise: wrap the expression in a
# tiny C program, compile-and-run it with tcc, and pipe the byte stream into
# aplay (output silenced).
[ -z "$F" ] && killall aplay || echo "main(){unsigned int t=0;for(;;t++)putchar($F);}"|tcc -run -|aplay>/dev/null 2>&1
| true
|
b0ac7646a6fc18c0429bf4eb76330c8173ad9677
|
Shell
|
ryewen/BacklightSet
|
/Backlight_Set.sh
|
UTF-8
| 530
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# Interactively set the Intel panel backlight, clamping the requested value
# to the valid range [1, max_brightness] reported by sysfs.
index="/sys/class/backlight/intel_backlight"
nowIndex=${index}/brightness
maxIndex=${index}/max_brightness
printf "Current Brightness is "
cat "$nowIndex"
echo "Input New Brightness"
read -r newBrightness
# Reject anything that is not a plain non-negative integer; the numeric
# '[ -gt ]' tests below would otherwise abort with a syntax error.
case $newBrightness in
''|*[!0-9]*)
echo "Brightness must be a number" >&2
exit 1
;;
esac
maxBrightness=$(cat "$maxIndex")
minBrightness=1
# Clamp to the panel's supported range.
if [ "$newBrightness" -gt "$maxBrightness" ]
then
newBrightness=$maxBrightness
fi
if [ "$newBrightness" -lt "$minBrightness" ]
then
newBrightness=$minBrightness
fi
#PASSWD Is Your Root Password, Change it
# SECURITY: a hard-coded password piped into 'sudo -S' is readable in this
# file and visible in process listings; prefer a sudoers rule permitting
# just this one write instead.
echo "PASSWD"|sudo -S sh -c "echo $newBrightness > $nowIndex"
| true
|
19229fcb7bd75d735dd96731759be72a44c13aca
|
Shell
|
toulousain79/MySB
|
/install/NextCloud
|
UTF-8
| 21,364
| 2.96875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# ----------------------------------
# shellcheck source=inc/includes_before
source "$(dirname "$0")"/../inc/includes_before
# ----------------------------------
# __/\\\\____________/\\\\___________________/\\\\\\\\\\\____/\\\\\\\\\\\\\___
# _\/\\\\\\________/\\\\\\_________________/\\\/////////\\\_\/\\\/////////\\\_
# _\/\\\//\\\____/\\\//\\\____/\\\__/\\\__\//\\\______\///__\/\\\_______\/\\\_
# _\/\\\\///\\\/\\\/_\/\\\___\//\\\/\\\____\////\\\_________\/\\\\\\\\\\\\\\__
# _\/\\\__\///\\\/___\/\\\____\//\\\\\________\////\\\______\/\\\/////////\\\_
# _\/\\\____\///_____\/\\\_____\//\\\____________\////\\\___\/\\\_______\/\\\_
# _\/\\\_____________\/\\\__/\\_/\\\______/\\\______\//\\\__\/\\\_______\/\\\_
# _\/\\\_____________\/\\\_\//\\\\/______\///\\\\\\\\\\\/___\/\\\\\\\\\\\\\/__
# _\///______________\///___\////__________\///////////_____\/////////////_____
# By toulousain79 ---> https://github.com/toulousain79/
#
######################################################################
#
# Copyright (c) 2013 toulousain79 (https://github.com/toulousain79/)
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# --> Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
#
######################################################################
# https://docs.nextcloud.com/server/stable/admin_manual/configuration_server/occ_command.html
##################### FIRST LINE #####################################
# SmbClient
if ! gfnCheckCommand 0 smbclient; then
gfnPackageBundleInstall 'smbclient'
fi
#### VARs
source /etc/MySB/config_db
sDataDir="/home/nextcloud"
gfnListCountUsers 'normal'
gfnGetRepoValues 'NextCloud'
sMailDomain="$(echo "${gsMainUserEmail}" | cut -d "@" -f 2)"
sMailFromAddress="$(echo "${gsMainUserEmail}" | cut -d "@" -f 1)"
#### Functions - BoF
# Create the NextCloud database and grant the MySB application user full
# rights on it. Idempotent: IF NOT EXISTS makes it safe to re-run on
# upgrade or after a failed first-install attempt (see the retry loop below).
# NOTE(review): the here-doc uses <<-EOF, which strips leading TABS only.
function fnCreateDatabase() {
	mysql -u root -p"${MySQL_RootPassword}" --verbose <<-EOF
	CREATE DATABASE IF NOT EXISTS NextCloud_db CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci;
	GRANT ALL ON NextCloud_db.* to 'MySB_user'@'localhost' IDENTIFIED BY '${MySQL_MysbPassword}';
	FLUSH PRIVILEGES;
	EOF
}
#### Functions - EoF
#### STOP services
# gfnManageServices stop "${binPhpService} nginx"
#### ${gsPhpDir}/fpm/php.ini
perl -pi -e "s/post_max_size = 10M/post_max_size = 20G/g" "${gsPhpDir}"/fpm/php.ini
perl -pi -e "s/upload_max_filesize = 10M/upload_max_filesize = 20G/g" "${gsPhpDir}"/fpm/php.ini
perl -pi -e "s/max_file_uploads = 20/max_file_uploads = 20000/g" "${gsPhpDir}"/fpm/php.ini
perl -pi -e "s/output_buffering = 4096/output_buffering = Off/g" "${gsPhpDir}"/fpm/php.ini
# OPCache
perl -pi -e "s/;opcache.enable=0/opcache.enable=1/g" "${gsPhpDir}"/fpm/php.ini
perl -pi -e "s/;opcache.enable_cli=0/opcache.enable_cli=1/g" "${gsPhpDir}"/fpm/php.ini
perl -pi -e "s/;opcache.interned_strings_buffer=4/opcache.interned_strings_buffer=8/g" "${gsPhpDir}"/fpm/php.ini
perl -pi -e "s/;opcache.max_accelerated_files=2000/opcache.max_accelerated_files=10000/g" "${gsPhpDir}"/fpm/php.ini
perl -pi -e "s/;opcache.memory_consumption=64/opcache.memory_consumption=128/g" "${gsPhpDir}"/fpm/php.ini
perl -pi -e "s/;opcache.save_comments=1/opcache.save_comments=1/g" "${gsPhpDir}"/fpm/php.ini
perl -pi -e "s/;opcache.revalidate_freq=2/opcache.revalidate_freq=1/g" "${gsPhpDir}"/fpm/php.ini
perl -pi -e "s/;opcache.revalidate_freq=2/opcache.revalidate_freq=1/g" "${gsPhpDir}"/fpm/php.ini
#### ${gsPhpDir}/fpm/pool.d/www.conf
perl -pi -e "s/^;env\[PATH\]/env\[PATH\]/g" "${gsPhpDir}"/fpm/pool.d/www.conf
#### NginX configuration
install -v -g root -o root -m 0600 "${MySB_InstallDir}"/templates/nginx/etc.nginx.locations.nextcloud.conf.tmpl /etc/nginx/locations/NextCloud.conf
perl -pi -e "s/<MySB_InstallDir>/${gsInstallDirEscaped}/g" /etc/nginx/locations/NextCloud.conf
#### START services
gfnManageServices restart "${binPhpService} nginx"
#### An upgrade ? Doing a backup
# Maybe it's an upgrade ?
if [[ ${gnRepoUpgrade} -eq 1 ]] && (gfnCheckCommand 0 "${gsNextCloudDir}"/occ); then
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ maintenance:mode --off"
fi
# Backup the config file if needed
if [ -f "${gsNextCloudDir}"/config/config.php ]; then
mv -v "${gsNextCloudDir}"/config/config.php "${MySB_InstallDir}"/temp/nextcloud_config.php
fi
if [ -d "${gsNextCloudDir}"/apps2 ]; then
mv -v "${gsNextCloudDir}"/apps2 "${MySB_InstallDir}"/temp/nextcloud_apps2
fi
if [ -f "${MySB_Files}/${gsRepoFile}" ]; then
gfnDeleteDirectory 1 "${gsNextCloudDir}"
fi
#### Extract new files
gfnRepoManageByType
#### Restore the old config file
if [ -f "${MySB_InstallDir}"/temp/nextcloud_config.php ]; then
mv -v "${MySB_InstallDir}"/temp/nextcloud_config.php "${gsNextCloudDir}"/config/config.php
fi
if [ -d "${MySB_InstallDir}"/temp/nextcloud_apps2 ]; then
if [ -d "${gsNextCloudDir}"/apps2 ]; then
gfnDeleteDirectory 1 "${gsNextCloudDir}"/apps2
fi
mv -v "${MySB_InstallDir}"/temp/nextcloud_apps2 "${gsNextCloudDir}"/apps2
else
mkdir -pv "${gsNextCloudDir}"/apps2
fi
#### Create needed directories & files
[ ! -d "${sDataDir}" ] && mkdir -pv "${sDataDir}"
[ ! -f /var/log/nextcloud.log ] && touch /var/log/nextcloud.log
chown -v www-data:www-data /var/log/nextcloud.log
chown -R www-data:www-data "${sDataDir}"
#### Clean some files and directories
gfnDeleteDirectory 1 "${gsNextCloudDir}"/data
gfnDeleteDirectory 1 "${gsNextCloudDir}"/assets
rm -fv "${gsNextCloudDir}"/config/config.sample.php
if [ -f "${sDataDir}"/nextcloud.log ]; then
cat "${sDataDir}"/nextcloud.log >>/var/log/nextcloud.log
rm -fv "${sDataDir}"/nextcloud.log*
fi
#### Create directories & change rights
gfnManageDirAndFiles 'global'
#### Database directory does not exist ? So we do the first install !
fnCreateDatabase
if [[ (! -f "${gsNextCloudDir}"/config/config.php) || (! -s "${gsNextCloudDir}"/config/config.php) ]] && [ "$(find /var/lib/mysql/NextCloud_db/ -type f -name "*.*" | wc -l)" -le 1 ]; then
nLoop=4
for ((i = 1; i <= nLoop; i++)); do
echo "# First install ${i}/${nLoop}"
# Do first install
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ maintenance:install --database \"mysql\" --database-host \"localhost:/run/mysqld/mysqld.sock\" --database-name \"NextCloud_db\" --database-user \"MySB_user\" --database-pass \"${MySQL_MysbPassword}\" --admin-user \"admin\" --admin-email=\"${gsMainUserEmail}\" --admin-pass \"${MySQL_MysbPassword}\" --database-table-prefix \"nc_\" --data-dir \"${sDataDir}\" --no-interaction"
# Check config.php
sVersion="$(grep "'version'" "${gsNextCloudDir}"/config/config.php | tail -n 1 | cut -d "'" -f 4)"
sSecret="$(grep "'secret'" "${gsNextCloudDir}"/config/config.php | tail -n 1 | cut -d "'" -f 4)"
sPasswordSalt="$(grep "'passwordsalt'" "${gsNextCloudDir}"/config/config.php | tail -n 1 | cut -d "'" -f 4)"
echo "sVersion: ${sVersion}"
echo "sSecret: ${sSecret}"
echo "sPasswordSalt: ${sPasswordSalt}"
if [ -z "${sVersion}" ] || [ -z "${sSecret}" ] || [ -z "${sPasswordSalt}" ]; then
cmdMySQL 'MySB_db' "DROP DATABASE IF EXISTS NextCloud_db;" -v
gfnDeleteDirectory 1 "${sDataDir}"/admin
rm -fv "${gsNextCloudDir}"/config/config.php
if [[ ${i} -le ${nLoop} ]]; then
fnCreateDatabase
else
cmdMySQL 'MySB_db' "UPDATE repositories SET upgrade='0' WHERE name='NextCloud';" -v
cmdMySQL 'MySB_db' "UPDATE services SET to_install='0', is_installed='0' WHERE serv_name='NextCloud';" -v
gfnEndingScript 1 "NextCloud install failed, aborting !"
fi
else
break
fi
done
# Generate an instance ID
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ upgrade -v"
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ maintenance:repair"
#### Get needed values from config files
sVersion="$(su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ config:system:get version")"
echo "sVersion: ${sVersion}"
sInstanceId="$(su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ config:system:get instanceid")"
echo "sInstanceId: $sInstanceId"
sSecret="$(su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ config:system:get secret")"
echo "sSecret: ${sSecret}"
sPasswordSalt="$(su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ config:system:get passwordsalt")"
echo "sPasswordSalt: ${sPasswordSalt}"
#### Config file
install -v -g www-data -o www-data -m 0640 "${MySB_InstallDir}"/templates/nextcloud/nextcloud.config.php.tmpl "${gsNextCloudDir}"/config/config.php
# Set db password
perl -pi -e "s/<dbpassword>/${MySQL_MysbPassword}/g" "${gsNextCloudDir}"/config/config.php
# Set version
perl -pi -e "s/<version>/${sVersion}/g" "${gsNextCloudDir}"/config/config.php
# Set instance ID
# perl -pi -e "s/<instanceid>/${sInstanceId}/g" "${gsNextCloudDir}"/config/config.php
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ config:system:set instanceid --value=${sInstanceId}"
# Set secret
# perl -pi -e "s/<secret>/${sSecret}/g" "${gsNextCloudDir}"/config/config.php
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ config:system:set secret --value=${sSecret}"
# Set password salt
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ config:system:set passwordsalt --value=${sPasswordSalt}"
# config:system:set
echo && echo "# config:system:set"
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ config:system:set trusted_domains 0 --value=127.0.0.1"
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ config:system:set trusted_domains 1 --value=localhost"
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ config:system:set trusted_domains 2 --value=${gsHostNameFqdn}:${gsPort_MySB_HTTPS}"
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ config:system:set default_language --value=${EnvLang}"
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ config:system:set force_language --type boolean --value=false"
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ config:system:set default_locale --value=${gsLocale}"
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ config:system:set force_locale --type boolean --value=false"
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ config:system:set lost_password_link --value="
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ config:system:set mail_domain --value=${sMailDomain}"
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ config:system:set mail_from_address --value=${sMailFromAddress}"
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ config:system:set overwritehost --value=${gsHostNameFqdn}:${gsPort_MySB_HTTPS}"
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ config:system:set overwrite.cli.url --value=https://localhost:${gsPort_MySB_HTTPS}/nc"
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ config:system:set logtimezone --value=${gsTimeZone}"
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ config:system:set skeletondirectory --value="
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ config:system:set mysql.utf8mb4 --type boolean --value=true"
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ config:system:set token_auth_enforced --type boolean --value=false"
fi
# Maybe it's an upgrade ?
# if [[ ${gnRepoUpgrade} -eq 1 ]]; then
echo && echo "# upgrade required"
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ maintenance:mode --off"
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ upgrade -v"
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ maintenance:repair"
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ upgrade -v"
# fi
# maintenance
echo && echo "# maintenance"
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ maintenance:mimetype:update-db"
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ maintenance:mimetype:update-js"
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ maintenance:theme:update"
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ maintenance:data-fingerprint"
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ maintenance:repair"
# app:enable
echo && echo "# app:enable"
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ app:enable files_external"
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ app:enable notifications"
# config:app:set
echo && echo "# config:app:set"
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ config:app:set --update-only --value=no password_policy enabled"
# Users group
# Create the shared 'mysb_users' NextCloud group once, if it is not already
# present in 'occ group:list'. Fix: the progress line previously printed the
# garbled label "# roup:add".
if (! grep -q 'mysb_users' <<<"$(su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ group:list")"); then
	echo && echo "# group:add"
	su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ group:add mysb_users"
fi
echo && echo "# configure"
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ db:add-missing-indices"
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ db:convert-filecache-bigint --no-interaction"
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ background:cron"
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ sharing:cleanup-remote-storages"
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ files:cleanup"
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ upgrade -v"
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ maintenance:repair"
#### Clean actual external mounts (MySB_Home)
for nId in $(cmdMySQL 'NextCloud_db' "SELECT mount_id FROM nc_external_mounts WHERE mount_point='/MySB_Home';"); do
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ files_external:delete ${nId} --yes"
done
#### Create all existing MySB users AND Scanning users' files
for seedUser in ${gsUsersList}; do
if (getent passwd "${seedUser}" >/dev/null); then
FnUserMail="$(cmdMySQL 'MySB_db' "SELECT users_email FROM users WHERE users_ident='${seedUser}';")"
if [ ! -d "${sDataDir}"/"${seedUser}" ]; then
mkdir -v "${sDataDir}"/"${seedUser}"
chown -v www-data:www-data "${sDataDir}"/"${seedUser}"
chmod -v 0755 "${sDataDir}"/"${seedUser}"
fi
if (! su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ user:info \"${seedUser}\"" >/dev/null); then
OC_PASS="$(cmdMySQL 'MySB_db' "SELECT users_passwd FROM users WHERE users_ident='${seedUser}';")"
[ -z "${OC_PASS}" ] && OC_PASS="$(gfnGenPassword 8)"
export OC_PASS
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ user:add --password-from-env --display-name=\"${seedUser}\" --group=\"mysb_users\" \"${seedUser}\""
cmdMySQL 'MySB_db' "UPDATE users SET init_password = '1' WHERE users_ident='${seedUser}';" -v
fi
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ user:setting \"${seedUser}\" settings email \"${FnUserMail}\""
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ user:setting \"${seedUser}\" core lang \"${EnvLang}\""
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ user:setting \"${seedUser}\" core timezone ${gsTimeZone}"
#### Create new external mount (MySB_Home)
IdStorage=$(su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ files_external:create MySB_Home local null::null" | awk '{ print $5 }')
if [ -n "${IdStorage}" ]; then
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ files_external:applicable --add-user \"${seedUser}\" ${IdStorage}"
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ files_external:config ${IdStorage} datadir '/home/${seedUser}/'"
# su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ files_external:option ${IdStorage} filesystem_check_changes 'false'"
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ files_external:option ${IdStorage} encrypt 'false'"
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ files_external:option ${IdStorage} previews 'true'"
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ files_external:option ${IdStorage} enable_sharing 'true'"
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ files_external:option ${IdStorage} encoding_compatibility 'false'"
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ files_external:verify ${IdStorage}"
fi
#### Force a scan
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ files:scan --path=\"/${seedUser}/files/MySB_Home\""
fi
unset IdStorage FnUserMail OC_PASS
done
#### Logrotate
gfnLogRotate 'nextcloud'
#### Fail2Ban
if [ "${gbToInstall_Fail2Ban}" == "YES" ] || [ "${gbIsInstalled_Fail2Ban}" == "YES" ] && [ -d /etc/fail2ban/filter.d ]; then
install -v -g root -o root -m 0700 "${MySB_InstallDir}"/templates/fail2ban/etc.fail2ban.filter.d.nextcloud.tmpl /etc/fail2ban/filter.d/nextcloud.conf
if (! grep -q '\[nextcloud\]' /etc/fail2ban/jail.local); then
{
echo ""
echo "[nextcloud]"
echo "enabled = true"
echo "port = ${gsPort_MySB_HTTPS}"
echo "filter = nextcloud"
echo "logpath = /var/log/nextcloud.log"
} >>/etc/fail2ban/jail.local
fi
fi
#### Scann all files
screen -dmS NextCloud_ScanAll su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ files:scan --all"
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ integrity:check-core" >/tmp/nextcloud_check_integrity.log
if [ -f /tmp/nextcloud_check_integrity.log ]; then
cat /tmp/nextcloud_check_integrity.log
sed -i '/INVALID_HASH/d' /tmp/nextcloud_check_integrity.log
sed -i '/expected/d' /tmp/nextcloud_check_integrity.log
sed -i '/current/d' /tmp/nextcloud_check_integrity.log
while read -r sLine; do
sFile="${gsNextCloudDir}/$(echo "${sLine}" | awk '{ print $2 }' | sed 's/://g;')"
if [ -f "${sFile}" ]; then
# sCharset="$(file -i "${sFile}" | awk '{ print $3 }' | cut -d "=" -f 2 | tr [:lower:] [:upper:])"
iconv -f us-ascii -t utf-16 "${sFile}" -o "${sFile}.tmp"
iconv -f utf-16le -t utf-8 "${sFile}" -o "${sFile}.tmp"
[ -f "${sFile}.tmp" ] && rm -v "${sFile}.tmp"
fi
done </tmp/nextcloud_check_integrity.log
fi
su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ integrity:check-core"
#### Display 'config.php' for logs
cat "${gsNextCloudDir}"/config/config.php
#### Backuping config file
if [ -f "${gsNextCloudDir}"/config/config.php ]; then
cp -v "${gsNextCloudDir}"/config/config.php "${MySB_InstallDir}"/files/nextcloud_config.php
fi
#### Update DB
sVersion="$(su -s /bin/sh www-data -c "/usr/bin/php ${gsNextCloudDir}/occ config:system:get version")"
# [ "$(awk -F. '{print NF-1}' <<<${sVersion})" -eq 3 ] && sVersion="${sVersion%.*}"
cmdMySQL 'MySB_db' "UPDATE repositories SET upgrade='0', version='${sVersion}' WHERE name='NextCloud';" -v
cmdMySQL 'MySB_db' "UPDATE services SET to_install='0', is_installed='1' WHERE serv_name='NextCloud';" -v
#### CRON
crontab -u www-data -l >/tmp/crontab.tmp
sed -i '/NextCloud/d' /tmp/crontab.tmp
echo "*/15 * * * * php -f ${gsNextCloudDir}/cron.php >/dev/null 2>&1 # NextCloud" | tee -a /tmp/crontab.tmp
crontab -u www-data /tmp/crontab.tmp
rm -f /tmp/crontab.tmp
su -s /bin/sh www-data -c "/usr/bin/php -f ${gsNextCloudDir}/cron.php >/dev/null 2>&1"
#### CLean passwords from log file
[ -f "${gsLogFile}" ] && sed -i "s/${MySQL_MysbPassword}/****************/g;" "${gsLogFile}"
# -----------------------------------------
# shellcheck source=inc/includes_after
source "$(dirname "$0")"/../inc/includes_after
# -----------------------------------------
##################### LAST LINE ######################################
| true
|
6462456d0a4385af65c5bc722a29bf250ee19765
|
Shell
|
j0sephmerheb/bash
|
/to_be_cleaned/array-add-into.sh
|
UTF-8
| 179
| 2.515625
| 3
|
[] |
no_license
|
# edit array — demonstrates inserting elements into a bash array.
arr=(bb cc ee ff)
# add at the beginning: "${arr[@]}" expands each element as its own word.
# BUG FIX: the original used quoted "${arr[*]}", which collapses the whole
# array into a single string, so the result had 2 elements instead of 5.
arr=("aa" "${arr[@]}")
# add at the end (+= appends without recomputing the length by hand)
arr+=(gg)
# add in the middle: splice "dd" in after index 2, i.e. between cc and ee
# (again with [@] so every element stays a separate word)
arr=("${arr[@]:0:3}" "dd" "${arr[@]:3}")
| true
|
8f4f68a94e5979097cfa87108a454c7b74737010
|
Shell
|
qixin5/debloating_study
|
/expt/debaug/benchmark/date-8.21/testscript/I2/9
|
UTF-8
| 153
| 2.796875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Debloating test harness: run the 'date' binary under test with a fixed
# epoch input and record its output plus exit status for later comparison.
#
# Arguments:
#   $1 - path to the date binary under test
#   $2 - output directory
#   $3 - soft timeout in seconds
#   $4 - input directory (unused by this particular test case)
BIN=$1
OUTDIR=$2
TIMEOUT=$3
INDIR=$4
# SIGKILL 9s after the soft timeout expires; '-d @1234567890' prints the
# date for that Unix epoch. stdout and stderr both go to the per-test file.
timeout -k 9 ${TIMEOUT}s $BIN -d @1234567890 &>$OUTDIR/o9
exit_val=$?
# Append the exit status so the oracle can diff it alongside the output.
echo ${exit_val} >>$OUTDIR/o9
| true
|
6848df8412584b8d21658552fa719ae3b214fbc6
|
Shell
|
dimoreira/server-bootstrap
|
/src/bash.sh
|
UTF-8
| 543
| 3.90625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# bash_color_prompt()
#
# Remove the comment from force_color_prompt on .bashrc, then reload it.
# Returns non-zero when ~/.bashrc is missing or the edit fails.
#
function bash_color_prompt() {
  local bashrc="$HOME/.bashrc"
  # Guard: without this, sed and source each emit a confusing error on
  # machines that have no ~/.bashrc.
  [ -f "$bashrc" ] || return 1
  # NOTE: 'sed -i' with no suffix is GNU sed; BSD/macOS sed needs -i ''.
  sed -i "s/#force_color_prompt=yes/force_color_prompt=yes/g" "$bashrc" || return 1
  source "$bashrc"
}
# bash_run()
#
# Function that runs Bash helpers
#
function bash_run() {
  local result_msg
  echo -e "\e[36mUpdating bash prompt to enable colors\e[0m"
  # Choose the status line from the helper's exit code, then print it
  # followed by a blank line — exactly one message either way.
  if bash_color_prompt; then
    result_msg="\e[32mBash color prompt updated\e[0m"
  else
    result_msg="\e[31mFailed on bash color prompt update\e[0m"
  fi
  echo -e "$result_msg"
  echo
}
| true
|
f87c32d6be2e52da9540c2e69a7914bd5d865510
|
Shell
|
cceh/ntg
|
/scripts/cceh/backup_active_databases.sh
|
UTF-8
| 335
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Dump every Postgres database listed in the sourced 'active_databases'
# file to a dated, gzip-compressed archive under /backup/saved_databases.
#
# Usage: ./backup_active_databases.sh
#
. $(dirname "$0")/active_databases
DATE=$(date -I)
# ACTIVE_DATABASES is a whitespace-separated list; rely on word-splitting.
for db in $ACTIVE_DATABASES; do
  FILE="/backup/saved_databases/backup_db_${db}_${DATE}.gz"
  echo "Backing up database $db to $FILE ..."
  pg_dump "$db" | gzip > "$FILE"
done
| true
|
da60c857324dbc92cefa0a6d10a64fe6c017951d
|
Shell
|
filips123/Tokenizer
|
/bindings/python/tools/build_wheel.sh
|
UTF-8
| 1,366
| 3.15625
| 3
|
[
"MIT"
] |
permissive
|
#! /bin/bash
# Script to build Python wheels in the manylinux1 Docker environment.
set -e
set -x
ROOT_DIR=$PWD
SENTENCEPIECE_VERSION=${SENTENCEPIECE_VERSION:-0.1.8}
PYBIND11_VERSION=${PYBIND11_VERSION:-2.4.3}
ICU_VERSION=${ICU_VERSION:-64.2}
PATH=/opt/python/cp37-cp37m/bin:$PATH
# Install ICU.
curl -L -O https://github.com/unicode-org/icu/releases/download/release-${ICU_VERSION/./-}/icu4c-${ICU_VERSION/./_}-src.tgz
tar xf icu4c-*-src.tgz
cd icu/source
CFLAGS="-fPIC" CXXFLAGS="-fPIC" ./configure --disable-shared --enable-static
make -j2 install
cd $ROOT_DIR
# Install cmake.
pip install "cmake==3.13.*"
# Build SentencePiece.
curl -L -o sentencepiece-${SENTENCEPIECE_VERSION}.tar.gz -O https://github.com/google/sentencepiece/archive/v${SENTENCEPIECE_VERSION}.tar.gz
tar zxf sentencepiece-${SENTENCEPIECE_VERSION}.tar.gz
cd sentencepiece-${SENTENCEPIECE_VERSION}
mkdir build
cd build
cmake ..
make -j2 install
cd $ROOT_DIR
# Build Tokenizer.
mkdir build
cd build
cmake -DCMAKE_BUILD_TYPE=Release -DLIB_ONLY=ON -DWITH_ICU=ON ..
make -j2 install
cd $ROOT_DIR
cd bindings/python
for PYTHON_ROOT in /opt/python/*
do
$PYTHON_ROOT/bin/pip install pybind11==$PYBIND11_VERSION
$PYTHON_ROOT/bin/python setup.py bdist_wheel
rm -r build
done
for wheel in dist/*
do
auditwheel show $wheel
auditwheel repair $wheel
done
mv wheelhouse $ROOT_DIR
| true
|
e4e470f93fbbb7d5c555bf6686f9009b341e3943
|
Shell
|
medined/accumulo_stackscript
|
/stop_accumulo.sh
|
UTF-8
| 629
| 2.671875
| 3
|
[] |
no_license
|
#
# shutdown accumulo
#
# Services are stopped in reverse of their startup dependency order:
# Accumulo first, then ZooKeeper, then Hadoop. Each step prefers a
# per-user install under $HOME and falls back to the system-wide install,
# run via su as the matching service account.
if [ -d $HOME/bin/accumulo ];
then
$HOME/bin/accumulo/bin/stop-all.sh
else
su accumulo -c "/usr/local/accumulo/bin/stop-all.sh"
fi
#
# shutdown zookeeper
#
if [ -d $HOME/software/zookeeper ];
then
# pushd/popd so zkServer.sh runs from its own directory.
pushd $HOME/software/zookeeper; ./bin/zkServer.sh stop; popd
else
su zookeeper -c "pushd /usr/local/zookeeper; ./bin/zkServer.sh stop; popd"
fi
#
# shutdown hadoop
# MapReduce is stopped before HDFS.
if [ -d $HOME/software/hadoop ];
then
$HOME/software/hadoop/bin/stop-mapred.sh
$HOME/software/hadoop/bin/stop-dfs.sh
else
su hadoop -c "/usr/local/hadoop/bin/stop-mapred.sh"
su hadoop -c "/usr/local/hadoop/bin/stop-dfs.sh"
fi
| true
|
42ff56146511a8b477594a6ffddf8d1ace5fb81c
|
Shell
|
skagedal/xcode-simulator-cert
|
/Tools/full-test.sh
|
UTF-8
| 2,009
| 3.953125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
if ! hash jq 2>/dev/null; then
echo "This script requires jq. Do brew install jq."
fi
DEVICE_TYPE=`xcrun simctl list devicetypes "iPhone 8" --json | jq '.devicetypes[0].identifier' --raw-output`
RUNTIME=`xcrun simctl list runtimes "iOS" --json | jq '.runtimes[0].identifier' --raw-output`
LOGO="🧪 "
URL='https://localhost:1443/'
CERT_PATH=`pwd`/test-ca.crt
# Generating cert
echo "${LOGO} Generating self-signed certificate"
openssl req -x509 -newkey rsa:4096 -keyout test-ca.key -out ${CERT_PATH} -days 365 -nodes -subj '/CN=localhost'
# HTTPS server
echo "${LOGO} Starting https server"
./simple_https_server.py &
HTTP_SERVER_PID=$!
# Create simulator
echo "${LOGO} Creating simulator ($DEVICE_TYPE / $RUNTIME)"
UUID=`xcrun simctl create cert-test-iphone ${DEVICE_TYPE} ${RUNTIME}`
if [ $? -eq 0 ]; then
echo "${LOGO} UUID is ${UUID}"
else
echo "${LOGO} Error creating simulator. Maybe your selected Xcode isn't 10.2? That's what I'm expecting."
exit 1
fi
# Installing
echo "${LOGO} Installing root certificate"
pushd .. >& /dev/null
echo swift run xcode-simulator-cert --verbosity=loud install ${CERT_PATH} --uuid=${UUID}
swift run xcode-simulator-cert --verbosity=loud install ${CERT_PATH} --uuid=${UUID}
popd >& /dev/null
# Booting
echo "${LOGO} Booting simulator; press enter when done"
xcrun simctl boot ${UUID}
open /Applications/Xcode-10.2.1.app/Contents/Developer/Applications/Simulator.app
read
# Opening the URL
echo "${LOGO} Opening ${URL} in simulator. This should now show the contents of this directory in Safari."
echo "${LOGO} When confirmed, press enter to clean up, deleting this test simulator."
xcrun simctl openurl ${UUID} ${URL}
read
# Killing HTTP server
echo "${LOGO} Killing http server $HTTP_SERVER_PID"
kill $HTTP_SERVER_PID
# Delete simulator
echo "${LOGO} Deleting simulator"
xcrun simctl delete ${UUID}
if [ $? -ne 0 ]; then
echo "${LOGO} Could not delete simulator with uuid ${UUID} for some reason."
exit 1
fi
| true
|
de263de79b690dbe8d5f2fa00bbedbfb7e4d05f7
|
Shell
|
mzadel/gof
|
/gofci
|
UTF-8
| 2,078
| 3.90625
| 4
|
[] |
no_license
|
#!/bin/bash
#
# gofci -- commit one file into its own bare "<file>.git" repository using
# git plumbing only (no working tree or index required).
#
# basic steps to create the commit:
# BLOBHASH=$(git hash-object -w "$FILENAME")
# TREEHASH=$(echo -e "100644 blob $BLOBHASH\t$FILENAME" | git mktree)
# COMMITHASH=$(echo commitmsg | git commit-tree $TREEHASH -p master)
# git update-ref refs/heads/master $COMMITHASH
#
FILENAME="$1"

# Usage / existence errors now exit non-zero (the original used bare "exit",
# which reported success to callers) and report on stderr.
if [[ -z "$FILENAME" ]] ; then echo please supply filename >&2 ; exit 1 ; fi

export GIT_DIR="$FILENAME.git"

# bail if file doesn't exist
if [[ ! -e "$FILENAME" ]] ; then echo file "$FILENAME" does not exist >&2 ; exit 1 ; fi

# if repo doesn't exist, create it
if [[ ! -e "$GIT_DIR" ]] ; then
    echo creating "$GIT_DIR"
    git init -q --bare
    INITIALCOMMIT=1
fi

# bail if the file's the same as the last committed version: compare sizes
# first (cheap), then content hashes
if [[ ! $INITIALCOMMIT ]] ; then
    PREVFILEHASH=$( git ls-tree master "$FILENAME" | awk '{print $3}' )
    if [[ $(wc -c < "$FILENAME") -eq $(git cat-file -s "$PREVFILEHASH") && $(git hash-object --stdin < "$FILENAME") == "$PREVFILEHASH" ]] ; then
        echo "$FILENAME identical to most recent version; exiting."
        exit 0
    fi
fi

# create the blob and tree
BLOBHASH=$(git hash-object -w "$FILENAME")
TREEHASH=$(echo -e "100644 blob $BLOBHASH\t$FILENAME" | git mktree)

# use subsequent args as commit message if present
if [[ $2 ]] ; then
    shift
    COMMITMSG="$*"
else
    COMMITMSG=commit
fi

# create the commit object; give it a parent unless this is the first commit
if [[ $INITIALCOMMIT ]] ; then
    COMMITHASH=$(echo "$COMMITMSG" | git commit-tree "$TREEHASH")
else
    COMMITHASH=$(echo "$COMMITMSG" | git commit-tree "$TREEHASH" -p master)
fi

# update master branch
git update-ref refs/heads/master "$COMMITHASH"

#
# consider setting these env variables
# GIT_AUTHOR_NAME
# GIT_AUTHOR_EMAIL
# GIT_AUTHOR_DATE
# GIT_COMMITTER_NAME
# GIT_COMMITTER_EMAIL
# GIT_COMMITTER_DATE
#
#
# other strategy is to create a temp non-existent index file with
# GIT_INDEX_FILE or equivalent option, then
# git update-index --add --cacheinfo 100644 83baae61804e65cc73a7201a7252750c76066a30 test.txt
# then git write-tree ; then git commit-tree
#
# vim:sw=4:et:ai:ic
| true
|
b453af83a0ffbb9327496a6c7d9cfa9e7f51adf6
|
Shell
|
freebsd/freebsd-ports
|
/net/ntpd-rs/files/ntp_daemon.in
|
UTF-8
| 1,240
| 3.625
| 4
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh

# PROVIDE: ntp_daemon
# REQUIRE: DAEMON FILESYSTEMS devfs
# BEFORE: LOGIN
# KEYWORD: nojail resume shutdown
#
# FreeBSD rc.d script for ntpd-rs. The %%ETCDIR%% / %%PREFIX%% tokens are
# substituted by the ports framework when the file is installed.

. /etc/rc.subr

name=ntp_daemon
rcvar=ntp_daemon_enable

load_rc_config $name

# Defaults; each may be overridden in rc.conf(5).
ntp_daemon_enable=${ntp_daemon_enable-"NO"}
ntp_daemon_config=${ntp_daemon_config-"%%ETCDIR%%/ntp.toml"}
ntp_daemon_socket=${ntp_daemon_socket-"/var/run/ntpd-rs"}

# The service is supervised by daemon(8); rc.subr tracks the supervisor's
# pidfile, so "command" is a no-op placeholder.
command="/usr/bin/true"
procname="/usr/sbin/daemon"
pidfile="/var/run/${name}.pid"

start_cmd="ntp_daemon_start"
stop_cmd="ntp_daemon_stop"

# Returns success when the pidfile exists and names a live process.
is_process_running()
{
	[ -f ${pidfile} ] && procstat $(cat ${pidfile}) >/dev/null 2>&1
}

ntp_daemon_start()
{
	# Ensure the socket directory exists and is owned by the _ntp user.
	[ -d "${ntp_daemon_socket}" ] || /bin/mkdir "${ntp_daemon_socket}"
	/usr/sbin/chown _ntp:_ntp "${ntp_daemon_socket}"
	# daemon(8): -P write supervisor pid, -r restart on exit, -f detach,
	# -o redirect output, -H close stdio on SIGHUP.
	/usr/sbin/daemon -P ${pidfile} -r -f -o /var/log/ntp_daemon.log -H %%PREFIX%%/bin/ntp-daemon --config "${ntp_daemon_config}"
	if is_process_running; then
		echo "Started ntp-daemon (pid=$(cat ${pidfile}))"
	else
		echo "Failed to start ntp-daemon"
	fi
}

ntp_daemon_stop()
{
	if is_process_running; then
		/bin/rm -rf "${ntp_daemon_socket}"
		local pid=$(cat ${pidfile})
		echo "Stopping ntp-daemon (pid=${pid})"
		# Negative pid: signal the whole process group created by daemon(8),
		# taking down both the supervisor and ntp-daemon.
		kill -- -${pid}
	else
		echo "ntp-daemon isn't running"
	fi
}

run_rc_command "$1"
| true
|
90e905e70f4fe9747df565e8c96cfc5021fb1e8b
|
Shell
|
lululeta2014/mihaib
|
/lethe/linux-setup/programs/postgresql/setup.sh
|
UTF-8
| 975
| 3.671875
| 4
|
[] |
no_license
|
#! /usr/bin/env bash

# Download, build from source, and install PostgreSQL under $MB_PRG_DIR,
# then create a default cluster. Requires sourceme.bash to define
# MB_KITS_DIR / MB_PRG_DIR and the download-kit helper.

set -u # exit if using uninitialised variable
set -e # exit if some command in this script fails
trap "echo $0 failed because a command in the script failed" ERR

SCRIPT=$(readlink -f "$0")
SCRIPT_DIR=$(dirname "$SCRIPT")
source "$SCRIPT_DIR/../../sourceme.bash"

echo '--- PostgreSQL'

# Confirm before building; anything other than y/Y cancels.
# (read -r so a stray backslash isn't interpreted; [[ ... ]] replaces the
# deprecated and ambiguous "[ ... -o ... ]" form.)
echo -n "Compile from sources install PostgreSQL? [y/N] "
read -r OK
if [[ "$OK" != [yY] ]]; then
	echo "Canceled"
	exit 0
fi

VERSION=9.3.5
LATEST=postgresql-"$VERSION".tar.bz2
PATTERN=postgresql-*.tar.bz2

download-kit \
	--delete-pattern "$PATTERN" \
	--url http://ftp.postgresql.org/pub/source/v"$VERSION"/"$LATEST" \
	--file "$MB_KITS_DIR"/"$LATEST"

PSQL_DIR="$MB_PRG_DIR"/postgresql
rm -rf "$PSQL_DIR"

# $PATTERN is intentionally unquoted here so the glob expands to the kit.
tar -C "$MB_PRG_DIR" -xjf "$MB_KITS_DIR"/$PATTERN
cd "$MB_PRG_DIR"/postgresql-*
./configure --prefix="$PSQL_DIR"
make
make install

cd "$MB_PRG_DIR"
rm -rf postgresql-*
cp -r "$SCRIPT_DIR"/utils/* "$PSQL_DIR"/bin

postgresql-make-cluster.sh
| true
|
5e5a8964d0e04da521d62136fcb3cc5a37ae084f
|
Shell
|
petronny/aur3-mirror
|
/keepassx-autoopen/PKGBUILD
|
UTF-8
| 907
| 3.125
| 3
|
[] |
no_license
|
# Maintainer: Etienne Perot <etienne[at]perot[dot]me>
pkgname=keepassx-autoopen
pkgver=20120430
pkgrel=2
pkgdesc="KeePassX with additional command-line switches to automatically open .kdb files."
url="http://www.keepassx.org/"
arch=('x86_64' 'i686')
license=('GPL')
depends=('qt4')
makedepends=('git' 'intltool' 'cmake')
# Bash array elements are whitespace-separated, not comma-separated; the
# original ('keepassx', 'keepassx-git') produced a bogus "keepassx," element
# that never matched the real keepassx package.
conflicts=('keepassx' 'keepassx-git')
_gitroot="git://perot.me"
_gitname="keepassx-autoopen"

build() {
	cd "$srcdir"

	msg "Connecting to the GIT server...."
	# Reuse an existing checkout if present, otherwise clone fresh.
	if [[ -d "$srcdir/$_gitname" ]] ; then
		cd "$_gitname"
		git pull origin
		msg "The local files are updated."
	else
		git clone "$_gitroot/$_gitname.git"
	fi
	msg "GIT checkout done"

	msg "Starting make..."
	mv "$srcdir/$_gitname" "$srcdir/$_gitname-build"
	cd "$srcdir/$_gitname-build"

	# cmake needs an explicit source directory ("."); the original omitted it.
	cmake . -DCMAKE_INSTALL_PREFIX=/usr/local -DCMAKE_VERBOSE_MAKEFILE=ON -DWITH_GUI_TESTS=ON
	make DESTDIR="$pkgdir" install

	rm -rf "$srcdir/$_gitname-build"
}
| true
|
c877aa0586a5e9d8774d0c005b4e90b1879c1f15
|
Shell
|
kr/doozer
|
/bin/light
|
UTF-8
| 192
| 2.671875
| 3
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/sh
# Start a numbered doozerd instance (default: instance 1).
# Instance N binds 127.0.0.1:(8040+N) and serves the web UI on :(8080+N);
# every instance other than the first attaches to instance 1 as a peer.
set -e
exec 2>&1

num=$1
test $# -eq 0 && num=1

peer_port=$((8040 + num))
web_port=$((8080 + num))

# $args stays empty (and expands to nothing, unquoted) for instance 1.
test $num -ne 1 && args="-a 127.0.0.1:8041"

exec doozerd -l 127.0.0.1:$peer_port -w :$web_port -timeout 5 $args
| true
|
83151b1d577d56ccb63de766f27c349e05b9c8ad
|
Shell
|
ZzeErrO/Shell-Programming
|
/Sequence-Statement/RandomforDice.sh
|
UTF-8
| 66
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/bash -x
# Simulate one roll of a six-sided die: fold $RANDOM (0..32767) into 1..6.
singleDigit=$(( (RANDOM % 6) + 1 ))
echo "$singleDigit"
| true
|
1ed179d9b9938931f7eaa7584fb1445bb0e0ada0
|
Shell
|
vadivelmurugank/source
|
/scripts/sys/system.sh
|
UTF-8
| 16,933
| 2.953125
| 3
|
[] |
no_license
|
# Collect System info
#
#
# - processes and pmap
#
# - kernel version
# - kernel bootline
# - device nodes
#
# - kernel modules
# - kernel config
#
# - pci list
# - network interfaces
#
# - loader and libs
#
# - hardware info
# - disk
# - ram
#
#####
## Perf
########
# NOTE(review): everything from here to the first function definition runs
# immediately when the script starts, before any show_* collector is called.
# These look like scratch notes / recipes (perf, FlameGraph, vlibshow) rather
# than part of the report -- confirm whether they should be commented out.
pstree -Aa -st -l
#perf record -F 99 -ag -p $(pidof switchd)
perf report -n
perf report -n --stdio
#Flamegraph
# git clone https://github.com/brendangregg/FlameGraph # or download it from github
cd FlameGraph
sudo sh -c "perf record -F 99 -ag -- sleep 60"
perf script | ./stackcollapse-perf.pl > out.perf-folded
cat out.perf-folded | ./flamegraph.pl > perf-kernel.svg
# vlibshow
# NOTE(review): $SDE must be set in the environment for this to work.
export LD_LIBRARY_PATH=$SDE/install/lib
~/bin/vlibshow ./libbf_switch.so > ~/bin/libbf_switch_symbols.txt
# Print host identity: hostname, DNS resolution, and DMI/SMBIOS tables
# (system, bios, slot, baseboard, chassis). dmidecode typically needs root.
show_host()
{
    echo -e "\n\n Host Info"
    echo -e "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
    echo -e "\n\n Hostname"
    echo -e "-----------------------------------------------"
    echo -e "\n\n#$ hostname"
    hostname
    nslookup $(hostname)
    echo -e "\n\n system"
    echo -e "-----------------------------------------------"
    echo -e "\n\n#$ dmidecode -t system\n"
    dmidecode -t system
    echo -e "\n\n #$ dmidecode -t bios \n"
    dmidecode -t bios
    dmidecode -t slot
    dmidecode -t baseboard
    dmidecode -t chassis
}
# Report virtualization state: loaded vm/virt kernel modules, hypervisor
# detection (virt-what), and a libvirt inventory via virsh. Harmless when
# the tools are absent -- each command simply fails and the next one runs.
show_virtual()
{
    echo -e "\n\n Virtual Info"
    echo -e "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
    echo -e "\n\n#$ lsmod | grep -i vm \n"
    lsmod | grep -E -i '(vm|vir)'
    echo -e "\n\n#$ virt-what \n"
    virt-what
    echo -e "\n\n#$ virt-host-validate \n"
    virt-host-validate
    echo -e "\n\n#$ virsh list --all \n"
    virsh list --all
    echo -e "\n\n#$ virsh iface-list \n"
    virsh iface-list
    echo -e "\n\n#$ virsh nodeinfo \n"
    virsh nodeinfo
    echo -e "\n\n#$ virsh version \n"
    virsh version
    echo -e "\n\n#$ virsh nwfilter-list \n"
    virsh nwfilter-list
    echo -e "\n\n#$ virsh net-list \n"
    virsh net-list
    echo -e "\n\n#$ virsh pool-list \n"
    virsh pool-list
    echo -e "\n\n#$ virsh pool-info default \n"
    virsh pool-info default
    echo -e "\n\n#$ virsh vol-list --details default \n"
    virsh vol-list --details default
    echo -e "\n\n#$ virsh nodememstats \n"
    virsh nodememstats
    echo -e "\n\n#$ virsh sysinfo \n"
    virsh sysinfo
    echo -e "\n\n#$ virsh capabilities \n"
    virsh capabilities
    echo -e "\n\n#$ virsh nodedev-list \n"
    virsh nodedev-list
}
# Docker daemon snapshot: images, networks, engine version, volumes, info.
# All read-only; fails harmlessly when docker is not installed.
show_docker()
{
    # Fix: "docker images ls" listed images literally *named* "ls";
    # the image listing subcommand is "docker image ls".
    echo -e "\n\n#$ docker image ls \n"
    docker image ls
    echo -e "\n\n#$ docker network ls \n"
    docker network ls
    echo -e "\n\n#$ docker version \n"
    docker version
    # Fix: bare "docker volume" only prints usage help; "ls" was missing.
    echo -e "\n\n#$ docker volume ls \n"
    docker volume ls
    echo -e "\n\n#$ docker info \n"
    docker info
}
# Dump the process/thread landscape: pstree, then a custom table (one row per
# task, including per-thread rows gathered from /proc/<pid>/task) showing
# thread group, parent, name, tid, CPU affinity and peak VM size, followed by
# SysV IPC usage and listening sockets.
show_process()
{
    ## Total number of priorities = 140
    ## Real time priority range(PR or PRI): 0 to 99
    ## User space priority range: 100 to 139
    ## Nice value range (NI): -20 to 19
    ## PR = 20 + NI
    ## PR = 20 + (-20 to + 19)
    ## PR = 20 + -20 to 20 + 19
    ## PR = 0 to 39 which is same as 100 to 139.
    echo -e "\n\n Process Info"
    echo -e "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
    echo -e "\n\nProcesses and memory"
    echo -e "-----------------------------------------------"
    #echo -e "\n\n#$ ps auxf | awk '{ printf("%6u MB\t", $6/1024); printf("%-6s \t\t", $1); for(i=11;i<=NF;++i) printf("%s ", $i);printf("\n")}'\n"
    #ps auxf | awk '{ printf("%6u MB\t", $6/1024); printf("%-6s \t\t", $1); for(i=11;i<=NF;++i) printf("%s ", $i);printf("\n")}'
    # pstree $(pgrep <<process name>. )
    # pstree $(pgrep <<process name>. )
    # -A : Ascii
    # -a : args
    # -h : highlight running process
    # -t : thread names
    # -s : parents
    # -l : don't truncate long lines
    pstree -Aa -h -st -l
    # list all threads
    # ps -Tef | grep vfrwd
    # Collect every numeric pid under /proc; for multi-threaded processes
    # also append all thread ids from /proc/<pid>/task.
    pids=
    for pid in $(ls -1 /proc | grep -E '^[0-9]+')
    do
        pids="$pids $pid"
        Threads=$(awk -F'\t' '/^Threads/ {print $2}' /proc/$pid/status)
        # NOTE(review): "XX$Threads" is an ad-hoc non-empty test; [ -n "$Threads" ]
        # would be the conventional form (behavior kept as-is here).
        if [ XX$Threads != "XX" ] ; then
            if [ -d /proc/$pid/task ] ; then
                pids="$pids $(ls -1 /proc/$pid/task/ | grep -E '^[0-9]+')"
            fi
        fi
    done
    #echo $pids
    printf "========================================================================\n"
    printf "%-10s || %-10s || %10s (%5s) core:%s | vmsize=%s \n" "ThreadGroup" "Parent" "TName" "Tid" "TCpus" "VmPeak"
    printf "========================================================================\n\n"
    # For each task, pull selected fields from /proc/<pid>/status; the
    # NGid/TGid/PPid lookups map ids back to human-readable names. Tasks may
    # vanish between the scan above and here, hence the 2>/dev/null guards.
    for pid in $pids
    #for pid in "139726"
    do
        NGid=$(awk -F'\t' '/^Ngid/ {print $2}' /proc/$pid/status)
        TGid=$(awk -F'\t' '/^Tgid/ {print $2}' /proc/$pid/status)
        PPid=$(awk -F'\t' '/^PPid/ {print $2}' /proc/$pid/status)
        VmPeak=$(awk -F'\t' '/^VmPeak/ {print $2}' /proc/$pid/status)
        TName=$(awk -F'\t' '/^Name/ {print $2}' /proc/$pid/status)
        TCpus=$(awk -F'\t' '/^Cpus_allowed_list/ {print $2}' /proc/$pid/status)
        Tid=$(awk -F'\t' '/^Pid/ {print $2}' /proc/$pid/status)
        Threads=$(awk -F'\t' '/^Threads/ {print $2}' /proc/$pid/status)
        NumaGroup=$(awk -F'\t' '/^Name/ {print $2}' /proc/$NGid/status 2> /dev/null)
        ThreadGroup=$(awk -F'\t' '/^Name/ {print $2}' /proc/$TGid/status 2> /dev/null)
        Parent=$(awk -F'\t' '/^Name/ {print $2}' /proc/$PPid/status 2> /dev/null)
        #Thread=$(grep 'Name\|Cpus_allowed_list\|^Pid' /proc/$pid/status | tr -d '\t' | cut -d: -f2 | xargs -L3 | awk '{print $1,$2,$3}')
        #echo -e "${ThreadGroup} -> ${Parent} -> ${TName} :: ${Tid} : ${TCpus} - ${VmPeak}"
        printf "%-10s -> %-10s -> %10s (%5s) core:%s : vmsize=%s Threads:%s\n" "${ThreadGroup}" "${Parent}" "${TName}" "${Tid}" "${TCpus}" "${VmPeak}" "${Threads}"
    done
    #ps -p $pid -L -o pid,tid,cputime,args,psr,pcpu
    #ps -p $pid -L -o uname,pid,psr,pcpu,cputime,pmem,rsz,vsz,tty,s,etime,args
    echo -e "\n\nProcess Shared Memory and Semaphores"
    echo -e "-----------------------------------------------"
    echo -e "\n\n#$ ipcs -u\n"
    ipcs -u --human
    ipcs -pm
    echo -e "\n\n#$ ipcs\n"
    ipcs -a
    echo -e "\n\n#$ lsipc\n"
    lsipc
    echo -e "\n\n#$ lslocks\n"
    lslocks
    echo -e "\n\nProcess and Sockets"
    echo -e "-----------------------------------------------"
    echo -e "\n\n#$ netstat -tulpn\n"
    netstat -tulpn
}
# Kernel report: version, boot command line, every loaded module with its
# modinfo fields, udev static nodes, and the running kernel's build config.
show_kernel()
{
    echo -e "\n\n Kernel Info"
    echo -e "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
    echo -e "\n\n Kernel Version"
    echo -e "-----------------------------------------------"
    echo -e "\n\n#$ uname -a\n"
    uname -a
    echo -e "\n\n$# cat /proc/version\n"
    cat /proc/version
    echo -e "\n\n Kernel bootline"
    echo -e "-----------------------------------------------"
    echo -e "\n\n#$ cat /proc/cmdline\n"
    cat /proc/cmdline
    #echo -e "\n\n Regenerate module dependencies"
    #echo -e "-----------------------------------------------"
    #echo -e "\n\n#$ depmod -a\n"
    #depmod -a
    # List module parameters
    # systool -vm $mod
    echo -e "\n\n Installed Kernel Modules"
    echo -e "-----------------------------------------------"
    echo -e "\n\n#$ for mod in `lsmod | awk '{print $1}'`; do echo -e \"$mod\", \"$(modinfo -F description $mod)\", \"$(modinfo -F filename $mod)\" , \"$(modinfo -F license $mod)\" , \"$(modinfo -F parm $mod)\" , \"$(modinfo -F depends $mod)\" , \"$(modinfo -F alias $mod)\" ; done \n"
    # NOTE(review): the loop below appears to have been split across two lines
    # without a continuation backslash (the first line ends in ","); as written
    # the second line runs as a separate command each iteration -- likely a
    # wrapping artifact. Verify against the original one-line version.
    for mod in $(lsmod | awk '{print $1}') ; do echo -e \"$mod\", \"$(modinfo -F description $mod)\", \"$(modinfo -F filename $mod)\" , \"$(modinfo -F license $mod)\" , \"$(modinfo -F parm $mod)\" , \"$(modinfo -F depends $mod)\",
    \"$(modinfo -F alias $mod)\" ; systool -vm $mod ; done
    echo -e "\n\n Kernel module static nodes"
    echo -e "-----------------------------------------------"
    echo -e "\n\n#$ kmod static-nodes\n"
    kmod static-nodes
    echo -e "\n\n Kernel Config"
    echo -e "-----------------------------------------------"
    echo -e "\n\n#$ cat /boot/config-$(uname -r)\n "
    cat /boot/config-$(uname -r)
}
# CPU / topology report: NUMA layout, lscpu summary, DMI processor table,
# registered device major numbers, and full lshw hardware tree.
show_cpu()
{
    echo -e "\n\n CPU Info"
    echo -e "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
    echo -e "\n\n#$ numactl -H \n"
    numactl -H
    echo -e "\n\n#$ lscpu \n"
    lscpu
    echo -e "\n\n#$ dmidecode -t processor \n"
    dmidecode -t processor
    echo -e "\n\n#$ cat /proc/devices \n"
    cat /proc/devices
    echo -e "\n\n#$ lshw \n"
    lshw
    #taskset -c -p <pid>
}
# RAM report: DMI memory/cache tables, kernel meminfo, and usage summaries.
show_memory()
{
    echo -e "\n\n Memory Info"
    echo -e "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
    echo -e "\n\n#$ dmidecode -t memory \n"
    dmidecode -t memory
    echo -e "\n\n#$ dmidecode -t cache \n"
    dmidecode -t cache
    echo -e "\n\n#$ cat /proc/meminfo \n"
    # Fix: the original line was a bare "/proc/meminfo", which tries to
    # *execute* the proc file (permission denied) instead of printing it;
    # the label above shows "cat" was intended.
    cat /proc/meminfo
    echo -e "\n\n#$ free \n"
    free
    echo -e "\n\n#$ vmstat -s \n"
    vmstat -s
}
# Bus inventory: PCI device tree (verbose) and attached USB devices.
show_pci_devices()
{
    echo -e "\n\n PCI Info"
    echo -e "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
    echo -e "\n\n#$ lspci -tvv \n"
    lspci -tvv
    echo -e "\n\n#$ lsusb \n"
    lsusb
}
# Storage report: SCSI devices, filesystem UUIDs, block tree, partition
# tables, disk usage, and mounted filesystems.
show_storage()
{
    echo -e "\n\n Storage Info"
    echo -e "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
    echo -e "\n\n Block Info"
    echo -e "-----------------------------------------------"
    echo -e "\n\n#$ lsscsi \n"
    lsscsi
    echo -e "\n\n #$ blkid \n"
    blkid
    echo -e "\n\n #$ lsblk -ai \n"
    lsblk -ai
    # ("Parition" typo below is in a runtime string; preserved as-is.)
    echo -e "\n\n Parition table "
    echo -e "-----------------------------------------------"
    echo -e "\n\n #$ df -h \n"
    df -h
    echo -e "\n\n #$ fdisk -l \n"
    fdisk -l
    echo -e "\n\n Mount table "
    echo -e "-----------------------------------------------"
    echo -e "\n\n #$ mount -l \n"
    mount -l
}
# Network report: NIC hardware, a per-interface summary table (link/driver/
# mac/ip/mask scraped from ifconfig+ethtool), bridges, per-NIC ethtool
# feature dumps, routing tables, iptables rules, and open sockets.
# NOTE(review): several echo "label" lines below embed unescaped double
# quotes, so the string actually terminates early; echo still prints
# something close to the intended label, but the quoting is fragile.
show_network()
{
    echo -e "\n\n Network Info"
    echo -e "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
    echo -e "\n\n Network Adapter "
    echo -e "-----------------------------------------------"
    echo -e "\n\n#$ lspci -tvv | grep -i "ethernet\|network" \n"
    lspci -tvv | grep -i "ethernet\|network"
    echo -e "\n\n Network Interfaces "
    echo -e "-----------------------------------------------"
    echo -e "\n\n#$ ifconfig -a \n"
    printf "=======================================================\n"
    printf "%-14s: %-7s | %-20s | %-20s | %-15s | %s\n" "| intf" " link" " drv" " hwaddr" " inet" " mask"
    printf "=======================================================\n"
    # Interface names come from the netstat table (skipping its 2 header
    # lines); each field falls back to a dash when the scrape finds nothing.
    for intf in $(netstat -ia -p | sed '1,2d' | awk -F" " '{print $1}')
    do
        #hwaddr=$(ifconfig $intf | grep -Go 'ether [a-zA-Z0-9:]\+' | awk -F'ether ' '{print $2}')
        hwaddr=$(ifconfig $intf | grep -Go 'ether [a-zA-Z0-9:]\+' | awk -F'ether ' '{print $2}')
        [ -z "$hwaddr" ] && hwaddr="----"
        link=$(ifconfig $intf | grep -Go 'Link encap:[a-zA-Z]\+' | awk -F'Link encap:' '{print $2}')
        [ -z "$link" ] && link="-"
        inet=$(ifconfig $intf | grep -Go 'inet [a-zA-Z0-9.]\+' | awk -F'inet ' '{print $2}')
        #inet=$(ifconfig $intf | grep -Go 'inet [a-zA-Z0-9.]\+' | awk -F'inet ' '{print $2}')
        [ -z "$inet" ] && inet="-"
        mask=$(ifconfig $intf | grep -Go 'netmask [a-zA-Z0-9.]\+' | awk -F'netmask ' '{print $2}')
        #mask=$(ifconfig $intf | grep -Go 'Mask:[a-zA-Z0-9.]\+' | awk -F'Mask:' '{print $2}')
        [ -z "$mask" ] && mask="-"
        drv=$(ethtool -i $intf 2> /dev/null | grep driver | awk -F'driver: ' '{print $2}')
        [ -z "$drv" ] && drv="-"
        printf "%-14s: %7s %20s %20s %15s %s\n" "$intf" "$link" "$drv" "$hwaddr" "$inet" "$mask"
    done
    echo -e "\n\n Network Link "
    echo -e "-----------------------------------------------"
    echo -e "\n\n#$ brctl show \n"
    brctl show
    #ifconfig -a
    #for intf in $(ifconfig -s | sed '1d' | awk -F" " '{print $1}') ; do echo $intf; ethtool -S $intf | grep -i "drops: [1-9]\+" ; done
    echo -e "\n\n#$ statistics: netstat -ia -p \n"
    netstat -ia -p
    echo -e "\n\n#$ Ethernet NIC features: ethtool -? \n"
    # Per-interface ethtool dump: driver, offload features, ring buffers,
    # RSS hash, timestamping, and channel configuration.
    for intf in $( netstat -ia -p | sed '1,2d' | awk -F" " '{print $1}' ) ; do \
        echo -e "\n\n\n==========================================\n" ; \
        echo " $intf" ; \
        echo -e "\n==========================================\n" ; \
        echo -e "\n # Driver: \n" ; ethtool -i $intf | grep driver; \
        echo -e "\n# Ethernet features: \n" ; ethtool -k $intf | grep ": on"; \
        echo -e "\n# get ring buffers \n" ; ethtool -g $intf ; \
        echo -e "\n# get driver \n" ; ethtool -i $intf ; \
        echo -e "\n # Flow hash \n" ; ethtool -x $intf ; \
        echo -e "\n # Timing \n" ; ethtool -T $intf ; \
        echo -e "\n # Channel \n" ; ethtool -l $intf ; \
    done
    echo -e "\n\n Network IP Interfaces "
    echo -e "-----------------------------------------------"
    echo -e "\n\n#$ ip link show \n"
    ip link show
    echo -e "\n\n#$ tcpdump -D \n"
    tcpdump -D
    echo -e "\n\n#$ netstat -ia \n"
    netstat -ia
    echo -e "\n\n#$ ip a \n"
    ip a
    echo -e "\n\n#$ ip route \n"
    ip route
    echo -e "\n\n#$ ip route show table all \n"
    ip route show table all
    echo -e "\n\n#$ route -n \n"
    route -n
    echo -e "\n\n Network IP Rules "
    echo -e "-----------------------------------------------"
    echo -e "\n\n#$ iptables -L -v \n"
    iptables -L -v
    echo -e "\n\n Network Sockets "
    echo -e "-----------------------------------------------"
    echo -e "\n\n #$ ss -s \n"
    ss -s
    echo -e "\n\n#$ lsof -i \n"
    lsof -i
    # (labels say "-a", commands run "-ap" to include owning processes)
    echo -e "\n\n#$ ss -t -a # all tcp sockets \n"
    ss -t -ap # all tcp sockets
    echo -e "\n\n #$ ss -u -a # all udp sockets \n"
    ss -u -ap # all udp sockets
    echo -e "\n\n #$ ss -w -a # all raw sockets \n"
    ss -w -ap # all raw sockets
    echo -e "\n\n #$ ss -x -a # all unix sockets \n"
    ss -x -ap # all unix sockets
}
# Service/init report (SysV-style): chkconfig runlevels and service status.
show_scheduler()
{
    echo -e "\n\n Scheduler Info"
    echo -e "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
    echo -e "\n\n chkconfig \n"
    chkconfig
    echo -e "\n\n service --status-all \n"
    service --status-all
}
# Dynamic loader report: glibc/ldd version, configured library search
# paths (directory lines only, values stripped), and the ld.so cache.
show_loader_libs()
{
    echo -e "\n\n Loader Info"
    echo -e "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
    echo -e "\n\n ldd --version \n"
    ldd --version
    echo -e "\n\n ldconfig paths \n"
    ldconfig -Nv 2> /dev/null | grep -v ^$'\t' | sed 's/\:.*/ /'
    echo -e "\n\n#$ ldconfig -p : from cache \n"
    ldconfig -p
}
# NTP synchronisation status (ntpstat exits non-zero when unsynchronised).
show_time_sync()
{
    echo -e "\n\n Time Sync Info"
    echo -e "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
    ntpstat
}
# Dump udev metadata for every block device, character device, and symlink
# node under /dev (and /sys links) via "udevadm info".
# NOTE(review): in the echo "label" lines the inner double quotes terminate
# the outer string early; the *executed* find|xargs commands below each label
# are correctly quoted -- only the printed labels are mangled.
show_device_nodes()
{
    echo -e "\n\n All udev Device Nodes"
    echo -e "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
    echo -e "\n\n Block Devices "
    echo -e "-----------------------------------------------"
    # block devices
    echo -e "\n\n find /dev -type b | xargs -I{} sh -c 'echo -e "\n\n [ {} ]\n"; udevadm info --query=all --name={} ' \n"
    find /dev -type b | xargs -I{} sh -c 'echo -e "\n\n [ {} ]\n"; udevadm info --query=all --name={} '
    # character devices
    echo -e "\n\n Character Devices "
    echo -e "-----------------------------------------------"
    echo -e "\n\n find /dev -type c | xargs -I{} sh -c 'echo -e "\n\n [ {} ]\n"; udevadm info --query=all --name={} ' \n"
    find /dev -type c | xargs -I{} sh -c 'echo -e "\n\n [ {} ]\n"; udevadm info --query=all --name={} '
    echo -e "\n\n All Devices "
    echo -e "-----------------------------------------------"
    # links
    echo -e "\n\n find /sys -type l | xargs -I{} sh -c 'echo -e "\n\n [ {} ]\n"; udevadm info --query=all --name={} ' \n"
    find /sys -type l | xargs -I{} sh -c 'echo -e "\n\n [ {} ]\n"; udevadm info --query=all --name={} '
    echo -e "\n\n find /dev -type l | xargs -I{} sh -c 'echo -e "\n\n [ {} ]\n"; udevadm info --query=all --name={} ' \n"
    find /dev -type l | xargs -I{} sh -c 'echo -e "\n\n [ {} ]\n"; udevadm info --query=all --name={} '
}
# Network statistics at three levels: kernel SNMP counters (nstat),
# per-interface counters (netstat -i / ip -s), and socket-level summaries.
# NOTE(review): this function is defined but never invoked at the bottom of
# the script -- confirm whether that is intentional.
show_statistics()
{
    nstat
    netstat -i
    ip -s link
    # Socket Level statistics
    netstat -s
    ss -s
}
# Performance snapshot: one batch-mode top sample, the list of available
# perf events, and a 10-second system-wide "perf stat" run (this function
# therefore blocks for ~10s).
show_perf_counters()
{
    # top
    top -n 1 -b
    # top respective cpu
    # NOTE(review): top's -c toggles command-line display; "-c 1" does not
    # select a CPU -- verify what was intended here.
    top -c 1 -n 1 -b
    echo -e "\n\n CPU Performance Counters"
    echo -e "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
    echo -e "\n\n perf list "
    echo -e "-----------------------------------------------"
    echo -e "\n\n perf list ' \n"
    perf list
    echo -e "\n\n Cpu performance counters "
    echo -e "-----------------------------------------------"
    perf stat -a sleep 10
}
# Run every collector in sequence; intended to be executed as root with
# stdout redirected to a report file.
# NOTE(review): show_statistics is defined above but not invoked here.
show_host
show_virtual
show_docker
show_process
show_kernel
show_cpu
show_memory
show_pci_devices
show_storage
show_network
show_scheduler
show_time_sync
show_perf_counters
show_loader_libs
show_device_nodes
| true
|
23c9405dd94c0bc4dc9f5e7605ac1e4ebf24f1a3
|
Shell
|
hofmannedv/training-shell
|
/io/block.sh
|
UTF-8
| 481
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# -----------------------------------------------------------
# input data for a code block
#
# (C) 2017 Frank Hofmann, Berlin, Germany
# Released under GNU Public License (GPL)
# email frank.hofmann@efho.de
# -----------------------------------------------------------

# define data file; generalized: an optional first argument overrides the
# previous hard-coded /etc/fstab (no argument keeps the old behaviour)
dataFile=${1:-/etc/fstab}

# Redirect the file into a brace group so both reads consume the SAME
# stream; IFS= and -r preserve leading whitespace and backslashes.
{
	IFS= read -r line1 # read first line of the data file
	IFS= read -r line2 # read second line of the data file
} < "$dataFile"

# output data
echo "line 1: $line1"
echo "line 2: $line2"

exit 0
| true
|
7251b2ff92a85391c9f9489410594268bf219f11
|
Shell
|
torgaja/openclopenbenchmark
|
/utils/splitter
|
UTF-8
| 4,604
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate a C++ OpenCL test wrapper ("<kernel>_wrap.cpp") from a .cl-style
# input file: everything before the "//Gold" marker is embedded as the OpenCL
# source string; everything after it is copied verbatim as host (gold) code.
# Kernel name and argument names are scraped from the "kernel void" line.
# Usage: splitter <input-file> [output-prefix]
if [ -e $1 ]; then
	true;
else
	echo "Cannot open file: $1";
	exit 1;
fi
# Work on a private copy; $$ makes the name unique per invocation.
tmpfile="splitter_$$.tmp"
cat $1 > $tmpfile
# num = byte offset of the first "//Gold" marker (grep -b prints offsets).
if grep "//Gold" $tmpfile > /dev/null; then
	num=`grep -m 1 -b "//Gold" $tmpfile | cut -d ':' -f 1`
else
	echo "Error: cannot find \"//Gold\" marker inside input file"
	rm $tmpfile
	exit 1
fi
#echo $num
# num2 = byte count of the remainder (from the marker to end of file).
num2=$((`cat $tmpfile | wc -c` - $num));
# Tokenize the kernel signature: pad brackets/parens/commas with spaces so
# the for-loop below can walk it word by word.
# NOTE(review): the repeated "s/ / /g" substitutions look like collapsed
# multi-space patterns (e.g. "s/  / /g") -- verify against the original.
if grep -q "kernel void " $tmpfile; then
	header=`grep "kernel void " $tmpfile | sed -e "s/\]/ /g;s/(/ ( /g;s/)/ ) /g;s/*/ /g;s/,/ , /g;s/ / /g;s/ / /g;s/ / /g;s/ / /g;s/ / /g;s/ / /g;s/ / /g;s/ / /g;s/ / /g;s/ / /g"`
else
	echo "Error: cannot find \"kernel void\" marker inside input file"
	rm $tmpfile
	exit 1
fi
# Third token of "kernel void <name> (" is the kernel name.
name=`echo $header | cut -d ' ' -f 3`
#echo $name
resname="${2}${name}_wrap.cpp"
# Collect argument names: each name is the token immediately before a ","
# or the closing ")".
for i in $header; do
	if [ $i == "," ]; then
		args="$args $prev";
	fi
	if [ $i == ")" ]; then
		args="$args $prev";
	fi
	prev=$i
done
#echo $args
#echo $header
#echo $num2
# Emit the wrapper: OpenCL source as a C string literal (quotes/backslashes
# escaped, each line wrapped in "..."\n), then the verbatim gold section.
echo "#include<ocl_wrapper.h>" > $resname
echo "static const char* ocl_src = " >> $resname
cat $tmpfile | head -c $num | sed -e 's/"/\\"/g;s/\\/\\\\/g;s/^/"/g;s/$/\\n"/g' >> $resname
echo ";" >> $resname
cat $tmpfile | tail -c $num2 >> $resname
echo >> $resname
echo "void run_ocl_$name(ocl_test& test) {" >> $resname
echo "cl_int ciErrNum = 0;" >> $resname
# Optional size constraints may be declared in the input as "MARKER: value"
# or "MARKER(axis): value" comments; translate them into test settings.
if grep -q "MAX_GLOBAL_SIZE:" $tmpfile; then
	sizenum=`grep "MAX_GLOBAL_SIZE:" $tmpfile | cut -d ':' -f 2`
	echo "test.max_global_size[\"$name\"][0] = $sizenum;" >> $resname
	echo "test.max_global_size[\"$name\"][1] = 1;" >> $resname
	echo "test.max_global_size[\"$name\"][2] = 1;" >> $resname
fi
for i in 0 1 2; do
	if grep -q "MAX_GLOBAL_SIZE($i)" $tmpfile; then
		sizenum=`grep "MAX_GLOBAL_SIZE($i)" $tmpfile | cut -d ':' -f 2`
		echo "test.max_global_size[\"$name\"][$i] = $sizenum;" >> $resname
	fi
done
if grep -q "MAX_LOCAL_SIZE:" $tmpfile; then
	sizenum=`grep "MAX_LOCAL_SIZE:" $tmpfile | cut -d ':' -f 2`
	echo "test.max_local_size[\"$name\"][0] = $sizenum;" >> $resname
	echo "test.max_local_size[\"$name\"][1] = 1;" >> $resname
	echo "test.max_local_size[\"$name\"][2] = 1;" >> $resname
fi
if grep -q "MIN_LOCAL_SIZE:" $tmpfile; then
	sizenum=`grep "MIN_LOCAL_SIZE:" $tmpfile | cut -d ':' -f 2`
	echo "test.min_local_size[\"$name\"][0] = $sizenum;" >> $resname
	echo "test.min_local_size[\"$name\"][1] = 1;" >> $resname
	echo "test.min_local_size[\"$name\"][2] = 1;" >> $resname
fi
for i in 0 1 2; do
	if grep -q "MAX_LOCAL_SIZE($i)" $tmpfile; then
		sizenum=`grep "MAX_LOCAL_SIZE($i)" $tmpfile | cut -d ':' -f 2`
		echo "test.max_local_size[\"$name\"][$i] = $sizenum;" >> $resname
	fi
done
for i in 0 1 2; do
	if grep -q "MIN_LOCAL_SIZE($i)" $tmpfile; then
		sizenum=`grep "MIN_LOCAL_SIZE($i)" $tmpfile | cut -d ':' -f 2`
		echo "test.min_local_size[\"$name\"][$i] = $sizenum;" >> $resname
	fi
done
# Defaults when no size markers were present.
if ! grep -q "MAX_GLOBAL_SIZE" $tmpfile; then
	echo "test.max_global_size[\"$name\"][0] = 0;" >> $resname
	echo "test.max_global_size[\"$name\"][1] = 1;" >> $resname
	echo "test.max_global_size[\"$name\"][2] = 1;" >> $resname
fi
if ! grep -q "MAX_LOCAL_SIZE" $tmpfile; then
	echo "test.max_local_size[\"$name\"][0] = 0;" >> $resname
	echo "test.max_local_size[\"$name\"][1] = 1;" >> $resname
	echo "test.max_local_size[\"$name\"][2] = 1;" >> $resname
fi
if grep -q "KERNEL_FLAG:" $tmpfile; then
	kflag=`grep "KERNEL_FLAG:" $tmpfile | cut -d ':' -f 2`
	echo "test.kernel_flags[\"$name\"] |= ${kflag};" >> $resname
fi
# Boilerplate: build program, create kernel, optional host init hook,
# bind every scraped argument, launch, release.
echo "cl_program program = test.ocl_load_src(ocl_src);" >> $resname
echo "cl_kernel kernel = clCreateKernel(program, \"$name\", &ciErrNum);" >> $resname
echo "test.geterr(ciErrNum, __LINE__, \"$name\");" >> $resname
echo "int parnum = 0;" >> $resname
if grep -q "host_init_${name}" $tmpfile; then
	echo "host_init_${name}(test);" >> $resname
fi
for i in $args; do
	echo "clSetKernelArg(kernel, parnum++, sizeof(test.dev_$i), &test.dev_$i);" >> $resname
done
echo "test.launch_kernel(kernel, \"$name\");" >> $resname
echo "clReleaseKernel(kernel);" >> $resname
echo "clReleaseProgram(program);" >> $resname
echo "test.free_dyn_memory();" >> $resname
echo "}" >> $resname
echo >> $resname
# Also emit a gold-reference runner that calls the host implementation
# with the same (host_-prefixed) arguments.
echo "void run_gold_$name(ocl_test& test) {" >> $resname
run_gold="(";
for i in $args; do
	if [ $run_gold == "(" ]; then
		run_gold="${run_gold}test.host_${i}"
	else
		run_gold="${run_gold},test.host_${i}"
	fi
done
echo "${name}${run_gold});" >> $resname
echo "}">> $resname
rm $tmpfile
| true
|
333dadb9a55e66a4f9e994e717eb034c58ab9d64
|
Shell
|
Hasinur2020/CodingClub
|
/day8/checkDictionary.sh
|
UTF-8
| 973
| 3.890625
| 4
|
[] |
no_license
|
#!/bin/bash -x
# Roll a die a user-chosen number of times, map each numeric result to its
# English face name, and record the rolls in an associative array keyed by
# face name (so only the LAST roll per face is retained).

declare -A face_value

# -r: don't let a stray backslash mangle the input.
read -r -p "Enter how many times you want to roll a dice: " rollDiceTimes
isRoll=0;
#roll=$(( RANDOM%6+1 ))

# Named face values used as case patterns below.
FACE_ONE=1
FACE_TWO=2
FACE_THREE=3
FACE_FOUR=4
# Fix: was misspelled FAE_FIVE, leaving $FACE_FIVE empty -- every roll of 5
# fell through to the "Unpredictable situation" default branch.
FACE_FIVE=5
FACE_SIX=6

while [[ isRoll -lt rollDiceTimes ]]
do
	roll=$(( RANDOM%6+1 ))
	case $roll in
		$FACE_ONE)
			face="One"
			;;
		$FACE_TWO)
			face="Two"
			;;
		$FACE_THREE)
			face="Three"
			;;
		$FACE_FOUR)
			face="Four"
			;;
		$FACE_FIVE)
			face="Five"
			;;
		$FACE_SIX)
			face="Six"
			;;
		*)
			face="Unpredictable situation"
	esac
	face_value[$face]=$roll
	echo $face
	isRoll=$(( $isRoll+1 ))
done
echo "${face_value[@]}"

# Write a program in the following steps
# a. Roll a die and find the number between 1 to 6
# b. Repeat the Die roll and find the result each time
# c. Store the result in a dictionary
# d. Repeat till any one of the number has reached 10 times
# e. Find the number that reached maximum times and the one that was for minimum times
| true
|
bad20f7a2e075638c556c9f5515bd75bb4648dfb
|
Shell
|
mlaky88/BinaryReal
|
/bin-real-mergeConvergePlots.sh
|
UTF-8
| 1,450
| 3.203125
| 3
|
[] |
no_license
|
# Merge per-algorithm convergence data (objective values and feature counts)
# for both encodings into two combined CSV files, then plot them.
# Usage: ./bin-real-mergeConvergePlots.sh <dataset> <type>
if [ $# -lt 2 ]; then
	echo "EXAMPLE: ./bin-real-mergeConvergePlots.sh [dataset] [type]"
	exit 1
fi
dataset=$1
type=$2
# here we can work with a single classifier, because the classifier choice
# has no influence on obj (translated from Slovenian)
cls="knn"
path="drawData"
touch drawobj.dat drawfeats.dat
#converge-feats-abc-bin-adult-svm-processed.dat
for coding in "real" "bin"; do
	for alg in "de" "pso" "abc" "ga"; do
		# Regenerate missing processed data on the fly.
		if [ ! -f "$path/converge-obj-$alg-$coding-$dataset-$cls-$type-processed.dat" ]; then
			echo "Results for algorithm $alg is missing"
			echo "Missing file: converge-obj-$alg-$coding-$dataset-$cls-$type-processed.dat"
			echo "Running ./prepareConvergeGraphs with appropriate settings"
			./prepareConvergeGraphs.sh $alg $dataset $coding $type
		fi
		# Append each algorithm's column to the accumulating tables.
		paste drawobj.dat "$path/converge-obj-$alg-$coding-$dataset-$cls-$type-processed.dat" > tmp1.dat && mv tmp1.dat drawobj.dat
		paste drawfeats.dat "$path/converge-feats-$alg-$coding-$dataset-$cls-$type-processed.dat" > tmp1.dat && mv tmp1.dat drawfeats.dat
	done
done
# Placeholder encoding label passed to the plot script for merged data.
coding="dummy"
# Convert tab-separated paste output to CSV; strip the leading comma left by
# the initially empty first column.
cat drawobj.dat | sed 's/\t/,/g' | sed 's/^,//g' > tmp1.dat && mv tmp1.dat drawobj.dat
cat drawfeats.dat | sed 's/\t/,/g' | sed 's/^,//g' > tmp1.dat && mv tmp1.dat drawfeats.dat
python plotConvergeGraphs.py drawobj.dat obj $dataset $coding $type
python plotConvergeGraphs.py drawfeats.dat feats $dataset $coding $type
rm drawobj.dat drawfeats.dat
| true
|
b08891701ca365ebaa290dc8bfd03854f3dafe4d
|
Shell
|
paryl-light/puppykit
|
/system/update.sh
|
UTF-8
| 828
| 2.921875
| 3
|
[
"CC-BY-NC-SA-4.0",
"CC-BY-SA-4.0",
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Deploy the puppykit site: sync repo, nginx and systemd config, rebuild the
# docker image, and hand the container over to systemd.
# Abort on the first failure instead of deploying a half-built state.
set -euo pipefail

# Make sure we're on master, with the latest changes, and all local changes have been discarded.
git reset --hard HEAD
git checkout master
git pull

# Sync our nginx config
cp system/puppykit.org.uk /etc/nginx/sites-available/puppykit.org.uk
test -f /etc/nginx/sites-enabled/puppykit.org.uk || ln -s /etc/nginx/sites-available/puppykit.org.uk /etc/nginx/sites-enabled/puppykit.org.uk

# Sync our systemd service
cp system/puppykit.service /etc/systemd/system/docker.puppykit.service
systemctl daemon-reload

# Stop and delete the old version of the site container.
# "|| true": on a first deploy neither the unit nor the container exists.
systemctl stop docker.puppykit || true
docker rm puppykit || true

# Build the new container and launch it, then hand ownership to systemd
# (the direct "docker run" primes the container that the unit restarts).
docker build -t puppykit .
docker run -p 80:80 --name puppykit -d -e WEB_CONCURRENCY="2" puppykit
docker stop puppykit
systemctl start docker.puppykit
| true
|
30392b74fee58cdd8b1ce01a2a8bdcbda668ef54
|
Shell
|
cybertec-postgresql/layman
|
/entrypoint.sh
|
UTF-8
| 658
| 3.625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Container entrypoint: assemble an overlay filesystem from numerically
# named layer directories under /layman/lower and mount it at /layman/merged
# with fuse-overlayfs, then idle to keep the container alive.
set -e

YELLOW="\033[0;33m"
COLOR_OFF="\033[0m"

PATH_TO_BASE="/layman/"
BASE_DIRECTORY="lower/"

# Print a highlighted message without aborting.
function warning {
    echo -e "${YELLOW}${1}${COLOR_OFF}"
}

cd "${PATH_TO_BASE}"
mkdir -p lower upper work merged
cd "${BASE_DIRECTORY}"

lowerdir=""
# Build the colon-separated lowerdir list in version order.
# Fixes vs original: read -r and a quoted here-string so names survive
# backslashes/word-splitting, and ^[0-9]+$ instead of ^[0-9]*$ so an empty
# listing no longer produces a bogus layer entry.
while IFS= read -r line; do
    if [[ "${line}" =~ ^[0-9]+$ ]]; then
        lowerdir+="${PATH_TO_BASE}${BASE_DIRECTORY}${line}:"
    else
        warning "${line} does not match directory pattern"
    fi
done <<< "$(ls -d */ | cut -f1 -d '/' | sort --version-sort)"

# ${lowerdir::-1} on an empty string is a fatal substitution error under
# set -e; fail with a clear message instead.
if [[ -z "${lowerdir}" ]]; then
    warning "no numeric layer directories found under ${PATH_TO_BASE}${BASE_DIRECTORY}"
    exit 1
fi

# ${lowerdir::-1} strips the trailing colon left by the loop above.
/usr/bin/fuse-overlayfs \
    -o lowerdir="${lowerdir::-1}",upperdir=/layman/upper,workdir=/layman/work \
    /layman/merged

# Keep PID 1 alive so the mount persists.
while true; do
    sleep 0.5
done
| true
|
a1e57a9708b0d5f4088a81d8a8e3953e749cd168
|
Shell
|
easement/dotfiles
|
/bin/git-done
|
UTF-8
| 439
| 3.03125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env zsh
# Finish a feature branch: rebase onto origin/master, force-push, fast-forward
# master, push, then delete the branch locally and on the remote.
set -e

# Run a command; on failure report to stderr and abort the whole script.
# Fix: the original used a subshell "(echo ... && exit 1)" whose "exit" only
# left the subshell; a brace group makes the exit terminate the script.
# "$@" (not $*) keeps arguments with spaces intact.
successfully() {
  "$@" || { echo "failed" 1>&2; exit 1; }
}

branch=$(git current)

successfully git fetch
successfully git rebase -i origin/master
# Retry-under-successfully only when the first whitespace check fails, so a
# clean check doesn't abort (preserves the original short-circuit idiom).
git diff master --check || successfully git diff master --check
successfully git push -f
successfully git checkout master
successfully git merge "$branch" --ff-only
successfully git push
successfully git push origin --delete "$branch"
successfully git branch -d "$branch"
| true
|
9b3bcf7358dcb987540354c4e710cc16c02dedac
|
Shell
|
ColquhounAudio/AxiomBuild
|
/step_1_multistrap.sh
|
UTF-8
| 290
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/sh
# Step 1: bootstrap an armhf root filesystem with multistrap and stage the
# qemu user-mode binary so the chroot can be entered from the build host.

BUILD="arm"
ARCH="armhf"
CONF="multistrap-axiomair-pi3.conf"

# Preserve any previously built squashfs image before rebuilding.
[ -f ./Volumio.sqsh ] && mv ./Volumio.sqsh ./Volumio.sqsh.old

# mkdir -p creates build/$BUILD and build/$BUILD/root in one go.
mkdir -p "build/$BUILD/root"

multistrap -a "$ARCH" -f "$CONF"
cp /usr/bin/qemu-arm-static "build/$BUILD/root/usr/bin/"
| true
|
7dec974bd598c6c856b96c93a7cfa4f62ae5e519
|
Shell
|
munkm/thesiscode
|
/submission_scripts/runhybrid.pbs
|
UTF-8
| 719
| 2.953125
| 3
|
[
"CC-BY-4.0"
] |
permissive
|
#!/bin/bash
# Problem: maze2
# PID: 2017-2-14
#
#PBS -N cad_demo
#PBS -o cadis.pbs.out
#PBS -j oe
#PBS -M munkm@ornl.gov
#PBS -l walltime=4:00:00
#PBS -l nodes=1:ppn=16

# Fix: the script uses bash arrays, so the interpreter must be bash
# (the original "#!/bin/sh" only worked where sh happened to be bash).

LAUNCHER=/opt/openmpi-1.6.5-gcc/bin/mpiexec
LAUNCHER_ARGS=""
ADVANTG=/home/m15/install/advantg/bin/advantg

# One subdirectory per ADVANTG problem, relative to the cadis directory.
INPUTS=( maze1 maze2 maze1_up maze2_up prob_1 prob_2 prob_4 therapy beam ueki20 ueki35 )

cd "/home/m15/munk_analysis/demonstration/cadis" || exit 1

for INPUT in "${INPUTS[@]}"
do
    # Fix: guard the cd -- previously a missing problem directory left every
    # subsequent iteration running (and cd ..-ing) from the wrong place.
    if ! cd "./${INPUT}/"; then
        echo ">>> Skipping ${INPUT}: directory not found" >&2
        continue
    fi
    echo "Beginning PBS execution at $(date) for ${INPUT} in $(pwd)"
    echo ">>> PBS nodes: ${PBS_NUM_NODES}"
    echo ">>> PBS cores per node: ${PBS_NUM_PPN}"
    "${ADVANTG}" "${INPUT}.py"
    echo ">>> Finished PBS execution for ${INPUT} at $(date)"
    cd "./../"
done
| true
|
9895e81ae8cbcd51c2c77b5096c5680c63ecf97a
|
Shell
|
durantschoon/dot_files
|
/.aliases
|
UTF-8
| 7,713
| 3.34375
| 3
|
[] |
no_license
|
# -*- mode: sh; -*-
# Personal alias collection, sourced by both bash and zsh; sections below
# are split into common, OS-specific, private/home/work, and zsh-only parts.
# NOTES:
#
# Idea to check a file by Paul Merrill (July 10, 2015)
#
# if on home computer,
# echo See ~/.aliases for the use of this file >> ~/.HOME
#
# if on work computer,
# echo See ~/.aliases for the use of this file >> ~/.WORK
###############################################################################
# Bash Aliases
###############################################################################
alias a='alias'
alias ag='alias | grep'
# c2n / s2n: translate colons / spaces to newlines (\012) -- used by 'path'.
alias c2n='tr ":" "\012"'
alias cx='chmod +x'
alias dm='docker-machine'
alias ds='dirs -v'
alias eg=egrep
alias g=grep
alias gv='grep -v'
alias git_email='echo Your git email is `git config user.email`'
alias git_id_help='git_email; echo Check your git_id aliases for more info which are set in .private_aliases.'
alias h=history
alias hg='history | grep'
alias ipy=ipython
alias ipn='ipython notebook'
alias ll='ls -l'
alias m='less'
alias md='mkdir -p'
alias ms='make shell'
alias mt='make test'
alias path='echo $PATH | c2n'
# pp: pop directory stack and re-list it ('pd' pushes; defined in zsh section).
alias pp='popd; ds'
alias port_scan='lsof -nP +c 15 | { head -1; grep LISTEN; } ' # mac only?
alias py="echo 'Using ipython instead, duh' ; ipython"
alias pylint="pylint --output-format=colorized"
# s.a is redefined below when ~/.private_aliases exists.
alias s.a='source ~/.aliases'
alias s.b='source ~/.bashrc'
alias s2n='tr " " "\012"'
alias spew='wget -qO-' # URL to spew the contents to stdout
alias up='cd ..'
alias up2='cd ../..'
alias up3='cd ../../..'
alias up4='cd ../../../..'
alias wh=which
alias wo=workon
alias ,=pd
alias ,.=pp
# -----------------------------------------------------------------------------
# OS Specific Aliases
# -----------------------------------------------------------------------------
if [[ "$(uname)" == "Darwin" ]]; then
alias oemacs='open -a /Applications/Emacs.app'
alias ssh_fingerprint='ssh-keygen -E SHA256 -lf'
# conda and pypy
# conda create -c conda-forge -n my-pypy-env pypy python=3.9
alias pypy='conda activate my-pypy-env'
elif [[ "$(expr substr $(uname -s) 1 5)" == "Linux" ]]; then
# Do something under Linux platform
alias ssh_fingerprint='ssh-keygen -E md5 -l'
#elif [[ "$(expr substr $(uname -s) 1 10)" == "MINGW32_NT" ]]; then
# Do something under Windows NT platform
# here's a funky way to tell if I'm on windows WSL
# NOTE(review): 'whoami == root' as a WSL detector is a personal heuristic,
# not a reliable platform check -- it also fires for plain root on Linux.
if [[ $(whoami) == root ]]; then
alias llhome='ll -L | egrep -v '\''(NTUSER|ntuser)'\'
alias llhomea='ll -aL | egrep -v '\''(NTUSER|ntuser)'\'
alias cdhome='cd ~/wsl_user; ll -L | egrep -v '\''(NTUSER|ntuser)'\' # ~/wsl_user is a link to my user dir
fi
fi
# -----------------------------------------------------------------------------
# Private aliases
# -----------------------------------------------------------------------------
if [ -e ~/.private_aliases ]; then
# Redefine s.a so re-sourcing picks up the private aliases too.
alias s.a='source ~/.aliases; source ~/.private_aliases'
source ~/.private_aliases
fi
# -----------------------------------------------------------------------------
# Aliases at home
# -----------------------------------------------------------------------------
# ~/.HOME is a marker file created by hand (see NOTES at top of file).
if [ -e ~/.HOME ]; then
alias proj='cd ~/Programming/PROJECTS'
alias dox='cd ~/Documents'
alias blog='cd ~/Documents/Blog'
fi
# -----------------------------------------------------------------------------
# Aliases for work (private repo to protect employer)
# -----------------------------------------------------------------------------
[ -f ~/dot_aliases/dot_aliases ] && source ~/dot_aliases/dot_aliases # bitbucket
###############################################################################
# Zsh Aliases
###############################################################################
# Everything in this section uses zsh-only features: glob qualifiers like
# *(.om[1]), global aliases (alias -g), '=cmd' to bypass aliases/functions,
# and zsh's permissive one-line function bodies.
if [ -n "$ZSH_VERSION" ]; then
# assume Zsh
alias ctags='=ctags -eR'
alias funcs='print -l ${(ok)functions}' # list all functions
alias s.z='source ~/.zshrc'
alias s.ze='source ~/.zshenv'
alias s.zp='source ~/.zprofile'
alias s.zall="s.ze && echo '~/.zshenv'; [ -f ~/.zprofile ] && s.zp && echo '~/.zprofile'; s.z && echo '~/.zshrc'"
alias lth='ls -lt | head; latest=`ls *(.om[1])`' # . regular file, o sorted, m mod time, 1 first
alias lthd='ls -lt | head; latest=`ls -d *(/om[1])`' # / directory, o sorted, m mod time, 1 first
# z move?
# zmv -W '*.lis' '*.txt'
alias zmvw='zmv -W'
# try these
alias -g latestd='*(/om[1])' # newest directory
alias -g latestf='*(.om[1])' # newest file
alias llrm='echo "Paste ls -lt output for files to delete and ^D when done"; /bin/rm -rf `awk '\''{print $9}'\''`'
# ee: echo a command line then run it.
ee () { echo $* ; $* }
# NOTE(review): this shadows rm -- on macOS it moves args to ~/.Trash
# instead of deleting; use '=rm' or 'builtin rm' for a real remove.
rm() { [[ "$(uname)" == "Darwin" ]] && (mv $* ~/.Trash) || builtin rm $* }
function rg () {
[[ -d .git/ ]] && echo 'skipping .git/ dir ...';
=rg --hidden $1 -g '!.git/*' $*
}
function rg_with_git () { =rg --hidden $* }
# psg: ps listing filtered by pattern, minus the grep itself.
function psg () {
ps -ef | grep "$@" | grep -v grep
}
# mdc: mkdir -p a directory and cd into it.
function mdc () {
mkdir -p $1
cd $1
}
# alias pd='pushd; ds'
function pd () {
pushd $1
ds
}
# p: print the absolute path of a file in the current directory.
function p () {
ls -d $PWD/$1
}
function pdfsearch() {
pdfgrep -nir "$@" .
}
# -----
# spacemacs
# Toggle ~/.emacs.d between the stock config and a spacemacs checkout by
# repointing the symlink; requires both backing dirs to exist.
function switch_spacemacs() {
local EMACS_D_LINK=$HOME/.emacs.d
local EMACS_ORIG_D=$HOME/.emacs.d_ORIG_EMACS
local SPACEMACS_D=$HOME/.emacs.d_SPACEMACS
if [[ -d $EMACS_ORIG_D && -d $SPACEMACS_D && -h $EMACS_D_LINK ]]; then
if [[ "$(readlink $EMACS_D_LINK)" == "$EMACS_ORIG_D" ]]; then
=rm $EMACS_D_LINK
ln -s $SPACEMACS_D $EMACS_D_LINK
echo "Switched to spacemacs"
elif [[ "$(readlink $EMACS_D_LINK)" == "$SPACEMACS_D" ]]; then
=rm $EMACS_D_LINK
ln -s $EMACS_ORIG_D $EMACS_D_LINK
echo "Switched to original emacs"
fi
else
echo "Not configured to test spacemacs"
fi
}
# -----
# $1 = type; 0 - both, 1 - tab, 2 - title
# rest = text
# Emits the xterm OSC escape sequence that sets the terminal tab/title text.
setTerminalText () {
# echo works in bash & zsh
local mode=$1 ; shift
echo -ne "\033]$mode;$@\007"
}
stt_both () { setTerminalText 0 $@; }
stt_tab () { setTerminalText 1 $@; }
stt_title () { setTerminalText 2 $@; }
# mac
###############################################################################
# global aliases
# example: C
# from: http://grml.org/zsh/zsh-lovers.html
# $latest alias -g C='| wc -l'
# $ grep alias ~/.zsh/* C
# 443
# Global aliases expand anywhere on the command line, not just in command
# position -- e.g. 'grep foo bar C' becomes 'grep foo bar | wc -l'.
alias -g ...='../..'
alias -g ....='../../..'
alias -g .....='../../../..'
alias -g CA="2>&1 | cat -A"
alias -g C='| wc -l'
alias -g D="DISPLAY=:0.0"
alias -g DN=/dev/null
alias -g ED="export DISPLAY=:0.0"
alias -g EG='|& egrep'
alias -g EH='|& head'
alias -g EL='|& less'
alias -g ELS='|& less -S'
alias -g ETL='|& tail -20'
alias -g ET='|& tail'
alias -g F=' | fmt -'
alias -g G='| egrep'
alias -g H='| head'
alias -g HL='|& head -20'
alias -g Sk="*~(*.bz2|*.gz|*.tgz|*.zip|*.z)"
alias -g LL="2>&1 | less"
alias -g L="| less"
alias -g LS='| less -S'
alias -g MM='| most'
alias -g M='| more'
alias -g NE="2> /dev/null"
alias -g NS='| sort -n'
alias -g NUL="> /dev/null 2>&1"
alias -g PIPE='|'
alias -g R=' > /c/aaa/tee.txt '
alias -g RNS='| sort -nr'
alias -g S='| sort'
alias -g TL='| tail -20'
alias -g T='| tail'
alias -g US='| sort -u'
alias -g VM=/var/log/messages
alias -g X0G='| xargs -0 egrep'
alias -g X0='| xargs -0'
alias -g XG='| xargs egrep'
alias -g X='| xargs'
# elif [ -n "$BASH_VERSION" ]; then
# assume Bash
fi
| true
|
3173af81bf8c8eaa1636397b4c950b400c61eac9
|
Shell
|
bofhbug/pxe-server
|
/start.sh
|
UTF-8
| 830
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/bash
# Stop the host's NFS services (they would conflict with the container's
# published NFS/RPC ports), then run the 3mdeb/pxe-server container with
# all TFTP/NFS/HTTP ports exposed and the boot payloads bind-mounted in.
set -e
# NOTE(review): the original checked "$? -ne 0" immediately after 'set -e',
# where $? is always 0 -- presumably a leftover guard from a removed
# 'docker build' step, so the dead check has been dropped.
sudo service nfs-kernel-server stop
sudo service rpcbind stop
docker run --rm --name pxeserver --privileged \
  -p 111:111/tcp -p 2049:2049/tcp -p 8000:8000/tcp \
  -p 627:627/tcp -p 627:627/udp -p 875:875/tcp -p 875:875/udp \
  -p 892:892/tcp -p 892:892/udp -p 111:111/udp -p 2049:2049/udp \
  -p 10053:10053/udp -p 10053:10053/tcp \
  -p 32769:32769/tcp -p 32769:32769/udp \
  -p 32765:32765/tcp -p 32765:32765/udp \
  -p 32766:32766/tcp -p 32766:32766/udp \
  -p 32767:32767/tcp -p 32767:32767/udp \
  -v "${PWD}/netboot:/srv/http" \
  -v "${PWD}/debian/debian-stable:/srv/nfs/debian" \
  -v "${PWD}/voyage:/srv/nfs/voyage" \
  -v "${PWD}/xen:/srv/nfs/xen" \
  -t -i 3mdeb/pxe-server /bin/bash -c \
  "bash /usr/local/bin/run.sh"
| true
|
c4c568d508f3c1e8c99e6e37694ee917a79859f0
|
Shell
|
AshishGL/jms
|
/wmq-installer/functions.sh
|
UTF-8
| 4,363
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/bash
# Shell functions for unix. This file is intended to be sourced by other scripts.
# Courtesy of the IBM Hursley Lab.
ECHON=${ECHON-echo}
me=$(basename $0)
##############################################################################
# regexify
##############################################################################
# Convert a string to a regular expression that matches only the given string.
#
# Parameters
# 1 - The string to convert
##############################################################################
regexify() {
# Won't work on HPUX
echo $1 | sed -r 's/([][\.\-\+\$\^\\\?\*\{\}\(\)\:])/\\\1/g'
}
###############################################
# delAndAppend
###############################################
# Delete a line containing a REGEX from a file,
# then append a new line.
#
# Parameters
# 1 - The REGEX to search for and delete
# 2 - The line to append
# 3 - The file to edit
###############################################
delAndAppend() {
    echo "Updating entry in $3: $2"
    # Drop any (possibly indented) line matching the regex, then append.
    # The regex is handed to awk through the environment (ENVIRON) rather
    # than spliced unquoted into the program text, so patterns containing
    # spaces, quotes or backslashes cannot break or alter the script.
    re="$1" awk '$0 !~ ("^[[:space:]]*" ENVIRON["re"]) { print }' "$3" > "$3.new"
    mv "$3.new" "$3"
    echo "$2" >> "$3"
}
###################################################
# backupFile
###################################################
# Copy the given file to a file with the same path,
# but a timestamp appended to the name.
#
# Parameters
# 1 - The name of the file to backup
###################################################
backupFile() {
    # Quoted path (space-safe), '--' to guard odd names, $() over backticks.
    cp -- "$1" "$1.$(date +%Y-%m-%d.%H%M)"
}
#############################################
# updateSysctl
# Update values in the /etc/sysctl.conf file.
#
# Parameters
# 1 - The value to update
# 2 - The new value
#############################################
updateSysctl() {
    # Escape the key so delAndAppend matches it literally (keys contain '.').
    delAndAppend "$(regexify "$1")" "$1 = $2" /etc/sysctl.conf
}
#############################################
# UpdateSysctl
# Update values in the /etc/sysctl.conf file.
#
# Parameters
# none
#############################################
# Backs up /etc/sysctl.conf, appends a dated marker, rewrites a fixed set
# of kernel tunables via updateSysctl, then applies them with sysctl -p.
UpdateSysctl() {
# First we need to make a backup of existing kernel settings before we change anything
backupFile /etc/sysctl.conf
# Now can set some new values
# System tuning (sysctl)
echo "" >> /etc/sysctl.conf
echo "# The following values were changed by $me [`date`]." >> /etc/sysctl.conf
# The maximum number of file-handles that can be held concurrently
# NOTE(review): $FDMAX must be set by the caller's environment -- confirm
# it is exported before this function runs, or the entry becomes empty.
updateSysctl fs.file-max $FDMAX
# The maximum and minimum port numbers that can be allocated to outgoing connections
updateSysctl net.ipv4.ip_local_port_range '1024 65535'
# From what I can gather, this is the maximum number of disjoint (non-contiguous),
# sections of memory a single process can hold (i.e. through calls to malloc).
# This doesn't mean that a process can have no more variables than this,
# but performance may become degraded if this the number of variables exceeds this value,
# as the OS has to search for memory space adjacent to an existing malloc.
# This is my best interpretation of stuff written on the internet; it could be completely wrong!
# - Rowan (10/10/2013)
updateSysctl vm.max_map_count 1966080
# The maximum PID value. When the PID counter exceeds this, it wraps back to zero.
updateSysctl kernel.pid_max 4194303
# Tunes IPC semaphores. Values are:
# 1 - The maximum number of semaphores per set
# 2 - The system-wide maximum number of semaphores
# 3 - The maximum number of operations that may be specified in a call to semop(2)
# 4 - The system-wide maximum number of semaphore identifiers
updateSysctl kernel.sem '1000 1024000 500 8192'
# The maximum size (in bytes) of an IPC message queue
updateSysctl kernel.msgmnb 131072
# The maximum size (in bytes) of a single message on an IPC message queue
updateSysctl kernel.msgmax 131072
# The maximum number of IPC message queues
updateSysctl kernel.msgmni 2048
# The maximum number of shared memory segments that can be created
updateSysctl kernel.shmmni 8192
# The maximum number of pages of shared memory
updateSysctl kernel.shmall 536870912
# The maximum size of a single shared memory segment
updateSysctl kernel.shmmax 137438953472
# TCP keep alive setting
updateSysctl net.ipv4.tcp_keepalive_time 300
echo "" >> /etc/sysctl.conf
# there is a bug in RHEL where bridge settings are set by default and they should not be, so we have to pass '-e' option here
# read more: https://bugzilla.redhat.com/show_bug.cgi?id=639821
sysctl -e -p
}
| true
|
f31ad960ffae6e3d1d24d5431770b08b311ea1f2
|
Shell
|
didactic-drunk/sodium.cr
|
/build/pkg-libs.sh
|
UTF-8
| 270
| 2.890625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Print the pkg-config linker flags for libsodium, building/installing the
# library first if needed.  Invoked from lib_sodium.cr with the shard root
# directory as $1.
set -e
# Shard directory passed as first argument when called from lib_sodium.cr
[ -n "$1" ] && cd "$1"
# Capture the noisy install log; dump it to stderr only on failure.
# '{ ...; }' runs in the current shell, so 'exit 2' really aborts the
# script (the original subshell exit only happened to work via set -e).
./build/libsodium_install.sh > libsodium_install.out 2>&1 || { cat libsodium_install.out >&2; exit 2; }
. ./build/env.sh
pkg-config libsodium --libs
| true
|
bf0394dd38a77c3671d0fbac59deca6edaa2bede
|
Shell
|
ninoc/JWSTUserTraining2016
|
/install_pandeia
|
UTF-8
| 843
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
# Install the Pandeia engine plus its reference data and the CDBS files
# from a USB drive (or a directory holding its contents), unpacking both
# tarballs into the current directory and exporting the data paths.
#
# Usage: install_pandeia <path-to-data>
# specify path to USB drive or directory comtaining its contents
data_path=$1
data_path=${data_path##* }   # keep only the last whitespace-separated word
if [[ -z "${data_path// }" ]]; then
  echo "No path specified."
else
  tarball="$data_path/pandeia.tgz"
  cdbs="$data_path/cdbs.tgz"
  # Quoted tests so paths containing spaces (common on USB media) work.
  if [ -f "$tarball" ] && [ -f "$cdbs" ]; then
    echo "Unpacking $tarball in current directory..."
    tar xvvzf "$tarball"
    echo "Unpacking $cdbs in current directory..."
    tar xvvzf "$cdbs"
    export pandeia_refdata="$PWD/pandeia_data"
    export PYSYN_CDBS="$PWD/cdbs.23.1.rc3"
    cd pandeia/engine
    python setup.py install
    cd -
    echo "Done."
    echo "pandeia_refdata set to $pandeia_refdata."
    echo "PYSYN_CDBS set to $PYSYN_CDBS."
  else
    echo "Pandeia or CDBS data is missing from $data_path."
  fi
fi
| true
|
91c765a341a32416b51acf9b298ce4cd735c6426
|
Shell
|
Arau/arau.github.io
|
/build/remove-aliases.sh
|
UTF-8
| 1,438
| 4.40625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Strip 'aliases:' entries from the metadata section of every markdown
# file in a previous docs release, so stale URL aliases are not kept.
# Check input argument supplied
if [ -z "$1" ]; then
  echo "Remove aliases from metadata section of files in previous docs release"
  echo "Usage: $(basename "$0") release-dir"
  echo "Example:"
  echo -e "\t $(basename "$0") v2.4"
  exit 1
fi
bin_dir="$(dirname "$0")"
previous_release="$bin_dir/../content/$1"
# List all files in the input path containing the pattern: "aliases:"
# mapfile keeps one file name per array element even when names contain
# spaces, unlike word-splitting an unquoted command substitution.
mapfile -t files_with_aliases < <(grep -R -l "aliases:" "$previous_release")
for source_file in "${files_with_aliases[@]}"; do
  echo "Applying changes for $source_file"
  # 1. Select the lines in between 2 patterns and apply a command to them
  # * pattern 1: a line that starts with the word "aliases"
  # * pattern 2: anything that starts with a letter or starts with "---"
  # * command: Delete the lines in between the patterns (not including the matching lines themselves)
  #
  # ...The second command takes the output of the first command as input:
  #
  # 2. Select the lines in between 2 patterns and apply a command to those
  # * pattern 1: a line that starts with "---"
  # * pattern 2: a line that starts with "---"
  # * command: delete the line that starts with the word "aliases"
  # These 2 commands together ensure changes are only made to the metadata section of the markdown files.
  sed -i '/^aliases/,/^[a-zA-Z]\|^---/{//!d}; /^---/,/^---/{ /^aliases:/d };' "$source_file"
done
| true
|
ea68aa71feaa4c651d063513d682377480ea9fde
|
Shell
|
boywert/SussexBigRun2013
|
/L-Galaxies_Hen12/batch_apollo_nore.pbs
|
UTF-8
| 2,100
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
# SGE task-farm job: each task builds an L-Galaxies input file for one
# tree file (index = task id - 1) and runs the L-Galaxies binary on it.
# Tell SGE that we are using the bash shell
#$ -S /bin/bash
# Example file to create a task-farm of identical jobs on Apollo
# Do not lmit the stacksize (the maximum memory a job can use)
#ulimit -s unlimited
# Do not limit the number of open files a job can have
#ulimit -n unlimited
# Run the job from the following directory
#cd /mnt/lustre/scratch/petert/L-Galaxies/L-Galaxies
# Created files will have the fs-virgo group
# This feature seems to be disabled on Apollo, so this does not work
#newgrp fs-virgo
# Created files will have rw permission for the group and r for the world
umask 002
# Set pathnames below relative to the current working directory
#$ -cwd
# Say which queue you want to submit to
#$ -q mps_amd.q
# Define a task farm of jobs
#$ -t 1-216
# Limit to 20 concurrent jobs
#$ -tc 20
# Join standard error to standard out
#$ -j y
# Give the job a name
#$ -N Lgalaxy_nore
# Name and location of the output file
# SGE will only substitute certain variables here
#$ -o logs/$JOB_NAME_$TASK_ID.log
# The parentheses here allow one to do algebra with shell variables
i=$(($SGE_TASK_ID -1))
echo Running on file $i
# Run jobs
inputfolder="input_cubep3m/"
mkdir -p $inputfolder
template="input/input_template"
exec=./L-Galaxies
OutputDir=/mnt/lustre/scratch/cs390/AHF_halos/cubepm_130315_6_1728_47Mpc_ext2/outputs_nore/
SimulationDir=/mnt/lustre/scratch/cs390/AHF_halos/cubepm_130315_6_1728_47Mpc_ext2/
ReionizationOn=0
lastsnap=75
mkdir -p $OutputDir
mkdir -p $inputfolder
# Build the per-task parameter file: task-specific keys first, then the
# shared template appended at the end.
filename="${inputfolder}/input_nr_${i}"
echo "FirstFile" $i > $filename
echo "LastFile" $i >> $filename
echo "OutputDir" $OutputDir >> $filename
echo "SimulationDir" $SimulationDir >> $filename
echo "FileWithZList" "${SimulationDir}treedata/lgal_zlist.txt" >> $filename
echo "FileWithZList_OriginalCosm" "${SimulationDir}treedata/lgal_zlist.txt" >> $filename
echo "LastDarkMatterSnapShot" $lastsnap >> $filename
echo "FileWithOutputRedshifts" "${SimulationDir}treedata/zlist.txt" >> $filename
echo "ReionizationOn" $ReionizationOn >> $filename
cat $template >> $filename
$exec $filename
| true
|
03f31cf35b32a2386482e036e483d1e8984ab48a
|
Shell
|
DesaiLab/Ameriflux
|
/metobs_scripts/lcclean.bash
|
UTF-8
| 222
| 3.1875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Remove the files that were previously extracted from the LostCreek
# monthly ingest tarball(s), using the archive's member list as the
# manifest of what to delete.
DATE=201406
echo "$DATE"
WORKINGDIR=/mnt/data/ingest/LostCreek
echo "$WORKINGDIR"
# Abort if the ingest directory is missing -- otherwise the deletions
# below would run against whatever the current directory happens to be.
cd "$WORKINGDIR" || exit 1
pwd
# 'tar -tf' prints one member name per line; unlike parsing column 6 of
# 'tar -tvf' output, this survives names containing spaces and the
# differing verbose-listing layouts of tar implementations.
tar -tf LostCreek_"${DATE}"*.gz | while IFS= read -r fn; do
  echo "removing $fn"
  rm -f -- "$fn"
done
| true
|
7b261f945eade1b5b8d79059fca5528b2d197202
|
Shell
|
as3mbus/dotfiles
|
/scripts/battery.sh
|
UTF-8
| 172
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
# Print the BAT0 charge level right-aligned in three columns followed by
# " %", with no trailing newline (suitable for a status bar).
Battery()
{
  local capacity
  capacity=$(awk '{printf "%3.0f %", $1}' /sys/class/power_supply/BAT0/capacity)
  echo -en "$capacity"
}
Battery
| true
|
1d7b7720156f312e7014f48ba7e526f30a26b53d
|
Shell
|
jairrf/525-infraagil
|
/devops/lib/dpkg/info/cloud-init.prerm
|
UTF-8
| 727
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/sh
# Debian maintainer script (prerm) for cloud-init: removes the update
# cron job, stops the systemd units on package removal, and cleans up
# compiled Python artifacts.  The marked sections are debhelper-generated.
set -e
rm -f /etc/cron.d/cloudinit-updates
# Automatically added by dh_systemd_start
if [ -d /run/systemd/system ] && [ "$1" = remove ]; then
deb-systemd-invoke stop cloud-config.service cloud-config.target cloud-final.service cloud-init-local.service cloud-init.service cloud-init.target >/dev/null
fi
# End automatically added section
# Automatically added by dhpython:
if which py3clean >/dev/null 2>&1; then
py3clean -p cloud-init
else
# Fallback when py3clean is unavailable: delete each packaged module's
# __pycache__ entries, then prune emptied __pycache__ directories.
dpkg -L cloud-init | perl -ne 's,/([^/]*)\.py$,/__pycache__/\1.*, or next; unlink $_ or die $! foreach glob($_)'
find /usr/lib/python3/dist-packages/ -type d -name __pycache__ -empty -print0 | xargs --null --no-run-if-empty rmdir
fi
# End automatically added section
| true
|
87bc83eccd186df27b9d6936f5652474de2fad0f
|
Shell
|
alphacomm/alpharpc
|
/bin/start-handlers
|
UTF-8
| 1,186
| 3.53125
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
# Launch the AlphaRPC handlers and an example worker, each in its own
# window of a detached GNU screen session named $SCREEN_NAME.
# Create the detached screen session with a shell window, set the status
# line, and disable PROMPT_COMMAND inside it (pointed at 'true').
setupScreen() {
TRUE=`which true`
SCREEN_NAME=${SCREEN_NAME:-alpharpc}
screen -d -m -S $SCREEN_NAME -t shell -s /bin/bash
sleep 1
if [ -z "$SCREEN_HARDSTATUS" ]; then
SCREEN_HARDSTATUS='%{= .} %-Lw%{= .}%> %n%f %t*%{= .}%+Lw%< %-=%{g}(%{d}%H/%l%{g})'
fi
screen -r $SCREEN_NAME -X hardstatus alwayslastline "$SCREEN_HARDSTATUS"
screen -r $SCREEN_NAME -X setenv PROMPT_COMMAND $TRUE
}
# Helper to launch a service in a named screen
# screen_it service "command-line"
# Types the command into the new window via 'stuff'; on failure the window
# writes a marker file under $SERVICE_DIR.
# NOTE(review): $SERVICE_DIR is never set in this script -- presumably it
# is exported by the calling environment; confirm before relying on the
# .failure marker files.
screen_it() {
SCREEN_NAME=${SCREEN_NAME:-alpharpc}
screen -S $SCREEN_NAME -X screen -t $1
# sleep to allow bash to be ready to be send the command - we are
# creating a new window in screen and then sends characters, so if
# bash isn't running by the time we send the command, nothing happens
sleep 1.5
NL=`echo -ne '\015'`
screen -S $SCREEN_NAME -p $1 -X stuff "$2 || echo \"$1 failed to start\" | tee \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\"$NL"
}
setupScreen
screen_it worker-handler bin/alpharpc-worker-handler
sleep 1
screen_it helloworld-worker examples/worker-reverse.php
screen_it client-handler bin/alpharpc-client-handler
| true
|
072ae440186ad8b5e256d9fff3b619f87e166b09
|
Shell
|
learning-chip/SpLLT
|
/scripts/create_cases_files.sh
|
UTF-8
| 3,203
| 4.1875
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Default configuration for the Fortran include-file generator.
NCASE=10                      # number of select-statement cases to generate
preprocess=0                  # -E flag: run gfortran -E on the raw files
generate=0                    # -c flag: generate the raw case files
err=0                         # last generator exit status
OUTPUTPATH="../src/include"   # destination of preprocessed include files
OMPDECLPATH="../src/include"  # location of the OMP declaration includes
file_suffixe=''

# bold STRING -- print STRING wrapped in ANSI bold escape codes.
function bold(){
  local msg="${1}"
  echo -e "\033[1m${msg}\033[0m"
}
# Print the man-page-style help text.  Relies on the globals NCASE and
# DESC (set elsewhere in this file) and on the sibling function 'bold'.
function print_chelp(){
echo -e "\t\t\t\tGeneral Commands Manual\t\t\t\t"
bold "NAME"
echo -e "\t$0 - Creates Fortran include files"
bold "SYNOPSIS"
echo -e "\t$0 [OPTIONS]"
bold "DESCRIPTION"
echo -e "\t${DESC}"
bold "OPTIONS"
echo -e "\t-n <integer : default ${NCASE}>"
echo -e "\t\t\tthe number of cases to generate"
echo -e "\t-c"
echo -e "\t\t\tgenerates raw cases files"
echo -e "\t-E"
echo -e "\t\t\tpreprocesses the raw cases files"
}
DESC="This script generates the Fortran file that should be included into "
DESC+="a source code.\n\tThe general usage is to generate the raw files and "
DESC+="then preprocess them."
DESC+="\n\tThis corresponds to a standard usage of : $0 -c -E"
# Hand-rolled argument parsing: -h/--help, -n <count>, -c (generate),
# -E (preprocess).  Unknown options abort with an error.
if [ $# -gt 0 ];then
while [ $# -gt 0 ]; do
case $1 in
"-h" | "--help")
print_chelp
exit 0
;;
"-n")
shift
NCASE=$1
shift
;;
"-c")
generate=1
shift
;;
"-E")
preprocess=1
shift
;;
*)
echo -e "Error in parameter $1\n\tPlease use -h option."
exit 1
esac
done
fi
# NOTE(review): 'test -a' is obsolescent; '[ x ] && [ y ]' would be the
# portable spelling -- left as-is to keep the code byte-identical.
if [ ${preprocess} -eq 0 -a ${generate} -eq 0 ]; then
echo "Nothing to do. Please rerun with -h option"
exit 0
fi
# One solve step per entry; each produces a "nodep" file (zero-dependency
# special case) and a "cases" file (select statement for the k-ary tree).
raw_name=("fwd_block${file_suffixe}" "fwd_update${file_suffixe}"
"bwd_block${file_suffixe}" "bwd_update${file_suffixe}"
"bwd_node${file_suffixe}")
for step in ${raw_name[*]}; do
#workFile="${OUTPUTPATH}/spllt_solve_${step}_worker.F90.inc"
workFile="spllt_solve_${step}_worker.F90.inc"
ompDeclFile="${OMPDECLPATH}/spllt_solve_${step}_omp_decl.F90.inc"
#Generate special case of 0 dependency
raw_file="raw_${step}_nodep.F90"
output="${OUTPUTPATH}/spllt_${step}_nodep.F90"
if [ ${generate} -eq 1 ]; then
bash generator_case_omp.sh --n_min 0 -o ${raw_file} -w ${workFile} --ompFile ${ompDeclFile}
err=$?
fi
if [ ${err} -eq 0 ]; then
if [ ${preprocess} -eq 1 ]; then
if [ -e ${raw_file} ]; then
echo "Preprocess ${raw_file} with gfortran : output ${output}"
gfortran -E -P ${raw_file} -o ${output}
else
echo "Unable to preprocess ${raw_file} : file does not exist."
fi
fi
else
echo "An error occurs during the generation of ${raw_file}."
fi
# Generate cases of the select statement used in the k-ary tree
raw_file="raw_${step}_cases.F90"
output="${OUTPUTPATH}/spllt_${step}_cases.F90"
if [ ${generate} -eq 1 ]; then
bash generator_case_omp.sh -n ${NCASE} -o ${raw_file} -w ${workFile} --ompFile ${ompDeclFile}
err=$?
fi
if [ ${err} -eq 0 ]; then
if [ ${preprocess} -eq 1 ]; then
if [ -e ${raw_file} ]; then
echo "Preprocess ${raw_file} with gfortran : output ${output}"
gfortran -E -P ${raw_file} -o ${output}
else
echo "Unable to preprocess ${raw_file} : file does not exist."
fi
fi
else
echo "An error occurs during the generation of ${raw_file}."
fi
done
exit 0
| true
|
dcb76732b45eadbd7d37a29d719c1445f89127e6
|
Shell
|
benhager/di
|
/di-handbrake-nightly.sh
|
UTF-8
| 2,216
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/zsh
# Purpose: download and install HandBrake nightly
#
# From: Tj Luo.ma
# Mail: luomat at gmail dot com
# Web: http://RhymesWithDiploma.com
# Date: 2014-08-18
## HandBrake has a Sparkle feed, but it seems vastly out of date
# XML_FEED='https://handbrake.fr/appcast_unstable.x86_64.xml'
# zsh modifiers: $0:t:r = basename of the script without extension.
NAME="$0:t:r"
die ()
{
echo "$NAME: $@"
exit 1
}
INSTALL_TO="/Applications/HandBrake.app"
INSTALLED_VERSION=`defaults read "$INSTALL_TO/Contents/Info" CFBundleShortVersionString 2>/dev/null | awk '{print $1}' || echo '1.0.0'`
UA='curl/7.21.7 (x86_64-apple-darwin10.8.0) libcurl/7.21.7 OpenSSL/1.0.0d zlib/1.2.5 libidn/1.22'
# zsh: $+commands[lynx] is 1 iff an external 'lynx' command exists.
if ((! $+commands[lynx] ))
then
# note: if lynx is a function or alias, it will come back not found
echo "$NAME: lynx is required but not found in $PATH"
exit 1
fi
# Scrape the nightly page for the macOS GUI .dmg link (skip the CLI build).
URL=`lynx -listonly -dump -nomargins -nonumbers 'http://handbrake.fr/nightly.php' | fgrep -i .dmg | fgrep -iv "CLI"`
# if there URL is empty, give up
[[ "$URL" == "" ]] && die "URL is empty"
# Derive a version string from the file name, e.g. HandBrake-XXXX-osx.dmg.
LATEST_VERSION=`echo "$URL:t:r" | sed 's#HandBrake-##g; s#-osx##g'`
##### This does not work for some reason
## function version { echo "$@" | awk -F. '{ printf("28%03d%03d%03d\n", $1,$2,$3,$4); }'; }
## if [ $(version ${LATEST_VERSION}) -le $(version ${INSTALLED_VERSION}) ]
if [[ "$LATEST_VERSION" == "$INSTALLED_VERSION" ]]
then
# No Update Needed
echo "$NAME: Up To Date (Installed: $INSTALLED_VERSION and Latest: $LATEST_VERSION)"
exit 0
fi
echo "$NAME: Out of Date: $INSTALLED_VERSION vs $LATEST_VERSION"
FILENAME="$HOME/Downloads/$URL:t"
echo "$NAME: Downloading $URL to $FILENAME"
curl -A "$UA" --continue-at - --progress-bar --fail --location --output "$FILENAME" "$URL"
# Mount the dmg and extract its mount point from hdiutil's plist output.
MNTPNT=$(hdiutil attach -nobrowse -plist "$FILENAME" 2>/dev/null \
| fgrep -A 1 '<key>mount-point</key>' \
| tail -1 \
| sed 's#</string>.*##g ; s#.*<string>##g')
if [ -e "$INSTALL_TO" ]
then
# move installed version to trash
mv -vf "$INSTALL_TO" "$HOME/.Trash/HandBrake.$INSTALLED_VERSION.app"
fi
echo "$NAME: Installing $FILENAME to $INSTALL_TO:h/"
ditto --noqtn -v "$MNTPNT/HandBrake.app" "$INSTALL_TO"
# Prefer a user-supplied unmount helper when present, else diskutil.
if (( $+commands[unmount.sh] ))
then
unmount.sh "$MNTPNT"
else
diskutil eject "$MNTPNT"
fi
exit 0
#
#EOF
| true
|
cb925d2fd58b9385b6a45fba2a4e6760e61d3f11
|
Shell
|
krnvi/shell-scripts
|
/wpp1.sh
|
UTF-8
| 6,446
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/ksh
#
# Usage: wpp1.sh <startdate>
# NOTE(review): $1 is used as the forecast start date (passed to ndate.exe
# and as a directory name under $MAIN/nmm_output) -- presumably YYYYMMDDHH;
# confirm against the calling workflow.
set -x
# August 2005: Hui-Ya Chuang, NCEP: This script uses
# NCEP's WRF-POSTPROC to post processes WRF native model
# output, and uses copygb to horizontally interpolate posted
# output from native A-E to a regular projection grid.
#
# July 2006: Meral Demirtas, NCAR/DTC: Added new "copygb"
# options and revised some parts for clarity.
#
#--------------------------------------------------------
# This script performs 2 jobs:
#
# 1. Run WRF-POSTPROC
# 2. Run copygb to horizontally interpolate output from
# native A-E to a regular projection grid
#--------------------------------------------------------
# Set path to your top directory and your run dorectory
#
export TOP_DIR=/home/OldData/windpowerFcst/NMMV3.2.1
export DOMAINPATH=${TOP_DIR}/WPPV3/rrun
export NPRD=${DOMAINPATH}/postprd
export MOVDIR=${DOMAINPATH}/poutput
#Specify Dyn Core (ARW or NMM in upper case)
dyncore="NMM"
if [ $dyncore = "NMM" ]; then
export tag=NMM
elif [ $dyncore = "ARW" ]; then
export tag=NCAR
else
echo "${dyncore} is not supported. Edit script to choose ARW or NMM dyncore."
exit
fi
# Specify forecast start date
# fhr is the first forecast hour to be post-processed
# lastfhr is the last forecast hour to be post-processed
# incrementhr is the incement (in hours) between forecast files
export startdate=$1
export fhr=00
export lastfhr=12
export incrementhr=01
export incrementmin=15
export lastmin=59
# Path names for WRF_POSTPROC and WRFV3
export WRF_POSTPROC_HOME=${TOP_DIR}/WPPV3
export POSTEXEC=${WRF_POSTPROC_HOME}/exec
export SCRIPTS=${WRF_POSTPROC_HOME}/scripts
export WRFPATH=${TOP_DIR}/WRFV3
# cd to working directory
cd ${DOMAINPATH}/postprd1
# Link Ferrier's microphysic's table and WRF-POSTPROC control file,
ln -fs ${WRFPATH}/run/ETAMPNEW_DATA eta_micro_lookup.dat
ln -fs ${DOMAINPATH}/parm/wrf_cntrl.parm .
export tmmark=tm00
export MP_SHARED_MEMORY=yes
export MP_LABELIO=yes
#######################################################
# 1. Run WRF-POSTPROC
#
# The WRF-POSTPROC is used to read native WRF model
# output and put out isobaric state fields and derived fields.
#
#######################################################
pwd
ls -x
export NEWDATE=$startdate
# Outer loop: forecast hours fhr..lastfhr in steps of incrementhr.
# Inner loop: minutes 00..lastmin in steps of incrementmin, per domain.
while [ $fhr -le $lastfhr ] ; do
# ksh: zero-pad fhr to 3 digits (e.g. 000, 001, ...).
typeset -Z3 fhr
NEWDATE=`${POSTEXEC}/ndate.exe +${fhr} $startdate`
YY=`echo $NEWDATE | cut -c1-4`
MM=`echo $NEWDATE | cut -c5-6`
DD=`echo $NEWDATE | cut -c7-8`
HH=`echo $NEWDATE | cut -c9-10`
echo 'NEWDATE' $NEWDATE
echo 'YY' $YY
export min=00
while [ $min -le $lastmin ] ; do
#for domain in d01 d02 d03
for domain in d01
do
# itag tells wrfpost which wrfout file to read and for which valid time.
# NOTE(review): $MAIN is taken from the environment, never set here --
# confirm the calling workflow exports it.
cat > itag <<EOF
$MAIN/nmm_output/$1/wrfout_${domain}_${YY}-${MM}-${DD}_${HH}:${min}:00
netcdf
${YY}-${MM}-${DD}_${HH}:${min}:00
${tag}
EOF
#-----------------------------------------------------------------------
# Run wrfpost.
#-----------------------------------------------------------------------
#rm fort.*
ln -sf wrf_cntrl.parm fort.14
ln -sf griddef.out fort.110
# NOTE(review): '$fhr_${min}' parses as the (unset) variable "fhr_", so
# the log file is named wrfpost_<domain>..out -- probably ${fhr}_${min}
# was intended.  Left unchanged here.
/opt/installsw/mpich2/bin/mpirun -np 1 ${POSTEXEC}/wrfpost.exe < itag > wrfpost_${domain}.$fhr_${min}.out 2>&1
if [ $min = 00 ]; then
mv WRFPRS$fhr.tm00 WRFPRS_${domain}.${fhr}_${min}
else
mv WRFPRS${fhr}:${min}.tm00 WRFPRS_${domain}.${fhr}_${min}
fi
#
#----------------------------------------------------------------------
# End of wrf post job
#----------------------------------------------------------------------
# 'ls -l' doubles as an existence check: err1 is non-zero if the posted
# file was not produced.
ls -l WRFPRS_${domain}.${fhr}_${min}
err1=$?
if test "$err1" -ne 0
then
echo 'WRF POST FAILED, EXITTING'
exit
fi
if [ $dyncore = "NMM" ]; then
#######################################################################
# 2. Run copygb
#
# Copygb interpolates WRF-POSTPROC output from its native
# grid to a regular projection grid. The package copygb
# is used to horizontally interpolate from one domain
# to another, it is necessary to run this step for wrf-nmm
# (but not for wrf-arw) because wrf-nmm's computational
# domain is on rotated Arakawa-E grid
#
# Copygb can be run in 3 ways as explained below.
# Uncomment the preferable one.
#
#----------------------------------------------------------------------
#
# Option 1:
# Copygb is run with a pre-defined AWIPS grid
# (variable $gridno, see below) Specify the grid to
# interpolate the forecast onto. To use standard AWIPS grids
# (list in http://wwwt.emc.ncep.noaa.gov/mmb/namgrids/ or
# or http://www.nco.ncep.noaa.gov/pmb/docs/on388/tableb.html),
# set the number of the grid in variable gridno below.
# To use a user defined grid, see explanation above copygb.exe command.
#
# export gridno=212
#
#${POSTEXEC}/copygb.exe -xg${gridno} WRFPRS_${domain}.${fhr} wrfprs_${domain}.${fhr}
#
#----------------------------------------------------------------------
#
# Option 2:
# Copygb ingests a kgds definition on the command line.
#${POSTEXEC}/copygb.exe -xg"255 3 109 91 37748 -77613 8 -71000 10379 9900 0 64 42000 42000" WRFPRS_${domain}.${fhr} wrfprs_${domain}.${fhr}
#
#----------------------------------------------------------------------
#
# Option 3:
# Copygb can ingests contents of files too. For example:
# copygb_gridnav.txt or copygb_hwrf.txt through variable $nav.
#
# Option -3.1:
# To run for "Lambert Comformal map projection" uncomment the following line
#
# read nav < 'copygb_gridnav.txt'
#
# Option -3.2:
# To run for "lat-lon" uncomment the following line
#
read nav < 'copygb_hwrf.txt'
#
export nav
#
/opt/installsw/mpich2/bin/mpirun -np 1 ${POSTEXEC}/copygb.exe -xg"${nav}" WRFPRS_${domain}.${fhr}_$min wrfprs_${domain}.${fhr}_${min} >copygb.log
#
# (For more info on "copygb" see WRF-NMM User's Guide, Chapter-7.)
#----------------------------------------------------------------------
# Check to see whether "copygb" created the requested file.
ls -l wrfprs_${domain}.${fhr}_${min}
err1=$?
if test "$err1" -ne 0
then
echo 'copygb FAILED, EXITTING'
exit
fi
#----------------------------------------------------------------------
# End of copygb job
#----------------------------------------------------------------------
elif [ $dyncore = "ARW" ]; then
# ARW output is already on a regular grid; just alias the posted file.
ln -s WRFPRS_${domain}.${fhr}_$min wrfprs_${domain}.${fhr}_${min}
fi
done
let "min=min+$incrementmin"
done
let "fhr=fhr+$incrementhr"
NEWDATE=`${POSTEXEC}/ndate.exe +${fhr} $startdate`
done
# Final bookkeeping: timestamp the log, move products to $NPRD, clean up.
date
echo "End of Output Job"
# NOTE(review): unquoted glob/variable — assumes $NPRD is set and the
# wrfprs_d01* names contain no whitespace; verify against the job setup.
mv wrfprs_d01* ${NPRD}/
# WARNING: "rm *" deletes EVERYTHING in the current working directory,
# not only post-processing scratch files.  Safe only if this script is
# guaranteed to be running inside a dedicated work directory — confirm.
rm *
#cd $NPRD
#cat wrfprs_d01* > all
#grib2ctl.pl -verf all>all.ctl
#gribmap -i all.ctl
#mv all* $MOVDIR
#cd $NPRD
#rm *
exit
| true
|
accf893a1be13a4b4ff115b95a569e24c1980067
|
Shell
|
iajzenszmi/CodeCode
|
/766.sh
|
UTF-8
| 139,546
| 3.125
| 3
|
[
"MIT"
] |
permissive
|
#! /bin/sh
# This is a shell archive, meaning:
# 1. Remove everything above the #! /bin/sh line.
# 2. Save the resulting text in a file.
# 3. Execute the file with /bin/sh (not csh) to create the files:
#	Doc
#	Drivers
#	Src
# This archive created: Sat Apr 19 22:33:37 1997
export PATH; PATH=/bin:$PATH
# Create the Doc directory (if needed) and move into it.
if test ! -d 'Doc'
then
mkdir 'Doc'
fi
# Abort if the cd fails so archive members are not extracted into the
# wrong directory (ShellCheck SC2164).
cd 'Doc' || exit 1
# Extract Doc/readme unless it already exists (shar overwrite guard).
if test -f 'readme'
then
echo shar: will not over-write existing file "'readme'"
else
# The here-doc delimiter is quoted (\SHAR_EOF), so the payload below is
# written literally, with no parameter or command expansion.
cat << \SHAR_EOF > 'readme'
This directory contains FORTRAN 90 code for computing Pade'-Hermite and
simultaneous Pade' approximants, plus two driver programs. To obtain a run,
first compile the code in the files
driver1.f, vector_pade.f and linpack.f
or, in the files
driver2.f, vector_pade.f and linpack.f.
driver1.f:
First driver program for vector_pade.f. It also gives code for computing
the inverse of a Striped Sylvester matrix using the formula derived in
sylvester_inverses.ps.Z. No input data is required by this driver.
Contains the programs:
main
invert_sylv
solve_sylv
divide_series:
driver2.f:
Second driver program for vector_pade.f. It also gives code for computing
the errors in the order conditions for the Pade'-Hermite and
simultaneous Pade' systems computed by VECTOR_PADE. This driver does require
input data, a sample of which is given in the file data_for_driver2.
Contains the programs:
main
build_delta_t
build_delta_t_star
vector_pade.f:
Based on the algorithm given in stable_pade_alg.ps.Z and used for the
experiments described in experiments_pade_alg.ps.Z: Contains the subroutines:
vector_pade:
build_t:
build_t_star:
build_s:
build_s_star:
scale_s:
scale_s_star:
mult_s
mult_s_star
norm:
gen_next_vector:
linpack.f:
Collection of LINPACK subroutines used by vector_pade.f. Contains the
subroutines:
sgefa:
sgesl:
saxpy:
isamax:
sscal:
sswap:
sdot:
SHAR_EOF
fi # end of overwriting check
# Return to the archive root before extracting the next directory.
cd ..
# Create the Drivers/Sp directory tree and move into it.  Each cd is
# checked (ShellCheck SC2164) so a failure cannot cause the following
# archive members to be extracted into the wrong directory.
if test ! -d 'Drivers'
then
mkdir 'Drivers'
fi
cd 'Drivers' || exit 1
if test ! -d 'Sp'
then
mkdir 'Sp'
fi
cd 'Sp' || exit 1
if test -f 'driver1.f90'
then
echo shar: will not over-write existing file "'driver1.f90'"
else
cat << \SHAR_EOF > 'driver1.f90'
!***********************************************************************
! This is a test driver for the subroutines VECTOR_PADE, *
! invert_Sylv and solve_Sylv. The driver requires no input data.B *
!***********************************************************************
module working_area_main
! Local variables for main.
real, dimension (:,:,:), allocatable :: S_n, S_star_n
! Assumed shape dummy arguments for VECTOR_PADE.
integer, dimension (:), allocatable :: n
real, dimension (:), allocatable :: gamma, gamma_star, kappa
real, dimension (:,:), allocatable :: A
real, dimension (:,:,:), allocatable :: S, S_star
! Assumed shape dummy arguments for solve_Sylv.
real, dimension (:), allocatable :: b, x
! Assumed shape dummy arguments for invert_Sylv.
real, dimension (:,:), allocatable :: M, M_inv
end module working_area_main
module working_area_VECTOR_PADE
! Automatic arrays for VECTOR_PADE.
! Variables used to compute NPHS.
real, dimension(:,:,:), allocatable :: S_hat, New_S
real, dimension(:,:), allocatable :: T
! Variables used to compute NSPS.
real, dimension(:,:,:), allocatable :: S_star_hat, New_S_star
real, dimension(:,:,:), allocatable :: T_star
end module working_area_VECTOR_PADE
module working_area_invert_Sylv
real, dimension (:), allocatable :: gamma, gamma_star, &
gammap, kappa, &
a0, a_norm, b, c
real, dimension (:,:,:), allocatable :: S, S_star
end module working_area_invert_Sylv
module working_area_solve_Sylv
real, dimension (:), allocatable :: gamma, gamma_star, &
gammap, kappa, &
a_norm, c, d
real, dimension (:,:,:), allocatable :: S, S_star
end module working_area_solve_Sylv
program main
use working_area_main
implicit none
interface
subroutine VECTOR_PADE(k, n, A, tau, &
S, gamma, S_star, gamma_star, kappa, num_of_kappas, flag)
integer, intent (in) :: k
integer, dimension (:), intent (in) :: n
real, dimension (:,:), intent (in) :: A
real, intent (in) :: tau
real, dimension (:,:,:), intent (out) :: S, S_star
real, dimension (:), intent (out) :: gamma, &
gamma_star, kappa
integer, intent (out) :: num_of_kappas
integer, intent (inout) :: flag
end subroutine VECTOR_PADE
subroutine invert_Sylv(k, n, A, tau, Sylvinv, cond_number, flag)
integer, intent(in) :: k
integer, dimension (:), intent(in) :: n
real, dimension (:,:), intent(inout) :: A
real, intent(in) :: tau
real, dimension (:,:), intent(out) :: Sylvinv
real, intent(out) :: cond_number
integer, intent(out) :: flag
end subroutine invert_Sylv
subroutine solve_Sylv(k, n, A, b, tau, x, cond_number, flag)
integer, intent(in) :: k
integer, dimension (:), intent(in) :: n
real, dimension (:,:), intent(inout) :: A
real, dimension (:), intent(in) :: b
real, intent(in) :: tau
real, dimension (:), intent(out) :: x
real, intent(out) :: cond_number
integer, intent(out) :: flag
end subroutine solve_Sylv
end interface
integer alpha, beta, i, j, k, l, offset, norm_n, num_of_kappas, &
flag, S_deg, S_star_deg
real tau, cond_number
!***********************************************************************
! Test Example *
! Source: S. Cabay and A. Jones and G. Labahn, "Experiments with *
! a weakly stable algorithm for computing Pade'-Hermite and *
! simultaneous Pade' approximants", submitted to ACM Transactions *
! on mathematical software. *
!***********************************************************************
k = 2
allocate (n(0:k))
n(0) = 2
n(1) = 3
n(2) = 1
write(*,983) (n(beta),beta=0,k)
! The norm of n is norm_n = n(0) + ... + n(k).
norm_n = 6
! S_deg = max{n(0), ..., n(k)} + 1, the largest degree of the
! polynomials in S.
S_deg = 4
! S_star_deg = norm_n - max{n(0), ..., n(k)} + 1, the largest degree
! of the polynomials in S_star.
S_star_deg = 6
allocate (A(0:norm_n, 0:k), &
S(0:S_deg,0:k,0:k), S_n(0:S_deg,0:k,0:k), &
S_star(0:norm_n,0:k,0:k), S_star_n(0:norm_n,0:k,0:k), &
gamma(0:k), gamma_star(0:k), kappa(0:norm_n), &
b(0:norm_n), x(0:norm_n), &
M(norm_n,norm_n), M_inv(norm_n,norm_n))
! A is a vector of power series A = (A(0), ..., A(k)), where
! A(beta) = A(0,beta) + A(1,beta)*z + ... + A(k,beta)*z**k.
A(0,0) = 1
A(1,0) = -1
A(2,0) = 2
A(3,0) = -2
A(4,0) = 3
A(5,0) = -3
A(6,0) = 4
A(0,1) = 0
A(1,1) = 2
A(2,1) = 0
A(3,1) = 3
A(4,1) = 0
A(5,1) = 4
A(6,1) = 0
A(0,2) = -1
A(1,2) = 1
A(2,2) = 5
A(3,2) = 3
A(4,2) = 2
A(5,2) = -2
A(6,2) = -6
! tau is the stability parameter described in subroutine VECTOR_PADE.
tau = 100000.0
! Print A.
l = 0
write(*,988)
write(*,986) (A(0,beta), beta=0, k), l
do l=1, norm_n
write(*,987) (A(l,beta), beta=0, k), l
end do
!***********************************************************************
! Obtain the scaled Pade'-Hermite system S and the scaled *
! simultaneous system S_star of types n for the vector of power *
! series associated with A. *
!***********************************************************************
! Since the sizes of the components of A affect the condition
! numbers of the associated Sylvester matrices, and therefore the
! choice of tau, it is usually advisable to scale the columns of A
! (i.e., the power series associated with each column) before
! calling VECTOR_PADE.
call VECTOR_PADE(k, n, A, tau, &
S, gamma, S_star, gamma_star, kappa, num_of_kappas, flag)
! S_n is the normalized Pade'-Hermite system.
do l=0, S_deg
do beta=0, k
do alpha=0, k
if ((beta .eq. 0 .and. l .gt. n(alpha)+1) .or. &
(beta .gt. 0 .and. l .gt. n(alpha) )) then
! Pad with zeroes to make the polynomial of degree
! S_deg.
S_n(l,alpha,beta) = 0.0
else
S_n(l,alpha,beta) = S(l,alpha,beta)/gamma(beta)
endif
end do
end do
end do
! S_star_n is the normalized simultaneous Pade' system.
do l=0, S_star_deg
do beta=0, k
do alpha=0, k
if ((alpha .eq. 0 .and. l .gt. norm_n - n(beta) ) .or. &
(alpha .gt. 0 .and. l .gt. norm_n - n(beta)+1)) then
! Pad with zeroes to make the polynomial of degree
! S_star_deg.
S_star_n(l,alpha,beta) = 0.0
else
S_star_n(l,alpha,beta) = S_star(l,alpha,beta)/ &
gamma_star(alpha)
endif
end do
end do
end do
write(*,990)
do l=0, S_deg
write(*,991) l
do alpha=0,k
! Multiplication by 37 makes S_n integral.
write(*,992) (37*S_n(l,alpha,beta), beta=0, k)
end do
end do
write(*,984)
write(*,985) (kappa(i), i=0, num_of_kappas)
write(*,993)
do l=0, S_star_deg
write(*,995) l
do alpha=0,k
! Multiplication by 37 makes S_star_n integral.
write(*,994) (37*S_star_n(l,alpha,beta), beta=0, k)
end do
end do
!***********************************************************************
! Obtain the inverse of the striped Sylvester matrix M associated *
! with A. *
!***********************************************************************
call invert_Sylv(k, n, A, tau, M_inv, cond_number, flag)
! Compute M (required for output purposes only).
offset = 0
do l=0, k
do j=1, n(l)
do i = 1, j-1
M(i,offset+j) = 0
end do
do i=j, norm_n
M(i,offset+j) = A(i-j,l)
end do
end do
offset = offset + n(l)
end do
! Print M.
write(*,996)
do i = 1, norm_n
write(*,997) (M(i,j),j=1,norm_n)
end do
! Print the inverse of M.
write(*,998)
do i = 1, norm_n
write(*,980) (37*37*M_inv(i,j),j=1,norm_n)
end do
!***********************************************************************
! Solve the system Mx = b, using the subroutine solve_Sylv. The *
! vector b is the sum of the columns of M. *
!***********************************************************************
b(0) = 0
b(1) = 3
b(2) = 8
b(3) = 8
b(4) = 6
b(5) = 5
call solve_Sylv(k, n, A, b, tau, x, cond_number, flag)
! Print b and x.
write(*,999)
write(*,981) (b(l),l=0,norm_n-1)
write(*,982)
write(*,981) (x(l),l=0,norm_n-1)
980 format(2x,6f7.0)
981 format(3x, 6f4.0)
982 format(//6x, 'Solution x of Mx = b'/)
983 format(//12x, 'n = (',i1,',',i1,',',i1,')'/)
984 format(//3x,'Stability of points along the piecewise diagonal'/, &
3x,'through n. Rough estimates of condition numbers'/, &
3x,'of the Sylvester matrices associated with these'/, &
3x,'points.')
985 format(/5x, 8e9.2)
986 format(2x,' [', 3F4.0, ' ] * z**',I1)
987 format(2x,'+ [', 3F4.0, ' ] * z**',I1)
988 format(" Input vector of power series A")
990 format(// " Pade'-Hermite System S"/ &
' Normalized and multiplied by 37'/)
991 format(" Coefficient matrix of z**",I1)
992 format(2x, 3F7.0)
993 format(//" Simultaneous Pade' System S_star"/ &
' Normalized and multiplied by 37'/)
994 format(5x, 3F7.0)
995 format(" Coefficient matrix of z**",I1)
996 format(//' Striped Sylvester matrix M'/)
997 format(3x, 6F4.0)
998 format(//8x, 'Inverse of M multiplied by 37**2'/)
999 format(//6x, 'Right-hand vector b'/)
deallocate (n, A, S, S_n, S_star, S_star_n, gamma, gamma_star, &
kappa, b, x, M, M_inv)
stop
end program main
subroutine invert_Sylv(k, n, A, tau, Sylvinv, cond_number, flag)
!***********************************************************************
! *
! For the vector of integers *
! *
! n = [n(0),...,n(k)], *
! *
! let *
! *
! ||n|| = n(0)+...+n(k). *
! *
! Define the order ||n|| striped Sylvester matrix M to be *
! *
! *
! *
! | A(0,0) | | A(0,k) | *
! | . | | . | *
! | . | | . | *
! | . . | | . . | *
! | . A(0,0) |...| . A(0,k) |.*
! | . . | | . . | *
! | . | | . | *
! | . | | . | *
! |A(||n||-1,0)...A(||n||-n(0),0)| |A(||n||-1,k)...A(||n||-n(k),k)| *
! *
! This subroutine computes the inverse, Sylvinv, of M and gives *
! a rough estimate, cond_number, of the algorithm. *
! *
! The inverse is obtained by using the formula derived in *
! S. Cabay and A. Jones and G. Labahn, "Computation of Numerical *
! Pade'-Hermite and Simultaneous Pade' Systems I: Near Inversion *
! of Generalized Sylvester Matrices", SIAM journal on matrix *
! analysis and applications, 17 (1996), 248-267. *
! *
! The formula expresses the inverse in terms of Pade'-Hermite and *
! Simultaneous Pade' Systems of type n for the vector of power *
! series that can be associated with A. These systems are computed *
! by the subroutine VECTOR_PADE. *
! *
!***********************************************************************
! *
! on entry *
! k integer *
! There are k+1 stripes in M. *
! *
! n integer (0:k) *
! The beta'th strip in M has n(beta) columns. *
! *
! A real (0:sum(n), 0:k) *
! Each column of this matrix gives one stripe in *
! the striped Sylvester matrix M. *
! *
! tau real *
! Stability parameter required by the subroutine *
! VECTOR_PADE. Very roughly speaking, the residual *
! error, b - M*x, will look like *
! tau * unit-error * ||A||. *
! For most efficient computation, tau should be *
! chosen as large as the lack of accuracy will *
! permit. *
! *
! on exit *
! Sylvinv real (sum(n), sum(n)) *
! The inverse of M. *
! *
! cond_number real *
! Very rough estimate of the condition number of *
! this algorithm. *
! *
! flag integer *
! Error parameter. *
! flag = 0, no errors *
! flag = 1, the Sylvester matrix at the point n *
! is ill-conditioned; i.e., *
! cond_number >= tau. *
! flag = 2, the Sylvester matrix at the point n *
! is numerically singular. *
! flag = 3, input variables are incorrect. *
! *
! *
! functions and subroutines *
! divide_series Divides one power series by another. *
! VECTOR_PADE Computes scaled Pade'-Hermite and *
! simultaneous Pade' systems for the power *
! series associated with A. *
! *
!***********************************************************************
use working_area_invert_Sylv
implicit none
interface
subroutine VECTOR_PADE(k, n, A, tau, &
S, gamma, S_star, gamma_star, kappa, num_of_kappas, flag)
integer, intent (in) :: k
integer, dimension (:), intent (in) :: n
real, dimension (:,:), intent (in) :: A
real, intent (in) :: tau
real, dimension (:,:,:), intent (out) :: S, S_star
real, dimension (:), intent (out) :: gamma, &
gamma_star, kappa
integer, intent (out) :: num_of_kappas
integer, intent (inout) :: flag
end subroutine VECTOR_PADE
end interface
! invert_Sylv subroutine parameters.
integer, intent(in) :: k
integer, dimension (0:), intent(in) :: n
real, dimension (0:,0:), intent(inout) :: A
real, intent(in) :: tau
real, dimension (1:,1:), intent(out) :: Sylvinv
real, intent(out) :: cond_number
integer, intent(out) :: flag
! Local variables.
integer alpha, beta, i, j, l, offset, norm_n, num_of_kappas
real norm_a0_inv
allocate (gamma(0:k), gamma_star(0:k), gammap(0:k), a_norm(0:k), &
kappa(0:sum(n)), a0(0:sum(n)), b(0:sum(n)),c(0:sum(n)),&
S(0:maxval(n)+1, 0:k, 0:k), S_star(0:sum(n), 0:k, 0:k))
norm_n = sum(n)
! Check the validity of input parameters.
if ( k .lt. 1 .or. &
k .gt. size(n) - 1 .or. &
0 .gt. minval(n) .or. &
norm_n .gt. size(A(:,0)) - 1 .or. &
k .gt. size(A(0,:)) - 1 .or. &
0.0 .eq. A(0,0) .or. &
norm_n .gt. size(Sylvinv(:,1)) .or. &
norm_n .gt. size(Sylvinv(1,:))) then
flag = 3
return
else
flag = 0
endif
! Compute the Pade'-Hermite system S and simultaneous Pade' system
! S_star of type n for the vector of power series associated with A.
! VECTOR_PADE requires values for the coefficients of z**norm_n in A,
! so temporarily assign some.
do beta=0, k
a_norm(beta) = A(norm_n,beta)
A(norm_n,beta) = 0
end do
call VECTOR_PADE(k, n, A, tau, &
S, gamma, S_star, gamma_star, kappa, num_of_kappas, flag)
do beta=0, k
A(norm_n,beta) = a_norm(beta)
end do
if (flag .eq. 0 .or. flag .eq. 1) then
! VECTOR_PADE successfully computed systems of type n.
! Some initializations.
do j = 0, norm_n-1
a0(j) = A(j,0)/A(0,0)
end do
do i = 1, norm_n
do j = 1, norm_n
Sylvinv(i,j) = 0.0
end do
end do
! A rough estimate of the condition number of this algorithm is
! given in the last entry in kappa multiplied by the norm of the
! inverse of a0.
b(0) = 1
do l=1, norm_n-1
b(l) = 0
end do
call divide_series(a0, b, c, norm_n-1, flag)
norm_a0_inv = abs(c(0))
do l=1, norm_n-1
norm_a0_inv = norm_a0_inv + abs(c(l))
end do
cond_number = kappa(num_of_kappas) * norm_a0_inv
! Now, evaluate the inverse formula.
do beta=0, k
gammap(beta) = gamma(beta) * gamma_star(beta)
end do
do beta = 0, k
! If beta = 0, b corresponds with the power series v_star(z);
! otherwise, b corresponds with the power series
! z*q_star(beta).
if (beta .eq. 0) then
do l = 0, norm_n-1
if (l .le. norm_n-n(0)) then
b(l) = S_star(l,0,0)
else
b(l) = 0.0
endif
end do
else
do l = 0, norm_n-1
if (l .le. norm_n-n(0)) then
b(l) = S_star(l+1,beta,0)
else
b(l) = 0.0
endif
end do
endif
! c corresponds with the power series obtained by dividing b
! with the inverse of the power series associated with the
! first column of A.
call divide_series(a0, b, c, norm_n-1, flag)
! Multiply c on the left by the coefficients of the
! polynomials of the beta'th column of S.
offset = 0
do alpha = 0, k
do i = 0, n(alpha)-1
if (beta .eq. 0) then
j=i-1
else
j=i
endif
do l = 0, norm_n - 1
Sylvinv(offset+n(alpha)-i,norm_n-l) &
= Sylvinv(offset+n(alpha)-i, norm_n-l) &
+ S(n(alpha)-j,alpha,beta) * c(l) / gammap(beta)
end do
end do
offset = offset + n(alpha)
end do
end do
! Sum over the coefficients of the polynomials in S.
offset = 0
do alpha = 0, k
do i = 1, n(alpha)-1
do l = 1, norm_n-1
Sylvinv(offset+n(alpha)-i,norm_n-l) &
= Sylvinv(offset+n(alpha)-i,norm_n-l) &
+ Sylvinv(offset+n(alpha)+1-i,norm_n-l+1)
end do
end do
offset = offset + n(alpha)
end do
endif
deallocate (gamma, gamma_star, gammap, a_norm, kappa, a0, b, c, &
S, S_star)
return
end subroutine invert_Sylv
subroutine solve_Sylv(k, n, A, b, tau, x, cond_number, flag)
!***********************************************************************
! *
! For the vector of integers *
! *
! n = [n(0),...,n(k)], *
! *
! let *
! *
! ||n|| = n(0)+...+n(k). *
! *
! Define the order ||n|| striped Sylvester matrix M to be *
! *
! *
! *
! | A(0,0) | | A(0,k) | *
! | . | | . | *
! | . | | . | *
! | . . | | . . | *
! | . A(0,0) |...| . A(0,k) |.*
! | . . | | . . | *
! | . | | . | *
! | . | | . | *
! |A(||n||-1,0)...A(||n||-n(0),0)| |A(||n||-1,k)...A(||n||-n(k),k)| *
! *
! Given the vector b, solve_Sylv determines the solution x of the *
! linear system of equations *
! *
! M * x = b. *
! *
! The solution is obtained by using the inverse formula for M *
! derived in S. Cabay and A. Jones and G. Labahn, "Computation of *
! Numerical Pade'-Hermite and Simultaneous Pade' Systems I: Near *
! Inversion of Generalized Sylvester Matrices", SIAM journal on *
! matrix analysis and applications, 17 (1996), 248-267. *
! *
! The formula expresses the inverse in terms of Pade'-Hermite and *
! Simultaneous Pade' Systems of type n for the vector of power *
! series that can be associated with A. These systems are computed *
! by the subroutine VECTOR_PADE. *
! *
!***********************************************************************
! *
! on entry *
! k integer *
! There are k+1 stripes in M. *
! *
! n integer (0:k) *
! The beta'th strip in M has n(beta) columns. *
! *
! A real (0:sum(n), 0:k) *
! Each column of this matrix gives one stripe in *
! the striped Sylvester matrix M. *
! *
! b real (0:sum(n)) *
! The right-hand vector. *
! *
! tau real *
! Stability parameter required by the subroutine *
! VECTOR_PADE. Very roughly speaking, the residual *
! error, b - M*x, will look like *
! tau * unit-error * ||A||. *
! For most efficient computation, tau should be *
! chosen as large as the lack of accuracy will *
! permit. *
! *
! on exit *
! x real (0:sum(n)) *
! The solution vector. *
! *
! cond_number real *
! Very rough estimate of the condition number of *
! this algorithm. *
! *
! flag integer *
! Error parameter. *
! flag = 0, no errors *
! flag = 1, the Sylvester matrix at the point n *
! is ill-conditioned; i.e., *
! cond_number >= tau. *
! flag = 2, the Sylvester matrix at the point n *
! is numerically singular. *
! flag = 3, input variables are incorrect. *
! *
! functions and subroutines *
! divide_series Divides one power series by another. *
! VECTOR_PADE Computes scaled Pade'-Hermite and *
! simultaneous Pade' systems for the power *
! series associated with A. *
! *
!***********************************************************************
use working_area_solve_Sylv
implicit none
interface
subroutine VECTOR_PADE(k, n, A, tau, &
S, gamma, S_star, gamma_star, kappa, num_of_kappas, flag)
integer, intent (in) :: k
integer, dimension (:), intent (in) :: n
real, dimension (:,:), intent (in) :: A
real, intent (in) :: tau
real, dimension (:,:,:), intent (out) :: S, S_star
real, dimension (:), intent (out) :: gamma, &
gamma_star, kappa
integer, intent (out) :: num_of_kappas
integer, intent (inout) :: flag
end subroutine VECTOR_PADE
end interface
! solve_Sylv subroutine parameters.
integer, intent(in) :: k
integer, dimension (0:), intent(in) :: n
real, dimension (0:,0:), intent(inout) :: A
real, dimension (0:), intent(in) :: b
real, intent(in) :: tau
real, dimension (0:), intent(out) :: x
real, intent(out) :: cond_number
integer, intent(out) :: flag
! Local variables.
integer i, l, num_of_kappas, offset, alpha, beta, norm_n, maxn, ni
real temp, norm_c
allocate (gamma(0:k), gamma_star(0:k), gammap(0:k), a_norm(0:k), &
kappa(0:sum(n)), c(0:sum(n)), d(0:sum(n)), &
S(0:maxval(n)+1, 0:k, 0:k), S_star(0:sum(n), 0:k, 0:k))
norm_n = sum(n)
! Check the validity of input parameters.
if ( k .lt. 1 .or. &
k .gt. size(n) - 1 .or. &
0 .gt. minval(n) .or. &
norm_n .gt. size(b) .or. &
norm_n .gt. size(x) .or. &
norm_n .gt. size(A(:,0)) - 1 .or. &
k .gt. size(A(0,:)) - 1 .or. &
0.0 .eq. A(0,0)) then
flag = 3
return
else
flag = 0
endif
maxn = maxval(n)
! Compute the Pade'-Hermite system S and simultaneous Pade' system
! S_star of type n for the vector of power series associated with A.
! VECTOR_PADE requires values for the coefficients of z**norm_n in A,
! so temporarily assign some.
do beta=0, k
a_norm(beta) = A(norm_n,beta)
A(norm_n,beta) = 0
end do
call VECTOR_PADE(k, n, A, tau, &
S, gamma, S_star, gamma_star, kappa, num_of_kappas, flag)
do beta=0, k
A(norm_n,beta) = a_norm(beta)
end do
if (flag .eq. 0 .or. flag .eq. 1) then
! VECTOR_PADE has computed systems of type n.
do l = 0, norm_n-1
c(l) = A(l,0)/A(0,0)
end do
! A rough estimate of the condition number of the algorithm is
! given in the last entry in kappa multiplied by the norm of
! the inverse of c.
d(0) = 1
do l=1, norm_n-1
d(l) = 0
end do
call divide_series(c, d, x, norm_n-1, flag)
norm_c = abs(x(0))
do l=1, norm_n-1
norm_c = norm_c + abs(x(l))
end do
cond_number = kappa(num_of_kappas) * norm_c
do beta=0, k
gammap(beta) = gamma(beta) * gamma_star(beta)
end do
! Divide the power series associated with b by the inverse of
! the power series associated with the first column of A
! to get the power series d.
call divide_series(c, b, d, norm_n-1, flag)
! Apply the inverse formula.
do i=0, norm_n-1
x(i) = 0.0
end do
do beta=0, k
do i = 1, maxn
temp = 0.0
ni = max(n(0),i)
do l = 0, norm_n - ni
if (beta .eq. 0) then
temp = temp + S_star(l,beta,0)*d(norm_n-i-l)
else
temp = temp + S_star(l+1,beta,0)*d(norm_n-i-l)
endif
end do
c(i-1) = temp/gammap(beta)
end do
offset = 0
do alpha = 0, k
do i = 1, n(alpha)
temp = 0.0
do l = 0, n(alpha)-i
if (beta .eq. 0) then
temp = temp + S(i+l+1,alpha,beta) * c(l)
else
temp = temp + S(i+l,alpha,beta) * c(l)
endif
end do
x(offset+i-1) = x(offset+i-1) + temp
end do
offset = offset + n(alpha)
end do
end do
endif
deallocate (gamma, gamma_star, gammap, a_norm, kappa, c, d, S, &
S_star)
return
end subroutine solve_Sylv
subroutine divide_series(a, b, c, N, flag)
implicit none
integer N, flag
real a(0:N), b(0:N), c(0:N)
!***********************************************************************
! For the two power represented by a and b, with a(0) nonzero, *
! this subroutine divides b by a (modulo N) and stores the result *
! in c. *
!***********************************************************************
! Local variables.
integer i, j
real temp
! Check validity of input parameters.
if (N .lt. 0 .or. a(0) .eq. 0) then
flag = 1
return
else
flag = 0
endif
! Solve a triangular system of equations.
c(0) = b(0)/a(0)
do i=1, N
temp = b(i)
do j=0, i-1
temp = temp - c(j)*a(i-j)
end do
c(i) = temp/a(0)
end do
return
end subroutine divide_series
SHAR_EOF
fi # end of overwriting check
if test -f 'driver2.f90'
then
echo shar: will not over-write existing file "'driver2.f90'"
else
cat << \SHAR_EOF > 'driver2.f90'
!***********************************************************************
! This is a test driver for the subroutines VECTOR_PADE, *
! build_delta_T and build_delta_T_star. The driver requires input *
! data. *
!***********************************************************************
module working_area_main
! Assummed shape dummy arguments for build_delta_T.
real, dimension (:,:), allocatable :: delta_T
! Assummed shape dummy arguments for build_delta_T_star.
real, dimension (:,:,:), allocatable :: delta_T_star
! Assumed shape dummy arguments for VECTOR_PADE.
integer, dimension (:), allocatable :: n
real, dimension (:), allocatable :: gamma, gamma_star, kappa
real, dimension (:,:), allocatable :: A
real, dimension (:,:,:), allocatable :: S, S_star
end module working_area_main
module working_area_VECTOR_PADE
! Automatic arrays for VECTOR_PADE.
! Variables used to compute NPHS.
real, dimension(:,:,:), allocatable :: S_hat, New_S
real, dimension(:,:), allocatable :: T
! Variables used to compute NSPS.
real, dimension(:,:,:), allocatable :: S_star_hat, New_S_star
real, dimension(:,:,:), allocatable :: T_star
end module working_area_VECTOR_PADE
program main
! Test driver: reads the stability tolerance tau, the problem size k,
! the type vector n and the power-series coefficients A from standard
! input, calls VECTOR_PADE, and prints the computed systems S and
! S_star together with the residual-error norms.
use working_area_main
implicit none
interface
subroutine VECTOR_PADE(k, n, A, tau, &
S, gamma, S_star, gamma_star, kappa, num_steps, flag)
integer, intent (in) :: k
integer, dimension (:), intent (in) :: n
real, dimension (:,:), intent (in) :: A
real, intent (in) :: tau
real, dimension (:,:,:), intent (out) :: S, S_star
real, dimension (:), intent (out) :: gamma, &
gamma_star, kappa
integer, intent (out) :: num_steps
integer, intent (inout) :: flag
end subroutine VECTOR_PADE
subroutine build_delta_T(k, n, A, S, delta_T, delta_T_norm)
integer, intent (in) :: k
integer, dimension (:), intent (in) :: n
real, dimension (:,:), intent (in) :: A
real, dimension (:,:,:), intent (in) :: S
real, intent (out) :: delta_T_norm
real, dimension (:,:), intent (out) :: delta_T
end subroutine build_delta_T
subroutine build_delta_T_star(k, n, A, S_star, &
delta_T_star, delta_T_star_norm)
integer, intent (in) :: k
integer, dimension (:), intent (in) :: n
real, dimension (:,:), intent (in) :: A
real, dimension (:,:,:), intent (in) :: S_star
real, intent (out) :: delta_T_star_norm
real, dimension (:,:,:), intent (out) :: delta_T_star
end subroutine build_delta_T_star
end interface
! Local variables.
integer k, flag, num_steps, S_deg, S_star_deg, alpha, beta, l
integer norm_n
real tau, delta_T_norm, delta_T_star_norm
! Input the tolerance tau and the dimension of the problem, k.
read *, tau
read *, k
allocate (n(0:k))
! Input n.
do beta=0, k
read *, n(beta)
end do
! norm_n is ||n|| = n(0)+...+n(k); all series carry norm_n+1 terms.
norm_n = sum(n)
allocate (A(0:norm_n, 0:k), &
S(0:maxval(n)+1,0:k,0:k), gamma(0:k), &
S_star(0:norm_n,0:k,0:k), gamma_star(0:k), &
kappa(0:norm_n), &
delta_T(0:norm_n, 0:k), delta_T_star(0:norm_n, 0:k, k))
! Input the power series A.
do beta=0, k
do l=0, norm_n
read *, A(l,beta)
end do
end do
! Compute the Pade'-Hermite system S of type n and the
! simultaneous Pade' system S_star of type n.
call VECTOR_PADE(k, n, A, tau, &
S, gamma, S_star, gamma_star, kappa, num_steps, flag)
! Compute the errors in the order conditions.
call build_delta_T(k, n, A, S, delta_T, delta_T_norm)
call build_delta_T_star(k, n, A, S_star, &
delta_T_star, delta_T_star_norm)
! Report results unless the input was rejected (flag = 3).
print 190, flag
if (flag .lt. 3) then
print 191, tau
print 192
do l=0, num_steps
print 193, l, kappa(l)
end do
print 194, delta_T_norm
print 195, delta_T_star_norm
! Print S entry by entry; the first column (beta = 0) has degree
! n(alpha)+1, the remaining columns have degree n(alpha).
do alpha=0, k
do beta=0, k
if (beta .eq. 0) then
S_deg = n(alpha) + 1
else
S_deg = n(alpha)
endif
print 196, alpha, beta
do l=0, S_deg
print 197, l, S(l,alpha,beta)
end do
end do
end do
! Print S_star; row 0 has degree ||n||-n(beta), other rows
! degree ||n||-n(beta)+1.
! NOTE(review): for n(beta) == 0 that degree is ||n||+1, one past
! the allocated upper bound ||n|| of S_star -- confirm the inputs
! are restricted to n(beta) >= 1.
do alpha=0, k
do beta=0, k
if (alpha.eq. 0) then
S_star_deg = norm_n - n(beta)
else
S_star_deg = norm_n - n(beta) + 1
endif
print 198, alpha, beta
do l=0, S_star_deg
print 199, l, S_star(l,alpha,beta)
end do
end do
end do
endif
deallocate (n, A, S, gamma, S_star, gamma_star, kappa, delta_T, &
delta_T_star)
stop
190 format(' flag = ',I1)
191 format(' Stability tolerance = ',D9.2)
192 format(' Step kappa')
193 format(I4, D11.2)
194 format(' Norm of error in NPHS order condition = ',D9.2)
195 format(' Norm of error in NSPS order condition = ',D9.2)
196 format(/' S(',I3,',',I3,')')
197 format(' z**',I3,': ',D15.8)
198 format(/' S_star(',I3,',',I3,')')
199 format(' z**',I3,': ',D15.8)
end program main
subroutine build_delta_T(k, n, A, S, delta_T, delta_T_norm)
!***********************************************************************
! *
! Computes the error in the residual given the vector of *
! power series A and the NPHS S of type n. That is, *
! *
! delta_T = A * S (mod z**(||n||+1)). *
! *
! *
! On entry: *
! A real (0:sum(n),0:k) *
! Vector of power series. *
! *
! k integer *
! There are k+1 power series in A. *
! *
! S real (0:maxval(n)+1, 0:k, 0:k) *
! Pade Hermite system for A of type n. *
! *
! n integer (0:k) *
! The type specification of S. *
! *
! on exit *
! delta_T real (0:sum(n), 0:k) *
! Error in the residual. *
! *
! delta_T_norm real *
! 1-norm of the row vector delta_T. *
! *
!***********************************************************************
implicit none
! build_delta_T subroutine parameters.
integer, intent (in) :: k
integer, dimension (0:), intent (in) :: n
real, dimension (0:,0:), intent (in) :: A
real, dimension (0:,0:,0:), intent (in) :: S
real, intent (out) :: delta_T_norm
real, dimension (0:,0:), intent (out) :: delta_T
! Local variables.
integer alpha, beta, l, i, start, finish, S_deg, norm_n
real colsum
norm_n = sum(n)
delta_T = 0
! Multiply the vector A by the matrix S.
! Each coefficient delta_T(l,beta) is the convolution
! sum over alpha and i of A(i,alpha)*S(l-i,alpha,beta),
! truncated modulo z**(||n||+1).
do alpha=0, k
do beta=0, k
do l=0, norm_n
! S_deg is the polynomial degree of S(alpha,beta): the first
! column (beta = 0) has degree n(alpha)+1, the rest n(alpha).
if (beta .eq. 0) then
S_deg = n(alpha) + 1
else
S_deg = n(alpha)
endif
! Restrict i so that both A(i,alpha) and S(l-i,alpha,beta)
! index valid coefficients.
start = max(0, l-S_deg)
finish = min(l, norm_n)
do i=start, finish
delta_T(l, beta) = delta_T(l, beta) &
+ A(i,alpha) * S(l-i,alpha,beta)
end do
end do
end do
end do
! Compute the 1-norm of delta_T (maximum column sum of absolute
! coefficient values).
delta_T_norm = 0.0
do beta=0, k
colsum = 0.0
do l=0, norm_n
colsum = colsum + abs(delta_T(l,beta))
end do
delta_T_norm = max(colsum, delta_T_norm)
end do
return
end subroutine build_delta_T
subroutine build_delta_T_star(k, n, A, S_star, &
delta_T_star, delta_T_star_norm)
!***********************************************************************
! *
! Computes the error in the residual given the vector of *
! power series A and the NSPS S_star of type n. That is, *
! *
! delta_T_star = S_star * A_star (mod z**(||n||+1)), *
! *
! where *
! *
! |-A(1) -A(2) ... -A(k)| *
! | A(0) | *
! A_star = | A(0) |. *
! | ... | *
! | A(0)| *
! *
! *
! *
! On entry: *
! A real (0:sum(n),0:k) *
! Vector of power series. *
! *
! k integer *
! There are k+1 power series in A. *
! *
! S_star real (0:sum(n), 0:k, 0:k) *
! Simultaneous Pade' system corresponding to A of type n.*
! *
! n integer (0:k) *
! The type specification of S_star. *
! *
! on exit *
! delta_T_star real (0:sum(n), 0:k, k) *
! Error in the residual. *
! *
! delta_T_star_norm real *
! 1-norm of delta_T_star. *
! *
!***********************************************************************
implicit none
! build_delta_T_star subroutine parameters.
integer, intent (in) :: k
integer, dimension (0:), intent (in) :: n
real, dimension (0:,0:), intent (in) :: A
real, dimension (0:,0:,0:), intent (in) :: S_star
real, intent (out) :: delta_T_star_norm
real, dimension (0:,0:,1:), intent (out) :: delta_T_star
! Local variables.
integer alpha, beta, l, i, finish, S_star_deg, norm_n
real series_norm, colsum
norm_n = sum(n)
delta_T_star = 0
! Multiply the matrix S_star by the matrix A_star. Column beta of
! A_star carries A(0) on its diagonal entry and -A(beta) in row 0,
! so each product coefficient has two convolution contributions.
do alpha=0, k
do beta=1, k
do l=0, norm_n
! Contribution S_star(alpha,beta) * A(0); the degree of
! S_star(alpha,beta) is ||n||-n(beta) in row 0 and
! ||n||-n(beta)+1 otherwise.
if (alpha .eq. 0) then
S_star_deg = norm_n - n(beta)
else
S_star_deg = norm_n - n(beta) + 1
endif
finish = min(l, S_star_deg)
do i=0, finish
delta_T_star(l, alpha, beta) &
= delta_T_star(l, alpha, beta) &
+ S_star(i,alpha,beta) * A(l-i,0)
end do
! Contribution -S_star(alpha,0) * A(beta), with the degree
! bound of the first column of S_star.
if (alpha .eq. 0) then
S_star_deg = norm_n - n(0)
else
S_star_deg = norm_n - n(0) + 1
endif
finish = min(l, S_star_deg)
do i=0, finish
delta_T_star(l, alpha, beta) &
= delta_T_star(l, alpha, beta) &
- S_star(i,alpha,0) * A(l-i,beta)
end do
end do
end do
end do
! Compute the 1-norm of delta_T_star (maximum over beta of the
! summed coefficient norms of column beta).
delta_T_star_norm = 0.0
do beta=1, k
colsum = 0
do alpha=0, k
series_norm = 0
do l=0, norm_n
series_norm = series_norm &
+ abs(delta_T_star(l,alpha,beta))
end do
colsum = colsum + series_norm
end do
delta_T_star_norm = max(delta_T_star_norm, colsum)
end do
return
end subroutine build_delta_T_star
SHAR_EOF
fi # end of overwriting check
if test -f 'RES1'
then
echo shar: will not over-write existing file "'RES1'"
else
cat << \SHAR_EOF > 'RES1'
n = (2,3,1)
Input vector of power series A
[ 1. 0. -1. ] * z**0
+ [ -1. 2. 1. ] * z**1
+ [ 2. 0. 5. ] * z**2
+ [ -2. 3. 3. ] * z**3
+ [ 3. 0. 2. ] * z**4
+ [ -3. 4. -2. ] * z**5
+ [ 4. 0. -6. ] * z**6
Pade'-Hermite System S
Normalized and multiplied by 37
Coefficient matrix of z**0
0. 0. 37.
0. 37. 0.
0. 0. 37.
Coefficient matrix of z**1
0. -73. -44.
0. -13. -131.
0. 1. -44.
Coefficient matrix of z**2
-4. -48. 3.
-22. -9. 137.
-4. 0. 0.
Coefficient matrix of z**3
44. 0. 0.
36. -7. 123.
0. 0. 0.
Coefficient matrix of z**4
0. 0. 0.
-9. 0. 0.
0. 0. 0.
Stability of points along the piecewise diagonal
through n. Rough estimates of condition numbers
of the Sylvester matrices associated with these
points.
0.10E+01 0.34E+39 0.18E+02 0.25E+03
Simultaneous Pade' System S_star
Normalized and multiplied by 37
Coefficient matrix of z**0
37. 0. -37.
0. 0. 0.
0. 0. 0.
Coefficient matrix of z**1
-57. 74. 57.
0. 0. 0.
0. 0. 0.
Coefficient matrix of z**2
10. -40. 249.
22. 0. -22.
4. 0. -4.
Coefficient matrix of z**3
0. -57. -103.
-48. 44. 48.
-2. 8. 2.
Coefficient matrix of z**4
5. 0. -428.
37. -52. 117.
0. 4. 28.
Coefficient matrix of z**5
0. 0. -159.
-24. 0. -136.
-1. 0. 19.
Coefficient matrix of z**6
0. 0. 0.
0. 0. -147.
0. 0. -20.
Striped Sylvester matrix M
1. 0. 0. 0. 0. -1.
-1. 1. 2. 0. 0. 1.
2. -1. 0. 2. 0. 5.
-2. 2. 3. 0. 2. 3.
3. -2. 0. 3. 0. 2.
-3. 3. 4. 0. 3. -2.
Inverse of M multiplied by 37**2
1332. -37. 111. 222. -74. -148.
407. 407. 2886. -2442. -1924. 1628.
481. 481. -1443. 1221. 962. -814.
-1036. 333. 1739. -1998. -703. 1332.
259. -1110. -777. 1184. 518. -333.
-37. -37. 111. 222. -74. -148.
Right-hand vector b
0. 3. 8. 8. 6. 5.
Solution x of Mx = b
1. 1. 1. 1. 1. 1.
SHAR_EOF
fi # end of overwriting check
if test -f 'data2'
then
echo shar: will not over-write existing file "'data2'"
else
cat << \SHAR_EOF > 'data2'
1.0E3
2
3
4
2
1
-1
2
-2
3
-3
4
-4
5
-5
0
2
0
3
0
4
0
5
0
6
-1
1
5
3
2
-2
-6
1
-8
5
SHAR_EOF
fi # end of overwriting check
if test -f 'RES2'
then
echo shar: will not over-write existing file "'RES2'"
else
cat << \SHAR_EOF > 'RES2'
flag = 1
Stability tolerance = 0.10D+04
Step kappa
0 0.10D+01
1 0.34D+39
2 0.18D+02
3 0.25D+03
4 0.26D+04
Norm of error in NPHS order condition = 0.12D-05
Norm of error in NSPS order condition = 0.52D-06
S( 0, 0)
z** 0: 0.00000000D+00
z** 1: 0.00000000D+00
z** 2: 0.19127774D-02
z** 3: -0.39173678D+00
z** 4: -0.25592968D+00
S( 0, 1)
z** 0: 0.00000000D+00
z** 1: -0.39999884D+00
z** 2: 0.10817607D-05
z** 3: 0.20000023D+00
S( 0, 2)
z** 0: 0.10725696D-01
z** 1: -0.60474654D-02
z** 2: 0.37403008D+00
z** 3: 0.62642664D-01
S( 1, 0)
z** 0: 0.00000000D+00
z** 1: 0.00000000D+00
z** 2: 0.19739860D+00
z** 3: -0.76128505D-01
z** 4: -0.40933479D-01
z** 5: -0.30986968D-01
S( 1, 1)
z** 0: 0.19999942D+00
z** 1: -0.19999997D+00
z** 2: 0.13628483D-06
z** 3: 0.19874869D-06
z** 4: 0.90856545D-07
S( 1, 2)
z** 0: 0.00000000D+00
z** 1: -0.22295757D+00
z** 2: 0.16989957D+00
z** 3: -0.40050197D-01
z** 4: 0.93678676D-01
S( 2, 0)
z** 0: 0.00000000D+00
z** 1: 0.00000000D+00
z** 2: 0.19127774D-02
z** 3: 0.30604484D-02
S( 2, 1)
z** 0: 0.00000000D+00
z** 1: 0.42589008D-08
z** 2: -0.17982025D-07
S( 2, 2)
z** 0: 0.10725696D-01
z** 1: -0.60474654D-02
z** 2: 0.31948842D-02
S_star( 0, 0)
z** 0: 0.24365066D-01
z** 1: -0.38102511D-01
z** 2: 0.20995198D-01
z** 3: -0.72575314D-02
z** 4: -0.10951400D-06
z** 5: 0.64040173D-08
z** 6: -0.71667252D-07
S_star( 0, 1)
z** 0: 0.00000000D+00
z** 1: 0.48730124D-01
z** 2: -0.27474910D-01
z** 3: -0.98495474D-02
z** 4: 0.13737912D-01
z** 5: -0.72575505D-02
S_star( 0, 2)
z** 0: -0.24365066D-01
z** 1: 0.38102511D-01
z** 2: 0.14956024D+00
z** 3: -0.64539552D-01
z** 4: -0.18221866D+00
z** 5: -0.39659090D-01
z** 6: -0.90981901D-01
z** 7: 0.21280237D+00
S_star( 1, 0)
z** 0: 0.00000000D+00
z** 1: 0.00000000D+00
z** 2: -0.26688738D-01
z** 3: 0.19964833D-01
z** 4: -0.12723709D-01
z** 5: 0.97238170D-02
z** 6: -0.10436967D-07
z** 7: 0.48619043D-02
S_star( 1, 1)
z** 0: 0.00000000D+00
z** 1: 0.00000000D+00
z** 2: 0.00000000D+00
z** 3: -0.53377464D-01
z** 4: -0.13447784D-01
z** 5: -0.12206477D-01
z** 6: -0.12723702D-01
S_star( 1, 2)
z** 0: 0.00000000D+00
z** 1: 0.00000000D+00
z** 2: 0.26688738D-01
z** 3: -0.19964833D-01
z** 4: -0.17409743D+00
z** 5: -0.83479844D-01
z** 6: 0.97341470D-01
z** 7: 0.15496019D+00
z** 8: 0.27774906D+00
S_star( 2, 0)
z** 0: 0.00000000D+00
z** 1: 0.00000000D+00
z** 2: -0.12755025D-01
z** 3: -0.76531838D-02
z** 4: 0.20408295D-01
z** 5: -0.33945902D-07
z** 6: 0.40289967D-08
z** 7: -0.37072496D-07
S_star( 2, 1)
z** 0: 0.00000000D+00
z** 1: 0.00000000D+00
z** 2: 0.00000000D+00
z** 3: -0.25510047D-01
z** 4: -0.40816400D-01
z** 5: 0.12755211D-01
z** 6: 0.20408291D-01
S_star( 2, 2)
z** 0: 0.00000000D+00
z** 1: 0.00000000D+00
z** 2: 0.12755025D-01
z** 3: 0.76531838D-02
z** 4: -0.10969346D+00
z** 5: -0.15561241D+00
z** 6: 0.94387643D-01
z** 7: 0.27295950D+00
z** 8: 0.20663220D+00
SHAR_EOF
fi # end of overwriting check
# Leave the test-data directories and move back to the archive root.
cd ..
cd ..
# Create the Src/Sp source directory hierarchy if it is not already
# present, then descend into it for the source-file extraction below.
if test ! -d 'Src'
then
mkdir 'Src'
fi
cd 'Src'
if test ! -d 'Sp'
then
mkdir 'Sp'
fi
cd 'Sp'
if test -f 'blas1.f'
then
echo shar: will not over-write existing file "'blas1.f'"
else
cat << \SHAR_EOF > 'blas1.f'
subroutine saxpy(n,sa,sx,incx,sy,incy)
!
! constant times a vector plus a vector: sy := sy + sa*sx.
! uses unrolled loop for increments equal to one.
! jack dongarra, linpack, 3/11/78.
!
! n - number of vector elements; sa - scalar multiplier;
! sx, sy - vectors stored with strides incx, incy.
! quick return when n <= 0 or sa == 0 (nothing to add).
!
real sx(1),sy(1),sa
integer i,incx,incy,ix,iy,m,mp1,n
!
if(n.le.0)return
if (sa .eq. 0.0) return
if(incx.eq.1.and.incy.eq.1)go to 20
!
! code for unequal increments or equal increments
! not equal to 1
! ix/iy start at the end of the vector when the increment is
! negative, so logical element order is preserved.
!
ix = 1
iy = 1
if(incx.lt.0)ix = (-n+1)*incx + 1
if(incy.lt.0)iy = (-n+1)*incy + 1
do 10 i = 1,n
sy(iy) = sy(iy) + sa*sx(ix)
ix = ix + incx
iy = iy + incy
10 continue
return
!
! code for both increments equal to 1
!
!
! clean-up loop: handle n mod 4 leftovers, then process the rest
! four elements at a time.
!
20 m = mod(n,4)
if( m .eq. 0 ) go to 40
do 30 i = 1,m
sy(i) = sy(i) + sa*sx(i)
30 continue
if( n .lt. 4 ) return
40 mp1 = m + 1
do 50 i = mp1,n,4
sy(i) = sy(i) + sa*sx(i)
sy(i + 1) = sy(i + 1) + sa*sx(i + 1)
sy(i + 2) = sy(i + 2) + sa*sx(i + 2)
sy(i + 3) = sy(i + 3) + sa*sx(i + 3)
50 continue
return
end
integer function isamax(n,sx,incx)
!
! finds the index of element having max. absolute value.
! returns the 1-based logical element index (not the storage
! offset); 0 when n < 1.
! jack dongarra, linpack, 3/11/78.
! modified to correct problem with negative increments, 9/29/88.
!
real sx(1),smax
integer i,incx,ix,n
!
isamax = 0
if( n .lt. 1 ) return
isamax = 1
if(n.eq.1)return
if(incx.eq.1)go to 20
!
! code for increment not equal to 1
! ix starts at the far end when incx < 0 so every element is
! still visited.
!
ix = 1
if(incx.lt.0)ix = (-n+1)*incx + 1
smax = abs(sx(ix))
ix = ix + incx
do 10 i = 2,n
if(abs(sx(ix)).le.smax) go to 5
isamax = i
smax = abs(sx(ix))
5 ix = ix + incx
10 continue
return
!
! code for increment equal to 1
!
20 smax = abs(sx(1))
do 30 i = 2,n
if(abs(sx(i)).le.smax) go to 30
isamax = i
smax = abs(sx(i))
30 continue
return
end
subroutine sscal(n,sa,sx,incx)
!
! scales a vector by a constant: sx := sa*sx.
! uses unrolled loops for increment equal to 1.
! jack dongarra, linpack, 3/11/78.
!
! NOTE(review): for incx < 0 the strided loop bound nincx is
! negative and no elements are scaled; callers appear to use
! incx = 1 only -- confirm.
!
real sa,sx(1)
integer i,incx,m,mp1,n,nincx
!
if(n.le.0)return
if(incx.eq.1)go to 20
!
! code for increment not equal to 1
!
nincx = n*incx
do 10 i = 1,nincx,incx
sx(i) = sa*sx(i)
10 continue
return
!
! code for increment equal to 1
!
!
! clean-up loop: handle n mod 5 leftovers, then scale five
! elements per iteration.
!
20 m = mod(n,5)
if( m .eq. 0 ) go to 40
do 30 i = 1,m
sx(i) = sa*sx(i)
30 continue
if( n .lt. 5 ) return
40 mp1 = m + 1
do 50 i = mp1,n,5
sx(i) = sa*sx(i)
sx(i + 1) = sa*sx(i + 1)
sx(i + 2) = sa*sx(i + 2)
sx(i + 3) = sa*sx(i + 3)
sx(i + 4) = sa*sx(i + 4)
50 continue
return
end
subroutine sswap (n,sx,incx,sy,incy)
!
! interchanges two vectors sx and sy (strides incx, incy).
! uses unrolled loops for increments equal to 1.
! jack dongarra, linpack, 3/11/78.
!
real sx(1),sy(1),stemp
integer i,incx,incy,ix,iy,m,mp1,n
!
if(n.le.0)return
if(incx.eq.1.and.incy.eq.1)go to 20
!
! code for unequal increments or equal increments not equal
! to 1; negative increments start from the far end.
!
ix = 1
iy = 1
if(incx.lt.0)ix = (-n+1)*incx + 1
if(incy.lt.0)iy = (-n+1)*incy + 1
do 10 i = 1,n
stemp = sx(ix)
sx(ix) = sy(iy)
sy(iy) = stemp
ix = ix + incx
iy = iy + incy
10 continue
return
!
! code for both increments equal to 1
!
!
! clean-up loop: handle n mod 3 leftovers, then swap three
! elements per iteration.
!
20 m = mod(n,3)
if( m .eq. 0 ) go to 40
do 30 i = 1,m
stemp = sx(i)
sx(i) = sy(i)
sy(i) = stemp
30 continue
if( n .lt. 3 ) return
40 mp1 = m + 1
do 50 i = mp1,n,3
stemp = sx(i)
sx(i) = sy(i)
sy(i) = stemp
stemp = sx(i + 1)
sx(i + 1) = sy(i + 1)
sy(i + 1) = stemp
stemp = sx(i + 2)
sx(i + 2) = sy(i + 2)
sy(i + 2) = stemp
50 continue
return
end
real function sdot(n,sx,incx,sy,incy)
!
! forms the dot product of two vectors: sum of sx(i)*sy(i).
! returns 0.0 when n <= 0.
! uses unrolled loops for increments equal to one.
! jack dongarra, linpack, 3/11/78.
!
real sx(1),sy(1),stemp
integer i,incx,incy,ix,iy,m,mp1,n
!
stemp = 0.0e0
sdot = 0.0e0
if(n.le.0)return
if(incx.eq.1.and.incy.eq.1)go to 20
!
! code for unequal increments or equal increments
! not equal to 1; negative increments start from the far end.
!
ix = 1
iy = 1
if(incx.lt.0)ix = (-n+1)*incx + 1
if(incy.lt.0)iy = (-n+1)*incy + 1
do 10 i = 1,n
stemp = stemp + sx(ix)*sy(iy)
ix = ix + incx
iy = iy + incy
10 continue
sdot = stemp
return
!
! code for both increments equal to 1
!
!
! clean-up loop: handle n mod 5 leftovers, then accumulate five
! products per iteration.
!
20 m = mod(n,5)
if( m .eq. 0 ) go to 40
do 30 i = 1,m
stemp = stemp + sx(i)*sy(i)
30 continue
if( n .lt. 5 ) go to 60
40 mp1 = m + 1
do 50 i = mp1,n,5
stemp = stemp + sx(i)*sy(i) + sx(i + 1)*sy(i + 1) + &
sx(i + 2)*sy(i + 2) + sx(i + 3)*sy(i + 3) + sx(i + 4)*sy(i + 4)
50 continue
60 sdot = stemp
return
end
SHAR_EOF
fi # end of overwriting check
if test -f 'linpack.f'
then
echo shar: will not over-write existing file "'linpack.f'"
else
cat << \SHAR_EOF > 'linpack.f'
subroutine sgefa(a,lda,n,ipvt,info)
integer lda,n,ipvt(1),info
real a(lda,1)
!
! sgefa factors a real matrix by gaussian elimination.
!
! sgefa is usually called by sgeco, but it can be called
! directly with a saving in time if rcond is not needed.
! (time for sgeco) = (1 + 9/n)*(time for sgefa) .
!
! on entry
!
! a real(lda, n)
! the matrix to be factored.
!
! lda integer
! the leading dimension of the array a .
!
! n integer
! the order of the matrix a .
!
! on return
!
! a an upper triangular matrix and the multipliers
! which were used to obtain it.
! the factorization can be written a = l*u where
! l is a product of permutation and unit lower
! triangular matrices and u is upper triangular.
!
! ipvt integer(n)
! an integer vector of pivot indices.
!
! info integer
! = 0 normal value.
! = k if u(k,k) .eq. 0.0 . this is not an error
! condition for this subroutine, but it does
! indicate that sgesl or sgedi will divide by zero
! if called. use rcond in sgeco for a reliable
! indication of singularity.
!
! linpack. this version dated 08/14/78 .
! cleve moler, university of new mexico, argonne national lab.
!
! subroutines and functions
!
! blas saxpy,sscal,isamax
!
! internal variables
!
real t
integer isamax,j,k,kp1,l,nm1
!
!
! gaussian elimination with partial pivoting
!
info = 0
nm1 = n - 1
if (nm1 .lt. 1) go to 70
do 60 k = 1, nm1
kp1 = k + 1
!
! find l = pivot index
! isamax searches the subcolumn a(k:n,k); adding k-1 converts
! its relative index into a row index of a.
!
l = isamax(n-k+1,a(k,k),1) + k - 1
ipvt(k) = l
!
! zero pivot implies this column already triangularized
!
if (a(l,k) .eq. 0.0e0) go to 40
!
! interchange if necessary
!
if (l .eq. k) go to 10
t = a(l,k)
a(l,k) = a(k,k)
a(k,k) = t
10 continue
!
! compute multipliers
! scale the subdiagonal of column k by -1/pivot so saxpy below
! performs the elimination directly.
!
t = -1.0e0/a(k,k)
call sscal(n-k,t,a(k+1,k),1)
!
! row elimination with column indexing
!
do 30 j = kp1, n
t = a(l,j)
if (l .eq. k) go to 20
a(l,j) = a(k,j)
a(k,j) = t
20 continue
call saxpy(n-k,t,a(k+1,k),1,a(k+1,j),1)
30 continue
go to 50
40 continue
info = k
50 continue
60 continue
70 continue
ipvt(n) = n
if (a(n,n) .eq. 0.0e0) info = n
return
end
subroutine sgesl(a,lda,n,ipvt,b,job)
integer lda,n,ipvt(1),job
real a(lda,1),b(1)
!
! sgesl solves the real system
! a * x = b or trans(a) * x = b
! using the factors computed by sgeco or sgefa.
!
! on entry
!
! a real(lda, n)
! the output from sgeco or sgefa.
!
! lda integer
! the leading dimension of the array a .
!
! n integer
! the order of the matrix a .
!
! ipvt integer(n)
! the pivot vector from sgeco or sgefa.
!
! b real(n)
! the right hand side vector.
!
! job integer
! = 0 to solve a*x = b ,
! = nonzero to solve trans(a)*x = b where
! trans(a) is the transpose.
!
! on return
!
! b the solution vector x .
!
! error condition
!
! a division by zero will occur if the input factor contains a
! zero on the diagonal. technically this indicates singularity
! but it is often caused by improper arguments or improper
! setting of lda . it will not occur if the subroutines are
! called correctly and if sgeco has set rcond .gt. 0.0
! or sgefa has set info .eq. 0 .
!
! to compute inverse(a) * c where c is a matrix
! with p columns
! call sgeco(a,lda,n,ipvt,rcond,z)
! if (rcond is too small) go to ...
! do 10 j = 1, p
! call sgesl(a,lda,n,ipvt,c(1,j),0)
! 10 continue
!
! linpack. this version dated 08/14/78 .
! cleve moler, university of new mexico, argonne national lab.
! (But, note minor modification below by stan cabay dated 10/06/96.)
!
! subroutines and functions
!
! blas saxpy,sdot
!
! internal variables
!
real sdot,t
integer k,kb,l,nm1
!
nm1 = n - 1
if (job .ne. 0) go to 50
!
! job = 0 , solve a * x = b
! first solve l*y = b
!
if (nm1 .lt. 1) go to 30
do 20 k = 1, nm1
l = ipvt(k)
t = b(l)
if (l .eq. k) go to 10
b(l) = b(k)
b(k) = t
10 continue
call saxpy(n-k,t,a(k+1,k),1,b(k+1),1)
20 continue
30 continue
!
! now solve u*x = y
!
do 40 kb = 1, n
k = n + 1 - kb
! ***************************************************************
! * When a is singular, this modification provides one *
! * particular solution to a*x = 0. *
! * *
! * This modification to cleve moler's code is made by *
! * stan cabay, dated 10/06/96. *
! * *
! * NOTE(review): after the do 33 loop completes, the Fortran *
! * DO variable l equals k, so b(l) = 1.0 sets b(k) = 1.0; *
! * this deliberately relies on the loop-exit value of l. *
if (a(k,k) .ne. 0.0e0) goto 35
do 33 l=1,k-1
b(l) = -a(l,k)
33 continue
b(l) = 1.0
do 34 l=k+1, n
b(l) = 0.0e0
34 continue
goto 40
! * *
! ***************************************************************
35 b(k) = b(k)/a(k,k)
t = -b(k)
call saxpy(k-1,t,a(1,k),1,b(1),1)
40 continue
go to 100
50 continue
!
! job = nonzero, solve trans(a) * x = b
! first solve trans(u)*y = b
!
do 60 k = 1, n
t = sdot(k-1,a(1,k),1,b(1),1)
b(k) = (b(k) - t)/a(k,k)
60 continue
!
! now solve trans(l)*x = y
!
if (nm1 .lt. 1) go to 90
do 80 kb = 1, nm1
k = n - kb
b(k) = b(k) + sdot(n-k,a(k+1,k),1,b(k+1),1)
l = ipvt(k)
if (l .eq. k) go to 70
t = b(l)
b(l) = b(k)
b(k) = t
70 continue
80 continue
90 continue
100 continue
return
end
SHAR_EOF
fi # end of overwriting check
if test -f 'vector_pade.f90'
then
echo shar: will not over-write existing file "'vector_pade.f90'"
else
cat << \SHAR_EOF > 'vector_pade.f90'
subroutine VECTOR_PADE(k, n, A, tau, &
S, gamma, S_star, gamma_star, kappa, last_i, flag)
!***********************************************************************
! *
! For the vector of integers *
! *
! n = [n(0),...,n(k)], *
! *
! define *
! *
! ||n|| = n(0)+...+n(k). *
! *
! Let A be the vector of k+1 power series *
! *
! A = [A(0),...,A(k)], *
! *
! where, for j=0,...,k, the first ||n||+1 terms of A(j) are given *
! on entry by *
! *
! A(j) = A(0,j) + A(1,j) * z + ... + A(||n||,j) * z**||n||. *
! *
! This subroutine VECTOR_PADE computes the scaled Pade'-Hermite *
! system S of type n satisfying *
! *
! A * S = T * z**(||n||+1) + delta_T *
! *
! and the scaled simultaneous Pade' system S_star of type n *
! satisfying *
! *
! |-A(1) -A(2) ... -A(k)| *
! | A(0) | *
! S_star * | A(0) | = T_star * z**(||n||+1) *
! | ... | + delta_T_star. *
! | A(0)| *
! *
! The system S is a (k+1) x (k+1) matrix of polynomials *
! *
! |S(0,0) ... S(0,k)| *
! S = | ... |, *
! |S(k,0) ... S(k,k)| *
! *
! where, for i=0,...,k and j=1,...,k, *
! *
! S(i,j) = S(0,i,j) + S(1,i,j) * z + ... + S(n(i),i,j) * z**n(i) *
! *
! and, for i=0,...,k, *
! *
! S(i,0) = S(0,i,0) + S(1,i,0) * z + ... *
! ... + S(n(i)+1,i,0) * z**(n(i)+1) *
! with S(0,i,0) = S(1,i,0) = 0. *
! *
! The system S_star is a (k+1) x (k+1) matrix of polynomials *
! *
! |S_star(0,0) ... S_star(0,k)| *
! S_star = | ... |, *
! |S_star(k,0) ... S_star(k,k)| *
! *
! where, for j=0,...,k, *
! *
! S_star(0,j) = S_star(0,0,j) *
! + S_star(1,0,j) * z *
! + *
! . *
! . *
! . *
! + *
! + S_star(||n||-n(j),0,j) * z**(||n||-n(j)), *
! *
! and, for i=1,...,k and j=0,...,k, *
! *
! S_star(i,j) = S_star(0,i,j) *
! + S_star(1,i,j) * z *
! + *
! . *
! . *
! . *
! + *
! + S_star(||n||-n(j)+1,i,j) * z**(||n||-n(j)+1), *
! *
! with S_star(0,i,j) = S_star(1,i,j) = 0. *
! *
! On exit, the residual errors very roughly satisfy *
! *
! ||delta_T||, ||delta_T_star|| = tau * unit_error * ||A||, *
! *
! whereas, the relative errors in S and S_star are very roughly *
! of size *
! *
! kappa(last_i) * tau * unit_error * ||A|| * ||A(0)^{-1}||, *
! *
! where kappa(last_i) is a crude estimate of the condition *
! number of the associated striped Sylvester matrix at the point n.*
! *
! The above is to serve as a rough guide in selecting the tolerance*
! "tau". But, there is a trade-off between the accuracy of the *
! results and the speed of the algorithm; the smaller the value of *
! tau, the costlier the computations can become. For most efficient*
! computation, tau should be chosen to be as large as the lack of *
! accuracy will permit. For some problems, the best strategy for *
! selecting tau may be first to run a trial in order to examine the*
! estimates kappa(i), i = 1, ..., last_i, of the condition numbers *
! of the striped Sylvester matrices associated with intermediate *
! points along the diagonal through n. The algorithm accepts the *
! i'th point as "stable" if *
! *
! kappa(i) < tau. *
! *
! Note that the first column of S always yields a Pade'-Hermite *
! approximant of type (n(0)-1, ..., n(k)-1) for A, with a "small" *
! residual error delta_T. This is so even when the Sylvester matrix*
! at the point n is ill-conditioned or singular (when singular, *
! however, the remaining columns of S may be meaningless). *
! Similarly, the first row of S_star always gives a simultaneous *
! Pade' approximant of type n with a "small" residual error *
! delta_T_star. *
! *
!***********************************************************************
! *
! on entry *
! A real (0:sum(n), 0:k) *
! Vector of power series with A(0,0) nonzero. *
! *
! k integer *
! There are k+1 power series in A. *
! *
! n integer (0:k) *
! Vector of degrees defining the type of S & S_star.*
! *
! tau real *
! Stability parameter. An intermediate solution is *
! accepted if at that point kappa(i) < tau. *
! *
! on exit *
! S real (0:maxval(n)+1, 0:k, 0:k), *
! Scaled Pade'-Hermite system of type n. *
! *
! gamma real (0:k) *
! Scaling factors. To obtain the normalised NPHS *
! divide the j'th column of S by gamma(j). *
! *
! S_star real (0:sum(n), 0:k, 0:k), *
! Scaled simultaneous Pade' system of type n. *
! *
! gamma_star real (0:k) *
! Scaling factors. To obtain the normalised NSPS *
! divide the i'th row of S_star by gamma_star(i). *
! *
! kappa real (0:sum(n)), *
! kappa(i) is the estimate of condition number of *
! the associated Sylvester submatrix at the i'th *
! step. *
! *
! last_i integer *
! The last step, corresponding to the point n. *
! *
! flag integer *
! Error parameter. *
! flag = 0, no errors *
! flag = 1, the Sylvester matrix at the point n *
! is ill-conditioned; i.e., *
! kappa(last_i) >= tau. *
! flag = 2, the Sylvester matrix at the point n *
! is numerically singular. The first *
! column of S still yields a Pade'-Hermite*
! approximant of type (n(0)-1,...,n(k)-1) *
! and the first row of S_star still yields*
! a simultaneous Pade' approximant of *
! type n. The remaining rows and columns *
! are meaningless. *
! flag = 3, input variables are incorrect. *
! *
! Note that the storage allocated to the subroutine array parameters*
! can be larger than limits designated above, but never smaller. *
! *
! functions and subroutines *
! build_T Finds the residual for the NPHS. *
! build_T_star Finds the residual for the NSPS. *
! build_S Builds the NPHS by solving Sylvester systems.*
! build_S_star Builds the NSPS by solving Sylvester systems.*
! mult_S Multiples two NPHS's. *
! mult_S_star Multiples two NSPS's. *
! scale_S Scales the NPHS and determines gamma. *
! scale_S_star Scales the NSPS and determines gamma_star. *
! gen_next_vector Generates the next point along a diagonal. *
! *
! Note also that build_S and build_S_star call *
! sgefa Linpack routine to triangulate a matrix. *
! sgesi Linpack routine to solve linear equations. *
! *
! *
!***********************************************************************
! The algorithm VECTOR_PADE is shown to be weakly stable in *
! S. Cabay and A. Jones and G. Labahn, "Computation of Numerical *
! Pade'-Hermite and Simultaneous Pade' Systems II: A Weakly Stable *
! Algorithm", SIAM journal on matrix analysis and applications, *
! 17 (1996), 268-297. *
!***********************************************************************
use working_area_VECTOR_PADE
implicit none
interface
subroutine build_T(A, S, k, m, prevnorm_nus, norm_nus, T)
integer, intent (in) :: k, norm_nus, &
prevnorm_nus
integer, dimension (:), intent (in) :: m
real, dimension (:,:), intent (in) :: A
real, dimension (:,:,:), intent (in) :: S
real, dimension (:,:), intent (inout) :: T
end subroutine build_T
subroutine build_T_star(A, S_star, k, m, prevnorm_nus, &
norm_nus, T_star)
integer, intent (in) :: k, norm_nus, &
prevnorm_nus
integer, dimension (:), intent (in) :: m
real, dimension (:,:), intent (in) :: A
real, dimension (:,:,:), intent (in) :: S_star
real, dimension (:,:,:), intent (inout) :: T_star
end subroutine build_T_star
subroutine build_S(A, k, n, norm_n, S, singular)
integer, intent (in) :: k, norm_n
integer, dimension (:), intent (in) :: n
real, dimension (:,:), intent (in) :: A
real, dimension (:,:,:), intent (out) :: S
logical, intent (out) :: singular
end subroutine build_S
subroutine build_S_star(A_star, k, n, knorm_n, S_star,singular)
integer, intent (in) :: k, knorm_n
integer, dimension (:), intent (in) :: n
real, dimension (:,:,:), intent (in) :: A_star
real, dimension (:,:,:), intent (out) :: S_star
logical, intent (out) :: singular
end subroutine build_S_star
subroutine mult_S(S, m, S_hat, nus, New_S, new_m, k)
integer, intent (in) :: k
integer, dimension (:), intent (in) :: m, nus, new_m
real, dimension (:,:,:), intent (in) :: S, S_hat
real, dimension (:,:,:), intent (out) :: New_S
end subroutine mult_S
subroutine mult_S_star(S_star, m, S_star_hat, nus, &
New_S_star, new_m, k)
integer, intent (in) :: k
integer, dimension (:), intent (in) :: m, nus, new_m
real, dimension (:,:,:), intent (in) :: S_star, S_star_hat
real, dimension (:,:,:), intent (out) :: New_S_star
end subroutine mult_S_star
subroutine scale_S(S, m, k, gamma)
integer, intent (in) :: k
integer, dimension (:), intent (in) :: m
real, dimension (:,:,:), intent (inout) :: S
real, dimension (:), intent (out) :: gamma
end subroutine scale_S
subroutine scale_S_star(S_star, m, k, gamma_star)
integer, intent (in) :: k
integer, dimension (:), intent (in) :: m
real, dimension (:,:,:), intent (inout) :: S_star
real, dimension (:), intent (out) :: gamma_star
end subroutine scale_S_star
end interface
! VECTOR_PADE subroutine parameters.
integer, intent (in) :: k
integer, dimension (0:), intent (in) :: n
real, dimension (0:,0:), intent (in) :: A
real, intent (in) :: tau
real, dimension (0:,0:,0:), intent (out) :: S, S_star
real, dimension (0:), intent (out) :: gamma, &
gamma_star, kappa
integer, intent (out) :: last_i
integer, intent (inout) :: flag
! Local Variables.
logical singular
integer alpha, beta, i, l, step, norm_n, knorm_nus, &
m(0:k), norm_m, new_m(0:k), norm_new_m, nu(0:k), &
nus(0:k), norm_nus, prevnorm_nus, S_deg, S_star_deg
! Variables used to compute NPHS.
allocate (S_hat(0:maxval(n)+1, 0:k, 0:k), &
New_S(0:maxval(n)+1, 0:k, 0:k), &
T(0:sum(n), 0:k))
! Variables used to compute NSPS.
allocate (S_star_hat(0:sum(n), 0:k, 0:k), &
New_S_star(0:sum(n), 0:k, 0:k), &
T_star(0:sum(n), 0:k, 1:k))
norm_n = sum(n)
! Check the validity of input parameters.
flag = 0
if ( k .lt. 1 .or. &
k .gt. size(n) - 1 .or. &
norm_n .gt. size(A(:,0)) - 1 .or. &
k .gt. size(A(0,:)) - 1 .or. &
0.0 .eq. A(0,0) .or. &
maxval(n) .gt. size(S(:,0,0)) - 2 .or. &
k .gt. size(S(0,:,0)) - 1 .or. &
k .gt. size(S(0,0,:)) - 1 .or. &
norm_n .gt. size(S_star(:,0,0)) - 1 .or. &
k .gt. size(S_star(0,:,0)) - 1 .or. &
k .gt. size(S_star(0,0,:)) - 1) flag = 3
if (flag .ne. 0) then
return
else
! The initial stable point is m = (-1,0,...,0).
step = 1
m(0) = -1
do beta=1, k
m(beta) = 0
end do
kappa(0) = 1.0
! At the initial point m, set S and S_star to be identity matrices.
do alpha=0, k
do beta=0, k
if (alpha .eq. beta) then
S(0,alpha,beta) = 1.0
S_star(0,alpha,beta) = 1.0
else
S(0,alpha,beta) = 0.0
S_star(0,alpha,beta) = 0.0
endif
end do
S(1,alpha,0) = 0.0
S_star(1,alpha,0) = 0.0
end do
! The index i references the i'th point along the path from the
! initial point (-1,0,...,0) with index i=0 to the final point
! n with index i=last_i.
i = 0
last_i = 0
do beta=1,k
last_i = max(last_i, n(beta))
end do
last_i = min(n(0), last_i) + 1
do while (i.lt.last_i .and. flag.eq.0)
! Main loop
! ***************************************************************
! * The iteration moves from one stable point m with index i to *
! * the next stable point until the last point n with index *
! * last_i has been reached. *
! ***************************************************************
norm_m = sum(m)
! nu is the difference between the last point n and the
! current stable point m.
nu(0) = n(0) - m(0) - 1
do beta=1, k
nu(beta) = n(beta) - m(beta)
end do
step = 0
flag = 1
prevnorm_nus = -1
do while (i+step.lt.last_i .and. flag.gt.0)
! Inner loop
! ************************************************************
! * Given a stable point m with index i, examine successive *
! * points along the diagonal with indices i+step, step=1,...*
! * until a stable one is found or until the last point n has*
! * been examined. *
! ************************************************************
step = step + 1
flag = 1
! *********************************************************
! * Compute nus. The objective is to obtain the NPHS, *
! * S_hat, of type nus for the residual T corresponding to*
! * S and the NSPS, S_star_hat, of type nus for the *
! * residual T_star corresponding to S_star. Then the *
! * multiplications S*S_hat and S_star_hat*S_star yield *
! * the NPHS and NSPS of types m + nus + (1,0,...,0) for A*
! * and A_star with index i+step. Exit from the inner loop*
! * takes place only if it is determined that this point *
! * m + nus + (1,0,...,0) is stable or if it is the last *
! * point n. *
! *********************************************************
call gen_next_vector(nu, nus, last_i, i+step, k)
norm_nus = sum(nus)
knorm_nus = k*norm_nus
! Compute the residuals.
call build_T(A, S, k, m, prevnorm_nus, norm_nus, T)
call build_T_star(A, S_star, k, m, prevnorm_nus, &
norm_nus, T_star)
prevnorm_nus = norm_nus
! Determine S_hat of type nus for the residual T by solving
! striped Sylvester systems of equations.
call build_S(T, k, nus, norm_nus, S_hat, singular)
if (singular) flag = 2
! Determine S_star_hat of type nus for the residual
! T_star by solving mosaic Sylvester systems of equations
call build_S_star(T_star, k, nus, knorm_nus, &
S_star_hat, singular)
if (singular) flag = 2
! The coordinates of the point with index i+step are
! stored temporarily in new_m.
call gen_next_vector(n, new_m, last_i, i+step, k)
norm_new_m = sum(new_m)
! Obtain the NPHS and store in New_S
call mult_S(S, m, S_hat, nus, New_S, new_m, k)
! Obtain the NSPS and store in New_S_star
call mult_S_star(S_star, m, S_star_hat, nus, &
New_S_star, new_m, k)
! Scale the NPHS and NSPS.
call scale_S(New_S, new_m, k, gamma)
call scale_S_star(New_S_star, new_m, k, gamma_star)
! Compute the stability parameter.
if (flag .eq. 2) then
kappa(i+step) = huge(0.0)
else
kappa(i+step) = 0
do beta=0, k
kappa(i+step) = kappa(i+step) &
+ 1.0/(gamma(beta)*gamma_star(beta))
end do
if (kappa(i+step) .lt. tau) flag = 0
endif
! * End of inner loop. *
! ************************************************************
enddo
! ************************************************************
! * The point new_m with index i+step is either a stable *
! * point or the last point n (or, both). So copy new_m to *
! * m, New_S to S and New_S_star to S_star. *
! ************************************************************
i = i+step
m = new_m
norm_m = norm_new_m
do alpha=0, k
do beta=0, k
if (beta .eq. 0) then
S_deg = m(alpha) + 1
else
S_deg = m(alpha)
endif
do l=0, S_deg
S(l,alpha,beta) = New_S(l,alpha,beta)
end do
end do
end do
do alpha=0, k
do beta=0, k
if (alpha .eq. 0) then
S_star_deg = norm_m - m(beta)
else
S_star_deg = norm_m - m(beta) + 1
endif
do l=0, S_star_deg
S_star(l,alpha,beta) = New_S_star(l,alpha,beta)
end do
end do
end do
! * End of main loop. *
! ***************************************************************
enddo
endif
deallocate (S_hat, New_S, T, S_star_hat, New_S_star, T_star)
return
end subroutine VECTOR_PADE
      subroutine build_T(A, S, k, m, prevnorm_nus, norm_nus, T)
!***********************************************************************
!                                                                      *
!   Given the vector of power series A and the Pade'-Hermite system    *
!   S of type m, build_T returns the first norm_nus+1 terms of the     *
!   residual power series T. It is assumed that prevnorm_nus+1 terms   *
!   of T are already available and need not be computed.               *
!                                                                      *
!***********************************************************************
!                                                                      *
!   on entry                                                           *
!       A       real (0:sum(n), 0:k)                                   *
!               Vector of power series with A(0,0) nonzero.            *
!                                                                      *
!       k       integer                                                *
!               There are k+1 power series in A.                       *
!                                                                      *
!       S       real (0:maxval(n)+1, 0:k, 0:k)                         *
!               Normalized NPHS of type m.                             *
!                                                                      *
!       m       integer(0:k)                                           *
!               The type specification of S.                           *
!                                                                      *
!       T       real (0:sum(n), 0:k)                                   *
!               The residual power series.                             *
!               The first prevnorm_nus+1 terms are available on        *
!               entry.                                                 *
!                                                                      *
!       prevnorm_nus  integer                                          *
!               The first prevnorm_nus+1 terms of T are available      *
!               on entry.                                              *
!                                                                      *
!       norm_nus      integer                                          *
!               This routine is required to compute the first          *
!               norm_nus +1 terms of T (norm_nus > prevnorm_nus).      *
!                                                                      *
!   on exit                                                            *
!       T       real (0:sum(n), 0:k)                                   *
!               The first norm_nus+1 terms of the residual power       *
!               series.                                                *
!                                                                      *
!***********************************************************************
      implicit none
! build_T subroutine parameters.
      integer, intent (in)                       :: k, prevnorm_nus, &
                                                    norm_nus
      integer, dimension (0:), intent (in)       :: m
      real, dimension (0:,0:), intent (in)       :: A
      real, dimension (0:,0:,0:), intent (in)    :: S
      real, dimension (0:,0:), intent (inout)    :: T
! Local variables.
      integer alpha, beta, i, l, ll, start, finish, norm_m
      norm_m = sum(m)
! The first prevnorm_nus+1 terms of T are already available. For the
! remaining terms of T, first initialize new residual terms to 0.
      do beta=0, k
        do i=prevnorm_nus+1, norm_nus
          T(i,beta) = 0
        end do
      end do
! Accumulate the new coefficients of T by a truncated polynomial
! convolution of A with S.  The index translation l-norm_m-1 below
! shifts away the leading norm_m+1 terms of the product A*S.  The
! bounds start/finish clamp the inner sum to the stored degree of S:
! m(alpha)+1 in column 0, m(alpha) in the remaining columns.
      do alpha=0, k
        do beta=0, k
          do l=prevnorm_nus+norm_m+2, norm_nus+norm_m+1
            if (beta .eq. 0) then
              start = max(0, l-m(alpha)-1)
            else
              start = max(0, l-m(alpha))
            endif
            finish = min(l, norm_nus+norm_m+1)
            do ll=start, finish
              T(l-norm_m-1, beta) = T(l-norm_m-1, beta) &
                                    + A(ll,alpha) &
                                    * S(l-ll,alpha,beta)
            end do
          end do
        end do
      end do
      return
      end subroutine build_T
      subroutine build_T_star(A, S_star, k, m, prevnorm_nus, norm_nus, &
                              T_star)
!***********************************************************************
!                                                                      *
!   Given the vector of power series A and the simultaneous Pade'      *
!   system S_star of type m, build_T_star returns the first            *
!   norm_nus+1 terms of the residual power series T_star. It is        *
!   assumed that prevnorm_nus+1 terms of T_star are already            *
!   available and need not be computed.                                *
!***********************************************************************
!                                                                      *
!   on entry                                                           *
!       A       real (0:sum(n), 0:k)                                   *
!               Vector of power series with A(0,0) nonzero.            *
!                                                                      *
!       k       integer                                                *
!               There are k+1 power series in A.                       *
!                                                                      *
!       S_star  real (0:maxval(n)+1, 0:k, 0:k)                         *
!               Normalized NSPS of type m.                             *
!                                                                      *
!       m       integer (0:k)                                          *
!               The type specification of S_star.                      *
!                                                                      *
!       T_star  real (0:sum(n), 0:k, k)                                *
!               The residual power series.                             *
!               The first prevnorm_nus+1 terms are available on        *
!               entry.                                                 *
!                                                                      *
!       prevnorm_nus  integer                                          *
!               The first prevnorm_nus+1 terms of T_star are           *
!               available on entry.                                    *
!                                                                      *
!       norm_nus      integer                                          *
!               This routine is required to compute the first          *
!               norm_nus+1 terms of T_star                             *
!               (norm_nus > prevnorm_nus).                             *
!                                                                      *
!   on exit                                                            *
!       T_star  real (0:sum(n), 0:k, k)                                *
!               The first norm_nus+1 terms of the residual power       *
!               series.                                                *
!                                                                      *
!***********************************************************************
      implicit none
! build_T_star subroutine parameters.
      integer, intent (in)                       :: k, prevnorm_nus, &
                                                    norm_nus
      integer, dimension (0:), intent (in)       :: m
      real, dimension (0:,0:), intent (in)       :: A
      real, dimension (0:,0:,0:), intent (in)    :: S_star
      real, dimension (0:,0:,1:), intent (inout) :: T_star
! local variables.
      integer alpha, beta, i, l, finish, norm_m
      norm_m = sum(m)
! Multiply the vector A by the NSPS matrix S_star.
! The first prevnorm_nus+1 terms of T_star are already available, so
! compute only the remaining terms.
! Each new coefficient is the shifted difference
!   S_star(.,alpha,beta)*A(.,0) - S_star(.,alpha,0)*A(.,beta);
! the two inner l-loops below accumulate these two products.  The
! upper limit finish reflects the stored degree of S_star: row 0 has
! degree norm_m-m(beta), the other rows norm_m-m(beta)+1.
      do alpha=0, k
        do beta=1, k
          do i=prevnorm_nus+1, norm_nus
            T_star(i,alpha,beta) = 0.0
            if (alpha .eq. 0) then
              finish = min(i+norm_m+1, norm_m-m(0))
            else
              finish = min(i+norm_m+1, norm_m-m(0) + 1)
            endif
            do l=0, finish
              T_star(i, alpha, beta) = T_star(i, alpha, beta) &
                  - S_star(l,alpha,0) * A(norm_m+i-l+1,beta)
            end do
            if (alpha .eq. 0) then
              finish = min(i+norm_m+1, norm_m-m(beta))
            else
              finish = min(i+norm_m+1, norm_m-m(beta) + 1)
            endif
            do l=0, finish
              T_star(i, alpha, beta) = T_star(i, alpha, beta) &
                  + S_star(l,alpha,beta) * A(norm_m+i-l+1,0)
            end do
          end do
        end do
      end do
      return
      end subroutine build_T_star
      subroutine build_S(A, k, n, norm_n, S, singular)
!***********************************************************************
!                                                                      *
!   For the vector of integers n = [n(0),...,n(k)], build_S computes   *
!   the Pade'-Hermite system S of type n for A by solving directly     *
!   the striped Sylvester systems of linear equations associated       *
!   with A.                                                            *
!                                                                      *
!***********************************************************************
!                                                                      *
!   on entry                                                           *
!       A       real (0:sum(n), 0:k)                                   *
!               A vector of power series with A(0,0) nonzero.          *
!                                                                      *
!       k       integer                                                *
!               There are k+1 power series in A.                       *
!                                                                      *
!       n       integer (0:k)                                          *
!               The type specification of the NPHS.                    *
!                                                                      *
!       norm_n  integer                                                *
!               norm_n = n(0) + ... + n(k).                            *
!                                                                      *
!   on exit                                                            *
!       S       real (0:maxval(n)+1, 0:k, 0:k)                         *
!               Normalized Pade'-Hermite system of type n.             *
!                                                                      *
!       singular  logical                                              *
!               "true" if the associated striped Sylvester matrix      *
!               is singular and "false" otherwise.                     *
!                                                                      *
!   functions and subroutines                                          *
!       sgefa   Linpack routine to triangulate a matrix.               *
!       sgesl   Linpack routine to solve linear equations.             *
!                                                                      *
!***********************************************************************
      implicit none
! build_S subroutine parameters.
      integer, intent (in)                       :: k, norm_n
      integer, dimension (0:), intent (in)       :: n
      real, dimension (0:,0:), intent (in)       :: A
      real, dimension (0:,0:,0:), intent (out)   :: S
      logical, intent (out)                      :: singular
! Local variables.
      integer alpha, beta, i, l, blockoffset, info
! work areas - required by the subroutines sgefa and sgesl.
! M is the striped Sylvester matrix associated with A.
      integer ipvt(norm_n)
      real M(norm_n, norm_n), B(norm_n)
! Initialize S(z): zero the first column's two lowest coefficients,
! put the identity in the trailing k x k block of the constant term,
! and set the first row of the constant term from A.
      do alpha=0, k
        S(0,alpha,0) = 0.0
        S(1,alpha,0) = 0.0
      end do
      do alpha=1, k
        do beta=1, k
          if (alpha .eq. beta) then
            S(0,alpha,beta) = 1.0
          else
            S(0,alpha,beta) = 0
          endif
        end do
      end do
      do beta=1, k
        S(0,0,beta) = -A(0,beta) / A(0,0)
      end do
      if (norm_n .eq. 0) then
! Special case.
! S is a diagonal matrix with modified first row.
        S(1,0,0) = 1 / A(0,0)
        singular = .false.
      else
! Build the striped Sylvester matrix M.  Each stripe beta contributes
! n(beta) lower-triangular-Toeplitz columns formed from A(.,beta).
        blockoffset = 0
        do beta=0, k
          do i=0, norm_n-1
            do l=0, n(beta)-1
              if (l .le. i) then
                M(i+1, blockoffset+l+1) = A(i-l, beta)
              else
                M(i+1, blockoffset+l+1) = 0
              endif
            end do
          end do
          blockoffset = blockoffset + n(beta)
        end do
! Reduce the system to triangular form.
        call sgefa(M, norm_n, norm_n, ipvt, info)
! Compute the first column of S.
! NOTE(review): this solve runs even when sgefa reported a zero pivot
! (info > 0); sgesl may then divide by zero -- confirm intended.
        do i=1, norm_n-1
          B(i) = 0
        end do
        B(norm_n) = 1
        call sgesl(M, norm_n, norm_n, ipvt, B, 0)
! The first column of S has degree n(alpha)+1; the solved coefficients
! fill positions 2..n(alpha)+1 (positions 0 and 1 were zeroed above).
        blockoffset = 0
        do alpha=0, k
          do l=1, n(alpha)
            S(l+1,alpha,0) = B(blockoffset+l)
          end do
          blockoffset = blockoffset + n(alpha)
        end do
! Compute the last k columns of S.
        if (info .gt. 0) then
          singular = .true.
          do beta=1, k
            do alpha=0, k
              do l=0, n(alpha)
                S(l,alpha,beta) = 0
              end do
            end do
          end do
        else
          singular = .false.
          do beta=1, k
            do i=1, norm_n
              B(i) = -A(i,beta)+(A(i,0) * A(0,beta)/A(0,0))
            end do
            call sgesl(M, norm_n, norm_n, ipvt, B, 0)
            blockoffset = 0
            do alpha=0, k
              do l=1, n(alpha)
                S(l,alpha,beta) = B(blockoffset+l)
              end do
              blockoffset = blockoffset + n(alpha)
            end do
          end do
        endif
      endif
      return
      end subroutine build_S
subroutine build_S_star(A_star, k, n, knorm_n, S_star, singular)
!***********************************************************************
! *
! For the vector of integers n = [n(0),...,n(k)], build_S_star *
! computes the simultaneous Pade' system S_star of type n for *
! A_star by solving directly the mosaic Sylvester systems of *
! linear equations associated with A_star. *
! *
!***********************************************************************
! *
! on entry *
! A_star real(0:sum(n), 0:k,1:k) *
! k+1 x k matrix of power series. *
! *
! k integer *
! Dimension parameter. *
! *
! n integer(0:k) *
! The type specification of the NSPS. *
! *
! knorm_n integer *
! knorm_n = k * (n(0) + ... + n(k)). *
! *
! on exit *
! S_star real(0:sum(n), 0:k, 0:k) *
! Normalized Pade'-Hermite system of type n. *
! *
! singular logical *
! "true" if the associated mosaic Sylvester matrix *
! is singular and "false" otherwise. *
! *
! functions and subroutines *
! sgefa Linpack routine to triangulate a matrix. *
! sgesi Linpack routine to solve linear equations. *
! *
!***********************************************************************
implicit none
! build_S_star subroutine parameters.
integer, intent (in) :: k, knorm_n
integer, dimension (0:), intent (in) :: n
real, dimension (0:,0:,1:), intent (in) :: A_star
real, dimension (0:,0:,0:), intent (out) :: S_star
logical, intent (out) :: singular
! Local variables.
integer alpha, beta, l, i, j, info, norm_n, &
blockoffset, cblockoffset, rblockoffset
! work areas - required by the subroutines sgefa and sgesi.
! M_star is the mosaic Sylvester matrix associated with A_star.
integer ipvt(knorm_n)
real M_star(knorm_n,knorm_n), B_star(knorm_n)
norm_n = sum(n)
! Initialize S_star to zero.
do alpha=1, k
do beta=0, k
do l=0, 1
S_star(l,alpha,beta) = 0
end do
end do
end do
! Set the constant terms of the first row of S_star.
S_star(0,0,0) = 1.0
do beta=1, k
S_star(0,0,beta) = - A_star(0,0,beta) / A_star(0,beta,beta)
end do
if (norm_n .eq. 0) then
! Special case.
! S_star is a diagonal matrix with modified first row.
do alpha=1, k
S_star(1, alpha, alpha) = 1 / A_star(0,alpha,alpha)
end do
else
! Build the mosaic matrix M_star of order knorm_n.
rblockoffset = 0
do beta=1, k
cblockoffset = 0
do alpha=0, k
do i=1, norm_n
do j=1, norm_n - n(alpha)
If (i .lt. j) then
M_star(rblockoffset+i,cblockoffset+j) = 0
else
M_star(rblockoffset+i,cblockoffset+j) &
= A_star(i-j, alpha, beta)
endif
end do
end do
cblockoffset = cblockoffset + norm_n - n(alpha)
end do
rblockoffset = rblockoffset + norm_n
end do
! Reduce M_star into triangular form.
call sgefa(M_star,knorm_n,knorm_n,ipvt,info)
! Compute the first row of S_star.
blockoffset = 0
do beta=1, k
do i=1, norm_n
B_star(blockoffset+i) = -A_star(i,0,beta)
do j=1, k
B_star(blockoffset+i) = B_star(blockoffset+i) &
- A_star(i,j,beta)*S_star(0,0,j)
end do
end do
blockoffset = blockoffset + norm_n
end do
call sgesl(M_star, knorm_n, knorm_n, ipvt, B_star, 0)
blockoffset = 0
do beta=0, k
do l=1, norm_n-n(beta)
S_star(l,0,beta) = B_star(blockoffset + l)
end do
blockoffset = blockoffset + norm_n - n(beta)
end do
if (info .gt. 0) then
! M_star is singular
singular = .true.
do alpha=1, k
do beta=0, k
do l=0, norm_n-n(beta)+1
S_star(l,alpha,beta) = 0.0
end do
end do
end do
else
singular = .false.
do alpha=1, k
do i=1, knorm_n
B_star(i) = 0
end do
B_star(alpha*norm_n) = 1
call sgesl(M_star, knorm_n, knorm_n, ipvt, B_star, 0)
blockoffset = 0
do beta=0, k
do l=1, norm_n-n(beta)
S_star(l+1,alpha,beta) &
= B_star(blockoffset + l)
end do
blockoffset = blockoffset + norm_n - n(beta)
end do
end do
endif
endif
return
end subroutine build_S_star
      subroutine mult_S(S, m, S_hat, nus, New_S, new_m, k)
!***********************************************************************
!                                                                      *
!   mult_S multiplies the k+1 x k+1 matrix of polynomials S and        *
!   S_hat, where S is a NPHS of type m and S_hat is a NPHS of type     *
!   nus. The product New_S = S * S_hat is a NPHS of type new_m,        *
!   where new_m = m + nus + (1,0,...0).                                *
!                                                                      *
!   on entry                                                           *
!       S       real (0:maxval(n)+1,0:k,0:k)                           *
!               NPHS of type m.                                        *
!                                                                      *
!       m       integer (0:k)                                          *
!               The type specification of S.                           *
!                                                                      *
!       S_hat   real (0:maxval(n)+1,0:k,0:k)                           *
!               NPHS of type nus.                                      *
!                                                                      *
!       nus     integer (0:k)                                          *
!               The type specification of S_hat.                       *
!                                                                      *
!       new_m   integer (0:k)                                          *
!               The type specification of New_S.                       *
!               Must satisfy new_m = m + nus + (1,0,...0).             *
!                                                                      *
!       k       integer                                                *
!               Dimension parameter.                                   *
!                                                                      *
!   on exit                                                            *
!       New_S   real (0:maxval(n)+1,0:k,0:k)                           *
!               The result of S * S_hat. This is the NPHS of type      *
!               new_m.                                                 *
!                                                                      *
!***********************************************************************
      implicit none
! mult_S subroutine parameters.
      integer, intent (in)                       :: k
      integer, dimension (0:), intent (in)       :: m, nus, new_m
      real, dimension (0:,0:,0:), intent (in)    :: S, S_hat
      real, dimension (0:,0:,0:), intent (out)   :: New_S
! Local variables.
      integer alpha, beta, i, j, l, S_deg, start, finish
! Polynomial matrix product New_S = S * S_hat, coefficient by
! coefficient.  The degree bookkeeping reflects the NPHS structure:
! column 0 of an NPHS carries an extra degree (+1), hence the special
! cases for beta == 0 (degree of New_S and S_hat) and j == 0 (degree
! of S).
      do alpha=0, k
        do beta=0, k
          if (beta .eq. 0) then
            S_deg = new_m(alpha) + 1
          else
            S_deg = new_m(alpha)
          endif
          do l=0, S_deg
            New_S(l,alpha,beta) = 0
            do j=0, k
              if (beta .eq. 0) then
                start = max(0, l - nus(j) -1)
              else
                start = max(0, l - nus(j))
              endif
              if (j .eq. 0) then
                finish = min(l, m(alpha) + 1)
              else
                finish = min(l, m(alpha))
              endif
              do i=start, finish
                New_S(l,alpha,beta) = New_S(l,alpha,beta) &
                    + S(i,alpha,j) * S_hat(l-i,j,beta)
              end do
            end do
          end do
        end do
      end do
      return
      end subroutine mult_S
      subroutine mult_S_star(S_star, m, S_star_hat, nus, &
                             New_S_star, new_m, k)
!***********************************************************************
!                                                                      *
!   mult_S_star multiplies the k+1 x k+1 matrix of polynomials         *
!   S_star and S_star_hat, where S_star is a NSPS of type m and        *
!   S_star_hat is a NSPS of type nus. The product                      *
!   New_S_star = S_star_hat * S_star is a NSPS of type new_m where     *
!   new_m = m + nus + (1,0,...0).                                      *
!                                                                      *
!   on entry                                                           *
!       S_star  real (0:sum(n),0:k,0:k)                                *
!               NSPS of type m.                                        *
!                                                                      *
!       m       integer (0:k)                                          *
!               The type specification of S_star.                      *
!                                                                      *
!       S_star_hat  real (0:sum(n),0:k,0:k)                            *
!               NSPS of type nus.                                      *
!                                                                      *
!       nus     integer (0:k)                                          *
!               The type specification of S_star_hat.                  *
!                                                                      *
!       new_m   integer (0:k)                                          *
!               The type specification of New_S_star.                  *
!               Must satisfy new_m = m + nus + (1,0,...0).             *
!                                                                      *
!       k       integer                                                *
!               Dimension parameter.                                   *
!                                                                      *
!   on exit                                                            *
!       New_S_star  real (0:sum(n),0:k,0:k)                            *
!               The result of S_star_hat * S_star. This is the NSPS    *
!               of type new_m.                                         *
!                                                                      *
!***********************************************************************
      implicit none
! mult_S_star subroutine parameters.
      integer, intent (in)                       :: k
      integer, dimension (0:), intent (in)       :: m, nus, new_m
      real, dimension (0:,0:,0:), intent (in)    :: S_star, S_star_hat
      real, dimension (0:,0:,0:), intent (out)   :: New_S_star
! Local variables.
      integer alpha, beta, i, j, l, S_star_deg, start, finish, &
              norm_m, norm_nus, norm_new_m
      norm_m = sum(m)
      norm_nus = sum(nus)
      norm_new_m = sum(new_m)
! Polynomial matrix product New_S_star = S_star_hat * S_star.
! The degree bookkeeping mirrors the NSPS structure: row 0 has degree
! one less than the other rows, hence the special cases for
! alpha == 0 (degree of New_S_star and S_star_hat) and j == 0
! (degree of S_star).
      do alpha=0, k
        do beta=0, k
          if (alpha .eq. 0) then
            S_star_deg = norm_new_m - new_m(beta)
          else
            S_star_deg = norm_new_m - new_m(beta) + 1
          endif
          do l=0, S_star_deg
            New_S_star(l,alpha,beta) = 0.0
            do j=0, k
              if (j .eq. 0) then
                start = max(0, l - norm_m + m(beta))
              else
                start = max(0, l - norm_m + m(beta) - 1)
              endif
              if (alpha .eq. 0) then
                finish = min(l, norm_nus - nus(j))
              else
                finish = min(l, norm_nus - nus(j) + 1)
              endif
              do i=start, finish
                New_S_star(l,alpha,beta) = New_S_star(l,alpha,beta) &
                    + S_star_hat(i,alpha,j) * S_star(l-i,j,beta)
              end do
            end do
          end do
        end do
      end do
      return
      end subroutine mult_S_star
      subroutine scale_S(S, m, k, gamma)
!***********************************************************************
!                                                                      *
!   Scale the NPHS so that the 1-norm of each column is equal to 1.    *
!                                                                      *
!***********************************************************************
!                                                                      *
!   on entry                                                           *
!       S       real (0:maxval(n)+1, 0:k, 0:k)                         *
!               NPHS with r(0) = 1.                                    *
!                                                                      *
!       m       integer (0:k)                                          *
!               The type specification of S.                           *
!                                                                      *
!       k       integer                                                *
!               S is a k+1 x k+1 matrix of polynomials.                *
!                                                                      *
!   on exit                                                            *
!       S       real (0:maxval(n)+1, 0:k, 0:k)                         *
!               The scaled NPHS.                                       *
!                                                                      *
!       gamma   real(0:k)                                              *
!               The vector of scaling factors. The normalized NPHS     *
!               can be obtained from the scaled one by dividing        *
!               the beta'th column by gamma(beta).                     *
!                                                                      *
!***********************************************************************
      implicit none
! scale_S subroutine parameters.
      integer, intent (in)                       :: k
      integer, dimension (0:), intent (in)       :: m
      real, dimension (0:,0:,0:), intent (inout) :: S
      real, dimension (0:), intent (out)         :: gamma
! Local variables.
      integer alpha, beta, S_deg, l
! Initially, determine gamma(beta) to be the 1-norm of the beta'th
! column of S.  Column 0 has degree m(alpha)+1, the others m(alpha).
      do beta=0, k
        gamma(beta) = 0
        do alpha=0, k
          if (beta .eq. 0) then
            S_deg = m(alpha) + 1
          else
            S_deg = m(alpha)
          endif
          do l=0, S_deg
            gamma(beta) = gamma(beta) + abs(S(l,alpha,beta))
          end do
        end do
      end do
! Scale each column of S.  Columns with zero norm are left untouched.
      do beta=0, k
        if (gamma(beta) .ne. 0.0) then
          do alpha=0, k
            if (beta .eq. 0) then
              S_deg = m(alpha) + 1
            else
              S_deg = m(alpha)
            endif
            do l=0, S_deg
              S(l,alpha,beta) = S(l,alpha,beta) / gamma(beta)
            end do
          end do
        endif
      end do
! It is assumed on entry that r(0)=1, so gamma(0) is simply the
! inverse of the norm of the first column of S prior to scaling.
! The remaining scaling factors are determined from the diagonal
! of V(0), read from the ALREADY-SCALED S -- do not reorder these
! statements relative to the scaling loop above.
! NOTE(review): gamma(0) = 1/gamma(0) has no zero guard; a zero
! first-column norm would trap -- presumably excluded by r(0) = 1.
      gamma(0) = 1/gamma(0)
      do beta=1, k
        gamma(beta) = abs(S(0,beta,beta))
      end do
      return
      end subroutine scale_S
      subroutine scale_S_star(S_star, m, k, gamma_star)
!***********************************************************************
!                                                                      *
!   Scale the NSPS so that the norm of each row is equal to 1.         *
!                                                                      *
!***********************************************************************
!                                                                      *
!   on entry                                                           *
!       S_star  real (0:sum(n), 0:k, 0:k)                              *
!               NSPS with R_star(0) = I.                               *
!                                                                      *
!       m       integer (0:k)                                          *
!               The type specification of S_star.                      *
!                                                                      *
!       k       integer                                                *
!               S_star is a k+1 x k+1 matrix of polynomials.           *
!                                                                      *
!   on exit                                                            *
!       S_star  real (0:sum(n), 0:k, 0:k)                              *
!               The scaled NSPS.                                       *
!                                                                      *
!       gamma_star  real(0:k)                                          *
!               The vector of scaling factors. The normalized NSPS     *
!               can be obtained from the scaled one by dividing        *
!               the alpha'th row by gamma_star(alpha).                 *
!                                                                      *
!***********************************************************************
      implicit none
! scale_S_star subroutine parameters.
      integer, intent (in)                       :: k
      integer, dimension (0:), intent (in)       :: m
      real, dimension (0:,0:,0:), intent (inout) :: S_star
      real, dimension (0:), intent (out)         :: gamma_star
! Local variables.
      integer norm_m, alpha, beta, S_star_deg, l
      norm_m = sum(m)
! Initially, determine gamma_star(alpha) to be the 1-norm of the
! alpha'th row of S_star.  Row 0 has degree norm_m-m(beta), the
! other rows norm_m-m(beta)+1.
      do alpha=0, k
        gamma_star(alpha) = 0
        do beta=0, k
          if (alpha .eq. 0) then
            S_star_deg = norm_m - m(beta)
          else
            S_star_deg = norm_m - m(beta)+1
          endif
          do l=0, S_star_deg
            gamma_star(alpha) = gamma_star(alpha) &
                                + abs(S_star(l,alpha,beta))
          end do
        end do
      end do
! Scale each row of S_star.  Rows with zero norm are left untouched.
      do alpha=0, k
        if (gamma_star(alpha) .ne. 0.0) then
          do beta=0, k
            if (alpha .eq. 0) then
              S_star_deg = norm_m - m(beta)
            else
              S_star_deg = norm_m - m(beta)+1
            endif
            do l=0, S_star_deg
              S_star(l,alpha,beta) = S_star(l,alpha,beta) &
                                     / gamma_star(alpha)
            end do
          end do
        endif
      end do
! It is assumed on entry that R_star(0) = I, so that,
! for alpha=1,...,k, gamma_star(alpha) is simply the inverse
! of the norm of the first row of S_star prior to scaling.
! gamma_star(0) is read from the ALREADY-SCALED S_star.
      gamma_star(0) = abs(S_star(0,0,0))
      do alpha=1, k
        if (gamma_star(alpha) .ne. 0.0) then
          gamma_star(alpha) = 1.0/gamma_star(alpha)
        endif
      end do
      return
      end subroutine scale_S_star
subroutine gen_next_vector(n, m, last_i, i, k)
!***********************************************************************
! *
! gen_next_point generates the next point m beyond the point with *
! index i along the diagonal of the Pade' table passing through *
! the point n. *
! *
! on entry *
! n integer (0:k) *
! The final point in the Pade' table. *
! *
! last_i integer *
! The index of the point n. *
! *
! i integer *
! The current index. *
! *
! k integer *
! The index of the last entry in n. *
! *
! on exit *
! m integer (0:k) *
! The next point along the diagonal through n. *
! *
!***********************************************************************
implicit none
integer k
integer n(0:k), m(0:k), i, last_i
! Local variable.
integer beta
do beta=0, k
m(beta) = max(0, n(beta) - last_i + i)
end do
return
end subroutine gen_next_vector
SHAR_EOF
fi # end of overwriting check
cd ..
cd ..
# End of shell archive
exit 0
| true
|
0a7418f8edfdc1df6f68586a72d3ed9c6ed2a804
|
Shell
|
mahendranjayavel/deploy-to-k8s
|
/build.sh
|
UTF-8
| 1,086
| 3.21875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Build and push the hello-python Docker image tagged with the short
# git commit hash, render the Kubernetes manifest, and deploy through
# the XebiaLabs 'xl' CLI.
#
# Required environment:
#   REPO             Docker repository (e.g. docker.io/user/hello-python)
#   DOCKER_USER      Docker Hub username
#   DOCKER_PASSWORD  Docker Hub password
#   XLD_URL / XLD_USER / XLD_PASSWD  XL Deploy endpoint (download path)
#   GET_LOCAL        if set (non-empty), download the xl CLI locally
set -euo pipefail

export CLI_VERSION="10.0.0"

# Image tag is the short commit hash of HEAD.
TAG="$(git rev-parse --short HEAD)"
if [ -z "${TAG}" ]; then
  echo "TAG not set (git rev-parse produced no output)" >&2
  exit 1
fi
echo "Version = ${TAG}"

# Fail early with a clear message instead of building an image named ":tag".
: "${REPO:?REPO must be set}"
IMAGE="${REPO}:${TAG}"

echo "docker build -t ${IMAGE} -f Dockerfile ."
docker build -t "${IMAGE}" -f Dockerfile .

# --password-stdin keeps the secret off the process argument list.
printf '%s' "${DOCKER_PASSWORD}" \
  | docker login --username "${DOCKER_USER}" --password-stdin docker.io

echo "docker push ${IMAGE}"
docker push "${IMAGE}"

# Render the deployment manifest with the repository and tag substituted.
sed -e "s?{{docker_repo}}?${REPO}?g;s?{{image_tag}}?${TAG}?g" \
  kubernetes/deployment.yaml > kubernetes/hello-python.yaml

# NOTE(review): when GET_LOCAL is *unset* an 'xl' already on PATH is used;
# when it is set, a fresh CLI is downloaded.  This looks inverted relative
# to the variable name -- behavior preserved as-is, confirm intent.
if [ -z "${GET_LOCAL:-}" ]; then
  xl apply --file dai-deploy/hello-python-deploy.yaml --values version="${TAG}"
else
  curl -LO "https://dist.xebialabs.com/public/xl-cli/${CLI_VERSION}/linux-amd64/xl"
  chmod +x xl
  # Do not echo XLD_PASSWD or the full environment: CI logs are not a
  # safe place for credentials.
  echo "xld url ${XLD_URL}, xld user is ${XLD_USER}"
  ./xl apply --xl-deploy-url="${XLD_URL}" \
    --xl-deploy-username="${XLD_USER}" \
    --xl-deploy-password="${XLD_PASSWD}" \
    --file dai-deploy/hello-python-deploy.yaml --values version="${TAG}"
  ./xl version
  rm xl
fi

rm kubernetes/hello-python.yaml
| true
|
1f3707930e4faeb62621f3b4371363c237a23e08
|
Shell
|
mgonblan/kubernetes-cluster
|
/init-master.sh
|
UTF-8
| 2,838
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
# fixed: original read "# !/bin/bash" (a plain comment, not a shebang), so the
# kernel would not select bash when the script is executed directly.
# This script implements a kubernetes cluster using rancher
# First of all Install Docker as README.md
# Run as root
apt-get update && apt-get upgrade -y && apt-get dist-upgrade -y
apt-get install apt-transport-https \
    ca-certificates \
    curl \
    gnupg-agent \
    software-properties-common -y
# Add Docker's apt key and repository, then install the engine.
(curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -) && \
apt-key fingerprint 0EBFCD88 && \
add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) \
stable" && \
apt-get update && \
apt-get install docker-ce docker-ce-cli containerd.io -y
# Use the systemd cgroup driver as required by kubelet.
cat > /etc/docker/daemon.json <<EOF
{
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"log-opts": {
"max-size": "100m"
},
"storage-driver": "overlay2"
}
EOF
# Configure containerd
cat > /etc/modules-load.d/containerd.conf <<EOF
overlay
br_netfilter
EOF
modprobe overlay
modprobe br_netfilter
# Setup required sysctl params, these persist across reboots.
cat > /etc/sysctl.d/99-kubernetes-cri.conf <<EOF
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
sysctl --system
mkdir -p /etc/containerd
containerd config default > /etc/containerd/config.toml
sed -i 's/systemd_cgroup = false/systemd_cgroup = true/g' /etc/containerd/config.toml
systemctl start docker
systemctl enable docker
systemctl start containerd
systemctl enable containerd
# Install kubernetes:
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sudo sysctl --system
# Warn (but do not fail) about kernel modules kube-proxy / CNI plugins may need.
for module in br_netfilter ip6_udp_tunnel ip_set ip_set_hash_ip ip_set_hash_net iptable_filter iptable_nat iptable_mangle iptable_raw nf_conntrack_netlink nf_conntrack nf_conntrack_ipv4 nf_defrag_ipv4 nf_nat nf_nat_ipv4 nf_nat_masquerade_ipv4 nfnetlink udp_tunnel veth vxlan x_tables xt_addrtype xt_conntrack xt_comment xt_mark xt_multiport xt_nat xt_recent xt_set xt_statistic xt_tcpudp;
do
    if ! lsmod | grep -q "$module"; then
        echo "module $module is not present";
    fi;
done
# Open the control-plane ports: API server, etcd, kubelet/scheduler/controller.
# fixed: iptables port ranges use ':' (--dport port[:port]), not '-'.
iptables -A INPUT -p tcp --dport 6443 -j ACCEPT
iptables -A INPUT -p tcp --dport 2379:2380 -j ACCEPT
iptables -A INPUT -p tcp --dport 10250:10252 -j ACCEPT
sudo apt-get update && sudo apt-get install -y apt-transport-https curl
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb https://apt.kubernetes.io/ kubernetes-xenial main
EOF
# kubelet refuses to run with swap enabled: disable it now and in fstab.
sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
swapoff -a
apt-get update
apt-get install -y kubelet kubeadm kubectl
apt-mark hold kubelet kubeadm kubectl
systemctl daemon-reload
systemctl restart kubelet
systemctl enable kubelet
| true
|
b0952d4f74220b780b802276b52986358a445410
|
Shell
|
epatpol/wc3_toggle_shortcuts
|
/toggleCustomKeys.sh
|
UTF-8
| 231
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
# Toggle Warcraft III custom hotkeys: rename CustomKeys.txt to
# CustomKeys.txt.off to disable them, and back again to re-enable.
enabled="./CustomKeys.txt"
disabled="./CustomKeys.txt.off"

if [ -e "$enabled" ]; then
    mv "$enabled" "$disabled"      # hotkeys currently on -> switch off
elif [ -e "$disabled" ]; then
    mv "$disabled" "$enabled"      # hotkeys currently off -> switch on
fi
| true
|
fbf982e9a82369790cb21975aac6cd0bc1285742
|
Shell
|
Pratar/RU41
|
/radiofid-openwrt-14.07-899484766f7b/files/root/scripts/L2-down.sh
|
UTF-8
| 383
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/sh
# Tear down the L2TPv3 "L2" tunnel whose parameters live in UCI
# (network.@tun[2]): delete the session, then the tunnel itself.
L2TPCMD=/usr/bin/l2tpv3tun
NUM=2
TUN_ID=$(uci get network.@tun[$NUM].tunid)
TUN_REMOTE_IP=$(uci get network.@tun[$NUM].server | awk -F "/" '{print $1}')
TUN_NAME=L2
# NOTE(review): TUN_LOCAL_IP is computed but never used below — kept for
# parity with the matching setup script; confirm before removing.
TUN_LOCAL_IP=$(uci get network.@ospf[0].id)
logger "Remove l2tp $TUN_NAME tun_id:$TUN_ID remote: $TUN_REMOTE_IP"
# The session must be removed before the tunnel that carries it.
$L2TPCMD del session tunnel_id "$TUN_ID" session_id "$TUN_ID"
$L2TPCMD del tunnel tunnel_id "$TUN_ID"
| true
|
77463c07e57fdeec571c5498052d4bb2110c4d8a
|
Shell
|
martin-g/dotfiles
|
/.bashrc
|
UTF-8
| 4,509
| 3.03125
| 3
|
[] |
no_license
|
# System-wide .bashrc file for interactive bash(1) shells.
# To enable the settings / commands in this file for login shells as well,
# this file has to be sourced in /etc/profile.
# If not running interactively, don't do anything
[ -z "$PS1" ] && return
# check the window size after each command and, if necessary,
# update the values of LINES and COLUMNS.
shopt -s checkwinsize
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then
debian_chroot=$(cat /etc/debian_chroot)
fi
# enable bash completion in interactive shells
if [ -f /etc/bash_completion ] && ! shopt -oq posix; then
. /etc/bash_completion
fi
# sudo hint
# Shown to members of the "admin" group until a successful sudo creates
# ~/.sudo_as_admin_successful.
if [ ! -e "$HOME/.sudo_as_admin_successful" ]; then
case " $(groups) " in *\ admin\ *)
if [ -x /usr/bin/sudo ]; then
cat <<-EOF
To run a command as administrator (user "root"), use "sudo <command>".
See "man sudo_root" for details.
EOF
fi
esac
fi
# if the command-not-found package is installed, use it
if [ -x /usr/lib/command-not-found -o -x /usr/share/command-not-found ]; then
function command_not_found_handle {
# check because c-n-f could've been removed in the meantime
if [ -x /usr/lib/command-not-found ]; then
/usr/bin/python /usr/lib/command-not-found -- $1
return $?
elif [ -x /usr/share/command-not-found ]; then
/usr/bin/python /usr/share/command-not-found -- $1
return $?
else
# 127 is the shell's conventional "command not found" exit status.
return 127
fi
}
fi
# MOI
#export GREP_OPTIONS="--color=auto"
export TERM=xterm-color
# set a fancy prompt (non-color, unless we know we "want" color)
case "$TERM" in
xterm-color)
#PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;36m\]\w\[\033[00m\]\$ '
# Colour prompt including the current git branch/state via __git_ps1.
PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;36m\]\w\[\033[01;31m\]$(__git_ps1 "(%s)")\[\033[00m\]\$ '
GIT_PS1_SHOWDIRTYSTATE=true
GIT_PS1_SHOWSTASHSTATE=true
GIT_PS1_SHOWUNTRACKEDFILES=true
GIT_PS1_SHOWUPSTREAM=auto
;;
*)
PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ '
;;
esac
cd /tmp
# don't overwrite existing files
set -o noclobber
export GPGKEY=5F504C16
# Development toolchain locations; PATH is assembled from these below.
export DEVEL_HOME="$HOME/devel"
export ANT_HOME="$DEVEL_HOME/ant-latest"
export MAVEN_HOME="$DEVEL_HOME/maven-latest"
export M2_HOME=$MAVEN_HOME
export SCALA_HOME="$DEVEL_HOME/scala-latest"
export PATH="$DEVEL_HOME/node/bin:$DEVEL_HOME/phantomjs/bin:$PATH"
export JAVA_5_HOME="/opt/jdk1.5.0_22"
export JAVA_6_HOME="$DEVEL_HOME/jdk1.6.0_37"
export JAVA_7_HOME="$DEVEL_HOME/java-7"
export JAVA_8_HOME="$DEVEL_HOME/java-8"
export JAVA_9_HOME="$DEVEL_HOME/java-9"
export PLAY_HOME="$DEVEL_HOME/play-latest"
export MONGODB_HOME="/opt/mongodb"
export SBT_HOME="$DEVEL_HOME/sbt"
export NODE_PATH="$DEVEL_HOME/node/bin:$DEVEL_HOME/node:$DEVEL_HOME/node/lib/node_modules"
export PATH="$ANT_HOME/bin:$MAVEN_HOME/bin:$MVN_SHELL_HOME/bin:$JAVA_8_HOME/bin:$SCALA_HOME/bin:$PATH:/var/lib/gems/1.8/bin"
export PATH="~/bin:$DEVEL_HOME/grails-2.2.4/bin:$PLAY_HOME:$DEVEL_HOME/apache-ant-1.9.4/bin:$SBT_HOME/bin:$PATH"
export EDITOR=vim
export MC_COLOR_TABLE=":editnormal=lightgray,gray:editbold=yellow,gray:editmarked=black,lightgray"
export SVN_EDITOR=/home/martin/bin/svneditor
export PWDSH_SAFE="$HOME/.pwd.sh.safe"
#export LC_ALL=es_ES.UTF-8
# Unset size limits -> unlimited shell history.
unset HISTSIZE
unset HISTFILESIZE
# Creates a folder and goes inside it.
#
# $1 - directory path to create/enter; now safe for paths containing
#      spaces (the expansions were previously unquoted) and propagates
#      a non-zero status if mkdir or cd fails.
function md {
    mkdir -p "$1" || return
    cd "$1" || return
}
source ~/.aliases
# Show failed tests among all the surefire results.
function failedtests() {
# Scan up to three directory levels for surefire-reports folders and print
# "<report file> : <matched line>" for every non-zero Failures:/Errors: count.
for DIR in $(find . -maxdepth 3 -type d -name "surefire-reports") ; do
ruby -ne 'puts "#$FILENAME : #$&" if $_ =~ /(Failures: [1-9][0-9]*.*|Errors: [1-9][0-9]*.*)/' $DIR/*.txt
done
}
# Show the top tests that took the longest time to run from maven surefire reports
function slowtests() {
FILES=''
# Collect every surefire report text file up to three directory levels deep.
for DIR in $(find . -maxdepth 3 -type d -name "surefire-reports") ; do
FILES="$FILES $DIR/*.txt"
done
# Read the 4-line report headers, join name and timing onto one line,
# prefix with the elapsed seconds, then show the 10 slowest.
head -q -n 4 $FILES \
| ruby -ne 'gets; print $_.chomp + " "; gets; print gets' \
| ruby -ane 'printf "%8.03f sec: ", $F[-2].to_f; puts $_' \
| sort -r \
| head -10
}
# added by travis gem
[ -f /home/martin/.travis/travis.sh ] && source /home/martin/.travis/travis.sh
#if [ $TILIX_ID ] || [ $VTE_VERSION ]; then
# source /etc/profile.d/vte.sh
#fi
# fzf key bindings and completion, when installed.
[ -f ~/.fzf.bash ] && source ~/.fzf.bash
# Rust toolchain environment (cargo/rustup).
. "$HOME/.cargo/env"
| true
|
bdcd3e735dd3c97ace2cf275b50a6a35945579ba
|
Shell
|
CoreyGumbs/NetworkAndServerStructures
|
/bash_Assignment_Gumbs_Corey/scripts/functions.sh
|
UTF-8
| 279
| 3.203125
| 3
|
[] |
no_license
|
#!/usr/local/bin/bash
# This is a basic bash script.

# greet NAME -- print a greeting for NAME.
function greet {
    echo "Hi $1!"
}

echo "And now a greeting!"
greet Scott

# numberthings ITEM... -- print each argument prefixed with a 1-based index.
#with an array
function numberthings {
    local i f                 # keep the counter/loop var out of the global scope
    i=1
    for f in "$@"; do         # fixed: quote "$@" so arguments with spaces survive
        echo "$i: $f"
        ((i+=1))
    done
}

numberthings $(ls)
numberthings pine birch maple spruce
| true
|
df0fa8cecc718f10e0c87e0e2f61c80e19cfbfd1
|
Shell
|
bwinhwang/lfs-ci
|
/lib/artifacts.sh
|
UTF-8
| 11,257
| 3.9375
| 4
|
[] |
no_license
|
#!/bin/bash
## @file artifacts.sh
# @brief handling of artifacts of a build
# @details There is/was a problem in jenkins with artifacts handling.
# Espesally if we handling very big artifacts bigger than a few hundert MB.
# So we doing the handling of artifacts by yourself within the scripting.
# The artifacts are stored on the linsee share /build/home/psulm/LFS_internal/artifacts
# with the <jobName>/<buildNumber>.
#
# The cleanup of the share will be handled within the uc_admin.sh.
#
LFS_CI_SOURCE_artifacts='$Id$'
[[ -z ${LFS_CI_SOURCE_config} ]] && source ${LFS_CI_ROOT}/lib/config.sh
[[ -z ${LFS_CI_SOURCE_logging} ]] && source ${LFS_CI_ROOT}/lib/logging.sh
[[ -z ${LFS_CI_SOURCE_commands} ]] && source ${LFS_CI_ROOT}/lib/commands.sh
## @fn createArtifactArchive()
# @brief create the build artifacts archives and copy them to the share on the master server
# @details the build artifacts are not handled by jenkins. we are doing it by ourself, because
# jenkins can not handle artifacts on nfs via slaves very well.
# so we create a tarball of each bld/* directory. this tarball will be moved to the master
# on the /build share. Jenkins will see the artifacts, because we creating a link from
# the build share to the jenkins build directory
# @param <none>
# @return <none>
createArtifactArchive() {
requiredParameters JOB_NAME BUILD_NUMBER
local workspace=$(getWorkspaceName)
mustHaveWorkspaceName
mustExistDirectory "${workspace}/bld/"
# TODO: demx2fk3 2014-03-31 remove cd - changing the current directory isn't a good idea!
cd "${workspace}/bld/"
# Archive each real bld-<component>-<target> directory; symlinks are skipped.
for dir in bld-*-* ; do
[[ -d "${dir}" && ! -L "${dir}" ]] || continue
# TODO: demx2fk3 2014-08-08 fixme - does not work
# [[ -f "${dir}.tar.gz" ]] || continue
# NOTE(review): 'subsystem' is computed but never used below — confirm before removing.
local subsystem=$(cut -d- -f2 <<< ${dir})
info "creating artifact archive for ${dir}"
execute tar --create --use-compress-program=${LFS_CI_ROOT}/bin/pigz --file "${dir}.tar.gz" "${dir}"
copyFileToArtifactDirectory ${dir}.tar.gz
done
# Optionally drop a dummy README pointing users at the package job artifacts.
local shouldCreateReadMeFile=$(getConfig LFS_CI_create_artifact_archive_should_create_dummy_readme_file)
if [[ ${shouldCreateReadMeFile} ]] ; then
local readmeFile=${workspace}/bld/.00_README_these_arent_the_files_you_are_looking_for.txt
cat > ${readmeFile} <<EOF
Dear User,
These aren't the files you're looking for[1].
Please check the artifacts of the package job
Your LFS SCM Team
[1] https://www.youtube.com/watch?v=DIzAaY2Jm-s&t=190
EOF
copyFileToArtifactDirectory $(basename ${readmeFile})
fi
# Make the archives visible to Jenkins by linking the share into the build dir.
local artifactsPathOnShare=$(getConfig artifactesShare)/${JOB_NAME}/${BUILD_NUMBER}
linkFileToArtifactsDirectory ${artifactsPathOnShare}
return
}
## @fn mustHaveBuildArtifactsFromUpstream()
# @brief ensure, that the job has the artifacts from the upstream projekt, if exists
# @detail the method check, if there is a upstream project is set and if this project has
# artifacts. If this is true, it copies the artifacts to the workspace bld directory
# and untar the artifacts.
# @param <none>
# @return <none>
mustHaveBuildArtifactsFromUpstream() {
# Jenkins provides these for downstream builds; fail fast when missing.
requiredParameters UPSTREAM_PROJECT UPSTREAM_BUILD
copyAndExtractBuildArtifactsFromProject "${UPSTREAM_PROJECT}" "${UPSTREAM_BUILD}"
return
}
## @fn copyAndExtractBuildArtifactsFromProject()
# @brief copy and untar the build artifacts from a jenkins job from the master artifacts share
# into the workspace
# @param {jobName} jenkins job name
# @param {buildNumber} jenkins buld number
# @param <none>
# @return <none>
copyAndExtractBuildArtifactsFromProject() {
local jobName=$1
local buildNumber=$2
# Optional space-separated whitelist of component names to copy.
local allowedComponentsFilter="$3"
[[ -z ${jobName} ]] && return
local artifactesShare=$(getConfig artifactesShare)
local artifactsPathOnMaster=${artifactesShare}/${jobName}/${buildNumber}/save/
local serverName=$(getConfig LFS_CI_artifacts_storage_host)
# Missing directory simply yields an empty file list (stderr suppressed).
local files=$(runOnMaster ls ${artifactsPathOnMaster} 2>/dev/null)
local workspace=$(getWorkspaceName)
mustHaveWorkspaceName
debug "checking for build artifacts on share of project (${jobName}/${buildNumber})"
trace "artifacts files for ${jobName}#${buildNumber} on master: ${files}"
execute mkdir -p ${workspace}/bld/
local file=
for file in ${files}
do
# Archive names look like bld-<component>-<target>.tar.gz.
local base=$(basename ${file} .tar.gz)
local component=$(cut -d- -f2 <<< ${base})
if [[ "${allowedComponentsFilter}" ]] ; then
if ! grep -q " ${component} " <<< " ${allowedComponentsFilter} " ; then
debug "${component} / ${base} artifact was filtered out"
continue
fi
fi
# Unless overwriting is enabled, keep artifacts already fetched by another project.
local canOverwrite=$(getConfig LFS_CI_artifacts_can_overwrite_artifacts_from_other_project)
if [[ -z ${canOverwrite} && -d ${workspace}/bld/${base} ]] ; then
trace "skipping ${file}, 'cause it's already transfered from another project"
continue
fi
local canRemove=$(getConfig LFS_CI_artifacts_can_remove_artifacts_from_other_project -t base:${base})
if [[ ${canRemove} ]] ; then
# this is a special handling for pkgpool. Problem is, that pkgpool directory is called
# pkgpool and not bld-pkgpool-all.
local directoryMap=$(getConfig LFS_CI_artifacts_map -t base:${base})
trace "removing directory ${base} (aka ${directoryMap}) for bld directory"
execute rm -rf ${workspace}/bld/${directoryMap}
fi
info "copy artifact ${file} from job ${jobName}#${buildNumber} to workspace and untar it"
# Retried up to 10 times: the remote host sometimes drops the connection.
execute -r 10 rsync --archive --verbose --rsh=ssh -P \
${serverName}:${artifactsPathOnMaster}/${file} \
${workspace}/bld/
debug "untar ${file} from job ${jobName}"
execute tar --directory ${workspace}/bld/ \
--extract \
--use-compress-program=${LFS_CI_ROOT}/bin/pigz \
--file ${workspace}/bld/${file}
done
return
}
## @fn copyArtifactsToWorkspace()
# @brief copy artifacts of all releated jenkins tasks of a build to the workspace
# based on the upstream job
# @param <none>
# @return <none>
# Copy the artifacts of all downstream jobs triggered by the given build
# into the workspace bld/ directory.
#
# $1 - upstream jenkins job name
# $2 - upstream build number
# $3 - optional component whitelist passed through to
#      copyAndExtractBuildArtifactsFromProject
copyArtifactsToWorkspace() {
    local jobName=$1
    local buildNumber=$2
    local allowedComponentsFilter="$3"

    requiredParameters LFS_CI_ROOT

    info "copy artifacts to workspace for ${jobName} / ${buildNumber} with filter ${allowedComponentsFilter}"

    local file=""
    local downStreamprojectsFile=$(createTempFile)

    local serverPath=$(getConfig jenkinsMasterServerPath)
    mustHaveValue "${serverPath}" "server path"

    # TODO: demx2fk3 2015-03-09 FIXME SSH_LOAD replace this with other server
    local server=$(getConfig jenkinsMasterServerHostName)
    mustHaveValue "${server}" "server name"

    local workspace=$(getWorkspaceName)
    mustHaveWorkspaceName

    # Ask the master for the list of triggered downstream builds.
    execute -n -r 10 ssh ${server} \
    ${LFS_CI_ROOT}/bin/getDownStreamProjects -j ${jobName} \
    -b ${buildNumber} \
    -h ${serverPath} > ${downStreamprojectsFile}
    if [[ $? -ne 0 ]] ; then
        error "error in getDownStreamProjects for ${jobName} #${buildNumber}"
        exit 1
    fi

    # No downstream jobs: treat the upstream build itself as the data source.
    if [[ ! -s ${downStreamprojectsFile} ]] ; then
        printf "%d:SUCCESS:%s" ${buildNumber} ${jobName} > ${downStreamprojectsFile}
    fi

    local triggeredJobData=$(cat ${downStreamprojectsFile})
    # mustHaveValue "${triggeredJobData}" "triggered job data"
    # fixed: trace referenced the undefined variable ${triggeredJobNames}
    trace "triggered job data is: ${triggeredJobData}"

    execute mkdir -p ${workspace}/bld/

    # Each entry is <buildNumber>:<result>:<jobName>.
    for jobData in ${triggeredJobData} ; do
        local number=$(echo ${jobData} | cut -d: -f 1)
        local result=$(echo ${jobData} | cut -d: -f 2)
        local name=$( echo ${jobData} | cut -d: -f 3-)

        [[ ${result} = NOT_BUILT ]] && continue

        # fixed: message used the undefined variable ${nuber}
        trace "jobName ${name} buildNumber ${number} jobResult ${result}"
        if [[ ${result} != "SUCCESS" && ${result} != "NOT_BUILT" ]] ; then
            error "downstream job ${name} has ${result}. Was not successfull"
            exit 1
        fi
        copyAndExtractBuildArtifactsFromProject ${name} ${number} "${allowedComponentsFilter}"
    done

    return
}
## @fn copyFileToArtifactDirectory()
# @brief copy a file to the artifacts directory of the current build
# @param {fileName} path and name of the file
# @detail see also linkFileToArtifactsDirectory
# @return <none>
copyFileToArtifactDirectory() {
local fileName=$1
# jobName/buildNumber default to the current Jenkins build when omitted.
local jobName=${2:-${JOB_NAME}}
local buildNumber=${3:-${BUILD_NUMBER}}
local serverName=$(getConfig LFS_CI_artifacts_storage_host)
local artifactsPathOnShare=$(getConfig artifactesShare)/${jobName}/${buildNumber}
# executeOnMaster mkdir -p ${artifactsPathOnShare}/save
execute -r 10 ssh ${serverName} mkdir -p ${artifactsPathOnShare}/save
# sometimes, the remote host closes connection, so we try to retry...
execute -r 10 rsync --archive --verbose --rsh=ssh -P \
${fileName} \
${serverName}:${artifactsPathOnShare}/save
return
}
## @fn copyFileToUserContentDirectory()
# @brief copy a file to the sonar subdir in userContent directory
# @param {srcFilePathAndName} path and name of the file
# @param {destFilePath} sub-path of file under userContent
# @return <none>
copyFileToUserContentDirectory() {
local srcFilePathAndName=$1
# Destination sub-path below the Jenkins userContent directory.
local destFilePath=$2
local serverName=$(getConfig jenkinsMasterServerHostName)
mustHaveValue "${serverName}" "server name"
local jenkinsMasterServerPath=$(getConfig jenkinsMasterServerPath)
mustHaveValue "${jenkinsMasterServerPath}" "jenkins master serverpath"
local pathOnServer=${jenkinsMasterServerPath}/userContent/${destFilePath}
local fileName=$(basename ${srcFilePathAndName})
execute -r 10 ssh ${serverName} mkdir -p ${pathOnServer}
# sometimes, the remote host closes connection, so we try to retry...
execute -r 10 rsync --archive --verbose --rsh=ssh -P ${srcFilePathAndName} ${serverName}:${pathOnServer}/${fileName}
return
}
## @fn linkFileToArtifactsDirectory()
# @brief create a symlkink from the given name to the artifacts folder on the master.
# @warning the given fileName must be accessable via nfs from the master. otherwise, the
# link will not work in jenkins
# @param {fileName} name of the file
# @param {linkName} name of the link (can be empty)
# @return <none>
linkFileToArtifactsDirectory() {
local linkSource=$1
# NOTE(review): the second argument is accepted but never used below —
# the link is always created inside the build's archive directory; the
# doc header above also calls it {linkName}. Confirm intent before use.
local linkDestination=$2
info "create link to artifacts"
local artifactsPathOnMaster=$(getBuildDirectoryOnMaster)/archive
executeOnMaster mkdir -p ${artifactsPathOnMaster}
executeOnMaster ln -sf ${linkSource} ${artifactsPathOnMaster}
return
}
| true
|
20adb9227c25f82fb317093297aca53f3052d816
|
Shell
|
edu-rinaldi/MBTI-Predictor
|
/scraper/setup_scraper.sh
|
UTF-8
| 850
| 3.765625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Simple script for creating your personal .env file with PRAW credentials.

# Print the expected invocation.
function print_usage {
    echo -e "Error, command expect:"
    echo -e "\tsetup_scraper CLIENT_ID CLIENT_SECRET USER_AGENT REDDIT_USERNAME PASSWORD";
}

# few arguments --> print_usage and abort.
# fixed: the script previously kept going after printing usage and wrote
# an .env file full of empty values.
if [ "$#" -le 4 ] ; then
    print_usage
    exit 1
fi

# if file already exist we can remove it
if [ -f ".env" ]; then
    rm ".env"
fi

touch .env
echo -e "# PRAW Credentials\n" >> .env
echo -e "# Personal use script (14 characters)" >> .env
echo -e "CLIENT_ID=\"$1\"\n" >> .env
echo -e "# Secret key (27 characters)" >> .env
echo -e "CLIENT_SECRET=\"$2\"\n" >> .env
echo -e "# App name" >> .env
echo -e "USER_AGENT=\"$3\"\n" >> .env
echo -e "# Reddit username" >> .env
echo -e "REDDIT_USERNAME=\"$4\"\n" >> .env
echo -e "# Reddit password" >> .env
echo -e "PASSWORD=\"$5\"\n" >> .env
| true
|
1fe7fc81bd1da182b78ced0b25d109e9b24b118a
|
Shell
|
Bastiaanspanjaard/WES-analysis
|
/Pipeline_WES_ZNB_2.sh
|
UTF-8
| 4,589
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
# SNVs calling of dbh:MYCN zebrafish (neuroblastoma model).
# Reading configuration file (defines WORKDIR, SAMPLELIST, RESDIR, CONTROL, ...)
source ReadConfig.sh "$1"
# Samples and chromosomes counting
number_samples=$(wc -l "${WORKDIR}/${SAMPLELIST}" | awk '{print $1}') # Total number of samples
number_fastqs=$(wc -l "${WORKDIR}/${SAMPLELIST}Full" | awk '{print $1}') # Number of fastqs (R1 and R2 are one)
nchrs=$(wc -l < "${RESDIR}/chromosomes.txt") # Total number of chromosomes
ntumors=$(diff "${WORKDIR}/${SAMPLELIST}" "${WORKDIR}/${CONTROL}" | grep '^<' | wc -l) # Total number of tumor samples
echo "Analyzing $number_samples samples, including $ntumors tumor samples"
njobs_bytumor_bychr=$(( nchrs*ntumors )) # When splitting by chromosomes for each tumor sample
# Create a folder and put slurm output inside
#mkdir -p ${WORKDIR}
#cd ${WORKDIR}
# -f: do not fail or complain when the README does not exist yet
rm -f "${WORKDIR}/README"
echo "Launched at $(date)" > "${WORKDIR}/README_pipesteps"
echo "Using Configuration file $1" >> "${WORKDIR}/README_pipesteps"
# Fastqc on raw fastq files
echo "$(date): Started fastqcraw." >> "${WORKDIR}/README_pipesteps"
source FastQCRaw.sh "$1"
# Cutadapt to trim fastq files
#echo "`date`: Started cutadapt." >> ${WORKDIR}/README_pipesteps
#source Cutadapt.sh $1
# Fastqc on trimmed fastq files
#echo "`date`: Started fastqctrimmed." >> ${WORKDIR}/README_pipesteps
#source FastQCTrimmed.sh $1
# Align using bwa
#echo "`date`: Started aligning." >> ${WORKDIR}/README_pipesteps
#source BWA.sh $1
# Flagstat
#echo "`date`: Started flagstat." >> ${WORKDIR}/README_pipesteps
#source Flagstat.sh $1
# Sortsam
#echo "`date`: Started sortsam." >> ${WORKDIR}/README_pipesteps
#source SortSam.sh $1
# Genomecov
#echo "`date`: Started genomecov." >> ${WORKDIR}/README_pipesteps
#source Genomecov.sh $1
# SamtoolsDepth
#echo "`date`: Started samtoolsdepth." >> ${WORKDIR}/README_pipesteps
#source SamtoolsDepth.sh $1
# Mark duplicates, merge fastq files
#echo "`date`: Started markduplicates." >> ${WORKDIR}/README_pipesteps
#source MarkDuplicatesAndMerge.sh $1
#echo "`date`: Started merging." >> ${WORKDIR}/README_pipesteps
#source MergeBamFiles.sh $1
# Note that merging bam files is only necessary when there are multiple paired fastqs per sample - IS THIS STILL NECESSARY?
# Baserecalibrator
#echo "`date`: Started BaseRecalibratorI." >> ${WORKDIR}/README_pipesteps
#source BaseRecalibratorI.sh $1
#echo "`date`: Started BaseRecalibratorII." >> ${WORKDIR}/README_pipesteps
#source BaseRecalibratorII.sh $1
# Calculate Pileups
# In contrast to Laura, I am calculating pileups for all samples and then, in the next sample, calculate contamination of piled-up tumor sample with respect to piled-up normal
#echo "`date`: Started getpileup." >> ${WORKDIR}/README_pipesteps
#source GetPileup.sh $1
# Calculate contamination
#echo "`date`: Started contamination calculation." >> ${WORKDIR}/README_pipesteps
#source CalculateContamination.sh $1
# Run Mutect2 parallel by chromosome
#echo "`date`: Started mutation calling." >> ${WORKDIR}/README_pipesteps
#source MuTect2_mseq_bychr.sh $1
# Merge Mutect2 results
#echo "`date`: Started MergeMutect." >> ${WORKDIR}/README_pipesteps
#source MergeMutect2mseqVCFs.sh $1
# Annotate calls
#echo "`date`: Started annotating SNVs." >> ${WORKDIR}/README_pipesteps
#source AnnotateSNV.sh $1
#####
# COPY NUMBER
#Regular
#conda deactivate
#conda activate CNVkit
# CNVKit bins
#echo "`date`: Started CNV bin creation." >> ${WORKDIR}/README_pipesteps
#source CNVkit_access.sh $1
# CNVkit proper
#echo "`date`: Started CNVkit." >> ${WORKDIR}/README_pipesteps
#source CNVkit.sh $1
#conda deactivate
#conda activate WES
# Allele-specific
# Sequenza wiggle
#echo "`date`: Creating sequenza GC-wiggle file." >> ${WORKDIR}/README_pipesteps
#source Sequenza_wiggle.sh $1
# Sequenza bam2seqz
#echo "`date`: Running Sequenza bam2seqz." >> ${WORKDIR}/README_pipesteps
#source Sequenza.sh $1
# Sequenza binning
#echo "`date`: Running Sequenza binning." >> ${WORKDIR}/README_pipesteps
#source Sequenza_binning.sh $1
# SequenzaR to call copy numbers
#echo "`date`: Running SequenzaR." >> ${WORKDIR}/README_pipesteps
#source SequenzaR.sh $1
#jid_Facets=$(sbatch --array=2-${ntumors} -t 5:00:00 -p amd-shared --qos amd-shared ${SCRIPTDIR}/Facets.sh $1 | awk '{print $4}')
#echo "Facets.sh Job ID $jid_Facets" | tee -a ${WORKDIR}/$slurm_info/README
#######
# CLONAL DECONVOLUTION
#jid_PycloneVi=$(sbatch --array=1 -t 0:30:00 -p amd-shared --qos amd-shared ${SCRIPTDIR}/PyClone-vi.sh $1 | awk '{print $4}')
#echo "PyClone-vi.sh Job ID $jid_PycloneVi" | tee -a ${WORKDIR}/$slurm_info/README
echo "PIPELINE LAUNCHED"
| true
|
843681e1e214dd0b0c9f695401f0e6cef09ecb04
|
Shell
|
Rajas1312/stage3
|
/Sequences/leapYear.sh
|
UTF-8
| 290
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash -x
# Ask the user for a year and report whether it is a Gregorian leap year:
# divisible by 400 -> leap; by 100 (not 400) -> not; by 4 (not 100) -> leap.
read -p "Enter a Year" userInput

if [ $((userInput % 400)) -eq 0 ]
then
    echo "Leap year"
elif [ $((userInput % 100)) -eq 0 ]
then
    echo "not Leap year"
elif [ $((userInput % 4)) -eq 0 ]
then
    echo "leap year"
else
    echo "not leap year"
fi
| true
|
c00b1e83d31b68b2d31a100cebcde13ea278f512
|
Shell
|
bojieli/mirrors-log
|
/plot/req-ipv4-0
|
UTF-8
| 1,007
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
# Plot the request count and traffic share of the top-40 IPv4 first octets.
FILENAME=`basename $0`
TMPDIR="tmp"
PSDIR="ps"
MYSQL="/home/boj/infobright/mysql-ib"
# Grand totals used to normalise per-octet counts into fractions.
COUNT=`$MYSQL --column-names=0 -u root mirrors -e "SELECT COUNT(*) FROM log"`
LENGTH=`$MYSQL --column-names=0 -u root mirrors -e "SELECT SUM(length) FROM log"`
# A NULL first octet means the request came in over IPv6.
$MYSQL --column-names=0 -u root mirrors << EOF > "$TMPDIR/$FILENAME"
SELECT IFNULL(ipv4_0, 'IPv6'), c/$COUNT, s/$LENGTH FROM (SELECT ipv4_0, COUNT(*) AS c, SUM(length) AS s FROM log GROUP BY ipv4_0 ORDER BY s DESC LIMIT 40) AS t;
EOF
# Render the two-series histogram to PostScript with gnuplot.
gnuplot -p << EOF
set title 'Request \& Traffic among IPv4 first fields'
set xlabel ''
set ylabel 'Percentage'
set autoscale
set size 1.5,1.5
set grid noxtics ytics
set key box
set key bottom right
set terminal postscript eps enhanced color
set style data histogram
set style fill solid 0.4 border
set xtics rotate by -45
set key top right
set bmargin 3
set output "$PSDIR/$FILENAME.ps"
plot '$TMPDIR/$FILENAME' using 2:xticlabels(1) title 'Requests', '$TMPDIR/$FILENAME' using 3:xticlabels(1) title 'Traffic'
EOF
| true
|
11791dfc3270dd07562ed0f1d3d0bddd7372ce1a
|
Shell
|
shiv-rajawat/testing-packer
|
/scripts/deploy.sh
|
UTF-8
| 933
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash
# Deploy the ES cluster with Terraform, resolving the target VPC id by name.

# fixed: guard the cd — without it a failed cd would run terraform against
# whatever directory we happen to be in.
cd "$PWD/terraform-aws" || exit 1

vpcname=${ELK_VPC_NAME}
echo "$vpcname"
# Look up the id of the VPC whose Name tag matches $vpcname.
vpcid=$(aws ec2 describe-vpcs --query "Vpcs[?Tags[?Key=='Name']|[?Value=='$vpcname']].VpcId" --region "${AWS_REGION}" --output text)
size=${#vpcid}
echo "$size"
echo "$vpcid"
terraform init -backend-config="region=${AWS_REGION}"
#terraform plan -var "vpc_id=$vpcid" -var-file=../parameters/es-cluster-param.tfvars
terraform apply -var "access_key=$AWS_ACCESS_KEY_ID" -var "secret_key=$AWS_SECRET_ACCESS_KEY" -var "vpc_id=$vpcid" -var-file=../parameters/es-cluster-param.tfvars -auto-approve
terraform output > /var/lib/jenkins/pipeline-output.txt
#### Destroying everything
#terraform destroy -var "vpc_id=$vpcid" -var-file=../parameters/es-cluster-param.tfvars -auto-approve
#cd ../IAM
#terraform destroy -var-file=../parameters/iam-param.tfvars -auto-approve
#cd ../Pre-ELK
#terraform destroy -var-file=../parameters/pre-elk-param.tfvars -auto-approve
| true
|
8ec14a62087692ca9dd5c6369ff39bc93115553a
|
Shell
|
squiter/linux-setup
|
/bin/update_gems
|
UTF-8
| 2,691
| 4.34375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Usage: update_gems
#
set -euo pipefail
# Print an informational message in green, tagged with the calling function.
__green_echo() {
local green="$(tput setaf 2)"
local reset="$(tput sgr0)"
local message=$1 # First argument
# FUNCNAME[1] is the caller of this helper.
local function_callee=${FUNCNAME[ 1 ]}
echo -e `# Enable interpretation of backslash scapes` \
"${green} ==> " \
"[FUNCTION: $function_callee] " \
"$message ${reset}"
}
# Print a warning/progress message in yellow, tagged with the calling function.
__yellow_echo() {
local yellow="$(tput setaf 3)"
local reset="$(tput sgr0)"
local message=$1 # First argument
local function_callee=${FUNCNAME[ 1 ]}
echo -e `# Enable interpretation of backslash scapes` \
"${yellow} ==> " \
"[FUNCTION: $function_callee] " \
"$message ${reset}"
}
# Print an error message in red to STDERR, tagged with the calling function.
__red_echo() {
local red="$(tput setaf 1)"
local reset="$(tput sgr0)"
local message=$1 # First argument
local function_callee=${FUNCNAME[ 1 ]}
echo -e `# Enable interpretation of backslash scapes` \
"${red} ==> " \
"[FUNCTION: $function_callee] " \
"$message ${reset}" \
>&2 # Send to STDERR
}
# __update_gem GEM... : for each gem, run 'bundle update', execute the test
# suite, and either commit the Gemfile.lock change or roll it back.
function __update_gem(){
    local outdated_gem

    __update_dependency__() {
        __yellow_echo "💎 Updating $outdated_gem dependency..."
        bundle update "$outdated_gem" --quiet
        local status=0
        # Pick whichever test entry point the project provides.
        if [[ -f bin/rspec ]]; then
            result=$(bin/rspec | tee /dev/tty) || status=1
        elif [[ -d spec ]]; then
            # fixed: 'spec' is a directory, so test with -d (was -f, which never matched)
            result=$(bundle exec rspec spec | tee /dev/tty) || status=1
        elif [[ -f bin/rake ]]; then
            result=$(bin/rake test | tee /dev/tty) || status=1
        fi
        if [ "$status" -eq 0 ]; then
            __commit__ "$outdated_gem"
        else
            __fail__ "$outdated_gem"
        fi
    }

    __fail__() {
        __red_echo "💣 The test suite fails after 'bundle update $outdated_gem'"
        git checkout Gemfile.lock
        rm -rf bundler.txt
    }

    __commit__() {
        __green_echo "Generating commit with the message - Update $outdated_gem dependency."
        git add Gemfile.lock
        git commit -m "Update \`$outdated_gem\` dependency." --quiet || __green_echo "This gem $outdated_gem probably was already updated."
    }

    for gem in "$@"; do
        # fixed: outdated_gem was set once from $1, so with several arguments
        # every gem after the first was updated/labelled as the first one.
        outdated_gem=$gem
        __update_dependency__ "$gem"
    done
}
# Print 'bundle outdated' results, caching them in bundler.txt between runs.
__bundle_outdated(){
if [[ -e "bundler.txt" ]]; then
__yellow_echo "Using the saved file bundler.txt"
cat bundler.txt
else
# '|| true' keeps set -e happy: bundle outdated exits non-zero when gems are outdated.
bundle outdated --strict `# Will consider your Gemfile` | tee bundler.txt || true
fi
}
__main_update_all() {
__green_echo "It's gonna be LEGEN... Wait for it..."
__bundle_outdated
# Extract the gem names (2nd column) of every entry with a newer version.
gems=(
$(grep newest bundler.txt | awk '{ print $2 }')
)
for gem in "${gems[@]}"; do
__yellow_echo "Updating $gem"
__update_gem "$gem"
__green_echo "Finished update $gem"
done
# Remove unused file
rm -rf bundler.txt
__green_echo "... DARY! Legendary!"
}
__main_update_all
| true
|
83f90c911d2d4650c79c29d2e465abe10d5e9abf
|
Shell
|
congto/akshai-repo
|
/automount-share.sh
|
UTF-8
| 2,840
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
set -x
# -------------------------------------------------------------------------------------------------------
#
# This script calls manila manage on an existing share and auto-mounts it on a tenant. It will have to be
# run on the Manila Controller, and the admin account on the controller has to be able to ssh to the
# tenant, so that it can modify it's fstab entry and mount the new export location remotely.
#
# Usage:
# ./automount-share <current-export-location> <current-mount-point> <friendly-share-name>
# <share-type> <pool-name> <nfs-client-login> <nfs-client-ip>
#
# Example:
# ./automount-share.sh "10.250.118.52:/test_share" "/home/centos/test_share" test_share
# "ontap_share" "osk-mitaka@netapp#aggr1" centos 10.250.117.203
#
# --------------------------------------------------------------------------------------------------------
# Truncate any previous log so each run starts fresh.
if [ -f automount-share.log ]; then
    echo '' > automount-share.log
fi
# Mirror all stdout/stderr into the log file.
exec 1> >(tee -a automount-share.log) 2>&1
#fill these values in accordingly
if [ "$#" -ne 7 ]; then
    echo 'ERROR. usage: ./automount-share <current-export-location> <current-mount-point> <friendly-share-name> <share-type> <pool-name> <nfs-client-login> <nfs-client-ip>'
    exit 1
fi
old_export_path=$1
old_mount_point=$2
share_name=$3
share_type=$4
pool_name=$5
nfs_client_login=$6
nfs_client_ip=$7
# Bring the pre-existing export under manila management and extract the new id.
id=$(manila manage --name "$share_name" --share-type "$share_type" "$pool_name" nfs "$old_export_path")
id=$(echo "$id" | grep ' id ' | cut -d '|' -f 3 | sed -e 's/ //g')
# Poll once a second until the managed share becomes available.
status=$(manila show "$id" | grep ' status ' | cut -d '|' -f 3 | sed -e 's/ //g')
while [ "$status" != 'available' ]; do
    status=$(manila show "$id" | grep ' status ' | cut -d '|' -f 3 | sed -e 's/ //g')
    sleep 1
done
# The new export location of the now-managed share ("path=<ip>:/<share>").
export_path=$(manila show "$id" | grep "path" | cut -d '|' -f 3 | sed -e "s/ //g")
export_path=$(echo "$export_path" | sed -e "s/path=\(.*\)/\1/g")
#enable access to the manila share from NFS client
source keystonerc_admin
manila access-allow "$id" ip "$nfs_client_ip"
# fstab modification
# original_ip:/original_share /opt/wordpress-4.4.2-3/apps/wordpress/htdocs/wordpress_media/ nfs defaults 0 0
# ---change to--->
# new_ip:/new_share /opt/wordpress-4.4.2-3/apps/wordpress/htdocs/wordpress_media/ nfs defaults 1 0
old_ip="${old_export_path%%:/*}"
ip="${export_path%%:/*}"
# (dropped the useless $(echo ...) wrappers — plain parameter expansion)
old_exp=${old_export_path##*:}
exp=${export_path##*:}
# Swap ip and export path in the tenant's fstab (backup first), then remount.
ssh -t "$nfs_client_login"@"$nfs_client_ip" "sudo cp /etc/fstab /etc/fstab.bk"
ssh -t "$nfs_client_login"@"$nfs_client_ip" "sudo sed -i \"s|$old_ip|$ip|1\" /etc/fstab"
ssh -t "$nfs_client_login"@"$nfs_client_ip" "sudo sed -i \"s|$old_exp|$exp|1\" /etc/fstab"
ssh -t "$nfs_client_login"@"$nfs_client_ip" "sudo umount $old_mount_point"
ssh -t "$nfs_client_login"@"$nfs_client_ip" "sudo mount -a"
exit 0
| true
|
59e931b8ab07b7d15a8d83e89cd72fac775263df
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/trimage-git/PKGBUILD
|
UTF-8
| 819
| 2.578125
| 3
|
[] |
no_license
|
# Contributor: Kyle Keen <keenerd@gmail.com>
# PKGBUILD for trimage-git: builds Trimage straight from the upstream git
# repository; it conflicts with and provides the regular 'trimage' package.
pkgname=trimage-git
# Static fallback version; the pkgver() function below supersedes it with the
# HEAD commit date at build time.
pkgver=20110928
pkgrel=1
pkgdesc="A GUI based lossless image compressor."
url="http://trimage.org"
arch=('any')
license=('MIT')
# Runtime deps: the PyQt GUI plus the external compressors Trimage drives.
depends=("python2-pyqt" "optipng" "advancecomp" "jpegoptim" "pngcrush")
makedepends=("git")
conflicts=("trimage")
provides=("trimage")
source=("git://github.com/Kilian/Trimage.git")
# git sources are not checksummed.
md5sums=("SKIP")
# Directory name the git source is checked out to.
_gitname="Trimage"
pkgver() {
  cd "$_gitname"
  # Version = commit date of HEAD as YYYYMMDD: take the '%ci' timestamp
  # ("2011-09-28 12:34:56 +0200"), drop everything from the first space,
  # then strip the dashes.
  git show -s --format='%ci' HEAD | sed -e 's/ .*//' -e 's/-//g'
}
build() {
  cd "$srcdir/$_gitname"
  # Rewrite every python shebang to python2. 'find -exec … +' is safe for
  # filenames with whitespace and batches files into a single sed invocation,
  # unlike the original 'find | xargs -n 1 sed' (one process per file,
  # breaks on spaces in paths).
  find . -type f -name '*.py' -exec sed -i 's|/usr/bin/env python|/usr/bin/env python2|' {} +
  sed -i 's|/usr/bin/env python|/usr/bin/env python2|' trimage
  python2 setup.py build
}
package() {
  # Install the built tree into the packaging root, prefixed under /usr.
  cd "${srcdir}/${_gitname}"
  python2 setup.py install --root="${pkgdir}" --prefix=/usr
}
| true
|
2c3132caa1815b0732e331df12541833c9d2407d
|
Shell
|
patrickchin/dotfiles
|
/i3blocks/scripts/volume
|
UTF-8
| 429
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
volume() {
	# Print the i3blocks status text, e.g. "VOL  45%" or "MUTE 45%".
	local vol muted
	vol=$(pulsemixer --get-volume | cut -d " " -f1)
	muted=$(pulsemixer --get-mute)
	# Original used '[ $(pulsemixer --get-mute) == 1 ]': the unquoted
	# substitution makes the test malformed if pulsemixer prints nothing,
	# and '==' is a bashism inside '['. Quote and compare with [[ ]].
	if [[ "$muted" == "1" ]];
	then
		printf "MUTE%3d%%" "$vol"
	else
		printf "VOL %3d%%" "$vol"
	fi
}
# i3blocks passes the mouse button (if any) in $BLOCK_BUTTON; adjust the
# mixer first so the text printed below reflects the click.
case $BLOCK_BUTTON in
	3) pulsemixer --toggle-mute ;; # right click, mute/unmute
	4) pulsemixer --change-volume +5 ;; # scroll up, increase
	5) pulsemixer --change-volume -5 ;; # scroll down, decrease
esac
# Always emit the (possibly just-updated) volume text for the bar.
volume
| true
|
1f41d5a2abb2120ceedbe3da1b8e09820946a2bc
|
Shell
|
ShuangLI59/person_search
|
/experiments/scripts/eval_test_with_gt.sh
|
UTF-8
| 644
| 3.0625
| 3
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"BSD-2-Clause"
] |
permissive
|
#!/usr/bin/env bash
# Evaluate a trained person-search model on the psdb test set using
# ground-truth boxes, distributed over 8 GPUs with MPI.
#
# Usage: eval_test_with_gt.sh NET ITER EXP_DIR

if [[ $# -ne 3 ]]; then
  echo "$(basename "$0") NET ITER EXP_DIR" >&2
  # A usage error must fail; the original bare 'exit' returned status 0.
  exit 1
fi

NET=$1      # network name (selects prototxt/cfg under models/psdb/, experiments/cfgs/)
ITER=$2     # training iteration of the snapshot to evaluate
EXP_DIR=$3  # experiment directory under output/psdb_train/ and experiments/logs/

mkdir -p "experiments/logs/${EXP_DIR}"

mpirun -n 8 python2 tools/eval_test.py \
  --gpu 0,1,2,3,4,5,6,7 \
  --gallery_def "models/psdb/${NET}/eval_gallery.prototxt" \
  --probe_def "models/psdb/${NET}/eval_probe.prototxt" \
  --net "output/psdb_train/${EXP_DIR}/${NET}_iter_${ITER}.caffemodel" \
  --cfg "experiments/cfgs/${NET}.yml" \
  --imdb psdb_test \
  --gallery_size 100 \
  --det_thresh 0.5 \
  --use_gt \
  --set EXP_DIR "${EXP_DIR}" \
  2>&1 | tee "experiments/logs/${EXP_DIR}/${NET}_iter_${ITER}_eval_test_with_gt.log"
| true
|
c1ec59f86d70970b940ed61e33acb774258726e4
|
Shell
|
Pandinosaurus/OTB
|
/Packaging/Files/otbenv.profile
|
UTF-8
| 1,772
| 3.09375
| 3
|
[
"BSL-1.0",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"AGPL-3.0-only",
"GPL-3.0-only",
"MIT",
"LGPL-3.0-only",
"Zlib",
"BSD-3-Clause",
"CECILL-B",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/sh
#
# Copyright (C) 2005-2022 Centre National d'Etudes Spatiales (CNES)
#
# This file is part of Orfeo Toolbox
#
# https://www.orfeo-toolbox.org/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
cat_path()
{
  # Join all non-empty arguments with ':' (builds PATH-like variables).
  # Only ever invoked inside $(...) substitutions, so the 'exit' calls
  # terminate just the command-substitution subshell.
  [ $# -eq 0 ] && exit 0
  [ $# -eq 1 ] && { echo "$1"; exit 0; }
  head="$1"
  shift 1
  tail=$(cat_path "$@")
  if [ -z "$head" ]; then
    echo "$tail"
  elif [ -z "$tail" ]; then
    echo "$head"
  else
    echo "$head:$tail"
  fi
}
# The below environment variables only affect current shell
# So if you run again from a terminal, you need to run the script again
# NOTE(review): every literal OUT_DIR below is presumably a placeholder the
# packaging step substitutes with the real install prefix -- confirm before
# editing these lines.
CMAKE_PREFIX_PATH=OUT_DIR
export CMAKE_PREFIX_PATH
# check and set OTB_APPLICATION_PATH (prepend the packaged applications dir,
# keeping any pre-existing value)
OTB_APPLICATION_PATH=$(cat_path "OUT_DIR/lib/otb/applications" "$OTB_APPLICATION_PATH")
# Add bin directory to system PATH
PATH=$(cat_path "OUT_DIR/bin" "$PATH")
# export PYTHONPATH to import otbApplication.py
PYTHONPATH=$(cat_path "OUT_DIR/lib/python" "$PYTHONPATH")
# set numeric locale to C
LC_NUMERIC=C
# set GDAL_DATA variable used by otb application
GDAL_DATA=OUT_DIR/share/gdal
PROJ_LIB=OUT_DIR/share/proj
# 'disable' presumably turns off GDAL driver-plugin auto-loading -- verify
# against the GDAL version this package ships.
export GDAL_DRIVER_PATH=disable
# export variables
export LC_NUMERIC
export GDAL_DATA
export PROJ_LIB
export OTB_APPLICATION_PATH
export PATH
export PYTHONPATH
| true
|
2ba3ee2f14361114d7ead6b9a84af041db4442a8
|
Shell
|
ricardojosegomezulmke/az-use-case-perf-tests
|
/uc-non-persistent/infrastructure/azure/log-analytics/deploy.sh
|
UTF-8
| 8,837
| 4.125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Deploys a Log Analytics workspace via ARM template and attaches the
# monitoring VM extensions to the listed virtual machines.
# Variables
resourceGroupName="SolaceRG"
location="WestEurope"
workspaceName="MandalorianLogAnalytics"
workspaceSku="PerGB2018"
# ARM template and parameters files
template="./azuredeploy.json"
parameters="./azuredeploy.parameters.json"
# Virtual machines -- these must already exist in the resource group above.
virtualMachines=("TestVM1" "TestVM2")
# Read subscription id and name for the current subscription
subscriptionId=$(az account show --query id --output tsv)
subscriptionName=$(az account show --query name --output tsv)
# This function creates a resource group
# Creates the resource group if it does not already exist.
# Globals:   subscriptionName (read, for messages)
# Arguments: $1 - resource group name, $2 - Azure location
# Exits non-zero on invalid arguments or creation failure.
createResourceGroup() {
    local resourceGroupName=$1
    local location=$2

    # Parameters validation
    if [[ -z $resourceGroupName ]]; then
        echo "The resourceGroupName parameter cannot be null"
        exit 1
    fi

    if [[ -z $location ]]; then
        echo "The location parameter cannot be null"
        exit 1
    fi

    # Check if the resource group already exists
    echo "Checking if [$resourceGroupName] resource group actually exists in the [$subscriptionName] subscription..."

    if ! az group show --name "$resourceGroupName" &>/dev/null; then
        echo "No [$resourceGroupName] resource group actually exists in the [$subscriptionName] subscription"
        echo "Creating [$resourceGroupName] resource group in the [$subscriptionName] subscription..."

        # BUG FIX: the original tested '$? != 0' after 'az group create',
        # printing the success message on failure and exiting on success.
        if az group create \
            --name "$resourceGroupName" \
            --location "$location" 1>/dev/null; then
            echo "[$resourceGroupName] resource group successfully created in the [$subscriptionName] subscription"
        else
            echo "Failed to create [$resourceGroupName] resource group in the [$subscriptionName] subscription"
            exit 1
        fi
    else
        echo "[$resourceGroupName] resource group already exists in the [$subscriptionName] subscription"
    fi
}
# Function to validate an ARM template
validateTemplate() {
local resourceGroupName=$1
local template=$2
local parameters=$3
local arguments=$4
# Parameters validation
if [[ -z $resourceGroupName ]]; then
echo "The resource group name parameter cannot be null"
fi
if [[ -z $template ]]; then
echo "The template parameter cannot be null"
fi
if [[ -z $parameters ]]; then
echo "The parameters parameter cannot be null"
fi
if [[ -z $arguments ]]; then
echo "The arguments parameter cannot be null"
fi
echo "Validating [$template] ARM template against the [$subscriptionName] subscription..."
if [[ -z $arguments ]]; then
error=$(az deployment group validate \
--resource-group "$resourceGroupName" \
--template-file "$template" \
--parameters "$parameters" \
--query error \
--output json)
else
error=$(az deployment group validate \
--resource-group "$resourceGroupName" \
--template-file "$template" \
--parameters "$parameters" \
--arguments $arguments \
--query error \
--output json)
fi
if [[ -z $error ]]; then
echo "[$template] ARM template successfully validated against the [$subscriptionName] subscription"
else
echo "Failed to validate the [$template] ARM template against the [$subscriptionName] subscription"
echo "$error"
exit 1
fi
}
# Function to deploy an ARM template
deployTemplate() {
local resourceGroupName=$1
local template=$2
local parameters=$3
local arguments=$4
# Parameters validation
if [[ -z $resourceGroupName ]]; then
echo "The resource group name parameter cannot be null"
exit
fi
if [[ -z $template ]]; then
echo "The template parameter cannot be null"
exit
fi
if [[ -z $parameters ]]; then
echo "The parameters parameter cannot be null"
exit
fi
# Deploy the ARM template
echo "Deploying [$template$] ARM template to the [$subscriptionName] subscription..."
if [[ -z $arguments ]]; then
az deployment group create \
--resource-group $resourceGroupName \
--template-file $template \
--parameters $parameters 1>/dev/null
else
az deployment group create \
--resource-group $resourceGroupName \
--template-file $template \
--parameters $parameters \
--parameters $arguments 1>/dev/null
fi
if [[ $? == 0 ]]; then
echo "[$template$] ARM template successfully provisioned to the [$subscriptionName] subscription"
else
echo "Failed to provision the [$template$] ARM template to the [$subscriptionName] subscription"
exit -1
fi
}
# Create Resource Group
createResourceGroup "$resourceGroupName" "$location"

# Check if the Log Analytics workspace already exists
echo "Checking if [$workspaceName] Log Analytics workspace actually exists in the [$subscriptionName] subscription..."

az monitor log-analytics workspace show \
    --resource-group "$resourceGroupName" \
    --workspace-name "$workspaceName" &>/dev/null

if [[ $? != 0 ]]; then
    echo "No [$workspaceName] Log Analytics workspace actually exists in the [$subscriptionName] subscription"
    echo "Creating [$workspaceName] Log Analytics workspace in the [$subscriptionName] subscription..."

    # Deploy Log Analytics via ARM template
    deployTemplate \
        "$resourceGroupName" \
        "$template" \
        "$parameters" \
        "workspaceName=$workspaceName workspaceSku=$workspaceSku location=$location"

    # BUG FIX: the original tested '$? != 0' here, printing the success
    # message on failure and aborting on success.
    if [[ $? == 0 ]]; then
        echo "[$workspaceName] Log Analytics workspace successfully created in the [$subscriptionName] subscription."
    else
        echo "Failed to create [$workspaceName] Log Analytics workspace in the [$subscriptionName] subscription."
        exit 1
    fi
else
    echo "[$workspaceName] Log Analytics workspace already exists in the [$subscriptionName] subscription."
fi

# Retrieve Log Analytics workspace key (needed by the OMS agent extension).
echo "Retrieving the primary key for the [$workspaceName] Log Analytics workspace..."

workspaceKey=$(
    az monitor log-analytics workspace get-shared-keys \
        --resource-group "$resourceGroupName" \
        --workspace-name "$workspaceName" \
        --query primarySharedKey \
        --output tsv
)

if [[ -n $workspaceKey ]]; then
    echo "Primary key for the [$workspaceName] Log Analytics workspace successfully retrieved."
else
    echo "Failed to retrieve the primary key for the [$workspaceName] Log Analytics workspace."
    exit 1
fi

# Retrieve Log Analytics resource id (the workspace customerId).
echo "Retrieving the resource id for the [$workspaceName] Log Analytics workspace..."

workspaceId=$(
    az monitor log-analytics workspace show \
        --resource-group "$resourceGroupName" \
        --workspace-name "$workspaceName" \
        --query customerId \
        --output tsv
)

if [[ -n $workspaceId ]]; then
    echo "Resource id for the [$workspaceName] Log Analytics workspace successfully retrieved."
else
    echo "Failed to retrieve the resource id for the [$workspaceName] Log Analytics workspace."
    exit 1
fi

# JSON payloads for the OMS agent extension: the key goes in protected
# settings, the workspace id in plain settings.
protectedSettings="{\"workspaceKey\":\"$workspaceKey\"}"
settings="{\"workspaceId\": \"$workspaceId\"}"

# Deploy VM extensions for the virtual machines in the same resource group.
# Array expansion is quoted so VM names survive word-splitting.
for virtualMachine in "${virtualMachines[@]}"; do
    echo "Creating [OmsAgentForLinux] VM extension for the [$virtualMachine] virtual machine..."

    error=$(az vm extension set \
        --resource-group "$resourceGroupName" \
        --vm-name "$virtualMachine" \
        --name OmsAgentForLinux \
        --publisher Microsoft.EnterpriseCloud.Monitoring \
        --protected-settings "$protectedSettings" \
        --settings "$settings")

    if [[ $? == 0 ]]; then
        echo "[OmsAgentForLinux] VM extension for the [$virtualMachine] virtual machine successfully created."
    else
        echo "Failed to create the [OmsAgentForLinux] VM extension for the [$virtualMachine] virtual machine."
        echo "$error"
        exit 1
    fi

    echo "Creating [DependencyAgentLinux] VM extension for the [$virtualMachine] virtual machine..."

    error=$(az vm extension set \
        --resource-group "$resourceGroupName" \
        --vm-name "$virtualMachine" \
        --name DependencyAgentLinux \
        --publisher Microsoft.Azure.Monitoring.DependencyAgent)

    if [[ $? == 0 ]]; then
        echo "[DependencyAgentLinux] VM extension for the [$virtualMachine] virtual machine successfully created."
    else
        echo "Failed to create the [DependencyAgentLinux] VM extension for the [$virtualMachine] virtual machine."
        echo "$error"
        exit 1
    fi
done
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.