blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
69fd9e5436790aabb2a70e903eccc5e46f07eec2
|
Shell
|
GedMullen/fet
|
/computingold/v7/tap.sh
|
UTF-8
| 2,606
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
# tap.sh - rebuild the timetable database from spreadsheet data, run the
# cleanup/analysis queries, and stitch per-semester .fet files (input for
# the FET timetabling tool) out of SQL-generated XML fragments.
export SDIR=/home/ged/gedstuff/code/tap/scripts

echo "creating and running insert.sql..."
cat "$SDIR/inserthead.sql" > insert.sql
java SheetReader >> insert.sql
mysql -t -u student < insert.sql

echo "identifying room locks"
#TODO per semester
mysql -N -u student timetable < "$SDIR/roomlock.sql" > roomlock.sh
chmod +x roomlock.sh
./roomlock.sh > roomlock.xml

# (fixed typo in the progress message: "deleteing" -> "deleting")
echo "deleting incomplete rows..."
mysql -t -u student < "$SDIR/clean.sql" > deleted.txt

echo "performing data analysis..."
mysql -t -u student timetable -e "SELECT lecturer,semester,SUM(hours) FROM timetable GROUP BY 1,2;" > lecturers.txt
mysql -t -u student timetable -e "SELECT course,campus,semester,SUM(hours) FROM timetable GROUP BY 1,2,3;" > courses.txt
mysql -t -u student timetable -e "SELECT campus,room,semester,SUM(hours) FROM timetable GROUP BY 1,2,3;" > rooms.txt
mysql -t -u student timetable -e "SELECT * FROM timetable;" > included.txt
mysql -t -u student timetable -e "SELECT campus,semester,SUM(hours) FROM timetable GROUP BY 1,2;" > campus.txt
cp "$SDIR/analysis.html" .
cp "$SDIR/index.html" .

echo "generating students,teachers and subjects list..."
# Each list goes through temp.txt and is stripped of blank characters.
mysql -N -u student < "$SDIR/students.sql" > temp.txt
tr -d '[:blank:]' < temp.txt > students.xml
mysql -N -u student < "$SDIR/subjects.sql" > temp.txt
tr -d '[:blank:]' < temp.txt > subjects.xml
mysql -N -u student < "$SDIR/teachers.sql" > temp.txt
tr -d '[:blank:]' < temp.txt > teachers.xml

for i in 1 2
do
  echo "generating activities for sem$i..."
  mysql -N -u student < "$SDIR/actsem$i.sql" > "actsem$i.xml"
  tr -d '[:blank:]' < "actsem$i.xml" > temp.txt
  # Strip <Teacher>NLR</Teacher> entries (presumably a "no lecturer"
  # placeholder -- TODO confirm against the data).
  sed 's/<Teacher>NLR<\/Teacher>//' temp.txt > "actsem$i.xml"
  echo "generating time and space locks for sem$i..."
  mysql -N -u student < "$SDIR/timesem$i.sql" > temp.txt
  tr -d '[:blank:]' < temp.txt > "timelocksem$i.xml"
  mysql -N -u student < "$SDIR/spacesem$i.sql" > temp.txt
  tr -d '[:blank:]' < temp.txt > "spacelocksem$i.xml"
  echo "stitching sem$i.fet..."
  cat "$SDIR/top.xml" > "sem$i.fet" #days and hours
  cat students.xml >> "sem$i.fet"
  cat teachers.xml >> "sem$i.fet"
  cat subjects.xml >> "sem$i.fet"
  cat "$SDIR/tagslist.xml" >> "sem$i.fet" #list of tags
  cat "actsem$i.xml" >> "sem$i.fet"
  cat "$SDIR/roomsandbuildings.xml" >> "sem$i.fet" #list of rooms and campuses
  cat "$SDIR/timeconst.xml" >> "sem$i.fet" #teacher not available times max days etc
  cat "$SDIR/timesem$i.xml" >> "sem$i.fet" #teacher not available for a semester
  cat "timelocksem$i.xml" >> "sem$i.fet"
  cat "$SDIR/spaceconstsem$i.xml" >> "sem$i.fet" #tags to rooms/room not avail/TODO split up
  cat roomlock.xml >> "sem$i.fet" #TODO needs to be split for sems.
  cat "spacelocksem$i.xml" >> "sem$i.fet"
done
| true
|
ff14e783b4454f5010ea7e6ff3e1501a941bd992
|
Shell
|
aryehzapinsky/sleeper
|
/code_on_rpi/concatenate_videos.sh
|
UTF-8
| 279
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/bash
# Concatenate every .h264 clip in the current directory into a single
# dated .mp4 using MP4Box.
vidArray=()
for file in *.h264; do
  # Keep '-cat' and the filename as separate array elements so filenames
  # containing spaces survive intact (the original rebuilt the array
  # unquoted, word-splitting every entry).
  vidArray+=(-cat "$file")
done
# Invoke MP4Box directly instead of generating, executing and deleting a
# throwaway com.sh wrapper; the resulting dated output file is the same.
MP4Box "${vidArray[@]}" -fps 10 "$(date +%Y-%m-%d).mp4"
| true
|
53e2ba3c085cbbbad3cc8b41f94bdab7213e8b1c
|
Shell
|
KeyPecker/Tools
|
/get_llvm.sh
|
UTF-8
| 2,016
| 3.96875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
##
## Key Pecker
##
## Script to set source of llvm, clang and other tools for MacOS
## This script sets up sources from master branch, v7.0 and v6.0
##
## Usage:
## ./get_llvm.sh                # will setup master branch
## ./get_llvm.sh 7              # will setup llvm 7.0
## ./get_llvm.sh 6              # will setup llvm 6.0
## ./get_llvm.sh <any number>   # will default to master branch
##
# (The shebang was originally buried below this comment block; it must be
# the very first line of the file to take effect.)

# Git mirrors for LLVM and its subprojects.
LLVM_GIT_LOC=https://github.com/llvm-mirror/llvm.git
CLANG_GIT_LOC=https://github.com/llvm-mirror/clang.git
LLD_GIT_LOC=https://github.com/llvm-mirror/lld.git
POLLY_GIT_LOC=https://github.com/llvm-mirror/polly.git
LIBCXX_GIT_LOC=https://github.com/llvm-mirror/libcxx.git
LIBCXXABI_GIT_LOC=https://github.com/llvm-mirror/libcxxabi.git
COMPILERRT_GIT_LOC=https://github.com/llvm-mirror/compiler-rt.git
LIBCLC_GIT_LOC=https://github.com/llvm-mirror/libclc.git
CMAKE=/Applications/CMake.app/Contents/bin/cmake

# Default to master; "6" or "7" select the matching release branch.
BRANCH=master
VERSION=80
if [ "$1" == "6" ]; then
  BRANCH=release_60
  VERSION=60
fi
if [ "$1" == "7" ]; then
  BRANCH=release_70
  VERSION=70
fi

CLONE_SRC_DIR="llvm${VERSION}_src"
BUILD_DIR="llvm${VERSION}_build_osx"
# Use $HOME explicitly: tilde does NOT expand inside quotes, so the original
# "~/llvm"$VERSION stored a literal '~' (and the later ~/$INSTALL_DIR would
# have produced '~/~/llvmNN' as the cmake install prefix).
INSTALL_DIR="$HOME/llvm$VERSION"

echo "$CLONE_SRC_DIR"
echo "$BUILD_DIR"
echo "$INSTALL_DIR"
echo "Cloning branch: $BRANCH to: $CLONE_SRC_DIR"
echo "Build files are located in: $BUILD_DIR"
echo "LLVM will be installed to: $INSTALL_DIR"
echo "Continue? (y/n)"
read start_build
if [ "$start_build" == "y" ]; then
  echo "Started cloning Clang, LLVM"
  git clone "$LLVM_GIT_LOC" -b "$BRANCH" "$CLONE_SRC_DIR"
  # Subprojects live under tools/ and projects/ of the llvm source tree.
  cd "$CLONE_SRC_DIR/tools" || exit 1
  git clone "$CLANG_GIT_LOC" -b "$BRANCH"
  git clone "$LLD_GIT_LOC" -b "$BRANCH"
  git clone "$POLLY_GIT_LOC" -b "$BRANCH"
  cd ../projects || exit 1
  git clone "$LIBCXX_GIT_LOC" -b "$BRANCH"
  git clone "$LIBCXXABI_GIT_LOC" -b "$BRANCH"
  git clone "$COMPILERRT_GIT_LOC" -b "$BRANCH"
  # libclc is cloned from its default branch (no -b in the original).
  git clone "$LIBCLC_GIT_LOC"
  cd ../../ || exit 1
  mkdir -p "$BUILD_DIR"
  cd "$BUILD_DIR" || exit 1
  "$CMAKE" "../$CLONE_SRC_DIR" -DLLVM_TARGETS_TO_BUILD="X86;AMDGPU;NVPTX" -DCMAKE_OSX_ARCHITECTURES:STRING=x86_64 -DCLANG_DEFAULT_CXX_STDLIB:STRING=libc++ -DCMAKE_INSTALL_PREFIX="$INSTALL_DIR"
fi
| true
|
1b6581d3ded346273e6f591c78f5244844307c82
|
Shell
|
eliseuegewarth/3d_center_of_mass
|
/3d_center_of_mass/config_virtualenv.sh
|
UTF-8
| 784
| 3.625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Create a python3 virtualenv (via virtualenvwrapper) for this project.
# Usage: ./config_virtualenv.sh [env_name]   (defaults to the folder name)
if [ "$1" == "--help" ]; then
  echo -e "USAGE:\n$0 [env_name]\n"
else
  # virtualenvwrapper lives in different places for system vs user installs.
  if [ -f "/usr/local/bin/virtualenvwrapper.sh" ]; then
    source /usr/local/bin/virtualenvwrapper.sh
  else
    source ~/.local/bin/virtualenvwrapper.sh
  fi
  folder_name=${PWD##*/}
  if [ -n "$1" ]; then
    env_name=$1
  else
    echo "Using default name ${folder_name}"
    env_name=${folder_name}
  fi
  # Pick the requirements file. The original tested dev-requirements.txt
  # twice, so a plain requirements.txt was never picked up -- fixed here.
  if [ -f "dev-requirements.txt" ]; then
    requirements="-r dev-requirements.txt"
  elif [ -f "requirements.txt" ]; then
    requirements="-r requirements.txt"
  else
    requirements=""
  fi
  # ${requirements} is deliberately unquoted: it must expand to the two
  # words "-r <file>", or to nothing at all.
  # The original ended with 'exit 0 || echo ... && exit 1'; since 'exit 0'
  # terminates the shell unconditionally, the error branch was dead code.
  if cd .. && mkvirtualenv -p python3 -a ${folder_name} ${requirements} ${env_name}; then
    echo -e "\n\n\nRUN 'workon ${env_name}'\n"
    exit 0
  else
    echo "Need virtualenv and virtualenvwrapper installed."
    exit 1
  fi
fi
| true
|
cfb6b69d03599a778f78900317b1b8048e26f201
|
Shell
|
NonvolatileMemory/g-transformer
|
/exp_gtrans/prepare-randinit.sh
|
UTF-8
| 1,322
| 3.125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Copyright (c) Guangsheng Bao.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Usage:
# e.g.
#    bash prepare-randinit.sh iwslt17 exp_test

# Positional arguments: dataset name and experiment output directory.
data=$1
exp_path=$2

# Fixed pipeline settings: document-level input, BPE subwords, en -> de.
input=doc
code=bpe
slang=en
tlang=de
echo "$(date), exp_path: $exp_path, data: $data, input: $input, code: $code, slang: $slang, tlang: $tlang"

# Derived working directories for each preprocessing stage.
tok_path=$exp_path/$data.tokenized.$slang-$tlang
seg_path=$exp_path/$data.segmented.$slang-$tlang
bin_path=$exp_path/$data.binarized.$slang-$tlang

# (fixed typo in the progress message: "Prepraring" -> "Preparing")
echo "$(date), Preparing data..."
# tokenize and sub-word
bash exp_gtrans/prepare-bpe.sh "raw_data/$data" "$tok_path"
# data builder: document-level groups up to 1000 sentences; sentence-level
# uses groups of 1.
if [[ "$input" == "doc" ]]; then
    python -m exp_gtrans.data_builder --datadir "$tok_path" --destdir "$seg_path/" --source-lang "$slang" --target-lang "$tlang" --max-tokens 512 --max-sents 1000
elif [[ "$input" == "sent" ]]; then
    python -m exp_gtrans.data_builder --datadir "$tok_path" --destdir "$seg_path/" --source-lang "$slang" --target-lang "$tlang" --max-tokens 512 --max-sents 1
fi
# Preprocess/binarize the data
python -m fairseq_cli.preprocess --task translation_doc --source-lang "$slang" --target-lang "$tlang" \
       --trainpref "$seg_path/train" --validpref "$seg_path/valid" --testpref "$seg_path/test" --destdir "$bin_path" \
       --joined-dictionary --workers 8
| true
|
ef8fa06300808ab087a80525112f1bbce596468b
|
Shell
|
klpanagi/Thesis
|
/host-pc/network_configurations/share_internet.sh
|
UTF-8
| 595
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/sh
# Share one network's internet connection with another network.
# eg: If your Wifi adapter with internet access is called wlan0
# and your local Ethernet adapter is called eth0,
# then run:
#     ./share_my_internet.sh wlan0 eth0
# This will only last until you reboot your computer.

# Require both interface names: running with $1/$2 empty would flush the
# firewall and then install broken NAT/forward rules.
if [ $# -ne 2 ]; then
    echo "Usage: $0 <internet-iface> <local-iface>" >&2
    exit 1
fi

# Start from a clean slate: flush all rules and user-defined chains.
sudo iptables --flush
sudo iptables --table nat --flush
sudo iptables --delete-chain
sudo iptables --table nat --delete-chain

# NAT traffic leaving via the internet-facing interface ($1) and accept
# forwarded traffic arriving from the local interface ($2).
sudo iptables --table nat --append POSTROUTING --out-interface "$1" -j MASQUERADE
sudo iptables --append FORWARD --in-interface "$2" -j ACCEPT

# Enable kernel IP forwarding (not persistent across reboots).
sudo sysctl -w net.ipv4.ip_forward=1
| true
|
e5a6a6132711659f436dbb099147f0d2ced3579c
|
Shell
|
rummik/zsh-stt
|
/stt.plugin.zsh
|
UTF-8
| 2,083
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/zsh
# A speech recognition helper program.
# Requires Curl and SoX. Export `STT_SOX_BACKEND=alsa` if you don't have
# `libsox-fmt-pulse`.
# Language to be passed to Google's speech API.
# Derived from $LANG: strip the ".encoding" suffix and swap '_' for '-'
# (e.g. "en_US.UTF-8" -> "en-US").
export STT_LANG=${STT_LANG:-${${LANG/.*/}/_/-}}
# SoX recording backend.
# It may default to pulseaudio, but you can use alsa if you like.
export STT_SOX_BACKEND=${STT_SOX_BACKEND:-pulseaudio}
# Speech to text operation central.
# This *is* the right road, isn't it?
# Pipeline: record mic audio -> POST it to the speech API -> dispatch the
# recognised text as a command.
function stt {
	local api='http://www.google.com/speech-api/v1'
	-stt-record \
		| -stt-request "$api/recognize?lang=$STT_LANG" \
		| -stt-relay-cmd
}
# Relay commands to `-stt-cmd-$1`.
# Hello? You want to speak to cmd-search? Connecting you now.
function -stt-relay-cmd {
	# First whitespace-separated word is the command; the rest are args.
	read cmd args
	# Convert our command and arguments to lowercase.
	# (${(L)...} is zsh's lowercase expansion flag.)
	local cmd=${(L)cmd}
	local args=${(L)args}
	# ${=args} forces word splitting so each argument is a separate word.
	if functions -- "-stt-cmd-$cmd" > /dev/null; then
		"-stt-cmd-$cmd" ${=args}
	else
		-stt-unknown-cmd "$cmd" ${=args}
	fi
}
# Our only command.
# It opens things when asked politely.
function -stt-cmd-open {
	local cmd=${(L)1}
	# Only run the named program if it exists on $PATH.
	if which $cmd > /dev/null; then
		-stt-say Opening \"$cmd\".
		$cmd
	else
		-stt-error Could not open \"$cmd\".
	fi
}
# Say placeholder.
# We use espeak by default, but this can be replaced later by another plugin.
function -stt-say {
	print $@
	espeak <<< $@
}
# Error placeholder.
# Same as -stt-say, but directed to stderr.
function -stt-error {
	-stt-say $@ >&2
}
# Unknown command placeholder.
function -stt-unknown-cmd {
	-stt-error Unknown command: \"$1\".
}
# Record some vad silenced audio from the system's mic.
# It's so vad it's good.
# Emits mono 44.1kHz FLAC on stdout, trimmed by SoX's 'vad' and 'silence'
# effects so recording stops after a stretch of quiet.
function -stt-record {
	rec -q -V1 \
		-t $STT_SOX_BACKEND default \
		-t .flac -c 1 -r 44100 - \
		vad silence -l 0 1 2.0 10%
}
# Prod the API host until it gives utterance.
# Uploads stdin as a FLAC form field and extracts the 'utterance' property.
function -stt-request {
	curl -s $@ \
		--form file=@- \
		--header 'Content-Type: audio/x-flac; rate=44100' \
		| -stt-lazyJSON utterance
}
# Lazy JSON parser.
# Accepts JSON as STDIN, gives a property as output.
# It's sooo bad.
# (Perl-regex lookbehind match on "key":"value" -- no nesting support.)
function -stt-lazyJSON {
	grep --color=never -Po "(?<=\"$1\":\")(\\\\.|[^\"])+"
}
| true
|
37c899de53ed99bfa1b227fc7f00a88f3c7e3b87
|
Shell
|
bgoldfe2/csi-796-deep-learning
|
/archive/sca_env.bash
|
UTF-8
| 2,753
| 3.140625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Filename: sca_env.bash
# Purpose: Set environment variables to be used by the "Machine Learning and
# Artificial Intelligence Assistant” (MAI Assistant) application
# during deployment and at production runtime.
# The prefix "SCA" refers to Security Controls Assessors who are the
# initial target user of this application.
# Created: March 2019
###############################################################################
# Steps:
# 1. Create database.
# - Record the hostname in SCA_PGHOST below.
# - Record the port in SCA_PGPORT.
# - Record the database name in SCA_PGDATABASE.
# 2. Create the schema owner.
# - Record the username in SCA_PGSCHEMA_OWNER.
# - Record the password in SCA_PGSCHEMA_OWNER_PASSWORD.
# 3. Create the application database user.
# - Record the username in SCA_PGREADER.
# - Record the password in SCA_PGREADER_PASSWORD.
# 4. Determine the port that Angular will run on.
# - Record the port in SCA_ANGULAR_PORT.
# 5. Determine the port that Express REST will run on.
# - Record the port in SCA_EXPRESS_PORT.
# 6. Determine the Gitlab credentials and repo
# - Record the username in SCA_GITLAB_USER.
# - Record the password in SCA_GITLAB_PASSWORD.
# - Record the repo name in SCA_GITLAB_REPO.
###############################################################################
# Postgres database connection variables.
# These are common values used to connect to the PG database.
export SCA_PGHOST="a4devmvp2.cfaumaizfanj.us-east-1.rds.amazonaws.com"
export SCA_PGPORT="5432"
export SCA_PGDATABASE="a4devmvp2"
# Postgres database schema owner.
# This is the PG user account that owns the SCA schema objects.
# NOTE(review): live database credentials are hardcoded below. This file
# should not be committed with real secrets -- source them from a secrets
# manager or an untracked env file instead.
export SCA_PGSCHEMA_OWNER="a4devmvp2"
export SCA_PGSCHEMA_OWNER_PASSWORD="codeisfun"
# Postgres database application user.
# This is the PG user account that the SCA application
# uses to log into the database. This user must be granted
# privileges to select from the tables owned by the
# schema owner.
export SCA_PGREADER="a4devmvp2"
export SCA_PGREADER_PASSWORD="codeisfun"
# Port on which the Angular and Express apps are running
export SCA_ANGULAR_HOST="ec2-54-211-243-221.compute-1.amazonaws.com"
export SCA_ANGULAR_PORT="4200"
export SCA_EXPRESS_HOST="ec2-54-211-243-221.compute-1.amazonaws.com"
export SCA_EXPRESS_PORT="3000"
# Gitlab repo that contains the application source code
# (the password is intentionally blank here; supply it at deploy time)
export SCA_GITLAB_USER="bruce.h.goldfeder"
export SCA_GITLAB_PASSWORD=""
export SCA_GITLAB_REPO="gitlab.code.dicelab.net/DEEA-A4/A4/MVP2.git"
export SCA_GITLAB_REPO_NAME="MVP2"
# AWS Key location
export AWS_KEY_LOC="$HOME/Downloads/DEEA-CIO-GoldfederB.pem"
export EC2_USER="maintuser"
export EC2_URL="ec2-54-211-243-221.compute-1.amazonaws.com"
| true
|
b0e47f3214607fd51afe668e1351f1457dd93acf
|
Shell
|
slavov-vili/configs
|
/vim/setup.sh
|
UTF-8
| 652
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/bash
# Install this repo's neovim configuration: fetch vim-plug, then copy the
# init file, gvimrc, color schemes and plugins into place.
DIR=$(pwd)
# The original declared VIM_DIR with a stray leading space inside the quotes
# (" $HOME/..."); it only worked because every later use was unquoted and
# word-splitting dropped the space. Fixed and quoted throughout.
VIM_DIR="$HOME/.config/nvim"
mkdir -p "$VIM_DIR"
# Install vim-plug
curl -fLo "$HOME/.local/share/nvim/site/autoload/plug.vim" --create-dirs https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim
cp --verbose --force --remove-destination "$DIR/init.vim" "$VIM_DIR/init.vim"
cp --verbose --force --remove-destination "$DIR/gvimrc" "$HOME/.gvimrc"
cp --verbose --force --remove-destination --recursive "$DIR/colors" "$VIM_DIR/colors"
cp --verbose --force --remove-destination --recursive "$DIR/autoload/lightline" "$VIM_DIR/autoload"
cp --verbose --force --remove-destination --recursive "$DIR/plugin" "$HOME/.local/share/nvim/site"
| true
|
f5df2053f8e7e449ddae664ac27c78521a3762aa
|
Shell
|
Gryffindor-CN/k8s-installation
|
/k8s/init.sh
|
UTF-8
| 3,598
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
# Node bootstrap for a kubernetes 1.15.3 cluster on CentOS: installs docker,
# kubeadm/kubelet/kubectl, keepalived and haproxy, then applies the kernel
# and module settings kubernetes needs. (Comments translated from Chinese.)
# Install net-tools (provides ifconfig)
yum install net-tools -y
# Time synchronisation tooling
yum install -y ntpdate
# Install docker (original note suggested 19.8.06; 18.06.2 is pinned below)
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager \
  --add-repo \
  https://download.docker.com/linux/centos/docker-ce.repo
yum makecache fast
## Install the pinned version
yum install -y docker-ce-18.06.2.ce
# Install keepalived and haproxy (plus socat and ipvsadm)
yum install -y socat keepalived ipvsadm haproxy
# Install kubernetes components
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
exclude=kube*
EOF
yum install -y kubelet-1.15.3-0 kubeadm-1.15.3-0 kubectl-1.15.3-0 ebtables --disableexcludes=kubernetes
systemctl enable kubelet && systemctl start kubelet
# Other packages
yum install -y wget
# Disable SELinux and the firewall
systemctl stop firewalld
systemctl disable firewalld
setenforce 0
sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
# Disable system swap (required since Kubernetes 1.8)
swapoff -a
yes | cp /etc/fstab /etc/fstab_bak
cat /etc/fstab_bak |grep -v swap > /etc/fstab
# Make bridged L2 traffic pass through iptables FORWARD rules; this setting
# is required by CNI plugins
echo """
vm.swappiness = 0
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
""" > /etc/sysctl.conf
modprobe br_netfilter
sysctl -p
# Sync the clock now
ntpdate -u ntp.api.bz
# Enable IPVS (remove ip_vs_fo if the kernel has not been upgraded)
# NOTE(review): the heredoc below is unquoted, so the $? on the 'if' line
# expands when this file is WRITTEN (to the status of the previous command),
# not when ipvs.modules later runs -- likely meant to be \$?; verify.
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_fo ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack"
for kernel_module in \${ipvs_modules}; do
/sbin/modinfo -F filename \${kernel_module} > /dev/null 2>&1
if [ $? -eq 0 ]; then
/sbin/modprobe \${kernel_module}
fi
done
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs
# All machines need these kernel parameters set in /etc/sysctl.d/k8s.conf
# https://github.com/moby/moby/issues/31208
# ipvsadm -l --timout
# Fix long-lived connection timeouts in ipvs mode (any value below 900 works)
cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_intvl = 30
net.ipv4.tcp_keepalive_probes = 10
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
net.ipv4.neigh.default.gc_stale_time = 120
net.ipv4.conf.all.rp_filter = 0
net.ipv4.conf.default.rp_filter = 0
net.ipv4.conf.default.arp_announce = 2
net.ipv4.conf.lo.arp_announce = 2
net.ipv4.conf.all.arp_announce = 2
net.ipv4.ip_forward = 1
net.ipv4.tcp_max_tw_buckets = 5000
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 1024
net.ipv4.tcp_synack_retries = 2
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.netfilter.nf_conntrack_max = 2310720
fs.inotify.max_user_watches=89100
fs.may_detach_mounts = 1
fs.file-max = 52706963
fs.nr_open = 52706963
net.bridge.bridge-nf-call-arptables = 1
vm.swappiness = 0
vm.overcommit_memory=1
vm.panic_on_oom=0
EOF
sysctl --system
# Enable services at boot
# Start docker (insert an ExecStartPost that forces FORWARD policy to ACCEPT)
sed -i "13i ExecStartPost=/usr/sbin/iptables -P FORWARD ACCEPT" /usr/lib/systemd/system/docker.service
systemctl daemon-reload
systemctl enable docker
systemctl start docker
systemctl enable keepalived
systemctl enable haproxy
| true
|
e204d53f69b5e4a55db4f56e4b356f6cf68867fa
|
Shell
|
nandinibaride/gitpratice
|
/ifelseprb1.sh
|
UTF-8
| 535
| 3.09375
| 3
|
[] |
no_license
|
#! /bin/bash -x
# Generate five random three-digit numbers and print the smallest one.

# min_of_five A B C D E -- print the smallest of five integers.
# Replaces the original if/elif chain, which (a) referenced an undefined
# uppercase $C instead of $c, and (b) used strict '<' comparisons, so any
# tie for the minimum fell through to the final 'else' and printed $e.
min_of_five() {
  local min=$1 n
  for n in "$2" "$3" "$4" "$5"; do
    if (( n < min )); then
      min=$n
    fi
  done
  echo "$min"
}

a=$(( (RANDOM%900) +100 ))
b=$(( (RANDOM%900) +100 ))
c=$(( (RANDOM%900) +100 ))
d=$(( (RANDOM%900) +100 ))
e=$(( (RANDOM%900) +100 ))
min_of_five "$a" "$b" "$c" "$d" "$e"
| true
|
be8528eb32431a3fb90aa8311cb3eee7911d9865
|
Shell
|
joeblankenship1/tormap
|
/runme.sh
|
UTF-8
| 420
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
# Fetch Tor relay details from Onionoo and render them to KML/KMZ maps.
KMLDIR='/var/www/maps'
# NOTE: /tmp/tormap is a fixed, predictable temp path shared by tormap.py;
# fine for a single-user cron host, but consider mktemp -d otherwise.
TMPDIR='/tmp/tormap'
BINDIR='/usr/local/bin'

# mkdir -p is idempotent, so the original existence test was redundant.
mkdir -p "$TMPDIR"
cd "$TMPDIR" || exit 1
rm -f relays.json
# Onionoo serves gzip when asked for it; download compressed and unpack.
curl -sH 'Accept-encoding: gzip' "https://onionoo.torproject.org/details" -o "$TMPDIR/relays.json.gz"
gunzip relays.json.gz
python "$BINDIR/tormap.py"

# Package each generated KML as a KMZ next to it.
cd "$KMLDIR" || exit 1
for i in *.kml; do
  [ -e "$i" ] || continue   # guard against an unmatched glob
  # ${i%.kml} replaces the per-file `basename` subshell of the original.
  zip "${i%.kml}.kmz" "$i"
done
| true
|
2d052c95510b9645ca73258fc8063ac194bde0db
|
Shell
|
ivanvig/dotfiles
|
/.zprofile
|
UTF-8
| 192
| 2.59375
| 3
|
[] |
no_license
|
# Point XDG-aware programs at ~/.config and prefer user-local binaries.
export XDG_CONFIG_HOME="$HOME/.config"
export PATH="$HOME/.local/bin:$PATH"

# Auto-start X on the first virtual terminal once the graphical target is
# active and no display server is running yet.
if systemctl -q is-active graphical.target && [[ ! $DISPLAY && $XDG_VTNR -eq 1 ]]; then
  exec startx
fi
| true
|
a9be9c932711ac55c0b5f401573f825e1e686f1e
|
Shell
|
Gary-Hui/icinga-plugins-slack-notification
|
/slack_host
|
UTF-8
| 2,446
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
# slack_host
# Icinga 1.x (or Nagios) to push alerts into a channel of Slack via Incoming WebHooks
#
# Inspired by Smokeping Slack notification
#
# Modified by Stephen HC Lin
#
# Arguments (supplied by the Icinga/Nagios notification command):
#   $1 notification type, $2 host name, $3 host state, $4 host address,
#   $5 plugin output
NOTIFICATIONTYPE="$1"
HOSTNAME="$2"
HOSTSTATE="$3"
HOSTADDRESS="$4"
HOSTOUTPUT="$5"
#Set the message icon based on icinga service state
# (CRITICAL -> red "danger", OK -> green "good", everything else --
# WARNING, UNKNOWN, unexpected -- -> yellow "warning"; the original
# if/elif chain repeated the "warning" branch three times.)
case "$HOSTSTATE" in
  CRITICAL) color="danger" ;;
  OK)       color="good" ;;
  *)        color="warning" ;;
esac
#Construct Payload
#data='{
# "channel": "'${SLACK_CHANNEL}'",
# "username": "Icinga - LAB Systems",
# "text": "'${ICON}' HOST: '${ICINGA_HOSTNAME}' SERVICE: '${ICINGA_SERVICEDISPLAYNAME}' MESSAGE: '${ICINGA_SERVICEOUTPUT}' <http://9.191.69.228/icinga/|See Icinga>"}'
data='{
"username": "Icinga - LAB Hosts Check",
"attachments": [
{
"fallback": "Icinga '${NOTIFICATIONTYPE}': '${HOSTNAME}' is '${HOSTSTATE}'"
},
{
"title": "'${NOTIFICATIONTYPE}': '${HOSTNAME}' is '${HOSTSTATE}'",
"color": "'${color}'"
},
{
"text": " ",
"fields": [
{
"title": "HOST ADDRESS",
"value": "'${HOSTADDRESS}'",
"short": "true"
},
{
"title": "STATE",
"value": "'${HOSTSTATE}'",
"short": "true"
},
{
"title": "Additional Info",
"value": "'${HOSTOUTPUT}'",
"short": "true"
},
{
"title": "See Icinga",
"value": "http://YourICingaHost/icinga/",
"short": "true"
}
]
}
],
"channel": "#YourChannel"
}'
#Send message to Slack
# Quoted so the JSON payload is logged verbatim (the original unquoted
# 'echo $data' word-split it onto one mangled line).
printf '%s\n' "$data"
# Please do copy https://hooks.slack.com/services/Txxx/Bxxx/xxx from your Incoming WebHooks
curl -X POST -H "Content-type: application/json" --data "$data" https://hooks.slack.com/services/Txxxxxxxx/Bxxxxxxxx/xxxxxxxxxxxxxxxxxxxxxxxx
| true
|
daaafe6c3002d3ecc50bc62c05fd155fa643b650
|
Shell
|
travisclibsack/raspberry-pi
|
/comm/comm.sh
|
UTF-8
| 1,168
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/bash
#This sets up communications for the Pi (RXTX and Pi4j)

# Must run as root: we write under /etc and install packages.
if (( $EUID != 0 )); then
  echo "$(tput setaf 1)This must be run as root. Try 'sudo bash $0'.$(tput sgr0)"
  exit 1
fi

# NOTE: several messages below originally printed a literal "(tput setaf 2)"
# because the command-substitution '$' was missing; fixed throughout.
echo "$(tput setaf 2)This shell file will only download necessary communication files and software.$(tput sgr0)"
echo "$(tput setaf 2)Internet connection required.$(tput sgr0)"
read -p "$(tput bold ; tput setaf 1)Press [Enter] if download originated in /home/pi
[Ctrl-C] to abort and restart installation...$(tput sgr0)"

echo "$(tput setaf 2)Removing i2c communication from the Pi's blacklist.$(tput sgr0)"
echo -e "blacklist spi-bcm2708" > /etc/modprobe.d/raspi-blacklist.conf
# Append (>>): the original used '>' here, clobbering the spi line above.
echo -e "blacklist i2c-bcm2708" >> /etc/modprobe.d/raspi-blacklist.conf
# Append to /etc/modules; the original '>' overwrote the whole system file.
echo -e "i2c-dev" >> /etc/modules

echo "$(tput setaf 2)Installing i2c tools...$(tput sgr0)"
apt-get install i2c-tools
# The original ran 'add user pi i2c', which is not a command; adduser puts
# user 'pi' in the i2c group.
adduser pi i2c

echo "$(tput setaf 2)Downloading and Installing Pi4j (i2c connection via Java interface)....$(tput sgr0)"
wget http://pi4j.googlecode.com/files/pi4j-0.0.5.deb
sudo dpkg -i pi4j-0.0.5.deb
echo "$(tput setaf 2)Moved .jar files to main classpath.$(tput sgr0)"
| true
|
cccc3a597f00107aa40a795432ebcf48ae903b06
|
Shell
|
svensp/hetzner_cloud_pacemaker
|
/FloatingIP
|
UTF-8
| 5,874
| 3.546875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
#
# Resource Agent for managing hetzner cloud floating ip resources.
#
# License: MIT License (MIT)
# (c) 2018-2018 Sven Speckmaier
#
# Thank you to Tuomo Soini and Lars Marowsky-Brée for writing the IPaddr2
# ressource agent which served as reference and inspiration for this
#
#######################################################################
# Initialization:
# Source the standard OCF shell function library (provides check_binary,
# ocf_run and the OCF_* exit-code variables used below).
: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
# NOTE(review): ':' with a plain word argument is a no-op -- these two
# lines do NOT default-assign OCF_RESKEY_ip / OCF_RESKEY_api_token.
# The intent was probably ': ${OCF_RESKEY_ip=}' -- verify.
: OCF_RESKEY_ip=
: OCF_RESKEY_api_token=
# Resource parameters supplied by the cluster manager via OCF_RESKEY_*.
FLOATING_IP=${OCF_RESKEY_ip}
API_TOKEN=${OCF_RESKEY_api_token}
#######################################################################
#######################################################################
# Helper:
# Resolve and cache the numeric server id and floating-ip id from the
# cloud api into the SERVER_ID / FLOATING_IP_ID globals.
ip_init() {
	SERVER_ID="$(server_id)"
	FLOATING_IP_ID="$(ip_to_id)"
}
# Print this machine's numeric server id, found by matching the local
# hostname against the server name in the hetzner cloud api.
# Returns 1 (and prints nothing) when curl fails or no server matches.
server_id() {
	if ( curl -f -s -H "Authorization: Bearer $API_TOKEN" \
		"https://api.hetzner.cloud/v1/servers?name=$(hostname)" | \
		python3 -c "import sys, json;
jsonData = json.load(sys.stdin)
servers = jsonData['servers']
if len(servers) < 1:
    sys.exit(1)
print(servers[0]['id'])
sys.exit(0)
" ) ; then
		return 0
	fi
	return 1
}
# Print the numeric id of the configured floating ip (FLOATING_IP) by
# scanning the account's floating-ip list. Returns 1 when not found.
ip_to_id() {
	if ( curl -f -s -H "Authorization: Bearer $API_TOKEN" \
		https://api.hetzner.cloud/v1/floating_ips | \
		python3 -c "import sys, json, socket;
jsonData = json.load(sys.stdin)
floatingIps = jsonData['floating_ips']
for floatingIp in floatingIps:
    if floatingIp['ip'] == '${FLOATING_IP}':
        print(floatingIp['id'])
        sys.exit(0)
sys.exit(1)
" ) ; then
		return 0
	fi
	return 1
}
# Report whether the floating ip is currently routed to this server.
# Prints "ok" (routed here, returns 0), "no" (routed elsewhere, returns 1)
# or "not found" (ip absent from the account, returns 1).
ip_served() {
	if ( curl -f -s -H "Authorization: Bearer $API_TOKEN" \
		https://api.hetzner.cloud/v1/floating_ips | \
		python3 -c "import sys, json, socket;
jsonData = json.load(sys.stdin)
expectedServerId = ${SERVER_ID}
floatingIps = jsonData['floating_ips']
for floatingIp in floatingIps:
    if floatingIp['ip'] == '${FLOATING_IP}':
        if floatingIp['server'] == expectedServerId:
            print('ok')
            sys.exit(0)
        else:
            print('no')
            sys.exit(1)
print('not found')
sys.exit(2)
" ) ; then
		return 0
	fi
	return 1
}
# Validate runtime requirements and the resolved configuration.
# Exits with OCF_ERR_CONFIGURED when the hostname or floating ip could not
# be resolved through the cloud api (ip_init must have run first).
ip_validate() {
	check_binary curl
	check_binary python3
	if [ -z "$SERVER_ID" ] ; then
		# Diagnostics go to stderr so they never pollute agent stdout,
		# which the cluster manager may parse.
		echo "hostname $(hostname) not found in cloud api" >&2
		exit $OCF_ERR_CONFIGURED
	fi
	if [ -z "$FLOATING_IP_ID" ] ; then
		echo "floating_ip $FLOATING_IP not found in cloud api" >&2
		exit $OCF_ERR_CONFIGURED
	fi
}
#######################################################################
#######################################################################
# Actions:
# Print a short usage synopsis for this resource agent.
ip_usage() {
	printf 'usage: %s {start|stop|status|monitor|validate-all|meta-data}\nExpects to have a fully populated OCF RA-compliant environment set.\n' "$0"
}
# start: assign the floating ip to this server, retrying once per second
# until the api call succeeds. There is no internal timeout; the cluster
# manager's operation timeout bounds the loop.
ip_start() {
	while ! ( curl -f -s -X POST \
		-H "Content-Type: application/json" \
		-H "Authorization: Bearer $API_TOKEN" \
		-d '{"server":'$SERVER_ID'}' \
		"https://api.hetzner.cloud/v1/floating_ips/${FLOATING_IP_ID}/actions/assign" \
		> /dev/null ) ; do
		sleep 1s
	done
	exit $OCF_SUCCESS
}
# stop: unassign the floating ip, retrying until the api call succeeds;
# exits immediately with success when the ip is not served here anyway.
ip_stop() {
	if ! ip_served ; then
		exit $OCF_SUCCESS
	fi
	while ! ocf_run -q curl -f -s -X POST -H "Content-Type: application/json" -H \
		"Authorization: Bearer $API_TOKEN" \
		"https://api.hetzner.cloud/v1/floating_ips/${FLOATING_IP_ID}/actions/unassign"
	do sleep 1s
	done
	exit $OCF_SUCCESS
}
# monitor: map ip_served's stdout ("ok" / "no" / "not found") onto the
# OCF monitor states.
# NOTE(review): 'local' is a bashism under #!/bin/sh, though dash and
# busybox sh support it -- confirm the target shell.
ip_monitor() {
	local ip_status="$(ip_served)"
	case "${ip_status}" in
	"ok")
		exit $OCF_SUCCESS
		;;
	"no")
		exit $OCF_NOT_RUNNING
		;;
	"not found"|*)
		exit $OCF_ERR_GENERIC
		;;
	esac
}
# Print the OCF resource-agent metadata XML: the agent description, its
# two parameters (ip, api_token) and the supported actions. The heredoc
# body is emitted verbatim on stdout.
meta_data() {
	cat <<END
<?xml version="1.0"?>
<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
<resource-agent name="FloatingIP" version="0.1">
<version>0.1</version>
<longdesc lang="en">
Manage a hetzner cloud floating ip as cloud ressource.
Only routing the ip to this server is taken care of. Pair up with a IPaddr2
resource to properly activate the floating ip.
WARNING: This resource assumes that the hostname on the machine is the same as
its name in the Cloud Api
</longdesc>
<shortdesc lang="en">Manage a hetzner cloud floating ip as cloud ressource.</shortdesc>
<parameters>
<parameter name="ip" unique="1" required="1">
<longdesc lang="en">
The ip address to route to this host
e.g. 66.77.88.99
</longdesc>
<shortdesc lang="en">ip address</shortdesc>
<content type="string"/>
</parameter>
<parameter name="api_token" unique="0" required="1">
<longdesc lang="en">
The api token to access the hetzner cloud api. Created in Access/Api-Tokens under
https://console.hetzner.cloud</longdesc>
<shortdesc lang="en">Token for hetzner cloud api</shortdesc>
<content type="string" default="false"/>
</parameter>
</parameters>
<actions>
<action name="start" timeout="20" />
<action name="stop" timeout="20" />
<action name="monitor" timeout="20"
interval="10" depth="0" />
<action name="reload" timeout="20" />
<action name="migrate_to" timeout="20" />
<action name="migrate_from" timeout="20" />
<action name="meta-data" timeout="5" />
<action name="validate-all" timeout="20" />
</actions>
</resource-agent>
END
}
#######################################################################
#######################################################################
# Entrypoint:
# meta-data and usage/help must work without a configured ip or token, so
# they are dispatched before ip_init / ip_validate run.
case $__OCF_ACTION in
meta-data) meta_data
	exit $OCF_SUCCESS
	;;
usage|help) ip_usage
	exit $OCF_SUCCESS
	;;
esac
# All remaining actions need the resolved ids and a valid configuration.
ip_init
ip_validate
case $__OCF_ACTION in
start) ip_start
	;;
stop) ip_stop
	;;
# 'status' compares ip_served's stdout against "ok".
# NOTE(review): $ip_status is unquoted in the test below, so the
# "not found" output would make '[' fail with a syntax error (taking the
# else branch by accident) -- verify this is acceptable.
status) ip_status=`ip_served`
	if [ $ip_status = "ok" ]; then
		echo "running"
		exit $OCF_SUCCESS
	else
		echo "stopped"
		exit $OCF_NOT_RUNNING
	fi
	;;
monitor) ip_monitor
	;;
validate-all) ;;
*) ip_usage
	exit $OCF_ERR_UNIMPLEMENTED
	;;
esac
#######################################################################
| true
|
c53ae1bf9a04f871e1a4e6472cd585ba8bfacb00
|
Shell
|
kiminh/xfea
|
/run.sh
|
UTF-8
| 3,092
| 2.890625
| 3
|
[] |
no_license
|
###########################################################################
######### Build the required executables in ./offline first:      #########
#########     cd offline; sh build.sh                             #########
###########################################################################
#cd offline; sh build.sh
mkdir -p ./log
PERSON=hongqing
# Offline framework configuration
off_conf="offline/conf/offline.conf"
# There may be many config options, so they cannot all be passed as
# arguments; the following restrictions apply:
# 1. Put all configs in one directory -- ideally bisheng's conf directory,
#    to keep online and offline consistent
# 2. feature_list (and the ctr_map_txt settings to come) are read from
#    bisheng_ir_off.conf
# 3. Different product lines should use different names
bisheng_conf_dir="bisheng/conf/"
bisheng_root_conf="bisheng_ir_off.conf"
cur_date=20181007
task="ir"
end_date=20181007
done_file="tmp.done"
day_interval=1
base_op_dir=/user_ext/ads_fst/${PERSON}/xfea/
## columns: "label,ages,age,gender,platform,phone,location,network,hour,bidtype,style_type,psid,hierarchy_smooth_ctr,history_ctr,gender_feed_smooth_ctr,gender_cust_smooth_ctr,platform_feed_smooth_ctr,platform_cust_smooth_ctr,cust60_smooth_ctr,uid,custid,adid,feedid,promotion,position,style,link,show,zerocate,fircate,seccate"
## If the hive_table option is set, the hive table's schema is fetched automatically
## hive_table=sds_ad_algo_sfst_train_data2
# Walk backwards one interval at a time from cur_date down to end_date,
# extracting training data and running feature extraction for each day.
while [ $cur_date -ge $end_date ]
do
	input_dir=${base_op_dir}/${cur_date}/input
	# Some hive tables are stored in rcfile format; rewrite into a format
	# that can be parsed line by line
	hive_sql_before_run_fe_extract="select if(split(ie_cntx,',')[2]='0','0','1') as irl,age_s as ages,age,gender,platform,phone_brand as phone,location,network_type as network,pv_hour as hour,bid_type as bidtype,style_type,psid,hierarchy_smooth_ctr,history_ctr,gender_feed_smooth_ctr,gender_cust_smooth_ctr,platform_feed_smooth_ctr,platform_cust_smooth_ctr,cust60_smooth_ctr,uid,custid,adid,feedid,promotion_object as promotion,position,style,link,show,concat(substr(first_code,0,2),'000000') as zerocate,if(first_code is null,'0',first_code) as fircate,if(second_code is null,'0',second_code) as seccate from sds_ad_algo_sfst_train_data2 where dt='${cur_date}'"
	tmp_sql="insert overwrite directory '${input_dir}'
${hive_sql_before_run_fe_extract}"
	hive -e "$tmp_sql"
	if [ $? -ne 0 ]; then
		echo "run hive [$tmp_sql] failed!"
		exit 1
	fi
	# Run the offline feature-extraction pipeline for this day, logging to
	# log/offline_xfea.<task>.<date>.
	sh -x offline/run.sh "$cur_date" ${base_op_dir} $task $off_conf $bisheng_conf_dir $bisheng_root_conf >& log/offline_xfea.$task.$cur_date
	if [ $? -ne 0 ];then
		echo "Run [sh -x offline/run.sh "$cur_date" ${base_op_dir} $task $off_conf $bisheng_conf_dir $bisheng_root_conf] failed!"
		exit 1
	else
		echo "Run [sh -x offline/run.sh "$cur_date" ${base_op_dir} $task $off_conf $bisheng_conf_dir $bisheng_root_conf] successfully."
	fi
	echo "$cur_date `date \"+%Y-%m-%d %H:%M:%S\"`" >> $done_file
	cur_date=`date -d "$cur_date -$day_interval days" "+%Y%m%d"`
done
| true
|
ed4cadc5c154323138c1f728d0cbc224b5b70057
|
Shell
|
simplyguru-dot/dotFiles
|
/src/.bashrc
|
UTF-8
| 1,193
| 3.140625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# vim:ft=sh
# shellcheck source=/dev/null
# Interactive-shell bootstrap: put Homebrew (and its Ruby, Gems and
# Python3 installs) at the front of PATH, then source plugin and secret
# rc snippets from ~/.shellrc and ~/.secrc.
# Fix: command substitutions and $HOMEBREW_PATH are now quoted (SC2046/
# SC2086) so paths containing spaces no longer break the assignments;
# globs are left outside the quotes so they still expand.
HOMEBREW_PATH="$HOME/.homebrew"
RUBY_PATH=$(if [[ -d $HOMEBREW_PATH/Cellar/ruby ]]; then dirname "$(find "$HOMEBREW_PATH"/Cellar/ruby/*/bin -name "ruby" -print -quit)"; fi)
GEMS_PATH=$(if [[ -d $HOMEBREW_PATH/lib/ruby/gems ]]; then find "$HOMEBREW_PATH"/lib/ruby/gems/* -name "bin" -type d -print -quit; fi)
PYTHON3_PATH=$(if [[ -d $HOMEBREW_PATH/Cellar ]]; then dirname "$(find "$HOMEBREW_PATH"/Cellar/python*/*/bin -name "python3" -print -quit)"; fi)
PATH="$RUBY_PATH:$GEMS_PATH:$PYTHON3_PATH:$HOMEBREW_PATH/bin:$HOMEBREW_PATH/sbin:$PATH"
MANPATH="$HOMEBREW_PATH/share/man:$MANPATH"

# Core helper framework is mandatory; abort shell startup if missing.
. "$HOME"/.Files/framework.sh || exit 1

# Load rc plugins
# shellcheck source=/dev/null
if [ -d "$HOME"/.shellrc ]; then
  if [ "$(ls -A "$HOME"/.shellrc)" ]; then
    for file in "$HOME"/.shellrc/[A-Za-z]*.sh; do
      source "$file"
    done
  fi
else
  echo "404: ~/.shellrc folder not found"
fi

# Load local secret configurations just like GitHub tokens, etc
# shellcheck source=/dev/null
if [ -d "$HOME"/.secrc ]; then
  if [ "$(ls -A "$HOME"/.secrc)" ]; then
    for file in "$HOME"/.secrc/*; do
      source "$file"
    done
  fi
fi
| true
|
5cac1509e481ffee43e928edd26665a5fbdb7444
|
Shell
|
liulin1840/shell
|
/openssl证书sh/gen_cert.sh
|
UTF-8
| 1,233
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/sh
# Generate a small demo PKI (self-signed root CA plus CA-signed server
# and client certificates) under $work_dir.
work_dir=/tmp/cert
# Create a self-signed root CA (key + certificate) under ./ca, valid
# 10 years. The here-doc feeds the interactive `openssl req` DN prompts
# (country, state, city, org, unit, common name, email).
gen_ca()
{
[ ! -e ca ] && mkdir ca
# NOTE(review): 1024-bit RSA is weak by modern standards -- consider
# 2048 bits or more.
openssl genrsa -out ca/ca-key.pem 1024
openssl req -new -out ca/ca-req.csr -key ca/ca-key.pem << EOF
CN
CHINA
CHENGDU
ZK
YF
root
support@zkchina.com.cn
EOF
# Self-sign the CA request with its own key.
openssl x509 -req -in ca/ca-req.csr -out ca/ca-cert.pem -signkey ca/ca-key.pem -days 3650
}
# Generate the server RSA key and a CA-signed server certificate under
# ./server, valid 10 years. The here-doc answers the `openssl req` DN
# prompts.
# Fix: dropped -signkey from the x509 step. -signkey self-signs the
# certificate and is mutually exclusive with -CA (modern OpenSSL rejects
# the combination); the intent here is clearly a certificate signed by
# the CA produced by gen_ca.
gen_server_cert()
{
[ ! -e server ] && mkdir server
# NOTE(review): 1024-bit RSA kept for compatibility; weak by modern
# standards.
openssl genrsa -out server/server-key.pem 1024
openssl req -new -out server/server-req.csr -key server/server-key.pem << EOF
CN
CHINA
CHENGDU
ZK
YF
root
support@zkchina.com.cn
EOF
openssl x509 -req -in server/server-req.csr -out server/server-cert.pem -CA ca/ca-cert.pem -CAkey ca/ca-key.pem -CAcreateserial -days 3650
}
# Generate the client RSA key and a CA-signed client certificate under
# ./client, valid 10 years. The here-doc answers the `openssl req` DN
# prompts.
# Fix: dropped -signkey from the x509 step. -signkey self-signs the
# certificate and is mutually exclusive with -CA (modern OpenSSL rejects
# the combination); the intent is a certificate signed by gen_ca's CA.
gen_client_cert()
{
[ ! -e client ] && mkdir client
# NOTE(review): 1024-bit RSA kept for compatibility; weak by modern
# standards.
openssl genrsa -out client/client-key.pem 1024
openssl req -new -out client/client-req.csr -key client/client-key.pem << EOF
CN
CHINA
CHENGDU
ZK
YF
root
support@zkchina.com.cn
EOF
openssl x509 -req -in client/client-req.csr -out client/client-cert.pem -CA ca/ca-cert.pem -CAkey ca/ca-key.pem -CAcreateserial -days 3650
}
# Orchestrator: build the CA first (the server/client steps sign
# against it), then the server and client certificates.
gen_cert()
{
gen_ca
gen_server_cert
gen_client_cert
}
# Ensure the work directory exists and is entered before generating
# anything; the original unchecked `cd` could silently run gen_cert in
# the current directory if $work_dir existed as a plain file.
mkdir -p "$work_dir"
cd "$work_dir" || exit 1
gen_cert > /dev/null 2>&1
| true
|
2c40267d5343193bdcd817259c69c69d38f41fe8
|
Shell
|
klein0r/fhem-style-haus-automatisierung
|
/prepare_update.sh
|
UTF-8
| 541
| 3.0625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build controls_ha_theme.txt (FHEM update control file) from ./www and
# refresh the CHANGED file from recent git history.
# NOTE(review): `stat -f`/`-t` are the BSD/macOS flags; this script will
# not work with GNU coreutils stat (-c) as-is -- confirm target platform.
#compass compile --force

# Start from a clean control file (-f: ignore a missing one on first run;
# the original bare `rm` printed an error in that case).
rm -f controls_ha_theme.txt
echo "MOV ./www/hausautomatisierung-com/custom.js unused" >> controls_ha_theme.txt
# NUL-delimited find/read pair is safe for any file name; $f is now also
# quoted inside the stat calls (SC2086) so names with spaces survive, and
# the echo is quoted so internal spacing is preserved.
find ./www -type f \( ! -iname ".*" \) -print0 | while IFS= read -r -d '' f;
do
    out="UPD $(stat -f "%Sm" -t "%Y-%m-%d_%T" "$f") $(stat -f%z "$f") ${f}"
    echo "${out//.\//}" >> controls_ha_theme.txt
done

# CHANGED file
echo "FHEM haus-automatisierung.com custom theme last changes:" > CHANGED
date +"%Y-%m-%d" >> CHANGED
git log -n 10 --reverse --pretty="format:- %s" >> CHANGED
| true
|
de8c8ef1d4fd403249567d61552b6c15b8d0896c
|
Shell
|
Lilja/timelog
|
/test/unittest.sh
|
UTF-8
| 30,020
| 3.875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# shunit2 test suite for the timelog CLI.
# Usage: ./unittest.sh [-v]   (-v forwards timelog's debug flag to
# every invocation via $debug).
start=$(date +%s)
if [[ $1 = "-v" ]]; then debug="-v"; shift
else debug=""; fi
# All tests run against an isolated data directory under ./dev.
dir="$PWD/dev"
# Clean test directory if need to
[ -d "$dir" ] && rm -r "$dir"
# Create test directory
mkdir "$dir"
# shunit2 hook, run after every test: discard the leftover timer file
# (created by timelog start/pause) so tests stay independent.
tearDown() {
    leftover="$dir/saved_log_times"
    [ ! -f "$leftover" ] || rm -- "$leftover"
}
# Create a timelog project non-interactively by feeding the five
# `project create` prompts from a here-doc:
#   $1 name, $2 id, $3 target hours, $4 money per hour, $5 currency.
# Missing arguments become empty prompt answers.
createProjectWithParams() {
timelog $debug --dev "$dir" project create &>/dev/null <<END
$1
$2
$3
$4
$5
END
}
#######################################
# Portable wrapper around GNU and BSD `date`.
# Arguments:
#   $1 - output format (e.g. "+%Y-%m-%d"); may be empty
#   $2 - optional date/time input. Supported BSD forms:
#        "YYYY-MM-DD HH:MM", "YYYY-MM-DD", "HH:MM", "HHMM".
# Outputs: the formatted date on stdout.
# Returns: 0 on success; 1 (after printing a zero date) when a BSD
#          input layout is unsupported.
# Fix: replaced `exit` with `return` -- exiting from inside a function
# kills the whole script when it is not called via $(...).
#######################################
wrap_date() {
    fmt=$1
    data=${2:-}
    # No input data: plain `date +FORMAT` is fine on GNU and BSD alike.
    if [ -z "$data" ]; then
        date "$fmt"
        return
    fi
    if date --version >/dev/null 2>&1; then
        # GNU date: -d parses free-form input.
        if [ -n "$fmt" ]; then
            date "$fmt" -d "$data"
        else
            date -d "$data"
        fi
        return
    fi
    # BSD date: the input layout must be named explicitly with -jf.
    # Probe the supported layouts from most to least specific; each entry
    # packs "input-format|detection-regex".
    for probe in \
        "%Y-%m-%d %H:%M|[0-9]\{4\}-[0-1][0-9]-[0-3][0-9]\ [0-2][0-9]:[0-5][0-9]" \
        "%Y-%m-%d|[0-9]\{4\}-[0-1][0-9]-[0-3][0-9]" \
        "%H:%M|[0-2][0-9]:[0-5][0-9]" \
        "%H%M|[0-2][0-9][0-5][0-9]"; do
        if echo "$data" | grep "${probe#*|}" >/dev/null 2>&1; then
            date -jf "${probe%%|*}" "$data" "$fmt"
            return 0
        fi
    done
    # Unsupported input layout: emit a sentinel and fail.
    echo "0000-00-00 00:00:00"
    return 1
}
# Log a standard 08:00-18:00 entry with no break on project 'ts',
# confirming the save prompt with 'y'.
logProjectTest() {
timelog $debug --dev "$dir" log ts 0800 1800 0 >/dev/null <<END
y
END
}
# Sanity check that the working directory is writable before the real
# tests run (creates and removes a scratch file).
testFileSystem() {
touch foo
assertTrue "Can not create files on filesystem" "[ -f foo ]"
rm foo
}
# Default fixture project: "Test" (id ts), 40h weekly target, 140 kr/hour.
createProjectTest() {
createProjectWithParams "Test" "ts" "40" "140" "kr"
}
# Fixture project "Test2" (id ts2) with no money-per-hour value.
# NOTE(review): the 4th answer "s" is fed to the money-per-hour prompt --
# presumably "skip"; confirm against timelog's prompt handling.
createProjectWithoutMoneyPerHour() {
createProjectWithParams "Test2" "ts2" "40" "s"
}
# Delete the first listed project: answer the selection prompt with '1'
# and the confirmation prompt with 'y'.
deleteProject() {
timelog $debug --dev "$dir" project delete > /dev/null << END
1
y
END
}
# Verify the timelog binary is on PATH and runs.
# Fix: the original `k=$(timelog 2>&1 >/dev/null ; echo $?)` captured
# stderr text together with the exit code, so `[ $k -eq 0 ]` broke (or
# was trivially false) whenever timelog printed anything to stderr.
# Capture only the exit status instead.
testHasTimelogBinary() {
timelog >/dev/null 2>&1
k=$?
assertTrue "Timelog binary was not found" "[ $k -eq 0 ]"
}
testVersion() {
output=$(timelog $debug --dev "$dir" --version | grep -o '[0-9]\.[0-9]\.[0-9]')
assertTrue " --version did not supply any \d.\d.\d format" "[ ! -z '$output' ]"
}
testCreateAndDeleteProject() {
createProjectTest
code=$?
proj_name=$(grep -o 'project_name\ *\=\ *Test' "$dir/def/Test")
proj_id=$(grep -o 'project_id\ *\=\ *Test' "$dir/def/Test")
target=$(grep -o 'target_hours\ *\=\ *40' "$dir/def/Test")
mph=$(grep -o 'money_per_hour\ *\=\ *140' "$dir/def/Test")
curr=$(grep -o 'currency\ *\=\ *kr' "$dir/def/Test")
assertTrue "Exit code for project create was not 0" "[ $code -eq 0 ]"
assertTrue "Definition file could not be created" "[ -f "$dir/def/Test" ]"
assertTrue "Log file could not be created" "[ -f $dir/Test.logs ]"
assertTrue "Project name was not retrieved" "[ ! -z $proj_name ]"
assertTrue "Project id was not retrieved" "[ -f $proj_id ]"
assertTrue "Target hours was not retrieved" "[ ! -z $target ]"
assertTrue "Money per hour was not retrieved" "[ ! -z $mph ]"
assertTrue "Currency was not retrieved" "[ ! -z $curr ]"
deleteProject
code=$?
assertTrue "Exit code for project create was not 0" "[ $code -eq 0 ]"
assertTrue "Definition file was not deleted" "[ ! -f $dir/def/Test ]"
assertTrue "Log file was not deleted" "[ ! -f $dir/Test.logs ]"
}
testCreateProjectWithShadyProjectName() {
proj_name="Test { }"
createProjectWithParams "$proj_name" "test2" "40" "40" "kr"
code=$?
assertTrue "Exit code for project create was not 0" "[ $code -eq 0 ]"
assertTrue "No log file created with project name '$proj_name'" "[ -f '$dir/$proj_name.logs' ]"
assertTrue "No definition file created with project name '$proj_name' " "[ -f '$dir/def/$proj_name' ]"
deleteProject
createProjectWithParams "Test" "test2" "40" "40d"
code=$?
assertTrue "Exit code for faulty project create was not 1" "[ $code -eq 1 ]"
}
testCreateProjectWithFaultyParams() {
createProjectWithParams "Test" "test2" "40d"
code=$?
assertTrue "Exit code for faulty project create was not 1" "[ $code -eq 1 ]"
createProjectWithParams "Test ]"
code=$?
assertTrue "Exit code for faulty project create was not 1 when having a [ in the project name" "[ $code -eq 1 ]"
createProjectWithParams "Test ["
code=$?
assertTrue "Exit code for faulty project create was not 1 when having a [ in the project name" "[ $code -eq 1 ]"
createProjectWithParams "Test ;"
code=$?
assertTrue "Exit code for faulty project create was not 1 when having a ; in the project name" "[ $code -eq 1 ]"
createProjectWithParams "Test" "test2" "40" "40d"
code=$?
assertTrue "Exit code for faulty project create was not 1" "[ $code -eq 1 ]"
projects=$(ls "$dir/*.logs" 2>/dev/null | wc -l)
assertTrue "Amount of projects should be 0" "[ $projects -eq 0 ]"
}
testCreateProjectWithoutMoneyPerHour() {
createProjectWithoutMoneyPerHour
code=$?
proj_name=$(grep -o 'project_name\ *\=\ *Test' "$dir/def/Test2")
proj_id=$(grep -o 'project_id\ *\=\ *Test' "$dir/def/Test2")
target=$(grep -o 'target_hours\ *\=\ *40' "$dir/def/Test2")
mph=$(grep -o 'money_per_hour\ *\=\ *140' "$dir/def/Test2")
curr=$(grep -o 'currency\ *\=\ *kr' "$dir/def/Test2")
assertTrue "Exit code for project create was not 0" "[ $code -eq 0 ]"
assertTrue "Definition file could not be created" "[ -f $dir/def/Test2 ]"
assertTrue "Log file could not be created" "[ -f $dir/Test2.logs ]"
assertTrue "Project name was not retrieved" "[ ! -z $proj_name ]"
assertTrue "Project id was not retrieved" "[ -f $proj_id ]"
assertTrue "Target hours was not retrieved" "[ ! -z $target ]"
assertTrue "Money per hour was retrieved" "[ -z $mph ]"
assertTrue "Currency was retrieved" "[ -z $curr ]"
deleteProject
}
testListProjects() {
createProjectTest
k=$(timelog $debug --dev "$dir" project list)
code=$?
match=$(echo "$k" | grep "1:\ Test\ \[ts\]")
assertTrue "Exit code was not 0" "[ $code -eq 0 ]"
assertTrue "List projects did not print out the created project" "[ ! -z '$match' ]"
deleteProject
}
testListProjectsWithNoCreatedProjects() {
timelog $debug --dev "$dir" project list &>/dev/null
code=$?
assertTrue "Listing projects with no created project did not return an exit code of 1" "[ $code -eq 1 ]"
}
testLogWhenEmptyProjectInitally() {
output=$(timelog --dev "$dir" project list_id >/dev/null)
assertTrue "When no project have been created, there are projects created with project list_id" "[ -z '$output' ]"
if [ ! -z "$output" ]; then exit 1; fi
}
testLogProject() {
createProjectTest
timelog $debug --dev "$dir" log ts 0800 1000 0 >/dev/null <<END
n
END
code=$?
assertTrue "Exit code was not 0" "[ $code -eq 0 ]"
assertTrue "A log entry was created when specified not to create" "[ $(< $dir/Test.logs wc -l) -eq 0 ]"
timelog $debug --dev "$dir" log ts 0800 1000 0 >/dev/null <<END
y
END
code=$?
logs=$(cat "$dir/Test.logs")
amount_of_logs=$(< $dir/Test.logs 2>/dev/null wc -l)
assertTrue "Exit code was not 0" "[ $code -eq 0 ]"
assertTrue "A log entry was not created" "[ $amount_of_logs -eq 1 ]"
dec_time=$(echo "$logs" | grep -o '\[2\]' | grep -o '2')
mil_time=$(echo "$logs" | grep -o '{02:00}' | grep -o '02:00')
assertTrue "Decimal time was not 2" "[ $dec_time -eq 2 ]"
assertTrue "HH:mm time was not 02:00" "[ $mil_time = '02:00' ]"
deleteProject
}
testLogProjectWithFaultyTimestamps() {
createProjectTest
timelog $debug --dev "$dir" log ts >/dev/null <<END
asdf
END
code=$?
assertTrue "Exit code was not 1" "[ $code -eq 1 ]"
assertTrue "Even though log createn went south, something was created in the log file" "[ $( wc -l < $dir/Test.logs) -eq 0 ]"
timelog $debug --dev "$dir" log ts >/dev/null <<END
0800
asdf
END
code=$?
assertTrue "Exit code was not 1" "[ $code -eq 1 ]"
assertTrue "Even though log createn went south, something was created in the log file" "[ $( wc -l < $dir/Test.logs) -eq 0 ]"
timelog $debug --dev "$dir" log ts >/dev/null <<END
0800
1000
asdf
END
code=$?
assertTrue "Exit code was not 1" "[ $code -eq 1 ]"
assertTrue "Even though log createn went south, something was created in the log file" "[ $( wc -l < $dir/Test.logs) -eq 0 ]"
timelog $debug --dev "$dir" log ts >/dev/null <<END
0800
1000
300
END
code=$?
assertTrue "Exit code was not 1" "[ $code -eq 1 ]"
assertTrue "Even though log createn went south, something was created in the log file" "[ $( wc -l < $dir/Test.logs) -eq 0 ]"
timelog $debug --dev "$dir" log ts >/dev/null <<END
0800
2400
0
END
code=$?
assertTrue "Exit code was not 1" "[ $code -eq 1 ]"
assertTrue "Even though log createn went south, something was created in the log file" "[ $( wc -l < $dir/Test.logs) -eq 0 ]"
deleteProject
}
testLogProjectWithNowAtEnd() {
# Fake that current time is 14:00 and the start time is 1300
# Purposly put a enter in the log interactive so that it gets the "current" timestamp.
start_time="1300"
end_time="2017-01-01 14:00"
createProjectTest
timelog $debug --dev "$dir" log ts --date "$end_time" >/dev/null <<END
$start_time
0
y
END
code=$?
logs=$(cat "$dir/Test.logs")
amount_of_logs=$(< $dir/Test.logs 2>/dev/null wc -l)
assertTrue "Exit code was not 0" "[ $code -eq 0 ]"
assertTrue "A log entry was not created" "[ $amount_of_logs -eq 1 ]"
dec_time=$(echo "$logs" | grep -o '\[1\]' | grep -o '1')
mil_time=$(echo "$logs" | grep -o '{01:00}' | grep -o '01:00')
assertTrue "Decimal time was not 1" "[ $dec_time -eq 1 ]"
assertTrue "HH:mm time was not 01:00. $logs" "[ $mil_time = '01:00' ]"
deleteProject
}
testLogProjectWithDate() {
day="2017-01-01"
nextDay="2017-01-02"
nextDayAfterThat="2017-01-03"
createProjectTest
timelog $debug --dev "$dir" log ts 0800 1000 0 >/dev/null --date "$day" <<END
y
END
code=$?
assertTrue "Exit code was not 0" "[ $code -eq 0 ]"
assertTrue "A log entry was not created with '0800 1000 0' when specifying custom date" "[ $( wc -l < $dir/Test.logs) -eq 1 ]"
timelog $debug --dev "$dir" log ts 0800 1100 0 --date "$nextDay" >/dev/null <<END
y
END
code=$?
logs=$(cat "$dir/Test.logs")
amount_of_logs=$(wc -l < "$dir/Test.logs" 2>/dev/null)
assertTrue "Exit code was not 0" "[ $code -eq 0 ]"
assertTrue "A log entry was not created with '0800 1100 0'. The amount of logs are: '$amount_of_logs'" "[ $amount_of_logs -eq 2 ]"
dayOneLogs=$(echo "$logs" | head -n1)
dayTwoLogs=$(echo "$logs" | tail -n1)
dec_time=$(echo "$dayOneLogs" | grep -o '\[2\]' | grep -o '2')
mil_time=$(echo "$dayOneLogs" | grep -o '{02:00}' | grep -o '02:00')
dayOneDate=$(echo "$dayOneLogs" | grep -o "\/$day")
assertTrue "Decimal time was not 2" "[ $dec_time -eq 2 ]"
assertTrue "HH:mm time was not 02:00" "[ $mil_time = '02:00' ]"
assertTrue "Custom date '$day' was not '$dayOneDate'" "[ '$dayOneDate' = '/$day' ]"
dec_time=$(echo "$dayTwoLogs" | grep -o '\[3\]' | grep -o '3')
mil_time=$(echo "$dayTwoLogs" | grep -o '{03:00}' | grep -o '03:00')
dayTwoDate=$(echo "$dayTwoLogs" | grep -o "\/$nextDay")
assertTrue "Decimal time was not 3" "[ $dec_time -eq 3 ]"
assertTrue "HH:mm time was not 03:00" "[ $mil_time = '03:00' ]"
assertTrue "Custom date '$nextDay' was not '$dayTwoDate'" "[ '$dayTwoDate' = '/$nextDay' ]"
timelog $debug --dev "$dir" log ts 0800 1100 0 --date &>/dev/null <<END
$nextDayAfterThat
y
END
code=$?
logs=$(cat "$dir/Test.logs")
amount_of_logs=$(wc -l < "$dir/Test.logs" 2>/dev/null)
dayThreeDate=$(echo "$logs" | grep -o "\/$nextDayAfterThat")
assertTrue "Exit code was not 0" "[ $code -eq 0 ]"
assertTrue "A log entry was not created when specifing date throught prompt($nextDayAfterThat). The amount of logs are: '$amount_of_logs' and should be 3" "[ $amount_of_logs -eq 3 ]"
assertTrue "Custom date '$nextDay' was not '$nextDayAfterThat'" "[ '$dayThreeDate' = '/$nextDayAfterThat' ]"
deleteProject
}
testLogProjectWithFaultyDate() {
createProjectTest
timelog $debug --dev "$dir" log ts 0800 1000 0 --date "20asdwdawdqw" >/dev/null <<END
END
code=$?
assertTrue "Exit code was 0" "[ $code -ne 0 ]"
assertTrue "A log entry was created when specifying a faulty custom date" "[ $( wc -l < $dir/Test.logs) -eq 0 ]"
deleteProject
}
testLogWeekFirstOfJan() {
# Tests the edge case 2017-01-01. If retrieving 'year-week-day_in_week' it should retrieve:
# 2016-52-7
day="2017-01-01"
year_week_day_of_date="2016-52-7"
createProjectTest
timelog $debug --dev "$dir" log ts 0800 1000 0 --date >/dev/null "$day" <<END
y
END
code=$?
logs=$(cat "$dir/Test.logs")
amount_of_logs=$(wc -l < $dir/Test.logs 2>/dev/null)
mixed_date=$(echo "$logs" | grep -o "$year_week_day_of_date\/$day")
assertTrue "Exit code was not 0" "[ $code -eq 0 ]"
assertTrue "A log entry was not created when specifying custom date" "[ $( wc -l < $dir/Test.logs) -eq 1 ]"
assertTrue "Custom date was not $day" "[ '$mixed_date' = '$year_week_day_of_date/$day' ]"
deleteProject
}
testLogProjectAtDifferentYears() {
createProjectTest
# All of these dates has week=3
timelog $debug --dev "$dir" log ts 0800 1000 0 --date "2015-01-12" &>/dev/null <<END
y
END
timelog $debug --dev "$dir" log ts 0800 1000 0 --date "2016-01-18" &>/dev/null <<END
y
END
timelog $debug --dev "$dir" log ts 0800 1000 0 --date "2017-01-16" &>/dev/null <<END
y
END
output=$(timelog $debug --dev "$dir" view ts 3 2017 | grep -o '2h')
assertTrue "Creating logs over different years with the same week gives not output when specified which year from CLI" "[ ! -z $output ]"
deleteProject
}
testLogProjectwithObscureTime() {
createProjectTest
timelog $debug --dev "$dir" log ts 0840 1802 34 >/dev/null <<END
y
END
code=$?
logs=$(cat "$dir/Test.logs")
dec_time=$(echo "$logs" | grep -o '\[[0-9]*\.*[0-9]*\]' | grep -o '[0-9]*\.[0-9]*')
mil_time=$(echo "$logs" | grep -o '{[0-9]*:[0-9]*}' | grep -o '[0-9]*:[0-9]*')
assertTrue "Decimal time did not equal 8.8" "[ $dec_time = '8.8' ]"
assertTrue "Decimal time did not equal 08:48" "[ $mil_time = '08:48' ]"
deleteProject
}
testLogStart() {
createProjectWithParams "Test1" "ts1" "40" "140" "kr"
now_in_one_hour=1500
timelog $debug --dev "$dir" start ts1 --date "2017-01-01 14:00">/dev/null
assertTrue "Timelog start did not return with exit code 0" $?
timelog $debug --dev "$dir" log ts1 &>/dev/null << END
$now_in_one_hour
0
y
END
logs=$(timelog $debug --dev "$dir" view ts1 $(date +%V))
remaining_hours=$(echo "$logs" | grep -o 'You have 39 hours more to work')
worked_hours=$(echo "$logs" | grep -o 'You have worked for 1 hours')
assertTrue "Remaining hours was not 39" "[ ! -z '$remaining_hours' ]"
assertTrue "Worked hours was not 1" "[ ! -z '$worked_hours' ]"
deleteProject
}
testLogPauseAndBreak() {
# Log: 12:00 - 12:30(30 min), 13:15-13-20(5 min), 13:30-13:55(25 min) = 1h
# Break: 12:30-13:15(45 min), 13:20-13:30(10 min)
started="2017-01-02 12:00"
paused="2017-01-02 12:30"
resumed="2017-01-02 13:15"
re_paused="2017-01-02 13:20"
re_resumed="2017-01-02 13:30"
ended="2017-01-02 14:00"
year=2017
week=1
assertTrue "Saved log times file exists, it shouldn't because of tearDown()" "[ ! -f "$dir/saved_log_times" ]"
createProjectWithParams "Test1" "ts1" "40" "140" "kr"
timelog $debug --dev "$dir" start ts1 --date "$started"
assertTrue "timelog start did not return 0 $?" $?
timelog $debug --dev "$dir" pause ts1 --date "$paused"
assertTrue "timelog pause did not return 0 $?" $?
timelog $debug --dev "$dir" resume ts1 --date "$resumed"
assertTrue "timelog resume did not return 0 $?" $?
timelog $debug --dev "$dir" pause ts1 --date "$re_paused"
assertTrue "timelog pause did not return 0 $?" $?
timelog $debug --dev "$dir" resume ts1 --date "$re_resumed"
assertTrue "timelog resume did not return 0 $?" $?
timelog $debug --dev "$dir" log ts1 --date "$ended" >/dev/null <<END
13:55
y
END
assertTrue "timelog log did not return 0: $?" $?
logs=$(timelog $debug --dev "$dir" view ts1 $week $year)
remaining_hours=$(echo "$logs" | grep -o 'You have 39 hours more to work')
worked_hours=$(echo "$logs" | grep -o 'You have worked for 1 hours')
assertTrue "Remaining hours was not 39" "[ ! -z '$remaining_hours' ]"
assertTrue "Worked hours was not 1" "[ ! -z '$worked_hours' ]"
deleteProject
}
testLogPauseAndBreakNotify() {
started="2017-01-02 12:00"
paused="2017-01-02 12:30"
resumed="2017-01-02 13:15"
re_paused="2017-01-02 13:20"
createProjectWithParams "Test1" "ts1" "40" "140" "kr"
timelog $debug --dev "$dir" pause ts1 --date "$paused" >/dev/null
assertTrue "timelog pause did not return 0 $?" $?
timelog $debug --dev "$dir" resume ts1 --date "$resumed" >/dev/null
assertTrue "timelog resume did not return 0 $?" $?
timelog $debug --dev "$dir" pause ts1 --date "$re_paused" >/dev/null
timelog $debug --dev "$dir" log ts1 << END | grep -q "NOTE: There is an uneven amount of"
08:00
12:00
y
END
assertTrue "timelog log with pause that did not resume did not output the expected text" $?
deleteProject
}
testLogWithNote() {
createProjectTest
current_week=$(date +%V)
note="Bash stuff, meeting at 9."
timelog $debug --dev "$dir" log ts >/dev/null << END
08:00
12:00
0
y
END
assertTrue "The exit code of log creation without --note opt was not 0" "[ $? -eq 0 ]"
output=$(timelog $debug --dev "$dir" view ts "$current_week")
assertTrue "The exit code of view was not 0" "[ $? -eq 0 ]"
# A day should display: "day: 4h / 04:00 " Notice the space at the end.
# If a log entry with no --note, there should not be any more stuff beside what's listed above.
end_of_day_line=$(echo "$output" | grep -o "04:00\ $")
assertTrue "When creating a log entry with no note, note text was inserted " "[ ! -z '$end_of_day_line' ]"
timelog $debug --dev "$dir" log ts --note >/dev/null << END
08:00
12:00
0
$note
y
END
assertTrue "The exit code of log creation was not 0" "[ $? -eq 0 ]"
output=$(timelog $debug --dev "$dir" view ts "$current_week")
assertTrue "The exit code of log entry was not 0" "[ $? -eq 0 ]"
log_note=$(echo "$output" | grep -o "$note")
assertTrue "A note entry was not found when showing logs" "[ '$log_note' = '$note' ]"
deleteProject
}
testLogNoteWithEmptyNote() {
createProjectTest
current_week=$(date +%V)
note="Bash stuff, meeting at 9."
timelog --dev "$dir" log ts --note >/dev/null << END
08:00
12:00
0
y
END
assertTrue "The exit code of log creation was not 0" "[ $? -eq 0 ]"
output=$(timelog $debug --dev "$dir" view ts "$current_week")
assertTrue "The exit code of log entry was not 0" "[ $? -eq 0 ]"
end_of_day_line=$(echo "$output" | grep -o "04:00\ $")
assertTrue "A note entry was found when showing logs" "[ ! -z '$end_of_day_line' ]"
deleteProject
}
testShowWeeklyLogs() {
createProjectTest
current_week=$(date +%V)
today=$(date +%A)
today="$(tr '[:lower:]' '[:upper:]' <<< ${today:0:1})${today:1}"
timelog $debug --dev "$dir" log ts 0840 1802 34 >/dev/null << END
y
END
capture=$(timelog --dev "$dir" view ts $current_week)
cmd=$(grep -q "Days worked for week $current_week" <<< $capture ; echo $?)
assertTrue "Weekly stats for $today was recorded" "[ $cmd -eq 0 ]"
cmd=$(grep -q "$today: 8\.8h \/ 08:48" <<< $capture ; echo $?)
assertTrue "Today($today)'s decimal time and/or military time was not equal to 8.8h/08:48" "[ $cmd -eq 0 ]"
(
timelog --dev "$dir" view ts <<END
$current_week
END
) | grep -q "Days worked for week $current_week"
code=$?
assertTrue "Supplying view with current week through prompt did not display any text" "[ $code -eq 0 ]"
deleteProject
}
testShowWeeklyLogsEmpty() {
createProjectTest
current_week=$(date +%V)
current_year=$(date +%Y)
today=$(date +%A)
output=$(timelog --dev "$dir" view ts $current_week)
patt="Nothing worked on week $current_week year $current_year for project Test"
assertTrue "When nothing was logged, the output wasn't nothing '$output'" "[ '$output' = '$patt' ]"
deleteProject
}
testShowWeeklyLogsWithUnorderedLogging() {
createProjectTest
year="2017"
mon="2017-08-14"
tue="2017-08-15"
local_monday_name=$(wrap_date "+%A" "$mon")
local_monday_name="$(tr '[:lower:]' '[:upper:]' <<< ${local_monday_name:0:1})${local_monday_name:1}"
local_tuesday_name=$(wrap_date "+%A" "$tue")
local_tuesday_name="$(tr '[:lower:]' '[:upper:]' <<< ${local_tuesday_name:0:1})${local_tuesday_name:1}"
week="33"
timelog $debug --dev "$dir" log ts 0800 1600 0 --date "$tue" >/dev/null << END
y
END
timelog $debug --dev "$dir" log ts 0800 1600 0 --date "$mon" >/dev/null << END
y
END
timelog $debug --dev "$dir" log ts 0800 1600 0 --date "2012-01-01" >/dev/null << END
y
END
stdout=$(timelog $debug --dev "$dir" view ts "$week" "$year")
monday_output=$(echo "$stdout" | head -n2 | tail -n1 | grep "$local_monday_name")
tuesday_output=$(echo "$stdout" | head -n3 | tail -n1 | grep "$local_tuesday_name")
if [ ! -z "$monday_output" ] && [ ! -z "$tuesday_output" ]; then
in_order=0
else
in_order=-1
fi
assertTrue "When logging with --date at different days, the days was in order when viewing the logs" "[ $in_order -eq 0 ]"
deleteProject
}
# Log 8h Mon-Thu and 9h Fri (41h against a 40h target) and check that
# `view` mentions overtime.
# Fix: the first assignment was `mon="2017"` -- a typo for
# `year="2017"` (it was immediately overwritten by the real `mon`,
# leaving $year unset in the `view` call below; the sibling test
# testShowWeeklyLogsWithLogsExceedingFiveDays sets year="2017").
testShowWeeklyLogsWithOvertime() {
createProjectTest
year="2017"
mon="2017-08-14"
tue="2017-08-15"
wed="2017-08-16"
thu="2017-08-17"
fri="2017-08-18"
week="33"
timelog $debug --dev "$dir" log ts 0800 1600 0 --date "$mon" >/dev/null << END
y
END
timelog $debug --dev "$dir" log ts 0800 1600 0 --date "$tue" >/dev/null << END
y
END
timelog $debug --dev "$dir" log ts 0800 1600 0 --date "$wed" >/dev/null << END
y
END
timelog $debug --dev "$dir" log ts 0800 1600 0 --date "$thu" >/dev/null << END
y
END
timelog $debug --dev "$dir" log ts 0800 1700 0 --date "$fri" >/dev/null << END
y
END
# Overtime by 1 hour logged(friday). Should mention that the user has worked overtime in view
stdout=$(timelog $debug --dev "$dir" view ts "$week" "$year")
cmd=$(grep -q "[oO]vertime" <<< $stdout ; echo $?)
assertTrue "Overtime of 1 hour was not mentioned" "[ $cmd -eq 0 ]"
deleteProject
}
testShowWeeklyLogsWithLogsExceedingFiveDays() {
# This test tests the estimisation when not having reached the target hours
# For example, logging time for 30 hours for 5 days, the estimate should instead of giving estimates for a 5
# day work week now give estimation of a 7 day work week because of the target hours not being achieved.
createProjectTest
year="2017"
mon="2017-08-14"
tue="2017-08-15"
wed="2017-08-16"
thu="2017-08-17"
fri="2017-08-18"
week="33"
timelog $debug --dev "$dir" log ts 0800 1000 0 --date "$mon" >/dev/null << END
y
END
timelog $debug --dev "$dir" log ts 0800 1000 0 --date "$tue" >/dev/null << END
y
END
timelog $debug --dev "$dir" log ts 0800 1000 0 --date "$wed" >/dev/null << END
y
END
timelog $debug --dev "$dir" log ts 0800 1000 0 --date "$thu" >/dev/null << END
y
END
timelog $debug --dev "$dir" log ts 0800 1000 0 --date "$fri" >/dev/null << END
y
END
# Overtime by 1 hour logged(friday). Should mention that the user has worked overtime in view
stdout=$(timelog $debug --dev "$dir" view ts "$week" "$year")
cmd=$(grep -q "This yields an estimate of 15 hours for 2 more days" <<< $stdout ; echo $?)
assertTrue "Overtime of 1 hour was not mentioned" "[ $cmd -eq 0 ]"
deleteProject
}
testMultipleprojects() {
createProjectWithParams "Test1" "ts1" "40" "140" "kr"
createProjectWithParams "Test2" "ts2" "40" "240" "kr"
projects=$(timelog $debug --dev "$dir" project list | grep -c '^[0-9]:')
assertTrue "There was not two projects created: $projects counted" "[ $projects -eq 2 ]"
timelog $debug --dev "$dir" log >/dev/null << END
1
08:00
12:00
0
y
END
timelog $debug --dev "$dir" log >/dev/null << END
2
08:00
18:00
0
y
END
logs=$(timelog $debug --dev "$dir" view ts1 $(date +%V))
remaining_hours=$(echo "$logs" | grep -o 'You have 36 hours more to work')
worked_hours=$(echo "$logs" | grep -o 'You have worked for 4 hours')
assertTrue "Remaining hours was not 36" "[ ! -z '$remaining_hours' ]"
assertTrue "Worked hours was not 4" "[ ! -z '$worked_hours' ]"
logs=$(timelog $debug --dev "$dir" view ts2 $(date +%V))
remaining_hours=$(echo "$logs" | grep -o 'You have 30 hours more to work')
worked_hours=$(echo "$logs" | grep -o 'You have worked for 10 hours')
assertTrue "Remaining hours was not 30" "[ ! -z '$remaining_hours' ]"
assertTrue "Worked hours was not 10" "[ ! -z '$worked_hours' ]"
deleteProject
deleteProject
}
testDifferentInputFormats() {
createProjectTest
rgx="Decimal time: 2 Military time: 02:00"
list=("[8,10] [800,1000] [0800,1000] [8.00,10.00] [8:00,10:00] [08:00,10:00]")
for it in $list; do
begin=$(echo "$it" | grep -o "[0-9]*\:*\.*[0-9]*," | sed 's#,##')
end=$(echo "$it" | grep -o ",[0-9]*\:*\.*[0-9]*" | sed 's#,##')
entry=$(timelog $debug --dev "$dir" log ts "$begin" "$end" 0 <<END
n
END
)
regexd=$(echo "$entry" | grep -o "$rgx")
assertTrue "Begining time of '$begin' and end time of '$end' did not equal to the output time of 2/02:00:$entry" "[ '$regexd' = '$rgx' ]"
done
deleteProject
}
testCalculate() {
regex="4/04:00"
timelog $debug --dev "$dir" calc 0800 1200 0 | grep -q "$regex"
code=$?
assertTrue "Calculating 0800 1200 0 did not return '$regex'" "[ $code -eq 0 ]"
timelog $debug --dev "$dir" calc 0800 1200 | grep -q "$regex"
code=$?
assertTrue "Calculating 0800 1200 with implicit break time 0 did not return '$regex'" "[ $code -eq 0 ]"
timelog $debug --dev "$dir" calc 0800 1220 20 | grep -q "$regex"
code=$?
assertTrue "Calculating 0800 1220 20(two hours break) did not return '$regex'" "[ $code -eq 0 ]"
two_hours_regex="2/02:00"
timelog $debug --dev "$dir" calc 0800 1200 120 | grep -q "$two_hours_regex"
code=$?
assertTrue "Calculating 0800 1200 120(two hours break) did not return '$two_hours_regex'" "[ $code -eq 0 ]"
}
testCalculateInvalidTimes() {
timelog $debug --dev "$dir" calc 080a0 1200 0 &>/dev/null
code=$?
assertTrue "Calculating 080a0 1200 0 returned an exit code of $code" "[ $code -eq 1 ]"
timelog $debug --dev "$dir" calc 0800 12b00 &>/dev/null
code=$?
assertTrue "Calculating 0800 12b00 returned an exit code of $code" "[ $code -eq 1 ]"
timelog $debug --dev "$dir" calc 0800 1200 2b &>/dev/null
code=$?
assertTrue "Calculating 0800 1200 2b returned an exit code of $code" "[ $code -eq 1 ]"
timelog $debug --dev "$dir" calc 0800 1200 300 | grep -q 'Error, break time is greater than'
code=$?
assertTrue "Calculating 0800 1200 300 returned a string that did not match" "[ $code -eq 0 ]"
}
# Earnings calculation: 2 hours logged at 140/hour must make `view`
# display "You have earned 280".
# NOTE(review): unlike the other tests, no project id ('ts') is passed
# to `log`/`view` here -- presumably timelog falls back to the only
# existing project; confirm against the CLI.
testMoneyPerHourCalculation() {
createProjectTest
timelog $debug --dev "$dir" log 0800 1000 0 >/dev/null <<END
y
END
calc="280" # 140 per hour pre tax and 2 hours logged => 280
cmd=$(timelog $debug --dev "$dir" view "$(date +%V)" | grep "You have earned $calc")
assertTrue "Calculating total money per hour for 2 hours on 140 mph did not retrieve string 'You have earned $calc'" "[ ! -z '$cmd' ]"
deleteProject
}
testUnknownArguments() {
cmd=$(timelog $debug --dev $dir asdf | grep "Unknown argument 'asdf'")
assertTrue "Faulty command did not match the string 'Unknown argument'" "[ ! -z '$cmd' ]"
timelog $debug --dev "$dir" project asdasdj &>/dev/null
code=$?
assertTrue "Faulty command 'list asdasdj' did not return 1'" "[ $code -eq 1 ]"
}
# --help must exit with status 1.
# Fix: the original never assigned $code in this function, so the
# assertion silently tested a stale value left over from whichever test
# ran before it. Capture the exit status explicitly.
testUsage() {
timelog $debug --dev "$dir" --help &>/dev/null
code=$?
assertTrue "Calling --help did not return an exit code of 1" "[ $code -eq 1 ]"
}
# Deleting when no project exists should print 'No projects' gracefully.
testDeleteProjectWithNoProjects() {
  timelog $debug --dev "$dir" project delete | grep -q 'No projects'
  code=$?
  assertTrue "Deleting a project without any projects created did not output matching string" "[ $code -eq 0 ]"
}
# Creating a project whose name already exists must be refused.
testCreateDuplicateProjects() {
  createProjectTest
  # The heredoc feeds the interactive create prompts (name, id, hours,
  # money per hour, currency); 'Test' duplicates the project made above.
  timelog $debug --dev "$dir" project create << END | grep -q 'Could not create project, it already exists!'
Test
ts
40
140
kr
END
  code=$?
  assertTrue "Creating duplicate projects did not return the expected output string" "[ $code -eq 0 ]"
  deleteProject
}
# Logging without any project must fail (exit 1) with an explanatory message.
testLoggingWithNoProject() {
  output=$(timelog $debug --dev "$dir" log)
  output_code=$?
  echo "$output" | grep -q 'requires you to specify a project'
  code=$?
  assertTrue "Attempting to log a project without any projects did not return 1: $output_code" "[ $output_code -eq 1 ]"
  assertTrue "Attempting to log a project without any projects did not return the expected output string" "[ $code -eq 0 ]"
}
# A project with no money-per-hour rate must not print an earnings line.
testShowLogsWithoutMoneyPerHour() {
  createProjectWithoutMoneyPerHour
  timelog $debug --dev "$dir" log 0800 1200 40 >/dev/null << END
y
END
  # Case-insensitive match on "you have earned"; it must be absent.
  stdout=$(timelog $debug --dev "$dir" view "$(date +%V)" | grep "[Yy]ou have earned")
  assertTrue "Even when specifying no money per hour, 'You have earned' text is displayed" "[ -z '$stdout' ]"
  deleteProject
}
# -v should emit timestamped DEBUG lines ("YYYY-MM-DD HH:MM:SS DEBUG").
testDebugMode() {
  createProjectTest
  # $code holds the grep exit status (last command of the pipeline).
  output=$(timelog -v --dev "$dir" --help | grep -o '[0-9]\{4\}-[0-9]\{2\}-[0-9]\{2\} [0-9]\{2\}:[0-9]\{2\}:[0-9]\{2\} DEBUG')
  code=$?
  assertTrue "Debug information was not shown when supplying -v" "[ $code -eq 0 ]"
  deleteProject
}
# --purge (confirmed by typing "timelog" at the prompt) removes the log dir.
testPurge() {
  createProjectTest
  logProjectTest
  timelog $debug --dev "$dir" --purge >/dev/null << END
timelog
END
  assertTrue "No log folder was deleted when purging" "[ ! -d '$dir' ]"
}
# Hand control to shunit2, which discovers and runs the test* functions.
. shunit2-2.1.6/src/shunit2
# Report total wall-clock duration ($start is presumably set near the top
# of the file before the tests run — TODO confirm).
end=$(date +%s)
diff=$((end-start))
minutes=$((diff/60%60))
seconds=$((diff%60))
echo "Unit tests took $minutes min(s), $seconds second(s) to run"
| true
|
8e1a2f09f477e7f6e565ac1a651bb62485fe7d4f
|
Shell
|
wildlyinaccurate/git-merge-vs-rebase
|
/setup.sh
|
UTF-8
| 1,535
| 3.765625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Build two demo repositories ("merge" and "rebase") with identical
# histories, integrating feature branches with `git merge` in one and
# `git rebase` in the other, so the resulting graphs can be compared.
#
# Usage: setup.sh [--force]   # --force replaces pre-existing directories

DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

[[ $1 == '--force' ]] && force=1

for merge in merge rebase; do
    merge_options=''
    # `git merge` needs --no-edit to run non-interactively;
    # `git rebase` takes no such flag.
    [[ $merge == 'merge' ]] && merge_options='--no-edit'

    # Test the directory next to this script — the same path created below.
    # (The original tested a path relative to the caller's CWD, so running
    # the script from elsewhere could both "pass" the check and then fail
    # on mkdir.)
    if [[ -d "$DIR/$merge" && $force == 1 ]]
    then
        rm -rf -- "$DIR/$merge"
    elif [[ -d "$DIR/$merge" && $force != 1 ]]
    then
        echo "Directory $merge already exists. Run with --force to replace it."
        exit 1
    fi

    mkdir "$DIR/$merge" || exit 1
    cd "$DIR/$merge" || exit 1

    git init

    touch master
    git add master
    git commit -m 'Initial commit'

    git checkout -b feature-1 master
    touch feature-1
    git add feature-1
    git commit -m 'Implement feature 1'

    git checkout -b feature-2 master
    touch feature-2
    git add feature-2
    git commit -m 'Implement feature 2'

    git checkout master
    git merge --no-ff --no-edit feature-1

    git checkout feature-2
    # $merge_options is deliberately unquoted: when empty it must vanish.
    git $merge $merge_options master
    echo 'More feature 2' > feature-2
    git commit -m 'Add more to feature 2' feature-2

    git checkout master
    git merge --no-ff --no-edit feature-2

    git checkout -b feature-3 master
    touch feature-3
    git add feature-3
    git commit -m 'Implement feature 3'

    git checkout master
    echo 'Hotfix' > master
    git commit -m 'Hotfix' master

    git checkout feature-3
    git $merge $merge_options master
    echo 'More feature 3' > feature-3
    git commit -m 'Add more to feature 3' feature-3

    git checkout master
    git merge --no-ff --no-edit feature-3
done
| true
|
edd63595d1470b2a5392856bdca93189a2386a16
|
Shell
|
alpha123/dotfiles
|
/scripts/gen_wallpaper
|
UTF-8
| 1,157
| 3.359375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Render a wallpaper image with a text caption using ImageMagick convert.
# The last non-option argument is taken as the output filename.

font=TitilliumWeb
fontsize=144
linespacing=-50
background=
foreground=
caption=
gravity=NorthEast
imgsize=1420x900
offset=20x0
filename=

while [ "$1" != "" ]; do
    echo "$1"
    case $1 in
        -f | --font ) shift
                      font=$1
                      ;;
        -s | --font-size ) shift
                           fontsize=$1
                           ;;
        -b | --background-color ) shift
                                  background=$1
                                  ;;
        -c | --foreground-color ) shift
                                  foreground=$1
                                  ;;
        -t | --text ) shift
                      caption=$1
                      ;;
        -g | --gravity ) shift
                         gravity=$1
                         ;;
        -i | --image-size ) shift
                            imgsize=$1
                            ;;
        -o | --offset ) shift
                        offset=$1
                        ;;
    esac
    # After an option's value is consumed, $1 is that value, so filename is
    # overwritten on every iteration; the final positional argument wins.
    filename=$1
    shift
done

# Show the exact command that runs below. (The original echoed
# "-caption $caption" while actually executing "caption:$caption".)
echo "convert -size $imgsize -font $font -pointsize $fontsize -interline-spacing $linespacing -background $background -fill $foreground -gravity $gravity caption:$caption -splice $offset $filename"

# $background/$foreground stay unquoted on purpose: the empty defaults must
# disappear from the command line rather than become empty arguments.
convert -size "$imgsize" -font "$font" -pointsize "$fontsize" -interline-spacing "$linespacing" -background $background -fill $foreground -gravity "$gravity" caption:"$caption" -splice "$offset" "$filename"
| true
|
d417d7e86ab1dae89c9d1952887f27b44ed0118c
|
Shell
|
lukaszbudnik/auditor
|
/integration-tests/entrypoint-auditor.sh
|
UTF-8
| 260
| 3.09375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Container entrypoint: start auditor with the configured config file.
DEFAULT_AUDITOR_CONFIG_FILE=".env"
# Fall back to the default when AUDITOR_CONFIG_FILE is unset or empty.
AUDITOR_CONFIG_FILE="${AUDITOR_CONFIG_FILE:-$DEFAULT_AUDITOR_CONFIG_FILE}"
auditor -configFile "$AUDITOR_CONFIG_FILE"
| true
|
0334b504b1ca197ab0dd067bbe4020d08cec1356
|
Shell
|
EvgeniyBlinov/ansible-role-server-openvpn
|
/templates/start.sh
|
UTF-8
| 2,173
| 3.890625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build, configure and run an OpenVPN server container.
# Usage: start.sh [build|configure|add_client|start] [args...]

# Quote "$0" and the path args so the script works from a directory
# containing spaces.
SCRIPT_PATH="$(dirname "$0")"
ABSOLUTE_PATH="$(readlink -m "${SCRIPT_PATH}")"

# Export every variable defined in a local .env file, if one exists.
if test -f .env ; then
    set -o allexport;
    source .env;
    set +o allexport;
fi

PROJECT_DIR_NAME="$(basename "${ABSOLUTE_PATH}")"
ACTION="${1:-build}"

# All settings may be overridden via the environment / .env.
OVPN_DATA="$ABSOLUTE_PATH/data"
OVPN_PROTO="${OVPN_PROTO:-udp}"
OVPN_SERVER_NAME="${OVPN_SERVER_NAME:-example.com}"
OVPN_PORT="${OVPN_PORT:-1194}"
OVPN_IMAGE_VERSION="${OVPN_IMAGE_VERSION:-_2022-02-04}"
OVPN_NAME="${OVPN_NAME:-${PROJECT_DIR_NAME}}"
OVPN_IMAGE_NAME="${OVPN_IMAGE_NAME:-cent/openvpn-docker${OVPN_IMAGE_VERSION}}"
OVPN_DOCKER_PROJECT_PATH="${OVPN_DOCKER_PROJECT_PATH:-/opt/openvpn-docker}"

# Abort rather than run the docker commands from the wrong directory.
mkdir -p "$OVPN_DATA" || exit 1
cd "$OVPN_DATA" || exit 1
# Build the OpenVPN image only when it is not already present locally.
function build {
    # Quote the pattern: the image name contains '/' and '.' and must not
    # be word-split or interpreted as multiple grep operands.
    if ! docker images | grep -q "$OVPN_IMAGE_NAME" ; then
        (cd "${OVPN_DOCKER_PROJECT_PATH}" && docker build -t "${OVPN_IMAGE_NAME}" .)
    fi
}
# Print the command about to run, then execute it, preserving its status.
function cmd {
    echo "$*"
    # "$@" keeps every argument intact; the original collapsed the
    # arguments into one string and re-split it on whitespace, breaking
    # any argument that contained spaces.
    "$@"
}
# One-time server setup: generate the OpenVPN config, initialise the PKI
# and export a first client profile.
# NOTE: interactive — ovpn_initpki prompts and `vim` opens openvpn.conf.
function configure {
    set -ex;
    # Generate server config for the requested proto://host:port with
    # AES-256-CBC cipher and SHA384 auth digest.
    cmd docker run -v "${OVPN_DATA}:/etc/openvpn" --net=none --rm "${OVPN_IMAGE_NAME}" ovpn_genconfig \
        -C 'AES-256-CBC' \
        -a 'SHA384' \
        -u "${OVPN_PROTO}://${OVPN_SERVER_NAME}:${OVPN_PORT}"
    cmd docker run --rm -v "${OVPN_DATA}:/etc/openvpn" "${OVPN_IMAGE_NAME}" touch /etc/openvpn/vars
    # Build CA and server certificate without a passphrase.
    cmd docker run --rm -v "${OVPN_DATA}:/etc/openvpn" -it "${OVPN_IMAGE_NAME}" ovpn_initpki nopass
    vim openvpn.conf
    cmd docker run --rm -v "${OVPN_DATA}:/etc/openvpn" -it "${OVPN_IMAGE_NAME}" easyrsa build-client-full CLIENTNAME nopass
    set -x
    # Plain docker (not cmd): the profile is captured via stdout redirection.
    docker run --rm -v "${OVPN_DATA}:/etc/openvpn" "${OVPN_IMAGE_NAME}" ovpn_getclient CLIENTNAME > CLIENTNAME.ovpn
}
# Create certificates for a new client and export its .ovpn profile.
#   $1 - client name (also used for the output file <name>.ovpn)
function add_client {
    local client_name=$1
    # Quote the name everywhere so names with unusual characters survive.
    cmd docker run --rm -v "${OVPN_DATA}:/etc/openvpn" -it "${OVPN_IMAGE_NAME}" easyrsa build-client-full "${client_name}" nopass
    set -x
    # Plain docker (not cmd): the profile is captured via stdout redirection.
    docker run --rm -v "${OVPN_DATA}:/etc/openvpn" -it "${OVPN_IMAGE_NAME}" ovpn_getclient "${client_name}" > "${client_name}.ovpn"
}
# Run the VPN container detached and auto-restarting, with the data dir
# mounted and the host port forwarded to OpenVPN's default 1194/udp.
# NOTE(review): --privileged is broad; --cap-add=NET_ADMIN is usually
# sufficient for OpenVPN containers — confirm before changing.
function start {
    cmd docker run \
        -d \
        --restart always \
        --privileged \
        -v "${OVPN_DATA}:/etc/openvpn" \
        -p "${OVPN_PORT}:1194/udp" \
        --name "${OVPN_NAME}" \
        "${OVPN_IMAGE_NAME}"
}
# Drop the action argument (guarded: a bare `shift` errors when $# is 0,
# e.g. when ACTION defaulted to "build") and forward the remaining
# arguments individually. The original used "$*", which joined them into
# a single word.
[ $# -gt 0 ] && shift
"$ACTION" "$@"
| true
|
20100646f5bfd5b6510944a6db4fe01f67a6e48e
|
Shell
|
juanibiapina/dotfiles
|
/cli/libexec/nix/cleanup
|
UTF-8
| 578
| 2.765625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
#
# Summary: Clean up Nix store and boot entries
#
# Usage: {cmd}
#
# copied from: https://github.com/NixOS/nixpkgs/issues/3542

set -e

# remove old generations from system profile
sudo nix-env --delete-generations old --profile /nix/var/nix/profiles/system

# remove old generations from user profile
nix-env --delete-generations old

# reactivate current profile (also prunes stale boot-loader entries)
sudo /nix/var/nix/profiles/system/bin/switch-to-configuration switch

# remove unused stuff in store (unfortunately also removes dev stuff used by shell.nix)
# run for root first, then for the current user
sudo nix-store --gc
nix-store --gc
|
98193960928bc3b128aae76f4de79bbda7faa081
|
Shell
|
Wolfnet83/bookkeeping
|
/entrypoint.sh
|
UTF-8
| 255
| 2.625
| 3
|
[] |
no_license
|
#!/bin/sh
# Docker entrypoint for the Rails app: clear any stale server PID left by
# an unclean shutdown, then hand off to the container command.
set -e

# Remove a potentially pre-existing server.pid for Rails.
rm -f /bookke/tmp/pids/server.pid

# Database bootstrap steps, currently disabled:
# rails db:create
# rails db:migrate
# rails db:seed

# Then exec the container's main process (what's set as CMD in the Dockerfile).
exec "$@"
| true
|
3dc6166057b0dafc31b7803e153965b4422ea80c
|
Shell
|
NetBSD/src
|
/tests/bin/sh/t_builtins.sh
|
UTF-8
| 32,220
| 3.140625
| 3
|
[] |
no_license
|
# $NetBSD: t_builtins.sh,v 1.6 2021/05/18 21:37:56 kre Exp $
#
# Copyright (c) 2018 The NetBSD Foundation, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# the implementation of "sh" to test
: ${TEST_SH:="/bin/sh"}
#
# This file tests the various sh builtin utilities.
#
# Those utilities that are really external programs, which are builtin in
# for (mostly) performance (printf, kill, test, ...), are tested elsewhere.
# We do test the builtin "echo" here as (in NetBSD) it is different than
# the external one.
#
# The (mostly special) builtins which appear to be more syntax than command
# are tested in other test programs, rather than here (break, continue...)
#
# And finally, those which are fundamental to the operation of the shell,
# like wait, set, shift, ... are also tested in other test programs where
# all their operations can be more thoroughly verified.
#
# This leaves those which need to be built in (cd, umask, ...) but whose
# purpose is mostly to alter the environment in which the shell operates
# of that of the commands it runs. These tests act in co-operation with
# other tests exist here (where thy do) by not duplicating tests run
# elsewhere (ulimit is one example) but just adding to those.
# One day these might be unified.
#
# We do test both standard use of the builtins (where they are standard)
# and NetBSD sh extensions (when run on a shell with no support, such tests
# should be skipped.)
#
# Utility function able to test whether most of the builtins exist in
# the shell being tested.
# have_builtin name [no-skip] [prefix] [suffix] [msg-extra]
#   $1: utility to probe; $3/$4: text wrapped around $1 to form a runnable
#   probe command; $2: if non-empty, return 1 instead of atf_skip when the
#   builtin is absent; $5: extra text appended to the skip message.
# Succeeds (0) when the probe runs and `type` reports a builtin.
have_builtin()
{
	# First ensure the probe command itself runs, then ask `type`;
	# "reserved" is accepted because zsh reports reserved words that way.
	${TEST_SH} -c "( $3 $1 $4 ) >/dev/null 2>&1" &&
	LC_ALL=C ${TEST_SH} -c \
		'case "$( (type '"$1"') 2>&1)" in
		(*built*) exit 0 ;;
		(*reserved*) exit 0 ;;	# zsh!! (reserved words are builtin)
		esac
		exit 1' ||
	{
		test -z "$2" && atf_skip "${TEST_SH} has no '$1$5' built-in"
		return 1;
	}
	return 0
}
# And another to test if the shell being tested is the NetBSD shell,
# as we use these tests both to test standards conformance (correctness)
# which should be possible for all shells, and to test NetBSD
# extensions (which we mostly do by testing if the extension exists)
# and NetBSD sh behaviour for what is unspecified by the standard
# (so we will be notified via test failure should that unspecified
# behaviour alter) for which we have to discover if that shell is the
# one being tested.
# Succeed (0) iff the shell under test is the NetBSD sh, detected via its
# NETBSD_SHELL variable; our own copy is unset first so that only a value
# set by the tested shell itself can be observed.
is_netbsd_sh()
{
	unset NETBSD_SHELL 2>/dev/null
	case "$( ${TEST_SH} -c 'printf %s "${NETBSD_SHELL}"')" in
	('')	return 1 ;;
	(*)	return 0 ;;
	esac
}
### Helper functions
nl='
'
# Initialise the per-test-case counters used by check()/results().
#   $1 - identifier reported in failure summaries
reset()
{
	TEST_NUM=0
	TEST_FAILURES=''
	TEST_FAIL_COUNT=0
	TEST_ID="$1"

	# These are used in check()
	atf_require_prog tr
	atf_require_prog printf
	atf_require_prog mktemp
}
# Test run & validate.
#
# $1 is the command to run (via sh -c)
# $2 is the expected output
# $3 is the expected exit status from sh
# $4 is optional extra data for the error msg (if there is one)
#
# Stderr is exxpected to be empty, unless the expected exit code ($3) is != 0
# in which case some message there is expected (and nothing is a failure).
# When non-zero exit is expected, we note a different (non-zero) value
# observed, but do not fail the test because of that.
check()
{
fail=false
TEMP_FILE=$( mktemp OUT.XXXXXX )
TEST_NUM=$(( $TEST_NUM + 1 ))
MSG=
# our local shell (ATF_SHELL) better do quoting correctly...
# some of the tests expect us to expand $nl internally...
CMD="$1"
# determine what the test generates, preserving trailing \n's
result="$( ${TEST_SH} -c "${CMD}" 2>"${TEMP_FILE}" && printf X )"
STATUS=$?
result="${result%X}"
if [ "${STATUS}" -ne "$3" ]; then
MSG="${MSG}${MSG:+${nl}}[$TEST_NUM]"
MSG="${MSG} expected exit code $3, got ${STATUS}"
# don't actually fail just because of wrong exit code
# unless we either expected, or received "good"
# or something else is detected as incorrect as well.
case "$3/${STATUS}" in
(*/0|0/*) fail=true;;
esac
fi
if [ "$3" -eq 0 ]; then
if [ -s "${TEMP_FILE}" ]; then
MSG="${MSG}${MSG:+${nl}}[$TEST_NUM]"
MSG="${MSG} Messages produced on stderr unexpected..."
MSG="${MSG}${nl}$( cat "${TEMP_FILE}" )"
fail=true
fi
else
if ! [ -s "${TEMP_FILE}" ]; then
MSG="${MSG}${MSG:+${nl}}[$TEST_NUM]"
MSG="${MSG} Expected messages on stderr,"
MSG="${MSG} nothing produced"
fail=true
fi
fi
rm -f "${TEMP_FILE}"
if [ "$2" != "${result}" ]
then
MSG="${MSG}${MSG:+${nl}}[$TEST_NUM]"
MSG="${MSG} Expected: <<$2>>, received: <<$result>>"
fail=true
fi
if $fail
then
if [ -n "$4" ]; then
MSG="${MSG}${MSG:+${nl}}[$TEST_NUM] Note: ${4}"
fi
MSG="${MSG}${MSG:+${nl}}[$TEST_NUM]"
MSG="${MSG} Full command: <<${CMD}>>"
fi
$fail && test -n "$TEST_ID" && {
TEST_FAILURES="${TEST_FAILURES}${TEST_FAILURES:+${nl}}"
TEST_FAILURES="${TEST_FAILURES}${TEST_ID}[$TEST_NUM]:"
TEST_FAILURES="${TEST_FAILURES} Test of <<$1>> failed.";
TEST_FAILURES="${TEST_FAILURES}${nl}${MSG}"
TEST_FAIL_COUNT=$(( $TEST_FAIL_COUNT + 1 ))
return 0
}
$fail && atf_fail "Test[$TEST_NUM] failed: $(
# ATF does not like newlines in messages, so change them...
printf '%s' "${MSG}" | tr '\n' ';'
)"
return 0
}
# Emit accumulated check() failures (if any) and fail the test case.
#   $1 - optional reason passed to atf_expect_fail
results()
{
	test -n "$1" && atf_expect_fail "$1"

	test -z "${TEST_ID}" && return 0
	test -z "${TEST_FAILURES}" && return 0

	echo >&2 "=========================================="
	echo >&2 "While testing '${TEST_ID}'"
	echo >&2 " - - - - - - - - - - - - - - - - -"
	echo >&2 "${TEST_FAILURES}"

	atf_fail \
 "Test ${TEST_ID}: $TEST_FAIL_COUNT (of $TEST_NUM) subtests failed - see stderr"
}
####### End helpers
atf_test_case colon
colon_head() {
	atf_set "descr" "Tests the shell special builtin ':' command"
}
colon_body() {
	have_builtin : || return 0

	atf_check -s exit:0 -e empty -o empty ${TEST_SH} -c ":"

	# ':' is a special builtin, so we should exit on redirect error
	# and variable assignments should persist (stupid, but it is the rule)
	atf_check -s not-exit:0 -e not-empty -o empty ${TEST_SH} -c \
		": >/foo/bar; printf %s No-exit-BUG"
	atf_check -s exit:0 -e empty -o inline:OK ${TEST_SH} -c \
		'X=BUG; X=OK : ; printf %s "${X}"'
}
atf_test_case echo
echo_head() {
	atf_set "descr" "Tests the shell builtin version of echo"
}
echo_body() {
	have_builtin echo || return 0

	# This test pins NetBSD-specific echo semantics (-n/-e handling),
	# so skip it on any other shell.
	if ! is_netbsd_sh; then
		atf_skip \
	   "${TEST_SH%% *} is not the NetBSD shell, this test is for it alone"
		return 0
	fi

	reset echo

	check 'echo "hello world"' "hello world${nl}" 0
	check 'echo hello world' "hello world${nl}" 0
	check 'echo -n "hello world"' "hello world" 0
	check 'IFS=:; echo hello world' "hello world${nl}" 0
	check 'IFS=; echo hello world' "hello world${nl}" 0

	check 'echo -e "hello world"' "hello world${nl}" 0
	check 'echo -e hello world' "hello world${nl}" 0
	check 'IFS=:; echo -e hello world' "hello world${nl}" 0

	# only one of the options is used
	check 'echo -e -n "hello world"' "-n hello world${nl}" 0
	check 'echo -n -e "hello world"' "-e hello world" 0

	# and only when it is alone
	check 'echo -en "hello world"' "-en hello world${nl}" 0
	check 'echo -ne "hello world"' "-ne hello world${nl}" 0

	# echo is specifically required to *not* support --
	check 'echo -- "hello world"' "-- hello world${nl}" 0

	# similarly any other unknown option is simply part of the output
	for OPT in a b c v E N Q V 0 1 2 @ , \? \[ \] \( \; . \* -help -version
	do
		check "echo '-${OPT}' foo" "-${OPT} foo${nl}" 0
	done

	# Now test the \\ expansions, with and without -e
	# We rely upon printf %b (tested elsewhere, not only a sh test)
	# to verify the output when the \\ is supposed to be expanded.
	for E in '' -e
	do
		for B in a b c e f n r t v \\ 04 010 012 0177
		do
			S="test string with \\${B} in it"
			if [ -z "${E}" ]; then
				R="${S}${nl}"
			else
				R="$(printf '%b\nX' "${S}")"
				R=${R%X}
			fi
			check "echo $E '${S}'" "${R}" 0
		done
	done

	# writing to a closed fd must fail (status 1) without killing the shell
	check 'echo foo >&-' "" 1
	check 'echo foo >&- 2>&-; echo $?; echo $?' "1${nl}0${nl}" 0

	results
}
atf_test_case eval
eval_head() {
	atf_set "descr" "Tests the shell special builtin 'eval'"
}
eval_body() {
	have_builtin eval || return 0

	# exit status of eval'd text becomes eval's status
	atf_check -s exit:0 -e empty -o empty ${TEST_SH} -c 'eval "exit 0"'
	atf_check -s exit:1 -e empty -o empty ${TEST_SH} -c 'eval "exit 1"'
	atf_check -s exit:0 -e empty -o empty ${TEST_SH} -c 'eval exit 0'
	atf_check -s exit:1 -e empty -o empty ${TEST_SH} -c 'eval exit 1'

	atf_check -s exit:0 -e empty -o inline:0 ${TEST_SH} -c \
		'eval true; printf %d $?'
	atf_check -s exit:0 -e empty -o inline:1 ${TEST_SH} -c \
		'eval false; printf %d $?'

	# indirect variable access via eval
	atf_check -s exit:0 -e empty -o inline:abc ${TEST_SH} -c \
		'X=a Y=b Z=c; for V in X Y Z; do eval "printf %s \$$V"; done'
	atf_check -s exit:0 -e empty -o inline:abc ${TEST_SH} -c \
		'X=a Y=b Z=c; for V in X Y Z; do eval printf %s \$$V; done'
	atf_check -s exit:0 -e empty -o inline:XYZ ${TEST_SH} -c \
		'for V in X Y Z; do eval "${V}=${V}"; done; printf %s "$X$Y$Z"'

	# make sure eval'd strings affect the shell environment
	atf_check -s exit:0 -e empty -o inline:/b/ ${TEST_SH} -c \
		'X=a; eval "X=b"; printf /%s/ "${X-unset}"'
	atf_check -s exit:0 -e empty -o inline:/b/ ${TEST_SH} -c \
		'X=a; Y=X; Z=b; eval "$Y=$Z"; printf /%s/ "${X-unset}"'
	atf_check -s exit:0 -e empty -o inline:/unset/ ${TEST_SH} -c \
		'X=a; eval "unset X"; printf /%s/ "${X-unset}"'
	atf_check -s exit:0 -e empty -o inline:// ${TEST_SH} -c \
		'unset X; eval "X="; printf /%s/ "${X-unset}"'

	atf_check -s exit:0 -e empty -o inline:'2 y Z ' ${TEST_SH} -c \
		'set -- X y Z; eval shift; printf "%s " "$#" "$@"'

	# ensure an error in an eval'd string causes the shell to exit
	# unless 'eval' is preceded by 'command' (in which case the
	# string is not eval'd but execution continues)
	atf_check -s not-exit:0 -e not-empty -o empty ${TEST_SH} -c \
		'eval "if done"; printf %s status=$?'

	atf_check -s exit:0 -e not-empty -o 'match:status=[1-9]' \
		${TEST_SH} -c \
		'command eval "if done"; printf %s status=$?'

	atf_check -s not-exit:0 -e not-empty \
		-o 'match:status=[1-9]' -o 'not-match:[XY]' ${TEST_SH} -c \
		'command eval "printf X; if done; printf Y"
		 S=$?; printf %s status=$S; exit $S'

	# whether 'X' is output here is (or should be) unspecified.
	atf_check -s not-exit:0 -e not-empty \
		-o 'match:status=[1-9]' -o 'not-match:Y' ${TEST_SH} -c \
		'command eval "printf X
			if done
			printf Y"
		 S=$?; printf %s status=$S; exit $S'

	if is_netbsd_sh
	then
		# but on NetBSD we expect that X to appear...
		atf_check -s not-exit:0 -e not-empty -o 'match:X' \
		    -o 'match:status=[1-9]' -o 'not-match:Y' ${TEST_SH} -c \
			'command eval "printf X
				if done
				printf Y"
			 S=$?; printf %s status=$S; exit $S'
	fi
}
atf_test_case exec
exec_head() {
	atf_set "descr" "Tests the shell special builtin 'exec'"
}
exec_body() {
	have_builtin exec || return 0

	# exec replaces the shell: nothing after it runs
	atf_check -s exit:0 -e empty -o inline:OK ${TEST_SH} -c \
		'exec printf OK; printf BROKEN; exit 3'
	# in a subshell, only the subshell is replaced
	atf_check -s exit:3 -e empty -o inline:OKOK ${TEST_SH} -c \
		'(exec printf OK); printf OK; exit 3'
}
atf_test_case export
export_head() {
	atf_set "descr" "Tests the shell builtin 'export'"
}
export_body() {
	have_builtin export || return 0

	atf_check -s exit:0 -e empty -o empty ${TEST_SH} -c 'export VAR'
	atf_check -s exit:0 -e empty -o empty ${TEST_SH} -c 'export VAR=abc'
	atf_check -s exit:0 -e empty -o empty ${TEST_SH} -c 'export V A R'
	atf_check -s exit:0 -e empty -o empty ${TEST_SH} -c \
		'export V A=1 R=2'

	# printenv exits 1 when the variable is not in the environment,
	# 0 (printing the value) when it is.
	atf_require_prog printenv

	atf_check -s exit:1 -e empty -o empty ${TEST_SH} -c \
		'unset VAR || exit 7; export VAR; printenv VAR'
	atf_check -s exit:0 -e empty -o inline:\\n ${TEST_SH} -c \
		'unset VAR || exit 7; export VAR=; printenv VAR'
	atf_check -s exit:0 -e empty -o inline:\\n ${TEST_SH} -c \
		'unset VAR || exit 7; VAR=; export VAR; printenv VAR'
	atf_check -s exit:0 -e empty -o inline:\\n ${TEST_SH} -c \
		'unset VAR || exit 7; export VAR; VAR=; printenv VAR'
	atf_check -s exit:0 -e empty -o inline:XYZ\\n ${TEST_SH} -c \
		'unset VAR || exit 7; export VAR=XYZ; printenv VAR'
	atf_check -s exit:0 -e empty -o inline:ABC\\n ${TEST_SH} -c \
		'VAR=ABC; export VAR; printenv VAR'
	atf_check -s exit:0 -e empty -o inline:ABC\\n ${TEST_SH} -c \
		'unset VAR || exit 7; export VAR; VAR=ABC; printenv VAR'

	# assignments after export must update the environment copy
	atf_check -s exit:0 -e empty -o inline:ABC\\nXYZ\\n ${TEST_SH} -c \
		'VAR=ABC; export VAR; printenv VAR; VAR=XYZ; printenv VAR'
	atf_check -s exit:0 -e empty -o inline:ABC\\nXYZ\\n ${TEST_SH} -c \
		'unset VAR || exit 7; export VAR;
		VAR=ABC; printenv VAR; VAR=XYZ; printenv VAR'

	# don't check VAR=value, some shells provide meaningless quoting...
	atf_check -s exit:0 -e empty -o match:VAR= -o match:foobar \
		${TEST_SH} -c \
			'VAR=foobar ; export VAR ; export -p'
	atf_check -s exit:0 -e empty -o match:VAR= -o match:foobar \
		${TEST_SH} -c \
			'export VAR=foobar ; export -p'
	atf_check -s exit:0 -e empty -o match:VAR\$ ${TEST_SH} -c \
		'unset VAR ; export VAR ; export -p'
	atf_check -s exit:0 -e empty -o not-match:VAR ${TEST_SH} -c \
		'export VAR ; unset VAR ; export -p'
	atf_check -s exit:0 -e empty -o not-match:VAR -o not-match:foobar \
		${TEST_SH} -c \
			'VAR=foobar; export VAR ; unset VAR ; export -p'

	# `export -p` output must be re-usable to restore the export set
	atf_check -s exit:0 -e empty -o match:VAR= -o match:FOUND=foobar \
		${TEST_SH} -c \
			'export VAR=foobar; V=$(export -p);
			unset VAR; eval "$V"; export -p;
			printf %s\\n FOUND=${VAR-unset}'
	atf_check -s exit:0 -e empty -o match:VAR -o match:FOUND=unset \
		${TEST_SH} -c \
			'export VAR; V=$(export -p);
			unset VAR; eval "$V"; export -p;
			printf %s\\n FOUND=${VAR-unset}'

	atf_check -s exit:1 -e empty -o inline:ABC\\nXYZ\\n ${TEST_SH} -c \
		'VAR=ABC; export VAR; printenv VAR; VAR=XYZ; printenv VAR;
		unset VAR; printenv VAR; VAR=PQR; printenv VAR'
	atf_check -s exit:0 -e empty -o inline:ABC\\nXYZ\\nVAR=unset\\nMNO\\n \
		${TEST_SH} -c \
			'VAR=ABC; export VAR; printenv VAR; VAR=XYZ; printenv VAR;
			unset VAR; printf %s\\n "VAR=${VAR-unset}"; printenv VAR;
			VAR=PQR; printenv VAR; VAR=MNO; export VAR; printenv VAR'
}
atf_test_case export_nbsd
export_nbsd_head() {
	atf_set "descr" "Tests NetBSD extensions to the shell builtin 'export'"
}
export_nbsd_body() {
	# -n: remove a variable from the export set without unsetting it
	have_builtin "export" "" "" "-n foo" ' -n' || return 0

	atf_require_prog printenv

	atf_check -s exit:1 -e empty -o inline:ABC\\nXYZ\\n ${TEST_SH} -c \
		'VAR=ABC; export VAR; printenv VAR; VAR=XYZ; printenv VAR;
		export -n VAR; printenv VAR; VAR=PQR; printenv VAR'
	atf_check -s exit:0 -e empty -o inline:ABC\\nXYZ\\nVAR=XYZ\\nMNO\\n \
		${TEST_SH} -c \
			'VAR=ABC; export VAR; printenv VAR; VAR=XYZ; printenv VAR;
			export -n VAR; printf %s\\n "VAR=${VAR-unset}"; printenv VAR;
			VAR=PQR; printenv VAR; VAR=MNO; export VAR; printenv VAR'

	# -x: make a variable un-exportable (and un-re-exportable)
	have_builtin "export" "" "" -x ' -x' || return 0

	atf_check -s exit:1 -e empty -o empty ${TEST_SH} -c \
		'export VAR=exported; export -x VAR; printenv VAR'
	atf_check -s exit:1 -e empty -o empty ${TEST_SH} -c \
		'export VAR=exported; export -x VAR; VAR=local; printenv VAR'
	# a one-command assignment still exports for that command only
	atf_check -s exit:0 -e empty -o inline:once\\nx\\n ${TEST_SH} -c \
		'export VAR=exported
		 export -x VAR
		 VAR=once printenv VAR
		 printenv VAR || printf %s\\n x'
	atf_check -s not-exit:0 -e not-empty -o empty ${TEST_SH} -c \
		'export VAR=exported; export -x VAR; export VAR=FOO'

	# -q: query whether the named variables are all exported
	have_builtin export '' 'export VAR;' '-q VAR' ' -q' || return 0

	atf_check -s exit:1 -o empty -e empty ${TEST_SH} -c \
		'unset VAR; VAR=set; export -q VAR'
	atf_check -s exit:0 -o empty -e empty ${TEST_SH} -c \
		'export VAR=set; export -q VAR'

	atf_check -s exit:1 -o empty -e empty ${TEST_SH} -c \
		'VAR=set; RW=set; export -q VAR RW'
	atf_check -s exit:1 -o empty -e empty ${TEST_SH} -c \
		'VAR=set; export RO=set; export -q VAR RO'
	atf_check -s exit:0 -o empty -e empty ${TEST_SH} -c \
		'export VAR=set RO=set; export -q VAR RO'
	atf_check -s exit:1 -o empty -e empty ${TEST_SH} -c \
		'unset VAR; export -q VAR'

	# next one is the same as the have_builtin test, so "cannot" fail...
	atf_check -s exit:0 -o empty -e empty ${TEST_SH} -c \
		'unset VAR; export VAR; export -q VAR'

	# if we have -q we should also have -p var...
	# What's more, we are testing NetBSD sh, so we know output format.
	atf_check -s exit:0 -e empty -o match:VAR=foobar \
		${TEST_SH} -c \
			'VAR=foobar ; export VAR ; export -p VAR'
	atf_check -s exit:0 -e empty -o inline:1 \
		${TEST_SH} -c \
			'VAR=foobar ; export VAR ;
			printf %d $(export -p VAR | wc -l)'
	atf_check -s exit:0 -e empty \
	    -o inline:'export VAR=foobar\nexport OTHER\n' \
		${TEST_SH} -c \
			'VAR=foobar; export VAR OTHER; export -p VAR OTHER'
	atf_check -s exit:0 -e empty \
	    -o inline:'export A=aaa\nexport B\nexport D='"''"'\n' \
		${TEST_SH} -c \
			'A=aaa D= C=foo; unset B; export A B D;
			export -p A B C D'
}
# Minimal existence test for the 'getopts' builtin.
# have_builtin (defined earlier in this file) runs a tiny probe script and
# skips the test case when the named builtin is absent from ${TEST_SH}.
atf_test_case getopts
getopts_head() {
atf_set "descr" "Tests the shell builtin 'getopts'"
}
getopts_body() {
# probe: f() { getopts a x; }; f -a  -- just checks getopts can be invoked
have_builtin getopts "" "f() {" "a x; }; f -a" || return 0
}
atf_test_case jobs
jobs_head() {
atf_set "descr" "Tests the shell builting 'jobs' command"
}
jobs_body() {
have_builtin jobs || return 0
atf_require_prog sleep
# note that POSIX requires that we reference $! otherwise
# the shell is not required to remember the process...
# First: job still running when 'jobs' reports it.
atf_check -s exit:0 -e empty -o match:sleep -o match:Running \
${TEST_SH} -c 'sleep 1 & P=$!; jobs; wait'
# Second: job already finished, so 'jobs' reports Done.
atf_check -s exit:0 -e empty -o match:sleep -o match:Done \
${TEST_SH} -c 'sleep 1 & P=$!; sleep 2; jobs; wait'
}
# Minimal existence test for the 'read' builtin (probe: echo x | read var).
atf_test_case read
read_head() {
atf_set "descr" "Tests the shell builtin read command"
}
read_body() {
have_builtin read "" "echo x|" "var" || return 0
}
# Tests for the POSIX 'readonly' special builtin: syntax forms, value
# assignment semantics, write protection, and 'readonly -p' output.
atf_test_case readonly
readonly_head() {
atf_set "descr" "Tests the shell builtin 'readonly'"
}
readonly_body() {
have_builtin readonly || return 0
# Accepted argument forms: bare names, name=value, and mixtures.
atf_check -s exit:0 -e empty -o empty ${TEST_SH} -c 'readonly VAR'
atf_check -s exit:0 -e empty -o empty ${TEST_SH} -c 'readonly VAR=abc'
atf_check -s exit:0 -e empty -o empty ${TEST_SH} -c 'readonly V A R'
atf_check -s exit:0 -e empty -o empty ${TEST_SH} -c 'readonly V A=1 R=2'
# readonly without a value must not set the variable...
atf_check -s exit:0 -e empty -o inline:unset ${TEST_SH} -c \
'unset VAR; readonly VAR; printf %s ${VAR-unset}'
# ...but readonly NAME=value both sets and protects it.
atf_check -s exit:0 -e empty -o inline:set ${TEST_SH} -c \
'unset VAR; readonly VAR=set; printf %s ${VAR-unset}'
atf_check -s exit:0 -e empty -o inline:set ${TEST_SH} -c \
'VAR=initial; readonly VAR=set; printf %s ${VAR-unset}'
# Assigning to a readonly variable is an error in a special builtin context.
atf_check -s not-exit:0 -e not-empty -o empty ${TEST_SH} -c \
'readonly VAR=initial; VAR=new; printf %s "${VAR}"'
# don't check VAR=value, some shells provide meaningless quoting...
atf_check -s exit:0 -e empty -o match:VAR= -o match:foobar \
${TEST_SH} -c \
'VAR=foobar ; readonly VAR ; readonly -p'
atf_check -s exit:0 -e empty -o match:VAR= -o match:foobar \
${TEST_SH} -c \
'readonly VAR=foobar ; readonly -p'
# A new value given to readonly replaces any earlier one.
atf_check -s exit:0 -e empty -o match:VAR= -o match:foobar \
-o not-match:badvalue ${TEST_SH} -c \
'VAR=badvalue; readonly VAR=foobar ; readonly -p'
atf_check -s exit:0 -e empty -o match:VAR\$ ${TEST_SH} -c \
'unset VAR ; readonly VAR ; readonly -p'
# checking that readonly -p works (to reset stuff) is a pain...
# particularly since not all shells say "readonly..." by default
# Round-trip: capture 'readonly -p' output, eval it, and confirm the
# variable (and its value / unset state) is reinstated.
atf_check -s exit:0 -e empty -o match:MYVAR= -o match:FOUND=foobar \
${TEST_SH} -c \
'V=$(readonly MYVAR=foobar; readonly -p | grep " MYVAR")
unset MYVAR; eval "$V"; readonly -p;
printf %s\\n FOUND=${MYVAR-unset}'
atf_check -s exit:0 -e empty -o match:MYVAR\$ -o match:FOUND=unset \
${TEST_SH} -c \
'V=$(readonly MYVAR; readonly -p | grep " MYVAR")
unset MYVAR; eval "$V"; readonly -p;
printf %s\\n "FOUND=${MYVAR-unset}"'
atf_check -s exit:0 -e empty -o match:MYVAR= -o match:FOUND=empty \
${TEST_SH} -c \
'V=$(readonly MYVAR=; readonly -p | grep " MYVAR")
unset VAR; eval "$V"; readonly -p;
printf %s\\n "FOUND=${MYVAR-unset&}${MYVAR:-empty}"'
# don't test stderr, some shells inist on generating a message for an
# unset of a readonly var (rather than simply having unset make $?=1)
atf_check -s not-exit:0 -e empty -o empty ${TEST_SH} -c \
'unset VAR; readonly VAR=set;
unset VAR 2>/dev/null && printf %s ${VAR:-XX}'
atf_check -s not-exit:0 -e ignore -o empty ${TEST_SH} -c \
'unset VAR; readonly VAR=set; unset VAR && printf %s ${VAR:-XX}'
atf_check -s exit:0 -e ignore -o inline:set ${TEST_SH} -c \
'unset VAR; readonly VAR=set; unset VAR; printf %s ${VAR-unset}'
}
# NetBSD-specific 'readonly' extensions: -q (query readonly status via exit
# code) and -p with explicit variable name arguments.
atf_test_case readonly_nbsd
readonly_nbsd_head() {
atf_set "descr" "Tests NetBSD extensions to 'readonly'"
}
readonly_nbsd_body() {
have_builtin readonly '' 'readonly VAR;' '-q VAR' ' -q' || return 0
# -q exits 0 only when every named variable is readonly.
atf_check -s exit:1 -o empty -e empty ${TEST_SH} -c \
'VAR=set; readonly -q VAR'
atf_check -s exit:0 -o empty -e empty ${TEST_SH} -c \
'readonly VAR=set; readonly -q VAR'
atf_check -s exit:1 -o empty -e empty ${TEST_SH} -c \
'VAR=set; RW=set; readonly -q VAR RW'
atf_check -s exit:1 -o empty -e empty ${TEST_SH} -c \
'VAR=set; readonly RO=set; readonly -q VAR RO'
atf_check -s exit:0 -o empty -e empty ${TEST_SH} -c \
'readonly VAR=set RO=set; readonly -q VAR RO'
atf_check -s exit:1 -o empty -e empty ${TEST_SH} -c \
'unset VAR; readonly -q VAR'
# next one is the same as the have_builtin test, so "cannot" fail...
atf_check -s exit:0 -o empty -e empty ${TEST_SH} -c \
'unset VAR; readonly VAR; readonly -q VAR'
# if we have -q we should also have -p var...
# What's more, we are testing NetBSD sh, so we know output format.
atf_check -s exit:0 -e empty -o match:VAR=foobar \
${TEST_SH} -c \
'VAR=foobar ; readonly VAR ; readonly -p VAR'
# 'readonly -p VAR' prints exactly one line for one variable.
atf_check -s exit:0 -e empty -o inline:1 \
${TEST_SH} -c \
'VAR=foobar ; readonly VAR ;
printf %d $(readonly -p VAR | wc -l)'
atf_check -s exit:0 -e empty \
-o inline:'readonly VAR=foobar\nreadonly OTHER\n' \
${TEST_SH} -c \
'VAR=foobar; readonly VAR OTHER; readonly -p VAR OTHER'
# Non-readonly names (C here) are simply omitted from -p output.
atf_check -s exit:0 -e empty \
-o inline:'readonly A=aaa\nreadonly B\nreadonly D='"''"'\n' \
${TEST_SH} -c \
'A=aaa D= C=foo; unset B; readonly A B D;
readonly -p A B C D'
}
# Existence tests for cd/pwd, plus behavioral checks for true/false and a
# minimal existence test for 'type'.
atf_test_case cd_pwd
cd_pwd_head() {
atf_set "descr" "Tests the shell builtins 'cd' & 'pwd'"
}
cd_pwd_body() {
have_builtin cd "" "HOME=/;" || return 0
have_builtin pwd || return 0
}
atf_test_case true_false
true_false_head() {
atf_set "descr" "Tests the 'true' and 'false' shell builtin commands"
}
true_false_body() {
have_builtin true || return 0
atf_check -s exit:0 -e empty -o empty ${TEST_SH} -c true
# true is not a special builtin, so errors do not cause exit
# but we should still get an error from the broken redirect,
# which makes the 'true' command itself fail -- so the "NOT-"
# branch must not run, and only "OK" is printed.
atf_check -s exit:0 -e not-empty -o inline:OK ${TEST_SH} -c \
"true >/foo/bar && printf %s NOT-; printf %s OK"
# and var-assigns should not affect the current sh env
atf_check -s exit:0 -e empty -o inline:IS-OK ${TEST_SH} -c \
'X=OK; X=BROKEN true && printf %s IS-; printf %s "${X}"'
have_builtin false "" ! || return 0
atf_check -s exit:1 -e empty -o empty ${TEST_SH} -c false
}
atf_test_case type
type_head() {
atf_set "descr" "Tests the sh builtin 'type' command"
}
type_body() {
# probe: 'type type' -- just verifies the builtin answers for itself
have_builtin type "" "" type || return 0
}
# This currently has its own t_ulimit - either merge that here,
# or delete this one and keep that... ulimit -n is also tested in
# the t_redir tests, as that affects the shell's use of file descriptors
atf_test_case ulimit
ulimit_head() {
atf_set "descr" "Tests the sh builtin 'ulimit'"
}
ulimit_body() {
have_builtin ulimit || return 0
}
# Exhaustive umask test: for many mask values, verify the mode of files
# created by the shell itself (> TF), by a child process (touch), and of
# directories (mkdir).  'check', 'reset' and 'results' are test-harness
# helpers defined earlier in this file.
atf_test_case umask
umask_head() {
atf_set "descr" "Tests the sh builtin 'umask'"
}
umask_body() {
have_builtin umask || return 0
atf_require_prog touch
atf_require_prog stat
atf_require_prog rm
atf_require_prog chmod
reset umask
# 8 octal digits
for M in 0 1 2 3 4 5 6 7
do
# Test numbers start: 1 25 49 73 97 121 145 169
# 8 combinations of each to test (64 inner loops)
# 3 tests in each loop, hence 192 subtests in all
# Test numbers from loop above, plus (below) and the next 2
#+ 1 4 7 10 13
for T in "0${M}" "00${M}" "0${M}0" "0${M}00" "0${M}${M}" \
"0${M}${M}0" "0${M}${M}${M}" "0${M}0${M}"
#+ 16 19 22
do
# umask turns bits off, calculate which bits will be on...
D=$(( 0777 & ~ T )) # for directories
F=$(( $D & ~ 0111 )) # and files with no 'x' bits
# Note: $(( )) always produces decimal, so we test that format
# (see '%d' in printf of stat result)
# need chmod or we might have no perm to rmdir TD
{ chmod +rwx TF TFT TD; rm -fr TF TFT TD; } 2>/dev/null || :
# check that the umask applies to files created by the shell
check \
"umask $T; > TF; printf %d \$(stat -nf %#Lp TF)" \
"$F" 0 "$F is $(printf %#o $F)" # 1 4 7 10 ...
# and to files created by commands that the shell runs
check \
"umask $T; touch TFT; printf %d \$(stat -nf %#Lp TFT)" \
"$F" 0 "$F is $(printf %#o $F)" # 2 5 8 11 ...
# and to directories created b ... (directories keep 'x')
check \
"umask $T; mkdir TD; printf %d \$(stat -nf %#Lp TD)" \
"$D" 0 "$D is $(printf %#o $D)" # 3 6 9 12 ...
done
done
# Now add a few more tests with less regular u/g/m masks
# In here, include tests where umask value has no leading '0'
# 10 loops, the same 3 tests in each loop, 30 more subtests
# from 193 .. 222
# 193 196 199 202 205 208 211 214 217 220
for T in 013 047 722 0772 027 123 421 0124 0513 067
do
# Prefix '0' so bare values like 722 are still parsed as octal.
D=$(( 0777 & ~ 0$T ))
F=$(( $D & ~ 0111 ))
{ chmod +rwx TF TFT TD; rm -fr TF TFT TD; } 2>/dev/null || :
check \
"umask $T; > TF; printf %d \$(stat -nf %#Lp TF)" \
"$F" 0 "$F is $(printf %#o $F)" # +0
check \
"umask $T; touch TFT; printf %d \$(stat -nf %#Lp TFT)" \
"$F" 0 "$F is $(printf %#o $F)" # +1
check \
"umask $T; mkdir TD; printf %d \$(stat -nf %#Lp TD)" \
"$D" 0 "$D is $(printf %#o $D)" # +2
done
results
}
atf_test_case unset
unset_head() {
atf_set "descr" "Tests the sh builtin 'unset'"
}
unset_body() {
have_builtin unset || return 0
}
# Existence tests for various non-POSIX builtins (ash/ksh/BSD extensions),
# plus behavioral tests for the BSD 'setvar' builtin.
atf_test_case hash
hash_head() {
atf_set "descr" "Tests the sh builtin 'hash' (ash extension)"
}
hash_body() {
have_builtin hash || return 0
}
atf_test_case jobid
jobid_head() {
atf_set "descr" "Tests sh builtin 'jobid' (NetBSD extension)"
}
jobid_body() {
# have_builtin jobid || return 0 No simple jobid command test
# jobid needs a job argument, so probe it manually instead.
$TEST_SH -c '(exit 0)& jobid $!' >/dev/null 2>&1 || {
atf_skip "${TEST_SH} has no 'jobid' built-in"
return 0
}
}
atf_test_case let
let_head() {
atf_set "descr" "Tests the sh builtin 'let' (common extension from ksh)"
}
let_body() {
have_builtin let "" "" 1 || return 0
}
atf_test_case local
local_head() {
atf_set "descr" "Tests the shell builtin 'local' (common extension)"
}
local_body() {
# 'local' is only valid inside a function, hence the f() wrapper probe.
have_builtin local "" "f () {" "X; }; f" || return 0
}
atf_test_case setvar
setvar_head() {
atf_set "descr" "Tests the shell builtin 'setvar' (BSD extension)"
}
setvar_body() {
have_builtin setvar || return 0
# setvar NAME VALUE assigns indirectly, like NAME=VALUE.
atf_check -s exit:0 -e empty -o inline:foo ${TEST_SH} -c \
'unset PQ && setvar PQ foo; printf %s "${PQ-not set}"'
atf_check -s exit:0 -e empty -o inline:abcd ${TEST_SH} -c \
'for x in a b c d; do setvar "$x" "$x"; done;
printf %s "$a$b$c$d"'
# An empty VALUE sets the variable to the empty string (output empty).
atf_check -s exit:0 -e empty -o empty ${TEST_SH} -c \
'a=1; b=2; c=3; d=4
for x in a b c d; do setvar "$x" ""; done;
printf %s "$a$b$c$d"'
}
# fdflags (NetBSD extension) test cases.  All bodies below are currently
# existence-only stubs guarded by have_builtin; the _head descriptions
# record the behavior each case is intended to cover eventually.
atf_test_case fdflags
fdflags_head() {
atf_set "descr" \
"Tests basic operation of sh builtin 'fdflags' (NetBSD extension)"
}
fdflags_body() {
have_builtin fdflags || return 0
}
atf_test_case fdflags__s
fdflags__s_head() {
atf_set "descr" "Checks setting/clearing flags on file descriptors"
}
fdflags__s_body() {
have_builtin fdflags || return 0
}
atf_test_case fdflags__v
fdflags__v_head() {
atf_set "descr" "Checks verbose operation of fdflags"
}
fdflags__v_body() {
have_builtin fdflags || return 0
}
atf_test_case fdflags__v_s
fdflags__v_s_head() {
atf_set "descr" "tests verbose operation of fdflags -s"
}
fdflags__v_s_body() {
have_builtin fdflags || return 0
}
atf_test_case fdflags_multiple_fd
fdflags_multiple_fd_head() {
atf_set "descr" "Checks operation of fdflags with more than one fd"
}
fdflags_multiple_fd_body() {
have_builtin fdflags || return 0
}
atf_test_case fdflags_one_flag_at_a_time
fdflags_one_flag_at_a_time_head() {
atf_set "descr" "Tests all possible fdflags flags, and combinations"
}
fdflags_one_flag_at_a_time_body() {
have_builtin fdflags || return 0
}
atf_test_case fdflags_save_restore
fdflags_save_restore_head() {
atf_set "descr" 'Verify that fd flags can be saved and restored'
}
fdflags_save_restore_body() {
have_builtin fdflags || return 0
}
atf_test_case fdflags_names_abbreviated
fdflags_names_abbreviated_head() {
atf_set "descr" 'Tests using abbreviated names for fdflags'
}
fdflags_names_abbreviated_body() {
have_builtin fdflags || return 0
}
atf_test_case fdflags_xx_errors
fdflags_xx_errors_head() {
atf_set "descr" 'Check various erroneous fdflags uses'
}
fdflags_xx_errors_body() {
have_builtin fdflags || return 0
}
# Register all test cases with the atf framework.
atf_init_test_cases() {
# "standard" builtin commands in sh
# no tests of the "very special" (almost syntax) builtins
# (break/continue/return) - they're tested enough elsewhere
atf_add_test_case cd_pwd
atf_add_test_case colon
atf_add_test_case echo
atf_add_test_case eval
atf_add_test_case exec
atf_add_test_case export
atf_add_test_case getopts
atf_add_test_case jobs
atf_add_test_case read
atf_add_test_case readonly
atf_add_test_case true_false
atf_add_test_case type
atf_add_test_case ulimit
atf_add_test_case umask
atf_add_test_case unset
# exit/wait/set/shift/trap/alias/unalias/. should have their own tests
# fc/times/fg/bg/% are too messy to contemplate for now
# command ?? (probably should have some tests)
# Note that builtin versions of, printf, kill, ... are tested separately
# (these are all "optional" builtins)
# (echo is tested here because NetBSD sh builtin echo and /bin/echo
# are different)
atf_add_test_case export_nbsd
atf_add_test_case hash
atf_add_test_case jobid
atf_add_test_case let
atf_add_test_case local
atf_add_test_case readonly_nbsd
atf_add_test_case setvar
# inputrc should probably be tested in libedit tests (somehow)
# fdflags has a bunch of test cases
# Always run one test, so we get at least "skipped" result
atf_add_test_case fdflags
# but no need to say "skipped" lots more times...
# Only register the detailed fdflags cases when the builtin exists.
have_builtin fdflags available && {
atf_add_test_case fdflags__s
atf_add_test_case fdflags__v
atf_add_test_case fdflags__v_s
atf_add_test_case fdflags_multiple_fd
atf_add_test_case fdflags_names_abbreviated
atf_add_test_case fdflags_one_flag_at_a_time
atf_add_test_case fdflags_save_restore
atf_add_test_case fdflags_xx_errors
}
return 0
}
| true
|
ea15cfe4fda7c0476522b03a7da4c2bb930c994c
|
Shell
|
litong01/developernet
|
/onvm/scripts/reset/setup-storage.sh
|
UTF-8
| 538
| 3.28125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Create (if needed) an ext4 logical volume vg02/storage and mount it at
# /storage, persisting the mount in /etc/fstab.  Idempotent: both the
# lvcreate and the mount/fstab steps are guarded so reruns are no-ops.
# $1 sys_password
# $2 public ip eth0
# $3 private ip eth1
source /onvm/scripts/ini-config
# Load the node configuration into leap_* shell variables.
eval $(parse_yaml '/onvm/conf/nodes.conf.yml' 'leap_')
mkdir -p /storage
# Non-empty when the logical volume already exists.
sp=$(lvdisplay | grep /dev/vg02/storage)
if [ ! "$sp" ];then
echo 'Ready to create storage'
# Consume all remaining free space in volume group vg02.
lvcreate -l 100%FREE -n storage vg02
mkfs -t ext4 /dev/vg02/storage
fi
# Non-empty when /storage is already mounted.
sp=$(mount | grep /storage)
if [ ! "$sp" ]; then
mount /dev/vg02/storage /storage/
echo '/dev/mapper/vg02-storage /storage ext4 errors=continue 0 0' >> /etc/fstab
fi
| true
|
1e8adf99b233ac11b9cdb3c1c24f42d21772caac
|
Shell
|
omni360/CfgMgmt
|
/cookbooks/typescript/templates/default/bash_profile.erb
|
UTF-8
| 387
| 2.578125
| 3
|
[] |
no_license
|
# vi: set ft=sh :
# Chef template for a developer ~/.bash_profile (TypeScript node).
# Let's have a nicer prompt:
PS1='\[\e[0;32m\]\h: \W\[\e[0m\] '
# Enable colors for ls
alias ls="ls --color"
alias ll="ls -l --color"
# Colorful git!
git config --global color.diff auto
git config --global color.status auto
git config --global color.branch auto
# Env var for TypeScript built-in defs
export TS_LIB=/usr/local/lib/node_modules/typescript/bin/lib.d.ts
| true
|
6da557e6f95086eff11481086c322b80a7a8af5d
|
Shell
|
peteramerico/1clean-git
|
/drv.sh
|
UTF-8
| 2,626
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
# Iterative migration-velocity workflow: each iteration copies the clean
# template (./1clean), runs the Rayt2d/MigPre migration, retries junta.sh
# until the merged SU file has exactly 30900 traces, runs the CIGCONT and
# Anavel scons stages, then lets MATLAB (matDRV) decide the next 'estagio'.
# NOTE(review): the first iteration is unrolled before the loop; both copies
# must be kept in sync.
estagio=1
#: '
iteracao=1
mkdir $iteracao
cp -r ./1clean/* ./$iteracao
cd ./$iteracao/Migracao/migrando/
./Rayt2d
./MigPre
./junta.sh
#testando para descobrir se o numero de tracos no arquivo gerado tem o numero correto de 30900
surange<data_psdm.su >range.txt
range="./range.txt"
ntr="$(awk 'NR==1 {print; exit}' $range)"
traco="30900 traces:"
# Re-run junta.sh until the merged file reports the expected trace count.
while [ "$ntr" != "$traco" ]
do
./junta.sh
rm range.txt
surange<data_psdm.su >range.txt
range="./range.txt"
ntr="$(awk 'NR==1 {print; exit}' $range)"
echo $ntr
#if [ "$ntr" != "$traco" ]
#then
#echo $traco
#echo $ntr
#fi
done
cp data_psdm.su ../../CIGCONT
cd ../../CIGCONT/
scons
cd ../Anavel
cp ../CIGCONT/*cigcont*.rsf .
cp *cigcont*.rsf ./picks1
cd ./picks1
scons
# MATLAB writes estagio.txt (next stage) and the OK*.txt sentinel files.
matlab_R2014b -r "try matDRV($iteracao,$estagio); catch; end; quit"
file="./estagio.txt"
estagio=$(cat "$file")
echo $estagio
cd ../../..
#'
iteracao=2
# presumably these gedit calls are for manual inspection/creation of the
# sentinel files before the loop -- TODO confirm
gedit OKusingdbscan.txt
gedit OKmatDRV.txt
#while [ "$estagio" -le "4" ]
while [ "$estagio" -le "3" ]
do
rm OKusingdbscan.txt
rm OKmatDRV.txt
mkdir $iteracao
cp -r ./1clean/* ./$iteracao
# Seed this iteration with the smoothed velocity model from the previous one.
cp -r ./$(( $iteracao - 1 ))/Anavel/picks1/next/vana-smooth30/vana01_nearest_smooth.bin ./$iteracao
cp -r ./$(( $iteracao - 1 ))/Anavel/picks1/next/vana-smooth30/vana01_nearest_smooth.bin ./$iteracao/Migracao
cp -r ./$(( $iteracao - 1 ))/Anavel/picks1/next/vana-smooth30/vana01_nearest_smooth.bin ./$iteracao/Migracao/migrando
cp -r ./$(( $iteracao - 1 ))/Anavel/picks1/next/* ./$iteracao/Anavel/picks1
cd ./$iteracao/Migracao/migrando/
./Rayt2d
./MigPre
./junta.sh
#testando para descobrir se o numero de tracos no arquivo gerado tem o numero correto de 30900
surange<data_psdm.su >range.txt
range="./range.txt"
ntr="$(awk 'NR==1 {print; exit}' $range)"
traco="30900 traces:"
while [ "$ntr" != "$traco" ]
do
./junta.sh
rm range.txt
surange<data_psdm.su >range.txt
range="./range.txt"
ntr="$(awk 'NR==1 {print; exit}' $range)"
echo $ntr
#if [ "$ntr" != "$traco" ]
#then
#echo $traco
#echo $ntr
#fi
done
cp data_psdm.su ../../CIGCONT
cd ../../CIGCONT/
scons
cd ../Anavel
cp ../CIGCONT/*cigcont*.rsf .
cp *cigcont*.rsf ./picks1
cd ./picks1
scons
matlab_R2014b -r "try matDRV($iteracao,$estagio); catch; end; quit"
#matlab_R2014 -r -nodisplay -nojvm "try matDRV($iteracao,$estagio); catch; end; quit"
file="./estagio.txt"
estagio=$(cat "$file")
echo $estagio
cd ../../..
# If MATLAB did not recreate either sentinel file, the run failed: stop.
usingdb="OKusingdbscan.txt"
if [ -f "$usingdb" ]
then
echo "$usingdb found"
else
break
fi
matDRV="OKmatDRV.txt"
if [ -f "$matDRV" ]
then
echo "$matDRV found"
else
break
fi
iteracao=$(( $iteracao + 1 ))
done
| true
|
457d71236f0335efbd7cc54f102d1add0c13b683
|
Shell
|
BackupTheBerlios/dss-svn
|
/dss-main/detcskel/etc/xdg/xfce4/xinit.d/xfwm4.setup
|
UTF-8
| 2,564
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/sh
#
# Copyright (c) 2004-2005 os-cillation
#
# Seed per-user xfwm4 MCS settings on first login.  Each file is written
# only if it does not already exist, so user customizations are preserved.
# Note: the heredoc bodies below are file contents, not script comments.
# set margins defaults
if test ! -f $XFCE4HOME/mcs_settings/margins.xml; then
cat > $XFCE4HOME/mcs_settings/margins.xml <<EOF
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mcs-option SYSTEM "mcs-option.dtd">
<mcs-option>
<option name="Xfwm/BottomMargin" type="int" value="0"/>
<option name="Xfwm/LeftMargin" type="int" value="0"/>
<option name="Xfwm/RightMargin" type="int" value="0"/>
<option name="Xfwm/TopMargin" type="int" value="0"/>
</mcs-option>
EOF
fi
# set workspace defaults
# German locales get localized workspace names; everything else gets 1..4.
if test ! -f $XFCE4HOME/mcs_settings/workspaces.xml; then
if echo "$LANG" | grep '^de' >/dev/null 2>&1; then
cat > $XFCE4HOME/mcs_settings/workspaces.xml <<EOF
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mcs-option SYSTEM "mcs-option.dtd">
<mcs-option>
<option name="Xfwm/WorkspaceCount" type="int" value="4" />
<option name="names" type="string" value="Eins;Zwei;Drei;Vier" />
</mcs-option>
EOF
else
cat > $XFCE4HOME/mcs_settings/workspaces.xml <<EOF
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mcs-option SYSTEM "mcs-option.dtd">
<mcs-option>
<option name="Xfwm/WorkspaceCount" type="int" value="4"/>
<option name="names" type="string" value="1;2;3;4"/>
</mcs-option>
EOF
fi
fi
# set xfwm4 defaults
if test ! -f $XFCE4HOME/mcs_settings/xfwm4.xml; then
cat > $XFCE4HOME/mcs_settings/xfwm4.xml <<EOF
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mcs-option SYSTEM "mcs-option.dtd">
<mcs-option>
<option name="Xfwm/BoxMove" type="int" value="0"/>
<option name="Xfwm/BoxResize" type="int" value="0"/>
<option name="Xfwm/ButtonLayout" type="string" value="OTS|HMC"/>
<option name="Xfwm/ClickToFocus" type="int" value="1"/>
<option name="Xfwm/DblClickAction" type="string" value="maximize"/>
<option name="Xfwm/FocusNewWindow" type="int" value="1"/>
<option name="Xfwm/FocusRaise" type="int" value="0"/>
<option name="Xfwm/RaiseDelay" type="int" value="250"/>
<option name="Xfwm/RaiseOnClick" type="int" value="1"/>
<option name="Xfwm/SnapToBorder" type="int" value="1"/>
<option name="Xfwm/SnapToWindows" type="int" value="0"/>
<option name="Xfwm/SnapWidth" type="int" value="10"/>
<option name="Xfwm/ThemeName" type="string" value="Clearlooks-DSS"/>
<option name="Xfwm/TitleAlign" type="string" value="left"/>
<option name="Xfwm/TitleFont" type="string" value="Dingbats 9"/>
<option name="Xfwm/WrapResistance" type="int" value="10"/>
<option name="Xfwm/WrapWindows" type="int" value="1"/>
<option name="Xfwm/WrapWorkspaces" type="int" value="0"/>
</mcs-option>
EOF
fi
| true
|
84ec1cfea181d57813b57409fc5707a80a04a770
|
Shell
|
strigazi/athena
|
/DataQuality/DataQualityUtils/scripts/DeMoDaemon.exe
|
UTF-8
| 1,544
| 2.96875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Copyright (C) 2002-2018 CERN for the benefit of the ATLAS collaboration
# Author : Benjamin Trocme (LPSC - Grenoble) - 2017
# Daemon job to update daily the DQ stats
# Arguments:
# -$1 : directory when to run the daemon (a priori ~atlasdqm/w1/DeMo
# -$2 : DeMo year
# -$3 : DeMo tag
##################################################################
date
cd $1
# Set up the ATLAS Athena release needed by the DeMo python tools.
export AtlasSetup=/afs/cern.ch/atlas/software/dist/AtlasSetup
source $AtlasSetup/scripts/asetup.sh 21.0.66,Athena,gcc62
echo "First, looking for new runs..."
python DeMoUpdate.py --runListUpdate
systems='Pixel SCT TRT LAr Tile MDT TGC RPC CSC IDGlobal BTag CaloCP MuonCP Trig_L1 Trig_HLT'
# Per-system update/GRL/plot/recap passes; each step logs into
# YearStats-<system>/ so failures can be inspected individually.
for system in $systems
do
echo "====================================================================================="
echo "====================================================================================="
echo "Processing "$system
echo "====================================================================================="
echo "====================================================================================="
python DeMoUpdate.py -b -y $2 -t $3 -s $system --weekly --vetoLumiEvol &> YearStats-$system/daemon-weekly.out
python DeMoUpdate.py -b -y $2 -t $3 -s $system --grlUpdate &> YearStats-$system/daemon-GRL.out
python DeMoStatus.py -y $2 -t $3 -s $system --savePlots &> YearStats-$system/daemon-GRL-2.out
python DeMoScan.py -y $2 -t $3 -s $system --recapDefects &> YearStats-$system/daemon-recapDefects.out
done
# Regenerate the public web pages.
/afs/cern.ch/user/a/atlasdqm/www/DeMo/generate.exe
| true
|
633bdca33cab665e230fdd86f5a6f260c1a4cb09
|
Shell
|
raku1414/iroiro
|
/mastodon/02_systemd_and_nginx_config.sh
|
UTF-8
| 2,197
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate systemd units (web / sidekiq / streaming) and an nginx vhost
# config for a Mastodon instance.
#
# FIX(review): the original script defined edit_user/exec_user but then
# referenced the undefined variables $user, $user_for_run and
# $home_for_user, so every generated unit contained empty User= and
# WorkingDirectory= lines and the sudo home/nvm lookups ran against an
# empty username.  All references below now use the declared variables.
# The mastodon-web unit also gained the [Install] section it was missing
# (without it, 'systemctl enable mastodon-web' fails).

edit_user="mastodon"    # account that owns the mastodon checkout
exec_user="mastodonx"   # account the services run as

# Home directory of the checkout owner (login shell, so ~ is resolved).
user_home=$(sudo -iu "$edit_user" pwd)
# bin/ directory of the nvm-managed node 6 used by the streaming service.
node_env=$(sudo -iu "$edit_user" eval 'nvm which 6 | sed -E s@/bin/node@/bin@g | cat')

mastodon_domain="mstdn.meltytempo.tk"
mastodon_public_dir="$user_home/mastodon/public"
mastodon_crt="/etc/letsencrypt/live/mstdn.meltytempo.tk/fullchain.pem"
mastodon_key="/etc/letsencrypt/live/mstdn.meltytempo.tk/privkey.pem"
#sudo mkdir -p /etc/nginx/ssl

cat << EOF | sudo tee /etc/systemd/system/mastodon-web.service
[Unit]
Description=mastodon-web
After=network.target
[Service]
Type=simple
User=$exec_user
WorkingDirectory=$user_home/mastodon
Environment="RAILS_ENV=production"
Environment="PORT=3000"
Environment=PATH=$node_env:$user_home/.rbenv/shims:$user_home/.rbenv/bin:/usr/local/bin:/usr/bin:/bin
ExecStart=$user_home/.rbenv/shims/bundle exec puma -C config/puma.rb
TimeoutSec=15
Restart=always
[Install]
WantedBy=multi-user.target
EOF

cat << EOF | sudo tee /etc/systemd/system/mastodon-sidekiq.service
[Unit]
Description=mastodon-sidekiq
After=network.target
[Service]
Type=simple
User=$exec_user
WorkingDirectory=$user_home/mastodon
Environment="RAILS_ENV=production"
Environment="DB_POOL=5"
Environment=PATH=$node_env:$user_home/.rbenv/shims:$user_home/.rbenv/bin:/usr/local/bin:/usr/bin:/bin
ExecStart=$user_home/.rbenv/shims/bundle exec sidekiq -c 5 -q default -q mailers -q pull -q push
TimeoutSec=15
Restart=always
[Install]
WantedBy=multi-user.target
EOF

cat << EOF | sudo tee /etc/systemd/system/mastodon-streaming.service
[Unit]
Description=mastodon-streaming
After=network.target
[Service]
Type=simple
User=$exec_user
WorkingDirectory=$user_home/mastodon
Environment="NODE_ENV=production"
Environment="PORT=4000"
ExecStart=$user_home/.nvm/nvm-exec npm run start
TimeoutSec=15
Restart=always
[Install]
WantedBy=multi-user.target
EOF

sudo systemctl daemon-reload

# Instantiate the nginx vhost from its template and install it.
sed -e "s@mastodon_domain@$mastodon_domain@g" \
-e "s@mastodon_public_dir@$mastodon_public_dir@g" \
-e "s@mastodon_crt@$mastodon_crt@g" \
-e "s@mastodon_key@$mastodon_key@g" nginx_mastodon_template.conf > nginx_mastodon.conf
sudo mkdir -p /etc/nginx/conf.d/
sudo cp nginx_mastodon.conf /etc/nginx/conf.d/
| true
|
49407cebbb391d6bd8270d705b137cb14f9ee33d
|
Shell
|
andre-wojtowicz/uci-ml-to-r
|
/s4-make-release.sh
|
UTF-8
| 447
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# Package every preprocessed .rds dataset into data-collection.zip, then
# rename each archive entry from
#   data-collection/<dataset>/preprocessed/<file>.rds
# to a flat <dataset>.rds.
#
# FIX(review): quoted all expansions, replaced the word-split $(find ...)
# with direct globs, and moved data out of the printf *format* string so
# '%' characters in paths cannot corrupt the zipnote commands.

OUT_ZIP_FILE="data-collection.zip"

rm -f -- "$OUT_ZIP_FILE"
zip "$OUT_ZIP_FILE" data-collection/*/preprocessed/*.rds

for f in data-collection/*/preprocessed/*.rds ; do
    # Extract the <dataset> path component.
    dataset_name=$(echo "$f" | sed -e 's/data-collection\/\(.*\)\/preprocessed\/.*\.rds/\1/')
    echo "Renaming $f -> $dataset_name.rds"
    # Rename the entry inside the archive: https://stackoverflow.com/a/16710654
    printf '@ %s\n@=%s.rds\n' "$f" "$dataset_name" | zipnote -w "$OUT_ZIP_FILE"
done
| true
|
4579d4f1c3499a65202d4df78a8853dc115eaaa4
|
Shell
|
utisam/ready
|
/lib/ready/check_eval.sh
|
UTF-8
| 279
| 3.421875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Include guard: skip if this library has already been sourced.
[[ -n "$__READY_CHECK_EVAL_SH" ]] && return || readonly __READY_CHECK_EVAL_SH=1
# Provides out_title / out_ok / out_ng used by check_eval below.
source $(dirname $BASH_SOURCE)/output.sh
# Evaluate the given command line, reporting the result via the output
# helpers.  Returns 0 (and prints OK) when the command succeeds,
# 1 (and prints NG) when it fails.
check_eval() {
	local rc=0
	out_title "Eval: $*"
	if ! eval "$*"; then
		rc=1
	fi
	if (( rc == 0 )); then
		out_ok
	else
		out_ng
	fi
	return "$rc"
}
| true
|
b44413ecfab4cbf3416fb48210fe6c966a27c5b2
|
Shell
|
VinzSpring/docker-esp8266-micropython
|
/docker-entrypoint.sh
|
UTF-8
| 289
| 2.953125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Container entrypoint for the esp8266 MicroPython image.
#
# Usage:
#   <image> unix    - replace this shell with the unix-port interpreter
#   <image> build   - build the cross-compiler and esp8266 firmware
#   <image> <cmd>   - start the init system, then eval <cmd> in this shell

# Build the mpy cross-compiler first, then the esp8266 firmware port.
build_firmware() {
cd /micropython && make -C mpy-cross/ V=1
cd ports/esp8266 && make V=1
}
case "$1" in
"unix")
exec /micropython/ports/unix/micropython
;;
"build")
build_firmware
;;
*)
echo "test"
# Background the phusion init system, then run the full argument
# list as a command line in the current shell.
/sbin/my_init &
eval "$@"
;;
esac
| true
|
245e89d32b6efafd74246c8281b478f8c7885195
|
Shell
|
shewey/exploit-db
|
/platforms/linux/remote/20690.sh
|
UTF-8
| 653
| 2.6875
| 3
|
[] |
no_license
|
source: http://www.securityfocus.com/bid/2496/info
Many FTP servers are vulnerable to a denial of service condition resulting from poor globbing algorithms and user resource usage limits.
Globbing generates pathnames from file name patterns used by the shell, eg. wildcards denoted by * and ?, multiple choices denoted by {}, etc.
The vulnerable FTP servers can be exploited to exhaust system resources if per-user resource usage controls have not been implemented.
#!/bin/bash
ftp -n FTP-SERVER<<\end
quot user anonymous
bin
quot pass shitold@bug.com
ls /../*/../*/../*/../*/../*/../*/../*/../*/../*/../*/../*/../*/../*
bye
end
| true
|
8de57ba1f95d12269d150e45680e8982679e2a6c
|
Shell
|
lookback/lookbook
|
/scripts/watch
|
UTF-8
| 358
| 2.625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Watch the lookbook CSS entry point and rebuild on every change.
set -eo pipefail
# Defines LOOKBOOK_INPUT_FILE and LOOKBOOK_DIR used below.
source "$(dirname "$0")/config.sh"
# Default to a development build unless the caller sets NODE_ENV.
LOOKBOOK_ENV=${NODE_ENV:-development}
# Emit non-minified into dist/<file>.css
# Meant for being included in other projects, before minification and source mapping
NODE_ENV=$LOOKBOOK_ENV npx postcss $LOOKBOOK_INPUT_FILE \
--watch \
--verbose \
--no-map \
--dir $LOOKBOOK_DIR
| true
|
d1d375fb9492c6faa2f345faf486e82d61ef8c0c
|
Shell
|
codeteenager/lynx-native
|
/Core/build/prebuild.sh
|
UTF-8
| 515
| 3.265625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Generate JNI header stubs for every Java source listed in ./jni_files.
# Each line of jni_files is a path relative to the Android sdk java root;
# jni_generator.py emits a matching <ClassName>_jni.h into ../gen/.
#
# FIX(review): use 'IFS= read -r' so backslashes/whitespace in listed
# paths are preserved, and quote every expansion so paths containing
# spaces no longer word-split.

CURRENT_PATH=$(cd "$(dirname "$0")" && pwd)
ROOT_LYNX_JAVA_PATH="$CURRENT_PATH/../../Android/sdk/src/main/java/"
LYNX_OUTPUT_DIR="$CURRENT_PATH/../gen/"
LYNX_GEN_FILE="$CURRENT_PATH/jni_generator.py"

while IFS= read -r line
do
    file_name=${line##*/}                  # strip directory components
    jni_file_name=${file_name%.*}"_jni.h"  # Foo.java -> Foo_jni.h
    input_file="$ROOT_LYNX_JAVA_PATH$line"
    output_file="$LYNX_OUTPUT_DIR$jni_file_name"
    python "$LYNX_GEN_FILE" "$input_file" "$output_file"
    echo "python $LYNX_GEN_FILE $input_file $output_file"
done < "$CURRENT_PATH/jni_files"
| true
|
23ec505dd7620d6a783bc867c823640eab3fc4ec
|
Shell
|
aur-archive/gpsd
|
/PKGBUILD
|
UTF-8
| 2,972
| 2.75
| 3
|
[] |
no_license
|
# $Id: PKGBUILD 135847 2011-08-19 16:38:40Z andrea $
# Maintainer: Andrea Scarpino <andrea@archlinux.org>
# Contributor: Sergej Pupykin <pupykin.s+arch@gmail.com>
# Contributor: dibblethewrecker dibblethewrecker.at.jiwe.dot.org
# Contributor: Giacomo Rizzo <alt@free-os.it>
pkgname=gpsd
pkgver=3.0
pkgrel=1
pkgdesc="GPS daemon and library to support USB/serial GPS devices"
arch=('i686' 'x86_64')
url="http://gpsd.berlios.de"
license=('BSD')
depends=('python2' 'libusb' 'bluez' 'desktop-file-utils')
optdepends=('php: generate a PHP status page for your GPS'
'php-gd: image support for the PHP status page')
makedepends=('scons' 'docbook-xsl' 'chrpath')
backup=('etc/conf.d/gpsd' 'lib/udev/rules.d/99-gpsd-usb.rules')
options=('!libtool' '!buildflags')
install="${pkgname}.install"
source=("http://download.berlios.de/${pkgname}/${pkgname}-${pkgver}.tar.gz"
'gpsd')
# NOTE(review): this md5sums array is dead -- it is overridden by the
# second md5sums assignment at the bottom of the file.
md5sums=('c63d41a26868e9bdd48d9e311a9cc42c'
'e287d4b34a4eb1da27f12533ae9b6dd5')
build() {
cd "${srcdir}/${pkgname}-${pkgver}"
# fix python 2.7 path
sed -i -e "s|#![ ]*/usr/bin/python$|#!/usr/bin/python2|" \
-e "s|#![ ]*/usr/bin/env python$|#!/usr/bin/env python2|" \
$(find . -name '*.py')
sed -i 's|/usr/bin/env python|/usr/bin/env python2|' gegps gpscat gpsfake \
gpsprof xgps xgpsspeed
scons prefix=/usr \
systemd=yes \
libQgpsmm=no \
PYTHONPATH=/usr/bin/python2
scons build
}
package() {
cd "${srcdir}/${pkgname}-${pkgver}"
export DESTDIR="${pkgdir}"
scons install
# Our own udev-install since the Makefile uses absolute paths
# Original file names are [mostly] unmodified: useful to match process name in case of error
# Following the switch from hotplug to udev helper scripts now live in /lib/udev/ instead of /etc/hotplug/
sed -i 's|GPSD_OPTIONS=""|GPSD_OPTIONS="-P /var/run/gpsd/gpsd.pid"|' packaging/deb/etc_default_gpsd
sed -i 's|"/var/run/gpsd.sock"|"/var/run/gpsd/gpsd.sock"|' packaging/deb/etc_default_gpsd
install -D -m644 "packaging/deb/etc_default_gpsd" "${pkgdir}/etc/conf.d/gpsd"
install -D -m644 "gpsd.rules" "${pkgdir}/lib/udev/rules.d/99-gpsd-usb.rules"
sed -i 's|/etc/default/gpsd|/etc/conf.d/gpsd|' gpsd.hotplug
install -D -m755 gpsd.hotplug "${pkgdir}/lib/udev/gpsd.hotplug"
# GPSD needs RPATH
chrpath -r /usr/lib/ "${pkgdir}"/usr/lib/libgps{,d}.so.20.0.0
chrpath -r /usr/lib/ "${pkgdir}"/usr/bin/{gpsdecode,gpsctl,gpspipe,gpxlogger,lcdgps}
chrpath -r /usr/lib/ "${pkgdir}"/usr/sbin/{gpsd,gpsdctl}
install -D -m644 packaging/X11/xgps.desktop \
"${pkgdir}/usr/share/applications/xgps.desktop"
install -D -m644 packaging/X11/xgpsspeed.desktop \
"${pkgdir}/usr/share/applications/xgpsspeed.desktop"
install -D -m644 packaging/X11/gpsd-logo.png \
"${pkgdir}/usr/share/gpsd/gpsd-logo.png"
install -D -m755 "${srcdir}/gpsd" "${pkgdir}/etc/rc.d/gpsd"
install -D -m644 COPYING "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"
}
# NOTE(review): duplicate assignment -- this one wins at runtime and
# changes the checksum of the 'gpsd' rc script; the array above should be
# deleted to avoid confusion.
md5sums=('c63d41a26868e9bdd48d9e311a9cc42c'
'0d5879df32833ca67a5491ce1ff98dcc')
| true
|
5ebf9588b80feb4e5942a618d73b04a96d7599e7
|
Shell
|
henrybear327/cloudsuite-tuning
|
/data-serving/run.sh
|
UTF-8
| 852
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/bash
# Orchestrates one data-serving benchmark run: (re)creates the server/client
# containers, optionally loads and warms the server, then for each operation
# count in $OPERATIONS_FILE runs YCSB workload B against Cassandra while
# capturing `docker stats` and perf counters.
# NOTE(review): the helper functions (rm_all_containers, start_server, ...)
# and variables ($LOAD, $SERVER_PID, $OUT, ...) are presumably defined by
# ../common/safeguard and ./main_func — confirm against those files.
source ../common/safeguard
source main_func
rm_all_containers
create_network
# $LOAD selects "fresh load" mode: bring up and populate the server.
(($LOAD)) && start_server
(($LOAD)) && detect_stage server-ready
start_client
(($LOAD)) && load_server
warmup_server
while read OPERATIONS; do
TARGET="$((OPERATIONS * MULTIPLIER))"
# Background collectors for this iteration: container stats plus per-CPU
# perf counters attached to the server process.  Both are killed below
# once the YCSB client completes.
docker stats >> $UTIL_LOG &
sudo perf stat -e $PERF_EVENTS --cpu $SERVER_CPUS -p ${SERVER_PID} sleep infinity 2>>$PERF_LOG &
(docker exec $CLIENT_CONTAINER bash -c "/ycsb/bin/ycsb run cassandra-cql -p hosts=$SERVER_CONTAINER -P /ycsb/workloads/workloadb -s -threads $THREADS -p operationcount=$TARGET -p recordcount=$RECORDS")>>$CLIENT_LOG &
CLIENT_PID=$!
wait $CLIENT_PID
# Tear down the collectors started above.
sudo pkill -f "docker stats"
sudo pkill -fx "sleep infinity"
done < $OPERATIONS_FILE
mv $OPERATIONS_FILE $OUT/operations.txt
cp user.cfg $OUT/user.cfg
log_folder
| true
|
42123337fc16d5ffbe099e2cd754ec1ea599db15
|
Shell
|
samucafreitas/dotfiles
|
/.msf
|
UTF-8
| 30,528
| 3.484375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# _____
# _____ _______/ ____\
# / \ / ___/\ __\
# | Y Y \\___ \ | |
# /\ |__|_| /____ > |__|
# \/ \/ \/
#
# Uses various functions from screenfetch -> https://github.com/KittyKatt/screenFetch
# Functions: {detectdistro, detectmem, detectwm,
# detectcpu, detectgpu, detectpkgs,
# detectshell, detectuptime, detectres}
#
# File : .msf
# Date : 25 nov 2016
# Last Modified Date: 17 feb 2018
# Last Modified By : Sam Uel <samucaof42@gmail.com>
# Distro Detection - Begin
# Detect the running OS/distribution into $distro (canonical display name),
# optionally filling $distro_release, $distro_codename and $distro_more.
# Probe order: lsb_release -> /etc/os-release -> /etc/lsb-release ->
# per-distro release files -> dmesg.boot (BSDs) / sw_vers (OS X) ->
# /etc/issue -> /etc/system-release.  The result is finally lowercased and
# normalized through the case table at the bottom.
detectdistro () {
if [[ -z "${distro}" ]]; then
distro="Unknown"
# LSB Release Check
if type -p lsb_release >/dev/null 2>&1; then
# read distro_detect distro_release distro_codename <<< $(lsb_release -sirc)
distro_detect=( $(lsb_release -sirc) )
if [[ ${#distro_detect[@]} -eq 3 ]]; then
distro_codename=${distro_detect[2]}
distro_release=${distro_detect[1]}
distro_detect=${distro_detect[0]}
else
# Multi-word distro name: find the release token (a version number,
# "n/a" or "rolling"); everything before it is the name, everything
# after it the codename.
for ((i=0; i<${#distro_detect[@]}; i++)); do
if [[ ${distro_detect[$i]} =~ ^[[:digit:]]+((.[[:digit:]]+|[[:digit:]]+|)+)$ ]]; then
distro_release=${distro_detect[$i]}
distro_codename=${distro_detect[@]:$(($i+1)):${#distro_detect[@]}+1}
distro_detect=${distro_detect[@]:0:${i}}
break 1
elif [[ ${distro_detect[$i]} =~ [Nn]/[Aa] || ${distro_detect[$i]} == "rolling" ]]; then
distro_release=${distro_detect[$i]}
distro_codename=${distro_detect[@]:$(($i+1)):${#distro_detect[@]}+1}
distro_detect=${distro_detect[@]:0:${i}}
break 1
fi
done
fi
# Map the raw lsb_release identifier onto a display name, fixing up
# release/codename for distros that report odd values.
if [[ "${distro_detect}" == "archlinux" || "${distro_detect}" == "Arch Linux" || "${distro_detect}" == "arch" || "${distro_detect}" == "Arch" || "${distro_detect}" == "archarm" ]]; then
distro="Arch Linux"
distro_release="n/a"
if grep -q 'antergos' /etc/os-release; then
distro="Antergos"
distro_release="n/a"
fi
elif [[ "${distro_detect}" == "Chakra" ]]; then
distro="Chakra"
distro_release=""
elif [[ "${distro_detect}" == "CentOS" ]]; then
distro="CentOS"
elif [[ "${distro_detect}" == "Debian" ]]; then
if [[ -f /etc/crunchbang-lsb-release || -f /etc/lsb-release-crunchbang ]]; then
distro="CrunchBang"
distro_release=$(awk -F'=' '/^DISTRIB_RELEASE=/ {print $2}' /etc/lsb-release-crunchbang)
distro_codename=$(awk -F'=' '/^DISTRIB_DESCRIPTION=/ {print $2}' /etc/lsb-release-crunchbang)
elif [[ -f /etc/os-release ]]; then
if [[ "$(cat /etc/os-release)" =~ "Raspbian" ]]; then
distro="Raspbian"
distro_release=$(awk -F'=' '/^PRETTY_NAME=/ {print $2}' /etc/os-release)
else
distro="Debian"
fi
else
distro="Debian"
fi
elif [[ "${distro_detect}" == "elementary" || "${distro_detect}" == "elementary OS" ]]; then
distro="elementary OS"
elif [[ "${distro_detect}" == "EvolveOS" ]]; then
distro="Evolve OS"
elif [[ "${distro_detect}" == "KaOS" || "${distro_detect}" == "kaos" ]]; then
distro="KaOS"
elif [[ "${distro_detect}" == "Fedora" ]]; then
distro="Fedora"
elif [[ "${distro_detect}" == "frugalware" ]]; then
distro="Frugalware"
distro_codename=null
distro_release=null
elif [[ "${distro_detect}" == "Fuduntu" ]]; then
distro="Fuduntu"
distro_codename=null
elif [[ "${distro_detect}" == "Gentoo" ]]; then
if [[ "$(lsb_release -sd)" =~ "Funtoo" ]]; then
distro="Funtoo"
else
distro="Gentoo"
fi
elif [[ "${distro_detect}" == "Jiyuu Linux" ]]; then
distro="Jiyuu Linux"
elif [[ "${distro_detect}" == "LinuxDeepin" ]]; then
distro="LinuxDeepin"
distro_codename=null
elif [[ "${distro_detect}" == "Deepin" ]]; then
distro="Deepin"
elif [[ "${distro_detect}" == "Debian Kali Linux" ]]; then
distro="Kali Linux"
elif [[ "${distro_detect}" == "Korora" ]]; then
distro="Korora"
elif [[ "${distro_detect}" == "Mageia" ]]; then
distro="Mageia"
elif [[ "$distro_detect" == "MandrivaLinux" ]]; then
distro="Mandriva"
if [[ "${distro_codename}" == "turtle" ]]; then
distro="Mandriva-${distro_release}"
distro_codename=null
elif [[ "${distro_codename}" == "Henry_Farman" ]]; then
distro="Mandriva-${distro_release}"
distro_codename=null
elif [[ "${distro_codename}" == "Farman" ]]; then
distro="Mandriva-${distro_release}"
distro_codename=null
elif [[ "${distro_codename}" == "Adelie" ]]; then
distro="Mandriva-${distro_release}"
distro_codename=null
elif [[ "${distro_codename}" == "pauillac" ]]; then
distro="Mandriva-${distro_release}"
distro_codename=null
fi
elif [[ "${distro_detect}" == "ManjaroLinux" ]]; then
distro="Manjaro"
elif [[ "${distro_detect}" == "LinuxMint" ]]; then
distro="Mint"
if [[ "${distro_codename}" == "debian" ]]; then
distro="LMDE"
distro_codename="n/a"
distro_release="n/a"
fi
elif [[ "${distro_detect}" == "SUSE LINUX" || "${distro_detect}" == "openSUSE project" ]]; then
distro="openSUSE"
elif [[ "${distro_detect}" == "Parabola GNU/Linux-libre" || "${distro_detect}" == "Parabola" ]]; then
distro="Parabola GNU/Linux-libre"
distro_codename="n/a"
distro_release="n/a"
elif [[ "${distro_detect}" == "Peppermint" ]]; then
distro="Peppermint"
distro_codename=null
elif [[ "${distro_detect}" == "CentOS" || "${distro_detect}" =~ "RedHatEnterprise" ]]; then
distro="Red Hat Enterprise Linux"
elif [[ "${distro_detect}" == "Sabayon" ]]; then
distro="Sabayon"
elif [[ "${distro_detect}" == "SolusOS" ]]; then
distro="SolusOS"
elif [[ "${distro_detect}" == "Trisquel" ]]; then
distro="Trisquel"
elif [[ "${distro_detect}" == "Ubuntu" ]]; then
distro="Ubuntu"
elif [[ "${distro_detect}" == "Viperr" ]]; then
distro="Viperr"
distro_codename=null
fi
# Assemble "$distro $release $codename" into distro_more when known.
if [[ -n ${distro_release} && ${distro_release} != "n/a" ]]; then distro_more="$distro_release"; fi
if [[ -n ${distro_codename} && ${distro_codename} != "n/a" ]]; then distro_more="$distro_more $distro_codename"; fi
if [[ -n ${distro_more} ]]; then
distro_more="${distro} ${distro_more}"
fi
fi
# Existing File Check
if [ "$distro" == "Unknown" ]; then
if [ $(uname -o 2>/dev/null) ]; then
if [ "$(uname -o)" == "Cygwin" ]; then distro="Cygwin"; fake_distro="${distro}"; fi
fi
if [ -f /etc/os-release ]; then
# Pull the ID= field out of os-release and title-case each word.
distrib_id=$(</etc/os-release);
for l in $(echo $distrib_id); do
if [[ ${l} =~ ^ID= ]]; then
distrib_id=${l//*=}
distrib_id=${distrib_id//\"/}
break 1
fi
done
if [[ -n ${distrib_id} ]]; then
if [[ -n ${BASH_VERSINFO} && ${BASH_VERSINFO} -ge 4 ]]; then
distrib_id=$(for i in ${distrib_id}; do echo -n "${i^} "; done)
distro=${distrib_id% }
unset distrib_id
else
distrib_id=$(for i in ${distrib_id}; do FIRST_LETTER=$(echo -n "${i:0:1}" | tr "[:lower:]" "[:upper:]"); echo -n "${FIRST_LETTER}${i:1} "; done)
distro=${distrib_id% }
unset distrib_id
fi
fi
# Hotfixes
[[ "${distro}" == "Void" || "${distro}" == "void" ]] && distro="Void"
[[ "${distro}" == "evolveos" ]] && distro="Evolve OS"
[[ "${distro}" == "antergos" || "${distro}" == "Antergos" ]] && distro="Antergos"
[[ "${distro}" == "Arch" ]] && distro="Arch Linux"
[[ "${distro}" == "Archarm" || "${distro}" == "archarm" ]] && distro="Arch Linux"
[[ "${distro}" == "elementary" ]] && distro="elementary OS"
fi
if [[ "${distro}" == "Unknown" ]]; then
if [[ "${OSTYPE}" == "linux-gnu" || "${OSTYPE}" == "linux" ]]; then
if [ -f /etc/lsb-release ]; then
LSB_RELEASE=$(</etc/lsb-release)
distro=$(echo ${LSB_RELEASE} | awk 'BEGIN {
distro = "Unknown"
}
{
if ($0 ~ /[Uu][Bb][Uu][Nn][Tt][Uu]/) {
distro = "Ubuntu"
exit
}
else if ($0 ~ /[Mm][Ii][Nn][Tt]/ && $0 ~ /[Dd][Ee][Bb][Ii][Aa][Nn]/) {
distro = "LMDE"
exit
}
else if ($0 ~ /[Mm][Ii][Nn][Tt]/) {
distro = "Mint"
exit
}
} END {
print distro
}')
fi
fi
fi
if [[ "${distro}" == "Unknown" ]]; then
if [[ "${OSTYPE}" == "linux-gnu" || "${OSTYPE}" == "linux" ]]; then
# Distro-specific marker files, checked in rough popularity order.
if [ -f /etc/arch-release ]; then distro="Arch Linux"
elif [ -f /etc/chakra-release ]; then distro="Chakra"
elif [ -f /etc/crunchbang-lsb-release ]; then distro="CrunchBang"
elif [ -f /etc/debian_version ]; then distro="Debian"
elif [ -f /etc/evolveos-release ]; then distro="Evolve OS"
elif [ -f /etc/fedora-release ] && grep -q "Fedora" /etc/fedora-release; then distro="Fedora"
elif [ -f /etc/fedora-release ] && grep -q "Korora" /etc/fedora-release; then distro="Korora"
elif [ -f /etc/frugalware-release ]; then distro="Frugalware"
elif [ -f /etc/gentoo-release ]; then
if grep -q "Funtoo" /etc/gentoo-release ; then
distro="Funtoo"
else
distro="Gentoo"
fi
elif [ -f /etc/mageia-release ]; then distro="Mageia"
elif [ -f /etc/mandrake-release ]; then distro="Mandrake"
elif [ -f /etc/mandriva-release ]; then distro="Mandriva"
elif [ -f /etc/SuSE-release ]; then distro="openSUSE"
elif [ -f /etc/redhat-release ] && grep -q "Red Hat" /etc/redhat-release; then distro="Red Hat Enterprise Linux"
elif [ -f /etc/redhat-release ] && grep -q "CentOS" /etc/redhat-release; then distro="CentOS"
elif [ -f /etc/slackware-version ]; then distro="Slackware"
elif [ -f /usr/share/doc/tc/release.txt ]; then distro="TinyCore"
elif [ -f /etc/sabayon-edition ]; then distro="Sabayon"; fi
else
# Not Linux: try OS X, then scan the BSD boot messages.
if [[ -x /usr/bin/sw_vers ]] && /usr/bin/sw_vers | grep -i "Mac OS X" >/dev/null; then
distro="Mac OS X"
elif [[ -f /var/run/dmesg.boot ]]; then
distro=$(awk 'BEGIN {
distro = "Unknown"
}
{
if ($0 ~ /DragonFly/) {
distro = "DragonFlyBSD"
exit
}
else if ($0 ~ /FreeBSD/) {
distro = "FreeBSD"
exit
}
else if ($0 ~ /NetBSD/) {
distro = "NetBSD"
exit
}
else if ($0 ~ /OpenBSD/) {
distro = "OpenBSD"
exit
}
} END {
print distro
}' /var/run/dmesg.boot)
fi
fi
fi
if [[ "${distro}" == "Unknown" ]] && [[ "${OSTYPE}" == "linux-gnu" || "${OSTYPE}" == "linux" ]]; then
if [[ -f /etc/issue ]]; then
distro=$(awk 'BEGIN {
distro = "Unknown"
}
{
if ($0 ~ /"LinuxDeepin"/) {
distro = "LinuxDeepin"
exit
}
else if ($0 ~ /"Parabola GNU\/Linux-libre"/) {
distro = "Parabola GNU/Linux-libre"
exit
}
else if ($0 ~ /"SolusOS"/) {
distro = "SolusOS"
exit
}
} END {
print distro
}' /etc/issue)
fi
fi
if [[ "${distro}" == "Unknown" ]] && [[ "${OSTYPE}" == "linux-gnu" || "${OSTYPE}" == "linux" ]]; then
if [[ -f /etc/system-release ]]; then
if grep -q "Scientific Linux" /etc/system-release; then
distro="Scientific Linux"
fi
fi
fi
fi
fi
# Normalize: lowercase (bash 4.2+ can use ${var,,}; otherwise tr), then map
# onto the canonical display names below.
if [[ ${BASH_VERSINFO[0]} -ge 4 ]]; then
if [[ ${BASH_VERSINFO[0]} -eq 4 && ${BASH_VERSINFO[1]} -gt 1 ]] || [[ ${BASH_VERSINFO[0]} -gt 4 ]]; then
distro=${distro,,}
else
distro="$(tr '[:upper:]' '[:lower:]' <<< ${distro})"
fi
else
distro="$(tr '[:upper:]' '[:lower:]' <<< ${distro})"
fi
case $distro in
antergos) distro="Antergos" ;;
arch*linux*old) distro="Arch Linux - Old" ;;
arch*linux) distro="Arch Linux" ;;
arch) distro="Arch Linux";;
'elementary'|'elementary os') distro="elementary OS";;
evolveos) distro="Evolve OS";;
fedora) distro="Fedora" ;;
korora) distro="Korora" ;;
mageia) distro="Mageia" ;;
mandriva) distro="Mandriva" ;;
mandrake) distro="Mandrake" ;;
mint) distro="Mint" ;;
kali*linux) distro="Kali Linux" ;;
lmde) distro="LMDE" ;;
opensuse) distro="openSUSE" ;;
ubuntu) distro="Ubuntu" ;;
debian) distro="Debian" ;;
raspbian) distro="Raspbian" ;;
freebsd) distro="FreeBSD" ;;
freebsd*old) distro="FreeBSD - Old" ;;
openbsd) distro="OpenBSD" ;;
dragonflybsd) distro="DragonFlyBSD" ;;
netbsd) distro="NetBSD" ;;
red*hat*) distro="Red Hat Enterprise Linux" ;;
crunchbang) distro="CrunchBang" ;;
gentoo) distro="Gentoo" ;;
funtoo) distro="Funtoo" ;;
slackware) distro="Slackware" ;;
frugalware) distro="Frugalware" ;;
peppermint) distro="Peppermint" ;;
solusos) distro="SolusOS" ;;
parabolagnu|parabolagnu/linux-libre|'parabola gnu/linux-libre'|parabola) distro="Parabola GNU/Linux-libre" ;;
viperr) distro="Viperr" ;;
void) distro="Void" ;;
kaos) distro="KaOS";;
linuxdeepin) distro="LinuxDeepin" ;;
deepin) distro="Deepin" ;;
chakra) distro="Chakra" ;;
centos) distro="CentOS";;
mac*os*x) distro="Mac OS X" ;;
fuduntu) distro="Fuduntu" ;;
manjaro) distro="Manjaro" ;;
cygwin) distro="Cygwin" ;;
esac
}
detectdistro
# Distro Detection - End
# CPU Detection - Begin
# Detect the CPU model into $cpu, appending "@ <freq>GHz/MHz" when the clock
# can be read.  OS X / BSDs use sysctl; Linux parses /proc/cpuinfo with
# PowerPC and Raspberry Pi fallbacks.  $REGEXP picks the sed extended-regex
# flag for the final trademark-stripping pass (GNU -r vs BSD -E).
detectcpu () {
REGEXP="-r"
if [ "$distro" == "Mac OS X" ]; then
cpu=$(machine)
if [[ $cpu == "ppc750" ]]; then
cpu="IBM PowerPC G3"
elif [[ $cpu == "ppc7400" || $cpu == "ppc7450" ]]; then
cpu="IBM PowerPC G4"
elif [[ $cpu == "ppc970" ]]; then
cpu="IBM PowerPC G5"
else
cpu=$(sysctl -n machdep.cpu.brand_string)
fi
REGEXP="-E"
elif [ "$distro" == "FreeBSD" ]; then cpu=$(sysctl -n hw.model)
elif [ "$distro" == "DragonflyBSD" ]; then cpu=$(sysctl -n hw.model)
elif [ "$distro" == "OpenBSD" ]; then cpu=$(sysctl -n hw.model | sed 's/@.*//')
else
# Linux: "model name" line, dropping any builtin "@ freq" suffix (the
# frequency is re-added below from cpufreq/sysfs for accuracy).
cpu=$(awk 'BEGIN{FS=":"} /model name/ { print $2; exit }' /proc/cpuinfo | sed 's/ @/\n/' | head -1)
if [ -z "$cpu" ]; then
# No "model name" (PowerPC/ARM kernels): fall back to the "cpu" line
# and prefix a best-guess family name.
cpu=$(awk 'BEGIN{FS=":"} /^cpu/ { gsub(/ +/," ",$2); print $2; exit}' /proc/cpuinfo | sed 's/, altivec supported//;s/^ //')
if [[ $cpu =~ ^(PPC)*9.+ ]]; then
model="IBM PowerPC G5 "
elif [[ $cpu =~ 740/750 ]]; then
model="IBM PowerPC G3 "
elif [[ $cpu =~ ^74.+ ]]; then
model="Motorola PowerPC G4 "
elif [[ "$(cat /proc/cpuinfo)" =~ "BCM2708" ]]; then
model="Broadcom BCM2835 ARM1176JZF-S"
else
model="IBM PowerPC G3 "
fi
cpu="${model}${cpu}"
fi
# Clock: prefer the BIOS limit, then the scaling governor's max, then the
# current MHz reported by /proc/cpuinfo.
loc="/sys/devices/system/cpu/cpu0/cpufreq"
if [ -f ${loc}/bios_limit ];then
cpu_mhz=$(awk '{print $1/1000}' "${loc}/bios_limit")
elif [ -f $loc/scaling_max_freq ];then
cpu_mhz=$(awk '{print $1/1000}' "${loc}/scaling_max_freq")
else
cpu_mhz=$(awk -F':' '/cpu MHz/{ print int($2+.5) }' /proc/cpuinfo | head -n 1)
fi
if [ -n "$cpu_mhz" ];then
if [ $cpu_mhz -gt 999 ];then
cpu_ghz=$(awk '{print $1/1000}' <<< "${cpu_mhz}")
cpu="$cpu @ ${cpu_ghz}GHz"
else
cpu="$cpu @ ${cpu_mhz}MHz"
fi
fi
fi
# Strip (TM)/(R)/"Processor" noise and collapse whitespace via xargs.
cpu=$(sed $REGEXP 's/\([tT][mM]\)|\([Rr]\)|[pP]rocessor//g' <<< "${cpu}" | xargs)
}
detectcpu
# CPU Detection - End
# GPU Detection - Begin (EXPERIMENTAL!)
# Detect the graphics adapter into $gpu ("Not Found" when nothing can be
# determined).  $gpu_info holds the raw probe output and is afterwards
# reduced to a vendor prefix (side effect kept for compatibility).
detectgpu () {
if [[ "${distro}" == "FreeBSD" ]]; then
gpu_info=$(pciconf -lv 2> /dev/null | grep -B 4 VGA)
gpu_info=$(grep -E 'device.*=.*' <<< "${gpu_info}")
gpu="${gpu_info##*device*= }"
gpu="${gpu//\'}"
# gpu=$(sed 's/.*device.*= //' <<< "${gpu_info}" | sed "s/'//g")
elif [[ "${distro}" == "Mac OS X" ]]; then
gpu=$(system_profiler SPDisplaysDataType | awk -F': ' '/^\ *Chipset Model:/ {print $2}' | awk '{ printf "%s / ", $0 }' | sed -e 's/\/ $//g')
elif [[ "${distro}" == "Cygwin" ]]; then
# Fix: this branch was unreachable in the original — it came after a
# catch-all [[ "$distro" != "Mac OS X" ]] test that also matched Cygwin.
# Specific platforms are now checked before the generic Linux path.
gpu=$(wmic path Win32_VideoController get caption)
gpu=$(tail -1 <<< ${gpu})
else
# Generic path (Linux and the remaining BSDs): lspci first, glxinfo as a
# fallback.
if [ -n "$(type -p lspci)" ]; then
gpu_info=$(lspci 2> /dev/null | grep VGA)
gpu=$(grep -oE '\[.*\]' <<< "${gpu_info}" | sed 's/\[//;s/\]//' | sed -n '1h;2,$H;${g;s/\n/, /g;p}')
elif [[ -n "$(type -p glxinfo)" && -z "$gpu" ]]; then
gpu_info=$(glxinfo 2>/dev/null)
gpu=$(grep "OpenGL renderer string" <<< "${gpu_info}" | cut -d ':' -f2)
gpu="${gpu:1}"
gpu_info=$(grep "OpenGL vendor string" <<< "${gpu_info}")
fi
fi
if [ -n "$gpu" ];then
# Reduce the raw probe output to a vendor label.
if [ $(grep -i nvidia <<< "${gpu_info}" | wc -l) -gt 0 ];then
gpu_info="NVidia "
elif [ $(grep -i intel <<< "${gpu_info}" | wc -l) -gt 0 ];then
gpu_info="Intel "
elif [ $(grep -i amd <<< "${gpu_info}" | wc -l) -gt 0 ];then
gpu_info="AMD "
elif [[ $(grep -i ati <<< "${gpu_info}" | wc -l) -gt 0 || $(grep -i radeon <<< "${gpu_info}" | wc -l) -gt 0 ]]; then
gpu_info="ATI "
else
gpu_info=$(cut -d ':' -f2 <<< "${gpu_info}")
gpu_info="${gpu_info:1} "
fi
gpu="${gpu}"
else
gpu="Not Found"
fi
}
detectgpu
# GPU Detection - End
# Package Count - Begin
# Count installed packages into $pkgs, dispatching on $distro to the native
# package manager ("Unknown" for unrecognized distros).
detectpkgs () {
pkgs="Unknown"
case "${distro}" in
'Arch Linux'|'Parabola GNU/Linux-libre'|'Chakra'|'Manjaro'|'Antergos'|'KaOS') pkgs=$(pacman -Qq | wc -l) ;;
'Frugalware') pkgs=$(pacman-g2 -Q | wc -l) ;;
'Fuduntu'|'Ubuntu'|'Mint'|'SolusOS'|'Debian'|'Raspbian'|'LMDE'|'CrunchBang'|'Peppermint'|'LinuxDeepin'|'Deepin'|'Kali Linux'|'Trisquel'|'elementary OS') pkgs=$(dpkg --get-selections | wc -l) ;;
'Slackware') pkgs=$(ls -1 /var/log/packages | wc -l) ;;
'Gentoo'|'Sabayon'|'Funtoo') pkgs=$(ls -d /var/db/pkg/*/* | wc -l) ;;
'Fedora'|'Korora'|'openSUSE'|'Red Hat Enterprise Linux'|'CentOS'|'Mandriva'|'Mandrake'|'Mageia'|'Viperr') pkgs=$(rpm -qa | wc -l) ;;
'Void') pkgs=$(xbps-query -l|wc -l);;
'Evolve OS') pkgs=$(pisi list-installed | wc -l);;
'Mac OS X')
# OS X has no single package manager: count /usr/local/bin symlinks that
# are not Homebrew's, then add MacPorts and Homebrew inventories.
if [ -d "/usr/local/bin" ]; then
loc_pkgs=$(ls -l /usr/local/bin/ | grep -v "\(../Cellar/\|brew\)" | wc -l)
pkgs=$((${loc_pkgs} -1));
fi
if type -p port >/dev/null 2>&1; then
port_pkgs=$(port installed 2>/dev/null | wc -l)
pkgs=$((${pkgs} + (${port_pkgs} -1)))
fi
if type -p brew >/dev/null 2>&1; then
brew_pkgs=$(brew list -1 2>/dev/null | wc -l)
pkgs=$((${pkgs} + ${brew_pkgs}))
fi
;;
'OpenBSD')
pkgs=$(pkg_info | wc -l | awk '{sub(" ", "");print $1}')
if type -p portmaster >/dev/null 2>&1; then
ports=$(portmaster -l | grep -Eo '[0-9]+ total installed' | sed 's/ total installed//')
pkgs=$((${pkgs} + ${ports}))
fi
;;
'FreeBSD')
# pkgng if present (probed without touching the network), else pkg_info.
pkgs=$(if TMPDIR=/dev/null ASSUME_ALWAYS_YES=1 PACKAGESITE=file:///nonexistent pkg info pkg >/dev/null 2>&1; then
pkg info | wc -l | awk '{print $1}'; else pkg_info | wc -l | awk '{sub(" ", "");print $1}'; fi)
;;
'Cygwin') cygfix=2; pkgs=$(($(cygcheck -cd | wc -l)-$cygfix)) ;;
esac
}
detectpkgs
# Detect the user's shell into $myShell by inspecting this script's parent
# process (ps on $PPID), stripping a leading "-" (login shells) and any path.
detectshell () {
if [[ ! "${shell_type}" ]]; then
if [[ "${OSTYPE}" == "linux-gnu" || "${OSTYPE}" == "linux" ]]; then
shell_type=$(ps -p $PPID -o cmd --no-heading)
shell_type=${shell_type/-}
shell_type=${shell_type//*\/}
elif [[ "${distro}" == "Mac OS X" ]]; then
shell_type=$(ps -p $PPID -o args| tail -1)
shell_type=${shell_type/-}
shell_type=${shell_type//*\/}
elif [[ "${distro}" == "FreeBSD" || "${distro}" == "OpenBSD" ]]; then
shell_type=$(ps -p $PPID -o args| tail -1)
shell_type=${shell_type/-}
shell_type=${shell_type//*\/}
elif [[ "${distro}" == "Cygwin" ]]; then
shell_type=$(echo "$SHELL" | awk -F'/' '{print $NF}')
else
# Generic fallback: double ps lookup for systems without -o cmd.
shell_type=$(ps -p $(ps -p $PPID | awk '$1 !~ /PID/ {print $1}') | awk 'FNR>1 {print $1}')
shell_type=${shell_type/-}
shell_type=${shell_type//*\/}
fi
fi
myShell=${shell_type}
}
detectshell
# Shell Detection - End
# WM Detection - Begin
# Detect the running window manager into $WM.  Strategy: match known WM
# process names owned by this user; if that fails, query the EWMH
# _NET_SUPPORTING_WM_CHECK root-window property via xprop.  OS X reports
# "Quartz Compositor"; Cygwin checks the Windows task list.
# NOTE(review): $wmnames is not defined anywhere in this file — with it
# unset the pgrep loop never runs and detection always falls through to
# xprop; confirm wmnames is populated by code outside this chunk.
detectwm () {
WM="Not Found"
if [[ ${distro} != "Mac OS X" && ${distro} != "Cygwin" ]]; then
if [[ -n ${DISPLAY} ]]; then
for each in "${wmnames[@]}"; do
PID="$(pgrep -U ${UID} "^$each$")"
if [ "$PID" ]; then
case $each in
'2bwm') WM="2bwm";;
'awesome') WM="Awesome";;
'beryl') WM="Beryl";;
'bspwm') WM="bspwm";;
'blackbox') WM="BlackBox";;
'budgie-wm') WM="BudgieWM";;
'cinnamon') WM="Muffin";;
'compiz') WM="Compiz";;
'dminiwm') WM="dminiwm";;
'dwm') WM="dwm";;
'e16') WM="E16";;
'emerald') WM="Emerald";;
'enlightenment') WM="E17";;
'fluxbox') WM="FluxBox";;
'fvwm') WM="FVWM";;
'herbstluftwm') WM="herbstluftwm";;
'icewm') WM="IceWM";;
'kwin') WM="KWin";;
'metacity') WM="Metacity";;
'monsterwm') WM="monsterwm";;
'musca') WM="Musca";;
'notion') WM="Notion";;
'openbox') WM="OpenBox";;
'pekwm') WM="PekWM";;
'ratpoison') WM="Ratpoison";;
'sawfish') WM="Sawfish";;
'scrotwm') WM="ScrotWM";;
'spectrwm') WM="SpectrWM";;
'stumpwm') WM="StumpWM";;
'subtle') WM="subtle";;
'swm') WM="swm";;
'wmaker') WM="WindowMaker";;
'wmfs') WM="WMFS";;
'wmii') WM="wmii";;
'xfwm4') WM="Xfwm4";;
'i3') WM="i3";;
'xmonad') WM="XMonad";;
esac
fi
if [[ ${WM} != "Not Found" ]]; then
break 1
fi
done
# EWMH fallback: resolve the supporting window, then read its
# _NET_WM_NAME for the WM's self-reported name.
if [[ ${WM} == "Not Found" ]]; then
if type -p xprop >/dev/null 2>&1; then
WM=$(xprop -root _NET_SUPPORTING_WM_CHECK)
if [[ "$WM" =~ 'not found' ]]; then
WM="Not Found"
elif [[ "$WM" =~ 'Not found' ]]; then
WM="Not Found"
elif [[ "$WM" =~ '[Ii]nvalid window id format' ]]; then
WM="Not Found"
elif [[ "$WM" =~ "no such" ]]; then
WM="Not Found"
else
WM=${WM//* }
WM=$(xprop -id ${WM} 8s _NET_WM_NAME)
WM=$(echo $(WM=${WM//*= }; echo ${WM//\"}))
fi
fi
else
# Process-name match succeeded: lowercase and normalize the name.
if [[ ${BASH_VERSINFO[0]} -ge 4 ]]; then
if [[ ${BASH_VERSINFO[0]} -eq 4 && ${BASH_VERSINFO[1]} -gt 1 ]] || [[ ${BASH_VERSINFO[0]} -gt 4 ]]; then
WM=${WM,,}
else
WM="$(tr '[:upper:]' '[:lower:]' <<< ${WM})"
fi
else
WM="$(tr '[:upper:]' '[:lower:]' <<< ${WM})"
fi
case ${WM} in
'2bwm') WM="2bwm";;
'awesome') WM="Awesome";;
'beryl') WM="Beryl";;
'blackbox') WM="BlackBox";;
'budgiewm') WM="BudgieWM";;
'cinnamon') WM="Cinnamon";;
'compiz') WM="Compiz";;
'dminiwm') WM="dminiwm";;
'dwm') WM="dwm";;
'e16') WM="E16";;
'echinus') WM="echinus";;
'emerald') WM="Emerald";;
'enlightenment') WM="E17";;
'fluxbox') WM="FluxBox";;
'fvwm') WM="FVWM";;
'herbstluftwm') WM="herbstluftwm";;
'icewm') WM="IceWM";;
'kwin') WM="KWin";;
'metacity') WM="Metacity";;
'monsterwm') WM="monsterwm";;
'musca') WM="Musca";;
*'gala'*) WM="Gala";;
'mutter'*) WM="Mutter";;
'gnome shell'*) WM="Mutter";;
'muffin') WM="Muffin";;
'notion') WM="Notion";;
'openbox') WM="OpenBox";;
'pekwm') WM="PekWM";;
'ratpoison') WM="Ratpoison";;
'sawfish') WM="Sawfish";;
'scrotwm') WM="ScrotWM";;
'spectrwm') WM="SpectrWM";;
'stumpwm') WM="StumpWM";;
'subtle') WM="subtle";;
'swm') WM="swm";;
'wmaker') WM="WindowMaker";;
'wmfs') WM="WMFS";;
'wmii') WM="wmii";;
'xfwm4') WM="Xfwm4";;
'xmonad') WM="XMonad";;
'i3') WM="i3";;
esac
fi
fi
elif [[ ${distro} == "Mac OS X" && "${WM}" == "Not Found" ]]; then
if ps -U ${USER} | grep Finder >/dev/null 2>&1; then
WM="Quartz Compositor"
fi
elif [[ "${distro}" == "Cygwin" ]]; then
bugn=$(tasklist | grep -o 'bugn' | tr -d '\r \n')
wind=$(tasklist | grep -o 'Windawesome' | tr -d '\r \n')
if [ "$bugn" = "bugn" ]; then WM="bug.n"
elif [ "$wind" = "Windawesome" ]; then WM="Windawesome"
else WM="DWM"; fi
fi
}
detectwm
# WM Detection - End
# Memory Detection - Begin
detectmem () {
hw_mem=0
free_mem=0
human=1024
if [ "$distro" == "Mac OS X" ]; then
totalmem=$(echo "$(sysctl -n hw.memsize)"/${human}^2|bc)
wiredmem=$(vm_stat | grep wired | awk '{ print $4 }' | sed 's/\.//')
activemem=$(vm_stat | grep ' active' | awk '{ print $3 }' | sed 's/\.//')
compressedmem=$(vm_stat | grep occupied | awk '{ print $5 }' | sed 's/\.//')
usedmem=$(((${wiredmem} + ${activemem} + ${compressedmem}) * 4096 / 1024 / 1024))
elif [ "$distro" == "Cygwin" ]; then
total_mem=$(awk '/MemTotal/ { print $2 }' /proc/meminfo)
totalmem=$((${total_mem}/1024))
free_mem=$(awk '/MemFree/ { print $2 }' /proc/meminfo)
used_mem=$((${total_mem} - ${free_mem}))
usedmem=$((${used_mem}/1024))
elif [ "$distro" == "FreeBSD" ]; then
phys_mem=$(sysctl -n hw.physmem)
size_mem=$phys_mem
size_chip=1
guess_chip=`echo "$size_mem / 8 - 1" | bc`
while [ $guess_chip != 0 ]; do
guess_chip=`echo "$guess_chip / 2" | bc`
size_chip=`echo "$size_chip * 2" | bc`
done
round_mem=`echo "( $size_mem / $size_chip + 1 ) * $size_chip " | bc`
totalmem=$(($round_mem / ($human * $human) ))
pagesize=$(sysctl -n hw.pagesize)
inactive_count=$(sysctl -n vm.stats.vm.v_inactive_count)
inactive_mem=$(($inactive_count * $pagesize))
cache_count=$(sysctl -n vm.stats.vm.v_cache_count)
cache_mem=$(($cache_count * $pagesize))
free_count=$(sysctl -n vm.stats.vm.v_free_count)
free_mem=$(($free_count * $pagesize))
avail_mem=$(($inactive_mem + $cache_mem + $free_mem))
used_mem=$(($round_mem - $avail_mem))
usedmem=$(($used_mem / ($human * $human) ))
elif [ "$distro" == "OpenBSD" ]; then
totalmem=$(top -1 1 | awk '/Real:/ {k=split($3,a,"/");print a[k] }' | tr -d 'M')
usedmem=$(top -1 1 | awk '/Real:/ {print $3}' | sed 's/M.*//')
elif [ "$distro" == "NetBSD" ]; then
phys_mem=$(awk '/MemTotal/ { print $2 }' /proc/meminfo)
totalmem=$((${phys_mem} / $human))
if grep -q 'Cached' /proc/meminfo; then
cache=$(awk '/Cached/ {print $2}' /proc/meminfo)
usedmem=$((${cache} / $human))
else
free_mem=$(awk '/MemFree/ { print $2 }' /proc/meminfo)
used_mem=$((${phys_mem} - ${free_mem}))
usedmem=$((${used_mem} / $human))
fi
else
mem_info=$(</proc/meminfo)
mem_info=$(echo $(echo $(mem_info=${mem_info// /}; echo ${mem_info//kB/})))
for m in $mem_info; do
if [[ ${m//:*} = MemTotal ]]; then
memtotal=${m//*:}
fi
if [[ ${m//:*} = MemFree ]]; then
memfree=${m//*:}
fi
if [[ ${m//:*} = Buffers ]]; then
membuffer=${m//*:}
fi
if [[ ${m//:*} = Cached ]]; then
memcached=${m//*:}
fi
done
usedmem="$(((($memtotal - $memfree) - $membuffer - $memcached) / $human))"
totalmem="$(($memtotal / $human))"
fi
mem="${usedmem}MB / ${totalmem}MB"
}
detectmem
# Memory Detection - End
# Uptime Detection - Begin
# Detect system uptime into $uptime, formatted "Nd Nh Nm".  Sources: kernel
# boot time via sysctl (OS X/FreeBSD/OpenBSD) or /proc/uptime (Linux), with
# a parse of uptime(1) output as a last resort for other BSDs.
detectuptime () {
unset uptime
if [[ "${distro}" == "Mac OS X" || "${distro}" == "FreeBSD" ]]; then
boot=$(sysctl -n kern.boottime | cut -d "=" -f 2 | cut -d "," -f 1)
now=$(date +%s)
uptime=$(($now-$boot))
elif [[ "${distro}" == "OpenBSD" ]]; then
boot=$(sysctl -n kern.boottime)
now=$(date +%s)
uptime=$((${now} - ${boot}))
else
if [[ -f /proc/uptime ]]; then
uptime=$(</proc/uptime)
# Keep only the integer seconds before the decimal point.
uptime=${uptime//.*}
fi
fi
if [[ -n ${uptime} ]]; then
# Break total seconds into days/hours/minutes; seconds are computed but
# intentionally not displayed.
secs=$((${uptime}%60))
mins=$((${uptime}/60%60))
hours=$((${uptime}/3600%24))
days=$((${uptime}/86400))
uptime="${mins}m"
if [ "${hours}" -ne "0" ]; then
uptime="${hours}h ${uptime}"
fi
if [ "${days}" -ne "0" ]; then
uptime="${days}d ${uptime}"
fi
else
if [[ "$distro" =~ "NetBSD" ]]; then uptime=$(awk -F. '{print $1}' /proc/uptime); fi
if [[ "$distro" =~ "BSD" ]]; then uptime=$(uptime | awk '{$1=$2=$(NF-6)=$(NF-5)=$(NF-4)=$(NF-3)=$(NF-2)=$(NF-1)=$NF=""; sub(" days","d");sub(",","");sub(":","h ");sub(",","m"); print}'); fi
fi
}
detectuptime
# Uptime Detection - End
# Resolution Detection - Begin
# Detect display resolution(s) into $xResolution, space/comma separated for
# multi-monitor setups.  Uses xdpyinfo under X11, system_profiler on OS X,
# wmic on Cygwin; "No X Server" otherwise.
detectres () {
if [[ ${distro} != "Mac OS X" && ${distro} != "Cygwin" ]]; then
if [[ -n ${DISPLAY} ]]; then
if [[ "$distro" =~ "BSD" ]]; then
xResolution=$(xdpyinfo | sed -n 's/.*dim.* \([0-9]*x[0-9]*\) .*/\1/pg' | tr '\n' ' ')
else
xResolution=$(xdpyinfo | sed -n 's/.*dim.* \([0-9]*x[0-9]*\) .*/\1/pg' | sed ':a;N;$!ba;s/\n/ /g')
fi
fi
elif [[ ${distro} == "Mac OS X" ]]; then
xResolution=$(system_profiler SPDisplaysDataType | awk '/Resolution:/ {print $2"x"$4" "}')
if [[ "$(echo $xResolution | wc -l)" -ge 1 ]]; then
xResolution=$(echo $xResolution | tr "\\n" "," | sed 's/\(.*\),/\1/')
fi
elif [[ "${distro}" == "Cygwin" ]]; then
# wmic returns one width/height per monitor; pair them up index-wise.
width=($(wmic desktopmonitor get screenwidth | grep -vE '[a-z]+' | tr '\r\n' ' '))
height=($(wmic desktopmonitor get screenheight | grep -vE '[a-z]+' | tr '\r\n' ' '))
xResolution=""
len=${#width[@]}
delim=" "
for ((i=0;i<len;i++)); do
xResolution="${xResolution}${delim}${width[i]}x${height[i]}"
done
xResolution=$(echo $xResolution | sed "s/^$delim//")
else
xResolution="No X Server"
fi
}
detectres
# Resolution Detection - End
# Define the ANSI colour/attribute escape sequences used by the banner below.
startANSI()
{
# Fix: `esc` previously held a raw, invisible ESC byte inside quotes (easily
# lost by editors and copy/paste); $'\033' is the printable equivalent.
esc=$'\033'
blackf="${esc}[38;5;232m" grayf="${esc}[90m"
userNamef="${esc}[5m" arrowf="${esc}[38;5;1m"
greenb="${esc}[48;5;1m"
boldon="${esc}[1m"
reset="${esc}[0m"
}
startANSI
# Render the banner: a dotted flag on the left, the detected system facts on
# the right.  The ${...} colour variables come from startANSI; the $distro,
# $uptime, $pkgs, ... values come from the detect* functions above.  The
# unquoted EOF delimiter deliberately enables variable expansion.
cat << EOF
╭◡◠✧${boldon}${userNamef} Sam Uel (λx.x)${reset}✧◠◡╮
${blackf}${greenb}▣ ▣ ▣ ▣ ${reset}${greenb}${blackf}▣ ▣${reset}${blackf}${greenb} OS ${reset}${arrowf}${reset} $distro $distro_release $distro_codename
${grayf}▣ ▣${reset}${grayf} ▣ ▣ ${reset}${blackf}${greenb}▣ ▣${reset}${blackf}${greenb} Kernel ${reset}${reset}${arrowf}${reset} $(uname -r)
${grayf}▣ ▣ ${reset}${blackf}${greenb}▣ ▣${reset}${blackf}${greenb} ▣ ▣${reset}${blackf}${greenb} Uptime ${reset}${reset}${arrowf}${reset} $uptime
${blackf}${greenb}▣ ▣ ▣ ▣${reset}${blackf}${greenb} ▣ ▣${reset}${blackf}${greenb} Packages ${reset}${reset}${arrowf}${reset} $pkgs
${grayf}▣ ▣ ${reset}${blackf}${greenb}▣ ▣${reset}${blackf}${greenb} ▣ ▣${reset}${blackf}${greenb} Shell ${reset}${reset}${arrowf}${reset} $myShell
${grayf}▣ ▣${reset}${grayf} ▣ ▣ ${reset}${blackf}${greenb}▣ ▣${reset}${blackf}${greenb} Res ${reset}${reset}${arrowf}${reset} $xResolution
${grayf}▣ ▣ ${reset}${blackf}${greenb}▣ ▣${reset}${blackf}${greenb} ▣ ▣${reset}${blackf}${greenb} WM ${reset}${reset}${arrowf}${reset} $WM
${blackf}${greenb}▣ ▣ ▣ ▣${reset}${blackf}${greenb} ▣ ▣${reset}${blackf}${greenb} CPU ${reset}${reset}${arrowf}${reset} $cpu
${grayf}▣ ▣ ${reset}${blackf}${greenb}▣ ▣${reset}${blackf}${greenb} ▣ ▣${reset}${blackf}${greenb} GPU ${reset}${reset}${arrowf}${reset} $gpu
${grayf}▣ ▣${reset}${grayf} ▣ ▣ ${reset}${blackf}${greenb}▣ ▣${reset}${blackf}${greenb} RAM ${reset}${reset}${arrowf}${reset} $mem
${blackf}${greenb}▣ ▣ ▣ ▣${reset}${blackf}${greenb} ▣ ▣${reset}${blackf}${greenb} GitHub ${reset}${reset}${arrowf}${reset} https://github.com/samucafreitas
EOF
| true
|
133c38904307508db3224bc31c7a3bd074653022
|
Shell
|
tst2005sh/jq-helpers
|
/lib/jqs1.lib.sh
|
UTF-8
| 295
| 2.609375
| 3
|
[] |
no_license
|
# Loader shim for the jq stack1 helper libraries.  $JQS must point at the
# load_jq_stack1.sh script (set by eval'ing its output); ${JQS%/*} is then
# the directory the sibling libraries live in.  `false` (not exit) keeps a
# bad load from killing an interactive shell that sourced this file.
if [ -z "$JQS" ]; then
echo >&2 "Please load jqs.lib.sh with :"
echo >&2 ' eval "$(/path/to/jq-helpers/bin/load_jq_stack1.sh)"'
false
else
. "${JQS%/*}/jq_stack1.lib.sh"
. "${JQS%/*}/jq_stack1_modcall.lib.sh"
. "${JQS%/*}/jq_stack1_modload.lib.sh"
. "${JQS%/*}/jq_stack1_oneline.lib.sh"
fi
| true
|
db40408ace070601dc16fbb3dda5940358fadcc4
|
Shell
|
digitaldanny/adv-sys
|
/hw4/grader.sh
|
UTF-8
| 912
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/bash
# +-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+
# SUMMARY: run.sh
# This script runs the combiner program 10 times and returns
# the number of times the process resulted in a deadlock.
# +-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+
# Fix: assignments may not have spaces around '=' in shell — the originals
# ("DEADLOCK_THRESH = 3") were executed as commands and the variables were
# never set.  The shebang has also been moved to the first line, where it
# must be to take effect.
DEADLOCK_THRESH=3 # seconds before a process is considered deadlocked
DEADLOCK_COUNT=0  # keeps track of deadlocks
#./kill.sh
fuser -k combiner
./reset.sh
find . -type f -exec touch {} +
make
# Fix: {0..10..1} iterated 11 times; the summary above promises 10 runs.
for i in {1..10}
do
# generate a new input file for testing
python grader.py gen
# run the combiner program.. if timer exceeds DEADLOCK_THRESH, increment fail count.
./combiner 10 7 < input.txt | tee test_output.txt &
pid=$! # save the process id of the combiner pipeline (fix: no spaces around '=')
echo "$i"
wait "$pid"
# grade the results of the generated output file.
python grader.py grade
#kill $pid
done
| true
|
bf461bb17314336ee456d031e50b66da511437bf
|
Shell
|
gkswjdzz/sketch-to-art
|
/src/assets/generate_thumb.sh
|
UTF-8
| 223
| 3.46875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Generate a 150x150 centre-cropped thumbnail for every file in
# pictures/, writing results under thumbs/ with the same file name.

OUTDIR=thumbs/

# Glob directly in the for-list (not via an unquoted variable) so file
# names containing spaces survive word-splitting intact.
for f in pictures/*
do
	[ -e "$f" ] || continue    # the glob matched nothing
	filename="${f##*/}"        # strip the directory prefix (basename)
	echo "Generating thumbnail for $filename..."
	# '^' = fill the 150x150 box, then -extent crops the overflow
	# around the centre so every thumbnail is exactly 150x150.
	convert "$f" -thumbnail '150x150^' -gravity center -extent 150x150 "$OUTDIR$filename"
done
| true
|
6ee63e51d8352c8c8fed75971754db8adb5a81e9
|
Shell
|
akiomik/dotfiles
|
/bin/less-highlight
|
UTF-8
| 261
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/bash
# View a file in the pager with ANSI syntax highlighting.
#
# Usage: less-highlight <filename>

usage() {
	echo "usage: $(basename "$0") filename"
}

# No argument (or an explicit -h) prints the usage text to stderr.
if [ -z "$1" ] || [ "$1" = "-h" ]; then
	usage 1>&2
	exit 1
fi

if [ ! -f "$1" ]; then
	echo "$1: No such file or directory" 1>&2
	exit 1
fi

# --force makes highlight emit output even for unknown file types.
# BUGFIX: "$1" is quoted so paths with spaces work, and less gets -R so
# the ANSI colour codes are rendered instead of shown as ^[[... noise.
highlight -O ansi --force "$1" 2> /dev/null | less -R
| true
|
6d8fdf2f49af5d335e2ba9f701108b71ee7452e4
|
Shell
|
KrivoKu/OSLabs
|
/lab3/handler.sh
|
UTF-8
| 436
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
# Accumulator daemon fed through the named pipe "pipe":
#   '+' / '*'   select the pending operation
#   a digit     applies the pending operation to the accumulator
#   "exit"      shuts the handler down cleanly
#   anything else is treated as an error.
# The accumulator starts at 1 (multiplicative identity).

cmd=""
result=1

tail -f pipe |
while true; do
	read line
	case $line in
	'*')
		cmd="$line"
		;;
	"+")
		cmd="$line"
		;;
	"exit")
		killall tail
		echo "Quit: handler"
		exit 0
		;;
	[0-9])
		case $cmd in
		"+")
			result=$(($result+$line))
			echo $result
			;;
		"*")
			# BUGFIX: was "result+$(($result+$line))", which ran a
			# bogus command (never updating result) and added
			# instead of multiplying.
			result=$(($result*$line))
			echo $result
		esac
		;;
	*)
		killall tail
		# BUGFIX: typo "Hadler" corrected in the error message.
		echo "Handler error"
		exit 1
		;;
	esac
done
| true
|
428b8ec5aa59fd76d96c7828eae9b15c66505de9
|
Shell
|
aabarug/scripts
|
/crunch/run_pipeline
|
UTF-8
| 5,075
| 4.0625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# shellcheck disable=SC2155
set -o errexit
set -o nounset
shopt -s globstar
shopt -s nullglob
# There are several modes of operation:
#
# 1) no parameters => run cancer panel test into an automatic personal output directory (shortcut; can do this explicitly with input/output directories).
# 2) one parameter => re-run pipeline on - and into - a named, existing folder structure (e.g. for re-running somatics).
# 3) two parameters => re-run pipeline on - and into - a named, existing folder but using different INI file (no .ini suffix necessary)
# 4) three parameters => run pipeline from an input folder into an output folder. FASTQ/BAM mode is chosen according to folder contents.
# 5) four parameters => run pipeline from an input folder into an output folder but choose the pipeline repo to use (e.g. pipeline vs pipeline-test).
#
# parameters must be supplied in order listed below, intended to prioritise common use-cases
# this means that it is currently not possible to re-run on an existing folder and specify a non-pipeline-test repo.
# input folder can be anything, all output (including re-runs) is relative to /data2/processed.
# shellcheck disable=SC2015
# Entry point.  All parameters are optional and positional:
#   $1 output_name - run folder name under /data2/processed
#   $2 config      - INI name without the .ini suffix (default: Somatic)
#   $3 input       - folder of FASTQ/BAM input files (empty => re-run)
#   $4 pipeline    - repo under /data/repos to use (default: pipeline-test)
function main() {
# "&& shift || true" consumes each arg; || true keeps errexit happy
# when there is nothing left to shift.
local output_name=${1:-} && shift || true
local config=${1:-Somatic} && shift || true
local input=${1:-} && shift || true
local pipeline=${1:-pipeline-test} && shift || true
local output_root=/data2/processed
# SABR: with no parameters, run the cancer panel test
if [ -z "${output_name}" ]; then
output_name=${USER}_${pipeline/-/_}
input=/data/repos/testdata/cancerPanel
fi
local output=${output_root}/${output_name}
mkdir -p "${output}"
echoerr "PIPELINE: ${pipeline}"
echoerr "CONFIG: ${config}"
echoerr "INPUT: ${input}"
echoerr "OUTPUT: ${output}"
# SABR: only replace settings.config if providing an input folder
if [ -n "${input}" ]; then
local mode_flag
# choose_input_mode reads the `input` local via bash dynamic scoping
# and prints the -f/-b flag on stdout.
mode_flag=$(choose_input_mode) || exit
"/data/repos/${pipeline}/bin/create_config.pl" -i "${config}.ini" -o "${output}" "${mode_flag}" "${input}"
fi
cd "${output}"
# SABR: if there was no input folder, we are re-running a previous settings.config, and want to leave filenames in place
perl -pi -e "s#(INIFILE\s+).*#\1/data/repos/${pipeline}/settings/${config}.ini#" settings.config
perl -pi -e "s#(OUTPUT_DIR\s+).*#\1${output}#" settings.config
write_metadata "${input}" "${output}" "${output_name}"
# pipeline.pl's stdout is noisy; drop it, keep stderr visible.
"/data/repos/${pipeline}/bin/pipeline.pl" "${output}/settings.config" > /dev/null
}
# Decide between FASTQ and BAM mode by inspecting the input folder.
# Reads the caller's `input` variable (bash dynamic scoping); prints the
# create_config.pl mode flag (-f or -b) on stdout, logs to stderr, and
# exits 1 when the folder contains both (or neither) file type.
function choose_input_mode() {
local fastq_files=$(find -L ${input} -type f -name '*.fastq*')
local bam_files=$(find -L ${input} -type f -name '*.bam*')
if [ -n "${fastq_files}" ] && [ -z "${bam_files}" ]; then
echoerr "INPUT MODE: FASTQ"
local mode_flag="-f"
elif [ -z "${fastq_files}" ] && [ -n "${bam_files}" ]; then
echoerr "INPUT MODE: BAM"
local mode_flag="-b"
else
# Ambiguous or empty input: dump what was found to help debugging.
echoerr "ERROR: could not choose between FASTQ and BAM MODE"
echoerr "  FASTQ files:"
echoerr "${fastq_files}"
echoerr "  BAM files:"
echoerr "${bam_files}"
exit 1
fi
# stdout is the function's "return value", captured by the caller.
echo $mode_flag
}
# Write ${output}/metadata describing the samples found in ${input}.
#   $1 input       - folder scanned for *.fastq.gz / *.bam files
#   $2 output      - run folder the metadata file is written into
#   $3 output_name - recorded as "set_name"
# Recognizes CPCT/DRUP somatic R(eference)/T(umor) pairs by file-name
# prefix; any other "<sample>_..." name is treated as a single reference
# sample.  Exits 1 when no consistent pair/single sample is derivable.
function write_metadata() {
    local input=$1 && shift
    local output=$1 && shift
    local output_name=$1 && shift
    local metadata_file="${output}/metadata"
    declare -A samples=()
    declare -A samples_names=()

    # SABR: ignore it if it already exists
    if [ -e "${metadata_file}" ]; then
        echoerr "WARN: not generating ${metadata_file}, already exists"
        return
    fi

    # Read find's output line-by-line so paths containing spaces survive
    # (the original word-split an unquoted $(find ...)).
    local path filename
    while IFS= read -r path; do
        filename=$(basename "${path}")
        # BUGFIX: these patterns must match the file's basename; the
        # original text contained a corrupted "$(unknown)" here.
        if [[ $filename =~ ^((CPCT|DRUP)[0-9]{8})([RT])_ ]]; then
            samples[${BASH_REMATCH[3]}]=${BASH_REMATCH[1]}
        elif [[ $filename =~ ^([a-zA-Z0-9\-]+)_ ]]; then
            samples[R]=${BASH_REMATCH[1]}
        fi
        # SABR: store in extra associative array to keep complete sample name available
        if [[ $filename =~ ^([a-zA-Z0-9\-]+)_ ]]; then
            samples_names[${BASH_REMATCH[1]}]="dummy"
        fi
    done < <(find "${input}" -name "*.fastq.gz" -o -name "*.bam")

    if [ ${#samples[@]} -eq 2 ] && [ "${samples[R]}" == "${samples[T]}" ]; then
        # Matched an R/T pair from the same donor.
        cat <<EOF > "${metadata_file}"
{
    "ref_sample": "${samples[R]}R",
    "tumor_sample": "${samples[T]}T",
    "set_name": "${output_name}"
}
EOF
    elif [ ${#samples[@]} -eq 1 ]; then
        # Single-sample run: the sole key of samples_names is the name.
        local sample_name=$( echo "${!samples_names[@]}" )
        cat <<EOF > "${metadata_file}"
{
    "ref_sample": "$sample_name",
    "tumor_sample": null,
    "set_name": "${output_name}"
}
EOF
    else
        echoerr "failed to identify CPCT/DRUP somatic pair or single sample to create metadata (found: ${!samples[*]} => ${samples[*]+${samples[*]}})"
        echoerr "create ${metadata_file} manually and retry (just touch the file if un-needed)"
        exit 1
    fi
}
# Log helper: print all arguments to stderr, leaving stdout free for
# "return values" such as choose_input_mode's flag.
function echoerr() {
    # printf (unlike echo) never treats a leading "-n"/"-e" argument as
    # an option, so messages are always emitted verbatim.
    printf '%s\n' "$*" >&2
}
main "$@"
| true
|
c04eb3b5a46097a4eebdf3a7f8c8a066c59e81e0
|
Shell
|
Tunguska55/lynx
|
/reset_display
|
UTF-8
| 7,505
| 4.03125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
#Instead of using a custom generated config file, I will try to pull from /etc/xorg.config
#to add a higher level of scalability
######current potential flags
# remove-config -> removes current config
# generate -> creates new config file based on display
# -h / --h / -help / --help -> show current commands
bold=$(tput bold)
normal=$(tput sgr0)
configFile="/u/$USER/.config/reset_display.conf"
nvConf="/etc/X11/xorg.conf"
flag=$1 #capture the flag into a script variable rather than leaving as special
#Declaring writeable variables here as they persist through the code
PrimDisplay=""
SecDisplay=""
PrimPosition=""
SecPosition=""
PrimaryOption=""
PrimaryRes=""
SecRes=""
if [[ $flag = "-h" || $flag = "--h" || $flag = "-help" || $flag = "--help" ]]
then
cat <<EOF
usage: reset_display [option]
${bold}generate${normal} : generates custom config
${bold}remove-config${normal} : removes custom config
${bold}custom - broken${normal} : reset display using the xorg config file located $nvConf
If running remotely, run the command:
${bold}setenv DISPLAY :0${normal} before running the script
Description:
This script is designed to reset the displays after a monitor shuts off or sleeps
When run by itself, the script will reset the displays based on the self-generated configuration file
Primary use (pulls from configuration file):
reset_display
If you want to generate a custom config to place in ${bold}$configFile${normal}:
reset_display generate
${bold}(NOTE: Make sure the displays are set the way you want first, before doing this or you will record incorrect parameters)${normal}
If you want to remove the custom config:
reset_display remove-config
If you want to use the nvidia config (feature currently broken):
reset_dusplay custom
How to use:
1) Change settings in nvidia-settings, save xorg.conf
2) Run ${bold}reset_display generate${normal} to create the custom configuration file
3) Whenever the screen mirrors, run ${bold}reset_display${normal} in terminal
EOF
exit 2
fi
#This is a flag to allow the user to remove the config without admin intervention
#need to turn this into a try at some point
if [[ $flag = "remove-config" ]] && [[ -e $configFile ]]
then
rm -f ${configFile}
printf "Config file removed successfully \n"
printf "Exiting script \n"
exit 2
elif [[ $flag = "remove-config" ]] && [[ ! -e $configFile ]]
then
printf "Config file not found, either it has been moved or needs to be generated \n"
fi
#Create a config file based on current settings (don't run when monitor is messed up)
if [[ ! -e $configFile ]] && [[ $flag = "generate" ]]
then
printf "Creating config file now... \n"
#printf "in the if statement \n" #debug statement
PrimaryOption=$(xrandr -q | grep -e "\bprimary\b" | grep -v "disconnected" | awk '{print $1}')
if [[ ! $PrimaryOption = "" ]]
then
PrimDisplay=${PrimaryOption}
SecDisplay=$(xrandr -q | grep -e "\bconnected\b" | grep -v "primary" | awk '{print $1}')
PrimPosition=$(xrandr -q | grep -e "\bprimary\b" | awk '{print $4}' | cut -d+ -f2- | sed 's/+/\x/g')
SecPosition=$(xrandr -q | grep -e "\bconnected\b" | grep -v "primary" | awk '{print $3}'| cut -d+ -f2- | sed 's/+/\x/g')
PrimaryRes=$(xrandr -q | grep -e "\bprimary\b" | awk '{print $4}' | cut -d+ -f1)
SecRes=$(xrandr -q | grep -e "\bconnected\b" | grep -v "\bprimary\b" | awk '{print $3}' | cut -d+ -f1)
elif [[ $PrimaryOption = "" ]]
then
PrimDisplay=$(xrandr -q | grep -e "+0+0" | awk '{print $1}')
SecDisplay=$(xrandr -q | grep -e "\bconnected\b" | grep -v "+0+0" | awk '{print $1}')
PrimPosition=$(xrandr -q | grep -e ${PrimDisplay} | awk '{print $3}' | cut -d+ -f2- | sed 's/+/\x/g')
SecPosition=$(xrandr -q | grep -e ${SecDisplay} | awk '{print $3}' | cut -d+ -f2- | sed 's/+/\x/g')
PrimaryRes=$(xrandr -q | grep -e ${PrimDisplay} | awk '{print $3}' | cut -d+ -f1)
SecRes=$(xrandr -q | grep -e ${SecDisplay} | awk '{print $3}' | cut -d+ -f1)
fi
#The indentation of EOF from top to bottom is intentional, spacing it would break it
cat <<EOF > $configFile
#This file is created for the reset_display script
#If this file gets corrupted please remove it as the script will recreate it
#Do not modify!
#Talk to Jon, Hector or Jack for any questions
PRIMARYOPTION=$PrimaryOption
PRIMARYDISPLAY=$PrimDisplay
SECONDARYDISPLAY=$SecDisplay
PRIMARYPOSITION=$PrimPosition
SECONDARYPOSITION=$SecPosition
PRIMARYRESOLUTION=$PrimaryRes
SECONDARYRESOLUTION=$SecRes
EOF
printf "Done \n"
exit 3
elif [[ -e $configFile ]] && [[ $flag = "generate" ]]
then
printf "Config file already exists, if you want to remove it, run remove-config first \n"
fi
#array to store values read from conf file
declare -A disparray
IFS="="
success=0
if [[ $flag = "" ]] && [[ -e $configFile ]]
then
printf "Resetting display based on config file (${bold}${configFile}${normal}) \n"
while read -r key value
do
#printf "Content of $key is ${value//\"/} \n" #for debugging
case $key in
"PRIMARYOPTION")
disparray+=(["PRIMARYOPTION"]=$value)
;;
"PRIMARYDISPLAY")
disparray+=(["PRIMARYDISPLAY"]=$value)
;;
"SECONDARYDISPLAY")
disparray+=(["SECONDARYDISPLAY"]=$value)
;;
"PRIMARYPOSITION")
disparray+=(["PRIMARYPOSITION"]=$value)
;;
"SECONDARYPOSITION")
disparray+=(["SECONDARYPOSITION"]=$value)
;;
"PRIMARYRESOLUTION")
disparray+=(["PRIMARYRESOLUTION"]=$value)
;;
"SECONDARYRESOLUTION")
disparray+=(["SECONDARYRESOLUTION"]=$value)
;;
esac
done < $configFile
success=1
elif [[ -e $nvConf ]] && [[ $flag = "custom" ]]
then
printf "Resetting display based on config file (${nvConf}) \n"
#unfortunately I can't find the primary monitor flag in the xorg conf file
#so this will be a broken feature for now
elif [[ ! -e $configFile ]] && [[ ! -e $nvConf ]]
then
printf "Unable to find config file, please run reset_display generate to create one \n"
success=0
exit 5
fi
#code execution to reset display
if [[ -e $configFile ]] && [[ ! -z $disparray["PRIMARYDISPLAY"] ]] && [[ $success = 1 ]]
then
#printf "successfully executing \n" #debugging line
if [[ ! ${disparray["PRIMARYOPTION"]} = "" ]]
then
xrandr --output ${disparray["PRIMARYDISPLAY"]} --size ${disparray["PRIMARYRESOLUTION"]} --pos ${disparray["PRIMARYPOSITION"]} --primary;
xrandr --output ${disparray["SECONDARYDISPLAY"]} --size ${disparray["SECONDARYRESOLUTION"]} --pos ${disparray["SECONDARYPOSITION"]}
printf "Reset completed normally, primary option found \n"
exit 0
elif [[ ${disparray["PRIMARYOPTION"]} = "" ]]
then
xrandr --output ${disparray["PRIMARYDISPLAY"]} --size ${disparray["PRIMARYRESOLUTION"]} --pos ${disparray["PRIMARYPOSITION"]};
xrandr --output ${disparray["SECONDARYDISPLAY"]} --size ${disparray["SECONDARYRESOLUTION"]} --pos ${disparray["SECONDARYPOSITION"]}
printf "Reset completed normally, no primary option \n"
exit 0
fi
elif [[ ! -e $configFile ]] && [[ -z $disparray["PRIMARYDISPLAY"] ]] && [[ $flag = "" ]] || [[ $success = 0 ]] && [[ $flag = "" ]]
then
printf "Error during reset \n"
printf "Check to see if config file exists in ${bold}$configFile${normal} \n"
fi
#debugging and logging
#printf "Primary Display = ${disparray['PRIMARYDISPLAY']} \n"
#printf "Secondary Display = ${SecDisplay}\n"
| true
|
bfef9a0fc47c14d4d19860532bb795fc802b359b
|
Shell
|
DipeshMaywade/DailyAssigmnment
|
/function/program1.sh
|
UTF-8
| 348
| 3.875
| 4
|
[] |
no_license
|
#!/bin/bash
# Write a function to check if the two numbers are Palindromes
# BUGFIX: the shebang must be the first line of the file (it previously
# sat on line 2, so its "-x" trace flag never took effect).

read -p "Enter A Number; " a

num=$a
reverse=0

# Peel the last digit off "a" each pass and push it onto "reverse".
while [ "$a" -gt 0 ]
do
	lastDigit=$(( a % 10 ))
	reverse=$(( reverse * 10 + lastDigit ))
	a=$(( a / 10 ))
done

echo "$reverse"

# BUGFIX: "Pailndrome"/"Pailendrome" typos corrected in the output.
if [ "$num" -eq "$reverse" ]
then
	echo "Number Is Palindrome !"
else
	echo "Number Is Not Palindrome !"
fi
| true
|
7fa636efd2e2e22e73d5fea61e7c15e446629ff2
|
Shell
|
astorise/k3d
|
/deploy-aur.sh
|
UTF-8
| 1,733
| 3.703125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build and publish an AUR package update from CI.
# Required env: PACKAGE_NAME, COMMIT_REF, COMMIT_MESSAGE, SSH_PRIVATE_KEY
# (base64-encoded), COMMIT_USERNAME, COMMIT_EMAIL.  Runs as root inside
# an Arch container; the actual build happens as the "aur" user because
# makepkg refuses to run as root.
set -e
# Setup base system
pacman -Syu --noconfirm openssh git gettext binutils archlinux-keyring
sed -i "s/INTEGRITY_CHECK=.*$/INTEGRITY_CHECK=(sha256)/" /etc/makepkg.conf
useradd -ms /bin/bash aur
# Everything below runs as "aur".  The quoted 'EOSU' delimiter keeps the
# block literal here; variables are expanded by the aur user's shell at
# run time (env is preserved via su -m).
su -m aur <<'EOSU'
set -e
# Configuration
export HOME=/home/aur
export REPO_URL="ssh://aur@aur.archlinux.org/$PACKAGE_NAME.git"
export NEW_RELEASE="${COMMIT_REF##*/v}"
export COMMIT_MESSAGE="$(echo $COMMIT_MESSAGE | envsubst)"
echo "---------------- AUR Package version $PACKAGE_NAME/$NEW_RELEASE ----------------"
# SSH & GIT Setup
mkdir "$HOME/.ssh" && chmod 700 "$HOME/.ssh"
ssh-keyscan -t ed25519 aur.archlinux.org >> "$HOME/.ssh/known_hosts"
echo -e "$SSH_PRIVATE_KEY" | base64 -d > "$HOME/.ssh/id_rsa"
chmod 600 "$HOME/.ssh/id_rsa"
git config --global user.name "$COMMIT_USERNAME"
git config --global user.email "$COMMIT_EMAIL"
# Clone AUR Package
cd /tmp
echo "$REPO_URL"
git clone "$REPO_URL"
cd "$PACKAGE_NAME"
# Generate a dummy PKGBUILD so we can grab the latest releases SHA256SUMS
cat PKGBUILD.template | envsubst '$NEW_RELEASE' > PKGBUILD
export SHA256_SUMS_x86_64="$(CARCH=x86_64 makepkg -g 2> /dev/null)"
echo "SHA256_SUMS_x86_64: $SHA256_SUMS_x86_64"
export SHA256_SUMS_aarch64="$(CARCH=aarch64 makepkg -g 2> /dev/null)"
echo "SHA256_SUMS_aarch64: $SHA256_SUMS_aarch64"
export SHA256_SUMS_arm="$(CARCH=arm makepkg -g 2> /dev/null)"
echo "SHA256_SUMS_arm: $SHA256_SUMS_arm"
cat PKGBUILD.template | envsubst '$NEW_RELEASE$SHA256_SUMS_x86_64$SHA256_SUMS_aarch64$SHA256_SUMS_arm' > PKGBUILD
makepkg --printsrcinfo > .SRCINFO
echo "------------- BUILD DONE ----------------"
git add PKGBUILD .SRCINFO
git commit -m "$COMMIT_MESSAGE"
git push
echo "------------- PUBLISH DONE ----------------"
EOSU
| true
|
ba8fc55e49cd2626efe2a4caec59c7db63172361
|
Shell
|
kailinreddy/UNIX-Shell
|
/Automating Shell Commands Example
|
UTF-8
| 1,203
| 3.75
| 4
|
[] |
no_license
|
#!/bin/sh
# Session summary: greet the current user, count active logins, and show
# this month's calendar plus the current date/time.

# $USER is set by the login shell to the current user's name.
echo "Hello $USER"

# `who` prints one line per logged-in session; piping into `wc -l`
# counts those lines, i.e. the number of active logins.
echo -n "Number of logins: "
who | wc -l

# `cal` prints the current month's calendar with today highlighted.
echo "Calender"
cal

# `date` prints the current date, time and timezone.
echo -n "Today is "
date

# Explicit success status for callers.
exit 0
| true
|
6633b3167b2573684804daf62647ef355b8fdc76
|
Shell
|
NishkarshRaj/setting-up-LAMP-Stack-using-shell-scripting
|
/Installation_script.sh
|
UTF-8
| 863
| 2.84375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# One-shot LAMP (Linux / Apache / MySQL / PHP 7.0) provisioning script
# for Debian/Ubuntu.  Requires sudo rights and network access for apt.
echo -e "\n\nUpdating Apt Packages and upgrading latest patches\n"
sudo apt-get update -y && sudo apt-get upgrade -y
echo -e "\n\nInstalling Apache2 Web server\n"
sudo apt-get install apache2 apache2-doc apache2-mpm-prefork apache2-utils libexpat1 ssl-cert -y
echo -e "\n\nInstalling PHP & Requirements\n"
sudo apt-get install libapache2-mod-php7.0 php7.0 php7.0-common php7.0-curl php7.0-dev php7.0-gd php-pear php7.0-mcrypt php7.0-mysql -y
echo -e "\n\nInstalling MySQL\n"
sudo apt-get install mysql-server mysql-client -y
echo -e "\n\nPermissions for /var/www\n"
# Give the Apache service user ownership of the web root.
sudo chown -R www-data:www-data /var/www
echo -e "\n\n Permissions have been set\n"
echo -e "\n\nEnabling Modules\n"
# mod_rewrite for pretty URLs; mcrypt for legacy PHP crypto APIs.
sudo a2enmod rewrite
sudo phpenmod mcrypt
echo -e "\n\nRestarting Apache\n"
sudo service apache2 restart
echo -e "\n\nLAMP Installation Completed"
exit 0
| true
|
8769eb622549e0b18401d7094a9afd436799b891
|
Shell
|
haizenbergD/bash-udemy
|
/scripts/basics.sh
|
UTF-8
| 1,099
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
#name=tea
#
#echo "$name ${#name}" # ${#valiable} - chars count
#echo $(( 2#111 )) # moving 111 to 7, like parseInt with provided system
#
#var=4
#
#if [ "$var" -lt 0 ]; then
# echo "Ok"
# else echo "Not"
#fi
#
#colors="one two three"
#
#for col in $colors #for loop
# do
# echo $col
#done
#
#let "y=((x=20, 10/2))" #???
#echo $y
#c=MvEUPPERCASE
#
#echo "${c,}" # first to lowercase
#echo "${c,,}" # all to lower case
#c=tEsT
#echo ${c^} # first to lowercase
#echo ${c,,} # all to lower case
#let val=500/2 #ariphmetical operations
#val2=`echo $val`
#echo $val2
#var=20
#
#if [ "$var" -gt 15 ]
#then :
#else
# echo $var
#fi
#touch text.txt
#
#echo "first" > text.txt # Rewrite all data in a file
#echo "second" >> text.txt # Add to file
#
#cat text.txt
#
#: > text.txt # clean up the file
#
#cat text.txt
#
#let pow=3**3 # two ** - pow
#echo $pow
#var=10 #ternar operator
#
#echo $(( var2 = var <= 10 ? 1 : 2 ))
#echo {1..8} # Range, loop
#var=10
#
#if [ "$var" -eq 5 ] || [ "$var" -ne 5 ] # rules combination, OR and AND
#then
# echo "Well"
#else
# echo "..."
#fi
| true
|
66005674f05480827cb4be8fbefe678774ba3e1d
|
Shell
|
jcoglan/restore-capistrano
|
/script/stop
|
UTF-8
| 133
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash
# Stop the restore daemon (if it is running) and remove its pidfile.

pid_file="tmp/pids/restore.pid"

# Nothing to do when the daemon was never started.
[ -e "$pid_file" ] || exit 0

start-stop-daemon --stop -p "$pid_file"
rm "$pid_file"
| true
|
adbea4d5c906772f604cfef057da65e3d5e11a03
|
Shell
|
amsimoes/data-structures-algorithms
|
/TP2/time_small.sh
|
UTF-8
| 316
| 3.0625
| 3
|
[] |
no_license
|
#! /bin/bash
# Benchmark helper: run the given program ($1) 10 times for each input
# size 0, 500, 1000, ..., 10000 and append the timings to
# "tempos_pequenos_<program>.txt".

# Robustness: without an argument the original silently ran "./ <n>".
if [ -z "$1" ]; then
	echo "usage: $0 <program>" >&2
	exit 1
fi

outfile="tempos_pequenos_$1.txt"

counter=0
while [ "$counter" -le 10000 ]; do
	echo "$counter" >> "$outfile"
	counter2=0
	while [ "$counter2" -lt 10 ]; do
		"./$1" "$counter" >> "$outfile"
		# $(( )) replaces the archaic `let`; quoting fixes names with spaces.
		counter2=$((counter2 + 1))
	done
	counter=$((counter + 500))
	echo "" >> "$outfile"    # blank separator between input sizes
done
echo "DONE :-)"
| true
|
57147b2cbbdb989ae0c62296d54ffe87613afcab
|
Shell
|
BcTpe4HbIu/configs
|
/bin/pingwait
|
UTF-8
| 168
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
# Block until the given host answers a ping, printing a dot per attempt.
#
# Usage: pingwait <host>

[ -z "$1" ] && exit 1

# Let Ctrl-C abort the wait cleanly.
trap exit SIGINT

echo -n "Waiting for $1 "
# -c 1: single probe; -w 1: give up after one second; -n: numeric output
# (skip reverse DNS).  BUGFIX: "$1" quoted so odd host strings survive.
while ! ping -c 1 -n -w 1 "$1" &> /dev/null
do
	echo -n .
done
echo
echo Server online
| true
|
4bcfd51d61466ccbf2f2b6e8ff03e90ac40ba248
|
Shell
|
grayyeargin/dotfiles-1
|
/.welcome_prompt.sh
|
UTF-8
| 918
| 3.859375
| 4
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
#-------------------------------------------------------------------------------
# Welcome Prompt
# prints stats on terminal load
#-------------------------------------------------------------------------------
# welcome and unwelcome functions to toggle welcome_prompt are in .bash_prompt
WELCOME_PROMPT=true
welcome_msg() {
echo ${white}${bg_cyan} \
GRAY ${reset}${white}${bg_violet} \
YEARGIN ${reset}${white}${bg_orange} \
Brooklyn, NY ${reset}
echo "------------------------------------------"
echo $(git --version)
if which brew >/dev/null; then
echo $(brew -v)
fi
echo ${violet}${bg_cyan} \
NODE $(node --version)${reset}
echo ${reset}${white}${bg_violet} \
NPM $(npm --version)${reset}
echo "------------------------------------------"
echo "type ${BOLD}unwelcome${RESET} to remove this message"
}
if [[ $WELCOME_PROMPT == true ]]; then welcome_msg; fi
| true
|
b5b2be611b1713ff442f93f7286e0ad7f2c3f430
|
Shell
|
kartoteket/d3by5-map
|
/bin/build.sh
|
UTF-8
| 1,100
| 2.84375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
browserify docs/js/main.js > docs/bundle.js
# uglifyjs dist/d3by5.map.js -m -c > dist/d3by5.map.min.js
# PACKAGE_VERSION=$(cat package.json \
# | grep version \
# | head -1 \
# | awk -F: '{ print $2 }' \
# | sed 's/[",]//g' \
# | tr -d '[[:space:]]')
# echo 'Version is -'$PACKAGE_VERSION'-'
# build line chart script
# buildDist()
# {
# mkdir -p dist && (
# browserify src/js/d3by5-map.js > dist/d3by5.map.js &&
# uglifyjs dist/d3by5.map.js -m -c > dist/d3by5.map.min.js
# )
# }
# build demo
# buildDemo()
# {
# mkdir -p demo && (
# # css
# node-sass --output-style expanded src/scss -o src/css &
# postcss -u autoprefixer -o demo/css/main.css src/css/main.css &
# # js
# browserify src/js/main.js -o demo/js/main.js;
# # copy assets
# cp src/index.html demo/ &
# # mkdir -p demo/js/vendor && cp -a src/js/vendor/. demo/js/vendor/ &
# # mkdir -p demo/css/vendor && cp -a src/css/vendor/. demo/css/vendor/ &
# mkdir -p demo/data && cp -a src/data/. demo/data/
# )
# }
# buildDist
# buildExample
| true
|
29dcea0cfe18dae3a1fc6bed69f2176435448906
|
Shell
|
opendaylight/integration-distribution
|
/karaf-scripts/src/main/assembly/bin/set_persistence.sh
|
UTF-8
| 2,924
| 3.984375
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Copyright (c) 2015 Brocade Communications Systems, Inc. and others. All rights reserved.
#
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License v1.0 which accompanies this distribution,
# and is available at http://www.eclipse.org/legal/epl-v10.html
#
# Print an optional error message ($1) followed by the usage help, then
# exit 1.  Called for bad/missing arguments and unrecoverable setup
# errors; never returns.
function usage()
{
# Print any error messages
test "$1" != "" && echo " ERROR: $1"
# Print standard usage help
cat << EOF
This script is used to enable or disable the config datastore
persistence. The default state is enabled. The user should
restart controller to apply changes. The script can be used
before starting controller for the first time.
Usage: $0 <on/off>
EOF
exit 1
}
# Remind the operator that the new persistence setting only takes effect
# after a manual controller restart.
function end_banner
{
# Quoted delimiter: the banner contains no expansions, keep it literal.
cat <<'BANNER'
################################################
## NOTE: Manually restart controller to ##
## apply configuration. ##
################################################
BANNER
}
# Validate the command line: exactly one parameter (on/off) is expected.
# Stores it in the global SWITCH; wrong arity hands off to usage(),
# which prints help and exits.
function get_cli_params
{
# Missing argument entirely?
[ $# -eq 0 ] && usage
# The single argument selects the desired persistence state.
SWITCH="$1"
# Extra arguments are rejected as well.
[ $# -ne 1 ] && usage "Too many parameters"
}
# Toggle config-datastore persistence in ${CLUSTERCONF} according to the
# global SWITCH: "off" replaces the commented default with an explicit
# persistent=false; "on" restores the commented default (persistence on
# is the default).  Any other value falls through to usage(), which exits.
function modify_conf_file
{
if [ "${SWITCH}" == "off" ]; then
	echo "disabling config datastore persistence"
	# BUGFIX: ${CLUSTERCONF} quoted so paths with spaces don't word-split.
	sed -i -e "s/^#persistent=true/persistent=false/" "${CLUSTERCONF}"
elif [ "${SWITCH}" == "on" ]; then
	echo "enabling config datastore persistence"
	sed -i -e "s/^persistent=false/#persistent=true/" "${CLUSTERCONF}"
else
	usage "Allowed values are on/off"
fi
}
# Locate the cluster datastore config file relative to this script and,
# when it is missing, seed it from the sal-clustering-config artifact
# bundled under the controller's system/ repository.
# Sets the globals BIN_DIR, CONTROLLER_DIR, CONF_DIR and CLUSTERCONF
# (read later by modify_conf_file).  Exits via usage() on failure.
function verify_configuration_file
{
# Constants
BIN_DIR=`dirname $0`
test ${BIN_DIR} == '.' && BIN_DIR=${PWD}
CONTROLLER_DIR=`dirname ${BIN_DIR}`
CONF_DIR=${CONTROLLER_DIR}/etc
CLUSTERCONF=${CONF_DIR}/org.opendaylight.controller.cluster.datastore.cfg
# Verify configuration files are present in expected location.
if [ ! -f ${CLUSTERCONF} ]; then
# Check if the configuration files exist in the system
# directory, then copy them over.
ORIG_CONF_DIR=${CONTROLLER_DIR}/system/org/opendaylight/controller/sal-clustering-config
# The bundled artifact's version is recorded in maven-metadata-local.xml.
version=$(sed -n -e 's/.*<version>\(.*\)<\/version>/\1/p' ${ORIG_CONF_DIR}/maven-metadata-local.xml)
ORIG_CONF_DIR=${ORIG_CONF_DIR}/${version}
ORIG_CLUSTER_CONF=sal-clustering-config-${version}-datastore.cfg
if [ -f ${ORIG_CONF_DIR}/${ORIG_CLUSTER_CONF} ]; then
cat <<EOF
NOTE: Cluster configuration file not found. Copying from
${ORIG_CONF_DIR}
EOF
cp ${ORIG_CONF_DIR}/${ORIG_CLUSTER_CONF} ${CLUSTERCONF}
else
usage "Cluster configuration file not found"
fi
fi
}
# Orchestrate the run: parse arguments, make sure the config file
# exists, flip the persistence flag, and remind the operator that a
# manual controller restart is still required.
function main
{
get_cli_params "$@"
verify_configuration_file
modify_conf_file
end_banner
}
main "$@"
# vim: ts=4 sw=4 sts=4 et ft=sh :
| true
|
bf7c71c593b7dc231b9e79da0ad7fc8121eedfb8
|
Shell
|
mk-pmb/phutility-160816-pmb
|
/web/windav/test/basics.mkcol.sh
|
UTF-8
| 1,126
| 2.734375
| 3
|
[
"ISC",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# -*- coding: utf-8, tab-width: 2 -*-
function test_main () {
local SELFPATH="$(readlink -m "$BASH_SOURCE"/..)"
local SELFNAME="$(basename "$BASH_SOURCE" .sh)"
cd "$SELFPATH" || return $?
source req.sh --lib || return $?
local DEMO_DIR= TESTNAME=
for DEMO_DIR in ../demo.*/; do
case "$DEMO_DIR" in
*.errdoc/ ) continue;;
esac
DEMO_DIR="$(basename "$DEMO_DIR")"
TESTNAME="$SELFNAME.${DEMO_DIR#demo.}"
req_test test_mkcol || return $?
done
return 0
}
function test_mkcol () {
BASEURL+='subdir/'
req HEAD foo
req MKCOL foo/bar
req MKCOL foo/bar +auth
req MKCOL foo +auth L=7 '' hello
req MKCOL foo +auth
req MKCOL foo/bar +auth
req MKCOL foo/bar +auth
req MKCOL foo/bar/ +auth
req DELETE foo +auth
req DELETE foo/ +auth
req DELETE foo/bar +auth
req DELETE foo/bar/ +auth
req DELETE foo/bar/ +auth
req DELETE foo +auth
req DELETE foo/ +auth
req DELETE foo/ +auth
req DELETE foo +auth
}
[ "$1" == --lib ] && return 0; test_main "$@"; exit $?
| true
|
2b13a9df455acafeb29e41c6eb3735fe4f1beb02
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/google-raiden-mod-git/PKGBUILD
|
UTF-8
| 728
| 2.75
| 3
|
[] |
no_license
|
# PKGBUILD for the Google "raiden" kernel module (ChromeOS Suzy-Q
# debug/programming cable support), built from the chromiumos EC repo.
pkgname=google-raiden-mod-git
pkgver=r4231.5dde472
pkgrel=1
pkgdesc='Kernel module for Google ChromeOS Suzy-Q programmator'
arch=(i686 x86_64)
url='https://chromium.googlesource.com/chromiumos/platform/ec/'
license=(BSD)
makedepends=(git)
install=raiden.install
source=(git+https://chromium.googlesource.com/chromiumos/platform/ec)
md5sums=('SKIP')
# VCS package version: r<commit count>.<short hash> of the checkout.
pkgver() {
cd ec
printf "r%s.%s" "$(git rev-list --count HEAD)" "$(git rev-parse --short HEAD)"
}
build() {
cd ec/extra/usb_serial
make
}
# Install the udev rule and the module under the *running* kernel's
# module tree (note: ties the package to the build host's `uname -r`).
package() {
cd ec/extra/usb_serial
install -Dm644 51-google-serial.rules "$pkgdir/usr/lib/udev/rules.d/51-google-serial.rules"
install -Dm644 raiden.ko "$pkgdir/usr/lib/modules/$(uname -r)/kernel/drivers/google/raiden.ko"
}
| true
|
cb91950125d46b107a84f8f2ab7358df41713a2a
|
Shell
|
Backup-Gits/0019-scripts
|
/build/4.0.0/pkg/script/script.pkg.d/script/setqblogin
|
UTF-8
| 229
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
#set -x
# Install the qblogin SSH key material shipped in /tmp/qblogin.tar:
# unpack it into /etc/.ssh and authorize the public key for root logins,
# then remove the payload from /tmp.

# BUGFIX: -p so a pre-existing directory is not an error.
mkdir -p /etc/.ssh
# BUGFIX: bail out if cd fails, instead of extracting into the wrong dir.
cd /tmp || exit 1
tar xfC /tmp/qblogin.tar .
cp -a /tmp/qblogin/* /etc/.ssh/

# Make sure root has an ~/.ssh directory before appending the key.
if [ ! -d /root/.ssh ];then
	mkdir /root/.ssh
fi
cat /etc/.ssh/qlogin.pub >> /root/.ssh/authorized_keys

# Clean up the unpacked payload.
rm -rf /tmp/qblogin*
| true
|
a8c669848f804ad97b1f6687c6a8f8c64ddbf18b
|
Shell
|
carlosdoe/scripts
|
/mpv/mpvsocket
|
UTF-8
| 152
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash
# Send a keypress to every mpv instance whose command line matches $1,
# via the per-PID IPC sockets under /tmp/mpvSockets/.
#
# Usage: mpvsocket <process-pattern> <key>

# $(...) replaces the legacy backticks; may yield several PIDs.
pid=$(pgrep -f "$1")
for s in $pid; do
	for i in "/tmp/mpvSockets/$s"; do
		# mpv JSON IPC "keypress" command; the socket path is quoted
		# and $2 is spliced in while keeping the JSON bytes identical.
		echo '{"command":["keypress", "'"$2"'" ]}' | socat - "$i"
	done
done
| true
|
04ea2a0c8c6d51f326dd7cf8d8c210d46daddb83
|
Shell
|
hlaineka/42sh
|
/create_large_file
|
UTF-8
| 90
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
# Seed ~/.42sh_history with 1055 dummy entries, used to exercise the
# shell's handling of a large history file.

count=1
while [ "$count" -le 1055 ]; do
	echo "echo Welcome $count times" >> ~/.42sh_history
	count=$((count + 1))
done
| true
|
82323fa60a529dc83698f549dfa0f156b5714f87
|
Shell
|
lvadla/dotfiles
|
/scripts/multi-window-space-yabai-border.sh
|
UTF-8
| 193
| 2.53125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Toggle yabai's window border: keep it on only when the currently
# focused space holds more than one window.

window_ids="$(yabai -m query --spaces --space | jq '.windows')"

# jq prints the window-id array on one line; a comma inside it means
# the space contains at least two windows.
case "$window_ids" in
	*,*) yabai -m config window_border on ;;
	*)   yabai -m config window_border off ;;
esac
| true
|
71c752979ca0fb49b55925cd9b7372fb32fe0922
|
Shell
|
AndreiBarsan/dotfiles
|
/bin/unfuck-cuda
|
UTF-8
| 1,447
| 3.515625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
set -eu
echo
echo
echo "Unfucking nVidia drivers and CUDA."
echo "You should run this from your TTY."
echo "Hold on to your b-hole!!!"
echo "SIT YO ASS DOWN!!! This script needs input every now and then."
echo
echo
sudo apt-get update || exit 1
sudo apt-get purge nvidia-cuda* || exit 2
# sudo apt-get purge nvidia-* || exit 3
# Stop lightdm if it is running.
if ! hash lightdm >/dev/null 2>&1; then
sudo service lightdm stop || exit 4
fi
# echo "Using 14.04 Ubuntu package-based setup (i.e., NOT runfile-based)"
# echo "This is what the CUDA docs recommend!"
# DEB_FILE="cuda-repo-ubuntu1404-8-0-local-ga2_8.0.61-1_amd64-deb"
# if ! [[ -f "/tmp/$DEB_FILE" ]]; then
# cd /tmp && \
# wget "https://developer.nvidia.com/compute/cuda/8.0/Prod2/local_installers/$DEB_FILE"
# fi
# sudo dpkg -i "/tmp/$DEB_FILE" || exit 101
# sudo apt-get update
# sudo apt-get install cuda || exit 102
# echo "Exiting before runfile crap for dev purposes."
# exit
RUNFILE="cuda_8.0.61_375.26_linux-run"
if ! [[ -f "/tmp/$RUNFILE" ]]; then
(cd /tmp &&
wget "https://developer.nvidia.com/compute/cuda/8.0/Prod2/local_installers/$RUNFILE")
fi
# WARNING: Make sure you double check (Google it!) whether you need to also
# install the OpenGL libraries! If you force install them when it's not
# necessary, it's possibly to fuck over your computer (or, well, prevent it
# from being un-fucked).
sudo sh "/tmp/$RUNFILE" --override
| true
|
0ccc7146cf302d6dc0cd1476b0ba39836e93b633
|
Shell
|
jonathangreen/Simplified-Android-Core
|
/.travis-git-props.sh
|
UTF-8
| 283
| 2.671875
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
# Emit build properties (git commit, app version, CI build number) in
# Java-properties format on stdout; fails fast if git or the version
# file is unavailable.

GP_GIT_COMMIT=$(git rev-list --max-count=1 HEAD) || exit 1
SIMPLYE_VERSION=$(grep versionCode simplified-app-vanilla/version.properties | sed 's/versionCode=//g') || exit 1

printf 'git.commit=%s\nversion=%s\nbuild=%s\n' \
	"${GP_GIT_COMMIT}" "${SIMPLYE_VERSION}" "${TRAVIS_BUILD_NUMBER}"
| true
|
dfbcddaa59269c5ef6ac0ec37edc6e60073d9414
|
Shell
|
mieko/cult
|
/skel/roles/bootstrap/files/cult-motd
|
UTF-8
| 1,537
| 3.515625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Message-of-the-day script. NOTE: this file is an ERB template — the
# <%= ... %> placeholders are filled in by Cult (Ruby) before the script
# is installed on the node; it is not runnable as-is.
set -e
NODE=<%= node.name.sq %>
PROVIDER=<%= node.provider.name.sq %>
ZONE=<%= node.zone.sq %>
ROLES=<%= node.build_order.map(&:name).join(' ').sq %>
CREATED_AT=<%= Time.now.to_s.sq %>
SIZE=<%= node.size.sq %>
IMAGE=<%= node.image.sq %>
IPV4_PUBLIC=<%= node.ipv4_public.sq %>
IPV4_PRIVATE=<%= node.ipv4_private.sq %>
LEADER_FILE=~cult/cult/leader-of
# colorize: read lines from stdin and print each one in a 256-color
# foreground that sweeps between palette indexes 124 and 135, bouncing
# back and forth (d is the per-line step direction).
colorize() {
i=124
d=1
while IFS='' read -r LINE ; do
printf "\e[38;5;${i}m$LINE\e[0m\n"
i=$(($i + $d))
if [ $i -eq 124 ] ; then
d=1
elif [ $i -eq 135 ] ; then
d=-1
fi
done
}
# inf: print one informational line in dim grey, re-wrapped to 78 columns.
inf() {
TEXT=$(echo "$1" | fold -s -w 78 | fmt -s -c -w 78)
printf '\e[38;5;8m%s\e[0m\n' "$TEXT"
}
# We don't want everyone to have to install figlet.
cat <<EOD | colorize
@@@@@@@ @@@ @@@ @@@ @@@@@@@
@@@@@@@@ @@@ @@@ @@@ @@@@@@@
!@@ @@! @@@ @@! @@!
!@! !@! @!@ !@! !@!
!@! @!@ !@! @!! @!!
!!! !@! !!! !!! !!!
:!! !!: !!! !!: !!:
:!: :!: !:! :!: :!:
::: ::: ::::: :: :: :::: ::
:: :: : : : : : :: : : :
EOD
inf "node: $NODE@$PROVIDER/$ZONE:$SIZE/$IMAGE"
inf "addr: $IPV4_PUBLIC, $IPV4_PRIVATE"
inf "created: $CREATED_AT"
inf "roles: $ROLES"
[ -f "$LEADER_FILE" ] && inf "leader: $(cat "$LEADER_FILE")"
echo
| true
|
d1703d8684202586cbfff6faa32feea4320d32b9
|
Shell
|
LostKobrakai/valet
|
/install.sh
|
UTF-8
| 1,456
| 3.328125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# One-shot installer for Laravel Valet v1.1.3 on macOS: Homebrew, PHP 7.0,
# Composer, then Valet itself into ~/.valet-cli with a symlink on PATH.
# Install Homebrew for dependency management
/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
brew install wget > /dev/null 2>&1
# Install PHP 7.0
brew tap homebrew/dupes
brew tap homebrew/versions
brew tap homebrew/homebrew-php
# Unlink any PHP 5.6 first so php70 can take over; failures are ignored.
brew unlink php56 > /dev/null 2>&1
brew install php70
# Install Composer to /usr/local/bin
/usr/local/bin/php -r "copy('https://getcomposer.org/installer', 'composer-setup.php');"
# Verify the installer against a pinned SHA-384 before running it.
/usr/local/bin/php -r "if (hash_file('SHA384', 'composer-setup.php') === '92102166af5abdb03f49ce52a40591073a7b859a86e8ff13338cf7db58a19f7844fbc0bb79b2773bf30791e935dbd938') { echo 'Installer verified'; } else { echo 'Installer corrupt'; unlink('composer-setup.php'); } echo PHP_EOL;"
/usr/local/bin/php composer-setup.php
/usr/local/bin/php -r "unlink('composer-setup.php');"
mv composer.phar /usr/local/bin/composer
chmod +x /usr/local/bin/composer
# Download and unpack the latest Valet release
rm -rf $HOME/.valet-cli
mkdir $HOME/.valet-cli
wget https://github.com/laravel/valet/archive/v1.1.3.tar.gz -O $HOME/.valet-cli/valet.tar.gz
tar xvzf $HOME/.valet-cli/valet.tar.gz -C $HOME/.valet-cli --strip 1 > /dev/null 2>&1
ln -s $HOME/.valet-cli/valet /usr/local/bin/valet
chmod +x /usr/local/bin/valet
# Install Valet's Composer dependencies
# NOTE(review): 'php composer install' resolves 'composer' relative to the
# current directory, not /usr/local/bin/composer — this likely only works
# by accident; verify whether it should be '/usr/local/bin/composer install'.
/usr/local/bin/php composer install -d $HOME/.valet-cli
# Run the Valet installation process
$HOME/.valet-cli/valet install
| true
|
6627807115150e3f6a79d523c783a50f190bce96
|
Shell
|
canonical/charmcraft
|
/completion.bash
|
UTF-8
| 5,755
| 3.046875
| 3
|
[
"Apache-2.0"
] |
permissive
|
# -*- mode: sh; sh-shell: bash; -*-
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For further info, check https://github.com/canonical/charmcraft
# Bash programmable completion for the charmcraft CLI: completes
# subcommand names, long options (global and per-command), and file or
# directory arguments where a command expects them.
_charmcraft()
{
    # Everything is declared local so completion does not leak variables
    # into the interactive shell. FIX: the original left the 'globals'
    # array undeclared, so it persisted in the user's shell after every
    # completion attempt.
    local cur prev words cword cmd cmds globals
    cmds=(
        analyze
        clean
        close
        create-lib
        fetch-lib
        init
        list-lib
        login
        logout
        names
        pack
        promote-bundle
        publish-lib
        register
        register-bundle
        unregister
        release
        resource-revisions
        resources
        revisions
        status
        upload
        upload-resource
        version
        whoami
    )
    # Provided by the bash-completion package; populates cur/prev/words/cword.
    _init_completion || return
    # only offer long options, as they should be self-explanatory (and
    # it's not like it's more typing for the user)
    globals=(--help --verbose --quiet --project-dir)
    # if user just wrote --project-dir, only offer directories
    if [ "$prev" = "--project-dir" ] || [ "$prev" = "-p" ]; then
        _filedir -d
        return
    fi
    # check if any of the words is a command: if yes, offer the options for that
    # command (and the global ones), else offer the commands and global options
    local w c
    for w in "${words[@]}"; do
        for c in "${cmds[@]}"; do
            if [ "$c" = "$w" ]; then
                cmd="$c"
                break
            fi
        done
        if [ "$cmd" ]; then
            break
        fi
    done
    if [ -z "$cmd" ]; then
        # no command yet! show global options and the commands
        COMPREPLY=( $(compgen -W "${globals[*]} ${cmds[*]}" -- "$cur") )
        return
    fi
    # offer the options for the given command (and global ones, always available)
    case "$cmd" in
        analyze)
            COMPREPLY=( $(compgen -W "${globals[*]} --force --format" -- "$cur") )
            ;;
        login)
            case "$prev" in
                --export)
                    _filedir
                    ;;
                *)
                    COMPREPLY=( $(compgen -W "${globals[*]} --export --charm --bundle --permission --channel --ttl" -- "$cur") )
                    ;;
            esac
            ;;
        pack)
            COMPREPLY=( $(compgen -W "${globals[*]} --force --format" -- "$cur") )
            ;;
        promote-bundle)
            case "$prev" in
                --output-bundle)
                    _filedir
                    ;;
                --exclude)
                    # TODO: This should contain a list of charms in the appropriate bundle.yaml file
                    ;;
                *edge*)
                    # After a channel argument, offer only the more stable
                    # risk levels derived from the one just typed.
                    COMPREPLY=( $(compgen -W "$(echo $prev | sed s/edge/beta/) $(echo $prev | sed s/edge/candidate/) $(echo $prev | sed s/edge/stable/)" -- "$cur") )
                    ;;
                *beta*)
                    COMPREPLY=( $(compgen -W "$(echo $prev | sed s/beta/candidate/) $(echo $prev | sed s/beta/stable/)" -- "$cur") )
                    ;;
                *candidate*)
                    COMPREPLY=( $(compgen -W "$(echo $prev | sed s/candidate/stable/)" -- "$cur") )
                    ;;
                *)
                    COMPREPLY=( $(compgen -W "${globals[*]} --output-bundle --exclude latest/edge latest/beta latest/candidate latest/stable" -- "$cur") )
                    ;;
            esac
            ;;
        release)
            COMPREPLY=( $(compgen -W "${globals[*]} --revision --channel --resource" -- "$cur") )
            ;;
        init)
            COMPREPLY=( $(compgen -W "${globals[*]} --name --author --force --profile" -- "$cur") )
            ;;
        upload)
            COMPREPLY=( $(compgen -W "${globals[*]} --release --resource --format" -- "$cur") )
            ;;
        upload-resource)
            case "$prev" in
                --filepath)
                    _filedir
                    ;;
                *)
                    COMPREPLY=( $(compgen -W "${globals[*]} --filepath --image" -- "$cur") )
                    ;;
            esac
            ;;
        names)
            COMPREPLY=( $(compgen -W "${globals[*]} --format --include-collaborations" -- "$cur") )
            ;;
        # These commands all accept only --format beyond the globals;
        # collapsed from eight identical arms in the original.
        version|whoami|revisions|status|create-lib|publish-lib|fetch-lib|list-lib)
            COMPREPLY=( $(compgen -W "${globals[*]} --format" -- "$cur") )
            ;;
        *)
            # by default just the global options
            COMPREPLY=( $(compgen -W "${globals[*]}" -- "$cur") )
            ;;
    esac
}
complete -F _charmcraft charmcraft
| true
|
adcac3d6b5941fa0a0bbe64c7141047d2bfbf694
|
Shell
|
SylvainRX/WDMyCloud-MediaSorter
|
/sortmedia_scripts/sortmedias.sh
|
UTF-8
| 3,254
| 4
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# FIX: the shebang previously read '# !/bin/bash' (space after '#'), which
# is not a valid interpreter line; the script could then be run by /bin/sh
# and fail on bash-only syntax ([[ ]], shopt, arrays).
# sortmedias.sh catches files and directory uploaded and moved into
# PATH_REPOSITORY and send them individually to sortmediafile.sh, which must
# be located in the same directory.
# Execute 'incrontab -e' in your terminal then add one line in the opened
# file to put in the content of the following quotation and then save.
# '/path/to/your/repository IN_CREATE,IN_MOVED_TO,IN_ISDIR /path/to/this/script/sortmedias.sh $# $@ $% $&'
# The 4 following variable need to be set for the script to run properly.
PATH_TVSHOWS='/shares/YourShare/TV_Shows'
PATH_MOVIES='/shares/YourShare/Movies'
PATH_LOG='/shares/YourShare/repository/.log'
PATH_TRASH='/shares/YourShare/repository/.trash'
# incron passes: $1 = file name, $2 = watched directory, $3/$4 = event info.
PATH_REPOSITORY="$2"
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
TIMEOUT=60
shopt -s nocaseglob
# Ignore hidden files, torrent metadata, partial downloads, temp suffixes
# and empty event names.
[[ $1 == .* ]] && exit 1
[[ $1 == *.torrent ]] && exit 1
[[ $1 == *.part ]] && exit 1
[[ $1 == *__?????? ]] && exit 1
[[ ! "$1" ]] && exit 1
echo "($$) $(date +%Y-%m-%d\ %H:%M:%S) file: $1 repository: $2 event: $3($4)" >> "${PATH_LOG}/sort.log"
#Handle directory
if [[ $3 == *IN_ISDIR* ]]; then
DIRNAME=$1
SUCCESS=1
#Wait if files are downloading
while [[ $(inotifywait -c -t 2 -e modify "$PATH_REPOSITORY/$DIRNAME" ) == *MODIFY* ]]; do
NOTIF=$(inotifywait -c -t $TIMEOUT -e close_write "${PATH_REPOSITORY}"/"${DIRNAME}")
if [[ "$NOTIF" == *CLOSE_WRITE* ]]; then
FILENAME=$(echo $NOTIF | sed -r 's/.*,\".*\",//g')
"$DIR"/sortmediafile.sh "$DIRNAME/$FILENAME" "$PATH_REPOSITORY" "$PATH_TVSHOWS" "$PATH_MOVIES" "$PATH_LOG" "$$"
if [ $? -eq 1 ]; then SUCCESS=0; fi
fi
done
#Check for existing files in the directory
if ls "$PATH_REPOSITORY/$DIRNAME/"* 1> /dev/null 2>&1; then
for FILENAME in "$PATH_REPOSITORY/$DIRNAME/"*; do
FILENAME=$(echo $FILENAME | sed -e 's/.*\///g')
"$DIR"/sortmediafile.sh "$DIRNAME/$FILENAME" "$PATH_REPOSITORY" "$PATH_TVSHOWS" "$PATH_MOVIES" "$PATH_LOG" "$$"
if [ $? -eq 1 ]; then SUCCESS=0; fi
done
fi
#Trash the directory
if [[ ! $SUCCESS -eq 0 ]]; then
echo "($$) TRASHING : $PATH_REPOSITORY/$DIRNAME" >> "${PATH_LOG}/sort.log"
mv "$PATH_REPOSITORY/$DIRNAME" "$PATH_TRASH"
chmod 777 -R "$PATH_TRASH/$DIRNAME"
fi
#Handle single file
else
FILENAME=$1
#Wait if the file is downloading
while [[ $(inotifywait -c -t 2 -e modify "$PATH_REPOSITORY/$FILENAME" ) == *MODIFY* ]]; do
NOTIF=$(inotifywait -c -t $TIMEOUT -e close_write "$PATH_REPOSITORY/$FILENAME")
if [[ "$NOTIF" == *CLOSE_WRITE* ]]; then break; fi
done
#when transfering a bunch of file at a time from the MacOS Finder to the mycloud
#some are created then closed without any data written into them and then recreated
#to be fully written. The following test prevent to sort those empty files.
FILESIZE=$(wc -c <"$PATH_REPOSITORY/$FILENAME")
if [[ $FILESIZE -eq 0 ]]; then
echo "($$) ERROR : EMPTY FILE" >> "$PATH_LOG/sort.log"
echo "($$) TRASHING : $PATH_REPOSITORY/$FILENAME" >> "${PATH_LOG}/sort.log"
mv "$PATH_REPOSITORY/$FILENAME" "$PATH_TRASH"
chmod 777 "$PATH_TRASH/$FILENAME"
echo "($$) FAILED" >> "$PATH_LOG/sort.log"
exit 1
fi
"$DIR"/sortmediafile.sh "$FILENAME" "$PATH_REPOSITORY" "$PATH_TVSHOWS" "$PATH_MOVIES" "$PATH_LOG" "$$"
fi
echo "($$) DONE" >> "$PATH_LOG/sort.log"
| true
|
80424385bd9fda39e8d9605b132894d7552b7647
|
Shell
|
tkln/namubufferi
|
/bin/setup
|
UTF-8
| 1,817
| 3.046875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Local dev bootstrap: recreate the postgres DB, pull Heroku config vars,
# build a clean virtualenv, migrate, seed test data and create an admin.
# Requires Python and pip to be installed
# You must have postgresql installed.
# https://devcenter.heroku.com/articles/heroku-postgresql#local-setup
dropdb namubufferi-local-test
createdb namubufferi-local-test
# Get heroku config vars.
# https://devcenter.heroku.com/articles/heroku-local#set-up-your-local-environment-variables
heroku config:get SENDGRID_PASSWORD -s >> .env
heroku config:get SENDGRID_USERNAME -s >> .env
# Create a clean venv (install virtualenv if needed)
pip install virtualenv
virtualenv venv --clear
# Install requirements in the venv
source venv/bin/activate
pip install --upgrade pip
pip install -r requirements.txt
# Apply database migrations
python manage.py migrate
# Add autogenerated data
python manage.py loadtestdata namubufferiapp.Category:5
python manage.py loadtestdata namubufferiapp.Product:30
# Create admin account
echo "___________.__ __ __ .__ __ .__ "
echo "\__ ___/|__| _____/ |_ _____| | _______ _______|__| |__| ____ ____ ____ |__|"
echo " | | | |/ __ \ __\/ ___/ |/ /\__ \\_ __ \ | | |/ __ \ / \ / ___\| |"
echo " | | | \ ___/| | \___ \| < / __ \| | \/ | | \ ___/| | \/ /_/ > |"
echo " |____| |__|\___ >__| /____ >__|_ \(____ /__| |__/\__| |\___ >___| /\___ /|__|"
echo " \/ \/ \/ \/ \______| \/ \//_____/ "
echo ""
echo ""
echo "Create an admin account for namupankki..."
python manage.py createsuperuser
# Good to go.
echo "You can now login to http://localhost:5000/admin with your superuser to add test stuff to namupankki."
echo "Build complete. Run server locally with 'heroku local'"
read -n1 -r -p "Press any key to start the server..."
./bin/run
| true
|
c8e04094530317a864627b3171bd39bdb6487c44
|
Shell
|
x00158589/mqtt
|
/rooms1.sh
|
UTF-8
| 2,910
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
#
#
# Parallel arrays, indexed together: rooms[i] is a room name, pirs[i] and
# alarms[i] its PIR / alarm state ("OFF"/"ON"). They live inside the
# mosquitto_sub pipeline's subshell below.
rooms=()
pirs=()
alarms=()
# ANSI color escapes (currently unused by the table renderer below).
col_no="\033[0m"
col_green="\033[1;32m"
col_red="\033[1;31m"
# A room is registered the first time a device reports this status value.
STATUS='INITIATE'
# beep: ring the terminal bell and flash a green " OK " next to "PIR1:",
# then use save/restore-cursor escapes to wipe the line again.
# NOTE(review): not called anywhere in this script — appears to be leftover
# prototype code.
beep () {
echo -en "\aPIR1:\033[1;32m OK \033[0m"
sleep 0.5
echo -e "\e[s\033[1K\rPIR1:\e[u"
}
# print_line GLYPH COUNT
# Write GLYPH to stdout COUNT times, with no trailing newline; used to
# draw the horizontal rules and cell padding of the status table.
# "%0.s" consumes one seq argument while printing nothing, so the format
# emits exactly one GLYPH per number produced by seq.
print_line() {
    local glyph="$1"
    local count="$2"
    printf "%0.s${glyph}" $(seq 1 "$count")
}
#declare -g rooms
#declare -g msg_string
# Main loop: subscribe to every MQTT topic and redraw a status table per
# message. Topic shape assumed: <prefix>/<house>/<room>/"<device> <status>".
# Because the while runs in the pipeline's subshell, rooms/pirs/alarms
# persist across iterations but are invisible to the outer shell.
mosquitto_sub -v -t '#' | while read mqtt_msg
do IFS='/' read -r -a msg_string <<< "$mqtt_msg"
#room=$(echo "${msg_string[2]}")
house_name="${msg_string[1]}"
room="${msg_string[2]}"
IFS=' ' read -a device_status <<< "${msg_string[3]}"
status="${device_status[1]}"
device="${device_status[0]}"
# Register a room the first time its device announces STATUS ('INITIATE').
if [[ ! ${rooms[*]} =~ "$room" ]]; then
if [ $STATUS = $status ] ; then
rooms=("${rooms[@]}" "$room")
pirs=("${pirs[@]}" "OFF")
alarms=("${alarms[@]}" "OFF")
fi
# echo "There is no such a room!"
fi
#if [[ ! ${rooms[*]} =~ "$room" ]]; then
#while ! [[ $answer =~ ^(''|y|yes|Y|Yes|n|N|no|No)$ ]]
#do
# printf "Add a new room \"$room\"?[Y/n]:"
# read answer
#done
#if [[ $answer = [yY] || $answer == [yY][eE][sS] || $answer = '' ]]; then
#########################################rooms=("${rooms[@]}" "$room")
# echo "All rooms:${rooms[@]}" "The last room: " $room
#fi
# echo "There is no such a room"
#fi
#for i in "${rooms[@]}"
#do
# echo $i
# done
# Draw the table header: top rule, house name padded to the box width.
echo -n "+"; print_line "-" 50; echo "+"
n=$((49-${#house_name}))
echo -n "|" $house_name; print_line " " $n; echo "|"
echo -n "+"; print_line "-" 50; echo "+"
i=0
# One row per known room: green for OFF, red otherwise.
for room_name in "${rooms[@]}"
do
if [ "${pirs[$i]}" = "OFF" ]; then PIRSTAT="0;32m"; else PIRSTAT="0;31m"; fi
if [ "${alarms[$i]}" = "OFF" ]; then alarmstat="0;32m"; else alarmstat="0;31m"; fi
echo -e -n "|" $room_name
n=$((17-${#room_name})); print_line " " $n
printf "| PIR: \e[${PIRSTAT}%-7s\e[0m | Alarm: \e[${alarmstat}%-7s\e[0m |\n" ${pirs[$i]} ${alarms[$i]}
#echo -e $room_name $pir_status $alarm_status
((i++))
# PIR=$(echo $json_string | jq -r ".rooms[$i].PIR")
# alarm=$(echo $json_string | jq -r ".rooms[$i].Alarm")
#
# if [[ "$PIR" = "OFF" ]]; then PIRSTAT="0;32m"; else PIRSTAT="0;31m"; fi
# if [[ "$alarm" = "OFF" ]]; then alarmstat="0;32m"; else alarmstat="1;31m"; fi
# if [[ "$alarm" = "ACTIVE" ]]; then alarmstat="1;31m"
# printf "| PIR %d: \e[${PIRSTAT}%-7s\e[0m | Alarm %d: \e[${alarmstat}%-7s\e[0m |\r\a" $room $PIR $room $alarm
# sleep 0.5
# alarmstat="K"
# printf "\a"
# fi
#
# printf "| PIR %d: \e[${PIRSTAT}%-7s\e[0m | Alarm %d: \e[${alarmstat}%-7s\e[0m |\n" $room $PIR $room $alarm
#
# ((i++))
done
echo -n "+"; print_line "-" 50; echo "+"
# Move the cursor back up over the table (rows + 4 frame lines) so the
# next message redraws it in place.
((i+=4))
printf "\033[${i}A%s\r"
done
#rooms=()
#while read mqtt_msg
#do
# rooms=("${rooms[@]}" "$mqtt_msg")
# echo "${rooms[@]}"
# for i in "${rooms[@]}"
# do
# echo $i $room
# done
#done
| true
|
55de58171916503cd84a7a87ffc1347a4a7119f4
|
Shell
|
yinhuochong/app-env-docker
|
/src/openrasp-buildenv/panel/build-panel.sh
|
UTF-8
| 283
| 2.609375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Build the OpenRASP cloud management panel.
# Clones the 1.0rc1 tag on the first run and reuses the checkout after.
set -ex
# Working copy of the OpenRASP sources.
dest=/tmp/openrasp
if ! [[ -d "$dest" ]]; then
git clone -b 1.0rc1 https://github.com/baidu/openrasp.git "$dest"
fi
cd "$dest"
# rm -rf /tmp/openrasp/rasp-vue/node_modules/
# Reuse the image's pre-populated node_modules instead of re-downloading
# frontend dependencies on every build. FIX: use "$dest" consistently
# instead of repeating the literal /tmp/openrasp path.
ln -sf /root/node_modules/ "$dest/rasp-vue/"
bash "$dest/build-cloud.sh"
| true
|
8967f384fda194e38834f31b206dadb4b74a8ad4
|
Shell
|
hspin/dotfiles
|
/bin/gfind
|
UTF-8
| 182
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# gfind PATTERN — case-insensitive filename search under the current
# directory, highlighting the match via grep's coloring.
# FIX: argument presence is now checked with $# instead of [[ -z "$@" ]],
# which misfired on an explicit empty argument and is ill-defined for
# multiple arguments.
if [ $# -eq 0 ]; then
echo >&2 "You must supply an argument!"
exit 1
fi
#find . -type f -name $1
# find errors (e.g. permission denied) are discarded; grep re-filters and
# colors. The pattern is quoted and preceded by '--' so it is neither
# glob-expanded nor mistaken for a grep option.
find . 2>/dev/null -iname "*$1*" | grep -i --color=auto -- "$1"
| true
|
0cf85401d43dabd82ae5c2e6f37be586ddd05c37
|
Shell
|
ezyatev/configure-proxy
|
/configure-proxy.sh
|
UTF-8
| 1,416
| 3.671875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Toggle a SOCKS5 proxy for both the shell environment (ALL_PROXY etc.)
# and the GNOME desktop (DConf via gsettings).
# configure proxy
function proxy_on(){
# assumes $PROXY_SERVER, $PROXY_PORT
# are existing environment variables
export ALL_PROXY="socks5://$PROXY_SERVER:$PROXY_PORT"
export SOCKS_PROXY=$ALL_PROXY
export NO_PROXY="localhost,127.0.0.0/8"
gsettings set org.gnome.system.proxy mode 'manual'
gsettings set org.gnome.system.proxy ignore-hosts "['localhost', '127.0.0.0/8']"
gsettings set org.gnome.system.proxy.socks host $PROXY_SERVER
gsettings set org.gnome.system.proxy.socks port $PROXY_PORT
# Show what was configured, for confirmation.
env | grep -i _proxy | sort
echo -e "\nProxy-related environment variables set."
gsettings list-recursively org.gnome.system.proxy
echo -e "\nProxy-related DConf settings set."
# clear
}
# Disable proxy settings
function proxy_off(){
variables=( \
"ALL_PROXY" "SOCKS_PROXY" "NO_PROXY" \
)
for i in "${variables[@]}"
do
unset $i
done
gsettings set org.gnome.system.proxy mode 'none'
gsettings set org.gnome.system.proxy ignore-hosts "['localhost', '127.0.0.1']"
gsettings set org.gnome.system.proxy.socks host ''
gsettings set org.gnome.system.proxy.socks port 0
env | grep -i _proxy | sort
echo -e "\nProxy-related environment variables removed."
gsettings list-recursively org.gnome.system.proxy
echo -e "\nProxy-related DConf settings removed."
}
# Enable proxy settings immediately
# NOTE(review): exported variables only affect this script's process unless
# the file is sourced rather than executed — confirm intended usage.
proxy_on
| true
|
761b50f931d3f77c5fff0372faa4752918692ff9
|
Shell
|
bdo311/chirpseq-analysis
|
/trim/trim_noadapter.sh
|
UTF-8
| 155
| 2.53125
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Usage: trim_noadapter.sh READS.fastq
# Quality-trim a FASTQ file that needs no adapter removal, writing
# <basename>_trimmed.fastq into the current directory.
x=${1%.*}
basename=${x##*/}
fastq_trimmed=${basename}_trimmed.fastq
# fastx_trimmer -f7: keep from base 7 onward; fastq_quality_filter
# -q25 -p80: keep reads where >=80% of bases have quality >=25.
# FIX: read the file directly instead of via a useless 'cat', and quote
# the path expansions so filenames with spaces work.
fastx_trimmer -f7 -Q33 < "$1" | fastq_quality_filter -Q33 -q25 -p80 > "$fastq_trimmed"
| true
|
3e18498a5b3964b3c593eb464661d060e3f97fbc
|
Shell
|
amacmillanparks/hbase-alpine
|
/fix_ingestion_type_2_data.sh
|
UTF-8
| 683
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
# Delete legacy rows from the clerical_matching:matching_task HBase table:
# rows whose value is a JSON document that lacks a "residents" element.
# Go to HBase bin directory
cd /opt/hbase/bin
# Get all the rows in HBase shell and save them to a text file
echo "scan 'clerical_matching:matching_task'" | ./hbase shell > scan_output
# 1. Remove rows from the list that do not contain a json document
# 2. Remove rows from the list that contain a residents element
# 3. Only keep the rowkeys of the remaining rows
# 4. Map these rowkeys into an HBase deleteall command
sed '/.*value={.*/!d; /.*"residents".*/d' scan_output | cut -d ' ' -f2 | sed "s/\(.*\)/deleteall 'clerical_matching:matching_task', '\1'/" > row_delete_commands
# Execute the delete commands in HBase shell
cat row_delete_commands | ./hbase shell
| true
|
c6296e195fe26e1fdff5c00187a1633b614e91f1
|
Shell
|
adarshnin/git-test
|
/codes/prog-4.sh
|
UTF-8
| 199
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# Prompt for a circle's radius, then report its area and circumference.
# bc handles the (possibly fractional) arithmetic.
read -p "Enter the radius of circle " r
pi=3.14
# Area = pi * r^2
area=$(bc <<< "$pi*$r*$r")
echo "Area of circle = $area"
# Circumference = 2 * pi * r
circumf=$(bc <<< "2*$pi*$r")
echo "Circumference of circle = $circumf"
| true
|
e0290c72076cdce66f140b920a58d8852c90b4fc
|
Shell
|
stiletto/nyatools
|
/sbin/version
|
UTF-8
| 770
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/sh
# version — query Slackware-style package metadata in /var/log/packages.
#   -a        list every installed package as "NAME : VERSION"
#   -l        list the raw package file names
#   -c        count installed packages
#   NAME...   show "NAME : VERSION" for packages whose file name matches
# With no arguments, prints ">_>".
# Cleanups vs. the original: $(...) instead of backticks, grep reads the
# file directly instead of through a useless 'cat', and paths are quoted.
case "$1" in
-a)
	for pak in $(ls /var/log/packages/ 2> /dev/null)
	do
		PAK_NAME=$(grep "NAME:" "/var/log/packages/${pak}" | cut -d ":" -f2)
		PAK_VERS=$(grep "VERSION:" "/var/log/packages/${pak}" | cut -d ":" -f2)
		echo "${PAK_NAME} : ${PAK_VERS}"
	done
	;;
-l)
	ls /var/log/packages/
	;;
-c)
	ls /var/log/packages/ | wc -l
	;;
*)
	if [ "$1" != "" ]
	then
		# Each remaining argument is treated as a substring of a
		# package file name; cut -f5 extracts the file name component
		# of /var/log/packages/<name>.
		while [ "$1" != "" ]
		do
			for pak in $(find /var/log/packages/*"$1"* 2> /dev/null | cut -d "/" -f5)
			do
				PAK_NAME=$(grep "NAME:" "/var/log/packages/${pak}" | cut -d ":" -f2)
				PAK_VERS=$(grep "VERSION:" "/var/log/packages/${pak}" | cut -d ":" -f2)
				echo "${PAK_NAME} : ${PAK_VERS}"
			done
			shift
		done
	else
		echo ">_>"
	fi
	;;
esac
| true
|
fa9456dc2239d8349ff9ea49fb82ff408d256a4d
|
Shell
|
nfnty/pkgbuilds
|
/prometheus/prometheus-exporter-node/PKGBUILD
|
UTF-8
| 1,158
| 2.90625
| 3
|
[
"MIT"
] |
permissive
|
# Arch Linux PKGBUILD for the Prometheus node_exporter. makepkg supplies
# $srcdir/$pkgdir and runs prepare/build/package in order.
# shellcheck disable=SC2034,SC2154,SC2164
pkgname=('prometheus-exporter-node')
_srcname='node_exporter'
pkgdesc='Prometheus exporter for machine metrics'
pkgver='0.18.1'
_commit='3db77732e925c08f675d7404a8c46466b2ece83e'
pkgrel='1'
arch=('i686' 'x86_64')
url="https://github.com/prometheus/${_srcname}"
license=('Apache')
makedepends=('git' 'go')
source=(
"${_srcname}::git+${url}.git#commit=${_commit}"
'prometheus-exporter-node.service'
)
sha512sums=(
'SKIP'
'SKIP'
)
install='install.sh'
# Repository URL without the scheme, used as the GOPATH import path.
_url_go="${url#*//}"
# Move the git checkout into a GOPATH-shaped tree (pre-modules Go layout).
prepare() {
cd "${srcdir}"
export GOPATH="${srcdir}/gopath"
mkdir --parents "${GOPATH}/src/${_url_go}"
rm --recursive "${GOPATH}/src/${_url_go}"
mv "${_srcname}" "${GOPATH}/src/${_url_go}"
}
# Build via the project's own Makefile.
build() {
cd "${srcdir}"
export GOPATH="${srcdir}/gopath"
cd "${GOPATH}/src/${_url_go}"
make build
}
# Install the binary and the systemd unit into the package root.
package() {
cd "${srcdir}"
export GOPATH="${srcdir}/gopath"
install -D "${GOPATH}/src/${_url_go}/node_exporter" "${pkgdir}/usr/bin/prometheus-exporter-node"
install -D --mode='644' "${srcdir}/prometheus-exporter-node.service" \
"${pkgdir}/usr/lib/systemd/system/prometheus-exporter-node.service"
}
| true
|
fc3d7d2e23fefcf29b9d6904faace215b6f3947f
|
Shell
|
nikonji/random1_10
|
/random1_10.sh
|
UTF-8
| 216
| 2.703125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
###############
# Print a short banner, the numbers 1..10 in random order (one per line,
# via shuf), and a friendly sign-off.
printf '\n'
printf '%s\n' "Welcome to Adjust_Task1"
printf '%s\n' 'This is a script that writes the numbers from 1 - 10 in random order.'
printf '\n'
shuf -i 1-10
printf '\n'
printf '%s\n' "Enjoy your day!"
printf '\n'
| true
|
40b38eaf4273a5e7c488a4c94cf01717f2c009ed
|
Shell
|
whj0401/numopt_preprocess
|
/klee_output/run_klee.sh
|
UTF-8
| 207
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/bash
# Usage: run_klee.sh FILE.bc
# Run KLEE symbolic execution on one LLVM bitcode file, then keep the
# resulting expression dump next to the input as FILE.expr.
bc_file=$1
#KLEE_DIR=/home/whj/klee
KLEE_DIR=/home/whj/myself_klee/klee
# Division-by-zero and overshift checks are disabled.
# FIX: quote the path expansions so bitcode paths with spaces work.
"$KLEE_DIR/Release+Asserts/bin/klee" -check-div-zero=0 -check-overshift=0 "$bc_file"
# KLEE writes expression.txt into the current directory.
mv expression.txt "${bc_file%.bc}.expr"
|
26f00cd385eade9e61819e7349c40808f3555ce0
|
Shell
|
davidneu/docker-nginx-ssl-reverse-proxy-example
|
/myapp_nginx_ssl_reverse_proxy
|
UTF-8
| 1,089
| 3.84375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Lifecycle wrapper around the myapp nginx SSL reverse-proxy container:
# build the image, run/stop the container, or tear everything down.
set -o errexit
set -o nounset
usage="Usage: myapp_nginx_ssl_reverse_proxy {build | up | down | clean}"
if [ $# -eq 1 ]; then
  cmd=$1
  case $cmd in
    # myapp_nginx_ssl_reverse_proxy build
    build )
      echo 'build ...'
      docker build --force-rm -t myapp_nginx_ssl_reverse_proxy_image:latest .
      ;;
    # myapp_nginx_ssl_reverse_proxy up
    up )
      echo 'up ...'
      docker run -d --rm --name myapp_nginx_ssl_reverse_proxy_container -p 443:443 -p 80:80 --network host myapp_nginx_ssl_reverse_proxy_image:latest
      ;;
    # myapp_nginx_ssl_reverse_proxy down
    down )
      echo 'down ...'
      docker container stop myapp_nginx_ssl_reverse_proxy_container
      ;;
    # myapp_nginx_ssl_reverse_proxy clean
    clean )
      echo 'clean ...'
      # Best-effort teardown: keep going even if pieces are already gone.
      set +e
      docker container stop myapp_nginx_ssl_reverse_proxy_container
      docker container rm myapp_nginx_ssl_reverse_proxy_container --volumes
      docker image rm myapp_nginx_ssl_reverse_proxy_image:latest
      ;;
    * )
      # FIX: usage is quoted so it is printed verbatim, not word-split.
      # NOTE: exits 0 even on an unknown command, as before.
      echo "$usage"
      ;;
  esac
else
  echo "$usage"
fi
| true
|
c5d61da6ece9be21e82b5335a002d7fefcf05cd0
|
Shell
|
Ralitsa-Vuntsova/os
|
/labs/Shell/05/05-b-2000.sh
|
UTF-8
| 72
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/bash
# Prompt for a string and greet it.
# FIX: read -r, so backslashes in the input are kept literally instead of
# being interpreted as escapes (SC2162).
read -r -p "Please, enter string: " VAR
echo "Hello, ${VAR}!"
| true
|
7ffa8b28c67b56cd86e08bc196adc28f2c16ca9e
|
Shell
|
jaredballou/linuxgsm
|
/functions/install_ut2k4_key.sh
|
UTF-8
| 514
| 3.3125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# LGSM install_ut2k4_key.sh function
# Author: Daniel Gibbs
# Website: http://gameservermanagers.com
# Prompts for a UT2004 CD key and writes it as "CDKey"="<key>" into
# ${systemdir}/cdkey. ${gamename} and ${systemdir} are expected to be set
# by the sourcing LGSM script.
echo ""
echo "Enter ${gamename} CD Key"
echo "================================="
sleep 1
echo "To get your server listed on the Master Server list"
echo "you must get a free CD key. Get a key here:"
echo "http://www.unrealtournament.com/ut2004server/cdkey.php"
echo ""
echo "Once you have the key enter it below"
echo -n "KEY: "
read CODE
# The nested escaping below produces a literal line: "CDKey"="<CODE>"
echo ""\""CDKey"\""="\""${CODE}"\""" > "${systemdir}/cdkey"
echo ""
|
59de7d6b328a87f1e48f576e2d7e744c2cdbbe9f
|
Shell
|
KrasnitzLab/NormaFlyze
|
/scripts/map_paired_fastq.sh
|
UTF-8
| 1,452
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
# SGE batch job: map paired-end reads with bowtie onto a hybrid index.
# The #$ lines below are qsub directives (queues, 2 threads, 8G vmem, log).
#$ -q "all.q@wigclust1[7-9]","all.q@wigclust2[0-4]"
#$ -pe threads 2
#$ -l vf=8G
#$ -o /mnt/wigclust1/data/safe/kostic/output/output_paired_real.out -j y
export BOWTIE_PATH=/mnt/wigclust1/data/safe/kostic/bowtie-1.2.1.1
export DATA_PATH=/mnt/wigclust1/data/safe/kostic/cut_sites
export RESULTS_PATH=/mnt/wigclust1/data/safe/kostic/bin_mapping
# bowtie looks up index files relative to this directory.
export BOWTIE_INDEXES=$BOWTIE_PATH/indexes
cd $DATA_PATH
# see manual for more: http://bowtie-bio.sourceforge.net/manual.shtml#the--n-alignment-mode
# -q for fastq formatted read file
# -S causes the alignments to be printed in SAM format
# -t outputs time for each phase
# -p <int> launches parallel search threads
# -n <int1> -e <int2> allows int1 mismatches in the seed and a sum of int2 for quality scores of mismatches at any other location
# -m 1 for unique alignments
#paired-end read options
# -1 and -2 for paired end read files
# -X <int> gives the maximum insert size for paired end read alignment (default 250)
# -I <int> gives the minimum insert size for paired end read alignment (default 0)
# --allow-contain allows mapping of paired end reads that overlap
#bowtie [options] <index base name> <-1 fastq1 -2 fastq2> <output file>
$BOWTIE_PATH/bowtie -q -S -t -p 2 -n 2 -e 200 --chunkmbs 256 -m 1 -X 800 --best --strata --allow-contain hybrid_index -1 $DATA_PATH/CATG_paired_150bp_07_13_fastq_1.fq -2 $DATA_PATH/CATG_paired_150bp_07_13_fastq_2.fq $RESULTS_PATH/paired_7_13.sam
| true
|
c477eab9d1dec84499aa4d8912ebc00df540a5a6
|
Shell
|
arthurgeek/dotfiles
|
/macos/security.sh
|
UTF-8
| 1,179
| 2.53125
| 3
|
[] |
no_license
|
# macOS hardening: firewall, login, remote access and screen-lock tweaks.
# Most commands require sudo and take effect immediately.
# Based on:
# https://github.com/drduh/macOS-Security-and-Privacy-Guide
# https://benchmarks.cisecurity.org/tools2/osx/CIS_Apple_OSX_10.12_Benchmark_v1.0.0.pdf
# Enable firewall. Possible values:
#   0 = off
#   1 = on for specific sevices
#   2 = on for essential services
sudo /usr/libexec/ApplicationFirewall/socketfilterfw --setglobalstate on
# Enable firewall logging
sudo /usr/libexec/ApplicationFirewall/socketfilterfw --setloggingmode on
# Enable stealth mode
sudo /usr/libexec/ApplicationFirewall/socketfilterfw --setstealthmode on
# Do not show password hints
sudo defaults write /Library/Preferences/com.apple.loginwindow RetriesUntilHint -int 0
# Disable remote login
sudo systemsetup -setremotelogin off
# Disable guest account login
sudo defaults write /Library/Preferences/com.apple.loginwindow GuestEnabled -bool false
# Automatically lock the login keychain for inactivity after 6 hours
security set-keychain-settings -t 21600 -l ~/Library/Keychains/login.keychain
# Require password immediately after sleep or screen saver begins
defaults write com.apple.screensaver askForPassword -int 1
defaults write com.apple.screensaver askForPasswordDelay -int 0
|
c3575b599f7c426998e6139417d7978c23aa17ff
|
Shell
|
rnabioinfor/TRAC-Seq
|
/ARM-Seq/TestRun.bash
|
UTF-8
| 1,111
| 3.09375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# ARM-Seq test pipeline: trim adapters, build the tRNA database, then map
# reads. Companion scripts live next to this file.
#Remove adapters from small RNA sequencing studies
echo "Removing sequencing adapters from reads"
# -m 15 drops reads shorter than 15nt after adapter removal.
cutadapt -m 15 --adapter='TCGTATGCCGTCTTCT' SRR029131.fastq | gzip -c >SRR029131_trimmed.fastq.gz
cutadapt -m 15 --adapter='TCGTATGCCGTCTTCT' SRR029124.fastq | gzip -c >SRR029124_trimmed.fastq.gz
cutadapt -m 15 --adapter='CGTATGCCGTCT' SRR207111.fastq | gzip -c >SRR207111_trimmed.fastq.gz
cutadapt -m 15 --adapter='CGTATGCCGTCT' SRR207116.fastq | gzip -c >SRR207116_trimmed.fastq.gz
# Resolve this script's real directory (through symlinks) so the helper
# scripts can be found. FIX: "$0" is quoted so paths with spaces work.
REALNAME=$(readlink -f "$0")
SCRIPTDIR=$( cd "$( dirname "$REALNAME" )" && pwd )
#Create the tRNA database
# Params
# 1. tRNA database name
# 2. tRNAscan-SE output file
# 3. Fasta file of reference genome
echo "Creating tRNA database"
"$SCRIPTDIR/maketrnadb.bash" hg19 hg19-tRNAs.out hg19.fa
#Map the tRNAreads
# Params
# 1. Name of experiments
# 2. tRNA database name
# 3. Tab-delimited file specifying the samples and fastq files
# 4. Number of threads for running bowtie2
echo "Mapping reads to tRNA database"
"$SCRIPTDIR/mapreads.bash" TestTrnas hg19 TrnaSamples.txt hg19-nontRNA-ncRNA.gtf 4
| true
|
1f4cb334cd87a585861f3aa9c20d9293540ce815
|
Shell
|
grammarly/focal
|
/scripts/bump-version.sh
|
UTF-8
| 1,221
| 3.171875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Bump both focal packages to the version given as $1, rewrite the
# dependent package.json files, rebuild, test, and commit the result.
# NOTE: "sed -i ''" is the BSD/macOS form of in-place editing; this script
# will not run unmodified with GNU sed.
# exit when any command fails
set -e
VER=$1
echo Bumping version to v${VER}...
cd packages/focal && yarn version --new-version $VER --no-git-tag-version && cd ../..
# Point every consumer's "@grammarly/focal" dependency at the new version.
sed -i '' 's/grammarly\/focal":.*"\(.*\)$/grammarly\/focal": "'${VER}'"\1/g' packages/examples/all/package.json
sed -i '' 's/grammarly\/focal":.*"\(.*\)$/grammarly\/focal": "'${VER}'"\1/g' packages/examples/todomvc/package.json
sed -i '' 's/grammarly\/focal":.*"\(.*\)$/grammarly\/focal": "'${VER}'"\1/g' packages/test/package.json
echo Bumping version to v${VER}...
cd packages/focal-atom && yarn version --new-version $VER --no-git-tag-version && cd ../..
# focal itself depends on focal-atom with a caret range; the rest are exact.
sed -i '' 's/grammarly\/focal-atom":.*"\(.*\)$/grammarly\/focal-atom": "^'${VER}'"\1/g' packages/focal/package.json
sed -i '' 's/grammarly\/focal-atom":.*"\(.*\)$/grammarly\/focal-atom": "'${VER}'"\1/g' packages/examples/all/package.json
sed -i '' 's/grammarly\/focal-atom":.*"\(.*\)$/grammarly\/focal-atom": "'${VER}'"\1/g' packages/examples/todomvc/package.json
sed -i '' 's/grammarly\/focal-atom":.*"\(.*\)$/grammarly\/focal-atom": "'${VER}'"\1/g' packages/test/package.json
# update yarn.lock
yarn && yarn build && yarn test
# commit changes
git add .
git commit -m "v$VER"
| true
|
e5c75c53d07641f647c729a6b182da83640637bb
|
Shell
|
dfed/Floatation
|
/Scripts/ci.sh
|
UTF-8
| 589
| 2.828125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -l
# CI entry point, dispatched on $ACTION:
#   xcode    — xcodebuild the scheme ($SCHEME/$SDK/$XCODE_ACTION, with an
#              optional $DESTINATION)
#   pod-lint — lint the podspec against $SWIFT_VERSION
set -ex
# FIX: [[ with a quoted, defaulted expansion — the original
# '[ $ACTION == "xcode" ]' was a test(1) syntax error whenever ACTION was
# unset or contained spaces.
if [[ "${ACTION:-}" == "xcode" ]]; then
if [ -n "$DESTINATION" ]; then
xcodebuild -UseModernBuildSystem=NO -workspace Floatation.xcworkspace -scheme "$SCHEME" -sdk $SDK -destination "$DESTINATION" -configuration Debug -PBXBuildsContinueAfterErrors=0 $XCODE_ACTION
else
xcodebuild -UseModernBuildSystem=NO -workspace Floatation.xcworkspace -scheme "$SCHEME" -sdk $SDK -configuration Debug -PBXBuildsContinueAfterErrors=0 $XCODE_ACTION
fi
fi
if [[ "${ACTION:-}" == "pod-lint" ]]; then
bundle exec pod lib lint --verbose --fail-fast --swift-version=$SWIFT_VERSION
fi
| true
|
ec6110fb4937fcc96452242f0157605b93174cbc
|
Shell
|
payano/SBC_NAS
|
/install/services/easy-rsa.sh
|
UTF-8
| 1,028
| 3.078125
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Install easy-rsa/openssl and (eventually) bootstrap a local CA plus a
# server certificate for nextcloud.local.
# NOTE(review): the 'exit 0' right below makes everything after it dead
# code — the CA/cert steps appear to be kept for manual use or future work.
echo installing easy-rsa
sudo apt install -y easy-rsa openssl
exit 0
OLDDIR=$(pwd)
echo "user $(whoami) will have the root ca..."
# Set up a private easy-rsa working dir seeded from the system templates.
mkdir ~/easy-rsa
ln -s /usr/share/easy-rsa/* ~/easy-rsa/
chmod 700 ~/easy-rsa
cd ~/easy-rsa
./easyrsa init-pki
cd ~/easy-rsa
# CA identity defaults written to easy-rsa's vars file.
echo -e 'set_var EASYRSA_REQ_COUNTRY "SE"
set_var EASYRSA_REQ_PROVINCE "Stockholm"
set_var EASYRSA_REQ_CITY "Stockholm"
set_var EASYRSA_REQ_ORG "nas-server"
set_var EASYRSA_REQ_EMAIL "admin@example.com"
set_var EASYRSA_REQ_OU "Community"
set_var EASYRSA_ALGO "ec"
set_var EASYRSA_DIGEST "sha512"' > vars
echo | ./easyrsa build-ca nopass
cd $OLDDIR
exit 0
#cert for nextcloud
mkdir ~/csr
cd ~/csr
openssl genrsa -out nextcloud.local.key
openssl req -new -key nextcloud.local.key -out nextcloud.local.req -subj \
/C=SE/ST=Stockholm/L=Stockholm/O=nas-server/OU=Community/CN=nextcloud.local
cd ~/easy-rsa
./easyrsa import-req ../csr/nextcloud.local.req nextcloud
echo yes | ./easyrsa sign-req server nextcloud
cd $OLDDIR
| true
|
88243f8c849297efdeafe23153df1c6063fa821f
|
Shell
|
myx/os-myx.common-freebsd
|
/host/tarball/share/myx.common/bin/os/growSlashFsUfs
|
UTF-8
| 3,053
| 3.546875
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
##### !!! THIS SCRIPT MUST BE OVERRIDEN IN OS-DEPENDENT IMPLEMENTATION !!! #####
OsGrowSlashFsUfsUsedSize(){
local PART="$1"
gpart show $PART | grep 'freebsd-ufs' | awk '{print $5}' | grep G | sed 's/[^0-9\.]*//g'
}
OsGrowSlashFsUfsFreeSize(){
local PART="$1"
echo `gpart show $PART | grep ' free -' | awk '{print $6}' | grep G | sed 's/[^0-9\.]*//g'` 0 | awk '{print $1}'
}
# Grow the root UFS filesystem (FreeBSD) so it fills newly available disk space,
# e.g. after enlarging a VM disk image.
# Arguments (all optional; auto-detected when empty):
#   $1 DISK - geom disk name (e.g. vtbd0, da0)
#   $2 PART - partitioned provider holding the filesystem (e.g. vtbd0s1)
#   $3 INDX - gpart index of the freebsd-ufs partition inside $PART
#   $4 SWAP - gpart index of the freebsd-swap partition inside $PART
# Returns 0 on success, 1 on detection/validation failure.
# Relies on OsGrowSlashFsUfsUsedSize / OsGrowSlashFsUfsFreeSize (defined
# elsewhere in this file) and on the FreeBSD growfs rc service.
OsGrowSlashFsUfs(){
  local DISK="$1"
  local PART="$2"
  local INDX="$3"
  local SWAP="$4"
  set -e
  # No disk/partition given: probe common device names (virtio vtbd0, SCSI/SATA
  # da0) and recurse with the first provider that carries a freebsd-ufs partition.
  if [ "$DISK" = "" ] || [ "$PART" = "" ] ; then
    local DISKS="$DISK vtbd0 da0"
    for DSK in $DISKS ; do
      if [ "`geom disk status -s $DSK`" != "" ] ; then
        # Prefer the first MBR slice (<disk>s1) over the raw disk.
        local PARTS="${DSK}s1 $DSK"
        for PRT in $PARTS ; do
          if [ "`gpart status -s $PRT`" != "" ] && [ "`gpart show $PRT | grep freebsd-ufs`" != "" ] ; then
            OsGrowSlashFsUfs $DSK $PRT
            return 0;
          fi
        done
      fi
    done
    echo "ERROR: Failed: can't detect device and partition (yet)" >&2
    return 1;
  fi
  # Validate the explicitly supplied disk and partition before touching anything.
  if [ "`geom disk status -s $DISK`" = "" ] ; then
    echo "ERROR: Failed: specified disk is invalid: $DISK" >&2
    return 1;
  fi
  if [ "`gpart status -s $PART`" = "" ] || [ "`gpart show $PART | grep freebsd-ufs`" = "" ] ; then
    echo "ERROR: Failed: specified part is invalid (or does't contain UFS filesystem): $PART" >&2
    return 1;
  fi
  # NOTE(review): assumes the partition index is column 3 of `gpart show`
  # output — confirm against the target FreeBSD release.
  if [ "$INDX" = "" ] ; then
    INDX="`gpart show $PART | grep freebsd-ufs | awk '{print $3}'`"
  fi
  if [ "$SWAP" = "" ] ; then
    SWAP="`gpart show $PART | grep freebsd-swap | awk '{print $3}'`"
  fi
  echo "Grow: disk: $DISK, part: $PART, indx: $INDX, swap: $SWAP" >&2
  if [ "$INDX" = "" ] ; then
    echo "ERROR: Failed: can't detect partition index (yet?)" >&2
    return 1;
  fi
  # debugflags=16 permits writing to GEOM providers that are in use (the
  # filesystem being grown is mounted); restored to 0 at the end.
  sysctl -w kern.geom.debugflags=16
  sysctl -w kern.geom.part.auto_resize=1
  # Rescan buses so the kernel notices the enlarged device.
  camcontrol rescan all
  devctl rescan pci0
  # Fix up partition-table metadata (e.g. GPT backup header position), then
  # grow the slice on the disk and the partition inside the slice.
  gpart recover $DISK
  gpart resize -i $INDX $DISK
  gpart resize -i $INDX $PART
  local SIZE_NOW="`OsGrowSlashFsUfsUsedSize $PART`"
  echo "Current size: ${SIZE_NOW}G" >&2
  # Swap sits after the UFS partition: drop it, grow UFS into the freed +
  # new space (minus ~1G kept for swap), then recreate and re-enable swap.
  if [ "$SWAP" != "" ] ; then
    swapoff -a || true
    gpart delete -i $SWAP $PART
    ## TODO: calculate new size and check that it is bigger than current
    local SIZE_AVL="`OsGrowSlashFsUfsFreeSize $PART`"
    echo "Free size: ${SIZE_AVL}G" >&2
    # New UFS size in whole gigabytes: current + free - 1 (bc truncates x/1).
    local SIZE_NEW="`echo "x = $SIZE_NOW + $SIZE_AVL - 1; scale=0; x/1" | bc`"
    echo "New size: ${SIZE_NEW}G" >&2
    gpart resize -i $INDX -s ${SIZE_NEW}G $PART || true
    gpart add -t freebsd-swap -a 4k $PART
    swapon -a
  fi
  # No swap partition: just grow UFS to the end of the provider.
  # NOTE(review): resize is issued twice — presumably to pick up space made
  # available by the first call; confirm whether one call suffices.
  if [ "$SWAP" = "" ] ; then
    gpart resize -i $INDX $PART
    gpart resize -i $INDX $PART
  fi
  #gpart commit $PART
  #gpart commit $DISK
  # Let the rc growfs service expand the filesystem itself to the new
  # partition size, then drop the debug flag.
  service growfs onestart
  sysctl -w kern.geom.debugflags=0
  return 0;
}
# Entry-point dispatch: the body runs only when this file is invoked directly
# as .../myx.common/bin/os/growSlashFsUfs (it may also be sourced as a library).
case "$0" in
  */myx.common/bin/os/growSlashFsUfs)
    case "$1" in
      --help)
        echo "Syntax: myx.common os/growSlashFsUfs --yes" >&2
        echo "   or: myx.common os/growSlashFsUfs diskName sliceName [ufs-partition-index [swap-partition-index]]" >&2
        exit 1
        ;;
      --yes)
        # Auto-detect everything; abort on the first failure.
        set -e
        OsGrowSlashFsUfs
        exit 0
        ;;
      *)
        # Forward explicit disk/slice/index arguments.
        set -e
        OsGrowSlashFsUfs "$@"
        ;;
    esac
  ;;
esac
| true
|
9524cfec849080b377c83ed5ec9aac1143c2c5b0
|
Shell
|
fyrier/CAP-MPI
|
/exec_mach_1x1.sh
|
UTF-8
| 1,454
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
# Benchmark driver: runs ./gs_mpi for every combination of execution mode,
# node count, process count and matrix size, repeating each combination
# max_execs times via mpiexec with the machines_1x1.txt machine file.
processes=(1 2 16 64)
nodes=(1 2 8)
exec_modes=(0 1)
matrix_sizes=(2050 4098)
max_execs=5
for exec_mode in "${exec_modes[@]}"
do
  echo "-------------------------------"
  echo "| Execution mode $exec_mode |"
  echo "-------------------------------"
  for num_nodes in "${nodes[@]}"
  do
    echo "-------------------------------"
    echo "| Number of nodes $num_nodes |"
    echo "-------------------------------"
    for num_processes in "${processes[@]}"
    do
      echo "--------------------------------------"
      echo "| Number of processes $num_processes |"
      echo "--------------------------------------"
      for size in "${matrix_sizes[@]}"
      do
        echo "-------------------------------"
        echo "| Matrix size $size |"
        echo "-------------------------------"
        num_exec=0
        while [[ $num_exec -lt $max_execs ]]
        do
          echo "-------------------------------"
          echo "| Execution number $num_exec |"
          echo "-------------------------------"
          # BUG FIX: the original `ppn=$num_processes/$num_nodes` was a plain
          # string assignment, so mpiexec received a literal like "64/8".
          # Arithmetic expansion performs the intended integer division.
          ppn=$((num_processes / num_nodes))
          # NOTE(review): ppn is 0 when processes < nodes (e.g. 1/2), and -np
          # is given the node count rather than the process count — confirm
          # both are intended.
          mpiexec -f machines_1x1.txt -np $num_nodes -ppn $ppn ./gs_mpi $size $exec_mode
          num_exec=$(( num_exec+1 ))
        done
      done
    done
  done
done
| true
|
b636557e6bd5c342fbd0c410142ce20bfa251507
|
Shell
|
whg/fabricate
|
/build/install_core.sh
|
UTF-8
| 291
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# Package the current directory (excluding content/) and deploy it to the
# remote host, unpacking it into ~/wgallia there.
# FIX: abort on the first failed step — the original kept going, so a failed
# tar or scp still ran rm and the remote unpack.
set -euo pipefail
tempfile="site.tar.gz"
tempplace="/tmp/"
file="$tempplace$tempfile"
# Build the archive of the working directory, leaving out the content/ tree.
tar --exclude content/ -czf "$file" .
echo "created archive"
scp "$file" whg@fezz.in:wgallia/
echo "uploaded file"
# Local copy is no longer needed once the upload succeeded.
rm "$file"
# Unpack on the host and remove the uploaded archive there.
ssh whg@fezz.in "cd wgallia; tar xf $tempfile; rm $tempfile"
echo "unpacked and made at host"
| true
|
88785cf4184558bb6d7adb407cd0385e36e91428
|
Shell
|
iksadNorth/shell-scripts
|
/scripts/015_use_variable_and_function_from_external_file.sh
|
UTF-8
| 664
| 2.984375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Read env.sh with the dot command: the sourced file's commands run in the
# current shell, as if its text were pasted in here.
# In bash, `.` and `source` do the same thing.
# env.sh defines the WORK_DIR variable and the nowtime() function.
. ./env.sh
nowtime
cp -i -v large-file.tar.gz "$WORK_DIR"
nowtime
# Dot-sourcing introduces a file dependency; each team should agree on when
# to use it. The sourced file is easy to relocate, so the dependency is
# manageable.
# If the target file is missing this errors out; better to check existence
# with -f first.
# Example: [ -f /etc/sysconfig/sshd ] && . /etc/sysconfig/sshd
| true
|
7487b16eae8c81bb117ed3bd53c71ec899beb5e9
|
Shell
|
rideliner/dotfiles-old
|
/terminal/functions.zsh
|
UTF-8
| 1,014
| 3.75
| 4
|
[
"BSL-1.0"
] |
permissive
|
# Create a directory and immediately change into it.
function mcd() {
  mkdir "$1" || return
  cd "$1"
}
# Encrypt a file with AES-256-CBC (openssl prompts for the password);
# the output is written alongside the input as "<file>.aes".
function aes-enc() {
  # FIX: quote $1 so filenames containing spaces or glob characters work.
  openssl enc -aes-256-cbc -e -in "$1" -out "$1.aes"
}
# Decrypt an AES-256-CBC file produced by aes-enc; the output name is the
# input with its last extension stripped (foo.txt.aes -> foo.txt).
function aes-dec() {
  # FIX: quote $1 so filenames containing spaces or glob characters work.
  openssl enc -aes-256-cbc -d -in "$1" -out "${1%.*}"
}
# Saturate the CPU: launch eight endless `yes` writers in the background,
# each discarding its output. Undo with clearcpu.
function maxcpu() {
  local sink=/dev/null
  local n
  for n in 1 2 3 4 5 6 7 8; do
    yes > "$sink" &
  done
}
# Stop the CPU burners started by maxcpu by killing every `yes` process
# (this also kills any unrelated `yes` processes the user owns).
function clearcpu() {
  killall yes
}
# xclip shortcuts for clipboard
# zsh-only guard: $+commands[xclip] is 1 when xclip exists in the command
# table, so these helpers are defined only if xclip is installed.
if (( $+commands[xclip] )); then
  # Pipe stdin into the X11 clipboard selection.
  function cb() {
    xclip -selection c
  }
  # Copy parameters string
  function cbe() {
    echo "$*" | cb
  }
  # Copy contents of a file
  function cbf() {
    cat "$1" | cb
  }
  # Copy current working directory
  alias cbwd="pwd | cb"
  # Copy most recent command in bash history
  # NOTE(review): the double quotes expand $HISTFILE when the alias is
  # *defined*, not when it runs — confirm that is intentional (single quotes
  # would defer expansion to use time).
  alias cbhs="cat $HISTFILE | tail -n 1 | cb"
fi
# Define a vnc helper only when vncviewer is installed (zsh $+commands guard).
if (( $+commands[vncviewer] )); then
  function vnc() {
    # "$*" joins all arguments into a single word.
    # NOTE(review): presumably called with one host[:display] argument;
    # multiple arguments would be glued together — confirm intent.
    vncviewer DotWhenNoCursor=1 "$*"
  }
fi
# Define vnc/server only when x0vncserver is installed (zsh $+commands guard).
# A slash in a function name is legal in zsh but not in POSIX sh/bash.
if (( $+commands[x0vncserver] )); then
  function vnc/server() {
    # Share the already-running X display :0, authenticating clients
    # against the password file in ~/.vnc/passwd.
    x0vncserver -display :0 -passwordfile ~/.vnc/passwd
  }
fi
| true
|
1068c23e5342e6840ae98cd200c096c053b9496b
|
Shell
|
sandia-proj/SETGen
|
/scripts/VMconnect.sh
|
UTF-8
| 6,193
| 3.875
| 4
|
[] |
no_license
|
#!/bin/bash
# VMconnect.sh — copy files to, and run scripts on, minimega-managed VMs
# over SSH, in several argument-driven modes.
# Refuse to run without root privileges.
if (( EUID != 0 ))
then
  echo "Please run as root"
  exit
fi
# Every supported invocation takes between 1 and 4 arguments;
# anything else prints the usage summary and stops.
if (( $# < 1 || $# > 4 ))
then
  echo "Usage:"
  echo "./VMconnect.sh #_of_VMs"
  echo "./VMconnect.sh #_of_VMs -copy <path_to_file>"
  echo "./VMconnect.sh USERNAME_FILE PASSWD_FILE"
  echo "./VMconnect.sh -copy <path_to_file> USERNAME_FILE PASSWD_FILE"
  echo "./VMconnect.sh #_of_VMs -run <path_to_script>"
  echo "./VMconnect.sh -run <path_to_script> USERNAME_FILE PASSWD_FILE"
  exit
fi
# Mode 1: ./VMconnect.sh #_of_VMs
# Install the wrapper on the first N running VMs, assuming default
# credentials vmN (username == password).
if [[ $# == 1 ]]
then
  for (( i=1; i <= $1; i++ ))
  do
    # Row i+1 of `minimega -e vm info` corresponds to VM i (row 1 is the header).
    let row=$i+1
    # NOTE(review): assumes the VM state is column 7 of the info output —
    # confirm against the installed minimega version.
    state=$(minimega -e vm info | awk 'NR=='$row'{print $7}')
    if [[ "$state" != "RUNNING" ]]
    then
      continue
    fi
    # NOTE(review): assumes the IP is column 27, printed with surrounding
    # brackets; ${ip:1:-1} strips the first and last character.
    ip=$(minimega -e vm info | awk 'NR=='$row'{print $27}')
    HOST=$(echo ${ip:1:-1})
    echo
    echo "Installing the Wrapper in $HOST"
    echo
    # Default credentials: username and password are both "vmN".
    USERNAME=$(echo vm$i)
    # Run the installer with sudo, feeding the password on stdin via sudo -S.
    SCRIPT="chmod +x WrapperInstaller.sh; echo $USERNAME | sudo -S ./WrapperInstaller.sh"
    # Host-key checking is disabled because VM host keys are throwaway.
    sshpass -p "$USERNAME" scp -q -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null scripts/WrapperInstaller.sh $USERNAME@$HOST:
    if [[ $? -eq 0 ]]
    then
      sshpass -p "$USERNAME" ssh -q -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -t -l ${USERNAME} ${HOST} "${SCRIPT}"
    else
      echo "Invalid Username/Password for $HOST"
    fi
  done
  exit
fi
# Mode 2: ./VMconnect.sh USERNAME_FILE PASSWD_FILE
# Install the wrapper on every running VM, pairing line i of the username
# file with line i of the password file.
if [[ $# == 2 ]]
then
  numU=$(wc -l < $1)
  numP=$(wc -l < $2)
  # The two credential files must pair up line by line.
  if [[ $numU != $numP ]]
  then
    echo
    echo "The number of Usernames and Passwords are mismatched. Please verify the files."
    echo "Exiting to main menu..."
    exit
  fi
  for (( i=1; i <= $numU; i++ ))
  do
    # Row i+1 of `minimega -e vm info` corresponds to VM i (row 1 is the header).
    let row=$i+1
    # NOTE(review): assumes state is column 7 and IP is column 27 of the
    # minimega output — confirm against the installed minimega version.
    state=$(minimega -e vm info | awk 'NR=='$row'{print $7}')
    if [[ "$state" != "RUNNING" ]]
    then
      continue
    fi
    ip=$(minimega -e vm info | awk 'NR=='$row'{print $27}')
    # ${ip:1:-1} strips the surrounding brackets from the reported address.
    HOST=$(echo ${ip:1:-1})
    echo
    echo "Installing the Wrapper in $HOST"
    echo
    USERNAME=$(cat $1 | awk 'NR=='$i'{print $1}')
    PASSWORD=$(cat $2 | awk 'NR=='$i'{print $1}')
    # Run the installer with sudo, feeding the password on stdin via sudo -S.
    SCRIPT="chmod +x WrapperInstaller.sh; echo $PASSWORD | sudo -S ./WrapperInstaller.sh"
    sshpass -p "$PASSWORD" scp -q -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null scripts/WrapperInstaller.sh $USERNAME@$HOST:
    if [[ $? -eq 0 ]]
    then
      # CONSISTENCY FIX: added -o UserKnownHostsFile=/dev/null (every other
      # ssh/scp call in this script uses it) so throwaway VM host keys are
      # not written into root's known_hosts.
      sshpass -p "$PASSWORD" ssh -q -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -t -l ${USERNAME} ${HOST} "${SCRIPT}"
    else
      echo "Invalid Username/Password. Exiting to main menu..."
      exit
    fi
  done
  exit
fi
# Mode 3: ./VMconnect.sh #_of_VMs -copy <path_to_file>
# Copy one file to the first N running VMs using default vmN credentials.
if [[ $# == 3 && $2 == "-copy" ]]
then
  for (( i=1; i <= $1; i++ ))
  do
    # Row i+1 of `minimega -e vm info` corresponds to VM i (row 1 is the header).
    let row=$i+1
    # NOTE(review): assumes state is column 7 and IP is column 27 of the
    # minimega output — confirm against the installed minimega version.
    state=$(minimega -e vm info | awk 'NR=='$row'{print $7}')
    if [[ "$state" != "RUNNING" ]]
    then
      continue
    fi
    ip=$(minimega -e vm info | awk 'NR=='$row'{print $27}')
    # ${ip:1:-1} strips the surrounding brackets from the reported address.
    HOST=$(echo ${ip:1:-1})
    echo
    echo
    # Default credentials: username and password are both "vmN".
    USERNAME=$(echo vm$i)
    sshpass -p "$USERNAME" scp -q -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $3 $USERNAME@$HOST:
    if [[ $? -eq 0 ]]
    then
      echo
    else
      echo "Invalid Username/Password for $HOST. Exiting to main menu..."
      exit
    fi
  done
  exit
fi
# Mode 4: ./VMconnect.sh #_of_VMs -run <path_to_script>
# Copy a script to the first N running VMs (default vmN credentials) and
# execute it there with sudo.
if [[ $# == 3 && $2 == "-run" ]]
then
  for (( i=1; i <= $1; i++ ))
  do
    # Row i+1 of `minimega -e vm info` corresponds to VM i (row 1 is the header).
    let row=$i+1
    # NOTE(review): assumes state is column 7 and IP is column 27 of the
    # minimega output — confirm against the installed minimega version.
    state=$(minimega -e vm info | awk 'NR=='$row'{print $7}')
    if [[ "$state" != "RUNNING" ]]
    then
      continue
    fi
    ip=$(minimega -e vm info | awk 'NR=='$row'{print $27}')
    # ${ip:1:-1} strips the surrounding brackets from the reported address.
    HOST=$(echo ${ip:1:-1})
    echo
    echo "Running the script in $HOST"
    echo
    scriptname=$(basename $3)
    # Default credentials: username and password are both "vmN".
    USERNAME=$(echo vm$i)
    # Run the uploaded script with sudo, feeding the password via sudo -S.
    SCRIPT="chmod +x $scriptname; echo $USERNAME | sudo -S ./$scriptname"
    sshpass -p "$USERNAME" scp -q -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $3 $USERNAME@$HOST:
    if [[ $? -eq 0 ]]
    then
      sshpass -p "$USERNAME" ssh -q -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -t -l ${USERNAME} ${HOST} "${SCRIPT}"
    else
      echo "Invalid Username/Password for $HOST. Exiting to main menu..."
      exit
    fi
  done
  exit
fi
# Mode 5: ./VMconnect.sh -copy <path_to_file> USERNAME_FILE PASSWD_FILE
# Copy one file to every running VM, pairing line i of the username file
# with line i of the password file.
if [[ $# == 4 && $1 == "-copy" ]]
then
  numU=$(wc -l < $3)
  numP=$(wc -l < $4)
  # The two credential files must pair up line by line.
  if [[ $numU != $numP ]]
  then
    echo
    echo "The number of Usernames and Passwords are mismatched. Please verify the files."
    echo "Exiting..."
    exit
  fi
  for (( i=1; i <= $numU; i++ ))
  do
    # Row i+1 of `minimega -e vm info` corresponds to VM i (row 1 is the header).
    let row=$i+1
    # NOTE(review): assumes state is column 7 and IP is column 27 of the
    # minimega output — confirm against the installed minimega version.
    state=$(minimega -e vm info | awk 'NR=='$row'{print $7}')
    if [[ "$state" != "RUNNING" ]]
    then
      continue
    fi
    ip=$(minimega -e vm info | awk 'NR=='$row'{print $27}')
    # ${ip:1:-1} strips the surrounding brackets from the reported address.
    HOST=$(echo ${ip:1:-1})
    echo
    echo "Copying the file to $HOST"
    echo
    USERNAME=$(cat $3 | awk 'NR=='$i'{print $1}')
    PASSWORD=$(cat $4 | awk 'NR=='$i'{print $1}')
    sshpass -p "$PASSWORD" scp -q -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $2 $USERNAME@$HOST:
    if [[ $? -eq 0 ]]
    then
      echo
    else
      echo "Invalid Username/Password for $HOST. Exiting to main menu..."
      exit
    fi
  done
  exit
fi
# Mode 6: ./VMconnect.sh -run <path_to_script> USERNAME_FILE PASSWD_FILE
# Copy a script to every running VM (per-VM credentials, paired line by
# line from the two files) and execute it there with sudo.
if [[ $# == 4 && $1 == "-run" ]]
then
  numU=$(wc -l < $3)
  numP=$(wc -l < $4)
  # The two credential files must pair up line by line.
  if [[ $numU != $numP ]]
  then
    echo
    echo "The number of Usernames and Passwords are mismatched. Please verify the files."
    echo "Exiting..."
    exit
  fi
  for (( i=1; i <= $numU; i++ ))
  do
    # Row i+1 of `minimega -e vm info` corresponds to VM i (row 1 is the header).
    let row=$i+1
    # NOTE(review): assumes state is column 7 and IP is column 27 of the
    # minimega output — confirm against the installed minimega version.
    state=$(minimega -e vm info | awk 'NR=='$row'{print $7}')
    if [[ "$state" != "RUNNING" ]]
    then
      continue
    fi
    ip=$(minimega -e vm info | awk 'NR=='$row'{print $27}')
    # ${ip:1:-1} strips the surrounding brackets from the reported address.
    HOST=$(echo ${ip:1:-1})
    echo
    echo "Running the script in $HOST"
    echo
    scriptname=$(basename $2)
    USERNAME=$(cat $3 | awk 'NR=='$i'{print $1}')
    PASSWORD=$(cat $4 | awk 'NR=='$i'{print $1}')
    # SECURITY FIX: removed the debug `echo $scriptname`, `echo $USERNAME`
    # and `echo $PASSWORD` lines, which leaked credentials to stdout/logs.
    # Run the uploaded script with sudo, feeding the password via sudo -S.
    SCRIPT="chmod +x $scriptname; echo $PASSWORD | sudo -S ./$scriptname"
    sshpass -p "$PASSWORD" scp -q -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $2 $USERNAME@$HOST:
    if [[ $? -eq 0 ]]
    then
      # CONSISTENCY FIX: added -o UserKnownHostsFile=/dev/null to match
      # every other ssh/scp call in this script.
      sshpass -p "$PASSWORD" ssh -q -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -t -l ${USERNAME} ${HOST} "${SCRIPT}"
    else
      echo "Invalid Username/Password for $HOST. Exiting to main menu..."
      exit
    fi
  done
  exit
fi
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.