blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
742c154f297f23f3daf8db51d65b45c39756f2e8
|
Shell
|
Suncoldx333/repoPushShellScript
|
/pushRepoScript.sh
|
UTF-8
| 3,506
| 4.21875
| 4
|
[] |
no_license
|
#!/bin/bash
#--------------------------
#
# This script must live in the same directory as the *.podspec file.
# The folder name must match the name of the private pod to publish.
#
# E.g. for a private pod called BLFamilyModule the layout is:
# BLFamilyModule
# |----BLFamilyModule (pod sources)
# |----BLFamilyModule.podspec
# |----Example (sample project)
# |----this-script.sh
# Run `chmod +x ./script.sh` first so the script is executable.
#
#--------------------------
#-------------------------
#
# Gather the current version number and the spec repos this pod depends on.
echo -e "\n更新仓库...\n"
# Without a repo update first, pushing to the remote may fail with "repo not clean".
repoUpdate="pod repo update"
$repoUpdate
echo -e "\n获取基础信息...\n"
basepath=$(cd `dirname $0`; pwd)
filename=`basename $basepath`
podfilePath="${basepath}/Example/Podfile"
# NOTE(review): "$(unknown)" below looks like an extraction artifact — the
# original almost certainly used "${filename}" here; verify before running.
podspecPath=$(unknown)".podspec"
specPaths=""
# Default to 0.1.0 when the pod has never been published.
version="0.1.0"
# `pod search` output contains the latest published version as "Name (x.y.z)".
search="`pod search "$(unknown)"`"
result=$search
if [[ "$result" != "" ]]; then
# Take the text between the first "(" and the first ")" — the version string.
result=${result#*\(}
result=${result%%\)*}
version=$result
fi
if [ -e $podspecPath ]; then
specPaths=""
else
echo "ERROR: ${podspecPath} 文件不存在"
exit 1
fi
# Collect every `source '...git'` line from the Example Podfile into a
# comma-separated list of spec repo URLs.
if [ -e $podfilePath ]; then
while read -r line; do
if [[ "$line" =~ ^source.*git\'$ ]]; then
path=${line#source* *\'}
path=${path%\'}
specPaths=${specPaths}${path}","
fi
done < $podfilePath
else
echo "ERROR: ${podfilePath} 文件不存在"
exit 1
fi
# Drop the trailing comma; commas become newlines for display purposes only.
specPaths=${specPaths%\,}
showspecs=${specPaths//\,/\\\n}
echo -e "当前的版本号:${version} \n依赖的源路径:\n${showspecs}\n"
#--------------------------
#------------------------
#
# git commit, tag and push
echo -n "要上传的版本号:"
read specifiVersion
# The version entered must look like x.y.z (digits separated by dots).
if [[ "$specifiVersion" =~ [0-9]+\.[0-9]+\.[0-9]+ ]]; then
#TODO1: compare the entered version against the published one (reject lower/equal)
#TODO2: handle a mismatch between the podspec's version and the entered one
#MARK: temporary log output
echo $specifiVersion
else
echo "版本格式不正确,应为xx.xx.xx"
exit 1
fi
git add .
echo -n "提交的版本修改的内容:(建议为版本号)"
read content
git commit -m "${content}"
git tag $specifiVersion
git push origin master --tags
#------------------------
#------------------------
#
# Locate the local CocoaPods spec repos and push the podspec to one of them.
repoPath="/Users/"
# Find the macOS user whose home directory appears in this script's path and
# derive ~/.cocoapods/repos from it.
# NOTE(review): "$(unknown)" below looks like an extraction artifact — the
# original most likely used "${filename}" here; verify before running.
if [[ -d /Users ]]; then
for file in /Users/*; do
filename=`basename $file`
if [[ $basepath =~ $filename ]]; then
repoPath=${repoPath}$(unknown)"/.cocoapods/repos"
echo $repoPath
fi
done
fi
# NOTE(review): "sepcSource" and "lintCommnad" are misspellings kept as-is
# ("specSource"/"lintCommand" intended) — renaming would be a separate change.
sepcSource=""
specs=""
# Enumerate every spec repo under ~/.cocoapods/repos so the user can pick one.
if [[ -d $repoPath ]]; then
for file1 in $repoPath/*; do
filename1=`basename ${file1}`
specs=${specs}"/"${filename1}
done
else
echo "请检查 ~/.cocoapods/repos 文件夹是否存在"
exit 1
fi
specs=${specs#/}
echo -e -n "~/.cocoapods/repos文件夹内所有的specs("${specs}"),选择你的私有源:"
read sepcSource
echo -e "开始提交到私有仓库...\n"
# Lint the podspec, then push it to the chosen private spec repo; append the
# collected --sources list when the Podfile declared any spec repos.
lintCommnad="pod spec lint "${podspecPath}" --allow-warnings --use-libraries"
pushCommand="pod repo push "${sepcSource}" --allow-warnings --use-libraries"
if [[ "$specPaths" == "" ]]; then
${lintCommnad}
else
lintCommnad=${lintCommnad}" --sources="${specPaths}
${lintCommnad}
fi
if [[ "$specPaths" == "" ]]; then
${pushCommand}
else
pushCommand=${pushCommand}" --sources="${specPaths}
${pushCommand}
fi
#------------------------
exit 0
| true
|
00b7599af67a63d983b0e1e3976b0d58bab0f0b6
|
Shell
|
pld-linux/tripwire
|
/tripwire.verify
|
UTF-8
| 1,102
| 3.875
| 4
|
[] |
no_license
|
#!/bin/sh
# Periodic Tripwire integrity check: mail an error when the database is
# missing, otherwise run the check and mail the report to MAILTO.
HOST_NAME=`uname -n`
TWCFG_PATH=/etc/tripwire
TWDB_PATH=/usr/spool/tripwire
TWROOT_PATH=/usr/sbin
MAILTO="root" # Email addresses that should receive reports
#
# Define checks which alert user to misconfiguration or run the check
#
if [ ! -e ${TWDB_PATH}/${HOST_NAME}.twd ]; then
echo "**** Error: Tripwire database for ${HOST_NAME} not found. ****"
# NOTE(review): the inner double quotes end and restart the string, so
# tripwire --init is emitted unquoted — harmless output-wise, but the
# quotes were probably meant to be escaped.
echo "**** Verify tripwire was installed and/or "tripwire --init". ****"
mail -s "**** Error: Tripwire database for ${HOST_NAME} not found." ${MAILTO}
else
# NOTE(review): the banner below is piped into `test`, which ignores its
# stdin, so the explanatory header never reaches the mail — only the
# tripwire --check output does. The intent was likely
# { banner; tripwire --check; } | mail ... — confirm before changing.
(cat <<EOF
This is an automated report of possible file integrity changes, generated by
the Tripwire integrity checker. To tell Tripwire that a file or entire
directory tree is valid, as root run:
/usr/sbin/tripwire -update [pathname|entry]
If you wish to enter an interactive integrity checking and verification
session, as root run:
/usr/sbin/tripwire
Changed files/directories include:
EOF
cat
) | test -f ${TWCFG_PATH}/tw.cfg && ${TWROOT_PATH}/tripwire --check | \
mail -s "${HOST_NAME} tripwire-check. File integrity report" ${MAILTO}
fi
| true
|
80dc6492daa530546e4b74f5cbce41becc178c33
|
Shell
|
rrbn/netpoint9
|
/config/includes.chroot/usr/local/bin/flush_logs
|
UTF-8
| 384
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
# flush_logs — empty the persistent system logs on a live system, unless a
# debug flag (live-config.debug or debug) is present on the kernel cmdline.

_CMDLINE="$(cat /proc/cmdline)"
DEBUG=""

# Scan each boot parameter for a debug request.
for _PARAMETER in ${_CMDLINE}
do
	case "${_PARAMETER}" in
		live-config.debug|debug)
			DEBUG="1"
			;;
	esac
done

# Not debugging: truncate the standard logs and clear the live-boot log dir.
if [ -z "$DEBUG" ]
then
	echo "flush logs...."
	for _LOGFILE in /var/log/Xorg.0.log /var/log/dmesg /var/log/messages /var/log/kern.log /var/log/syslog
	do
		echo "" > "${_LOGFILE}"
	done
	rm -f /var/log/live/*
fi
| true
|
1dbb681f24e68400e992e2b34323aa80011bbbe4
|
Shell
|
AngelofWoe/arkos
|
/RG351P-M/Ubuntu OS Partition/var/lib/dpkg/info/policykit-1.postinst
|
UTF-8
| 2,513
| 3.578125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# postinst script for policykit-1
#
# see: dh_installdeb(1)
# Abort immediately on any command failure, as maintainer scripts must.
set -e
# summary of how this script can be called:
# * <postinst> `configure' <most-recently-configured-version>
# * <old-postinst> `abort-upgrade' <new version>
# * <conflictor's-postinst> `abort-remove' `in-favour' <package>
# <new-version>
# * <postinst> `abort-remove'
# * <deconfigured's-postinst> `abort-deconfigure' `in-favour'
# <failed-install-package> <version> `removing'
# <conflicting-package> <version>
# for details, see http://www.debian.org/doc/debian-policy/ or
# the debian-policy package
set_perms() {
    # Apply ownership and mode to a file, unless the administrator has
    # registered a local dpkg-statoverride for it (in which case their
    # choice wins and we leave the file alone).
    # $1=user  $2=group  $3=mode  $4=file
    # BUG FIX: the original used the globals USER/GROUP/MODE/FILE, silently
    # clobbering the caller's standard USER environment variable; use local
    # lowercase names (dash supports `local`) and quote all expansions.
    local user group mode file
    user=$1
    group=$2
    mode=$3
    file=$4
    if ! dpkg-statoverride --list "$file" > /dev/null 2>&1; then
        chown "$user:$group" "$file"
        chmod "$mode" "$file"
    fi
}
case "$1" in
configure)
# Lock down polkit state directories and make the privilege-escalation
# helpers setuid root, honouring any local dpkg-statoverride exceptions.
set_perms root root 700 /var/lib/polkit-1
set_perms root root 700 /etc/polkit-1/localauthority
set_perms root root 4755 /usr/lib/policykit-1/polkit-agent-helper-1
set_perms root root 4755 /usr/bin/pkexec
# The service file was renamed to polkit.service to match the upstream name.
# Stop the old polkitd.service on upgrades.
if [ -d /run/systemd/system ] && dpkg --compare-versions "$2" lt-nl 0.105-17; then
systemctl daemon-reload
deb-systemd-invoke stop polkitd.service || true
fi
;;
abort-upgrade|abort-remove|abort-deconfigure)
;;
*)
echo "postinst called with unknown argument \`$1'" >&2
exit 1
;;
esac
# dh_installdeb will replace this with shell code automatically
# generated by other debhelper scripts.
# Automatically added by dh_installdeb/12.4ubuntu1
dpkg-maintscript-helper rm_conffile /etc/dbus-1/system.d/org.freedesktop.PolicyKit1.conf 0.105-22\~ -- "$@"
dpkg-maintscript-helper rm_conffile /etc/polkit-1/nullbackend.conf.d/50-nullbackend.conf 0.105-26\~ -- "$@"
# End automatically added section
# Automatically added by dh_installsystemd/12.4ubuntu1
if [ "$1" = "configure" ] || [ "$1" = "abort-upgrade" ] || [ "$1" = "abort-deconfigure" ] || [ "$1" = "abort-remove" ] ; then
if [ -d /run/systemd/system ]; then
systemctl --system daemon-reload >/dev/null || true
if [ -n "$2" ]; then
deb-systemd-invoke try-restart 'polkit.service' >/dev/null || true
fi
fi
fi
# End automatically added section
# This can be removed after bullseye is released as stable
for d in /etc/polkit-1/nullbackend.conf.d; do
if [ -d "$d" ]; then
rmdir --ignore-fail-on-non-empty "$d"
fi
done
exit 0
| true
|
61d972e38d01c897e031b19c4b20f0bebd12fd14
|
Shell
|
PiedPiperOCD/K-split
|
/scripts/plot/collect-med.sh
|
UTF-8
| 414
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
# collect-med.sh <column> <size>...
# Prints a table: one row per size, one column per experiment type found in
# ./res_<type>_<size>* files, each cell computed by summarize-med.sh.
col=$1; shift
sizes=( $@ )
dir=$( dirname $0 )
#printf "%-30s\t" "exp"
#echo $@
# Derive the set of experiment types from the files matching the first size.
type=()
for file in ./res_*${sizes[0]}*; do
tt=${file#*res_}
type+=( ${tt%%_${sizes[0]}*} )
done
printf "%-20s" "Size"
echo "${type[@]}"
# One row per size; each cell summarizes the matching result file at $col.
for size in ${sizes[@]}; do
printf "%-20s\t" $size
for t in ${type[@]}; do
f=$( ls res_${t}_${size}* )
printf "%-20s" $( $dir/summarize-med.sh $f $col )
done
echo
done
| true
|
7b5f64519a8c494b7c76089616ad9de4c194dbe2
|
Shell
|
hqs2212586/-shell-
|
/bondingtest.sh
|
UTF-8
| 1,108
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# Configure two NICs as a bond0 pair (mode 1, active-backup) on a
# RHEL/CentOS-style system and restart networking.
# Load the bonding kernel module.
modprobe bonding
# BUG FIX: abort when the module cannot be loaded — the original only
# printed an error and then claimed "Kernel Check ok" anyway.
if [ $? != 0 ] ;then
echo '`bonding`模块Error'
exit 1
fi
echo 'Kernel Check ok'
echo 'Config NetworkManager'
# Disable NetworkManager so it cannot fight over the bonded interfaces.
systemctl stop NetworkManager.service
systemctl disable NetworkManager
systemctl mask NetworkManager
# Pick the two slave NIC names from ifconfig output.
# NOTE(review): assumes the last three UP interfaces are NIC1/NIC2/lo —
# fragile; verify on the target machine.
echo 'collecting network info'
v1=$(ifconfig | grep UP | tail -3 | head -1 | awk -F: '{print $1}')
v2=$(ifconfig | grep UP | tail -2 | head -1 | awk -F: '{print $1}')
echo 'Create network-scripts files for '$v1, $v2
cat > /etc/sysconfig/network-scripts/ifcfg-enoa << END
DEVICE=eno33554992
TYPE=Ethernet
ONBOOT=yes
BOOTPROTO=none
MASTER=bond0
SLAVE=YES
END
cat > /etc/sysconfig/network-scripts/ifcfg-enob << END
DEVICE=eno50332216
TYPE=Ethernet
ONBOOT=yes
BOOTPROTO=none
MASTER=bond0
SLAVE=yes
END
cat > /etc/sysconfig/network-scripts/ifcfg-bond0 << END
DEVICE=bond0
TYPE=Ethernet
ONBOOT=yes
BOOTPROTO=none
BONDING_OPTS="mode 1 miimon 100"
IPADDR=192.168.240.138
NETMASK=255.255.255.0
END
# BUG FIX: the ifcfg files were written to /etc/sysconfig/network-scripts/,
# but sed was run on bare filenames in the current directory, so the
# detected device names were never patched in. Point sed at the real files.
sed -i "/DEVICE=/c\DEVICE=$v1" /etc/sysconfig/network-scripts/ifcfg-enoa
sed -i "/DEVICE=/c\DEVICE=$v2" /etc/sysconfig/network-scripts/ifcfg-enob
service network restart
| true
|
7ba4560e0d253aeb9a79140a70e88ac247ae48ee
|
Shell
|
Awalrod/jco-datalogger
|
/src/DEBIAN/postinst
|
UTF-8
| 816
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/sh
echo "start of postinst script"
# update needed changes in apache2
# Probe whether mod_cgi is enabled BEFORE `set -e`, so a non-zero a2query
# status can be captured rather than aborting the whole script.
a2query -m cgi -q
ISTAT=$?
#echo "ISTAT ${ISTAT}"
set -e
ldconfig
# Enable CGI support only when a2query reported it missing.
if [ ${ISTAT} -ne 0 ]
then
sudo a2enmod cgi >/dev/null 2>&1
fi
mkdir -p /var/www/html/data
service apache2 restart
# Register and start every systemd unit shipped by this package.
systemctl daemon-reload
echo "reload"
systemctl enable can-iface.service
echo "enabled caniface"
systemctl start can-iface.service
echo "started can"
systemctl enable can-socat-setup.service
echo "enabled socat"
systemctl start can-socat-setup
echo "started socat"
systemctl enable busmaster.service
echo "enabled busmaster"
systemctl start busmaster
echo "started busmaster"
systemctl enable jcodatalogger.service
echo "enabled jcodatalogger"
systemctl start jcodatalogger
echo "started jcodatalogger"
echo "end of postinst script"
exit 0
| true
|
918f6569ac557592b793735196af3f7d758b251e
|
Shell
|
level5/LaTeX
|
/programming/code/shell/abs-guide/useful-script/gcd.sh
|
UTF-8
| 472
| 4.1875
| 4
|
[] |
no_license
|
#! /bin/bash
# gcd.sh: Greatest Common Divisor
# Argument check
# Require exactly two arguments; exit with E_BADARGS (85) otherwise.
ARGS=2
E_BADARGS=85
if [ $# -ne "$ARGS" ]; then
echo "Usage: `basename $0` first-number second-number"
exit $E_BADARGS
fi
# check if arguments are integers
# solution 1, regex [[ $1 =~ ^-?[0-9]+$ ]]
# solution 2, test with if [ "$1" -eq "$1" ] 2>/dev/null; then ...; fi
# NOTE(review): neither integer check is actually implemented yet.
gcd ()
{
  # Euclid's algorithm: repeatedly replace (dividend, divisor) with
  # (divisor, dividend % divisor) until the remainder reaches zero; the
  # GCD is then left in the global $dividend.
  # BUG FIX: the original loop body was the incomplete expression
  # `let "reminder=$dividend %"` and never rotated the pair, so the
  # remainder was never computed and the loop could not terminate.
  # Precondition: $2 must be non-zero (integer validation above is a TODO).
  dividend=$1
  divisor=$2
  reminder=1
  until [[ "$reminder" -eq 0 ]]; do
    let "reminder=$dividend % $divisor"
    dividend=$divisor
    divisor=$reminder
  done
}
| true
|
4058a4b3060ec4e33e07132571b9b900430cc8df
|
Shell
|
plathome/debian_based_firmware
|
/build_ramdisk/etc.stretch.sysvinit/init.d/openblocks-setup
|
UTF-8
| 9,794
| 3.53125
| 4
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
### BEGIN INIT INFO
# Provides: openblocks-setup
# Required-Start: udev
# Required-Stop: umountfs
# Default-Start: S
# Default-Stop: 0 6
# Short-Description: Config storage area for OpenBlocks
# Description:
### END INIT INFO
#
# Copyright (c) 2013-2022 Plat'Home CO., LTD.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY PLAT'HOME CO., LTD. AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL PLAT'HOME CO., LTD. AND CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
DESC="filesystem layout configuration"
NAME="openblocks-setup"
# Load the VERBOSE setting and other rcS variables
. /lib/init/vars.sh
# Define LSB log_* functions.
# Depend on lsb-base (>= 3.0-6) to ensure that this file is present.
. /lib/lsb/init-functions
#set -x
# Site configuration (MODEL, MTD_*_DEV, RW_TMPFS_SIZE, UNIONFS_DIRS, ...).
[ -f /etc/default/openblocks ] && . /etc/default/openblocks
# Reset terminal attributes/colours.
echo -e "\033[0m"
# Defaults; each may be pre-set by /etc/default/openblocks.
RW_DIR="${RW_DIR:=/.rw}"
RW_CF_LABEL="${RW_CF_LABEL:=DEBIAN}"
CONFIG_LABEL="${CONFIG_LABEL:=DEB_CONFIG}"
CONFIG_DEV_MTPT="${CONFIG_DEV_MTPT:=/mnt}"
CONFIG_FILE=userland.tgz
CONFIG_SCRIPT=init.sh
CONFIG_POSTSCRIPT=post-init.sh
MTREE_LIST="${MTREE_LIST:=/etc/mtree.list}"
MTREE_DIRS="${MTREE_DIRS:=/var/log /var/run}"
# obsa6/obsa7 restore FlashROM via the flashcfg-debian helper; other models
# read the mtd devices directly.
if [ "$MODEL" == "obsa6" -o "$MODEL" == "obsa7" ] ; then
extract_func=extract_mtd_a6
else # obsax3
extract_func=extract_mtd
fi
protect_mtd(){
# Write-protect (flag 0x800) every configured FlashROM partition that
# exposes a sysfs flags attribute.
local mtd=
for mtd in $MTD_FIRM_DEV $MTD_CONF_DEV $MTD_USER_DEV $MTD_OPT_DEV;do
[ -f /sys/devices/virtual/mtd/${mtd}/flags ] && \
echo "0x800" > /sys/devices/virtual/mtd/${mtd}/flags
done
}
extract_mtd_a6(){
# Restore one FlashROM area via the flashcfg-debian helper (obsa6/obsa7).
# $1: mtd device name; maps to the helper's extraction flag and a label.
local mtd=$1
local type=gzip
# NOTE(review): $type is unused in this variant (cf. extract_mtd).
case $mtd in
${MTD_CONF_DEV}) cmdarg="-x"; desc="/etc config" ;;
${MTD_USER_DEV}) cmdarg="-X"; desc="userland" ;;
${MTD_OPT_DEV}) cmdarg="-J"; desc="optional" ;;
*) return 1;;
esac
echo -n "Extract $desc files from FlashROM($mtd)... "
if flashcfg-debian $cmdarg > /dev/null 2>&1 ; then
echo "done."
else
echo "fail(or empty)."
fi
}
extract_mtd(){
# Read one mtd device raw, decompress it (gzip; lzma for the optional
# area) and unpack the tar into ODIR (RW_DIR; / for the optional area).
# $1: mtd device name. Returns 1 for an unknown device, else 0.
local mtd=$1
local type=gzip
local ODIR=${RW_DIR}
case $mtd in
${MTD_CONF_DEV}) desc="/etc config" ;;
${MTD_USER_DEV}) desc="userland" ;;
${MTD_OPT_DEV}) desc="optional"; type=lzma; ODIR=/ ;;
*) return 1;;
esac
echo -n "Extract $desc files from FlashROM($mtd)... "
# Don't use "tar -xpzf ..." directly: it yields a bad return code here.
if (dd if=/dev/$mtd|$type -d|tar -xpf - -C ${ODIR}) > /dev/null 2>&1; then
echo "done."
else
echo "fail(or empty)."
fi
return 0
}
restore_func(){
# obsbx1/obsvx1: mount the backup storage read-only (on obsvx1, found by
# filesystem label $SAVE_DIR) and unpack userland.tgz and etc.tgz over
# RW_DIR. Uses a throwaway mount point under /tmp.
WORK_DIR=/tmp/_tmpfs.$$
mkdir -p ${WORK_DIR}
if [ "$MODEL" == "obsvx1" ]; then
mount `findfs LABEL=${SAVE_DIR}` ${WORK_DIR} -r
else
mount ${SAVE_DIR} ${WORK_DIR} -r
fi
echo -n "Extract userland files... "
if (tar xpzmf ${WORK_DIR}/userland.tgz -C ${RW_DIR} > /dev/null 2>&1); then
echo "done."
else
echo "fail(or empty)."
fi
echo -n "Extract /etc files... "
if (tar xpzmf ${WORK_DIR}/etc.tgz -C ${RW_DIR} > /dev/null 2>&1); then
echo "done."
else
echo "fail(or empty)."
fi
umount ${WORK_DIR}
rm -rf ${WORK_DIR}
return 0
}
if_exists_execute_config_postscript(){
	# Run the optional post-init hook shipped on the DEB_CONFIG device.
	# Always returns 0, whether or not the hook exists.
	local hook="${CONFIG_DEV_MTPT}/${CONFIG_POSTSCRIPT}"
	[ -f "$hook" ] && bash "$hook"
	return 0
}
if_exists_execute_config_script(){
	# Run the optional init script shipped on the DEB_CONFIG device.
	# Always returns 0, whether or not the script exists.
	local hook="${CONFIG_DEV_MTPT}/${CONFIG_SCRIPT}"
	[ -f "$hook" ] && bash "$hook"
	return 0
}
if_exists_extract_config_file(){
# When the DEB_CONFIG device carries a userland.tgz, unpack it over RW_DIR.
# Always returns 0 so setup continues even without a config archive.
if [ -f "${CONFIG_DEV_MTPT}/${CONFIG_FILE}" ] ; then
echo -n "Extract config files from LABEL=$CONFIG_LABEL device... "
if tar -xpzf ${CONFIG_DEV_MTPT}/${CONFIG_FILE} -C ${RW_DIR};then
echo "done."
else
echo "fail(or empty)."
fi
fi
return 0
}
fdisk_and_mke2fs(){
# Repartition the whole disk behind partition $1 as a single Linux (0x83)
# partition and create a fresh ext4 filesystem labelled ${RW_CF_LABEL}.
# NOTE(review): ${dev%[0-9]} strips only one trailing digit — partition
# numbers >= 10 or NVMe-style names (…p1) would not resolve to the disk.
dev=$1
sfdisk ${dev%[0-9]} <<-_FDISK
,,83,-
_FDISK
mkfs.ext4 -L ${RW_CF_LABEL} $dev
return 0
}
mkdir_mtree_list() {
	# Create the directory tree described by ${MTREE_LIST}: one
	# "owner:group:path" entry per line, creating each path and
	# applying the requested ownership.
	local owner group dir rest
	if [ -s ${MTREE_LIST} ]; then
		# Let read split the fields directly instead of forking
		# echo+cut three times per line ("rest" absorbs any extra
		# trailing fields, matching the old cut -f3 behaviour).
		while IFS=: read -r owner group dir rest;do
			mkdir -p ${dir}
			chown ${owner}:${group} ${dir}
		done < ${MTREE_LIST}
	fi
}
# Dispatch on the init action. "start" remounts / read-write, restores the
# overlay storage (tmpfs or the DEBIAN-labelled device), mounts the union
# filesystems and finishes system setup; "stop" offlines cpu1 on obsax3.
case "$1" in
start)
sleep 1
mount -oremount,rw /
protect_mtd
depmod -a
if [ "$MODEL" == "obs600" ]; then
hwclock --rtc=/dev/rtc0 --hctosys
fi
FLG_INIT=false
FLG_FORCE_CONFIG=false
rwdev_fs=ext3
union_fs=unionfs
# execute watchdog timer daemon before DEB_CONFIG
case "$MODEL" in
obsbx1)
/etc/init.d/wd-keepalive start
;;
*)
;;
esac
# check init sw
grep -q 'noflashcfg=1' /proc/cmdline && FLG_INIT=true
# check cf with LABEL=${CONFIG_LABEL}
confdev=`findfs LABEL=${CONFIG_LABEL} 2> /dev/null`
if [ -n "$confdev" ] ; then
mount -o ro $confdev $CONFIG_DEV_MTPT
[ -f "/mnt/FORCE" ] && FLG_FORCE_CONFIG=true
fi
# check cf with LABEL=${RW_CF_LABEL}
rwdev=`findfs LABEL=${RW_CF_LABEL} 2> /dev/null`
grep -q 'noeasyblocks=1' /proc/cmdline && rwdev=""
# check aufs support (prefer overlay, then aufs, else unionfs)
if grep -q overlay /proc/filesystems ; then
union_fs=overlay
elif grep -q aufs /proc/filesystems ; then
union_fs=aufs
fi
mkdir -p ${RW_DIR}
# for easyblocks check
if [ -n "$rwdev" ] ; then
fsck -C -a $rwdev 2> /dev/null
k1="Filesystem features"
k2="(extent|flex_bg|huge_file|uninit_bg|dir_nlink|extra_isize)"
if dumpe2fs -h $rwdev | grep "$k1" | grep -q -E "$k2" ; then
rwdev_fs=ext4
fi
mount -o ro -t $rwdev_fs $rwdev ${RW_DIR}
[ -e "${RW_DIR}/etc/easyblocks" ] && FLG_INIT=false
umount ${RW_DIR}
fi
if [ -z "$rwdev" ] || [ "${FLG_INIT}" == "true" ]; then
mount -t tmpfs -o size=${RW_TMPFS_SIZE} tmpfs ${RW_DIR}
if [ "${FLG_INIT}" == "true" ] ; then
echo "Running by default configuration..."
elif [ "${FLG_FORCE_CONFIG}" == "true" ] ; then
echo "Don't extract files in FlashROM..."
else
case $MODEL in
obsa*|obs600)
$extract_func ${MTD_USER_DEV}
$extract_func ${MTD_CONF_DEV}
;;
obsbx1|obsvx1)
restore_func
;;
esac
fi
else
if [ "${FLG_FORCE_CONFIG}" == "true" ];then
fdisk_and_mke2fs $rwdev
fi
mount -t $rwdev_fs $rwdev ${RW_DIR}
fi
[ -f ${RW_DIR}/etc/default/openblocks ] && \
. ${RW_DIR}/etc/default/openblocks
if [ -z "$rwdev" ] ; then
mount -o remount,size=${RW_TMPFS_SIZE} ${RW_DIR}
else
remount_opt=
if [ "$rwdev_fs" == "ext4" ] ; then
remount_opt="$remount_opt discard"
fi
if [ "$NOATIME" == "true" ] ; then
remount_opt="$remount_opt noatime"
fi
_remount_opt="$(echo $remount_opt|sed -e 's# #,#')"
[ -n "$_remount_opt" ] && \
mount -oremount,$_remount_opt ${RW_DIR}
fi
# extract CONFIG_FILE and execute CONFIG_SCRIPT
if [ -n "$confdev" ] ; then
if_exists_extract_config_file
if_exists_execute_config_script
fi
# if using DEBIAN storage, /tmp directory mount under storage.
#if [ -n "$rwdev" ] ; then
# UNIONFS_DIRS="$UNIONFS_DIRS tmp"
#fi
# mount filesystems
for dir in ${UNIONFS_DIRS} ; do
mawk '$1 !~ /^#/ {print $2}' < /etc/fstab | grep -q "^/$dir" && continue
mkdir -p ${RW_DIR}/$dir
case $union_fs in
overlay)
mkdir -p ${RW_DIR}/work/$dir
mount -n -t ${union_fs} \
-o lowerdir=$dir,upperdir=${RW_DIR}/$dir,workdir=${RW_DIR}/work/$dir ${union_fs} /$dir
;;
*)
# BUG FIX: the original tested "$unionfs" (undefined) instead of
# "$union_fs", so the aufs xino options were never applied.
if [ "$union_fs" == "aufs" ]; then
xino_opt="xino=/.aufs.xino.$dir,trunc_xino,"
[ "$MODEL" == "obsvx1" ] && xino_opt+="acl,"
fi
mount -n -t ${union_fs} \
-o ${xino_opt}dirs=${RW_DIR}/$dir=rw:/$dir=ro ${union_fs} /$dir
;;
esac
done
# reboot udev
/etc/init.d/udev stop > /dev/null 2>&1
/etc/init.d/udev start
# extract Oracle Java
# /usr/lib/jre directory and /etc/profile.d/java.(sh|csh) file.
if [ ! -x /usr/lib/jre/bin/java ] ; then
[ -n "${MTD_OPT_DEV}" ] && $extract_func ${MTD_OPT_DEV}
fi
# for Measures to rename network interface
rm -f /etc/udev/rules.d/70-persistent-net.rules
rm -rf /etc/network/run
ln -sf /run/network /etc/network/run
# create ssh keys for openssh-server
if [ -x /usr/sbin/sshd ] ; then
dpkg-reconfigure openssh-server
fi
# create mount directory for fstab
mawk '/\/[a-zA-Z0-9]+/ {print $2}' /etc/fstab | xargs mkdir -p
# make directory tree
mkdir_mtree_list
# execute postscript and unmount DEB_CONFIG storage.
if [ -n "$confdev" ] ; then
if_exists_execute_config_postscript
umount $confdev
fi
# reload inittab
kill -1 1
# correct for debian7
ln -sf /proc/mounts /etc/mtab
# correct host name
/etc/init.d/hostname.sh start
# S35390A INIT2 flag clear
if [ "$MODEL" == "obsbx1" ]; then
obs-hwclock --check
if [ $? == 0 ]; then
/usr/local/sbin/hwclock --clearint2
date -s "`/usr/local/sbin/hwclock --hctosys`"
/usr/local/sbin/hwclock --status
fi
fi
;;
stop)
if [ "$MODEL" == "obsax3" ]; then
if [ -f /sys/devices/system/cpu/cpu1/online ]; then
echo
echo 0 > /sys/devices/system/cpu/cpu1/online
echo
fi
fi
;;
esac
set +x
exit 0
| true
|
7e386367db95af4273b3e0fdad1ceabf79e879b3
|
Shell
|
pressla/caradali
|
/test/tmp/etc/config/m2mdali/test/mdumpme.sh
|
UTF-8
| 730
| 2.546875
| 3
|
[] |
no_license
|
#run the meshdump tool to collect all incoming messages from connected nodes and generate a parameter settings file for this mesh
#all mac ids will be logged to mactablem2m.json and shall be stored in /etc/config/mactablem2m.json
#the file is read by m2m-dali.js to set dynamically parameters
#
# In each argument, "%" will be replaced by a blank char, so a command can be
# issued on the command line on the node as: <command>~<parameter>=<value>
# myGrouping~group=<group_id> and myGrouping~name=<nodename> are special commands for the switch command on LED lights
node meshdump uci%set~wireless.@wifi-iface[0].ssid=AIMLED2 uci%set~wireless.@wifi-device[0].channel=1 uci%set~wireless.@wifi-iface[1].mesh_rssi_threshold=0 myGrouping~group=G1 myGrouping~name=L1
| true
|
b2ada1941d2de0d8c32e14dea5cab4f4419243c3
|
Shell
|
swwind/dotfiles
|
/ssr.sh
|
UTF-8
| 636
| 3.3125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/zsh
# ssr.sh — manage a local shadowsocksr checkout.
#   (no args)  show help text
#   start/stop control the client daemon (via sudo)
#   sub        fetch a subscription list and pick a node config
#   destruct   delete the checkout
# Clone the client on first use.
if [ ! -d shadowsocksr ]; then
	git clone -b manyuser --depth=1 https://github.com/shadowsocksr-backup/shadowsocksr.git
fi
help() {
	cat ssr-help.txt
}
if [ $# = 0 ]; then
	help
elif [ "$1" = "start" ]; then
	# BUG FIX: abort when the checkout is missing instead of running
	# `sudo python local.py` from the wrong directory.
	cd shadowsocksr/shadowsocks || exit 1
	sudo python local.py -d start
elif [ "$1" = "stop" ]; then
	cd shadowsocksr/shadowsocks || exit 1
	sudo python local.py -d stop
elif [ "$1" = "sub" ]; then
	echo -n "sub link: "
	read sublink
	# Quote the user-supplied URL so metacharacters cannot mangle it.
	curl -fsSL "$sublink" | node ssr.js
	echo -n "select a node: "
	read node
	mv "shadowsocksr/config.$node.json" shadowsocksr/config.json
elif [ "$1" = "destruct" ]; then
	rm -rf shadowsocksr
fi
| true
|
14c4bfb1ac4290d0705d945df300b63a3f9bbfac
|
Shell
|
adaptto/2017-provisioning-local-aem-with-conga
|
/deploy_author.sh
|
UTF-8
| 1,472
| 3.625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# BUG FIX: was #!/bin/sh, but this script uses bashisms further down
# ([[ ... ]] pattern matching and `read -n1`), which plain sh lacks.
# defaultconfig
sling_url="http://localhost:4502"
sling_user="admin"
sling_password="admin"
conga_node="aem-author"
####
default_build()
{
# Full build-and-deploy pipeline for the author instance:
# banner, clean build of all modules, then package deployment.
motd
clean_install
deploy_artifacts
}
#####
motd()
{
# Print a banner describing what will be built and where it deploys.
cat <<EOF
********************************************************************

 Cleans and installs all modules
 Uploads and installs application complete packages, config and sample content

 Destinations:
 - $conga_node: $sling_url

********************************************************************
EOF
}
####
clean_install()
{
# Build every module from scratch (plus eclipse metadata); on failure,
# abort the whole script via error_exit.
echo ""
echo "*** Build artifacts ***"
echo ""
mvn clean install eclipse:eclipse
if [ "$?" -ne "0" ]; then
error_exit "*** Build artifacts FAILED ***"
fi
}
#####
deploy_artifacts()
{
# Upload and install the AEM packages for the author node via the CONGA
# maven plugin, using the credentials/URL configured at the top.
echo ""
echo "*** Deploy AEM packages (author) ***"
echo ""
cd config-definition
mvn -B -Dsling.url=$sling_url \
-Dsling.user=$sling_user -Dsling.password=$sling_password \
-Dconga.nodeDirectory=target/configuration/development/$conga_node \
conga-aem:package-install
if [ "$?" -ne "0" ]; then
error_exit "*** Deploying AEM packages FAILED ***"
fi
cd ../
}
#####
error_exit()
{
# Report a fatal error on stderr (framed by blank lines on stdout) and
# terminate with status 1. When invoked from a Windows-style path
# ($0 contains ":\"), pause first so the console window stays readable.
printf '\n'
printf '%s\n' "$1" >&2
printf '\n'
if [[ $0 == *":\\"* ]]; then
read -n1 -r -p "Press ENTER key to continue..."
fi
exit 1
}
default_build
echo ""
echo "*** Build complete ***"
echo ""
# Pause when launched from a Windows-style console (see error_exit).
if [[ $0 == *":\\"* ]]; then
read -n1 -r -p "Press ENTER key to continue..."
fi
| true
|
afbc61d42d28c23cc23b5ae6d53a5efdce7d1503
|
Shell
|
andrebossi/kvm-templates
|
/Debian/start.sh
|
UTF-8
| 1,673
| 3.03125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Create a Debian KVM guest with virt-install — interactively, or fully
# automated via a preseed file, depending on PRESEED_INSTALL.
DISK_FORMAT="qcow2" # Define VM disk image format.. qcow2|img
DISK_PATH="/var/lib/libvirt/images" # Define path where VM disk images are stored
DISK_SIZE="30" # Define disk size in GB
EXTRA_ARGS="console=ttyS0,115200n8 serial"
LOCATION="http://sft.if.usp.br/debian/dists/stable/main/installer-amd64/current/images/netboot/"
NETWORK_BRIDGE="br0" # virbr0
OS_TYPE="linux" # Define OS type to install... linux|windows
OS_VARIANT="debian9"
PRESEED_FILE="$(pwd)/preseed.cfg" # Define preseed file for Debian based installs if desired
PRESEED_INSTALL="true" # Define if preseed install is desired
RAM="2048" # Define memory to allocate to VM in MB... 512|1024|2048
VCPUS="2" # Define number of vCPUs to allocate to VM
VMName="debian-server" # Define name of VM to create
if [ "$PRESEED_INSTALL" = false ]; then
virt-install \
--connect qemu:///system \
--virt-type kvm \
--name $VMName \
--ram $RAM \
--disk path=$DISK_PATH/$VMName.$DISK_FORMAT,size=$DISK_SIZE \
--vcpus $VCPUS \
--os-type $OS_TYPE \
--os-variant $OS_VARIANT \
--network bridge=$NETWORK_BRIDGE \
--graphics none \
--console pty,target_type=serial \
--location $LOCATION \
--check disk_size=off \
--extra-args "$EXTRA_ARGS"
fi
# BUG FIX (above): "--virt-type kvm" was missing its trailing backslash,
# which broke the line continuation and truncated the first virt-install
# invocation after only two options.
if [ "$PRESEED_INSTALL" = true ]; then
virt-install \
--connect qemu:///system \
--virt-type kvm \
--name $VMName \
--ram $RAM \
--disk path=$DISK_PATH/$VMName.$DISK_FORMAT,size=$DISK_SIZE \
--vcpus $VCPUS \
--os-type $OS_TYPE \
--os-variant $OS_VARIANT \
--network bridge=$NETWORK_BRIDGE \
--graphics none \
--check disk_size=off \
--console pty,target_type=serial \
--location $LOCATION \
--initrd-inject=$PRESEED_FILE \
--noreboot \
--extra-args "$EXTRA_ARGS"
fi
| true
|
9fc8d8b63177429838eec73facfcf4e4a58442fb
|
Shell
|
troglodyne/selenium-scripts
|
/selenium-headless-ubuntu16-quicksetup.sh
|
UTF-8
| 4,099
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# One-shot provisioning of a headless Selenium box on Ubuntu 16.04: trims
# the OS, installs browsers/drivers, then wires Xvfb, openbox, x11vnc and
# the Selenium server together as systemd services on display :90.
# OS Setup
sudo apt-get install -y aptitude ubuntu-minimal
sudo aptitude markauto -y '~i!~nubuntu-minimal'
sudo apt-get install -y linux-image-virtual openssh-server
sudo apt-get update
sudo apt-get upgrade -y
# Supporting Software
sudo apt-get install -y xvfb x11vnc unzip default-jre openbox
# Fix hostname
sudo hostname selenium.head
sudo sh -c 'echo $(hostname) > /etc/hostname'
sudo sh -c 'FQDN=$(hostname); sed -i "s/^127.0.0.1.*/127.0.0.1 localhost localhost.localdomain $FQDN/" /etc/hosts'
# Setup a swap (1 GiB file-backed swap)
sudo dd if=/dev/zero of=/swapfile bs=1M count=1024
sudo chmod 600 /swapfile
sudo mkswap /swapfile
sudo swapon /swapfile
echo '/swapfile none swap sw 0 0' | sudo tee -a /etc/fstab
# Create selenium user (all GUI services run as this user)
sudo useradd -m selenium
# Firefox
wget https://downloads.sourceforge.net/project/ubuntuzilla/mozilla/apt/pool/main/f/firefox-mozilla-build/firefox-mozilla-build_35.0.1-0ubuntu1_amd64.deb
sudo dpkg -i firefox-mozilla-build_35.0.1-0ubuntu1_amd64.deb
# Chrome
wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | sudo apt-key add -
sudo sh -c 'echo "deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google-chrome.list'
sudo apt-get update
sudo apt-get install -y google-chrome-stable
# Chromedriver
wget https://chromedriver.storage.googleapis.com/2.35/chromedriver_linux64.zip
unzip chromedriver_linux64.zip
chmod +x chromedriver
sudo mv -f chromedriver /usr/local/share/chromedriver
sudo ln -s /usr/local/share/chromedriver /usr/local/bin/chromedriver
sudo ln -s /usr/local/share/chromedriver /usr/bin/chromedriver
# xvfb — virtual framebuffer on display :90; the other services attach to it
sudo sh -c 'cat > /etc/systemd/system/xvfb.service << ENDOFPASTA
[Unit]
Description=X Virtual Frame Buffer Service
After=network.target
[Service]
User=selenium
ExecStart=/usr/bin/Xvfb :90 -screen 0 1024x768x24
ExecStop=killall Xvfb
[Install]
WantedBy=multi-user.target
ENDOFPASTA'
sudo systemctl enable xvfb.service
sudo systemctl start xvfb
# openbox — minimal window manager inside the virtual display
sudo sh -c 'cat > /etc/systemd/system/openbox.service << ENDOFPASTA
[Unit]
Description=Openbox Window Manager
After=xvfb.service
[Service]
User=selenium
Environment=DISPLAY=:90
ExecStart=/usr/bin/openbox-session
ExecStop=killall openbox
[Install]
WantedBy=multi-user.target
ENDOFPASTA'
sudo systemctl enable openbox.service
sudo systemctl start openbox
# x11vnc — VNC access to the virtual display for debugging
sudo sh -c 'cat > /etc/systemd/system/x11vnc.service << ENDOFPASTA
[Unit]
Description=x11vnc VNC Server
After=xvfb.service
[Service]
User=selenium
ExecStart=/usr/bin/x11vnc -ncache_cr -forever -display :90 -passwd cpanel1
ExecStop=killall x11vnc
[Install]
WantedBy=multi-user.target
ENDOFPASTA'
sudo systemctl enable x11vnc.service
sudo systemctl start x11vnc
# Selenium — standalone server on port 4444, driving Chrome via chromedriver
sudo mkdir -p /var/log/selenium /var/lib/selenium
sudo chmod 777 /var/log/selenium
sudo wget http://selenium-release.storage.googleapis.com/2.45/selenium-server-standalone-2.45.0.jar -P /var/lib/selenium/
sudo ln -s /var/lib/selenium/selenium-server-standalone-2.45.0.jar /var/lib/selenium/selenium-server.jar
sudo sh -c 'cat > /etc/systemd/system/selenium.service << ENDOFPASTA
[Unit]
Description=Selenium Standalone Server
After=xvfb.service
[Service]
Environment=DISPLAY=:90
Environment=DBUS_SESSION_BUS_ADDRESS=/dev/null
ExecStart=/sbin/start-stop-daemon -c selenium --start --background --pidfile /var/run/selenium.pid --make-pidfile --exec /usr/bin/java -- -jar /var/lib/selenium/selenium-server.jar -Dwebdriver.chrome.driver=/usr/local/share/chromedriver -Djava.security.egd=file:/dev/./urandom -log /var/log/selenium/selenium.log -port 4444
Type=forking
PIDFile=/var/run/selenium.pid
[Install]
WantedBy=default.target
ENDOFPASTA'
sudo systemctl enable selenium.service
sudo systemctl start selenium
# Cron jobs — kill stale browsers and keep the services alive
sudo sh -c 'crontab - << ENDOFPASTA
5 * * * * killall -o 2h firefox
15 * * * * killall -o 2h chromium-browser
*/5 * * * * service xvfb status >/dev/null || service xvfb start >/dev/null
*/5 * * * * service x11vnc status >/dev/null || service x11vnc start >/dev/null
*/5 * * * * service selenium status >/dev/null || service selenium start >/dev/null
ENDOFPASTA'
| true
|
72a83f2dbd0dd56bda09a1f5593c756a37a999e9
|
Shell
|
holta/mksdcard
|
/trees/1.0/usr/bin/xs-desktop
|
UTF-8
| 259
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/bash
# part of XSCE
# script to turn off the browser at boot
# Must run as root: enables and starts the olpc-dm display manager and
# removes the olpc user's .browser-at-boot marker file.
if [ "$(id -u)" -ne 0 ]; then
	echo "please become root to execute this command"
	exit 1
fi
systemctl enable olpc-dm.service
systemctl start olpc-dm.service
rm -f /home/olpc/.browser-at-boot
|
70c8bfe4b4924705ec3011c9186c6ad682d84770
|
Shell
|
ualex73/dsmr-reader-docker
|
/app/run.sh
|
UTF-8
| 4,591
| 3.859375
| 4
|
[] |
no_license
|
#!/bin/bash
set -eo pipefail
# Emit all arguments as one line prefixed with a "YYYY-mm-dd HH:MM:SS" stamp.
function LOG()
{
	dt=$(date '+%Y-%m-%d %H:%M:%S')
	printf '%s %s\n' "$dt" "$*"
}
# Wait (up to ~60 seconds) until the PostgreSQL server described by the
# DJANGO_DATABASE_* environment variables accepts connections, polling with
# pg_isready once per second. Returns 1 (after logging) on timeout.
function CheckDBConnection()
{
CMD=$(command -v pg_isready)
CMD="$CMD -h ${DJANGO_DATABASE_HOST} -p ${DJANGO_DATABASE_PORT} -U ${DJANGO_DATABASE_USER} -d ${DJANGO_DATABASE_NAME} -t 1"
LOG "INFO: Executing '${CMD}'"
timeout=60
# poll once per second; print a dot per failed attempt
while ! ${CMD} >/dev/null 2>&1
do
timeout=$(expr $timeout - 1)
if [[ $timeout -eq 0 ]]; then
LOG "ERROR: Could not connect to database server. Aborting..."
return 1
fi
echo -n "."
sleep 1
done
LOG "INFO: Connected to database successfully"
}
# Uppercase mode
DSMR_MODE=${DSMR_MODE^^}
# Normalise the mode; CLIENT and NO-DATALOGGER are accepted aliases.
if [ "${DSMR_MODE}" == "SERVER" ]; then
DSMR_MODE=SERVER
elif [ "${DSMR_MODE}" == "CLIENT" ] || [ "${DSMR_MODE}" == "DATALOGGER" ]; then
DSMR_MODE=DATALOGGER
elif [ "${DSMR_MODE}" == "SERVER-NO-DATALOGGER" ] || [ "${DSMR_MODE}" == "NO-DATALOGGER" ]; then
DSMR_MODE=SERVER-NO-DATALOGGER
else
LOG "ERROR: Invalid DSMR_MODE, only SERVER, DATALOGGER or SERVER-NO-DATALOGGER allowed"
# pause so a restarting container doesn't spin in a tight crash loop
sleep 60
exit 1
fi
# We only support:
# - SERVER
# - SERVER-NO-DATALOGGER
# - DATALOGGER
LOG ""
LOG "INFO: Start DSMR Reader - Mode=$DSMR_MODE"
# Set right serial permissions
if ls /dev/ttyUSB* 1>/dev/null 2>&1; then
chmod 666 /dev/ttyUSB*
fi
# Remove pids, they can cause issue during a restart
rm -f /var/tmp/*.pid /tmp/*.pid
# Check old environment values (legacy names override the new ones when set)
if [ -n "${DB_PORT}" ]; then DJANGO_DATABASE_PORT="${DB_PORT}"; fi
if [ -n "${DB_HOST}" ]; then DJANGO_DATABASE_HOST="${DB_HOST}"; fi
if [ -n "${DB_USER}" ]; then DJANGO_DATABASE_USER="${DB_USER}"; fi
if [ -n "${DB_NAME}" ]; then DJANGO_DATABASE_NAME="${DB_NAME}"; fi
if [ -n "${DSMR_USER}" ]; then DSMRREADER_ADMIN_USER="${DSMR_USER}"; fi
if [ -n "${DSMR_PASSWORD}" ]; then DSMRREADER_ADMIN_PASSWORD="${DSMR_PASSWORD}"; fi
if [ -n "${DATALOGGER_INPUT_METHOD}" ]; then DSMRREADER_DATALOGGER_INPUT_METHOD="${DATALOGGER_INPUT_METHOD}"; fi
if [ -n "${DATALOGGER_SERIAL_PORT}" ]; then DSMRREADER_DATALOGGER_SERIAL_PORT="${DATALOGGER_SERIAL_PORT}"; fi
if [ -n "${DATALOGGER_SERIAL_BAUDRATE}" ]; then DSMRREADER_DATALOGGER_SERIAL_BAUDRATE="${DATALOGGER_SERIAL_BAUDRATE}"; fi
if [ -n "${DATALOGGER_NETWORK_HOST}" ]; then DSMRREADER_DATALOGGER_NETWORK_HOST="${DATALOGGER_NETWORK_HOST}"; fi
if [ -n "${DATALOGGER_NETWORK_PORT}" ]; then DSMRREADER_DATALOGGER_NETWORK_PORT="${DATALOGGER_NETWORK_PORT}"; fi
if [ "${DSMR_MODE}" == "SERVER" ] || [ "${DSMR_MODE}" == "SERVER-NO-DATALOGGER" ]; then
# DB needs to be up-and-running before migrations can be applied
CheckDBConnection
# Run migrations and collect static assets
python3 manage.py migrate --noinput
python3 manage.py collectstatic --noinput
# Create an admin user
python3 manage.py dsmr_superuser
fi

# Path to the supervisord config; overridable (e.g. for tests).
SUPERVISORD_INI="${SUPERVISORD_INI:-/etc/supervisor.d/supervisord.ini}"

# Toggle the autostart flag of one [program:<name>] section of supervisord.ini.
#   $1 - program name, $2 - "true" or "false"
# Replaces 15 near-identical sed invocations in the original.
set_autostart()
{
sed -i '/^\[program:'"$1"'\]$/,/^\[/ s/^autostart=.*/autostart='"$2"'/' "$SUPERVISORD_INI"
}

if [ "${DSMR_MODE}" == "SERVER" ]; then
set_autostart dsmr_datalogger true
set_autostart dsmr_backend true
set_autostart dsmr_webinterface true
set_autostart nginx true
set_autostart dsmr_client_datalogger false
elif [ "${DSMR_MODE}" == "DATALOGGER" ]; then
set_autostart dsmr_datalogger false
set_autostart dsmr_backend false
set_autostart dsmr_webinterface false
set_autostart nginx false
set_autostart dsmr_client_datalogger true
else
# SERVER-NO-DATALOGGER: server components without the local datalogger
set_autostart dsmr_datalogger false
set_autostart dsmr_backend true
set_autostart dsmr_webinterface true
set_autostart nginx true
set_autostart dsmr_client_datalogger false
fi

# Run supervisor in the foreground as the container's main process
/usr/bin/supervisord -n
# End
| true
|
73285593c5c10349e9c6e9de275b333895f05b94
|
Shell
|
shenoyvvarun/twit-miner
|
/get_text.sh
|
UTF-8
| 596
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# For every unique id in column 2 of the input file ($1): fetch that user's
# timeline between the newest and oldest status ids recorded for them (up to
# count=200), parse it with parse4.py, then join the parsed text back onto
# the original rows, writing $1.txt.
for id in `cat $1 | cut -d' ' -f2 | uniq`
do
# newest (head) and oldest (tail) status id seen for this user in column 1
max_id=`grep $id $1 | cut -d' ' -f1 | head -1`
temp=`grep $id $1 | cut -d' ' -f1 | tail -1`
since_id=`expr $temp - 1`
curl --retry 5 --max-time 500 "https://api.twitter.com/1/statuses/user_timeline.json?include_entities=true&include_rts=true&exclude_replies=false&user_id="$id"&max_id="$max_id"&since_id="$since_id"&count=200" > output
python parse4.py
done > tmp
# merge each newline-plus-'u' continuation back onto the previous line
sed ':a;N;$!ba;s/\nu/ /g' tmp > $1.data
cat $1 | sort > file1.txt
cat $1.data | sort > file2.txt
join file1.txt file2.txt > $1.txt
rm tmp $1.data file1.txt file2.txt
| true
|
d90d59a49042f5b8bb2e2d933c1ecfb9fc829b49
|
Shell
|
alimpfard/avconv-buildpack
|
/bin/compile
|
UTF-8
| 2,077
| 3.75
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Heroku-style buildpack compile step for Libav: build (or reuse a cached
# build of) Libav into $BUILD_DIR/vendor/libav and expose it via .profile.d.
#
# Review fixes:
#  - cache archive was created as *.tar but moved as *.tar.gz (mv always failed)
#  - cached restore extracted "$CACHE_DIR" (a directory) instead of "$CACHE_FILE"
#  - profile script path used undefined $BUILD_PATH instead of $BUILD_DIR

# prefix each line of piped output so it nests under the arrow lines
indent() {
  sed -u 's/^/       /'
}

echo '-----> Install Libav'

BUILD_DIR=$1                           # app checkout (becomes the slug)
CACHE_DIR=$2                           # cache persisted between builds
VENDOR_DIR="${BUILD_DIR}/vendor"
INSTALL_DIR="${VENDOR_DIR}/libav"
CACHE_FILE="${CACHE_DIR}/libav.tar.gz"

LIBAV_VERSION="${LIBAV_VERSION:-12.3}"
LIBAV_VERSION_MAJOR=$(printf "%s" "$LIBAV_VERSION" | sed 's/\..*$//')
CONFIG_DIR="/etc/libav-$LIBAV_VERSION_MAJOR"

if [ ! -f "$CACHE_FILE" ]; then
  # no cached build: download, build and install libav from source
  LIBAV_FILE="libav-${LIBAV_VERSION}.tar.xz"
  LIBAV_DIR="libav-$LIBAV_VERSION"
  LIBAV_URL="https://libav.org/releases/${LIBAV_FILE}"

  echo "-----> Downloading libav $LIBAV_VERSION_MAJOR from $LIBAV_URL"
  wget "$LIBAV_URL" -P "$BUILD_DIR" | indent

  echo "-----> Extracting libav package"
  if [ ! -f "${BUILD_DIR}/${LIBAV_FILE}" ]; then
    echo "Error: package download failed? (expected ${BUILD_DIR}/${LIBAV_FILE})" | indent
    ls $BUILD_DIR | indent
    exit 1
  fi
  tar xvf "${BUILD_DIR}/${LIBAV_FILE}" | indent

  echo "-----> Building libav"
  cd "$LIBAV_DIR" || exit 1
  export CFLAGS="-I${INSTALL_DIR}/include"
  export LDFLAGS="-L${INSTALL_DIR}/lib"
  ./configure --prefix="$INSTALL_DIR"
  make && make install
  cd ..
  rm -rf "$LIBAV_DIR"

  echo "-----> Caching built stuff"
  cd "$VENDOR_DIR"
  REL_INSTALL_DIR="libav"
  # create the gzipped archive under the exact name mv expects (was ".tar")
  tar czf "${REL_INSTALL_DIR}.tar.gz" "${REL_INSTALL_DIR}"
  if [ ! -d "$CACHE_DIR" ]; then
    mkdir -p "$CACHE_DIR"
  fi
  mv "${REL_INSTALL_DIR}.tar.gz" "$CACHE_FILE"
else
  echo "-----> Extracting cached libav $CACHE_FILE -> $VENDOR_DIR"
  mkdir -p "$VENDOR_DIR"
  # extract the cache *file*, not the cache directory
  tar xzf "$CACHE_FILE" -C "$VENDOR_DIR"
fi

# copy the system policy into the vendored tree
# NOTE(review): assumes ${CONFIG_DIR}/policy.xml exists on the stack image
mkdir -p "${INSTALL_DIR}${CONFIG_DIR}"
cat "${CONFIG_DIR}/policy.xml" > "${INSTALL_DIR}/${CONFIG_DIR}/policy.xml"

echo "-----> Updating environment variables"
PROFILE_PATH="${BUILD_DIR}/.profile.d/libav.sh"
# at dyno runtime the vendored tree lives under $HOME
ACTUAL_INSTALL_PATH="${HOME}/vendor/libav"
mkdir -p "$(dirname "$PROFILE_PATH")"
cat > "$PROFILE_PATH" <<EOF
export PATH=$ACTUAL_INSTALL_PATH/bin:\$PATH
export LD_LIBRARY_PATH=$ACTUAL_INSTALL_PATH/lib:\$LD_LIBRARY_PATH:/usr/local/lib
EOF
echo "-----> All done\~"
| true
|
28d5869b7f4d7cad38aa1fc7f7669cd1d889b1a3
|
Shell
|
sandorkruk/gzh_red_disks
|
/new_ferengi/run_ferengi/run_ferengi.sh
|
UTF-8
| 432
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash
# Submit 1096 FERENGI IDL jobs (indices 0..1095) to Condor, one condor_submit
# per index, with per-job stdout/stderr/log files under log/.
IDL_BIN="/local/site/pkg/itt/idl/idl81/bin/idl"
for idx in {0..1095}; do
idl_args="-e \"ferengify_1,$idx\""
eval "condor_submit -a \"executable=${IDL_BIN}\" -a \"arguments=${idl_args}\" -a \"output=log/ferengi$idx.out\" -a \"error=log/ferengi$idx.error\" -a \"log=log/ferengi$idx.log\" -a \"getenv=true\" ferengi.submit"
#echo "module load idl/8.1 && condor_submit ... ferengi.submit"
done
| true
|
b1ec28f5c9623c2162045d7c51d866c18280f8da
|
Shell
|
CobbledSteel/ee290-robotics-firemarshal-workloads
|
/run_firesim.sh
|
UTF-8
| 1,438
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/sh
# Symlink the fed-rbdl-benchmark FireMarshal images into the FireSim deploy
# workloads directory (first run only), then launch the run farm, set up the
# infrastructure and run the workload with the rbdl runtime configs.
DEPLOY_DIR="/home/centos/chipyard/sims/firesim/deploy/workloads/fed-rbdl-benchmark"
RDIR=$(pwd)
cp firesim-fed-rbdl.json $DEPLOY_DIR/../
if [ ! -d "$DEPLOY_DIR" ]; then
# Take action if $DIR doesnt exist. #
echo "Installing symbolic links in ${DEPLOY_DIR}..."
mkdir $DEPLOY_DIR
cd $DEPLOY_DIR
ln -s ~/chipyard/sims/firesim/sw/firesim-software/images/fed-rbdl-benchmark.img fed-rbdl-benchmark.img
ln -s ~/chipyard/sims/firesim/sw/firesim-software/images/fed-rbdl-benchmark-bin fed-rbdl-benchmark-bin
ln -s ~/chipyard/sims/firesim/sw/firesim-software/images/fed-rbdl-benchmark-bin-dwarf fed-rbdl-benchmark-bin-dwarf
ln -s ~/chipyard/sims/firesim/sw/firesim-software/images/fed-rbdl-benchmark-rbdl.img fed-rbdl-benchmark-rbdl.img
ln -s ~/chipyard/sims/firesim/sw/firesim-software/images/fed-rbdl-benchmark-rbdl-bin fed-rbdl-benchmark-rbdl-bin
ln -s ~/chipyard/sims/firesim/sw/firesim-software/images/fed-rbdl-benchmark-rbdl-bin-dwarf fed-rbdl-benchmark-rbdl-bin-dwarf
fi
cd $RDIR
FSIM_CFG_DIR=$RDIR/firesim-configs
echo $FSIM_CFG_DIR
# all three phases use the same runtime/hwdb config pair
firesim launchrunfarm --runtimeconfigfile $FSIM_CFG_DIR/config_runtime_rbdl.ini --hwdbconfigfile $FSIM_CFG_DIR/config_hwdb.ini
firesim infrasetup --runtimeconfigfile $FSIM_CFG_DIR/config_runtime_rbdl.ini --hwdbconfigfile $FSIM_CFG_DIR/config_hwdb.ini
firesim runworkload --runtimeconfigfile $FSIM_CFG_DIR/config_runtime_rbdl.ini --hwdbconfigfile $FSIM_CFG_DIR/config_hwdb.ini
| true
|
7931b0c3db7cdf0920f5b3fdce2496334661a0f8
|
Shell
|
chardy/deployscripts
|
/lib/nosql.sh
|
UTF-8
| 859
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
# Install MongoDB 3.2 from the official mongodb.org apt repository and
# register + start it as a systemd service named "mongodb".
#
# Review fixes: `systemctl enable mongod` referenced a unit this function
# never creates (the unit written below is mongodb.service), and
# daemon-reload now runs *before* enable/start so systemd sees the new unit.
function install_mongodb
{
log "install_mongodb: Installing mongoDB..."
# make sure no mongodb clients is installed
# sudo apt-get remove mongodb-clients
# install mongodb from 10gen directly
apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv EA312927
echo "deb http://repo.mongodb.org/apt/ubuntu xenial/mongodb-org/3.2 multiverse" | sudo tee /etc/apt/sources.list.d/mongodb-org-3.2.list
apt-get -o Acquire::ForceIPv4=true update
apt-get -y install mongodb-org
cat > /etc/systemd/system/mongodb.service << EOF
[Unit]
Description=High-performance, schema-free document-oriented database
After=network.target
[Service]
User=mongodb
ExecStart=/usr/bin/mongod --quiet --config /etc/mongod.conf
[Install]
WantedBy=multi-user.target
EOF
# reload units first so the freshly written mongodb.service is known
systemctl daemon-reload
systemctl enable mongodb
systemctl start mongodb
}
| true
|
fbca23f140ff576960a37df40fcbaa61b5a27642
|
Shell
|
rockymontana/dotfiles-1
|
/zsh/exports.zsh
|
UTF-8
| 538
| 2.921875
| 3
|
[] |
no_license
|
#!/usr/bin/env zsh
# ============ Exports =================

# Default editor is vim
export EDITOR='vim'

# Stock Arch Linux
export ARCHFLAGS="-arch x86_64"

# Append each optional tool directory to PATH, but only when it exists:
# vendor perl, a personal bin, and the ruby 1.9.1 / 2.2.0 gem bins.
for dir in "/usr/bin/vendor_perl" "$HOME/bin" "$HOME/.gem/ruby/1.9.1/bin/" "$HOME/.gem/ruby/2.2.0/bin/"; do
test -d "$dir" && PATH="$PATH:$dir"
done
| true
|
c7fd031f76b6731ac87aa34bec7c4418be635c9c
|
Shell
|
explorerwjy/CUMC
|
/Exome-pipeline-Jiayao/ExmAdHoc.8.Alignment_Finisher.sh
|
UTF-8
| 6,365
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/bash
#$ -cwd -l mem=16G,time=6:: -N FinishBWA
# This script can be used to run the final steps of the bam to bam mapping scripts (ExmAln.1b) when they have completed the bwa-mem mapping but run out of time in the latter steps. It should be run within the alignment working directory, with the same arguments as the original mapping script and will select the appropriate steps to run based on the existing files in the directory. It will automatically trigger the pipeline if requestd.
# Usage notes:
# Please see ExmAln.1b.ReAlign_Bam_with_BWAmem.sh for full details
# The script should be run from within the mapping directory
# InpFil - (required) - Path to the aligned Bam file to be aligned
# RefFil - (required) - shell file containing variables with locations of reference files and resource directories; see list below for required variables
# LogFil - (optional) - File for logging progress
# TgtBed - (optional) - Exome capture kit targets bed file (must end .bed for GATK compatability) ; may be specified using a code corresponding to a variable in the RefFil giving the path to the target file- only required if calling pipeline
# PipeLine - P -(flag) - will start the GATK realign and recalibrate pipeline using the files generated by this script
# Flag - F - Fix mis-encoded base quality scores - see GATK manual. GATK will subtract 31 from all quality scores; used to fix encoding in some datasets (especially older Illumina ones) which starts at Q64 (see https://en.wikipedia.org/wiki/FASTQ_format#Encoding); This flag is only possibly necessary if calling the pipeline.
# Help - H - (flag) - get usage information
#list of variables required in reference file:
# $REF - reference genome in fasta format - must have been indexed using 'bwa index ref.fa'
# $EXOMPPLN - directory containing exome analysis pipeline scripts,
#list of required tools:
# samtools <http://samtools.sourceforge.net/> <http://sourceforge.net/projects/samtools/files/>
# bwa mem <http://bio-bwa.sourceforge.net/> <http://sourceforge.net/projects/bio-bwa/files/>
# java <http://www.oracle.com/technetwork/java/javase/overview/index.html>
# picard <http://picard.sourceforge.net/> <http://sourceforge.net/projects/picard/files/>
## This file also requires exome.lib.sh - which contains various functions used throughout the Exome analysis scripts; this file should be in the same directory as this script
## Note that htscmd bam2fq will generate a warning:
## [W::bam_hdr_read] EOF marker is absent. The input is probably truncated.
## This is not a problem, it is just a bug related to piping the stdin as the input, it can be ignored
###############################################################
#set default arguments
usage="
ExmAdHoc.9.Alignment_Finisher.sh -i <InputFile> -r <reference_file> -t <target intervals file> -l <logfile> -PH
-i (optional) - Aligned bam file
-r (optional) - shell file containing variables with locations of reference files and resource directories
-l (optional) - Log file
-t (optional) - Exome capture kit targets or other genomic intervals bed file (must end .bed for GATK compatability)
-P (flag) - Initiate exome analysis pipeline after completion of script
-F (flag) - Fix mis-encoded base quality scores - see GATK manual
-H (flag) - echo this message and exit
"
PipeLine="false"
FixMisencoded="false"
#get arguments
while getopts i:r:l:t:PFH opt; do
case "$opt" in
i) InpFil="$OPTARG";;
r) RefFil="$OPTARG";;
l) LogFil="$OPTARG";;
t) TgtBed="$OPTARG";;
P) PipeLine="true";;
F) FixMisencoded="true";;
H) echo "$usage"; exit;;
esac
done
#check all required paramaters present
# missing arguments fall back to files discovered in the working directory /
# values recorded in an existing *log file from the original mapping run
if [[ -z "$InpFil" ]]; then InpFil=$(ls | grep -m 1 bam$); fi
if [[ -z "$RefFil" ]]; then RefFil=$(grep -m 1 "Pipeline Reference File" *log | cut -d" " -f 5); fi
if [[ -z "$TgtBed" ]]; then TgtBed=$(grep -m 1 "Target Intervals File" *log | cut -d" " -f 5); fi
if [[ ! -e "$InpFil" ]] || [[ ! -e "$RefFil" ]]; then
echo "Missing/Incorrect required arguments"
echo "$usage"
echo "Provided arguments:"
echo " InpFil: "$InpFil" RefFil: "$RefFil" TgtBed: "$TgtBed
exit
fi
#Call the RefFil to load variables
RefFil=`readlink -f $RefFil`
source $RefFil
#Load script library
source $EXOMPPLN/exome.lib.sh #library functions begin "func"
#set local variables
BamNam=`echo $InpFil | sed s/.bwamem.*//`
echo " InpFil: "$BamNam" RefFil: "$RefFil" TgtBed: "$TgtBed
if [[ -z "$LogFil" ]]; then LogFil=$BamNam.BbB.log; fi # a name for the log file
AlnFil=$BamNam.bwamem.bam #filename for bwa-mem aligned file
SrtFil=$BamNam.bwamem.sorted.bam #output file for sorted bam
DdpFil=$BamNam.bwamem.mkdup.bam #output file with PCR duplicates marked
FlgStat=$BamNam.bwamem.flagstat #output file for bam flag stats
IdxStat=$BamNam.idxstats #output file for bam index stats
TmpLog=$BamNam.BwB.temp.log #temporary log file
TmpDir=$BamNam.BwB.tempdir; mkdir -p $TmpDir #temporary directory
#Sort the bam file by coordinate (skipped if the aligned bam was already consumed)
if [[ -e $AlnFil ]]; then
echo "Start Sort"
StepName="Sort Bam using PICARD"
StepCmd="java -Xmx4G -Djava.io.tmpdir=$TmpDir -jar $PICARD/SortSam.jar
INPUT=$AlnFil
OUTPUT=$SrtFil
SORT_ORDER=coordinate
CREATE_INDEX=TRUE"
echo $StepCmd
funcRunStep
rm $AlnFil #removed the "Aligned bam"
fi
#Mark the duplicates (skipped if the sorted bam was already consumed)
if [[ -e $SrtFil ]]; then
StepName="Mark PCR Duplicates using PICARD"
StepCmd="java -Xmx4G -Djava.io.tmpdir=$TmpDir -jar $PICARD/MarkDuplicates.jar
INPUT=$SrtFil
OUTPUT=$DdpFil
METRICS_FILE=$DdpFil.dup.metrics.txt
CREATE_INDEX=TRUE"
funcRunStep
rm $SrtFil ${SrtFil/bam/bai} #removed the "Sorted bam"
fi
#Get flagstat
StepName="Output flag stats using Samtools"
StepCmd="samtools flagstat $DdpFil > $FlgStat"
funcRunStep
#get index stats
StepName="Output idx stats using Samtools"
StepCmd="samtools idxstats $DdpFil > $IdxStat"
funcRunStep
#Call next steps of pipeline if requested (funcPipeLine checks $PipeLine)
NextJob="Run Genotype VCF"
QsubCmd="qsub -o stdostde/ -e stdostde/ $EXOMPPLN/ExmAln.2.HaplotypeCaller_GVCFmode.sh -i $DdpFil -r $RefFil -t $TgtBed -l $LogFil -P -B"
funcPipeLine
NextJob="Get basic bam metrics"
QsubCmd="qsub -o stdostde/ -e stdostde/ $EXOMPPLN/ExmAln.3a.Bam_metrics.sh -i $DdpFil -r $RefFil -l $LogFil"
funcPipeLine
#End Log
funcWriteEndLog
| true
|
5f3114c6b45742e966dbe6a5082f545911fe8281
|
Shell
|
holygeek/vimrc
|
/bin/gh
|
UTF-8
| 1,478
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/sh
# gh
# Created: Wed Sep 16 13:50:08 MYT 2015
# Print (default), copy (-c) or open (-o) the GitHub URL for a commit sha1
# or a file:line location within the current git repository.
me=$(basename $0)
usage() {
echo "SYNOPSIS
$me [-h] [-o] <sha1>
DESCRIPTION
Show or open the github commit link for <sha1>
OPTIONS
-c
Copy the url to clipboards
-h
Show this help message
-o
Open the link in a browser"
}
open_in_browser=
copy=
# NOTE(review): 'f' is accepted by getopts here but no case handles it —
# confirm whether it is vestigial or a missing feature
while getopts cfho opt
do
case "$opt" in
# $OPTARG with o:
c)
copy=t
;;
h)
usage
exit
;;
o)
open_in_browser=t
;;
\?)
echo "$me: Unknown option '$opt'"
exit 1
;;
esac
done
shift $(($OPTIND -1))
case "$1" in
*:*)
# file:line argument -> blob URL on the current branch with a #L<line> anchor
file_line=$1
item=blob/$(git symbolic-ref --short HEAD)
root=$(git rev-parse --show-toplevel)
wd=$(pwd)
if [ "$wd" != "$root" ]; then
# invoked from a subdirectory: prefix the repo-relative path
path=${wd#$root/}
file=${file_line%%:*}
file=$path/$file
line=${file_line#*:}
file_line=$file:$line
fi
#echo "file $file_line"
#exit 1
commit=$(echo $file_line|sed 's/:/#L/')
;;
*)
# plain sha1 (or empty -> HEAD) -> commit URL
item=commit
commit=${1:-$(git rev-parse HEAD)}
;;
esac
# rewrite the ssh remote (incl. host aliases like github.com_foo) to https
# and append the blob/commit path segment
commit_url=$(git config remote.origin.url|
sed \
-e 's,git@github.com\(_[^:]\+\)\?:,https://github.com/,' \
-e 's,\(\.git\)\?$,/'$item'/,')
#-e 's,git@github.com\(-[^:]\+\)\?:,http://github.com/,' \
url="$commit_url$commit"
if [ -n "$copy" ]; then
# NOTE(review): printf "$url" uses the url as the *format* string; a
# literal '%' in the url would be misinterpreted — consider printf '%s'
printf "$url" | xclip
# printf "$url" | xclip -selection secondary not used anymore
#printf "$url" | xclip -selection clipboard
fi
if [ -n "$open_in_browser" ]; then
${BROWSER:-google-chrome} "$url"
else
echo "$url"
fi
| true
|
ffaea7f1755602f4e87569d77665d38de64ac93c
|
Shell
|
cc81111/deploy-k8s
|
/cfg/scp_cert.sh
|
UTF-8
| 180
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
# Copy the generated etcd and kubernetes certificates to every node listed
# (one public IP per line) in /opt/k8s-deploy/allnode-pubip.
while read node
do
# take only the first whitespace-separated field of the line
set -- $node
ip=$1
scp /opt/cert/etcd/* root@$ip:/opt/etcd/ssl/;
scp /opt/cert/k8s/* root@$ip:/opt/kubernetes/ssl/;
done < /opt/k8s-deploy/allnode-pubip
| true
|
6d978c3614ef8f42f0f4cd35cbdbe665e95e2642
|
Shell
|
vduggen/shell-script-course
|
/mod3/chaves_flag.sh
|
UTF-8
| 3,017
| 3.9375
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
#
# listaUsuarios.sh - extracts users from /etc/passwd
#
# Site: https://4fasters.com.br
# Author/maintainer: Vitor Luiz Duggen
#
# Lists the user names from /etc/passwd: -s/-sort prints them sorted,
# -c/-capital prints them upper-cased (flags combine), -r/-random prints
# them unmodified; -h prints help, -v the version.
#
# Review fixes: the usage text was always echoed, even after a successful
# listing; removed a useless `cat`; quoted the `tr` character classes so
# they can never be glob-expanded.
# ------------------------------- VARIABLES ------------------------------ #
COMMAND_LIST_USERS="$(cut -d : -f 1 /etc/passwd)"
MESSAGE_USE="
List Users - [OPTIONS]
-h - Help Menu
-help - Help Menu
-v - Version
-version - Version
-r - List of random users
-random - List of random users
-s - List of ordered users
-sort - List of ordered users
-c - Capital User List
-capital - Capital User List
"
VERSION="v1.3"
KEY_LIST_ORDED=0
KEY_LIST_CAPITAL=0
# ------------------------------- EXECUTION ------------------------------ #
while test -n "$1"; do
case "$1" in
-h | -help) echo "$MESSAGE_USE" && exit 0 ;;
-v | -version) echo "$VERSION" && exit 0 ;;
-r | -random) echo "$COMMAND_LIST_USERS" && exit 0 ;;
-s | -sort) KEY_LIST_ORDED=1 ;;
-c | -capital) KEY_LIST_CAPITAL=1 ;;
*) echo "Command not found $MESSAGE_USE" && exit 1 ;;
esac
shift
done
[ $KEY_LIST_ORDED -eq 1 ] && COMMAND_LIST_USERS=$(echo "$COMMAND_LIST_USERS" | sort)
[ $KEY_LIST_CAPITAL -eq 1 ] && COMMAND_LIST_USERS=$(echo "$COMMAND_LIST_USERS" | tr '[:lower:]' '[:upper:]')
if [[ $KEY_LIST_CAPITAL -eq 1 || $KEY_LIST_ORDED -eq 1 ]]; then
# a listing was requested and printed; do NOT also dump the usage text
# (the original fell through and always echoed MESSAGE_USE)
echo "$COMMAND_LIST_USERS"
exit 0
fi
echo "$MESSAGE_USE"
# ------------------------------------------------------------------------ #
| true
|
09d9ed6e3343001b8690da50d4e3739729a9d10b
|
Shell
|
RubyLin065/bash_scripts
|
/read_line.sh
|
UTF-8
| 649
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# this file will output series of command to
# use python to control the news web scraping
# wish list will perform like following
# `cat Data/wishlist.txt` >> 台郡 聯發科 元大 國泰金 鴻海
#
# Builds cmdline.sh containing one backgrounded scraper invocation per
# non-empty wishlist line, executes it, then cleans up.
#
# Review fixes: `declare -c python_script` (invalid declare option) and
# `python_script = "..."` (spaces around `=` make bash run `python_script`
# as a command) both errored at runtime; the generated cmdline.sh was never
# made executable before being run via ./cmdline.sh.
python_script="scrp_news_copy.py"

./argsum.sh `cat Data/wishlist.txt` > wishcount_tmp.tmp

echo "sleep 1; " > cmdline.sh
while read -r line;
do
# skip empty lines so we never emit a scraper call with no argument
if (( ${#line} >= 1 ))
then
echo "python $python_script $line &" >> cmdline.sh
fi
done < Data/wishlist.txt
echo "sleep 60" >> cmdline.sh
#exe
chmod +x cmdline.sh
./cmdline.sh
sleep 10;
rm cmdline.sh
# for i in $@
# echo "python scrp_new_copy.py $i "
# done
| true
|
0dba7603a1b13007622dea22018209b9a84dbe34
|
Shell
|
benauthor/dotfiles
|
/bashrc
|
UTF-8
| 4,581
| 3.09375
| 3
|
[] |
no_license
|
# ~/.bashrc: executed by bash(1) for non-login shells.
# If not running interactively, don't do anything
case $- in
*i*) ;;
*) return;;
esac
# shush mac zsh warning
export BASH_SILENCE_DEPRECATION_WARNING=1
# don't put duplicate lines or lines starting with space in the history.
# See bash(1) for more options
HISTCONTROL=ignoreboth
# append to the history file, don't overwrite it
shopt -s histappend
# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
HISTSIZE=5000
HISTFILESIZE=10000
# globstar, requires bash 4
# If set, the pattern "**" used in a pathname expansion context will
# match all files and zero or more directories and subdirectories.
# shopt -s globstar
# check the window size after each command and, if necessary,
# update the values of LINES and COLUMNS.
shopt -s checkwinsize
# make less more friendly for non-text input files, see lesspipe(1)
[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"
# set a fancy prompt (non-color, unless we know we "want" color)
case "$TERM" in
xterm-color) color_prompt=yes;;
esac
# enable color support of ls and also add handy aliases
if [ -x /usr/bin/dircolors ]; then
test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
alias ls='ls --color=auto'
#alias dir='dir --color=auto'
#alias vdir='vdir --color=auto'
alias grep='grep --color=auto'
alias fgrep='fgrep --color=auto'
alias egrep='egrep --color=auto'
fi
# some more ls aliases
alias ll='ls -alF'
alias la='ls -A'
alias l='ls -CF'
if [ -f ~/.bash_aliases ]; then
. ~/.bash_aliases
fi
# enable programmable completion features (you don't need to enable
# this, if it's already enabled in /etc/bash.bashrc and /etc/profile
# sources /etc/bash.bashrc).
if ! shopt -oq posix; then
if [ -f /usr/share/bash-completion/bash_completion ]; then
. /usr/share/bash-completion/bash_completion
elif [ -f /etc/bash_completion ]; then
. /etc/bash_completion
fi
fi
# git completion
source /Library/Developer/CommandLineTools/usr/share/git-core/git-completion.bash
# pretty prompt
source /Library/Developer/CommandLineTools/usr/share/git-core/git-prompt.sh
export GIT_PS1_SHOWDIRTYSTATE=1
# Rebuild PS1 before each prompt: user@host:dir + git state, with the '$'
# colored red when the previous command exited non-zero.
__prompt_command() {
local EXIT="$?" # This needs to be first
PS1=""
local RCol='\[\e[0m\]'
local Red='\[\e[0;31m\]'
local Blu='\[\e[0;34m\]'
local LBlu='\[\e[0;94m\]'
local LCy='\[\e[0;96m\]'
PS1+="${LCy}\u@\h:${LBlu}\W${Blu}$(__git_ps1)"
if [ $EXIT != 0 ]; then
PS1+="${Red}$ ${RCol}" # Add red if exit code non 0
else
PS1+="${LCy}$ ${RCol}"
fi
}
PROMPT_COMMAND=__prompt_command # Func to gen PS1 after CMDs
#export JAVA_8_HOME=$(/usr/libexec/java_home -v1.8)
#export JAVA_9_HOME=$(/usr/libexec/java_home -v1.9 2> /dev/null)
#alias java8='export JAVA_HOME=$JAVA_8_HOME'
#alias java9='export JAVA_HOME=$JAVA_9_HOME'
#default java8
#export JAVA_HOME=$JAVA_9_HOME
# our version of gpg doesn't start a new agent every
# time, so this easy way is safe
eval $( gpg-agent --daemon 2>/dev/null )
# added by travis gem
[ -f /Users/bender/.travis/travis.sh ] && source /Users/bender/.travis/travis.sh
# misc local utils
export PATH=~/local/bin:$PATH
# go
export GOPATH=~/go
export PATH=~/go/bin:$PATH
export CGO_CXXFLAGS_ALLOW='-lpthread'
source ~/.gimme/envs/go1.18.5.env
# sweet inline plotting
export ITERMPLOT=rv
export MPLBACKEND="module://itermplot"
# include .bashrc_hidden if it exists
# if i.e. I don't want to version control some secrets
if [ -f ~/.bashrc_hidden ]; then
. ~/.bashrc_hidden
fi
# allow ctl-s for forward history
# because I don't use flow control
stty -ixon
# running out of file handles???
ulimit -S -n $((10240 * 2))
export SSH_ENV="${HOME}/.ssh/environment"
# Start a fresh ssh-agent, persist its environment to $SSH_ENV (commenting
# out the echo line so sourcing it is silent), and load the default key.
start_ssh_agent() {
echo "Initialising new SSH agent..."
ssh-agent -s | sed 's/^echo/#echo/' > "${SSH_ENV}"
echo succeeded
chmod 600 "${SSH_ENV}"
. "${SSH_ENV}" > /dev/null
ssh-add -k;
}
# Source SSH settings, if applicable
# Reuse the agent recorded in $SSH_ENV when it is still running; otherwise
# start a new one.
load_ssh_session() {
if [ -f "${SSH_ENV}" ]; then
. "${SSH_ENV}" > /dev/null
ps aux ${SSH_AGENT_PID} | grep 'ssh-agent -s$' > /dev/null || {
start_ssh_agent;
}
else
start_ssh_agent;
fi
}
# load_ssh_session
# added by ghcup
source /Users/evan.bender/.ghcup/env
# DIY docker-for-mac VM
# export DOCKER_HOST=ssh://vagrant@dockerbox:2222 # /var/run/docker.sock
# ssh-add -k /Users/evan.bender/dd/busly/.vagrant/machines/default/virtualbox/private_key
# export DOCKER_BUILDKIT=1
| true
|
801c226bd240835e16d798ba8d8fcc4d3a6e361f
|
Shell
|
Techo-Bingo/SystemController
|
/Interface/Scripts/collect_binlog.sh
|
UTF-8
| 1,885
| 3.484375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
source ${g_home_dir}/common_function.sh
# must run as root ("请使用 root 执行" = "please run as root")
[ "$(whoami)" = 'root' ] || report_err 10 "请使用 root 执行"
# requires the UBP service profile ("环境不支持" = "environment not supported")
[ -f /opt/UBP/svc_profile.sh ] || report_err 15 "环境不支持"
source /opt/UBP/svc_profile.sh
source /etc/profile
# $1 selects the look-back window: 一天 = 1 day, 三天 = 3 days, 一周 = 1 week
DAYS_INDEX=$1
if [ "${DAYS_INDEX}" = "一天" ]
then
DAYS=1
elif [ "${DAYS_INDEX}" = "三天" ]
then
DAYS=3
elif [ "${DAYS_INDEX}" = "一周" ]
then
DAYS=7
fi
# refuse to ship binlog dumps larger than 2 GiB
LIMIT_SIZE=2147483648
START_DATE="$(date -d "-${DAYS} DAY" '+%Y-%m-%d') 00:00:00"
END_DATE="$(date +'%Y-%m-%d') 23:59:59"
# Dump all binlog events between START_DATE and END_DATE from every binary
# log reported by `show binary logs`, appending them (decoded, verbose) to
# binlog_events.log inside g_task_dir.
function get_binlog()
{
[ -d "${EAPP_MYSQL_DATADIR}" ] || report_err "25" "EAPP_MYSQL_DATADIR error"
cd ${g_task_dir}
local binlog_list='binlog_list.ini'
/opt/UBP/bin/exec_sql<<EOF >${binlog_list} 2>/dev/null
show binary logs\G
EOF
if [ $? -ne 0 ]
then
report_err '35' 'Exec sql failed'
fi
if [ ! -s "${binlog_list}" ]
then
report_err '40' 'No binlog found'
fi
local binlog_store='binlog_events.log'
for log in $(grep 'Log_name:' ${binlog_list} |awk '{print $2}')
do
echo "=========== mysql-bin: $log =============" >>${binlog_store}
mysqlbinlog --no-defaults -vv --base64-output=DECODE-ROWS --start-datetime="${START_DATE}" --stop-datetime="${END_DATE}" ${EAPP_MYSQL_DATADIR}/${log} >>${binlog_store}
done
# NOTE(review): $? here only reflects the *last* mysqlbinlog of the loop
if [ $? -ne 0 ]
then
report_err '70' 'mysqlbinlog exec failed'
fi
report_info '70' 'mysqlbinlog to file finish'
# enforce the 2 GiB size cap before handing the file over
if [ $(ls -l ${binlog_store}|awk '{print $5}') -gt ${LIMIT_SIZE} ]
then
rm -rf ${binlog_store}
report_err '75' "Binlog file bigger than ${LIMIT_SIZE}"
fi
}
# Package the extracted binlog output and report the final archive path.
# Fix: the original re-declared pack_name via `local pack_name=$(compress …)`,
# so the following `$?` tested the (always 0) status of `local`, never the
# status of `compress` itself — failures were silently ignored.
function compress_pack()
{
cd ${g_task_dir}
local pack_name="mysql_binlog"
report_info '92' "Compress ${pack_name} start..."
local packed
packed=$(compress ${pack_name})
[ $? -ne 0 ] && report_err '95' "Compress ${pack_name} failed"
report_info '100' "${g_task_dir}/${packed}"
}
# Entry point: extract the requested binlog window, then pack it up.
function main()
{
get_binlog
compress_pack
}
main
exit 0
| true
|
1e1c4e2a7d9608300656261cd86b3f12d9d0d2ae
|
Shell
|
double128/lab-login-shell-OLD
|
/old/eofkiller.sh
|
UTF-8
| 330
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
#
# EOF KILLER
# Stops silly naughty boys from invoking CTRL+D. This needs to be done,
# as CTRL+D isn't a signal (like SIGINT) and rather an actual piece of
# data sent to the terminal to invoke an EOF.
# NOTE(review): this SIGKILLs *every* process owned by the current user
# (pgrep -U), including this script's own shell — confirm that killing more
# than just the login shells is intended.
for pid in $( pgrep -U $(whoami) ); do
logger "CTRL+D invoked, killing all login shell PIDs"
kill -9 $pid
done
| true
|
5a5ecdb68b88fc583cdba0c62ae41f232fc4928e
|
Shell
|
jonsag/jsRename
|
/jsrename.sh
|
UTF-8
| 449
| 4.15625
| 4
|
[] |
no_license
|
#!/bin/bash
# script to rename all spaces, commas and hashtags in files and directories recurcively
# $1: character used in place of ' ', '#' and ','; defaults to '_'
REPLACEMENT_CHAR=$1
if [ -z "$REPLACEMENT_CHAR" ]; then
REPLACEMENT_CHAR="_"
fi
echo "Will replace with the character $REPLACEMENT_CHAR"
echo
# sed expression substituting every space, '#' or ',' with the replacement
SED_STRING='s/[ #,]/'$REPLACEMENT_CHAR'/g'
# NOTE(review): `read` without -r mangles backslashes in names, and renaming
# parent directories while `find` is still walking them can skip entries —
# verify on a deep tree before relying on this.
find . | while read NAME; do
C=`basename "$NAME"`
B=`echo ${NAME%$C} | sed "$SED_STRING"`
rename -v "$SED_STRING" "$B`basename "$NAME"`"
done
| true
|
d652615f9be6ae8cbd7e67cbcd4e1b6f40773c35
|
Shell
|
rooty0/backup-utils
|
/mysql.sh
|
UTF-8
| 1,658
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/sh
#
# Backup of MySQL v.1.2
# (c) Stanislav Rudenko, me [at] rooty [d0t] name
#
# Dumps every database (except BACKUP_DB_EXCEPTIONS) into a timestamped
# directory, prunes backups older than OLD_BACKUPS weeks, and optionally
# compresses each dump. All paths/credentials come from ./.config.
BASEDIR=$(dirname $0)
. "${BASEDIR}/.config"
NOW=$(/bin/date +"%Y.%m.%d.%H.%M")
DBS="$($PATH_BIN_MYSQL --defaults-file=${MYSQL_DEF_FILE} -Bse 'show databases')"
if [ -z "${DBS}" ]
then
echo "!!!DATABASE LIST IS EMPTY!!!" 1>&2
exit 1
fi
[ ! -d "${PATH_MYSQL_BACKUP}/${NOW}" ] && mkdir -p "${PATH_MYSQL_BACKUP}/${NOW}"
echo '>>> Creating Backup of databases'
for DB in ${DBS}
do
# skip databases listed in BACKUP_DB_EXCEPTIONS
for DB_EXCEPTION in ${BACKUP_DB_EXCEPTIONS}
do
[ "${DB}" = "${DB_EXCEPTION}" ] && continue 2
done
echo "] Processing database \"${DB}\""
${PATH_BIN_MYSQLDUMP} --defaults-file=${MYSQL_DEF_FILE} -f ${DB} > ${PATH_MYSQL_BACKUP}/${NOW}/mydb_backup_${DB}_database.sql
[ $? -gt 0 ] && echo "Dump fail for \"${DB}\"" 1>&2
done
echo '>>> Cleaning old Backups'
#/usr/bin/find ${PATH_MYSQL_BACKUP}/* -mtime +${OLD_BACKUPS}w -type d -exec /bin/rm -rvf {} \;
/usr/bin/find ${PATH_MYSQL_BACKUP}/* -maxdepth 0 -mtime +${OLD_BACKUPS}w -type d | /usr/bin/xargs /bin/rm -rvf
if [ ${COMPRESS_FILES} = "yes" ]
then
echo ">>> Creating an archive using ${PATH_BIN_ARCHIVE}"
for DB_FILE in $(ls -1 "${PATH_MYSQL_BACKUP}/${NOW}" | xargs)
do
# compress at low priority; only delete the original on success
/usr/bin/nice -n 15 ${PATH_BIN_ARCHIVE} ${ARCHIVE_OPTIONS} "${PATH_MYSQL_BACKUP}/${NOW}/${DB_FILE}.7z" "${PATH_MYSQL_BACKUP}/${NOW}/${DB_FILE}" >/dev/null
if [ $? -eq 0 ]
then
rm -rf ${PATH_MYSQL_BACKUP}/${NOW}/${DB_FILE}
else
echo "!!!!! SOMETHING WRONG HAPPENS DUE COMPRESS FILE ${DB_FILE} !!!!!" 1>&2
fi
done
fi
echo ">>> Operation took time from ${NOW} to `/bin/date +"%d.%m.%Y.%H.%M"`"
exit 0
| true
|
741012344af5a784d6093ba60b2aafd53b188567
|
Shell
|
fooofei/rustscan-build
|
/build.sh
|
UTF-8
| 531
| 4.03125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Obtain the rustscan docker image: pull the published image on amd64,
# otherwise (e.g. arm64) clone RustScan and build the image locally.
cur=$(dirname "$(readlink -f $0)")
set -x

# Normalise `uname -m` machine names to docker-style architecture names.
ARCH=$(uname -m)
case $ARCH in
x86_64)
ARCH=amd64
;;
aarch64)
ARCH=arm64
;;
esac

# Clone upstream RustScan and build the image from its Dockerfile.
build_from_source() {
local workdir=$cur/build_rustscan
mkdir -p $workdir
cd $workdir
git clone https://github.com/RustScan/RustScan.git
cd RustScan
docker build . -t rustscan/rustscan:latest
}

if [ "$ARCH" == "amd64" ]; then
# the published image covers amd64, so no local build is needed
docker pull rustscan/rustscan:latest
else
build_from_source
fi
|
f371e6cc113bf6d0ea095ced79b24d99a45ad5d1
|
Shell
|
aasaidane/docker-powerdns
|
/src/init.sh
|
UTF-8
| 2,417
| 3.578125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# Init script
#
# Fills in the PowerDNS server and PowerDNS Admin GUI config templates from
# environment variables, creates their SQLite databases on first run,
# optionally installs an ad-block cron job, then starts supervisord.
###########################################################
# Thanks to http://stackoverflow.com/a/10467453
# sedeasy PATTERN REPLACEMENT FILE
# In-place replace every occurrence of PATTERN with REPLACEMENT in FILE,
# escaping sed metacharacters in both arguments first.
function sedeasy {
sed -i "s/$(echo $1 | sed -e 's/\([[\/.*]\|\]\)/\\&/g')/$(echo $2 | sed -e 's/[\/&]/\\&/g')/g" $3
}
# Generate a random API key unless one was supplied via the environment.
if [ -z "$API_KEY" ]; then
# Generate a random API Key everytime so only this Docker knowns it, not everybody
API_KEY=`dbus-uuidgen`
fi
if [ -z "$SOA_NAME" ]; then
# use default
SOA_NAME="a.misconfigured.powerdns.server"
fi
# Path where DBs will be stored
# NOTE(review): assumes DATA_DIR is provided by the container environment -- confirm.
POWERDNS_DB_PATH="$DATA_DIR/powerdns"
POWERDNSGUI_DB_PATH="$DATA_DIR/powerdnsgui"
# Create directory if they does not exist
mkdir -p $POWERDNS_DB_PATH
mkdir -p $POWERDNSGUI_DB_PATH
# Update PowerDNS Server config file (replaces placeholder tokens in the template)
sedeasy "api-key=API_KEY" "api-key=$API_KEY" /etc/pdns/pdns.conf
sedeasy "default-soa-name=SOA_NAME" "default-soa-name=$SOA_NAME" /etc/pdns/pdns.conf
sedeasy "gsqlite3-database=DATABASE_PATH" "gsqlite3-database=$POWERDNS_DB_PATH/db" /etc/pdns/pdns.conf
# Add custom DNS entries
sedeasy "forward-zones-recurse=.=CUSTOM_DNS" "forward-zones-recurse=.=$CUSTOM_DNS" /etc/pdns/recursor.conf
# Update PowerDNS Admin GUI configuration file
sedeasy "PDNS_API_KEY = 'PDNS_API_KEY'" "PDNS_API_KEY = '$API_KEY'" /usr/share/webapps/powerdns-admin/config.py
sedeasy "SQLALCHEMY_DATABASE_URI = 'SQLALCHEMY_DATABASE_URI'" "SQLALCHEMY_DATABASE_URI = 'sqlite:///$POWERDNSGUI_DB_PATH/db'" /usr/share/webapps/powerdns-admin/config.py
# Create SQLite database for PowerDNS if it's doesn't exist
if ! [ -f "$POWERDNS_DB_PATH/db" ]; then
sqlite3 $POWERDNS_DB_PATH/db < /usr/share/doc/pdns/schema.sqlite3.sql
fi
# Create SQLite database for PowerDNS Admin if it's doesn't exist
if ! [ -f "$POWERDNSGUI_DB_PATH/db" ]; then
sqlite3 $POWERDNSGUI_DB_PATH/db ".databases"
/usr/share/webapps/powerdns-admin/create_db.py
fi
# Fix permissions
find $DATA_DIR -type d -exec chmod 775 {} \;
find $DATA_DIR -type f -exec chmod 664 {} \;
chown -R nobody:nobody $DATA_DIR
# NOTE(review): unquoted test errors out if ENABLE_ADBLOCK is unset/empty --
# confirm the image always sets it.
if [ $ENABLE_ADBLOCK = true ]; then
# Run at least the first time
/root/updateHosts.sh
# Initialize the cronjob to update hosts, if feature is enabled
cronFile=/tmp/buildcron
printf "SHELL=/bin/bash" > $cronFile
printf "\n$CRONTAB_TIME /usr/bin/flock -n /tmp/lock.hosts /root/updateHosts.sh\n" >> $cronFile
crontab $cronFile
rm $cronFile
fi
# Start supervisor
/usr/bin/supervisord -c /etc/supervisord.conf
| true
|
89a0249447d321072bbbe3012946264985cf087c
|
Shell
|
mrfireboy/cf
|
/msysctl
|
UTF-8
| 2,627
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
# --------------------------------------------------
# function:
#   configure system parameters (sysctl bridge settings, PAM/ulimit limits,
#   and systemd default limits) using the cf mod_config/rep_config helpers.
# Idempotent: each section checks for its marker/value before appending.
# --------------------------------------------------

# mode mark -- append the marker comment once (grep reads the file directly;
# the original piped it through a useless `cat`).
if ! grep -qE "^# mode added option$" /etc/sysctl.conf; then
  echo "# mode added option" >> /etc/sysctl.conf
fi
/root/cf/mod_config /etc/sysctl.conf "net.bridge.bridge-nf-call-ip6tables=1" "# mode added option"
/root/cf/mod_config /etc/sysctl.conf "net.bridge.bridge-nf-call-iptables=1" "# mode added option"
/root/cf/mod_config /etc/sysctl.conf "net.bridge.bridge-nf-call-arptables=1" "# mode added option"
# make effect
/sbin/sysctl -p &>/dev/null

# ulimit conf and systemd conf
if ! grep -qP "^session[\t ]+required[\t ]+.*pam_limits.so" /etc/pam.d/login; then
  echo "# The following line was added by cf msysctl script" >> /etc/pam.d/login
  echo "session required pam_limits.so" >> /etc/pam.d/login
fi
if ! grep -qP "^root[\t ]+-[\t ]+nofile[\t ]+1006154" /etc/security/limits.conf; then
  echo "# The following line was added by cf msysctl script" >> /etc/security/limits.conf
  echo "root - nofile 1006154" >> /etc/security/limits.conf
fi

if [ -f /etc/security/limits.d/90-nproc.conf ]; then
  # Rebuild the file keeping only comment lines, then re-append our values;
  # only overwrite the real file when the content actually changed.
  sed -re "/^([^#]|#[^ ])/ d " /etc/security/limits.d/90-nproc.conf > /root/cf/tmp/Hnproc$$.tmp
  {
    echo "* soft nproc 1024"
    echo "root soft nproc unlimited"
    echo "daemon soft nproc 10000"
  } >> /root/cf/tmp/Hnproc$$.tmp
  if ! cmp /etc/security/limits.d/90-nproc.conf /root/cf/tmp/Hnproc$$.tmp &>/dev/null; then
    cp -f /root/cf/tmp/Hnproc$$.tmp /etc/security/limits.d/90-nproc.conf
  fi
  rm -f /root/cf/tmp/Hnproc$$.tmp
fi

# systemd defaults -- skipped on RHEL/CentOS 5.x and 6.x (no systemd there);
# grep reads the release file directly, 2>/dev/null covers a missing file.
if ! grep -E " release [56]\..*" /etc/redhat-release 2>/dev/null > /dev/null; then
  /root/cf/rep_config "/etc/systemd/system.conf" "DefaultLimitNOFILE=.*" "uncomment"
  /root/cf/mod_config "/etc/systemd/system.conf" "DefaultLimitNOFILE=1006154"
  /root/cf/rep_config "/etc/systemd/system.conf" "DefaultLimitNPROC=.*" "uncomment"
  /root/cf/mod_config "/etc/systemd/system.conf" "DefaultLimitNPROC=infinity"
  /root/cf/rep_config "/etc/systemd/system.conf" "DefaultTimeoutStartSec=.*" "uncomment"
  /root/cf/mod_config "/etc/systemd/system.conf" "DefaultTimeoutStartSec=30min"
  /root/cf/rep_config "/etc/systemd/system.conf" "DefaultTimeoutStopSec=.*" "uncomment"
  /root/cf/mod_config "/etc/systemd/system.conf" "DefaultTimeoutStopSec=30min"
  systemctl daemon-reexec
fi
| true
|
7e247d49ad14012473e8b3826e6ed20f4b87cd07
|
Shell
|
noob9527/dotfiles
|
/config/utils.sh
|
UTF-8
| 2,204
| 4.34375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Shared shell helper functions; meant to be sourced by other scripts.
# Resolve this file's directory so sibling scripts can be sourced reliably.
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Provides the colorful::* helpers used below (e.g. colorful::primary).
source "$DIR/colorful.sh"
# Join all remaining arguments with the separator given as $1.
# Example: join_by , a b c  ->  "a,b,c"
join_by() {
  local sep="$1"
  shift
  local IFS="$sep"
  echo "$*"
}
# Write a timestamped error message to stderr.
err() {
  local stamp
  stamp=$(date +'%Y-%m-%dT%H:%M:%S%z')
  echo "[${stamp}]: $@" >&2
}
# Succeed iff the named command can be found (uses bash's hash lookup).
has_cmd() {
  hash "$1" 2>/dev/null
}
# Print a coarse operating-system family name derived from $OSTYPE.
get_os() {
  local family
  case "$OSTYPE" in
    solaris*) family="SOLARIS" ;;
    darwin*)  family="OSX" ;;
    linux*)   family="LINUX" ;;
    bsd*)     family="BSD" ;;
    msys*)    family="WINDOWS" ;;
    *)        family="unknown: $OSTYPE" ;;
  esac
  echo "$family"
}
# Prompt the user with "$1 [y/n]: " and read one answer.
# Returns 0 for y/yes, 1 for n/no, 2 for anything else.
confirm() {
  local answer
  read -p "$1 [y/n]: " answer
  case "$answer" in
    [yY]*) return 0 ;;
    [nN]*) return 1 ;;
    *)     return 2 ;;
  esac
}
# $1: package
# $2: package_manager
# Ask the user to confirm installing $1 (optionally "via $2").
# NOTE(review): relies on colorful::primary from the sourced colorful.sh.
# NOTE(review): 'local pm=$2' always sets pm (possibly to ""), so the
# ${pm+...} expansion fires even when no package manager was passed,
# printing "via ''" -- ${pm:+...} was probably intended; confirm.
confirm_install() {
local package=$1
local pm=$2
confirm "$(colorful::primary "Are you going to install '$package'${pm+" via '$pm'"}?")"
}
#######################################
# Install a package after interactive confirmation.
# Arguments:
#   $1: package
#   $2: package_manager (if not present, use apt or brew)
# Returns:
#   0: install successful
#   1: install failure (or no known package manager)
#   2: install cancelled by the user
#   else: package manager's own failure status
#######################################
package_manager_install() {
  local package=$1
  local pm=$2
  if [[ -n ${pm} ]]; then
    if ! confirm_install ${package} ${pm}; then
      return 2
    fi
    # Fix: the original tested the literal string "pm" ([[ pm = 'apt-get' ]]),
    # which is always false, so apt-get never received the intended -y flag.
    if [[ ${pm} == 'apt-get' ]]; then
      # apt-get prompts again on its own; -y suppresses the double prompt.
      sudo ${pm} install ${package} -y
    else
      sudo ${pm} install ${package}
    fi
    return $?
  fi
  # No package manager specified: pick whichever of apt-get/brew exists.
  if has_cmd 'apt-get'; then
    package_manager_install "$package" 'apt-get'
    return $?
  elif has_cmd 'brew'; then
    package_manager_install "$package" 'brew'
    return $?
  else
    err "I have not known how to install $package yet, seems like you have to install it manually!"
    return 1
  fi
}
| true
|
0da5e0c01a56029ff845c206076c380c646067bc
|
Shell
|
blaizedsouza/kube-cluster-osx
|
/src/kube-cluster-install.command
|
UTF-8
| 1,552
| 3.15625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# kube-cluster-install.command
#
# Create the ~/kube-cluster directory tree in the user's home folder and copy
# the app's bundled binaries, cloud-init data, settings, fleet units and
# Kubernetes files there. $1 is the app's Resources folder.

# mkdir -p is idempotent, so re-running the installer no longer errors out
# on directories that already exist (the original bare mkdir did).
mkdir -p ~/kube-cluster/{tmp,logs,bin,cloud-init,settings,fleet,kubernetes,kube}

# cd to App's Resources folder; abort if it is missing so the copies below
# never run from the wrong directory.
cd "$1" || exit 1

# copy bin files to ~/kube-cluster/bin
cp -f "$1"/bin/* ~/kube-cluster/bin
rm -f ~/kube-cluster/bin/gen_kubeconfig
chmod 755 ~/kube-cluster/bin/*
# copy user-data
cp -f "$1"/cloud-init/* ~/kube-cluster/cloud-init
# copy settings
cp -f "$1"/settings/* ~/kube-cluster/settings
# copy k8s files
cp "$1"/k8s/kubectl ~/kube-cluster/kube
chmod 755 ~/kube-cluster/kube/kubectl
cp "$1"/k8s/add-ons/*.yaml ~/kube-cluster/kubernetes
# linux binaries
cp "$1"/k8s/kube.tgz ~/kube-cluster/kube
# copy fleetctl
cp -f "$1"/files/fleetctl ~/kube-cluster/bin
# copy fleet units
cp -R "$1"/fleet/ ~/kube-cluster/fleet

# Download and install iTerm2 if it is not present yet.
APP="/Applications/iTerm.app"
if [ ! -d "$APP" ]
then
    ITERM_ZIP="$(mktemp)"
    trap "rm -f '${ITERM_ZIP}'" EXIT
    curl -Lso "${ITERM_ZIP}" https://iterm2.com/downloads/stable/latest
    unzip "${ITERM_ZIP}" -d /Applications/
fi
# initial init
open -a iTerm.app "$1"/first-init.command
| true
|
f3912a449d7e4fcb3a0aa2f8b8b2f47d7942b6b0
|
Shell
|
jessieuehling/popCNV
|
/pipeline/01_aln.sh
|
UTF-8
| 2,078
| 3.1875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/bash
#SBATCH --mem 4G --ntasks 8 --nodes 1 -J bwa
#SBATCH --out logs/bwa.%a.log --time 8:00:00
# Align paired- or single-end reads with bwa mem for one sample (selected by
# the SLURM array index or the first CLI argument) listed in samples.csv.
module load bwa/0.7.15
module unload java
module load java/8
module load picard
# NOTE(review): SAM_CN_CENTER is defined but never used; the read group
# below uses $CENTER instead -- confirm which was intended.
SAM_CN_CENTER=UNK
CONFIG=config.txt
GENOMEDIR=genome
READSDIR=input
BAMDIR=aln
TEMP=/scratch
SAMPLELIST=samples.csv
if [ ! -f $CONFIG ]; then
echo "Need a $CONFIG file to proceed"
exit
fi
# NOTE(review): $CONFIG is only checked for existence, never sourced, yet
# PREFIX, INDIR, OUTDIR, LANE and CENTER used below are undefined in this
# file -- confirm whether a '. $CONFIG' line is missing here.
GENOME=$GENOMEDIR/$PREFIX
mkdir -p $BAMDIR
# Sample index: SLURM array task id, falling back to the first CLI argument.
N=${SLURM_ARRAY_TASK_ID}
CPU=1
if [ $SLURM_CPUS_ON_NODE ]; then
CPU=$SLURM_CPUS_ON_NODE
fi
if [ ! $N ]; then
N=$1
fi
if [ ! $N ]; then
echo "need to provide a number by --array or cmdline"
exit
fi
MAX=`wc -l $SAMPLELIST | awk '{print $1}'`
if [ $N -gt $MAX ]; then
echo "$N is too big, only $MAX lines in $SAMPLELIST"
exit
fi
# Comma-separated sample sheet columns: STRAIN,FWD,REV
IFS=,
sed -n ${N}p $SAMPLELIST | while read STRAIN FWD REV;
do
LIBRARY=$(basename $FWD .fastq.gz)
LIBRARY1=$(basename $FWD .fastq.gz)
LIBRARY2=$(basename $REV .fastq.gz)
PAIR1=${INDIR}/$LANE/${LIBRARY1}_val_1.fq.gz
PAIR2=${INDIR}/$LANE/${LIBRARY2}_val_2.fq.gz
echo "... files are $PAIR1 $PAIR2 $LIBRARY"
SAMFILE=NULL
# Paired-end when the reverse-read file exists, otherwise single-end.
if [ -f $PAIR2 ]; then
SAMFILE=$OUTDIR/${STRAIN}.PE.unsrt.sam
echo "SAMFILE is $SAMFILE"
if [ ! -f $SAMFILE ]; then
bwa mem -t $CPU -R "@RG\tID:$STRAIN\tSM:$STRAIN\tLB:$LIBRARY\tPL:illumina\tCN:$CENTER" $GENOME $PAIR1 $PAIR2 > $SAMFILE
fi
if [ ! -f $OUTDIR/${STRAIN}.PE.bam ]; then
samtools fixmate -O bam $SAMFILE $TEMP/${STRAIN}.fixmate.bam
samtools sort -O bam -o $OUTDIR/${STRAIN}.PE.bam -T $TEMP $TEMP/${STRAIN}.fixmate.bam
/usr/bin/rm $TEMP/${STRAIN}.fixmate.bam
fi
else
# NOTE(review): ${ID} is never defined; other paths use ${STRAIN} -- confirm.
SAMFILE=$OUTDIR/${ID}.SE.unsrt.sam
echo "SAMFILE is $SAMFILE"
if [ ! -f $SAMFILE ]; then
bwa mem -t $CPU -R "@RG\tID:$STRAIN\tSM:$STRAIN\tLB:$LIBRARY\tPL:illumina\tCN:Seqmatic" $GENOME $PAIR1 > $SAMFILE
fi
if [ ! -f $OUTDIR/${STRAIN}.SE.bam ]; then
samtools view -b $SAMFILE > $TEMP/${STRAIN}.unsrt.bam
samtools sort -O bam -o $OUTDIR/${STRAIN}.SE.bam -T $TEMP $TEMP/${STRAIN}.unsrt.bam
/usr/bin/rm $TEMP/${STRAIN}.unsrt.bam
fi
fi
done
# NOTE(review): this trailing 'done' has no matching loop opener in this
# file and makes the script a syntax error as-is -- confirm against the
# original repository; a line may have been lost.
done
| true
|
c5cf5b1360471e3ff5ba64159a180a01f79ca30a
|
Shell
|
TomekTrzeciak/improver
|
/tests/improver-nowcast-optical-flow/01-help.bats
|
UTF-8
| 5,563
| 2.859375
| 3
|
[
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
#!/usr/bin/env bats
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2019 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Verify that `improver nowcast-optical-flow -h` exits successfully and that
# the emitted help text matches the expected usage message below.
@test "nowcast-optical-flow help" {
run improver nowcast-optical-flow -h
[[ "$status" -eq 0 ]]
# read -d '' returns non-zero when it reaches EOF, so '|| true' prevents
# bats from treating that expected status as a test failure.
read -d '' expected <<'__TEXT__' || true
usage: improver nowcast-optical-flow [-h] [--profile]
[--profile_file PROFILE_FILE]
[--output_dir OUTPUT_DIR]
[--nowcast_filepaths NOWCAST_FILEPATHS [NOWCAST_FILEPATHS ...]]
[--orographic_enhancement_filepaths OROGRAPHIC_ENHANCEMENT_FILEPATHS [OROGRAPHIC_ENHANCEMENT_FILEPATHS ...]]
[--json_file JSON_FILE]
[--ofc_box_size OFC_BOX_SIZE]
[--smart_smoothing_iterations SMART_SMOOTHING_ITERATIONS]
[--extrapolate]
[--max_lead_time MAX_LEAD_TIME]
[--lead_time_interval LEAD_TIME_INTERVAL]
INPUT_FILEPATHS INPUT_FILEPATHS
INPUT_FILEPATHS
Calculate optical flow components from input fields and (optionally)
extrapolate to required lead times.
positional arguments:
INPUT_FILEPATHS Paths to the input radar files. There should be 3
input files at T, T-1 and T-2 from which to calculate
optical flow velocities. The files require a 'time'
coordinate on which they are sorted, so the order of
inputs does not matter.
optional arguments:
-h, --help show this help message and exit
--profile Switch on profiling information.
--profile_file PROFILE_FILE
Dump profiling info to a file. Implies --profile.
--output_dir OUTPUT_DIR
Directory to write all output files, or only advection
velocity components if NOWCAST_FILEPATHS is specified.
--nowcast_filepaths NOWCAST_FILEPATHS [NOWCAST_FILEPATHS ...]
Optional list of full paths to output nowcast files.
Overrides OUTPUT_DIR. Ignored unless '--extrapolate'
is set.
--orographic_enhancement_filepaths OROGRAPHIC_ENHANCEMENT_FILEPATHS [OROGRAPHIC_ENHANCEMENT_FILEPATHS ...]
List or wildcarded file specification to the input
orographic enhancement files. Orographic enhancement
files are compulsory for precipitation fields.
--json_file JSON_FILE
Filename for the json file containing required changes
to the metadata. Information describing the intended
contents of the json file is available in
improver.utilities.cube_metadata.amend_metadata.Every
output cube will have the metadata_dict applied.
Defaults to None.
--ofc_box_size OFC_BOX_SIZE
Size of square 'box' (in grid squares) within which to
solve the optical flow equations.
--smart_smoothing_iterations SMART_SMOOTHING_ITERATIONS
Number of iterations to perform in enforcing
smoothness constraint for optical flow velocities.
--extrapolate Optional flag to advect current data forward to
specified lead times.
--max_lead_time MAX_LEAD_TIME
Maximum lead time required (mins). Ignored unless '--
extrapolate' is set.
--lead_time_interval LEAD_TIME_INTERVAL
Interval between required lead times (mins). Ignored
unless '--extrapolate' is set.
__TEXT__
# NOTE(review): '=~' performs a regex/substring match rather than strict
# equality, so extra surrounding text in $output would still pass -- confirm
# that is intended.
[[ "$output" =~ "$expected" ]]
}
| true
|
02ffcaa2c050dbe8b5beb059a969dd249c549a47
|
Shell
|
Git-Adri/Utilities
|
/Kafka/create_topic.sh
|
UTF-8
| 441
| 3.03125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Create a Kafka topic and then list all existing topics.
# Usage: create_topic.sh <topic> <replication-factor> <partitions>

# Tilde does not expand inside quotes, so build the path from $HOME.
KAFKA_BIN="$HOME/kafka_2.12-2.3.0/bin"
ZOOKEEPER="localhost:2181"

if [ $# -eq 3 ]
then
    ## Create topic (arguments quoted so unusual values cannot word-split)
    "$KAFKA_BIN"/kafka-topics.sh --create \
        --replication-factor "$2" \
        --partitions "$3" \
        --topic "$1" \
        --zookeeper "$ZOOKEEPER"
    ## List created topics
    "$KAFKA_BIN"/kafka-topics.sh --list \
        --zookeeper "$ZOOKEEPER"
else
    # Usage error: report on stderr and exit non-zero (the original printed
    # to stdout and exited 0, which hid the failure from callers).
    echo "mauvais nombre d'argument il faut préciser le nom du topic la replication et le nb de partitions" >&2
    exit 1
fi
| true
|
33d64a9286a78f684bf54d02cb7d912f1e4f2c16
|
Shell
|
chxchx/config
|
/apply
|
UTF-8
| 162
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/bash
# Hard-link every dotfile in the current directory into $HOME, then reload
# the shell configuration.
for file in .*; do
  # Skip the pseudo entries and the git metadata directory.
  if [ "$file" == "." ] || [ "$file" == ".." ] || [ "$file" == ".git" ]; then
    continue
  fi
  # Quoted so filenames containing spaces survive word-splitting.
  ln -f "$file" "$HOME/$file"
done
source ~/.bashrc
| true
|
56f13371f272323cb79732c265f271854e4a9a1d
|
Shell
|
liubida/about_bash
|
/ex9-reply.sh
|
UTF-8
| 740
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
# ex9-reply.sh
# author: liubida
# Usage: REPLY is the default value for a 'read' command.
# If no variable name is supplied to 'read', the line that was read is
# stored in the builtin variable $REPLY.
echo
echo -n "What is your 1st vegetable? "
read
echo "Your 1st vegetable is $REPLY."
# REPLY holds the value of last "read" if and only if
#+ no variable supplied.
echo
echo -n "What is your 2nd fruit? "
read fruit
echo "Your 2nd fruit is $fruit."
echo "but..."
echo "Value of \$REPLY is still $REPLY."
# $REPLY is still set to its previous value because
#+ the variable $fruit absorbed the new "read" value.
echo
echo -n "What is your 3rd fruit? "
read
echo "Your 3rd fruit is $REPLY."
echo "but..."
echo "Value of \$REPLY is $REPLY."
echo
exit 0
| true
|
74e32c4230159ad400c5a6ff4067773c11ff75cb
|
Shell
|
sujatce/security-project
|
/implementation.sh
|
UTF-8
| 1,980
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
echo -e "\nFollowing set of commands enable UBUNTU Firewall and sets rules to allow only OpenSSH, HTTP & HTTPS traffic"
sudo ufw status
echo -e "\nFirst ensure default firewall to be open to not accidentally close the server without enabling atleast SSH connection"
sudo ufw default allow
#enable ubuntu firewall
sudo ufw --force enable
#check the list of applications enabled via ubuntu firewall
sudo ufw app list
#enable OpenSSH on firewall
sudo ufw allow "OpenSSH"
#enable Apache Full on firewall, this could typically enable port 80 and 443 as well
sudo ufw allow "Apache Full"
#enable firewall to acceept http and https traffic from any IP using the following 3 commands
sudo ufw allow http
sudo ufw allow https
sudo ufw allow proto tcp from any to any port 80,443
#now disable default as deny to disable every other port
# NOTE(review): between 'default allow' above and this 'default deny' the
# firewall is briefly fully open -- deliberate here to avoid an SSH lockout.
sudo ufw default deny
#Any policy to disable any subnet or specific IP can be entered here - Sample below
#Following command blocks a subnet (Currently commented, to be used for a speicific situation)
#sudo ufw deny from X.Y.Z.0/24
#Following command blocks a specific IP address (incase of identified brute force attack from specific IP, this will be handful)
#sudo ufw deny from X.Y.Z.0
#Following command can be used to allow SSH Connections from only specific IP (we haven't done that as we don't have STATIC IP always)
#sudo ufw allow from X.Y.Z.A proto tcp to any port 22
#Since MySQL is always accessed by Web Server locally, there is no need to open MYSQL port to outside world (3306)
#If needed to access MySQL for any query/reporting purpose from remote server, we would open it using the following command
#sudo ufw allow from X.Y.Z.A to any port 3306
# Once again, we don't have static IP, hence we login to server directly using OpenSSH and query it out there. This is more secure for education purpose.
#Webserver needs to send email out, hence allow port 25 (Not implemented yet)
#sudo ufw allow out 25
exit 0
| true
|
9ccc6e09872ae616217089b61fb988a84f02bca6
|
Shell
|
lubaihua33/lis-test
|
/WS2008R2/lisa/remote-scripts/ica/verifyKernelInstall.sh
|
UTF-8
| 6,690
| 3.6875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
########################################################################
#
# Linux on Hyper-V and Azure Test Code, ver. 1.0.0
# Copyright (c) Microsoft Corporation
#
# All rights reserved.
# Licensed under the Apache License, Version 2.0 (the ""License"");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION
# ANY IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR
# PURPOSE, MERCHANTABLITY OR NON-INFRINGEMENT.
#
# See the Apache Version 2.0 License for specific language governing
# permissions and limitations under the License.
#
########################################################################
EXPECTED_ARGS=1
INITRD_TMP_DIR="/tmp/initrd-extract"
# Fix: tilde expansion does not happen inside quotes, so the original
# ICA_BASE_DIR="~/bin" was the literal two-character path "~/bin" and the
# distro-detection script below could never be found.
ICA_BASE_DIR="$HOME/bin"
# Check arguments
if [ $# -ne $EXPECTED_ARGS ]; then
    echo "Usage: $(basename $0) kernel-version"
    # Fix: E_BADARGS is not defined until distro-detection.sh is sourced
    # below, so the original 'exit $E_BADARGS' exited with status 0 on a
    # usage error; fall back to 1 when the variable is unset.
    exit "${E_BADARGS:-1}"
fi
KERNEL_VERSION=$1
# # Source the ICA config file
# if [ -e $HOME/ica/config ]; then
#     . $HOME/ica/config
# else
#     echo "ERROR: Unable to source the Automation Framework config file."
#     exit 1
# fi
# Source distro detection script (presumably defines DISTRIB_ID, the E_*
# status codes and the HYPERV_MODULES* arrays used below -- confirm)
if [ -e $ICA_BASE_DIR/distro-detection.sh ]; then
    . $ICA_BASE_DIR/distro-detection.sh
else
    echo "ERROR: File '$ICA_BASE_DIR/distro-detection.sh' does not exist"
    exit "${E_NONEXISTENT_FILE:-1}"
fi
echo "*** Checking installation of Kernel: $KERNEL_VERSION ***"
# Check installation location of hyperv drivers
echo -e "\nINFO: Checking installation of Hyper-V drivers..."
# Check to make sure the kernel modules were actually compiled and installed
# into /lib/modules...
if [ ! -e /lib/modules/$KERNEL_VERSION ]; then
echo "ERROR: Bad kernel version '$KERNEL_VERSION'. Unable to check installation of Hyper-V drivers. Exiting install check script."
exit $E_BAD_KERNEL_VERSION
fi
#
# We have started the process of migrating our drivers from
# $KERNEL/drivers/staging/hv to $KERNEL/drivers/hv. In the following a
# couple of months, our drivers may exist under both directly (vmbus and
# utils have been moved so far).
#
#
# NOTE(review): HYPERV_MODULES and HYPERV_MODULES_DIRS are presumably
# provided by the sourced distro-detection.sh -- confirm.
echo "Checking to make sure the hyperv kernel modules were compiled and installed in /lib/modules/$KERNEL_VERSION)"
for ic_driver in ${HYPERV_MODULES[@]}; do
DRIVER_INSTALLED=0
for EACH_DIR in ${HYPERV_MODULES_DIRS[@]}; do
basepath="/lib/modules/$KERNEL_VERSION/$EACH_DIR"
if [ -e $basepath/$ic_driver.ko ]; then
echo -e "\tIC driver '$ic_driver' installed correctly"
DRIVER_INSTALLED=1
fi
done
if [ "$DRIVER_INSTALLED" = "0" ]; then
# NOTE(review): $basepath here is whatever the last directory iterated
# was, not every candidate location that was searched.
echo -e "\tERROR: IC driver '$ic_driver' does not exist in $basepath"
exit $E_HYPERVIC_INSTALL_INCOMPLETE
fi
done
# Check initrd image. Involves extracting the initrd to a temp directory,
# making sure the initrd /lib directory contains the hyperv kernel modules, and
# also check to make sure the proper modprobe/insmod commands are in the initrd
# 'init' script.
echo -e "\nINFO: Checking initrd image: $KERNEL_VERSION"
# Clean up tmp directory from previous run if something didn't exit cleanly
rm -rf $INITRD_TMP_DIR
# Extract initrd image
mkdir -p $INITRD_TMP_DIR
# Fix: tilde is not expanded inside quotes, so the original
# START_DIR="~/bin" was the literal path "~/bin", breaking both the
# distro-specific initrd_check_* sourcing below and the final
# 'cd $START_DIR' at the end of the script.
START_DIR="$HOME/bin"
# Check to make sure each Hyper-V driver is installed in the /lib directory of
# the initrd image
if [ -e $INITRD_TMP_DIR ]; then
    cd $INITRD_TMP_DIR
    echo "Extracting the initrd image..."
    gunzip -dc /boot/initrd-$KERNEL_VERSION | cpio -ivd
    echo -e "\nChecking initrd 'init' script for modprobe/insmod statements and existence of hyperv kernel modules..."
    for ic_driver in ${HYPERV_MODULES[@]}; do
        # This section needs distro specific processing since they each
        # layout their initrd differently. Distro specific scripts are
        # sourced because this area could have gotten pretty ugly once
        # we start supporting more distros and different versions of
        # distros (e.g. RHEL6 may use a different layout for the
        # initrd image)
        # See distro-detection.sh for list of valid DISTRIB_ID values
        # and distro id variables
        case "$DISTRIB_ID" in
            "$ID_REDHAT" )
                # If we need to filter even further based on the distro
                # version number stored in $DISTRIB_RELEASE (e.g. 5.4,
                # etc), we can do so. Works for RHEL 5.
                . $START_DIR/initrd_check_redhat ;;
            "$ID_FEDORA" | "$ID_REDHAT6" )
                # Works for fedora 12 and RHEL 6
                . $START_DIR/initrd_check_fedora ;;
            "$ID_SUSE" )
                # works for open suse 11
                . $START_DIR/initrd_check_suse ;;
            * )
                echo "ERROR: initrd checks are not yet supported for this distro ($DISTRIB_ID)"
        esac
    done
else
    echo "ERROR: Initrd extraction directory was not created due to an unkown error."
    exit $E_GENERAL
fi
# Check /etc/fstab and see if it needs to be modified (change sda,sdb,etc
# references to hda,hdb,et) If a system is using LVM (Logical Volume
# Mangement), we won't need to modify anything since LVM just searches drives
# for metadata and automatically brings them up (i.e. a drive changing from sda
# to hda won't matter). Likewise, if drives are referred to by labels (e.g.
# LABEL=/) instead of device names (e.g. /dev/sda) we won't need to modify
# anything either. We only need to modify /etc/fstab if drives are refered to
# by their device names (e.g. /dev/sda1). In these cases we only modify the
# /dev/sd? devices that are mounted on /, /boot, /home, or swap. The reason
# for this is that we could have a VM SCSI drive that actually is supposed to
# be a /dev/sd? device and we don't want to change it to /dev/hd?
# The fstab variable makes it useful for testing non-standard (something other
# than /etc/fstab) fstab files
fstab="/etc/fstab"
echo -e "\nINFO: Checking the contents of $fstab"
if [ "$(cat $fstab | grep -i /dev/sd)" = "" ]; then
echo "$fstab does not need to be modified"
else
echo "$fstab needs to be modified"
echo -e "\nCurrent contents of $fstab:"
cat $fstab
# TODO: Add a function that takes the $search_pattern string and
# properly escapes it.
# The following is an easier to read (i.e. non-escaped) version of the
# sed regular expression below.
#search_pattern="^(/dev/)sd([:alpha:][:digit:]+[:space:]+(/|swap|home|boot))"
# sed keeps a backup of the original file as /etc/fstab.bak.
sed --in-place=.bak -e 's/^\(\/dev\/\)sd\([[:alpha:]][[:digit:]]\+[[:space:]]\+\(\/\|swap\|\/home\|\/boot\)\)/\1hd\2/g' $fstab
echo -e "\nNew contents of $fstab:"
cat $fstab
fi
# Go back to the original directory
# NOTE(review): START_DIR is assigned in the initrd section above as "~/bin";
# tilde does not expand from a variable, so this cd fails as written -- see
# the fix to the START_DIR assignment.
cd $START_DIR
# Cleanup temp initrd extraction
rm -rf $INITRD_TMP_DIR
# Exit Successfully
exit 0
| true
|
2f416b847b2d99a8a3d1c816f4ef9c2eebc930a9
|
Shell
|
Azure/sap-automation
|
/deploy/scripts/helpers/common_utils.sh
|
UTF-8
| 5,437
| 4.5
| 4
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
#!/usr/bin/env bash
###############################################################################
#
# Purpose:
# This file allows bash functions to be used across different scripts without
# redefining them in each script. It's designed to be "sourced" rather than run
# directly.
#
###############################################################################
# NOTE(review): SCRIPTPATH must be defined by the sourcing script -- confirm.
readonly target_path="${SCRIPTPATH}/../deploy"
# location of the input JSON templates
readonly target_template_dir="${target_path}/template_samples"
# Given a return/exit status code (numeric argument)
# and an error message (string argument)
# This function returns immediately if the status code is zero.
# Otherwise it prints the error message and exits via error_and_exit.
# Note: The error is prefixed with "ERROR: " and is written to STDERR
# (the original comment claiming STDOUT was stale).
function continue_or_error_and_exit()
{
local status_code=$1
local error_message="$2"
((status_code != 0)) && { error_and_exit "${error_message}"; }
return "${status_code}"
}
# Print "ERROR: <message>" on stderr and terminate the script with status 1.
function error_and_exit()
{
    printf 'ERROR: %s\n' "$1" >&2
    exit 1
}
# Abort (via error_and_exit) unless the given path is an existing regular file.
function check_file_exists()
{
    local candidate="$1"
    [[ -f "${candidate}" ]] || error_and_exit "File ${candidate} does not exist"
}
# This function pretty prints all the currently available template file names:
# every *.json file under the given directory, shown as " - <name>" without
# the .json extension.
function print_allowed_json_template_names()
{
    local templates_dir="$1"
    local json_path
    # Walk each found file and print its bare name with a hyphen bullet.
    find "${templates_dir}" -name '*.json' | while IFS= read -r json_path; do
        local file_name="${json_path##*/}"
        printf ' - %s\n' "${file_name%.json}"
    done
}
# This function will check to see if the given command line tool is installed
# If the command is not installed, then it will exit with an appropriate error and the given advice
function check_command_installed()
{
local cmd="$1"
local advice="$2"
# disable exit on error throughout this section as it's designed to fail
# when cmd is not installed
set +e
local is_cmd_installed
command -v "${cmd}" > /dev/null
is_cmd_installed=$?
# NOTE(review): this re-enables 'set -e' unconditionally, even if the
# sourcing script did not have it enabled -- confirm that is intended.
set -e
local error="This script depends on the '${cmd}' command being installed"
# append advice if any was provided
if [[ "${advice}" != "" ]]; then
error="${error} (${advice})"
fi
continue_or_error_and_exit ${is_cmd_installed} "${error}"
}
# This function sets the JSON value at the given JSON path to the given value in the given JSON template file
# Note: It uses the `jq` command line tool, and will fail with a helpful error if the tool is not installed.
function edit_json_template_for_path()
{
local json_path="$1"
local json_value="$2"
local json_template_name="$3"
local target_json="${target_template_dir}/${json_template_name}.json"
local temp_template_json="${target_json}.tmp"
check_file_exists "${target_json}"
check_command_installed 'jq' 'Try: https://stedolan.github.io/jq/download/'
# this is the JSON path in jq format
# in the future we could call a function here to translate simple dot-based paths into jq format paths
# For example: Translate 'infrastructure.resource_group.name' to '"infrastructure", "resource_group", "name"'
local jq_json_path="${json_path}"
# NOTE(review): ${json_value} is interpolated unquoted into a command line
# that is later eval'd; a value containing spaces, quotes or shell
# metacharacters will break the command or inject options -- consider
# invoking jq with an argument array instead of eval.
local jq_command="jq --arg value ${json_value} 'setpath([${jq_json_path}]; \$value)' \"${target_json}\""
# edit JSON template file contents and write to temp file
eval "${jq_command}" > "${temp_template_json}"
# replace original JSON template file with temporary edited one
mv "${temp_template_json}" "${target_json}"
}
# This helper function checks if a JSON key is set to a non-empty string.
# the json_path argument must be in jq dot notation, e.g. '.software.downloader.credentials.sap_user'
# Returns 0 when the value is non-empty, 1 when it is the empty string "".
function check_json_value_is_not_empty()
{
local json_path="$1"
local json_template_name="$2"
local target_json="${target_template_dir}/${json_template_name}.json"
check_file_exists "${target_json}"
check_command_installed 'jq' 'Try: https://stedolan.github.io/jq/download/'
local json_value=
json_value=$(jq "${json_path}" "${target_json}")
# jq prints JSON-encoded output, so an empty string appears as the two
# characters "" (including the quotes).
if [ "${json_value}" == '""' ]; then
return 1
else
return 0
fi
}
# This function is used to compare semver strings.
# It takes two parameters, each a semver string /\d+(\.\d+(\.\d+)?)?/
# For example, 1, 1.2, 1.2.3 -- missing components are treated as 0.
# It echos ">" if string1 > string2, "=" if equal and "<" if string1 < string2.
#
# Fix: the original used bare '(( major=... ))' statements, whose exit
# status is 1 whenever the computed difference is 0; under 'set -e' in a
# sourcing script that aborted the caller on equal components. The
# arithmetic is now done in assignments whose status is always 0.
function test_semver()
{
    local actual_semver="$1"
    local required_semver="$2"
    local -a actual_semver_parts required_semver_parts
    IFS=. read -r -a actual_semver_parts <<< "${actual_semver}"
    IFS=. read -r -a required_semver_parts <<< "${required_semver}"
    local idx diff
    # Compare major, minor, patch in order; the first non-zero difference wins.
    for idx in 0 1 2; do
        diff=$(( ${actual_semver_parts[idx]:-0} - ${required_semver_parts[idx]:-0} ))
        if [[ ${diff} -gt 0 ]]; then echo -n ">"; return 0; fi
        if [[ ${diff} -lt 0 ]]; then echo -n "<"; return 0; fi
    done
    echo -n "="
}
# This function takes a single bash string and escapes all special characters within it
# Source: https://stackoverflow.com/a/20053121
# Sed stages: (1) backslash-escape every character outside [a-zA-Z0-9,._+@%/-];
# (2) if the input is a single empty line, emit ""; (3) for multi-line input,
# wrap the interior line boundaries in double quotes.
function get_escaped_string()
{
local str="$1"
echo "$str" | sed -e 's/[^a-zA-Z0-9,._+@%/-]/\\&/g; 1{$s/^$/""/}; 1!s/^/"/; $!s/$/"/'
}
| true
|
c19c43035768a13159d3a237ba925d18bb1dcef9
|
Shell
|
etimecowboy/DotEmacs
|
/bin/lin64/get-icicles.sh
|
UTF-8
| 1,203
| 2.9375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Download the Icicles Emacs libraries from EmacsWiki into ./icicles.
# A pre-existing ./icicles directory is preserved by renaming it to
# icicles_OLD<n> (first free n) before downloading.
base_url=http://www.emacswiki.org/cgi-bin/wiki/download
required_libraries="icicles.el \
icicles-cmd1.el icicles-cmd2.el \
icicles-face.el icicles-fn.el icicles-mac.el \
icicles-mcmd.el icicles-mode.el icicles-opt.el \
icicles-var.el "
optional_libraries="col-highlight.el crosshairs.el \
doremi.el hexrgb.el hl-line+.el \
icicles-chg.el icicles-doc1.el icicles-doc2.el \
icomplete+.el lacarte.el \
synonyms.el vline.el"
dir=icicles
# Rotate an existing download directory out of the way.
if [ -d "$dir" ]; then
  i=0
  while [ -d "${dir}_OLD$i" ]; do
    # Explicit arithmetic; the original relied on `typeset -i` making the
    # string assignment i="$i + 1" evaluate arithmetically, which is obscure.
    i=$((i + 1))
  done
  mv "$dir" "${dir}_OLD$i"
fi
# Word-splitting of the library lists is intentional here.
for library in $required_libraries $optional_libraries; do
  wget -nd -P "$dir" "${base_url}/${library}"
  # Sleep for 2 seconds so as not to overload www.emacswiki.org
  sleep 2
done
# More optional libraries that Icicles can take advantage of:
# apropos-fn+var.el
# bookmark+.el
# dired+.el
# doremi-frm.el
# ffap-.el
# fit-frame.el
# fuzzy-match.el
# info+.el
# linkd.el
# menu-bar+.el
# misc-cmds.el
# palette.el
# pp+.el
# thingatpt+.el
# wid-edit+.el
| true
|
32929dad738c8cf6c40993702c80c9c9b79ff860
|
Shell
|
vishal2332/playground
|
/scripting/specialcharacters
|
UTF-8
| 311
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
# A "$" normally introduces a variable, so to print a literal dollar sign
# it must be escaped with a backslash. Note: inside double quotes, a
# backslash before '#' or '@' is NOT special and is printed literally.
printf '%s\n' 'the cost of the item is $15'
printf '%s\n' 'this is \#24'
printf '%s\n' 'this is \@'
| true
|
081889db5ec0e868be4ab5b742e76dd40219ff84
|
Shell
|
envoyproxy/envoy
|
/ci/format_pre.sh
|
UTF-8
| 2,637
| 3.625
| 4
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash -E
# Pre-checks for validation and linting
#
# These checks do not provide a fix and are quicker to run,
# allowing CI to fail quickly on basic linting or validation errors
# Accumulates human-readable failure descriptions; reported at the end.
FAILED=()
# Name of the check currently running (used by the ERR trap's report).
CURRENT=""
# AZP appears to make lines with this prefix red
BASH_ERR_PREFIX="##[error]: "
DIFF_OUTPUT="${DIFF_OUTPUT:-/build/fix_format.diff}"
# Split the space-separated option lists from the environment into arrays.
read -ra BAZEL_STARTUP_OPTIONS <<< "${BAZEL_STARTUP_OPTION_LIST:-}"
read -ra BAZEL_BUILD_OPTIONS <<< "${BAZEL_BUILD_OPTION_LIST:-}"
# ERR-trap handler: walks the call stack with `caller` and records a
# frame-by-frame description of where the failure happened into FAILED.
trap_errors () {
    local frame=0 command line sub file
    if [[ -n "$CURRENT" ]]; then
        command=" (${CURRENT})"
    fi
    set +v
    while read -r line sub file < <(caller "$frame"); do
        if [[ "$frame" -ne "0" ]]; then
            FAILED+=("  > ${sub}@ ${file} :${line}")
        else
            FAILED+=("${sub}@ ${file} :${line}${command}")
            if [[ "$CURRENT" == "check" ]]; then
                # shellcheck disable=SC2016
                FAILED+=(
                    ""
                    ' *Code formatting check failed*: please search above logs for `CodeChecker ERROR`'
                    "")
            fi
        fi
        ((frame++))
    done
    set -v
}
trap trap_errors ERR
trap exit 1 INT
CURRENT=check
# This test runs code check with:
# bazel run //tools/code:check -- --fix -v warn -x mobile/dist/envoy-pom.xml
# see: /tools/code/BUILD
bazel "${BAZEL_STARTUP_OPTIONS[@]}" test "${BAZEL_BUILD_OPTIONS[@]}" //tools/code:check_test
CURRENT=configs
bazel "${BAZEL_STARTUP_OPTIONS[@]}" run "${BAZEL_BUILD_OPTIONS[@]}" //configs:example_configs_validation
CURRENT=spelling
"${ENVOY_SRCDIR}/tools/spelling/check_spelling_pedantic.py" --mark check
# TODO(phlax): move clang/buildifier checks to bazel rules (/aspects)
# check_format's own test helper only runs on CI branches.
if [[ -n "$AZP_BRANCH" ]]; then
    CURRENT=check_format_test
    "${ENVOY_SRCDIR}/tools/code_format/check_format_test_helper.sh" --log=WARN
fi
CURRENT=check_format
echo "Running ${ENVOY_SRCDIR}/tools/code_format/check_format.py"
time "${ENVOY_SRCDIR}/tools/code_format/check_format.py" fix --fail_on_diff
# Report collected failures; if the tree was modified, also emit a diff
# that can be applied locally to fix (some of) the problems.
if [[ "${#FAILED[@]}" -ne "0" ]]; then
    echo "${BASH_ERR_PREFIX}TESTS FAILED:" >&2
    for failed in "${FAILED[@]}"; do
        echo "${BASH_ERR_PREFIX} $failed" >&2
    done
    if [[ $(git status --porcelain) ]]; then
        git diff > "$DIFF_OUTPUT"
        echo >&2
        echo "Applying the following diff should fix (some) problems" >&2
        echo >&2
        cat "$DIFF_OUTPUT" >&2
        echo >&2
        echo "Diff file with (some) fixes will be uploaded. Please check the artefacts for this PR run in the azure pipeline." >&2
        echo >&2
    fi
    exit 1
fi
| true
|
85a39d2ff5ed65e184dd3388817950cfcf5ffaef
|
Shell
|
Felix-Kit/ProtocolTaint
|
/src/run
|
UTF-8
| 338
| 2.890625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build and/or run a Pin analysis tool.
# Usage:
#   run compile <tool>              build obj-intel64/<tool>.so
#   run run <tool> <prog> [args]    run <prog> under the tool
#   run all <tool> <prog> [args]    build, then run
mkdir -p obj-intel64
# Build step ("compile" or "all"): touch the source so make always rebuilds.
case "$1" in
    compile|all)
        touch "$2.cpp"
        make "obj-intel64/$2.so"
        ;;
esac
# Run step ("run" or "all"): libx.so is preloaded into the traced program.
# Expansions are quoted so tool names, programs and arguments containing
# spaces are passed through intact (the original word-split them).
case "$1" in
    run|all)
        LD_PRELOAD=./libx.so pin -t "obj-intel64/$2.so" -- "$3" "${@:4}"
        ;;
esac
| true
|
ea9ddf826cef3161a605378b5f6f2bd00faf2c9a
|
Shell
|
ClubfootBear/nginx_analyzer
|
/nginx_stat.sh
|
UTF-8
| 4,469
| 3.265625
| 3
|
[] |
no_license
|
#! /bin/bash
# Compute anomaly/percentile statistics for nginx timing fields
# (rt, uct, uht, urt) from the access log given as $1.
echo "Start of script"
# Number of lines in the input log.
array_lenght=$(cat "$1" | wc -l)
echo "array_lenght: $array_lenght"
# Pre-compute how many lines correspond to each percentage of the file.
# NOTE(review): grep -E -o "[0-9]{1,}" prints EVERY digit run on its own
# line, so for e.g. "12.3400" these variables hold "12\n3400"; the later
# unquoted $prc_* expansions pass only the first token to head/tail —
# works in practice, but fragile. Verify before refactoring.
prc_1=$(echo "scale=4; $array_lenght * 0.01" | bc | sed -e 's/^\./0./' -e 's/^-\./-0./' | grep -E -o "[0-9]{1,}")
prc_5=$(echo "scale=4; $array_lenght * 0.05" | bc | sed -e 's/^\./0./' -e 's/^-\./-0./' | grep -E -o "[0-9]{1,}")
prc_10=$(echo "scale=4; $array_lenght * 0.1" | bc | sed -e 's/^\./0./' -e 's/^-\./-0./' | grep -E -o "[0-9]{1,}")
prc_20=$(echo "scale=4; $array_lenght * 0.2" | bc | sed -e 's/^\./0./' -e 's/^-\./-0./' | grep -E -o "[0-9]{1,}")
prc_80=$(echo "scale=4; $array_lenght * 0.8" | bc | sed -e 's/^\./0./' -e 's/^-\./-0./' | grep -E -o "[0-9]{1,}")
prc_90=$(echo "scale=4; $array_lenght * 0.9" | bc | sed -e 's/^\./0./' -e 's/^-\./-0./' | grep -E -o "[0-9]{1,}")
prc_99=$(echo "scale=4; $array_lenght * 0.99" | bc | sed -e 's/^\./0./' -e 's/^-\./-0./' | grep -E -o "[0-9]{1,}")
# function anomaly() {
# }
# Extract each timing field as a newline-separated list of numbers.
# These are scalar variables holding multi-line strings, not bash arrays.
array_rt=$(grep -E -o "\brt=\b[0-9]{0,}.[0-9]{1,}" $(echo "$1") | grep -E -o "[0-9]{1,}.[0-9]{1,}")
array_uct=$(grep -E -o 'uct="[0-9]{0,}.[0-9]{1,}"' $(echo "$1") | grep -E -o "[0-9]{1,}.[0-9]{1,}")
array_uht=$(grep -E -o 'uht="[0-9]{0,}.[0-9]{1,}"' $(echo "$1") | grep -E -o "[0-9]{1,}.[0-9]{1,}")
array_urt=$(grep -E -o 'urt="[0-9]{0,}.[0-9]{1,}"' $(echo "$1") | grep -E -o "[0-9]{1,}.[0-9]{1,}")
# Top-15 outliers per field.
anomaly_rt15=$(echo "$array_rt" | sort -rh | head -n 15)
anomaly_uct15=$(echo "$array_uct" | sort -rh | head -n 15)
anomaly_uht15=$(echo "$array_uht" | sort -rh | head -n 15)
anomaly_urt15=$(echo "$array_urt" | sort -rh | head -n 15)
# echo "15 numbers of the top"
# echo $array_rt15
function anomaly() {
local count=0
local newarray
newarray=("$@")
local newarray_anomaly
newarray_anomaly=$(echo "$newarray" | sort -rh | head -n $2)
#local array_count
#array_count=$(echo "$newarray" | wc -l)
#prc="$2"
# echo "prc: $prc"
for i in $newarray_anomaly
do
count=$(echo "scale=4; $count + $i" | bc)
done
echo $(echo "scale=4; $count / $2" | bc | sed -e 's/^\./0./' -e 's/^-\./-0./')
}
function percentile() {
local count=0
local newarray
newarray=("$@")
local newarray_prcntl
newarray_prcntl=$(echo "$newarray" | sort -rh | tail -n $2)
#local array_count
#array_count=$(echo "$newarray" | wc -l)
#prc="$2"
#echo "prc: $prc"
for i in $newarray_prcntl
do
count=$(echo "scale=4; $count + $i" | bc)
done
#echo "count $count"
echo $(echo "scale=4; $count / $2" | bc | sed -e 's/^\./0./' -e 's/^-\./-0./')
}
echo "Начинаем считать Аномали_1"
anomaly_1_rt=$(anomaly "${array_rt[*]}" $prc_1)
anomaly_1_uct=$(anomaly "${array_uct[*]}" $prc_1)
anomaly_1_uht=$(anomaly "${array_uht[*]}" $prc_1)
anomaly_1_urt=$(anomaly "${array_urt[*]}" $prc_1)
echo "Посчитали Аномали_1"
echo "Начинаем считать Перцентиль_80"
percentile_80_rt=$(percentile "${array_rt[*]}" $prc_80)
percentile_80_uct=$(percentile "${array_uct[*]}" $prc_80)
percentile_80_uht=$(percentile "${array_uht[*]}" $prc_80)
percentile_80_urt=$(percentile "${array_urt[*]}" $prc_80)
echo "Посчитали Перцентиль_80"
echo "Начинаем считать Перцентиль_99"
percentile_99_rt=$(percentile "${array_rt[*]}" $prc_99)
percentile_99_uct=$(percentile "${array_uct[*]}" $prc_99)
percentile_99_uht=$(percentile "${array_uht[*]}" $prc_99)
percentile_99_urt=$(percentile "${array_urt[*]}" $prc_99)
echo "Посчитали Перцентиль_99"
#echo "$percntl_1;5;7; $(echo $array_rt15)" > res.xls
# Write the semicolon-separated report: for each section a header row,
# a data row, then a blank separator row.
echo "Anomaly_rt;Anomaly_uct;Anomaly_uht;Anomaly_urt;" > res.xls
echo "$anomaly_1_rt;$anomaly_1_uct;$anomaly_1_uht;$anomaly_1_urt" >> res.xls
echo " ; ; ; ;" >> res.xls
echo "Top_15_Anomaly_rt;Top_15_Anomaly_uct;Top_15_Anomaly_uht;Top_15_Anomaly_urt;" >> res.xls
echo "$(echo $anomaly_rt15);$(echo $anomaly_uct15);$(echo $anomaly_uht15);$(echo $anomaly_urt15);" >> res.xls
echo " ; ; ; ;" >> res.xls
echo "Percentile_80_rt;Percentile_80_uct;Percentile_80_uht;Percentile_80_urt;" >> res.xls
# BUG FIX: the two percentile data rows were written as
#   echo "..." >> res.xlsecho " ; ; ; ;" >> res.xls
# — a lost line break fused the redirect target with the following echo,
# so the data ended up in a file literally named "res.xlsecho".
# Split back into a data row plus one separator row, matching the
# anomaly section above.
echo "$percentile_80_rt;$percentile_80_uct;$percentile_80_uht;$percentile_80_urt" >> res.xls
echo " ; ; ; ;" >> res.xls
echo "Percentile_99_rt;Percentile_99_uct;Percentile_99_uht;Percentile_99_urt;" >> res.xls
echo "$percentile_99_rt;$percentile_99_uct;$percentile_99_uht;$percentile_99_urt" >> res.xls
echo " ; ; ; ;" >> res.xls
echo "Finish"
| true
|
23bdc369c43dfb9cbd3ea19959d241bd46bc67ff
|
Shell
|
kirasystems/immutant
|
/etc/bin/common-build.sh
|
UTF-8
| 963
| 2.9375
| 3
|
[
"MIT",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
# Shared environment for the CI build scripts.
# Expects ${WORKSPACE} to be set by the caller (e.g. the CI runner).
BIN_DIR="${WORKSPACE}/bin"
AS_DIR="${WORKSPACE}/as-dists"
LEIN_VERSION=2.5.1
export PATH="${BIN_DIR}:${PATH}"
export WORKSPACE_HOME="${WORKSPACE}/home"
export LEIN_HOME="${WORKSPACE_HOME}/.lein"
# Disable progress output for cleaner CI logs.
export JVM_OPTS="-Dprogress.monitor=false"
# Absolute directory containing this script.
DIR=$( cd "$( dirname "$0" )" && pwd )
# Print a banner with the given message and a timestamp so CI log sections
# are easy to find.
function mark {
  echo
  echo "=============================================="
  # Quoted: an unquoted $1 would be word-split and glob-expanded.
  echo "$1"
  date
  echo "=============================================="
  echo
}
# Remove build artefacts and downloaded tools.
# The ${VAR:?} expansions abort the script if a variable is unset/empty,
# preventing catastrophes like `rm -rf /target` when WORKSPACE is missing.
function cleanup {
  rm -rf "${WORKSPACE:?}/target"
  rm -rf "${BIN_DIR:?}"
  rm -rf "${AS_DIR:?}"
}
# Download the pinned leiningen version into ${BIN_DIR} and mark it
# executable. ${BIN_DIR} is already on PATH (see environment setup above).
function install-lein {
  mark "Installing leiningen ${LEIN_VERSION}"
  mkdir -p "${BIN_DIR}"
  # Bail out rather than downloading into the wrong directory on cd failure.
  cd "${BIN_DIR}" || return 1
  wget --no-check-certificate "https://raw.github.com/technomancy/leiningen/${LEIN_VERSION}/bin/lein"
  chmod +x lein
  cd - || return 1
}
# Install the build-server credentials profile into the lein home directory.
function setup-lein-profiles {
  mark "Setting up lein profiles"
  mkdir -p "${LEIN_HOME}"
  # -f: overwrite any stale profiles.clj from a previous build.
  cp -f /private/projectodd/auth_profile.clj "${LEIN_HOME}/profiles.clj"
}
| true
|
b17490d4cec1ac056f76dc2ae99a4101ba39268a
|
Shell
|
Ashkore/Distributed-Remote-Boot-Linux-With-Kali
|
/drbl.sh
|
UTF-8
| 1,477
| 3.015625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
#EXAMPLE:
#./drbl.sh
# Installs and configures DRBL (Diskless Remote Boot in Linux) on a
# Debian/Kali host: adds the required apt repositories, updates the OS,
# installs the DRBL packages and runs the server setup.
#Color Variables
Green='\033[0;32m'
LGreen='\033[1;32m'
RED='\033[0;31m'
NC='\033[0m'
#Start of Repository setup
echo -e "${LGreen}Starting: Setting up /etc/apt/sources.list File"
echo "deb http://ftp.us.debian.org/debian/ jessie main" >> /etc/apt/sources.list
echo "deb http://free.nchc.org.tw/drbl-core drbl stable" >> /etc/apt/sources.list
echo -e "${Green}Finished: Setting up /etc/apt/sources.list File"
#End of Repository setup
#Start of Update OS
echo -e "${LGreen}Starting: Updating OS"
echo -e "${NC}"
apt-get update -y && apt-get upgrade -y
echo -e "${Green}Finished: Updating OS"
#End of Update OS
#Start of DRBL install
echo -e "${LGreen}Starting: Installing DRBL"
echo -e "${NC}"
apt-get install drbl -y --allow-unauthenticated
echo -e "${Green}Finished: Installing DRBL"
#End of DRBL install
#WORK IN PROGRESS
echo -e "${LGreen}Starting: DRBL Configuration"
echo -e "${LGreen}Starting: DRBL package Installs"
echo -e "${NC}"
apt-get install drbl-chntpw freedos partclone clonezilla mkpxeinitrd-net -y --allow-unauthenticated
# Feed the interactive drblsrv prompts non-interactively.
printf 'N\nN\nY\n1\n' | drblsrv -i
echo -e "${Green}Finished: DRBL package Installs"
# NOTE(review): everything after this `exit 0;` is unreachable dead code —
# presumably disabled on purpose while the client setup is work-in-progress.
exit 0;
echo -e "${LGreen}Starting: DRBL Client Setup"
echo -e "${NC}"
wget https://ashkore.github.io/Distributed-Remote-Boot-Linux-With-Kali/drblpush.conf
drblpush -c drblpush.conf
echo -e "${Green}Finished: DRBL Client Setup"
echo -e "${Green}Finished: DRBL Configuration"
#End of DRBL Configuration
| true
|
d2df704a326d173c3cc75c5f03a1a7a697c0cdc0
|
Shell
|
hmacread/dropthehook
|
/deg2dec.sh
|
UTF-8
| 1,284
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
# Convert degrees and decimal minutes (input DDD.MM.MMM[n|e|s|w]) into
# decimal degrees. N/E yield positive results, S/W negative.
# Only basic validation is performed on the input.
# FIX: the original shebang was "#/bin/bash" (missing '!'), so the kernel
# never honoured it.
if [[ -z $1 ]]; then
    echo Error: Usage "deg3dec DDD.MM.MMM[n|e|s|w]"
    exit 1
fi
# Split the input on dots: degrees / minutes / decimal-minutes+direction.
deg=$(echo "$1" | cut -f1 -d . | sed s/^0*//)
# add back leading zero if all digits were stripped
if [[ -z $deg ]]; then
    deg=0
fi
min=$(echo "$1" | cut -f2 -d .)
decsec=$(echo "$1" | cut -f3 -d .)
# Input Validation
# Check for direction: last field must end in a digit plus a direction
# letter. (The original class [n|e|s|w] also matched a literal '|';
# [nesw] is what was intended.)
if ! echo "$1" | grep -Eiq '.[0-9][nesw]$'; then
    echo "Lat or long must contain a letter for direction (I.e. 102.29.234E)"
    exit 1
fi
# Check for invalid Long or Lat
if (( deg >= 180 || deg < 0 )); then
    echo Input degrees lat/long invalid
    exit 1
fi
# Check for invalid Latitude: N/S coordinates must be below 90 degrees.
# (Replaces the original `isLat=!$?` trick, which only worked because
# (( )) later applied arithmetic negation to the literal string "!0"/"!1".)
if echo "$1" | grep -Eiq '[ns]'; then
    isLat=1
else
    isLat=0
fi
if (( deg >= 90 && isLat )); then
    echo "Input degrees latitude invalid (>=90)"
    exit 1
fi
# Check for invalid minutes
# NOTE(review): minutes with a leading zero like "08"/"09" trip bash's
# octal parsing here — preexisting behavior, retained.
if (( min >= 60 || min < 0 )); then
    echo "Input minutes lat/long invalid (>60)"
    exit 1
fi
# South/West coordinates are negative.
if echo "$decsec" | grep -Eiq '[sw]'; then
    negative=1
else
    negative=0
fi
decsec=$(echo "$decsec" | sed 's/[nNsSeEwW]//')
min=$min.$decsec
# bc prints values < 1 with a leading "." at scale=9, so concatenating
# $deg with the quotient directly yields the decimal-degree string.
decmin=$(echo "scale=9; $min / 60" | bc)
if (( negative )); then
    echo -$deg$decmin | sed 's/^-\./-0./'
else
    echo $deg$decmin | sed 's/^\./0./'
fi
| true
|
876c33c37bba0047f7f4d280d0fceace0ce744fc
|
Shell
|
flackr/stoplight
|
/util.sh
|
UTF-8
| 1,567
| 4.0625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Helpers for driving a GPIO stoplight (red/yellow/green lamps).
# Set DEBUG to a non-empty value to simulate the pins via /tmp/pin<N> files.
# GPIO pin numbers of the three lamps.
RED=4
YELLOW=17
GREEN=22
# Initialise the lamp pins. In DEBUG mode, create /tmp/pin<N> stand-in
# files holding "0"; on real hardware, export each GPIO via sysfs,
# configure it as an output and drive it low (all lamps off).
function setup()
{
  if [ -n "$DEBUG" ]
  then
    echo "0" > /tmp/pin$RED
    echo "0" > /tmp/pin$YELLOW
    echo "0" > /tmp/pin$GREEN
    echo "Set up pins."
    return 0
  fi
  echo $RED > /sys/class/gpio/export
  echo $YELLOW > /sys/class/gpio/export
  echo $GREEN > /sys/class/gpio/export
  echo out > /sys/class/gpio/gpio$RED/direction
  echo out > /sys/class/gpio/gpio$YELLOW/direction
  echo out > /sys/class/gpio/gpio$GREEN/direction
  echo 0 > /sys/class/gpio/gpio$RED/value
  echo 0 > /sys/class/gpio/gpio$YELLOW/value
  echo 0 > /sys/class/gpio/gpio$GREEN/value
}
# Print which lamps are currently lit, by reading the DEBUG-mode
# /tmp/pin<N> stand-in files (only meaningful when DEBUG is set).
function debug()
{
  if [ `cat /tmp/pin$RED` -eq 1 ]
  then
    echo -n "Red "
  fi
  if [ `cat /tmp/pin$YELLOW` -eq 1 ]
  then
    echo -n "Yellow "
  fi
  if [ `cat /tmp/pin$GREEN` -eq 1 ]
  then
    echo -n "Green "
  fi
  echo "."
}
# PID file used to debounce the DEBUG-mode reporting timer.
DEBUGPID=/tmp/debug.pid
# Set lamp $1 (a pin number) to state $2 (0 or 1).
function light()
{
  if [ -n "$DEBUG" ]
  then
    # If debugging is enabled then we set up a short timer and call the debug function.
    if [ ! -f "$DEBUGPID" ]
    then
      (sleep 0.1; debug; rm -f $DEBUGPID) &
      echo $! > $DEBUGPID
    fi
    echo $2 > /tmp/pin$1
  else
    echo $2 > /sys/class/gpio/gpio$1/value
  fi
}
# Return 0 when the network looks usable: wlan0 has an address with a
# broadcast entry AND /etc/resolv.conf names at least one DNS server.
# In DEBUG mode, always report connected.
function connected() {
  if [ -n "$DEBUG" ]
  then
    return 0
  fi
  ERROR=0
  # Verify we have a correct IP.
  if [ "$(ifconfig wlan0 | grep -c "inet.*broadcast")" -eq 0 ]
  then
    ERROR=1
  fi
  # And verify that a DNS server has been set.
  if [ "$(cat /etc/resolv.conf | grep -c nameserver)" -eq 0 ]
  then
    ERROR=1
  fi
  return $ERROR
}
| true
|
d03f9efb34872b96ce035e41188db9b5b169cfc2
|
Shell
|
woodun/insight_files
|
/load_balance_scripts/old/launch_all_configs_temp.sh
|
UTF-8
| 961
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/sh
# Launch the PBS job for every approx config on the cluster.
# Usage: sh launch_all_configs_approx.sh <benchmark> <suite>
#        e.g. sh launch_all_configs_approx.sh histo parboil
# For the swl1 case, only gtoswl1 (the others are the same as this one) and
# tl1 (this one is unique) are needed; the other configs have been removed.
# FIX: the shebang was previously on the second line (below a comment),
# which silently disabled it — it must be the very first line.
#specify your config path in stor2
configs_stor2=/stor2/hwang07/approx/
#modify the configs you want to launch on machine in02
for stor2_config in approx_100coverage_gtoswl1 approx_100coverage_gtoswl4 approx_100coverage_gtoswl8 approx_100coverage_gtoswl16 approx_100coverage_gtoswl48 approx_100coverage_lrrswl4 approx_100coverage_lrrswl8 approx_100coverage_lrrswl16 approx_100coverage_lrrswl48 approx_100coverage_RR4 approx_100coverage_RR8 approx_100coverage_RR16 approx_100coverage_RR48 approx_100coverage_STL4 approx_100coverage_STL8 approx_100coverage_STL16 approx_100coverage_STL48 approx_100coverage_tl1 approx_100coverage_tl4 approx_100coverage_tl8 approx_100coverage_tl16 approx_100coverage_tl48
do
    # cd to the absolute config root first so every iteration starts from a
    # known place; abort rather than qsub from the wrong directory.
    cd "$configs_stor2" || exit 1
    cd "$stor2_config" || exit 1
    cd "$2/$1" || exit 1
    qsub "pbs_$1.pbs"
done
| true
|
6e2e0ba4f912e7364a8ab3f78373b42b6234fcae
|
Shell
|
ygstr/etc
|
/bin/bar/panel.sh
|
UTF-8
| 811
| 2.90625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# lemonbar status panel: desktop/group indicator, clock, current mpd track
# and (unused here) a weather fetcher. Refreshed every 0.1s.
. $HOME/.config/bar/bar_config
# Desktop/group indicator built from groupstat output; occupied groups (1)
# become colored markers, empty ones (0) are dropped.
desktop() {
  groupstats="$(groupstat)"
  echo "%{U$YELLOW} $groupstats %{U-}" | tr -d ' ' | sed -e s/0//g -e s/1/'%{F#F9D48C#}%{F-}'/g
}
# Centered date/time segment.
clock(){
  clock=$(date "+%A, %d %B %I:%M:%S")
  echo "%{B$BG}%{F$CYAN}%{U$CYAN}%{F-}$clock%{U-}%{B-} "
}
# Currently playing mpd track (via mpc).
music(){
  play=$(mpc current)
  echo "%{B$BG}%{F$RED} %{+u}%{U$RED}%{F-}$play %{U-}%{B-}"
}
# Scrapes the temperature from accuweather (cached for an hour by wget -w).
weather(){
  out="$(wget -q -O- -w 3600 http://www.accuweather.com/en/us/chapel-hill-nc/27516/weather-forecast/329826 | awk -F\' '/acm_RecentLocationsCarousel\.push/{print $14", "$12"°" }'| head -1)"
  echo -e "%{B$BLACK2}%{F$FG}$out %{B-}"
}
# Feed the assembled line into lemonbar forever.
while :; do
  echo "$(desktop) %{c}$(clock) %{r}$(music)"
  sleep 0.1s
done | lemonbar -u 3 -g ${PW}x${PH}+${PX}+${PY} -b -f "$FONT2" -f "$FONT1" -B "$BG" -F "$FG" -d -p &
| true
|
de1a01d702a659291a4aeafc0c7eecfcf8ff234f
|
Shell
|
OpenLMIS-Angola/angola-openlmis-deployment
|
/deployment/reporting_local_installation_env/services/deploy_services.sh
|
UTF-8
| 1,271
| 3.28125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Deploy the OpenLMIS reporting stack to the remote docker host over TLS.
# Usage: deploy_services.sh <distro_repo>; set KEEP_OR_WIPE=wipe to drop
# the persisted Postgres data volume first.
export DOCKER_TLS_VERIFY="1"
export COMPOSE_TLS_VERSION=TLSv1_2
export DOCKER_HOST="local.ao.openlmis.org:2376"
export DOCKER_CERT_PATH="${PWD}/../../../credentials/local_installation_reporting_env"
export DOCKER_COMPOSE_BIN=/usr/local/bin/docker-compose
export REPORTING_DIR_NAME=reporting
# changing conflicting ports
export NIFI_WEB_PORT=81
export POSTGRES_PORT=5433
distro_repo=$1
# Tear down the running stack (volumes included) before redeploying.
cd "$distro_repo/$REPORTING_DIR_NAME" &&
$DOCKER_COMPOSE_BIN kill &&
$DOCKER_COMPOSE_BIN down -v --remove-orphans &&
# In order to avoid generated new certificates between next deploys of ReportingStack
# we need to move them to seperate volume marked as external.
# External volumes are not removed even we use docker-compose down with -v option.
# The external volume need to be created before the docker start
# docker volume create letsencrypt-config
# The same is with data stored by database. To avoid running the whole ETL process,
# we need to create a volume for Postgres data and mark it as external,
# so that Nifi can update already persisted data.
if [ "$KEEP_OR_WIPE" == "wipe" ]; then
  echo "Will WIPE data!"
  docker volume rm pgdata
fi
# Recreate (or reuse) the external Postgres data volume, then bring the
# stack up without the scalyr log shipper.
docker volume create pgdata
$DOCKER_COMPOSE_BIN build &&
$DOCKER_COMPOSE_BIN up -d --scale scalyr=0
| true
|
8b54e45eabd169f50f69e5d41f9c9b77d46617cf
|
Shell
|
luxe/unilang
|
/source/code/scratch/config/scripts/src/utility_functions/all_utility_functions/directory-moving
|
UTF-8
| 452
| 3.5625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Stack-based directory changing: step into a directory, do work that uses
# relative paths, then step back out automatically.

# Pop back to the directory we pushed from, discarding all output and errors.
function Silently_Go_Back_To_The_Previous_Pwd(){
    popd > /dev/null 2>&1
}
# Step into directory $1, invoke $2, then step back to the previous
# directory. $1 is quoted (paths may contain spaces); $2 is deliberately
# left unquoted so a multi-word value expands to a command plus arguments.
function Step_In_Call_Function_Step_Out(){
    pushd "$1" > /dev/null 2>&1; #step in
    $2; #call function
    Silently_Go_Back_To_The_Previous_Pwd; #step out
}
| true
|
8b7c45a564faaf8da74f03b1bb313c940dfef39a
|
Shell
|
Rendanic/ppas_and_docker
|
/epas/12/start_pg.sh
|
UTF-8
| 2,758
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# Start an EDB Postgres Advanced Server 12 node inside a container.
# NODE_TYPE=master : initdb on first run, then start and configure.
# NODE_TYPE=standby: pg_basebackup from MASTER_HOST on first run, then start.
# Finally tails the server log to keep the container alive.
set -x
PGHOME=/usr/edb/as12
PGBIN=${PGHOME}/bin
EFM_HOME=/usr/edb/efm-3.9
PGCTL=${PGHOME}/bin/pg_ctl
if [[ ${NODE_TYPE} = "master" ]]
then
    # First start: PG_VERSION is only present after a successful initdb.
    if [[ ! -f ${PGDATA}/PG_VERSION ]]
    then
        sudo -u ${PGUSER} \
            ${PGBIN}/initdb -D ${PGDATA} --auth-host=scram-sha-256 --data-checksums
        sudo -u ${PGUSER} \
            mkdir -p ${PGDATA}/log
        # Rebuild pg_hba.conf: trust local, SCRAM for remote, open replication.
        echo "local all all trust" > ${PGDATA}/pg_hba.conf
        echo "local replication all scram-sha-256" >> ${PGDATA}/pg_hba.conf
        echo "host replication repuser 0.0.0.0/0 trust" >> ${PGDATA}/pg_hba.conf
        echo "host all all 0.0.0.0/0 scram-sha-256" >> ${PGDATA}/pg_hba.conf
        sed -i "s/^#password_encryption = md5/password_encryption = scram-sha-256/g" ${PGDATA}/postgresql.conf
        sed -i "s/^port = .*/port = ${PGPORT}/g" ${PGDATA}/postgresql.conf
        sed -i "s/^logging_collector = off/logging_collector = on/g" ${PGDATA}/postgresql.conf
        sudo -u ${PGUSER} ${PGBIN}/pg_ctl -D ${PGDATA} start
        sudo -u ${PGUSER} ${PGBIN}/psql -c "ALTER USER enterprisedb PASSWORD 'edb'" -p ${PGPORT} edb
        sudo -u ${PGUSER} ${PGBIN}/psql -c "ALTER SYSTEM SET log_filename TO 'enterprisedb.log'" -p ${PGPORT} edb
        sudo -u ${PGUSER} ${PGBIN}/psql -c "select pg_reload_conf();" -p ${PGPORT} edb
        sudo -u ${PGUSER} ${PGBIN}/psql -c "CREATE USER repuser REPLICATION;" -p ${PGPORT} edb
    else
        sudo -u ${PGUSER} ${PGBIN}/pg_ctl -D ${PGDATA} start
    fi
else
    if [[ ${NODE_TYPE} = "standby" ]]
    then
        # First start of a standby: clone the master via pg_basebackup.
        if [[ ! -z ${MASTER_HOST} && ! -f ${PGDATA}/PG_VERSION ]]
        then
            # Derive a unique replication-slot name from this node's IP.
            SLOT_NAME=$(/usr/sbin/ifconfig eth0|grep "inet"|awk '{print $2}'|sed "s/\./_/g")
            PGPASSWORD=edb ${PGBIN}/psql -U enterprisedb \
                -h ${MASTER_HOST} \
                -p ${MASTER_PORT} \
                -c "SELECT pg_create_physical_replication_slot('${SLOT_NAME}', true);" \
                edb
            sudo -u ${PGUSER} \
                PGAPPNAME=${HOSTNAME} ${PGBIN}/pg_basebackup --pgdata=${PGDATA} \
                --write-recovery-conf \
                --wal-method=stream \
                --slot=${SLOT_NAME} \
                --username=repuser \
                -h ${MASTER_HOST} \
                -p ${MASTER_PORT}
            sudo -u ${PGUSER} ${PGBIN}/pg_ctl -D ${PGDATA} start
        fi
    fi
    # NOTE(review): on the standby's first run this second start appears
    # redundant (the server was just started above) — confirm intent.
    sudo -u ${PGUSER} ${PGBIN}/pg_ctl -D ${PGDATA} start
fi
# Keep the container foreground process alive on the server log.
tail -f ${PGLOG}
| true
|
50e3b72b6b749d79062729a88404c2134d15786f
|
Shell
|
Zarete/Blob
|
/public/subpart/proteins/hex/bin/hex_chain
|
UTF-8
| 1,183
| 3.53125
| 4
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#!/bin/sh
#------------------------------------------------------------------------------
#
# hex_chain: to rename a chain (i.e. column 22 of ATOM records) in a pdb file
#
# example: rename chain X to chain Y (X or Y may be blank [" "]):
#
#          hex_chain X Y <file.pdb >new_file.pdb
#
# Optional 3rd/4th arguments restrict the rename to residues in the
# inclusive range [first, last] (defaults: 1 to 9999).
#
# D.W. Ritchie  08/06/98
#
#------------------------------------------------------------------------------
#
if [ -z "$2" ]; then
   echo "usage: hex_chain <old_chain> <new_chain>"
   echo " "
   echo "example: hex_chain " \" \" " Y <file.pdb >new_file.pdb"
   exit
fi
# Default residue range unless given as $3 (first) and $4 (last).
if [ -z "$3" ]; then
   first="1"
   last="9999"
else
   first="$3"
   if [ -z "$4" ]; then
      last="9999"
   else
      last="$4"
   fi
fi
# For ATOM/TER records whose chain id (column 22) matches `old` and whose
# residue number (columns 23-26) lies within [first, last], substitute the
# new chain id; all other lines pass through unchanged.
awk '{ if ($1 == "ATOM" || $1 == "TER") {
          if (substr($0,22,1) == old) {
             r = substr($0,23,4) + 0
             if (r >= first+0 && r <= last+0) {
                print substr($0,1,21) new substr($0,23)
             } else {
                print $0
             }
          } else {
             print $0
          }
       } else {
          print $0
       }
     }' old="$1" new="$2" first="$first" last="$last"
| true
|
6cac84ce2b258b1d64fe0f7f5757b5ca0ebbd265
|
Shell
|
sandeep937/Devel
|
/bin/ec
|
UTF-8
| 716
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
# Launch an emacsclient, creating a new frame (-c) or terminal session (-nw)
# as appropriate for the current platform and whether an Emacs window or
# process already exists.
source "${KONIX_LIB_DIR}/lib_bash.sh"
# True when wmctrl reports an existing Emacs X window.
findEmacsWindow () {
    wmctrl -l -x|grep -q '.Emacs'
}
# True when an emacs process is already running (platform-specific ps).
findEmacsProcess () {
    if on_windows_p
    then
        ps -aW |grep -q 'emacs.exe'
    else
        ps -A -o comm|grep -q '^emacs$'
    fi
}
printf "Starting an emacsclient " 1>&2
if on_windows_p
then
    if ! findEmacsProcess
    then
        source konix_assert.sh
    fi
elif konix_on_linux_p
then
    if [ -n "$DISPLAY" ]
    then
        # With X but no visible Emacs window: ask for a new frame.
        if ! findEmacsWindow
        then
            ARGS="${ARGS} -c"
        fi
    else
        # No X display: run emacsclient in the terminal.
        ARGS="${ARGS} -nw"
    fi
else
    source konix_assert.sh
fi
if [ -z "$*" ] && [ -z "${ARGS}" ]
then
    # Need at least an argument when launching without -c
    set "."
fi
# -a "": start an emacs daemon automatically if none is running.
emacsclient -a "" ${ARGS} "$@"
| true
|
5769b3b528d4bca94c376c7afc74595a5771065f
|
Shell
|
lnsun/immutability
|
/.travis-build.sh
|
UTF-8
| 2,351
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
# CI build: clone (or update) the Checker Framework toolchain repositories
# on their pico-dependant-copy branches, build each in dependency order,
# then build PICO itself.
# Fail the whole script if any command fails
set -e
# Environment variables setup
export JAVA_HOME=${JAVA_HOME:-$(dirname $(dirname $(dirname $(readlink -f $(/usr/bin/which java)))))}
export JSR308=$(cd $(dirname "$0")/.. && pwd)
export AFU=$JSR308/annotation-tools/annotation-file-utilities
export CHECKERFRAMEWORK=$JSR308/checker-framework
export PATH=$AFU/scripts:$JAVA_HOME/bin:$PATH
#default value is opprop. REPO_SITE may be set to other value for travis test purpose.
export REPO_SITE=topnessman
echo "------ Downloading everthing from REPO_SITE: $REPO_SITE ------"
# Clone annotation-tools (Annotation File Utilities)
if [ -d $JSR308/annotation-tools ] ; then
    (cd $JSR308/annotation-tools && git pull)
else
    (cd $JSR308 && git clone -b pico-dependant-copy --depth 1 https://github.com/"$REPO_SITE"/annotation-tools.git)
fi
# Clone stubparser
if [ -d $JSR308/stubparser ] ; then
    (cd $JSR308/stubparser && git pull)
else
    (cd $JSR308 && git clone -b pico-dependant-copy --depth 1 https://github.com/"$REPO_SITE"/stubparser.git)
fi
# Clone checker-framework
if [ -d $JSR308/checker-framework ] ; then
    (cd $JSR308/checker-framework && git checkout pico-dependant-copy && git pull)
else
    # ViewpointAdapter changes are not yet merged to master, so we depend on pico-dependant branch
    (cd $JSR308 && git clone -b pico-dependant-copy --depth 1 https://github.com/"$REPO_SITE"/checker-framework.git)
fi
# Clone checker-framework-inference
if [ -d $JSR308/checker-framework-inference ] ; then
    (cd $JSR308/checker-framework-inference && git checkout pico-dependant-copy && git pull)
else
    # Again we depend on pico-dependant branch
    (cd $JSR308 && git clone -b pico-dependant-copy --depth 1 https://github.com/"$REPO_SITE"/checker-framework-inference.git)
fi
# Build annotation-tools (and jsr308-langtools)
(cd $JSR308/annotation-tools/ && ./.travis-build-without-test.sh)
# Build stubparser
(cd $JSR308/stubparser/ && mvn package -Dmaven.test.skip=true)
# Build checker-framework, with downloaded jdk
(cd $JSR308/checker-framework && ant -f checker/build.xml dist-downloadjdk)
# Build checker-framework-inference
(cd $JSR308/checker-framework-inference && gradle dist) # This step needs to be manually in $CFI executed due to path problems
# Build PICO
(cd $JSR308/immutability && ./gradlew build)
| true
|
29730aa535cc751e2c79ca98c18e42596893c73a
|
Shell
|
willzhang05/sshfaillog
|
/format.sh
|
UTF-8
| 149
| 2.984375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Reformat the given Python files in place with autopep8.
if [[ $# -gt 0 ]]; then
    # "$@" is quoted so filenames with spaces survive word splitting
    # (the original passed an unquoted $@).
    autopep8 --in-place --aggressive --aggressive "$@"
    echo "Formatted $*."
else
    echo "No filename provided."
fi
| true
|
1a768714f4fcb4bd18489143c1aaea444ffdd73d
|
Shell
|
omrprks/subgit
|
/subgit.sh
|
UTF-8
| 143
| 2.9375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Run a git command inside the directory given as $1:
#   subgit <dir> <git args...>
# FIX: pushd/popd and ${@:2} are bash features; the original declared
# "#!/usr/bin/env sh", which breaks on POSIX shells such as dash.
[ ! -d "${1}" ] && {
    echo "Error: ${1}: No such directory"
    exit 1
}
pushd "${1}" > /dev/null
git "${@:2}"
popd > /dev/null
| true
|
e2e6054ce5e2d0d65ce9b5bc9af6a18418e4c4fc
|
Shell
|
studyfranco/GNEA
|
/rnalizer/SCNorm/SCNormSlurmArray.sh
|
UTF-8
| 2,068
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
#
#SBATCH -t 5-00 # time (D-HH:MM)
#SBATCH -o SCNorm/Log/slurm.%N.%j.SCNorm.out # STDOUT
#SBATCH -e SCNorm/Log/slurm.%N.%j.SCNorm.err # STDERR
# Split every parameter file in SCNorm/param into one-row files (header +
# single parameter line), collect them into a timestamped jobs list and
# submit the SCNorm worker(s) via sbatch.
# Args: $1 = method (Mean|Sums|anything else runs both), $2 = cores,
#       $3 = log-fold-change threshold, $4 = p-value threshold.
method=$1
ncore=$2
lfc=$3
pval=$4
chemin=`pwd`
listefile=`ls SCNorm/param`
if [ ! -d "SCNorm/tmp" ]
then
    mkdir SCNorm/tmp
fi
# One scratch directory per invocation, keyed by timestamp.
DATE=`date '+%Y-%m-%d-%H-%M-%S'`
mkdir SCNorm/tmp/$DATE
for nfile in $listefile
do
    header=`head -n 1 SCNorm/param/$nfile`
    nline=`cat SCNorm/param/$nfile | wc -l`
    i=2
    # Emit one file per data row (rows 2..n), each with the shared header.
    while [ $i -le $nline ]
    do
        echo "$header" > SCNorm/tmp/$DATE/${nfile}Param$i.csv
        line=`sed -n "${i}{p;q}" SCNorm/param/$nfile`
        echo "$line" >> SCNorm/tmp/$DATE/${nfile}Param${i}.csv
        i=$((i+1))
    done
done
listefile=`ls SCNorm/tmp/$DATE`
for nfile in $listefile
do
    echo "$chemin/SCNorm/tmp/$DATE/$nfile" >> $chemin/SCNorm/tmp/$DATE/jobsList.txt
done
# Submit with the requested normalisation method; unknown methods run both.
if [ "Mean" = "$method" ]
then
    job=$(sbatch -N 1 -n $ncore -t 7-00 --mem 20GB SCNorm/SCNormSlurmArrayLocaljobsend.sh $chemin/SCNorm/tmp/$DATE/jobsList.txt $chemin/SCNorm/reference $chemin/SCNorm/SC $chemin/SCNorm/counts $chemin/SCNorm/results $chemin/SCNorm/Human_Big_GRN_032014.csv $lfc $pval $ncore Mean)
elif [ "Sums" = "$method" ]
then
    job=$(sbatch -N 1 -n $ncore -t 7-00 --mem 20GB SCNorm/SCNormSlurmArrayLocaljobsend.sh $chemin/SCNorm/tmp/$DATE/jobsList.txt $chemin/SCNorm/reference $chemin/SCNorm/SC $chemin/SCNorm/counts $chemin/SCNorm/results $chemin/SCNorm/Human_Big_GRN_032014.csv $lfc $pval $ncore Sums)
else
    job=$(sbatch -N 1 -n $ncore -t 7-00 --mem 20GB SCNorm/SCNormSlurmArrayLocaljobsend.sh $chemin/SCNorm/tmp/$DATE/jobsList.txt $chemin/SCNorm/reference $chemin/SCNorm/SC $chemin/SCNorm/counts $chemin/SCNorm/results $chemin/SCNorm/Human_Big_GRN_032014.csv $lfc $pval $ncore Mean)
    job=$(sbatch -N 1 -n $ncore -t 7-00 --mem 20GB SCNorm/SCNormSlurmArrayLocaljobsend.sh $chemin/SCNorm/tmp/$DATE/jobsList.txt $chemin/SCNorm/reference $chemin/SCNorm/SC $chemin/SCNorm/counts $chemin/SCNorm/results $chemin/SCNorm/Human_Big_GRN_032014.csv $lfc $pval $ncore Sums)
fi
exit 0
| true
|
d69034fa9849b1680651049e873bbd8fa571e55c
|
Shell
|
jason-r-c/ansible-automation
|
/frontend-dev/backendwar.sh
|
UTF-8
| 1,582
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# Rebuild the BackEnd WAR from source, back up and replace the deployed
# copy under tomcat8, restart tomcat and trigger the Gaia fixture load.
# NOTE(review): the sudo password is piped in clear text throughout —
# consider passwordless sudoers entries instead.
# sudo with password in one command line:
# https://superuser.com/questions/67765/sudo-with-password-in-one-command-line
echo ubuntu | sudo -S mkdir /home/backend-backups
cd /home/backend-source/repo/BackEnd
# @JC 05/12/17: may need to add some git credentials here as i expect it to fail
echo ubuntu | sudo -S git pull
# @JC 3/1/19: commented so to use the preferred Gradle approach
# grails clean
# grails war
# The preferred Gradle approach as mentioned above
./gradlew clean
./gradlew assemble
sudo -S sudo service tomcat8 stop
# Add parameter "&dropDatabase=yes" to drop the db, set to empty string to keep it, ie dropdb=""
dropdb="&dropDatabase=yes"
# Back up the currently deployed WAR (dated) before replacing it.
warfile="/var/lib/tomcat8/webapps/BackEnd.war"
if [ -f "$warfile" ]
then
    echo "$warfile found."
    echo y | sudo -S mv /var/lib/tomcat8/webapps/BackEnd.war /home/backend-backups/BackEnd_$(date +%Y-%m-%d).war
else
    echo "$warfile not found."
fi
newwarfile="/home/backend-source/repo/BackEnd/build/libs/BackEnd.war"
if [ -f "$newwarfile" ]
then
    echo "$newwarfile found."
    # Command needs to run as sudo (backend.war needs to be copied as sudo user)
    echo ubuntu | sudo -S cp /home/backend-source/repo/BackEnd/build/libs/BackEnd.war /var/lib/tomcat8/webapps
else
    echo "$newwarfile not found."
fi
echo "Restarting Tomcat..."
sudo -S sudo service tomcat8 start
# Poll until the app answers HTTP 200 on localhost:8080.
until [ "`curl -s -o /dev/null -w "%{http_code}" localhost:8080`" == "200" ];
do
    echo "Waitng to startup..."
done
echo "Now running Gaia fixture..."
curl -k https://@dev.cnect.to/BackEnd/persistence/saveFixture?fixtureName=Gaia"$dropdb"
| true
|
29e341c5dc5ba09f99b63c25bc545f575e24822c
|
Shell
|
k-kinzal/aliases
|
/test/integration/circular-dependencies/test.sh
|
UTF-8
| 897
| 3.453125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Integration test: alias shims with circular dependencies.
# Generates shims from aliases.yaml into a temp dir and diffs every output
# against the golden files next to this script.
set -eu
TEMP_DIR=$(mktemp -d /tmp/XXXX)
TEST_DIR="$(cd "$(dirname "${0}")"; echo "$(pwd)")"
ALIASES=$(cd "${TEST_DIR}/../../..//dist"; echo "$(pwd)/aliases -c ${TEST_DIR}/aliases.yaml")
# Prefer colordiff when installed; -B/-w ignore blank-line/whitespace noise.
DIFF=$(if which colordiff >/dev/null; then echo "colordiff -Buw --strip-trailing-cr"; else echo "diff -Bw"; fi)
# Masks host-specific paths and ISO timestamps so outputs are comparable.
MASK="sed -e s|${HOME}|[HOME]|g -e s|${TEMP_DIR}|[TEMP_DIR]|g -e s|[0-9]*-[0-9]*-[0-9]*T[0-9]*:[0-9]*:[0-9]*Z|yyyy-mm-ddThh:MM:ssZ|g"
${ALIASES} gen --export-path "${TEMP_DIR}" | ${MASK} | sort | ${DIFF} ${TEST_DIR}/alias -
${ALIASES} gen --export --export-path "${TEMP_DIR}" | ${MASK} | ${DIFF} ${TEST_DIR}/export -
cat ${TEMP_DIR}/alpine1 | ${MASK} | ${DIFF} ${TEST_DIR}/alpine1 -
${TEMP_DIR}/alpine1 sh -c 'alpine2 sh -c "echo 1"' | ${MASK} | ${DIFF} ${TEST_DIR}/stdout -
${ALIASES} run /usr/local/bin/alpine1 sh -c 'alpine2 sh -c "echo 1"' | ${MASK} | ${DIFF} ${TEST_DIR}/stdout -
| true
|
7439c156b49d558981ce4345096d157684eea791
|
Shell
|
dmaderazo/analysisCode
|
/getLogLikelihood.sh
|
UTF-8
| 624
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
# Collect log-likelihood traces from every *.log in the current directory
# (via logRetriever.sh) and paste them side by side, comma-separated, into
# logLikelihoodFrame.txt (one column per log file).
rm -rf tempFile
for i in *.log
do
    logRetriever.sh -a $i
done > tempFile
#pick a file
fileName=$(ls *.log | head -n 1)
#count the number of lines in that file
lineCount=$(wc -l < $fileName)
#return the number of lines less the current header (-9)
#plus the file name header (+1)
# this is the no. of rows for output
lineCount="$(($lineCount-9+1))"
#number of columns is the number of files included
numCols=$(ls -l *.log | wc -l)
numCols="$(($numCols+0))"
# wc -l < tempFile
#echo $lineCount
#echo $numCols
# pr folds tempFile into $numCols columns of $lineCount lines, joined by ",".
pr -ts"," -l$lineCount -$numCols tempFile > logLikelihoodFrame.txt
rm -rf tempFile
| true
|
f977b1757224d640714b573a439870ac1e1272cb
|
Shell
|
prateek/dotfiles
|
/zsh/lib/keybind.zsh
|
UTF-8
| 1,606
| 3.09375
| 3
|
[] |
no_license
|
#!/usr/bin/env zsh
# vim:syntax=sh
# vim:filetype=sh
# Key-binding configuration for the zsh line editor (vi mode).
# make key presses work a lot faster
# `man keytimeout`: The time the shell waits, in hundredths of seconds,
# for another key to be pressed when reading bound multi-character sequences.
KEYTIMEOUT=1
# map ctrl-space to accept auto-suggestions.
# color map: https://upload.wikimedia.org/wikipedia/commons/1/15/Xterm_256color_chart.svg
# usable attributes: https://zsh.sourceforge.io/Doc/Release/Zsh-Line-Editor.html#Character-Highlighting
# export ZSH_AUTOSUGGEST_HIGHLIGHT_STYLE="fg=252,standout"
# bindkey '^ ' autosuggest-accept # toggle on ctrl-space
# Who doesn't want home and end to work?
# NOTE(review): relies on the $key assoc array (usually set via terminfo in
# another init file) — confirm it is populated before this file is sourced.
bindkey -M viins "${key[Home]}" beginning-of-line
bindkey -M viins "${key[End]}" end-of-line
# ensure alt+arrow keys work
bindkey "^[^[[D" backward-word
bindkey "^[^[[C" forward-word
# copy buffer to stack and clear, reload it next time editor starts up
bindkey -M vicmd 'q' push-line
# vim editing for command line
autoload -z edit-command-line
zle -N edit-command-line
bindkey -M vicmd 'v' edit-command-line
# rerun last command & insert output into current buffer
# zsh/parameter provides the $history assoc array used below.
zmodload -i zsh/parameter
# Widget: re-executes the previous history entry and appends its stdout to
# the text left of the cursor.  NB: `eval` re-runs the command for real.
insert-last-command-output() {
LBUFFER+="$(eval $history[$((HISTCMD-1))])"
}
zle -N insert-last-command-output
bindkey -M viins "^P" insert-last-command-output
# ctrl-u in vi-cmd mode invokes the url_select autoload function
# (url_select is assumed to be on $fpath — TODO confirm)
zle -N url_select
bindkey -M vicmd "^u" url_select
# TODO: rerurn last command and page output using the bat help pager
# rerun-last-command-with-bat() {
# bathelp='bat --plain --language=help'
# $history[$((HISTCMD-1))]
# }
| true
|
3bf5acc0f902b7b86a95541ab8e9d82e8f7773c1
|
Shell
|
michaelchiche/docs
|
/scripts/generate_changelog.sh
|
UTF-8
| 1,103
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/bash
# Usage: generate-changelog <from-git-tag> <to-git-tag> [--all-prs] [--tab-output]
# Writes changelog to standard output
# Prerequisites: Set environment variable GITHUB_TOKEN
# For now, globally install https://github.com/lindydonna/github-pr-changelog
# by cloning the repo and running `npm i -g`
# In the future, will publish the tool to NPM, so that this script can use a local version
set -o nounset -o errexit -o pipefail

from_tag="${1:-}"
to_tag="${2:-}"

# Both tag arguments are mandatory; print usage help and fail otherwise.
if [[ -z "${from_tag}" || -z "${to_tag}" ]]; then
  echo "Missing required arguments."
  echo "Usage: generate-changelog <from-git-tag> <to-git-tag> [--all-prs] [--tab-output]"
  exit 1
fi

# not currently used, will add back when gh-changelog is moved to a locally installed package
SCRIPT_DIR="$( cd "$( dirname "$0" )" && pwd )"

# Comma-separated list of pulumi repositories whose PRs are aggregated.
TOOLS_REPOS="pulumi,pulumi-cloud,pulumi-aws,pulumi-terraform,pulumi-azure,pulumi-kubernetes,pulumi-aws-serverless,pulumi-aws-infra,pulumi-gcp,pulumi-azure-serverless,pulumi-docker"

# Delegate to the globally installed gh-changelog; optional flags 3 and 4
# are forwarded verbatim when supplied.
gh-changelog \
    --owner pulumi --repos "${TOOLS_REPOS}" \
    --git-directory "../" \
    --from "${from_tag}" --to "${to_tag}" \
    "${3:-}" "${4:-}"
| true
|
305c9dfac4467b0e53f211aff7be1ab5f5974194
|
Shell
|
519984307/sngtec-telemetrics
|
/generate-linear-converter-if-else.sh
|
UTF-8
| 500
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# Emit an if/else-if cascade of IF_CREATE_LIN_CONV(...) macro invocations,
# one for every ordered pair of supported telemetry value types.  The first
# line has an empty keyword prefix; every subsequent line is prefixed with
# "else", so the output pastes directly into C++ as a dispatch chain.
emit_lin_conv_cascade() {
	# Symbolic type names and their matching Qt/C++ types (index-aligned).
	local str_types=(SignedInt16 SignedInt32 UnsignedInt16 UnsignedInt32 Float4B Double)
	local cpp_types=(qint16 qint32 quint16 quint32 float double)
	# The two lists must stay in lock-step or the generated pairs are wrong.
	if (( ${#str_types[@]} != ${#cpp_types[@]} )); then
		echo "Sizes don't match"
		return 1
	fi
	local last=$(( ${#str_types[@]} - 1 ))
	local keyword="" src dst
	for (( src = 0; src <= last; src++ )); do
		for (( dst = 0; dst <= last; dst++ )); do
			echo "$keyword IF_CREATE_LIN_CONV(${str_types[$src]}, ${str_types[$dst]}, ${cpp_types[$src]}, ${cpp_types[$dst]})"
			keyword="else"
		done
	done
}
emit_lin_conv_cascade
| true
|
64c0d6eed70f413b3bebef27dc50ed5c9e41196a
|
Shell
|
Arrrrrr/Hoth
|
/UNIX/commitingit.sh
|
UTF-8
| 1,295
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
# Interactive helper ("Helga") that stages, commits and pushes everything in
# the ./repo checkout.  Answers "y", "yes" or "yes helga" trigger the commit
# flow; any other reply aborts politely.

# Pull, stage all changes, commit with the given message, then sync with the
# remote (pull/push/pull).  $1 - commit message.
commit_and_push() {
  echo committing to your repo
  # add path to your repo below
  cd repo
  git pull
  git add .
  git commit -a -m "$1"
  git pull
  git push
  git pull
  echo commit and push is complete
}

# Prompt for the commit message, echo it back, then run the shared flow.
ask_message_and_commit() {
  echo What is your commit message?
  read yourmessage2
  echo "Your message was: $yourmessage2"
  commit_and_push "$yourmessage2"
}

echo Hi there! My name is Helga. Would you like to commit stuff? \(y/n\)?
read reply
echo "Your message was: $reply"
if [ "$reply" == "y" ] || [ "$reply" == "yes" ]; then
  echo "OK, committing to your repo"
  ask_message_and_commit
elif [ "$reply" == "yes helga" ]; then
  echo "Have some bacon, committing to your repo"
  ask_message_and_commit
else
  echo "OK, I can see you are not ready for commitment. It's cool."
fi
echo My work here is done. Enjoy the bacon.
| true
|
c51a87cb857dd2c3859c22fc04b69c02c7b42f3c
|
Shell
|
jakob1379/.dotfiles
|
/polybar/.config/polybar/Scripts/usb-devices
|
UTF-8
| 877
| 3.703125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Polybar helper: show the vendor names of attached removable block devices
# and unmount any that are currently mounted.  Requires lsblk and jq.
set -euo pipefail
IFS=$'\n\t'

# Print one space-separated "<icon> <vendor>" entry per removable device
# that reports a vendor string.
usb_print() {
    devices=$(lsblk -Jplno NAME,TYPE,RM,SIZE,MOUNTPOINT,VENDOR)
    output=""
    counter=0
    # Find mounted devices
    for name in $(echo "$devices" | jq '.blockdevices[] | select(.rm == true) | select(.vendor != null) | .name' | tr -d '"'); do
        # Look the vendor up again by device name (-r strips jq's quotes).
        vendor=$(echo "$devices" | jq -r '.blockdevices[] | select(.name == "'"$name"'") | .vendor')
        if [ $counter -eq 0 ]; then
            space=""
        else
            space=" "
        fi
        counter=$((counter + 1))
        output="$output$space $vendor"
    done
    echo "$output"
}

# Unmount every removable device that currently has a mountpoint.
# (The original loop body was empty, which is a bash syntax error and made
# the whole script fail to parse.)
usb_unmount() {
    # lsblk -p yields full /dev/... paths; jq -r strips the JSON quotes.
    devices=$(lsblk -Jplno NAME,TYPE,RM,SIZE,MOUNTPOINT,VENDOR | jq -r '.blockdevices[] | select(.rm == true) | select(.mountpoint != null) | .name')
    for device in $devices; do
        # udisksctl unmounts without root where polkit permits —
        # TODO confirm udisksctl is available on the target systems.
        udisksctl unmount -b "$device"
    done
}

usb_print
usb_unmount
| true
|
f60147bd7cac1a36348fc174873b0c6b1c946bb3
|
Shell
|
dronegator/nlp
|
/scripts/index.sh
|
UTF-8
| 257
| 2.53125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Build an index from a text corpus via sbt, then drop into the REPL on it.
#   $1 - input text file (default /tmp/a.txt)
#   $2 - output index file (default /var/tmp/<basename>.dat)
from=${1:-/tmp/a.txt}
to=${2:-/var/tmp/$(basename "$from" .txt).dat}
# Two-pass indexing: first pass writes an intermediate /tmp/tmp.dat, the
# second combines it into the final index.
sbt "index-stream/run $from /tmp/tmp.dat" &&
sbt "index-stream/run $from $to /tmp/tmp.dat" &&
echo "sbt \"repl/run $to\"" &&
sbt "repl/run $to"
# Echo the REPL command again so it can be copy-pasted later.
echo "sbt \"repl/run $to\""
| true
|
53c3b2a2710a96be81f122ff371f919e29d6168d
|
Shell
|
hakehash/shellscripts
|
/audyn.sh
|
UTF-8
| 4,496
| 3.375
| 3
|
[] |
no_license
|
#!/bin/sh
# audyn.sh — batch driver for LS-DYNA parameter studies.
# Given a keyword file (*.dyn) and option letters, each option generates a
# family of modified input decks (yield stress, thickness, integration
# scheme, initial imperfection, residual stress, ...) and runs each through
# rundyn.sh, logging start/end times to autolog.txt.
if [ $# -eq 0 ]; then
echo "usage:\t`basename $0` /path/to/keywordfile.dyn [ytnilhso]" 1>&2
else
PATH_TO_LSDYNA=/mnt/c/LSDYNA/program/
#NAME_OF_EXEC=ls-dyna_smp_s_R10.0_winx64_ifort131.exe
NAME_OF_EXEC=ls-dyna_smp_s_R901_winx64_ifort131.exe
PATH_TO_SCRIPTS=`cd $(dirname $0) && cd -`
PATH_TO_KEYFILE=`dirname $1`
ORIG=$1
ORIG_FILENAME=`basename $1 .dyn`
shift
LOG_FILENAME=autolog.txt
# Baseline plate parameters: thickness t [mm], yield stress SIGY [MPa].
t=24
SIGY=363.77
# Slenderness parameter beta = (880/t)*sqrt(SIGY/E), E = 205800 MPa.
BETA=`awk 'BEGIN{print 880/'$t'*sqrt('$SIGY'/205800)}'`
w0=2.85173
ALPHA=0.05
#MOD_FILENAME=${ORIG_FILENAME}_${SIGY}MPa_w${w0}mm_$2
# Create the per-run directory and derive input/output file paths from the
# current MOD_FILENAME (set by the caller before invoking init).
init(){
	touch $PATH_TO_KEYFILE/$LOG_FILENAME
	mkdir $PATH_TO_KEYFILE/${MOD_FILENAME}
	DYNA_I=$PATH_TO_KEYFILE/${MOD_FILENAME}/${MOD_FILENAME}.dyn
	DYNA_O=`dirname $DYNA_I`/d3hsp
}
# Run the prepared deck through rundyn.sh, logging start/end timestamps.
run(){
	echo $MOD_FILENAME started \ \ \ at `date` |\
	tee -a $PATH_TO_KEYFILE/$LOG_FILENAME 1>&2
	$PATH_TO_SCRIPTS/rundyn.sh $DYNA_I
	echo $MOD_FILENAME terminated at `date` |\
	tee -a $PATH_TO_KEYFILE/$LOG_FILENAME 1>&2
}
# Sweep the yield stress in *MAT_PLASTIC_KINEMATIC (field 5, two lines after
# the keyword) across the -3sigma..+3sigma range.
yield(){
	for SIGY in `seq 309.23 9.09 418.31` #-3sigma to 3sigma
	do
		MOD_FILENAME=${ORIG_FILENAME}_${SIGY}MPa
		init
		cat $ORIG | awk '/\*MAT_PLASTIC_KINEMATIC/{NR_MAT3=NR+2}
			{if(NR==NR_MAT3)
				printf "%10d%10G%10.1f%10.1f%10.2f%10.2f%10.1f\n",
					$1,$2,$3,$4,'$SIGY',$6,$7;
			else
				print $0}' > $DYNA_I
		run
	done
}
#NR_plate=`grep \*SECTION_SHELL_TITLE $1 -A4 -n | grep plate$ | sed 's/[:-].*//g'`
# Sweep the shell thickness.  NOTE(review): shadows the global variable t,
# depends on NR_plate (commented out above) and references $2 after the
# top-level shift — confirm intended usage before relying on this option.
t(){
	for t in `seq 24 1 24`
	do
		MOD_FILENAME=${ORIG_FILENAME}_t${t}mm_w${w0}mm_$2
		init
		if [ -n "$NR_plate" ]; then
		cat $ORIG | \
		awk '{
			if(NR=='$NR_plate+4')
				printf "%10.1f%10.1f%10.1f%10.1f%10.1f%10.1f%10.1f%10d\n",
					'$t','$t','$t','$t',0,0,0,0
			else
				print$0
			}' > $DYNA_I
		fi
		run
	done
}
# Sweep the number of through-thickness integration points (NIP 3..10);
# $1 selects the scheme flag written into *CONTROL_SHELL (0=gauss, 1=lobatto).
nip(){
	for NIP in `seq 3 10`
	do
		if [ $1 = 0 ]; then
			INTGRD="gauss"
		elif [ $1 = 1 ]; then
			INTGRD="lobatto"
		fi
		MOD_FILENAME=${ORIG_FILENAME}_${INTGRD}_$NIP
		init
		cat $ORIG | awk '
			/\*CONTROL_SHELL/{NR_CS=NR+4}
			/\*SECTION_SHELL_TITLE/{NR_SS=NR+3}
			{
			if(NR==NR_CS)
				printf "%10.1f%10d%10d%10d%10d\n",
					$1,'$1',$3,$4,$5;
			else if(NR==NR_SS)
				printf "%10d%10d%10.1f%10d%10.1f%10d%10d%10d\n",
					$1,$2,$3,'$NIP',$5,$6,$7,$8;
			else
				print $0
			}' > $DYNA_I
		run
	done
}
# Sweep the initial imperfection amplitude w0 = ALPHA*BETA^2*t; $1 is passed
# through to impfmak.sh as the imperfection mode flag.
impf(){
	for ALPHA in `seq 0.014 0.018 0.158` #-sigma to 3sigma
	do
		w0=`echo $ALPHA | awk '{print $1*'$BETA'*'$BETA'*'$t'}'`
		MOD_FILENAME=${ORIG_FILENAME}_w${w0}mm_$1
		init
		$PATH_TO_SCRIPTS/impfmak.sh $ORIG $w0 -${1} > $DYNA_I
		run
	done
}
# Same as impf but at Smith's three characteristic ALPHA levels.
impf_smith(){
	for ALPHA in 0.025 0.1 0.3
	do
		w0=`echo $ALPHA | awk '{print $1*'$BETA'*'$BETA'*'$t'}'`
		MOD_FILENAME=${ORIG_FILENAME}_w${w0}mm_$1
		init
		$PATH_TO_SCRIPTS/impfmak.sh $ORIG $w0 -${1} > $DYNA_I
		run
	done
}
# Run the unmodified deck (zero imperfection) as the reference case.
origin(){
	w0=0.00
	MOD_FILENAME=${ORIG_FILENAME}_w${w0}mm
	init
	cat $ORIG > $DYNA_I
	run
}
# Imperfection sweep with residual stress applied first via resapp.sh.
residual(){
	for ALPHA in `seq 0.014 0.018 0.158` #-sigma to 3sigma
	do
		w0=`echo $ALPHA | awk '{print $1*'$BETA'*'$BETA'*'$t'}'`
		MOD_FILENAME=${ORIG_FILENAME}_w${w0}mm_$1_res
		init
		$PATH_TO_SCRIPTS/resapp.sh $ORIG $ALPHA > tmp.dyn
		$PATH_TO_SCRIPTS/impfmak.sh tmp.dyn $w0 -${1} > $DYNA_I
		rm tmp.dyn
		run
	done
}
# Residual-stress variant at Smith's three ALPHA levels.
residual_smith(){
	for ALPHA in 0.025 0.1 0.3
	do
		w0=`echo $ALPHA | awk '{print $1*'$BETA'*'$BETA'*'$t'}'`
		MOD_FILENAME=${ORIG_FILENAME}_w${w0}mm_$1_res
		init
		$PATH_TO_SCRIPTS/resapp.sh $ORIG $ALPHA > tmp.dyn
		$PATH_TO_SCRIPTS/impfmak.sh tmp.dyn $w0 -${1} > $DYNA_I
		rm tmp.dyn
		run
	done
}
# Dispatch each requested study from the remaining option letters.
while getopts "ytnilhopsr" OPT ; do
	case $OPT in
		y) yield
			;;
		t) t
			;;
		n) nip 0
			nip 1
			;;
		i) origin
			impf l
			impf h
			;;
		l) impf l
			;;
		h) impf h
			;;
		o) origin
			;;
		p) residual p
			residual_smith p
			residual q
			residual_smith q
			residual r
			residual_smith r
			residual s
			residual_smith s
			;;
		s) impf_smith l
			impf_smith h
			;;
		r) residual l
			residual h
			;;
	esac
done
fi
| true
|
618e2cd396f929afc7c2471fe913a466ff86013b
|
Shell
|
kruton/dotfiles
|
/home/.bashrc.d/61-heroku.bash
|
UTF-8
| 152
| 2.75
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Prepend the Heroku Toolbelt directory to PATH, but only when it is
# actually installed on this machine.
if [[ -d /usr/local/heroku/bin ]]; then
  export PATH="/usr/local/heroku/bin:${PATH}"
fi
| true
|
22e986dfc5e9b1a6b53be4e8df34e9e5c1038421
|
Shell
|
ecohealthalliance/tater
|
/.scripts/run-tests.sh
|
UTF-8
| 2,034
| 4.03125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Run the chimp/cucumber test suite against a throwaway meteor instance on a
# random free port.  Requires TAGS (and optionally WATCH=true) in the
# environment; spins up a local Tika server if none answers on :9998.
port=$RANDOM
SECONDS=0
quit=0
watch=""
if [ "$WATCH" == "true" ]; then
  watch="--watch --watchTags=${TAGS}";
fi
touch testoutput${port}.txt
# Trap interruptions to avoid leaving files or meteor instances around
function finish {
  rm testoutput${port}.txt
  kill `lsof -t -i:${port}`
}
trap finish INT
# Find an unused port (http://unix.stackexchange.com/questions/55913/whats-the-easiest-way-to-find-an-unused-local-port)
while [ "$quit" -ne 1 ]; do
  netstat -a | grep $port >> /dev/null
  if [ $? -gt 0 ]; then
    quit=1
  else
    port=`expr $port + 1`
  fi
done
# Check if Tika server is running on localhost by round-tripping a string.
document_text='Test Document String Ponies 123'
tika_result=$(echo $document_text | curl -s -X PUT --data-binary @- http://localhost:9998/tika --header "Content-type: application/octet-stream" --header "Accept: text/plain")
tika_result=$(echo $tika_result | xargs) # trim
if [ "$tika_result" != "$document_text" ]
then
  # echo 'Unable to reach Tika on port 9998'
  if [ ! -e "tika-server.jar" ]
  then
    # echo 'No Tika server binary found, downloading...'
    # (fixed: a stray "h" argument here made curl try to fetch host "h" too)
    curl -L http://mirror.cc.columbia.edu/pub/software/apache/tika/tika-server-1.11.jar > tika-server.jar
  fi
  java -jar tika-server.jar &
fi
# Start meteor server if it isn't already running
if ! lsof -i:3000
then
  meteor &
fi
# Stream the chimp output while the run is in progress.
tail -f testoutput${port}.txt &
# Connect to mongo, use a database named after the currently selected port
MONGO_URL=mongodb://localhost:3001/${port} meteor --settings settings-development.json --port ${port} &
CUCUMBER_TAIL=1 chimp --tags=${TAGS} $watch --ddp=http://localhost:${port} --browser=chrome --path=tests/cucumber/features/ --coffee=true --chai=true --sync=false > testoutput${port}.txt
kill `lsof -t -i:${port}`
echo "$(($SECONDS / 60)) minutes and $(($SECONDS % 60)) seconds elapsed"
# Determine exit code based on test output
if grep -q "failed steps" testoutput${port}.txt
then
  rm testoutput${port}.txt
  echo "Tests Failed"
  exit 1
fi
rm testoutput${port}.txt
echo "Tests Passed"
exit
| true
|
5c89078aee2cf4ffe7eb4fdf440af3dc53939fec
|
Shell
|
diacu/compass
|
/entrypoint.sh
|
UTF-8
| 282
| 3.40625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Container entrypoint: align the in-container user's UID with the bind
# mount's owner, then either watch with compass (no args) or exec the
# requested command.
set -e
CUSER="frontend"
# UID of whoever owns the mounted working directory on the host.
MYUID=$(stat -c "%u" .)
APP_DIR="${APP_DIR:-/var/www/html}"
# Re-map the container user so files it writes stay owned by the host user.
if [[ "$MYUID" -gt '0' && "$MYUID" != "$(id -u "${CUSER}")" ]]; then
    usermod -u "${MYUID}" "${CUSER}"
fi
cd "${APP_DIR}"
# No arguments: default to compass watch.  Otherwise exec the given command.
# (fixed: `[ -z "$@" ]` raised "too many arguments" — aborting under set -e —
# whenever more than one argument was passed)
if [ $# -eq 0 ]; then
    compass watch --poll
else
    exec "$@"
fi
| true
|
59fd60cb78170cd784540dd5997bb2ff17b577a3
|
Shell
|
bboykov/dotfiles
|
/scripts/dotfiles-link-dotfile
|
UTF-8
| 1,322
| 4
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Symlink a dotfile from the dotfiles repo into place, backing up any real
# file already at the destination.
#   $1 - target: the file inside the dotfiles repo
#   $2 - link_name: the path where the symlink should live
set -euo pipefail
IFS=$'\n\t'
readonly DOTFILES_HOME="${HOME}/dotfiles"
# NOTE(review): DOTFILES_CONFIG is not referenced below — presumably used by
# sourced lib/functions.bash; confirm before removing.
readonly DOTFILES_CONFIG="${DOTFILES_HOME}/config"
# shellcheck source=lib/functions.bash
source "${DOTFILES_HOME}/lib/functions.bash"
target=$1
link_name=$2
link_dirname="${link_name%/*}"
link_filename="${link_name##*/}"
backup_directory="${HOME}/.old_dotfiles_backup_directory"
# Validate inputs.  NOTE(review): exits 0 (not 1) on failure — looks
# deliberate so batch callers keep going; confirm.
if [[ ! -f "${target}" || -d "${target}" ]]; then
  util::error_message "Target file not found: ${target}"
  exit 0
elif [[ ! -d "${link_dirname}" ]]; then
  util::error_message "Destination directory does not exist: ${link_dirname}"
  exit 0
fi
util::debug_message "Check if file ${link_name} exists."
# A real (non-symlink) file at the destination is moved to a timestamped
# backup before linking.
if [[ -e "${link_name}" && ! -L "${link_name}" ]]; then
  util::info_message "File ${link_name} exists."
  if [ ! -d "${backup_directory}" ]; then
    mkdir -p "${backup_directory}"
  fi
  backup_path="${backup_directory}/${link_filename}-$(date +%Y%m%d-%H%M%S)"
  mv "${link_name}" "${backup_path}"
  util::info_message "The ${link_filename} is moved to ${backup_path}"
fi
util::debug_message "Check if there is a healthy ${link_name} symlink"
# Only create the link when no symlink exists yet; an existing symlink
# (healthy or not) is left alone.
if [[ ! -L "${link_name}" ]]; then
  ln -sf "${target}" "${link_name}"
  util::info_message "Symlink created ${link_name}"
else
  util::info_message "Skipped. Link exists at ${link_name}"
fi
| true
|
2abee5c843441de8f4e9cd71ea88df940d6d3b9e
|
Shell
|
code-to-core/docker
|
/bin/stop.sh
|
UTF-8
| 333
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
# Run this on the host, not within the docker image, then connect
# http://localhost:4000
# Sanity-check local configuration first; chkconfig.sh prints diagnostics.
if ! "$(dirname "$0")/chkconfig.sh"; then
    exit 1
fi
# Provides $repository.  NOTE: sourced relative to the current directory.
source config
# Grab the container id of the running image for this repository.
id=$(docker container ls | grep "$repository" | cut -d ' ' -f1)
echo "found $repository running with container id $id, stopping"
docker container stop "$id"
| true
|
3cc1269f637691256a2b9d3352f61610043e7e6a
|
Shell
|
thefactory/cloudformation-jenkins
|
/jenkins-backup
|
UTF-8
| 676
| 3.625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash -e
# Archive a Jenkins home directory and upload the tarball to S3, skipping
# reproducible content (workspaces, exploded plugins, caches, the war).
USAGE="Usage: $0 JENKINS_HOME S3_TARGET\n
\n
Examples:\n
  $0 /var/lib/jenkins s3://mybucket/jenkins/jenkins-201405011901.tar.gz"

JENKINS_HOME=$1
S3_TARGET=$2

# S3_TARGET must be an s3:// URL and JENKINS_HOME an existing directory.
if [[ "$S3_TARGET" != s3://* || ! -d "$JENKINS_HOME" ]]; then
    echo -e $USAGE
    exit 1
fi

LOCAL_BACKUP="/tmp/$(basename "$S3_TARGET")"

# Excludes are given before the '.' operand — required for portable tar
# behavior (GNU tar tolerated them afterwards; others do not).
tar -C "$JENKINS_HOME" -zcf "$LOCAL_BACKUP" \
    --exclude "config-history/" \
    --exclude "config-history/*" \
    --exclude "jobs/*/workspace*" \
    --exclude "jobs/*/builds/*/archive" \
    --exclude "plugins/*/*" \
    --exclude "plugins/*.bak" \
    --exclude "war" \
    --exclude "cache" \
    .

aws s3 cp "$LOCAL_BACKUP" "$S3_TARGET"
rm -f "$LOCAL_BACKUP"
| true
|
b78f5f6232b4a77bc052eb93952a6acd13b155bd
|
Shell
|
Vitalinux-DGA-ProgSoftLibre/vx-dga-l-scripts
|
/usr/bin/vx-firefox-extensiones
|
UTF-8
| 8,643
| 3.75
| 4
|
[] |
no_license
|
#!/bin/bash
# Versions: [arturo@2019-12-16]
# Install or remove Firefox extensions system-wide for Vitalinux, copying
# .xpi files from /usr/share/vitalinux/firefox-extensions into the global
# firefox-addons distribution directory.  Must run as root.
# Usage: vx-firefox-extensiones --install|--remove EXT [EXT ...]

# Print a timestamped message to stdout.  $1 - message text.
mensaje() {
  echo -e "=> $(date): ${1}"
}

# Print usage help (in Spanish, user-facing) via mensaje.
uso() {
  MENSAJE="Debes pasar dos o más parámetros al script:"
  MENSAJE+="\n1) --install|--remove"
  MENSAJE+="\n2,3,...) Extensiones a instalar o desinstalar"
  MENSAJE+="\nEj. vx-firefox-extensiones --install EPUBREADER"
  MENSAJE+="\nEj. vx-firefox-extensiones --install epubreader-2.0.7-fx.xpi ublock_origin-1.18.14-an+fx.xpi"
  mensaje "${MENSAJE}"
}

# Install one extension globally.  $1 - .xpi filename inside the shared
# extensions directory.  Returns 0 when installed or already present,
# 1 on error.
function instalarExtensionFirefox() {
  EXT_TYPE="global"
  #EXT_URL="http://migasfree.educa.aragon.es/extensiones-firefox/epubreader-1.5.0.12-fx+sm.xpi"
  EXT_URL="/usr/share/vitalinux/firefox-extensions/${1}"
  PATH_GLOBAL_FIREFOX="/usr/lib/firefox-addons/distribution/extensions"
  [ ! -f "${EXT_URL}" ] && \
    echo "=> ¡¡Error!! No encontramos ${EXT_URL}" && return 1
  ##
  # Some extensions do not carry correctly formed data in manifest.json.
  # For those, the add-on id cannot be read from the manifest and must be
  # hard-coded here; the epubreader extension is special-cased as well.
  if [ "$1" == "office_online_officework_para_xls_doc_y_ppt-1.2.0-an+fx.xpi" ]; then
    [ ! -f "${PATH_GLOBAL_FIREFOX}/{ba5a4d10-1c03-4db9-b7ca-c9cc255e26e8}.xpi" ] && \
    {
      cp "${EXT_URL}" "${PATH_GLOBAL_FIREFOX}/{ba5a4d10-1c03-4db9-b7ca-c9cc255e26e8}.xpi" && \
      echo "=> Ok!! ${EXT_TYPE} firefox-extension \"${1}\" ${EXT_UID} se ha instalado ..." ;
    } || \
    echo "=> Ok!! ${EXT_TYPE} firefox-extension \"${1}\" ${EXT_UID} ya esta instalada ..."
    return 0
  fi
  if [ "$1" == "epubreader-2.0.9-fx.xpi" ]; then
    [ ! -f "${PATH_GLOBAL_FIREFOX}/{5384767E-00D9-40E9-B72F-9CC39D655D6F}.xpi" ] && \
    {
      cp "${EXT_URL}" "${PATH_GLOBAL_FIREFOX}/{5384767E-00D9-40E9-B72F-9CC39D655D6F}.xpi" && \
      echo "=> Ok!! ${EXT_TYPE} firefox-extension \"${1}\" ${EXT_UID} se ha instalado ..." ;
    } || \
    echo "=> Ok!! ${EXT_TYPE} firefox-extension \"${1}\" ${EXT_UID} ya esta instalada ..."
    return 0
  fi
  ##
  cd /tmp
  ## download extension
  rm -f addon.xpi
  #wget -O addon.xpi "${EXT_URL}"
  cp "${EXT_URL}" /tmp/addon.xpi
  ## get extension UID from install.rdf
  # NOTE(review): inline interpreter uses python 2 print syntax.
  EXT_UID=$(unzip -p addon.xpi manifest.json | python -c 'import json,sys;obj=json.load(sys.stdin);print obj["'applications'"]["'gecko'"]["'id'"]')
  # if .xpi file could not be read, exit
  if [ "${EXT_UID}" = "" ] ; then
    echo "=> Error!! No puede localizarse el UID de firefox-extension: ${1}"
    return 1
  fi
  # check if extension not already installed
  if [ -f "${PATH_GLOBAL_FIREFOX}/${EXT_UID}.xpi" ] ; then
    echo "=> Ok!! ${EXT_TYPE} firefox-extension \"${1}\" ${EXT_UID} ya esta instalada ..."
    return 0
  fi
  # installation of global extension
  # copy .xpi to global extension path
  FICHEXTENSION="${PATH_GLOBAL_FIREFOX}/${EXT_UID}.xpi"
  if [[ ! -f "${FICHEXTENSION}" ]] ; then
    sudo cp -f addon.xpi "${PATH_GLOBAL_FIREFOX}/${EXT_UID}.xpi" && \
    echo "=> Ok!! ${EXT_TYPE} firefox-extension \"${1}\" ${EXT_UID} se ha instalado ..."
  else
    echo "=> Ok!! ${EXT_TYPE} firefox-extension \"${1}\" ${EXT_UID} ya esta instalada ..."
  fi
  # extract extension to global extension path
  ##sudo unzip addon.xpi -d "${PATH_GLOBAL_FIREFOX}/${EXT_UID}"
  # else, installation of extension for current user
  # end message
  ##echo "${EXT_TYPE} extension ${EXT_UID} installed"
  rm -f addon.xpi
  return 0
}

# Remove one globally installed extension.  $1 - .xpi filename.  Returns 0
# on success, 1 when the UID cannot be extracted or the file is missing.
function quitarExtensionFirefox() {
  EXT_TYPE="global"
  #EXT_URL="http://migasfree.educa.aragon.es/extensiones-firefox/epubreader-1.5.0.12-fx+sm.xpi"
  EXT_URL="/usr/share/vitalinux/firefox-extensions/${1}"
  PATH_GLOBAL_FIREFOX="/usr/lib/firefox-addons/distribution/extensions"
  ##
  # Same hard-coded-id special cases as on the install side.
  # NOTE(review): this side matches epubreader-2.0.7 while install matches
  # 2.0.9 — confirm whether both versions should be listed.
  if [[ "$1" == "office_online_officework_para_xls_doc_y_ppt-1.2.0-an+fx.xpi" ]]; then
    [ -f "${PATH_GLOBAL_FIREFOX}/{ba5a4d10-1c03-4db9-b7ca-c9cc255e26e8}.xpi" ] && \
    rm -f "${PATH_GLOBAL_FIREFOX}/{ba5a4d10-1c03-4db9-b7ca-c9cc255e26e8}.xpi" && \
    echo "=> Se ha eliminado la extensión de Firefox: ${1}"
    return 0
  fi
  if [[ "$1" == "epubreader-2.0.7-fx.xpi" && \
        -f "${PATH_GLOBAL_FIREFOX}/{5384767E-00D9-40E9-B72F-9CC39D655D6F}.xpi" ]]; then
    rm -f "${PATH_GLOBAL_FIREFOX}/{5384767E-00D9-40E9-B72F-9CC39D655D6F}.xpi" && \
    echo "=> Se ha eliminado la extensión de Firefox: ${1}"
    return 0
  fi
  ##
  cd /tmp
  ## download extension
  rm -f addon.xpi
  #wget -O addon.xpi "${EXT_URL}"
  cp "${EXT_URL}" /tmp/addon.xpi
  ## get extension UID from install.rdf
  EXT_UID=$(unzip -p addon.xpi manifest.json | python -c 'import json,sys;obj=json.load(sys.stdin);print obj["'applications'"]["'gecko'"]["'id'"]')
  # if .xpi file could not be read, exit
  if [ "${EXT_UID}" = "" ] ; then
    echo "=> Problemas con ${1}, no se puede estraer su UID correctamente ..."
    return 1
  fi
  # Delete the installed .xpi when present.
  if [ -f "${PATH_GLOBAL_FIREFOX}/${EXT_UID}.xpi" ] ; then
    rm -f "${PATH_GLOBAL_FIREFOX}/${EXT_UID}.xpi"
    echo "=> La extensión ${EXT_TYPE} de Firefox \"${1}\" ${EXT_UID} se ha desinstalado"
    return 0
  fi
  echo "=> La Problemas para desinstalar la extensión ${1} de Firefox"
  return 1
}

# Must run as root to touch the global extensions directory.
MENSAJE="Debes ser el Root para configurar las extensiones de Firefox ..."
[[ "$(whoami)" != "root" ]] && echo "${MENSAJE}" && exit 1
# Require at least two arguments (mode + one extension).
(( ${#} < 2 )) && uso && exit 1
PATH_GLOBAL_FIREFOX="/usr/lib/firefox-addons/distribution/extensions"
#PATH_GLOBAL_FIREFOX="/usr/lib/firefox-addons/extensions"
if ! test -d ${PATH_GLOBAL_FIREFOX} ; then
  mkdir -p ${PATH_GLOBAL_FIREFOX} && chmod +rx ${PATH_GLOBAL_FIREFOX}
fi
# Extensions shipped by the vx-dga-l-navegadores-extensiones-firefox package:
# OFFICEONLINE="office_online_officework_para_xls_doc_y_ppt-1.2.0-an+fx.xpi"
# ADDBLOCKPLUS="adblock_plus-3.5-an+fx.xpi"
# VIDEODOWNLOADHELPER="video_downloadhelper-7.3.5-an+fx.xpi"
# EPUBREADER="epubreader-2.0.7-fx.xpi"
# EDITTHISIMAGE="edit_this_image-2.0.1-an+fx.xpi"
# UBLOCK="ublock_origin-1.18.14-an+fx.xpi"
# EXTENSIONES_DISPONIBLES=("OFFICEONLINE"
#                          "ADDBLOCKPLUS"
#                          "VIDEODOWNLOADHELPER"
#                          "EPUBREADER"
#                          "EDITTHISIMAGE"
#                          "UBLOCK")
OPCION="${1}"
! [[ "${OPCION}" =~ --install|--remove ]] && uso && exit 1
shift
BASEDIR="/usr/share/vitalinux/firefox-extensions"
FICH_LISTA_EXTENSIONES="${BASEDIR}/firefox-extensiones-listado"
# Source the NAME=file.xpi catalogue of available extensions.
[ -f "${FICH_LISTA_EXTENSIONES}" ] && \
  . "${FICH_LISTA_EXTENSIONES}" || \
  {
    echo "=> ¡¡Problemas!! No hay listado de extensiones disponible: ${FICH_LISTA_EXTENSIONES}" ;
    exit 1
  }
# Build the list of all available extension alias names:
EXTENSIONES_DISPONIBLES=()
for LINEA in $(sed -n "/^[^#].*=/p" "${FICH_LISTA_EXTENSIONES}") ; do
  EXT="$(echo $LINEA | cut -d"=" -f1)"
  EXTENSIONES_DISPONIBLES+=("$EXT")
done
# echo "La lista completa es: ${LISTA[@]}" ; echo "El primero: ${LISTA[0]}"
# Two flags record whether any install / uninstall failed:
FALLOINS=0
FALLODES=0
# Process each remaining argument: either a literal .xpi filename or an
# alias from the catalogue (resolved via indirect expansion ${!1}).
until [ -z "${1}" ] ; do
  EXTENSION=""
  [[ "${1##*.}" == "xpi" ]] && [ -f "${BASEDIR}/${1}" ] && \
    EXTENSION="${1}"
  [[ " ${EXTENSIONES_DISPONIBLES[@]} " =~ " ${1} " ]] && \
    [ -f "/usr/share/vitalinux/firefox-extensions/${!1}" ] && \
    EXTENSION="${!1}"
  MENSAJE="La extensión ${EXTENSION} no esta disponible ..."
  [ -z "${EXTENSION}" ] && mensaje "${MENSAJE}" && exit 1
  case "${OPCION}" in
    "--install" )
      if ! instalarExtensionFirefox "${EXTENSION}" ; then
        FALLOINS=1
      fi
      ;;
    "--remove" )
      if ! quitarExtensionFirefox "${EXTENSION}" ; then
        FALLODES=1
      fi
      ;;
  esac
  shift
done
(( ${FALLOINS} == 1 )) && \
  echo "=> ¡¡Error!! La fallado la instalación de alguna extensión de firefox" && \
  exit 1
(( ${FALLODES} == 1 )) && \
  echo "=> ¡¡Error!! La desisntalación de alguna extensión de firefox ha dada problemas" && \
  exit 1
exit 0
| true
|
d09b4f94e5f3afa0a2f9049b3b9ce69ad5f15d0a
|
Shell
|
lanen/fpm-rpm
|
/mysql56
|
UTF-8
| 1,442
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
# Download, compile and package MySQL 5.6 as an RPM via fpm.
artifact=mysql
version=5.6.32
name=${artifact}-${version}
fn=${name}.tar.gz
dl=download/$fn
# Fetch the source tarball once; bail out if the download produced nothing.
if [[ ! -f "$dl" ]]; then
    wget -O $dl http://cdn.mysql.com//Downloads/MySQL-5.6/$fn
    if [[ ! -f "$dl" ]]; then
        echo "$fn download failed"
        exit 1
    fi
fi
# Build prerequisites (RHEL/CentOS toolchain).
sudo yum install -y gcc-c++ cmake bison-devel ncurses-devel
dir=`pwd`
t=tmp/${fn}_work
mkdir -p $t
# Staging prefix that fpm will package from; recreated on every run.
tmp_install=$dir/tmp/${fn}_install
rm -rf $tmp_install
mkdir -p $tmp_install
## Build MySQL
# NOTE(review): 'x' != 'b' is always true — looks like a crude toggle to
# skip the build step by hand; confirm before changing.
if [ 'x' != 'b' ]; then
    tar zxvf $dl -C $t
    cd $t/$name
    cmake \
    -DCMAKE_INSTALL_PREFIX=$tmp_install/$name \
    -DMYSQL_DATADIR=/data/mysql/data \
    -DSYSCONFDIR=/etc \
    -DWITH_MYISAM_STORAGE_ENGINE=1 \
    -DWITH_INNOBASE_STORAGE_ENGINE=1 \
    -DWITH_MEMORY_STORAGE_ENGINE=1 \
    -DWITH_READLINE=1 \
    -DMYSQL_UNIX_ADDR=/var/lib/mysql/mysql.sock \
    -DMYSQL_TCP_PORT=3306 \
    -DENABLED_LOCAL_INFILE=1 \
    -DWITH_PARTITION_STORAGE_ENGINE=1 \
    -DEXTRA_CHARSETS=all \
    -DDEFAULT_CHARSET=utf8 \
    -DDEFAULT_COLLATION=utf8_general_ci
    make && make install
    cd -
fi
name_prefix='ddb-'
install=/usr/local/opt
output=build
#exit
# Package the staged tree as an RPM installed under $install.
# NOTE(review): $addition is not defined in this script — presumably set by
# the environment or a sourced file; confirm.
fpm -f -s dir -t rpm --verbose --epoch 0 \
    -p $output \
    -n $name_prefix$name \
    --url "https://info.evan.io" \
    -m 'evan' \
    --license 'MIT' \
    --category 'Development/Languages' \
    --description "$name" \
    --rpm-auto-add-directories \
    --rpm-init scripts/init.d/mysqld \
    --no-rpm-sign $addition \
    --workdir tmp \
    --prefix $install \
    -C $tmp_install \
    $name
#. common
| true
|
4dac7edcea3abba2e6b6982c6ce9d1528550daeb
|
Shell
|
SoumyaDas/drupal-ubuntu-automation
|
/build8.sh
|
UTF-8
| 5,078
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
# Provision a new Drupal 8 build: fetch via composer, symlink into the web
# root, run drush site-install, generate an Apache vhost, and register a
# /etc/hosts entry.  $1 - path to a config file that defines BUILD,
# SHARED_DIR, WEB_DIR, DOC_ROOT, DB_*, DRUPAL_*, CONF_DIR, CONF, FILE_DIR,
# BUILD_ROOT and BUILD_INFO_DIR.
# Treat unset variables as an error
set -o nounset
# Source configuration
source $1 || exit 126
#source $2
echo -e "${BUILD}"
##
# Needed executables & drush commands
#
DRUSH=$(which drush) &> /dev/null \
    || { echo 'Missing drush. Aborting...' >&2; exit 127; }
# Specific path to drush version for drush site-install
set +o nounset
[ -z "$DRUSH_SITE_INSTALL_DRUSH" ] && DRUSH_SITE_INSTALL_DRUSH=${DRUSH}
set -o nounset
which git &> /dev/null \
    || { echo 'Missing git. Aborting...'>&2; exit 127; }
## drush help make &> /dev/null \ || { echo "Could not probe 'drush make'. Aborting...">&2; exit 127; }
# NOTE(review): the `\ ||` below passes a literal space argument to drush
# before the ||; probably meant as a line continuation — confirm intent.
${DRUSH_SITE_INSTALL_DRUSH} help site-install &> /dev/null \ || { echo "Could not probe 'drush site-install'. Aborting...">&2; exit 127; }
##
# run drush make
#
cd ${SHARED_DIR}
## echo -e "# Running drush make, create new build ${BUILD} with ${BUILD_MAKEFILE}...\n"
sudo composer create-project drupal/drupal ${BUILD}
##
# link new build to docroot
# commenting out below section as we are intended to create a new instance of
# database and codebase of the given profile.
#
cd ${WEB_DIR}
BUILD_DIR=${SHARED_DIR}/${BUILD}
# Replace any stale docroot symlink, then link build dir under both names.
if [ -L ${DOC_ROOT} ] ; then
    echo -ne "# Symlink ${BUILD} already exists, unlink ${BUILD}... "
    unlink ${DOC_ROOT} 2>&1 \
        && echo -e "done\n" \
        || { echo -e "FAILED 2!\n"; exit 2; }
fi
echo -ne "# Symlink ${BUILD} to ${WEB_DIR}/${DOC_ROOT}... "
sudo ln -s ${BUILD_DIR} ${DOC_ROOT} 2>&1 \
    && echo -e "done\n" \
    || { echo -e "FAILED 3!\n"; exit 3; }
echo -ne "# Symlink ${BUILD_DIR} to ${WEB_DIR}/${BUILD}... "
sudo ln -s ${BUILD_DIR} ${BUILD} 2>&1 \
    && echo -e "done\n" \
    || { echo -e "FAILED 3.2!\n"; exit 3.2; }
##
# run drush site-install (and drop existing tables)
# set sendmail path to /usr/bin/true if it is not configured properly.
echo -e "# Running drush site-install...\n"
sudo /usr/bin/env PHP_OPTIONS="-d sendmail_path=`which true`" ${DRUSH_SITE_INSTALL_DRUSH} site-install ${BUILD_PROFILE} ${SI_OPTIONS} -y -r ${WEB_DIR}/${DOC_ROOT} \
    --db-url=${DB_DRIVER}://${DB_USER}:${DB_PASS}@${DB_HOST}/${DB} \
    --account-name=${DRUPAL_UID1} \
    --account-pass=${DRUPAL_UID1_PASS} \
    --site-name=${DRUPAL_SITE_NAME} 2>&1 \
    && echo -e "\n# Site installation was successful." \
    || { echo -e "\n# FAILED 4!"; exit 4; }
# Files directory (local dev)
sudo chmod -R 777 ${FILE_DIR}
sudo chown -R vagrant:vagrant ${BUILD_ROOT}
# Files directory (remote dev/stage/prod)
#sudo chown -R _www:_www /var/www/html/${BUILD}/sites/default/files
##
# Create virtual directory configuration file for this build
#
cd ${CONF_DIR}
sudo touch ${CONF}
sudo chmod -R 777 ${CONF}
sudo chown -R vagrant:vagrant ${CONF}
FILE=${CONF}
# NOTE(review): the embedded inner double quotes close and reopen the outer
# string, so the quote characters themselves are NOT written to the vhost
# file — the paths come out unquoted.  Works while paths have no spaces.
echo "# ************************************
# Vhost template generated by build script.
# ************************************
<VirtualHost *:80>
 ServerName ${BUILD}.demoserver.com
  ## Vhost docroot
  DocumentRoot "/var/www/${BUILD}"
  ## Directories, there should at least be a declaration for /www/rml/current
  <Directory "/var/www/${BUILD}">
    Options FollowSymLinks
    AllowOverride all
    Order allow,deny
    Allow from all
  </Directory>
  ## Load additional static includes
  ## Logging
  ErrorLog "/var/log/apache2/${BUILD}.demoserver.com_error_log"
  ServerSignature Off
  CustomLog "/var/log/apache2/${BUILD}.demoserver.com_access_log" "combined1"
  ## Rewrite rules
  RewriteEngine On
  ## Server aliases
  ServerAlias ${BUILD}.demoserver.com
  ## Custom fragment
  <IfModule php5_module>
      php_value upload_max_filesize 10M
      php_value post_max_size 10M
  </IfModule>
</VirtualHost>" >> $FILE
sudo a2ensite $FILE
sudo service apache2 reload
# Remove $HOSTNAME from /etc/hosts.  NOTE(review): reads the globals
# HOSTNAME and ETC_HOSTS set by addhost; never called in this script.
function removehost() {
    if [ -n "$(grep $HOSTNAME /etc/hosts)" ]
    then
        echo "$HOSTNAME Found in your $ETC_HOSTS, Removing now...";
        sudo sed -i".bak" "/$HOSTNAME/d" $ETC_HOSTS
    else
        echo "$HOSTNAME was not found in your $ETC_HOSTS";
    fi
}
# Append "127.0.0.1 <name>" to /etc/hosts unless already present.
# $1 - hostname to register.
function addhost() {
    IP="127.0.0.1"
    HOSTNAME=$1
    HOSTS_LINE="$IP\t$HOSTNAME"
    ETC_HOSTS="/etc/hosts"
    if [ -n "$(grep $HOSTNAME /etc/hosts)" ]
    then
        echo "$HOSTNAME already exists : $(grep $HOSTNAME $ETC_HOSTS)"
    else
        echo "Adding $HOSTNAME to your $ETC_HOSTS";
        sudo -- sh -c -e "echo '$HOSTS_LINE' >> /etc/hosts";
        if [ -n "$(grep $HOSTNAME /etc/hosts)" ]
        then
            echo "$HOSTNAME was added succesfully \n $(grep $HOSTNAME /etc/hosts)";
        else
            echo "Failed to Add $HOSTNAME, Try again!";
        fi
    fi
}
##
# write host entry to the hosts file.
#
echo -e "\n# Adding hosts entry to hosts file...\n"
#sudo su
#FILE=${HOSTS}
#echo "127.0.0.1 ${BUILD}.demoserver.com" >> $FILE
HOSTNAME=${BUILD}.demoserver.com
addhost ${HOSTNAME}
echo -e "\n# Host entry added--\n127.0.0.1 ${BUILD}.demoserver.com\n"
# Record this build name for later tooling.
sudo mkdir ${BUILD_INFO_DIR}
sudo chmod -R 777 ${BUILD_INFO_DIR}
sudo chown -R vagrant:vagrant ${BUILD_INFO_DIR}
FILE=${BUILD_INFO_DIR}/build-record-existing.txt
echo "${BUILD}" >> $FILE
exit 0
| true
|
b333c178dce098a46200443a11dea08a9c46cea7
|
Shell
|
ferozbaig96/Simple-Bash
|
/scripts/Getting Started/signal_cleanup_exit.sh
|
UTF-8
| 278
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
# signal_cleanup_exit.sh
# This script will do cleanup when you try to exit via Ctrl+C.
# (fixed: the shebang was missing its '!' and had no effect)
#
# Bash runs the EXIT trap when the script terminates, including termination
# triggered by SIGINT (Ctrl+C), so one EXIT trap covers the interactive case.

# Placeholder cleanup hook: close files / delete temp files here.
function cleanup_exit(){
	# do some file closing stuff
	# or temp file deletion stuff
	echo
}
trap cleanup_exit EXIT

echo "while true has started"
# Idle forever so the trap can be demonstrated with Ctrl+C.
while true
do
	sleep 60
done
| true
|
5b7cd49b445d038d2fd9b57e9abf214a83ff19ea
|
Shell
|
tfettrow/MiM_Shell_Scripts
|
/struct_processing/structural_analysis.sh
|
UTF-8
| 1,418
| 2.671875
| 3
|
[] |
no_license
|
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# this script requires arguments
#
# Positional arguments, mapped by index:
#   1 - Matlab_dir: root of the MATLAB helper code (MATLABPATH = $1/helper)
#   2 - Template_dir (NOTE(review): captured but unused below — the matlab
#       call hard-codes its template_dir; confirm)
#   3 - Study_dir: working directory for the analysis
#   4 - subject: subject identifier passed into cat12StructuralAnalysis
#   5+ - struct_processing_steps (NOTE(review): captured but unused below;
#       the steps vector is hard-coded in the matlab call — confirm)
argument_counter=0
for this_argument in "$@"
do
	if [[ $argument_counter == 0 ]]; then
		Matlab_dir=$this_argument
	elif [[ $argument_counter == 1 ]]; then
		Template_dir=$this_argument
	elif [[ $argument_counter == 2 ]]; then
		Study_dir=$this_argument
	elif [[ $argument_counter == 3 ]]; then
		subject=$this_argument
	else
		struct_processing_steps="$this_argument"
	fi
	(( argument_counter++ ))
done
export MATLABPATH=${Matlab_dir}/helper
cd "$Study_dir"
# Load MATLAB via the cluster's module system, then run the CAT12 pipeline
# headlessly; errors are swallowed by the try/catch so matlab always quits.
ml matlab/2020a
matlab -nodesktop -nosplash -r "try; cat12StructuralAnalysis('subjects',$subject,'t1_folder','02_T1','t1_filename','T1.nii','steps_to_run_vector',[1 0 1 0 1 1 1 1 1 1],'template_dir','/blue/rachaelseidler/tfettrow/Crunch_Code/MR_Templates'); catch; end; quit"
| true
|
ab94b25d0d4310ca9843491c573f956fca91c52a
|
Shell
|
gobbedy/compute_canada_scripts
|
/launch_dispynode.sh
|
UTF-8
| 372
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/bash
# Launch a dispynode daemon inside a detached tmux session.
# Usage: launch_dispynode.sh <logfile>
# All diagnostic output is appended to the given log file.
logfile=$1
dispynode_executable=~/.local/bin/dispynode.py
echo "Note: \"error connecting\" line immediately below is expected." &>> ${logfile}
# Kill any leftover session in case a previous run died before cleaning up.
tmux kill-session -t dispynode_session &>> ${logfile} # in case previous script on same node died before killing
# Run dispynode detached; tee mirrors its stdout+stderr into the log.
tmux new-session -s dispynode_session -d "${dispynode_executable} --clean --daemon |& tee -a ${logfile}"
| true
|
f66c5e5435eb8314f747e81a3a2832cca08ed138
|
Shell
|
AntBean/jpf-bfs
|
/bin/test_debug
|
UTF-8
| 308
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
#
# unix shell script to run jpf tests
#

# Locate the JPF installation relative to this script.
JPF_HOME=$(dirname "$0")/..

# Supply default JVM flags only when the caller provided none
# (":=" assigns when JVM_FLAGS is unset or empty, matching the original
# `test -z` check).
: "${JVM_FLAGS:=-Xmx1024m -ea}"

# Suspend the JVM at startup until a remote debugger attaches on port 8787.
DEBUG_FLAGS="-Xdebug -Xnoagent -Xrunjdwp:transport=dt_socket,address=8787,server=y,suspend=y"

java $DEBUG_FLAGS $JVM_FLAGS -jar "$JPF_HOME/tools/RunTest.jar" "$@"
| true
|
9353f47bdf78f48be753730e11dd4b057f5920ff
|
Shell
|
trozler/giveMeProject
|
/webproject.sh
|
UTF-8
| 1,305
| 3.90625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Scaffold a new webpack/babel web project in the current directory and
# optionally create a GitHub repo for it.
#
# Fixes over the original:
#   - shebang was "#/bin/bash" (missing '!')
#   - sed character class was "[a-zA-z]" (typo for "[a-zA-Z]")
#   - the "\e" prefixed to ${GREEN}/${NC} emitted a stray ESC byte, since
#     the variables already contain the full escape sequence

GREEN='\033[0;32m'
NC='\033[0m'
# Directory this script lives in (source of the template files).
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )""/"
# Project name = current directory name, lower-cased.
BASE="$( basename "$PWD" | tr '[:upper:]' '[:lower:]' )"

printf "${GREEN}Doing your lazy work...${NC}\n"

git init -q
touch .gitignore README.md
printf "## $BASE\n" >> ./README.md
printf "${GREEN}git stuff done.${NC}\n"

mkdir src src/css src/js dist
touch src/js/index.js src/indexSrc.html src/css/styles.css

# Copy template config files from the script's own directory.
cp "${DIR}""webpack.config.js" "${PWD}"
cp "${DIR}"".babelrc" "${PWD}"
cp "${DIR}"".gitignore" "${PWD}"
cp "${DIR}""LICENSE" "${PWD}"
printf "${GREEN}repo structure done.${NC}\n"

npm init -y
rm "${PWD}""/package.json"
cp "${DIR}""package.json" "${PWD}"
# Rewrite the "name" field in the template package.json to the project name.
# NOTE: `sed -i ''` is the BSD/macOS form; GNU sed would need plain `sed -i`.
sed -i '' 's/"name": "[a-zA-Z]*",/"name": "'$BASE'",/' ./package.json

npm install -D babel-loader @babel/core @babel/preset-env @babel/plugin-transform-runtime
npm install -D webpack webpack-cli

#Add to github?
while true; do
    read -p "Add this project to github (y or n)? " YNtoHub
    case $YNtoHub in
        [Yy]* ) YNtoHub=1; break;;
        [Nn]* ) YNtoHub=0; break;;
        * ) echo "Please answer y or n";;
    esac
done

if [ $YNtoHub -eq 1 ]
then
    gh repo create
    git add .
    git commit -m "Hello there new repo."
    git push -u origin master
fi

printf "${GREEN}All done.${NC}\n"
| true
|
4e80f140c8ccbe391fbb13aa0281680d23357e60
|
Shell
|
MomsFriendlyDevCo/Init
|
/utils/npm-install
|
UTF-8
| 263
| 3.15625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Install a series of NPM packages
# Usage: ./utils/npm-install <packages...>

# Pull in the shared INIT helpers unless a parent script already did.
[ -z "$INIT_READY" ] && source common

# Fix: the original compared an unquoted backtick substitution with '=='
# inside '[' — that breaks when the output is empty or multi-word, and
# '==' is a bashism inside single brackets.  Quote the substitution and
# use the portable '='.
if [ "$(INIT bin-available npm)" = "0" ]; then
	INIT status "Need to install Node + NPM"
	INIT run 020-node
fi

sudo npm install -g "$@"
| true
|
40cf5a760d026eee6684c17a9c1028754e6b7105
|
Shell
|
mlaizure/holberton-system_engineering-devops
|
/0x05-processes_and_signals/3-show_your_bash_pid_made_easy
|
UTF-8
| 108
| 2.625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# displays PID and process name of any process whose name contains bash
# pgrep -l prints "<pid> <name>" for every match; the pattern is a regex
# matched against process names, so plain "bash" matches any name
# containing that substring.
pgrep -l "bash"
| true
|
d287cf8584919499c01bb90c1a334b3b3c7dfa5f
|
Shell
|
alexpyoung/config
|
/install/cron.sh
|
UTF-8
| 188
| 3.15625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
set -e

# Load the repo's crontab file into the macOS clipboard so it can be
# pasted into `crontab -e`.
main() {
    # Jump to the script's directory, then up to the repo root.
    pushd "${0%/*}" && pushd ..
    # Fix: the original ran `cat crontab > pbcopy`, which created a FILE
    # named "pbcopy" instead of piping into the clipboard utility.
    pbcopy < crontab
    echo 'crontab has been loaded into clipboard. Use crontab -e.'
    popd && popd
}

main
| true
|
f73b4b3349d29fc8a87ed284e9046bcde65df0a1
|
Shell
|
bioinform/varsim
|
/tests/test_flip_map/test.sh
|
UTF-8
| 625
| 3.453125
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
set -euo pipefail
# Directory containing this test script (resolved to a physical path).
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd -P)"
# Regenerate the flipped map from the input; `time` reports the duration.
time ../../flip_map.py flipped input.map
# Global failure flag: set to 1 by compare() whenever a file pair differs.
TEST_FAIL=0
compare() {
  # Compare one expected/actual file pair and print "<a> <b> matches" or
  # "<a> <b> differs".  Sets the global TEST_FAIL=1 when the pair differs.
  #
  # Fix: the original re-used the global TEST_FAIL for the per-file
  # report, so once any pair differed every later pair was reported as
  # "differs" as well.  A local flag keeps the per-file report correct
  # while still latching the global failure state.
  local pair_differs=0
  if [[ ($1 == *.gz) && ($2 == *.gz) ]];then
    # Both gzipped: compare the decompressed contents.
    cmp -s <(zcat "$1") <(zcat "$2") || pair_differs=1
  else
    cmp -s "$1" "$2" || pair_differs=1
  fi
  if [[ $pair_differs -eq 1 ]]; then
    TEST_FAIL=1
    echo "$1" "$2" differs
  else
    echo "$1" "$2" matches
  fi
}
# Check every expected map against the freshly generated file of the same name.
for i in expected/*.map;do
compare $i $(basename $i)
done
# TEST_FAIL is latched to 1 by compare() on the first mismatch.
if [[ $TEST_FAIL -ne 0 ]] ;then
echo test fail
else
echo test pass
fi
| true
|
bd4a0f561d111abcf04b1a74cc0263da214f42b5
|
Shell
|
petems/vagrant-puppet-install
|
/test/support/puppet_install_script/shell_install.sh
|
UTF-8
| 438
| 3.453125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Provision Puppet on an EL6 box: register the PuppetLabs yum repo, then
# install the puppet package.  Any failing command aborts the run.
set -e

readonly REPO_URL="http://yum.puppetlabs.com/el/6/products/i386/puppetlabs-release-6-7.noarch.rpm"

echo "Spec test running now!"

# Fetch the repo-release RPM into a temp file and register the repo.
echo "Configuring PuppetLabs repo..."
release_rpm=$(mktemp)
wget --output-document="${release_rpm}" "${REPO_URL}" 2>/dev/null
rpm -i "${release_rpm}" >/dev/null

# With the repo in place, install Puppet itself.
echo "Installing puppet"
yum install -y puppet > /dev/null
echo "Puppet installed!"
| true
|
3a7e4ca74251e9224e20dc9eedbf50c0c5bfaa98
|
Shell
|
tttor/lab1231-sun-prj
|
/xprmnt/segment-sorter/sh/train_lda.sh
|
UTF-8
| 849
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/sh
# Train a GibbsLDA++ topic model on the msrc21 corpus.

# --- locations -------------------------------------------------------
root_dir=/home/jogie/sorter_exp/lda-model
timestamp="$(date +'%Y%m%d.%H%M%S')"
dataset=msrc21
corpus_name=corpus.20141221.145856

corpus_dir=${root_dir}/corpus/${dataset}
tr_data_filepath=${corpus_dir}/${corpus_name}.data
tr_metadata_filepath=${corpus_dir}/${corpus_name}.meta
model_dir=${root_dir}/lda-model/${dataset}/training.${timestamp}

# GibbsLDA++ writes the resulting model next to its training data, so
# stage the corpus into a fresh per-run model directory first.
mkdir -p ${model_dir}
cp ${tr_data_filepath} ${model_dir}
cp ${tr_metadata_filepath} ${model_dir}

# --- LDA hyperparameters (alpha/beta/niters from [Russel, 2006]) -----
dfile=${model_dir}/${corpus_name}.data
ntopics=21
alpha=0.5
beta=0.5
niters=5000
savestep=1000
twords=10

lda=../../../external/GibbsLDA++-0.2/src/lda
${lda} -est -dfile ${dfile} -ntopics ${ntopics} -alpha ${alpha} -beta ${beta} -niters ${niters} -savestep ${savestep} -twords ${twords}
| true
|
8828021f9b4bd98dcfacfac27b422f2ed3b6348e
|
Shell
|
Rsvyas/avakas
|
/tests/integration/ansible.bats
|
UTF-8
| 1,234
| 3.390625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bats
# -*- mode: Shell-script;bash -*-
# Integration tests for avakas version handling on Ansible role repos.
# fake_repo, template_skeleton, avakas_wrapper, scan_lines, etc. come
# from the shared "helper" load file.
load helper
# Build a throwaway origin repo seeded with an Ansible skeleton at
# v0.0.1, then clone it for each test to mutate.
setup() {
shared_setup
REPO_ORIGIN=$(fake_repo)
template_skeleton "$REPO_ORIGIN" ansible "v0.0.1"
origin_repo "$REPO_ORIGIN"
REPO=$(clone_repo $REPO_ORIGIN)
}
teardown() {
shared_teardown
}
@test "show a ansible version" {
run avakas_wrapper show "$REPO"
[ "$status" -eq 0 ]
scan_lines "0.0.1" "${lines[@]}"
}
@test "set an ansible version" {
run avakas_wrapper set "$REPO" "0.0.2"
[ "$status" -eq 0 ]
scan_lines "Version set to 0.0.2" "${lines[@]}"
run avakas_wrapper show "$REPO"
[ "$status" -eq 0 ]
scan_lines "0.0.2" "${lines[@]}"
# Ansible roles track the version via git tag only — never a version file.
[ ! -e "$REPO/version" ]
}
@test "bump an ansible version" {
run avakas_wrapper bump "$REPO" patch
[ "$status" -eq 0 ]
scan_lines "Version updated from 0.0.1 to 0.0.2" "${lines[@]}"
run avakas_wrapper show "$REPO"
[ "$status" -eq 0 ]
scan_lines "0.0.2" "${lines[@]}"
[ ! -e "$REPO/version" ]
}
@test "do not allow the setting of a prefix" {
run "$AVAKAS" show "$REPO" --tag-prefix aaa
[ "$status" -eq 1 ]
echo "AAAA ${lines[@]}"
scan_lines "Problem: Cannot specify a tag prefix with an Ansible Role" "${lines[@]}"
}
| true
|
c6ea20da41b007e063d0f9144c6f545ef7dd750c
|
Shell
|
Ljinod/utils
|
/echo_script.sh
|
UTF-8
| 2,140
| 4.4375
| 4
|
[] |
no_license
|
#!/bin/bash
# Pretty message printer for scripts.
#
# Fixes over the original:
#   - shebang declared #!/bin/sh, but the body uses bash-only features
#     ("${@:$#}" slicing, "echo -e"), so declare bash explicitly
#   - $SCRIPT_NAME is now quoted in its test (unquoted, it broke when
#     the value contained spaces)
#   - removed an unused local variable ("color")

# Colors!
readonly Green='\e[0;32m'
readonly Yellow='\e[0;33m'
readonly IRed='\e[0;91m'
readonly On_Red='\e[41m'
readonly Blue='\e[0;34m'
readonly Color_Off='\e[0m'

# Global variables: the special message tags recognized by echo_script().
readonly INFO="info"
readonly ERROR="ERROR"
readonly WARNING="warning"
readonly SUDO="sudo"

# echo_script [TAG|TEXT]...
# Prints an optional "(ERROR)/(warning)/(sudo)" prefix for tag arguments;
# a plain first argument gets an "(info)" prefix.  The last argument
# terminates the line; earlier fragments are appended without a newline.
echo_script() {
    # the "-e" argument is to process the colors, the "-n" is so that the next
    # echo command is not printed on a new line but appended to this one
    if [ -n "$SCRIPT_NAME" ]
    then
        echo -en "${Green}${SCRIPT_NAME}${Color_Off} - "
    fi

    # if there is an argument then process it: there should normally be more
    # than one but this tests is there just to make sure no errors linked to
    # the script itself are printed...
    if [ "$#" -gt 0 ]
    then
        for arg in "$@"
        do
            case $arg in
                $ERROR)
                    echo -en "${On_Red}(${arg})${Color_Off} "
                    ;;
                $WARNING)
                    echo -en "${Yellow}(${arg})${Color_Off} "
                    ;;
                $SUDO)
                    echo -en "${IRed}(${arg})${Color_Off} "
                    ;;
                *)
                    # If this is not a special message - meaning it does not
                    # match the case expressed before - then it is an
                    # informative message hence I want to display "INFO" before
                    if [ "$arg" = "${1}" ]
                    then
                        echo -en "${Blue}(${INFO})${Color_Off} "
                    fi
                    # I don't want to have the next message appended to this
                    # one so if it's the last argument I omit the "-n" option
                    if [ "$arg" = "${@:$#}" ]
                    then
                        echo -e "$arg"
                    # Not the first argument nor the last: I want to append
                    # what appears next
                    else
                        echo -en "$arg"
                    fi
                    ;;
            esac
        done
    fi
}
| true
|
98902b8f49cd3fd0751351179cbdd470956def1d
|
Shell
|
AndresKool/skriptimine
|
/praks7/yl3
|
UTF-8
| 384
| 2.671875
| 3
|
[] |
no_license
|
#! /bin/bash
# Asks for an hour of day (0-23) and prints the matching Estonian greeting.

# greet_for_hour HOUR
# Prints the greeting for HOUR: morning 6-11, day 12-17, evening 18-21,
# night 22-23 and 0-5.  Anything else — including empty or non-numeric
# input, which made the original's unquoted tests print shell errors —
# gets the error message.
greet_for_hour() {
	local aeg=$1
	if ! [[ $aeg =~ ^[0-9]+$ ]]
	then
		echo "Mingi Raix oled w? Vale Sisend ju"
	elif [ "$aeg" -ge 6 ] && [ "$aeg" -lt 12 ]
	then
		echo "Tere Hommikust!"
	elif [ "$aeg" -ge 12 ] && [ "$aeg" -lt 18 ]
	then
		echo "Tere Päevast!"
	elif [ "$aeg" -ge 18 ] && [ "$aeg" -lt 22 ]
	then
		echo "Tere Õhtust!"
	elif { [ "$aeg" -ge 22 ] && [ "$aeg" -lt 24 ]; } || [ "$aeg" -lt 6 ]
	then
		echo "Head Ööd!"
	else
		echo "Mingi Raix oled w? Vale Sisend ju"
	fi
}

echo "Sisestage Aeg"
read aeg
greet_for_hour "$aeg"
# Lõpp
| true
|
1c2f9d7cdf6c9362b846208100125b54946f23a4
|
Shell
|
ftwftw0/ODGDownloader
|
/ODGProdDownloadAll.sh
|
UTF-8
| 3,423
| 3.734375
| 4
|
[] |
no_license
|
#!/usr/bin/env /bin/bash

# Resolve a bit.ly shortlink and extract the target archive filename from
# the final URL's "/son/zip/..." path component.
# Fix: URL and FILENAME were globals that leaked out of the function and
# collided with the caller's variables — make them local.
function get_filename_from_bitly_url() {
	local url=$1
	local filename
	# Follow redirects, grab the "/son/zip/<name>.<ext>" match, and keep
	# only the last path component (rev|cut|rev == basename of the match).
	filename=$(/usr/bin/curl -L "$url" | /bin/grep -o "/son/zip/.*\.[a-z]*" | /usr/bin/rev | /usr/bin/cut -d'/' -f1 | /usr/bin/rev)
	# Return the filename
	echo "$filename"
}
# Download one bit.ly-linked archive into the given folder, with retries.
#   $1 = bit.ly URL, $2 = destination directory.
# Exits the whole script if every retry fails.
#
# Fixes: the original stored $2 in PATH, clobbering the shell's command
# search path (the reason every tool had to be called by absolute path),
# and its post-loop `$?` check never reflected the download status —
# track success with an explicit flag instead.
function download_from_bitly_link_to_folder() {
	local url=$1
	local dest_dir=$2
	local filename
	filename=$(get_filename_from_bitly_url "$url")

	echo "--- Launching download from $url to $dest_dir/$filename ---"

	# retry loop
	local success=1
	for k in {1..3}; do
		if /usr/bin/curl -L "$url" -H 'User-Agent: Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:61.0) Gecko/20100101 Firefox/61.0' -H 'Accept: text/css,*/*;q=0.1' -H 'Accept-Language: en-US,en;q=0.5' --compressed -H 'Referer: http://odgprod.com/' -H 'Cookie: 300gpBAK=R4178757645; 300gp=R3395838808; PHPSESSID=07544ee240a216a6b3c83e26a784c871' -H 'Connection: keep-alive' -H 'Cache-Control: max-age=0' --output "$dest_dir/$filename" --retry 3 --retry-delay 10 --retry-max-time 30; then
			success=0
			break
		else
			echo "Download $filename failed... retry \#$k."
		fi
	done

	if [ $success -eq 0 ]; then
		echo "Successfully downloaded $filename into folder $dest_dir."
	else
		echo "Download $filename failed... all retrys have failed. WE EXIT THIS SHITTY PROGRAM !!!"
		exit 1
	fi
}
# Fetch an odgprod.com month page and print all bit.ly download links,
# one per line.
# Fixes: variables were globals leaking out of the function, and
# WEBPAGE_HTML_LINKS was computed but never used — dropped.
function get_download_links_from_odgprod_url() {
	local url=$1
	local page_html
	page_html=$(/usr/bin/curl "$url" -H 'User-Agent: Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:61.0) Gecko/20100101 Firefox/61.0' -H 'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8' -H 'Accept-Language: en-US,en;q=0.5' --compressed -H 'Cookie: PHPSESSID=07544ee240a216a6b3c83e26a784c871; 300gpBAK=R4178755467; 300gp=R3395838808' -H 'Connection: keep-alive' -H 'Upgrade-Insecure-Requests: 1' -H 'Cache-Control: max-age=0')
	# Keep only href="http://bit..." targets, stripped of the quoting.
	local download_links
	download_links=$(echo "$page_html" | /bin/grep -o "href=\"http://bit[^\"]*\"" | /usr/bin/cut -d'"' -f2)
	# Return download links
	echo "$download_links"
}
DOWNLOAD_FOLDER="download"
# Summary counters.  Fix: the original never initialized these, so the
# final report printed blanks.  Per-album "already present"/"failed"
# accounting is still a TODO — a failed download currently exits the
# script inside download_from_bitly_link_to_folder().
TOTAL_ALBUMS=0
ALREADY_PRESENT_ALBUMS=0
DOWNLOADED_ALBUMS=0
FAILED_ALBUMS=0

# Make sure the destination folder exists before curl writes into it.
mkdir -p "$DOWNLOAD_FOLDER"

# Here's an ODGProd URL: http://odgprod.com/2005/01/
# This webpage contains all albums from January 2001, along their download links.
# Here's what we'll do: we will iterate over all months from year 2004 to year 2020.
# So, from http://odgprod.com/2000/01/ to http://odgprod.com/2020/12/
BASE_URL="http://odgprod.com"

for i in {2018..2020}; do
	for j in {01..12}; do
		URL="$BASE_URL/$i/$j/"
		echo "Fetching download links from $URL"
		DOWNLOAD_LINKS=$(get_download_links_from_odgprod_url "$URL")
		echo "Downloak links found:"
		echo "$DOWNLOAD_LINKS"
		# Loop over download links gotten from the webpage
		for DOWNLOAD_LINK in $DOWNLOAD_LINKS; do
			download_from_bitly_link_to_folder "$DOWNLOAD_LINK" "$DOWNLOAD_FOLDER"
			TOTAL_ALBUMS=$((TOTAL_ALBUMS + 1))
			DOWNLOADED_ALBUMS=$((DOWNLOADED_ALBUMS + 1))
		done
	done
done

echo "################ OriginalDubGathering Downloader Results #################"
echo " - Total number of albums on ODGProd : $TOTAL_ALBUMS"
echo " - Number of albums already present on your computer : $ALREADY_PRESENT_ALBUMS"
echo " - Number of albums downloaded this time : $DOWNLOADED_ALBUMS"
echo " - Number of albums that failed to download : $FAILED_ALBUMS"
# Fix: the original wrote ${DOWNLOADED_ALBUMS+ALREADY_PRESENT_ALBUMS},
# which is the "use alternate value" parameter expansion (it printed the
# literal text "ALREADY_PRESENT_ALBUMS"), not arithmetic.
echo " - TOTAL ALBUMS YOU HAVE : $((DOWNLOADED_ALBUMS + ALREADY_PRESENT_ALBUMS))/$TOTAL_ALBUMS"
echo "################ Script worked like a charm! Congratz! #################"
| true
|
ec3347ad4084519ba93308396c530fdde93c4ae2
|
Shell
|
peccu/tomoya-init
|
/buildemacs
|
UTF-8
| 332
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/sh
# build at current directory
# Downloads, builds and installs GNU Emacs 23.3a (Cocoa/NS build, no X11),
# then fetches the follow-up "tomoya-init" bootstrap script.
curl -O http://ftp.gnu.org/pub/gnu/emacs/emacs-23.3a.tar.gz
tar xzvf emacs-23.3a.tar.gz
cd emacs-23.3a
# --with-ns: native macOS (NeXTstep) GUI; --without-x: skip X11 support.
./configure --with-ns --without-x&&make&&make install
curl -O http://github.com/peccu/tomoya-init/raw/master/tomoya-init
chmod a+x tomoya-init
echo "Now you have to do is run ./tomoya-init"
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.