blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
5bd848ed459bac564b46c904a7f4d04c4dd9d8e5
|
Shell
|
dywisor/omir
|
/site/src/rootfs/base/etc/profile
|
UTF-8
| 449
| 3.265625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# System-wide profile: source every readable snippet in /etc/profile.d/*.sh,
# then restore the caller's original shell options.
#
# noglob (-f) and nounset (-u) are temporarily switched off so that the
# wildcard below expands and sloppy snippets cannot abort the login shell;
# the _profile_restore_* markers remember which options to switch back on.
_profile_restore_noglob=
case "$-" in
*f*) _profile_restore_noglob=1; set +f ;;
esac
_profile_restore_nounset=
case "$-" in
*u*) _profile_restore_nounset=1; set +u ;;
esac
# Source each readable snippet; "|| :" swallows non-zero exits so one bad
# snippet does not stop the rest.
t0=
for t0 in /etc/profile.d/*.sh; do
[ -r "${t0}" ] && . "${t0}" || :
done
unset -v t0
# Re-enable the options that were active on entry, then drop the markers.
[ -z "${_profile_restore_nounset-}" ] || set -u
unset -v _profile_restore_nounset
[ -z "${_profile_restore_noglob-}" ] || set -f
unset -v _profile_restore_noglob
| true
|
75d7de0471f5880091540b59f72655e57c9778a9
|
Shell
|
denewman/teleconf
|
/testing/test_mdtssh.sh
|
UTF-8
| 10,139
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
#script name: test_mdtssh.sh
#purpose: test MDT SSH API.
#author: Yifeng Shen
#version: 1.0
#Create date: 2016-10-15
#Change history:
# ---- script-wide constants and state -------------------------------------
SCRIPTNAME="test_mdtssh.sh"

# Absolute tool paths so the script does not depend on the caller's PATH.
AWK="/usr/bin/awk"
DATE="/bin/date"
WC="/usr/bin/wc"
SORT="/usr/bin/sort"
UNIQ="/usr/bin/uniq"
HEAD="/usr/bin/head"
GREP="/bin/grep"
TEE="/usr/bin/tee"
PRINTF="/usr/bin/printf"
SLEEP="/bin/sleep"

# Run state: overall exit code, log timestamp and pass/fail counters.
exitcode=0
timestamp=$($DATE '+%b%e %T' | $AWK -F '[: ]' '{print $1"-"$2"-"$3"-"$4}')
TMPLOG="/tmp/tmplog"
goodcount=0
badcount=0
# Start every run with an empty scratch log.
: > $TMPLOG
TESTPROFILE="./testprofile.txt"

# Parameters read from the test profile by getvalues(); empty until loaded.
MAINPGORAM=""
TESTNUMBER=""
INTERVAL=""
MDTSINGLEPATH=""
MDTMULTIPATH=""
ROUTERIP=""
ACCESSPORT=""
USRNAME=""
PASSWD=""
ADDFAMILY=""
DESTGNAME=""
DESTGPREFIX=""
SENSORPREFIX=""
SUBSCIPTPREFIX=""
DETINATIONLST=""
DENCODER=""
DPROTOCOL=""
RMTPORT=""
LOG=""
PYTHON=""
POLLINGINTERVAL=""
SUBID=""
# Print the command-line help text for this script to stdout.
usage()
{
    cat <<EOF

Usage: ./$SCRIPTNAME {-f <test profile>}
Options:
-f: full path of testing profile, default is ./testprofile.txt
-h: print script usage
Examples:
##default mode, all test parameters are saved in file ./testprofile.txt ##
./$SCRIPTNAME
##assume all the test parameters are saved in file /data/testprofile ##
./$SCRIPTNAME -f /data/testprofile

EOF
}
# Print the message for a numeric error code (empty line for unknown codes)
# and terminate the script with that code.
throwerror()
{
    errorcode=$1
    MESSAGE=""

    # Reason text keyed by error code; the final message is assembled below.
    local -a reason
    reason[1]="missing API client program"
    reason[2]="missing test number"
    reason[3]="missing test interval"
    reason[4]="missing senor path (single)"
    reason[5]="missing sensor path (multiple)"
    reason[6]="missing router ip address"
    reason[7]="missing user name of router"
    reason[8]="missing password of router"
    reason[9]="missing SSH/Netconf port value"
    reason[10]="missing destination group prefix"
    reason[11]="missing sensor group prefix"
    reason[12]="missing destination group ip address"
    reason[13]="missing destination group remote port"
    reason[14]="missing destination group encoding schema"
    reason[15]="missing destination group protocol value"
    reason[16]="missing address family information"
    reason[17]="missing script log file information"
    reason[18]="missing testing profile"
    reason[19]="unable to clean old configurations"
    reason[20]="missing python path"
    reason[21]="missing subscription prefix"
    reason[22]="missing telemetry polling interval"
    reason[23]="missing start subscription id"

    # Only well-formed, known codes get a message (mirrors the old case
    # ladder, which fell through silently for anything else).
    if [[ $errorcode =~ ^[0-9]+$ ]] && [ -n "${reason[$errorcode]-}" ]; then
        MESSAGE="Error $errorcode: ${reason[$errorcode]}"
    fi
    echo "$MESSAGE"
    exit $errorcode
}
# Load every test parameter from $TESTPROFILE into its global variable.
# Each value is the last whitespace-separated field of the first profile
# line containing the key (substring match, as before). A missing value
# aborts via throwerror with that parameter's error code; MAINPGORAM and
# PYTHON must additionally name existing files.
# Returns 0 (any failure exits through throwerror instead).
function getvalues
{
    returncode=0

    ##helper: print the last field of the first profile line containing $1##
    profile_lookup()
    {
        $GREP "$1" $TESTPROFILE | $HEAD -1 | $AWK '{print $NF}'
    }

    ##helper: read key $2 into global variable $1; abort with code $3 if empty##
    fetch_value()
    {
        local _val
        _val=$(profile_lookup "$2")
        if [ -z "$_val" ]; then
            returncode=$3
            throwerror $returncode
        fi
        printf -v "$1" '%s' "$_val"
    }

    ##helper: like fetch_value, but the value must name an existing file##
    ##(replaces the old "[ ! -n ... -o ! -f ... ]" tests: -o is deprecated##
    ##and the file operand was unquoted)##
    fetch_file()
    {
        fetch_value "$1" "$2" "$3"
        if [ ! -f "${!1}" ]; then
            returncode=$3
            throwerror $returncode
        fi
    }

    fetch_file  MAINPGORAM      "MDT_SSH"         1    ##main API client program##
    fetch_value TESTNUMBER      "TESTNUMBER"      2    ##number of testings##
    fetch_value INTERVAL        "INTERVAL"        3    ##testing interval##
    fetch_value MDTSINGLEPATH   "MDTSINGLEPATH"   4    ##single sensor path##
    fetch_value MDTMULTIPATH    "MDTMULTIPATH"    5    ##multiple sensor paths##
    fetch_value ROUTERIP        "ROUTERIP"        6    ##router ip address##
    fetch_value USRNAME         "USRNAME"         7    ##router user name##
    fetch_value PASSWD          "PASSWD"          8    ##router password##
    fetch_value ACCESSPORT      "SSHPORT"         9    ##SSH/Netconf port##
    fetch_value DESTGPREFIX     "DESTGPREFIX"     10   ##destination group prefix##
    fetch_value SENSORPREFIX    "SENSORPREFIX"    11   ##sensor group prefix##
    fetch_value DETINATIONLST   "DETINATIONLST"   12   ##destination group ip##
    fetch_value RMTPORT         "RMTPORT"         13   ##destination remote port##
    fetch_value DENCODER        "DENCODER"        14   ##encoding schema##
    fetch_value DPROTOCOL       "DPROTOCOL"       15   ##protocol value##
    fetch_value ADDFAMILY       "ADDFAMILY"       16   ##address family##
    fetch_value LOG             "LOG"             17   ##script log file##
    fetch_file  PYTHON          "PYTHON"          20   ##python interpreter##
    fetch_value SUBSCIPTPREFIX  "SUBSCIPTPREFIX"  21   ##subscription prefix##
    fetch_value POLLINGINTERVAL "POLLINGINTERVAL" 22   ##telemetry polling interval##
    fetch_value SUBID           "SUBID"           23   ##start subscription id##

    return $returncode
}
# Emit the final summary line (executed/passed/failed counts) to the
# console and append it to $LOG.
printresult()
{
    executedtest=$1
    passedtest=$2
    failedtest=$3
    # Re-stamp the time so the summary carries the completion moment.
    timestamp=$($DATE '+%b%e %T' | $AWK -F '[: ]' '{print $1"-"$2"-"$3"-"$4}')
    echo "$timestamp [$SCRIPTNAME] Summary: Total tests executed: $executedtest,\
 Passed: $passedtest, Failed: $failedtest" | $TEE -a $LOG
}
# ---- option parsing ------------------------------------------------------
# -f <profile>: alternate test profile; -h: print usage and exit.
# Fix: the old spec "f:h:" made -h demand an argument, so plain -h tripped a
# getopts error and was caught by a separate "$1" check afterwards.
while getopts f:h option
do
    case $option in
        f)
            TESTPROFILE=$OPTARG
            ;;
        h)
            usage
            exit $exitcode
            ;;
    esac
done
##make sure the testing profile does exist##
if [ ! -f "$TESTPROFILE" ]
then
    # Fix: was "returncode = 18" (spaces), which ran a command named
    # "returncode", left the variable at 0 and made throwerror exit 0.
    returncode=18
    throwerror $returncode
fi
##load all test parameters from the profile##
getvalues
returncode=$?
# Fix: was "[[ $returncode > 0 ]]", a lexical string comparison.
if [ "$returncode" -gt 0 ]
then
    throwerror $returncode
fi
i=0
##remove the original configurations##
testresult="NA"   # fix: was the misspelled (and therefore unused) "testreulst"
timestamp=$($DATE '+%b%e %T' | $AWK -F '[: ]' '{print $1"-"$2"-"$3"-"$4}')
echo "$timestamp [$SCRIPTNAME] removing old configure on $ROUTERIP started......" | $TEE -a $LOG
$PYTHON $MAINPGORAM deleteMDT $ROUTERIP $USRNAME $PASSWD $ACCESSPORT ssh $DESTGPREFIX \
$ADDFAMILY $DETINATIONLST $RMTPORT $SENSORPREFIX $MDTSINGLEPATH $SUBSCIPTPREFIX \
$SUBID $POLLINGINTERVAL > $TMPLOG
timestamp=$($DATE '+%b%e %T' | $AWK -F '[: ]' '{print $1"-"$2"-"$3"-"$4}')
# Fix: "grep ... 2>&1 > /dev/null" redirected in the wrong order and leaked
# stderr; -q is the idiomatic silent-presence test.
if grep -q "Operation success" $TMPLOG
then
    testresult="success!"   # fix: message typo "sucess!"
else
    testresult="fail!"
fi
cat $TMPLOG >> $LOG
echo "$timestamp [$SCRIPTNAME] removing old configure on $ROUTERIP: $testresult" | $TEE -a $LOG
##run TESTNUMBER configuration rounds, alternating single/multiple paths##
while [ "$i" -lt "$TESTNUMBER" ]
do
    cat /dev/null > $TMPLOG
    testresult="NA"
    timestamp=$($DATE '+%b%e %T' | $AWK -F '[: ]' '{print $1"-"$2"-"$3"-"$4}')
    i=$((i + 1))
    echo "$timestamp [$SCRIPTNAME] test ($i/$TESTNUMBER) started......" | $TEE -a $LOG
    DGROUPNAME="$DESTGPREFIX.$i"
    SENSORGROUPNAME="$SENSORPREFIX.$i"
    # Odd rounds exercise the single sensor path, even rounds the multi path.
    if [ $((i % 2)) -eq 1 ]
    then
        SENSORPATH=$MDTSINGLEPATH
    else
        SENSORPATH=$MDTMULTIPATH
    fi
    SUBID=$((SUBID + 1))
    $PYTHON $MAINPGORAM configAll $ROUTERIP $USRNAME $PASSWD $ACCESSPORT ssh $DESTGPREFIX \
    $ADDFAMILY $DETINATIONLST $RMTPORT $SENSORGROUPNAME $SENSORPATH $SUBSCIPTPREFIX \
    $SUBID $POLLINGINTERVAL > $TMPLOG
    if grep -q "Operation success" $TMPLOG
    then
        testresult="pass"
        goodcount=$((goodcount + 1))
    else
        testresult="fail"
        badcount=$((badcount + 1))
    fi
    cat $TMPLOG >> $LOG
    timestamp=$($DATE '+%b%e %T' | $AWK -F '[: ]' '{print $1"-"$2"-"$3"-"$4}')
    echo "$timestamp [$SCRIPTNAME] test ($i/$TESTNUMBER) completed. Result: $testresult" | $TEE -a $LOG
    $SLEEP $INTERVAL
done
printresult $i $goodcount $badcount
echo "script completed, all details are saved in $LOG"
| true
|
49e1bba425e0ad35fa6a93a6155b44bdd57f0faa
|
Shell
|
linusnilssonatuppmax/rekrytering
|
/rekrytering-perl-prepare.sh
|
UTF-8
| 268
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
# Build a throwaway project tree: up to 1000 randomly named project
# directories, each with one random-sized dataset file owned by a random
# group in the 5000-6999 range.
mkdir projektkatalog
cd projektkatalog || exit 5
for i in {1..1000} ; do
  P="proj-$RANDOM"
  # Random names can collide; keep the first occupant untouched.
  test -d "$P" && continue
  mkdir "$P"
  dd if=/dev/urandom of="$P/dataset_$i.txt" bs=1 count=$RANDOM
  chgrp -R "$(echo $RANDOM%2000+5000 | bc )" "$P"
done
| true
|
9b63c8bcd30bfb7237a50dd23626a73c904114aa
|
Shell
|
newportandy/dotfiles
|
/scripts/.scripts/with-docker-db
|
UTF-8
| 1,271
| 4.1875
| 4
|
[] |
no_license
|
#!/bin/bash -e
# Run an arbitrary command against a dockerised PostgreSQL instance,
# exporting the PG* environment so libpq-based tools find it automatically.
db_image=mdillon/postgis:9.6-alpine
db_container=my-app-postgresql

if [ -z "$1" ]; then
  echo "Run command with a dockerised PostgreSQL DB.
usage: $(basename "$0") command
The various PG* environment variables are set so that if command uses
them - as postgresql's tools and most libpq-based programs do - then
they will automatically use this DB." >&2
  exit 1
fi

# Connection settings consumed by psql and the wrapped command alike.
export PGPASSWORD=bananapancakes
export PGPORT=15432
export PGUSER=postgres
export PGHOST=localhost

say() { echo "$*" >&1; }

say "Starting Dockerised DB"
# Reuse a leftover container when one exists; otherwise create one.
if docker container inspect "$db_container" >/dev/null 2>&1; then
  say "* Using existing container"
  docker container start "$db_container" >/dev/null
else
  say "* Creating fresh container"
  docker run --name "$db_container" -p "$PGPORT":5432 -e POSTGRES_PASSWORD="$PGPASSWORD" -d "$db_image" >/dev/null
fi

# Always stop the DB on exit, whatever the wrapped command does.
teardown() {
  say "Stopping Dockerised DB"
  docker container stop "$db_container" >/dev/null
}
trap teardown EXIT

# Poll until the server accepts connections, up to 10 one-second attempts.
attempts=0
until psql -c 'select 1' >/dev/null 2>&1; do
  attempts=$(( attempts + 1 ))
  if [ "$attempts" -eq 10 ]; then
    say "Timed out waiting for port to be opened."
    exit 1
  fi
  say "* Waiting for DB to be available"
  sleep 1
done

"$@"
| true
|
6b364657a82300d1bd1b7b3ed5506b6d16d5565c
|
Shell
|
mulle-objc/mulle-objc-developer
|
/.mulle/etc/project/generate-formula.sh
|
UTF-8
| 248
| 2.875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#
# Generate your `def install` `test do` lines here. echo them to stdout.
#
generate_brew_formula_build()
{
local project="$1"
local name="$2"
local version="$3"
cat <<EOF
def install
system "./bin/installer", "#{prefix}"
end
EOF
}
| true
|
baa4cb8671336de29813fef41393af861e62d6f5
|
Shell
|
Xiao233-q/N1dabao
|
/56/opt/openwrt/files/update-amlogic-openwrt.sh
|
UTF-8
| 22,690
| 3.953125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# In-place upgrade of an Amlogic OpenWrt install from a firmware image.
# Usage: update-amlogic-openwrt.sh xxx.img
# check cmd param
if [ "$1" == "" ];then
echo "用法: $0 xxx.img"
exit 1
fi
# make sure the image file exists
IMG_NAME=$1
if [ ! -f "$IMG_NAME" ];then
echo "$IMG_NAME 不存在!"
exit 1
fi
# verify required tools, then locate the current /boot partition
DEPENDS="lsblk uuidgen grep awk mkfs.fat mkfs.btrfs perl md5sum"
for dep in ${DEPENDS};do
which $dep
if [ $? -ne 0 ];then
echo "依赖的命令: $dep 不存在!"
exit 1
fi
done
BOOT_PART_MSG=$(lsblk -l -o NAME,PATH,TYPE,UUID,MOUNTPOINT | awk '$3~/^part$/ && $5 ~ /^\/boot$/ {print $0}')
if [ "${BOOT_PART_MSG}" == "" ];then
echo "Boot 分区不存在,或是没有正确挂载, 因此无法继续升级!"
exit 1
fi
# get the name of the dtb file currently in use (FDT= from uEnv.txt)
cp /boot/uEnv.txt /tmp/
source /boot/uEnv.txt 2>/dev/null
CUR_FDTFILE=${FDT}
if [ "${CUR_FDTFILE}" == "" ];then
echo "警告: 未查到当前使用的 dtb 文件名,可能影响后面的升级(也可能不影响)"
fi
# collect parameters of the currently installed firmware
CUR_SOC=""
CUR_BOARD=""
CUR_MAINLINE_UBOOT=""
CUR_MAINLINE_UBOOT_MD5SUM=""
if [ -f /etc/flippy-openwrt-release ];then
source /etc/flippy-openwrt-release
CUR_SOC=$SOC
CUR_BOARD=$BOARD
CUR_MAINLINE_UBOOT=${MAINLINE_UBOOT}
CUR_MAINLINE_UBOOT_MD5SUM=${MAINLINE_UBOOT_MD5SUM}
fi
CUR_KV=$(uname -r)
# decide whether the running kernel version is >= 5.10 (CUR_K510 flag)
CK_VER=$(echo "$CUR_KV" | cut -d '.' -f1)
CK_MAJ=$(echo "$CUR_KV" | cut -d '.' -f2)
if [ $CK_VER -eq 5 ];then
if [ $CK_MAJ -ge 10 ];then
CUR_K510=1
else
CUR_K510=0
fi
elif [ $CK_VER -gt 5 ];then
CUR_K510=1
else
CUR_K510=0
fi
# backup flag: ask whether old configuration should be restored after upgrade
BR_FLAG=1
echo -ne "你想要备份旧版本的配置,并将其还原到升级后的系统中吗? y/n [y]\b\b"
read yn
case $yn in
n*|N*) BR_FLAG=0;;
esac
BOOT_NAME=$(echo $BOOT_PART_MSG | awk '{print $1}')
BOOT_PATH=$(echo $BOOT_PART_MSG | awk '{print $2}')
BOOT_UUID=$(echo $BOOT_PART_MSG | awk '{print $4}')
# emmc devices expose two extra nodes (/dev/mmcblk?p?boot0 and ...boot1);
# TF cards and USB sticks do not, so their presence means boot is on EMMC
MMCBOOT0=${BOOT_PATH%%p*}boot0
if [ -b "${MMCBOOT0}" ];then
CUR_BOOT_FROM_EMMC=1 # boot partition lives on EMMC
echo "当前的 boot 分区在 EMMC 里"
cp /boot/u-boot.ext /tmp/ 2>/dev/null
cp /boot/u-boot.emmc /tmp/ 2>/dev/null
BOOT_LABEL="EMMC_BOOT"
else
CUR_BOOT_FROM_EMMC=0 # boot partition is NOT on EMMC (TF card or USB stick)
if echo "${BOOT_PATH}" | grep "mmcblk" > /dev/null;then
echo "当前的 boot 分区在 TF卡 里"
else
echo "当前的 boot 分区在 U盘 里"
fi
cp /boot/u-boot.ext /tmp/ 2>/dev/null
cp /boot/u-boot.emmc /tmp/ 2>/dev/null
BOOT_LABEL="BOOT"
fi
# find root partition
ROOT_PART_MSG=$(lsblk -l -o NAME,PATH,TYPE,UUID,MOUNTPOINT | awk '$3~/^part$/ && $5 ~ /^\/$/ {print $0}')
ROOT_NAME=$(echo $ROOT_PART_MSG | awk '{print $1}')
ROOT_PATH=$(echo $ROOT_PART_MSG | awk '{print $2}')
ROOT_UUID=$(echo $ROOT_PART_MSG | awk '{print $4}')
# The new rootfs goes to the sibling partition (p2 <-> p3) on the same
# device; any other layout aborts the upgrade.
case $ROOT_NAME in
mmcblk1p2) NEW_ROOT_NAME=mmcblk1p3
NEW_ROOT_LABEL=EMMC_ROOTFS2
;;
mmcblk1p3) NEW_ROOT_NAME=mmcblk1p2
NEW_ROOT_LABEL=EMMC_ROOTFS1
;;
mmcblk2p2) NEW_ROOT_NAME=mmcblk2p3
NEW_ROOT_LABEL=EMMC_ROOTFS2
;;
mmcblk2p3) NEW_ROOT_NAME=mmcblk2p2
NEW_ROOT_LABEL=EMMC_ROOTFS1
;;
*) echo "ROOTFS 分区位置不正确, 因此无法继续升级!"
exit 1
;;
esac
# find new root partition (must exist and be mounted neither as / nor /boot)
NEW_ROOT_PART_MSG=$(lsblk -l -o NAME,PATH,TYPE,UUID,MOUNTPOINT | grep "${NEW_ROOT_NAME}" | awk '$3 ~ /^part$/ && $5 !~ /^\/$/ && $5 !~ /^\/boot$/ {print $0}')
if [ "${NEW_ROOT_PART_MSG}" == "" ];then
echo "新的 ROOTFS 分区不存在, 因此无法继续升级!"
exit 1
fi
NEW_ROOT_NAME=$(echo $NEW_ROOT_PART_MSG | awk '{print $1}')
NEW_ROOT_PATH=$(echo $NEW_ROOT_PART_MSG | awk '{print $2}')
NEW_ROOT_UUID=$(echo $NEW_ROOT_PART_MSG | awk '{print $4}')
NEW_ROOT_MP=$(echo $NEW_ROOT_PART_MSG | awk '{print $5}')
# losetup: attach the image so its partitions appear as ${LOOP_DEV}p1/p2
losetup -f -P $IMG_NAME
if [ $? -eq 0 ];then
LOOP_DEV=$(losetup | grep "$IMG_NAME" | awk '{print $1}')
if [ "$LOOP_DEV" == "" ];then
echo "loop 设备未找到!"
exit 1
fi
else
# NOTE(review): $IMG_FILE below is never set; probably meant $IMG_NAME.
echo "losetup $IMG_FILE 失败!"
exit 1
fi
# brief pause so the kernel finishes creating the partition nodes
WAIT=3
echo -n "The loopdev is $LOOP_DEV, wait ${WAIT} seconds "
while [ $WAIT -ge 1 ];do
echo -n "."
sleep 1
WAIT=$(( WAIT - 1 ))
done
echo
# umount loop devices (openwrt will auto mount some partition)
MOUNTED_DEVS=$(lsblk -l -o NAME,PATH,MOUNTPOINT | grep "$LOOP_DEV" | awk '$3 !~ /^$/ {print $2}')
for dev in $MOUNTED_DEVS;do
while : ;do
echo -n "卸载 $dev ... "
umount -f $dev
sleep 1
mnt=$(lsblk -l -o NAME,PATH,MOUNTPOINT | grep "$dev" | awk '$3 !~ /^$/ {print $2}')
if [ "$mnt" == "" ];then
echo "成功"
break
else
echo "重试 ..."
fi
done
done
# mount src part: image boot (vfat) -> ./boot, image rootfs (btrfs) -> ./root
WORK_DIR=$PWD
P1=${WORK_DIR}/boot
P2=${WORK_DIR}/root
mkdir -p $P1 $P2
echo -n "挂载 ${LOOP_DEV}p1 -> ${P1} ... "
mount -t vfat -o ro ${LOOP_DEV}p1 ${P1}
if [ $? -ne 0 ];then
echo "挂载失败!"
losetup -D
exit 1
else
echo "成功"
fi
echo -n "挂载 ${LOOP_DEV}p2 -> ${P2} ... "
mount -t btrfs -o ro,compress=zstd ${LOOP_DEV}p2 ${P2}
if [ $? -ne 0 ];then
echo "挂载失败!"
umount -f ${P1}
losetup -D
exit 1
else
echo "成功"
fi
# compare old and new firmware versions
NEW_SOC=""
NEW_BOARD=""
if [ -f ${P2}/etc/flippy-openwrt-release ];then
source ${P2}/etc/flippy-openwrt-release
NEW_SOC=${SOC}
NEW_BOARD=${BOARD}
fi
# md5 of the new image's copy of the current u-boot path (if present)
NEW_MAINLINE_UBOOT_MD5SUM=""
if [ "${CUR_MAINLINE_UBOOT}" != "" ];then
if [ -f "${P2}${CUR_MAINLINE_UBOOT}" ];then
NEW_MAINLINE_UBOOT_MD5SUM=$(md5sum ${P2}${CUR_MAINLINE_UBOOT} | awk '{print $1}')
fi
fi
NEW_KV=$(ls ${P2}/lib/modules/)
# decide whether the new kernel version is >= 5.10 (NEW_K510 flag)
NK_VER=$(echo "$NEW_KV" | cut -d '.' -f1)
NK_MAJ=$(echo "$NEW_KV" | cut -d '.' -f2)
if [ $NK_VER -eq 5 ];then
if [ $NK_MAJ -ge 10 ];then
NEW_K510=1
else
NEW_K510=0
fi
elif [ $NK_VER -gt 5 ];then
NEW_K510=1
else
NEW_K510=0
fi
# validate the version about to be flashed (flippy-NN+ / flippy-NN+o scheme)
echo $NEW_KV | grep -E 'flippy-[0-9]{1,3}\+[o]{0,1}' > /dev/null
if [ $? -ne 0 ];then
echo "目标固件的内核版本格式无法识别!"
umount -f ${P1}
umount -f ${P2}
losetup -D
exit 1
fi
NEW_FLIPPY_VER=${NEW_KV##*-}
NEW_FLIPPY_NUM=${NEW_FLIPPY_VER%+*}
if [ $NEW_FLIPPY_NUM -le 53 ];then
echo "本脚本不支持降级到 53+ 或 53+o 以下的版本,请换成 update-amlogic-openwrt-old.sh"
umount -f ${P1}
umount -f ${P2}
losetup -D
exit 1
fi
# the image must match the running hardware's SOC and BOARD (when known)
if [ "${CUR_SOC}" != "" ];then
if [ "${CUR_SOC}" != "${NEW_SOC}" ];then
echo "采用的镜像文件与当前环境的 SOC 不匹配, 请检查!"
umount -f ${P1}
umount -f ${P2}
losetup -D
exit 1
else
if [ "${CUR_BOARD}" != "" ];then
if [ "${CUR_BOARD}" != "${NEW_BOARD}" ];then
echo "采用的镜像文件与当前环境的 BOARD 不匹配, 请检查!"
umount -f ${P1}
umount -f ${P2}
losetup -D
exit 1
fi
fi
fi
fi
# Optionally migrate /boot from TF card / USB stick to an EMMC fat32
# partition (interactive). Sets BOOT_CHANGED=-1 when the switch succeeded.
# Fix: two error paths called the non-existent command "loseup -D", which
# failed with "command not found" and left the loop device attached.
BOOT_CHANGED=0
if [ $CUR_BOOT_FROM_EMMC -eq 0 ];then
while :;do # do level 1
read -p "目标固件可以从 EMMC 启动,你需要切换 boot 到 EMMC 吗? y/n " yn1
case $yn1 in
n|N) break;;
y|Y) NEW_BOOT_MSG=$(lsblk -l -o PATH,NAME,TYPE,FSTYPE,MOUNTPOINT | grep "vfat" | grep -v "loop" | grep -v "${BOOT_PATH}" | head -n 1)
if [ "${NEW_BOOT_MSG}" == "" ];then
echo "很抱歉,未发现 emmc 里可用的 fat32 分区, 再见!"
umount -f $P1
umount -f $P2
losetup -D
exit 1
fi
NEW_BOOT_PATH=$(echo $NEW_BOOT_MSG | awk '{print $1}')
NEW_BOOT_NAME=$(echo $NEW_BOOT_MSG | awk '{print $2}')
NEW_BOOT_MOUNTPOINT=$(echo $NEW_BOOT_MSG | awk '{print $5}')
read -p "新的 boot 设备是 $NEW_BOOT_PATH , 确认吗? y/n " pause
NEW_BOOT_OK=0
case $pause in
n|N) echo "无法找到合适的boot设备, 再见!"
umount -f $P1
umount -f $P2
losetup -D
exit 1
;;
y|Y) BOOT_LABEL="EMMC_BOOT"
while :;do # do level 2
read -p "将要重新格式化 ${NEW_BOOT_PATH} 设备,里面的数据将会丢失, 确认吗? y/n " yn2
case $yn2 in
n|N) echo "再见"
umount -f $P1
umount -f $P2
losetup -D
exit 1
;;
y|Y) if [ "${NEW_BOOT_MOUNTPOINT}" != "" ];then
umount -f ${NEW_BOOT_MOUNTPOINT}
if [ $? -ne 0 ];then
echo "无法卸载 ${NEW_BOOT_MOUNTPOINT}, 再见"
umount -f $P1
umount -f $P2
losetup -D
exit 1
fi
fi
echo "格式化 ${NEW_BOOT_PATH} ..."
mkfs.fat -F 32 -n "${BOOT_LABEL}" ${NEW_BOOT_PATH}
echo "挂载 ${NEW_BOOT_PATH} -> /mnt/${NEW_BOOT_NAME} ..."
# NOTE(review): /mnt/${NEW_BOOT_NAME} is never created with mkdir before
# this mount — confirm the mount point always pre-exists on this firmware.
mount ${NEW_BOOT_PATH} /mnt/${NEW_BOOT_NAME}
if [ $? -ne 0 ];then
echo "挂载 ${NEW_BOOT_PATH} -> /mnt/${NEW_BOOT_NAME} 失败!"
umount -f $P1
umount -f $P2
losetup -D
exit 1
fi
echo "复制 /boot -> /mnt/${NEW_BOOT_NAME} ..."
cp -a /boot/* /mnt/${NEW_BOOT_NAME}/
echo "切换 boot ..."
umount -f /boot && \
umount -f /mnt/${NEW_BOOT_NAME}/ && \
mount ${NEW_BOOT_PATH} /boot
if [ $? -ne 0 ];then
echo "切换失败!"
umount -f $P1
umount -f $P2
losetup -D
exit 1
else
echo "/boot 已切换到 ${NEW_BOOT_PATH}"
NEW_BOOT_OK=1
fi
break # break out of level 2
;;
esac
done # do level 2
;;
esac # case $pause
if [ $NEW_BOOT_OK -eq 1 ];then
BOOT_CHANGED=-1
break # break out of level 1
fi
;;
esac # case $yn1
done # do level 1
fi # current boot is not from emmc
#format NEW_ROOT
echo "卸载 ${NEW_ROOT_MP}"
umount -f "${NEW_ROOT_MP}"
if [ $? -ne 0 ];then
echo "卸载失败, 请重启后再试一次!"
umount -f ${P1}
umount -f ${P2}
losetup -D
exit 1
fi
# recreate the target rootfs with a fresh UUID (referenced later in fstab)
echo "格式化 ${NEW_ROOT_PATH}"
NEW_ROOT_UUID=$(uuidgen)
mkfs.btrfs -f -U ${NEW_ROOT_UUID} -L ${NEW_ROOT_LABEL} -m single ${NEW_ROOT_PATH}
if [ $? -ne 0 ];then
echo "格式化 ${NEW_ROOT_PATH} 失败!"
umount -f ${P1}
umount -f ${P2}
losetup -D
exit 1
fi
echo "挂载 ${NEW_ROOT_PATH} -> ${NEW_ROOT_MP}"
mount -t btrfs -o compress=zstd ${NEW_ROOT_PATH} ${NEW_ROOT_MP}
if [ $? -ne 0 ];then
echo "挂载 ${NEW_ROOT_PATH} -> ${NEW_ROOT_MP} 失败!"
umount -f ${P1}
umount -f ${P2}
losetup -D
exit 1
fi
# begin copy rootfs
cd ${NEW_ROOT_MP}
echo "开始复制数据, 从 ${P2} 到 ${NEW_ROOT_MP} ..."
# clear any leftovers from a previous install (keep lost+found)
ENTRYS=$(ls)
for entry in $ENTRYS;do
if [ "$entry" == "lost+found" ];then
continue
fi
echo -n "移除旧的 $entry ... "
rm -rf $entry
if [ $? -eq 0 ];then
echo "成功"
else
echo "失败"
exit 1
fi
done
echo
echo -n "创建文件夹 ... "
mkdir -p .reserved bin boot dev etc lib opt mnt overlay proc rom root run sbin sys tmp usr www
ln -sf lib/ lib64
ln -sf tmp/ var
echo "完成"
echo
# stream each top-level tree from the image with tar (preserves perms/links)
COPY_SRC="root etc bin sbin lib opt usr www"
echo "复制数据 ... "
for src in $COPY_SRC;do
echo -n "复制 $src ... "
(cd ${P2} && tar cf - $src) | tar mxf -
sync
echo "完成"
done
# docker and AdGuardHome payloads live on the shared data partition
SHFS="/mnt/mmcblk2p4"
[ -d ${SHFS}/docker ] || mkdir -p ${SHFS}/docker
rm -rf opt/docker && ln -sf ${SHFS}/docker/ opt/docker
if [ -f /mnt/${NEW_ROOT_NAME}/etc/config/AdGuardHome ];then
[ -d ${SHFS}/AdGuardHome/data ] || mkdir -p ${SHFS}/AdGuardHome/data
if [ ! -L /usr/bin/AdGuardHome ];then
[ -d /usr/bin/AdGuardHome ] && \
cp -a /usr/bin/AdGuardHome/* ${SHFS}/AdGuardHome/
fi
ln -sf ${SHFS}/AdGuardHome /mnt/${NEW_ROOT_NAME}/usr/bin/AdGuardHome
fi
rm -f /mnt/${NEW_ROOT_NAME}/root/install-to-emmc.sh
sync
echo "复制完成"
echo
# list of config paths to carry over, as reported by the new image's flippy
BACKUP_LIST=$(${P2}/usr/sbin/flippy -p)
if [ $BR_FLAG -eq 1 ];then
# restore old config files
# releases are compared as zero-padded YYMMDD-style integers parsed from
# DISTRIB_REVISION (e.g. R20.3.19 -> 200319)
OLD_RELEASE=$(grep "DISTRIB_REVISION=" /etc/openwrt_release | awk -F "'" '{print $2}'|awk -F 'R' '{print $2}' | awk -F '.' '{printf("%02d%02d%02d\n", $1,$2,$3)}')
NEW_RELEASE=$(grep "DISTRIB_REVISION=" ./etc/uci-defaults/99-default-settings | awk -F "'" '{print $2}'|awk -F 'R' '{print $2}' | awk -F '.' '{printf("%02d%02d%02d\n", $1,$2,$3)}')
# shadowsocksr config format changed between these releases; keep the new
# default aside, restore the old one, then swap back afterwards
if [ ${OLD_RELEASE} -le 200311 ] && [ ${NEW_RELEASE} -ge 200319 ];then
mv ./etc/config/shadowsocksr ./etc/config/shadowsocksr.${NEW_RELEASE}
fi
mv ./etc/config/qbittorrent ./etc/config/qbittorrent.orig
echo -n "开始还原从旧系统备份的配置文件 ... "
(
cd /
eval tar czf ${NEW_ROOT_MP}/.reserved/openwrt_config.tar.gz "${BACKUP_LIST}" 2>/dev/null
)
tar xzf ${NEW_ROOT_MP}/.reserved/openwrt_config.tar.gz
if [ ${OLD_RELEASE} -le 200311 ] && [ ${NEW_RELEASE} -ge 200319 ];then
mv ./etc/config/shadowsocksr ./etc/config/shadowsocksr.${OLD_RELEASE}
mv ./etc/config/shadowsocksr.${NEW_RELEASE} ./etc/config/shadowsocksr
fi
# keep the restored qbittorrent config only when it is actually populated
if grep 'config qbittorrent' ./etc/config/qbittorrent; then
rm -f ./etc/config/qbittorrent.orig
else
mv ./etc/config/qbittorrent.orig ./etc/config/qbittorrent
fi
sed -e "s/option wan_mode 'false'/option wan_mode 'true'/" -i ./etc/config/dockerman 2>/dev/null
sed -e 's/config setting/config verysync/' -i ./etc/config/verysync
sync
echo "完成"
echo
fi
echo "修改配置文件 ... "
# drop installer leftovers, relink docker data to the shared partition
rm -f "./etc/rc.local.orig" "./usr/bin/mk_newpart.sh" "./etc/part_size"
rm -rf "./opt/docker" && ln -sf "${SHFS}/docker" "./opt/docker"
# write fstab for the freshly formatted rootfs + the boot partition label
cat > ./etc/fstab <<EOF
UUID=${NEW_ROOT_UUID} / btrfs compress=zstd 0 1
LABEL=${BOOT_LABEL} /boot vfat defaults 0 2
#tmpfs /tmp tmpfs defaults,nosuid 0 0
EOF
cat > ./etc/config/fstab <<EOF
config global
option anon_swap '0'
option anon_mount '1'
option auto_swap '0'
option auto_mount '1'
option delay_root '5'
option check_fs '0'
config mount
option target '/overlay'
option uuid '${NEW_ROOT_UUID}'
option enabled '1'
option enabled_fsck '1'
option fstype 'btrfs'
option options 'compress=zstd'
config mount
option target '/boot'
option label '${BOOT_LABEL}'
option enabled '1'
option enabled_fsck '0'
option fstype 'vfat'
EOF
rm -f ./etc/bench.log
cat >> ./etc/crontabs/root << EOF
37 5 * * * /etc/coremark.sh
EOF
# serial console device names differ on Amlogic kernels
sed -e 's/ttyAMA0/ttyAML0/' -i ./etc/inittab
sed -e 's/ttyS0/tty0/' -i ./etc/inittab
# stamp /etc/shadow with today's date (days since epoch) and ensure an
# sshd account exists for dropbear/openssh
sss=$(date +%s)
ddd=$((sss/86400))
sed -e "s/:0:0:99999:7:::/:${ddd}:0:99999:7:::/" -i ./etc/shadow
if [ `grep "sshd:x:22:22" ./etc/passwd | wc -l` -eq 0 ];then
echo "sshd:x:22:22:sshd:/var/run/sshd:/bin/false" >> ./etc/passwd
echo "sshd:x:22:sshd" >> ./etc/group
echo "sshd:x:${ddd}:0:99999:7:::" >> ./etc/shadow
fi
if [ $BR_FLAG -eq 1 ];then
#cp ${P2}/etc/config/passwall_rule/chnroute ./etc/config/passwall_rule/ 2>/dev/null
#cp ${P2}/etc/config/passwall_rule/gfwlist.conf ./etc/config/passwall_rule/ 2>/dev/null
sync
echo "完成"
echo
fi
# snapshot the final configuration into .reserved for later restores
eval tar czf .reserved/openwrt_config.tar.gz "${BACKUP_LIST}" 2>/dev/null
rm -f ./etc/part_size ./usr/bin/mk_newpart.sh
# enable IRQ balancing helper on first boot when the image ships it
if [ -x ./usr/sbin/balethirq.pl ];then
if grep "balethirq.pl" "./etc/rc.local";then
echo "balance irq is enabled"
else
echo "enable balance irq"
sed -e "/exit/i\/usr/sbin/balethirq.pl" -i ./etc/rc.local
fi
fi
# one-shot rc.local: enable dockerd on first boot, then restore the original
mv ./etc/rc.local ./etc/rc.local.orig
cat > ./etc/rc.local <<EOF
if [ ! -f /etc/rc.d/*dockerd ];then
/etc/init.d/dockerd enable
/etc/init.d/dockerd start
fi
mv /etc/rc.local.orig /etc/rc.local
exec /etc/rc.local
exit
EOF
chmod 755 ./etc/rc.local*
# Check whether a newer mainline u-boot can be written to EMMC: the new
# rootfs ships a copy at ".${CUR_MAINLINE_UBOOT}" (cwd is NEW_ROOT_MP) and
# its md5 differs from the one currently recorded. On confirmation, write
# it to the raw EMMC device (first 444 bytes, then 512-byte blocks from
# offset 1) and record path+md5 in flippy-openwrt-release.
if [ "${CUR_MAINLINE_UBOOT}" != "" ];then
UPDATE_UBOOT=0
if [ -f ".${CUR_MAINLINE_UBOOT}" ] && [ "${CUR_MAINLINE_UBOOT_MD5SUM}" != "${NEW_MAINLINE_UBOOT_MD5SUM}" ];then
cat <<EOF
----------------------------------------------------------------------
发现了新版的主线 bootloader (Mainline u-boot), 可以刷入 EMMC
----------------------------------------------------------------------
EOF
while :;do
read -p "是否要更新 bootloader? y/n " yn
case $yn in
y|Y) UPDATE_UBOOT=1
break
;;
n|N) UPDATE_UBOOT=0
break
;;
esac
done
fi
if [ $UPDATE_UBOOT -eq 1 ];then
echo "************************************************************************"
echo "写入新的 bootloader ..."
EMMC_PATH=${NEW_ROOT_PATH%%p*}
echo "dd if=.${CUR_MAINLINE_UBOOT} of=${EMMC_PATH} conv=fsync bs=1 count=444"
dd if=".${CUR_MAINLINE_UBOOT}" of=${EMMC_PATH} conv=fsync bs=1 count=444
echo "dd if=.${CUR_MAINLINE_UBOOT} of=${EMMC_PATH} conv=fsync bs=512 skip=1 seek=1"
# Fix: this dd previously read ".${MAINLINE_UBOOT}", a variable clobbered
# when the NEW image's flippy-openwrt-release was sourced earlier; the
# existence check above and the echoed command both use CUR_MAINLINE_UBOOT,
# so the verified file is the one that must be written.
dd if=".${CUR_MAINLINE_UBOOT}" of=${EMMC_PATH} conv=fsync bs=512 skip=1 seek=1
echo "MAINLINE_UBOOT=${CUR_MAINLINE_UBOOT}" >> ./etc/flippy-openwrt-release
echo "MAINLINE_UBOOT_MD5SUM=${NEW_MAINLINE_UBOOT_MD5SUM}" >> ./etc/flippy-openwrt-release
sync
echo "完成"
echo "************************************************************************"
echo
else
echo "MAINLINE_UBOOT=${CUR_MAINLINE_UBOOT}" >> ./etc/flippy-openwrt-release
echo "MAINLINE_UBOOT_MD5SUM=${CUR_MAINLINE_UBOOT_MD5SUM}" >> ./etc/flippy-openwrt-release
sync
fi
fi
cd ${WORK_DIR}
echo "开始复制数据, 从 ${P1} 到 /boot ..."
cd /boot
echo -n "删除旧的 boot 文件 ..."
# keep a copy of the current uEnv.txt so boot parameters survive the wipe
[ -f /tmp/uEnv.txt ] || cp uEnv.txt /tmp/uEnv.txt
rm -rf *
echo "完成"
echo -n "复制新的 boot 文件 ... "
(cd ${P1} && tar cf - . ) | tar mxf -
# ensure the right u-boot variant exists; on EMMC boot, switch the
# boot-emmc.* scripts into place and drop the SD autoscripts
if [ "$BOOT_LABEL" == "BOOT" ];then
[ -f u-boot.ext ] || cp u-boot.emmc u-boot.ext
elif [ "$BOOT_LABEL" == "EMMC_BOOT" ];then
[ -f u-boot.emmc ] || cp u-boot.ext u-boot.emmc
rm -f aml_autoscript* s905_autoscript*
mv -f boot-emmc.ini boot.ini
mv -f boot-emmc.cmd boot.cmd
mv -f boot-emmc.scr boot.scr
fi
sync
echo "完成"
echo
echo -n "更新 boot 参数 ... "
# preferred: reuse the saved uEnv.txt, replacing only its last line with
# the APPEND pointing at the new rootfs UUID
if [ -f /tmp/uEnv.txt ];then
lines=$(wc -l < /tmp/uEnv.txt)
lines=$(( lines - 1 ))
head -n $lines /tmp/uEnv.txt > uEnv.txt
cat >> uEnv.txt <<EOF
APPEND=root=UUID=${NEW_ROOT_UUID} rootfstype=btrfs rootflags=compress=zstd console=ttyAML0,115200n8 console=tty0 no_console_suspend consoleblank=0 fsck.fix=yes fsck.repair=yes net.ifnames=0 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory swapaccount=1
EOF
# fallback: rebuild uEnv.txt from the known dtb file name
elif [ "${CUR_FDTFILE}" != "" ];then
cat > uEnv.txt <<EOF
LINUX=/zImage
INITRD=/uInitrd
FDT=${CUR_FDTFILE}
APPEND=root=UUID=${NEW_ROOT_UUID} rootfstype=btrfs rootflags=compress=zstd console=ttyAML0,115200n8 console=tty0 no_console_suspend consoleblank=0 fsck.fix=yes fsck.repair=yes net.ifnames=0 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory swapaccount=1
EOF
# last resort: ask the operator to pick a dtb from the new image
else
FDT_OK=0
while [ $FDT_OK -eq 0 ];do
echo "-----------------------------------------------------------------------------"
(cd ${P2}/dtb/amlogic && ls *.dtb)
echo "-----------------------------------------------------------------------------"
read -p "请手动输入 dtb 文件名: " CUR_FDTFILE
if [ -f "${P2}/dtb/amlogic/${CUR_FDTFILE}" ];then
FDT_OK=1
else
echo "该 dtb 文件不存在!请重新输入!"
fi
done
cat > uEnv.txt <<EOF
LINUX=/zImage
INITRD=/uInitrd
FDT=${CUR_FDTFILE}
APPEND=root=UUID=${NEW_ROOT_UUID} rootfstype=btrfs rootflags=compress=zstd console=ttyAML0,115200n8 console=tty0 no_console_suspend consoleblank=0 fsck.fix=yes fsck.repair=yes net.ifnames=0 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory swapaccount=1
EOF
fi
sync
echo "完成"
echo
# detach everything and tell the operator how to finish the upgrade
cd $WORK_DIR
umount -f ${P1} ${P2}
losetup -D
rmdir ${P1} ${P2}
echo
echo "----------------------------------------------------------------------"
if [ $BOOT_CHANGED -lt 0 ];then
echo "升级已完成, 请输入 poweroff 命令关闭电源, 然后移除原有的 TF卡 或 U盘, 再启动系统!"
else
echo "升级已完成, 请输入 reboot 命令重启系统!"
fi
echo "----------------------------------------------------------------------"
| true
|
aef57d1484fde3d85bbd8c5dfcff7a89fa06465f
|
Shell
|
julialudac/forms_input_recording
|
/runTests.sh
|
UTF-8
| 729
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
# Run the unit test suite (pytest), then the component test, stopping with
# exit code 1 on the first failure.
# Run unit tests
echo "Running unit tests..."
if ! pytest; then
    echo "Unit tests failed!"
    exit 1
fi
# Run component tests
echo "Running component test..."
# Prefer "python"; fall back to "python3" when it is absent.
python_cmd=python
if ! command -v python >/dev/null; then
    python_cmd=python3
fi
# TODO: Now there is only one component test...
# Normalise newlines to spaces so the diff ignores line-ending layout.
# (tr reads the file directly instead of the old "cat | tr".)
expected=$(tr '\n' ' ' < component_tests/multiple_forms_out.txt)
actual=$($python_cmd main.py < component_tests/multiple_forms_in.txt | tr '\n' ' ')
if [ "$expected" != "$actual" ]; then
    echo "Component test failed!"
    echo "Expected value: $expected"
    # Fix: quote $actual so the debug dump is not word-split/glob-expanded.
    # NOTE(review): toto.txt looks like a leftover debug artifact — consider
    # removing it.
    echo "$actual" > toto.txt
    echo "Actual value: $actual"
    exit 1
fi
echo "All tests passed!"
exit 0
| true
|
14b99165d10601beb371396babad237009a07659
|
Shell
|
mkostas/vagrant_nginx_php7.1
|
/bootstrap.sh
|
UTF-8
| 3,265
| 2.90625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Vagrant provisioner: nginx + MySQL + phpMyAdmin + PHP 7.1 FPM + Composer.
echo -e "\n\n--- Updating packages list ---\n\n"
apt-get update
# Link /var/www to the shared /vagrant folder (only on first provision).
if ! [ -L /var/www ]; then
rm -rf /var/www
ln -fs /vagrant /var/www
fi
# Install nginx
echo -e "\n\n--- Install nginx ---\n\n"
apt-get install -y nginx
# Install mysql
echo -e "\n\n--- Install mysql ---\n\n"
# Pre-seed debconf so the mysql/phpmyadmin installs run unattended
# (root password "root"; skip phpmyadmin's webserver reconfiguration).
debconf-set-selections <<< "mysql-server mysql-server/root_password password root"
debconf-set-selections <<< "mysql-server mysql-server/root_password_again password root"
debconf-set-selections <<< "phpmyadmin phpmyadmin/dbconfig-install boolean true"
debconf-set-selections <<< "phpmyadmin phpmyadmin/app-password-confirm password root"
debconf-set-selections <<< "phpmyadmin phpmyadmin/mysql/admin-pass password root"
debconf-set-selections <<< "phpmyadmin phpmyadmin/mysql/app-pass password root"
debconf-set-selections <<< "phpmyadmin phpmyadmin/reconfigure-webserver multiselect none"
apt-get install -y mysql-server phpmyadmin
# Install php-fpm
echo -e "\n\n--- Install php-fpm ---\n\n"
add-apt-repository -y ppa:ondrej/php && sudo apt-get update
apt-get install -y php7.1-cli php7.1-fpm php7.1-mysql php7.1-curl php-memcached php7.1-dev php7.1-mcrypt php7.1-sqlite3 php7.1-mbstring
apt-cache search php7.1
# php.ini
echo -e "\n\n--- php.ini changes ---\n\n"
# NOTE(review): cgi.fix_pathinfo=1 can expose PHP to path-info spoofing if
# nginx forwards unvalidated paths — confirm this is intended for this box.
sed -i.bak 's/^;cgi.fix_pathinfo.*$/cgi.fix_pathinfo = 1/g' /etc/php/7.1/fpm/php.ini
service php7.1-fpm restart
# Configure host
echo -e "\n\n--- Configuring host ---\n\n"
# Quoted 'EOF' delimiter: the nginx $variables below are written literally.
cat << 'EOF' > /etc/nginx/sites-available/default
server {
# Port that the web server will listen on.
listen 80;
# Host that will serve this project.
server_name localhost;
# Useful logs for debug.
access_log /var/log/nginx/localhost_access.log;
error_log /var/log/nginx/localhost_error.log;
rewrite_log on;
# The location of our projects public directory.
root /usr/share/nginx/html;
# Point index to the Laravel front controller.
index index.php index.html;
location / {
# URLs to attempt, including pretty ones.
try_files $uri $uri/ /index.php?$query_string;
}
# Remove trailing slash to please routing system.
# if (!-d $request_filename) {
# rewrite ^/(.+)/$ /$1 permanent;
# }
# PHP FPM configuration.
location ~* \.php$ {
fastcgi_pass unix:/run/php/php7.1-fpm.sock;
fastcgi_index index.php;
fastcgi_split_path_info ^(.+\.php)(.*)$;
include /etc/nginx/fastcgi_params;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
}
# We don't need .ht files with nginx.
# location ~ /\.ht {
# deny all;
# }
# Set header expirations on per-project basis
# location ~* \.(?:ico|css|js|jpe?g|JPG|png|svg|woff)$ {
# expires 365d;
# }
}
EOF
# Symbolic link for phpmyadmin
echo -e "\n\n--- Symbolic link for phpmyadmin ---\n\n"
sudo ln -s /usr/share/phpmyadmin /usr/share/nginx/html
# Basic packages installation
echo -e "\n\n--- Install base packages ---\n\n"
apt-get -y install curl git -y
# Composer
echo -e "\n\n--- Install Composer ---\n\n"
curl -sS https://getcomposer.org/installer | php
sudo mv composer.phar /usr/local/bin/composer
# Restart servers
echo -e "\n\n--- Restart servers ---\n\n"
service nginx restart
service php7.1-fpm restart
| true
|
30b3dbcca47182cfa7414ab8e45399408d1658fb
|
Shell
|
killangell/micros
|
/user/partition/library/partition/user_partition_db_opt.sh
|
UTF-8
| 1,144
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/sh
source db_all_opt.sh
#@in 1: Partition name
#@in 2: Partition size
#@in 3: Partition location
#@in 4: Partition filesystem
# Persist the full record for one partition into the partition DB.
# The mount point is NOT passed in: it is looked up from the existing DB
# entry and written back unchanged alongside the new values.
# $1: partition name  $2: size  $3: location  $4: filesystem type
# Returns $TRUE (presumably defined by the sourced db_all_opt.sh — TODO confirm).
function user_partition_set_db_info()
{
    local name=$1
    local size="$2"
    local loca="$3"
    local fs_type="$4"
    # placeholder value; overwritten by the DB lookup on the next line
    local mount_point="null"
    db_get_partition_mount_point $name mount_point
    #echo $FUNCNAME--$name,$size,$loca,$fs_type,$mount_point
    db_set_partition_full_info $name $size $loca $fs_type $mount_point
    return $TRUE
}
#@out 1: ISO device index
function user_partition_get_isodev_index()
{
local dest_drive="null"
local disk_count=0
local lvm_count=0
local isodev_num=0
db_get_partition_count_by_flag "disk" disk_count
db_get_partition_count_by_flag "lvm" lvm_count
if [ $lvm_count -eq 0 ];then
lvm_count=0
else
lvm_count=1
fi
let isodev_num=$disk_count+$lvm_count+1
eval $1=$isodev_num
return $TRUE
}
#@out 1: ISO device
function user_partition_get_isodev()
{
local dest_drive="null"
local isodev_index=0
db_get_sysinfo_dest_drive dest_drive
user_partition_get_isodev_index isodev_index
isodev="$dest_drive$isodev_index"
eval $1="$isodev"
return $TRUE
}
| true
|
16f24a6256479cdc812581af5f22571c62d41cde
|
Shell
|
ctmcisco/pnp-starterkit-setup
|
/_setup-hr.sh
|
UTF-8
| 966
| 2.96875
| 3
|
[] |
no_license
|
# Provision the "Human Resources" SharePoint team site and wire it into the
# intranet hub.  Relies on variables ($hrUrl, $prefix, $hubsiteId, $company,
# $checkPoint) and helpers (msg/sub/success/warning/error, isError,
# setupCollabExtensions) defined by the including setup script — TODO confirm.
siteUrl=$hrUrl
alias=$(echo $prefix)hr
msg "Provisioning HR site at $siteUrl..."
# '|| true' keeps a "site not found" error from aborting; the error payload
# is inspected below instead.
site=$(o365 spo site get --url $siteUrl --output json || true)
# NOTE(review): this executes isError's *stdout* as a command — it only works
# if isError prints "true"/"false"; verify against the helper's definition.
if $(isError "$site"); then
  o365 spo site add --type TeamSite --url $siteUrl --title "Human Resources" --alias $alias >/dev/null
  success 'DONE'
else
  warning 'EXISTS'
fi
sub '- Connecting to the hub site...'
o365 spo hubsite connect --url $siteUrl --hubSiteId $hubsiteId
success 'DONE'
sub '- Applying theme...'
o365 spo theme apply --name "$company HR" --webUrl $siteUrl >/dev/null
success 'DONE'
setupCollabExtensions $siteUrl
sub '- Setting logo...'
# Resolve the O365 group id for the site's backing group via its mailNickname.
groupId=$(o365 graph o365group list --mailNickname $alias -o json | jq -r '.[] | select(.mailNickname == "'"$alias"'") | .id')
if [ -z "$groupId" ]; then
  error 'ERROR'
  error "Office 365 Group '$alias' not found"
  exit 1
fi
o365 graph o365group set --id $groupId --logoPath ./resources/images/logo_hr.png
success 'DONE'
success 'DONE'
echo
# Progress marker consumed by the surrounding setup framework — TODO confirm.
checkPoint=400
| true
|
c122ad4443cdde728b7fd8e763440c48432eaee2
|
Shell
|
awfeequdng/Final_Project_6_175
|
/scemi/sim/run_asm.sh
|
UTF-8
| 923
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the RISC-V assembly test suite against a compiled processor simulator.
# For each test: copy its .vmh memory image to mem.vmh, launch the bluesim
# DUT in the background, then drive it with the ./tb test bench.
# Usage: ./run_asm.sh <proc name>
if [ $# -ne 1 ]; then
    echo "Usage ./run_asm.sh <proc name>" >&2
    exit 1
fi
simdut="${1}_dut"

asm_tests=(
    simple
    add addi
    and andi
    auipc
    beq bge bgeu blt bltu bne
    j jal jalr
    lw
    lui
    or ori
    sw
    sll slli
    slt slti
    sra srai
    srl srli
    sub
    xor xori
    bpred_bht bpred_j bpred_j_noloop bpred_ras
    cache cache_conflict stq
)

vmh_dir=../../programs/build/assembly/vmh
log_dir=logs
wait_time=3  # seconds to let bsim initialize before attaching the test bench

# create bsim log dir
mkdir -p "${log_dir}"
# kill previous bsim if any
pkill bluetcl
# run each test
for test_name in "${asm_tests[@]}"; do
    # copy vmh file
    mem_file="${vmh_dir}/${test_name}.riscv.vmh"
    if [ ! -f "$mem_file" ]; then
        echo "ERROR: $mem_file does not exist, you need to first compile" >&2
        exit 1
    fi
    cp "${mem_file}" mem.vmh
    # run test
    ./${simdut} > "${log_dir}/${test_name}.log" & # run bsim, redirect outputs to log
    sleep "${wait_time}" # wait for bsim to setup
    ./tb "$mem_file" # run test bench
    echo ""
done
| true
|
ab7210160db2a56a697427cd5b84d6e66e7e268a
|
Shell
|
diegoximenes/dotfiles
|
/config/polybar/scripts/network.sh
|
UTF-8
| 392
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/bash
# Polybar module: print the active network connection.
# Ethernet interfaces (en*) show a monitor icon + interface name; otherwise
# the in-use Wi-Fi SSID (satellite icon) or "down" is shown.
# NOTE(review): if several default routes exist, $default_route holds multiple
# words and the en* match fails — assumes a single default route; confirm.
default_route="$(ip route | awk '/^default/' | awk '{print $5}')"
if [[ "$default_route" =~ ^en(.+)$ ]]; then
    # ethernet
    echo "🖥 $default_route"
else
    # wlan or wwan
    # nmcli -g prints IN-USE:SSID pairs; the active network is marked with '*'.
    wifi=$(nmcli -g IN-USE,SSID dev wifi | grep "^\*")
    if [[ "$wifi" =~ ^\*:(.+)$ ]]; then
        ssid="${BASH_REMATCH[1]}"
        echo "🛰 $ssid"
    else
        echo "🛰 down"
    fi
fi
| true
|
7bae4ce697fcb64a06f18cb05e09c4c33cd75bc9
|
Shell
|
petronny/aur3-mirror
|
/telepathy-kde-nepomuk-service-git/PKGBUILD
|
UTF-8
| 1,142
| 2.765625
| 3
|
[] |
no_license
|
# Maintainer: George Brooke <george+arch.aur@george-brooke.co.uk>
# Contributor: Andrea Scarpino <andrea@archlinux.org>
# PKGBUILD: build ktp-nepomuk-service from its KDE git repository.
pkgname=telepathy-kde-nepomuk-service-git
_gitname="ktp-nepomuk-service"
pkgver=e1be620
pkgrel=1
epoch=1
pkgdesc="Nepomuk integration service for Telepathy"
arch=('i686' 'x86_64')
url="https://projects.kde.org/projects/playground/network/telepathy/ktp-nepomuk-service"
license=('GPL')
depends=('kdebase-runtime' 'telepathy-qt')
makedepends=('cmake' 'automoc4' 'git')
conflicts=('telepathy-nepomuk-service-git')
source="git://anongit.kde.org/ktp-nepomuk-service.git"
md5sums=('SKIP')

# Version string from git: short commit hash with '-' mapped to '.'.
pkgver() {
  cd $_gitname
  git describe --always | sed 's|-|.|g'
  #git describe --always | sed 's|-|.|g;s|v||'
}

# Out-of-tree cmake build against the Qt4 toolchain binaries.
build() {
  cd ${srcdir}
  msg "Starting make..."
  rm -rf ${srcdir}/build
  mkdir ${srcdir}/build
  cd ${srcdir}/build
  cmake ../$_gitname \
    -DCMAKE_INSTALL_PREFIX=/usr \
    -DQT_QMAKE_EXECUTABLE=/usr/bin/qmake-qt4 \
    -DQT_MOC_EXECUTABLE=/usr/bin/moc-qt4 \
    -DQT_RCC_EXECUTABLE=/usr/bin/rcc-qt4 \
    -DQT_UIC_EXECUTABLE=/usr/bin/uic-qt4 \
    -DCMAKE_POSITION_INDEPENDENT_CODE=on
  make
}

# Stage the build into the package directory.
package() {
  cd ${srcdir}/build
  make DESTDIR=${pkgdir} install
}
| true
|
2a5893d27cea62a777011d9d89da9039069fc707
|
Shell
|
stuartcampbell/lightsource2-recipes
|
/recipes-tag/epics-base/build.sh
|
UTF-8
| 1,081
| 3.171875
| 3
|
[] |
permissive
|
#!/bin/bash
# conda build script for EPICS Base: compile, expose shared libraries and a
# few CLI utilities under $PREFIX, and register activate/deactivate hooks
# that export EPICS_BASE / EPICS_HOST_ARCH.
install -d "$PREFIX/bin"
install -d "$PREFIX/lib"
install -d "$PREFIX/epics"

make -j"$(getconf _NPROCESSORS_ONLN)"

# NOTE(review): the build output is read from $PREFIX/epics below — this
# assumes the source tree was unpacked/built there; confirm in the recipe.
EPICS_BASE=$PREFIX/epics
EPICS_HOST_ARCH=$(startup/EpicsHostArch)

# Copy libraries into $PREFIX/lib (only one of the two globs matches per OS).
cp -av "$PREFIX/epics/lib/$EPICS_HOST_ARCH"/lib*so* "$PREFIX/lib" 2>/dev/null || : # linux
cp -av "$PREFIX/epics/lib/$EPICS_HOST_ARCH"/lib*dylib* "$PREFIX/lib" 2>/dev/null || : # osx

# Setup symlinks for utilities
BINS="caget caput camonitor softIoc caRepeater cainfo"
cd "$PREFIX/bin" || exit 1   # abort rather than symlinking into the wrong dir
for file in $BINS ; do
    ln -s "../epics/bin/$EPICS_HOST_ARCH/$file" .
done

# deal with env export
mkdir -p "$PREFIX/etc/conda/activate.d"
mkdir -p "$PREFIX/etc/conda/deactivate.d"

ACTIVATE=$PREFIX/etc/conda/activate.d/epics_base.sh
DEACTIVATE=$PREFIX/etc/conda/deactivate.d/epics_base.sh
ETC=$PREFIX/etc

# set up
echo "export EPICS_BASE=$EPICS_BASE" >> "$ACTIVATE"
echo "export EPICS_HOST_ARCH=$EPICS_HOST_ARCH" >> "$ACTIVATE"
# tear down
echo "unset EPICS_BASE" >> "$DEACTIVATE"
echo "unset EPICS_HOST_ARCH" >> "$DEACTIVATE"

# clean up after self
unset ACTIVATE
unset DEACTIVATE
unset ETC
| true
|
af73a6e2363e31f048234920e21d7a4f1f6a86f7
|
Shell
|
polinar68/buildaix-polly
|
/opt/buildaix/templates/fileset.ext.unpre_i
|
UTF-8
| 910
| 3.0625
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-rsa-md4",
"HPND-sell-variant",
"LicenseRef-scancode-zeusbench",
"NTP",
"metamail",
"Beerware",
"LicenseRef-scancode-rsa-1990",
"RSA-MD",
"Spencer-94",
"LicenseRef-scancode-other-permissive",
"MIT"
] |
permissive
|
#!/usr/bin/ksh
# $Id: fileset.ext.unpre_i 264 2017-10-05 09:12:05Z michael $
# main purpose of this script is to undo anything it might have done
# in a pre_i/pre_u script. Ideally, it has not done anything.
# as in the other un* files, if unconfig_d exists this will not be used
# during installp -u operations
# so the focus should be on verifying that the system is 'clean'
# If the system is not clean - MAYBE - installp should stop and not uninstall
# other things.
# This is the moment to restore files saved before a failed install or a
# normal uninstall started.
# (FIX: the previous revision had "uninstall started." on a line without a
# leading '#', so the shell tried to execute it as a command.)
# on success exit with 0 status, non-zero will abort the install process
[[ ! -z ${VERBOSE} ]] && \
	print -- "+- `pwd` - $0 -+"
# The line below is as wide as the lines printed during normal installp installs
# it is provided as a reference
# print -- "+-----------------------------------------------------------------------------+"
exit 0
| true
|
e4a9322e6b42152a9de23bdb09001a4d6155b5b0
|
Shell
|
mbolivar/bootrom-tools
|
/test/test-tftf
|
UTF-8
| 2,617
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
#------------------------------------------------------------------------------
# Copyright (c) 2014-2015 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#------------------------------------------------------------------------------
#
# Simple (developer) test frame for exercising create-tftf and display-tftf
#
# The details of create-tftf are covered in "ES3 Bridge ASIC Boot ROM High
# Level Design"
#
# Usage:
# test-tftf
#
# (To use, add/remove options to exercise different aspects of the
# applications.)
#
# make our scratch folder (mkdir -p is a no-op if it already exists)
mkdir -p ./build

echo ------------------------------------
echo test create-tftf...
echo ------------------------------------
../scripts/create-tftf \
    -v \
    --code code1.txt \
    --name "twas twillig and the slithy toves did gyre and gimbal in the wabe" \
    --data data1.txt \
    --code code2.txt --offset 0x10007000 \
    --load 0x10000000 \
    --start 0x10000200 \
    --out build/foo.tftf
echo
echo ------------------------------------
echo test display-tftf...
echo "(The output should match the above)"
echo ------------------------------------
../scripts/display-tftf build/foo.tftf
| true
|
acd7ed42e3ef37cc788628e2e60807e8e34ca150
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/binkd/PKGBUILD
|
UTF-8
| 2,045
| 2.671875
| 3
|
[] |
no_license
|
# Contributor: Lex Rivera aka x-demon <aur@x-demon.org>
# Contributor: Mantas Mikulėnas <grawity@gmail.com>
# PKGBUILD: binkd Fidonet mailer, pinned to a specific upstream commit,
# with systemd units and an FTN spool directory layout.
pkgname=binkd
pkgver=1.0.4
_pkgcommit=c335d3e86e3caea04072dde0968d08cb31d6a1e0
pkgrel=1
pkgdesc="Binkley protocol daemon for transferring files between Fidonet systems"
arch=('i686' 'x86_64')
url="https://github.com/pgul/binkd"
license=('GPL')
backup=("etc/binkd/binkd.conf")
source=("git+https://github.com/pgul/binkd.git#commit=$_pkgcommit"
        "binkd.service"
        "binkd@.service"
        "binkd.socket"
        "binkd.tmpfiles")
install="binkd.install"
sha256sums=('SKIP'
            '3f2ddf00b1552ad90a7320c7d904afab13fb2de525568190c80c7d87f67cc0c8'
            '2ebaebb7b525f9eaa1915dfeabba1626422d300f9820981225509203e6dcbc59'
            '2ddcb26a54f7a0f9a8ab5d8819431fb1f2bd961169c6fe5e7afa7f4c89e11786'
            '5032916082884a938978f0d5168fd053baab230bd34e84008ae637515e04a685')

# Derive the version from the checked-out tag (binkd-X.Y.Z -> X.Y.Z.rN…).
pkgver() {
  cd "$srcdir"
  git describe --tags | sed 's/^binkd-//; s/-/.r/; s/[-_]/./g'
}

build() {
  cd "$srcdir"
  # Upstream keeps the unix build glue under mkfls/unix; copy it to the root.
  cp mkfls/unix/{Makefile*,configure*,install-sh,mkinstalldirs} .
  ./configure \
    --prefix=/usr \
    --sbindir=/usr/bin \
    --mandir=/usr/share/man \
    --sysconfdir=/etc \
    --with-debug \
    --with-zlib \
    ;
  make
}

package() {
  cd "$srcdir"
  make DESTDIR="$pkgdir" install
  # Arch ships everything in /usr/bin; relocate sbin and keep a stable name.
  mv "$pkgdir/usr/sbin" "$pkgdir/usr/bin"
  ln -sf "binkd-$pkgver" "$pkgdir/usr/bin/binkd"
  install -dm0755 "$pkgdir/etc/binkd"
  mv "$pkgdir/etc/binkd.conf-dist" "$pkgdir/etc/binkd/binkd.conf"
  # Standard FTN spool tree used by the shipped default configuration.
  for dir in inbound{,-temp,-unsecure} outbound/fidonet longbox personalboxes nodelist; do
    mkdir -p "$pkgdir/var/spool/ftn/$dir"
  done
  cd "$srcdir"
  install -Dm0644 binkd.service "$pkgdir/usr/lib/systemd/system/binkd.service"
  install -Dm0644 binkd@.service "$pkgdir/usr/lib/systemd/system/binkd@.service"
  install -Dm0644 binkd.socket "$pkgdir/usr/lib/systemd/system/binkd.socket"
  install -Dm0644 binkd.tmpfiles "$pkgdir/usr/lib/tmpfiles.d/binkd.conf"
}

# vim: ts=2:sw=2:et
| true
|
ee2a163fb17fc80614bb7c3b93581c9743998bec
|
Shell
|
metrue/fx
|
/scripts/test_cli.sh
|
UTF-8
| 708
| 3.484375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Smoke-test the fx CLI: for every language named in $1 (whitespace-separated),
# deploy/list/tear down a function, build its image, and export it.
set -e
fx="./build/fx"
service='fx-service'

# Deploy a test function on the given port, list services, then remove it.
# $1: language  $2: host port
run() {
    local lang=$1
    local port=$2
    # localhost
    $fx up --name ${service}_${lang} --port ${port} --healthcheck test/functions/func.${lang}
    $fx list
    $fx down ${service}_${lang}
}

# Build a container image for the language's test function. $1: language $2: image name
build_image() {
    local lang=$1
    local name=$2
    $fx image build -n ${name} test/functions/func.${lang}
}

# Export the function's image artifacts to a directory. $1: language $2: output dir
export_image() {
    local lang=$1
    local dir=$2
    $fx image export -o ${dir} test/functions/func.${lang}
}

# main
port=20000
# ${1} is intentionally unquoted so a space-separated list splits into words.
for lang in ${1}; do
    run $lang $port
    # safe under set -e: port is always > 0, so (( )) returns success
    ((port++))
    build_image $lang "test-fx-image-build-${lang}"
    mkdir -p /tmp/${lang}/images
    export_image ${lang} /tmp/${lang}/images
    rm -rf /tmp/${lang}/images
done
# no background jobs are started above, so this is effectively a no-op — TODO confirm
wait
| true
|
0279a841880b818bf4799459c3613346e71c9355
|
Shell
|
huzhe007/shell
|
/docker/mysql_5_7.sh
|
UTF-8
| 672
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/bash
# Run a MySQL 5.7 container with utf8mb4 defaults and a persistent volume.
# Connect with: mysql -h 127.0.0.1 -P 3306 -u root -p
docker run --restart=always -itd --name mysql5.7 \
     -p 3306:3306 -e MYSQL_ROOT_PASSWORD=123456 \
     -e MYSQL_USER=frank -e MYSQL_PASSWORD=123456 \
     -v ~/volumes/mysql:/var/lib/mysql mysql:5.7.33 \
     --character-set-server=utf8mb4 --collation-server=utf8mb4_unicode_ci

# Granting remote login permission for user "frank":
# 1. find the CONTAINER ID:            docker ps
# 2. enter the container:              docker exec -it mysql5.7 /bin/bash
# 3. log in to mysql:                  mysql -u root -p
#    check the MySQL version:          select version();
# 4. change remote connection auth:    alter user 'root'@'%' identified with mysql_native_password by '123456';
| true
|
8887fe786666341df695420511d49f463b0b9188
|
Shell
|
cddknight/dialsys
|
/gauge/buildpkg.sh.in
|
UTF-8
| 1,241
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# Build a distribution package for @PACKAGE@ @VERSION@ (autoconf template):
# a .deb via dpkg-buildpackage on Debian-family systems, or a signed RPM via
# rpmbuild on Fedora/CentOS/RHEL/SUSE.
source /etc/os-release

if [ "$ID" == "debian" ] || [ "$ID" == "ubuntu" ] || [ "$ID" == "raspbian" ]
then
	# Debian path: make a fresh dist tarball, unpack it next to this tree,
	# graft in the debian/ directory and build the package there.
	ORIGPWD=$PWD
	rm -f @PACKAGE@-@VERSION@.tar.bz2
	make dist-bzip2
	cd ..
	rm -rf @PACKAGE@*@VERSION@*
	tar xjf $ORIGPWD/@PACKAGE@-@VERSION@.tar.bz2
	cd @PACKAGE@-@VERSION@
	cp -r $ORIGPWD/debian .
	dpkg-buildpackage
elif [ "$ID" == "fedora" ] || [ "$ID" == "centos" ] || [ "$ID" == "rhel" ] || [ "$ID" == "suse" ]
then
	# RPM path: locate (or search for) the user's rpmbuild tree.
	if [ -d $HOME/gitroot/rpmbuild ]
	then
		DIR=$HOME/gitroot/rpmbuild
	elif [ -d $HOME/trunk/source/rpmbuild ]
	then
		DIR=$HOME/trunk/source/rpmbuild
	elif [ -d $HOME/source/rpmbuild ]
	then
		DIR=$HOME/source/rpmbuild
	else
		DIR=$(find $HOME -name rpmbuild 2> /dev/null)
	fi
	# First run only: point rpmbuild at $DIR and tag packages with the distro.
	if [ ! -e $HOME/.rpmmacros ]
	then
		echo "%_topdir $DIR" > $HOME/.rpmmacros
		echo "%dist .$ID" >> $HOME/.rpmmacros
	fi
	rm -f @PACKAGE@-@VERSION@.tar.bz2
	make dist-bzip2
	cp @PACKAGE@.spec $DIR/SPECS/
	cp @PACKAGE@-@VERSION@.tar.bz2 $DIR/SOURCES/
	cd $DIR/SPECS
	rpmbuild -ba -v @PACKAGE@.spec
	# Sign both the binary and source RPMs.
	rpmsign --addsign $DIR/RPMS/$(uname -m)/@PACKAGE@-@VERSION@-@REVISION@.*.$(uname -m).rpm
	rpmsign --addsign $DIR/SRPMS/@PACKAGE@-@VERSION@-@REVISION@.*.src.rpm
else
	echo "Unknown distribution."
	exit 1
fi
exit 0
| true
|
e23936f0991482e6e345c2c7d013577f511be261
|
Shell
|
bwiessner/install_latest_slack_osx_app
|
/install_latest_slack_osx_app.sh
|
UTF-8
| 2,182
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash -v
# Install or update Slack on macOS to the latest release.
# FIX: the previous revision ended with an unmatched 'fi', which made the
# whole script a syntax error (bash refused to run any of it).
#if you want slack to quit - in use with a jamf notifcation policy unhash next line
#pkill Slack*
#gets current logged in user
consoleuser=$(ls -l /dev/console | cut -d " " -f4)
# NOTE(review): slackOn was referenced but never assigned in the original;
# it is initialized empty here to keep the original (no-relaunch) behavior.
slackOn=""
APP_NAME="Slack.app"
APP_PATH="/Applications/$APP_NAME"
APP_VERSION_KEY="CFBundleShortVersionString"
DOWNLOAD_URL="https://slack.com/ssb/download-osx"
# Follow redirects to get the real download URL and derive the dmg filename.
finalDownloadUrl=$(curl "$DOWNLOAD_URL" -s -L -I -o /dev/null -w '%{url_effective}')
dmgName=$(printf "%s" "${finalDownloadUrl[@]}" | sed 's@.*/@@')
slackDmgPath="/tmp/$dmgName"
################################
#find new version of Slack
currentSlackVersion=$(/usr/bin/curl -s 'https://slack.com/api/desktop.releases.update?platform=darwin&os_version=10.13.5&app_version=0&channel=prod' | grep -o "[0-9]\.[0-9]\.[0-9]" | tail -1)
if [ -d "$APP_PATH" ]; then
    localSlackVersion=$(defaults read "$APP_PATH/Contents/Info.plist" "$APP_VERSION_KEY")
    if [ "$currentSlackVersion" = "$localSlackVersion" ]; then
        printf "Slack is already up-to-date. Version: %s" "$localSlackVersion"
        exit 0
    fi
fi
#find if slack is running
if pgrep '[S]lack'; then
    printf "Error: Slack is currently running!\n" >&2
    # NOTE(review): exit codes are truncated to 0-255, so 409 is reported as
    # 153; kept as-is since callers only need a non-zero status.
    exit 409
else
    # Remove the existing Application
    rm -rf /Applications/Slack.app
    #downloads latest version of Slack
    curl -L -o "$slackDmgPath" "$finalDownloadUrl"
    #mount the .dmg
    hdiutil attach -nobrowse "$slackDmgPath"
    #Copy the update app into applications folder
    sudo cp -R /Volumes/Slack*/Slack.app /Applications
    #unmount and eject dmg
    mountName=$(diskutil list | grep Slack | awk '{ print $3 }')
    umount -f /Volumes/Slack*/
    diskutil eject "$mountName"
    #clean up /tmp download
    rm -rf "$slackDmgPath"
    # Slack permissions are really dumb
    chown -R "$consoleuser":admin "/Applications/Slack.app"
    localSlackVersion=$(defaults read "$APP_PATH/Contents/Info.plist" "$APP_VERSION_KEY")
    if [ "$currentSlackVersion" = "$localSlackVersion" ]; then
        printf "Slack is now updated/installed. Version: %s" "$localSlackVersion"
    fi
fi
#slack will relaunch if it was previously running
if [ "$slackOn" == "" ] ; then
    exit 0
else
    su - "${consoleuser}" -c 'open -a /Applications/Slack.app'
fi
exit 0
| true
|
21072b93a927341a873805982d570ccfd8650fa9
|
Shell
|
slaufer/dotfiles
|
/home/.bash/prompt/80s-neon.sh
|
UTF-8
| 1,473
| 3.71875
| 4
|
[] |
no_license
|
# color palette — 256-color ANSI escapes; the \[ \] wrappers tell readline
# the bytes are zero-width so line-wrapping stays correct inside PS1.
C0='\[\e[38;5;206m\]'   # pink
C1='\[\e[38;5;51m\]'    # cyan
C2='\[\e[38;5;141m\]'   # purple
COK=$C0                 # color for a zero exit status
#COK='\[\e[38;5;10m\]'
CFAIL='\[\e[38;5;196m\]' # color for a non-zero exit status
# separator: three backslashes, one in each palette color
SEP=' '$C0'\\'$C2'\\'$C1'\\ '
function prompt_path_color {
local OUT=$1
local OUT=${OUT//~/$C0~$C1}
local OUT=${OUT//\//$C2\/$C1}
echo $C1$OUT
}
# Print the current directory (with $HOME shown as ~) colorized for the
# prompt, truncated from the left with a '...' prefix when it would exceed
# $1 characters.
function prompt_path {
	local SPWD=${PWD/$HOME/\~}
	if [ ${#SPWD} -le $1 ]; then
		echo `prompt_path_color "$SPWD"`
	else
		# keep the last ($1 - 3) chars; 3 columns are used by the '...'
		echo $C2'...'`prompt_path_color "${SPWD: -$(($1 - 3))}"`
	fi
}
# PROMPT_COMMAND hook: rebuild PS1 before every prompt with the clock, the
# (width-limited) current directory, user@host and the last exit status.
function prompt_color {
	# first get some information; $? must be read before anything else runs
	local EXIT="$?"
	local COLS=`tput cols`
	# reset prompt
	PS1=""
	# title (xterm escape: set window title to user@host:cwd)
	PS1+='\[\033]0;\u@\h:\w\a\]'
	# clock
	PS1+=`date +$C1'%I'$C2':'$C1'%M'$C2':'$C1'%S '$C2'%P'`
	# directory
	PS1+=$SEP
	local DWIDTH=16 # if you customize the prompt, you'll have to adjust this
	PS1+=`prompt_path $(($COLS - $DWIDTH))`
	# user@host
	PS1+='\n'$C1'\u'$C2'@'$C1'\h'
	# return code, green-ish when 0 and red when non-zero
	PS1+=$SEP
	if [ $EXIT -eq 0 ]; then
		PS1+=$C2'('$COK$EXIT$C2') '
	else
		PS1+=$C2'('$CFAIL$EXIT$C2') '
	fi
	# prompt
	PS1+=$C1'\$\[\e[m\] '
}
# super basic prompt for older terminals (no color escapes at all)
function prompt_bw {
	local EXIT=$?
	# double evals mean we need to write like a billion slashes here
	PS1=`date +"%I:%M:%S %P"`' \\\\\\\\\\\\ \w\n\u@\h \\\\\\\\\\\\ ('$EXIT') \$ '
}

# only activate the fancy prompt if this is a 256-color terminal
if [ `tput colors` = "256" ]; then
	export PROMPT_COMMAND=prompt_color
else
	export PROMPT_COMMAND=prompt_bw
fi
| true
|
aac34373b3a8f9bfb690e8d2c4a187ac1fc82aa8
|
Shell
|
tangshengwei/the-first-shell-program
|
/monitor.sh
|
UTF-8
| 697
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
#########################################################################
# File Name: monitor.sh
# Author: kangrui
# mail: 33750335@qq.com
# Created Time: Sat 28 Nov 2015 01:50:52 PM EST
#########################################################################
# Present a numbered menu of the other scripts in this directory and run
# whichever one the user picks; any non-numeric answer exits.
# FIX: shebang moved to line 1 (it was below the header and ignored);
# `ls -I` parsing replaced with a glob; menu input fully anchored and
# bounds-checked.
resettem=$(tput sgr0)
declare -A ssharray
i=0
number=""
for script_file in *
do
    [ -e "$script_file" ] || continue          # empty directory: glob stays literal
    [ "$script_file" = "monitor.sh" ] && continue  # skip ourselves
    echo -e "\e[1;35m" "the script:" ${i} '===>' ${resettem} ${script_file}
    ssharray[$i]=${script_file}
    number="${number} | ${i}"
    i=$((i+1))
done
echo ${number}
while true
do
    read -p "please input a number [ ${number} ]:" execshell
    # ^...$ so inputs like "3x" are rejected instead of silently truncated
    if [[ ! ${execshell} =~ ^[0-9]+$ ]];then
        exit 0
    fi
    # ignore numbers with no corresponding menu entry
    [ -n "${ssharray[$execshell]}" ] || continue
    /bin/sh ./${ssharray[$execshell]}
done
| true
|
86b78c6e5af7230933596ac7fc13936dc775db91
|
Shell
|
butterfy76/OOGIS_CLOUD
|
/com2-ipdd.sh
|
UTF-8
| 1,286
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash -ex
# Provision the COMPUTE2 node for an OpenStack Mitaka deployment: enable the
# cloud archive, upgrade packages, set the hostname, rewrite /etc/hosts and
# /etc/network/interfaces from config.cfg values, then reboot.
# All $HOST_*/$*_IP/$NETMASK_* variables come from the sourced config.cfg;
# echocolor comes from functions.sh.
source config.cfg
source functions.sh

echocolor "Enable the OpenStack Mitaka repository"
sleep 5
apt-get install software-properties-common -y
add-apt-repository cloud-archive:mitaka -y

sleep 5
echocolor "Upgrade the packages for server"
apt-get -y update && apt-get -y upgrade && apt-get -y dist-upgrade

echocolor "Configuring hostname for COMPUTE2 node"
sleep 3
echo "$HOST_COM2" > /etc/hostname
hostname -F /etc/hostname

# Rewrite /etc/hosts, keeping a one-time backup of the original.
iphost=/etc/hosts
test -f $iphost.orig || cp $iphost $iphost.orig
rm $iphost
touch $iphost
cat << EOF >> $iphost
127.0.0.1 localhost $HOST_COM2
$CTL_MGNT_IP    $HOST_CTL
$COM2_MGNT_IP   $HOST_COM2
EOF

sleep 3
echocolor "Config network for Compute1 node"
# Rewrite /etc/network/interfaces, keeping a one-time backup of the original.
ifaces=/etc/network/interfaces
test -f $ifaces.orig || cp $ifaces $ifaces.orig
rm $ifaces
touch $ifaces
cat << EOF >> $ifaces
#Dat IP cho $COM2_MGNT_IP node
# LOOPBACK NET
auto lo
iface lo inet loopback

# MGNT NETWORK
auto eth0
iface eth0 inet static
address $COM2_MGNT_IP
netmask $NETMASK_ADD_MGNT

# EXT NETWORK
auto eth1
iface eth1 inet static
address $COM2_EXT_IP
netmask $NETMASK_ADD_EXT
gateway $GATEWAY_IP_EXT
dns-nameservers 202.30.55.11
EOF

sleep 5
echocolor "Rebooting machine ..."
init 6
#
| true
|
407ffb1817f9f2394a9e93170fb276f362bb8cf9
|
Shell
|
mcthoren/gps_tinkering
|
/plot_month_avg
|
UTF-8
| 1,247
| 3.15625
| 3
|
[
"Unlicense"
] |
permissive
|
#!/bin/sh
# Regenerate the GPS altitude plots: daily-average time series, a box plot
# with summary stats (fed into an HTML template), and a histogram.
HOME="/import/home/ghz"
GPSP="$HOME/projects/gps"
GPS_MA="$GPSP/data/gps_alt_monthly_averages"
GPS_STS="$GPSP/data/gps_alt_stats"
# Matches lines like: 2<13 digits>\tavg alt: NNN.N m\tavg evp: NNN.N m
PAT0="^2([0-9]{13})\tavg alt: [0-9]{2,3}.[0-9] m\tavg evp: [0-9]{1,3}.[0-9] m$"

GPS_ALT_TEMP_0="$(mktemp /tmp/gps_alt_0.XXXXXXXXXXXXX)"
GPS_ALT_TEMP_1="$(mktemp /tmp/gps_alt_1.XXXXXXXXXXXXX)"
GPS_ALT_TEMP_HIST="$(mktemp /tmp/gps_alt_h.XXXXXXXXXXXXX)"

# Time-series plot of all daily averages plus the monthly-average overlay.
cat $GPSP/data/*/gps_alt.day.avg.* > "$GPS_ALT_TEMP_0"
gnuplot -e "ALTF='$GPS_ALT_TEMP_0';ALTF_MA='$GPS_MA'" "$GPSP/gps.gnuplot"

# Box plot over the raw per-sample lines; gnuplot's stats go to $GPS_STS.
cat $GPSP/data/*/gps_alt.dat.* | grep -aP "$PAT0" > "$GPS_ALT_TEMP_1"
gnuplot -e "BOXF='$GPS_ALT_TEMP_1'; OUT_DIR='$GPSP/plots'; COL=4;" "$GPSP/boxplot.gnuplot" 2> "$GPS_STS"

# Fill record count, mean, median and timestamp into the HTML template.
REC="$(grep 'Records:' $GPS_STS | awk '{print $2}')"
MEAN="$(grep 'Mean:' $GPS_STS | awk '{printf("%.2f\n",$2)}')"
MED="$(grep 'Median:' $GPS_STS | awk '{printf("%.2f\n",$2)}')"
TS="$(date -u "+%F %T%Z")"
sed "s/AAAAA/${REC}/; s/MMMMM/${MEAN}/; s/DDDDD/${MED}/; s/TTTTT/${TS}/" "$GPSP/gps_el.html.plate" > "$GPSP/gps_el.html"

# Histogram of altitudes.
$GPSP/alt_hist_gen "$GPS_ALT_TEMP_1" > "$GPS_ALT_TEMP_HIST"
gnuplot -e "ALT_HIST='$GPS_ALT_TEMP_HIST'; OUT_DIR='$GPSP/plots';" "$GPSP/alt_hist.gnuplot"

rm "${GPS_ALT_TEMP_0}" "${GPS_ALT_TEMP_1}" "${GPS_ALT_TEMP_HIST}"
| true
|
a28cb34af4ead98aee0b35cdcdbff4399bed9eeb
|
Shell
|
trdtnguyen/benchmarks
|
/linkbench-mongo/benchmark_scripts/migration.sh
|
UTF-8
| 779
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/sh
# Migrate a MongoDB data tree onto separate devices after the initial data
# load: collections -> $DATA_DIR, indexes -> $INDEX_DIR, journal ->
# $JOURNAL_DIR, with the run directory on $DEST_DIR and symlinks tying it
# back together.  Call this after the insert phase has finished.
# FIX: shebang was "# !/bin/sh" (a plain comment); now a real shebang.
# WARNING: destructive — removes any previous migrated copies first.
SOURCE_DIR=/ssd1
DEST_DIR=/hdd1
DATA_DIR=/ssd1
INDEX_DIR=/ssd2
JOURNAL_DIR=/ssd3

echo "move data from ${SOURCE_DIR} to ${DATA_DIR}, journal from ${SOURCE_DIR} to ${JOURNAL_DIR}, run dir is ${DEST_DIR}"

# Clear out any results of a previous migration.
rm -rf ${DATA_DIR}/collection
rm -rf ${INDEX_DIR}/index
rm -rf ${JOURNAL_DIR}/journal
rm -rf ${DEST_DIR}/data

# Split the tree across devices …
mv ${SOURCE_DIR}/data/db/ycsb/collection ${DATA_DIR}/
mv ${SOURCE_DIR}/data/db/ycsb/index ${INDEX_DIR}/
mv ${SOURCE_DIR}/data/db/journal ${JOURNAL_DIR}/
mv ${SOURCE_DIR}/data/ ${DEST_DIR}/

# … and stitch it back together with symlinks under the run directory.
ln -s ${DATA_DIR}/collection ${DEST_DIR}/data/db/ycsb/
ln -s ${INDEX_DIR}/index ${DEST_DIR}/data/db/ycsb/
ln -s ${JOURNAL_DIR}/journal ${DEST_DIR}/data/db/
| true
|
0f241d2313c26323f53708f9f3a8b9adf2dc99ed
|
Shell
|
XPhilipp/Scripts
|
/ipcam_mqtt_listner
|
UTF-8
| 1,048
| 3.703125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Simple Command Plugin for MQTT OpenIPC mod
# Author Philipp@inbox.ru 2021
# Licence MIT
# based on https://github.com/pkoevesdi/MQTT-Logger
# Subscribes to <login>/<device>/cmds via a FIFO and dispatches the
# commands "snap" and "reboot".
IPC=/mnt/mtd/ipcam.conf
mpipe=/tmp/mqtt_pipe
pidfile=/tmp/mqtt_sub_pidfile
#check is mosquitto_sub runs, some clean ups
kill -9 $(cat $pidfile) 2>/dev/null
rm -f $mpipe $pidfile
if [ -f ${IPC} ]; then
  # Load key=value settings (mqtt_server, mqtt_port, mqtt_login,
  # mqtt_password, mqtt_enable, device_name, …) from the config file.
  # NOTE(review): 'local' outside a function is non-standard; this relies on
  # the device's busybox shell accepting it — confirm on target firmware.
  while read settings
    do local ${settings}
  done < ${IPC}
#
# Is Enabled MQTT
  if [ ${mqtt_enable} = 1 ]; then
    # create fifo file
    ([ ! -p "$mpipe" ]) && mkfifo $mpipe
    # subscribe mosquitto toppic; runs in the background writing to the FIFO
    (mosquitto_sub -h ${mqtt_server} -p ${mqtt_port} -u ${mqtt_login} -P ${mqtt_password} -t "${mqtt_login}/${device_name}/cmds" 1>$mpipe 2>/dev/null) &
    # store mosquitto_sub pid in file (used by the cleanup kill above)
    echo "$!" > $pidfile
    # read subscribed data
    echo read subscribed data
    while read message <$mpipe
    do
      # echo readed command for debug purpose
      echo "$message"
      # process commands
      case "$message" in
        ("snap")
          # take a snapshot via the companion publisher script
          /bin/ipcam_mqtt
        ;;
        ("reboot")
          /sbin/reboot
        ;;
      esac
    done
  fi
fi
| true
|
c9644998036c91d312a7fdfa6e9e774cd500cb69
|
Shell
|
daviwesley/easy-chamadas
|
/project.sh
|
UTF-8
| 231
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/sh
# Report line counts for the project's Python (api/) and JavaScript (app/)
# sources.  Usage: ./project.sh lines
if [ "$1" = "lines" ] ; then
	echo "Python lines:"
	find api -name '*.py' | xargs wc -l
	echo
	echo "Javascript lines:"
	find app -name '*.js' | xargs wc -l
	echo
else
	# FIX: report unknown commands on stderr and exit non-zero (was exit 0).
	echo "Command not found: $1" >&2
	exit 1
fi
| true
|
1dfcb7c510e5ec50de424d32407dc5e63f277cd2
|
Shell
|
tayjaybabee/shell_collection
|
/installers/Anaconda/install_anaconda.sh
|
UTF-8
| 834
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/sh
####~~~~####~~~~####~~~~####~~~~####~~~~####
####~~~~####   Anaconda Installer  ####~~~~####
####~~~~# Created: 12/8/2020 7:34AM  #~~~~####
####~~~~# Author: Taylor Blackstone  #~~~~####
# Downloads the Anaconda3 2020.11 x86_64 installer and runs it interactively.

# Install prerequisites (GUI/X libraries the installer's tooling needs).
sudo apt-get install -y libgl1-mesa-glx libegl1-mesa libxrandr2 libxrandr2 libxss1 libxcursor1 libxcomposite1 libasound2 libxi6 libxtst6

# Specify where we want to download the installer to
DOWNLOAD_DIR="/home/$USER/.tmp/anaconda/installer"

# If the specified download directory doesn't exist, we make it, and it's parents
[ ! -d $DOWNLOAD_DIR ] && mkdir -p $DOWNLOAD_DIR

# Navigate to the download dir
cd $DOWNLOAD_DIR

# Use wget to download Anaconda (version is pinned in the URL)
wget https://repo.anaconda.com/archive/Anaconda3-2020.11-Linux-x86_64.sh

# Execute the installer's shell script.
bash $DOWNLOAD_DIR/Anaconda3-2020.11-Linux-x86_64.sh
| true
|
323ec0ca2e3a80283a3f2fd91d2febe1d9859b38
|
Shell
|
tuenti/sensu-trapd
|
/convertmibs.sh
|
UTF-8
| 330
| 3.3125
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#!/bin/bash
# Convert the Cloudant SNMP MIB text files from the chef repo into pysnmp
# python modules under conf/mibs/.
CHEF_REPO=~/code/cloudant/chef
INPUT_DIR=$CHEF_REPO/cookbooks/snmp/files/default
OUTPUT_DIR=conf/mibs/

# FIX: iterate the glob directly instead of parsing `ls` output, and quote
# paths so directories with spaces work.
for INPUT in "$INPUT_DIR"/CLOUDANT-*-MIB.txt; do
    # unmatched glob stays literal; bail out rather than converting nothing
    [ -e "$INPUT" ] || { echo "no MIB files found in $INPUT_DIR" >&2; exit 1; }
    OUTPUT=$OUTPUT_DIR/$(basename "$INPUT" .txt).py
    echo "Converting: $INPUT"
    build-pysnmp-mib -o "$OUTPUT" "$INPUT"
done
| true
|
8de08d2de05256444614596e3ac1edb5f75ac294
|
Shell
|
xjinGitty/openQA
|
/tools/setup.sh
|
UTF-8
| 785
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/bash
# Bootstrap a local openQA working directory at the path given in $1.
dst="$1"
if [ -z "$dst" ]; then
	echo "please specify where you want to run openqa from" >&2
	exit 1
fi
# Trace helper: echo the command (prefixed with " running"), then execute it
# verbatim with its arguments preserved.
run() {
	echo " running" "$@"
	"$@"
}
echo "1. setting up directory structure ..."
# Working directories openQA expects under the target path.
for i in perl testresults pool/1 factory/iso backlog; do
	run mkdir -p "$dst/$i"
done
run ln -s $PWD/tools $dst/tools
# Print the remaining manual steps (the heredoc interpolates $PWD and $dst).
cat <<EOF
2. please run:
  sudo tools/initdb
  sudo mkdir /usr/share/openqa
  sudo ln -s $PWD/www/* /usr/share/openqa
3. copy and adjust etc/apache2 to /etc/apache2
4. setup os-autoinst
  git clone git://gitorious.org/os-autoinst/os-autoinst.git $dst/perl/autoinst
  git clone git@github.com:openSUSE-Team/os-autoinst-needles.git
  ln -s os-autoinst-needles/distri/opensuse $dst/perl/autoinst/distri/opensuse/needles
  sudo chown -R wwwrun os-autoinst-needles
EOF
| true
|
88c33c69b32deb5257aaf9bd00af57c6a17873dd
|
Shell
|
virtru/gateway-install-script
|
/deploy-gateway-v2.2.sh
|
UTF-8
| 20,094
| 3.171875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Main driver for the gateway installer: declare defaults and working
# variables, prompt the user for every setting (GetGw* functions), generate
# TLS/DKIM material, then write the env file, deploy script and test scripts.
EntryPoint()
{
    # Default Variables — offered as the [default] in each prompt below.
    blank=""
    #gwNameDefault="oe"
    gwVersionDefault="2.2.18"
    gwPortDefault="9001"
    gwModeDefault="encrypt-everything"
    gwTopologyDefault="outbound"
    gwCksDefault="2"
    gwInboundRelayDefault=""
    gwFqdnDefault="gw.example.com"
    gwDomainDefault="example.com"
    gwDkimSelectorDefault="gw"
    gwOutboundRelayDefault=""
    gwAmplitudeTokenDefault="0000000000"
    gwHmacNameDefault="0000000000"
    gwHmacSecretDefault="0000000000"

    # Final Variables — filled in by the GetGw* prompt functions.
    gwName=""
    gwVersion=""
    gwPort=""
    gwMode=""
    gwTopology=""
    gwInboundRelay=""
    gwCks=""
    gwCksKey=""
    gwFqdn=""
    gwDkimSelector=""
    gwOutboundRelay=""
    gwAmplitudeToken=""
    gwHmacName=""
    gwHmacSecret=""

    # Working Variables — paths produced by the Make* helpers.
    tlsPath=""
    tlsKeyFile=""
    tlsKeyFull=""
    tlsPemFile=""
    tlsPemFull=""
    dkimPath=""
    dkimPrivateFull=""
    dkimPublicFull=""
    scriptFile=""

    # Actions — prompt for settings (GetGwName is derived, not prompted),
    # generate certificates, then emit the deployment artifacts.
    ShowLogo
    GetGwVersion $gwVersionDefault
    GetGwPort $gwPortDefault
    GetGwMode $gwModeDefault
    GetGwTopology $gwTopologyDefault
    GetGwName
    GetGwInboundRelay $gwInboundRelayDefault
    GetGwCks $gwCksDefault
    GetGwFqdn $gwFqdnDefault
    GetGwDomain $gwDomainDefault
    GetGwDkimSelector $gwDkimSelectorDefault
    GetGwOutboundRelay $gwOutboundRelayDefault
    GetGwAmplitudeToken $gwAmplitudeTokenDefault
    GetGwHmacName $gwHmacNameDefault
    GetGwHmacSecret $gwHmacSecretDefault
    MakeTlsPathVariables
    MakeDkimPathVariables
    MakeDirectories
    MakeTlsCert
    MakeDkimCert
    WriteEnv
    WriteScript
    WriteTestScripts
    clear
    ShowLogo
    ShowNextSteps
}
## Functions
# Derive the gateway instance name (global gwName) from the previously
# gathered gwTopology, gwMode and gwPort, e.g. outbound +
# encrypt-everything + port 9001 -> "oe-9001".
GetGwName() {
    local prefix=""
    if [ "$gwTopology" = "outbound" ]; then
        case "$gwMode" in
            encrypt-everything ) prefix="oe" ;;
            decrypt-everything ) prefix="od" ;;
            dlp )                prefix="dlp-out" ;;
        esac
    else
        case "$gwMode" in
            encrypt-everything ) prefix="ie" ;;
            decrypt-everything ) prefix="id" ;;
            dlp )                prefix="dlp-in" ;;
        esac
    fi
    # An unrecognised mode leaves gwName unchanged, exactly like the
    # original if-chain where no branch matched.
    if [ -n "$prefix" ]; then
        gwName="$prefix-$gwPort"
    fi
}
# Prompt for the gateway docker image version.
# $1 - default shown in the prompt and used when the answer is blank.
# Sets global gwVersion.
GetGwVersion() {
    local input=""
    read -p "Gateway Version [$1]: " input
    case "$input" in
        $blank )
            gwVersion=$1
            ;;
        * )
            gwVersion=$input
            ;;
    esac
    echo " "
}

# Prompt for the host port the gateway container publishes SMTP on.
# $1 - default used when the answer is blank.  Sets global gwPort.
GetGwPort() {
    local input=""
    read -p "Gateway Port [$1]: " input
    case "$input" in
        $blank )
            gwPort=$1
            ;;
        * )
            gwPort=$input
            ;;
    esac
    echo " "
}
# Prompt for the gateway mode (1-3 menu).
# $1 - default used for a blank answer.  Sets global gwMode.
# NOTE(review): the "*" arm also assigns the default $1, so any answer
# other than blank/1/2/3 silently falls back to the default rather than
# using the typed value -- confirm this is intended.
GetGwMode() {
    local input=""
    echo "Gateway Mode"
    echo " Options"
    echo "  1 - encrypt-everything"
    echo "  2 - decrypt-everything"
    echo "  3 - dlp"
    echo " "
    read -p "Enter 1-3 [$1]: " input
    case "$input" in
        $blank )
            gwMode=$1
            ;;
        1 )
            gwMode="encrypt-everything"
            ;;
        2 )
            gwMode="decrypt-everything"
            ;;
        3 )
            gwMode="dlp"
            ;;
        * )
            gwMode=$1
            ;;
    esac
    echo " "
}

# Prompt for the gateway topology (1-2 menu).
# $1 - default used for a blank or unrecognised answer.  Sets gwTopology.
GetGwTopology() {
    local input=""
    echo "Gateway Topology"
    echo " Options"
    echo "  1 - inbound"
    echo "  2 - outbound"
    echo " "
    read -p "Enter 1-2 [$1]: " input
    case "$input" in
        $blank )
            gwTopology=$1
            ;;
        1 )
            gwTopology="inbound"
            ;;
        2 )
            gwTopology="outbound"
            ;;
        * )
            gwTopology=$1
            ;;
    esac
    echo " "
}
# Prompt for the set of upstream relay networks allowed to connect inbound.
# $1 - default used for a blank answer.
# Sets global gwInboundRelay to the full GATEWAY_RELAY_ADDRESSES env line.
# Options 1/2 expand to the published G Suite / O365 sender CIDR lists.
# NOTE(review): the "*" arm uses $1 (the default), not $input, so a
# hand-typed CIDR list is discarded -- looks like a bug, confirm.
GetGwInboundRelay() {
    local input=""
    echo "Inbound Relay Addresses"
    echo " Options"
    echo "  1 - G Suite"
    echo "  2 - O365"
    echo "  3 - All"
    echo "  4 - None"
    read -p "Enter (1-4) [$1]: " input
    case "$input" in
        $blank )
            gwInboundRelay=$1
            ;;
        1 )
            gwInboundRelay="GATEWAY_RELAY_ADDRESSES=35.190.247.0/24,64.233.160.0/19,66.102.0.0/20,66.249.80.0/20,72.14.192.0/18,74.125.0.0/16,108.177.8.0/21,173.194.0.0/16,209.85.128.0/17,216.58.192.0/19,216.239.32.0/19,172.217.0.0/19,172.217.32.0/20,172.217.128.0/19,172.217.160.0/20,172.217.192.0/19,108.177.96.0/19,35.191.0.0/16,130.211.0.0/22"
            ;;
        2 )
            gwInboundRelay="GATEWAY_RELAY_ADDRESSES=23.103.132.0/22,23.103.136.0/21,23.103.144.0/20,23.103.198.0/23,23.103.200.0/22,23.103.212.0/22,40.92.0.0/14,40.107.0.0/17,40.107.128.0/18,52.100.0.0/14,65.55.88.0/24,65.55.169.0/24,94.245.120.64/26,104.47.0.0/17,104.212.58.0/23,134.170.132.0/24,134.170.140.0/24,157.55.234.0/24,157.56.110.0/23,157.56.112.0/24,207.46.51.64/26,207.46.100.0/24,207.46.163.0/24,213.199.154.0/24,213.199.180.128/26,216.32.180.0/23"
            ;;
        3 )
            gwInboundRelay="GATEWAY_RELAY_ADDRESSES=0.0.0.0/0"
            ;;
        4 )
            gwInboundRelay="GATEWAY_RELAY_ADDRESSES="
            ;;
        * )
            gwInboundRelay="GATEWAY_RELAY_ADDRESSES=$1"
            ;;
    esac
    echo " "
}
# Ask whether the organisation uses a Customer Key Server (CKS).
# $1 - default choice for a blank answer.
# Sets globals gwCks / gwCksKey to the env-file lines, commented out
# ("# ...") when CKS is disabled.  decrypt-everything mode forces CKS on
# regardless of the answer (see the trailing if).
GetGwCks() {
    local input=""
    echo "CKS Enabled"
    echo " Options"
    echo "  1 - Yes"
    echo "  2 - No"
    echo " "
    read -p "Enter 1-2 [$1]: " input
    case "$input" in
        $blank )
            # Blank answer behaves like "No" (commented-out lines).
            gwCks="# GATEWAY_ENCRYPTION_KEY_PROVIDER=CKS"
            gwCksKey="# GATEWAY_CKS_SESSION_KEY_EXPIRY_IN_MINS=360"
            ;;
        1 )
            gwCks="GATEWAY_ENCRYPTION_KEY_PROVIDER=CKS"
            gwCksKey="GATEWAY_CKS_SESSION_KEY_EXPIRY_IN_MINS=360"
            ;;
        2 )
            gwCks="# GATEWAY_ENCRYPTION_KEY_PROVIDER=CKS"
            gwCksKey="# GATEWAY_CKS_SESSION_KEY_EXPIRY_IN_MINS=360"
            ;;
        * )
            gwCks="# GATEWAY_ENCRYPTION_KEY_PROVIDER=CKS"
            gwCksKey="# GATEWAY_CKS_SESSION_KEY_EXPIRY_IN_MINS=360"
            ;;
    esac
    echo " "
    # Decrypt mode cannot work without CKS, so override the answer.
    if [ $gwMode = "decrypt-everything" ]
    then
        gwCks="GATEWAY_ENCRYPTION_KEY_PROVIDER=CKS"
        gwCksKey="GATEWAY_CKS_SESSION_KEY_EXPIRY_IN_MINS=360"
    fi
}
# Prompt for the gateway's fully-qualified hostname (used for TLS certs
# and the docker --hostname).  $1 - default.  Sets global gwFqdn.
GetGwFqdn() {
    local input=""
    read -p "Gateway FQDN [$1]: " input
    case "$input" in
        $blank )
            gwFqdn=$1
            ;;
        * )
            gwFqdn=$input
            ;;
    esac
    echo " "
}

# Prompt for the mail domain the gateway serves (used for DKIM and the
# env file).  $1 - default.  Sets global gwDomain.
GetGwDomain() {
    local input=""
    read -p "Gateway Domain [$1]: " input
    case "$input" in
        $blank )
            gwDomain=$1
            ;;
        * )
            gwDomain=$input
            ;;
    esac
    echo " "
}
# Prompt for an optional outbound relay ("smart host").
# $1 - default relay; a blank answer produces a commented-out
#      GATEWAY_TRANSPORT_MAPS line, i.e. the gateway performs final
#      delivery itself via MX lookup.
# Sets global gwOutboundRelay to the full env-file line.
GetGwOutboundRelay() {
    local input=""
    echo "Outbound Relay"
    # Bug fix: bash's builtin echo does not interpret backslash escapes
    # without -e, so the original printed a literal "\t".  Use printf.
    printf '\tBlank (Gateway performs final delivery)\n'
    printf '\t[smtp-relay.example.com]:587 (Gateway sends all mail to relay for delivery)\n'
    read -p "Enter Relay Address []: " input
    case "$input" in
        $blank )
            # Keep the directive present but commented out in the env file.
            gwOutboundRelay="# GATEWAY_TRANSPORT_MAPS=*=>$1"
            ;;
        * )
            gwOutboundRelay="GATEWAY_TRANSPORT_MAPS=*=>$input"
            ;;
    esac
    echo " "
}
# Prompt for the DKIM selector (the label before "._domainkey.").
# $1 - default.  Sets global gwDkimSelector.
GetGwDkimSelector() {
    local input=""
    read -p "Gateway DKIM Selector [$1]: " input
    case "$input" in
        $blank )
            gwDkimSelector=$1
            ;;
        * )
            gwDkimSelector=$input
            ;;
    esac
    echo " "
}

# Prompt for the Virtru-issued Amplitude analytics token.
# $1 - default placeholder.  Sets global gwAmplitudeToken.
GetGwAmplitudeToken() {
    local input=""
    read -p "Amplitude Token (Provided by Virtru) [$1]: " input
    case "$input" in
        $blank )
            gwAmplitudeToken=$1
            ;;
        * )
            gwAmplitudeToken=$input
            ;;
    esac
    echo " "
}

# Prompt for the Virtru-issued HMAC token name.
# $1 - default placeholder.  Sets global gwHmacName.
GetGwHmacName() {
    local input=""
    read -p "HMAC Name (Provided by Virtru) [$1]: " input
    case "$input" in
        $blank )
            gwHmacName=$1
            ;;
        * )
            gwHmacName=$input
            ;;
    esac
    echo " "
}

# Prompt for the Virtru-issued HMAC token secret.
# $1 - default placeholder.  Sets global gwHmacSecret.
GetGwHmacSecret() {
    local input=""
    read -p "HMAC Secret (Provided by Virtru) [$1]: " input
    case "$input" in
        $blank )
            gwHmacSecret=$1
            ;;
        * )
            gwHmacSecret=$input
            ;;
    esac
    echo " "
}
# Compute the TLS key/certificate locations for the gateway FQDN and
# store them in the tls* globals used by MakeTlsCert and MakeDirectories.
MakeTlsPathVariables() {
    tlsPath="/var/virtru/vg/tls/${gwFqdn}"
    tlsKeyFile="client.key"
    tlsPemFile="client.pem"
    tlsKeyFull="${tlsPath}/${tlsKeyFile}"
    tlsPemFull="${tlsPath}/${tlsPemFile}"
}

# Compute the DKIM private/public key locations from the selector and
# domain, storing them in the dkim* globals used by MakeDkimCert.
MakeDkimPathVariables() {
    dkimPath="/var/virtru/vg/dkim"
    local keybase="${dkimPath}/${gwDkimSelector}._domainkey.${gwDomain}"
    dkimPrivateFull="${keybase}.pem"
    dkimPublicFull="${keybase}-public.pem"
}
# Create the host-side directory tree that the gateway container mounts
# and that the Write* functions write into.
MakeDirectories(){
    mkdir -p /var/virtru/vg/
    mkdir -p /var/virtru/vg/env
    mkdir -p /var/virtru/vg/scripts
    mkdir -p /var/virtru/vg/tls
    mkdir -p /var/virtru/vg/queue
    mkdir -p "/var/virtru/vg/queue/$gwName"
    # Bug fix: WriteTestScripts writes into /var/virtru/vg/test, but this
    # line used to create /var/virtru/bg/test (typo), so the test scripts
    # could never be written.
    mkdir -p /var/virtru/vg/test
    mkdir -p "$tlsPath"
    mkdir -p /var/virtru/vg/dkim
}
# Generate a 2048-bit RSA key and a 10-year self-signed certificate for
# the gateway FQDN at the tls* paths computed by MakeTlsPathVariables.
MakeTlsCert(){
    ## Make TLS Certs
    openssl genrsa -out $tlsKeyFull 2048
    openssl req -new -key $tlsKeyFull -x509 -subj /CN=$gwFqdn -days 3650 -out $tlsPemFull
}

# Generate the 1024-bit RSA DKIM signing key pair at the dkim* paths
# computed by MakeDkimPathVariables (private key + extracted public key).
MakeDkimCert(){
    openssl genrsa -out $dkimPrivateFull 1024 -outform PEM
    openssl rsa -in $dkimPrivateFull -out $dkimPublicFull -pubout -outform PEM
}
WriteTestScripts(){
testScript1=/var/virtru/vg/test/checkendpoints.sh
/bin/cat <<EOM >$testScript1
#!/bin/bash
echo https://google.com
curl --connect-timeout 10 -o /dev/null --silent --head --write-out '%{http_code}\n' https://google.com
echo ""
echo https://acm.virtru.com
curl --connect-timeout 10 -o /dev/null --silent --head --write-out '%{http_code}\n' https://acm.virtru.com
echo ""
echo https://events.virtru.com
curl --connect-timeout 10 -o /dev/null --silent --head --write-out '%{http_code}\n' https://events.virtru.com
echo ""
echo https://accounts.virtru.com
curl --connect-timeout 10 -o /dev/null --silent --head --write-out '%{http_code}\n' https://accounts.virtru.com
echo ""
echo https://secure.virtru.com
curl --connect-timeout 10 -o /dev/null --silent --head --write-out '%{http_code}\n' https://secure.virtru.com
echo ""
echo https://storage.virtru.com
curl --connect-timeout 10 -o /dev/null --silent --head --write-out '%{http_code}\n' https://storage.virtru.com
echo ""
echo https://encrypted-storage.virtru.com
curl --connect-timeout 10 -o /dev/null --silent --head --write-out '%{http_code}\n' https://encrypted-storage.virtru.com
echo ""
echo https://api.amplitude.com
curl --connect-timeout 10 -o /dev/null --silent --head --write-out '%{http_code}\n' https://api.amplitude.com
echo ""
echo https://cdn.virtru.com
curl --connect-timeout 10 -o /dev/null --silent --head --write-out '%{http_code}\n' https://cdn.virtru.com
echo ""
echo https://repo.maven.apache.org
curl --connect-timeout 10 -o /dev/null --silent --head --write-out '%{http_code}\n' https://repo.maven.apache.org
echo ""
echo https://hub.docker.com
curl --connect-timeout 10 -o /dev/null --silent --head --write-out '%{http_code}\n' https://hub.docker.com
echo ""
EOM
testScript2=/var/virtru/vg/test/runall.sh
/bin/cat <<EOM >$testScript2
#!/bin/bash
for container in \`docker ps -q\`; do
# show the name of the container
docker inspect --format='{{.Name}}' \$container;
# run the command (date in the case)
docker exec -it \$container \$1
done
EOM
testScript3=/var/virtru/vg/test/sendtestmessage.sh
/bin/cat <<EOM >$testScript3
#!/bin/bash
echo "Update Virtru Gateway ENV file to include the lan ip of the Gateway."
echo "Use the lan IP and not the loopback (127.0.0.1)"
read -p "SMTP Server: " server
read -p "SMTP Port: " port
read -p "FROM: " from
read -p "TO: " to
swaks --To \$to --From \$from --header "Subject: Test mail" --body "This is a test mail" --server \$server --port \$port -tls -4
EOM
}
ShowLogo() {
echo " "
echo " +++ '++."
echo " +++ ++++"
echo " ++++"
echo " ,::: +++ +++ :+++++++ +++++++ .+++++++ .++ '++"
echo " ++++ .+++. '+++ ++++++++++ ++++++++ ++++++++++ ++++ ++++"
echo " ++++ ++++ ++++ +++++''++ +++++++ +++++++++ ++++ ++++"
echo " ++++ .++++ ++++ ++++ ++++ ++++ ++++ ++++"
echo " ++++ .++++ ++++ ++++ ++++ ++++ ++++ ++++"
echo " ++++ ++++ ++++ ++++ ++++ ++++ ++++ ++++"
echo " ++++++ ;+++ ++++ ++++ ++++ ++++++++"
echo " ++++ +++ ++' ++ ++' .++++"
echo " "
echo " S i m p l e E m a i l P r i v a c y"
echo " "
echo " "
}
WriteEnv() {
envFile=/var/virtru/vg/env/$gwName.env
/bin/cat <<EOM >$envFile
# Enable verbose logging in Gateway.
# Values
# Enable: 1
# Disable: 0
# Default: 0
# Required: No
# Note: Set this to 0 unless you are debugging something.
#
GATEWAY_VERBOSE_LOGGING=0
# Domain name of organization
# Values
# Domain
# Required: Yes
#
GATEWAY_ORGANIZATION_DOMAIN=$gwDomain
# Comma delimited list of trusted networks in CIDR formate.
# Inbound addresses allowed to connect to the gateway
# Values (examples)
# All IP: 0.0.0.0/0
# 2 IP: 2.2.2.2/32,2.2.2.3/32
# Required: Yes
#
$gwInboundRelay
# Enable Proxy Protocol for SMTP.
# For use behind a load balancer.
# Values
# Enable: 1
# Disable: 0
# Default: 1
# Required: No
#
GATEWAY_PROXY_PROTOCOL=0
# Comma delimited set of domains and next-hop destinations and optional ports
# Values
# Not defined/Commented out - Final delivery by MX
# GATEWAY_TRANSPORT_MAPS=*=>[Next hop FQDN]:port
# Default: Not defined/Commented out - Final delivery by MX
# Required: No
#
# Examples:
# GATEWAY_TRANSPORT_MAPS=*=>[smtp-relay.gmail.com]:587
# GATEWAY_TRANSPORT_MAPS=*=>[MX Record]:25
# GATEWAY_TRANSPORT_MAPS=*=>[1.1.1.]:25
#
$gwOutboundRelay
# The mode for the Gateway.
# Values
# decrypt-everything
# encrypt-everything
# dlp - Use rules defined on Virtru Dashboard (https://secure.virtru.com/dashboard)
# Default: encrypt-everything
# Required: Yes
#
GATEWAY_MODE=$gwMode
# Topology of the gateway.
# Values
# outbound
# inbound
# Default: outbound
# Required: Yes
GATEWAY_TOPOLOGY=$gwTopology
# URL to Virtru's ACM service.
# Required: Yes
# Note: Do not change this.
#
GATEWAY_ACM_URL=https://acm.virtru.com
# URL to Virtru's Accounts service.
# Required: Yes
# Note: Do not change this.
#
GATEWAY_ACCOUNTS_URL=https://accounts.virtru.com
# The base URL for remote content.
# Required: Yes
# Note: Do not change this.
#
GATEWAY_REMOTE_CONTENT_BASE_URL=https://secure.virtru.com/start
# DKIM certificate information
# Values
# Not defined/Commented out - Gateway will not perform any DKIM signing
# Complete record for DKIM signing
# Required: No
# Example:
# GATEWAY_DKIM_DOMAINS=gw._domainkey.example.com
#
# GATEWAY_DKIM_DOMAINS=$gwDkimSelector._domainkey.$gwDomain
# HMAC Token Name to connect to Virtru services such as Accounts and ACM.
# Values
# Value provided by Virtru
# Required: Yes
# Note:Contact Virtru Support for getting your token Name.
#
GATEWAY_API_TOKEN_NAME=$gwHmacName
# HMAC Token Secret to connect to Virtru services such as Accounts and ACM.
# Values
# Value provided by Virtru
# Required: Yes
# Note:Contact Virtru Support for getting your Token Secret.
#
GATEWAY_API_TOKEN_SECRET=$gwHmacSecret
# Amplitude Token to connect to the Virtru Events platform
# Values
# Value provided by Virtru
# Required: Yes
# Note:Contact Virtru Support for getting your Token.
#
GATEWAY_AMPLITUDE_API_KEY=$gwAmplitudeToken
# Consider a message as undeliverable, when delivery fails with a temporary error, and the time in the queue
# has reached the maximal_queue_lifetime limit.
# Time units: s (seconds), m (minutes), h (hours), d (days), w (weeks). The default time unit is d (days).
# Postfix default is '5d'. Set this ENV variable if default does not work.
# Values
# NumberUnits
# Default: 5d
# Required: No
# Note: Specify 0 when mail delivery should be tried only once.
#
MAX_QUEUE_LIFETIME=5m
# The maximal time between attempts to deliver a deferred message.
# Values
# NumberUnits
# Default: 4000s
# Required: No
# Note: Set to a value greater than or equal to MIN_BACKOFF_TIME
#
MAX_BACKOFF_TIME=45s
# The minimal time between attempts to deliver a deferred message
# Values
# NumberUnits
# Default: 300s
# Required: No
# Note: Set to a value greater than or equal to MIN_BACKOFF_TIME
#
MIN_BACKOFF_TIME=30s
# The time between deferred queue scans by the queue manager
# Values
# NumberUnits
# Default: 300s
# Required: No
#
QUEUE_RUN_DELAY=30s
# Gateway Inbound
# Enable Inbound TLS to the Gateway.
# Values
# 1 Enabled
# 0 Disabled
# Default: 1
# Require: No
#
GATEWAY_SMTPD_USE_TLS=1
# Gateway Inbound
# TLS level for inbound connections
# Values
# none
# mandatory
# opportunistic
# Require: No
#
GATEWAY_SMTPD_SECURITY_LEVEL=opportunistic
# Gateway Outbound
# Enable TLS at the Gateway.
# Values
# 1 Enabled
# 0 Disabled
# Default: 1
# Require: No
#
GATEWAY_SMTP_USE_TLS=1
# Gateway Outbound
# TLS level for outbound connections
# Values
# none
# mandatory
# opportunistic
# Require: No
#
GATEWAY_SMTP_SECURITY_LEVEL=opportunistic
# Gateway Outbound
# Outbound TLS requirements for a domain. Comma separated list.
# Example
# example.com=>none
# example.net=>maybe
# example.org=>encrypt
# GATEWAY_SMTP_TLS_POLICY_MAPS=example.com=>none,example.net=>maybe
#
# GATEWAY_SMTP_TLS_POLICY_MAPS=
# New Relic Key
# Customer provided key to log events in customer's New Relic Tenant
# Values
# Provided by New Relic
# Required: No
#
# GATEWAY_NEWRELIC_CRED=
# Inbound Authentication
# Enable inbound authentication.
# Supported modes: CRAM-MD5 or DIGEST-MD5
# Values
# 1 Enabled
# 0 Disabled
# Default: 0
# Require: No
#
# GATEWAY_SMTPD_SASL_ENABLED=0
# Inbound Authentication
# Accounts for Authentication
# Supported modes: CRAM-MD5 or DIGEST-MD5
# Example:
# GATEWAY_SMTPD_SASL_ACCOUNTS=user1=>password1,user2=>password2
# Required: No
#
# GATEWAY_SMTPD_SASL_ACCOUNTS=
#
# Inbound X-Header Authentication
# Enable inbound X-Header authentication
# Values
# 1 Enabled
# 0 Disabled
# Default: 0
# Require: No
#
# GATEWAY_XHEADER_AUTH_ENABLED=
#
# Inbound X-Header Authentication
# Enable inbound X-Header authentication Shared Secret
# Example:
# GATEWAY_XHEADER_AUTH_SECRET=123456789
#
# Require: No
#
# GATEWAY_XHEADER_AUTH_SECRET=
#
# CKS Enabled Organization
#
# If Gateway is in Decrypt Mode Required
# Required: Yes
# If Gateway is in Encrypt Mode Required only if the Organization is CKS enabled.
# Required: No
#
# GATEWAY_ENCRYPTION_KEY_PROVIDER=CKS
#
$gwCks
# CKS Key Intergenerational Period
# Time between Gateway CKS public/private client Key Generation
#
# Required: No
# Default: 360
#
# GATEWAY_CKS_SESSION_KEY_EXPIRY_IN_MINS=360
#
$gwCksKey
EOM
}
# Write the docker-run start script for this gateway instance into
# /var/virtru/vg/scripts and make it executable.  Uses the env file,
# queue/tls/dkim mounts and version collected earlier.  The heredoc is
# unquoted on purpose so $gwName/$gwFqdn/$gwPort/$gwVersion expand now.
WriteScript() {
    echo $gwVersion
    echo "script"
    scriptFile=/var/virtru/vg/scripts/setup-$gwName.sh
    /bin/cat <<EOM >$scriptFile
docker run \\
    --env-file /var/virtru/vg/env/$gwName.env \\
    -v /var/virtru/vg/tls/:/etc/postfix/tls \\
    -v /var/virtru/vg/queue/$gwName/:/var/spool/postfix \\
    -v /var/virtru/vg/dkim/:/etc/opendkim/keys \\
    --hostname $gwFqdn \\
    --name $gwName \\
    --publish $gwPort:25 \\
    --interactive --tty --detach \\
    --restart unless-stopped \\
    --log-opt max-size=10m \\
    --log-opt max-file=100 \\
    virtru/gateway:$gwVersion
EOM
    chmod +x $scriptFile
}
# Print the post-install instructions, including how to launch the
# generated start script ($scriptFile is set by WriteScript).
ShowNextSteps() {
    cat <<EOM
next steps
-----------------------
 Deploy Successful!
 Next Steps:
 
 run: docker login
 run: sh $scriptFile
-----------------------
EOM
}
# Entry Point: clear the terminal and start the interactive installer.
clear
EntryPoint
| true
|
ce12d96230257fac5e916d981417cc23d24caae9
|
Shell
|
zoopera/flavoevo
|
/Mutation_rate/s16.afterBQSR.Sum.part1.CntAnalyzedSites.new.sh
|
UTF-8
| 337
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
# For every per-sample GATK round-3 count file, generate a tiny SLURM
# batch script (same basename, .slm suffix) that runs the perl
# site-counting helper on it, then submit that script with sbatch.
dir="02_GATK"
cnts=($(find "$dir" -maxdepth 1 -type f -name "*.clean.sort.dedup.rnd3.cnt"))
for cnt in "${cnts[@]}"
do
    # Strip the directory and swap the trailing "cnt" for "slm"
    # (parameter expansion instead of the echo | sed | sed pipeline).
    base=${cnt##*/}
    sub=${base%cnt}slm
    {
        printf '#!/bin/bash\n\n'
        printf 'time ./s16.afterBQSR.Sum.part1.CntAnalyzedSites.new.pl %s\n' "$cnt"
    } > "$sub"
    chmod +x "$sub"
    sbatch "$sub"
done
| true
|
6e50e062a3bbbb0e05cdb1aef70ad2fe7dd41d7e
|
Shell
|
mdnfiras/plugops
|
/test.sh
|
UTF-8
| 1,101
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
# Smoke-test the lab infrastructure for a given domain: DNS, NFS, the
# main web node and the Jenkins interface.  Must run as root with
# exactly one argument (the domain).
if [[ ! $(whoami) = "root" ]]
then
	echo "Usage: Must be root"
	exit 1
fi
if [ "$#" -ne 1 ]
then
	echo "Usage: Must supply a domain"
	exit 1
fi
DOMAIN=$1

echo "=======> testing the DNS server :"
# An NXDOMAIN answer means the zone is not being served.
if [[ ! -z "$(nslookup ns.$DOMAIN 192.168.5.3 | grep NXDOMAIN )" ]]
then
	echo "=======> DNS server not working"
else
	echo "=======> DNS server working"
fi

echo "=======> testing the NFS server :"
# curl cannot speak NFS; exit code 52 ("empty reply") shows a listener.
curl 192.168.5.5:2049
if [[ ! $? -eq 52 ]]
then
	echo "=======> NFS server not working"
else
	echo "=======> NFS server working"
fi

echo "=======> testing the main node :"
res=$(curl http://$DOMAIN --resolve $DOMAIN:80:192.168.5.10 )
rc=$?
# Bug fix: the original wrote `[[ -z "$res" ]] | [[ ! $? -eq 0 ]]` --
# a *pipe* between two tests (not ||), and $? there was the status of
# the emptiness test, not of curl.  Capture curl's status first and
# combine the conditions with ||.  (Also dropped the unused `tries`.)
if [[ -z "$res" || $rc -ne 0 ]]
then
	echo "=======> main node not working"
else
	echo "=======> main node working"
fi

echo "=======> testing the jenkins interface :"
res=$(curl http://jenkins.devops.$DOMAIN --resolve jenkins.devops.$DOMAIN:80:192.168.5.100 )
rc=$?
if [[ -z "$res" || $rc -ne 0 ]]
then
	echo "=======> jenkins interface not working"
else
	echo "=======> jenkins interface working"
fi
| true
|
cbb3706c4564d1d45f16b13ba917921dcd949191
|
Shell
|
Uday251/CodingClub
|
/temp/log.sh
|
UTF-8
| 247
| 3.265625
| 3
|
[] |
no_license
|
#! /bin/bash -x
# For each rotated log file matching abc.log.1, print the dated name it
# would be renamed to, e.g. "abc.log.1" -> "abc-<DDMMYYYY>.log".
for filename in abc.log.1; do
	# Iterate the name directly instead of parsing `ls` output; skip it
	# cleanly when the file does not exist (the original just let ls error).
	[ -e "$filename" ] || continue
	fname1=${filename%%.*}     # text before the first dot ("abc")
	rest=${filename#*.}        # remainder after the first dot ("log.1")
	fname2=${rest%%.*}         # second dot-separated field ("log")
	DATE=$(date +%d%m%Y)
	new="$fname1-$DATE.$fname2"
	echo "filename changed to" "$new"
done
| true
|
ad5bf68613adda24d6cf094d9eef57c1a2dbc8c1
|
Shell
|
oandregal/ondestan
|
/.openshift/action_hooks/deploy~
|
UTF-8
| 917
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
# Pyramid OpenShift post-deploy pre-start configuration:
# activate the Python 2.7 virtualenv, require the postgresql cartridge,
# initialise the app database once, then run the test suite.
export PYTHON=python-2.7
export PATH=$PATH:~/$PYTHON/virtenv/bin/
export PYTHON_EGG_CACHE=~/$PYTHON/virtenv/lib/python2.7/site-packages
APP=ondestan
source ~/$PYTHON/virtenv/bin/activate

# Abort with instructions when no database cartridge is attached.
if [ -z $OPENSHIFT_DB_HOST ]
then
    echo 1>&2
    echo 1>&2
    echo "Could not find postgresql database.  Please run:" 1>&2
    echo "rhc-ctl-app -a $OPENSHIFT_APP_NAME -e add-postgresql-9.2" 1>&2
    echo 1>&2
    echo 1>&2
    exit 5
fi

cd $OPENSHIFT_REPO_DIR/wsgi/$APP

# Initialize our database (only once: guarded by a marker file in the
# persistent data dir).
if [ ! -e $OPENSHIFT_DATA_DIR/DB_INITIALIZED ]; then
    echo "Initializing database"
    paster setup-app production.ini
    touch $OPENSHIFT_DATA_DIR/DB_INITIALIZED
fi

# Run the test suite automatically
# At the moment failure does not prevent the code from getting deployed
echo "Running the test suite"
unset OPENSHIFT_APP_NAME
python setup.py test
| true
|
b7688f01c8471af7f64739cf6c25b89f0bbcc0bd
|
Shell
|
mawentao/gateway
|
/build.sh
|
UTF-8
| 494
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# Build the gateway with Maven (online profile, tests skipped) and
# collect the runnable artifacts into ./output.
target="./target"
outdir="./output"

# Run the Maven build; abort instead of packaging a broken build.
# (Bug fix: the original fell through and printed BUILD SUCCESS even
# when mvn failed.)
mvn clean install package -Dmaven.test.skip=true -U -e -Ponline || exit 1

##########################################
# Assemble the runnable output directory
##########################################
if [ -d "$outdir" ]; then
    rm -rf "$outdir"
fi
mkdir -p "$outdir"

# Copy every argument (file or directory) into $outdir.
cpfiles()
{
    local f
    for f in "$@"; do
        cp -r "$f" "$outdir"
    done
}

# Collect jars, the online configuration profile and the control script.
cpfiles $target/*.jar
cpfiles profile/online/*
cpfiles control.sh

echo "!!!BUILD SUCCESS!!!"
| true
|
00f386599c20ddd1eb6c23f88cdce57cc3c56fe1
|
Shell
|
scality/devstack-plugin-scality
|
/devstack/plugin.sh
|
UTF-8
| 8,483
| 3.3125
| 3
|
[
"Apache-2.0"
] |
permissive
|
###################
### Cinder
###################
# Register a Scality SOFS volume backend in cinder.conf.
# $1 - backend section name.  Relies on devstack's `iniset` helper and
# the $CINDER_CONF / $SCALITY_SOFS_* variables set by devstack.
function configure_cinder_backend_sofs {
    local be_name=$1
    iniset $CINDER_CONF $be_name volume_backend_name $be_name
    iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.scality.ScalityDriver"
    iniset $CINDER_CONF $be_name scality_sofs_config $SCALITY_SOFS_CONFIG
    iniset $CINDER_CONF $be_name scality_sofs_mount_point $SCALITY_SOFS_MOUNT_POINT
}
# One-time initialisation for the SOFS cinder backend: make sure the
# mount point exists, mount SOFS if no sfused mount is present, create a
# stack-user-writable "cinder" directory inside it, then unmount again.
function init_cinder_backend_sofs {
    if [[ ! -d $SCALITY_SOFS_MOUNT_POINT ]]; then
        sudo mkdir $SCALITY_SOFS_MOUNT_POINT
    fi
    sudo chmod +x $SCALITY_SOFS_MOUNT_POINT
    # We need to make sure we have a writable 'cinder' dir in SOFS
    # Detect an existing sfused fuse mount (excluding scality/sproxyd
    # named mounts); `|| true` keeps grep's no-match status from failing.
    local sfused_mount_point
    sfused_mount_point=$(mount | grep "/dev/fuse" | grep -v scality | grep -v sproxyd | cut -d" " -f 3 || true)
    if [[ -z "${sfused_mount_point}" ]]; then
        if ! sudo mount -t sofs $SCALITY_SOFS_CONFIG $SCALITY_SOFS_MOUNT_POINT; then
            echo "Unable to mount the SOFS filesystem! Please check the configuration in $SCALITY_SOFS_CONFIG and the syslog."; exit 1
        fi
        sfused_mount_point=$SCALITY_SOFS_MOUNT_POINT
    fi
    if [[ ! -d $sfused_mount_point/cinder ]]; then
        sudo mkdir $sfused_mount_point/cinder
    fi
    sudo chown $STACK_USER $sfused_mount_point/cinder
    # Unmount: the cinder driver remounts on demand.
    sudo umount $sfused_mount_point
    if [[ -x "$(which sfused 2>/dev/null)" ]]; then
        sudo service scality-sfused stop
    fi
}
###################
### Glance
###################
# Glance: during the devstack "install" phase, install the Scality
# sproxyd client and glance store from GitHub master tarballs.
if is_service_enabled g-api && [[ "$USE_SCALITY_FOR_GLANCE" == "True" ]]; then
    if [[ "$1" == "stack" && "$2" == "install" ]]; then
        sudo pip install https://github.com/scality/scality-sproxyd-client/archive/master.tar.gz
        sudo pip install https://github.com/scality/scality-glance-store/archive/master.tar.gz
    fi
fi
###################
### Nova
###################
# Nova: when a SOFS cinder backend is configured, point libvirt at the
# same SOFS config/mount point during the "post-config" phase.
if is_service_enabled nova; then
    if [[ "$1" == "stack" && "$2" == "post-config" ]]; then
        if [[ -n "$CINDER_ENABLED_BACKENDS" ]] && [[ ,${CINDER_ENABLED_BACKENDS} =~ ,sofs: ]]; then
            iniset $NOVA_CONF libvirt scality_sofs_config $SCALITY_SOFS_CONFIG
            iniset $NOVA_CONF libvirt scality_sofs_mount_point $SCALITY_SOFS_MOUNT_POINT
        fi
    fi
fi
###################
### Swift
###################
# Swift: install the Scality sproxyd object backend during "install",
# then switch every object-server replica to it during "post-config".
if is_service_enabled swift && [[ $USE_SCALITY_FOR_SWIFT == "True" ]]; then
    if [[ "$1" == "stack" && "$2" == "install" ]]; then
        sudo pip install https://github.com/scality/scality-sproxyd-client/archive/master.tar.gz
        sudo pip install https://github.com/scality/ScalitySproxydSwift/archive/master.tar.gz
    fi
    if [[ "$1" == "stack" && "$2" == "post-config" ]]; then
        for node_number in ${SWIFT_REPLICAS_SEQ}; do
            my_swift_node_config=${SWIFT_CONF_DIR}/object-server/${node_number}.conf
            iniset ${my_swift_node_config} app:object-server use egg:swift_scality_backend#sproxyd_object
            iniset ${my_swift_node_config} app:object-server sproxyd_endpoints $SCALITY_SPROXYD_ENDPOINTS
        done
    fi
fi
###################
### Manila
###################
if is_service_enabled manila && [[ $USE_SCALITY_FOR_MANILA == "True" ]]; then
if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
# XXX In order to avoid constant rebase of upstream, do a dirty copy
if [[ ! -d /tmp/scality-manila ]]; then
git clone -b ${MANILA_BRANCH:-master} ${MANILA_REPO} /tmp/scality-manila
fi
cp -r /tmp/scality-manila/manila/share/drivers/scality \
/opt/stack/manila/manila/share/drivers
source ${dir}/environment/netdef
# Manila general section
export MANILA_ENABLED_BACKENDS="ring"
export MANILA_DEFAULT_SHARE_TYPE="scality"
export MANILA_DEFAULT_SHARE_TYPE_EXTRA_SPECS="share_backend_name=scality_ring snapshot_support=False"
# Manila ring section
export MANILA_OPTGROUP_ring_driver_handles_share_servers=False
export MANILA_OPTGROUP_ring_share_backend_name=scality_ring
export MANILA_OPTGROUP_ring_share_driver=manila.share.drivers.scality.driver.ScalityShareDriver
export MANILA_OPTGROUP_ring_management_user=${MANAGEMENT_USER}
export MANILA_OPTGROUP_ring_ssh_key_path=${MANAGEMENT_KEY_PATH}
if [[ $SCALITY_MANILA_CONFIGURE_NFS == "True" ]]; then
export MANILA_OPTGROUP_ring_nfs_export_ip=${RINGNET_NFS_EXPORT_IP}
export MANILA_OPTGROUP_ring_nfs_management_host=${NFS_CONNECTOR_HOST}
fi
if [[ $SCALITY_MANILA_CONFIGURE_SMB == "True" ]]; then
export MANILA_OPTGROUP_ring_smb_export_ip=${RINGNET_SMB_EXPORT_IP}
export MANILA_OPTGROUP_ring_smb_management_host=${CIFS_CONNECTOR_HOST}
export MANILA_OPTGROUP_ring_smb_export_root=${SMB_EXPORT_ROOT:-/ring/fs}
fi
fi
if [[ $CONFIGURE_NEUTRON_FOR_MANILA_WITH_SCALITY == "True" ]]; then
# install phase: Setup bridge
if [[ "$1" == "stack" && "$2" == "install" ]]; then
sudo ovs-vsctl add-br br-ringnet
fi
# post-config phase: Configure neutron
if [[ "$1" == "stack" && "$2" == "post-config" ]]; then
iniset /etc/neutron/plugins/ml2/ml2_conf.ini ml2_type_flat flat_networks physnet
iniset /etc/neutron/plugins/ml2/ml2_conf.ini ovs bridge_mappings physnet:br-ringnet
fi
# extra phase: Create neutron network for tenant use
if [[ "$1" == "stack" && "$2" == "extra" ]]; then
source ${dir}/environment/netdef
neutron net-create ringnet --shared --provider:network_type flat --provider:physical_network physnet
extra_routes=""
if [[ $SCALITY_MANILA_CONFIGURE_NFS == "True" ]]; then
extra_routes+="--host-route destination=${RINGNET_NFS},nexthop=${TENANT_NFS_GW} "
fi
if [[ $SCALITY_MANILA_CONFIGURE_SMB == "True" ]]; then
extra_routes+="--host-route destination=${RINGNET_SMB},nexthop=${TENANT_SMB_GW} "
fi
neutron subnet-create ringnet --allocation-pool ${TENANTS_POOL} --name ringsubnet ${TENANTS_NET} \
--enable-dhcp $extra_routes
# Add IP to provider network bridge
sudo ip addr add ${TENANTS_BR} dev br-ringnet
fi
if [[ "$1" == "unstack" ]]; then
sudo ovs-vsctl del-br br-ringnet
fi
fi
fi
###################
### Tempest
###################
# Tempest: during the "extra" phase, enable the manila share tests and
# disable the features the Scality driver does not support.
# NOTE(review): the `else` below runs in *every* phase other than
# stack/extra, so this message is printed even on phases where no
# tempest configuration is expected -- confirm whether that is intended.
if is_service_enabled tempest && is_service_enabled manila && [[ $USE_SCALITY_FOR_MANILA == "True" ]]; then
    if [[ "$1" == "stack" && "$2" == "extra" ]]; then
        iniset $TEMPEST_CONFIG service_available manila True
        iniset $TEMPEST_CONFIG cli enabled True
        iniset $TEMPEST_CONFIG share multitenancy_enabled False
        iniset $TEMPEST_CONFIG share run_extend_tests False
        iniset $TEMPEST_CONFIG share run_shrink_tests False
        iniset $TEMPEST_CONFIG share run_snapshot_tests False
        iniset $TEMPEST_CONFIG share run_consistency_group_tests False
        # Remove the following line when https://review.openstack.org/#/c/263664 is reverted
        # and https://bugs.launchpad.net/manila/+bug/1531049 is fixed
        ADMIN_TENANT_NAME=${ADMIN_TENANT_NAME:-"admin"}
        ADMIN_PASSWORD=${ADMIN_PASSWORD:-"secretadmin"}
        iniset $TEMPEST_CONFIG auth admin_username ${ADMIN_USERNAME:-"admin"}
        iniset $TEMPEST_CONFIG auth admin_password $ADMIN_PASSWORD
        iniset $TEMPEST_CONFIG auth admin_tenant_name $ADMIN_TENANT_NAME
        iniset $TEMPEST_CONFIG auth admin_domain_name ${ADMIN_DOMAIN_NAME:-"Default"}
        iniset $TEMPEST_CONFIG identity username ${TEMPEST_USERNAME:-"demo"}
        iniset $TEMPEST_CONFIG identity password $ADMIN_PASSWORD
        iniset $TEMPEST_CONFIG identity tenant_name ${TEMPEST_TENANT_NAME:-"demo"}
        iniset $TEMPEST_CONFIG identity alt_username ${ALT_USERNAME:-"alt_demo"}
        iniset $TEMPEST_CONFIG identity alt_password $ADMIN_PASSWORD
        iniset $TEMPEST_CONFIG identity alt_tenant_name ${ALT_TENANT_NAME:-"alt_demo"}
        iniset $TEMPEST_CONFIG validation ip_version_for_ssh 4
        iniset $TEMPEST_CONFIG validation ssh_timeout $BUILD_TIMEOUT
        iniset $TEMPEST_CONFIG validation network_for_ssh ${PRIVATE_NETWORK_NAME:-"private"}
    else
        echo "Unable to configure tempest for the Scality Manila driver"
    fi
fi
| true
|
25ebae5233f3023a334f5d5658cf9145ee105991
|
Shell
|
FreddyWordingham/arctk-cpp
|
/resources/bash/build.sh
|
UTF-8
| 2,929
| 3.4375
| 3
|
[] |
no_license
|
# == BUILDING ==
# -- Clean --
# Remove all build outputs (build dir, binaries, generated config
# headers) and any installed copies of the library.
arctk.clean()
{
    rm -r $ARCTK_DIR/build
    rm -r $ARCTK_DIR/bin
    rm -r $ARCTK_DIR/include/arctk/config
    arctk.uninstall
}
# -- Build --
# Configure a fresh cmake build.
# With no arguments, replays the 9 arguments saved in ARCTK_BUILD_ARGS
# (persisted to $ARCTK_DIR/.build); otherwise expects exactly 9:
#   build_type cc cxx unit_testing coverage benchmark clang-tidy iwyu document
arctk.build()
{
    if [ "$#" == "0" ]; then
        if [ -z "$ARCTK_BUILD_ARGS" ]; then
            echo "Error! ARCTK_BUILD_ARGS are not set!"
            return
        fi

        # Replay the previously saved argument set.
        arctk.build $ARCTK_BUILD_ARGS

        return
    fi

    if [ "$#" != "9" ]; then
        echo "Error! Incorrect number of arguments. ($#)"
        echo "arctk.build <build_type> <C compiler> <C++ compiler> <unit testing> <coverage> <benchmark> <clang-tidy> <iwyu> <document>"
        return
    fi

    # Persist the arguments so a bare `arctk.build` can repeat this build.
    ARCTK_BUILD_ARGS="$1 $2 $3 $4 $5 $6 $7 $8 $9"
    echo "export ARCTK_BUILD_ARGS='$ARCTK_BUILD_ARGS'" > $ARCTK_DIR/.build

    arctk.clean

    mkdir $ARCTK_DIR/build
    cd $ARCTK_DIR/build
    cmake -j 8 -DCMAKE_EXPORT_COMPILE_COMMANDS=ON \
        -DCMAKE_BUILD_TYPE=$1 \
        -DCMAKE_C_COMPILER=$2 \
        -DCMAKE_CXX_COMPILER=$3 \
        -DUNIT_TESTING=$4 \
        -DCOVERAGE=$5 \
        -DBENCHMARK=$6 \
        -DCLANG_TIDY=$7 \
        -DIWYU=$8 \
        -DDOCUMENT=$9 \
        ..
    cd - > /dev/null
}
# -- Make --
# Compile through clang's scan-build static analyser and open the report.
arctk.make()
{
    cd $ARCTK_DIR/build
    scan-build -analyze-headers --force-analyze-debug-code --view make -j 8
    cd - > /dev/null
}

# -- Testing --
# Run every built unit-test binary found in bin/test/.
arctk.test()
{
    cd $ARCTK_DIR/build
    for test in $ARCTK_DIR/bin/test/*; do
        if [ -f $test ]; then
            $test
        fi
    done
    cd - > /dev/null
}

# Run every built benchmark binary found in bin/bench/.
arctk.bench()
{
    cd $ARCTK_DIR/build
    for bench in $ARCTK_DIR/bin/bench/*; do
        if [ -f $bench ]; then
            $bench
        fi
    done
    cd - > /dev/null
}
# -- Documentation --
# Build the doxygen documentation and open it in the default browser.
arctk.doc()
{
    cd $ARCTK_DIR/build
    make doc
    mv docs/ ..
    open ../docs/html/index.html
    cd - > /dev/null
}

# -- Coverage --
# Collect lcov coverage data, strip system headers, print the summary.
arctk.cover()
{
    cd $ARCTK_DIR
    lcov --capture --directory . --output-file coverage.info
    lcov --remove coverage.info '/usr/*' --output-file coverage.info
    lcov --list coverage.info
    # bash <(curl -s https://codecov.io/bash) -t "57886efc-8eca-416f-9e8b-1b0ee825efe5" -f coverage.info || echo "Codecov did not collect coverage reports"
    cd - > /dev/null
}

# -- Include-What-You-Use --
# Run iwyu over the whole compilation database.
arctk.iwyu()
{
    cd $ARCTK_DIR
    iwyu_tool.py -p build/compile_commands.json
    # iwyu_tool.py -p build/compile_commands.json -- --no_fwd_decls
    cd - > /dev/null
}

# -- Installation --
# Reinstall the library and headers into /usr/local (uninstall first so
# stale headers do not linger).
arctk.install()
{
    cd $ARCTK_DIR/build
    arctk.uninstall
    make install
    cd - > /dev/null
}

# Remove the installed static library and header tree from /usr/local.
arctk.uninstall()
{
    rm /usr/local/lib/libarctk.a
    rm -r /usr/local/include/arctk/
}
| true
|
e05c064280c1e366d082dc3d604eaec46b40968e
|
Shell
|
jfouk/dotfiles
|
/tmux/theme.sh
|
UTF-8
| 1,751
| 2.703125
| 3
|
[] |
no_license
|
# tmux theme config
# NOTE(review): the -fg/-bg option-suffix forms used below were removed in
# tmux 2.9 in favour of "-style" options -- confirm the target tmux version.
#border colors
set -g pane-active-border-fg cyan
set -g pane-border-fg colour235 # dark gray
set -g pane-border-bg default # super dark gray
set -g status-bg colour234
# add powerline functionality
set -g status-left-length 100
set -g status-right-length 200
set -g status-interval 5
# Trying Window Title Colors -- palette used throughout the status line
tm_color_active=colour045
tm_color_inactive=colour241
tm_color_inactive_2=colour248
tm_color_feature=colour10
tm_color_music=colour10
tm_active_border_color=colour10
tm_color_branch=colour3
tm_color_running=colour73
# default window title colors
set-window-option -g window-status-fg $tm_color_inactive
set-window-option -g window-status-bg default
set -g window-status-format "#I #W"
# active window title colors
set-window-option -g window-status-current-fg $tm_color_active
set-window-option -g window-status-current-bg default
set-window-option -g window-status-current-format "#[bold]#I #W"
# message text
set-option -g message-bg default
set-option -g message-fg $tm_color_active
# Status segments: session name on the left; uptime gauges (from helper
# scripts in the dotfiles repo) plus the date on the right.
tm_session_name="#[fg=$tm_color_feature,bold] #S"
#tm_git_branch="#[fg=$tm_color_branch,bold] #(uptime -p)"
tm_active_uptime="#[fg=$tm_color_branch,bold] ⇡#(/home/jfouk/Development/Setup/dotfiles/tmux/uptime.sh)"
tm_running_uptime="#[fg=$tm_color_running,bold] ▲#(/home/jfouk/Development/Setup/dotfiles/tmux/active_uptime.sh)"
tm_idle_uptime="#[fg=$tm_color_inactive_2,bold] ⇣#(/home/jfouk/Development/Setup/dotfiles/tmux/idleuptime.sh)"
#tm_pwd="#[fg=$tm_color_music,bold] #(~/Development/Setup/dotfiles/tmux/tmux-pwd.sh)"
tm_date="#[fg=$tm_color_inactive] %R %b %d"
set -g status-left $tm_session_name' '
set -g status-right $tm_running_uptime' '$tm_active_uptime' '$tm_idle_uptime' '$tm_date
| true
|
b945e560e882ffa2d49268fd1bd0df959a3cc121
|
Shell
|
Hubert07/Studia
|
/ksiazka_telefon/wyslijmail.sh
|
UTF-8
| 399
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
# Interactive phone-book lookup: ask for a first and last name, look the
# person up in dane.txt and print their phone number (3rd column).
#
# Fixes vs. original:
#  * "$naziwsko" was a typo for "$nazwisko", so the match test always
#    ran against the first name only;
#  * `[ "$(... grep -c ...)" > 0 ]` was a *redirection* (it created a
#    file named "0") and the test was always true — use `grep -q`;
#  * quoted "$dalej"; `read -r` so backslashes are taken literally.
dalej="t"
while [ "$dalej" = "t" ]
do
	clear
	echo -e "Wysylanie wiadomosci \n\nPodaj imie: "
	read -r imie
	echo "Podaj nazwisko: "
	read -r nazwisko
	if grep -q -- "$imie $nazwisko" dane.txt
	then
		nr_tel=$(grep -- "$imie $nazwisko" dane.txt | awk '{print $3}')
		echo "$nr_tel"
	else
		echo "Nie znaleziono osoby"
	fi
	echo "Chcesz kontynuowac? t/n"
	read -r dalej
done
./ksiazka.sh
| true
|
db501b54989b154231a60fcd1f5a15b7982852ea
|
Shell
|
mfvalin/dot-profile-setup
|
/bin/soumet_lajob
|
UTF-8
| 339
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/ksh
#set -x
# soumet_lajob: copy a job tarball ($1, default lajob.tar) into a unique
# per-host sandbox directory, unpack it, run its ./GO entry point, then
# clean the sandbox up.  UnIqUe is exported so children can find it.
# NOTE(review): assumes $TMPDIR is set; if empty the sandbox lands at
# filesystem root — confirm the calling environment guarantees it.
export UnIqUe=${TMPDIR}/`hostname`$$
mkdir -p $UnIqUe
if [[ ! -d $UnIqUe ]] ; then echo ERROR: soumet_lajob cannot create necessary temporary directory ; exit 1 ; fi
cp ${1:-lajob.tar} $UnIqUe/lajob.tar
ls -al $UnIqUe
# Subshell keeps the caller's working directory untouched.
( cd $UnIqUe && tar xvof lajob.tar && ./GO )
#sleep 5
# Best-effort cleanup of the sandbox.
if [[ -d $UnIqUe ]] ; then rm -rf $UnIqUe ; fi
| true
|
509d6cabf6d34cb63145bc7b3766a783439eae50
|
Shell
|
green-green-avk/AnotherTerm-scripts
|
/repo-linuxcontainers.sh
|
UTF-8
| 1,962
| 3.703125
| 4
|
[
"MIT"
] |
permissive
|
#!/system/bin/sh
set -e
S_ESC="$(echo -en '\e')"
S_N="$(echo -en '\n')"
K_BACK='-2'
FD_I=8
FD_O=9
eval "exec $FD_I<&0"
eval "exec $FD_O>&1"
# Print a 1-based numbered menu of its arguments on the UI descriptor
# ($FD_O); single-digit indices get a leading space for alignment.
show_list() {
    local i=1
    for E in "$@" ; do
        [ "$i" -lt 10 ] && echo -n ' ' >&$FD_O
        echo "$i. $E" >&$FD_O
        i=$((i+1))
    done
}
# Prompt for a 1-based menu choice in the range 1..$1 on the UI fds.
# Echoes the selected 0-based index on success; echoes $K_BACK (-2,
# defined above) for any other input, which callers treat as "exit".
read_list_elt() {
local N
echo "Anything else - exit" >&$FD_O
echo -n ': ' >&$FD_O
read N >&$FD_O <&$FD_I
# The redirections silence "integer expression expected" complaints
# from [ when the user types something non-numeric.
if [ "$N" -gt 0 -a "$N" -le "$1" ] >/dev/null 2>&1 ; then
echo $(($N-1))
return 0
fi
echo "$K_BACK"
}
# Map an Android ABI name to the corresponding uname-style architecture
# string.  Unrecognized values pass through unchanged.
to_uname_arch() {
    local abi=$1
    case "$abi" in
        x86)         echo i686 ;;
        amd64)       echo x86_64 ;;
        armeabi-v7a) echo armv7a ;;
        arm64-v8a)   echo aarch64 ;;
        *)           echo "$abi" ;;
    esac
}
# Accept only the uname-style arch names this installer supports.
# On success: echo the value back and return 0.  Otherwise: return 1.
validate_arch() {
    local known
    for known in armv7a aarch64 i686 amd64 ; do
        if [ "$1" = "$known" ] ; then
            echo $1
            return 0
        fi
    done
    return 1
}
# Translate a uname-style architecture into the linuxcontainers.org
# repository naming; unknown values pass through untouched.
to_lco_arch() {
    local out=$1
    case "$1" in
        armv7a)  out=armhf ;;
        aarch64) out=arm64 ;;
        i686)    out=i386 ;;
        x86_64)  out=amd64 ;;
    esac
    echo "$out"
}
# There is no uname on old Androids.
U_ARCH="$(validate_arch "$(uname -m 2>/dev/null)" || ( aa=($MY_DEVICE_ABIS) ; to_uname_arch "${aa[0]}" ))"
R_ARCH="$(to_lco_arch "$U_ARCH")"
echo "$U_ARCH"
# Read the linuxcontainers image index from stdin (semicolon-separated
# records: DISTRO;RELEASE;ARCH;VARIANT;TIMESTAMP;PATH), keep the
# default-variant entries matching this device's arch ($R_ARCH), show
# them as a menu, and echo the chosen "distro release path" triple in
# shell-quoted form for the caller to eval into an array.
chooser() {
TL=()
PL=()
DL=()
RL=()
# NOTE(review): `PATH` here shadows the command-search PATH; harmless
# only because chooser runs in the pipeline's subshell below — confirm
# before reusing this function elsewhere.
while IFS=';' read -s -r DISTRO RELEASE ARCH VAR TS PATH
do
if [ "$VAR" = 'default' -a "$ARCH" = "$R_ARCH" ]
then
TL+=("$DISTRO / $RELEASE")
PL+=("$PATH")
DL+=("$DISTRO")
RL+=("$RELEASE")
fi
done
show_list "${TL[@]}"
N=$(read_list_elt "${#PL[@]}")
# read_list_elt yields a negative sentinel (K_BACK) for "exit".
if [ "$N" -lt 0 ] ; then
exit 0
fi
local D="${DL[$N]}"
local R="${RL[$N]}"
local P="${PL[$N]}"
# ${var@Q} (bash 4.4+) expands to a shell-quoted form safe to re-parse.
echo "${D@Q} ${R@Q} ${P@Q}"
}
ARGS=($("$TERMSH" cat \
https://images.linuxcontainers.org/meta/1.0/index-user \
| chooser))
if [ -z "$ARGS" ] ; then exit 0 ; fi
export ROOTFS_URL="https://images.linuxcontainers.org/${ARGS[2]}/rootfs.tar.xz"
S='install-linuxcontainers.sh'
"$TERMSH" copy -f -fu \
"https://raw.githubusercontent.com/green-green-avk/AnotherTerm-scripts/master/$S" \
-tp . && chmod 755 "$S" && sh "./$S" -a "${ARGS[0]}" "${ARGS[1]}"
| true
|
af02ee1f4d249ec882abcbded04f17aa78b845ad
|
Shell
|
jclehner/TC72XX_LxG1.0.10mp5_OpenSrc
|
/userspace/gpl/apps/smartmontools-patch/smartcmd/healthy.sh
|
UTF-8
| 305
| 2.609375
| 3
|
[] |
no_license
|
#! /bin/sh
# Report SMART overall-health for the drive given as $1 (SAT bridge).
# Exit status: 0 = PASSED, 1 = FAILED.
#
# Fix: the original invoked smartctl twice for one query (slow, and the
# two reads could in principle disagree); query once, reuse the captured
# line, and quote "$1".
RESULT=$(/usr/local/sbin/smartctl -H -d sat "$1" | grep "SMART overall-health self-assessment test result:")
[ -n "$RESULT" ] && echo "$RESULT"
echo "$RESULT" | grep "FAILED"
RETVAL=$?
# FAILED
[ $RETVAL -eq 0 ] && exit 1
# PASSED == ! FAILED
exit 0
| true
|
4d4cb19aa35f5e8a6add151b1efb97d2463d267e
|
Shell
|
TrendingTechnology/flynn
|
/Examples/ClusterCounter/meta/CombinedBuildPhases.sh
|
UTF-8
| 1,121
| 3.53125
| 4
|
[
"MIT"
] |
permissive
|
# When we regenerate the xcode project using "make xcode", we lose the run scripts buiild phases. As we
# rely on several scripts now, we're combining them into this one script. This has a couple of
# advantages:
# 1. We can run them easily from the Makefile as well as from Xcode
# 2. We only need to manually add one build phase instead of 3+
#
# cd ${SRCROOT}; ./meta/CombinedBuildPhases.sh
#
# This script assumes that the current directory is the Server project directory
set -e
# FlynnLint - Confirms all Flynn code is concurrently safe
FLYNNLINTSWIFTPM=./.build/checkouts/flynn/meta/FlynnLint
FLYNNLINTLOCAL=./../../meta/FlynnLint
if [ -f "${FLYNNLINTSWIFTPM}" ]; then
${FLYNNLINTSWIFTPM} ./
elif [ -f "${FLYNNLINTLOCAL}" ]; then
${FLYNNLINTLOCAL} ./
else
echo "warning: Unable to find FlynnLint, aborting..."
fi
# SwiftLint - Confirms all swift code meets basic formatting standards
if which swiftlint >/dev/null; then
swiftlint autocorrect --path ./Sources/
swiftlint --path ./Sources/
else
echo "warning: SwiftLint not installed, download from https://github.com/realm/SwiftLint"
fi
| true
|
32684b6be0b2dcd44b129a5139bbea9da60556b7
|
Shell
|
breakersun/openwrt-package
|
/lienol/luci-app-passwall/root/usr/share/passwall/monitor.sh
|
UTF-8
| 2,169
| 3.25
| 3
|
[] |
no_license
|
#!/bin/sh
CONFIG=passwall
CONFIG_PATH=/var/etc/$CONFIG
# Read option $2 from the named section $1 of the passwall UCI config;
# print $3 as the fallback when the option is absent.
uci_get_by_name() {
	local val
	val=$(uci get $CONFIG.$1.$2 2>/dev/null)
	echo ${val:=$3}
}
# Read option $2 from the first section of type $1 (@type[0]) of the
# passwall UCI config; print $3 as the fallback when absent.
uci_get_by_type() {
	local val
	val=$(uci get $CONFIG.@$1[0].$2 2>/dev/null)
	echo ${val:=$3}
}
TCP_REDIR_SERVER1=$(uci_get_by_type global tcp_redir_server1 nil)
TCP_REDIR_PORT=$(uci_get_by_type global_proxy tcp_redir_port nil)
UDP_REDIR_SERVER1=$(uci_get_by_type global udp_redir_server1 nil)
UDP_REDIR_PORT=$(uci_get_by_type global_proxy udp_redir_port nil)
[ "$UDP_REDIR_SERVER1" == "default" ] && UDP_REDIR_SERVER1=$TCP_REDIR_SERVER1
SOCKS5_PROXY_SERVER1=$(uci_get_by_type global socks5_proxy_server1 nil)
dns_mode=$(uci_get_by_type global dns_mode)
use_haproxy=$(uci_get_by_type global_haproxy balancing_enable 0)
use_kcp=$(uci_get_by_name $TCP_REDIR_SERVER1 use_kcp 0)
kcp_port=$(uci_get_by_type global_proxy kcptun_port 11183)
#tcp
if [ $TCP_REDIR_SERVER1 != "nil" ]; then
icount=$(ps -w | grep -i -E "ss-redir|ssr-redir|v2ray|brook tproxy -l 0.0.0.0:$TCP_REDIR_PORT" | grep $CONFIG_PATH/TCP.json | grep -v grep | wc -l)
if [ $icount = 0 ]; then
/etc/init.d/passwall restart
exit 0
fi
fi
#udp
if [ $UDP_REDIR_SERVER1 != "nil" ]; then
icount=$(ps -w | grep -i -E "ss-redir|ssr-redir|v2ray|brook tproxy -l 0.0.0.0:$UDP_REDIR_PORT" | grep $CONFIG_PATH/UDP.json | grep -v grep | wc -l)
if [ $icount = 0 ]; then
/etc/init.d/passwall restart
exit 0
fi
fi
#socks5
if [ $SOCKS5_PROXY_SERVER1 != "nil" ]; then
icount=$(ps -w | grep -i -E "ss-redir|ssr-redir|v2ray|brook client" | grep $CONFIG_PATH/SOCKS5.json | grep -v grep | wc -l)
if [ $icount = 0 ]; then
/etc/init.d/passwall restart
exit 0
fi
fi
#dns
icount=$(netstat -apn | grep 7913 | wc -l)
if [ $icount = 0 ]; then
/etc/init.d/passwall restart
exit 0
fi
#kcptun
if [ $use_kcp -gt 0 ]; then
icount=$(ps -w | grep kcptun_client | grep $kcp_port | grep -v grep | wc -l)
if [ $icount = 0 ]; then
/etc/init.d/passwall restart
exit 0
fi
fi
#haproxy
if [ $use_haproxy -gt 0 ]; then
icount=$(ps -w | grep haproxy | grep $CONFIG_PATH/haproxy.cfg | grep -v grep | wc -l)
if [ $icount = 0 ]; then
/etc/init.d/passwall restart
exit 0
fi
fi
| true
|
d5557e4645ffeab8316f22e4370bfce27c33abe4
|
Shell
|
j-reardon/linux.config
|
/polybar/launch.sh
|
UTF-8
| 229
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
# Restart polybar: stop any running instances, wait until they are
# really gone, then launch the top and bottom bars in the background.
killall -q polybar
# Poll once a second until no polybar process owned by us remains.
until ! pgrep -u $UID -x polybar > /dev/null; do
    sleep 1
done
polybar top &
polybar bottom &
| true
|
fb551b119ffea3a4e2a5f089eee7864225170b34
|
Shell
|
sarroutbi/buscobici
|
/getscripts/rometbikes_get.sh
|
UTF-8
| 1,910
| 2.65625
| 3
|
[
"ISC"
] |
permissive
|
#!/bin/bash
#
# Copyright © 2012-2014 Sergio Arroutbi Braojos <sarroutbi@gmail.com>
#
# Permission to use, copy, modify, and/or distribute this software
# for any purpose with or without fee is hereby granted, provided that
# the above copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED “AS IS” AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
# OR PERFORMANCE OF THIS SOFTWARE.
#
URL="rometbikes.com"
ONLY_DOMAIN="rometbikes.com"
EXCLUDE="-Rgif -Rpng -Rjpg"
MAX_TRIES=10
MAX_TIMEOUT=10
. ./common_get
#### Add kids first, to stay in the first
#### dumped results and avoid being generated
#### with different KIND
#### KIDS ####
KIDS_MTB_BASE="${URL}/19-mtb-junior?p="
KIDS_MTB_PAGES="1"
bubic_get_page_outfile "${KIDS_MTB_BASE}" "${KIDS_MTB_PAGES}" kids-mtb
#### KIDS-YOUNG ####
KIDS_YOUNG_BASE="${URL}/10-adolescente-24?p="
KIDS_YOUNG_PAGES="1"
bubic_get_page_outfile "${KIDS_YOUNG_BASE}" "${KIDS_YOUNG_PAGES}" kids-young
#### MTB ####
MTB_BASE="${URL}/6-mtb?p="
MTB_PAGES="$(seq 1 5)"
bubic_get_page_outfile "${MTB_BASE}" "${MTB_PAGES}" mtb
#### URBAN ####
URBAN_BASE="${URL}/7-bicicletas-urbanas?p="
URBAN_PAGES="$(seq 1 3)"
bubic_get_page_outfile "${URBAN_BASE}" "${URBAN_PAGES}" urban
#### TREKKING ####
TREKKING_BASE="${URL}/8-bicicletas-trekking?p="
TREKKING_PAGES="$(seq 1 3)"
bubic_get_page_outfile "${TREKKING_BASE}" "${TREKKING_PAGES}" trekking
#### ROAD ####
ROAD_BASE="${URL}/9-bicicletas-de-carretera?p="
ROAD_PAGES="1"
bubic_get_page_outfile "${ROAD_BASE}" "${ROAD_PAGES}" road
| true
|
d7a3cd60b2420a20e54f7d976ec8a27d52a566be
|
Shell
|
lk16/squared
|
/resources/images/create_gifs.sh
|
UTF-8
| 229
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate a morphing GIF for every ordered pair of PNGs in the current
# directory: a.png + b.png -> a_to_b.gif (8 interpolated frames).
#
# Fixes vs. original: iterate the glob directly instead of parsing `ls`
# (which breaks on whitespace), quote all expansions, skip cleanly when
# no *.png matches, and `wait` for the background ImageMagick jobs so
# the script does not exit while conversions are still running.
for lhs in *.png
do
	[ -e "$lhs" ] || continue
	for rhs in *.png
	do
		out_file="${lhs%.*}_to_${rhs%.*}.gif"
		echo "Generating $out_file"
		convert "$lhs" "$rhs" -loop 1 -morph 8 "$out_file" &
	done
done
wait
| true
|
36c1344afa4eef53eb39e95c208edbc50e512c5e
|
Shell
|
tvirolai/aineistokartoitus
|
/scripts/aja_csv.sh
|
UTF-8
| 211
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate a CSV field table from each library database dump
# (../data/<DB>.seq -> ../data/<DB>_fields.csv).
databases="PIKI ANDERS PILOTIT VASKI"
for db in $databases
do
    echo "Generating CSV report from $db..."
    python3 batch-to-fieldtable.py -i "../data/${db}.seq" -o "../data/${db}_fields.csv"
done
| true
|
92dee96f1f220ae6e6ffd7910a5944e295861c21
|
Shell
|
smallneo/termux-api-package
|
/scripts/termux-sms-send
|
UTF-8
| 893
| 4.1875
| 4
|
[
"MIT"
] |
permissive
|
#!/data/data/com.termux/files/usr/bin/sh
# termux-sms-send: send an SMS via the Termux:API helper binary.
# Message text comes from the arguments, or from stdin when none given.
set -e -u
SCRIPTNAME=termux-sms-send
# Print usage and exit successfully (reached via -h).
show_usage () {
echo "Usage: $SCRIPTNAME -n number[,number2,number3,...] [text]"
echo "Send a SMS message to the specified recipient number(s). The text to send is either supplied as arguments or read from stdin if no arguments are given."
echo " -n number(s) recipient number(s) - separate multiple numbers by commas"
exit 0
}
RECIPIENTS=""
while getopts :hn: option
do
case "$option" in
h) show_usage;;
n) RECIPIENTS="--esa recipients $OPTARG";;
?) echo "$SCRIPTNAME: illegal option -$OPTARG"; exit 1;
esac
done
shift $((OPTIND-1))
if [ -z "$RECIPIENTS" ]; then
echo "$SCRIPTNAME: no recipient number given"; exit 1;
fi
# $CMD and $RECIPIENTS are deliberately left unquoted below so they
# word-split into the binary, the "SmsSend" verb and the --esa options.
CMD="/data/data/com.termux/files/usr/libexec/termux-api SmsSend $RECIPIENTS"
if [ $# = 0 ]; then
$CMD
else
echo "$@" | $CMD
fi
| true
|
dec9f570a5f4c6e2ccfd5f5476e6c6ad803635d6
|
Shell
|
AnthonyVDeCaria/pstb
|
/startNode.sh
|
UTF-8
| 1,281
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
. scripts/functions.sh
# Print startNode.sh usage information and abort the script.
error() {
    echo "ERROR: Not enough arguments provided! "
    echo "<memory> <name> <context> <ipAddress> <objectPort> <engine> <nodetype> <sendDiary>"
    echo "(OPTIONAL) -d <username> (this is distributed)"
    # 10 == N_ARGS
    exit 10
}
if [ $# -lt 8 ];
then
error
fi
memory=$1
shift
name=$1
shift
context=$1
shift
ipAddress=$1
shift
objectPort=$1
shift
engine=$1
shift
nodetype=$1
shift
sendDiary=$1
shift
while getopts "d:" opt; do
case "${opt}" in
d)
username=${OPTARG}
;;
\?)
error
;;
esac
done
checkIfInt $memory
memCheck=$?
if [[ $memCheck -ne 0 ]] ;
then
echo "ERROR: Memory is not an integer!"
exit 10 #N_ARGS
fi
checkIfInt $objectPort
opCheck=$?
if [[ $opCheck -ne 0 ]] ;
then
echo "ERROR: ObjectPort is not an integer!"
exit 10 #N_ARGS
fi
java -Xmx"$memory"M \
-cp target/pstb-0.0.1-SNAPSHOT-jar-with-dependencies.jar \
-Djava.rmi.server.codebase=file:target/pstb-0.0.1-SNAPSHOT-jar-with-dependencies.jar \
-Djava.security.policy=etc/java.policy \
-Djava.awt.headless=true \
pstb.benchmark.process.PSTBProcess \
$name $context $ipAddress $objectPort $engine $nodetype $sendDiary $username
exitVal=$?
echo "$exitVal"
exit $exitVal
| true
|
65b5a95a095ede76d4fc24fdc3b3bb485dadc0ee
|
Shell
|
maximesteisel/webtriathlon
|
/build.sh
|
UTF-8
| 376
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
HEADER="#!/usr/bin/python\n"
mkdir -p build
function build {
echo "building $1.$2"
cd src/$1
#python -m compileall .
git rev-list --count HEAD > REVNO
zip -r ../../build/$1.zip * -x@../../.gitignore
cd ../../build
echo -e $HEADER| cat - $1.zip > $1.$2
rm $1.zip
chmod +x $1.$2
cd ..
}
build server py
build encoder py
| true
|
bdb796faa91b201032dd05723e2bc153131f9fc5
|
Shell
|
futpib/postgres-dbsubsetter
|
/docker-entrypoint-initdb.d/50-dbsubsetter.sh
|
UTF-8
| 4,152
| 3.25
| 3
|
[] |
no_license
|
#!/usr/bin/env sh
set -xe
export ORIGIN_PORT="${ORIGIN_PORT:-5432}"
: "${ORIGIN_HOST:?ORIGIN_HOST is required}"
: "${ORIGIN_USER:?ORIGIN_USER is required}"
: "${ORIGIN_PASSWORD:?ORIGIN_PASSWORD is required}"
: "${ORIGIN_DB:?ORIGIN_DB is required}"
ORIGIN_PASSWORD_ESCAPED="$(echo "${ORIGIN_PASSWORD}" | sed 's/&/%26/g;s/\$/%24/g')"
POSTGRES_HOST=/var/run/postgresql/
POSTGRES_PORT=5432
POSTGRES_PROXY_PORT=5431
# Pre-Subset Instructions: PostgreSQL
# https://github.com/bluerogue251/DBSubsetter/blob/master/docs/pre_subset_postgres.md
echo "Loading roles..."
# Dump out all postgres roles into a file called `roles.sql`
PGPASSWORD="${ORIGIN_PASSWORD}" \
pg_dumpall \
--roles-only \
--no-role-passwords \
--host="${ORIGIN_HOST}" \
--port="${ORIGIN_PORT}" \
--username="${ORIGIN_USER}" \
--database="${ORIGIN_DB}" \
| \
# Load `roles.sql` into your "target" database
psql \
--host "${POSTGRES_HOST}" \
--port "${POSTGRES_PORT}" \
--user "${POSTGRES_USER}" \
--dbname "${POSTGRES_DB}"
echo "Loading schema..."
# Dump out just the schema (no data) from your "origin" database into a file called `pre-data-dump.sql`
PGPASSWORD="${ORIGIN_PASSWORD}" \
pg_dump \
--host "${ORIGIN_HOST}" \
--port "${ORIGIN_PORT}" \
--user "${ORIGIN_USER}" \
--dbname "${ORIGIN_DB}" \
--section pre-data \
| \
# Load `pre-data-dump.sql` into your "target" database
psql \
--host "${POSTGRES_HOST}" \
--port "${POSTGRES_PORT}" \
--user "${POSTGRES_USER}" \
--dbname "${POSTGRES_DB}"
echo "Proxying postgres unix socket to tcp..."
socat TCP-LISTEN:"${POSTGRES_PROXY_PORT}" UNIX-CONNECT:"${POSTGRES_HOST}.s.PGSQL.${POSTGRES_PORT}" &
socat_pid=$!
echo "Running dbsubsetter..."
dbsubsetter_script=$(cat <<EOF
java \
-jar DBSubsetter.jar \
--originDbConnStr "jdbc:postgresql://${ORIGIN_HOST}:${ORIGIN_PORT}/${ORIGIN_DB}?user=${ORIGIN_USER}&password=${ORIGIN_PASSWORD_ESCAPED}" \
--targetDbConnStr "jdbc:postgresql://localhost:${POSTGRES_PROXY_PORT}/${POSTGRES_DB}" \
--originDbParallelism ${DB_SUBSETTER_ORIGIN_PARALLELISM:-1} \
--targetDbParallelism 1 \
${DB_SUBSETTER_ARGS}
EOF
)
eval $dbsubsetter_script
echo "Killing unix to tcp proxy..."
kill $socat_pid
# Post-Subset Instructions: PostgreSQL
# https://github.com/bluerogue251/DBSubsetter/blob/master/docs/post_subset_postgres.md
echo "Loading constraints and indices..."
# Dump out just constraint and index definitions from your "origin" database into a file called `post-data-dump.pg_dump`
PGPASSWORD="${ORIGIN_PASSWORD}" \
pg_dump \
--host "${ORIGIN_HOST}" \
--port "${ORIGIN_PORT}" \
--user "${ORIGIN_USER}" \
--dbname "${ORIGIN_DB}" \
--section post-data \
--format custom \
| \
# Load `post-data-dump.pgdump` into your "target" database
pg_restore \
--host "${POSTGRES_HOST}" \
--port "${POSTGRES_PORT}" \
--user "${POSTGRES_USER}" \
--dbname "${POSTGRES_DB}"
# Fixing Sequences
# https://wiki.postgresql.org/wiki/Fixing_Sequences
echo "Fixing sequences..."
create_sequences_script_script=$(cat <<EOF
SELECT 'SELECT SETVAL(' ||
quote_literal(quote_ident(PGT.schemaname) || '.' || quote_ident(S.relname)) ||
', COALESCE(MAX(' ||quote_ident(C.attname)|| '), 1) ) FROM ' ||
quote_ident(PGT.schemaname)|| '.'||quote_ident(T.relname)|| ';'
FROM pg_class AS S,
pg_depend AS D,
pg_class AS T,
pg_attribute AS C,
pg_tables AS PGT
WHERE S.relkind = 'S'
AND S.oid = D.objid
AND D.refobjid = T.oid
AND D.refobjid = C.attrelid
AND D.refobjsubid = C.attnum
AND T.relname = PGT.tablename
ORDER BY S.relname;
EOF
)
sequences_script=$(
psql \
--host "${POSTGRES_HOST}" \
--port "${POSTGRES_PORT}" \
--user "${POSTGRES_USER}" \
--dbname "${POSTGRES_DB}" \
--command "${create_sequences_script_script}" \
| \
grep 'SELECT'
)
psql \
--host "${POSTGRES_HOST}" \
--port "${POSTGRES_PORT}" \
--user "${POSTGRES_USER}" \
--dbname "${POSTGRES_DB}" \
--command "${sequences_script}"
| true
|
68f8ec22e85b01c9d9545d79aeaddb20dbb8449d
|
Shell
|
erkanboz032/fvmsunuculinux
|
/menu/herseyisil.sh
|
UTF-8
| 637
| 2.96875
| 3
|
[] |
no_license
|
cd /root
clear
echo "---------------------------------------"
echo "Eminmisiniz?"
echo "Lütfen yapmak istediğiniz işlemi seçiniz"
echo -e "\e[38;5;76m
1 - Evet \n
2 - Hayır \n
\033[0m"
echo "---------------------------------------"
read islemler
case $islemler in
1)
clear
rm -rf /home/sunucu
cd /root
wget http://fivem.erkanboz.com.tr/fvminstaller.sh
chmod -R 777 /root/fvminstaller.sh
rm -rf /root/menu
cd /root
echo "Başarıyla Silindi ve installer geri yüklendi"
;;
2)
clear
echo "Lütfen Bekleyiniz..."
sleep 2
sh /root/menu/skur.sh
;;
*)
echo "Hatalı işlem yaptınız"
sleep 2
sh /root/menu/menu.sh
esac
| true
|
78e960aee8d8d76159e54078b190c1df0448de88
|
Shell
|
flathub/org.gnome.Builder
|
/local-build.sh
|
UTF-8
| 415
| 2.703125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Build the org.gnome.Builder flatpak locally and bundle it into a
# single-file .flatpak for sideloading.
BRANCH=stable
STATE_DIR=~/.cache/gnome-builder/flatpak-builder
# Start from clean output locations.
rm -f org.gnome.Builder.flatpak
rm -rf _build _repo
mkdir _build _repo
flatpak-builder --ccache --force-clean \
    --repo=_repo --state-dir=$STATE_DIR \
    --default-branch=$BRANCH \
    _build org.gnome.Builder.json
flatpak build-bundle _repo org.gnome.Builder.flatpak org.gnome.Builder $BRANCH
| true
|
d3f6354707a8c04a299ca02149f10cfdeb68e115
|
Shell
|
swapperz/dot
|
/.cshrc
|
UTF-8
| 8,916
| 2.875
| 3
|
[] |
no_license
|
# .cshrc - csh resource script, read at beginning of execution by each shell
# see also csh(1), environ(7).
#
set path = (/sbin /bin /usr/sbin /usr/bin /usr/games /usr/local/sbin /usr/local/bin /usr/X11R6/bin $HOME/bin)
alias h history 1000
alias j jobs -l
alias less less -x4
alias vim vim -p
alias 7ze 7za a -mhe=on -p
alias tcpdump tcpdump -nn
alias trafshow trafshow -n
alias iftop iftop -nN
alias tshark tshark -n
if ($OSTYPE == 'FreeBSD') then
alias ls ls -A
alias la ls -a
alias lf ls -FA
alias ll ls -lA
alias grep grep --color=auto
alias iotop top -m io -o total
# if ($TERM != "su") then
# tabs -4
# endif
setenv LSCOLORS 'fxfxcxdxbxegedabagacad'
else
alias ls ls --color=auto -A
alias la ls --color=auto -a
alias lf ls --color=auto -FA
alias ll ls --color=auto -lA
alias grep grep --color=auto
# if ($?TERM) then
# tabs 4
# endif
if ($?LS_COLORS) then
setenv LS_COLORS "${LS_COLORS}:di=0;35:"
else
setenv LS_COLORS ':di=0;35:'
endif
endif
umask 22
#setenv LESS '-F -X -x4 $LESS'
setenv LANG en_US.UTF-8
setenv CLICOLOR YES
setenv EDITOR vim
#setenv PAGER more
setenv PAGER "less -x4"
setenv BLOCKSIZE K
if ($?prompt) then
set prompt = "\n%{\e[31;1m%}`whoami`%{\e[37m%}@%{\e[33m%}%m%{\e[37m%}: %{\e[36m%}%/%{\e[37m%} \n#%{\e[0m%} "
set filec
set history = 1000
set savehist = 1000
set mail = (/var/mail/$USER)
if ( $?tcsh ) then
bindkey "^W" backward-delete-word
bindkey -k up history-search-backward
bindkey -k down history-search-forward
bindkey "\e[1~" beginning-of-line # Home
bindkey "\e[7~" beginning-of-line # Home rxvt
bindkey "\e[2~" overwrite-mode # Ins
bindkey "\e[3~" delete-char # Delete
bindkey "\e[4~" end-of-line # End
bindkey "\e[8~" end-of-line # End rxvt
endif
endif
set autolist
set histdup = prev
if ($shell =~ *tcsh) then
alias reload 'source ~/.tcshrc'
else
alias reload 'source ~/.cshrc'
endif
alias vimrc 'vim ~/.vimrc'
alias zshrc 'vim ~/.zshrc'
alias cshrc 'vim ~/.cshrc'
alias tcshrc 'vim ~/.tcshrc'
alias null 'cat /dev/null'
alias apg 'apg -m24 -M NCL -a 1'
set _complete=1
set hosts = (127.0.0.1 localhost)
if ( -r ~/.ssh/known_hosts ) then
set hosts = ($hosts `cat ~/.ssh/known_hosts | cut -f 1 -d ' ' | tr ',' "\n"`)
endif
complete ssh 'p/1/$hosts/ c/-/"(l n)"/ n/-l/u/ N/-l/c/ n/-/c/ p/2/c/ p/*/f/'
complete scp "c,*:/,F:/," "c,*:,F:$HOME," 'c/*@/$hosts/:/'
complete rsync "c,*:/,F:/," "c,*:,F:$HOME," 'c/*@/$hosts/:/'
complete ping 'p/1/$hosts/'
complete traceroute 'p/1/$hosts/'
complete mtr 'p/1/$hosts/'
complete telnet 'p/1/$hosts/' "p/2/x:'<port>'/" "n/*/n/"
complete kill 'c/-/S/' 'p/1/(-)//'
complete killall 'p/1/c/'
complete pkill 'p/1/c/'
complete man 'p/1/c/'
complete cd 'C/*/d/'
complete which 'p/1/c/'
complete where 'p/1/c/'
complete unsetenv 'p/1/e/'
complete setenv 'p/1/e/'
complete env 'c/*=/f/' 'p/1/e/=/' 'p/2/c/'
complete chgrp 'p/1/g/'
complete chown 'p/1/u/'
complete gdb 'n/-d/d/ n/*/c/'
complete uncomplete 'p/*/X/'
complete find 'n/-name/f/' 'n/-newer/f/' 'n/-{,n}cpio/f/' \
'n/-exec/c/' 'n/-ok/c/' 'n/-user/u/' 'n/-group/g/' \
'n/-fstype/(nfs 4.2)/' 'n/-type/(b c d f l p s)/' \
'c/-/(name newer cpio ncpio exec ok user group fstype type atime \
ctime depth inum ls mtime nogroup nouser perm print prune \
size xdev)/' \
'p/*/d/'
complete ln 'C/?/f/' 'p/1/(-s)/' 'n/-s/x:[first arg is path to original file]/' \
'N/-s/x:[second arg is new link]/'
complete ipfw 'p/1/(flush add delete list show zero table)/' \
'n/add/(allow permit accept pass deny drop reject \
reset count skipto num divert port tee port)/'
complete wget 'c/--/(accept= append-output= background cache= \
continue convert-links cut-dirs= debug \
delete-after directory-prefix= domains= \
dont-remove-listing dot-style= exclude-directories= \
exclude-domains= execute= follow-ftp \
force-directories force-html glob= header= help \
http-passwd= http-user= ignore-length \
include-directories= input-file= level= mirror \
no-clobber no-directories no-host-directories \
no-host-lookup no-parent non-verbose \
output-document= output-file= passive-ftp \
proxy-passwd= proxy-user= proxy= quiet quota= \
recursive reject= relative retr-symlinks save-headers \
server-response span-hosts spider timeout= \
timestamping tries= user-agent= verbose version wait=)/'
if ( -r ~/.inputrc) then
setenv INPUTRC ~/.inputrc
endif
set GRC = `which grc`
if ($?TERM != "dumb" && $GRC != "") then
alias colourify "$GRC --colour auto"
alias configure 'colourify ./configure'
alias diff 'colourify diff'
alias make 'colourify make'
alias gcc 'colourify gcc'
alias g++ 'colourify g++'
alias as 'colourify as'
alias gas 'colourify gas'
alias ld 'colourify ld'
# alias netstat 'colourify netstat'
alias ping 'colourify ping'
alias traceroute 'colourify traceroute'
alias head 'colourify head'
# alias tail 'colourify tail'
alias dig 'colourify dig'
alias mount 'colourify mount'
# alias ps 'colourify ps'
alias mtr 'colourify mtr'
alias df 'colourify df'
# alias la 'colourify ls -a'
# alias lf 'colourify ls -FA'
# alias ll 'colourify ls -lA'
endif
if ( `where tmux` != "" ) then
set __tmux_cmd_names = (attach-session bind-key break-pane capture-pane clear-history \
clock-mode command-prompt confirm-before copy-mode \
delete-buffer detach-client display-message display-panes \
has-session if-shell join-pane kill-pane kill-server \
kill-session kill-window last-pane last-window link-window \
list-buffers list-clients list-commands list-keys list-panes \
list-sessions list-windows load-buffer lock-client lock-server \
lock-session move-window new-session new-window next-layout \
next-window paste-buffer pipe-pane previous-layout previous-window \
refresh-client rename-session rename-window resize-pane \
respawn-pane respawn-window rotate-window run-shell save-buffer \
select-layout select-pane select-window send-keys send-prefix \
server-info set-buffer set-environment set-option set-window-option \
show-buffer show-environment show-messages show-options \
show-window-options source-file split-window start-server \
suspend-client swap-pane swap-window switch-client unbind-key \
unlink-window)
alias __tmux_sessions 'tmux list-sessions | cut -d : -f 1'
alias __tmux_windows 'tmux list-windows | cut -d " " -f 1-2 | sed -e "s/://"'
alias __tmux_panes 'tmux list-panes | cut -d : -f 1'
alias __tmux_clients 'tmux list-clients | cut -d " " -f 1-2 | sed -e "s/://"'
# Define the completions (see the tcsh man page).
complete tmux \
'p/1/$__tmux_cmd_names/' \
'n/*-session/(-t)/' \
'n/*-window/(-t)/' \
'n/*-pane/(-t)/' \
'n/*-client/(-t)/' \
'N/*-session/`__tmux_sessions`/' \
'N/*-window/`__tmux_windows`/' \
'N/*-pane/`__tmux_panes`/' \
'N/*-client/`__tmux_clients`/' \
'n/-t/`__tmux_clients; __tmux_panes; __tmux_windows; __tmux_sessions`/'
endif
if ($OSTYPE == "linux" || $OSTYPE == "linux-gnu") then
setenv LANG en_US.UTF-8
# BACKSPACE for Linux
# stty erase '^H' >& /dev/null
# stty erase '^?' >& /dev/null
set CENTOSVER=`uname -a | grep -Eo '\.el[0-9]'`
if ($CENTOSVER == ".el7") then
alias dmesg "dmesg -T -L"
else
dmesg -V >& /dev/null
if ($? == 0) then
alias dmesg "dmesg -T"
endif
endif
alias realpath readlink -f
endif
setenv LESS_TERMCAP_mb `printf "\033[1;31m"`
setenv LESS_TERMCAP_md `printf "\033[1;31m"`
setenv LESS_TERMCAP_me `printf "\033[0m"`
setenv LESS_TERMCAP_se `printf "\033[0m"`
setenv LESS_TERMCAP_so `printf "\033[1;44;33m"`
setenv LESS_TERMCAP_ue `printf "\033[0m"`
setenv LESS_TERMCAP_us `printf "\033[1;32m"`
# Local settings
if ( -f ~/.tcshrc.local ) then
source ~/.tcshrc.local
endif
| true
|
12de30974b6999c308540a7318ca7e97fae47c80
|
Shell
|
staid03/Ubuntu_Simple_Essential_Scripts
|
/restart_server.sh
|
UTF-8
| 114
| 2.8125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Reboot the server and report whether the reboot command was accepted.
#
# Fixes vs. original: "#!bash" is not a valid interpreter path (shebangs
# need an absolute path), and the "$?" inside the else branch reported
# the status of the [ ] test rather than of `sudo reboot`; capture the
# status once and reuse it.
sudo reboot
rv=$?
if [ $rv -eq 0 ]
then
	echo "Server rebooting"
else
	echo "Server not rebooting - " $rv
fi
| true
|
8206d497567325424ee3cb0a6c1a23bc841f46f0
|
Shell
|
Arjunkhera/Notes
|
/tools/commandline/solutions_UnixWorkbench/functionthird.sh
|
UTF-8
| 214
| 3.4375
| 3
|
[] |
no_license
|
#! /usr/bin/env bash
# nevens: print how many of its arguments are even numbers.
# Relies on isiteven, sourced from functionsecond.sh — presumably it
# echoes 1 for an even argument; verify against that file.
function nevens {
source functionsecond.sh
# Despite the name, "sum" is a counter of even arguments, not a total.
sum=0
# NOTE(review): $@ is unquoted, so arguments word-split; fine for the
# plain integers this expects.
for i in $@
do
answer=$(isiteven $i)
if [[ $answer -eq 1 ]]
then let sum=sum+1
fi
done
echo "$sum"
}
|
33e9e6321c645dc6d2e5c679c494ca48431e86d6
|
Shell
|
colovu/docker-debian
|
/prebuilds/usr/sbin/download_pkg
|
UTF-8
| 4,449
| 4.125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Ver: 1.0 by Endial Fang (endial@126.com)
#
# shell 执行参数,分别为 -e(命令执行错误则退出脚本) -u(变量未定义则报错) -x(打印实际待执行的命令行)
set -eux
. /usr/local/scripts/liblog.sh
print_usage() {
LOG "Usage: download_pkg <COMMAND> <PACKAGE-NAME> \"<URLS>\" [OPTIONS]"
LOG ""
LOG "Download and install Third-Part packages"
LOG ""
LOG "Commands:"
LOG " download Download a package."
LOG " install Download and install a package."
LOG " unpack Download and unpack a package."
LOG ""
LOG "Options:"
LOG " -g, --checkpgp Package release bucket."
LOG " -s, --checksum SHA256 verification checksum."
LOG " -h, --help Show this help message and exit."
LOG ""
LOG "PACKAGE-NAME: Name with extern name"
LOG "URLS: String with URL list"
LOG ""
LOG "Examples:"
LOG " - Unpack package"
LOG " \$ download_pkg unpack redis-5.0.8.tar.gz \"http://download.redis.io/releases\""
LOG ""
LOG " - Verify and Install package"
LOG " \$ download_pkg install redis-5.0.8.tar.gz \"http://download.redis.io/releases\" -s 42cf86a114d2a451b898fcda96acd4d01062a7dbaaad2801d9164a36f898f596"
LOG ""
}
# Verify detached PGP signature $1 over file $2.
# $3 is a space-separated list of key IDs; each is fetched from a chain
# of keyservers (first one that answers wins).  Silently does nothing
# when gpg is not installed.
# NOTE(review): GNUPGHOME is assigned but not exported, so gpg may still
# use the default keyring rather than the temp dir — confirm intent.
check_pgp() {
local name_asc=${1:?missing asc file name}
local name=${2:?missing file name}
local keys="${3:?missing key id}"
GNUPGHOME="$(mktemp -d)"
if which gpg >/dev/null 2>&1; then
for key in $keys; do
gpg --batch --keyserver ha.pool.sks-keyservers.net --recv-keys "${key}" ||
gpg --batch --keyserver pgp.mit.edu --recv-keys "${key}" ||
gpg --batch --keyserver keys.gnupg.net --recv-keys "${key}" ||
gpg --batch --keyserver keyserver.pgp.com --recv-keys "${key}";
done
gpg --batch --verify "$name_asc" "$name"
# Shut down any gpg-agent we spawned.
command -v gpgconf > /dev/null && gpgconf --kill all
fi
}
# 获取并解析参数
ARGS=$(getopt -o g:s:h -l "checkpgp:,checksum:,help" -n "download-pkg" -- "$@")
if [ $? -ne 0 ];
then
exit 1
fi
eval set -- "$ARGS";
while true; do
case "$1" in
-g|--checkpgp)
shift
if [ -n "$1" ]; then
PACKAGE_KEYS=$1
shift
fi
;;
-s|--checksum)
shift
if [ -n "$1" ]; then
PACKAGE_SHA256=$1
shift
fi
;;
-h|--help)
print_usage
exit 0
;;
--)
shift
break
;;
esac
done
# 检测输入的命令是否合法
case "$1" in
download|install|unpack) ;;
*)
error "Unrecognized command: $1"
print_usage
exit 1
;;
esac
# 检测输入参数是否足够,需要至少提供软件包名称 及 下载路径
if [ $# -lt 3 ]; then
print_usage
exit 1
fi
INSTALL_ROOT=/usr/local
CACHE_ROOT=/tmp
PACKAGE="$2"
PACKAGE_URLS=$3
cd $INSTALL_ROOT
LOG_I "Downloading $PACKAGE package"
for url in $PACKAGE_URLS; do
LOG_D "Try $url/$PACKAGE"
if wget -O "$CACHE_ROOT/$PACKAGE" "$url/$PACKAGE" && [ -s "$CACHE_ROOT/$PACKAGE" ]; then
if [ -n "${PACKAGE_KEYS:-}" ]; then
wget -O "$CACHE_ROOT/$PACKAGE.asc" "$url/$PACKAGE.asc" || wget -O "$CACHE_ROOT/$PACKAGE.asc" "$url/$PACKAGE.sign" || :
if [ ! -e "$CACHE_ROOT/$PACKAGE.asc" ]; then
exit 1
fi
fi
break
fi
done
if [ -n "${PACKAGE_SHA256:-}" ]; then
LOG_I "Verifying package integrity"
echo "$PACKAGE_SHA256 *$CACHE_ROOT/$PACKAGE" | sha256sum -c -
fi
if [ -e "$CACHE_ROOT/$PACKAGE.asc" ]; then
LOG_I "Verifying package with PGP"
check_pgp "$CACHE_ROOT/$PACKAGE.asc" "$CACHE_ROOT/$PACKAGE" "$PACKAGE_KEYS"
fi
# If the tarball has too many files, it can trigger a bug
# in overlayfs when using tar. Install bsdtar in the container image
# to workaround it. As the overhead is too big (~40 MB), it is not added by
# default. Source: https://github.com/coreos/bugs/issues/1095
# 安装或解压软件
case "$1" in
download)
LOG_I "Download success: $CACHE_ROOT/$PACKAGE"
;;
install)
LOG_I "Installing $PACKAGE"
cp $CACHE_ROOT/$PACKAGE /usr/local/bin/
;;
unpack)
if ! tar -taf $CACHE_ROOT/$PACKAGE >/dev/null 2>&1; then
LOG_E "Invalid or corrupt '$PACKAGE' package."
exit 1
fi
LOG_I "Unpacking $PACKAGE to $CACHE_ROOT"
cd $CACHE_ROOT
if which bsdtar >/dev/null 2>&1; then
bsdtar -xf $CACHE_ROOT/$PACKAGE
else
tar --no-same-owner -xaf $CACHE_ROOT/$PACKAGE
fi
;;
esac
| true
|
49668fef61313a77c5825d16f57dd0783ae3ee10
|
Shell
|
zsoldosp/lazylog
|
/build.sh
|
UTF-8
| 195
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/sh
if [[ "$JAVA_HOME" -eq "" ]]; then
export JAVA_HOME="/c/Program Files/Java/jdk1.6.0_17/"
export ANT_HOME="/e/lib/apache-ant-1.8.0/"
export PATH=$PATH:$ANT_HOME/bin:$JAVA_HOME
fi
ant
| true
|
7a47e22ef5183e3617f0350d88d96c68bdc1ad79
|
Shell
|
chapayGhub/di
|
/di-pathfinder.sh
|
UTF-8
| 4,470
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/zsh -f
# Purpose: Download and install the latest version of PathFinder 7 or 8 from <https://cocoatech.com/>
#
# From: Timothy J. Luoma
# Mail: luomat at gmail dot com
# Date: 2018-08-21
NAME="$0:t:r"
INSTALL_TO='/Applications/Path Finder.app'
HOMEPAGE="https://cocoatech.com/"
DOWNLOAD_PAGE="https://get.cocoatech.com/PF8.zip"
SUMMARY="File manager for macOS."
if [ -e "$HOME/.path" ]
then
source "$HOME/.path"
else
PATH=/usr/local/scripts:/usr/local/bin:/usr/bin:/usr/sbin:/sbin:/bin
fi
function use_v7 {
	# Pin the Path Finder 7 download details. Version 7 is no longer
	# updated upstream, so these values are effectively frozen
	# (determined from the PF7.zip download on 2018-07-17).
	USE_VERSION='7'
	URL="https://get.cocoatech.com/PF7.zip"
	LATEST_VERSION="7.6.2"
	LATEST_BUILD="1729"
	ASTERISK='(Note that version 8 is also available.)'
}
# Select Path Finder 8: the latest build number, download URL and version
# string are scraped live from the app's Sparkle appcast (XML) feed.
function use_v8 {
USE_VERSION='8'
XML_FEED="http://sparkle.cocoatech.com/PF8.xml"
# Fetch the feed and keep only the <build>/<url>/<version> element values,
# one per line; sorting by tag name fixes the order to build, url, version.
# NOTE(review): no error handling — assumes network access and exactly one
# of each tag in the feed; verify against the live appcast.
INFO=($(curl -sfL $XML_FEED \
| egrep '<(build|version|url)>' \
| sort \
| awk -F'>|<' '//{print $3}'))
# zsh arrays are 1-indexed, hence INFO[1..3] = build, url, version.
LATEST_BUILD="$INFO[1]"
URL="$INFO[2]"
LATEST_VERSION="$INFO[3]"
}
if [[ -e "$INSTALL_TO" ]]
then
# if v7 is installed, check that. Otherwise, use v8
MAJOR_VERSION=$(defaults read "$INSTALL_TO/Contents/Info" CFBundleShortVersionString | cut -d. -f1)
if [[ "$MAJOR_VERSION" == "7" ]]
then
use_v7
else
use_v8
fi
else
if [ "$1" = "--use7" -o "$1" = "-7" ]
then
use_v7
else
use_v8
fi
fi
if [[ -e "$INSTALL_TO" ]]
then
INSTALLED_VERSION=$(defaults read "${INSTALL_TO}/Contents/Info" CFBundleShortVersionString)
INSTALLED_BUILD=$(defaults read "${INSTALL_TO}/Contents/Info" CFBundleVersion)
autoload is-at-least
is-at-least "$LATEST_VERSION" "$INSTALLED_VERSION"
VERSION_COMPARE="$?"
is-at-least "$LATEST_BUILD" "$INSTALLED_BUILD"
BUILD_COMPARE="$?"
if [ "$VERSION_COMPARE" = "0" -a "$BUILD_COMPARE" = "0" ]
then
echo "$NAME: Up-To-Date ($INSTALLED_VERSION/$INSTALLED_BUILD) $ASTERISK"
exit 0
fi
echo "$NAME: Outdated: $INSTALLED_VERSION/$INSTALLED_BUILD vs $LATEST_VERSION/$LATEST_BUILD"
FIRST_INSTALL='no'
else
FIRST_INSTALL='yes'
fi
FILENAME="$HOME/Downloads/PathFinder-${LATEST_VERSION}_${LATEST_BUILD}.zip"
if [[ "$USE_VERSION" == "8" ]]
then
if (( $+commands[lynx] ))
then
RELEASE_NOTES_URL="$XML_FEED"
( curl -sfLS "$RELEASE_NOTES_URL" \
| sed '1,/<\!\[CDATA\[/d; /\]\]>/,$d' \
| lynx -dump -nomargins -width='10000' -assume_charset=UTF-8 -pseudo_inlines -stdin \
| sed '/./,/^$/!d' \
| sed 's#^ *##g' ;
echo "\nSource: XML_FEED <$RELEASE_NOTES_URL>" ) | tee -a "$FILENAME:r.txt"
fi
fi
echo "$NAME: Downloading '$URL' to '$FILENAME':"
curl --continue-at - --fail --location --output "$FILENAME" "$URL"
EXIT="$?"
## exit 22 means 'the file was already fully downloaded'
[ "$EXIT" != "0" -a "$EXIT" != "22" ] && echo "$NAME: Download of $URL failed (EXIT = $EXIT)" && exit 0
[[ ! -e "$FILENAME" ]] && echo "$NAME: $FILENAME does not exist." && exit 0
[[ ! -s "$FILENAME" ]] && echo "$NAME: $FILENAME is zero bytes." && rm -f "$FILENAME" && exit 0
UNZIP_TO=$(mktemp -d "${TMPDIR-/tmp/}${NAME}-XXXXXXXX")
echo "$NAME: Unzipping '$FILENAME' to '$UNZIP_TO':"
ditto -xk --noqtn "$FILENAME" "$UNZIP_TO"
EXIT="$?"
if [[ "$EXIT" == "0" ]]
then
echo "$NAME: Unzip successful"
else
# failed
echo "$NAME failed (ditto -xkv '$FILENAME' '$UNZIP_TO')"
exit 1
fi
if [[ -e "$INSTALL_TO" ]]
then
pgrep -xq "$INSTALL_TO:t:r" \
&& LAUNCH='yes' \
&& osascript -e 'tell application "$INSTALL_TO:t:r" to quit'
echo "$NAME: Moving existing (old) '$INSTALL_TO' to '$HOME/.Trash/'."
local TRASH="$HOME/.Trash/$INSTALL_TO:t:r.$INSTALLED_VERSION.app"
COUNT='0'
while [ -e "$TRASH" ]
do
((COUNT++))
TRASH="$HOME/.Trash/$INSTALL_TO:t:r.$INSTALLED_VERSION.($COUNT).app"
done
mv -vf "$INSTALL_TO" "$TRASH"
EXIT="$?"
if [[ "$EXIT" != "0" ]]
then
echo "$NAME: failed to move existing $INSTALL_TO to $HOME/.Trash/"
exit 1
fi
fi
echo "$NAME: Moving new version of '$INSTALL_TO:t' (from '$UNZIP_TO') to '$INSTALL_TO'."
# Move the file out of the folder
mv -vn "$UNZIP_TO/$INSTALL_TO:t" "$INSTALL_TO"
EXIT="$?"
if [[ "$EXIT" = "0" ]]
then
echo "$NAME: Successfully installed '$UNZIP_TO/$INSTALL_TO:t' to '$INSTALL_TO'."
else
echo "$NAME: Failed to move '$UNZIP_TO/$INSTALL_TO:t' to '$INSTALL_TO'."
exit 1
fi
[[ "$LAUNCH" = "yes" ]] && open -a "$INSTALL_TO"
exit 0
#EOF
| true
|
15eec51bbfee6ecc97752834799ab9b6e5519b58
|
Shell
|
Ewan-Roberts/dot_files
|
/useful_commands.sh
|
UTF-8
| 597
| 2.890625
| 3
|
[] |
no_license
|
# Handy terminal "dashboard" one-liners: each loop clears the screen and
# re-renders a large figlet banner (or a weather report) on an interval.
# Run one line at a time — every loop runs forever until interrupted.
#
# Fix: the original annotated these with C-style `//` comments, which are
# not shell comment syntax — each `//...` line would try to execute `/`.

# day of the week
while true; do tput clear; date +%A | figlet ; sleep 9999; done
# clock with seconds, refreshed every second
while true; do tput clear; date +"%H : %M : %S" | figlet ; sleep 1; done
# just date (YYYY-MM-DD)
while true; do tput clear; date +%F | figlet ; sleep 9999; done
# just date but with the short day name, e.g. "Wed   05-01"
while true; do tput clear; date +%a%t%t%t%d-%m | figlet ; sleep 9999; done
# just time (HH : MM), refreshed every minute
while true; do tput clear; date +"%H : %M" | figlet ; sleep 60; done
# get weather
while true; do curl wttr.in/London; sleep 9999;done
# get detailed weather
while true; do curl wttr.in/London?format=v2; sleep 9999;done
| true
|
cd876a1a0d4ff5797f233caa39d2d6995cf6154b
|
Shell
|
YuanG1944/COMP9044-Software-Construction
|
/review/COMP9044_20T2-master/00_shrug/test/test01.sh
|
UTF-8
| 503
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/dash
#
# COMP9044 Assignment 01 - test01
#
# test add non exist file and commit
#
# Authors:
#   Rui.Mu (z5144248@unsw.edu.au)
#
# Written: 09/07/2019

# Start from a clean slate. -f on the plain rm: on a fresh checkout
# a/c/d do not exist yet and the original `rm a c d` aborted the run
# with "No such file or directory" errors.
rm -rf .legit
rm -f a c d
./legit-init
echo hello > a
# adding a missing file (b) must fail, so nothing is staged for "first"
./legit-add a b
./legit-commit -m first
# commit ok
./legit-add a
./legit-commit -m second
echo hello c > c
echo hello d > d
./legit-add c d
./legit-commit -m third
# delete a tracked file, then try to re-add it alongside an existing one
./legit-status
./legit-log
rm d
./legit-add c d
./legit-commit -m forth
./legit-status
./legit-log
| true
|
58655f7bddd4f7accddd81959b5cae83ea646312
|
Shell
|
orenlivne/ober
|
/primal/src/code/impute/batch/cgi/tabix-genotypes.sh
|
UTF-8
| 529
| 3.859375
| 4
|
[] |
no_license
|
#!/bin/bash
#-----------------------------------------------------------------
# Compress and index genotype files using tabix.
# Input arguments:
# $1: file name pattern. Format: input_dir/file.chr#chrom#.tsv
#     the string #chrom# is successively replaced by 1..22.
#-----------------------------------------------------------------
name="$1"
for chrom in {1..22}; do
    # Substitute the chromosome number into the pattern. Quoting every
    # expansion keeps paths containing spaces intact; the original used
    # backticks and unquoted `echo $name | sed`, which word-split them.
    file=$(printf '%s\n' "$name" | sed "s/#chrom#/${chrom}/g")
    echo "Tabixing $file ..."
    # bgzip + tabix each chromosome in a background subshell so all
    # 22 chromosomes are processed in parallel...
    ( bgzip -c "$file" > "$file.gz" ; tabix -s 2 -b 3 -e 4 "$file.gz" ) &
done
# ...and block until every background job has finished.
wait
| true
|
2db4ed3d1ba9eb9645994c024df3fdf7f53bd4e4
|
Shell
|
eloythub/eloyt-infra
|
/gcloud-push
|
UTF-8
| 2,600
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/sh
echo 'this file has been deprecated'
exit 1
# Exmaple
# ./gcloud-push eloyt.com eloyt-149708 staging-v1
#
source ./secrets
# Print MSG (plus anything piped on stdin) in a colour keyed by STATUS.
# Usage: log <warning|success|error|info> <message>
#        log <message>           # unknown/missing status -> default colour
# When stdin is not a terminal, the piped input is appended to the message.
#
# Fix: the original built colours with `echo ${COLOR}${MSG}...` — bash echo
# does not interpret \e without -e, so the escape codes printed literally,
# and the unquoted expansion word-split the message. printf '%b' interprets
# \033 portably (sh/bash/zsh, Linux and macOS), so the OSTYPE special case
# for darwin is no longer needed.
log() {
  STATUS=$1
  MSG=$2
  if [ ! -t 0 ]; then
    INPUT=$(cat)
  else
    INPUT=""
  fi
  case "$STATUS" in
    warning) COLOR_CODE='93' ;;
    success) COLOR_CODE='92' ;;
    error)   COLOR_CODE='91' ;;
    info)    COLOR_CODE='94' ;;
    *)       COLOR_CODE='39'; MSG=$1 ;;
  esac
  RESET="\033[0m"
  COLOR="\033[0;${COLOR_CODE}m"
  printf '%b\n' "${COLOR}${MSG}${INPUT}${RESET}"
}
export PROJECT_DIR=$1
export GCLOUD_PROJECT_ID=$2
export DEFAULT_TAG=$3
export DOCKER_LOCAL_IMAGE="eloyt/$PROJECT_DIR"
export DOCKER_GCLOUD_IMAGE="asia.gcr.io/$GCLOUD_PROJECT_ID/$PROJECT_DIR"
if [ -e services/${PROJECT_DIR}/Dockerfile.prod ];
then
log "success" "Build Image"
cd services/$PROJECT_DIR && \
docker build . -f Dockerfile.prod -t $DOCKER_LOCAL_IMAGE && \
docker tag $DOCKER_LOCAL_IMAGE $DOCKER_GCLOUD_IMAGE:$DEFAULT_TAG && \
gcloud docker -- push $DOCKER_GCLOUD_IMAGE:$DEFAULT_TAG && \
cd - 1> /dev/null 2> /dev/null
fi
# Update the Replication controller
if [ -f kube/${PROJECT_DIR}/replication-controller.yaml ];
then
log "success" "Kubernetes Update Replication Controller"
envsubst < kube/${PROJECT_DIR}/replication-controller.yaml | kubectl apply -f -
fi
# Update the Persistance Volume
if [ -f kube/${PROJECT_DIR}/persistent-volume.yaml ];
then
log "success" "Kubernetes Update Persistance Volume"
envsubst < kube/${PROJECT_DIR}/persistent-volume.yaml | kubectl apply -f -
fi
# Update the Persistance Volume Claim
if [ -f kube/${PROJECT_DIR}/persistent-volume-claim.yaml ];
then
log "success" "Kubernetes Update Persistance Volume Claim"
envsubst < kube/${PROJECT_DIR}/persistent-volume-claim.yaml | kubectl apply -f -
fi
# Update the Service
if [ -r kube/${PROJECT_DIR}/service.yaml ];
then
log "success" "Kubernetes Update Service"
envsubst < kube/${PROJECT_DIR}/service.yaml | kubectl apply -f -
fi
# Update the Service public
if [ -r kube/${PROJECT_DIR}/service-public.yaml ];
then
log "success" "Kubernetes Update Public Service"
envsubst < kube/${PROJECT_DIR}/service-public.yaml | kubectl apply -f -
fi
# Update the Endpoint
if [ -r kube/${PROJECT_DIR}/endpoint.yaml ];
then
log "success" "Kubernetes Update Endpoint"
envsubst < kube/${PROJECT_DIR}/endpoint.yaml | kubectl apply -f -
fi
| true
|
acf3f963917214590b3373c1bdffd698d8df9ac1
|
Shell
|
lmotta/lapig_msc_giovana
|
/script/MDE/mapbiomas/create_images_agr.sh
|
UTF-8
| 3,381
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
#
# ***************************************************************************
# Name : Create DEM images with agriculture class
# Description : Create DEM images with agriculture class from MapBiomas.
#
# Arguments:
# $1: Directory with shapefiles
# $2: ALOS image
# $3: NASA image
# $4: MAPBIOMAS image
#
# Dependeces:
# - xy-res.py
#
# Use Python3
#
# Usage:
# Copy this script and xy-res.py to Work's diretory
#
# ***************************************************************************
# begin : 2020-08-04 (yyyy-mm-dd)
# copyright : (C) 2020 by Luiz Motta
# email : motta dot luiz at gmail.com
# ***************************************************************************
#
# Revisions
#
# 2020-08-28:
# - Add xy-res.py script
# 2020-08-30
# - Update Mapbiomas Collection 5(class)
#
# ***************************************************************************
#
# Example:
# sh ./creates_images_agr.sh DIR_shapefiles ALOS_image NASA_image MAPBIOMAS_image
#
# ***************************************************************************
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU General Public License as published by *
# * the Free Software Foundation; either version 2 of the License, or *
# * (at your option) any later version. *
# * *
# ***************************************************************************
#
#
## Functions
msg_error(){
local name_script=$(basename $0)
echo "Usage: $name_script <DIR shapefiles> <ALOS image> <NASA image> <MAPBIOMAS image>" >&2
echo ""
echo "Example: creates_images_agr.sh shapefiles.lst"
exit 1
}
#
create_image(){
local shp=$1
# Clip images
# Alos
gdalwarp -q -overwrite -tr $xy_res -tap -cutline $shp_dir"/"$shp".shp" -crop_to_cutline -dstnodata -32768 $alos_img alos_clip.tif
# Nasa
gdalwarp -q -overwrite -tr $xy_res -tap -cutline $shp_dir"/"$shp".shp" -crop_to_cutline -dstnodata -32768 $nasa_img nasa_clip.tif
# Mapbiomas
gdalwarp -q -overwrite -tr $xy_res -tap -cutline $shp_dir"/"$shp".shp" -crop_to_cutline -dstnodata 0 $mapbiomas_img mapbiomas_clip.tif
# Agriculture images, Nodata = -32767 (-32768 + 1)
exp_calc="where(logical_or(logical_or(B==20,B==36),logical_or(B==39,B==41)), A, -32767)"
gdal_calc.py --quiet -A alos_clip.tif -B mapbiomas_clip.tif --outfile alos_agr.tif --calc="$exp_calc"
gdal_calc.py --quiet -A nasa_clip.tif -B mapbiomas_clip.tif --outfile nasa_agr.tif --calc="$exp_calc"
# Stack images
gdal_merge.py -q -separate -a_nodata -32768 -o $shp"_alos_nasa.tif" alos_clip.tif nasa_clip.tif
gdal_merge.py -q -separate -a_nodata -32768 -o $shp"_alos_nasa_agr.tif" alos_agr.tif nasa_agr.tif
# clean
rm alos_*.tif nasa_*.tif mapbiomas_clip.tif > /dev/null
}
#
totalargs=4
#
if [ $# -ne $totalargs ] ; then
msg_error
exit 1
fi
##
shp_dir=$1
alos_img=$2
nasa_img=$3
mapbiomas_img=$4
#
cd $shp_dir
files_lst=$(ls -1 *.shp | sed -e 's/\.shp$//')
cd -
#
xy_res=$(python3 ./xy-res.py $alos_img)
#
for item in $files_lst
do
echo "Processing "$item"..."
#
create_image $item
done
| true
|
4f47f84c2f2fbd76b919da3dbc4f541647b6cdf6
|
Shell
|
opdavies/oliverdavies.uk
|
/tools/scripts/create-daily.sh
|
UTF-8
| 988
| 4.03125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Create the next daily-email markdown file from stub.md, fill in its
# placeholders, and commit it.
# Usage: create-daily.sh (next|YYYY-MM-DD) <title words...>
set -euo pipefail
# "next" = day after the newest existing file; otherwise $1 is the date.
# NOTE(review): `tr -d '.md'` deletes the characters '.', 'm' and 'd'
# individually, not the suffix ".md" — it only works because dates are
# digits and dashes. Confirm before touching.
if [ "${1}" == "next" ]; then
next_date=$(ls -1 src/content/daily-email | tail -n 1 | tr -d '.md' | xargs -I {} date +%Y-%m-%d -d '{} +1 day')
else
next_date="${1}"
fi
filepath="src/content/daily-email/${next_date}.md"
shift 1
# Generate the title and slug (lowercase, spaces -> dashes, '?' stripped).
# NOTE(review): gsub("?", "") passes a lone '?' as an ERE, which is not
# strictly valid; gawk appears to treat it literally here — confirm.
title="${*}"
slug=$(echo "${title}" | awk '{print tolower($0)}' | tr ' ' '-' | awk '{ gsub("?", ""); print }')
# Create the file.
# NOTE(review): -f and --no-clobber contradict each other; with GNU cp the
# last one wins, so this never overwrites an existing file — -f is dead.
cp -f --no-clobber stub.md "${filepath}"
date=$(date -d "${next_date}" +%Y-%m-%d)
day=$(date -d "${next_date}" +%d)
month=$(date -d "${next_date}" +%m)
year=$(date -d "${next_date}" +%Y)
# Replace the placeholders.
sed -i "s/{{ date }}/${date}/" "${filepath}"
sed -i "s/{{ title }}/${title}/" "${filepath}"
sed -i "s#{{ permalink }}#archive/${year}/${month}/${day}/${slug}#" "${filepath}"
# Create a commit with the appropriate date in the message
git add "${filepath}"
git commit --quiet -m "daily-email: add ${date}
${title}"
echo "${filepath}"
| true
|
cf8bb7f593f36caea292f40b0c030d7f396278c0
|
Shell
|
FlyToTomorrow/SettingFiles
|
/Linux/.bashrc
|
UTF-8
| 540
| 3.03125
| 3
|
[] |
no_license
|
# .bashrc
# Source global definitions
if [ -f /etc/bashrc ]; then
. /etc/bashrc
fi
# User specific aliases and functions
#source ~/.bash_profile
# export LD_LIBRARY_PATH=/home/zhifei/App/lib:$LD_LIBRARY_PATH
preferred_shell=
if [ -x /home/zhifei/App/zsh/bin/zsh ]; then
preferred_shell=/home/zhifei/App/zsh/bin/zsh
fi
if [ -n "$preferred_shell" ]; then
case $- in
*i*) SHELL=$preferred_shell; export SHELL; exec "$preferred_shell";;
esac
fi
# added by Anaconda3 installer
export PATH="/home/zhifei/App/anaconda3/bin:$PATH"
| true
|
ab77618eaaddeee822eba164f5e62755ad04f881
|
Shell
|
joshskidmore/netstatusd
|
/attempt-successful.d/write-latency-file.sh
|
UTF-8
| 355
| 3.34375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
SOURCE=$1
LATENCY=$2
[[ -f $XDG_CONFIG_HOME/netstatusd/config ]] && \
source $XDG_CONFIG_HOME/netstatusd/config
FORMATTED=""
case "$SOURCE" in
"secondary")
FORMATTED="s:$LATENCY"
;;
"walled_garden")
FORMATTED="w:$LATENCY"
;;
*)
FORMATTED="$LATENCY"
;;
esac
echo "$FORMATTED" > $DATA_DIR/latency
| true
|
e1e6ba9ad51e2018edd78093f6696a46779685ba
|
Shell
|
fransixles/admin-scripts
|
/image-cdrom.sh
|
UTF-8
| 1,851
| 4.1875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# creates an image of a CD/DVD disk
# rwb[at]0x19e.net
# specify the CD/DVD drive
DRIVE=/dev/cdrom
OUTPUT=""
if [ -n "${1}" ]; then
if [ "${1}" == "-h" ] || [ "${1}" == "--help" ]; then
echo "Usage: $0 [output] [drive]"
exit 1
fi
OUTPUT="${1}"
fi
if [ -n "${2}" ]; then
DRIVE="${2}"
fi
if [ ! -b "${DRIVE}" ]; then
echo >&2 "ERROR: Optical drive '${DRIVE}' does not exist. Check hardware and drivers for errors."
exit 1
fi
# check if superuser
if [ $EUID -eq 0 ]; then
echo >&2 "ERROR: This script should not be run as root."
exit 1
fi
# check if a disk is inserted
if ! blkid "$DRIVE" > /dev/null 2>&1; then
echo >&2 "ERROR: No disk found in $DRIVE"
exit 1
fi
# get some information about the inserted disk
if ! LABEL=$(blkid "$DRIVE" | sed -n 's/.*LABEL=\"\([^\"]*\)\".*/\1/p' | sed -e 's/ /_/g'); then
echo >&2 "ERROR: Failed to determine label for media in ${DRIVE}"
exit 1
fi
if ! SIZE=$(blockdev --getsize64 "$DRIVE"); then
echo >&2 "ERROR: Failed to determine block size of media in ${DRIVE}"
exit 1
fi
if [ -z "${OUTPUT}" ]; then
if ! OUTPUT=$(readlink -f "$LABEL".iso); then
echo >&2 "ERROR: Failed to generate output file name."
exit 1
fi
fi
if [ -e "${OUTPUT}" ]; then
echo >&2 "ERROR: File '${OUTPUT}' already exists (will not overwrite)."
exit 1
fi
# get the size in megabytes
SIZE_IN_MB=$((SIZE/1024/1024))
echo "Ripping $LABEL ($SIZE_IN_MB MB) from drive ${DRIVE}"
echo "Writing image to $OUTPUT ..."
# create an image
if ! dd if="$DRIVE" | pv -brtep -s "$SIZE" | dd of="$OUTPUT"; then
echo >&2 "ERROR: Failed to create image."
if [ -e "${OUTPUT}" ]; then
rm -v "${OUTPUT}"
fi
exit 1
fi
# eject the disk
echo "Ejecting ${DRIVE} ..."
if ! eject "$DRIVE"; then
echo >&2 "WARNING: Failed to eject ${DRIVE}"
fi
echo "Image saved to $OUTPUT"
exit 0
| true
|
bde4b5444b61437dbfdbeffb7dbedfbd992cb748
|
Shell
|
rtgibbons/dotfiles_2019
|
/conf/zshrc
|
UTF-8
| 1,883
| 2.578125
| 3
|
[] |
no_license
|
#
# User configuration sourced by interactive shells
#
if type brew &>/dev/null; then
FPATH=$(brew --prefix)/share/zsh/site-functions:$FPATH
fi
# Define zim location
export ZIM_HOME=${ZDOTDIR:-${HOME}}/.zim
# Start zim
[[ -s ${ZIM_HOME}/init.zsh ]] && source ${ZIM_HOME}/init.zsh
# Customize to your needs...
PATH="/Users/rtgibbons/perl5/bin${PATH:+:${PATH}}"; export PATH;
PERL5LIB="/Users/rtgibbons/perl5/lib/perl5${PERL5LIB:+:${PERL5LIB}}"; export PERL5LIB;
PERL_LOCAL_LIB_ROOT="/Users/rtgibbons/perl5${PERL_LOCAL_LIB_ROOT:+:${PERL_LOCAL_LIB_ROOT}}"; export PERL_LOCAL_LIB_ROOT;
PERL_MB_OPT="--install_base \"/Users/rtgibbons/perl5\""; export PERL_MB_OPT;
PERL_MM_OPT="INSTALL_BASE=/Users/rtgibbons/perl5"; export PERL_MM_OPT;
# tabtab source for serverless package
# uninstall by removing these lines or running `tabtab uninstall serverless`
[[ -f /Users/rtgibbons/Dropbox/Development/hirevpro/hirev-api/node_modules/tabtab/.completions/serverless.zsh ]] && . /Users/rtgibbons/Dropbox/Development/hirevpro/hirev-api/node_modules/tabtab/.completions/serverless.zsh
# tabtab source for sls package
# uninstall by removing these lines or running `tabtab uninstall sls`
[[ -f /Users/rtgibbons/Dropbox/Development/hirevpro/hirev-api/node_modules/tabtab/.completions/sls.zsh ]] && . /Users/rtgibbons/Dropbox/Development/hirevpro/hirev-api/node_modules/tabtab/.completions/sls.zsh
# tabtab source for slss package
# uninstall by removing these lines or running `tabtab uninstall slss`
[[ -f /Users/rtgibbons/Development/hirevpro/hirev-api/node_modules/tabtab/.completions/slss.zsh ]] && . /Users/rtgibbons/Development/hirevpro/hirev-api/node_modules/tabtab/.completions/slss.zsh
if (( ${+commands[kitty]} )); then
kitty + complete setup zsh | source /dev/stdin
fi
export PATH=$PATH:/Users/rtgibbons/bin/slack-theme
export SLACK_THEME_SHELL_PROFILE="/Users/rtgibbons/.zshrc"
| true
|
dcdd14d0a0619c1d8906ad1d0140906b94adeeab
|
Shell
|
puckv/dotfiles
|
/.bash_profile
|
UTF-8
| 803
| 2.640625
| 3
|
[] |
no_license
|
if [[ -f "$HOME/.bashrc" ]]; then
source "$HOME/.bashrc"
fi
if [[ -f "$HOME/.bash_prompt" ]]; then
source "$HOME/.bash_prompt"
fi
alias ls="command ls -laGF"
alias urlencode='python -c "import sys, urllib as ul; print ul.quote_plus(sys.argv[1]);"'
alias df="df -H"
alias du="du -h"
function assume-role() { eval $( $(which assume-role) $@); }
export EDITOR='vi';
export GREP_OPTIONS='--color=auto';
export LANG=en
export LC_CTYPE=en_US.UTF-8
export LC_ALL=en_US.UTF-8
export HOMEBREW_NO_ANALYTICS=1
export HOMEBREW_NO_AUTO_UPDATE=1
export HOMEBREW_NO_GITHUB_API=1
export HOMEBREW_NO_INSECURE_REDIRECT=1
export HOMEBREW_CASK_OPTS=--require-sha
if [[ -f "$HOME/.bash_profile_private" ]]; then
source "$HOME/.bash_profile_private"
fi
gpgconf --reload gpg-agent
gpgconf --reload scdaemon
| true
|
cdbdc20fc2a45e4fd7e44f3cfaf61a78fd929490
|
Shell
|
huqiucheng5/Android4.0.3
|
/Android4.0/hardware/ril/ril-usiuna/dhcpcd-start
|
UTF-8
| 616
| 3.0625
| 3
|
[] |
no_license
|
#!/system/bin/sh
# Android RIL helper: bring up the USB-ECM network interface and run dhcpcd
# on it, then publish the daemon's pid and exit status as system properties
# (net.gprs.dhcp-pid / net.gprs.dhcp-exit) for the RIL to read.
DHCPCD_PID=""
DHCPCD_EXIT=""
/system/bin/log -t dhcpcd "Starting dhcpcd"
# Interface name is published by the RIL, e.g. via "ril.ecm.interface".
ECM_INTERFACE=`/system/bin/getprop "ril.ecm.interface"`
/system/bin/log -t dhcpcd "ECM Network Interface is: $ECM_INTERFACE"
/system/bin/netcfg $ECM_INTERFACE up
# dhcpcd daemonizes; $? here is the fork/launch status, not the lease result.
/system/bin/dhcpcd $ECM_INTERFACE
DHCPCD_EXIT=$?
# dhcpcd writes its daemon pid to a per-interface pid file.
DHCPCD_PID=`/system/bin/cat "/data/misc/dhcp/dhcpcd-$ECM_INTERFACE.pid"`
/system/bin/log -t dhcpcd "dhcpcd forked pid is $DHCPCD_PID"
/system/bin/setprop "net.gprs.dhcp-pid" "$DHCPCD_PID"
/system/bin/log -t dhcpcd "dhcpcd exited with $DHCPCD_EXIT"
/system/bin/setprop "net.gprs.dhcp-exit" "$DHCPCD_EXIT"
| true
|
6da2517c03bd2f3b0c7884c685d4ae0d81c01fc3
|
Shell
|
mrtnmch/memory-analyzer
|
/test/test.sh
|
UTF-8
| 306
| 2.65625
| 3
|
[] |
no_license
|
#! /bin/bash
declare -a sizes=(10 20 30 40 50 60 70 80 90 100 150 200)
for i in ${sizes[@]}
do
for run in {1..3}
do
echo Running test set $i, \#$run
java -jar ../app/target/app-1.0-SNAPSHOT.jar -p ../sandbox/data/test-heapdump-$i.hprof -n cz.mxmx.memoryanalyzer.example -f > $i-$run.txt
done
done
| true
|
13e5231627a0109f196ae816c5f45efd0cd45905
|
Shell
|
IBBD/docker-images
|
/php-production/ibbd-docker
|
UTF-8
| 4,632
| 4.1875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# IBBD Docker专属命令,主要是一些日常经常使用的命令组合
# 使用在服务器端
#
# 第一次执行该文件的时候,会将该文件复制到/usr/local/bin/目录,并设置相应的目录
#
# @author alex
# @date 2015-10-13
#
##### 设置php docker-compose 配置文件目录
# 第一次执行复制到/usr/local/bin之后,会修改该变量
docker_compose_php_path=
# 帮助
# @param int 退出代码,默认为0(成功),非0为失败
usage() {
echo "$0 是一个IBBD Docker相关的服务器端指令集合, 在线上使用"
echo "Usage: "
echo " $0 php [start|stop|restart] : phpfpm服务器启动关闭,默认为启动. 注意:这里nginx也会重启(这个的命令只能使用root账号才能执行, 请联系管理员)"
echo " $0 php exec {command} : 使用php来执行某个命令,例如用在crontab"
echo " $0 php bash : 进入php的命令行环境,可以执行composer等相关"
echo " $0 php check : 检查php环境服务是否正常运行"
echo " $0 php monitor : 检查php环境服务是否正常运行, 如果不正常则重新启动, 留给监控程序调用"
echo " $0 nginx reload : nginx配置文件的重新加载"
echo " $0 nginx bash : 进入nginx的命令环境"
echo " $0 "
if [ $# -lt 1 ]; then
code=0
else
code=$1
fi
exit $code
}
error() {
echo "ERROR: $*"
echo
usage 1
}
if [ $# -lt 1 -o "$1" = help -o "$1" = h ]; then
usage
fi
# 有些平台没有sudo命令,例如windows下使用mingw的环境
sudo=
if which sudo; then
sudo=sudo
fi
# 判断容器是否存在
# @param string 容器名
check_container_exists() {
if $sudo docker ps -a | grep -E '\s'"$1"'$' -q
then
return 0
fi
return 1
}
# 判断容器是否正在运行
# @param string 容器名
# Return 0 (success) iff the named container exists AND is running.
# Fixes two defects in the original:
#   - the inspect template read ".State.Runnint" (typo), so docker inspect
#     always errored and $status was never "true";
#   - the comparison was inverted (`!= 'true'` returned 0), making the
#     function report success for *stopped* containers — the opposite of
#     what its callers (e.g. check_php_env_running) expect.
check_container_running() {
    if check_container_exists "$1"; then
        status=$($sudo docker inspect -f '{{.State.Running}}' "$1")
        if [ "$status" = 'true' ]; then
            return 0
        fi
    fi
    return 1
}
# 判断php的执行环境是否正常
# 主要检查phpfpm和nginx容器是否正在运行
check_php_env_running() {
if check_container_running 'ibbd-nginx'; then
if check_container_running 'ibbd-php-fpm'; then
return 0
else
echo 'ERROR: ibbd-php-fpm is not running!'
fi
else
echo 'ERROR: ibbd-nginx is not running!'
fi
return 1
}
# 判断是否是root用户
check_root() {
if [ "$HOME" = '/root' ]; then
return 0
fi
return 1
}
# 命令必须由root用户执行
# @param string 命令
command_need_root() {
if ! check_root; then
echo "ERROR: $1 命令必须是root用户才能执行,请联系管理员"
exit 1
fi
return 0
}
# 处理命令
case "$1" in
nginx)
if [ $# -ne 2 ]; then
error '参数异常!'
fi
if [ "$2" = reload ]; then
$sudo docker exec -ti ibbd-nginx nginx -s reload
elif [ "$2" = bash ]; then
$suo docker exec -ti ibbd-nginx /bin/bash
else
error "参数异常"
fi
;;
php)
if [ $# -eq 1 ]; then
action=start
else
action=$2
fi
# 处理不同的action
container=ibbd-php-fpm
cd $docker_compose_php_path
case "$action" in
start)
command_need_root "$action"
bash start.sh
;;
stop)
command_need_root "$action"
bash stop.sh
;;
restart)
command_need_root "$action"
bash restart.sh
;;
bash)
$sudo docker exec -ti $container /bin/bash
;;
exec)
$sudo docker exec -ti $container $3
;;
check)
if check_php_env_running; then
echo "$container is running."
else
echo "$container is down!"
fi
;;
monitor)
if ! check_php_env_running; then
bash start.sh
fi
;;
*)
error "$action 该命令不存在"
;;
esac
;;
*)
error '参数异常!'
;;
esac
| true
|
d992e21c5a2e73b8d4a56ced2a35464e40681a17
|
Shell
|
drewejohnson/qop
|
/qop.sh
|
UTF-8
| 1,352
| 3.921875
| 4
|
[
"MIT"
] |
permissive
|
# Some bash functions for working with PBS jobs
#
# Copyright (c) 2018 Andrew Johnson, GTRC
# Goverened by MIT License
# More info at https://github.com/drewejohnson/qop
#
# $ qop-release [N]
# Release N jobs from a UserHold. If not given, release all
# $ qop-hold [N]
# Apply a UserHold to N jobs. If not given, apply the hold to all
#
#TODO Option to reverse order of jobs released/held
# Iterate over PBS job ids, invoking a command on each.
#   $1  - max number of jobs to act on (<= 0 means act on all of them)
#   $2  - command to run with the job id appended; may carry its own
#         options (e.g. "qhold -h u"), so it is intentionally unquoted
#         below to allow word-splitting
#   $3  - verbosity flag: 1 = echo each job id before acting on it
#   $4+ - the job ids
# Fix: "${@:4}" and "$job" are now quoted (the original's unquoted $@/$j
# were subject to word-splitting and globbing — SC2068/SC2086).
_job_iterator() {
    local count=1
    for job in "${@:4}"; do
        if (( $1 > 0 )) && (( count > $1 )); then
            break
        else
            count=$(( count + 1 ))
        fi
        if [ "$3" -eq 1 ]; then echo "$job"; fi
        # shellcheck disable=SC2086 -- $2 may carry its own arguments
        $2 "$job"
    done
}
# function to release some number of jobs
qop-release () {
if [ -z $1 ]; then
echo N not given. Releasing all jobs
N=-1
else
N=$1
fi
jobs=$( showq -u $( whoami ) | awk '{if($3 == "UserHold") print $1;}' )
_job_iterator $N "qrls" 1 $jobs
}
# function to apply a hold on some number of jobs in the queue
qop-hold() {
if [ -z $1 ]; then
echo N not given. Apply hold to all jobs
N=-1
else
N=$1
fi
jobs=$(showq -u $( whoami ) | awk '{if($3 == "Idle") print $1;}')
_job_iterator $N "qhold -h u" 1 $jobs
}
| true
|
577087ef9b4d1c279e35b49d10561ea497dd6854
|
Shell
|
metala/lego-dns-digitalocean
|
/common.sh
|
UTF-8
| 842
| 3.921875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
[ -n "$_ALLOW_INCLUDES" ] || exit 1
ensure_var() {
if [ -z "$2" ]; then
echo "> ERR: Missing $1 variable." >&2
exit 2
fi
}
ensure_env_var() {
VAL=$(eval echo \"'$'$1\")
ensure_var $1 $VAL
}
get_run_cmd() {
if [ -f "$CERT_PATH_PREFIX.json" ]; then
echo renew
else
echo setup
fi
}
# Compute the Unix timestamp for "now + <N><unit>", where the argument
# looks like "30s", "10m", "12h" or "7d". Prints nothing when the unit
# suffix is not one of s/m/h/d.
next_date_timestamp() {
    NUM=$(printf '%s\n' "$1" | grep -Eo '^[0-9]+')
    SUFFIX=$(printf '%s\n' "$1" | grep -Eo '[smhd]$')
    NOW=$(date +%s)
    case "$SUFFIX" in
        s) echo $(( NOW + NUM )) ;;
        m) echo $(( NOW + NUM * 60 )) ;;
        h) echo $(( NOW + NUM * 3600 )) ;;
        d) echo $(( NOW + NUM * 86400 )) ;;
    esac
}
cert_file_sha1_fingerprint() {
FILENAME="$1"
openssl x509 -in "$FILENAME" -sha1 -noout -fingerprint \
| grep -E '[0-9A-F]{2}(:[0-9A-F]{2}){19}' -o \
| tr -d ':' \
| tr '[:upper:]' '[:lower:]'
}
| true
|
937beea528039494a90f2a8e28f506a3cb75b9f2
|
Shell
|
mmclsntr/serverless-multi-region-api-sample
|
/tools/get_api_non_target_env.sh
|
UTF-8
| 1,020
| 3.796875
| 4
|
[] |
no_license
|
#!/bin/bash -xe
CMDNAME=`basename $0`
WORKDIR=`cd $(dirname $0); pwd`
ROOTDIR=$WORKDIR/..
SETTING_FILE=$ROOTDIR/settings.conf
GET_API_ID_SCRIPT=$WORKDIR/get_api_id.sh
GET_PARAM_SCRIPT=$WORKDIR/get_parameter.sh
if [ $# -ne 2 ]; then
echo "Usage: $CMDNAME <stage> <profile>" 1>&2
exit 1
fi
export STAGE=$1
export AWS_PROFILE=$2
# Load config
source $SETTING_FILE
PARAM_PATH=/${APP_NAME}/${STAGE}/
DOMAIN_NAME_PARAM="${PARAM_PATH}domain_name"
DOMAIN_NAME=`sh $GET_PARAM_SCRIPT $DOMAIN_NAME_PARAM $STAGE $AWS_PROFILE`
REST_API_ID=`sh $GET_API_ID_SCRIPT $STAGE $AWS_PROFILE $PRIMARY_REGION`
TARGET_ENV=`aws apigatewayv2 get-api-mappings --domain-name $DOMAIN_NAME \
--query "Items[?ApiId==\\\`$REST_API_ID\\\`]" \
--region $PRIMARY_REGION \
2>/dev/null \
| jq -r .[0].Stage`
i=0
for stage in ${API_STAGES[@]}
do
if [ "${API_STAGES[i]}" == "$TARGET_ENV" ]; then
unset API_STAGES[i]
API_STAGES=(${API_STAGES[@]})
break
fi
let i++
done
echo ${API_STAGES[0]}
| true
|
970288a949ce909e7c11c2e81fa4b13aebba1411
|
Shell
|
jpwhitemn/edgex-taf
|
/TAF/utils/scripts/docker/api-gateway-token.sh
|
UTF-8
| 1,380
| 3.609375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
option=${1}
PROXY_IMAGE=$(docker inspect --format='{{.Config.Image}}' edgex-proxy-setup)
PROXY_NETWORK_ID=$(docker inspect --format='{{range .NetworkSettings.Networks}}{{.NetworkID}}{{end}}' edgex-proxy-setup)
if [ ! -f "${WORK_DIR}/ec256.pub" ];
then
openssl ecparam -name prime256v1 -genkey -noout -out ${WORK_DIR}/ec256.key 2> /dev/null
cat ${WORK_DIR}/ec256.key | openssl ec -out ${WORK_DIR}/ec256.pub 2> /dev/null
fi
case ${option} in
-useradd)
#create a user
ID=`cat /proc/sys/kernel/random/uuid 2> /dev/null` || ID=`uuidgen`
docker run --rm -e KONGURL_SERVER=kong --network=${PROXY_NETWORK_ID} --entrypoint "" \
-v ${WORK_DIR}:/keys ${PROXY_IMAGE} /edgex/secrets-config proxy \
adduser --token-type jwt --id ${ID} --algorithm ES256 --public_key /keys/ec256.pub \
--user testinguser > /dev/null
#create a JWT
docker run --rm -e KONGURL_SERVER=kong --network=${PROXY_NETWORK_ID} --entrypoint "" \
-v ${WORK_DIR}:/keys ${PROXY_IMAGE} /edgex/secrets-config proxy \
jwt --algorithm ES256 --id ${ID} --private_key /keys/ec256.key
;;
-userdel)
docker run --rm -e KONGURL_SERVER=kong --network=${PROXY_NETWORK_ID} --entrypoint "" \
-v ${WORK_DIR}:/keys ${PROXY_IMAGE} /edgex/secrets-config proxy \
deluser --user testinguser
;;
*)
exit 0
;;
esac
| true
|
5a255aed0f14c0e284cbbf937b55016db281ac05
|
Shell
|
nasoym/aws_docker_setups
|
/bin/pushgateway_delete
|
UTF-8
| 166
| 2.546875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Delete all metrics for one job from the local Prometheus Pushgateway.
# Job name resolution order: $job_name env var, else first CLI argument,
# else the literal default "metric_name".
set -eufo pipefail
job_name="${job_name:-"${1:-metric_name}"}"
curl -i -X DELETE "http://localhost:9091/metrics/job/${job_name}"
| true
|
7f9ad03919992e247527e436047a0adbc1bf4068
|
Shell
|
Raf-Batista/env_setup
|
/scripts/tools/gimp.sh
|
UTF-8
| 374
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/bash
# Install gimp via snap unless it is already recorded in the tool
# manifest at $DIR/installed.txt (one tool name per line).
# NOTE(review): assumes $DIR is exported by the calling script — confirm.
FILE="$DIR/installed.txt"
# -q: status only; -x: whole-line match; -F: literal string. This replaces
# the original read-loop (which also left $FILE/$DIR unquoted and therefore
# broken for paths containing spaces).
if [[ -f "$FILE" ]] && grep -qxF "gimp" "$FILE"; then
  gimp=true
fi
if [[ -z ${gimp+x} ]]; then
  echo "You do not have gimp installed"
  sudo snap install gimp
  printf "gimp\n" >> "$DIR/installed.txt"
else
  echo "You have gimp installed"
fi
| true
|
11bdec46325916efd3f7775596eb7676f44f8768
|
Shell
|
Bloodawn/lge-kernel-sniper
|
/tools/make_bootimg_gzip
|
UTF-8
| 270
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
# Pack a gzip-ramdisk Android boot image from a freshly built kernel.
# Expects tools/zImage (kernel) and tools/ramdisk.gz to exist; the result
# is written to done/boot.img.
if test -e tools/zImage
then
    echo "CLEANING OLD BOOTIMG..."
    # -f: on the very first build there is no previous boot.img, and the
    # original plain `rm` aborted here with "No such file or directory".
    rm -f done/boot.img
    echo "PACKING BOOTIMG..."
    ./tools/mkbootimg --kernel tools/zImage --ramdisk tools/ramdisk.gz -o done/boot.img --base 0x80000000
    echo "DONE"
else
    echo "No zImage found"
fi
| true
|
c9df129b63adeb87fe55ba372d3be24593dd8705
|
Shell
|
imma/usr
|
/exec/usr-user
|
UTF-8
| 562
| 3.234375
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/usr/bin/env bash
# List or look up users in the AWS Cognito user pool named by USR_POOL_ID.
# With no argument: dump all users, re-keyed as {username: user-record}.
# With a username argument: fetch that single user's record.
function main {
# Resolve this script's parent directory and load the shared profile
# (which is expected to provide USR_POOL_ID and AWS configuration).
local shome="$(cd -P -- "${BASH_SOURCE%/*}/.." && pwd -P)"
source "$shome/script/profile"
if [[ -z "${1:-}" ]]; then
# jq folds the Users array into an object indexed by Username.
aws cognito-idp list-users --user-pool-id "${USR_POOL_ID}" | jq 'reduce .Users[] as $ele ({}; .[$ele.Username] = $ele)'
return $?
fi
local nm_user="${1}"; shift
# Build the admin-get-user request body; env.USR_POOL_ID is read by jq
# directly from the environment exported by the sourced profile.
local json_get_user="$(jq -n --arg username "${nm_user}" '{ UserPoolId: env.USR_POOL_ID, Username: $username }')"
exec aws cognito-idp admin-get-user --cli-input-json "${json_get_user}"
}
# "sub" is a project-local dispatcher that invokes main with the CLI args.
source sub "$BASH_SOURCE" "$@"
| true
|
053bcc8432a750556c93a5d73d91ab5521bc902b
|
Shell
|
janssenda/thenewcarag
|
/testModeUnix.sh
|
UTF-8
| 1,165
| 3.671875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
while [[ $# -gt 0 ]]
do
key=$1
case $key in
--install-python)
installPython=true
installParams+=("--install-python")
shift ;;
--offline)
offlineMode=true
installParams+=("--offline")
shift ;;
--ust-version)
if [[ $2 == "2.2.2" || $2 == "2.3" ]]; then
ustVer=$2
installParams+=("--ust-version $2")
else
echo "Version '$2' - Invalid version (2.2.2 or 2.3 only)"
exit
fi
shift # past argument
shift # past value
;;
*)
echo "Parameter '$1' not recognized"
exit
shift # past argument
shift # past value
esac
done
wget -O ins.sh https://goo.gl/5LRahv; chmod 777 ins.sh;
params=$(echo "${installParams[@]}")
source ins.sh $params
####################################
# REMOVE FROM PROD VERSION
####################################
TestArch=$(download https://gitlab.com/adobe-ust-resources/install-scripts/raw/master/Util/utilities.tar.gz)
validateDownload $TestArch
extractArchive $TestArch "$USTFolder"
rm $TestArch
#####################################
| true
|
ca5ec5e38d23e250233910881f16d1ece3721f03
|
Shell
|
chubukey/3D_Crystal_Plasticity_Model
|
/Yield_Surface_For_Slip_Planes/cluster/Launch_Cluster_Sec_Star.sh
|
UTF-8
| 1,749
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
# Launch one PBS job per (slip system, modes plane, job-details line):
# the template PBS script is copied, its placeholder tokens are substituted
# with sed, and the result is submitted with qsub.

# Crystallographic plane (hkl) and direction (uvw); together they form the
# suffix used in input-folder names, e.g. "010_100".
hkl=(0 1 0)
uvw=(1 0 0)
suffix=${hkl[0]}${hkl[1]}${hkl[2]}_${uvw[0]}${uvw[1]}${uvw[2]}

#slip_systems_list=("b4" "b2" "b5" "d4" "d1" "d6" "a2" "a6" "a3" "c5" "c3" "c1")
slip_systems_list=("b5")
modes_plane_list=("I_II")

# Template PBS script; its "#..._value#" placeholders are replaced per job.
PBS_file=Yield_Surface_Sec_Star.sh

for slip_suffix in ${slip_systems_list[@]}
do
	for modes_plane in ${modes_plane_list[@]}
	do
		inp_folder=inp_files_${suffix}
		in_folder=/u/tezeghdanti/3D_Model_Crystal_Plasticity/Yield_Surface_For_Slip_Planes/${inp_folder}/${slip_suffix}
		job_file_name=${in_folder}/Star_job_details_${modes_plane}.txt
		# Read job details file
		# Line 1 holds the initial job name; lines 2..N hold one job each.
		nbr_lines=$(wc -l < ${job_file_name})
		InitJobName=$(sed -n 1p ${job_file_name})
		echo ${InitJobName}
		for ((j=2;j<=${nbr_lines};j++))
		do
			JobName=$(sed -n ${j}p ${job_file_name})
			SecJobName="${JobName}_${slip_suffix}.inp"
			echo ${SecJobName}
			# Per-job copy of the PBS template, customized in place below.
			PBS_file_New=Yield_Surface_Sec_Star_${slip_suffix}_${modes_plane}_${j}
			cp ${PBS_file} ${PBS_file_New}
			### set suffix
			to_find=#suffix_value#
			replace_by=${suffix}
			sed -i -e "s/${to_find}/${replace_by}/g" ${PBS_file_New}
			### set slip system
			to_find=#slip_suffix_value#
			replace_by=${slip_suffix}
			sed -i -e "s/${to_find}/${replace_by}/g" ${PBS_file_New}
			### set modes plane
			to_find=#modes_plane_value#
			replace_by=${modes_plane}
			sed -i -e "s/${to_find}/${replace_by}/g" ${PBS_file_New}
			### set Initial Job Name
			to_find=#InitJobName_value#
			replace_by=${InitJobName}
			sed -i -e "s/${to_find}/${replace_by}/g" ${PBS_file_New}
			### set Initial Job Name
			# NOTE(review): comment above is duplicated in the original;
			# this replacement actually sets the secondary job name.
			to_find=#SecJobName_value#
			replace_by=${SecJobName}
			sed -i -e "s/${to_find}/${replace_by}/g" ${PBS_file_New}
			### launch computation
			qsub ${PBS_file_New}
		done
	done
done
| true
|
7dbf39a9d43896bb264b90d7384fa67f8d325ca5
|
Shell
|
takashiki/tool-scripts
|
/wsl_host.sh
|
UTF-8
| 296
| 3.0625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Funcion: automatic set windows ip to wsl2 hosts file
# Author: takashiki
# Website: https://qxzzf.com

# In WSL2 the nameserver in /etc/resolv.conf is the Windows host's IP.
# BUG FIX: a single awk call replaces the former cat | grep | awk pipeline,
# and "exit" stops at the first nameserver line — the old pipeline produced
# a multi-line value when several nameserver entries existed.
hostip=$(awk '/^nameserver/ { print $2; exit }' /etc/resolv.conf)

# Drop any stale "… windows" entry, then append the current mapping.
sed -i -r "/^ *[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+ +windows/d" /etc/hosts
echo "${hostip} windows" >> /etc/hosts
| true
|
7c2b22138c08a6f5711707c18c239fa382fb47f6
|
Shell
|
bsc-dom/dataclay-packaging
|
/hpc/mn/deploy.sh
|
UTF-8
| 7,106
| 3.578125
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
#===============================================================================
# ANSI color escape codes used by the print helpers below.
grn=$'\e[1;32m'
blu=$'\e[1;34m'
red=$'\e[1;91m'
yellow=$'\e[1;33m'
end=$'\e[0m'

# Colored message helpers; the message text is the first argument.
printMsg()   { echo "${blu}$1${end}"; }
printInfo()  { echo "${yellow}$1${end}"; }
printWarn()  { echo "${yellow}WARNING: $1${end}"; }
printError() { echo "${red}======== $1 ========${end}"; }
#=== FUNCTION ================================================================
#          NAME: get_container_version
#   DESCRIPTION: Print the dataClay container tag on stdout.
#   PARAMETER 1: optional execution-environment id (e.g. py3.6, jdk8); its
#                dots are stripped before it is appended to the version.
#       GLOBALS: reads DATACLAY_VERSION and DEV; ".dev" is appended when
#                DEV is true.
#===============================================================================
function get_container_version() {
	# FIX: the original assigned its working variables (EE_VERSION, etc.)
	# globally; they are now local. All call sites use $( ), so this is
	# behavior-compatible.
	local tag="$DATACLAY_VERSION"
	if [ $# -gt 0 ]; then
		# Append the environment id with its dots removed (py3.6 -> py36).
		local ee="$1"
		tag="${tag}.${ee//./}"
	fi
	if [ "$DEV" = true ]; then
		tag="${tag}.dev"
	fi
	echo "${tag}"
}
#=== FUNCTION ================================================================
#          NAME: deploy_logicmodule
#   DESCRIPTION: Pull each logicmodule image from Docker Hub as a
#                Singularity .sif and copy it to the MN images directory.
#=============================================================================
function deploy_logicmodule {
	# FIX: working variables made local (no global leakage) and all path
	# expansions quoted (SC2086).
	local image=logicmodule
	local java_version ee_tag
	for java_version in "${SUPPORTED_JAVA_VERSIONS[@]}"; do
		ee_tag="$(get_container_version "jdk${java_version}")"
		singularity pull "$DEPLOYSCRIPTDIR/${image}:${ee_tag}.sif" "docker://bscdataclay/${image}:${ee_tag}"
		scp "$DEPLOYSCRIPTDIR/${image}:${ee_tag}.sif" "dataclay@dt01.bsc.es:/gpfs/apps/MN4/DATACLAY/$DEFAULT_TAG/singularity/images/"
	done
}
#=== FUNCTION ================================================================
#          NAME: deploy_dsjava
#   DESCRIPTION: Pull each dsjava image from Docker Hub as a Singularity
#                .sif and copy it to the MN images directory.
#=============================================================================
function deploy_dsjava {
	# FIX: working variables made local (no global leakage) and all path
	# expansions quoted (SC2086).
	local image=dsjava
	local java_version ee_tag
	for java_version in "${SUPPORTED_JAVA_VERSIONS[@]}"; do
		ee_tag="$(get_container_version "jdk${java_version}")"
		singularity pull "$DEPLOYSCRIPTDIR/${image}:${ee_tag}.sif" "docker://bscdataclay/${image}:${ee_tag}"
		scp "$DEPLOYSCRIPTDIR/${image}:${ee_tag}.sif" "dataclay@dt01.bsc.es:/gpfs/apps/MN4/DATACLAY/$DEFAULT_TAG/singularity/images/"
	done
}
#=== FUNCTION ================================================================
#          NAME: deploy_dspython
#   DESCRIPTION: Pull each dspython image from Docker Hub as a Singularity
#                .sif and copy it to the MN images directory.
#=============================================================================
function deploy_dspython {
	# FIX: working variables made local (no global leakage) and all path
	# expansions quoted (SC2086).
	local image=dspython
	local python_version ee_tag
	for python_version in "${SUPPORTED_PYTHON_VERSIONS[@]}"; do
		ee_tag="$(get_container_version "py${python_version}")"
		singularity pull "$DEPLOYSCRIPTDIR/${image}:${ee_tag}.sif" "docker://bscdataclay/${image}:${ee_tag}"
		scp "$DEPLOYSCRIPTDIR/${image}:${ee_tag}.sif" "dataclay@dt01.bsc.es:/gpfs/apps/MN4/DATACLAY/$DEFAULT_TAG/singularity/images/"
	done
}
#=== FUNCTION ================================================================
#          NAME: deploy_client
#   DESCRIPTION: Pull the client image from Docker Hub as a Singularity
#                .sif and copy it to the MN images directory.
#=============================================================================
function deploy_client {
	# FIX: working variable made local (no global leakage) and all path
	# expansions quoted (SC2086).
	local image=client
	singularity pull "$DEPLOYSCRIPTDIR/${image}:${DEFAULT_TAG}.sif" "docker://bscdataclay/${image}:$DEFAULT_TAG"
	scp "$DEPLOYSCRIPTDIR/${image}:${DEFAULT_TAG}.sif" "dataclay@dt01.bsc.es:/gpfs/apps/MN4/DATACLAY/$DEFAULT_TAG/singularity/images/"
}
#=== FUNCTION ================================================================
#          NAME: deploy_orchestrator
#   DESCRIPTION: Deploy orchestration scripts, javaclay jar, pyclay sources
#                and the Lua environment-module definition to Marenostrum.
#                Destructive: the remote version directory is recreated.
#=============================================================================
function deploy_orchestrator {
	# Prepare module definition
	# Stamp the current tag into the Lua module template.
	sed "s/SET_VERSION_HERE/${DEFAULT_TAG}/g" $DEPLOYSCRIPTDIR/module.lua > /tmp/${DEFAULT_TAG}.lua
	# Deploy singularity and orchestration scripts to Marenostrum
	DEPLOY_CMD="rm -rf /apps/DATACLAY/$DEFAULT_TAG/ &&\
	mkdir -p /apps/DATACLAY/$DEFAULT_TAG/singularity/images/ &&\
	mkdir -p /apps/DATACLAY/$DEFAULT_TAG/javaclay &&\
	mkdir -p /apps/DATACLAY/$DEFAULT_TAG/pyclay"
	echo "[marenostrum-deploy] Cleaning and preparing folders in MN..."
	ssh dataclay@mn2.bsc.es "$DEPLOY_CMD"
	# Send orchestration script and images
	echo "[marenostrum-deploy] Deploying dataclay orchestrator and singularity images..."
	pushd $PACKAGING_DIR/orchestration
	# .rsync-filter controls which files are shipped.
	rsync -av -e ssh --filter="merge ./.rsync-filter" ./* dataclay@dt01.bsc.es:/gpfs/apps/MN4/DATACLAY/$DEFAULT_TAG
	popd
	# Send javaclay and pyclay
	echo "[marenostrum-deploy] Deploying javaclay..."
	pushd $PACKAGING_DIR/docker/logicmodule/javaclay
	# Build the fat jar locally, then copy it up as dataclay.jar.
	mvn package -DskipTests=true
	scp target/*-jar-with-dependencies.jar dataclay@dt01.bsc.es:/gpfs/apps/MN4/DATACLAY/$DEFAULT_TAG/javaclay/dataclay.jar
	popd
	echo "[marenostrum-deploy] Deploying pyclay..."
	pushd $PACKAGING_DIR/docker/dspython/pyclay
	rsync -av -e ssh --filter="merge ./.rsync-filter" --progress ./* dataclay@dt01.bsc.es:/gpfs/apps/MN4/DATACLAY/$DEFAULT_TAG/pyclay/
	popd
	# Changing permissions in pyclay folder
	ssh dataclay@mn2.bsc.es "chmod -R g-w /apps/DATACLAY/$DEFAULT_TAG/pyclay/"
	# Module definition
	echo "[marenostrum-deploy] Deploying dataclay module..."
	scp /tmp/${DEFAULT_TAG}.lua dataclay@dt01.bsc.es:/gpfs/apps/MN4/DATACLAY/modules/
	# Point the "develop" (dev builds) or "latest" (releases) symlink at
	# this version's module file.
	MODULE_LINK="develop"
	if [ "$DEV" = false ] ; then
		MODULE_LINK="latest"
	fi
	ssh dataclay@mn2.bsc.es "rm /apps/DATACLAY/modules/${MODULE_LINK}.lua && ln -s /apps/DATACLAY/modules/${DEFAULT_TAG}.lua /apps/DATACLAY/modules/${MODULE_LINK}.lua"
}
# Abort on the first failing command.
set -e

# Resolve script/packaging locations relative to this file.
DEPLOYSCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
PACKAGING_DIR=$DEPLOYSCRIPTDIR/../..
ORCHESTRATION_DIR=$PACKAGING_DIR/orchestration
DEFAULT_TAG=$(cat $ORCHESTRATION_DIR/VERSION.txt)
# Version with any ".dev" marker removed.
DATACLAY_VERSION="${DEFAULT_TAG//.dev/}"
# Images deployed by default; can be overridden with --images "a b c".
IMAGES=(logicmodule dsjava dspython client)
CONFIG_FILE=$PACKAGING_DIR/docker/common/normal.config

# A tag containing "dev" selects development mode (affects container tags
# and which module symlink deploy_orchestrator updates).
if [[ $DEFAULT_TAG == *"dev"* ]]; then
	DEV=true
	printWarn "Deploying development version"
fi

# Minimal CLI parsing: --images "<space-separated list>", --config-file <path>.
while test $# -gt 0; do
	case "$1" in
	--images)
		shift
		IFS=' ' read -r -a IMAGES <<< "$1"
		;;
	--config-file)
		shift
		CONFIG_FILE=$1
		printWarn "Configuration file used in all images: $CONFIG_FILE"
		;;
	*)
		echo "Bad option $1"
		exit 1
		;;
	esac
	shift
done

# Config presumably defines SUPPORTED_JAVA_VERSIONS / SUPPORTED_PYTHON_VERSIONS
# used by the deploy_* functions — TODO confirm against normal.config.
source $CONFIG_FILE

SECONDS=0
echo "[marenostrum-deploy] Deploying $DEFAULT_TAG to MN..."
deploy_orchestrator
# Dispatch to deploy_logicmodule / deploy_dsjava / ... by name.
for IMAGE in "${IMAGES[@]}"; do
	echo "[marenostrum-deploy] Deploying $IMAGE image to MN..."
	deploy_$IMAGE
done
# Remove the locally pulled .sif images.
rm $DEPLOYSCRIPTDIR/*.sif

duration=$SECONDS
echo "$(($duration / 60)) minutes and $(($duration % 60)) seconds elapsed."
echo "MN deployment successfully finished!"
| true
|
e117e5636df075d1821fb5388c54916631b58f2e
|
Shell
|
zhouzhui/iOSBuildShell
|
/build_ipa.sh
|
UTF-8
| 1,397
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
## build ipa and upload to http://www.pgyer.com/
# Archives the Xcode workspace, packages the .app into an .ipa, uploads it
# to pgyer, keeps the dSYM next to the build output and cleans up.

DATE=$(date +'%Y-%m-%d')
TIME=$(date +'%H%M%S')
SHELL_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
BASE_DIR=$( cd "${SHELL_DIR}" && cd "../../" && pwd)

# Project settings (placeholders — fill in per project).
PRODUCT=YOUR_PRODUCT_NAME
WORKSPACE="${BASE_DIR}"/"${PRODUCT}".xcworkspace
CONF=Enterprise
SCHEME="${PRODUCT}"

# pgyer
PGYER_USER_KEY=YOUR_USER_KEY
PGYER_API_KEY=YOUR_API_KEY
PGYER_API_URL="http://www.pgyer.com/apiv1/app/upload"

# dirs
# Build output goes into build/<date>/<time>/ so runs never collide.
OUTPUT_DIR="${BASE_DIR}"/build/"${DATE}"/"${TIME}"
DERIVED_DATA_PATH="${OUTPUT_DIR}"/derived
ARCHIVE_PATH="${OUTPUT_DIR}"/"${PRODUCT}".xcarchive
APP_PATH="${ARCHIVE_PATH}"/Products/Applications/"${PRODUCT}".app
IPA_PATH="${OUTPUT_DIR}"/"${PRODUCT}".ipa
DSYM_PATH="${DERIVED_DATA_PATH}"/Build/Intermediates/ArchiveIntermediates/"${PRODUCT}"/BuildProductsPath/"${CONF}"-iphoneos/"${PRODUCT}".app.dSYM

# build ipa
xcodebuild -workspace "${WORKSPACE}" -scheme "${SCHEME}" -configuration "${CONF}" \
-archivePath "${ARCHIVE_PATH}" -derivedDataPath "${DERIVED_DATA_PATH}" archive
# NOTE(review): PackageApplication was removed in newer Xcode releases;
# this step works only on older toolchains.
xcrun -sdk iphoneos PackageApplication "${APP_PATH}" -o "${IPA_PATH}"

# upload to http://www.pgyer.com/
curl -F "file=@${IPA_PATH}" -F "uKey=${PGYER_USER_KEY}" -F "_api_key=${PGYER_API_KEY}" "${PGYER_API_URL}"

# copy dSYM file
cp -R "${DSYM_PATH}" "${OUTPUT_DIR}/"

# clean derived data & archive file
rm -fr "${ARCHIVE_PATH}"
rm -fr "${DERIVED_DATA_PATH}"
| true
|
ea1551249fdc4662057f7fc9f841eec4a908c082
|
Shell
|
patricksteenks/lazygit
|
/lazygit.sh
|
UTF-8
| 276
| 3.234375
| 3
|
[] |
no_license
|
# lazygit <message> <branch>: stage everything, commit with <message> and
# push to origin/<branch>. Prints a reminder and returns early when either
# argument is missing.
function lazygit () {
    if [[ ! $1 ]]
    then
        echo "Please use a commit message"
        return
    fi
    if [[ ! $2 ]]
    then
        echo "Do not forget to set target branch"
        return
    fi
    git add -A
    # BUG FIX: the message was wrapped in single quotes, so every commit was
    # literally titled "$1"; double quotes expand the argument.
    git commit -m "$1"
    git push origin "$2"
}
| true
|
9e3472d9099e78dd15114bfd955650bbe4823a81
|
Shell
|
justinscript/useful-develop-tools
|
/script/sql/mysql_monitor.sh
|
UTF-8
| 1,686
| 4.0625
| 4
|
[] |
no_license
|
#! /bin/bash
# by zxc
# Simple mysqladmin wrapper for routine MySQL monitoring: show status or
# processlist, show (extended) variables filtered by keyword, and kill a
# process by id. Fill in the MySQL user details before use.
#
# Usage:
#   ./mysql.sh s            show current status
#   ./mysql.sh p            show current processlist
#   ./mysql.sh v [keyword]  show variables (all, or those matching keyword)
#   ./mysql.sh e [keyword]  show extended status (all, or matching keyword)
#   ./mysql.sh k id         kill the given process id
PATH=$PATH:/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin
export PATH

BINPATH="`which mysqladmin`"
# NOTE(review): credentials are hardcoded; prefer ~/.my.cnf or environment
# variables so the password does not appear in `ps` output.
USER=root
PASS=password
COMMAND="$BINPATH --user=$USER --password=$PASS"

usage(){
    # BUG FIX: previously printed the mysqladmin path as the program name;
    # use this script's own name instead.
    echo -e "show mysql infomation\nUsage: `basename $0` [ s | p | v keyword | e keyword | k id]"
    exit 0
}

show_status(){
    $COMMAND status
}

show_processlist(){
    $COMMAND processlist
}

# Show variables; with a keyword, filter case-insensitively.
show_variables(){
    if [ $1 ];then
        $COMMAND variables | grep -i $1
    else
        $COMMAND variables
    fi
}

# Show extended status; with a keyword, filter case-insensitively.
show_extended(){
    if [ $1 ];then
        $COMMAND extended-status | grep -i $1
    else
        $COMMAND extended-status
    fi
}

# Kill the given process id; show usage when the id is missing.
kill_process(){
    if [ $1 ];then
        $COMMAND kill $1
    else
        usage
    fi
}

# BUG FIX: 's' and 'p' take a single argument, but the old check required at
# least two arguments, so "./mysql.sh s" always showed usage instead of the
# status output the header documents.
if [ $# -lt 1 ];then
    usage
else
    case $1 in
        s)
            show_status
            ;;
        p)
            show_processlist
            ;;
        v)
            show_variables $2
            ;;
        e)
            show_extended $2
            ;;
        k)
            kill_process $2
            ;;
        *)
            usage
            ;;
    esac
fi
exit 0
| true
|
cf30fdb7e77e03fc8e980f801fb32c85917d446f
|
Shell
|
PicPay/presidio
|
/build.sh
|
UTF-8
| 878
| 3.40625
| 3
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"BSD-3-Clause",
"MPL-2.0",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# Helper for the local docker build only — this does not deploy the service.
# Expects a running local docker instance; no error checking is performed.
# The first run is very slow (an hour or more, depending on machine and
# network) because the base images themselves must be built.

# Defaults, overridable from the environment.
: "${DOCKER_REGISTRY:=presidio}"
: "${PRESIDIO_LABEL:=latest}"
: "${NETWORKNAME:=presidio-network}"

# Build the dependency images first, then the service images.
for target in docker-build-deps docker-build; do
    make DOCKER_REGISTRY=${DOCKER_REGISTRY} PRESIDIO_LABEL=${PRESIDIO_LABEL} "$target"
done

# Run the containers.
NETWORKNAME=$NETWORKNAME DOCKER_REGISTRY="$DOCKER_REGISTRY" PRESIDIO_LABEL="$PRESIDIO_LABEL" ./run.sh
| true
|
10f23292e3cf058b6654111bfcaf7a924cd0abb9
|
Shell
|
sshtmc/dotfiles
|
/gdb.recipe
|
UTF-8
| 318
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
# Build and install GDB from source into /opt/gdb-$VER. On exit, a shell is
# opened in the temporary build directory; the directory is removed when
# that shell is closed.
set -e

T=$(mktemp -d)
trap "cd $T; $SHELL; rm -rf $T" EXIT
cd $T

VER=7.8
wget http://mirror.switch.ch/ftp/mirror/gnu/gdb/gdb-$VER.tar.xz
tar xf gdb-$VER.tar.xz
cd gdb-$VER
# CC=gcc-4.9.0 CXX=g++-4.9.0
# BUG FIX: "configure" is not on PATH; it must be invoked from the source
# directory as ./configure.
./configure --prefix=/opt/gdb-$VER
# Build and install niced/ioniced so the machine stays responsive.
ionice -c3 nice -n 20 make -j5
ionice -c3 nice -n 20 make install
| true
|
b7019c7405ecb1233000fadb847cc7920b2c0682
|
Shell
|
sequencemedia/sequencemedia
|
/nvm.sh
|
UTF-8
| 593
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/bash
# Load nvm (from ~/.nvm or the Homebrew prefix) and switch to the Node
# version pinned for the project via "nvm use".

NVM=~/.nvm
if [ -f "$NVM/nvm.sh" ];
then
  # Clear npm-related variables that interfere with nvm when this script is
  # run from an npm lifecycle hook.
  unset npm_package_scripts_nvm
  unset npm_config_prefix
  unset npm_lifecycle_script
  source $NVM/nvm.sh
else
  NVM=$(brew --prefix nvm)
  if [ -f "$NVM/nvm.sh" ];
  then
    unset npm_package_scripts_nvm
    unset npm_config_prefix
    unset npm_lifecycle_script
    source $NVM/nvm.sh
  fi
fi

VERSION=$(nvm --version)
if [ -z "$VERSION" ];
then
  echo NVM not available
else
  echo NVM version $VERSION available
  # BUG FIX: the original ran "set -e" and then inspected $? after "nvm use";
  # with -e a failing "nvm use" exits the script before the check, so the
  # "NVM not configured" branch was unreachable. Testing the command in the
  # if-condition handles both outcomes.
  if nvm use
  then
    echo NVM configured
  else
    echo NVM not configured
  fi
fi
| true
|
0c38784bccb54c7c420ad011cbe86043338cdb02
|
Shell
|
kalikhademi/Graduate-projects
|
/Distributed OS/DDL/generate.sh
|
UTF-8
| 127
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the DDL parser over every *.ddl file in the current directory.
# Exits 1 on the first file that fails to parse, 0 when all succeed.
for ddl in *.ddl; do
	# FIX: quoted "$ddl" so file names containing spaces or glob
	# characters are passed intact (SC2086).
	if parser/parser "$ddl"; then
		echo generated files for "$ddl"
	else
		exit 1
	fi
done
exit 0
| true
|
eebbf17db3714f0937abab5f6a332d7d88b51034
|
Shell
|
bluz71/dotfiles
|
/git_hooks/lint-javascript
|
UTF-8
| 329
| 3.171875
| 3
|
[
"Unlicense"
] |
permissive
|
#!/bin/sh
#
# Git pre-commit hook: run "standard" against any staged JavaScript files.
#
# Install by symlinking into the repository's hooks directory:
#   % cd .git/hooks
#   % ln -s ~/dotfiles/git_hooks/lint-javascript pre-commit

# Added/copied/modified staged files ending in .js.
staged_js_files=$(git diff --cached --name-only --diff-filter=ACM | grep "\.js$")

# Nothing staged with a .js suffix means nothing to lint.
if [ -n "$staged_js_files" ]; then
    standard $staged_js_files
fi
| true
|
b06706aaaf90c9533407e63fc1193fe95f473bd5
|
Shell
|
ma2o/VarCap
|
/50_cleanup.sh
|
UTF-8
| 843
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
#$-q all.q@cube[ab]*
# Cleanup step of the VarCap pipeline: delete intermediate files (filter
# fastq, prinseq output, subsample reads, caller BAM/cortex artifacts)
# produced by earlier steps. Paths come from the two config files below.

CURRENT_DIR=$(pwd)

# open project specific config file
CONFIG_FILE=$CURRENT_DIR/variant.config
. $CONFIG_FILE
# open system/program settings config file in varcap main dir
SYSTEM_CONFIG=${PATH_VARCAP}/program.config
. $SYSTEM_CONFIG

# remove fq files from filter (except alt)
cd $PATH_PROJECTS_DATA/$PROJ_NAME/
find filter/ -name "*EMP_1*" -delete
find filter/ -name "*EMP_2*" -delete

# remove prinseq files
cd $PATH_PROJECTS_DATA/$PROJ_NAME/filter/
find prinseq_data/ -name "*.fastq" -delete

# remove subsample fastq files for mapping
cd $PATH_PROJECTS_DATA/$PROJ_NAME/mapper/
find subsample/ -name "*.fq*" -delete

# remove diverse files produced by calling
cd $PATH_PROJECTS_DATA/$PROJ_NAME/caller/
# "*.ba?" matches both .bam and .bai files.
find . -name "*.ba?" -delete
find cortex/ -name "*.fastq" -delete
find cortex/ -name "*.ctx*" -delete
| true
|
7faa63df97054a140c0f7631a4e9139837fba62b
|
Shell
|
m42e/HackPi
|
/fingerprint.sh
|
UTF-8
| 771
| 3.5
| 4
|
[] |
no_license
|
#!/bin/bash
# Guess the USB host OS by analyzing GET_DESCRIPTOR string requests seen
# by the USB gadget (dwc2) in the kernel log.
#
# Setup-request fields as logged by dmesg:
#   80   = bmRequestType (device to host)
#   06   = bRequest (GET_DESCRIPTOR)
#   03xx = wValue (string descriptor)
#   0409 = wIndex (English)
# wLength (requested descriptor size) is the fingerprint: requests with
# wLength 0x00ff are counted, and a capture with none of them is classified
# as MacOs.

LOGFILE=/home/pi/HackPi/usbreq.log
dmesg | grep "USB DWC2 REQ 80 06 03" > "$LOGFILE"

# Collect wLength (field 10) of every request whose wIndex is not 0000.
WLENGTHS=`awk '$9!="0000" { print $10 }' "$LOGFILE"`

TOTAL=0
COUNTER=0
for i in $WLENGTHS; do
	if [ "$i" = "00ff" ]; then
		# FIX: arithmetic expansion replaces the legacy "let" builtin.
		COUNTER=$((COUNTER + 1))
	fi
	TOTAL=$((TOTAL + 1))
	#echo wLength: $i
done
#echo $COUNTER

# No requests captured at all: nothing to fingerprint.
if [ $TOTAL -eq 0 ]; then
	echo Unknown
	exit
fi
#echo $COUNTER

if [ $COUNTER -eq 0 ]; then
	echo MacOs
#elif [ $COUNTER -eq $TOTAL ]; then
#	echo Linux
else
	echo Other
#	echo Windows
fi
| true
|
b381af6ee5f3e8b652e80362067303b187db38d7
|
Shell
|
cucumberlinux/buildtools
|
/build-server/bin/download_packages
|
UTF-8
| 841
| 3.84375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Download the cucumber packages tree (/opt/packages/cucumber) from the
# staging server for the current — or explicitly chosen — architecture.

# Load the config file (defines STAGING_SERVER and CUCUMBER_VERSION).
source $(dirname $0)/config

# Determine the build architecture unless CUCARCH was set by the caller.
if [ -z "$CUCARCH" ]; then
	case "$(uname -m)" in
	i?86)
		# Any x86 variant is normalized to i686.
		export CUCARCH="i686"
		;;
	x86_64)
		export CUCARCH="x86_64"
		;;
	*)
		# Every other architecture uses the kernel's machine name as-is.
		export CUCARCH=$(uname -m)
		;;
	esac
fi

# Make sure the local destination exists.
mkdir -pv /opt/packages/cucumber

# Mirror the packages tree; the echo shows the exact command being run.
echo rsync -e ssh -av $STAGING_SERVER/cucumber/cucumber-$CUCUMBER_VERSION/cucumber-$CUCARCH/* /opt/packages/cucumber/
rsync -e ssh -av $STAGING_SERVER/cucumber/cucumber-$CUCUMBER_VERSION/cucumber-$CUCARCH/* /opt/packages/cucumber/
| true
|
85af49ed1d866424357cb717d55e0ee690af2010
|
Shell
|
domo141/podman-images-sailfish-sdk
|
/run-qtcreator.sh
|
UTF-8
| 977
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/sh
#
# Author: Tomi Ollila -- too ät iki piste fi
#
#	Created: Sat 03 Aug 2019 16:53:39 EEST too
#	Last modified: Sat 03 Aug 2019 16:58:28 +0300 too
# SPDX-License-Identifier: Apache-2.0

# When run under bash or zsh, switch them into stricter, more POSIX-like
# modes so the rest of the script behaves the same across shells.
case ${BASH_VERSION-} in *.*) set -o posix; shopt -s xpg_echo; esac
case ${ZSH_VERSION-} in *.*) emulate ksh; esac

set -euf # hint: sh -x thisfile [args] to trace execution

die () { printf '%s\n' "$*"; exit 1; } >&2

# Extract the SDK installation prefix from the desktop entry's Exec= line:
# strip the 5-char "Exec=" prefix and everything from "/bin/qtcreator" on.
ipath=`exec sed -n '/^Exec=/ { s/.....//; s:/bin/qtcreator.*::; p; q; }' \
	$HOME/.local/share/applications/SailfishOS-SDK-qtcreator.desktop`

test "$ipath" || die "Cannot find sdk installation path"

# Resolve this script's directory (dn0), using realpath only for the
# relative multi-component cases.
case $0 in /*) dn0=${0%/*}
	;; */*/*) dn0=`exec realpath ${0%/*}`
	;; ./*) dn0=$PWD
	;; */*) dn0=`exec realpath ${0%/*}`
	;; *) dn0=$PWD
esac

# Echo the command being executed (to stderr), then replace this process.
x_exec_env () { printf '\n+ %s\n\n' "$*" >&2; exec env "$@"; }

# Put our ./bin first on PATH, then launch the SDK's Qt Creator.
x_exec_env PATH=$dn0/bin:$PATH $ipath/bin/qtcreator

# Local variables:
# mode: shell-script
# sh-basic-offset: 8
# tab-width: 8
# End:
# vi: set sw=8 ts=8
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.