blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
b0178be271ba51694d31a64807d3bdb3b196089a
|
Shell
|
mdchia/raijin-scripts
|
/shasta.pbs
|
UTF-8
| 721
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash
#PBS -P xe2
#PBS -q normalsl
#PBS -l walltime=6:00:00
#PBS -l mem=32GB
#PBS -l jobfs=16GB
#PBS -l ncpus=16
#PBS -l other=hyperthread
## The job will be executed from current working directory instead of home.
#PBS -l wd
#PBS -o shasta_logs
#PBS -e shasta_logs
#PBS -m abe
#PBS -M Ming-Dao.Chia@anu.edu.au
## Modules
module load shasta
set -euo pipefail # safe mode
set -x # logging
## script variables
# $fullfilename must be supplied by the submitter, e.g.:
#   qsub -v fullfilename=/path/to/reads.fastq shasta.pbs
filenameExt=$(basename "$fullfilename")
filename="${filenameExt%.*}"
# Quote the inner command substitution too, so paths with spaces survive.
dirPrefix=$(basename "$(dirname "$fullfilename")")
# Assemble on node-local jobfs scratch, then move the result back to $PWD.
time shasta-Linux-0.1.0 --Reads.minReadLength 100 --Kmers.k 10 --MarkerGraph.maxCoverage 100 \
    --output "$PBS_JOBFS/shasta_assembly_mRL100" --input "$fullfilename"
mv "$PBS_JOBFS/shasta_assembly_mRL100" .
| true
|
fa5877a7fa2ef0bf2110096573ee41058a0fa9d0
|
Shell
|
articuluxe/harmsway
|
/host/cathode/bin/untar-world.sh
|
UTF-8
| 2,121
| 3.875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# -*- Mode: sh -*-
# untar-world.sh --- untar important files
# Copyright (C) 2015-2018, 2020-2021 Dan Harms (dan.harms)
# Author: Dan Harms <enniomore@icloud.com>
# Created: Monday, May 18, 2015
# Version: 1.0
# Modified Time-stamp: <2021-02-10 14:11:13 dharms>
# Keywords: configuration
# tar binary can be overridden via the TAR environment variable; a fallback
# is located further below when this is empty.
tar=$TAR
# Manifest file inside .emacs.d listing files to preserve across unpacks.
manifest=.bk_manifest
# Scratch tarball used to carry the preserved files over the wipe of .emacs.d.
backup=emacs_bk.tar
user=$(id -nu)
os=$(uname)
host=$(hostname -s)
# Site identifier, expected to be supplied by the environment.
site=$SITE
# First positional argument (the archive to unpack); parsed below.
input=
user_dict=~/.hunspell_en_US
logdir=.install-logs
# backup_file NAME — append .emacs.d/NAME (a file or a directory) to the
# $backup tarball via $tar, echoing what is being saved.
# Names that exist as neither file nor directory are silently skipped.
function backup_file
{
    # Compute the path once instead of repeating ".emacs.d/$1" four times.
    local entry=".emacs.d/$1"
    if [ -f "$entry" ] ; then
        echo Backing up "$1"
        $tar -rvf "$backup" "$entry"
    elif [ -d "$entry" ] ; then
        echo Backing up directory "$1"
        $tar -rvf "$backup" "$entry"
    fi
}
# Locate a usable tar when $TAR was not provided.
if [ -z "$tar" ]; then
tar=$(which tar)
echo "Using $tar"
fi
if [ -z "$tar" ]; then
echo "! no tar available; quitting"
exit 1
fi
# First positional argument is the archive to unpack.
if [ $# -gt 0 ] ; then
input=$1
shift
fi
if [ "x$input" = "x" ] ; then
echo "Error: need an input file."
exit
fi
# Timestamp for naming the posterity backup (':' is not filename-safe).
date=$(date '+%F_%T' | tr ':' '-')
cd ~
mkdir -p "$logdir"
# there's an existing .emacs.d
# Extract only the manifest first; it lists the files worth preserving.
$tar --overwrite -xpf "$input" .emacs.d/$manifest
if [ -d .emacs.d ] && [ -f .emacs.d/$manifest ] ; then
rm -f $backup
_prune-empty-dirs.sh .emacs.d/backups
# NOTE(review): word-splits the manifest into an array; manifest entries
# containing whitespace would break here — confirm entries never do.
files=(`cat .emacs.d/$manifest`)
numfiles=${#files[*]}
i=0
# Stash every manifest entry into $backup before .emacs.d is wiped.
while [ $i -lt $numfiles ]
do
backup_file ${files[$i]}
i=$(( $i+1 ))
done
# backup for posterity
$tar czf "$logdir/.emacs.d.bk_$date.tgz" --force-local .emacs.d
# restore interesting files
rm -rf .emacs.d
mkdir .emacs.d
if [ -r $backup ] ; then
$tar -xpf $backup
rm -f $backup
fi
fi
echo About to unpack "$input"...
$tar --overwrite -xpf "$input"
# install user dictionary (warn if conflicts)
if [ -f "$user_dict" ]; then
_check_merge.sh .emacs.d/etc/user-dict "$user_dict"
fi
cp .emacs.d/etc/user-dict "$user_dict"
# remove intermediate directories, if empty
for i in bash tcsh dotfiles user/$user os/$os site/$site host/$host; do
rmdir -p "$i"
done
# and byte-compile emacs
bin/emacscomp.sh .emacs.d
# untar-world.sh ends here
| true
|
5fd5976f3596b823986e1a904af9f53b9235dbd4
|
Shell
|
gerassimos/dgs19
|
/docs/generatePDF.sh
|
UTF-8
| 635
| 3.1875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Serve the current directory over HTTP so decktape can render the slide
# decks, convert each D_*.md deck (via its generated web_*.html page) to a
# PDF under ./pdf, then tear the web server down.
# start a web server that is used to generate(serve) the html pages
echo "Start web server"
docker run -d --name dgst19 -p 8069:80 -v "$(pwd)":/usr/local/apache2/htdocs/docs httpd:2.4
rm -rfv pdf
mkdir pdf
chmod 777 pdf
# Iterate with a glob instead of parsing `ls` output (safe with spaces).
for md_file in D_*.md; do
    # web_<deck>.html is the served HTML rendering of <deck>.md
    html_file_name="web_${md_file%.md}.html"
    pdf_file_name="${md_file%.md}.pdf"
    docker run --rm -it --privileged --net=host -v "$(pwd)":/slides astefanutti/decktape:2.9.2 --size 1024x769 remark \
        "http://localhost:8069/docs/${html_file_name}" "pdf/${pdf_file_name}"
done
echo "Stop -rm web server"
docker container rm -f dgst19
| true
|
36d950ff3bc6cd15165e7ef9e9955b8ad2498af1
|
Shell
|
yhfudev/bash-mrnative
|
/app-conv2dash/e1map.sh
|
UTF-8
| 13,484
| 3.59375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# -*- tab-width: 4; encoding: utf-8 -*-
#
#####################################################################
## @file
## @brief Multimedia Transcoding Using Map/Reduce Paradigm -- Step 1 Map part
##
## In this part, the script check the file name format, and
## collect all of the files to be processed and send it to output.
## @author Yunhui Fu <yhfudev@gmail.com>
## @copyright GPL v3.0 or later
## @version 1
##
#####################################################################
## @fn my_getpath()
## @brief get the real name of a path
## @param dn the path name
##
## get the real name of a path, return the real path
## Resolve a path to its absolute form (a portable stand-in for
## `readlink -f`): when the argument is not a directory, split off the
## trailing file name, canonicalize the directory part by cd'ing into it,
## then re-attach the file name.
my_getpath() {
    local target="$1"
    shift
    local dir="${target}"
    local leaf=
    if [ ! -d "${dir}" ]; then
        leaf=$(basename "${dir}")
        dir=$(dirname "${dir}")
    fi
    # Remember where we were; cd errors are deliberately silenced.
    local origin=$(pwd)
    cd "${dir}" > /dev/null 2>&1
    dir=$(pwd)
    cd "${origin}"
    if [ "${leaf}" = "" ]; then
        echo "${dir}"
    else
        echo "${dir}/${leaf}"
    fi
}
#DN_EXEC=`echo "$0" | ${EXEC_AWK} -F/ '{b=$1; for (i=2; i < NF; i ++) {b=b "/" $(i)}; print b}'`
# Resolve the directory containing this script.
DN_EXEC=$(dirname $(my_getpath "$0") )
if [ ! "${DN_EXEC}" = "" ]; then
DN_EXEC="$(my_getpath "${DN_EXEC}")/"
else
DN_EXEC="${DN_EXEC}/"
fi
DN_TOP="$(my_getpath "${DN_EXEC}/../")"
DN_BIN="$(my_getpath "${DN_TOP}/bin/")"
# NOTE(review): DN_EXEC is immediately overridden with the *current*
# directory, discarding the script-relative value computed above —
# confirm this is intentional (map tasks run from a work dir).
DN_EXEC="$(my_getpath ".")"
#####################################################################
# Pull in shared helpers (mr_trace, mp_* session helpers, etc.) if present.
if [ -f "${DN_EXEC}/liball.sh" ]; then
. ${DN_EXEC}/liball.sh
fi
#####################################################################
# generate session for this process and its children
# use mp_get_session_id to get the session id later
mp_new_session
# extract the mrnative, include the files in app-ns2/common which are used in setting ns2 TCL scripts
libapp_prepare_mrnative_binary
#####################################################################
## @fn create_snapshot()
## @brief create snapshot images of mp4 file
## @param fn_mp4 the path name of mp4 file
##
## Extracts NUM thumbnails evenly spaced through the video and merges
## them into a single contact-sheet image "<base>-snap.jpg".
create_snapshot() {
local PARAM_FN_MP4=$1
shift
# Convert the first 30 frames of the video into an animated GIF:
#${EXEC_FFMPEG} -i "${FN_MP4}" -vframes 30 -y -f gif a.gif
# Strip the file extension to obtain the output-name base.
FN_BASE=$(echo "${PARAM_FN_MP4}" | ${EXEC_AWK} -F. '{b=$1; for (i=2; i < NF; i ++) {b=b "." $(i)}; print b}')
# Total length in seconds as reported by mplayer's -identify output.
SEC_LENGTH=$(mplayer -identify -nosound -vc dummy -vo null "${PARAM_FN_MP4}" | grep ID_LENGTH | sed -r 's/ID_LENGTH=([[:digit:]]*)(.[[:digit:]]*)?/\1/g')
NUM=6
CNT=0
NEXT=7
# Interval between snapshots, skipping NEXT seconds at both ends.
STEP=$(expr \( $SEC_LENGTH - ${NEXT} - ${NEXT} \) / $NUM)
while [ $(echo | ${EXEC_AWK} -v CUR=${CNT} -v MAX=${NUM} '{if (CUR < MAX) {print 1} else {print 0}}') = 1 ]; do
# Grab a 320x180 thumbnail at offset NEXT seconds.
$MYEXEC ${EXEC_FFMPEG} ${OPTIONS_FFM_ASYNC} -i "${PARAM_FN_MP4}" -y -f mjpeg -ss $NEXT -t 0.001 -s 320x180 "${FN_BASE}-snaptmp${CNT}.jpg"
NEXT=$(expr $NEXT + $STEP)
CNT=$(expr $CNT + 1)
done
# Use imagemagick's montage to merge the images; -geometry +0+0 keeps the
# original image size, -tile 2 places two images per row.
$MYEXEC montage -geometry +0+0 -tile 2 "${FN_BASE}-snaptmp*.jpg" "${FN_BASE}-snap.jpg"
$MYEXEC rm -f ${FN_BASE}-snaptmp*.jpg
}
#####################################################################
## @fn worker_mkv_split()
## @brief split .mkv file to segments
## @param session_id the session id
## @param audio_file the audio file
## @param video_file the video file
## @param segsec segment time in seconds
##
## variable DN_DATATMP should be set before call this function
worker_mkv_split() {
local PARAM_SESSION_ID="$1"
shift
local PARAM_AUDIO_FILE="$1"
shift
local PARAM_VIDEO_FILE="$1"
shift
local PARAM_SEGSEC="$1"
shift
# Private scratch directory for this worker; removed at the end.
DN_TMP="${DN_DATATMP}/worker-$(uuidgen)"
${MYEXEC} mkdir -p "${DN_TMP}" 1>&2
${MYEXEC} cd "${DN_TMP}" 1>&2
# FN_BASE = path without extension; FN_SUFFIX = extension only.
FN_BASE=$(echo "${PARAM_VIDEO_FILE}" | ${EXEC_AWK} -F. '{b=$1; for (i=2; i < NF; i ++) {b=b "." $(i)}; print b}')
FN_SUFFIX=$(echo "${PARAM_VIDEO_FILE}" | ${EXEC_AWK} -F. '{print $NF }')
FN_INPUT_VIDEO="${PARAM_VIDEO_FILE}"
FN_INPUT_AUDIO="${PARAM_AUDIO_FILE}"
PREFIX0=$(basename "${FN_BASE}" )
PREFIX1=$(generate_prefix_from_filename "${PREFIX0}" )
#PREFIX2=$(echo "${PREFIX1}" | ${EXEC_AWK} -F% '{match($2,"[0-9]*d(.*)",b);print $1 b[1];}' )
PREFIX="${DN_DATATMP}/${PREFIX1}"
# FMT2 is a printf-style pattern for the segment files; FN_PATTERN2 is the
# equivalent shell glob used to clean up pre-existing segments.
FMT2="${PREFIX}-${PRIuSZ}.lossless.${FN_SUFFIX}"
FN_PATTERN2=$(echo "${FMT2}" | ${EXEC_AWK} -F% '{ match($2,"[0-9]*d(.*)",b); print $1 "[0-9]*" b[1]; }' )
#mr_trace "FMT2=${FMT2}; FN_PATTERN2=${FN_PATTERN2}; FN_BASE=${FN_BASE}; FN_INPUT_VIDEO=${FN_INPUT_VIDEO}; PREFIX0=${PREFIX0}; PREFIX1=${PREFIX1}; PREFIX2=${PREFIX2};"
DIR1=$(dirname "${PREFIX}")
${MYEXEC} mkdir -p "${DIR1}"
# detect if exist audio
mr_trace "[DBG] ${EXEC_FFPROBE} -v quiet -select_streams a -show_streams ${FN_INPUT_VIDEO}"
echo | ${EXEC_FFPROBE} -v quiet -select_streams a -show_streams "${FN_INPUT_VIDEO}" | grep duration 1>&2
if [ "$?" = "0" ]; then
# remove audio
FN_INPUT_VIDEO="${PREFIX}.videolossless.${FN_SUFFIX}"
mr_trace "[DBG] extract video only file: ${FN_INPUT_VIDEO}"
echo | ${EXEC_FFMPEG} ${OPTIONS_FFM_GLOBAL} -i "${PARAM_VIDEO_FILE}" -c:v copy -an -y "${FN_INPUT_VIDEO}" 1>&2
if [ "${PARAM_AUDIO_FILE}" = "${PARAM_VIDEO_FILE}" ]; then
# Audio and video share one container: split the audio track out too.
FN_INPUT_AUDIO="${PREFIX}-audio.${FN_SUFFIX}"
echo | ${EXEC_FFMPEG} ${OPTIONS_FFM_GLOBAL} -i "${PARAM_VIDEO_FILE}" -c:a copy -vn -y "${FN_INPUT_AUDIO}" 1>&2
fi
fi
${MYEXEC} ${DANGER_EXEC} rm -f "${FN_PATTERN2}" 1>&2
mr_trace "[DBG] ${EXEC_FFMPEG} ${OPTIONS_FFM_GLOBAL} -i ${FN_INPUT_VIDEO} -f segment -segment_time ${PARAM_SEGSEC} -vcodec copy -reset_timestamps 1 -map 0 -y ${FMT2}"
# Split into PARAM_SEGSEC-second segments without re-encoding; out.list
# records the generated segment file names, one per line.
echo | ${EXEC_FFMPEG} ${OPTIONS_FFM_GLOBAL} -i "${FN_INPUT_VIDEO}" \
-f segment -segment_time ${PARAM_SEGSEC} -segment_list_type flat -segment_list out.list \
-c:v copy -reset_timestamps 1 -map 0 -an -y "${FMT2}" 1>&2
#if [ -f "${PREFIX}.videolossless.${FN_SUFFIX}" ]; then
# ${MYEXEC} ${DANGER_EXEC} rm -f "${PREFIX}.videolossless.${FN_SUFFIX}" 1>&2
#fi
# pass the file name to the reducer, so that the files name are sorted
# the reducer will calculate the frame numbers of each video chunks,
# and set the sequence number for request 'lossless'
cat out.list | ${EXEC_AWK} -v KEY="${PARAM_VIDEO_FILE}" \
-v SEGSEC=${PARAM_SEGSEC} \
-v AUD="${FN_INPUT_AUDIO}" \
-v PREFIX="$(dirname "${FMT2}")" \
'{print "processvid\t\"" KEY "\"\t\"" AUD "\"\t\"" PREFIX "/" $0 "\"\t" SEGSEC ; }'
#processvid <key> <audio_file> <vid_seg_file_name_out> <segsec>
# processvid "/path/to/video-lossless.mkv" "/path/to/audio2.flac" "/path/to/video-0000000000000000001.lossless.mkv" 6
${MYEXEC} cd - 1>&2
${MYEXEC} ${DANGER_EXEC} rm -rf "${DN_TMP}" 1>&2
mp_notify_child_exit ${PARAM_SESSION_ID}
}
## @fn worker_pic_list()
## @brief list all of pic files
## @param session_id the session id
## @param fn_pattern the pattern of file name
## @param audio_file the audio file
## @param video_file the video file
## @param segsec segment time in seconds
## @param video_fps the fps of video
## @param n_start the frame number of the first picture
##
## Emits one "picgroup" task line per picture file matching fn_pattern.
worker_pic_list() {
local PARAM_SESSION_ID="$1"
shift
local PARAM_FN_PATTERN="$1"
shift
local PARAM_AUDIO_FILE="$1"
shift
local PARAM_VIDEO_FILE="$1"
shift
local PARAM_SEGSEC="$1"
shift
local PARAM_VIDEO_FPS="$1"
shift
local PARAM_N_START="$1"
shift
# pass the file name to the reducer, so that it sort the files names
# the reducer will calculate the frame numbers of each video chunks,
# and set the sequence number for request 'lossless'
# (PARAM_FN_PATTERN is deliberately left unquoted so the glob expands.)
ls ${PARAM_FN_PATTERN} \
| ${EXEC_AWK} -v KEY="${PARAM_VIDEO_FILE}" -v SEGSEC=${PARAM_SEGSEC} -v AUD="${PARAM_AUDIO_FILE}" -v FPS=${PARAM_VIDEO_FPS} -v NSTART=${PARAM_N_START} \
'{print "picgroup\t\"" KEY "\"\t\"" AUD "\"\t\"" $0 "\"\t" SEGSEC "\t" FPS "\t" NSTART ; }'
#picgroup <key> <audio_file> <vid_seg_file_name_out> <segsec> <fps> <frame_start_number>
# picgroup "/path/to/film-%05d.png" "/path/to/audio1.flac" "/path/to/video-0000000000000000001.lossless.mkv" 6 24 144
mp_notify_child_exit ${PARAM_SESSION_ID}
}
## @fn process_stream_e1map()
## @brief the main function of e1map to process input stream
##
## Reads task lines from stdin; per line it validates the referenced files,
## then spawns a background worker: worker_mkv_split for "origvid" lines,
## worker_pic_list for "origpic" lines. "config" lines recurse into the
## referenced config file. Invalid lines are logged and skipped.
process_stream_e1map() {
local RET=0
local ERR=0
#<type> <audio_file> <video_file_fmt> <segsec> [<fps> <#start> <#files>]
# origpic "/path/to/audio1.flac" "/path/to/film-%05d.png" 6 24 1 144
# origvid "/path/to/audio2.flac" "/path/to/video-lossless.mkv" 6
while read MR_TYPE MR_AUDIO_FILE MR_VIDEO_FILE MR_SEGSEC MR_VIDEO_FPS MR_N_START MR_N_FILES ; do
FN_VIDEO_FILE1=$( unquote_filename "${MR_VIDEO_FILE}" )
FN_VIDEO_FILE=$( convert_filename "${DN_EXEC}/input/" "${FN_VIDEO_FILE1}" )
ERR=0
# First pass over the line: validate files and resolve name patterns.
case "${MR_TYPE}" in
config)
# this will call this function itself to dump the info from config files
FN_AUDIO_FILE1=$( unquote_filename "${MR_AUDIO_FILE}" )
FN_AUDIO_FILE=$( convert_filename "${DN_EXEC}/input/" "${FN_AUDIO_FILE1}" )
mr_trace "call self to dump config data: ${FN_AUDIO_FILE}"
cat_file "${FN_AUDIO_FILE}" | process_stream_e1map
continue
;;
origpic)
if [ "${MR_N_START}" = "" ]; then
MR_N_START=1
fi
#FN_SUFFIX=${OPTIONS_FFM_VCODEC_SUFFIX}
FN_PATTERN="${FN_VIDEO_FILE}"
# check if the file exist
RET=$(is_file_or_dir "${FN_VIDEO_FILE}")
if [ ! "${RET}" = "f" ]; then
mr_trace "check if the format of the file name(${FN_VIDEO_FILE}) is xxx-%05d.mkv"
# Expand the printf pattern with the start number and test that file.
TMP=$(echo | ${EXEC_AWK} -v FMT="${FN_VIDEO_FILE}" -v N=${MR_N_START} '{printf(FMT, N);}' )
mr_trace "TMP=${TMP}"
RET=$(is_file_or_dir "${TMP}")
if [ "${RET}" = "f" ]; then
FN_PATTERN=$(echo "${FN_VIDEO_FILE}" | ${EXEC_AWK} -F% '{match($2,"[0-9]*d(.*)",b);print $1 "[0-9]*"b[1];}' )
mr_trace "convert xxx-%05d.mkv(${FN_VIDEO_FILE}) to xxx-[0-9]*.mkv(${FN_PATTERN})"
else
mr_trace "check if the file name is xxx-*.mkv(${FN_VIDEO_FILE})"
find_file "${FN_VIDEO_FILE}" -name "${FN_VIDEO_FILE}"
TMP="$(dirname ${TMP})"
TMP="$( find_file "${TMP}" -name "${FN_VIDEO_FILE}" | head -n 1 )"
if [ "${TMP}" = "" ]; then
RET="e"
else
RET=$(is_file_or_dir "${TMP}")
fi
if [ ! "${RET}" = "f" ]; then
mr_trace "Err: not found file 1: '${FN_VIDEO_FILE}' (${MR_VIDEO_FILE})"
ERR=1
fi
fi
fi
if [ "${MR_SEGSEC}" = "" ]; then
mr_trace "Err: no segment size: ${MR_SEGSEC}"
ERR=1
fi
if [ "${MR_VIDEO_FPS}" = "" ]; then
mr_trace "Err: no video fps: ${MR_VIDEO_FPS}"
ERR=1
fi
;;
origvid)
RET=$(is_file_or_dir "${FN_VIDEO_FILE}")
if [ ! "${RET}" = "f" ]; then
# not found file
mr_trace "Err: not found file 2: '${FN_VIDEO_FILE}' (${MR_VIDEO_FILE})"
ERR=1
fi
;;
*)
mr_trace "Err: unknown type: ${MR_TYPE}"
ERR=1
;;
esac
#if [ ! -f "${FN_AUDIO_FILE}" ]; then
# mr_trace "Err: not found file 2: ${MR_AUDIO_FILE}"
# ERR=1
#fi
# Skip any line that failed validation above.
if [ ! "${ERR}" = "0" ] ; then
mr_trace "ignore line: ${MR_TYPE} ${MR_AUDIO_FILE} ${MR_VIDEO_FILE} ${MR_SEGSEC} ${MR_VIDEO_FPS} ${MR_N_START} ${MR_N_FILES}"
continue
fi
FN_AUDIO_FILE1=$( unquote_filename "${MR_AUDIO_FILE}" )
FN_AUDIO_FILE=$( convert_filename "${DN_EXEC}/input/" "${FN_AUDIO_FILE1}" )
# Second pass: dispatch a background worker per validated line.
case "${MR_TYPE}" in
origpic)
# 1. search all of the files and generate the the task lines for next stage
# 2. support xxx-*.png format
#if [ -f "${FN_VIDEO_FILE}" ]; then
# # one single file?
#else
# if [ "${FN_PATTERN}" = "${FN_VIDEO_FILE}" ]; then
# # format xxx-%09d.png
# else
# # format xxx-*.png
# fi
#fi
worker_pic_list "$(mp_get_session_id)" "${FN_PATTERN}" "${FN_AUDIO_FILE}" "${FN_VIDEO_FILE}" ${MR_SEGSEC} ${MR_VIDEO_FPS} ${MR_N_START} &
PID_CHILD=$!
mp_add_child_check_wait ${PID_CHILD}
;;
origvid)
worker_mkv_split "$(mp_get_session_id)" "${FN_AUDIO_FILE}" "${FN_VIDEO_FILE}" ${MR_SEGSEC} &
PID_CHILD=$!
mp_add_child_check_wait ${PID_CHILD}
;;
*)
mr_trace "Err: unknown type: ${MR_TYPE}"
ERR=1
;;
esac
done
}
# Entry point: consume task lines from stdin, then wait for every worker
# spawned above to finish before exiting.
process_stream_e1map
mp_wait_all_children
| true
|
9120c392e6e411ffd3e5304a0a168ab9a8e41023
|
Shell
|
prometheus/alertmanager
|
/scripts/compress_assets.sh
|
UTF-8
| 1,093
| 3.3125
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT"
] |
permissive
|
#!/usr/bin/env bash
#
# compress static assets
set -euo pipefail
cd ui/react-app
# Start from the template; the //go:embed directive is appended below.
cp embed.go.tmpl embed.go
GZIP_OPTS="-fk"
# gzip option '-k' may not always exist in the latest gzip available on different distros.
if ! gzip -k -h &>/dev/null; then GZIP_OPTS="-f"; fi
dist="dist"
# If the react app has not been built yet, write a placeholder index page
# so the go:embed directive still has something to embed.
if ! [[ -d "${dist}" ]]; then
mkdir -p ${dist}
echo "<!doctype html>
<html lang=\"en\">
<head>
<meta charset=\"utf-8\">
<title>Node</title>
<base href=\"/\">
<meta name=\"viewport\" content=\"width=device-width, initial-scale=1\">
<link rel=\"icon\" type=\"image/x-icon\" href=\"favicon.ico\">
</head>
<body>
<div>
<p> This is the default index, looks like you forget to generate the react app before generating the golang endpoint.</p>
</div>
</body>
</html>" > ${dist}/index.html
fi
# Drop stale compressed assets, gzip everything under dist, and append a
# single //go:embed line listing all resulting .gz files, plus the FS var.
find dist -type f -name '*.gz' -delete
find dist -type f -exec gzip $GZIP_OPTS '{}' \; -print0 | xargs -0 -I % echo %.gz | xargs echo //go:embed >> embed.go
echo var embedFS embed.FS >> embed.go
| true
|
15582d92bead111ac4a9e9aec9724ad17a5a9db1
|
Shell
|
rabidpug/bootstrap
|
/scripts/0-configure-self
|
UTF-8
| 481
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
# Configure the bootstrap tooling itself: set up its log file with rotation
# and link the `bs` entry point into /usr/local/bin.
set -euo pipefail
BS_PATH=/usr/local/bootstrap
source "$BS_PATH/.env"
source "$BS_PATH/utilities/lg.sh"
lg '## BEGINNING CONFIGURING SELF ##'
lg 'Configure log'
touch /var/log/bs.log
# Rotate the bootstrap log daily, keeping a week of uncompressed history.
cat <<EOT >/etc/logrotate.d/bs
/var/log/bs.log {
daily
copytruncate
missingok
dateext
rotate 7
nocompress
}
EOT
lg 'Making script executable and linking in /usr/local/bin'
chmod +x "$BS_PATH/bs"
ln -sf "$BS_PATH/bs" /usr/local/bin
# Fixed duplicated word in the completion message ("COMPLETED COMPLETED").
lg '## CONFIGURING SELF COMPLETED ##'
| true
|
80ab87e979d4aee84e6db59e02d98be0a2d8afa4
|
Shell
|
Jaymon/chef-cookbooks
|
/iptables/templates/default/iptables.sh.erb
|
UTF-8
| 871
| 2.8125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# ERB template: Chef renders the <% %> sections below into concrete rules.
# borrowed from: http://jerrywalsh.org/2012/howto-secure-your-linux-box-with-iptables-0112.html
# other helpful links:
# http://www.etoyoc.com/linux/iptables-lockdown.html
# set -x
set -e
# flush rules
iptables -F
# Log dropped connections
#iptables -N LOGDROP
# allow localhost connections to the loopback interface
iptables -A INPUT -i lo -j ACCEPT
# reject loopback-range traffic arriving on any non-loopback interface
# (basic anti-spoofing for 127.0.0.0/8)
iptables -A INPUT ! -i lo -d 127.0.0.0/8 -j REJECT
# allow connections which are already established
iptables -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
# add rules from Chef scripts
<% @rules.each do |val| %>
<%= val %>
<% end %>
# allow ping
iptables -A INPUT -p icmp -j ACCEPT
# ignore all inbound traffic
iptables -A INPUT -j DROP
# politely reject all forward traffic attempts
iptables -A FORWARD -j REJECT
# allow all outbound traffic
iptables -A OUTPUT -j ACCEPT
# set +x
set +e
| true
|
4adc401e8e1f54421d318ca3310fe22d5ca7a112
|
Shell
|
ironstar-io/tokaido-images
|
/kishu/entrypoint.sh
|
UTF-8
| 425
| 3.609375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
set -eo pipefail
# Give the rest of the stack time to come up before scanning the site.
echo "Waiting 60 seconds before starting..."
sleep 60
drupal_root=${DRUPAL_ROOT:-docroot}
timer=${TIMER:-10}
# Space-separated list of directories that contain a settings.php.
# NOTE(review): paths containing whitespace would be split incorrectly here
# and in the chmod below — confirm site paths never contain spaces/newlines.
paths=$(find /tokaido/site/"${drupal_root}" -name settings.php -a -print0 | xargs -0 dirname | tr '\n' ' ')
echo "Kishu will now maintain read/write permissions on the following paths:"
echo "$paths" | tr ' ' '\n'
# Re-assert user/group read-write permissions forever, every $timer seconds.
# ${paths} is deliberately unquoted so it word-splits into arguments.
while :
do
chmod -R ug+rw ${paths}
sleep ${timer}
done
| true
|
e47d72f63d48adf734f269f5d9f1ded15dd65089
|
Shell
|
jasonantao/cloud_initializer
|
/site/cloud-migrations-v1/WebMigrations-master/Core Service Scripts/linux-scripts-apps-sw-api-rest-master/setup.sh
|
UTF-8
| 646
| 3.03125
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#! /bin/bash
# One-shot installer for the Stock Widgets application stack on JBoss/WildFly.
# NOTE(review): $user, $group, $home, $pkg and $wfDeployDir appear to be
# exported by env/setEnv.sh (sourced below) — confirm before running standalone.
# MAKE ALL SHELL SCRIPTS EXECUTABLE TO ROOT ONLY
find . -name "*.sh" -exec chmod 700 {} \;
# SETUP STOCK WIDGETS ENVIRONMENT
. ./env/setEnv.sh
# ADD STOCK WIDGETS ADMIN USER
./install/addPkgUser.sh $user $group $home $pkg
# INSTALL JBOSS
./install/bootstraps/jbossBootstrap.sh
# SHUTDOWN JBOSS
service wildfly stop
# CONFIGURE JBOSS SYSTEM PARAMETERS TO RUN STOCK WIDGETS APPLICATION
./install/addSwProperties.sh
# DEPLOY STOCK WIDGETS WEB AND REST API APPLICATIONS
./install/deploySwWarApps.sh $wfDeployDir
# RESTART JBOSS
service wildfly start
# INSTALL STOCK WIDGETS TEST DATA
./install/bootstraps/swTestDataBootstrap.sh
| true
|
f444aa9e8930a3b22da7d301cff78c3f425e30cc
|
Shell
|
eDyablo/handicraft
|
/cpp/calc/build.sh
|
UTF-8
| 182
| 2.703125
| 3
|
[
"MIT"
] |
permissive
|
#! /usr/bin/env bash
# Build the calculator image, recording the resulting image ID under build/.
set -o errexit

# Absolute directory of this script (resolved via cd/pwd).
script_dir="$(cd "$(dirname "${0}")"; pwd)"

out_dir="build"
mkdir -p "$out_dir"

# docker writes the built image's ID into this file.
iid_file="$out_dir/idd"
docker build --iidfile="${iid_file}" --file="Dockerfile" .
| true
|
7ab4b82671a4bda82c9ac928064e4ac38d372c7c
|
Shell
|
ilventu/aur-mirror
|
/uclibc++/PKGBUILD
|
UTF-8
| 520
| 2.5625
| 3
|
[] |
no_license
|
# Maintainer: Bartek Piotrowski <barthalion@gmail.com>
pkgname=uclibc++
# Upstream spells the tarball name "uClibc++" — derive it from pkgname.
_pkgname=${pkgname/c/C}
pkgver=0.2.4
pkgrel=1
pkgdesc='C++ standard library targeted towards the embedded systems/software market'
arch=('i686' 'x86_64')
license=('LGPL')
url='http://cxx.uclibc.org/'
depends=('gcc-libs')
source=(http://cxx.uclibc.org/src/$_pkgname-$pkgver.tar.xz)
md5sums=('d35d73d2de317c239e7526bd369b31d7')
build() {
  # Quote $srcdir/$pkgdir: makepkg build roots may contain spaces.
  cd "$srcdir/$_pkgname-$pkgver"
  make menuconfig
  make
}
package() {
  cd "$srcdir/$_pkgname-$pkgver"
  make PREFIX="$pkgdir" install
}
| true
|
edb03b938a24eabe85cd55f3c95d2ee62d414317
|
Shell
|
sksnitj/DotFiles
|
/node/init.zsh
|
UTF-8
| 712
| 4.03125
| 4
|
[
"MIT"
] |
permissive
|
##### nvm (node version manager) #####
# placeholder nvm shell function
# On first use, it will set nvm up properly which will replace the `nvm`
# shell function with the real one
function nvm() {
  if [[ -d '/usr/local/opt/nvm' ]]; then
    # Use the current user's home, not a hard-coded /Users/gowiem path.
    NVM_DIR="${HOME}/.nvm"
    export NVM_DIR
    # shellcheck disable=SC1090
    source "${NVM_DIR}/nvm.sh"
    [ -s "/usr/local/opt/nvm/etc/bash_completion.d/nvm" ] && source "/usr/local/opt/nvm/etc/bash_completion.d/nvm"
    if [[ -e ~/.nvm/alias/default ]]; then
      # Fixed missing "/" between ${HOME} and .nvm (was ${HOME}.nvm/...).
      PATH="${PATH}:${HOME}/.nvm/versions/node/$(cat ~/.nvm/alias/default)/bin"
    fi
    # invoke the real nvm function now
    nvm "$@"
  else
    echo "nvm is not installed" >&2
    return 1
  fi
}
| true
|
23a514ae269e9cbcd3a0fd4a2116b056ca7c378d
|
Shell
|
Schnitzel/lagoon
|
/images/docker-host/prune-images.sh
|
UTF-8
| 153
| 2.75
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Prune dangling docker images on the remote docker host.
set -x
# Abort unless the daemon at DOCKER_HOST is reachable; quote the variable
# so an unset/odd value cannot word-split the command line.
if ! docker -H "${DOCKER_HOST}" info &> /dev/null; then
  echo "could not connect to ${DOCKER_HOST}"; exit 1
fi
docker image prune -f
| true
|
b096aba1b8087dd4ead17236bd7575b8d854594b
|
Shell
|
JeanKossaifi/setmeup
|
/zsh/window_title.zsh
|
UTF-8
| 610
| 2.96875
| 3
|
[] |
no_license
|
#####################
# WINDOW TITLE #
#####################
# Set the terminal window title from zsh hooks:
#  - precmd runs before each prompt; preexec runs before each command,
#    receiving the command line as $1.
# "\e]0;...\a" is the xterm set-title escape; "\e]83;..." is a screen
# title control sequence. %n/%M/%~/%L are zsh prompt escapes expanded
# by `print -P`.
case $TERM in
termite|*xterm*|rxvt|rxvt-unicode|rxvt-256color|rxvt-unicode-256color|(dt|k|E)term)
precmd () {
print -Pn "\e]0;[%n@%M] opened in [%~]\a"
}
preexec () { print -Pn "\e]0;[%n@%M] opened in [%~] ($1)\a" }
;;
screen|screen-256color)
precmd () {
print -Pn "\e]83;title \"$1\"\a"
print -Pn "\e]0;$TERM - (%L) [%n@%M] opened in [%~]\a"
}
preexec () {
print -Pn "\e]83;title \"$1\"\a"
print -Pn "\e]0;$TERM - (%L) [%n@%M] opened in [%~] ($1)\a"
}
;;
esac
| true
|
846625c7dec1a67edc4e5e2652a1ada258997aca
|
Shell
|
webvariants/dockerfiles-develop
|
/bin/dwc
|
UTF-8
| 1,238
| 4.28125
| 4
|
[] |
no_license
|
#!/bin/bash
# dwc — run `docker <args>` against the container that mounts the current
# working directory. Every "_" argument is replaced by the container ID;
# if no "_" is present, the ID is appended as the last argument.
if [ -z "$1" ]; then
    echo "missing arguments"
    echo "dwc calls docker with the container that mounts the current working directory."
    echo "All single underscores will be replaced by the container ID."
    echo "If no underscore is found the container ID will be added as last argument."
    echo "Examples:"
    echo "dwc inspect"
    echo "dwc exec -ti _ /bin/bash"
    # exit -1 is not valid POSIX; use a plain non-zero status instead.
    exit 1
fi
# Resolve the directory holding this script, following symlink chains.
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
    DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
    SOURCE="$(readlink "$SOURCE")"
    [[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
SCRIPT_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
# pwc -a prints the ID(s) of containers mounting the current directory.
ID=$("$SCRIPT_DIR/pwc" -a)
if [ -z "$ID" ]; then
    echo "no container found"
    exit 1
fi
# Numeric comparison (-gt); the original string '\>' compared lexically.
if [ "$(echo $ID | wc -w)" -gt 1 ]; then
    echo "more then one container found:"
    echo $ID
    exit 1
fi
# docker inspect $ID
# Substitute "_" with the container ID, or append it when none is present.
NEWARGS=()
HAS_UNDERSCORE=0
for ARG in "$@"
do
    if [ "$ARG" == "_" ]; then
        NEWARGS+=("$ID")
        HAS_UNDERSCORE=1
    else
        NEWARGS+=("$ARG")
    fi
done
if [ $HAS_UNDERSCORE -eq 0 ]; then
    NEWARGS+=("$ID")
fi
# Quote the array expansion so arguments containing spaces survive intact.
exec docker "${NEWARGS[@]}"
| true
|
1203f194595ac63f3c227ee37a70b80141dddce8
|
Shell
|
tfunck/nemo_processing
|
/temp.sh
|
UTF-8
| 2,185
| 3.15625
| 3
|
[] |
no_license
|
# For each NS subject/session in the BIDS tree, rigidly coregister the FLAIR
# image (and its lesion masks) to the native-space T1w volume, then export
# the coregistered lesion volumes to NIfTI.
# NOTE(review): flair_list/t1_list are computed but never used below.
flair_list=`find bids -name "*_flair.mnc"`
t1_list=`find bids -name "*_T1w.mnc"`
for f in `ls -d bids/sub-NS-*`; do
sub0=`basename $f`
sub=`echo $sub0 | sed 's/sub-NS-//'`
for ses_fn in `ls -d ${f}/_ses-*`; do
ses=`basename $ses_fn | sed 's/_//'`
t1=`ls ${ses_fn}/anat/${sub0}*_space-nat_T1w.mnc`
lesion=`ls ${ses_fn}/anat/${sub0}*_lesion.mnc`
lesion_neg=`ls ${ses_fn}/anat/${sub0}*_lesion-negative.mnc`
# Output names: *_T1w-coreg.mnc alongside the input files.
lesion_rsl=`echo $lesion | sed 's/.mnc/_T1w-coreg.mnc/'`
lesion_neg_rsl=`echo $lesion_neg | sed 's/.mnc/_T1w-coreg.mnc/'`
echo $t1
echo "Lesion" $lesion
echo "Lesion Negative" $lesion_neg
if [[ -f `ls ${ses_fn}/anat/${sub0}*_flair.mnc ` ]]; then
flair=`ls ${ses_fn}/anat/${sub0}*_flair.mnc`
flair_rsl=`echo $flair | sed 's/.mnc/_T1w-coreg.mnc/'`
flair_xfm=`echo $flair | sed 's/.mnc/_T1w-coreg.xfm/'`
echo "Flair: $flair"
# Shift T1 intensities so the minimum is zero before registration.
min=`mincstats -quiet -min $t1`
minccalc -clobber -expr "(A[0] - $min)" $t1 temp.mnc
#minctracc -clobber -lsq6 -est_translation $flair temp.mnc $flair_xfma
# 6-parameter (rigid) FLAIR->T1 registration, computed once and reused
# for the FLAIR and both lesion masks.
if [[ ! -f $flair_xfm ]]; then
bestlinreg.pl -clobber -lsq6 -nmi $flair temp.mnc $flair_xfm
fi
echo "Flair"
if [[ ! -f $flair_rsl ]]; then
mincresample -nearest -clobber -transformation $flair_xfm -like $t1 $flair $flair_rsl
fi
echo "Lesion"
if [[ ! -f $lesion_rsl ]]; then
mincresample -nearest -clobber -transformation $flair_xfm -like $t1 $lesion $lesion_rsl
fi
echo "Lesion Negative"
if [[ ! -f $lesion_neg_rsl ]]; then
mincresample -nearest -clobber -transformation $flair_xfm -like $t1 $lesion_neg $lesion_neg_rsl
fi
#register temp.mnc $flair_rsl
else
# No FLAIR available: copy the lesion masks through unchanged.
cp $lesion $lesion_rsl
cp $lesion_neg $lesion_neg_rsl
echo "Cannot find Flair"
fi
mnc2nii -nii $lesion_rsl
mnc2nii -nii $lesion_neg_rsl
done
done
| true
|
ce93aabba49b94b236026bf90b957e55129be241
|
Shell
|
geraldbaeck/dotfiles
|
/bash/.bash_prompt
|
UTF-8
| 4,384
| 4.1875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# who am I
printf 'Running %s\n' "$BASH_SOURCE"
# DESCRIPTION:
#
# Set the bash prompt according to:
# * the active virtualenv
# * the branch of the current git/mercurial repository and it's status
# * the return value of the previous command
# * newline
#
# USAGE:
#
# 1. Save this file as ~/.bash_prompt
# 2. Add the following line to the end of your ~/.bashrc or ~/.bash_profile:
# source ~/.bash_prompt
# The various escape codes that we can use to color our prompt.
# \[ \] tell readline the enclosed characters are zero-width.
RED="\[\033[0;31m\]"
YELLOW="\[\033[1;33m\]"
GREEN="\[\033[0;32m\]"
BLUE="\[\033[1;34m\]"
PURPLE="\[\033[0;35m\]"
LIGHT_RED="\[\033[1;31m\]"
LIGHT_GREEN="\[\033[1;32m\]"
WHITE="\[\033[1;37m\]"
LIGHT_GRAY="\[\033[0;37m\]"
COLOR_NONE="\[\e[0m\]"
# Unicode symbols
# NOTE(review): PS_SYMBOL_DARWIN appears empty — possibly a glyph lost in
# a past encoding conversion; confirm the intended macOS prompt symbol.
PS_SYMBOL_DARWIN=''
PS_SYMBOL_LINUX='$'
PS_SYMBOL_OTHER='%'
GIT_BRANCH_SYMBOL='⑂ '
GIT_BRANCH_CHANGED_SYMBOL='+'
GIT_NEED_PUSH_SYMBOL='⇡'
GIT_NEED_PULL_SYMBOL='⇣'
# determine git branch name and status
# Prints "<symbol><branch>[ +][ ⇡N][ ⇣N]" (no trailing newline), or nothing
# when git is absent or the cwd is not inside a repository.
__git_info() {
    # `command -v` is the portable check; `which` is an external tool.
    command -v git >/dev/null 2>&1 || return # git not found
    local git_eng="env LANG=C git" # force git output in English to make our work easier
    # get current branch name or short SHA1 hash for detached head
    local branch="$($git_eng symbolic-ref --short HEAD 2>/dev/null || $git_eng describe --tags --always 2>/dev/null)"
    [ -n "$branch" ] || return # git branch not found
    local marks=""
    # branch is modified?
    [ -n "$($git_eng status --porcelain)" ] && marks+=" $GIT_BRANCH_CHANGED_SYMBOL"
    # how many commits local branch is ahead/behind of remote?
    local stat="$($git_eng status --porcelain --branch | grep '^##' | grep -o '\[.\+\]$')"
    local aheadN="$(echo $stat | grep -o 'ahead [[:digit:]]\+' | grep -o '[[:digit:]]\+')"
    local behindN="$(echo $stat | grep -o 'behind [[:digit:]]\+' | grep -o '[[:digit:]]\+')"
    [ -n "$aheadN" ] && marks+=" $GIT_NEED_PUSH_SYMBOL$aheadN"
    [ -n "$behindN" ] && marks+=" $GIT_NEED_PULL_SYMBOL$behindN"
    # print the git branch segment without a trailing newline; pass the data
    # as an argument, not as the printf format string (branch names may
    # contain '%').
    printf '%s' "$GIT_BRANCH_SYMBOL$branch$marks"
}
# determine mercurial branch name
# Emits " (branch)" for the current hg repo; silent (and harmless) when hg
# is missing or the cwd is not a repository.
function parse_hg_branch(){
  hg branch 2> /dev/null | awk '{ printf " (%s)\n", $1 }'
}
# Determine the branch/state information for this repository.
# Sets the global REPO to a colored "(branch)" segment, or to the empty
# string when the cwd is in neither a git nor a mercurial repository.
function set_repo() {
  # Try git first, then fall back to mercurial.
  branch=$(__git_info)
  [ "$branch" == "" ] && branch=$(parse_hg_branch)
  # Build the final (possibly empty) prompt segment.
  if [ "$branch" == "" ]; then
    REPO=""
  else
    REPO="${PURPLE}(${branch})${COLOR_NONE} "
  fi
}
# Return the prompt symbol to use, colorized based on the return value of
# the previous command: plain "$" on success, red "$" on failure.
function set_prompt_symbol () {
  if [ "$1" -eq 0 ]; then
    PROMPT_SYMBOL="\$"
  else
    PROMPT_SYMBOL="${LIGHT_RED}\$${COLOR_NONE}"
  fi
}
# Build PS_SYMBOL once (only when currently empty): color reflects the
# previous command's exit status ($1), glyph reflects the operating system.
function set_ps_symbol () {
  if [[ -z "$PS_SYMBOL" ]]; then
    # Gray on success, red on failure.
    if [ "$1" -eq 0 ]; then
      PS_SYMBOL="${LIGHT_GRAY}"
    else
      PS_SYMBOL="${LIGHT_RED}"
    fi
    # Pick the OS-specific glyph.
    case "$(uname)" in
      Darwin) PS_SYMBOL+=$PS_SYMBOL_DARWIN ;;
      Linux)  PS_SYMBOL+=$PS_SYMBOL_LINUX ;;
      *)      PS_SYMBOL+=$PS_SYMBOL_OTHER ;;
    esac
    PS_SYMBOL+="${COLOR_NONE}"
  fi
}
# Show a "user@host:" segment only when running as someone other than
# DEFAULT_USER. HOST is a zsh variable and is usually unset under bash,
# so fall back to bash's HOSTNAME (backward compatible: HOST wins if set).
function set_user () {
  if test "$USER" = "$DEFAULT_USER"; then
    USR_BASH=""
  else
    USR_BASH="${GREEN}${USER}@${HOST:-$HOSTNAME}${COLOR_NONE}:"
  fi
}
# Determine active Python virtualenv details.
# Sets PYTHON_VIRTUALENV to a colored "[envname] " segment, or to the empty
# string when no virtualenv is active.
function set_virtualenv () {
  if test -z "$VIRTUAL_ENV" ; then
    PYTHON_VIRTUALENV=""
  else
    # Modern $( ) substitution instead of backticks with escaped quotes.
    PYTHON_VIRTUALENV="${BLUE}[$(basename "$VIRTUAL_ENV")]${COLOR_NONE} "
  fi
}
# Colored current-working-directory segment; "\w" is expanded later by the
# prompt, not here.
function set_path () {
  CURRENT_PATH="${YELLOW}\w${COLOR_NONE} "
}
# Set the full bash prompt.
# Recomputes every segment, then assembles PS1 as:
#   <blank line>
#   [virtualenv][user@host:]<cwd> [(branch)]
#   <status symbol>>
function set_bash_prompt () {
# Set the PROMPT_SYMBOL variable. We do this first so we don't lose the
# return value of the last command.
set_ps_symbol $?
# Set the PYTHON_VIRTUALENV variable.
set_virtualenv
# Set the user variable
set_user
# Set the path variable
set_path
# set the repository variable
set_repo
# Set the bash prompt variable.
PS1="
${PYTHON_VIRTUALENV}${USR_BASH}${CURRENT_PATH}${REPO}
${PS_SYMBOL}> "
}
# Tell bash to execute this function just before displaying its prompt.
PROMPT_COMMAND=set_bash_prompt
| true
|
8b5fb93326229e60ac2511c68dc0d23a71c3dd5f
|
Shell
|
FreddieMercy/LinuxCustomCMDimmigrate
|
/imploy/cmd/soiadjffoiwejjmicsdmacokweejqriufjlkdsamkjassjdoijwesd2.bash
|
UTF-8
| 177
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
# Move $1 to $2, list the destination directory, then return to the
# original directory. With no arguments, report that nothing was moved.
if [ $# != 0 ]
then
    var=${PWD}
    echo $#
    # Quote arguments so paths with spaces work; -- stops option parsing.
    mv -- "$1" "$2"
    # Abort if the destination cannot be entered.
    cd -- "$2" || exit 1
    ls -l
    cd "$var" || exit 1
else
    # The original elif [ $# == 0 ] covered every remaining case, making its
    # else-branch unreachable; a plain else expresses the same logic.
    echo "Not moving anything"
fi
| true
|
932a386b3ec054719a23cc803df390639985d8fc
|
Shell
|
marcoAmir/backups
|
/geneLoss/matrixAnalysis/sh/intactGeneTables.sh
|
UTF-8
| 17,849
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash -e
# Here, for a given species, I'll identify the intact protein-coding genes that are stable in ensembl
# NOTE(review): -e in the shebang is lost when invoked as `bash script`;
# consider `set -e` in the body instead.
ensembl=$GENELOSS/data/Ensembl
ucsc=$GENELOSS/data/UCSC
curatedGenes=$GENELOSS/data/curatedGeneLosses.validated.txt
# Usage
if [ "$#" -ne 2 ]; then
echo -e "\nUsage:\n$0 species annotatedByEnsembl(1/0)\n"
exit 1
fi
species=$1
# UCSC assembly name, taken from column 4 of the species list.
assembly=`grep $species $GENELOSS/data/speciesList.txt | cut -f4`
if [ "$species" = "Cat" ]
then
assembly=felCat5
fi
# Strip underscores so the species name matches Ensembl file naming.
species=`echo $species | sed -e "s/_//g"`
logFile=${species}Human.log
useCanonicals=1
filter_SegDups_Deprecated=1 # if selected I'll remove all genes that intersect segmental-duplications & deprecated genes (genes that are protein-coding in Ensembl74 but not in Ensembl78)
annotatedByEnsembl=$2 # if the species is NOT annotated by ensembl, I'll just collect the features and partition to canonical and non-canonical human transcripts
# Main branch: the query species has its own Ensembl annotation, so we can
# determine which of its protein-coding genes are intact and project them
# onto human orthologs.
if [ $annotatedByEnsembl -ne 0 ]
then
## I'll only take those with evidence from the literature
#awk -F'\t' '{if($4~/[0-9]/) print $0}' $curatedGenes | grep $species | cut -f1 | sort -u > ${species}.curated
awk -F'\t' '{if($1~/ENSG/) print $0}' $curatedGenes | grep $species | cut -f1 | sort -u > ${species}.curated
echo -e "\n"$species"\t"$assembly
echo -e "\n"$species"\t"$assembly > $logFile
# Step 1 - get the transcripts of the given species that maintain 'protein_coding' biotype across multiple ensembl version (Specifically, from ens74 to ens81):
ens74trans=`awk -F'\t' '{if($6=="protein_coding") print $2}' $ensembl/speciesTranscripts/geneTranscript.${species}.ensembl74.tsv | sort -u | wc -l`
ens81trans=`awk -F'\t' '{if($6=="protein_coding") print $2}' $ensembl/speciesTranscripts/geneTranscript.${species}.ensembl81.tsv | sort -u | wc -l`
# Join ens74 and ens81 on transcript ID (field 2); keep transcripts that are
# protein_coding in both releases and whose field 3 agrees between releases
# (presumably a gene symbol/name — TODO confirm); emit (gene, transcript).
join -t$'\t' -1 2 -2 2 -o '1.1 1.2 1.3 1.4 1.5 1.6 2.1 2.2 2.3 2.4 2.5 2.6' <(sort -k2,2 $ensembl/speciesTranscripts/geneTranscript.${species}.ensembl74.tsv) <(sort -k2,2 $ensembl/speciesTranscripts/geneTranscript.${species}.ensembl81.tsv) | awk -F'\t' '{if($6=="protein_coding" && $12=="protein_coding" && $3==$9) print $0}' | cut -f1,2 | sort -u > ${species}.intact.proteinCodingTx
ensOverlap=`cut -f2 ${species}.intact.proteinCodingTx | sort -u | wc -l`
# Step 2 - intersect the list from step 1 with a list of complete transcript model in mouse
if [ "$species" = "Dolphin" ] # pretty annoying but i don't have the ensGene table for turTru2
then
assembly=turTru1
fi
if [ "$species" = "Alpaca" ] # pretty annoying but i don't have the ensGene table for vicPac2
then
assembly=vicPac1
fi
# Keep only transcripts whose CDS start and end are flagged complete in the
# UCSC ensGene table for this assembly.
join -t$'\t' -1 2 -2 1 -o '1.1 1.2' <(sort -k2 ${species}.intact.proteinCodingTx) <(hgsql ${assembly} -Ne "SELECT name, chrom, txStart, txEnd FROM ensGene WHERE cdsStartStat='cmpl' AND cdsEndStat='cmpl'" | sort) | sort -u > tmp; mv tmp ${species}.intact.proteinCodingTx
if [ "$species" = "Dolphin" ] # after the sql query I'll just revert the assembly back to turTru2
then
assembly=turTru2
fi
if [ "$species" = "Alpaca" ] # pretty annoying but i don't have the ensGene table for vicPac2
then
assembly=vicPac2
fi
ensIntacts=`wc -l ${species}.intact.proteinCodingTx | cut -d" " -f1`
echo -e "\n\t$ens74trans $species protein-coding transcripts in Ensembl biomart 74\n\t$ens81trans $species protein-coding transcripts in Ensembl biomart 81\n\t$ensOverlap $species shared protein-coding transcripts\n\t$ensIntacts $species shared protein-coding (ens74 to ens81) and complete models" >> $logFile
echo -e "\n\t$ens74trans $species protein-coding transcripts in Ensembl biomart 74\n\t$ens81trans $species protein-coding transcripts in Ensembl biomart 81\n\t$ensOverlap $species shared protein-coding transcripts\n\t$ensIntacts $species shared protein-coding (ens74 to ens81) and complete models"
# Step 3 - Now I'll expand the list of intact transcript (gene, transcript pairs) that passed the filters in steps 1,2 with human orthologs at the transcript level.
# Namely the transcript IDs of the query species (e.g., mouse ENSMUST do not play a role from here on)
# I will use the Ensembl74 tables
# Output from this step is a table mapping species gene identifiers (intact protein-coding genes by steps 1,2) to human gene idenftiers along with a list of their protein-coding transcripts
join -t$'\t' -1 1 -2 6 -o '1.1 2.1 2.2 2.3 2.4' <(sort ${species}.intact.proteinCodingTx) <(sort -k6,6 $ensembl/OrthoTables74/human${species}.EnsemblOrtho.biomart74.map) | sort -u | awk -F'\t' '{if($5=="protein_coding") print $1"\t"$2"\t"$3"\t"$4}' > ${species}.intact.proteinCodingTx.humanOrtho
# I will remove here ambigous gene-gene orthologs where a query species gene is mapped to more than a single human gene, or vice versa where a human-gene is mapped to more than a single gene in the query species
cut -f1,2 ${species}.intact.proteinCodingTx.humanOrtho | sort -u | cut -f2 | sort | uniq -c | awk '{if($1>1) print $2}' | sort -u > nonUniqeHumanGenes.txt
cut -f1,2 ${species}.intact.proteinCodingTx.humanOrtho | sort -u | cut -f1 | sort | uniq -c | awk '{if($1>1) print $2}' | sort -u > nonUniqe${species}Genes.txt
grep -v -f nonUniqe${species}Genes.txt <(grep -v -f nonUniqeHumanGenes.txt ${species}.intact.proteinCodingTx.humanOrtho) > tmp; mv tmp ${species}.intact.proteinCodingTx.humanOrtho
# Step 4 - remove incomplete human transcripts
join -t$'\t' -1 1 -2 3 -o '2.1 2.2 2.3 2.4' <(hgsql hg19 -Ne "SELECT name FROM ensGene WHERE cdsStartStat='cmpl' AND cdsEndStat='cmpl'" | sort) <(sort -k3 ${species}.intact.proteinCodingTx.humanOrtho) > tmp ; mv tmp ${species}.intact.proteinCodingTx.humanOrtho
# Step 5 - Now, expand with:
# chains (2.3), stop-codons (2.4), frameshifts (2.11), number of deleted exons (2.5), number of total exons (2.15), number of deleted bases (2.10), number of deleted bases (2.14), percent.id (2.16)
join -t$'\t' -1 3 -2 2 -o '2.1 1.1 1.2 1.3 1.4 2.3 2.4 2.11 2.5 2.15 2.10 2.14 2.16' <(sort -t$'\t' -k3,3 ${species}.intact.proteinCodingTx.humanOrtho) <(grep ${assembly} $GENELOSS/data/hg19.calltable.unique.placentals | sort -t$'\t' -k2,2) | sort -t$'\t' -k4,4 > tmp
# And add the Ka/Ks ratio (pre-computed at: $GENELOSS/data/KaKs)
# The awk step also derives two ratio columns ($9/$10 and $11/$12 — deleted
# over total exons/bases, per the field list above).
join -t$'\t' -1 4 -2 2 -o '1.1 1.2 1.3 1.4 1.5 1.6 1.7 1.8 1.9 1.10 1.11 1.12 1.13 2.2 2.3 2.11' <(sort -t$'\t' -k4,4 tmp) <(sort -t$'\t' -k2,2 $GENELOSS/data/KaKs/speciesData/transcriptNonSynRate.${assembly}.tsv) | awk -F'\t' '{if($6==$15) print $1"\t"$2"\t"$3"\t"$4"\t"$5"\t"$7"\t"$8"\t"$9"\t"$10"\t"$9/$10"\t"$11"\t"$12"\t"$11/$12"\t"$13"\t"$16}' | sort -u | sort -k2,2 -k3,3 -k4,4 > ${species}.intact.proteinCodingTx.humanOrtho
# If selected, exclude all the non canonical transcripts
if [ $useCanonicals -ne 0 ]
then
echo -e "\n Keeping only canonical human transcripts...\n"
join -t$'\t' -1 4 -2 1 -o '1.1 1.2 1.3 1.4 1.5 1.6 1.7 1.8 1.9 1.10 1.11 1.12 1.13 1.14 1.15' <(sort -t$'\t' -k4,4 ${species}.intact.proteinCodingTx.humanOrtho) <(sort $ensembl/hg19.canonicalTranscripts.biomart74) | sort -u | sort -k2,2 -k3,3 -k4,4 > ${species}.intact.proteinCodingTx.canonical.humanOrtho
fi
rm -rf tmp
ensHuman=`cut -f4 ${species}.intact.proteinCodingTx.humanOrtho | sort -u | wc -l`
ensg=`cut -f3 ${species}.intact.proteinCodingTx.humanOrtho | sort -u | wc -l`
qgenes=`cut -f2 ${species}.intact.proteinCodingTx.humanOrtho | sort -u | wc -l`
echo -e "Summary\n=======\n\t$ensHuman human transcripts were found in $ensg genes orthologues to $qgenes intact genes in ${species}" >> $logFile
echo -e "Summary\n=======\n\t$ensHuman human transcripts were found in $ensg genes orthologues to $qgenes intact genes in ${species}"
# Step 6 - now take all the human transcript that do not belong to genes with orthology to the intact genes in the query species
# (the inner 'join ... -v 2' is an anti-join: keep call-table rows whose human
# transcript is NOT in the intact set built above)
join -t$'\t' -1 2 -2 2 -o '1.1 2.1 2.2 1.3 1.4 1.5 1.6 1.7 1.8 1.9 1.10' <(join -t$'\t' -1 2 -2 2 -o '1.1 1.2 1.3 1.4 1.5 1.6 1.7 1.8 1.9 1.10 2.2 2.3 2.11' <(join -t$'\t' -1 1 -2 2 -o '2.1 2.2 2.3 2.4 2.11 2.5 2.15 2.10 2.14 2.16' -v 2 <(cut -f4 ${species}.intact.proteinCodingTx.humanOrtho | sort -u) <(grep $assembly $GENELOSS/data/hg19.calltable.unique.placentals | sort -t$'\t' -k2,2) | sort -t$'\t' -k2,2) <(sort -t$'\t' -k2,2 $GENELOSS/data/KaKs/speciesData/transcriptNonSynRate.${assembly}.tsv) | awk -F'\t' '{if($2==$11 && $3==$12) print $1"\t"$2"\t"$4"\t"$5"\t"$6"\t"$7"\t"$8"\t"$9"\t"$10"\t"$13}' | sort -t$'\t' -k2,2) <(sort -t$'\t' -k2,2 $ensembl/geneTranscript.ensembl74.tsv) | sort -u > tmp
join -t$'\t' -1 2 -2 2 -o '1.1 1.2 1.3 2.1 1.4 1.5 1.6 1.7 1.8 1.9 1.10 1.11' <(sort -t$'\t' -k2,2 tmp) <(sort -t$'\t' -k2,2 $ensembl/humanGeneSymbol.humanEnsembl.biomart74.NoSyn.map) | awk -F'\t' '{print $1"\t"$2"\t"$3"\t"$4"\t"$5"\t"$6"\t"$7"\t"$8"\t"$7/$8"\t"$9"\t"$10"\t"$9/$10"\t"$11"\t"$12}' | sort -u > ${species}.remainingCalls.proteinCodingTx.humanOrtho
join -t$'\t' -1 1 -2 3 -o '2.1 2.2 2.3 2.4 2.5 2.6 2.7 2.8 2.9 2.10 2.11 2.12 2.13 2.14' <(hgsql hg19 -Ne "SELECT name FROM ensGene WHERE cdsStartStat='cmpl' AND cdsEndStat='cmpl'" | sort) <(sort -t$'\t' -k3,3 ${species}.remainingCalls.proteinCodingTx.humanOrtho) | sort -u > tmp; mv tmp ${species}.remainingCalls.proteinCodingTx.humanOrtho
# If selected, exclude all the non canonical transcripts
if [ $useCanonicals -ne 0 ]
then
echo -e "\n Keeping only canonical human transcripts...\n"
join -t$'\t' -1 3 -2 1 -o '1.1 1.2 1.3 1.4 1.5 1.6 1.7 1.8 1.9 1.10 1.11 1.12 1.13 1.14' <(sort -t$'\t' -k3,3 ${species}.remainingCalls.proteinCodingTx.humanOrtho) <(sort $ensembl/hg19.canonicalTranscripts.biomart74) | sort -u | sort -k2,2 -k3,3 -k4,4 > ${species}.remainingCalls.proteinCodingTx.canonical.humanOrtho
fi
RemEnsHuman=`cut -f2 ${species}.remainingCalls.proteinCodingTx.humanOrtho | sort -u | wc -l`
RemEnstHuman=`cut -f3 ${species}.remainingCalls.proteinCodingTx.humanOrtho | sort -u | wc -l`
echo -e "\t$RemEnstHuman human transcripts were found in $RemEnsHuman genes unmapped to intact protein-coding orthologues genes in ${species}" >> $logFile
echo -e "\t$RemEnstHuman human transcripts were found in $RemEnsHuman genes unmapped to intact protein-coding orthologues genes in ${species}"
# Step 7 - Same as in 6 but here we will select only the trancript which in we called lost by the selected threshold
# NOTE(review): this pipeline looks identical to the step-6 pipeline apart
# from the output file name — verify the intended loss-threshold filter is
# actually applied somewhere.
join -t$'\t' -1 2 -2 2 -o '1.1 2.1 2.2 1.3 1.4 1.5 1.6 1.7 1.8 1.9 1.10' <(join -t$'\t' -1 2 -2 2 -o '1.1 1.2 1.3 1.4 1.5 1.6 1.7 1.8 1.9 1.10 2.2 2.3 2.11' <(join -t$'\t' -1 1 -2 2 -o '2.1 2.2 2.3 2.4 2.11 2.5 2.15 2.10 2.14 2.16' -v 2 <(cut -f4 ${species}.intact.proteinCodingTx.humanOrtho | sort -u) <(grep $assembly $GENELOSS/data/hg19.calltable.unique.placentals | sort -t$'\t' -k2,2) | sort -t$'\t' -k2,2) <(sort -t$'\t' -k2,2 $GENELOSS/data/KaKs/speciesData/transcriptNonSynRate.${assembly}.tsv) | awk -F'\t' '{if($2==$11 && $3==$12) print $1"\t"$2"\t"$4"\t"$5"\t"$6"\t"$7"\t"$8"\t"$9"\t"$10"\t"$13}' | sort -t$'\t' -k2,2) <(sort -t$'\t' -k2,2 $ensembl/geneTranscript.ensembl74.tsv) | sort -u > tmp
join -t$'\t' -1 2 -2 2 -o '1.1 1.2 1.3 2.1 1.4 1.5 1.6 1.7 1.8 1.9 1.10 1.11' <(sort -t$'\t' -k2,2 tmp) <(sort -t$'\t' -k2,2 $ensembl/humanGeneSymbol.humanEnsembl.biomart74.NoSyn.map) | awk -F'\t' '{print $1"\t"$2"\t"$3"\t"$4"\t"$5"\t"$6"\t"$7"\t"$8"\t"$7/$8"\t"$9"\t"$10"\t"$9/$10"\t"$11"\t"$12}' | sort -u > ${species}.lossCalls.proteinCodingTx.humanOrtho
join -t$'\t' -1 1 -2 3 -o '2.1 2.2 2.3 2.4 2.5 2.6 2.7 2.8 2.9 2.10 2.11 2.12 2.13 2.14' <(hgsql hg19 -Ne "SELECT name FROM ensGene WHERE cdsStartStat='cmpl' AND cdsEndStat='cmpl'" | sort) <(sort -t$'\t' -k3,3 ${species}.lossCalls.proteinCodingTx.humanOrtho) | sort -u > tmp; mv tmp ${species}.lossCalls.proteinCodingTx.humanOrtho
# If selected, exclude all the non canonical transcripts
if [ $useCanonicals -ne 0 ]
then
#echo -e "\n Keeping only canonical human transcripts...\n"
join -t$'\t' -1 3 -2 1 -o '1.1 1.2 1.3 1.4 1.5 1.6 1.7 1.8 1.9 1.10 1.11 1.12 1.13 1.14' <(sort -t$'\t' -k3,3 ${species}.lossCalls.proteinCodingTx.humanOrtho) <(sort $ensembl/hg19.canonicalTranscripts.biomart74) | sort -u | sort -k2,2 -k3,3 -k4,4 > ${species}.lossCalls.proteinCodingTx.canonical.humanOrtho
fi
lostEnsHuman=`cut -f2 ${species}.lossCalls.proteinCodingTx.humanOrtho | sort -u | wc -l`
lostEnstHuman=`cut -f3 ${species}.lossCalls.proteinCodingTx.humanOrtho | sort -u | wc -l`
echo -e "\t$lostEnstHuman human transcripts were found in $lostEnsHuman genes with Loss-of-Function calls in ${species}\n" >> $logFile
echo -e "\t$lostEnstHuman human transcripts were found in $lostEnsHuman genes with Loss-of-Function calls in ${species}\n"
# Step 8 - if 'filter_SegDups_Deprecated' is selected create the subset files as well
if [ $filter_SegDups_Deprecated -ne 0 ]
then
cp $ensembl/hg19.transcripts.bed a.bed
cp $ucsc/hg19.segmentalDuplications.bed b.bed
# Collect transcripts overlapping segmental duplications (overlapSelect),
# then expand to all transcripts of the genes they belong to.
join -t$'\t' -1 1 -2 1 -o '2.2' <(join -t$'\t' -1 1 -2 2 -o '2.1' <(overlapSelect a.bed b.bed -statsOutput stdout | awk -F'\t' '{if($4>0) print $0}' | cut -f2 | sort -u) <(sort -t$'\t' -k2,2 $ensembl/geneTranscript.ensembl74.tsv) | sort -u) <(sort -t$'\t' $ensembl/geneTranscript.ensembl74.tsv) | sort -u > transcriptsInSegDup.txt
# now clean up the files from seg-dups and deprecated genes:
# (both anti-joins use 'join -v 2' — drop rows whose key appears in the
# seg-dup / no-longer-protein-coding lists)
join -t$'\t' -1 1 -2 3 -v 2 <(sort -u $ensembl/genes74noLongerProteinCoding.txt) <(join -t$'\t' -1 1 -2 4 -v 2 <(sort transcriptsInSegDup.txt) <(sort -t$'\t' -k4,4 ${species}.intact.proteinCodingTx.humanOrtho) | sort -u | sort -t$'\t' -k3,3) | sort -u | sort -t$'\t' -k3,3 > ${species}.intact.proteinCodingTx.humanOrtho.filter_SegDups_Deprecated
join -t$'\t' -1 1 -2 3 -v 2 <(sort -u $ensembl/genes74noLongerProteinCoding.txt) <(join -t$'\t' -1 1 -2 4 -v 2 <(sort transcriptsInSegDup.txt) <(sort -t$'\t' -k4,4 ${species}.intact.proteinCodingTx.canonical.humanOrtho) | sort -u | sort -t$'\t' -k3,3) | sort -u | sort -t$'\t' -k3,3 > ${species}.intact.proteinCodingTx.canonical.humanOrtho.filter_SegDups_Deprecated
join -t$'\t' -1 1 -2 3 -v 2 <(sort -u $ensembl/genes74noLongerProteinCoding.txt) <(join -t$'\t' -1 1 -2 4 -v 2 <(sort transcriptsInSegDup.txt) <(sort -t$'\t' -k4,4 ${species}.remainingCalls.proteinCodingTx.humanOrtho) | sort -u | sort -t$'\t' -k3,3) | sort -u | sort -t$'\t' -k3,3 > ${species}.remainingCalls.proteinCodingTx.humanOrtho.filter_SegDups_Deprecated
join -t$'\t' -1 1 -2 3 -v 2 <(sort -u $ensembl/genes74noLongerProteinCoding.txt) <(join -t$'\t' -1 1 -2 4 -v 2 <(sort transcriptsInSegDup.txt) <(sort -t$'\t' -k4,4 ${species}.remainingCalls.proteinCodingTx.canonical.humanOrtho) | sort -u | sort -t$'\t' -k3,3) | sort -u | sort -t$'\t' -k3,3 > ${species}.remainingCalls.proteinCodingTx.canonical.humanOrtho.filter_SegDups_Deprecated
rm -rf a.bed b.bed transcriptsInSegDup.txt
fi
# Step 9 - generate plots in R: (note that these plots show everything including seg-dups transcripts)
# sed rewrites the QUERY placeholder (and, for canonicals, the input/output
# file-name stems) inside the R templates before running them.
sed -e "s/QUERY/\'${species}\'/g" $GENELOSS/src/R/intactGeneDistributions.R > intactGeneDistributions.R
sed -e "s/QUERY/\'${species}\'/g" $GENELOSS/src/R/intactGeneDistributions.clusterAnalysis.R > intactGeneDistributions.clusterAnalysis.R
if [ $useCanonicals -ne 0 ]
then
sed -e "s/QUERY/\'${species}\'/g;s/proteinCodingTx.humanOrtho/proteinCodingTx.canonical.humanOrtho/g;s/\.png/.canonical.png/g;s/\.pdf/.canonical.pdf/g" $GENELOSS/src/R/intactGeneDistributions.R > intactGeneDistributions.R
sed -e "s/QUERY/\'${species}\'/g;s/proteinCodingTx.humanOrtho/proteinCodingTx.canonical.humanOrtho/g;s/\.png/.canonical.png/g;s/\.pdf/.canonical.pdf/g;s/\.Quantiles/.canonical.Quantiles/g" $GENELOSS/src/R/intactGeneDistributions.clusterAnalysis.R > intactGeneDistributions.clusterAnalysis.R
fi
chmod 755 intactGeneDistributions.R
Rscript intactGeneDistributions.clusterAnalysis.R
Rscript intactGeneDistributions.R
fi
# Branch for species WITHOUT their own Ensembl annotation: no intactness
# filtering is possible, so collect features for every call-table row of this
# assembly and annotate them with human gene information only.
if [ $annotatedByEnsembl -eq 0 ]
then
# Select this assembly's rows and derive the two ratio columns ($5/$15 and
# $10/$14 — presumably deleted/total exons and bases, matching the field
# list used in the annotated branch; TODO confirm).
grep ${assembly} $GENELOSS/data/hg19.calltable.placentals | awk -F'\t' '{print $1"\t"$2"\t"$4"\t"$11"\t"$5"\t"$15"\t"$5/$15"\t"$10"\t"$14"\t"$10/$14"\t"$16}' > tmp1
# Attach the pre-computed Ka/Ks columns, then gene IDs and gene symbols;
# the species-gene column is filled with the literal "NA" since there is no
# species annotation to map from.
join -t$'\t' -1 2 -2 2 -o '1.1 1.2 1.3 1.4 1.5 1.6 1.7 1.8 1.9 1.10 1.11 2.11' <(sort -t$'\t' -k2,2 tmp1) <(sort -t$'\t' -k2,2 $GENELOSS/data/KaKs/speciesData/transcriptNonSynRate.${assembly}.tsv) > tmp2
join -t$'\t' -1 2 -2 2 -o '1.1 1.2 1.3 2.1 1.4 1.5 1.6 1.7 1.8 1.9 1.10 1.11 1.12 1.13' <(join -t$'\t' -1 2 -2 2 -o '1.1 2.1 1.2 1.3 1.4 1.5 1.6 1.7 1.8 1.9 1.10 1.11 1.12' <(sort -t$'\t' -k2,2 tmp2) <(sort -t$'\t' -k2,2 $ensembl/geneTranscript.ensembl74.tsv) | sort -t$'\t' -k2,2) <(sort -t$'\t' -k2,2 $ensembl/humanGeneSymbol.humanEnsembl.biomart74.NoSyn.map) | sort -u | sort -t$'\t' -k2,2 | awk -F'\t' '{print $1"\tNA\t"$2"\t"$3"\t"$4"\t"$5"\t"$6"\t"$7"\t"$8"\t"$9"\t"$10"\t"$11"\t"$12"\t"$13"\t"$14}' > tmp3
if [ $filter_SegDups_Deprecated -ne 0 ]
then
cp $ensembl/hg19.transcripts.bed a.bed
cp $ucsc/hg19.segmentalDuplications.bed b.bed
# Same seg-dup transcript collection as in the annotated branch.
join -t$'\t' -1 1 -2 1 -o '2.2' <(join -t$'\t' -1 1 -2 2 -o '2.1' <(overlapSelect a.bed b.bed -statsOutput stdout | awk -F'\t' '{if($4>0) print $0}' | cut -f2 | sort -u) <(sort -t$'\t' -k2,2 $ensembl/geneTranscript.ensembl74.tsv) | sort -u) <(sort -t$'\t' $ensembl/geneTranscript.ensembl74.tsv) | sort -u > transcriptsInSegDup.txt
# Anti-join out seg-dup transcripts and deprecated genes (join -v 2).
join -t$'\t' -1 1 -2 3 -v 2 <(sort -u $ensembl/genes74noLongerProteinCoding.txt) <(join -t$'\t' -1 1 -2 4 -v 2 <(sort transcriptsInSegDup.txt) <(sort -t$'\t' -k4,4 tmp3) | sort -u | sort -t$'\t' -k3,3) | sort -u | sort -t$'\t' -k3,3 > ${species}.all.proteinCodingTx.humanOrtho.filter_SegDups_Deprecated
cp ${species}.all.proteinCodingTx.humanOrtho.filter_SegDups_Deprecated tmp3
fi
if [ $useCanonicals -ne 0 ]
then
join -t$'\t' -1 4 -2 1 -o '1.1 1.2 1.3 1.4 1.5 1.6 1.7 1.8 1.9 1.10 1.11 1.12 1.13 1.14 1.15' <(sort -t$'\t' -k4,4 tmp3) <(sort $ensembl/hg19.canonicalTranscripts.biomart74) | sort -u | sort -t$'\t' -k3,3 > ${species}.all.proteinCodingTx.canonical.humanOrtho
if [ $filter_SegDups_Deprecated -ne 0 ]
then
mv ${species}.all.proteinCodingTx.canonical.humanOrtho ${species}.all.proteinCodingTx.canonical.humanOrtho.filter_SegDups_Deprecated
fi
fi
fi
| true
|
e4c5e12989ddfbfc1f2d8e211189188ce36a93ef
|
Shell
|
kanow/operations
|
/Build/Scripts/runTests.sh
|
UTF-8
| 27,992
| 4.125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
#
# TYPO3 core test runner based on docker.
#
# Block until TCP HOST:PORT ($1:$2) accepts connections, probing with 'nc -z'
# from a throwaway alpine container attached to the per-run test network.
# Probes once per second; gives up (probe container exits 1) once the counter
# exceeds 10. Returns the probe container's exit status.
waitFor() {
local HOST=${1}
local PORT=${2}
# The probe script runs INSIDE the container: HOST/PORT are expanded here on
# the host, while COUNT references are backslash-escaped so they are only
# evaluated by the container's /bin/sh.
local TESTCOMMAND="
COUNT=0;
while ! nc -z ${HOST} ${PORT}; do
if [ \"\${COUNT}\" -gt 10 ]; then
echo \"Can not connect to ${HOST} port ${PORT}. Aborting.\";
exit 1;
fi;
sleep 1;
COUNT=\$((COUNT + 1));
done;
"
${CONTAINER_BIN} run ${CONTAINER_COMMON_PARAMS} --name wait-for-${SUFFIX} ${XDEBUG_MODE} -e XDEBUG_CONFIG="${XDEBUG_CONFIG}" ${IMAGE_ALPINE} /bin/sh -c "${TESTCOMMAND}"
}
# Remove every container still attached to the per-run network ${NETWORK},
# then remove the network itself. Reads globals CONTAINER_BIN and NETWORK.
cleanUp() {
    local attached name
    # 'ps --format {{.Names}}' emits one container name per line. Read the
    # list line-wise instead of relying on unquoted word splitting (SC2086).
    attached=$(${CONTAINER_BIN} ps --filter "network=${NETWORK}" --format='{{.Names}}')
    while IFS= read -r name; do
        [ -n "${name}" ] || continue
        ${CONTAINER_BIN} rm -f "${name}" >/dev/null
    done <<< "${attached}"
    ${CONTAINER_BIN} network rm "${NETWORK}" >/dev/null
}
# Options -a and -d depend on each other. The function
# validates input combinations and sets defaults.
#
# Reads globals DBMS and DATABASE_DRIVER; may assign a default driver.
# On an invalid combination it prints a diagnostic to stderr and exits 1.
# The duplicated mariadb/mysql and postgres/sqlite arms of the original are
# merged into shared case patterns — behavior is unchanged.
handleDbmsAndDriverOptions() {
    case ${DBMS} in
        mariadb|mysql)
            # MySQL-family DBMS: default to mysqli; only mysqli and
            # pdo_mysql are valid values for -a.
            [ -z "${DATABASE_DRIVER}" ] && DATABASE_DRIVER="mysqli"
            if [ "${DATABASE_DRIVER}" != "mysqli" ] && [ "${DATABASE_DRIVER}" != "pdo_mysql" ]; then
                echo "Invalid option -a ${DATABASE_DRIVER} with -d ${DBMS}" >&2
                echo >&2
                echo "call \".Build/Scripts/runTests.sh -h\" to display help and valid options" >&2
                exit 1
            fi
            ;;
        postgres|sqlite)
            # These DBMS have a single built-in driver; -a must not be given.
            if [ -n "${DATABASE_DRIVER}" ]; then
                echo "Invalid option -a ${DATABASE_DRIVER} with -d ${DBMS}" >&2
                echo >&2
                echo "call \".Build/Scripts/runTests.sh -h\" to display help and valid options" >&2
                exit 1
            fi
            ;;
        *)
            echo "Invalid option -d ${DBMS}" >&2
            echo >&2
            echo "call \".Build/Scripts/runTests.sh -h\" to display help and valid options" >&2
            exit 1
            ;;
    esac
}
cleanCacheFiles() {
    # Drop the composer and php-cs-fixer cache artifacts from the build tree.
    printf '%s' "Clean caches ... "
    rm -rf .Build/.cache .php-cs-fixer.cache
    printf '%s\n' "done"
}
cleanTestFiles() {
    # Remove the typo3temp test artifacts kept below .Build.
    printf '%s' "Clean test related files ... "
    rm -rf .Build/Web/typo3temp/var/tests/
    printf '%s\n' "done"
}
cleanRenderedDocumentationFiles() {
    # Remove the output folder produced by the documentation renderer.
    printf '%s' "Clean rendered documentation files ... "
    rm -rf Documentation-GENERATED-temp
    printf '%s\n' "done"
}
# Populate the global HELP variable with the full usage text.
loadHelp() {
# Load help text into $HELP
# 'read -d ""' slurps until EOF and then returns non-zero; that is harmless
# here because the script does not run under 'set -e'.
read -r -d '' HELP <<EOF
TYPO3 core test runner. Execute acceptance, unit, functional and other test suites in
a container based test environment. Handles execution of single test files, sending
xdebug information to a local IDE and more.
Usage: $0 [options] [file]
Options:
    -s <...>
        Specifies which test suite to run
            - cgl: Checks the code style with the PHP Coding Standards Fixer (PHP-CS-Fixer).
            - cglFix: Fixes the code style with PHP-CS-Fixer."
            - clean: clean up build, cache and testing related files and folders
            - cleanCache: clean up cache related files and folders
            - cleanRenderedDocumentation: clean up rendered documentation files and folders (Documentation-GENERATED-temp)
            - cleanTests: clean up test related files and folders
            - composer: "composer" with all remaining arguments dispatched.
            - composerInstallMax: "composer update", with no platform.php config.
            - composerInstallMin: "composer update --prefer-lowest", with platform.php set to PHP version x.x.0.
            - docsGenerate: Renders the extension ReST documentation.
            - functional: PHP functional tests
            - lintTypoScript: TypoScript linting
            - lintPhp: PHP linting
            - lintJson: JSON linting
            - lintYaml: YAML linting
            - phpstan: phpstan tests
            - phpstanGenerateBaseline: regenerate phpstan baseline, handy after phpstan updates
            - unit (default): PHP unit tests
            - unitRandom: PHP unit tests in random order, add -o <number> to use specific seed
    -a <mysqli|pdo_mysql>
        Only with -s functional|functionalDeprecated
        Specifies to use another driver, following combinations are available:
            - mysql
                - mysqli (default)
                - pdo_mysql
            - mariadb
                - mysqli (default)
                - pdo_mysql
    -d <sqlite|mariadb|mysql|postgres>
        Only with -s functional|functionalDeprecated|acceptance|acceptanceInstall
        Specifies on which DBMS tests are performed
            - sqlite: (default): use sqlite
            - mariadb: use mariadb
            - mysql: use MySQL
            - postgres: use postgres
    -i <10.3|10.4|10.5|10.6|10.7|10.8|10.9|10.10|10.11|11.0|11.1>
        Only with -d mariadb
        Specifies on which version of mariadb tests are performed
            - 10.3   short-term, maintained until 2023-05-25 (default)
            - 10.4   short-term, maintained until 2024-06-18
            - 10.5   short-term, maintained until 2025-06-24
            - 10.6   long-term, maintained until 2026-06
            - 10.7   short-term, no longer maintained
            - 10.8   short-term, maintained until 2023-05
            - 10.9   short-term, maintained until 2023-08
            - 10.10  short-term, maintained until 2023-11
            - 10.11  long-term, maintained until 2028-02
            - 11.0   development series
            - 11.1   short-term development series
    -j <5.5|5.6|5.7|8.0>
        Only with -d mysql
        Specifies on which version of mysql tests are performed
            - 5.5   unmaintained since 2018-12
            - 5.6   unmaintained since 2021-02
            - 5.7   maintained until 2023-10
            - 8.0   maintained until 2026-04 (default)
    -k <10|11|12|13|14|15>
        Only with -d postgres
        Specifies on which version of postgres tests are performed
            - 10    unmaintained since 2022-11-10 (default)
            - 11    maintained until 2023-11-09
            - 12    maintained until 2024-11-14
            - 13    maintained until 2025-11-13
            - 14    maintained until 2026-11-12
            - 15    maintained until 2027-11-11
    -t <11.5|12.4>
        Only with -s composerInstall|composerInstallMin|composerInstallMax
        Specifies the TYPO3 CORE Version to be used
            - 11: (default) use TYPO3 v11 with typo3/cms-composer-installers ^3
            - 12: use TYPO3 v12 with typo3/cms-composer-installers ^5
    -p <7.4|8.0|8.1|8.2>
        Specifies the PHP minor version to be used
            - 7.4: use PHP 7.4
            - 8.0: use PHP 8.0
            - 8.1: (default) use PHP 8.1
            - 8.2: use PHP 8.2
    -e "<phpunit options>"
        Only with -s functional|functionalDeprecated|unit|unitDeprecated|unitRandom|acceptance
        Additional options to send to phpunit (unit & functional tests) or codeception (acceptance
        tests). For phpunit, options starting with "--" must be added after options starting with "-".
        Example -e "-v --filter canRetrieveValueWithGP" to enable verbose output AND filter tests
        named "canRetrieveValueWithGP"
    -x
        Only with -s functional|functionalDeprecated|unit|unitDeprecated|unitRandom|acceptance|acceptanceInstall
        Send information to host instance for test or system under test break points. This is especially
        useful if a local PhpStorm instance is listening on default xdebug port 9003. A different port
        can be selected with -y
    -y <port>
        Send xdebug information to a different port than default 9003 if an IDE like PhpStorm
        is not listening on default port.
    -o <number>
        Only with -s unitRandom
        Set specific random seed to replay a random run in this order again. The phpunit randomizer
        outputs the used seed at the end (in gitlab core testing logs, too). Use that number to
        replay the unit tests in that order.
    -n
        Only with -s cgl|composerNormalize
        Activate dry-run in CGL check that does not actively change files and only prints broken ones.
    -u
        Update existing typo3/core-testing-*:latest container images and remove dangling local volumes.
        New images are published once in a while and only the latest ones are supported by core testing.
        Use this if weird test errors occur. Also removes obsolete image versions of typo3/core-testing-*.
    -h
        Show this help.
Examples:
    # Run all core unit tests using PHP 8.1
    ./Build/Scripts/runTests.sh
    ./Build/Scripts/runTests.sh -s unit
    # Run all core units tests and enable xdebug (have a PhpStorm listening on port 9003!)
    ./Build/Scripts/runTests.sh -x
    # Run unit tests in phpunit verbose mode with xdebug on PHP 8.1 and filter for test canRetrieveValueWithGP
    ./Build/Scripts/runTests.sh -x -p 8.1 -e "-v --filter canRetrieveValueWithGP"
    # Run functional tests in phpunit with a filtered test method name in a specified file
    # example will currently execute two tests, both of which start with the search term
    ./Build/Scripts/runTests.sh -s functional -e "--filter deleteContent" typo3/sysext/core/Tests/Functional/DataHandling/Regular/Modify/ActionTest.php
    # Run functional tests on postgres with xdebug, php 8.1 and execute a restricted set of tests
    ./Build/Scripts/runTests.sh -x -p 8.1 -s functional -d postgres typo3/sysext/core/Tests/Functional/Authentication
    # Run functional tests on postgres 11
    ./Build/Scripts/runTests.sh -s functional -d postgres -k 11
    # Run restricted set of application acceptance tests
    ./Build/Scripts/runTests.sh -s acceptance typo3/sysext/core/Tests/Acceptance/Application/Login/BackendLoginCest.php:loginButtonMouseOver
    # Run installer tests of a new instance on sqlite
    ./Build/Scripts/runTests.sh -s acceptanceInstall -d sqlite
EOF
}
# Test if docker exists, else exit out with error
# NOTE(review): 'command -v docker' would be the more portable check
# than the 'type' builtin, but behavior here is equivalent in bash.
if ! type "docker" >/dev/null; then
echo "This script relies on docker. Please install" >&2
exit 1
fi
# Option defaults
TEST_SUITE="unit"
CORE_VERSION="11.5"
DBMS="sqlite"
PHP_VERSION="8.1"
PHP_XDEBUG_ON=0
PHP_XDEBUG_PORT=9003
EXTRA_TEST_OPTIONS=""
PHPUNIT_RANDOM=""
CGLCHECK_DRY_RUN=0
DATABASE_DRIVER=""
MARIADB_VERSION="10.3"
MYSQL_VERSION="8.0"
POSTGRES_VERSION="10"
CONTAINER_BIN="docker"
# Option parsing updates above default vars
# Reset in case getopts has been used previously in the shell
OPTIND=1
# Array for invalid options
INVALID_OPTIONS=()
# Simple option parsing based on getopts (! not getopt)
# Parse command-line flags into the globals defaulted above. Flags whose
# argument fails validation are collected in INVALID_OPTIONS and reported
# after the loop.
# NOTE(review): the optstring has no leading ':', so getopts prints its own
# error for unknown/missing-argument options and the ':' arm below is
# unreachable; in the '?' and ':' arms OPTARG may be unset.
# NOTE(review): the version regexes use unescaped dots (e.g. '10.3' also
# matches '1003') — harmless in practice but worth tightening.
while getopts "a:s:d:i:j:k:p:e:t:xy:o:nhu" OPT; do
case ${OPT} in
s)
TEST_SUITE=${OPTARG}
;;
a)
DATABASE_DRIVER=${OPTARG}
;;
d)
DBMS=${OPTARG}
;;
i)
MARIADB_VERSION=${OPTARG}
if ! [[ ${MARIADB_VERSION} =~ ^(10.3|10.4|10.5|10.6|10.7|10.8|10.9|10.10|10.11|11.0|11.1)$ ]]; then
INVALID_OPTIONS+=("i ${OPTARG}")
fi
;;
j)
MYSQL_VERSION=${OPTARG}
if ! [[ ${MYSQL_VERSION} =~ ^(5.5|5.6|5.7|8.0)$ ]]; then
INVALID_OPTIONS+=("j ${OPTARG}")
fi
;;
k)
POSTGRES_VERSION=${OPTARG}
if ! [[ ${POSTGRES_VERSION} =~ ^(10|11|12|13|14|15)$ ]]; then
# NOTE(review): unlike the other arms, the option letter prefix
# ("k ") is missing from the recorded invalid option.
INVALID_OPTIONS+=("${OPTARG}")
fi
;;
p)
PHP_VERSION=${OPTARG}
if ! [[ ${PHP_VERSION} =~ ^(7.4|8.0|8.1|8.2|8.3)$ ]]; then
INVALID_OPTIONS+=("p ${OPTARG}")
fi
;;
e)
EXTRA_TEST_OPTIONS=${OPTARG}
;;
t)
CORE_VERSION=${OPTARG}
if ! [[ ${CORE_VERSION} =~ ^(11.5|12.4)$ ]]; then
INVALID_OPTIONS+=("t ${OPTARG}")
fi
;;
x)
PHP_XDEBUG_ON=1
;;
y)
PHP_XDEBUG_PORT=${OPTARG}
;;
o)
PHPUNIT_RANDOM="--random-order-seed=${OPTARG}"
;;
n)
CGLCHECK_DRY_RUN=1
;;
h)
loadHelp
echo "${HELP}"
exit 0
;;
u)
TEST_SUITE=update
;;
\?)
INVALID_OPTIONS+=("${OPTARG}")
;;
:)
INVALID_OPTIONS+=("${OPTARG}")
;;
esac
done
# Exit on invalid options
if [ ${#INVALID_OPTIONS[@]} -ne 0 ]; then
echo "Invalid option(s):" >&2
for I in "${INVALID_OPTIONS[@]}"; do
echo "-"${I} >&2
done
echo >&2
# NOTE(review): this final hint goes to stdout while the lines above go to
# stderr — likely an oversight.
echo "call \".Build/Scripts/runTests.sh -h\" to display help and valid options"
exit 1
fi
COMPOSER_ROOT_VERSION="3.0.x-dev"
# Run containers as the invoking user (except on macOS, where Docker Desktop
# handles file ownership itself).
HOST_UID=$(id -u)
USERSET=""
if [ $(uname) != "Darwin" ]; then
USERSET="--user $HOST_UID"
fi
# Go to the directory this script is located, so everything else is relative
# to this dir, no matter from where this script is called, then go up two dirs.
THIS_SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null && pwd)"
cd "$THIS_SCRIPT_DIR" || exit 1
cd ../../ || exit 1
ROOT_DIR="${PWD}"
# Create .cache dir: composer need this.
mkdir -p .cache
mkdir -p .Build/Web/typo3temp/var/tests
PHPSTAN_CONFIG_FILE="phpstan.neon"
IMAGE_PREFIX="docker.io/"
# Non-CI fetches TYPO3 images (php and nodejs) from ghcr.io
TYPO3_IMAGE_PREFIX="ghcr.io/"
CONTAINER_INTERACTIVE="-it --init"
IS_CORE_CI=0
# ENV var "CI" is set by gitlab-ci. We use it here to distinct 'local' and 'CI' environment.
if [ "${CI}" == "true" ]; then
IS_CORE_CI=1
# In CI, we need to pull images from docker.io for the registry proxy to kick in.
TYPO3_IMAGE_PREFIX="docker.io/"
IMAGE_PREFIX=""
CONTAINER_INTERACTIVE=""
fi
# Image names: e.g. PHP_VERSION=8.1 -> typo3/core-testing-php81:latest.
IMAGE_PHP="${TYPO3_IMAGE_PREFIX}typo3/core-testing-$(echo "php${PHP_VERSION}" | sed -e 's/\.//'):latest"
IMAGE_ALPINE="${IMAGE_PREFIX}alpine:3.8"
IMAGE_DOCS="ghcr.io/t3docs/render-documentation:latest"
IMAGE_SELENIUM="${IMAGE_PREFIX}selenium/standalone-chrome:4.0.0-20211102"
IMAGE_MARIADB="${IMAGE_PREFIX}mariadb:${MARIADB_VERSION}"
IMAGE_MYSQL="${IMAGE_PREFIX}mysql:${MYSQL_VERSION}"
IMAGE_POSTGRES="${IMAGE_PREFIX}postgres:${POSTGRES_VERSION}-alpine"
# Detect arm64 and use a seleniarm image.
# In a perfect world selenium would have a arm64 integrated, but that is not on the horizon.
# So for the time being we have to use seleniarm image.
ARCH=$(uname -m)
if [ ${ARCH} = "arm64" ]; then
IMAGE_SELENIUM="${IMAGE_PREFIX}seleniarm/standalone-chromium:4.1.2-20220227"
echo "Architecture" ${ARCH} "requires" ${IMAGE_SELENIUM} "to run acceptance tests."
fi
# Set $1 to first mass argument, this is the optional test file or test directory to execute
shift $((OPTIND - 1))
TEST_FILE=${1}
# Per-run suffix so concurrently running invocations get distinct
# container/network names.
SUFFIX=$(echo $RANDOM)
NETWORK="kanow-operations-${SUFFIX}"
${CONTAINER_BIN} network create ${NETWORK} >/dev/null
# NOTE(review): the inner double quotes around host.docker.internal... end
# the outer string early; it works only because the value has no spaces.
CONTAINER_COMMON_PARAMS="${CONTAINER_INTERACTIVE} --rm --network $NETWORK --add-host "host.docker.internal:host-gateway" $USERSET -v ${ROOT_DIR}:${ROOT_DIR} -w ${ROOT_DIR}"
CONTAINER_DOCS_PARAMS="${CONTAINER_INTERACTIVE} --rm $USERSET -v ${ROOT_DIR}:/PROJECT -v ${ROOT_DIR}/Documentation-GENERATED-temp:/RESULT -w ${ROOT_DIR}"
# Translate the -x/-y flags into xdebug-related docker arguments.
if [ ${PHP_XDEBUG_ON} -eq 0 ]; then
XDEBUG_MODE="-e XDEBUG_MODE=off"
XDEBUG_CONFIG=" "
else
XDEBUG_MODE="-e XDEBUG_MODE=debug -e XDEBUG_TRIGGER=foo"
XDEBUG_CONFIG="client_port=${PHP_XDEBUG_PORT} client_host=host.docker.internal"
fi
# Suite execution
case ${TEST_SUITE} in
cgl)
if [ "${CGLCHECK_DRY_RUN}" -eq 1 ]; then
COMMAND="composer ci:php:cs-fixer"
else
COMMAND="composer fix:php:cs"
fi
${CONTAINER_BIN} run ${CONTAINER_COMMON_PARAMS} --name composer-command-${SUFFIX} -e COMPOSER_CACHE_DIR=.cache/composer -e COMPOSER_ROOT_VERSION=${COMPOSER_ROOT_VERSION} ${IMAGE_PHP} /bin/sh -c "${COMMAND}"
SUITE_EXIT_CODE=$?
;;
clean)
cleanCacheFiles
cleanRenderedDocumentationFiles
cleanTestFiles
;;
cleanCache)
cleanCacheFiles
;;
cleanRenderedDocumentation)
cleanRenderedDocumentationFiles
;;
cleanTests)
cleanTestFiles
;;
composer)
COMMAND="composer \"$@\""
${CONTAINER_BIN} run ${CONTAINER_COMMON_PARAMS} --name composer-command-${SUFFIX} -e COMPOSER_CACHE_DIR=.cache/composer -e COMPOSER_ROOT_VERSION=${COMPOSER_ROOT_VERSION} ${IMAGE_PHP} /bin/sh -c "${COMMAND}"
SUITE_EXIT_CODE=$?
;;
composerInstall)
COMMAND="composer install"
${CONTAINER_BIN} run ${CONTAINER_COMMON_PARAMS} --name composer-install-${SUFFIX} -e COMPOSER_CACHE_DIR=.cache/composer -e COMPOSER_ROOT_VERSION=${COMPOSER_ROOT_VERSION} ${IMAGE_PHP} /bin/sh -c "${COMMAND}"
SUITE_EXIT_CODE=$?
;;
composerInstallMax)
COMMAND="composer config --unset platform.php; composer require --no-ansi --no-interaction --no-progress --no-install typo3/cms-core:"^${CORE_VERSION}"; composer update --no-progress --no-interaction; composer dumpautoload; composer show"
${CONTAINER_BIN} run ${CONTAINER_COMMON_PARAMS} --name composer-install-max-${SUFFIX} -e COMPOSER_CACHE_DIR=.cache/composer -e COMPOSER_ROOT_VERSION=${COMPOSER_ROOT_VERSION} ${IMAGE_PHP} /bin/sh -c "${COMMAND}"
SUITE_EXIT_CODE=$?
;;
composerInstallMin)
COMMAND="composer config platform.php ${PHP_VERSION}.0; composer require --no-ansi --no-interaction --no-progress --no-install typo3/cms-core:"^${CORE_VERSION}"; composer update --prefer-lowest --no-progress --no-interaction; composer dumpautoload; composer show"
${CONTAINER_BIN} run ${CONTAINER_COMMON_PARAMS} --name composer-install-min-${SUFFIX} -e COMPOSER_CACHE_DIR=.cache/composer -e COMPOSER_ROOT_VERSION=${COMPOSER_ROOT_VERSION} ${IMAGE_PHP} /bin/sh -c "${COMMAND}"
SUITE_EXIT_CODE=$?
;;
composerNormalize)
COMMAND="composer ci:composer:normalize"
if [ "${CGLCHECK_DRY_RUN}" -eq 1 ]; then
COMMAND="composer ci:composer:normalize"
else
COMMAND="composer fix:composer:normalize"
fi
${CONTAINER_BIN} run ${CONTAINER_COMMON_PARAMS} --name composer-normalize-${SUFFIX} -e COMPOSER_CACHE_DIR=.cache/composer -e COMPOSER_ROOT_VERSION=${COMPOSER_ROOT_VERSION} ${IMAGE_DOCS} /bin/sh -c "${COMMAND}"
SUITE_EXIT_CODE=$?
;;
docsGenerate)
# @todo contact the documentation team for a future rootles podman version
${CONTAINER_BIN} run --rm ${IMAGE_DOCS} show-shell-commands > generate-documentation.sh
echo 'dockrun_t3rd makehtml' >> generate-documentation.sh
bash generate-documentation.sh
rm -Rf generate-documentation.sh
SUITE_EXIT_CODE=$?
;;
functional)
[ -z "${TEST_FILE}" ] && TEST_FILE="Tests/Functional"
handleDbmsAndDriverOptions
COMMAND=".Build/bin/phpunit -c .Build/vendor/typo3/testing-framework/Resources/Core/Build/FunctionalTests.xml --exclude-group not-${DBMS} ${EXTRA_TEST_OPTIONS} ${TEST_FILE}"
case ${DBMS} in
mariadb)
echo "Using driver: ${DATABASE_DRIVER}"
${CONTAINER_BIN} run --name mariadb-func-${SUFFIX} --network ${NETWORK} -d -e MYSQL_ROOT_PASSWORD=funcp --tmpfs /var/lib/mysql/:rw,noexec,nosuid ${IMAGE_MARIADB} >/dev/null
waitFor mariadb-func-${SUFFIX} 3306
CONTAINERPARAMS="-e typo3DatabaseDriver=${DATABASE_DRIVER} -e typo3DatabaseName=func_test -e typo3DatabaseUsername=root -e typo3DatabaseHost=mariadb-func-${SUFFIX} -e typo3DatabasePassword=funcp"
${CONTAINER_BIN} run ${CONTAINER_COMMON_PARAMS} --name functional-${SUFFIX} ${XDEBUG_MODE} -e XDEBUG_CONFIG="${XDEBUG_CONFIG}" ${CONTAINERPARAMS} ${IMAGE_PHP} ${COMMAND}
SUITE_EXIT_CODE=$?
;;
mysql)
echo "Using driver: ${DATABASE_DRIVER}"
${CONTAINER_BIN} run --name mysql-func-${SUFFIX} --network ${NETWORK} -d -e MYSQL_ROOT_PASSWORD=funcp --tmpfs /var/lib/mysql/:rw,noexec,nosuid ${IMAGE_MYSQL} >/dev/null
waitFor mysql-func-${SUFFIX} 3306
CONTAINERPARAMS="-e typo3DatabaseDriver=${DATABASE_DRIVER} -e typo3DatabaseName=func_test -e typo3DatabaseUsername=root -e typo3DatabaseHost=mysql-func-${SUFFIX} -e typo3DatabasePassword=funcp"
${CONTAINER_BIN} run ${CONTAINER_COMMON_PARAMS} --name functional-${SUFFIX} ${XDEBUG_MODE} -e XDEBUG_CONFIG="${XDEBUG_CONFIG}" ${CONTAINERPARAMS} ${IMAGE_PHP} ${COMMAND}
SUITE_EXIT_CODE=$?
;;
postgres)
${CONTAINER_BIN} run --name postgres-func-${SUFFIX} --network ${NETWORK} -d -e POSTGRES_PASSWORD=funcp -e POSTGRES_USER=funcu --tmpfs /var/lib/postgresql/data:rw,noexec,nosuid ${IMAGE_POSTGRES} >/dev/null
waitFor postgres-func-${SUFFIX} 5432
CONTAINERPARAMS="-e typo3DatabaseDriver=pdo_pgsql -e typo3DatabaseName=bamboo -e typo3DatabaseUsername=funcu -e typo3DatabaseHost=postgres-func-${SUFFIX} -e typo3DatabasePassword=funcp"
${CONTAINER_BIN} run ${CONTAINER_COMMON_PARAMS} --name functional-${SUFFIX} ${XDEBUG_MODE} -e XDEBUG_CONFIG="${XDEBUG_CONFIG}" ${CONTAINERPARAMS} ${IMAGE_PHP} ${COMMAND}
SUITE_EXIT_CODE=$?
;;
sqlite)
# create sqlite tmpfs mount typo3temp/var/tests/functional-sqlite-dbs/ to avoid permission issues
mkdir -p "${ROOT_DIR}/typo3temp/var/tests/functional-sqlite-dbs/"
CONTAINERPARAMS="-e typo3DatabaseDriver=pdo_sqlite --tmpfs ${ROOT_DIR}/typo3temp/var/tests/functional-sqlite-dbs/:rw,noexec,nosuid,uid=${HOST_UID}"
${CONTAINER_BIN} run ${CONTAINER_COMMON_PARAMS} --name functional-${SUFFIX} ${XDEBUG_MODE} -e XDEBUG_CONFIG="${XDEBUG_CONFIG}" ${CONTAINERPARAMS} ${IMAGE_PHP} ${COMMAND}
SUITE_EXIT_CODE=$?
;;
esac
;;
lintTypoScript)
COMMAND="composer ci:ts:lint"
${CONTAINER_BIN} run ${CONTAINER_COMMON_PARAMS} --name composer-command-${SUFFIX} -e COMPOSER_CACHE_DIR=.cache/composer -e COMPOSER_ROOT_VERSION=${COMPOSER_ROOT_VERSION} ${IMAGE_PHP} /bin/sh -c "${COMMAND}"
SUITE_EXIT_CODE=$?
;;
lintPhp)
COMMAND="composer ci:php:lint"
${CONTAINER_BIN} run ${CONTAINER_COMMON_PARAMS} --name composer-command-${SUFFIX} -e COMPOSER_CACHE_DIR=.cache/composer -e COMPOSER_ROOT_VERSION=${COMPOSER_ROOT_VERSION} ${IMAGE_PHP} /bin/sh -c "${COMMAND}"
SUITE_EXIT_CODE=$?
;;
lintJson)
COMMAND="composer ci:json:lint"
${CONTAINER_BIN} run ${CONTAINER_COMMON_PARAMS} --name composer-command-${SUFFIX} -e COMPOSER_CACHE_DIR=.cache/composer -e COMPOSER_ROOT_VERSION=${COMPOSER_ROOT_VERSION} ${IMAGE_PHP} /bin/sh -c "${COMMAND}"
SUITE_EXIT_CODE=$?
;;
lintYaml)
COMMAND="composer ci:yaml:lint"
${CONTAINER_BIN} run ${CONTAINER_COMMON_PARAMS} --name composer-command-${SUFFIX} -e COMPOSER_CACHE_DIR=.cache/composer -e COMPOSER_ROOT_VERSION=${COMPOSER_ROOT_VERSION} ${IMAGE_PHP} /bin/sh -c "${COMMAND}"
SUITE_EXIT_CODE=$?
;;
phpstan)
COMMAND="composer ci:php:stan"
${CONTAINER_BIN} run ${CONTAINER_COMMON_PARAMS} --name composer-command-${SUFFIX} -e COMPOSER_CACHE_DIR=.cache/composer -e COMPOSER_ROOT_VERSION=${COMPOSER_ROOT_VERSION} ${IMAGE_PHP} /bin/sh -c "${COMMAND}"
SUITE_EXIT_CODE=$?
;;
phpstanGenerateBaseline)
COMMAND="composer phpstan:baseline"
${CONTAINER_BIN} run ${CONTAINER_COMMON_PARAMS} --name composer-command-${SUFFIX} -e COMPOSER_CACHE_DIR=.cache/composer -e COMPOSER_ROOT_VERSION=${COMPOSER_ROOT_VERSION} ${IMAGE_PHP} /bin/sh -c "${COMMAND}"
SUITE_EXIT_CODE=$?
;;
unit)
[ -z "${TEST_FILE}" ] && TEST_FILE="Tests/Unit"
${CONTAINER_BIN} run ${CONTAINER_COMMON_PARAMS} --name unit-${SUFFIX} ${XDEBUG_MODE} -e XDEBUG_CONFIG="${XDEBUG_CONFIG}" ${IMAGE_PHP} .Build/bin/phpunit -c .Build/vendor/typo3/testing-framework/Resources/Core/Build/UnitTests.xml ${EXTRA_TEST_OPTIONS} ${TEST_FILE}
SUITE_EXIT_CODE=$?
;;
unitRandom)
[ -z "${TEST_FILE}" ] && TEST_FILE="Tests/Unit"
${CONTAINER_BIN} run ${CONTAINER_COMMON_PARAMS} --name unit-random-${SUFFIX} ${XDEBUG_MODE} -e XDEBUG_CONFIG="${XDEBUG_CONFIG}" ${IMAGE_PHP} .Build/bin/phpunit -c .Build/vendor/typo3/testing-framework/Resources/Core/Build/UnitTests.xml --order-by=random ${EXTRA_TEST_OPTIONS} ${PHPUNIT_RANDOM} ${TEST_FILE}
SUITE_EXIT_CODE=$?
;;
update)
# prune unused, dangling local volumes
echo "> prune unused, dangling local volumes"
${CONTAINER_BIN} volume ls -q -f driver=local -f dangling=true | awk '$0 ~ /^[0-9a-f]{64}$/ { print }' | xargs -I {} ${CONTAINER_BIN} volume rm {}
echo ""
# pull typo3/core-testing-*:latest versions of those ones that exist locally
echo "> pull ${TYPO3_IMAGE_PREFIX}core-testing-*:latest versions of those ones that exist locally"
${CONTAINER_BIN} images ${TYPO3_IMAGE_PREFIX}core-testing-*:latest --format "{{.Repository}}:latest" | xargs -I {} ${CONTAINER_BIN} pull {}
echo ""
# remove "dangling" typo3/core-testing-* images (those tagged as <none>)
echo "> remove \"dangling\" ${TYPO3_IMAGE_PREFIX}core-testing-* images (those tagged as <none>)"
${CONTAINER_BIN} images ${TYPO3_IMAGE_PREFIX}core-testing-* --filter "dangling=true" --format "{{.ID}}" | xargs -I {} ${CONTAINER_BIN} rmi {}
echo ""
;;
*)
loadHelp
echo "Invalid -s option argument ${TEST_SUITE}" >&2
echo >&2
echo "${HELP}" >&2
exit 1
;;
esac
cleanUp
# Print summary
echo "" >&2
echo "###########################################################################" >&2
echo "Result of ${TEST_SUITE}" >&2
if [[ ${IS_CORE_CI} -eq 1 ]]; then
echo "Environment: CI" >&2
else
echo "Environment: local" >&2
fi
echo "PHP: ${PHP_VERSION}" >&2
echo "TYPO3: ${CORE_VERSION}" >&2
if [[ ${TEST_SUITE} =~ ^functional$ ]]; then
case "${DBMS}" in
mariadb)
echo "DBMS: ${DBMS} version ${MARIADB_VERSION} driver ${DATABASE_DRIVER}" >&2
;;
mysql)
echo "DBMS: ${DBMS} version ${MYSQL_VERSION} driver ${DATABASE_DRIVER}" >&2
;;
postgres)
echo "DBMS: ${DBMS} version ${POSTGRES_VERSION}" >&2
;;
sqlite)
echo "DBMS: ${DBMS}" >&2
;;
esac
fi
if [[ ${SUITE_EXIT_CODE} -eq 0 ]]; then
echo "SUCCESS" >&2
else
echo "FAILURE" >&2
fi
echo "###########################################################################" >&2
echo "" >&2
# Exit with code of test suite - This script return non-zero if the executed test failed.
exit $SUITE_EXIT_CODE
| true
|
1d0ee901299acce0c9d143e07857ebe8b2442afd
|
Shell
|
yithian/yuggoth_ansible
|
/roles/webserver/files/nextcloud/set-nc-perms
|
UTF-8
| 1,619
| 3.890625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# from https://docs.nextcloud.org/server/9/admin_manual/installation/installation_wizard.html#strong-perms-label
# run with the argument "runtime" to set the proper runtime permissions
# run with the argument "upgrade" to set the proper upgrade permissions
if [ -d "$2/apps" ]; then
ncpath="$2"
else
ncpath='/usr/local/share/nextcloud'
fi
htuser='http'
htgroup='http'
rootuser='root'
runtime() {
printf "Creating possible missing Directories\n"
mkdir -p $ncpath/data
mkdir -p $ncpath/updater
printf "chmod Files and Directories\n"
find ${ncpath}/ -type f -print0 | xargs -0 chmod 0640
find ${ncpath}/ -type d -print0 | xargs -0 chmod 0750
printf "chown Directories\n"
chown -R ${rootuser}:${htgroup} ${ncpath}/
chown -R ${htuser}:${htgroup} ${ncpath}/apps/
chown -R ${htuser}:${htgroup} ${ncpath}/config/
chown -R ${htuser}:${htgroup} ${ncpath}/data/
chown -R ${htuser}:${htgroup} ${ncpath}/themes/
chown -R ${htuser}:${htgroup} ${ncpath}/updater/
chmod +x ${ncpath}/occ
printf "chmod/chown .htaccess\n"
if [ -f ${ncpath}/.htaccess ]
then
chmod 0664 ${ncpath}/.htaccess
chown ${rootuser}:${htgroup} ${ncpath}/.htaccess
fi
if [ -f ${ncpath}/data/.htaccess ]
then
chmod 0664 ${ncpath}/data/.htaccess
chown ${rootuser}:${htgroup} ${ncpath}/data/.htaccess
fi
printf "chmod/chown .user.ini\n"
if [ -f ${ncpath}/.user.ini ]
then
chmod 0664 ${ncpath}/.user.ini
chown ${rootuser}:${htgroup} ${ncpath}/.htaccess
fi
}
upgrade() {
printf "Setting upgrade permissions\n"
chown -R ${htuser}:${htgroup} ${ncpath}
}
$1
| true
|
675ad95d52b33c8caf911780878d3379d1c1541c
|
Shell
|
toyhammered/hummingbird
|
/scripts/travis/install
|
UTF-8
| 476
| 3.21875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
install_server_deps() {
(
cd server
bundle install --deployment --jobs=3 --retry=3
)
}
install_client_deps() {
(
cd client
travis_retry npm install -g bower
travis_retry npm install phantomjs-prebuilt
travis_retry npm install
travis_retry bower install
)
}
# Select which dependency set to install based on $TESTGROUP.
# Quote and default the variable: previously an unset or empty TESTGROUP
# made `[ $TESTGROUP = 'client' ]` a [ = 'client' ] syntax error instead of
# falling through to "install both".
if [ "${TESTGROUP:-}" = 'client' ]; then
    install_client_deps
elif [ "${TESTGROUP:-}" = 'server' ]; then
    install_server_deps
else
    install_client_deps
    install_server_deps
fi
| true
|
1dee0ca1bb217bbe3a29199d67ba1075d9af0fa2
|
Shell
|
ysfiqbl/Sheldon
|
/util/String.sh
|
UTF-8
| 4,065
| 3.9375
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
################################################################################
# Sheldon: The not-so-bashful Bash framework. Bazinga!
#
# @namespace Sheldon::Util::String
# @copyright Copyright 2015, Housni Yakoob (http://housni.org)
# @license http://opensource.org/licenses/bsd-license.php The BSD License
################################################################################
################################################################################
# Join string arguments with a glue string.
#
# ### Usage
#
# ```
# use Sheldon::Util::String as String
#
# declare NAME
# $String::join =NAME ' & ' Amy Sheldon
# echo "${NAME} = ShAmy"
# ```
# The above will yield 'Amy & Sheldon = ShAmy'.
#
# You can provide as many string arguments as you want to be joined:
# ```
# $String::join =NAME '/' Raj Howard Sheldon Leonard
# ```
# The above will yield 'Raj/Howard/Sheldon/Leonard'.
#
# @param string $1
# The return key prefixed with '='.
# @param string $2
# The glue that joins the strings together.
# @param array $3
# The string to join with $2. You can provide as many arguments as you want.
# @assign
# Join strings in $3 with the glue string, $2.
################################################################################
Sheldon::Util::String::join() {
local assign
local trim
local glue
local joined
glue="$2"
assign="$1"
trim="${#glue}"
if [[ "$glue" == *"%"* ]]
then
# We need to escape '%' since we are using printf.
glue=${glue//%/%%}
fi
shift 2
joined="$( printf "${glue}%s" "$@" )"
joined="${joined:$trim}"
_assign "$assign" "$joined"
}
################################################################################
# Replaces variable placeholders inside a string with any given data. Each key
# in the `$data` array corresponds to a variable placeholder name in `$str`.
#
# The placeholder keys must be of the form `{:my_placeholder_value}`.
#
# ### Usage
#
# ```
# use Sheldon::Util::String as String
#
# declare DIR
# declare STRING
# declare -A DATA
#
# DATA=(
# ['client']='Jones'
# ['domain']='example.org'
# ['filename']='backup.sql'
# );
# STRING='/var/www/{:client}/{:domain}/backup/database/{:filename}'
# $String::insert =DIR "${STRING}" DATA
# echo "${DIR}"
# ```
# `${DIR}` will now be: /var/www/Jones/example.org/backup/database/backup.sql
#
# You can also specify the before and after strings:
#
# ```
# use Sheldon::Util::String as String
#
# declare DIR
# declare STRING
# declare -A DATA
#
# DATA=(
# ['client']='Jones'
# ['domain']='example.org'
# ['filename']='backup.sql'
# );
# STRING='/var/www/<?client?>/<?domain?>/backup/database/<?filename?>'
# $String::insert -b '<?' -a '?>' =DIR "${STRING}" DATA
# echo "${DIR}"
# ```
# `${DIR}` will still be: /var/www/Jones/example.org/backup/database/backup.sql
#
# @param string $before -b optional
# The string in front of the name of the variable place-holder. This
# defaults to `'{:'`.
# @param string $after -a optional
# The string after the name of the variable place-holder. Defaults to `'}'`.
# @param string $1
# The return key prefixed with '='.
# @param string $2
# A string containing variable placeholders.
# @param array $3
# An associate array where each key stands for a placeholder variable name
# to be replaced with a value.
# @assign
# $2 replaced with all the placeholders in $3.
################################################################################
Sheldon::Util::String::insert() {
local str
local -n Sheldon_string_data
local before
local after
local index
before='{:'
after='}'
index=''
# Overwrite with values that are passed in.
while getopts :a:b: index; do
case "${index}" in
a)
after="${OPTARG}"
;;
b)
before="${OPTARG}"
;;
esac
done
shift $(( OPTIND - 1 ))
index=''
str="$2"
Sheldon_string_data="$3"
for index in "${!Sheldon_string_data[@]}"; do
str=${str//"${before}""${index}""${after}"/"${Sheldon_string_data[$index]}"}
done
_assign "$1" "$str"
}
| true
|
73f21f44daa1d437d5fa163fc7e8f68e954167c0
|
Shell
|
kruzalus/repo123
|
/install_nginx.sh
|
UTF-8
| 1,156
| 3.515625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Helpers
function scriptInfo {
echo "-->"
echo "--> $1"
echo "-->"
}
function actionInfo {
echo -n "--> $1"
}
function doneInfo {
echo "--> ...done"
}
# Provisioning
scriptInfo "Provision-script: install_nginx.sh, user: `whoami`"
actionInfo "Updating repositories"
sudo apt-get update
doneInfo
actionInfo "Install nginx"
DEBIAN_FRONTEND=noninteractive apt-get install -y nginx
doneInfo
if ! [ -L /var/www ]; then
rm -rf /var/www
ln -fs /vagrant /var/www
fi
# Настраиваем фаервол на работу с Nginx, пока только по HTTP
actionInfo "Setting up ufw 80"
sudo ufw allow 80
doneInfo
actionInfo "Setting up ufw HTTP"
sudo ufw allow 'Nginx HTTP'
doneInfo
# ХЗ что из этого лучше делать, но если не делать 22, то походу не залогиниться по SSH потом??
actionInfo "Setting up ufw 22"
sudo ufw allow 22
doneInfo
actionInfo "Setting up ufw OpenSSH"
sudo ufw allow 'OpenSSH'
doneInfo
# Включаем фаервол только после настройки
actionInfo "Enable ufw"
yes Y | sudo ufw enable
doneInfo
| true
|
79470c49bb6b15c1fd0cceaa841afb107be7d1b9
|
Shell
|
kay0822/gen_iso
|
/3tos/run.sh
|
UTF-8
| 9,771
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
BASE_DIR="/home/gen_iso/3tos"
ISO_DIR="/home/iso"
BASE_ISO="${ISO_DIR}/CentOS-6.3-x86_64-bin-DVD1.iso"
USE_DEBUG=true
. ${BASE_DIR}/config.sh
function usage(){
cat <<EOF
Usage: $0 -P version -S version [options]
-P, --protocol version Server [P]rotocal version
-S, --server version [S]erver(python.tar.gz) version
[options]:
-I, --image [image] Specify the base [I]mage, example: -I"/home/iso/abc.iso" (without space!!!)
-v, --verbose Print [v]erbose information
-t, --test Run for [t]est, output will be put in fortest/
-h, --help Display this [h]elp usage
--skip-copy skip coping packages, better to use --skip-cp-a-repo
--skip-repo skip create reposite, better to use --skip-cp-a-repo
--skip-cp-a-repo combine --skip-copy and --skip-repo
--skip-partition skip the partitioning during kick-start installation
EOF
exit 0
}
#PARAM_SERVER_PROTO_VERSION="1.4.1.3"
PARAM_SERVER_PROTO_VERSION=
#PARAM_PYTHON_SERVER_VERSION="1.3.1"
PARAM_PYTHON_SERVER_VERSION=
PARAM_VERBOSE="/dev/null"
PARAM_SKIP_PARTITIONING="\1"
PARAM_PARTITION_LABEL=
PARAM_SKIP_COPY=false
PARAM_SKIP_REPO=false
PARAM_BASE_ISO=
IS_v_SET=false
IS_P_SET=false
IS_S_SET=false
IS_t_SET=false
IS_I_SET=false
args=`getopt -o :P:S:I::vth -l protocol:,server:,image::,verbose,test,help,skip-partition,skip-copy,skip-repo,skip-cp-a-repo -- "$@"`
eval set -- "$args"
while true; do
case $1 in
-P|--protocol)
IS_P_SET=true
PARAM_SERVER_PROTO_VERSION="$2"
if ! [[ "${PARAM_SERVER_PROTO_VERSION}" =~ [0-9]+\.[0-9]+\.[0-9]+\.[0-9]+ ]];then
ERROR "invalid protocol version: %s, example: 1.4.1.3\n" "$2"
fi
shift
;;
-S|--server)
IS_S_SET=true
PARAM_PYTHON_SERVER_VERSION="$2"
if ! [[ "${PARAM_PYTHON_SERVER_VERSION}" =~ [0-9]+\.[0-9]+\.[0-9]+ ]];then
ERROR "invalid server version: %s, example: 1.3.1\n" "$2"
fi
shift
;;
-I|--image)
IS_I_SET=true
case $2 in
"")
echo "Images:"
echo "------------------------------"
ISOs=(`ls ${ISO_DIR}/*.iso`)
iso_count=${#ISOs[@]}
for((i = 0; i < ${iso_count} ; i++)){
printf "%3d\t%s\n" "$i" "${ISOs[$i]}"
}
echo "------------------------------"
echo -n "choose images above: "
read opt
if ! [[ "$opt" =~ ^[0-9]{1,3}$ ]] || ! [ $opt -lt ${iso_count} ];then
ERROR "invalid option: %s\n" "$opt"
fi
PARAM_BASE_ISO=${ISOs[$opt]}
;;
*)
if [ ! -e "$2" ];then
ERROR "%s not exists\n" "$2"
elif ! [[ "$2" =~ .+\.iso ]];then
WARN "%s not end with .iso\n" "$2"
PARAM_BASE_ISO="$2"
else
PARAM_BASE_ISO="$2"
fi
;;
esac
INFO "base image -> %s\n" ${PARAM_BASE_ISO}
shift
;;
-v|--verbose)
IS_v_SET=true
PARAM_VERBOSE="/dev/stdout"
;;
-t|--test)
IS_t_SET=true
;;
-h|--help)
usage
;;
--skip-copy)
PARAM_SKIP_COPY=true
;;
--skip-repo)
PARAM_SKIP_REPO=true
;;
--skip-cp-a-repo)
PARAM_SKIP_COPY=true
PARAM_SKIP_REPO=true
;;
--skip-partition)
PARAM_SKIP_PARTITIONING="clearpart --all\nautopart"
PARAM_PARTITION_LABEL="-skip-partition"
;;
--)
shift
break
;;
*)
ERROR "Unknown option -> %s\n" $1
;;
esac
shift
done
if ! ( ${IS_P_SET} && ${IS_S_SET} ) ; then
usage
fi
if ${IS_I_SET}; then
BASE_ISO=${PARAM_BASE_ISO}
fi
###########
## Init ##
###########
CURRENT_DATE=`date +%F_%H-%M-%S`
#############
## Mount ##
#############
umount -d ${ROM_DIR} 2>/dev/null
umount -d ${ROM_DIR} 2>/dev/null
if [ ! -e "${ROM_DIR}" ];then
echo "Error: ${ROM_DIR} not exists!"
exit -1
fi
mount -o loop ${BASE_ISO} ${ROM_DIR}
mkdir -p ${BUILD_DIR}
if ${PARAM_SKIP_COPY};then
INFO "remove skipped\n"
else
rm -rf ${BUILD_DIR}/*
fi
########################
## Generate Configs ##
########################
INFO "generate configure files\n"
TMP_PACKAGE_LIST="/tmp/packages.list.tmp"
TMP_KS_CFG="/tmp/ks.cfg.tmp"
awk -F "Installing" '{print $2}' ${INSTALL_LOG} |sed -e '/^$/d' -e 's/^ //g' > ${TMP_PACKAGE_LIST}
cat ${CUSTOMIZE_KS_POST} > ${TMP_KS_CFG}
#----- generate ------
cat ${ANACONDA_KS_CFG} | sed -e 's/--onboot.*--bootproto/--onboot yes --bootproto/' \
-e 's/^selinux --enforcing/selinux --permissive/' \
-e 's/^repo.*--baseurl=cdrom.*//' \
-e "s/\(^#clearpart .*\)/${PARAM_SKIP_PARTITIONING}/" \
-e 's/^%end$//' > ${KS_CFG}
echo "# extra packages " >> ${KS_CFG}
for item in ${EXTRA_PACKAGES}; do
echo ${item} | awk -F':' '{print $1}' >> ${KS_CFG}
done
echo -e "\n\n\n" >> ${KS_CFG}
cat ${TMP_KS_CFG} | sed -e "s/{SERVER_PROTO_VERSION}/${PARAM_SERVER_PROTO_VERSION}/" \
-e "s/{LICENSE_DEST_DIR}/${LICENSE_DEST_DIR_REGEX}/" \
-e "s/{PROTO_PYC_DIR}/${PROTO_PYC_DIR_REGEX}/" \
-e "s/{BIN_DIR}/${BIN_REGEX}/" >> ${KS_CFG}
echo "%end" >> ${KS_CFG}
#######################
## Copy CD/DVD ROM ##
#######################
COPY_COUNT=0
function doCopy(){
__src=$1
__dst=$2
cp -rf ${__src} ${__dst}
COPY_COUNT=$(( ${COPY_COUNT}+1 ))
if ${IS_v_SET} ;then
printf "%05d: %s\n>>>>>> %s\n" ${COPY_COUNT} ${__src} ${__dst}
fi
}
#export -f doCopy
if ${PARAM_SKIP_COPY};then
INFO "copy skipped\n"
else
INFO "coping files\n"
ls -a ${ROM_DIR} | grep -vE '^\.*$|^Packages$'|xargs -n 1 -I {} cp -rf ${ROM_DIR}/{} ${BUILD_DIR}/
mkdir -p ${BUILD_DIR}/Packages
for package in `cat ${TMP_PACKAGE_LIST}`;do
echo >/dev/null
doCopy ${ROM_DIR}/Packages/${package}* ${BUILD_DIR}/Packages/
done
for package in `ls -a ${CUSTOMIZE_PACKAGES_DIR} | grep -vE '^\.*$'`; do
doCopy ${CUSTOMIZE_PACKAGES_DIR}/${package} ${BUILD_DIR}/Packages/
done
INFO "copy extra package\n"
for item in ${EXTRA_PACKAGES}; do
pkgs=`echo ${item} | awk -F':' '{print $2}'| awk -F'|' '
{
for(i=1; i<=NF; i++){
printf("%s ", $i);
}
}
'`
for pkg in ${pkgs}; do
doCopy ${ROM_DIR}/Packages/${pkg} ${BUILD_DIR}/Packages/
done
done
INFO "copy protocol package\n"
#----- get protocol package ------
if [ ! -d ${CUSTOMIZE_PROTO_DIR}/${PARAM_SERVER_PROTO_VERSION} ];then
TMP_PWD=`pwd`
cd ${CUSTOMIZE_PROTO_DIR}
rm -rf ${CUSTOMIZE_PROTO_DIR}/${PARAM_SERVER_PROTO_VERSION}*
PROTO_PACKAGE_URL="${PROTO_PACKAGE_RELEASE_DIR_URL}/${PARAM_SERVER_PROTO_VERSION}.tbz2"
PROTO_PACKAGE_MD5_URL="${PROTO_PACKAGE_RELEASE_DIR_URL}/${PARAM_SERVER_PROTO_VERSION}.tbz2.md5"
FLAG=1
while [ ${FLAG} -eq 1 ];do
wget ${PROTO_PACKAGE_RELEASE_DIR_URL}/${PARAM_SERVER_PROTO_VERSION}.tbz2
wget ${PROTO_PACKAGE_RELEASE_DIR_URL}/${PARAM_SERVER_PROTO_VERSION}.tbz2.md5
X=`cat ${PARAM_SERVER_PROTO_VERSION}.tbz2.md5`
Y=`md5sum ${PARAM_SERVER_PROTO_VERSION}.tbz2 | awk '{print $1}'`
if [ "$X" == "$Y" ];then
FLAG=0
fi
done
tar jxvf ${PARAM_SERVER_PROTO_VERSION}.tbz2
cd ${TMP_PWD}
fi
for package in `ls -a ${CUSTOMIZE_PROTO_DIR}/${PARAM_SERVER_PROTO_VERSION} | grep -vE '^\.*$'`; do
doCopy ${CUSTOMIZE_PROTO_DIR}/${PARAM_SERVER_PROTO_VERSION}/${package} ${BUILD_DIR}/Packages/
done
# logrotate
doCopy ${CUSTOMIZE_DIR}/hbmanager ${BUILD_DIR}/Packages/
fi # /* DO_COPY */
############
## Repo ##
############
if ${PARAM_SKIP_REPO}; then
INFO "create repo skipped\n"
else
INFO "create repo\n"
TMP_REPOMD_XML="/tmp/repomd.xml.tmp"
TMP_REPO_GROUP_XML="/tmp/repo.group.xml.tmp"
cat ${REPOMD_XML} |awk '
BEGIN{
FLAG=0
}
{
if(/<data.*type=.*group/){
FLAG=1
}
if(FLAG){
print
}
}' > ${TMP_REPO_GROUP_XML}
createrepo -p -d --unique-md-filenames ${BUILD_DIR}/ > ${PARAM_VERBOSE}
#--- generate ---
cat ${REPOMD_XML} | sed '/<\/repomd>/d' > ${TMP_REPOMD_XML}
cat ${TMP_REPO_GROUP_XML} >> ${TMP_REPOMD_XML}
cat ${TMP_REPOMD_XML} > ${REPOMD_XML}
fi
####################
## isolinux.cfg ##
####################
TMP_ISOLINUX_CFG_FILE="/tmp/isolinux.cfg.tmp"
cat ${CUSTOMIZE_ISOLINUX_CFG_HEADER} > ${TMP_ISOLINUX_CFG_FILE}
cat ${ISOLINUX_CFG_FILE} | sed '1d' >> ${TMP_ISOLINUX_CFG_FILE}
cat ${CUSTOMIZE_ISOLINUX_CFG_KS} >> ${TMP_ISOLINUX_CFG_FILE}
cat ${CUSTOMIZE_ISOLINUX_CFG_FOOTER} >> ${TMP_ISOLINUX_CFG_FILE}
cat ${TMP_ISOLINUX_CFG_FILE} > ${ISOLINUX_CFG_FILE}
###############
## mkisofs ##
###############
INFO "generate iso\n"
COUNT=`autoIncrease`
#CENTOS_VERSION=`cd ${CENTOS_DIR} ; basename \`pwd -P\` `
#image_label="${CENTOS_VERSION}-${PARAM_PYTHON_SERVER_VERSION}-${PARAM_SERVER_PROTO_VERSION}_${CURRENT_DATE}"
#image_label=`basename ${BASE_ISO} | awk -F- '{printf("3tos%s",$2)}' ;echo "-${COUNT}-${CURRENT_DATE}"`
#output_name=`basename ${BASE_ISO} | awk -F- '{printf("%s",$1$2)}' ;echo "-${PARAM_PYTHON_SERVER_VERSION}-${PARAM_SERVER_PROTO_VERSION}_${CURRENT_DATE}${PARAM_PARTITION_LABEL}.iso"`
image_label=`basename ${BASE_ISO} | awk -F- '{printf("3tos%s",$2)}' ;echo "-V${COUNT}-${CURRENT_DATE}"`
output_name=`basename ${BASE_ISO} | awk -F- '{printf("%s",$1$2)}' ;echo "-V${COUNT}${PARAM_PARTITION_LABEL}-${PARAM_PYTHON_SERVER_VERSION}-${PARAM_SERVER_PROTO_VERSION}.iso"`
md5_name="${output_name}.md5"
if ${IS_t_SET};then
OUTPUT_DIR="${OUTPUT_DIR}/fortest"
mkdir -p ${OUTPUT_DIR}
fi
cd ${BUILD_DIR}
mkisofs -V ${image_label} -o ${OUTPUT_DIR}/${output_name} -b isolinux/isolinux.bin -c isolinux/boot.cat -no-emul-boot -boot-load-size 4 -boot-info-table -R -J -v -T ${BUILD_DIR} > ${PARAM_VERBOSE} 2>&1
md5sum ${OUTPUT_DIR}/${output_name} > ${OUTPUT_DIR}/${md5_name}
INFO "ISO -> %s\n" "${OUTPUT_DIR}/${output_name}"
###############
## CleanUp ##
###############
rm -f ${TMP_KS_CFG} ${TMP_PACKAGE_LIST} ${TMP_REPOMD_XML} ${TMP_REPO_GROUP_XML} ${TMP_ISOLINUX_CFG_FILE}
umount -d ${ROM_DIR} 2>/dev/null
| true
|
98ba56ecc628188fc1a4fb516991198fe66ca0da
|
Shell
|
yy-up/shell-script
|
/function/function2.sh
|
UTF-8
| 175
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
output(){
for (( num1 = 1; num1 <= 5; num1 ++ ))
do
echo -n "$num1 "
done
}
let "num2+=1"
while [ "$num2" -le 5 ]
do
output
echo ""
let "num2=num2+1"
done
| true
|
c27f2d6c276ed8ec5c79d67500401d7f2e8b5ccc
|
Shell
|
3nippo/RNN-example
|
/download.sh
|
UTF-8
| 283
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
mkdir src_repos
cd src_repos
input="../links"
while IFS= read -r line
do
git clone $line
done < "$input"
cd ..
find ./src_repos/ \( -name '*.cpp' -or -name '*.c' \) -exec file '{}' -i >> src_encs \;
python ./merge_src_files.py
# && sudo rm -r src_repos src_encs
| true
|
f438ccb9a048b30fca4050c5a5d64edee2f0ad13
|
Shell
|
TotalFreedomMC/TF-Scripts
|
/FreeOP Servers/stop.sh
|
UTF-8
| 599
| 2.828125
| 3
|
[
"MIT"
] |
permissive
|
#!
if screen -list | grep -q "TotalFreedom"; then
screen -X -S "TotalFreedom" stuff "stop^M"
sleep 5
if screen -list | grep -q "TotalFreedom"; then
screen -X -S "TotalFreedom" stuff "^C"
sleep 5
fi
if screen -list | grep -q "TotalFreedom"; then
screen -X -S "TotalFreedom" stuff "^C^C^C^C^C^C^C^C"
fi
if screen -list | grep -q "TotalFreedom"; then
echo "Graceful Shutdown Failed. Please run tf!kill"
else
echo "Server has been shut down succesfully."
fi
else
echo "The server is not running... Try starting it <3"
fi
| true
|
02c2e24946c9d343ea0be6bee967ff0917a15aa9
|
Shell
|
sutasu/dm
|
/qsub-wrapper/hash-ls.sh
|
UTF-8
| 3,401
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/sh
#
#___INFO__MARK_BEGIN__
##########################################################################
#
# The Contents of this file are made available subject to the terms of
# the Sun Industry Standards Source License Version 1.2
#
# Sun Microsystems Inc., March, 2001
#
#
# Sun Industry Standards Source License Version 1.2
# =================================================
# The contents of this file are subject to the Sun Industry Standards
# Source License Version 1.2 (the "License"); You may not use this file
# except in compliance with the License. You may obtain a copy of the
# License at http://gridengine.sunsource.net/Gridengine_SISSL_license.html
#
# Software provided under this License is provided on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
# WITHOUT LIMITATION, WARRANTIES THAT THE SOFTWARE IS FREE OF DEFECTS,
# MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE, OR NON-INFRINGING.
# See the License for the specific provisions governing your rights and
# obligations concerning the Software.
#
# The Initial Developer of the Original Code is: Sun Microsystems, Inc.
#
# Copyright: 2001 by Sun Microsystems, Inc.
#
# All Rights Reserved.
#
##########################################################################
#___INFO__MARK_END__
#
# example for a load sensor script
#
# returns the number of logged in users as load value "nuser"
#
# Do not forget to add the "nuser" complex value to to your "host" complex
# and to add a load or suspend threshold to your queues.
#
# The line in the host complex should be defined as follows ("shortcut" and
# "requestable" can be defined differently)
#
# name shortcut type value relop requestable consumable default
# nuser nuser INT 0 >= NO NO 0
#
#
# Be careful: Load sensor scripts are started with root permissions.
# In an admin_user system euid=0 and uid=admin_user
#
PATH=/bin:/usr/bin
ARCH=`$SGE_ROOT/util/arch`
HOST=`$SGE_ROOT/utilbin/$ARCH/gethostname -name`
ls_log_file=/tmp/ls.dbg
#printenv
# uncomment this to log load sensor startup
#echo `date`:$$:I:load sensor `basename $0` started >> $ls_log_file
SGE_CACHE_DATA_DIR=%%SGE_CACHE_DATA_DIR%%
#mkdir -p $SGE_CACHE_DATA_DIR
cd $SGE_CACHE_DATA_DIR
SGE_COMPLEX_NAME=%%SGE_COMPLEX_NAME%%
end=false
while [ $end = false ]; do
# ----------------------------------------
# wait for an input
#
read input
result=$?
if [ $result != 0 ]; then
end=true
break
fi
if [ "$input" = "quit" ]; then
end=true
break
fi
# ----------------------------------------
# send mark for begin of load report
echo "begin"
# ----------------------------------------
# send load value arch
#
complex=
for dd in $(find * -maxdepth 0 -mindepth 0); do
if [ -z "$complex" ]; then
complex=$dd
else
complex="${complex},${dd}"
fi
done
echo "$HOST:$SGE_COMPLEX_NAME:$complex"
#$HOST:hash:$(find * -maxdepth 0 -mindepth 0 -printf "%f,")
#IN=$(echo *)
#if [ "$IN" != "*" ]; then
# echo "$HOST:$SGE_COMPLEX_NAME:${IN// /,}"
#fi
# ----------------------------------------
# send mark for end of load report
echo "end"
done
# uncomment this to log load sensor shutdown
#echo `date`:$$:I:load sensor `basename $0` exiting >> $ls_log_file
| true
|
8524cb9463a358b2d185348db2ff35defe1a9719
|
Shell
|
vacaly/Alink
|
/python/src/main/python/dev/integration_tests/pyflink_tests/run_pyflink_tests.sh
|
UTF-8
| 551
| 3.4375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
set -xe
SCRIPTPATH="$(
cd "$(dirname "$0")"
pwd -P
)"
PY_ROOT="$(
cd "$SCRIPTPATH/../../../"
pwd -P
)"
function setup_env() {
FLINK_HOME=$(python3 -c 'import pyflink;print(pyflink.__path__[0])')
rsync -Pav "$PY_ROOT"/pyalink/lib/alink_*.jar "$FLINK_HOME"/lib/
}
function clean_env() {
FLINK_HOME=$(python3 -c 'import pyflink;print(pyflink.__path__[0])')
rm -rf "$FLINK_HOME"/lib/alink_*.jar
}
trap clean_env EXIT
setup_env
for file in "$SCRIPTPATH"/*.py; do
echo "Testing $file..."
python3 "$file"
done
| true
|
329a45bcc38e228bc8bcf18d9abcf31ab895529f
|
Shell
|
pieceofr/bitmarkscript
|
/clean-bitmarkd-chain.sh
|
UTF-8
| 196
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/sh
dir_bitmarkd_base="$HOME/.config/bitmarkd"
for i in 1 2 3 4 5 6
do
rm -r "${dir_bitmarkd_base}${i}/data"
echo "remove ${dir_bitmarkd_base}${i}/data"
done
echo "Remove all dirs"
| true
|
82b7c761d1d7b94e89a8c30871348482a8b00be1
|
Shell
|
tonyganch/dotfiles
|
/zsh/tmux.zsh
|
UTF-8
| 141
| 2.53125
| 3
|
[] |
no_license
|
# Convenience wrappers around tmux session management.
alias t='tmux'
alias tls='tmux ls'
# Quote "$1" so session names containing spaces or glob characters work.
function ta() { tmux attach -t "$1"; }
function tk() { tmux kill-session -t "$1"; }
function tn() { tmux new -s "$1"; }
|
70493be44a0ecff429fc6f02a776e2dae3eafb3a
|
Shell
|
cometsong/rcfiles
|
/functions/extract_funcs.sh
|
UTF-8
| 1,064
| 3.671875
| 4
|
[] |
no_license
|
# extract <archive> -- unpack an archive chosen by its file extension.
#
# Supported extensions: bz2, gz, rar, tar, tar.bz2, tbz2, tar.gz, tgz,
# zip, Z, 7z, xz, exe (via cabextract).
# With -h/--help or no argument, prints usage and returns non-zero.
# Returns non-zero for a missing file; prints a message for an unknown
# extension.
extract()
{
  if [[ "x$1" == "x-h" || "x$1" == "x--help" || "x$1" == "x" ]]; then
    echo "Usage: extract filename"
    echo -n "  filename is of type: "
    echo "bz2, gz, rar, tar, tar.bz2, tbz2, tar.gz, tgz, zip, Z, 7z, xz, exe"
    # Bug fix: previously this branch fell through -- with no argument,
    # "[ -f ]" is true (one-operand test), so the case statement also ran
    # and printed a spurious "'' cannot be extracted" line.
    return 1
  fi
  if [ -f "$1" ] ; then
    # $1 is quoted throughout so filenames with spaces/globs work.
    case "$1" in
      *.tar.bz2|*.tbz2)  tar xvjf "$1"    ;;
      *.tar.gz|*.tgz)    tar xvzf "$1"    ;;
      *.tar.xz)          tar xvJf "$1"    ;;
      *.tar)             tar xvf "$1"     ;;
      *.bz2)             bunzip2 "$1"     ;;
      *.gz)              gunzip "$1"      ;;
      *.rar)             unrar x "$1"     ;;
      *.zip)             unzip "$1"       ;;
      *.Z)               uncompress "$1"  ;;
      *.7z)              7z x "$1"        ;;
      *.xz)              unxz "$1"        ;;
      *.exe)             cabextract "$1"  ;;
      *) echo "'$1' cannot be extracted via >extract<" ;;
    esac
  else
    echo "'$1' is not a valid file"
    return 1
  fi
}
| true
|
99bdb82826d4201aadd4d964401ef251d71eac5f
|
Shell
|
ApolloTang/my-custom-scripts
|
/script-for-apps/forklift/forklift--open-new-window/forklift-new-window.app/Contents/MacOS/forklift-new-window.sh
|
UTF-8
| 1,256
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/bash
#/ Open a new forklift window
set -e #/ Exit immediately if a command exits with a non-zero status.
# The here-document below is an AppleScript program piped to osascript.
# It either launches ForkLift (when not running) or sends Cmd-N to the
# running instance to open a new window.  The heredoc body is runtime
# data consumed by osascript -- do not edit it as shell code.
(cat<<EOF
# AppleScript Start
# -------------------
(*
Author: https://github.com/ApolloTang
This script open a new forklift window
Also need to add some (or all) of the following 'Security & Privacy' setting:
Goto:
System Preferences -> Security & Privacy -> Accessibility
System Preferences -> Security & Privacy -> Input Monitoring
Add:
/System/Applications/Utilities/Script Editor.app
/System/Library/CoreServices/AppleScript Utility
/System/Library/CoreServices/System Events.app
/usr/local/bin/bash
/Applications/Setapp/ForkLift.app
Reference:
https://apple.stackexchange.com/questions/394275/com-automator-runner-xpc-is-not-allowed-to-send-keystrokes#
*)
if application "ForkLift" is not running then
# This will launch Forklift if it's not running
# (But it's not making it the frontmost/focused application)
activate application "ForkLift"
else
activate application "ForkLift"
tell application "System Events"
tell process "ForkLift"
keystroke "n" using {command down}
end tell
end tell
end if
# ^^^^^^^^^^^^^^^^^^^
# AppleScript End
EOF
) | osascript
#/ EOF ---
| true
|
2dbd95fded894c0d67c0d92f3ecdfb019cd3953a
|
Shell
|
bboozzoo/dotfiles
|
/config/sys/etc/profile.d/firefox-cache-tmpfs.sh
|
UTF-8
| 136
| 2.6875
| 3
|
[] |
no_license
|
# Create a per-user firefox cache directory under /run/user/<uid> (tmpfs)
# so the browser cache lives in RAM.  Sourced from /etc/profile.d at login.
RUN_USER_PATH="/run/user"
user_id=$(id -u)
if [[ -e $RUN_USER_PATH ]]; then
    # Fix: use $RUN_USER_PATH instead of repeating the literal path
    # (the original hard-coded /run/user here, defeating the variable).
    # Errors are deliberately ignored (e.g. /run not yet writable).
    mkdir -p "$RUN_USER_PATH/$user_id/firefox-cache" 2>/dev/null
fi
| true
|
5fc0c01e0d40b956a9b831258d00fd9084ff9a0e
|
Shell
|
nhs-digital-gp-it-futures/BuyingCatalogueService
|
/src/NHSD.BuyingCatalogue.Database.Deployment/entrypoint.sh
|
UTF-8
| 1,240
| 3.453125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Wait for the MSSQL server, run pre-deployment SQL, publish the dacpac,
# then run post-deployment SQL (plus integration-test fixtures on demand).
#
# Required env: DB_SERVER, DB_NAME, SA_PASSWORD
# Optional env: PORT (default 1433), SA_USERNAME (default sa),
#               SQLPACKAGEARGS, INTEGRATION_TEST=TRUE

# database port, defaults to mssql's default port.
PORT=${PORT:-1433}
SA_USERNAME=${SA_USERNAME:-sa}

# Wait (up to ~30 attempts, 1s apart) for the server to accept connections.
export STATUS=1
i=0
while [[ $STATUS -ne 0 ]] && [[ $i -lt 30 ]]; do
  # Fix: was `i=$i+1`, which concatenated a "0+1+1..." string and only
  # worked because [[ ]] evaluates its operands arithmetically.
  i=$((i+1))
  sleep 1
  /opt/mssql-tools/bin/sqlcmd -S "$DB_SERVER,$PORT" -t 1 -U "$SA_USERNAME" -P "$SA_PASSWORD" -Q "SELECT 1;" &>/dev/null
  STATUS=$?
done

if [ $STATUS -ne 0 ]; then
  echo "Error: MSSQL SERVER took more than thirty seconds to start up."
  exit 1
fi

cd PreDeployment
/opt/mssql-tools/bin/sqlcmd -S "$DB_SERVER,$PORT" -U "$SA_USERNAME" -P "$SA_PASSWORD" -d "$DB_NAME" -I -i "PreDeployment.sql"
cd ..

# NOTE: $SQLPACKAGEARGS is intentionally unquoted so it can carry several
# extra arguments.
/sqlpackage/sqlpackage /Action:publish /SourceFile:NHSD.BuyingCatalogue.Database.Deployment.dacpac /TargetServerName:"$DB_SERVER,$PORT" /TargetDatabaseName:"$DB_NAME" /TargetUser:"$SA_USERNAME" /TargetPassword:"$SA_PASSWORD" $SQLPACKAGEARGS

cd PostDeployment
/opt/mssql-tools/bin/sqlcmd -S "$DB_SERVER,$PORT" -U "$SA_USERNAME" -P "$SA_PASSWORD" -d "$DB_NAME" -I -i "PostDeployment.sql"

# ${VAR^^} (uppercase) makes the flag case-insensitive.
if [ "${INTEGRATION_TEST^^}" = "TRUE" ]; then
  cd IntegrationTests
  /opt/mssql-tools/bin/sqlcmd -S "$DB_SERVER,$PORT" -U "$SA_USERNAME" -P "$SA_PASSWORD" -d "$DB_NAME" -I -i "PostDeployment.sql"
fi

printf "\nDatabase setup complete\n"
| true
|
0dc785804cd0656ca843d901704c86bfa6af855a
|
Shell
|
joshukraine/middleman-gulp
|
/bin/setup
|
UTF-8
| 350
| 2.875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env sh
# Bootstrap the project: ensure Bundler exists, then install Ruby gems
# and Node packages.
set -e

# `gem query -i` prints "true"/"false"; install Bundler only when absent.
bundler_installed="$(gem query -i -n bundler)"
if [ "$bundler_installed" = "false" ]; then
  echo "Installing Bundler..."
  gem install bundler
fi

echo "Installing Ruby gem dependencies..."
bundle install

echo "Installing Node package dependencies..."
npm install
| true
|
385ec1ce3db4918e121db8d4e0c47d1db0502555
|
Shell
|
nsrCodes/cdd
|
/dist/mac-zsh/install.sh
|
UTF-8
| 741
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/zsh
# Install the cdd helper scripts into ~/bin/cdd and, on first install,
# register PATH and alias entries in ~/.zshrc.
SRC="./bin"
INSTALL_LOCAL="$HOME/bin/cdd"
mkdir -p $INSTALL_LOCAL
cp "${SRC}/cdd.sh" $INSTALL_LOCAL
cp "${SRC}/cdo.sh" $INSTALL_LOCAL
cp "${SRC}/gcdd" $INSTALL_LOCAL
# `unalias` fails (non-zero) when the alias does not exist yet; that
# failure is how a first-time install is detected below.
unalias cdd 2>/dev/null
# checking if output of last command was error
# => alias was not present
if [ "$?" -ne 0 ]; then
# First install: append PATH export and aliases to ~/.zshrc.
# NOTE(review): `$?` must be tested immediately after `unalias`; do not
# insert commands between them.
echo "\n\n# cdd setup" >> ~/.zshrc
echo "\nexport PATH=\"\$PATH:${INSTALL_LOCAL}\"" >> ~/.zshrc
. ~/.zshrc
echo "alias cdd='. ${INSTALL_LOCAL}/cdd.sh'" >> ~/.zshrc
echo "alias cdo='. ${INSTALL_LOCAL}/cdo.sh'" >> ~/.zshrc
echo "alias gcdd='${INSTALL_LOCAL}/gcdd'" >> ~/.zshrc
# updating to -rwxr-xr-x in case that wasn't already the case
chmod -R 755 $INSTALL_LOCAL
else
echo 'Alias cdd is already present'
fi
. ~/.zshrc
| true
|
3ec5d813ce9e645849b399d6723e2f3eb3769894
|
Shell
|
t50504/CLASHanalyst
|
/suite/pir_bin/step4.sh
|
UTF-8
| 4,510
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/bash
#time bash step4.sh -i step3.csv -o step4.csv -r ~/pra/input/mRNA_sequence.csv -m 0 -b 1
# Map remaining read fragments (from step3) against an mRNA reference with
# bowtie, then merge the alignment results back into the input CSV.
shell_folder=$(cd "$(dirname "$0")";pwd)
. ${shell_folder}/framefunction.sh
. ${shell_folder}/filefunction.sh
# check whether use options or not
# ---------------------------------------
if [ $# -lt 1 ];then
echo "Try '$0 -h' for more information."
exit 1
fi
# default parameter
#-----------------------------------
mismatch=0
bflag=1
outfile=step4.csv
# -h option
function usage(){
cat << -EOF-
Usage:
$0  -i <input> -o <output> -r <reference> -m <mismatch> -b <build>
Options:
-h  display this help and exit
-i  input file(csv)
(need "remain0","remain_seq" column name)
-o  output file(csv)
default step4.csv
-r  reference file(csv)
(need "sequence" column name)
-m  mismatch count(0,1,2)
default 0
-b  1 use bowtie build
0 not use bowtie build
default 1
-EOF-
exit 1
}
# get options
# ---------------------------------------
# Each path option is normalized to an absolute path so later cd's are safe.
while getopts ":i:o:r:m:s:b:h" opt
do
case $opt in
h)
usage
;;
i)
infile="$(cd $(dirname $OPTARG);pwd)/$(basename $OPTARG)"
;;
o)
outfile="$(cd $(dirname $OPTARG);pwd)/$(basename $OPTARG)"
;;
r)
refile="$(cd $(dirname $OPTARG);pwd)/$(basename $OPTARG)"
;;
m)
mismatch=$OPTARG
;;
b)
bflag=$OPTARG
;;
*)
echo -e "$0: invalid option -- 'x'\nTry '$0 -h' for more information."
exit 1
;;
esac
done
# Validate mandatory options and the mismatch range (bowtie -v takes 0-2).
if [ ! $infile ];then
echo "you need to input '-i <input>'"
exit 1
fi
if [ ! $refile ];then
echo "you need to input '-r <reference>'"
exit 1
fi
if [ $mismatch -gt 2 ];then
echo "you just can use mismatch 0,1,2"
exit 1
fi
# path/file
# ---------------------------------------
# Derived working paths; intermediate files live next to the output file.
temp_path=$(dirname $outfile)
ref=${refile%.*}
inp=${infile%.*}
base_ref=$(basename $ref)
base_inp=$(basename $inp)
bowtie_path=${temp_path}/bowtieFile
id_file_in="${temp_path}/idFile/${base_inp}.csv"
id_file_ref="${temp_path}/idFile/${base_ref}.csv"
bowtie_extract=$bowtie_path/Reads_col2.bwt
merge_RNA=${temp_path}"/merge_"${base_ref}".csv"
# check file/directory exist
# (checkFile/checkNewline/createDir/... come from the sourced helper files)
checkFile $infile
checkFile $refile
# check the \n format,if file from dos
# use dos2unix
checkNewline $infile
checkNewline $refile
# check dir exist or not
# if not,create one
createDir ${temp_path}/bowtieFile
createDir ${temp_path}/idFile
# transfer csv file to fasta file
# at same Dir
csvTofasta $refile transcript sequence
# for CPU
CPU_num
cpu_num=$?
echo -------------USE CPU number : ${cpu_num} ----------------
# Locate the required columns in the input CSV, then dump id/sequence
# pairs as FASTA (RNA U bases converted to DNA T for bowtie).
find_col remain0 $infile
remain_id_col=$?
find_col remain_seq $infile
remain_seq_col=$?
awk -F, -v id=$remain_id_col -v seq=$remain_seq_col 'NR==1 {next}{printf ">%s\n%s\n",$id,$seq}' $infile | sed "s/U/T/g" > ${temp_path}"/"${base_inp}"temp.fasta"
# create a id,seq,seq_len file
# replace sequence U to T
# at idFile/
addID $refile transcript $id_file_ref
# bowtie
echo "----------bowtie-----------------"
if [ "$bflag" = "1" ]
then
bowtie-build --threads $cpu_num $ref"temp.fasta" "${bowtie_path}/${base_ref}.fa"  > /dev/null 2>&1
else
echo "you don't use bowtie-build"
fi
# -a: report all alignments; -v N: allow N mismatches; --norc: forward
# strand only.
bowtie --threads $cpu_num -f -a -v $mismatch --norc ${bowtie_path}/${base_ref}".fa" ${temp_path}"/"${base_inp}"temp.fasta" $bowtie_path/Reads2.bwt > /dev/null 2>&1
# check the bowtie output is empty or not
declare -i line=0
line=$(cat $bowtie_path/Reads2.bwt|wc -l)
echo ------------------line $line
if [ $line -eq 0 ];then
echo ------not match any sequence---------
exit 1
fi
# process Reads.bwt
# ---------------------------------------
# Reads.bwt column : input_id,ref_id,pos,mismatch_count
echo "------------extract botwie output--------------------"
echo mismatch is $mismatch
echo "remain0,transcript0,rem_tran_target_pos,rem_tran_mismatch_count" > $bowtie_extract
if [ $mismatch -gt 0 ];then
awk -f ${shell_folder}/mismatch.awk $bowtie_path/Reads2.bwt >> $bowtie_extract
else
# Positions are converted from bowtie's 0-based offset to a 1-based
# "start-end" range string.
awk '{printf "%s,%s,%s-%s,0\n",$1,$3,$4+1,$4+length($6)}' $bowtie_path/Reads2.bwt >> $bowtie_extract
fi
echo "---------merge file-------------"
merge_csv $id_file_ref 1 $bowtie_extract 2 $merge_RNA
find_col remain0 $merge_RNA
RNA_remain_col=$?
echo aaa:$RNA_remain_col
merge_csv $infile $remain_id_col $merge_RNA $RNA_remain_col $outfile
# Clean up intermediate FASTA and merge files.
rm ${temp_path}"/"${base_inp}"temp.fasta" ${ref}"temp.fasta" $merge_RNA
| true
|
3d6f38cf1840440a3ad7918ebe7fc5a11da9c687
|
Shell
|
joshdick/dotfiles
|
/.config/yadm/bootstrap
|
UTF-8
| 538
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/zsh
set -ue

# [yadm bootstrap script](https://yadm.io/docs/bootstrap)
# for [Josh Dick's dotfiles](https://github.com/joshdick/dotfiles)

# This script assumes that the `$HOME/.dotfiles_utils/install.sh` script
# has previously been run.

# Print a bold blue "==>" section header followed by the given message.
function heading() {
  echo -e "\e[1m\e[34m==>\e[39m $1\e[0m"
}

# Refresh treesitter parsers only when Neovim is installed.
if hash nvim &> /dev/null; then
  heading "[nvim] Updating Neovim treesitter parsers..."
  nvim -c 'TSUpdate | q'
fi

heading "[terminfo] Installing terminfo definitons..."
$HOME/.dotfiles_utils/terminfo-script/terminfo_install.sh
| true
|
82f391203deaacaae19c306e6752bb903d0a532f
|
Shell
|
haoqing0110/summary
|
/ocm/placement/demo/placement-strategy/demo.sh
|
UTF-8
| 1,582
| 2.984375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Scripted demo of OCM placement decision-group strategies; relies on
# demo-magic's p/pe helpers for "typed" playback.

########################
# include the magic
########################
. ../demo-magic.sh

########################
# Configure the options
########################

#
# speed at which to simulate typing. bigger num = faster
#
# TYPE_SPEED=20

#
# custom prompt
#
# see http://www.tldp.org/HOWTO/Bash-Prompt-HOWTO/bash-prompt-escape-sequences.html for escape sequences
#
DEMO_PROMPT="${GREEN}➜ ${CYAN}\W "

# text color
# DEMO_CMD_COLOR=$BLACK

# clean env
KUBECONFIG="/root/.kube/config"
clusteradm clusterset bind global --namespace default
#./delete.sh 310
#./create.sh 310
#./labels.sh 1 10 prod-canary-west=true
#./labels.sh 11 20 prod-canary-east=true

# play <placement-name>: show the YAML, apply it, then dump the resulting
# decision groups and per-decision labels.
function play() {
  pe "cat $1.yaml"
  pe "kubectl apply -f $1.yaml"
  pe "kubectl get placement $1 -ojson | jq '.status.decisionGroups[]'"
  pe "kubectl get placementdecision -l cluster.open-cluster-management.io/placement=$1 -ojson | jq '.items[] | .metadata.name, .metadata.labels '"
}

clear

p "Placment strategy demo."
pe "clusteradm get clustersets -otable"
pe "kubectl get managedcluster -l prod-canary-west=true"
pe "kubectl get managedcluster -l prod-canary-east=true"

p "Case 1: placement has 2 decisionGroups defined."
play placement1

p "Case 2: placement has 2 decisionGroups defined, and want to group the rest of clusters with length 150."
play placement2

p "Case 3: placement does not have any decisionGroups defined."
play placement3

p "Case 4: placement does not have any decisionGroups defined, but want to group the clusters with length 150."
play placement4

pe "- end -"
clear
| true
|
f270f3751bb6b1d884df94982aeb6d321daca56e
|
Shell
|
fastmailops/prosody
|
/configure
|
UTF-8
| 8,709
| 3.78125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Configure script for Prosody: detects Lua, records build settings in
# config.unix, and supports per-OS presets via --ostype.

# Defaults

PREFIX=/usr/local
SYSCONFDIR="$PREFIX/etc/prosody"
DATADIR="$PREFIX/var/lib/prosody"
LUA_SUFFIX=""
LUA_DIR="/usr"
LUA_BINDIR="/usr/bin"
LUA_INCDIR="/usr/include"
LUA_LIBDIR="/usr/lib"
IDN_LIB=idn
ICU_FLAGS="-licui18n -licudata -licuuc"
OPENSSL_LIB=crypto
CC=gcc
CXX=g++
LD=gcc
RUNWITH=lua

CFLAGS="-fPIC -Wall"
LDFLAGS="-shared"
IDN_LIBRARY=idn
# Help

show_help() {
cat <<EOF
Configure Prosody prior to building.

--help                      This help.
--ostype=OS                 Use one of the OS presets.
                            May be one of: debian, macosx, linux, freebsd
--prefix=DIR                Prefix where Prosody should be installed.
                            Default is $PREFIX
--sysconfdir=DIR            Location where the config file should be installed.
                            Default is \$PREFIX/etc/prosody
--datadir=DIR               Location where the server data should be stored.
                            Default is \$PREFIX/var/lib/prosody
--lua-suffix=SUFFIX         Versioning suffix to use in Lua filenames.
                            Default is "$LUA_SUFFIX" (lua$LUA_SUFFIX...)
--with-lua=PREFIX           Use Lua from given prefix.
                            Default is $LUA_DIR
--runwith=BINARY            What Lua binary to set as runtime environment.
                            Default is $RUNWITH
--with-lua-include=DIR      You can also specify Lua's includes dir.
                            Default is \$LUA_DIR/include
--with-lua-lib=DIR          You can also specify Lua's libraries dir.
                            Default is \$LUA_DIR/lib
--with-idn=LIB              The name of the IDN library to link with.
                            Default is $IDN_LIB
--idn-library=(idn|icu)     Select library to use for IDNA functionality.
                            idn: use GNU libidn (default)
                            icu: use ICU from IBM
--with-ssl=LIB              The name of the SSL to link with.
                            Default is $OPENSSL_LIB
--cflags=FLAGS              Flags to pass to the compiler
                            Default is $CFLAGS
--ldflags=FLAGS             Flags to pass to the linker
                            Default is $LDFLAGS
--c-compiler=CC             The C compiler to use when building modules.
                            Default is $CC
--linker=CC                 The linker to use when building modules.
                            Default is $LD
--require-config            Will cause Prosody to refuse to run when
                            it fails to find a configuration file
EOF
}

# Option parsing: every --key=value argument is split with sed; the
# *_SET flags remember which values were given explicitly so the
# post-processing below can derive the rest.
while [ "$1" ]
do
value="`echo $1 | sed 's/[^=]*=\(.*\)/\1/'`"
if echo "$value" | grep -q "~"
then
echo
echo '*WARNING*: the "~" sign is not expanded in flags.'
echo 'If you mean the home directory, use $HOME instead.'
echo
fi
case "$1" in
--help)
show_help
exit 0
;;
--prefix=*)
PREFIX="$value"
PREFIX_SET=yes
;;
--sysconfdir=*)
SYSCONFDIR="$value"
SYSCONFDIR_SET=yes
;;
--ostype=*)
# OS presets: each branch overrides Lua paths/flags for that platform.
OSTYPE="$value"
OSTYPE_SET=yes
if [ "$OSTYPE" = "debian" ]; then
LUA_SUFFIX="5.1";
LUA_SUFFIX_SET=yes
RUNWITH="lua5.1"
LUA_INCDIR=/usr/include/lua5.1;
LUA_INCDIR_SET=yes
CFLAGS="$CFLAGS -D_GNU_SOURCE"
fi
if [ "$OSTYPE" = "macosx" ]; then
LUA_INCDIR=/usr/local/include;
LUA_INCDIR_SET=yes
LUA_LIBDIR=/usr/local/lib
LUA_LIBDIR_SET=yes
LDFLAGS="-bundle -undefined dynamic_lookup"
fi
if [ "$OSTYPE" = "linux" ]; then
LUA_INCDIR=/usr/local/include;
LUA_INCDIR_SET=yes
LUA_LIBDIR=/usr/local/lib
LUA_LIBDIR_SET=yes
CFLAGS="-Wall -fPIC -D_GNU_SOURCE"
LDFLAGS="-shared"
fi
if [ "$OSTYPE" = "freebsd" -o "$OSTYPE" = "openbsd" ]; then
LUA_INCDIR="/usr/local/include/lua51"
LUA_INCDIR_SET=yes
CFLAGS="-Wall -fPIC -I/usr/local/include"
LDFLAGS="-I/usr/local/include -L/usr/local/lib -shared"
LUA_SUFFIX="-5.1"
LUA_SUFFIX_SET=yes
LUA_DIR=/usr/local
LUA_DIR_SET=yes
fi
if [ "$OSTYPE" = "openbsd" ]; then
LUA_INCDIR="/usr/local/include";
fi
;;
--datadir=*)
DATADIR="$value"
DATADIR_SET=yes
;;
--require-config)
REQUIRE_CONFIG=yes
;;
--lua-suffix=*)
LUA_SUFFIX="$value"
LUA_SUFFIX_SET=yes
;;
--with-lua=*)
LUA_DIR="$value"
LUA_DIR_SET=yes
;;
--with-lua-include=*)
LUA_INCDIR="$value"
LUA_INCDIR_SET=yes
;;
--with-lua-lib=*)
LUA_LIBDIR="$value" LUA_LIBDIR_SET=yes
;;
--with-idn=*)
IDN_LIB="$value"
;;
--idn-library=*)
IDN_LIBRARY="$value"
;;
--with-ssl=*)
OPENSSL_LIB="$value"
;;
--cflags=*)
CFLAGS="$value"
;;
--ldflags=*)
LDFLAGS="$value"
;;
--c-compiler=*)
CC="$value"
;;
--linker=*)
LD="$value"
;;
--runwith=*)
RUNWITH="$value"
;;
*)
echo "Error: Unknown flag: $1"
exit 1
;;
esac
shift
done

# When only --prefix was given, derive sysconfdir/datadir from it
# (with /usr mapping to the conventional /etc and /var locations).
if [ "$PREFIX_SET" = "yes" -a ! "$SYSCONFDIR_SET" = "yes" ]
then
if [ "$PREFIX" = "/usr" ]
then SYSCONFDIR=/etc/prosody
else SYSCONFDIR=$PREFIX/etc/prosody
fi
fi

if [ "$PREFIX_SET" = "yes" -a ! "$DATADIR_SET" = "yes" ]
then
if [ "$PREFIX" = "/usr" ]
then DATADIR=/var/lib/prosody
else DATADIR=$PREFIX/var/lib/prosody
fi
fi

# find_program NAME: walk $PATH manually (portable sh, no `which`) and
# print the directory containing NAME, or nothing when absent.
find_program() {
path="$PATH"
item="`echo "$path" | sed 's/\([^:]*\):.*/\1/'`"
path="`echo "$path" | sed -n 's/[^:]*::*\(.*\)/\1/p'`"
found="no"
while [ "$item" ]
do
if [ -e "$item/$1" ]
then
found="yes"
break
fi
item="`echo "$path" | sed 's/\([^:]*\):.*/\1/'`"
path="`echo "$path" | sed -n 's/[^:]*::*\(.*\)/\1/p'`"
done
if [ "$found" = "yes" ]
then
echo "$item"
else
echo ""
fi
}

# Probe for a Lua interpreter, trying common version suffixes unless one
# was forced with --lua-suffix.
if [ "$LUA_SUFFIX_SET" != "yes" ]
then
for suffix in "5.1" "51" ""
do
LUA_SUFFIX="$suffix"
if [ "$LUA_DIR_SET" = "yes" ]
then
if [ -e "$LUA_DIR/bin/lua$suffix" ]
then
find_lua="$LUA_DIR"
fi
else
find_lua=`find_program lua$suffix`
fi
if [ "$find_lua" ]
then
echo "Lua interpreter found: $find_lua/lua$suffix..."
break
fi
done
fi

if ! [ "$LUA_DIR_SET" = "yes" ]
then
echo -n "Looking for Lua... "
if [ ! "$find_lua" ]
then
find_lua=`find_program lua$LUA_SUFFIX`
echo "lua$LUA_SUFFIX found in \$PATH: $find_lua"
fi
if [ "$find_lua" ]
then
LUA_DIR=`dirname $find_lua`
LUA_BINDIR="$find_lua"
else
echo "lua$LUA_SUFFIX not found in \$PATH."
echo "You may want to use the flags --with-lua and/or --lua-suffix. See --help."
exit 1
fi
fi

if ! [ "$LUA_INCDIR_SET" = "yes" ]
then
LUA_INCDIR="$LUA_DIR/include"
fi

if ! [ "$LUA_LIBDIR_SET" = "yes" ]
then
LUA_LIBDIR="$LUA_DIR/lib"
fi

if [ "$LUA_DIR_SET" = "yes" ]
then
LUA_BINDIR="$LUA_DIR/bin"
fi

if [ "$IDN_LIBRARY" = "icu" ]
then
IDNA_LIBS="$ICU_FLAGS"
CFLAGS="$CFLAGS -DUSE_STRINGPREP_ICU"
fi
if [ "$IDN_LIBRARY" = "idn" ]
then
IDNA_LIBS="-l$IDN_LIB"
fi

echo -n "Checking Lua includes... "
lua_h="$LUA_INCDIR/lua.h"
if [ -e "$lua_h" ]
then
echo "lua.h found in $lua_h"
else
echo "lua.h not found (looked in $lua_h)"
echo "You may want to use the flag --with-lua-include. See --help."
exit 1
fi

# find_helper DESCRIPTION CANDIDATE...: locate the first available
# candidate program in PATH, or exit with an explanatory error.
find_helper() {
explanation="$1"
shift
tried="$*"
while [ "$1" ]
do
found=`find_program "$1"`
if [ "$found" ]
then
echo "$1 found at $found"
HELPER=$1
return
fi
shift
done
echo "Could not find a $explanation. Tried: $tried."
echo "Make sure one of them is installed and available in your PATH."
exit 1
}

# Write config

echo "Writing configuration..."
echo

# config.unix is consumed by the Makefile; the heredoc expands the
# variables computed above.
cat <<EOF > config.unix
# This file was automatically generated by the configure script.
# Run "./configure --help" for details.

PREFIX=$PREFIX
SYSCONFDIR=$SYSCONFDIR
DATADIR=$DATADIR
LUA_SUFFIX=$LUA_SUFFIX
LUA_DIR=$LUA_DIR
LUA_INCDIR=$LUA_INCDIR
LUA_LIBDIR=$LUA_LIBDIR
LUA_BINDIR=$LUA_BINDIR
REQUIRE_CONFIG=$REQUIRE_CONFIG
IDN_LIB=$IDN_LIB
IDNA_LIBS=$IDNA_LIBS
OPENSSL_LIB=$OPENSSL_LIB
CFLAGS=$CFLAGS
LDFLAGS=$LDFLAGS
CC=$CC
CXX=$CXX
LD=$LD
RUNWITH=$RUNWITH

EOF

echo "Installation prefix: $PREFIX"
echo "Prosody configuration directory: $SYSCONFDIR"
echo "Using Lua from: $LUA_DIR"

make clean > /dev/null 2> /dev/null

echo
echo "Done. You can now run 'make' to build."
echo
| true
|
d14247ba5a84fc9046e5689e16193bea463b3bdf
|
Shell
|
mrlynn/gophercon2019
|
/setup.sh
|
UTF-8
| 296
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
# basic setup script to automate preparation of an environment for a golang project
# Prompts for a project name, creates the classic GOPATH layout
# (src/pkg/bin), and fetches the MongoDB Go driver.

# Fix: `echo "...\c"` printed a literal "\c" under bash; printf is portable.
printf 'Project name: '
# -r keeps backslashes in the typed name literal.
read -r ans
mkdir -p "./${ans}"
export GOPATH="${PWD}/${ans}"
echo "Current working directory and GOPATH: ${GOPATH}"
# Abort instead of creating src/pkg/bin in the wrong directory.
cd "./${ans}" || exit 1
mkdir src pkg bin
go get go.mongodb.org/mongo-driver
| true
|
d1ea9d2487e6b0574eb4717a7fd367559e3c0cbe
|
Shell
|
ivanvtimofeev/tf-container-builder
|
/containers/external/haproxy/docker-entrypoint.sh
|
UTF-8
| 529
| 3.546875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Container entrypoint: default to running haproxy (via its systemd
# wrapper) while still allowing an arbitrary command to be passed.
set -e

# first arg is `-f` or `--some-option`
# (${1#-} strips a leading dash; if that changed the word, $1 was a flag,
# so prepend "haproxy" to treat the flags as haproxy options.)
if [ "${1#-}" != "$1" ]; then
	set -- haproxy "$@"
fi

if [ "$1" = 'haproxy' ]; then
	shift # "haproxy"
	# if the user wants "haproxy", let's add a couple useful flags
	#   haproxy-systemd-wrapper -- "master-worker mode" (similar to the new "-W" flag; allows for reload via "SIGUSR2")
	#   -db -- disables background mode
	# NOTE(review): parsing `whereis` output with awk assumes the binary is
	# the second field; fragile if whereis finds nothing -- confirm.
	set -- "$(whereis haproxy-systemd-wrapper | awk '{print($2)}')" -p /run/haproxy.pid -db "$@"
fi

# Replace this shell with the final command so signals reach it directly.
exec "$@"
| true
|
ba8fcd9fa8a0540833a75dc66bff5f8e231a280a
|
Shell
|
wangjstu/learnShell
|
/github/meiappCheckoutFromGithub.sh
|
UTF-8
| 1,254
| 3.875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Helper for syncing this repo with GitHub: one argument pulls, two
# arguments pull+commit+push with the second argument as the message.

#check user is not root
[ "$(id -u)" == "0" ] && echo "Error: You must not be root to run this script." && exit 1

# RUNCMD CMD...: log the command, then evaluate it between visual markers.
# NOTE(review): `eval $@` re-parses its arguments as shell code -- callers
# must pass pre-quoted strings; do not feed untrusted input here.
function RUNCMD() {
echo "[$(date +'%Y-%m-%d %H:%M:%S:%N')][notice] $*"
echo -e "\033[41;33m ####################EVAL LOG####################### \033[0m"
eval $@
echo -e "\033[41;33m ####################EVAL LOG####################### \033[0m"
}

# NOTICE MSG...: timestamped informational log line.
function NOTICE() {
echo >/dev/null && echo "[$(date +'%Y-%m-%d %H:%M:%S:%N')][notice] $*"
}

# ERROR MSG...: timestamped error log line (red background).
function ERROR() {
echo >/dev/null && echo -e "\033[41;30m [$(date +'%Y-%m-%d %H:%M:%S:%N')][error] $* \033[0m"
}

# Git_Pull: fast-forward the local master from origin.
function Git_Pull() {
RUNCMD "git pull origin master"
}

# Git_Push MODE MESSAGE: pull, stage everything, commit with MESSAGE
# ($2), and push to origin master.
function Git_Push() {
if [ $# -ne 2 ]; then
ERROR "Usage: sh meiappCheckoutFromGithub.sh Git_Push commit_comments!"
exit 1;
else
RUNCMD "git pull origin master && git add --all && git commit -m \"$2\" && git push origin master"
fi
}

NOTICE "----------------------BEGIND-----------------"
# Dispatch on argument count: 1 arg -> pull, 2 args -> push, else usage.
case $# in
1)
{
NOTICE "run in Git_Pull"
Git_Pull
}
;;
2)
{
NOTICE "run in Git_Push"
Git_Push "$@"
}
;;
*)
{
NOTICE "run nothing"
echo "please Usage:"
echo "$0 Git_Push Git_Push_Comment"
echo "$0 Git_Pull"
}
esac
NOTICE "-----------------------END-------------------"
exit 0
| true
|
218a3bf20a8724361e48a139396f9f11f52f8e9a
|
Shell
|
kmstumpff/Scripts
|
/Unix/Setup/findservers
|
UTF-8
| 3,686
| 3.84375
| 4
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
########################################################
# This script was made to tell the user if the local
# Seapine servers are running.  Copy this script to
# /usr/bin so it can be run from any working directory.
# The pidof... functions are reused from the spls
# script.  If the server process is found, it outputs
# a message and the pid of the process.
#
# Fix: the Surround proxy report previously printed
# $pidss (the Surround server pid) instead of $pidsp.
########################################################
# function to find the pid of the license server
pidofspls() {
    ps -ef > /tmp/ps.tbl 2> /dev/null
    pid=`awk -F" " '/\/splicsvr/ {print $2}' /tmp/ps.tbl`
    rm -rf /tmp/ps.tbl > /dev/null 2>&1
    if [ "$pid" != "" ]
    then
        echo $pid
        return 0
    fi
}
########################################################
# function to find the pid of the surround server
pidofsurroundscm() {
    ps -ef > /tmp/ps.tbl 2> /dev/null
    pid=`awk -F" " '/\/scmserver/ {print $2}' /tmp/ps.tbl`
    rm -rf /tmp/ps.tbl > /dev/null 2>&1
    if [ "$pid" != "" ]
    then
        echo $pid
        return 0
    fi
}
########################################################
# function to find the pid of the surround web server
pidofsurroundscmweb() {
    ps -ef > /tmp/ps.tbl 2> /dev/null
    pid=`awk -F" " '/\/sscmweb.jar/ {print $2}' /tmp/ps.tbl`
    rm -rf /tmp/ps.tbl > /dev/null 2>&1
    if [ "$pid" != "" ]
    then
        echo $pid
        return 0
    fi
}
########################################################
# function to find the pid of the surround proxy server
pidofsurroundscmproxy() {
    ps -ef > /tmp/ps.tbl 2> /dev/null
    pid=`awk -F" " '/\/scmproxyserver/ {print $2}' /tmp/ps.tbl`
    rm -rf /tmp/ps.tbl > /dev/null 2>&1
    if [ "$pid" != "" ]
    then
        echo $pid
        return 0
    fi
}
########################################################
# function to find the pid of the testtrack server
pidofttstudio() {
    ps -ef > /tmp/ps.tbl 2> /dev/null
    pid=`awk -F" " '/\/ttserver/ {print $2}' /tmp/ps.tbl`
    rm -rf /tmp/ps.tbl > /dev/null 2>&1
    if [ "$pid" != "" ]
    then
        echo $pid
        return 0
    fi
}
########################################################
# Use functions to set environment variables
pidls=`pidofspls $1`
pidss=`pidofsurroundscm $1`
pidsw=`pidofsurroundscmweb $1`
pidsp=`pidofsurroundscmproxy $1`
pidtt=`pidofttstudio $1`
########################################################
# Output the results
echo ""
########################################################
# License Server
if [ "$pidls" != "" ]
then
    echo "The License Server is running!"
    echo "The pid of spls is: $pidls"
else
    echo "The License Server is not running!"
fi
echo ""
########################################################
# Surround Server
if [ "$pidss" != "" ]
then
    echo "The Surround server is running!"
    echo "The pid of surroundscm is: $pidss"
else
    echo "The Surround server is not running!"
fi
echo ""
########################################################
# Surround Web Server
if [ "$pidsw" != "" ]
then
    echo "The Surround web server is running!"
    echo "The pids of surroundscmweb is: $pidsw"
else
    echo "The Surround web server is not running!"
fi
echo ""
########################################################
# Surround Proxy Server
if [ "$pidsp" != "" ]
then
    echo "The Surround proxy server is running!"
    # Bug fix: this line used to print $pidss (the Surround server pid).
    echo "The pid of surroundscmproxy is: $pidsp"
else
    echo "The Surround proxy server is not running!"
fi
echo ""
########################################################
# TestTrack Server
if [ "$pidtt" != "" ]
then
    echo "The TestTrack server is running!"
    echo "The pid of ttstudio is: $pidtt"
else
    echo "The TestTrack server is not running!"
fi
echo ""
| true
|
a0f7a2f95d1700b10675aa95a08cb7bbb94c1ca9
|
Shell
|
yast/helper_scripts
|
/github/github-scripts/github-create-email-hook
|
UTF-8
| 381
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
# For every repo in the yast organization, create a GitHub "email" hook
# that mails commits to yast-commit@opensuse.org.
# Requires: github-secrets (credentials for curl -n), github-list-repos.

. github-secrets

organization="yast"

repolist=`./github-list-repos $organization`

for repo in $repolist
do
  # Write the hook payload to a per-repo file; the heredoc body is the
  # JSON sent to the API -- runtime data, not shell code.
  json=json-${repo}
  cat > ${json} << EOJSON
{
  "name": "email",
  "active": true,
  "config": {
    "address": "yast-commit@opensuse.org"
  }
}
EOJSON
  # POST the hook; the API response is saved next to the payload for
  # later inspection.
  curl -s -n -X POST -d @${json} https://api.github.com/repos/${organization}/${repo}/hooks -o ${json}.result
done
| true
|
de833e2a332b19bdb8d3831946ed92cea8ba6397
|
Shell
|
Business-model-canvas/generate-c4-model-action
|
/entrypoint.sh
|
UTF-8
| 1,009
| 3.921875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Generate C4 model diagrams: Structurizr DSL -> PlantUML -> PNG images.
#   $1 - DSL file, relative to $GITHUB_WORKSPACE
#   $2 - output directory for PNGs, relative to $GITHUB_WORKSPACE
set -e

echo "Generating C4 model"

source="$GITHUB_WORKSPACE/$1"
source_dir="$(dirname "$source")"
tmp_dir=$(mktemp -d -t ci-XXXXXXXXXX)
target_dir="$GITHUB_WORKSPACE/$2"

if [ ! -f "$source" ]; then
    echo "Structurizr DSL file '$source' not found" >&2
    exit 1
fi

echo "[params]
source: $source
target dir: $target_dir"

echo "Exporting Structurizr DSL to PlantUML format"
# Fix: under `set -e` a failing command aborts immediately, so the old
# `if [ $? -ne 0 ]` checks were unreachable, and their `exit $?` would
# have exited 0 anyway ($? was the preceding echo's status).  Wrapping
# the command in `if !` keeps the tailored error messages working.
if ! /structurizr-cli/structurizr.sh export -w "$source" -f "plantuml"; then
    echo "An error occurred when attempting to export to PlantUML format" >&2
    exit 1
fi

# An unmatched glob is passed literally to ls, which then fails.
if ! ls "$source_dir"/*.puml >/dev/null; then
    echo "Did not generate any PlantUML files" >&2
    exit 1
fi

echo "Moving PlantUML files to '$tmp_dir'"
mkdir -p "$tmp_dir"
mv "$source_dir"/*.puml "$tmp_dir"

echo "Generating .png images"
plantuml "$tmp_dir"/*.puml

echo "Moving C4 images to '$target_dir'"
mkdir -p "$target_dir"
mv "$tmp_dir"/*.png "$target_dir"

ls -la "$target_dir"

echo "Finished"
exit 0
| true
|
7e1fa0152c6d4cc03076c48146659037bc4b3b9c
|
Shell
|
taku403/dotfiles
|
/lib/sys.sh
|
UTF-8
| 2,553
| 4.34375
| 4
|
[] |
no_license
|
#!/bin/bash
# OS/distro detection and package-install helpers.
#
# NOTE(review): the original file did not parse (unclosed functions,
# `if [ $# - ne ]`, a stray `e`, a nested function definition mid-body,
# `return csv`).  This is a minimal repair that keeps the function names,
# constants and Japanese user-facing messages; TODO: confirm the intended
# behaviour of install_packages with the author.

OS_NOT_FOUND='1'
OS_NOT_FOUND_MSG="このOSには対応していません"
DISTRO_NOT_FOUND_MSG="このディストリビューションには対応していません"
FAILED='1'
SUCCESS='0'

function get_system_type() {
    # Detect the OS currently running.
    # No arguments; exits 1 when the OS is unsupported.
    if [[ $# -ne 0 ]]; then
        source './common/resource.sh'
        usage
    fi
    # Ask get_distro which platform we are on.
    os_type=$(get_distro)
    if [[ ${os_type} -eq ${OS_NOT_FOUND} ]]; then
        echo "このOSには対応していません"
        exit 1
    fi
}

function get_distro() {
    # Detect the running distribution; exits when the OS is unsupported.
    this_os=$(uname -s)
    case $this_os in
        Linux )
            _get_distro_on_linux
            return $?
            ;;
        * )
            echo $OS_NOT_FOUND_MSG
            exit $FAILED
            ;;
    esac
}

function _get_distro_on_linux() {
    # Extract the NAME= value from /etc/*-release files.
    distro_name=$(find /etc -name '*-release' -print \
            | xargs -I {} cat {} \
            | grep '^NAME=.*' \
            | xargs -I VAR expr VAR : "NAME=*\(.*\)" )
    case $distro_name in
        Ubuntu ) ;;
        * )
            echo $DISTRO_NOT_FOUND_MSG
            exit $FAILED
            ;;
    esac
}

function prepare_ubuntu() {
    # Refresh and upgrade packages before installing anything.
    apt update
    apt upgrade   # was "apt upgreade" (typo)
}

function install_packages() {
    # Install the packages listed in CSV file $1.
    #   column 1: package name
    #   column 2: optional download URL (currently ignored -- TODO)
    # TODO(review): the original body was unfinished; this installs each
    # listed package via apt, then ensures git is present.
    while IFS=, read -r pkg url; do
        [ -n "$pkg" ] && apt install -y "$pkg"
    done < <(remove_csv_header "$1")
    if ! type git > /dev/null 2>&1; then
        apt install -y git   # was "apt get install git" (invalid)
    fi
}

function remove_csv_header() {
    # Print CSV file $1 without its first (header) line to stdout.
    # (The original `return csv` was invalid -- return takes a number.)
    sed '1d' "$1"
}

function remove_all_user_install() {
    # Remove user-installed applications.
    # TODO(review): never implemented in the original; kept as a stub.
    :
}
| true
|
17854c31bae70042b5315a56e40dbe253fb38f9f
|
Shell
|
Rikulf/linux_configs
|
/bashrc
|
UTF-8
| 2,641
| 3.53125
| 4
|
[] |
no_license
|
# .bashrc
# Interactive bash configuration: history tuning, colors, and a prompt
# that (in plain xterm) turns red after a failed command.

# If running interactively, turn off XON/XOFF flow control
case $- in
    *i*) stty -ixon;;
esac

# Source global definitions
if [ -f /etc/bashrc ]; then
	. /etc/bashrc
fi

# History options
# don't put duplicate lines or lines starting with space in the history.
# See bash(1) for more options
export HISTCONTROL=ignoreboth

# Allow history from multiple terminals
# append to the history file, don't overwrite it
shopt -s histappend

# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
export HISTSIZE=100000
export HISTFILESIZE=200000

# include time of commands in history
export HISTTIMEFORMAT="%d/%m/%y %T "

# make vim the default editor
export EDITOR=vim

# check the window size after each command and, if necessary,
# update the values of LINES and COLUMNS.
shopt -s checkwinsize

# make less more friendly for non-text input files, see lesspipe(1)
[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"

# If this is an xterm set the title to user@host:dir
PS1=""
case "$TERM" in
xterm*|rxvt*)
    PS1="\[\e]0;\u@\h: \w\a\]$PS1"
    ;;
*)
    ;;
esac

# enable color support of ls and also add handy aliases
if [ -x /usr/bin/dircolors ]; then
    test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
    alias ls='ls --color=auto'

    alias grep='grep --color=auto'
    alias fgrep='fgrep --color=auto'
    alias egrep='egrep --color=auto'
fi

# other ls related aliases
alias ll='ls -alF'
alias la='ls -A'
alias l='ls -CF'

# Add color to the prompt
if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then
    # We have color support; assume it's compliant with Ecma-48
    # (ISO/IEC-6429). (Lack of such support is extremely rare, and such
    # a case would tend to support setf rather than setaf.)
    color_prompt=yes
else
    color_prompt=
fi

# Color dependent on the success of the last command in some terminals
# exstat runs via PROMPT_COMMAND: it must read $? first, then rebuilds
# PS1 in green (success) or red (failure).
function exstat {
 EXSTAT="$?"
 RED="\[\033[1;31m\]"
 GREEN="\[\e[32;1m\]"
 CYAN="\[\e[36;1m\]"
 OFF="\[\033[m\]"
 PRMPT="${USER}@${HOSTNAME} ${PWD}"
 if [ "${EXSTAT}" -eq 0 ]
 then
   PS1="${GREEN}${PRMPT}>${OFF}"
 else
   PS1="${RED}${PRMPT}>${OFF}"
 fi
 }

# Plain "xterm" uses the dynamic exstat prompt; color terminals get the
# static colored PS1 below.
case "$TERM" in
    xterm-color) color_prompt=yes;;
    xterm-256color) color_prompt=yes;;
    xterm) color_prompt=no;
           export PROMPT_COMMAND=exstat;;
esac

if [ "$color_prompt" = yes ]; then
    PS1="\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$$PS1 "
else
    PS1="\u@\h:\w\$$PS1 "
fi
unset color_prompt

# Machine-specific overrides, kept out of version control.
if [ -f ~/.bashrc_work ]; then
	. ~/.bashrc_work
fi

if [ -f ~/.bashrc_local ]; then
	. ~/.bashrc_local
fi
| true
|
d9f5d384e1ec4ac5a3ad5f41168eddbcd38a0b23
|
Shell
|
epam/cloud-pipeline
|
/scripts/elasticsearch/update_storage_billing.sh
|
UTF-8
| 757
| 2.703125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# For every cp-billing-storage-<year>-* index, copy the raw cost/usage values
# into the corresponding standard_* fields and zero the overflow fields.
#
# Usage: update_storage_billing.sh <year>
year=${1:?usage: $0 <year>}
es="cp-search-elk.default.svc.cluster.local:30091"
while IFS= read -r line
do
  # _cat/indices rows look like "<health> <status> <index> ..." — the index
  # name is the third whitespace-separated field (intentional word splitting).
  array=($line)
  index=${array[2]}
  echo "Updating index $index"
  # Quote the URL so an unexpected index name cannot split the argument.
  curl -XPOST "${es}/${index}/_update_by_query" -H 'Content-Type: application/json' -d'
  {
    "script": {
      "source": "ctx._source.standard_cost = ctx._source.cost; ctx._source.standard_total_cost = ctx._source.cost; ctx._source.standard_usage_bytes = ctx._source.usage_bytes; ctx._source.standard_total_usage_bytes = ctx._source.usage_bytes; ctx._source.standard_ov_cost = 0; ctx._source.standard_ov_usage_bytes = 0",
      "lang": "painless"
    },
    "query": {
      "match_all": {}
    }
  }'
done < <(curl -XGET "${es}/_cat/indices" -s | grep "cp-billing-storage-${year}-")
| true
|
d1179d6e30ef5f728e64b9a6882f9038d16de104
|
Shell
|
xiexiaoqing/shell
|
/bash_cmds.sh
|
UTF-8
| 537
| 2.640625
| 3
|
[] |
no_license
|
# NOTE: reference notes / cheat-sheet — not meant to be executed as a script
# (lines such as "man -k <keyword>" are usage templates, not commands).
###List all commands that a shell knows###
compgen -c # will list all commands you could run
compgen -a # will list all aliases you could run
compgen -b # will list all built-ins you could run
compgen -k # will list all keywords you could run
compgen -A function # will list all functions you could run
compgen -A function -abck # will list all the above in one go
###print all commands included in $PATH ###
printf '%s\n' ${PATH//:/\/* }
### search info in linux man pages###
man -k <keyword>
e.g. man -k sendmail
man -k vim
| true
|
a90339e73df122445434002e197f50a6a3806f0c
|
Shell
|
mfvalin/clim_utils
|
/bin/install_clim_utils.sh
|
UTF-8
| 326
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
# Compile every *.F90 in <repo>/src with s.f90 and install the .Abs binaries
# into <repo>/bin. Refuses to build when EC_ARCH equals BASE_ARCH.
# Resolve the script's real location (quoted: survives paths with spaces).
Where=$(readlink -e "${0}")
Where=${Where%/bin/*}/src
# Abort if the source directory cannot be entered instead of building in $PWD.
cd "${Where}" || exit 1
echo "Installing from ${Where}"
[[ "$BASE_ARCH" == "$EC_ARCH" ]] && echo "ERROR: EC_ARCH == BASE_ARCH" && exit 1
set -x
for Target in *.F90 ; do
  s.f90 -I. -O 2 -o "${Target%.F90}.Abs" "${Target}" -lrmn
  mv "${Target%.F90}.Abs" ../bin
  rm -f *.mod
done
| true
|
41919ca6de3e171dbb0bc018cf357ec94371ea89
|
Shell
|
knutjelitto/LiFo
|
/Recipes/Core/isl
|
UTF-8
| 259
| 2.703125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build recipe: metadata variables plus a Build() hook consumed by the
# surrounding recipe framework (which unpacks Supplies and calls Build).
Title="Integer Set Library"
Home=(http://isl.gforge.inria.fr/)
Name=isl
Version=0.19
# Build-time dependency: isl links against GMP.
BuildDeps=(gmp)
Supplies=(http://isl.gforge.inria.fr/isl-$Version.tar.xz)
# Invoked from inside the unpacked source tree.
Build()
{
./configure --prefix=/usr --disable-static
make
make install
}
| true
|
9fc1e0b44c5ac988b62e000c985923c8e8ffc663
|
Shell
|
OpenShift4Me/ocp-demo-workloads
|
/workloads/pipelines/deploy-pipelines.sh
|
UTF-8
| 2,288
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# Install the OpenShift Pipelines (Tekton) operator, fetch the tkn CLI, and
# run the upstream pipelines-tutorial petclinic pipeline end to end.
set -ex

OPERATORS_NAMESPACE="openshift-operators"
TKN_VERSION="0.3.1"
PIPELINESNAMESPACE="pipelines-tutorial"

# Create a subscription
cat <<EOF | oc create -f -
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: openshift-pipelines-operator
  namespace: ${OPERATORS_NAMESPACE}
spec:
  channel: dev-preview
  installPlanApproval: Automatic
  name: openshift-pipelines-operator
  source: community-operators
  sourceNamespace: openshift-marketplace
  startingCSV: openshift-pipelines-operator.v0.5.2
EOF

echo "Give the pipeline operator some time to start..."
# Poll until the operator pod exists at all, then wait for readiness.
while [ "x" == "x$(oc get pods -l name=openshift-pipelines-operator -n ${OPERATORS_NAMESPACE} 2> /dev/null)" ]; do
    sleep 10
done
oc wait --for condition=ready pod -l name=openshift-pipelines-operator -n ${OPERATORS_NAMESPACE} --timeout=2400s

# Install the tkn CLI into /usr/local/bin.
curl -LO https://github.com/tektoncd/cli/releases/download/v${TKN_VERSION}/tkn_${TKN_VERSION}_Linux_x86_64.tar.gz
sudo tar xvzf tkn_${TKN_VERSION}_Linux_x86_64.tar.gz -C /usr/local/bin/ tkn
rm -f tkn_${TKN_VERSION}_Linux_x86_64.tar.gz

# From https://github.com/openshift/pipelines-tutorial
oc new-project ${PIPELINESNAMESPACE} --skip-config-write=true
oc create serviceaccount pipeline -n ${PIPELINESNAMESPACE}
oc adm policy add-scc-to-user privileged -z pipeline -n ${PIPELINESNAMESPACE}
oc adm policy add-role-to-user edit -z pipeline -n ${PIPELINESNAMESPACE}
oc create -f https://raw.githubusercontent.com/openshift/pipelines-tutorial/master/resources/petclinic.yaml -n ${PIPELINESNAMESPACE}
oc create -f https://raw.githubusercontent.com/tektoncd/catalog/master/openshift-client/openshift-client-task.yaml -n ${PIPELINESNAMESPACE}
oc create -f https://raw.githubusercontent.com/openshift/pipelines-catalog/master/s2i-java-8/s2i-java-8-task.yaml -n ${PIPELINESNAMESPACE}
oc create -f https://raw.githubusercontent.com/openshift/pipelines-tutorial/master/resources/petclinic-deploy-pipeline.yaml -n ${PIPELINESNAMESPACE}
oc create -f https://raw.githubusercontent.com/openshift/pipelines-tutorial/master/resources/petclinic-resources.yaml -n ${PIPELINESNAMESPACE}

# Kick off the pipeline run under the 'pipeline' service account.
tkn pipeline start petclinic-deploy-pipeline \
    -r app-git=petclinic-git \
    -r app-image=petclinic-image \
    -s pipeline -n ${PIPELINESNAMESPACE}
| true
|
8c70e4d76684e10bb942c6d10a0a3d67ae799176
|
Shell
|
yamingd/pycb
|
/bin/run-tests
|
UTF-8
| 322
| 2.75
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Run the pycb test suite under nose with coverage reporting.

# Remove stale bytecode so nose doesn't pick up orphaned .pyc files.
function remove_pyc {
    find ./ -name '*.pyc' -exec rm {} \;
}

# Run the suite with coverage limited to the pycb package; --stop aborts on
# the first failure, --nocapture keeps print output visible.
function run_tests {
    remove_pyc
    nosetests tests.py --process-restartworker --stop -v \
        --nocapture \
        --with-coverage \
        --cover-package=pycb
}

function main {
    time run_tests
}

echo "Running all tests"
main
# -f: don't error when the run produced no .coverage data file.
rm -f .coverage
| true
|
3e9946e37d1ff5711d1e9095c8e4aecd76d07850
|
Shell
|
NCAR/ParallelIO
|
/cmake/mpiexec.nwscla
|
UTF-8
| 131
| 2.765625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# mpirun wrapper used by the CMake test harness on this platform.
#
# Arguments:
#
#  $1 - Number of MPI Tasks
#  $2+ - Executable and its arguments
#
NP=$1
shift
# Quote "$NP" and "$@" so executable arguments containing spaces are
# forwarded intact (unquoted $@ re-splits every argument).
mpirun -np "$NP" "$@"
| true
|
20e78263c1d7c9a6fa925b577b6e927c1065ffd4
|
Shell
|
deniszh/buckytools
|
/docker/launch.sh
|
UTF-8
| 400
| 3.1875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Launch the local buckytools test cluster. The bucky/buckyd binaries must
# have been copied into this directory beforehand.

# Check if the bucky command and daemon are supplied
if [ ! -f bucky ]; then
    echo "ERROR: bucky executable not found. Please copy it in this directory" >&2
    exit 1
fi

if [ ! -f buckyd ]; then
    echo "ERROR: buckyd executable not found. Please copy it in this directory" >&2
    exit 1
fi

# Launch the actual cluster. Fail loudly: the original '|| echo' printed an
# error but still exited 0, hiding the failure from callers.
if ! docker-compose up -d; then
    echo "ERROR: docker-compose failed" >&2
    exit 1
fi
| true
|
f16ab93c573994a3b705bef4be66f2dcba480199
|
Shell
|
iordanisg/dotfiles
|
/config/polybar/scripts/check-all-updates.sh
|
UTF-8
| 244
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/sh
#source https://github.com/x70b1/polybar-scripts
# Polybar module: show the number of pending pacman+AUR updates.

# Fix: the fallback used to assign 'updates_arch', leaving the variable that
# is actually read below ('updates_arch_aur') unset when the check fails.
if ! updates_arch_aur=$(checkupdates+aur 2> /dev/null | wc -l ); then
    updates_arch_aur=0
fi

if [ "$updates_arch_aur" -gt 0 ]; then
    echo " $updates_arch_aur"
else
    echo "0"
fi
| true
|
8387bdd386c3047186265fe89af4d8fc02b86421
|
Shell
|
smartcontractkit/chainlink
|
/tools/ci/install_wasmd
|
UTF-8
| 338
| 2.953125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Build and install the wasmd (CosmWasm daemon) binary from source at a
# pinned tag, using an isolated GOPATH so the host Go workspace stays clean.
set -euo pipefail

# commit on branch releases/v0.40.x
GIT_TAG="v0.40.1"
CHECKOUT_DIR="${HOME}/wasmd-checkout"
BUILD_DIR="${HOME}/wasmd-build"

git clone https://github.com/CosmWasm/wasmd --branch "releases/v0.40.x" "${CHECKOUT_DIR}"
cd "${CHECKOUT_DIR}"
git checkout "${GIT_TAG}"
# 'make install' drops the binary under ${BUILD_DIR}/bin.
GOPATH="${BUILD_DIR}" make install
| true
|
83ae9b73d72af9688459eb3d7026b0b0f6aae7e4
|
Shell
|
dhm-org/dhm_suite
|
/utils/camserver_daemon
|
UTF-8
| 2,335
| 4.03125
| 4
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
###############################################################################
# file: camserver_daemon
#
# Copyright (c) 2020 Jet Propulsion Laboratory
# All rights reserved
#
# brief: Start the Camserver and execute in the background
#
# Description: This script will run only one instance of camserver
# It will determine if its already running an notify with printout.
# Else it will run the camserver.
# If want to start camserver on startup, place in appropriate location
# like /etc/init.d/
#
# NOTE: If more than one camera connected, the camserver by design
# will prompt user for selection which user may not see because
# this scripts pipes all print statements to /dev/null.
# To by pass, modify this script and pass the serial number of the camera
# with option '-s <serial number>'
#
# author: S. F. Fregoso
#
###############################################################################
logDir=~/Desktop/
scriptname=camserver
scriptpath=/opt/DHM/bin/$scriptname
case "$1" in
 start)
   ### Check if camserver is running. Start if it is not
   # pkill -0 sends signal 0: exit 0 means a matching process already exists.
   pkill -0 -f $scriptpath
   if [ $? != 0 ]
   then
      echo "Starting $scriptpath"
      $scriptpath -d -v -l $logDir &>> /dev/null &
      if [ $? != 0 ]; then
          echo "Camserver script aborted with error. Did not start. Contact developer [sfregoso@jpl.nasa.gov] to debug"
          exit 1
      fi
      # 3-2-1 countdown shown in place, rewound with backspace characters.
      for wait in {3..1}
      do
        echo -n "$wait"; echo -n " sec"; if [ "$wait" -lt 10 ]; then echo -n " "; fi
        sleep 1; echo -n -e $'\b\b\b'
        if [ "$wait" -ge 10 ]; then echo -n -e $'\b\b\b'; fi
        if [ "$wait" -lt 10 ]; then echo -n -e $'\b\b\b'; fi
      done
      echo "done"
   else
      echo $scriptpath is already running
   fi
   ;;
 stop)
   pkill -0 -f $scriptpath
   if [ $? == 0 ]
   then
     echo
     echo "Stopping $scriptpath script"
     process_id=`pgrep -f $scriptpath`
     # Ask nicely with SIGINT first; escalate to SIGABRT if still alive
     # one second later.
     kill -s SIGINT $process_id
     sleep 1
     # NOTE(review): this liveness check matches $scriptname, not $scriptpath
     # like the checks above — confirm the broader match is intentional.
     pkill -0 -f $scriptname
     if [ $? == 0 ]
     then
       kill -s SIGABRT $process_id
     fi
   else
     echo
     echo "$scriptname is not running"
   fi
   ;;
 *)
   echo "Usage: /etc/init.d/run_camserver {start | stop |}" >&2
   exit 1
   ;;
esac
exit 0
| true
|
03a3d0692d454fe5f95b1ab567dd6337eb839a90
|
Shell
|
reginaldoMorais/lx-installer
|
/scripts/nvm.sh
|
UTF-8
| 671
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/sh
# Interactively install NVM (Node Version Manager), then the latest LTS Node.
installNVM() {
  echo "\n\n\n Install NVM"
  echo "++++++++++++++++++++++"
  echo "\n> Deseja instalar NVM? [y/N]"
  read question
  # Proceed only on an explicit y/Y answer (empty input is rejected here).
  if [ ! -z $question ]
  then
    if [ "y" = $question -o "Y" = $question ]
    then
      # NOTE(review): '-qO-' streams the installer to stdout (piped to bash),
      # so the '-P ~/Downloads' download directory flag has no effect — likely
      # nothing is saved to ~/Downloads. Confirm intended flow.
      echo "\n> sudo wget -qO- https://raw.githubusercontent.com/nvm-sh/nvm/v0.34.0/install.sh -P ~/Downloads | bash\n"
      sudo wget -qO- https://raw.githubusercontent.com/creationix/nvm/v0.34.0/install.sh -P ~/Downloads | bash
      # NOTE(review): './~/Downloads/install.sh' is a literal path beginning
      # './~' (tilde is not expanded after './') — this cannot resolve to
      # $HOME/Downloads. Probably meant ~/Downloads/install.sh; verify.
      echo "\n> sudo ./~/Downloads/install.sh\n"
      sudo ./~/Downloads/install.sh
      echo "\n> source ~/.bashrc\n"
      source ~/.bashrc
      echo "\n> nvm i --lts\n"
      nvm i --lts
    fi
  fi
}
| true
|
843e235c1eb4b3f5239a8829f84f302e124a0585
|
Shell
|
scribblemaniac/_ebDev
|
/scripts/excludeWords.sh
|
UTF-8
| 1,225
| 3.265625
| 3
|
[] |
no_license
|
# DESCRIPTION
# Changes one text file to exclude all words from another text file; in other words, it deletes every instance of a word in the other file (excludeThese.txt) from the one file (fromThese.txt). Results are written to fromThese_parsed.txt.
# NOTE
# A variation of this which works only if you have more strict list inputs can be found in numberFilesByLabel.sh.
# USAGE
# Copy a list of exclusion words to excludeThese.txt. Copy the words you want edited (to exclude all words from excludeThese.txt) to fromThese.txt. Run this script. The results will appear in fromThese_parsed.txt.
# Thanks yet again to yet another genius breath yonder: http://stackoverflow.com/a/18477228/1397555
# THE COMMAND TEMPLATE is:
# awk '{if (f==1) { r[$0] } else if (! ($0 in r)) { print $0 } } ' f=1 exclude-these.txt f=2 from-this.txt
# ADAPTED e.g. for removal of all actual English words (english_dictionary.txt) from gibberishWords.txt:
# awk '{if (f==1) { r[$0] } else if (! ($0 in r)) { print $0 } } ' f=1 english_dictionary.txt f=2 gibberishWords.txt > gibberishWords_real_words_excluded.txt
# How it works: while f==1 (first file) each line is stored as a key of r[];
# while f==2, a line is printed only if it is NOT a key — i.e. matching is
# exact whole-line, one word per line.
awk '{if (f==1) { r[$0] } else if (! ($0 in r)) { print $0 } } ' f=1 excludeThese.txt f=2 fromThese.txt > fromThese_parsed.txt
| true
|
9f598a9cb7c7ff3b24545db1c2f1c4cd13f6d8f3
|
Shell
|
hiderisha/DAMN-X-DEODEXER
|
/start.sh
|
UTF-8
| 6,005
| 3.796875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# --------------Variable -------------- #
BOOTCLASSPATH=""
requirements="openjdk-6-jre java-common zip"
architecture=""
# FUNCTIONS ARE HERE
# SIGINT handler: ask for confirmation before letting Ctrl-C kill the script.
function control_c() {
  ask_to_quit="true"
  while [[ $ask_to_quit == "true" ]]; do
    cin warning "Are you sure you want to quit? (Y/n) "
    read answer_to_quit
    if [[ $answer_to_quit == *[Yy]* || $answer_to_quit == "" ]]; then
      # Confirmed (or empty = default yes): exit with the last status.
      cout info "NO! GUH!!!"
      ask_to_quit="false"
      exit $?
    elif [[ $answer_to_quit == *[Nn]* ]]; then
      cout info "ROCK ON!"
      ask_to_quit="false"
    else
      # Unrecognised answer: re-prompt.
      cout info "Try harder!"
    fi
  done
}
# Print a colour-coded prompt prefix followed by $2, with NO trailing newline
# (used immediately before 'read' so the answer lands on the same line).
# $1 selects the tag: action=[>], info=[i], warning=[w], error=[e].
function cin() {
    local prefix
    case "$1" in
        action)  prefix="\e[01;32m[>]\e[00m" ;;
        info)    prefix="\e[01;33m[i]\e[00m" ;;
        warning) prefix="\e[01;31m[w]\e[00m" ;;
        error)   prefix="\e[01;31m[e]\e[00m" ;;
    esac
    output="$prefix $2"
    echo -en "$output"
}
# Print a colour-coded status line (newline-terminated):
# action=[>], info=[i], warning=[w], error=[e], followed by the message $2.
function cout() {
    local prefix
    case "$1" in
        action)  prefix="\e[01;32m[>]\e[00m" ;;
        info)    prefix="\e[01;33m[i]\e[00m" ;;
        warning) prefix="\e[01;31m[w]\e[00m" ;;
        error)   prefix="\e[01;31m[e]\e[00m" ;;
    esac
    output="$prefix $2"
    echo -e "$output"
}
# Verify each package in the global $requirements is installed (via dpkg);
# interactively offer to apt-get install anything missing. Exits 1 if the
# user declines a required package.
# NOTE(review): answers are matched with *[Yy]* / *[Nn]* globs, so any string
# CONTAINING y/Y (e.g. "nyet") counts as yes — confirm this is acceptable.
function check_requirements() {
  cout action "Checking requirements..."
  sleep 1
  for package in $requirements; do
    cout action "Checking $package"
    sleep 1
    # dpkg -l | grep ii == "installed" rows; empty result means not installed.
    if [[ $(dpkg -l | grep ii | grep $package) == "" ]]; then
      cout warning "Warning, $package has not installed yet"
      ask_to_install_package=true
      while [[ $ask_to_install_package == "true" ]]; do
        cin info "Do you want to install $package? (Y/n) "
        read answer_to_install_package
        if [[ $answer_to_install_package == *[Yy]* || $answer_to_install_package == "" ]]; then
          ask_to_install_package=false
          cout action "Will installing $package. This process need root access!"
          sleep 1
          # Optionally refresh the package lists before installing.
          ask_to_apt_get_update=true
          while [[ $ask_to_apt_get_update == "true" ]]; do
            cin info "Do you want to run sudo apt-get update first? (Y/n) "
            read answer_to_apt_get_update
            if [[ $answer_to_apt_get_update == *[Yy]* || $answer_to_apt_get_update == "" ]]; then
              ask_to_apt_get_update=false
              cout action "Running sudo apt-get update..."
              sleep 1
              sudo apt-get update
            elif [[ $answer_to_apt_get_update == *[Nn]* ]]; then
              ask_to_apt_get_update=false
              cout action "Skipping apt-get update"
              sleep 1
            else
              cout warning "Try harder!!!"
            fi
          done
          sudo apt-get install $package
        elif [[ $answer_to_install_package == *[Nn]* ]]; then
          ask_to_install_package=false
          cout warning "Insufficient dependencies... Will abort now!"
          sleep 1
          exit 1
        else
          cout warning "Try harder!!!"
        fi
      done
    else
      cout info "Cool, you have $package"
    fi
  done
}
# On non-32-bit hosts, make sure ia32-libs is installed so the bundled 32-bit
# adb binary can run; offers to add the i386 dpkg foreign architecture first.
function check_arch() {
  cout action "Checking architecture..."
  sleep 1
  # uname -m without 'i386' => 64-bit (or other non-i386) host.
  if [[ $(uname -m | grep i386) == "" ]]; then
    cout info "You are NOT using 32bit LINUX distro, adb might not working if you have not installed ia32libs yet."
    sleep 1
    cout action "Checking ia32-libs..."
    sleep 1
    ask_to_install_ia32libs=true
    while [[ $ask_to_install_ia32libs == "true" ]]; do
      if [[ $(dpkg -l | grep ii | grep ia32-libs) == "" ]]; then
        cin warning "You don't have ia32libs installed on your system! Do you want to install it? (Y/n) "
        read answer_to_install_ia32libs
        if [[ $answer_to_install_ia32libs == *[Yy]* || $answer_to_install_ia32libs == "" ]]; then
          ask_to_install_ia32libs=false
          cout info "Will install ia32-libs..."
          cout action "Reading dpkg configuration..."
          # i386 must be a registered foreign architecture before the
          # multiarch package can be installed.
          if [[ $(dpkg --print-foreign-architectures | grep i386) == "" ]]; then
            cout warning "i386 architecture is not implemented yet!"
            ask_to_add_i386_arch=true
            while [[ $ask_to_add_i386_arch == "true" ]]; do
              cin info "Do you want to add i386 architecture to your dpkg foreign architecture? (Y/n) "
              read answer_to_add_i386_arch
              if [[ $answer_to_add_i386_arch == *[Yy]* || $answer_to_add_i386_arch == "" ]]; then
                ask_to_add_i386_arch=false
                cout action "Adding i386 architecture to your dpkg foreign architecture..."
                sleep 1
                sudo dpkg --add-architecture i386
                cout info "Done..."
              elif [[ $answer_to_add_i386_arch == *[Nn]* ]]; then
                ask_to_add_i386_arch=false
                cout warning "Insufficient requirements! Exiting!!!"
                sleep 1
                exit 1
              else
                cout warning "Try harder!!!"
              fi
            done
          fi
        elif [[ $answer_to_install_ia32libs == *[Nn]* ]]; then
          ask_to_install_ia32libs=false
          cout warning "Insufficient requirements! Exiting!!!"
          sleep 1
          exit 1
        else
          # NOTE(review): this catch-all runs for ANY unrecognised answer and
          # goes straight to installing ia32-libs (unlike the re-prompt
          # pattern used elsewhere) — confirm this fall-through is intended.
          ask_to_install_ia32libs=false
          sudo apt-get install ia32-libs
        fi
      else
        ask_to_install_ia32libs=false
        cout info "Good, you have ia32-libs installed!"
      fi
    done
  else
    cout info "You are running 32bit LINUX distro. This mean, you don't have to install ia32-libs to make adb work!"
    sleep 1
  fi
}
# Verify adb can see the phone: loop until the user confirms the cable is
# connected with USB debugging enabled, then restart the adb server and
# block until a device appears.
function test_adb() {
  cout action "Testing adb..."
  ask_to_connect=true
  while [[ $ask_to_connect == "true" ]]; do
    cout info "Please connect your phone to your PC/LAPTOP. Make sure you have checked USB Debuging on Developer Options"
    cin info "Have you? (Y/n) "
    read answer_to_connect
    if [[ $answer_to_connect == *[Yy]* || $answer_to_connect == "" ]]; then
      cout action "Finding phone... If you see this more than 10 secs, please check your phone, and grant your LINUX to access adb by checking the confirmation dialog on your phone"
      sleep 1
      ask_to_connect=false
    elif [[ $answer_to_connect == *[Nn]* ]]; then
      cout info "It's OK. Take your time... I will ask this again in 5 secs..."
      sleep 5
    else
      # Fix: was 'echo warning "Try harder!!!"', which printed the literal
      # word "warning" uncoloured; use cout like every other retry path.
      cout warning "Try harder!!!"
    fi
  done
  sleep 1
  # Restart the server so stale daemons don't hide the device.
  ./binary/adb kill-server
  ./binary/adb wait-for-device
  ./binary/adb devices
}
# Wire up the Ctrl-C confirmation handler, then run the workflow in order:
# dependency check -> architecture/ia32 check -> adb connectivity test.
trap control_c SIGINT
check_requirements
check_arch
test_adb
| true
|
b0cd03a403d241a211e20e8a4930c9bf5ad7fa46
|
Shell
|
SumantBagri/weather_app
|
/weatherapp/cleanup.sh
|
UTF-8
| 995
| 4.1875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Config
shopt -s nullglob
shopt -s extglob
# Change this before deploying
dataPath="$HOME/weather_app/weatherapp/data/"
# Function to clean files older than a week
# Shrink a day-log to its first and last lines (header + most recent entry),
# writing the result back into $dataPath under the file's basename.
function clean_file() {
    local scratch="${dataPath}tmp"
    {
        head -1 "$1"
        tail -1 "$1"
    } > "$scratch"
    # ${1##*/} strips everything up to the last '/', i.e. the basename.
    mv "$scratch" "${dataPath}${1##*/}"
}
# Read all filenames in data dir if it exists
if [[ -e "$dataPath" ]]; then
    fileList=(${dataPath}*)
else
    echo "No data logged yet" >&2
    exit 1
fi
# Get current date
#NOW=$(date -j "+%s") # For BSD
NOW=$(date '+%s') # For GNU
# Remove stale files(older than 1 month/30 days)
# Clean old files(older than 1 week/7 days)
for file in "${fileList[@]}"
do
    # extglob pattern: strip the directory prefix and the extension, leaving
    # the bare filename — assumes files are named YYYY-MM-DD.<ext>; confirm.
    fileDate="${file//+(*\/|.*)}"
    #fileDate="$(date -j -f "%Y-%m-%d" ${fileDate} '+%s')" # For BSD
    fileDate="$(date -d ${fileDate} '+%s')" # For GNU
    # Age of the file in whole days.
    dateDiff=$(( ($NOW - $fileDate)/(60*60*24) ))
    if [[ $dateDiff -gt 30 ]]; then
        rm -f "$file"
    elif [[ $dateDiff -gt 7 ]]; then
        clean_file "$file"
    fi
done
| true
|
e8f68c781a60701b7bc97dd184c3c49c10f22e74
|
Shell
|
imam-i/lfs-script
|
/svn/tools/05_tools_lfs/05.10_gcc.sh
|
UTF-8
| 1,843
| 3.125
| 3
|
[] |
no_license
|
#######################################
# LFS temporary-toolchain GCC (pass 2) build targeting the /tools prefix.
# Sourced by the build driver (uses 'return' at top level); relies on
# ${BUILD_DIR}, ${PACK}, ${name}, ${_log}, $LFS_TGT and the 'unarch' helper
# provided by the caller.
pushd ${BUILD_DIR}
unarch 'mpfr' 'gmp' 'mpc' || return ${?}
cd ./${PACK}
# Generate a full limits.h into the pass-1 compiler's include-fixed dir.
cat gcc/limitx.h gcc/glimits.h gcc/limity.h > \
  `dirname $($LFS_TGT-gcc -print-libgcc-file-name)`/include-fixed/limits.h
cp -v gcc/Makefile.in{,.tmp}
sed 's/^T_CFLAGS =$/& -fomit-frame-pointer/' gcc/Makefile.in.tmp \
  > gcc/Makefile.in
# Retarget the default dynamic-linker and startfile search paths to /tools.
for file in \
  $(find gcc/config -name linux64.h -o -name linux.h -o -name sysv4.h)
do
  cp -uv $file{,.orig}
  sed -e 's@/lib\(64\)\?\(32\)\?/ld@/tools&@g' \
      -e 's@/usr@/tools@g' $file.orig > $file
  echo '
#undef STANDARD_STARTFILE_PREFIX_1
#undef STANDARD_STARTFILE_PREFIX_2
#define STANDARD_STARTFILE_PREFIX_1 "/tools/lib/"
#define STANDARD_STARTFILE_PREFIX_2 ""' >> $file
  touch $file.orig
done
# In-tree mpfr/gmp/mpc so gcc builds its own support libraries.
mv -v ../mpfr-* mpfr
mv -v ../gmp-* gmp
mv -v ../mpc-* mpc
mkdir -v ../${name}-build; cd ../${name}-build
CC=$LFS_TGT-gcc \
CXX=$LFS_TGT-g++ \
AR=$LFS_TGT-ar \
RANLIB=$LFS_TGT-ranlib \
../${PACK}/configure \
    --prefix=/tools \
    --with-local-prefix=/tools \
    --with-native-system-header-dir=/tools/include \
    --enable-clocale=gnu \
    --enable-shared \
    --enable-threads=posix \
    --enable-__cxa_atexit \
    --enable-languages=c,c++ \
    --disable-libstdcxx-pch \
    --disable-multilib \
    --disable-bootstrap \
    --disable-libgomp \
    --with-mpfr-include=$(pwd)/../${PACK}/mpfr/src \
    --with-mpfr-lib=$(pwd)/mpfr/src/.libs || return ${?}
make || return ${?}
make install || return ${?}
ln -vs gcc /tools/bin/cc
# Sanity check: the new cc must link against the /tools interpreter.
echo 'Test compiling C' >> ${_log}
echo 'main(){}' > dummy.c
cc dummy.c
readelf -l a.out | grep ': /tools' | tee -a ${_log}
rm -v dummy.c a.out
popd
#######################################
| true
|
a2c91a6d50cafd114df190ccd567f8fddafc03ea
|
Shell
|
INOS-soft/pbi
|
/modules10/mail/evolution/pbi.conf
|
UTF-8
| 1,243
| 2.609375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# PBI 10.x Build Configuration
# -- Program Base Information --
PBI_PROGNAME="Evolution"
PBI_PROGWEB=""
PBI_PROGAUTHOR="Evolution Team"
PBI_PROGICON="evo.png"
# -- Program Repo Information (optional) --
PBI_LICENSE="GPL"
PBI_TAGS="mail,gnome" #comma delimited (no spaces)
PBI_PROGTYPE="Graphical" #[Graphical/Text/Server]
PBI_ICONURL="http://images.pbidir.com/progicons/evolution.png"
PBI_CATEGORY="Mail"
# -- Port Information --
PBI_MAKEPORT="mail/evolution"
PBI_MKPORTBEFORE="x11/libSM"
PBI_MKPORTAFTER="mail/evolution-exchange sysutils/gnome-settings-daemon x11-themes/qtcurve-gtk2 mail/p5-Mail-SpamAssassin"
PBI_MAKEOPTS="evolution_SET=SPAMASSASSIN" #Disused for package/repo builds
# -- Require Root Permissions to Install PBI --
PBI_REQUIRESROOT="NO"
# -- Repo Configuration Options (optional) --
PBI_BUILDKEY="11"
PBI_PROGREVISION=""
PBI_AB_PRIORITY="10"
PBI_AB_NOTMPFS=""
# -- Export all the required variables --
export PBI_PROGNAME PBI_PROGWEB PBI_PROGAUTHOR PBI_PROGICON PBI_MAKEPORT PBI_MKPORTBEFORE PBI_MKPORTAFTER PBI_MAKEOPTS PBI_REQUIRESROOT
# -- Export all the optional variables --
export PBI_LICENSE PBI_TAGS PBI_PROGTYPE PBI_ICONURL PBI_CATEGORY PBI_BUILDKEY PBI_PROGREVISION PBI_AB_PRIORITY PBI_AB_NOTMPFS
| true
|
c69fc8b4089f0fc8eb43c08ef8bd919e2cd18f17
|
Shell
|
kouril/secmon-probes
|
/src/probes/check_CVE-2015-3245
|
UTF-8
| 588
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# nagios check for CVE-2015-3245
# CRITICAL unless chfn/chsh are PAM-denied while non-root shell accounts exist.
NAGIOS_OK=0
NAGIOS_ERROR=2

# Any non-root account with a */bin/*sh login shell? If none, nothing to do.
# Fix: the old '[ $? -eq 1 ]' pattern treated grep errors (exit 2, e.g. a
# missing file) the same as "pattern found", silently passing the check;
# 'if ! grep -q' fails closed instead.
if ! awk -F: '{if ($1 != "root") print $7}' /etc/passwd | grep -q '/bin/.*sh$'; then
    echo "OK: No non-root account detected, finishing."
    exit $NAGIOS_OK
fi

# chfn must be denied via PAM.
if ! grep -q 'auth.*required.*pam_deny.so' /etc/pam.d/chfn; then
    echo "CRITICAL: chfn still available to users!"
    exit $NAGIOS_ERROR
fi

# chsh must be denied via PAM.
if ! grep -q 'auth.*required.*pam_deny.so' /etc/pam.d/chsh; then
    echo "CRITICAL: chsh still available to users!"
    exit $NAGIOS_ERROR
fi
| true
|
ec0cb862a4570de177fe74d8440c67370f4abe02
|
Shell
|
samm02/infrastructure
|
/ansible/playbooks/files/backup-container-data.sh
|
UTF-8
| 423
| 3.53125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Snapshot /containers into a timestamped tarball and ship it to the backup
# host. The scratch directory is removed on every exit path via the trap.
set -euo pipefail

BACKUP_DIR=$(mktemp -d)
trap 'rm -r "${BACKUP_DIR}"' EXIT

# Removed a dead, broken cleanup() helper: it referenced the undefined
# $BACKUP_NAM (typo), contained a truncated 'cp /containe' line, and was
# never invoked — the EXIT trap above already handles cleanup.

main() {
    pushd "${BACKUP_DIR}" >/dev/null
    mkdir "./data";
    cp -r "/containers" "./data"
    # Record the snapshot time (UTC) alongside the data.
    echo $(date -u +"%Y-%m-%dT%H-%M-%SZ") > "./data/backup.txt"
    tar -czf "container-data.tar.gz" "./data"
    scp "container-data.tar.gz" csesoc@cse.unsw.edu.au:~/backups/wheatley/
}

main "$@"
| true
|
2100db0fbdd6fff9cd0d03835d201d50f7fbb352
|
Shell
|
wake/package.sh
|
/package
|
UTF-8
| 584
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/bash
# Package a project directory into <project>-package.tar.gz beside it,
# excluding VCS / CodeKit / Composer artifacts.
#
# Usage: package <path-to-project-dir>
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin
export PATH

PROJECT=$(basename "$1")
DIR=$(cd "$(dirname "$1")"; pwd)
OUTPUT_FILE="$DIR/$PROJECT-package.tar.gz"

echo "Package \`$1\` to \`$OUTPUT_FILE\`"

# Quote every expansion so paths containing spaces survive, and let tar's
# exit status become the script's (the old unconditional 'exit 0' reported
# success even when tar failed).
tar -zcf "$OUTPUT_FILE" \
    --exclude=".DS_Store" \
    --exclude="./.codekit*" \
    --exclude="./.git" \
    --exclude="./.gitignore" \
    --exclude="./caches" \
    --exclude="./composer.json" \
    --exclude="./composer.lock" \
    --exclude="./config.codekit" \
    --exclude="./data.cache" \
    -C "$DIR" \
    "$PROJECT"
| true
|
1a92fbccc82639a56ef66e3fa76f79bfd6e58865
|
Shell
|
BWITS/playbooks
|
/playbooks/roles/nexus/templates/update-wrapper.sh
|
UTF-8
| 1,020
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/bash
# -------------------------------------------------------------
# Updates Nexus wrapper.conf with additional Java options.
# -------------------------------------------------------------
# NOTE: Ansible/Jinja2 template — {{ java_options }} and {{ wrapper_conf }}
# are substituted at deploy time; the file is not runnable as-is.

# Comment out 'wrapper.java.initmemory' section if '-Xms' is present in Java options
if [[ "{{ java_options }}" == *"-Xms"* ]]; then
    sed -i 's/\(wrapper.java.initmemory=.*\)/#\1/' '{{ wrapper_conf }}';
fi

# Comment out 'wrapper.java.maxmemory' section if '-Xmx' is present in Java options
if [[ "{{ java_options }}" == *"-Xmx"* ]]; then
    sed -i 's/\(wrapper.java.maxmemory=.*\)/#\1/' '{{ wrapper_conf }}';
fi

# Split Java options and add numbered 'wrapper.java.additional' sections, starting from (last number + 1)
opts="{{ java_options }}";
arr=(${opts// / });
# Highest existing, non-commented wrapper.java.additional.<N> index.
last_number=$(grep 'wrapper.java.additional' '{{ wrapper_conf }}' | grep -v '^\s*#' | cut -d'.' -f4 | cut -d '=' -f1 | tail -1)
for j in "${!arr[@]}"; do
    echo "wrapper.java.additional.$((j+last_number+1))=${arr[j]}" >> '{{ wrapper_conf }}';
done
|
0be37dcd46176e3e198dd33c5378e00e3709adeb
|
Shell
|
mstange22/Exercism
|
/bash/matching-brackets/matching_brackets.sh
|
UTF-8
| 677
| 3.890625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
declare -A bracket_map=(
["["]="]"
["{"]="}"
["("]=")"
)
# Report "true"/"false" on stdout depending on whether the brackets in $1
# are balanced and properly nested; non-bracket characters are ignored.
# Uses the global bracket_map (open -> matching close) declared above.
main () {
  local pending="" ch idx
  for (( idx = 0; idx < ${#1}; idx++ )); do
    ch=${1:idx:1}
    case $ch in
      '('|'['|'{')
        # Opener: push the close bracket we now expect.
        pending+=${bracket_map[$ch]}
        ;;
      ')'|']'|'}')
        # Closer: must match the most recently pushed expectation.
        if [[ ${pending: -1} == "$ch" ]]; then
          pending=${pending::-1}
        else
          echo "false"
          exit 0
        fi
        ;;
      *)
        # Non-bracket character: skip.
        ;;
    esac
  done
  # Balanced iff nothing is left pending.
  if (( ${#pending} == 0 )); then
    echo "true"
  else
    echo "false"
  fi
}
main "$@"
| true
|
b69b3c910637ba4014840792c127458095eeddde
|
Shell
|
akashpayne/aws-tools
|
/MSK/02_create_kafka_topic_on_client_amzn2.sh
|
UTF-8
| 1,017
| 3.40625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Create a Kafka topic on an Amazon MSK cluster from a client EC2 host.
# Set to fail script if any command fails.
set -e
# Ref-1: https://docs.aws.amazon.com/msk/latest/developerguide/create-topic.html
# Ref-2: https://docs.aws.amazon.com/msk/latest/developerguide/produce-consume.html

# TODOs - update variables
CLUSTER_ARN="TODO"
KAFKA_TOPIC="AWSKafkaTutorialTopic"

# Look up the broker and ZooKeeper connection strings for the cluster.
BOOTSTRAP_BROKER_STR=$(aws kafka get-bootstrap-brokers --query BootstrapBrokerStringTls --cluster-arn ${CLUSTER_ARN})
ZOOKEEPER_CONNECT_STR=$(aws kafka describe-cluster --query ClusterInfo.ZookeeperConnectString --cluster-arn ${CLUSTER_ARN})

pushd /home/ec2-user/kafka_2.12-2.2.1/bin
./kafka-topics.sh --create --replication-factor 3 --partitions 1 --topic ${KAFKA_TOPIC} --zookeeper ${ZOOKEEPER_CONNECT_STR}
# If the command succeeds, you see the following message: Created topic $KAFKA_TOPIC.
echo "To list topics: ./kafka-topics.sh --list --zookeeper ${ZOOKEEPER_CONNECT_STR}"
echo "To delete this topic: ./kafka-topics.sh --delete --topic ${KAFKA_TOPIC} --zookeeper ${ZOOKEEPER_CONNECT_STR}"
popd
| true
|
4b405b85bf3e2d5b5512fbf12ef066db9c5245f4
|
Shell
|
noirHck/Argus
|
/ArgusDocker/simple/conf/opentsdb/start_opentsdb.sh
|
UTF-8
| 494
| 2.71875
| 3
|
[] |
permissive
|
#!/bin/bash
# Container entry point: wait for HBase, create the TSDB tables on first
# boot only, then run the OpenTSDB daemon in the foreground.
export TSDB_VERSION="2.2.0"

echo "Sleeping for 30 seconds to give HBase time to warm up"
sleep 30

# Marker file ensures table creation runs exactly once per container volume.
if [ ! -e /opt/opentsdb_tables_created.txt ]; then
  echo "creating tsdb tables"
  bash /opt/bin/create_tsdb_tables.sh
  echo "created tsdb tables"
fi

echo "starting opentsdb"
/opt/opentsdb/opentsdb-${TSDB_VERSION}/build/tsdb tsd --port=4242 --staticroot=/opt/opentsdb/opentsdb-${TSDB_VERSION}/build/staticroot --cachedir=/tmp --auto-metric --config=/opt/opentsdb/config/opentsdb.conf
| true
|
f072ea0effeb12717d775eafd0c49d2ce2c9f667
|
Shell
|
Good-Vibez/ct
|
/component/shell/vm::install.sh
|
UTF-8
| 13,067
| 2.859375
| 3
|
[] |
no_license
|
# vim: et ts=2 sw=2 ft=bash
main() {
ui::doing "BASE"
CDI::install:base_devel
config::fish
config::bash
config::tmux
config::nvim
config::git
ui::doing "RBENV"
CDI::install:rbenv
CDI::install:user_paths.ccache
ui::doing "HMBRW"
CDI::install:homebrew
ui::doing "HMBRW_PKGS"
brew:install "${BREW_PACKAGES[@]}"
ui::doing "NVIM"
CDI::install:vim-plugged
pip3 install neovim
nvim -n --headless -c 'PlugInstall' -c 'qa!'
(
# plug YouCompleteMe needs gcc@5 ¯\_(ツ)_/¯
brew install gcc@5 &&
cd $HOME/.local/share/nvim/plugged/YouCompleteMe &&
python3 ./install.py --rust-completer &&
brew uninstall gcc@5 &&
true;
)
ui::doing "OMF"
CDI::install:omf
ui::doing "CARGO"
CDI::install:cargo
CDI::user_init:load
# ui::doing "XFCE4"
# UDI::install:xfce4
# ui::doing "CT"
# CDI::install:ct
config::user #(shell, ...?)
ui::doing "CT_DEV"
git:init https://github.com/Good-Vibez/ct ct --branch dev
mkdir -pv ct/.local/etc
touch ct/.local/etc/rc.env
(
cd ct
direnv allow .
ui::doing "CT_DEV-build"
( cd component/cargo && cargo build --workspace --all-targets )
ui::doing "CT_DEV-build_release"
( cd component/cargo && cargo build --workspace --all-targets --release )
ui::doing "CT_DEV-sudo:install"
.cache/cargo/release/xs -f dev_exec/::sanctioned/sudo:install
)
echo "*] Just chillin'"
}
COMMON_PACKAGES=(
ccache
curl
gcc
git
htop
make
python3
)
DEBIAN_APT_NEEDS=(
apt-utils
build-essential
dialog
)
DEBIAN_APT_KEPT_BACK=(
linux-headers-generic
linux-headers-virtual
linux-image-virtual
linux-virtual
man-db
)
PACMAN_AUR_NEEDS=(
base-devel
pacman-contrib
man-db
)
PACMAN_MIRROR_UTIL=rate-arch-mirrors # AUR
UBUNTU_MIRROR_UTIL=apt-smart # pip3
RUBY_DEPS_DEB=(
libssl-dev
zlib1g-dev
)
BREW_PACKAGES=(
${COMMON_PACKAGES[@]}
# System and gnu
cmake
findutils
gnu-sed
grep
zlib
# libressl
# Tools
darkhttpd
direnv
entr
fzf
gnupg
jq
nvim
pv
# Dev/Workspace/Aesthetics
fish
lsd
tmux
)
# Announce a new install phase as "==> <label>" so progress is visible.
ui::doing() {
  local label=$1
  printf '==> %s\n' "$label"
}
# Print "(+) <name> <value>" with 256-colour ANSI codes, no trailing newline.
ui::add() {
  printf '(\x1b[38;5;111m+\x1b[m) \x1b[38;5;112m%s\x1b[m \x1b[38;5;118m%s\x1b[m' "$@"
}
# Echo the command line prefixed "(sudo)" and then run it under sudo.
# NOTE(review): '\x1bm' in the format is missing the '[' of the escape
# sequence ('\x1b[...m') — probably a typo; confirm intended output.
sudo:() {
  printf '(\x1b[38;5;152msudo\x1bm) %s' "$*" #1>&2
  sudo "$@"
}
# Emit $2 on stdout only when it does not already occur in file $1
# (fixed-string substring match); helper used to append lines idempotently.
file::line:uniq() {
  local target=$1 candidate=$2
  grep -F -e "$candidate" "$target" >/dev/null 2>&1 || echo "$candidate"
}
# Append $2 to file $1 unless the file already contains it (delegates the
# containment check to file::line:uniq), keeping repeated runs idempotent.
file::line:add.uniq() {
  local file="$1"; shift;
  local line="$1"; shift;
  file::line:uniq "$file" "$line" >>$file
}
config::apt:sources() {
ui::doing "Instal ubuntu apt sources"
sudo tee /etc/apt/sources.list >/dev/null <<-'EOS'
deb http://mirror.eu.kamatera.com/ubuntu focal main restricted
deb http://mirror.eu.kamatera.com/ubuntu focal-updates main restricted
deb http://mirror.eu.kamatera.com/ubuntu focal universe
deb http://mirror.eu.kamatera.com/ubuntu focal-updates universe
deb http://mirror.eu.kamatera.com/ubuntu focal multiverse
deb http://mirror.eu.kamatera.com/ubuntu focal-updates multiverse
deb http://mirror.eu.kamatera.com/ubuntu focal-backports main restricted universe multiverse
deb http://security.ubuntu.com/ubuntu focal-security main restricted
deb http://security.ubuntu.com/ubuntu focal-security universe
deb http://security.ubuntu.com/ubuntu focal-security multiverse
EOS
}
# Overwrite /etc/pacman.d/mirrorlist with a fixed set of European Arch mirrors.
# NOTE(review): "Instal" in the status message is a typo in a runtime string;
# left untouched here.
config::pacman:mirrorlist() {
ui::doing "Instal arch pacman mirrorlist"
sudo tee /etc/pacman.d/mirrorlist >/dev/null <<-'EOS'
Server = https://archlinux.koyanet.lv/archlinux/$repo/os/$arch
Server = http://mirror.puzzle.ch/archlinux/$repo/os/$arch
Server = http://mirror.datacenter.by/pub/archlinux/$repo/os/$arch
Server = https://archlinux.uk.mirror.allworldit.com/archlinux/$repo/os/$arch
Server = http://mirror.easylee.nl/archlinux/$repo/os/$arch
EOS
}
# Write the fish shell configuration: GNU-coreutils PATH shim for macOS,
# vi key bindings, custom bindings, and the user_paths/user_init hook files.
config::fish() {
mkdir -pv $HOME/.config/fish/conf.d
mkdir -pv $HOME/.config/fish/functions
# NOTE(review): the coreutils gnubin line is duplicated in this heredoc —
# harmless but probably unintended; confirm before cleaning up.
tee $HOME/.config/fish/conf.d/osx_gnu.fish >/dev/null <<-'EOS'
if test (uname -s) = "Darwin"
set -gx PATH /usr/local/opt/coreutils/libexec/gnubin $PATH
set -gx PATH /usr/local/opt/coreutils/libexec/gnubin $PATH
set -gx PATH /usr/local/opt/gnu-sed/libexec/gnubin $PATH
end
EOS
tee $HOME/.config/fish/conf.d/vi.fish >/dev/null <<-'EOS'
set -g fish_key_bindings fish_vi_key_bindings
EOS
tee $HOME/.config/fish/conf.d/key_bindings.fish >/dev/null <<-'EOS'
bind -M insert \c] forward-char
bind -M insert \cP "commandline --replace 'nvim (git:ls-files | fzf)'"
EOS
# Prepend the NUL-separated entries of ~/.user_paths to fish_user_paths.
tee $HOME/.config/fish/conf.d/CDI::user_init:00-user_paths.hook.fish >/dev/null <<-'EOS'
set -x fish_user_paths (string split0 <$HOME/.user_paths | sort | uniq) $fish_user_paths
EOS
# Regenerate the init-hook file by eval'ing every snippet in ~/.user_init.
tee $HOME/.config/fish/functions/CDI::user_init:reload.fish >/dev/null <<-'EOS'
function CDI::user_init:reload
set -l target $HOME/.config/fish/conf.d/CDI::user_init:01-hooks.fish
for command in (string split0 <$HOME/.user_init | sort | uniq)
eval "$command"
end | tee $target
source $target
end
EOS
tee $HOME/.config/fish/functions/ls.fish >/dev/null <<-'EOS'
function ls
/home/linuxbrew/.linuxbrew/bin/lsd $argv
end
EOS
}
# Bash equivalent of the fish user_paths/user_init machinery, sourced from
# ~/.bashrc via file::line:add.uniq (defined elsewhere in this file).
config::bash() {
tee $HOME/.user_init.bash >/dev/null <<-'EOS'
if test -f $HOME/.user_paths
then
export PATH="$(cat $HOME/.user_paths | tr \\000 :):$PATH"
fi
CDI::user_init:reload() {
target=$HOME/.CDI::user_init:hook.bash
cat $HOME/.user_init \
| tr \\000 \\n \
| sort \
| uniq \
| bash \
| tee $target
source $target
}
if test -f $HOME/.CDI::user_init:hook.bash
then
source $HOME/.CDI::user_init:hook.bash
fi
EOS
file::line:add.uniq $HOME/.bashrc source $HOME/.user_init.bash
}
# Minimal tmux configuration: vi copy-mode keys and status-bar colors.
config::tmux() {
tee $HOME/.tmux.conf >/dev/null <<-'EOS'
set -g escape-time 0
set -g mode-keys vi
set -g status-style bg=colour24
set -g status-left-style bg=colour162
set -g status-right-style bg=colour17,fg=colour92
set -g default-terminal screen-256color
EOS
}
config::user() {
  # Make fish the login shell for the current user.
  # command -v replaces which(1): it is a shell builtin, POSIX, and always present.
  sudo chsh --shell "$(command -v fish)" "$(id -nu)"
}
# Write the neovim init.vim: vim-plug plugin list plus colorscheme setup.
config::nvim() {
mkdir -pv $HOME/.config/nvim
tee $HOME/.config/nvim/init.vim >/dev/null <<-'EOS'
call plug#begin(stdpath('data') . '/plugged')
Plug 'ycm-core/YouCompleteMe', { 'do': './install.py' }
Plug 'tpope/vim-sensible'
Plug 'tpope/vim-sleuth'
Plug 'rust-lang/rust.vim'
Plug 'google/vim-jsonnet'
Plug 'kyoz/purify', { 'rtp': 'vim' }
Plug 'relastle/bluewery.vim'
Plug 'preservim/nerdtree'
Plug 'rafi/awesome-vim-colorschemes'
Plug 'ron-rs/ron.vim'
Plug 'junegunn/fzf', { 'do': { -> fzf#install() } }
"Plug 'junegunn/fzf.vim'
call plug#end()
"nmap <C-p> :call fzf#run({'sink':'e','source':'git ls-files .','window':{'width': 0.9,'height': 0.6}})<CR>
set termguicolors
set number
" """ BlueWery """
" " For dark
" colorscheme bluewery
" let g:lightline = { 'colorscheme': 'bluewery' }
"
" "" For light
" "colorscheme bluewery-light
" "let g:lightline = { 'colorscheme': 'bluewery_light' }
colorscheme apprentice
EOS
}
# Write ~/.config/git/config with short status/log/commit/diff aliases.
config::git() {
local target_dir=$HOME/.config/git
local target=$target_dir/config
mkdir -pv $target_dir
cat >$target <<-'EOS'
[alias]
s = status --short
h = log --pretty --oneline --decorate --graph
ha = h --all
h1 = h -1
hs = h --show-signature
m = commit
ma = commit --amend --reset-author
d = diff
ds = diff --cached
addu = add --update
EOS
}
# --- thin, non-interactive package-manager wrappers --------------------------

pacman:install() {
  # Sync the databases, upgrade, and install the given packages unattended.
  local -a opts=(-Syu --noconfirm --needed --quiet)
  sudo pacman "${opts[@]}" "$@"
}

apt:install() {
  # Refresh indexes, upgrade everything, then install the given packages.
  local -a apt=(sudo env DEBIAN_FRONTEND=noninteractive apt-get)
  "${apt[@]}" update -y
  "${apt[@]}" upgrade -y
  "${apt[@]}" install "$@" -y
}

apt:install.delayed() {
  # Install only (no update/upgrade) — for packages deferred to a later phase.
  sudo env DEBIAN_FRONTEND=noninteractive apt-get install "$@" -y
}
aur:install() {
  # Build and install one AUR package from its cgit snapshot tarball.
  #   $1 - AUR package name
  local name="$1"; shift;
  local tarball="$name.tar.gz"
  # -f: fail on HTTP errors instead of silently saving an HTML error page
  # as the tarball (the original curl had no error handling at all).
  curl -fsSL "https://aur.archlinux.org/cgit/aur.git/snapshot/$tarball" >"$tarball"
  tar xvf "$tarball"
  (
    cd "$name" || exit 1
    makepkg -sic --noconfirm
  )
}
git:init() {
  # Clone a repository, or just fetch+prune if the working copy already exists.
  #   $1 - source URL; $2 - target directory; remaining args - extra clone flags
  local source="$1"; shift;
  local target="$1"; shift;
  # Quote every expansion so paths with spaces cannot word-split.
  if test -d "$target/.git"
  then
    git -C "$target" remote update --prune
  else
    git clone "$@" "$source" "$target"
  fi
}
gh:init() {
  # git:init for a GitHub "owner/repo", always cloned shallow (--depth 1).
  local name="$1"; shift;
  local target="$1"; shift;
  # BUG FIX: 'source' was assigned without 'local' and leaked into the
  # caller's (global) scope.
  local source="https://github.com/$name.git"
  git:init "$source" "$target" --depth 1 "$@"
}
CDI::linux:distro() {
  # Print the distro name ("Ubuntu" or "Arch") inferred from the hostname
  # prefix. An unrecognized hostname is fatal: report on stderr and abort.
  local node_name
  node_name="$(uname --nodename)"
  case "$node_name" in
    (ubuntu*) echo "Ubuntu" ;;
    (arch*)   echo "Arch" ;;
    (*)
      printf 'Unknown node_name: %s\n' "$node_name" 1>&2
      exit 1
      ;;
  esac
}
# Install the distro-appropriate base development tool set.
# Relies on the package-list arrays (COMMON_PACKAGES, DEBIAN_APT_NEEDS, ...)
# declared elsewhere in this file.
CDI::install:base_devel() {
case "$( CDI::linux:distro )" in
(Ubuntu)
config::apt:sources
apt:install \
"${COMMON_PACKAGES[@]}" \
"${DEBIAN_APT_NEEDS[@]}" \
"${DEBIAN_APT_KEPT_BACK[@]}" \
"${RUBY_DEPS_DEB[@]}" \
;;
(Arch)
config::pacman:mirrorlist
pacman:install \
"${COMMON_PACKAGES[@]}" \
"${PACMAN_AUR_NEEDS}" \
;;
esac
}
CDI::_:add() {
  # Append one item to the NUL-separated list file $HOME/.<list>.
  # The first item gets no separator; later ones are prefixed with \x00
  # (bash printf expands \xHH escapes in the format string).
  #   $1 - list name (file is $HOME/.$1); $2 - item to append
  local list="$1"; shift;
  local item="$1"; shift;
  # BUG FIX: 'format' was not declared local and leaked into global scope;
  # the trailing "tee ... | tee /dev/null >/dev/null" was a no-op pipeline
  # replaced here by a plain append redirection.
  local format='%s'
  if test -f "$HOME/.$list"
  then
    format='\x00%s'
  fi
  printf "$format" "$item" >>"$HOME/.$list"
}
CDI::user_paths:add() {
  # Record one extra PATH entry in the NUL-separated ~/.user_paths list.
  CDI::_:add user_paths "$1"
}
CDI::user_init:add.eval() {
  # Record one shell snippet to be eval'd at init time in ~/.user_init.
  CDI::_:add user_init "$1"
}
# Re-source the bash init shim and regenerate the init hook file.
CDI::user_init:load () {
source $HOME/.user_init.bash
CDI::user_init:reload
}
# Install the ruby-build plugin into rbenv's plugin directory.
CDI::install:rbenv-build() {
local target="$(rbenv root)"/plugins
mkdir -p "$target"
gh:init "rbenv/ruby-build" "$target/ruby-build"
}
# Install rbenv from source (idempotent: skipped when it already runs),
# then register its bin dir and init hook with the user_* lists.
CDI::install:rbenv() {
if $HOME/.rbenv/bin/rbenv version >/dev/null 2>/dev/null
then
true
else
gh:init "rbenv/rbenv" "$HOME/.rbenv"
( cd ~/.rbenv && src/configure && make -C src )
PATH2="$HOME/.rbenv/bin:$HOME/.rbenv/shims:$PATH"
PATH="$PATH2" CDI::install:rbenv-build
curl -fsSL "https://github.com/rbenv/rbenv-installer/raw/master/bin/rbenv-doctor" \
| PATH="$PATH2" bash
fi
# rbenv init - will not set the PATH
CDI::user_paths:add "$HOME/.rbenv/bin"
CDI::user_init:add.eval '$HOME/.rbenv/bin/rbenv init -'
CDI::user_init:load
}
CDI::install:ruby.3.0.1() {
  # Install Ruby 3.0.1 via rbenv unless that exact version is already present.
  ui::doing "RB_3.0.1"
  # -q: no match output on stdout; -x -F: exact whole-line fixed-string match,
  # so e.g. "3.0.10" or "3a0b1" (dots are regex metacharacters) no longer
  # satisfy the check.
  if rbenv versions --bare --skip-aliases | grep -qxF 3.0.1
  then
    true
  else
    rbenv install 3.0.1
  fi
}
# Install Linuxbrew into /home/linuxbrew owned by the current user, and
# register "brew shellenv" as a user_init hook.
# NOTE(review): 'sudo:' (with colon) looks like a project helper defined
# elsewhere in this file — confirm it exists before reusing this function.
CDI::install:homebrew() {
# This install script is clever enough to skip very fast
#curl -fsSL "https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh" | bash
#if ! id -u linuxbrew
#then
# ui::add user linuxbrew
# sudo adduser -D -s /bin/bash linuxbrew
#fi
#file::line:uniq \
# 'linuxbrew ALL=(ALL) NOPASSWD:ALL' \
# /etc/sudoers \
#| sudo: tee -a /etc/sudoers >/dev/null
local target=/home/linuxbrew
local mu="$(id -u)"
local mg="$(id -g)"
sudo: mkdir -pv $target
sudo: chown -R $mu:$mg $target
gh:init Homebrew/Brew $target/.linuxbrew
# brew shellenv - will set the PATH
CDI::user_init:add.eval "$target/.linuxbrew/bin/brew shellenv -"
CDI::user_init:load
brew update
brew doctor
}
CDI::install:user_paths.ccache() {
  # Put the distro's ccache compiler-shim directory on the user PATH.
  case "$( CDI::linux:distro )" in
    (Ubuntu) CDI::user_paths:add "/lib/ccache" ;;
    (Arch)   CDI::user_paths:add "/lib/ccache/bin" ;;
  esac
}
CDI::install:vim-plugged() {
  # Drop vim-plug into neovim's autoload directory (overwrites on re-run).
  local dest="${XDG_DATA_HOME:-$HOME/.local/share}/nvim/site/autoload/plug.vim"
  curl -fLo "$dest" --create-dirs \
    https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim
}
# Install oh-my-fish plus the "flash" theme, skipping if omf already works.
CDI::install:omf() {
if fish -c 'omf >/dev/null' 2>/dev/null
then
true
else
curl -sL https://get.oh-my.fish >omf::install.fish
fish omf::install.fish --noninteractive
fish -c 'omf install flash'
fi
}
# Install the Rust toolchain via brew's rustup-init, skipping if cargo runs.
CDI::install:cargo() {
if $HOME/.cargo/bin/cargo --version >/dev/null 2>/dev/null
then
true
else
ui::doing "RUST"
brew install rustup-init
rustup-init -y
fi
CDI::user_paths:add "$HOME/.cargo/bin"
# We don't even bother with .cargo/env
}
# Install the xs/xc tools from the Good-Vibez tap; "--HEAD" is forwarded to
# brew via brew:install2's first (extra-flags) argument.
CDI::install:ct() {
brew tap Good-Vibez/tap
brew:install2 --HEAD \
Good-Vibez/tap/xs \
Good-Vibez/tap/xc \
;
}
# Install the XFCE desktop for the detected distro.
# NOTE(review): the "UDI::" prefix looks like a typo for "CDI::" — renaming
# would break any existing callers, so it is left as-is; confirm intent.
UDI::install:xfce4() {
case "$( CDI::linux:distro )" in
Ubuntu)
apt:install.delayed xfce4
;;
Arch)
pacman:install xfce4 xorg-xinit
;;
esac
}
brew:install() {
  # brew:install2 with no extra brew flags.
  brew:install2 "" "$@"
}
# Install formulae via brew, filtering out already-installed ones first.
#   $1   - extra brew flags as a single word-split string (may be empty)
#   rest - formula names
# When jq is available, "brew info --json" is filtered to formulae with an
# empty .installed list, joined with NUL, and fed to xargs -0 so names with
# odd characters survive; otherwise brew is called directly.
brew:install2() {
local brargs="$1"; shift;
if jq --version >/dev/null 2>/dev/null
# If we have no jq then it's the first brew run, and this
# check makes no sense anyway.
then
brew info --json --formulae "${@}" \
| jq \
--raw-output \
--join-output \
--compact-output '.
| map(select((.installed | length) == 0))
| map(.name)
| join("\u0000")
' \
| xargs -0 -I::: brew install $brargs ::: # NOTE: DO NOT QUOTE $brargs
else
CDI::user_init:load
brew install "${@}"
fi
}
# Run main only when executed directly; a caller that sources this file as a
# library sets VMINSTALLLIB to suppress it.
if test "${VMINSTALLLIB-x}" = "x"
then
ui::doing "MAIN"
main
fi
| true
|
5f33a7815aeb0b39e40de52cd29c8c61b60ae43f
|
Shell
|
mohamedsaif/AKS-Adv-Provision
|
/provisioning-v2/19-aks-aad-role-binding.sh
|
UTF-8
| 11,691
| 3.5
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Make sure that variables are updated
# NOTE(review): $VAR_FILE, $RG_AKS and $AKS_CLUSTER_NAME must be exported by
# the caller before running this script — confirm.
source ./$VAR_FILE
#***** Basic AAD Role Binding Configuration *****
# NOTE: Execute the below steps ONLY if you successfully completed the AAD provisioning
# Grab the new cluster ADMIN credentials
# the AKS cluster with AAD enabled
# Objective here to grant your AAD account an admin access to the AKS cluster
az aks get-credentials --resource-group $RG_AKS --name $AKS_CLUSTER_NAME --admin
#List our currently available contexts
kubectl config get-contexts
#set our current context to the AKS admin context (by default not needed as get-credentials set the active context)
kubectl config use-context $AKS_CLUSTER_NAME-admin
#Check the cluster status through kubectl
kubectl get nodes
# Access Kubernetes Dashboard. You should have a lot of forbidden messages
# this would be due to that you are accessing the dashboard with a kubernetes-dashboard service account
# which by default don't have access to the cluster
az aks browse --resource-group $RG_AKS --name $AKS_CLUSTER_NAME
# Before you can use AAD account with AKS, a role or cluster role binding is needed.
# Let's grant the current logged user access to AKS via its User Principal Name (UPN)
# Have a look at the UPN for the signed in account
az ad signed-in-user show --query userPrincipalName -o tsv
# Use Object Id if the user is in external directory (like guest account on the directory)
SIGNED_USER=$(az ad signed-in-user show --query objectId -o tsv)
# Copy either the objectId to aad-user-cluster-admin-binding.yaml file before applying the deployment
sed ./deployments/aad-user-cluster-admin-binding.yaml \
-e s/USEROBJECTID/$SIGNED_USER/g \
> ./deployments/aad-user-cluster-admin-binding-updated.yaml
# Now granting the signed in account a cluster admin rights
kubectl apply -f ./deployments/aad-user-cluster-admin-binding-updated.yaml
# We will try to get the credentials for the current logged user (without the --admin flag)
az aks get-credentials --resource-group $RG_AKS --name $AKS_CLUSTER_NAME
#List our currently available contexts. You should see a context without the -admin name
kubectl config get-contexts
#set our current context to the AKS admin context (by default not needed as get-credentials set the active context)
kubectl config use-context $AKS_CLUSTER_NAME
# try out the new context access :). You should notice the AAD login experience with a link and code to be entered in external browser.
# You should be able to get the nodes after successful authentication
kubectl get nodes
# Great article about Kubernetes RBAC policies and setup https://docs.bitnami.com/kubernetes/how-to/configure-rbac-in-your-kubernetes-cluster/
#***** END Basic AAD Role Binding Configuration *****
#***** AAD and AKS RBAC Advanced Configuration *****
# Documentation https://docs.microsoft.com/en-us/azure/aks/azure-ad-rbac
# NOTE: You can leverage the below steps only if you successfully provided AAD enabled AKS cluster
# We will be creating 2 roles: (appdev) group with a user called aksdev1,
# (opssre) with user akssre1 (SRE: Site Reliability Engineer)
# Note: In production environments, you can use existing users and groups within an Azure AD tenant.
# We will need the AKS resource id during the provisioning
AKS_ID=$(az aks show \
--resource-group $RG_AKS \
--name $AKS_CLUSTER_NAME \
--query id -o tsv)
# Create the "appdev" group. Sometime you need to wait for a few seconds for the new group to be fully available for the next steps
APPDEV_ID=$(az ad group create \
--display-name appdev \
--mail-nickname appdev \
--query objectId -o tsv)
# Create Azure role assignment for appdev group, this will allow members to access AKS via kubectl
az role assignment create \
--assignee $APPDEV_ID \
--role "Azure Kubernetes Service Cluster User Role" \
--scope $AKS_ID
# Now creating the opssre group
OPSSRE_ID=$(az ad group create \
--display-name opssre \
--mail-nickname opssre \
--query objectId -o tsv)
# Assigning the group to role on the AKS cluster
az role assignment create \
--assignee $OPSSRE_ID \
--role "Azure Kubernetes Service Cluster User Role" \
--scope $AKS_ID
# NOTE(review): this early exit makes everything below in the script
# unreachable when run end-to-end — it looks like a debugging leftover or a
# deliberate "manual steps follow" marker; confirm before removing.
exit 0
# Creating our developer user account
# SECURITY: the password below is hardcoded and committed to source control —
# rotate it / generate a secret before using this outside a throwaway demo.
AKSDEV1_ID=$(az ad user create \
    --display-name "AKS Dev 1" \
    --user-principal-name aksdev1@mobivisions.com \
    --password P@ssw0rd1 \
    --query objectId -o tsv)
# Adding the new user to the appdev group
az ad group member add --group appdev --member-id $AKSDEV1_ID
# Create a user for the SRE role
AKSSRE1_ID=$(az ad user create \
    --display-name "AKS SRE 1" \
    --user-principal-name akssre1@mobivisions.com \
    --password P@ssw0rd1 \
    --query objectId -o tsv)
# Add the user to the opssre Azure AD group
az ad group member add --group opssre --member-id $AKSSRE1_ID
# Create AKS cluster resources for appdev group
# Make sure that you are on the cluster admin context to execute the following commands (the active context has -admin in it).
kubectl config use-context $AKS_CLUSTER_NAME-admin
# We will be using namespace isolation. We will create a dev namespace for the developers to use
kubectl create namespace dev
# In Kubernetes, Roles define the permissions to grant, and RoleBindings apply them to desired users or groups.
# These assignments can be applied to a given namespace, or across the entire cluster.
# So first we will create a Role with full access to dev namespace through applying the manifest role-dev-namespace.yaml
kubectl apply -f ./deployments/role-dev-namespace.yaml
# We need the group resource ID for appdev group to be replaced in the role binding deployment file
az ad group show --group appdev --query objectId -o tsv
# Replace the group id in rolebinding-dev-namespace.yaml before applying the deployment
# BUG FIX: the manifest lives under ./deployments/ — the original sed targeted
# a non-existent file in the current directory and failed.
sed -i ./deployments/rolebinding-dev-namespace.yaml -e "s/groupObjectId/$APPDEV_ID/g"
kubectl apply -f ./deployments/rolebinding-dev-namespace.yaml
# Doing the same to create access for the SRE
kubectl create namespace sre
kubectl apply -f ./deployments/role-sre-namespace.yaml
az ad group show --group opssre --query objectId -o tsv
# Update the opssre group id to rolebinding-sre-namespace.yaml before applying the deployment
# BUG FIX: same missing ./deployments/ prefix as above.
sed -i ./deployments/rolebinding-sre-namespace.yaml -e "s/groupObjectId/$OPSSRE_ID/g"
kubectl apply -f ./deployments/rolebinding-sre-namespace.yaml
# Testing now can be done by switching outside of the context of the admin to one of the users created
# Reset the credentials for AKS so you will sign in with the dev user
az aks get-credentials --resource-group $RG_AKS --name $AKS_CLUSTER_NAME --overwrite-existing
# Now lets try to get nodes. You should have the AAD sign in experience. After signing in with Dev user, you should see it is forbidden :)
kubectl get nodes
# Lets try run a basic NGINX pod on the dev namespace (in case you signed in with a dev user)
kubectl run --generator=run-pod/v1 nginx-dev --image=nginx --namespace dev
# The above command should say: pod/nginx-dev created. Let's see if it is running
kubectl get pods --namespace dev
# Another test is to try to get pods from all namespaces (you should get forbidden again :)
kubectl get pods --all-namespaces
# Error from server (Forbidden): pods is forbidden: User "YOURDEVUSER@TENANT.COM" cannot list resource "pods" in
# API group "" at the cluster scope
# One final test to schedule a pod in a different namespace (sre for example)
kubectl run --generator=run-pod/v1 nginx-dev --image=nginx --namespace sre
# Error from server (Forbidden): pods is forbidden: User "YOURDEVUSER@TENANT.COM" cannot create resource "pods" in
# API group "" in the namespace "sre"
# More information about authentication and authorization here https://docs.microsoft.com/en-us/azure/aks/operator-best-practices-identity
# Let's clean up after ourselves
# Get the admin kubeconfig context to delete the necessary cluster resources
kubectl config use-context $AKS_CLUSTER_NAME-admin
# Or use this if you don't have the admin context from the previous steps
az aks get-credentials --resource-group $RG_AKS --name $AKS_CLUSTER_NAME --admin
# You can delete only the pods and let the users, groups, namespaces intact or delete everything
kubectl delete pod nginx-dev --namespace dev
# Delete the dev and sre namespaces. This also deletes the pods, Roles, and RoleBindings
kubectl delete namespace dev
kubectl delete namespace sre
# Delete the Azure AD user accounts for aksdev and akssre
az ad user delete --upn-or-object-id $AKSDEV1_ID
az ad user delete --upn-or-object-id $AKSSRE1_ID
# Delete the Azure AD groups for appdev and opssre. This also deletes the Azure role assignments.
az ad group delete --group appdev
az ad group delete --group opssre
#***** END AAD and AKS RBAC Advanced Configuration *****
#***** Configure AKS Dashboard Access with AAD *****
# NOTE: You can leverage the below steps only if you successfully provided AAD enabled AKS cluster
# Create the "aks-dashboard-admins" group. Sometime you need to wait for a few seconds for the new group to be fully available for the next steps
DASHBOARD_ADMINS_ID=$(az ad group create \
    --display-name AKS-Dashboard-Admins \
    --mail-nickname aks-dashboard-admins \
    --query objectId -o tsv)
# Create Azure role assignment for the group, this will allow members to access AKS via kubectl, dashboard
az role assignment create \
    --assignee $DASHBOARD_ADMINS_ID \
    --role "Azure Kubernetes Service Cluster User Role" \
    --scope $AKS_ID
# We will add the current logged in user to the dashboard admins group
# Get the UPN for a user in the same AAD directory
SIGNED_USER_UPN=$(az ad signed-in-user show --query userPrincipalName -o tsv)
# Use Object Id if the user is in external directory (like guest account on the directory)
SIGNED_USER_UPN=$(az ad signed-in-user show --query objectId -o tsv)
# Add the user to dashboard group
az ad group member add --group $DASHBOARD_ADMINS_ID --member-id $SIGNED_USER_UPN
# Create role and role binding for the new group (after replacing the AADGroupID)
# BUG FIX: the manifest lives under ./deployments/ — the original sed targeted
# a non-existent file in the current directory and failed.
sed -i ./deployments/dashboard-proxy-binding.yaml -e "s/AADGroupID/$DASHBOARD_ADMINS_ID/g"
kubectl apply -f ./deployments/dashboard-proxy-binding.yaml
# As a workaround accessing the dashboard using a token without enforcing https secure communication (tunnel is exposed ver http),
# you can edit the dashboard deployment with adding the following argument
# It is an issue currently being discussed here https://github.com/MicrosoftDocs/azure-docs/issues/23789
# args: ["--authentication-mode=token", "--enable-insecure-login"] under spec: containers
# spec:
#   containers:
#   - name: *****
#     image: *****
#     args: ["--authentication-mode=token", "--enable-insecure-login"]
kubectl edit deploy -n kube-system kubernetes-dashboard
# Get AAD token for the signed in user (given that user has the appropriate access). Use (az login) if you are not signed in
SIGNED_USER_TOKEN=$(az account get-access-token --query accessToken -o tsv)
echo $SIGNED_USER_TOKEN
# establish a tunnel and login via token above
# If AAD enabled, you should see the AAD sign in experience with a link and a code to https://microsoft.com/devicelogin
az aks browse --resource-group $RG_AKS --name $AKS_CLUSTER_NAME
# You can also use kubectl proxy to establish the tunnel as well
# kubectl proxy
# Then you can navigate to sign in is located http://localhost:8001/api/v1/namespaces/kube-system/services/kubernetes-dashboard/proxy/#!/login
# Note: you can also use the same process but with generated kubeconfig file for a Service Account that is bound to a specific namespace
# to login to the dashboard.
#***** END Configure AKS Dashboard Access with AAD *****
echo "AKS-Post-Provision Scripts Execution Completed"
| true
|
1be038f578edd7ee6215388105034c147a941b21
|
Shell
|
idber/auto-install-script
|
/auto_install_mysql.sh
|
UTF-8
| 8,840
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
# Colored, timestamped log helpers. NB: red_echo also TERMINATES the whole
# script with exit 1 — it doubles as the fatal-error path.
red_echo () { echo; echo; echo -e "\033[031;1m `date +"%Y-%m-%d %H:%M:%S"` \t\t $@\033[0m"; exit 1;}
yellow_echo () { echo; echo; echo -e "\033[033;1m `date +"%Y-%m-%d %H:%M:%S"` \t\t $@\033[0m"; }
green_echo () { echo; echo; echo -e "\033[032;1m `date +"%Y-%m-%d %H:%M:%S"` \t\t $@\033[0m"; }
_add_user_group () {
    # Ensure a no-login system user/group pair exists.
    #   $1 - user name, $2 - group name
    local user=$1
    local group=$2
    # Quote all expansions so odd names cannot word-split.
    if ! id "$user" >/dev/null 2>&1; then
        groupadd "$group"
        useradd -g "$group" -s /bin/false -M "$user"
    else
        usermod -s /bin/false "$user";
    fi
    green_echo "用户 $user 用户组 $group 已添加。"
}
_init_dirtree () {
    # Create the install/data/socket directories and hand them to mysql:mysql.
    # Globals read: INSTALL_PATH, DATA_FILE_PATH, SOCKET_PATH
    local paths=("$INSTALL_PATH" "$DATA_FILE_PATH" "$SOCKET_PATH")
    local path
    for path in "${paths[@]}"; do
        mkdir -p "$path"
        chown -R mysql:mysql "$path"
        chmod -R 755 "$path"
    done
    # NB: $? only reflects the last chmod; $paths expands to element 0 only.
    [[ $? -eq 0 ]] && green_echo "\tINFO\t $paths等目录创建完成!"
    # -p keeps re-runs idempotent when the scratch dir already exists.
    mkdir -p /tmp/mysql_temp
}
_chmod_dirtree () {
    # Re-apply mysql:mysql ownership and 755 permissions to the main dirs.
    # BUG FIX: 'paths' was not declared local and leaked into global scope;
    # expansions are now quoted.
    local paths=("$INSTALL_PATH" "$DATA_FILE_PATH" "$SOCKET_PATH")
    local path
    for path in "${paths[@]}"; do
        chown -R mysql:mysql "$path"
        chmod -R 755 "$path"
    done
    [[ $? -eq 0 ]] && green_echo "\tINFO\t $paths等目录属性修改完成!"
}
# Ensure a usable cmake: keep the yum cmake when its version reads "2.8",
# otherwise remove it and build cmake 3.0.2 from source.
# NOTE(review): grep's '\s2\.[0-9]+' keeps the leading whitespace in
# $cmake_ver and only an exact "2.8" passes the (unquoted) test — any other
# 2.x triggers a rebuild; confirm this is the intended policy.
_check_cmake_version () {
local cmake_pkgs_name='cmake-3.0.2.tar.gz'
local cmake_ver=$(yum list cmake | grep -Eo '\s2\.[0-9]+')
if [ $cmake_ver == 2.8 ]; then
green_echo "\tINFO\t Cmake版本高于2.8 \n"
else
yellow_echo "\tINFO\t Cmake版本低于2.8,即将安装Cmake3.0版本!\n"
rpm -qa | grep cmake | xargs rpm -e --nodeps
get_install_pkgs $cmake_pkgs_name
cd $RESP_PATH
tar xf $cmake_pkgs_name
# cut -d . -f 1-3 turns "cmake-3.0.2.tar.gz" into the "cmake-3.0.2" dir name.
cd $(echo $cmake_pkgs_name | cut -d . -f 1-3)
./configure
make
make install
[[ $? -eq 0 ]] && green_echo "\tINFO\t Cmake3.0安装成功"
[[ $? -ne 0 ]] && red_echo "\tERROR\t Cmake3.0安装失败,返回错误码 = $? \n"
fi
}
_install_boost () {
    # Download boost 1.59 (via get_install_pkgs) and unpack it under /usr/local/boost.
    local boost_pkgs_name='boost_1_59_0.tar.gz'
    get_install_pkgs "$boost_pkgs_name"
    mkdir -p /usr/local/boost
    # BUG FIX: the original extracted "$RESP_PATH/$cmake_pkgs_name" —
    # a variable that is local to _check_cmake_version and unset here —
    # instead of the boost tarball that was just downloaded.
    tar xf "$RESP_PATH/$boost_pkgs_name" -C /usr/local/boost
    [[ $? -eq 0 ]] && green_echo "\tINFO\t Boost已经解压完毕"
    [[ $? -ne 0 ]] && red_echo "\tERROR\t Boost安装失败,返回错误码 = $? \n"
}
# Write the my.cnf template, then fill in basedir/datadir/port/socket from
# the INSTALL_PATH/DATA_FILE_PATH/PORT/SOCKET_PATH globals via sed.
_render_tpl () {
cat > $INSTALL_PATH/my.cnf <<EOF
# The following options will be passed to all MySQL clients
[client]
default-character-set = utf8
#password = your_password
port =
socket =
# Here follows entries for some specific programs
# The MySQL server
[mysqld]
character-set-server = utf8
basedir =
datadir =
port =
socket =
skip-external-locking
skip-name-resolve
key_buffer_size = 16M
max_allowed_packet = 64M
table_open_cache = 64
sort_buffer_size = 512K
net_buffer_length = 8K
read_buffer_size = 256K
read_rnd_buffer_size = 512K
myisam_sort_buffer_size = 8M
max_connections=1000
event_scheduler=ON
innodb_file_per_table=1
#skip-networking
[mysql]
no-auto-rehash
default-character-set = utf8
# Remove the next comment character if you are not familiar with SQL
#safe-updates
EOF
# Replace the empty placeholder lines above with the configured values.
sed -i "s#^basedir =.*#basedir = $INSTALL_PATH#;
s#^datadir =.*#datadir = $DATA_FILE_PATH#;
s#^port =.*#port = $PORT#;
s#^socket =.*#socket = $SOCKET_PATH/mysql.sock#;
" $INSTALL_PATH/my.cnf
}
# Remove any packaged mysql, refresh yum, and install the build toolchain;
# then make sure cmake is new enough.
install_dependences () {
rpm -qa | grep mysql | xargs rpm -e --nodeps
yum clean all
yum makecache
yum -y install gcc gcc-c++ ncurses-devel perl make cmake autoconf
[[ $? -eq 0 ]] && green_echo "\tINFO\t gcc gcc-c++ ncurses-devel perl make等依赖安装完毕!"
[[ $? -ne 0 ]] && red_echo "\tERROR\t YUM安装依赖出错,依赖未正确安装,返回错误码 = $? \n"
_check_cmake_version
# _install_boost
}
# Create the mysql user/group and directory tree, then add the MySQL bin
# directory to PATH in /etc/profile (idempotently, guarded by grep).
edit_user_profile () {
_add_user_group mysql mysql
_init_dirtree mysql
grep "PATH=\$PATH:$INSTALL_PATH/bin" /etc/profile > /dev/null 2>&1 || echo "PATH=\$PATH:$INSTALL_PATH/bin" >> /etc/profile
grep 'export PATH' /etc/profile > /dev/null 2>&1 || echo "export PATH" >> /etc/profile
[[ $? -eq 0 ]] && green_echo "\tINFO\t Mysql用户环境变量等修改完成!"
}
# Fetch one installer tarball ($1) from SVN into $RESP_PATH unless already
# present.
# NOTE(review): SKIP_DOWNLOAD_FILES == 0 means "skip the download" here —
# inverted relative to the usual boolean convention; confirm with callers.
get_install_pkgs() {
local pkgs_name=$1
if [[ $SKIP_DOWNLOAD_FILES == 0 ]]; then
yellow_echo "\tSKIP\t 跳过下载安装文件$pkgs_name,无需下载! \n"
else
yum -y install subversion
if [[ -f $RESP_PATH/$MYSQL_PKGS ]]; then
green_echo "\tINFO\t 安装文件已存在,无需下载! \n"
else
svn co --username=${SVN_USER} --password=${SVN_PASSWD} --force --no-auth-cache \
--depth=empty ${SVN_ADDRESS} ${RESP_PATH}
cd ${RESP_PATH} && svn up --username=${SVN_USER} --password=${SVN_PASSWD} --force --no-auth-cache $pkgs_name
fi
[[ $? -eq 0 ]] && green_echo "\tINFO\t $pkgs_name安装文件已经下载完毕!"
[[ $? -ne 0 ]] && red_echo "\tERROR\t SVN出现问题,$pkgs_name安装文件未正确下载,返回错误码 = $? \n"
fi
}
cmake_source_code () {
    # Unpack the MySQL source tarball into /tmp/mysql_temp, configure with
    # cmake, and build/install it.
    # Globals read: RESP_PATH, MYSQL_PKGS, INSTALL_PATH, SOCKET_PATH,
    #               DATA_FILE_PATH, PORT
    cd $RESP_PATH && tar zxf $MYSQL_PKGS -C /tmp/mysql_temp
    local mysql_path=$(echo $MYSQL_PKGS | cut -d . -f 1-3)
    cd /tmp/mysql_temp/$mysql_path || red_echo "\tERROR\t $mysql_path目录不存在,返回错误码 = $? \n"
    # BUG FIX: cmake requires a source-directory argument; without the '.'
    # it exits with a usage error before configuring anything.
    cmake . -DCMAKE_INSTALL_PREFIX=$INSTALL_PATH \
    -DMYSQL_UNIX_ADDR=$SOCKET_PATH/mysql.sock \
    -DDEFAULT_CHARSET=utf8 \
    -DDEFAULT_COLLATION=utf8_general_ci \
    -DWITH_EXTRA_CHARSETS:STRING=utf8,gbk \
    -DWITH_INNOBASE_STORAGE_ENGINE=1 \
    -DWITH_ARCHIVE_STORAGE_ENGINE=1 \
    -DWITH_BLACKHOLE_STORAGE_ENGINE=1 \
    -DMYSQL_DATADIR=$DATA_FILE_PATH \
    -DMYSQL_TCP_PORT=$PORT \
    -DENABLE_DOWNLOADS=1
    #-DWITH_BOOST=/usr/local/boost
    [[ $? -eq 0 ]] && green_echo "\tINFO\t Cmake编译完成,即将开始安装MySQL!"
    [[ $? -ne 0 ]] && red_echo "\tERROR\t Cmake出错,程序终止,返回错误码 = $? \n"
    make && make install
    [[ $? -eq 0 ]] && green_echo "\tINFO\t MySQL安装完成,即将开始配置MySQL!"
    [[ $? -ne 0 ]] && red_echo "\tERROR\t MySQL安装失败,程序终止,返回错误码 = $? \n"
}
init_mysql () {
    # Render my.cnf, fix ownership, initialize the data directory, register
    # the init script, start the server, and set the root password.
    _render_tpl
    _chmod_dirtree
    cd $INSTALL_PATH/scripts/
    ./mysql_install_db --user=mysql --datadir=$DATA_FILE_PATH --basedir=$INSTALL_PATH
    [[ $? -eq 0 ]] && green_echo "\tINFO\t MySQL初始化完成!"
    [[ $? -ne 0 ]] && red_echo "\tERROR\t MySQL初始化失败,程序终止,返回错误码 = $? \n"
    cp $INSTALL_PATH/support-files/mysql.server /etc/rc.d/init.d/mysqld
    chmod +x /etc/rc.d/init.d/mysqld
    chkconfig --add mysqld
    # BUG FIX: the init script is registered as "mysqld" above, but the
    # original enabled a non-existent "mysql" service.
    chkconfig mysqld on
    cp -af $INSTALL_PATH/my.cnf /etc/my.cnf
    /etc/rc.d/init.d/mysqld start
    [[ $? -eq 0 ]] && green_echo "\tINFO\t MySQL启动完成!"
    [[ $? -ne 0 ]] && red_echo "\tERROR\t MySQL启动失败,程序终止,返回错误码 = $? \n"
    $INSTALL_PATH/bin/mysqladmin -u root password "$ROOT_PASSWD"
    [[ $? -eq 0 ]] && green_echo "\tSUCCESS\t 现在你可以使用root用户和$ROOT_PASSWD密码来登录MySQL了!"
    rm -rf /tmp/mysql_temp
}
# Parse a single pseudo-JSON argument of the form
#   '{'key': 'value','key2': 'value2',...}'  (exactly 11 key/value pairs)
# by stripping braces, splitting on commas, and dispatching each key.
# NOTE(review): the sed scripts use '\' as the s-command delimiter and keys
# retain their surrounding quotes after cut — verify this parses correctly
# with the sed/bash versions in use (see the sample invocation at the end).
params=$(echo $@ | sed 's\{\\g' | sed 's\}\\g' )
num=$(echo $params |grep -o ': ' |wc -l)
old_ifs=$IFS
IFS=','
if [[ "$num" -eq 11 ]]; then
for param in $params; do
key=$(echo $param | cut -d ':' -f 1)
value=$(echo $param | cut -d ':' -f 2-)
value=${value#* }
case $key in
svn_user) SVN_USER=$value ;;
svn_passwd) SVN_PASSWD=$value ;;
svn_address) SVN_ADDRESS=$value ;;
mysql_pkgs) MYSQL_PKGS=$value ;;
resp_dir) RESP_PATH=${value%*/} ;;
install_path) INSTALL_PATH=${value%*/} ;;
data_file_path) DATA_FILE_PATH=${value%*/} ;;
port) PORT=$value ;;
socket_path) SOCKET_PATH=$value ;;
root_passwd) ROOT_PASSWD=$value ;;
skip_download_files) SKIP_DOWNLOAD_FILES=$value ;;
*) red_echo "\tERROR\t 接收到未知参数key = $key\n"
exit 1
;;
esac
done
else
red_echo "\tERROR\t参数个数错误, 当前参数为 = $@!\n"
exit 1
fi
IFS=$old_ifs
# Main sequence: dependencies -> sources -> user/dirs -> build -> init.
install_dependences
sleep 3
get_install_pkgs $MYSQL_PKGS
sleep 3
edit_user_profile
sleep 3
cmake_source_code
sleep 3
init_mysql
# '{'svn_user': 'mashaokui','svn_passwd': 'fcy3I4yB','svn_address': 'svn://192.168.50.221/soft_2018','mysql_pkgs': 'mysql-5.5.62.tar.gz','resp_dir': '/tmp/resp','install_path': '/usr/local/mysql','data_file_path': '/usr/local/mysql/data','port': '3306','socket_path': '/usr/local/mysql','root_passwd': 'mysql.Asd', 'skip_download_files': '0'}'
| true
|
4cf5f674f1239ced52ce448e0902940103fcc90b
|
Shell
|
mariaines/localstack-dynamo-timeout
|
/configure-localstack.sh
|
UTF-8
| 3,923
| 3.578125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Runnable on dev box or docker
# Build the lambda bundle: pip deps (cached via a DONE marker) plus sources.
# NOTE(review): this is /bin/sh, but '-nt' in test is not strictly POSIX —
# confirm against the shells actually used.
EXECDIR=`dirname $0`
EXECDIR=`(cd $EXECDIR; pwd)`
# NOTE(review): ROOTDIR is identical to EXECDIR here — confirm intent.
ROOTDIR=`(cd $EXECDIR; pwd)`
BUILDDIR=/tmp/.lambda-build-2
# Inside docker (/.dockerenv exists) localstack is reachable by service name;
# on a dev box require a running localstack container and use localhost.
if [ -f /.dockerenv ]
then
HOST=localstack
else
if [ -z "`docker ps | grep localstack/localstack`" ]
then
echo "Localstack is not currently running"
exit 1
fi
HOST=localhost
fi
if [ ! -d "$BUILDDIR" ]
then
mkdir $BUILDDIR
fi
# Rebuild deps when there is no DONE marker or requirements.txt is newer.
if [ ! -e $BUILDDIR/DONE -o $ROOTDIR/requirements.txt -nt $BUILDDIR/DONE ]
then
if [ "$HOST" != "localstack" ]
then
echo "Dependencies must be built on Linux Docker image"
echo "Run docker-compose down/up to rebuild dependencies"
exit 1
fi
echo Rebuilding dependencies
rm -fr $BUILDDIR/*
mkdir -p $BUILDDIR/deps
pip install -t $BUILDDIR -r $ROOTDIR/requirements.txt
touch $BUILDDIR/DONE
else
echo Using cached dependencies
fi
echo Copying source files
(cd $ROOTDIR; tar -c -X $EXECDIR/lambda-exclude.lst .) | (cd $BUILDDIR; tar -x)
echo Lambda function built
#
# Initialize DynamoDB
#
$EXECDIR/wait-for-localstack.sh \
TableNames \
aws dynamodb --endpoint-url http://$HOST:4566 --cli-connect-timeout 1 list-tables

# Create and configure table2 if it doesn't already exist
# BUG FIX: ">& /dev/null" is a bash/csh-ism; under #!/bin/sh (e.g. dash) it
# is not valid — use the POSIX form instead.
aws dynamodb describe-table \
--endpoint-url=http://$HOST:4566 \
--table-name table2 > /dev/null 2>&1
if [ $? -ne 0 ]
then
aws dynamodb create-table \
--endpoint-url=http://$HOST:4566 \
--table-name table2 \
--attribute-definitions \
AttributeName=_pk,AttributeType=S \
AttributeName=_sk,AttributeType=S \
AttributeName=_gsi1_pk,AttributeType=S \
AttributeName=_gsi1_sk,AttributeType=S \
AttributeName=_gsi2_pk,AttributeType=S \
AttributeName=_gsi2_sk,AttributeType=S \
--key-schema \
AttributeName=_pk,KeyType=HASH \
AttributeName=_sk,KeyType=RANGE \
--provisioned-throughput \
ReadCapacityUnits=5,WriteCapacityUnits=5 \
--global-secondary-indexes \
'IndexName=_gsi1_pk-_gsi1_sk-index,KeySchema=[{AttributeName=_gsi1_pk,KeyType=HASH},{AttributeName=_gsi1_sk,KeyType=RANGE}],Projection={ProjectionType=ALL},ProvisionedThroughput={ReadCapacityUnits=5,WriteCapacityUnits=5}'\
'IndexName=_gsi2_pk-_gsi2_sk-index,KeySchema=[{AttributeName=_gsi2_pk,KeyType=HASH},{AttributeName=_gsi2_sk,KeyType=RANGE}],Projection={ProjectionType=ALL},ProvisionedThroughput={ReadCapacityUnits=5,WriteCapacityUnits=5}'\
--stream-specification \
StreamEnabled=true,StreamViewType=NEW_AND_OLD_IMAGES
aws dynamodb update-time-to-live \
--endpoint-url=http://$HOST:4566 \
--table-name table2 \
--time-to-live-specification Enabled=true,AttributeName=_ttl
echo "DynamodB table table2 created"
else
echo "DynamodB table table2 already exists"
fi
#
# BEGIN: CONFIGURATION OF DYNAMO DB STREAM LISTENER LAMBDA
#
# Create the stream-listener lambda on first run; afterwards just push new
# code. S3Bucket="__local__" is a localstack extension that mounts the
# local build directory instead of using real S3.
if [ -z "`aws lambda --endpoint-url http://$HOST:4566 list-functions | grep DynamoDbStreamFunction`" ]
then
aws lambda create-function \
--endpoint-url http://$HOST:4566 \
--function-name DynamoDbStreamFunction \
--runtime python3.8 \
--role arn:aws:iam:isignored \
--handler db_stream.stream_handler \
--code S3Bucket="__local__",S3Key="$BUILDDIR" \
--timeout 600 \
--publish
aws lambda create-event-source-mapping \
--endpoint-url http://$HOST:4566 \
--function-name DynamoDbStreamFunction \
--batch-size 10 --starting-position LATEST \
--event-source-arn arn:aws:dynamodb:us-west-2:000000000000:table/table2 \
--starting-position LATEST
else
aws lambda --endpoint-url http://$HOST:4566 update-function-code \
--function-name DynamoDbStreamFunction \
--s3-bucket="__local__" \
--s3-key="$BUILDDIR" \
--publish
fi
echo "DynamoDB Listener Lambda configuration complete"
echo "LOCALSTACK FULLY CONFIGURED"
| true
|
5cdcd841efb90072fbfbd6522db91f86b7c184b8
|
Shell
|
michalnicp/dotfiles
|
/install.sh
|
UTF-8
| 3,818
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
# Arch Linux workstation bootstrap: mirrors, packages, services, dotfiles.
set -euo pipefail
if [ "${1:-}" = "--debug" ] || [ "${1:-}" = "-d" ]; then
set -x
fi
# colors
red='\033[0;31m'
green='\033[0;32m'
yellow='\033[0;33m'
nc='\033[0m'
# Refuse to run as root: everything privileged goes through sudo explicitly.
if [[ $EUID -eq 0 ]]; then
printf "${red}Please run as normal user.${nc}" >&2
# echo "This script must be run with sudo" 1>&2
exit 1
fi
# ask for sudo password, see sudo(8)
sudo -v
# while true; do sleep 1m; sudo -nv; done &
# update mirrorlist
# curl https://www.archlinux.org/mirrorlist/?country=CA&country=US&protocol=https&use_mirror_status=on
# use reflector to rank the fastest mirrors
sudo pacman -S --noconfirm reflector
sudo reflector --protocol http --latest 30 --number 20 --sort rate --save /etc/pacman.d/mirrorlist
# full system upgrade
sudo pacman -Syu --noconfirm
packages=(
base-devel
git
openssh
bash-completion
xorg xorg-xinit
xdg-user-dirs
# fonts
fontconfig ttf-dejavu ttf-liberation noto-fonts noto-fonts-cjk noto-fonts-emoji
# window manager
bspwm sxhkd
# image viewer
feh
# editor
neovim
# terminal emulator
alacritty
# file manager
ranger
# browser
firefox
)
# install packages
# Quoted "${packages[@]}" passes each element as its own word (the previous
# unquoted ${packages[*]} re-split the joined string through IFS).
sudo pacman -S --noconfirm "${packages[@]}"
# install yay (AUR helper) from a clean checkout in /tmp
cd /tmp
rm -rf yay
git clone https://aur.archlinux.org/yay.git
cd yay
makepkg -si --noconfirm
# install aur packages
aur_packages=(
lemonbar-git
)
yay -Syu
yay -S --noconfirm "${aur_packages[@]}"
# reflector
# Install a oneshot systemd unit that refreshes the pacman mirrorlist.
sudo bash -c "cat > /etc/systemd/system/reflector.service << EOF
[Unit]
Description=Pacman mirrorlist update
Requires=network-online.target
After=network-online.target
[Service]
Type=oneshot
ExecStart=/usr/bin/reflector --protocol http --latest 30 --number 20 --sort rate --save /etc/pacman.d/mirrorlist
[Install]
RequiredBy=multi-user.target
EOF"
sudo systemctl daemon-reload
sudo systemctl enable reflector
# sshd service
# Enable X11 forwarding by uncommenting/replacing the directive in place.
sudo sed -i '/^#X11Forwarding/c\X11Forwarding yes' /etc/ssh/sshd_config
sudo systemctl enable sshd
sudo systemctl start sshd
# disable embedded bitmap for all fonts, enable sub-pixel RGB rendering, and enable the LCD filter
# which is designed to reduce colour fringing when subpixel rendering is used.
mkdir -p ~/.config/fontconfig/conf.d
ln -sf /etc/fonts/conf.avail/70-no-bitmaps.conf ~/.config/fontconfig/conf.d/70-no-bitmaps.conf
ln -sf /etc/fonts/conf.avail/10-sub-pixel-rgb.conf ~/.config/fontconfig/conf.d/10-sub-pixel-rgb.conf
ln -sf /etc/fonts/conf.avail/11-lcdfilter-default.conf ~/.config/fontconfig/conf.d/11-lcdfilter-default.conf
mkdir -p ~/.config/fontconfig
ln -s ~/dotfiles/.config/fontconfig/fonts.conf ~/.config/fontconfig/fonts.conf
# dotfiles
cd ~
git clone https://github.com/michalnicp/dotfiles.git
# bash
ln -s ~/dotfiles/.bashrc ~/.bashrc
ln -s ~/dotfiles/.bash_profile ~/.bash_profile
# bin
ln -s ~/dotfiles/bin ~/bin
# xinit
ln -s ~/dotfiles/.xinitrc ~/.xinitrc
# neovim
curl -fLo ~/.local/share/nvim/site/autoload/plug.vim --create-dirs \
https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim
mkdir -p ~/.config/nvim
ln -sf ~/dotfiles/.config/nvim/init.vim ~/.config/nvim/init.vim
# install vim plugins for the first time
nvim -u <(sed -n '/^call plug#begin/,/^call plug#end/p' ~/.config/nvim/init.vim) +PlugInstall +qall
# alacritty
mkdir -p ~/.config/alacritty
ln -sf ~/dotfiles/.config/alacritty/alacritty.yml ~/.config/alacritty/alacritty.yml
# git
git config --global core.excludesfile '~/.gitignore_global'
ln -sf ~/dotfiles/.gitignore_global ~/.gitignore_global
# bspwm
mkdir -p ~/.config/sxhkd
ln -sf ~/dotfiles/.config/sxhkd/sxhkdrc ~/.config/sxhkd/sxhkdrc
mkdir -p ~/.config/bspwm
ln -sf ~/dotfiles/.config/bspwm/bspwmrc ~/.config/bspwm/bspwmrc
# mkdir -p ~/.config
ln -sf ~/dotfiles/.config/user-dirs.dirs ~/.config/user-dirs.dirs
sudo xdg-user-dirs-update
| true
|
12035b0fc8dc41595c45e1fbf6c7327709c56cec
|
Shell
|
romanblanco/dotfiles-1
|
/ManageIQ/release-ui-components
|
UTF-8
| 874
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
set -x
set -e
cd ~/ui-components
[ `git branch-name` != 'gaprindashvili' ] && exit 1
git up
TAG=$(npm version patch)
git tag -d "$TAG"
VER=$(echo "$TAG" | sed 's/^v//')
echo TAG: "$TAG", VER: "$VER"
[ -z "$TAG" ] && exit 2
[ -z "$VER" ] && exit 3
bower version "$VER"
git tag -d "$TAG"
sed -i 's/^\( "version": \)".*",$/\1"'"$VER"'",/' bower.json
EDITOR=true git commit --amend -a
git reset HEAD~2
git commit -a -m "$VER"
rm -rf node_modules
yarn
git push upstream gaprindashvili
npm publish
git checkout -b tmp-"$VER"
rm -rf dist
yarn run build
git add -f dist/css/ui-components.css dist/css/ui-components.css.map
git add -f dist/js/ui-components.js dist/js/ui-components.js.map
git commit -m "bower build for $TAG"
git tag "$TAG" # create the tag on the bower commit
git checkout gaprindashvili
git push upstream --tags
git branch -D tmp-"$VER"
| true
|
ba9717c7ca38aa03b7f6674bfaa67b4833a269c0
|
Shell
|
krishnodey/Shell-Scripting-Exercises
|
/ifelse4.sh
|
UTF-8
| 128
| 2.875
| 3
|
[] |
no_license
|
read a
read b
if(( $( echo "$a == $b" | bc -l ))) && (( $(echo "$a == 3.00" | bc -l )))
then echo Equal
else echo Not equaql
fi
| true
|
5ad8ba7df4fecd8138a11a413339c34f3a116c8f
|
Shell
|
jfchevrette/kubernetes-nginx-multisite-example
|
/startup.sh
|
UTF-8
| 622
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/sh
#
# This script create docker images and deploy a local cluster
set -e
echo "Building Docker images..."
docker build -t site-one-container site-one.com
docker build -t site-two-container site-two.com
docker build -t proxy-container proxy
echo "Creating services..."
kubectl create -f definitions/proxy_svc.yaml
kubectl create -f definitions/site-one_svc.yaml
kubectl create -f definitions/site-two_svc.yaml
echo "Creating replications controllers..."
kubectl create -f definitions/proxy_rc.yaml
kubectl create -f definitions/site-one_rc.yaml
kubectl create -f definitions/site-two_rc.yaml
echo "All done!"
| true
|
90b5f4519e3ee1941f0feae8cad2981000579f1d
|
Shell
|
svrana/dotfiles
|
/installers/neovim.sh
|
UTF-8
| 2,004
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
mkdir -p ~/.config/nvim/{bak,swp}
mkdir -p ~/.config/nvim/after/ftplugin
mkdir -p ~/.config/nvim/bundle
mkdir -p ~/.config/nvim/autoload
if [ ! -f /usr/bin/nvim ]; then
#NVIM_VERSION="v0.4.3"
NVIM_VERSION="nightly"
curl -Lo "$HOME/Downloads/nvim" https://github.com/neovim/neovim/releases/download/${NVIM_VERSION}/nvim.appimage
chmod +x "$HOME/Downloads/nvim"
sudo mv "$HOME/Downloads/nvim" /usr/bin/nvim
fi
function _link_ftplugins() {
local i
for i in ${RCS}/nvim/ftplugin/* ; do
i=$(basename "$i")
ln -sf "${RCS}/nvim/ftplugin/$i" ~/.config/nvim/after/ftplugin/"$i"
done
}
_link_ftplugins
PLUG_DIR="$HOME/.local/share/nvim/site/autoload"
if [ ! -f "$PLUG_DIR/plug.vim" ]; then
curl -fLo "$PLUG_DIR/plug.vim" --create-dirs \
https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim
estatus "Cloned Plug"
fi
if [ ! -f ~/.config/nvim/init.vim ]; then
sudo update-alternatives --install /usr/bin/vi vi /usr/bin/nvim 60
sudo update-alternatives --install /usr/bin/vim vim /usr/bin/nvim 60
sudo update-alternatives --install /usr/bin/editor editor /usr/bin/nvim 60
fi
ln -sf "${RCS}/nvim/vimrc" ~/.config/nvim/init.vim
function _go_config() {
if [ -n "$GOPATH" -a ! -d "$GOPATH/src/github.com/nsf/gocode" ]; then
go get -u github.com/nsf/gocode
"$GOPATH/src/github.com/nsf/gocode/nvim/symlink.sh" > /dev/null 2>&1
fi
}
_go_config
WORKON_HOME=${WORKON_HOME-~/.virtualenvs/}
if [ ! -d ~/.virtualenvs/neovim3 ]; then
virtualenv -p /usr/bin/python3 "$WORKON_HOME/neovim3"
"$WORKON_HOME/neovim3/bin/pip3" install neovim
"$WORKON_HOME/neovim3/bin/pip3" install jedi
# for https://github.com/Chiel92/vim-autoformat
"$WORKON_HOME/neovim3/bin/pip3" install pynvim
fi
# if [ ! -d ~/.virtualenvs/neovim2 ]; then
# virtualenv -p /usr/bin/python2 "$WORKON_HOME/neovim2"
# "$WORKON_HOME/neovim2/bin/pip2" install neovim
# fi
nvim --headless +PlugInstall +qall
| true
|
256d64fe58a483dfebb34351a1e3d00707ed178d
|
Shell
|
diabonas/tpm2-tools
|
/test/integration/fapi/fapi-key-change-auth.sh
|
UTF-8
| 3,196
| 3.296875
| 3
|
[] |
permissive
|
set -e
source helpers.sh
start_up
CRYPTO_PROFILE="RSA"
setup_fapi $CRYPTO_PROFILE
function cleanup {
tss2 delete --path=/
shut_down
}
trap cleanup EXIT
PW1=abc
PW2=def
KEY_PATH=HS/SRK/myRSASign
DIGEST_FILE=$TEMP_DIR/digest.file
SIGNATURE_FILE=$TEMP_DIR/signature.file
PUBLIC_KEY_FILE=$TEMP_DIR/public_key.file
IMPORTED_KEY_NAME=importedPubKey
PADDINGS="RSA_PSS"
set -x
tss2 provision
echo 0123456789012345678 > $DIGEST_FILE
tss2 createkey --path=$KEY_PATH --type="noDa, sign" --authValue=$PW1
if [ "$CRYPTO_PROFILE" = "RSA" ]; then
expect <<EOF
# Try interactive prompt
spawn tss2 sign --keyPath=$KEY_PATH --padding=$PADDINGS --digest=$DIGEST_FILE \
--signature=$SIGNATURE_FILE --publicKey=$PUBLIC_KEY_FILE
expect "Authorize object: "
send "$PW1\r"
set ret [wait]
if {[lindex \$ret 2] || [lindex \$ret 3] != 0} {
send_user "Using interactive prompt has failed\n"
exit 1
}
EOF
else
expect <<EOF
# Try interactive prompt
spawn tss2 sign --keyPath=$KEY_PATH --digest=$DIGEST_FILE \
--signature=$SIGNATURE_FILE --publicKey=$PUBLIC_KEY_FILE
expect "Authorize object: "
send "$PW1\r"
set ret [wait]
if {[lindex \$ret 2] || [lindex \$ret 3] != 0} {
send_user "Using interactive prompt has failed\n"
exit 1
}
EOF
fi
expect <<EOF
# Try interactive prompt with 2 different passwords
spawn tss2 changeauth --entityPath=$KEY_PATH
expect "Authorize object Password: "
send "1\r"
expect "Authorize object Retype password: "
send "2\r"
expect {
"Passwords do not match." {
} eof {
send_user "Expected password mismatch, but got nothing, or
rather EOF\n"
exit 1
}
}
set ret [wait]
if {[lindex \$ret 2] || [lindex \$ret 3] != 1} {
send_user "Using interactive prompt with different passwords
has not failed\n"
exit 1
}
EOF
expect <<EOF
# Try interactive prompt
spawn tss2 changeauth --entityPath=$KEY_PATH --authValue=$PW2
expect "Authorize object: "
send "$PW1\r"
set ret [wait]
if {[lindex \$ret 2] || [lindex \$ret 3] != 0} {
send_user "Using interactive prompt has failed\n"
exit 1
}
EOF
if [ "$CRYPTO_PROFILE" = "RSA" ]; then
expect <<EOF
# Check if system asks for auth value
spawn tss2 sign --keyPath=$KEY_PATH --padding=$PADDINGS --digest=$DIGEST_FILE \
--signature=$SIGNATURE_FILE --publicKey=$PUBLIC_KEY_FILE --force
expect {
"Authorize object: " {
} eof {
send_user "The system has not asked for password\n"
exit 1
}
}
send "$PW2\r"
set ret [wait]
if {[lindex \$ret 2] || [lindex \$ret 3]} {
send_user "Passing password has failed\n"
exit 1
}
EOF
else
expect <<EOF
# Check if system asks for auth value
spawn tss2 sign --keyPath=$KEY_PATH --digest=$DIGEST_FILE \
--signature=$SIGNATURE_FILE --publicKey=$PUBLIC_KEY_FILE --force
expect {
"Authorize object: " {
} eof {
send_user "The system has not asked for password\n"
exit 1
}
}
send "$PW2\r"
set ret [wait]
if {[lindex \$ret 2] || [lindex \$ret 3]} {
send_user "Passing password has failed\n"
exit 1
}
EOF
fi
exit 0
| true
|
08685d6fc872c324d8fa85101eb476fde81462b1
|
Shell
|
brisbane/adminscripts
|
/wntests/cvmfs
|
UTF-8
| 207
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
TESTNAME=cvmfs
[[ $1 ]] && TESTNAME=$1
x=`ls /cvmfs/lhcb.cern.ch/ | grep etc`
res=$?
echo "TEST: $TESTNAME - result: "
if [ $res -eq 0 ];then
echo OK
else
echo FAIL
fi
exit $res
| true
|
6ed40dc3a284f4da72634fb64acbdf96a15bffdd
|
Shell
|
LucaFos/duplifinder
|
/duplifinder
|
UTF-8
| 2,573
| 4.25
| 4
|
[] |
no_license
|
#!/bin/bash
# Reset all variables that might be set
file=""
verbose=0
origdir="$PWD"
# this is the help string that will be shown if --help is triggered
help="DupliFinder 0.1\n\n
Copyleft (c) Luca Foschiani, Claudio Desideri\n\n
---------------------------------------------\n\n
Usage:\n
duplifinder [-f -d -t] [--file FILE] [--directory DIRECTORY] [--threshold THRESHOLD] [--help]\n"
#parameter parsing
while :
do
case $1 in
-h | --help | -\?)
echo -e $help
exit 0
;;
-f | --file)
file=$2
# checking if file is actually a correct file
if [ ! -f "$file" ]; then
echo "ERROR: FILE must be a valid text file. See --help" >&2
exit 1
fi
shift 2
;;
--file=*)
file=${1#*=} # delete everything up till "="
if [ ! -f "$file" ]; then
echo "ERROR: FILE must be a valid text file. See --help" >&2
exit 1
fi
shift
;;
-d | --directory)
directory=$2
shift 2
;;
--directory=*)
directory=${1#*=} # delete everything up till "="
shift
;;
-t | --threshold)
#FIXME: same as file above
threshold=$2
shift 2
;;
--threshold=*)
threshold=${1#*=} # delete everything up till "="
shift
;;
--) # End of all options
shift
break
;;
-*)
echo "WARN: Unknown option (ignored): $1" >&2
shift
;;
*) # no more options. Stop while loop
break
;;
esac
done
# file is needed. Exactly a text file is needed in input.
if [ ! "$file" ]; then
echo "ERROR: option '--file FILE' not given. See --help" >&2
exit 1
else
filetype=$(file --mime-type "$file" |grep text|wc -l)
if [ $filetype = 0 ] ; then
echo "ERROR: file is not recognized as text"
exit 1
else
filelength=$(wc -l "$file" | cut -d' ' -f1 )
fi
fi
# directory check and, eventually go to that directory
if [ ! "$directory" ] ; then
# setting default value
# (current directory)
directory=.
else
if [ -d "$directory" ] ; then
cd "$directory"
fi
fi
if [ ! "$threshold" ] ; then
# setting default value
# (file must be the same as the input file)
threshold=99
fi
# Rest of the program here.
# If there are input files (for example) that follow the options, they.
# will remain in the "$@" positional parameters.
tree=$(find . -type f )
# read one line each loop
echo "$tree" | while read against
do
if [ -f "$against" ]; then
similarity=$( comm -12 $file $against 2>/dev/null | wc -l )
ratio=$((($similarity*100/$filelength*100)/100))
if [ $ratio -ge $threshold ] ; then
echo -n "$against is "
echo -n "$ratio"
echo -e "% similar"
fi
fi
done
cd "$origdir"
| true
|
c0393ef321a4ac67f7120825431dd6601716e87f
|
Shell
|
eliwind/dotfiles
|
/zsh/.zshenv
|
UTF-8
| 533
| 2.546875
| 3
|
[] |
no_license
|
if [ -x /usr/libexec/path_helper ]; then
eval `/usr/libexec/path_helper -s`
fi
# Let pip find native libs it needs
export LDFLAGS="-L/usr/local/lib -L/usr/local/opt/openssl/lib -L/usr/local/opt/zlib/lib"
export CPPFLAGS="-I/usr/local/include -I/usr/local/opt/openssl/include -I/usr/local/opt/zlib/include"
export PKG_CONFIG_PATH="/usr/local/lib/pkgconfig:/usr/local/opt/openssl/lib/pkgconfig:/usr/local/opt/zlib/lib/pkgconfig"
if command -v xcrun >/dev/null 2>&1; then
export CFLAGS="-I$(xcrun --show-sdk-path)/usr/include"
fi
| true
|
a8642f698bf73b31b016fc9533cf58b472f4100a
|
Shell
|
primeroz/vault-operator-kind-examples
|
/common/vault-operator.sh
|
UTF-8
| 3,443
| 2.765625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
set -e
export KUBECONFIG="$(kind get kubeconfig-path --name="vault")"
BV_VERSION=${BV_VERSION:-0.4.17-rc.3}
echo "Creating Operator"
cat <<EOF | kubectl apply -f -
---
apiVersion: v1
kind: Namespace
metadata:
annotations: {}
labels:
project: vault
name: vault
---
EOF
sleep 2
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: ServiceAccount
metadata:
name: vault-operator
namespace: vault
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: vault-operator
namespace: vault
rules:
- apiGroups:
- ""
resources:
- pods
- services
- configmaps
- secrets
verbs:
- '*'
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- list
- get
- create
- update
- watch
- apiGroups:
- apps
resources:
- replicasets
verbs:
- get
- apiGroups:
- apps
resources:
- deployments
- statefulsets
verbs:
- '*'
- apiGroups:
- monitoring.coreos.com
resources:
- servicemonitors
verbs:
- update
- list
- get
- create
- apiGroups:
- vault.banzaicloud.com
resources:
- '*'
verbs:
- '*'
- apiGroups:
- etcd.database.coreos.com
resources:
- "*"
verbs:
- "*"
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: vault-operator
namespace: vault
subjects:
- kind: ServiceAccount
name: vault-operator
namespace: vault
roleRef:
kind: Role
name: vault-operator
apiGroup: rbac.authorization.k8s.io
EOF
cat <<EOF | kubectl apply -f -
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: vaults.vault.banzaicloud.com
spec:
group: vault.banzaicloud.com
names:
kind: Vault
listKind: VaultList
plural: vaults
singular: vault
scope: Namespaced
version: v1alpha1
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: vault-operator
namespace: vault
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
name: vault-operator
template:
metadata:
labels:
name: vault-operator
spec:
serviceAccountName: vault-operator
containers:
- name: vault-operator
image: banzaicloud/vault-operator:$BV_VERSION
imagePullPolicy: IfNotPresent
ports:
- containerPort: 8083
name: metrics
command:
- vault-operator
livenessProbe:
httpGet:
path: /
port: 8080
initialDelaySeconds: 4
periodSeconds: 10
failureThreshold: 1
readinessProbe:
httpGet:
path: /ready
port: 8080
initialDelaySeconds: 4
periodSeconds: 10
failureThreshold: 1
env:
- name: WATCH_NAMESPACE
# Use this to watch all namespaces
# value: ""
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: OPERATOR_NAME
value: "vault-operator"
EOF
| true
|
ab9db471ff110e3a7ad2c791f87785cc3a045971
|
Shell
|
yjhcjykwbk-jlsec/mylin
|
/.sh/pdf2img.sh
|
UTF-8
| 313
| 3.5
| 4
|
[] |
no_license
|
echo "pdf2img [xx.pdf] [pagenum]"
i=1
while [ $i -le $2 ]
do
echo "pdf2svg $1 $i.svg $i"
i=`expr $i + 1`
done;
[ $2 != "" ] || exit
read -p "trans now, Y/N ?" choice
[ $choice != "Y" ] && exit
i=1
while [ $i -le $2 ]
do
pdf2svg $1 $i.svg $i
convert $i.svg $i.png
rm $i.svg
i=`expr $i + 1`
done
| true
|
9ca0cbde549c43801d961700e5b901e52a0f65ec
|
Shell
|
xraech/dotfiles
|
/installed/suckless/dwm/autostart.sh
|
UTF-8
| 662
| 3.578125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
dte(){
dte="$(date +"%A, %B %d | 🕒 %l:%M%p")"
echo -e "📅 $dte"
}
upd(){
upd=`checkupdates | wc -l`
echo -e "⟳ $upd updates"
}
mem(){
mem=`free | awk '/Mem/ {printf "%d MiB/%d MiB\n", $3 / 1024.0, $2 / 1024.0 }'`
echo -e " $mem"
}
cpu(){
read cpu a b c previdle rest < /proc/stat
prevtotal=$((a+b+c+previdle))
sleep 0.5
read cpu a b c idle rest < /proc/stat
total=$((a+b+c+idle))
cpu=$((100*( (total-prevtotal) - (idle-previdle) ) / (total-prevtotal) ))
echo -e "💻 $cpu% cpu"
}
while true; do
xsetroot -name "$(cpu) | $(mem) | $(dte)"
sleep 10s # Update time every ten seconds
done &
| true
|
8afac9542c2d1896868f39a41b90be2971d7eaca
|
Shell
|
backlighttw/lg_module
|
/lg_module/run_windows.sh
|
UTF-8
| 2,402
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
#!/bin/bash
date_str=$(date +%Y_%m_%d_%H_%M_%S)
PROCESS_COUNT=1
PREFIX="_lg_module_${date_str}"
OUTPUT_FOLDER=${date_str}
is_verbose=0
is_parallel=1
function print_help {
echo "-v is_verbose"
echo "-s is_single"
}
for para in $@
do
if [ $para == "-v" ]; then
is_verbose=1
fi
if [ $para == "-s" ]; then
is_parallel=0
fi
if [ $para == "help"]; then
print_help
exit 0
fi
done
if [ ${is_parallel} == "1" ]; then
echo "parallel"
else
echo "sequential"
fi
if [ ${is_verbose} == "1" ]; then
echo "verbose"
fi
cd output
mkdir $date_str
START=$(date +%s)
ps aux | grep ray_handler | awk '{print $1}' | xargs kill -9 &> /dev/null
if [ ${is_verbose} == "1" ]; then
./data_preprocessor ${PROCESS_COUNT} ${OUTPUT_FOLDER}
else
./data_preprocessor ${PROCESS_COUNT} ${OUTPUT_FOLDER} > /dev/null
fi
# for i in { 1..$PROCESS_COUNT }
i=0
while [ $i -lt $PROCESS_COUNT ]
do
echo "./ray_handler ${OUTPUT_FOLDER} ray_handler_${i} ${i} ${OUTPUT_FOLDER}/ray_source_${i}.dat"
if [ ${is_parallel} == "1" ]; then
if [ ${is_verbose} == "1" ]; then
./ray_handler ${OUTPUT_FOLDER} "ray_handler_${i}" ${i} "${OUTPUT_FOLDER}/ray_source_${i}.dat" &
else
./ray_handler ${OUTPUT_FOLDER} "ray_handler_${i}" ${i} "${OUTPUT_FOLDER}/ray_source_${i}.dat" > /dev/null &
fi
else
if [ ${is_verbose} == "1" ]; then
./ray_handler ${OUTPUT_FOLDER} "ray_handler_${i}" ${i} "${OUTPUT_FOLDER}/ray_source_${i}.dat"
else
./ray_handler ${OUTPUT_FOLDER} "ray_handler_${i}" ${i} "${OUTPUT_FOLDER}/ray_source_${i}.dat" > /dev/null
fi
fi
pids[$i]=$!
let i++ 1
done
sleep 3
i=0
while [ $i -lt $PROCESS_COUNT ]
do
#echo ${pids[$i]}
wait ${pids[$i]}
let i++ 1
done
./data_postprocessor ${PROCESS_COUNT} ${OUTPUT_FOLDER}
END=$(date +%s)
# gnuplot gnuplot_script/plot.data2d.txt
# gnuplot gnuplot_script/plot.data3dm.txt
# open output.2d.svg &
# open output3d_matrix.svg &
DIFF=$(echo "$END - $START" | bc)
#DIFF=$(echo "$END - $START")
echo "execute time ${DIFF} seconds"
cd ..
exit 0
##########################################################
# backup, run windows single
# cd output
# START=$(date +%s)
# ./lg_module
# END=$(date +%s)
# gnuplot gnuplot_script/plot.data2d.txt
# gnuplot gnuplot_script/plot.data3dm.txt
# open output.2d.svg &
# open output3d_matrix.svg &
# DIFF=$(echo "$END - $START" | bc)
# echo "execute time ${DIFF} seconds"
# cd ..
# exit 0
| true
|
6b1532cfdedc52c02ff84bb273630d230166d944
|
Shell
|
trilioData/triliovault-cfg-scripts
|
/kolla-ansible/trilio-datamover-api/triliovault_datamover_api_extend_start.sh
|
UTF-8
| 361
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
# Bootstrap and exit if KOLLA_BOOTSTRAP variable is set. This catches all cases
# of the KOLLA_BOOTSTRAP variable being set, including empty.
## TODO: Uncomment following code once we get dmapi-dbsync tool
if [[ "${!KOLLA_BOOTSTRAP[@]}" ]]; then
dmapi-dbsync
exit 0
fi
if [[ "${!KOLLA_UPGRADE[@]}" ]]; then
dmapi-dbsync
exit 0
fi
| true
|
801fa51bde9774a4d1663bfe8c8a53ff51f42cd2
|
Shell
|
lamtran80/Ubuntu_new
|
/for.sh
|
UTF-8
| 185
| 2.6875
| 3
|
[] |
no_license
|
echo "i in {0..10}"
for i in {0..10}
do
echo $i
done
echo "n in {0..20..2}"
for n in {0..20..2}
do
echo $n
done
echo "for (( k=1; k<5; k++))"
for (( k=1; k<5; k++))
do
echo $k
done
| true
|
fd21f95e3b401f59e92a697a52a898dd4174ccbd
|
Shell
|
markzz/abs
|
/community/python-pycontracts/PKGBUILD
|
UTF-8
| 1,470
| 2.625
| 3
|
[] |
no_license
|
pkgbase=python-pycontracts
pkgname=(python-pycontracts python2-pycontracts)
_pypiname=PyContracts
pkgver=1.7.8
pkgrel=1
pkgdesc='Declare constraints on function parameters and return values'
arch=('any')
url="https://andreacensi.github.io/contracts"
license=('GPL')
makedepends=('python-setuptools' 'python2-setuptools' 'python-pyparsing' 'python2-pyparsing'
'python-decorator' 'python2-decorator' 'python-six' 'python2-six')
checkdepends=('python-nose' 'python2-nose')
source=("http://pypi.python.org/packages/source/${_pypiname:0:1}/${_pypiname}/${_pypiname}-${pkgver}.tar.gz")
sha256sums=('c7a6f49d509cb3a4c17386a311d25d229d5fa73062650ef9538c47846937b388')
prepare() {
cp -a $_pypiname-$pkgver{,-py2}
}
build() {
cd "${srcdir}/${_pypiname}-${pkgver}"
python setup.py build
cd "${srcdir}/${_pypiname}-${pkgver}-py2"
python2 setup.py build
}
check() {
cd "${srcdir}/${_pypiname}-${pkgver}"
PYTHONPATH="$PWD/build/lib:$PYTHONPATH" nosetests3
cd "${srcdir}/${_pypiname}-${pkgver}-py2"
PYTHONPATH="$PWD/build/lib:$PYTHONPATH" nosetests2
}
package_python-pycontracts() {
depends=('python-pyparsing' 'python-decorator' 'python-six')
cd "${srcdir}/${_pypiname}-${pkgver}"
python setup.py install --root="${pkgdir}" --optimize=1
}
package_python2-pycontracts() {
depends=('python2-pyparsing' 'python2-decorator' 'python2-six')
cd "${srcdir}/${_pypiname}-${pkgver}"
python2 setup.py install --root="${pkgdir}" --optimize=1
}
| true
|
c740352514f945acc6f73cb868ef6a7ded44fb5b
|
Shell
|
lislon/dotfiles
|
/git/git-templates/hooks/prepare-commit-msg
|
UTF-8
| 377
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/sh
# author: Igor Avdeev
# description: Creates
if ! head -n 1 $1 | grep -E "C[0-9]+-[0-9]+" ; then
BRANCH_NAME=$(git branch | grep '*' | grep -oE "C[0-9]+-[0-9]+")
if [[ "$BRANCH_NAME" ]] ; then
OLD=$(cat "$1")
echo -n "" > "$1"
echo -n "[" >> "$1"
echo -n >> "$1"
echo -n "] " >> "$1"
echo "$OLD" >> "$1"
fi
fi
| true
|
63e6b60da9e2738b2f10d5c28b80a63e444a08c9
|
Shell
|
nlnwa/openshift-rethinkdb-cl
|
/ready-probe.sh
|
UTF-8
| 287
| 2.640625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Fail with first error
set -e
# Checks if the rethinkdb instance is ready to operate.
# This will be used to direct client traffic
# And, more instances will not be created by the petset until it returns success.
# For now, try to hit the app at 8080
curl localhost:8080
| true
|
225b9eaf1a5a523d2bcaafa6f5a7e0995b295ef6
|
Shell
|
emosher/dotfiles
|
/init.sh
|
UTF-8
| 267
| 3
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
## Copy files to home
# If profile does not exist, create it
if [ ! -f ~/.bash_profile ]; then
cp bash_profile ~/.bash_profile
fi
# Copy files
cp bashrc ~/.bashrc
cp git-completion ~/.git-completion
cp git-prompt ~/.git-prompt
cp gitconfig ~/.gitconfig
| true
|
295f24179ff2e80114b02d01c08af13c3c4a9289
|
Shell
|
gauteh/arch
|
/pa-applet-git/PKGBUILD
|
UTF-8
| 953
| 2.59375
| 3
|
[] |
no_license
|
# Maintainer: Nicolas Avrutin <nicolasavru@gmail.com>
pkgname=pa-applet-git
pkgver=15.005f192
pkgrel=1
pkgdesc="PulseAudio system tray applet with volume bar"
arch=(i686 x86_64)
url="https://github.com/fernandotcl/pa-applet"
license=('BSD')
depends=('gtk3' 'libnotify' 'libpulse')
makedepends=('git')
options=('!libtool')
source=("$pkgname"::'git://github.com/fernandotcl/pa-applet.git'
'0001-remove-calls-to-deprecated-function-gdk_display_get_.patch')
md5sums=('SKIP')
pkgver() {
cd "$pkgname"
echo $(git rev-list --count HEAD).$(git rev-parse --short HEAD)
}
build() {
cd "$srcdir/$pkgname"
patch -p1 < ../0001-remove-calls-to-deprecated-function-gdk_display_get_.patch
./autogen.sh
./configure --prefix=/usr
make
}
package() {
cd "$srcdir/$pkgname"
make DESTDIR="$pkgdir/" install
install -D -m644 LICENSE "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"
}
md5sums=('SKIP'
'245d43f001605ff1ced10a563df9273b')
| true
|
d1a40755bb2f81f57623c62b11c65af7527d5859
|
Shell
|
nriquec/pbone_thesis
|
/lecture_support/seminar_psnup
|
UTF-8
| 1,946
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/sh
# vim: ts=4 sw=4 et
usage="seminar_psnup [3|4|7|8] dir basename"
if test $# != 3
then
echo "${usage}"
exit 1
fi
n=$1
dir=$2
basename=$3
input_filename=".${dir}/${basename}.ps"
output_filename="${dir}/${basename}.${n}.ps"
error_filename=".${dir}/.psnup${n}.${basename}.out"
case "${n}" in
3|4)
scale=0.49
ht0=800
ht1=410
wid0=300
wid1=580
page0="0U@${scale}(${wid0},${ht1})"
page1="1U@${scale}(${wid0},${ht0})"
page2="2U@${scale}(${wid1},${ht1})"
page3="3U@${scale}(${wid1},${ht0})"
case "${n}" in
3)
transform="3:${page0}+${page1}+${page2}"
;;
4)
transform="4:${page0}+${page1}+${page2}+${page3}"
;;
esac
;;
7|8)
scale=0.32
ht0=60
ht1=235
ht2=410
ht3=585
wid0=590
wid1=360
page0="0L@${scale}(${wid1},${ht3})"
page1="1L@${scale}(${wid0},${ht3})"
page2="2L@${scale}(${wid1},${ht2})"
page3="3L@${scale}(${wid0},${ht2})"
page4="4L@${scale}(${wid1},${ht1})"
page5="5L@${scale}(${wid0},${ht1})"
page6="6L@${scale}(${wid1},${ht0})"
page7="7L@${scale}(${wid0},${ht0})"
case "${n}" in
7)
transform="7:${page0}+${page1}+${page2}+${page3}"
transform="${transform}+${page4}+${page5}+${page6}"
;;
8)
transform="8:${page0}+${page1}+${page2}+${page3}"
transform="${transform}+${page4}+${page5}+${page6}+${page7}"
;;
esac
;;
*)
echo "${usage}"
exit 1
;;
esac
pstops -q -w21cm -h29.7cm ${transform} \
${input_filename} ${output_filename} 2> ${error_filename}
status=$?
if test ${status} != 0
then
cat ${error_filename}
fi
exit ${status}
| true
|
4e02da5988e1844b1b67d04bf342d8d2bcf9439c
|
Shell
|
HITB-CyberWeek/proctf-2019
|
/deploy/ansible/roles/cloud_node/files/scripts/reboot_vm.sh
|
UTF-8
| 978
| 3.546875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash -e
# go to script dir
MY_NAME="`readlink -f "$0"`"
MY_DIR="`dirname "$MY_NAME"`"
cd "${MY_DIR}"
TEAM=${1?Syntax: ./reboot_vm.sh <vm_num> <vm_name> <team_id>}
VMNUM=${2?Syntax: /reboot_vm.sh <vm_num> <vm_name> <team_id>}
VMNAME=${3?Syntax: /reboot_vm.sh <vm_num> <vm_name> <team_id>}
if ! [[ $TEAM =~ ^[0-9]+$ ]]; then
echo "team number validation error"
exit 1
fi
if ! [[ $VMNUM =~ ^[0-9]+$ ]]; then
echo "vm number validation error"
exit 1
fi
if ! [[ $VMNAME =~ ^[0-9a-zA-Z_]+$ ]]; then
echo "vm name validation error"
exit 1
fi
vm="${VMNUM}_${VMNAME}_team${TEAM}"
if ! VBoxManage list runningvms | grep -qP "\W${vm}\W"; then
./launch_vm.sh "$TEAM" "$VMNUM" "$VMNAME"
exit 0
fi
# hack around unstable VirtualBox work
timeout 20 VBoxManage controlvm "$vm" reset || [ $? -ne 124 ] || (pkill -9 -f "VBoxHeadless --comment ${vm} --startvm"; echo "That's why nobody uses VirtualBox in clouds"; sleep 5; ./launch_vm.sh "$TEAM" "$VMNUM" "$VMNAME")
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.