blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
64226883605c55752ec55c4110a3253f353b4cf1
|
Shell
|
Lhari/deployer
|
/build/mkcomposer.sh
|
UTF-8
| 2,230
| 2.5625
| 3
|
[] |
no_license
|
# Generate composer.json for a deployer project.
# NOTE(review): assumes $PROJECTNAME, $GITNAME, $BRANCH and $ISWP are set by
# vars/settings.sh, and that cwd is the build output directory -- confirm.
. vars/settings.sh
# Old Extensions, removed for various reasons
# Static head of composer.json: package metadata plus the repositories that
# are always present. '<<-' strips leading tabs only; the body is flush left.
cat > composer.json <<- _EOF_
{
"name": "Klean/CI Deployer",
"description": "first iteration of CI deployer files",
"authors": [
{"name": "Kasper Hansen", "email": "Kasper@klean.dk"}
],
"repositories": {
"installer-magento-core": {
"type": "git",
"url": "https://github.com/AydinHassan/magento-core-composer-installer"
},
"magento": {
"type": "git",
"url": "https://github.com/firegento/magento"
},
"firegento": {
"type": "composer",
"url": "http://packages.firegento.com"
},
"template": {
"name": "$PROJECTNAME",
"type": "vcs",
"url": "git@github.com:klean/$GITNAME.git"
},
_EOF_
# WordPress projects additionally pull packages from wpackagist.
if [[ $ISWP = true ]]; then
cat << EOT >> composer.json
"wordpress": {
"type": "composer",
"url": "http://wpackagist.org"
},
EOT
fi
# Close the repositories object and open the require section.
cat << EOT >> composer.json
"block": {
"name": "blocks",
"type": "vcs",
"url": "git@github.com:klean/blocks.git"
}
},
"require": {
"magento-hackathon/magento-composer-installer": "3.0.7",
"aydin-hassan/magento-core-composer-installer": "1.3.0",
"firegento/magento": "1.9.3.2",
"$PROJECTNAME": "dev-$BRANCH",
"connect20/chapagain_googletagmanager": "0.1.0",
"connect20/yireo_checkouttester": "*",
EOT
# Appends per-server extension requirements to composer.json.
# NOTE(review): presumably that script also writes require entries -- verify.
. vars/serverExtensions.sh
if [[ $ISWP = true ]]; then
cat << EOT >> composer.json
"php": ">=5.4",
"johnpbloch/wordpress": "4.*",
"wpackagist-plugin/user-role-editor": "*",
"wpackagist-plugin/login-redirect-url": "*",
"wpackagist-plugin/simple-seo-pack": "*",
EOT
fi
# Close require and open the extra section.
cat << EOT >> composer.json
"blocks": "dev-master"
},
"extra": {
EOT
if [[ $ISWP = true ]]; then
cat << EOT >> composer.json
"wordpress-install-dir": "public_html__new/wp/",
EOT
fi
# Magento installer settings and the closing braces.
cat << EOT >> composer.json
"magento-deploystrategy": "copy",
"magento-root-dir": "public_html__new",
"auto-append-gitignore": true,
"magento-force": true
}
}
EOT
| true
|
1410f284f2471f914a454b3081e20f9812887bc5
|
Shell
|
go-slide/slide
|
/.github/workflows/scripts/format.sh
|
UTF-8
| 181
| 2.65625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# CI gate: fail the job when `go fmt` or `golint` report anything.
# Note: `go fmt` also rewrites the offending files as a side effect.
fmt_lines=$(go fmt | wc -l)
if [ "$fmt_lines" -gt 0 ]; then
  echo "run go fmt before PR"
  exit 1
fi
lint_lines=$(golint | wc -l)
if [ "$lint_lines" -gt 0 ]; then
  echo "run go lint before PR"
  exit 1
fi
| true
|
c45936c6ceee9080c00e95e8578233c9e725e2a4
|
Shell
|
vinijaiswal/lenet-bigdl
|
/docker/start-notebook
|
UTF-8
| 1,030
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
# Start TensorBoard in the background, then launch a PySpark shell that is
# driven through a Jupyter notebook with BigDL on the classpath.
# setup paths, please use absolute path
export SPARK_HOME=/usr/src/app/spark-2.1.1-bin-hadoop2.7
export BigDL_HOME=/usr/src/app/bigdl-dist-spark-2.1.1-scala-2.11.8-linux64-0.2.0-dist
# Drive pyspark through Jupyter notebook instead of the plain Python REPL.
export PYSPARK_DRIVER_PYTHON=jupyter
export PYSPARK_DRIVER_PYTHON_OPTS="notebook --allow-root --notebook-dir=./ --ip=* --no-browser --certfile=mycert.pem --keyfile=mykey.key"
VERSION=0.2.0
source activate py27
# Fix: redirect stdout to the log file first, THEN duplicate stderr onto it.
# The original order ('2>&1 >tensorboard.log') pointed stderr at the
# terminal and only stdout at the log, so errors never reached the file.
tensorboard --logdir=/tmp/bigdl_summaries/lenet5- > tensorboard.log 2>&1 &
# Default to 2 local cores unless the caller exported CORES.
if [ -z "$CORES" ]; then
    CORES=2
fi
# Quote 'local[$CORES]' so the brackets are never treated as a glob pattern.
"${SPARK_HOME}"/bin/pyspark \
  --master "local[$CORES]" \
  --driver-memory 5g \
  --properties-file "${BigDL_HOME}/conf/spark-bigdl.conf" \
  --py-files "${BigDL_HOME}/lib/bigdl-${VERSION}-python-api.zip" \
  --jars "${BigDL_HOME}/lib/bigdl-SPARK_2.1-${VERSION}-jar-with-dependencies.jar" \
  --conf "spark.driver.extraClassPath=${BigDL_HOME}/lib/bigdl-SPARK_2.1-${VERSION}-jar-with-dependencies.jar" \
  --conf "spark.executor.extraClassPath=${BigDL_HOME}/lib/bigdl-SPARK_2.1-${VERSION}-jar-with-dependencies.jar"
| true
|
63a9fc27e0cddd3aa65853541aaeb243ca5a655a
|
Shell
|
ComplianceAsCode/content
|
/linux_os/guide/services/ntp/chronyd_configure_pool_and_server/tests/correct_chrony_configuration.pass.sh
|
UTF-8
| 752
| 3.359375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# packages = chrony
# Test fixture: make sure every configured time server and pool is present
# in the chrony config so the rule evaluates as PASS.
# The {{{ ... }}} spans are jinja macros expanded by the build system.
{{{ bash_instantiate_variables("var_multiple_time_servers") }}}
{{{ bash_instantiate_variables("var_multiple_time_pools") }}}
config_file="{{{ chrony_conf_path }}}"
# Split the comma-separated server list into an array.
IFS="," read -a SERVERS <<< $var_multiple_time_servers
for srv in "${SERVERS[@]}"
do
# Append a "server <srv>" line unless a matching server line already exists.
NTP_SRV=$(grep -w $srv $config_file)
if [[ ! "$NTP_SRV" == "server "* ]]
then
time_server="server $srv"
echo $time_server >> "$config_file"
fi
done
# Check and configure pools in /etc/chrony.conf
IFS="," read -a POOLS <<< $var_multiple_time_pools
for srv in "${POOLS[@]}"
do
# Same idea for pools: only append when no "pool <srv>" line is found.
NTP_POOL=$(grep -w $srv $config_file)
if [[ ! "$NTP_POOL" == "pool "* ]]
then
time_server="pool $srv"
echo $time_server >> "$config_file"
fi
done
| true
|
fc674a01da55014eb22cc8d54b0a1c06c11ca85f
|
Shell
|
erosness/sm-server
|
/scripts/pq.sh
|
UTF-8
| 590
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
# Tiny curl wrapper around the play-queue (pq) HTTP API.
# Usage: pq.sh play|get|clear|add|del <id>
host='localhost:5055'
# Canned demo track posted by the 'play' and 'add' commands.
track='{"turi" : "tr://wimp/tid/18771487","title" : "Bah (landing)","artist" : "Valby Vokalgruppe","cover" : "http://images.osl.wimpmusic.com/im/im?w=100&h=100&albumid=18771477"}'
case "$1" in
  "play")
    curl -X POST -d "$track" "$host/pq/play"
    ;;
  "get")
    curl "$host/pq"
    ;;
  "clear")
    curl "$host/pq/clear"
    ;;
  "add")
    curl -X POST -d "$track" "$host/pq/add"
    ;;
  "del")
    # Second argument is the queue entry id to delete.
    shift
    curl -X POST -d "{\"id\": \"$1\"}" "$host/pq/del"
    ;;
  *) echo "Unknown command" >&2
    ;;
esac
| true
|
d072e0f3d32065d08b344074a1af1355af71d75c
|
Shell
|
samiramiss11/Bash-Script
|
/Skript2019-01-02a.sh
|
UTF-8
| 210
| 3.375
| 3
|
[] |
no_license
|
#! /bin/bash
# Prompt for a directory name. If it already exists, say so; otherwise
# create it, read a file name, create that file and move it into the
# new directory.
echo "enter directory name"
read -r dir
# check if this directory exists:
# Fix: the original tested '! -d' yet printed "directory exists", so the
# branches were inverted. Quote "$dir" so names with spaces work.
if [ -d "$dir" ]
then
    echo "directory exists"
else
    # make the directory:
    mkdir -p "$dir"
    # Fix: 'read 2' is not a valid variable name and '$2' referred to the
    # script's (unset) second positional parameter; read into 'file' instead.
    read -r file
    touch "$file"
    # Fix: 'dir/.' was a literal path -- the '$' was missing.
    mv "$file" "$dir"/.
fi
| true
|
6989bff649f32a0817775190a7973615e0b2e0cd
|
Shell
|
DarkCat09/ttyclock
|
/ttyclock.sh
|
UTF-8
| 1,452
| 3.671875
| 4
|
[] |
no_license
|
# Terminal Clock
# ver.1
# Draws a ticking clock in the terminal until the user presses 'q'.
# -c N    colored output (ANSI foreground 31..39), -c 0 for no color
# -d 0|1  hide/show the date line
# -s      plain uncolored time, no date
# Keystrokes are hidden while the clock runs.
stty -echo
printf "\x1b[0m"
clockStr=
showDate=1
exitFromScript=
delay=0.001
standard=0
if [[ $1 = "-c" ]]
then
# Color
if [[ $2 = "" ]] || [[ $2 = "0" ]]
then
clockStr=""
elif (( $2 < 10 ))
then
# Build the ANSI escape for foreground color 3<digit>.
clockStr="\x1b[3${2}m"
else
echo "ttyclock: invalid value for: color"
exit 2
fi
if [[ $3 = "-d" ]]
then
if [[ $4 = "0" ]]
then
showDate=0
delay=0.0001
elif [[ $4 = "1" ]]
then
showDate=1
delay=0.01
else
echo "ttyclock: invalid value for: showDate"
exit 2
fi
fi
elif [[ $1 = "-h" ]]
then
echo "TTY-Clock, ver.1"
echo " Help "
echo
echo "Usage:"
echo " ttyclock -h"
echo " ttyclock -s"
echo " ttyclock [-c 0-9] [-d 0|1]"
echo
exit 0
elif [[ $1 = "-s" ]]
then
standard=1
showDate=0
else
echo "ttyclock: invalid argument"
echo "to see the help, use option -h"
exit 3
fi
# Main loop: redraw each tick; 'read -t $delay' doubles as the frame delay
# and as the listener for the 'q' key that ends the loop.
until [[ $exitFromScript = "q" ]]
do
clear
curtime=`date +%H:%M:%S`
curdate=`date "+%a, %d.%m.%Y"`
if (( standard > 0 )); then
echo "$curtime"
else
printf " \x1b[1m${clockStr}${curtime}\x1b[0m \n"
fi
if (( showDate > 0 )); then
printf "${curdate}\n"
fi
read -n 1 -t $delay exitFromScript
done
# Restore terminal echo before leaving.
stty echo
clear
exit 0
| true
|
e783d4a602fb467ec3d535efd28788e8d8dc1c81
|
Shell
|
bahusoid/WDMyCloud-Gen2
|
/Transmission/install.sh
|
UTF-8
| 786
| 3.40625
| 3
|
[
"MIT",
"GPL-1.0-or-later",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
#!/bin/sh
# APKG install hook for the Transmission package: copy the new package files
# into place while preserving the existing config directory across upgrades.
# $1 = path of the unpacked package source, $2 = destination app root.
[ -f /tmp/debug_apkg ] && echo "APKG_DEBUG: $0 $@" >> /tmp/debug_apkg
path_src=$1
path_des=$2
APKG_MODULE="Transmission"
# Quote all path expansions so install locations with spaces do not break
# the copy/cleanup commands below.
APKG_PATH="${path_des}/${APKG_MODULE}"
APKG_BACKUP_PATH="${APKG_PATH}/../${APKG_MODULE}_backup"
# Save the current config before the package files are overwritten.
mkdir -p "${APKG_BACKUP_PATH}"
cp -arf "${APKG_PATH}/config" "${APKG_BACKUP_PATH}"
cp -rf "$path_src" "$path_des"
# restore config files if they are saved in preinst.sh (or before_apkg.sh)
if [ -d "${APKG_BACKUP_PATH}/config" ] ; then
#mv ${APKG_PATH}/config ${APKG_PATH}/new_config
cp -arf "${APKG_BACKUP_PATH}/config" "${APKG_PATH}"
# use new default settings
#if [ -f ${APKG_PATH}/new_config/settings.json ] ; then
# cp -f ${APKG_PATH}/new_config/settings.json ${APKG_PATH}/config/settings.json
#fi
#rm -rf ${APKG_PATH}/new_config
rm -rf "${APKG_BACKUP_PATH}"
fi
| true
|
f5c9fe4a6ab2e031b2228e6bac84fc9bdb196314
|
Shell
|
KvMadan/UnixBasics
|
/practice/case.sh
|
UTF-8
| 286
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/bash
# Example of CASE Statements
# Dispatch on the first argument to start or stop the demo server.
action=$1
case "$action" in
  start)
    echo "Starting sleep walking server"
    /tmp/sleep-walking-server &
    ;;
  stop)
    echo "Stopping sleep walking server"
    kill $(cat /tmp/sleep-walking-server.pid)
    ;;
  *)
    echo "Usage $0 start|stop"; exit 1
esac
| true
|
04c79c1736544c1f50ded7447fbdaa4c2089cfea
|
Shell
|
GameServerManagers/LinuxGSM
|
/lgsm/modules/command_start.sh
|
UTF-8
| 7,312
| 3.4375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# LinuxGSM command_start.sh module
# Author: Daniel Gibbs
# Contributors: http://linuxgsm.com/contrib
# Website: https://linuxgsm.com
# Description: Starts the server.
commandname="START"
commandaction="Starting"
moduleselfname="$(basename "$(readlink -f "${BASH_SOURCE[0]}")")"
# gawk filter that prefixes console lines with a timestamp; the triple
# backslashes survive the extra round of expansion inside the tmux
# pipe-pane command strings further down.
addtimestamp="gawk '{ print strftime(\\\"[$logtimestampformat]\\\"), \\\$0 }'"
fn_firstcommand_set
# Ensure a ts3server config file exists (a blank one is valid) and that the
# license has been accepted, then start the server inside tmux.
fn_start_teamspeak3() {
if [ ! -f "${servercfgfullpath}" ]; then
fn_print_warn_nl "${servercfgfullpath} is missing"
fn_script_log_warn "${servercfgfullpath} is missing"
echo " * Creating blank ${servercfg}"
fn_script_log_info "Creating blank ${servercfg}"
fn_sleep_time
echo " * ${servercfg} can remain blank by default."
fn_script_log_info "${servercfgfullpath} can remain blank by default."
fn_sleep_time
echo " * ${servercfg} is located in ${servercfgfullpath}."
fn_script_log_info "${servercfg} is located in ${servercfgfullpath}."
sleep 5
touch "${servercfgfullpath}"
fi
# Accept license.
if [ ! -f "${executabledir}/.ts3server_license_accepted" ]; then
install_eula.sh
fi
fn_start_tmux
}
# This will allow the Jedi Knight 2 version to be printed in console on start.
# Used to allow update to detect JK2MV server version.
fn_start_jk2() {
fn_start_tmux
# Fix: 'send' (alias of send-keys) is the tmux sub-command that types into
# the session; 'end' is not a tmux command, so the version query silently
# failed (errors were discarded by the redirect below).
tmux -L "${sessionname}" send -t "${sessionname}" version ENTER > /dev/null 2>&1
}
# Start the game server inside a detached tmux session: rotate logs, write
# lock files, launch the process, wire console logging through pipe-pane
# (when the tmux version supports it) and verify the server came up.
fn_start_tmux() {
if [ "${parmsbypass}" ]; then
startparameters=""
fi
# check for tmux size variables.
if [[ "${servercfgtmuxwidth}" =~ ^[0-9]+$ ]]; then
sessionwidth="${servercfgtmuxwidth}"
else
sessionwidth="80"
fi
if [[ "${servercfgtmuxheight}" =~ ^[0-9]+$ ]]; then
sessionheight="${servercfgtmuxheight}"
else
sessionheight="23"
fi
# Log rotation.
fn_script_log_info "Rotating log files"
if [ "${engine}" == "unreal2" ] && [ -f "${gamelog}" ]; then
mv "${gamelog}" "${gamelogdate}"
fi
if [ -f "${lgsmlog}" ]; then
mv "${lgsmlog}" "${lgsmlogdate}"
fi
if [ -f "${consolelog}" ]; then
mv "${consolelog}" "${consolelogdate}"
fi
# Create lockfile
date '+%s' > "${lockdir}/${selfname}.lock"
echo "${version}" >> "${lockdir}/${selfname}.lock"
echo "${port}" >> "${lockdir}/${selfname}.lock"
fn_reload_startparameters
if [ "${shortname}" == "av" ]; then
cd "${systemdir}" || exit
else
cd "${executabledir}" || exit
fi
# Launch the server detached; tmux's own stderr is captured so a failed
# session start can be reported to the user further down.
tmux -L "${sessionname}" new-session -d -x "${sessionwidth}" -y "${sessionheight}" -s "${sessionname}" "${preexecutable} ${executable} ${startparameters}" 2> "${lgsmlogdir}/.${selfname}-tmux-error.tmp"
# Create logfile.
touch "${consolelog}"
# Create last start lock file
date +%s > "${lockdir}/${selfname}-laststart.lock"
# tmux compiled from source will return "master", therefore ignore it.
if [ "${tmuxv}" == "master" ]; then
fn_script_log "tmux version: master (user compiled)"
echo -e "tmux version: master (user compiled)" >> "${consolelog}"
if [ "${consolelogging}" == "on" ] || [ -z "${consolelogging}" ]; then
if [ "$logtimestamp" == "on" ]; then
tmux -L "${sessionname}" pipe-pane -o -t "${sessionname}" "exec bash -c \"cat | $addtimestamp\" >> '${consolelog}'"
else
tmux -L "${sessionname}" pipe-pane -o -t "${sessionname}" "exec cat >> '${consolelog}'"
fi
fi
elif [ -n "${tmuxv}" ]; then
# tmux pipe-pane not supported in tmux versions < 1.6.
if [ "${tmuxvdigit}" -lt "16" ]; then
echo -e "Console logging disabled: tmux => 1.6 required
https://linuxgsm.com/tmux-upgrade
Currently installed: $(tmux -V)" > "${consolelog}"
# Console logging disabled: Bug in tmux 1.8 breaks logging.
elif [ "${tmuxvdigit}" -eq "18" ]; then
echo -e "Console logging disabled: Bug in tmux 1.8 breaks logging
https://linuxgsm.com/tmux-upgrade
Currently installed: $(tmux -V)" > "${consolelog}"
# Console logging enable or not set.
elif [ "${consolelogging}" == "on" ] || [ -z "${consolelogging}" ]; then
if [ "$logtimestamp" == "on" ]; then
tmux -L "${sessionname}" pipe-pane -o -t "${sessionname}" "exec bash -c \"cat | $addtimestamp\" >> '${consolelog}'"
else
tmux -L "${sessionname}" pipe-pane -o -t "${sessionname}" "exec cat >> '${consolelog}'"
fi
fi
else
echo -e "Unable to detect tmux version" >> "${consolelog}"
fn_script_log_warn "Unable to detect tmux version"
fi
# Console logging disabled.
if [ "${consolelogging}" == "off" ]; then
echo -e "Console logging disabled by user" >> "${consolelog}"
fn_script_log_info "Console logging disabled by user"
fi
fn_sleep_time
# If the server fails to start.
# ${status} is populated by check_status.sh.
check_status.sh
if [ "${status}" == "0" ]; then
fn_print_fail_nl "Unable to start ${servername}"
fn_script_log_fatal "Unable to start ${servername}"
if [ -s "${lgsmlogdir}/.${selfname}-tmux-error.tmp" ]; then
fn_print_fail_nl "Unable to start ${servername}: tmux error:"
fn_script_log_fatal "Unable to start ${servername}: tmux error:"
echo -e ""
echo -e "Command"
echo -e "================================="
echo -e "tmux -L \"${sessionname}\" new-session -d -s \"${sessionname}\" \"${preexecutable} ${executable} ${startparameters}\"" | tee -a "${lgsmlog}"
echo -e ""
echo -e "Error"
echo -e "================================="
tee -a "${lgsmlog}" < "${lgsmlogdir}/.${selfname}-tmux-error.tmp"
# Detected error https://linuxgsm.com/support
if grep -c "Operation not permitted" "${lgsmlogdir}/.${selfname}-tmux-error.tmp"; then
echo -e ""
echo -e "Fix"
echo -e "================================="
if ! grep "tty:" /etc/group | grep "$(whoami)"; then
echo -e "$(whoami) is not part of the tty group."
fn_script_log_info "$(whoami) is not part of the tty group."
group=$(grep tty /etc/group)
echo -e ""
echo -e " ${group}"
fn_script_log_info "${group}"
echo -e ""
echo -e "Run the following command with root privileges."
echo -e ""
echo -e " usermod -G tty $(whoami)"
echo -e ""
echo -e "https://linuxgsm.com/tmux-op-perm"
fn_script_log_info "https://linuxgsm.com/tmux-op-perm"
else
echo -e "No known fix currently. Please log an issue."
fn_script_log_info "No known fix currently. Please log an issue."
echo -e "https://linuxgsm.com/support"
fn_script_log_info "https://linuxgsm.com/support"
fi
fi
fi
core_exit.sh
else
fn_print_ok "${servername}"
fn_script_log_pass "Started ${servername}"
fi
rm -f "${lgsmlogdir:?}/.${selfname}-tmux-error.tmp" 2> /dev/null
echo -en "\n"
}
# --- module entry point: guard against double starts, run fixes/updates,
# --- then dispatch to the game-specific start function.
check.sh
# Is the server already started.
# $status comes from check_status.sh, which is run by check.sh for this command
if [ "${status}" != "0" ]; then
fn_print_dots "${servername}"
fn_print_info_nl "${servername} is already running"
fn_script_log_error "${servername} is already running"
if [ -z "${exitbypass}" ]; then
core_exit.sh
fi
fi
if [ -z "${fixbypass}" ]; then
fix.sh
fi
info_game.sh
core_logs.sh
# Will check for updates if updateonstart is yes.
if [ "${updateonstart}" == "yes" ] || [ "${updateonstart}" == "1" ] || [ "${updateonstart}" == "on" ]; then
exitbypass=1
# Unset so the update pass cannot recurse back into another update.
unset updateonstart
command_update.sh
fn_firstcommand_reset
fi
fn_print_dots "${servername}"
if [ "${shortname}" == "ts3" ]; then
fn_start_teamspeak3
elif [ "${shortname}" == "jk2" ]; then
fn_start_jk2
else
fn_start_tmux
fi
core_exit.sh
| true
|
01d91e42ca1b7d3986d712678a975a353a0b3add
|
Shell
|
lonkamikaze/bsda2
|
/src/lst.sh
|
UTF-8
| 15,697
| 3.796875
| 4
|
[
"ISC"
] |
permissive
|
# Source guard: a second 'source' of this file is a no-op.
test -n "$_lst_sh_" && return 0
readonly _lst_sh_=1
#
# Portable string backed array library.
#
# @file
# @see lst.md
#
#
# Run lst() with RS set to the ASCII Line Feed character.
#
# @warning
# The Line Feed character is White Space and thus subject to
# special expansion rules, which affects the processing of
# empty array entries with some methods.
# @param[in,out] @
# Forwarded to lst()
#
log() { RS=$'\n' lst "$@"; }
#
# Run lst() with RS set to the ASCII Record Separator character.
#
# @param[in,out] @
# Forwarded to lst()
#
rec() { RS=$'\036' lst "$@"; }
#
# Run lst() with RS set to the ASCII comma `,` character.
#
# @param[in,out] @
# Forwarded to lst()
#
csv() { RS=, lst "$@"; }
#
# Interpret the first argument as the name of an array and an operator.
#
# @param[in,out] &1
# The array to access and the operator to apply
# @param[in,out] @
# Forwarded to the array methods
# @param[in] RS
# The character separating array entries
#
lst() {
case "$1" in
# name[i].method args  ->  indexed method call
*\[*\].*) eval "shift; lst:unpack '${1##*\].}' '${1%\].*}]' \"\${@}\"";;
# name[i]= value  ->  indexed assignment
*\[*\]=) eval "shift; lst:unpack set '${1%=}' \"\${@}\"";;
# name[i]  ->  indexed read
*\[*\]) lst:unpack get "${@}";;
# name.method args  ->  plain method call
*.*) eval "shift; lst.${1#*.} '${1%%.*}' \"\$@\"";;
# name=cat others...  ->  concatenation
*=cat) eval "shift; lst:cat '${1%=cat}' \"\$@\"";;
# name= values...  ->  clear, then push values
*=) eval "$1; ${2+shift; lst.push_back '${1%=}' \"\$@\"}";;
# bare name(s)  ->  print
*) lst.print "${@}";;
esac
}
#
# Call an indexed array method using a packed array/index reference.
#
# The call `lst:unpack method array[index] ...` is unpacked into the
# call `lst.method_i array index ...`.
#
# @param[in] &1
# The indexed function name
# @param[in,out] &2
# Array name and index in the shape `name[index]`
# @param[in,out] @
# Forwarded to lst.${1}_i
# @param[in] RS
# The character separating array entries
#
lst:unpack() {
# First pass strips the trailing ']'; second splits name and index and
# dispatches to the _i variant of the requested method.
eval "shift 2; set -- '${1}' '${2%\]}' \"\${@}\""
eval "shift 2; lst.${1}_i '${2%%\[*}' '${2#*\[}' \"\$@\""
}
#
# Provide the indexed array entry.
#
# Items are indexed 1 to n (first to last) or -1 to -n (last to first).
#
# @warning
# If RS is set to a White Space character empty entries are
# invisible to the get_i() method. I.e. entries are indexed
# as if the empty entries did not exist.
# @param[in] &1
# Name of the array
# @param[in] 2
# Index value or arithmetic expression (will be evaluated once)
# @param[out] &3
# Optional destination variable name (print if unset)
# @param[in] RS
# The character separating array entries
# @retval 0
# Value is successfully returned/printed
# @retval 1
# Out of bounds index
#
lst.get_i() {
local IFS
IFS="${RS}"
# Expand the array into the positional parameters behind the evaluated
# index and the destination name, then translate the (possibly negative)
# index into a positional-parameter offset.
eval "set -- \"\$((\${2}))\" \"\${3}\" \${$1}"
eval "shift; set -- $(($1 > 0 ? $1 + 2 : $# + 1 + ($# + 1 + $1 >= 3 ? $1 : 0))) \"\${@}\""
test $1 -le $# && eval "${2:-echo }${2:+=}\"\${$1}\""
}
#
# Assign the indexed array entry.
#
# Items are indexed 1 to n (first to last) or -1 to -n (last to first).
#
# @warning
# If RS is set to a White Space character empty entries are
# invisible to the set_i() method. I.e. entries are indexed
# as if the empty entries did not exist and the resulting array
# will contain no empty entries.
# @param[in,out] &1
# Name of the array
# @param[in] 2
# Index value or arithmetic expression (will be evaluated once)
# @param[in] 3
# The value to assign
# @param[in] RS
# The character separating array entries
# @retval 0
# The entry was successfully updated
# @retval 1
# Out of bounds index, the array remains unchanged
#
lst.set_i() {
local IFS
IFS="${RS}"
eval "set -- \"\${1}\" \"\$((\${2}))\" \"\${3}\" \${$1}"
# The command substitution below *generates* the assignment: it rebuilds
# the array entry by entry, substituting the new value at position n.
eval "$(
n=$(($2 > 0 ? $2 + 3 : $# + 1 + $2))
if [ $n -le $# -a $n -gt 3 ]; then
echo -n "$1=\""
i=3
while [ $((i += 1)) -le $# ]; do
echo -n "\${$((i == n ? 3 : i))}\${RS}"
done
echo '"'
else
echo return 1
fi
)"
}
#
# Remove the indexed array entry.
#
# Items are indexed 1 to n (first to last) or -1 to -n (last to first).
#
# @warning
# If RS is set to a White Space character empty entries are
# invisible to the rm_i() method. I.e. entries are indexed
# as if the empty entries did not exist and the resulting array
# will contain no empty entries.
# @param[in,out] &1
# Name of the array
# @param[in] 2
# Index value or arithmetic expression (will be evaluated once)
# @param[in] RS
# The character separating array entries
# @retval 0
# The entry was successfully removed
# @retval 1
# Out of bounds index, the array remains unchanged
#
lst.rm_i() {
local IFS
IFS="${RS}"
eval "set -- \"\${1}\" \"\$((\${2}))\" \${$1}"
# Same generator technique as set_i(), but position n is skipped instead
# of replaced.
eval "$(
n=$(($2 > 0 ? $2 + 2 : $# + 1 + $2))
if [ $n -le $# -a $n -gt 2 ]; then
echo -n "$1=\""
i=2
while [ $((i += 1)) -le $# ]; do
test $i -ne $n && echo -n "\${$i}\${RS}"
done
echo '"'
else
echo return 1
fi
)"
}
#
# Resize the array.
#
# If the given size is less than the current array size the tail of
# the array is cut off.
# If the given size is greater than the current array size, new entries
# are appended to the array. If the third argument does not provide
# a value, empty entries are appended.
#
# @warning
# If RS is set to a White Space character empty entries are
# invisible to the resize() method. I.e. empty entries disappear.
# If the resulting array is shorter than the requested array size
# new empty entries (or entries with the value of argument 3)
# are appended to the array.
# @param[in,out] &1
# Name of the array
# @param[in] 2
# The new array size value or arithmetic expression (will be
# evaluated once)
# @param[in] 3
# Optionally provide a value used for new entries
#
lst.resize() {
local IFS
IFS="${RS}"
eval "set -- \"\${1}\" \"\$((\${2}))\" \"\${3}\" \${$1}"
# Generate exactly $2 entries: existing positions are reused, positions
# past the old end reference $3 (the fill value).
eval "
$1=\"$(
n=$(($2 + 3))
i=3
while [ $((i += 1)) -le $n ]; do
printf '${%s}${RS}' $((i <= $# ? i : 3))
done
)\"
"
}
#
# Prepend values.
#
# @param[out] &1
# Name of the array
# @param[in] @
# Values to prepend
# @param[in] RS
# The character separating array entries
#
lst.push_front() {
# Prepend $2 (when given) and recurse for the remaining values, so the
# arguments end up in their original order at the front.
eval "
${2+$1=\"\${2\}\${RS\}\${$1\}\"}
${3+shift 2; lst.push_front $1 \"\$@\"}
"
}
#
# Append values.
#
# @param[out] &1
# Name of the array
# @param[in] @
# Values to append
# @param[in] RS
# The character separating array entries
#
lst.push_back() {
# Append $2 (when given) followed by a trailing RS, then recurse for the
# remaining values.
eval "
${2+$1=\"\${$1\}\${2\}\${RS\}\"}
${3+shift 2; lst.push_back $1 \"\$@\"}
"
}
#
# Read first value.
#
# @param[in] &1
# Name of the array
# @param[in] &2
# Optional destination variable name (print if unset)
# @param[in] RS
# The character separating array entries
# @retval 0
# First entry returned successfully
# @retval 1
# Array is empty
#
lst.peek_front() {
# Strip everything from the first RS onwards; an empty array fails the
# leading test and thus returns 1.
eval "
test -n \"\${$1}\" && \
${2:-echo }${2:+=}\"\${$1%%\"\${RS}\"*}\"
"
}
#
# Read last value.
#
# @param[in] &1
# Name of the array
# @param[in] &2
# Optional destination variable name (print if unset)
# @param[in] RS
# The character separating array entries
# @retval 0
# Last entry returned successfully
# @retval 1
# Array is empty
#
lst.peek_back() {
# Drop the trailing RS first, then everything up to the last remaining RS.
eval "
test -n \"\${$1}\" && \
set -- \"\${$1%\"\${RS}\"}\" && \
${2:-echo }${2:+=}\"\${1##*\"\${RS}\"}\"
"
}
#
# Pop first value.
#
# @param[in,out] &1
# Name of the array
# @param[out] &2
# Optional destination variable name (print if unset)
# @param[out] &@
# Additional destinations are filled with popped values in sequence
# @param[in] RS
# The character separating array entries
# @retval 0
# All pop requests were completed successfully
# @retval > 0
# The argument position of the first variable name that could
# not be assigned, because the array is empty
#
lst.pop_front() {
# peek first (also handles the destination assignment), then cut the
# first entry off and recurse for any further destination names.
lst.peek_front "$@" && \
eval "
$1=\"\${$1#*\"\${RS}\"}\"
${3+shift 2; lst.pop_front $1 \"\$@\" || return \$((\$? + 1))}
"
}
#
# Pop last value.
#
# @param[in,out] &1
# Name of the array
# @param[out] &2
# Optional destination variable name (print if unset)
# @param[out] &@
# Additional destinations are filled with popped values in sequence
# @param[in] RS
# The character separating array entries
# @retval 0
# All pop requests were completed successfully
# @retval > 0
# The argument position of the first variable name that could
# not be assigned, because the array is empty
#
lst.pop_back() {
# peek last, then strip the trailing RS and the final entry before
# recursing for any further destination names.
lst.peek_back "$@" && \
eval "
$1=\"\${$1%\"\${RS}\"}\"
$1=\"\${$1%\"\${$1##*\"\${RS}\"}\"}\"
${3+shift 2; lst.pop_back $1 \"\$@\" || return \$((\$? + 1))}
"
}
#
# Remove the first entry matching the given value(s).
#
# @param[in,out] &1
# Name of the array
# @param[in] 2
# The value to remove
# @param[in] @
# Additional values to remove
# @param[in] RS
# The character separating array entries
# @retval 0
# All values were matched and removed
# @retval > 0
# The argument position of the first value that could not be
# found in the array, subsequent argument values are not processed
#
lst.rm_first() {
# Two cases: the value heads the array, or it sits between two RS
# characters somewhere later; splice it out either way.
eval "
case \"\${$1}\" in
\"\${2}\${RS}\"*)
$1=\"\${$1#\"\${2}\${RS}\"}\";;
*\"\${RS}\${2}\${RS}\"*)
$1=\"\${$1%%\"\${RS}\${2}\${RS}\"*}\${RS}\${$1#*\"\${RS}\${2}\${RS}\"}\";;
*)
return 1;;
esac
${3+shift 2 && lst.rm_first $1 \"\$@\" || return \$((\$? + 1))}
"
}
#
# Remove the last entry matching the given values.
#
# @param[in,out] &1
# Name of the array
# @param[in] 2
# The value to remove
# @param[in] @
# Additional values to remove
# @param[in] RS
# The character separating array entries
# @retval 0
# All values were matched and removed
# @retval > 0
# The argument position of the first value that could not be
# found in the array, subsequent argument values are not processed
#
lst.rm_last() {
# Mirror image of rm_first(): the interior match is preferred (greedy
# from the right), the head match is the fallback.
eval "
case \"\${$1}\" in
*\"\${RS}\${2}\${RS}\"*)
$1=\"\${$1%\"\${RS}\${2}\${RS}\"*}\${RS}\${$1##*\"\${RS}\${2}\${RS}\"}\";;
\"\${2}\${RS}\"*)
$1=\"\${$1#\"\${2}\${RS}\"}\";;
*)
return 1;;
esac
${3+shift 2 && lst.rm_last $1 \"\$@\" || return \$((\$? + 1))}
"
}
#
# Provide the number of array entries.
#
# @warning
# If RS is set to a White Space character empty entries are
# invisible to the count() method. I.e. this returns the number
# of non-empty array entries.
# @param[in] &1
# Name of the array
# @param[out] &2
# Optional destination variable name (print if unset)
# @param[in] RS
# The character separating array entries
#
lst.count() {
local IFS
IFS="${RS}"
# Split the array into the positional parameters and report $#.
eval "
set -- \${$1}
${2:-echo }${2:+=}\$#
"
}
#
# Check whether the given value is stored in the array.
#
# @param[in] &1
# Name of the array
# @param[in] 2
# The value to look for
# @param[in] RS
# The character separating array entries
# @retval 0
# The value is stored in the array
# @retval 1
# The value is not stored in the array or no value given
#
lst.contains() {
	# Without a needle argument there is nothing to find.
	[ $# -ge 2 ] || return 1
	# An entry matches either at the head of the string or between two
	# record separators.
	eval "
	case \"\${$1}\" in
	\"\${2}\${RS}\"* | *\"\${RS}\${2}\${RS}\"*) return 0;;
	*) return 1;;
	esac
	"
}
#
# Check whether all the given values are stored in the array.
#
# @param[in] &1
# Name of the array
# @param[in] @
# The values to look for
# @param[in] RS
# The character separating array entries
# @retval 0
# All values occur within the array
# @retval 1
# One or more values do not occur within the array
#
lst.contains_all() {
# Recursive conjunction: check $2, then recurse over the remainder.
eval "${2+"lst.contains $1 \"\${2\}\" && shift 2 && lst.contains_all $1 \"\$@\""}"
}
#
# Check whether at least one of the given values is stored in the array.
#
# @param[in] &1
# Name of the array
# @param[in] @
# The values to look for
# @param[in] RS
# The character separating array entries
# @retval 0
# At least one value occurs within the array
# @retval 1
# None of the given values occur within the array
#
lst.contains_any() {
# Recursive disjunction: first hit wins, exhaustion fails via 'shift 2'.
lst.contains "$@" || eval "shift 2 && lst.contains_any $1 \"\$@\""
}
#
# Check whether the array variable exists at all.
#
# @param[in] &1
# Name of the array
# @retval 0
# The array is defined (it may still be empty, though)
# @retval 1
# The array is not defined
#
lst.is_defined() {
	# ${name+1} expands to 1 only when name is set (even when empty).
	eval "[ -n \"\${$1+1}\" ]"
}
#
# Logical negation of lst.is_defined().
#
# @param[in] &1
# Name of the array
# @retval 0
# The array is not defined
# @retval 1
# The array is defined (it may still be empty, though)
#
lst.is_undefined() {
	! lst.is_defined "$1"
}
#
# Check whether the array holds no entries.
#
# @param[in] &1
# Name of the array
# @retval 0
# The array is empty (it may still be defined, though)
# @retval 1
# The array has at least one entry (that entry may be the empty
# string, though)
#
lst.is_empty() {
	eval "[ -z \"\${$1}\" ]"
}
#
# Logical negation of lst.is_empty().
#
# @param[in] &1
# Name of the array
# @retval 0
# The array has at least one entry (that entry may be the empty
# string, though)
# @retval 1
# The array is empty (it may still be defined, though)
#
lst.is_not_empty() {
	! lst.is_empty "$1"
}
#
# Print the array.
#
# @warning
# If RS is set to a White Space character empty entries are
# invisible to the print() method.
# @param[in] &1
# Name of the array
# @param[in] &@
# Optional names of additional arrays to print
# @param[in] RS
# The character separating array entries
# @param[in] ORS
# The first character is used as the Output Record Separator
#
# Note: body runs in a () subshell, so the IFS changes cannot leak out.
lst.print() (
IFS="${RS}" eval "set --$(printf ' ${%s}' "${@}")"
IFS="${ORS}"
echo "${*}"
)
#
# Print each array entry according to the given formatting string.
#
# @param[in] &1
# Name of the array
# @param[in] 2
# printf(1) formatting string, can use as many fields as array
# items, repeats for additional items (so one field can be used
# to print everything)
# @param[in] RS
# The character separating array items
#
# Note: body runs in a () subshell, so the IFS change cannot leak out.
lst.printf() (
IFS="${RS}"
eval "printf \"\${2}\" \${$1}"
)
#
# Append the given arrays.
#
# All appended arrays must use the same RS character. Otherwise the
# array is corrupted.
#
# @param[in] &1
# Name of the array
# @param[in] &@
# Names of the arrays to append
# @param[in] RS
# The character separating array items
#
lst.append() {
# Delegates to lst:cat with the destination doubled up so the existing
# contents of $1 are kept at the front.
lst:cat "$1" "$@"
}
#
# Export the current record separator as the Input Record Separator.
#
# @param[in] RS
# The character separating array items
# @param[out] IRS
# The Input Record Separator
#
lst.set_irs() {
	IRS=${RS}
}
#
# Export the current record separator as the Output Record Separator.
#
# @param[in] RS
# The character separating array items
# @param[out] ORS
# The Output Record Separator
#
lst.set_ors() {
	ORS=${RS}
}
#
# Export the current record separator as the shell field separator.
#
# @param[in] RS
# The character separating array items
# @param[out] IFS
# The shell Input Field Separator
#
lst.set_ifs() {
	IFS=${RS}
}
#
# Concatenate a set of arrays into the first named array.
#
# All merged arrays must use the same RS character. Otherwise the
# resulting array is corrupted.
#
# @param[out] &1
# The array to create/overwrite
# @param[in] &@
# Names of the arrays to concatenate
# @param[in] RS
# The character separating array entries
#
lst:cat() {
	local _lst_cat_dst
	_lst_cat_dst=$1
	shift
	# Build '${a}${b}...' from the source names and assign it in one eval.
	eval "${_lst_cat_dst}=\"$(printf '${%s}' "$@")\""
}
#
# Convert an array using one separator to another.
#
# E.g. use `IRS=$'\n' ORS=$'\036' lst:convert foo foo` to convert
# `foo` from an LF to an RS separated array.
#
# @warning
# If IRS is set to a White Space character empty entries are
# are lost during the conversion.
# @param[in] &1
# Input array
# @param[out] &2
# Optional output array (print on 1> if unset)
# @param[in] IRS
# Input Record Separator
# @param[in] ORS
# Output Record Separator
#
lst:convert() {
# Split on IRS, then rejoin "$*" on ORS; the trailing '' argument keeps
# the terminating separator of a defined array.
eval "
local IFS
IFS=\"\${IRS}\"
set -- \${$1} \${$1+''}
IFS=\"\${ORS}\"
${2:-echo }${2:+=}\"\${*}\"
"
}
#
# Convert array type.
#
# E.g. use `lst:cast log:foo rec:foo` to convert `foo` from an LF
# to an RS separated array.
#
# @warning
# If the input array RS is set to a White Space character empty
# entries are lost during the conversion.
# @param[in] &1
# Input array given as `type:array`
# @param[in,out] &2
# Output array given as `type:array`, the array part is optional
# (print on 1> if unset)
#
lst:cast() {
local ORS IRS
# The type prefixes (log/rec/csv) dispatch through lst() to set IRS/ORS.
${1%:*} .set_irs
${2%:*} .set_ors
lst:convert ${1##*:} ${2##*:}
}
| true
|
dcc81647bd21fa680a3d46564f868bcadb07e97a
|
Shell
|
karldickman/chores
|
/cli/list-commands.sh
|
UTF-8
| 1,340
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# Print a categorized help summary of every chore-* subcommand.
# NOTE: the whole help text is one quoted string; keep it verbatim.
echo "All commands are prefixed with \"chore-\".
READ COMMANDS
burndown
Get a burndown report.
completions-needed
Get the number of completions needed to achieve high confidence in the
average chore duration.
get-completion-id
Get the database identifier of the active completion of a chore.
history
Show the history of a chory.
meals
Show incomplete meal chores.
progress
Shows progress made on overdue chores between two dates.
show-completion
Show chore completion information.
today
Show the number of minutes spent doing chores today.
SCHEDULE COMMANDS
change-due-date
Change the due date of a chore.
delete
Delete a chore completion.
hierarchize
Hierarchize a chore completion to match the chore hierarchy.
postpone
Postpone a chore a specified number of days.
schedule
Create a new a chore to be due on a particular date.
schedule-meals
Schedule all meal chores to be due on a particular date.
skip
Skip doing a chore.
WRITE COMMANDS
create
Create a new chore on a specified schedule.
complete
Record that a chore is completed.
put-away-dishes
Record how long it took to put away the dishes.
session
Record that some time was spent working on a chore.
unknown-duration
Record that was completed but it is not known how long it took."
| true
|
ba2b6fd7c100edaa9e98dfec0a81372ecba64db2
|
Shell
|
neil-rti/ros2_mp_latency
|
/scripts/run_suite_a.sh
|
UTF-8
| 1,813
| 3.09375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#############################
# Latency test sweep: iterate over RMW backend, publisher rate, message
# size, QoS reliability and per-node workload, launching one test per combo.
# change variables here
# NOTE that total test time = (TEST_DURATION + 5) * set size of next 5 vars,
# for example (60 + 5) * 3 * 3 * 5 * 2 * 9 = 52650 seconds, or 14.6 hours.
DDS_BACKENDS='rmw_connextdds rmw_fastrtps_cpp rmw_cyclonedds_cpp'
F_PUBLISHER_SET='1 10 20'
MSG_SIZE_SUFFIX_SET='100b 1kb 4kb 10kb 16kb 32kb 60kb 100kb 256kb 500kb 1mb 2mb 4mb 8mb'
QOS_RELIABILITY_SET='BE REL'
NODE_WORK_COUNT_SET='0 1 2 3 4 5 10 15 20'
TEST_DURATION=60
TEST_TOPIC_NAME='pt_profile_topic_'
TEST_CONFIG_NAME='h-sn-t'
# the test will create data files in the current directory
# The *_SET variables are intentionally left unquoted in the 'for' lists so
# the shell word-splits them into individual items.
for QOS_RELIABILITY in $QOS_RELIABILITY_SET; do
  for F_PUBLISHER in $F_PUBLISHER_SET; do
    for BACKEND in $DDS_BACKENDS; do
      for NODE_WORK_COUNT in $NODE_WORK_COUNT_SET; do
        for MSG_SIZE_SUFFIX in $MSG_SIZE_SUFFIX_SET; do
          # set env vars for this batch of tests
          export RMW_IMPLEMENTATION=$BACKEND
          export RTI_MPL_TEST_DURATION=$TEST_DURATION
          export RTI_MPL_PUB_FREQ=$F_PUBLISHER
          export RTI_MPL_RELY_TYPE=$QOS_RELIABILITY
          export RTI_MPL_TOPIC_NAME=$TEST_TOPIC_NAME
          export RTI_MPL_WORK_NODES=$NODE_WORK_COUNT
          export RTI_MPL_CONFIG_NAME=$TEST_CONFIG_NAME
          export RTI_MPL_SIZE_SUFFIX=$MSG_SIZE_SUFFIX
          # launch the tests
          timestamp=$(date +%Y-%m-%d_%H-%M-%S)
          echo "[$timestamp]:"
          # Invoke the launch file directly; the old pattern of expanding an
          # unquoted $command string is fragile (word-splitting/globbing).
          ros2 launch ../ros2_mp_latency/mp_latency/launch/mplat_ser_n.py
          sleep 3
        done
      done
    done
  done
done
| true
|
853b6d05eb9cccdf7e7c7f9fe5e8191914498efd
|
Shell
|
Kesin11/dotfiles
|
/install-git-diff-highlight.sh
|
UTF-8
| 539
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
# Install git's contrib diff-highlight filter into /usr/local/bin:
# prefer the copy shipped with the distro's git package (debian path),
# otherwise download a known-good standalone revision from GitHub.
echo 'Setup git/contrib/diff-highlight...'
# debian
if [ -e /usr/share/doc/git/contrib/diff-highlight ]; then
  sudo ln -s /usr/share/doc/git/contrib/diff-highlight/diff-highlight /usr/local/bin/diff-highlight
  sudo chmod +x /usr/local/bin/diff-highlight
# Fallback to download old one-file script diff-highlight
else
  # /usr/local/bin is normally root-owned, so the download needs sudo
  # just like the symlink branch above (the original curl/chmod ran
  # unprivileged and failed with "Permission denied" there).
  sudo curl https://raw.githubusercontent.com/git/git/3dbfe2b8ae94cbdae5f3d32581aedaa5510fdc87/contrib/diff-highlight/diff-highlight -o /usr/local/bin/diff-highlight
  sudo chmod +x /usr/local/bin/diff-highlight
fi
| true
|
c4140117dc04f53ec2569143d99c8c8dbdc04ad2
|
Shell
|
Gavazzi1/cuda-convolutions
|
/util/centos-opencv-install.sh
|
UTF-8
| 2,470
| 2.578125
| 3
|
[] |
no_license
|
# Build and install OpenCV (master) from source on CentOS, with python2
# and python3 bindings installed into dedicated virtualenvs.
# `mkdir -p` so a re-run of the script does not abort on directories
# that already exist (the original plain `mkdir` failed there).
mkdir -p installation
mkdir -p installation/OpenCV-master
# --- build / runtime dependencies ----------------------------------
sudo yum -y install epel-release
sudo yum -y install git gcc gcc-c++ cmake3
sudo yum -y install qt5-qtbase-devel
sudo yum install -y python34 python34-devel python34-pip
sudo yum install -y python python-devel python-pip
sudo yum -y install python-devel numpy python34-numpy
sudo yum -y install gtk2-devel
sudo yum install -y libpng-devel
sudo yum install -y jasper-devel
sudo yum install -y openexr-devel
sudo yum install -y libwebp-devel
sudo yum -y install libjpeg-turbo-devel
sudo yum install -y freeglut-devel mesa-libGL mesa-libGL-devel
sudo yum -y install libtiff-devel
sudo yum -y install libdc1394-devel
sudo yum -y install tbb-devel eigen3-devel
sudo yum -y install boost boost-thread boost-devel
sudo yum -y install libv4l-devel
sudo yum -y install gstreamer-plugins-base-devel
# --- python virtualenvs --------------------------------------------
sudo pip3 install virtualenv virtualenvwrapper
echo "export WORKON_HOME=$HOME/.virtualenvs" >> ~/.bashrc
echo "export VIRTUALENVWRAPPER_PYTHON=/usr/bin/python3" >> ~/.bashrc
echo "source /usr/bin/virtualenvwrapper.sh" >> ~/.bashrc
export VIRTUALENVWRAPPER_PYTHON=/usr/bin/python3
source /usr/bin/virtualenvwrapper.sh
mkvirtualenv OpenCV-master-py3 -p python3
workon OpenCV-master-py3
pip install cmake
pip install numpy scipy matplotlib scikit-image scikit-learn ipython dlib
# quit virtual environment
deactivate
mkvirtualenv OpenCV-master-py2 -p python2
workon OpenCV-master-py2
pip install cmake
pip install numpy scipy matplotlib scikit-image scikit-learn ipython dlib
# quit virtual environment
deactivate
# --- fetch and build OpenCV ----------------------------------------
git clone https://github.com/opencv/opencv.git
git clone https://github.com/opencv/opencv_contrib.git
cd opencv
mkdir -p build
cd build
cmake3 -D CMAKE_BUILD_TYPE=RELEASE \
    -D CMAKE_INSTALL_PREFIX=../../installation/OpenCV-master \
    -D INSTALL_C_EXAMPLES=ON \
    -D INSTALL_PYTHON_EXAMPLES=ON \
    -D WITH_TBB=ON \
    -D WITH_V4L=ON \
    -D OPENCV_SKIP_PYTHON_LOADER=ON \
    -D OPENCV_GENERATE_PKGCONFIG=ON \
    -D OPENCV_PYTHON3_INSTALL_PATH=$HOME/.virtualenvs/OpenCV-master-py3/lib/python3.4/site-packages \
    -D OPENCV_PYTHON2_INSTALL_PATH=$HOME/.virtualenvs/OpenCV-master-py2/lib/python2.7/site-packages \
    -D WITH_QT=ON \
    -D WITH_OPENGL=ON \
    -D PYTHON_DEFAULT_EXECUTABLE=/usr/bin/python3 \
    -D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib/modules \
    -D ENABLE_CXX11=ON \
    -D BUILD_EXAMPLES=ON ..
make -j4
make install
| true
|
736b2b71ffd8b41a9adf2f832504b498aa3f2553
|
Shell
|
phanirajkiran/mksoc
|
/scripts/Divided_scripts/build_patched-kernel.sh
|
UTF-8
| 4,045
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
#-----------------------------------------------------------------------------#
# Variables
#-----------------------------------------------------------------------------#
CURRENT_DIR=`pwd`
WORK_DIR=$1                   # working directory, passed as the only argument
#KERNEL_URL='https://github.com/altera-opensource/linux-socfpga.git'
KERNEL_FILE_URL='ftp://ftp.kernel.org/pub/linux/kernel/v4.x/linux-4.1.15.tar.xz'
KERNEL_FILE="linux-4.1.15.tar.xz"
KERNEL_FOLDER_NAME="linux-4.1.15"
PATCH_URL="https://www.kernel.org/pub/linux/kernel/projects/rt/4.1/patch-4.1.15-rt17.patch.xz"
PATCH_FILE="patch-4.1.15-rt17.patch.xz"
#KERNEL_URL='https://github.com/RobertCNelson/armv7-multiplatform'
#KERNEL_CHKOUT='linux-rt linux/socfpga-3.10-ltsi-rt'
#KERNEL_CHKOUT='origin/v4.4.x'
KERNEL_CONF='socfpga_defconfig'
distro=jessie
#CC_DIR="${WORK_DIR}/gcc-linaro-arm-linux-gnueabihf-4.9-2014.09_linux"
#CC="${CC_DIR}/bin/arm-linux-gnueabihf-"
CC="arm-linux-gnueabihf-"     # cross-compiler prefix for CROSS_COMPILE
IMG_FILE=${WORK_DIR}/mksoc_sdcard.img
DRIVE=/dev/loop0
KERNEL_DIR=${WORK_DIR}/$KERNEL_FOLDER_NAME   # unpacked kernel source tree
NCORES=`nproc`                # parallel make jobs
# Enable the armhf foreign architecture and install the Debian
# cross-toolchain plus the packaging helpers needed to cross-build.
# (function name keeps the original "sretch" spelling -- it is part of
# the script's interface)
install_sretch_armhf_crosstoolchain() {
    sudo dpkg --add-architecture armhf
    sudo apt-get update
    #sudo apt-get install crossbuild-essential-armhf
    #sudo apt-get install gcc-arm-linux-gnueabihf
    sudo apt -y install gcc-arm-linux-gnueabihf g++-arm-linux-gnueabihf libc6-dev debconf dpkg-dev libconfig-auto-perl file libfile-homedir-perl libfile-temp-perl liblocale-gettext-perl perl binutils-multiarch fakeroot
}
function install_dep {
    # install deps for kernel build: the cross toolchain plus `bc',
    # which the kernel build system requires
    install_sretch_armhf_crosstoolchain
    #sudo apt-get install bc u-boot-tools
    sudo apt -y install sudo bc
}
function fetch_kernel {
    # Provide a pristine kernel source tree in ${KERNEL_DIR}: clean an
    # already unpacked tree, or download and unpack the tarball plus
    # the rt patch into ${WORK_DIR}.
    if [ -d ${KERNEL_DIR} ]; then
        echo the kernel target directory $KERNEL_DIR already exists.
        echo cleaning repo
        cd $KERNEL_DIR
        git clean -d -f -x
    else
        # The tree does not exist yet: download into the work directory
        # and unpack there -- tar creates ${KERNEL_DIR} itself.
        # BUGFIX: the original did `cd $KERNEL_DIR` here, which could
        # never succeed (the directory is only created by tar below)
        # and, with `set -e` active, always aborted the script.
        cd "$WORK_DIR"
        wget $KERNEL_FILE_URL
        wget $PATCH_URL
        # git clone $KERNEL_URL linux
        tar xf $KERNEL_FILE
        # cd linux
        # git remote add linux $KERNEL_URL
    fi
    # git fetch linux
    # git checkout -b $KERNEL_CHKOUT
}
patch_kernel() {
    # Apply the PREEMPT_RT patch (downloaded into ${WORK_DIR}, i.e. the
    # parent of the kernel tree) on top of the unpacked sources.
    cd $KERNEL_DIR
    xzcat ../$PATCH_FILE | patch -p1
}
function build_kernel {
    # Cross-compile the patched kernel: configure with the socfpga
    # defconfig, build the kernel image and the modules, and tee every
    # stage's output into a log file next to the source tree.
    export CROSS_COMPILE=$CC
    #mkdir -p $KERNEL_DIR/41kernel/$KERNEL_FOLDER_NAME/build
    cd $KERNEL_DIR
    #clean
    make -j$NCORES mrproper
    # configure
    make ARCH=arm $KERNEL_CONF 2>&1 | tee ../linux-config_rt-log.txt
    #make $KERNEL_CONF 2>&1 | tee ../linux-config_rt-log.txt
    # zImage:
    make -j$NCORES ARCH=arm 2>&1 | tee ../linux-make_rt-log_.txt
    #make -j$NCORES 2>&1 | tee ../linux-make_rt-log_.txt
    # modules:
    make -j$NCORES ARCH=arm modules 2>&1 | tee ../linux-modules_rt-log.txt
    #make -j$NCORES modules 2>&1 | tee ../linux-modules_rt-log.txt
}
echo "#--------------------------------------------------------------------------#"
echo "#---------------+++ build_kernel.sh Start +++----------------#"
echo "#--------------------------------------------------------------------------#"
# abort on any error and trace every command from here on
set -e -x
if [ ! -z "$WORK_DIR" ]; then
    if [ ! -d ${KERNEL_DIR} ]; then
        echo installing dependencies
        install_dep
        echo downloading kernel
        fetch_kernel
    else
        echo onboard kernel found
        cd $WORK_DIR
        echo cleaning
        # throw the old tree away and re-extract a pristine one from
        # the previously downloaded tarball
        rm -Rf $KERNEL_DIR
        tar xf $KERNEL_FILE
        # git clean -d -f -x
    fi
    echo "Applying patch"
    patch_kernel
    build_kernel
    echo "#--------------------------------------------------------------------------#"
    echo "#----+++ build_kernel.sh Finished Successfull +++---------------#"
    echo "#--------------------------------------------------------------------------#"
else
    # refuse to run without the mandatory work-directory argument
    echo "#--------------------------------------------------------------------------#"
    echo "#--------- build_kernel.sh Unsuccessfull --------------------------#"
    echo "#--------- workdir parameter missing --------------------------------#"
    echo "#--------------------------------------------------------------------------#"
fi
| true
|
e0eae48da2dd19eaa72aa999b3329e83f976c7b8
|
Shell
|
xinghua24/BookmarkDemo
|
/database/start-mysql-local-server.sh
|
UTF-8
| 652
| 2.671875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
####################################
# for development only
# run mysql server on port 3306
# username: root
# password: password
#
# run phpmyadmin at http://localhost:7000/
####################################
# BUGFIXES: the shebang was below the comment block (so it had no
# effect); the header claimed the password was "3306" while both
# containers actually use "password".
# Create network
docker network create bookmark-network
# Create mysql container
docker run -d --name=mysql-dev \
  --network=bookmark-network \
  -e MYSQL_ROOT_PASSWORD=password \
  -p 3306:3306 \
  mysql:8
# Create phpmyadmin container
docker run -d --name phpmyadmin \
  --network=bookmark-network \
  -e PMA_HOSTS=mysql-dev \
  -e PMA_PORT=3306 \
  -e PMA_USER=root \
  -e PMA_PASSWORD=password \
  -p 7000:80 \
  phpmyadmin/phpmyadmin
| true
|
83f276d19a76cdcbefa49588a4cc79e77300198d
|
Shell
|
Christian1626/ELK
|
/Dashboards/export.sh
|
UTF-8
| 1,573
| 3.21875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# ./export.sh .kibana_mg /home/christian/Documents/ELK/Dashboards/dashboard_mg
# $1 : kibana index
# $2 : dashboards directory
#
# Saves all dashboards in Kibana

# Export every document of type $2 from kibana index $1 into $3/$2/.
# Document ids are scraped from the _search listing, then each
# document's _source is fetched into its own <id>.json file.
export_type () {
    local index="$1"
    local doctype="$2"
    local outdir="$3"
    mkdir -p "$outdir/$doctype"
    curl -s --noproxy 127.0.0.1 "127.0.0.1:9200/$index/$doctype/_search?pretty=true&size=1000&fields=" \
        | grep "_id" \
        | sed -E 's/.*"_id" : "(.*)",/\1/' \
        | while read -r id; do
            curl --noproxy 127.0.0.1 -s -X GET "127.0.0.1:9200/$index/$doctype/$id/_source" \
                > "$outdir/$doctype/$id.json"
        done
}

if [ $# -eq 2 ]
then
    # only create the output directory once both arguments are known to
    # be present (the original ran `mkdir -p $2` before this check and
    # failed on the empty argument)
    mkdir -p "$2"
    for doctype in dashboard visualization search index-pattern; do
        export_type "$1" "$doctype" "$2"
    done
else
    echo "./export.sh <kibana_index> <dashboards_folder>"
fi
| true
|
808360289ceb2eb91cc901ef43b9f9545181f6f2
|
Shell
|
warwick-hpsc/riot
|
/libriot2/scripts/createFileLog.sh
|
UTF-8
| 646
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
# Merge the per-run *.log files into one sorted master.log, rewriting
# each log's local file ids (field 4) into the ids that the same
# filenames carry in master.filedb.
#
# $1 - directory holding the *.log, *.log.filedb and master.filedb
#      files (defaults to the current directory)
if [ $# == 0 ]
then
    dir=.
else
    dir=$1
fi
echo -n "" > master.tmp
for i in ${dir}/*.log
do
    # map: local file id -> id of the same filename in master.filedb
    declare -A filedb
    # NOTE: `exec <` rebinds the whole script's stdin to this log's
    # .filedb file for the duration of the read loop below
    exec < ${i}.filedb
    while read line
    do
        fileid=`echo $line | awk '{ print $1 }'`
        filename=`echo $line | awk '{ print $2 }'`
        # -w: match the filename as a whole word in master.filedb
        matchingid=`grep -w $filename ${dir}/master.filedb | awk '{ print $1 }'`
        filedb["$fileid"]=$matchingid
    done
    # find and replace id with matching id:
    # build one awk program containing a gsub() per known id, then run
    # it once over the whole log file
    gsub=""
    for j in ${!filedb[@]}
    do
        gsub=$gsub"gsub(/$j/, ${filedb["$j"]}, \$4); "
    done
    awk "BEGIN { OFS = \"\t\" } { $gsub; print }" $i >> master.tmp
    unset filedb
done
sort -n master.tmp > ${dir}/master.log
rm master.tmp
| true
|
35b4fcb1266399a77584cfbf4bf3386c943f0cf5
|
Shell
|
sullof/home-bin
|
/find-by-dates.sh
|
UTF-8
| 95
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/bash
# Print every path under $1 modified after $2 and not after $3.
#
# Usage: find-by-dates.sh <dir> <from-date> <to-date>
#   dates in any format accepted by find's -newermt (e.g. "2020-01-01").
#
# Fixes: the duplicated shebang is removed, and NUL-delimited find
# output replaces the word-splitting `for i in $(find ...)` loop so
# paths containing whitespace are printed intact.
find "$1"/ -newermt "$2" ! -newermt "$3" -print0 |
while IFS= read -r -d '' path; do
    printf '%s\n' "$path"
done
| true
|
732bedd233dbe6690285414989a7b2a7799838bb
|
Shell
|
tsduck/tsduck-test
|
/tests/test-039.sh
|
UTF-8
| 1,354
| 2.6875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Test multiple CW in text file for scrambler plugin.
source $(dirname $0)/../common/testrc.sh
test_cleanup "$SCRIPT.*"
# Pass 1: scramble the reference stream with ATIS-IDSA using the
# control words from the .cw.txt file (--cp-duration 3), save/analyze
# the scrambled stream, then descramble it again with the same CW list
# and save/analyze the cleared stream.
test_tsp \
    -I file $(fpath "$INDIR/$SCRIPT.ts") \
    -P scrambler 0x0204 --atis-idsa --cw-file $(fpath "$INDIR/$SCRIPT.cw.txt") --cp-duration 3 \
    -P analyze -o $(fpath "$OUTDIR/$SCRIPT.atis.txt") \
    -P file $(fpath "$OUTDIR/$SCRIPT.atis.ts") \
    -P descrambler 0x0204 --atis-idsa --cw-file $(fpath "$INDIR/$SCRIPT.cw.txt") \
    -P analyze -o $(fpath "$OUTDIR/$SCRIPT.clear.txt") \
    -O file $(fpath "$OUTDIR/$SCRIPT.clear.ts") \
    >"$OUTDIR/$SCRIPT.tsp.log" 2>&1
test_bin $SCRIPT.atis.ts
test_bin $SCRIPT.clear.ts
test_text $SCRIPT.atis.txt
test_text $SCRIPT.clear.txt
test_text $SCRIPT.tsp.log
# Pass 2: extract the two scrambled PIDs from the original input and
# from the descrambled output into temporary streams.
test_tsp \
    -I file $(fpath "$INDIR/$SCRIPT.ts") \
    -P filter --pid 0x01A4 --pid 0x01AE \
    -O file $(fpath "$TMPDIR/$SCRIPT.in.ts") \
    >"$OUTDIR/$SCRIPT.tsp.in.log" 2>&1
test_tsp \
    -I file $(fpath "$OUTDIR/$SCRIPT.clear.ts") \
    -P filter --pid 0x01A4 --pid 0x01AE \
    -O file $(fpath "$TMPDIR/$SCRIPT.out.ts") \
    >"$OUTDIR/$SCRIPT.tsp.out.log" 2>&1
# Compare the filtered streams: descrambling must reproduce the
# original clear packets.
pushd "$TMPDIR" >/dev/null
$(tspath tscmp) --continue "$SCRIPT.in.ts" "$SCRIPT.out.ts" >"$OUTDIR/$SCRIPT.cmp.log" 2>&1
popd >/dev/null
test_text $SCRIPT.tsp.in.log
test_text $SCRIPT.tsp.out.log
test_text $SCRIPT.cmp.log
| true
|
2a9308e506dc13ddad6a1383bc7b09ffe19d997c
|
Shell
|
nilium/go-fex
|
/version.sh
|
UTF-8
| 296
| 3.640625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Print the project version, optionally read from a VERSION file.
#
# A readable ./VERSION file is sourced and may set MAJOR, MINOR and
# PATCH; any component left unset defaults to 0.
#
# Usage:
#   version.sh           -> prints "MAJOR.MINOR.PATCH"
#   version.sh --major   -> prints only the major component
#   version.sh --shell   -> prints shell assignments for all components
if [ -r "VERSION" ] ; then
  . ./VERSION
fi
: ${MAJOR:=0} ${MINOR:=0} ${PATCH:=0}
VERSION="$MAJOR.$MINOR.$PATCH"
if [ "$1" = "--major" ]; then
  echo "$MAJOR"
elif [ "$1" = "--shell" ]; then
  echo "MAJOR=\"$MAJOR\""
  echo "MINOR=\"$MINOR\""
  echo "PATCH=\"$PATCH\""
else
  echo "$VERSION"
fi
| true
|
da36a2e22fdb03b127610aa2adc769ca3b46b680
|
Shell
|
drm343/simple-config
|
/plugin/ecd.plugin.bash
|
UTF-8
| 1,175
| 3.765625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
ecd () {
    # Interactive "enhanced cd": with an argument behaves like plain
    # `cd`; without one, repeatedly presents the current directory's
    # entries through fzy and descends into the selection until the
    # user presses escape or picks ".".
    run_ecd () {
        # Print the cwd, then let the user pick an entry from `ls -al`;
        # the chosen name (last column) is stored in the global
        # variable `result`.  Selecting "." clears it, which ends the
        # caller's loop.
        echo -ne $(pwd)
        result="$(ls -al | sed 1d |
            fzy -p 'Press esc, or select . to exit program > ' |
            sed 's/\x1b\[[0-9;]*m//g' | # remove color control code
            awk '{print $NF}')"
        if [ "$result" = "." ]; then
            result=""
        fi
    }
    if [ ! -z "$1" ]; then
        cd $1
    else
        run_ecd
        while [ ! -z "$result" ]; do
            cd $result
            run_ecd
        done;
    fi
}
ecd-up () {
    # Walk up the directory tree: `ecd-up N` runs `cd ..` N times,
    # plain `ecd-up` goes up one level.
    # NOTE(review): relies on an external `is` helper ("is not empty",
    # "is gt") -- presumably is.sh; verify it is installed, otherwise
    # both conditions fail.
    # NOTE(review): `ecd-up 1` is a no-op as written (the `gt $1 1`
    # check excludes 1 and the else branch is skipped) -- confirm
    # whether that is intended.
    if is not empty $1; then
        if is gt $1 1;then
            for i in $(seq 1 $1);
            do
                cd ..
            done
        fi
    else
        cd ..
    fi
}
ecd_stop () {
    # Remove the `..` alias installed by this plugin's default branch.
    unalias ..
}
ecd_menu () {
    # Register this plugin's "ecd" command in the shared
    # enhance-command menu, keeping the menu file sorted and free of
    # duplicate entries.
    local menu="$SIMPLE_CONFIG/data/enhance-command.menu"
    echo "ecd" >> "$menu"
    # `sort -u` replaces the original `sort | uniq` pipeline; quoting
    # protects menu paths that contain whitespace
    sort -u "$menu" > "$menu.bk"
    mv "$menu.bk" "$menu"
}
case "$1" in
    'stop')
        # detach the plugin: drop the `..` alias again
        ecd_stop
        ;;
    'rebuild-menu')
        # re-register ecd in the shared command menu
        ecd_menu
        ;;
    'help')
        echo "usage: source $0"
        echo ""
        echo "usage $0 stop|rebuild-menu"
        ;;
    *)
        # default (plain `source`): make `..` shorthand for ecd-up
        alias ..='ecd-up'
        ;;
esac
| true
|
2a3839fa451b6ea81f9a0b60c1e03411adac12a3
|
Shell
|
ProfJust/ero
|
/ero_install/install_summit_pkg_on_noetic_remote_pc.sh
|
UTF-8
| 3,044
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
# script to setup Summit_XL-Workspace
# Version of 27.4.2021, incl. copying the map directories
echo -e "\033[34m ---------- ERO SS21 - Workspace einrichten ------------ \033[0m "
echo "Shellskript zur Installation der SummitXL-Pakete"
# -y added so the unattended run does not stop at the first prompt
sudo apt-get dist-upgrade -y
pwd
cd ~/catkin_ws/src/
# no noetic version available 15.4.21, so use melodic-devel
# see also https://index.ros.org/p/summit_xl_gazebo/ etc.
# melodic-master lags behind and still contains many (unused?) folders
git clone https://github.com/RobotnikAutomation/summit_xl_sim -b melodic-devel
git clone https://github.com/RobotnikAutomation/summit_xl_common -b melodic-devel
git clone https://github.com/RobotnikAutomation/robotnik_msgs -b melodic-devel
git clone https://github.com/RobotnikAutomation/robotnik_sensors -b melodic-devel
git clone https://github.com/rst-tu-dortmund/costmap_prohibition_layer.git
# noetic version available 15.4.21, so use it
git clone https://github.com/ros-planning/navigation.git -b noetic-devel
git clone https://github.com/cra-ros-pkg/robot_localization.git -b noetic-devel
git clone https://github.com/ros-geographic-info/geographic_info.git
git clone https://github.com/ros-geographic-info/unique_identifier.git
git clone https://github.com/ccny-ros-pkg/imu_tools.git -b noetic
sudo apt-get dist-upgrade -y #-y skips the confirmation prompt
sudo apt-get update -y
# Test
sudo apt-get install ros-$(rosversion -d)-navigation -y
sudo apt-get install ros-noetic-robot-localization -y
sudo apt-get install ros-noetic-mavros-* -y
sudo apt-get install ros-noetic-gmapping -y
sudo apt-get install ros-noetic-teb-local-planner -y
sudo apt-get install ros-noetic-costmap-prohibition-layer -y
sudo apt-get install ros-noetic-summit-xl-robot-control -y
sudo apt-get install ros-noetic-nmea-navsat-driver -y
sudo apt-get install ros-noetic-twist-mux -y
sudo apt-get install ros-noetic-gazebo-ros-control -y
sudo apt-get install ros-noetic-twist-mux -y
sudo apt-get install ros-noetic-teleop-twist-keyboard -y
sudo apt-get install ros-noetic-tf2-sensor-msgs -y
# BUGFIX: this line used `-Y`, which apt-get rejects, so the package
# was never installed by it (the line added on 16.04.21 repeats it
# correctly)
sudo apt-get install ros-noetic-velocity-controllers -y
# added by OJ 16.04.21
sudo apt-get install ros-noetic-velocity-controllers -y
# added by OJ 28.4.21
sudo apt-get install pyqt5-dev-tools -y
# BUGFIX: a stray trailing word "and" made apt try to install a
# non-existent package named "and", failing the whole command
sudo apt-get install -y libsdl-image1.2-dev
sudo apt-get install -y libsdl-dev
echo -e "\033[31m Aktualisiere alle Abhaengigkeiten der ROS-Pakete \033[0m"
rosdep update
rosdep install --from-paths src --ignore-src -r -y
echo -e "\033[34m copying WHS-Map directories to Robotnik-Packages \033[0m"
cp -a ~/catkin_ws/src/ero/ero_gz_worlds/whs_world_map/. ~/catkin_ws/src/summit_xl_common/summit_xl_localization/maps/whs_world_map
cp -a ~/catkin_ws/src/ero/ero_gz_worlds/whs_world_model/. ~/.gazebo/models/whs_world_model
cp -a ~/catkin_ws/src/ero/ero_gz_worlds/whs_world/. ~/catkin_ws/src/summit_xl_sim/summit_xl_gazebo/worlds
# cp 30m_Laserscanner_ UTM30LX ~/catkin_ws/src/summit_xl_sim/summit_xl_gazebo/worlds
echo -e "\033[31m to do: $ cd ~/catkin_ws/ ... catkin_make \033[0m"
| true
|
54bb957aba5667f7e39cfe88264410f2c648d359
|
Shell
|
javalosp/buildscripts
|
/uk/ac/archer/unittest.pbs
|
UTF-8
| 1,290
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash --login
###############################################################################
#
# This is an example PBS script for testing Fluidity on Archer, and
# can be found in the GitHub FluidityProject/buildscripts repository
# as:
#
#   /uk/ac/archer/unittest.pbs
#
# This script is submitted by build.bash to test Fluidity.
#
# The progress of the tests can be monitored in a timestamped
# file of the form:
#
#   unittest-##########.log
#
###############################################################################
#PBS -N unittest
#PBS -l select=1
#PBS -l walltime=24:00:00
# Make sure any symbolic links are resolved to absolute path
export PBS_O_WORKDIR=$(readlink -f $PBS_O_WORKDIR)
# Change to the directory that the job was submitted from
cd $PBS_O_WORKDIR
# FLUIDITYDIR comes from the environment passed in using qsub -v
module use $FLUIDITYDIR/modulefiles
module unload PrgEnv-cray PrgEnv-gnu PrgEnv-intel
module load PrgEnv-fluidity
export PATH=$PATH:$FLUIDITYDIR/bin
export PYTHONPATH=$PYTHONPATH:$FLUIDITYDIR/python
# The tests takes about ?? hours on a compute node.
# Use a job-local scratch directory for temporary files.
mkdir -p tmp
export TMPDIR=$PWD/tmp
# TIMESTAMP comes from the environment passed in using qsub -v
# aprun runs the test target on a compute node (single task).
aprun -n 1 make unittest > unittest-${TIMESTAMP}.log 2>&1
unset TMPDIR
rm -rf tmp
| true
|
0218ab28190560ba7b6fd20f34fa50e4890f9aa4
|
Shell
|
tomerpq/All-Projects-Of-Tomer-Paz
|
/Operation Systems Projects(Multi Threading - C, bash)/Bash/ex12.sh
|
UTF-8
| 220
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
#Tomer Paz - 315311365
# For directory $1: first report every *.txt entry as a file, then
# report every subdirectory as a directory (in ls -v version order).
# Fixes: the shebang was on line 2 (a comment preceded it), `read`
# lacked -r, and $1 / $fn were unquoted.
ls -v -- "$1" |
while read -r fn
do
    if [[ $fn == *.txt ]]
    then
        echo "$fn is a file"
    fi
done
ls -v -- "$1" |
while read -r fn
do
    a="$1/$fn"
    if [[ -d $a ]]
    then
        echo "$fn is a directory"
    fi
done
| true
|
46a5e9065d14c95a00c32446256e92991e88ef25
|
Shell
|
jkkummerfeld/lamb
|
/lamb/experiment/tune_ptb_10m.sh
|
UTF-8
| 1,550
| 2.546875
| 3
|
[
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Copyright 2018 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# TUNING IS NOT CURRENTLY SUPPORTED IN THE OPEN-SOURCE VERSION. This for
# illustration only.
set -e
# Include definitions of dataset and tuning related variables.
source "$(dirname $0)/../lib/setup.sh"
source_lib "config/common.sh"
source_lib "config/tuning.sh"
source_lib "config/ptb_word_rmsprop.sh"
# Model hyperparameters
# target model size: 10 million parameters (`million` is presumably a
# helper defined by the sourced config libs -- verify)
num_params=$(million 10)
share_input_and_output_embeddings=true
# Evaluation hyperparameters
eval_softmax_temperature=-0.8
# Tuning parameters
num_workers=60
# Start a number of tuning studies, setting model specific parameters.
model="lstm"
tie_forget_and_input_gates=false
forget_bias=1.0
num_layers=1
tuneables="learning_rate,l2_penalty,
input_dropout,inter_layer_dropout,state_dropout,
output_dropout,input_embedding_ratio"
name="$(default_name)_${model}_d${num_layers}"
source_lib "run.sh" "$@"
| true
|
488f87057815c0e90da41b4814c510c8d272d658
|
Shell
|
CASE-Association/case-AudioSystem
|
/Resampling_plugin/volumio-plugins/plugins/audio_interface/brutefir3/alsa-capabilities
|
UTF-8
| 51,020
| 3.796875
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# shellcheck disable=SC2191
## ^^ we frequently use arrays for passing arguments to functions.
## This script for linux with bash 4.x displays a list with the audio
## capabilities of each alsa audio output interface and stores them in
## arrays for use in other scripts. This functionality is exposed by
## the `return_alsa_interface' function which is avaliable after
## sourcing the file. When ran from a shell, it will call that
## function.
##
## Copyright (C) 2014 Ronald van Engelen <ronalde+gitlab@lacocina.nl>
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
## Source: https://gitlab.com/ronalde/mpd-configure
## See also: https://lacocina.nl/detect-alsa-output-capabilities
LANG=C   ## force the C locale so parsed tool output is predictable
APP_NAME_AC="alsa-capabilities"
APP_VERSION="0.9.8"
APP_INFO_URL="https://lacocina.nl/detect-alsa-output-capabilities"
## set DEBUG to a non empty value to display internal program flow to
## stderr
DEBUG="${DEBUG:-}"
## set PROFILE to a non empty value to get detailed timing
## information. Normal output is suppressed.
PROFILE="${PROFILE:-}"
## to see how the script behaves with a certain output of aplay -l
## on a particular host, store its output in a file and supply
## the file path as the value of TESTFILE, eg:
## `TESTFILE=/tmp/somefile ./bash-capabilities
## All hardware and device tests will fail or produce fake outputs
## (hopefully with some grace).
TESTFILE="${TESTFILE:-}"
### generic functions
function die() {
    ## Print the supplied message(s) to stderr in the script's uniform
    ## "Error:" block and abort with a non-zero exit status.
    printf "\nError:\n%s\n\n" "$@" >&2
    exit 1
}
function debug() {
    ## Write an internal program-flow message to stderr: a 100-char
    ## separator line followed by the script name, the caller-supplied
    ## line number (right aligned in 4 columns) and the message.
    ##
    ## $1 - line number of the caller (usually ${LINENO})
    ## $2 - message to display
    ##
    ## `local' added: this file is meant to be sourced, and the
    ## original assignments leaked `lineno'/`message' into the
    ## sourcing shell's global scope.
    local lineno="$1"
    local message="$2"
    printf 1>&2 "=%.0s" {1..100}
    printf 1>&2 "\nDEBUG *** %s (%4d): %s\n" \
           "${APP_NAME_AC}" \
           "${lineno}" \
           "${message}"
}
function command_not_found() {
    ## give installation instructions for package $2 when command $1
    ## is not available, optionally with non-default instructions $3,
    ## and exit with error (via die).
    ##
    ## $1 - name of the missing command
    ## $2 - distribution package that provides it
    ## $3 - optional custom installation instructions
    ##
    ## `local' added so these generic names (command, package, msg, ...)
    ## do not leak into the shell that sources this library.
    local command="$1"
    local package="$2"
    local instructions="${3:-}"
    local msg="command \`${command}' (package \`${package}') not found. "
    if [[ -z "${instructions}" ]]; then
        msg+="See 'Requirements' on ${APP_INFO_URL}."
    else
        msg+="${instructions}"
    fi
    die "${msg}"
}
### alsa related functions
function get_aplay_output() {
    ## use aplay to do a basic alsa sanity check using aplay -l, or
    ## optionally using $TESTFILE containing the stored output of
    ## 'aplay -l'.
    ## returns the raw output of aplay on stdout, non-zero on error.
    ##
    ## BUGFIX: the error branches now use brace groups `{ ...; }'.
    ## The original used subshells `( ...; return 1 )', in which the
    ## `return' only left the subshell, so the function carried on and
    ## errors were never propagated to the caller.
    local res=""
    local aplay_msg_nosoundcards_regexp="no[[:space:]]soundcards"
    if [[ "${TESTFILE}x" != "x" ]]; then
        if [[ ! -f "${TESTFILE}" ]]; then
            # shellcheck disable=SC2059
            printf 1>&2 "${MSG_APLAY_ERROR_NOSUCHTESTFILE}" \
                   "${TESTFILE}"
            return 1
        else
            ## get the output from a file for testing purposes
            # shellcheck disable=SC2059
            printf 1>&2 "${MSG_APLAY_USINGTESTFILE}\n" \
                   "${TESTFILE}"
            # shellcheck disable=SC2059
            res="$(< "${TESTFILE}")" || {
                printf "${MSG_APLAY_ERROR_OPENINGTESTFILE}"
                return 1
            }
        fi
    else
        ## run aplay -l to check for alsa errors or display audio cards
        res="$(${CMD_APLAY} -l 2>&1)" || {
            # shellcheck disable=SC2059
            printf "${MSG_APLAY_ERROR_GENERAL}\n" "${res}"
            ## TODO: react on specific aplay error
            [[ ${DEBUG} ]] && debug "${LINENO}" "\`${CMD_APLAY} -l' returned error: \`${res}'"
            return 1
        }
        ## check for no soundcards
        if [[ "${res}" =~ ${aplay_msg_nosoundcards_regexp} ]]; then
            printf "%s\n" "${MSG_APLAY_ERROR_NOSOUNDCARDS}"
            ## TODO: react on specific aplay error
            [[ ${DEBUG} ]] && debug "${LINENO}" "\`${CMD_APLAY} -l' returned no cards: \`${res}'"
            return 1
        fi
    fi
    ## return the result to the calling function
    printf "%s" "${res}"
}
function handle_doublebrackets() {
    ## return the name of the alsa card / device, even when the name
    ## itself contains brackets: prints everything enclosed by the
    ## outermost [ ] pair of the argument, keeping nested brackets.
    ##
    ## $* - raw name string, e.g. 'card [USB Audio [2ch]]'
    local string="$*"
    local bracketcounter=0
    local i char
    for (( i=0; i<${#string}; i++ )); do
        char="${string:$i:1}"
        if [[ "${char}" = "[" ]]; then
            (( bracketcounter++ ))
        elif [[ "${char}" = "]" ]]; then
            (( bracketcounter-- ))
        fi
        if (( bracketcounter > 0 )); then
            ## inside outer brackets
            if (( bracketcounter < 2 )) && [[ "${char}" == "[" ]]; then
                ## the outermost opening bracket itself is not part of
                ## the name
                [[ ${DEBUG} ]] && \
                    debug "${LINENO}" "name with brackets found."
            else
                ## BUGFIX: print the character as DATA; the original
                ## `printf "${char}"' used it as the printf format
                ## string, which broke on `%' and backslashes.
                printf '%s' "${char}"
            fi
        fi
    done
}
function return_output_human() {
    ## print default output to std_err.
    ## called by fetch_alsa_outputinterfaces.
    ## Relies on the alsa_if_* / alsa_dev_* variables and the MSG_*
    ## labels set up by the caller; everything goes to stderr so that
    ## stdout stays reserved for machine-readable (json) output.
    printf "%s\n" "${alsa_if_display_title}" 1>&2;
    printf " - %-17s = %-60s\n" \
           "${MSG_ALSA_DEVNAME}" \
           "${alsa_dev_label}" 1>&2;
    printf " - %-17s = %-60s\n" \
           "${MSG_ALSA_IFNAME}" "${alsa_if_label}" 1>&2;
    printf " - %-17s = %-60s\n" \
           "${MSG_ALSA_UACCLASS}" "${alsa_if_uacclass}" 1>&2;
    printf " - %-17s = %-60s\n" \
           "${MSG_ALSA_CHARDEV}" "${alsa_if_chardev}" 1>&2;
    if [[ ! -z ${formats_res_err} ]]; then
        ## device is locked by an unspecified process
        printf " - %-17s = %-60s\n" \
               "${MSG_ALSA_ENCODINGFORMATS}" \
               "${MSG_ERROR_GETTINGFORMATS}" 1>&2;
        printf " %-17s %-60s\n" \
               " " \
               "${formats_res[@]}" 1>&2;
    else
        formatcounter=0
        if [[ ! -z ${OPT_SAMPLERATES} ]]; then
            ## with --samplerates the label changes accordingly
            MSG_ALSA_ENCODINGFORMATS="samplerates (Hz)"
        fi
        printf " - %-17s = " \
               "${MSG_ALSA_ENCODINGFORMATS}" 1>&2;
        # shellcheck disable=SC2141
        ## print the first format on the label line, continuation
        ## lines indented to align with it
        while IFS="\n" read -r line; do
            (( formatcounter++ ))
            if (( formatcounter > 1 )); then
                printf "%-23s" " " 1>&2;
            fi
            printf "%-60s\n" "${line}" 1>&2;
        done<<<"${alsa_if_formats[@]}"
    fi
    printf " - %-17s = %-60s\n" \
           "${MSG_ALSA_MONITORFILE}" "${alsa_if_monitorfile}" 1>&2;
    printf " - %-17s = %-60s\n" \
           "${MSG_ALSA_STREAMFILE}" "${alsa_if_streamfile}" 1>&2;
    printf "\n"
}
function key_val_to_json() {
    ## returns a json `"key": value' pair on stdout; numeric values
    ## are emitted bare, anything else is double quoted.
    ##
    ## $1 - key name
    ## $2 - value
    ##
    ## `local' added: this file is meant to be sourced, and the
    ## original assignments leaked `key'/`val'/`numval' into the
    ## sourcing shell's global scope.
    local key="$1"
    local val="$2"
    local numval
    ## check if val is a number
    if printf -v numval "%d" "${val}" 2>/dev/null; then
        ## it is
        printf '"%s": %d' \
               "${key}" "${numval}"
    else
        printf '"%s": "%s"' \
               "${key}" "${val}"
    fi
    printf "\n"
}
function ret_json_format() {
    ## returns the json formatted encoding format and possibly sample
    ## rates.
    ##
    ## $1 - raw formats: either "FORMAT: rate rate ..." lines (when
    ##      sample rates were probed) or a comma separated format list.
    formats_raw="$1"
    declare -a json_formats
    if [[ "${formats_raw}" =~ ':' ]]; then
        ## sample rates included
        while read -r line; do
            split_re="(.*):(.*)"
            if [[ "${line}" =~ ${split_re} ]]; then
                format=${BASH_REMATCH[1]}
                ## NOTE(review): this IFS assignment (and the one in
                ## the else branch) persists after the function
                ## returns -- confirm no later code relies on the
                ## default IFS.
                IFS=" " samplerates=(${BASH_REMATCH[2]})
                printf -v sr_out "\t\t\"%s\",\n" \
                       "${samplerates[@]}"
                ## strip the continuation comma from the last rate
                sr_out="${sr_out%,*}"
                label_samplerates='"samplerates"'
                output_line="{
$(key_val_to_json format "${format// /}"),
${label_samplerates}: [
${sr_out}
]
},"
                output_lines+=("${output_line}")
            fi
        done<<<"${formats_raw}"
        printf -v json_formats "\t%s\n" "${output_lines[@]}"
        ## strip the continuation comma from the last element
        json_formats="${json_formats%,*}"
    else
        ## no sample rates included
        IFS="," formats_res=(${formats_raw})
        printf -v json_formats '\t\t"%s",\n' \
               "${formats_res[@]// /}"
        ## strip the continuation comma from the last element
        json_formats="${json_formats%,*}"
    fi
    printf "%s" "${json_formats}"
}
function ret_json_card() {
    ## print json formatted output to std_out.
    ## called by fetch_alsa_outputinterfaces.
    ##
    ## $1 - encoding formats string (forwarded to ret_json_format)
    ## $2 - non-empty marks the last interface: suppresses the
    ##      trailing comma that separates json array elements
    ## Reads the alsa_if_* / alsa_dev_* globals set by the caller.
    #cur_aif_no="$1"
    local str_formats_res="$1"
    last_aif="$2"
    printf -v encoding_formats_val "[\n %s\n\t]" \
           "$(ret_json_format "${str_formats_res}")"
    ## using to indexed arrays in order to preserve order of fields
    declare -a json_keyvals
    json_fields=(
        id
        hwaddr
        description
        cardnumber
        interfacenumber
        cardname
        interfacename
        chardev
        monitorfile
        streamfile
        usbaudioclass
    )
    ## collect the values in the same order as json_fields above
    json_values=(${cur_aif_no})
    json_values+=(${alsa_if_hwaddress})
    #a_json_keyvals[description]=
    json_values+=("${alsa_if_title_label}")
    #a_json_keyvals[cardnumber]=
    json_values+=(${alsa_dev_nr})
    #a_json_keyvals[interfacenumber]=
    json_values+=(${alsa_if_nr})
    #a_json_keyvals[cardname]=
    json_values+=("${alsa_dev_label}")
    #a_json_keyvals[interfacename]=
    json_values+=("${alsa_if_label}")
    #a_json_keyvals[chardev]=
    json_values+=(${alsa_if_chardev})
    #a_json_keyvals[monitorfile]=
    json_values+=(${alsa_if_monitorfile})
    #a_json_keyvals[streamfile]=
    json_values+=(${alsa_if_streamfile})
    #a_json_keyvals[usbaudioclass]=
    json_values+=("${alsa_if_uacclass}")
    ## pair each field name with its value as a json key/value line
    for json_fieldno in "${!json_fields[@]}"; do
        json_keyvals+=("$(key_val_to_json \
            "${json_fields[${json_fieldno}]}" "${json_values[${json_fieldno}]}")")
    done
    printf -v str_json_keyvals "\t%s,\n" "${json_keyvals[@]}"
    # shellcheck disable=SC1078,SC1079,SC2027
    aif_json="""\
{
${str_json_keyvals%,*}
\"encodingformats\": "${encoding_formats_val}"
}\
"""
    printf "%s" "${aif_json}"
    ## all interfaces but the last get a separating comma
    if [[ "${last_aif}x" == "x" ]]; then
        printf ","
    fi
    printf "\n"
}
function return_output_json() {
    ## print json formatted output to std_out.
    ## called by fetch_alsa_outputinterfaces.
    ##
    ## $1 - concatenated per-card json fragments, each ending with a
    ##      separator comma; the trailing comma is stripped below.
    json_cards="$1"
    json='{
"alsa_outputdevices": [
%s
]
}'
    # shellcheck disable=SC2059
    printf "${json}\n" "${json_cards%,*}"
}
function fetch_alsa_outputinterfaces() {
## parses each output interface returned by `get_aplay_output'
## after filtering (when the appropriate commandline options are
## given), stores its capabilities in the appropriate global
## indexed arrays and displays them.
## globals written: NR_AIFS_BEFOREFILTERING, NR_AIFS_AFTERFILTERING
## and (in the `match' branch near the end) the ALSA_AIF_* arrays.
## globals read: OPT_HWFILTER, OPT_CUSTOMFILTER, OPT_LIMIT_AO/DO/UO,
## OPT_QUIET, OPT_JSON, TESTFILE, DEBUG plus various MSG_*/label maps.
json_output=
msg=()
aplay_lines=()
integer_regexp='^[0-9]+$'
aplay_card_regexp="^card[[:space:]][0-9]+:"
## exit on error
#aplay_output="$
## reset the counter for interfaces without filtering
NR_AIFS_BEFOREFILTERING=0
## modify the filter for aplay -l when OPT_HWFILTER is set
if [[ ! -z "${OPT_HWFILTER}" ]]; then
# the portion without `hw:', eg 0,1
alsa_filtered_hwaddr="${OPT_HWFILTER#hw:*}"
alsa_filtered_cardnr="${alsa_filtered_hwaddr%%,*}"
alsa_filtered_devicenr="${alsa_filtered_hwaddr##*,}"
if [[ ! ${alsa_filtered_cardnr} =~ ${integer_regexp} ]] || \
[[ ! ${alsa_filtered_devicenr} =~ ${integer_regexp} ]]; then
msg+=("Invalid OPT_HWFILTER (\`${OPT_HWFILTER}') specified.")
msg+=("Should be \`hw:x,y' were x and y are both integers.")
printf -v msg_str "%s\n" "${msg[@]}"
die "${msg_str}"
fi
aplay_card_regexp="^card[[:space:]]${alsa_filtered_cardnr}:[[:space:]].*"
aplay_device_regexp="[[:space:]]device[[:space:]]${alsa_filtered_devicenr}:"
aplay_card_device_regexp="${aplay_card_regexp}${aplay_device_regexp}"
else
aplay_card_device_regexp="${aplay_card_regexp}"
fi
## iterate each line of aplay output
while read -r line ; do
## filter for `^card' and then for `OPT_CUSTOMFILTER' to get matching
## lines from aplay and store them in an array
if [[ "${line}" =~ ${aplay_card_device_regexp} ]]; then
[[ ${DEBUG} ]] && \
( msg_debug="aplay -l output line: \`${line}'. with OPT_CUSTOMFILTER: ${OPT_CUSTOMFILTER}"
debug "${LINENO}" "${msg_debug}")
## raise the counter for interfaces without filtering
((NR_AIFS_BEFOREFILTERING++))
if [[ "${OPT_CUSTOMFILTER}x" != "x" ]]; then
## check if line matches `OPT_CUSTOMFILTER'
if [[ "${line}" =~ ${OPT_CUSTOMFILTER} ]]; then
[[ ${DEBUG} ]] && \
debug "${LINENO}" "match: ${line}"
## store the line in an array
aplay_lines+=("${line}")
else
[[ ${DEBUG} ]] && \
debug "${LINENO}" "no match with filter ${OPT_CUSTOMFILTER}: ${line}"
fi
else
## store the line in an array
aplay_lines+=("${line}")
fi
fi
done< <(get_aplay_output "${aplay_card_regexp}") || \
die "get_aplay_output '${aplay_card_regexp}' returned an error."
#< "${aplay_output}"
## check whether soundcards were found
NR_AIFS_AFTERFILTERING=${#aplay_lines[@]}
if (( NR_AIFS_AFTERFILTERING < 1 )); then
die "${#aplay_lines[@]} soundcards found"
fi
## loop through each item in the array
cur_aif_no=0
for line in "${aplay_lines[@]}"; do
((cur_aif_no++))
## set if type to default (ie analog)
alsa_if_type="ao"
## construct bash regexp for sound device
## based on aplay.c:
## printf(_("card %i: %s [%s], device %i: %s [%s]\n"),
## 1 card,
## 2 snd_ctl_card_info_get_id(info),
## 3 snd_ctl_card_info_get_name(info),
## 4 dev,
## 5 snd_pcm_info_get_id(pcminfo),
## 6 snd_pcm_info_get_name(pcminfo));
##
## portion (ie before `,')
## caution: snd_{pcm,ctl}_card_info_get_name(info) could
## return an empty string between square brackets, while
## string returned by snd_{pcm,ctl}_card_info_get_id may
## contain square brackets
alsa_regexp_common="[[:space:]]([0-9]+):[[:space:]](.*)\]"
alsa_dev_regexp="card${alsa_regexp_common}"
alsa_if_regexp="device${alsa_regexp_common}"
## same for interface portion
alsa_dev_if_regexp="^${alsa_dev_regexp},[[:space:]]${alsa_if_regexp}$"
## unset / empty out all variables
alsa_dev_nr=""
alsa_dev_label=""
alsa_if_nr=""
alsa_if_name=""
alsa_if_label=""
before_bracket_re="^([^[]+)\["
if [[ "${line}" =~ ${alsa_dev_if_regexp} ]]; then
alsa_dev_nr="${BASH_REMATCH[1]}"
alsa_dev_name_raw="${BASH_REMATCH[2]}"
alsa_if_nr="${BASH_REMATCH[3]}"
alsa_if_name_raw="${BASH_REMATCH[4]}"
if [[ "${alsa_dev_name_raw}" =~ ${before_bracket_re} ]]; then
alsa_dev_name_beforebracket="${BASH_REMATCH[1]}"
alsa_dev_name_betweenbrackets="${alsa_dev_name_raw//${alsa_dev_name_beforebracket}}"
if [[ ${DEBUG} ]]; then
debug "${LINENO}" "#####: alsa_dev_name_beforebracket \`${alsa_dev_name_beforebracket}'"
debug "${LINENO}" "#####: alsa_dev_name_betweenbrackets \`${alsa_dev_name_betweenbrackets}'"
fi
else
printf -v msg_err "%s: alsa_dev_name_raw \`%s' did not match regexp before_bracket_re (\`%s')\n" \
"${LINENO}" "${alsa_dev_name_raw}" "${before_bracket_re}"
die "${msg_err}"
break
fi
if [[ "${alsa_if_name_raw}" =~ ${before_bracket_re} ]]; then
alsa_if_name_beforebracket="${BASH_REMATCH[1]}"
alsa_if_name_betweenbrackets="${alsa_if_name_raw//${alsa_if_name_beforebracket}}"
if [[ ${DEBUG} ]]; then
debug "${LINENO}" "#####: alsa_if_name_beforebracket \`${alsa_if_name_beforebracket}'"
debug "${LINENO}" "#####: alsa_if_name_betweenbrackets \`${alsa_if_name_betweenbrackets}'"
fi
else
printf -v msg_err "%s: alsa_if_name_raw \`%s' did not match regexp before_bracket_re (\`%s')\n" \
"${LINENO}" "${alsa_if_name_raw}" "${before_bracket_re}"
die "${msg_err}"
break
fi
else
printf -v msg_err "%s: aplay line did not match alsa_dev_if_regexp (\`%s'):\n%s\n" \
"${LINENO}" "${alsa_dev_if_regexp}" "${line}"
die "${msg_err}"
break
fi
## format the names
## alsa_{dev,if}_name_beforebracket includes trailing space
## alsa_{dev,if}_name_betweenbrackets includes leading square bracket
## strip both
## courtesy: https://unix.stackexchange.com/a/360648
shopt -s extglob
alsa_dev_name_beforebracket="${alsa_dev_name_beforebracket%%+([[:space:]])}"
## NOTE(review): the two `betweenbrackets' assignments below strip from
## *_beforebracket instead of *_betweenbrackets, overwriting the
## bracketed part with the before-bracket text; as a result the
## `X [Y]' label branch below can never trigger -- looks like a bug,
## confirm against upstream before changing.
alsa_dev_name_betweenbrackets="${alsa_dev_name_beforebracket##+([)}"
alsa_if_name_beforebracket="${alsa_if_name_beforebracket%%+([[:space:]])}"
alsa_if_name_betweenbrackets="${alsa_if_name_beforebracket##+([)}"
shopt -u extglob
## do not include identical or empty name between square brackets
if [[ "${alsa_dev_name_beforebracket}x" == "${alsa_dev_name_betweenbrackets}x" ]] || \
[[ "${alsa_dev_name_betweenbrackets}x" == "x" ]]; then
alsa_dev_label="${alsa_dev_name_beforebracket}"
else
alsa_dev_label="${alsa_dev_name_beforebracket} [${alsa_dev_name_betweenbrackets}]"
fi
if [[ "${alsa_if_name_beforebracket}x" == "${alsa_if_name_betweenbrackets}x" ]] || \
[[ "${alsa_if_name_betweenbrackets}x" == "x" ]]; then
alsa_if_label="${alsa_if_name_beforebracket}"
else
alsa_if_label="${alsa_if_name_beforebracket} [${alsa_if_name_betweenbrackets}]"
fi
declare -a alsa_if_formats=()
alsa_if_hwaddress="hw:${alsa_dev_nr},${alsa_if_nr}"
## construct the path to the character device for the
## interface (ie `/dev/snd/xxx')
alsa_if_chardev="/dev/snd/pcmC${alsa_dev_nr}D${alsa_if_nr}p"
## construct the path to the hwparams file
alsa_if_hwparamsfile="/proc/asound/card${alsa_dev_nr}/pcm${alsa_if_nr}p/sub0/hw_params"
## before determining whether this is a usb device, assume
## the monitor file is the hwparams file
alsa_if_monitorfile="${alsa_if_hwparamsfile}"
## assume stream file for the interface (ie
## `/proc/asound/cardX/streamY') to determine whether
## the interface is a uac device, and if so, which class it is
alsa_if_streamfile="/proc/asound/card${alsa_dev_nr}/stream${alsa_if_nr}"
## assume no uac device
alsa_if_uacclass="${MSG_PROP_NOTAPPLICABLE}"
if [[ ! -z ${TESTFILE} ]]; then
## device is not real
alsa_if_formats+=("(${MSG_ERROR_CHARDEV_NOFORMATS})")
alsa_if_uacclass_nr="?"
else
## check if the hwparams file exists
if [[ ! -f "${alsa_if_hwparamsfile}" ]]; then
alsa_if_hwparamsfile="${alsa_if_hwparamsfile} (error: not accessible)"
fi
## check if the chardev exists
if [[ ! -c "${alsa_if_chardev}" ]]; then
msg_err="alsa_if_chardev \`${alsa_if_chardev}': ${MSG_ERROR_NOT_CHARDEV} "
[[ ${DEBUG} ]] && \
debug "${LINENO}" "${msg_err}"
alsa_if_chardev="${alsa_if_chardev} (${MSG_ERROR_NOT_CHARDEV})"
else
[[ ${DEBUG} ]] && \
debug "${LINENO}" "alsa_if_chardev \`${alsa_if_chardev}' is a valid chardev."
fi
## check whether the monitor file exists; it always should
if [[ ! -f ${alsa_if_monitorfile} ]]; then
msg_err="${alsa_if_monitorfile} ${MSG_ERROR_NOFILE} (${MSG_ERROR_UNEXPECTED})"
alsa_if_monitorfile="${msg_err}"
[[ ${DEBUG} ]] && \
debug "${LINENO}" "${msg_err}"
fi
## check whether the streamfile exists; it only should
## exist in the case of a uac interface
if [[ ! -f "${alsa_if_streamfile}" ]]; then
msg_err="${alsa_if_streamfile} ${MSG_ERROR_NOFILE} (${MSG_ERROR_UNEXPECTED})"
[[ ${DEBUG} ]] && \
debug "${LINENO}" "${msg_err}"
## no uac interface
alsa_if_streamfile="${MSG_PROP_NOTAPPLICABLE}"
else
[[ ${DEBUG} ]] && \
debug "${LINENO}" "using alsa_if_streamfile \`${alsa_if_streamfile}'."
## set interface to usb out
alsa_if_type="uo"
## uac devices will use the stream file instead of
## hwparams file to monitor
## alsa_if_monitorfile="${alsa_if_streamfile}"
## get the type of uac endpoint
alsa_if_uac_ep="$(return_alsa_uac_ep "${alsa_if_streamfile}")"
# shellcheck disable=SC2181
if [[ $? -ne 0 ]]; then
[[ ${DEBUG} ]] && \
debug "${LINENO}" "could not determine alsa_if_uac_ep."
alsa_if_uacclass_nr="?"
else
[[ ${DEBUG} ]] && \
debug "${LINENO}" "alsa_if_uac_ep set to \`${alsa_if_uac_ep}'."
## lookup the uac class in the array for this type of endpoint (EP)
## (for readability)
alsa_if_uacclass="${UO_EP_LABELS[${alsa_if_uac_ep}]}"
## the uac class number (0, 1, 2 or 3) according to ./sound/usb/card.h
alsa_if_uacclass_nr="${alsa_if_uacclass% - *}"
classnr_regexp='^[0-3]+$'
if [[ ! ${alsa_if_uacclass_nr} =~ ${classnr_regexp} ]]; then
[[ ${DEBUG} ]] && \
debug "${LINENO}" "invalid uac class number \`${alsa_if_uacclass_nr}'. \
${MSG_ERROR_UNEXPECTED}"
alsa_if_uacclass_nr="?"
fi
fi
fi
fi
## for non-uac interfaces: check whether it is some other
## digital interface
if [[ ! "${alsa_if_type}" = "uo" ]]; then
for filter in "${DO_INTERFACE_FILTER[@]}"; do
## `,,' downcases the string, while `*var*' does a
## wildcard match
## NOTE(review): `alsa_if_name' is emptied at the top of this loop
## and never reassigned, so this effectively compares the empty
## string; presumably `alsa_if_label' (or *_name_raw) was intended
## -- confirm before changing.
if [[ "${alsa_if_name,,}" == *"${filter}"* ]]; then
[[ ${DEBUG} ]] && \
debug "${LINENO}" "match = ${alsa_if_name,,}: ${filter}"
## set ao type to d(igital)o(out)
alsa_if_type="do"
## exit this for loop
break
fi
done
fi
## see if the interface type matches the user specified
## filters and if so construct titles and store a pair of
## hardware address and monitoring file in the proper array
match=
case "${alsa_if_type}" in
"ao")
## only if neither `OPT_LIMIT_DO' and `OPT_LIMIT_UO' are set
[[ ! -z ${OPT_LIMIT_DO} || ! -z ${OPT_LIMIT_UO} ]] && \
continue || match="true"
;;
"do")
## only if neither `OPT_LIMIT_AO' and `OPT_LIMIT_UO' are set
[[ ! -z ${OPT_LIMIT_AO} || ! -z ${OPT_LIMIT_UO} ]] && \
continue || match="true"
;;
"uo")
## only if `OPT_LIMIT_AO' is not set
[[ ! -z ${OPT_LIMIT_AO} ]] && \
continue || match="true"
esac
if [[ ! -z ${match} ]]; then
## put each encoding format and possibily the sample rates
## in an array
alsa_if_formats=()
formats_res_err=
str_formats_res="$(return_alsa_formats \
"${alsa_dev_nr}" \
"${alsa_if_nr}" \
"${alsa_if_type}" \
"${alsa_if_streamfile}" \
"${alsa_if_chardev}")"
# shellcheck disable=SC2181
if [[ $? -ne 0 ]]; then
formats_res_err=1
fi
alsa_if_formats+=("${str_formats_res}")
alsa_if_title_label="${ALSA_IF_LABELS[${alsa_if_type}]}"
## reconstruct the label if it contained square brackets
## construct the display title
printf -v alsa_if_display_title \
" %s) %s \`%s'" \
"${cur_aif_no}" \
"${alsa_if_title_label}" \
"${alsa_if_hwaddress}"
## store the details of the current interface in global arrays
ALSA_AIF_HWADDRESSES+=("${alsa_if_hwaddress}")
ALSA_AIF_MONITORFILES+=("${alsa_if_monitorfile}")
ALSA_AIF_DISPLAYTITLES+=("${alsa_if_display_title}")
ALSA_AIF_DEVLABELS+=("${alsa_dev_label}")
ALSA_AIF_LABELS+=("${alsa_if_label}")
ALSA_AIF_UACCLASSES+=("${alsa_if_uacclass}")
## NOTE(review): unlike the surrounding arrays this is a plain scalar
## assignment (no `+='), so each matching interface overwrites the
## previous value; possibly `+=("${alsa_if_formats[@]}")' was
## intended -- confirm.
ALSA_AIF_FORMATS="${alsa_if_formats[*]}"
ALSA_AIF_CHARDEVS+=("${alsa_if_chardev}")
fi
if [[ -z "${OPT_QUIET}" ]] && [[ "${OPT_JSON}x" == "x" ]]; then
## print the list to std_err
res_human="$(return_output_human)" || exit 1
printf 1>&2 "%s\n" "${res_human}"
fi
if [[ "${OPT_JSON}x" != "x" ]]; then
## NOTE(review): the json fragment is only appended while cur_aif_no
## is *less than* the number of aplay lines, so the last interface is
## skipped here; verify the final card is emitted elsewhere.
if [[ ${cur_aif_no} -lt ${#aplay_lines[@]} ]]; then
printf -v json_output "%s%s\n" \
"${json_output}" \
"$(ret_json_card "${str_formats_res}" "")"
fi
fi
done
if [[ "${OPT_JSON}x" != "x" ]]; then
res_json="$(return_output_json "${json_output}")" || exit 1
printf "%s\n" "${res_json}"
fi
}
function get_locking_process() {
## return a string describing the command and id of the
## process locking the audio interface with card nr $1 and dev nr
## $2 based on its status file in /proc/asound.
## returns a comma separated string containing the locking cmd and
## pid, or an error when the interface is not locked (ie
## 'closed').
## outputs: `PID,CMDLINE' on stdout (cmdline NUL-separated args
## joined with spaces); return status 0 when a locker was found,
## 1 when the interface is closed/unused.
alsa_card_nr="$1"
alsa_if_nr="$2"
proc_statusfile="/proc/asound/card${alsa_card_nr}/pcm${alsa_if_nr}p/sub0/status"
owner_pid=
owner_stat=
owner_cmd=
parent_pid=
parent_cmd=
locking_cmd=
locking_pid=
## specific for mpd: each alsa output plugin results in a locking
## process indicated by `owner_pid` in
## /proc/asound/cardX/pcmYp/sub0/status: `owner_pid : 28022'
## this is a child process of the mpd parent process (`28017'):
##mpd(28017,mpd)-+-{decoder:flac}(28021)
## |-{io}(28019)
## |-{output:Peachtre}(28022) <<< owner_pid / child
## `-{player}(28020)
owner_pid_re="owner_pid[[:space:]]+:[[:space:]]+([0-9]+)"
[[ ${DEBUG} ]] && \
debug "${LINENO}" "examining status file ${proc_statusfile}."
## scan the status file: `owner_pid' marks a locker, a literal
## `closed' line means the interface is not in use
while read -r line; do
if [[ "${line}" =~ ${owner_pid_re} ]]; then
owner_pid="${BASH_REMATCH[1]}"
break
elif [[ "${line}" == "closed" ]]; then
return 1
fi
done<"${proc_statusfile}"
[[ ${DEBUG} ]] && \
debug "${LINENO}" "done examining status file ${proc_statusfile}."
if [[ -z ${owner_pid} ]]; then
## device is unused
[[ ${DEBUG} ]] && \
debug "${LINENO}" "${FUNCNAME[0]} called, but no owner_pid found in \`${proc_statusfile}'."
return 1
else
[[ ${DEBUG} ]] && \
debug "${LINENO}" "found owner pid in status file \`${proc_statusfile}': \`${owner_pid}'."
fi
## check if owner_pid is a child
## construct regexp for getting the ppid from /proc
## eg: /proc/837/stat:
## 837 (output:Pink Fau) S 1 406 406 0 -1 ...
## ^^^ ^^^
## +++-> owner_pid +++-> parent_pid
## NOTE(review): the captured group below is the *second* number after
## the state field which, per proc(5), is the process group id rather
## than the ppid; the `parent' naming appears loose (a group leader
## matches `parent_pid == owner_pid' below) -- confirm before relying
## on it.
parent_pid_re="(${owner_pid})[[:space:]]\(.*\)[[:space:]][A-Z][[:space:]][0-9]+[[:space:]]([0-9]+)"
# shellcheck disable=SC2162
read owner_stat < "/proc/${owner_pid}/stat"
[[ ${DEBUG} ]] && \
debug "${LINENO}" "owner_stat: \`${owner_stat}'"
if [[ "${owner_stat}" =~ ${parent_pid_re} ]]; then
parent_pid="${BASH_REMATCH[2]}"
if [[ "x${parent_pid}" == "x${owner_pid}" ]]; then
## device is locked by the process with id owner_pid, look up command
## eg: /proc/837/cmdline: /usr/bin/mpd --no-daemon /var/lib/mpd/mpd.conf
# shellcheck disable=SC2162
read owner_cmd < "/proc/${owner_pid}/cmdline"
[[ ${DEBUG} ]] && \
debug "${LINENO}" "cmd \`${owner_cmd}' with id \`${owner_pid}' has no parent."
locking_pid="${owner_pid}"
locking_cmd="${owner_cmd}"
else
## device is locked by the parent of the process with owner_pid
# shellcheck disable=SC2162
read owner_cmd < "/proc/${owner_pid}/cmdline"
# shellcheck disable=SC2162
read parent_cmd < "/proc/${parent_pid}/cmdline"
[[ ${DEBUG} ]] && \
debug "${LINENO}" "cmd \`${owner_cmd}' with id \`${owner_pid}' \
has parent cmd \`${parent_cmd}' with id \`${parent_pid}'."
locking_pid="${parent_pid}"
locking_cmd="${parent_cmd}"
fi
## return comma separated list (pid,cmd) to calling function
## (re-read cmdline NUL-separated so the args get space-joined)
locking_cmd="$(while read -r -d $'\0' line; do \
printf "%s " "${line}"; \
done< "/proc/${locking_pid}/cmdline")"
printf "%s,%s" "${locking_pid}" "${locking_cmd%% }"
else
## should not happen; TODO: handle
parent_pid=
fi
}
function ret_highest_alsa_samplerate() {
    ## check the highest supported sample rate of class $3 (`audio',
    ## anything else selects the video rates) for encoding format $2
    ## on alsa interface $1.
    ## prints the highest supported rate (in Hz) to stdout, or nothing
    ## when no candidate rate is supported.
    ## globals read: SAMPLERATES_AUDIO, SAMPLERATES_VIDEO (candidate
    ## rates; assumed to be ordered from high to low -- TODO confirm).
    alsa_if_hwaddress="$1"
    encoding_format="$2"
    rate_class="$3"
    ## copy the proper candidate list; quoted to prevent accidental
    ## word-splitting/globbing of the source array elements (SC2068)
    if [[ "${rate_class}" == "audio" ]]; then
        rates=("${SAMPLERATES_AUDIO[@]}")
    else
        rates=("${SAMPLERATES_VIDEO[@]}")
    fi
    for rate in "${rates[@]}"; do
        ## check_samplerate succeeds (and echoes the rate) when the
        ## interface supports it for this format; discard its output
        ## and use its exit status directly (avoids SC2181)
        if check_samplerate "${alsa_if_hwaddress}" "${encoding_format}" "${rate}" >/dev/null; then
            ## first (ie highest) supported rate: print it and stop
            printf "%s" "${rate}"
            break
        fi
        ## rate too high (or unsupported); try the next candidate
    done
}
function ret_supported_alsa_samplerates() {
## use aplay to get supported sample rates for playback for
## specified non-uac interface ($1) and encoding format ($2).
## returns a sorted (ascending, unique), newline separated list of
## valid rates on stdout.
## globals read: SAMPLERATES_AUDIO, SAMPLERATES_VIDEO.
alsa_if_hwaddress="$1"
encoding_format="$2"
declare -a rates
[[ ${DEBUG} ]] && \
debug "${LINENO}" "getting sample rates for device \`${alsa_if_hwaddress}' \
using encoding_format \`${encoding_format}'."
## check all audio/video rates from high to low; break when rate is
## supported while adding all the lower frequencies
highest_audiorate="$(ret_highest_alsa_samplerate \
"${alsa_if_hwaddress}" "${encoding_format}" "audio")"
highest_videorate="$(ret_highest_alsa_samplerate \
"${alsa_if_hwaddress}" "${encoding_format}" "video")"
## every candidate rate at or below the highest supported one is
## taken to be supported as well (saves one aplay call per rate)
for rate in "${SAMPLERATES_AUDIO[@]}"; do
if [[ ${rate} -le ${highest_audiorate} ]]; then
## supported; assume all lower rates are supported too
rates+=("${rate}")
fi
done
for rate in "${SAMPLERATES_VIDEO[@]}"; do
if [[ ${rate} -le ${highest_videorate} ]]; then
## supported; assume all lower rates are supported too
rates+=("${rate}")
fi
done
## sort and return the newline separated sample rates
sort -u -n <(printf "%s\n" "${rates[@]}")
}
function check_samplerate() {
## use aplay to check if the specified alsa interface ($1)
## supports encoding format $2 and sample rate $3
## returns a string with the supported sample rate or nothing
## return status: 0 when the rate is supported, 1 otherwise.
## note: invokes aplay on the device via return_reversed_aplay_error
## (see the `-s' caveat in the usage text).
alsa_if_hwaddress="$1"
format="$2"
samplerate="$3"
declare -a aplay_args_early
aplay_args_early+=(--device="${alsa_if_hwaddress}")
aplay_args_early+=(--format="${format}")
aplay_args_early+=(--channels="2")
aplay_args_early+=(--nonblock)
declare -a aplay_args_late
## set up regular expressions to match aplay's output errors
## unused
# shellcheck disable=SC2034
rate_notaccurate_re=".*Warning:.*not[[:space:]]accurate[[:space:]]\(requested[[:space:]]=[[:space:]]([0-9]+)Hz,[[:space:]]got[[:space:]]=[[:space:]]([0-9]+)Hz\).*"
# shellcheck disable=SC2034
badspeed_re=".*bad[[:space:]]speed[[:space:]]value.*"
# shellcheck disable=SC2034
sampleformat_nonavailable_re=".*Sample[[:space:]]format[[:space:]]non[[:space:]]available.*"
# shellcheck disable=SC2034
wrongformat_re=".*wrong[[:space:]]extended[[:space:]]format.*"
## used
default_re=".*Playing[[:space:]]raw[[:space:]]data.*"
[[ ${DEBUG} ]] && \
debug "${LINENO}" "testing rate ${samplerate}"
unset aplay_args_late
## set fixed sample rate
aplay_args_late+=(--rate="${samplerate}")
## generate aplay error using random noise to check whether sample
## rate is supported for this interface and format
# shellcheck disable=SC2145
printf -v aplay_args "%s " "${aplay_args_early[@]} ${aplay_args_late[@]}"
## NOTE(review): the `|| return 1' below guards `read' (which succeeds
## here); the exit status of the command substitution itself is
## masked, so only the first line of output is actually examined.
read -r firstline<<<"$(return_reversed_aplay_error "${aplay_args}")" || return 1
## aplay reporting `Playing raw data ...' means the rate was accepted
if [[ "${firstline}" =~ ${default_re} ]]; then
[[ ${DEBUG} ]] && \
debug "${LINENO}" "success"
printf "%s" "${samplerate}"
else
return 1
fi
}
function return_reversed_aplay_error() {
    ## force aplay to output an error message containing supported
    ## encoding formats, by playing PSEUDO_SILENT_AUDIO in a
    ## non-existing/unsupported format.
    ## $1: string of extra aplay arguments (intentionally word-split).
    ## prints aplay's combined stdout+stderr and *reverses* its exit
    ## status: returns 0 when aplay failed (the expected case; the
    ## error text holds the formats) and 1 when it unexpectedly
    ## succeeded.
    ## bugfix: the original `cmd || ( ...; return 0 ) && ( ...; return 1 )'
    ## parsed as `(cmd || A) && B', so B always ran and the subshell
    ## `return's never reached the caller -- the function returned 1
    ## unconditionally. Use an explicit if/else instead.
    aplay_args="$1"
    ## deliberately unquoted below so the argument string splits into
    ## separate aplay options
    cmd_aplay="${CMD_APLAY} ${aplay_args}"
    if LANG=C ${cmd_aplay} 2>&1 <<< "${PSEUDO_SILENT_AUDIO}"; then
        [[ ${DEBUG} ]] && \
            debug "${LINENO}" "\`${cmd_aplay}' returned success (which is not good)."
        return 1
    else
        [[ ${DEBUG} ]] && \
            debug "${LINENO}" "\`${cmd_aplay}' returned error (which is good)."
        return 0
    fi
}
function return_nonuac_formats() {
    ## use aplay to determine supported formats of non-uac interface
    ## `hw:$1,$2'.
    ## aplay is invoked with a bogus format (MPEG) so it errors out
    ## listing the formats it does support; that (status-reversed)
    ## output is printed by return_reversed_aplay_error.
    ## returns 1 when aplay unexpectedly succeeded.
    alsa_dev_nr="$1"
    alsa_if_nr="$2"
    ## quote the expansions so the hardware address can never be
    ## subject to word-splitting or globbing (SC2086)
    aplay_args=("--device=hw:${alsa_dev_nr},${alsa_if_nr}")
    aplay_args+=("--channels=2")
    aplay_args+=("--format=MPEG")
    aplay_args+=("--nonblock")
    printf -v str_args "%s " "${aplay_args[@]}"
    return_reversed_aplay_error "${str_args}" || \
        return 1
}
function return_uac_formats_rates() {
    ## get encoding formats with samplerates for a uac type interface
    ## by parsing its streamfile $1 (which saves calls to aplay).
    ## prints a newline separated list: `FORMAT:RATE,RATE,...'.
    ## only the playback section is parsed; parsing stops at the
    ## `Capture:' marker.
    streamfile_path="$1"
    re_interface="^[[:space:]]*Interface[[:space:]]([0-9])"
    re_format="^[[:space:]]*Format:[[:space:]](.*)"
    re_rates="^[[:space:]]*Rates:[[:space:]](.*)"
    re_capture="^Capture:"
    in_iface=
    cur_format=
    ## map: encoding format -> raw rates string
    declare -A fmt_rate_map
    ## iterate the lines of the streamfile
    while read -r streamline; do
        ## end of the playback interfaces
        if [[ "${streamline}" =~ ${re_capture} ]]; then
            break
        fi
        if [[ "${streamline}" =~ ${re_interface} ]]; then
            ## a new playback interface starts; forget any pending format
            in_iface=true
            cur_format=
            continue
        fi
        ## ignore anything outside an interface section
        [[ "${in_iface}x" == "x" ]] && continue
        if [[ "${cur_format}x" == "x" ]]; then
            ## waiting for a `Format:' line
            if [[ "${streamline}" =~ ${re_format} ]]; then
                cur_format="${BASH_REMATCH[1]}"
                fmt_rate_map[${cur_format}]=""
                [[ ${DEBUG} ]] && \
                    debug "${LINENO}" "format found: \`${cur_format}'"
            fi
        elif [[ "${streamline}" =~ ${re_rates} ]]; then
            ## `Rates:' line for the pending format; store it and
            ## reset the parser state
            fmt_rate_map[${cur_format}]="${BASH_REMATCH[1]}"
            [[ ${DEBUG} ]] && \
                debug "${LINENO}" "(format=${cur_format}) \
rates=${BASH_REMATCH[1]}"
            cur_format=
            in_iface=
        fi
    done<"${streamfile_path}"
    ## emit each format with its rates, spaces stripped from the rates
    for fmt in "${!fmt_rate_map[@]}"; do
        printf "%s:%s\n" \
            "${fmt}" "${fmt_rate_map[${fmt}]// /}"
    done
}
function return_alsa_formats() {
## fetch and return a comma separated string of playback formats
## for the interface specified in $1, of type $2. For non-uac
## interfaces: feed dummy input to aplay (--format=MPEG). For uac
## types: filter it directly from its stream file $3.
## arguments (as actually read below):
##   $1 alsa card number, $2 device/interface number, $3 interface
##   type (`uo' selects the uac branch), $4 path to the stream file,
##   $5 character device path.
## with OPT_SAMPLERATES set, prints one `FORMAT:rate,rate,...' line
## per format; otherwise a comma separated, padded format list.
## returns 1 when the device is locked by another process (and
## prints the locker description instead of the formats).
alsa_dev_nr="$1"
alsa_if_nr="$2"
alsa_if_type="$3"
alsa_if_streamfile="$4"
alsa_if_chardev="$5"
format="${format:-}"
rawformat="${rawformat:-}"
## reset globals that get_locking_process also writes
parent_pid=
parent_cmd=
declare -A uac_formats
if [[ "${alsa_if_type}" = "uo" ]]; then
## uac type; use streamfile to get encoding formats and/or
## samplerates (in the form of 'FORMAT: RATE RATE ...').
while read -r line; do
key="${line%:*}"
value="${line//${key}:/}"
uac_formats["${key}"]="${value}"
done< <(return_uac_formats_rates "${alsa_if_streamfile}")
## return the formatted line(s)
if [[ "${OPT_SAMPLERATES}x" == "x" ]]; then
## print comma separated list of formats
# shellcheck disable=SC2068
printf -v str_formats "%s, " "${!uac_formats[@]}"
printf "%-20s" "${str_formats%*, }"
else
## for each format, print "FORMAT1:rate1,rate2,..."
# shellcheck disable=SC2068
for key in ${!uac_formats[@]}; do
printf "%s:%s\n" "${key}" "${uac_formats[${key}]}"
done
fi
else
## non-uac type: if interface is not locked, use aplay to
## determine formats
## because of invalid file format, aplay is forced to return
## supported formats (=200 times faster than --dump-hw-params)
declare -a rawformats
format_re="^-[[:space:]]+([[:alnum:]_]*)$"
res="$(get_locking_process "${alsa_dev_nr}" "${alsa_if_nr}")"
# shellcheck disable=SC2181
if [[ $? -ne 0 ]]; then
## device is not locked, iterate aplay output
[[ ${DEBUG} ]] && \
debug "${LINENO}" "device is not locked; will iterate aplay_out"
while read -r line; do
if [[ "${line}" =~ ${format_re} ]]; then
rawformats+=(${BASH_REMATCH[1]})
fi
done< <(return_nonuac_formats "${alsa_dev_nr}" "${alsa_if_nr}") || return 1
## formats (and minimum/maximum sample rates) gathered, check if
## all sample rates should be checked
[[ ${DEBUG} ]] && debug "${LINENO}" "$(declare -p rawformats)"
if [[ "${OPT_SAMPLERATES}x" == "x" ]]; then
## just return the comma separated format(s)
printf -v str_formats "%s, " "${rawformats[@]}"
printf "%-20s" "${str_formats%*, }"
else
## check all sample rates for each format. warning:
## slowness ahead for non-uac interfaces, because of
## an aplay call for each unsupported sample rate + 1
## and each format
for rawformat in "${rawformats[@]}"; do
sorted_rates=""
## NOTE(review): `alsa_if_hwaddress' is not set in this function;
## it relies on the caller's global (set per interface in
## fetch_alsa_outputinterfaces) -- confirm.
while read -r line; do
sorted_rates+="${line},"
#printf -v str_rates "%s " "${line}"
done< <(ret_supported_alsa_samplerates \
"${alsa_if_hwaddress}" "${rawformat}")
## return each format newline separated with a space
## separated list of supported sample rates
printf "%s:%s\n" "${rawformat}" "${sorted_rates%*,}"
done
fi
else
## in use by another process
## res contains pid,cmd of locking process
locking_pid="${res%,*}"
locking_cmd="${res#*,}"
[[ ${DEBUG} ]] && \
debug "${LINENO}" "\
device is in use by command ${locking_cmd} with process id ${locking_pid}."
## return the error instead of the formats
printf "by command \`%s' with PID %s." \
"${locking_cmd}" "${locking_pid}"
return 1
fi
fi
}
function return_alsa_uac_ep() {
    ## returns the usb audio class endpoint (sync type) parsed from
    ## the interface's stream file, passed as the single argument ($1).
    ## based on ./sound/usb/proc.c:
    ##  printf "  Endpoint: %d %s (%s)\n",
    ##   1: fp->endpoint & USB_ENDPOINT_NUMBER_MASK (0x0f) > [0-9]
    ##      TODO: unsure which range this is; have seen 1, 3 and 5
    ##   2: USB_DIR_IN: "IN|OUT",
    ##   3: USB_ENDPOINT_SYNCTYPE: "NONE|ASYNC|ADAPTIVE|SYNC"
    ## prints the matched sync type on stdout; returns 1 when no
    ## matching OUT endpoint line is present.
    streamfile_path="$1"
    ep_mode=""
    ## assemble the matching regexp from its parts; capture group 1 is
    ## the endpoint number, group 2 the sync type we are after
    re_label="^[[:space:]]*Endpoint:"
    re_number="[[:space:]]([0-9]+)"
    re_direction="[[:space:]]OUT"
    re_synctype="[[:space:]]\((${UO_EP_NONE_FILTER}|${UO_EP_ADAPT_FILTER}|${UO_EP_ASYNC_FILTER}|${UO_EP_SYNC_FILTER})\)$"
    re_endpoint="${re_label}${re_number}${re_direction}${re_synctype}"
    ## scan the stream file for the first matching OUT endpoint
    while read -r streamline; do
        [[ "${streamline}" =~ ${re_endpoint} ]] || continue
        ep_mode="${BASH_REMATCH[2]}"
        [[ ${DEBUG} ]] && \
            debug "${LINENO}" "matching endpoint found in line \`${streamline}': \`${ep_mode}'."
        break
    done<"${streamfile_path}"
    if [[ "${ep_mode}x" == "x" ]]; then
        [[ ${DEBUG} ]] && \
            debug "${LINENO}" "no matching endpoints found. ${MSG_ERROR_UNEXPECTED}"
        return 1
    fi
    ## return the filtered endpoint type
    printf "%s" "${ep_mode}"
}
### command line parsing
function analyze_opt_limit() {
## check if the argument for the `-l' (limit) option is proper
## $1: the option as given on the command line (eg `-l'), used in
##     error messages only.
## $2: the value for the option (may be empty/absent).
## globals written: one of OPT_LIMIT_AO / OPT_LIMIT_UO / OPT_LIMIT_DO
## (set to "True").
## globals read: OPT_LIMIT_ARGS (flat list of alternating short,long
## option names, used to build the error message).
## on an invalid or missing value an error is printed to stderr and
## the script exits with status 1.
option="$1"
opt_limit="${2-}"
declare -a args
prev_opt=0
declare msg
case ${opt_limit} in
a|analog)
OPT_LIMIT_AO="True"
[[ ${DEBUG} ]] && \
debug "${LINENO}" "OPT_LIMIT_AO set to \`${OPT_LIMIT_AO}'"
return 0
;;
u|usb|uac)
OPT_LIMIT_UO="True"
[[ ${DEBUG} ]] && \
debug "${LINENO}" "OPT_LIMIT_UO set to \`${OPT_LIMIT_UO}'"
return 0
;;
d|digital)
OPT_LIMIT_DO="True"
[[ ${DEBUG} ]] && \
debug "${LINENO}" "OPT_LIMIT_DO set to \`${OPT_LIMIT_DO}'"
return 0
;;
*)
## construct list of option pairs: "x (or 'long option')"
for arg_index in "${!OPT_LIMIT_ARGS[@]}"; do
if [[ $(( arg_index % 2)) -eq 0 ]]; then
## even (short option): new array item
args+=("")
else
## odd (long option): add value to previous array item
prev_opt=$(( arg_index - 1 ))
## (negative array index requires bash >= 4.3)
args[-1]="${OPT_LIMIT_ARGS[${prev_opt}]} (or '${OPT_LIMIT_ARGS[${arg_index}]}')"
fi
done
args_val=$(printf "%s, " "${args[@]}")
# shellcheck disable=SC2059
msg_vals="$(printf " ${args_val%*, }\n")"
msg_custom="maybe you could try to use the custom filter option, eg:"
msg_trail="for limit option \`${option}' specified. should be one of:\n"
if [[ ! -z ${opt_limit} ]]; then
str_re=""
## build a case-insensitive character-class regexp out of the
## rejected value, eg `ab' -> `[Aa][Bb]', for the suggestion below
for (( i=0; i<${#opt_limit}; i++ )); do
char="${opt_limit:$i:1}"
str_re+="[${char^^}${char,,}]"
done
msg="invalid value \`${opt_limit}' "
# shellcheck disable=SC2059
msg+="$(printf "${msg_trail}${msg_vals}\n${msg_custom}")"
## display instructions to use the custom filter
msg+="$(printf "\n bash $0 -c \"%s\"\n" "${str_re}")"
else
# shellcheck disable=SC2059
msg="$(printf "no value for ${msg_trail}${msg_vals}")"
fi
## display the option pairs, stripping the trailing comma
printf "%s\n" "${msg}" 1>&2;
exit 1
esac
}
function display_usageinfo() {
## print the usage/help text to stderr; despite the original note
## this function does not exit by itself -- callers decide that.
## NOTE(review): "Surpress" below is a typo in user-facing runtime
## text (left as-is here); the `-j/--json' option accepted by
## analyze_command_line is not mentioned in this help text.
msg=$(cat <<EOF
Usage:
${APP_NAME_AC} [ -l a|d|u ] [ -c <filter> ] [-a <hwaddress>] [-s] [ -q ]
Displays a list of each alsa audio output interface with its details
including its alsa hardware address (\`hw:x,y').
The list may be filtered by using the limit option \`-l' with an
argument to only show interfaces that fit the limit. In addition, a
custom filter may be specified as an argument for the \`c' option.
The \`-q (quiet)' and \`-a (address)' options are meant for usage in
other scripts. The script returns 0 on success or 1 in case of no
matches or other errors.
-l TYPEFILTER, --limit TYPEFILTER
Limit the interfaces to TYPEFILTER. Can be one of
\`a' (or \`analog'), \`d' (or \`digital'), \`u'
(or \`usb'), the latter for USB Audio Class (UAC1
or UAC2) devices.
-c REGEXP, --customlimit REGEXP
Limit the available interfaces further to match
\`REGEXP'.
-a HWADDRESS, --address HWADDRESS
Limit the returned interface further to the one
specified with HWADDRESS, eg. \`hw:0,1'
-s, --samplerates Adds a listing of the supported sample rates for
each format an interface supports.
CAUTION: Besides being slow this option
PLAYS NOISE ON EACH OUTPUT!
-q, --quiet Surpress listing each interface with its details,
ie. only store the details of each card in the
appropriate arrays.
-h, --help Show this help message
Version ${APP_VERSION}. For more information see:
${APP_INFO_URL}
EOF
)
printf "%s\n" "${msg}" 1>&2;
}
function analyze_command_line() {
## parse command line arguments using the `manual loop` method
## described in http://mywiki.wooledge.org/BashFAQ/035.
## globals written: OPT_CUSTOMFILTER, OPT_HWFILTER, OPT_SAMPLERATES,
## OPT_QUIET, OPT_JSON (and the OPT_LIMIT_* flags via
## analyze_opt_limit). unknown options print a notice plus the usage
## text; missing required arguments exit with status 1.
while :; do
case "${1:-}" in
-l|--limit)
if [ -n "${2:-}" ]; then
[[ ${DEBUG} ]] && \
debug "${LINENO}" "$(printf "option \`%s' set to \`%s'.\n" "$1" "$2")"
analyze_opt_limit "$1" "$2"
shift 2
continue
else
## no value given: analyze_opt_limit prints the error (and exits)
analyze_opt_limit "$1"
exit 1
fi
;;
-c|--customfilter)
if [ -n "${2:-}" ]; then
[[ ${DEBUG} ]] && \
debug "${LINENO}" "$(printf "option \`%s' set to \`%s'.\n" "$1" "$2")"
OPT_CUSTOMFILTER="${2}"
shift 2
continue
else
printf "ERROR: option \`%s' requires a non-empty argument.\n" "$1" 1>&2
exit 1
fi
;;
-a|--address)
if [ -n "${2:-}" ]; then
[[ ${DEBUG} ]] && \
debug "${LINENO}" "option \`$1' set to \`$2'"
OPT_HWFILTER="$2"
shift 2
continue
else
printf "ERROR: option \`%s' requires a alsa hardware address \
as an argument (eg \`hw:x,y')\n" "$1" 1>&2
exit 1
fi
;;
-s|--samplerates)
## deprecated
[[ ${DEBUG} ]] && \
debug "${LINENO}" "option \`$1' set"
OPT_SAMPLERATES=true
shift
continue
;;
-q|--quiet|--silent)
[[ ${DEBUG} ]] && \
debug "${LINENO}" "option \`$1' set"
OPT_QUIET=true
shift
continue
;;
-j|--json)
## NOTE(review): accepted here but not documented in
## display_usageinfo's help text.
OPT_JSON=true
shift
continue
;;
-h|-\?|--help)
display_usageinfo
exit
;;
--)
## end of options marker
shift
break
;;
-?*)
printf "Notice: unknown option \`%s' ignored\n\n." "$1" 1>&2
display_usageinfo
exit
;;
*)
break
esac
done
}
## Main entry point; see display_usageinfo().
## Parses arguments, enumerates alsa output interfaces into the global
## ALSA_AIF_* arrays, and reports when nothing matched the filters.
## Always returns 0 unless a hard prerequisite (aplay) is missing.
function return_alsa_interface() {
## main function; see display_usageinfo()
profile_file=
## start profiling
if [[ ${PROFILE} ]]; then
## trace to a temp log with high-resolution timestamps; stderr is saved
## on fd 3 and restored at the end of this function
profile_file="/tmp/alsa-capabilities.$$.log"
PS4='+ $(date "+%s.%N")\011 '
exec 3>&2 2>${profile_file}
set -x
fi
## check if needed commands are available
CMD_APLAY="$(type -p aplay)" || \
command_not_found "aplay" "alsa-utils"
# shellcheck disable=SC2181
## NOTE(review): $? here reflects the compound above (including
## command_not_found's exit status), not `type' directly -- confirm
## command_not_found returns non-zero so this branch can trigger.
if [[ $? -ne 0 ]]; then
die "The script cannot continue without aplay."
else
[[ ${DEBUG} ]] && \
debug "${LINENO}" "Using \`${CMD_APLAY}' as aplay command."
fi
## parse command line arguments
analyze_command_line "$@"
## create a list of alsa audio output interfaces and parse it.
fetch_alsa_outputinterfaces
## exit with error if no matching output line was found
if [[ ${#ALSA_AIF_HWADDRESSES[@]} -eq 0 ]]; then
msg="\n${MSG_MATCH_IF_NONE_UNLIMITED}"
## display information about the number of interfaces before filtering
if [[ ${NR_AIFS_BEFOREFILTERING} -ne 0 ]]; then
# shellcheck disable=SC2059
printf -v msg "${msg}\n${MSG_MATCH_IF_NONE_LIMITED}" \
"${NR_AIFS_BEFOREFILTERING}"
printf 1>&2 "%s\n" "${msg}"
fi
fi
[[ ${DEBUG} ]] && \
debug "${LINENO}" "Number of audio interfaces after filtering: \
${#ALSA_AIF_HWADDRESSES[@]}"
if [[ ${PROFILE} ]]; then
## end profiling
set +x
exec 2>&3 3>&-
debug "${LINENO}" "Profiling information stored in: ${profile_file}"
fi
## return success if interfaces are found
return 0
}
### global variables
## indexed arrays to store the details of interfaces; if one would
## declare such an array in another script, that array would be filled
## instead of these. See examples/bash-example.sh for usage.
## Each guard declares the array only when it is currently unset/empty
## (the appended literal `x' makes the test safe for unset arrays).
set +u
[[ "${ALSA_AIF_HWADDRESSES[*]}x" == "x" ]] && declare -a ALSA_AIF_HWADDRESSES=()
[[ "${ALSA_AIF_DISPLAYTITLES[*]}x" == "x" ]] && declare -a ALSA_AIF_DISPLAYTITLES=()
[[ "${ALSA_AIF_MONITORFILES[*]}x" == "x" ]] && declare -a ALSA_AIF_MONITORFILES=()
[[ "${ALSA_AIF_DEVLABELS[*]}x" == "x" ]] && declare -a ALSA_AIF_DEVLABELS=()
## BUG FIX: this line was `"${ALSA_AIF_LABELS[*]}" == "x"` (missing the
## appended `x'), so the array was never declared when unset or empty and
## later `set -u' accesses would fail.
[[ "${ALSA_AIF_LABELS[*]}x" == "x" ]] && declare -a ALSA_AIF_LABELS=()
[[ "${ALSA_AIF_UACCLASSES[*]}x" == "x" ]] && declare -a ALSA_AIF_UACCLASSES=()
[[ "${ALSA_AIF_FORMATS[*]}x" == "x" ]] && declare -a ALSA_AIF_FORMATS=()
[[ "${ALSA_AIF_CHARDEVS[*]}x" == "x" ]] && declare -a ALSA_AIF_CHARDEVS=()
set -u
## counter for unfiltered interfaces
NR_AIFS_BEFOREFILTERING=0
NR_AIFS_AFTERFILTERING=0
## static filter for digital interfaces: an interface whose name matches
## one of these (newline-separated) keywords is classified as digital
DO_FILTER_LIST="$(cat <<EOF
adat
aes
ebu
digital
dsd
hdmi
i2s
iec958
spdif
s/pdif
toslink
uac
usb
EOF
)"
## Build the static sample-rate tables from the two common master-clock
## frequencies:
## - video standard: 24.576 MHz / 512 = 48000 Hz
## - audio standard: 22.5792 MHz / 512 = 44100 Hz
base_fs_video=$(( 24576000 / 512 ))
base_fs_audio=$(( 22579200 / 512 ))
## audio-family rates (44100 * n)
declare -a SAMPLERATES_AUDIO
## video-family rates (48000 * n)
declare -a SAMPLERATES_VIDEO
## highest supported multiplier: fs*8
max_fs_n=8
n=${max_fs_n}
## walk the multipliers downwards (8, 4, 2, 1), appending one rate per
## family per step
until [[ ${n} -lt 1 ]]; do
audio_rate=$(( base_fs_audio * n ))
video_rate=$(( base_fs_video * n ))
SAMPLERATES_VIDEO+=(${video_rate})
SAMPLERATES_AUDIO+=(${audio_rate})
n=$(( n / 2 ))
done
## pseudo audio data to generate (silent) noise
PSEUDO_SILENT_AUDIO="00000000000000000000000000000000000000000000"
## expand the newline-separated DO_FILTER_LIST into an indexed array
## NOTE(review): relies on unquoted word-splitting of the command
## substitution -- presumably intentional; verify each keyword becomes
## one element.
declare -a DO_INTERFACE_FILTER=($(printf -- '%s' "${DO_FILTER_LIST// /" "}"))
## construction for displayed output
UAC="USB Audio Class"
ALSA_IF_LABEL="alsa audio output interface"
## human-readable label per interface class: analog / digital / usb
declare -A ALSA_IF_LABELS=()
ALSA_IF_LABELS+=(["ao"]="Analog ${ALSA_IF_LABEL}")
ALSA_IF_LABELS+=(["do"]="Digital ${ALSA_IF_LABEL}")
ALSA_IF_LABELS+=(["uo"]="${UAC} ${ALSA_IF_LABELS[do]}")
## USB_SYNC_TYPEs
## strings alsa uses for UAC endpoint descriptors.
## one of *sync_types "NONE", "ASYNC", "ADAPTIVE" or "SYNC" according
## to ./sound/usb/proc.c
UO_EP_NONE_FILTER="NONE"
UO_EP_ADAPT_FILTER="ADAPTIVE"
UO_EP_ASYNC_FILTER="ASYNC"
UO_EP_SYNC_FILTER="SYNC"
## labels for UAC classes.
UO_EP_NONE_LABEL="0 - none"
UO_EP_ADAPT_LABEL="1 - isochronous adaptive"
UO_EP_ASYNC_LABEL="2 - isochronous asynchronous"
UO_EP_SYNC_LABEL="3 - sync (?)"
## declarative array holding the available UAC classes with
## description
declare -A UO_EP_LABELS=( ["${UO_EP_NONE_FILTER}"]="${UO_EP_NONE_LABEL}"
["${UO_EP_ADAPT_FILTER}"]="${UO_EP_ADAPT_LABEL}"
["${UO_EP_ASYNC_FILTER}"]="${UO_EP_ASYNC_LABEL}"
["${UO_EP_SYNC_FILTER}"]="${UO_EP_SYNC_LABEL}" )
## system messages
MSG_PROP_NOTAPPLICABLE="(n/a)"
MSG_ERROR_GETTINGFORMATS="can't detect formats or rates because device is in use"
MSG_ERROR_NOFILE="is not a file or is not accessible."
MSG_ERROR_UNEXPECTED="THIS SHOULD NOT HAPPEN."
MSG_APLAY_ERROR_NOSOUNDCARDS="aplay did not find any soundcard."
MSG_APLAY_ERROR_GENERAL="aplay reported the following error:\n\`%s'"
MSG_APLAY_USINGTESTFILE="NOTICE: using fake aplay output stored in TESTFILE: \`%s'."
MSG_APLAY_ERROR_NOSUCHTESTFILE="Specified TESTFILE \'%s' does not exist."
MSG_APLAY_ERROR_OPENINGTESTFILE="Error opening TESTFILE \'%s'."
MSG_MATCH_IF_NONE_UNLIMITED=" * No ${ALSA_IF_LABEL}s found."
## printf template; %s is filled with the pre-filtering interface count
MSG_MATCH_IF_NONE_LIMITED=" * From the %s available ${ALSA_IF_LABEL}s, \
none matched your filter."
MSG_ERROR_CHARDEV_NOFORMATS="can't determine: character device error"
MSG_ERROR_NOT_CHARDEV="error: is not a character device or not accessible"
## construct a list with the properties of the current
## interface if `OPT_QUIET' is not set
MSG_ALSA_DEVNAME="device name"
MSG_ALSA_IFNAME="interface name"
MSG_ALSA_UACCLASS="usb audio class"
MSG_ALSA_CHARDEV="character device"
MSG_ALSA_ENCODINGFORMATS="encoding formats"
MSG_ALSA_MONITORFILE="monitor file"
MSG_ALSA_STREAMFILE="stream file"
## command line options
## input parameters for the limit option
## should be consequtive pairs of '"x" "long option"'
declare -a OPT_LIMIT_ARGS=("a" "analog" "d" "digital" "u" "usb")
## also see analyze_command_line
## defaults: empty means "not set"; preserved when already set by a
## sourcing script
OPT_LIMIT_AO=${OPT_LIMIT_AO:-}
OPT_LIMIT_DO=${OPT_LIMIT_DO:-}
OPT_LIMIT_UO=${OPT_LIMIT_UO:-}
OPT_QUIET=${OPT_QUIET:-}
OPT_JSON=${OPT_JSON:-}
OPT_CUSTOMFILTER=${OPT_CUSTOMFILTER:-}
OPT_HWFILTER=${OPT_HWFILTER:-}
OPT_SAMPLERATES=${OPT_SAMPLERATES:-}
## if the script is not sourced by another script but run within its
## own shell call function `return_alsa_interface'
## (when sourced, BASH_SOURCE differs from $0 and nothing is executed)
[[ "${BASH_SOURCE[0]:-}" != "${0}" ]] || \
return_alsa_interface "$@"
| true
|
9acffff25e009b1493cd83281b597db39bc99424
|
Shell
|
elfraera/doit
|
/assets/scripts/.tmp/06-fix-includes.sh
|
UTF-8
| 893
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash
# REV04: Tue Jan 7 10:11:48 WIB 2020
# REV03: Tue Jul 2 12:44:23 WIB 2019
# REV02: Mon Jul 1 02:07:00 WIB 2019
# REV01: Sun Jun 30 17:54:17 WIB 2019
# START: Thu Jun 27 19:27:34 WIB 2019
#
# Copyright (C) 2019 Rahmat M. Samik-Ibrahim
# http://RahmatM.Samik-Ibrahim.vLSM.org/
# This program is free script/software. This program is distributed in the hope
# that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# Updates rbenv, then ensures every installed jekyll-theme-* gem has an
# _includes directory.
# GEMS="$HOME/gems/gems/"
GEMS="$HOME/.rbenv/versions/2.6.5/lib/ruby/gems/2.6.0/gems"
echo "===== ===== PULL"
cd "$HOME/.rbenv" || exit 1
git pull
rbenv rehash
[ -d "$GEMS" ] || {
	echo "No $GEMS....."
	exit 1
}
cd "$GEMS" || exit 1
echo "===== ===== _includes"
for ii in jekyll-theme-*
do
	# BUG FIX: guard against an unmatched glob -- previously a literal
	# directory named 'jekyll-theme-*/_includes' could be created.
	[ -d "$ii" ] || continue
	[ -d "$ii/_includes" ] || mkdir "$ii/_includes"
done
exit 0
# gem install rails
# rbenv uninstall 2.1.3
# rbenv root
| true
|
c0b420e93442b2ed8f61f3e0163b6041bbf47c24
|
Shell
|
u10v8/teslamodel3
|
/simpleifelse.sh
|
UTF-8
| 93
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
# Read a value from stdin and report whether it equals five.
echo "number"
# BUG FIX: use read -r (no backslash mangling) and quote "$x" so the test
# does not break apart on empty input or input containing spaces.
read -r x
if [ "$x" -eq 5 ]
then
	echo "five"
else
	echo " not five"
fi
| true
|
d6b9d628b33119c05d81f6e74425bdb2f7f037a5
|
Shell
|
bitc/hdevtools
|
/tests/test_sample_error.sh
|
UTF-8
| 255
| 3
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Regression test: `hdevtools check` on SampleError.hs must FAIL and report
# exactly the expected scope error.
set -e
SOCK=`mktemp -u`
EXPECTED_ERRORS='SampleError.hs:9:5: Not in scope: `foo'\'''
if ERRORS=`$HDEVTOOLS check --socket=$SOCK SampleError.hs`
then
# the check unexpectedly succeeded -> fail the test (set -e aborts here)
false
# BUG FIX: was `elsh' (typo), which ran the assertion inside the
# then-branch and left the else-branch nonexistent.
else
# the check failed as expected; verify the exact error text
[ "$ERRORS" = "$EXPECTED_ERRORS" ]
fi
$HDEVTOOLS --socket=$SOCK --stop-server
| true
|
af94a292b5324deec0f3dfd6372eeae4f5d90d76
|
Shell
|
JakeWharton/dodo
|
/root/app/sync.sh
|
UTF-8
| 371
| 2.796875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/with-contenv sh
# Trigger a sync and report start/success to the health-check service.
# All health-check pings are skipped when HEALTHCHECK_ID is unset/empty.
case "${HEALTHCHECK_ID}" in
  ?*) curl -sS -X POST -o /dev/null "$HEALTHCHECK_HOST/$HEALTHCHECK_ID/start" ;;
esac
# If the sync fails we want to avoid triggering the health check.
set -e
curl -sS -X POST --fail http://localhost/sync
case "${HEALTHCHECK_ID}" in
  ?*) curl -sS -X POST -o /dev/null --fail "$HEALTHCHECK_HOST/$HEALTHCHECK_ID" ;;
esac
| true
|
719079ff6002b052bcbded0bb3a439ff58aa22e1
|
Shell
|
Ortega-Dan/linuxBashScripting
|
/bash/switchCase.sh
|
UTF-8
| 152
| 2.78125
| 3
|
[] |
no_license
|
# Read one answer from stdin and translate y/Y -> YES, n/N -> NO.
# Any other input produces no output.
read char
case $char in
  y|Y)
    echo YES
    ;;
  n|N)
    echo NO
    ;;
esac
| true
|
8779eaa9917681f3e244ead3021d242f31ffb2d0
|
Shell
|
kvrmuruganandam/Devops
|
/aws/Instance.sh
|
UTF-8
| 1,442
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
#Listing the instances
# Prints each instance's tag values and instance id, reformatted into
# groups separated by a line of asterisks.
list_instance(){
# strip JSON punctuation from the query output, then reflow one value per
# line via the unquoted word-splitting of $T
T=$(aws ec2 describe-instances --query 'Reservations[*].Instances[*].[Tags[].Value,InstanceId]'|tr '"[],' " "|tr "\t" " ")
echo $T|tr " " "\n"|awk ' {print;} NR % 2 == 0 { print ""; }'| awk ' {print;} NR % 3 == 0 { print "******************"; }'
}
#Create New Instance
# Prompts for image id, instance type, count and key name, then launches
# the instances via the AWS CLI.
create_instance(){
read -p "Entrer Image Id: " Id
read -p "Enter Instance Type: " Type
read -p "Enter Count: " Count
read -p "Enter KeyName: " key
aws ec2 run-instances --image-id $Id --count $Count --instance-type $Type --key-name $key
}
#Start Instances
# Prompts for an instance id and starts it.
start_instance(){
read -p "Enter Instance Id: " Id
aws ec2 start-instances --instance-ids $Id
}
#stop Instances
# Prompts for an instance id and stops it.
stop_instance(){
read -p "Enter Instance Id: " Id
aws ec2 stop-instances --instance-ids $Id
}
#Terminate Instances
# Prompts for an instance id and terminates it (irreversible).
terminate_instance(){
read -p "Enter Instance Id: " Id
aws ec2 terminate-instances --instance-ids $Id
}
#Main Page
# Interactive menu: prints the options, reads a choice and dispatches to
# the matching function.
# NOTE(review): choice "6" is handled below but not listed in the menu,
# and there is no default arm for invalid input -- confirm intent.
echo " Welcome To AWS EC2 Management"
echo " *************************"
echo "1.list_instance"
echo "2.create_instance"
echo "3.start_instance"
echo "4.stop_instance"
echo "5.terminate_instance"
read -p "Choose:" Input
echo " "
echo "************"
#Options
case "$Input" in
"1") list_instance
;;
"2") create_instance
;;
"3") start_instance
;;
"4") stop_instance
;;
"5") terminate_instance
;;
"6") echo "success"
;;
esac
| true
|
cecc2ec81c617c98a4003df508b10af66ce0b4f3
|
Shell
|
Laged/archthings
|
/home/user/.zshrc
|
UTF-8
| 568
| 2.625
| 3
|
[] |
no_license
|
# zsh startup file: environment, zgen plugin manager bootstrap, aliases.
# env vars
export EDITOR=/usr/bin/vim
# theme
ZSH_THEME="arrow"
# load zgen
source "${HOME}/.zgen/zgen.zsh"
# if the init script doesn't exist
# (zgen caches the generated init script; this block only runs until
# `zgen save` has been executed once)
if ! zgen saved; then
# specify plugins here
zgen oh-my-zsh
zgen oh-my-zsh plugins/git
zgen oh-my-zsh plugins/yarn
zgen oh-my-zsh plugins/npm
zgen oh-my-zsh plugins/pip
zgen oh-my-zsh plugins/command-not-found
zgen oh-my-zsh plugins/sudo
zgen oh-my-zsh plugins/clipboard
zgen oh-my-zsh themes/arrow
# generate the init script from plugins above
zgen save
fi
#
#xset -dpms s off
source ~/.aliases
| true
|
15d26d5ba65dec5df1b5ef4482a0c4e80369395b
|
Shell
|
mwarzynski/uw_operating_systems
|
/filesystem/scripts/run.sh
|
UTF-8
| 485
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/sh
# Build a patch of the MFS changes, boot a clean MINIX image in QEMU and
# apply/run the patch inside the VM over ssh.
# Optional argument "kernel" is forwarded to the in-VM fs.sh build step.
BUILD=""
if [ "$#" -eq 1 ] && [ $1 = "kernel" ]; then
BUILD="kernel"
fi
cp src/read.c sources/new_minix/fs/mfs/
cd sources
# generate the patch between the pristine and the modified source trees
diff -rupN minix new_minix > ../mw371854.patch
cd ..
# Revert MINIX image to last clean one.
cp snapshots/minix.img minix.img
# Run MINIX.
./scripts/qemu.sh > /dev/null 2> /dev/null &
# Wait for MINIX's sshd daemon.
# NOTE(review): a fixed 5s sleep may be too short on slow hosts
sleep 5
# Copy MFS files to MINIX.
scp mw371854.patch minix:~/
scp fs.sh minix:~/
ssh minix << ENDSSH
sh fs.sh $BUILD
ENDSSH
| true
|
202c3b033813c4fd343538022c285dfad600bc92
|
Shell
|
huynmela/hpc-apps-gcp
|
/gromacs/install.sh
|
UTF-8
| 1,650
| 2.96875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Post-install step for the Gromacs-GCP VM image: writes the MOTD and
# installs the standard Gromacs benchmark input decks under
# ${INSTALL_ROOT}/share/gromacs.  Requires INSTALL_ROOT to be set.
#
# Maintainers : @schoonovernumerics
#
# //////////////////////////////////////////////////////////////// #
# Update MOTD
cat > /etc/motd << EOL
=======================================================================
Gromacs-GCP VM Image
Copyright 2021 Fluid Numerics LLC
=======================================================================
Open source implementations of this solution can be found at
https://github.com/FluidNumerics/hpc-apps-gcp
This solution contains free and open-source software
All applications installed can be listed using
spack find
You can obtain the source code and licenses for any
installed application using the following command :
ls \$(spack location -i pkg)/share/pkg/src
replacing "pkg" with the name of the package.
=======================================================================
To get started, check out the included docs
cat ${INSTALL_ROOT}/share/doc
EOL
# Install benchmarks
mkdir -p ${INSTALL_ROOT}/share/gromacs
wget https://www.mpibpc.mpg.de/15101317/benchMEM.zip -P /tmp
wget https://www.mpibpc.mpg.de/15615646/benchPEP.zip -P /tmp
wget https://www.mpibpc.mpg.de/17600708/benchPEP-h.zip -P /tmp
wget https://www.mpibpc.mpg.de/15101328/benchRIB.zip -P /tmp
unzip /tmp/benchMEM.zip -d ${INSTALL_ROOT}/share/gromacs
unzip /tmp/benchPEP.zip -d ${INSTALL_ROOT}/share/gromacs
unzip /tmp/benchPEP-h.zip -d ${INSTALL_ROOT}/share/gromacs
unzip /tmp/benchRIB.zip -d ${INSTALL_ROOT}/share/gromacs
# Ensure that input deck permissions are readable by all
# NOTE(review): chmod 644 is applied to every direct child -- if unzip
# created subdirectories this clears their execute bit; confirm the
# archives are flat.
chmod 755 ${INSTALL_ROOT}/share/gromacs
chmod 644 ${INSTALL_ROOT}/share/gromacs/*
| true
|
c04a214c74c4dbba441adfbc94732904a98851f4
|
Shell
|
hhalex/conceptopedia
|
/env/buildhbase.sh
|
UTF-8
| 2,938
| 3.265625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Initializes Hadoop and HBase, then loads Wikipedia dumps into HBase in
# stages (XML pages, SQL crosslinks, concept creation, concept graph),
# taking an HBase snapshot after each stage and appending progress to the
# "temoin_buildhbase" log file.
source ~/.bashrc
# Allow alias expansion in this non-interactive shell
shopt -s expand_aliases
# Shortcuts
CONCEPTOPEDIA=~/tests_java/Conceptopedia.jar
WIKIPEDIA_FOLDER=~/wikipedia
alias Conceptopedia0="java -jar $CONCEPTOPEDIA --mode 0"
alias Conceptopedia1="java -jar $CONCEPTOPEDIA --mode 1"
alias Conceptopedia2="java -jar $CONCEPTOPEDIA --mode 2"
alias Conceptopedia3="java -jar $CONCEPTOPEDIA --mode 3"
alias Conceptopedia4="hadoop jar $CONCEPTOPEDIA --mode 4"
# Wikipedia language codes to process
WIKIPEDIA_LANGS=(af az ar fr en it es de)
# Reset the progress log
# NOTE(review): the log is reset at ~/temoin_buildhbase but appended to via
# the relative path "temoin_buildhbase" below -- these only coincide when
# the script runs from $HOME; confirm.
echo "" > ~/temoin_buildhbase
# Delete previous snapshots if they exist
echo "delete_snapshot 'intralang-xml'" | hbase shell
echo "delete_snapshot 'intralang_matching_ids-xml'" | hbase shell
echo "delete_snapshot 'intralang-sql'" | hbase shell
echo "delete_snapshot 'intralang-creator'" | hbase shell
echo "delete_snapshot 'conceptcreator-creator'" | hbase shell
# Build the data model (HBase tables and column families)
echo "Construction du data model" >> temoin_buildhbase
echo $(date) >> temoin_buildhbase
Conceptopedia0 >> temoin_buildhbase
# Load the XML data (pages with links to same-language neighbours)
for l in "${WIKIPEDIA_LANGS[@]}"
do
echo $l " xml start : " $(date) >> temoin_buildhbase
Conceptopedia1 --file ${WIKIPEDIA_FOLDER}/${l}wiki-latest-pages-articles.xml
done
echo "fin xml : " $(date) >> temoin_buildhbase
# Snapshot the tables at this stage
echo "Snapshot de la base: seulement les fichiers xml" >> temoin_buildhbase
echo "snapshot 'intralang', 'intralang-xml'" | hbase shell
echo "snapshot 'intralang_matching_ids', 'intralang_matching_ids-xml'" | hbase shell
# Add the crosslinks
for l in "${WIKIPEDIA_LANGS[@]}"
do
echo $l " sql start : " $(date) >> temoin_buildhbase
Conceptopedia2 --file ${WIKIPEDIA_FOLDER}/${l}wiki-latest-langlinks.sql
done
echo "fin sql : " $(date) >> temoin_buildhbase
# Snapshot the tables
echo "Snapshot de la base: seulement les fichiers sql" >> temoin_buildhbase
echo "snapshot 'intralang', 'intralang-sql'" | hbase shell
echo "snapshot 'conceptcreator', 'conceptcreator-vide'" | hbase shell
echo "list_snapshots" | hbase shell >> temoin_buildhbase
# Create the concepts
echo "Création des concepts (conceptcreator)" >> temoin_buildhbase
Conceptopedia3 >> temoin_buildhbase
# Snapshot
echo "Snapshot de la base: après création des concepts" >> temoin_buildhbase
echo "snapshot 'intralang', 'intralang-creator'" | hbase shell
echo "snapshot 'conceptcreator', 'conceptcreator-creator'" | hbase shell
echo "list_snapshots" | hbase shell >> temoin_buildhbase
# Build the concept graph
echo "Création du graphe des concepts (conceptgraph)" >> temoin_buildhbase
Conceptopedia4 >> temoin_buildhbase
| true
|
a1399c7aae7178a9800957685dbae85701444fbd
|
Shell
|
messbees/EvolutionServer
|
/install.sh
|
UTF-8
| 306
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/bash
# Installer: makes client.py and server.py executable and symlinks them
# into /usr/bin.
reset
echo "Begin Evolution Cli installation..."
CLIENT="/client.py"
CLIENT_PATH="$PWD$CLIENT"
chmod a+x "$CLIENT_PATH"
# BUG FIX: SERVER_PATH was assigned "/server.py" and then immediately
# overwritten with "$PWD$TP", where $TP was never defined -- leaving
# SERVER_PATH equal to just $PWD.  Build it like CLIENT_PATH instead.
SERVER="/server.py"
SERVER_PATH="$PWD$SERVER"
chmod a+x "$SERVER_PATH"
sudo ln -s "$CLIENT_PATH" /usr/bin/evo
# BUG FIX: was `$SEVER_PATH' (typo), which expanded empty.
# NOTE(review): the client being linked as `evo' and the server as
# `evo-client' looks swapped -- confirm the intended link names.
sudo ln -s "$SERVER_PATH" /usr/bin/evo-client
echo "Links created."
|
89566f183378aa250b368d256345b7718f202d4a
|
Shell
|
Spagettileg/mysql
|
/createuser.sh
|
UTF-8
| 316
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
# Create a MySQL account for the current Cloud9 user and grant it full
# privileges (empty password, any host).
# Author: Matt Rudge

db_user="${C9_USER}"

printf '%s\n' "Creating the ${db_user} user in MySQL"
sudo mysql -e "CREATE USER '${db_user}'@'%' IDENTIFIED BY '';"
printf '%s\n' "Granting privileges"
sudo mysql -e "GRANT ALL PRIVILEGES ON *.* TO '${db_user}'@'%' WITH GRANT OPTION;"
printf '%s\n' "Done"
| true
|
9f1a07efb0bb2aad9e2b9522a4928e32de7bb203
|
Shell
|
estsauver/suricatta
|
/build-docs.sh
|
UTF-8
| 278
| 3.171875
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
# Build the documentation and publish it to the gh-pages branch under
# ./$VERSION/.  Assumes a clean working tree (it switches branches).
VERSION="latest"
# build in a subshell so the cwd is unchanged afterwards
(cd doc; make)
# stash the generated page outside the repo before switching branches
rm -rf /tmp/index.html
mv doc/index.html /tmp/index.html;
git checkout gh-pages;
rm -rf ./$VERSION
mkdir -p ./$VERSION/
mv -fv /tmp/index.html ./$VERSION/
git add --all ./$VERSION/index.html
git commit -a -m "Update ${VERSION} doc"
| true
|
b467c0faad7af6bf6d55f186fb7d3e43ac52d209
|
Shell
|
Fredrick-Kakembo/Test_Pipeline
|
/bin/kakembo
|
UTF-8
| 2,101
| 3.875
| 4
|
[] |
no_license
|
#! /bin/bash
#This portion is extracted from the eTract pipeline developed by Ivan Gunz
kakembo=$0
### Variables ####
OUTWD=
INDIR=
CPUS=4
REFERENCE=
VERSION="1.0"
ARGPARSED0=$0
ALLARGSPARSED=$@
# Print the help text to stdout.
usage() {
cat <<EOF
Thanks for using eTrack $VERSION
Developed by as a mini-project by Group-4 EanBIT Residential Training 2
SYPNOSIS:
An E.coli analysis Pipeline for Profiling it's epidemiology, virulence, phylogeny & resistome
USAGE:
eTrack [options] --input <INPUT> --output <OUTPUT> --reference <REF.fasta>
GENERAL:
-h/--help	Display this help and exit
-c/--citation	Show citation and exit
-v/--version	Print version and exit
MANDATORY OPTIONS:
-i/--input	Path to the raw sequencces to be analyzed
-o/--output	Name of ouput directory
-r/--reference	Path to the reference genome(.fasta, .gb) [default="REF.fasta"]
OTHER OPTIONS:
-t/--threads	Number of cores to use <integer> [default=$CPUS]
EOF
}
######### OPTIONS ############
POSITIONAL=()
while [[ $# -gt 0 ]]; do
ARGS="$1"
case $ARGS in
-i | --input)
if [ "$2" ]; then
INDIR=$2
shift 2
# BUG FIX: each value-taking option previously had no else-branch, so a
# missing argument left "$1" unconsumed and the while-loop spun forever.
else
echo -e 'ERROR: "-i/--input" requires a non-empty argument\n' >&2
exit 1
fi
;;
-o | --output)
if [ "$2" ]; then
OUTWD=$2
shift 2
else
echo -e 'ERROR: "-o/--output" requires a non-empty argument\n' >&2
exit 1
fi
;;
-r | --reference)
if [ "$2" ]; then
REFERENCE=$2
shift 2
else
echo -e 'ERROR: "-r/--reference" requires a non-empty argument\n' >&2
exit 1
fi
;;
-t | --threads)
# numeric check: [ n -eq n ] fails for non-integers
if [ "$2" -eq "$2" ] 2>/dev/null; then
CPUS=$2
shift 2
else
echo -e 'ERROR: "-t/--threads" requires a numeric argument\n'
echo -e "Selected cores: $2\n"
exit 1
fi
;;
-h | --help)
usage
exit 1
;;
-v | --version)
echo "This is eTrack version $VERSION"
exit 1
;;
-c | --citation)
echo -e "If using eTrack, please cite:"
echo -e "https://github.com/GunzIvan28/eTrack"
exit 1
;;
*)
echo -e "\nERROR: unknown option: $1 \n"
usage
exit 1
;;
esac
done
| true
|
7b5616ac12514b70098834a19cc4941db6d5621b
|
Shell
|
bioinf/exome
|
/bqsr.sh
|
UTF-8
| 1,313
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
# Run BQSR for WES data
# Note that the platform must be specified to correct only using the target regions
# BUG FIXES: shebang was `#1/bin/bash'; the job-throttle tests used `ge'
# instead of `-ge' (a syntax error in `['); `mkdir realigned' was commented
# out although files are moved into realigned/ at the end.
PLATFORM=$1
# illumina or roche
for BAM in *.realigned.bam
do
# throttle: at most 12 concurrent GATK jobs
while [ "$( jobs | wc -l )" -ge 12 ]; do sleep 1; done
java -Xmx20g -jar /Molly/barbitoff/software/gatk-protected/target/GenomeAnalysisTK.jar -T BaseRecalibrator -R /Molly/barbitoff/reference/GATK_b37/human_g1k_v37.fasta -I $BAM -knownSites /Molly/barbitoff/gatk-bundle-b37/dbsnp_138.b37.vcf -knownSites /Molly/barbitoff/gatk-bundle-b37/Mills_and_1000G_gold_standard.indels.b37.vcf -L /Molly/barbitoff/reference/${PLATFORM}.intervals -o ${BAM%%.realigned.bam}.recal.table 2> ./gatk_logs/${BAM%%.realigned.bam}.BaseRecalibrator.log &
done
wait
for BAM in *.realigned.bam
do
while [ "$( jobs | wc -l )" -ge 12 ]; do sleep 1; done
java -Xmx20g -jar /Molly/barbitoff/software/gatk-protected/target/GenomeAnalysisTK.jar -T PrintReads -R /Molly/barbitoff/reference/GATK_b37/human_g1k_v37.fasta -I $BAM -BQSR ${BAM%%.realigned.bam}.recal.table -o ${BAM%%.realigned.bam}.recal.bam 2> ./gatk_logs/${BAM%%.realigned.bam}.PrintReads.log &
done
wait
mkdir -p realigned
mkdir -p gatk_logs/recal_tables
mv *.realigned.* realigned/
mv *.recal.table gatk_logs/recal_tables/
echo 'Recalibration of base qualities done, BAMs are ready for calling!'
| true
|
5be597ebffdc2737951f729eccf3242b047ff878
|
Shell
|
aratik711/wh_nodejs_app
|
/wh_terraform/scripts/Ubuntu16/server_config.sh
|
UTF-8
| 1,285
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
# Provisioning script (Ubuntu 16): creates a sudo-enabled user, sets the
# hostname, installs/configures SSH with password login, generates an SSH
# key for the new user, and installs expect + git.
# Arguments: $1 = username, $2 = user password, $3 = new hostname.
USER=$1
USER_PASS=$2
HOSTNAME=$3
echo $HOSTNAME
echo "Add user compose"
sudo adduser --disabled-password --gecos "" $USER
echo "Set Password for user $USER"
echo "$USER:$USER_PASS" | sudo chpasswd
echo "Add user compose in sudoers file"
sudo sed -i '$a'$USER' ALL=(ALL) ALL' /etc/sudoers
echo "Add passwordless sudo"
sudo sed -i '$a'$USER' ALL=(ALL) NOPASSWD:ALL' /etc/sudoers
echo "update the /etc/hostname file with the new hostname/fqdn"
CURR_HOSTNAME=$(cat /etc/hostname)
sudo sed -i "s/$CURR_HOSTNAME/$HOSTNAME/g" /etc/hostname
echo "Add entry in hosts file"
sudo sed -i '$a127.0.0.1 '$HOSTNAME'' /etc/hosts
echo "Restart login service"
sudo systemctl restart systemd-logind.service
echo "update the running hostname"
sudo hostnamectl set-hostname $HOSTNAME
echo "Install SSH server and client"
sudo apt-get -y install openssh-server openssh-client
echo "Enable user login via password"
sudo sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/g' /etc/ssh/sshd_config
echo "Restart ssh service"
sudo service sshd restart
echo "SSH keygen command"
# generate a passphrase-less RSA key as the new user (overwrite prompt
# answered with `y')
echo y |sudo su $USER -c 'ssh-keygen -t rsa -f ~/.ssh/id_rsa -q -P ""'
echo "Install expect for passwordless ssh"
sudo apt-get -y install expect
echo "Install git"
sudo apt-get -y install git
| true
|
897b0f0cda6069ede4e27bb93502e6b0f0d6187c
|
Shell
|
rupashreeh/VAJRA
|
/tacas2020-benchmarks/viap/run_viap.sh
|
UTF-8
| 285
| 3.0625
| 3
|
[
"MIT",
"BSL-1.0",
"BSD-2-Clause"
] |
permissive
|
# Run the VIAP tool on every .c file in the current directory, logging
# results per file under viap_results/.
rm -rf viap_results
mkdir viap_results
for file in *.c
do
	# guard against an unmatched glob (no .c files present)
	[ -e "$file" ] || continue
	echo "RUNNING VIAP FOR $file"
	# backticks replaced with $( ), expansions quoted
	base=$(basename "$file" .c)
	# was: cat $file > temp.c (useless use of cat)
	cp -- "$file" temp.c
	memlimit -t 100 viap_tool.py --spec=unreach-call.prp temp.c > "viap_results/$base.log" 2>&1
	tail -4 "viap_results/$base.log"
	rm -f temp.c
done
| true
|
244a02f84b9890c0b68bed1101bf42bef18477e3
|
Shell
|
lhaclove/RNA-Seq-pipeline
|
/process.sh
|
UTF-8
| 1,475
| 2.71875
| 3
|
[] |
no_license
|
# RNA-Seq pipeline: QC with fastp, align with bowtie2/hisat2, sort with
# samtools, quantify with stringtie.  Sample names are read one per line
# from the file `sample'.
prefix=$PWD
basename1="_1.clean.fq"
basename2="_2.clean.fq"
export REF439GTF="/public/home/liuhao/ref/V439/Zea_mays.AGPv4.39.chr.gtf"
export REF439HTI="/public/home/liuhao/ref/V439/Zea_mays.AGPv4.hisat2"
export REF439BWI="/public/home/liuhao/ref/V439/Zea_mays.AGPv4.bowtie2"
export REF439FA="/public/home/liuhao/ref/V439/Zea_mays.AGPv4.dna.toplevel.fa"
export REF439IGV="/public/home/liuhao/tool/IGVTools/v439.genome"
if [ ! -d "./report/" ];then
mkdir ./report
fi
if [ ! -d "./align/" ];then
mkdir ./align
fi
if [ ! -d "./ballgown/" ];then
mkdir ./ballgown
fi
# NOTE(review): several inconsistencies to confirm:
#  - fastp writes ${samplename}_1.filt.fq but the aligners read
#    ${samplename}.1.filt.fq.gz (different separator and .gz suffix);
#  - ./align is created but samtools/stringtie use $prefix/aligned/;
#  - both bowtie2 and hisat2 write the same ${samplename}.sam.
while read samplename;
do
fastp -i ${prefix}/raw/${samplename}${basename1} -I ${prefix}/raw/${samplename}${basename2} -o ${prefix}/raw/${samplename}_1.filt.fq -O ${prefix}/raw/${samplename}_2.filt.fq -h ${prefix}/report/${samplename}.html
bowtie2 -p 30 -x /public/home/liuhao/ref/V439/Zea_mays.AGPv4.bowtie2 -1 $prefix/raw/${samplename}.1.filt.fq.gz -2 $prefix/raw/${samplename}.2.filt.fq.gz -S $prefix/align/${samplename}.sam &&
hisat2 -p 60 -t --dta -x $REF439HTI -1 $prefix/raw/${samplename}.1.filt.fq.gz -2 $prefix/raw/${samplename}.2.filt.fq.gz -S $prefix/align/${samplename}.sam &&
samtools sort -@60 $prefix/aligned/${samplename}.sam -o $prefix/aligned/${samplename}_sorted.bam &&
stringtie -e -B -p 60 -G /public/home/liuhao/ref/Zea_mays.AGPv4.39.chr.gtf -o $prefix/ballgown/${samplename}/${samplename}_sorted.gtf -l ${samplename} $prefix/aligned/${samplename}_sorted.bam;
done < sample
| true
|
024785902a05ee013b76e87b1e6c2264bfea60fd
|
Shell
|
0mp/mpds-orange
|
/infrastructure/scripts/cluster-setup-accounts.sh
|
UTF-8
| 1,092
| 3.265625
| 3
|
[] |
no_license
|
#! /bin/sh -
# Bootstrap GCP service accounts for Terraform: creates the service
# account (idempotently), binds the required IAM roles, and writes a
# service-account key to ./key.json.
set -eu
set -x
project="mpds-task-orange"
terraform_user="terraform"
terraform_service_account="$terraform_user@$project.iam.gserviceaccount.com"
# Authenticate through gcloud if no gcp service account is used:
if [ "$(gcloud config get-value account)" = "" ]; then
gcloud auth application-default login
fi
# Create Service Account for Terraform
if ! gcloud iam service-accounts list | grep -q "$terraform_service_account"; then
gcloud iam service-accounts create "$terraform_user" \
--description="This service account is used for Terraform" \
--display-name="Terraform"
fi
# Create IAM policy binding
gcloud projects add-iam-policy-binding "$project" \
--member="serviceAccount:${terraform_user}@${project}.iam.gserviceaccount.com" \
--role="roles/owner"
# Add IAM policy binding service account user to user accounts
gcloud iam service-accounts add-iam-policy-binding \
"${terraform_service_account}" \
--member="user:$(gcloud config get-value account)" \
--role="roles/iam.serviceAccountUser"
# Create service account key for Terraform
gcloud iam service-accounts keys create ./key.json \
--iam-account "$terraform_service_account"
exit 0
| true
|
d3131d4cb3c29ec2aba3de69489dedccca50b99e
|
Shell
|
taylorking/serverside
|
/random_csv.sh
|
UTF-8
| 226
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
# Append 10 rows of 5 random integer fields to every csv* file in the
# current directory (csv1..csv3 are created if absent).
touch csv1
touch csv2
touch csv3
# iterate via the shell glob instead of parsing `ls` output
for i in csv*
do
	for j in $(seq 1 10)
	do
		LINE=$RANDOM
		# prepend four more random fields, comma-separated
		for k in $(seq 1 4)
		do
			LINE=$RANDOM,$LINE
		done
		echo "$LINE" >> "$i"
	done
done
| true
|
1e5dcc564c2ea36e6316485ee0b58db1391aa27a
|
Shell
|
kristen-schneider/exome-bakeoff
|
/pipeline_metrics/scripts/5-metrics/intersect_worker.sh
|
UTF-8
| 1,470
| 3.21875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
#
#SBATCH -p short
#SBATCH --job-name=intersect_driver
#SBATCH --ntasks=1
#SBATCH --time=4:00:00
#SBATCH --mem-per-cpu=10G
#SBATCH --output=/Users/krsc0813/exome-bakeoff/bash_scripts/5-metrics/intersect_worker.out
#SBATCH --error=/Users/krsc0813/exome-bakeoff/bash_scripts/5-metrics/intersect_worker.err
# SLURM worker: for every VCF pair listed in $pairs_dir$tech_sample
# (CSV: sample1,sample2 per line), count the intersecting records with
# bedtools and append the pair plus the count to one output file.
module load bedtools
# commandline arguments
# 1. pairs dir
pairs_dir=$1
# 2. tech_sample
tech_sample=$2
# 3. vcf dir
vcf_dir=$3
# 4. output file
out_dir=$4
# 4. output
#out_dir=$4
while IFS= read -r line; do
# strip the extension to name the output file (loop-invariant)
tech_sample_name=${tech_sample%%.*}
sample_1="$(cut -d',' -f1 <<<$line)"
sample_2="$(cut -d',' -f2 <<<$line)"
out_file=$out_dir$tech_sample_name.txt
#echo "$sample_1" >> $out_dir$tech_sample_name.txt
echo $sample_1 $sample_2 >> $out_file
# -u: report each record of -a at most once if it overlaps -b
bedtools intersect -u -a $vcf_dir$sample_1 -b $vcf_dir$sample_2 | wc -l >> $out_file
done < $pairs_dir$tech_sample
#for vcf_file_1 in `ls $vcf_dir`
#do
#	if [[ $vcf_file_1 == *$tech_name* ]] && [[ $vcf_file_1 == *.gz ]]; then
#	echo "vcf1: " $vcf_file_1 >> $out_file
#	for vcf_file_2 in `ls $vcf_dir`
#	do
#		if [[ $vcf_file_2 == *$tech_name* ]] && [[ $vcf_file_2 == *.gz ]]; then
#		echo "vcf2: " $vcf_file_2 >> $out_file
#		echo 'stop' >> $out_file
#		#bedtools intersect -u -a $vcf_dir$vcf_file_1 -b $vcf_dir$vcf_file_2 | wc -l >> $out_file
#		fi
#	done
#	fi
#done
|
4a078051039898d29ac88448568f74dc67a7ee3d
|
Shell
|
getrostt/openshift-liveoak-diy
|
/.openshift/action_hooks/build
|
UTF-8
| 577
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
# This is a simple build script and will be executed on your CI system if
# available. Otherwise it will execute while your application is stopped
# before the deploy step. This script gets executed directly, so it
# could be python, php, ruby, etc.
# Downloads and unpacks LiveOak into the dependencies dir unless the
# requested version is already present.
set -x
source $OPENSHIFT_REPO_DIR/.openshift/action_hooks/vars.inc
if [ -d $LIVEOAK_BASE ] ; then
echo "Liveoak $LIVEOAK_VERSION has already been downloaded."
else
cd $OPENSHIFT_DEPENDENCIES_DIR
# remove any stale distributions before fetching the configured version
rm -Rf liveoak-dist-*
wget -O $LIVEOAK_DOWNLOAD_FILE $LIVEOAK_DOWNLOAD_URL
tar -xzf $LIVEOAK_DOWNLOAD_FILE
fi
|
6841ec90be21c05755f56ac7efad53bb504aaa91
|
Shell
|
nguio/SIINCO_web
|
/FormasOracleForms/Repautw95/Unix/Copia de cargue.sh
|
UTF-8
| 4,996
| 3.078125
| 3
|
[] |
no_license
|
# Daily load driver: verifies yesterday's run finished, then rotates the
# .control state file.  $1 = date of this run (fecha).
fecha=$1
# NOTE(review): `fecha' is passed as the literal word, not as "$fecha" --
# confirm whether vercaran.sh expects the value instead.
sh vercaran.sh fecha
if [ $? -ne 0 ]
then
echo "El proceso del dia anterior no termino"
# BUG FIX: this line began with `--' (a SQL comment marker), which the
# shell tried to execute as a command named `--'.  Kept disabled as a
# shell comment.
# cp .control.old .control
exit
fi
cp .control .control.old
fecha=$1
echo "fecha: "$fecha
# Stage 1 (limptemp): drop the temporary tables.  The .control.old line
# "limptemp:<date>:<S|N>" records whether this stage already succeeded
# for that date; S = done (skip), N = retry.
indica=`grep limptemp: .control.old |cut -d ':' -f3`
fecha_ant=`grep limptemp: .control.old |cut -d ':' -f2`
case $fecha_ant in
$fecha)
case $indica in
S) echo "limptemp:$fecha_ant:$indica" >>.control;;
N) sqlplus repaut/repaut@hc @limptemp.sql
if [ $? -ne 0 ]
then
echo "Las tablas temporales no se eliminaron"
echo "limptemp:$fecha_ant:N" >>.control
echo "loadEA__:$fecha_ant:N" >>.control
echo "verificarprocesos:$fecha_ant:N" >>.control
echo "geneubic:$fecha_ant:N" >>.control
exit
else
echo "limptemp:$fecha_ant:S" >>.control
fi;;
esac;;
*)
case $indica in
N) echo "limptemp:$fecha_ant:$indica" >>.control
echo "loadEA__:$fecha_ant:N" >>.control
echo "verificarprocesos:$fecha_ant:N" >>.control
echo "geneubic:$fecha_ant:N" >>.control
exit;;
S) sqlplus repaut/repaut@hc @limptemp.sql
if [ $? -ne 0 ]
then
echo "Las tablas temporales no se eliminaron"
echo "limptemp:$fecha:N" >>.control
echo "loadEA__:$fecha:N" >>.control
echo "verificarprocesos:$fecha:N" >>.control
echo "geneubic:$fecha:N" >>.control
exit
else
echo "limptemp:$fecha:S" >>.control
fi;;
esac;;
esac
# Stage 2 (loadEA__): bulk-load the per-warehouse stock file
# EA<date>.dat with sqlldr; on failure mark this and all later stages N.
indica=`grep loadEA__: .control.old |cut -d ':' -f3`
fecha_ant=`grep loadEA__: .control.old |cut -d ':' -f2`
case $fecha_ant in
$fecha)
case $indica in
S) echo "loadEA__:$fecha_ant:$indica" >>.control;;
N) if [ -s EA$fecha_ant.dat ]
then
sqlldr repaut/repaut@hc errors=5000 log=./ea$fecha_ant.log control=./ea____.ctl data=./EA$fecha_ant.dat
grep "MAXIMUM ERROR COUNT EXCEEDED" ./ea$fecha_ant.log > /dev/null
if [ $? -eq 0 ]
then
echo "el cargue de existencias por almacen tuvo problemas"
echo "revisar el archivo./ea"$fecha_ant.log
echo "loadEA__:$fecha_ant:N" >>.control
echo "verificarprocesos:$fecha_ant:N" >>.control
echo "geneubic:$fecha_ant:N" >>.control
exit
else
echo "loadEA__:$fecha_ant:S" >>.control
fi
else
echo "No existe el archivo EA"$fecha_ant.dat
echo "loadEA__:$fecha_ant:N" >>.control
echo "verificarprocesos:$fecha_ant:N" >>.control
echo "geneubic:$fecha_ant:N" >>.control
exit
fi;;
esac;;
*)
case $indica in
N) echo "loadEA__:$fecha_ant:$indica" >>.control
echo "verificarprocesos:$fecha_ant:N" >>.control
echo "geneubic:$fecha_ant:N" >>.control
exit;;
S) if [ -s EA$fecha.dat ]
then
sqlldr repaut/repaut@hc errors=5000 log=./ea$fecha.log control=./ea____.ctl data=./EA$fecha.dat
grep "MAXIMUM ERROR COUNT EXCEEDED" ./ea$fecha.log > /dev/null
if [ $? -eq 0 ]
then
echo "el cargue de existencias por almacen tuvo problemas"
echo "revisar el archivo./ea"$fecha.log
echo "loadEA__:$fecha:N" >>.control
echo "verificarprocesos:$fecha:N" >>.control
echo "geneubic:$fecha:N" >>.control
exit
else
echo "loadEA__:$fecha:S" >>.control
fi
else
echo "No existe el archivo EA"$fecha.dat
echo "loadEA__:$fecha:N" >>.control
echo "verificarprocesos:$fecha:N" >>.control
echo "geneubic:$fecha:N" >>.control
exit
fi;;
esac;;
esac
# Stage 3 (verificarprocesos): run the database verification procedures
# for the current date; on failure mark this and the final stage N.
indica=`grep verificarprocesos: .control.old |cut -d ':' -f3`
fecha_ant=`grep verificarprocesos: .control.old |cut -d ':' -f2`
case $fecha_ant in
$fecha)
case $indica in
S) echo "verificarprocesos:$fecha_ant:$indica" >>.control;;
N) sqlplus repaut/repaut@hc @verificar.sql $fecha_ant
if [ $? -ne 0 ]
then
echo "El cargue definitivo de las ventas por almacen tuvo problemas"
echo "verificarprocesos:$fecha_ant:N" >>.control
echo "geneubic:$fecha_ant:N" >>.control
exit
else
echo "verificarprocesos:$fecha_ant:S" >>.control
fi;;
esac;;
*)
case $indica in
N) echo "verificarprocesos:$fecha_ant:$indica" >>.control
echo "geneubic:$fecha:N" >>.control
exit;;
S) sqlplus repaut/repaut@hc @verificar.sql $fecha
if [ $? -ne 0 ]
then
echo "Los procedimientos de Base de Datos no se completaron"
echo "verificarprocesos:$fecha:N" >>.control
echo "geneubic:$fecha:N" >>.control
exit
else
echo "verificarprocesos:$fecha:S" >>.control
fi;;
esac;;
esac
# Stage 4 (geneubic): generate the replenishment file and transfers.
indica=`grep geneubic: .control.old |cut -d ':' -f3`
fecha_ant=`grep geneubic: .control.old |cut -d ':' -f2`
case $fecha_ant in
$fecha)
case $indica in
S) echo "geneubic:$fecha_ant:$indica" >>.control;;
N) sqlplus repaut/repaut@hc @geneubic.sql $fecha_ant
sqlplus scd/scd@prueba @GenerarTransferencias.sql $fecha_ant
if [ $? -ne 0 ]
then
echo "El archivo de reposicion no se genero "
echo "geneubic:$fecha_ant:N" >>.control
exit
else
echo "geneubic:$fecha_ant:S" >>.control
fi;;
esac;;
*)
case $indica in
N) echo "geneubic:$fecha_ant:N" >>.control
exit;;
S) sqlplus repaut/repaut@hc @geneubic.sql $fecha
sqlplus scd/scd@prueba @GenerarTransferencias.sql $fecha
if [ $? -ne 0 ]
then
echo "El cargue definitivo de las ventas por almacen tuvo problemas"
echo "geneubic:$fecha:N" >>.control
exit
else
# BUG FIX: the success record used $fecha_ant here, unlike every sibling
# stage which records the NEW date ($fecha) in this branch; that would
# make the next run re-execute geneubic for the current date.
echo "geneubic:$fecha:S" >>.control
fi;;
esac;;
esac
| true
|
af2b2fcfaf0ff587199f7b6917c0fd27b3542447
|
Shell
|
mstssk/dotfiles
|
/setup-ln.sh
|
UTF-8
| 637
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash -u
# Run from the dotfiles directory so relative paths resolve correctly.
cd `dirname $0`
# Helper that creates a symbolic link for a directory.
#
# Running `ln -s` again on an existing directory symlink does not fail or
# overwrite it -- instead it creates a nested link inside the target
# directory.  Checking for the directory's existence first prevents that
# duplicate-link behaviour.
# Create a symlink to a directory, but only when the destination does not
# already exist (avoids nesting a second link inside an existing one).
#   $1 - link target (source directory)
#   $2 - link path to create
function _lndir() {
    if [ ! -e "$2" ]; then
        ln -s "$1" "$2"
    else
        echo "$2: Dir or SymLink already exists"
    fi
}
# Link zsh config into $HOME. Paths are quoted so they survive spaces in
# $HOME, and the .zsh directory goes through _lndir so re-running the script
# does not nest a second link inside an existing ~/.zsh (the exact hazard
# _lndir exists to prevent). .zshrc is a file, so a plain ln -s is fine.
_lndir "$HOME/dotfiles/.zsh" "$HOME/.zsh"
ln -s "$HOME/dotfiles/.zshrc" "$HOME/"
_lndir "$HOME/dotfiles/.config/git/" "$HOME/.config/git"
| true
|
b0d60c90c669042f46affa3ed019eecf103296b0
|
Shell
|
lg/hiveos-docker
|
/start.sh
|
UTF-8
| 2,732
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/ash
# One-time image preparation: if the qcow2 disk image is missing, download
# the latest HiveOS image, convert it, and inject the supplied rig.conf.
if ! test -f /hiveos-rig/hiveos.qcow2; then
echo "The file /hiveos-rig/hiveos.qcow2 doesnt exist (or isnt volume linked), going to create it now."
# rig.conf is mandatory -- it is copied into the image below.
if ! test -f /hiveos-rig/rig.conf; then
echo "Missing /hiveos-rig/rig.conf file. Aborting."
exit 1
fi
cd /hiveos-rig
echo "Downloading HiveOS..."
# Scrape the latest image filename from VERSIONS.txt, then fetch it.
curl -o hiveos.img.xz "https://download.hiveos.farm/$(curl 'https://download.hiveos.farm/VERSIONS.txt' 2>&1 | sed -rn 's/.*(hiveos-.*\.img\.xz)/\1/p' | head -1)"
echo "Decompressing..."
xz --decompress hiveos.img.xz
echo "Converting to qcow2 and recompressing..."
# -c: compressed qcow2; the raw image is discarded afterwards.
qemu-img convert -c -O qcow2 hiveos.img hiveos.qcow2
rm hiveos.img
echo "Updating config with supplied rig.conf..."
# Expose the qcow2 as a block device via nbd so its first partition
# (NTFS config partition) can be mounted and rig.conf copied in.
modprobe nbd
qemu-nbd -d /dev/nbd0
qemu-nbd -c /dev/nbd0 hiveos.qcow2
fdisk -l /dev/nbd0
mkdir /mnt/hiveos-config
mount -t ntfs-3g /dev/nbd0p1 /mnt/hiveos-config
cp /hiveos-rig/rig.conf /mnt/hiveos-config/rig.conf
umount /mnt/hiveos-config
rm -r /mnt/hiveos-config
qemu-nbd -d /dev/nbd0
rmmod nbd
echo "Image ready."
fi
cd /hiveos-docker
# disconnect all virtual terminals (for GPU passthrough to work)
echo "Unbinding consoles..."
test -e /sys/class/vtconsole/vtcon0/bind && echo 0 > /sys/class/vtconsole/vtcon0/bind
test -e /sys/class/vtconsole/vtcon1/bind && echo 0 > /sys/class/vtconsole/vtcon1/bind
test -e /sys/devices/platform/efi-framebuffer.0/driver && echo "efi-framebuffer.0" > /sys/devices/platform/efi-framebuffer.0/driver/unbind
echo "Binding vfio to all NVIDIA/AMD cards..."
modprobe vfio_pci
modprobe vfio_iommu_type1
# For every NVIDIA/AMD PCI device: detach its current driver, then register
# its vendor/device pair with vfio-pci so vfio claims it.
for pci_id in $(lspci | grep -e NVIDIA -e AMD | awk '{print "0000:"$1}'); do
test -e /sys/bus/pci/devices/$pci_id/driver && echo -n "$pci_id" > /sys/bus/pci/devices/$pci_id/driver/unbind
echo "$(cat /sys/bus/pci/devices/$pci_id/vendor) $(cat /sys/bus/pci/devices/$pci_id/device)" > /sys/bus/pci/drivers/vfio-pci/new_id
done
# Block until the vfio device node appears.
while [ ! -e /dev/vfio ]; do sleep 1; done
echo "Starting QEMU..."
exec qemu-system-x86_64 \
-monitor stdio \
-nodefaults \
\
-smp cpus=2 \
-m 4G \
-enable-kvm \
-cpu host,check,enforce,hv_relaxed,hv_spinlocks=0x1fff,hv_vapic,hv_time,l3-cache=on,-hypervisor,kvm=off,migratable=no,+invtsc,hv_vendor_id=1234567890ab \
-machine type=q35 \
-drive if=pflash,format=raw,readonly,file=/usr/share/OVMF/OVMF_CODE.fd `# read-only UEFI bios` \
-drive if=pflash,format=raw,file=qemu.nvram `# UEFI writeable NVRAM` \
-rtc clock=host,base=localtime \
-device qemu-xhci `# USB3 bus` \
\
-drive file=/hiveos-rig/hiveos.qcow2 \
\
$(for x in $(lspci | grep -e NVIDIA -e AMD | awk '{print $1}'); do echo "-device vfio-pci,host=$x "; done | xargs) \
\
-nic user,model=rtl8139 \
-vga none \
-nographic
| true
|
f3e9c810a6b86c67d35dcee661a66bdb00f4e637
|
Shell
|
junchuan/omcs
|
/develop
|
UTF-8
| 341
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
# Abort: print the given message on stdout and exit with status 1.
fail() { echo "$@"; exit 1; }
# Install the pysparse dependency, then set up each project in development
# ("editable") mode; any step failing aborts the whole script via fail().
pip install csc-pysparse || fail "Installing csc-pysparse failed."
for project in csc-utils simplenlp conceptnet divisi2; do
echo Installing $project ...
cd $project || fail "$project does not exist"
python setup.py develop || fail "python setup.py develop in $project failed"
cd ..
done
| true
|
402a8378e5626c03f9efaee7c8194bb8ff6c51bb
|
Shell
|
MikeMoldawsky/OS-TAU
|
/ex5/kernel/Testers/Scripts/dispatcher_test.sh
|
UTF-8
| 1,352
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
# Exercises the message-slot device driver through ./dispatcher:
# first reads uninitialized channels, then writes every channel once,
# then reads every channel back over several rounds.
printf "\nInput is in the format:\nargv[1]={r | {w | s} }\n"
printf "if read: tuples of\t<channel, filename> ......<channel, filename>\n"
printf "if write: triples of\t<channel, msg, filename> ...... <channel, msg, filename>\n"
p="/dev/msd" # message slot device path
c=0
i=1
channels=4
# Phase 1: read each channel before anything was written (expect garbage).
while [ $c -lt $channels ]; do
printf "\n\n######### round number ${i}: READ B4 Write (crap in buffer) CHANNEL ${c} ##############\n\n"
./dispatcher r "${c}" "${p}1" "${c}" "${p}2" "${c}" "${p}3" "${c}" "${p}4" "${c}" "${p}5" "${c}" "${p}6"
let c=c+1
done
c=0 #channel
# Phase 2: write a distinct message to every channel on all six devices.
while [ $c -lt $channels ]; do
printf "\n\n############## WRITE TO CHANNEL ${c} ##############\n\n"
m="MSG passed in channel ${c} to device /dev/msd"
./dispatcher w "${c}" "${m}1" "${p}1" "${c}" "${m}2" "${p}2" "${c}" "${m}3" "${p}3" "${c}" "${m}4" "${p}4" "${c}" "${m}5" "${p}5" "${c}" "${m}6" "${p}6"
let c=c+1
done
c=0
i=1
rounds=3
# Phase 3: read every channel back, repeated (rounds - 1) times, to check
# the stored messages survive repeated reads.
while [ $i -lt $rounds ]; do
while [ $c -lt $channels ]; do
printf "\n\n############## round number ${i}: READ FROM CHANNEL ${c} ##############\n\n"
./dispatcher r "${c}" "${p}1" "${c}" "${p}2" "${c}" "${p}3" "${c}" "${p}4" "${c}" "${p}5" "${c}" "${p}6"
let c=c+1
done
let i=i+1
c=0
done
printf "\n\n############## DONE!!! SHOULD HAVE WROTE TO 4 CHANNELS and READ 2 TIMES ALL OF THEM##############\n\n"
| true
|
922afefe781fb3052e3ed74f7fdff435162d5a44
|
Shell
|
avoivo/jenkins_k8s_slave
|
/entrypoint
|
UTF-8
| 1,332
| 3.390625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Container entrypoint for a Jenkins JNLP slave with optional gcloud auth.
set -e
echo "inside jenkins slave"
if [ $# -eq 1 ]; then
# if `docker run` only has one arguments, we assume user is running alternate command like `bash` to inspect the image
exec "$@"
else
# Service-account credentials supplied inline as base64 JSON.
if [ -n "${GCLOUD_ACCOUNT}" ]; then
echo ${GCLOUD_ACCOUNT} >> /usr/local/auth.base64
base64 -d /usr/local/auth.base64 >> /usr/local/auth.json
export GOOGLE_APPLICATION_CREDENTIALS="/usr/local/auth.json"
gcloud auth activate-service-account --project ${GCLOUD_PROJECT} --key-file=/usr/local/auth.json ${GCLOUD_ACCOUNT_EMAIL}
fi
# Alternatively, credentials supplied as a mounted key file.
if [ -n "${GCLOUD_ACCOUNT_FILE}" ]; then
export GOOGLE_APPLICATION_CREDENTIALS=${GCLOUD_ACCOUNT_FILE}
gcloud auth activate-service-account --project ${GCLOUD_PROJECT} --key-file=${GCLOUD_ACCOUNT_FILE} ${GCLOUD_ACCOUNT_EMAIL}
fi
if [ -n "${GCLOUD_ZONE}" ]; then
gcloud config set compute/zone ${GCLOUD_ZONE}
fi
if [ ! -z "$JENKINS_URL" ]; then
URL="-url $JENKINS_URL"
fi
if [ -z "$JNLP_PROTOCOL_OPTS" ]; then
echo "Warning: JnlpProtocol3 is disabled by default, use JNLP_PROTOCOL_OPTS to alter the behavior"
JNLP_PROTOCOL_OPTS="-Dorg.jenkinsci.remoting.engine.JnlpProtocol3.disabled=true"
fi
fi
# Replace the shell with the JNLP agent (only reached in the multi-arg
# branch; the single-arg branch exec'd above).
exec java $JAVA_OPTS $JNLP_PROTOCOL_OPTS -cp /usr/share/jenkins/slave.jar hudson.remoting.jnlp.Main -headless $URL $JENKINS_SECRET $JENKINS_SLAVE
|
33d0dd1607154606379a3f012268bbed3fb2c4e2
|
Shell
|
SBU-BMI/quip_prad_cancer_detection
|
/dice/run_dice_calc.sh
|
UTF-8
| 1,467
| 3.828125
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
# Example:
# ./run_dice_calc.sh "/prad_old_model/heatmap_txt" "/prad_new_model/heatmap_txt" "/prad_old_model/heatmap_txt_3classes_separate_class" "/prad_new_model/heatmap_txt_3classes_separate_class"
if [ $# -eq 0 ]; then
    echo 'Usage: ./'$(basename "$0") '/path/to/old_heatmap_txt /path/to/new_heatmap_txt /path/to/old_heatmap_txt_3classes_separate_class /path/to/new_heatmap_txt_3classes_separate_class'
    # Bug fix: previously the script printed the usage text but kept running
    # with empty positional parameters; abort instead.
    exit 1
fi
current_time=$(date "+%Y.%m.%d-%H.%M.%S")
# Positional arguments: old/new roots for the combined-class heatmaps, then
# old/new roots for the separate-class heatmaps.
dir_old="$1"
dir_new="$2"
old="$3"
new="$4"
# Compute Dice scores for heatmaps that keep all 3 classes in one file.
#   $1 - old heatmap_txt directory
#   $2 - new heatmap_txt directory
# Writes prad_dice-<timestamp>.csv; aborts the whole script on failure.
heatmap_txt() {
    echo "3 classes"
    if python dice_calc.py 0 "$1" "$2" >"prad_dice-$current_time.csv"; then
        echo OK
    else
        echo FAIL
        exit 1
    fi
}
# Quote the directory arguments so paths containing spaces are not split.
heatmap_txt "$dir_old" "$dir_new"
# Compute per-class Dice scores: one dice_calc.py run for each class
# subdirectory found under the given roots.
#   $1 - old separate-class root
#   $2 - new separate-class root
# Writes prad_dice-<class>-<timestamp>.csv per class; aborts on failure.
heatmap_txt_3classes_separate_class() {
    subdirs=('heatmap_txt_benign' 'heatmap_txt_grade3' 'heatmap_txt_grade45' 'heatmap_txt_thresholded' 'heatmap_txt_tumor')
    repl="heatmap_txt_"
    for dir in "${subdirs[@]}"; do
        old="$1/$dir"
        new="$2/$dir"
        name="${dir//$repl/}" # 'benign', 'grade3', etc.
        echo "$name"
        if python dice_calc.py 1 "$old" "$new" >"prad_dice-$name-$current_time.csv"; then
            echo OK
        else
            echo FAIL
            exit 1
        fi
    done
}
# Quote the directory arguments so paths containing spaces are not split.
heatmap_txt_3classes_separate_class "$old" "$new"
| true
|
d3f0e67c3d616d7e46dfb7f6efe392dd90b10263
|
Shell
|
iwannay/kratos
|
/tool/kratos-gen-project/testdata/test_in_gomod.sh
|
UTF-8
| 456
| 2.859375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Smoke test: scaffold a project with `kratos new` in each mode (default,
# --grpc, --http) and verify the generated code compiles.
set -e
dir=`pwd`
cd $dir
rm -rf ./a
kratos new a
cd ./a/cmd && go build
# NOTE(review): with `set -e`, a failing `go build` aborts the script before
# this check runs, so the "Failed" branches below are effectively dead code.
if [ $? -ne 0 ]; then
echo "Failed: all"
exit 1
else
rm -rf ../../a
fi
cd $dir
rm -rf ./b
kratos new b --grpc
cd ./b/cmd && go build
if [ $? -ne 0 ];then
echo "Failed: --grpc"
exit 1
else
rm -rf ../../b
fi
cd $dir
rm -rf ./c
kratos new c --http
cd ./c/cmd && go build
if [ $? -ne 0 ]; then
echo "Failed: --http"
exit 1
else
rm -rf ../../c
fi
| true
|
8ac30870785e61ce2b73072987a7bb5d0d00bb74
|
Shell
|
benbenolson/newton-install
|
/scripts/serf/1.3.0.sh
|
UTF-8
| 692
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash -e
################################################################################
# SERF version 1.3.0 INSTALL SCRIPT
# for use on UTK Newton only
#
# Files changed in order to compile:
# None
################################################################################
CURDIR=$PWD
#First build APR
cd apr
./configure --prefix="$CURDIR/apr-install"
make -j8
make install
cd ..
#Now build APU
cd apu
# APU is built against the APR just installed above.
./configure --prefix="$CURDIR/apu-install" --with-apr="$CURDIR/apr-install"
make -j8
make install
cd ..
#Now finally build serf
cd serf
# NOTE(review): $APPDIR is expected to be set by the caller's environment --
# it is not defined anywhere in this script.
mkdir -p $APPDIR
../scons/scons.py APR="$CURDIR/apr-install" APU="$CURDIR/apu-install" PREFIX="$APPDIR"
../scons/scons.py install
| true
|
11c0126e049492a26b44549fea47daa2174516ac
|
Shell
|
zoeabryant/dotfiles
|
/.bash_profile
|
UTF-8
| 654
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# Personal bash profile: sets up colors, editor, and sources helper files
# from the dotfiles repository.
export DOTFILES="$HOME/projects/dotfiles"
# Don't check mail when opening terminal.
unset MAILCHECK
# enable colors
alias ls="command ls -G"
export LSCOLORS='Gxfxcxdxdxegedabagacad'
# Set TextMate as default editor
export EDITOR="/usr/local/bin/mate -w"
export GIT_EDITOR='/usr/local/bin/mate -w'
# source config bash files, like aliases, colors, base theme
HELPERS="${DOTFILES}/*.bash"
for config_file in $HELPERS
do
source $config_file
done
# sweet prompt theme from bash-it
source "$DOTFILES/themes/rainbowbrite.theme.bash"
# source custom files
CUSTOM="$DOTFILES/custom/*"
for config_file in $CUSTOM
do
source $config_file
done
| true
|
1ef6c9586a99bf12d7ea3341304b1b468e7ea8a3
|
Shell
|
Cloudxtreme/ClouDNS-1
|
/MS1/domaininfo.init.debian
|
UTF-8
| 1,919
| 3.90625
| 4
|
[] |
no_license
|
#!/bin/sh
### BEGIN INIT INFO
# Provides: domaininfo
# Required-Start: $local_fs $network $syslog
# Required-Stop: $local_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Domain info collector
# Description: Collects information about domains based on BIND log
### END INIT INFO
# Do NOT "set -e"
# PATH should only include /usr/* if it runs after the mountnfs.sh script
PATH=/sbin:/usr/sbin:/bin:/usr/bin:/usr/local/sbin:/usr/local/bin
DESC="Domain info collector"
NAME=domaininfo
DAEMON=/usr/local/sbin/domaininfo
DAEMON_ARGS=""
PIDFILE=/var/run/$NAME.pid
SCRIPTNAME=/etc/init.d/$NAME
# Exit if the daemon is not installed
[ -x "$DAEMON" ] || exit 0
# Load the VERBOSE setting and other rcS variables
. /lib/init/vars.sh
# Define LSB log_* functions.
# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
# and status_of_proc is working.
. /lib/lsb/init-functions
#
# Function that starts the daemon/service
#
# Start the daemon unless a PID file indicates it is already running.
# Returns 0 when started, 1 when it was already running.
do_start()
{
    if [ ! -f $PIDFILE ]; then
        $DAEMON start
        return 0
    fi
    log_daemon_msg "is already running"
    return 1
}
#
# Stop the daemon if the PID file says it is running.
# Returns 0 when stopped, 1 when it was not running.
#
do_stop()
{
    if [ ! -f $PIDFILE ]; then
        log_daemon_msg "is not running"
        return 1
    fi
    $DAEMON stop
    return 0
}
case "$1" in
start)
log_daemon_msg "Starting" "$NAME"
do_start
case "$?" in
0) log_end_msg 0 ;;
1) log_end_msg 1 ;;
esac
;;
stop)
log_daemon_msg "Stopping" "$NAME"
do_stop
case "$?" in
0) log_end_msg 0 ;;
1) log_end_msg 1 ;;
esac
;;
status)
status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $?
;;
*)
echo "Usage: $SCRIPTNAME {start|stop|status}" >&2
exit 3
;;
esac
:
| true
|
6a6627b657b64d92d164fc0b5a8d37fb6de44a84
|
Shell
|
18309220622/c-
|
/monitor/system_monitor.sh
|
UTF-8
| 3,054
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/bash
# System report: OS, network, users, memory, load and disk usage, printed
# with yellow labels. Only runs when called without arguments.
if [[ $# -eq 0 ]]
then
setTerminal=$(tput sgr0)
# 1. Operating system type
os=$(uname -o)
echo -e "\E[1;33m" 'Operator System Type:' ${setTerminal} ${os}
# 2. OS release version and name
os_name=$(cat /etc/issue | grep "release")
echo -e "\E[1;33m" 'OS Release Version and Name:' ${setTerminal} ${os_name}
# 3. CPU architecture: 32-bit or 64-bit
architecture=$(uname -m)
echo -e "\E[1;33m" 'architecture:' ${setTerminal} ${architecture}
# 4. Kernel release version
kernelRelease=$(uname -r)
echo -e "\E[1;33m" 'OS kernelRelease:' ${setTerminal} ${kernelRelease}
# 5. Two alternative ways to get the hostname (kept for reference)
#hostName=$(uname -n)
#hostName= set | grep "HOSTNAME"
echo -e "\e[1;33m hostname is: \e[0m" ${HOSTNAME}
# 6. Internal (private) IP address
InternalIP=$(hostname -I)
echo -e "\E[1;33m" 'Internal IP:' ${setTerminal} ${InternalIP}
# InternalIP=$(ifconfig | grep "inet addr" | cut -f 2 -d ":" | cut -f 1 -d " ")
# 7. External (public) IP address
ExternalIP=$(curl -s http://ipecho.net/plain)
echo -e "\E[1;33m" 'External IP:' ${setTerminal} ${ExternalIP}
# 8. Configured DNS servers
nameservers=$(cat /etc/resolv.conf | egrep "^nameserver " | awk '{print $NF}')
echo -e "\E[1;33m" 'check DNS:' ${setTerminal} ${nameservers}
# 9. Internet connectivity check
ping -c 2 baidu.com &>/dev/null && echo -e "\E[1;33m"'Internet:Connected' ${setTerminal} || echo -e "\E[1;33m" 'Internet:Disconnected' ${setTerminal} # ping twice
# 10. Currently logged-in users
who>/tmp/who
echo -e "\E[1;33m" 'Logged in Users:' ${setTerminal} && cat /tmp/who
rm -f /tmp/who # remove the file so the next run does not show stale data
echo -e "\E[1;34m" '############################################'
# System memory in use (bytes already reported in kB by /proc/meminfo,
# converted to MB here)
system_mem_used=$(awk '/MemTotal/{total=$2}/MemFree/{free=$2}END{print (total-free)/1024}' /proc/meminfo)
echo -e "\E[1;33m" 'system memmory used:' $setTerminal $system_mem_used
# Application memory: Cached includes SwapCached, so only the plain Cached
# value (matched with ^Cached) is subtracted here along with Buffers.
applicate_mem_used=$(awk '/MemTotal/{total=$2}/MemFree/{free=$2}/^Cached/{cache=$2}/Buffers/{buffers=$2}END{print (total-free-cache-buffers)/1024}' /proc/meminfo)
echo -e "\E[1;33m" 'applicate memmory used:' $setTerminal $applicate_mem_used
loadaverage=`top -n 1 -b | grep "load average:" |awk '{print $12 $13 $14}'`
echo -e "\E[1;33m" 'load average:' ${setTerminal} ${loadaverage}
# $() was found not to print the load info correctly on some platforms,
# so backticks are used here (supported everywhere).
# Fields 12-14 of the filtered top output are the 1-, 5- and 15-minute
# load averages.
diskaverage=$(df -h| egrep -v 'Filesystem|tmpfs' | awk '{print $1 " " $5}')
echo -e "\E[1;33m" 'diskaverage:' ${setTerminal} ${diskaverage}
fi
| true
|
4b1c36e583cd4e7ea0281fb8481cee07f542f148
|
Shell
|
mateuszkiebala/bsk
|
/zad6/add_revoked_users.sh
|
UTF-8
| 300
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
# Revoke the OpenVPN client certificate of every username given on the
# command line (usernames come from the [username].csr files).
# Bug fix: the original guard was `[ -z "$@" ]`, which only behaves as
# intended for zero or one argument; test the positional-parameter count.
if [ $# -eq 0 ];
then
    echo "Arguments not present."
    echo "Usage $0 [list of usernames to revoke]"
    echo "You can get username from [username].csr"
    exit 99
fi
echo "Revoking users..."
for user in "$@"; do
    # Revoke this user's certificate via easy-rsa.
    /etc/openvpn/easy-rsa/./revoke-full "$user"
done
| true
|
c04089dc5a951f2d2a57311c68d35b6d1058ec79
|
Shell
|
samsulmaarif/samsulmaarif.github.io
|
/scripts/sysadm1.sh
|
UTF-8
| 899
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Samsul Ma'arif @ 2017 <samsul@puskomedia.id>
# Grading/verification script: checks partitions, memory, SSH config,
# group membership of KaryawanX/ManajerX accounts, and DNS answers.
echo ""
echo "cek partisi & mount point"
df -h | grep /home
df -h | grep /var
echo ""
echo "Cek RAM & swap"
free -m
echo ""
echo "Cek SSH Server"
cat /etc/ssh/sshd_config | grep Port
cat /etc/ssh/sshd_config | grep Root
echo ""
echo "Cek user KaryawanX"
# Verify Karyawan01..Karyawan10 are members of group "Karyawan"
# (\b word boundaries avoid matching e.g. "KaryawanExtra").
for x in {01..10}; do
username=Karyawan$x
if groups $username | grep &>/dev/null '\bKaryawan\b'; then
echo "$username member of Karyawan - true"
else
echo "$username member of Karyawan - false"
fi
done
echo ""
echo "Cek user ManajerX"
# Same check for Manajer01..Manajer10 in group "Manajer".
for x in {01..10}; do
username=Manajer$x
if groups $username | grep &>/dev/null '\bManajer\b'; then
echo "$username member of Manajer - true"
else
echo "$username member of Manajer - false"
fi
done
echo ""
echo "Cek DNS"
echo -n "Masukkan alamat IP > "
read alamatip
# Query the user-supplied server for both forward and reverse lookups.
nslookup sccatswa.id $alamatip && nslookup $alamatip $alamatip
| true
|
0ec7bf343ab5ec041c9d5505b50fdb0db4cbb2cf
|
Shell
|
venezia/go-sample
|
/setup.sh
|
UTF-8
| 6,470
| 3.875
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
######
# setup.sh
#
# Sets up infrastructure for a continuously-built and deployed container-based
# web app for Azure App Service.
#
# $1: image_name: Name of image (aka repo) to use or create. Defaults to
#     $IMAGE_NAME.
# $2: image_tag: Name of image tag. Defaults to $IMAGE_TAG
# $3: repo_url: Source code repo to use for continuous build. Defaults to
#     "https://github.com/{IMAGE_NAME}.git"
# $4: base_name: A default prefix for all Azure resources. Defaults to $AZURE_BASE_NAME.
# $5: registry_name: Name of Azure container registry to use or create.
#     Defaults to "${AZURE_BASE_NAME}-registry".
# $6: app_name: Name of App Service web app to use or create. Defaults to
#     "{AZURE_BASE_NAME}-webapp".
# $7: plan_name: Name of App Service plan to use or create. Defaults to
#     "{AZURE_BASE_NAME}-plan".
# $8: group_name: Name of Azure resource group to use for all resources.
#     Defaults to "${AZURE_BASE_NAME-group".
# $9: location: Name of Azure location to use for all resources. Defaults to
#     $AZURE_DEFAULT_LOCATION.
#####
# Resolve this script's directory so paths work from any cwd.
__filename=${BASH_SOURCE[0]}
__dirname=$(cd $(dirname ${__filename}) && pwd)
__root=${__dirname}
# Seed .env from the template on first run, then load it.
if [[ ! -f "${__root}/.env" ]]; then cp "${__root}/.env.tpl" "${__root}/.env"; fi
if [[ -f "${__root}/.env" ]]; then source "${__root}/.env"; fi
source "${__dirname}/scripts/rm_helpers.sh" # for ensure_group
# Positional arguments with env-var fallbacks (see header comment).
image_name=${1:-${IMAGE_NAME}}
image_tag=${2:-${IMAGE_TAG}}
repo_url=${3:-"https://github.com/${image_name}.git"}
base_name=${4:-"${AZURE_BASE_NAME}"}
# ACR names must be alphanumeric, so strip dashes/underscores/spaces.
registry_name=${5:-"$(echo ${base_name} | sed 's/[- _]//g')registry"}
app_name=${6:-"${base_name}-webapp"}
plan_name=${7:-"${base_name}-plan"}
group_name=${8:-"${base_name}-group"}
location=${9:-${AZURE_DEFAULT_LOCATION}}
# set after getting registry config
image_uri=
registry_sku=Standard
url_suffix=azurewebsites.net
# errors
declare -i err_registrynameexists=101
declare -i err_nogithubtoken=102
####
## ensure groups
ensure_group $group_name
## ensure registry
# Look up the registry; empty id means it does not exist yet.
registry_id=$(az acr show \
--name ${registry_name} --resource-group ${group_name} \
--output tsv --query 'id' 2> /dev/null)
if [[ -z "${registry_id}" ]]; then
# Check global name availability before attempting creation.
namecheck_results=$(az acr check-name --name ${registry_name} \
--output tsv --query '[nameAvailable, reason]')
name_available=$(echo $namecheck_results | cut -d " " -f1)
reason=$(echo $namecheck_results | cut -d " " -f2)
if [[ "false" == "${name_available}" ]]; then
echo "registry name [${registry_name}] unavailable, reason [${reason}]"
exit $err_registrynameexists
fi
# Admin account is enabled so username/password can be read below.
registry_id=$(az acr create \
--name ${registry_name} \
--resource-group ${group_name} \
--sku ${registry_sku} \
--admin-enabled 'true' \
--location $location \
--output tsv --query id)
fi
# Collect login server + admin credentials for wiring up the web app.
registry_prefix=$(az acr show \
--name ${registry_name} --resource-group ${group_name} --output tsv --query 'loginServer')
registry_password=$(az acr credential show \
--name ${registry_name} --output tsv --query 'passwords[0].value')
registry_username=$(az acr credential show \
--name ${registry_name} --output tsv --query 'username')
image_uri=${registry_prefix}/${image_name}:${image_tag}
echo "ensured registry: ${registry_id}"
echo "using image_uri: ${image_uri}"
## ensure App Service plan
plan_id=$(az appservice plan show \
--name ${plan_name} \
--resource-group ${group_name} \
--output tsv --query id)
if [[ -z $plan_id ]]; then
# Linux plan is required for container-based web apps.
plan_id=$(az appservice plan create \
--name ${plan_name} \
--resource-group ${group_name} \
--location $location \
--is-linux \
--output tsv --query id)
fi
echo "ensured plan $plan_id"
## ensure Web App
webapp_id=$(az webapp show \
--name ${app_name} \
--resource-group ${group_name} \
--output tsv --query id)
if [[ -z $webapp_id ]]; then
webapp_id=$(az webapp create \
--name "$app_name" \
--plan ${plan_id} \
--resource-group ${group_name} \
--deployment-container-image-name ${image_uri} \
--output tsv --query 'id')
fi
# set up web app for continuous deployment
# Point the app at the ACR image using the admin credentials gathered above.
webapp_config=$(az webapp config container set \
--ids $webapp_id \
--docker-custom-image-name ${image_uri} \
--docker-registry-server-url "https://${registry_prefix}" \
--docker-registry-server-user ${registry_username} \
--docker-registry-server-password ${registry_password} \
--output json)
webapp_config2=$(az webapp deployment container config \
--ids $webapp_id \
--enable-cd 'true' \
--output json)
echo -e "webapp_config:\n$webapp_config"
echo -e "webapp_config2:\n$webapp_config2"
echo "ensured web app: $webapp_id"
# Probe the source repo URL; curl exit code 22 means an HTTP error (e.g.
# the repo is not publicly reachable).
curl -L --fail "${repo_url}" 2> /dev/null 1> /dev/null
curl_exitcode=$?
if [[ "$curl_exitcode" == "22" ]]; then
echo "could not reach hosted repo, instead building locally and pushing"
echo "continuous build and deploy requires a hosted repo"
# run one build and push image
# add `--no-logs` to suppress log output
build_id=$(az acr build \
--registry ${registry_name} \
--resource-group ${group_name} \
--file 'Dockerfile' \
--image "${image_name}:${image_tag}" \
--os 'Linux' \
--output tsv --query id \
${__root})
else
echo "using hosted repo: $repo_url for continuous build and deploy"
# A GitHub token is needed for ACR to register the commit webhook.
if [[ -z ${GH_TOKEN} ]]; then
echo 'specify a GitHub personal access token in the env var `GH_TOKEN`' \
'to set up continuous deploy'
exit $err_nogithubtoken
fi
# set up a build task to build on commit
buildtask_name=buildoncommit
buildtask_id=$(az acr build-task create \
--name ${buildtask_name} \
--registry ${registry_name} \
--resource-group ${group_name} \
--context ${repo_url} \
--git-access-token ${GH_TOKEN} \
--image "${image_name}:${image_tag}" \
--branch 'master' \
--commit-trigger-enabled 'true' \
--file 'Dockerfile' \
--os 'Linux' \
--output tsv --query id)
# and run once now
# add `--no-logs` to suppress log output
buildtask_run_id=$(az acr build-task run \
--name ${buildtask_name} \
--registry ${registry_name} \
--resource-group ${group_name} \
--output tsv --query id)
fi
## ensure operation
# Smoke-test the deployed app.
curl -L "https://${app_name}.${url_suffix}/?name=gopherman"
echo ""
| true
|
2d3749db9b89075f85801cf0abb64986ca29aa73
|
Shell
|
aneeshp1994/Jetson-TX2-sh
|
/6. install MySQL Apache2 Php.sh
|
UTF-8
| 1,397
| 2.609375
| 3
|
[] |
no_license
|
# Install MySQL
# Provisioning script: installs MySQL, Apache2 and PHP (with MySQL binding),
# then relaxes the MySQL sql_mode (drops ONLY_FULL_GROUP_BY).
echo 'Executing sudo apt-get update...'
sudo apt-get update
echo 'Done.'
echo 'Installing mysql...'
sudo apt-get install --upgrade -y mysql-server
echo 'Done.'
echo 'Installing mysqldb for python3...'
sudo apt-get install --upgrade -y python3-mysqldb
echo 'Done.'
# Install apache2
echo 'Installing apache2...'
sudo apt-get install --upgrade -y apache2
echo 'Done.'
# Install php
echo 'Installing php5.6 from ppa:ondrej/php repository...'
# The php5.6/PPA path is disabled; the distro's default php is used instead.
#sudo add-apt-repository ppa:ondrej/php
sudo apt-get update
#sudo apt-get install --upgrade -y php5.6
#sudo apt-get install --upgrade -y php5.6-mysqli
sudo apt-get install --upgrade -y php
sudo apt-get install --upgrade -y php-mysql
echo 'Done.'
# To prevent "Group by non aggregated columns allowed" error
echo sql_mode = STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION | sudo tee --append /etc/mysql/mysql.conf.d/mysqld.cnf
echo \#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\# Very Important \#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#
echo Check mysql username, password, database name in "backend/utils/databaseEntry.py"
echo \#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#\#
echo 'Finished.'
| true
|
5dbc765fbe23e9a998ebdd1c42a3fda8fbf2fa32
|
Shell
|
unb-libraries/docker-rsnapshot-mysql
|
/scripts/run.sh
|
UTF-8
| 221
| 2.6875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env sh
# Container entrypoint: source every pre-init hook, then start cron and
# follow the rsnapshot log so the container stays in the foreground.
for i in /scripts/pre-init.d/*sh
do
# -e guards against the literal glob when no hook files exist.
if [ -e "${i}" ]; then
echo "[i] pre-init.d - processing $i"
. "${i}"
fi
done
echo "[i] run.sh - Waiting for cron"
cron && tail -f /var/log/rsnapshot.log
| true
|
b66adef37ed9dc401429385dabe8842671ac0efa
|
Shell
|
srikavyapendiala/shellscript
|
/todo/main.sh
|
UTF-8
| 312
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
# Dispatcher: validates the requested component and the caller's privileges,
# then runs components/<name>.sh.
COMPONENT=$1
source components/common.sh
# ERROR() is expected to come from components/common.sh sourced above.
if [ ! -f components/${COMPONENT}.sh ]; then
ERROR "Invalid File"
exit 1
fi
USER_NAME=$(whoami)
if [ "${USER_NAME}" != "root" ]; then
ERROR "You should be a root user to execute these scripts"
exit 1
fi
# Exported so the component script can read which component it is.
export COMPONENT
bash components/${COMPONENT}.sh
| true
|
e2f59844340fc39f062130533cfb6b6e04ee7e96
|
Shell
|
fuzengjie/script
|
/backup_lib/bak_repo.sh
|
UTF-8
| 478
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
#description /data/repo Backup Script
# Pushes /data/repo to the backup server's rsync module, into a
# hostname/timestamped directory, with bandwidth limited to ~1 MB/s.
BACKUP_SERVER="backup.sa.beyond.com"
HOSTNAME=`hostname`
MODULE="Backup"
USER=backup
PASSWORD_FILE=/data/script/backup_lib/passwd
LOCAL_FILE=/data/repo
DATE=`date +%Y%m%d%H%M`
# Last path component of LOCAL_FILE (i.e. "repo").
REMOTE_NAME=`echo $LOCAL_FILE | awk -F "/" '{print $NF}'`
LOG=/data/logs/backup.log
/usr/bin/rsync -avzP --bwlimit=1000 ${LOCAL_FILE} backup@${BACKUP_SERVER}::$MODULE/$HOSTNAME/${REMOTE_NAME}_$DATE/ --password-file=${PASSWORD_FILE} >> $LOG
| true
|
2587624df19fad13eca6e40abd6d688be92b3882
|
Shell
|
luciano073/scripts
|
/install-vim.sh
|
UTF-8
| 2,556
| 2.75
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
#
# Script name : install-vim.sh
#
# Description : Installs the Vim text editor and some plugins
#
# Author : Luciano Marques
#
# Version : 0.1
#
# Dependencies : sudo
#
# Date : December/2012
#---------------------------------------------
# Bug fix: the original first line was "!#/usr/bin/env bash" -- the two
# characters were transposed, so it was not a valid shebang at all.
echo -e '\033[32m\n --->Instalando vim e demais pacotes necessarios...\033[m\n'
sleep 4
# replace "vim" with "vim-gtk" below to install the graphical Vim instead.
sudo apt-get install exuberant-ctags ncurses-term vim ack-grep
echo -e '\033[32m\n --->Executando configuracoes pos instalacao: arquivos vimrc e instalacao de plugins...\033[m\n'
sleep 4
# post-install configuration: create the pathogen directory layout
cd $HOME
mkdir -p $HOME/.vim/autoload
mkdir -p $HOME/.vim/bundle
# install plugins (pathogen loads everything cloned under ~/.vim/bundle)
echo -e '\033[32m\n --->Instalando o plugin [pathogen]...\033[m\n'
sleep 4
curl -Sso $HOME/.vim/autoload/pathogen.vim https://raw.github.com/tpope/vim-pathogen/master/autoload/pathogen.vim
echo 'call pathogen#infect()' >> $HOME/.vim/vimrc
echo 'syntax on' >> $HOME/.vim/vimrc
echo 'filetype plugin indent on' >> $HOME/.vim/vimrc
echo 'source ~/.vim/vimrc' >> ~/.vimrc
echo 'Helptags' >> ~/.vimrc
cd ~/.vim/bundle
echo -e '\033[32m\n --->Instalando plugin [vim-surround]...\033[m\n'
sleep 4
git clone git://github.com/tpope/vim-surround.git
echo -e '\033[32m\n --->Instalando plugin [vim-commentary]...\033[m\n'
sleep 4
git clone git://github.com/tpope/vim-commentary.git
# install the snipmate plugin and its dependencies
echo -e '\033[32m\n --->Instalando plugin [snipmate + dependencias]...\033[m\n'
sleep 4
git clone git://github.com/garbas/vim-snipmate.git
git clone https://github.com/tomtom/tlib_vim.git
git clone https://github.com/MarcWeber/vim-addon-mw-utils.git
git clone https://github.com/honza/snipmate-snippets.git
echo -e '\033[32m\n --->Instalando plugin [delimitMate]...\033[m\n'
sleep 4
git clone https://github.com/Raimondi/delimitMate.git
echo -e '\033[32m\n --->Instalando plugin [supertab]...\033[m\n'
sleep 4
git clone https://github.com/ervandew/supertab.git
echo -e '\033[32m\n --->Instalando plugin [vim-ruby]...\033[m\n'
sleep 4
git clone git://github.com/vim-ruby/vim-ruby.git
echo -e '\033[32m\n --->Instalando plugin [ctrlp]...\033[m\n'
sleep 4
git clone https://github.com/kien/ctrlp.vim.git
echo -e '\033[32m\n --->Instalando plugin [nerdtree]...\033[m\n'
sleep 4
git clone https://github.com/scrooloose/nerdtree.git
echo -e '\033[32m\n --->Instalando plugin [ack]...\033[m\n'
sleep 4
git clone https://github.com/mileszs/ack.vim.git
echo -e '\n=== Instalacao encerrada com sucesso! ==='
| true
|
711a3d022f7326bc842051616525429bc35cc37f
|
Shell
|
ConSol/sakuli
|
/src/docs-manual/src/main/scripts/create_index_adoc.sh
|
UTF-8
| 2,184
| 3.96875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Builds an index.adoc listing every versioned Sakuli documentation folder.
# $1 = docs root directory, $2 = output .adoc file path.
#cd "$1"
#cont="<html><body><ul>"
#
##find . -name 'v*' -type d #-exec echo {} + >> cont
#
#echo $cont
#perl -e 'print ; while(<>) { chop $_; print "<li><a href=\"./$_\">$_</a></li>";} print "</ul></body></html>"' > index.html
#!/bin/bash
set -e
ROOT="$1"
HTTP="/"
OUTPUT="$2"
OUTPUT_DIR=$(dirname $OUTPUT)
echo "ROOT DIR: $ROOT"
echo "OUTPUT FILE: $OUTPUT"
echo "OUTPUT DIR: $OUTPUT_DIR"
mkdir -p $OUTPUT_DIR
# Copy the newest released (non-snapshot) v* docs folder to $ROOT/latest.
function createLatestDocu (){
echo "------ update latest files ----"
# Newest v* directory by mtime, excluding snapshots.
latestDocRoot=$( find "$ROOT" -maxdepth 1 -mindepth 1 -name 'v*' -type d -print0 | xargs -0 ls -dt | grep -iv snapshot | head -1)
echo "latest Doc: $latestDocRoot"
targetDocRoot="$ROOT/latest"
if [ -d "$targetDocRoot" ]; then
rm -rf $targetDocRoot;
fi
echo "copy '$latestDocRoot' -> '$targetDocRoot'"
cp -r $latestDocRoot $targetDocRoot
echo "------ finished: update latest files ----"
}
# Write the asciidoc preamble and open the version table (overwrites $OUTPUT).
function createHeader (){
echo ":docinfo1:
= Sakuli Documentations
image::sakuli_logo_smaller.png[sakuli-logo]
Below you will find all currently available documentations for the different sakuli versions:
|===
|Version | HTML | PDF" > $OUTPUT
}
# Append a table row (HTML + PDF links) for every docs folder under $ROOT
# whose name matches the given glob pattern ($1).
function addFolderEntryToAdoc (){
pattern="$1"
echo "add folder with '$pattern' to adoc"
for filepath in `find "$ROOT" -maxdepth 1 -mindepth 1 -name $pattern -type d| sort`; do
folderName=`basename "$filepath"`
# Relative path to the PDF inside this version folder.
relPDF=$(cd $ROOT && find $folderName -name 'sakuli_documentation*.pdf')
echo "generate adoc table entry for '$folderName'"
echo "-------------------------------------------"
echo "
|$folderName
|link:$folderName/index.html[HTML]
|link:$relPDF[PDF^]
" >> $OUTPUT
done
}
# Close the version table and append the trailing note.
function createrFooter (){
echo "|===" >> $OUTPUT
echo "Version _latest_ is pointing to the latest *released stable* version (no SNAPSHOT versions) 😉" >> $OUTPUT
}
#### generate css for footer
# Copy the docinfo*.html design files next to the generated output.
function copyDesignFile (){
SCRIPT=`realpath -s $0`
SCRIPTPATH=`dirname $SCRIPT`
cp -v $SCRIPTPATH/docinfo*.html $OUTPUT_DIR/
}
# Main sequence: refresh "latest", then build the index file.
createLatestDocu
createHeader
addFolderEntryToAdoc 'latest'
addFolderEntryToAdoc 'v*'
createrFooter
cat $OUTPUT
| true
|
bd4c7632c344c58b329f06dbb6fa50d3b423ce7b
|
Shell
|
bladedancer/axway-demo
|
/v7-gov-demo/demo-run.sh
|
UTF-8
| 1,466
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
# Amplify Central dataplane governance demo driver: starts the discovery and
# governance agents, then applies the project resources step by step.
# desc/show/run/bgrun helpers come from ../util.sh; env vars from env.sh.
ROOT=$(dirname ${BASH_SOURCE})
. $ROOT/../util.sh
backtotop
desc "Amplify Central Dataplane Governance Demo!"
desc "This script will launch the agents and install the project."
. env.sh
desc "Creating the environment"
show "project/environment.yaml"
run "amplify central --baseUrl=$AMPLIFY_URL apply -f project/environment.yaml"
desc "Show the environment in $AMPLIFY_URL/topology/environments/$ENVIRONMENT"
desc "Start the agents"
# Each agent runs in its own xterm so logs are visible side by side.
bgrun "xterm -e discovery/discovery_agent --pathConfig `pwd`/discovery"
bgrun "xterm -e governance/governance_agent --v7 --pathConfig `pwd`/governance/governance_agent.yml"
desc "Configure the Backend API"
show "project/backend-api.yaml"
run "amplify central --baseUrl=$AMPLIFY_URL apply -f project/backend-api.yaml"
desc "Show the environment in $AMPLIFY_URL/topology/environments/$ENVIRONMENT"
desc "Configure the Virtual API"
run "amplify central --baseUrl=$AMPLIFY_URL apply -f project/virtual-api/container.yaml"
run "amplify central --baseUrl=$AMPLIFY_URL apply -f project/virtual-api/authrule.yaml"
run "amplify central --baseUrl=$AMPLIFY_URL apply -f project/virtual-api/credentials.yaml"
run "amplify central --baseUrl=$AMPLIFY_URL apply -f project/virtual-api/routing.yaml"
desc "Deploy the Virtual API to API Manager"
show "project/deployment.yaml"
run "amplify central --baseUrl=$AMPLIFY_URL apply -f project/deployment.yaml"
desc "Show the deployed API in https://$APIMANAGER_HOST:$APIMANAGER_PORT"
|
83e9a281fb033836b761d62ffdf7c8b87d24643d
|
Shell
|
rodrigohpalmeirim/dotfiles
|
/.local/bin/statusbar/cpu-utilization
|
UTF-8
| 306
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/sh
# Statusbar module: prints CPU utilization; mouse buttons open htop / help.
case $BLOCK_BUTTON in
1) $TERMINAL -e htop ;;
3) notify-send "CPU module" "\- Shows CPU utilization.
- Click to show process viewer." ;;
esac
# Sample /proc/stat twice 0.1s apart and compute busy time as the delta of
# (user+system) over the delta of (user+system+idle).
echo `grep "cpu " /proc/stat` `sleep 0.1 && grep 'cpu ' /proc/stat` | awk -v RS="" '{printf " %.1f%\n", ($13-$2+$15-$4)*100/($13-$2+$15-$4+$16-$5)}'
| true
|
d2f232648d7559fb04e4bae6059b65b90b2a2917
|
Shell
|
zot/Leisure
|
/leisure/build/runNw
|
UTF-8
| 328
| 2.5625
| 3
|
[
"Zlib"
] |
permissive
|
#!/bin/sh
# Launches node-webkit (Cygwin host) with NODE_PATH pointing at the
# project's lib/src/module directories, converted to Windows paths.
dir=$(dirname $(realpath $0))
# Convert a path relative to this script into a Windows (DOS) path.
getDir() {
cygpath -d $dir/$1
}
export NODE_PATH="$(getDir lib);$(getDir src);$(getDir node_modules);$(getDir ../../../client);$(getDir ../../../lib)"
echo -E $NODE_PATH
#exec /cygdrive/c/Apps/nw-0-9-2/nw --stack-size=3000 $dir/nwRepl
cd $dir
exec /cygdrive/c/Apps/nw-0-9-2/nw nwRepl
| true
|
d083d563f6080759fb6821d30337155c59fa03b3
|
Shell
|
fsouza/dotfiles-old
|
/nvim/langservers/setup.sh
|
UTF-8
| 3,126
| 3.625
| 4
|
[
"ISC"
] |
permissive
|
#!/usr/bin/env bash
set -eu
# Ensure a git checkout of repo $1 exists at directory $2:
# pull when the directory is already there, clone it otherwise.
function _clone_or_update() {
	repo=$1
	path=$2
	if [ ! -d "${path}" ]; then
		git clone "${repo}" "${path}"
	else
		git -C "${path}" pull
	fi
}
# Create a local opam switch in the current directory unless one is already
# selected there (opam switch show prints the active switch's path).
function _create_opam_switch_if_needed() {
	if [[ $(opam switch show) != "${PWD}" ]]; then
		opam switch create . ocaml-base-compiler.4.11.1 --with-test --yes
	fi
}

# Build ocaml-lsp from source into ${cache_dir}/ocaml-lsp.
# Silently skipped when the opam toolchain is not installed.
function install_ocaml_lsp() {
	if ! command -v opam &>/dev/null; then
		echo skipping ocaml-lsp
		return
	fi
	path="${cache_dir}/ocaml-lsp"
	_clone_or_update https://github.com/ocaml/ocaml-lsp.git "${path}" &&
		pushd "${path}" &&
		_create_opam_switch_if_needed &&
		opam exec -- dune build --root . &&
		popd
}
# Download the nightly rust-analyzer binary into ${cache_dir}/bin.
# Skipped when cargo (and thus a Rust toolchain) is absent.
function install_rust_analyzer() {
	local suffix
	if ! command -v cargo &>/dev/null; then
		echo skipping rust-analyzer
		return
	fi
	if [[ $OSTYPE == darwin* ]]; then
		suffix=mac
	elif [[ $OSTYPE == linux* ]]; then
		suffix=linux
	else
		# BUG FIX: previously ${suffix} stayed unset here, which under the
		# script's `set -u` aborted with an unhelpful "unbound variable".
		echo "skipping rust-analyzer: unsupported OS type '${OSTYPE}'"
		return
	fi
	mkdir -p "${cache_dir}/bin"
	# BUG FIX: --fail (-f) makes curl exit non-zero on HTTP errors; without it
	# a 404 page would be saved and then chmod +x'd as the "binary".
	curl -sfLo "${cache_dir}/bin/rust-analyzer" "https://github.com/rust-analyzer/rust-analyzer/releases/download/nightly/rust-analyzer-${suffix}"
	chmod +x "${cache_dir}/bin/rust-analyzer"
}
# Install the Node-based language servers pinned in package-lock.json.
function install_servers_from_npm() {
	npm ci
}

# Run `go get` for the given package spec(s), placing binaries in
# ${cache_dir}/bin.  Silently skipped when the Go toolchain is absent.
function _go_install() {
	if ! command -v go &>/dev/null; then
		echo skipping "${@}"
		return
	fi
	(
		# Run from /tmp so `go get` does not touch a local go.mod.
		cd /tmp && env GO111MODULE=on GOBIN="${cache_dir}/bin" go get "${@}"
	)
}

# Build gopls from a checkout of golang/tools into ${cache_dir}/bin.
function install_gopls() {
	if ! command -v go &>/dev/null; then
		echo skipping gopls
		return
	fi
	dir="${cache_dir}/tools"
	_clone_or_update https://github.com/golang/tools.git "${dir}" &&
		pushd "${dir}/gopls" &&
		env GOBIN="${cache_dir}/bin" go install
	# NOTE(review): no matching popd (unlike install_ocaml_lsp); harmless here
	# because the function runs in a background subshell, but worth confirming.
}

# shfmt: shell formatter.
function install_shfmt() {
	_go_install mvdan.cc/sh/v3/cmd/shfmt@master
}

# efm-langserver: general-purpose language server wrapping linters/formatters.
function install_efm() {
	_go_install github.com/mattn/efm-langserver@master
}

# Build sumneko's lua-language-server (requires ninja for its luamake bootstrap).
function install_lua_lsp() {
	if ! command -v ninja &>/dev/null; then
		echo skipping lua-lsp
		return
	fi
	# NOTE(review): ninja_file stays unset on OSes other than darwin/linux and
	# would trip `set -u` below — same pattern as install_rust_analyzer.
	if [[ $OSTYPE == darwin* ]]; then
		ninja_file=ninja/macos.ninja
	elif [[ $OSTYPE == linux* ]]; then
		ninja_file=ninja/linux.ninja
	fi
	path=${cache_dir}/lua-language-server
	_clone_or_update https://github.com/sumneko/lua-language-server "${path}" &&
		pushd "${path}" &&
		cd 3rd/luamake &&
		ninja -f "${ninja_file}" &&
		cd ../.. &&
		./3rd/luamake/luamake rebuild &&
		popd
}

# Build elixir-ls with mix into ${cache_dir}/elixir-ls/release.
function install_elixir_langserver {
	if ! command -v mix &>/dev/null; then
		echo skipping elixir-lsp
		return
	fi
	path=${cache_dir}/elixir-ls
	_clone_or_update https://github.com/elixir-lsp/elixir-ls.git "${path}" &&
		pushd "${path}" &&
		yes y | mix deps.get --force &&
		mix local.rebar --force &&
		mix compile --force &&
		mix elixir_ls.release -o release
}
# --- main -------------------------------------------------------------------
# $1: cache directory where all servers/binaries are installed.
# BUG FIX: with `set -u` in effect, a bare ${1} aborts with "unbound variable"
# before the friendly usage message below can run; default it to empty so the
# explicit check is actually reachable.
cache_dir=${1:-}
exit_status=0
# Record the first non-zero child exit status so the script can fail overall
# even though each installer runs as an independent background job.
function process_child() {
	if [[ ${1} -gt 0 ]]; then
		exit_status=${1}
	fi
}
trap 'process_child $?' CHLD
if [ -z "${cache_dir}" ]; then
	echo "the cache dir is required. Please provide it as a positional parameter" >&2
	exit 2
fi
pushd "$(dirname "${0}")"
mkdir -p "${cache_dir}"
# Fan out all installers in parallel, then barrier on wait.
install_servers_from_npm &
install_ocaml_lsp &
install_rust_analyzer &
install_gopls &
install_lua_lsp &
install_shfmt &
install_efm &
install_elixir_langserver &
wait
popd
exit "${exit_status}"
| true
|
8a3e7c47e3c6b93d9f016181497b36d069773f63
|
Shell
|
azharudd/apple-llvm-infrastructure-tools
|
/libexec/apple-llvm/helpers/mt_list_commits.sh
|
UTF-8
| 4,358
| 3.828125
| 4
|
[
"NCSA",
"LLVM-exception",
"Apache-2.0"
] |
permissive
|
# helpers/mt_list_commits.sh
#
# Helpers for streaming commit data to the monorepo translation tool.
# Shell-outs go through the external `run`/`error` helpers supplied by the
# caller's environment.

# Stream commits for directory $1 (remaining args go to `git log`) in the
# consumer's two-pass framing: "start <dir>", a newest-first metadata pass,
# "all", an oldest-first boundary pass, then "done".  %x00 (NUL) delimits
# the free-form author/date/body fields.
mt_list_commits() {
  local d="$1"
  shift
  run printf "start %s\n" "$d" &&
    run git log --date-order --date=raw \
      --format="%H %ct %T %P%x00%an%n%cn%n%ad%n%cd%n%ae%n%ce%n%B%x00" \
      "$@" &&
    run printf "all\n" &&
    run git log --date-order --date=raw --reverse --boundary \
      --format="%m%H %T %P%x00%an%n%cn%n%ad%n%cd%n%ae%n%ce%n%B%x00" \
      "$@" &&
    run printf "done\n"
}

# Emit the same framing as mt_list_commits for directory $1, but with no
# commit records at all.
mt_list_commits_empty() {
  local d="$1"
  shift
  run printf "start %s\n" "$d" &&
    run printf "all\n" &&
    run printf "done\n"
}

# Resolve the first commit matching repeat spec $1 into the caller's
# `repeat_sha1`.  When the spec carries a "{no-pass}" suffix and goal_sha1s
# is non-empty, the search is clamped with --until to the goals' committer
# date so the repeat does not pass the goals.
mt_list_first_repeat_sha1() {
  # From parent: local -a goal_sha1s
  # From parent: local repeat_sha1
  local repeat="$1"
  local repeat_stripped="${repeat%\{no-pass\}}"
  local until
  # The chain below runs the --until extraction only when the suffix was
  # present (repeat != repeat_stripped) and there is at least one goal sha1.
  # NOTE(review): the error message interpolates '$rd', which is not set in
  # this function — looks like a stale variable name; confirm upstream.
  [ "$repeat" = "$repeat_stripped" ] || [ ${#goal_sha1s[@]} -eq 0 ] ||
    until="$(run git log --format=--until=%ct -1 "${goal_sha1s[@]}")" ||
    error "failed to extract committer date for '$rd'"
  repeat_sha1="$(run \
    git rev-list -1 --first-parent $repeat_stripped $until)" ||
    error "no goal for repeat '$repeat'"
}
# Print the top-level monorepo paths whose trees differ between commits
# $1 (src) and $2 (dst), filtered by the space-separated $3 (included) and
# $4 (ignored) path lists.  A "-" entry in $included admits undeclared
# root paths except those in $ignored.
mt_list_modified_top_level_paths() {
  local src="$1"
  local dst="$2"
  local included="$3"
  local ignored="$4"
  # Find the subset of those paths that were modified. Using 'diff' here (or
  # an optimized equivalent) instead of 'log --format=' is an
  # under-approximation, but it will only be wrong if:
  #
  #   - all changes to a particular file/path in the monorepo root made in
  #     this range have been reverted within the same range; or
  #   - all changes to a particular sub-project made in this range have been
  #     reverted.
  #
  # These situations are unlikely and unimportant enough in the context of
  # repeat that we probably don't care about missing those.
  #
  # The algorithm here is:
  #
  #  1. List the trees of dst and src.
  #  2. Sort them.
  #  3. Delete any rows that are repeated.  This will leave behind entries
  #     in either 'dst' or 'src' that aren't matched in the other.
  #  4. Simplify to just the name.
  #  5. Unique the names (otherwise changed paths show up twice).
  #  6. Skip top-level paths that should be ignored.
  # Set up the logic for ignoring/including top-level paths.
  local begin= decl=
  begin="BEGIN { allow_undeclared = 0;"
  for decl in $included; do
    if [ ! "$decl" = "-" ]; then
      # Add this to the include list.
      begin="$begin include[\"$decl\"] = 1;"
      continue
    fi
    # Pull in undeclared paths from the monorepo root.
    begin="$begin allow_undeclared = 1;"
    for decl in $ignored; do
      begin="$begin ignore[\"$decl\"] = 1;"
    done
  done
  begin="$begin }"
  # Do the work.
  # NOTE(review): `awk '{ print $4 }'` assumes entry names contain no
  # whitespace — ls-tree separates the name with a TAB but awk's default FS
  # splits on any whitespace; confirm this is acceptable for the repo layout.
  {
    git ls-tree "$dst"
    [ "${src:-$ZERO_SHA1}" = $ZERO_SHA1 ] || git ls-tree "$src"
    # Format: <mode> SP <type> SP <sha1> TAB <name>
  } | sort | uniq -u | awk '{ print $4 }' | uniq |
    awk "$begin"'
      include[$0]       { print; next }
      !allow_undeclared { next }
      ignore[$0]        { next }
      { print }'
}

# Print the top-level path names touched between $1 (src) and $2 (dst),
# suppressing everything in the set ($3 ignored) minus ($4 included).
# NOTE(review): despite its name this prints path names, not a sha1; the
# unused `extra_repeated` local also suggests an unfinished refactor.
mt_find_last_sha1_for_changes() {
  local src="$1"
  local dst="$2"
  local ignored="$3"
  local included="$4"
  local begin= decl= extra_repeated=
  # Ignore everything in the set ignored - included.
  begin="BEGIN {"
  for decl in $ignored; do
    begin="$begin ignore[\"$decl\"] = 1;"
  done
  for decl in $included; do
    begin="$begin ignore[\"$decl\"] = 0;"
  done
  begin="$begin }"
  # Find the subset of those paths that were modified. Using 'diff' here
  # instead of 'log --format=' is an under-approximation, but it will only be
  # wrong if:
  #
  #   - all changes to a particular file/path in the monorepo root made in this
  #     range have been reverted (effectively) within the same range; or
  #   - all changes to a particular subproject made in this range have been
  #     reverted (effectively).
  #
  # This combination is unlikely enough that we probably don't care about
  # missing those.
  run git diff --name-only "$src".."$dst" | sed -e 's,/.*,,' | sort -u |
    run awk "$begin"' ignore[$0] { next } { print }'
}
| true
|
2b993ff5d79ffce05555c72d6c01e1bf099b28f4
|
Shell
|
sysnux/astportal
|
/astportal2/utils/mixmonitor2mp3.sh
|
UTF-8
| 491
| 2.703125
| 3
|
[] |
no_license
|
#! /bin/bash
#
# Convert a WAV file to MP3 (the WAV delete step is left commented out).
#
# Parameter: full path of the WAV file to convert
#
# Useful for Asterisk / MixMonitor, for example:
#   exten => _XXXX,n,MixMonitor(${date:0:4}/${date:4:2}/${date:6:2}/out-${CHANNEL:4:6}-${EXTEN}-${CDR(uniqueid)}.wav,,/usr/lib/asterisk/mixmonitor2mp3.sh ^{MIXMONITOR_FILENAME})
#
# Author: Jean-Denis Girard <jd.girard@sysnux.pf>

# Fail early with a usage hint instead of invoking lame with no input file.
if [ -z "$1" ]; then
	echo "usage: ${0##*/} /path/to/recording.wav" >&2
	exit 1
fi

# All expansions are quoted so recording paths containing spaces survive;
# nice -n 20 keeps the transcode from competing with Asterisk for CPU.
/bin/nice -n 20 /usr/bin/lame "$1" "$(dirname "$1")/$(basename "$1" .wav).mp3"
#/bin/rm -f "$1"
| true
|
e9261591c6ec6e9fa12b8d91233d6fa9d0cb6b70
|
Shell
|
danielrenebayer/HVAC-RL-Framework-Evaluation
|
/list-all-checkpoint-runs.sh
|
UTF-8
| 161
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/bash
# List every checkpoint run as "<run> <experiment>/<run>", sorted by run
# name, keeping only runs whose name starts with "2" (i.e. a 2xxx-style
# timestamp).
# BUG FIX: iterate with globs instead of parsing `ls` output, and quote the
# expansions — the old `for x in $(ls ...)` loop word-split any name
# containing spaces and `[ ${dt:0:1} == "2" ]` mis-parsed on empty names.
shopt -s nullglob
for exp_dir in checkpoints/*/; do
    fl=$(basename "$exp_dir")
    for run_dir in "$exp_dir"*/; do
        dt=$(basename "$run_dir")
        if [ "${dt:0:1}" == "2" ]; then
            echo "$dt" "$fl/$dt"
        fi
    done
done | sort
| true
|
093458e9ee14b679155fa86f026187d404754768
|
Shell
|
huximp/ShellPent
|
/dnszone.sh
|
UTF-8
| 93
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/bash
# Attempt a DNS zone transfer (AXFR) for the domain $1 against each of its
# authoritative name servers.
#
# Usage: ./dnszone.sh example.com
if [ -z "$1" ]; then
	echo "usage: $0 <domain>" >&2
	exit 1
fi
# `host -t ns` lists the name servers; field 4 of each answer line is the
# server host name.  Quoting "$1"/"$server" keeps odd input from word-splitting.
for server in $(host -t ns "$1" | cut -d " " -f4); do
	# -l requests the zone transfer; -a asks for all record types.
	host -l -a "$1" "$server"
done
| true
|
4273b28cdb070d01bf243399d0d3ac0b6513c712
|
Shell
|
JLoos92/master_thesis
|
/Elmer_setup/copybump.sh
|
UTF-8
| 2,958
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/zsh -l
###----------------------------------------------------------------------------#
### Copy, read and write for new gauss-bump topography
###----------------------------------------------------------------------------#
# Interactively builds a Gaussian-bump bed topography: patches the Elmer .sif
# solver input and the Python mesh script in place (by absolute line number),
# regenerates the .xyz DEM files and copies everything to the cluster.
#scp -r /Users/Schmulius/Desktop/UniMaster/Thesis/Clemens_Grid_Bump/BED_Bump400.xyz Cluster:/beegfs/work/zxmjf89/DEM/
#find ~ -name Mismip3DInflux.sif and type name of bump.xyz
#DIR = "$/Users/Schmulius/Desktop/UniMaster/Thesis/Clemens_Grid_Bump/"
# Temporarily split the read on ';' so all four values come from one line.
old_IFS="$IFS"
IFS=\;
echo "----------------------------------------------------------------------------------
Please type in the amplitude (from 140 to 500), second the distribution in x direction, third the distribution in y direction and fourth the distance (m)\n
towards icesheet from the groundingline (0 is peak at GL). Seperation by ; ... (e.g. 200;1000;2000;0)
----------------------------------------------------------------------------------"
read a x y dl
IFS=$old_IFS
echo "Amplitude = $a for the given BED topography. X and Y direction $x + $y for the distribution. Distance to GL = $dl"
# Find and replace line in .sif file for given amplitude $x (changes BED)
# (sed -i '' is the BSD/macOS in-place form; lines 181/198 hold the DEM paths.)
find . -type f -name "Mismip3DInflux.sif" | xargs sed -i '' '181s/.*/Variable 2 data file = File ".\/DEM\/BED_bump'$a''$x''$y'_'$dl'.xyz"/g' Mismip3DInflux.sif
find . -type f -name "Mismip3DInflux.sif" | xargs sed -i '' '198s/.*/Variable 2 data file = File ".\/DEM\/ZB_bump'$a''$x''$y'_'$dl'.xyz"/g' Mismip3DInflux.sif | scp -r Mismip3DInflux.sif Cluster:/beegfs/work/zxmjf89/
#find . -type d -name "Cluster:/beegfs/work/zxmjf89/Mismip3DInflux.sif" | xargs sed -i '' '181s/.*/Variable 1 data file = File ".\/DEM\/BED_bump'$x'.xyz"/g' Mismip3DInflux.sif
#ssh Cluster "find /beegfs/work/zxmjf89 -name Mismip3DInflux.sif" | xargs sed -i '' '181s/.*/Variable 2 data file = File ".\/DEM\/ZB_bump'$x'.xyz"/g' SubmitScriptESD1.sh
# Rename the PBS job (line 7) after the bump parameters, then upload.
find . -type f -name "SubmitScriptESD1.sh" | xargs sed -i '' '7s/.*/#PBS -N Bump'$a''$x''$y'_msh/g' SubmitScriptESD1.sh | scp -r SubmitScriptESD1.sh Cluster:/beegfs/work/zxmjf89/
# Find and replace line in .mismip_geoscript.py and creates new .py and .xyz BED file
find . -type f -name "mismip_geoscript.py" | xargs sed -i '' '191s/.*/maxAmplitude = '$a'/g' mismip_geoscript.py # > mismip_geoscript$x.py
find . -type f -name "mismip_geoscript.py" | xargs sed -i '' '192s/.*/sigmax = '$x'/g' mismip_geoscript.py
find . -type f -name "mismip_geoscript.py" | xargs sed -i '' '193s/.*/sigmay = '$y'/g' mismip_geoscript.py # > mismip_geoscript$x.py
find . -type f -name "mismip_geoscript.py" | xargs sed -i '' '205s/.*/dl = '$dl'/g' mismip_geoscript.py # > mismip_geoscript$x.py
# Regenerate the DEM .xyz files with the patched parameters and upload them.
python mismip_geoscript.py
scp -r BED_bump$a$x$y_$dl.xyz Cluster:/beegfs/work/zxmjf89/DEM/
scp -r ZB_bump$a$x$y_$dl.xyz Cluster:/beegfs/work/zxmjf89/DEM/
#scp -r Mismip3DInflux.sif Cluster:/beegfs/work/zxmjf89/
#scp -r Cluster:/beegfs/work/zxmjf89/Mesh /Users/Schmulius/Desktop
| true
|
20006ff77dc19a7e24a53cbeaa805280c5ae794e
|
Shell
|
mashimaroa15/raspberrypi
|
/install/nodejs.sh
|
UTF-8
| 557
| 3.328125
| 3
|
[
"MIT"
] |
permissive
|
# Install NodeJS & NPM via nvm (Node Version Manager).
echo -e "\n--- Installing NodeJS & NPM ---\n"
# Download and run the nvm installer (all output silenced).
curl -o- https://raw.githubusercontent.com/creationix/nvm/v0.32.0/install.sh | bash > /dev/null 2>&1
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && . "$NVM_DIR/nvm.sh" # This loads nvm
source ~/.bashrc > /dev/null 2>&1
# Install the latest node release and make it the default for new shells.
nvm install node
nvm alias default node
nvm ls
# OLD stuff. Let's install nodeJS with NVM
#curl -sL https://deb.nodesource.com/setup_5.x | sudo bash - > /dev/null 2>&1
#sudo apt-get install nodejs -y > /dev/null 2>&1
#echo "Node Version: "
#node -v
#echo "NPM Version "
#npm -v
| true
|
2a5571b2044f323eb896921a24a4e6fd63dfa246
|
Shell
|
hchiao/aws-app-challenge
|
/scripts/clean-up.sh
|
UTF-8
| 517
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/bash
# Tear down everything the deploy scripts created: local build artifacts,
# the S3 artifact bucket, the CloudFormation stack and the Lambda log group.
set -eo pipefail
# Local build output and Python bytecode.
rm -f output.yml output.json function/*.pyc
rm -rf package function/__pycache__
# Delete the artifact bucket recorded by the setup script
# (`rb --force` also removes any objects still inside it).
ARTIFACT_BUCKET=$(cat bucket-name.txt)
aws s3 rb --force s3://$ARTIFACT_BUCKET
rm bucket-name.txt
STACK=aws-s3-lifecycle
# Resolve the Lambda's physical name before the stack disappears so its
# CloudWatch log group can be deleted afterwards.
FUNCTION=$(aws cloudformation describe-stack-resource --stack-name $STACK --logical-resource-id function --query 'StackResourceDetail.PhysicalResourceId' --output text)
aws cloudformation delete-stack --stack-name $STACK
aws logs delete-log-group --log-group-name /aws/lambda/$FUNCTION;
| true
|
1476bf1f7a24db69dc0f51ee990c14ddc01f3958
|
Shell
|
avimehenwal/fan-gallery
|
/scripts/compress-images.sh
|
UTF-8
| 828
| 3.984375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# AUTHOR  : avimehenwal
# DATE    : 20 - Sep - 2020
# PURPOSE : image compression optimization
# FILENAME: compress-images.sh
#
# Compress images from lossless png format to lossy webp compression

# An images directory argument is required.
if [ $# -eq 0 ]; then
	echo "ERROR:: Please provide an absolute/relative path to images directory"
	exit 1
fi

echo "$(pwd)"
echo "$(dirname "$0")"
echo "$0"

APT_DEPENDENCIES=(cwebp)        # external tool this script relies on
QUALITY=100                     # cwebp quality setting (0-100)
IP_DIR=$1                       # input directory (from CLI)
OUT_DIR="webp"                  # name of the output subdirectory
REL_OUT_DIR="$IP_DIR/$OUT_DIR"

# Create the output directory on first run.
if [ ! -d "$REL_OUT_DIR" ]; then
	mkdir --verbose "$REL_OUT_DIR"
fi

# BUG FIX: use a NUL-delimited find/read loop instead of
# `for file in $(find ...)` — the old loop word-split file names containing
# spaces or newlines.  All expansions are quoted for the same reason.
while IFS= read -r -d '' file; do
	file_name=$(basename "${file%.png}")
	OUT_FILE="$REL_OUT_DIR/$file_name.webp"
	# echo -e "$file -> $file_name : $OUT_FILE"
	cwebp -preset drawing \
		-q "$QUALITY" -lossless -progress -short \
		"$file" -o "$OUT_FILE"
done < <(find "$IP_DIR" -type f -name '*.png' -print0)
# END
| true
|
382ace9e8b82d743015d2077958b9395b02478e8
|
Shell
|
adambloniarz/CAGe
|
/scripts/dump_reference.sh
|
UTF-8
| 394
| 3.21875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Dump reference fasta file in preparation for running CAGe.
# Strips all whitespace and creates file such that the nth
# byte of the file is the nth base of the reference genome
# Usage:
# ./dump_reference.sh <INPUT_FASTA> <CHROMOSOME> <OUTPUT_FILE>
# Example:
# ./dump_reference.sh na12878.fa chr20 na12878_chr20.txt
#
# BUG FIX: quote the three positional arguments so paths or region names
# containing spaces/globs are passed through intact.
# tail -n +2 drops the FASTA header line; perl strips every newline.
samtools faidx "$1" "$2" | tail -n +2 | perl -p -e 's/\n//g' > "$3"
| true
|
640915a431fda9a3d94ed7b8afbee22736a635de
|
Shell
|
rkeene/KitCreator
|
/tclcurl/build.sh
|
UTF-8
| 770
| 3.09375
| 3
|
[
"BSD-3-Clause",
"Zlib",
"MIT",
"TCL"
] |
permissive
|
#! /usr/bin/env bash
# BuildCompatible: KitCreator
version='7.22.0'
url="https://github.com/flightaware/tclcurl-fa/archive/1fd1b4178a083f4821d0c45723605824fbcdb017.tar.gz"
sha256='5abad0f369205b8369819f3993a700bb452921bcab7f42056ef29a1adc3eb093'
tclpkg='TclCurl'

# Post-install hook invoked by KitCreator after the package is built.
function postinstall() {
	if [ "${pkg_configure_shared_build}" = '0' ]; then
		(
			# Recover the extra link libraries curl needs from configure's log.
			eval "$(grep '^PKG_LIBS=' config.log)" || exit 1
			# BUG FIX: write each .linkadd next to its static archive; the loop
			# variable was read but never used in the redirect target before.
			find "${installdir}" -type f -name '*.a' | while IFS='' read -r filename; do
				echo "${PKG_LIBS}" > "${filename}.linkadd"
			done
		) || return 1
		# Generate a pkgIndex.tcl with the version number substituted in.
		cat << \_EOF_ | sed "s|@@VERSION@@|${version}|g"> "${installdir}/lib/TclCurl${version}/pkgIndex.tcl"
package ifneeded TclCurl @@VERSION@@ [list load {} TclCurl]\n[list source [file join $dir tclcurl.tcl]]
_EOF_
	fi
}
| true
|
9e6b824f49026e2bfd7d1dc2cc43502192f08406
|
Shell
|
vruano/warp
|
/dockers/skylab/emptydrops/build.sh
|
UTF-8
| 224
| 3.203125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Build the DropletUtils docker image, tagged with the version given as $1.
#
# Usage: bash build.sh TAG
tag=$1
# BUG FIX: quote the variable — `[ -z $tag ]` mis-parses when the tag is
# empty or contains whitespace.
if [ -z "$tag" ]; then
	echo -e "\nYou must provide a tag"
	echo -e "\nUsage: bash build_docker.sh TAG\n"
	exit 1
fi
docker build -t "quay.io/humancellatlas/secondary-analysis-dropletutils:$tag" .
| true
|
793dd1ce13a7734cf5bc17fbdeb7204faa32b2b2
|
Shell
|
yanjiew1/mozbuildtools
|
/start-script/compress.sh
|
BIG5
| 1,851
| 2.90625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
#Copyright (c) 2008, Jay Wang
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
#    * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#    * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#    * Neither the name of the <ORGANIZATION> nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Compress every .exe/.dll under the build tree with UPX.
# NOTE(review): the echo strings below are BIG5-encoded Chinese status
# messages that have been mangled by a transcoding step; they are preserved
# byte-for-byte as found.
echo "        bإYɮײM..."
# Collect the candidate files into a temporary list named "compress".
msys/bin/find -name '*.exe' > compress
msys/bin/find -name '*.dll' >> compress
echo "        bY..."
# Compress each listed file; UPX output goes to the scratch file "logs".
for file in `msys/bin/cat compress`
do
	echo "        Yɮ "$file
	tools/upx $file > logs 2> logs
done
# Clean up the scratch files and wait for a keypress before exiting.
msys/bin/rm -dr logs
msys/bin/rm -dr compress
echo "  !"
read
| true
|
cee99c436e4e59d40905092f9d2bf661051cc845
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/gcp-cups-connector/PKGBUILD
|
UTF-8
| 1,377
| 2.84375
| 3
|
[] |
no_license
|
# Maintainer: grimsock <lord.grimsock at gmail dot com>
# Contributor: Fraser P. Newton <fpnewton90 [at] gmail [dot] com>

# Arch Linux PKGBUILD for Google's GCP CUPS Connector (VCS package built
# from the upstream git HEAD with the Go toolchain).
pkgname=gcp-cups-connector
pkgver=v1.12.r2.g27a84c8
pkgrel=1
pkgdesc="The Google Cloud Print (aka GCP) CUPS Connector shares CUPS printers with users of Google Cloud Print."
arch=('i686' 'x86_64' 'armv7h' 'armv6h')
url="https://github.com/google/cups-connector"
license=('BSD')
depends=('cups' 'net-snmp')
makedepends=('go' 'net-snmp' 'avahi' 'git' 'bzr')
optdepends=('gcp-cups-connector-systemd')
source=('gcp-cups-connector::git+https://github.com/google/cups-connector.git')
md5sums=('SKIP')
_gourl=github.com/google/cups-connector

# Derive the package version from the most recent git tag
# (standard makepkg VCS-package convention).
pkgver() {
  cd "$pkgname"
  git describe --long --tags | sed 's/\([^-]*-g\)/r\1/;s/-/./g'
}

# Fetch and build both Go binaries into $srcdir/bin.
build() {
  GOPATH="$srcdir" go get -fix -v -x ${_gourl}/gcp-cups-connector
  GOPATH="$srcdir" go get -fix -v -x ${_gourl}/gcp-connector-util
}

# Install the binaries plus any license files, then print setup hints.
package() {
  mkdir -p "$pkgdir/usr/bin"
  install -p -m755 "$srcdir/bin/"* "$pkgdir/usr/bin"

  for f in LICENSE COPYING LICENSE.* COPYING.*; do
    if [ -e "$srcdir/src/$_gourl/$f" ]; then
      install -Dm644 "$srcdir/src/$_gourl/$f" "$pkgdir/usr/share/licenses/$pkgname/$f"
    fi
  done
  echo ""
  echo "Please use gcp-connector-util init to generate a config json file in /etc/gcp-cups-connector"
  echo "or don't forget to update config file using gcp-connector-util update-config file"
}
| true
|
901e80ec341a5647dad477cb13e2d1dbb7b86b3a
|
Shell
|
a2i2/hassio-to-pubsub
|
/hassio-to-pubsub/rootfs/run.sh
|
UTF-8
| 808
| 2.765625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/with-contenv bashio
# Home Assistant add-on entrypoint: exports the add-on configuration and the
# supervisor websocket credentials, then starts the forwarder to GCP Pub/Sub.
# Fail hard on errors.
set -euo pipefail

# Load configurable options from options.json.
echo "Loading configuration..."
home_id="$(bashio::config 'home_id')"
gcp_project_id="$(bashio::config 'gcp_project_id')"
export HOME_ID="${home_id}"
export GCP_PROJECT_ID="${gcp_project_id}"

# Load GCP service account.
# The JSON blob from the add-on options is written to disk because Google's
# client libraries expect a credentials *file* path.
service_account_json="$(bashio::config 'service_account_json')"
echo "$service_account_json" > /service_account.json
export GOOGLE_APPLICATION_CREDENTIALS="/service_account.json"

# Set environment variables for websocket API access.
# https://developers.home-assistant.io/docs/add-ons/communication#home-assistant-core
export WEBSOCKET_URL="http://supervisor/core/websocket"
export ACCESS_TOKEN="${SUPERVISOR_TOKEN}"

python3 /usr/local/src/hassio-to-pubsub/main.py
| true
|
244ac3b964eea733df5cc05a0fd4cbdcd786a4d5
|
Shell
|
victorhdamian/bootstrap
|
/mac/install_consul.sh
|
UTF-8
| 459
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/bash
####################################
#
# Installs consul in macOS 10.13.6 (17G65)
# Darwin 17.7.0 2018
#
# ref.:https://www.consul.io/downloads.html
#
####################################
# latest 1.2.3
# ./install_consul.sh 1.2.3

# A consul version number is required to build the download URL; fail early
# rather than fetching a malformed URL.
if [ -z "$1" ]; then
	echo "usage: $0 <consul-version>   (e.g. $0 1.2.3)" >&2
	exit 1
fi

# "$1" is quoted everywhere so a malformed argument cannot mangle the URL
# or the cleanup below.
wget "https://releases.hashicorp.com/consul/$1/consul_$1_darwin_amd64.zip"
unzip "consul_$1_darwin_amd64.zip"
sudo mv consul /usr/local/bin/
which consul
consul -v
rm -rf "consul_$1_darwin_amd64.zip"
| true
|
93b87f8a6a39ca61d7965bdf4156d58777bd9199
|
Shell
|
paraiko/BeeDataProcessing
|
/processing_pipeline_v1/gmic_runavg_90min_single10m_files_active.sh
|
UTF-8
| 1,911
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/bash
# Build a ~90-minute running average over the 10-minute average JPGs produced
# by the previous ghosting stage.  $1 names the recording (subdirectory).
# A sliding window of up to 9 frames is kept in tempPath; processing resumes
# where a previous run stopped by comparing input vs. output file counts.
recNr=$1
#FILES=/NAS/BeeNas/01_RawData/00_rawh264files/$recNr/*.h264
inputPath="/NAS/BeeNas/03_ProcessedData/01_ghosting/00_ghost_avg-10m_jpg"
outputPath="/NAS/BeeNas/03_ProcessedData/01_ghosting/01_ghost_ravg-90m_jpg"
tempPath="/NAS/BeeNas/03_ProcessedData/01_ghosting/temp/ravg-90m"
fileCtr=0
#create output directories if they do not exist.
mkdir -p $outputPath/$recNr
mkdir -p $tempPath/$recNr
#find all jpgfiles in the top leveldir of the inputPath
FILES=`find $inputPath/$recNr/*.jpg -maxdepth 0`
# count the amount of input jpg files in the directory.
jpgCt=`find $inputPath/$recNr/*.jpg -maxdepth 0 | wc -l`
# count the amount of already processed jpg files in the directory.
ravgCt=`find $outputPath/$recNr/*.jpg -maxdepth 0 | wc -l`
# increase ravgCt with 8 to compensate for the first skipped 8 files in the average
#ravgCt=$((ravgCt+8))
#Set the filecounter
fileCtr=0
#jpgCt = ls *.h264 | wc -l
echo "jpg count: $jpgCt; runavgct: $ravgCt"
for f in $FILES
do
  #echo "Processing $f file..."
  # take action on each file. $f store current file name
  #get the filenema without the path and store it in a variable
  fn="$(basename -- $f)"
  # Only process frames that do not already have an output (resume support).
  if [ $fileCtr -ge $ravgCt ]
  then
    if [ $fileCtr -lt 8 ]
    then
      # Warm-up phase (fewer than 9 frames): average everything so far.
      nr=$(printf "%04d" $fileCtr)
      cp $f $tempPath/$recNr/$nr.jpg
      gmic -average_files $tempPath/$recNr/????.jpg,0,-1,1 -n 0,255 -o $outputPath/$recNr/$fn
    else
      # Sliding 9-frame window: add the new frame, average, drop the oldest.
      nr=$(printf "%04d" $fileCtr)
      cp $f $tempPath/$recNr/$nr.jpg
      firstFile=$((fileCtr-8))
      firstFile=$(printf "%04d" $firstFile)
      gmic -average_files $tempPath/$recNr/????.jpg,0,-1,1 -n 0,255 -o $outputPath/$recNr/$fn
      rm $tempPath/$recNr/$firstFile.jpg
      ####gmic -average_files output.mkv,0,-1,600 -n 0,255 -o $f.jpg
    fi
  fi
  fileCtr=$((fileCtr+1))
done
| true
|
fe6e3cb35b5d5cfa00d0f9ee9b9cedc672bcd2bb
|
Shell
|
UncleBen420/Smart_home_mse_iot
|
/KNX/utilz/camstream
|
UTF-8
| 1,894
| 3.65625
| 4
|
[] |
no_license
|
#!/usr/bin/env sh
################################################################################
# Prototype: webcam live stream with ffmpeg direted to a local icecast server.
# An icecast server must be setup with the following minimal mount point
# configuration:
#  <mount type="normal">
#      <mount-name>/iot-lab</mount-name>
#      <stream-name>KNX</stream-name>
#      <max-listeners>1</max-listeners>
#      <burst-size>65536</burst-size>
#      <public>0</public>
#  </mount>
################################################################################

# globals (built-in defaults; each can be overridden via the ENV options below)
_acard=
_ihost=localhost
_iport=8000
_istream=iot-lab
_vdevice=/dev/video0

# ENV options
ACARD=${ACARD:-${_acard}}
IHOST=${IHOST:-${_ihost}}
PORT=${PORT:-${_iport}}
STREAM=${STREAM:-${_istream}}
VDEVICE=${VDEVICE:-${_vdevice}}

# Audio capture is only enabled when an ALSA card id was supplied.
ffaudio_opts=
[ "${ACARD}" ] && ffaudio_opts="-f alsa -ac 1 -i hw:${ACARD}"

usage=$(cat <<EOF
Usage:
  [ENV OPTIONS] $0 USER PASSWORD
where:
  USER       User name for the icecast connection
  PASSWORD   Password for the icecast connection
ENV options
  ACARD      ALSA audio (mic) card's ID for audio stream.
             If indefined, audio streaming is disabled. Default: ${_acard}
  IHOST      Icecast hostname. Default: ${_ihost}
  PORT       Icecast port. Default: ${_iport}
  STREAM     Icecast stream name. Default: ${_istream}
  VDEVICE    v4l2 video device. Default: ${_vdevice}
Examples:
  $ VDEVICE=/dev/video1 camstream
EOF
)

# CLI args (the :? expansions abort with the usage text when missing)
user=${1:?"arg #1 missing: user name for the icecast connection. ${usage}"}
password=${2:?"arg #2 missing: password for the icecast connection. ${usage}"}

# Capture 640x480@25fps from the v4l2 device, encode VP8 in a WebM container
# sized for live streaming, and push it to the icecast mount point.
ffmpeg ${ffaudio_opts} -f v4l2 -video_size 640x480 -framerate 25 -i ${VDEVICE} \
       -f webm -cluster_size_limit 2M -cluster_time_limit 5100 \
       -content_type video/webm -c:v libvpx -b:v 500K -crf 30 -g 100 \
       -deadline good -threads 4 \
       icecast://${user}:${password}@${IHOST}:${PORT}/${STREAM}
| true
|
d2ce7dd3f1af090aec9c22502e937bbc50bb85df
|
Shell
|
ashu-mehra/acmeair
|
/app.sh
|
UTF-8
| 1,544
| 3.890625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Helpers to start/stop the application on a Liberty server inside the
# container and record the server JVM's pid for checkpoint tooling.

# Poll /logs/messages.log until Liberty reports both the web application and
# the defaultServer as started.  Gives up and exits the script (exit 1)
# after 10 retries spaced 10 seconds apart.
check_server_started() {
	local retry_counter=0
	while true;
	do
		echo "INFO: Checking if server started (retry counter=${retry_counter})"
		grep "Web application available" /logs/messages.log &> /dev/null
		local web_app_started=$?
		grep "The defaultServer server is ready to run a smarter planet" /logs/messages.log &> /dev/null
		local server_started=$?
		# Both messages must be present for the server to count as up.
		if [ ${web_app_started} -eq 0 ] && [ ${server_started} -eq 0 ]; then
			echo "INFO: Server started successfully!"
			break
		else
			if [ $retry_counter -eq 10 ]; then
				echo "ERROR: Liberty server did not start properly"
				exit 1
			fi
			retry_counter=$(($retry_counter+1))
			sleep 10s
		fi
	done
}
# Print the pid of the running java process (the Liberty server JVM).
# NOTE(review): assumes exactly one java process; with several, multiple pids
# are printed on one line.
get_server_pid() {
	echo `ps -ef | grep java | grep -v grep | awk '{ print $2 }'`
}

# Start the Liberty defaultServer in the background, wait until it is up and
# record its pid in ${CR_LOG_DIR}/${APP_PID_FILE}.
start_app() {
	# start the application and set app_pid to the pid of the application process
	/opt/ibm/helpers/runtime/docker-server.sh /opt/ibm/wlp/bin/server run defaultServer &
	check_server_started
	# NOTE(review): check_server_started exits the whole script on failure,
	# so this condition is always true when execution reaches it.
	if [ $? -eq 0 ]; then
		app_pid=$(get_server_pid)
		echo "INFO: Writing app pid ${app_pid} to ${CR_LOG_DIR}/${APP_PID_FILE}"
		echo "${app_pid}" > ${CR_LOG_DIR}/${APP_PID_FILE}
	fi
}

# Stop the Liberty defaultServer.
stop_app() {
	echo "Stopping application"
	/opt/ibm/wlp/bin/server stop defaultServer
}
| true
|
828dde5f5c8b51586b93a069771bb78e01ae3829
|
Shell
|
emmericp/ixy-perf-measurements
|
/small-pages/ring-test-for-page-size.sh
|
UTF-8
| 853
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
# USE THE CORRECT PCIE ADDRESSES OR THIS WILL CRASH YOUR SYSTEM
# run this script with both the huge page version of ixy and the normal one. compare results.
# For each ring size: patch the queue-size constants into the ixy driver
# source, rebuild, run the forwarder pinned to core 1, and collect dTLB
# perf counters for 40 s while it forwards.
kill $(pidof ixy-fwd)
# fixme: cleanup
cd /home/paul/ixy-git
for i in 64 128 256 512 1024 2048 4096
do
	echo "Running test for ring size $i"
	# Rewrite the RX/TX ring sizes in the driver, then rebuild ixy.
	sed -i "s/NUM_RX_QUEUE_ENTRIES =.*/NUM_RX_QUEUE_ENTRIES = $i;/g" src/driver/ixgbe.c
	sed -i "s/NUM_TX_QUEUE_ENTRIES =.*/NUM_TX_QUEUE_ENTRIES = $i;/g" src/driver/ixgbe.c
	make
	# adjust PCIe address here
	taskset -c 1 ./ixy-fwd 0000:03:00.1 0000:05:00.1 > ./ixy-output-ring-$i-$i.txt &
	sleep 15 # x540 takes some time to establish link after reset
	perf stat -e dTLB-loads,dTLB-load-misses,dTLB-stores,dTLB-store-misses --pid $(pidof ixy-fwd) -o ./ixy-perf-stat-ring-$i-$i.txt sleep 40 &
	sleep 60
	kill $(pidof ixy-fwd)
	sleep 3
done
| true
|
a5615bf287d38f8b40db8c2fdfa4882c6e8cda7e
|
Shell
|
mkoura/usermounts
|
/usermounts.sh
|
UTF-8
| 6,038
| 4.25
| 4
|
[] |
no_license
|
#!/bin/sh
# Requirements: zenity (optional), notify-send (optional)
#
# Mounts a user-configured list of mountpoints (read from a config file),
# prompting for passwords via zenity when running under X, or on the
# terminal otherwise.

# CLI defaults.
confirm=1
force_no_x=0
me="${0##*/}"
# Minimal hand-rolled option parser; "${1+defined}" loops while args remain.
while [ "${1+defined}" ]; do
  case "$1" in
    -n | --no-confirm ) confirm=0 ;;
    -x | --no-x ) force_no_x=1 ;;
    -c | --config ) shift ; cfg="$1" ;;
    -h | --help )
      echo "Usage: $me [-n|--no-confirm] [-x|--no-x] [-c|--config <conf_file>]" >&2; exit 0 ;;
  esac
  shift
done

## global variables
main_password=""          # cached "main" password shared between mounts
main_aborted="false"      # set when the user cancels the main prompt
mounted_count=0           # successfully mounted so far
mountpoints_count=0       # total usable mountpoints discovered
foreground_mounts=""      # indexes to mount before backgrounding
background_mounts=""      # indexes that may be mounted in background
mntpoints=""              # raw "mntpointN=..." variable dump
retval=1                  # script exit status

# check if we can run zenity and notify-send
havex=0
havenotify=0
if [ "$force_no_x" -ne 1 ] && xset -q ; then
  hash zenity && havex=1
  hash notify-send && havenotify=1
fi >/dev/null 2>&1

##
# Print an error: via zenity when not attached to a terminal but X is
# available, and always to stderr.
print_err() {
  [ -z "$1" ] && return 1
  [ ! -t 0 ] && [ "$havex" -eq 1 ] && zenity --warning --text="$1"
  echo "${me}: $1" >&2
}

# source configuration
cfg="${cfg:-$HOME/.config/usermounts/usermounts.conf}"
if [ -e "$cfg" ]; then
  . "$cfg"
else
  print_err "Config file not found: $cfg"
  exit 2
fi
get_mntpoints() {
# print all 'mntpoint' variables from all variables
# existing in current environment
set | while read line; do
case "$line" in
mntpoint*=*) echo "$line" ;;
esac
done
}
# check if mountpoint can be mounted in background
in_background() {
rv=1
eval curbg=\$mntbg"$1"
case "$curbg" in
true|1|yes|y)
rv=0
;;
esac
return $rv
}
# get indexes of configured mountpoints
get_mntnums() {
for rec in $mntpoints; do
cnum="$(echo "$rec" | { IFS='=' read mname _; printf "${mname#mntpoint} "; })"
in_background "$cnum" \
&& background_mounts="$background_mounts $cnum" \
|| foreground_mounts="$foreground_mounts $cnum"
done
}
# prompt for password in graphics if not running in terminal, otherwise in text
# $1 is the prompt title; the entered password is left in the global
# tmp_password.  Returns non-zero when the prompt is cancelled or empty.
ask_password() {
  unset tmp_password
  if [ ! -t 0 ] && [ "$havex" -eq 1 ]; then
    tmp_password="$(zenity --password --title="$1")"
    return "$?"
  else
    # Terminal path: disable echo while the password is typed.
    stty -echo
    printf "${1}: "
    read tmp_password
    stty echo
    echo
    [ -n "$tmp_password" ] && return 0 || return 1
  fi
}

# check if something is already mounted on the mountpoint
# ($1 is matched as a glob substring against the `mount` output).
mounted() {
  case $(mount) in
    *$1*) return 0 ;;
    *  ) return 1 ;;
  esac
}

# prompt for and save the "main" password
# Asks at most once; later calls reuse the cached global main_password.
main_password() {
  if [ -z "$main_password" ]; then
    ask_password "Enter main password" || return 1
    main_password="$tmp_password"
  fi
  return 0
}

# prompt for unique password
# On success the entered value is stored in the global new_password.
new_password() {
  unset new_password
  ask_password "$1" && new_password="$tmp_password" && return 0 || return 1
}

# record mount status
# $1 is a mount exit code; a zero bumps the global success counter.
# Returns $1 so callers can keep chaining on the original status.
set_status() {
  [ "$1" -eq 0 ] && mounted_count="$((mounted_count + 1))"
  return "$1"
}
# do the mounting
# Arguments are mountpoint indexes.  For each index N, the config supplies
# mntpointN (target dir), mntcmdN (mount command reading the password from
# stdin), and optionally mntdevN (source device/dir) and authN (password
# mode: none / unique / default "main").  Each mount is retried up to 3
# times to allow for mistyped passwords.
mountit() {
  # repeat for every mountpoint
  for num in "$@"; do
    # settings for current mountpoint
    eval curmntpoint=\$mntpoint"$num"
    eval curmntdev=\$mntdev"$num"     # optional
    eval curmntcmd=\$mntcmd"$num"
    eval curauth=\$auth"$num"         # optional
    # check if the mountpoint exists
    [ ! -e "$curmntpoint" ] && continue
    # check if the device (directory) to mount is present
    [ -n "$curmntdev" ] && [ ! -e "$curmntdev" ] && continue
    # check if the command is set
    [ -z "$curmntcmd" ] && continue
    mountpoints_count="$((mountpoints_count + 1))"
    # retry in case of mount failure (incorrect password?)
    for _ in 1 2 3; do
      if mounted "$curmntpoint"; then
        # already mounted
        set_status 0
        break
      fi
      case "$curauth" in
        # no authentication necessary
        "none"|"no"|"n")
          $curmntcmd
          set_status "$?" && break
          ;;
        # unique password for this mountpoint
        "unique"|"u")
          if new_password "Enter password for $curmntpoint"; then
            echo "$new_password" | $curmntcmd
            set_status "$?" && break
          else
            # break if password was not entered
            break
          fi
          ;;
        # by default use "main" password
        *)
          # don't ask for main password again
          [ "$main_aborted" = "true" ] && break
          if main_password; then
            echo "$main_password" | $curmntcmd
            # A failed mount clears the cache so the retry re-prompts.
            set_status "$?" && break || main_password=""
          else
            # break if password was not entered
            # and don't ask for main password again
            main_aborted="true"
            break
          fi
          ;;
      esac
    done
  done
}
# check if everything was mounted and exit
# Sets the global retval: 0 all mounted, 1 user aborted the main password,
# 8 some mounts failed.  A success message is shown unless --no-confirm.
final_checks() {
  if [ "$mounted_count" -ne "$mountpoints_count" -a "$main_aborted" = "false" ]; then
    print_err "Some mounts failed.
Make sure you have the same password for all encrypted mounts where 'main' password is used.
Make sure you have permissions to mount the device and/or that you configured sudo(8) correctly."
    retval=8
  elif [ "$main_aborted" = "true" ]; then
    retval=1
  else
    if [ "$confirm" -eq 1 ]; then
      msg="Everything is mounted"
      # Pick the least intrusive available channel: terminal, desktop
      # notification, zenity dialog, stderr-ish fallback.
      if [ -t 0 ]; then
        echo "$msg"
      elif [ "$havenotify" -eq 1 ]; then
        notify-send -i drive-harddisk "$msg" &
      elif [ "$havex" -eq 1 ]; then
        zenity --info --text="$msg" &
      else
        echo "${me}: $msg"
      fi
    fi
    retval=0
  fi
  return $retval
}
## main()
mntpoints="$(get_mntpoints)"
# nothing to do if no mountpoints were specified
# NOTE(review): `return` at top level only works when this script is
# *sourced*; when executed directly bash rejects it — confirm intended usage.
[ -z "$mntpoints" ] && return 0
get_mntnums
# mount everything that needs to be done in foreground
# (we need to wait until it's mounted)
mountit $foreground_mounts
# check if there is anything to be mounted in background
if [ -n "$background_mounts" ]; then
  # can we do it in background? We need zenity to prompt for password.
  if [ "$havex" -eq 1 ]; then
    # CAUTION: from now on, all remaining tasks needs to be done in background
    { mountit $background_mounts; final_checks; } &
    exit "$retval"
  else
    # No X: fall back to mounting the "background" entries synchronously.
    mountit $background_mounts
  fi
fi
final_checks
| true
|
04961af839749f7d7cad576c4918fe7c75b29ada
|
Shell
|
cdlm/infrastructure
|
/dotfiles/zsh/zshrc
|
UTF-8
| 492
| 2.546875
| 3
|
[
"MIT"
] |
permissive
|
# -*- mode: sh -*-
# Interactive zsh configuration entry point: defines the tiny module system
# (require/provide), then sources every rc snippet under $Z/rc.
Z=~/.zsh

# language options
setopt function_argzero

# module / dependency loading
# require: load & initialize a separate module
# provide: mark the module as loaded (if it does not provide an eponym function)
# (uses zsh's short-form `for name (words)` loop syntax)
function require() { for name ($*) autoload -Uz $name && $name; }
function provide() { for name ($*) function $name() { return 0; }; }

fpath=($Z/lib $fpath)

# environment & shell interaction setup
for config_file ($Z/rc/*.zsh)
  source $config_file

# rebuild the command hash table so newly installed commands are found
rehash
| true
|
2d9ef700bf7f4016ac464a5a20dbb9da1e462fe2
|
Shell
|
cristina-grosu/hdfs-preview
|
/entrypoint.sh
|
UTF-8
| 1,987
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
# Container entrypoint: render core-site.xml from its template and start the
# requested Hadoop daemon.  MODE (env or first argument) selects "headnode"
# (namenode, the default) or "datanode".
HADOOP_HOME="/opt/hadoop"
HADOOP_SBIN_DIR="/opt/hadoop/sbin"
HADOOP_CONF_DIR="/opt/hadoop/etc/hadoop"
YARN_CONF_DIR="/opt/hadoop/etc/hadoop"

. "/root/.bashrc"

TEMPLATE=/opt/hadoop/etc/hadoop/core-site.xml.template
TARGET=/opt/hadoop/etc/hadoop/core-site.xml

if [ -n "$HOSTNAME_MASTER" ]; then
    # An explicitly supplied master hostname wins.
    sed "s/HOSTNAME/$HOSTNAME_MASTER/" "$TEMPLATE" > "$TARGET"
elif [ -z "$HOSTNAME" ]; then
    # No master given and no hostname in the environment: use our own FQDN.
    HOSTNAME=$(hostname -f)
    sed "s/HOSTNAME/$HOSTNAME/" "$TEMPLATE" > "$TARGET"
fi

# MODE may come from the environment; otherwise take the first argument.
[ -n "$MODE" ] || MODE=$1

case "$MODE" in
    datanode)
        hadoop datanode
        ;;
    *)
        # "headnode" and the fallback behave identically: format then start
        # the namenode.
        /opt/hadoop/bin/hdfs namenode -format
        hadoop namenode
        ;;
esac
| true
|
91cd3949bb0fec81ed69c543cda4c4a8e7f0195f
|
Shell
|
Lex-Man/DockerCourse
|
/bash/script.sh
|
UTF-8
| 205
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
# Ensure a marker file exists, creating it on the first pass, then re-check
# once per day forever.
#
# Bug fix: the original tested "~/tmp/file7" but touched "/tmp/file7", so the
# marker was never found and the "INIT ..." branch ran on every iteration.
# Use a single path, held in $FILE, for both the test and the touch.
FILE=/tmp/file7
while true; do
  if test -f "$FILE"; then
    echo "$FILE exists."
  else
    echo "INIT ..."
    touch "$FILE"
  fi
  echo "DONE"
  sleep 1d
done
| true
|
c00af1f602fa87365f9b4e1d356e63d26724865d
|
Shell
|
barakb/testing-grid
|
/bin/check_dotnet_logs.sh
|
UTF-8
| 355
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/sh
# Scan every file of the given local build for the "no invocation in progress
# at the server" error; print each match and finish with checkfailed or
# checksucess (sic -- downstream consumers may match this exact token).
# $1 - build name under ../local-builds
BUILD_DIR=`pwd`/../local-builds/$1;
echo "investigating ${BUILD_DIR} directory"
RETVAL=checksucess;
# Fixes: restrict find to regular files (grep -c on a directory prints an
# error and leaves `[ "" -gt 0 ]` malformed) and quote "$file"; grep -q is
# the idiomatic presence test.  NOTE(review): paths containing whitespace
# still word-split in the for-loop -- acceptable for CI build trees.
for file in `find "${BUILD_DIR}" -type f`; do
  if grep -q no.invocation.in.progress.at.the.server "$file"; then
    echo `grep -Hn no.invocation.in.progress.at.the.server "$file"`;
    RETVAL=checkfailed;
  fi ;
done;
echo $RETVAL;
| true
|
65fdd4e28c0f884891cfc3d13050ead17e64e1ed
|
Shell
|
stephenwaite/openemr-kube
|
/images/openemr/first_start.sh
|
UTF-8
| 2,129
| 3.796875
| 4
|
[] |
no_license
|
#!/bin/bash
set -x -e -o pipefail
# Configure OpenEMR against the MySQL instance described by the environment
# (MYSQL_HOST, MYSQL_ROOT_USER/PASS, MYSQL_USER/PASS, MYSQL_DATABASE,
# OE_USER/PASS).  Returns 1 if auto_configure.php fails; exits 2 when the
# written site config still reports "unconfigured"; otherwise optionally
# enables the REST API when ACTIVATE_API=yes.
auto_setup() {
    # Mandatory settings first; each optional key=value pair is appended only
    # when its variable is non-empty (${VAR:+word} expands to nothing
    # otherwise).
    CONFIGURATION="server=${MYSQL_HOST} rootpass=${MYSQL_ROOT_PASS} loginhost=%"
    CONFIGURATION="${CONFIGURATION}${MYSQL_ROOT_USER:+ root=${MYSQL_ROOT_USER}}"
    CONFIGURATION="${CONFIGURATION}${MYSQL_USER:+ login=${MYSQL_USER}}"
    CONFIGURATION="${CONFIGURATION}${MYSQL_PASS:+ pass=${MYSQL_PASS}}"
    CONFIGURATION="${CONFIGURATION}${MYSQL_DATABASE:+ dbname=${MYSQL_DATABASE}}"
    CONFIGURATION="${CONFIGURATION}${OE_USER:+ iuser=${OE_USER}}"
    CONFIGURATION="${CONFIGURATION}${OE_PASS:+ iuserpass=${OE_PASS}}"
    # Credentials for the direct mysql call below; default to "openemr".
    CUSTOM_USER="${MYSQL_USER:-openemr}"
    CUSTOM_PASSWORD="${MYSQL_PASS:-openemr}"
    CUSTOM_DATABASE="${MYSQL_DATABASE:-openemr}"
    # Word-splitting of ${CONFIGURATION} is intentional: each key=value pair
    # must reach auto_configure.php as a separate argument.
    php auto_configure.php -f ${CONFIGURATION} || return 1
    echo "OpenEMR configured."
    # Sanity-check the generated site config; $config is 0 when unconfigured.
    CONFIG=$(php -r "require_once('/var/www/localhost/htdocs/openemr/sites/default/sqlconf.php'); echo \$config;")
    if [ "$CONFIG" == "0" ]; then
        echo "Error in auto-config. Configuration failed."
        exit 2
    fi
    #Turn on API from docker
    if [ "$ACTIVATE_API" == "yes" ]; then
        mysql -u "$CUSTOM_USER" --password="$CUSTOM_PASSWORD" -h "$MYSQL_HOST" -e "UPDATE globals SET gl_value = 1 WHERE gl_name = \"rest_api\"" "$CUSTOM_DATABASE"
    fi
}
# Seed the default site directory on first boot only.
if [ ! -d /var/www/localhost/htdocs/openemr/sites/default/ ]; then
    cp -a /var/www/localhost/htdocs/openemr/sites-seed/* /var/www/localhost/htdocs/openemr/sites
fi
echo "Running quick setup!"
# Retry once per second until the database is reachable and setup succeeds.
until auto_setup; do
    echo "Couldn't set up. Any of these reasons could be what's wrong:"
    echo " - You didn't spin up a MySQL container or connect your OpenEMR container to a mysql instance"
    echo " - MySQL is still starting up and wasn't ready for connection yet"
    echo " - The Mysql credentials were incorrect"
    sleep 1;
done
echo "Setup Complete!"
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.