blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
db4bf94dfce97da4e4adc70d1907c4f7d8e1c3dc | Shell | krupatil/awesome-dotfiles | /home/.scripts/entr.sh | UTF-8 | 565 | 4.15625 | 4 | [] | no_license | #!/bin/bash
#
# Breaks down the steps needed to run entr for a set of file extensions into
# something a little bit easier to grok
# Run the BUILD && RUN action through entr whenever a matching file changes.
CMD=eval
# BUG FIX: the usage line requires four arguments (WHERE, EXT list, BUILD,
# RUN) but the original only checked for at least one; with fewer than four
# the ACTION and REGEX below were silently malformed.
if [ $# -lt 4 ]; then
	echo "Usage $0 WHERE 'EXT [EXT...]' BUILD RUN" >&2
	exit 1
fi
WHERE="${1}"
EXTS="${2}"
BUILD="${3}"
RUN="${4}"
# Command entr runs on every change: rebuild, then (re)run.
ACTION="${BUILD} && ${RUN}"
# Build a find(1) basic-regex alternation like '.*\.\(c\|h\)' from the
# space-separated extension list.
REGEX=""
for EXT in ${EXTS}; do
	[ -z "${REGEX}" ] && REGEX=".*\\.\\(${EXT}" && continue
	REGEX="${REGEX}\\|${EXT}"
done
REGEX="${REGEX}\\)"
# Watch all matching files (excluding build directories) and rerun ACTION
# with a cleared screen (-c) whenever one of them changes.
$CMD "find \"${WHERE}\" -regex \"${REGEX}\" -not -regex \".*/build/.*\" | entr -c bash -c \"${ACTION}\""
| true |
140e5424eb690b43cfda5bc5161fbc952e65a4f8 | Shell | ymzuiku/use-dockerfile | /compose.sh | UTF-8 | 740 | 2.71875 | 3 | [] | no_license | #!/bin/bash
# Clear out any previous checkout so we start from a clean state
rm -rf gits || echo ""
# Clone the service repositories (shallow clones, history ignored)
git clone https://github.com.cnpmjs.org/ymzuiku/dockerfile-test-server-a.git gits/server-a --depth 1
git clone https://github.com.cnpmjs.org/ymzuiku/dockerfile-test-server-b.git gits/server-b --depth 1
# Pre-pull the shared base image to shorten the image build time
docker pull node
# Build and start image a
cd gits/server-a
docker build -t server-a .
docker rm -f server-a || echo "" # remove a possible leftover container first
docker run -p 0.0.0.0:4100:3900 --name server-a -d server-a
# Build and start image b
cd ../server-b
docker build -t server-b .
docker rm -f server-b || echo ""
docker run -p 0.0.0.0:4101:3900 --name server-b -d server-b
# Return to the directory the script was started from
cd ../../
4305c001505510c7e1ac5ff8d652caf84e2f1094 | Shell | khatrimann/Canoe | /Canoe/bin/gunicorn_start | UTF-8 | 524 | 2.734375 | 3 | [] | no_license | #!/bin/bash
# Launch gunicorn for the "canoe" Django project. Intended to be exec'ed by
# a process supervisor; the app is served over a unix socket that the
# front-end web server proxies to.
NAME="canoe"
DIR=/home/canoe/Canoe/src
USER=canoe
GROUP=canoe
WORKERS=3
BIND=unix:/home/canoe/run/gunicorn.sock
DJANGO_SETTINGS_MODULE=canoe.settings
DJANGO_WSGI_MODULE=canoe.wsgi
LOG_LEVEL=error
# Enter the project tree and activate its virtualenv (one level up).
cd $DIR
source ../bin/activate
export DJANGO_SETTINGS_MODULE=$DJANGO_SETTINGS_MODULE
export PYTHONPATH=$DIR:$PYTHONPATH
# exec replaces this shell so the supervisor tracks gunicorn directly;
# --log-file=- sends the log to stdout.
exec ../bin/gunicorn ${DJANGO_WSGI_MODULE}:application \
  --name $NAME \
  --workers $WORKERS \
  --user=$USER \
  --group=$GROUP \
  --bind=$BIND \
  --log-level=$LOG_LEVEL \
  --log-file=-
| true |
ef68bc1ed9e2016c66eb69b45d8a0fc4bfbe2443 | Shell | supreet-s8/VERIZONBR | /BRUPGRADE/downgradeImage.sh | UTF-8 | 1,002 | 3.296875 | 3 | [] | no_license | #!/bin/bash
# Helpers to downgrade the software image on BR nodes via the remote CLI;
# expects $SSH to be defined by env.sh.
source env.sh
#######################################
# Point the boot loader of each given host at the bizreflex3.2.rc3 image
# partition, verify the next-boot image and ask for confirmation.
# Arguments: $1 - whitespace separated list of hosts
#######################################
function downgradeImage {
host=$1
for i in $host
do
echo "Downgrading image on $i"
# Find the partition location that holds the target image version.
location=`$SSH $i "/opt/tms/bin/cli -t 'en' 'conf t' 'sh images'" | grep -A 1 "Partition" | grep -v ^"--" | sed 'N;s/\n/ /' | grep bizreflex3.2.rc3 | awk '{print $2}' | sed 's/://' | head -1`
$SSH $i "/opt/tms/bin/cli -t 'en' 'conf term' 'image boot location $location' 'wr mem' "
echo "------- VERIFYING NEXT BOOT Image -----------"
IMAGE=`$SSH ${i} '/opt/tms/bin/cli -t "en" "conf t" "show images" | grep "Next boot partition"' | awk '{ print $NF }'`
echo -n "Next Boot Image on ${i} is : "
$SSH ${i} "/opt/tms/bin/cli -t 'en' 'conf t' 'show images'" | grep -A1 "Partition $IMAGE:" | tail -1 | awk '{ print $2 }'
echo " PROCEED WITH SYSTEM REBOOT "
# read(1) with no variable name stores the answer in $REPLY (bash).
read -p "Continue (y): "
[ "$REPLY" != "y" ] && exit 0
done
}
# Reboot the given node via the remote CLI.
function reload {
node=$1
echo "Rebooting $node...."
$SSH $node "/opt/tms/bin/cli -t 'en' 'conf term' 'reload' "
}
downgradeImage $1
reload $1
| true |
5d97aebdc3e6457de943fb0c1641b1319d27bdc2 | Shell | harshi0806/basicShellScriptProgram | /sequencesSelection/randomDiceNumber.sh | UTF-8 | 84 | 2.703125 | 3 | [] | no_license | #!/bin/bash
# Roll a six-sided die using bash's $RANDOM.
# (coreutils alternative: shuf -i 1-6 -n 1)
dice=$(( RANDOM % 6 + 1 ))
echo "The no. is:" $dice
| true |
a3e7cbc0aff9d484deb1d6dac8437c6dcf150f63 | Shell | WhiteWaterChina/SIT_SCRIPTS | /scripts/cipanshunxu/compare.sh | UTF-8 | 564 | 2.96875 | 3 | [] | no_license | #!/bin/bash
# One iteration of a disk-ordering soak test: read the serial numbers of the
# disks listed in /root/order-disk.csv, compare them against the expected
# list in /root/serial-all.csv, log the result, then reboot for the next
# round. The iteration counter lives in /root/count.
Times=`cat /root/count`
# Give the system time to settle after boot before probing the disks.
sleep 45
echo "This is $Times times test!" >> /root/shunxu.log
rm -rf /root/serial-temp.csv
while read line
do
echo $line >> /root/shunxu.log
smartctl -a $line|grep "Serial" | awk '{print $3}' >> /root/serial-temp.csv
smartctl -a $line|grep "Serial" | awk '{print $3}' >> /root/shunxu.log
done < /root/order-disk.csv
# Any difference means the disks enumerated in a different order this boot.
diff /root/serial-temp.csv /root/serial-all.csv
if [ ! $? -eq 0 ]
then
echo "error" >>/root/shunxu.log
else
echo "OK" >>/root/shunxu.log
fi
# Bump the iteration counter and reboot for the next pass.
Times=`echo $Times +1|bc`
echo $Times >/root/count
sleep 10
reboot
| true |
dcb5855be1cfc3459f6c0b0036e25d5728f8c1c1 | Shell | moxuetianya/bazel-compilation-database | /tests/run_tests.sh | UTF-8 | 2,040 | 3.6875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -euo pipefail
# Run from the directory that contains this script.
cd "$(dirname "${BASH_SOURCE[0]}")"
# Provides the ${bazel} wrapper used below.
source "bazel.sh"
# The expected files are generated by:
#   ../generate.py
#   jq 'map(.directory = "EXECROOT")' compile_commands.json > ${expected}
# Golden files differ per platform (system include paths etc.).
if [[ "$(uname -s)" == "Darwin" ]]; then
  expected="expected_macos.json"
  expected_ycm="expected_ycm_macos.json"
else
  expected="expected_ubuntu.json"
  expected_ycm="expected_ycm_ubuntu.json"
fi
# Bazel execution root; golden files use the EXECROOT placeholder for it.
execroot="$(bazel info execution_root)"
readonly execroot
#######################################
# Compare a generated compile_commands.json against the golden file.
# Globals:   expected, execroot (read)
# Arguments: $1 - path of the generated compilation database
#            $2 - "true" when the DB covers the full repo, else "false"
# Returns:   non-zero when the databases differ or jq is missing
#######################################
check_compdb() {
  local want="$expected"
  local got="$1"
  local full_compdb="$2"
  # Sort both sides so entry order does not matter.
  jq_got_cmd='sort_by(.file)'
  jq_want_cmd="${jq_got_cmd}"
  # Golden files carry a placeholder directory; substitute the real execroot.
  jq_want_cmd+=" | map(.directory = \"${execroot}\")"
  if ! "${full_compdb}"; then
    # Remove the source file for the target not included in our //:compdb target.
    jq_want_cmd+=" | map(. | select(.file != \"stdlib.cc\"))"
  fi
  # Check jq is installed.
  if ! command -v jq >/dev/null; then
    >&2 echo "jq not installed; aborting."
    exit 1
  fi
  diff --unified=100 <(jq "${jq_want_cmd}" "${want}") <(jq "${jq_got_cmd}" "${got}")
  # The virtual-includes tree must have been materialized as a side effect.
  test -f bazel-bin/_virtual_includes/a/dir_a/a.h
}
# Let's keep the C++ toolchain simple by not using Xcode specific bazel wrappers.
# https://cs.opensource.google/bazel/bazel/+/master:tools/cpp/cc_configure.bzl;l=122;drc=644b7d41748e09eff9e47cbab2be2263bb71f29a
# Also, tell xcode-select to always use CommandLineTools, whether or not Xcode is installed.
export BAZEL_USE_CPP_ONLY_TOOLCHAIN=1
export DEVELOPER_DIR=/Library/Developer/CommandLineTools
"${bazel}" sync --configure # Reset the cached toolchain.
echo
# Mode 1: compilation database built for explicitly listed targets only.
echo "Checking specific targets mode"
"${bazel}" clean
"${bazel}" build :compdb
check_compdb bazel-bin/compile_commands.json false
echo "SUCCESS!"
echo
# Mode 2: full-repository compilation database via generate.py.
echo "Checking full repo mode"
"${bazel}" clean
../generate.py
check_compdb compile_commands.json true
echo "SUCCESS!"
echo
# Mode 3: the YCM plugin must resolve flags for a single source file.
echo "Checking YCM plugin"
diff --unified=100 \
  <(sed -e "s@EXECROOT@${execroot}@" -e "s@PWD@${PWD}@" "${expected_ycm}") \
  <(python3 ../.ycm_extra_conf.py a.cc)
echo "SUCCESS!"
| true |
50c9aed8b8b1c4d71c7f706b5411395bead5a254 | Shell | gfredericks/dotfiles | /base/bin/persistently | UTF-8 | 410 | 4.3125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Runs the given command until it returns successfully.
#
# Usage: persistently [sleep-seconds] cmd...
# An optional leading integer argument is the delay (in seconds) between
# attempts; without it the command is retried immediately.

USAGE="Usage: $0 [sleep-seconds] cmd..."

if [ "$#" -eq 0 ]; then
    echo "$USAGE" >&2
    exit 1
fi

# Peel off the optional numeric sleep argument.
re='^[0-9]+$'
if [[ "$1" =~ $re ]] ; then
    SLEEPSEC="$1"
    shift
    # BUG FIX: a delay with no command would previously loop silently.
    if [ "$#" -eq 0 ]; then
        echo "$USAGE" >&2
        exit 1
    fi
fi

# BUG FIX: quote "$@" so arguments containing whitespace are preserved
# intact (the original unquoted $@ re-split every argument).
until "$@"
do
    if [ -z "$SLEEPSEC" ]; then
        echo "Trying again..."
    else
        echo "Trying again in $SLEEPSEC seconds..."
        sleep "$SLEEPSEC"
    fi
done
| true |
daf5c236704dfb76cc59294fbdba15eecd894888 | Shell | MartinWeise/probe-engine | /publish-android.bash | UTF-8 | 1,054 | 3.25 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
set -e
# Publish the oonimkall Android AAR + POM to Bintray. The version is a UTC
# timestamp, so every run publishes a brand new artifact.
pkgname=oonimkall
version=$(date -u +%Y.%m.%d-%H%M%S)
baseurl=https://api.bintray.com/content/ooni/android/$pkgname/$version/org/ooni/$pkgname/$version
aarfile=./MOBILE/dist/$pkgname.aar
pomfile=./MOBILE/dist/$pkgname-$version.pom
pomtemplate=./MOBILE/template.pom
user=bassosimone
# Instantiate the POM template with the computed version.
cat $pomtemplate|sed "s/@VERSION@/$version/g" > $pomfile
# Credentials come from the environment; fail fast when missing.
if [ -z $BINTRAY_API_KEY ]; then
    echo "FATAL: missing BINTRAY_API_KEY variable" 1>&2
    exit 1
fi
# We currently publish every commit. To cleanup we can fetch all the versions using the
# <curl -s $user:$BINTRAY_API_KEY https://api.bintray.com/packages/ooni/android/oonimkall>
# query, which returns a list of versions. From such list, we can delete the versions we
# don't need using <DELETE /packages/:subject/:repo/:package/versions/:version>.
curl -sT $aarfile -u $user:$BINTRAY_API_KEY $baseurl/$pkgname-$version.aar?publish=1 >/dev/null
curl -sT $pomfile -u $user:$BINTRAY_API_KEY $baseurl/$pkgname-$version.pom?publish=1 >/dev/null
# Print the Gradle dependency line for consumers of this build.
echo "implementation 'org.ooni:oonimkall:$version'"
| true |
1c22849291386dcc389b47230f7f67992ece32c9 | Shell | NGSNetwork/sm-plugins | /data/tf2idbupdate.sh | UTF-8 | 493 | 3.578125 | 4 | [
"Unlicense"
] | permissive | #!/bin/bash
# Update TF2IDB DB, V1
# DEPENDENCIES:
#   python3
# Place in the sourcemod/data directory and point it to the other directories.
# This simply runs the python script for updating, and copies it where needed.
# Very simplistic program, set what you need with absolute paths.
TF2IDBSCRIPTDIR="/path/to/tf2idb"
SOURCEMODDATADIR="/path/to/sourcemod/data"
cd $TF2IDBSCRIPTDIR
echo "Working in:"
pwd
# Regenerate the SQLite item database and install it for SourceMod.
python3 tf2idb.py
cp tf2idb.sq3 $SOURCEMODDATADIR/sqlite
echo "Finished!"
| true |
79945e66eec4ccb6fe81efa1fe4329610974068e | Shell | larsks/docker-image-sshproxy | /start.sh | UTF-8 | 374 | 3.015625 | 3 | [] | no_license | #!/bin/sh
# Establish an SSH reverse tunnel: expose local port $SSH_LOCALPORT on
# $SSH_TARGET:$SSH_REMOTEPORT, authenticating with the key in $SSH_KEY.
SSH_KEY_FILE=/etc/ssh/proxykey

# All four settings are required; fail loudly instead of silently.
[ "$SSH_KEY" ]        || { echo "SSH_KEY not set" >&2;        exit 1; }
[ "$SSH_LOCALPORT" ]  || { echo "SSH_LOCALPORT not set" >&2;  exit 1; }
[ "$SSH_REMOTEPORT" ] || { echo "SSH_REMOTEPORT not set" >&2; exit 1; }
[ "$SSH_TARGET" ]     || { echo "SSH_TARGET not set" >&2;     exit 1; }

# Write the private key with restrictive permissions from the start so it
# is never readable by others, not even between the cat and the chmod.
umask 077
cat > "$SSH_KEY_FILE" <<EOF
$SSH_KEY
EOF
chmod 400 "$SSH_KEY_FILE"

# -N: no remote command; -n: stdin from /dev/null; -R: reverse forward.
exec /usr/bin/ssh -i "$SSH_KEY_FILE" \
     -o BatchMode=yes \
     -o StrictHostkeyChecking=no \
     -Nn -R "$SSH_REMOTEPORT:localhost:$SSH_LOCALPORT" "$SSH_TARGET"
| true |
647f0f1ddd9acc7eda743bce69c69b9472211ce2 | Shell | lsalases/bhcycler | /cycle | UTF-8 | 629 | 2.921875 | 3 | [] | no_license | #! /bin/bash
#
# Adapted from Tom Romo's slaunch
#
# Submit a cycler job to SLURM with GPU defaults; the job name is derived
# from the last two components of the current working directory.
# Load modules
module load cuda/9.0 gcc
module load openmm/7.2.1
# Default variables (you need to set the path to cycler.sh)
DRIVER=/path-to/cycler.sh
TIMELIMIT="120:00:00"
NODES="1"
PARTITION="gpu"
CPT="8"
MEM="16gb"
export PARTITION
export TIMELIMIT
export DRIVER
export MEM
# Read in command line
# Build a job-name suffix like "parentdir.curdir" with '-' mapped to '_'.
META=`pwd | perl -anF/ -e 'print$F[$#F-1],".",$F[$#F];' | sed 's/-/_/g'`
JOBNAME="$1-$META"
# Submit job
sbatch --partition $PARTITION --time $TIMELIMIT -c $CPT --overcommit --gres gpu:$NODES --mem $MEM --job-name $JOBNAME --error $JOBNAME.err --output $JOBNAME.out $DRIVER
| true |
6026e12f1364e5185f57f39dc987a42c95a48fd3 | Shell | openSUSE/sysconfig | /scripts/functions | UTF-8 | 38,972 | 3.421875 | 3 | [] | no_license | #!/bin/bash
# Network interface configuration
#
# Copyright (c) 2002-2006 SuSE Linux AG Nuernberg, Germany.
# All rights reserved.
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors: Michal Svec <msvec@suse.cz>
# Mads Martin Joergensen <mmj@suse.de>
# Marius Tomaschewski <mt@suse.de>
#
# $Id$
#
# Pull in the common helper functions shared by the network scripts
# (expects RUN_FILES_BASE and friends to be defined there).
. /etc/sysconfig/network/scripts/functions.common
# Stamp file marking that the network service has been started.
NETWORK_RUNFILE="$RUN_FILES_BASE/started"
# Prefix for per-interface "new configuration" stamp files.
STAMPFILE_STUB="$RUN_FILES_BASE/new-stamp-"
# systemd cgroup hierarchy and the network.service task list therein.
SYSTEMD_CGROUP_DIR="/sys/fs/cgroup/systemd"
NETWORK_SERVICE_CGROUP_DIR="${SYSTEMD_CGROUP_DIR}/system/network.service"
NETWORK_SERVICE_CGROUP_TASKS="${NETWORK_SERVICE_CGROUP_DIR}/tasks"
# Returns 0 when systemd is the active init system, non-zero otherwise.
systemd_running ()
{
	# We simply test whether systemd cgroup hierarchy is mounted
	# where we return 0, non-zero if not.
	mountpoint -q "${SYSTEMD_CGROUP_DIR}"
}
# Returns 0 while the system is still booting, 1 in the running system.
systemd_booting () {
	# returns 0 when we boot, 1 in running system
	systemctl show -p ActiveState default.target | grep -qi inactive
}
# Returns 0 when the network service start stamp file exists.
netcontrol_running() {
	test -f "$NETWORK_RUNFILE"
}
# Returns 0 when the current process is already listed in the
# network.service cgroup task list, 1 otherwise.
in_network_service_cgroup()
{
	local pid dummy
	if test -f "${NETWORK_SERVICE_CGROUP_TASKS}" ; then
		while read pid dummy ; do
			test "$pid" = "$$" && return 0
		done < "${NETWORK_SERVICE_CGROUP_TASKS}"
	fi
	return 1
}
# Moves the current process into the network.service cgroup, creating the
# cgroup directory first when systemd is running.
# Returns 0 on success, 1 otherwise.
add_to_network_service_cgroup()
{
	if test ! -d "${NETWORK_SERVICE_CGROUP_DIR}" ; then
		systemd_running && \
		mkdir -p ${NETWORK_SERVICE_CGROUP_DIR} || \
		return 1
	fi
	if test -f "${NETWORK_SERVICE_CGROUP_TASKS}" ; then
		echo "$$" > "${NETWORK_SERVICE_CGROUP_TASKS}" && \
		return 0
	fi
	return 1
}
#
# to test the next two functions:
#
# for i in {0..32}; do
# echo $i: $(pfxlen2mask $i) " ---> " $(mask2pfxlen $(pfxlen2mask $i))
# done
# Convert a dotted-quad netmask (e.g. 255.255.255.0) into its prefix
# length (e.g. 24), printed on stdout. Returns 1 on malformed input.
# NOTE(review): as in the original, octets are examined independently,
# so non-contiguous masks are not rejected.
mask2pfxlen() {
	local part bit count=0
	local quad=(${1//./ })
	[ ${#quad[@]} -eq 4 ] || return 1
	for part in "${quad[@]}"; do
		test "$part" -ge 0 -a "$part" -le 255 2>/dev/null || return 1
		for bit in 128 192 224 240 248 252 254 255; do
			[ "$part" -ge $bit ] && ((count++))
		done
	done
	echo $count
	return 0
}
# Convert a prefix length (0..32) into a dotted-quad netmask on stdout;
# returns 1 when no argument is given. Lengths above 32 saturate to
# 255.255.255.255, non-numeric input yields 0.0.0.0 (as before).
pfxlen2mask() {
	[ -n "$1" ] || return 1
	local idx octet result="" remaining=$(($1))
	for idx in 0 1 2 3; do
		if ((remaining >= 8)); then
			octet=255
			((remaining -= 8)) || :
		elif ((remaining > 0)); then
			# partial octet: top 'remaining' bits set
			octet=$((256 - (1 << (8 - remaining))))
			remaining=0
		else
			octet=0
		fi
		result="${result}${result:+.}${octet}"
	done
	echo "$result"
	return 0
}
# Usage: add_to_wordlist VARNAME WORD...
# Appends each WORD to the whitespace separated word list stored in the
# variable named VARNAME (indirectly, via eval), skipping duplicates.
add_to_wordlist() {
	local v="${1}"
	local l=(${!v}) ; shift
	local a w
	for a in ${@} ; do
		for w in ${l[@]} ; do
			[ "x$w" = "x$a" ] && \
				continue 2
		done
		l=(${l[@]} $a)
	done
	eval "$v='${l[@]}'"
}
# Accept interface names of 1..15 characters drawn from [A-Za-z0-9._:-];
# returns 0 for a valid name, non-zero otherwise.
is_valid_interface_name()
{
    local LANG=C LC_ALL=C
    local name="$1"
    # reject empty and over-long names first, then match the charset
    [ -n "$name" ] || return 1
    [ ${#name} -le 15 ] || return 1
    [[ $name =~ ^[[:alnum:]._:-]+$ ]]
}
# returns 0 if ifup is currently working on this interface
ifup_on_iface() {
	# crude check: look for a running 'ifup ... $INTERFACE' process
	ps axhww | grep -qs "[i]fup.* $INTERFACE\>"
}
# Returns 0 when interface $1 exists in sysfs, or may legally not exist
# yet (virtual types during ifup, hotplug removal during ifdown).
# $2 (or $INTERFACETYPE) may pre-seed the interface type.
is_iface_available () {
	local IFNAME=${1}
	local IFTYPE=${2:-$INTERFACETYPE}
	test -z "$IFNAME" && return 1
	test -d /sys/class/net/$IFNAME && return 0
	# ifdown in hotplug mode: the device may already be gone -- fine.
	test "${SCRIPTNAME%%-*}" = ifdown -a \
	     "$MODE" = hotplug && return 0
	: ${IFTYPE:=$(get_iface_type "$IFNAME")}
	: ${IFTYPE:=$(get_iface_type_from_config "$IFNAME")}
	# ifup creates these virtual interface types on the fly.
	test "${SCRIPTNAME%%-*}" = ifup && { \
		case ${IFTYPE} in
		bond|bridge|vlan|ibchild) return 0 ;;
		ppp|isdn|dummy|tun|tap) return 0 ;;
		ipip|ip6tnl|sit|gre|mip6mnha) return 0 ;;
		esac
	}
	test -d /sys/class/net/$IFNAME
}
# Returns 0 when interface $1 exists and is administratively UP.
is_iface_up () {
	test -z "$1" && return 1
	test -d /sys/class/net/$1 || return 1
	case "`LC_ALL=POSIX ip link show $1 2>/dev/null`" in
		*$1*UP*) ;;
		*) return 1 ;;
	esac
}
# Returns 0 when the cached status of interface $1 is connected
# (or still connecting).
is_connected () {
	case `read_cached_config_data status $1` in
		connected) return 0 ;;
		connecting) return 0 ;; # might be wrong, test for link to
	esac
	return 1
}
# Returns 0 when the cached link state of interface $1 is 'yes'.
has_link () {
	case `read_cached_config_data link $1` in
		yes) return 0 ;;
	esac
	return 1
}
# Inspect sysfs carrier/dormant/operstate to decide whether the link of
# interface $1 is ready. Returns 0 when ready; 1..3 indicate which of
# the conditions failed.
link_ready_check () {
	local c=`cat /sys/class/net/${1}/carrier 2>/dev/null`
	local d=`cat /sys/class/net/${1}/dormant 2>/dev/null`
	local o=`cat /sys/class/net/${1}/operstate 2>/dev/null`
	#debug "link ready ${1}: carrier=$c, dormant=$d, operstate=$o"
	if test -e "/sys/class/net/${1}/operstate" ; then
		# SLE 11 has carrier + operstate + dormant
		test "$d" = "0" || return 3
		test "$c" = "1" || return 2
		test \( "$o" = "up" -o "$o" = "unknown" \) || return 1
	else
		# e.g. SLE 10 does not have operstate/dormant
		test "$c" = "1" || return 1
	fi
	return 0
}
# Check IPv6 duplicate address detection state of all addresses on
# interface $1 by parsing 'ip -6 addr show' flags.
# Returns 0 when DAD finished (or nodad), 2 on dadfailed, 3 while an
# address is still tentative, 1 when the interface does not exist.
ipv6_addr_dad_check()
{
	local iface="$1" word i
	local nodad=1 tentative=1 dadfailed=1
	test -f "/sys/class/net/$iface/ifindex" || return 1
	while read -a word ; do
		test "${word[0]}" != "inet6" && continue
		for((i=2; i<${#word[@]}; ++i)) ; do
			case ${word[$i]} in
			nodad) nodad=0 ;;
			tentative) tentative=0 ;;
			dadfailed) dadfailed=0 ;;
			flags) ((i++))
				# numeric flags word: decode the DAD related bits
				rx='^[[:xdigit:]]+$'
				[[ "${word[$i]}" =~ $rx ]] || continue
				hx="0x${word[$i]}"
				test $(( $hx & 0x02 )) -ne 0 && nodad=0
				test $(( $hx & 0x08 )) -ne 0 && dadfailed=0
				test $(( $hx & 0x40 )) -ne 0 && tentative=0
			;;
			esac
		done
		#debug "ipv6 dad $iface: nodad=$nodad, dadfailed=$dadfailed, tentative=$tentative"
		test $nodad -eq 0 && continue
		test $dadfailed -eq 0 && return 2
		test $tentative -eq 0 && return 3
	done < <(LC_ALL=C ip -6 addr show ${iface:+dev "$iface"} 2>/dev/null)
	return $R_SUCCESS
}
# Wait up to $2 seconds (polling every 25ms) for the link of $1 to
# become ready; returns the last link_ready_check result.
link_ready_wait ()
{
	local iface=$1
	local -i wsecs=${2:-0}
	local -i uwait=25000
	local -i loops=$(((wsecs * 1000000) / $uwait))
	local -i loop=0 ret=0
	link_ready_check "$iface" ; ret=$?
	while ((ret != 0 && loop++ < loops)) ; do
		usleep $uwait
		link_ready_check "$iface" ; ret=$?
	done
	return $ret
}
# Wait up to $2 seconds for IPv6 DAD on $1 to leave the tentative state;
# returns the last ipv6_addr_dad_check result.
ipv6_addr_dad_wait()
{
	local iface=$1
	local -i wsecs=${2:-0}
	local -i uwait=25000
	local -i loops=$(((wsecs * 1000000) / $uwait))
	local -i loop=0 ret=0
	ipv6_addr_dad_check "$iface" ; ret=$?
	while ((ret == 3 && loop++ < loops)) ; do
		usleep $uwait
		ipv6_addr_dad_check "$iface" ; ret=$?
	done
	return $ret
}
# Print the ethtool driver info of interface $1 as shell assignments
# (ETHTOOL_DRV_NAME=..., ETHTOOL_DRV_VERSION=..., ...), %q-quoted so the
# output can be eval'ed safely. Returns 1 when ethtool is not available.
get_ethtool_drv_info () {
	test -n "$1" || return 1
	local ethtool="/sbin/ethtool"
	if [ ! -x $ethtool ] ; then
		# fall back to /usr/sbin/ethtool
		[ -x /usr${ethtool} ] && ethtool="/usr${ethtool}" || return 1
	fi
	local key val
	$ethtool -i "$1" 2>/dev/null |
	while read key val ; do
		case "$key" in
		driver:) printf 'ETHTOOL_DRV_NAME=%q\n' "$val" ;;
		version:) printf 'ETHTOOL_DRV_VERSION=%q\n' "$val" ;;
		firmware-version:) printf 'ETHTOOL_DRV_FW_VERSION=%q\n' "$val" ;;
		bus-info:) printf 'ETHTOOL_DRV_BUS_INFO=%q\n' "$val" ;;
		esac
	done 2>/dev/null # bash printf required for %q format
}
# Determine the interface type (eth, wlan, bridge, vlan, bond, tap, ib,
# ppp, tunnel types, ...) of an existing interface $1 from its sysfs ARP
# hardware type plus sysfs attributes. Prints the type on stdout.
# Returns: 0 ok, 1 empty name, 2 no such interface, 3 unknown type.
get_iface_type () {
	local IF=$1 TYPE
	test -n "$IF" || return 1
	test -d /sys/class/net/$IF || return 2
	case "`cat /sys/class/net/$IF/type`" in
	1)
		TYPE=eth
		# Ethernet, may also be wireless, ...
		if test -d /sys/class/net/$IF/wireless -o \
			-L /sys/class/net/$IF/phy80211 ; then
			TYPE=wlan
		elif test -d /sys/class/net/$IF/bridge ; then
			TYPE=bridge
		elif test -f /proc/net/vlan/$IF ; then
			TYPE=vlan
		elif test -d /sys/class/net/$IF/bonding ; then
			TYPE=bond
		elif test -f /sys/class/net/$IF/tun_flags ; then
			TYPE=tap
		elif test -d /sys/devices/virtual/net/$IF ; then
			case $IF in
			(dummy*) TYPE=dummy ;;
			esac
		fi
		;;
	24) TYPE=eth ;; # firewire ;; # IEEE 1394 IPv4 - RFC 2734
	32) # InfiniBand
		if test -d /sys/class/net/$IF/bonding ; then
			TYPE=bond
		elif test -d /sys/class/net/$IF/create_child ; then
			TYPE=ib
		else
			TYPE=ibchild
		fi
		;;
	512) TYPE=ppp ;;
	768) TYPE=ipip ;; # IPIP tunnel
	769) TYPE=ip6tnl ;; # IP6IP6 tunnel
	772) TYPE=lo ;;
	776) TYPE=sit ;; # sit0 device - IPv6-in-IPv4
	778) TYPE=gre ;; # GRE over IP
	783) TYPE=irda ;; # Linux-IrDA
	801) TYPE=wlan_aux ;;
	65534) TYPE=tun ;;
	esac
	# The following case statement still has to be replaced by something
	# which does not rely on the interface names.
	case $IF in
	ippp*|isdn*) TYPE=isdn;;
	mip6mnha*) TYPE=mip6mnha;;
	esac
	test -n "$TYPE" && echo $TYPE && return 0
	return 3
}
# Derive the interface type of $1 from its configuration (iBFT firmware
# data or the ifcfg-$1 file in the current directory) without requiring
# the interface to exist.
# Prints the type; returns 2 when no config file is readable, 3 on an
# empty argument. Falls back to "eth" for a typeless config.
get_iface_type_from_config () {
	local VARIABLE VALUE TYPE IF=$1
	local INTERFACETYPE WIRELESS WIRELESS_MODE BONDING_MASTER ETHERDEVICE
	local BRIDGE TUNNEL PPPMODE MODEM_DEVICE ENCAP
	test -z "$IF" && return 3
	get_ibft_iface_type "$IF" && return 0
	test -r ifcfg-$IF || return 2
	# If variable (1st word) has the right value (2nd word) then this
	# interface is of this type (3rd word). If variable can have any
	# value except empty, then use "_not_empty_" as value.
	# INTERFACETYPE gets special handling and has variable type
	while read VARIABLE VALUE TYPE; do
		eval $VARIABLE=
		get_variable $VARIABLE $IF
		if [ "$VALUE" == _not_empty_ ] ; then
			test -n "${!VARIABLE}" && break
		else
			test "${!VARIABLE}" == "$VALUE" && break
		fi
	done <<- EOL
	INTERFACETYPE _not_empty_
	WIRELESS yes wlan
	WIRELESS_MODE _not_empty_ wlan
	BONDING_MASTER yes bond
	ETHERDEVICE _not_empty_ vlan
	BRIDGE yes bridge
	TUNNEL tap tap
	TUNNEL tun tun
	TUNNEL sit sit
	TUNNEL gre gre
	TUNNEL ipip ipip
	PPPMODE pppoe ppp
	PPPMODE pppoatm ppp
	PPPMODE capi-adsl ppp
	PPPMODE pptp ppp
	MODEM_DEVICE _not_empty_ ppp
	ENCAP syncppp isdn
	ENCAP rawip isdn
	EOL
	# InfiniBand child interfaces are named ib<n>.<key>.
	case "$IF" in
	ib*.*) INTERFACETYPE=ibchild ;;
	ib*) INTERFACETYPE=ib ;;
	esac
	# bnc#458412: map obsolete (s390) eth type overrides
	case "$INTERFACETYPE" in
	(qeth|hsi|lcs|ctc|iucv|tr) INTERFACETYPE=eth ;;
	esac
	test -n "$INTERFACETYPE" && echo $INTERFACETYPE && return
	if [ -z "$TYPE" ] ; then
		case $IF in
		lo) TYPE=lo;;
		dummy*) TYPE=dummy;;
		ib*) TYPE=ib;;
		ip6tnl*) TYPE=ip6tnl;;
		mip6mnha*) TYPE=mip6mnha;;
		esac
	fi
	# There is a config file, but it has no special type: This must be ethernet
	test -n "$TYPE" && echo $TYPE || echo eth
}
# Print the MAC address of interface $1 (empty output and non-zero exit
# status when the interface does not exist).
get_hwaddress () {
    local sysfs_path="/sys/class/net/$1/address"
    cat "$sysfs_path" 2>/dev/null
}
# Print the permanent HW (MAC) addresses of all slaves of bonding
# interface $1, space separated, as recorded in /proc/net/bonding/$1.
# Returns 1 when no such bonding proc file exists.
get_bonding_slave_hwaddrs()
{
	local ifname="${1}"
	# BUG FIX: the original declared 'saddrs' local but appended to and
	# echoed 'saddr', leaking an unintended global whose stale contents
	# from a previous call could be appended to the output.
	local saddr=()
	local line
	if test -f "/proc/net/bonding/$ifname" ; then
		while read line ; do
			case $line in
			"Permanent HW addr:"*)
				saddr+=("${line//Permanent HW addr: /}")
			;;
			esac
		done < "/proc/net/bonding/$ifname" 2>/dev/null
		echo "${saddr[*]}"
		return 0
	fi
	return 1
}
# This will echo the first address listed for the given interface
# ignoring all addresses with a label.
get_ipv4address () {
	test -z "$1" && return 1
	local a b c
	# 'ip -4 address show' lines look like: "inet 10.0.0.1/24 brd ..."
	while read a b c; do
		if [ "$a" = inet ] ; then
			break
		fi
	done < <(LC_ALL=POSIX ip -4 address show dev "$1" label "$1" 2>/dev/null)
	test -z "$b" && return 1
	# strip the /prefix-length part
	echo ${b%%/*}
}
# Map an IPv4 address to its 6to4 address (RFC 3056),
# e.g. 192.168.1.1 -> 2002:c0a8:0101::1
convert_ipv4address_to_6to4 () {
    local -a oct=(${1//./ })
    printf "2002:%02x%02x:%02x%02x::1\n" "${oct[@]}"
}
# Convert a 6to4 address (2002:XXXX:YYYY::1) back to the embedded IPv4
# address, printed as "::a.b.c.d" (no trailing newline). Input that is
# not a 6to4 address is echoed back unchanged.
convert_6to4_to_ipv4address () {
	ADDR=$1
	LIST=($(IFS=:; echo $ADDR))
	if [ ${#LIST[@]} -ne 4 -o "${LIST[0]}" != "2002" ] ; then
		echo $ADDR
		# BUG FIX: the original fell through after echoing the
		# unmodified address and printed garbage behind it.
		return
	fi
	# normalize both 16-bit groups to four hex digits, then split each
	# into its two bytes and print them in decimal
	NORM_1=`printf "%04x" 0x${LIST[1]} 2>/dev/null`
	NORM_2=`printf "%04x" 0x${LIST[2]} 2>/dev/null`
	printf "::%u.%u.%u.%u" \
		0x${NORM_1:0:2} 0x${NORM_1:2:2} \
		0x${NORM_2:0:2} 0x${NORM_2:2:2}
}
# Usage: get_variable NAME IFACE
# Eval every line of the interface configuration whose leading word
# matches NAME, effectively importing that variable into the caller's
# scope. NOTE: the config lines are eval'ed verbatim -- configuration
# files are trusted input.
get_variable () {
	local line
	while read line; do
		eval $line
	done < <(read_iface_config_data "$2" | grep "^[[:space:]]*$1" 2>/dev/null)
}
# Print the STARTMODE configured for interface $1 (empty when unset).
get_startmode () {
	local STARTMODE
	get_variable STARTMODE $1
	echo "$STARTMODE"
}
# Print the IFPLUGD_PRIORITY configured for interface $1 (default 0).
get_ifplugd_priority () {
	local IFPLUGD_PRIORITY=0
	declare -i IFPLUGD_PRIORITY
	get_variable IFPLUGD_PRIORITY $1
	echo "$IFPLUGD_PRIORITY"
}
# return vlan interface name for given base device and vlan id
# (/proc/net/vlan/config lines look like: "<vlan-if> | <vid> | <base-dev>")
get_vlan_ifname()
{
	test "x$1" = x -o "x$2" = x && return 1
	LANG=C LC_ALL=C awk -F '[ \t]*[|][ \t]*' -v dev="$1" -v vid="$2" -- \
		'$3 == dev && $2 == vid { print $1; }' /proc/net/vlan/config 2>/dev/null
}
# return base device (e.g. eth0) of given vlan interface (e.g. eth0.42)
get_vlan_if_dev()
{
	test "x$1" = x && return 1
	LANG=C LC_ALL=C awk -F '[ \t]*[|][ \t]*' -v vif="$1" -- \
		'$1 == vif { print $3; }' /proc/net/vlan/config 2>/dev/null
}
# return vlan id (e.g. 42) of given vlan interface (e.g. eth0.42)
get_vlan_if_vid()
{
	test "x$1" = x && return 1
	LANG=C LC_ALL=C awk -F '[ \t]*[|][ \t]*' -v vif="$1" -- \
		'$1 == vif { print $2; }' /proc/net/vlan/config 2>/dev/null
}
# Parse iSCSI Boot Firmare Table settings of given interface
# and provide as adequate ifcfg/netcontrol scripts settings
# (basically STARTMODE and BOOTPROTO variables) on stdout.
#
# Helper: print a sysfs glob matching the net-class entries of the NIC
# referenced by the given iBFT ethernet directory.
get_ibft_iface_ref_glob()
{
	local edir=$1 # /sys/firmware/ibft/ethernetX
	# iBFT refers NICs by PCI Bus/Dev/Func, but in case
	# of at least virtio, the interface isn't attached
	# directly as PCI device ($edir/device/virtio0/net/).
	if test -d "$edir/device/net" ; then
		echo "$edir/device/net/*"
	else
		echo "$edir/device/*/net/*"
	fi
	return 0
}
# Read the iBFT (iSCSI Boot Firmware Table) settings applying to
# interface $1 and emit them as ifcfg variable assignments (STARTMODE,
# BOOTPROTO, IBFT_*) on stdout. Handles both the physical NIC and the
# vlan interface derived from the iBFT vlan id.
# Returns 0 when settings were emitted, 1 when no valid iBFT data applies.
read_ibft_iface_config()
{
	local ifname=$1
	case $ifname in ""|.|..|*/*) return 1 ;; esac
	# check if ibft firmware info is available
	local sysfs_ibft="/sys/firmware/ibft"
	# done in initrd on hosts using ibft
	#test -d "$sysfs_ibft" || modprobe -qs iscsi_ibft 2>/dev/null
	test -d "$sysfs_ibft" || return 1
	local edir ibft_e_flags ibft_e_origin ibft_e_vlan
	local restore_nullglob=`shopt -p nullglob`
	shopt -s nullglob
	for edir in "$sysfs_ibft/ethernet"* ; do
		# Flag Bits: Block Valid, FW Boot Selected, Global/Link Local
		read -s ibft_e_flags 2>/dev/null < "$edir/flags"
		# Check flag bit 0: Block Valid Flag (0 = no, 1 = yes)
		# and ignore when the IBFT settings not marked valid
		((ibft_e_flags & 0x01)) || continue
		# Read the vlan id from iBFT and compare
		read -s ibft_e_vlan 2>/dev/null < "$edir/vlan"
		local vid="${ibft_e_vlan:-0}"
		if test "${vid}" != "0" ; then
			# resolve the vlan interface name on top of the NIC
			local ref dev vif
			for ref in `get_ibft_iface_ref_glob "$edir"` ; do
				test -d "$ref" || continue 2
				dev="${ref##*/}"
				vif=`get_vlan_ifname "${dev}" "${vid}"`
				vif="${vif:-${dev}.${vid}}"
				break
			done
			# no ibft -> iface reference?!
			test -n "$dev" || continue
			# check if it is the physical iBFT vlan base
			if test "$dev" = "$ifname" ; then
				# just a minimal set here ...
				echo "STARTMODE='nfsroot'"
				echo "BOOTPROTO='none'"
				echo "IBFT='yes'"
				echo "IBFT_NIC='${edir##*/}'"
				return 0
			fi
			# check if ifname is a vlan iBFT interface
			test "$vif" = "$ifname" || continue
			echo "ETHERDEVICE='$dev'"
			echo "VLAN_ID='$vid'"
		else
			# check if ifname is a physical iBFT interface
			for ref in `get_ibft_iface_ref_glob "$edir"` ; do
				test -d "$ref" || continue 2
				dev="${ref##*/}"
				test "$dev" = "$ifname" || continue 2
				break
			done
		fi
		# iBFT interfaces are always nfsroot
		echo "STARTMODE='nfsroot'"
		# Enum: Other,Manual,WellKnown,Dhcp,RouterAdv
		read -s ibft_e_origin 2>/dev/null < "$edir/origin"
		case $ibft_e_origin in
		0) # Other, but it is used e.g. on kvm gPXE
		   # (11.4), even the data is from dhcp.
			case $ibft_e_dhcp in
			[1-9]*.*.*.*) echo "BOOTPROTO='dhcp4'" ;;
			*) echo "BOOTPROTO='none'" ;;
			esac
		;;
		3) # IpPrefixOriginDhcp
			echo "BOOTPROTO='dhcp4'"
		;;
		*) echo "BOOTPROTO='none'" ;;
		esac
		# the complete set of IBFT variables
		echo "IBFT='yes'"
		# Check bit 1, FW Boot Selected (0 = no, 1 = yes)
		# and set IBFT_PRIMARY=yes (force a dhcp primary)
		if ((ibft_e_flags & 0x02)) ; then
			echo "IBFT_PRIMARY='yes'"
		fi
		echo "IBFT_NIC='${edir##*/}'"
		echo "IBFT_VLAN='$ibft_e_vlan'"
		echo "IBFT_FLAGS='$ibft_e_flags'"
		echo "IBFT_ORIGIN='$ibft_e_origin'"
		# read the other ibft ethernet variables
		local v
		for v in ip-addr primary-dns secondary-dns gateway index dhcp mac ; do
			local vv=''
			read vv 2>/dev/null < "$edir/$v"
			case $v in
			ip-addr) echo "IBFT_IPADDR='$vv'" ;;
			primary-dns) echo "IBFT_DNS_1='$vv'" ;;
			secondary-dns) echo "IBFT_DNS_2='$vv'" ;;
			gateway) echo "IBFT_GATEWAY='$vv'";;
			index) echo "IBFT_INDEX='$vv'" ;;
			dhcp) echo "IBFT_DHCP='$vv'" ;;
			mac) echo "IBFT_MAC='$vv'" ;;
			#index|dhcp|gateway|mac)
			#	echo "IBFT_${f^^}='$vv'"
			#;;
			esac
		done
		eval $restore_nullglob
		return 0
	done
	eval $restore_nullglob
	return 1
}
# prints (the "eth" or "vlan") interface type of given ibft interface
# note, that when a ibft vlan interface name does not exist yet, the
# default <physical interface name>.<vlan id> name scheme is used.
# Returns 1 when $1 is invalid or no iBFT data is available; prints
# nothing when $1 is not referenced by iBFT.
get_ibft_iface_type()
{
	local ifname=$1
	case $ifname in ""|.|..|*/*) return 1 ;; esac
	# check if ibft firmware info is available
	local sysfs_ibft="/sys/firmware/ibft"
	# done in initrd on hosts using ibft
	#test -d "$sysfs_ibft" || modprobe -qs iscsi_ibft 2>/dev/null
	test -d "$sysfs_ibft" || return 1
	local iftype=""
	local edir ibft_e_flags ibft_e_origin ibft_e_vlan
	local restore_nullglob=`shopt -p nullglob`
	shopt -s nullglob
	for edir in "$sysfs_ibft/ethernet"* ; do
		# Flag Bits: Block Valid, FW Boot Selected, Global/Link Local
		read -s ibft_e_flags 2>/dev/null < "$edir/flags"
		# Check flag bit 0: Block Valid Flag (0 = no, 1 = yes)
		# and ignore when the IBFT settings not marked valid
		((ibft_e_flags & 0x01)) || continue
		# Read the vlan id from iBFT and compare
		read -s ibft_e_vlan 2>/dev/null < "$edir/vlan"
		local vid="${ibft_e_vlan:-0}"
		if test "${vid}" != "0" ; then
			# resolve the vlan interface name on top of the NIC
			local ref dev vif
			for ref in `get_ibft_iface_ref_glob "$edir"` ; do
				test -d "$ref" || continue 2
				dev="${ref##*/}"
				vif=`get_vlan_ifname "${dev}" "${vid}"`
				vif="${vif:-${dev}.${vid}}"
				break
			done
			# no ibft -> iface reference?!
			test -n "$dev" || continue
			# check if it is the physical iBFT vlan base
			if test "$dev" = "$ifname" ; then
				iftype="eth"
				break
			fi
			# check if ifname is a vlan iBFT interface
			test "$vif" = "$ifname" || continue
			iftype="vlan"
			break
		else
			# check if ifname is a physical iBFT interface
			for ref in `get_ibft_iface_ref_glob "$edir"` ; do
				test -d "$ref" || continue 2
				dev="${ref##*/}"
				test "$dev" = "$ifname" || continue 2
				break
			done
			iftype="eth"
			break
		fi
	done
	eval $restore_nullglob
	test -n "$iftype" && echo "$iftype"
}
# returns names of physical _and_ vlan interface names referenced by ibft
get_ibft_config_names()
{
	local ifname_list=""
	local restore_nullglob=`shopt -p nullglob`
	# check if ibft firmware info is available
	local sysfs_ibft="/sys/firmware/ibft"
	# done in initrd on hosts using ibft
	#test -d "$sysfs_ibft" || modprobe -qs iscsi_ibft 2>/dev/null
	if test -d "$sysfs_ibft" ; then
		local edir ref ibft_e_flags ibft_e_vlan
		shopt -s nullglob
		for edir in "$sysfs_ibft/ethernet"* ; do
			# Flag Bits: Block Valid, FW Boot Selected, Global/Link Local
			read -s ibft_e_flags 2>/dev/null < "$edir/flags"
			read -s ibft_e_vlan 2>/dev/null < "$edir/vlan"
			# Check flag bit 0: Block Valid Flag (0 = no, 1 = yes)
			# and ignore when the IBFT settings not marked valid
			((ibft_e_flags & 0x01)) || continue
			for ref in `get_ibft_iface_ref_glob "$edir"` ; do
				test -d "$ref" || continue 2
				local dev="${ref##*/}"
				add_to_wordlist ifname_list "$dev"
				# also add the derived vlan interface name, if any
				if test "${ibft_e_vlan:-0}" != 0 ; then
					local vid="${ibft_e_vlan:-0}"
					local vif=`get_vlan_ifname "${dev}" "${vid}"`
					add_to_wordlist ifname_list "${vif:-${dev}.${vid}}"
				fi
				break
			done
		done
		echo $ifname_list
	fi
	eval $restore_nullglob
	return 0
}
# returns ifcfg configured interface names
# note: config name is ifcfg-<interface name>,
# there is no ifcfg-bus-pci... any more
get_ifcfg_config_names()
{
	# Print the interface names of all ifcfg-<name> files found in
	# /etc/sysconfig/network (config name is ifcfg-<interface name>),
	# skipping editor backups and rpm/scpm leftovers.
	local ifname_list=""
	local ifcfg ifname
	local restore_nullglob=`shopt -p nullglob`
	local ifcfg_dir=/etc/sysconfig/network
	local ifcfg_pfx=ifcfg-
	# bugfix: nullglob was saved/restored but never enabled, so an
	# empty directory left the literal 'ifcfg-*' pattern and a bogus
	# interface name '*' was reported
	shopt -s nullglob
	for ifcfg in "${ifcfg_dir}/${ifcfg_pfx}"* ; do
		ifname=${ifcfg##*/${ifcfg_pfx}}
		case "$ifname" in
		""|*~|*.old|*.rpmnew|*.rpmsave|*.scpmbackup)
			continue
		;;
		esac
		add_to_wordlist ifname_list "$ifname"
	done
	eval $restore_nullglob
	echo $ifname_list
	return 0
}
# returns a list of ibft and ifcfg configured interface names
get_iface_config_names()
{
	# Print the combined list of iBFT and ifcfg configured interface
	# names; add_to_wordlist filters duplicates.
	local ifname_list=""
	# TODO: ibft may return eth0.42 (that does not exist yet)
	# what when there is a ifcfg-vlan42 on top of eth0 ?
	# basically the second one will fail to create ...
	add_to_wordlist ifname_list `get_ibft_config_names`
	add_to_wordlist ifname_list `get_ifcfg_config_names`
	echo $ifname_list
}
# reads interface configuration data to stdout
read_iface_config_data()
{
	# Write the configuration data of interface $1 to stdout: iBFT
	# firmware settings take precedence, otherwise the ifcfg-$1 file
	# in the current directory is dumped.  Returns 1 on invalid names.
	case $1 in ""|.|..|*/*) return 1 ;; esac
	read_ibft_iface_config "$1" && return 0
	test -f "./ifcfg-$1" && cat "./ifcfg-$1" 2>/dev/null
}
# sources interface config from ibft or from ifcfg file
source_iface_config()
{
	# Load the configuration of interface $1 into the current shell.
	# iBFT settings win and set IBFT=yes; otherwise the ifcfg-$1 file
	# from the current directory is sourced.  Returns 1 on bad names.
	case $1 in ""|.|..|*/*) return 1 ;; esac
	# IBFT=no is the default; the ibft config data may override it
	eval IBFT=no `read_ibft_iface_config "$1"`
	test "$IBFT" = "yes" && return 0
	test -f "./ifcfg-$1" && . "./ifcfg-$1" 2>/dev/null
}
# returns true when interface is configured in ibft
# or when a ifcfg-<interface name> config file exists
exists_iface_config()
{
	# Return 0 when interface $1 is configured either in the iBFT
	# firmware table or via an ifcfg-$1 file in the current directory,
	# 1 otherwise.  Invalid names are rejected up front.
	case $1 in ""|.|..|*/*) return 1 ;; esac
	# bugfix: declare the loop variable local -- this file is sourced,
	# so an undeclared "name" would leak into / clobber the caller's
	# scope
	local name
	for name in `get_ibft_config_names` ; do
		test "x$1" = "x$name" && return 0
	done
	test -f "./ifcfg-$1"
}
# return the content of the BOOTIF variable from /proc/cmdline
get_pxe_bootif_param()
{
	# Extract the value of the BOOTIF= kernel parameter (as appended
	# by pxelinux) from /proc/cmdline; prints nothing when the
	# parameter is absent or the file cannot be read.
	local kernel_cmdline
	kernel_cmdline=$(cat /proc/cmdline 2>/dev/null)
	# BOOTIF is a '-' separated hex string: <hwtype>-<mac bytes...>
	local bootif_rx='^(.*[[:space:]])?BOOTIF=([[:xdigit:]]{2}([-[:xdigit:]]{2})+)([[:space:]].*)?$'
	if [[ ${kernel_cmdline} =~ ${bootif_rx} ]] ; then
		echo "${BASH_REMATCH[2]}"
	fi
}
is_pxe_boot_iface()
{
	# params  <interface name>
	# output  'yes': bootif param available, $1 interface match
	#         'no' : bootif param available, no $1 interface match
	#         ''   : otherwise (no bootif param, errors)
	# returns 0 on success, 1 on errors
	local bootif=`get_pxe_bootif_param 2>/dev/null`
	# no BOOTIF= on the kernel command line: empty output, success
	test "x$bootif" = x && return 0
	local ifname="$1"
	test "x$ifname" != x -a -d "/sys/class/net/$ifname" || return 1
	local iftype=`cat "/sys/class/net/$ifname/type" 2>/dev/null`
	local ifaddr=`get_hwaddress "$ifname"`
	test "x$iftype" != x -a "x$ifaddr" != x || return 1
	# BOOTIF uses '-' as byte separator, hw addresses use ':'
	local maddr=${bootif//-/:}
	local addrs="$ifaddr" addr
	case $iftype in
	1)
		# TODO: bridge and vlan inherit hwaddrs as well
		if test -d "/sys/class/net/$ifname/bonding" ; then
			addrs=`get_bonding_slave_hwaddrs "$ifname" 2>/dev/null`
		fi
	;;
	esac
	# BOOTIF is "<2-digit hw type>:<mac>": compare it against every
	# candidate address of the interface
	for addr in ${addrs} ; do
		local x=`printf "%02x:%s" "$iftype" "$addr" 2>/dev/null`
		if test "x$maddr" = "x$x" ; then
			echo "yes"
			return 0
		fi
	done
	echo "no"
}
is_ibft_primary_iface()
{
	# params  <interface name>
	# output  'yes': ibft interfaces available, $1 interface is ibft primary
	#         'no' : ibft interfaces available, $1 interface is not primary
	#         ''   : otherwise (no ibft, errors)
	# returns 0 on success, 1 on errors
	# Guard clause style: print nothing unless the sourced interface
	# configuration flagged iBFT usage via IBFT=yes.
	if test "x$IBFT" != "xyes" ; then
		echo ''
		return 0
	fi
	# IBFT_PRIMARY defaults to "no" when not set by the ibft data
	echo ${IBFT_PRIMARY:-no}
	return 0
}
get_slaves () {
	# Print the base ("slave") interfaces of virtual interface $1:
	# bonding slaves, vlan/tunnel base devices and bridge ports, or
	# the parent of an infiniband child (ib<X>.<Y>) interface.
	# Returns 0 when at least one slave was printed, 1 otherwise.
	local ret=1
	local v vv
	case $1 in
	ib*.*) echo -n "${1%%\.*} " ; ret=0 ;;
	esac
	for v in BONDING_SLAVE ETHERDEVICE TUNNEL_DEVICE \
	         TUNNEL_LOCAL_INTERFACE BRIDGE_PORTS; do
		# get_variable loads the $v* variables from the ifcfg of $1
		get_variable $v $1
		# iterate over all variables whose name starts with $v
		for vv in `eval echo \$\{\!$v\*\}`; do
			if [ -n "${!vv}" ] ; then
				echo -n "${!vv} "
				ret=0
			fi
			unset $vv
		done
		test $ret = 0 && return 0
	done
	return 1
}
get_bonding_master () {
	# Print the name of the bonding master configuration that lists
	# interface $1 as one of its BONDING_SLAVE* entries; prints
	# nothing when $1 is no configured bonding slave.
	local IF="$1"
	test "x$IF" = "x" && return 1
	local a master slave
	# split each "ifcfg-<name>" path at the first '-': $master is the
	# configuration name
	while IFS=- read a master; do
		case $master in
		""|*~|*.old|*.rpmnew|*.rpmsave|*.scpmbackup)
			continue
		;;
		esac
		local BONDING_MASTER
		get_variable BONDING_MASTER "$master"
		test "$BONDING_MASTER" = yes || continue
		# clear leftovers from the previous iteration
		unset ${!BONDING_SLAVE*}
		get_variable BONDING_SLAVE "$master"
		for slave in ${!BONDING_SLAVE*} ; do
			if test "x$IF" = "x${!slave}" ; then
				echo "$master"
				return 0
			fi
		done
	done < <(ls -1d /etc/sysconfig/network/ifcfg-* 2>/dev/null)
}
get_bridge_parent () {
	# Print the name of the bridge configuration that lists interface
	# $1 in its BRIDGE_PORTS; prints nothing when $1 is no configured
	# bridge port.
	local IF="$1"
	test "x$IF" = "x" && return 1
	local a bridge port
	# split each "ifcfg-<name>" path at the first '-': $bridge is the
	# configuration name
	while IFS=- read a bridge; do
		case $bridge in
		""|*~|*.old|*.rpmnew|*.rpmsave|*.scpmbackup)
			continue
		;;
		esac
		local BRIDGE
		get_variable BRIDGE "$bridge"
		test "$BRIDGE" = yes || continue
		unset BRIDGE_PORTS
		get_variable BRIDGE_PORTS "$bridge"
		for port in $BRIDGE_PORTS ; do
			if test "x$IF" = "x${port}" ; then
				echo "$bridge"
				return 0
			fi
		done
	done < <(ls -1d /etc/sysconfig/network/ifcfg-* 2>/dev/null)
}
# This function looks for interfaces which depend on the given interface. It
# prints a list with all depending interfaces. It returns 0 if there are
# depending interfaces and !=0 if not.
# Currently it checks only for vlan and optionally bonding slave interfaces.
# FIXME: Add other types of interfaces that depend on others.
get_depending_ifaces() {
	# See the description above: prints all interfaces depending on
	# the given base interface (vlans, with --with-bonding-slaves also
	# bonding slaves, and infiniband children) and returns 0 iff any
	# were found.
	local VLAN_PATH BOND_PATH DEP_VLANS DEP_BONDS BASE_IFACE i
	local -a DEP_IFACES=()
	VLAN_PATH="/proc/net/vlan"
	BOND_PATH=""
	# option parsing: unknown options are ignored, first non-option
	# argument is the base interface
	while [ $# -gt 0 ]; do
		case $1 in
		--with-bonding-slaves)
			BOND_PATH="/proc/net/bonding"
			shift
		;;
		-*) shift ;;
		*) break ;;
		esac
	done
	BASE_IFACE="$1"
	if [ -z "$BASE_IFACE" ]; then
		return 1
	fi
	if [ -d "$VLAN_PATH" ]; then
		# every vlan procfs file carries a "Device: <base>" line
		DEP_VLANS=`cd "$VLAN_PATH"
		grep -lws "Device: *$BASE_IFACE" *`
		DEP_IFACES+=($DEP_VLANS)
	fi
	if [ -n "$BOND_PATH" -a -d "$BOND_PATH" ]; then
		DEP_BONDS=`cd "$BOND_PATH"
		grep -s '^Slave Interface:' $BASE_IFACE |
		while IFS=':' read text iface ; do echo -n "$iface" ; done`
		DEP_IFACES+=($DEP_BONDS)
	fi
	case $BASE_IFACE in
	(ib*.*) ;;
	(ib*) # the infiniband children -- is there a better way?
		for i in `ls -1 /sys/class/net/ 2>/dev/null` ; do
			test -d /sys/class/net/$i || continue
			case $i in (${BASE_IFACE}.*)
				DEP_IFACES+=($i)
			;;
			esac
		done
	;;
	esac
	if [ ${#DEP_IFACES[*]} -gt 0 ]; then
		echo "${DEP_IFACES[*]}"
		return 0
	else
		return 1
	fi
}
resolve_iface_startorder()
{
	#
	# resolve_iface_startorder <name of the result variable>
	#                          <list of interfaces to resolve>
	#                          [ <skip list> [filter function] ]
	#
	# This function creates a start ordered list of virtual interfaces
	# as bondings, vlans and bridges known by the get_slaves() function.
	# It reads the base or "slave" interfaces of each virtual interface
	# recursively and inserts the slaves into the result list before the
	# virtual interface itself. Further it detects interface usage loops
	# like: interface "a" needs "b" and "b" needs "a" and skips them.
	#
	# The optional skip list allows to stop the recursion insertion at
	# e.g. already started / existing physical slave interfaces.
	# Adding interfaces to resolve to this skip list causes resolving
	# of the recursive list of slave interfaces without the requested
	# interfaces themself (resolve_iface_startorder slaves "br0" "br0").
	#
	# The optional filter function allows to filter out interfaces as
	# well as all depending interfaces. For example a vlan interface
	# will be filtered out, when the underlying interface gets filtered
	# out. This happens also, when the function does not cause to filter
	# out the vlan interface directly. Main purpose is to filter out
	# interfaces, that are not supported without a mounted remotefs.
	#
	# $1: the name of the result start ordered list variable to set
	# $2: list of virtual interfaces to resolve the start order for
	# $3: optional list of interfaces to skip/stop reading
	# $4: optional (white list) filter function returning true for
	#     acceptable interfaces and false to filter out an interface.
	#
	# returns 0 on success,
	#         1 on empty result list name,
	#         [2 on interface usage loop error; disabled]
	#
	local NAME="$1"
	local TODO="$2"
	local SKIP="$3"
	local FUNC="$4"
	local LIST=()
	# recursive worker: depth-first walk that appends each interface
	# after all of its slave interfaces have been appended
	_resolve_iface_startorder() {
		local todo="$1"
		local skip="$2"
		local func="$3"
		local guard="$4"
		local level="$5"
		local slaves iface i ret
		local result=0
		for iface in ${todo} ; do
			# loop protection: hitting an interface that is
			# already in the current recursion chain means it
			# (indirectly) uses itself
			for i in ${guard} ; do
				if [ "x${i}" = "x${iface}" ] ; then
					err_mesg "Interface dependency loop " \
					         "detected: ${guard} ${iface}"
					# guard non-empty in level > 0
					return 2
				fi
			done
			if [ "x$func" != x ] && ! $func "$iface" &>/dev/null ; then
				[ $level -eq 0 ] && continue || return 3
			fi
			slaves=(`get_slaves $iface 2>/dev/null`)
			if [ $? = 0 -a ${#slaves[@]} -gt 0 ] ; then
				ret=0
				_resolve_iface_startorder \
					"${slaves[*]}" \
					"${skip}" \
					"${func}" \
					"${guard} $iface" \
					$(($level+1)) \
					|| ret=$?
				if [ $ret -ne 0 ] ; then
					if [ $level -eq 0 ] ; then
						#result=$ret
						continue
					else
						return $ret
					fi
				fi
			fi
			# drop interfaces already collected or in the skip list
			for i in ${LIST[@]} ${skip} ; do
				[ "x$i" = "x$iface" ] && continue 2
			done
			LIST=(${LIST[@]} $iface)
		done
		return $result
	}
	[ "x$NAME" = x ] && return 1
	_resolve_iface_startorder "$TODO" "$SKIP" "$FUNC" "" 0 || return $?
	# hand the collected order back via the caller supplied variable
	eval "$NAME='${LIST[@]}'"
}
# returns 0 if there is a dhcp client running on this interface
# prints pids of all dhcp clients on this interface
# prints nothing if called with option '-q'
# Usually it should not happen that more then one dhcpcd is running on one
# interface, but it may happen. So better safe than sorry!
dhcpc4_on_iface() {
	# Print the pids of all $DHCLIENT (IPv4 dhcp client) processes
	# running on $INTERFACE; with option '-q' print nothing.
	# Returns 0 when at least one client process was found.
	local pid retval=1
	[ "x$DHCLIENT" != x -a "x$INTERFACE" != x ] || return $retval
	# when the dhcp client forks, it may be not visible
	# in the process list for a short (usleep) while...
	typeset -i retries=3
	for ((; retries > 0; retries--)) ; do
		for pid in `pgrep -f "^(/.*/)?$DHCLIENT\>.*\<$INTERFACE\>$" 2>/dev/null` ; do
			retval=0
			test "x$1" == "x-q" && break
			echo $pid
		done
		(( retval == 0 )) && break || usleep 100000
	done
	return $retval
}
dhcpc6_on_iface() {
	# Print the pids of all $DHCLIENT6 (IPv6 dhcp client) processes
	# running on $INTERFACE; with option '-q' print nothing.
	# Returns 0 when at least one client process was found.
	local pid retval=1
	[ "x$DHCLIENT6" != x -a "x$INTERFACE" != x ] || return $retval
	# when the dhcp client forks, it may be not visible
	# in the process list for a short (usleep) while...
	typeset -i retries=3
	for ((; retries > 0; retries--)) ; do
		for pid in `pgrep -f "^(/.*/)?$DHCLIENT6\>.*\<$INTERFACE\>$" 2>/dev/null` ; do
			retval=0
			test "x$1" == "x-q" && break
			echo $pid
		done
		(( retval == 0 )) && break || usleep 100000
	done
	return $retval
}
dhcpc_on_iface() {
	# Print the pids of the configured v4 ($DHCLIENT) and/or v6
	# ($DHCLIENT6) dhcp clients running on $INTERFACE; with option
	# '-q' print nothing.  Returns 0 when at least one was found.
	local pid retval=1 pattern
	# build an alternation pattern from the configured client names
	[ "x$DHCLIENT" != x ] && pattern=$DHCLIENT || pattern=""
	[ "x$DHCLIENT6" != x ] && pattern="${pattern:+$pattern|}$DHCLIENT6"
	[ "x$pattern" != x -a "x$INTERFACE" != x ] || return $retval
	# when the dhcp client forks, it may be not visible
	# in the process list for a short (usleep) while...
	typeset -i retries=3
	for ((; retries > 0; retries--)) ; do
		for pid in `pgrep -f "^(/.*/)?($pattern)\>.*\<$INTERFACE\>$" 2>/dev/null` ; do
			retval=0
			test "$1" == "-q" && break
			echo $pid
		done
		(( retval == 0 )) && break || usleep 100000
	done
	return $retval
}
any_dhcpc_on_iface()
{
	# Like dhcpc_on_iface, but matches any known dhcp client binary
	# regardless of the configured $DHCLIENT/$DHCLIENT6 names.
	local pid retval=1 pattern=""
	[ "x$INTERFACE" != x ] || return $retval
	# just search for all known dhcp clients
	pattern="dhcpcd|dhclient|dhcp6c|dhclient6"
	# when the dhcp client forks, it may be not visible
	# in the process list for a short (usleep) while...
	typeset -i retries=3
	for ((; retries > 0; retries--)) ; do
		for pid in `pgrep -f "^(/.*/)?($pattern)\>.*\<$INTERFACE\>$" 2>/dev/null` ; do
			retval=0
			test "$1" == "-q" && break
			echo $pid
		done
		(( retval == 0 )) && break || usleep 100000
	done
	return $retval
}
dhcp_interfaces() {
	# Print the names of all interfaces that currently have any known
	# dhcp client running; the caller's $INTERFACE value is preserved.
	local old_if=$INTERFACE
	for INTERFACE in `ls -1 /sys/class/net`; do
		if test -d "/sys/class/net/$INTERFACE" ; then
			# any_dhcpc_on_iface inspects the $INTERFACE variable
			any_dhcpc_on_iface -q && echo "$INTERFACE"
		fi
	done
	INTERFACE=$old_if
	return 0
}
# We have to write status files per interface or per configuration for at least
# these reasons:
# 1) remember the used configuration if getcfg cannot get it after the device
# has been unplugged --> OBSOLETE: no more getcfg
# 2) store ifup options while restarting the network (e.g. the chosen provider)
# 3) pass status information to smpppd to allow kinternet to show them to the
# user.
# 4) control running ifup/down processes (ifdown has to stop a running ifup)
# To handle this cached information, there are the *_cached_config_data
# functions.
# write_cached_config_data <type> <data> <name> [PFX=<prefix>]
# needs at least 3 arguments
# - the type of data to write: config, options, state, ...
# - the data itself
# - the configuration or interface name
# - the file prefix is optional and must be given in the form PFX=<prefix>
# (default prefix is 'if-'
# prints nothing
# You have to commit changes after writing with commit_cached_config_data()
write_cached_config_data () {
	# Store "<type>=<data>" ($1=$2) for configuration/interface $3 in
	# a per-process pending copy of its cache file; an existing entry
	# of the same type is replaced.  Changes become visible only after
	# commit_cached_config_data().  Optional $4 is PFX=<prefix>.
	# bail out when the cache directory is not writable
	touch $RUN_FILES_BASE/tmp/test 2>/dev/null || return 1
	local PFX FILE TMPFILE MODFILE
	test -n "$4" && eval $4
	: ${PFX:=if-}
	FILE=$RUN_FILES_BASE/$PFX$3
	MODFILE=$RUN_FILES_BASE/tmp/$PFX$3.$$ # MODFILE
	TMPFILE=$RUN_FILES_BASE/tmp/$PFX$3.$$.tmp # MODFILE
	# continue working on the pending per-process copy if one exists
	test -f $MODFILE || cp $FILE $MODFILE 2>/dev/null
	FILE=$MODFILE # MODFILE
	touch $FILE
	# copy every line except those whose key matches the $1 pattern
	while IFS== read a b; do
		case $a in
			$1) ;;
			*) echo "$a=$b" ;;
		esac
	done < <(cat $FILE) > $TMPFILE
	if [ -n "$2" ] ; then
		echo "$1=$2" >> $TMPFILE
	fi
	if [ -f $TMPFILE ] ; then
		mv $TMPFILE $FILE
	fi
}
# INTERFACE=`read_cached_config_data <type> <name> [PFX=<prefix>]`
# needs at least 2 arguments
# - the type of data to read: config, options, state, ...
# - the configuration or interface name
# - the file prefix is optional and must be given in the form PFX=<prefix>
# (default prefix is 'if-'
# prints the wanted data
read_cached_config_data () {
	# Print the value stored for data type $1 in the cache file of
	# configuration/interface $2.  The optional third argument
	# PFX=<prefix> selects a different file prefix (default 'if-').
	# sanity check: the cache directory must be writable
	touch $RUN_FILES_BASE/tmp/test 2>/dev/null || return 1
	local PFX key value cachefile
	test -n "$3" && eval $3
	: ${PFX:=if-}
	cachefile=$RUN_FILES_BASE/$PFX$2
	[ -r "$cachefile" ] || return 0
	# emit the value of every line whose key matches the $1 pattern
	while IFS== read key value; do
		case $key in
			$1) echo "$value" ;;
		esac
	done < $cachefile
}
# delete_from_cached_config_data <type> [<data> [<name>]] [PFX=<prefix>]
# Deletes an entry "$1=$2" from all config data cache files.
# If there is a third argument, we delete it only from this configuration. All
# handled files that are empty after modification will be deleted.
# If $2 is empty then remove line $1=* from this ($3) or all configuration.
# If $1 is '*' it will remove all entries.
#
# !!! WIP !!!
# It currently works only on one file and 2nd and 3rd argument are mandatory
# !!! WIP !!!
#
# needs at least 1 argument
# - the type of data to delete: config, options, state, ...
# - optional the data itself
# - optional the configuration or interface name
# - the file prefix is also optional and must be given in the form PFX=<prefix>
# (default prefix is 'if-'
# prints nothing
# You have to commit changes after deleting with commit_cached_config_data()
delete_from_cached_config_data () {
	# Remove the entry "$TYPE=$DATA" from the pending per-process copy
	# of the cache file of $NAME (see the description above); changes
	# need commit_cached_config_data() to be published.
	touch $RUN_FILES_BASE/tmp/test 2>/dev/null || return 1
	local TYPE DATA PFX FILE TMPFILE MODFILE NAME
	TYPE=$1; shift
	# $2 (data) and $3 (name) are optional: an argument starting with
	# "PFX" is already the prefix option and ends positional parsing
	if [ "$1" = "${1#PFX}" ] ; then
		DATA=$1; shift
	fi
	if [ "$1" = "${1#PFX}" ] ; then
		NAME=$1; shift
	fi
	test -n "$1" && eval $1
	: ${PFX:=if-}
	FILE=$RUN_FILES_BASE/$PFX$NAME # MODFILE
	MODFILE=$RUN_FILES_BASE/tmp/$PFX$NAME.$$ # MODFILE
	TMPFILE=$RUN_FILES_BASE/tmp/$PFX$NAME.$$.tmp # MODFILE
	# continue working on the pending per-process copy if one exists
	test -f $MODFILE || cp $FILE $MODFILE 2>/dev/null
	FILE=$MODFILE # MODFILE
	touch $FILE
	if [ -s "$FILE" ] ; then
		while IFS== read a b; do
			case $a in
				$TYPE)
					# keep entries of this type whose value differs
					if [ "$b" != "$DATA" -a -n "$DATA" ] ; then
						echo "$a=$b"
					fi
				;;
				*) echo "$a=$b" ;;
			esac
		done < <(cat $FILE) > $TMPFILE
	fi
	if [ -f $TMPFILE ] ; then
		mv $TMPFILE $FILE
	fi
	# drop the pending file when it became empty
	if [ ! -s $FILE ] ; then
		rm -Rf $FILE
	fi
	# done MODFILE
}
# HWDESC NIX < <(grep_cached_config_data <type> <data> [PFX=<prefix>])
# needs 2 arguments:
# - the type of data to grep for: config, options, state, ...
# - the data itself
# - the file prefix is optional and must be given in the form PFX=<prefix>
# (default prefix is 'if-'
# prints all matching configuration names in a single line
grep_cached_config_data () {
	# Print (on one line) the names of all cached configurations whose
	# entry of type $1 has exactly the value $2.  The optional third
	# argument PFX=<prefix> overrides the default 'if-' file prefix.
	touch $RUN_FILES_BASE/tmp/test 2>/dev/null || return 1
	local PFX cachefile key value
	test -n "$3" && eval $3
	: ${PFX:=if-}
	local saved_nullglob="$(shopt -p nullglob)"
	shopt -s nullglob
	for cachefile in $RUN_FILES_BASE/$PFX*; do
		while IFS== read key value; do
			case $key in
			$1)
				# exact value match: emit the configuration name
				if [ "$value" = "$2" ] ; then
					echo -n "${cachefile#$RUN_FILES_BASE/$PFX} "
				fi
			;;
			esac
		done < $cachefile
	done
	eval $saved_nullglob
	echo
}
# Writing and deleting cached config data is always done in temporary files. To
# make this changes visible in the right file you must commit the changes. This
# helps to make file changes atomic.
commit_cached_config_data () {
	# Atomically publish pending changes for configuration $1: move
	# the per-process temp file over the real cache file, or remove
	# the cache file when no temp file exists (everything deleted).
	# Optional $2 is PFX=<prefix> (default 'if-').
	touch $RUN_FILES_BASE/tmp/test 2>/dev/null || return 1
	local PFX target pending
	test -n "$2" && eval $2
	: ${PFX:=if-}
	target=$RUN_FILES_BASE/$PFX$1
	pending=$RUN_FILES_BASE/tmp/$PFX$1.$$
	if [ -f $pending ] ; then
		mv $pending $target
	else
		rm -f $target
	fi
}
_set_ethtool_options()
{
	# Apply a single ETHTOOL_OPTIONS* value ($3) to interface $1 using
	# the ethtool binary $2.  Old style values ("wol g") become a
	# "-s <iface> ..." call; values starting with an option ("-K ...")
	# get their second word replaced by the interface name.
	local nic=$1
	local tool=$2
	local opts=$3
	local rc output firstopt placeholder rest
	# nothing to do for an empty option string
	test "x$opts" = "x" && return 0
	test "x$nic" = "x" -o "x$tool" = "x" && return 1
	case $opts in
	-*)
		# got an option, replace second word with current interface name
		read firstopt placeholder rest < <(echo "$opts")
		opts="$firstopt $nic $rest"
		;;
	*)
		# old style, setting a parameter...
		opts="-s $nic $opts"
		;;
	esac
	output=`$tool $opts 2>&1`
	rc=$?
	if test $rc -eq 0 ; then
		info_mesg "$tool $opts${output:+: $output}"
	else
		err_mesg "Error while executing '$tool $opts': [$rc] $output"
	fi
	return $rc
}
set_ethtool_options()
{
	# Apply every non-empty ETHTOOL_OPTIONS* variable to the interface
	# given as $1 (falling back to $INTERFACE).  Returns 1 without an
	# interface name or when options are set but ethtool is missing.
	local nic=${1:-$INTERFACE}
	local ethtool_bin="" optvar candidate
	test "x$nic" = x && return 1
	# prefer /sbin/ethtool, fall back to /usr/sbin/ethtool
	for candidate in /sbin/ethtool /usr/sbin/ethtool ; do
		if test -x "$candidate" ; then
			ethtool_bin=$candidate
			break
		fi
	done
	for optvar in ${!ETHTOOL_OPTIONS*} ; do
		test "x${!optvar}" = "x" && continue
		if test -z "$ethtool_bin" ; then
			err_mesg "ethtool is not installed"
			return 1
		fi
		_set_ethtool_options "$nic" "$ethtool_bin" "${!optvar}"
	done
	return 0
}
have_ethtool_options()
{
	# Succeed (return 0) when at least one ETHTOOL_OPTIONS* variable
	# carries a non-empty value, fail (return 1) otherwise.
	local optvar
	for optvar in ${!ETHTOOL_OPTIONS*} ; do
		if test -n "${!optvar}" ; then
			return 0
		fi
	done
	return 1
}
| true |
f923aad61da4f6735230ce0a82d6829d55c080e9 | Shell | greensk/debsnapshot | /debsave | UTF-8 | 334 | 3.453125 | 3 | [] | no_license | #!/bin/bash
# Save the list of installed Debian packages (state "ii") into a gzip
# compressed snapshot file.
# Usage: debsave [filename]   (default: debsnapshot_<YYMMDDhhmm>)
if [ -z "$1" ]
then
	# no name given: derive one from the current date/time
	dt=$(date +%y%m%d%H%M)
	filename="debsnapshot_$dt"
else
	filename="$1"
fi
if [ ! -w . ]
then
	echo "Невозможно сохранить файл $filename"
	# bugfix: abort here -- the original fell through and tried to
	# write the snapshot into the unwritable directory anyway
	exit 1
fi
dpkg -l | awk '/^ii/{print $2}' | gzip > "$filename"
echo "Информация о состоянии сохранена в файл $filename"
| true |
12f81b447ac489ca5cac42c4f92347baa4beadb2 | Shell | MobMonRob/CustomBoostBuild | /setup_Linux64.sh | UTF-8 | 2,611 | 3.6875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# resolve the directory this script lives in, make it the working
# directory and pull in the shared build configuration/helpers
scriptPath="$(realpath -s "${BASH_SOURCE[0]}")"
scriptDir="$(dirname "$scriptPath")"
cd "$scriptDir"
source "./_bash_config.sh"
run() {
	# Build boost for 64 bit Linux: copy the sources, bootstrap b2,
	# build the static libraries and link them into one libboost.so.
	# NOTE: the read-only locals below are also read by the helper
	# functions via bash dynamic scoping.
	local -r fullLocalTmp="$(realpath "$linuxTmp")"
	local -r fullLocalTarget="$(realpath "$linuxTarget")"
	local -r boostDir="$fullLocalTmp/boost"
	local -r stageDir="$fullLocalTmp/stage"
	local -r buildDir="$fullLocalTmp/build"
	local -r boostLibDir="$stageDir/lib"
	copy_boost_dir
	bootstrap_boost
	build_boost
	link_so
}
copy_boost_dir() {
	# Refresh $boostDir with a hardlinked (-l), symlink-following (-L)
	# copy of the architecture independent boost checkout.
	local -r noarchBoostDir="$noarchTmp/boost"
	rm -rdf "$boostDir"
	mkdir -p "$boostDir"
	cp -r -L -l "$noarchBoostDir" -T "$boostDir"
}
bootstrap_boost() {
	# Clean the boost tree (including ignored files) and run its
	# bootstrap script to produce the b2 build driver.
	cd "$boostDir"
	git clean -d -f -X
	rm -rdf "$buildDir"
	./bootstrap.sh
	cd "$scriptDir"
}
build_boost() {
	# Compile the selected boost libraries as static, position
	# independent x86-64 archives into $stageDir using b2.
	cd "$boostDir"
	echo "using gcc : : g++ ;" > ./user-config.jam
	rm -rdf "$stageDir"
	#-flto
	local -r compilerArgs="-fPIC -std=c++14 -w -O3"
	local -r boostLibsToBuild="--with-thread --with-date_time"
	#--jobs="$((3*$(nproc)))"
	#Debugging: --jobs=1
	#Debugging: -d+2
	#https://www.boost.org/doc/libs/1_54_0/libs/iostreams/doc/installation.html
	#--without-wave --without-log --without-test --without-python --without-context --without-coroutine
	./b2 -q -sNO_BZIP2=1 $boostLibsToBuild --user-config=user-config.jam --jobs="$((3*$(nproc)))" --layout=tagged --toolset=gcc architecture=x86 address-model=64 target-os=linux optimization=speed cflags="$compilerArgs" cxxflags="$compilerArgs" variant=release threading=multi link=static runtime-link=shared --stagedir="$stageDir" --build-dir="$buildDir" variant=release stage
	cd "$scriptDir"
}
link_so() {
	# Merge all staged static boost archives into a single shared
	# library ($fullLocalTarget/libboost.so); libraries known to break
	# the link are removed from the stage first.
	rm -f "$boostLibDir"/*test*.a
	#Needs a user defined cpp_main function
	rm -f "$boostLibDir"/libboost_prg_exec_monitor*.a
	#ToDo: Fix boost python lib runtime dependency
	rm -f "$boostLibDir"/*python*.a
	#Erratic error - windows only
	rm -f "$boostLibDir"/*log*.a
	#Erratic error - windows + posix only
	rm -f "$boostLibDir"/*wave*.a
	# build a "-l:<archive>" argument for every remaining stage lib
	local -r boostLibs=$(find "$boostLibDir" -maxdepth 1 -mindepth 1 -type f -printf '-l:%f ')
	echo "linking... (needs some time)"
	rm -rdf "$fullLocalTarget"/lib*
	mkdir -p "$fullLocalTarget"
	# Libs:
	#-licuuc -licudata -licui18n -lz
	# Found in
	#sudo apt install libicu-dev zlib1g-dev
	# Only needed for some boost libs
	#-flto
	g++ -shared \
	-O3 \
	-Wl,-Bstatic -Wl,--start-group -Wl,--whole-archive \
	-L"$boostLibDir" $boostLibs \
	-Wl,--no-whole-archive \
	-pthread \
	-Wl,--end-group -Wl,-Bdynamic \
	-o "$fullLocalTarget/libboost.so" \
	-Wl,--as-needed -Wl,--no-undefined -Wl,--no-allow-shlib-undefined
}
run_bash run $@
| true |
126acd85c4c224af7ae57e2ef49384d8bbe4767e | Shell | lilyvp1982/denovo_assembly_pipeline | /scripts/tool.busco.sh | UTF-8 | 1,070 | 3.46875 | 3 | [] | no_license | #! /bin/bash/
# tool.busco.sh -- run BUSCO on an assembly unless a finished
# short_summary report already exists, then move the report into the
# requested output directory and clean up the BUSCO run directory.
if [ "$#" -lt 3 ]; then
	echo "Usage: sh tool.busco.sh data_n data busco_odir [config]"
	exit
fi
# positional arguments: run label, input assembly, output directory
data_n=$1
data=$2
busco_odir=$3
# optional 4th argument: config file providing pdir=<pipeline dir>,
# which is used to locate the shared config parser
if [ "$#" -gt 3 ]; then
	while IFS='=' read -r key value; do
		case $key in
			pdir)
				pdir=$value
				;;
		esac
	done <<<"$(grep "=" "$4")"
	source "$pdir/tool.parse.config.sh"
fi
buscorun=$buscopref"_"$data_n
echo "$buscorun"
if [ ! -e "$busco_odir/short_summary_$buscorun.txt" ]; then
	if [ ! -e "$buscodir/run_$buscorun/short_summary_$buscorun.txt" ]; then
		$PYTHON "$runBusco" -c 10 -i "$data" -l "$buscodb" -o "$buscorun" -f
		if [ -e "$buscodir/run_$buscorun/short_summary_$buscorun.txt" ]; then
			mv "$buscodir/run_$buscorun/short_summary_$buscorun.txt" "$busco_odir/short_summary_$buscorun.txt"
			rm -r "$buscodir/run_$buscorun/"
		else
			echo "BUSCO failed check 4.busco.log"
		fi
	else
		# bugfix: was "$busco_odiro" (undefined variable/typo), which
		# moved the report to "/short_summary_..." instead of the
		# requested output directory
		mv "$buscodir/run_$buscorun/short_summary_$buscorun.txt" "$busco_odir/short_summary_$buscorun.txt"
		rm -r "$buscodir/run_$buscorun/"
	fi
fi
| true |
4fd66c5a79e6476fcfb4a8349bed9d3a7a1c3280 | Shell | openoms/raspiblitz | /home.admin/99updateMenu.sh | UTF-8 | 18,295 | 3.71875 | 4 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
# load raspiblitz config data
source /home/admin/_version.info
source /home/admin/raspiblitz.info
source /mnt/hdd/raspiblitz.conf 2>/dev/null
## PROCEDURES
release()
{
  # Guide the user through a full SD-card release update: explain the
  # procedure, offer a lightning channel backup (download or SD card
  # copy) and finally power the RaspiBlitz down for the card swap.
  whiptail --title "Update Instructions" --yes-button "Not Now" --no-button "Start Update" --yesno "To update your RaspiBlitz to a new version:
- Download the new SD card image to your laptop:
  https://github.com/rootzoll/raspiblitz
- Flash that SD card image to a new SD card (best)
  or override old SD card after shutdown (fallback)
- Choose 'Start Update' below.
No need to close channels or download blockchain again.
Do you want to start the Update now?
" 16 62
  if [ $? -eq 0 ]; then
    exit 0
  fi
  # offer a lightning data backup when a lightning node is configured
  if [ "${lightning}" != "" ]; then
    whiptail --title "Lightning Data Backup" --yes-button "Download Backup" --no-button "Skip" --yesno "
Before we start the RaspiBlitz Update process,
its recommended to make a backup of all your Lightning
Channel Data and download that file to your laptop.
Do you want to download Lightning Data Backup now?
" 12 62
    if [ $? -eq 0 ]; then
      if [ "${lightning}" == "lnd" ] || [ "${lnd}" = "on" ]; then
        clear
        echo "***********************************"
        echo "* PREPARING THE LND BACKUP DOWNLOAD"
        echo "***********************************"
        echo "please wait .."
        /home/admin/config.scripts/lnd.compact.sh interactive
        /home/admin/config.scripts/lnd.backup.sh lnd-export-gui
        echo
        echo "PRESS ENTER to continue once you're done downloading."
        read key
      fi
      if [ "${lightning}" == "cl" ] || [ "${cl}" = "on" ]; then
        clear
        echo "*******************************************"
        echo "* PREPARING THE CORE LIGHTNING BACKUP DOWNLOAD"
        echo "*******************************************"
        echo "please wait .."
        /home/admin/config.scripts/cl.backup.sh cl-export-gui
        echo
        echo "PRESS ENTER to continue once you're done downloading."
        read key
      fi
    else
      # user skipped the download: still keep a backup on the old card
      clear
      echo "*****************************************"
      echo "* JUST MAKING A BACKUP TO THE OLD SD CARD"
      echo "*****************************************"
      echo "please wait .."
      sleep 2
      if [ "${lightning}" == "lnd" ] || [ "${lnd}" = "on" ]; then
        /home/admin/config.scripts/lnd.backup.sh lnd-export
      fi
      if [ "${lightning}" == "cl" ] || [ "${cl}" = "on" ]; then
        /home/admin/config.scripts/cl.backup.sh cl-export
      fi
      sleep 3
    fi
  fi
  whiptail --title "READY TO UPDATE?" --yes-button "START UPDATE" --no-button "Cancel" --yesno "If you start the update: The RaspiBlitz will power down.
Once the LCD is white and no LEDs are blinking anymore:
- Remove the Power from RaspiBlitz
- Exchange the old with the new SD card
- Connect Power back to the RaspiBlitz
- Follow the instructions on the LCD
Do you have the SD card with the new version image ready
and do you WANT TO START UPDATE NOW?
" 16 62
  if [ $? -eq 1 ]; then
    dialog --title " Update Canceled " --msgbox "
OK. RaspiBlitz will NOT update now.
" 7 39
    # restart the lightning services that were stopped for the backup
    sudo systemctl start lnd 2>/dev/null
    sudo systemctl start lightningd 2>/dev/null
    exit 0
  fi
  clear
  sudo shutdown now
}
patchNotice()
{
  # Warn about the security implications of patching straight from
  # GitHub; exits the script when the user chooses not to patch.
  whiptail --title "Patching Notice" --yes-button "Dont Patch" --no-button "Patch Menu" --yesno "This is the possibility to patch your RaspiBlitz:
It means it will sync the program code with the
GitHub repo for your version branch v${codeVersion}.
This can be useful if there are important updates
in between releases to fix severe bugs. It can also
be used to sync your own code with your RaspiBlitz
if you are developing on your own GitHub Repo.
BUT BEWARE: This means RaspiBlitz will contact GitHub,
hotfix the code and might compromise your security.
Do you want to Patch your RaspiBlitz now?
" 18 58
  if [ $? -eq 0 ]; then
    exit 0
  fi
}
patch()
{
  # Menu to sync/patch the RaspiBlitz code with a GitHub repo: run the
  # sync, switch the repo owner, switch the branch, or check out a
  # pull request for testing.
  # get sync info
  source <(sudo /home/admin/config.scripts/blitz.github.sh info)
  # Patch Options
  OPTIONS=(PATCH "Patch/Sync RaspiBlitz with GitHub Repo" \
    REPO "Change GitHub Repo to sync with" \
    BRANCH "Change GitHub Branch to sync with" \
    PR "Checkout a PullRequest to test"
	)
  CHOICE=$(whiptail --clear --title " GitHub user:${activeGitHubUser} branch:${activeBranch} (${commitHashShort})" --menu "" 11 60 4 "${OPTIONS[@]}" 2>&1 >/dev/tty)
  clear
  case $CHOICE in
    PATCH)
      # sync scripts, API and WebUI code with the active repo/branch
      echo
      echo "#######################################################"
      echo "### UPDATE BLITZ --> SCRIPTS (code)"
      /home/admin/config.scripts/blitz.github.sh -run
      echo
      echo "#######################################################"
      echo "### UPDATE BLITZ --> API"
      sudo /home/admin/config.scripts/blitz.web.api.sh update-code
      echo
      echo "#######################################################"
      echo "### UPDATE BLITZ --> WEBUI"
      sudo /home/admin/config.scripts/blitz.web.ui.sh update
      sleep 5
      whiptail --title " Patching/Syncing " --yes-button "Reboot" --no-button "Skip Reboot" --yesno " OK patching/syncing done.
By default a reboot is advised.
Only skip reboot if you know
it will work without restart.
" 11 40
      if [ $? -eq 0 ]; then
        clear
        echo "REBOOT .."
        /home/admin/config.scripts/blitz.shutdown.sh reboot
        sleep 8
        exit 1
      else
        echo "SKIP REBOOT .."
        exit 0
      fi
      ;;
    REPO)
      clear
      echo "..."
      newGitHubUser=$(whiptail --inputbox "\nPlease enter the GitHub USERNAME of the forked RaspiBlitz Repo?" 10 38 ${activeGitHubUser} --title "Change Sync Repo" 3>&1 1>&2 2>&3)
      exitstatus=$?
      if [ $exitstatus = 0 ]; then
        # use only the first word of the entered user name
        newGitHubUser=$(echo "${newGitHubUser}" | cut -d " " -f1)
        echo "--> " ${newGitHubUser}
        error=""
        source <(sudo -u admin /home/admin/config.scripts/blitz.github.sh ${activeBranch} ${newGitHubUser})
        if [ ${#error} -gt 0 ]; then
          whiptail --title "ERROR" --msgbox "${error}" 8 30
        fi
      fi
      patch all
      exit 0
      ;;
    BRANCH)
      clear
      echo "..."
      newGitHubBranch=$(whiptail --inputbox "\nPlease enter the GitHub BRANCH of the RaspiBlitz Repo '${activeGitHubUser}'?" 10 38 ${activeBranch} --title "Change Sync Branch" 3>&1 1>&2 2>&3)
      exitstatus=$?
      if [ $exitstatus = 0 ]; then
        newGitHubBranch=$(echo "${newGitHubBranch}" | cut -d " " -f1)
        echo "--> " $newGitHubBranch
        error=""
        source <(sudo -u admin /home/admin/config.scripts/blitz.github.sh ${newGitHubBranch})
        if [ ${#error} -gt 0 ]; then
          whiptail --title "ERROR" --msgbox "${error}" 8 30
        fi
      fi
      patch all
      exit 0
      ;;
    PR)
      clear
      echo "..."
      pullRequestID=$(whiptail --inputbox "\nPlease enter the NUMBER of the PullRequest on RaspiBlitz Repo '${activeGitHubUser}'?" 10 46 --title "Checkout PullRequest ID" 3>&1 1>&2 2>&3)
      exitstatus=$?
      if [ $exitstatus = 0 ]; then
        pullRequestID=$(echo "${pullRequestID}" | cut -d " " -f1)
        echo "# --> " $pullRequestID
        # fetch the PR head into a local pr<ID> branch and sync to it
        cd /home/admin/raspiblitz
        git fetch origin pull/${pullRequestID}/head:pr${pullRequestID}
        error=""
        source <(sudo -u admin /home/admin/config.scripts/blitz.github.sh pr${pullRequestID})
        if [ ${#error} -gt 0 ]; then
          whiptail --title "ERROR" --msgbox "${error}" 8 30
        else
          echo "# update installs .."
          /home/admin/config.scripts/blitz.github.sh -justinstall
        fi
      fi
      exit 0
      ;;
  esac
}
lnd()
{
  # Offer the verified optional LND update (when not yet installed)
  # or the untested "reckless" update to the latest release; performs
  # the chosen update and reboots on success.
  # get lnd info
  source <(sudo -u admin /home/admin/config.scripts/lnd.update.sh info)
  # LND Update Options
  OPTIONS=()
  if [ ${lndUpdateInstalled} -eq 0 ]; then
    OPTIONS+=(VERIFIED "Optional LND update to ${lndUpdateVersion}")
  fi
  OPTIONS+=(RECKLESS "Experimental LND update to ${lndLatestVersion}")
  CHOICE=$(whiptail --clear --title "Update LND Options" --menu "" 9 60 2 "${OPTIONS[@]}" 2>&1 >/dev/tty)
  clear
  case $CHOICE in
    VERIFIED)
      if [ ${lndUpdateInstalled} -eq 1 ]; then
        whiptail --title "ALREADY INSTALLED" --msgbox "The LND version ${lndUpdateVersion} is already installed." 8 30
        exit 0
      fi
      whiptail --title "OPTIONAL LND UPDATE" --yes-button "Cancel" --no-button "Update" --yesno "BEWARE on updating to LND v${lndUpdateVersion}:
${lndUpdateComment}
Do you really want to update LND now?
" 16 58
      if [ $? -eq 0 ]; then
        echo "# cancel update"
        exit 0
      fi
      # if loop is installed remove
      if [ "${loop}" == "on" ]; then
        sudo -u admin /home/admin/config.scripts/bonus.loop.sh off
      fi
      error=""
      warn=""
      source <(sudo -u admin /home/admin/config.scripts/lnd.update.sh verified)
      if [ ${#error} -gt 0 ]; then
        whiptail --title "ERROR" --msgbox "${error}" 8 30
      else
        # if loop was installed before reinstall
        if [ "${loop}" == "on" ]; then
          sudo -u admin /home/admin/config.scripts/bonus.loop.sh on
        fi
        /home/admin/config.scripts/blitz.shutdown.sh reboot
        sleep 8
      fi
      ;;
    RECKLESS)
      whiptail --title "RECKLESS LND UPDATE to ${lndLatestVersion}" --yes-button "Cancel" --no-button "Update" --yesno "Using the 'RECKLESS' LND update will simply
grab the latest LND release published on the LND GitHub page (also release candidates).
There will be no security checks on signature, etc.
This update mode is only recommended for testing and
development nodes with no serious funding.
Do you really want to update LND now?
" 16 58
      if [ $? -eq 0 ]; then
        echo "# cancel update"
        exit 0
      fi
      error=""
      source <(sudo -u admin /home/admin/config.scripts/lnd.update.sh reckless)
      if [ ${#error} -gt 0 ]; then
        whiptail --title "ERROR" --msgbox "${error}" 8 30
      else
        /home/admin/config.scripts/blitz.shutdown.sh reboot
        sleep 8
      fi
      ;;
  esac
}
cl()
{
  # Offer the verified optional Core Lightning update (when not yet
  # installed) or the untested "reckless" update to the latest
  # release; unlike lnd() no reboot is needed afterwards.
  # get cl info
  source <(sudo -u admin /home/admin/config.scripts/cl.update.sh info)
  # Core Lightning Update Options
  OPTIONS=()
  if [ ${clUpdateInstalled} -eq 0 ]; then
    OPTIONS+=(VERIFIED "Optional Core Lightning update to ${clUpdateVersion}")
  fi
  OPTIONS+=(RECKLESS "Experimental Core Lightning update to ${clLatestVersion}")
  CHOICE=$(whiptail --clear --title "Update Core Lightning Options" --menu "" 9 60 2 "${OPTIONS[@]}" 2>&1 >/dev/tty)
  clear
  case $CHOICE in
    VERIFIED)
      if [ ${clUpdateInstalled} -eq 1 ]; then
        whiptail --title "ALREADY INSTALLED" --msgbox "The Core Lightning version ${clUpdateVersion} is already installed." 8 30
        exit 0
      fi
      whiptail --title "OPTIONAL Core Lightning UPDATE" --yes-button "Cancel" --no-button "Update" --yesno "BEWARE on updating to Core Lightning v${clUpdateVersion}:
${clUpdateComment}
Do you really want to update Core Lightning now?
" 16 58
      if [ $? -eq 0 ]; then
        echo "# cancel update"
        exit 0
      fi
      error=""
      warn=""
      source <(sudo -u admin /home/admin/config.scripts/cl.update.sh verified)
      if [ ${#error} -gt 0 ]; then
        whiptail --title "ERROR" --msgbox "${error}" 8 30
      else
        echo "# Core Lightning was updated successfully"
        exit 0
      fi
      ;;
    RECKLESS)
      whiptail --title "RECKLESS Core Lightning UPDATE to ${clLatestVersion}" --yes-button "Cancel" --no-button "Update" \
        --yesno "Using the 'RECKLESS' Core Lightning update will download the latest Core Lightning release published on the Core Lightning GitHub page.
The update was not tested as a part of the release.
This update mode is only recommended for testing and
development nodes with no serious funding.
Do you really want to update Core Lightning now?
" 16 58
      if [ $? -eq 0 ]; then
        echo "# cancel update"
        exit 0
      fi
      error=""
      source <(sudo -u admin /home/admin/config.scripts/cl.update.sh reckless)
      if [ ${#error} -gt 0 ]; then
        whiptail --title "ERROR" --msgbox "${error}" 8 30
      else
        echo "# Core Lightning was updated successfully"
        # unlock or fix issues from the logs
        /home/admin/config.scripts/cl.hsmtool.sh unlock ${chain}net
        exit 0
      fi
      ;;
  esac
}
#######################################
# Interactive update menu for Bitcoin Core.
# Offers a TESTED update (version verified on RaspiBlitz), a RECKLESS
# update (latest upstream stable, untested here) and a CUSTOM version
# prompt.
# Globals (populated by "bitcoin.update.sh info"): bitcoinUpdateInstalled,
#   bitcoinVersion, bitcoinUpdateVersion, installedVersion,
#   bitcoinLatestVersion.
# Side effects: runs bitcoin.update.sh as user admin and reboots the node.
#######################################
bitcoinUpdate() {
  # get bitcoin info — fills the globals used below
  source <(sudo -u admin /home/admin/config.scripts/bitcoin.update.sh info)
  # bitcoin update options
  OPTIONS=()
  if [ "${bitcoinUpdateInstalled}" -eq 0 ]; then
    OPTIONS+=(TESTED "Optional Bitcoin Core update to ${bitcoinVersion}")
  fi
  # bug fix: quote the operands — with an empty or unset version variable
  # the old unquoted test ([ $installedVersion != ... ]) was a syntax error
  # and the RECKLESS entry silently disappeared
  if [ "${installedVersion}" != "${bitcoinLatestVersion}" ] && [ "${bitcoinVersion}" != "${bitcoinLatestVersion}" ]; then
    OPTIONS+=(RECKLESS "Untested Bitcoin Core update to ${bitcoinLatestVersion}")
  fi
  OPTIONS+=(CUSTOM "Update Bitcoin Core to a chosen version")
  # 2>&1 >/dev/tty keeps the dialog on the terminal while capturing the choice
  CHOICE=$(dialog --clear \
    --backtitle "" \
    --title "Bitcoin Core Update Options" \
    --ok-label "Select" \
    --cancel-label "Back" \
    --menu "" \
    9 60 3 \
    "${OPTIONS[@]}" 2>&1 >/dev/tty)
  case $CHOICE in
    TESTED)
      # guard: the menu entry is hidden once installed, but re-check anyway
      if [ "${bitcoinUpdateInstalled}" -eq 1 ]; then
        whiptail --title "ALREADY INSTALLED" \
          --msgbox "The Bitcoin Core version ${bitcoinUpdateVersion} is already installed." 8 30
        exit 0
      fi
      # confirmation dialog; "Yes" is labelled Cancel, so $? == 0 aborts
      whiptail --title "OPTIONAL Bitcoin Core update" --yes-button "Cancel" --no-button "Update" \
        --yesno "Info on updating to Bitcoin Core v${bitcoinVersion}:
This Bitcoin Core version was tested on this system.
Will verify the binary checksum and signature.
Do you really want to update Bitcoin Core now?
" 12 58
      if [ $? -eq 0 ]; then
        echo "# cancel update"
        exit 0
      fi
      # run the update; bitcoin.update.sh reports problems via error
      error=""
      warn=""
      source <(sudo -u admin /home/admin/config.scripts/bitcoin.update.sh tested)
      if [ ${#error} -gt 0 ]; then
        whiptail --title "ERROR" --msgbox "${error}" 8 30
      fi
      /home/admin/config.scripts/blitz.shutdown.sh reboot
      ;;
    RECKLESS)
      # confirmation dialog; "Yes" is labelled Cancel, so $? == 0 aborts
      whiptail --title "UNTESTED Bitcoin Core update to ${bitcoinLatestVersion}" --yes-button "Cancel" \
        --no-button "Update" --yesno "Using the 'RECKLESS' Bitcoin Core update will grab
the latest stable Bitcoin Core release published on the Bitcoin Core GitHub page.
This Bitcoin Core version was NOT tested on this system.
Will verify the binary checksum and signature.
Do you really want to update Bitcoin Core now?
" 16 58
      if [ $? -eq 0 ]; then
        echo "# cancel update"
        exit 0
      fi
      error=""
      source <(sudo -u admin /home/admin/config.scripts/bitcoin.update.sh reckless)
      if [ ${#error} -gt 0 ]; then
        whiptail --title "ERROR" --msgbox "${error}" 8 30
      fi
      /home/admin/config.scripts/blitz.shutdown.sh reboot
      ;;
    CUSTOM)
      # the custom flow does its own prompting and verification
      sudo -u admin /home/admin/config.scripts/bitcoin.update.sh custom
      /home/admin/config.scripts/blitz.shutdown.sh reboot
      ;;
  esac
}
# quick call by parameter
# "update github" from the command line behaves like the PATCH menu entry
if [ "$1" == "github" ]; then
patch all
exit 0
fi
# Basic Options Menu
# Entries are appended conditionally based on which services the raspiblitz
# config (sourced earlier) reports as installed/enabled.
WIDTH=55
OPTIONS=()
OPTIONS+=(RELEASE "RaspiBlitz Release Update/Recovery")
OPTIONS+=(PATCH "Patch RaspiBlitz v${codeVersion}")
OPTIONS+=(BITCOIN "Bitcoin Core Update Options")
if [ "${lightning}" == "lnd" ] || [ "${lnd}" == "on" ]; then
OPTIONS+=(LND "Interim LND Update Options")
fi
if [ "${lightning}" == "cl" ] || [ "${cl}" == "on" ]; then
OPTIONS+=(CL "Interim Core Lightning Update Options")
fi
if [ "${bos}" == "on" ]; then
OPTIONS+=(BOS "Update Balance of Satoshis")
fi
if [ "${ElectRS}" == "on" ]; then
OPTIONS+=(ELECTRS "Update Electrs")
fi
if [ "${RTL}" == "on" ]||[ "${cRTL}" == "on" ]; then
OPTIONS+=(RTL "Update RTL")
fi
if [ "${thunderhub}" == "on" ]; then
OPTIONS+=(THUB "Update ThunderHub")
fi
if [ "${lndg}" == "on" ]; then
OPTIONS+=(LNDG "Update LNDg")
fi
if [ "${specter}" == "on" ]; then
OPTIONS+=(SPECTER "Update Specter Desktop")
fi
if [ "${BTCPayServer}" == "on" ]; then
OPTIONS+=(BTCPAY "Update BTCPayServer")
fi
if [ "${sphinxrelay}" == "on" ]; then
OPTIONS+=(SPHINX "Update Sphinx Server Relay")
fi
if [ "${homer}" == "on" ]; then
OPTIONS+=(HOMER "Update Homer")
fi
if [ "${mempoolExplorer}" == "on" ]; then
OPTIONS+=(MEMPOOL "Update Mempool Explorer")
fi
if [ "${jam}" == "on" ]; then
OPTIONS+=(JAM "Update Jam (JoinMarket WebUI)")
fi
if [ "${runBehindTor}" == "on" ]; then
OPTIONS+=(TOR "Update Tor from the Torproject repo")
fi
if [ "${itchysats}" == "on" ]; then
OPTIONS+=(ITCHYSATS "Update ItchySats")
fi
# size the dialog to the number of entries: OPTIONS holds tag/description
# pairs, so half the array length is the number of menu rows
CHOICE_HEIGHT=$(("${#OPTIONS[@]}/2+1"))
HEIGHT=$((CHOICE_HEIGHT+6))
# 2>&1 >/dev/tty keeps the dialog on the terminal while capturing the choice
CHOICE=$(dialog --clear \
--backtitle "" \
--title " Update Options " \
--ok-label "Select" \
--cancel-label "Main menu" \
--menu "" \
$HEIGHT $WIDTH $CHOICE_HEIGHT \
"${OPTIONS[@]}" 2>&1 >/dev/tty)
# dispatch: the first group calls functions defined in this script, the
# rest delegate to the matching bonus.*.sh script's "update" action
case $CHOICE in
RELEASE)
release
;;
PATCH)
patchNotice
patch all
;;
LND)
lnd
;;
CL)
cl
;;
BITCOIN)
bitcoinUpdate
;;
BOS)
/home/admin/config.scripts/bonus.bos.sh update
;;
ELECTRS)
/home/admin/config.scripts/bonus.electrs.sh update
;;
RTL)
/home/admin/config.scripts/bonus.rtl.sh update
;;
THUB)
/home/admin/config.scripts/bonus.thunderhub.sh update
;;
LNDG)
/home/admin/config.scripts/bonus.lndg.sh update
;;
SPECTER)
/home/admin/config.scripts/bonus.specter.sh update
;;
BTCPAY)
/home/admin/config.scripts/bonus.btcpayserver.sh update
;;
SPHINX)
/home/admin/config.scripts/bonus.sphinxrelay.sh update
;;
TOR)
# Tor update needs root, hence sudo for the whole script
sudo /home/admin/config.scripts/tor.network.sh update
;;
HOMER)
/home/admin/config.scripts/bonus.homer.sh update
;;
MEMPOOL)
/home/admin/config.scripts/bonus.mempool.sh update
;;
JAM)
/home/admin/config.scripts/bonus.jam.sh update
;;
ITCHYSATS)
/home/admin/config.scripts/bonus.itchysats.sh update
;;
esac
| true |
3d7decf0f631b92aaf75f8f756cb0de196e564b7 | Shell | HerrStinson/packer | /sles12/scripts/webserver.sh | UTF-8 | 992 | 3.078125 | 3 | [] | no_license | #!/bin/bash
# Provision a SLES12 build VM with the development packages needed to
# compile the web stack: registers the SDK DVD repos served by the packer
# host, rebuilds a couple of source RPMs that have no binaries, then
# installs the -devel package set.

# Package repositories served by the packer host (10.0.2.2 is the host as
# seen from inside the VM).
REPO1="http://10.0.2.2:8080/DVD1"
REPO2="http://10.0.2.2:8080/DVD2"
# Source RPMs that must be rebuilt locally.
SRCRPMS="libmcrypt tidy"

# Register both SDK DVDs unless an SDK repo is already configured.
if ! zypper lr | grep -q SDK; then
  zypper addrepo "$REPO1" 'SLES12-SDK DVD1'
  # bug fix: DVD2 was registered with the DVD1 URL — $REPO2 was defined
  # but never used
  zypper addrepo "$REPO2" 'SLES12-SDK DVD2'
fi

# Build and install each source RPM that is not installed yet
# (rpm -qa | grep is a substring match on the installed package list).
for srcrpm in $SRCRPMS; do
  if ! rpm -qa | grep -q "$srcrpm"; then
    zypper --quiet --non-interactive si "$srcrpm"
    # abort if the SPECS directory is missing instead of building in the
    # wrong place
    cd /usr/src/packages/SPECS || exit 1
    # bug fix: redirection order — discard stdout AND stderr; the old
    # "2>&1 >/dev/null" sent stderr to the terminal instead
    rpmbuild --quiet -bb "${srcrpm}.spec" >/dev/null 2>&1
    zypper --quiet --non-interactive install /usr/src/packages/RPMS/x86_64/*"${srcrpm}"*.rpm
  fi
done

# Development headers required by the web server build.
zypper --quiet --non-interactive install --no-recommends \
  ImageMagick-devel \
  freetype-devel \
  freetype2-devel \
  libXpm-devel \
  libbz2-devel \
  libcurl-devel \
  libicu-devel \
  libjpeg8-devel \
  libmjpegutils-devel \
  libopenssl-devel \
  libpng16-devel \
  libtidyp-devel \
  libvpx-devel \
  libxml2-devel \
  libxslt-devel \
  libzio-devel \
  libzip-devel \
  lua51-devel \
  mozilla-nss-devel \
  pcre-devel \
  rpm-build \
  zlib-devel
| true |
c4b6026b7b942f55d4eb712b8e743db46c89eef8 | Shell | raksha78/decrypt-1 | /run.sh | UTF-8 | 321 | 3.046875 | 3 | [
"MIT"
] | permissive | #!/bin/bash/ -e
# Decrypt every value under config.environment.testing.secureKeys:
# each value is written to encrypted.txt, decrypted with the
# shippable_decrypt helper, and the decrypted output is newline-terminated.
# (One jq call replaces the old cat | jq | jq | jq pipeline.)
keys=$(jq -r '.config.environment.testing.secureKeys | keys[]' config.json)

# NOTE(review): the unquoted $keys word-splits, so key names must not
# contain whitespace — same assumption as the original loop.
for key in $keys; do
  # bug fix: look the key up with --arg instead of interpolating $key into
  # the jq program text, which broke on any key that is not a bare
  # identifier (dots, dashes, quotes, ...)
  value=$(jq -r --arg k "$key" '.config.environment.testing.secureKeys[$k]' config.json)
  # bug fix: quote the value so the encrypted blob is written verbatim
  printf '%s\n' "$value" > encrypted.txt
  shippable_decrypt "encrypted.txt"
  # keep each decrypted value newline-terminated
  echo >> encrypted.txt.decrypted
done
| true |
bed2fa24023ef265de21b927958ba30bf081a416 | Shell | gderber/smgl-cauldron | /enchantment/lib/libenchantment | UTF-8 | 11,918 | 3.765625 | 4 | [] | no_license | #!/bin/bash
#-------------------------------------------------------------------------------
##
##=head1 SYNOPSIS
##
## Common functions and variables for different installers
##
##=head1 DESCRIPTION
##
## This file provides common function and variable definitions for
## the different types of installers that can be included on an ISO.
## This file is meant to be sourced by the different installers,
## so this file should not be run directly. This file, as well as
## the installer which sources this file, is meant to be called from
## inside the ISO during the actual install onto the target system.
##
##=head1 COPYRIGHT
##
## Copyright 2009 The Cauldron Team
##
##=head1 FUNCTIONS
##
##=over 4
##
#-------------------------------------------------------------------------------
# set LC_COLLATE to C so we don't get affected by the user's locale
# when grepping, sorting, etc.
export LC_COLLATE="C"
# shorthand and non-hardcoded /dev/null for output dumping
ENCHANT_NULL="${ENCHANT_NULL:-/dev/null}"
#-------------------------------------------------------------------------------
##
## Used only when checking initialization of the library and a fatal error
## occurs. Afterwards, functions from liberror are used instead.
##
#-------------------------------------------------------------------------------
# Print the given message and abort the installer immediately.
# Only used while checking library initialization, before liberror is
# available; later code reports errors through liberror instead.
function enchant_fatal_error() {
  printf '%s\n' "$1"
  exit 1
}
#-------------------------------------------------------------------------------
# liberror includes
#-------------------------------------------------------------------------------
. "$ENCHANT_BASE/liberror" ||
enchant_fatal_error "error: cannot load base library liberror"
# load enchantment error code and message defines
. "$ENCHANT_LIBS/errorcodes" ||
enchant_fatal_error "error: cannot load enchantment error codes"
. "$ENCHANT_BASE/libcolor" ||
enchant_fatal_error "error: cannot load base library libcolor"
# test/set whether color output should be enabled
[[ "$ENCHANT_COLOR" != "yes" || "$(grep -q 'nocolor' /proc/cmdline)" ]] &&
LIBCOLOR_NOCOLOR="yes"
# drop enchant_fatal_error if liberror was successfully loaded
unset enchant_fatal_error
#-------------------------------------------------------------------------------
## @param library name to load (no path, just basename)
##
## Loads the library given as the first argument. liberror_check_fatal is
## called to check if there was a problem loading the library, and if there was
## it will print a corresponding error message and then exit with the error
## $ERR_LOAD_LIBRARY.
##
#-------------------------------------------------------------------------------
# Source the named library from $ENCHANT_LIBS, discarding loader noise,
# and let liberror_check_fatal abort with a message if sourcing failed.
function enchant_load_library() {
  local library="$1"

  . "$ENCHANT_LIBS/$library" 2>"$ENCHANT_NULL"
  liberror_check_fatal "cannot load library $library"
}
#-------------------------------------------------------------------------------
# enchant includes
#-------------------------------------------------------------------------------
enchant_load_library "lib.chroot"
enchant_load_library "lib.i18n"
enchant_load_library "lib.install"
enchant_load_library "lib.modules"
enchant_load_library "lib.potion"
#-------------------------------------------------------------------------------
# function definitions
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
##
## Sets the installer system to a sane, known initial state. Should only be
## called when first entering the installer system or ISO.
##
#-------------------------------------------------------------------------------
# Reset the installer to a sane initial state: create the state directory
# with a current-step file set to 'init', and (re)create empty back and
# forward history files under the potion directory.
function enchant_prepare() {
  local history_file

  # state directory plus the file which holds the current step
  mkdir -p "$ENCHANT_TMP" || return $ERR_PREPARE
  printf 'init\n' > "$ENCHANT_STATUS" || return $ERR_PREPARE

  # history directory and the two (empty) navigation history files
  mkdir -p "$ENCHANT_POTION" || return $ERR_PREPARE
  for history_file in "$ENCHANT_BACK" "$ENCHANT_FORWARD"; do
    cat < "$ENCHANT_NULL" > "$history_file" || return $ERR_PREPARE
  done

  return $ERR_OK
}
#-------------------------------------------------------------------------------
##
## Cleans up the tmp directories and such forth
##
#-------------------------------------------------------------------------------
# Remove the temporary state directory created by enchant_prepare().
function enchant_cleanup() {
  if ! rm -rf "$ENCHANT_TMP"; then
    return $ERR_CLEANUP
  fi

  return $ERR_OK
}
#-------------------------------------------------------------------------------
## @param color state [yes or no] (optional)
##
## Wrapper function that handles setting the color state in libcolor. Possible
## values for the color state parameter are 'yes' or 'no' (without the quotes).
## If the parameter isn't supplied, then it defaults to whatever ENCHANT_COLOR
## is set to.
##
#-------------------------------------------------------------------------------
# Enable/disable libcolor output. Accepts 'yes' or 'no' as $1; when no
# argument is given the global ENCHANT_COLOR setting is used instead.
# Sets LIBCOLOR_NOCOLOR="yes" when color output should be disabled.
function enchant_color() {
  local color="${1:-$ENCHANT_COLOR}"

  # bug fix: test the resolved value, not the raw (possibly empty) first
  # argument, so the documented ENCHANT_COLOR default actually takes effect
  [ "$color" = "no" ] && LIBCOLOR_NOCOLOR="yes"
}
#-------------------------------------------------------------------------------
##
## Determines and returns the current module in the installation procedure.
##
#-------------------------------------------------------------------------------
# Print the current installer module (the contents of the status file).
# Fails when the status file path is unset/empty or cannot be read.
function enchant_get_current() {
  if [[ -z "$ENCHANT_STATUS" ]]; then
    return $ERR_GET_CURRENT
  fi

  cat "$ENCHANT_STATUS" || return $ERR_GET_CURRENT

  return $ERR_OK
}
#-------------------------------------------------------------------------------
##
## @param step
##
## Sets the current module of the installer to be the step passed as an
## argument. Stores the current module to the back history before setting it to
## the new module.
##
#-------------------------------------------------------------------------------
#######################################
# Make the given module the current installer step.
# Validates the module name against enchant_modules_list, runs the old
# module's exit hook, appends to the bounded back history, refreshes the
# potion state, writes the status file and runs the new module's enter hook.
# Arguments: $1 - module name
#######################################
function enchant_set_current() {
  local module="$1"

  # idiom fix: run the pipeline directly instead of the old
  # "if ! $(... | grep -q ...)" form, which expanded to an empty command
  # and only worked via the bash rule that such a command's status is that
  # of the last command substitution
  if ! enchant_modules_list | grep -q "$module"
  then
    return $ERR_SET_CURRENT
  fi

  enchant_modules_exit || return $ERR_MODULE_EXIT

  # if the max history size has been reached, then we drop the oldest item
  if [[ "$(wc -l < "$ENCHANT_BACK")" -ge "$ENCHANT_POTION_BUFFER" ]]
  then
    sed -i '1d' "$ENCHANT_BACK" || return $ERR_SET_CURRENT
  fi

  # copy the module to the back history
  # NOTE(review): this pushes the NEW module, while the surrounding
  # comments suggest the previous step should be recorded — verify against
  # enchant_back()'s expectations
  echo "$module" >> "$ENCHANT_BACK" || return $ERR_SET_CURRENT

  # check the installation state
  enchant_potion_update || return $?

  # set the current module
  echo "$module" > "$ENCHANT_STATUS" || return $ERR_SET_CURRENT

  enchant_modules_enter || return $ERR_MODULE_ENTER

  return $ERR_OK
}
#-------------------------------------------------------------------------------
##
## Determines and returns the next module in the installation procedure.
##
#-------------------------------------------------------------------------------
#######################################
# Print the module that follows the current one.
# Looks up the line "<current>:<next>" in $ENCHANT_DATA/modules and prints
# the second colon-separated field; prints "error" and fails when no
# successor is found.
#######################################
function enchant_get_next() {
  local step

  # bug fix: the pattern was written as \"^...\" so grep searched for
  # literal quote characters instead of anchoring on the current module;
  # also quote the data-file path
  step="$(grep -m1 "^$(enchant_get_current):" "$ENCHANT_DATA/modules")" ||
    return $ERR_GET_NEXT

  # the next module is the second colon-separated field
  step="$(echo "$step" | cut -d: -f2)" || return $ERR_GET_NEXT

  if [[ -z "$step" ]]
  then
    echo "error"
    return $ERR_GET_NEXT
  fi

  echo "$step"
  return $ERR_OK
}
#-------------------------------------------------------------------------------
##
## Sets the current module to the next module in the installation procedure.
##
#-------------------------------------------------------------------------------
#######################################
# Advance the installer to the next module in the installation procedure.
#######################################
function enchant_set_next() {
  local module

  # get next step
  module="$(enchant_get_next)" || return $ERR_SET_NEXT

  # bug fix: this called the nonexistent enchant_set_step(); use
  # enchant_set_current() as enchant_set_prev() does
  enchant_set_current "$module" || return $ERR_SET_NEXT

  return $ERR_OK
}
#-------------------------------------------------------------------------------
##
## Determines and returns the previous module in the installation procedure.
##
#-------------------------------------------------------------------------------
#######################################
# Print the module that precedes the current one.
# Looks up the line "<prev>:<current>" in $ENCHANT_DATA/order and prints
# the first colon-separated field; prints "error" and fails when no
# predecessor is found.
#######################################
function enchant_get_prev() {
  local step

  # get the current step
  step="$(enchant_get_current)" || return $ERR_GET_PREV

  # bug fix: the pattern was written as \":...\" so grep searched for
  # literal quote characters instead of matching ":<current>" at the end
  # of a line; also quote the data-file path
  step="$(grep -m1 ":$step$" "$ENCHANT_DATA/order")" || return $ERR_GET_PREV

  # the previous module is the first colon-separated field
  step="$(echo "$step" | cut -d: -f1)" || return $ERR_GET_PREV

  if [[ -z "$step" ]]
  then
    echo "error"
    return $ERR_GET_PREV
  fi

  echo "$step"
  return $ERR_OK
}
#-------------------------------------------------------------------------------
##
## Sets the current module to the previous module in the installation procedure.
##
#-------------------------------------------------------------------------------
# Step the installer back to the module preceding the current one.
function enchant_set_prev() {
  local target

  # resolve the predecessor and make it the current module
  target="$(enchant_get_prev)" || return $ERR_SET_PREV
  enchant_set_current "$target" || return $ERR_SET_PREV

  return $ERR_OK
}
#-------------------------------------------------------------------------------
##
## Moves the installer backward in its history (like the back button on a
## web browser).
##
## when you go back, you store/save the forward pages
##
#-------------------------------------------------------------------------------
function enchant_back() {
# if the back history is empty, then we can't go back
# NOTE(review): $ENCHANT_BACK is unquoted in this redirect — assumes a
# space-free path
[[ "$(wc -l < $ENCHANT_BACK)" -eq 0 ]] && return $ERR_HISTORY_BACK
# if the max forward history size has been reached
if [[ "$(wc -l < $ENCHANT_FORWARD)" -ge "$ENCHANT_POTION_BUFFER" ]]
then
# then we drop the oldest item from the forward history (line 1 = oldest)
sed -i '1d' "$ENCHANT_FORWARD" || return $ERR_HISTORY_BACK
fi
# copy the current status to the forward history, so we can go forward to
# where we are after we jump back
enchant_get_current >> "$ENCHANT_FORWARD" || return $ERR_HISTORY_BACK
# set the current step to be the one we are jumping to
# (the newest back-history entry is the last line of the file)
tail -n 1 "$ENCHANT_BACK" > "$ENCHANT_STATUS" || return $ERR_HISTORY_BACK
# remove the step we jumped to from the back history
# (the sed address '$' selects the last line)
sed -i '$d' "$ENCHANT_BACK" || return $ERR_HISTORY_BACK
return $ERR_OK
}
#-------------------------------------------------------------------------------
##
## Moves the installer forward in its history (like the forward button on a
## web browser.
##
## when you go forward, you store/save the back pages
##
#-------------------------------------------------------------------------------
function enchant_forward() {
# if the forward history is empty, then we can't go forward
# NOTE(review): $ENCHANT_FORWARD is unquoted in this redirect — assumes a
# space-free path
[[ "$(wc -l < $ENCHANT_FORWARD)" -eq 0 ]] && return $ERR_HISTORY_FORWARD
# if the max back history size has been reached
if [[ "$(wc -l < $ENCHANT_BACK)" -ge "$ENCHANT_POTION_BUFFER" ]]
then
# then we drop the oldest item from the back history (line 1 = oldest)
sed -i '1d' "$ENCHANT_BACK" || return $ERR_HISTORY_FORWARD
fi
# copy the current status to the back history, so we can go back to where we
# are after we jump forward
enchant_get_current >> "$ENCHANT_BACK" || return $ERR_HISTORY_FORWARD
# set the current step to be the one we are jumping to
# (the newest forward-history entry is the last line of the file)
tail -n 1 "$ENCHANT_FORWARD" > "$ENCHANT_STATUS" || return $ERR_HISTORY_FORWARD
# remove the step we jumped to from the forward history
# (the sed address '$' selects the last line)
sed -i '$d' "$ENCHANT_FORWARD" || return $ERR_HISTORY_FORWARD
return $ERR_OK
}
#-------------------------------------------------------------------------------
##=back
##
##=head1 LICENSE
##
## This software is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This software is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this software; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##
#-------------------------------------------------------------------------------
# vim:ai:tw=80:tabstop=2:softtabstop=2:shiftwidth=2:expandtab
| true |
e8ab6b57dd9a7958c06157d8b76379ca3ba18835 | Shell | X-I-N/UESTC_Courseware | /操作系统/exam/check_result.sh | UTF-8 | 1,323 | 4.09375 | 4 | [] | no_license | #!/bin/bash
# Grade a submitted program against expected outputs: line N of result.txt
# holds the expected output for test input file "N.txt" in the current
# working directory.
result_file_name="result.txt"
if [[ ! -f "$result_file_name" ]]; then
echo "missing ${result_file_name}, please put ${result_file_name} in this working directory"
exit
fi
#please pass your executable file as the first parameter and the max index of file count as the second parameter
# NOTE(review): the comment above mentions a second "max index" parameter,
# but $2 is never read anywhere below; $3 selects the python version
if [[ $# -lt 1 ]]; then
echo "parameter count mismatch, use this by $0 <executable file name>"
exit
elif [[ ! -f "$1" ]]; then
echo "no executable file $1 exist in working directory"
exit
fi
# NOTE(review): the error paths above use a bare "exit", which exits with
# status 0 — confirm callers do not rely on a non-zero failure status
# test-file extension, program under test, optional python version suffix
suffix=".txt"
executable=$1
py_version=$3
# Run the program under test for input file $1 and print its output.
# Dispatches on the file-name suffix of the global $executable: *.py runs
# via python$py_version, *.jar via java -jar, anything else directly.
# The unquoted echo of the backticked pipeline deliberately collapses
# whitespace, and tr strips CR so CRLF output compares cleanly.
execute() {
	# extract the executable's file-name suffix
	executable_suffix=`echo ${executable##*.}`
	case "${executable_suffix}" in
		py)
			echo `python${py_version} ./${executable} $1 | tr -d '\r'`
			;;
		jar)
			echo `java -jar ./${executable} $1 | tr -d '\r'`
			;;
		*)
			echo `./${executable} $1 | tr -d '\r'`
			;;
	esac
}
# Compare each non-empty expected line against the program's output for
# test file "<i>.txt"; CR characters are stripped from the expected line
# (and inside execute) so CRLF result files compare equal.
i=0
# the "|| [[ -n ${line} ]]" also processes a final line that has no
# trailing newline
while read line || [[ -n ${line} ]]
do
if [[ ${line} != "" ]]; then
line=`echo ${line} | tr -d '\r'`
# run the submission on the i-th input file
your_result=`execute ${i}${suffix}`
if [[ "${your_result}" = "${line}" ]]; then
echo "test ${i}${suffix} correct"
else
echo "test ${i}${suffix} wrong answer"
fi
# only non-empty expected lines advance the test index
i=$(( $i + 1 ))
fi
done < ${result_file_name}
3979b9c068b879064aef587a884cfb2c096c0a51 | Shell | verezem/SELFTOOLS | /pdflux.sh | UTF-8 | 2,719 | 3.203125 | 3 | [] | no_license | #!/bin/bash
# Driver script: run CDFTOOLS' cdfheatc2d over one year of daily model
# output to produce mixed-layer heat-content maps for two fixed regions,
# then concatenate and archive the results.
set -x
# This is a script to calculate SSH over domain mean values, [m]
# Uses CDFTOOLSv4
# header.sh supplies CONFIG/CONFCASE, freq2, GRID1, SWDIR2, IDIR, WORKDIR,
# DIAGDIR and the optional $cmdcp helper command used below
source /scratch/cnt0024/hmg2840/pverezem/DEV/SELFTOOLS/headers/header.sh
# usage instructions
if [ $# = 0 ] ; then
echo " USAGE: $(basename $0) <year> "
exit
fi
# Set working dir
year=$1 # year is the first argument of the srpt
# path to workdir
WRKDIR=$WORKDIR/TMP_PDF/$year
mkdir -p $WRKDIR # -p is to avoid mkdir if exists, and create a parent if needed
cd $WRKDIR
# link the year's daily files and copy the static mesh/mask files locally
ln -sf $SWDIR2/$year/${CONFCASE}_y${year}m??*.${freq2}_${GRID1}.nc ./
#ln -sf $SWDIR2/$year/${CONFCASE}_y${year}m??*.${freq2}_${GRID2}.nc ./
cp $IDIR/${CONFIG}_mesh_zgr.nc mesh_zgr.nc
cp $IDIR/${CONFIG}_mesh_hgr.nc mesh_hgr.nc
cp $IDIR/${CONFIG}_byte_mask.nc mask.nc
$cmdcp # command set in header for extra copy (mask file f.ex.)
# Main body
# one cdfheatc2d call per day and per index box (levels 0-75, -map output)
# NOTE(review): every month iterates d01..d31, so invocations for
# nonexistent dates (e.g. Feb 30) will fail — presumably harmless under
# set -x without set -e, but confirm
for mon in {01..12} ; do
for day in {01..31} ; do
cdfheatc2d -f ${CONFCASE}_y${year}m${mon}d${day}.${freq2}_${GRID1}.nc -zoom 160 295 142 260 0 75 -mxloption 1 -o ${CONFCASE}_y${year}m${mon}d${day}_mldhcl -map
cdfheatc2d -f ${CONFCASE}_y${year}m${mon}d${day}.${freq2}_${GRID1}.nc -zoom 349 426 160 230 0 75 -mxloption 1 -o ${CONFCASE}_y${year}m${mon}d${day}_mldhci -map
done
done
#for tfile in ${CONFCASE}_y${year}m??*.${freq2}_${GRID1}.nc ; do
# mldhcl=$(echo $tfile | sed -e "s/gridTsurf/mldhc_L/g")
# mldhci=$(echo $tfile | sed -e "s/gridTsurf/mldhc_I/g")
# cdfheatc2d -f $tfile -zoom 195 291 150 260 0 0 -mxloption 1 -o $mldhcl -map
# cdfheatc2d -f $tfile -zoom 349 426 160 230 0 0 -mxloption 1 -o $mldhci -map
# pdfhcl=$(echo $tfile | sed -e "s/gridTsurf/pdfmldhcl/g")
# pdfhci=$(echo $tfile | sed -e "s/gridTsurf/pdfmldhci/g")
# cdfpdf -f $mldhcl -v heatc3d -o pdfhcl -range
# shfile=$(echo $tfile | sed -e "s/flxT/pdfshf/g")
# lhfile=$(echo $tfile | sed -e "s/flxT/pdflhf/g")
# mlfile=$(echo $tfile | sed -e "s/flxT/pdfmld/g")
# cdfpdf -f $tfile -v $LHF -o $lhfile -range -1000 0 250
# cdfpdf -f $tfile -v $SHF -o $shfile -range -1000 0 250
# cdfpdf -f $tfile -v $MXL10 -o $mlfile -range 0 2500 250
#done
# Concatenation and storing
# merge the daily map files along the time dimension and move the yearly
# files into the diagnostics archive
mkdir -p $DIAGDIR/$year
#ncrcat *_pdfshf.nc ${CONFCASE}_y${year}_pdfshf.nc
#ncrcat *_pdflhf.nc ${CONFCASE}_y${year}_pdflhf.nc
#ncrcat *_pdfmld.nc ${CONFCASE}_y${year}_pdfmld.nc
ncrcat *mldhclmap.nc ${CONFCASE}_y${year}_mldhcl.nc
ncrcat *mldhcimap.nc ${CONFCASE}_y${year}_mldhci.nc
#mv ${CONFCASE}_y${year}_pdfshf.nc $DIAGDIR/$year
#mv ${CONFCASE}_y${year}_pdflhf.nc $DIAGDIR/$year
#mv ${CONFCASE}_y${year}_pdfmld.nc $DIAGDIR/$year
mv ${CONFCASE}_y${year}_mldhcl.nc $DIAGDIR/$year
mv ${CONFCASE}_y${year}_mldhci.nc $DIAGDIR/$year
cd $WORKDIR/TMP_PDF
rm -rf $year # in order to erase tmp directory
| true |
5f68f55376b16ed82a7c3174c55bdd5f3336032d | Shell | ssmythe/capistrano_tutorial | /shell/accept_known_hosts_ssh_fingerprints.sh | UTF-8 | 716 | 3.8125 | 4 | [] | no_license | #!/usr/bin/env bash
# Pre-populate the vagrant user's known_hosts file so later automation can
# ssh to the build hosts without interactive fingerprint prompts.
SCRIPT="accept_known_hosts_ssh_fingerprints.sh"
SSH_DIR="/home/vagrant/.ssh"
KNOWN_HOSTS_FILE="${SSH_DIR}/known_hosts"

# Connect once to the given host so its key is recorded in known_hosts,
# unless an entry for that host already exists.
accept_known_host_ssh_fingerprint() {
  hostname=$1
  touch "${KNOWN_HOSTS_FILE}"
  if grep -q "^${hostname}" "${KNOWN_HOSTS_FILE}"; then
    echo ${KNOWN_HOSTS_FILE}: adding ${hostname} [SKIPPED]
  else
    # first contact auto-accepts the key; connection failures are ignored
    ssh -o StrictHostKeyChecking=no ${hostname} 'hostname -f' || true
    echo ${KNOWN_HOSTS_FILE}: adding ${hostname} [ADDED]
  fi
}

echo "${SCRIPT}: start - $(date)"
for target in 'localhost' 'capalpha' 'capbravo'; do
  accept_known_host_ssh_fingerprint "$target"
done
echo "${SCRIPT}: finish - $(date)"
| true |
e61bb596f15e0c9e96feb56da9b56805e13bf951 | Shell | abahturin/LFTH | /kasutajad_failist.sh | UTF-8 | 404 | 3.3125 | 3 | [] | no_license | #!/bin/bash
# Create user accounts listed in a file, one "username:password" entry per
# line. Must run as root (chpasswd, /etc/shadow). User-facing messages are
# in Estonian ("kasutusjuhend" = usage, "fail on korras" = file is OK,
# "probleem failiga" = problem with file).
if [ $# -ne 1 ]; then
echo "kasutusjuhend: $0 failinimi"
else
failinimi=$1
# proceed only if the argument is a readable regular file
# NOTE(review): $failinimi is unquoted and -a is obsolescent — this works
# only for space-free file names
if [ -f $failinimi -a -r $failinimi ]; then
echo "fail on korras"
# NOTE(review): $(cat ...) word-splits on any whitespace, so entries must
# not contain spaces; each entry is expected to be "username:password"
for rida in $(cat $failinimi)
do
# the first colon-separated field is the user name
nimi=$(echo $rida | cut -f1 -d":")
# delegate account creation to the helper script; check its exit status
sh lisa_kasutaja $nimi
if [ $? -eq 0 ]; then
# set the password from the original "user:password" entry
echo $rida | chpasswd
# show the new shadow entry as confirmation
cat /etc/shadow | grep $nimi
fi
done
else
echo "probleem failiga $failinimi"
fi
fi
| true |
8fdbea8f7e1a53cf5700f59167dc9387b44a52c1 | Shell | felipendc/vicyos-pkgbuild | /VICYOS-LINUX/vicyos-third-party-gnome-extensions-git/PKGBUILD | UTF-8 | 2,184 | 2.765625 | 3 | [] | no_license | # vicyos-third-party-gnome-extensions:
# Each gnome extension from vicyos-third-party-gnome-extensions-git-*.zst
# has its own maintainers. Here are the maintainters and source code:
# Maintainer: kgshank (Gnome Shell Extension - Sound Input & Output Device Chooser)
# folder name: sound-output-device-chooser@kgshank.net
# Source: github.com/kgshank/gse-sound-output-device-chooser
# Maintainer: biji (Simple net speed)
# folder name: simplenetspeed@biji.extension
# Source: github.com/biji/simplenetspeed
# Maintainer: HarlemSquirrel (Sensory Perception)
# folder name: sensory-perception@HarlemSquirrel.github.io
# Source: https://github.com/HarlemSquirrel/gnome-shell-extension-sensory-perception
# Maintainer: Jens Lody (gnome-shell-extension-openweather)
# folder name: openweather-extension@jenslody.de
# Source: https://gitlab.com/jenslody/gnome-shell-extension-openweather
# Maintainer: andyholmes (gnome-shell-extension-gsconnect)
# folder name: gsconnect@andyholmes.github.io
# Source: https://github.com/andyholmes/gnome-shell-extension-gsconnect/wiki
# Maintainer: neumann-d (ShutdownTimer)
# folder name: ShutdownTimer@neumann
# Source: https://github.com/neumann-d/ShutdownTimer
# Packager for VicyosLinux: Felipe Ndc (Vicyos) <felipendc10@gmail.com>
pkgname=vicyos-third-party-gnome-extensions-git
_pkgname=vicyos-third-party-gnome-extensions
_destname1="/etc/skel/.local/share/gnome-shell/extensions/"
_licensedir="/usr/share/vicyos/licenses/"
pkgver=21.1.6
pkgrel=2
pkgdesc="third-party-gnome-extensions for vicyoslinux-gnome-edition"
arch=('any')
url="https://github.com/felipendc/vicyos-third-party-gnome-extensions"
license=('GPL3')
makedepends=('git')
depends=()
replaces=($_pkgname)
conflicts=($_pkgname)
provides=("${pkgname}")
options=(!strip !emptydirs)
source=(${_pkgname}::"git+https://github.com/felipendc/${_pkgname}.git")
sha256sums=('SKIP')
# makepkg packaging step: install the license, prune repo housekeeping
# files, then copy the bundled GNOME Shell extensions into the skeleton
# home directory tree.
package() {
# install the license under the distro's license directory
install -dm755 ${pkgdir}${_licensedir}${_pkgname}
install -m644 ${srcdir}/${_pkgname}/LICENSE ${pkgdir}${_licensedir}${_pkgname}
# target directory for the extensions inside /etc/skel
install -dm755 ${pkgdir}${_destname1}
# strip repo housekeeping files before copying
# NOTE(review): deleting from $srcdir means a repeated makepkg run needs a
# fresh checkout — confirm this is intended
rm -r ${srcdir}/${_pkgname}/.git
rm -r ${srcdir}/${_pkgname}/LICENSE
rm -r ${srcdir}/${_pkgname}/git-v2.sh
rm -r ${srcdir}/${_pkgname}/setup-git.sh
rm -r ${srcdir}/${_pkgname}/README.md
# source path expands to ${srcdir}/${_pkgname}/etc/skel/.../extensions/*,
# i.e. the repo is expected to mirror the /etc/skel layout
cp -r ${srcdir}/${_pkgname}${_destname1}* ${pkgdir}${_destname1}
}
3ef139bf4d7ed4ec8748adce7b137e6f81850ab7 | Shell | kyledharrington/dynatrace-operator | /hack/gcr/deployer-image.sh | UTF-8 | 287 | 2.953125 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/usr/bin/env bash
# Build the marketplace deployer image from the Helm chart directory and
# push it to GCR when the build succeeds.
set -eu
export REGISTRY=gcr.io/dynatrace-marketplace-dev
export APP_NAME=dynatrace-operator
# optional first argument is appended verbatim to the image name
# NOTE(review): to produce a tagged image it must include the leading ":"
# (e.g. ":1.0.0") — confirm callers pass it that way
TAG="${1:-""}"
# wrapping the build in "if" keeps set -e from aborting on a failed build;
# the push only happens after a successful build
if docker build --tag "$REGISTRY/$APP_NAME/deployer${TAG}" -f config/helm/Dockerfile config/helm; then
docker push "$REGISTRY/$APP_NAME/deployer${TAG}"
fi
| true |
f3e506e0f8393b8d30bfb9aa530ce858ad64f04e | Shell | KolesovDmitry/planet-scripts | /creator.sh | UTF-8 | 729 | 3.21875 | 3 | [] | no_license | #!/bin/sh
# Upload a GeoTIFF to a NextGIS Web instance and create a raster resource
# plus a style for it via the REST API.
#
# Arguments:
#   $1 - path to the .tif file to upload
#   $2 - id of the parent resource group
#   $3 - "user:password" credentials for basic auth
#   $4 - base URL of the NextGIS Web instance
FILE=$1
PARENT_ID=$2
CREDENTIALS=$3
NEXTGIS_URL=$4

# resource name is the file name without the .tif extension
NAME=$(basename "$FILE" .tif)

# upload the file; the JSON response describes the upload and is embedded
# into the resource-creation payload rendered below
SOURCE=$(curl --user "$CREDENTIALS" --upload-file "$FILE" "$NEXTGIS_URL/api/component/file_upload/upload")

# security fix: actually create the temp files (mktemp -u only printed a
# predictable name, which is racy) and remove them on any exit
RAST=$(mktemp tmp_rast_XXXXXX)
STYLE=$(mktemp tmp_style_XXXXXX)
trap 'rm -f "$RAST" "$STYLE"' EXIT

# render the raster-creation payload from the m4 template and POST it
m4 -DRASTERNAME="$NAME" -DRASTERSOURCE="$SOURCE" -DPARENTID="$PARENT_ID" create_rast.template > "$RAST"
RASTER_RESULT=$(curl --user "$CREDENTIALS" --data "@$RAST" "$NEXTGIS_URL/api/resource/")
echo "$RASTER_RESULT"

# extract the id of the new resource and attach a style to it
RASTERID=$(echo "$RASTER_RESULT" | jq .id)
echo "ID=$RASTERID"
m4 -DSTYLEID="$RASTERID" -DSTYLENAME="$NAME" create_style.template > "$STYLE"
STYLEID=$(curl --user "$CREDENTIALS" --data "@$STYLE" "$NEXTGIS_URL/api/resource/")
echo "$STYLEID"
| true |
eed17b1f57b2289a0dcddbdd865f27b1e2233a79 | Shell | Sylvain-Bugat/linux-commands-reference | /file-processing/column-selections/select-the-N-field.sh | UTF-8 | 431 | 3.859375 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Validate the optional field argument: when given it must start with a
# digit, otherwise print the usage text and bail out (default field is 2;
# ${1-2} substitutes only when $1 is unset, matching the original check).
case "${1-2}" in
	[0-9]*)
		;;
	*)
		echo "Usage: $0 <field to get>"
		exit 2
		;;
esac

# Source commands library (defines sourceFile, refTargetFile, testCommands)
. ../../common-libs/commands-lib.sh

# Field number to select (starting from 1), read-only integer
typeset -ir selectedField="${1:-2}"

# Build the reference file using the most common command for this task
cut -d';' -f "${selectedField}" "${sourceFile}" > "${refTargetFile}"

# Run every candidate command and compare against the reference output
testCommands "${selectedField}"
| true |
cdd561a62a259184007f346e19712392c6c7d384 | Shell | axray/dataware.dreamplug | /var/lib/dpkg/info/usbmount.postinst | UTF-8 | 503 | 3.421875 | 3 | [] | no_license | #!/bin/sh
# Debian maintainer script run by dpkg after unpacking; $1 is the dpkg
# action (e.g. "configure"). set -e aborts the configuration on any error.
set -e
# If /etc/usbmount/.create_rules_symlink exists when the package is
# configured, create /etc/udev/rules.d/z60_usbmount.rules ->
# ../usbmount.rules symlink unless /etc/udev/rules.d/z60_usbmount.rules
# already exists.
if test "$1" = configure && test -f /etc/usbmount/.create_rules_symlink; then
# "test -e" also matches an existing symlink, so an admin-provided rules
# file or link is never overwritten
if ! test -e /etc/udev/rules.d/z60_usbmount.rules; then
ln -s ../usbmount.rules /etc/udev/rules.d/z60_usbmount.rules
fi
# the flag file is one-shot: remove it once it has been handled
rm -f /etc/usbmount/.create_rules_symlink
fi
exit 0
| true |
7fbcbf51589d854cd8eedfc4993607684595869e | Shell | ellerbrock/Sparta | /generate-constants.sh | UTF-8 | 676 | 2.546875 | 3 | [
"MIT"
] | permissive | #!/bin/bash -ex
# Embed the ./resources tree into generated Go source files with the esc
# code generator, then add mutually exclusive build tags so only one of the
# two generated files is compiled per build target.
# (Shebang runs bash with -ex: trace every command, stop on first failure.)
# Create the embedded version
#rm -rf ./resources/provision/node_modules
go run $GOPATH/src/github.com/mjibson/esc/main.go \
-o ./CONSTANTS.go \
-private \
-pkg sparta \
./resources
# Create a secondary CONSTANTS_AWSBINARY.go file with empty content.
# The next step will insert the
# build tags at the head of each file so that they are mutually exclusive
go run $GOPATH/src/github.com/mjibson/esc/main.go \
-o ./CONSTANTS_AWSBINARY.go \
-private \
-pkg sparta \
./resources/awsbinary/README.md
# Tag the builds...
# insertTags prepends a "!lambdabinary" / "lambdabinary" build tag to each
# generated file ("!" is safe unquoted: history expansion is off in
# non-interactive bash)
go run ./cmd/insertTags/main.go ./CONSTANTS !lambdabinary
go run ./cmd/insertTags/main.go ./CONSTANTS_AWSBINARY lambdabinary
| true |
4505073f5e44c11cd0ee684347c5425d0f7f91e2 | Shell | chombourger/docker-for-debian | /build-common.sh | UTF-8 | 14,182 | 3.9375 | 4 | [] | no_license | #!/bin/bash
#---------------------------------------------------------------------------------------------------
# Build docker-ce for Debian
#---------------------------------------------------------------------------------------------------
# docker may be easily built for the architecture you are running on but it may be a little more
# tricky if you are targeting Arm and do not have an Arm machine with enough memory or computing
# power. This script uses qemu to run Debian on Arm to get a full environment (the "build vm") and
# where docker may be installed (as required by the build process). Sources are downloaded inside
# the build vm and built using the provided makefile.
#---------------------------------------------------------------------------------------------------
shopt -s expand_aliases
#---------------------------------------------------------------------------------------------------
# Debian installer
#---------------------------------------------------------------------------------------------------
WWW_DIR=/var/www/html
WWW_PRESEED=http://10.0.2.2/preseed.cfg
#---------------------------------------------------------------------------------------------------
# Disk settings for the build VM
#---------------------------------------------------------------------------------------------------
DISK_IMAGE=disk.qcow2
DISK_PATH=tmp/work/${ARCH}/debian-${DISTRO}
DISK_SIZE=10G
#---------------------------------------------------------------------------------------------------
# Utility functions
#---------------------------------------------------------------------------------------------------
# Print an info message to the console, prefixed with the elapsed run time
# in minutes, rewriting the current line via a leading carriage return.
# Globals:   SECONDS (bash builtin, read)
# Arguments: $* - message text
# Outputs:   "\r[M.MM] message" to stdout (no trailing newline)
info() {
	local mins
	# awk performs the floating-point division that bash arithmetic lacks.
	mins=$(awk "BEGIN { print $SECONDS / 60; }")
	mins=$(printf "%0.2f" "${mins}")
	# Fixed format string: the previous `printf "\r[${mins}] ${*}"` used the
	# message itself as the printf format, so a '%' in it corrupted output.
	printf '\r[%s] %s' "${mins}" "${*}"
}
# Run a command in the build vm
# Globals:   SSHPASS (exported so `sshpass -e` can read it),
#            SSH_PORT, SSH_USER (read)
# Arguments: $* - command line to execute remotely (joined into one string)
# The vm is reached via the QEMU user-net forward on localhost:${SSH_PORT};
# host-key checking is disabled, presumably because the guest is reinstalled
# (and so re-keyed) between runs -- TODO confirm.
ssh_cmd() {
	export SSHPASS
	sshpass -e \
	ssh -q -p ${SSH_PORT} \
	    -o StrictHostKeyChecking=no \
	    -o UserKnownHostsFile=/dev/null \
	    ${SSH_USER}@localhost \
	    "${*}"
}
# Run a command in the build vm using sudo
# Pipes ${SSHPASS} into `sudo -S` (read password from stdin, empty -p prompt).
# NOTE(review): the password is embedded in the remote command line via echo,
# so it is visible in the guest's process list -- likely acceptable for a
# throwaway build vm, but worth confirming.
ssh_sudo() {
	ssh_cmd "echo ${SSHPASS}|sudo -S -p '' ${*}"
}
# Check if SSH is running in the build vm
# Returns 0 when a trivial remote command (/bin/true) succeeds.
ssh_check() {
	ssh_cmd /bin/true
}
# Wait for SSH to be up and running in the build vm
# Globals:   SSH_DELAY - seconds to count down before the first probe
#            SSH_TRIES - maximum number of connection attempts
# Returns:   0 once ssh_check succeeds, the last failure status otherwise
# NOTE(review): if SSH_TRIES is 0 the probe loop never runs and ${result}
# is used unset in the case statement -- confirm callers set SSH_TRIES >= 1.
ssh_wait() {
	local result counter
	counter=${SSH_DELAY}
	# Fixed grace period: the vm needs time to boot before probing.
	while [ ${counter} -gt 0 ]; do
		echo -en "\r${COLOR_BROWN}[WAITING]${COLOR_NC} ${counter} seconds before online check..."
		sleep 1
		counter=$((${counter} - 1))
	done
	# Probe loop: stop at the first successful connection.
	counter=${SSH_TRIES}
	while [ ${counter} -gt 0 ]; do
		echo -en "\r${COLOR_BROWN}[ INFO ]${COLOR_NC} trying to connect to build vm via ssh..."
		ssh_check; result=${?}
		[ ${result} -eq 0 ] && break
		counter=$((${counter} - 1))
	done
	case ${result} in
		0) echo -e " ${COLOR_GREEN}ok!${COLOR_NC}" ;;
		*) echo -e " ${COLOR_RED}timeout!${COLOR_NC}" ;;
	esac
	return ${result}
}
# Copy something from the build vm
# Arguments: $1 - remote path (relative to the vm user's home), $2 - local path
# Recursive scp over the localhost port forward; same no-host-key-check
# options as ssh_cmd.
ssh_copy_from() {
	export SSHPASS
	sshpass -e \
	scp -r -q -P ${SSH_PORT} \
	-o StrictHostKeyChecking=no \
	-o UserKnownHostsFile=/dev/null \
	${SSH_USER}@localhost:${1} ${2}
}
# Copy something to the build vm
# Arguments: $1 - local path, $2 - remote destination (defaults to the vm
#            user's home when empty, since the colon target is bare)
ssh_copy_to() {
	export SSHPASS
	sshpass -e \
	scp -r -q -P ${SSH_PORT} \
	-o StrictHostKeyChecking=no \
	-o UserKnownHostsFile=/dev/null \
	${1} ${SSH_USER}@localhost:${2}
}
# Install the host-side tooling this script relies on: curl/wget (downloads),
# nbd-client (attach qcow2 images), qemu-system (run the build vm), and
# sshpass (scripted ssh). NOTE(review): reprepro is installed but not used
# anywhere in this chunk -- confirm it is still needed.
install_build_host_deps() {
	sudo apt-get -qqy install \
		curl \
		nbd-client \
		qemu-system \
		reprepro \
		sshpass \
		wget
}
# ANSI escape sequences for colored console output (interpreted by echo -e).
COLOR_NC='\e[0m' # No Color
COLOR_WHITE='\e[1;37m'
COLOR_BLACK='\e[0;30m'
COLOR_BLUE='\e[0;34m'
COLOR_LIGHT_BLUE='\e[1;34m'
COLOR_GREEN='\e[0;32m'
COLOR_LIGHT_GREEN='\e[1;32m'
COLOR_CYAN='\e[0;36m'
COLOR_LIGHT_CYAN='\e[1;36m'
COLOR_RED='\e[0;31m'
COLOR_LIGHT_RED='\e[1;31m'
COLOR_PURPLE='\e[0;35m'
COLOR_LIGHT_PURPLE='\e[1;35m'
COLOR_BROWN='\e[0;33m'
COLOR_YELLOW='\e[1;33m'
# NOTE(review): COLOR_GRAY duplicates COLOR_BLACK's value ('\e[0;30m').
COLOR_GRAY='\e[0;30m'
COLOR_LIGHT_GRAY='\e[0;37m'
# Global state shared by the task framework functions below.
task_abort=0
task_name=
task_descr=
task_result=
failed_task=
# Aliases forming the `begin ... end` task DSL; they only expand because of
# the `shopt -s expand_aliases` at the top of this script. `check` opens two
# `if` statements that `end` closes with `fi; fi`, so code between `begin`
# and `end` runs only when nothing has aborted and _check says "run".
alias check='if [ ${task_abort} -eq 0 ]; then if _check; then true'
alias starting='echo -en "\r${COLOR_BROWN}[RUNNING]${COLOR_NC} ${task_descr}"'
alias success='echo -en "\r${COLOR_GREEN}[SUCCESS]${COLOR_NC} ${task_descr}"'
alias failed='echo -en "\r${COLOR_RED}[FAILED ]${COLOR_NC} ${task_descr}"'
alias begin='check; _begin'
alias end='task_result=${?}; _end; fi; fi'
# Start a named task group: sets the group-scoped work directory and creates
# its logs/ and stamps/ subdirectories.
# Globals:   ARCH (read), task_group and WORKDIR (written)
# Arguments: $1 - group name
group() {
	task_group="${1}"
	WORKDIR=tmp/work/${ARCH}/${task_group}
	mkdir -p ${WORKDIR}/logs ${WORKDIR}/stamps
}
# Declare the current task within the active group.
# Arguments: $1 - task name
#            $2 - human-readable description (defaults to the name)
#            $3 - force flag: 1 reruns the task even if its stamp exists
# Also resets the dependency list and picks a per-PID log file path.
task() {
	task_name="${1}"
	task_deps=""
	task_descr="${2:-${task_name}}"
	task_force="${3:-0}"
	task_log=${WORKDIR}/logs/log.do_${task_name}.${$}
	task_result=${task_result:-0}
}
# Record the current task's dependencies as a space-separated list of task
# references ("task" or "group:task"), consumed by _check.
deps() {
	task_deps="${*}"
}
# Enter a task body: announce it, note the start time, (re)create the task
# log plus a stable "latest log" symlink, then redirect stdout/stderr into
# the log (saving the console on fds 8/9 so _end can restore them).
_begin() {
	starting
	task_start=${SECONDS}
	touch ${task_log}
	rm -f ${WORKDIR}/logs/log.do_${task_name}
	ln -s $(basename ${task_log}) ${WORKDIR}/logs/log.do_${task_name}
	exec 8>&1 9>&2
	exec >${task_log} 2>&1
}
# Leave a task body: restore console fds, report duration, and either stamp
# the task as done (success) or dump the log tail and latch the failure
# (sets task_abort so later tasks are skipped).
_end() {
	exec 1>&8 2>&9
	task_end=${SECONDS}
	task_duration=$((${task_end} - ${task_start}))
	[ ${task_result} -eq 0 ] || task_abort=1
	# Report minutes once the duration passes a minute, seconds otherwise.
	if [ ${task_duration} -ge 60 ]; then
		units="minutes"
		task_duration=$((${task_duration} / 60))
	else
		units="seconds"
	fi
	if [ ${task_result} -eq 0 ]; then
		# The stamp file is what _check uses to skip completed tasks.
		touch ${WORKDIR}/stamps/${task_name}.done
		success
		echo " (took ${task_duration} ${units})"
	else
		failed
		echo -e " (failed after ${task_duration} ${units})${COLOR_RED}"
		tail ${task_log}
		echo -e "${COLOR_NC}(full log can be found in ${task_log})" >&2
		failed_task="${task_name}"
	fi
}
_stamp() {
local _group
local _task
case "${1}" in
*:*) _group=${1/:*/} ; _task=${1/*:/} ;;
*) _group=${task_group} ; _task=${1} ;;
esac
echo "tmp/work/${ARCH}/${_group}/stamps/${_task}.done"
}
# Decide whether the current task should run.
# Returns 0 ("run it") when: a dependency stamp is newer than this task's
# stamp, the force flag is set, or this task's own stamp is missing.
# Returns 1 ("skip") when the task is already done and up to date.
# A missing dependency stamp is an error: it sets task_abort=1.
_check() {
	local _dep
	local _skip_task=0
	local _dep_newer=0
	local _dep_stamp
	local _task_stamp
	_skip_task=1
	_task_stamp=$(_stamp ${task_name})
	if [ -n "${task_deps}" ]; then
		for _dep in ${task_deps}; do
			_dep_stamp=$(_stamp ${_dep})
			if [ ! -e ${_dep_stamp} ]; then
				echo -en "\r${COLOR_RED}[MISSING]${COLOR_NC} ${task_name} task depends on ${_dep}!\n" >&2
				task_abort=1
			elif [ -f ${_task_stamp} ] && [ ${_dep_stamp} -nt ${_task_stamp} ]; then
				# make-style staleness: rerun when a dependency is newer.
				echo -en "\r${COLOR_BROWN}[ INFO ]${COLOR_NC} ${_dep} newer than ${task_name}\n" >&2
				_dep_newer=1
				_skip_task=0
			fi
		done
	fi
	if [ ${task_force} -eq 1 ] && [ ${task_abort} -eq 0 ]; then
		echo -en "\r${COLOR_BROWN}[ INFO ]${COLOR_NC} forced run of the '${task_name}' task\n" >&2
		_skip_task=0
	fi
	# Never skip a task that has not produced its stamp yet.
	if [ ${_skip_task} -eq 1 ]; then
		if [ ! -f $(_stamp ${task_name}) ]; then
			_skip_task=0
		fi
	fi
	return ${_skip_task}
}
#---------------------------------------------------------------------------------------------------
# Local settings
#---------------------------------------------------------------------------------------------------
if [ -f local.conf ]; then
info "loading local.conf"
source local.conf
fi
#---------------------------------------------------------------------------------------------------
# QEMU command line
#---------------------------------------------------------------------------------------------------
cpuopt=""
[ -n "${CPU}" ] && cpuopt="-cpu ${CPU}"
#---------------------------------------------------------------------------------------------------
# kernel command line
#---------------------------------------------------------------------------------------------------
kcmd=""
[ -n "${CONSOLE}" ] && kcmd="${kcmd} console=${CONSOLE}"
#---------------------------------------------------------------------------------------------------
# The actual build process
#---------------------------------------------------------------------------------------------------
group "setup"
task "hostdeps" "installing host dependencies..."
begin
install_build_host_deps
end
task "upstreamcheck" "checking latest docker-ce build..." "1"
begin
latest=$(curl -s ${DOCKER_REPO}/releases/latest|sed -e 's,.*<a href=",,g'|cut -d '"' -f1)
latest=$(basename ${latest})
info "upstream version is ${latest}\n"
end
# Copy preseed file to local web server
task "copypreseed" "copying preseed to ${WWW_DIR}..."
begin
cat preseed.cfg \
| HTTP_PROXY="${HTTP_PROXY}" \
SSH_PASS="${SSHPASS}" \
SSH_USER="${SSH_USER}" \
envsubst \
| sudo tee ${WWW_DIR}/preseed.cfg \
> /dev/null
end
group "$(echo ${DEBIAN_VERSION}|tr '[:upper:]' '[:lower:]')-installer"
task "fetch" "getting ${ARCH} kernel and ramdisk..."
begin
if [ ! -e ${WORKDIR}/${DI_INITRD} ] || [ ! -e ${WORKDIR}/${DI_KERNEL} ]; then
info "getting ${ARCH} kernel and ramdisk\n"
url=${DEBIAN_BASE_URL}/${DEBIAN_VERSION}/${DI_PATH}
mkdir -p ${WORKDIR} && \
pushd ${WORKDIR} >/dev/null && \
wget -qc ${url}/${DI_INITRD} && \
wget -qc ${url}/${DI_KERNEL} && \
popd >/dev/null
fi
end
# Create (empty) disk image
task "diskimage" "creating ${DISK_SIZE} disk image..."
begin
mkdir -p ${DISK_PATH} && \
qemu-img create -f qcow2 ${DISK_PATH}/${DISK_IMAGE} ${DISK_SIZE} >/dev/null
end
# Install Debian to the disk image
task "install" "installing Debian for ${ARCH}..."
begin
bootcmd="root=/dev/ram${kcmd}"
bootcmd="${bootcmd} auto=true priority=critical preseed/url=${WWW_PRESEED}"
${QEMU} \
-smp ${CORES} -M ${MACHINE} ${cpuopt} -m ${MEM} \
-initrd ${WORKDIR}/${DI_INITRD} -kernel ${WORKDIR}/${DI_KERNEL} \
-append "${bootcmd}" \
\
${SCSI_OPTS} \
-drive file=${DISK_PATH}/${DISK_IMAGE}${DRIVE_OPTS},id=rootimg,media=disk \
\
${NETDEV_OPTS} \
\
-vnc :0 \
-monitor unix:${DISK_PATH}/monitor.sock,server,nowait \
-no-reboot
end
# Extract kernel/initrd from disk
task "extract" "extracting installed kernel and ramdisk..."
begin
sudo modprobe nbd max_part=8 && \
sudo qemu-nbd --connect=/dev/nbd0 ${DISK_PATH}/${DISK_IMAGE} && \
sudo partprobe /dev/nbd0 && \
mkdir -p ${DISK_PATH}/mnt && \
sudo mount /dev/nbd0p1 ${DISK_PATH}/mnt && \
cp $(find ${DISK_PATH}/mnt -maxdepth 1 -type f -name initrd\*) ${DISK_PATH}/initrd.img && \
cp $(find ${DISK_PATH}/mnt -maxdepth 1 -type f -name vmlinuz\*) ${DISK_PATH}/vmlinuz
end
# Flush data and release I/O devices
sync
if mountpoint -q ${DISK_PATH}/mnt; then
info "un-mounting disk image\n"
sudo umount /dev/nbd0p1
fi
if sudo nbd-client -c /dev/nbd0; then
info "releasing network block device\n"
sudo nbd-client -d /dev/nbd0
fi
# Boot installed system
qemu_pid=
group "docker-ce-${latest}"
task "boot" "starting ${ARCH} vm..." "1"
begin
${QEMU} \
-smp ${CORES} -M ${MACHINE} ${cpuopt} -m ${MEM} \
-initrd ${DISK_PATH}/initrd.img -kernel ${DISK_PATH}/vmlinuz \
-append "root=/dev/debian-vg/root${kcmd}" \
\
${SCSI_OPTS} \
-drive file=${DISK_PATH}/${DISK_IMAGE}${DRIVE_OPTS},id=rootimg,media=disk \
\
${NETDEV_OPTS} \
\
-vnc :0 \
-monitor unix:${DISK_PATH}/monitor.sock,server,nowait \
-no-reboot \
&
qemu_pid=${!}
end
# Wait for system to be up
if [ -n "${qemu_pid}" ]; then
ssh_wait; task_result=${?}
if [ ${task_result} -ne 0 ]; then
sleep 60
kill -TERM ${qemu_pid}
qemu_pid=
fi
else
task_result=1
fi
task "setup" "installing packages to support https package feeds..."
begin
ssh_sudo apt-get -qqy install \
apt-transport-https \
ca-certificates \
curl \
gnupg2 \
software-properties-common
end
task "getgpg" "get docker’s official GPG key..."
begin
ssh_cmd curl -fsSL -o docker.gpg https://download.docker.com/linux/debian/gpg
end
task "addgpg" "adding docker's key..."
begin
ssh_sudo 'apt-key add docker.gpg >/dev/null'
end
task "addsrc" "adding docker's package feed..."
begin
ssh_cmd "echo deb https://download.docker.com/linux/debian ${DISTRO} stable>docker.list" && \
ssh_sudo "cp docker.list /etc/apt/sources.list.d/"
end
task "aptupd" "updating package database..."
begin
ssh_sudo "apt-get -qqy update"
end
task "install" "installing docker-ce..."
begin
ssh_sudo "apt-get -qqy install docker-ce docker-ce-cli containerd.io ${EXTRA_PACKAGES}"
end
task "config" "copying docker's daemon configuration file..."
begin
ssh_copy_to daemon.json && \
ssh_sudo 'cp daemon.json /etc/docker/'
end
task "restart" "restarting docker..."
begin
ssh_sudo 'systemctl restart docker'
end
task "fetch" "getting sources (${latest})..."
begin
ssh_sudo "rm -rf docker-ce" && \
ssh_cmd "git clone -b ${latest} --single-branch --depth 1 https://github.com/docker/docker-ce"
end
task "adduser" "adding user to docker group..."
begin
ssh_sudo "adduser ${SSH_USER} docker"
end
task "build" "building docker for ${ARCH}..."
deps "fetch"
begin
ssh_cmd "make -C docker-ce deb DOCKER_BUILD_PKGS=debian-${DISTRO}"
end
task "copy" "getting deb packages from build vm..."
deps "build"
begin
mkdir -p ${WORKDIR}/results && \
ssh_copy_from \
docker-ce/components/packaging/deb/debbuild/debian-*/*.deb \
${WORKDIR}/results
end
# Use reboot to stop the virtual machine
if [ -n "${qemu_pid}" ]; then
echo -e "${COLOR_BROWN}[ INFO ]${COLOR_NC} stopping build vm..."
ssh_sudo reboot
wait ${qemu_pid}
fi
| true |
da719cf51322e8f1e374deb2ede2aedfa4682a5b | Shell | kaede0902/sh | /osdif.sh | UTF-8 | 295 | 2.90625 | 3 | [] | no_license | # https://shellscript.sunone.me/if_and_test.html
# OS NAME
echo 'uname:' $(uname);
# num eval true is 0.
test 1 -eq 1; echo num:$?;
test 1 -eq 3; echo num:$?;
# str eval
test 'hoge' = 'hoge'; echo str:$?;
# shorten test
[ 'hoge' = 'hoge' ]; echo shorten:$?;
[ $(uname) = 'Linux' ]; echo os:$?;
| true |
709d061aaacdd7d2e6214f5be30d08d955d809fc | Shell | gboon18/cc_in2p3_afs_backups | /NPS/job/default/test.sh | UTF-8 | 158 | 2.71875 | 3 | [] | no_license | #!/bin/sh
# Minimal batch-job smoke test: prints a greeting plus the working directory
# and execution host. The `#$` line below is a scheduler directive (looks
# like Grid Engine qsub syntax -- TODO confirm the target scheduler).
### Merge stdout et stderr in a single file
#$ -j y
# NOTE(review): under /bin/sh, whether echo interprets "\n" escapes depends
# on the shell implementation -- output may contain literal backslashes.
echo "\nHello World!\n"
echo 'my working directory is: '
pwd
echo 'on the host: '
hostname
| true |
8e9f6a177d972a54243ae8304a297a2f4b4f05d1 | Shell | bendalby82/mariassl | /06-create-new-certificates.sh | UTF-8 | 3,924 | 3.5 | 4 | [
"MIT"
] | permissive | #!/bin/bash
mkdir -p ca-private-2
mkdir -p ca-cert-2
mkdir -p server-private-2
mkdir -p server-cert-2
mkdir -p client-private-2
mkdir -p client-cert-2
#Step 1: Generate CA private key
if [ ! -f $PWD/ca-private-2/ca-private-key-2.pem ]; then
docker run --rm -it -w /home -v $PWD:/home svagi/openssl genrsa -out /home/ca-private-2/ca-private-key-2.pem 2048
echo 'Step 1: CA private key created'
else
echo 'Step 1: CA private key already exists'
fi
#Step 2: Generate CA certificate using the private key
if [ ! -f $PWD/ca-cert-2/ca-cert-2.pem ]; then
docker run --rm -it -w /home -v $PWD:/home svagi/openssl req -sha1 -new -x509 -nodes -days 3650 -key /home/ca-private-2/ca-private-key-2.pem \
-out /home/ca-cert-2/ca-cert-2.pem -subj "/C=GB/ST=Greater London/L=London/O=Dell EMC/OU=AWG/CN=awg.dell.com"
echo 'Step 2: CA certificate created'
else
echo 'Step 2: CA certificate already exists'
fi
#Step 3: Generate private server key and signing request
if [ ! -f $PWD/server-private-2/server-private-key-2.pem ]; then
docker run --rm -it -w /home -v $PWD:/home svagi/openssl req -sha1 -newkey rsa:2048 -days 730 -nodes -keyout /home/server-private-2/server-private-key-2.pem \
-out /home/server-private-2/server-key-signing-req-2.pem -subj "/C=GB/ST=Greater London/L=London/O=Dell EMC/OU=AWG/CN=mysql.awg.dell.com"
echo 'Step 3: Private server key and signing request created'
else
echo 'Step 3: Private server key already exists'
fi
#Step 4: Export private server key to RSA private key
if [ ! -f $PWD/server-private-2/server-private-key-rsa-2.pem ]; then
docker run --rm -it -w /home -v $PWD:/home svagi/openssl rsa -in /home/server-private-2/server-private-key-2.pem -out /home/server-private-2/server-private-key-rsa-2.pem
echo 'Step 4: Private server key expoerted to RSA format'
else
echo 'Step 4: Private server key already exists in RSA format'
fi
#Step 5: Create server certificate based on server signing request
if [ ! -f $PWD/server-cert-2/server-cert-2.pem ]; then
docker run --rm -it -w /home -v $PWD:/home svagi/openssl x509 -sha1 -req -in /home/server-private-2/server-key-signing-req-2.pem \
-days 730 -CA /home/ca-cert-2/ca-cert-2.pem -CAkey /home/ca-private-2/ca-private-key-2.pem -set_serial 01 -out /home/server-cert-2/server-cert-2.pem
echo 'Step 5: Server certificate created.'
else
echo 'Step 5: Server certificate already exists.'
fi
#Step 6: Create private client key and signing request
if [ ! -f $PWD/client-private-2/client-private-key-2.pem ]; then
docker run --rm -it -w /home -v $PWD:/home svagi/openssl req -sha1 -newkey rsa:2048 -days 730 -nodes \
-keyout /home/client-private-2/client-private-key-2.pem -out /home/client-private-2/client-key-signing-req-2.pem \
-subj "/C=GB/ST=Greater London/L=London/O=Dell EMC/OU=AWG/CN=client.awg.dell.com"
echo 'Step 6: Client private key and signing request created.'
else
echo 'Step 6: Client server key already exists'
fi
#Step 7: Export private client key to RSA private key
if [ ! -f $PWD/client-private-2/client-private-key-rsa-2.pem ]; then
docker run --rm -it -w /home -v $PWD:/home svagi/openssl rsa -in /home/client-private-2/client-private-key-2.pem \
-out /home/client-private-2/client-private-key-rsa-2.pem
echo 'Step 7: Private client key exported to RSA format'
else
echo 'Step 7: Private client key already exists in RSA format'
fi
#Step 8: Sign client certificate:w
if [ ! -f $PWD/client-cert-2/client-cert-2.pem ]; then
docker run --rm -it -w /home -v $PWD:/home svagi/openssl x509 -sha1 -req -in /home/client-private-2/client-key-signing-req-2.pem \
-days 730 -CA /home/ca-cert-2/ca-cert-2.pem -CAkey /home/ca-private-2/ca-private-key-2.pem -set_serial 01 -out /home/client-cert-2/client-cert-2.pem
echo 'Step 8: Client certificate created'
else
echo 'Step 8: Client certificate already exists'
fi
| true |
17e4d16f15004b5b427c4976b45f8abe8ce043d8 | Shell | vineethreddysingedi/Documents | /LDAP_User_password_attribute_change | UTF-8 | 880 | 2.796875 | 3 | [] | no_license | #!/bin/bash
# Build an LDIF modify file (final.txt): for each LDAP entry in report.out,
# remember its "dn:" line; when the entry's "mail:" value matches an email
# in pp.txt (format: email~credential), emit a changetype:modify block that
# replaces userPassword with "{SASL}<credential>".
# NOTE(review): pp.txt is re-read in full for every matching entry (O(n*m));
# fine for small files, slow for large ones.
while read report_CHG
do
MAIL=`echo $report_CHG | awk -F ":" '{print $1}'`
# Any non-"mail" attribute: only "dn" lines are remembered for later output.
if [ "$MAIL" != "mail" ]; then
if [ "$MAIL" == "dn" ]; then
report_CHG_OLD=`echo $report_CHG`
fi
fi
if [ "$MAIL" == "mail" ]; then
EMAIL=`echo $report_CHG | awk -F " " '{print $2}'`
# "no" appears to be a sentinel for entries without an email -- TODO confirm.
if [ "$EMAIL" != "no" ]; then
while read pp_email
do
EMAIL2=`echo $pp_email | awk -F "~" '{print $1}'`
CID=`echo $pp_email | awk -F "~" '{print $2}'`
if [ "$EMAIL" == "$EMAIL2" ]; then
# Emit one LDIF modify stanza, blank-line terminated.
echo $report_CHG_OLD >>final.txt
echo 'changetype: modify' >>final.txt
echo 'replace: userPassword' >>final.txt
echo 'userPassword: {SASL}'$CID >>final.txt
echo -en "\n" >>final.txt
fi
done </home/syalamanchili/scripts/pp.txt
fi
fi
done </home/syalamanchili/scripts/report.out
| true |
a3a349cf8027138d35a8f37c0b48ca0972ceac53 | Shell | christopherreay/genericishBashScripts | /oneOffEncryption.middlePhaseEncryptByOther.sh | UTF-8 | 423 | 2.84375 | 3 | [] | no_license | arbitraryFile=$1
#email the publickey.pem to someone
#then that person runs this:
openssl rand -base64 128 -out key.bin
openssl enc -aes-256-cbc -salt -in $arbitraryFile -out arbitraryFile.enc -pass file:./key.bin
openssl rsautl -encrypt -inkey publickey.pem -pubin -in key.bin -out key.bin.enc
#the someone emails two files, the encrypted symetric key, and the arbitraryFile encrypted with that symetric key back to you
| true |
30f11e1e898fe3ee89f3175e7b86c254dc083659 | Shell | meircif/lumi-lang | /TL3/test.sh | UTF-8 | 1,590 | 3.390625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
set -ev
# --< TL3 >--
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do
DIR="$( cd -P "$( dirname "$SOURCE" )" >/dev/null && pwd )"
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE"
done
MYDIR="$( cd -P "$( dirname "$SOURCE" )" >/dev/null && pwd )"
DIR="$( cd -P "$( dirname "$MYDIR" )" >/dev/null && pwd )"
if [ -z $CC ]; then
CC=gcc
fi
CCW="$CC --std=c89 -Werror -Wall"
rm -rf $DIR/.test/TL3
mkdir -p $DIR/.test/TL3
pushd $DIR/.test
# compile tl2-compiler
$CCW -Wno-parentheses ../TL2/tl2-compiler.c ../TL1/lumi.1.c -I../TL1 \
-o TL3/tl2-compiler
# test tl3-compiler C files
cp ../TL3/*.2.lm TL3/
pushd TL3
./tl2-compiler common.2.lm map.2.lm global.2.lm exp.2.lm st-node.2.lm \
flow.2.lm args.2.lm func.2.lm member.2.lm call.2.lm operand.2.lm dec.2.lm \
type.2.lm test.2.lm tl3-compiler.2.lm
diff ../../TL3/common.c common.c
diff ../../TL3/map.c map.c
diff ../../TL3/global.c global.c
diff ../../TL3/exp.c exp.c
diff ../../TL3/st-node.c st-node.c
diff ../../TL3/flow.c flow.c
diff ../../TL3/args.c args.c
diff ../../TL3/func.c func.c
diff ../../TL3/member.c member.c
diff ../../TL3/call.c call.c
diff ../../TL3/operand.c operand.c
diff ../../TL3/dec.c dec.c
diff ../../TL3/type.c type.c
diff ../../TL3/test.c test.c
diff ../../TL3/tl3-compiler.c tl3-compiler.c
popd
# compile tl3-compiler
$CCW -Wno-unused-variable -Wno-missing-braces -Wno-typedef-redefinition \
../TL3/tl3-compiler.c ../TL2/lumi.2.c -I../TL2 -o TL3/tl3-compiler
# TL3 teardown
if [ ! -z "$CLEAR_TEST" ]; then
rm -rf TL3
fi
popd
# TL3 tests passed
| true |
9bb766c38fa9073da7400005e57a995591f9a580 | Shell | totte/core | /dialog/PKGBUILD | UTF-8 | 783 | 2.765625 | 3 | [] | no_license | #
# Arch/Chakra PKGBUILD for the `dialog` ncurses dialog-box utility.
# ${pkgver/_/-} maps the pacman-safe "1.1_20110302" back to the upstream
# "1.1-20110302" tarball/directory naming.
# Core Packages for Chakra, part of chakra-project.org
#
# maintainer (x86_64): Manuel Tortosa <manutortosa[at]chakra-project[dot]org>

pkgname=dialog
pkgver=1.1_20110302
pkgrel=2
pkgdesc="A tool to display dialog boxes from shell scripts"
arch=('i686' 'x86_64')
url="http://invisible-island.net/dialog/"
license=('LGPL2.1')
depends=('ncurses')
source=(ftp://invisible-island.net/${pkgname}/${pkgname}-${pkgver/_/-}.tgz)
md5sums=('2eaa82055b86006e205d2c3f74d9b800')
sha1sums=('ba4e79abaf579e0d23f247ae65196437f8d8e031')

build() {
  cd "${srcdir}/$pkgname-${pkgver/_/-}"
  ./configure --prefix=/usr --mandir=/usr/share/man \
     --with-ncursesw --enable-nls
  # `|| return 1` is a legacy makepkg idiom; modern makepkg aborts on error.
  make || return 1
}

package() {
  cd "${srcdir}/$pkgname-${pkgver/_/-}"
  make DESTDIR=${pkgdir} install install-lib
}
| true |
30e0d43caf891054618275f35b324e3605eb0dac | Shell | eloiroca/pieye | /application/models/comandos.sh | UTF-8 | 235 | 2.734375 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
# FAREM LES CONSULTES DELS ESTATS DELS SERVEIS
OUTPUT="$(date)"
USR=$(sudo service apach2 stop)
# headers
echo "Content-type: text/plain"
echo ""
# body
echo "Today is $OUTPUT"
echo "Current user is $USR"
| true |
0a6e1ac5d375cf6b52e58c582f5500b233639b9b | Shell | iphilgood/DockSTARTer | /scripts/install_machine_completion.sh | UTF-8 | 650 | 3.59375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
set -euo pipefail
IFS=$'\n\t'
install_machine_completion() {
# https://docs.docker.com/machine/completion/
local AVAILABLE_MACHINE_COMPLETION
AVAILABLE_MACHINE_COMPLETION=$(curl -H "${GH_HEADER:-}" -s "https://api.github.com/repos/docker/machine/releases/latest" | grep -Po '"tag_name": "\K.*?(?=")')
info "Installing docker machine completion."
curl -H "${GH_HEADER:-}" -L "https://raw.githubusercontent.com/docker/machine/${AVAILABLE_MACHINE_COMPLETION}/contrib/completion/bash/docker-machine.bash" -o /etc/bash_completion.d/docker-machine > /dev/null 2>&1 || fatal "Failed to install docker machine completion."
}
| true |
46f697f1d9acea74940029786d3b8793765916e8 | Shell | cha63506/core-3 | /networkmanager-qt/PKGBUILD | UTF-8 | 783 | 2.609375 | 3 | [] | no_license | # Contributor: Andrea Scarpino <andrea@archlinux.org>
# PKGBUILD for networkmanager-qt (KDE Frameworks 5). Version, server URL,
# checksum helper (getSum) and the cmake_kf5 wrapper all come from the
# sourced ../frameworks.conf -- defined outside this file.
source ../frameworks.conf

pkgname=networkmanager-qt
pkgver=${KFVersion}
pkgrel=1
pkgdesc='Qt wrapper for NetworkManager API'
arch=('x86_64')
url='https://projects.kde.org/projects/frameworks/networkmanager-qt'
license=('LGPL')
depends=('networkmanager' 'qt5-base')
makedepends=("extra-cmake-modules>=${KFECMVersion}")
checkdepends=("cmake")
groups=('kf5')
replaces=('libnm-qt5')
conflicts=('libnm-qt5')
provides=("libnm-qt5=${pkgver}")
options=("debug")
source=("${KFServer}/${pkgname}-${pkgver}.tar.xz")
sha256sums=( $(getSum ${pkgname}) )

prepare() {
  mkdir -p build
}

build() {
  cd build
  cmake_kf5 ../${pkgname}-${pkgver}
  make
}

check() {
  cd build
  # Test failures are deliberately ignored (package still builds).
  make test || return 0
}

package() {
  cd build
  make DESTDIR="${pkgdir}" install
}
| true |
75e1e9033354968aff38f757fdc0a145110dc93b | Shell | LIParadise/Docker____build_something_with_debian_buster | /ASYNC17.sh | UTF-8 | 677 | 2.765625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env sh
# Attach to the ASYNC17 container if it is already running, otherwise start
# it interactively with the current uid/gid and the sibling work directory
# bind-mounted at the same absolute path inside the container.
# NOTE(review): `socker` is used consistently; presumably a local docker
# wrapper/alias rather than a typo for `docker` -- confirm it exists on PATH.
if socker ps | grep ASYNC17; then
	socker attach --detach-keys="ctrl-^" ASYNC17
else
	socker run -it --rm \
		--name ASYNC17 \
		--user "$(id -u)":"$(id -g)" \
		--mount type=bind,source="$(pwd -P)/../ABC_async_ASYNC17",target="$(pwd -P)/../ABC_async_ASYNC17" \
		--detach-keys="ctrl-^" \
		async:debian_buster
fi
| true |
916133069b8beb4be4354ba0fe30868be4dab41b | Shell | petronny/aur3-mirror | /mfdl-git/PKGBUILD | UTF-8 | 655 | 2.765625 | 3 | [] | no_license | # Maintainer: Gabriel Peixoto <gabrielrcp@gmail.com>
pkgname=mfdl-git
pkgver=0.32.a3dc6f1
pkgrel=1
pkgdesc='Download manga from mangafox.me'
url='https://github.com/techwizrd/MangaFox-Download-Script'
license=('custom')
arch=('any')
depends=('python2-beautifulsoup4')
makedepends=('git')
_gitname=mfdl
source=($_gitname::'git://github.com/techwizrd/MangaFox-Download-Script.git')
md5sums=('SKIP')
pkgver() {
cd $_gitname
echo 0.$(git rev-list --count HEAD).$(git rev-parse --short HEAD)
}
prepare() {
cd $_gitname
sed 's,#!/usr/bin/env python,#!/usr/bin/env python2,' -i mfdl.py
}
package() {
install -Dm755 "$srcdir/$_gitname/mfdl.py" "$pkgdir/usr/bin/mfdl"
}
| true |
da5d43a50f7047c1a898a1fa3c81882ad75d0a7b | Shell | kiistala/grab-area-read-barcode | /grab-area-read-barcode.sh | UTF-8 | 371 | 3.703125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Interactively capture a screen region (scrot -s), scan it for a barcode
# with zbarimg, and put the decoded value on the clipboard via xclip.
DIR="$HOME/Screenshots"
FILENAME=$DIR/$(date +%Y%m%d-%H%M%S).png

if [ ! -d $DIR ]
then
    mkdir -pv $DIR
fi

touch $FILENAME
# -s: user selects the area; -b: include window borders.
scrot -b -s $FILENAME

# zbarimg prints "<type>:<data>" and exits non-zero when nothing is found.
zbarimg $FILENAME > /tmp/barcode.txt
if [ $? = 0 ]
then
    # Keep the part after the first ':' (the decoded payload).
    BARCODE=$(cat /tmp/barcode.txt | cut -d: -f2)
    echo $BARCODE | xclip -selection c
else
    echo "Barcode detection or screen capture failed" && exit
fi
| true |
5dcdcaed86dd29f75cccb6e37a2739a8151bbf29 | Shell | kchaitu4/do | /bin/do_bootstrap.sh | UTF-8 | 1,232 | 3.546875 | 4 | [] | no_license | #!/bin/bash
# Bootstrap a DigitalOcean droplet: create a passwordless-sudo user, install
# the given public key, disable root key login, and record the user/key in
# ~/.digitalocean.d/<ip>.yml for later tooling.
# Requires DO_ROOT_KEY (root's private key path) from ~/.digitalocean.
. ${HOME}/.digitalocean

if [ -z "${DO_ROOT_KEY}" ] ; then
  echo "No DO_ROOT_KEY found in environment. Usually it is in ${HOME}/.digitalocean"
  exit 1
fi

IP=${1}
if [ -z "${IP}" ] ; then
  echo "Usage $0 ip-address user /path/to/key.pub"
  exit 1
fi

USER=${2}
if [ -z "${USER}" ] ; then
  echo "Usage $0 ip-address user /path/to/key.pub"
  exit 1
fi

KEY_PATH=${3}
if [ -z "${KEY_PATH}" ] ; then
  echo "Usage $0 ip-address user /path/to/key.pub"
  exit 1
fi

# Resolve a droplet name/id to its IP via the sibling `do` tool.
IP=$($(dirname $0)/do droplet-ip ${IP})

# Remote step 1 (as root): create the sudo user and prepare ~/.ssh.
ssh -i ${DO_ROOT_KEY} root@${IP} "bash -c '\
useradd -m -s /bin/bash ${USER} && \
adduser ${USER} sudo && \
echo \"${USER} ALL=(ALL) NOPASSWD: ALL\" >> /etc/sudoers && \
mkdir /home/${USER}/.ssh && \
chown ${USER} /home/${USER}/.ssh && \
chmod 700 /home/${USER}/.ssh \
'"

scp -i ${DO_ROOT_KEY} ${KEY_PATH} root@${IP}:/home/${USER}/.ssh/authorized_keys2

# Remote step 2: fix ownership and revoke root's authorized keys.
ssh -i ${DO_ROOT_KEY} root@${IP} "bash -c '\
chown -R ${USER} /home/${USER}/.ssh && \
chmod 700 /home/${USER}/.ssh/authorized_keys2 && \
rm /root/.ssh/authorized_keys*
'"

mkdir -p ${HOME}/.digitalocean.d
# NOTE(review): in 's/.pub$//' the dot is an unescaped regex metachar, so
# e.g. "key_pub" would also be truncated -- '\.pub$' would be stricter.
PRIVATE_KEY=$(echo ${KEY_PATH} | sed -e 's/.pub$//')
echo "user: ${USER}
key_path: ${PRIVATE_KEY}
" > ${HOME}/.digitalocean.d/${IP}.yml
| true |
b899468db8394cfa6bd30e7d2c1f95a8018800a1 | Shell | benbarber/dotfiles | /bin/harpoon | UTF-8 | 169 | 3.046875 | 3 | [
"MIT"
] | permissive | #!/bin/sh
#
# Usage: harpoon
# Description: Kills and removes all running Docker containers
# This file only defines the function (no call at the bottom); presumably it
# is sourced or lives on PATH as a snippet -- confirm how it is loaded.
# NOTE(review): when no containers exist, `docker ps -q` is empty and
# `docker kill` runs without arguments, which errors out.

harpoon () {
	docker kill $(docker ps -q)
	docker rm $(docker ps -a -q)
}
99e1473a2df80ef3ca1aed243b0a0232bbc3196a | Shell | brianonn/ansible-bootstrap | /bootstrap.sh | UTF-8 | 5,509 | 4.0625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
#
#
# Bootstrap Ansible on a host. After this script is run, you can run
# Ansible playbooks to finalize the host setup
#
# This script will work and has been tested on Linux
# TODO: Test on OS X and FreeBSD.
#
# run with sudo -H
# $ sudo -H bootstrap.sh
#
# Author: Brian A. Onn (brian.a.onn@gmail.com)
# Date: Sat Apr 29 16:30:15 UTC 2017
# License: MIT
# Remove registered temp files on normal exit and on common signals.
trap cleanup EXIT SIGHUP SIGINT SIGQUIT SIGTERM
trapfiles=""

# Delete every path registered via addtrapfile.
cleanup () {
  # trapfiles is a deliberately unquoted space-separated list of paths.
  rm -rf ${trapfiles}
}

# Register a temp file/dir for removal by cleanup.
# Arguments: $1 - path to register
addtrapfile () {
  # BUGFIX: previously appended to $trapfile (singular), so cleanup() never
  # saw any registered path and temp files leaked on every run.
  trapfiles="${trapfiles} $1"
}
#########################################
# pathname tilde expansion
# supports ~ ~/path and ~user only
# ~+ ~- and digits are not supported and
# don't make sense in a script anyways
#########################################
# Arguments: $1 - path possibly starting with a tilde
# Outputs:   the expanded path (or the input unchanged) on stdout
# ~user lookup uses dscacheutil on OS X and getent on Linux, repurposing the
# positional parameters via `set` to pick fields out of the lookup output.
expandpath () {
  local path="$1"
  local homedir expath user rest
  case "${path}" in
    '~') expath="${HOME}" ;;
    '~'/*) expath="${HOME}/${path##'~/'}" ;;
    '~'*) user=${path%%/*}; rest=${path##$user}; user=${user##'~'}
      if [ -x /usr/bin/dscacheutil ]; then ## OS X
        set 1 $(dscacheutil -q user -a name "$user" | grep -e '^dir:')
        homedir="$3"
      else
        # NOTE(review): the IFS=: prefix likely does not affect the splitting
        # of the $(getent ...) expansion as intended (the assignment takes
        # effect after the expansion is split) -- verify the $7 field here.
        IFS=: set 1 $(getent passwd "$user") ## Linux
        homedir="$7"
      fi
      # Fall back to the unexpanded input when the user lookup fails.
      [ -z "${homedir}" ] && expath="${path}" || expath="${homedir}$rest"
      ;;
    *) expath="${path}" ;;
  esac
  echo "${expath}"
}
#########################################
# tempdir, logging and stderr redirection
#########################################
# prefer TMP, use TEMP if TMP is not set, finally use /tmp as a default
# also do ~ expansion on TMP and TEMP
tmp="$(expandpath "${TMP:=${TEMP:-/tmp}}")"
tmplog=$(mktemp "${tmp}/ansible-bootstrap.XXXXX.log")
addtrapfile "${tmplog}"

# Append a line to the temp log without echoing to the console.
echolog() {
  echo "$*" >> $tmplog
  return 0
}

# close stdout
#exec 1<&-
# close stderr
#exec 2<&-
# re-open stdout to the tty and logfile,
# and send stderr only to the logfile
#exec 3>&1 &>${tmplog} 1> >(tee >(cat >&3))
# Active redirection: stderr goes only to the log; stdout is duplicated to
# both the console and the log via tee's process substitution.
exec 2>>${tmplog} 1> >(tee -a ${tmplog} >&1)
#########################################
# local vars and utility functions here
#########################################
# Terminal attribute strings captured once via tput (empty on dumb terminals).
bold="$(tput bold)"
# cnorm restores a visible cursor; sgr0 resets all attributes.
norm="$(tput cnorm;tput sgr0)"
red="$(tput setaf 1)"
grn="$(tput setaf 2)"
# Print an attention-grabbing message line in bold red.
# Arguments: $1 - message text
redmsg () {
  printf ' %s%s*** %s ***%s\n' "${bold}" "${red}" "$1" "${norm}"
}

# Print a confirmation message line in bold green.
# Arguments: $1 - message text
grnmsg () {
  printf ' %s%s*** %s ***%s\n' "${bold}" "${grn}" "$1" "${norm}"
}
# Report whether a command name resolves (binary, builtin or function).
# Arguments: $1 - command name
# Returns:   0 when found, non-zero otherwise
is_installed() {
  # Quote $1: unquoted, an empty or whitespace argument expanded away and
  # made `type` run with no operand at all. The explicit
  # `&& return 0 || return 1` was redundant -- type's status is returned.
  type "$1" >/dev/null 2>&1
}
# Pick whichever SHA-256 tool exists; the last match wins, and the
# "echo no" fallback makes the later checksum comparison fail safely.
shacmd="echo no"
is_installed shasum && shacmd="shasum -p -a 256"
is_installed sha256 && shacmd=sha256
is_installed sha256sum && shacmd=sha256sum

srcpkgs="ansible sshpass" # src pkgs are built from source on OS X
binpkgs="git python curl" # linux will apt-get all these +src
pippkgs="paramiko PyYAML jinja Sphinx pycrypto cryptography" # python packages installed via pip

# Lowercased OS name ("linux", "darwin", ...) used by the case dispatch below.
system="$(uname -s|tr 'A-Z' 'a-z')"
# Download get-pip.py, verify its pinned SHA-256, then install and upgrade
# pip. Returns 0 on success, 1 when the checksum does not match.
# NOTE(review): the local `getpip` variable shadows the function name
# (legal in shell, but confusing), and the temp file is not registered
# with addtrapfile, so it survives the run.
getpip () {
  url="https://bootstrap.pypa.io/get-pip.py"
  getpip=$(mktemp /tmp/get-pip.XXXXX.py)
  # Pinned checksum of the known-good get-pip.py at time of writing.
  sha256="19dae841a150c86e2a09d475b5eb0602861f2a5b7761ec268049a662dbd2bd0c"
  echo "Downloading get-pip.py from '$url'"
  curl -m 300 --retry 3 -o "${getpip}" "${url}" >> $tmplog 2>&1
  # First whitespace-separated field of the tool output is the digest.
  dlsha256=$(${shacmd} ${getpip} | cut -f1 -d' ')
  if [ "${sha256}" = "${dlsha256}" ]; then
    echo "SHA256 sum is correct: $sha256"
    echo "Running get-pip.py to install pip for python"
    python "${getpip}"
    echo "Running pip updater"
    pip install -U pip
    return 0
  else
    redmsg "The get-pip.py command at:"
    redmsg "${url}"
    redmsg "does not match the known sha256 checksum"
    return 1
  fi
}
# Must run as root: the installers below write to system locations.
if [ $(id -u) != 0 ]; then
    redmsg "Sorry, this script must run as root"
    redmsg "Use 'sudo -H' to bootstrap ansible"
    exit 255
fi
echo "Platform: ${system}"
# Per-platform installation of the binary/source packages listed above.
case ${system} in
    linux)
        apt-add-repository -y ppa:ansible/ansible
        apt-get -qq -y update
        for pkg in ${binpkgs} ${srcpkgs}; do
            if is_installed $pkg; then ## assumes the package name is also the binary name
                echo $pkg is already installed
            else
                echo -n "Installing $bold$pkg$norm ... "
                apt-get -qy install $pkg 2>/dev/null >/dev/null
                echo "[OK]"
            fi
        done
        ;;
    darwin)
        echo "OS X support is incomplete and untested"
        #install packages which have brew formulas
        brew install ${binpkgs}
        # on OSX we build ansible and sshpass from src
        echo "Building Ansible from source"
        # requires: xcode, terminal and command-line utilites be already installed
        # get ansible source here and build it
        repo=$(mktemp -d /tmp/repo.XXXXX)
        # NOTE(review): addtrapfile is defined earlier in the full script
        # (not visible in this excerpt); presumably it registers the dir
        # for cleanup on exit — confirm before reusing this chunk alone.
        addtrapfile "${repo}"
        git clone git://github.com/ansible/ansible.git "${repo}"
        ( cd ${repo} && make install )
        echo "Building sshpass"
        # sshpass="https://git.io/sshpass.rb"
        sshpass="file://sshpass.rb"
        brew install ${sshpass}
        ;;
    bsd)
        echo "I don't know bsd yet."
        ;;
esac
# do the pip install
getpip && echo "Running 'pip install ${pippkgs}'" && pip install ${pippkgs}
# $? here reflects the whole chain above: any failed step marks the
# install as broken and selects the error messages printed below.
if test $? != 0; then
    e1="Python package manager pip failed to install"
    e2="Ansible will not work without python packages"
    err=1
    st="Ansible is not installed"
else
    err=0
    e1= e2=
    st="Ansible is installed"
fi
if [ ${err} = 1 ]; then
    echo
    redmsg "${e1}"
    redmsg "${e2}"
    echo
    redmsg "${st}"
else
    echo
    grnmsg "${st}"
fi
# Preserve the session log under a timestamped name.
# NOTE(review): $tmpdir and $tmplog are created earlier in the full script
# (not visible in this excerpt) — confirm before reusing this chunk alone.
logfile="${tmpdir}/ansible-bootstrap-$(date -u '+%Y%m%d%H%M%S').log"
mv ${tmplog} ${logfile}
echo
echo Logfile: ${logfile}
echo
#########
# Step 2 -- Create the ansible user and home dir
#########
exit 0
| true |
b020f64ee14f0d887b83c9f4ce4753411767e64b | Shell | jmisiti42/minecraft-42-server | /stop.sh | UTF-8 | 380 | 2.6875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Stop every running Java server.
echo '[Stop] Stopping every java server(s)..'
# pkill -f matches against the full command line, which is what the old
# `ps aux | grep java | tr -s ' ' '-' | cut` pipeline approximated — but
# without the fragile column parsing or the need to filter out the grep
# process itself. `|| true` keeps the exit status clean when no process
# matched. (Also fixed the "[Shop]" typo in the two status messages.)
pkill -f java || true
echo '[Stop] Java server(s) stopped..'
# Stop every running tail command.
echo '[Stop] Stopping every tail commands'
pkill -f tail || true
echo '[Stop] Tail commands stopped..'
| true |
cd63eb35f1143071d1f1f58a66835a84aaed33a9 | Shell | thfield/irs-migration | /munge/geotopo.sh | UTF-8 | 222 | 2.890625 | 3 | [] | no_license | #!/bin/bash
# Optional positional args override the state / county FIPS codes.
st=${1:-"06"} # california fips code
co=${2:-"075"} # san francisco county fips code
# Input GeoJSON and output TopoJSON paths under ./data, consumed by the
# geo2topo conversion on the following line.
path="./data"
input="$path/$st$co-shapes.geojson"
output="$path/$st$co-shapes.topojson"
geo2topo features=$input > $output | true |
d33ba49d4a7838f34bfad41de7d3fa20bcefc700 | Shell | rohit-bindal/openroberta-lab | /Docker/openroberta/scripts/helper/_dbContainerBackup.sh | UTF-8 | 915 | 3.765625 | 4 | [
"MIT",
"GPL-3.0-only",
"Apache-2.0"
] | permissive | #!/bin/bash
# NOTE(review): this file is a helper included by the main admin script,
# which is expected to provide isServerNameValid, $DATABASE_NAME, $INAME
# and $DB_ADMIN_DIR — confirm before running it standalone.
isServerNameValid $DATABASE_NAME
# Database container name and the HSQLDB JDBC URI served by the db-server.
CONTAINER="${INAME}-$DATABASE_NAME"
DB_URI="jdbc:hsqldb:hsql://${INAME}-db-server/openroberta-db-$DATABASE_NAME"
mkdir -p $DB_ADMIN_DIR/dbBackup/$DATABASE_NAME # optional create of the backup dir for the database
echo "starting the backup of database $DATABASE_NAME"
# Run the OpenRoberta administration tool inside the container; it writes
# the backup archive into the mounted dbBackup directory.
docker exec ${CONTAINER} java -cp /opt/openroberta-lab/lib/\* de.fhg.iais.roberta.main.Administration db-backup ${DB_URI} /opt/dbAdmin/dbBackup/$DATABASE_NAME
RC=$?
if [ $RC -eq 0 ]
then
    # Make the archive readable by the dbBackup group (when it exists) so
    # external backup jobs can fetch it.
    if [ $(getent group dbBackup) ]; then
        chmod g+r $DB_ADMIN_DIR/dbBackup/$DATABASE_NAME/*.tgz
        chgrp dbBackup $DB_ADMIN_DIR/dbBackup/$DATABASE_NAME/*.tgz
        echo "backup successful. Backup accessible by group 'dbBackup'"
    else
        echo "backup successful. This is a purely local backup. Group 'dbBackup' for external access of the backup not found"
    fi
else
    echo "backup ERROR. Got return code $RC"
fi | true |
6bcc4ca4c6c9ac27a864ef7b9d1107a3996a280d | Shell | leifei87/azure-bosh-acceptance-test | /scenarios/persistent-disk/deploy | UTF-8 | 3,624 | 3.265625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash -xeu
source e2e/functions/utils.sh

scenario=$1
scenario_path=$2

# Deploy the scenario with the given persistent-disk type from the ops file.
deploy_disk() {
  local disk_type=$1
  bosh -n -d "$scenario" deploy e2e/manifests/deployment.yml \
    -o "${scenario_path}/ops.yml" \
    -v scenario="$scenario" \
    -v disk_type="$disk_type"
}

# Query the attached data disk of the scenario VM and record its size (GB)
# and caching mode in the globals `disk_size` / `disk_caching`.
read_disk_info() {
  local rg_name vm_name
  rg_name=$(get_rg_name "${scenario}")
  vm_name=$(get_vm_name "${scenario}")
  disk_size=$(az vm show --name "${vm_name}" --resource-group "${rg_name}" | jq -r '.storageProfile.dataDisks[] | select(.createOption == "Attach").diskSizeGb')
  disk_caching=$(az vm show --name "${vm_name}" --resource-group "${rg_name}" | jq -r '.storageProfile.dataDisks[] | select(.createOption == "Attach").caching')
}

# Compare the recorded disk info against the expected size/caching and
# exit 1 with the original failure message on mismatch.
check_disk() {
  local expected_size=$1 expected_caching=$2
  if [ "${disk_size}" == "${expected_size}" ] && [ "${disk_caching}" == "${expected_caching}" ]
  then
    echo "The disk size and caching is expected."
  else
    echo "The disk size and caching is not expected."
    exit 1
  fi
}

##########################################
####    Case: size 1GB, caching None  ####
##########################################
echo "Case: size 1GB, caching None"
deploy_disk 1GB
read_disk_info
check_disk 1 "None"

##########################################
###  Case: size 1GB, caching ReadOnly  ###
##########################################
echo "Case: size 1GB, caching ReadOnly"
deploy_disk 1GBReadOnly
read_disk_info
check_disk 1 "ReadOnly"

##########################################
###  Case: size 1GB, caching ReadWrite ###
##########################################
echo "Case: size 1GB, caching ReadWrite"
deploy_disk 1GBReadWrite
read_disk_info
check_disk 1 "ReadWrite"

##########################################
####    Case: size 4TB, caching None  ####
##########################################
echo "Case: size 4TB, caching None"
deploy_disk 4TB
read_disk_info
# As in the original, the deployment is torn down before the final check so
# cleanup happens even when the 4TB verification fails.
bosh -n -d "$scenario" delete-deployment
check_disk 4095 "None"
| true |
60e74d9540c62d1ae3bb27b25f43258a171bbf0f | Shell | FAIRplus/the-fair-cookbook | /scripts/build.sh | UTF-8 | 1,440 | 3.171875 | 3 | [
"CC-BY-4.0"
] | permissive | echo 'Install mermaid via npm'
npm install @mermaid-js/mermaid-cli
echo 'Record mermaid version'
./node_modules/.bin/mmdc -V
find . -iname "*.mmd" > list_of_all_mermaid_files
echo 'Search all mermaid files and record a list of them'
cat list_of_all_mermaid_files
echo 'Convert all mermaid files to pngs and svg'
# Single pass over the list, producing all four renderings per diagram.
# `IFS= read -r` plus quoted "$line" keeps paths containing spaces or
# backslashes intact; the old unquoted `while read line` loops mangled them
# and re-read the list four separate times.
while IFS= read -r line; do
  echo "$line"
  ./node_modules/.bin/mmdc -i "$line" -w 800 -o "$line.png"
  ./node_modules/.bin/mmdc -i "$line" -w 1600 -o "$line.hi-res.png"
  ./node_modules/.bin/mmdc -i "$line" -w 800 -o "$line.lo-res.png"
  ./node_modules/.bin/mmdc -i "$line" -o "$line.svg"
done < list_of_all_mermaid_files
echo 'Cleanup intermediate files and removed node_modules'
rm list_of_all_mermaid_files
rm -rf ./node_modules
echo '{}' > _static/recipes.json
echo 'Pull FAIRCookbook - RDMkit linkage file'
curl -o _static/faircookbook_rdmkit_mapping.yml https://raw.githubusercontent.com/elixir-europe/faircookbook-rdmkit/main/faircookbook_rdmkit_mapping.yml
echo 'Build the book'
jb build . 2>&1 | tee ./build.log
# Abort with a non-zero status when Jupyter Book reported a build error.
# Previously `grep ...; test $? -eq 1` computed this but discarded the
# result (the script does not run under `set -e`), so failures were silent.
if grep -q "There was an error in building your book. Look above for the cause." ./build.log; then
  echo "Jupyter Book reported a build error; aborting." >&2
  exit 1
fi
# Strip ANSI escape sequences out of the build log.
sed "s,\x1B\[[0-9;]*[a-zA-Z],,g" ./build.log > ./_build/cleaned_build.log
./scripts/extract-warnings.sh
| true |
29b26b6e906ee27385a13bd3c104877ac17b4a78 | Shell | stanstartx/postgresqlgnuhealth | /gnuhealth/entrypoint.sh | UTF-8 | 416 | 2.71875 | 3 | [] | no_license | #!/bin/bash
set -e
# Resolve connection settings with a precedence chain: explicit DB_* env
# vars win, then the legacy docker --link POSTGRES_* vars, then defaults.
# `: ${VAR:=default}` assigns only when VAR is unset or empty.
: ${DB_USER:=${POSTGRES_ENV_POSTGRES_USER:='postgres'}}
: ${DB_PASSWORD:=${POSTGRES_ENV_POSTGRES_PASSWORD}}
: ${DB_HOSTNAME:=${POSTGRES_PORT_5432_TCP_ADDR:='postgres'}}
: ${DB_PORT:=${POSTGRES_PORT_5432_TCP_PORT:='5432'}}
# Tryton reads the full database URI (and PYTHONOPTIMIZE) from the env.
: ${TRYTOND_DATABASE_URI:="postgresql://${DB_USER}:${DB_PASSWORD}@${DB_HOSTNAME}:${DB_PORT}/"}
: ${PYTHONOPTIMIZE:=1}
export TRYTOND_DATABASE_URI PYTHONOPTIMIZE
# Replace this shell with the container's command (keeps PID 1 signals).
exec "$@"
| true |
6b9ab755e7d1a5c5d8a5fc6225fc6eee521895a1 | Shell | renchild8/Settings | /Mac/.bashrc | UTF-8 | 1,116 | 2.75 | 3 | [] | no_license | # alias
## ls
alias ll='ls -l'
alias la='ls -la'
## rm
### brew install rmtrash
# Move deletions to the macOS Trash instead of removing them permanently.
alias rm='rmtrash'
## git
alias add='git add'
alias com='git commit'
alias push='git push'
alias pull='git pull'
alias fetch='git fetch'
alias stash='git stash'
alias clone='git clone'
alias gd='git diff'
## git reset
alias grh='git reset --hard'
## git branch
alias gbr='git branch'
alias gch='git checkout'
alias gst='git status'
## git log
alias gl='git log --oneline -n 50'
alias gg='git log --oneline --graph --all'
alias graph='git graph'
## carthage
# update / bootstrap Carthage dependencies for the iOS platform only
alias cupi='carthage update --platform iOS'
alias cbpi='carthage bootstrap --platform iOS'
## Xcode
# wipe all iOS simulator contents and settings
alias simreset='xcrun simctl erase all'
# branch name
## git setting
# Homebrew-installed helpers providing __git_ps1 and git completion.
source /usr/local/etc/bash_completion.d/git-prompt.sh
source /usr/local/etc/bash_completion.d/git-completion.bash
# Show unstaged (*) / staged (+) markers in the prompt's branch name.
GIT_PS1_SHOWDIRTYSTATE=true
## color
## default:cyan / root:red
# user@host colored (cyan, red for root), bold cwd, current branch in red.
if [ $UID -eq 0 ]; then
PS1='\[\033[31m\]\u@\h\[\033[00m\]:\[\033[01m\]\W\[\033[31m\]$(__git_ps1)\[\033[00m\]\\$ '
else
PS1='\[\033[36m\]\u@\h\[\033[00m\]:\[\033[01m\]\W\[\033[31m\]$(__git_ps1)\[\033[00m\]\\$ '
fi
| true |
10717b577c0112056ccff4ca2bf94aa6671378d4 | Shell | WGR7/spinfs | /scripts/dump_then_less.sh | UTF-8 | 162 | 2.703125 | 3 | [] | no_license | #!/bin/bash
#set -x

# Directory containing this script, resolved through symlinks.
SCRIPT_DIR=$(dirname "$(realpath "$0")")
OUTPUT_DIR="$SCRIPT_DIR/../output"

# Bail out if the output dir is missing — otherwise ./dump_flash would run
# in whatever directory the caller happened to be in. Quoting the paths
# also survives script locations containing spaces.
cd "$OUTPUT_DIR" || exit 1
./dump_flash
hexdump -C flash_dump.bin | less
cd -
| true |
4a5ae9d4fcef04b46e77aa3db6963cb2921d9daf | Shell | christianchabot/xv6-small | /kern/vectors.sh | UTF-8 | 689 | 2.90625 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# Emit the trap-vector stubs and the vector table for the kernel.
# A single redirected command group replaces the old per-line appends,
# but the generated vectors.s content is byte-for-byte identical.
MAXVEC=256
MINVEC=0
VECFILE=vectors.s

{
	printf "# generated by vectors.sh - do not edit\n# handlers\n"
	printf ".intel_syntax noprefix\n"
	printf ".global alltraps\n"

	# One stub per vector: push a dummy error code and the vector number,
	# then jump to the shared trap entry point.
	n=$MINVEC
	while [ "$n" -lt "$MAXVEC" ]
	do
		printf ".global vector%s\n" "$n"
		printf "vector%s:\n" "$n"
		printf '\tpush 0\n'
		printf '\tpush %s\n' "$n"
		printf '\tjmp alltraps\n'
		n=$((n+1))
	done

	# Table of pointers to the stubs above, consumed by the IDT setup.
	printf '\n# vector table\n'
	printf '.data\n'
	printf '.global vectors\n'
	printf 'vectors:\n'
	n=$MINVEC
	while [ "$n" -lt "$MAXVEC" ]
	do
		printf '\t.long vector%s\n' "$n"
		n=$((n+1))
	done
} > "$VECFILE"
| true |
c92527711fb6ff91fc4a4596a84b6b87cb0173c1 | Shell | sengeiou/tech | /lotserver/script/origin/ui/release/publish-trunk-compiler.sh | UTF-8 | 1,294 | 3.15625 | 3 | [] | no_license | #!/bin/bash
#----------------------------------------
# Purpose: compile the trunk components: ui, supui (support), im, or all.
#
# Usage:
#   publish-trunk-compiler.sh ui      # compile the web UI
#   publish-trunk-compiler.sh im      # compile the web IM
#   publish-trunk-compiler.sh supui   # compile the support UI
#   publish-trunk-compiler.sh all     # compile everything
#----------------------------------------
SVN_PATH="/root/svn_trunk"

# Seed the checkout with the shared winterframework artifacts, minus the
# winter-rest module which must be rebuilt from source.
cp -frvp /usr/local/repository/com/winterframework "$SVN_PATH"/
rm -rf /usr/local/repository/com/winterframework/winter-rest

# Compile the web UI. Every cd is now checked (`|| return 1`) so that a
# missing directory no longer lets mvn run in the wrong working directory.
function ui()
{
	cd "$SVN_PATH" || return 1
	svn up --no-auth-cache --non-interactive
	cd winter-firefrog-web || return 1
	mvn clean package -Dmaven.test.skip=true
}

# Build/install the shared winter-game-common module, then the support UI.
function support()
{
	cd "$SVN_PATH"/winter-game-common || return 1
	mvn clean package install -Dmaven.test.skip=true
	cd "$SVN_PATH" || return 1
	svn up --no-auth-cache --non-interactive
	cd winter-firefrog-support || return 1
	mvn clean package -Dmaven.test.skip=true
}

# Build/install the shared winter-game-common module, then the web IM.
function im()
{
	cd "$SVN_PATH"/winter-game-common || return 1
	mvn clean package install -Dmaven.test.skip=true
	cd "$SVN_PATH" || return 1
	svn up --no-auth-cache --non-interactive
	cd winter-firefrog-webim || return 1
	mvn clean package -Dmaven.test.skip=true
}

# Compile every component in sequence.
function all()
{
	ui
	support
	im
}

# Dispatch on the requested component.
case "$1" in
"ui")
	ui
	;;
"supui")
	support
	;;
"im")
	im
	;;
"all")
	all
	;;
*)
	echo "not found trunk service compile!"
	;;
esac
| true |
ed75e1c645b0257cb115edecf00e16ff464eda34 | Shell | ClareJolly/progress-tracker-system | /scripts/config.sh | UTF-8 | 1,363 | 3.546875 | 4 | [] | no_license | # Grep exclude rules
# Grep arguments shared by every findLines call. This must stay a plain
# word-split string (passed unquoted below) so every --exclude/--include
# flag reaches grep as its own argument.
excludeRules="--exclude-dir="\node_modules" --exclude-dir=".vscode" --exclude-dir="\temp" --exclude-dir="\evidence\examples\portfolio-examples" --exclude="goals-progress*.md" --exclude="feedback.md" --exclude="personal.md" --exclude="training.md" --exclude="portfolio.md" --exclude="useful.md" --exclude="temp/output-file*.md" --exclude="temp/goals-group*.md" --exclude="output-file*.md" --include="*.md" ./ "

# CREATE FILE — create the file if it does not already exist.
# All path expansions are now quoted so paths containing spaces work.
function createFile {
  local filePath=$1
  if [ ! -f "${filePath}" ]; then
    touch "${filePath}"
  fi
}

# FIND LINES — grep the tree for the given pattern (honouring the shared
# exclude rules) and write the matches, blank-line separated, to filePath.
function findLines {
  local filePath=$1
  local findRules=$2
  # shellcheck disable=SC2086 -- excludeRules must word-split into flags.
  grep -r "$findRules" $excludeRules | sed G > "${filePath}"
}

# SORT FILE — sort the file in place, descending.
function sortFile {
  local filePath=$1
  sort -r "${filePath}" -o "${filePath}"
}

# ADD SPACES — drop blank lines, then append a markdown divider after every
# remaining line (BSD/macOS sed syntax: -i '' edits in place).
function addSpaces {
  local filePath=$1
  sed -i "" '/^[[:space:]]*$/d' "${filePath}"
  sed -i '' -e 's/$/\
\
---\
/g' "${filePath}"
}

# FORMAT LINES — in-place regex substitution over the whole file.
function formatLine {
  local filePath=$1
  local findRegex=$2
  local replaceRegex=$3
  sed -i '' -e "s/$findRegex/$replaceRegex/g" "${filePath}"
}

# CLEAN MARKDOWN FILE — strip fenced code-block markers.
function cleanFile {
  local filePath=$1
  sed -i '' -e 's/```.*//g' "${filePath}"
}

# RUN ALL — create, populate and sort the output file, then add dividers.
function runAll {
  local filePath=$1
  local findRules=$2
  createFile "$filePath"
  findLines "$filePath" "$findRules"
  sortFile "$filePath"
  addSpaces "$filePath"
}
| true |
c48be7905133a477dcb32af5a58d4ee916f89303 | Shell | datawire/hello-jenkins | /bin/jenkins.sh | UTF-8 | 1,802 | 3.703125 | 4 | [] | no_license | #!/usr/bin/env bash
set -euxo pipefail

BIN_PATH="$(cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd)"

source ${BIN_PATH}/common.sh

# Command line option parsing
FLAG_RPM=no
FLAG_DEBIAN=no
FLAG_DOCKER=no

for i in "$@"; do
    case "$i" in
#        -a=*|--arg=*)
#        ARG="${i#*=}"
#        ;;
        --deb)
            FLAG_DEBIAN=yes
            ;;
        --docker)
            FLAG_DOCKER=yes
            ;;
        --rpm)
            # Fixed: this previously set FLAG_DOCKER=yes (copy-paste bug),
            # so --rpm silently triggered a Docker build instead.
            FLAG_RPM=yes
            ;;
        *)
            ;;
    esac
done

# Update this to indicate what programs are required before the script can successfully run.
REQUIRED_PROGRAMS=(fpm deb-s3)

WORKSPACE_DIR="${WORKSPACE:?Jenkins \$WORKSPACE environment variable is not set}"
BUILD_TOOLS_DIR="${WORKSPACE_DIR}/build-tools"

QUARK_INSTALL_URL="https://raw.githubusercontent.com/datawire/quark/master/install.sh"
QUARK_BRANCH="rel/0.7.6"
QUARK_INSTALL_DIR="${BUILD_TOOLS_DIR}/quark"
QUARK_INSTALL_ARGS="-qqq -t ${QUARK_INSTALL_DIR} ${QUARK_BRANCH}"
QUARK_EXEC="${QUARK_INSTALL_DIR}/bin/quark"

VIRTUALENV="${BUILD_TOOLS_DIR}/virtualenv"

sanity_check "${REQUIRED_PROGRAMS[@]}"
mkdir -p ${BUILD_TOOLS_DIR}

header "Setup Python virtualenv"
# virtualenv's activate script references unset variables, so relax -u
# around it. (Also removed a leftover debug `printf "$FLAG_DEBIAN"`.)
set +u
virtualenv ${VIRTUALENV}
. ${VIRTUALENV}/bin/activate
set -u

if ! command -v quark >/dev/null 2>&1; then
    # TODO(FEATURE, Quark Installer):
    # The Quark installer should be modified so the $PATH test can be disabled if installing to a specific location.
    header "Setup Datawire Quark"
    curl -sL "$QUARK_INSTALL_URL" | bash -s -- ${QUARK_INSTALL_ARGS}
    . ${QUARK_INSTALL_DIR}/config.sh
    quark --version
fi

header "Build Service"
make clean all

header "Create OS packages and Docker images"
if [[ "$FLAG_DEBIAN" = "yes" ]]; then
    make deb
fi

if [[ "$FLAG_DOCKER" = "yes" ]]; then
    make docker
fi
# NOTE(review): FLAG_RPM is parsed but no rpm packaging step exists yet —
# confirm the intended target (e.g. `make rpm`) before relying on --rpm.

header "Publishing packages and images"
make publish | true |
37abc86f13bcee3a192207a228af04d6536d1389 | Shell | melovagabond/Windows_Scripts | /PS/Powershell/Testing/UniFi-LetsEncryptCertifiate.sh | UTF-8 | 5,685 | 3.359375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Modified script from here: https://github.com/FarsetLabs/letsencrypt-helper-scripts/blob/master/letsencrypt-unifi.sh
# Modified by: Brielle Bruns <bruns@2mbit.com>
# Download URL: https://source.sosdg.org/brielle/lets-encrypt-scripts
# Version: 1.6
# Last Changed: 05/29/2018
# 02/02/2016: Fixed some errors with key export/import, removed lame docker requirements
# 02/27/2016: More verbose progress report
# 03/08/2016: Add renew option, reformat code, command line options
# 03/24/2016: More sanity checking, embedding cert
# 10/23/2017: Apparently don't need the ace.jar parts, so disable them
# 02/04/2018: LE disabled tls-sni-01, so switch to just tls-sni, as certbot 0.22 and later automatically fall back to http/80 for auth
# 05/29/2018: Integrate patch from Donald Webster <fryfrog[at]gmail.com> to cleanup and improve tests
# Location of LetsEncrypt binary we use. Leave unset if you want to let it find automatically
#LEBINARY="/usr/src/letsencrypt/certbot-auto"
# Pin a sane PATH so cron/limited environments still find the system tools.
PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
# Print command-line usage for this script (one heredoc instead of the
# original per-line echo calls; output is identical).
function usage() {
  cat <<USAGE
Usage: $0 -d <domain> [-e <email>] [-r] [-i]
  -d <domain>: The domain name to use.
  -e <email>: Email address to use for certificate.
  -r: Renew domain.
  -i: Insert only, use to force insertion of certificate.
USAGE
}
# Parse flags: -i insert-only, -r renew, -d domain (repeatable), -e email.
while getopts "hird:e:" opt; do
  case $opt in
    i) onlyinsert="yes";;
    r) renew="yes";;
    d) domains+=("$OPTARG");;
    e) email="$OPTARG";;
    h) usage
       exit;;
  esac
done
# Well-known install locations of certbot/letsencrypt, probed in order.
DEFAULTLEBINARY="/usr/bin/certbot /usr/bin/letsencrypt /usr/sbin/certbot
/usr/sbin/letsencrypt /usr/local/bin/certbot /usr/local/sbin/certbot
/usr/local/bin/letsencrypt /usr/local/sbin/letsencrypt
/usr/src/letsencrypt/certbot-auto /usr/src/letsencrypt/letsencrypt-auto
/usr/src/certbot/certbot-auto /usr/src/certbot/letsencrypt-auto
/usr/src/certbot-master/certbot-auto /usr/src/certbot-master/letsencrypt-auto"
# If the user didn't pin LEBINARY at the top, use the first executable found.
if [[ ! -v LEBINARY ]]; then
  for i in ${DEFAULTLEBINARY}; do
    if [[ -x ${i} ]]; then
      LEBINARY=${i}
      echo "Found LetsEncrypt/Certbot binary at ${LEBINARY}"
      break
    fi
  done
fi
# Command line options depending on New or Renew.
NEWCERT="--renew-by-default certonly"
RENEWCERT="-n renew"
# Check for required binaries
if [[ ! -x ${LEBINARY} ]]; then
  echo "Error: LetsEncrypt binary not found in ${LEBINARY} !"
  echo "You'll need to do one of the following:"
  echo "1) Change LEBINARY variable in this script"
  echo "2) Install LE manually or via your package manager and do #1"
  echo "3) Use the included get-letsencrypt.sh script to install it"
  exit 1
fi
if [[ ! -x $( which keytool ) ]]; then
  echo "Error: Java keytool binary not found."
  exit 1
fi
if [[ ! -x $( which openssl ) ]]; then
  echo "Error: OpenSSL binary not found."
  exit 1
fi
# Turn a provided email into the certbot flag form; empty means "none".
if [[ ! -z ${email} ]]; then
  email="--email ${email}"
else
  email=""
fi
shift $((OPTIND -1))
# Build the repeated "-d example.org" argument list; the first -d given is
# treated as the primary (main) domain below.
for val in "${domains[@]}"; do
  DOMAINS="${DOMAINS} -d ${val} "
done
MAINDOMAIN=${domains[0]}
if [[ -z ${MAINDOMAIN} ]]; then
  echo "Error: At least one -d argument is required"
  usage
  exit 1
fi
# Renewals reuse the stored config; new certs get email/domains/options.
if [[ ${renew} == "yes" ]]; then
  LEOPTIONS="${RENEWCERT}"
else
  LEOPTIONS="${email} ${DOMAINS} ${NEWCERT}"
fi
# Request/renew the certificate unless we're only (re)inserting an
# existing one into the UniFi keystore.
if [[ ${onlyinsert} != "yes" ]]; then
  echo "Firing up standalone authenticator on TCP port 443 and requesting cert..."
  ${LEBINARY} --server https://acme-v01.api.letsencrypt.org/directory \
      --agree-tos --standalone --preferred-challenges tls-sni ${LEOPTIONS}
fi
if [[ ${onlyinsert} != "yes" ]] && md5sum -c "/etc/letsencrypt/live/${MAINDOMAIN}/cert.pem.md5" &>/dev/null; then
echo "Cert has not changed, not updating controller."
exit 0
else
echo "Cert has changed or -i option was used, updating controller..."
TEMPFILE=$(mktemp)
CATEMPFILE=$(mktemp)
# Identrust cross-signed CA cert needed by the java keystore for import.
# Can get original here: https://www.identrust.com/certificates/trustid/root-download-x3.html
cat > "${CATEMPFILE}" <<'_EOF'
-----BEGIN CERTIFICATE-----
MIIDSjCCAjKgAwIBAgIQRK+wgNajJ7qJMDmGLvhAazANBgkqhkiG9w0BAQUFADA/
MSQwIgYDVQQKExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMT
DkRTVCBSb290IENBIFgzMB4XDTAwMDkzMDIxMTIxOVoXDTIxMDkzMDE0MDExNVow
PzEkMCIGA1UEChMbRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QgQ28uMRcwFQYDVQQD
Ew5EU1QgUm9vdCBDQSBYMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
AN+v6ZdQCINXtMxiZfaQguzH0yxrMMpb7NnDfcdAwRgUi+DoM3ZJKuM/IUmTrE4O
rz5Iy2Xu/NMhD2XSKtkyj4zl93ewEnu1lcCJo6m67XMuegwGMoOifooUMM0RoOEq
OLl5CjH9UL2AZd+3UWODyOKIYepLYYHsUmu5ouJLGiifSKOeDNoJjj4XLh7dIN9b
xiqKqy69cK3FCxolkHRyxXtqqzTWMIn/5WgTe1QLyNau7Fqckh49ZLOMxt+/yUFw
7BZy1SbsOFU5Q9D8/RhcQPGX69Wam40dutolucbY38EVAjqr2m7xPi71XAicPNaD
aeQQmxkqtilX4+U9m5/wAl0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNV
HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMSnsaR7LHH62+FLkHX/xBVghYkQMA0GCSqG
SIb3DQEBBQUAA4IBAQCjGiybFwBcqR7uKGY3Or+Dxz9LwwmglSBd49lZRNI+DT69
ikugdB/OEIKcdBodfpga3csTS7MgROSR6cz8faXbauX+5v3gTt23ADq1cEmv8uXr
AvHRAosZy5Q6XkjEGB5YGV8eAlrwDPGxrancWYaLbumR9YbK+rlmM6pZW87ipxZz
R8srzJmwN0jP41ZL9c8PDHIyh8bwRLtTcm1D9SZImlJnt1ir/md2cXjbDaJWFBM5
JDGFoqgCWjBH4d1QB7wCCZAA62RjYJsWvIjJEubSfZGL+T0yjWW06XyxV3bqxbYo
Ob8VZRzI9neWagqNdwvYkQsEjgfbKbYK7p2CNTUQ
-----END CERTIFICATE-----
_EOF
md5sum "/etc/letsencrypt/live/${MAINDOMAIN}/cert.pem" > "/etc/letsencrypt/live/${MAINDOMAIN}/cert.pem.md5"
echo "Using openssl to prepare certificate..."
cat "/etc/letsencrypt/live/${MAINDOMAIN}/chain.pem" >> "${CATEMPFILE}"
openssl pkcs12 -export -passout pass:aircontrolenterprise \
-in "/etc/letsencrypt/live/${MAINDOMAIN}/cert.pem" \
-inkey "/etc/letsencrypt/live/${MAINDOMAIN}/privkey.pem" \
-out "${TEMPFILE}" -name unifi \
-CAfile "${CATEMPFILE}" -caname root
echo "Stopping Unifi controller..."
service unifi stop
echo "Removing existing certificate from Unifi protected keystore..."
keytool -delete -alias unifi -keystore /usr/lib/unifi/data/keystore \
-deststorepass aircontrolenterprise
echo "Inserting certificate into Unifi keystore..."
keytool -trustcacerts -importkeystore \
-deststorepass aircontrolenterprise \
-destkeypass aircontrolenterprise \
-destkeystore /usr/lib/unifi/data/keystore \
-srckeystore "${TEMPFILE}" -srcstoretype PKCS12 \
-srcstorepass aircontrolenterprise \
-alias unifi
rm -f "${TEMPFILE}" "${CATEMPFILE}"
echo "Starting Unifi controller..."
service unifi start
echo "Done!" | true |
50b1ace0a6a5820c23407a451215b6c506c47605 | Shell | vzaicev/bash | /2022/02_ivan_bezmen/lab1/lab1_21.sh | UTF-8 | 574 | 3.6875 | 4 | [] | no_license | #!/bin/bash
# Print the task description when "-task" appears anywhere in the argument
# list. Iterating over "$@" directly (instead of the old unquoted
# arr=($@) plus index loop) keeps arguments containing whitespace intact.
for arg in "$@"
do
	if [ "$arg" == "-task" ]
	then
		echo "Задание"
		echo "21. передать в скрипт два строковых параметра и сравнить их, использу¤ оператор if, если параметры пустые, то вывести предупреждение"
		echo ""
		echo ""
		break
	fi
done
# Require both string parameters before comparing.
if [ $# -lt 2 ]
then
	echo "Not enough arguments"
	exit
fi
# Lexicographic string comparison of the two parameters.
if [[ "$1" < "$2" ]]
then
	echo "\"$1\" less than \"$2\""
fi | true |
2999a287034e7ad65e9952607cd8f9be82ae9fbe | Shell | badbit/scripts | /debian-ppc.sh | UTF-8 | 1,085 | 3.09375 | 3 | [] | no_license | #!/bin/bash
# Add badbit to the sudoers list if not already present.
# Fixed: the fallback branch previously invoked `check "$check"` — but
# `check` is a variable, not a command — so the sudoers rule was never
# appended; it now echoes the rule into /etc/sudoers.
check="badbit ALL=(ALL) ALL"; [[ -n $(grep "^$check\$" /etc/sudoers) ]] && echo "Ya eres un sudoer" || echo "$check" >> /etc/sudoers

# Update the system and install the required packages.
apt-get update
apt-get upgrade
apt-get install vim byobu nethack-console lyx finch irssi git git-core git-doc lynx ntp moc

# Remove the graphical login (not useful on this box).
update-rc.d -f gdm remove

# Change the console font.
read -p "Configuracion de letra de la consola. Selecciona VGA."
dpkg-reconfigure console-setup

# GitHub configuration.
# Fixed: the block was unterminated (a stray `if` where `fi` belongs made
# the whole script a syntax error) and `ssh-keyen` was a typo for
# ssh-keygen. Generate a key only when one does not exist yet.
# NOTE(review): `su badbit` just opens an interactive shell — it does NOT
# run the following commands as badbit; confirm and use `su badbit -c ...`
# or sudo -u if that was the intent.
if [ ! -f /home/badbit/.ssh/id_rsa ]; then
	su badbit
	ssh-keygen -t rsa -C "el.badbit@gmail.com"
fi
su badbit
cat /home/badbit/.ssh/id_rsa.pub
read -p "Copia la clave y anadela a github."
git config --global user.name "Miguel Lozano"
git config --global user.email "el.badbit@gmail.com"
git config --global github.user badbit
git config --global github.token 81c5e5b9d427da69c617f8560d1a9fd2

# Vim configuration.
git clone git://github.com/badbit/dotvim.git /home/badbit/.vim
ln -s /home/badbit/.vim/vimrc /home/badbit/.vimrc
| true |
22160c50a69a8321d64b56846a7d7bd27207609a | Shell | sudopriyanka/TheUnixWorkbench | /guessinggame.sh | UTF-8 | 505 | 3.65625 | 4 | [] | no_license | #!usr/bin/env bash
#guessinggame.sh
# NOTE(review): the shebang above reads "#!usr/bin/env bash" — it is
# missing the leading slash and should be "#!/usr/bin/env bash".

echo "Welcome to guessing game"
echo "Can you guess , how many files are there in the current directory?"

#function to guess the number of files
# Loop (instead of the old unbounded recursion nested in a misleading
# `while [ false ]`, which is always true) until the guess matches the
# entry count reported by `ls | wc --lines`.
function guessNum {
  while true; do
    echo "Please enter your guess"
    # -r keeps backslashes literal; exiting on EOF prevents the old
    # infinite recursion/crash when stdin is closed.
    read -r guess || exit 1
    if [[ $guess -gt $( ls | wc --lines ) ]]; then
      echo "Too High!"
    elif [[ $guess -lt $( ls | wc --lines ) ]]; then
      echo "Too Low!"
    else
      echo "You guessed it right!"
      exit
    fi
  done
}

guessNum
| true |
bed4b3f21245a440271b9b414efe7c02d88c335c | Shell | dwtcourses/devops-course-activity1 | /scripts/aws-lambda/func1/cleanup.sh | UTF-8 | 378 | 2.96875 | 3 | [] | no_license | ## Cleanup
## This script is called by delete.sh when it is passed a file-name using the -a <file_name> param
## It is created to clean up after testing or as a reset after labs are complete.

# Basename of the invoking script (everything after the last slash in $0).
CALLING_SCRIPT=${0##*/}
printf 'Calling Script=%s\n' "$CALLING_SCRIPT"
## me=`basename "$0"`
## echo "me=$me"
printf 'Cleaning up...\n'
# Drop the generated IAM assume-role policy documents for the myfunc* lambdas.
rm -f myfunc*-assume-role-policy.json
rm -f myfunc*.js | true |
3baf915509b00444ee3595e5efa0414cb4f12fd1 | Shell | wx-b/opcode | /test/approve | UTF-8 | 204 | 2.84375 | 3 | [] | no_license | #!/usr/bin/env bash
# Run this from the main repo directory
set -e

cd ./test

source "approvals.bash"

# Run every spec; quoting "./$file" keeps spec filenames containing
# whitespace intact (the old unquoted ./$file would word-split them).
for file in *_spec.sh ; do
  magenta "\nFILE $file"
  "./$file"
done
echo ""
green "DONE all tests OK" | true |
dac0bfb6310d131f0dc6495de3600916a16522ab | Shell | stone-guru/tans | /tans-server/src/script/monkey-dance | UTF-8 | 906 | 3.578125 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Key to acquire; override with the first positional argument.
KEY=${1:-"monkey.id"}
# Cluster members to poll; on errors we rotate round-robin to the next one.
SERVERS=("localhost:8081" "localhost:8082" "localhost:8083")
sleep_seconds=1
k=0
url="http://${SERVERS[$k]}/acquire?key=$KEY"
# Poll forever, printing "host:port,<last line of response>" per success.
while [ true ]; do
    # echo "----"
    # curl -v -i "$url"
    # echo "----"
    # echo
    result=$(curl -s -i "$url")
    #echo "result=$result"
    # Any HTTP 5xx status line marks the current server as unhealthy.
    err=$(echo "$result"|grep 'HTTP/1.1 5')
    if [ -z "$result" ] || [ -n "$err" ]; then
        # No reply or server error: advance to the next server in the ring.
        let k=($k+1)%${#SERVERS[@]}
        url="http://${SERVERS[$k]}/acquire?key=$KEY"
        echo "Switch to another server $url"
    else
        if echo "$result"|grep 'HTTP/1.1 200 OK' >> /dev/null; then
            # Strip the scheme and any path to recover "host:port".
            s1=${url##'http://'}
            s2=${s1%%/*}
            # Response body is the last line of the `curl -i` output.
            r=$(echo "$result"|tail -n 1)
            echo $s2,$r
        else
            # Follow an HTTP redirect: take the location header value and
            # drop its trailing char (the CR from the HTTP line ending).
            loc=$(echo "$result"|/bin/grep 'location:')
            if [ -n "$loc" ]; then
                url=${loc##'location: '}
                url=${url%?}
                echo "Redirect to another server $url"
            fi
        fi
    fi
    sleep $sleep_seconds
done
| true |
6f768c63bfc565e8b14119bf778b3c4ad5dec08f | Shell | dan9186/myzsh-kubectl | /modules/kubectl/kubectl | UTF-8 | 678 | 3.265625 | 3 | [] | no_license | #!/usr/bin/zsh
# Fail (status 1) when kubectl is missing, reporting via the myzsh helper.
# Fixed: the original wrapped both `hash` and the error call in $(...)
# command substitutions — their output was re-executed as a command and
# `return 1` ran in a subshell, so _deps never actually returned failure.
function _deps() {
	app="kubectl"
	if ! hash "$app" 2>/dev/null; then
		myzsh error "Couldn't find application ${app}"
		return 1
	fi
}
# Wrapper around kubectl adding two convenience verbs:
#   kubectl dashboard — open the kubernetes-dashboard proxy URL in Chrome
#   kubectl token     — print the eks-admin service-account bearer token
# Anything else is passed straight through to the real kubectl binary.
function kubectl() {
	local verb="$1"
	local mod="$2"
	case "$verb" in
	"dashboard")
		shift
		command open -a "/Applications/Google Chrome.app" 'http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/#/login'
		;;
	"token")
		shift
		# Locate the eks-admin secret, then extract the token value with
		# all surrounding whitespace stripped.
		command kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep eks-admin | awk '{print $1}') | grep '^token' | cut -d ":" -f 2 | tr -d "[:space:]"
		;;
	*)
		command kubectl "$@"
		;;
	esac
}
# vim: filetype=zsh noexpandtab
| true |
b4b991fffd56b6eba0d8b291fe7e607415550f6b | Shell | openoms/raspiblitz | /home.admin/setup.scripts/dialogLightningWallet-lnd.sh | UTF-8 | 5,976 | 3.453125 | 3 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | # get basic system information
# these are the same set of infos the WebGUI dialog/controler has
source /home/admin/raspiblitz.info
# SETUPFILE
# this key/value file contains the state during the setup process
SETUPFILE="/var/cache/raspiblitz/temp/raspiblitz.setup"
source $SETUPFILE
# flags for sub dialogs after choice
# (each section at the bottom of this script only runs when its flag is 1)
uploadLNDRESCUE=0
enterSEED=0
uploadSCB=0
# Main menu: brand-new node vs. recovery of an existing one.
OPTIONS=()
OPTIONS+=(NEW "Setup a brand new Lightning Node (DEFAULT)")
OPTIONS+=(OLD "I had an old Node I want to recover/restore")
CHOICE=$(dialog --backtitle "RaspiBlitz" --clear --title "LND Setup" --menu "LND Data & Wallet" 11 60 6 "${OPTIONS[@]}" 2>&1 >/dev/tty)
if [ "${CHOICE}" == "NEW" ]; then
  # Fresh node: all three passwords (A/B/C) will be asked for at the end.
  # clear setup state from all fomer possible choices (previous loop)
  sudo sed -i '/^setPasswordA=/d' $SETUPFILE
  sudo sed -i '/^setPasswordB=/d' $SETUPFILE
  sudo sed -i '/^setPasswordC=/d' $SETUPFILE
  # mark all passwords to be set at the end
  echo "setPasswordA=1" >> $SETUPFILE
  echo "setPasswordB=1" >> $SETUPFILE
  echo "setPasswordC=1" >> $SETUPFILE
elif [ "${CHOICE}" == "OLD" ]; then
  CHOICE=""
  # Sub-menu loop: repeats until the user makes a choice or cancels.
  while [ "${CHOICESUB}" == "" ]
  do
    # get more details what kind of old lightning wallet user has
    OPTIONS=()
    OPTIONS+=(LNDRESCUE "LND tar.gz-Backupfile (BEST)")
    OPTIONS+=(SEED+SCB "Seed & channel.backup file (OK)")
    OPTIONS+=(ONLYSEED "Only Seed Word List (FALLBACK)")
    CHOICESUB=$(dialog --backtitle "RaspiBlitz" --clear --title "RECOVER LND DATA & WALLET" --menu "Data you have to recover from?" 11 60 6 "${OPTIONS[@]}" 2>&1 >/dev/tty)
    if [ "${CHOICESUB}" == "LNDRESCUE" ]; then
      # just activate LND rescue upload
      uploadLNDRESCUE=1
      # clear setup state from all fomer possible choices (previous loop)
      sudo sed -i '/^setPasswordA=/d' $SETUPFILE
      sudo sed -i '/^setPasswordB=/d' $SETUPFILE
      sudo sed -i '/^setPasswordC=/d' $SETUPFILE
      # dont set password c anymore - mark the rest
      echo "setPasswordA=1" >> $SETUPFILE
      echo "setPasswordB=1" >> $SETUPFILE
    elif [ "${CHOICESUB}" == "SEED+SCB" ]; then
      # activate SEED input & SCB upload
      enterSEED=1
      uploadSCB=1
      # clear setup state from all fomer possible choices (previous loop)
      sudo sed -i '/^setPasswordA=/d' $SETUPFILE
      sudo sed -i '/^setPasswordB=/d' $SETUPFILE
      sudo sed -i '/^setPasswordC=/d' $SETUPFILE
      # mark all passwords to be set at the end
      echo "setPasswordA=1" >> $SETUPFILE
      echo "setPasswordB=1" >> $SETUPFILE
      echo "setPasswordC=1" >> $SETUPFILE
    elif [ "${CHOICESUB}" == "ONLYSEED" ]; then
      # let people know about the difference between SEED & SEED+SCB
      whiptail --title "IMPORTANT INFO" --yes-button "JUST SEED" --no-button "Go Back" --yesno "
Using JUST SEED WORDS will only recover your on-chain funds.
To also try to recover the open channel funds you need the
channel.backup file (since RaspiBlitz v1.2 / LND 0.6-beta)
or having a complete LND rescue-backup from your old node.
" 11 65
      # whiptail returns 1 when the no-button ("Go Back") was pressed.
      if [ $? -eq 1 ]; then
        # when user wants to go back
        CHOICESUB=""
      else
        # activate SEED input only (no SCB upload in this path)
        enterSEED=1
        # clear setup state from all fomer possible choices (previous loop)
        sudo sed -i '/^setPasswordA=/d' $SETUPFILE
        sudo sed -i '/^setPasswordB=/d' $SETUPFILE
        sudo sed -i '/^setPasswordC=/d' $SETUPFILE
        # mark all passwords to be set at the end
        echo "setPasswordA=1" >> $SETUPFILE
        echo "setPasswordB=1" >> $SETUPFILE
        echo "setPasswordC=1" >> $SETUPFILE
      fi
    else
      # user cancel - signal to outside app by exit code (2 = submenu)
      exit 2
    fi
  done
else
  # user cancel - signal to outside app by exit code (1 = mainmenu)
  exit 1
fi
# UPLOAD LND RESCUE FILE dialog (if activated by dialogs above)
if [ ${uploadLNDRESCUE} -eq 1 ]; then
  # run upload dialog and get result
  # (the helper writes a `lndrescue=...` line into the temp file)
  _temp="/var/cache/raspiblitz/temp/.temp.tmp"
  /home/admin/config.scripts/lnd.backup.sh lnd-import-gui setup $_temp
  source $_temp 2>/dev/null
  sudo rm $_temp 2>/dev/null
  # if user canceled upload
  if [ "${lndrescue}" == "" ]; then
    # signal cancel to the calling script by exit code (3 = exit on lndrescue)
    exit 3
  fi
  # clear setup state from all fomer possible choices (previous loop)
  sudo sed -i '/^lndrescue=/d' $SETUPFILE
  # store result in setup state
  echo "lndrescue='${lndrescue}'" >> $SETUPFILE
fi
# INPUT LIGHTNING SEED dialog (if activated by dialogs above)
if [ ${enterSEED} -eq 1 ]; then
  # start seed input and get results
  # (the helper writes seedWords/seedPassword into the temp file)
  _temp="/var/cache/raspiblitz/.temp.tmp"
  /home/admin/config.scripts/lnd.backup.sh seed-import-gui $_temp
  source $_temp 2>/dev/null
  sudo rm $_temp 2>/dev/null
  # if user canceled the seed input
  if [ "${seedWords}" == "" ]; then
    # signal cancel to the calling script by exit code (4 = exit on seedwords)
    exit 4
  fi
  # clear setup state from all fomer possible choices (previous loop)
  sudo sed -i '/^seedWords=/d' $SETUPFILE
  sudo sed -i '/^seedPassword=/d' $SETUPFILE
  # write the seed data into the setup state
  echo "seedWords='${seedWords}'" >> $SETUPFILE
  echo "seedPassword='${seedPassword}'" >> $SETUPFILE
fi
# UPLOAD STATIC CHANNEL BACKUP FILE dialog (if activated by dialogs above)
if [ ${uploadSCB} -eq 1 ]; then
  # import SCB and get results
  _temp="/var/cache/raspiblitz/.temp.tmp"
  /home/admin/config.scripts/lnd.backup.sh scb-import-gui setup $_temp
  source $_temp 2>/dev/null
  sudo rm $_temp 2>/dev/null
  sleep 2
  # (removed a stray lone `x` command left here from debugging — it ran on
  # every pass and only produced "command not found" noise)
  # if user canceled the upload
  if [ "${staticchannelbackup}" == "" ]; then
    # signal cancel to the calling script by exit code (5 = exit on scb)
    exit 5
  fi
  # clear setup state from all fomer possible choices (previous loop)
  sudo sed -i '/^staticchannelbackup=/d' $SETUPFILE
  # write the filename into the setup state
  echo "staticchannelbackup='/home/admin/channel.backup'" >> $SETUPFILE
fi | true |
099ecc004f49589d2cd266c543d5c299b8a433c8 | Shell | tsanov/AppImageKit | /build-appdir.sh | UTF-8 | 1,798 | 3.328125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
set -e
set -x

#######################################################################
# build AppImageTool AppDir

APPIMAGETOOL_APPDIR=appdirs/appimagetool.AppDir

rm -rf "$APPIMAGETOOL_APPDIR" || true

# Run make install only for the 'appimagetool.AppImage' component to deploy appimagetools files to
# the $APPIMAGETOOL_APPDIR
DESTDIR="$APPIMAGETOOL_APPDIR" cmake -DCOMPONENT=appimagetool -P cmake_install.cmake

mkdir -p "$APPIMAGETOOL_APPDIR"/usr/lib/appimagekit/

# Copy AppDir specific files
cp ../resources/AppRun "$APPIMAGETOOL_APPDIR"
cp install_prefix/usr/lib/appimagekit/mksquashfs "$APPIMAGETOOL_APPDIR"/usr/lib/appimagekit/

# prefer binaries from /deps, if available
export PATH=/deps/bin:"$PATH"

# `command -v` replaces the non-portable `which`; the quoted substitution
# keeps paths containing spaces intact. Under `set -e` a missing tool still
# aborts the build, as before.
cp "$(command -v desktop-file-validate)" "$APPIMAGETOOL_APPDIR"/usr/bin/
cp "$(command -v zsyncmake)" "$APPIMAGETOOL_APPDIR"/usr/bin/

cp ../resources/appimagetool.desktop "$APPIMAGETOOL_APPDIR"
cp ../resources/appimagetool.png "$APPIMAGETOOL_APPDIR"/appimagetool.png
cp "$APPIMAGETOOL_APPDIR"/appimagetool.png "$APPIMAGETOOL_APPDIR"/.DirIcon

if [ -d /deps/ ]; then
    # deploy glib
    mkdir -p "$APPIMAGETOOL_APPDIR"/usr/lib/
    cp /deps/lib/lib*.so* "$APPIMAGETOOL_APPDIR"/usr/lib/

    # libffi is a runtime dynamic dependency
    # see this thread for more information on the topic:
    # https://mail.gnome.org/archives/gtk-devel-list/2012-July/msg00062.html
    if [ "$ARCH" == "x86_64" ]; then
        cp /usr/lib64/libffi.so.5 "$APPIMAGETOOL_APPDIR"/usr/lib/
    elif [ "$ARCH" == "i686" ]; then
        cp /usr/lib/libffi.so.5 "$APPIMAGETOOL_APPDIR"/usr/lib/
    elif [ "$ARCH" == "armhf" ] || [ "$ARCH" == "aarch64" ]; then
        cp /deps/lib/libffi.so.6 "$APPIMAGETOOL_APPDIR"/usr/lib/
    else
        echo "WARNING: unknown architecture, not bundling libffi"
    fi
fi
| true |
88647ffab9667dbcbf7468782f8ea95bda43efb5 | Shell | falconray0704/ubuntuCfg | /1604Dsk/ffmpeg.sh | UTF-8 | 4,583 | 3 | 3 | [] | no_license | #!/bin/bash
# install_ffmpeg
# Build and install FFmpeg from the upstream git tree into the default
# prefix, with shared+static libraries and the common external encoders
# (x264, x265, fdk-aac, lame, opus, theora, vorbis, vpx, AMR, librtmp).
# Clones into /md/github/ffmpegs/ffmpeg; requires sudo for apt and install.
install_ffmpeg()
{
sudo apt-get update
sudo apt-get -y install autoconf automake build-essential libass-dev libfreetype6-dev \
libsdl2-dev libtheora-dev libtool libva-dev libvdpau-dev libvorbis-dev libxcb1-dev libxcb-shm0-dev \
libxcb-xfixes0-dev pkg-config texinfo zlib1g-dev
sudo apt-get -y install yasm libx264-dev libx265-dev libfdk-aac-dev libmp3lame-dev libopus-dev libvpx-dev
sudo apt-get -y install checkinstall git libfaac-dev libgpac-dev \
libopencore-amrnb-dev libopencore-amrwb-dev librtmp-dev texi2html
mkdir -p /md/github/ffmpegs
cd /md/github/ffmpegs
git clone https://git.ffmpeg.org/ffmpeg.git ffmpeg
cd ffmpeg
# clean any previous build state (both targets fail harmlessly on a fresh clone)
make clean
make distclean
./configure \
--enable-shared \
--enable-static \
--enable-gpl \
--enable-libass \
--enable-libfdk-aac \
--enable-libfreetype \
--enable-libmp3lame \
--enable-libopus \
--enable-libtheora \
--enable-libvorbis \
--enable-libvpx \
--enable-libx264 \
--enable-libx265 \
--enable-libopencore-amrnb \
--enable-libopencore-amrwb \
--enable-librtmp \
--enable-version3 \
--enable-nonfree
make
sudo make install
}
# install_ffmpeg_cuda
# Build and install an NVIDIA-accelerated FFmpeg (nvenc/cuvid/libnpp)
# into $HOME/ffmpeg_build with binaries in $HOME/bin.  Builds x265 from
# source, and uses the NVIDIA Video Codec SDK headers from
# /md/Video_Codec_SDK_8.0.14 plus CUDA from /usr/local/cuda.
#
# FIX: every apt-get install now passes -y; the original mixed
# interactive and non-interactive installs, which stalls unattended runs
# (the sibling install_ffmpeg already used -y throughout).
install_ffmpeg_cuda()
{
mkdir -p /md/github/ffmpegs
sudo apt-get update
sudo apt-get -y install autoconf automake build-essential libass-dev libfreetype6-dev \
libsdl2-dev libtheora-dev libtool libva-dev libvdpau-dev libvorbis-dev libxcb1-dev libxcb-shm0-dev \
libxcb-xfixes0-dev pkg-config texinfo zlib1g-dev
# codec build dependencies (one transaction instead of six separate ones)
sudo apt-get -y install yasm libx264-dev libvpx-dev libfdk-aac-dev libmp3lame-dev libopus-dev
echo "x265 installing..."
cd /md/github/ffmpegs
#sudo apt-get install libx265-dev
sudo apt-get -y install cmake mercurial
hg clone https://bitbucket.org/multicoreware/x265
cd x265/build/linux
PATH="$HOME/bin:$PATH" cmake -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="$HOME/ffmpeg_build" -DENABLE_SHARED:bool=off ../../source
make
make install
# OpenGL/GLUT packages required alongside the CUDA SDK
sudo apt-get -y install glew-utils libglew-dbg libglew-dev libglew1.13 \
libglewmx-dev libglewmx-dbg freeglut3 freeglut3-dev freeglut3-dbg libghc-glut-dev \
libghc-glut-doc libghc-glut-prof libalut-dev libxmu-dev libxmu-headers libxmu6 \
libxmu6-dbg libxmuu-dev libxmuu1 libxmuu1-dbg
cd /md/github/ffmpegs
wget -c http://ffmpeg.org/releases/ffmpeg-snapshot.tar.bz2
tar -jxf ffmpeg-snapshot.tar.bz2
cd ffmpeg
PATH="$HOME/bin:$PATH" PKG_CONFIG_PATH="$HOME/ffmpeg_build/lib/pkgconfig" ./configure \
--prefix="$HOME/ffmpeg_build" \
--pkg-config-flags="--static" \
--extra-cflags="-I$HOME/ffmpeg_build/include" \
--extra-ldflags="-L$HOME/ffmpeg_build/lib" \
--bindir="$HOME/bin" \
--enable-gpl \
--enable-libass \
--enable-libfdk-aac \
--enable-libfreetype \
--enable-libmp3lame \
--enable-libopus \
--enable-libtheora \
--enable-libvorbis \
--enable-libvpx \
--enable-libx264 \
--enable-libx265 \
--enable-nonfree \
--extra-cflags=-I/md/Video_Codec_SDK_8.0.14 \
--extra-ldflags=-L/md/Video_Codec_SDK_8.0.14 \
--extra-cflags="-I/usr/local/cuda/include/" \
--extra-ldflags=-L/usr/local/cuda/lib64 \
--disable-shared \
--enable-nvenc \
--enable-cuda \
--enable-cuvid \
--enable-libnpp
PATH="$HOME/bin:$PATH" make -j$(nproc)
make -j$(nproc) install
#make -j$(nproc) distclean
hash -r
# Persist the runtime library path and man path for future shells.
echo "Export the following environment to .bashrc :"
echo '# ffmpeg' >> ~/.bashrc
echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$HOME/ffmpeg_build/lib' >> ~/.bashrc
echo 'MANPATH_MAP $HOME/bin $HOME/ffmpeg_build/share/man' >> ~/.manpath
sudo ldconfig
}
# Command dispatcher: install | uninstall | install_cuda | uninstall_cuda
case "$1" in
"install") echo "Installing ffmpeg..."
    install_ffmpeg
    ;;
"uninstall") echo "Uninstalling ffmpeg..."
    cd /md/github/ffmpegs/ffmpeg
    sudo make uninstall
    ;;
"install_cuda") echo "Installing cuda ffmpeg..."
    install_ffmpeg_cuda
    ;;
"uninstall_cuda") echo "Uninstalling ffmpeg..."
    rm -rf ~/ffmpeg_build /md/github/ffmpegs/ffmpeg ~/bin/{ffmpeg,ffprobe,ffplay,ffserver,vsyasm,x264,x265,yasm,ytasm}
    ;;
*)
    # FIX: corrected the "unknow cmd" typo, report on stderr, and fail
    # with a non-zero status (the original fell through with status 0).
    echo "unknown command: '$1' (expected install|uninstall|install_cuda|uninstall_cuda)" >&2
    exit 1
    ;;
esac
| true |
0e69aad87ea27b540ae297d148374e9befba45d7 | Shell | scamicha/HPCHadoop | /scripts/cleanup.sh | UTF-8 | 595 | 3.03125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# cleanup: stop the Hadoop cluster, archive each node's logs into
# $HADOOP_FINAL_LOG/<node>, then remove generated config files and (for
# non-local runs) the shared data/log directories.
# Required environment: HADOOP_ROOT, HADOOP_FINAL_LOG, HADOOP_LOG,
# HADOOP_LOCAL, HADOOP_GLOBAL_DATA, HADOOP_GLOBAL_LOG.
"$HADOOP_ROOT"/bin/stop-all.sh
# give the daemons time to shut down before collecting their logs
sleep 60
NODEFILE="$HADOOP_ROOT/conf/nodefile"
mkdir -p "$HADOOP_FINAL_LOG"
# Archive log directories from every node.
# FIX: the original if/else on $HADOOP_LOCAL ran an identical ssh command
# in both branches; the dead conditional has been removed.
for NODE in $(cat "$NODEFILE"); do
    ssh "$NODE" "mkdir $HADOOP_FINAL_LOG/$NODE; cp -R $HADOOP_LOG/* $HADOOP_FINAL_LOG/$NODE"
done
# shared (global) directories only exist for non-local runs
if [ ! "$HADOOP_LOCAL" = true ]; then
    rm -rf "$HADOOP_GLOBAL_DATA" "$HADOOP_GLOBAL_LOG"
fi
rm -f "$HADOOP_ROOT"/conf/slaves "$HADOOP_ROOT"/conf/nodefile "$HADOOP_ROOT"/conf/masters
| true |
175ae3dd933f6944fd2f5dc7cbee5ccfd655bdc4 | Shell | boklm/tor-browser-build | /tools/repackage_browser.sh | UTF-8 | 1,831 | 4.53125 | 5 | [] | no_license | #!/bin/bash
# This script allows you to repackage a Tor Browser bundle using an
# obj-x86_64-pc-linux-gnu directory from a local tor-browser.git build.
#
# This script will download the current Tor Browser version (using
# var/torbrowser_version from rbm config, or an optional second argument)
# and repackage it with the specified obj directory.
#
# The new repackaged bundle can be found in the _repackaged directory.
set -e
# Print the expected invocation.
display_usage() {
  echo -e "\\nUsage: $0 firefox_obj_path [torbrowser-version]\\n"
}
if [ $# -lt 1 ] || [ $# -gt 2 ]
then
  display_usage
  exit 1
fi
DIRNAME="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
OBJ_PATH=$1
if [ $# -eq 2 ]
then
  TOR_VERSION="$2"
else
  TOR_VERSION=$("$DIRNAME"/../rbm/rbm showconf tor-browser var/torbrowser_version)
fi
TOR_FILENAME=tor-browser-linux64-${TOR_VERSION}_en-US.tar.xz
TOR_BROWSER_URL=https://dist.torproject.org/torbrowser/"${TOR_VERSION}"/"${TOR_FILENAME}"
# Work in a scratch directory; the subshell keeps the caller's cwd intact.
TMPDIR="$(mktemp -d)"
(
cd "$TMPDIR"
# Fetch the official bundle and verify its signature against the
# project keyring before unpacking anything.
wget "$TOR_BROWSER_URL"
wget "$TOR_BROWSER_URL".asc
gpg --no-default-keyring --keyring "$DIRNAME"/../keyring/torbrowser.gpg --verify "${TOR_FILENAME}".asc "${TOR_FILENAME}"
# From projects/firefox/build: replace firefox binary by the wrapper and strip libraries/binaries
tar xf "${TOR_FILENAME}"
cp -r "${OBJ_PATH}"/dist/firefox .
rm firefox/firefox-bin
mv firefox/firefox firefox/firefox.real
for LIB in firefox/*.so firefox/gtk2/*.so firefox/firefox.real firefox/plugin-container firefox/updater
do
    strip "$LIB"
done
# Overwrite extracted tor-browser with locally built files and move to _repackaged folder
cp -r firefox/* tor-browser_en-US/Browser
rm -rf firefox "${TOR_FILENAME}"
REPACKAGED_DIR="$DIRNAME"/_repackaged/
mkdir -p "$REPACKAGED_DIR"
mv tor-browser_en-US "$REPACKAGED_DIR"/tor-browser-"$(date '+%Y%m%d%H%M%S')"
rm -rf "$TMPDIR"
)
| true |
b107ecc0c72a8cd538fa13e92390ca7062f0f3ef | Shell | jeroenvanmaanen/ledger | /bin/totals-from-triodos-and-ing.sh | UTF-8 | 512 | 3.296875 | 3 | [] | no_license | #!/usr/bin/env bash
# Sum amounts per "<date>-<field3>-<field6>" key from tab-separated bank
# export files given as arguments, skipping the header line.  Amounts are
# taken from column 7 with ',' and '.' stripped; output lines are
# "key total", sorted.
SED_EXT=-r
case "$(uname)" in
    Darwin*)
        SED_EXT=-E
esac
# NOTE(review): SED_EXT is exported for child processes but nothing in
# this pipeline uses it -- confirm whether downstream scripts need it.
export SED_EXT
TAB="$(echo -en '\011')"
# FIX: "tail +2" is obsolete syntax and is rejected by modern GNU
# coreutils; use the standard "tail -n +2" instead.
tail -n +2 "$@" \
    | awk -F "${TAB}" '
    BEGIN {
    }
    {
        date = $1
        gsub(/-/, "", date);
        key = date "-" $3 "-" $6
        ## print key
        if (key != "--") {
            amount = $7;
            gsub(/[,.]/, "", amount);
            ## print key " : " amount;
            totals[key] = totals[key] + amount;
        }
    }
    END {
        for (key in totals) {
            print key, totals[key]
        }
    }' \
    | sort
58c245f5ebcbe067ff5523d921674030c05aef20 | Shell | goutham414/kali-n900 | /kali_rootfs/var/lib/dpkg/info/xfce4-settings.preinst | UTF-8 | 217 | 2.59375 | 3 | [] | no_license | #!/bin/sh
set -e
# Pre-installation maintainer script: delegate removal of the obsolete
# autostart conffile to dpkg-maintscript-helper.  Per its manual, the
# conffile is removed when upgrading from a version <= $LASTVER.
LASTVER="4.9.0"
if dpkg-maintscript-helper supports rm_conffile; then
	dpkg-maintscript-helper rm_conffile \
		/etc/xdg/autostart/xfce4-settings-helper-autostart.desktop "$LASTVER" -- "$@"
fi
| true |
62f09243c6b731177bb7c244b7358492bd45b605 | Shell | rlgomes/dtf | /src/scripts/ssh_tunnel.sh | UTF-8 | 2,825 | 4.125 | 4 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
# ssh_tunnel.sh -- manage ssh tunnels between this controller and remote
# DTF agents; see the help text at the bottom of the script for usage.
COMMAND=$1
MACHINE=$2
RPORT=$3
SPORT=$4
if [ "$4" == "" ]
then
    # default dtfc port
    SPORT=20000
fi
# registry of active tunnels, one "HOST=RPORT=SPORT" entry per line
TUNNELFILE=tunnel.conf
# this is used because the FreeBSD ssh needs a command to run in background
# otherwise it will just disconnect...
REMOTECMD="while [ true ]; do sleep 1; done"
# "add": create a reverse tunnel (agent -> controller) on $SPORT plus a
# local forward tunnel (controller -> agent) on $RPORT, register the pair
# in $TUNNELFILE, and watch for the entry's removal in the background.
if [ "$COMMAND" == "add" ]
then
    # start reverse ssh tunnel from other machine to the controller on this machine
    ssh -f -nNT -R $SPORT:127.0.0.1:$SPORT $MACHINE $REMOTECMD
    # NOTE(review): by the time "exit $?" runs, $? holds the status of the
    # [ test (0), so a failed ssh still exits 0 -- save the status first.
    if [ $? != 0 ]
    then
        exit $?
    fi
    # now start up the local tunnel to the remote machine for all communication with
    # that remote machine
    ssh -f -N -L $RPORT:127.0.0.1:$RPORT $MACHINE $REMOTECMD
    if [ $? != 0 ]
    then
        exit $?
    fi
    echo "$MACHINE=$RPORT=$SPORT" >> $TUNNELFILE
    # background watcher: poll $TUNNELFILE once a second; when the entry
    # disappears (see "del"), kill the ssh processes implementing the tunnel
    (
        while [ $? == 0 ]
        do
            sleep 1
            grep $MACHINE $TUNNELFILE > /dev/null 2>&1
        done
        for P in `ps x | egrep "ssh.*:127.0.0.1:.*$MACHINE" | awk '{print $1}'`
        do
            kill -9 $P > /dev/null 2>&1 # don't want to see errors for now
        done
        echo "Tunnel removed for $MACHINE:$RPORT:$SPORT"
    ) &
    exit 0
fi
# "refresh": re-create every registered tunnel (e.g. after a network
# drop).  Removing $TUNNELFILE first lets the "add" watchers tear down
# the old ssh processes before each tunnel is re-added.
if [ "$COMMAND" == "refresh" ]
then
    TUNNELS=`cat $TUNNELFILE`
    rm $TUNNELFILE
    sleep 5
    for TUNNEL in $TUNNELS
    do
        HOST=`echo $TUNNEL | awk -F= '{print $1}'`
        RPORT=`echo $TUNNEL | awk -F= '{print $2}'`
        SPORT=`echo $TUNNEL | awk -F= '{print $3}'`
        echo "Refreshing $HOST:$RPORT:$SPORT"
        $0 add $HOST $RPORT $SPORT
    done
    exit 0
fi
# "del": drop $MACHINE's entry from the registry; the background watcher
# started by "add" notices the missing entry and kills the ssh processes.
if [ "$COMMAND" == "del" ]
then
    cat $TUNNELFILE | grep -v $MACHINE > tunnel.tmp
    cat tunnel.tmp > $TUNNELFILE
    rm tunnel.tmp
    echo "Removed any tunnel setup for $MACHINE"
    exit 0
fi
# "list": dump the registry of active tunnels.
if [ "$COMMAND" == "list" ]
then
    echo "Currently active ssh tunnels:"
    echo "HOSTNAME = REMOTE PORT = DTFC LISTENING PORT"
    echo ""
    cat $TUNNELFILE
    exit 0
fi
# No recognized command: print the help text and fail.
echo "SSH Tunnel Help"
echo "***************"
echo "./ssh_tunnel.sh [command] machine_name port [dtfc listening port]"
echo " command: add, del, list or refresh"
echo ""
echo " machine_name: fully qualified hostname to create a tunnel to."
echo ""
echo " port: port to create the tunnel on this side, on the "
echo " agent side there is a reverse tunnel on pot 20000."
echo ""
echo " dtfc_listen_port: the listening port for the dtfc (default 20000), "
echo " but in some scenarios its useful to be able to "
echo " change this port. Remember to use the "
echo " -Ddtf.listen.port=xxxx on the DTFC side as well as"
echo " the -Ddtf.connect.port=xxxx on the DTFA side."
# NOTE(review): "exit -1" is not a valid POSIX exit status (shells map it
# to 255); consider "exit 1".
exit -1
| true |
90b3fb608818cfefd83572af34d259e705ad9152 | Shell | latifkabir/Computation_using_Fortran90 | /spacer/spacer.sh | UTF-8 | 317 | 2.984375 | 3 | [] | no_license | #!/bin/bash
#
# Build and install the "spacer" program: compile spacer.f90, link the
# object file, and install the executable as ~/bin/$ARCH/spacer.
gfortran -c spacer.f90 >& compiler.txt
if [ $? -ne 0 ]; then
  echo "Errors compiling spacer.f90"
  exit
fi
rm compiler.txt
#
gfortran spacer.o
if [ $? -ne 0 ]; then
  echo "Errors linking spacer.o"
  exit
fi
#
rm spacer.o
mv a.out ~/bin/$ARCH/spacer
#
echo "Executable installed as ~/bin/$ARCH/spacer"
| true |
70a6a97065288f9ecce3a44138f3a1479a27fc6c | Shell | lockss/lockss-core | /test/frameworks/run_multiple_daemons/TestDamageRepair | UTF-8 | 222 | 2.71875 | 3 | [
"BSD-3-Clause"
] | permissive | #! /bin/sh
# Integration test: corrupt a cached file while the system is running and
# verify it gets repaired within the waiting period.
TESTFILE=test1/cache/a/www.example.com/http/branch1/003file.txt/#content/current
./start
# give the started processes time to settle before damaging the file
sleep 3600
# append marker text to corrupt the cached content
echo "BADNESS" >>"${TESTFILE}"
# allow time for the damage to be detected and repaired
sleep 7200
./stop
# fail (exit 1) if the damage marker is still present after the window
if grep -s BADNESS "${TESTFILE}"
then
    exit 1
else
    exit 0
fi
| true |
721b064f421d5e7d829e4e65c22208bc13cce005 | Shell | delkyd/alfheim_linux-PKGBUILDS | /osvr-config-git/PKGBUILD | UTF-8 | 1,305 | 2.609375 | 3 | [] | no_license | pkgname=osvr-config-git
pkgver=r75.721c9df
pkgrel=1
pkgdesc="OSVR-Config is a utility to configure the OSVR Server, and gives you access to a few OSVR-related tools."
arch=(i686 x86_64)
url="https://github.com/OSVR/OSVR-Config"
#license=('GPL')
#install=osvr-config.install
makedepends=('git')
depends=('osvr-core-git' 'asp.net-bin' 'bower' 'gulp' 'libcurl-gnutls') #TODO: add more deps # why libcurl-gnutls?? https://github.com/dotnet/core/issues/37
source=("osvr-config::git+https://github.com/OSVR/OSVR-Config.git")
# "Findjsoncpp.cmake")
# pkgver(): derive a VCS-style version -- the git tag description when
# available, otherwise r<commit-count>.<short-hash>.
pkgver() {
  cd "$srcdir/osvr-config"
  ( set -o pipefail
    git describe --long --tags 2>/dev/null | sed 's/\([^-]*-g\)/r\1/;s/-/./g' ||
    printf "r%s.%s" "$(git rev-list --count HEAD)" "$(git rev-parse --short HEAD)"
  )
}
prepare() {
  cd osvr-config
}
build() {
  cd osvr-config
  cd src/ConfigUtil
  # the dnx runtime is not on PATH by default; point at the packaged copy
  export PATH="$PATH:/usr/lib/asp.net/dnx-coreclr-linux-x64.1.0.0-rc1-update1/bin/"
  # make sure it's not LC_ALL=en_US.utf8 or something
  export LC_ALL=en_US.UTF-8
  dnu restore
  dnu publish --runtime /usr/lib/asp.net/dnx-coreclr-linux-x64.1.0.0-rc1-update1 --no-source -o ../../artifacts --configuration Release #TODO
}
package() {
  cd osvr-config
  #TODO
  # make DESTDIR="$pkgdir/" install
}
md5sums=('SKIP')
# vim:set ts=2 sw=2 et:
| true |
89a06a83af3892b5739e7e743739400c5a28b441 | Shell | miya-masa/dotfiles | /install.sh | UTF-8 | 5,777 | 3.90625 | 4 | [] | no_license | #!/usr/bin/env bash
set -o nounset
set -o errexit
# report the failing line and status before errexit aborts the script
trap 'echo "Aborting due to errexit on line $LINENO. Exit code: $?" >&2' ERR
set -o errtrace
set -o pipefail
IFS=$'\n\t'
__ScriptVersion="v0.1.0"
UNAME=$(uname)
# Overridable defaults for the dotfiles checkout location and branch.
DOTFILES_DIRECTORY=${DOTFILES_DIRECTORY:-~/dotfiles}
DOTFILES_BRANCH=${DOTFILES_BRANCH:-master}
XDG_CONFIG_HOME=${XDG_CONFIG_HOME:-$HOME/.config}
# empty means: prompt interactively when a new ssh key must be generated
SSH_PASSPHRASE=${SSH_PASSPHRASE:-""}
#=== FUNCTION ================================================================
#         NAME:  usage
#  DESCRIPTION:  Display usage information on stdout (does not exit).
#===============================================================================
function usage ()
{
# FIX: corrected the "comand" typo in the usage string.
    echo "Usage :  $0 [options] [command]
    Command:
      deploy: Deploy dotfiles.
      initialize: Install software and tools.
    Options:
      -h|help       Display this message
      -v|version    Display script version"
}    # ----------  end of function usage  ----------
# has <command>
# Succeed when <command> resolves in the current shell (executable on
# PATH, builtin, shell function, or alias); used to guard optional steps.
function has() {
  if type "$1" > /dev/null 2>&1; then
    return 0
  else
    return 1
  fi
}
function initialize() {
if [[ ${UNAME} == "Linux" ]]; then
sudo apt update -y
if [[ ! -d ${DOTFILES_DIRECTORY} ]]; then
if ! has git; then
sudo apt install -y git
fi
git clone http://github.com/miya-masa/dotfiles.git -b ${DOTFILES_BRANCH} ${DOTFILES_DIRECTORY}
cd ${DOTFILES_DIRECTORY}
fi
cd ${DOTFILES_DIRECTORY}
_initialize_linux
else
_initialize_mac
fi
git remote set-url origin git@github.com:miya-masa/dotfiles.git
echo "Successful!! Restart your machine."
}
# _initialize_linux
# Source every installer under etc/install.d, generate the rsa/ecdsa/
# ed25519 ssh keys when missing (prompting for a passphrase unless
# SSH_PASSPHRASE is set), deploy the dotfile symlinks, and switch the
# login shell to zsh (skipped when CI=true).
function _initialize_linux() {
  for i in "${DOTFILES_DIRECTORY}"/etc/install.d/*.sh ; do
    echo "Installing: "`basename ${i}`
    source "${i}"
  done
  SSH_RSA=~/.ssh/id_rsa
  if [ ! -s ${SSH_RSA} ]; then
    if [ "${SSH_PASSPHRASE}" = "" ]; then
      printf "ssh key passphrase: "
      read -s SSH_PASSPHRASE
    fi
    ssh-keygen -P ${SSH_PASSPHRASE} -f ${SSH_RSA}
  fi
  SSH_ECDSA=~/.ssh/id_ecdsa
  if [ ! -s ${SSH_ECDSA} ]; then
    if [ "${SSH_PASSPHRASE}" = "" ]; then
      printf "ssh key passphrase: "
      read -s SSH_PASSPHRASE
    fi
    ssh-keygen -t ecdsa -b 384 -P ${SSH_PASSPHRASE} -f ${SSH_ECDSA}
  fi
  SSH_ED25519=~/.ssh/id_ed25519
  if [ ! -s ${SSH_ED25519} ]; then
    if [ "${SSH_PASSPHRASE}" = "" ]; then
      printf "ssh key passphrase: "
      read -s SSH_PASSPHRASE
    fi
    ssh-keygen -t ed25519 -P ${SSH_PASSPHRASE} -f ${SSH_ED25519}
  fi
  deploy
  if [[ ${CI:-} != "true" ]]; then
    chsh -s $(which zsh)
  fi
}
# _initialize_mac
# Apply system updates, install Homebrew when missing, clone the dotfiles
# repo when absent, install packages from the Brewfile plus a stable
# node, generate the same three ssh keys as the Linux path, then deploy
# the dotfile symlinks.
function _initialize_mac() {
  softwareupdate --all --install
  if ! has brew; then
    /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
  fi
  brew install git
  if [[ ! -d ${DOTFILES_DIRECTORY} ]]; then
    git clone http://github.com/miya-masa/dotfiles.git -b ${DOTFILES_BRANCH} ${DOTFILES_DIRECTORY}
    cd ${DOTFILES_DIRECTORY}
  fi
  cd ${DOTFILES_DIRECTORY}
  brew bundle --file=./Brewfile
  nodebrew install-binary stable
  nodebrew use stable
  # TODO standardlization
  SSH_RSA=~/.ssh/id_rsa
  if [ ! -s ${SSH_RSA} ]; then
    if [ "${SSH_PASSPHRASE}" = "" ]; then
      printf "ssh key passphrase: "
      read -s SSH_PASSPHRASE
    fi
    ssh-keygen -P ${SSH_PASSPHRASE} -f ${SSH_RSA}
  fi
  SSH_ECDSA=~/.ssh/id_ecdsa
  if [ ! -s ${SSH_ECDSA} ]; then
    if [ "${SSH_PASSPHRASE}" = "" ]; then
      printf "ssh key passphrase: "
      read -s SSH_PASSPHRASE
    fi
    ssh-keygen -t ecdsa -b 384 -P ${SSH_PASSPHRASE} -f ${SSH_ECDSA}
  fi
  SSH_ED25519=~/.ssh/id_ed25519
  if [ ! -s ${SSH_ED25519} ]; then
    if [ "${SSH_PASSPHRASE}" = "" ]; then
      printf "ssh key passphrase: "
      read -s SSH_PASSPHRASE
    fi
    ssh-keygen -t ed25519 -P ${SSH_PASSPHRASE} -f ${SSH_ED25519}
  fi
  deploy
}
# password
# Prompt on stdout and read a password without echo into the global
# variable $password (cleared first, so a failed read leaves it empty).
password() {
  password=''
  printf 'sudo password: '
  read -s password
}
# deploy
# Create the expected directories and (re-)link every managed dotfile
# from the repository into $HOME.  Existing links are overwritten
# (ln -fs); ~/.ssh/config is only seeded from the sample when absent.
# NOTE(review): ~/.local/bin and ~/.ssh are assumed to exist already --
# confirm, or mkdir -p them here too.
function deploy() {
  mkdir -p ${HOME}/.tmux
  mkdir -p ${HOME}/.vim
  mkdir -p ${HOME}/.config
  ln -fs ${DOTFILES_DIRECTORY}/.tmux/tmuxline.conf ${HOME}/.tmux/tmuxline.conf
  ln -fs ${DOTFILES_DIRECTORY}/.tmux.conf ${HOME}/.tmux.conf
  ln -fs ${DOTFILES_DIRECTORY}/.tmux.conf.local ${HOME}/.tmux.conf.local
  ln -fs ${DOTFILES_DIRECTORY}/.zshrc ${HOME}/.zshrc
  ln -fs ${DOTFILES_DIRECTORY}/.zshrc_linux ${HOME}/.zshrc_linux
  ln -fs ${DOTFILES_DIRECTORY}/.zshrc_darwin ${HOME}/.zshrc_darwin
  ln -fs ${DOTFILES_DIRECTORY}/.p10k.zsh ${HOME}/.p10k.zsh
  ln -fs ${DOTFILES_DIRECTORY}/.zprofile ${HOME}/.zprofile
  ln -fs ${DOTFILES_DIRECTORY}/.gitconfig ${HOME}/.gitconfig
  ln -fs ${DOTFILES_DIRECTORY}/.vim/vimrcs ${HOME}/.vim
  ln -fs ${DOTFILES_DIRECTORY}/.config/nvim ${HOME}/.config
  ln -fs ${DOTFILES_DIRECTORY}/.config/neofetch ${HOME}/.config
  ln -fs ${DOTFILES_DIRECTORY}/.config/yamllint ${HOME}/.config
  ln -fs ${DOTFILES_DIRECTORY}/.fzf.zsh ${HOME}/.fzf.zsh
  ln -fs ${DOTFILES_DIRECTORY}/.tigrc ${HOME}/.tigrc
  ln -fs ${DOTFILES_DIRECTORY}/.markdownlintrc ${HOME}/.markdownlintrc
  ln -fs ${DOTFILES_DIRECTORY}/.local/bin/ide.sh ${HOME}/.local/bin/ide.sh
  if [ ! -e ~/.ssh/config ]; then
    cp ${DOTFILES_DIRECTORY}/.ssh/config.sample ${HOME}/.ssh/config
  fi
  if [[ -e ./.dbext_profile ]]; then
    ln -fs ${DOTFILES_DIRECTORY}/.dbext_profile ${HOME}/.dbext_profile
  fi
}
#-----------------------------------------------------------------------
#  Handle command line arguments
#-----------------------------------------------------------------------
# NOTE(review): there is no `shift $((OPTIND - 1))` after the loop.
# Harmless today because every option exits, but required if a
# non-exiting option is ever added before the command word.
while getopts ":hv" opt
do
  case $opt in
    h ) usage; exit 0   ;;
    v ) echo "$0 -- Version $__ScriptVersion"; exit 0   ;;
    * ) echo -e "\n  Option does not exist : $OPTARG\n"
        usage; exit 1   ;;
  esac    # --- end of case ---
done
# Dispatch the requested command (default when no argument: initialize).
case ${1:-initialize} in
  deploy     ) deploy; exit 0 ;;
  initialize ) initialize; exit 0 ;;
  # FIX: report unknown commands on stderr and exit non-zero; the
  # original printed to stdout and fell through with status 0.
  * ) echo "Unknown command $1" >&2
      exit 1 ;;
esac
| true |
039d28b5a1ec6beeb363b324fed5047c1adf0f9e | Shell | korfuri/my_kernel | /test/generate_interrupts_handlers.s.sh | UTF-8 | 925 | 3 | 3 | [] | no_license | #!/bin/sh
#LIST="0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31"
# Collect the interrupt numbers by scanning interrupts.c for functions
# named interrupt_handler_<N>.
LIST="`
grep -E '^void\W+interrupt_handler_' interrupts.c |
sed -e 's/^void\ *//g' |
cut -d '(' -f 1 |
sed -e 's/interrupt_handler_//g'`"
# Emit the assembly stubs: each isr<N> saves CPU state, calls the C
# handler, restores state and returns with iret.
(
echo 'align 4'
echo ''
for i in $LIST
do
echo 'extern interrupt_handler_'$i
echo 'global isr'$i
echo ''
echo 'isr'$i':'
echo ' cli'
echo ' pushfd'
echo ' pushad'
# echo ' push esp'
echo ' call interrupt_handler_'$i
echo ' popad'
echo ' popfd'
# echo ' sti'
echo ' iret'
done
) > interrupts_handlers.s
# Emit the matching C prototypes header.
(
echo '#ifndef INTERRUPTS_HANDLERS_H_'
echo '#define INTERRUPTS_HANDLERS_H_'
for i in $LIST
do
echo 'void isr'$i'(void);'
done
echo '#endif'
) > interrupts_handlers.h
# Emit the IDT registration helper that installs every stub.
(
echo 'static void idt_set_gates() {'
for i in $LIST
do
echo ' idt_set_gate('$i', isr'$i', 0x08, 0x8E);'
done
echo '}'
) > idt_set_gates.c
| true |
d4c4abe17fe4e88b08f8b2f44b7aaf68a99837df | Shell | serverok/server-setup | /csf-config.sh | UTF-8 | 2,096 | 2.515625 | 3 | [] | no_license | #!/bin/bash
# Apply a hardened CSF firewall configuration by rewriting settings in
# /etc/csf/csf.conf, then restart CSF and disable rpcbind.
/bin/sed -i "s/RESTRICT_SYSLOG\s*=.*$/RESTRICT_SYSLOG = \"3\"/g" /etc/csf/csf.conf
/bin/sed -i "s/SYSLOG_CHECK\s*=.*$/SYSLOG_CHECK = \"3600\"/g" /etc/csf/csf.conf
# By default, CSF will block allowed IP if they break rules.
/bin/sed -i "s/IGNORE_ALLOW\s*=.*/IGNORE_ALLOW = \"1\"/g" /etc/csf/csf.conf
#/bin/sed -i "s/LF_GLOBAL\s*=.*$/LF_GLOBAL = \"1800\"/g" /etc/csf/csf.conf
#/bin/sed -i "s/GLOBAL_ALLOW\s*=.*$/GLOBAL_ALLOW = \"http:\/\/git\.buyscripts\.in\:10080\/boby\/firewall\/raw\/master\/allow\.txt\"/g" /etc/csf/csf.conf
#/bin/sed -i "s/GLOBAL_DENY\s*=.*$/GLOBAL_DENY = \"http\:\/\/git\.buyscripts\.in\:10080\/boby\/firewall\/raw\/master\/deny.txt\"/g" /etc/csf/csf.conf
# This option will notify you when a large amount of email is sent from a particular script on the server
/bin/sed -i "s/LF_SCRIPT_ALERT\s*=.*$/LF_SCRIPT_ALERT = \"1\"/g" /etc/csf/csf.conf
# This option ensures that almost all Linux accounts are checked with Process Tracking, not just the cPanel ones
/bin/sed -i "s/PT_ALL_USERS\s*=.*$/PT_ALL_USERS = \"1\"/g" /etc/csf/csf.conf
/bin/sed -i "s/TESTING = \"1\"/TESTING = \"0\"/g" /etc/csf/csf.conf
# Disable IP blocking alert. You may get many; if you don't need to act on them, disable it
/bin/sed -i "s/PT_USERMEM\s*=.*/PT_USERMEM = \"1024\"/g" /etc/csf/csf.conf
/bin/sed -i "s/LF_NETBLOCK_ALERT\s*=.*/LF_NETBLOCK_ALERT = \"0\"/g" /etc/csf/csf.conf
/bin/sed -i "s/LF_PERMBLOCK_ALERT\s*=.*/LF_PERMBLOCK_ALERT = \"0\"/g" /etc/csf/csf.conf
/bin/sed -i "s/PS_EMAIL_ALERT\s*=.*/PS_EMAIL_ALERT = \"0\"/g" /etc/csf/csf.conf
# customer kept getting IP blocked mails, so LF_TEMP_EMAIL_ALERT is set to 0
#LF_EMAIL_ALERT
#LF_TEMP_EMAIL_ALERT
# Disable all alerts
# /bin/sed -i "s/LF_EMAIL_ALERT\s*=.*/LF_EMAIL_ALERT = \"0\"/g" /etc/csf/csf.conf
# ONLY CPANEL
if [ -d "/var/cpanel/" ]; then
    /bin/sed -i "s/SMTP_BLOCK\s*=.*/SMTP_BLOCK = \"1\"/g" /etc/csf/csf.conf
fi
# /bin/sed -i "s/LF_ALERT_TO\s*=.*$/LF_ALERT_TO = \"admin@serverok.in\"/g" /etc/csf/csf.conf
systemctl restart csf.service
csf -r
/bin/systemctl stop rpcbind
/bin/systemctl disable rpcbind
| true |
5b59a23e9e33583bab36a1bd5cfc9d667cf473f2 | Shell | AEADataEditor/replication-template | /tools/download_zenodo.sh | UTF-8 | 501 | 3.90625 | 4 | [
"CC-BY-4.0"
] | permissive | #!/bin/bash
python=python3
# may need a fallback to generic python
#
# Assumes:
# pip install zenodo_get
# Usage: download_zenodo.sh <projectID>
# Downloads the Zenodo deposit into ./zenodo-<projectID>.
if [[ -z $1 ]]
then
    cat << EOF
$0 (projectID)
will download a Zenodo archive
EOF
    exit 2
fi
projectID=$1
# Anything longer than a bare record id is reduced to the text after the
# last dot (presumably to accept a full DOI -- confirm expected formats).
if [ ${#projectID} -gt 7 ]
then
    projectID=${projectID##*.}
fi
# Test if directory exists
#
zenodo_dir=zenodo-$projectID
if [ -d $zenodo_dir ]
then
    echo "$zenodo_dir already exists - please remove prior to downloading"
    exit 2
fi
zenodo_get --output-dir=$zenodo_dir $projectID
| true |
221331554c9b13eb2a83ca93c75da4e21da0734a | Shell | lamar-studio/Code365 | /script/rj/getSysInfo | UTF-8 | 2,936 | 3.234375 | 3 | [] | no_license | #!/bin/bash
# README:
# syssize unit(kB) [1kB = 1000B]
# datasize unit(kB) [1kB = 1000B]
#
# Prints "key=value" lines describing the hardware (product ids, BIOS,
# CPU, memory, block-device sizes), preferring the vendor helper
# getHWCfg and falling back to dmidecode.  Keys whose value could not be
# determined are simply omitted from the output.
hwcfg=/usr/local/bin/system/getHWCfg
datapart=/opt/lessons
if [ -e $hwcfg ];then
    productid=`echo $(bash $hwcfg ID)`
    productname=`echo $(bash $hwcfg NAME)`
    platform=`echo $(bash $hwcfg PLAT)`
else
    productid=`echo $(dmidecode -s baseboard-product-name)`
    productname=`echo $(dmidecode -s system-product-name)`
fi
serialnum=`echo $(dmidecode -s system-serial-number)`
hdversion=`echo $(dmidecode -s system-version)`
biosversion=`echo $(dmidecode -s bios-version)`
sysversion=`echo $(cat /etc/issue)`
bit=`echo $(getconf LONG_BIT)`
cpu=`echo $(cat /proc/cpuinfo | grep "^model\s*name\s*:" | uniq | cut -d: -f2)`
# the memory unit [1kB=1024B]
memory=`echo $(cat /proc/meminfo | grep "^MemTotal:" | cut -d: -f2 | sed 's/kB//g' | awk '{print int($1)/1024/1024}')`
# memory should be the power of 2 (0.918 GB -> 1 GB)
[ -n "$memory" ] && memory=`echo ${memory} | awk '{print 2**int(log($memory)/log(2)+0.5)}' | awk '{print int($1*1024*1024*1024)}'`
# resolve the block devices backing / and (when present) the data partition
sysblock=`echo $(df / | awk 'NR>1{print $1}' | sed -n -e 's/\/dev\///' -e 's/[0-9]*$// p')`
[ -d $datapart ] && datablock=`echo $(df $datapart | awk 'NR>1{print $1}' | sed -n -e 's/\/dev\///' -e 's/[0-9]*$// p')`
if [ "$sysblock" = "$datablock" ];then
    [[ "$sysblock" =~ "mmcblk" ]] && sysblock=${sysblock%p}
    datanum=`echo $(df $datapart | awk 'NR>1{print $1}' | sed -n -e 's/\/dev\/// p')`
    if [ -f /sys/class/block/$sysblock/$datanum/size ];then
        datasize=`echo $(cat /sys/class/block/$sysblock/$datanum/size | awk '{print int($1*512)}')`
        syssize=`echo $(expr $(cat /sys/class/block/$sysblock/size) - $(cat /sys/class/block/$sysblock/$datanum/size) | awk '{print int($1*512)}')`
    fi
else
    [[ "$sysblock" =~ "mmcblk" ]] && sysblock=${sysblock%p}
    [ -f /sys/class/block/$sysblock/size ] && syssize=`echo $(cat /sys/class/block/$sysblock/size | awk '{print int($1*512)}')`
    [[ "$datablock" =~ "bcache" ]] && datablock=`echo bcache0`
    [ -f /sys/class/block/$datablock/size ] && datasize=`echo $(cat /sys/class/block/$datablock/size | awk '{print int($1*512)}')`
fi
# storage should be the power of 2 (31.8 GB -> 32 GB) when system not have the data part
[ -n "$syssize" ] && [ -z "$datasize" ] && syssize=`echo ${syssize} | awk '{print 2**int(log($syssize)/log(2)+0.5)}'`
[ -n "$serialnum" ] && echo serialnum=$serialnum
[ -n "$productid" ] && echo productid=$productid
[ -n "$productname" ] && echo productname=$productname
[ -n "$platform" ] && echo platform=$platform
[ -n "$hdversion" ] && echo hdversion=$hdversion
[ -n "$biosversion" ] && echo biosversion=$biosversion
[ -n "$sysversion" ] && echo sysversion=$sysversion
[ -n "$bit" ] && echo bit=$bit
[ -n "$cpu" ] && echo cpu=$cpu
[ -n "$memory" ] && echo memory=$memory
# NOTE(review): $storage is never assigned in this script, so the next
# line only fires if it leaks in from the environment -- confirm intent.
[ -n "$storage" ] && echo storage=$storage
[ -n "$datasize" ] && echo datasize=$datasize
[ -n "$syssize" ] && echo syssize=$syssize
exit 0
| true |
6776ba78b2ef2e219a6dc9c999ecfd4ae535dce4 | Shell | dcos/dcos | /packages/adminrouter/docker/build-resty.sh | UTF-8 | 526 | 2.515625 | 3 | [
"Apache-2.0",
"MIT",
"LicenseRef-scancode-oracle-bcl-javase-javafx-2012",
"ErlPL-1.1",
"MPL-2.0",
"ISC",
"BSL-1.0",
"Python-2.0",
"BSD-2-Clause"
] | permissive | #!/bin/bash
set -e # Fail the script if anything fails
set -x # Verbose output
set -u # Undefined variables
# Build OpenResty into $AR_BIN_DIR.
# Required environment: OPENRESTY_DIR, AR_BIN_DIR, NUM_CORES.
# Extra ./configure flags can be passed through as script arguments ("$@").
cd $OPENRESTY_DIR
./configure \
    "--prefix=$AR_BIN_DIR" \
    --with-file-aio \
    --with-http_gunzip_module \
    --with-http_gzip_static_module \
    --with-http_v2_module \
    --without-mail_pop3_module \
    --without-mail_imap_module \
    --without-mail_smtp_module \
    --with-http_ssl_module \
    --with-luajit \
    --with-luajit-xcflags='-mno-sse4.2' \
    "$@"
make -j${NUM_CORES}
make install
| true |
5730e3905233405b43a5f03ef7948faf04ce675a | Shell | alexeicolin/dhcpdump-PKGBUILD | /PKGBUILD | UTF-8 | 1,008 | 2.59375 | 3 | [] | no_license | # Maintainer: Janne Heß <jannehess@gmail.com>
pkgname=dhcpdump
pkgver=1.8
pkgrel=2
pkgdesc='Parse DHCP packets'
arch=('x86_64' 'i686')
url='http://mavetju.org/unix/general.php'
license=('custom:dhcpdump')
depends=('libpcap')
source=("http://mavetju.org/download/dhcpdump-1.8.tar.gz"
        dhcpdump-1.8-includes.patch)
sha512sums=('52cd63d581a3c530c2f5baa66808d5b0241853651c720bd513b769b8301b4dff9c87243787014aea98a5b3ebed86ec317b58d262bf5031015141a4da50fb76e6'
            '1e95b1662a734174c79cba90d3b6a6ccd8cf7792b8d273a75fbe591ceea5edbe8e585123a82ce83515560a933ccf7e1f1296628200a9cc4fde279bb2b76ecced')
# prepare(): apply the local includes patch on top of the release tarball.
prepare() {
  cd "${srcdir}/${pkgname}-${pkgver}"
  patch -p1 < ../dhcpdump-1.8-includes.patch
}
build() {
  cd "${srcdir}/${pkgname}-${pkgver}"
  make all
}
# package(): install the binary, man page and license into $pkgdir.
package() {
  cd "${srcdir}/${pkgname}-${pkgver}"
  install -Dm755 ${pkgname} "${pkgdir}/usr/bin/${pkgname}"
  install -Dm644 ${pkgname}.8 "${pkgdir}/usr/share/man/man8/${pkgname}.8" # FIXME
  install -Dm644 LICENSE "${pkgdir}/usr/share/licenses/${pkgname}"
}
| true |
391527d91c888eb394400446f835351a4763518d | Shell | u2prakash/rotten-tomatoes | /movies-rating.sh | UTF-8 | 581 | 3.28125 | 3 | [] | no_license | #!/bin/bash
#Author: Prakash Dahal
# Prompt for a movie title, query the OMDb API, and print the rating.
echo "Please enter the name of the valid Movie?"
read -r movieName
# FIX: URL-encode spaces.  The original used `tr ' ' '/%20'`, but tr maps
# characters one-for-one, so every space became '/' instead of '%20'.
movieName1=${movieName// /%20}
# NOTE(review): API key is hardcoded; consider reading it from the
# environment instead of committing it to the repository.
myApiKey=6b97b751
# Split the JSON on commas, keep the "N/10" value, and strip the JSON
# punctuation around it.
rating=$(curl -s "http://www.omdbapi.com/?t=${movieName1}&&apikey=${myApiKey} " | tr ',' '\n' | grep /10 | tr -d '}]Value:""' | head -1)
# Display the rating statement.
echo "${movieName} Rating from Rotten tomato is ${rating}"
| true |
5861ca6ae2a310bc44b2ace2257b20334e9ab90a | Shell | jspring/rfs-sr99_ramp_metering | /system/urmsgo | UTF-8 | 1,268 | 3.625 | 4 | [] | no_license | #!/bin/bash
# urmsgo -- start the urms process against the given controller, plus the
# matching wrfiles_rm data logger; both run in the background and log
# under /big/data/ac_rm_1.
if [[ q$2 == 'q' ]]
then
    echo
    echo "Usage: $0 <Controller IP> <Database variable number (must be a multiple of 4)> <Port> <Timestamp(for naming files, in YYMMDD_HHMMSS format)> (Loop/Polling interval, default 5000 ms)"
    echo " where <> denotes necessary arguments and () denotes optional arguments"
    echo
    exit 1
fi
CONTROLLER_IP=$1
URMS_STATUS_DBNUM=$2
PORT=$3
TIMESTAMP=$4
INTERVAL=$5
CONTROL=$6
if [[ q$INTERVAL == 'q' ]]
then
    # NOTE(review): the actual default is 30000 ms but the usage text
    # above says 5000 ms -- one of the two should be corrected.
    INTERVAL=30000
fi
if [[ $CONTROL == "control" ]]
then
    echo "Starting urms with control"
    /home/atsc/urms/lnx/urms -r $CONTROLLER_IP -i $INTERVAL -d $URMS_STATUS_DBNUM -p $PORT >>/big/data/ac_rm_1/urmsgo_$TIMESTAMP.err &
else
    # -n presumably disables control -- confirm against the urms binary
    echo "Starting urms with no control, $CONTROLLER_IP $URMS_STATUS_DBNUM $PORT $INTERVAL"
    /home/atsc/urms/lnx/urms -r $CONTROLLER_IP -i $INTERVAL -n -d $URMS_STATUS_DBNUM -p $PORT >/big/data/ac_rm_1/"urms_"$CONTROLLER_IP"."$PORT"."$TIMESTAMP".err" 2>&1 &
fi
#sleep 5
echo "Starting wrfiles_rm, $CONTROLLER_IP $URMS_STATUS_DBNUM $PORT $TIMESTAMP"
/home/sr99_ramp_metering/src/lnx/wrfiles_rm -d /big/data/ac_rm_1 -i $INTERVAL -m 30 -s $URMS_STATUS_DBNUM -c $CONTROLLER_IP"."$PORT"."$TIMESTAMP 1>>/big/data/ac_rm_1/"wrfiles_ac_rm_"$CONTROLLER_IP"_"$PORT"."$TIMESTAMP".err" 2>&1 &
| true |
c0f4842763cfe1b75ef0fb760a022588d6b1efa7 | Shell | skibold/ml-class | /proj1/test.sh | UTF-8 | 465 | 2.796875 | 3 | [] | no_license | #! /bin/bash
function stats() {
accuracy=$(cat $1 | sed 's/.*,\(.*\)]/\1/')
len=$(echo $accuracy | wc -w)
avg=0
for a in $accuracy
do
avg=$(echo "scale=8; $a+$avg" | bc)
done
echo "scale=8; $avg/$len" | bc
}
# Run 100 trials: resample the test set (fraction .1), run both
# classifiers on the sample, then report mean accuracy from each log.
# FIX: a garbled line wrap had left a stray trailing "python" token on
# the myproj.py line and dropped the interpreter before gnb.py.
for ((i=0; i<100; i++))
do
    python fraction_xy.py x_test.csv y_test.csv .1
    python myproj.py x_test__10.csv y_test__10.csv myproj7.log
    python gnb.py x_test__10.csv y_test__10.csv gnb7.log
done
stats myproj7.log
stats gnb7.log
| true |
4e304974d17e9986db79c0879e9a3067e796f069 | Shell | swirepe/autonav | /autonav.sh | UTF-8 | 656 | 3.859375 | 4 | [] | no_license | #!/bin/bash
DIR_NAME=$(dirname $BASH_SOURCE)
if [[ x"${BASH_SOURCE[0]}" == x"$0" ]]
then
echo "ERROR: autonav.sh needs to be sourced, not executed." 1>&2
exit
fi
function set_confs {
if [[ "$@" ]]
then
CONFS="$@"
else
CONFS="$DIR_NAME/autonav.ini"
fi
}
function check_helper_script {
    # autonav.py does the heavy lifting; this library is useless without it.
    # Returns non-zero on failure so callers can react (we must not `exit`
    # here because this file is sourced into the user's shell).  The
    # original always returned 0, so the error was undetectable.
    if [ ! -e "$DIR_NAME/autonav.py" ]
    then
        echo "ERROR: Required helper script missing: autonav.py" 1>&2
        return 1
    fi
    return 0
}
# NOTE(review): the result of this check is not acted upon — if
# autonav.py is missing the python call below still runs and fails.
check_helper_script
set_confs $@
# autonav.py emits a temporary shell file; source it into the current
# shell, then discard it.
NAV_FILE="$(python $DIR_NAME/autonav.py $CONFS)"
source "$NAV_FILE"
rm "$NAV_FILE"
| true |
230359ffca0f7870f5a58353e5504283a0153370 | Shell | nfvcsd/git2jss | /scripts/fixPearson.sh/script.sh | UTF-8 | 746 | 3.5 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Sets the stupid Pearson directory to 777 because they don't understand their own permissions
# Resolve the GUI console user via SystemConfiguration (macOS-specific).
loggedInUser=$(/usr/bin/python -c 'from SystemConfiguration import SCDynamicStoreCopyConsoleUser; import sys; username = (SCDynamicStoreCopyConsoleUser(None, None, None) or [None])[0]; username = [username,""][username in [u"loginwindow", None, u""]]; sys.stdout.write(username + "\n");')
directory="/Users/${loggedInUser}/Library/Application Support/Pearson"
echo "$directory"
# Open up permissions on the existing directory, or create it first.
if [ -d "$directory" ]; then
chmod -R 777 "$directory"
exit 0
elif [ ! -d "$directory" ]; then
mkdir -p "$directory"
chown "$loggedInUser":"staff" "$directory"
chmod -R 777 "$directory"
exit 0
# NOTE(review): this else branch is unreachable — the two conditions
# above are exhaustive.
else
echo "Error"
exit 1
fi
exit 0 | true |
c975b7925822d084b71d67da6335bfd647d86892 | Shell | tigersharke/FreeBSD_Ports | /Scripts/svn-patch-rights | UTF-8 | 202 | 2.609375 | 3 | [] | no_license | #!/bin/sh
# Author nemysis
# After downloading a patch, its file permissions must be normalised:
# 644 for files and 755 for the optional files/ subdirectory.
chmod 644 *
if [ -d files ]
then
chmod 755 files
# NOTE(review): the cd persists, so the final 'ls -lh' below lists the
# files/ subdirectory when it exists, not the starting directory —
# confirm that is intended.
cd files ; chmod 644 *
ls -lh
fi
ls -lh
| true |
055e63b7b471304b26436c358a6943603255c2db | Shell | a2ikm/heroku-buildpack-libjpeg62 | /build.sh | UTF-8 | 519 | 2.796875 | 3 | [] | no_license | #!/bin/sh
# Staging layout: everything is installed under ~/vendor/libjpeg62 so it
# can be tarred up for the Heroku buildpack afterwards.
VENDOR=$HOME/vendor
PREFIX=$VENDOR/libjpeg62
mkdir -p $PREFIX/bin
mkdir -p $PREFIX/lib
mkdir -p $PREFIX/include
mkdir -p $PREFIX/man/man1
# Fetch, build and install libjpeg 6b (both shared and static libs).
curl -O http://www.ijg.org/files/jpegsrc.v6b.tar.gz
tar zxf jpegsrc.v6b.tar.gz
cd jpeg-6b
./configure --prefix=$PREFIX --enable-shared --enable-static
make LIBTOOL=libtool
make install LIBTOOL=libtool
cd $HOME
# Package the vendored install tree for distribution.
tar zcf libjpeg62.tar.gz -C $VENDOR .
# srvdir serves the current directory over HTTP so the tarball can be
# downloaded from this build machine.
curl https://raw.githubusercontent.com/scottmotte/srvdir-binary/master/srvdir.tar.gz -O
tar zxf srvdir.tar.gz
./srvdir
| true |
1bbca9fc4dcce9abc53cd7d80d55a0c88d5f1ad5 | Shell | EvgenyMazanik/vagrant_php73 | /desk/apache2-reload.sh | UTF-8 | 2,972 | 3.59375 | 4 | [] | no_license | #!/usr/bin/env bash
# Root of the shared folder that holds one subdirectory per site.
HOST_PATH=/host/
echo ""
echo ""
# Enable the optional per-machine ("individual") vhost when its config
# exists, otherwise make sure it is disabled.
echo "-- Check /config/etc/apache2/sites-available/individual.conf --"
if [ -f /config/etc/apache2/sites-available/individual.conf ]; then
    cp /config/etc/apache2/sites-available/individual.conf /etc/apache2/sites-available/individual.conf
    sudo a2ensite individual
else
    sudo a2dissite individual
fi
echo ""
echo ""
# Same toggle for the shared ("common") vhost shipped in the vagrant dir.
echo "-- Check /vagrant/config/etc/apache2/sites-available/common.conf --"
if [ -f /vagrant/config/etc/apache2/sites-available/common.conf ]; then
    echo "-- Found /vagrant/config/etc/apache2/sites-available/common.conf --"
    cp /vagrant/config/etc/apache2/sites-available/common.conf /etc/apache2/sites-available/common.conf
    sudo a2ensite common
else
    echo "-- Not Found /vagrant/config/etc/apache2/sites-available/common.conf --"
    sudo a2dissite common
fi
# create and enable new Virtual Hosts
# For every site directory under /host/: pick a vhost template (the site
# may ship its own under app/env, nano/env or .dev/env, else a blank
# template is used), fill in the placeholders, and enable the site.
for directory_path in ${HOST_PATH}* ; do
    if [ -d ${directory_path} ]; then
        echo ""
        echo "Found $directory_path directory."
        directory_name=`basename "$directory_path"`;
        # Template lookup order: app/env, nano/env, .dev/env, blank.
        if [ -f ${directory_path}/app/env/apache2-vhost.conf ]; then
            conf_from_path=${directory_path}/app/env/apache2-vhost.conf
        else
            if [ -f ${directory_path}/nano/env/apache2-vhost.conf ]; then
                conf_from_path=${directory_path}/nano/env/apache2-vhost.conf
            else
                if [ -f ${directory_path}/.dev/env/apache2-vhost.conf ]; then
                    conf_from_path=${directory_path}/.dev/env/apache2-vhost.conf
                else
                    conf_from_path=/vagrant/blanks/.dev/env/apache2-vhost.conf
                fi
            fi
        fi
        # Document root: prefer public/, then www/, else the site root.
        public_path=""
        if [ -d ${directory_path}/public ]; then
            public_path=${directory_path}/public
        elif [ -d ${directory_path}/www ]; then
            public_path=${directory_path}/www
        else
            public_path=${directory_path}
        fi
        echo "directory_path: ${directory_path}"
        echo "directory_name: ${directory_name}"
        echo "conf: /etc/apache2/sites-available/${directory_name}.conf"
        echo "public_path: ${public_path}"
        # Instantiate the template: copy it into sites-available and
        # substitute the {PLACEHOLDER} tokens in place.
        cp -rf ${conf_from_path} /etc/apache2/sites-available/${directory_name}.conf
        sed -i "s|{HOST_PATH}|$HOST_PATH|g" /etc/apache2/sites-available/${directory_name}.conf
        sed -i "s|{DOMAIN_NAME}|$directory_name|g" /etc/apache2/sites-available/${directory_name}.conf
        sed -i "s|{PUBLIC_PATH}|$public_path|g" /etc/apache2/sites-available/${directory_name}.conf
        sudo a2ensite ${directory_name}
    fi
done
echo ""
echo ""
# Apply all site changes at once.
echo "-- Restart Apache --"
sudo /etc/init.d/apache2 restart
| true |
78ba655e91221229e250851f686870c019626d5e | Shell | dgundlach/Projects | /proj/movies/makeisos | UTF-8 | 324 | 3.125 | 3 | [] | no_license | #!/bin/sh
# Destination drive directory under /drv/ for the generated ISOs.
TARGET=1-4
# For every DVD rip directory in the current dir: strip the media-center
# metadata files, then master the directory into a DVD-video ISO.
for name in * ; do
if [ -d "$name"/VIDEO_TS ] ; then
rm "$name"/VIDEO_TS/VIDEO_TS.nfo
rm "$name"/VIDEO_TS/VIDEO_TS.tbn
rm "$name"/VIDEO_TS/VIDEO_TS-fanart.jpg
rm "$name"/VIDEO_TS/.actors
mkisofs -v -udf -dvd-video -o /drv/"$TARGET"/Movies/"$name".iso "$name"
fi
done
| true |
8e1195043dd0d21acd8f8acda3660728a8ea63eb | Shell | obino/appscale | /AppTaskQueue/test/suites/run-e2e-tests.sh | UTF-8 | 7,400 | 4.15625 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
#
# - Starts and configures Postgres on remote machine;
# - Starts Taskqueue servers on remote machine;
# - Ensures python venv is installed with all needed packages;
# - Runs e2e tests.
set -e
set -u
usage() {
  # Print the CLI help to stdout and terminate with a non-zero status.
  cat <<EOF
Usage: ${0} --key-location <KEY> --user-name <USER> --vm-addr <HOST> --vm-private-ip <IP> [--logs-dir <DIR>]

Options:
  --key-location <KEY>   Private key file for access to the machine
  --user-name <USER>     User name to use for access to the machine
  --vm-addr <HOST>       Hostname or public IP of the machine
                         to start TaskQueue on
  --vm-private-ip <IP>   Private IP of the machine to start TaskQueue on
  --logs-dir <DIR>       Directory to save logs to (default: ./logs)
EOF
  exit 1
}
# Option values (populated by the argument loop below).  Empty means
# "required but not supplied yet"; emptiness is rejected after parsing.
KEY_LOCATION=
USER=
VM_ADDR=
VM_PRIVATE_IP=
# Default logs destination; --logs-dir overrides it.
LOGS_DIR="$(realpath --strip ./logs)"
# Let's get the command line arguments.
# Parse the command line.  Each value option consumes the following
# argument; an option given without a value falls through to usage().
#
# Fix: the original per-option blocks tested "${1}" *after* the shift,
# which under `set -u` crashes with an "unbound variable" error when the
# option is the last argument; "${1:-}" keeps that case routed to
# usage().  The five copy-pasted branches are also collapsed into one.
while [ $# -gt 0 ]; do
  case "${1}" in
    --key-location|--user-name|--vm-addr|--vm-private-ip|--logs-dir)
      OPT_NAME="${1}"
      shift
      if [ -z "${1:-}" ]; then
        usage
      fi
      case "${OPT_NAME}" in
        # NOTE: --logs-dir takes the value verbatim (no realpath), same
        # as before — relative paths bypass the default's normalisation.
        --key-location)  KEY_LOCATION="${1}" ;;
        --user-name)     USER="${1}" ;;
        --vm-addr)       VM_ADDR="${1}" ;;
        --vm-private-ip) VM_PRIVATE_IP="${1}" ;;
        --logs-dir)      LOGS_DIR="${1}" ;;
      esac
      shift
      ;;
    *)
      # Unknown option.
      usage
      ;;
  esac
done
# All four connection parameters are mandatory.
if [ -z "${VM_ADDR}" ] || [ -z "${VM_PRIVATE_IP}" ] \
   || [ -z "${KEY_LOCATION}" ] || [ -z "${USER}" ]
then
  usage
fi
log() {
  # Timestamped log line on stdout: "YYYY-MM-DD HH:MM:SS: LEVEL message".
  # $1 - message, $2 - optional level (defaults to INFO).
  local severity=${2:-INFO}
  printf '%s: %s %s\n' "$(date +'%Y-%m-%d %T')" "$severity" "$1"
}
mkdir -p "${LOGS_DIR}"
# Pull the AppScale service logs off the VM; registered on EXIT below so
# logs are collected even when provisioning or the tests fail.
scp_logs() {
  log "Downloading AppScale logs from the machine to ${LOGS_DIR}"
  scp -o StrictHostKeyChecking=no \
      -i "${KEY_LOCATION}" -r \
      "${USER}@${VM_ADDR}:/var/log/appscale/*" \
      "${LOGS_DIR}"
}
trap scp_logs EXIT
# Determine absolute path to some dirs
SUITES_DIR="$( realpath --strip "$( dirname "${BASH_SOURCE[0]}" )" )"
TASKQUEUE_SRC_DIR="$( dirname "$( dirname "${SUITES_DIR}" )" )"
COMMON_SRC_DIR="$( dirname "${TASKQUEUE_SRC_DIR}" )"/common
HELPERS_DIR="${TASKQUEUE_SRC_DIR}/test/helpers"
E2E_TEST_DIR="${TASKQUEUE_SRC_DIR}/test/e2e"
log ""
log "==================================================================="
log "=== Sending provisioning scripts, sources and other files to VM ==="
log "==================================================================="
scp -o StrictHostKeyChecking=no \
-i "${KEY_LOCATION}" \
"${HELPERS_DIR}/prepare-postgres.sh" \
"${HELPERS_DIR}/prepare-zookeeper.sh" \
"${HELPERS_DIR}/prepare-cassandra.sh" \
"${HELPERS_DIR}/restart-taskqueue.sh" \
"${USER}@${VM_ADDR}:/tmp/"
ssh -o StrictHostKeyChecking=no \
-i ${KEY_LOCATION} ${USER}@${VM_ADDR} "mkdir -p /tmp/AppTaskQueue/"
scp -o StrictHostKeyChecking=no \
-r -i "${KEY_LOCATION}" \
"${TASKQUEUE_SRC_DIR}/appscale" \
"${TASKQUEUE_SRC_DIR}/setup.py" \
"${USER}@${VM_ADDR}:/tmp/AppTaskQueue/"
scp -o StrictHostKeyChecking=no \
-r -i "${KEY_LOCATION}" \
"${COMMON_SRC_DIR}" \
"${USER}@${VM_ADDR}:/tmp/common"
# Save DSN string and projects config to variables
PG_DSN="dbname=appscale-test-project user=appscale password=appscale-pwd host=${VM_PRIVATE_IP}"
POSTGRES_PROJECT='postgres-test-project'
CASSANDRA_PROJECT='cassandra-test-project'
log ""
log "=========================================================================="
log "=== Initializing TaskQueue with its dependencies at ${USER}@${VM_ADDR} ==="
log "=========================================================================="
# Everything between << COMMANDS and COMMANDS runs ON THE VM.  The
# delimiter is unquoted, so ${VM_PRIVATE_IP}, ${PG_DSN} and the project
# variables are expanded locally before the script is sent.  Do not add
# comment lines inside the here-doc: its body is the literal remote
# script.
ssh -o StrictHostKeyChecking=no -i ${KEY_LOCATION} ${USER}@${VM_ADDR} << COMMANDS
set -e
echo "=== Preparing Postgres server ==="
sudo /tmp/prepare-postgres.sh --host "${VM_PRIVATE_IP}" \
                              --dbname "appscale-test-project" \
                              --username "appscale" \
                              --password "appscale-pwd"
echo "=== Starting Zookeeper server ==="
sudo /tmp/prepare-zookeeper.sh
echo "=== Starting and priming Cassandra ==="
sudo /tmp/prepare-cassandra.sh --private-ip ${VM_PRIVATE_IP} \
                               --zk-ip ${VM_PRIVATE_IP}
echo "=== Creating project nodes in Zookeeper ==="
sudo /usr/share/zookeeper/bin/zkCli.sh create \
    /appscale/projects/${CASSANDRA_PROJECT} ""
sudo /usr/share/zookeeper/bin/zkCli.sh create \
    /appscale/projects/${POSTGRES_PROJECT} ""
sudo /usr/share/zookeeper/bin/zkCli.sh create \
    /appscale/projects/${POSTGRES_PROJECT}/postgres_dsn "${PG_DSN}"
sudo /tmp/restart-taskqueue.sh --ports 50001,50002 \
                               --db-ip "${VM_PRIVATE_IP}" \
                               --zk-ip "${VM_PRIVATE_IP}" \
                               --lb-ip "${VM_PRIVATE_IP}" \
                               --source-dir /tmp/AppTaskQueue
COMMANDS
log ""
log "=================================================="
log "=== Prepare virtualenv for running test script ==="
log "=================================================="
# aiohttp lib which is used in e2e test requires Python>=3.5.3
# test scripts uses new syntax for formatting strings (3.6+)
# Loop through standard python aliases in order to find needed version
PYTHON=
for PYTHON_EXECUTABLE in python python3 python3.6 python3.7
do
  # Skip python executables that don't exist in PATH
  if ! which ${PYTHON_EXECUTABLE} &> /dev/null; then
    continue
  fi
  possible_python=$(which ${PYTHON_EXECUTABLE})
  # --version may print to stderr on old interpreters, hence 2>&1.
  HAVE=$(${possible_python} --version 2>&1 | awk '{ print $2 }')
  # Stop if version is new enough
  # (version-sort trick: "3.6" sorting first means HAVE >= 3.6).
  if echo -e "${HAVE}\n3.6" | sort -V | head -1 | grep -q "^3.6$"
  then
    PYTHON=${possible_python}
    break
  fi
done
if [ -z "${PYTHON}" ]
then
  log "Python 3.6 or greater was not found." "ERROR"
  log "Please install it and try again."
  exit 1
else
  log "Using python: ${PYTHON} version: ${HAVE}"
fi
cd "${E2E_TEST_DIR}"
# Configure virtualenvironment
${PYTHON} -m venv "venv"
venv/bin/pip install --upgrade pip
venv/bin/pip install ${HELPERS_DIR}
venv/bin/pip install pytest
venv/bin/pip install kazoo
# Run both backend suites even if the first fails; remember any failure
# and report it as the script's exit status at the end.
STATUS=0
log ""
log "===================================================="
log "=== Test Cassandra implementation of Pull Queues ==="
log "===================================================="
export TEST_PROJECT="${CASSANDRA_PROJECT}"
venv/bin/pytest -vv --tq-locations ${VM_ADDR}:50001 ${VM_ADDR}:50002 \
                --zk-location "${VM_ADDR}" \
  || STATUS=1
log ""
log "==================================================="
log "=== Test Postgres implementation of Pull Queues ==="
log "==================================================="
export TEST_PROJECT="${POSTGRES_PROJECT}"
venv/bin/pytest -vv --tq-locations ${VM_ADDR}:50001 ${VM_ADDR}:50002 \
                --zk-location "${VM_ADDR}" \
  || STATUS=1
exit ${STATUS}
| true |
91dacdc8f34bd5b74411c666b507f3132f1ba632 | Shell | toknapp/strace-based-docker-minimizer | /bin/strace-docker.sh | UTF-8 | 8,051 | 3.890625 | 4 | [] | no_license | #!/usr/bin/env bash
# Make Bash's error handling strict(er).
set -o nounset -o pipefail -o errexit
# Be compatible with both Linux and macOS.
if command -v realpath 1>&- 2>&-; then
CANONICALIZE_FILENAME="realpath"
else
CANONICALIZE_FILENAME="readlink -f"
fi
# Get directory where this script is in to get at other scripts in there.
SCRIPTS_DIR=$($CANONICALIZE_FILENAME "$0" | xargs dirname)
# Be compatible with both Linux and macOS.
# Repeatable sorting helps with `git diff`, which is important for human review
# of the changes introduced.
if command -v gsort 1>&- 2>&-; then
# To arrive at the smallest change set, I had to add
# `--dictionary-order --ignore-case`. Not sure why.
PLATFORM_INDEPENDENT_SORT="gsort --unique --dictionary-order --ignore-case"
else
# I found this in the original non-macOS code. Might not be enough (see above).
PLATFORM_INDEPENDENT_SORT="sort -u"
fi
# Declare default settings.
DOCKER=${DOCKER-docker}
DOCKER_RUN_OPTS=${DOCKER_RUN_OPTS-}
TRIGGER_CMD=${TRIGGER_CMD-}
DELAY_TRIGGER_SECONDS=${DELAY_TRIGGER_SECONDS-10}
WITH_RUNNING_CONTAINER=${WITH_RUNNING_CONTAINER-1}
TRACE_DIR_IN_CONTAINER=${TRACE_DIR_IN_CONTAINER-/tmp/traces}
DOCKER_FILE=${DOCKER_FILE-Dockerfile}
DOCKER_CONTEXT=${DOCKER_CONTEXT-.}
OUTPUT=${OUTPUT-.dockerinclude}
INSTALL_STRACE_CMD=
# Read command line.
while getopts "f:c:d:i:p:s:t:rRo:-" OPT; do
case $OPT in
f) DOCKER_FILE=$OPTARG ;;
c) DOCKER_CONTEXT=$OPTARG ;;
d) DOCKER_RUN_OPTS="$DOCKER_RUN_OPTS $OPTARG" ;;
i) INSTALL_STRACE_CMD="$OPTARG" ;;
p) PACKAGE_MANAGER="$OPTARG" ;;
s) DELAY_TRIGGER_SECONDS=$OPTARG ;;
t) TRIGGER_CMD="$OPTARG" ;;
r) WITH_RUNNING_CONTAINER=1 ;;
R) WITH_RUNNING_CONTAINER=0 ;;
o) OUTPUT=$OPTARG ;;
-) break ;;
\?) echo "Invalid option: -$OPTARG" >&2; exit 2 ;;
esac
done
shift $((OPTIND-1))
# Make temporary dir, contains:
# - Docker context for strace-enabled "Docker-image-under-test"
# - Host dir of mounted volume with strace output
TMP=$($CANONICALIZE_FILENAME $(mktemp -d tmp.XXXXXXXXXXX))
# Set up removal of temp dir, even in error cases.
trap "rm -rf $TMP" EXIT
# Build the "Docker-image-under-test", without any strace additions just
# yet, and record the image ID,
$DOCKER build --no-cache --iidfile="$TMP/base.image" -f "$DOCKER_FILE" \
"$DOCKER_CONTEXT" >&2
# Use the "Docker-image-under-test" image as base image for another
# strace-enabled Docker image.
BASE_IMAGE_ID=$(cat "$TMP/base.image")
# Determine how to install strace, as given by the -p or -i CLI arg.
# `apk` is used by Alpine Linux, and `apt` by Debian and Ubuntu.
if [ -z "$INSTALL_STRACE_CMD" ]; then
case ${PACKAGE_MANAGER-} in
apk) INSTALL_STRACE_CMD="apk add --update strace binutils" ;;
apt|apt-get) INSTALL_STRACE_CMD="apt-get update && apt-get install -y strace binutils";;
*) echo "PACKAGE_MANAGER variable not set" >&2; exit 2 ;;
esac
fi
# Prepare a Dockerfile for the strace-enabled Docker image,
cat <<EOF > $TMP/Dockerfile.strace-installing-layer
FROM $BASE_IMAGE_ID
RUN $INSTALL_STRACE_CMD
EOF
# Build the strace-enabled Docker image, and record the image ID.
$DOCKER build --iidfile="$TMP/strace-installing.image" -f "$TMP/Dockerfile.strace-installing-layer" "$TMP" >&2
STRACE_INSTALLING_IMAGE_ID=$(cat "$TMP/strace-installing.image")
# Extract command of the entry point of the "Docker-image-under-test" image.
EXEC=$($DOCKER inspect "$BASE_IMAGE_ID" | jq -r ".[0].Config.Entrypoint[0]")
# Determine actual ELF executable of the entry point command of the
# "Docker-image-under-test".
EXEC=$($DOCKER run --rm \
--entrypoint="/bin/sh" \
"$STRACE_INSTALLING_IMAGE_ID" \
-c 'which "'$EXEC'"'
)
# Determine the requested program interpreter from the ELF executable of the
# entry point command of the "Docker-image-under-test". In the context of ELF on
# Linux, the most often used "interpreter" is /lib64/ld-linux-x86-64.so.2 ,
# which is responsible for loading dynamically linked shared libraries. But
# exceptions exist, that's why we are not hard-coding the interpreter, but look
# it up in the ELF program headers.
# `readelf` got installed with the `binutils` Linux package, that's why we are
# running STRACE_INSTALLING_IMAGE here.
# See https://lwn.net/Articles/631631/ , section "Dynamically linked programs"
ELF_INTERPRETER=$($DOCKER run --rm \
--entrypoint="readelf" \
"$STRACE_INSTALLING_IMAGE_ID" \
--program-headers "$EXEC" \
| grep -o '\[Requesting program interpreter:\s\+[^]]\+\]' \
| sed -E 's/\[Requesting program interpreter:[[:space:]]*(.*)]/\1/'
)
# Create a wrapper script to attach strace to the entry point command of the
# "Docker-image-under-test".
# See http://man7.org/linux/man-pages/man1/strace.1.html for the meaning of the
# strace CLI options used here.
cat <<EOF > "$TMP/strace-wrapper.sh"
#!/bin/sh
OUTPUT=$TRACE_DIR_IN_CONTAINER/trace.\$(tr -dc A-Za-z0-9 < /dev/urandom | head -c5)
exec strace -qq -z -D -o "\$OUTPUT" -ff -f -e file "$ELF_INTERPRETER" "\$@"
EOF
chmod +x "$TMP/strace-wrapper.sh"
# Re-write the entry point of the "Docker-image-under-test" to include the
# strace wrapper script.
STRACE_PREPENDED_ENTRYPOINT=$($DOCKER inspect "$BASE_IMAGE_ID" \
| jq --compact-output "[[\"/bin/strace-wrapper.sh\"], [\"$EXEC\"], .[0].Config.Entrypoint[1:]] | flatten"
)
# Create another Dockerfile which actually *runs* strace.
cp $TMP/Dockerfile.strace-installing-layer $TMP/Dockerfile.strace-running-layer
cat <<EOF >> $TMP/Dockerfile.strace-running-layer
ADD strace-wrapper.sh /bin/strace-wrapper.sh
ENTRYPOINT $STRACE_PREPENDED_ENTRYPOINT
EOF
# Build a Docker image which *runs* strace.
$DOCKER build --iidfile="$TMP/strace-running.image" -f "$TMP/Dockerfile.strace-running-layer" "$TMP" >&2
STRACE_RUNNING_IMAGE_ID=$(cat "$TMP/strace-running.image")
# Create host directory to mount as Docker volume to extract the strace output
# out of the Docker container.
TRACE_OUTPUT=$TMP/traces
mkdir -p "$TRACE_OUTPUT"
# Three run modes: plain run (no trigger), detached run + trigger, or
# trigger-only (the trigger is responsible for starting the container).
# EXIT carries the interesting exit status in every mode.
if [ -z "$TRIGGER_CMD" ]; then
    # Run strace-enabled Docker container without any auxilliary trigger
    # commands.
    set +o errexit
    $DOCKER run --rm --cap-add=SYS_PTRACE \
        --volume="$TRACE_OUTPUT:$TRACE_DIR_IN_CONTAINER" \
        $DOCKER_RUN_OPTS \
        "$STRACE_RUNNING_IMAGE_ID" $@ >&2
    EXIT=$?
    set -o errexit
else
    # Wrap the auxilliary trigger command into a shell function which lets us
    # record the exit status of the trigger command.
    run_trigger() {
        set +o errexit
        $TRIGGER_CMD >&2
        EXIT=$?
        set -o errexit
    }
    if [ "$WITH_RUNNING_CONTAINER" = "1" ]; then
        # Run strace-enabled Docker container together with an auxilliary
        # trigger command.
        $DOCKER run --rm --cap-add=SYS_PTRACE --detach \
            --volume="$TRACE_OUTPUT:$TRACE_DIR_IN_CONTAINER" \
            --cidfile="$TMP/strace-running.container" \
            $DOCKER_RUN_OPTS \
            "$STRACE_RUNNING_IMAGE_ID" $@ >&2
        STRACE_RUNNING_CONTAINER_ID=$(cat "$TMP/strace-running.container")
        # Delay the trigger command to give the container time to boot up.
        sleep $DELAY_TRIGGER_SECONDS
        # Run the auxilliary trigger command.
        DOCKER_CONTAINER="$STRACE_RUNNING_CONTAINER_ID" \
            run_trigger
        # Stop the Docker container.
        $DOCKER inspect "$STRACE_RUNNING_CONTAINER_ID" &>/dev/null \
            && $DOCKER stop "$STRACE_RUNNING_CONTAINER_ID" >/dev/null
    else
        # Just run the auxilliary trigger command, which in this case is
        # responsible for starting the Docker container.
        DOCKER_IMAGE="$STRACE_RUNNING_IMAGE_ID" \
            TRACE_OUTPUT="$TRACE_OUTPUT" \
            TRACE_DIR_IN_CONTAINER="$TRACE_DIR_IN_CONTAINER" \
            run_trigger
    fi
fi
# Convert the strace output into a .dockerinclude file.
cat "$TRACE_OUTPUT/trace".* | $SCRIPTS_DIR/strace2dockerinclude.py | $PLATFORM_INDEPENDENT_SORT > "$OUTPUT"
exit $EXIT
| true |
426f6ff996a7b94cdf4250b94314016d85450421 | Shell | PedroEGV/arkix-nodejs-test | /test-requests/test.sh | UTF-8 | 158 | 2.515625 | 3 | [] | no_license | #!/bin/bash
# Request parameters; the target host is read from host.txt next to this
# script.
method='GET'
url="$(cat host.txt)"
body='{}'
urlPath=''
# Fire the request with a JSON body, then print a trailing newline so the
# shell prompt is not glued to the response.
curl -X "$method" -H 'Content-Type:application/json' -d "$body" "$url/$urlPath"
echo ''
| true |
4979b1fd0aa1cdfe92464d88798e6376e7d8b1cf | Shell | toggle-corp/chrono | /scripts/deploy.sh | UTF-8 | 2,715 | 3.28125 | 3 | [] | no_license | #! /bin/bash
echo "************************************************************";
echo "RC Branch=${CHRONO_RC_BRANCH}, Branch=${TRAVIS_BRANCH}, Pull request=${TRAVIS_PULL_REQUEST}"
echo "************************************************************";
# Deploy only on direct pushes to the release-candidate branch: skip
# pull-request builds and any other branch.
if ! [ "${TRAVIS_PULL_REQUEST}" == "false" ]; then
    echo 'Pull Request Build... exiting....'
    exit
fi
if ! [ "${TRAVIS_BRANCH}" == "${CHRONO_RC_BRANCH}" ]; then
    echo 'Non RC Branch... exiting....'
    exit
fi
# Repository layout, resolved relative to this script's location.
BASE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
ROOT_DIR=$(dirname "$BASE_DIR")
CLIENT_DIR=${ROOT_DIR}/client
echo "::::: Configuring AWS :::::"
aws configure set aws_access_key_id ${AWS_ACCESS_KEY_ID}
aws configure set aws_secret_access_key ${AWS_SECRET_ACCESS_KEY}
aws configure set default.region ${DEPLOYMENT_REGION}
aws configure set metadata_service_timeout 1200
aws configure set metadata_service_num_attempts 3
printf "\n\n::::::::: Uploading Image to Docker Hub [Server] :::::::::::\n"
# Build and push the server image; -xe makes the docker steps traced and
# fail-fast.
set -xe;
docker-compose -f production.yml build
docker-compose -f production.yml push
set +xe;
printf "\n\n::::::::: Deploying React to S3 [Client] :::::::::::\n"
echo ":::::: >> Generating New Reacts Builds [Locally]"
set -e;
# Make stdout blocking so long docker output does not hit EAGAIN on CI.
python -c "import fcntl; fcntl.fcntl(1, fcntl.F_SETFL, 0)"
# Write the build-time environment for the React app (multi-line string;
# do not insert comments inside it).
echo "
REACT_APP_API_HTTPS=${CHRONO_HTTPS}
REACT_APP_API_END=${CHRONO_API_END}
" > ${CLIENT_DIR}/.env
# Build the client inside the image; the build/ output lands on the host
# via the volume mount.
docker run -t -v ${CLIENT_DIR}/build:/code/build --env-file=${CLIENT_DIR}/.env \
    devtc/chrono:client-latest \
    bash -c 'yarn install && CI=false yarn build'
set +e;
rm ${CLIENT_DIR}/.env
echo ":::::: >> Removing Previous Builds Files [js, css] From S3 Bucket [$CHRONO_S3_BUCKET]"
aws s3 rm s3://$CHRONO_S3_BUCKET/static/js --recursive
aws s3 rm s3://$CHRONO_S3_BUCKET/static/css --recursive
echo ":::::: >> Uploading New Builds Files To S3 Bucket [$CHRONO_S3_BUCKET]"
aws s3 sync ${CLIENT_DIR}/build/ s3://$CHRONO_S3_BUCKET
echo ":::::: >> Settings Configs for Bucket [$CHRONO_S3_BUCKET]"
# disable index.html cache
aws s3 cp ${CLIENT_DIR}/build/index.html s3://$CHRONO_S3_BUCKET/index.html \
    --metadata-directive REPLACE --cache-control max-age=0,no-cache,no-store,must-revalidate --content-type text/html --acl public-read
# disable service-worker.js cache
aws s3 cp ${CLIENT_DIR}/build/service-worker.js s3://$CHRONO_S3_BUCKET/service-worker.js \
    --metadata-directive REPLACE --cache-control max-age=0,no-cache,no-store,must-revalidate --content-type application/javascript --acl public-read
# S3 website settings config
aws s3 website s3://$CHRONO_S3_BUCKET --index-document index.html --error-document index.html
| true |
60190414780970fabac21490bc8938cdd2960e42 | Shell | josa42/go-ls | /scripts/update-types.sh | UTF-8 | 226 | 2.578125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Refresh the vendored go-lsp sources from upstream: download each listed
# file from the sourcegraph/go-lsp master branch into the local lsp/
# directory, overwriting what is there.
declare -a files=(service.go service_test.go structures.go jsonrpc2.go)

url='https://raw.githubusercontent.com/sourcegraph/go-lsp/master'

for f in "${files[@]}"; do
  # --fail: exit non-zero on an HTTP error (e.g. 404) instead of silently
  # saving the error page over the existing source file.
  curl -sS --fail "$url/$f" > "lsp/$f"
done
| true |
51dca91a219ff227eccee723a28a4447ce03a552 | Shell | joshgav/vanpool-manager | /scripts/k8s/cluster.sh | UTF-8 | 3,868 | 3.296875 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/usr/bin/env bash
# Locate sibling scripts; common.sh supplies helpers (ensure_group, info)
# and vars.sh supplies the *_name/*_location configuration variables.
scripts_dir=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd)
root_dir=$(dirname "${scripts_dir}")
source ${scripts_dir}/common.sh
source ${scripts_dir}/vars.sh
# this script deploys the following:
#   group
#   service principal
#   AKS cluster
#   ACR registry
#   App Insights sink
## ensure group
group_id=$(ensure_group $group_name $group_location)
info "ensured group_id: $group_id"
# /end ensure group
## ensure service principal
# Reuse the existing principal when present (lookup errors are ignored),
# otherwise create one with no default role assignment.
app_id=$(az ad sp show --id "${cluster_identity}" \
    --output tsv --query appId 2> /dev/null)
if [[ -z "$app_id" ]]; then
    app_id=$(az ad sp create-for-rbac \
        --name "${cluster_identity}" \
        --skip-assignment true \
        --scopes ${group_id} \
        --output tsv --query appId)
fi
info "ensured app_id: ${app_id}"
# /end ensure service principal
## ensure AKS cluster
# NOTE(review): aks_id is captured *before* creation; on the create path
# it stays empty, so the final "ensured aks_id" log prints nothing —
# confirm whether it should be re-queried after `az aks wait`.
aks_id=$(az aks show \
    --name ${cluster_name} \
    --resource-group ${cluster_group_name} \
    --output tsv --query id)
if [[ -z "$aks_id" ]]; then
    info "cluster not found; starting cluster create"
    info "first resetting sp credential to reuse"
    app_password=$(az ad sp credential reset \
        --name "${cluster_identity}" \
        --output tsv --query password)
    info 'calling `az aks create`'
    az aks create \
        --name ${cluster_name} \
        --resource-group ${cluster_group_name} \
        --location ${cluster_location} \
        --dns-name-prefix ${cluster_prefix} \
        --service-principal ${app_id} \
        --client-secret ${app_password} \
        --network-plugin kubenet \
        --ssh-key-value ${ssh_pubkey_path} \
        --no-wait
    info "awaiting cluster creation"
    az aks wait \
        --name ${cluster_name} \
        --resource-group ${cluster_group_name} \
        --created
fi
info "setting AKS credentials in ~/.kube/config"
az aks get-credentials \
    --name ${cluster_name} \
    --resource-group ${cluster_group_name} \
    --admin >> /dev/null
info "ensured aks_id: ${aks_id}"
# /end ensure AKS cluster
## ensure container registry
acr_id=$(az acr show \
    --name ${registry_name} \
    --resource-group ${registry_group} \
    --output tsv --query id)
if [[ -z "$acr_id" ]]; then
    info "creating container registry"
    acr_id=$(az acr create \
        --name ${registry_name} \
        --resource-group ${registry_group} \
        --location ${registry_location} \
        --sku Standard \
        --output tsv --query id)
    # grant AKS principal read rights to ACR
    role_assignment_id=$(az role assignment create \
        --assignee ${app_id} \
        --scope ${acr_id} \
        --role Reader \
        --output tsv --query id)
    # add ACR credentials to ~/.docker
    az acr login \
        --name ${registry_name} \
        --resource-group ${registry_group} >> /dev/null
fi
info "ensured acr_id: ${acr_id}"
# /end ensure container registry
## ensure app insights
# App Insights has no dedicated CLI command here, so the generic
# `az resource` interface is used with the Microsoft.Insights type.
appinsights_id=$(az resource show \
    --name ${appinsights_resource_name} \
    --resource-group ${appinsights_group} \
    --resource-type "Microsoft.Insights/components" \
    --query id --output tsv)
if [[ -z "$appinsights_id" ]]; then
    appinsights_id=$(az resource create \
        --resource-group ${appinsights_group} \
        --resource-type "Microsoft.Insights/components" \
        --name ${appinsights_resource_name} \
        --location ${appinsights_location} \
        --properties "{ \"Application_Type\":\"other\", \"Flow_Type\":\"Redfield\" }" \
        --output tsv --query id)
fi
# Instrumentation key used by clients to send telemetry.
appinsights_ikey=$(az resource show \
    --name ${appinsights_resource_name} \
    --resource-group ${appinsights_group} \
    --resource-type "Microsoft.Insights/components" \
    --query properties.InstrumentationKey --output tsv)
info "ensured appinsights_id: ${appinsights_id} with ikey: ${appinsights_ikey}"
# /end ensure app insights
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.