blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
e08e72a408ed15d1b0f2a00aad96c0d1770f85f2
|
Shell
|
Pandailo/ComptesOperationsBDED
|
/bin/newjc
|
UTF-8
| 1,717
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/sh
# ---------------------------------------------------------------------------
# JOnAS: Java(TM) Open Application Server
# Copyright (C) 2005-2007 Bull S.A.S.
# Contact: jonas-team@objectweb.org
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
# Initial developer(s): Benoit Pelletier
# Contributor(s): Nicolas Duvauchel
# ---------------------------------------------------------------------------
# $Id: newjc 22065 2012-01-31 13:14:39Z joaninh $
# ---------------------------------------------------------------------------
# newjc - drive the ant template build-jc.xml to create a new JOnAS cluster.
# Usage: newjc          (interactive, step-by-step mode)
#        newjc -auto    (silent/automatic mode)
# Requires JONAS_ROOT to point at a JOnAS installation.
echo "$0"
# Sanity check: JONAS_ROOT must point at an installation (its lib/ must
# exist). Quoted so an unset variable or a path with spaces cannot break
# the test. Error goes to stderr, matching the mode messages below.
if [ ! -d "$JONAS_ROOT/lib" ]
then
echo "JONAS_ROOT must be set" 1>&2
exit 1
fi
NEWJCDIR=$JONAS_ROOT/templates/newjc;
# Force endorsed directory for the ant process
# Ensure that the XML parser used is recent enough
ANT_OPTS="$ANT_OPTS -Djava.endorsed.dirs=$JONAS_ROOT/lib/endorsed"
export ANT_OPTS
if [ $# -eq 0 ]
then
echo 1>&2 Mode Step by Step
ant -file "$NEWJCDIR/build-jc.xml" -Dsilence.mode="false"
else
# "$1" is quoted: the original unquoted test errored on empty/globbing args.
if [ "$1" = "-auto" ]
then
echo 1>&2 Mode Auto
ant -file "$NEWJCDIR/build-jc.xml" -Dsilence.mode="true"
fi
fi
| true
|
4792584491e024acde57c8c239fdf22efc5a4b97
|
Shell
|
JosephBrendler/joetoo
|
/dev-util/joetoolkit/files/backup_utilities/oromisbackup
|
UTF-8
| 2,603
| 3.75
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# oromisbackup - backup everything on my router/firewall/vpnserver
# joe brendler - 19 May 2019
#
# Mounts the backup host's LVM logical volumes under /mnt/oromisbackupROOT,
# rsyncs each top-level tree into the matching mountpoint, then unmounts.
# The helper functions used below (message, message_n, d_message, E_message,
# checkroot, separator, right_status, sh_countdown) and the color variables
# (BRon, Boff) are provided by the sourced script header.
source /usr/local/sbin/script_header_brendlefly
BUILD=0.0
VERBOSE=$TRUE
verbosity=2
#---[ variables ]-----------------------------------------------------------
# sync all content in these directories (restrict each to single LV filesystem)
dirlist="/ home opt usr var srv tmp"
# Exclude file/dirs matching these patterns
excludestring='--exclude=/mnt/*'
excludestring+=' --exclude=/tmp/*'
excludestring+=' --exclude=/var/tmp/*'
excludestring+=' --exclude=/proc/*'
excludestring+=' --exclude=/sys/*'
excludestring+=' --exclude=/dev/*'
excludestring+=' --exclude=/root/.ccache/*'
excludestring+=' --exclude=/root/.distcc/*'
excludestring+=' --exclude=/home/*/.ccache/*'
excludestring+=' --exclude=/home/*/.distcc/*'
excludestring+=' --exclude=/usr/src/linux*'
excludestring+=' --exclude=/usr/src/initramfs*'
excludestring+=' --exclude=/usr/portage/*'
#---[ functions ]-----------------------------------------------------------
# NOTE(review): "useage" (sic) is defined but never called in this script.
useage() {
E_message "Useage: oromisbackup [TBD]" && exit
}
#---[ main script ]---------------------------------------------------------
checkroot
separator "oromisbackup-${BUILD}"
message_n "mounting LVs on /mnt/oromisbackupROOT..."
mount /dev/mapper/vg_oromisbackup-ROOT /mnt/oromisbackupROOT && \
mount /dev/mapper/vg_oromisbackup-USR /mnt/oromisbackupROOT/usr/ && \
mount /dev/mapper/vg_oromisbackup-home /mnt/oromisbackupROOT/home && \
mount /dev/mapper/vg_oromisbackup-var /mnt/oromisbackupROOT/var && \
mount /dev/mapper/vg_oromisbackup-tmp /mnt/oromisbackupROOT/tmp && \
mount /dev/mapper/vg_oromisbackup-opt /mnt/oromisbackupROOT/opt && \
mount /dev/mapper/vg_oromisbackup-srv /mnt/oromisbackupROOT/srv
right_status $?; echo
message "mounted LVs on /mnt/oromisbackupROOT as shown below:"
mount | grep oromisbackupROOT
echo; message "Running backup sync..."
for dir in ${dirlist}
do
d_message "dirlist: [${dirlist}]" 1
# sync only what needs to be updated; stay on one filesystem; delete what was deleted; exclude junk
[ "${dir}" == "/" ] && target="" || target="${dir}/" ## deal with '/' characters
cmd="rsync -uavx /${target}* /mnt/oromisbackupROOT/${target} "
cmd+="${excludestring}"
cmd+=" --delete --delete-excluded"
message "about to sync target [${target}] with the command:\n${BRon}${cmd}${Boff}"
sh_countdown 3
# eval is needed so the --exclude=... words embedded in ${cmd} re-split
# into separate rsync arguments.
eval ${cmd}
done
echo; message_n "unmounting ..."
umount /mnt/oromisbackupROOT{/opt,/tmp,/var,/home/,/srv,/usr,}
right_status $?; echo
echo; message "un-mounted oromisbackup"
mount | grep oromisbackupROOT
| true
|
12d47c5062fdcdba544f7e33a005b5f64cec8c3e
|
Shell
|
langenhagen/scripts
|
/_archive/iterate-over-git-revs.sh
|
UTF-8
| 902
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Iterates over a commit range, checks out every revision in reverse order and performs actions on
# each revision. Sums up the exit codes of each action and returns this sum at the end.
#
# Usage: iterate-over-git-revs.sh OLDREV NEWREV
#
# author: andreasl
oldrev="$1"
newrev="$2"
# Quote the range so the script survives odd ref names / empty args.
all_revs="$(git rev-list "$oldrev..$newrev")"
# For experimental reasons
echo
echo "All revs:"
echo "$all_revs"
echo
# Remember where we started so we can restore it at the end.
prior_rev="$(git rev-parse HEAD)"
script_dir="$(dirname "${BASH_SOURCE[0]}")"
exit_code=0
# $all_revs is intentionally unquoted here: each whitespace-separated
# hash becomes one loop item.
for rev in $all_revs; do
    git checkout "$rev" 1>/dev/null 2>&1
    # For experimental reasons
    echo
    echo "current rev:"
    echo "$rev"
    echo
    bash "${script_dir}/lint-python-files-in-git-HEAD.sh"
    exit_code=$((exit_code + $?))
    # TODO do more actions here ....
    # For experimental reasons
    echo
    echo "exit_code:"
    echo "$exit_code"
    echo
done
# Restore whatever was checked out before the script ran.
git checkout "$prior_rev" 1>/dev/null 2>&1
exit "$exit_code"
| true
|
ac6ada6f01d863a6a49bfbbc57e5f44ac1061886
|
Shell
|
capr/lua-attic
|
/move-to-submodules/c/libpng/build
|
UTF-8
| 572
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/sh
# Build libpng (shared + static) into ../../bin/$P for the host platform.
# "${0%build}" strips the trailing literal 'build' (this script's name)
# from $0, leaving the script's directory.
cd "${0%build}" || exit 1
# build: compile src/*.c and produce the shared lib ($D) and static
# archive ($A). Caller-set variables:
#   X - cross-toolchain prefix   C - extra compile flags
#   L - extra link flags         P - platform output dir
# NB: $C and $L are intentionally unquoted so multi-word flag values
# split into separate arguments.
build() {
${X}gcc -c -O2 $C src/*.c -I. -I../zlib/src
${X}gcc *.o -shared -o ../../bin/$P/$D -L../../bin/$P -lz $L
rm -f ../../bin/$P/$A
${X}ar rcs ../../bin/$P/$A *.o
rm *.o
}
# Pick per-platform settings; the VAR=... assignments prefix the 'build'
# call. $OSTYPE comes from bash/zsh; when empty (plain sh) the final
# linux branch is taken. "${OSTYPE#darwin}" != "$OSTYPE" tests for a
# darwin* prefix.
if [ "$OSTYPE" = "msys" ]; then
P=windows L="-s -static-libgcc" D=png.dll A=png.a build
elif [ "${OSTYPE#darwin}" != "$OSTYPE" ]; then
P=osx64 C="-arch x86_64" L="-arch x86_64 -install_name @rpath/libpng.dylib" \
D=libpng.dylib A=libpng.a build
else
P=linux C="-fPIC -U_FORTIFY_SOURCE" L="-s -static-libgcc" \
D=libpng.so A=libpng.a build
fi
| true
|
0e7cea29ffe756888b612bd9c70baf88a6cd02b9
|
Shell
|
ossimlabs/ossim-jpip-server
|
/docker/run.sh
|
UTF-8
| 823
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
# Launch ossim-jpip-server from ${JPIP_DATA_DIR}, with every tunable
# taken from the environment and a default applied when unset/empty.
# Required params
if [ -z "${JPIP_DATA_DIR}" ]; then
  echo "JPIP_DATA_DIR environment must be defined" >&2
  exit 1
fi
# Defaulted params (each test is quoted so values with spaces are safe)
if [ -z "${SOURCES}" ] ; then
  export SOURCES=25
fi
if [ -z "${CLIENTS}" ] ; then
  export CLIENTS=25
fi
if [ -z "${PORT}" ] ; then
  # BUGFIX: the original tested `-z {OSSIM_JPIP_SERVER_SERVICE_PORT}`
  # (missing `$`) — a non-empty literal — so the 8080 default was
  # unreachable and PORT could end up empty.
  if [ -z "${OSSIM_JPIP_SERVER_SERVICE_PORT}" ] ; then
    export PORT=8080
  else
    export PORT=${OSSIM_JPIP_SERVER_SERVICE_PORT}
  fi
fi
if [ -z "${MAX_RATE}" ] ; then
  export MAX_RATE=40000000
fi
if [ -z "${ADDRESS}" ] ; then
  export ADDRESS=0.0.0.0
fi
if [ -z "${CONNECTION_THREADS}" ] ; then
  export CONNECTION_THREADS=100
fi
# Run the server from inside the data directory.
pushd "${JPIP_DATA_DIR}" >/dev/null
ossim-jpip-server -sources ${SOURCES} -clients ${CLIENTS} -port ${PORT} -max_rate ${MAX_RATE} -address ${ADDRESS} -connection_threads ${CONNECTION_THREADS}
popd > /dev/null
| true
|
7d7ae1aede97434d05a872b6e44ee01615513d24
|
Shell
|
healthwhale/master-class
|
/github.sh
|
UTF-8
| 448
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
# Load the first 11 pages of postgres/postgres commit history from the
# GitHub API into a local `commits` (id, doc) table.
# Each iteration prints the page URL and then feeds a small psql script
# to psql on stdin; the curl download itself runs *inside* psql via the
# backquoted \set command, which stores the raw JSON page in :record.
# NOTE(review): the GitHub API paginates from page=1; page=0 presumably
# returns the same data as page=1 — confirm.
for ((i=0; i<=10; i++)) do
echo "https://api.github.com/repos/postgres/postgres/commits?page=$i"
echo "
\set record \`curl \"https://api.github.com/repos/postgres/postgres/commits?page=$i\"\`
with _commits as (
select d->>'sha' as id, d - 'sha' as doc
from jsonb_array_elements( ( :'record')::jsonb ) d
)
insert into commits (id, doc)
select *
from _commits
" | psql
done
| true
|
acc169f353997cb4c573942e291cc349b5c4aec3
|
Shell
|
brodieaustin/TWLight
|
/.travis/translate.sh
|
UTF-8
| 1,041
| 3.65625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Regenerate and compile Django translations when translatable sources
# changed in the last commit but no .po files did.
# Count the number of files searched by makemessages
# that were changed in the last commit.
# https://docs.djangoproject.com/en/1.11/ref/django-admin/
# BUGFIX: the original appended `--porcelain` after the `--` pathspec
# separator; `git diff` has no such option, so git treated it as an
# (unmatched) pathspec. Dropped here.
message_files_changed=$(git diff --name-only HEAD~1..HEAD -- '*.html' '*.txt' '*.py' | wc -l)
# Count the number of translation files changed.
translation_files_changed=$(git diff --name-only HEAD~1..HEAD -- '*.po' | wc -l)
# If any relevant files changed but no translation files changed,
# update translations.
if [ "${message_files_changed}" -gt 0 ] && [ "${translation_files_changed}" -eq 0 ]
then
    echo "makemessages"
    # Extract the language codes from the LANGUAGES setting tuples.
    langs=($(python manage.py diffsettings | grep '^LANGUAGES =' | grep -o "(u'[^']*'" | grep -o "'[^']*'" | xargs))
    for locale in "${langs[@]}"; do
        python manage.py makemessages --locale="${locale}" || exit 1
    done
    # qqq holds the message documentation locale.
    python manage.py makemessages --locale=qqq || exit 1
    echo "compilemessages"
    python manage.py compilemessages || exit 1
else
    echo "No translatable source files changed."
fi
| true
|
b927107bf173a7650bde04affcced634d705b17e
|
Shell
|
edward-mb/env
|
/.local/bin/mode
|
UTF-8
| 2,629
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/zsh
# mode - switch the desktop (wallpaper, macOS appearance, iTerm2, kitty,
# yabai) to a named color mode, e.g. `mode dark` or `mode light`.
#
# Each MODE_* array holds, in order (zsh arrays are 1-indexed):
#   1: wallpaper file            2: macOS dark-mode flag ("true"/"false")
#   3: iTerm2 profile name       4: kitty colorscheme suffix
#   5: background color (hex)    6: foreground color (hex)
MODE_DEFAULT=('default.jpg' 'true' 'Solarized Matched' 'dark' 'df202020' 'ff999999')
MODE_DEFAULT_LIGHT=('default.jpg' 'false' 'Solarized Light Matched' 'light' 'dff9f9f9' 'ff666666')
MODE_DARK=('dark.png' 'true' 'Solarized Matched' 'dark' '00202020' 'ff999999')
MODE_LIGHT=('light.png' 'false' 'Solarized Light Matched' 'light' '00dfdfdf' 'ff666666')
# evaluate TEMPLATE WORD... - replace the {1},{2},... placeholders in
# TEMPLATE with the corresponding WORDs (via sed) and print the result.
evaluate() {
input="$1"
shift
replacements=($@)
for i in {1..$#replacements}; do
match="{$i}"
replacement="${replacements[i]}"
input="$(echo "$input" | sed "s/$match/$replacement/g")"
done
echo $input
}
# Install the mode's wallpaper and set the macOS dark/light appearance
# via AppleScript; resetting "change interval" forces a wallpaper reload.
change_system_theme() {
mode=($@)
rm -f ~/Pictures/wallpapers/current/*
cp ~/.wallpaper/"${mode[1]}" ~/Pictures/wallpapers/current/"${mode[3]}${mode[1]}"
template="
tell application \"System Events\"
tell desktops
set change interval to -1
set change interval to 1
end tell
tell appearance preferences
set dark mode to {2}
end tell
end tell
"
osascript -e "$(evaluate "$template" $mode)"
}
# Write an iTerm2 Python-API script that makes profile {3} the default
# and applies it to every open session, then run it.
change_iterm2_theme() {
mode=($@)
template='
import iterm2
async def main(conn):
app = await iterm2.async_get_app(conn)
profiles = await iterm2.PartialProfile.async_query(conn)
new_profile = None
for profile in profiles:
if profile.name == "{3}":
new_profile = await profile.async_get_full_profile()
await profile.async_make_default()
windows = app.terminal_windows
for window in windows:
tabs = window.tabs
for tab in tabs:
sessions = tab.sessions
for session in sessions:
await session.async_set_profile(new_profile)
iterm2.run_until_complete(main)
'
script="$HOME/Library/Application Support/iTerm2/Scripts/change-theme.py"
evaluate "$template" $mode > $script
python3 $script
}
# Point kitty's colorscheme symlink at colorscheme.<suffix>.conf and
# apply it to all running windows.
change_kitty_theme() {
mode=($@)
rm ~/.config/kitty/colorscheme.conf
ln -s colorscheme.${mode[4]}.conf ~/.config/kitty/colorscheme.conf
kitty @ set-colors -a ~/.config/kitty/colorscheme.conf
}
# Regenerate yabairc from its template with the mode's colors and update
# the running yabai status bar.
change_yabai_theme() {
mode=($@)
cat ~/.config/yabai/yabairc.template | sed "s/<background>/${mode[5]}/g" | sed "s/<foreground>/${mode[6]}/g" > ~/.config/yabai/yabairc
yabai -m config status_bar_background_color 0x${mode[5]}
yabai -m config status_bar_foreground_color 0x${mode[6]}
}
# Resolve e.g. "dark" -> $MODE_DARK: :u upper-cases the argument and
# ${(P)...} performs zsh indirect expansion on the resulting name.
mode_name="MODE_$1:u"
mode=(${(P)mode_name})
if [[ -z $mode ]]; then
echo "No mode specified"
exit 1
fi
if [[ "$(uname -s)" == "Darwin" ]]; then
change_system_theme $mode
change_yabai_theme $mode
fi
if [[ "$TERM_PROGRAM" == "iTerm.app" ]]; then
change_iterm2_theme $mode
fi
if [[ "$TERM" == "xterm-kitty" ]]; then
change_kitty_theme $mode
fi
| true
|
02f5c13a8d7570c6e12f3dc189aebd36f3c037e8
|
Shell
|
jneidel/dotfiles
|
/scripts/inbox
|
UTF-8
| 348
| 3.9375
| 4
|
[] |
no_license
|
#! /bin/sh
# inbox - move a file into the inbox directory ($HOME/org/0_inbox).
INBOX=$HOME/org/0_inbox
if [ "$1" = "--help" ] || [ "$1" = "-h" ] || [ "$1" = "help" ] || [ -z "$1" ]; then
cat <<EOF
$ inbox FILE
Move a file into the inbox dir
Parameters:
\$1: file to be moved
Example:
$ inbox ./note.md
EOF
exit
fi
FILE="$1"
if [ -e "$1" ]; then
mv "$FILE" "$INBOX/"
echo "Moved '$FILE' to inbox"
else
# BUGFIX: a nonexistent FILE was previously ignored silently with exit 0.
echo "Error: '$FILE' does not exist" >&2
exit 1
fi
| true
|
1f907fde6fae060ec914ae29bd04bee79e7cc733
|
Shell
|
freebsd/freebsd-ports
|
/textproc/xincluder/files/xincluder.sh.in
|
UTF-8
| 362
| 3.03125
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
# Dispatch wrapper: choose the XInclude driver class from the name this
# script was invoked under, then launch it on the JVM with the port's
# classpath. %%LOCALBASE%% is substituted at port build time.
prog=$(basename "$0")
case "$prog" in
xincluder-sax) MAIN_CLASS=SAXXIncluder ;;
xincluder-dom) MAIN_CLASS=DOMXIncluder ;;
xincluder-jdom) MAIN_CLASS=JDOMXIncluder ;;
xincluder) MAIN_CLASS=SAXXIncluder ;;
*) echo "ERROR!" ; false ;;
esac
"%%LOCALBASE%%/bin/java" -cp "$("%%LOCALBASE%%/bin/classpath")" "com.elharo.xml.xinclude.${MAIN_CLASS}" "$@"
| true
|
7ea1f879a08e87fd33f83d840f4119bcbf98519c
|
Shell
|
liruizhong/learnpy
|
/creat.sh
|
UTF-8
| 352
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
# creat.sh - scaffold a new Python source file with a standard header.
#   $1 - file to create (required)
#   $2 - optional description for the Desc: field
# NOTE: $1/$2 are interpolated into the printf format string, so a '%'
# in either would be misinterpreted by printf.
if [ -z "$1" ];then
# BUGFIX: message said "used:"; corrected to "usage:" and sent to stderr.
echo "usage: $0 file" >&2
else
DATE=$(date +%F)
printf "#!/usr/bin/env python
# -*- coding: utf-8 -*-
\"\"\"
===========================================
FileName: $1
Desc: $2
Author: ruizhong.li
Version:
CreateTime: ${DATE}
==========================================
\"\"\"
" > "$1"
fi
| true
|
49dbdf493a63e1447760f6cc6494457107ebbe0d
|
Shell
|
victoongo/GWAS_AM
|
/FHS/agcon/snplst6/test.sh
|
UTF-8
| 318
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
# Inspect LSF batch jobs and emit (but not execute) bkill commands.
# Count jobs whose wide listing mentions the agconrul6_sas log path.
let njobs=$(bjobs -u all -w | grep -i "log/agconrul6_sas" | wc -l )
if [ $njobs -gt 0 ]; then
# NOTE(review): the count greps "log/agconrul6_sas" but the kill loop
# greps "matlab" — confirm the two filters are meant to differ.
for jobid in $(bjobs -u all -w | grep -i "matlab" | cut -d " " -f 1 )
do
# Dry run: prints the bkill command instead of running it.
echo bkill $jobid
done
fi
exit
# NOTE(review): dead code — the unconditional `exit` above means the
# diagnostics below can never run.
echo njobs= $njobs
if [ "$njobs" == "" ]; then
echo njobs is missing
fi
| true
|
0771ce806236f2534432c2739cf413885bed557d
|
Shell
|
CiscoSystems/quantum-l3-test
|
/create_vm
|
UTF-8
| 5,005
| 3.9375
| 4
|
[] |
no_license
|
#!/bin/bash
#
# assumes that openstack credentails are set in this file
source ~/openrc
source .network >& /dev/null
# sleep_count N MSG - print MSG, then emit '. ' once per second for N
# seconds (a visible countdown).
# NB: $2 is echoed unquoted on purpose, preserving the original
# word-splitting behavior for callers that pass padded strings.
function sleep_count () {
echo -n $2
count=1
until [ "$count" -gt "$1" ]
do
echo -n '. '
sleep 1
count=$((count + 1))
done
}
# select the first available compute node from the list of available compute nodes
#COMPUTE_HOST=`nova-manage host list | grep com | head -1 | awk -F' ' '{print $1}'`
COMPUTE_HOST=$(nova host-list | grep compute | head -1 | awk -F' ' '{print $2}')
if [ -z "${COMPUTE_HOST}" ] ; then
    echo "There don't seem to be any 'compute' hosts available, perhaps run puppet agent on them again?"
    exit 1
fi
# initialize a network in quantum for use by VMs and assign it a non-routed subnet
NET_ID=$(quantum net-list | grep private | awk -F' ' '{ print $2 }')
if [ -z "${NET_ID}" ] ; then
    # create public/pvt networks and a quantum router using net_setup script
    ./net_setup
    NET_ID=$(quantum net-list | grep private | awk -F' ' '{ print $2 }')
fi
IMAGE_ID=$(glance index | grep 'test' | head -1 | awk -F' ' '{print $1}')
if [ -z "${IMAGE_ID}" ]; then
    [ -z "${IMAGE_LOC}" ] && IMAGE_LOC='http://cloud-images.ubuntu.com/precise/current/precise-server-cloudimg-amd64-disk1.img'
    # IMAGE_LOC='http://128.107.252.163/precise.img'
    read -p "Where would you like to download a QCOW format test image from [${IMAGE_LOC}]: " IMAGE_LOC_IN
    [ -z "${IMAGE_LOC_IN}" ] && IMAGE_LOC_IN="${IMAGE_LOC}"
    # BUGFIX: was `if [ grep IMAGE_LOC .network ]` — an invalid test that
    # always failed — and the sed lacked -i (its output was discarded) and
    # used '/' as delimiter, which a URL replacement would break.
    if grep -q IMAGE_LOC .network ; then
        sed -i -e "s|IMAGE_LOC.*|IMAGE_LOC=${IMAGE_LOC_IN}|" .network
    else
        echo "export IMAGE_LOC=${IMAGE_LOC_IN}" >> .network
    fi
    wget -O /tmp/test.img "${IMAGE_LOC_IN}"
    # import that image into glance
    glance image-create --name="test" --is-public=true --container-format=ovf --disk-format=qcow2 < /tmp/test.img
    # Capture the Image ID so that we can call the right UUID for this image
    IMAGE_ID=$(glance index | grep 'test' | head -1 | awk -F' ' '{print $1}')
fi
# create a pub/priv keypair in the root directory. If not running as root, you will need to change this path. If keys in the .ssh path exist, you will be prompted to overwrite. Select "y" to overwrite or you will get a "permission denied error".
if [ ! -e ~/.ssh/id_rsa.pub ] ; then
    ssh-keygen -f ~/.ssh/id_rsa -t rsa -N ''
fi
# add the public key to nova.
nova keypair-add --pub_key ~/.ssh/id_rsa.pub key_test
# create a security group so that we can allow ssh, http, and ping traffic
# when we add a floating IP (assuming you are adding floating IPs)
SEC_GROUP=$(nova secgroup-list | grep sec_test)
if [ -z "${SEC_GROUP}" ] ; then
    nova secgroup-create sec_test 'Test security group'
    nova secgroup-add-rule sec_test tcp 22 22 0.0.0.0/0
    nova secgroup-add-rule sec_test tcp 80 80 0.0.0.0/0
    nova secgroup-add-rule sec_test icmp -1 -1 0.0.0.0/0
fi
instance_name='test_vm'
# Boot the added image against the "1" flavor which by default maps to a micro instance. Include the precise_test group so our address will work when we add it later
NOVA_EXIST=$(nova list | grep ${instance_name})
if ! [ "${NOVA_EXIST}" ] ; then
    BUILD_STAT=$(nova boot --flavor 1 --security_groups sec_test --nic net-id=${NET_ID} --image ${IMAGE_ID} --key_name key_test $instance_name | grep status | awk -F' ' '{print $4}')
else
    # The VM already exists; nothing to do.
    exit 1
fi
echo "Waiting for VM to boot"
while [ "${BUILD_STAT}" != "ACTIVE" ]; do
    BUILD_STAT=$(nova show test_vm | grep status | awk -F' ' '{print $4}')
    sleep_count 5 " . "
    if [ "${BUILD_STAT}" == 'ACTIVE' ]; then
        break
    elif [ "${BUILD_STAT}" == 'ERROR' ]; then
        echo "The vm failed to build."
        exit 1
    fi
done
# Show the state of the system we just requested.
PRIV_IP=$(nova show $instance_name | grep 'private network' | awk -F' ' '{print $5}')
echo -e "\nPRIVATE IP: ${PRIV_IP}"
PUB_NET_ID=$(quantum net-list | grep ' public ' | awk -F' ' '{print $2}')
PRIV_ROUTER=$(quantum router-list | grep private_router_1 | awk -F' ' '{print $2}')
echo "PUB_NET: ${PUB_NET_ID}, ROUTER: ${PRIV_ROUTER}"
# Now, for a floating IP
VM_PORT_ID=$(quantum port-list | grep "${PRIV_IP}" | awk -F' ' '{print $2}')
FLOAT_ID=$(quantum floatingip-create ${PUB_NET_ID} | grep ' id ' | awk -F' ' '{print $4}')
FLOAT_IP=$(quantum floatingip-list | grep ${FLOAT_ID} | awk -F' ' '{print $5}')
#if [ ${FLOAT_IP} == '|' ] ; then
#    sleep_count 20 "Waiting for the IP to be associated and quantum to catch up..."
#    FLOAT_IP=`quantum floatingip-list | grep ${FLOAT_ID} | awk -F' ' '{print $6}'`
#fi
echo "Floating IP: ${FLOAT_IP}"
quantum floatingip-associate ${FLOAT_ID} ${VM_PORT_ID}
# Let's see if we can hit our node
ip netns exec qrouter-${PRIV_ROUTER} ip addr list
echo -e "\n\npinging inside host: ip netns exec qrouter-${PRIV_ROUTER} ping -c 1 ${FLOAT_IP}\n\n"
# Retry the ping a few times while quantum wires up the floating IP.
RETRY=0
while [ "${RETRY}" -le "5" ] ; do
    (( RETRY ++ ))
    sleep_count 5 "Waiting for the IP to be associated and quantum to catch up..."
    if ip netns exec qrouter-${PRIV_ROUTER} ping -c 1 ${FLOAT_IP} ; then
        echo -e "\n\nSuccess!\n\n"
        exit 0
    fi
done
| true
|
3ca773ca9a0b2e4c8a1ab0872e29de5b1b39e898
|
Shell
|
RainbowHackerHorse/Stuffnthings
|
/installers/gnomeboxesbsd.sh
|
UTF-8
| 2,193
| 3.03125
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
# Build and install GNOME Boxes from source on FreeBSD, patching out
# the Linux-isms and the too-new glib requirement along the way.
BOXES_VERSION=3.26
BOXES_PATCH=2
DOWNLOADDIR=/tmp/box-download
BOXESWORKDIR="${DOWNLOADDIR}"/boxessrc
BOXESBUILDDIR="${BOXESWORKDIR}"/gnome-boxes-"${BOXES_VERSION}"."${BOXES_PATCH}"
BOXES_GITSOURCE_URL=https://github.com/GNOME/gnome-boxes/archive/v${BOXES_VERSION}.tar.gz
BOXES_SOURCE_URL=https://download.gnome.org/sources/gnome-boxes/${BOXES_VERSION}/gnome-boxes-${BOXES_VERSION}.${BOXES_PATCH}.tar.xz
# Build Dependencies
pkg install -y ca_root_nss libvirt-glib qemu libosinfo libgudev gtk3 clutter-gtk3
# Runtime Dependencies
pkg install -y shared-mime-info mtools
# Undocumented dependencies that are still needed by ./configure
pkg install -y gobject-introspection pkgconf itstool gtk-vnc libsecret spice-gtk webkit2-gtk3 glib tracker vala gmake gettext
# Currently needs glib-2.52.0, FreeBSD only has glib-2.50.2. See if replace workaround works and the setting is just dumb.
if [ ! -d "${DOWNLOADDIR}" ]; then
mkdir "${DOWNLOADDIR}"
fi
if [ ! -d "${BOXESWORKDIR}" ]; then
mkdir "${BOXESWORKDIR}"
fi
cd "${DOWNLOADDIR}" || exit 1
# Choose FromGit() or FromSource()
# Fetch the release tarball (.tar.xz) and unpack it into the work dir.
FromSource() {
fetch "${BOXES_SOURCE_URL}"
tar xvfJ gnome-boxes-"${BOXES_VERSION}"."${BOXES_PATCH}".tar.xz -C "${BOXESWORKDIR}"
}
# Fetch the GitHub tag tarball (.tar.gz) instead.
# NOTE(review): fetch downloads v${BOXES_VERSION}.tar.gz but tar extracts
# v${BOXES_VERSION}.${BOXES_PATCH}.tar.gz — the filenames differ, so this
# path looks broken as written; confirm before using FromGit.
FromGit() {
pkg install -y autoconf
fetch "${BOXES_GITSOURCE_URL}"
tar xvfz v"${BOXES_VERSION}"."${BOXES_PATCH}".tar.gz -C "${BOXESWORKDIR}"
}
FromSource
#FromGit
cd "${BOXESBUILDDIR}" || exit 1
# Magical SED work to replace
# Lower the glib requirement in configure to the version FreeBSD ships.
sed -i.bak 's#GLIB\_MIN\_VERSION\=2\.52\.0#GLIB\_MIN\_VERSION\=2\.50\.2#g' "${BOXESBUILDDIR}"/configure
./configure
cd "${BOXESBUILDDIR}"/data || exit 1
cp org.gnome.Boxes.desktop.in org.gnome.Boxes.desktop
cd "${BOXESBUILDDIR}" || exit 1
# Replace Linuxism in /src/util-app.c
#sed -i.bak 's~#include \<linux/if.h\>~\#include \<freebsd/if\.h\>~g' "${BOXESBUILDDIR}"/src/util-app.c
# Not the right way to do it. Edit util-app.vala
sed -i.bak 's/using Linux/using Freebsd/g' "${BOXESBUILDDIR}"/src/util-app.vala
# Set GCC
CC=gcc6
CXX=g++6
CPP=cpp6
GCJ=gcj6
export CC CXX CPP GCJ
gmake
# Dies at: VALAC gnome_boxes_vala.stamp with:
# /bin/sh: --enable-experimental: not found
# gmake[3]: *** [Makefile:2298: gnome_boxes_vala.stamp] Error 127
| true
|
d2941b9f7aba797bf12f692ce18cd8756afebc59
|
Shell
|
danrjohnson/vault-kubernetes-sandbox
|
/vault-init.sh
|
UTF-8
| 495
| 3.171875
| 3
|
[] |
no_license
|
#! /bin/bash
# Initialize a fresh Vault server with a single unseal key share, save
# the unseal key and initial root token locally, then prompt the user
# through unsealing and logging in.
set -exo pipefail
OUTPUT=$(vault operator init -key-shares=1 -key-threshold=1)
echo "$OUTPUT"
echo "saving unseal key to unseal-key.txt"
echo "$OUTPUT" | grep "Unseal Key" > unseal-key.txt
echo "saving initial root token to root-token.txt"
echo "$OUTPUT" | grep " Initial Root Token" > root-token.txt
echo "use the following unseal key to unseal the vault"
cat unseal-key.txt
vault operator unseal
# BUGFIX: message said "user the following" — typo for "use".
echo "use the following root token to login"
cat root-token.txt
vault login
| true
|
2d3858c1becd6c871c64814e0155374c2e373b2b
|
Shell
|
mpicek/aprox
|
/run_tutorial.sh
|
UTF-8
| 2,823
| 3.171875
| 3
|
[] |
no_license
|
# When 1, clear the terminal before each example.
clearing=1
# run_example DESCRIPTION INPUT ARGS
# Print the description, show the input and command line, then pipe
# INPUT into ./aprox with ARGS and wait for the user to press Enter.
# "description" "input" "args"
run_example(){
if [ "$clearing" -eq 1 ]
then
clear
fi
echo ""
echo $1
echo ""
echo "Input: $2"
echo "Command: ./aprox $3"
# $3 is expanded unquoted on purpose so its flags split into words.
echo "$2" | ./aprox $3
read _
}
# Walk through the aprox tutorial examples one keypress at a time.
run_example "Print help for more info" " " "-h"
run_example "Simple prefix notation" "2.5 2 * 8 / 4 *" "-p"
# BUGFIX: the backticks around -r (and around `cat out.txt` below) were
# unescaped inside double quotes, so the shell executed them as command
# substitutions instead of printing them literally.
run_example "Simple prefix notation with normal distribution and use \`-r\` to have a nice and dense graph" "10 20 ~ 8 4 / *" "-p -r 10"
run_example "Simple prefix notation with normal and uniform distributions. WARNING: \
DISTRIBUTIONS DON'T ALWAYS LOOK PERFECT DUE TO ROUNDING ERROR." "10 30 ~ 20 50 u +" "-p -r 20"
run_example "Simple infix notation" "(5 + 3 * 2 - 1) / 2" ""
run_example "No problem.." "-(-(-(-(-1))))" ""
run_example "Works great!" "-(-(-(-1)))" ""
run_example "Weird inputs!" "10 + (-3)*(1/2)" ""
run_example "Handles errors.." "5 + 3.3.3" ""
run_example "Handles errors.." "5 + / (1 - 1)" ""
run_example "Handles errors.." "5 +* 4" ""
run_example "Handles errors.." "10 ~ 5" ""
run_example "Handles errors.." "10 u 5" ""
run_example "Distribution signs have the highest priority (but parentheses have even higher)" "5 ~ 10 * 2 u 30" ""
run_example "Can't divide by zero in distributions.." "5 ~ 15 / 0 ~ 2" ""
run_example "Can't divide by zero in distributions.." "100 / (-2) ~ 0" ""
run_example "Can't divide by zero in distributions.." "5 ~ 15 / (-2) ~ 2" ""
run_example "Handles errors.." "10 ~ 5" ""
run_example "This is how it should have been written" "(-5) ~ 10" "-r 15"
run_example "Sum with distributions" "0 ~ 10 + (-10) u 10 + (-10) ~ 20" ""
run_example "Multiplication with distributions" "0 ~ 10 * (-10) u 0 * (-10) ~ 20" ""
run_example "Division with distributions - we need to set a lower bin_size for precision" "50 u 100 / 5 ~ 10" "-b 0.1"
run_example "Division with distributions - we need to set a lower bin_size for precision" "500 / 10 ~ 50" "-b 0.1"
run_example "Division with distributions - we need to set a lower bin_size for precision" "10 ~ 50 / 100" "-b 0.001"
run_example "Division with distributions - we need to set a lower bin_size for precision" "50 u 100 / 10 ~ 50 + 2 ~ 10" "-b 0.1"
run_example "Save into output file.." "10 ~ 30" "-o out.txt -r 20"
echo "Now printing file out.txt using \`cat out.txt\`:"
cat out.txt
rm out.txt
read key
echo "5 + 5 ~ 40" > in.txt
run_example "Reading from input file in.txt. The rounding error is clearly visible." "10 ~ 50" "-i in.txt -r 30"
rm in.txt
run_example "Reallife problem: How much will I earn this summer? Part-time jobs with no qualifications\
 pay 110 ~ 150Kc. I will work 8 hours/day, 5 days a week. I want to work 3 weeks. How much \
will I earn? Also problems with rounding error. But a person still can see what is happening :)" "110 ~ 150 * 8 * 5 * 3" ""
|
f0cf9afd781b8afd30fd3b7397cc4fd2f35db156
|
Shell
|
BLelgemann/WPM_T9-2
|
/a2_shell-script.sh
|
UTF-8
| 3,803
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
# -----------------------------------------------------------------------
# TASK:
# From "2020-05-23-Article_list_dirty.tsv" produce a new file
# "2020-05-23-Dates_and_ISSNs.tsv" that
#   1. contains only the ISSN and publication-year columns, and
#   2. has no redundant rows.
# Tools: cut (extract columns), grep (filter lines), sed (edit the
# stream), sort (order lines), uniq (drop duplicate lines).
# -----------------------------------------------------------------------
# Inspecting the source file shows columns 5 (ISSN) and 12 (Date) are
# the relevant ones, and that some rows are shifted right by two columns
# because of a leading "IMPORTANT"/"IMPORTANT!" cell. Strip that cell
# and its two tab separators first so the columns line up ("!\{0,1\}"
# makes the exclamation mark optional).
# NB: the original piped `cat` into each tool; the useless cat is
# removed — each tool reads its input file directly.
sed 's/IMPORTANT!\{0,1\}\t\t//' 2020-05-23-Article_list_dirty.tsv > cleanstep1.tsv
# Drop rows that start with a "#" or "MAYBE" marker (otherwise empty).
grep -v '^#' cleanstep1.tsv | grep -v '^MAYBE' > cleanstep2.tsv
# Keep only the ISSN (5) and Date (12) columns.
cut -f 5,12 cleanstep2.tsv > cleanstep3.tsv
# Some ISSN values carry an "ISSN"/"issn:" prefix in varying case;
# strip it (the /i flag ignores case, GNU sed) and then remove any
# leftover leading whitespace.
sed 's/issn:*//i' cleanstep3.tsv | sed 's/^[ \t]*//' > cleanstep4.tsv
# Sort (which also groups the now-empty lines) and drop duplicates.
sort cleanstep4.tsv | uniq > cleanstep5.tsv
# Finally remove the header row and write the result file.
grep -v '^Date' cleanstep5.tsv > 2020-05-23-Dates_and_ISSNs.tsv
| true
|
afa753c61ed54c34c3a5e9469d8fd24c2ce89e6b
|
Shell
|
threeworld/Security-baseline
|
/Linux/主机安全基线脚本/CIS-LBK/DEBIAN_FAMILY_LBK/functions/recommendations/nix_ensure_rsyslog_default_file_permissions_configured.sh
|
UTF-8
| 2,425
| 3.625
| 4
|
[] |
no_license
|
#!/usr/bin/env sh
#
# CIS-LBK Recommendation Function
# ~/CIS-LBK/functions/recommendations/nix_ensure_rsyslog_default_file_permissions_configured.sh
#
# Name Date Description
# ------------------------------------------------------------------------------------------------
# Eric Pinnell 09/22/20 Recommendation "Ensure rsyslog default file permissions configured"
#
# Audits — and, if necessary, remediates — the rsyslog $FileCreateMode
# directive in /etc/rsyslog.conf and /etc/rsyslog.d/*.conf so that newly
# created log files get a compliant mode (0640 or stricter, per the
# grep patterns below). Expects $RNA, $LOG, $ELOG and the
# XCCDF_RESULT_* variables to be supplied by the calling framework.
ensure_rsyslog_default_file_permissions_configured()
{
echo "- $(date +%d-%b-%Y' '%T) - Starting $RNA" | tee -a "$LOG" 2>> "$ELOG"
test=""
# check if default file permissions are set correctly
# Pass when at least one compliant $FileCreateMode line exists AND no
# line anywhere carries a non-compliant mode value.
if grep -Eqs '^\s*\$[Ff]ile[Cc]reate[Mm]ode\s+0[6420][04]0\b' /etc/rsyslog.conf /etc/rsyslog.d/*.conf && ! grep -s '^\s*\$[Ff]ile[Cc]reate[Mm]ode' /etc/rsyslog.conf /etc/rsyslog.d/*.conf | grep -Evq '0[0246][04]0'; then
test=passed
else
# Some $FileCreateMode line has a bad value: rewrite the mode to 0640
# in rsyslog.conf and in every rsyslog.d/*.conf file that has one.
if grep -s '^\s*\$[Ff]ile[Cc]reate[Mm]ode' /etc/rsyslog.conf /etc/rsyslog.d/*.conf | grep -Evq '0[0246][04]0'; then
grep '^\s*\$[Ff]ile[Cc]reate[Mm]ode' /etc/rsyslog.conf | grep -Evq '0[0246][04]0' && sed -ri 's/(^\s*)(\$[Ff]ile[Cc]reate[Mm]ode)(\s+)([0-9][0-9][0-9][0-9])(\s*)(\s*.*)?$/\1\2 0640 \5\6/' /etc/rsyslog.conf
if [ -n "$(find /etc/rsyslog.d/ -name '*.conf' -type f)" ]; then
for file in /etc/rsyslog.d/*.conf; do
grep '^\s*\$[Ff]ile[Cc]reate[Mm]ode' "$file" | grep -Evq '0[0246][04]0' && sed -ri 's/(^\s*)(\$[Ff]ile[Cc]reate[Mm]ode)(\s+)([0-9][0-9][0-9][0-9])(\s*)(\s*.*)?$/\1\2 0640 \5\6/' "$file"
done
fi
else
# No $FileCreateMode directive exists at all: append a compliant one.
! grep -Eqs '^\s*\$[Ff]ile[Cc]reate[Mm]ode\s+0[6420][04]0\b' /etc/rsyslog.conf && echo "\$FileCreateMode 0640" >> /etc/rsyslog.conf
fi
# Re-run the audit to confirm the remediation took effect.
if grep -Eqs '^\s*\$[Ff]ile[Cc]reate[Mm]ode\s+0[6420][04]0\b' /etc/rsyslog.conf /etc/rsyslog.d/*.conf && ! grep -s '^\s*\$[Ff]ile[Cc]reate[Mm]ode' /etc/rsyslog.conf /etc/rsyslog.d/*.conf | grep -Evq '0[0246][04]0'; then
test=remediated
fi
fi
echo "- $(date +%d-%b-%Y' '%T) - Completed $RNA" | tee -a "$LOG" 2>> "$ELOG"
# Set return code and return
if [ "$test" = passed ]; then
echo "Recommendation \"$RNA\" No remediation required" | tee -a "$LOG" 2>> "$ELOG"
return "${XCCDF_RESULT_PASS:-101}"
elif [ "$test" = remediated ]; then
echo "Recommendation \"$RNA\" successfully remediated" | tee -a "$LOG" 2>> "$ELOG"
return "${XCCDF_RESULT_PASS:-103}"
else
echo "Recommendation \"$RNA\" remediation failed" | tee -a "$LOG" 2>> "$ELOG"
return "${XCCDF_RESULT_FAIL:-102}"
fi
}
| true
|
f312e07312a9bb1cea674cf231c042e0d844edf2
|
Shell
|
ilventu/aur-mirror
|
/haproxy-devel/PKGBUILD
|
UTF-8
| 1,069
| 2.578125
| 3
|
[] |
no_license
|
# Maintainer: Viacheslav Chimishuk <voice@root.ua>
# PKGBUILD (Arch Linux AUR) for the HAProxy development branch.
_pkgname=haproxy
pkgname=$_pkgname-devel
pkgver=1.5
pkgverdev=dev9
pkgrel=1
pkgdesc="The Reliable, High Performance TCP/HTTP Load Balancer (development branch)"
arch=('i686' 'x86_64')
url="http://haproxy.1wt.eu"
license=('GPL')
depends=('pcre')
makedepends=('gcc>=4.2.0' 'kernel-headers')
provides=("$_pkgname=$pkgver")
conflicts=("$_pkgname")
backup=()
install=$_pkgname.install
source=("http://haproxy.1wt.eu/download/1.5/src/devel/$_pkgname-$pkgver-$pkgverdev.tar.gz")
md5sums=('00e97d2ec7aa921c08cb9505c9202fc3')
build() {
cd "$srcdir/$_pkgname-$pkgver-$pkgverdev"
make TARGET=linux26 USE_PCRE=1
# NOTE(review): by PKGBUILD convention the install steps below belong in
# package(), not build(). Also, build() installs with PREFIX=$pkgdir/usr
# while package() installs again with DESTDIR="$pkgdir" — confirm which
# layout the package actually needs; the two look redundant/conflicting.
make PREFIX=$pkgdir/usr install
install -D -m755 $startdir/haproxy.init $pkgdir/etc/rc.d/haproxy
install -D -m644 examples/haproxy.cfg $pkgdir/etc/haproxy/haproxy.cfg
install -d $pkgdir/usr/share/haproxy
install -D -m644 doc/haproxy*.txt $pkgdir/usr/share/haproxy/
}
package() {
cd "$srcdir/$_pkgname-$pkgver-$pkgverdev"
make DESTDIR="$pkgdir" install
}
| true
|
f72a582d6406e2e09cfd6c86e6e34c86e1a98006
|
Shell
|
leandroaur/ubuntuhomeserver
|
/install_bareos.sh
|
UTF-8
| 567
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/sh
# Install the Bareos backup suite (with PostgreSQL catalog backend)
# from the upstream bareos.org APT repository. Must run as root.
# See http://download.bareos.org/bareos/release/
# for applicable releases and distributions
#DIST=Debian_10
# or
# DIST=Debian_9.0
DIST=xUbuntu_18.04
# DIST=xUbuntu_16.04
RELEASE=release/19.2
# or
# RELEASE=release/latest
# RELEASE=experimental/nightly
URL=http://download.bareos.org/bareos/$RELEASE/$DIST
# add the Bareos repository
printf "deb $URL /\n" > /etc/apt/sources.list.d/bareos.list
# add package key
# NOTE(review): apt-key is deprecated on current Debian/Ubuntu releases;
# consider installing the key into /etc/apt/trusted.gpg.d instead.
wget -q $URL/Release.key -O- | apt-key add -
# install Bareos packages
apt-get update
# NOTE(review): no -y flag, so this prompts interactively — confirm that
# is intended for an installer script.
apt-get install bareos bareos-database-postgresql
| true
|
624fc79a26f9de25f1dea85d6b0e9d71eb42b15a
|
Shell
|
raviol/imshaby-api
|
/build/build_imshaby_api.sh
|
UTF-8
| 391
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
# Build the imshaby-api Maven project and (re)build its Docker image.
# $1 - environment type; defaults to "local" when omitted.
ENV_TYPE=$1
if [ -z "${ENV_TYPE}" ]
then
  ENV_TYPE="local"
fi
# Absolute path two directories above this script, resolved even when the
# script is invoked through a relative path or symlinked CWD.
PROJECTS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )/../.." >/dev/null 2>&1 && pwd )"
# Fix: quote ${PROJECTS_DIR} everywhere — the original left it unquoted,
# breaking the build when the checkout path contains spaces.
mvn clean package -f "${PROJECTS_DIR}/imshaby-api/pom.xml"
# Remove stale images first; errors (image absent) are intentionally ignored.
docker rmi local/imshaby-api:v1.0 imshaby-api:latest &> /dev/null
docker build -t imshaby-api "${PROJECTS_DIR}/imshaby-api"
docker tag imshaby-api local/imshaby-api:v1.0
| true
|
590b1e5fa10c0ca6ffeedfd9502765441c7559a6
|
Shell
|
heartshare/backuppc-tools
|
/snapshot.sh
|
UTF-8
| 875
| 3.59375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# pre_snapshot PATH BACKUPSV
#   Create a read-only btrfs snapshot of the mountpoint backing PATH, or a
#   symlink when the underlying filesystem is not btrfs.
#   $1 - target path inside the backup subvolume
#   $2 - backup subvolume prefix; stripped from $1 to find the mountpoint
pre_snapshot () {
    backupsv="$2"
    # Strip the backup-subvolume prefix; if nothing was stripped, the target
    # is outside the backup tree and there is nothing to do.
    mountpoint=$(echo "$1" | sed "s:$backupsv/\?::")
    if [ "$mountpoint" == "$1" ] ; then
        echo "Target isn't a $backupsv folder. Do nothing."
        exit 0
    fi
    fs=$(df -T "$mountpoint" | tail -n +2 | awk '{print $2}')
    if [ "$fs" != "btrfs" ] ; then
        echo "creating symlink"
        ln -vs "$mountpoint" "$1"
        exit 0
    fi
    btrfs subvolume snapshot -r "$mountpoint" "$1"
    # BUG FIX: capture the snapshot's exit code before testing it. The
    # original ran `if [ $? -ne 0 ]; then exit $?`, where the second $? is
    # the (successful) status of the `[` test, so it always exited 0.
    rc=$?
    if [ "$rc" -ne 0 ] ; then
        exit "$rc"
    fi
}
# post_snapshot PATH BACKUPSV
#   Undo pre_snapshot: delete the btrfs snapshot at PATH, or remove the
#   symlink when the underlying filesystem is not btrfs.
#   $1 - target path inside the backup subvolume
#   $2 - backup subvolume prefix; stripped from $1 to find the mountpoint
post_snapshot () {
    backupsv="$2"
    # Strip the backup-subvolume prefix; bail out if the target is outside it.
    mountpoint=$(echo "$1" | sed "s:$backupsv/\?::")
    if [ "$mountpoint" == "$1" ] ; then
        echo "Target isn't a $backupsv folder. Do nothing."
        exit 0
    fi
    fs=$(df -T "$mountpoint" | tail -n +2 | awk '{print $2}')
    if [ "$fs" != "btrfs" ] ; then
        echo "deleting softlink"
        rm -v "$1"
        exit 0
    fi
    btrfs subvolume delete "$1"
    # BUG FIX: capture the delete's exit code before testing it. The original
    # ran `if [ $? -ne 0 ]; then return $?`, where the second $? is the
    # (successful) status of the `[` test, so it always returned 0.
    rc=$?
    if [ "$rc" -ne 0 ] ; then
        return "$rc"
    fi
}
| true
|
af190ae672abd42801681a42d43d302f13ec5e81
|
Shell
|
vtamara/archconf
|
/zshenv
|
UTF-8
| 739
| 3.625
| 4
|
[
"MIT"
] |
permissive
|
# Guard: warn the user if ~/.zshenv.local edits PATH — that configuration
# belongs in ~/.zshrc.local. (zsh syntax: 'local', ${(%):-...} expansion.)
local _path_ant="$PATH"
# local configuration
[[ -f ~/.zshenv.local ]] && source ~/.zshenv.local
if [[ $PATH != $_path_ant ]]; then
# `colors` has not been initialised yet; define a few codes manually
typeset -AHg fg fg_bold
if [ -t 2 ]; then
fg[red]=$'\e[31m'
fg_bold[white]=$'\e[1;37m'
reset_color=$'\e[m'
else
# stderr is not a terminal: emit the warning without escape codes
fg[red]=""
fg_bold[white]=""
reset_color=""
fi
cat <<MENS >&2
${fg[red]}Advertencia:${reset_color} su archivo de configuración \`~/.zshenv.local' parece editar entradas de PATH.
Por favor mueva esa configuración a \`.zshrc.local' con algo como:
${fg_bold[white]}cat ~/.zshenv.local >> ~/.zshrc.local && rm ~/.zshenv.local${reset_color}
(called from ${(%):-%N:%i})
MENS
fi
unset _path_ant
| true
|
c94157bbbe950889f9e08a55b339c8e3934cd9ad
|
Shell
|
conversationai/conversationai-models
|
/experiments/tf_trainer/tf_word_label_embedding/run.ml_engine.sh
|
UTF-8
| 1,398
| 2.875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# This script runs one training job on Cloud MLE.
# Note:
# We currently use 2 different embeddings:
#   - glove.6B/glove.6B.300d.txt
#   - google-news/GoogleNews-vectors-negative300.txt
# Glove assumes all words are lowercased, while Google-news handles different casing.
# As there is currently no tf operation that perform lowercasing, we have the following
# requirements:
#   - For google news: Run preprocess_in_tf=True (no lowercasing).
#   - For glove.6B, Run preprocess_in_tf=False (will force lowercasing).
# Provides MODEL_PARENT_DIR and GCS_RESOURCES used below.
source "tf_trainer/common/dataset_config.sh"
DATETIME=$(date '+%Y%m%d_%H%M%S')
MODEL_NAME="tf_word_label_embedding"
# $1 names the dataset variant and is appended to the model name/job dir.
MODEL_NAME_DATA="${MODEL_NAME}_$1"
JOB_DIR="${MODEL_PARENT_DIR}/${USER}/${MODEL_NAME_DATA}/${DATETIME}"
# Submit the training job; flags after the bare "--" are forwarded to the
# trainer module itself rather than consumed by gcloud.
gcloud ml-engine jobs submit training tf_trainer_${MODEL_NAME}_${USER}_${DATETIME} \
--job-dir=${JOB_DIR} \
--runtime-version=1.10 \
--scale-tier 'BASIC_GPU' \
--module-name="tf_trainer.${MODEL_NAME}.run" \
--package-path=tf_trainer \
--python-version "3.5" \
--region=us-east1 \
--verbosity=debug \
-- \
--train_path="${GCS_RESOURCES}/toxicity_q42017_train.tfrecord" \
--validate_path="${GCS_RESOURCES}/toxicity_q42017_validate.tfrecord" \
--embeddings_path="${GCS_RESOURCES}/glove.6B/glove.6B.300d.txt" \
--embedding_size=300 \
--preprocess_in_tf=False \
--model_dir="${JOB_DIR}/model_dir"
| true
|
5bcef8570037641a707241daa0016010ef09da34
|
Shell
|
StackStorm-Exchange/ci
|
/.circle/dependencies
|
UTF-8
| 3,481
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
# CircleCI dependency-installation step for StackStorm Exchange packs:
# clones st2 + lint configs, installs system and pip requirements, and sets
# up a virtualenv plus any pack-specific test requirements.
set -ex
CURRENT_DIR="$(dirname "$0")"
export CI_DIR=/home/circleci/ci
export PYTHONPATH=/tmp/st2/st2common:${PYTHONPATH}
ST2_BRANCH=${ST2_BRANCH:-master}
LINT_CONFIGS_BRANCH=${LINT_CONFIGS_BRANCH:-master}
git config --global user.name "StackStorm Exchange"
git config --global user.email "info@stackstorm.com"
# Print out environment info
python --version
pip --version
# Clone st2 repo so other scripts can reference StackStorm Python code
[[ -d /tmp/st2 ]] || git clone --depth 1 --single-branch --branch "${ST2_BRANCH}" https://github.com/StackStorm/st2.git /tmp/st2
# Clone lint-configs
[[ -d ~/ci/lint-configs ]] || git clone --depth 1 --single-branch --branch "${LINT_CONFIGS_BRANCH}" https://github.com/StackStorm/lint-configs.git ~/ci/lint-configs
# Create a directory for debian packages so we can cache it in Circle CI
sudo rm -rf /var/cache/apt/archives && sudo ln -s ~/.apt-cache /var/cache/apt/archives && mkdir -p ~/.apt-cache/partial
# Note: We can't directly install imagemagick since it results an error
# We use work-around from https://discuss.circleci.com/t/error-installing-imagemagick/2963
sudo apt-get update
sudo apt-get -y install python-dev jq gmic optipng
# Installing dependencies for st2 pip build
sudo apt-get -y install libldap2-dev libsasl2-dev
# make `gh` available for github API calls
~/ci/.circle/install_gh
# Hit github's API to keep the PAT active (without failing if it's not)
(GH_TOKEN=${MACHINE_PASSWORD} gh repo view | head -n2) || true
# This should track the pinned version of pip in st2's Makefile
# Please sync this version with .circle/Makefile and .circleci/config.yml
PIP_VERSION="20.3.3"
sudo pip install -U "pip==${PIP_VERSION}" setuptools virtualenv
virtualenv --pip "${PIP_VERSION}" ~/virtualenv
source ~/virtualenv/bin/activate
# Install StackStorm requirements
echo "Installing StackStorm requirements from /tmp/st2/requirements.txt"
~/virtualenv/bin/pip install -r "/tmp/st2/requirements.txt"
# Copy over Makefile and install StackStorm runners and register metrics drivers
echo "Installing StackStorm runners and registering metrics drivers"
# ROOT_DIR (optional) points at the pack checkout; otherwise use the CWD.
if [[ -n "${ROOT_DIR}" ]]; then
PACK_REQUIREMENTS_FILE="${ROOT_DIR}/requirements.txt"
PACK_TESTS_REQUIREMENTS_FILE="${ROOT_DIR}/requirements-tests.txt"
PACK_SETUP_TESTING_ENV="${ROOT_DIR}/tests/setup_testing_env.sh"
echo "Copying Makefile to ${ROOT_DIR}"
cp ~/ci/.circle/Makefile ${ROOT_DIR}
make -C requirements-ci .install-runners
else
PACK_REQUIREMENTS_FILE="$(pwd)/requirements.txt"
PACK_TESTS_REQUIREMENTS_FILE="$(pwd)/requirements-tests.txt"
PACK_SETUP_TESTING_ENV="$(pwd)/tests/setup_testing_env.sh"
echo "Copying Makefile to $(pwd)"
cp ~/ci/.circle/Makefile .
make requirements-ci .install-runners
fi
# Install pack requirements
if [[ -f "${PACK_REQUIREMENTS_FILE}" ]]; then
echo "Installing pack requirements from ${PACK_REQUIREMENTS_FILE}"
~/virtualenv/bin/pip install -r "${PACK_REQUIREMENTS_FILE}"
fi
# Install pack tests requirements
if [[ -f "${PACK_TESTS_REQUIREMENTS_FILE}" ]]; then
echo "Installing pack tests requirements from ${PACK_TESTS_REQUIREMENTS_FILE}"
~/virtualenv/bin/pip install -r "${PACK_TESTS_REQUIREMENTS_FILE}"
fi
# Install custom pack testing enviornment
if [[ -x "${PACK_SETUP_TESTING_ENV}" ]]; then
echo "Setting up custom pack testing environment with ${PACK_SETUP_TESTING_ENV}"
"${PACK_SETUP_TESTING_ENV}"
fi
echo "Installed dependencies:"
pip list
| true
|
9c6e15c860dc24b219576eda5d0947508c64ea44
|
Shell
|
open-estuary/distro-repo
|
/rpm/obsolete/gcc/rpm_build.sh
|
UTF-8
| 1,944
| 3.5
| 4
|
[] |
no_license
|
#!/bin/bash
# Build the gcc RPM from an exported SVN branch plus nvptx helper tarballs.
# NOTE(review): the echo + "exit 0" below makes everything after it
# unreachable — this script is intentionally disabled.
echo "Please use devtoolset-4-gcc"
exit 0
CUR_DIR=$(cd `dirname $0`; pwd)
VERSION="5.4.1"
#The DATE is also specifed in gcc.spec
DATE="20160603"
TAR_FILENAME="gcc-""${VERSION}""-${DATE}.tar.bz2"
#SVNREV="247088"
SVNREV="247733"
nvptx_tools_gitrev="c28050f60193b3b95a18866a96f03334e874e78f"
nvptx_newlib_gitrev="aadc8eb0ec43b7cd0dd2dfb484bae63c8b05ef24"
# Prepare GCC Source
if [ ! -f ${CUR_DIR}/src/${TAR_FILENAME} ] ; then
if [ -z "$(which svn 2>/dev/null)" ] ; then
sudo yum install -y svn
fi
#sudo wget -O ${CUR_DIR}/src/${TAR_FILENAME} http://ftp.tsukuba.wide.ad.jp/software/gcc/releases/gcc-${VERSION}/gcc-${VERSION}.tar.bz2
svn export svn://gcc.gnu.org/svn/gcc/branches/gcc-5-branch@${SVNREV} gcc-${VERSION}-${DATE}
tar cf - gcc-${VERSION}-${DATE} | bzip2 -9 > ${CUR_DIR}/src/gcc-${VERSION}-${DATE}.tar.bz2
rm -fr gcc-${VERSION}-${DATE}
fi
# Prepare isl lib
#${CUR_DIR}/isl_build.sh
# Prepare others sources files
if [ ! -f ${CUR_DIR}/src/nvptx-tools-${nvptx_tools_gitrev}.tar.bz2 ] ; then
git clone https://github.com/MentorEmbedded/nvptx-tools.git
cd nvptx-tools
git archive origin/master --prefix=nvptx-tools-${nvptx_tools_gitrev}/ | bzip2 -9 > ../nvptx-tools-${nvptx_tools_gitrev}.tar.bz2
cd ..; rm -rf nvptx-tools
mv nvptx-tools-${nvptx_tools_gitrev}.tar.bz2 ${CUR_DIR}/src/
fi
if [ ! -f ${CUR_DIR}/src/nvptx-newlib-${nvptx_newlib_gitrev}.tar.bz2 ] ; then
git clone https://github.com/MentorEmbedded/nvptx-newlib.git
cd nvptx-newlib
git archive origin/master --prefix=nvptx-newlib-${nvptx_newlib_gitrev}/ | bzip2 -9 > ../nvptx-newlib-${nvptx_newlib_gitrev}.tar.bz2
cd ..; rm -rf nvptx-newlib
mv nvptx-newlib-${nvptx_newlib_gitrev}.tar.bz2 ${CUR_DIR}/src/nvptx-newlib-${nvptx_newlib_gitrev}.tar.bz2
fi
# Patch the spec file's Version field, then hand off to the shared builder.
sed -i "s/Version\:\ .*/Version\:\ \ \ ${VERSION}/g" ${CUR_DIR}/src/gcc.spec
${CUR_DIR}/../../utils/rpm_build.sh ${CUR_DIR}/src gcc.spec
| true
|
56a50a9815a28fc52fd77ae16d6d08276165a296
|
Shell
|
tprelog/iocage-homeassistant
|
/overlay/usr/local/etc/rc.d/appdaemon
|
UTF-8
| 6,014
| 3.40625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
#
# PROVIDE: appdaemon
# REQUIRE: LOGIN
# KEYWORD: shutdown
#
# VERSION: 20220713
#
# appdaemon_enable:     Set to YES to enable the appdaemon service.
#                       Default: NO
#                       Enable:  sysrc appdaemon_enable="YES"
#                       Disable: sysrc -x appdaemon_enable
#
# appdaemon_user:       The user account used to run the appdaemon daemon.
#                       This is optional, however do not specifically set this to an
#                       empty string as this will cause the daemon to run as root.
#                       Default: "appdaemon"
#                       change:  sysrc appdaemon_user="homeassistant"
#
# appdaemon_group:      The group account used to run the appdaemon daemon.
#                       Default: The primary group of the ${appdaemon_user}
#                       change:  sysrc appdaemon_group="homeassistant"
#
# appdaemon_user_dir:   Path to directory, where ".cache/pip" will be located. This may also be the
#                       location for the user's files and ${appdaemon_config_dir}.
#                       Default: The HOME directory for the ${appdaemon_user}
#                       Alternate: If HOME is not set or does not exist -- ${appdaemon_venv}
#
# appdaemon_config_dir: Path to directory, where the appdaemon configuration is located.
#                       Default: ${appdaemon_user_dir}/.appdaemon"
#                       Alternate: If HOME is not set or does not exist -- "/usr/local/etc/appdaemon"
#
# appdaemon_venv:       Path to directory, where the Appdaemon virtualenv is located or will be created.
#                       Default: "/usr/local/share/appdaemon"
#
# appdaemon_python:     Set the version of Python, used when creating the virtualenv for Appdaemon.
#                       After changing the Python version, you must recreate the virtualenv for this change to take effect.
#                       Default: NOT SET
#
name=appdaemon
rcvar=${name}_enable
. /etc/rc.subr && load_rc_config ${name}
# Apply rc.conf defaults (":=" only assigns when the variable is unset/empty).
: "${appdaemon_enable:="NO"}"
: "${appdaemon_rc_debug:="OFF"}"
: "${appdaemon_user:="appdaemon"}"
: "${appdaemon_venv:="/usr/local/share/appdaemon"}"
# Resolve the service account's HOME and primary group; abort if it is missing.
if [ ! "$(id ${appdaemon_user} 2>/dev/null)" ]; then
err 1 "user not found: ${appdaemon_user}"
else
HOME="$(getent passwd "${appdaemon_user}" | cut -d: -f6)"
: "${appdaemon_group:="$(id -gn ${appdaemon_user})"}"
fi
# Fall back to venv-based dirs when the account has no usable home directory.
# NOTE(review): "==" inside [ ] is a bashism; FreeBSD sh tolerates it but
# POSIX sh specifies "=" — confirm before porting.
if [ -z "${HOME}" ] || [ ! -d "${HOME}" ] || [ "${HOME}" == "/nonexistent" ] || [ "${HOME}" == "/var/empty" ]; then
: "${appdaemon_config_dir:="/usr/local/etc/${name}"}"
: "${appdaemon_user_dir:="${appdaemon_venv}"}"
export HOME="${appdaemon_user_dir}"
else
: "${appdaemon_user_dir:="${HOME}"}"
: "${appdaemon_config_dir:="${appdaemon_user_dir}/${name}"}"
fi
export PATH=${PATH}:/usr/local/sbin:/usr/local/bin
export TERM=xterm
umask "${appdaemon_rc_umask:-022}"
# The daemon(8) supervisor writes its own pid to $pidfile and the child's
# (appdaemon's) pid to $pidfile_child.
pidfile_child="/var/run/${name}.pid"
pidfile="/var/run/${name}_daemon.pid"
logfile="/var/log/${name}_daemon.log"
command="/usr/sbin/daemon"
extra_commands="upgrade test"
# Sanity-check the virtualenv layout and ownership before any action.
# Sets AD_CMD to the appdaemon executable and cd's into the venv on success;
# exits via err(8) with a descriptive message otherwise.
appdaemon_precmd() {
local _srv_ _own_ _msg_
local _venv_="${appdaemon_venv}"
local _user_="${appdaemon_user}"
if [ ! -d "${_venv_}" ]; then
_msg_="${_venv_} not found"
elif [ ! -f "${_venv_}/bin/activate" ]; then
_msg_="${_venv_}/bin/activate is not found"
elif [ ! -x "${_srv_:="${_venv_}/bin/appdaemon"}" ]; then
_msg_="${_srv_} is not found or is not executable"
elif [ "${_own_:="$(stat -f '%Su' ${_srv_})"}" != ${_user_} ]; then
warn "${_srv_} is currently owned by ${_own_}"
_msg_="${_srv_} is not owned by ${_user_}"
else
AD_CMD="${_srv_}"
cd "${_venv_}" || err 1 "cd ${_venv_}"
return 0
fi
err 1 "${_msg_}"
}
start_precmd=${name}_prestart
# Prepare log/pid files with the service account's ownership and build the
# daemon(8) invocation in rc_flags.
appdaemon_prestart() {
appdaemon_precmd \
&& install -g "${appdaemon_group}" -m 664 -o ${appdaemon_user} -- /dev/null "${logfile}" \
&& install -g "${appdaemon_group}" -m 664 -o ${appdaemon_user} -- /dev/null "${pidfile}" \
&& install -g "${appdaemon_group}" -m 664 -o ${appdaemon_user} -- /dev/null "${pidfile_child}" \
|| return 1
AD_ARGS=""
AD_ARGS="--config ${appdaemon_config_dir}"
if [ -n "${appdaemon_debug_level:-}" ]; then
AD_ARGS="${AD_ARGS} --debug ${appdaemon_debug_level}"
fi
rc_flags="-f -o ${logfile} -P ${pidfile} -p ${pidfile_child} ${AD_CMD} ${AD_ARGS}"
}
start_postcmd=${name}_poststart
# After starting, give the daemon a moment and then report status.
appdaemon_poststart() {
sleep 1
run_rc_command status
}
status_cmd=${name}_status
# Report run state; when running, also print the dashboard URL built from the
# first non-loopback interface address and appdaemon_port (default 5050).
appdaemon_status() {
local _http_ _ip_ _port_
# shellcheck disable=SC2154
if [ -n "${rc_pid}" ]; then
: "${appdaemon_secure:="NO"}"
_ip_="$(ifconfig | sed -En 's/127.0.0.1//;s/.*inet (addr:)?(([0-9]*\.){3}[0-9]*).*/\2/p')"
_port_="${appdaemon_port:-"5050"}"
checkyesno appdaemon_secure && _http_="https" || _http_="http"
echo "${name} is running as pid ${rc_pid}."
echo "${_http_}://${_ip_}:${_port_}"
else
echo "${name} is not running."
return 1
fi
}
stop_postcmd=${name}_postcmd
# Clean up both pid files after a stop.
appdaemon_postcmd() {
rm -f -- "${pidfile}"
rm -f -- "${pidfile_child}"
}
restart_cmd="${name}_restart"
# Restart = stop (ignoring "not running" errors), pause, start.
appdaemon_restart() {
run_rc_command stop 2>/dev/null
sleep 1 ; run_rc_command start
}
stop_precmd=${name}_prestop
# Refuse to stop a child process not owned by the configured service user.
appdaemon_prestop() {
local _owner_
if [ "${_owner_:-"$(stat -f '%Su' ${pidfile_child})"}" != ${appdaemon_user} ]; then
err 1 "${appdaemon_user} can not stop a process owned by ${_owner_}"
fi
}
upgrade_cmd="${name}_upgrade"
# Upgrade appdaemon inside the venv as the service user; restart afterwards
# only if the service was running before (_rcstop_ records stop's status).
appdaemon_upgrade() {
appdaemon_precmd
run_rc_command stop 2>/dev/null; _rcstop_=$?
# shellcheck disable=SC2016
su ${appdaemon_user} -c '
source ${1}/bin/activate || exit 1
pip install --no-cache-dir --upgrade appdaemon
deactivate
' _ ${appdaemon_venv} || exit 1
[ ${_rcstop_} == 0 ] && run_rc_command start
}
test_cmd="${name}_test"
# Diagnostic: activate the venv as the service user and print the working
# directory, Python version, and installed AppDaemon version.
appdaemon_test() {
echo -e "\nTesting virtualenv...\n"
appdaemon_precmd
## Switch users / activate virtualenv / run a command
# shellcheck disable=SC2016
su "${appdaemon_user}" -c '
echo -e "  $(pwd)\n"
source ${1}/bin/activate
echo "  $(python --version)"
echo "  AppDaemon $(pip show appdaemon | grep Version | cut -d" " -f2)"
deactivate
' _ ${appdaemon_venv}
echo
}
# Enable rc.subr debug tracing when requested, then dispatch the subcommand.
checkyesno appdaemon_rc_debug && rc_debug="ON"
run_rc_command "${1}"
| true
|
ac8879644d4852695e7082a7e7c6c2807c37074e
|
Shell
|
DmytroSytnyk/sweet
|
/mule_local/platforms/99_fallback/local_software_default.sh
|
UTF-8
| 1,092
| 3.03125
| 3
|
[
"MIT"
] |
permissive
|
# Fallback platform package list for the MULE/SWEET local-software installer.
MULE_SYSTEM_PACKAGES="libxft-dev libssl-dev texinfo"
# Only require OpenGL/X11 dev packages when a local display is present.
if [[ "$DISPLAY" = ":0" ]]; then
	export MULE_SYSTEM_PACKAGES="$MULE_SYSTEM_PACKAGES libgl1-mesa-dev libxext-dev"
fi
# Verify every required Debian package is installed and bail out early with a
# copy-pastable apt-get hint otherwise. Testing the command directly replaces
# the original's fragile `cmd; if [[ "$?" != 0 ]]` pattern.
for i in $MULE_SYSTEM_PACKAGES; do
	if ! dpkg -s "$i" >/dev/null 2>&1; then
		echo_error "Debian-based system detected and packages missing, please use"
		echo_error " sudo apt-get install $MULE_SYSTEM_PACKAGES"
		return 2>/dev/null
		exit 1
	fi
done
# Local-software install scripts to run, in order. Commented entries are
# intentionally disabled.
#PKGS+=("install_autoconf.sh")
#PKGS+=("install_make.sh")
#PKGS+=("install_gcc8.2.sh")
PKGS+=("install_cmake.sh")
#PKGS+=("install_automake.sh")
PKGS+=("install_fftw3.sh")
PKGS+=("install_eigen3.sh")
#PKGS+=("install_gcc5.3.sh")
#PKGS+=("install_gcc7.2.sh")
PKGS+=("install_lapack.sh")
PKGS+=("install_likwid.sh")
PKGS+=("install_numactl.sh")
#PKGS+=("install_pfasst++.sh")
PKGS+=("install_python3.sh")
PKGS+=("install_scons3.sh")
PKGS+=("install_shtns.sh")
PKGS+=("install_shtns_python.sh")
PKGS+=("install_openmpi.sh")
PKGS+=("install_libpfasst.sh")
PKGS+=("install_libfreetype.sh")
PKGS+=("install_libpng.sh")
PKGS+=("install_sdl2.sh")
|
96d732933adeebe586197ad768f4d0e9eaffd223
|
Shell
|
VE3RD/Nextion
|
/getmaster.sh
|
UTF-8
| 886
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
############################################################
#                 Get Master Server                        #
#                                                          #
#  Prints a string identifying the active DMR Master       #
#  Server configured in /etc/mmdvmhost.                    #
#                                                          #
#  VE3RD    2019-11-14                                     #
############################################################
set -o errexit
set -o pipefail
# Address= value from the [DMR Network] section of /etc/mmdvmhost.
Addr=$(sed -nr "/^\[DMR Network\]/ { :l /^Address[ ]*=/ { s/.*=[ ]*//; p; q;}; n; b l;}" /etc/mmdvmhost)
# Most recent DMRGateway log file (kept for parity with sibling scripts).
f1=$(ls /var/log/pi-star/DMRGateway* | tail -n1)
# Fix: quote $Addr — with errexit, an empty/whitespace value in the original
# unquoted test produced a syntax error and aborted the whole script.
if [ "$Addr" = "127.0.0.1" ]; then
	echo "DMRGateway"
else
	GW="OFF"
	# Look up the human-readable master name matching the address.
	ms=$(sudo sed -n '/^[^#]*'"$Addr"'/p' /usr/local/etc/DMR_Hosts.txt | sed -E "s/[[:space:]]+/|/g" | cut -d'|' -f1)
	echo "MS: $ms"
fi
| true
|
1daf4797dd23eaaaae76f8d4d8266af6491d1fe2
|
Shell
|
SebNania/domotique-scripts
|
/SmartPhoneCheck.sh
|
UTF-8
| 4,847
| 3.875
| 4
|
[] |
no_license
|
#!/bin/bash
# Based on the excellent community work here : https://www.domoticz.com/forum/viewtopic.php?t=6264
# Adapted to my configuration
# Last Update 20160903
# Used to detect if smartphones are in by l2ping on bluetooth and if no response search for the smartphone in arp table
## Installation ##
# Add this script in the /etc/rc.local like that :
# /path/to/your/script/script.sh BTMAC WiFiMAC SwitchID &
# ATTENTION the "&" is very important !!!
if [ -z "$3" ] ; then
echo "Usage: SmartphoneCheck.sh BTMAC WiFiMAC SwitchID"
echo "Example: SmartphoneCheck.sh AA:BB:CC:16:09:51 aa:bb:cc:b4:a5:21 65"
exit
fi
# Main settings
DomoticzIP="127.0.0.1:8080" # Your Domoticz IP address and port if different from 80
# Script variables
LongSleep=200 # Time to sleep between probes if device is within range.
ShortSleep=20 # Time to sleep between probes if device is out of range. Don't make this too long, because the arp-scan and l2ping take almost 10 seconds.
DownStateRecheckTime=5 # Time to sleep between rechecks if device falls out of range.
DownStateRecheckCount=4 # Number of rechecks to perform before device is confirmed out of range (time = DownStateRecheckTime * DownStateRecheckCount)
MACAddressBT=$1 # Set the MAC Address of the Bluetooth interface (script parameter 1)
MACAddressWIFI=$2 # Set the MAC Address of the WiFi interface (script parameter 2)
DeviceID=$3 # Set the Switch ID (script parameter 3)
# Remove Capitals from Wifi mac.
MACAddressWIFI=`echo $MACAddressWIFI | tr '[:upper:]' '[:lower:]'`
#Startup delay
sleep 60 # Wait for the Raspberry to finish booting completely.
#Main loop
while [ 1 ]
do
#Ping test
sudo l2ping -c1 $MACAddressBT > /dev/null 2>&1
Result=$? # Store the return code in BTResult (will be 0 if mac is found).
if [ $Result -eq 0 ] ; then
echo "`date -u` - Performing check: Bluetooth check success.."
else
sudo arp-scan -l -i 1 | grep $MACAddressWIFI > /dev/null 2>&1
Result=$? # Store the return code in WIFIResult (will be 0 if the mac is found).
if [ $Result -eq 0 ] ; then
echo "`date -u` - Performing check: WiFi check success.."
else
echo "`date -u` - Performing check: Both WiFi and Bluetooth unavailable for now.."
fi
fi
if [ $Result -eq 0 ] ; then
#Device in range
LoopSleep=$LongSleep
curl -s "http://$DomoticzIP/json.htm?type=devices&rid=$DeviceID" | grep Status | grep Off > /dev/null
if [ $? -eq 1 ] ; then
# Do nothing device presence did not change
echo "`date -u` - Status Unchanged: Already on, do nothing and wait $LoopSleep seconds.."
else
#Device is status changed to: in range / detected
echo -e "`date -u` - Status Changed: Turning device ID $DeviceID on and wait $LoopSleep seconds.."
`curl -s "http://$DomoticzIP/json.htm?type=command¶m=switchlight&idx=$DeviceID&switchcmd=On" > /dev/null`
fi
else
#Device out of range
LoopSleep=$ShortSleep
curl -s "http://$DomoticzIP/json.htm?type=devices&rid=$DeviceID" | grep Status | grep Off > /dev/null
if [ $? -eq 0 ] ; then
# Do nothing device presence did not change
echo "`date -u` - Status Unchanged: Already off, do nothing and wait $LoopSleep seconds.."
else
x=0
while [ $x -le $DownStateRecheckCount ]
do
x=$(( $x + 1 ))
if [ $x -eq $DownStateRecheckCount ] ; then
# Device status changed to : not in range / not detected
echo -e "`date -u` - Status Changed: Turning device ID $DeviceID off"
`curl -s "http://$DomoticzIP/json.htm?type=command¶m=switchlight&idx=$DeviceID&switchcmd=Off" > /dev/null`
break
fi
echo "`date -u` - Performing recheck: Device possibly down, rechecking and wait $DownStateRecheckTime seconds.."
#Ping test
sudo arp-scan -l -i 1 | grep $MACAddressWIFI > /dev/null 2>&1
Result=$? # Store the return code in WIFIResult (will be 0 if the mac is found).
if [ $Result -eq 0 ] ; then
echo "`date -u` - Performing recheck: WiFi check success.."
else
echo "`date -u` - Performing recheck: Both WiFi and Bluetooth remain unavailable.."
fi
if [ $Result -eq 0 ] ; then
echo "`date -u` - Status Unchanged: Device seemed to be down but is not (recheck). Do nothing and wait $LoopSleep seconds.."
break
fi
sleep $DownStateRecheckTime
done
fi
fi
# Wait before running loop again
sleep $LoopSleep
done
| true
|
c8845ed6185e5cba31f393cddf56a9eebcd27cf3
|
Shell
|
mmalewski/open-cas-linux
|
/configure.d/2_write_flush.conf
|
UTF-8
| 1,049
| 2.609375
| 3
|
[
"BSD-3-Clause-Clear",
"BSD-3-Clause",
"MIT"
] |
permissive
|
#!/bin/bash
#
# Copyright(c) 2012-2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
# Open CAS configure-time probe: detect which kernel flush/barrier API is
# available and emit matching CAS_* compatibility macros via add_define.
. `dirname $0`/conf_framework
if compile_module "WRITE_FLUSH" "linux/fs.h"
then
# Kernel exposes the legacy WRITE_FLUSH interface.
add_define "CAS_RQ_IS_FLUSH(rq) \\
((rq)->cmd_flags & CAS_REQ_FLUSH)"
add_define "CAS_WRITE_FLUSH \\
WRITE_FLUSH"
if compile_module "BIO_FLUSH" "linux/bio.h"
then
add_define "CAS_IS_WRITE_FLUSH(flags) \\
((flags) & BIO_FLUSH)"
else
add_define "CAS_IS_WRITE_FLUSH(flags) \\
((flags) & CAS_REQ_FLUSH)"
fi
elif compile_module "REQ_PREFLUSH" "linux/blk_types.h"
then
# Newer kernels: REQ_PREFLUSH replaces WRITE_FLUSH.
add_define "CAS_RQ_IS_FLUSH(rq) \\
((rq)->cmd_flags & REQ_PREFLUSH)"
add_define "CAS_WRITE_FLUSH \\
(REQ_OP_WRITE | REQ_PREFLUSH)"
add_define "CAS_IS_WRITE_FLUSH(flags) \\
(CAS_WRITE_FLUSH == ((flags) & CAS_WRITE_FLUSH))"
else
# Oldest fallback: WRITE_BARRIER semantics.
add_define "CAS_RQ_IS_FLUSH(rq) \\
0"
add_define "CAS_IS_WRITE_FLUSH(flags) \\
(WRITE_BARRIER == ((flags) & WRITE_BARRIER))"
add_define "CAS_WRITE_FLUSH \\
WRITE_BARRIER"
fi
| true
|
8083143a9f2f0072eebe6f76d263437d42d639dc
|
Shell
|
ntk148v/openinfra-summit-demo
|
/2020/config_supervisor.sh
|
UTF-8
| 592
| 2.71875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# configure supervisor to run a private gunicorn web server, and
# to autostart it on boot and when it crashes
# stdout and stderr logs from the server will go to /var/log/demo
mkdir /var/log/demo
# Write the supervisor program definition (heredoc content is literal config).
cat >/etc/supervisor/conf.d/demo.conf <<EOF
[program:demo]
command=/home/demo/venv/bin/gunicorn -b 127.0.0.1:8000 -w 4 --chdir /home/demo --log-file - app:app
user=demo
autostart=true
autorestart=true
stderr_logfile=/var/log/demo/stderr.log
stdout_logfile=/var/log/demo/stdout.log
EOF
# Load the new program definition and make supervisor persistent.
supervisorctl reread
supervisorctl update
systemctl restart supervisor
systemctl enable supervisor
| true
|
0ab83fae854be929bfa24248d8bb1807ca7c4767
|
Shell
|
BenjaminPlimmer/Diiage2017
|
/AULAGNE/process.sh
|
UTF-8
| 2,666
| 3.984375
| 4
|
[] |
no_license
|
#!/bin/sh
#AIM : Retrieve the process list as a tree, showing the parent/child
#      relationship together with each process ID and name.
#AUTHORS : AULAGNE Jérôme
#PARAMS :
#MODIF :
#27/12/2016 - Rework of the initial loop - variables collected into a temp file.
#29/12/2016 - Added the function that displays the result as a tree.
#24/01/2017 - Added the ability to kill an active process and its children.
# NOTE(review): shebang is /bin/sh but "function" and "for (( ))" below are
# bashisms — confirm this is actually run under bash.
#AIM : Loop over the processes found in "/proc" and store their
#      PID, PPID and name in the temp file "/tmp/infosprocess".
#PARAMS :
Process=$(find /proc -maxdepth 1 -type d -regex ".*[1-9]"| awk -F "/" '{print $3}')
for i in $Process
do
#store the process name in a variable
pname=$(grep "Name:" /proc/$i/status 2>/dev/null | awk -F " " '{ print $2 }')
#store the process PPID in a variable
ppid=$(grep "PPid:" /proc/$i/status 2>/dev/null | awk -F " " '{ print $2 }')
#store the process PID in a variable
pid=$(grep "Pid:" /proc/$i/status 2>/dev/null | awk -F " " '{ print $2 }')
#append the values above to a temporary file.
echo $i';'$ppid';'$pname >> /tmp/infosprocess
done
#Start at level 0 for the tree (absolute parent process).
# NOTE(review): "currentlevel" here is never read — the function below uses
# "CurrentLevel" (different case), which therefore starts empty/0 by accident.
currentlevel=0
root="|"
echo $root
#AIM : Function that renders the process hierarchy as a tree.
#PARAMS : $1 - PID whose children should be printed recursively.
function tree
{
# Read back the temp-file records, splitting fields on ";"
while IFS=";" read pid ppid name
do
# If this record's parent PID equals the PID we are expanding...
if [ "$ppid" == "$1" ]
then
#Indent one "|   " per current nesting level.
for (( i=0; i<$CurrentLevel; i++ ))
do
# print the "|   " marker for each level when a child process is linked.
echo -e -n "|  "
done
#print "+-------" followed by the child's pid & name
echo -e "+-------$pid($name)"
#depth increases while descending into children.
CurrentLevel=$((CurrentLevel+1))
tree $pid
#back out one level after the recursive call returns.
CurrentLevel=$((CurrentLevel-1))
fi
done < /tmp/infosprocess
}
# NOTE(review): the kill prompt runs BEFORE the tree is displayed, so the
# user must name a process without seeing the tree; also the temp file is
# not refreshed after the kills, so "tree 0" shows pre-kill data.
read -p "Donnez le nom du processus a kill :" name
res=$(grep $name /tmp/infosprocess | awk -F ";" '{print $1}')
for j in $res
do
kill $j
done
echo processus $name kill
#render the tree from the root.
tree 0
#remove the temp file.
rm /tmp/infosprocess
| true
|
4520dc5bb1b017e2d2ab00beccec38dc32b9e8a7
|
Shell
|
svsgvarma/scripts_benth
|
/Run-RNA-workflow-5.0_DE_CDS.sh
|
UTF-8
| 2,211
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
#Rna-seq using STAR
#./Run-RNA-workflow-5.0_DE_CDS.sh > Run-RNA-workflow-5.0_DE_CDS.sh.log 2>&1
echo "run script for rna-seq-analysis"
############################################
# 2.2. Identifying differentially expressed (DE) transcripts
############################################
#Extracting differentially expressed transcripts and generating heatmaps
# Paths to the Trinity toolkit and project data. NOTE(review): $Trinity is
# assigned but only $Trinity_path is used below.
Trinity="/data/bioinfo/trinityrnaseq-Trinity-v2.6.5/Trinity"
Trinity_path="/data/bioinfo/trinityrnaseq-Trinity-v2.6.5"
work_dir=/home/gala0002/proj/proj_Ramesh/RNA-seq_2020/
ref=/home/gala0002/proj/proj_Ramesh/Ref_Nicotiana_benthamiana/
mkdir -p ${work_dir}SL-2400_3.0_Align-STAR_CDS/
out_dir=${work_dir}SL-2400_3.0_Align-STAR_CDS/
cd $work_dir
mkdir -p ${work_dir}"DESeq2_genes"
############################################
#Build Transcript and Gene Expression Matrices
#https://github.com/trinityrnaseq/trinityrnaseq/wiki/Trinity-Transcript-Quantification#express-output
# perl $Trinity_path/util/align_and_estimate_abundance.pl \
# --transcripts Trinity.fasta \
# --seqType fq \
# --samples_file Metadata_CDS_Mock-PMTVWT-delta8k_merge.txt \
# --est_method salmon \
# --trinity_mode --prep_reference \
# --output_dir outdir_estimate-ab > estimate-ab.log 2>&1
############################################
#Run the DE analysis at the gene level
#DESeq2
$Trinity_path/Analysis/DifferentialExpression/run_DE_analysis.pl \
--matrix Express_CDS_Mock-PMTVWT-delta8k.gene.counts.matrix \
--samples_file Metadata_CDS_Mock-PMTVWT-delta8k_merge.txt \
--method DESeq2 \
--output DESeq2_genes > Trinity_DGE-run.log 2>&1
#Extracting differentially expressed transcripts and generating heatmaps
#Extract those differentially expressed (DE) transcripts that are at least 4-fold (C is set to 2^(2) ) differentially expressed at a significance of <= 0.001 (-P 1e-3) in any of the pairwise sample comparisons
# NOTE(review): the actual thresholds passed below are -P 5e-2 -C 1.0, which
# do not match the 4-fold / 1e-3 description above — confirm intent.
cd DESeq2_genes/
nice -n 5 $Trinity_path/Analysis/DifferentialExpression/analyze_diff_expr.pl \
--matrix ../Express_CDS_Mock-PMTVWT-delta8k.gene.counts.matrix \
--samples ../Metadata_CDS_Mock-PMTVWT-delta8k_merge.txt -P 5e-2 -C 1.0 > Trinity_DGE-analyze.log 2>&1
############################################
echo "Script done...."
| true
|
d4636264652620795a2fc5b379e8f9f3e180002e
|
Shell
|
zhuorg/build-arch-package
|
/run.sh
|
UTF-8
| 732
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash -ex
# GitHub-Action entry point: build an Arch package in a clean chroot from a
# PKGBUILD template, substituting the current commit SHA.
if [ -z "$INPUT_PKGBUILD" ] || [ -z "$INPUT_OUTDIR" ] || [ -z "$GITHUB_SHA" ]; then
echo 'Missing environment variables'
exit 1
fi
# Resolve environment paths (eval expands things like ~ or nested $VARS
# supplied through the action inputs)
INPUT_PKGBUILD="$(eval echo $INPUT_PKGBUILD)"
INPUT_OUTDIR="$(eval echo $INPUT_OUTDIR)"
# Prepare the environment
pacman -Syu --noconfirm --noprogressbar --needed base-devel devtools btrfs-progs dbus sudo
dbus-uuidgen --ensure=/etc/machine-id
# makepkg refuses to run as root; build as an unprivileged user.
useradd -m user
cd /home/user
sed -i "s|MAKEFLAGS=.*|MAKEFLAGS=-j$(nproc)|" /etc/makepkg.conf
# Inject the commit SHA into the PKGBUILD template's %COMMIT% placeholder.
sed "s|%COMMIT%|$GITHUB_SHA|" "$INPUT_PKGBUILD" > "$GITHUB_WORKSPACE"/PKGBUILD
# Build the package
extra-x86_64-build -- -U user
# Save the artifacts
mkdir -p "$INPUT_OUTDIR"
cp *.pkg.* "$INPUT_OUTDIR"/
| true
|
13cc0c020cd197041a46faeafb4369ab0d4fea18
|
Shell
|
Nightmayr/dotfiles
|
/centos/.bashrc
|
UTF-8
| 690
| 2.828125
| 3
|
[] |
no_license
|
# .bashrc
# Source global definitions
if [ -f /etc/bashrc ]; then
. /etc/bashrc
fi
# Uncomment the following line if you don't like systemctl's auto-paging feature:
# export SYSTEMD_PAGER=
# User specific aliases and functions
# Prompt override. Intentionally left unclosed — PROMPT_COMMAND below passes
# "]$ " to __git_ps1, which completes the bracket after the git status.
PS1='[\u@\h \W'
# Git Auto-completion
if [ -f ~/.git-completion.bash ]; then
. ~/.git-completion.bash
fi
# Git Prompt
source /usr/share/git-core/contrib/completion/git-prompt.sh
export GIT_PS1_SHOWUNTRACKEDFILES=true
export GIT_PS1_SHOWDIRTYSTATE=true
export GIT_PS1_SHOWCOLORHINTS=true
export GIT_PS1_SHOWUPSTREAM=auto
export GIT_PS1_SHOWSTASHSTATE=true
export PREPROMPT=$PS1
export PROMPT_COMMAND='__git_ps1 "$PREPROMPT" "]$ "'
| true
|
f39112289fe80bfbe9d2ab47990f06c6911fd3e7
|
Shell
|
Carotti/bedrock2
|
/processor/integration/interactive.sh
|
UTF-8
| 253
| 2.71875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Rebuild-and-view loop: remake the simulation, replace the running gtkwave
# instance with one showing the fresh VCD, then block until a .v file changes.
set -e
while : ; do
# "|| true" keeps the loop alive even when make or kill fails.
make || true
kill "$oldgtkwave" || true
gtkwave --chdir /tmp/ --rcvar 'enable_vcd_autosave yes' --rcvar 'do_initial_zoom_fit yes' "$(realpath system.vcd)" &
oldgtkwave=$!
# Wait for any Verilog source modification before iterating again.
inotifywait -e modify *.v -r . || true
done
| true
|
b9a13a1fb65b91e8581ebd49d7b543b32dc87880
|
Shell
|
tanpuekai/autism
|
/info.score.sh
|
UTF-8
| 476
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# PBS batch job: for every imputed VCF, extract three INFO subfields into
# INFO.txt and feed them to count_info_score.R.
#PBS -q default
#PBS -l nodes=1:ppn=1
#PBS -l mem=20gb
#PBS -l walltime=63:30:00
#PBS -N Ireetings-
#PBS -j oe
cd $PBS_O_WORKDIR
echo `pwd`
for j in `ls -d step9-impute-all/out-b37-*`
do
echo $j
echo ""
for i in `ls $j/*.vcf.gz`
do
# Keep only INFO column data lines, then split "key=value;" pairs into tabs.
zcat $i|cut -f8|grep -v "##"|grep -v INFO > INFO.txt
sed -i 's/;/\t/g' INFO.txt
sed -i 's/=/\t/g' INFO.txt
# Keep fields 2, 4 and 6 (the values of the first three INFO keys).
cut -f2,4,6 INFO.txt >tmp.txt
mv tmp.txt INFO.txt
echo -n $i
Rscript count_info_score.R
done
done
| true
|
cba86f4fcb62d6588f6d256c3ab627c8eec5993e
|
Shell
|
gitsome/bash-scripting
|
/machine-data/linux-course-image-1/luser-demo07.sh
|
UTF-8
| 357
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# Demo: positional parameters, a counted while-loop, and `shift`.

# Print the first three positional parameters, each on its own labelled
# line, followed by a blank line.
show_first_three() {
    echo "Parameter1: ${1}"
    echo "Parameter2: ${2}"   # fixed copy-paste label (was "Parameter1")
    echo "Parameter3: ${3}"   # fixed copy-paste label (was "Parameter1")
    echo ""
}

# Count from 0 to 4, then print a blank line.
count_to_five() {
    local x=0
    while [[ $x -lt 5 ]]; do
        echo "yayer ${x}"
        x=$((x + 1))
    done
    echo ""
}

# Consume the parameter list one element at a time with `shift`, printing
# the remaining count and first three parameters on each iteration.
consume_params() {
    while [[ ${#} -gt 0 ]]; do
        echo "The number of parameters: ${#}"
        show_first_three "$@"
        shift
    done
}

main() {
    show_first_three "$@"
    count_to_five
    consume_params "$@"
}

main "$@"
| true
|
ae6116103587b3bc32adabbb5316d95c5b322d05
|
Shell
|
cchaiyatad/reactjs-pdfmake-boilerplate
|
/shell/makeImage.sh
|
UTF-8
| 455
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
# Emit a JavaScript snippet mapping each image's basename to a base64 data
# URI. Output goes to image_dict.js when stdout is a terminal, otherwise to
# stdout (so the script can be piped).
if [ -t 1 ]; then
    target="image_dict.js"
else
    target="/dev/stdout"
fi
(
    echo -n "this.imageDict = this.imageDict || {}; this.imageDict.src = {"
    for file in "$@"; do
        # Consume via $1/shift so $# tracks the *remaining* arguments,
        # which drives the trailing-comma decision below.
        file=$1
        shift
        echo -n '"'
        echo -n "$(basename "$file")"   # quoted: paths may contain spaces
        echo -n '":"'
        echo -n 'data:image/'
        echo -n "${file#*.}"            # MIME subtype = text after first dot
        echo -n ';base64,'
        # NOTE(review): `base64 -b 0` is the BSD/macOS no-wrap flag; GNU
        # coreutils uses `-w 0` — confirm the target platform.
        echo -n "$(base64 -b 0 "$file")"
        echo -n '"'
        # Comma between entries, none after the last.
        if [ "$#" -gt 0 ]; then
            echo -n ","
        fi
    done
    echo -n "};"
) > "$target"
| true
|
1b50f04d813d0214f286cff1894c64dfb564619c
|
Shell
|
HansUXdev/Learn-MongoDB-4.x
|
/chapters/11/install_ssl_cert.sh
|
UTF-8
| 1,117
| 2.859375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Generate a self-signed CA plus CA-signed server and client key/cert
# bundles under /etc/.certs.
echo "Generating SSL certificates ..."
# Pseudo-random serial seed: the seconds field of `date` output.
export RAND_DIGITS=`date |cut -c 18-20`
export EMAIL_ADDR="doug@unlikelysource.com"
mkdir /etc/.certs
cd /etc/.certs
echo $RAND_DIGITS >file.srl
touch /root/.rnd
# Create the CA certificate.
# NOTE(review): no -keyout is given, so the signing steps below rely on
# openssl writing the key to its default file (privkey.pem) — confirm the
# openssl.cnf default on the target system.
openssl req -out ca.pem -new -x509 -days 3650 -subj "/C=TH/ST=Surin/O=BigLittle/CN=root/emailAddress=$EMAIL_ADDR" -passout pass:password
# Server: key, CSR (CN = this host), CA-signed cert, then key+cert bundle.
openssl genrsa -out server.key 2048
openssl req -key server.key -new -out server.req -subj "/C=TH/ST=Surin/O=BigLittle/CN=$HOSTNAME/emailAddress=$EMAIL_ADDR"
openssl x509 -req -in server.req -CA ca.pem -CAkey privkey.pem -CAserial file.srl -out server.crt -days 3650 -passin pass:password
cat server.key server.crt > server.pem
openssl verify -CAfile ca.pem server.pem
# Client: same flow with a fixed CN of "client1".
openssl genrsa -out client.key 2048
openssl req -key client.key -new -out client.req -subj "/C=TH/ST=Surin/O=BigLittle/CN=client1/emailAddress=$EMAIL_ADDR"
openssl x509 -req -in client.req -CA ca.pem -CAkey privkey.pem -CAserial file.srl -out client.crt -days 3650 -passin pass:password
cat client.key client.crt > client.pem
openssl verify -CAfile ca.pem client.pem
# Make the generated material read-only (world-readable).
chmod -R -v 444 /etc/.certs
| true
|
13ac272d55554434ee1d8e4c319befa6b66a9f0c
|
Shell
|
running-elephant/datart
|
/bin/datart-server.sh
|
UTF-8
| 3,107
| 3.421875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Datart
# <p>
# Copyright 2021
# <p>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# <p>
# http://www.apache.org/licenses/LICENSE-2.0
# <p>
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Service control script for the Datart server (start/stop/restart/status).
# Resolve the installation root as the parent of this script's directory.
BASE_DIR=$(cd "$(dirname "$0")/.."; pwd -P)
echo "working dir ${BASE_DIR}"
cd "${BASE_DIR}"
# All jars under lib/ form the classpath; this Spring Boot main class is
# what the ps/grep probes below look for.
CLASS_PATH="${BASE_DIR}/lib/*"
START_CLASS="datart.DatartServerApplication"
#java -server -Xms2G -Xmx2G -Dspring.profiles.active=config -Dfile.encoding=UTF-8 -cp "${CLASS_PATH}" datart.DatartServerApplication
datart_status(){
    # Inverted convention kept from the original: return 0 when the server
    # is NOT running, non-zero when a DatartServerApplication process
    # launched from this installation's lib/ directory exists.
    if ps -ef | grep -v grep | grep "${BASE_DIR}/lib" | grep -q 'DatartServerApplication'; then
        return 1
    fi
    return 0
}
datart_start(){
    # Start the server in the background unless it is already running.
    # (datart_status returns 0 when the server is NOT running.)
    source ~/.bashrc
    datart_status >/dev/null 2>&1
    if [[ $? -eq 0 ]]; then
        nohup java -server -Xms2G -Xmx2G -Dspring.profiles.active=config -Dfile.encoding=UTF-8 -cp "${CLASS_PATH}" "${START_CLASS}" &
    else
        echo ""
        #PID=`ps -ef | awk '/DatartServerApplication/ && !/awk/{print $2}'`
        # Already running: just report the existing PID.
        PID=`ps -ef | grep -v grep | grep "${BASE_DIR}/lib" | grep 'DatartServerApplication' | awk {'print $2'}`
        echo "Datart is Running Now..... PID is ${PID} "
    fi
}
datart_stop(){
    # Kill the running server process, or report that none is running.
    datart_status >/dev/null 2>&1
    if [[ $? -eq 0 ]]; then
        echo ""
        echo "Datart is not Running....."
        echo ""
    else
        #ps -ef | awk '/DatartServerApplication/ && !/awk/{print $2}'| xargs kill -9
        # NOTE(review): SIGKILL gives the JVM no chance to shut down
        # cleanly — a TERM-first approach may be preferable.
        ps -ef | grep -v grep | grep "$BASE_DIR/lib" | grep 'DatartServerApplication' | awk {'print $2'} | xargs kill -9
    fi
}
# Dispatch on the first CLI argument: start | stop | restart | status.
case $1 in
	start )
        echo ""
        echo "Datart Starting........... "
        echo ""
        datart_start
        ;;
    stop )
        echo ""
        echo "Datart Stopping.......... "
        echo ""
        datart_stop
        ;;
    restart )
        echo "Datart is Restarting.......... "
        datart_stop
        echo ""
        datart_start
        echo "Datart is Starting.......... "
        ;;
    status )
        # datart_status returns 0 when the server is NOT running.
        datart_status>/dev/null 2>&1
        if [[ $? -eq 0 ]]; then
            echo ""
            echo "Datart is not Running......"
            echo ""
        else
            echo ""
            #PID=`ps -ef | awk '/DatartServerApplication/ && !/awk/{print $2}'`
            PID=`ps -ef | grep -v grep | grep "${BASE_DIR}/lib" | grep 'DatartServerApplication' | awk {'print $2'}`
            echo "Datart is Running..... PID is ${PID}"
            echo ""
        fi
        ;;
    * )
        echo "Usage: datart-server.sh (start|stop|status|restart)"
esac
| true
|
fe4d0b36c8081cca389bfe159be7977395297c51
|
Shell
|
harold-li/oj
|
/deploy.sh
|
UTF-8
| 2,727
| 3.625
| 4
|
[] |
no_license
|
#! /bin/bash
# Interactive deploy script: optionally pulls from git, patches DB
# credentials into oj.properties, builds oj.war with gradle, deploys it to
# a Tomcat webapps directory, and preserves user content (assets, uploads,
# downloads) in /var/www via backups and symlinks.
read -p "Do you want to update from git?[Y/n]" PULL
[ "$PULL" == 'Y' -o "$PULL" == 'y' ] && git pull
# Show the current devMode setting so the operator can confirm it.
grep 'devMode' WebRoot/WEB-INF/oj.properties
read -p "Do you want to continue with this mode?[Y/n]" cont
[ "$cont" == 'n' -o "$cont" == 'N' ] && exit
# Optionally override the MySQL credentials baked into oj.properties.
read -p "Input the mysql username, press enter to use default value: " USERNAME
read -p "Input the mysql password, press enter to use default value: " PASSWORD
[ -n "$USERNAME" ] && sed -i "s/dev.user=.*/dev.user=$USERNAME/" WebRoot/WEB-INF/oj.properties
[ -n "$PASSWORD" ] && sed -i "s/dev.password=.*/dev.password=$PASSWORD/" WebRoot/WEB-INF/oj.properties
# Locate the Tomcat webapps dir: explicit argument first, then tomcat7,
# then tomcat8, finally prompt. Owner/group are read from the directory.
if [ $# -gt 0 ]; then
	TOMCAT=$1/webapps
	USER=`stat -c '%U' $TOMCAT/`
	GROUP=`stat -c '%G' $TOMCAT/`
fi
# NOTE(review): $TOMCAT is unquoted below; when it is unset, `[ ! -d ]`
# evaluates to false and the fallback branch is skipped — confirm intent
# (quoting "$TOMCAT" would make the unset case take the fallback).
if [ ! -d $TOMCAT ]; then
	USER=tomcat7
	GROUP=tomcat7
	TOMCAT=/usr/share/tomcat7/webapps
fi
if [ ! -d $TOMCAT ]; then
	USER=tomcat8
	GROUP=tomcat8
	TOMCAT=/usr/share/tomcat8/webapps
	if [ ! -d $TOMCAT ]; then
		read -p "Please input the tomcat home path: " TOMCAT
		TOMCAT=$TOMCAT/webapps
		if [ ! -d $TOMCAT ]; then
			echo "Cannot find Tomcat!"
			exit
		fi
		USER=`stat -c '%U' $TOMCAT/`
		GROUP=`stat -c '%G' $TOMCAT/`
	fi
fi
echo "Use tomcat webapps: $TOMCAT"
# find -type d -name assets -exec sudo cp -r {} /var/www/ \;
sudo cp -r WebRoot/assets/ /var/www/
#export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64
gradle build
[ $? -ne 0 ] && exit 1
# Back up uploaded images before removing the old deployment's content.
sudo mkdir -p ~/oj_backup/upload/image/
echo "backup /var/www/upload/image/ to ~/oj_backup/upload/image/"
sudo rsync -r /var/www/upload/image/ ~/oj_backup/upload/image/
sudo rm -rf $TOMCAT/oj/upload
sudo rm -rf $TOMCAT/oj/download
echo "copy build/libs/oj.war to $TOMCAT/"
sudo cp build/libs/oj.war $TOMCAT
echo "waiting war deploy..."
sleep 10
# Poll until Tomcat has exploded the WAR; every 5th attempt, touch the WAR
# to nudge Tomcat into redeploying it.
CNT=0
while [ ! -e $TOMCAT/oj/upload/ ]; do
	echo "Please start the tomcat service!"
	((CNT++))
	sleep 5
	if [ $CNT -eq 5 ]; then
		sudo touch $TOMCAT/oj.war
		CNT=0
	fi
done
echo "remove $TOMCAT/oj/assets/"
sudo rm -rf $TOMCAT/oj/assets/
# Move the freshly deployed upload/download dirs to /var/www, then replace
# them with symlinks so content survives future redeployments.
# NOTE(review): `2>&1 >/dev/null` sends stderr to the terminal and only
# stdout to /dev/null — confirm that ordering is intentional.
sudo cp -r $TOMCAT/oj/upload/ /var/www/ 2>&1 >/dev/null
sudo rm -rf $TOMCAT/oj/upload/
sudo cp -r $TOMCAT/oj/download/ /var/www/ 2>&1 >/dev/null
sudo rm -rf $TOMCAT/oj/download/
echo "change owner to $USER:$GROUP"
sudo chown -R $USER:$GROUP /var/www/assets
sudo chown -R $USER:$GROUP /var/www/upload
sudo chown -R $USER:$GROUP /var/www/download
echo "/var/www/"
ls -l --color=auto /var/www/
echo "make soft link"
sudo ln -sf /var/www/assets $TOMCAT/oj/assets
sudo ln -sf /var/www/upload $TOMCAT/oj/upload
sudo ln -sf /var/www/download $TOMCAT/oj/download
echo "$TOMCAT/oj/"
ls -l --color=auto $TOMCAT/oj/
echo "OJ deploy completed."
| true
|
4df6a1e857ca302d4f6357ba02a66a3c606dc72d
|
Shell
|
naikshrihari/bashcode
|
/compareString.sh
|
UTF-8
| 161
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# Read two strings interactively and report whether they are identical.
read -p "String 1:" str1
read -p "String 2:" str2
# Branches inverted relative to a naive equality check; output is the same.
if [ "$str1" != "$str2" ]
then
    echo "str1 and str2 are different"
else
    echo "both are same"
fi
| true
|
1f6004dd872b7d3096d4b132ff21174c5c0ccf3f
|
Shell
|
LandRover/StaffjoyV2
|
/vagrant/ubuntu_mirror_replace_to_fastest.sh
|
UTF-8
| 936
| 4.09375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash -e
# Replace the apt mirror in /etc/apt/sources.list with the given URL, or
# with the fastest mirror found by the helper script when none is given.
# NOTE: the -e in the shebang is lost if the script is run as `bash file`.
usage() {
cat << EOF
Usage: sudo $0 [mirror_url]
Example: sudo $0 http://archive.ubuntu.com/ubuntu
Replaces the apt mirror to the input mirror_url
EOF
}
# ARGS
FASTEST_MIRROR_URL=$1
if [ -z "$1" ]
then
    # No URL supplied: probe for the fastest mirror (helper lives next to
    # this script).
    FASTEST_MIRROR_URL=`cd $(dirname $BASH_SOURCE); ./helpers/ubuntu_mirror_test.sh`
    echo $FASTEST_MIRROR_URL;
fi
# Sanity-check that the value looks like an http/https/ftp URL.
HTTP_REGEX='(https?|ftp)://[-A-Za-z0-9\+&@#/%?=~_|!:,.;]*[-A-Za-z0-9\+&@#/%=~_|]'
if [[ ! $FASTEST_MIRROR_URL =~ $HTTP_REGEX ]]
then
    echo "Invalid mirror_url found: $FASTEST_MIRROR_URL";
    usage; exit 1;
fi
# Current mirror = second field of the first "deb " line.
OLD_APT_SOURCE=`cat /etc/apt/sources.list | grep ^deb\  | head -n1 | cut -d\  -f2`
# Keep a one-time pristine backup, then rewrite via a temp copy.
[ -e /etc/apt/sources.list.orig ] || cp /etc/apt/sources.list /etc/apt/sources.list.orig
cp /etc/apt/sources.list /etc/apt/sources.list.tmp
sed "s,$OLD_APT_SOURCE,$FASTEST_MIRROR_URL," < /etc/apt/sources.list.tmp > /etc/apt/sources.list
echo "[x] Done, apt source replaced to: $FASTEST_MIRROR_URL";
| true
|
5a87203cebf7b86728879a4c8ccf477fc46159fb
|
Shell
|
cloudfoundry/bosh-linux-stemcell-builder
|
/stemcell_builder/stages/escape_ctrl_alt_del/apply.sh
|
UTF-8
| 342
| 2.875
| 3
|
[
"LGPL-2.1-only",
"Artistic-2.0",
"LicenseRef-scancode-other-permissive",
"MPL-1.1",
"GPL-1.0-or-later",
"GPL-3.0-or-later",
"LicenseRef-scancode-unicode-mappings",
"Artistic-1.0-Perl",
"LGPL-2.1-or-later",
"LicenseRef-scancode-unknown-license-reference",
"GPL-2.0-only",
"GPL-2.0-or-later",
"GPL-3.0-only",
"Artistic-1.0",
"Apache-2.0",
"LGPL-3.0-only",
"LGPL-2.0-or-later",
"Ruby",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-warranty-disclaimer",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-public-domain-disclaimer",
"BSD-2-Clause"
] |
permissive
|
#!/usr/bin/env bash
# Stemcell builder stage: override Control-Alt-Delete inside the chroot so
# it only logs a security message instead of rebooting.
set -e
base_dir=$(readlink -nf $(dirname $0)/../..)
# Stage helpers; these provide $chroot among other things.
source $base_dir/lib/prelude_apply.bash
source $base_dir/lib/prelude_bosh.bash
echo 'Overriding for Control-Alt-Delete'
mkdir -p $chroot/etc/init
# Upstart .override file replaces the default ctrl-alt-del action.
echo 'exec /usr/bin/logger -p security.info "Control-Alt-Delete pressed"' > $chroot/etc/init/control-alt-delete.override
| true
|
473e4e36fddcd6a0f93cbc8edbf8652b546db518
|
Shell
|
bubendorf/gsak2gpx
|
/createRUPI.sh
|
UTF-8
| 5,413
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# Erzeugt die RUPI Dateien für Sygic
# (Generates the RUPI POI files for Sygic: exports geocache categories from
# GSAK databases to CSV per region, converts them to .rupi, and triggers a
# Syncthing rescan. Regions are processed in parallel.)
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
cd $DIR
# env.sh provides JAVA, JAR, RUPI_JAR, CSV_PATH, RUPI_PATH, ICON_PATH,
# DB/DB2/FOUND_DB, CYG2DOS, TASKS, SYNCTHING_KEY, BASE — TODO confirm.
. ./env.sh
cd $BASE
export PATH=/usr/local/bin:$PATH
export OPTS="-XX:+UseParallelGC -Xmx1500M -Dorg.slf4j.simpleLogger.defaultLogLevel=info"
export ENCODING=utf-8
export CAT_PATH="categories/rupi categories/include"
# Export all category CSVs for one Sygic region ($1), then convert them to
# .rupi files. Exported so GNU parallel can invoke it.
function createCountry() {
	# $1 Sygic Region/Gebiet
	mkdir -p $CSV_PATH/$1
	# Found Active
	$JAVA $OPTS -jar $JAR --database `$CYG2DOS $FOUND_DB` --categoryPath $CAT_PATH --categories Active \
		 --outputPath $CSV_PATH/$1 --outputFormat plainText --extension _Found.csv \
		 --param sygic="$1" found=1 --encoding $ENCODING
	#Found Archived
	$JAVA $OPTS -jar $JAR --database `$CYG2DOS $FOUND_DB` --categoryPath $CAT_PATH --categories Archived \
		 --outputPath $CSV_PATH/$1 --outputFormat plainText --extension _Found.csv \
		 --param sygic="$1" found=1 --encoding $ENCODING
	# Parking in 0er Gemeinden (municipalities with no finds yet)
	$JAVA $OPTS -jar $JAR --database `$CYG2DOS $DB $DB2` --categoryPath $CAT_PATH --categories Parking \
		 --outputPath $CSV_PATH/$1 --outputFormat plainText --extension _G0.csv \
		 --param sygic="$1" gemeinde0=0 --encoding $ENCODING
	# Parking in bereits gefundenen Gemeinden (already-found municipalities)
	$JAVA $OPTS -jar $JAR --database `$CYG2DOS $DB $DB2` --categoryPath $CAT_PATH --categories Parking \
		 --outputPath $CSV_PATH/$1 --outputFormat plainText --extension .csv \
		 --param sygic="$1" gemeinde0=1 --encoding $ENCODING
	# Alle Event,Virtual,Physical
	$JAVA $OPTS -jar $JAR --database `$CYG2DOS $DB $DB2` --categoryPath $CAT_PATH --categories Event,Virtual,Physical \
		 --outputPath $CSV_PATH/$1 --outputFormat plainText --extension .csv \
		 --param sygic="$1" disabled=0 --encoding $ENCODING
	# Active, Not corrected, 0er Gemeinden
	$JAVA $OPTS -jar $JAR --database `$CYG2DOS $DB $DB2` --categoryPath $CAT_PATH --categories Traditional,Multi,Unknown,Wherigo,VirtualCache,Earth,Letterbox \
		 --outputPath $CSV_PATH/$1 --outputFormat plainText --extension _G0.csv \
		 --param sygic="$1" disabled=0 corrected=0 gemeinde0=0 --encoding $ENCODING
	# Active, Corrected, 0er Gemeinden
	$JAVA $OPTS -jar $JAR --database `$CYG2DOS $DB $DB2` --categoryPath $CAT_PATH --categories Traditional,Multi,Unknown,Wherigo,VirtualCache,Earth,Letterbox \
		 --outputPath $CSV_PATH/$1 --outputFormat plainText --extension _Corr_G0.csv \
		 --param sygic="$1" disabled=0 corrected=1 gemeinde0=0 --encoding $ENCODING
	# Active, Not corrected, Gefundene Gemeinden
	$JAVA $OPTS -jar $JAR --database `$CYG2DOS $DB $DB2` --categoryPath $CAT_PATH --categories Traditional,Multi,Unknown,Wherigo,VirtualCache,Earth,Letterbox \
		 --outputPath $CSV_PATH/$1 --outputFormat plainText --extension .csv \
		 --param sygic="$1" disabled=0 corrected=0 gemeinde0=1 --encoding $ENCODING
	# Active, Corrected, Gefundene Gemeinden
	$JAVA $OPTS -jar $JAR --database `$CYG2DOS $DB $DB2` --categoryPath $CAT_PATH --categories Traditional,Multi,Unknown,Wherigo,VirtualCache,Earth,Letterbox \
		 --outputPath $CSV_PATH/$1 --outputFormat plainText --extension _Corr.csv \
		 --param sygic="$1" disabled=0 corrected=1 gemeinde0=1 --encoding $ENCODING
	# Disabled
	$JAVA $OPTS -jar $JAR --database `$CYG2DOS $DB $DB2` --categoryPath $CAT_PATH --categories Traditional,Multi,Unknown,Wherigo,VirtualCache,Earth,Letterbox \
		 --outputPath $CSV_PATH/$1 --outputFormat plainText --extension _Disa.csv \
		 --param sygic="$1" disabled=1 --encoding $ENCODING
	# Kleine (<15 Bytes) Dateien loeschen. Die enthalten keine Waypoints
	# (Delete tiny CSVs — they contain no waypoints.)
	find $CSV_PATH/$1 -name "*.csv" -size -15c -delete
	mkdir -p $RUPI_PATH/$1
	$JAVA -jar $RUPI_JAR --tasks 3 --encoding $ENCODING --outputPath $RUPI_PATH/$1 $CSV_PATH/$1/*.csv
}
export -f createCountry
# Hard-link the category icon ($1) into a region's ($2) rupi dir, but only
# when the matching .rupi file exists.
function copyIcon() {
	# $1 Name der Kategorie bzw. des Teilnames der Dateien
	# $2 Sygic Region
	if [ -f $RUPI_PATH/$2/$1.rupi ]
	then
		ln $ICON_PATH/$1.bmp $RUPI_PATH/$2/$1.bmp
	fi
}
export -f copyIcon
# Clear previous outputs.
# NOTE(review): ** only recurses with `shopt -s globstar`, which is not set
# here — confirm these globs match as intended.
rm -f $CSV_PATH/**/*.csv
rmdir $CSV_PATH/*
rm -f $RUPI_PATH/**/*.rupi
rmdir $RUPI_PATH/*
function printArgs() {
	echo $*
}
export -f printArgs
# Export all regions in parallel (throttled to $TASKS jobs).
if true
then
	parallel --delay 0.2 -j $TASKS -u createCountry ::: \
		che deu03 fra08 fra07 ita02 aut fra06 lie deu02 deu07 deu04
fi
# Dem Syncthing ein "Scan" und "Override" schicken damit es Aenderungen von Clients ueberschreibt
# (Tell Syncthing to rescan and override client-side changes.)
echo "Start Trigger Syncthing"
curl -s -X POST -H "X-API-Key: $SYNCTHING_KEY" 'http://127.0.0.1:8384/rest/db/scan?folder=eviw2-zxkts'
curl -s -X POST -H "X-API-Key: $SYNCTHING_KEY" 'http://127.0.0.1:8384/rest/db/override?folder=eviw2-zxkts' &
# NOTE(review): the exit below makes the icon-linking section dead code —
# confirm whether it is intentionally disabled.
exit 0
echo "Verlinken der Icons"
parallel -j $TASKS -u copyIcon ::: \
	Active_Found Archived_Found Parking Parking_G0 \
	Traditional Traditional_Corr Traditional_G0 Traditional_Corr_G0 Traditional_Disa \
	Multi Multi_Corr Multi_G0 Multi_Corr_G0 Multi_Disa \
	Unknown Unknown_Corr Unknown_G0 Unknown_Corr_G0 Unknown_Disa \
	Wherigo Wherigo_Corr Wherigo_G0 Wherigo_Corr_G0 Wherigo_Disa \
	VirtualCache VirtualCache_Corr VirtualCache_G0 VirtualCache_Corr_G0 VirtualCache_Disa \
	Earth Earth_Corr Earth_G0 Earth_Corr_G0 Earth_Disa \
	Letterbox Letterbox_Corr Letterbox_G0 Letterbox_Corr_G0 Letterbox_Disa \
	Event Virtual Physical ::: \
	che deu03 fra08 lie aut ita
| true
|
3922aaadf0bd5661ea31add7328281bcbdf96d77
|
Shell
|
bowtie-co/docker-jekyll
|
/docker-entrypoint.sh
|
UTF-8
| 159
| 2.9375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Container entrypoint: install gems when a Gemfile is present, then either
# run `jekyll serve` (first argument "serve") or exec the given command.
set -e

if [ -f Gemfile ]; then
  bundle install
fi

if [ "$1" == "serve" ]; then
  # NOTE(review): assumes PORT and HOST are provided by the container
  # environment — confirm they are set in the image/compose file.
  exec jekyll serve --port "$PORT" --host "$HOST"
else
  exec "$@"   # quoted so arguments containing spaces are preserved (SC2068)
fi
| true
|
ccc132933d95adce65cd33216f714c7d4bc25a43
|
Shell
|
geier/dotfiles
|
/bin/tm
|
UTF-8
| 141
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/sh
# tm — attach to an existing tmux session, or create it if it is absent.
# Usage: tm <session_name>
if [ -z "$1" ]; then
    echo "usage: tm <session_name>" >&2
    # `exit` replaces the original `return 1`, which is invalid at the top
    # level of an executed (not sourced) script.
    exit 1
else
    # Attach when the session exists; otherwise create it. (If attach
    # itself fails, `new` runs as a fallback — kept from the original.)
    tmux has -t "$1" && tmux attach -t "$1" || tmux new -s "$1"
fi
| true
|
a9f155fff02ed03c83583d0f6872eec0c0d57b90
|
Shell
|
accept-project/accept-score-rules
|
/findreftrans.sh
|
UTF-8
| 1,009
| 4.09375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
#
# Find all reference translations of segments in a file.
# The results are written into a parallel target file.
# For each segment, its reference translations is found by finding
# it in the source segments file, and by searching for the reference
# translation in the corresponding parallel target segments file.
if [ $# -lt 4 ] ; then
    echo "Usage: findreftrans.sh corpus.src corpus.tgt segments.src segments.tgt"
    exit 1
fi
export SRCCORPUS=$1
export TGTCORPUS=$2
export SRCFILE=$3
export TGTFILE=$4
export TMPFILE=$SRCFILE.tmp
touch $TGTFILE
# For each source segment: locate its (first) line number in the source
# corpus, then copy the same line from the target corpus. Segments not
# found in the corpus produce an empty output line, keeping the two files
# line-aligned.
while read -r STR ; do
    # fgrep -f wants a pattern file, so write the segment to a temp file.
    echo "$STR" > $TMPFILE
    export LINE=`fgrep --line-number -f $TMPFILE $SRCCORPUS | head -1 | cut -d ':' -f 1`
    if [ -z "$LINE" ] ; then
	echo "Warning: no line found in $TGTCORPUS corresponding to the following sentence in $SRCCORPUS, outputting empty line:" >& 2
	cat $TMPFILE >& 2
	echo "" >> $TGTFILE
    else
	# Print exactly line $LINE of the target corpus.
	sed -n "${LINE},${LINE}p" $TGTCORPUS >> $TGTFILE
    fi
done < $SRCFILE
rm $TMPFILE
| true
|
5d5234e9b48d48fc61fcdf40fda808365a5cfe62
|
Shell
|
zaghdoun/csar-public-library
|
/org/ystia/logstash/linux/bash/scripts/logstash_changeInput.sh
|
UTF-8
| 329
| 2.640625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Reconfigure the Logstash input URL in the inputs conf file and signal the
# running process to reload if needed. Helper functions (log, error_exit,
# ensure_home_var_is_set, replace_conf_file, send_sighup_ifneeded) come
# from the sourced utility scripts; $utils_scripts, $scripts and $url are
# expected in the environment — TODO confirm against the caller.
source ${utils_scripts}/utils.sh
log begin
source ${scripts}/logstash_utils.sh
ensure_home_var_is_set
log info "Execute change input command with URL : $url"
replace_conf_file $LOGSTASH_HOME/conf/1-1_logstash_inputs.conf "input" $url || error_exit "Reconfiguration failed"
send_sighup_ifneeded
log end
| true
|
27afcae704c4c5443f9e6f86f754e7d206050ae6
|
Shell
|
mreddy89/AWS
|
/bash_scripts/ec2_scripts/instance_AMI_create.sh
|
UTF-8
| 641
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
# Create a no-reboot AMI backup for every EC2 instance ID listed (one per
# line) in instances_list_for_AMI.txt, using the "dev" AWS CLI profile.
region=eu-west-1
profile=dev
# Mention all the EC2 instances in the below .txt file for which you want to create AMI
input="./instances_list_for_AMI.txt"
#while read line;
while IFS= read -r line
do
  ec2id=${line}
  #datetime=$(echo "$(date +'%d-%m-%Y_%H:%M')")
  #echo "Instance ID is : ${ec2id}";
  # this is to create the AMI of instances mentioned in instances_list_for_AMI.txt file
  # --no-reboot: snapshot without stopping the instance (filesystem may not
  # be fully consistent).
  result=$(aws --profile ${profile} --region ${region} ec2 create-image --instance-id ${ec2id} --name "${ec2id}_bkp" --description "This is the AMI of ${ec2id}" --no-reboot)
  echo "AMI Initiated for EC2 : ${ec2id}";
  echo ${result}
done < $input
| true
|
250b4d36dd65e6070891ce67f65ef35f7317f168
|
Shell
|
tiberiu-sevio/opensource
|
/bootstrap.sh
|
UTF-8
| 632
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision a box: set up logging, update apt, relax SELinux if permissive,
# install PHP, and install an SSH public key for the current user.
# NOTE(review): `sudo -S <<< "app"` feeds the hard-coded password "app" on
# stdin — a plaintext credential in the script; confirm this is acceptable
# for this environment.
echo "UUIIIIIIIIII3"
sudo -S <<< "app" touch /var/log/system-bootstrap.log
sudo -S <<< "app" chmod 777 /var/log/system-bootstrap.log
sudo -S <<< "app" apt-get update >> /var/log/system-bootstrap.log
SELINUXSTATUS=$(getenforce)
if [ "$SELINUXSTATUS" == "Permissive" ]; then
   sudo -S <<< "app" setenforce 0 >> /var/log/system-bootstrap.log
fi;
sudo -S <<< "app" apt-get -y install php >> /var/log/system-bootstrap.log
sudo -S <<< "app" mkdir ~/.ssh
sudo -S <<< "app" touch ~/.ssh/authorized_keys
sudo -S <<< "app" chmod 777 ~/.ssh/authorized_keys
# NOTE(review): the `> ~/.ssh/authorized_keys` redirection is performed by
# the invoking shell (as the current user), not by sudo — confirm intent.
sudo -S <<< "app" echo "O_CHEIE_PUBLICA" > ~/.ssh/authorized_keys
| true
|
baa54b32b78f21bb83bb36f72ad823c7d3928e1d
|
Shell
|
zhangml17/jenkins-on-k8s
|
/scripts/get-passwd.sh
|
UTF-8
| 1,218
| 3.984375
| 4
|
[] |
no_license
|
#!/bin/bash
# Print the Jenkins initial admin password from a controller pod running in
# Kubernetes: find the pod matching the given name, then cat the secret
# file inside it.
set -e
DEFAULT_S="default"
show_help () {
cat << USAGE
usage: $0 [ -N CONTROLLER-NAME ] [ -S NAMESPACE ]
       -n  : Specify the name of the controller.
       -s  : Specify the namespace. If not specified, use '${DEFAULT_S}' by default.
USAGE
exit 0
}
# Get Opts
while getopts "hn:s:" opt; do  # a colon after an option letter means it takes an argument
    case "$opt" in
        h) show_help
            ;;
        n) NAME=$OPTARG
            ;;
        s) NAMESPACE=$OPTARG
            ;;
        ?)  # getopts sets opt to '?' for unrecognized options
            echo "unkonw argument"
            exit 1
            ;;
    esac
done
# No arguments at all: print usage and exit.
[[ -z $* ]] && show_help
# Abort with a message when a required value ($2, labelled $1) is empty.
chk_var () {
    if [ -z "$2" ]; then
        echo "$(date -d today +'%Y-%m-%d %H:%M:%S') - [ERROR] - no input for \"$1\", try \"$0 -h\"."
        sleep 3
        exit 1
    fi
}
chk_var -n $NAME
# Abort when the given command is not installed/executable.
chk_install () {
    if [ ! -x "$(command -v $1)" ]; then
        echo "$(date -d today +'%Y-%m-%d %H:%M:%S') - [ERROR] - no $1 installed !!!"
        sleep 3
        exit 1
    fi
}
NEEDS="kubectl"
for NEED in $NEEDS; do
    chk_install $NEED
done
NAMESPACE=${NAMESPACE:-"${DEFAULT_S}"}
# First pod whose name contains $NAME in the chosen namespace.
POD=$(kubectl -n ${NAMESPACE} get pod | grep ${NAME} | awk -F ' ' '{print $1}')
kubectl -n ${NAMESPACE} exec -it ${POD} -- cat /var/jenkins_home/secrets/initialAdminPassword
| true
|
42cdb84e05cf086c1a2bc0536574e92c20ec5445
|
Shell
|
mazenamr/FlickPhotos-DevOps
|
/scripts/web-build.sh
|
UTF-8
| 788
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
# Build the frontend with npm under nvm, logging output per run; a lock
# file marks an in-progress/failed build, and failure triggers an alert
# email via msmtp.
timestamp=$(TZ='Africa/Cairo' date +"%Y-%m-%d_%T")
lock="$HOME/flags/web-build.lck"
cd $HOME
# First run: fetch the repo via the update script before building.
[ ! -d "main" ] && (./files/scripts/update.sh || exit -1)
cd main
# setup
# pipefail so a failing npm step is not masked by the tee in the pipeline.
set -o pipefail
cd Frontend
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh"  # This loads nvm
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion"  # This loads nvm bash_completion
# build
# The lock is removed only when install+build both succeed; if it is still
# present afterwards, the build failed and an alert email is sent.
touch $lock
nvm use 14.16.1
npm install | tee -a "$HOME/logs/web/build_$timestamp" &&
npm run build | tee -a "$HOME/logs/web/build_$timestamp" &&
rm $lock
[ -f $lock ] &&
echo -e "Subject: Web Build Failed\nWeb build failed at $(TZ='Africa/Cairo' date)" | msmtp admin@flick.photos &&
rm $lock &&
exit -1 ||
echo "Web built at [$(TZ='Africa/Cairo' date)]"
| true
|
f09ef992b8f5000c29108d350fb47a0ff1111a82
|
Shell
|
algorithmiaio/langpacks
|
/r/template/bin/test
|
UTF-8
| 306
| 2.6875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Point R at the configured CRAN mirror and the local dependencies dir,
# then run the test suite for this language pack.
set -e
ROOTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )"
# Default mirror unless CRAN_MIRROR is provided by the environment.
CRAN_MIRROR=${CRAN_MIRROR:=http://cran.rstudio.com/}
echo "options(repos=structure(c(CRAN=\"$CRAN_MIRROR\")))" > ~/.Rprofile
echo "R_LIBS_USER=$ROOTDIR/dependencies" > ~/.Renviron
exec Rscript $ROOTDIR/run_all_tests.r
| true
|
38de4faa690d6608cc94f2cd58d73fae17a4a8ac
|
Shell
|
Ecotrust/marco-portal2
|
/wagtail_migrations/wagtail_1_11.sh
|
UTF-8
| 628
| 2.625
| 3
|
[
"ISC"
] |
permissive
|
#!/bin/bash
# Upgrade wagtail to 1.11.1 in the project's virtualenv and run the core
# migrations. -e overrides the env path, -v is accepted but unused here.
PROJ=/usr/local/apps/ocean_portal
ENV=$PROJ/wag_env;
while getopts e:v: flag
do
    case "${flag}" in
        e) ENV=${OPTARG};;
        v) PYVER=${OPTARG};;
    esac
done
PIP=$ENV/bin/pip;
# 1_11
# Clean reinstall of the pinned wagtail version.
$PIP uninstall wagtail -y
$PIP install "wagtail==1.11.1"
PYTHON=$ENV/bin/python3;
DJ=$PROJ/marco/manage.py;
$PYTHON $DJ migrate wagtailcore
# Remaining wagtail apps left commented out — only wagtailcore is migrated.
# $PYTHON $DJ migrate wagtailadmin
# $PYTHON $DJ migrate wagtaildocs
# $PYTHON $DJ migrate wagtailembeds
# $PYTHON $DJ migrate wagtailforms
# $PYTHON $DJ migrate wagtailimages
# $PYTHON $DJ migrate wagtailredirects
# $PYTHON $DJ migrate wagtailsearch
# $PYTHON $DJ migrate wagtailusers
| true
|
619e9d3e965b28423c07c5f2581726500959ea9b
|
Shell
|
venkatcurl/health-patterns
|
/utilities/initialize-nifi.sh
|
UTF-8
| 1,202
| 3
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Initialize a NiFi canvas: wait for NiFi to come up, register reporting,
# optionally load the ingestion/enrichment flows from the registry, then
# start the flow. On success the container sleeps forever so it stays up.
# All *_PW / ADD_* / RUN_* / NIFI_REGISTRY etc. values come from the
# environment.
scripts/wait-for-nifi.sh $HOSTNAME 8080
scripts/initialize-reporting-task.sh $HOSTNAME 8080
if [ "$ADD_CLINICAL_INGESTION" = true ] ; then
  python /scripts/loadHealthPatternsFlows.py \
      --baseUrl=http://$HOSTNAME:8080/ \
      --reg=$NIFI_REGISTRY \
      --bucket=Health_Patterns \
      --flowName="Clinical Ingestion" \
      --version=34
fi
if [ "$ADD_CLINICAL_ENRICHMENT" = true ] ; then
  # x/y place this flow below the ingestion flow on the canvas.
  python /scripts/loadHealthPatternsFlows.py \
      --baseUrl=http://$HOSTNAME:8080/ \
      --reg=$NIFI_REGISTRY \
      --bucket=Health_Patterns \
      --flowName="FHIR Bundle Enrichment" \
      --version=11 \
      --x=0.0 \
      --y=200.0
fi
python /scripts/startHealthPatternsFlow.py \
      --baseUrl=http://$HOSTNAME:8080/ \
      --fhir_pw=$FHIR_PW \
      --kafka_pw=$KAFKA_PW \
      --addNLPInsights=$ADD_NLP_INSIGHTS \
      --runASCVD=$RUN_ASCVD \
      --deidentifyData=$DEIDENTIFY_DATA \
      --resolveTerminology=$RESOLVE_TERMINOLOGY \
      --releaseName=$RELEASE_NAME \
      --deidConfigName=$DEID_CONFIG_NAME \
      --deidPushToFhir=$DEID_PUSH_TO_FHIR
if [ $? -eq 0 ] ; then
  echo "NiFi canvas setup was successful!"
  # Keep the container alive after successful setup.
  echo "starting wait loop so container does not exit"
  sleep infinity
else
  echo "NiFi canvas setup failed"
  exit 1
fi
| true
|
13716e0332573d30268a2053423368eec6a3b481
|
Shell
|
twfcc/gfw.press_installer
|
/uninstall.sh
|
UTF-8
| 1,838
| 3.421875
| 3
|
[
"Unlicense"
] |
permissive
|
#! /bin/bash
# $author: twfcc@twitter
# $PROG: uninstall.sh
# $description: uninstall components which installed by [gfw3proxy.sh|gfw3proxy_init.sh|gfwtiny.sh]
# $Usage: $0
# Public Domain use as your own risk.
LANGUAGE=C
LC_ALL=C
PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
export LANGUAGE LC_ALL PATH
# Must run as root; all removal steps touch system paths.
[ $(whoami) != "root" ] && {
	echo "Execute this script must be root." >&2
	exit 1
	}
[ $(pwd) != "/root" ] && cd "$HOME"
# Remove gfw.press files, its init script, and kill its java process.
remove_gfw(){
	local pids pattern
	if [ -d "$HOME/gfw.press" ] ; then
		rm -rf "$HOME/gfw.press"
	fi
	if [ -d /usr/local/etc/gfw.press ] ; then
		rm -rf /usr/local/etc/gfw.press
	fi
	if [ -f /etc/init.d/gfw.press ] ; then
		update-rc.d -f gfw.press remove
		rm -f /etc/init.d/gfw.press
	fi
	# Identify the gfw.press java process by its distinctive JVM flags.
	pattern="java -Dfile.encoding=utf-8 -Dsun.jnu.encoding=utf-8 -Duser.timezone=Asia/Shanghai"
	pids=$(ps aux | grep "$pattern" | grep -v grep | awk '{print $2}')
	if [ -n "$pids" ] ; then
		kill $pids
	fi
}
# Restore the pre-install tinyproxy config and purge the package.
remove_tiny(){
	if [ -f /etc/tinyproxy.conf.bak ] ; then
		mv -f /etc/tinyproxy.conf.bak /etc/tinyproxy.conf 2> /dev/null
	fi
	apt-get purge tinyproxy -y 2> /dev/null
}
# Kill 3proxy and remove its files and init script.
remove_3proxy(){
	local pids
	pids=$(ps aux | grep '3proxy' | grep -v grep | awk '{print $2}')
	if [ -n "$pids" ] ; then
		# NOTE(review): "$pids" is quoted here (unlike in remove_gfw), so
		# multiple PIDs would be passed to kill as one argument — confirm.
		kill "$pids"
	fi
	if [ -d "$HOME/3proxy" ] ; then
		rm -rf $HOME/3proxy
	fi
	if [ -e /etc/init.d/3proxyinit ] ; then
		update-rc.d -f 3proxyinit remove ;
		rm -f /etc/init.d/3proxyinit 2> /dev/null ;
	fi
	if [ -d "/usr/local/etc/3proxy" ] ; then
		rm -rf /usr/local/etc/3proxy ;
	fi
}
# Which proxy was installed decides what to remove alongside gfw.press.
printf '%b' '\033[31mUninstall gfw.press\033[39m'
if which tinyproxy > /dev/null 2>&1
then
	remove_gfw &> /dev/null
	remove_tiny &> /dev/null
else
	remove_gfw &> /dev/null
	remove_3proxy &> /dev/null
fi
echo -n " .." ; sleep 1 ; echo -n " .. " ; sleep 1
printf '%b\n' '\033[32mDone.\033[39m'
exit 0
| true
|
59ba21776ffd04c5a17b22be6904bb07c75d8d02
|
Shell
|
yuriylesyuk/etp
|
/examples/prod-1dc-19n-5sn/planetctl
|
UTF-8
| 2,511
| 3.75
| 4
|
[] |
no_license
|
#!/bin/bash
APIGEE_ZOOKEEPER_DC_1="n13 n15 n16 n17 n18"
APIGEE_CASSANDRA_DC_1="n13 n15 n16 n17 n18"
APIGEE_OPENLDAP_DC_1="n13"
APIGEE_POSTGRESQL_DC_1="n19"
EDGE_POSTGRES_SERVER_DC_1="n19"
APIGEE_QPIDD_DC_1="n19 n29"
EDGE_QPID_SERVER_DC_1="n19"
EDGE_MANAGEMENT_SERVER_DC_1="n03"
EDGE_MESSAGE_PROCESSOR_DC_1="n03 n04 n05 n06 n07"
EDGE_ROUTER_DC_1="n01 n02"
EDGE_SAP_UI_DC_1="n03"
APIGEE_ELASTICSEARCH_DC_1="n10 n11"
BAAS_SAP_USERGRID_DC_1="n10 n11"
BAAS_SAP_PORTAL_DC_1="n10 n11"
DCS="dc-1"
COMPS="apigee-zookeeper apigee-cassandra apigee-openldap apigee-postgresql edge-postgres-server apigee-qpidd edge-qpid-server edge-management-server edge-message-processor edge-router edge-sap-ui apigee-elasticsearch baas-sap-usergrid baas-sap-portal"
help(){
echo ""
echo "planetctl <action> <target> [<comp>] [--dryrun]"
echo ""
echo "Arguments:"
echo ""
echo " <action>: start or stop"
echo " <target>: planet or $DCS"
echo " <comp>: missing or all or subset of $COMPS"
echo ""
echo " <dryrun>: optional, if present the output command will be produced but not executed"
echo ""
}
if [ "$#" -lt 2 ]
then
echo "Wrong number of mandatory arguments supplied."
echo ""
help
exit 1
fi
ACTION=$1
TARGET=$2
DRYRUN=$3
if [[ ! ";start;stop;" =~ ";$ACTION;" ]]
then
echo "Unsupported Action: $ACTION"
help
exit 1
fi
if [[ ! "$DCS planet;" =~ "$TARGET" ]]
then
echo "Unsupported target: $TARGET"
help
exit 1
fi
if [[ ! "$DRYRUN" == "" ]]
then
DRYRUN="echo "
DRYESC=""
DRYQUOTE='"'
fi
# command <op> <dcs> <comps>
# For every component in <comps> and every data center in <dcs>, runs
# "apigee-service <comp> <op>" via ansible on each node listed in the
# matching <COMP>_<DC> variable. Exits the script on the first failure.
# NOTE: the name shadows the shell builtin `command` for the rest of this
# script; kept because the call site below depends on it.
function command(){
    local op=$1
    local dcs=$2
    local comps=$3
    # "planet" means every configured data center.
    if [ "$dcs" = "planet" ]; then
        dcs=$DCS
    fi
    if [ "$op" = "stop" ]; then
        # Stop in reverse start order: reverse list of components.
        comps=$(echo -n "${comps}"|awk '{for (i=NF;i>0;i--){printf $i " "};printf "\n"}')
    fi
    for comp in $comps; do
        for dc in $dcs; do
            if [[ "$DCS" =~ "$dc" ]]; then
                # e.g. edge-router / dc-1 -> EDGE_ROUTER_DC_1, then expand it.
                local nodes_ref=$(echo -n "${comp}_${dc}"|awk '{gsub(/-/, "_");print toupper($0)}')
                local nodes=${!nodes_ref}
                for node in $nodes; do
                    # BUGFIX: use the local $op parameter, not the global
                    # $ACTION, so the function honours its own argument.
                    $DRYRUN ansible $node -a $DRYESC$DRYQUOTE"apigee-service $comp $op$DRYESC$DRYQUOTE"
                    if [[ $? -ne 0 ]]; then exit 1; fi
                done
            fi
        done
        echo ""
    done
}
# Dispatch the validated CLI arguments to the worker function above.
command "$ACTION" "$TARGET" "$COMPS"
| true
|
a0b313e86f4bbef053b25fee7fb0384a532ebd05
|
Shell
|
quodlibet/ppa-scripts
|
/build-debian-stable.sh
|
UTF-8
| 932
| 2.5625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build Quod Libet / Mutagen Debian packages for Debian stable, sign the
# apt archive, and publish it to the gh-pages branch of lazka/ql-debian.
# needs a pbuilder:
# sudo pbuilder create --distribution stable
# sudo pbuilder update
set -e
MINID="mini-dinstall --config=mini-dinstall.conf"
# Kill any lingering mini-dinstall daemons; repeated because a single -k
# may race with a daemon that is still shutting down.
$MINID -k >/dev/null 2>&1 || true
$MINID -k >/dev/null 2>&1 || true
$MINID -k >/dev/null 2>&1 || true
rm -rf ~/debian_archive
$MINID
# Build the two packages into the debian archive.
./quodlibet.py -ddebian
./mutagen.py -ddebian -v1
$MINID -r
# Detach-sign the apt Release file with the project key.
cd ~/debian_archive/quodlibet-stable/
gpg -u B6264964! --output Release.gpg -ba Release
cd -
# Recreate the gh-pages publishing checkout from scratch (history dropped).
rm -Rf ql-debian
git clone https://github.com/lazka/ql-debian.git ql-debian
cd ql-debian
git checkout gh-pages
rm -rf ".git"
git init
git checkout -b gh-pages
touch .nojekyll
rm -Rf stable
mkdir stable
cp -R ~/debian_archive/quodlibet-stable ./stable/quodlibet-stable
python3 ../build_index.py .
git add .
git commit -m "update"
git remote add origin https://github.com/lazka/ql-debian.git
git push --force --set-upstream origin gh-pages
cd -
rm -Rf ql-debian
rm -rf ~/debian_archive
| true
|
f14df1f2620c0265bc837c7dd87cb1b666b02313
|
Shell
|
gstackio/dingo-postgresql-release
|
/images/tutorial/setup.sh
|
UTF-8
| 1,025
| 3.09375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Boots a single-node etcd (v2) container plus a registrator container for
# the dingo-postgresql tutorial.
# env $(cat tmp/tutorial.env| xargs) ./images/tutorial/setup.sh
if [[ "${HOST_IP}X" == "X" ]]; then
  # BUGFIX: the message previously said "Requires $ETCD_CLUSTER", but the
  # variable actually being checked here is $HOST_IP.
  echo "Requires \$HOST_IP"
  exit 1
fi
if [[ "${DOCKER_SOCK}X" == "X" ]]; then
  echo "Requires \$DOCKER_SOCK"
  exit 1
fi
ETCD_CLUSTER=${HOST_IP}:4001
# Replace any previous etcd container and start a fresh single-member cluster.
docker rm -f etcd
docker run -d -p 4001:4001 -p 2380:2380 -p 2379:2379 --name etcd quay.io/coreos/etcd:v2.3.7 \
 -name etcd0 \
 -advertise-client-urls "http://${HOST_IP}:2379,http://${HOST_IP}:4001" \
 -listen-client-urls http://0.0.0.0:2379,http://0.0.0.0:4001 \
 -initial-advertise-peer-urls "http://${HOST_IP}:2380" \
 -listen-peer-urls http://0.0.0.0:2380 \
 -initial-cluster-token etcd-cluster-1 \
 -initial-cluster "etcd0=http://${HOST_IP}:2380" \
 -initial-cluster-state new
# Registrator watches the docker socket and registers containers in etcd.
docker rm -f registrator
docker run -d --name registrator \
 --net host \
 --volume ${DOCKER_SOCK}:/tmp/docker.sock \
 cfcommunity/registrator:latest /bin/registrator \
 -hostname ${HOST_IP} -ip ${HOST_IP} \
 etcd://${ETCD_CLUSTER}
| true
|
a0007b9866f9212ec71df8e5f8ae3db993288c91
|
Shell
|
voznik/config-ubuntu
|
/bin/server/gitflow.sh
|
UTF-8
| 615
| 3.515625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Install git, git-flow and bash-completion; part of a larger installer
# framework that exports INSTALL_SCRIPT, FILES_DIR, USER_HOME, SKIP_SLEEP.
if [[ ! $INSTALL_SCRIPT ]]; then
  echo "(!) Error: You must use the installer script."
  exit
fi
echo "(+) Installing Git (If Not Installed); Git-Flow; Bash-Completions (If Not Installed)"
sudo add-apt-repository -y ppa:git-core/ppa
sudo apt-get install git git-flow bash-completion -y
echo "(+) Copying git-flow-completion.bash to Your Home Folder"
# Quoted so paths with spaces survive word-splitting.
cp "$FILES_DIR/git-flow-completion.bash" "$USER_HOME/git-flow-completion.bash"
# Clear any existing tmp GO files
echo "(+) Complete! Git-flow was installed!"
echo "    $ source ~/.bashrc"
echo ""
# BUGFIX: the test was `[ $SKIP_SLEEP == false ]` with an unquoted
# expansion; an unset/empty SKIP_SLEEP produced "[: ==: unary operator
# expected" instead of evaluating the condition.
if [ "$SKIP_SLEEP" == false ]; then
  sleep 6
fi
| true
|
38c209e1c8ba2642781d281319dccc48dee95849
|
Shell
|
zolll23/bash_utils
|
/clear_viruses_by_mail.sh
|
UTF-8
| 166
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/sh
# Delete files flagged in the PHP mail log as containing eval() calls.
# The 6th log field carries the offending path as "[/path/to/file(123)";
# the bracket and the "(line)" suffix are stripped before removal.
# BUGFIX: the original iterated `for file in $(cat ... )` and ran
# `rm -rvf $file` unquoted, so paths containing whitespace or glob
# characters were split/expanded before deletion.
grep "eval()" /var/log/phpmail.log \
  | awk '{print $6}' \
  | sed 's/\[//' \
  | sed 's/([0-9]*)//' \
  | uniq \
  | while read -r file; do
      echo "$file"
      rm -rvf "$file"
    done
| true
|
d3d15cf144da6f52b9f3d9460979afc714407f5e
|
Shell
|
ltOgt/.bash
|
/scripts/prompt
|
UTF-8
| 479
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/bash
# prompt — confirm (or collect input for) an action through dmenu.
if [ $# -lt 2 ] || ([[ $1 == '--help' ]] || [[ $1 == '-h' ]])
then
    echo 'prompt <PROMPT> <ACTION>'
    echo '    Prompt user and perform action on "Y"'
    echo 'prompt --input <Prompt> <ACTION>'
    echo '    Prompt user and perform action with dmenu input'
# BUGFIX: per the usage text above, "--input" is the FIRST argument; the
# original tested $2 and then assigned prompt=$2, so the prompt shown was
# the literal string "--input" and the action was never the right word.
elif [ "$1" == "--input" ]
then
    prompt=$2
    action=$3
    # Intentionally unquoted so multi-word dmenu input becomes multiple
    # arguments to the action — preserved from the original behaviour.
    $action $(dmenu -p "$prompt")
else
    prompt=$1
    action=$2
    [[ $(echo -e "y\nn" | dmenu -p "$prompt") == "y" ]] && $action
fi
# success
exit 0
| true
|
5e38244b0debe46401dcd526cb57458437b0998e
|
Shell
|
sid678/Compression-and-Encryption
|
/combine.sh
|
UTF-8
| 317
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash
# Drive the stego pipeline: embed data via image.py, then either
# Huffman-compress (C++) or MHKC-encrypt (Python) the result.
echo "Input 1 for image and anything else for message"
read var1
# Quoted so an empty/whitespace answer is still passed as one argument.
python3.8 image.py "$var1"
echo "Type Yes to compress and anything else to encrypt"
read var2
# BUGFIX: unquoted $var2 made `[ $var2 == "Yes" ]` a syntax error when the
# user pressed Enter without typing anything.
if [ "$var2" == "Yes" ]
then
    # python3.8 image.py
    g++-11 huffman.cpp
    ./a.out
else
    # python3.8 image.py
    # BUGFIX: was `python mhkc.py var1`, which passed the literal string
    # "var1" instead of the user's earlier choice.
    python mhkc.py "$var1"
fi
| true
|
10fcf48b30e44fd89245a78e32ce43e3ae5a8abf
|
Shell
|
smartnose/smartnose
|
/install_cuda_8.sh
|
UTF-8
| 781
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
# Install a CUDA toolkit and cuDNN 6 on Ubuntu 16.04.
# NOTE(review): the repo package downloaded below is the CUDA *9.0* local
# repo while `cuda-8-0` is installed afterwards — confirm which toolkit
# version is actually intended.
# BUGFIX: the URL basename ends in "amd64-deb", so wget saved a file the
# following dpkg call (which used "amd64.deb") could never find; save it
# explicitly under the name dpkg expects.
wget https://developer.nvidia.com/compute/cuda/9.0/Prod/local_installers/cuda-repo-ubuntu1604-9-0-local_9.0.176-1_amd64-deb -O cuda-repo-ubuntu1604-9-0-local_9.0.176-1_amd64.deb
sudo dpkg -i cuda-repo-ubuntu1604-9-0-local_9.0.176-1_amd64.deb
# BUGFIX: "<version>" was a literal placeholder copied from NVIDIA's docs;
# for the package above the repo directory is cuda-repo-9-0-local.
sudo apt-key add /var/cuda-repo-9-0-local/7fa2af80.pub
sudo apt-get update
sudo apt-get install cuda-8-0
echo now install cuda dnn
wget https://developer.nvidia.com/compute/machine-learning/cudnn/secure/v6/prod/8.0_20170307/cudnn-8.0-linux-x64-v6.0-tgz
tar -xzvf cudnn-8.0-linux-x64-v6.0-tgz
sudo cp cuda/include/cudnn.h /usr/local/cuda/include
sudo cp cuda/lib64/libcudnn* /usr/local/cuda/lib64
sudo chmod a+r /usr/local/cuda/include/cudnn.h /usr/local/cuda/lib64/libcudnn*
sudo apt-get install libcupti-dev
echo You must now update PATH and LIB_LD_PATH and install tensorflow-gpu
| true
|
5645f4ed7076e8c18c04c990178085c75cf1d693
|
Shell
|
dstokes/dotfiles
|
/local/.local/bin/tmux-status
|
UTF-8
| 838
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
# Build a tmux status-line segment (macOS): low-battery warning plus the
# currently playing Spotify artist/track.
ICON_MUSIC=ﱘ
# date string
STATUS=""
# battery percentage
# Second line of `pmset -g batt`, percentage field with the '%' stripped.
S_BATT=$(pmset -g batt 2>&1 | awk -F ';' 'NR==2 {sub(/%/, "", $1); print $1;}' | cut -f2)
# Only show the battery when it is below 50%.
if [[ $S_BATT -lt 50 ]]; then
STATUS=" $S_BATT% |$STATUS"
fi
# current spotify track
# AppleScript returns the artist only when Spotify is running; otherwise
# the substitution is empty.
S_ARTIST=$(osascript <<EOF
if application "Spotify" is running then
tell application "Spotify"
return artist of current track as string
end tell
end if
EOF)
S_TRACK=$(osascript <<EOF
if application "Spotify" is running then
tell application "Spotify"
return name of current track as string
end tell
end if
EOF)
# Truncate long artist/track names to 23 chars + "..".
if [[ -n "$S_ARTIST" ]]; then
if [[ ${#S_ARTIST} -ge 30 ]]; then
S_ARTIST="${S_ARTIST:0:23}.."
fi
if [[ ${#S_TRACK} -ge 30 ]]; then
S_TRACK="${S_TRACK:0:23}.."
fi
STATUS=" $ICON_MUSIC $S_ARTIST: $S_TRACK |$STATUS"
fi
printf "%s" "$STATUS"
|
51e9524eb9e8f42ddd71832401644bb15dca6a59
|
Shell
|
jeongeun/mcsanc
|
/runall_lo.sh
|
UTF-8
| 1,187
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the MCSANC leading-order calculation for one W production channel.
# input_Wm_e_50-100_lo.cfg input_Wp_e_50-100_lo.cfg
procId=${1}
if [ "$procId" == "" ]; then
    echo "usage: $0 processId // processId = W+(e+ve: 101, m+vm: 102) W-( e-ve~:-101, m-vm~: -102)";
    # BUGFIX: `break` is only valid inside a loop; at top level it did not
    # stop the script, so the run continued with an empty process id.
    exit 1;
fi
# Map the numeric process id to the file-name stem used below.
NameString=""
if [ "$procId" == "101" ]; then
    NameString="Wp_e"
elif [ "$procId" == "-101" ]; then
    NameString="Wm_e"
elif [ "$procId" == "102" ]; then
    NameString="Wp_mu"
elif [ "$procId" == "-102" ]; then
    NameString="Wm_mu"
elif [ "$procId" == "103" ]; then
    NameString="Wp_tau"
elif [ "$procId" == "-103" ]; then
    NameString="Wm_tau"
fi
#input_Wm_e_50-100_lo.cfg
Mtmins="50"
for Mtmin in $Mtmins
do
    if [ $Mtmin -lt 80 ]; then
        Mtmax=8000;
        echo "START `date` ;"
        echo "./makeinputLO.sh ${procId} $Mtmin $Mtmax ;"
        echo "../src/mcsanc input_${NameString}_${Mtmin}-${Mtmax}_lo.cfg >& 1126_${NameString}_${Mtmin}.log ;"
        # Placeholder path from the template — point at the real workdir.
        cp -r /your/Working/Directory/ewparam.cfg .
        source makeinputLO.sh ${procId} $Mtmin $Mtmax ; ../src/mcsanc input_${NameString}_${Mtmin}-${Mtmax}_lo.cfg >& 1126_${NameString}_${Mtmin}.log ;
        echo "END `date` ; ---> ${NameString}_${Mtmin}-${Mtmax} LO Finish !!"
    fi
done
|
8e4d34c1d0fb2d9324157c3f92e8bc8c218cc216
|
Shell
|
nyh/osv
|
/scripts/build-vm-img
|
UTF-8
| 1,374
| 3.984375
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
# Build OSv images for every supported hypervisor (KVM, VirtualBox, GCE,
# VMware Workstation, VMware ESXi) into build/standalone/<name>.
set -e
version=$(`dirname $0`/osv-version.sh)
name=$1
image=$2
description=$3
out=build/standalone/$name
build="make image=$image"
# BUGFIX: `function usage()` is a bashism that is a syntax error under
# strictly POSIX /bin/sh implementations (e.g. dash); use the portable form.
usage()
{
    echo "Usage: scripts/build-standalone-img [name] [image] [description]"
    exit 1
}
if [ $# -ne 3 ]; then
    usage
fi
### Build image
echo "==> Building '$name'..."
$build
### Prepare output dir
rm -rf $out
mkdir -p $out
### Image for KVM
img=build/release/usr.img
hypervisor=kvm
format=qcow2
cp $img $out/$(basename $name)-$version.qemu.qcow2
### Image for VirtualBox
scripts/gen-vbox-ova.sh >/dev/null
img=build/release/osv.ova
hypervisor=vbox
format=ova
mv $img $out/$(basename $name)-$version.vbox.ova
### Image for Google Compute Engine
scripts/gen-gce-tarball.sh >/dev/null
img=build/release/osv.tar.gz
hypervisor=gce
format=tar.gz
mv $img $out/$(basename $name)-$version.gce.tar.gz
### Image for VMware Workstation
make osv.vmdk >/dev/null
scripts/gen-vmx.sh
cd build/release
zip osv-vmw.zip osv.vmx osv.vmdk >/dev/null
cd -
img=build/release/osv-vmw.zip
hypervisor=vmw
format=zip
mv $img $out/$(basename $name)-$version.vmw.zip
### Image for VMware ESXi
rm -f build/release/osv-esx.ova
ovftool build/release/osv.vmx build/release/osv-esx.ova >/dev/null
img=build/release/osv-esx.ova
hypervisor=esx
format=ova
mv $img $out/$(basename $name)-$version.esx.ova
echo "==> '$name' image built to '$out'."
|
2e7b782a9601f1b01921d5bce3ccbdbf47373e38
|
Shell
|
hcsullivan12/sipm_wheel
|
/event_display/sipmwheel/config/setup.sh
|
UTF-8
| 1,897
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
# Environment setup for the SiPM Wheel event display. Uses `return`, so it
# is meant to be SOURCED, not executed.
# BUGFIX: the shebang was the malformed `#!bin/bash` (missing leading /).
EVENT_DIS_CONFIGDIR="$( cd $( dirname $BASH_SOURCE[0]) && pwd)"
EVENT_DIS_BASEDIR="$( dirname $EVENT_DIS_CONFIGDIR)"
#echo $EVENT_DIS_CONFIG
#echo $EVENT_DIS_BASEDIR
# Get default PATH and PYTHONPATH
PYTHONPATH_backup=$PYTHONPATH
PATH_backup=$PATH
# Check for PyQt4
if ! $(python -c "import PyQt4" &> /dev/null); then
    echo "PyQt4 failed to configure."
    PYTHONPATH=$PYTHONPATH_backup
    PATH=$PATH_backup
    return
fi
# Check for ROOT (rootcling = ROOT 6, rootcint = ROOT 5)
if [ `command -v rootcling` ]; then
    export ROOT=1
else
    if [[ -z `command -v rootcint` ]]; then
        echo
        echo Looks like you do not have ROOT installed.
        echo You cannot use SiPM Wheel EVD w/o ROOT!
        echo Aborting.
        echo
        return;
    fi
fi
# PyROOT needs $ROOTSYS/lib on PYTHONPATH.
if [[ -z $ROOTSYS ]]; then
    echo
    echo Could not find \$ROOTSYS. Make sure PyROOT is
    echo configured. You need to export \$PYTHONPATH to
    echo include the dir where ROOT.py exists.
    echo
else
    if [[ ! ":$PYTHONPATH:" == *":$ROOTSYS/lib:"* ]]; then
        export PYTHONPATH=$ROOTSYS/lib:$PYTHONPATH;
    fi
fi
# Set sub directories
export EVENT_DIS_LIBDIR=$EVENT_DIS_BASEDIR/lib
export EVENT_DIS_COREDIR=$EVENT_DIS_BASEDIR/core
# Add bin to PATH (idempotent: only when not already present)
if [[ ! ":$PATH:" == *":$EVENT_DIS_BASEDIR/bin:"* ]]; then
    export PATH=$EVENT_DIS_BASEDIR/bin:$PATH
fi
if [[ ! ":$PYTHONPATH:" == *":$EVENT_DIS_BASEDIR/bin:"* ]]; then
    export PYTHONPATH=$EVENT_DIS_BASEDIR/bin:$PYTHONPATH
fi
# Set permissions
chmod +x $EVENT_DIS_BASEDIR/bin/sipm_wheel_evd.py
# Add lib to library
if [[ ! ":$LD_LIBRARY_PATH:" == *":$EVENT_DIS_LIBDIR:"* ]]; then
    export LD_LIBRARY_PATH=$EVENT_DIS_LIBDIR:$LD_LIBRARY_PATH
fi
# Add python to PATH
if [[ ! ":$PATH:" == *":$EVENT_DIS_BASEDIR/python:"* ]]; then
    export PATH=$EVENT_DIS_BASEDIR/python:$PATH
fi
if [[ ! ":$PYTHONPATH:" == *":$EVENT_DIS_BASEDIR/python:"* ]]; then
    export PYTHONPATH=$EVENT_DIS_BASEDIR/python:$PYTHONPATH
fi
| true
|
ae78863ea1fe92f8e43a36cb56eccc4cce666a21
|
Shell
|
mahajrod/MAVR
|
/examples/desaminases/recombination/cluster_combined_sets.bash
|
UTF-8
| 1,406
| 2.640625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
#SBATCH --array=1-9%9
#SBATCH -n 2
#SBATCH --time=100:00:00 # Run time in hh:mm:ss
#SBATCH --mem-per-cpu=4096 # Minimum memory required per CPU (in megabytes)
#SBATCH --job-name=clustering_sets
#SBATCH --error=/work/pavlov/okochenova/job_reports/RUN7/clustering_sets.%A_%a.err
#SBATCH --output=/work/pavlov/okochenova/job_reports/RUN7/clustering_sets.%A_%a.out
# SLURM array job: one task per sample set; each task runs the MACE
# clustering pipeline on that set's raw VCF.
module load compiler/gcc/4.8 python/2.7
source /work/pavlov/okochenova/profile
SCRIPT=/work/pavlov/okochenova/soft/MACE/scripts/clustering_pipeline.py
WORKDIR=/work/pavlov/okochenova/combined_sets/
VCF_DIR=${WORKDIR}raw/
REFERENCE=/work/pavlov/okochenova/reference/LAN210/LAN210_v0.10m/LAN210_v0.10m.fasta
REFERENCE_ANNOTATIONS=/work/pavlov/okochenova/reference/LAN210/LAN210_v0.10m/merged_annotations_Nagalakshmi_tranf_to_LAN210_v0.10m.gff3
REFERENCE_MASKING=/work/pavlov/okochenova/reference/LAN210/LAN210_v0.10m/LAN210_v0.10m_masked_all_not_in_good_genes.gff
SAMPLE_SETS=(A1_D1 A1_D3 A1_D6 AID_D1 AID_D3 AID_D6 PmCDA1_D1 PmCDA1_D3 PmCDA1_D6)
cd ${WORKDIR}
# SLURM array ids are 1-based; the bash array is 0-based.
SAMPLE_SET_INDEX=
let "SAMPLE_SET_INDEX=${SLURM_ARRAY_TASK_ID}-1"
CURRENT_SAMPLE_SET=${SAMPLE_SETS[${SAMPLE_SET_INDEX}]}
cd ${CURRENT_SAMPLE_SET}
echo ${CURRENT_SAMPLE_SET}
${SCRIPT} -r ${REFERENCE} -a ${REFERENCE_ANNOTATIONS} -m ${REFERENCE_MASKING} -f ${VCF_DIR}${CURRENT_SAMPLE_SET}_raw.vcf -s ${CURRENT_SAMPLE_SET} -y ${CURRENT_SAMPLE_SET}
|
727bda10b1adadeb27d234132e92c01947bc684d
|
Shell
|
ballwood/node-sauce-connect
|
/acceptance/tests/testCAString.sh
|
UTF-8
| 992
| 3.515625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Acceptance test: npm-install sauce-connect through a self-signed HTTPS
# proxy, trusting the CA via an npm `ca` setting written to .npmrc.
# remove old sauce connect
rm ./lib/sc
# start proxy in bg
node ./acceptance/support/self-signed-https-server.js &
SSL_PROXY_PID=$!
echo "SSL reverse proxy started on 8081 PID: ${SSL_PROXY_PID}"
# Point the downloader at the local HTTPS proxy.
export SAUCECONNECT_CDNURL=https://localhost:8081/downloads
# Flatten the CA cert to a single line with literal \n separators, as npm
# expects in the `ca` config value.
CA_STRING=$(awk '{printf "%s\\n", $0}' ./acceptance/support/keys/ca.crt)
# write to .npmrc as bash parses newlines
echo ca=\"$CA_STRING\" > .npmrc
# install sc
npm install
unset SAUCECONNECT_CDNURL
unset CA_STRING
rm .npmrc
# ps -p Checks if the process is still running. If it is it returns 0,
# otherwise it returns 1
ps -p $SSL_PROXY_PID > /dev/null
SSL_PROXY_TASK_RUNNING=$?
# check if the process is still running by examining the exit code of ps -p
# The proxy exits itself after serving the download, so "not running"
# means the install actually went through the proxy.
CA_TEST_RESULT=1
if [ $SSL_PROXY_TASK_RUNNING -eq 1 ]; then
# not running, so has been hit.
echo "SSL proxy finished, test passed"
CA_TEST_RESULT=0
else
echo "SSL proxy not finished, test failed"
kill $SSL_PROXY_PID
fi
exit $CA_TEST_RESULT
|
f5a37a0cf37a34e3ae2d9a568c27d6f8e3fa88a3
|
Shell
|
borodust/alien-works-android-runtime
|
/alien-works-android/ecl/build-asdf-system.sh
|
UTF-8
| 821
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
# Cross-build an ASDF (Common Lisp) system for Android with ECL.
# Requires --ndk; target arch defaults to aarch64; the system name comes
# through REST_ARGS from build-common.sh's option parsing.
WORK_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
# Parses CLI options and sets NDK, TARGET_ARCH, BUILD_DIR, REST_ARGS,
# HOST_ECL — TODO confirm against utils/build-common.sh.
. $WORK_DIR/utils/build-common.sh
OPTIONAL_PARAMS=
if [[ -z "$NDK" ]]; then
echo "Path to Android NDK must be provided via --ndk"
exit 1
fi
if [[ -z "$TARGET_ARCH" ]]; then
echo "Target arch not specified via --target option, using aarch64"
TARGET_ARCH=aarch64
fi
if [[ -z "$REST_ARGS" ]]; then
echo "ASDF system name not provided"
exit 1
fi
if [[ ! -z "$BUILD_DIR" ]]; then
OPTIONAL_PARAMS+="--build-dir $BUILD_DIR"
fi
# OPTIONAL_PARAMS is deliberately expanded unquoted below so the flag and
# its value split into separate arguments.
$HOST_ECL --norc \
 --shell "$WORK_DIR/utils/build-system.lisp" -- \
 --ndk "$NDK" \
 --ecl "$WORK_DIR/ecl-$TARGET_ARCH" \
 --work-dir "$WORK_DIR" \
 --lib-dir "$WORK_DIR/../lib/" \
 --system "$REST_ARGS" $OPTIONAL_PARAMS
| true
|
63c64f65256af3ab47922a3d1869014b9f96d3a9
|
Shell
|
openshift/installer
|
/data/data/bootstrap/files/usr/local/bin/approve-csr.sh
|
UTF-8
| 470
| 2.59375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# shellcheck disable=SC1091 # using path on bootstrap machine
. /usr/local/bin/bootstrap-service-record.sh
# Approve every pending CSR, polling until bootkube signals completion.
KUBECONFIG="${1}"
echo "Approving all CSR requests until bootstrapping is complete..."
until [ -f /opt/openshift/.bootkube.done ]; do
    oc --kubeconfig="$KUBECONFIG" get csr --no-headers \
        | grep Pending \
        | awk '{print $1}' \
        | xargs --no-run-if-empty oc --kubeconfig="$KUBECONFIG" adm certificate approve
    sleep 20
done
| true
|
d2224d295b7001a50e0c8d023278ad84f503904b
|
Shell
|
guziqiu/qiu
|
/guziqiu/01.linux基础/test.sh
|
UTF-8
| 547
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
# Reads a length L and a count M, then M pairs (a, b); prints
# L - (right - left) followed by "asd".
# NOTE(review): several nested range checks below are mutually exclusive
# as written (e.g. a > left AND a < left); only the assignment syntax has
# been repaired here — the intended algorithm should be re-verified.
read L
read M
temp=0
# BUGFIX: `left=L` / `right=L` assigned the literal string "L"; the
# numeric value $L was clearly intended.
left=$L
right=$L
for (( i=1; i <= M; i++ ));do
    read a
    read b
    if [[ $a -gt $left ]];then
        if [[ $a -lt $right ]];then
            if [[ $a -lt $left ]];then
                # BUGFIX: `$left=$a` expanded to a command name (e.g. `0=5`)
                # instead of performing an assignment; same for the others.
                left=$a
            fi
        fi
    else
        left=0
    fi
    if [[ $b -gt $left ]];then
        if [[ $b -lt $right ]];then
            if [[ $b -gt $right ]];then
                right=$b
            fi
        fi
    else
        right=$L
    fi
done
# $(( )) replaces the obsolete $[ ] arithmetic form (same semantics).
temp=$(( right - left ))
temp=$(( L - temp ))
echo $temp
echo "asd"
| true
|
60d1d869c9f940eacb3f461ecfc438898d573ffc
|
Shell
|
dacap/compare-text-test
|
/test.sh
|
UTF-8
| 333
| 3.546875
| 4
|
[] |
no_license
|
#! /bin/sh
# expect <expected> <command...>
# Runs <command> once, strips CRs from its output, and compares it with
# <expected>. Prints a diagnostic and returns 1 on mismatch, 0 on match.
# BUGFIX: the original re-ran "$2" a second time just to print RESULT,
# executing any side effects twice and printing un-normalized (CR-laden)
# output; the output is now captured once and reused.
function expect() {
    local result
    result="$($2 | tr -d "\r")"
    if [[ $1 != "$result" ]] ; then
        echo "FAILED: $2"
        echo "EXPECTED: $1"
        echo "RESULT: $result"
        return 1
    else
        return 0
    fi
}
# Self-test of the expect helper against the ./abc binary, which
# presumably prints "a", "b", "c" on separate lines — confirm it is built
# alongside this script.
# abc == abc
if expect "a
b
c" ./abc ; then
echo OK
else
exit 1
fi
# Negative case: a deliberately wrong expectation must make expect fail.
# abb != abc
if ! expect "a
b
b" ./abc ; then
echo OK
else
exit 1
fi
|
2d8dbc783517ac8951ebbd99c7e378f9fe9519d9
|
Shell
|
kramernathan/iPhone-CLI-Ctrl
|
/iPhoneControl.sh
|
UTF-8
| 2,664
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
# Interactive menu that remote-controls an iPhone by emailing command
# keywords to the phone's carrier email-to-SMS gateway (see `mail` below).
clear
# ANSI color helpers.
grn=$'\e[1;32m'
red=$'\e[1;31m'
white=$'\e[0m'
blu=$'\e[1;34m'
printf "$grn \n\n"
printf "                                   ╭━━━━━━━━━╮\n"
printf "                                   ┃━━●━══━━━┃\n"
printf "                                   ┃█████████┃\n"
printf "                                   ┃█████████┃\n"
printf "                                   ┃█████████┃\n"
printf "                                   ┃█████████┃\n"
printf "                                   ┃█████████┃\n"
printf "                                   ┃█████████┃\n"
printf "                                   ┃━━━━○━━━━┃\n"
printf "                                   ╰━━━━━━━━━╯\n\n"
printf "   _/  _/_/_/  _/                                      _/_/_/                        _/                      _/   \n"
printf "      _/    _/ _/_/_/    _/_/    _/_/_/    _/_/      _/          _/_/    _/_/_/    _/_/_/_/  _/  _/_/  _/_/    _/ \n"
printf "     _/_/_/   _/    _/ _/    _/ _/    _/ _/_/_/_/   _/        _/    _/ _/    _/    _/      _/_/      _/    _/  _/ \n"
printf "    _/       _/    _/ _/    _/ _/    _/ _/         _/        _/    _/ _/    _/    _/      _/        _/    _/  _/  \n"
printf "_/ _/       _/    _/   _/_/   _/    _/   _/_/_/     _/_/_/    _/_/   _/    _/      _/_/  _/          _/_/  _/     \n"
printf "$white \n\n"
printf "List of options:\n\n"
printf "$red   1  ┃ $white 💡 Flashlight on\n\n"
printf "$red   2  ┃ $white 🚫 Flaslight off\n\n"
printf "$red   3  ┃ $white 🔐 Lock phone\n\n"
printf "$red   4  ┃ $blu  ▶$white  Play music\n\n"
printf "$red   5  ┃ $white ⏸  Pause music\n\n"
printf "$red   6  ┃ $white 👋 Say hello\n\n"
printf "$red   7  ┃ $white 🎭 Tell a joke\n\n"
printf "$red   8  ┃ $white ⛅ Show current weather\n\n"
printf "$red   9  ┃ $white 🔍 Find iPhone\n\n"
printf "Enter option: "
read option
# Keyword understood by the automation running on the phone
# (presumably a Shortcuts/IFTTT listener — confirm on the device side).
mymessage="."
case $option in
1)
mymessage="lighton"
;;
2)
mymessage="lightoff"
;;
3)
mymessage="lock5582"
;;
4)
mymessage="playmusic"
;;
5)
mymessage="pausemusic"
;;
6)
mymessage="sayhello"
;;
7)
mymessage="telljoke"
;;
8)
mymessage="tellweather"
;;
9)
mymessage="[showlocation]"
printf "\n Location Services have been turned on.\n"
printf " Click here to show the location of your iPhone and enable Lost Mode:$red https://www.icloud.com/#find $white \n\n"
;;
*)
printf "$red Invalid option, re-enter your choice\n\n"
sleep 2.5s
# /path/to/./iPhoneControl.sh
esac
printf "$grn Sending..."
# Recipient intentionally left as a placeholder (commented) —
# fill in the carrier SMS gateway address before use.
echo "$mymessage" | mail # 1234567890@text.att.net
printf " Sent\n\n"
sleep 3.0s
# /path/to/./iPhoneControl.sh
|
d0e63071f42f03818ce2090bbb4a5dfd7da2c183
|
Shell
|
kristurner97/photoshare
|
/install
|
UTF-8
| 585
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
# Install the photoshare web app into a web root and set permissions so
# the web server (www-data) can write uploads and the gallery cache.
echo "Make sure you are running this with Sudo privilages."
read -e -p "Enter the install: " -i "/var/www/html" FILEPATH;
# -p: an already-existing web root (e.g. /var/www/html) is not an error.
# Quoted so paths containing spaces are not word-split.
sudo mkdir -p "$FILEPATH"
sudo mv files/* "$FILEPATH/"
# BUGFIX: an unchecked cd meant the recursive chmod/chown below could run
# in whatever directory the script was started from.
cd "$FILEPATH" || exit 1
sudo chmod 0755 * -R #Default Permissions
sudo chown :www-data uploads -R #This is where the server uploads files and therefore needs write access
sudo chmod 0770 uploads -R #This is where the server uploads files and therefore needs write access
sudo chown :www-data gallery/resources/cache
sudo chmod 0770 gallery/resources/cache
|
f2c33adcafdebcac81c7da8b90cdb85f3f081626
|
Shell
|
BennyStrobes/ipsc_cardiomyocyte_differentiation
|
/dynamic_eqtl_calling/tissue_specific_chrom_hmm_enrichment_analysis.sh
|
UTF-8
| 1,508
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
#SBATCH --time=10:00:00 --partition=broadwl --mem=5GB
# Runs the tissue-specific ChromHMM enhancer-enrichment analysis for each
# dynamic-eQTL hit category, once per cell-line background (heart, iPSC).
parameter_string="$1"
real_eqtl_results_file="$2"
significant_egene_file="$3"
num_permutations="$4"
threshold="$5"
chrom_hmm_input_dir="$6"
time_step_independent_stem="$7"
model_version="$8"
tissue_specific_chrom_hmm_enrichment_dir="$9"
# Hit categories analysed independently.
hits_versions=( "early_time_step_hits" "late_time_step_hits" "change_in_sign_hits")
for hits_version in "${hits_versions[@]}"; do
#######################
marker_type="enhancer"
#######################
cell_line_version="heart_only_cell_lines"
# Output prefix encodes every analysis parameter for traceability.
mod_parameter_string=$parameter_string"_"$marker_type"_"$cell_line_version"_"$hits_version"_"$num_permutations"_"$threshold
python perform_tissue_specific_chrom_hmm_enrichment_analysis.py $marker_type $cell_line_version $num_permutations $chrom_hmm_input_dir $real_eqtl_results_file $significant_egene_file $time_step_independent_stem $tissue_specific_chrom_hmm_enrichment_dir$mod_parameter_string"_subsample1" $hits_version $model_version $threshold
cell_line_version="ipsc_only_cell_lines"
mod_parameter_string=$parameter_string"_"$marker_type"_"$cell_line_version"_"$hits_version"_"$num_permutations"_"$threshold
python perform_tissue_specific_chrom_hmm_enrichment_analysis.py $marker_type $cell_line_version $num_permutations $chrom_hmm_input_dir $real_eqtl_results_file $significant_egene_file $time_step_independent_stem $tissue_specific_chrom_hmm_enrichment_dir$mod_parameter_string"_subsample1" $hits_version $model_version $threshold
done
|
30f0313e1b3bb4c3d2d144478af20c94dc16c101
|
Shell
|
c3ph3us/DistributedAuctions
|
/deploy.sh
|
UTF-8
| 213
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/bash
# Copy the fat jar to the home directory of every auction node over scp.
NODES=("ssd@51.103.29.195" "ssd2@20.199.110.1" "ssd3@20.199.104.254")
for node in "${NODES[@]}"; do
    printf 'Deploying to the node: %s\n' "$node"
    scp ./build/libs/DistributedAuctions-*-all.jar "$node:"
done
| true
|
535781c00fa4931b42e320ea03005cf8756ede54
|
Shell
|
smokitchevsky/Unix-wokbench-assignment
|
/guessinggame.sh
|
UTF-8
| 630
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
# Validate the global $input against the $num_checker regex and compare
# it with the global $num_of_files. Prints a hint and returns:
#   255 = not a number, 1 = too big, 2 = too small, 0 = exact match.
function checkRange {
    if ! [[ $input =~ $num_checker ]]; then
        echo "\`$input\` is not a number" >&2
        return 255
    fi
    if [[ $input -gt $num_of_files ]]; then
        echo "The number you entered is too big."
        return 1
    fi
    if [[ $input -lt $num_of_files ]]; then
        echo "The number you entered is too small."
        return 2
    fi
    echo "Congratulations! You guessed it!"
    return 0
}
# Count entries in the current directory whose mode string starts with
# '-' (regular file) or 'l' (symlink) in `ls -Al` output.
num_of_files=$(ls -Al | grep -c "^[l-]")
echo "How many files are in the current directory?"
num_checker='^[0-9]+$' #regex used to check if the user input is a number
# Keep prompting until checkRange reports an exact match (status 0).
while :
do
read input
checkRange
[[ $? -eq 0 ]] && break
done
| true
|
62689127e7f3b18171c97450afb0eb58d5b46db1
|
Shell
|
elixircist23/BreadthDepthBash
|
/create.sh
|
UTF-8
| 780
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/bash
# breadth_first <depth> <breadth> <base>
# Builds a directory tree <depth> levels deep, creating <breadth> numbered
# children (0..breadth-1) under every directory, level by level (BFS).
#
# BUGFIX: the accumulator for the next level's paths was reset inside the
# per-directory loop, so from the third level on only the children of the
# LAST directory of the previous level were expanded. It is now reset once
# per level, before iterating the current level's directories.
function breadth_first() {
    local depth=$1
    local breadth=$2
    local base=$3
    local path=("")
    local next=()
    for ((i = 0; i < depth; i++)); do
        next=()
        for x in "${path[@]}"; do
            for ((j = 0; j < breadth; j++)); do
                mkdir -p "${base}${x}/${j}"
                next+=("${x}/${j}")
            done
        done
        path=("${next[@]}")
    done
}
# depth_first <depth> <breadth> <base>
# Recursively builds a directory tree <depth> levels deep with <breadth>
# numbered children (0..breadth-1) per directory, depth-first.
function depth_first() {
    local depth=$1
    local breadth=$2
    local base=$3
    local idx
    depth=$((depth - 1))
    # Depth exhausted: nothing to create at this level.
    if [ "$depth" -lt 0 ]; then
        return
    fi
    for ((idx = 0; idx < breadth; idx++)); do
        mkdir "$base"/$idx
        depth_first "$depth" "$breadth" "$base"/"$idx"
    done
}
# Entry point: $1=depth  $2=breadth  $3=base directory  $4=mode.
# Any mode other than "depth"/"breadth" is silently ignored, as before.
case "$4" in
    depth)
        depth_first "$1" "$2" "$3"
        ;;
    breadth)
        breadth_first "$1" "$2" "$3"
        ;;
esac
| true
|
af4064ca2330355180896209b81f3d6a785fc55c
|
Shell
|
elzoona/LastfmPlaycount
|
/install.sh
|
UTF-8
| 723
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
# Install the LastfmPlaycount Rhythmbox plugin: compile its GSettings
# schema, then copy the plugin files system-wide (root) or per-user.
# install schema
sudo cp ./org.gnome.rhythmbox.plugins.lastfmplaycount.gschema.xml /usr/share/glib-2.0/schemas/
sudo glib-compile-schemas /usr/share/glib-2.0/schemas/
if [[ $EUID -eq 0 ]]; then
echo "Installing system-wide (running as root)"
mkdir -p /usr/lib/rhythmbox/plugins/lastfmplaycount
# NOTE(review): `cp *` copies everything in the CWD, including this
# install script itself — confirm that is intended.
cp * /usr/lib/rhythmbox/plugins/lastfmplaycount
if [ -d /usr/share/rhythmbox/plugins ]; then
mkdir -p /usr/share/rhythmbox/plugins/lastfmplaycount
cp *.ui /usr/share/rhythmbox/plugins/lastfmplaycount
fi
else
echo "Installing for the current user only"
mkdir -p ~/.local/share/rhythmbox/plugins/lastfmplaycount
cp * ~/.local/share/rhythmbox/plugins/lastfmplaycount
fi
| true
|
3e121fd7a5375661e698dae808f3773c35f60641
|
Shell
|
hoskeri/dotfiles
|
/builders/update-terraform-ls
|
UTF-8
| 636
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash -eux
# Download and unpack the terraform-ls language server into ~/bin, using
# curl's ETag support so an unchanged release is not re-downloaded.
version="0.29.2"
filename="terraform-ls_${version}_linux_amd64"
builduser="${TRFM_LS_BUILDUSER:-$USER}"
self="$(readlink -f "$0")"
# Re-exec as the designated build user when not already running as it.
if [ "$(id -u "$builduser")" != "$UID" ]
then
    exec sudo -H -u "$builduser" "$self"
fi
# BUGFIX: the script contained unresolved "$(unknown)" placeholders in the
# URL, etag and archive paths; the ${filename} variable defined above (and
# previously unused) is clearly what they stood for.
url="https://github.com/hashicorp/terraform-ls/releases/download/v${version}/${filename}.zip"
bindir="${HOME}/bin"
webdir="${HOME}/.cache/update-terraform-ls"
etag="${webdir}/${filename}.etag"
archive="${webdir}/${filename}.zip"
mkdir -p "${bindir}" "${webdir}"
curl -sSfLo "${archive}" --etag-save "${etag}" --etag-compare "${etag}" "$url"
unzip -o "${archive}" -d "${bindir}" "terraform-ls"
|
1228bcc1a750459ce817c9cc15ea5d4b076fc811
|
Shell
|
aidecoe/scripts
|
/etc/acpi/actions/my_lid.sh
|
UTF-8
| 1,211
| 3.921875
| 4
|
[
"WTFPL"
] |
permissive
|
#!/bin/bash
# Requires:
# - get_process_user.sh (which can be found in this repository)
# - xscreensaver
# - pm-utils
LOGP="LID closed"
lock_screen() {
# Owner of the running xscreensaver process (empty when not running);
# get_process_user.sh presumably prints that user — TODO confirm.
local user="$(/usr/local/bin/get_process_user.sh xscreensaver)"
local lock="/usr/bin/xscreensaver-command -lock"
if [[ ! $user ]]; then
logger -p auth.warning "$LOGP: xscreensaver is not running"
return 1
fi
# Lock as the session owner and log the outcome to the auth facility.
if su "$user" -c "$lock" ; then
logger -p auth.info "$LOGP: successfully locked screen for user $user"
return 0
else
logger -p auth.err "$LOGP: locking screen failed!"
return 1
fi
}
# True (exit 0) when the AC adapter reports offline, i.e. on battery.
is_on_battery() {
    local ac_online
    ac_online="$(</sys/class/power_supply/ACAD/online)"
    [[ $ac_online = 0 ]]
}
pm_suspend() {
# Suspend via pm-utils; log to the user facility on failure.
if ! /usr/sbin/pm-suspend; then
logger -p user.err "$LOGP: pm-suspend failed!"
return 1
fi
}
on_close() {
# Lock the screen first; only consider suspending when locking succeeded.
if lock_screen; then
if is_on_battery; then
logger -p user.info "$LOGP: laptop on battery - suspending..."
pm_suspend
else
logger -p user.info "$LOGP: laptop on AC - keeping active"
fi
fi
}
# ACPI event handler arguments: $2 = device class, $3 = state.
# Only react to LID close events.
if [[ $2 = LID ]]; then
case "$3" in
close) on_close ;;
esac
fi
| true
|
b70235b0e40b41716e3a691e7e2d347eac43f0d3
|
Shell
|
ezequielsp/box
|
/install.sh
|
UTF-8
| 2,271
| 3.109375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Vagrant-style provisioning: LAMP stack (PHP 5.6, Apache, MySQL 5.5),
# Node, Xdebug, Composer, swap, and the ezequielsp/dev helper script.
echo "--- Good morning, master. Let's get to work. Installing now. ---"
sudo locale-gen pt_BR.UTF-8
echo "--- Updating packages list ---"
sudo apt-get update
echo "--- MySQL time ---"
# Pre-seed the MySQL root password so the install is non-interactive.
sudo debconf-set-selections <<< 'mysql-server mysql-server/root_password password pass'
sudo debconf-set-selections <<< 'mysql-server mysql-server/root_password_again password pass'
echo "--- Installing base packages ---"
sudo apt-get install -y vim curl python-software-properties
echo "--- Package For PHP 5.6 ---"
sudo add-apt-repository -y ppa:ondrej/php5-5.6
echo "--- Updating packages list ---"
sudo apt-get update
echo "--- Installing PHP-specific packages ---"
sudo apt-get install -y php5 apache2 libapache2-mod-php5 php5-curl php5-gd php5-mcrypt php5-intl mysql-server-5.5 php5-mysql php5-sqlite git-core
echo "--- Installing Node and Npm ---"
sudo apt-get install -y nodejs npm curl openssl
echo "--- Installing and configuring Xdebug ---"
sudo apt-get install -y php5-xdebug
cat << EOF | sudo tee -a /etc/php5/mods-available/xdebug.ini
xdebug.scream=1
xdebug.cli_color=1
xdebug.show_local_vars=1
EOF
echo "--- Enabling mod-rewrite ---"
sudo a2enmod rewrite
echo "--- What developer codes without errors turned on? Not you, master. ---"
sudo sed -i "s/error_reporting = .*/error_reporting = E_ALL/" /etc/php5/apache2/php.ini
sudo sed -i "s/display_errors = .*/display_errors = On/" /etc/php5/apache2/php.ini
echo "-- Configure Apache"
sudo sed -i 's/AllowOverride None/AllowOverride All/' /etc/apache2/apache2.conf
sudo sed -i 's/DocumentRoot \/var\/www\/html/DocumentRoot \/var\/www/' /etc/apache2/sites-enabled/000-default.conf
echo "--- Composer is the future. But you knew that, did you master? Nice job. ---"
curl -sS https://getcomposer.org/installer | php
sudo mv composer.phar /usr/local/bin/composer
# Enable Swaping Memory
# 1 GiB swap file for low-memory VMs.
sudo /bin/dd if=/dev/zero of=/var/swap.1 bs=1M count=1024
sudo /sbin/mkswap /var/swap.1
sudo /sbin/swapon /var/swap.1
echo "-- Clonando Dev --"
git clone https://github.com/ezequielsp/dev.git
cd dev
sudo mv dev /usr/local/bin/
cd ..
echo "-- Removendo diretorio --"
rm -rf dev
sudo chmod +x /usr/local/bin/dev
echo "--- Restarting Apache ---"
sudo service apache2 restart
cd /var/www
php -v
| true
|
b1045f19759b69df96edf9148686799e79705167
|
Shell
|
st-mark-reformed/stmarkreformed.com
|
/bin/docker-functions/docker-sync.sh
|
UTF-8
| 937
| 2.84375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
source ../../dev 2> /dev/null;
# Print a one-line description of the docker-sync command (no newline).
docker-sync-help() {
    printf '%s' "(Syncs production database and content to local environment)"
}
function docker-sync() {
# Bring up a throwaway ssh container, run the three staged sync scripts
# (01: ssh dump from prod, 02: db import, 03: rsync of assets), then tear
# the compose project back down.
docker-compose -f docker-compose.sync.to.local.yml -p stmark-ssh up -d;
docker exec stmark-ssh bash -c "chmod +x /opt/project/docker/scripts/sync-from-prod-01-ssh.sh;";
docker exec stmark-ssh bash -c "/opt/project/docker/scripts/sync-from-prod-01-ssh.sh;";
docker exec stmark-db bash -c "chmod +x /opt/project/docker/scripts/sync-from-prod-02-db.sh;";
docker exec stmark-db bash -c "/opt/project/docker/scripts/sync-from-prod-02-db.sh;";
docker exec stmark-ssh bash -c "chmod +x /opt/project/docker/scripts/sync-from-prod-03-rsync.sh;";
docker exec stmark-ssh bash -c "/opt/project/docker/scripts/sync-from-prod-03-rsync.sh;";
docker-compose -f docker-compose.sync.to.local.yml -p stmark-ssh down;
return 0;
}
| true
|
3e2d54a88e3601dbef5bacacb08b8e8df7e2377a
|
Shell
|
dlxsvip/aes2.0
|
/build/start.sh
|
UTF-8
| 1,654
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
# Interactive front-end for lib/aes.jar: -e encrypt, -d decrypt, -r random
# string generation, q quit. Prints the menu banner (labels are Chinese:
# 加密 = encrypt, 解密 = decrypt, 退出 = quit).
echo -e "\033[32m-----------------------------\033[0m"
echo -e "\033[32m【-e】: 加密 \033[0m"
echo -e "\033[32m【-d】: 解密 \033[0m"
echo -e "\033[32m【 q】: 退出 \033[0m"
echo -e "\033[32m-----------------------------\033[0m"
# Encrypt prompt loop: reads text ("加密:" = encrypt), dispatches mode
# switches (-d/-r), exits on q/-q, otherwise encrypts via aes.jar.
# Tail-recurses into itself, so it loops until the user quits.
encryptFun(){
read -p "加密:" txt
if [ "$txt" == "" ];then
echo ""
elif [[ "$txt" == "q" || "$txt" == "-q" ]];then
exit 0
elif [ "$txt" == "-e" ];then
echo ""
elif [ "$txt" == "-d" ];then
dncryptFun
elif [ "$txt" == "-r" ];then
randomFun
else
arr=("$txt")
# Unquoted $txt word-splits into multiple arguments; quoted "$txt" would
# be passed as a single string.
java -jar `dirname $0`/lib/aes.jar "-e" $txt
fi
encryptFun
}
# Decrypt prompt loop: reads ciphertext ("解密:" = decrypt), dispatches
# mode switches (-e/-r), exits on q/-q, otherwise decrypts via aes.jar.
dncryptFun(){
read -p "解密:" txt
if [ "$txt" == "" ];then
echo ""
elif [[ "$txt" == "q" || "$txt" == "-q" ]];then
exit 0
elif [ "$txt" == "-e" ];then
encryptFun
elif [ "$txt" == "-d" ];then
echo ""
elif [ "$txt" == "-r" ];then
randomFun
else
# Alternatively, convert the input to an array as below.
arr=("$txt")
java -jar `dirname $0`/lib/aes.jar "-d" ${arr[@]}
fi
dncryptFun
}
# Random-string prompt loop: reads a length ("随机数长度:" = random length)
# and asks aes.jar for a random value of that length; rejects non-numbers
# ("请输入数字" = please enter a number).
randomFun(){
read -p "随机数长度:" txt
if [ "$txt" == "" ];then
echo ""
elif [[ "$txt" == "q" || "$txt" == "-q" ]];then
exit 0
elif [ "$txt" == "-e" ];then
encryptFun
elif [ "$txt" == "-d" ];then
dncryptFun
elif [ "$txt" == "-r" ];then
echo ""
# Numeric test trick: -gt errors (silenced) for non-integers.
elif [ "$txt" -gt 0 ] 2>/dev/null ;then
java -jar `dirname $0`/lib/aes.jar "-r" "$txt"
else
echo -e "\033[31m请输入数字\033[0m"
fi
randomFun
}
# Start in the mode requested on the command line; default to encrypt.
case "$1" in
-e)
encryptFun ;;
-d)
dncryptFun ;;
-r)
randomFun ;;
*)
encryptFun ;;
esac
| true
|
4173cf8bd8c2f013dadcf73574f78cfd4f48fba0
|
Shell
|
ngageoint/storyscapes
|
/docker/pgadmin/run.sh
|
UTF-8
| 1,410
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/sh
# Container entrypoint: load env_* secrets, wait for Postgres, then run
# per-database init scripts (--init-db) and/or serve pgadmin (--serve).
set -e
# Load secrets as environment variables
for secret in /run/secrets/env_*; do
# Pattern matches with no results are treated as string literals.
# Verify that the file actually exists.
[ -f "$secret" ] || continue
# set -a auto-exports every variable the sourced file assigns.
set -a
. $secret
set +a
done
# Setup Postgres authentication
echo "$DATABASE_HOST:*:*:$DATABASE_USER:$DATABASE_PASSWORD" > ~/.pgpass
chmod 0600 ~/.pgpass
# Wait until Postgres is up
until psql -h $DATABASE_HOST -p $DATABASE_PORT -U $DATABASE_USER -c '\l' -P pager=off
do
>&2 echo "Postgres is unavailable - sleeping"
sleep 1
done
for i do # loop over $@
# Run init scripts on the DBs
if [ "$i" = "--init-db" ]; then
for f in $SCRIPTS/*.sql
do
# Each script file provisions a database named after the file.
dbname=`basename ${f%.*}`
# Create database using the name of the file
psql -v ON_ERROR_STOP=0 -h $DATABASE_HOST -p $DATABASE_PORT -U $DATABASE_USER -c "CREATE DATABASE $dbname;" || true
psql -v ON_ERROR_STOP=0 -h $DATABASE_HOST -p $DATABASE_PORT -U $DATABASE_USER -c "GRANT ALL PRIVILEGES ON DATABASE $dbname TO $DATABASE_USER;"
# Run the contents of the file in the database
psql -v ON_ERROR_STOP=0 -h $DATABASE_HOST -p $DATABASE_PORT -U $DATABASE_USER -d $dbname -f $f
done
fi
# Serve the pgadmin application
if [ "$i" = "--serve" ]; then
/docker-entrypoint.sh
fi
done
| true
|
67db87cbfeacaa2368cc3df0f89b82d64f340434
|
Shell
|
cha63506/haproxy-marathon
|
/docker-entrypoint.sh
|
UTF-8
| 3,903
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/bash
set -e
# Defaults; all of these can be overridden from the container environment.
REFRESH_TIMEOUT=${REFRESH_TIMEOUT:-60}
LISTEN=${LISTEN:-0.0.0.0}
HAPROXY_PID=/var/run/haproxy.pid
HAPROXY_CFG=/etc/haproxy/haproxy.cfg
HAPROXY_CFG_TMP=/tmp/haproxy.cfg
LOGGER=${LOGGER:-logger -s -t haproxy}
# Emit a log line through the configured $LOGGER command.
log() {
    # $LOGGER is intentionally unquoted: it is word-split into cmd + args.
    printf '%s\n' "$*" | $LOGGER
}
# Start a daemonised haproxy (-D), recording its PID for later reloads.
haproxy_start() {
    log "Starting haproxy process"
    haproxy -D -f "$HAPROXY_CFG" -p "$HAPROXY_PID"
}
# Gracefully reload: launch a fresh haproxy and tell the old process(es)
# listed in the PID file to finish their connections (-sf).
haproxy_reload() {
    log "Reloading haproxy process"
    # $(cat ...) is deliberately unquoted: the PID file may hold several PIDs.
    haproxy -D -f "$HAPROXY_CFG" -p "$HAPROXY_PID" -sf $(cat "$HAPROXY_PID")
}
# Remove the SYN-drop iptables rules for every port haproxy binds.
# The inner while loop repeats the delete until iptables fails, since
# repeated soft reloads can stack several identical rules.
function haproxy_soft_cleanup {
    # Extract the bound ports straight from the config file (was `cat | grep`).
    PORTS=$(grep '^ *\<bind\>.*:' "$HAPROXY_CFG" | sed -E 's/.*:(.+)/\1/' | sort -n | paste -s -d' ')
    log "Cleaning up iptables rules for ports ${PORTS}"
    for PORT in $PORTS; do
        >&2 echo "Deleting iptables rule for port ${PORT}; please ignore 'Bad rule' error if it appears below"
        while iptables -D INPUT -p tcp --dport "$PORT" --syn -j DROP; do true; done
    done
}
function haproxy_soft_reload {
    # Soft-reload haproxy, avoiding any broken connection attempts
    # (see http://marc.info/?l=haproxy&m=133262017329084&w=2)
    # (see also http://engineeringblog.yelp.com/2015/04/true-zero-downtime-haproxy-reloads.html)
    # Briefly drop new SYNs on every bound port so clients retransmit instead
    # of hitting a socket that is mid-handover, then reload and clean up.
    PORTS=$(grep '^ *\<bind\>.*:' "$HAPROXY_CFG" | sed -E 's/.*:(.+)/\1/' | sort -n | paste -s -d' ')
    log "Haproxy soft reload, protecting ports ${PORTS}"
    for PORT in $PORTS; do
        iptables -I INPUT -p tcp --dport "$PORT" --syn -j DROP
    done
    sleep 0.5
    haproxy_reload
    haproxy_soft_cleanup
}
# Write the complete haproxy configuration to stdout: the static
# global/defaults header followed by one listen section per Marathon app.
config() {
    header
    apps "$@"
}
# Emit the static haproxy header: global + defaults sections, plus an
# optional stats listener controlled by ENABLE_STATS / STATS_* env vars.
# Timeouts default to 11s/11m/11m when the *_TIMEOUT vars are unset.
function header {
cat <<EOF
global
  log /dev/log local0 notice
  maxconn 4096
defaults
  log global
  retries 3
  maxconn 2000
  timeout connect ${CONNECT_TIMEOUT:-11s}
  timeout client ${CLIENT_TIMEOUT:-11m}
  timeout server ${SERVER_TIMEOUT:-11m}
EOF
  if [ -n "$ENABLE_STATS" ]
  then
    echo "listen stats"
    if [ -n "$STATS_LISTEN_PORT" ]
    then
      echo "  bind ${STATS_LISTEN_HOST:-127.0.0.1}:${STATS_LISTEN_PORT:-9090}"
    fi
    if [ -n "$STATS_LISTEN_PORT_INDEX" ]
    then
      # NOTE(review): eval-based indirection reads $PORT<index>; only safe
      # while STATS_LISTEN_PORT_INDEX is a trusted integer — confirm.
      echo "  bind ${STATS_LISTEN_HOST:-127.0.0.1}:$(eval echo \${PORT$STATS_LISTEN_PORT_INDEX})"
    fi
    cat <<EOF
  balance
  mode http
  stats enable
  stats uri /
EOF
    if [ -n "$STATS_USER" ] || [ -n "$STATS_PASSWORD" ]
    then
      echo "  stats auth ${STATS_USER:-admin}:${STATS_PASSWORD:-admin}"
    fi
  fi
}
# Emit one haproxy "listen" section per Marathon app that exposes a numeric
# service port. Input lines from /v2/tasks look like
# "<app> <port> <host:port> ..." — presumably; verify against the Marathon
# version in use.
function apps {
  # Try each Marathon master argument in turn until one answers; give up
  # (return 1) when the last one fails. Runs in a subshell so `shift` and
  # `return` do not disturb the caller's positional parameters.
  (until curl -sSfLk -m 10 -H 'Accept: text/plain' "${1%/}"/v2/tasks; do [ $# -lt 2 ] && return 1 || shift; done) | while read -r txt
  do
    # Intentional word split: re-parse the line into positional params.
    set -- $txt
    if [ $# -lt 2 ]; then
      shift $#
      continue
    fi
    local app_name="$1"
    local app_port="$2"
    shift 2
    # Skip apps listed (whitespace-separated) in IGNORED_APPS.
    for ignored_app in ${IGNORED_APPS}
    do
      if [ "${app_name}" = "${ignored_app}" ]; then
        shift $#
        continue 2
      fi
    done
    # Emit a section only when the service port is purely numeric
    # (the pattern strips to empty on any non-digit character).
    if [ ! -z "${app_port##*[!0-9]*}" ]
    then
      cat <<EOF
listen ${app_name}-${app_port}
  bind ${LISTEN}:${app_port}
  mode tcp
  option tcplog
  balance leastconn
EOF
      # Remaining params are task endpoints; each server line is numbered
      # with $# (endpoints left), i.e. in descending order.
      while [[ $# -ne 0 ]]
      do
        out "  server ${app_name}-$# $1 check"
        shift
      done
    fi
  done
}
# out: print all arguments as a single line on stdout.
out() {
    printf '%s\n' "$*"
}

# msg: like out, but to stderr.
msg() {
    out "$*" >&2
}

# err: print a message to stderr and return a failing status, preserving
# the caller's previous exit code when it was already non-zero.
err() {
    local x=$?
    msg "$*"
    return $(( x == 0 ? 1 : x ))
}
# ---- entry point ----------------------------------------------------------
# Usage: docker-entrypoint.sh <marathon_master> [<marathon_master> ...]
if [ "$#" -lt 1 ]; then
    echo "USAGE: $0 <marathon_masters>"
    exit 1
fi
mkdir -p "$(dirname "$HAPROXY_CFG")"
config "$@" > "$HAPROXY_CFG"
# Remove stale SYN-drop rules possibly left behind by a previous run.
if [ -n "$HAPROXY_SOFT_RELOAD" ]; then
    haproxy_soft_cleanup
fi
haproxy_start
log "Haproxy started."
# Poll Marathon forever; regenerate and reload only when the config changed.
while true
do
    sleep "$REFRESH_TIMEOUT"
    config "$@" > "$HAPROXY_CFG_TMP"
    if ! diff -q "$HAPROXY_CFG_TMP" "$HAPROXY_CFG" >/dev/null
    then
        cp "$HAPROXY_CFG_TMP" "$HAPROXY_CFG"
        log "Haproxy config changed, reloading"
        if [ -n "$HAPROXY_SOFT_RELOAD" ]; then
            haproxy_soft_reload
        else
            haproxy_reload
        fi
        log "Haproxy restarted."
    fi
done
| true
|
7267239e60e2f793f59380c7c6d02fd22ee81309
|
Shell
|
waringid/zeronokubernetes
|
/07-3-enable-kubelet.sh
|
UTF-8
| 2,882
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate the kubelet configuration and systemd unit on this node, then
# enable and (re)start the kubelet service.
set -euo pipefail
# Directory containing this script; used to locate the shared node env file
# (which is expected to define SELFS_IP, CLUSTER_DNS_*, TEMPDIR_NODE_SIDE,
# PREFIX_PATH — TODO confirm against 07-0-env-node.sh).
STUDYK8S_PATH=`dirname \`readlink -f $0\``
source $STUDYK8S_PATH/07-0-env-node.sh
# --- Earlier configuration variants, kept commented out for reference ------
## cat > kubelet-conf.yaml << EOF
## kind: KubeletConfiguration
## apiVersion: kubelet.config.k8s.io/v1beta1
## KubeletConfiguration:
##   Address: "${SELFS_IP}"
##   ClusterDNS: "${CLUSTER_DNS_SVC_IP}"
##   ClusterDomain: "${CLUSTER_DNS_DOMAIN}"
##   HairpinMode: "promiscuous-bridge"
##   SerializeImagePulls: false
## EOF
## cat > ${TEMPDIR_NODE_SIDE}/kubelet.config.json << EOF
## {
##   "kind": "KubeletConfiguration",
##   "apiVersion": "kubelet.config.k8s.io/v1beta1",
##   "authentication": {
##     "x509": {
##       "clientCAFile": "/etc/kubernetes/ssl/ca.pem"
##     },
##     "webhook": {
##       "enabled": true,
##       "cacheTTL": "2m0s"
##     },
##     "anonymous": {
##       "enabled": false
##     }
##   },
##   "authorization": {
##     "mode": "Webhook",
##     "webhook": {
##       "cacheAuthorizedTTL": "5m0s",
##       "cacheUnauthorizedTTL": "30s"
##     }
##   },
##   "address": "${SELFS_IP}",
##   "port": 10250,
##   "readOnlyPort": 0,
##   "cgroupDriver": "cgroupfs",
##   "hairpinMode": "promiscuous-bridge",
##   "serializeImagePulls": false,
##   "featureGates": {
##     "RotateKubeletClientCertificate": true,
##     "RotateKubeletServerCertificate": true
##   },
##   "clusterDomain": "${CLUSTER_DNS_DOMAIN}",
##   "clusterDNS": ["${CLUSTER_DNS_SVC_IP}"]
## }
## EOF
# --- Active, minimal KubeletConfiguration (JSON) ---------------------------
cat > ${TEMPDIR_NODE_SIDE}/kubelet.config.json << EOF
{
  "kind": "KubeletConfiguration",
  "apiVersion": "kubelet.config.k8s.io/v1beta1",
  "address": "${SELFS_IP}",
  "cgroupDriver": "cgroupfs",
  "hairpinMode": "promiscuous-bridge",
  "serializeImagePulls": false,
  "clusterDomain": "${CLUSTER_DNS_DOMAIN}",
  "clusterDNS": ["${CLUSTER_DNS_SVC_IP}"]
}
EOF
# --- systemd unit ----------------------------------------------------------
# NOTE(review): Restart=no makes RestartSec ineffective — confirm intent.
cat > ${TEMPDIR_NODE_SIDE}/kubelet.service <<EOF
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service
[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=${PREFIX_PATH}/bin/kubelet \\
  --config=${TEMPDIR_NODE_SIDE}/kubelet.config.json \\
  --hostname-override=${SELFS_IP} \\
  --bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \\
  --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \\
  --cert-dir=/etc/kubernetes/ssl \\
  --logtostderr=true \\
  --fail-swap-on=false \\
  --v=2
Restart=no
RestartSec=10
[Install]
WantedBy=multi-user.target
EOF
# Install the unit and (re)start kubelet.
cp ${TEMPDIR_NODE_SIDE}/kubelet.service /etc/systemd/system/kubelet.service
systemctl daemon-reload
systemctl enable kubelet
systemctl restart kubelet
#systemctl status kubelet --no-pager -l
| true
|
898b0208280f3ee70a7014d46bb8791efa7e9172
|
Shell
|
petronny/aur3-mirror
|
/arch-remaster/PKGBUILD
|
UTF-8
| 889
| 3.171875
| 3
|
[] |
no_license
|
# Maintainer: TADOKORO Saneyuki <saneyan@mail.gfunction.com>
pkgname=arch-remaster
pkgver=0.1.0
pkgrel=2
pkgdesc='Remastering Arch Linux ISO'
url='https://github.com/Saneyan/arch-remaster'
arch=('i686' 'x86_64')
license=('MIT')
depends=('xorriso' 'squashfs-tools' 'arch-install-scripts' 'zsh')
optdepends=('aufs3')
makedepends=('git')
# Upstream git source; fetched manually inside package() rather than via
# the source=() array.
_gitsrc='git://github.com/Saneyan/arch-remaster.git'
_gitname='arch-remaster'
package() {
  msg 'Connection to GIT server...'
  # NOTE(review): cloning/pulling inside package() bypasses makepkg's
  # source=()/checksum machinery and needs network at packaging time.
  if [ -d "${_gitname}" ] ; then
    cd $_gitname && git pull origin
  else
    cd $srcdir && git clone $_gitsrc $_gitname
    cd $_gitname
  fi
  msg 'Installing Arch Remaster...'
  # Install the payload under /opt and symlink the launcher into /usr/bin.
  install -d -m755 $pkgdir/opt/arch-remaster
  for i in 'bin' 'subsets' 'subcmds' 'files' 'chroot'; do
    cp --recursive --verbose ./$i $pkgdir/opt/arch-remaster/$i
  done
  install -d -m755 $pkgdir/usr/bin
  ln -s /opt/arch-remaster/bin/arch-remaster $pkgdir/usr/bin
}
| true
|
7b62fa452bc5a9a38aaed013e82ad4ff5d7116b2
|
Shell
|
ranjithmani/bash-hub
|
/create-dir.sh
|
UTF-8
| 647
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
#Description : To create directory structure for copying the logs from the individual server
#USAGE : ./create-dir.sh
#Created on : 05-May-2015
#Dependency : create a server.txt file in the script home directory and add server names for which you want to create directories
# NOTE: please run this script prior to file-push.sh
#
# Layout created: /archive/<server>/<YYYY>/<Mon>/<DD>
# Use explicit, locale-independent-ish format specifiers instead of parsing
# `date` output with awk (field positions vary by locale).
CY=$(date +%Y)   # year, e.g. 2015  (was: date | awk '{print $6}')
CM=$(date +%b)   # abbreviated month name, e.g. May  (was: date | awk '{print $2}')
CD=$(date +%d)   # day of month. BUG FIX: the original parsed field 1 of
                 # `date +%D` (MM/DD/YY), which is the MONTH, not the day.
# Read server names line by line; `for i in $(cat ...)` would word-split.
while IFS= read -r server; do
    # Skip blank lines.
    [ -n "$server" ] || continue
    # mkdir -p creates the whole chain and is a no-op when it exists,
    # replacing the original cd/test/mkdir ladder.
    mkdir -p "/archive/$server/$CY/$CM/$CD"
done < server.txt
| true
|
30a30a8ea4f40ddea3c0e86bfddd277b3552a815
|
Shell
|
RadPad/docker-radpad-scripts
|
/radpad.sh
|
UTF-8
| 711
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
# A list of helper aliases to work with Docker and Rails
# There may be multiple containers that match the pattern
# so only query for the first match
#export DOCKER_CONTAINER="`docker ps | grep radpad/ | awk 'NR==1{ print $1 }'`"
CONTAINER_REGEX='radpad\/(dev|stg|prod)\-radpad\-(api|job|web)\:'
# grep -E replaces the deprecated egrep; $() replaces backticks; the regex
# is quoted to avoid word splitting/globbing.
export DOCKER_CONTAINER="$(docker ps | grep -E "${CONTAINER_REGEX}" | awk 'NR==1{ print $1 }')"
# NOTE: $DOCKER_CONTAINER expands when each alias is *defined*, pinning the
# container id captured above for the rest of the session.
alias rpexec="docker exec -it $DOCKER_CONTAINER"
alias rpexec_notty="docker exec $DOCKER_CONTAINER"
alias rplogs="docker logs -f $DOCKER_CONTAINER"
alias rpbash="rpexec bash -l"
alias rprun="rpexec bash -lc"
alias rpmigrate='rprun "bundle exec rake db:migrate"'
alias rpconsole='rprun "bundle exec rails c"'
| true
|
57e2e0fd8dc168cb805b1c9abd73c0002e5b9d1d
|
Shell
|
masa0x80/dotfiles.append
|
/.zsh/completion.zsh
|
UTF-8
| 1,917
| 2.828125
| 3
|
[
"MIT"
] |
permissive
|
# Completion
autoload -U compinit; compinit # enable the completion system
setopt menu_complete # with multiple candidates, insert the first match immediately instead of just listing them (auto_list)
setopt list_types # mark file types in the candidate list, like ls -F
setopt always_last_prompt # keep the prompt in place while listing candidates
setopt auto_list # list completion candidates
setopt auto_menu # cycle through candidates with <Tab>
setopt auto_param_keys # automatically complete matching brackets and the like
setopt auto_param_slash # append a / when the completed word is a directory name
setopt mark_dirs # append a trailing / to directories matched by filename expansion
setopt magic_equal_subst # complete after = in arguments such as --prefix=/usr
setopt brace_ccl # expand {a-c} to a b c
setopt correct # correct spelling mistakes
zstyle ':completion:*:default' menu select=2 # select candidates with the cursor; start menu selection from 2+ candidates
zstyle ':completion:*' matcher-list '' 'm:{a-z}={A-Z}' '+m:{A-Z}={a-z} r:|[-_.]=**' # case-insensitive matching; treat - _ . as wildcard anchors
# Color settings
if (( $+commands[dircolors] )); then
  eval $(dircolors $HOME/.zsh/misc/dircolors.ansi-dark)
elif (( $+commands[gdircolors] )); then
  eval $(gdircolors $HOME/.zsh/misc/dircolors.ansi-dark)
fi
if [ -n "$LS_COLORS" ]; then
  zstyle ':completion:*' list-colors ${(s.:.)LS_COLORS}
fi
# Source: https://github.com/junegunn/fzf/blob/master/shell/completion.zsh
load_file $HOME/.zsh/misc/fzf_completion.zsh
# Source: https://github.com/simonwhitaker/gibo/blob/master/gibo-completion.zsh
load_file $HOME/.zsh/misc/gibo-completion.zsh
| true
|
425c25d6212d8684a0b1926e3d1844663308cae6
|
Shell
|
mgraczyk/HomeConfig
|
/scripts/serv
|
UTF-8
| 146
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/sh
# Serve the current directory over HTTP: prefer webfsd when installed,
# otherwise fall back to Python's built-in http.server.
#
# BUG FIX: `&> /dev/null` is a bashism; under POSIX sh it parses as
# "run in background, then redirect", silently backgrounding the check.
if command -v webfsd >/dev/null 2>&1
then
    # First argument is the port; default to 8000.
    port=${1:-8000}
    webfsd -F -r . -p "$port" -d -f index.html
else
    # Pass all arguments through to http.server ("$@" keeps them intact).
    python3 -m http.server "$@"
fi
| true
|
e1fb56240a1923b3fe017b65b75ab22ec8f3570a
|
Shell
|
SharatMehrotra/TheUnixWorkbench
|
/guessinggame.sh
|
UTF-8
| 473
| 3.546875
| 4
|
[] |
no_license
|
# Interactive game: the player guesses how many regular files are in the
# current directory (lines of `ls -l` starting with '-'); loops until right.
guessgame() {
    echo "Enter your guess: "
    read guess
    # grep -c counts matching lines (equivalent to grep | wc -l here).
    ans=$(ls -l | grep -c "^-")
    echo $ans
    while [[ $guess -ne $ans ]]
    do
        if [[ $guess -lt $ans ]]
        then
            echo "Your guess is too low. Enter new guess"
        else
            echo "Your guess is too high. Enter new guess"
        fi
        read guess
    done
    echo "Your guess is correct!"
    echo "The number of files is $guess"
}
# Entry point: run the interactive game.
guessgame
| true
|
ba1895967cdbd309fe2952669cc841ebb8c34213
|
Shell
|
bigklopp/Shell_git
|
/array_for1.sh
|
UTF-8
| 239
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
# Demonstrate a poor-man's 2-D array: store the *names* of the row arrays
# in `records` and dereference each via bash indirect expansion (${!var}).
row1=("1" "2" "3" "4" "5")
row2=("6" "7" "8" "9" "10")
records=(row1 row2)

for rec in "${records[@]}"; do
    echo "Row --- " $rec
    # Build e.g. "row1[*]"; ${!ref} then expands to that array's elements.
    ref="$rec[*]"
    for cell in ${!ref}; do
        echo $rec $cell
    done
done
| true
|
d72c2110a7b3ed85db3fe8214c24b54c2e3641fd
|
Shell
|
globus/globus-toolkit
|
/common/source/scripts/globus-user-env.sh.in
|
UTF-8
| 4,273
| 2.84375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#
# Copyright 1999-2010 University of Chicago
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# source this file to properly set up your environment for globus applications
# this requires that GLOBUS_LOCATION be set.
# GLOBUS_PATH will be set by this script to save the current location,
# should you decide to change GLOBUS_LOCATION to a different location and
# re source this script, the old GLOBUS_PATH information will be removed from
# your environment before applying the new GLOBUS_LOCATION
#
# Install paths substituted at build time (autoconf @...@ placeholders);
# prefix can be overridden by a pre-set GLOBUS_LOCATION.
prefix="${GLOBUS_LOCATION-@prefix@}"
exec_prefix="@exec_prefix@"
sbindir="@sbindir@"
bindir="@bindir@"
includedir="@includedir@"
datarootdir="@datarootdir@"
datadir="@datadir@"
libexecdir="@libexecdir@"
# Source every per-architecture initializer; each is expected to set
# $libdir, which is accumulated (space-separated) into $libdirs.
for arch_specific in "${libexecdir}/globus-script-initializer."*; do
    if [ -f "${arch_specific}" ]; then
        . "${arch_specific}"
        if [ -d "$libdir" ]; then
            libdirs="${libdirs:+$libdirs }$libdir"
        fi
    fi
done
sysconfdir="@sysconfdir@"
sharedstatedir="@sharedstatedir@"
localstatedir="@localstatedir@"
perlmoduledir="@perlmoduledir@"
# Default GLOBUS_LOCATION to the configured prefix when not set by the user.
if [ -z "${GLOBUS_LOCATION}" ]; then
    GLOBUS_LOCATION="$prefix"
fi
# If this script was sourced before (GLOBUS_PATH remembers the previous
# location), scrub that location's entries out of every search path so
# re-sourcing with a new GLOBUS_LOCATION does not accumulate stale entries.
# Each sed removes ":<path>..." mid-list and "<path>...:" at the start.
if [ -n "${GLOBUS_PATH}" ]; then
    PATH=`echo "${PATH}" | sed -e "s%:${GLOBUS_PATH}[^:]*%%g" -e "s%^${GLOBUS_PATH}[^:]*:\{0,1\}%%"`
    LD_LIBRARY_PATH=`echo "${LD_LIBRARY_PATH}" | sed -e "s%:${GLOBUS_PATH}[^:]*%%g" -e "s%^${GLOBUS_PATH}[^:]*:\{0,1\}%%"`
    DYLD_LIBRARY_PATH=`echo "${DYLD_LIBRARY_PATH}" | sed -e "s%:${GLOBUS_PATH}[^:]*%%g" -e "s%^${GLOBUS_PATH}[^:]*:\{0,1\}%%"`
    LIBPATH=`echo "${LIBPATH}" | sed -e "s%:${GLOBUS_PATH}[^:]*%%g" -e "s%^${GLOBUS_PATH}[^:]*:\{0,1\}%%"`
    SHLIB_PATH=`echo "${SHLIB_PATH}" | sed -e "s%:${GLOBUS_PATH}[^:]*%%g" -e "s%^${GLOBUS_PATH}[^:]*:\{0,1\}%%"`
    if [ -n "${MANPATH}" ]; then
        MANPATH=`echo "${MANPATH}" | sed -e "s%:${GLOBUS_PATH}[^:]*%%g" -e "s%^${GLOBUS_PATH}[^:]*:\{0,1\}%%"`
    fi
    if [ -n "${PERL5LIB}" ]; then
        PERL5LIB=`echo "${PERL5LIB}" | sed -e "s%:${GLOBUS_PATH}[^:]*%%g" -e "s%^${GLOBUS_PATH}[^:]*:\{0,1\}%%"`
    fi
fi
# Also scrub entries under the *current* GLOBUS_LOCATION before re-adding
# them below.
# NOTE(review): LD_LIBRARY_PATH is not scrubbed in this second pass even
# though it was in the GLOBUS_PATH pass above — looks like an omission;
# confirm against upstream.
PATH=`echo "${PATH}" | sed -e "s%:${GLOBUS_LOCATION}[^:]*%%g" -e "s%^${GLOBUS_LOCATION}[^:]*:\{0,1\}%%"`
DYLD_LIBRARY_PATH=`echo "${DYLD_LIBRARY_PATH}" | sed -e "s%:${GLOBUS_LOCATION}[^:]*%%g" -e "s%^${GLOBUS_LOCATION}[^:]*:\{0,1\}%%"`
LIBPATH=`echo "${LIBPATH}" | sed -e "s%:${GLOBUS_LOCATION}[^:]*%%g" -e "s%^${GLOBUS_LOCATION}[^:]*:\{0,1\}%%"`
SHLIB_PATH=`echo "${SHLIB_PATH}" | sed -e "s%:${GLOBUS_LOCATION}[^:]*%%g" -e "s%^${GLOBUS_LOCATION}[^:]*:\{0,1\}%%"`
if [ -n "${MANPATH}" ]; then
    MANPATH=`echo "${MANPATH}" | sed -e "s%:${GLOBUS_LOCATION}[^:]*%%g" -e "s%^${GLOBUS_LOCATION}[^:]*:\{0,1\}%%"`
fi
if [ -n "${PERL5LIB}" ]; then
    PERL5LIB=`echo "${PERL5LIB}" | sed -e "s%:${GLOBUS_LOCATION}[^:]*%%g" -e "s%^${GLOBUS_LOCATION}[^:]*:\{0,1\}%%"`
fi
# Remember the location being added, for the next time this is sourced.
GLOBUS_PATH=${GLOBUS_LOCATION}
# Prepend the Globus bin/sbin and man directories.
PATH="${bindir}:${sbindir}:${PATH}";
if [ -n "${MANPATH}" ]; then
    MANPATH="${datadir}/man:${MANPATH}"
fi
# Prepend every discovered libdir to the platform library search paths
# (LD_LIBRARY_PATH: Linux/ELF, DYLD_LIBRARY_PATH: macOS, LIBPATH: AIX,
# SHLIB_PATH: HP-UX). DELIM avoids a trailing ':' when the path was empty.
for libdir in $libdirs; do
    DELIM=
    if [ -n "${LD_LIBRARY_PATH}" ]; then
        DELIM=:
    fi
    LD_LIBRARY_PATH="${libdir}${DELIM}${LD_LIBRARY_PATH}"
    DELIM=
    if [ -n "${DYLD_LIBRARY_PATH}" ]; then
        DELIM=:
    fi
    DYLD_LIBRARY_PATH="${libdir}${DELIM}${DYLD_LIBRARY_PATH}"
    if [ -z "${LIBPATH}" ]; then
        # Once LIBPATH is set at all, the system defaults must be listed too.
        LIBPATH="/usr/lib:/lib"
    fi
    LIBPATH="${libdir}:${LIBPATH}"
    DELIM=
    if [ -n "${SHLIB_PATH}" ]; then
        DELIM=:
    fi
    SHLIB_PATH="${libdir}${DELIM}${SHLIB_PATH}"
done
export GLOBUS_PATH PATH MANPATH LD_LIBRARY_PATH DYLD_LIBRARY_PATH LIBPATH SHLIB_PATH
# Expose the Globus perl modules, preserving any existing PERL5LIB.
if [ -d "${perlmoduledir}" ]; then
    DELIM=""
    if [ "X${PERL5LIB}" != "X" ]; then
        DELIM=:
    fi
    PERL5LIB="${perlmoduledir}${DELIM}${PERL5LIB}"
    export PERL5LIB
fi
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.