blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
d3a06e5e522ad777d672d8505986a07017d8e79e
|
Shell
|
franklong741269853/mdvrp-coevolutionary-approach
|
/scripts/coisa.sh
|
UTF-8
| 1,357
| 2.640625
| 3
|
[] |
no_license
|
#! /bin/bash
####instances0="cluster.in p01.in p02.in p03.in"
####instances1="p04.in p05.in p06.in p07.in"
####instances2="p08.in p09.in p10.in p11.in p12.in"
####instances3="p13.in p14.in p15.in p16.in"
####instances4="p17.in p18.in p19.in"
####instances5="p20.in p21.in p22.in p23.in"
####instances6="pr01.in pr02.in pr03.in pr04.in"
####instances7="pr05.in pr06.in pr07.in"
####instances8="pr08.in pr09.in pr10.in pfbo.in"
####id0=0
####id1=12960
####id2=25920
####id3=42120
####id4=55080
####id5=64800
####id6=77760
####id7=90720
####id8=100440
####id=$id0
####instances=$instances0
####if [[ $1 == "1" ]]; then
#### id=$id1
#### instances=$instances1
####elif [[ $1 == "2" ]]; then
#### id=$id2
#### instances=$instances2
####elif [[ $1 == "3" ]]; then
#### id=$id3
#### instances=$instances3
####elif [[ $1 == "4" ]]; then
#### id=$id4
#### instances=$instances4
####elif [[ $1 == "5" ]]; then
#### id=$id5
#### instances=$instances5
####elif [[ $1 == "6" ]]; then
#### id=$id6
#### instances=$instances6
####elif [[ $1 == "7" ]]; then
#### id=$id7
#### instances=$instances7
####elif [[ $1 == "8" ]]; then
#### id=$id8
#### instances=$instances8
####fi
####echo $id $instances
# Launch all eight test shards in the background, then block until every
# one has finished.
for shard in 1 2 3 4 5 6 7 8; do
./test.sh "$shard" &
done
wait
| true
|
cf14175132ef69dcbf12084b0c976f83e04322f6
|
Shell
|
omniti-labs/omniti-ms
|
/build/python26/build.sh
|
UTF-8
| 2,248
| 2.828125
| 3
|
[] |
no_license
|
#!/usr/bin/bash
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License, Version 1.0 only
# (the "License"). You may not use this file except in compliance
# with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2011-2012 OmniTI Computer Consulting, Inc. All rights reserved.
# Use is subject to license terms.
#
# Load support functions
. ../../lib/functions.sh
PROG=Python
VER=2.6.9
VERHUMAN=$VER
PKG=omniti/runtime/python-26
SUMMARY="$PROG - An Interpreted, Interactive, Object-oriented, Extensible Programming Language."
DESC="$SUMMARY"
DEPENDS_IPS="system/library/gcc-4-runtime"
PREFIX=/opt/python26
BUILDARCH=64
CFLAGS="-O3"
CXXFLAGS="-O3"
CPPFLAGS="-D_REENTRANT"
LDFLAGS64="$LDFLAGS64 -L/opt/python26/lib/$ISAPART64 -R/opt/python26/lib/$ISAPART64"
CONFIGURE_OPTS="--with-system-ffi
--enable-shared
"
CONFIGURE_OPTS_64="--prefix=$PREFIX
--sysconfdir=$PREFIX/etc
--includedir=$PREFIX/include
--bindir=$PREFIX/bin
--sbindir=$PREFIX/sbin
--libdir=$PREFIX/lib
--libexecdir=$PREFIX/libexec
"
# Override the framework's build step: run only the 64-bit build,
# threading the optimization flags through CC/CXX.
# (build64, CFLAGS64 etc. come from ../../lib/functions.sh sourced above.)
build() {
CC="$CC $CFLAGS $CFLAGS64" \
CXX="$CXX $CXXFLAGS $CXXFLAGS64" \
build64
}
# Preserve the stock configure64 under a new name, then wrap it so we can
# patch pyconfig.h right after configure has generated it.
save_function configure64 configure64_orig
configure64() {
configure64_orig
logmsg "--- Fixing pyconfig.h so _socket.so builds"
# Undefine HAVE_NETPACKET_PACKET_H: the configure probe passes here but
# the _socket extension fails to compile with it defined.
perl -pi'*.orig' -e 's/#define (HAVE_NETPACKET_PACKET_H) 1/#undef \1/' \
pyconfig.h || logerr "Failed to fix pyconfig.h"
}
init
download_source $PROG $PROG $VER
patch_source
prep_build
build
make_package
clean_up
| true
|
f28605e39af70c11f0a6a49534033955e992e34e
|
Shell
|
vparihar01/m-script
|
/getdash.ssh
|
UTF-8
| 6,503
| 3.140625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Copyright (C) 2008-2011 Igor Simonov (me@igorsimonov.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
[ -h $0 ] && xcommand=`readlink $0` || xcommand=$0
rcommand=${xcommand##*/}
rpath=${xcommand%/*}
#*/
[ -z "$SCP" ] && SCP=`which scp 2>/dev/null`
[ -z "$SCP" ] && echo "scp utility not found, exiting" && exit 1
[ -z "$SSH" ] && SSH=`which ssh 2>/dev/null`
[ -z "$SSH_TIMEOUT" ] && SSH_TIMEOUT=10
SSH_OPTS="-o StrictHostKeyChecking=yes -o PasswordAuthentication=no -o ConnectTimeout=$SSH_TIMEOUT"
#SSH="$SSH $SSH_OPTS"
#SCP="$SCP $SSH_OPTS"
[ -f "/sbin/ifconfig" ] && IFCFG=/sbin/ifconfig || IFCFG=`which ifconfig 2>/dev/null`
[ "X$IFCFG" != "X" ] && localip=`$IFCFG | sed '/inet\ /!d;s/.*r://;s/\ .*//' | grep -v '127.0.0.1'` || localip="ifconfig_not_found"
source "${rpath}/conf/mon.conf"
source "${rpath}/conf/cloud.conf"
source "${rpath}/lib/functions.sh"
SQLITE=dbquery
#SQLITE=`which sqlite3 2>/dev/null`
LOG="$rpath/logs/dashboard.log"
[ -n "$debug" ] && debug=false || debug=true
[ -n "$SSHPORT" ] || SSHPORT=22
timeindexnow=`cat "${M_TEMP}/timeindex" 2>/dev/null` || timeindexnow=`date +"%s"`
monitor="$1"
if [ -z "$monitor" ]; then
monitor="servers"
else
shift
if [ -n "$1" ]; then
targetcluster="$1"
shift
clustered_as="$1"
fi
fi
IFS1=$IFS ; IFS='
'
for cluster in `cat "${rpath}/servers.list" | grep -v ^$ | grep -v ^# | grep -v ^[[:space:]]*# | cut -d'|' -f5 | sort | uniq` ; do
[ "X$cluster" == "X$SUPER_CLUSTER" ] && continue
[ -n "$targetcluster" -a "$targetcluster" != "$cluster" ] && continue
[ -z "$clustered_as" ] && clustered_as="$cluster"
$debug && log "Getting dash files from cluster $cluster monitor $monitor clustered as $clustered_as"
mpath=`grep ^$cluster\| "${rpath}/conf/clusters.conf" | cut -d'|' -f8`
[ -z "$mpath" ] && mpath="/opt/m"
for server in `grep \|${cluster}$ "${rpath}/servers.list" | grep -v ^# | grep -v ^[[:space:]]*#` ; do
[ -n "$RUNDELAY" ] && sleep $RUNDELAY
time0=`date +"%s"`
ip=${server%%|*}
[ `echo $ip | grep -c "^$localip$"` -ne 0 ] && continue
key=`echo $server | cut -d'|' -f2`
servername=`echo $server | cut -d'|' -f4`
[ -f "${rpath}/keys/${key}" ] || key=${key}.pem
[ ! -f "${rpath}/keys/${key}" ] && log "Unable to connect to $servername - key $key not found" && continue
[ -d "${rpath}/www/$monitor/${clustered_as}/${servername}" ] || install -d "${rpath}/www/$monitor/${clustered_as}/${servername}"
(IFS=$IFS1 ; $SSH -i "${rpath}/keys/${key}" -p $SSHPORT $SSH_OPTS $ip find "${mpath}/www/$monitor/localhost" -type f -printf \"%f\\n\" | sort > "${M_TEMP}/${ip}.${monitor}.remote.dash.list")
find "${rpath}/www/$monitor/${clustered_as}/${servername}" -type f -printf "%f\n" | sort > "${M_TEMP}/${ip}.${monitor}.local.dash.list"
nd=(`diff "${M_TEMP}/${ip}.${monitor}.remote.dash.list" "${M_TEMP}/${ip}.${monitor}.local.dash.list" | grep ^\< | sed 's|^< ||'`)
if [ -n "$nd" ] ; then
nd+=( "dash.html" "report.html" )
scplist="{`echo -n "${nd[*]}" | tr '\n' ','`}"
(IFS=$IFS1 ; $SCP -i "${rpath}/keys/${key}" -P $SSHPORT $SSH_OPTS "${ip}":"${mpath}/www/$monitor/localhost/$scplist" "${rpath}/www/$monitor/${clustered_as}/${servername}/" > /dev/null 2>&1 </dev/null &)
unset nd
[ -e "${rpath}/www/$monitor/${clustered_as}/${servername}/notfound" ] && rm -f "${rpath}/www/$monitor/${cluster}/${servername}/notfound"
[ -e "${rpath}/www/$monitor/${clustered_as}/${servername}/stopped" ] && rm -f "${rpath}/www/$monitor/${cluster}/${servername}/stopped"
else
log "No data received from server ${servername}, cluster ${cluster}, monitor ${monitor}"
if [ -e "${rpath}/www/$monitor/${clustered_as}/${servername}/notfound" ] ; then
rm -f "${rpath}/www/$monitor/${clustered_as}/${servername}/notfound"
touch "${rpath}/www/$monitor/${clustered_as}/${servername}/stopped"
else
touch "${rpath}/www/$monitor/${clustered_as}/${servername}/notfound"
fi
fi
for dashfile in `diff ${M_TEMP}/${ip}.${monitor}.remote.dash.list ${M_TEMP}/${ip}.${monitor}.local.dash.list | grep ^\> | sed 's|^> ||'` ; do
rm -f "${rpath}/www/$monitor/${clustered_as}/${servername}/${dashfile}"
done
time1=`date +"%s"`
timediff=`expr $time1 - $time0`
if [ "X$SQLITE3" == "X1" ] ; then
daynow=$(date +"%Y%m%d")
$SQLITE "${rpath}/sysdata" "insert into selfmon (timeindex, day, monitor, duration) values ('$timeindexnow', '$daynow', '${clustered_as}/${servername}', '$timediff')"
else
log "${clustered_as}/${servername}: $timediff sec"
fi
done
for server in `find "${rpath}/www/$monitor/$clustered_as/"* -maxdepth 0 -type d 2>/dev/null` ; do
[ `cat "${rpath}/servers.list" | grep "|${cluster}[[:space:]]*$" | grep -c "|${server##*/}|"` -eq 0 ] && rm -rf "${rpath}/www/$monitor/$cluster/${server##*/}"
done
unset clustered_as
done
[ -d "$rpath/mon.backups/dash/" ] || install -d "$rpath/mon.backups/dash/"
for cluster in `find "$rpath/www/$monitor/" -mindepth 1 -maxdepth 1 -type d` ; do
cluster=${cluster##*/}
[ -z "$cluster" ] && continue
if [ "X$monitor" == "Xservers" ] ; then
# Clusters not present in clusters.conf are backuped to M_ROOT/mon.backups/dash
if [ -n "$cluster" -a "$cluster" != "localhost" ]; then
if [ `grep -c "^$cluster|" "$rpath/conf/clusters.conf"` -eq 0 ]; then
cp -ru "$rpath/www/$monitor/$cluster" "$rpath/mon.backups/dash/"
rm -rf "$rpath/www/$monitor/$cluster"
fi
fi
fi
# Servers terminated or stopped
for server in `find "$rpath/www/$monitor/$cluster" -mindepth 1 -maxdepth 1 -type d` ; do
server=${server##*/}
if [ `grep -E -c "^$server\||\|$server\|" "$rpath/servers.list"` -eq 0 ]; then
cp -ru "$rpath/www/$monitor/$cluster/$server" "$rpath/mon.backups/dash/"
rm -rf "$rpath/www/$monitor/$cluster/$server"
fi
done
done
IFS=$IFS1
| true
|
211e535640c05463385791941f0adda846b2101a
|
Shell
|
glasseyes/pkgbuilding
|
/buildfor-nosourceupload
|
UTF-8
| 668
| 3.46875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build a Debian/Ubuntu package for the distribution named in $1 using
# git-buildpackage, adding a per-distro changelog entry via dch.
# "nosourceupload" variant: no source package upload is attempted.
echo "Building for $1"
DG_PWD=`pwd`
DG_BASE=`basename $PWD`
echo "in directory $DG_BASE"
# Default post-export hook: bump the changelog targeting distribution $1.
DG_POSTEXPORT="dch -l$1 -D $1 'rebuild for $1' --force-distribution"
#echo "postexport: $DG_POSTEXPORT"
if [[ "$DG_BASE" =~ ^fonts- ]]; then
echo "Is a font package"
if [[ "$1" == "precise" ]]; then
echo "modify rules file for precise"
# precise's dpkg-builddeb lacks -Sextreme; strip it from debian/rules
# as part of the post-export hook.
DG_POSTEXPORT="dch -l$1 -D $1 --force-distribution 'rebuild for $1 with modified dpkg-builddeb rules' && sed -e 's/-Sextreme //g' debian/rules > debian/rules.precise && mv debian/rules.precise debian/rules && chmod +x debian/rules"
fi
fi
DIST=$1 gbp buildpackage --git-dist=$1 --git-postexport="$DG_POSTEXPORT"
| true
|
ea2459239505448383390bc66a5b1f63f8b15cdb
|
Shell
|
chrisdobler/os-imaging-designer
|
/spigotMC/files/restoreWorld.sh
|
UTF-8
| 866
| 3.515625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Stop any running Minecraft server hard before restoring the world.
# (SIGKILL: no graceful save — the restore below overwrites state anyway.)
killall -9 java
# PARAMS=""
# while (( "$#" )); do
# case "$1" in
# -a|--my-boolean-flag)
# MY_FLAG=0
# shift
# ;;
# -b|--my-flag-with-argument)
# if [ -n "$2" ] && [ ${2:0:1} != "-" ]; then
# MY_FLAG_ARG=$2
# shift 2
# else
# echo "Error: Argument for $1 is missing" >&2
# exit 1
# fi
# ;;
# -*|--*=) # unsupported flags
# echo "Error: Unsupported flag $1" >&2
# exit 1
# ;;
# *) # preserve positional arguments
# PARAMS="$PARAMS $1"
# shift
# ;;
# esac
# done
# # set positional arguments in their proper place
# eval set -- "$PARAMS"
# Pick the most recently modified backup archive and restore it.
unset -v latest
for file in "./minecraft_server/backups"/*; do
# -nt against an unset $latest is true for any existing file, so the
# first iteration seeds the selection.
[[ $file -nt $latest ]] && latest=$file
done
echo "$latest"
# BUGFIX: quote the archive path — a backup name containing spaces was
# word-split by the original unquoted expansion.
unzip -o "$latest"
chmod +x minecraft_server/start.sh
reboot
| true
|
e402735ddf6189c93af4cd9c9bf219286be2aba8
|
Shell
|
jockyw2001/sdk
|
/verify/feature/venc/case14.sh
|
UTF-8
| 3,534
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/sh
. _test.sh
ret_test=$?
if [ "${ret_test}" == "0" ]; then
log "\n\n${script}: Dynamic Channel Attribute: Super Frame & RC Priority\n"
expected_ret=0
#needed predefined variable in this script: $of, $codec, $unset_script, $expected_ret, $exe
#input $1: prefix of output file, output:$md5
# Run one encode case: $exe encodes with $codec, then helpers from
# _test.sh (run_es_on_ok_check / run_std_md5_check) validate the return
# code and the MD5 of the produced elementary stream.
# $1: prefix of the output file / expected-md5 variable names.
run_case() {
rm ${of} 2> /dev/null; #remove output file from previous test
${exe} 1 ${codec}; #how this program is executed.
ret=$? #keep the result as run_es_on_ok_check() input
unset md5
run_es_on_ok_check $1
#how result,$md5 should be checked
if [ "${md5}" != "" ]; then # $md5 remains unset, it means there is no output es to be checked.
run_std_md5_check $1
fi
}
set_short_test()
{
export VENC_GLOB_MAX_FRAMES=20
export VENC_GLOB_DUMP_ES=20
export VENC_GLOB_APPLY_CFG=1 #make something like nRows to take effect
export FixQp=0 #Set 1 to enable VENC_CH00_QP
export VENC_CH00_QP=10 #make a very big stream
}
set_long_test()
{
export VENC_GLOB_MAX_FRAMES=60
export VENC_GLOB_DUMP_ES=60
}
#the rest tests the CONFIG system
if true ; then
#==== h264
#I frames: 23369, 24186
#P frames: 7167 8230 8113 8283 9289 8422 9578, -- 8320
#24000*8=192000, 9000*8=72000
#in MI:
# I:23405, 7171, 8234, 8117, 8287, 9293-d, 8426, 9582-d, I:24221-d, 8324
#==== h265
#I frames: 22409, 23430
#P frames: 8864, 10142, 10090, 10648, 11074, 9910, 11577 -- 10634
#23000*8=184000 11000*8=88000
#in MI
# I:22486, 8749, 10369, 10130, 10763, 11455-d, 10119, 11595-d, I:23507-d, 10611
default_bitrate=1000000
set_vbr() {
export VENC_RC=1
export RcType="Cbr" #vbr does not over
export VbrMinQp=20
export VbrMaxQp=40
export Bitrate=${default_bitrate}
}
run_suite() {
set_short_test
export VENC_SUPER_FRAME_MODE=1 #discard
run_case super1
export VENC_SUPER_FRAME_MODE=2 #reencode
run_case super2
}
run_long_suite() {
set_long_test
set_vbr
export RcPrio=1
export VENC_SUPER_FRAME_MODE=1 #discard
run_case rc_prio_1
}
#H264/265/JPEG super1/2 case does not trigger super frame anymore due to encoder change.
#To be double verified.
if true; then
codec=h264
export SuperI=192000
export SuperP=72000
p_super1="39844d0e1ebc76b7917455fdc88caaf8" #"a7a182a998ec36979d246bc994415f71"
p_super2="25c30f98df41236badf0d6d1deed4104" #"47837a93786dd5fc97955be5f6af1694"
s_super1="00000000000000000000000000000000"
s_super2="00000000000000000000000000000000"
run_suite
fi
if true; then
codec=h265
export SuperI=184000
export SuperP=88000
p_super1="e1aada5a99bcdb92dd023866a81d29eb" #"e1aada5a99bcdb92dd023866a81d29eb"
p_super2="9995713053d9d4eed133ee43135d6be5" #"9995713053d9d4eed133ee43135d6be5"
s_super1="00000000000000000000000000000000"
s_super2="00000000000000000000000000000000"
run_suite
fi
if true; then
codec=h264
export SuperI=154000
export SuperP=30000
p_rc_prio_1="fa7da122cbe0f0e0e63f3432d9f4108d"
s_rc_prio_1="00000000000000000000000000000000"
run_long_suite
fi
if true; then
codec=h265
export SuperI=124000
export SuperP=50000
p_rc_prio_1="a59f5b648c18c538caf95a3578e31cf7"
s_rc_prio_1="00000000000000000000000000000000"
run_long_suite
fi
#9654, 9614, 9558, 9513, 9677, 9732, 9807, 9744, 9696, 9658,
#9700*8=77600
if true; then
codec=jpeg
#export SuperI=77600
export SuperI=315000
p_super1="d94114081e0225f042a230b82c5f1fa5" #"d9898beae7b88ec66113bef6a4fbf023"
p_super2="ffe0e3cf8cb30fffa519db808ea911e8" #"9ed9566ee74eaeca53efd453dde5adad"
s_super1="00000000000000000000000000000000"
s_super2="00000000000000000000000000000000"
run_suite
fi
fi
print_result
fi
| true
|
44abfc3653c2c836d2993c78c4ddc3775d681ce4
|
Shell
|
faust64/keengreeper
|
/utils/snykDbAddCache
|
UTF-8
| 1,895
| 3.78125
| 4
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
if test -z "$DBDIR"; then
echo "CRITICAL: DBDIR not set, can not proceed" >&2
exit 1
elif ! test -d $DBDIR; then
echo "CRITICAL: database directory does not exist" >&2
exit 1
elif ! test -e "$DBDIR/snyk-cache"; then
echo "WARNING: snyk-cache is absent" >&2
fi
if test -z "$TMPDIR"; then
TMPDIR=/tmp
fi
TMPFILE=$TMPDIR/$1.$$
if test "$1$2"; then
TARGET=$1
VERSION=$2
QUICK=true
else
QUICK=false
fi
while :
do
if test "$TARGET" -a "$VERSION"; then
if echo "$TARGET" | grep -E '^[^_\./][^/]*$' >/dev/null; then
if getVersionFromSnyk $TARGET $VERSION >$TMPFILE 2>/dev/null; then
lastCheck=`date +%s`
if grep "^$TARGET;$VERSION;" $DBDIR/snyk-cache >/dev/null 2>&1; then
if ! sed -i "/^$TARGET;$VERSION;/d" $DBDIR/snyk-cache 2>/dev/null; then
echo "<add$ CRITICAL: failed deleting former data for $TARGET@$VERSION" >&2
exit 1
fi
echo "<add$ NOTICE: purged former record for $TARGET@$VERSION"
fi
if test -s $TMPFILE; then
while read line
do
echo "$TARGET;$VERSION;$lastCheck;$line"
done <$TMPFILE >>$DBDIR/snyk-cache
else
echo "$TARGET;$VERSION;$lastCheck;sane" >>$DBDIR/snyk-cache
echo "<add$ NOTICE: updated $TARGET@$VERSION"
fi
rm -f $TMPFILE
if $QUICK; then
exit 0
fi
else
echo "<add$ CRITICAL: failed fetching snyk data for $TARGET@$VERSION" >&2
fi
rm -f $TMPFILE
if $QUICK; then
exit 2
fi
else
echo "<add$ WARNING: malformatted NodeJS module name $TARGET"
if $QUICK; then
exit 2
fi
fi
elif test "$TARGET" = q -o "$TARGET" = quit; then
exit 0
else
echo "<add$ CRITICAL: missing $TARGET version to check" >&2
if $QUICK; then
exit 2
fi
fi
echo "<add$ type in a NodeJS module name and version, or either q or quit to exit"
echo -n "add$> "
read TARGET VERSION
done
exit 2
| true
|
3bbef1d657ee5c1b4519eaac8fdf09112e93e490
|
Shell
|
enforcer-pro/proxy-socks
|
/extra/random_tcp_port
|
UTF-8
| 1,352
| 4.21875
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
PIDFILE="/home/tunnel/$(basename "${0}").pid"
##
# Retry "$@" with exponential backoff.
# Env: ATTEMPTS (default 5) = max tries, TIMEOUT (default 1) = initial
# delay in seconds, doubled after each failure.
# Returns: exit code of the last attempt (0 on first success).
function with_backoff {
local max_attempts=${ATTEMPTS-5}
local timeout=${TIMEOUT-1}
local attempt=0
local exitCode=0

# BUGFIX: the original loop condition [[ $attempt < $max_attempts ]]
# is a *string* (lexicographic) comparison — e.g. "10" < "5" is true —
# so attempt counts of 10 or more misbehaved. Compare arithmetically.
while (( attempt < max_attempts ))
do
"$@"
exitCode=$?

if (( exitCode == 0 ))
then
break
fi

echo "Failure! Retrying in $timeout.." 1>&2
sleep $timeout
attempt=$(( attempt + 1 ))
timeout=$(( timeout * 2 ))
done

if (( exitCode != 0 ))
then
echo "You've failed me for the last time! ($@)" 1>&2
fi

return $exitCode
}
##
# Print a TCP port in 10000-65000 that is not currently in use, retrying
# (by recursion) until an unused one is found.
# NOTE(review): relies on netstat being installed; 'grep $port' matches
# the number anywhere in the netstat line (including remote addresses),
# so it may reject more ports than strictly necessary — confirm intended.
function random_unused_port {
local port=$(shuf -i 10000-65000 -n 1)
netstat -lat | grep $port > /dev/null
if [[ $? == 1 ]] ; then
echo $port
else
random_unused_port
fi
}
# Single-instance guard via pidfile, then pick a port with backoff.
if [ -f "${PIDFILE}" ]; then
PID=$(cat "${PIDFILE}")
# BUGFIX: the original tested the constant '[ 0 -eq 0 ]' (always true),
# so any stale pidfile made the script refuse to run forever. Test the
# exit status of 'ps' instead: 0 means the recorded PID is still alive.
if ps -p ${PID} > /dev/null 2>&1; then
echo "${0}: process already running (${PID})"
exit 1
else
# Stale pidfile: claim it for this process.
echo $$ > "${PIDFILE}"
if [ $? -ne 0 ]; then
echo "${0}: could not create ${PIDFILE}"
exit 1
fi
fi
else
echo $$ > "${PIDFILE}"
if [ $? -ne 0 ]; then
echo "${0}: could not create ${PIDFILE}"
exit 1
fi
PID=$(cat "${PIDFILE}")
fi
if [ -f "${PIDFILE}" ]; then
with_backoff random_unused_port
fi
rm "${PIDFILE}"
| true
|
38382fb77bf6cf90b6fa9b64b1d903f0011259c0
|
Shell
|
Bobo1239/LilyDevOS
|
/fedora/mkosi.extra/etc/skel/setup.sh
|
UTF-8
| 1,672
| 4.09375
| 4
|
[] |
no_license
|
#!/bin/bash
# Interactive first-run wizard: configures the git identity, then clones
# the repositories needed to contribute to LilyPond development.
printf "This wizard will help you to setup your Git configuration.\n\n"
if [ -f ~/.gitconfig ]; then
printf "A file configuration already exists. If you proceed, it will be overwritten.\nPress Ctrl+C to cancel/Press enter to proceed: "
read _
fi
echo -n "Please enter your name and surname: "
read NAME
git config --global user.name "$NAME"
echo -n "Please enter your email address: "
read EMAIL
git config --global user.email "$EMAIL"
echo "Your commit messages will be signed as '$NAME <$EMAIL>'."
#echo "Default editor to write commit messages is currently nano. You can now confirm it or choose another text editor [emacs|geany|nano]: "
#read GITEDITOR
#git config --global core.editor $GITEDITOR
git config --global color.ui auto
echo
# In case this script is run again after first configuration, skip
# this part if these directories exist.
# BUGFIX: the original '[ -d ~/git-cl -a ~/lilypond-git -a ~/lilypond-extra ]'
# tested only the first path with -d; the 2nd and 3rd operands were plain
# non-empty-string tests and therefore always true.
if [ -d ~/git-cl ] && [ -d ~/lilypond-git ] && [ -d ~/lilypond-extra ]; then
printf "You've already downloaded the repositories. Press Ctrl+C close the wizard: "
read _
fi
echo "Now we'll download the repositories needed to contribute to LilyPond development. Proceed only if you have a working Internet connection."
read -p "Press Enter to continue. "
cd $HOME
echo "Cloning in your home directory: `pwd`. It will take a few minutes."
echo "Downloading git-cl repository..."
git clone git://github.com/gperciva/git-cl.git
echo "Downloading lilypond-extra repository..."
git clone git://github.com/gperciva/lilypond-extra/
echo "Downloading lilypond-git repository..."
git clone git://git.sv.gnu.org/lilypond.git lilypond-git
echo "Configuration completed successfully!"
read -p "Press enter to close the wizard."
| true
|
86b0b620be70413944515747c296407e03a41905
|
Shell
|
evandromr/bash_xmmscripts
|
/mos2/events/mos2eventscript.sh
|
UTF-8
| 925
| 2.515625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# XMM-Newton MOS2 pipeline step: barycentre-correct the imaging event
# list, then extract source events/images in four energy bands by
# repeatedly sourcing mos2selev.sh with exported parameters (eV units).
# NOTE(review): assumes exactly one *MOS2*ImagingEvts.ds match in
# $xmm_rpcdata and an initialised SAS environment (barycen) — confirm.
cp `ls -1 $xmm_rpcdata/*MOS2*ImagingEvts.ds` ./mos2evts_barycen.ds
cp $PWD/../src_evt.reg ./
# Make barycentric correction on the clean event file
barycen table=mos2evts_barycen.ds:EVENTS
# Get the coordinates from the .reg file
# (loop keeps only the last line of src_evt.reg — the region definition)
while read LINE
do
srcregion=$LINE
done < src_evt.reg
export srcregion
export mos2table=mos2evts_barycen.ds
# 0.3-10 keV (full band)
export fsrcname="mos2evts_src_0310keV.ds"
export fimgname="mos2evts_src_img_0310keV.ds"
export emin=300
export emax=10000
source mos2selev.sh
# 0.3-2 keV (soft band)
export fsrcname="mos2evts_src_032keV.ds"
export fimgname="mos2evts_src_img_032keV.ds"
export emin=300
export emax=2000
source mos2selev.sh
# 2-4.5 keV (medium band)
export fsrcname="mos2evts_src_245keV.ds"
export fimgname="mos2evts_src_img_245keV.ds"
export emin=2000
export emax=4500
source mos2selev.sh
# 4.5-10 keV (hard band)
export fsrcname="mos2evts_src_4510keV.ds"
export fimgname="mos2evts_src_img_4510keV.ds"
export emin=4500
export emax=10000
source mos2selev.sh
| true
|
7da1a5f22acebb7b1b623bda175376ad5e22b5d5
|
Shell
|
randomeizer/cloudflare-dynamic-dns
|
/cloudflare-ddns.sh
|
UTF-8
| 1,421
| 3.578125
| 4
|
[] |
no_license
|
#!/usr/bin/env sh
# Main script to edit the record, should be used with cron.
# (BUGFIX: original first line was '#/usr/bin/env sh' — the missing '!'
# made it an ordinary comment, so the script ran under the caller's
# shell rather than a guaranteed sh.)
# Variables:
# CLOUDFLARE_API_TOKEN - Cloudflare API token from Cloudflare dashboard (Authentication tab)
# CLOUDFLARE_ZONE_ID - Zone ID from cloudflare-get-zones.sh script
# CLOUDFLARE_RECORD_NAME - Subdomain without the root domain, e.g. dynamic
# CLOUDFLARE_RECORD_ID - DNS record ID from cloudflare-get-dns-id.sh script
CLOUDFLARE_API_TOKEN=
CLOUDFLARE_ZONE_ID=
CLOUDFLARE_RECORD_NAME=
CLOUDFLARE_RECORD_ID=
# Retrieve the last recorded public IP address.
# ('dunamic' typo is kept deliberately: the file is only read and written
# here, so renaming it is purely cosmetic.)
IP_RECORD="/tmp/cloudflare-dunamic-dns-ip-record"
# BUGFIX: suppress the noisy 'No such file' error on the very first run,
# before any IP has been recorded.
RECORDED_IP=`cat $IP_RECORD 2>/dev/null`
# Fetch the current public IP address
PUBLIC_IP=$(curl --silent https://api.ipify.org) || exit 1
#If the public ip has not changed, nothing needs to be done, exit.
if [ "$PUBLIC_IP" = "$RECORDED_IP" ]; then
exit 0
fi
# Otherwise, your Internet provider changed your public IP again.
# Record the new public IP address locally
echo $PUBLIC_IP > $IP_RECORD
# Record the new public IP address on Cloudflare using API v4
RECORD=$(cat <<EOF
{ "type": "A",
"name": "$CLOUDFLARE_RECORD_NAME",
"content": "$PUBLIC_IP",
"ttl": 1,
"proxied": false }
EOF
)
curl "https://api.cloudflare.com/client/v4/zones/$CLOUDFLARE_ZONE_ID/dns_records/$CLOUDFLARE_RECORD_ID" \
-X PUT \
-H "Authorization: Bearer $CLOUDFLARE_API_TOKEN" \
-H "Content-Type: application/json" \
-d "$RECORD"
| true
|
a6577572239a3190f1f779d3c9d24b02d52d609b
|
Shell
|
flexibleir/compliance
|
/scripts/cis/mini/ensure_xinetd_is_not_enabled.sh
|
UTF-8
| 328
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
# 2.1.10 Ensure xinetd is not enabled (CIS benchmark check).
# (BUGFIX: original first line was '# !/bin/bash' — the space after '#'
# makes it a plain comment, not a shebang, so the interpreter was left
# to the caller.)
# echo -e "\e[92m== 2.1.10 Ensure xinetd is not enabled ==\n"

# Prints "Passed" when xinetd is disabled or not installed (systemctl
# output empty/errors), "Failed" otherwise. Queries systemctl once
# instead of twice as the original did.
ensure_xinetd_is_not_enabled () {
local state
state="$(systemctl is-enabled xinetd 2>/dev/null)"
if [[ "$state" = "disabled" || "$state" = "" ]]
then echo -e "Passed"
else
echo -e "Failed"
fi
}
ensure_xinetd_is_not_enabled
| true
|
34b52503c52a0bba803cb0b03bf4509ce34499c6
|
Shell
|
durgasudharshanam/puppet-puppet
|
/templates/server/node.sh.erb
|
UTF-8
| 305
| 2.890625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# ENC (external node classifier) wrapper, generated from node.sh.erb.
# Runs the configured external_nodes_script for node ${1}, caching the
# YAML so a stale copy can still be served if the script fails.
BACKUPDIR=/var/lib/puppet/yaml/encbackup
[ -d $BACKUPDIR ] || /bin/mkdir -p $BACKUPDIR
# Write to a temp file first; promote it only on success.
<%= scope.lookupvar('puppet::external_nodes_script') %> ${1} > $BACKUPDIR/${1}.yaml.temp
if [ "$?" -eq 0 ]; then
/bin/mv $BACKUPDIR/${1}.yaml.temp $BACKUPDIR/${1}.yaml
fi
# Always emit the cached (possibly stale) classification on stdout.
/bin/cat $BACKUPDIR/${1}.yaml
| true
|
06f9f4fc6f3c44a6cfed335ab5c77b8ce8a5de6f
|
Shell
|
ferrarimarco/dotfiles
|
/.shells/.all/logout.sh
|
UTF-8
| 294
| 3.0625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env sh
# When leaving the top-level login shell, clear the screen for privacy.
# SHLVL is not defined by POSIX; the suppression keeps this shell-agnostic.
# shellcheck disable=SC3028
case "${SHLVL}" in
1)
[ -x /usr/bin/clear_console ] && /usr/bin/clear_console -q
[ -x /usr/bin/clear ] && /usr/bin/clear
;;
esac
| true
|
50bf7e906619ff4e5ad96ef031dc2b1c2acff836
|
Shell
|
jrmysvr/pi_zero_rover
|
/rover/scp_to_pizero
|
UTF-8
| 166
| 2.515625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Copy the cross-compiled rover binary to the Pi Zero. An argument
# containing "release" ships the release build; anything else (or no
# argument) ships the debug build.
BUILD="debug"
case "$1" in
*release*) BUILD="release" ;;
esac
scp target/arm-unknown-linux-gnueabihf/$BUILD/rover pi@pizero:/home/pi
| true
|
3eba5ae9fdd1d5bb20f1fc04238f71836d996ce4
|
Shell
|
ichaf1997/Autoshell
|
/Os_Init/init.sh
|
UTF-8
| 6,559
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
# Apply to Centos - 7
# Control this script by Input parameters
# Usage : $0
# Input [ boolean ]
# value 1 means open , value 0 means close
SELINUX_ENABLE=0
FIREWALLD_ENABLE=0
POSTFIX_ENABLE=0
NetworkManager_ENABLE=0
TIMESYNC_ENABLE=0
# Input [ string ]
HOST_NAME="" # custom hostname , add here
SOFT_NOFILE=65535 # modify /etc/security/limits.conf
HARD_NOFILE=65535
SOFT_NOPROC=65535
HARD_NOPROC=65535
YUM_repo="DVD" # If you don't want to use local repo . modify here as remote repo URL
# If you don't want to change anything . modify here as "None"
TIME_SYNC_FROM="ntp1.aliyun.com"
APP_LIST=(vim wget mlocate net-tools gcc* openssl* pcre-devel) # APPS arrary , add apps you want to install here , Be careful to use space as separator for every app .
[ -e "/tmp/init.lock" ] && echo "Don't run this script repeatedly !" && exit 0
echo -e "Time\t$(date +%Y.%m.%d\ %T\ %a) - [Start]" > init.log
log_path=$(pwd)/init.log
# Define log format
# Append a timestamped status line to $log_path.
# $1: status tag — "ok" or "no"; any other value is silently ignored.
# $2: message text.
LOG_DUMP(){
local tag="$1"
case "$tag" in
ok|no)
echo "$(date +%Y.%m.%d\ %T\ %a) - [$tag] $2" >> $log_path
;;
esac
}
# Repo
func1(){
case $1 in
DVD)
if [ "$(blkid|grep iso9660|wc -l)" == "1" ]
then
dvd=$(blkid|grep iso9660|awk 'BEGIN{FS=":"}{print $1}')
mount_point=$(df -h|grep "$dvd"|awk '{print $6}')
if [ -n "$mount_point" ]
then
cd /etc/yum.repos.d
for name in $(ls)
do
mv $name $name.bak
done
cat >>/etc/yum.repos.d/local.repo<<EOF
[Admin]
name=admin
baseurl=file://$mount_point
enabled=1
gpgcheck=0
EOF
yum clean all >/dev/null 2>&1
yum makecache >/dev/null 2>&1
LOG_DUMP ok "Use local dvd repo"
else
LOG_DUMP no "Use local dvd repo :: DVD no mount in the Filesystem"
fi
else
LOG_DUMP no "Use local dvd repo :: DVD no found"
fi
;;
None)
;;
*)
curl -o /etc/yum.repos.d/Custom.repo $YUM_repo >/dev/null 2>&1
if [ "$?" == "0" ]
then
LOG_DUMP ok "Download repo from $YUM_repo"
cd /etc/yum.repos.d
for name in $(ls|grep -v "Custom.repo")
do
mv $name $name.bak
done
yum clean all >/dev/null 2>&1
yum makecache >/dev/null 2>&1
else
LOG_DUMP no "Download repo from $YUM_repo"
fi
esac
}
func1 $YUM_repo
# Apps Install
# Install every package listed in the APP_LIST array via yum, writing
# one ok/no line per package to the log through LOG_DUMP.
func2(){
for ((i=0;i<${#APP_LIST[*]};i++))
do
yum -y install ${APP_LIST[$i]} >/dev/null 2>&1
if [ "$?" == "0" ]
then
LOG_DUMP ok "install ${APP_LIST[$i]}"
else
LOG_DUMP no "install ${APP_LIST[$i]}"
fi
done
}
func2
# Time SYNC
if [ $TIMESYNC_ENABLE -eq 1 ];then
rpm -qa|grep ntp >/dev/null 2>&1
if [ "$?" != "0" ];then
yum -y install ntp >/dev/null 2>&1
if [ "$?" == "0" ];then
echo "*/5 * * * * /usr/sbin/ntpdate $TIME_SYNC_FROM">/var/spool/cron/root
LOG_DUMP ok "time sync from $TIME_SYNC_FROM"
else
LOG_DUMP no "time sync from $TIME_SYNC_FROM :: Download ntp failed"
fi
else
echo "*/5 * * * * /usr/sbin/ntpdate $TIME_SYNC_FROM">/var/spool/cron/root
LOG_DUMP ok "time sync from $TIME_SYNC_FROM"
fi
fi
# Hostname
if [ -n "$HOST_NAME" ]
then
hostnamectl set-hostname $HOST_NAME
LOG_DUMP ok "modify hostname : $HOST_NAME"
fi
# Max Files open
grep "ulimit -SHn" /etc/rc.local >/dev/null 2>&1
if [ "$?" != "0" ]
then
echo "ulimit -SHn 102400" >> /etc/rc.local
LOG_DUMP ok "add \"ulimit -SHn 102400\" to /etc/rc.local"
fi
grep "# MaxFileControl" /etc/rc.local >/dev/null 2>&1
if [ "$?" != "0" ]
then
cat >> /etc/security/limits.conf << EOF
# MaxFileControl by Init.sh
* soft nofile $SOFT_NOFILE
* hard nofile $HARD_NOFILE
* soft nproc $SOFT_NOPROC
* hard nproc $HARD_NOPROC
EOF
ulimit -n $HARD_NOFILE
LOG_DUMP ok "modify MaxFileControl soft=$SOFT_NOFILE hard=$HARD_NOFILE"
fi
# SELINUX
if [ "$SELINUX_ENABLE" == "0" ]
then
if [ "$(cat /etc/selinux/config | grep -v "#" | grep -w "SELINUX" | cut -d "=" -f 2)" == "enforcing" ]
then
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config >/dev/null 2>&1
fi
LOG_DUMP ok "close selinux"
fi
# Postix NetworkManager Firewalld
if [ $FIREWALLD_ENABLE == "0" ]
then
systemctl stop firewalld >/dev/null 2>&1
[ "$?" == "0" ] && LOG_DUMP ok "close firewalld" || LOG_DUMP no "close firewalld"
systemctl disable firewalld >/dev/null 2>&1
fi
if [ $POSTFIX_ENABLE == "0" ]
then
systemctl stop postfix >/dev/null 2>&1
[ "$?" == "0" ] && LOG_DUMP ok "close postfix" || LOG_DUMP no "close postfix"
systemctl disable postfix >/dev/null 2>&1
fi
if [ $NetworkManager_ENABLE == "0" ]
then
systemctl stop NetworkManager >/dev/null 2>&1
[ "$?" == "0" ] && LOG_DUMP ok "close NetworkManager" || LOG_DUMP no "close NetworkManager"
systemctl disable NetworkManager >/dev/null 2>&1
fi
# Custom optimize
func0(){
sed -i '/linux16 \/boot\/vmlinuz-3/{s/rhgb quiet/vga=817/}' /boot/grub2/grub.cfg
echo "set ts=2" >> /etc/vimrc
sed -i 's/#UseDNS yes/UseDNS no/' /etc/ssh/sshd_config
#sed -i 's/LANG="en_US.UTF-8"/LANG="zh_CN.UTF-8"/' /etc/locale.conf
echo LANG=\"zh_CN.UTF-8\" /etc/locale.conf
sed -i 's/\\w]/\\W]/g' /etc/bashrc
rm -rf /etc/localtime
ln -s /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
cat >> /etc/sysctl.conf << EOF
vm.overcommit_memory = 1
net.ipv4.ip_local_port_range = 1024 65535
net.ipv4.tcp_fin_timeout = 1
net.ipv4.tcp_keepalive_time = 1200
net.ipv4.tcp_mem = 94500000 915000000 927000000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_tw_recycle = 1
net.ipv4.tcp_timestamps = 0
net.ipv4.tcp_synack_retries = 1
net.ipv4.tcp_syn_retries = 1
net.ipv4.tcp_abort_on_overflow = 0
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.core.netdev_max_backlog = 262144
net.core.somaxconn = 65500
net.ipv4.tcp_max_orphans = 3276800
net.ipv4.tcp_max_syn_backlog = 262144
net.core.wmem_default = 8388608
net.core.rmem_default = 8388608
#net.ipv4.netfilter.ip_conntrack_max = 2097152
#net.nf_conntrack_max = 655360
#net.netfilter.nf_conntrack_tcp_timeout_established = 1200
EOF
/sbin/sysctl -p
}
func0
echo -e "Time\t$(date +%Y.%m.%d\ %T\ %a) - [Complete]" >> $log_path
# Generate lock file
touch /tmp/init.lock
# reboot
init 6
| true
|
7e2e7ba456527806d495798c2c02c319474347ff
|
Shell
|
orangeguo2/Distributed-Computer-System
|
/Shell Command & Bash Scripting/alu
|
UTF-8
| 11,533
| 3.5
| 4
|
[] |
no_license
|
#!/bin/sh
##
# This script implements the arithmetic logic unit (ALU)
# for word-oriented arithmetic and logic operations
if [ -z "$dir" ]; then
dir="$(dirname $0)"
app="$(basename $0)"
fi
##
# calls 'word' command on same path as this script
word() { echo $($dir/word $*); }
# include word definitions
source "$dir/worddefs"
source "$dir/word"
# include word functions
# source "$dir/word" #optional
## returns 1 if word is < 0, else 0
# @param the word to compare with zero
# @return 0 or 1
# Sign test: a negative word (two's complement) has its top bit set, so
# echo that bit; the literal input "0" short-circuits to 0.
# NOTE(review): 'word $1 wordtopbit' delegates to the external 'word'
# helper sourced above — confirm it prints the top bit for this usage.
testlt(){
if [[ $1 == 0 ]]; then
echo 0
else
echo $(word $1 wordtopbit)
fi
}
## Returns 1 if word is >= 0, else 0
# @param the word to compare with zero
# @return 0 or 1
# Non-negative test: the logical complement of testlt.
# @param word to compare with zero
# @return 1 if word >= 0, otherwise 0
testge(){
case "$(testlt $1)" in
1) echo 0 ;;
*) echo 1 ;;
esac
}
## returns 1 if word is == 0, else 0
# @param the word to compare with zero
# @return 0 or 1
# Zero test: scan every bit position with getBitOfWord; any set bit
# clears the result flag.
# @param the word to compare with zero
# @return 1 if all bits are 0, else 0
testeq(){
local result=1
for (( b= 0; b <= $wordtopbit; b++ ))
do
if [ $(getBitOfWord $1 $b) -eq 1 ]; then
result=0
fi
done
echo $result
}
## get abs value in math
# @param value to get abs
# @return abs value
# Absolute value: strip a single leading '-' if present.
abs(){
printf '%s\n' "${1#-}"
}
## make sure the word is in decimal format
## transfer positive binary value or decimal value word to decimal value
#@param positive binary value
#@param decimal value
## Convert a word to its signed decimal value.
# A $wordsize-character argument is treated as a two's-complement binary
# string; anything else is assumed to already be decimal and echoed as-is.
# BUGFIX: the original always subtracted 2^16 (written 65535+1), which
# (a) mangled *positive* binary inputs (top bit 0), contradicting the
# header comment, and (b) hard-coded 16 bits despite $wordsize.
# Subtract 2^wordsize only when the sign bit is set.
decimalWord(){
if [ ${#1} -eq $wordsize ]; then
local num=$((2#$1))
if [ $num -ge $((1 << (wordsize-1))) ]; then
echo $((num - (1 << wordsize)))
else
echo $num
fi
else
echo $1
fi
}
##
# ash -- arithmetic shift of a word by count.
# Equivalent to multiplying (+count: shift left) or dividing (-count:
# shift right) by a power of two.  Left shifts zero-fill from the
# right; right shifts replicate the sign bit from the left.  The sign
# bit of the result is always forced back to the operand's sign.
# @param $1 the operand word
# @param $2 the shift count (+: left, -: right)
ash(){
local count=$2
local c=$(abs "$count")
local localop=$1
# BUGFIX: was 'result=localop' (the literal string — missing '$').
# Every bit is overwritten by the loops below, but setBitOfWord must
# still receive a valid word to operate on.
local result=$localop
local sign=$(getBitOfWord $(parseWord ${localop}) ${wordtopbit})
# Clamp the magnitude: shifting by >= wordsize saturates.
if [ ${c} -ge ${wordsize} ]; then
c=$((wordsize-1))
fi
# BUGFIX: was '[[ $2 < 0 ]]', a lexicographic comparison; use a
# numeric test so inputs such as '+3' are not treated as negative.
if [ "$count" -lt 0 ]; then
# Right shift: move bit b down to position b-c ...
for (( b = wordtopbit-1; b >= c; b-- ))
do
t=$(getBitOfWord $(parseWord ${localop}) ${b})
result=$(setBitOfWord $(parseWord ${result}) $((b-c)) $t)
done
# ... then replicate the sign bit into the vacated top positions.
for (( b = c; b >= 1; b-- ))
do
result=$(setBitOfWord $(parseWord ${result}) $((wordtopbit-b)) ${sign})
done
else
# Left shift: move bit b-c up to position b ...
for (( b = wordtopbit-1; b >= c; b-- ))
do
t=$(getBitOfWord $(parseWord ${localop}) $((b-c)))
result=$(setBitOfWord $(parseWord ${result}) $((b)) ${t})
done
# ... then zero-fill the vacated low positions.
for (( b = c-1; b >= 0; b-- ))
do
result=$(setBitOfWord $(parseWord ${result}) ${b} 0)
done
fi
# BUGFIX: was the bare word 'sign' (missing '$'); restore the
# operand's original sign bit in the result.
echo $(setBitOfWord $(parseWord ${result}) ${wordtopbit} "$sign")
}
## csh -- circular (rotate) shift of a word by count: +: left, -: right.
# @param $1 the operand word
# @param $2 the rotate count
csh(){
# Normalise the count into [0, wordsize): a right rotate by n is the
# same as a left rotate by wordsize-n.
local count=$(($2 % wordsize))
if [ $count -lt 0 ]; then
count=$((wordsize+$count))
fi
local localop=$1
local result=$zeroword
# Each bit b of the operand lands at (b+count) mod wordsize.
for (( b = 0; b <= wordtopbit; b++ ))
do
t=$(getBitOfWord $localop $b)
result=$(setBitOfWord $result $(((b+count)%wordsize)) $t)
done
echo $result
}
## lsh -- logical shift of a word by count: +: left, -: right.
# Vacated positions are zero-filled in both directions.
# @param $1 the operand word
# @param $2 the shift count
lsh(){
local count=$2
# Clamp the magnitude: shifting by more than wordsize yields zero
# either way (the fill loops then clear every bit).
if [ $(abs $count) -gt $wordsize ]; then
count=$wordsize
fi
local c=$(abs $count)
local localop=$1
# BUGFIX: result was never initialised, so the first setBitOfWord
# call received an empty word; start from the canonical zero word.
local result=$zeroword
if [ $count -lt 0 ]; then
# Right shift: bit b moves down to bit b-c ...
for (( b = c; b <= wordtopbit; b++ ))
do
t=$(getBitOfWord $(parseWord ${localop}) ${b})
result=$(setBitOfWord $(parseWord ${result}) $((b-c)) $t)
done
# ... and the vacated top c bits are cleared.
for (( b = wordtopbit-c+1; b <= wordtopbit; b++ ))
do
result=$(setBitOfWord $(parseWord ${result}) $b 0)
done
else
# Left shift: bit b-c moves up to bit b ...
for (( b = wordtopbit; b >= c; b-- ))
do
t=$(getBitOfWord $(parseWord ${localop}) $((b-c)))
result=$(setBitOfWord $(parseWord ${result}) $((b)) ${t})
done
# ... and the vacated bottom c bits are cleared.
for (( b = c-1; b >= 0; b-- ))
do
result=$(setBitOfWord $(parseWord ${result}) ${b} 0)
done
fi
echo $result
}
## mask -- keep only the lower (+count) or upper (-count) bits of a
# word; all other bits are cleared.
# @param $1 the operand word
# @param $2 the mask count (+: keep low bits, -: keep high bits)
mask(){
local count=$2
local c=$(abs $count)
# A magnitude beyond wordsize keeps the whole word.
if [ $c -gt $wordsize ]; then
c=$wordsize
fi
local localop=$1
# BUGFIX: result was never initialised (and never declared local);
# start from the canonical zero word.
local result=$zeroword
if [ $2 -lt 0 ]; then
# Keep the top c bits ...
local wtbc=$((wordtopbit-c))
for (( b = wordtopbit; b > wtbc; b-- ))
do
t=$(getBitOfWord $(parseWord ${localop}) ${b})
result=$(setBitOfWord $(parseWord ${result}) $b $t)
done
# ... and clear everything below them.
for (( b = wtbc; b >= 0; b-- ))
do
result=$(setBitOfWord $(parseWord ${result}) $b 0)
done
else
# Keep the bottom c bits ...
for (( b = 0; b < c; b++ ))
do
t=$(getBitOfWord $(parseWord ${localop}) $b)
result=$(setBitOfWord $(parseWord ${result}) $b ${t})
done
# ... and clear everything above them.
for (( b = c; b <= wordtopbit; b++ ))
do
result=$(setBitOfWord $(parseWord ${result}) ${b} 0)
done
fi
echo $result
}
## and -- bitwise AND of two word operands.
# @param $1 the first operand
# @param $2 the second operand
# @return the AND-ed word on stdout
and(){
local result=$zeroword
for (( b = 0; b <= $wordtopbit; b++ ))
do
# Combine the b-th bits of both operands with shell arithmetic '&&'.
result=$(setBitOfWord $(parseWord ${result}) ${b} $(($(getBitOfWord $1 b) && $(getBitOfWord $2 b))))
done
echo $result
}
## or -- bitwise OR of two word operands.
# @param $1 the first operand
# @param $2 the second operand
# @return the OR-ed word on stdout
or(){
local result=$zeroword
for (( b = 0; b <= $wordtopbit; b++ ))
do
# Combine the b-th bits of both operands with bitwise '|'.
result=$(setBitOfWord $(parseWord ${result}) ${b} $(($(getBitOfWord $1 b) | $(getBitOfWord $2 b))))
done
echo $result
}
## xor -- bitwise XOR of two word operands.
# @param $1 the first operand
# @param $2 the second operand
# @return the XOR-ed word on stdout
xor(){
local result=$zeroword
for (( b = 0; b <= $wordtopbit; b++ ))
do
# Combine the b-th bits of both operands with bitwise '^'.
result=$(setBitOfWord $(parseWord ${result}) ${b} $(($(getBitOfWord $1 b) ^ $(getBitOfWord $2 b))))
done
echo $result
}
## minus -- two's-complement negation of a word operand.
# Implements -x = ~x + 1: every bit is inverted and a carry of 1 is
# propagated from the least significant bit upward.
# @param $1 the word to negate
# @return the negated word on stdout
minus(){
local carry=1
local result=$zeroword
for (( b=0; b <= ${wordtopbit}; b++))
do
# Invert bit b, then add the incoming carry.
r=$(notBit $(getBitOfWord $(parseWord $1) b))
r=$((r+carry))
# Bit 1 of the sum is the carry into the next position.
carry=$((r>>1))
result=$(setBitOfWord $(parseWord $result) b $(toBit r))
done
echo ${result}
}
## not -- bitwise NOT (one's complement) of a word operand.
# @param $1 the word to invert
# @return the inverted word on stdout
not(){
local result=$zeroword
for (( b = 0; b <= $wordtopbit; b++))
do
result=$(setBitOfWord $result $b $(notBit $(getBitOfWord $1 b)))
done
echo $result
}
## add -- sum of two word operands (ripple-carry addition).
# Overflow is silently discarded beyond the top bit.
# @param $1 the first operand
# @param $2 the second operand
# @return the sum on stdout
add() {
local carry=0
local result=$zeroword
for ((b=0; b <= wordtopbit; b++)); do
local t=$(( $(word $1 $b) + $(word $2 $b) + carry))
carry=$((t>>1)) # bit 1 is carry bit
result=$(word $result $b $t) # word uses only lower bit of t
done
echo $result
}
##
# usage -- print a usage message for this script to standard error.
usage() {
echo "usage: $(basename $0) [op word ...]" 1>&2
echo "word is a ${wordsize}-bit word" 1>&2
}
##
# sub -- difference of two word operands.
# Computes $1 - $2 as $1 + ~$2 + 1 (two's complement), using the same
# ripple-carry loop as add() with the carry seeded to 1.
# @param $1 the first operand (minuend)
# @param $2 the second operand (subtrahend)
# @return the difference on stdout
sub() {
local carry=1
local result=$zeroword
for ((b=0; b <= $wordtopbit; b++)); do
local t=$(( $(word $1 $b) + $(notBit $(word $2 $b)) + carry))
carry=$((t>>1)) # bit 1 is carry bit
result=$(word $result $b $t) # word uses only lower bit of t
done
echo $result
}
## mul -- product of two word operands (shift-and-add).
# If the multiplier ($2) is negative both operands are negated first,
# so the loop always walks a non-negative multiplier.
# @param $1 the first operand (multiplicand)
# @param $2 the second operand (multiplier)
# @return the product on stdout
mul(){
local w1=$1
local w2=$2
if [ $(testlt $2) -eq 1 ]; then
# Negate both: (-a)*(-b) == a*b, and w2 becomes non-negative.
w1=$(minus $w1)
w2=$(minus $w2)
fi
local result=$zeroword
local nshift=0
# nshift accumulates the distance since the last set multiplier bit,
# so w1 is shifted lazily only when a contribution is actually added.
for (( b = 0; b <= $wordtopbit; b++))
do
if [ $(getBitOfWord $w2 $b) == 1 ]; then
w1=$(lsh $w1 $nshift)
result=$(add $w1 $result)
nshift=0
fi
nshift=$((nshift+1))
done
echo $result
}
## div -- quotient and remainder of two word operands (restoring
# long division on the magnitudes).
# The quotient is positive when the operand signs match, negative
# otherwise; the remainder carries the sign of the dividend.
# Division by zero saturates: maxword for a non-negative dividend,
# minword for a negative one.
# @param $1 the first operand (dividend)
# @param $2 the second operand (divisor)
# @return "quotient remainder" (space-separated) on stdout
div(){
local result=$zeroword
local remainder=$zeroword
local w1=$1
local w2=$2
if [ $(testeq $2) -eq 1 ]; then
# Division by zero: saturate instead of failing.
if [ $(testge $1) -eq 1 ]; then
result=$maxword
else
result=$minword
fi
else
# Work on magnitudes; track the quotient's sign separately.
local resultnegative=0
if [ $(testlt $1) -eq 1 ]; then
w1=$(minus $1)
resultnegative=$((1-resultnegative))
else
w1=$1
fi
if [ $(testlt $2) -eq 1 ]; then
w2=$(minus $2)
resultnegative=$((1-resultnegative))
else
w2=$2
fi
# Classic restoring division: bring down one dividend bit per
# iteration, subtract the divisor, and keep the difference only
# when it stays non-negative.
for (( b = $wordtopbit; b >= 0; b-- ))
do
remainder=$(lsh $remainder 1)
local t=$(getBitOfWord $w1 $b)
remainder=$(setBitOfWord $remainder 0 $t)
local test=$(sub $remainder $w2)
if [ $(testge $test) -eq 1 ]; then
result=$(setBitOfWord $result $b 1)
remainder=$test
fi
done
# Re-apply the signs computed above.
if [ $resultnegative -eq 1 ]; then
result=$(minus $result)
fi
if [ $(testlt $1) -eq 1 ]; then
remainder=$(minus $remainder)
fi
fi
echo $result $remainder
}
## rmdr -- remainder of two word operands.
# The remainder carries the sign of the dividend (see div).
# @param $1 the first operand (dividend)
# @param $2 the second operand (divisor)
# @return the remainder on stdout
rmdr(){
# div echoes "quotient remainder"; slice off the quotient plus the
# separating space.
# NOTE(review): assumes the quotient string is exactly wordsize
# characters long — TODO confirm against div/worddefs output format.
local result=$(div $1 $2)
echo ${result:wordsize+1}
}
# alu -- dispatch a request to the matching ALU function.
# Operand "word"s are assumed to be in binary format (use decimalWord
# to convert); negative decimal inputs are not handled here (see the
# notes in word/worddefs).  Unknown operations print usage and exit 1;
# every recognised operation echoes its result and exits 0.
alu() {
case $1 in
# binary operations: <op> <word1> <word2>
sub|ash|csh|lsh|mask|and|or|xor|add|mul|div|rmdr)
echo $($2 > /dev/null; :)  # placeholder removed below
;;
esac
}
| true
|
a1cfc7655fb24ae34d5e9496a78c36aea2978ad4
|
Shell
|
Tahmid1/Solving-LCS-in-Parallel
|
/DataCollectingScripts/timing_randomized_subproblems3.sh
|
UTF-8
| 546
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
# CS87 Project; Bash script for timing Randomized Subproblems
# Brandon Chow and Tahmid Rahman
# 2016-05-04
#
# Sweeps the sequence length from 30000 to 100000 (step 5000) and runs
# ./dp_mem once per size; dp_mem prints one CSV row per run matching
# the header echoed below.
# BUGFIX: the file had no shebang, so its interpreter depended on the
# invoking shell; the C-style for-loop below requires bash.
maximum_number_threads=16
num_runs=5
displaystats=1
thread_assigment_method=1
echo "Num_threads, size, secs, AVG_already_solved, SD_already_solved, AVG_iterations, SD_iterations, thread_assigment_method"
for ((sequence_length=30000; sequence_length <= 100000; sequence_length=sequence_length+5000)); do
./dp_mem 0 $sequence_length $maximum_number_threads $thread_assigment_method $num_runs $displaystats
done
#30000 to 100000, increment by 5000
| true
|
b32b12247d3e849a9f832fd428c2a9b6c3101a77
|
Shell
|
danielfoehrKn/bashscripts
|
/git-hooks/pre-commit-importsort
|
UTF-8
| 295
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/sh
# git pre-commit hook to sort imports according to the Gardener import style.
# BUGFIX: the previous `git status -s | awk '{if ($1 == "M") ...}'`
# also matched files that were modified only in the worktree (awk
# strips the leading space of " M"), so the hook silently `git add`ed
# changes the user never staged.  Ask the index directly instead, and
# read line-by-line so paths containing spaces survive.
git diff --cached --name-only --diff-filter=ACM | while IFS= read -r f; do
echo "$f"
importsort -exclude zz_generated -w "$f"
# add again in case importsort rewrote the file
git add -- "$f"
done
echo "Sorted all imports"
| true
|
4017620d62893b96c66fd899040a3d66274679f6
|
Shell
|
spujadas/rshutdown
|
/packaging/debian/postinst
|
UTF-8
| 1,178
| 3.75
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Debian postinst for rshutdown: generates a per-install shutdown
# token, creates the service user and log files, fixes ownership and
# permissions, validates the sudoers drop-in, and starts the service.
NAME=rshutdown
USER=${NAME}
GROUP=nogroup
SERVICE=${NAME}
EXECUTABLE=/usr/bin/${NAME}
DEFAULT=/etc/default/${NAME}
INIT=/etc/init.d/${NAME}
SUDOERS=/etc/sudoers.d/${NAME}
LOGROTATE=/etc/logrotate.d/${NAME}
LOG_DIR=/var/log/${NAME}
LOG=${LOG_DIR}/${NAME}.log
### configure service
# Fill in a fresh random 32-char alphanumeric shutdown token in the
# defaults file (the packaged file ships with an empty RSHUTDOWN_TOKEN=).
sed -i -e 's/^RSHUTDOWN_TOKEN=/RSHUTDOWN_TOKEN='$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 32 | head -n 1)'/' ${DEFAULT}
### set up owners and permissions
# create the rshutdown system user if it does not exist yet
id -u ${USER} &>/dev/null || adduser --system --no-create-home ${USER}
# create log directory and an empty log file
mkdir -p ${LOG_DIR}
chmod 755 ${LOG_DIR}
touch ${LOG}
# set owners (DEFAULT holds the secret token, hence owned by the service user)
chown ${USER}:${GROUP} ${LOG_DIR} ${LOG} ${DEFAULT}
# set permissions on packaged files (if mangled by packaging system)
chmod 600 ${DEFAULT}
chmod 644 ${LOGROTATE}
chmod 755 ${EXECUTABLE} ${INIT}
chmod 0440 ${SUDOERS}
# post-install sanity check on sudoers: an invalid drop-in would lock
# sudo for the whole system, so remove it and abort on failure
visudo -c
EXIT_CODE=$?
if [ $EXIT_CODE -eq 1 ]; then
echo "sudoers set-up failed - exiting"
rm -f ${SUDOERS}
exit 1
fi
### start service
# start on boot
update-rc.d ${SERVICE} defaults
# start now
service ${SERVICE} start
| true
|
9de4d990d38b9f287e820023fdea4d4202aafabb
|
Shell
|
kenbabu/PrivateSnps
|
/private.sh
|
UTF-8
| 1,826
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
# Find SNPs private to each of seven 1000 Genomes African populations,
# per chromosome, using `bcftools isec -n-1` (sites present in exactly
# one of the input files).
# $1 - directory holding the per-population, per-chromosome VCFs
# $2 - output directory; a chr<N> subdirectory is created per chromosome
echo "-----------------";
echo "Find private SNPS";
echo "=================";
# Location of vcf files
BCF_DIR=$1
# Output root
OUT_DIR=$2
# Define populations
POP1="ACB"
POP2="ASW"
POP3="ESN"
POP4="GWD"
POP5="LWK"
POP6="MSL"
POP7="YRI"
# LOG FILE
LOGFILE="popgen.log"
touch $LOGFILE
printf "ACTIVITY\tCHROMOSOME\tTIMESTAMP\n" >> $LOGFILE;
printf "---------------------------------\n";
# Start looping through the chromosomes
_TIME1=$(date +%s)
for i in {1..22};
do
# Create chromosome directory if it doesn't exist
if [ ! -d "${OUT_DIR}/chr${i}" ]; then
echo "Creating $OUT_DIR/chr$i"
mkdir "$OUT_DIR/chr${i}"
fi
echo "Starting to process chr$i"
echo
start_time=$(date +%T)
printf "Started processing\tChromosome:$i\t$start_time\n" >> $LOGFILE
# BUGFIX: the ACB file was previously passed twice.  With -n-1
# (report sites present in exactly one input) a duplicated file can
# never hold a "private" site, so no SNP could ever be reported as
# private to ACB.  Each population is now listed exactly once,
# matching the 7-file reference command below.
bcftools isec $BCF_DIR/${POP1}_chr$i.vcf.gz $BCF_DIR/${POP2}_chr$i.vcf.gz \
$BCF_DIR/${POP3}_chr$i.vcf.gz $BCF_DIR/${POP4}_chr$i.vcf.gz \
$BCF_DIR/${POP5}_chr$i.vcf.gz $BCF_DIR/${POP6}_chr$i.vcf.gz $BCF_DIR/${POP7}_chr$i.vcf.gz \
-p ${OUT_DIR}/chr${i} -n-1 -c all
echo " "
end_time=$(date +%T)
printf "Ended processing\tChromosome:$i\t$end_time\n" >> $LOGFILE
# Reference: bcftools isec ACB_chr21.vcf.gz ASW_chr21.vcf.gz ESN_chr21.vcf.gz GWD_chr21.vcf.gz LWK_chr21.vcf.gz MSL_chr21.vcf.gz YRI_chr21.vcf.gz -p ~/Desktop/VCFExamples/chr21 -n -1 -c all
done
_TIME2=$(date +%s)
_TIME_DIFF=$((_TIME2 - _TIME1))
echo
printf "FINISHED EXECUTING SCRIPT IN $_TIME_DIFF SECONDS\n" >> $LOGFILE;
| true
|
ddd132dcc687838de036f6740a81115fb19f0cf7
|
Shell
|
awslabs/amazon-eks-ami
|
/hack/lint-docs.sh
|
UTF-8
| 305
| 2.8125
| 3
|
[
"MIT-0",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
#!/usr/bin/env bash
# Fail CI when the generated user guide is stale, then build the docs
# in strict mode.
set -o errexit
# BUGFIX: quote the command substitution so the cd cannot word-split
# if the checkout path contains spaces.
cd "$(dirname "$0")"
./generate-template-variable-doc.py
if ! git diff --exit-code ../doc/USER_GUIDE.md; then
echo "ERROR: doc/USER_GUIDE.md is out of date. Please run hack/generate-template-variable-doc.py and commit the changes."
exit 1
fi
./mkdocs.sh build --strict
| true
|
5061a895ad7489d4910407149e2a28ce3ff7c993
|
Shell
|
vbossica/glassfish3-debian-package
|
/src/debian/config/postrm
|
UTF-8
| 521
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/sh
# Debian postrm for glassfish3: purge the server, domains and log
# directories, then remove the service account.
GF3_HOME=/usr/share/glassfish3
USER=glassfish
DOMAINS_HOME=/var/glassfish3
LOGS_DIR=/var/log/glassfish3
# Remove a directory (if present), announcing what is being removed.
# $1 - human-readable label, $2 - directory path
purge_dir() {
if [ -d "$2" ]; then
echo "removing $1"
sudo rm -rf "$2"
fi
}
purge_dir "domains directory" "$DOMAINS_HOME"
purge_dir "logs directory" "$LOGS_DIR"
purge_dir "server directory" "$GF3_HOME"
if [ -n "$(getent passwd glassfish)" ]; then
echo "removing the user and group '$USER'"
sudo deluser --system $USER
fi
| true
|
9b19cd5ad0f51cac2cb13f5509549d81126338e6
|
Shell
|
mooz/currentia
|
/src/currentia/server/experiment/show_average_result.sh
|
UTF-8
| 445
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/sh
# For each report type, run it against result_1..result_5 and average
# the five outputs with calculate_average.rb.
reports="show_query_vs_update show_update_vs_consistency show_update_vs_window show_query_vs_window show_window_vs_consistency"
for report in $reports; do
run=1
while [ $run -le 5 ]; do
./${report}.sh "result_${run}" > "/tmp/${run}.txt"
run=$((run + 1))
done
./calculate_average.rb /tmp/1.txt /tmp/2.txt /tmp/3.txt /tmp/4.txt /tmp/5.txt > "/tmp/average_${report}.txt"
done
| true
|
bc8983f675648ebb65a67e04efd0f3de5af50592
|
Shell
|
ad-m/e24files-assistant
|
/report.py
|
UTF-8
| 1,610
| 2.734375
| 3
|
[] |
no_license
|
#!/usr/bin/env python
# BUGFIX: the shebang claimed /bin/bash although this file is Python 2
# (argparse/ConfigParser/boto); executing it directly would fail on the
# first import statement.
"""Produce a CSV report of which e24files buckets each panel user can access."""
import argparse
import ConfigParser
import csv
import sys

import boto

from api import e24filesClient, e24cloudClient
from config import get_access_pair


def build_args():
    """Parse command-line arguments: --config (INI file) and --output."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--config", nargs='?', type=argparse.FileType('r'),
                        help="INI-file with secret keys")
    parser.add_argument("--output", '-o', nargs='?', type=argparse.FileType('w'),
                        default=sys.stdout, help="Output file")
    return parser.parse_args()


def fetch_data(writer, panel_client, files_client):
    """Write a header row of bucket names, then one row per panel user.

    A user whose S3 credentials are rejected is reported with a single
    "SUSPENDED" cell; otherwise each cell holds the bucket_validate()
    result for the corresponding bucket.
    """
    buckets = files_client.list_buckets()
    writer.writerow([''] + [bucket.name.encode('utf-8') for bucket in buckets])
    for user in panel_client.get_accounts()['users']:
        user_client = e24filesClient(access_key=user['e24files']['s3']['api_id'],
                                     secret_key=user['e24files']['s3']['secret_key'])
        try:
            user_client.list_buckets()
        except boto.exception.S3ResponseError:
            row = ["SUSPENDED", ]
        else:
            row = [user_client.bucket_validate(bucket.name) for bucket in buckets]
        writer.writerow([user['name'].encode('utf-8'), ] + row)


def main():
    """Wire the clients together from the config file and emit the CSV."""
    args = build_args()
    config = ConfigParser.ConfigParser()
    config.readfp(args.config)
    panel_client = e24cloudClient(*get_access_pair(config, 'panel'))
    files_client = e24filesClient(*get_access_pair(config, 'files'))
    writer = csv.writer(args.output)
    fetch_data(writer, panel_client, files_client)


if __name__ == "__main__":
    main()
| true
|
c7fe2170630d7b7862e4be45f2fd3e1208bd2458
|
Shell
|
jsh/docker-cookbook
|
/Chapter-1/pulling-an-image
|
UTF-8
| 357
| 2.578125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -eux
# Demonstrates docker image caching: the first pull downloads layers,
# the second is served from the local cache.  The ':' no-op commands
# act as narration — with -x they are echoed in the trace output.
: first docker pull is slow
time docker pull fedora
: and here it is
docker images | grep fedora
: second one is fast, though
time docker pull fedora
sleep 10
: now we bring up a container
docker run -id --name fedora docker.io/fedora bash
docker ps -l
sleep 10
: cleanliness is next to godliness
docker stop fedora
docker rm fedora
| true
|
d05d1f63d0e6da70187ac1a5b45224a28f852efc
|
Shell
|
pavanreddy2693/linux_scripts
|
/shellscripts/copy_remote_to_remote.sh
|
UTF-8
| 474
| 2.984375
| 3
|
[] |
no_license
|
#! /bin/bash
# Copy a file from one remote machine to another with scp, prompting
# for the source and destination host/user/path.  Assumes the key file
# "keypair" in the current directory grants access.
echo "copy file from one machine to another machine"
echo "Provide source IP"
# BUGFIX: use read -r so backslashes in paths are not mangled, and
# quote every expansion so values containing spaces survive.
read -r source_ip
echo "provide source User"
read -r source_user
echo "provide source path"
read -r source_path
echo "provide destination IP"
read -r destination_ip
echo "provide the user of destination"
read -r destination_user
echo "provide destination path"
read -r destination_path
scp -i keypair "$source_user@$source_ip:$source_path" "$destination_user@$destination_ip:$destination_path"
| true
|
ae377a8a73079da430f08c58fb95db4d528cdc50
|
Shell
|
h-w-chen/kubemark-tool
|
/kubemark-setup.sh
|
UTF-8
| 10,180
| 3.59375
| 4
|
[
"CC0-1.0"
] |
permissive
|
#!/bin/bash
# Credit: this script is based on https://github.com/sonyafenge/arktos-tool/blob/master/perftools/Howtorunperf-tests-scaleout.md
# purpose: to perf test scale-out mTP/nRP system / scale-up (single cluster) system
# this script is supposed to be sourced.
# usage:
# export TEST_SINGLE_STEP=N # by default not set; run all steps
# export TEST_TYPE=load #default to density
# . <script> <run-name> <hollow-nodes-per-rp> <tp-num> <rp-num>
[[ -z $1 ]] && echo "MUST specify RUN_PREFIX in format like etcd343-0312-1x500" && return 1
## KUBEMARK_NUM_NODES is the hollow nodes of ONE RP only; the total nodes are n * KUBEMARK_NUM_NODES
[[ -z $2 ]] && echo "MUST specify KUBEMARK_NUM_NODES in one of supported values 1, 2, 100, 500, 1000, 10000" && return 2
export SCALEOUT_CLUSTER=true
declare -i tp_reps=${3:-1}
[[ $tp_reps -eq 0 ]] && { echo "Run in scale-up (not scale-out) mode instead."; unset SCALEOUT_CLUSTER; }
declare -i rp_reps=${4:-${tp_reps}}
echo "run ${1}: tp ${tp_reps}; rp ${rp_reps}, each rp has hollow nodes: ${2}"
case $tp_reps in
0) ;;
1)
tenants=("" "arktos")
;;
2)
tenants=("" "arktos" "zeta")
;;
3)
tenants=("" "arktos" "mercury" "zeta")
;;
*)
echo "not supported tp_reps."
return -1;
esac
test_jobs=()
declare PRE="zz-" ## zz- to ensure perf test resouce names come after regular used ones
declare RUN_ID=$1
export RUN_PREFIX=${PRE}$1
export KUBEMARK_NUM_NODES=$2
function calc_gce_resource_params() {
local size=${1}
case "$size" in
[12])
export MASTER_DISK_SIZE=100GB
export MASTER_ROOT_DISK_SIZE=100GB
export MASTER_SIZE=n1-standard-4
export NODE_SIZE=n1-standard-4
export NODE_DISK_SIZE=100GB
;;
[1-4]00)
export MASTER_DISK_SIZE=200GB
export MASTER_ROOT_DISK_SIZE=200GB
export MASTER_SIZE=n1-highmem-8
export NODE_SIZE=n1-highmem-16
export NODE_DISK_SIZE=200GB
;;
[5-9]00)
export MASTER_DISK_SIZE=200GB
export MASTER_ROOT_DISK_SIZE=200GB
export MASTER_SIZE=n1-highmem-32
export NODE_SIZE=n1-highmem-16
export NODE_DISK_SIZE=200GB
;;
[1-2]000)
export MASTER_DISK_SIZE=200GB
export MASTER_ROOT_DISK_SIZE=200GB
export MASTER_SIZE=n1-highmem-32
export NODE_SIZE=n1-highmem-16
export NODE_DISK_SIZE=200GB
;;
10000)
export MASTER_DISK_SIZE=1000GB
export MASTER_ROOT_DISK_SIZE=1000GB
export MASTER_SIZE=n1-highmem-96
export NODE_SIZE=n1-highmem-16
export NODE_DISK_SIZE=200GB ## seems wastful if asking for 1GB disk for node"
;;
*)
echo "invalid KUBEMARK_NUM_NODES of ${size}."
return -1
;;
esac
}
declare -i total_hollow_nodes=(${rp_reps}*${KUBEMARK_NUM_NODES})
calc_gce_resource_params ${total_hollow_nodes}
if [[ $? -ne 0 ]]; then
echo "MUST specify KUBEMARK_NUM_NODES in one of supported values 1, 2, 100, 500, 1000, 10000"
return 3
fi
# https://github.com/fabric8io/kansible/blob/master/vendor/k8s.io/kubernetes/docs/devel/kubemark-guide.md
# ~17.5 hollow-node pods per cpu core ==> 16 cores: 110 hollow-nodes
#declare -x -i NUM_NODES=("${KUBEMARK_NUM_NODES}" + 110 -1)/110
declare -x -i NUM_NODES=("${total_hollow_nodes}" + 100 - 1)/100 ## arktos team experience; may consider give 1 nuffer in case of 100/500
[[ ${total_hollow_nodes} -lt 499 ]] && NUM_NODES=${NUM_NODES}+1
echo "${NUM_NODES} admin minion nodes, total hollow nodes ${total_hollow_nodes}"
export PREEMPTIBLE_NODE=${PREEMPTIBLE_NODE:-true}
#export USE_INSECURE_SCALEOUT_CLUSTER_MODE=false ## better avoid insecure mode currently buggy?
export SCALEOUT_TP_COUNT=${tp_reps} ## TP number
export SCALEOUT_RP_COUNT=${rp_reps} ## RP number
export CREATE_CUSTOM_NETWORK=true ## gce env isolaation
export KUBE_GCE_PRIVATE_CLUSTER=true
export KUBE_GCE_ENABLE_IP_ALIASES=true
export KUBE_GCE_NETWORK=${RUN_PREFIX}
export KUBE_GCE_INSTANCE_PREFIX=${RUN_PREFIX} ## for kube-up/down to identify GCP resources
export KUBE_GCE_ZONE=${KUBE_GCE_ZONE-us-central1-b}
export ENABLE_KCM_LEADER_ELECT=false
export ENABLE_SCHEDULER_LEADER_ELECT=false
export ETCD_QUOTA_BACKEND_BYTES=8589934592 ## etcd 8GB data
#export SHARE_PARTITIONSERVER=false
export LOGROTATE_FILES_MAX_COUNT=50 ## if need huge pile of logs, consider increase to 200
export LOGROTATE_MAX_SIZE=200M
export KUBE_ENABLE_APISERVER_INSECURE_PORT=true ## to enable prometheus
export KUBE_ENABLE_PROMETHEUS_DEBUG=true
export KUBE_ENABLE_PPROF_DEBUG=true
export TEST_CLUSTER_LOG_LEVEL=--v=2
export HOLLOW_KUBELET_TEST_LOG_LEVEL=--v=2
export GOPATH=$HOME/go
## for perf test only - speed up deleting pods (by doubling GC controller QPS)
export KUBE_FEATURE_GATES=ExperimentalCriticalPodAnnotation=true,QPSDoubleGCController=true #,QPSDoubleRSController=true
## below controls KCM + sched QPS; use them in caution
export KUBE_CONTROLLER_EXTRA_ARGS="--kube-api-qps=100 --kube-api-burst=150"
export KUBE_SCHEDULER_EXTRA_ARGS="--kube-api-qps=200 --kube-api-burst=300"
export KUBE_APISERVER_EXTRA_ARGS="--max-mutating-requests-inflight=20000 --max-requests-inflight=40000"
## more general args for master components; keep here for reference
#KUBE_CONTROLLER_EXTRA_ARGS=" --kube-api-qps=2000 --kube-api-burst=4000 --concurrent-deployment-syncs=500 --concurrent-replicaset-syncs=500 --concurrent_rc_syncs=500 --concurrent-endpoint-syncs=500 --concurrent-gc-syncs=2000 --concurrent-namespace-syncs=1000 --concurrent-resource-quota-syncs=500 --concurrent-service-syncs=100 --concurrent-serviceaccount-token-syncs=500 --concurrent-ttl-after-finished-syncs=500"
#KUBE_SCHEDULER_EXTRA_ARGS=" --kube-api-qps=2000 --kube-api-burst=4000"
export SHARED_CA_DIRECTORY=/tmp/${USER}/ca
mkdir -p ${SHARED_CA_DIRECTORY}
if [[ -z ${TEST_SINGLE_STEP} ]] || [[ ${TEST_SINGLE_STEP} == "1" ]]; then
echo "------------------------------------------"
echo "step 1. starting admin cluster ... $(date)"
./cluster/kube-up.sh
fi
if [[ ${TEST_SINGLE_STEP} == "1" ]]; then
return 0
fi
is_kube_up=$?
if [[ "${is_kube_up}" == "1" ]]; then
return 5
elif [[ "${is_kube_up}" == "2" ]]; then
echo "waring: fine to continue"
fi
if [[ -z ${TEST_SINGLE_STEP} ]] || [[ ${TEST_SINGLE_STEP} == "2" ]]; then
echo "------------------------------------------"
echo "step 2: starting kubemark clusters ... $(date)"
./test/kubemark/start-kubemark.sh
fi
if [[ ${TEST_SINGLE_STEP} == "2" ]]; then
return 0
fi
# optional: sanity check
perf_log_root=$HOME/logs/perf-test/gce-${total_hollow_nodes}/arktos/${RUN_PREFIX}
# start perf tool
# start_perf_test -- launch one clusterloader2 run for a tenant in the
# background and record its PID in the global test_jobs array.
# $1 - tenant name (empty for the system tenant)
# $2 - kubeconfig of the tenant partition (TP) cluster
# Reads: TEST_TYPE (default "density"), KUBEMARK_NUM_NODES, tp_reps,
#        total_hollow_nodes, perf_log_root.
# Writes: perf_log_folder (global), appends to test_jobs.
function start_perf_test() {
local tenant=$1
local kube_config=$2
local test_type=${TEST_TYPE:-density}
## todo: change clusterload code to fix the rp access bug
local kube_config_proxy=$PWD/test/kubemark/resources/kubeconfig.kubemark-proxy
perf_log_folder="${perf_log_root}/${tenant}"
echo "perf log folder: ${perf_log_folder}"
mkdir -p ${perf_log_folder}
# create the test tenant in kubemark TP cluster
./_output/dockerized/bin/linux/amd64/kubectl --kubeconfig=${kube_config} create tenant ${tenant}
# Per-tenant node count: in scale-out mode split the total across the
# TPs and round up to the next multiple of 100 (declare -i evaluates
# the arithmetic expression on assignment).
declare -i nodes=${KUBEMARK_NUM_NODES}
if [[ ${tp_reps} -ne 0 ]]; then # scale-out multi-tp/rp cluster
nodes=(${total_hollow_nodes}/${tp_reps}+99)/100*100
fi
# Echo the command for the record, then run it detached with all
# output captured in the tenant's log folder.
echo env SCALEOUT_TEST_TENANT=${tenant} ./perf-tests/clusterloader2/run-e2e.sh --nodes=${nodes} --provider=kubemark --kubeconfig=${kube_config_proxy} --report-dir=${perf_log_folder} --testconfig=testing/${test_type}/config.yaml --testoverrides=./testing/experiments/disable_pvs.yaml
env SCALEOUT_TEST_TENANT=${tenant} ./perf-tests/clusterloader2/run-e2e.sh --nodes=${nodes} --provider=kubemark --kubeconfig=${kube_config_proxy} --report-dir=${perf_log_folder} --testconfig=testing/${test_type}/config.yaml --testoverrides=./testing/experiments/disable_pvs.yaml > ${perf_log_folder}/perf-run.log 2>&1 &
test_job=$!
test_jobs+=($test_job)
}
if [[ -z ${TEST_SINGLE_STEP} ]] || [[ ${TEST_SINGLE_STEP} == "3" ]]; then
echo "------------------------------------------"
echo "step 3: run perf test suite per tp ... $(date)"
for t in $(seq 1 $tp_reps); do
start_perf_test ${tenants[$t]} $PWD/test/kubemark/resources/kubeconfig.kubemark.tp-${t}
done
if [[ $tp_reps -eq 0 ]]; then #non scale-out; single master for all
start_perf_test ${tenants[$t]} $PWD/test/kubemark/resources/kubeconfig.kubemark.tp
fi
echo "waiting for perf test suites done..."
echo "background jobs: ${test_jobs[@]}"
for t in ${test_jobs[@]}; do
wait $t || ( echo "failed to start density test. Aborting..."; return 4 )
done
fi
if [[ ${TEST_SINGLE_STEP} == "3" ]]; then
return 0
fi
if [[ -z ${TEST_SINGLE_STEP} ]] || [[ ${TEST_SINGLE_STEP} == "4" ]]; then
echo "------------------------------------------"
#return 0 ## to uncomment it in local controlled run
echo "step 4: per test suites are done; collecting logs ... $(date)"
pushd ${perf_log_root}
env GCE_REGION=${KUBE_GCE_ZONE} bash ~/arktos-tool/logcollection/logcollection.sh
## rough check of log
### find . -name "minion-*" -type d | xargs -I{} wc -l {}/kubelet.log
wc -l minion-*/kubelet.logs || (echo "log data seems incomplete. Aborting..."; return 5;)
popd
fi
if [[ ${TEST_SINGLE_STEP} == "4" ]]; then
return 0
fi
if [[ -z ${TEST_SINGLE_STEP} ]] || [[ ${TEST_SINGLE_STEP} == "5" ]]; then
echo "------------------------------------------"
echo "step 5: cleaning up GCP test resources ... $(date)"
SCRIPT=$(realpath -P ./kubemark-setup.sh)
SCRIPTPATH=`dirname $SCRIPT`
bash ${SCRIPTPATH}/gcp-cleanup.sh ${RUN_ID}
fi
if [[ ${TEST_SINGLE_STEP} == "5" ]]; then
return 0
fi
if [[ -z ${TEST_SINGLE_STEP} ]] || [[ ${TEST_SINGLE_STEP} == "6" ]]; then
echo "------------------------------------------"
echo "step 6: system has been cleaned up. Au revoir :) $(date)"
#echo "------------------------------------------"
#echo "step 5: shuting down kubemark clusters ... $(date)"
#./test/kubemark/stop-kubemark.sh
#echo "------------------------------------------"
#echo "step 6: tearing down admin cluster ... $(date)
#./cluster/kube-down.sh
#echo "------------------------------------------"
#echo "step 7: system has been cleaned up. Au revoir :) $(date)"
echo "local cleanup of kubemark-config files"
rm test/kubemark/resources/kubeconfig.*
fi
if [[ ${TEST_SINGLE_STEP} == "6" ]]; then
return 0
fi
| true
|
566d277f4d4fc0098f6b2dcc2e7c2e96f664bbe3
|
Shell
|
josemar7/linux_shell
|
/shellclass/localusers/add-local-user-new.sh
|
UTF-8
| 1,126
| 4.40625
| 4
|
[] |
no_license
|
#!/bin/bash
# Create a local user with a random one-time password and force a
# password change on first login.
# Usage: add-local-user-new.sh USER_NAME [COMMENT]...
# make sure the script is being executed with superuser privileges
if [[ "${UID}" -ne 0 ]]
then
echo "Error. You aren't superuser."
exit 1
fi
if [[ "${#}" -lt 1 ]]
then
echo "Usage: ${0} USER_NAME [COMMENT]..."
exit 1
fi
# First argument is the username; everything after it becomes the
# GECOS comment.
USER_NAME="${1}"
shift
# BUGFIX: was COMMENT="{@}" — a literal three-character string, so
# every account got the comment "{@}".  Use "${*}" to join the
# remaining arguments into the comment.
COMMENT="${*}"
# 48 hex chars derived from the current nanosecond timestamp; good
# enough for a single-use password that must be changed on first login.
PASSWORD=$(date +%s%N | sha256sum | head -c48)
# Create the user.
useradd -c "${COMMENT}" -m "${USER_NAME}"
# Check to see if the useradd command succeeded.
# We don't want to tell the user than an account was created when it hasn't been.
if [[ "${?}" -ne 0 ]]
then
echo 'The account could not be created.'
exit 1
fi
# Set the password for the user (--stdin is RHEL/CentOS specific).
echo "${PASSWORD}" | passwd --stdin "${USER_NAME}"
if [[ "${?}" -ne 0 ]]
then
echo 'the password for the account could not be set.'
exit 1
fi
# Force password change on first login.
passwd -e "${USER_NAME}"
# Display the username, password, and the host where the user was created.
echo
echo 'username:'
echo "${USER_NAME}"
echo
echo 'password:'
echo "${PASSWORD}"
echo
echo 'host:'
echo "${HOSTNAME}"
exit 0
| true
|
9cfe312da308993411ebfd0689f0e7064d42dede
|
Shell
|
miroshnikalex/vagrant-centos
|
/files/terraform_install.sh
|
UTF-8
| 1,247
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
# Slightly modified version of https://raw.githubusercontent.com/ryanmaclean/awful-bash-scripts/master/install_recent_terraform_packer.sh
# Installs the latest linux/amd64 Terraform and Packer releases under
# /opt and appends both to PATH in root's and $USER's .bash_profile.
# Get URLs for most recent versions.
# NOTE(review): this greps the raw JSON text for "linux.*64" and picks
# the lexically-highest line via `sort -rh` — fragile version ordering
# (e.g. 0.9 vs 0.10); presumably acceptable for this helper, but a
# jq-based version sort would be more robust.
terraform_url=$(curl --silent https://releases.hashicorp.com/index.json | jq '{terraform}' | egrep "linux.*64" | sort -rh | head -1 | awk -F[\"] '{print $4}')
packer_url=$(curl --silent https://releases.hashicorp.com/index.json | jq '{packer}' | egrep "linux.*64" | sort -rh | head -1 | awk -F[\"] '{print $4}')
# Create and move into the install directories ($_ is the last
# argument of the previous command, i.e. /opt/terraform).
mkdir /opt/packer
mkdir /opt/terraform && cd $_
# Change directory to Terraform directory
cd /opt/terraform
# Download Terraform. URI: https://www.terraform.io/downloads.html
curl -o terraform.zip $terraform_url
# Unzip and install
unzip terraform.zip
# Change directory to Packer
cd /opt/packer
# Download Packer. URI: https://www.packer.io/downloads.html
curl -o packer.zip $packer_url
# Unzip and install
unzip packer.zip
# Append the PATH additions for root and the invoking user.
# NOTE(review): when run via sudo, $USER may be "root", making the two
# appends hit the same file — confirm intended invocation.
echo '
# Terraform & Packer Paths.
export PATH=/opt/terraform/:/opt/packer/:$PATH
' >>/root/.bash_profile
echo '
# Terraform & Packer Paths.
export PATH=/opt/terraform/:/opt/packer/:$PATH
' >> /home/$USER/.bash_profile
source /home/$USER/.bash_profile
source /root/.bash_profile
| true
|
07e233fd6acde5a5520ed3d95daa1718becb5462
|
Shell
|
open-pwa/open-pwa
|
/packages/node-mobile/tools/android_build.sh
|
UTF-8
| 1,123
| 3.828125
| 4
|
[
"Apache-2.0",
"LicenseRef-scancode-free-unknown",
"Zlib",
"CC0-1.0",
"ISC",
"LicenseRef-scancode-public-domain",
"ICU",
"MIT",
"LicenseRef-scancode-public-domain-disclaimer",
"Artistic-2.0",
"BSD-3-Clause",
"NTP",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"LicenseRef-scancode-openssl",
"LicenseRef-scancode-unicode",
"NAIST-2003",
"OpenSSL"
] |
permissive
|
#!/bin/bash
# Cross-compile libnode.so for Android.
# Usage: android_build.sh <ndk_path> [target_arch]
# With no target_arch, builds arm, x86, arm64 and x86_64 in sequence.
set -e

# Remember the invocation directory so we can return to it at the end.
ROOT=${PWD}

if [ $# -eq 0 ]; then
  echo "Requires a path to the Android NDK"
  echo "Usage: android_build.sh <ndk_path> [target_arch]"
  exit
fi

# Resolve this script's directory to an absolute path via cd/pwd.
SCRIPT_DIR="$(dirname "$BASH_SOURCE")"
cd "$SCRIPT_DIR"
SCRIPT_DIR=${PWD}
cd "$ROOT"

# Resolve the NDK location to an absolute path the same way.
cd "$1"
ANDROID_NDK_PATH=${PWD}

# All builds run from the project root, one level above this script.
cd "$SCRIPT_DIR"
cd ../

# Build libnode.so for the architecture named in $1 and copy the result
# into out_android/<ndk-abi-name>/.
build_one() {
  local arch=$1
  make clean
  # Clean previous toolchain.
  rm -rf android-toolchain/
  source ./android-configure "$ANDROID_NDK_PATH" $arch
  make -j $(getconf _NPROCESSORS_ONLN)
  # Map the build arch onto the Android NDK ABI folder name.
  local abi=$arch
  case "$abi" in
    arm)   abi="armeabi-v7a" ;;
    arm64) abi="arm64-v8a" ;;
  esac
  mkdir -p "out_android/$abi/"
  cp "out/Release/lib.target/libnode.so" "out_android/$abi/libnode.so"
}

if [ $# -eq 2 ]; then
  build_one "$2"
else
  for target in arm x86 arm64 x86_64; do
    build_one "$target"
  done
fi

cd "$ROOT"
| true
|
3c2e8f6dddfd83352b4acace30d9006617e7bba3
|
Shell
|
cha63506/dotfiles-47
|
/bin/pass-show-qr
|
UTF-8
| 199
| 2.984375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
set -e
if [[ -z "$1" ]]; then
pass show
exit 0
fi
secret=$(pass show "$1")
file="$(mktemp -d)/${1//\//-}.png"
qrencode -o "$file" "$secret"
qlmanage -p "$file" &> /dev/null
| true
|
97191ed4d5d3968ef2f5b125059b691e3638c46f
|
Shell
|
vseryakov/backendjs
|
/tools/bkjs-install
|
UTF-8
| 22,139
| 3.46875
| 3
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
#
# Author: Vlad Seryakov vseryakov@gmail.com
# Sep 2013
#
case "$BKJS_CMD" in
restart)
[ ! -f /etc/monit.d/$BKJS.conf ] && $0 start
;;
init-server)
[ "$(whoami)" != "root" ] && echo "Run as root please" && exit 1
args=$(get_all_args)
find_user
find_nodebin
echo "Setuping server in $BKJS_HOME for $BKJS_USER/$(get_arg -user), prefix: $BKJS_PREFIX, binaries: $NODE_BIN, $BKJS_BIN... ($args)"
($0 install-packages $args)
($0 init-hostname $args)
($0 init-ssh $args)
($0 init-user $args)
($0 init-system $args)
($0 init-limits $args)
($0 init-sysctl $args)
($0 init-monit-system $args)
($0 init-logrotate $args)
($0 init-rsyslog $args)
($0 init-home $args)
($0 init-service $args)
($0 init-profile $args)
exit
;;
init-profile)
# Create global profile
find_user
sysconf=/etc/sysconfig
[ ! -d $sysconf -a -d /etc/default ] && sysconf=/etc/default
echo "BKJS_HOME=$BKJS_HOME" > $sysconf/$BKJS
domain=$(get_arg -domain)
[ "$domain" != "" ] && echo "BKJS_DOMAIN=$domain" >> $sysconf/$BKJS
[ "$BKJS_USER" != "root" ] && echo "BKJS_USER=$BKJS_USER" >> $sysconf/$BKJS
chown -R $BKJS_USER $BKJS_HOME
exit
;;
init-hostname)
host=$(get_arg -host $BKJS_HOST)
[ "$host" = "" ] && host=$(uname -n|awk -F. '{print $1}')
domain=$(get_arg -domain $BKJS_DOMAIN)
# Set hostname with name and domain
if [ "$domain" = "" ]; then
host=$(get_arg -host)
[ "$host" = "" ] && exit
domain=$(uname -n|cut -d. -f2-)
fi
host=$host.$domain
[ "$(uname -n)" = "$host" ] && exit
echo "Configuring hostname $host ..."
hostname $host
echo $host > /etc/hostname
if [ -f /etc/sysconfig/network ]; then
echo "HOSTNAME=$host" > /tmp/network
grep -v HOSTNAME /etc/sysconfig/network >> /tmp/network
mv /tmp/network /etc/sysconfig/network
fi
service rsyslog restart
monit reload
exit
;;
init-user)
# Add local user
find_user
LHOME=/home/$BKJS_USER
if [ "$(grep -s $BKJS_USER /etc/passwd)" = "" ]; then
echo "Adding user $BKJS_USER..."
useradd -m -g wheel -s /bin/bash $BKJS_USER
mkdir -p -m 700 $LHOME/.ssh && chown $BKJS_USER $LHOME/.ssh
fi
if [ -f /etc/conf.d/tiny-cloud ]; then
$SED "s/^#?CLOUD_USER.+/CLOUD_USER=$BKJS_USER/" /etc/conf.d/tiny-cloud
fi
# Allow path in sudo and skip tty for our user so we can run commands via ssh
if [ -d /etc/sudoers.d -a ! -f /etc/sudoers.d/$BKJS ]; then
echo "Defaults secure_path = /sbin:/bin:/usr/sbin:/usr/bin:/usr/local/bin:$BKJS_PREFIX/bin:$LHOME/bin:$LHOME/node_modules/.bin" >> /etc/sudoers.d/$BKJS
echo "Defaults:$BKJS_USER !requiretty" >> /etc/sudoers.d/$BKJS
echo "$BKJS_USER ALL = NOPASSWD: ALL" > /etc/sudoers.d/$BKJS
fi
# Aliases and environment
if [ "$(grep -s '#Bkjs' $LHOME/.bashrc)" = "" ]; then
echo "Configuring .bashrc..."
echo '#Bkjs' >> $LHOME/.bashrc
echo "umask 022" >> $LHOME/.bashrc
echo "BKJS_HOME=$BKJS_HOME" >> $LHOME/.bashrc
echo "export PATH=\$PATH:$LHOME/bin:$LHOME/node_modules/.bin" >> $LHOME/.bashrc
echo 'alias slog="tail -100 /var/log/messages"' >> $LHOME/.bashrc
echo "alias clog=\"sudo tail -100 /var/log/cron\"" >> $LHOME/.bashrc
echo "alias mlog=\"tail -100 $BKJS_HOME/log/message.log\"" >> $LHOME/.bashrc
echo "alias elog=\"tail -100 $BKJS_HOME/log/error.log\"" >> $LHOME/.bashrc
echo "alias alog=\"tail -100 $BKJS_HOME/log/access.log\"" >> $LHOME/.bashrc
echo 'alias h="history"' >> $LHOME/.bashrc
echo 'alias ll="ls -la"' >> $LHOME/.bashrc
echo 'alias ps="ps augx"' >> $LHOME/.bashrc
echo 'alias mc="mc -b"' >> $LHOME/.bashrc
echo 'alias df="df -h"' >> $LHOME/.bashrc
echo 'alias bkls="bkjs ec2-ls"' >> $LHOME/.bashrc
echo 'bcp() { socat readline,history=$HOME/.socat tcp4:localhost:$1; }' >> $LHOME/.bashrc
echo 'bkssh() { bkjs ec2-ssh -tag "$1" -index "$2" -cmd "$3" -skip-tag dev,staging; }' >> $LHOME/.bashrc
echo 'bkstop() { bkjs ec2-ssh -tag ${1-api} -index "$2" -cmd "bkjs stop-${3-api}" -skip-tag dev,staging; }' >> $LHOME/.bashrc
echo 'bksync() { (cd $HOME/node_modules/$1 && bkjs sync -host "$(bkjs ec2-host -tag ${2-api} -skip-tag dev,staging)"); }' >> $LHOME/.bashrc
echo 'bkw() { bkssh ${1-api} "" "w"; }' >> $LHOME/.bashrc
echo 'bkami() { bkjs create-ami && bkjs create-launch-template-version; }' >> $LHOME/.bashrc
echo "PROMPT_COMMAND='printf \"\\033]0;\$(cat \$HOME/var/bkjs.tag)\\007\"'" >> $LHOME/.bashrc
fi
if [ "$(grep -s bashrc $LHOME/.bash_profile)" = "" ]; then
echo "Configuring .bash_profile .bashrc..."
echo '[ -f ~/.bashrc ] && . ~/.bashrc' >> $LHOME/.bash_profile
fi
if [ "$(grep -s '#Bkjs' $LHOME/.bash_profile)" = "" ]; then
echo "Configuring .bash_profile..."
echo '#Bkjs' >> $LHOME/.bash_profile
echo '[ ! -s ~/var/bkjs.tag ] && bkjs ec2-tag > ~/var/bkjs.tag' >> $LHOME/.bash_profile
echo '[ -s ~/var/bkjs.tag ] && export PS1=$(cat ~/var/bkjs.tag)#' >> $LHOME/.bash_profile
fi
exit
;;
init-home)
# Create required directories
find_user
mkdir -p $BKJS_HOME/node_modules $BKJS_HOME/bin $BKJS_HOME/log $BKJS_HOME/etc $BKJS_HOME/var
if [ "$BKJS_USER" != "root" -a ! -f $BKJS_HOME/etc/config ]; then
echo "Creating default $BKJS config ..."
$ECHO "uid=$BKJS_USER\nforce-uid=1" > $BKJS_HOME/etc/config
fi
chown -R $BKJS_USER $BKJS_HOME
exit
;;
init-ssh)
# Allow only pubkey auth
[ "$(grep -s '#Bkjs' /etc/ssh/sshd_config)" != "" ] && exit
echo "Configuring ssh..."
egrep -v '^(#Bkjs|PasswordAuth|GSSAPIAuth|MaxAuth|MaxSess|ClientAlive|PermitRootLogin)' /etc/ssh/sshd_config > /tmp/sshd_config
echo "" >> /tmp/sshd_config
echo "#Bkjs config" >> /tmp/sshd_config
echo "PermitRootLogin no" >> /tmp/sshd_config
echo "PasswordAuthentication no" >> /tmp/sshd_config
echo "MaxAuthTries 10" >> /tmp/sshd_config
echo "MaxSessions 10" >> /tmp/sshd_config
echo "ClientAliveInterval 15" >> /tmp/sshd_config
echo "ClientAliveCountMax 5" >> /tmp/sshd_config
mv /tmp/sshd_config /etc/ssh
chmod 600 /etc/ssh/sshd_config
service sshd restart
exit
;;
init-logrotate)
    # Setup logrotate for backend log files: one stanza for the system logs
    # (cron + messages) and one for the backend message/access logs.
    if [ "$(grep -s '#Bkjs' /etc/logrotate.d/syslog)" = "" ]; then
       echo "Configuring logrotate..."
       echo "#Bkjs" > /etc/logrotate.d/syslog
       echo "/var/log/cron /var/log/messages {" >> /etc/logrotate.d/syslog
       echo " missingok" >> /etc/logrotate.d/syslog
       echo " daily" >> /etc/logrotate.d/syslog
       # Fix: this directive was appended to /etc/logrotate.d/$BKJS by
       # mistake, leaving the syslog stanza with no rotate count and
       # polluting the backend config with a stray line.
       echo " rotate 90" >> /etc/logrotate.d/syslog
       echo " sharedscripts" >> /etc/logrotate.d/syslog
       echo " postrotate" >> /etc/logrotate.d/syslog
       echo " /usr/bin/killall -q -HUP rsyslogd" >> /etc/logrotate.d/syslog
       echo " endscript" >> /etc/logrotate.d/syslog
       echo "}" >> /etc/logrotate.d/syslog
    fi
    if [ "$(grep -s "#Bkjs $BKJS_HOME" /etc/logrotate.d/$BKJS)" = "" ]; then
       echo "Configuring logrotate.d/$BKJS..."
       echo "#Bkjs $BKJS_HOME" > /etc/logrotate.d/$BKJS
       echo "$BKJS_HOME/log/message.log $BKJS_HOME/log/access.log {" >> /etc/logrotate.d/$BKJS
       echo " missingok" >> /etc/logrotate.d/$BKJS
       echo " daily" >> /etc/logrotate.d/$BKJS
       echo " rotate 90" >> /etc/logrotate.d/$BKJS
       echo " sharedscripts" >> /etc/logrotate.d/$BKJS
       echo " postrotate" >> /etc/logrotate.d/$BKJS
       echo " /usr/bin/killall -q -HUP rsyslogd" >> /etc/logrotate.d/$BKJS
       echo " endscript" >> /etc/logrotate.d/$BKJS
       echo "}" >> /etc/logrotate.d/$BKJS
    fi
    exit
    ;;
init-syslog)
# Setup syslog config for backend logging
find_user
if [ "$(grep -s '#Bkjs' /etc/syslog.conf)" = "" ]; then
echo "Configuring syslog..."
echo "#Bkjs" > /etc/syslog.conf
echo 'kern.*,*.emerg /dev/console' >> /etc/syslog.conf
echo 'cron.* /var/log/cron' >> /etc/syslog.conf
echo 'local7.* /var/log/boot.log' >> /etc/syslog.conf
echo '*.info;cron.none,local0.none,local5.none /var/log/messages' >> /etc/syslog.conf
echo "local0.* $BKJS_HOME/log/message.log" >> /etc/syslog.conf
echo "local5.* $BKJS_HOME/log/access.log" >> /etc/syslog.conf
mkdir -p $BKJS_HOME/log
chown -R $BKJS_USER $BKJS_HOME/log
service syslog restart
fi
exit
;;
init-rsyslog)
# Setup rsyslog config for backend logging
find_user
if [ "$(grep -s '#Bkjs' /etc/rsyslog.conf)" = "" ]; then
echo "Configuring rsyslog..."
echo "#Bkjs" > /etc/rsyslog.conf
if [ -d /etc/systemd ]; then
echo '$ModLoad imjournal' >> /etc/rsyslog.conf
else
echo '$ModLoad imklog' >> /etc/rsyslog.conf
fi
echo '$ModLoad imuxsock' >> /etc/rsyslog.conf
echo '$ModLoad imudp' >> /etc/rsyslog.conf
echo '$UDPServerAddress 127.0.0.1' >> /etc/rsyslog.conf
echo '$UDPServerRun 514' >> /etc/rsyslog.conf
echo '$MaxMessageSize 64k' >> /etc/rsyslog.conf
echo '$SystemLogRateLimitInterval 0' >> /etc/rsyslog.conf
echo '$SystemLogRateLimitBurst 0' >> /etc/rsyslog.conf
echo '$ActionFileDefaultTemplate RSYSLOG_FileFormat' >> /etc/rsyslog.conf
if [ -d /etc/systemd ]; then
echo '$IMJournalStateFile imjournal.state' >> /etc/rsyslog.conf
echo '$imjournalRatelimitInterval 0' >> /etc/rsyslog.conf
echo '$imjournalRatelimitBurst 0' >> /etc/rsyslog.conf
echo '$OmitLocalLogging on' >> /etc/rsyslog.conf
echo '*.emerg :omusrmsg:*' >> /etc/rsyslog.conf
else
echo 'kern.*,*.emerg /dev/console' >> /etc/rsyslog.conf
fi
echo '$IncludeConfig /etc/rsyslog.d/*.conf' >> /etc/rsyslog.conf
echo 'cron.* /var/log/cron' >> /etc/rsyslog.conf
echo 'local7.* /var/log/boot.log' >> /etc/rsyslog.conf
echo "\$FileOwner $BKJS_USER" >> /etc/rsyslog.conf
echo '*.info;cron.none,local0.none,local5.none /var/log/messages' >> /etc/rsyslog.conf
rm -rf /var/log/maillog* /var/log/secure* /var/log/spooler*
touch /var/log/messages
chown -R $BKJS_USER /var/log/messages
if [ "$OS_TYPE" = "alpine" ]; then
rc-update del syslog boot
rc-update add rsyslog boot
fi
service rsyslog restart
fi
if [ "$(grep -s "#Bkjs $BKJS_HOME" /etc/rsyslog.d/$BKJS.conf)" = "" ]; then
echo "Configuring rsyslog.d/$BKJS ..."
mkdir -p /etc/rsyslog.d
echo "#Bkjs $BKJS_HOME" > /etc/rsyslog.d/$BKJS.conf
echo "\$FileOwner $BKJS_USER" >> /etc/rsyslog.d/$BKJS.conf
bkjsfmt=""
if [ -d /etc/systemd ]; then
echo '$template bkjsfmt,"%HOSTNAME% %msg%\n"' >> /etc/rsyslog.d/$BKJS.conf
bkjsfmt=";bkjsfmt"
fi
echo "local0.* $BKJS_HOME/log/message.log$bkjsfmt" >> /etc/rsyslog.d/$BKJS.conf
echo "local5.* $BKJS_HOME/log/access.log$bkjsfmt" >> /etc/rsyslog.d/$BKJS.conf
mkdir -p $BKJS_HOME/log
chown -R $BKJS_USER $BKJS_HOME/log
service rsyslog restart
fi
exit
;;
init-system)
# Disable SELinux
if [ -f /etc/selinux/config ]; then
$SED 's/SELINUX=(enforcing|permissive)/SELINUX=disabled/' /etc/selinux/config
fi
case "$OS_TYPE" in
amazon)
chkconfig monit on
$SED 's/^After.+$/After=network.target cloud-init.target/' /etc/systemd/system/multi-user.target.wants/monit.service
;;
alpine)
rc-update add monit
ln -s /usr/bin/doas /usr/bin/sudo
echo "permit nopass :wheel as root" >> /etc/doas.d/doas.conf
;;
esac
# Make sure monit is running all the time
mkdir -p /etc/monit.d
echo "set logfile syslog" > /etc/monit.d/logging
service monit restart
# Allow sudo use local binaries
[ -f /etc/sudoers ] && $SED 's/requiretty/!requiretty/' /etc/sudoers
exit
;;
init-limits)
# File handles and coredumps for debugging
if [ -d /etc/security/limits.d ]; then
conf=/etc/security/limits.d/90-$BKJS.conf
if [ ! -s $conf ]; then
echo "Configuring limits..."
echo '* soft core unlimited' > $conf
echo '* hard core unlimited' >> $conf
echo '* soft nofile 512000' >> $conf
echo '* hard nofile 512000' >> $conf
echo 'root soft nofile 512000' >> $conf
echo 'root hard nofile 512000' >> $conf
echo '* soft memlock unlimited' >> $conf
echo '* hard memlock unlimited' >> $conf
echo 'root soft memlock unlimited' >> $conf
echo 'root hard memlock unlimited' >> $conf
echo '* soft as unlimited' >> $conf
echo '* hard as unlimited' >> $conf
echo 'root soft as unlimited' >> $conf
echo 'root hard as unlimited' >> $conf
fi
fi
if [ -f /etc/rc.conf ]; then
$SED 's/^#?rc_ulimit.*/rc_ulimit="-n 512000 -c unlimited -l unlimited"/' /etc/rc.conf
fi
if [ -f /etc/init.d/monit ]; then
$SED 's/daemon $prog/ulimit -n 512000\n\tdaemon $prog/' /etc/init.d/monit
fi
if [ -f /etc/systemd/system.conf ]; then
$SED 's/^#?DefaultLimitNOFILE=.*$/DefaultLimitNOFILE=512000/' /etc/systemd/system.conf
fi
exit
;;
init-sysctl)
# System tuning
conf=/etc/sysctl.d/90-$BKJS.conf
[ -s $conf ] && exit
echo "Configuring sysctl..."
echo 'fs.file-max=512000' > $conf
echo 'kernel.core_uses_pid=0' >> $conf
echo 'vm.max_map_count=262144' >> $conf
echo 'vm.min_free_kbytes=65536' >> $conf
echo 'net.core.somaxconn=65536' >> $conf
echo 'net.core.netdev_max_backlog=65536' >> $conf
echo 'net.core.rmem_max=26214400' >> $conf
echo 'net.core.wmem_max=26214400' >> $conf
echo 'net.core.optmem_max=26214400' >> $conf
echo 'net.ipv4.tcp_rfc1337=1' >> $conf
echo 'net.ipv4.ip_local_port_range=2000 65000' >> $conf
echo 'net.ipv4.tcp_max_tw_buckets=1440000' >> $conf
echo 'net.ipv4.tcp_window_scaling=1' >> $conf
echo 'net.ipv4.tcp_tw_reuse=1' >> $conf
echo 'net.ipv4.tcp_fin_timeout=15' >> $conf
echo 'net.ipv4.tcp_keepalive_intvl=15' >> $conf
echo 'net.ipv4.tcp_keepalive_time=30' >> $conf
echo 'net.ipv4.tcp_keepalive_probes=5' >> $conf
echo 'net.ipv4.tcp_slow_start_after_idle=0' >> $conf
echo 'net.ipv4.tcp_max_orphans=262144' >> $conf
echo 'net.ipv4.tcp_max_syn_backlog=3240000' >> $conf
echo 'net.ipv4.tcp_no_metrics_save=1' >> $conf
echo 'net.ipv4.tcp_syn_retries=2' >> $conf
echo 'net.ipv4.tcp_synack_retries=2' >> $conf
echo 'net.ipv4.tcp_rmem=8192 87380 16777216' >> $conf
echo 'net.ipv4.tcp_wmem=8192 65536 16777216' >> $conf
echo 'net.ipv4.tcp_challenge_ack_limit = 999999999' >> $conf
if [ "$(get_flag -noipv6)" != "" ]; then
echo 'net.ipv6.conf.all.disable_ipv6 = 1' >> $conf
echo 'net.ipv6.conf.default.disable_ipv6 = 1' >> $conf
echo 'net.ipv6.conf.lo.disable_ipv6 = 1' >> $conf
fi
sysctl -p $conf
exit
;;
init-service)
    # Install service for a script or bkjs service.
    # Alpine: writes an OpenRC init script that runs "bkjs check-server".
    # Other distros: symlinks a SysV init script, and when systemd exists
    # also installs a oneshot unit ordered before monit.
    find_user
    echo "Init service $BKJS with $BKJS_BIN"
    case $OS_TYPE in
    alpine)
      conf=/etc/init.d/$BKJS
      # $ECHO is expected to interpret \n/\t escapes (e.g. "echo -e").
      $ECHO "#!/sbin/openrc-run\nname=\"$BKJS\"" > $conf
      $ECHO "depend() {\n\tneed logger\n}" >> $conf
      $ECHO "start() {\n\t$BKJS_BIN check-server\n}" >> $conf
      chmod 755 $conf
      rc-update add $BKJS
      ;;
    *)
      # NOTE(review): $path is not defined anywhere in this section of the
      # script, so this symlink source is likely empty/wrong -- verify it
      # should be $BKJS_BIN (set elsewhere by the surrounding tool).
      ln -sfn $path /etc/init.d/$BKJS
      if [ -d /etc/systemd ]; then
         conf=/etc/systemd/system/$BKJS-check.service
         echo '[Unit]' > $conf
         echo 'Description=Backendjs system check on startup' >> $conf
         echo 'Before=monit.service' >> $conf
         echo 'After=network-online.target' >> $conf
         echo '[Service]' >> $conf
         echo "ExecStart=$BKJS_BIN check-server" >> $conf
         echo 'Type=oneshot' >> $conf
         echo '[Install]' >> $conf
         echo 'WantedBy=multi-user.target' >> $conf
         systemctl enable $BKJS-check
         # SysV entry disabled in favor of the systemd unit.
         chkconfig $BKJS off
      else
         chkconfig $BKJS on
      fi
      ;;
    esac
    exit
    ;;
stop-service)
echo "Stopping service $BKJS"
case $OS_TYPE in
alpine)
rc-update del $BKJS
;;
*)
chkconfig $BKJS off
if [ -d /etc/systemd ]; then
systemctl disable $BKJS-check
fi
;;
esac
$0 stop
exit
;;
init-mfa)
    # Configure SSH multi-factor auth (public key + Google Authenticator
    # TOTP) and enroll the given user. Must run as root.
    [ "$(whoami)" != "root" ] && echo "Run as root please" && exit 1
    case "$OS_TYPE" in
    alpine)
      apk add google-authenticator openssh-server-pam
      $ECHO "account include base-account" > /etc/pam.d/sshd
      $ECHO "auth required pam_env.so" >> /etc/pam.d/sshd
      $ECHO "auth required pam_nologin.so successok" >> /etc/pam.d/sshd
      $ECHO "auth required /lib/security/pam_google_authenticator.so echo_verification_code grace_period=57600 nullok" >> /etc/pam.d/sshd
      $ECHO "auth required pam_unix.so\tmd5 sha512" >> /etc/pam.d/sshd
      ln /etc/pam.d/sshd /etc/pam.d/sshd.pam
      $0 init-limits
      ;;
    *)
      # Fix: the flag was a Unicode en-dash ("–y"), which yum treated as a
      # package name, so the install prompted interactively (and failed in
      # unattended runs). Use a real ASCII "-y".
      yum install google-authenticator -y
      if [ "$(egrep -s 'pam_google_authenticator' /etc/pam.d/sshd)" = "" ]; then
         $SED 's/^auth[ \t]+substack[ \t]+password-auth/auth required pam_google_authenticator.so\n#auth substack password-auth/' /etc/pam.d/sshd
      fi
      if [ "$(egrep -s 'pam_google_authenticator' /etc/pam.d/system-auth)" = "" ]; then
         $SED 's/^auth[ \t]+sufficient[ \t]+pam_unix.so nullok try_first_pass/auth requisite pam_unix.so nullok try_first_pass\nauth sufficient pam_google_authenticator.so/' /etc/pam.d/system-auth
      fi
      ;;
    esac
    # Require PAM + keyboard-interactive on top of public key auth.
    echo >> /etc/ssh/sshd_config
    if [ "$(egrep -s '^UsePAM yes' /etc/ssh/sshd_config)" = "" ]; then
       $SED 's/^UsePAM/#UsePAM/' /etc/ssh/sshd_config
       echo 'UsePAM yes' >> /etc/ssh/sshd_config
    fi
    if [ "$(egrep -s '^ChallengeResponseAuthentication yes' /etc/ssh/sshd_config)" = "" ]; then
       $SED 's/^ChallengeResponseAuthentication/#ChallengeResponseAuthentication/' /etc/ssh/sshd_config
       echo 'ChallengeResponseAuthentication yes' >> /etc/ssh/sshd_config
    fi
    if [ "$(egrep -s '^AuthenticationMethods publickey,keyboard-interactive' /etc/ssh/sshd_config)" = "" ]; then
       $SED 's/^AuthenticationMethods/#AuthenticationMethods/' /etc/ssh/sshd_config
       echo 'AuthenticationMethods publickey,keyboard-interactive' >> /etc/ssh/sshd_config
    fi
    # Enroll the target user (disallow token reuse, time-based, 2 codes/30s).
    su $(get_arg -user ec2-user) -c "google-authenticator -d -t -f -r 2 -R 30"
    exit
    ;;
init-logwatcher)
find_user
cron=$(get_arg -cron "*/$(get_arg -interval 5) * * * *")
echo 'MAILTO=""' > /etc/cron.d/$BKJS-logwatcher
echo "$cron $BKJS_USER $BKJS_BIN logwatcher" >> /etc/cron.d/$BKJS-logwatcher
exit
;;
install-packages)
# Install required development and runtime packages
packages="bash git curl nano rsync rsyslog socat monit mc make gcc cmake autoconf automake libtool"
# Linux distro specific actions
case "$OS_TYPE" in
alpine)
packages="$packages nodejs npm man-pages monit-openrc rsyslog-openrc shadow doas"
packages="$packages g++ python3 libpng-dev"
ver=$(cat /etc/alpine-release|cut -d. -f1,2)
$SED "s/^# *(.+v$ver\/community)\$/\1/" /etc/apk/repositories
apk update
apk add $packages
;;
amazon)
packages="$packages gcc-c++ libuuid-devel openssl-devel libxml2-devel openldap-devel readline-devel libpng-devel libjpeg-turbo-devel"
packages="$packages ntp man telnet"
amazon-linux-extras install epel -y
yum -y -q clean metadata
yum -y -q install epel-release
sleep 5
yum -y -q update
sleep 5
yum -y -q install $packages
;;
ubuntu)
packages="$packages ntp man telnet"
apt install build-essential $packages
;;
esac
exit
;;
install-ec2)
$0 init-server -user ec2-user -home /home/ec2-user -prefix /home/ec2-user $(get_all_args "-user -home -prefix")
$0 check-server $(get_all_args)
exit
;;
help)
echo ""
echo "Instance setup commands:"
echo ""
echo " init-server [-user USER] [-home HOME] [-prefix $PREFIX] [-doman DOMAIN] [-host HOST] - initialize the backend environment, setup the Linux server with packages and change system config files for production use (AmazonAMI, Alpoine, Ubuntu)"
echo " init-hostname [-host HOST] [-domain DOMAIN] - set the hostname"
echo " init-user [-user NAME] - create a new user for the backend"
echo " init-ssh - setup SSH permissions, allow only public key auth"
echo " init-logrotate - setup logrotate for the backend log files"
echo " init-rsyslog - setup rsyslog to use for the backend logging, access log and backend log"
echo " init-system - setup system wide parameters, permissions"
echo " init-limits - setup ulimits"
echo " init-sysctl - setup sysctl parameters"
echo " init-home - setup backend home with required folders"
echo " init-mfa [-user ec2-user] - initialize EC2 instance with multi-factor authentication using Google authenticator"
echo " init-logwatcher [-interval MINS] [-cron CRON] - creates a crontab file in /etc/cron.d to periodically run the log watcher, -cron can specify full cron time spec"
echo " init-service [-server NAME] - create a service to be run bkjs check-server on server startup"
echo " stop-service - stop $BKJS service from starting on startup and from monit"
echo " install-ec2 - setup server on EC2 instance in the ec2-user home"
echo " install-packages - install required packages and updates"
;;
esac
| true
|
7e154b24d210fe82f51a5ffdd7f4935073d9bbaf
|
Shell
|
chaixxxx/-password
|
/随机password
|
UTF-8
| 119
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate a random 8-character alphanumeric password and print it.
#
# Fixes vs. the original: the charset variable was assigned as "str" but
# referenced as "Str" (unset -> "RANDOM % 0" division-by-zero error), the
# slice expression "${Str:tmp}" was meaningless, and "pass" was never
# appended to, so the script always printed an empty line.
str="abcdefghijklnmopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
pass=""
for i in {1..8}
do
    # Pick a random index into the character set and append that character.
    num=$(( RANDOM % ${#str} ))
    pass+=${str:num:1}
done
echo "$pass"
| true
|
b9d2730ab57df890cc69b3fee422295f108c5560
|
Shell
|
dani3/dotfiles
|
/update-farm.sh
|
UTF-8
| 560
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
# Re-stow each dotfile package into $HOME. For every package, first run a
# stow dry run, collect the targets that would conflict, delete any that
# are plain files or symlinks, then restow for real.

PACKAGES=(
  bash
  zsh
  xorg
  feh
  i3
  picom
  polybar
  polybar-forecast
  neofetch
  alacritty
  xbindkeys
  dunst
  xinitrc
  xprofile
  conky
  nvim
)

for PKG in "${PACKAGES[@]}"; do
  # Dry run (--no) and scrape the conflicting target paths, one per array
  # element so paths survive intact (the original stuffed them into a
  # whitespace-split scalar).
  mapfile -t CONFLICTS < <(stow -t ~ --no --verbose "$PKG" 2>&1 | awk '/\* existing target is/ {print $NF}')
  for filename in "${CONFLICTS[@]}"; do
    if [[ -f "$HOME/$filename" || -L "$HOME/$filename" ]]; then
      echo "DELETE: $filename"
      rm -f "$HOME/$filename"
    fi
  done
  # -R restows: prunes stale links, then creates current ones.
  stow -v -R -t ~ "$PKG"
done
| true
|
fa65518f30dff402fc93d9f1017e17332ea61d24
|
Shell
|
bartpolot/dotfiles
|
/install.sh
|
UTF-8
| 432
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/sh
# Symlink every dotfile (.??*) from this repository into $HOME, skipping
# .git and any target that already exists.
pwd=$(pwd)
for f in .??*; do
  # "==" is a bashism that errors under POSIX sh (dash); use "=". Quote $f
  # so names with spaces don't break the test.
  if [ "$f" = ".git" ]; then
    continue
  fi
  if [ -e "$HOME/$f" ]; then
    echo "$f exists in $HOME, skipping!"
    continue
  fi
  ln -s "$pwd/$f" "$HOME/$f"
done
# git clone --recursive https://github.com/sorin-ionescu/prezto.git "${ZDOTDIR:-$HOME}/.zprezto"
# ln -s /home/bart/.zprezto/runcoms/zshrc $HOME/.zshrc.prozto
# ln -s /home/bart/.zprezto/runcoms/zpreztorc $HOME/.zpreztorc
| true
|
6afb7fa5581705382ca1919edf6c2dbbbf5c49e4
|
Shell
|
modeconnectivity/modeconnectivity
|
/main_mnist_fcn.sh
|
UTF-8
| 1,285
| 3.1875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Driver for the MNIST fully-connected experiments: trains VAR base models,
# then runs experiments A (per-entry-layer subnetworks) and B on each, and
# finally merges and plots all runs.
DATAROOT="data/"
OUTROOT="results/mnist/fcn"
DEPTH=10
WIDTH=245
LR=0.001
VAR=10 # variation, number of models to train
F=2 # the dropout ratio denominator
# the main script for learning the different dropout models
for V in `seq 1 $VAR`; do # can be performed in parallel
    #1. first learn the original models
    python train_fcn.py --dataset mnist --dataroot $DATAROOT -oroot $OUTROOT --name var-$V --depth $DEPTH --width $WIDTH --learning_rate $LR --nepoch 200
    MODEL=$OUTROOT/var-$V/checkpoint.pth
    #2. once done, train all the subnetworks on the task
    # One run per entry layer, 0..DEPTH inclusive.
    for EL in `seq 0 $DEPTH`; do # can be performed in parallel
        LRA=0.003
        python exp_a_fcn.py --model $MODEL --nepoch 400 --fraction $F --name "A-f$F" --ndraw 20 --entry_layer $EL --learning_rate $LRA
    done;
    #2b. merge the results from the different layers
    python merge_a_fcn.py $OUTROOT
    #3. Perform experiment B on the same network
    python exp_b.py --model $MODEL --fraction $F --name "B-f$F" --ndraw 200
done;
#4. Merge the different runs
python merge_vars.py $OUTROOT/var-*
#5. Plot the different runs
# Same plot twice: linear and log y-scale.
python plot_merge.py $OUTROOT/merge/ --yscale linear --experiments "A-f$F" "B-f$F"
python plot_merge.py $OUTROOT/merge/ --yscale log --experiments "A-f$F" "B-f$F"
| true
|
2b1c0b4abd4ee1abca383e7534d2198caeb11001
|
Shell
|
TinasheMTapera/Reward
|
/Scripts/fw-heudiconv-tabulate.sh
|
UTF-8
| 396
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/bash
# Tabulate sequence info for one Reward2018 session with fw-heudiconv into a
# per-session output directory, then strip PHI from the resulting TSV.
#prep
source activate reward

SESSION=$1 # the sub study eg fndm1

# Without a session the output would land in the parent directory and the
# tabulate call would silently target an empty session -- fail early instead.
if [ -z "$SESSION" ]; then
    echo "Usage: $0 <session>" >&2
    exit 1
fi

DEST="$HOME/curation/code/fw-heudiconv-tabulate_outputs/$SESSION"

if [ ! -d "$DEST" ]
then
    echo Creating Directory...
    mkdir -p "$DEST"
fi

echo running tabulate...
fw-heudiconv-tabulate --project Reward2018 --session "$SESSION" --no-unique --path "$DEST"
python remove_tabulate_PHI.py "$DEST/Reward2018_SeqInfo.tsv"
| true
|
d8abec6c57b4dc2e9fdc1acbec61fe9f41c275b2
|
Shell
|
andersisaksen/BIOS-IN9410
|
/info.sh
|
UTF-8
| 136
| 2.578125
| 3
|
[] |
no_license
|
##Calculates the length of all sequences in each fasta file
for f in *.fasta
do
    # If no .fasta files exist the glob stays literal; skip instead of
    # handing a nonexistent "*.fasta" path to the helper script.
    [ -e "$f" ] || continue
    echo "$f"
    python ../scripts/seq_length.py "$f"
done
| true
|
5f772b0b19f0eeba077fa20c37c4318d2c36ca5b
|
Shell
|
EBI-Metagenomics/ebi-metagenomics-cwl
|
/workflows/run-toil-v3-paired.sh
|
UTF-8
| 856
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the EMG v3 paired-end pipeline with cwltoil on an LSF cluster:
# packs the CWL description, then launches toil with a local job store
# and results directory under toil-v3-paired/.
source ebi-setup.sh
set -o pipefail
#CWLTOIL="ipdb ../../toil-hack/venv/bin/cwltoil"
CWLTOIL=cwltoil
#RESTART=--restart # uncomment to restart a previous run; if the CWL descriptions have changed you will need to start over
#DEBUG=--logDebug # uncomment to make output & logs more verbose
workdir=/tmp
#mkdir -p ${workdir}
# must be a directory accessible from all nodes
RUN=v3-paired
DESC=../emg-pipeline-v3-paired.cwl
INPUTS=../emg-pipeline-v3-paired-job.yaml
start=toil-${RUN}
mkdir -p ${start}/results
cd ${start}
cp ${INPUTS} ./
# Pack the workflow into a single self-contained CWL document.
cwltool --pack ${DESC} > packed.cwl
# ${RESTART} and ${DEBUG} expand to nothing unless enabled above.
/usr/bin/time ${CWLTOIL} ${RESTART} ${DEBUG} --logFile ${PWD}/log --outdir ${PWD}/results \
    --preserve-environment PATH CLASSPATH --batchSystem LSF --retryCount 1 \
    --workDir ${workdir} --jobStore ${PWD}/jobstore --disableCaching \
    ${DESC} ${INPUTS} | tee output
| true
|
1cd14ca1d581f3619a588227607c8d38af0fcd77
|
Shell
|
mhcerri/configs
|
/home/bin/cronwrap
|
UTF-8
| 1,668
| 4.09375
| 4
|
[] |
no_license
|
#!/bin/bash -eu
# Cron job wrapper: runs COMMAND, logs failures (optionally successes) to a
# log file, and raises a desktop notification on failure.
# NOTE(review): shebang options (-eu) are lost if invoked as "bash cronwrap";
# fine for cron use where the script is executed directly.
log="$HOME/tmp/cronwrap.log"
log_on_success=0;
no_notification=0
help=$(
cat <<EOF
Usage: $(basename "$0") [-h|--help] [--log=LOG] [--] COMMAND
Cron job wrapper.
Arguments:
 -h, --help Show this message and exit.
 --log LOG Log file (default: $log).
 --log-on-success Also log on success (by default only failures
 are logged).
 --no-notification Suppress all notifications (by default a failure
 will trigger a notification).
EOF
)
# Option parsing; everything after "--" (or the first non-option) is the
# command to run.
while [ "$#" -gt 0 ]; do
    case "$1" in
    -h|--help)
        echo "$help"
        exit 0
        ;;
    --log)
        shift;
        log="$1"
        ;;
    --log-on-success)
        log_on_success=1;
        ;;
    --no-notification)
        no_notification=1
        ;;
    --)
        shift
        break
        ;;
    *)
        break
    esac
    shift
done
if [ "$#" -eq 0 ]; then
    echo "No command provided!"
    exit 1
fi
# An explicitly empty --log means "discard".
if [ -z "${log:-}" ]; then
    log=/dev/null
fi
mkdir -p "$(dirname "$log")"
# Make it possible for X applications to run, ie notify-send
# NOTE(review): picks the X display of the current user from who(1);
# could yield multiple lines if several sessions exist -- verify.
if [ -z "${DISPLAY:-}" ]; then
    export DISPLAY="$(who | awk -vme="$(whoami)" '$1 == me && $2 ~ /^:/ { print $2 }')"
fi
if [ -z "${DISPLAY:-}" ]; then
    export DISPLAY=:1
fi
if [ -z "${XDG_RUNTIME_DIR:-}" ]; then
    export XDG_RUNTIME_DIR="/run/user/$(id -u)"
fi
# NOTE(review): hardcoded to the author's home; personal dotfile assumption.
if [ -z "${XAUTHORITY:-}" ]; then
    export XAUTHORITY=/home/mhcerri/.Xauthority
fi
# Run the command
time=$(date -Is)
rc=0
# Capture combined output and the exit status without tripping set -e.
output=$("$@" 2>&1) || rc=$?
if [ "$rc" -ne 0 ] || [ "$log_on_success" -ne 0 ]; then
    echo "[rc=$rc]: $time: $*" >> "$log"
    echo "$output" >> "$log"
fi
if [ "$rc" -ne 0 ] && [ "$no_notification" -eq 0 ]; then
    notify-send \
        "Cron job failed: $*" \
        "Check \"$log\" for details."
fi
exit 0
| true
|
183c629d3e09ee2d39b3fd1dce4b9812daab043b
|
Shell
|
roycollings/saltstack
|
/states/handy_scripts/mouse_fix
|
UTF-8
| 609
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/sh
# Enable natural scrolling and tap-to-click on the first xinput "Touchpad"
# device, looking up property ids by name instead of hardcoding them.
#xinput list-props 12
#xinput set-value
#xinput set-prop 12 336 1
#xinput set-prop 12 336 0.5
#xinput set-prop 12 328 1
#xinput list-props 12
#xinput set-prop 12 336 0.3
#xinput set-prop 12 336 0.33
#xinput set-prop 12 336 0.35
# Extract the numeric device id from "... id=NN ..." of the Touchpad line.
DEVICE_ID=$(xinput list | grep Touchpad | sed -e "s/^.*id=\([0-9]*\).*$/\1/")
# set_prop NAME VALUE: resolve NAME to its numeric property id on the
# touchpad, set it to VALUE, then echo the resulting property line.
set_prop(){
    PROP=$1
    VALUE=$2
    prop_id=$(xinput list-props $DEVICE_ID | grep "$PROP (" | sed -e "s/^.*(\([0-9]*\)).*/\1/")
    xinput set-prop $DEVICE_ID $prop_id $VALUE
    xinput list-props $DEVICE_ID | grep "($prop_id)"
}
set_prop "Natural Scrolling Enabled" 1
set_prop "Tapping Enabled" 1
| true
|
75d6841ab43a28740301d790f5d912835d8c1e64
|
Shell
|
whyrusleeping/go-buildall
|
/buildall
|
UTF-8
| 1,157
| 3.96875
| 4
|
[] |
no_license
|
#!/bin/bash
# Cross-compile a Go program for every platform listed in ./matrix.
#
# Usage:
#   buildall print-matrix > matrix    # generate the platform list once
#   buildall <import-path> [outdir]   # build for each platform in ./matrix

function printMatrix() {
    # prints out the matrix of all platforms go can target
    echo "darwin 386
darwin amd64
darwin arm
darwin arm64
dragonfly amd64
freebsd 386
freebsd amd64
freebsd arm
linux 386
linux amd64
linux arm
linux arm64
linux ppc64
linux ppc64le
netbsd 386
netbsd amd64
netbsd arm
openbsd 386
openbsd amd64
openbsd arm
plan9 386
plan9 amd64
solaris amd64
windows 386
windows amd64"
}

# Quote $1 (and default it) so running with no arguments no longer raises
# "[: ==: unary operator expected"; use POSIX "=" instead of "==".
if [ "${1:-}" = "print-matrix" ]
then
    printMatrix
    exit 0
fi

if [ ! -e matrix ]
then
    echo "no 'matrix' file found"
    echo "please run '$0 print-matrix > matrix' and run $0 again."
    exit 1
fi

if [ -z "${1:-}" ]
then
    echo "must specify import path of go program to build"
    echo ""
    echo "usage:"
    echo "buildall <import-path> [optional output dir]"
    exit 1
fi

target=$1
output=${2:-.}

# Build $target for GOOS=$1 GOARCH=$2 into $output/$1-$2, teeing the build
# log into that directory.
function doBuild() {
    echo "==> building $1 $2"
    local dir=$output/$1-$2
    echo "  output to $dir"
    mkdir -p "$dir"
    # Run in a subshell so the working directory is always restored; the
    # old "cd $dir ...; cd .." dance corrupted the cwd for relative or
    # nested output paths, breaking every iteration after the first.
    ( cd "$dir" && GOOS=$1 GOARCH=$2 go build "$target" 2>&1 | tee build-log )
}

go version > "$output/build-info"
uname -a >> "$output/build-info"
echo built on $(date) >> "$output/build-info"

while read line
do
    # Intentionally unquoted: each matrix line splits into "os arch".
    doBuild $line
done < matrix
| true
|
f18eafb9b4e5020c0e5cbbf3e364e37e08067aa1
|
Shell
|
aniketdakhare/Arithmetic_Computation_Sorting
|
/Sorting_Arithmetic_Computation.sh
|
UTF-8
| 1,121
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash -x
# Read three integers a, b, c; evaluate four arithmetic expressions over
# them; print the results unsorted, then in descending and ascending order
# (two in-place bubble sorts).
echo Welcome to Arithmetic Computation and Sorting
read -p "Enter value of 'a' " a
read -p "Enter value of 'b' " b
# Fixed prompt typo: was "Entar value if 'c'".
read -p "Enter value of 'c' " c
echo "a=$a b=$b c=$c"

# Associative array holding the four expression results.
declare -A compute
compute[exp"0"]=$(($a+$b*$c))
compute[exp"1"]=$(($a*$b+$c))
compute[exp"2"]=$(($c+$a/$b))   # integer division; errors if b == 0
compute[exp"3"]=$(($a%$b+$c))

# Copy values into an indexed array so they can be sorted positionally.
# NOTE: associative-array iteration order is unspecified in bash.
i=0
for key in ${!compute[@]}
do
	arr[$i]=${compute[$key]}
	((i++))
done
echo Computation result in Array ${arr[@]}

# Bubble sort, descending (swap when left < right).
temp=0
for (( i=0; i<${#arr[@]}; i++))
do
	for ((j=$(($i+1)); j<${#arr[@]}; j++))
	do
		if [ ${arr[i]} -lt ${arr[j]} ]
		then
			temp=${arr[i]}
			arr[i]=${arr[j]}
			arr[j]=$temp
		fi
	done
done
echo Computation result in the Descending Order ${arr[@]}

# Bubble sort, ascending (swap when left > right).
temp=0
for (( i=0; i<${#arr[@]}; i++))
do
	for ((j=$(($i+1)); j<${#arr[@]}; j++))
	do
		if [ ${arr[i]} -gt ${arr[j]} ]
		then
			temp=${arr[i]}
			arr[i]=${arr[j]}
			arr[j]=$temp
		fi
	done
done
echo Computation result in the Ascending Order ${arr[@]}
| true
|
b97d8c6d7a28c98237a5aef2a96518dd1ea1a08b
|
Shell
|
jonkeane/vatic
|
/load-videos.sh
|
UTF-8
| 2,573
| 4.21875
| 4
|
[
"LicenseRef-scancode-other-permissive"
] |
permissive
|
#! /usr/bin/bash
# Usage: load-videos.sh -o [directory to place frame images] -t [training clip name] -a [video file(s) to load in to vatic]
# Eg: load-videos.sh -o /var/videos/frames/test/ /var/videos/1minClips/*
#
# This script will replace all spaces and parens with underscores in filenames (so that ffmpeg has an easier time digesting them), create folders for the frames to sit in, use turkic to extract frames, and then publish each video as a task for fingerspelling annotation
# the -t [training clip name] and -a arguments are optional:
# -t associates a file with a specific training clip
# -a publishes the video on AWS MTurk (leaving it off defaults to offline publishing)

# Defaults: publish offline, no training clip.
offline="--offline"
training=""

while getopts ":o:t:a" opt; do
  case $opt in
    o)
      output="${OPTARG}"
      ;;
    a)
      offline=""
      ;;
    t)
      training="--train-with ${OPTARG}"
      ;;
    \?) echo "Invalid option -$OPTARG" >&2
      ;;
  esac
done

# grab the videos at the end
shift $(($OPTIND-1))
files=("${@}")

echo "Found "${#files[@]}" videos to process"

# for debugging options
# echo "$offline"
# echo "$training"
# echo "${files[@]}"
# exit 1

# rename all files, to replace spaces with underscores
# NOTE(review): the two consecutive space substitutions below appear to
# target two different space characters (a regular space and what looks like
# a non-breaking space, U+00A0) — confirm the exact bytes survive editing.
echo "Renaming all videos with spaces in the title"
for file in "${files[@]}"
do
	newfile=${file// /_}
	newfile=${newfile// /_}
	newfile=${newfile//\(/_}
	newfile=${newfile//\)/_}
	mv "$file" "$newfile"
done

# two types of spaces that show up
files=( "${files[@]// /_}" )
files=( "${files[@]// /_}" )
files=( "${files[@]//(/_}" )
files=( "${files[@]//)/_}" )

# for each of the files, make a directory to contain the frames
echo "Making directories for each of the videos."
for file in "${files[@]}"
do
	# echo "/n"
	b=$(basename "$file")
	mkdir -p "$output$b"
done

echo "Extracting images for each of the videos."
# for each of the files, exract the frames
for file in "${files[@]}"
do
	b=$(basename "$file")
	turkic extract "$file" "$output$b"
done

echo "Loading each of the videos to turkic."
if [ "$offline" == "--offline" ]
then
	echo "(Currently using --offline option. This should be removed if the hits are to be run on AWS.)"
fi
# for each of the files, exract the frames
# (actually: register each extracted clip as a turkic task with the
# ~Start/~End labels, the optional training clip and publish mode)
for file in "${files[@]}"
do
	b=$(basename "$file")
	CMD="turkic load $b $output$b '' ~Start ~End $training $offline"
	eval "$CMD"
done

echo "Publishing the videos."
if [ "$offline" == "--offline" ]
then
	echo "(Again, using --offline option. This should be removed if the hits are to be run on AWS.)"
	turkic publish --offline
else
	turkic publish
fi
| true
|
2432834e2ad6d7845a985af6c8cbde3da0be794f
|
Shell
|
cucumberlinux/ports
|
/cucumber/base/linux/linux.buildinfo
|
UTF-8
| 5,465
| 3.53125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Copyright 2016, 2018, 2019 Scott Court
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Cucumber Linux Buildinfo for linux
NAME=linux
VERSION=4.19.53
URL=( https://cdn.kernel.org/pub/$NAME/kernel/v4.x/$NAME-$VERSION.tar.xz
https://cdn.kernel.org/pub/$NAME/kernel/v4.x/$NAME-$VERSION.tar.sign)
BUILDDEPS=()
verify () {
xz -dk $NAME-$VERSION.tar.xz
pkgapi_gpg --verify $NAME-$VERSION.tar.sign
local RETVAL=$?
rm $NAME-$VERSION.tar
return $RETVAL
}
build () {
# Check for the presence of a version of GCC >= 7.3.0/8.1.0 or
# /opt/kernel_gcc. These are the versions of GCC that enable the kernel
# to be compiled with a more complete Spectre v2 (CVE-2017-5715)
# mitigation. If attempting to compile the kernel with an older
# compiler, throw an error so the kernel is not compiled without the
# mitigation by default.
GCC_VERSION=$(gcc --version | head -n1 | cut -d ' ' -f 3)
GCC_MAJOR_VERSION=$(echo $GCC_VERSION | cut -d . -f 1)
GCC_MINOR_VERSION=$(echo $GCC_VERSION | cut -d . -f 2)
if [[ $GCC_MAJOR_VERSION -ge 9 || ( $GCC_MAJOR_VERSION -eq 8 && $GCC_MINOR_VERSION -ge 1 ) || ( $GCC_MAJOR_VERSION -eq 7 && $GCC_MINOR_VERSION -ge 3 ) ]]; then
cat << EOF
Building the Linux kernel with full Spectre v2 (CVE-2017-5715) mitigation using
the retpoline aware system compiler (GCC $GCC_VERSION).
EOF
elif [ "$MITIGATE_SPECTRE" = "FALSE" ]; then
cat << EOF
********************************************************************************
* WARNING: building the Linux kernel without full Spectre v2 (CVE-2017-5715) *
* mitigation. This is not recommended. *
********************************************************************************
EOF
elif [ ! -d /opt/kernel-gcc ]; then
cat << EOF
********************************************************************************
* ERROR: kernel-gcc package is not installed! *
* *
* In order to fully mitigate the effects of the Spectre v2 vulnerability *
* (CVE-2017-5715), the Linux kernel must be compiled with a version of GCC *
* that has the retpoline feature enabled in the compiler. This feature was *
* introduced in GCC 7.3.0. This system is running GCC $GCC_VERSION. *
* *
* Please either upgrade the compiler or install the kernel-gcc package. The *
* kernel-gcc package can be installed by running \`pickle kernel-gcc\`. *
* *
* If you really want to build the Linux kernel without Spectre v2 mitigation, *
* then rerun this script with MITIGATE_SPECTRE=FALSE. This is not recommended. *
********************************************************************************
EOF
exit 1
else
export PATH=/opt/kernel-gcc/bin:$PATH
fi
tar -xvf "$OWD/$NAME-$VERSION.tar.xz" || exit 1
cd $NAME-$VERSION || exit 1
# Prepare the source tree for the build
make mrproper || exit 1
# Copy over the appropriate config
cp "$OWD/config-$CUCARCH" .config || exit 1
# Do the actual build, using kernel_gcc to mitigate against Spectre v2
pkgapi_make || exit 1
# Install the modules
mkdir -pv $DESTDIR/lib/modules/$VERSION
make modules_install INSTALL_MOD_PATH=$DESTDIR
# Fix some misdirected symlinks. These will be provided by the
# linux-source package in the event that something needs them.
rm -v $DESTDIR/lib/modules/$VERSION*/{build,source} || exit 1
# Copy over the kernel
mkdir -pv $DESTDIR/boot
cp -v arch/x86/boot/bzImage $DESTDIR/boot/vmlinuz-$VERSION
cp -v System.map $DESTDIR/boot/System.map-$VERSION
cp -v .config $DESTDIR/boot/config-$VERSION
ln -s vmlinuz-$VERSION $DESTDIR/boot/vmlinuz
# Install the documentation
install -d $DESTDIR/usr/share/doc/linux-$VERSION
cp -r Documentation/* $DESTDIR/usr/share/doc/linux-$VERSION
# Copies ths install scripts (slack-desc and doint.sh) from $OWD to
# $DESTDIR/install, creating $DESTDIR/install if need be.
pkgapi_copy_package_scripts
# Add lines to doinst.sh to update any existing EFI configuration
cat >> $DESTDIR/install/doinst.sh << EOF
if [ -e \$ROOT/boot/efi/EFI/cucumber/vmlinuz.efi ]; then
rm \$ROOT/boot/efi/EFI/cucumber/vmlinuz.efi
cp \$ROOT/boot/vmlinuz-$VERSION \$ROOT/boot/efi/EFI/cucumber/vmlinuz.efi
fi
EOF
}
| true
|
c754ce593b5cc237383b9d2b516b5c91fec761b9
|
Shell
|
maxsu/antergos-packages
|
/antergos/chrome-gnome-shell/PKGBUILD
|
UTF-8
| 982
| 2.53125
| 3
|
[] |
no_license
|
# Maintainer: Antergos Developers <dev@antergos.com>
# Contributor: Andrew Querol <andrew@querol.me>
# Contributor: Rafael Fontenelle <rafaelff@gnome.org>
# Arch/Antergos PKGBUILD: native integration of extensions.gnome.org with
# GNOME Shell for Chromium-based browsers (connector only; the browser
# extension itself is not built, see -DBUILD_EXTENSION=OFF).

pkgname=chrome-gnome-shell
pkgver=9
pkgrel=1
pkgdesc="Native integration of extensions.gnome.org with GNOME Shell for Chromium-based browsers."
arch=('any')
url='https://wiki.gnome.org/Projects/GnomeShellIntegrationForChrome'
license=('GPL')
depends=('gnome-shell' 'python-requests' 'python-gobject')
makedepends=('cmake' 'jq')
replaces=('gs-chrome-connector')
conflicts=('gs-chrome-connector' "${pkgname}-git")
source=("https://git.gnome.org/browse/chrome-gnome-shell/snapshot/chrome-gnome-shell-v${pkgver}.tar.xz")
md5sums=('cbc7ae29bee7d52460880d12cf4b6632')

# Create the out-of-source build directory used by build().
prepare() {
	cd "${srcdir}/${pkgname}-${pkgver}"
	mkdir -p 'build'
}

# Configure with CMake; only the native connector is built.
build() {
	cd "${srcdir}/${pkgname}-${pkgver}/build"
	cmake \
		-DCMAKE_INSTALL_PREFIX=/usr \
		-DCMAKE_INSTALL_LIBDIR=lib \
		-DBUILD_EXTENSION=OFF \
		../
}

# Install into the packaging staging directory.
package() {
	cd "${srcdir}/${pkgname}-${pkgver}/build"
	make DESTDIR="${pkgdir}" install
}
| true
|
87a896282e121769546d45b62a5e8003511a621b
|
Shell
|
zhangbeibei/aws-rds-batch-alarm
|
/create_event.sh
|
UTF-8
| 1,031
| 3.0625
| 3
|
[] |
no_license
|
# Create a CloudWatch Events rule for RDS DB instance events and attach the
# matching Lambda function ("lambda-rds-alarm<suffix>") as the rule's target.
# Usage: ./create_event.sh <suffix>
lambdaname="lambda-rds-alarm"$1
echo $lambdaname
eventname="lambda-rds-alarm"$1
#aws events put-rule --name $eventname --event-pattern "{\"source\":[\"aws.config\"],\"detail-type\":[\"Config Rules Compliance Change\"]}"
aws events put-rule --name $eventname --event-pattern "{\"source\": [\"aws.rds\"],\"detail-type\": [\"RDS DB Instance Event\"]}" --state ENABLED

# Look up the rule ARN and strip the surrounding JSON quotes.
earn=$(aws events list-rules --query "Rules[?Name == '$eventname'].[Name,Arn][0][1]")
eventarn=$(echo "$earn" | tr -d '"')

# Grant CloudWatch Events permission to invoke the Lambda.
aws lambda add-permission --function-name $lambdaname --statement-id $eventname \
--action 'lambda:InvokeFunction' \
--principal events.amazonaws.com \
--source-arn $eventarn

# Look up the Lambda ARN and strip its quotes as well.
larn=$(aws lambda list-functions --query "Functions [?FunctionName=='$lambdaname'].[FunctionName,FunctionArn][0][1]")
echo "lambdaname : "$lambdaname
echo "lambdaarn : "$larn
lambdaarn=$(echo "$larn" | tr -d '"')

# Register the Lambda as the rule target.
# Fixed: the original interpolated the undefined $lambdabanme (typo) into the
# target Id and passed the still-quoted $larn instead of the stripped
# $lambdaarn it had just computed.
aws events put-targets --rule $eventname --targets "Id"="target"$lambdaname,"Arn"=$lambdaarn
| true
|
ebd274043bcfa2a9f83a7a084801e75a8ab2c78b
|
Shell
|
okhuandou/airf
|
/server/user.sh
|
UTF-8
| 847
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
# Daily billing-log user statistics for the "aircraft" game servers.
# Usage: ./user.sh yyyy-mm-dd
# Copies and decompresses the day's gzip'd billing logs from both server
# directories, merges them, then prints per-something counts:
#   1) "user" records grouped by field 6  -> new users ("新增")
#   2) unique (field2,field6) pairs over login|user records, counted by
#      field 6 -> active users incl. new ("活跃(包括新增)")
# NOTE(review): field meanings (| - delimited) are assumed from usage here;
# confirm against the billStat log format.
#fname=billStat.game-2018-09-27_0.log
if [[ $# != 1 ]]; then
	echo "usage yyyy-mm-dd"
	exit 1
fi

# Stage the day's compressed logs locally and unpack them.
cp /data/bill/aircraft/billStat.game-${1}*log.gz ./aircraft/
cp /data/bill/aircraft2/billStat.game-${1}*log.gz ./aircraft2/
gunzip ./aircraft/billStat.game-${1}*log.gz
gunzip ./aircraft2/billStat.game-${1}*log.gz

# Merge both servers' logs into a single day file (server 2 is optional).
cat ./aircraft/billStat.game-${1}_0.log > billStat.game-${1}.log
if [[ -f ./aircraft2/billStat.game-${1}_0.log ]]; then
	cat ./aircraft2/billStat.game-${1}_0.log >> billStat.game-${1}.log
fi

fname=billStat.game-${1}.log

# New users: count of "user" records per field-6 value.
cat $fname | grep user | awk -F\| '{print $6}' | sort | uniq -c | awk '{print $2" "$1}'
echo "新增"
echo ""

# Active users (incl. new): unique (field2, field6) pairs across login|user
# records, then counted per field-6 value.
cat $fname | grep -E "login|user" | awk -F\| '{print $2"|"$6}' | sort | uniq | awk -F\| '{print $2}' | sort | uniq -c | awk '{print $2" "$1}'
echo "活跃(包括新增)"
echo ""
| true
|
a608716685bab3a3145cb74976c6c371797f41fc
|
Shell
|
caterpillarproject/modules
|
/alexlib/croton06/sd93cool/download.sh
|
UTF-8
| 532
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/bash
# Download the pk6zf75 non-equilibrium cooling tables (SD93-style data) from
# Ralph Sutherland's site, one file per metallicity suffix.
# Earlier batches (the *.cie CIE tables, the pk6{ff,zf}* .neq variants and
# the pk6ff75m-* series) were fetched by loops that are no longer needed.
BASE_URL="http://www.mso.anu.edu.au/~ralph/data/cool"
for metallicity in 05 10 15 20 30; do
	wget "${BASE_URL}/pk6zf75m-${metallicity}.neq"
done
| true
|
8e108d3819cb8e6da9de6ed3c707ae67354df838
|
Shell
|
ToniRV/dotfiles
|
/bash.d/exports
|
UTF-8
| 1,420
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
# Environment exports sourced by the interactive shell setup (bash.d).

# Make vim the default editor
export EDITOR="vim"

# Larger bash history (allow 32³ entries; default is 500)
export HISTSIZE=32768
export HISTFILESIZE=$HISTSIZE
export HISTCONTROL=ignoredups

# Make some commands not show up in history
export HISTIGNORE="ls:cd:cd -:pwd:exit:date:* --help"

# Prefer US English and use UTF-8
export LANG="en_US.UTF-8"
export LC_ALL="en_US.UTF-8"

# Make new shells get the history lines from all previous
# shells instead of the default "last window closed" history
export PROMPT_COMMAND="history -a; $PROMPT_COMMAND"

# Pip should only run if there is a virtualenv currently activated
export PIP_REQUIRE_VIRTUALENV=true

# Highlight section titles in manual pages
# NOTE(review): relies on $yellow being defined by an earlier bash.d file.
export LESS_TERMCAP_md="${yellow}";

# Don’t clear the screen after quitting a manual page
export MANPAGER="less -X"

# Set .marks as the folder for jumping symlinks
export MARKPATH=~/.marks

# Downgrade OpenGL from 3.3 to 2.1 for VMware 3D graphics acceleration
#export SVGA_VGPU10=0

# MAVlink for mavros
#export PYTHONPATH=${PYTHONPATH}:${CATKIN_WORKSPACE}/src/mavlink-gbp-release

# QTcreator to work with catkin workspace
# (only resolved when a catkin workspace is configured for this shell)
if [[ -n $CATKIN_WORKSPACE ]]; then
	export CURRENT_CMAKE_BUILD_DIR="$(catkin locate --workspace $CATKIN_WORKSPACE --build)"
fi

# To adapt qtcreator IDE to adapt to high resolution screen
export QT_SCALE_FACTOR=1.5

# For speeding up builds
export PATH="/usr/lib/ccache:$PATH"
| true
|
0d530bb09aa9ec978257ef256a82dc15cf75f5e0
|
Shell
|
akhedrane/hydra
|
/bin/vagrant-update
|
UTF-8
| 702
| 2.859375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Vagrant provisioning helper: refresh and restart the hydra stack inside
# the VM (kills any running hydra-host, pulls/updates the Go sources,
# reinstalls the binaries and relaunches hydra-host plus the sign-up mock).

sudo chown vagrant:vagrant -Rf /home/vagrant
source /etc/environment

echo "Killing previously running hydra-host instances"
killall hydra-host

# Hydra
echo "Updating go dependencies..."

## Update dependencies and recompile
go get -d -u -v github.com/ory-am/hydra/...
go install github.com/ory-am/hydra/cli/...

## Run hydra (backgrounded; output appended to the shared vagrant log)
echo "Start Hydra..."
PORT=9000 \
HOST_URL=https://localhost:9000 \
SIGNUP_URL=http://localhost:3001/sign-up \
SIGNIN_URL=http://localhost:3000/sign-in \
hydra-host start >> /home/vagrant/go/src/github.com/ory-am/hydra/vagrant.hydra.log 2>&1 &
echo "Hydra started!"

echo "Starting sign up mock server..."
hydra-signup &
echo "Sign up mock server started!"
| true
|
3ee7a5ae95255963c5ad34e154dd359c7aaa24da
|
Shell
|
hecong129/test-demo
|
/backup/upimg.sh
|
UTF-8
| 2,038
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/sh
# **************************************************
# Firmware update script for an embedded device.
# Looks inside $UPPATH_TAR (an extracted update.zip) for optional components
# (boot logo, kernel, fastboot binary, main application, resources, extra
# update shell) and flashes/copies whichever ones are present, writing a
# marker file (_logo_, _kernel_, ...) for each component that was updated.

UPPATH=/mnt/sdcard
UPFILE=$UPPATH/update.zip
UPPATH_TAR=$UPPATH/tmp_up
UPIMGFILE=$UPPATH_TAR/upimg.sh
UPLOGO=$UPPATH_TAR/page_1.bmp
UPKERNEL=$UPPATH_TAR/zImage
UPFASTBOOT=$UPPATH_TAR/Spinandboot_37xxc.bin
UPMAINAPP=$UPPATH_TAR/mainapp
UPRESOURCE=$UPPATH_TAR/resource
UPSHELL=$UPPATH_TAR/update.sh

OLDMAINPP=/usr/mainapp/mainapp
OLDRESOURCE=/usr/mainapp/resource

# Fixed: the original guard was "[ $1 = $1 ]", which is always true when an
# argument is given (and degenerates to a single-operand test without one),
# making the "update.zip not find" branch effectively unreachable.  Require
# a non-empty first argument to proceed.
# NOTE(review): this may have been intended as '[ -f "$UPFILE" ]' — confirm
# against the caller that extracts update.zip into $UPPATH_TAR.
if [ -n "$1" ];then
	echo "exece update img ..."
	cd $UPPATH
	# Logo / kernel / fastboot are flashed together via one `updater`
	# invocation, built from whichever of the three files exist.
	if [ -f "$UPLOGO" ];then
		echo "update logo"
		echo "1" > _logo_
		if [ -f "$UPKERNEL" ];then
			echo "and update kernel"
			echo "1" > _kernel_
			if [ -f "$UPFASTBOOT" ];then
				echo "1" > _boot_
				updater local L=$UPPATH_TAR/page_1.bmp K=$UPPATH_TAR/zImage B=$UPPATH_TAR/Spinandboot_37xxc.bin
			else
				updater local L=$UPPATH_TAR/page_1.bmp K=$UPPATH_TAR/zImage
			fi
		else
			if [ -f "$UPFASTBOOT" ];then
				echo "1" > _boot_
				updater local L=$UPPATH_TAR/page_1.bmp B=$UPPATH_TAR/Spinandboot_37xxc.bin
			else
				updater local L=$UPPATH_TAR/page_1.bmp
			fi
		fi
	else
		if [ -f "$UPKERNEL" ];then
			echo "update kernel"
			echo "1" > _kernel_
			if [ -f "$UPFASTBOOT" ];then
				echo "1" > _boot_
				updater local B=$UPPATH_TAR/Spinandboot_37xxc.bin K=$UPPATH_TAR/zImage
			else
				updater local K=$UPPATH_TAR/zImage
			fi
		else
			if [ -f "$UPFASTBOOT" ];then
				echo "1" > _boot_
				updater local B=$UPPATH_TAR/Spinandboot_37xxc.bin
			fi
		fi
	fi

	# Main application binary: plain copy into place.
	if [ -f "$UPMAINAPP" ];then
		echo "update app"
		cp -rf $UPMAINAPP /usr/mainapp/mainapp
		echo "1" > _mainapp_
	fi

	# Resource directory: replace wholesale.
	if [ -d "$UPRESOURCE" ];then
		echo "update resource"
		rm -rf /usr/mainapp/resource
		cp -avrf $UPRESOURCE /usr/mainapp/
		echo "1" > _resource_
	fi

	# Optional extra update steps shipped with the package.
	if [ -f "$UPSHELL" ];then
		echo "exec other update shell"
		$UPSHELL
	fi
else
	echo "update.zip not find"
fi
# **************************************************
echo " update finish...reboot..."
# ****************** finish ************************
| true
|
60c8a0773c17f27ed15ff0d4aebdc41ded585a79
|
Shell
|
ktutnik/benchmark
|
/run
|
UTF-8
| 366
| 2.984375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Benchmark one Node HTTP server implementation with wrk.
# Usage: ./run <server-script> [url-path]
# Starts the server, hammers http://127.0.0.1:5000/<path> for 30s, appends
# the full wrk output to complete-result.txt and a one-line summary
# ("<req/s> Req/sec - <server>") to results.txt, then kills the server.

server="$1"

# Launch the server in production mode and give it time to bind the port.
NODE_ENV=production node ./$server &
pid=$!
sleep 5

echo "$1" >> complete-result.txt

# 30s run, 400 connections over 12 threads; tee keeps the raw report while
# the grep/awk tail extracts just the requests-per-second figure.
wrk "http://127.0.0.1:5000/$2" \
  -d 30s \
  -c 400 \
  -t 12 \
  | tee -a complete-result.txt \
  | grep 'Requests/sec' \
  | awk -v server="$server" '{ print $2 " Req/sec - " server }' >> results.txt

kill $pid
echo "" >> complete-result.txt
| true
|
3b3b87b46733bd083fc481aaf542b313495abeb4
|
Shell
|
felipefacundes/dotfiles
|
/config/rofimenu/rofimenu.config
|
UTF-8
| 6,411
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# Configuration file for rofimenu script
#
# Top level menu consists of modi names from modilist.
# Modilist is a comma separated list of default modi (drun,run...) and/or custom modi.
# Names of default modi can be set as rofi options (e.g. -display-drun Applications).
# Custom modi format: "modi_name:modi_script".
# Menu functions from this script can be used as modi like this "<menu_name>:$thisscript -menu <menu_function>"
# NOTE(review): $thisscript, $GUI_EDITOR, $ROFIMENU_CONFIG and $ROFIMENU_THEME
# are expected to be defined by the rofimenu script that sources this file.

# pause needed for smooth transition when menu command refers to other modi
DELAY=0.06
delay() {
	sleep $DELAY
}

# define modi labels for menu
# (each label starts with an icon glyph; "${VAR#* }" below strips the icon
# to recover the bare function name)
#
FAV=" Favoritos"
DRUN=" Programas"
CAT=" Categorias"
RUN=" Executar"
MENU=" Editar Menu"
EXIT=" Sair"

# Comma-separated modi list handed to rofi: built-ins (drun, run) plus the
# custom menu functions defined in this file, invoked via "$thisscript -menu".
modilist="\
$FAV:$thisscript -menu ${FAV#* },\
drun,\
$CAT:$thisscript -menu ${CAT#* },\
run,\
$MENU:$thisscript -menu Menu_settings,\
$EXIT:$thisscript -menu ${EXIT#* }"
# Menu functions print lines in format "label:command".
# Entries for the "Edit Menu" modi: open or reset the rofimenu config and
# theme files.  Each output line is "label:command" as expected by rofimenu.
Menu_settings() {
	printf '%s\n' \
		" Editar configuração:$GUI_EDITOR $ROFIMENU_CONFIG && $thisscript -show \'$MENU\'" \
		" Resetar configuração:rm $ROFIMENU_CONFIG && delay; $thisscript -show \'$MENU\'" \
		"──────────────:true" \
		" Editar tema:$GUI_EDITOR $ROFIMENU_THEME && $thisscript -show \'$MENU\'" \
		" Resetar tema:rm $ROFIMENU_THEME && delay; $thisscript -show \'$MENU\'"
}
# Favourite applications shown in the "Favoritos" modi.
# Each output line is "label:command"; labels carry icon-font glyphs.
Favoritos() {
	local entry
	for entry in \
		" Terminal:sakura -m" \
		" Navegador de Arquivos:xdg-open ~" \
		" Navegador Web:firefox" \
		" Configurações:lxappearance" \
		" Pesquisar:rofi -combi-modi drun,window -show combi -modi combi -font 'SF Pro Display 9' -bw 5 -location 0 -terminal xterm -columns 2 -lines 10 -show-icons -color-normal '#cc1E1B1C,#cc5FAACD,#cc391F29,#cc5FAACD,#cc101010' -color-window '#cc1E1B1C, #cc5FAACD, #cc101010' -width 50 -padding 15 -line-padding 3 -line-margin 2 -combi-hide-mode-prefix True"
	do
		printf '%s\n' "$entry"
	done
}
# Session/power actions for the "Sair" (exit) modi, one "label:command" per
# line.  A suspend entry is intentionally left disabled, as upstream.
Sair() {
	printf '%s\n' " Bloquear:slimlock"
	#printf '%s\n' " suspender: "
	printf '%s\n' " hibernar:systemctl hibernate"
	printf '%s\n' " Sair:pkill -9 -u $USER"
	printf '%s\n' " Reiniciar:reboot"
	printf '%s\n' " Desligar:shutdown -h now"
}
# 邏 ﰁ 金 辶
# Build the "Categorias" modi: one line per desktop-entry category that is
# actually present in /usr/share/applications, formatted as
# "label<padding><submenu-mark>:activate_category ...".
# Categories listed in $desired may carry an alias ("Alias:Category"); the
# alias is what gets displayed.  $linenumber is passed along so that
# activate_category can restore the selection position on return.
Categorias() {
	SUBMENU_MARK=""
	# Iterate the $desired list line-by-line.
	IFS='
'
	# Newline separated list, each line has format "[symbol ][alias:]category"
	# Category with alias will be shown in menu under that alias
	# The first entry below is an alias for " " so it shows all applications
	desired="\
Todos:
Favoritos
Acessibilidade:Accessibility
Acessórios:Utility
Desenvolvimento:Development
Documentação:Documentation
Educação:Education
Gráficos:Graphics
Internet:Network
Multimídia:AudioVideo
Jogos:Game
Escritório:Office
Configurações:Settings
Sistema:System"

	# determine max line length and set tab position for subcategory mark
	maxlength=0
	for line in $desired ; do
		label="${line%:*}"
		if [ ${#label} -gt $maxlength ] ; then
			maxlength=${#label}
		fi
	done
	submenu_tab=$(($maxlength+3))

	# All categories that actually occur in installed .desktop files
	# (Categories= is ;-separated, hence the sed split).
	present="$(grep Categories /usr/share/applications/*.desktop \
		| cut -d'=' -f2 \
		| sed 's/;/\n/g' \
		| LC_COLLATE=POSIX sort --ignore-case --unique)"

	# Emit only the desired categories that are present, right-padding the
	# label so the submenu mark lines up in a column.
	linenumber=0
	for line in $desired ; do
		category="${line##*[ :]}"
		label="$(echo -e ${line%:*}\\t${SUBMENU_MARK} | expand -t $submenu_tab)" ## add submenu mark
		if [ $(echo "$present"|grep -w -c "$category") -gt 0 ] ; then
			echo "$label:activate_category \"$label\" \"$category\" $linenumber"
			linenumber=$(($linenumber+1))
		fi
	done
}
# Desktop menu parameters
# (right-click desktop menu variant: a narrow rofi window placed at the
# pointer position)
DT_MODI="Desktop:$thisscript -menu Desktop"
Desktop() {
	echo " Terminal:default-terminal"
	echo " File Manager:xdg-open ~"
	echo " Browser:default-browser"
	#TODO determine number of lines before categories
	addlinenumber=3
	# Import X/Y (and other geometry vars) of the rofi window into this shell.
	eval $(xdotool search --class rofi getwindowgeometry --shell)
	# NOTE(review): this calls "Categories" but the function in this file is
	# named "Categorias" — confirm whether the sourcing script defines
	# Categories, otherwise this line silently produces nothing.
	Categories|sed "s/\$/ $addlinenumber $X $Y/" # pass additional lines number, X, Y
	echo " Search:rofi-finder.sh"
}

DT_WIDTH=200 # pixels
##TODO determine desktop menu line height according to theme
DT_LINE_HEIGHT=23 # pixels

# rasi theme overrides applied to the desktop-menu variant of rofi.
DT_THEME="
*{
	lines:	20;
	scrollbar:	false;
	dynamic:	true;
}
#window {
	width:	${DT_WIDTH}px;
	children:	[ dt-mainbox ];
}
#mode-switcher {
	enabled:	false;
}
#button {
	width:	${DT_WIDTH}px;
	padding:	2px 1ch;
}
#inputbar {
	enabled:	false;
}"
# activate_category LABEL CATEGORY LINENUMBER [ADDLINES X Y]
# Shows the drun modi filtered to one category.  If the user picks an entry,
# its command is eval'd and the script exits; otherwise we return to the
# categories modi with the selection restored to LINENUMBER (by sending Tab
# keypresses through xdotool).  The optional ADDLINES/X/Y triple indicates a
# desktop-menu invocation: the rofi window is re-anchored at (X,Y) with
# dummy spacer rows so the category list lines up with the pointer.
activate_category() { # shows drun modi filtered with category. If no command selected, returns to categories modi
	label="${1% *}"	# remove submenu mark
	category="$2"
	linenumber="$3"
	theme=""
	goback="$thisscript -show \"$CAT\""
	if [ $# -gt 3 ] ; then	# that means categories for desktop menu, number of lines before categories, X, Y
		addlinenumber=$4
		X=$5
		Y=$6
		linenumber=$(($linenumber+$addlinenumber))
		# Build a comma-separated run of "textboxdummy" spacer widgets, one
		# per menu line above this category.
		if [ $linenumber -gt 0 ] ; then
			i=$linenumber
			dummy="true"
			dummyline="textboxdummy"
			while [ $i -gt 1 ] ; do
				dummyline="textboxdummy,$dummyline"
				i=$(($i-1))
			done
		else
			dummy="false"
		fi
		# adjust X if too close to the right side of the screen
		# NOTE(review): wattr/lsw are wmutils commands — assumed installed.
		MAX_X=$(wattr w $(lsw -r) )
		anchor="north"
		if [ $X -gt $((${MAX_X}-${DT_WIDTH}*2)) ] ; then
			anchor="${anchor}east"
			X=$MAX_X
		else
			anchor="${anchor}west"
		fi
		theme="$DT_THEME
*	{
	x-offset:	$X;
	y-offset:	$Y;
	anchor:	$anchor;
}
#window {
	width:	$((${DT_WIDTH}*2));
}
#mode-switcher {
	enabled:	true;
}
#boxdummy {
	enabled:	$dummy;
	children:	[ $dummyline ];
}"
		goback="$thisscript -desktop $X $Y"
	fi
	# Run rofi restricted to drun, pre-filtered on the category; rofi prints
	# the selected entry's command (see -run-command) on stdout.
	command=$(delay; $thisscript \
		-only drun \
		-drun-match-fields categories,name \
		-display-drun "$label" \
		-filter "$category " \
		-run-command "echo {cmd}" \
		-run-shell-command "echo {terminal} -e {cmd}" \
		-theme-str "$theme")
	if [ -n "$command" ] ; then
		eval "$command" &
		exit
	fi
	# return to categories modi. No delay needed
	eval $goback &
	if [ $linenumber -eq 0 ] ; then	# if the category is on the top line
		exit
	fi
	# move rofi selection down by linenumber
	keys=""
	while [ $linenumber -gt 0 ] ; do
		keys="$keys Tab"
		linenumber=$(($linenumber-1))
	done
	##TODO wait until rofi can take input
	delay
	delay
	xdotool search --class rofi key --delay 0 $keys
}
## rofi theme file can be set here
# ROFIMENU_THEME="$HOME/.config/rofimenu/rofimenu.rasi"
| true
|
859a24d4f4d3d542b664a1224d0bfd82e1ba86e2
|
Shell
|
AsmNoob/FilesTree
|
/filestree.sh
|
UTF-8
| 1,132
| 3.421875
| 3
|
[] |
no_license
|
# Gérard Tio Nogueras - INFO2 - 000333083
# Info-f-201
# Projet n°1: Shell Scripting
#! bin/bash
# NOTE(review): the shebang above is malformed (missing leading /) and is
# not on line 1, so this runs under whatever shell invokes it.

# parcours DIR [PREFIX]
# Recursively print an ASCII tree of DIR.  PREFIX is the accumulated
# indentation ("|   " per level).  Symlinks are skipped; empty directories
# are shown with a "*" child; unreadable entries simply produce nothing.
parcours(){
	if [[ "$1" == "." ]]; then
		echo "$PWD"
	fi
	for element in "$1"/*; do #[ -r "$1" -a -x "$1" ] &&
		# checking if it isn't a symbolical link
		if [ ! -L "$element" ]; then #$(ls -l "$element" | grep ^l | wc -l) == 0
			# Is it a directory ?
			if [ -d "$element" ]; then
				# Is it not empty ?
				if [ "$(ls -l "$element" | grep -v ^total | wc -l)" -ge 1 ]; then #-r "$element" -a -x "$element" -a
					echo "$2|-- ${element#$1/}"
					parcours "$element" "$2   "
				# Then it's empty
				else
					echo "$2|-- ${element#$1/}"
					echo "$2    |-- *"
				fi
			# Is it a file ?
			elif [ -f "$element" ]; then
				echo "$2|-- ${element#$1/}"
			fi
		fi
	done
}

root="."
#testing an argument has been given and that it exists
[ "$#" -ge 1 ] && [ -e "$1" ] && [ -d "$1" ] && root="$1"
#old_PWD="$PWD"
#echo "root: $root"
#echo "1 $1"
# For some unknown reason we cd inside the script, but since a script runs in
# its own process this has no effect on the caller, so there is no need to
# cd back to old_PWD afterwards.
cd "$root"
echo "$PWD"
parcours "$PWD"
#cd "$old_PWD"
| true
|
883043e22e51eb33c7c0182b8dbea326af23ab20
|
Shell
|
lozzo/config
|
/.conky/scripts/cp.sh
|
UTF-8
| 866
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# Created by f3bruary
# You need underscore-cli (https://github.com/ddopson/underscore-cli)
# Conky helper: query the movie API and print "Title - Mon DD YYYY" for
# each active entry, filtering out unset (epoch-0) release dates.
# NOTE(review): the comment below says Sonarr, but port 5050 and the
# api/<key>/movie.list endpoint look like CouchPotato — confirm.

apikey=''		# The API key
ip='192.168.1.X'	# The IP Address of the Sonarr instance
port='5050'		# The port (duh)

# The api request
json="$(curl -k -s 'http://'$ip':'$port'/api/'$apikey'/movie.list?status=active')"

# No comment on the ugly code below
# Split only on newlines so titles with spaces survive array assignment.
IFS=$'\n'
# Parallel arrays: titles, raw epoch release dates, and formatted dates.
title=( $(echo $json | underscore select .title --outfmt dense | sed -e 's/\["//g' -e 's/"\]//g' -e 's/","/\n/g') )
date=( $(echo $json | underscore select .release_date --outfmt text | cut -d ':' -f 2 | cut -d ',' -f 1) )
date2=( $(for i in ${date[@]}; do date -d @$i +%b\ %d\ %Y; done) )
# I know right ?

# Loop through arrays
# (entries whose date resolves to the epoch, i.e. no release date, are dropped)
for ((i=0;i<${#title[@]};++i)); do
	echo -e "${title[i]}" "-" "${date2[i]}" | sed -e '/Jan 01 1970$/d'
done
| true
|
8a6f59742b9c7c62b9a0ed8c6298112c187140a9
|
Shell
|
neu-spiral/GraphMatching
|
/run_syntExperiments
|
UTF-8
| 1,083
| 2.828125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#SBATCH --nodes=1
#SBATCH --time=20:00:00
#SBATCH --job-name=GM
#SBATCH --tasks=1
#SBATCH --cpus-per-task=20
#SBATCH --mem=100Gb
#SBATCH --exclusive
#SBATCH --output=slurm-logs/GM%j.out
#SBATCH --error=slurm-logs/GM%j.err

# Submit one graph-matching run per line of the experiment list file.
# Usage: sbatch run_syntExperiments <experiments-file> <N>
# Each line of $1 is "GRAPH1, GRAPH2, prepFile" (paths under data/synthetic).

iter=100
N=$2
PSolver=ParallelSolver
LSolver=LocalL1Solver
p=1
resume=false

while IFS='' read -r line || [[ -n "$line" ]]; do
	# Split the comma-separated triple.
	GRAPH1=$(echo $line | awk '{split($0,g,", "); print g[1]}')
	GRAPH2=$(echo $line | awk '{split($0,g,", "); print g[2]}')
	prepF=$(echo $line | awk '{split($0,g,", "); print g[3]}')
	echo $GRAPH1
	echo $GRAPH2
	# Basename-like components (second path segment) used to build
	# trace/RDD/log file names.
	g1=$(echo $GRAPH1 | awk '{split($0,g,"/"); print g[2]}')
	g2=$(echo $GRAPH2 | awk '{split($0,g,"/"); print g[2]}')
	G=$(echo $prepF | awk '{split($0,g,"/"); print g[2]}')
	# Fixed: the output names below interpolated the undefined variable
	# $Gf while $G (computed above) was never used; use $G consistently.
	./synt_iter_batch_GM data/synthetic/$prepF data/traces/"GM_"$PSolver$LSolver$g1$g2$G"p"$p data/RDDs/"GM_"$PSolver$LSolver$g1$g2$G$p data/synthetic/$GRAPH1 data/synthetic/$GRAPH2 checkpointdir $N data/logfiles/"GM_"$PSolver$LSolver$g1$g2$G"p"$p $iter $LSolver $PSolver $p $resume
done < "$1"
| true
|
97bffa0fba9010aca75cfa96f7dfb7f0e6b61ab6
|
Shell
|
gitops-org/info_deploy
|
/Deployment.sh
|
UTF-8
| 7,350
| 3.96875
| 4
|
[] |
no_license
|
#!/bin/bash
# Informatica PowerCenter deployment driver.
# Usage: Deployment.sh USERNAME PASSWORD SRC_REP TGT_REP NAME ACTION
#   ACTION selects one workflow: DG_DELETE | DG_CLEAR | DG_CREATE | DG_ADD |
#   DG_DEPLOY | DG_C_A_D (create+add+deploy) | LB_CREATE | LB_DELETE |
#   LB_ADD | LB_C_A (create+apply label).
# Input files (expected in the current directory):
#   deploymentGroupsList.txt - deployment groups to delete (DG_DELETE)
#   Groups_Lst.txt           - "group,permission" pairs for ACL assignment
#   Mig_Inventory_list.csv   - "repo,folder,objtype,objname" inventory rows
#Sourcing the env variable for the user executing the script
source ./environmental_variables.env
#Defining VARIABLES to be used in the script
InfaMigPath=$(pwd)
echo "$InfaMigPath"
#Setting the parameters passed in the script as env variables for the session
export USERNAME=$1
export PASSWORD=$2
export SRC_REP=$3
export TGT_REP=$4
export NAME=$5
export ACTION=$6
##### Connecting to the Source repository, $DOMAIN defined in the env file
echo "Connecting to the Repository "$SRC_REP
pmrep connect -r $SRC_REP -d $DOMAIN -n $USERNAME -x $PASSWORD -s $USERSECURITYDOMAIN
RETURN_CODE=$?
echo "RETURN_CODE: "$RETURN_CODE
if [ $RETURN_CODE == 0 ]
then
# NOTE(review): the duplicated echo below looks like it was once a log-file
# redirect (see the LB_CREATE section, which appends to $LogFileDir) -- confirm.
echo "Connected to the Repository "$SRC_REP
echo
echo "Connected to the Repository "$SRC_REP
else
echo "Failed to Connect to the Repository "$SRC_REP
echo
exit 1
fi
##### Delete the existing deployment group,
##### the list should be provided via the file deploymentGroupsList.txt
if [ "$ACTION" == DG_DELETE ]
then
while read EachLine
do
# First comma-separated field is the deployment group name.
var=$(echo $EachLine| awk -F"," '{print $1}')
set -- $var
DG_NAME=$1
echo "Deleting the Deployment Group "$DG_NAME
pmrep deletedeploymentgroup -p $DG_NAME -f
RETURN_CODE=$?
echo "RETURN_CODE: "$RETURN_CODE
if [ $RETURN_CODE == 0 ]
then
echo "Deleted the Deployment Group "$DG_NAME
echo
echo "Deleted the Deployment Group "$DG_NAME
else
echo "Deployment Group "$DG_NAME " is not present / invalid credentials."
echo "Deployment Group "$DG_NAME " is not present."
echo
exit 1
fi
done < $InfaMigPath/deploymentGroupsList.txt
fi
##### Clear the objects in the deployment group
if [ "$ACTION" == DG_CLEAR ]
then
pmrep cleardeploymentgroup -p $NAME -f
RETURN_CODE=$?
echo "RETURN_CODE: "$RETURN_CODE
if [ $RETURN_CODE == 0 ]
then
echo "Cleared the Deployment Group "$NAME
echo
echo "Cleared the Deployment Group "$NAME
else
echo "Deployment Group "$NAME " is not present / invalid credentials."
echo "Deployment Group "$NAME " is not present."
echo
exit 1
fi
fi
##### Create a new Depolyment Group
if [ "$ACTION" == DG_CREATE -o "$ACTION" == DG_C_A_D ]
then
pmrep createdeploymentgroup -p $NAME -t static -q DUMMY -u shared
RETURN_CODE=$?
echo "RETURN_CODE: "$RETURN_CODE
if [ $RETURN_CODE == 0 ]
then
echo "Created the Deployment Group "$NAME
echo
else
echo "Deployment Group "$NAME " is already available / invalid credentials."
echo
exit 1
fi
##### Assigning permission to different informatica groups.
echo Assigning permission for $NAME to below list of informatica groups if available.
LST_CNT=`wc -l $InfaMigPath/Groups_Lst.txt|awk '{print $1}'`
if [ $LST_CNT == 0 ]
then
echo Informatica Group list is empty. Not assigning permission to any group.
else
while read EachLine
do
# Each row is "group_name,permission".
var=$(echo $EachLine| awk -F"," '{print $1,$2}')
set -- $var
GRP_NM=$1
ACCESS=$2
pmrep AssignPermission -o deploymentgroup -n $NAME -g $GRP_NM -s $USERSECURITYDOMAIN -p $ACCESS
RETURN_CODE=$?
echo "RETURN_CODE: "$RETURN_CODE
if [ $RETURN_CODE == 0 ]
then
echo $GRP_NM " - " $ACCESS " permission is given to the Deployment Group "$NAME
else
echo "Informatica Group "$GRP_NM " is not available / invalid credentials."
echo "Check the log file "$LogFileDir/$LogFileName
echo
exit 1
fi
done < $InfaMigPath/Groups_Lst.txt
fi
fi
if [ "$ACTION" == DG_ADD -o "$ACTION" == DG_C_A_D ]
then
echo Adding objects to the deployment group
date
while read EachLine
do
# Inventory row: repository, folder, object type, object name.
var=$(echo $EachLine| awk -F"," '{print $1,$2,$3,$4}')
set -- $var
REPO_NM=$1
FLDR_NM=$2
OBJ_TYPE=$3
OBJ_NM=$4
# Sanity check: the inventory must target the repository we connected to.
if [ "$REPO_NM" != "$SRC_REP" ]
then
echo "Connected repository "$SRC_REP" is not equal to the repository name in file "$REPO_NM
echo
exit 1
fi
pmrep addtodeploymentgroup -p $NAME -n $OBJ_NM -o $OBJ_TYPE -f $FLDR_NM -d all
RETURN_CODE=$?
echo "RETURN_CODE: "$RETURN_CODE
if [ $RETURN_CODE == 0 ]
then
echo "Added "$OBJ_NM " to the Deployment Group "$NAME
else
echo "Object name "$OBJ_NM" is not available / invalid credentials."
echo
exit 1
fi
done < $InfaMigPath/Mig_Inventory_list.csv
echo
echo "All Objects are added to the deployment Group "$NAME.
date
echo
fi
##### Deploy the deployment group to the target repository.
if [ "$ACTION" == DG_DEPLOY -o "$ACTION" == DG_C_A_D ]
then
echo "Starting Deployment of "$NAME" to target Repository "$TGT_REP.
date
pmrep deploydeploymentgroup -p $NAME -c $InfaMigPath/DeployOptions.xml -r $TGT_REP -n $USERNAME -s $USERSECURITYDOMAIN -x $PASSWORD
RETURN_CODE=$?
echo "RETURN_CODE: "$RETURN_CODE
if [ $RETURN_CODE == 0 ]
then
echo "Deployment of "$NAME" to target Repository "$TGT_REP" was successful."
echo
date
else
echo "Deployment of "$NAME" failed."
echo
exit 1
fi
fi
##### Create a Label
if [ "$ACTION" == LB_CREATE -o "$ACTION" == LB_C_A ]
then
pmrep createlabel -a $NAME
RETURN_CODE=$?
echo "RETURN_CODE: "$RETURN_CODE
if [ $RETURN_CODE == 0 ]
then
echo "Created the Label "$NAME
echo
echo "Created the Label "$NAME >>$LogFileDir/$LogFileName
else
echo "Label "$NAME " is already available / invalid credentials."
echo "Check the log file "$LogFileDir/$LogFileName
echo
exit 1
fi
##### Assigning permission to different informatica groups.
echo Assigning permission for $NAME to below list of informatica groups.
LST_CNT=`wc -l $InfaMigPath/Groups_Lst.txt|awk '{print $1}'`
if [ $LST_CNT == 0 ]
then
echo Informatica Group list is empty. Not assigning permission to any group.
else
while read EachLine
do
# Each row is "group_name,permission".
var=$(echo $EachLine| awk -F"," '{print $1,$2}')
set -- $var
GRP_NM=$1
ACCESS=$2
pmrep AssignPermission -o label -n $NAME -g $GRP_NM -s $USERSECURITYDOMAIN -p $ACCESS
RETURN_CODE=$?
echo "RETURN_CODE: "$RETURN_CODE
if [ $RETURN_CODE == 0 ]
then
echo $GRP_NM " - " $ACCESS " permission is given to the Label "$NAME
else
echo "Informatica Group "$GRP_NM " is not available / invalid credentials."
echo "Check the log file "$LogFileDir/$LogFileName
echo
exit 1
fi
done < $InfaMigPath/Groups_Lst.txt
fi
fi
##### Delete a Label
if [ "$ACTION" == LB_DELETE ]
then
pmrep deletelabel -a $NAME -f
RETURN_CODE=$?
echo "RETURN_CODE: "$RETURN_CODE
if [ $RETURN_CODE == 0 ]
then
echo "Deleted the Label "$NAME
echo
echo "Deleted the Label "$NAME
else
echo "Label "$NAME " is not available / invalid credentials."
echo "Check the log file "$LogFileDir/$LogFileName
echo
exit 1
fi
fi
##### Apply Label to Informatica Objects.
if [ "$ACTION" == LB_ADD -o "$ACTION" == LB_C_A ]
then
echo Applying label to informatica objects.
date
while read EachLine
do
# Inventory row: repository, folder, object type, object name.
var=$(echo $EachLine| awk -F"," '{print $1,$2,$3,$4}')
set -- $var
REPO_NM=$1
FLDR_NM=$2
OBJ_TYPE=$3
OBJ_NM=$4
##### Checking the connected repository and repository name in the inventory list.
if [ "$REPO_NM" != "$SRC_REP" ]
then
echo "Connected repository "$SRC_REP" is not equal to the repository name in file "$REPO_NM
echo
exit 1
fi
pmrep applylabel -a $NAME -n $OBJ_NM -o $OBJ_TYPE -f $FLDR_NM -p children
RETURN_CODE=$?
echo "RETURN_CODE: "$RETURN_CODE
if [ $RETURN_CODE == 0 ]
then
echo "Applied label "$NAME " to the Infa Object "$OBJ_NM
else
echo "Object name "$OBJ_NM" is not available / invalid credentials."
echo "Check the log file "$LogFileDir/$LogFileName
echo
exit 1
fi
done < $InfaMigPath/Mig_Inventory_list.csv
echo
echo "Label is applied to all available Informatica Objects"
date
echo
fi
exit 0
| true
|
306071fed2eea671d4b860458925ac4f4d2194a6
|
Shell
|
dalepartridge/ACCORD_SEAsia_BGCsetup
|
/TOOLS/interp-files/interp_OBC_additional.sh
|
UTF-8
| 1,289
| 3.125
| 3
|
[] |
no_license
|
#!/usr/bin/bash
###############################################
# interp_OBC_additional.sh
# This script will perform interpolation for a variable
# with a multiple time records, for variables where the source
# grid has already had one variable interpolated
#
# Usage: interp_OBC_additional.sh VAR SOURCE_FILE SOURCE_ID
# Requires $SOSIEDIR and $SCRIPDIR in the environment.
#
# Fix: the original shebang was "#!usr/bin/bash" (missing the leading
# slash), so direct execution of the script could not find bash.
###############################################
var=$1 #Input variable name
sfile=$2 #Source file
sourceid=$3 #Source ID Tag
python fill_mask.py $sfile $var ${sourceid}_mask.nc
#Fill land values
$SOSIEDIR/sosie3.x -f 1_${sourceid}_to_${sourceid}_${var}.namelist
python fix_first_record.py ${var}_${sourceid}-${sourceid}_OBC.nc $var
# Split file into individual files for each time record
cdo splitsel,1 ${var}_${sourceid}-${sourceid}_OBC.nc split_
for f in split*
do
# Rewrite the namelist in place: line 64 is the input file, line 74 the
# output file, then run the SCRIP interpolation for this time record.
sed -i "64 c\ \ \ \ input_file = \"$f\"" 2_${sourceid}_weights_${var}.namelist
sed -i "74 c\ \ \ \ output_file = \"init_$f\"" 2_${sourceid}_weights_${var}.namelist
$SCRIPDIR/scripinterp.exe 2_${sourceid}_weights_${var}.namelist
done
# Recombine the per-record outputs and remove the temporaries.
ncrcat init_split* initcd_${var}.nc
rm -rf split* init_split*
# Fill values
sed -i "88 ccf_z_src = \'bdy_gdept.nc\'" 3_${sourceid}_to_nemo_${var}.namelist
sed -i "89 ccv_z_src = \'gdept\'" 3_${sourceid}_to_nemo_${var}.namelist
$SOSIEDIR/sosie3.x -f 3_${sourceid}_to_nemo_${var}.namelist
| true
|
3c5aba4c3ed9c0884c7cd5583d9b2b00a86aa6d5
|
Shell
|
c02y/dotfiles
|
/bin/.local/bin/bit
|
UTF-8
| 1,324
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/bash
# bit - print the result of a bitwise operation on two numbers.
#
# The original duplicated every operation once per output base (ten
# near-identical printf branches); here the printf conversion is chosen
# once and the operation evaluated once, with identical output.
usage()
{
	echo "bit operations of two numbers"
	echo
	echo "Usage: $(basename $0) NUM1 OPS NUM2 [d]"
	echo "OPS: a(&, and), o(|, or), x(^, xor), l(<<, left), r(>>, right)"
	echo "d means result will be dec, default is hex"
	exit
}

# Require 3 or 4 arguments and a valid single-letter operator.
if [ "$#" -lt 3 ] || [ "$#" -gt 4 ] || [[ "$2" != [aoxlr] ]]; then
	usage
fi

# Pick output base: "d" -> decimal, absent -> hex, anything else -> usage.
if [ "$4" = "d" ]; then
	label="dec"
	fmt='%d\n'
elif [ "$4" = "" ]; then
	label="hex"
	fmt='%X\n'
else
	usage
fi

# Evaluate the requested bitwise operation.
case "$2" in
	a) result=$(( $1 & $3 )) ;;
	o) result=$(( $1 | $3 )) ;;
	x) result=$(( $1 ^ $3 )) ;;
	l) result=$(( $1 << $3 )) ;;
	r) result=$(( $1 >> $3 )) ;;
esac

echo -n "$label: "
printf "$fmt" "$result"
| true
|
6187ee1fb6269ce3c574d0d7209442a26954638c
|
Shell
|
Gillington/Operating-Systems
|
/function2.sh
|
UTF-8
| 115
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/bash
# Tiny demo script: ask a question, then answer it via a helper function.

# Emit the (tongue-in-cheek) answer.
praise() {
  printf '%s\n' "Jared Bernard is great."
}

echo "Why do we go to Operating Systems class?"
echo " "
praise
| true
|
741c08358e09b8a9f362eefb0f81fc0212c871a0
|
Shell
|
nanobox-io/nanobox-hooks-hoarder
|
/test/sandbox.sh
|
UTF-8
| 493
| 2.8125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# Launch a container and console into it
#
# Starts a privileged "test-console" container on the nanobox network with
# the hook sources and test payloads bind-mounted, drops the user into an
# interactive bash inside it, and tears the container down on exit.
# Resolve this script's directory so the bind mounts work from any CWD.
test_dir="$(dirname $(readlink -f $BASH_SOURCE))"
payload_dir="$(readlink -f ${test_dir}/payloads)"
hookit_dir="$(readlink -f ${test_dir}/../src)"
docker run \
  --name=test-console \
  -d \
  --privileged \
  --net=nanobox \
  --ip=192.168.0.55 \
  --volume=${hookit_dir}/:/opt/nanobox/hooks \
  --volume=${payload_dir}/:/payloads \
  nanobox/hoarder
# Interactive session; the cleanup below runs when this shell exits.
docker exec -it test-console bash
docker stop test-console
docker rm test-console
| true
|
9442ace55ecf88738928e94ab0398a103885fc67
|
Shell
|
jeremy-cayrasso/dtc
|
/bin/sources/tarball/install.sh
|
UTF-8
| 974
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/sh
# This is the DTC's BSD interactive install configuration script
# called by the make install
# made by Thomas Goirand <thomas@goirand.fr> and Frederic Cambus
# Installation prefixes (FreeBSD filesystem layout).
PREFIX=/usr
LOCALBASE=/usr/local
QMAIL_DIR=/var/qmail
# DATABASE CONFIGURATION
echo "### MYSQL CONFIGURATION ###"
echo ""
echo "WARNING: Your MySQL Server MUST be running."
echo "If not, please issue the following cmd:"
echo "/usr/local/etc/rc.d/mysql-server.sh start"
echo ""
#/bin/sh
echo "Copying DTC's php scripts to /usr/share..."
PATH_DTC_SHARED="/usr/local/www/dtc"
# Wipe any previous install's subtrees before copying the new files in.
if [ -e $PATH_DTC_SHARED ] ; then
	rm -rf $PATH_DTC_SHARED/admin $PATH_DTC_SHARED/client $PATH_DTC_SHARED/shared $PATH_DTC_SHARED/email $PATH_DTC_SHARED/doc
fi
mkdir -p $PATH_DTC_SHARED
cp -prf ./ $PATH_DTC_SHARED
# Source the installer library (config, prompts, helper functions) ...
. ${LOCALBASE}/www/dtc/admin/install/bsd_config
. ${LOCALBASE}/www/dtc/admin/install/interactive_installer
. ${LOCALBASE}/www/dtc/admin/install/functions
# ... then run the installation steps defined by those files.
enableBsdBind
copyBsdPhpIni
interactiveInstaller
DTCinstallPackage
DTCsetupDaemons
| true
|
35044afd8c8b4cf53ae2e074065a0f3fe0612483
|
Shell
|
coreycb/pkg-scripts
|
/pkg-new-ubuntu-release
|
UTF-8
| 1,036
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Update an Ubuntu package to a new upstream release.
#
# Clones the git package from launchpad.
# Downloads and imports the orig upstream tarball.
# Leaves you in d/changelog to edit.
#
# Example: pkg-new-ubuntu-release cinder 8.0.0~b2 master
#
set -ex
if [ $# -ne 3 ]
then
    echo "Usage: $0 package-name ubuntu-version branch-name"
    echo "       $0 cinder 8.0.0~b1 master"
    echo "       $0 cinder 7.0.1 stable/liberty"
    exit
fi
package=$1
version=$2
branch=$3
# Try the openstack-dev team repository first, fall back to server-dev.
git clone lp:~ubuntu-openstack-dev/ubuntu/+source/$package || git clone lp:~ubuntu-server-dev/ubuntu/+source/$package
cd $package
# Materialise the packaging branches locally before importing the tarball.
git checkout pristine-tar
git checkout upstream
git checkout $branch
# uscan may fail non-fatally (e.g. tarball already present), so relax -e.
set +e
uscan --verbose --download-version ${version} --rename --timeout 60 # --force-download
set -e
# Import whichever tarball flavour uscan produced (.gz, else .xz).
gbp import-orig --no-interactive --merge-mode=replace ../${package}_${version}.orig.tar.gz || \
gbp import-orig --no-interactive --merge-mode=replace ../${package}_${version}.orig.tar.xz
dch -i
# dch -i bumps the revision to 1ubuntu1; a new upstream release starts at 0ubuntu1.
sed -i "1s/1ubuntu1/0ubuntu1/" debian/changelog
| true
|
10e0ff3791e0dabc770f9cd7ccfb080bd62dfe83
|
Shell
|
ifpb/php-guide
|
/lamp/vagrant/install/lamp.sh
|
UTF-8
| 1,057
| 2.765625
| 3
|
[] |
no_license
|
# Vagrant provisioner: install a LAMP stack (Apache 2, PHP 7.2, MySQL)
# on Ubuntu. Output of the package commands is discarded to keep the
# provisioning log readable.
echo "update & upgrade"
sudo apt update -y > /dev/null
sudo apt upgrade -y > /dev/null
echo "install apache2"
sudo apt install apache2 -y > /dev/null
sudo rm /var/www/html/index.html > /dev/null
echo "install php7.2"
sudo apt install php7.2 php7.2-mysql php7.2-mbstring libssh2-1 php-ssh2 -y > /dev/null
# Show PHP errors in the browser (development convenience).
sudo sed -i -r -e 's/display_errors = Off/display_errors = On/g' /etc/php/7.2/apache2/php.ini
sudo systemctl restart apache2 > /dev/null
echo "Installing MySQL"
DBPASSWD=secret
# Pre-seed debconf so the mysql-server install is non-interactive.
echo "mysql-server mysql-server/root_password password $DBPASSWD" | sudo debconf-set-selections > /dev/null
echo "mysql-server mysql-server/root_password_again password $DBPASSWD" | sudo debconf-set-selections > /dev/null
sudo apt-get -y install mysql-server > /dev/null
# Listen on all interfaces so the host can reach MySQL inside the VM.
sudo sed -i -r -e 's/127.0.0.1/0.0.0.0/g' /etc/mysql/mysql.conf.d/mysqld.cnf
sudo systemctl restart mysql > /dev/null
# NOTE(review): grants remote root access with a hard-coded password --
# acceptable only for a throwaway local Vagrant box; never reuse in prod.
mysql -uroot -p"$DBPASSWD" -e "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' IDENTIFIED BY '$DBPASSWD' REQUIRE NONE WITH GRANT OPTION; FLUSH PRIVILEGES;"
echo "Vagrant finish"
| true
|
179567e648d2b4171e11547eb3d3966da6c825dd
|
Shell
|
dcaro/spinnakerhackfest
|
/scripts/set_azure_spinnaker.sh
|
UTF-8
| 4,670
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
# Get azure data
# clientId : -c
# AppKey: -a
# Provision Spinnaker on an Azure VM (run by the Azure custom-script
# extension). Flags: -t tenant, -s subscription, -p app key, -c client id,
# -h packer storage account, -r resource group, -l location, -k key vault,
# -i Jenkins FQDN, -u/-q Jenkins user/password, -f/-a front50 storage/key.
# Default values
JENKINS_USERNAME=""
JENKINS_PASSWORD=""
while getopts ":t:s:p:c:h:r:l:k:u:q:f:a:i:" opt; do
case $opt in
t) TENANTID="$OPTARG"
;;
p) PASSWORD="$OPTARG"
;;
c) CLIENTID="$OPTARG"
;;
s) SUBSCRIPTIONID="$OPTARG"
;;
h) PACKERSTORAGEACCOUNT="$OPTARG"
;;
r) RESOURCEGROUP="$OPTARG"
;;
l) RESOURCEGROUPLOCATION="$OPTARG"
;;
k) KEYVAULT="$OPTARG"
;;
i) JENKINS_FQDN="$OPTARG"
;;
u) JENKINS_USERNAME="$OPTARG"
;;
q) JENKINS_PASSWORD="$OPTARG"
;;
f) FRONT50_STORAGE="$OPTARG"
;;
a) FRONT50_KEY="$OPTARG"
;;
esac
done
WORKDIR=$(pwd)
# Usually the workdir is /var/lib/waagent/custom-script/download/0
# The slashes are pre-escaped because these values are substituted into
# sed/printf-generated sed scripts below.
JENKINS_URL='http:\/\/'$JENKINS_FQDN
DEBIAN_REPO='http:\/\/ppa.launchpad.net\/openjdk-r\/ppa\/ubuntu trusty main;'$JENKINS_URL
SED_FILE=$WORKDIR"/sedCommand.sed"
sudo printf "Upgrading the environment\n"
# Update and upgrade packages
sudo apt-mark hold walinuxagent grub-legacy-ec2
sudo printf "Holding walinuxagent\n"
sudo apt-get update -y
sudo printf "apt-get update completed\n"
sudo rm /var/lib/dpkg/updates/*
sudo printf "directory /var/lib/dpkg/updates removed\n"
sudo apt-get upgrade -y
sudo printf "apt-get upgrade completed\n"
# Install Spinnaker on the VM with no cassandra
sudo printf "Starting to install Spinnaker\n"
curl --silent https://raw.githubusercontent.com/spinnaker/spinnaker/master/InstallSpinnaker.sh | sudo bash -s -- --cloud_provider azure --azure_region $RESOURCEGROUPLOCATION --noinstall_cassandra
sudo printf "Spinnaker has been installed\n"
# configure to not use cassandra
sudo /opt/spinnaker/install/change_cassandra.sh --echo=inMemory --front50=azs
sudo printf "Configured to not use cassandra"
# Configuring the /opt/spinnaker/config/default-spinnaker-local.yml
# Let's create the sed command file and run the sed command
# (the printf lines below append sed expressions to $SED_FILE; the file
# is applied once at the end with "sed -i -f").
sudo printf "Setting up sedCommand \n"
sudo printf "s/enabled: \${SPINNAKER_AZURE_ENABLED:false}/enabled: \${SPINNAKER_AZURE_ENABLED:true}/g\n" > $SED_FILE
sudo printf "s/defaultRegion: \${SPINNAKER_AZURE_DEFAULT_REGION:westus}/defaultRegion: \${SPINNAKER_AZURE_DEFAULT_REGION:$RESOURCEGROUPLOCATION}/g\n" >> $SED_FILE
sudo printf "s/clientId:$/& %s/\n" $CLIENTID >> $SED_FILE
sudo printf "s/appKey:$/& %s/\n" $PASSWORD >> $SED_FILE
sudo printf "s/tenantId:$/& %s/\n" $TENANTID >> $SED_FILE
sudo printf "s/subscriptionId:$/& %s/\n" $SUBSCRIPTIONID >> $SED_FILE
# Adding the PackerResourceGroup, the PackerStorageAccount, the defaultResourceGroup and the defaultKeyVault
sudo printf "s/packerResourceGroup:$/& %s/\n" $RESOURCEGROUP >> $SED_FILE
sudo printf "s/packerStorageAccount:$/& %s/\n" $PACKERSTORAGEACCOUNT >> $SED_FILE
sudo printf "s/defaultResourceGroup:$/& %s/\n" $RESOURCEGROUP >> $SED_FILE
sudo printf "s/defaultKeyVault:$/& %s/\n" $KEYVAULT >> $SED_FILE
# Enable Igor for the integration with Jenkins
sudo printf "/igor:/ {\n N\n N\n N\n /enabled:/ {\n s/enabled:.*/enabled: true/\n P\n D\n }\n}\n" >> $SED_FILE
# Configure the Jenkins instance
sudo printf "/name: Jenkins.*/ {\n N\n /baseUrl:/ { s/baseUrl:.*/baseUrl: %s:8080/ }\n" $JENKINS_URL >> $SED_FILE
sudo printf " N\n /username:/ { s/username:/username: %s/ }\n" $JENKINS_USERNAME >> $SED_FILE
sudo printf " N\n /password:/ { s/password:/password: %s/ }\n" $JENKINS_PASSWORD >> $SED_FILE
sudo printf "}\n" >> $SED_FILE
# Configure Azure storage
sudo printf "/azs:/ {\n N\n s/enabled: false/enabled: true/\n N\n s/storageAccountName:/storageAccountName: $FRONT50_STORAGE/\n N\n s|storageAccountKey:|storageAccountKey: $FRONT50_KEY|\n }\n" >> $SED_FILE
sudo printf "sedCommand.sed file created\n"
# Set the variables in the spinnaker-local.yml file
sudo sed -i -f $SED_FILE /opt/spinnaker/config/spinnaker-local.yml
sudo printf "spinnaker-local.yml file has been updated\n"
# Configure rosco.yml file
sudo sed -i "/# debianRepository:/s/.*/debianRepository: $DEBIAN_REPO:9999 trusty main/" /opt/rosco/config/rosco.yml
sudo sed -i '/defaultCloudProviderType/s/.*/defaultCloudProviderType: azure/' /opt/rosco/config/rosco.yml
sudo printf "rosco.yml file has been updated\n"
# Adding apt-key key
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys EB9B1D8886F44E2A
sudo printf "apt-key done\n"
# Removing debug file
sudo rm -f $SED_FILE
# rebooting the VM to avoid issues with front50
sudo restart spinnaker
| true
|
7b770619bac5cf779af7f8c8a64aa154b349a70c
|
Shell
|
SOCaaS/openstack-script
|
/glance/start.sh
|
UTF-8
| 3,665
| 2.796875
| 3
|
[] |
no_license
|
# Provision the OpenStack Glance (image) service on this node.
# Expects keystone admin credentials and these env vars to be set:
# GLANCE_DB_NAME/USER/PASSWORD, GLANCE_USER, GLANCE_PASSWORD,
# OS_PROJECT_DOMAIN_NAME, OS_USER_DOMAIN_NAME, DEFAULT_URL.
set -e
apt update
echo -e "\nCreate a glance sql user"
# Fill the {{ placeholders }} in glance.sql, then run it to create the DB.
sed -i -e "s|{{ GLANCE_DB_NAME }}|$GLANCE_DB_NAME|g" ./glance.sql
sed -i -e "s|{{ GLANCE_DB_USER }}|$GLANCE_DB_USER|g" ./glance.sql
sed -i -e "s|{{ GLANCE_DB_PASSWORD }}|$GLANCE_DB_PASSWORD|g" ./glance.sql
mysql -e "source glance.sql";
# Export environment variable
echo -e "\nCreate openstack user on keystone"
openstack user create --domain $OS_PROJECT_DOMAIN_NAME --password "$GLANCE_PASSWORD" $GLANCE_USER
echo -e "\nCreate a project"
openstack project create --domain $OS_PROJECT_DOMAIN_NAME --description "Service Project" service
openstack role add --project service --user $GLANCE_USER admin
echo -e "\nCreate the glance service entity"
openstack service create --name $GLANCE_USER --description "OpenStack Image" image
echo -e "\nCreate the Image service API endpoint"
openstack endpoint create --region RegionOne image public http://$DEFAULT_URL:9292
openstack endpoint create --region RegionOne image internal http://$DEFAULT_URL:9292
openstack endpoint create --region RegionOne image admin http://$DEFAULT_URL:9292
echo -e "\nInstall and configure components"
apt install -y glance
echo -e "\nEditing glance-api.conf"
crudini --set /etc/glance/glance-api.conf database connection mysql+pymysql://$GLANCE_DB_USER:$GLANCE_DB_PASSWORD@$DEFAULT_URL/$GLANCE_DB_NAME
crudini --set /etc/glance/glance-api.conf keystone_authtoken auth_url http://$DEFAULT_URL:5000
crudini --set /etc/glance/glance-api.conf keystone_authtoken memcached_servers $DEFAULT_URL:11211
crudini --set /etc/glance/glance-api.conf keystone_authtoken auth_type password
crudini --set /etc/glance/glance-api.conf keystone_authtoken project_domain_name $OS_PROJECT_DOMAIN_NAME
crudini --set /etc/glance/glance-api.conf keystone_authtoken user_domain_name $OS_USER_DOMAIN_NAME
crudini --set /etc/glance/glance-api.conf keystone_authtoken project_name service
crudini --set /etc/glance/glance-api.conf keystone_authtoken username $GLANCE_USER
crudini --set /etc/glance/glance-api.conf keystone_authtoken password $GLANCE_PASSWORD
crudini --set /etc/glance/glance-api.conf paste_deploy flavor keystone
crudini --set /etc/glance/glance-api.conf glance_store stores "file,http"
crudini --set /etc/glance/glance-api.conf glance_store default_store file
crudini --set /etc/glance/glance-api.conf glance_store filesystem_store_datadir /var/lib/glance/images/
# echo -e "\nEditing glance-registry.conf"
# crudini --set /etc/glance/glance-registry.conf database connection mysql+pymysql://$GLANCE_DB_USER:$GLANCE_DB_PASSWORD@$DEFAULT_URL/$GLANCE_DB_NAME
# crudini --set /etc/glance/glance-registry.conf keystone_authtoken auth_url http://$DEFAULT_URL:5000
# crudini --set /etc/glance/glance-registry.conf keystone_authtoken memcached_servers $DEFAULT_URL:11211
# crudini --set /etc/glance/glance-registry.conf keystone_authtoken auth_type password
# crudini --set /etc/glance/glance-registry.conf keystone_authtoken project_domain_name $OS_PROJECT_DOMAIN_NAME
# crudini --set /etc/glance/glance-registry.conf keystone_authtoken user_domain_name $OS_USER_DOMAIN_NAME
# crudini --set /etc/glance/glance-registry.conf keystone_authtoken project_name service
# crudini --set /etc/glance/glance-registry.conf keystone_authtoken username $GLANCE_USER
# crudini --set /etc/glance/glance-registry.conf keystone_authtoken password $GLANCE_PASSWORD
# crudini --set /etc/glance/glance-registry.conf paste_deploy flavor keystone
echo -e "\nDB Sync Glance"
# db_sync failure is tolerated -- presumably so re-provisioning an already
# synced database does not abort the script; TODO confirm.
set +e
su -s /bin/sh -c "glance-manage db_sync" glance
set -e
echo -e "\nGlance Restart"
service glance-api restart
echo -e "\n Check Glance Status"
service glance-api status
| true
|
ef71e0914d3883bebbd228d6d8df241689dcbd06
|
Shell
|
SoumyadeepThakur/Misc-Codes
|
/SEM-5/Shell-Prog/prime.sh
|
UTF-8
| 283
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/sh
# prime.sh - read a number from stdin and report whether it is prime.

# is_prime N - print "N is prime" or "N is not prime" on stdout.
# Fixes vs. the original version:
#  * uses its argument ($1) instead of silently reading the global $NUM
#  * numbers below 2 (and empty input) are correctly reported as not prime
#  * trial-divides only up to sqrt(N) instead of N-1
#  * returns instead of exit-ing, so it no longer terminates the caller
is_prime()
{
	NUMBER=$1
	# Anything below 2 (including empty/unset input, which defaults to 0)
	# is not prime by definition.
	if [ "${NUMBER:-0}" -lt 2 ] 2>/dev/null
	then
		echo "$NUMBER is not prime"
		return 0
	fi
	i=2
	# Only divisors up to sqrt(NUMBER) need to be checked.
	while [ `expr $i \* $i` -le $NUMBER ]
	do
		if [ `expr $NUMBER % $i` -eq 0 ]
		then
			echo "$NUMBER is not prime"
			return 0
		fi
		i=`expr $i + 1`
	done
	echo "$NUMBER is prime"
}
#main program
echo "Enter number: "
read NUM
is_prime $NUM
| true
|
9258f7f79b7152fcf4db0c322ed76b313fe6866b
|
Shell
|
xiaohuixingjia/countLimitAuth
|
/start.sh
|
UTF-8
| 829
| 3.109375
| 3
|
[] |
no_license
|
. ~/.bash_profile
# Launcher for the DaletServer Java service: builds the classpath from
# bin/, resource/ and every jar in lib/, then starts the JVM in the
# background unless a process tagged -Dflag=${SERVICE_ID} is already running.
export LANG=zh_CN.UTF-8
FILE_PATH=/home/webfocus/huaxiaoqiang/applications/Dps-da-http-new
cd ${FILE_PATH}
MainClass=com.umpay.dpsda.server.DaletServer
SERVICE_ID=new_http_da_dps
# Assemble the classpath: current dir, bin, resource, then all jars in lib/.
APPCLASSPATH=
APPCLASSPATH=$APPCLASSPATH:.
APPCLASSPATH=$APPCLASSPATH:bin
APPCLASSPATH=$APPCLASSPATH:resource
for jarfile in `ls -1 lib/*.jar`
do
APPCLASSPATH="$APPCLASSPATH:$jarfile"
done
# Duplicate-start guard: look for an existing JVM carrying our flag tag.
pid=`ps -wwef|grep "Dflag=${SERVICE_ID}"|grep -v grep`
if [ -n "${pid}" ]
then
echo "${SERVICE_ID} already start."
else
# NOTE(review): the -Xloggc:"..." argument below appears line-wrapped
# (the quoted path spans three lines); confirm it was meant to be a
# single line reading ./log/gc-server.log.
nohup java -Xms512m -Xmx512m -Xmn64m -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=70 -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -Xloggc:"./log/gc-server.lo
g
" -cp ${APPCLASSPATH} -Dflag=${SERVICE_ID} ${MainClass} > /dev/null 2>&1 &
# Record the background JVM's PID for later stop scripts.
echo $! > server.pid
fi
| true
|
7d84e373da344a03c88a3f0879f22ee9e5535b07
|
Shell
|
granthbr/kong-lab
|
/scripts/1-kong-standalone-postgres.sh
|
UTF-8
| 1,791
| 3
| 3
|
[
"Apache-2.0",
"JSON"
] |
permissive
|
#!/bin/sh
# Stand up a Kong Gateway EE lab: Postgres + Kong in Docker, apply the
# license from $KONG_LICENSE, and enable the developer portal.
set -e
set -x
# cleanup
# Fix: '|| true' keeps 'set -e' from aborting on a fresh host where the
# containers do not exist yet (the network rm below already tolerated
# this; the container rm did not).
docker rm -f kong-lab-ee kong-lab-database || true
docker network rm kong-lab-net || true
## setup
docker pull kong/kong-gateway:2.5.0.0-alpine
docker tag kong/kong-gateway:2.5.0.0-alpine kong-ee
## create network
docker network create kong-lab-net
## start db
docker run -d --name kong-lab-database \
--network=kong-lab-net \
-p 5432:5432 \
-e "POSTGRES_USER=kong" \
-e "POSTGRES_DB=kong" \
-e "POSTGRES_PASSWORD=kong" \
postgres:9.6
sleep 10
## bootstrap db
docker run --rm --network=kong-lab-net \
-e "KONG_DATABASE=postgres" \
-e "KONG_PG_HOST=kong-lab-database" \
-e "KONG_PG_PASSWORD=kong" \
-e "KONG_PASSWORD=password" \
kong-ee kong migrations bootstrap
sleep 2
## start kong
docker run -d --name kong-lab-ee --network=kong-lab-net \
-e "KONG_DATABASE=postgres" \
-e "KONG_PG_HOST=kong-lab-database" \
-e "KONG_PG_PASSWORD=kong" \
-e "KONG_PROXY_ACCESS_LOG=/dev/stdout" \
-e "KONG_ADMIN_ACCESS_LOG=/dev/stdout" \
-e "KONG_PROXY_ERROR_LOG=/dev/stderr" \
-e "KONG_ADMIN_ERROR_LOG=/dev/stderr" \
-e "KONG_ADMIN_LISTEN=0.0.0.0:8001" \
-e "KONG_ADMIN_GUI_URL=http://localhost:8002" \
-p 8000:8000 \
-p 8443:8443 \
-p 8001:8001 \
-p 8444:8444 \
-p 8002:8002 \
-p 8445:8445 \
-p 8003:8003 \
-p 8004:8004 \
kong-ee
sleep 10
## add license
curl -i -X POST http://localhost:8001/licenses \
-d payload="$KONG_LICENSE"
sleep 2
## enable portal
# NOTE(review): this pipes a single line into sh inside the container, so
# everything after the env assignments becomes one command line
# ("kong reload exit") -- confirm 'exit' is intended as an argument here.
echo "KONG_PORTAL_GUI_HOST=localhost:8003 KONG_PORTAL=on kong reload exit" \
| docker exec -i kong-lab-ee /bin/sh
sleep 2
curl -f -X PATCH --url http://localhost:8001/workspaces/default \
--data "config.portal=true"
# check the status is OK
curl -q http://localhost:8001/status
echo "Successfully reached end of script"
| true
|
3eac205ba44f05750f2e62de6600537fc78eb15b
|
Shell
|
dechavezv/isleroyalewolfgenomes2019
|
/genotyping_pipeline/09_TrimAlternates_VariantAnnotator/09_TrimAlternates_VariantAnnotator.sh
|
UTF-8
| 1,093
| 3.21875
| 3
|
[] |
no_license
|
#! /bin/bash
# Step 9: Trim unused alternate alleles and add VariantType and AlleleBalance annotations
# to INFO column
# Usage: ./09_TrimAlternates_VariantAnnotator.sh [chromosome]
# Each GATK stage writes a per-chromosome output VCF and appends stdout+stderr
# (&>>) to its own timestamped log file.
GATK=/utils/programs/GenomeAnalysisTK-3.6-0-g89b7209/GenomeAnalysisTK.jar
REFERENCE=/utils/canfam31/canfam31.fa
TEMPDIR=/temp
WORKDIR=/work
CHR=${1}
LOG=${WORKDIR}/JointCalls_09_A_TrimAlternates_${CHR}.vcf.gz_log.txt
date > ${LOG}
# Stage A: SelectVariants with -trimAlternates drops unused ALT alleles.
java -jar -Xmx26g -Djava.io.tmpdir=${TEMPDIR} ${GATK} \
-T SelectVariants \
-R ${REFERENCE} \
-trimAlternates \
-L ${CHR} \
-V ${WORKDIR}/JointCalls_08_GenotypeGVCFs_${CHR}.vcf.gz \
-o ${WORKDIR}/JointCalls_09_A_TrimAlternates_${CHR}.vcf.gz &>> ${LOG}
date >> ${LOG}
LOG=${WORKDIR}/JointCalls_09_B_VariantAnnotator_${CHR}.vcf.gz_log.txt
date > ${LOG}
# Stage B: VariantAnnotator adds VariantType and AlleleBalance to INFO,
# reading stage A's output.
java -jar -Xmx26g -Djava.io.tmpdir=${TEMPDIR} ${GATK} \
-T VariantAnnotator \
-R ${REFERENCE} \
-G StandardAnnotation \
-A VariantType \
-A AlleleBalance \
-L ${CHR} \
-V ${WORKDIR}/JointCalls_09_A_TrimAlternates_${CHR}.vcf.gz \
-o ${WORKDIR}/JointCalls_09_B_VariantAnnotator_${CHR}.vcf.gz &>> ${LOG}
date >> ${LOG}
| true
|
10cfa180e70e48299e14ef86106cf30cdd034b90
|
Shell
|
vcaropr1/Archive_Compressor_2016
|
/compressor_for_rnd.sh
|
UTF-8
| 4,796
| 3.40625
| 3
|
[] |
no_license
|
#! /bin/bash
# Archive compressor driver: walks a project directory, submits (via SGE
# qsub) compression/validation jobs for vcf/bam/txt/csv/intervals files,
# then a final MD5 check job held on all validation jobs.
# Usage: compressor_for_rnd.sh DIR_TO_PARSE [REF_GENOME]
#
# NOTE(review): every helper below *echoes* its qsub command instead of
# executing it -- either the output is meant to be piped into a shell, or
# this is a dry-run leftover; confirm before relying on it to submit jobs.
module load sge
DIR_TO_PARSE=$1 #Directory of the Project to compress
REF_GENOME=$2
SCRIPT_REPO=/isilon/sequencing/VITO/NEW_GIT_REPO/Archive_Compressor_2016/COMPRESSION_SCRIPTS
DEFAULT_REF_GENOME=/isilon/sequencing/GATK_resource_bundle/1.5/b37/human_g1k_v37_decoy.fasta
# Fall back to the b37 decoy reference when none was supplied.
if [[ ! $REF_GENOME ]]
then
REF_GENOME=$DEFAULT_REF_GENOME
fi
####Uses bgzip to compress vcf file and tabix to index. Also, creates md5 values for both####
COMPRESS_AND_INDEX_VCF(){
echo qsub -N COMPRESS_$UNIQUE_ID -j y -o $DIR_TO_PARSE/LOGS/COMPRESS_AND_INDEX_VCF_$BASENAME.log $SCRIPT_REPO/compress_and_tabix_vcf.sh $FILE $DIR_TO_PARSE
}
####Uses samtools-1.4 to convert bam to cram and index and remove excess tags####
BAM_TO_CRAM_CONVERSION_RND(){
#Remove Tags + 5-bin Quality Score (RND Projects)
echo qsub -N BAM_TO_CRAM_CONVERSION_$UNIQUE_ID -j y -o $DIR_TO_PARSE/LOGS/BAM_TO_CRAM_$BASENAME"_"$COUNTER.log $SCRIPT_REPO/bam_to_cram_remove_tags_rnd.sh $FILE $DIR_TO_PARSE $REF_GENOME $COUNTER
}
####Uses samtools-1.4 to convert bam to cram and index and remove excess tags####
BAM_TO_CRAM_CONVERSION_PRODUCTION(){
#Remove Tags
echo qsub -N BAM_TO_CRAM_CONVERSION_$UNIQUE_ID -j y -o $DIR_TO_PARSE/LOGS/BAM_TO_CRAM_$BASENAME"_"$COUNTER.log $SCRIPT_REPO/bam_to_cram_remove_tags.sh $FILE $DIR_TO_PARSE $REF_GENOME
}
####Uses ValidateSam to report any errors found within the original BAM file####
BAM_VALIDATOR(){
echo qsub -N BAM_VALIDATOR_$UNIQUE_ID -j y -o $DIR_TO_PARSE/LOGS/BAM_VALIDATOR_$BASENAME"_"$COUNTER.log $SCRIPT_REPO/bam_validation.sh $FILE $DIR_TO_PARSE $COUNTER
}
####Uses ValidateSam to report any errors found within the cram files####
CRAM_VALIDATOR(){
echo qsub -N CRAM_VALIDATOR_$UNIQUE_ID -hold_jid BAM_TO_CRAM_CONVERSION_$UNIQUE_ID -j y -o $DIR_TO_PARSE/LOGS/CRAM_VALIDATOR_$BASENAME"_"$COUNTER.log $SCRIPT_REPO/cram_validation.sh $FILE $DIR_TO_PARSE $REF_GENOME $COUNTER
}
####Parses through all CRAM_VALIDATOR files to determine if any errors/potentially corrupted cram files were created and creates a list in the top directory
VALIDATOR_COMPARER(){
echo qsub -N VALIDATOR_COMPARE_$UNIQUE_ID -hold_jid "BAM_VALIDATOR_"$UNIQUE_ID",CRAM_VALIDATOR_"$UNIQUE_ID -j y -o $DIR_TO_PARSE/LOGS/BAM_CRAM_VALIDATE_COMPARE_$COUNTER.log $SCRIPT_REPO/bam_cram_validate_compare.sh $FILE $DIR_TO_PARSE $COUNTER
}
####Zips and md5s text and csv files####
ZIP_TEXT_AND_CSV_FILE(){
echo qsub -N COMPRESS_\'$UNIQUE_ID\' -j y -o $DIR_TO_PARSE/LOGS/ZIP_FILE_\'$BASENAME\'.log $SCRIPT_REPO/zip_file.sh \'$FILE\' $DIR_TO_PARSE
}
# Accumulate validator job names so the final MD5 check can -hold_jid on them.
BUILD_MD5_CHECK_HOLD_LIST(){
MD5_HOLD_LIST=$MD5_HOLD_LIST'VALIDATOR_COMPARE_'$UNIQUE_ID','
}
####Compares MD5 between the original file and the zipped file (using zcat) to validate that the file was compressed successfully####
MD5_CHECK(){
echo qsub -N MD5_CHECK_ENTIRE_PROJECT_$PROJECT_NAME -hold_jid $MD5_HOLD_LIST -j y -o $DIR_TO_PARSE/LOGS/MD5_CHECK.log $SCRIPT_REPO/md5_check.sh $DIR_TO_PARSE
}
MD5_CHECK_NO_HOLD_ID(){
echo qsub -N MD5_CHECK_ENTIRE_PROJECT_$PROJECT_NAME -j y -o $DIR_TO_PARSE/LOGS/MD5_CHECK.log $SCRIPT_REPO/md5_check.sh $DIR_TO_PARSE
}
PROJECT_NAME=$(basename $DIR_TO_PARSE)
COUNTER=0
BAM_COUNTER=0
mkdir -p $DIR_TO_PARSE/MD5_REPORTS/
mkdir -p $DIR_TO_PARSE/LOGS
mkdir -p $DIR_TO_PARSE/TEMP
# Moved to bam_cram_validate_compare.sh and used an if statement to create only once. Need to test!
# echo -e SAMPLE\\tCRAM_CONVERSION_SUCCESS\\tCRAM_ONLY_ERRORS\\tNUMBER_OF_CRAM_ONLY_ERRORS >| $DIR_TO_PARSE/cram_conversion_validation.list
# Pass variable (vcf/txt/cram) file path to function and call $FILE within function#
for FILE in $(find $DIR_TO_PARSE -type f | egrep 'vcf$|csv$|txt$|bam$|intervals$' | egrep -v 'HC.bam$|[[:space:]]')
do
BASENAME=$(basename $FILE)
UNIQUE_ID=$(echo $BASENAME | sed 's/@/_/g') # If there is an @ in the qsub or holdId name it breaks
let COUNTER=COUNTER+1 # counter is used for some log or output names if there are multiple copies of a sample file within the directory as to not overwrite outputs
if [[ $FILE == *".vcf" ]]
then
COMPRESS_AND_INDEX_VCF
elif [[ $FILE == *".bam" ]]; then
let BAM_COUNTER=BAM_COUNTER+1 # number will match the counter number used for logs and output files like bam/cram validation
# RND projects get the 5-bin-quality conversion; everything else production.
case $FILE in *02_CIDR_RND*)
BAM_TO_CRAM_CONVERSION_RND
BAM_VALIDATOR
CRAM_VALIDATOR
VALIDATOR_COMPARER
BUILD_MD5_CHECK_HOLD_LIST
;;
*)
BAM_TO_CRAM_CONVERSION_PRODUCTION
BAM_VALIDATOR
CRAM_VALIDATOR
VALIDATOR_COMPARER
BUILD_MD5_CHECK_HOLD_LIST
;;
esac
elif [[ $FILE == *".txt" ]]; then
ZIP_TEXT_AND_CSV_FILE
elif [[ $FILE == *".csv" ]]; then
ZIP_TEXT_AND_CSV_FILE
elif [[ $FILE == *".intervals" ]]; then
ZIP_TEXT_AND_CSV_FILE
else
echo $FILE not being compressed
fi
done
# No BAMs means no validator jobs to hold on, so submit the plain MD5 check.
if [[ $BAM_COUNTER == 0 ]]
then
MD5_CHECK_NO_HOLD_ID
else
MD5_CHECK
fi
| true
|
d9f5310109da8b4c1a089b24139ffda8fb6d6720
|
Shell
|
migaljabon/parseConfig
|
/findstring.sh
|
UTF-8
| 19,682
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/bash
# findstring: search the most recent network device configuration files under
# $basedir for one or more regular expressions, fanning the actual matching
# out to /opt/bin/findstring_worker instances via xargs (see the main loop).
version=3.0
#Record time for performance measurement.
#starttime=$(date +%s%3N)
#Initialize variables.
max_threads=20 #The number threads used by xargs for the workers.
batchsize=40 #The number of files processed by the workers per batch.
#A larger batch size makes the main routine more efficient,
#but delays the initial output to the screen.
basedir="/home/cvs/customers" #The root directory for the configuration files.
path=$basedir #The path gets adjusted depending on user input.
#Following are the variables that contain the command line arguments.
countsw=""
detailsw=""
DETAILsw=""
groupsw=""
grouparg=""
tabsw=""
casesw=""
nomatchsw=""
aftersw=""
beforesw=""
contextsw=""
maxsw=""
onlysw=""
stanzasw=""
Stanzasw=""
formatsw=" -F" #Set the default format to 'txt', which means human readable.
formatarg=" txt"
customer=""
custsw=""
custarg=""
filesw=""
betweensw=""
filearg=""
orig_filearg=""
#End of command line argument variables.
Esw=""
doctored_regexp=""
matchdesc=""
header=""
details=""
count=0
firstline="Y"
lastcustline="zzzzzzzzzz"
pid=$$
#Set variables based on command line input.
#Get the regexp(s) from the command line.
#${!#} expands to the value of the last positional parameter.
in_regexp=${!#}
#echo "debug: args=$args, $@" >&2
#If no arguments were entered, turn ON the help switch.
if [ $# -eq 0 ]
then
	helpsw="-h"
fi
#Process the switches.
#NOTE(review): $args is never assigned anywhere above, so it expands to
#nothing and getopts falls back to parsing the positional parameters ("$@").
#NOTE(review): 'w' and 'x' appear in the optstring (and -w in the help text)
#but have no case arm, so they fall through to the invalid-option handler.
while getopts ":A:B:b:C:c:DdF:f:g:hikLm:noSsvwx" option $args
do
	case "${option}" in
		d ) detailsw=" -d"
			;;
		A ) aftersw=" -A"
			afterarg=" $OPTARG"
			detailsw=" -d"
			;;
		B ) beforesw=" -B"
			beforearg=" $OPTARG"
			detailsw=" -d"
			;;
		b ) betweensw=" -b"
			betweenarg="${OPTARG}"
			;;
		C ) contextsw=" -C"
			contextarg=" $OPTARG"
			detailsw=" -d"
			;;
		c ) custsw=" -c"
			custarg=$OPTARG
			;;
		D ) DETAILsw=" -D"
			detailsw=" -d"
			;;
		f ) filesw=" -f"
			orig_filearg=$OPTARG
			filearg="$orig_filearg"
			#"-" means read the file list from standard input.
			if [ "$orig_filearg" == "-" ]
			then
				orig_filearg="STDIN"
			fi
			;;
		F ) formatsw=" -F"
			formatarg=" $OPTARG"
			;;
		g ) groupsw=" -g"
			#Doctor the argument a bit to make it very unlikely we'll get
			#a match with real text when doing the sed later.
			grouparg=`echo "${OPTARG}" | sed -r -e 's/(\\\\[1-9])/__\1/g'`
			detailsw=" -d"
			;;
		h ) helpsw="-h"
			;;
		i ) casesw=" -i"
			;;
		k ) countsw=" -k"
			detailsw=""
			DETAILsw=""
			;;
		L ) nomatchsw=" -L"
			matchdesc=" do not"
			detailsw=""
			DETAILsw=""
			;;
		m ) maxsw=" -m"
			maxarg=" $OPTARG"
			detailsw=" -d"
			;;
		n ) numsw=" -n"
			detailsw=" -d"
			;;
		o ) onlysw=" -o"
			detailsw=" -d"
			;;
		s ) stanzasw=" -s"
			detailsw=" -d"
			;;
		S ) Stanzasw=" -S"
			detailsw=" -d"
			;;
		v ) echo ""
			echo "$0 version: $version"
			echo ""
			exit
			;;
		* ) echo "" >&2
			echo "Invalid command line option '$OPTARG'." >&2
			helpsw="-h"
	esac
done
#Display help if requested or there was an invalid entry.
if [ $helpsw ]
then
echo ""
echo "This script searches network device configurations for strings of characters."
echo "It uses some of the same switches as grep as well as some unique to its purpose."
echo ""
echo "Usage: $0 [options] 'regexp[®exp...]'"
echo ""
echo "'regexp' is the regular expression to use for the search."
echo " Multiple regexps separated by an ampersand indicate that all regexps must"
echo " be matched within a configuration in order for there to be a match."
echo ""
echo "Options:"
echo "-A num Show 'num' lines after the matching line(s)"
echo "-B num Show 'num' lines before the matching line(s)"
echo "-b arg Show lines between the -b arg and regexp, including the matching line(s)"
echo "-C num Show 'num' lines before and after the matching line(s)"
echo "-c cust Search this customer's configs only"
echo "-d Show details; i.e. the actual matching lines"
echo "-D Show details AND show devices that do not have matching lines"
echo "-f file Search these config files only (can use regexp or a file name"
echo " that contains a list of files)"
echo "-F format Output in the specified format:"
echo " txt = human readable text (default)"
echo " list = list of device names only"
echo " tab = tab delimited"
echo " tab1 = same as tab delimited, but one output line per occurence"
echo " csv = comma separated values"
echo " csv1 = same as comma separated, but one output line per occurence"
echo " ran = RANCID clogin commands"
echo " mcd = mc-diff input"
echo " yaml = YAML"
echo "-g arg Show only the matching regexp groups specified by the arg"
echo "-h Display this help information"
echo "-i Case insensitive search"
echo "-k Show the count of matching line(s)"
echo "-L Return only configs that do NOT match the search criteria"
echo "-m num Show a maximum of 'num' matches"
echo "-n Show the line numbers of the matching lines"
echo "-s Show the stanza statements for the line(s) that match the regex."
echo "-S Show the lines in the stanza for the line(s) that match the regex."
echo "-v Show the version of this script"
echo "-w Match on whole words only"
echo""
echo "For more information, please see https://wiki.services.cdw.com/w/Findstring"
echo""
exit
fi
#sudo to root if file permissions require it.
#If we are already root and the directory still is not visible, bail out
#instead of re-invoking sudo: the old code recursed endlessly when $basedir
#was genuinely missing, because the root re-invocation hit the same branch.
#"$0" is now quoted so a script path containing spaces survives the re-exec.
if [ ! -d "$basedir" ]
then
	if [ "$(id -u)" -eq 0 ]
	then
		echo "$basedir does not exist." >&2
		exit 1
	fi
	sudo "$0" "$@"
	exit
fi
#Turn off the detailsw if output format is "list".
if [ "$formatarg" == " list" ]
then
	detailsw=""
fi
#Build the human-readable description of the search criteria.
#Note: [ $detailsw ] etc. are unquoted one-argument tests — true exactly when
#the switch variable is non-empty.
header="\n"
header+=$(date)
header+="\nSearch Criteria:"
if [ $detailsw ]
then
	detaildesc=" and lines"
else
	detaildesc=""
fi
if [ $custsw ]
then
	custdesc=" for customer '$custarg'"
else
	custdesc=""
fi
if [ $filesw ]
then
	if [ "$orig_filearg" == "$filearg" ]
	then
		header+=" File names$detaildesc$custdesc matching '$filearg' that$matchdesc match '$in_regexp'"
	else
		header+=" File names$detaildesc$custdesc matching the files listed in '$orig_filearg' that$matchdesc match '$in_regexp'"
	fi
else
	if [ -e "$in_regexp" ]
	then
		header+=" All files$detaildesc$custdesc that$matchdesc match matches to the regular expressions in file '$in_regexp'"
	else
		header+=" All files$detaildesc$custdesc that$matchdesc match '$in_regexp'"
	fi
fi
if [ $beforesw ]; then header+="\n Show $beforearg lines before the match."; fi
if [ $aftersw ]; then header+="\n Show $afterarg lines after the match."; fi
if [ $betweensw ]; then header+="\n Show lines between '$betweenarg' and '$in_regexp', inclusive."; fi
if [ $contextsw ]; then header+="\n Show $contextarg lines before and after the match."; fi
if [ $maxsw ]; then header+="\n Show a maximum of $maxarg matches."; fi
if [ $onlysw ]; then header+="\n Show only the matching string."; fi
if [ $stanzasw ]; then header+="\n Show the stanza heading lines for the matching lines."; fi
if [ $Stanzasw ]; then header+="\n Show all the lines in the stanza for the matching stanza heading lines."; fi
if [ $countsw ]; then header+="\n Show the count of matching lines."; fi
if [ $DETAILsw ]; then header+="\n Show device info, even if there are no matching lines."; fi
if [ $groupsw ]; then header+="\n Use regex groups to format output."; fi
#Now that we've built the header lines, output them to the right place.
#Machine-readable formats get the header on stderr so stdout stays parseable.
if [ "$formatarg" == " txt" ]
then
	echo -e "$header"
else
	echo -e "$header" >&2
fi
#Get the list of files we want to search.
#If filearg is an existing file, we assume it contains a list of files.
#Join the lines together into one grep regexp of the form 'line1|line2|line3\etc.' and put that into filearg.
#if [[ "$filearg" != "" && ("$filearg" == "-" || -e $filearg) ]]
#then
#	filearg=$(cat $filearg | tr -s "\n" "|" | sed 's/ //g' | sed 's/^|//' | sed 's/|$//')
#fi
#Position ourselves in the directory we want to search in.
#NOTE(review): this exits with status 0 on an unknown customer — probably
#should be a non-zero exit; confirm nothing scripts against the status.
if [ ! -d $basedir/$custarg ]
then
	echo "Customer '$custarg' does not exist." >&2
	echo "" >&2
	exit
else
	path=$basedir/$custarg
fi
#binc-infra/slot3 is excluded because it is equivalent to an Attic.
#
#If a file containing a list of files is input, we split it into groups,
#because it turns out that evaluating a large regex against a large number of files takes very long time.
#It's faster to split it into multiple smaller groups.
#
#The fancy formatting for the 'find' command, the reverse sort, the unique sort, the cut, and the sort at the end lets
#us get only the most recent file for each device sorted by device name within customer.
#The printf format outputs the file name, modification time and full path name.
#The first sort orders the list by file name and modification time in reverse order.
#The second sort with the unique option gets just the first file for each file name (newest because of the previous reverse sort).
#The cut gives us the full path name, which is all we wanted in the first place.
#The final sort orders the list by device within customer, because the path starts with the customer abbreviation.
if [[ "$filearg" != "" && ("$filearg" == "-" || -e $filearg) ]]
then
	#Turn the list file (or STDIN when "-") into one alternation regexp.
	filearg=$(cat $filearg | tr -s "\n" "|" | sed 's/ //g' | sed 's/^|//' | sed 's/|$//')
	#Split the alternation on "|" and run find/egrep in chunks of ~100 names.
	IFS='|'
	argcount=0
	files=""
	for arg in $filearg
	do
		if [ $argcount -gt 100 ]
		then
			new_files=$(find $path -type f -printf "%f\t%T+\t%p\n" | egrep -e "$args")
			files="$files
$new_files"
			argcount=0
			args=""
		fi
		if [ $argcount -eq 0 ]
		then
			args=$arg
		else
			args="$args|$arg"
		fi
		(( argcount++ ))
	done
	#Process the final, possibly short, chunk.
	new_files=$(find $path -type f -printf "%f\t%T+\t%p\n" | egrep -e "$args")
	files="$files
$new_files"
	files=$(echo $files | egrep "configs" | egrep -v "Attic|RETIRED|CVS|binc-infra/slot3" | sort -r -k1,2 | sort -u -k1,1 | cut -f3 | sort -t/ -k5,5 -k9,9)
	unset IFS
else
	files=$(find $path -type f -printf "%f\t%T+\t%p\n" | egrep -e "$filearg" | egrep "configs" | egrep -v "Attic|RETIRED|CVS|binc-infra/slot3" | sort -r -k1,2 | sort -u -k1,1 | cut -f3 | sort -t/ -k5,5 -k9,9)
fi
#echo "debug: files=" >&2
#echo "$files" >&2
#exit
#-b (between) is implemented as a very large "before" window plus regexp
#doctoring; the worker trims the window back to the -b boundary.
if [ $betweensw ]
then
	#Set the beforesw and beforearg.
	#This allows us to do a normal 'before' search to get things started.
	beforesw=" -B"
	beforearg=" 100000"
	#Set the -E switch so that we can pass the doctored in_regexp to the worker routine.
	Esw=" -E "
	doctored_regexp="$in_regexp"
	#Tweak the in_regexp and betweenarg reqexp's to make "^" searches work, if needed.
	#(grep -B prefixes context lines with "NUM-" and match lines with "NUM:".)
	caretcount=$(echo "$betweenarg" | grep -c "^\^")
	if [ $caretcount -gt 0 ]
	then
		betweenarg=$(echo "$betweenarg" | sed -e "s/.\(.*\)/^[0-9]*-\1/") #Replace the "^" with a regexp that matches line numbers.
	fi
	caretcount=$(echo "$in_regexp" | grep -c "^\^")
	if [ $caretcount -gt 0 ]
	then
		doctored_regexp=$(echo "$in_regexp" | sed -e "s/.\(.*\)/^[0-9]*:\1/") #Replace the "^" with a regexp that matches line numbers.
	fi
fi
i=0 #NOTE(review): superseded by the counter initialization further below.
#Following block is used for debugging.
#echo "aftersw=$aftersw afterarg=$afterarg" >&2
#echo "betweensw=$betweensw betweenarg=$betweenarg" >&2
#echo "beforesw=$cwbeforesw beforearg=$beforearg" >&2
#echo "contextsw=$contextsw contextarg=$contextarg" >&2
#echo "detailsw=$detailsw" >&2
#echo "DETAILsw=$DETAILsw" >&2
#echo "formatsw=$formatsw formatarg=$formatarg" >&2
#echo "groupsw=$groupsw grouparg=$grouparg" >&2
#echo "numsw=$numsw" >&2
#echo "casesw=$casesw" >&2
#echo "nomatchsw=$nomatchsw" >&2
#echo "maxsw=$maxsw maxarg=$maxarg" >&2
#echo "onlysw=$onlysw" >&2
#echo "pid=$pid" >&2
#echo "stanzasw=$stanzasw" >&2
#echo "Stanzasw=$Stanzasw" >&2
#echo "in_regexp=$in_regexp" >&2
#echo "doctored_regexp=$doctored_regexp" >&2
#Export all of the variables that will be passed via xargs. This is required.
#(xargs invokes 'sh -c', so the workers can only see exported variables.)
export afterarg
export aftersw
export beforearg
export beforesw
export betweenarg
export betweensw
export casesw
export contextarg
export contextsw
export countsw
export detailsw
export DETAILsw
export doctored_regexp
export Esw
export formatarg
export formatsw
export grouparg
export groupsw
export in_regexp
export nomatchsw
export maxsw
export maxarg
export numsw
export onlysw
export pid
export stanzasw
export Stanzasw
#These counters are used to count files.
#i  - Counts the number of files in the current batch (reset after each batch).
#ii - Counts the total number of files processed; compared to filecount to
#     detect the final, possibly short, batch.
#(The previous single-line "i=ii=0" actually assigned the literal string
# "ii=0" to i and left ii unset; it only appeared to work because bash
# arithmetic re-evaluates string values.  The roles of i and ii in the old
# comment were also swapped.)
i=0
ii=0
filecount=$(echo "$files" | wc -l)
#Record time for performance measurement.
#tmp_starttimestamp=$(date +%s%3N)
#Delete any old temporary findstring files that might be hanging out there, just to be a good linux citizen.
find /tmp -name "findstring_*" -mmin +5 -delete 2> /dev/null
#Output YAML opening lines.
if [ "$formatarg" == " yaml" ]
then
	echo "---"
	echo "customers:"
fi
#Loop through all the files and assemble batches of files to be multi-threaded processed using xargs.
#This allows us to do multi-threading and still display the first results before all work is done.
#(Note: $files is word-split here on purpose; config paths under $basedir are
# assumed not to contain whitespace.)
for file in $files
do
	batch="$batch$file " #Add the current file to the batch.
	(( i++ )) #Increment the file counters.
	(( ii++ ))
	if [ $i -gt $batchsize ] || [ $ii -eq $filecount ] #Have we reached our batch size or the total number of files?
	then #Yes, so invoke the worker routine using xargs with the batch of files and save the results in 'details'.
		#Record times for performance measurement.
		#tmp_endtimestamp=$(date +%s%3N)
		#tmp_lapsetimestamp=$((tmp_endtimestamp-tmp_starttimestamp))
		#main_lapsetimestamp=$((main_lapsetimestamp+tmp_lapsetimestamp))
		#tmp_starttimestamp=$tmp_endtimestamp
		#Invoke multiple instances of the findstring_worker script using xargs.
		#Pass the arguments plus this script's PID so that they can write their output to unique, identifiable file names.
		############### Multi-threading starts here ##################
		echo "$batch" | xargs -n1 -P$max_threads sh -c '/opt/bin/findstring_worker -p $pid$countsw$detailsw$DETAILsw$groupsw"$grouparg"$numsw$stanzasw$Stanzasw$casesw$nomatchsw$onlysw$maxsw$maxarg$aftersw$afterarg$beforesw$beforearg$contextsw$contextarg$formatsw$formatarg$betweensw"$betweenarg"$Esw"$doctored_regexp" -e "$in_regexp" $0 > /dev/null'
		################ Multi-threading ends here ###################
		#Capture the output of the findstring_workers, if any, and then delete it.
		details=""
		details=$(cat /tmp/findstring_$pid.* 2> /dev/null)
		rm -f /tmp/findstring_$pid.* &> /dev/null
		#Record times for performance measurement.
		#tmp_endtimestamp=$(date +%s%3N)
		#tmp_lapsetimestamp=$((tmp_endtimestamp-tmp_starttimestamp))
		#worker_lapsetimestamp=$((worker_lapsetimestamp+tmp_lapsetimestamp))
		#tmp_starttimestamp=$tmp_endtimestamp
		#Reset the batch related variables.
		i=0
		batch=""
		#echo "debug: details=$details" >&2
		if [ "$details" != "" ] #we have a match.
		then
			#The output from the worker routines is pretty much ready for output...
			if [[ "$formatarg" == " txt" || "$formatarg" == " yaml" ]]
			then #...but the human readable text format requires some special jujitsu
				#to keep the "Customer: xxxx" lines from repeating.
				#We do it this way to avoid a for loop with break processing, which is much slower.
				#Do a unique sort of the details and save them.
				details=$(echo -e "$details" | sort -t$'\t' -u -k1,2 -k3,3n)
				#Delete any customer lines that match the last ones from the previous set of details and output them.
				echo -e "$details" | grep -v "$lastcustline" | cut -f4-
				#Save the last customer lines for matching with the next set of details.
				lastcustline=$(echo -e "$details" | grep -B1 "Customer:" | tail -n2)
			else
				#Just sort by customer, device and sequence number
				#and then strip off customer, device and sequence number before output.
				echo -e "$details" | sort -t$'\t' -k1,2 -k3,3n | cut -f4-
			fi
		fi
	fi
done
#Output YAML closing lines.
if [ "$formatarg" == " yaml" ]
then
	echo "..."
fi
#Record time for performance measurement and output the performance numbers.
#endtime=$(date +%s%3N)
#lapsetimestamp=$((endtime-starttime))
#lapsetime=$(bc<<<"scale=2;$lapsetimestamp/1000")
#echo -e "\nQuery completed in $lapsetime seconds." >&2
#worker_time=$(bc<<<"scale=3;$worker_lapsetimestamp/1000")
#main_time=$(bc<<<"scale=3;$main_lapsetimestamp/1000")
#worker_time_percent=$(bc<<<"scale=2;$worker_lapsetimestamp/$lapsetimestamp*100")
#main_time_percent=$(bc<<<"scale=2;$main_lapsetimestamp/$lapsetimestamp*100")
#echo "Main routine time: $main_time ($main_time_percent%)" >&2
#echo "Worker time: $worker_time ($worker_time_percent%)" >&2
#(Removed a stray pasted terminal-prompt line that had been accidentally
# committed at the end of this file; it was not shell code and would have been
# executed as a command — and failed — on every run.)
| true
|
a294cadafa7bd43466c17cc4d1fa747835545e77
|
Shell
|
alphier/light100
|
/server_control.sh
|
UTF-8
| 2,525
| 4
| 4
|
[] |
no_license
|
#! /bin/sh
# server_control: start/stop the light100 Node.js service and manage its
# MongoDB data (export/import).  Must be run as root.
export DRAGON_HOME=/mnt/light100
USER_ID=`id -u`
DB_DIR=/mnt/mongodb
DB_BAK=$DB_DIR/bak
LOG_DIR=$DB_DIR
# Two words on purpose: "bootstrap.js" plus its argument, used both as the
# node command line and as the ps search pattern in kill_process.
PROCESS_NAME="bootstrap.js $DRAGON_HOME"
# NOTE(review): exits 0 when not root — a non-zero status would be more
# conventional; confirm nothing depends on the current behavior.
if [ ! "$USER_ID" = "0" ]; then
	echo "Please re-run this program as the super user."
	exit 0
fi
#go to home
cd $DRAGON_HOME
# Start mongod (if not already running) and the node service.
# A lock file under $DRAGON_HOME prevents double starts.
start_server(){
	if [ -f "$DRAGON_HOME/light.lock" ]; then
		echo "light service has started, Please execute [light stop] command to stop service."
		exit 0
	fi
	touch $DRAGON_HOME/light.lock
	if [ ! -d "$LOG_DIR" ]; then
		mkdir -p "$LOG_DIR"
	fi
	# Only start mongod when no mongod process is visible (ps|grep pattern;
	# pgrep would be more robust but is kept as-is for /bin/sh portability).
	if [ -z "`ps -ef | grep "mongod" | grep -v "grep"`" ]; then
		if [ ! -d "$DB_DIR" ]; then
			mkdir -p "$DB_DIR"
		fi
		# Repair first in case of an unclean shutdown, then start for real.
		mongod --repair --dbpath $DB_DIR
		mongod --journal --dbpath $DB_DIR --logpath $LOG_DIR/mongo.log --logappend --fork &
		sleep 5
	fi
	# Raise fd limit and enable core dumps for the node process.
	ulimit -n 40960
	ulimit -c unlimited
	# $PROCESS_NAME intentionally unquoted: expands to "bootstrap.js <home>".
	nohup node $PROCESS_NAME >/dev/null 2>&1 &
	sleep 5
}
# Dump the light100 database into a date-stamped folder under $DB_BAK.
export_database(){
	target_dir="$DB_BAK/$(date +%Y%m%d)"
	[ -d "$DB_BAK" ] || mkdir -p "$DB_BAK"
	echo "export to $target_dir"
	[ -d "$target_dir" ] || mkdir -p "$target_dir"
	mongodump -d light100 -o "$target_dir"
}
# Restore the light100 database (with --drop) from a mongodump directory.
# NOTE(review): the backup path comes in via the global $YESNO, which the
# dispatcher at the bottom fills from $2 ("light import <path>") — confirm
# before renaming either variable.
import_database(){
	dbPath=$YESNO
	echo "import path is $dbPath"
	if [ -z "$dbPath" ]; then
		echo "Please specify DB backup path!"
		return 0
	fi
	if [ ! -d "$dbPath" ]; then
		echo "DB backup path not exists!"
		return 0
	fi
	# mongodump writes one subdirectory per database; expect light100/.
	if [ ! -d "$dbPath/light100" ]; then
		echo "DB backup files not found!"
		return 0
	fi
	echo "Import database..."
	mongorestore -d light100 --drop $dbPath/light100
	echo "Import succeed."
}
# Find and TERMinate every PID of the node bootstrap process.
kill_process(){
	PIS=`ps -efww | grep "$PROCESS_NAME" | grep -v "grep" | awk '{print $2}'`
	# $PIS must be quoted: with more than one matching PID the old unquoted
	# "[ ! -z ${PIS} ]" expanded to several words, [ failed with
	# "too many arguments", and kill was silently skipped.
	if [ -n "$PIS" ]; then
		# Unquoted echo intentionally flattens the newline-separated PID
		# list into one line for xargs.
		echo $PIS | xargs kill
		sleep 1
	fi
}
# Stop the service: remove the start lock, kill the node process, then give
# the system a moment to settle.
stop_server(){
	rm -f "$DRAGON_HOME/light.lock"
	kill_process
	sleep 2
}
# Command dispatcher: $1 selects the action, $2 (saved in YESNO, read by
# import_database) optionally carries an argument such as the backup path.
YESNO=$2
case "$1" in
	start)
		echo "Starting light services ..."
		start_server
		echo "Server started."
		;;
	stop)
		echo "Stopping light services ...."
		stop_server
		echo "Server stopped"
		;;
	export)
		echo "Export database..."
		export_database
		echo "Export succeed."
		;;
	import)
		import_database
		;;
	version)
		# Print the version file line by line if present.
		if [ -f "$DRAGON_HOME/version" ]; then
			while read VLINE
			do
				echo $VLINE
			done<$DRAGON_HOME/version
		else
			echo "Unknow version"
		fi
		;;
	*)
		echo "Unknow command:light $1"
		echo "Command usage:"
		echo "light [start|stop|version|export|import]"
		;;
esac
exit 0
| true
|
b4ab9567a54577d0a523e6344af87921bb207ef4
|
Shell
|
MMerzinger/kubeadm-cka-node-factory
|
/setup-vm.sh
|
UTF-8
| 1,798
| 2.65625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Collection of snippets found on https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/
# Provisions an Ubuntu host as a kubeadm node: kernel module/sysctl prep,
# Docker (pinned versions, systemd cgroup driver), then kubelet/kubeadm/kubectl.
# NOTE(review): mixes bare "apt-get" and "sudo apt-get" — harmless when the
# whole script runs as root, which appears to be the assumption (tee writes
# to /etc directly); confirm the intended invocation.
# Prepare host for docker installation
# Load br_netfilter at boot so bridged pod traffic is visible to iptables.
cat <<EOF | tee /etc/modules-load.d/k8s.conf
br_netfilter
EOF
cat <<EOF | tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system
apt-get update && sudo apt-get install -y \
  apt-transport-https ca-certificates curl software-properties-common gnupg2
# Install docker
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key --keyring /etc/apt/trusted.gpg.d/docker.gpg add -
add-apt-repository \
  "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
  $(lsb_release -cs) \
  stable"
# Versions pinned per the upstream kubeadm docs of the time; bump together.
apt-get update && sudo apt-get install -y \
  containerd.io=1.2.13-2 \
  docker-ce=5:19.03.11~3-0~ubuntu-$(lsb_release -cs) \
  docker-ce-cli=5:19.03.11~3-0~ubuntu-$(lsb_release -cs)
cat <<EOF | tee /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2"
}
EOF
mkdir -p /etc/systemd/system/docker.service.d
systemctl daemon-reload
systemctl restart docker
systemctl enable docker
apt-get update && apt-get install -y apt-transport-https curl
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
cat <<EOF | tee /etc/apt/sources.list.d/kubernetes.list
deb https://apt.kubernetes.io/ kubernetes-xenial main
EOF
# Install kubelet, kubeadm and kubectl - using latest version
# To use a specific version use the commented line
apt-get update
apt-get install -y kubelet kubeadm kubectl
# apt-get install -y kubelet=1.20.1-00 kubeadm=1.20.1-00 kubectl=1.20.1-00
apt-mark hold kubelet kubeadm kubectl
| true
|
679d51d97b30a2712989719390d0e904916edb6a
|
Shell
|
frankthetank7254/feral
|
/auto-reroute-v2.sh
|
UTF-8
| 1,670
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/bash
# Benchmark a single-segment download through each Feral route, then ask the
# network API to switch to the fastest one.
routes=(0.0.0.0 77.67.64.81 78.152.33.250 78.152.57.84 81.20.64.101 81.20.69.197 87.255.32.229 87.255.32.249)
route_names=(Default GTT Atrato#1 Atrato#2 NTT#1 NTT#2 Fiber-Ring/Leaseweb#2 Fiber-Ring/Leaseweb#1)
#
test_files=(https://feral.io/test.bin https://gtt-1.feral.io/test.bin https://atrato-1.feral.io/test.bin https://atrato-2.feral.io/test.bin https://ntt-1.feral.io/test.bin https://ntt-2.feral.io/test.bin https://fr-1.feral.io/test.bin https://fr-2.feral.io/test.bin)
count=-1
reroute_log=$(mktemp)
# Remove the temp file on exit; it was previously leaked in /tmp.
trap 'rm -f -- "$reroute_log"' EXIT
for i in "${routes[@]}"
do
	((count++))
	echo "Testing single segment download speed from ${route_names[$count]}..."
	# Last "(x MB/s)" token of wget's progress output, parens/space stripped.
	speed=$(wget -O /dev/null "${test_files[$count]}" 2>&1 | tail -n 2 | head -n 1 | awk '{print $3 $4}' | sed 's/(//' | sed 's/ //' | sed 's/)//')
	# $speed is now quoted: the old unquoted test made [ ] error out whenever
	# wget produced no speed token (e.g. DNS failure left $speed empty).
	if [ "$speed" = "ERROR404:" ]; then
		echo -e "\033[31m""\nThe test file cannot be found at ${test_files[$count]} \n""\e[0m"
		# Exit non-zero so callers/cron can see the failure (was exit 0).
		exit 1
	fi
	echo -e "\033[32m""routing through ${route_names[$count]} results in $speed""\e[0m"
	echo
	echo "$speed ${routes[$count]} ${route_names[$count]}" >> "$reroute_log"
done
#
# sort -h understands the "123MB/s"-style human suffixes in column 1.
fastestroute=$(sort -hr "$reroute_log" | head -n 1 | awk '{print $2}')
fastestspeed=$(sort -hr "$reroute_log" | head -n 1 | awk '{print $1}')
fastestroutename=$(sort -hr "$reroute_log" | head -n 1 | awk '{print $3}')
#
echo -e "Routing through $fastestroutename provided the highest speed of $fastestspeed"
echo "Setting route to $fastestroute ..."
curl 'https://network.feral.io/reroute' --data "nh=$fastestroute" >/dev/null 2>&1
echo "Please wait two minutes for route change to take effect..."
#
echo 'All done!'
| true
|
cc569aee91ccc1d37b93f1b4e5c3730cd00c40b4
|
Shell
|
jjchromik/hilti-104-total
|
/scripts/install-llvm-darwin
|
UTF-8
| 4,082
| 3.828125
| 4
|
[
"BSD-2-Clause"
] |
permissive
|
#! /usr/bin/env bash
#
# Build/install LLVM+clang (with libc++/libc++abi/compiler-rt) on Darwin.
#
# git version to checkout.
VERSION_LLVM=release_32
VERSION_CLANG=release_32
VERSION_LIBCXX=release_32
VERSION_COMPILERRT=release_32
VERSION_LIBCXXABI=release_32

if [ "$1" = "--install" ]; then
    mode=install
    install=1
fi

if [ "$1" = "--update" ]; then
    mode=update
    update=1
fi

# Replaced the deprecated/ambiguous "-o" inside a single test with two
# separate checks, and compare $# numerically rather than as a string.
if [ "$#" -ne 3 ] || [ -z "$mode" ]; then
    echo "usage: $(basename "$0") [--install|--update] <where-to-clone-into> <install-prefix>"
    exit 1
fi
src=$2
prefix=$3
# Built libraries with RTTI.
export REQUIRES_RTTI=1
export PATH=$prefix/bin:$PATH
# In update mode the prefix and source tree must already exist.
if [ "$update" != "" ]; then
    for d in $prefix $prefix/bin $src; do
        if [ ! -d $d ]; then
            echo "$d does not exist."
            exit 1
        fi
    done
fi
# if [ "$install" != "" ]; then
#    test -d $src && echo "$src already exists" && exit 1
# fi
d=`dirname $0`
patches=`cd $d; pwd`/llvm-patches
cd `dirname $src`
# Get/update the repositories.
if [ "$install" != "" ]; then
    git clone http://llvm.org/git/llvm.git `basename $src`
    ( cd $src/tools && git clone http://llvm.org/git/clang.git )
    ( cd $src/projects && git clone http://llvm.org/git/libcxx )
    ( cd $src/projects && git clone http://llvm.org/git/libcxxabi )
    ( cd $src/projects && git clone http://llvm.org/git/compiler-rt )
    ( cd $src && git checkout ${VERSION_LLVM} )
    ( cd $src/tools/clang && git checkout ${VERSION_CLANG} )
    ( cd $src/projects/libcxx && git checkout ${VERSION_LIBCXX} )
    ( cd $src/projects/libcxxabi && git checkout ${VERSION_LIBCXXABI} )
    ( cd $src/projects/compiler-rt && git checkout ${VERSION_COMPILERRT} )
else
    ( cd $src && git checkout ${VERSION_LLVM} )
    ( cd $src/tools/clang && git checkout ${VERSION_CLANG} )
    ( cd $src/projects/libcxx && git checkout ${VERSION_LIBCXX} )
    ( cd $src/projects/libcxxabi && git checkout ${VERSION_LIBCXXABI} )
    ( cd $src/projects/compiler-rt && git checkout ${VERSION_COMPILERRT} )
    ( cd $src && git pull --rebase )
    ( cd $src/tools/clang && git pull --rebase )
    ( cd $src/projects/libcxx && git pull --rebase )
    ( cd $src/projects/libcxxabi && git pull --rebase )
    ( cd $src/projects/compiler-rt && git pull --rebase )
fi
# # Apply any patches we might need.
# NOTE(review): with no patch files present the glob stays literal and the
# loop runs once with a bogus path — consider a nullglob guard.
# Patch file names encode their target as <dir>-<subdir>-<name>.
for i in $patches/*; do
    echo Applying $i ...
    b=`basename $i`
    ( cd `echo $b | awk -v src=$src -F - '{printf("%s/%s/%s", src, $1, $2);}'` && pwd && cat $i | git am -3 )
done
# To bootstrap, compile and install LLVM and clang once.
cd $src
if [ "$install" != "" ]; then
    ./configure --prefix=$prefix --enable-optimized
    make -j 4
    make install
    make clean
fi
# clang_version is currently only informational.
clang_version=`$prefix/bin/clang++ --version 2>&1 | grep ^clang | awk '{print $3}'`
libcxx_include=$prefix/lib/c++/v1
mkdir -p $libcxx_include
### Build libc++abi with the new clang and install.
### Note that even though it's in the llvm/projects directory, it's not automatically included in the built
### as it doesn't provide a CMakeLists.txt yet.
( cd projects/libcxxabi/lib && CXX=$prefix/bin/clang++ PREFIX=$prefix TRIPLE=-apple- ./buildit )
cp -R projects/libcxxabi/include/* $libcxx_include
cp projects/libcxxabi/lib/libc++abi.dylib $prefix/lib
### Build libc++ with the new clang and install.
### Note that even though it's in the llvm/projects directory, it's not automatically included in the built.
### (In fact, libcxx is explicitly *excluded* llvm/projects/CMakeLists.txt).
( cd projects/libcxx/lib && CXX=$prefix/bin/clang++ PREFIX=$prefix TRIPLE=-apple- ./buildit )
cp -R projects/libcxx/include/* $libcxx_include
cp projects/libcxx/lib/libc++.1.dylib $prefix/lib
# Refresh the unversioned symlink to point at the freshly installed dylib.
rm -f $prefix/lib/libc++.dylib
ln -s $prefix/lib/libc++.1.dylib $prefix/lib/libc++.dylib
# Now compile LLVM/clang with the new libraries.
if [ "$install" != "" ]; then
    make clean
    # The original left the line-continuation backslash off the CC= line, so
    # CC became a plain shell variable on a statement of its own and never
    # reached ./configure's environment — the C compiler defaulted to the
    # system one while C++ used the freshly built clang++.
    CC=$prefix/bin/clang \
    CXX=$prefix/bin/clang++ \
    CFLAGS="-L$prefix/lib" CXXFLAGS="-L$prefix/lib" \
    LDFLAGS="-L$prefix/lib -Wl,-rpath,$prefix/lib" \
    ./configure --prefix=/opt/llvm --enable-optimized --enable-libcpp
    # NOTE(review): --prefix=/opt/llvm differs from the $prefix used by the
    # bootstrap configure above — looks unintentional; confirm before changing.
fi

make -j 4
make install
| true
|
e6ac24d25b01dbcdbc3701c7bc5491556fdd5e63
|
Shell
|
fopina/vagrant-iosjailbreak
|
/scripts/install_theos.sh
|
UTF-8
| 813
| 4
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Install or update THEOS into /opt/theos and write a login-shell profile
# snippet that exports $THEOS/PATH and defines the setphone/sshi helpers.
set -e
THEOS=/opt/theos
if [ -d $THEOS/.git ]; then
	echo "Updating THEOS" >&2
	cd $THEOS
	git pull
else
	echo "Installing THEOS" >&2
	git clone --recursive https://github.com/theos/theos $THEOS
fi
# The here-doc delimiter is unquoted on purpose: $THEOS expands now, while
# everything that must stay literal in the generated file is backslash-escaped.
cat <<EOF > /etc/profile.d/theos.sh
export THEOS=$THEOS
export PATH="\$THEOS/bin:\$PATH"
function setphone
{
	if [ -z \$1 ]; then
		echo "Usage: setphone PHONE_IP" 1>&2
		return 1
	fi
	grep ^'export THEOS_DEVICE_IP=' \$HOME/.profile > /dev/null || echo "export THEOS_DEVICE_IP=" >> \$HOME/.profile
	sed -i "s/^export THEOS_DEVICE_IP=.*/export THEOS_DEVICE_IP=\$1/" \$HOME/.profile
	export THEOS_DEVICE_IP=\$1
}
function sshi
{
	if [ -z \$THEOS_DEVICE_IP ]; then
		echo "No device defined, use setphone first"
		return 1
	fi
	ssh -l root \$THEOS_DEVICE_IP \$*
}
EOF
| true
|
479c9a880085e60e8e6b4a66027f4495fb1c4855
|
Shell
|
calinburloiu/CaB-Scripts
|
/util/file-size-statistics.sh
|
UTF-8
| 428
| 3.921875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
#
# Author: Calin-Andrei Burloiu
#
# Print file-size statistics for files under DIRECTORY, optionally filtered
# by a find -name pattern; the statistics themselves are computed by the
# companion awk script (looked up relative to the current working directory).
if [ $# -lt 1 ]; then
	echo "Usage: $0 DIRECTORY [SEARCH_PATTERN]" >&2
	echo -e "Print file size statistics for files from DIRECTORY filtering results\n"\
"by the optional SEARCH_PATTERN." >&2
	exit 1
fi
DIR="$1"
if [ $# -eq 2 ]; then
	PATT="$2"
else
	PATT="*"
fi
#Split the $(find ...) substitution on newlines only, so file names with
#spaces survive.  Names containing newlines would still break, and parsing
#"ls -l" output is inherently fragile — acceptable for this utility.
IFS=$'\n'
ls -l --time-style iso $(find "$DIR" -name "$PATT") | tr -s " " "*" | awk -F '*' -f file-size-statistics.awk
| true
|
c131f6aae86abb8802fc22d1b04db19428d86c0a
|
Shell
|
chrischivlog/install-linux-essentials
|
/install-essential.sh
|
UTF-8
| 4,679
| 2.96875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Provision a Debian/Ubuntu host with the "essentials": Java (default JDK and
# AdoptOpenJDK 8), an Apache2 + PHP 7.4 web stack, MariaDB, phpMyAdmin, and
# Postfix/mailutils.  Must be run as root.
#
# Fixes over the previous revision:
#   * "-y" added consistently to non-interactive apt installs (several steps
#     previously prompted and stalled the run);
#   * the cd into /usr/share is now checked before downloading phpMyAdmin;
#   * mkdir made idempotent with -p;
#   * the repeated echo/echo/msg/echo/echo/sleep blocks are factored into a
#     helper.
# Deliberately interactive steps are left interactive:
# update-alternatives, mysql_secure_installation, nano, and postfix (debconf).

# Print a spaced banner and pause briefly so the operator can follow along.
banner() {
	echo ""
	echo ""
	echo "$*"
	echo ""
	echo ""
	sleep 2
}

banner "updating all packages"
apt update
banner "upgrade all packages"
apt upgrade -y

banner "install unzip"
apt install -y unzip
banner "install sudo"
apt install -y sudo

banner "install default jdk"
apt install -y default-jdk
java -version
sleep 2

banner "updating all packages"
apt update -y

banner "install dependencies for java"
apt install -y apt-transport-https ca-certificates wget dirmngr gnupg software-properties-common

banner "import gpg for java"
# NOTE(review): apt-key is deprecated on current Debian/Ubuntu; consider a
# keyring file under /etc/apt/trusted.gpg.d/ when this script is modernized.
wget -qO - https://adoptopenjdk.jfrog.io/adoptopenjdk/api/gpg/key/public | apt-key add -

banner "add repo jfrog for java"
add-apt-repository --yes https://adoptopenjdk.jfrog.io/adoptopenjdk/deb/

banner "updating all packages"
apt update

banner "install java 8"
apt install -y adoptopenjdk-8-hotspot
java -version
sleep 2

echo ""
echo ""
echo ""
echo "##################"
echo "PLEASE CHANGE YOUR DEFAULT JAVA VERSION! to JAVA 1.8"
echo "##################"
echo ""
echo ""
echo ""
sleep 10
update-alternatives --config java   # interactive by design

banner "GREAT"
sleep 5
clear

# ---------------------------------------------------------------- web stack
apt install -y ca-certificates apt-transport-https lsb-release gnupg curl nano unzip
banner "updating all packages"
apt update

echo ""
echo "add php package"
echo ""
wget -q https://packages.sury.org/php/apt.gpg -O- | apt-key add -
sleep 2

echo ""
echo "add another php package"
echo ""
echo "deb https://packages.sury.org/php/ $(lsb_release -sc) main" | tee /etc/apt/sources.list.d/php.list
sleep 2

echo ""
echo "add package source"
echo ""
apt install -y software-properties-common
sleep 2

echo ""
echo "add package php"
echo ""
# NOTE(review): ppa: URLs are Ubuntu-only; on plain Debian the sury.org list
# above already supplies PHP — confirm which distribution this targets.
add-apt-repository ppa:ondrej/php
sleep 2

banner "updating all packages"
apt update

echo ""
echo "install apache2 "
echo ""
apt install apache2 -y
sleep 2

echo ""
echo "install php7.4 "
echo ""
apt install php7.4 php7.4-cli php7.4-curl php7.4-gd php7.4-intl php7.4-json php7.4-mbstring php7.4-mysql php7.4-opcache php7.4-readline php7.4-xml php7.4-xsl php7.4-zip php7.4-bz2 libapache2-mod-php7.4 -y
sleep 2

echo ""
echo "install mariadb"
echo ""
apt install mariadb-server mariadb-client -y
sleep 2
echo ""
echo "setup mariadb"
echo ""
mysql_secure_installation   # interactive by design
sleep 2

echo ""
echo "install phpmyadmin"
echo ""
cd /usr/share || { echo "cannot cd to /usr/share" >&2; exit 1; }
wget https://www.phpmyadmin.net/downloads/phpMyAdmin-latest-all-languages.zip -O phpmyadmin.zip
unzip phpmyadmin.zip
rm phpmyadmin.zip
mv phpMyAdmin-*-all-languages phpmyadmin
chmod -R 0755 phpmyadmin
sleep 2

echo "!!!!!!!!!!!!!!!!!!!!!!!!!!"
echo "PLEASE COPY DOWN BELOW"
echo "!!!!!!!!!!!!!!!!!!!!!!!!!!"
echo ""
echo "# phpMyAdmin Apache configuration
Alias /phpmyadmin /usr/share/phpmyadmin
<Directory /usr/share/phpmyadmin>
    Options SymLinksIfOwnerMatch
    DirectoryIndex index.php
</Directory>
# Disallow web access to directories that don't need it
<Directory /usr/share/phpmyadmin/templates>
    Require all denied
</Directory>
<Directory /usr/share/phpmyadmin/libraries>
    Require all denied
</Directory>
<Directory /usr/share/phpmyadmin/setup/lib>
    Require all denied
</Directory>"
echo ""
sleep 20
echo "!!!!!!!!!!!!!!!!!!!!!!!!!!"
echo "insert copied now"
nano /etc/apache2/conf-available/phpmyadmin.conf   # interactive by design
sleep 2

# Enable the phpMyAdmin config and prepare its writable tmp directory.
echo "!!!!!!!!!!!!!!!!!!!!!!!!!!"
echo "enable apache phpmyadmin"
a2enconf phpmyadmin
systemctl reload apache2
mkdir -p /usr/share/phpmyadmin/tmp/
chown -R www-data:www-data /usr/share/phpmyadmin/tmp/
sleep 2

echo ""
echo "if you wish to access the mariadb via root read this tutorial"
echo "https://www.bennetrichter.de/anleitungen/apache2-php7-mariadb-phpmyadmin/"
echo ""
sleep 10

clear
echo "cleared"
sleep 5

echo ""
echo "install postfix and sendmail"
echo ""
sleep 3
apt update
apt install -y mailutils
apt install postfix   # left interactive: debconf asks for the mail setup
sleep 5
| true
|
554bb6d4bdb60094333530683d4455e59532b46c
|
Shell
|
3rdentity/stella
|
/stella.sh
|
UTF-8
| 8,885
| 3.21875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Resolve the directory containing this script so conf.sh can be sourced
# regardless of the caller's working directory.
_STELLA_CURRENT_FILE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source $_STELLA_CURRENT_FILE_DIR/conf.sh
function usage() {
echo "USAGE :"
echo "----------------"
echo "List of commands"
echo " o-- application management :"
echo " L app init <application name> [--approot=<path>] [--workroot=<abs or relative path to approot>] [--cachedir=<abs or relative path to approot>] [--samples]"
echo " L app get-data|get-assets|delete-data|delete-assets|update-data|update-assets|revert-data|revert-assets <data id|assets id>"
echo " L app get-data-pack|get-assets-pack|update-data-pack|update-assets-pack|revert-data-pack|revert-assets-pack|delete-data-pack|delete-assets-pack <data pack name|assets pack name>"
echo " L app get-feature <all|feature schema> : install all features defined in app properties file or install a matching one"
echo " L app link <app-path> [--stellaroot=<path>] : link an app to a specific stella path"
echo " L app deploy user@host:path [--cache] [--workspace] : : deploy current app version to an other target via ssh. [--cache] : include app cache folder. [--workspace] : include app workspace folder"
echo " o-- feature management :"
echo " L feature install <feature schema> [--depforce] [--depignore] [--buildarch=x86|x64] [--export=<path>] [--portable=<path>] : install a feature. [--depforce] will force to reinstall all dependencies. [--depignore] will ignore dependencies. schema = feature_name[#version][@arch][:binary|source][/os_restriction][\\os_exclusion]"
echo " L feature remove <feature schema> : remove a feature"
echo " L feature list <all|feature name|active> : list all available feature OR available versions of a feature OR current active features"
echo " o-- various :"
echo " L stella api list : list public functions of stella api"
echo " L stella install dep : install all features and systems requirements if any, for the current OS ($STELLA_CURRENT_OS)"
echo " L stella version print : print stella version"
echo " L stella search path : print current system search path"
echo " L stella deploy <user@host:path> [--cache] [--workspace] : deploy current stella version to an other target via ssh. [--cache] : include stella cache folder. [--workspace] : include stella workspace folder"
echo " o-- network management :"
echo " L proxy on <name> : active proxy"
echo " L proxy off now : disable proxy"
echo " L proxy register <name> --proxyhost=<host> --proxyport=<port> [--proxyuser=<string> --proxypass=<string>] : register this proxy"
echo " L proxy register bypass --proxyhost=<host> : register a host that will bypass proxy"
echo " o-- bootstrap management :"
echo " L boot shell <uri> : launch an interactive shell with all stella env var setted inside an <uri> (use 'local' for current host)"
echo " L boot cmd <uri> -- <command> : execute a command with all stella env var setted inside an <uri> (use 'local' for current host)"
echo " L boot script <uri> -- <script_path>"
echo " o-- system package management : WARN This will affect your system"
echo " L sys install <package name> : install a system package"
echo " L sys remove <package name> : remove a system package"
echo " L sys list all : list all available system package name"
}
# MAIN -----------------------------------------------------------------------------------
# arguments
# Declarative argument specification consumed by __argparse (provided by the
# sourced conf.sh).  Each PARAMETERS row describes one positional argument
# and each OPTIONS row one flag/option; the exact column format is defined
# by __argparse itself -- see conf.sh.  Parsed values are assigned to
# variables named after the first column (DOMAIN, ACTION, ID, FORCE, ...).
PARAMETERS="
DOMAIN= 'domain' a 'app feature stella proxy sys boot' Action domain.
ACTION= 'action' a 'deploy script shell cmd version search remove on off register link api install init get-data get-assets get-data-pack get-assets-pack delete-data delete-data-pack delete-assets delete-assets-pack update-data update-assets revert-data revert-assets update-data-pack update-assets-pack revert-data-pack revert-assets-pack get-feature install list' Action to compute.
ID= '' s ''
"
OPTIONS="
FORCE='' 'f' '' b 0 '1' Force operation.
APPROOT='' '' 'path' s 0 '' App path (default current)
WORKROOT='' '' 'path' s 0 '' Work app path (default equal to app path)
CACHEDIR='' '' 'path' s 0 '' Cache folder path
STELLAROOT='' '' 'path' s 0 '' Stella path to link.
SAMPLES='' '' '' b 0 '1' Generate app samples.
PROXYHOST='' '' 'host' s 0 '' proxy host
PROXYPORT='' '' 'port' s 0 '' proxy port
PROXYUSER='' '' 'user' s 0 '' proxy user
PROXYPASS='' '' 'password' s 0 '' proxy password
DEPFORCE='' '' '' b 0 '1' Force reinstallation of all dependencies.
DEPIGNORE='' '' '' b 0 '1' Will not process any dependencies.
EXPORT='' '' 'path' s 0 '' Export feature to this dir.
PORTABLE='' '' 'path' s 0 '' Make a portable version of this feature in this dir
BUILDARCH='' 'a' 'arch' a 0 'x86 x64'
CACHE='' '' '' b 0 '1' Include cache folder when deploying.
WORKSPACE='' '' '' b 0 '1' Include workspace folder when deploying.
"
# Parse "$@" against the specs above; leftover arguments are collected into
# OTHERARG (used by the boot domain below).
__argparse "$0" "$OPTIONS" "$PARAMETERS" "Lib Stella" "$(usage)" "OTHERARG" "$@"
# --------------- APP ----------------------------
# App domain: rebuild command-line flags from the parsed option variables and
# forward the action to app.sh.
if [ "$DOMAIN" == "app" ]; then
_app_options=
if [ "$FORCE" == "1" ]; then
_app_options="$_app_options -f"
fi
if [ ! "$APPROOT" == "" ]; then
_app_options="$_app_options --approot=$APPROOT"
fi
if [ ! "$WORKROOT" == "" ]; then
_app_options="$_app_options --workroot=$WORKROOT"
fi
if [ ! "$CACHEDIR" == "" ]; then
_app_options="$_app_options --cachedir=$CACHEDIR"
fi
if [ ! "$STELLAROOT" == "" ]; then
_app_options="$_app_options --stellaroot=$STELLAROOT"
fi
$STELLA_BIN/app.sh $ACTION $ID $_app_options
fi
# --------------- FEATURE ----------------------------
# Feature domain: forward to feature.sh with the relevant build options.
if [ "$DOMAIN" == "feature" ]; then
_feature_options=
if [ "$FORCE" == "1" ]; then
_feature_options="$_feature_options -f"
fi
if [ ! "$BUILDARCH" == "" ]; then
_feature_options="$_feature_options --buildarch=$BUILDARCH"
fi
if [ ! "$EXPORT" == "" ]; then
_feature_options="$_feature_options --export=$EXPORT"
fi
if [ ! "$PORTABLE" == "" ]; then
_feature_options="$_feature_options --portable=$PORTABLE"
fi
$STELLA_BIN/feature.sh $ACTION $ID $_feature_options
fi
# --------------- SYS ----------------------------
# System package management via the __sys_* helpers (defined in the sourced
# environment, not visible here).  WARNING: these modify the host system.
if [ "$DOMAIN" == "sys" ]; then
__init_stella_env
if [ "$ACTION" == "install" ]; then
__sys_install "$ID"
fi
if [ "$ACTION" == "remove" ]; then
__sys_remove "$ID"
fi
if [ "$ACTION" == "list" ]; then
echo "$STELLA_SYS_PACKAGE_LIST"
fi
fi
# --------------- BOOT ----------------------------
# Bootstrap domain: run a command / interactive shell / script in a target
# URI ("local" for current host, per the usage text) with stella env set.
if [ "$DOMAIN" == "boot" ]; then
__init_stella_env
if [ "$ACTION" == "cmd" ]; then
__boot_cmd "$ID" "$OTHERARG"
fi
if [ "$ACTION" == "shell" ]; then
__boot_shell "$ID"
fi
if [ "$ACTION" == "script" ]; then
__boot_script "$ID" "$OTHERARG"
fi
fi
# --------------- PROXY ----------------------------
# Proxy domain: enable/disable or register proxy definitions.  The special
# id "bypass" registers a host that bypasses the proxy instead.
if [ "$DOMAIN" == "proxy" ]; then
__init_stella_env
if [ "$ACTION" == "on" ]; then
__enable_proxy "$ID"
fi
if [ "$ACTION" == "off" ]; then
__disable_proxy
fi
if [ "$ACTION" == "register" ]; then
if [ "$ID" == "bypass" ]; then
__register_no_proxy "$PROXYHOST"
else
__register_proxy "$ID" "$PROXYHOST" "$PROXYPORT" "$PROXYUSER" "$PROXYPASS"
fi
fi
fi
# --------------- STELLA ----------------------------
# Stella self-management: API introspection, dependency install, version and
# search-path queries, and deployment to a remote target over ssh.
if [ "$DOMAIN" == "stella" ]; then
__init_stella_env
if [ "$ACTION" == "api" ]; then
if [ "$ID" == "list" ]; then
echo "$(__api_list)"
fi
fi
if [ "$ACTION" == "install" ]; then
if [ "$ID" == "dep" ]; then
__stella_requirement
fi
fi
if [ "$ACTION" == "version" ]; then
if [ "$ID" == "print" ]; then
v1=$(__get_stella_flavour)
v2=$(__get_stella_version)
echo $v1 -- $v2
fi
fi
if [ "$ACTION" == "search" ]; then
if [ "$ID" == "path" ]; then
echo "$(__get_active_path)"
fi
fi
if [ "$ACTION" == "deploy" ]; then
# Deploy options are space-separated keywords consumed by __transfert_stella.
_deploy_options=
[ "$CACHE" == "1" ] && _deploy_options="CACHE"
[ "$WORKSPACE" == "1" ] && _deploy_options="$_deploy_options WORKSPACE"
__transfert_stella "$ID" "$_deploy_options"
fi
fi
echo "** END **"
| true
|
8a6d0af8b23e9de65b46fcecb4fc9c260eef71b5
|
Shell
|
forensictool/coex
|
/gen_change_log.sh
|
UTF-8
| 779
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
# gen_change_log.sh -- print a Debian-changelog-style change log assembled
# from git tags and commit messages, newest tag first.
package_name="coex"
version=$(git describe --long)
maintainer_name="$(git config user.name)"
maintainer_email="$(git config user.email)"
echo "|----------------------------------------------------------------------------------------|"
# BUG FIX: this previously read  echo "$ papackage_name ..."  -- the stray
# space after '$' made bash print the literal text instead of expanding the
# package-name variable.
echo "$package_name ($version) ; urgency=low"
# Walk tags newest first; for each tag print the commits between it and the
# next newer tag (NEXT).  On the first iteration NEXT is empty, so
# "$TAG.." covers everything since the latest tag ("[Current]").
git tag -l | sort -u -r | while read TAG ; do
echo
if [ -n "$NEXT" ];then
echo [$NEXT]
else
echo "*[Current]"
fi
GIT_PAGER=cat git log --no-merges --format=" * %s" $TAG..$NEXT
NEXT=$TAG
done
FIRST=$(git tag -l | head -1)
#echo
# NOTE(review): strict Debian format would wrap the email in <angle
# brackets>; kept as-is to preserve existing output.
echo " -- $maintainer_name $maintainer_email $(date -R) --"
echo "|----------------------------------------------------------------------------------------|"
# FIRST is only used by the commented-out command below.
#GIT_PAGER=cat git log --no-merges --format=" * %s" $FIRST
| true
|
c6effd49d9b1189335c535cbe1e83e8919c8f20e
|
Shell
|
davidandreoletti/dotfiles
|
/.oh-my-shell/shellrc/plugins/python/functions.sh
|
UTF-8
| 254
| 2.625
| 3
|
[] |
no_license
|
# Recursively delete common Python build/run artefacts (*.log, *.pyc files
# and *.egg-info metadata) under the directory given as $1.
function f_python_clean_artefacts() {
    local dirPath="$1"
    find "$dirPath" -type f -name "*.log" -exec rm -vf {} \;
    find "$dirPath" -type f -name "*.pyc" -exec rm -vf {} \;
    # BUG FIX: *.egg-info is normally a *directory* (setuptools metadata),
    # so the original "-type f" filter never matched it even though rm -r
    # was used.  Match any type; -prune keeps find from descending into an
    # entry we are about to delete.
    find "$dirPath" -name "*.egg-info" -prune -exec rm -rvf {} \;
}
| true
|
864df95726f39549457aaa59a6e2293870cb0e2c
|
Shell
|
Emceelamb/nime
|
/Centralize/master/monitor
|
UTF-8
| 1,419
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/bash
# monitor -- point a networked Axis PTZ camera at a named preset via its
# HTTP CGI control interface.  Usage: monitor [machine-1..machine-5|you|loop]
# Any other (or no) argument selects the default wide view (feed0).
camera=$1
# Preset pan/tilt/zoom CGI URLs for the camera at 192.168.8.119.
feed0="192.168.8.119/axis-cgi/com/ptz.cgi?pan=-10&tilt=8&zoom=0"
feed1="192.168.8.119/axis-cgi/com/ptz.cgi?pan=1&tilt=0&zoom=700"
feed2="192.168.8.119/axis-cgi/com/ptz.cgi?pan=0&tilt=11&zoom=1200"
feed3="192.168.8.119/axis-cgi/com/ptz.cgi?pan=-15&tilt=13&zoom=1000"
feed4="192.168.8.119/axis-cgi/com/ptz.cgi?pan=-10&tilt=0&zoom=800"
feed5="192.168.8.119/axis-cgi/com/ptz.cgi?pan=-20&tilt=0&zoom=1000"
# "you": swing the camera 180 degrees (toward the operator, presumably).
you="192.168.8.119/axis-cgi/com/ptz.cgi?pan=180&tilt=0&zoom=0"
case $camera in
machine-1)
curl $feed1
echo Watching feed $camera.
;;
machine-2)
curl $feed2
echo Watching feed $camera.
;;
machine-3)
curl $feed3
echo Watching feed $camera.
;;
machine-4)
curl $feed4
echo Watching feed $camera.
;;
machine-5)
curl $feed5
echo Watching feed $camera.
;;
you)
curl $you
echo Watching $camera.
;;
loop)
# Cycle through every preset with short pauses, ending back on the
# default wide view.
echo Starting loop.
curl $feed0
sleep 1s
curl $feed1
sleep 1s
curl $feed2
sleep 1s
curl $feed3
sleep 1s
curl $feed4
sleep 1s
curl $feed5
sleep 1s
curl $feed0
sleep 3s
curl $you
sleep 5s
curl $feed0
;;
*)
# Default: wide overview shot.
curl $feed0
echo Watching all.
;;
esac
| true
|
edce4573245d7c8428fd19c134d879d0bd207829
|
Shell
|
kumargan/build-containers-for-git-branches-and-deploy
|
/scripts/start-build.sh
|
UTF-8
| 1,266
| 3.515625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# BUG FIX: shebang was "#!bin bash", which is not a valid interpreter path.
#
# Builds a docker image for the given git branch ($1) using the repo's own
# build-docker.sh, pushes it to ECR, then stops the service's running ECS
# tasks so they restart with the new image.
# The ECS container needs to have roles for ECR.
# BUG FIX: was  build_logs=`pwd`build.logs  -- the missing "/" glued the
# file name onto the directory name.
build_logs="$(pwd)/build.logs"
echo "log path " "$build_logs"
echo "starting build"
repoName=git-repo-name
dockerLocalRepo=local-repo-name:latest
dockerRemoteRepo=account.dkr.ecr.us-east-1.amazonaws.com/image-name:latest
ecsServiceName=service-name
ecsClusterName=cluster-name
# Clone the repository on first run; reuse the working copy afterwards.
if [ -d "$repoName" ]; then
  echo " repo already available " > "$build_logs"
else
  git clone "git@github.com:TimeInc/$repoName.git" > "$build_logs"
fi
cd "./$repoName" || exit 1
git checkout "$1" >> "$build_logs"
git pull "git@github.com:TimeInc/$repoName.git" >> "$build_logs"
sh build-docker.sh >> "$build_logs"
echo "pushing the container" >> "$build_logs"
# Log in to ECR, then tag and push the freshly built image.
$(aws ecr get-login --no-include-email --region us-east-1) >> "$build_logs"
docker tag "$dockerLocalRepo" "$dockerRemoteRepo" >> "$build_logs"
docker push "$dockerRemoteRepo" >> "$build_logs"
# Stop every running task of the service so ECS relaunches them with the
# new image.  jq -r emits the ARNs without surrounding quotes, replacing
# the original cut-based quote stripping.
aws ecs list-tasks --cluster "$ecsClusterName" --service "$ecsServiceName" --region us-east-1 \
| jq -r '.taskArns[]' \
| while read -r taskARN; do
  echo "$taskARN"
  aws ecs stop-task --task "$taskARN" --cluster "$ecsClusterName" --region us-east-1
done
rm -rf "../$repoName"
| true
|
4b8f83b2b93c4ede12bc46bdafe418bb15ee9566
|
Shell
|
sogis/gis-sv
|
/development_dbs/pgconf_editdb/post-start-hook.sh
|
UTF-8
| 2,190
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/bash
# This file is sourced by the start.sh script. Changes made to environment
# variables or adding environment variables will take effect in the shell
# postgres is running in. Anything added to this file will be executed
# after the database has been started.
echo_info "Executing post-start-hook.." # add below this line
if [[ -v PGHOST ]]; then # Hack: Detects if DB starts for the first time (in this case PGHOST=/tmp; this is set in https://github.com/CrunchyData/crunchy-containers/blob/4dcbbf676523e613a571c3f79bb844d03643866f/bin/postgres/start.sh#L334)
echo "Waiting for PostgreSQL to start.."
# Poll until the server accepts connections, then run the hooks once.
while true; do
pg_isready \
--host=${PGHOST} \
--port=${PG_PRIMARY_PORT} \
--username=${PG_PRIMARY_USER?} \
--timeout=2
if [ $? -eq 0 ]; then
echo "The database is up and running ... now running post hooks"
break
fi
sleep 2
done
# create empty alw_strukturverbesserungen datastructure
echo "Creating data structure 'alw_strukturverbesserungen' for edit db.."
psql -d $PG_DATABASE --single-transaction \
-v PG_DATABASE=$PG_DATABASE \
-v PG_WRITE_USER=$PG_WRITE_USER \
-v PG_WRITE_PASSWORD=$PG_WRITE_PASSWORD \
-v PG_READ_USER=$PG_READ_USER \
-v PG_READ_PASSWORD=$PG_READ_PASSWORD \
-f /pgconf/alw_strukturverbesserungen_edit.sql
echo "\nDone creating data structure 'alw_strukturverbesserungen' for edit db.."
# Create additional DB users and grant privileges
psql -d $PG_DATABASE --single-transaction \
-v PG_DATABASE=$PG_DATABASE \
-v PG_WRITE_USER=$PG_WRITE_USER \
-v PG_WRITE_PASSWORD=$PG_WRITE_PASSWORD \
-v PG_READ_USER=$PG_READ_USER \
-v PG_READ_PASSWORD=$PG_READ_PASSWORD \
-f /pgconf/grants.sql
# optional migration
# first check if directory exists
if [[ -d "/migration" ]]
then
# BUG FIX: this message line was missing its echo/printf, so the bare
# string was executed as a command ("command not found").
printf '\n importing SQL files for migration\n'
# Glob instead of parsing ls output; skip cleanly when no .sql files exist.
for i in /migration/*.sql; do
[ -e "$i" ] || continue
echo "importing sql-file $i"
psql -d $PG_DATABASE -U $PG_USER < "$i"
done
fi
fi
echo_info "Executing post-start-hook finished. Edit database is ready for use."
| true
|
78db49024c6fc82cc69fc8526ae0bf65955f0203
|
Shell
|
cwpearson/cuda_mem_info
|
/ci/build.sh
|
UTF-8
| 426
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
# or_die CMD [ARGS...] -- run CMD; if it fails, report the exit status and
# the command to stderr and abort the script with that same status.
function or_die () {
    "$@"
    local status=$?
    if [[ $status != 0 ]] ; then
        # Diagnostics belong on stderr; the original echoed unquoted words
        # to stdout, which word-split the arguments.
        echo "ERROR $status command: $*" >&2
        exit $status
    fi
}
# Trace every command for CI debugging.
set -x
# Load the CI user's environment (toolchain paths etc.).
source ~/.bashrc
cd ${TRAVIS_BUILD_DIR}
or_die mkdir travis-build
cd travis-build
# Configure and compile only when this matrix entry requests a build.
if [[ "$DO_BUILD" == "yes" ]] ; then
or_die cmake ../ -DCMAKE_C_COMPILER="${C_COMPILER}" -DCMAKE_CXX_COMPILER="${CXX_COMPILER}"
or_die make VERBOSE=1
fi
exit 0
| true
|
b2d1ed3dd42211c638da030cf68cbbd0219916b2
|
Shell
|
ClinicalSoftwareSolutions/TiTools
|
/makeTiIcons.bash
|
UTF-8
| 1,838
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
# Convert a source image file to the various icon size by resizing
# Uses ImageMagick's `convert`; run from the root of a Titanium/Alloy
# project with a 512x512 source icon as $1.
# For alloy apps
PREFIX="app/assets"
# For non-alloy apps
#PREFIX="Resources"
# Print invocation help.
usage ()
{
echo $0 [source icon 512x512]
}
# $1 must be an existing image file.
if [ ! -f "$1" ]; then
echo "A source image has not been specified"
usage
exit 2
fi
# Sanity check: the asset directory must exist, i.e. we are at the project
# root.
if [ ! -d "$PREFIX" ]; then
echo "The directory structure to create the files in does not exist. Maybe this is not the root of an Appclerator project"
usage
exit 1
fi
# iOS app icons
echo "Making the iOS icon images"
# Input should be the 512 icon
convert "$1" -resize 512x512 $PREFIX/iphone/iTunesArtwork
#convert "$1" -resize 512x512 $PREFIX/iphone/appicon@512.png
convert "$1" -resize 114x114 $PREFIX/iphone/appicon@2x.png
convert "$1" -resize 72x72 $PREFIX/iphone/appicon-72.png
convert "$1" -resize 144x144 $PREFIX/iphone/appicon-72@2x.png
convert "$1" -resize 57x57 $PREFIX/iphone/appicon.png
convert "$1" -resize 50x50 $PREFIX/iphone/appicon-Small-50.png
convert "$1" -resize 100x100 $PREFIX/iphone/appicon-Small-50@2x.png
convert "$1" -resize 29x29 $PREFIX/iphone/appicon-Small.png
convert "$1" -resize 58x58 $PREFIX/iphone/appicon-Small@2x.png
# new iOS 7 icons
echo "Making the iOS 7 icon images"
convert "$1" -resize 40x40 $PREFIX/iphone/appicon-Small-40.png
convert "$1" -resize 80x80 $PREFIX/iphone/appicon-Small-40@2x.png
convert "$1" -resize 120x120 $PREFIX/iphone/appicon-60@2x.png
convert "$1" -resize 76x76 $PREFIX/iphone/appicon-76.png
convert "$1" -resize 152x152 $PREFIX/iphone/appicon-76@2x.png
# Android
echo "Making the Android icon images"
# Rounded-corner 128x128 icon: draw a round-rectangle mask and composite
# the source into it.
convert -size 128x128 xc:none -fill white -draw 'roundRectangle 0,0 128,128 10,10' "$1"[128x128] -compose SrcIn -composite $PREFIX/android/appicon.png
# mobile web
echo "Making the mobileweb images"
convert "$1" -resize 128x128 $PREFIX/mobileweb/appicon.png
| true
|
7e118c33176617772095711ee7eff6eedfd620d8
|
Shell
|
paulburton/Arch-Linux-Repository
|
/ubuntu-testing/glewmx/PKGBUILD
|
UTF-8
| 1,232
| 2.65625
| 3
|
[] |
no_license
|
# Maintainer: György Balló <ballogy@freestart.hu>
# Contributor: Stéphane Gaudreault <stephane@archlinux.org>
# Contributor: SleepyDog
# Arch Linux PKGBUILD for "glewmx": GLEW built with GLEW_MX=1 (multiple
# rendering context support), sharing sources with the regular "glew"
# package.
pkgname=glewmx
_pkgname=glew
pkgver=1.5.8
pkgrel=1
pkgdesc="The OpenGL Extension Wrangler Library"
arch=('i686' 'x86_64')
url="http://glew.sourceforge.net"
license=('BSD' 'MIT' 'GPL')
depends=('libxmu' 'libxi' 'mesa')
source=(http://downloads.sourceforge.net/${_pkgname}/${_pkgname}-${pkgver}.tgz
glew-1.5.2-add-needed.patch
glew-1.5.2-makefile.patch
glew-1.5.5-mx.patch)
sha1sums=('450946935faa20ac4950cb42ff025be2c1f7c22e'
'6b39a797de69f9a2efd547581f3e22bb3a36c017'
'd09c05e0fc3af9be0d193779df82fe82c703c561'
'17c6b92c00a8e7ef813fe1e746a4b1204a67bf5b')
# Apply the distro patches and build with multi-context (GLEW_MX) enabled.
build() {
cd "${srcdir}/${_pkgname}-${pkgver}"
patch -Np1 -i "$srcdir/glew-1.5.2-add-needed.patch"
patch -Np1 -i "$srcdir/glew-1.5.2-makefile.patch"
patch -Np1 -i "$srcdir/glew-1.5.5-mx.patch"
make includedir=/include libdir=/lib bindir=/bin GLEW_MX=1
}
# Install only the libraries and license; headers are removed (presumably
# provided by the regular glew package -- note install-libs target).
package() {
cd "${srcdir}/${_pkgname}-${pkgver}"
make includedir=/include libdir=/lib bindir=/bin GLEW_MX=1 GLEW_DEST="${pkgdir}/usr" install-libs
rm -rf "${pkgdir}/usr/include"
install -D -m644 LICENSE.txt "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"
}
| true
|
ca2fe4a23e89ad6e5c6f9b189c3a17ce48d9bcd4
|
Shell
|
353388947/seed
|
/script/get_python_translations
|
UTF-8
| 622
| 2.765625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Pull the fr_CA / en_US gettext catalogs for the SEED Platform project from
# Lokalise, compile them with msgfmt and install them under locale/.
tmp=tmp/python_locales
dest=locale
mkdir -p $tmp
# Export both languages as .po files (delivered zipped) into $tmp.
lokalise \
--config lokalise.cfg \
export \
--langs fr_CA,en_US \
--type po \
--include_comments 1 \
--export_sort a_z \
--export_empty base \
--dest $tmp
unzip $tmp/SEED_Platform-locale.zip -d $tmp
# Install each .po and compile the binary .mo next to it (msgfmt path is
# the Homebrew gettext location).
mv $tmp/locale/fr_CA.po $dest/fr_CA/LC_MESSAGES/django.po
/usr/local/opt/gettext/bin/msgfmt -o $dest/fr_CA/LC_MESSAGES/django.{mo,po}
mv $tmp/locale/en_US.po $dest/en_US/LC_MESSAGES/django.po
/usr/local/opt/gettext/bin/msgfmt -o $dest/en_US/LC_MESSAGES/django.{mo,po}
rm -rf $tmp
| true
|
0a78b92c1d7c24aeaea08e7b3b258b1a7c422965
|
Shell
|
Tmacshamgod/DevOps-script
|
/start.sh
|
UTF-8
| 314
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
# Launcher for the "whale" scheduler-export service: builds a Java classpath
# from every jar under $APP_HOME/lib and starts the main class detached.
APP_HOME=/home/admin/whale
CLASSPATH=$APP_HOME
# Append each bundled jar to the classpath.
for i in "$APP_HOME"/lib/*.jar; do
CLASSPATH="$CLASSPATH":"$i"
done
# nohup + & keep the service running after the shell exits.
nohup java -classpath $CLASSPATH com.cloutropy.platform.scheduler_export.Main &
# more concise way
# nohup java -classpath .:./lib/* com.cloutropy.platform.scheduler_export.Main &
| true
|
4d44d720ac19b2bf10c8ab59079df49fd282f074
|
Shell
|
palakpsheth/Trovagene
|
/bin/IGV_2.3.79/igv.sh
|
UTF-8
| 586
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/sh
#This script is intended for launch on *nix machines
# Headless launch wrapper for IGV: runs igv.jar inside xvfb-run so no real
# X display is required.
#Xvfb :1 -screen 0 1920x1200x32 &
#export DISPLAY=":1"
export LD_LIBRARY_PATH=/usr/lib/x86_64-linux-gnu/
#-Xmx4000m indicates 4000 mb of memory, adjust number up or down as needed
#Script must be in the same directory as igv.jar
#Add the flag -Ddevelopment = true to use features still in development
# Resolve this script's directory (following a symlink if invoked via one)
# so igv.jar can be located next to it.
prefix=`dirname $(readlink $0 || echo $0)`
# exec replaces this shell with the JVM: 16 GB heap on a virtual
# 1920x1200x16 screen; all script arguments are forwarded to IGV.
exec xvfb-run --listen-tcp -a -s '-screen 0 1920x1200x16' java -Xmx16000m \
-Dapple.laf.useScreenMenuBar=true \
-Djava.net.preferIPv4Stack=true \
-jar "$prefix"/igv.jar "$@"
| true
|
7ce548e31ff4de29d6815bd38939f71bc4818184
|
Shell
|
Akio333/BridgeLabz-15Days-bootcamp-solutions
|
/Day6/case/problem2.sh
|
UTF-8
| 455
| 3.921875
| 4
|
[] |
no_license
|
# 2. Write a function to check if the two numbers are Palindromes
# palindrome NUM1 NUM2 -- reverses the digits of NUM1 and reports whether
# the result equals NUM2 (i.e. the two numbers are palindromes of each
# other).
function palindrome() {
    local num=$1
    local other=$2
    local digit
    local rev=""
    # Peel off the last digit of num each pass and append it to rev.
    while [ "$num" -gt 0 ]
    do
        digit=$(( num % 10 ))
        num=$(( num / 10 ))
        # BUG FIX: was  rev=$(( echo ${rev}${s} ))  -- wrapping echo in
        # arithmetic expansion is a syntax error; plain string
        # concatenation is what was intended.
        rev="${rev}${digit}"
    done
    # Guard: when NUM1 is 0 the loop never runs; treat the reversal as 0.
    rev=${rev:-0}
    if [ "$other" -eq "$rev" ]
    then
        echo "Numbers are palindrome"
    else
        echo "Numbers are NOT palindrome"
    fi
}
# Prompt for the two numbers and compare them.
echo "Enter first Number:"
read -r num1
echo "Enter second Number:"
read -r num2
# BUG FIX: was  palindrome num1 num2  -- that passed the literal strings
# "num1"/"num2" instead of the values read above.
palindrome "$num1" "$num2"
| true
|
b31f4f50d3e8e761029121df869c4417c35fae75
|
Shell
|
ucsb-igert/nsc
|
/processing/Synthetic/reduce
|
UTF-8
| 541
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/sh
# Reduces the DBLP datasets to 1000 nodes each.
# NOTE(review): despite the comment above, this copy operates on the
# Synthetic dataset; the actual reduction is done by tools/reduce.py.
# Work relative to this script's directory so the ../../ paths resolve.
cd $(dirname $0)
DATA=../../data/Synthetic/synthetic.data
GRAPH=../../data/Synthetic/synthetic.graph
mkdir -p ../../data/Synthetic/reduced
COUNT=1000
DATA_REDUCED=../../data/Synthetic/reduced/$COUNT.synthetic.data
GRAPH_REDUCED=../../data/Synthetic/reduced/$COUNT.synthetic.graph
# Only reduce when either output file is missing (simple caching).
if ! [ -f $DATA_REDUCED ] || ! [ -f $GRAPH_REDUCED ]; then
echo "Reducing 'synthetic' to $COUNT nodes..."
../../tools/reduce.py $DATA $GRAPH $DATA_REDUCED $GRAPH_REDUCED --count $COUNT
fi
| true
|
6304bd4b41826f9af877b4951f2bfbed4ce15d2d
|
Shell
|
sweptr/zy-fvwm
|
/cam/www.cl.cam.ac.uk/~pz215/fvwm-scripts/scripts/build-remote-menu
|
UTF-8
| 518
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# Emit fvwm menu entries for ssh/sshfs targets discovered in
# ~/.ssh/known_hosts.  $1 and $2 are the icon names embedded in the labels.
sshhosts=~/.ssh/known_hosts
if ! test -f $sshhosts ; then
# No known_hosts file: emit a single error entry that just beeps.
echo "+ \"No $sshhosts%stock_dialog-error%\" Beep"
exit
fi
# discover remote hosts based on
# One "RemoteShell" entry per unique host name found in known_hosts.
for host in $(grep -o -e '^[a-z][a-zA-Z0-9.]*' $sshhosts | sort -u)
do
echo "+ \"$host%$1%\" Recent RemoteShell $host"
done
# make fusermount points
# For every mount directory under ~/mnt, emit sshfs-mount entries for hosts
# whose name starts with that directory's name.
for dir in $(ls ~/mnt)
do
for host in $(grep -o -e "^$dir[a-zA-Z0-9.]*" $sshhosts)
do
echo "+ \"$host%$2%\" Recent Exec sshfs $host:/ ~/mnt/$dir; exec rox ~/mnt/$dir"
done
done
| true
|
cae1eb674d69aab42fcb9cd6f7c880c14ca27c32
|
Shell
|
phcerdan/configuration_files
|
/load_scripts/binUbuntuMassey/cuda.sh
|
UTF-8
| 415
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
# Prepend the CUDA toolkit under /usr/local/cuda to the search paths.
# The $cuda_loaded sentinel guards against adding the entries twice when
# this script is sourced repeatedly.
export cuda_folder=/usr/local/cuda
case "$cuda_loaded" in
*loaded*)
  echo "cuda already loaded"
  ;;
*)
  export cuda_loaded='loaded'
  # Modify path for dependancies of OPENFOAM: CGAL
  export PATH="$cuda_folder/bin:$PATH"
  export LD_LIBRARY_PATH="$cuda_folder/lib:$LD_LIBRARY_PATH"
  echo 'Module cuda loaded'
  ;;
esac
|
3eaedf5c23a64f63e19e74547f4a1aa6c4e7ba2b
|
Shell
|
svetlyak40wt/replwm
|
/.envrc
|
UTF-8
| 728
| 2.84375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Convenience environment for hacking on the replwm project.  Feel free to
# change whatever you like in here while developing; its only purpose is to
# make playing with the window manager easier.

# Project binaries first on PATH.
export PATH="$PWD"/bin:"$PATH"

# Remember the display we were launched from (falling back to :0), then
# point everything else at the nested display :1.
ORIGINAL_DISPLAY="${DISPLAY:-:0}"
export ORIGINAL_DISPLAY
export DISPLAY=":1"

# Locations of the bundled Lisp systems and project files.
export CL_SOURCE_REGISTRY="$PWD"/src/clx:"$PWD"/src/wm-test
export REPLWM_PROJECT_ROOT="$PWD"
export REPLWM_SYSTEM_PATH="$PWD"/src/replwm/system.lisp
export REPLWM_ASDF_PATH="$PWD"/src/replwm/replwm.asd

# Prefer zsh for the in-WM shell when it is installed, else the login shell.
XREPL_SHELL="$(command -v zsh)"
[ -n "$XREPL_SHELL" ] || XREPL_SHELL="$SHELL"
export XREPL_SHELL
| true
|
5473f1ddb8b81daf5c3c7e32021fb87d05ad8f93
|
Shell
|
shimokirichi/scripts-ratelimit
|
/DEVICE-ASSIGN-DOWN
|
UTF-8
| 263
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/sh
#corresponds to assign_device as used in C code
# Marks downlink traffic for a device with an iptables MARK so it can be
# rate-limited elsewhere.  Arguments: $1=mark $2=mac $3=ip.
if [ $# -eq 3 ]
then
IP=$3
MAC=$2
MARK=$1
echo $1 , $2 , $3
#DOWNLINK
# Tag every forwarded packet destined for the device's IP with the mark.
# (Note: $MAC is captured but not used by this rule.)
iptables -A FORWARD -t mangle -d ${IP} -j MARK --set-mark $MARK
else
echo "Wrong number of arguments. Order is mark mac IP"
fi
| true
|
6287d5d896882ced9631e6582b2eb7be2ee928ba
|
Shell
|
czxxjtu/wxPython-1
|
/tags/wxPy-2.8.0.2/wxPython/distrib/makedemo
|
UTF-8
| 1,157
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
#----------------------------------------------------------------------
# Package the wxPython demo and samples into
# dist/wxPython-demo-<version>.tar.bz2.  Must be run from the root wxPython
# directory (checked below).
if [ ! -d wxPython ]; then
echo "Please run this script from the root wxPython directory."
exit 1
fi
# Ask setup.py for the version string used in paths and the tarball name.
VERSION=`python -c "import setup;print setup.VERSION"`
mkdir _distrib_tgz
mkdir _distrib_tgz/wxPython-$VERSION
cp -R demo _distrib_tgz/wxPython-$VERSION
cp -R samples _distrib_tgz/wxPython-$VERSION
# do some cleanup
# Strip VCS metadata, bytecode, core dumps, build leftovers and editor
# droppings from the staged copy before archiving.
rm -rf `find _distrib_tgz/wxPython-$VERSION -name CVS`
rm -f `find _distrib_tgz/wxPython-$VERSION -name "*.pyc"`
rm -f `find _distrib_tgz/wxPython-$VERSION -name .cvsignore`
rm -f `find _distrib_tgz/wxPython-$VERSION -name "core.[0-9]*"`
rm -f `find _distrib_tgz/wxPython-$VERSION -name "core"`
rm -f `find _distrib_tgz/wxPython-$VERSION -name wxPython`
rm -f `find _distrib_tgz/wxPython-$VERSION -name "*.o"`
rm -f `find _distrib_tgz/wxPython-$VERSION -name "*.so"`
rm -f `find _distrib_tgz/wxPython-$VERSION -name "*~"`
rm -f `find _distrib_tgz/wxPython-$VERSION -name ".#*"`
# Archive the staged tree and compress it into dist/.
cd _distrib_tgz
tar cvf ../dist/wxPython-demo-$VERSION.tar wxPython-$VERSION
bzip2 -9 -f ../dist/wxPython-demo-$VERSION.tar
cd ..
rm -r _distrib_tgz
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.