blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
dc841d4c81aeaa4d5dda599f03635919d8d6b1b2
|
Shell
|
openshift/file-integrity-operator
|
/utils/git-remote.sh
|
UTF-8
| 559
| 3.625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# git-remote.sh prints the remote associated with the upstream
# openshift/file-integrity-operator repository. If it can't determine the
# appropriate remote based on a known URL, it defaults to using "origin", which
# is backwards compatible with the release process.
REMOTE_URL=origin
for REMOTE in $(git remote); do
    # Look up the fetch URL configured for this remote name.
    URL=$(git config --get "remote.$REMOTE.url")
    if [[ "$URL" = "https://github.com/openshift/file-integrity-operator" ]]; then
        REMOTE_URL=$REMOTE
        break
    fi
done
echo "$REMOTE_URL"
| true
|
add8d0a3c375722b8cbbeabe8beb759a9d3ac991
|
Shell
|
shvetsm/i3blocks
|
/weather
|
UTF-8
| 2,295
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
# i3blocks weather widget. Based on http://openweathermap.org/current
# Prints up to three lines (presumably i3blocks' full_text, short_text and
# color; the empty third line keeps the default color — TODO confirm against
# the i3blocks protocol docs).
API_KEY="4df99cda6022071432d72bf1fe8409e5"
# Check on http://openweathermap.org/find
CITY_ID="4560349"
#CITY_ID="${BLOCK_INSTANCE}"
# Temperatures outside this range get warning colors (imperial units below,
# i.e. these are Fahrenheit).
URGENT_LOWER=40
URGENT_HIGHER=90
# Icon glyphs (private-use font characters; ICON_STORM is currently unused).
ICON_SUNNY=""
ICON_CLOUDY=""
ICON_RAINY=""
ICON_STORM=""
ICON_SNOW=""
ICON_FOG=""
# NOTE: despite the name, this is the Fahrenheit symbol — the API is queried
# with units=imperial.
SYMBOL_CELSIUS="℉"
WEATHER_URL="http://api.openweathermap.org/data/2.5/weather?id=${CITY_ID}&appid=${API_KEY}&units=imperial"
WEATHER_INFO=$(wget -qO- "${WEATHER_URL}")
# "[a-Z]" is not a portable bracket range (locale-dependent; rejected outright
# by some greps) — use an explicit alpha class instead.
WEATHER_MAIN=$(echo "${WEATHER_INFO}" | grep -o -e '\"main\":\"[A-Za-z]*\"' | awk -F ':' '{print $2}' | tr -d '"')
WEATHER_TEMP=$(echo "${WEATHER_INFO}" | grep -o -e '\"temp\":\-\?[0-9]*' | awk -F ':' '{print $2}' | tr -d '"')
TEMP_STR=""
if [[ "${WEATHER_TEMP}" -lt "${URGENT_LOWER}" ]] ; then
TEMP_STR="<span foreground='#5f819d'>${WEATHER_TEMP}${SYMBOL_CELSIUS}</span>"
elif [[ "${WEATHER_TEMP}" -gt "${URGENT_HIGHER}" ]]; then
TEMP_STR="<span foreground='#cc6666'>${WEATHER_TEMP}${SYMBOL_CELSIUS}</span>"
else
TEMP_STR="<span foreground='#b5bd68'>${WEATHER_TEMP}${SYMBOL_CELSIUS}</span>"
fi
if [[ "${WEATHER_MAIN}" = *Snow* ]]; then
echo "<span foreground='#8abeb7'>${ICON_SNOW}</span> ${TEMP_STR}"
echo "<span foreground='#8abeb7'>${ICON_SNOW}</span> ${TEMP_STR}"
echo "";
elif [[ "${WEATHER_MAIN}" = *Rain* ]] || [[ "${WEATHER_MAIN}" = *Drizzle* ]]; then
echo "<span foreground='#5f819d'>${ICON_RAINY}</span> ${TEMP_STR}"
echo "<span foreground='#5f819d'>${ICON_RAINY}</span> ${TEMP_STR}"
echo "";
elif [[ "${WEATHER_MAIN}" = *Cloud* ]]; then
echo "<span foreground='#81a2be'>${ICON_CLOUDY}</span> ${TEMP_STR}"
echo "<span foreground='#81a2be'>${ICON_CLOUDY}</span> ${TEMP_STR}"
echo "";
elif [[ "${WEATHER_MAIN}" = *Clear* ]]; then
echo "<span foreground='#f0c674'>${ICON_SUNNY}</span> ${TEMP_STR}"
echo "<span foreground='#f0c674'>${ICON_SUNNY}</span> ${TEMP_STR}"
echo "";
elif [[ "${WEATHER_MAIN}" = *Fog* ]] || [[ "${WEATHER_MAIN}" = *Mist* ]]; then
echo "<span foreground='#898989'>${ICON_FOG}</span> ${TEMP_STR}"
echo "<span foreground='#898989'>${ICON_FOG}</span> ${TEMP_STR}"
echo "";
else
echo "<span foreground='#85678f'>${WEATHER_MAIN}</span> ${TEMP_STR}"
echo "<span foreground='#85678f'>${WEATHER_MAIN}</span> ${TEMP_STR}"
echo ""
fi
| true
|
7011b52fbc4add45f65e21bfc1ab7f75a08b3d87
|
Shell
|
vmogilev/ales3front
|
/s3upload_diversified.sh
|
UTF-8
| 585
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
# Upload a local file to S3 under uploads/release/<dir>/, forcing the
# browser download filename via Content-Disposition.
FILE=$1
SDIR=$2
ROOT=$3
BUCK=$4
# Print usage and abort.
usage() {
    name=$(basename "$0")
    echo "${name} localfile dir_under_release rootname bucket "
    exit 1
}
# All four arguments are required.
if [ -z "$FILE" ] || [ -z "$SDIR" ] || [ -z "$ROOT" ] || [ -z "$BUCK" ]; then
    usage;
fi
# The source file must exist; quoted so paths containing spaces work.
if [ ! -f "$FILE" ]; then
    usage;
fi
key=$(basename "$FILE")
echo "
FILE=${FILE}
SDIR=${SDIR}
ROOT=${ROOT}
BUCK=${BUCK}
key=${key}
"
# Quoting preserves the exact key/filename even with unusual characters;
# --content-disposition value expands to: filename="<rootname>"
aws s3api put-object --bucket "${BUCK}" --key "uploads/release/${SDIR}/${key}" \
    --body "${FILE}" \
    --content-disposition "filename=\"${ROOT}\""
| true
|
44e55b64c19cd0805fb2df5ae7cafeace880fe7c
|
Shell
|
larsks/openldap
|
/tests/scripts/test085-homedir
|
UTF-8
| 3,307
| 3.265625
| 3
|
[
"OLDAP-2.8",
"LicenseRef-scancode-proprietary-license",
"BSD-4.3RENO",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
#! /bin/sh
# $OpenLDAP$
## This work is part of OpenLDAP Software <http://www.openldap.org/>.
##
## Copyright 2021-2022 The OpenLDAP Foundation.
## All rights reserved.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted only as authorized by the OpenLDAP
## Public License.
##
## A copy of this license is available in the file LICENSE in the
## top-level directory of the distribution or, alternatively, at
## <http://www.OpenLDAP.org/license.html>.
# test085-homedir: exercises the slapd "homedir" overlay — home directories
# are created, renamed and archived in response to LDAP add/modify/delete.
echo "running defines.sh"
. $SRCDIR/scripts/defines.sh
# Skip cleanly when the overlay was not compiled in.
if test $HOMEDIR = homedirno; then
echo "Homedir overlay not available, test skipped"
exit 0
fi
mkdir -p $TESTDIR $DBDIR1 $TESTDIR/home $TESTDIR/archive
$SLAPPASSWD -g -n >$CONFIGPWF
echo "rootpw `$SLAPPASSWD -T $CONFIGPWF`" >$TESTDIR/configpw.conf
echo "Running slapadd to build slapd database..."
# Substitute @MINUID@ with the current uid so the overlay manages our user.
. $CONFFILTER $BACKEND < $HOMEDIRCONF | sed "s/@MINUID@/`id -u`/" > $CONF1
$SLAPADD -f $CONF1 -l $LDIF
RC=$?
if test $RC != 0 ; then
echo "slapadd failed ($RC)!"
exit $RC
fi
echo "Starting slapd on TCP/IP port $PORT1..."
$SLAPD -f $CONF1 -h $URI1 -d $LVL > $LOG1 2>&1 &
PID=$!
# With WAIT set, pause so a debugger can be attached to the slapd PID.
if test $WAIT != 0 ; then
echo PID $PID
read foo
fi
KILLPIDS="$PID"
sleep 1
echo "Using ldapsearch to check that slapd is running..."
# Poll up to ~30s for the server to come up before giving up.
for i in 0 1 2 3 4 5; do
$LDAPSEARCH -s base -b "$MONITOR" -H $URI1 \
'objectclass=*' > /dev/null 2>&1
RC=$?
if test $RC = 0 ; then
break
fi
echo "Waiting 5 seconds for slapd to start..."
sleep 5
done
if test $RC != 0 ; then
echo "ldapsearch failed ($RC)!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
exit $RC
fi
echo "Adding a new user..."
$LDAPADD -D "$MANAGERDN" -H $URI1 -w $PASSWD <<EOMOD >> $TESTOUT 2>&1
dn: uid=user1,ou=People,$BASEDN
objectClass: account
objectClass: posixAccount
uid: user1
cn: One user
uidNumber: `id -u`
gidNumber: `id -g`
homeDirectory: /home/user1
EOMOD
RC=$?
if test $RC != 0 ; then
echo "ldapadd failed ($RC)!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
exit $RC
fi
# Give the overlay a moment to act, then verify the directory appeared.
sleep 1
if ! test -e $TESTDIR/home/user1 ; then
echo "Home directory for user1 not created!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
exit 1
fi
echo "Moving home directory for user1..."
$LDAPMODIFY -D "$MANAGERDN" -H $URI1 -w $PASSWD <<EOMOD >> $TESTOUT 2>&1
dn: uid=user1,ou=People,$BASEDN
changetype: modify
replace: homeDirectory
homeDirectory: /home/user1_new
EOMOD
RC=$?
if test $RC != 0 ; then
# NOTE(review): message says "ldapadd" but this is the ldapmodify step.
echo "ldapadd failed ($RC)!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
exit $RC
fi
sleep 1
if test -e $TESTDIR/home/user1 || ! test -e $TESTDIR/home/user1_new ; then
echo "Home directory for user1 not moved!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
exit 1
fi
echo "Removing user1, should get archived..."
$LDAPDELETE -D "$MANAGERDN" -H $URI1 -w $PASSWD \
"uid=user1,ou=People,$BASEDN" >> $TESTOUT
RC=$?
if test $RC != 0 ; then
echo "ldapdelete failed ($RC)!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
exit $RC
fi
sleep 1
# On delete, the overlay should tar the home directory into the archive dir.
if test -e $TESTDIR/home/user1_new || \
! test -e $TESTDIR/archive/user1_new-*-0.tar ; then
echo "Home directory for user1 not archived properly!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
exit 1
fi
test $KILLSERVERS != no && kill -HUP $KILLPIDS
test $KILLSERVERS != no && wait
echo ">>>>> Test succeeded"
exit 0
| true
|
793c97b310059bfb0d9785a139abcba84844f7f8
|
Shell
|
mauriciopuente/fwtemp
|
/safari_homepage.sh
|
UTF-8
| 787
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# Set the Safari homepage for the user template and for every existing local
# user account (macOS / FileWave deployment script).
#log all to filewave client log
exec 1>>/var/log/fwcld.log
exec 2>>/var/log/fwcld.log
#temp=$(dscl . list /Users | grep -v '^_')
# Local accounts = entries under /Users minus the shared/localized ones.
temp=$(ls /Users | grep -v ".localized" | grep -v "Shared")
echo " ----------------------------------------------- "
echo " "
echo "These are the users on this Computer: "$temp
echo " "
echo " ----------------------------------------------- "
echo " "
HOMEPAGE="http://www.youtube.com"
# Quote the plist path and value so backslash-escaped spaces/arguments can't
# word-split if the URL ever contains spaces.
defaults write "/System/Library/User Template/English.lproj/Library/Preferences/com.apple.Safari" HomePage -string "$HOMEPAGE"
for i in $temp
do
    echo "Setting HomePage for ${i} ..."
    defaults write "/Users/${i}/Library/Preferences/com.apple.Safari" HomePage -string "$HOMEPAGE"
    #sudo -u ${i} /usr/bin/defaults write com.apple.Safari HomePage $HOMEPAGE
done
| true
|
4eca90fb3f6d19be8906e7b0910dc7a9aa47d9c0
|
Shell
|
currant77/aribtraj
|
/scripts/exportbackup
|
UTF-8
| 559
| 4.125
| 4
|
[] |
no_license
|
#!/bin/bash
# Creates a backup of the given heroku app's database
# Arguments:
# - [app-name] heroku app
# - [filename] file name (optional; default is 'backup.dump')
if [[ $1 == "" ]]; then
    echo "Failed: requires app name as first argument"
    exit 1
fi
name=backup
if [[ $2 != "" ]]; then
    name=$2
fi
name=data/${name}.dump
# Create directory if it doesn't exist.
# -p instead of GNU-only --parents so this also works on macOS/BSD mkdir.
mkdir -p data
echo -e "creating heroku backup for app $1 called $name\n"
heroku pg:backups:capture --app "$1"
heroku pg:backups:download --app "$1" --output "$name"
echo "done"
exit 0
| true
|
d7000e7349bb35e094fb25216b746defc9e51a1d
|
Shell
|
fyyy4030/my-tricks
|
/ri.sh
|
UTF-8
| 280
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
# Download a file or directory from the remote host's home directory:
# tar it remotely, scp the archive here, delete it remotely, unpack locally.
# (NOTE(review): the usage text mentions "ti.sh" but this file is ri.sh.)
echo "用法:把文件夹或文件下载到96的home下 本地运行bash ti.sh xxx(文件夹名)"
# Without a name the remote tar/rm commands would misfire — bail out early.
if [ -z "$1" ]; then
    exit 1
fi
ssh -p 5102 zihao_wang@183.174.228.96 "tar cf '$1.tar' '$1'"
scp -P 5102 "zihao_wang@183.174.228.96:$1.tar" ./
ssh -p 5102 zihao_wang@183.174.228.96 "rm -f '$1.tar'"
tar xf "$1.tar"
rm -f "$1.tar"
| true
|
d006a6bb6ff8b3406659336e07c0516b25c04b6d
|
Shell
|
Zelzahn/config-files
|
/i3/i3scripts/swap.sh
|
UTF-8
| 152
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
# i3blocks helper: print swap usage (human readable) only when more than
# 100 MiB of swap is in use; otherwise print nothing.
swap=$( free -h | grep 'Swap:' | awk '{print $3}' )
# Numeric used-swap in MiB; quoted with a :-0 default so the integer test
# cannot turn into a syntax error if free's output is empty/unexpected.
swap_mb=$( free -m | grep 'Swap:' | awk '{print $3}' )
if [ "${swap_mb:-0}" -gt 100 ]
then
    echo "${swap}"
fi
| true
|
c770c1ff82743d3a0cb7a234a421711a3801a4aa
|
Shell
|
afiqmuzaffar/gruut
|
/scripts/format-code.sh
|
UTF-8
| 1,578
| 3.46875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Format the gruut Python sources with black and isort.
set -e
# Directory of *this* script
this_dir="$( cd "$( dirname "$0" )" && pwd )"
src_dir="$(realpath "${this_dir}/..")"
if [[ "$1" == '--no-venv' ]]; then
    no_venv='1'
fi
# Activate the project's virtualenv unless --no-venv was given.
if [[ -z "${no_venv}" ]]; then
    venv="${src_dir}/.venv"
    if [[ -d "${venv}" ]]; then
        source "${venv}/bin/activate"
    fi
fi
python_files=("${src_dir}/gruut/"*.py)
# Add bin scripts selectively
bin_scripts=('align2phonemeids' 'clean-metadata' 'csv2phonemeids' 'fst2npy' 'map_lexicon' 'phonemize_lexicon' 'print_phonemeids' 'reorder_lexicon' 'espeak_word')
# Include optional scripts only when their backing package is installed.
# (Bash substring match replaces the original `[ "$(echo … | grep …)" ]`
# pipeline — same result, no subprocesses per line.)
while read -r python_lib; do
    if [[ "${python_lib}" == *phonetisaurus* ]]; then
        bin_scripts+=('phonetisaurus_per')
    elif [[ "${python_lib}" == *aeneas* ]]; then
        bin_scripts+=('librivox_align')
    fi
done < <(pip3 freeze)
for script_name in "${bin_scripts[@]}"; do
    python_files+=("${src_dir}/bin/${script_name}.py")
done
# -----------------------------------------------------------------------------
# Add language data modules
while read -r lang_module_dir; do
    if [[ -f "${lang_module_dir}/__init__.py" ]]; then
        lang_dir="$(dirname "${lang_module_dir}")"
        python_files+=("${lang_module_dir}/"*.py "${lang_dir}/"*.py)
    fi
done < <(find "${src_dir}" -mindepth 2 -maxdepth 2 -name 'gruut_lang_*' -type d)
# -----------------------------------------------------------------------------
black "${python_files[@]}"
isort "${python_files[@]}"
# -----------------------------------------------------------------------------
echo "OK"
| true
|
a0fc85ba1f18e9243dbff9a836d85dc2179676a2
|
Shell
|
bdorney/GEM_AMC
|
/scripts/ctp7_bash_scripts/gth_config_opto.sh
|
UTF-8
| 1,077
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/sh
# Configure all 36 GTH transceiver channels on a CTP7 board by poking the
# control registers via mpoke. Channel registers are 256 bytes apart.
gth_rst_ch0_base_addr=0x69000004
gth_ctrl_ch0_base_addr=0x69000008
gth_ctrl_addr=$gth_ctrl_ch0_base_addr
# NOTE(review): gth_rst_addr is set but never used in this script — confirm
# whether a reset step was intended.
gth_rst_addr=$gth_rst_ch0_base_addr
# Regular GTH config/ctrl value:
# Bit 0: = 1, TX_PD: Transmitter powered down
# Bit 1: = 0, RX_PD: Receiver active
# Bit 2: = 0, TX_POLARITY: not inverted
# Bit 3: = 0, RX_POLARITY: not inverted
# Bit 4: = 0, LOOPBACK: not active
# Bit 5: = 1, TX_INHIBIT: TX laser deactived
# Bit 6: = 1, LPMEN: RX equalizer low power mode enabled!!
data_reg_ch=0x40
# Special, Ch#11 config/ctrl value:
# The same as above, with Bit 3 = 1 (RX polarity inverted)
data_ch_11=0x48
#configure all GTH channels
for i in $(seq 0 1 35)
do
# Apply special config for Ch#11
# (base + 256*11 equals the running gth_ctrl_addr at this iteration)
if [ $i -eq 11 ]
then
printf "Applying special inverted RX channel config for GTH CH#11...\n"
mpoke $((gth_ctrl_ch0_base_addr+256*11)) $data_ch_11
else
#Apply standard, regular channel config
mpoke $gth_ctrl_addr $data_reg_ch
fi
#Move on to the next channel
gth_ctrl_addr=$(($gth_ctrl_addr+256))
done
printf "Done with GTH channel configuration.\n"
| true
|
d42db12c48db0e95212c7c3bc5e79f502f47ccee
|
Shell
|
ialves19/WGS
|
/computing_alleleSharing_Dist.bash
|
UTF-8
| 1,131
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
# SGE batch job: run the allele-sharing-by-distance R script for one
# chromosome. The "#$" lines below are Sun Grid Engine directives.
#$ -S /bin/bash
#$ -cwd
#$ -N alS_$JOB_ID
#$ -o alS_o_$JOB_ID
#$ -e alS_e_$JOB_ID
#$ -m a
#$ -M Isabel.Alves@univ-nantes.fr
##################### RE-WRITE
##
##
## This script needs to be launched with the following command:
## for i in `seq 1 22`; do qsub computing_alleleSharing_Dist.bash chrID mac distInKms
## done
#####################
#setting the sart of the job
res1=$(date +%s.%N)
export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/commun/data/users/abihouee/miniconda3/pkgs/libgcc-4.8.5-1/lib
r_scriptName="plotting_excessSharing_geoDist.R"
chmod +x ${HOME}/$r_scriptName
# Positional arguments: chromosome id, minor allele count threshold, distance (km).
chrID=$1
macThreshold=$2
distKm=$3
wkingDir="/mnt/beegfs/ialves"
# Create the output directory on first use.
if [ ! -d "${wkingDir}/rare_by_dist" ];
then
mkdir ${wkingDir}/rare_by_dist
fi
/commun/data/packages/R/R-3.1.1/bin/Rscript ${HOME}/$r_scriptName $chrID $macThreshold $distKm
#timing the job
# Elapsed time is split into days/hours/minutes/seconds with bc (values may
# be fractional seconds, hence bc rather than shell arithmetic).
res2=$(date +%s.%N)
dt=$(echo "$res2 - $res1" | bc)
dd=$(echo "$dt/86400" | bc)
dt2=$(echo "$dt-86400*$dd" | bc)
dh=$(echo "$dt2/3600" | bc)
dt3=$(echo "$dt2-3600*$dh" | bc)
dm=$(echo "$dt3/60" | bc)
ds=$(echo "$dt3-60*$dm" | bc)
echo "Total runtime: $dd days $dh hrs $dm min $ds secs"
| true
|
8b7df7bf188d80644357564bd7b339705ead3565
|
Shell
|
omu/debian
|
/etc/zsh/zshrc
|
UTF-8
| 330
| 2.828125
| 3
|
[
"MIT"
] |
permissive
|
# /etc/zsh/zshrc: system-wide .zshrc file for zsh(1).
#
# This file is sourced only for interactive shells. It
# should contain commands to set up aliases, functions,
# options, key bindings, etc.
#
# Global Order: zshenv, zprofile, zshrc, zlogin
# Source every drop-in snippet. The zsh glob qualifiers "(.N)" restrict the
# match to plain files (.) and make the glob expand to nothing when there is
# no match (N), so the loop is simply skipped on an empty directory.
for f in {/etc,/usr/local/etc}/zsh/zshrc.d/*.sh(.N); do
source "$f"
done
unset f
| true
|
7120478650afa23f05e6a19d1392c4ef6e759481
|
Shell
|
McDutchie/sbodeps
|
/sbodeps
|
UTF-8
| 12,637
| 3.765625
| 4
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#! /bin/bash
# sbodeps - sbopkg build queue maker with dependency resolution
#
# --- begin license ---
# Copyright 2015 Martijn Dekker <martijn@inlv.org>, Groningen, Netherlands.
#
# Redistribution and use of this script, with or without modification, is
# permitted provided that the following conditions are met:
#
# 1. Redistributions of this script must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
# NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# --- end license ---
#
# Version history:
# 2016-01-23: 1.2:
# - Now deals correctly with SBo-git/current and other custom repos
# - Rewrite is_installed(): it returned false positives
# - Option -r/--sync runs 'sbopkg -r'; this is for conveniently combining
# it with other options, such as 'sbodeps -rci' (sync, check, install).
# Note: -r is no longer short for --remove.
# 2016-01-12: 1.1:
# - new '-c'/'--check' option, much faster than 'sbopkg -c'
# - by default, don't consider installed non-repo packages in dep resolution
# - new -n/--nonrepo option: consider installed non-repo packages in
# dependency resolution and/or update checking
# - by default, write comments to standard error and only packages to
# standard output, so that sbodeps can be used in $(command substitution)
# 2015-10-02: 1.0.1:
# - better use of bash features, incl. shell arith for option checking
# - bugfix in check for contradictory options: -Q was not considered
# 2015-06-14: 1.0:
# - recoded for bash; coding style of sbopkg adopted
# - relicensed with same license as sbopkg
# 2015-05-21: 0.2.2:
# - portability and robustness tweaks
# 2015-02-22: 0.2.1:
# - remove packages in reverse build order
# - error if no packages given
# 2014-03-21: 0.2: initial release.
version='1.2 [26 Jan 2016]'
# --- utility functions ---
# Exit gracefully on error. Usage: die [ -u ] <exitstatus> [ <message> ]
function die {
    local show_usage=0 status
    # Optional -u flag: also print a pointer to --help.
    if [[ $1 == '-u' ]]; then
        show_usage=1
        shift
    fi
    status=$1
    shift
    # Any remaining words form the diagnostic message (to stderr).
    if (($#)); then
        echo "$self: $*" 1>&2
    fi
    if ((show_usage)); then
        echo "Type $self --help for help." 1>&2
    fi
    exit $status
}
# Returns successfully if a query string $1 is equal to at least one of the
# rest of the arguments, unsuccessfully if not.
# Speed-optimized, but requires strings that do not contain slashes.
# Not a problem for sbodeps: we're working with (parts of) filenames.
function appears_in {
    local needle=$1 IFS=/   # IFS=/ makes "$*" join the list with slashes
    shift
    # Wrap the joined list in slashes so every element is /-delimited, then
    # match the needle as one complete element.
    [[ "/$*/" == */"$needle"/* ]]
}
# Returns successfully if any $REPO_NAME build of a Slackware package $1 is
# installed, unsuccessfully if not. If opt_nonrepo is non-zero, test for
# any build at all. Depends on slash-separated variable $installed_packages.
# NOTE(review): the comment above mentions $installed_packages, but the code
# below actually uses per-package isinst_* variables — comment may be stale.
#
# As a speed optimization, set a variable for each installed package and
# check if the specified package's variable is set instead of doing repeated
# globbing on /var/log/packages.
# Lazy initialization: the first call builds the cache, then *redefines*
# is_installed with the fast lookup version and re-dispatches to it.
function is_installed {
init_is_installed
is_installed "$@"
}
function init_is_installed {
local tag
((opt_nonrepo)) && tag=* || tag=$TAG
# For every /var/log/packages/NAME-VERSION-ARCH-BUILDTAG entry, emit and
# eval "isinst_NAME=y" (sed strips the version-arch-build suffix and maps
# non-identifier characters to underscores).
eval $(printf '%s\n' /var/log/packages/*-*-*-[0-9]$tag \
| sed 's|^/var/log/packages/|isinst_|
s/-[^-][^-]*-[^-][^-]*-[0-9][^-]*$//
s/[^A-Za-z0-9_]/_/g
s/$/=y/')
# Fast path: succeed iff the package's cache variable is set.
function is_installed {
local v=isinst_${1//[!A-Za-z0-9_]/_}
[[ -n ${!v+s} ]]
}
}
# --- UI functions ---
# Print the program name and version (uses global $version).
function showversion {
echo "sbodeps version $version"
}
# Print usage text to stdout; interpolates $self, $QUEUEDIR and $REPO_NAME.
function showhelp {
cat <<EOF
Usage: $self [OPTION...] [PACKAGENAME...]
Generate a sbopkg build queue for one or more packages or check for possible
updates to already-installed packages, resolving dependencies.
Options:
-a, --all include all dependencies, even those already installed
-Q, --queue store build queue in $QUEUEDIR/PACKAGENAME.sqf
-q FILE, --qfile=FILE, --queuefile=FILE
store build queue in FILE
-i, --install instead of storing build queue, install packages now
--remove remove a package and its dependencies (be careful!)
-c, --check add potential updates (and any new deps) to build queue
-n, --nonrepo consider non-$REPO_NAME installed packages in dep.res./updates
-r, --sync sync the repo (run 'sbopkg -r') before doing anything else
-v, --version show version number
-h, --help show this help
-L, --licen[sc]e show license
EOF
}
# Print the license: extract the commented block between the begin/end
# license markers from this very script file ($0).
function showlicense {
sed -nE 's/^# ?//; /\-\-\- begin license/,/\-\-\- end license/p' "$0"
}
# --- core function ---
# Recursively resolve dependencies of a given SBo package, adding them to
# the space-separated global variable $buildq in build order.
function resolve_deps {
local PRGNAM= REQUIRES= # sourced from .info files
local dep # recursive loop variable
# Glob for the package's .info file; after this, $1 = package name and
# $2 = its .info path. More than one match means a corrupt repo.
set -- "$1" "$repobase"/*/"$1/$1.info"
(($# > 2)) && die 3 "$REPO_NAME repository corrupted!"
[[ -f "$2" ]] || die 1 "Package $1 not found in $REPO_NAME/$REPO_BRANCH"
source "$2"
[[ $PRGNAM == "$1" ]] || die 3 "$REPO_NAME repository corrupted!"
for dep in $REQUIRES; do
# %README% is SBo's marker for optional deps documented in the README;
# emit a pointer on fd 3 (the comment channel) instead of recursing.
if [[ $dep == '%README%' ]]; then
if ((!opt_remove)); then
printf 1>&3 '# See %s for optional dependencies.\n' \
$repobase/*/$PRGNAM/README
fi
continue
fi
# avoid duplicates and infinite loops
if appears_in $dep $buildq; then
continue
fi
# perform recursion
resolve_deps $dep
# Unless -a/--all: skip deps already installed (or, when removing,
# skip deps that are NOT installed).
if ((!opt_all)); then
if ((!opt_remove)); then
if is_installed $dep; then
continue
fi
else
if ! is_installed $dep; then
continue
fi
fi
fi
buildq+=" $dep"
done
}
# Check against the repo for updates of installed SBo packages, adding potential
# updates (and any non-installed dependencies) to the build queue.
function check_for_updates {
local tag pkgfile scriptline \
curprgnam curversion curarch curbuild curtag \
PRGNAM VERSION BUILD
((opt_nonrepo)) && tag=* || tag=$TAG
set -- /var/log/packages/*-*-*-[0-9]$tag
# unresolved glob: there are no matching packages at all
[[ $#,$1 == "1,/var/log/packages/*-*-*-[0-9]$tag" ]] && return
for pkgfile do
# Parse NAME-VERSION-ARCH-BUILDTAG from the filename, peeling fields
# off the right-hand end one at a time.
curtag=${pkgfile##*-}
curbuild=${curtag%%[!0-9]*}
curtag=${curtag#"$curbuild"}
curarch=${pkgfile%"-$curbuild$curtag"}
curarch=${curarch##*-}
curversion=${pkgfile%"-$curarch-$curbuild$curtag"}
curversion=${curversion##*-}
curprgnam=${pkgfile%"-$curversion-$curarch-$curbuild$curtag"}
curprgnam=${curprgnam#/var/log/packages/}
PRGNAM=
VERSION=
BUILD=
# Locate the package's .info file in the repo (skip non-repo packages).
set -- $repobase/*/$curprgnam/$curprgnam.info
(($# > 1)) && die 3 "$REPO_NAME repository corrupted!"
[[ -f "$1" ]] || continue
# Get PRGNAM and VERSION from the .info file
source "$1"
# Unfortunately BUILD is not included in the .info file, so we have
# to glean it from the SlackBuild script
while read -r scriptline; do
if [[ $scriptline == BUILD=* ]]; then
eval "$scriptline"
break
fi
done < ${1%.info}.SlackBuild
if [[ $curprgnam != "$PRGNAM" || -z $VERSION || -z $BUILD ]]; then
die 3 "$REPO_NAME repository corrupted!"
fi
# Up to date: same version and same build+tag.
if [[ $curversion == "$VERSION" && $curbuild$curtag == "$BUILD$TAG" ]]; then
continue
fi
printf 1>&3 '# Available: %s %s (build %s) <> %s (build %s)\n' \
"$PRGNAM" "$curversion" "$curbuild$curtag" "$VERSION" "$BUILD$TAG"
resolve_deps "$PRGNAM" # in case there are new dependencies
buildq+=" $PRGNAM"
done
}
# --- initialization ---
set -o nounset
self=${0##*/}
exec 3>&2 # buildq comments go to stderr by default
# source configuration variables from sbopkg configuration
[[ -x /usr/sbin/sbopkg && -r /etc/sbopkg/sbopkg.conf ]] || die 3 'Requires sbopkg.'
source /etc/sbopkg/sbopkg.conf || die 3 'sbopkg.conf corrupted'
if [[ -r ~/.sbopkg.conf ]]; then
source ~/.sbopkg.conf || die 3 ~/.sbopkg.conf 'corrupted'
fi
# for function resolve_deps
repobase=$REPO_ROOT/$REPO_NAME/$REPO_BRANCH
if ! [ -d "$repobase" ]; then
# SBo-git/current doesn't use repro-branch subdirectory
repobase=$REPO_ROOT/$REPO_NAME
if ! [ -d "$repobase" ]; then
die 3 "Repository $REPO_NAME not found in $REPO_ROOT"
fi
fi
buildq=''
# for functions is_installed and check_for_updates: find the package tag
# (e.g. '_SBo') which is usually identical to "_${REPO_BRANCH}" but is
# different in SBo-git/currrent
# Each repos.d line holds shell-quoted fields; field 1 = repo name,
# field 2 = branch, field 4 = the package tag we want.
TAG=$(cat /etc/sbopkg/repos.d/[0-9][0-9]-*.repo | while read -r line; do
[[ $line == \#* ]] && continue
eval "set -- $line" # these conveniently use shell quoting for fields
if [[ $1 == "$REPO_NAME" && $2 == "$REPO_BRANCH" ]]; then
echo "$4"
break
fi
done)
if [[ -z $TAG ]]; then
die 3 "Repository branch $REPO_NAME/$REPO_BRANCH not found in /etc/sbopkg/repos.d"
fi
# --- parse options ---
opt_all=0 opt_install=0 opt_remove=0 opt_q=0 qfile='' opt_check=0 opt_nonrepo=0 opt_sync=0
# getopts trick: '-:' makes getopts accept "--long" as option '-' with the
# rest in OPTARG, which the first if-block re-splits into name and argument.
while getopts 'aiq:QcnrvhL-:' opt; do
if [[ $opt == '-' ]]; then
# support long options in forms --option and --option=argument
opt=-${OPTARG%%=*}
[[ $opt == "-$OPTARG" ]] && OPTARG='' || OPTARG=${OPTARG#*=}
fi
case "$opt" in
( a | -all )
opt_all=1 ;;
( i | -install )
opt_install=1 ;;
( -remove )
opt_remove=1 ;;
( q | -qfile | -queuefile )
[[ -n $OPTARG ]] || die -u 2 "-$opt: option requires an argument"
opt_q=1; qfile=$OPTARG ;;
( Q | -queue )
opt_q=1 ;;
( c | -check )
opt_check=1 ;;
( n | -nonrepo )
opt_nonrepo=1 ;;
( r | -sync)
opt_sync=1 ;;
( v | -version )
showversion; exit ;;
( h | -help )
showversion; showhelp; exit ;;
( L | -licen[sc]e )
showversion; showlicense; exit ;;
( '?' )
die -u 2 ;; # error msg already printed by getopts
( -* )
die -u 2 "invalid option: -$opt" ;;
( * )
die 4 "option parsing: internal error" ;;
esac
done
shift $((OPTIND-1))
# --- check options ---
if ((! $# && ! opt_check && ! opt_sync)); then
die -u 2 'No package name, -c or -r given.'
fi
# -i/-Q/--remove are mutually exclusive, and -c cannot combine with --remove.
if ((opt_install + opt_remove + opt_q > 1 || opt_check + opt_remove > 1))
then
die -u 2 'Contradictory options specified.'
fi
if ((opt_sync)); then
sbopkg -r || die $? "sbopkg -r failed"
fi
# redirect standard output (1) and comments (3) to queue file if requested
if ((opt_q)); then
if [[ -z $qfile ]]; then
qfile=$QUEUEDIR/${*:-potential_updates}.sqf
qfile=${qfile// /+}
fi
exec >$qfile 3>&1 || die $? "Could not open queue file for writing."
echo "Storing queue file in: $qfile" 1>&2
fi
if ((opt_check)); then
check_for_updates
if [[ -z $buildq ]]; then
((! $#)) && die 1 "No updates in $REPO_NAME/$REPO_BRANCH."
echo 1>&3 "# No updates in $REPO_NAME/$REPO_BRANCH."
fi
fi
# --- main loop ---
# Resolve each named package's deps, then append the package itself.
for pkg in $*; do
resolve_deps $pkg
if ! appears_in $pkg $buildq; then
buildq+=" $pkg"
fi
done
# --- process or output the results ---
if ((opt_install)); then
printf -v args -- '-i %s ' $buildq
echo '+ sbopkg' $args
exec sbopkg $args
elif ((opt_remove)); then
# Remove in reverse build order (tac), with interactive confirmation
# matched against the locale's yes-expression.
args=$(printf '%s\n' $buildq | tac)
echo '+ removepkg' $args
read -r -p 'Really remove all these packages? ' reply
if [[ $reply =~ $(locale yesexpr) ]]; then
removepkg $args
else
die 1 'Cancelled.'
fi
else
# output build queue
printf '%s\n' $buildq
fi
| true
|
9e06517445944ce9ae2e9eed742045a351ea8d53
|
Shell
|
jeroentbt/spark
|
/roles/_unused/mapping/files/showme.sh
|
UTF-8
| 351
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/sh
# Geocode a place name (arg 1) with the Mapbox CLI, fetch a satellite static
# map centered on it (zoom = arg 2, default 13), and open it in a viewer.
IMG_VIEWER="feh -x --auto-zoom --title showme"
location="$(mapbox geocoding "$1" | jq -c .features[0])"
lon=$(echo "$location" | jq .center[0])
lat=$(echo "$location" | jq .center[1])
# ${TMPDIR:-/tmp}: the original broke when TMPDIR was unset/not exported.
tmp=$(mktemp "${TMPDIR:-/tmp}/$(uuidgen).png.XXX")
mapbox staticmap --lat "$lat" --lon "$lon" --zoom "${2:-13}" --size 1279 1279 \
    mapbox.satellite "$tmp"
# $IMG_VIEWER is deliberately eval'd unquoted so it can carry its own options.
eval $IMG_VIEWER "$tmp"
| true
|
471ae08008c49082304808ef797dd5ce41465721
|
Shell
|
olivermontes/git-recent
|
/git-recent
|
UTF-8
| 1,792
| 3.921875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
##
## git-recent
##
## list all local branches, sorted by last commit, formatted reall purdy
##
# Parse flags. -r/--remotes lists remote-tracking branches instead of local
# ones; -h/--heads and --default are accepted but currently unused.
# (The original also did an extra `shift` inside the -r/-h arms, which
# silently swallowed the *following* argument; flags take no value, so a
# single shift per iteration is correct.)
while [[ $# -gt 0 ]]
do
key="$1"
case $key in
-r|--remotes)
REMOTES="$1"
;;
-h|--heads)
HEADS="$1"
;;
--default)
DEFAULT=YES
;;
*)
# unknown option
;;
esac
shift # past argument or value
done
formatlocal="\
%(HEAD) %(color:yellow)%(refname:short)%(color:reset)|\
%(color:bold red)%(objectname:short) %(color:bold green)(%(committerdate:relative)) %(color:blue)%(authorname) %(color:reset)%(color:yellow)%(upstream:track)%0a\
%(color:black) %(color:reset)|%(contents:subject)%0a\
|"
formatremote="\
%(HEAD) %(color:green)%(refname:short)%(color:reset)|\
%(color:bold red)%(objectname:short) %(color:bold green)(%(committerdate:relative)) %(color:blue)%(authorname) %(color:reset)%(color:yellow)%(upstream:track)%0a\
%(color:black) %(color:reset)|%(contents:subject)%0a\
|"
# Pager options as an array: the original passed a single quoted string
# ('less "$lessopts"'), which less receives as one bogus option and rejects;
# "${lessopts[@]}" expands to the individual options.
lessopts=(--tabs=4 --quit-if-one-screen --RAW-CONTROL-CHARS --no-init)
## Debug
## echo "Valor 1 ($1), Valor 2 ($REMOTES), Valor 3 ($HEADS)"
## Show local tracking Branch
if [[ "$REMOTES" != "" ]]; then
git for-each-ref \
--sort=-committerdate \
"refs/remotes" \
--format="$formatremote" \
| column -ts '|'
fi
if [[ -z "$REMOTES" ]]; then
git for-each-ref \
--sort=-committerdate \
"refs/heads" \
--format="$formatlocal" \
| column -ts '|' \
| less "${lessopts[@]}"
fi
# The above command:
# for all known branches,
# sort descending by last commit
# show local branches (change to "" to include both local + remote branches)
# apply the formatting template above
# break into columns
# use the pager only if there's not enough space
| true
|
7bd28218a2be36fd2730ab2f247bd86e980f5993
|
Shell
|
aidarbuy/metromed-rtc
|
/bash/clean_serve.sh
|
UTF-8
| 205
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/bash
# Clean the build output directory, then start the local dev server via gulp.
# Marker printed after each gulp task completes.
done_msg="done."
echo "Cleaning dist directory:"
gulp clean
echo "$done_msg"
echo "..."
clear
echo "Starting localhost"
gulp serve
echo "$done_msg"
| true
|
4864c0bd6c793c1c8b2560567ed42a96a91ec2da
|
Shell
|
elago/ela
|
/script.sh
|
UTF-8
| 325
| 2.671875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# CI helper: install coverage tooling and run the tests. On Go 1.6 run plain
# `go test`; otherwise collect a coverage profile and upload it to Coveralls.
go get golang.org/x/tools/cmd/cover
go get github.com/mattn/goveralls
# Non-empty iff `go version` mentions 1.6.
goversion=$(go version | grep 1.6)
# The original used `!==`, which is not a valid test(1) operator — the
# condition always errored out and the coverage branch ran unconditionally.
# `-ne 0` is the intended "version string is non-empty" check.
if [ "${#goversion}" -ne 0 ]; then
    go test
else
    go test -v -covermode=count -coverprofile=coverage.out
    "$HOME/gopath/bin/goveralls" -coverprofile=coverage.out -service=travis-ci -repotoken "$COVERALLS_TOKEN"
fi
| true
|
5ca01575543dc600e6f904ac672b19f030b7af5b
|
Shell
|
FrankXL/crypto-types
|
/refs/validate-all.sh
|
UTF-8
| 3,308
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/bash
# Validate the ietf-crypto-types YANG module and its usage examples with
# pyang and yanglint, including RPC / reply / notification instance data.
echo "Testing ietf-crypto-types.yang (pyang)..."
pyang --ietf --max-line-length=69 -p ../ ../ietf-crypto-types\@*.yang
pyang --canonical -p ../ ../ietf-crypto-types\@*.yang
echo "Testing ietf-crypto-types.yang (yanglint)..."
yanglint ../ietf-crypto-types\@*.yang
echo "Testing ex-crypto-types-usage.yang (pyang)..."
pyang --lint --max-line-length=69 -p ../ ../ex-crypto-types-usage.yang
echo "Testing ex-crypto-types-usage.yang (yanglint)..."
yanglint ../ex-crypto-types-usage.yang
echo "Testing ex-crypto-types-usage.xml..."
yanglint -m -s ../ex-crypto-types-usage.yang ../ietf-*\@*.yang ./ietf-origin.yang ex-crypto-types-usage.xml
# RPC examples: "-t auto" lets yanglint detect the data type; replies are
# validated together with the rpc that elicited them.
echo "Testing ex-crypto-types-ghk-rpc.xml..."
yanglint -s -t auto ../ietf-*\@*.yang ../ex-crypto-types-usage.yang ex-crypto-types-ghk-rpc.xml
echo "Testing ex-crypto-types-ghk-rpc-reply.xml..."
yanglint -s -t auto ../ietf-*\@*.yang ../ex-crypto-types-usage.yang ex-crypto-types-ghk-rpc-reply.xml ex-crypto-types-ghk-rpc.xml
echo "Testing ex-crypto-types-ihk-rpc.xml..."
yanglint -s -t auto ../ietf-*\@*.yang ../ex-crypto-types-usage.yang ex-crypto-types-ihk-rpc.xml
echo "Testing ex-crypto-types-ihk-rpc-reply.xml..."
yanglint -s -t auto ../ietf-*\@*.yang ../ex-crypto-types-usage.yang ex-crypto-types-ihk-rpc-reply.xml ex-crypto-types-ihk-rpc.xml
echo "Testing ex-crypto-types-gcsr-rpc.xml..."
yanglint -s -t auto ../ietf-*\@*.yang ../ex-crypto-types-usage.yang ex-crypto-types-gcsr-rpc.xml
echo "Testing ex-crypto-types-gcsr-rpc-reply.xml..."
yanglint -s -t auto ../ietf-*\@*.yang ../ex-crypto-types-usage.yang ex-crypto-types-gcsr-rpc-reply.xml ex-crypto-types-gcsr-rpc.xml
echo "Testing ex-crypto-types-ce-notification.xml..."
# Extract the notification payload with xmllint's shell mode, strip the
# prompt/noise lines with sed, then validate the payload with yanglint.
# NOTE(review): the namespace below says "neteonf" — this looks like a typo
# for "netconf" (urn:ietf:params:xml:ns:netconf:notification:1.0); confirm
# against the example XML file before changing it.
echo -e 'setns a=urn:ietf:params:xml:ns:neteonf:notification:1.0\nsetns b=urn:ietf:params:xml:ns:yang:ietf-crypto-types\ncat //a:notification/b:crypto-types' | xmllint --shell ex-crypto-types-ce-notification.xml | sed -e '/^\/.*/d' -e '/^ *$/d' > yanglint-notification.xml
yanglint -s -t notif -r ex-crypto-types-usage.xml ../ietf-*\@*.yang ../ex-crypto-types-usage.yang yanglint-notification.xml
rm yanglint-notification.xml
#echo "Testing ex-ce-notification.xml..."
#yanglint -r ex-crypto-types-usage.xml -t auto -s ../ex-crypto-types-usage\@*.yang ../ietf-crypto-types\@*.yang ex-ce-notification.xml
#echo "Testing ex-crypto-types-usage.yang (pyang)..."
#pyang --lint --max-line-length=70 -p ../ ../ex-crypto-types-usage\@*.yang
#echo "Testing ex-crypto-types-usage.yang (yanglint)..."
#yanglint ../ex-crypto-types-usage\@*.yang
#echo "Testing ex-crypto-types-usage.xml..."
#yanglint -p ../ -s ../ex-crypto-types-usage\@*.yang ../ietf-crypto-types\@*.yang ex-crypto-types-usage.xml
#echo "Testing ex-gpk-rpc.xml..."
#yanglint -p ../ -t auto -s ../ex-crypto-types-usage\@*.yang ../ietf-crypto-types\@*.yang ex-gpk-rpc.xml
#echo "Testing ex-gpk-rpc-reply.xml..."
#yanglint -p ../ -t auto -s ../ex-crypto-types-usage\@*.yang ../ietf-crypto-types\@*.yang ex-gpk-rpc-reply.xml ex-gpk-rpc.xml
#echo "Testing ex-gcsr-rpc.xml..."
#yanglint -p ../ -t auto -s ../ex-crypto-types-usage\@*.yang ../ietf-crypto-types\@*.yang ex-gcsr-rpc.xml
#echo "Testing ex-gcsr-rpc-reply.xml..."
#yanglint -p ../ -t auto -s ../ex-crypto-types-usage\@*.yang ../ietf-crypto-types\@*.yang ex-gcsr-rpc-reply.xml ex-gcsr-rpc.xml
| true
|
8bfd70c81a80782c7520b1f85db0f11bc77fbd5b
|
Shell
|
LohmuellerLab/OtterGenomeProject
|
/DiversityAnalyses/southernSeaOtterAnalyses/VEP/VEP.GeneticLoad.Scripts/VEP.Step5_FilterVEPForLoadCalcs.elut.50_250.CDSOnly.sh
|
UTF-8
| 3,255
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
#$ -N filterPbraVEP
#$ -l h_data=5G,h_rt=10:00:00,highp
#$ -m bea
#$ -M ab08028
#$ -cwd
########## Want to filter Elut VEP Results to get the following:
# synonymous_variant
# missense_variant (levels)
# homozygous/heterozygous
date=20171006 # date of vcfs
indir=/u/flashscratch/a/ab08028/otters/vcfs/vcf_${date}_50_250DPFilter/vep-output/
PREFIX=01_Elut_CA_Gidget
# can pull out all sorts of things
outdir=/u/flashscratch/a/ab08028/otters/vcfs/vcf_${date}_50_250DPFilter/vep-output/filteredVepOutput2-ForLoadCalcs
mkdir -p $outdir
vepdir=/u/home/a/ab08028/klohmueldata/annabel_data/bin/ensembl-vep/
#### Note: this only works for a tbl with a single individual!
############## a. Separate hets/homs (already done for Pbra): ##########################
### instead of using whole genome sites, am using cds sequences only
hetInput=${PREFIX}.raw_variants.cdsSequence.AllScaffsConcat.${date}.HQsites.Only.5.PASS-ONLY.rmClust.BiSNP.HF.Variants.Heterozygous.VEP.output.minimalInfo.tbl
homInput=${PREFIX}.raw_variants.cdsSequence.AllScaffsConcat.${date}.HQsites.Only.5.PASS-ONLY.rmClust.BiSNP.HF.Variants.HomozygousAlt.VEP.output.minimalInfo.tbl
### Another addition: want to make sure it's in canonical protein so different sites dont' count twice.
# 20180206 : added CANONICAL is YES
############## b. Filter Synonymous/NS ##########################
###### NS (missense):
$vepdir/filter_vep --filter "Consequence is missense_variant and CANONICAL is YES" \
--input_file $indir/$homInput \
--output_file $outdir/${homInput%.tbl}.missense.tbl \
--force_overwrite
# note: only-matched only pulls the part of the line that matches so that if something
# is annotated as >1 thing, only the matching part is included
$vepdir/filter_vep --filter "Consequence is missense_variant and CANONICAL is YES" \
--input_file $indir/$hetInput \
--output_file $outdir/${hetInput%.tbl}.missense.tbl \
--force_overwrite
###### LOF (stop_gained):
$vepdir/filter_vep --filter "Consequence is stop_gained and CANONICAL is YES" \
--input_file $indir/$homInput \
--output_file $outdir/${homInput%.tbl}.stopgained.tbl \
--force_overwrite
# -- count? only show a count? Eventually. Make sure filter works first.
$vepdir/filter_vep --filter "Consequence is stop_gained and CANONICAL is YES" \
--input_file $indir/$hetInput \
--output_file $outdir/${hetInput%.tbl}.stopgained.tbl \
--force_overwrite
###### LOF 2: stopgained in a DOMAIN
$vepdir/filter_vep --filter "Consequence is stop_gained and CANONICAL is YES and DOMAIN" \
--input_file $indir/$homInput \
--output_file $outdir/${homInput%.tbl}.stopgained.domain.tbl \
--force_overwrite
$vepdir/filter_vep --filter "Consequence is stop_gained and CANONICAL is YES and DOMAIN" \
--input_file $indir/$hetInput \
--output_file $outdir/${hetInput%.tbl}.stopgained.domain.tbl \
--force_overwrite
###### S (synonymous):
$vepdir/filter_vep --filter "Consequence is synonymous_variant and CANONICAL is YES" \
--input_file $indir/$homInput \
--output_file $outdir/${homInput%.tbl}.synonymous.tbl \
--force_overwrite
$vepdir/filter_vep --filter "Consequence is synonymous_variant and CANONICAL is YES" \
--input_file $indir/$hetInput \
--output_file $outdir/${hetInput%.tbl}.synonymous.tbl \
--force_overwrite
| true
|
531e57963bbdd05a9e8a998a96fd9527543559ca
|
Shell
|
anarchistMegaByte/gif-data-set
|
/tgifDataBase/code/gifs-filter/split-batches.sh
|
UTF-8
| 219
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
# vim ft=sh
lines=$1
data_dir=$2
cd $data_dir
prefix=sub
sort -R gif.urls | split -l $lines - $prefix
ls $prefix* | xargs -L 1 -I {} sh -c "mkdir -p batches/{}; mv {} batches/{}/gif.urls"
echo $data_dir
| true
|
35438492e0a1dbb0e373e33a959a84d72f7ad797
|
Shell
|
blingshark/netsharkscan
|
/netsharkscan.sh
|
UTF-8
| 3,138
| 3.75
| 4
|
[] |
no_license
|
#!/bin/bash
echo "Welcome to netsharkscan (beta_1.0)"
echo " "
echo "------List Interfaces------"
echo " "
ip link show | awk '{print "\033[m" $2 "\033[32m" $9 "\033[m"}'
echo " "
echo "--------------------------"
read -p "Choose interface (ex: wlan0) : " interface
clear
YourMacAddress=$(ip link show dev $interface |grep link/ether |awk '{print $2}')
YourAddress=$(ifconfig $interface | grep "inet " | awk '{print $2}')
MASK=$(ifconfig $interface | grep "inet " | awk '{print $4}')
tonum() {
if [[ $1 =~ ([[:digit:]]+)\.([[:digit:]]+)\.([[:digit:]]+)\.([[:digit:]]+) ]]; then
addr=$(( (${BASH_REMATCH[1]} << 24) + (${BASH_REMATCH[2]} << 16) + (${BASH_REMATCH[3]} << 8) + ${BASH_REMATCH[4]} ))
eval "$2=\$addr"
fi
}
toaddr() {
b1=$(( ($1 & 0xFF000000) >> 24))
b2=$(( ($1 & 0xFF0000) >> 16))
b3=$(( ($1 & 0xFF00) >> 8))
b4=$(( $1 & 0xFF ))
eval "$2=\$b1.\$b2.\$b3.\$b4"
}
tonum $YourAddress IPADDRNUM
tonum $MASK MASKNUM
#printf "IPADDRNUM: %x\n" $IPADDRNUM
#printf "MASKNUM: %x\n" $MASKNUM
# The logic to calculate network and broadcast
INVMASKNUM=$(( 0xFFFFFFFF ^ MASKNUM ))
NETWORKNUM=$(( IPADDRNUM & MASKNUM ))
BROADCASTNUM=$(( INVMASKNUM | NETWORKNUM ))
LastAddress=$(( INVMASKNUM -1 | NETWORKNUM ))
lss=$(( INVMASKNUM -1 ))
FistAddress=$(( NETWORKNUM +1 ))
FF=$(( 0x0090 ))
toaddr $FistAddress FistAddress
toaddr $LastAddress LastAddress
toaddr $NETWORKNUM NETWORK
toaddr $BROADCASTNUM BROADCAST
echo "Your MAC Address=$YourMacAddress"
printf "%-25s %s\n" "FF=$FF"
printf "%-25s %s\n" "Fist Address=$FistAddress"
printf "%-25s %s\n" "Last Address=$LastAddress"
printf "%-25s %s\n" "Your Address=$YourAddress"
printf "%-25s %s\n" "Your Mask=$MASK"
printf "%-25s %s\n" "Network Address=$NETWORK"
printf "%-25s %s\n" "BroadCast Address=$BROADCAST"
#printf "%-25s %s\n" "lss=$lss"
echo "--------------------------------------------"
echo "Hostname" " " "|" " " "IPaddr" " " "|" " " "MACaddr"
echo " "
function Hostname {
if [[ "${SUBNET}.$i" = "$YourAddress" ]]; then
hostname
else
arp -D ${SUBNET}.$i | grep ether | awk '{print $1}'
fi
}
function MACaddr {
if [[ "${SUBNET}.$i" = "$YourAddress" ]]; then
echo "$YourMacAddress"
else
arp -D ${SUBNET}.$i | grep ether | awk '{print $3}'
fi
}
function IPaddr {
echo "${SUBNET}.$i"
}
function port {
for ((counter=1; counter<=100; counter++))
do
(echo >/dev/tcp/${SUBNET}.$i/$counter) > /dev/null 2>&1 && echo "|$counter|"
done
}
delay=0.05
host1=$FistAddress
host2=$LastAddress
SUBNET=${host1%.*}
netId1=${host1#$SUBNET.}
netId2=${host2#$SUBNET.}
for ((i=netId1; i<=netId2; i++)); do
timeout ${delay} ping -s 1 -c 1 -W 1 -i 0.000001 -q ${SUBNET}.$i >& /dev/null
if [[ $? -eq 0 ]]; then
ARRAYS=($(Hostname) "|" $(IPaddr) "|" $(MACaddr))
echo ${ARRAYS[@]}
fi
done
echo "--------------------------------------------"
function ports {
for ((counter=1; counter<=100; counter++))
do
(echo >/dev/tcp/$ipadd/$counter) > /dev/null 2>&1 && echo "|$counter| open"
done
}
read -p "Scan ports? (yes or no) : " res
if [[ $res == yes ]]; then
read -p "Entre ip address : " ipaddrs
ports ipaddrs
else
echo "ok bye"
fi
| true
|
c7262868846880ad5f2e357c49561e642f2e14af
|
Shell
|
vnopenroads/openroads-vn-tiler
|
/scripts/generate-provincial-dumps.sh
|
UTF-8
| 575
| 3.5
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
set -e
echo "Ensure the necessary environment variables are set"
: "${DATABASE_URL:?}"
# Change to script's directory
cd "${0%/*}"
OUTPUT_DIR='.tmp/provinces/'
mkdir -p $OUTPUT_DIR
echo "Map all roads to provinces and join properties"
psql "${DATABASE_URL}" -f generate-provincial-dumps.sql
# Read .tmp/provincial_dump.csv, csv stringify and write to the respective province CSV.
echo "Preparing CSV per province"
./provincial-sort.js .tmp/provincial_dump.csv
echo "Backup..."
mkdir -p ../backup/by-province-id/
cp -r .tmp/provinces ../backup/by-province-id/
| true
|
d70cb25d7271a22695d1bcddf7000bf8eb61fff5
|
Shell
|
mohammadrizqialfian/Openstack-Stein-Opensuse-15.1-Leap
|
/09-newcompute.sh
|
UTF-8
| 8,059
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
source config.conf
echo -e "$red \n\n############## Starting running script Add New Compute Openstack ############## $color_off\n\n"
ssh-copy-id -i ~/.ssh/id_rsa.pub root@$IPCOMPUTE
echo -e "$red \n###Configre Hostname### $color_off"
ssh root@$IPMANAGEMENT "echo -e '$IPCOMPUTE\t $HOSTCOMPUTE' >> /etc/hosts"
ssh root@$IPCOMPUTE << _EOFNEWTEST_
echo -e "$red \n###Configre Hostname### $color_off"
[ -f /etc/hosts.orig ] && cp -v /etc/hosts.orig /etc/hosts
[ ! -f /etc/hosts.orig ] && cp -v /etc/hosts /etc/hosts.orig
echo -e "$IPMANAGEMENT\t $HOSTCONTROLLER" >> /etc/hosts
echo -e "$IPCOMPUTE\t $HOSTCOMPUTE" >> /etc/hosts
echo -e "$red \n###Configre NTP### $color_off"
#### Configure NTP ####
zypper -n install --no-recommends chrony
[ -f /etc/chrony.conf.orig ] && cp -v /etc/chrony.conf.orig /etc/chrony.conf
[ ! -f /etc/chrony.conf.orig ] && cp -v /etc/chrony.conf /etc/chrony.conf.orig
sed -i "s/^pool/#pool/" /etc/chrony.d/pool.conf
sed -i "s/^pool/#pool/" /etc/chrony.conf
sed -i "s/^! pool/#pool/" /etc/chrony.conf
echo "server $IPMANAGEMENT iburst" >> /etc/chrony.conf
systemctl enable chronyd.service
systemctl restart chronyd.service
chronyc sources
echo -e "$red \n###Install Openstack Client and adding repositories### $color_off"
#### Configure Repositories ####
[ ! -f /etc/zypp/repos.d/Stein.repo ] && zypper addrepo -f obs://Cloud:OpenStack:Stein/openSUSE_Leap_15.1 Stein
zypper --gpg-auto-import-keys refresh && zypper -n dist-upgrade
zypper -n install --no-recommends python2-openstackclient openstack-utils
cat << _EOF_ > keystonerc_admin
unset OS_SERVICE_TOKEN
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=$ADMINLOG
export PS1='[\u@\h \W(keystone_admin)]\$ '
export OS_AUTH_URL=http://$IPMANAGEMENT:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
_EOF_
[ ! -f /etc/zypp/repos.d/home_Sauerland.repo ] && zypper addrepo https://download.opensuse.org/repositories/home:Sauerland/openSUSE_Leap_15.2/home:Sauerland.repo
zypper --gpg-auto-import-keys refresh && zypper -n dist-upgrade
zypper -n install --no-recommends genisoimage openstack-nova-compute qemu-kvm libvirt
_EOFNEWTEST_
ssh root@$IPCOMPUTE "cat << _EOF_ > /etc/nova/nova.conf.d/500-nova.conf
[DEFAULT]
enabled_apis = osapi_compute,metadata
compute_driver = libvirt.LibvirtDriver
transport_url = rabbit://openstack:$RABBITPASS@$IPMANAGEMENT
my_ip = $IPCOMPUTE
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
resume_guests_state_on_host_boot = true
[api]
auth_strategy = keystone
[keystone_authtoken]
auth_url = http://$IPMANAGEMENT:5000/
memcached_servers = $IPMANAGEMENT:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = $NOVAPASS
[libvirt]
virt_type = $TYPEVIRT
##uncomment line dibawah ini jika ingin mengaktifkan nested virtualization
#cpu_mode = host-passthrough
[vnc]
enabled = true
server_listen = 0.0.0.0
server_proxyclient_address = $IPCOMPUTE
novncproxy_base_url = http://$IPMANAGEMENT:6080/vnc_auto.html
[glance]
api_servers = http://$IPMANAGEMENT:9292
[oslo_concurrency]
lock_path = /var/run/nova
[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://$IPMANAGEMENT:5000/v3
username = placement
password = $PLACEMENTPASS
_EOF_"
ssh root@$IPCOMPUTE << _EOFNEWTEST_
## hapus "#nama_proc" untuk mengaktifkan nested virtualization
#intel modprobe -r kvm_intel && modprobe kvm_intel nested=1
#intel echo "options kvm_intel nested=Y" > /etc/modprobe.d/kvm_intel.conf
#amd modprobe -r kvm_amd && modprobe kvm_amd nested=1
#amd echo "options kvm_amd nested=Y" > /etc/modprobe.d/kvm_amd.conf
chown root:nova /etc/nova/nova.conf.d/500-nova.conf
systemctl enable libvirtd.service openstack-nova-compute.service
systemctl restart libvirtd.service openstack-nova-compute.service
modprobe nbd
echo nbd > /etc/modules-load.d/nbd.conf
_EOFNEWTEST_
ssh root@$IPCOMPUTE << _EOFNEWTEST_
zypper -n in --no-recommends openvswitch
systemctl enable openvswitch
systemctl restart openvswitch
if [[ $INTMANAGEMENTCOMPUTE == $INTEXTERNALCOMPUTE ]]
then
cat << _EOF_ > /etc/sysconfig/network/ifcfg-br-ex
BOOTPROTO='static'
NAME='br-ex'
STARTMODE='auto'
OVS_BRIDGE='yes'
OVS_BRIDGE_PORT_DEVICE='$INTMANAGEMENTCOMPUTE'
IPADDR='$IPCOMPUTE'
NETMASK='$NETMASKMANAGEMENTCOMPUTE'
_EOF_
mv /etc/sysconfig/network/ifroute-$INTMANAGEMENTCOMPUTE /etc/sysconfig/network/backup.ifroute-$INTMANAGEMENTCOMPUTE
mv /etc/sysconfig/network/ifcfg-$INTMANAGEMENTCOMPUTE /etc/sysconfig/network/backup.ifcfg-$INTMANAGEMENTCOMPUTE
echo "default $IPGATEWAYCOMPUTE - br-ex" > /etc/sysconfig/network/ifroute-br-ex
else
mv /etc/sysconfig/network/ifcfg-$INTEXTERNALCOMPUTE /etc/sysconfig/network/backup.ifcfg-$INTEXTERNALCOMPUTE
cat << _EOF_ > /etc/sysconfig/network/ifcfg-br-ex
BOOTPROTO='none'
NAME='br-ex'
STARTMODE='auto'
OVS_BRIDGE='yes'
OVS_BRIDGE_PORT_DEVICE='$INTEXTERNALCOMPUTE'
_EOF_
fi
cat << _EOF_ > /etc/sysconfig/network/ifcfg-$INTEXTERNALCOMPUTE
STARTMODE='auto'
BOOTPROTO='none'
_EOF_
systemctl restart network
_EOFNEWTEST_
ssh root@$IPCOMPUTE << _EOFNEW_
zypper -n install --no-recommends openstack-neutron-openvswitch-agent
cat << _EOF_ > /etc/neutron/neutron.conf.d/500-neutron.conf
[DEFAULT]
transport_url = rabbit://openstack:$RABBITPASS@$IPMANAGEMENT
auth_strategy = keystone
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
[keystone_authtoken]
www_authenticate_uri = http://$IPMANAGEMENT:5000
auth_url = http://$IPMANAGEMENT:5000
memcached_servers = $IPMANAGEMENT:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = $NEUTRONPASS
_EOF_
chown root:neutron /etc/neutron/neutron.conf.d/500-neutron.conf
[ ! -f /etc/neutron/plugins/ml2/openvswitch_agent.ini.orig ] && cp -v /etc/neutron/plugins/ml2/openvswitch_agent.ini /etc/neutron/plugins/ml2/openvswitch_agent.ini.orig
cat << _EOF_ > /etc/neutron/plugins/ml2/openvswitch_agent.ini
[DEFAULT]
[agent]
tunnel_types = vxlan
vxlan_udp_port = 4789
l2_population = False
drop_flows_on_start = False
[network_log]
[ovs]
integration_bridge = br-int
tunnel_bridge = br-tun
local_ip = $IPCOMPUTE
bridge_mappings = provider:br-ex
[securitygroup]
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
[xenapi]
_EOF_
_EOFNEW_
ssh root@$IPCOMPUTE << _EOFNEWTEST_
[ -f /etc/nova/nova.conf.d/500-nova.conf.orig ] && cp -v /etc/nova/nova.conf.d/500-nova.conf.orig /etc/nova/nova.conf.d/500-nova.conf
[ ! -f /etc/nova/nova.conf.d/500-nova.conf.orig ] && cp -v /etc/nova/nova.conf.d/500-nova.conf /etc/nova/nova.conf.d/500-nova.conf.orig
cat << _EOF_ >> /etc/nova/nova.conf.d/500-nova.conf
[neutron]
url = http://$IPMANAGEMENT:9696
auth_url = http://$IPMANAGEMENT:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = $NEUTRONPASS
_EOF_
echo 'NEUTRON_PLUGIN_CONF="/etc/neutron/plugins/ml2/ml2_conf.ini"' >> /etc/sysconfig/neutron
ln -s /etc/apparmor.d/usr.sbin.dnsmasq /etc/apparmor.d/disable/
systemctl stop apparmor
systemctl disable apparmor
systemctl restart openstack-nova-compute.service
# su -s /bin/sh -c "neutron-db-manage upgrade head" neutron
systemctl enable openstack-neutron-openvswitch-agent.service
systemctl restart openstack-neutron-openvswitch-agent.service
sleep 5
firewall-cmd --permanent --add-port=4789/udp
firewall-cmd --permanent --add-port=9696/tcp
firewall-cmd --permanent --add-port=5900-5999/tcp
firewall-cmd --permanent --add-port 6080/tcp
firewall-cmd --permanent --add-port 6081/tcp
firewall-cmd --permanent --add-port 6082/tcp
firewall-cmd --permanent --add-port 8773-8775/tcp
firewall-cmd --reload
_EOFNEWTEST_
echo -e "$red \n\n############## Completed running script Add New Compute Openstack ############## $color_off\n\n"
| true
|
751cc58a0b86214330ecf6a9a20128486f701f33
|
Shell
|
fengzi-code/shell-code
|
/nginx/nginx_install.sh
|
UTF-8
| 6,069
| 3.53125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
set -e
nginxversion="1.13.9"
nginx_dir=/usr/local
nginx_conf_path=$nginx_dir/nginx/nginx.conf
nginx_url=http://nginx.org/download/nginx-$nginxversion.tar.gz
if [ ! -d $nginx_dir ];then
mkdir -p $nginx_dir
fi
cd $nginx_dir
yum -y install pcre pcre-devel zlib zlib-devel gcc gcc-c++ autoconf automake make openssl-devel wget
useradd -s /sbin/nologin -M nginx
mkdir -p /etc/nginx/conf.d /etc/nginx/default.d
chown nginx:nginx -R /etc/nginx
if [ ! -e "$nginx_dir/nginx-$nginxversion.tar.gz" ];then
wget -c $nginx_url
else
echo "文件已经存在"
if [ -d "$nginx_dir/nginx-$nginxversion" ];then
rm -rf $nginx_dir/nginx-$nginxversion
fi
fi
tar -zxf nginx-$nginxversion.tar.gz
cd $nginx_dir/nginx-$nginxversion
./configure --prefix=$nginx_dir/nginx --user=nginx --group=nginx --with-http_stub_status_module --with-http_ssl_module --with-http_realip_module --with-http_sub_module --with-http_gzip_static_module --with-ipv6 --conf-path=$nginx_conf_path
# --add-module=/opt/headers-more-nginx-module-0.33
sleep 1
make -j$(cat /proc/cpuinfo | grep "cpu cores" |awk '{print $4}'|head -1) && make install
sleep 1
#-------------------------启动脚本----------------------------------------------------
fun_6_service (){
echo '#! /bin/bash
# chkconfig: - 85 15
DESC="nginx daemon"
NAME=nginx
DAEMON=$PATH/sbin/$NAME
#CONFIGFILE=/etc/nginx/$NAME.conf
PIDFILE=$PATH/logs/$NAME.pid
SCRIPTNAME=/etc/init.d/$NAME
set -e
[ -x "$DAEMON" ] || exit 0
do_start() {
$DAEMON -c $CONFIGFILE || echo -n "nginx already running"
}
do_stop() {
$DAEMON -s stop || echo -n "nginx not running"
}
do_reload() {
$DAEMON -s reload || echo -n "nginx can’t reload"
}
case "$1" in
start)
echo -n "Starting $DESC: $NAME"
do_start
echo "."
;;
stop)
echo -n "Stopping $DESC: $NAME"
do_stop
echo "."
;;
reload|graceful)
echo -n "Reloading $DESC configuration..."
do_reload
echo "."
;;
restart)
echo -n "Restarting $DESC: $NAME"
do_stop
do_start
echo "."
;;
*)
echo "Usage: $SCRIPTNAME {start|stop|reload|restart}" >&2
exit 3
;;
esac
exit 0' > /etc/init.d/nginx
}
#-------------------------启动脚本----------------------------------------------------
echo "PATH=\$PATH:$nginx_dir/nginx/sbin" >> /etc/profile
# 在匹配行后加入一行
sed -i '/http {/a\ include \/etc\/nginx\/conf.d\/*.conf;' ${nginx_conf_path}
source /etc/profile
os_ver=`cat /etc/redhat-release|grep -Po '[0-9]'|head -1`
if [ ${os_ver} == '7' ];then
cat > /usr/lib/systemd/system/nginx.service << EOF
[Unit]
Description=The nginx HTTP and reverse proxy server
After=network.target
Wants=network-online.target
[Service]
Type=forking
PIDFile=${nginx_dir}/nginx/logs/nginx.pid
ExecStartPre=${nginx_dir}/nginx/sbin/nginx -t
ExecStart=${nginx_dir}/nginx/sbin/nginx -c ${nginx_conf_path}
ExecReload=/bin/kill -s HUP \$MAINPID
ExecStop=/bin/kill -s TERM \$MAINPID
PrivateTmp=true
[Install]
WantedBy=multi-user.target
EOF
systemctl enable nginx
# systemctl start nginx
else
fun_6_service
sed -i 'N;2aPATH='$nginx_dir'/nginx' /etc/init.d/nginx
sed -i 'N;2aCONFIGFILE='$nginx_conf_path'' /etc/init.d/nginx
chmod +x /etc/init.d/nginx
chkconfig --add nginx
chkconfig --level 345 nginx on
#service nginx start
fi
echo "nginx 安装完毕~~~~~"
# '''active connections – 活跃的连接数量
# server accepts handled requests — 总共处理了xxx个连接 , 成功创建xxx次握手, 总共处理了xxx个请求
# reading — 读取客户端的连接数.
# writing — 响应数据到客户端的数量
# waiting — 开启 keep-alive 的情况下,这个值等于 active – (reading+writing), 意思就是 Nginx 已经处理完正在等候下一次请求指令的驻留连接.
# log_format main '$remote_addr - $remote_user [$time_local] "$request" '
# '$status $body_bytes_sent "$http_referer" '
# '"$http_user_agent" "$http_x_forwarded_for" "$host" '
# '"$upstream_addr" $upstream_status $upstream_response_time $request_time';
# $remote_addr, $http_x_forwarded_for 记录客户端IP地址
# $remote_user 记录客户端用户名称
# $request 记录请求的URL和HTTP协议
# $status 记录请求状态
# $body_bytes_sent 发送给客户端的字节数,不包括响应头的大小; 该变量与Apache模块mod_log_config里的“%B”参数兼容。
# $bytes_sent 发送给客户端的总字节数。
# $connection 连接的序列号。
# $connection_requests 当前通过一个连接获得的请求数量。
# $msec 日志写入时间。单位为秒,精度是毫秒。
# $pipe 如果请求是通过HTTP流水线(pipelined)发送,pipe值为“p”,否则为“.”。
# $http_referer 记录从哪个页面链接访问过来的
# $http_user_agent 记录客户端浏览器相关信息
# $request_length 请求的长度(包括请求行,请求头和请求正文)。
# $request_time 请求处理时间,单位为秒,精度毫秒; 从读入客户端的第一个字节开始,直到把最后一个字符发送给客户端后进行日志写入为止。
# $time_iso8601 ISO8601标准格式下的本地时间。
# $time_local 通用日志格式下的本地时间。
# 1、轮询是upstream的默认分配方式,即每个请求按照时间顺序轮流分配到不同的后端服务器,如果某个后端服务器down掉后,能自动剔除。
# 2、weight 轮询的加强版,即可以指定轮询比率,weight和访问几率成正比,主要应用于后端服务器异质的场景下。
# 3、ip_hash 每个请求按照访问ip(即Nginx的前置服务器或者客户端IP)的hash结果分配,这样每个访客会固定访问一个后端服务器,可以解决session一致问题。
# 4、fair fair顾名思义,公平地按照后端服务器的响应时间(rt)来分配请求,响应时间短即rt小的后端服务器优先分配请求。
# 5、url_hash 与ip_hash类似,但是按照访问url的hash结果来分配请求,使得每个url定向到同一个后端服务器,主要应用于后端服务器为缓存时的场景下。
# '''
| true
|
25f6e912fc39da087080b9c7781ba3cc09ef2704
|
Shell
|
bioinformagical/scripts-cluster
|
/cotescripts/unmapped-blastn-NT.pbs
|
UTF-8
| 1,257
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
# Request a run time of 5 hours and 30 minutes
#PBS -l walltime=168:30:00
# Request 1 processor in 1 node
#PBS -l nodes=1:ppn=12
# Request 7600 megabytes memory per processor. ( 48 usable CPUs)
#PBS -l vmem=88gb
#PBS -N bbblastN2NT-cote
#PBS -t 1-18
umask 007
set -eu
file=$(sed -n -e "${PBS_ARRAYID}p" /lustre/groups/bioservices/gregory/HSMS_104_Cote/file2.txt)
echo "Launching tophat on ${file}"
#cd $home
module load ncbi-blast+
cd /lustre/groups/bioservices/gregory/HSMS_104_Cote/blastn/
#ln -s ../${file}-R1-clip.fq_matched.fq $file-R1-filtered.fastq
#ln -s ../${file}-R2-clip.fq_matched.fq $file-R2-filtered.fastq
awk 'BEGIN{P=1}{if(P==1||P==2){gsub(/^[@]/,">");print}; if(P==4)P=0; P++}' ../tophat/${file}/s_${file}_1_extracted_reads.fastq > ${file}_1.fasta
awk 'BEGIN{P=1}{if(P==1||P==2){gsub(/^[@]/,">");print}; if(P==4)P=0; P++}' ../tophat/${file}/s_${file}_2_extracted_reads.fastq > ${file}_2.fasta
blastn -db ~/db/nt.fna -query ${file}_1.fasta -out ${file}-1VSnt.blastn.txt -num_threads 24 -evalue 1e-10 -soft_masking false -outfmt 6
blastn -db ~/db/nt.fna -query ${file}_2.fasta -out ${file}-2VSnt.blastn.txt -num_threads 24 -evalue 1e-10 -soft_masking false -outfmt 6
rm ${file}_1.fasta
rm ${file}_2.fasta
| true
|
3b3ec75fbeb4b7032764db0a75cd71ea891ff6a7
|
Shell
|
thienkimlove/standard
|
/setup.sh
|
UTF-8
| 2,304
| 2.90625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
setup_laravel() {
cd /var/www/html/$1
mysql -uroot -ptieungao -e "create database $1;"
sed -i -e "s/DB_DATABASE=homestead/DB_DATABASE=$1/g" .env
composer install
php artisan key:generate
php artisan migrate
php artisan ide-helper:generate
chmod -R 777 storage
chmod -R 777 bootstrap
php artisan vendor:publish --provider="Intervention\Image\ImageServiceProviderLaravel5"
php artisan vendor:publish
cd /var/www/html/$1
}
setup_editor() {
cd /var/www/html/$1
[ -d public/upload ] || mkdir public/upload
[ -d public/files ] || mkdir public/files
chmod -R 777 public/upload
chmod -R 777 public/files
cd public && bower install && [ -d kcfinder ] || git clone git@github.com:sunhater/kcfinder.git
sed -i "s/'disabled' => true/'disabled' => false/g" kcfinder/conf/config.php
sed -i 's/"upload"/"\/upload"/g' kcfinder/conf/config.php
[ -d bower_components/ckeditor/plugins/pbckcode ] || git clone git@github.com:prbaron/pbckcode.git bower_components/ckeditor/plugins/pbckcode
cat > bower_components/ckeditor/config.js <<'endmsg'
CKEDITOR.editorConfig = function( config ) {
// Define changes to default configuration here. For example:
config.filebrowserBrowseUrl = '/kcfinder/browse.php?opener=ckeditor&type=files';
config.filebrowserImageBrowseUrl = '/kcfinder/browse.php?opener=ckeditor&type=images';
config.filebrowserFlashBrowseUrl = '/kcfinder/browse.php?opener=ckeditor&type=flash';
config.filebrowserUploadUrl = '/kcfinder/upload.php?opener=ckeditor&type=files';
config.filebrowserImageUploadUrl = '/kcfinder/upload.php?opener=ckeditor&type=images';
config.filebrowserFlashUploadUrl = '/kcfinder/upload.php?opener=ckeditor&type=flash';
//do not add extra paragraph to html
config.autoParagraph = false;
config.toolbarGroups = [
{"name":"basicstyles","groups":["basicstyles"]},
{"name":"links","groups":["links"]},
{"name":"paragraph","groups":["list","blocks"]},
{"name":"document","groups":["mode"]},
{"name":"insert","groups":["insert"]},
{"name":"styles","groups":["styles"]},
{"name":"about","groups":["about"]},
{ name: 'pbckcode', "groups":["pbckcode"]}
];
config.extraPlugins = 'pbckcode';
};
endmsg
cd /var/www/html/$1
}
echo "Start setup..."
cp .env.stage .env
read -p "Your project : " project
setup_laravel $project
setup_editor $project
| true
|
ea6f82c87b809fb762d4357c66b44ab21435d9f1
|
Shell
|
firoj786/shell-scripting
|
/firoj/bigNum.sh
|
UTF-8
| 252
| 3.40625
| 3
|
[] |
no_license
|
#WRP biggest two number or equal num.
#!/bin/bash
printf "enter A:"
read A
printf "enter B:"
read B
if [ $A -gt $B ]
then
echo "$A is bigger than $B"
fi
if [ $B -gt $A ]
then
echo "$B is bigger than $A"
fi
if [ $A -eq $B ]
then
echo "A and B same"
fi
| true
|
831d66f7de2434d2488bf45834095dd8fe7e486a
|
Shell
|
cicoub13/libellule
|
/test/initialize.sh
|
UTF-8
| 650
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
echo "Initalization of SQLite"
if [ "$#" -ne 1 ]; then
echo "Usage : ./initialize.sh dir_install"
else
echo "Start of initialization"
sqlite3 $1/test/libellule.db "drop table if exists ping;"
sqlite3 $1/test/libellule.db "drop table if exists download;"
sqlite3 $1/test/libellule.db "drop table if exists upload;"
sqlite3 $1/test/libellule.db "create table ping(point INTEGER PRIMARY KEY, value INTEGER);"
sqlite3 $1/test/libellule.db "create table download(point INTEGER PRIMARY KEY, value INTEGER);"
sqlite3 $1/test/libellule.db "create table upload(point INTEGER PRIMARY KEY, value INTEGER);"
echo "Initalization done"
fi
| true
|
7bef6eca3c5f7611f19018be9be24241abb3e24a
|
Shell
|
yu-iskw/spark-streaming-with-google-cloud-example
|
/bin/send-pubsub-messages.sh
|
UTF-8
| 333
| 2.796875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
SCRIPT_DIR="$( cd "$( dirname "$0" )" && pwd )"
SCRIPT_NAME=`basename "$0"`
SBT="$SCRIPT_DIR/../build/sbt"
__PROJECT_ID=$1 ; shift
__PUBSUB_TOPIC=$1 ; shift
__MAIN_CLASS="com.github.yuiskw.google.datastore.PubsubMessagePublisher"
$SBT "run-main $__MAIN_CLASS --projectId $__PROJECT_ID --pubsubTopic $__PUBSUB_TOPIC"
| true
|
7c3a8c7ec176330302848d6e50bdef7b4e870b2a
|
Shell
|
veltzer/utils-bash
|
/src/utils_rename_single.sh
|
UTF-8
| 428
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash -e
: <<'COMMENT'
This is a script that allows you to move files with long names
without repeating the name of the file.
References:
- https://www.ostechnix.com/bash-tips-rename-files-without-typing-full-name-twice-in-linux/
COMMENT
if [ "$#" -eq 0 ]
then
echo "$0: use with at least one argument"
exit 1
fi
for var in "$@"
do
read -rei "${var}" newfilename
command mv -v -- "${var}" "${newfilename}"
done
| true
|
8cd77ebed312e870038c1ebb9ade2c5702c1a249
|
Shell
|
gabrielmip/search-engine
|
/run2.sh
|
UTF-8
| 362
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
HD="/media/gabriel/Dados\\ Massivos"
DOCS=${HD}/small_collection
RUNS=${HD}/runs
MERGE=${HD}/merge
INDEX=${HD}/index
LOG=${HD}/logs/removendo_log.txt
MEMORY=200
for i in 10 9 8 7 6 5; do
echo $i
command="time indexer/indexer ${DOCS} ${RUNS} ${MERGE} ${INDEX} ${MEMORY} ${LOG}"
eval $command
command="rm ${DOCS}/html_${i}"
#eval $command
done
| true
|
dd35929ea00be8d6fcb3e6bc8f8a380b898786f2
|
Shell
|
yonchu/dotfiles
|
/bin/check-sw
|
UTF-8
| 2,066
| 3.59375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
path() {
if which "$1" > /dev/null 2>&1; then
echo " ($(which "$1"))"
else
echo "not installed"
fi
}
check() {
## bash
echo "bash: $(bash --version | head -n 1)$(path bash)"
## zsh
echo "zsh: $(zsh --version)$(path zsh)"
## ssh
echo "SSH: $(ssh -V 2>&1)$(path ssh)"
## mosh
echo "mosh: $(mosh --version 2>&1 | head -n 1)$(path mosh)"
## GCC
echo "gcc: $(gcc --version | head -n 1)$(path gcc)"
## Java
echo "Java: $(java -version 2>&1 | head -n 2 | tr '\n' ' ')$(path java)"
## Perl
echo "Perl: $(perl --version | sed '/^$/d' | head -1)$(path perl)"
## PHP
echo "PHP: $(php --version | head -n 1)$(path php)"
## Python
echo "Python: $(python --version 2>&1)$(path python)"
echo "easy_install: $(easy_install --version)$(path easy_install)"
echo "pip: $(pip --version)"
## Ruby
echo "Ruby: $(ruby --version)$(path ruby)"
echo "gem: $(gem --version)$(path gem)"
echo "rvm: $(rvm --version | sed '/^$/d' | cut -d ' ' -f 1,2,3)$(path rvm)"
## node
echo "node: $(node --version)$(path node)"
echo "npm: $(npm --version)$(path npm)"
echo "coffee: $(coffee --version)$(path coffee)"
echo "typescript: $(typescript --version)$(path typescript)"
## Apache
echo "Apache: $(httpd -v | head -n 1)$(path httpd)"
## MySQL
echo "MySQL: $(mysql --version)$(path mysql)"
## tmux
echo "tmux: $(tmux -V)$(path tmux)"
## screen
echo "screen: $(screen -v)"
echo " $(path screen)"
## git
echo "Git: $(git --version)$(path git)"
## Xcode
[ $(uname -s) = 'Darwin' ] && echo "Xcode: $(xcodebuild -version | tr '\n' ' ' | cut -d ' ' -f 1,2,5)$(path xcodebuild)"
## homebrew
[ $(uname -s) = 'Darwin' ] && echo "Homebrew: $(brew --version)$(path brew)"
}
check 2> /dev/null
| true
|
953aaa307ba55e195d0d7b0d6abfa0b11b748fc2
|
Shell
|
martinep/foam-extend-svn
|
/Breeder_1.5/OSIG/TurboMachinery/ercoftacCentrifugalPump/cases/MRFSimpleFoam/ECPStitchMesh2D/Allrun
|
UTF-8
| 2,439
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# Allrun — builds and runs the ERCOFTAC centrifugal pump (ECP) tutorial case
# with MRFSimpleFoam. Must be run from a directory containing rotor2D_orig,
# stator2D_orig and either ECPGgi2D_orig or ECPStitchMesh2D_orig, with the
# Fluent meshes available in ../../meshes. Requires a sourced OpenFOAM
# environment ($WM_PROJECT_DIR).
. $WM_PROJECT_DIR/bin/tools/RunFunctions ### load macros
# Pick the case variant from whichever *_orig directory is present.
if [ -d ECPGgi2D_orig ] ; then
case=ECPGgi2D; ### this part sets
elif [ -d ECPStitchMesh2D_orig ]; then ### variable $case to
case=ECPStitchMesh2D; ### frozenRotorGgi2D or
else ### frozenRotorGgi2D
echo "sorry, you need to have ECPStitchMesh2D_orig or ECPGgi2D_orig in the current folder"; ### depending on the case studied.
fi
# NOTE(review): the trailing comments above mention "frozenRotorGgi2D" but the
# variable is actually set to ECPGgi2D/ECPStitchMesh2D — stale copy-paste?
###-------------Saving the datas----------------###
cp -r rotor2D_orig rotor2D ### Backup of the initial directories
cp -r stator2D_orig stator2D ### Those three directories should stay unchanged
cp -r ${case}_orig ${case} ### to allow a reseting of the case
###------- some clean-up-------###
# Strip .svn metadata copied along with the *_orig templates.
rm -rf rotor2D/.svn
rm -rf rotor2D/system/.svn
rm -rf rotor2D/constant/.svn
rm -rf stator2D/.svn
rm -rf stator2D/system/.svn
rm -rf stator2D/constant/.svn
rm -rf ${case}/.svn
rm -rf ${case}/0_orig/.svn
rm -rf ${case}/system/.svn
rm -rf ${case}/constant/.svn
rm -rf ${case}/0/.svn
###------------making the mesh---------------###
# Convert the Fluent meshes; rename each log so the second run does not clobber
# the first.
runApplication fluentMeshToFoam -case rotor2D ../../meshes/rotor2D.msh
mv log.fluentMeshToFoam log.fluentMeshToFoam.rotor2D
runApplication fluentMeshToFoam -case stator2D ../../meshes/stator2D.msh
mv log.fluentMeshToFoam log.fluentMeshToFoam.stator2D
## (optional) rotating the rotor so that its position corresponds to the ones that give ##
##transformPoints -case rotor2D -rotate "((1 0 0)(0.995957356 0.089827305 0))" ## t/Ti=0.226 in [1] ##
##transformPoints -case rotor2D -rotate "((1 0 0)(0.983862111 0.178928331 0))" ## t/Ti=0.326 in [1] ##
##transformPoints -case rotor2D -rotate "((1 0 0)(0.963812057 0.266582669 0))" ## t/Ti=0.426 in [1] ##
###------------Building the case-----------###
pushd ${case} ## starting the script makeMesh that is located in ##
# PATH gains '.' so runApplication can find the case-local makeMesh script.
export PATH=.:$PATH
runApplication makeMesh ## ${case}/makeMesh ##
###-----------launching the computations------###
runApplication MRFSimpleFoam
popd
| true
|
6f1c75832777116d022120bf70390137321a8aad
|
Shell
|
f-kratzenstein/CHARMeMaps
|
/apache-tomcat/bin/setenv.sh
|
UTF-8
| 2,624
| 3.96875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# -----------------------------------------------------------------------------
# Control Script for the CATALINA Server
#
# Environment Variable Prerequisites
#
# Set the enviroment variables in this script, as recommended in catalina.sh
# in to keep our customizations separate.
#
# As we want to ship/start the catalina with a defined JRE we set JRE_HOME to
# our predefined location.
#-------------------------------------------------------------------------------
# OS specific support. $var _must_ be set to either true or false.
cygwin=false
darwin=false
os400=false
case "`uname`" in
CYGWIN*) cygwin=true;;
Darwin*) darwin=true;;
OS400*) os400=true;;
esac
# resolve links - $0 may be a softlink
PRG="$0"
while [ -h "$PRG" ]; do
ls=`ls -ld "$PRG"`
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '/.*' > /dev/null; then
PRG="$link"
else
PRG=`dirname "$PRG"`/"$link"
fi
done
# Get standard environment variables
PRGDIR=`dirname "$PRG"`
# Only set CATALINA_HOME if not already set
[ -z "$CATALINA_HOME" ] && CATALINA_HOME=`cd "$PRGDIR/.." >/dev/null; pwd`
# Copy CATALINA_BASE from CATALINA_HOME if not already set
[ -z "$CATALINA_BASE" ] && CATALINA_BASE="$CATALINA_HOME"
#JRE_HOME=${CATALINA_HOME}/jre
# CHARMeMaps lives one level above the tomcat installation.
CHARMEMAPS_BASE=`cd "$CATALINA_HOME/.." >/dev/null; pwd`
echo Using CHARMEMAPS_BASE: $CHARMEMAPS_BASE
# NOTE(review): mkdir without -p prints an error on every restart once the
# directory exists — probably harmless but noisy; confirm intent.
mkdir $CHARMEMAPS_BASE/.ncWMS
NCWMS_CONFIG=`cd "$CHARMEMAPS_BASE/.ncWMS" >/dev/null; pwd`
echo "configDir=${NCWMS_CONFIG}" > $CATALINA_HOME/webapps/ncWMS/WEB-INF/classes/config.properties
echo Using NCWMS_CONFIG: $NCWMS_CONFIG
# Set up the CHARMeMaps configuration for ncWMS
if [ ! -e $NCWMS_CONFIG/config.xml ]; then
# NOTE(review): the existence test checks CHARMeMaps_config.xml but sed reads
# CHARMeMaps_config.xml.template — verify which file is actually shipped.
if [ -e $NCWMS_CONFIG/CHARMeMaps_config.xml ]; then
sed "s%&CHARMEMAPS_BASE;%${CHARMEMAPS_BASE}%g" $NCWMS_CONFIG/CHARMeMaps_config.xml.template > $NCWMS_CONFIG/config.xml
echo Using NCWMS_CONFIG:$NCWMS_CONFIG
else
echo "Warning!!! $NCWMS_CONFIG/CHARMeMaps_config.xml does not exist!"
fi
fi
# Set up the CHARMeMaps configuration for the tomcat/ncWMS port
if [ -e $CATALINA_HOME/webapps/charme-maps ]; then
#configure CHARMeMaps
CHARMEMAPS_APP=$CATALINA_HOME/webapps/charme-maps
#getting the tomcat-port
# NOTE(review): the egrep only matches a 4-digit port; a 2-, 3- or 5-digit
# connector port would be silently missed — confirm deployment always uses 4.
MY_PORT=$(xpath $CATALINA_HOME/conf/server.xml '/Server/Service[@name="Catalina"]/Connector[@protocol="HTTP/1.1"]/@port' 2>&1 | grep port | egrep -o '[[:digit:]]{4}')
#setting the tomcat-port
sed "s%&MY_PORT;%${MY_PORT}%g" ${CHARMEMAPS_APP}/WEB-INF/web.xml.template > ${CHARMEMAPS_APP}/WEB-INF/web.xml
else
echo "Warning!!! $CATALINA_HOME/webapps/charme-maps does not exist!"
fi
| true
|
f4e93fda77a8b490af34864b258dba7b7bdd4963
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/angular-cli/PKGBUILD
|
UTF-8
| 464
| 2.75
| 3
|
[] |
no_license
|
# PKGBUILD for angular-cli: wraps the @angular/cli npm package as an Arch
# package by installing it into the package staging directory with npm -g.
_npmscope=@angular
_npmname=cli
# NOTE(review): _npmver and pkgver duplicate the same version — keep in sync
# when bumping.
_npmver=1.4.1
pkgname=angular-cli # All lowercase
pkgver=1.4.1
pkgrel=1
pkgdesc="CLI tool for Angular"
arch=(any)
url="https://github.com/angular/angular-cli"
license=()
depends=('nodejs' 'npm' )
optdepends=()
options=(!strip)
# Install the scoped npm package into $pkgdir/usr so pacman owns the files.
package() {
cd $srcdir
local _npmdir="$pkgdir/usr/lib/node_modules/"
mkdir -p $_npmdir
cd $_npmdir
npm install -g --prefix "$pkgdir/usr" $_npmscope/$_npmname@$_npmver
}
# vim:set ts=2 sw=2 et:
| true
|
1ca585a2a211a26d2d72f2444cb2e4a24f31d9f9
|
Shell
|
galaxyeye/warps-scripts
|
/start-workspace.sh
|
UTF-8
| 1,593
| 2.890625
| 3
|
[] |
no_license
|
#bin
# start-workspace.sh — launches the author's development desktop session:
# GUI tools (eclipse, firefox, gedit, mysql-workbench), terminals, and the
# optional satellite/hbase/solr/scent/nutch services when their home
# directories point at executables. Blocks at the end until a key is pressed.
HBASE_HOME=~/programs/hbase-0.98.8
# NOTE(review): the *_HOME variables below are commented out, so the later
# $SATELLITE_HOME/$SOLR_HOME/$SCENT_HOME/$NUTCH_HOME expansions are empty and
# their -x/-d guards will simply fail — presumably intentional toggles.
# SOLR_HOME=~/programs/solr-4.10.3
# SATELLITE_HOME=~/workspace/satellite
# SCENT_HOME=~/workspace/qiwur-scent-1.0.0-src
# NUTCH_HOME=~/workspace/apache-nutch-2.3.0-src
# enter the home directory
cd
echo "start eclipse..."
/opt/eclipse/mars/eclipse > /dev/null 2>&1 &
sleep 3
echo "start firefox..."
firefox 2> /dev/null &
sleep 1
# echo "start ibus..."
# we use fcitx to support sogou pinyin
# ibus-daemon 2> /dev/null &
# sleep 1
# echo "start chrome..."
# chromium-browser 2> /dev/null &
# sleep 3
echo "start gedit..."
gedit 2> /dev/null &
sleep 1
echo "ssh to qiwur.com"
gnome-terminal -e "ssh hduser@qiwur.com"
sleep 1
echo "start mysql workbench"
/usr/bin/mysql-workbench 2> /dev/null &
sleep 1
SATELLITE="$SATELLITE_HOME/bin/phantomjs"
if [ -x $SATELLITE ]; then
echo "start satellite..."
$SATELLITE --load-images=false src/server.js 2> /dev/null &
sleep 3
fi
HBASE="$HBASE_HOME/bin/start-hbase.sh"
if [ -x $HBASE ]; then
echo "start hbase"
$HBASE
sleep 3
fi
SOLR="$SOLR_HOME/bin/solr"
# NOTE(review): this guard uses -d (directory) while the others use -x
# (executable); the solr binary is a file, so -x was likely intended.
if [ -d $SOLR ]; then
echo "start solr"
$SOLR start
sleep 3
fi
SCENT="$SCENT_HOME/bin/scent"
if [ -x $SCENT ]; then
echo "start scent..."
gnome-terminal --tab -e "$SCENT scentserver"
sleep 3
gnome-terminal --tab -e "less logs/scent.log"
fi
NUTCH="$NUTCH_HOME/runtime/local/bin/nutch"
if [ -x $NUTCH ]; then
# TODO : wait for hbase available
sleep 5
echo "start nutch server"
gnome-terminal --tab -e "$NUTCH nutchserver"
sleep 3
fi
cd
echo "press any key to exit:"
read key
| true
|
6542f082c9888274cd7afce917154d52e5d843b2
|
Shell
|
aviadco11/ob
|
/kafka-connect-to-elk.sh
|
UTF-8
| 918
| 2.515625
| 3
|
[] |
no_license
|
echo "install vim"
echo "------------"
docker exec --user="root" kafka bash -c "apt-get update & apt-get install vim"
echo ""
echo "copying some config files :"
echo "----------------------------"
docker exec kafka bash -c "/opt/bitnami/kafka/bin/kafka-topics.sh --create --bootstrap-server localhost:9092 --replication-factor 1 --partitions 1 --topic yosi1"
docker cp kafka/kafka-connect/connectors/. kafka:/opt/bitnami/kafka/config
docker cp kafka/kafka-connect/lib/. kafka:/usr/local/share/kafka/plugins
docker cp kafka/hello.json /var/lib/kafka/.
docker exec kafka bash -c "sleep 5; /opt/bitnami/kafka/bin/kafka-console-producer.sh --broker-list localhost:9092 --topic yosi1 < /var/lib/kafka/hello.json"
docker exec --user="root" kafka bash -c "/opt/bitnami/kafka/bin/connect-standalone.sh /opt/bitnami/kafka/config/connect-standalone.properties /opt/bitnami/kafka/config/elasticsearch-connect.properties"
echo ""
| true
|
e0d9218a7ab4bae525de70db7ba906d8c49150e6
|
Shell
|
massenz/HOW-TOs
|
/HOW-TO Install Docker
|
UTF-8
| 726
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# Installs the latest version of Docker.
#
# Source: https://blog.docker.com/2015/07/new-apt-and-yum-repos/
# Relies on msg/wrap helpers from the author's utils.sh.
source ${HOME}/Dropbox/development/scripts/utils.sh
msg "Removing existing Docker, if any"
sudo apt-get purge -y lxc-docker* >/dev/null 2>&1
msg "Adding the new APT Docker repositories"
wrap "sudo apt-key" adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
# NOTE: This only works for Trusty (Ubuntu 14.04 LTS)
# See the reference above for an appropriate key
# Write via 'sudo tee': in the previous 'sudo echo ... > file' form the
# redirection was performed by the *unprivileged* shell, so writing under
# /etc/apt failed with "Permission denied" (the sudo applied only to echo).
echo "deb https://apt.dockerproject.org/repo ubuntu-trusty main" | sudo tee /etc/apt/sources.list.d/docker.list >/dev/null
sudo apt-get update
msg "Installing Docker..."
wrap "sudo apt-get" install -y docker-engine
wrap docker version
| true
|
567441418fe1e377fe5f625409726ff090fb41d2
|
Shell
|
fabripautasso/pi4-captive-portal
|
/start_as_access_point.sh
|
UTF-8
| 3,244
| 2.6875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# start_as_access_point.sh — one-shot provisioning of a Raspberry Pi as a
# WiFi access point (hostapd + dnsmasq) with a nodogsplash captive portal
# and a local API, then reboots.
# NOTE(review): every 'tee -a' below APPENDS to its config file, so re-running
# this script duplicates configuration stanzas — intended as run-once only.
echo "-----STEP 1 - System Update-----"
sudo apt-get update -y
sudo apt-get upgrade -y
echo "-----STEP 2 - hostapd and dnsmasq installation-----"
sudo apt-get install hostapd dnsmasq -y
echo "-----STEP 3 - Stop hostapd and dnsmasq-----"
sudo systemctl stop hostapd
sudo systemctl stop dnsmasq
echo "-----STEP 4 - Captive portal installation-----"
sudo apt install libmicrohttpd-dev -y
cd ~
git clone https://github.com/nodogsplash/nodogsplash.git
cd ~/nodogsplash
make
sudo make install
echo "-----STEP 5 - Update dhcpcd to take control of the wlan0 interface-----"
# Static AP address; wpa_supplicant disabled on wlan0.
sudo cat <<EOF | sudo tee -a /etc/dhcpcd.conf
interface wlan0
static ip_address=192.168.220.1/24
nohook wpa_supplicant
EOF
sudo systemctl restart dhcpcd
echo "-----STEP 6 - Creating new hostapd configuration-----"
sudo cat <<EOF | sudo tee -a /etc/hostapd/hostapd.conf
interface=wlan0
driver=nl80211
hw_mode=g
channel=6
ieee80211n=1
wmm_enabled=0
macaddr_acl=0
ignore_broadcast_ssid=0
auth_algs=1
wpa=2
wpa_key_mgmt=WPA-PSK
wpa_pairwise=TKIP
rsn_pairwise=CCMP
# This is the name of the network
ssid=pi-portal-ap
# The network passphrase
wpa_passphrase=wifiappoc
EOF
echo "-----STEP 7 - Update hostapd default path-----"
sudo sed -i 's/#DAEMON_CONF.*/DAEMON_CONF=\"\/etc\/hostapd\/hostapd.conf\"/g' /etc/default/hostapd
echo "-----STEP 8 - Update dnsmasq configuration-----"
sudo mv /etc/dnsmasq.conf /etc/dnsmasq.conf.orig
sudo cat <<EOF | sudo tee -a /etc/dnsmasq.conf
interface=wlan0 # Use interface wlan0
server=1.1.1.1 # Use Cloudflare DNS
address=/#/123.123.123.123
except-interface=lo
dhcp-range=192.168.220.50,192.168.220.150,12h # IP range and lease time
EOF
echo "-----STEP 9 - Enable port forwarding-----"
sudo sed -i 's/#net.ipv4.ip_forward=1.*/net.ipv4.ip_forward=1/g' /etc/sysctl.conf
sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward"
echo "-----STEP 10 - Update IP Tables-----"
# NAT the AP subnet out through eth0 and restore the rules from rc.local.
sudo iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
sudo sh -c "iptables-save > /etc/iptables.ipv4.nat"
sudo sed -i 's/exit 0/iptables-restore < \/etc\/iptables.ipv4.nat\'$'\nexit 0/g' /etc/rc.local
echo "-----STEP 11 - Start hostapd and dnsmasq-----"
sudo systemctl unmask hostapd
sudo systemctl enable hostapd
sudo systemctl start hostapd
sudo service dnsmasq start
echo "-----STEP 12 - Captive portal configuration-----"
sudo cat <<EOF | sudo tee -a /etc/nodogsplash/nodogsplash.conf
GatewayInterface wlan0
GatewayAddress 192.168.220.1
MaxClients 250
AuthIdleTimeout 480
EOF
sudo nodogsplash
sudo sed -i 's/exit 0/nodogsplash\'$'\nexit 0/g' /etc/rc.local
sudo cp /home/pi/portal/captive-portal/* /etc/nodogsplash/htdocs
sudo mv /etc/nodogsplash/htdocs/login.html /etc/nodogsplash/htdocs/splash.html
echo "-----STEP 13 - Configure API-----"
# Start the node API from rc.local and open its port through nodogsplash.
sudo sed -i 's/exit 0/node \/home\/pi\/portal\/api\/app.js\'$'\nexit 0/g' /etc/rc.local
sudo sed -i 's/FirewallRule allow tcp port 443/FirewallRule allow tcp port 443\'$'\nFirewallRule allow tcp port 3000/g' /etc/nodogsplash/nodogsplash.conf
echo "-----STEP 14 - Backup wpa_supplicant configuration-----"
sudo mv /etc/wpa_supplicant/wpa_supplicant.conf /etc/wpa_supplicant/wpa_supplicant.conf.orig
echo "-----STEP 15 - Installation complete - The system will reboot-----"
sudo reboot
| true
|
bf560ead2858458d2b11f8af04a5b07968082f91
|
Shell
|
8gears/do
|
/examples/do.docker.sh
|
UTF-8
| 652
| 3.921875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Do - The Simplest Build Tool on Earth.
# Documentation and examples see https://github.com/8gears/do
# (bash shebang: the usage line below uses the bash builtin 'compgen', which
# is not available in plain POSIX sh.)
set -e -u # -e "Automatic exit from bash shell script on error" -u "Treat unset variables and parameters as errors"

IMAGE_NAME="registry.8gears.com/hello-world"
# Tag from CI, defaulting to 'latest' for local runs.
TAG="${CI_COMMIT_TAG:-latest}"

# Build the release image. Fix: the original tagged the image with the literal
# string "TAG" (missing the ${} expansion).
build() {
	docker build -t ${IMAGE_NAME}:${TAG} .
}

# Build a throwaway candidate image as a smoke test.
test() {
	docker build -t ${IMAGE_NAME}:candidate .
}

# Push the release image to the registry.
deploy() {
	docker push ${IMAGE_NAME}:${TAG}
}

# Full pipeline: build, test, deploy.
all() {
	build && test && deploy
}

"$@" # <- execute the task
[ "$#" -gt 0 ] || printf "Usage:\n\t./do.sh %s\n" "($(compgen -A function | grep '^[^_]' | paste -sd '|' -))"
| true
|
f3cfebf2ffdd44477a218428eccf338c2ec8051f
|
Shell
|
dosaboy/ovs-stat
|
/dataset_factory.d/07load_vlan_conntrack_zone_info
|
UTF-8
| 819
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash -u
# Collects OVS conntrack entries per VLAN/zone into the results tree.
# Relies on helpers (get_ovs_appctl_dump_conntrack_zone) sourced from
# $CWD/common; $CWD and $RESULTS_PATH_HOST are provided by the caller.
TRAP_ERROR_MSG="vlan conntrack zone info"
# lib
for l in `find $CWD/common -type f`; do source $l; done
# Dump conntrack entries for zone 0 plus one zone per discovered VLAN id.
__load_vlan_conntrack_zone_info ()
{
local conntrack_root=$RESULTS_PATH_HOST/ovs/conntrack
# start with a test to see if we have permissions to get conntrack info
get_ovs_appctl_dump_conntrack_zone 0 &>/dev/null
(($?)) && return 0 # dont yield error since older snapd can't do this.
mkdir -p $conntrack_root/zones
# include id 0 to catch unzoned
for vlan in 0 `ls $RESULTS_PATH_HOST/ovs/vlans/`; do
mkdir -p $conntrack_root/zones/$vlan
get_ovs_appctl_dump_conntrack_zone $vlan > $conntrack_root/zones/$vlan/entries
done
}
# NOTE: requires snapd with https://pad.lv/1873363
# main()
# stderr is diverted per-invocation so the harness can report failures.
__load_vlan_conntrack_zone_info 2>$RESULTS_PATH_HOST/error.$$
| true
|
32e65bc042712a7f2517b10f87bce558feb66778
|
Shell
|
hratcliffe/WIPPS
|
/files/scripts/generate_runtime_flags.sh
|
UTF-8
| 522
| 2.9375
| 3
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# Scrapes Doxygen HTML for runtime flags set by test classes and writes a
# summary to ./files/tests_runtime_flags.txt.
#Extract any strings in classes called test_ containing "Set runtime_flag"
# The awk strips the 'classtest__' prefix and '.html' suffix from the grep'd
# filename to recover the bare class name.
grep -r "Set runtime_flag" ./html/classtest__* | awk -F':' '{if(match($1, "classtest_")){ name = substr($1, match($1, "classtest")+5, length($1)-match($1, "class")-9); print "In class " name ":" $2}}'> ./files/tests_runtime_flags.txt
#Remove everything after the first para break
sed -i.bak 's|<\/p>.*|</p>|' ./files/tests_runtime_flags.txt
#Replace double underscores with single
# (Doxygen escapes '_' as '__' in generated filenames.)
sed -i.bak 's|__|_|g' ./files/tests_runtime_flags.txt
| true
|
ddfba68d7340978989479c9af88e54bb7236ce27
|
Shell
|
ssharpjr/dotfiles-original
|
/install_configs.sh
|
UTF-8
| 254
| 3.46875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Install configuration files listed in configs.csv.
# Each CSV row is "dest,file"; an empty dest means the file belongs in the
# home/root location, ".config" means it belongs under ~/.config.
destdir="tmp"
configs_file="configs.csv"

while IFS=, read -r target name; do
  if [ "$target" = "" ]; then
    echo "$name goes in root"
  elif [ "$target" = ".config" ]; then
    echo "$name goes in $target"
  fi
done < "$configs_file"
| true
|
60b362762b2f5698c56eedbaf3ff44b79a2c3c2a
|
Shell
|
myw/my.vimfiles
|
/scripts/clean-swap-files.sh
|
UTF-8
| 1,750
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
# clean-swap-files.sh - clean up vim swap files after crash
#
# Author: @chouser <http://stackoverflow.com/users/7624/chouser>
# From: http://stackoverflow.com/questions/63104/smarter-vim-recovery
#
# For each swap file under $VIM_SWAP_DIR: recover its contents with vim,
# delete the swap file if it is empty or identical to the on-disk file,
# otherwise open a diff so the user can merge and decide.
TMPDIR=$(mktemp -d) || exit 1
# Temporary files for persistant storage of the recovery text and original filename
RECTXT="$TMPDIR/vim.recovery.$USER.txt"
RECFN="$TMPDIR/vim.recovery.$USER.fn"
# Clean up our own mess if we're ever cancelled
trap 'rm -f "$RECTXT" "$RECFN"; rm -rf "$TMPDIR"' 0 1 2 3 15
VIM_SWAP_DIR=${VIM_SWAP_DIR:-~/.vim/swap}
swap_count=0
# Cover both hidden (.file.swp) and visible (file.swp) swap names.
for swapfile in $VIM_SWAP_DIR/.*sw? $VIM_SWAP_DIR/*.sw?; do
# Only deal with real files
if [[ -f $swapfile ]]; then
let swap_count++
else
continue
fi
# Clean up any work from the previous recovery
rm -f "$RECTXT" "$RECFN"
# Load the contents of the recovered file and save its filename to $RECFN
# (-X: no X server; binary/nofixeol: preserve bytes exactly).
vim -X -r "$swapfile" \
-c "set binary; set nofixeol" \
-c "w! $RECTXT" \
-c "let fn=expand('%:p')" \
-c "new $RECFN" \
-c "exec setline( 1, fn )" \
-c w\! \
-c "qa!"
if [[ ! -f $RECFN ]]; then
echo "- removing empty swap file $swapfile"
rm -f "$swapfile"
continue
fi
currentfile="$(cat $RECFN)"
# diff --brief exits 0 when the recovered text matches the saved file.
if diff --ignore-blank-lines --brief "$currentfile" "$RECTXT"; then
echo "- removing redundant swap file $swapfile"
echo " for $currentfile"
rm -f "$swapfile"
else
echo "- swap file $swapfile contains changes: editing"
vim -n -d "$currentfile" "$RECTXT"
# Break early if we don't decide to delete the swap file
rm -i "$swapfile" || exit
fi
done
if [[ $swap_count -le 0 ]]; then
echo "No swap files found"
fi
# vim: set et fenc= ff=unix sts=2 sw=2 ts=2 :
| true
|
4e322df5f11764bb038054db72764f34cd969ce8
|
Shell
|
paranoidsp/bash-config
|
/bash_aliases
|
UTF-8
| 1,960
| 2.859375
| 3
|
[] |
no_license
|
# bash_aliases — personal shortcuts: apt/pacman package management,
# bbswitch GPU toggling, and quick directory navigation.
alias rmlck='sudo rm /var/lib/apt/lists/lock'
# bbswitch (NVIDIA Optimus) discrete-card control.
alias cardstat="cat /proc/acpi/bbswitch"
alias cardon="sudo tee /proc/acpi/bbswitch <<<ON"
alias cardoff="sudo tee /proc/acpi/bbswitch <<<OFF"
alias lns="ln -s"
alias rmlockarch="rm /var/lib/pacman/db.lck"
alias ..="cd .."
#if [$UID -ne 0 ]; then
alias reboot='sudo reboot'
alias update='sudo apt-get update'
alias aginstall='sudo apt-get install'
alias mount='sudo mount'
#fi
alias ...="cd ../../"
alias ....="cd ../../../"
alias .....="cd ../../../../"
alias svim="sudo vim"
alias hibernate='sudo pm-hibernate'
# Pacman alias examples
alias pacupg='sudo pacman -Syu' # Synchronize with repositories and then upgrade packages that are out of date on the local system.
alias pacin='sudo pacman -S' # Install specific package(s) from the repositories
alias pacins='sudo pacman -U' # Install specific package not from the repositories but from a file
alias pacre='sudo pacman -R' # Remove the specified package(s), retaining its configuration(s) and required dependencies
alias pacrem='sudo pacman -Rns' # Remove the specified package(s), its configuration(s) and unneeded dependencies
alias pacrep='pacman -Si' # Display information about a given package in the repositories
alias pacreps='pacman -Ss' # Search for package(s) in the repositories
alias pacloc='pacman -Qi' # Display information about a given package in the local database
alias paclocs='pacman -Qs' # Search for package(s) in the local database
# Additional pacman alias examples
alias pacupd='sudo pacman -Sy && sudo abs' # Update and refresh the local package and ABS databases against repositories
alias pacinsd='sudo pacman -S --asdeps' # Install given package(s) as dependencies of another package
alias pacmir='sudo pacman -Syy' # Force refresh of all package lists after updating /etc/pacman.d/mirrorlist
| true
|
f767c9bc50290b072f47b2f177cf83d8577b218a
|
Shell
|
sekoudosso82/InterviewPrep
|
/bash_learning/smoothstackTest.sh
|
UTF-8
| 16,975
| 3.75
| 4
|
[] |
no_license
|
input="2 40 -15"
echo $input
first=$(echo $input | awk '{print $2}')
if [ $first -lt 0 ]; then
first=$((-1*$first))
fi
echo $first
second=$(echo $input | awk '{print $3}')
if [ $second -lt 0 ]; then
second=$((-1*$second))
fi
echo $second
summ=$(($first+$second))
echo $summ
# 2.
# Given N lines of input, print the 3rd character from
# each line as a new line of output. It is guaranteed
# that each of the n lines of input will have a 3rd character.
while read line; do
third_char=$(echo $line | head -c 3 | tail -c 1)
echo $third_char
done
# 3.
# Display the 2nd and 7th character from each line of text.
while read line; do
second_char=$(echo $line | head -c 2 | tail -c 1)
seventh_char=$(echo $line | head -c 7 | tail -c 1)
echo "$second_char$seventh_char"
done
# 4.
# Display the first four characters from each line of text.
# Input Format: A text file with lines of ASCII text only.
# Output Format: The output should contain N lines. Each line should contain just the first four characters of the corresponding input line.
while read line; do
second_char=$(echo $line | head -c 4 )
echo "$second_char"
done
# 5.
# Given a tab delimited file with several columns (tsv format) print the first three fields.
# Input Format: A tab-separated file with lines of ASCII text only.
IFS=""
while read line; do
echo -e "$line" | cut -f -3
done
# 6.
# Display a range of characters starting at the 2nd position
# of a string and ending at the 7t position (both positions included).
# Input Format: A text file containing N lines of ASCII text only.
# Output Format: The output should contain N lines.
# Each line should contain the range of characters starting
# at the 2nd position of a string and ending at the position
# (both positions included).
IFS=""
while read line; do
second_char=$(echo $line | cut -c 2-7)
echo $second_char
done
# 7.
# Given a sentence, identify and display its fourth word.
# Assume that the space (' ') is the only delimiter
# between words.
# Input Format: A text file with lines of ASCII text only.
# Each line has exactly one sentence.
# Output Format: The output should contain N lines.
# For each input sentence, identify and display its
# fourth word. Assume that the space (' ') is
# the only delimiter between words.
while read line; do
echo $line | cut -d " " -f 4
done
# 8.
# Given a sentence, identify and display its first three words.
# Assume that the space (' ') is the only delimiter between words.
# Input Format: A text file with lines of ASCII text only.
# Each line has exactly one sentence.
# Output Format: The output should contain N lines. For each
# input sentence, identify and display its first three words.
# Assume that the space (' ') is the only delimiter between words.
while read line; do
echo -e "$line" | cut -d ' ' -f1-3
done
# 9.
# Print the characters from thirteenth position to the end.
# Input Format: A text file with lines of ASCII text only.
# Output Format: The output should contain N lines. For each input line, print the characters from thirteenth position to the end.
while read line; do
echo -e "$line" | cut -c13-
done
# 10.
# Given a tab delimited file with several columns (tsv format)
# print the fields from second fields to last field.
# Input Format: A tab-separated file with lines of ASCII text only.
# Output Format: The output should contain N lines.
# For each line in the input, print the fields from second fields to last field.
while read line; do
echo -e "$line" | cut -f2-
done
# 11.
# In this challenge, we practice using the tr command
# because it is a useful translation tool in Linux.
# In a given fragment of text, replace all parentheses ()
# with box brackets [].
# Input Format: A block of ASCII text.
# Output Format: Output the text with all parentheses
# () replaced with box brackets [].
# Sample Input :
# int i=(int)5.8 => int i=[int]5.8
# (23 + 5)*2 => [23 + 5]*2
cat | tr '()' '[]' $1
# or
# Alternative solution to exercise 11 (line-by-line tr).
while read line; do
echo $line | tr '()' '[]'
done
echo $line | tr '()' '[]'
# 12.
# In this challenge, we practice using the tr command
# because it is a useful translation tool in Linux.
# In a given fragment of text, delete all the lowercase
# characters a-z
# Input Format: A block of ASCII text.
# Output Format: Delete all the lowercase characters in the given block of text.
# Sample Input
# Hello => H
# World => W
# how are you =>
cat | tr -d '[:lower:]'
cat | tr -d a-z
cat | tr -d abcdefghijklmnopqrstuvwxyz
# 13.
# In a given fragment of text, replace all sequences of
# multiple spaces with just one space.
# Input Format: A block of ASCII text.
# Output Format: Replace all sequences of multiple spaces with just one space.
# Sample Input
# He llo
# Wor ld
# how are you
# Sample Output
# He llo
# Wor ld
# how are you
tr -s " "
tr -s ' ' ' '
cat $1 | tr -s ' ' ' '
tr -s [:space:]
# 14.
# You are provided a file with four space-separated
# columns containing the scores of students in three
# subjects. The first column, contains a single character
# (A-Z) - the identifier of the student. The next three
# columns have three numbers (each between 0 and 100,
# both inclusive) which are the scores of the students in
# English, Mathematics and Science respectively.
# Your task is to identify the performance grade for each student.
# If the average of the three scores is 80 or more, the grade is 'A'.
# If the average is 60 or above, but less than 80, the grade is 'B'.
# If the average is 50 or above, but less than 60, the grade is 'C'.
# Otherwise the grade is 'FAIL'.
# Input Format
# There will be no more than 10 rows of data.
# Each line will be in the format:
# [Identifier][Score in English][Score in Math][Score in Science]
# Output Format
# For each row of data, append a space, a colon, followed by another
# space, and the grade. Observe the format showed in the sample output.
# Sample Input
# A 25 27 50
# B 35 37 75
# C 75 78 80
# D 99 88 76
# Sample Output
# A 25 27 50 : FAIL
# B 35 37 75 : FAIL
# C 75 78 80 : B
# D 99 88 76 : A
while read line; do
student=$(echo $line | awk '{print $1}')
first=$(echo $line | awk '{print $2}')
second=$(echo $line | awk '{print $3}')
third=$(echo $line | awk '{print $4}')
summ=$(($first+$second+$third))
av=$(($summ/3))
if [ $av -ge 80 ]; then
echo "$line : A"
elif [ $av -ge 60 ]; then
echo "$line : B"
elif [ $av -ge 50 ]; then
echo "$line : C"
else
echo "$line : FAIL"
fi
done
# Join every pair of lines with ';' — three equivalent awk variants.
awk 'NR%2{printf$0";"}1-NR%2'
cat /dev/stdin | awk '{
if ( NR%2 == 0 )
printf $0"\n";
else
printf $0";";
}'
awk '{
if ( NR % 2 == 1 )
printf "%s;", $0
else
printf "%s\n", $0
}'
#15.
# Task: You are given a file with four space separated
# columns containing the scores of students in three subjects.
# The first column contains a single character (A-Z), the student
# identifier. The next three columns have three numbers each.
# The numbers are between 0 and 100, both inclusive. These numbers
# denote the scores of the students in English, Mathematics, and Science,
# respectively.
# Your task is to identify those lines that do not contain all three
# scores for students.
# Input Format
# There will be no more than 10 rows of data.
# Each line will be in the following format:
# [Identifier][English Score][Math Score][Science Score]
# Output Format
# For each student, if one or more of the three scores is missing, display:
# Not all scores are available for [Identifier]
# Sample Input
# A 25 27 50
# B 35 75
# C 75 78
# D 99 88 76
# Sample Output
# Not all scores are available for B
# Not all scores are available for C
# awk '{if ($4 == "") print "Not all scores are available for",$1;}'
# or
# awk '{if (NF < 4){print "Not all scores are available for "$1}}'
awk '{if (length($4)==0) print "Not all scores are available for " $1}'
#16.
# Task
# You are given a file with four space separated columns containing
# the scores of students in three subjects. The first column contains
# a single character (A-Z), the student identifier. The next three columns
# have three numbers each. The numbers are between 0 and 100, both inclusive.
# These numbers denote the scores of the students in English, Mathematics,
# and Science, respectively.
# Your task is to identify whether each of the students has passed or failed.
# A student is considered to have passed if (s)he has a score 50 or more in
# each of the three subjects.
# Input Format
# There will be no more than 10 rows of data.
# Each line will be in the following format:
# [Identifier][English Score][Math Score][Science Score]
# Output Format
# Depending on the scores, display the following for each student:
# [Identifier] : [Pass]
# or
# [Identifier] : [Fail]
# Sample Input
# A 25 27 50
# B 35 37 75
# C 75 78 80
# D 99 88 76
# Sample Output
# A : Fail
# B : Fail
# C : Pass
# D : Pass
# awk '{print $1,":", ($2<50||$3<50||$4<50) ? "Fail" : "Pass"}'
# awk '{if ($1>=50 && $2>=50 && $3>=50){print $1,": Pass"}else {print $1,": Fail"}}'
# awk '{
# if ($2 >=50 && $3 >= 50 && $4 >= 50)
# print $1,":","Pass";
# else
# print $1,":","Fail";
# }'
awk '{ if($2>=50 && $3>=50 && $4>=50) {print($1" : Pass")} else {print($1" : Fail")}}'
# **************** GREP
# Task
# You are given a text file that will be piped into your command through STDIN.
# Use grep to display all the lines that contain the word the in them.
# The search should be sensitive to case. Display only those lines
# of the input file that contain the word 'the'.
# Input Format: A text file will be piped into your command through STDIN.
# Output Format: Output only those lines that contain the word 'the'.
# The search should be case sensitive. The relative ordering of the
# lines in the output should be the same as it was in the input.
# grep " the "
# or
# grep -w 'the'
# or
# grep '\<the\>'
# or
# grep -w "the"
# or
grep "the "
# Task
# You are given a text file that will be piped into your command
# through STDIN. Use grep to display all those lines that contain
# the word the in them.
# The search should NOT be sensitive to case.
# Display only those lines of the input file that contain the word 'the'.
# Input Format: A text file will be piped into your command through STDIN.
# Output Format: Output only those lines that contain the word 'the'.
# The search should NOT be case sensitive. The relative ordering of
# the lines in the output should be the same as it was in the input.
# grep -i 'the '
# or
# grep -wi "the"
# or
# grep -i "\bthe\b"
# or
cat | grep -i 'the '
# or
grep -w -i 'the'
# Task
# You are given a text file that will be piped into your command
# through STDIN. Use grep to remove all those lines that contain
# the word 'that'. The search should NOT be sensitive to case.
# Input Format: A text file will be piped into your command through STDIN.
# Output Format: Only display those lines that do NOT contain the word 'that'.
# The relative ordering of the lines should be the same as it was in the input file.
# grep -viw 'that'
# -v : Invert the sense of matching
# -i : Ignore case distinctions
# -w : Match only those lines containing the whole word
# or
# egrep -v "That|that"
# or
egrep -iwv 'that'
# Given a text file, which will be piped to your command through STDIN,
# use grep to display all those lines which contain any of the following words in them:
# the or that or then or those
# The search should not be sensitive to case. Display only those lines
# of an input file, which contain the required words.
# Input Format: A text file with multiple lines will be piped to your
# command through STDIN.
# Output Format: Display the required lines without any changes to
# their relative ordering.
# grep -iw 'that\|the\|then\|those'
# or
# grep -iw -e "the" -e "that" -e "then" -e "those"
# or
grep -iw -E "the|that|then|those"
# Current Task
# Given an input file, with N credit card numbers, each in a new
# line, your task is to grep out and output only those credit card
# numbers which have two or more consecutive occurences of the same
# digit (which may be separated by a space, if they are in different
# segments). Assume that the credit card numbers will have 4 space
# separated segments with 4 digits each.
# If the credit card number is 1434 5678 9101 1234, there are two
# consecutive instances of 1 (though) as highlighted in box
# brackets: 1434 5678 910[1] [1]234
# Here are some credit card numbers where consecutively repeated digits
# have been highlighted in box brackets. The last case does not have any
# repeated digits: 1234 5678 910[1] [1]234
# 2[9][9][9] 5178 9101 [2][2]34
# [9][9][9][9] 5628 920[1] [1]232
# 8482 3678 9102 1232
# Input Format: N credit card numbers. Assume that the credit card
# numbers will have 4 space separated segments with 4 digits each.
# Constraints: 1<=N<=20
# However, the value of N does not matter while writing your command.
# Output Format: Display the required lines after filtering with grep,
# without any changes to their relative ordering in the input file.
# Sample Input
# 1234 5678 9101 1234
# 2999 5178 9101 2234
# 9999 5628 9201 1232
# 8482 3678 9102 1232
# Sample Output
# 1234 5678 9101 1234
# 2999 5178 9101 2234
# 9999 5628 9201 1232
# grep '\([0-9]\) *\1'
# or
# NOTE(review): \d and \s are PCRE classes; plain grep BRE may not support
# them — the POSIX form commented above is the portable one.
grep '\(\d\)\s*\1'
# Task: For each line in a given input file, transform the first
# occurrence of the word 'the' with 'this'. The search and transformation
# should be strictly case sensitive.
# Input Format: A text file will be piped into your command through STDIN.
# Output Format: Transform the text as specified by the task.
# sed 's/ the / this /'
# # sed -e 's/\<the\>/this/'
# # sed 's/\<the\>/this/'
# # sed 's/the /this /'
# For each line in a given input file, transform all the occurrences of the word 'thy' with 'your'. The search should be case insensitive, i.e. 'thy', 'Thy', 'tHy' etc. should be transformed to 'your'.
# Input Format: A text file will be piped into your command via STDIN.
# Output Format: Transform and display the text as required in the task.
# sed 's/thy/your/ig'
# sed 's/\bthy\b/your/Ig'
# sed 's/\<thy\>/your/ig'
# sed 's/[tT][hH][yY]/your/g'
# Task
# Given an input file, in each line, highlight all the occurrences of 'thy' by wrapping them up in brace brackets. The search should be case-insensitive.
# Input Format: A text file will be piped to your command via STDIN.
# Output Format: Highlight all occurrences of 'thy' as shown in the example below.
# sed -e 's/thy/{thy}/g' -e 's/Thy/{Thy}/g'
# sed -e 's/[tT]hy/{&}/g'
# '&' re-inserts the matched text; flags i (case-insensitive) + g (global).
sed 's:thy:{&}:ig'
# Task
# Given n lines of credit card numbers, mask the first 12 digits of each
# credit card number with an asterisk (i.e., *) and print the masked card
# number on a new line. Each credit card number consists of four
# space-separated groups of four digits. For example, the credit
# card number 1234 5678 9101 1234 would be masked and printed
# as **** **** **** 1234.
# Input Format: Each line contains a credit card number in the
# form dddd dddd dddd dddd, where d denotes a decimal digit (i.e., 0 through 9).
# There are a total of n lines of credit card numbers.
# Output Format: For each credit card number, print its masked version on a new line.
# Sample Input
# 1234 5678 9101 1234
# 2999 5178 9101 2234
# 9999 5628 9201 1232
# 8888 3678 9101 1232
# Sample Output
# **** **** **** 1234
# **** **** **** 2234
# **** **** **** 1232
# **** **** **** 1232
# Mask each 4-digit group followed by a space, leaving the last group intact.
sed 's/[0-9][0-9][0-9][0-9] /**** /g' #my solution
# sed 's/[0-9]\+ /**** /g'
# sed -r 's/[0-9]{4} /**** /g'
# awk '{print "****","****","****",$4}'
# Task: Given an input file, with N credit card numbers, each in a
# new line, your task is to reverse the ordering of segments in each
# credit card number. Assume that the credit card numbers will have 4
# space separated segments with 4 digits each.
# If the original credit card number is 1434 5678 9101 1234,
# transform it to 1234 9101 5678 1434.
# Input Format: N credit card numbers, each in a new line, credit card numbers will have 4 space separated segments with 4 digits each.
# Output Format: N lines, each containing a credit card number with the ordering of its segments reversed.
# Sample Input
# 1234 5678 9101 1234
# 2999 5178 9101 2234
# 9999 5628 9201 1232
# 8888 3678 9101 1232
# Sample Output
# 1234 9101 5678 1234
# 2234 9101 5178 2999
# 1232 9201 5628 9999
# 1232 9101 3678 8888
# sed 's/\([[:digit:]]\{4\}\) \([[:digit:]]\{4\}\) \([[:digit:]]\{4\}\) \([[:digit:]]\{4\}\)/\4 \3 \2 \1/'
# Capture the four segments and emit them in reverse order.
sed -r 's/(.... )(.... )(.... )(....)/\4 \3\2\1/'
# sed -r 's/(.+ )(.+ )(.+ )(....)/\4 \3\2\1/'
# sed 's/\([[:digit:]]\{4\}\) \([[:digit:]]\{4\}\) \([[:digit:]]\{4\}\) \([[:digit:]]\{4\}\)/\4 \3 \2 \1/'
# awk '{print $4" "$3" "$2" "$1}'
| true
|
d4e0ba44029fd1f59c2b0f6cb2a1561fbbad1f66
|
Shell
|
tdowg1/dotfiles
|
/work-machines/walrus/.bashrc
|
UTF-8
| 316
| 2.578125
| 3
|
[
"MIT"
] |
permissive
|
export OS=$(uname -s)
export TZ=UTC
# Source 'system' bashrc
if [[ -f /etc/bashrc ]]; then
source /etc/bashrc
fi
######################################################################
# For NetView
######################################################################
export LANG=en_US
export LC_MESSAGES=en_US
| true
|
eb40580bf981c28198c6a1b141a3d99bf333b1e5
|
Shell
|
pb30/dotfiles
|
/bin/cheat
|
UTF-8
| 565
| 4
| 4
|
[] |
no_license
|
#!/bin/sh
CHEATDIR="$HOME/.cheats"
COMMAND=$1
while getopts ":e:" opt; do
case $opt in
e)
COMMAND=$2
if [ ! -f $CHEATDIR/$COMMAND ]; then
wget http://cheat.sh/$COMMAND -qO $CHEATDIR/$COMMAND
fi
"${EDITOR:-vi}" $CHEATDIR/$COMMAND
exit 1
;;
\?)
echo "Invalid option: -$OPTARG" >&2
exit 1
;;
:)
echo "Option -$OPTARG requires an argument." >&2
exit 1
;;
esac
done
if [ -f $CHEATDIR/$COMMAND ]; then
cat $CHEATDIR/$COMMAND
else
curl http://cheat.sh/$COMMAND
fi
| true
|
f7c97eaabdff26c9c52072f7f92376e032672861
|
Shell
|
VladVons/sh-conf
|
/pkg/console/debootstrap/conf/lxde_a/sources.list.sh
|
UTF-8
| 416
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
#--- VladVons@gmail.com
AddDeb()
{
aPkg="$1"; aStr="$2";
echo $aStr >> /etc/apt/sources.list.d/$aPkg.list
}
AddDeb "google-chrome-stable" "deb http://dl.google.com/linux/chrome/deb/ stable main"
wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | sudo apt-key add -
AddDeb "dropbox" "deb http://linux.dropbox.com/ubuntu/ vivid main"
apt-key adv --keyserver pgp.mit.edu --recv-keys 5044912E
| true
|
1ac1ff7b61ae1cca3eb528d1e13e3ea81f91faf0
|
Shell
|
mayan404/extensions
|
/PHP-FPDF/FPDF-master/ttf2pt1-chinese-3.4.0/mkrel
|
UTF-8
| 2,534
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/sh
#
# Copyright (c) 2000
# Sergey A. Babkin. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# Sergey A. Babkin (sab123@hotmail.com, babkin@users.sourceforge.net)
#
#
# Script to create a release or shapshot archive.
# Also checks for very basic inconsistences.
# Expects that it would be run from the current directory of chinese-maps,
# that the ttf2pt1 directory with corresponding version is ../ttf2pt1
# and that the parent directory is the place to create archives.
# Expects that the CVS environment variables are set properly.
VER=`grep TTF2PT1_VERSION ../ttf2pt1/version.h | cut -d\" -f2`
case "$1" in
snapshot)
echo "$VER" | egrep '^[0-9][0-9]*\.[0-9].*-CURRENT$' || {
echo "mkrel: version.h must contain *-CURRENT to create a snapshot" >&2
exit 1
}
snapdate=`date "+ %y %m %d " | sed 's/ \([0-9]\) / 0& /g;s/ //g'`
NEWVER=`echo "$VER" | sed "s/-CURRENT/-SNAP-$snapdate/"`
TAG="-D tomorrow"
;;
release)
echo "$VER" | egrep '^[0-9][0-9]*\.[0-9][.0-9]*$' || {
echo "mkrel: version.h must not be -CURRENT to create a release" >&2
exit 1
}
NEWVER="$VER"
TAG=`echo "-r ttf2pt1-$VER" | sed \
's/\(-[0-9][0-9]*\.[0-9]\)$/&.0/;s/\./-/g'`
;;
*)
echo "use: mkrel [snapshot|release]" >&2
exit 1
;;
esac
cd .. || {
echo "mkrel: can't cd to .." >&2
exit 1
}
rm -f ttf2pt1-chinese-$NEWVER.tgz ttf2pt1-chinese-$NEWVER.zip
rm -rf ttf2pt1-chinese-$NEWVER
echo "cvs -z9 export $TAG -d ttf2pt1-chinese-$NEWVER chinese-maps"
cvs -z9 export $TAG -d ttf2pt1-chinese-$NEWVER chinese-maps || {
echo "mkrel: unable to export from CVS" >&2
echo "mkrel: check that the CVS tree is properly tagged" >&2
exit 1
}
tar czvf ttf2pt1-chinese-$NEWVER.tgz ttf2pt1-chinese-$NEWVER || {
echo "mkrel: can't create .tgz archive" >&2
exit 1
}
zip -u -r ttf2pt1-chinese-$NEWVER.zip ttf2pt1-chinese-$NEWVER || {
echo "mkrel: can't create .zip archive" >&2
exit 1
}
| true
|
2f98fda0e482b7744aedbfaabf8d5eecd8e16dae
|
Shell
|
pradeeppasupuleti/Python-Practice
|
/Shell_scripts/Unix/ServerHealthCheck_2.0.sh
|
UTF-8
| 13,082
| 3.328125
| 3
|
[] |
no_license
|
#!/usr/bin/ksh
#
# Health check script - Solaris
#
# Author: Pradeep K Pasupuleti
#
# Created on:
#
ECHO="/bin/echo"
if [ -f "/usr/ucb/whoami" ]; then
WHOAMI="/usr/ucb/whoami"
else
$ECHO "Error: whoami command not found"
exit
fi
if [ $($WHOAMI) != "root" ]; then
$ECHO "Please execute the script with the root user"
exit 1
fi
if [ -f "/bin/cat" ]; then
CAT="/bin/cat"
else
$ECHO "Error: cat command not found"
exit
fi
if [ -f "/bin/date" ]; then
DATE1="/bin/date"
else
$ECHO "Error: date command not found"
exit
fi
if [ -f "/usr/bin/nawk" ]; then
NAWK="/usr/bin/nawk"
else
$ECHO "Error: nawk command not found"
exit
fi
if [ -f "/usr/bin/grep" ]; then
GREP="/usr/bin/grep"
else
$ECHO "Error: grep command not found"
exit
fi
if [ -f "/usr/bin/awk" ]; then
AWK="/usr/bin/awk"
else
$ECHO "Error: awk command not found"
exit
fi
if [ -f "/usr/bin/nawk" ]; then
NAWK="/usr/bin/nawk"
else
$ECHO "Error: nawk command not found"
exit
fi
if [ -f "/usr/bin/sed" ]; then
SED="/usr/bin/sed"
elif [ -f "/bin/sed" ]; then
SED="/bin/sed"
else
$ECHO "Error: sed command not found"
exit
fi
if [ -f "/usr/bin/egrep" ]; then
EGREP="/usr/bin/egrep"
else
$ECHO "Error: egrep command not found"
exit
fi
if [ -f "/usr/local/soe/bin/bdf" ]; then
BDF="/usr/local/soe/bin/bdf"
else
$ECHO "Error: bdf command not found"
fi
if [ -f "/usr/sbin/ndd" ]; then
NDD="/usr/sbin/ndd"
else
$ECHO "Error: ndd command not found"
fi
if [ -f "/usr/sbin/dladm" ]; then
DLADM="/usr/sbin/dladm"
else
$ECHO "Error: dladm command not found"
fi
if [ -f "/usr/bin/kstat" ]; then
KSTAT="/usr/bin/kstat"
else
$ECHO "Error: Kstat command not found"
fi
if [ -f "/usr/bin/netstat" ]; then
NETSTAT="/usr/bin/netstat"
else
$ECHO "Error: netstat command not found"
fi
if [ -f "/sbin/ifconfig" ]; then
IFCFG="/sbin/ifconfig"
elif [ -f "/usr/sbin/ifconfig" ]; then
IFCFG="/usr/sbin/ifconfig"
else
$ECHO "Error: ifconfig command not found"
fi
DATE=$($DATE1 "+%m/%d/%Y %H:%M")
HOSTNAME1=$(uname -n)
OS_VER=$($CAT /etc/release | grep -i "Solaris" | $AWK '{print $1" "$2}')
OS_DETAILS ()
{
OS_VER=$($CAT /etc/release | $GREP -i "Solaris" | $AWK '{print $1" "$2}')
KER_ID=$(uname -X | $GREP "KernelID" | $AWK '{print $3}')
UPTIME=$(uptime | $AWK '{print $3}')
LOAD_AVG=$(uptime | $NAWK -F "load average:" '{print $2}' | $SED -e 's/ //')
VENDOR=$(prtdiag | $GREP -i "System Configuration")
}
HW_DETAILS ()
{
PROC_VER=$(psrinfo -pv | $AWK '{print $2}' | uniq)
N_PROC=$(psrinfo -p)
V_PROC=$(psrinfo -v | wc -l)
P_MEM=$(prtconf | $GREP Memory | cut -d ":" -f2 | $AWK '{print $1}')
if [ -f "/opt/SUNWsneep/bin/showplatform" ]; then
SERIAL=$(/opt/SUNWsneep/bin/showplatform)
else
SERIAL=$(/usr/sbin/eeprom |grep -i ChassisSerialNumber | awk '{print $3}')
fi
}
FS_CHECK ()
{
for i in $(df -k | grep -v "^$"| $GREP -v "^Filesystem" | $AWK '{print $1}')
do
FS=$(df -k | grep -v "^$" | $GREP $i | $AWK '{print $6}')
USED=$(df -k | grep -v "^$" | $GREP $i | $AWK '{ print $5 }' | sed -e 's/%//')
if [ "$USED" -gt 85 ]; then
FS_FULL="$FS_FULL; $FS ${USED}%"
fi
done
if [ -z "FS_FULL" ]; then
FS_FULL="All File system(s) fine."
else
FS_FULL="`$ECHO $FS_FULL| $SED -e 's/^;//'`"
fi
for j in $($BDF -i | $GREP -v "^Filesystem" | $AWK '{print $1}')
do
IUSED=$($BDF -i | $GREP $i | $AWK '{ print $8 }' | $SED -e 's/%//')
if [ "$IUSED" -gt 85 ]; then
FS_INODEFULL="$FS_INODEFULL; $j"
fi
done
if [ "$FS_INODEFULL" = "" ]; then
FS_INODEFULL="All Filesystems are fine."
fi
DISKERR=$(iostat -En | grep -i hard | awk '$7 > 30 || $10 > 30 {print $0}' |wc -l | awk '{print $1}')
if [ $DISKERR -eq 0 ]
then
DISK_STATUS="No Disk errors found"
else
DISK_STATUS="Found "$DISKERR" disk errors. Please check the "/tmp/"$HOSTNAME1"_eror.log" for errors."
$ECHO "+++++++++++++ Hard Disk Erros +++++++++++++++" >> "/tmp/"$HOSTNAME1"_eror.log"
iostat -En |grep -i hard | awk '$7 > 30 || $10 > 30 {print $0}' >> "/tmp/"$HOSTNAME1"_eror.log"
fi
}
PROCESS_CHECK () {
DEFUNCT_LIST=$(ps -elf | grep -i defunct | wc -l | sed -e 's/[ \t]*//')
if [ "$DEFUNCT_LIST" = "0" ]; then
DEFUNCT_LIST="No defunct processes."
else
DEFUNCT_LIST="Found $DEFUNCT_LIST defunct processes."
fi
}
BACKUP_CHECK () {
BACKUP_VER=$(cat /usr/openv/netbackup/bin/version)
/usr/openv/netbackup/bin/bpclimagelist > /dev/null 2>&1
if [ $? -eq 0 ]; then
RECENT_BK=$(/usr/openv/netbackup/bin/bpclimagelist | head -3 | tail -1 | $AWK '{ print $1, $2, $7 $8 }')
BACKUP="Working and recent backup is on $RECENT_BK."
else
BACKUP="No backup"
fi
}
USM_CHECK () {
#/opt/perf/bin/perfstat > /dev/null 2>&1
if [ -f "/opt/perf/bin/perfstat" ]; then
OUTPUT=$(/opt/perf/bin/perfstat)
ERR_CNT=$($ECHO "$OUTPUT" | $EGREP -i -c "Aborting|not active|Aborted")
if [ $ERR_CNT -ne 0 ]; then
USM_STATUS="Few agents are not running in USM."
else
USM_STATUS="Running fine."
fi
else
USM_STATUS="USM agent not found."
fi
}
NET_CHECK () {
#auto neg=ndd -get /dev/bge1 adv_autoneg_cap
#link_status=ndd -get /dev/bge0 link_status
#number of interface = netstat -in
#link_speed=ndd -get /dev/bge0 link_speed
#NET=$(netstat -in | grep -v "Name" | awk '{print $1}' | grep -v "^$")
$ECHO "\t+-------------+--------+-------------------+-----------------+-----------------+-----------+---------------------+"
$ECHO "\t| Interface | Status | Mac-ID | Speed | Duplex | Auto-Neg | IP Address |"
$ECHO "\t+-------------+--------+-------------------+-----------------+-----------------+-----------+---------------------+"
BGNET=$($NETSTAT -in | $GREP bge | $AWK '{print $1}')
if [ ! -z $BGNET ]; then
for i in $BGNET
do
INET=$i
if [ $($NDD -get /dev/$i link_status) -ge 0 ]; then
LINK="UP"
else
LINK="DOWN"
fi
MAC=$($IFCFG $i | $GREP ether | $AWK '{print $2}')
ADD=$($IFCFG $i | $GREP inet | $AWK '{print $2}')
if [ $($NDD -get /dev/$i adv_autoneg_cap) -ge 1 ]; then
AUTO="ON"
else
AUTO="OFF"
fi
SPEED=$($NDD -get /dev/$i link_speed)
if [ $($NDD -get /dev/$i link_duplex) -ge 1 ]; then
DUP="FULL"
else
DUP="HALF"
fi
printf "\t""| %-11s | %-6s | %-15s | %-15s | %-15s | %-9s | %-19s |\n" "$INET" "$LINK" "$MAC" "$SPEED" "$DUP" "$AUTO" "$ADD"
done
fi
CENET=$($NETSTAT -in | $GREP ce | $AWK '{print $1}' | $NAWK -F"ce" '{print $2}')
if [ ! -z $CENET ]; then
for i in $CENET
do
INET="ce$i"
if [ $($KSTAT -m ce -i $i -s link_up | $GREP "link_up" | $AWK '{print $2}') -eq 1 ]; then
LINK="UP"
else
LINK="DOWN"
fi
MAC=$($IFCFG ce$i | $GREP ether | $AWK '{print $2}')
ADD=$($IFCFG ce$i | $GREP inet | $AWK '{print $2}')
if [ $($KSTAT -m ce -i $i -s cap_autoneg | $GREP "cap_autoneg" | $AWK '{print $2}') -ge 1 ]; then
AUTO="ON"
else
AUTO="OFF"
fi
SPEED=$($KSTAT -m ce -i 0 -s link_speed | $GREP "link_speed" | $AWK '{print $2}')
if [ $($KSTAT -m ce -i 0 -s link_duplex | $GREP "link_duplex" | $AWK '{print $2}') -ge 1 ]; then
DUP="FULL"
else
DUP="HALF"
fi
printf "\t""| %-11s | %-6s | %-15s | %-15s | %-15s | %-9s | %-19s |\n" "$INET" "$LINK" "$MAC" "$SPEED" "$DUP" "$AUTO" "$ADD"
done
fi
NXGENET=$($NETSTAT -in | $GREP nxge | $AWK '{print $1}')
if [ ! -z $NXGENET ]; then
for i in $NXGENET
do
INET="$i"
if [ $( $DLADM show-dev $i 2> /dev/null | awk '{print $3;}') -eq 1 ]; then
LINK="UP"
else
LINK="DOWN"
fi
MAC=$($IFCFG $i | $GREP ether | $AWK '{print $2}')
ADD=$($IFCFG $i | $GREP inet | $AWK '{print $2}')
if [ $($NDD -get /dev/$i adv_autoneg_cap) -ge 1 ]; then
AUTO="ON"
else
AUTO="OFF"
fi
SPEED=$($DLADM show-dev $i 2> /dev/null | awk '{print $5;}')
DUP=$(show-dev $i 2> /dev/null | awk '{print $NF;}' | tr "[a-z]" "[A-Z]")
printf "\t""| %-11s | %-6s | %-15s | %-15s | %-15s | %-9s | %-19s |\n" "$INET" "$LINK" "$MAC" "$SPEED" "$DUP" "$AUTO" "$ADD"
done
fi
$ECHO "\t+-------------+--------+-------------------+-----------------+-----------------+-----------+---------------------+"
}
CHECK_META () {
METASYN=$(metastat | grep -i "%" | wc -l | sed -e 's/^[ \t]*//')
if [ $METASYN -eq 0 ]; then
META_SYN_STAT="No meta device's are syncing in Background."
else
META_SYN_STAT="Found $METASYN device's are syncing in Background."
fi
METADEV=$(metastat | grep -i maintenance | wc -l | sed -e 's/^[ \t]*//')
if [ $METADEV -eq 0 ]; then
META_DEV_STAT="No meta device's are in Maintenance mode."
else
META_DEV_STAT="Found $METASYN meta device's are in Maintenance mode."
fi
}
VXVM_CHECK () {
VXVM_VER=`/usr/sbin/modinfo |grep -i vxvm | grep -v portal |awk '{print $8}'| sed -e 's/://g' | uniq`
RPM_C=`/usr/sbin/modinfo |grep -i vxvm | grep -v portal |awk '{print $8}' | cut -d "_" -f2 | cut -c 5- | wc -l`
if [ $RPM_C -ne 0 ]; then
DISK_G=`vxdg list | awk '{print $1}' | grep -v "NAME"`
DISK_GC=`vxdg list | awk '{print $1}' | grep -v "NAME" | wc -l`
for i in $DISK_G
do
if [ `vxprint -g $i | grep "^v" | awk '{print $4}' | grep "^v" | grep -v "ENABLED" | wc -l` -ne 0 ]; then
V_STATUS="$V_STATUS,$i"
fi
done
if [ -z "$V_STATUS" ]; then
V_STATUS="All Voluumes under DiskGroup(s) fine."
else
V_STATUS="Volumes under Diskgroup(s) failed:`echo $V_STATUS | sed 's/,/ /'`"
fi
for i in $DISK_G
do
if [ `vxprint -g $i | grep "^sd" | awk '{print $4}' | grep -v "ENABLED"| wc -l` -ne 0 ]; then
SD_STATUS="$SD_STATUS,$i"
fi
done
if [ -z "$SD_STATUS" ]; then
SD_STATUS="All Sub Disks under DiskGroup(s) fine."
else
SD_STATUS="Subdisk under Diskgroup(s) failed:`echo $SD_STATUS | sed 's/,/ /'`"
fi
for i in $DISK_G
do
for j in `vxprint -g $i | grep "^dm" | awk '{print $2}'`
do
if [ `vxdisk list | grep "$j" | awk '{print $5}' | grep online | wc -l` -eq 0 ]; then
PD_STATUS="$PD_STATUS,$j"
fi
done
done
if [ -z "$PD_STATUS" ]; then
PD_STATUS="All Physical disk(s) are online."
else
PD_STATUS="Physical disk offline for : `echo $PD_STATUS | sed 's/,/ /'`"
fi
#SAN_ST=`vxdmpadm listenclosure all | grep -i disconnected | awk '{print $1}' | tr '\n' ';' ; printf "\n"`
SAN_ST=`vxdmpadm listenclosure all | grep -i disconnected | awk '{print $1}' | wc -l`
SNA_STC=`vxdmpadm listenclosure all | grep -i disconnected | awk '{print $1}' | tr '\n' ';' ; printf "\n"`
if [ $SAN_ST -eq 0 ]; then
SAN_STATUS="All SAN storages are attached."
else
SAN_STATUS="`echo $SNA_STC SAN` encloser were disconnected."
fi
echo "\tVeritas FileSystem : $VXVM_VER"
echo "\tDisk groups count : $DISK_GC"
echo "\tVolumes Status : $V_STATUS"
echo "\tSubDisk(s) Status : $SD_STATUS"
echo "\tP Disk Status : $PD_STATUS"
echo "\tSAN DISK Status : $SAN_STATUS"
else
echo "\t Veritas Volume Status : Host not configure with Vertias Volume Manager."
fi
}
## Main
$ECHO "***************************Server's health check has started ($DATE) on $HOSTNAME1 *********************************"
$ECHO "OS details:"
$ECHO "-----------"
OS_DETAILS
$ECHO "\t Hostname : $HOSTNAME1"
$ECHO "\t OS Version : $OS_VER"
$ECHO "\t Kernel ID : $KER_ID"
$ECHO "\t Vendor : $VENDOR"
$ECHO "\t Uptime in day(s) : $UPTIME day(s)"
$ECHO "\t Load Average : $LOAD_AVG"
$ECHO "\n"
$ECHO "HW details:"
$ECHO "-----------"
HW_DETAILS
$ECHO "\t Processor Version : $PROC_VER"
$ECHO "\t Serial Number : $SERIAL"
$ECHO "\t No of Physical Procs : $N_PROC"
$ECHO "\t No of Virtual Procs : $N_PROC"
$ECHO "\t Physical Memory : $P_MEM (MB)"
$ECHO "\n"
$ECHO "FileSystem Utilization:"
$ECHO "-----------------------"
FS_CHECK
$ECHO "\t Space status : $FS_FULL"
$ECHO "\t Inode status : $FS_INODEFULL"
$ECHO "\t Disk Errors status : $DISK_STATUS"
$ECHO "\n"
$ECHO "Processes details:"
$ECHO "------------------"
PROCESS_CHECK
$ECHO "\t Defunct List : $DEFUNCT_LIST"
$ECHO "\n"
$ECHO "Backup:"
$ECHO "-------"
BACKUP_CHECK
$ECHO "\t Version : $BACKUP_VER"
$ECHO "\t Status : $BACKUP"
$ECHO "\n"
$ECHO "Meta Devices Check:"
$ECHO "-------------------"
CHECK_META
$ECHO "\t Sync status : $META_SYN_STAT"
$ECHO "\t Maintenance status : $META_DEV_STAT"
$ECHO "\n"
$ECHO "USM:"
$ECHO "----"
USM_CHECK
$ECHO "\t Agent Status : $USM_STATUS"
$ECHO "\n"
$ECHO "Veritas Volume Manger and SAN DISK"
$ECHO "----------------------------------"
VXVM_CHECK
$ECHO "\n"
$ECHO "Network Details:"
$ECHO "----------------"
NET_CHECK
$ECHO "\n"
$ECHO "***************************Server's health check has completed ($DATE) on $HOSTNAME1 *********************************"
| true
|
86059638eb86020e9a806aad4ca031a09567a0ff
|
Shell
|
TomLous/medium-spark-k8s
|
/scripts/7-create-spark-runner.sh
|
UTF-8
| 526
| 2.828125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
name="spark-runner"
cd ${BASH_SOURCE%/*}/../docker/${name}
eval $(minikube docker-env)
name="spark-runner"
registry="localhost:5000"
version="0.1"
docker build \
--build-arg VCS_REF=$(git rev-parse --short HEAD) \
--build-arg BUILD_DATE=$(date -u +"%Y-%m-%dT%H:%M:%SZ") \
--build-arg VERSION=0.1 \
-t ${registry}/${name}:${version} . \
&& docker push ${registry}/${name}:${version} \
&& echo "Build & pushed ${registry}/${name}:${version}"
curl -s $(minikube ip):5000/v2/_catalog | jq
| true
|
c25097053bb041ef7db6e4beecaddaccfd3ffdcf
|
Shell
|
leloulight/PKGBUILD
|
/porntime-bin/PKGBUILD
|
UTF-8
| 2,009
| 2.53125
| 3
|
[] |
no_license
|
# Maintainer: M0Rf30
# Contributor: Ricardo Funke <ricardo [at] gmail [dot] com>
# Contributor: Attila Bukor <r1pp3rj4ck [at] w4it [dot] eu>
# Contributor: Iwan Timmer <irtimmer [at] gmail [dot] com>
# Contributor: Eric Engestrom <aur [at] engestrom [dot] ch>
# Contributor: Ricardo Band <me [at] xengi [dot] de>
# Contributor: Axilleas Pipinellis (aka axil42) <axilleas [at] archlinux [dot] info>
# Contributor: UshakovVasilii <UshakovVasilii [at] yahoo [dot] com>
# Contributor: Giulio Fidente <gfidente [at] gmail [dot] com>
pkgname=porntime-bin
pkgver=0.3.8
pkgrel=4
pkgdesc="Stream porn movies from torrents. Skip the downloads. Launch, click, watch."
arch=('x86_64' 'i686')
url="http://porntime.ws/"
license=('GPL3')
depends=('ttf-liberation' 'gconf' 'nss' 'libxtst' 'gtk2' 'alsa-lib')
optdepends=('net-tools: necessary for the new vpn feature')
provides=('porntime')
conflicts=('porntime')
options=('!strip')
install="porntime.install"
md5sums=('309ed5f0d93e87ff0f33f7368749e079'
'69d7b9c45f83c399b1c68e85c2921a9a'
'eda173affdfd1630ff6a0bfa566bc1de'
'886a43e17c69dd434b2bbb243fefee75')
_platform=X64
if [ "$CARCH" = 'i686' ]; then
_platform=X32
md5sums[0]='90f72d88bef9758b5ba09b05c3e45e49'
fi
source=("http://porntime.ws/PornTime${_platform}.tar.gz"
"icon.png"
"porntime.install"
"porntime.desktop")
package() {
cd "${srcdir}"
install -dm755 "${pkgdir}/opt/${pkgname}/"
install -dm755 "${pkgdir}/usr/bin"
install -dm755 "${pkgdir}/usr/share"
# Program
echo "${pkgdir}/opt/${pkgname}/"
install -Dm755 ${srcdir}/PornTime "${pkgdir}/opt/${pkgname}/"
install -Dm644 ${srcdir}/nw.pak "${pkgdir}/opt/${pkgname}/"
install -Dm644 ${srcdir}/libffmpegsumo.so "${pkgdir}/opt/${pkgname}/"
# Link to program
ln -s "/opt/${pkgname}/PornTime" "${pkgdir}/usr/bin/porntime"
# Desktop file
install -Dm644 "${srcdir}/porntime.desktop" "${pkgdir}/usr/share/applications/porntime.desktop"
# Icon
install -Dm644 "${srcdir}/icon.png" "${pkgdir}/usr/share/pixmaps/porntime.png"
}
| true
|
cd4787b1504a7541879f5f273a1f3cd954d2fdf4
|
Shell
|
GaomingPan/smartAC
|
/scripts/WifiSmartAcCmd/accmd_setdhcp
|
UTF-8
| 1,334
| 3.375
| 3
|
[] |
no_license
|
#!/bin/sh
#
#
#
##########################
#
# $(1): cmd id
# $(2): gw_ac_id
# $(3): dhcp IP start
# $(4): dhcp IP limit numbers
# $(5): dhcp leasetime min:2m, normal:12h
# $(6): dhcp switch, 0 OPEN dhcp, 1 CLOSE dhcp
#
#
start=$3
limit=$4
leasetime=$5
ignore=$6
. /usr/bin/accmd_common
if [ $# != 6 ];then
ErrorMessage $1 $2 "set dhcp parameter error."
exit 1
fi
if [ $start == "" ];then
start=100
fi
if [ $limit == "" ];then
limit=150
fi
if [ $leasetime == "" ];then
leasetime=12
fi
case $ignore in
"1")
ignore=1
;;
*)
ignore=0
;;
esac
uci set dhcp.lan.start=$start
uci set dhcp.lan.limit=$limit
uci set dhcp.lan.leasetime=$leasetime
uci set dhcp.lan.ignore=$ignore
uci commit dhcp
ret=$?
if [ $ret != 0 ];then
ErrorMessage $1 $2 "uci commit dhcp ERROR."
exit 1
fi
RebootInterface lan
ret=$?
if [ $ret != 0 ];then
ErrorMessage $1 $2 "reboot lan interface error."
exit 1
fi
start=$(uci get dhcp.lan.start)
limit=$(uci get dhcp.lan.limit)
leasetime=$(uci get dhcp.lan.leasetime)
ignore=$(uci get dhcp.lan.ignore)
if [ "$ignore" == "" ]; then
ignore=0
fi
info="Set DHCP Success."
echo "{\"cmd_id\":\"$1\",\"gw_ac_id\":\"$2\",\"status\":true,\"info\":\"$info\",\"dhcp\":{\"start\":$start,\"limit\":$limit,\"leasetime\":\"$leasetime\",\"ignore\":$ignore}}"
| true
|
baed9a6168ad59b8ab467b592d85802b5dafea6a
|
Shell
|
mjkelly/experiments
|
/auto_timezone.sh
|
UTF-8
| 827
| 3.984375
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Uses free online services to automatically set your timezone. This is
# intended for a laptop that moves from place to place with you.
set -e
set -u
# An API key is free for 1000 requests per day. See:
# https://ipgeolocation.io/pricing.html
# Write .ipgeolocation.io in your home directory with the following format:
# IP_LOCATION_API_KEY=<API KEY>
. ~/.ipgeolocation.io
# We use the timezone API as documented here:
# https://ipgeolocation.io/documentation/timezone-api.html
timezone=$(curl --silent "https://api.ipgeolocation.io/timezone?apiKey=${IP_LOCATION_API_KEY}" | jq -r .timezone)
echo -n "Set timezone to ${timezone}? [y/N] "
read answer
answer=$(echo $answer | tr A-Z a-z)
if [[ $answer = "y" || $answer = "yes" ]]; then
sudo timedatectl set-timezone $timezone
else
echo "Aborted."
exit 1
fi
| true
|
6f702429247c9791ab844129beb4d46f2036706d
|
Shell
|
rgulewich/dotfiles
|
/zshrc
|
UTF-8
| 1,571
| 2.84375
| 3
|
[] |
no_license
|
if [[ ! -o interactive ]]; then
return
fi
source $HOME/.profile.d/common/functions.sh
## Shell options, completion
bindkey -v
zstyle ':completion:*:*:git:*' script ~/.zsh/git-completion.bash
fpath=(~/.zsh $fpath)
# Command history
HISTFILE=${ZDOTDIR:-$HOME}/.zsh_history
SAVEHIST=10000
HISTSIZE=10000
# don't share history across multiple zsh sessions
setopt no_share_history
unsetopt share_history
setopt APPEND_HISTORY
# adds commands as they are typed, not at shell exit
setopt INC_APPEND_HISTORY
# expire duplicates first
setopt HIST_EXPIRE_DUPS_FIRST
# do not store duplications
setopt HIST_IGNORE_DUPS
#ignore duplicates when searching
setopt HIST_FIND_NO_DUPS
# removes blank lines from history
setopt HIST_REDUCE_BLANKS
# Enable reverse history search with Ctrl-R
bindkey '^R' history-incremental-search-backward
# nix's autojump
source_it /run/current-system/sw/share/zsh/site-functions/autojump.zsh
if command -v direnv >/dev/null ; then
eval "$(direnv hook zsh)"
fi
## Common
source $HOME/.profile.d/common/common.sh
## Prompt
source $HOME/.profile.d/git/git-prompt.sh
setopt prompt_subst
autoload -Uz promptinit && promptinit
PROMPT='%F{$date_colour}%D{%y-%m-%f} %D{%r}|%F{$host_colour}%n@%M%f%F{237}:%f%F{22}%~%f%F{172}$(__git_ps1 " (%s)")%f %F{237}$%f '
# No right prompt, please
RPS1=""
RPROMPT=""
## Source any local overrides
if [[ -d "$HOME/.profile.d/local" ]]; then
for P in $HOME/.profile.d/local/* ; do
# echo $P
source $P
done
fi
## Stuff that needs to go last
autoload -Uz compinit && compinit
| true
|
72ae584fcb55a899e31c263d0916a7af7a8b9e3a
|
Shell
|
supernifty/prostate-cancer-pipeline
|
/src/util/filter_vcf.sh
|
UTF-8
| 859
| 2.734375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
S="CMHS51 CMHS52 CMHS53 CMHS54 CMHS55 CMHS64 CMHS65 CMHS66 CMHS67 CMHS68 CMHS69 CMHS70 CMHS71"
for s in $S; do
python src/util/filter_germline.py tumorfirst < ./out/CMHS52.muse.vcf > ./out/CMHS52.muse.pass.vcf
python src/util/filter_germline.py tumorfirst < ./out/CMHS52.platypus.vcf > ./out/CMHS52.platypus.pass.vcf
python src/util/filter_germline.py normalfirst < ./out/CMHS52.mutect1.vcf > ./out/CMHS52.mutect1.pass.vcf
# WGS_775021c4-50ef-48e4-b9d7-bbfa959b67b4_vs_be887b7f-8cb2-418f-be9a-9c683bc79b52/caveman/775021c4-50ef-48e4-b9d7-bbfa959b67b4_vs_be887b7f-8cb2-418f-be9a-9c683bc79b52.annot.muts.vcf.gz
tar xvfz ./out/CMHS52.wgs/WGS_*.result.tar.gz "*.annot.muts.vcf.gz"
gunzip < ./WGS_*/caveman/*.annot.muts.vcf.gz | python src/util/filter_germline.py normalfirst > ./out/CMHS52.caveman.pass.vcf
rm -r ./WGS_*;
done
| true
|
540f0d4964b3beedf5fb2b50819ecd2f0bda5f5b
|
Shell
|
scylladb/scylla-monitoring
|
/start-grafana.sh
|
UTF-8
| 9,321
| 3.5625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
CURRENT_VERSION="master"
if [ -f CURRENT_VERSION.sh ]; then
CURRENT_VERSION=`cat CURRENT_VERSION.sh`
fi
LOCAL=""
if [ -z "$GRAFANA_ADMIN_PASSWORD" ]; then
GRAFANA_ADMIN_PASSWORD="admin"
fi
if [ -z "$GRAFANA_AUTH" ]; then
GRAFANA_AUTH=false
fi
if [ -z "$GRAFANA_AUTH_ANONYMOUS" ]; then
GRAFANA_AUTH_ANONYMOUS=true
fi
DOCKER_PARAM=""
EXTERNAL_VOLUME=""
BIND_ADDRESS=""
if [ -z "$ANONYMOUS_ROLE" ]; then
ANONYMOUS_ROLE="Admin"
fi
SPECIFIC_SOLUTION=""
LDAP_FILE=""
DATA_SOURCES=""
LIMITS=""
VOLUMES=""
PARAMS=""
DEFAULT_THEME="light"
. versions.sh
. UA.sh
if [ -f env.sh ]; then
. env.sh
fi
BRANCH_VERSION=$CURRENT_VERSION
if [ -z ${DEFAULT_VERSION[$CURRENT_VERSION]} ]; then
BRANCH_VERSION=`echo $CURRENT_VERSION|cut -d'.' -f1,2`
fi
if [ "$1" = "-e" ]; then
DEFAULT_VERSION=${DEFAULT_ENTERPRISE_VERSION[$BRANCH_VERSION]}
fi
MANAGER_VERSION=${MANAGER_DEFAULT_VERSION[$BRANCH_VERSION]}
for arg; do
shift
if [ -z "$LIMIT" ]; then
case $arg in
(--limit)
LIMIT="1"
;;
(--volume)
LIMIT="1"
VOLUME="1"
;;
(--param)
LIMIT="1"
PARAM="1"
;;
(--auth)
GRAFANA_AUTH=true
;;
(--disable-anonymous)
GRAFANA_AUTH_ANONYMOUS=false
;;
(*) set -- "$@" "$arg"
;;
esac
else
DOCR=`echo $arg|cut -d',' -f1`
VALUE=`echo $arg|cut -d',' -f2-|sed 's/#/ /g'`
NOSPACE=`echo $arg|sed 's/ /#/g'`
if [ "$PARAM" = "1" ]; then
if [ -z "${DOCKER_PARAMS[$DOCR]}" ]; then
DOCKER_PARAMS[$DOCR]=""
fi
DOCKER_PARAMS[$DOCR]="${DOCKER_PARAMS[$DOCR]} $VALUE"
PARAMS="$PARAMS --param $NOSPACE"
unset PARAM
else
if [ -z "${DOCKER_LIMITS[$DOCR]}" ]; then
DOCKER_LIMITS[$DOCR]=""
fi
if [ "$VOLUME" = "1" ]; then
SRC=`echo $VALUE|cut -d':' -f1`
DST=`echo $VALUE|cut -d':' -f2-`
SRC=$(readlink -m $SRC)
DOCKER_LIMITS[$DOCR]="${DOCKER_LIMITS[$DOCR]} -v $SRC:$DST"
VOLUMES="$VOLUMES --volume $NOSPACE"
unset VOLUME
else
DOCKER_LIMITS[$DOCR]="${DOCKER_LIMITS[$DOCR]} $VALUE"
LIMITS="$LIMITS --limit $NOSPACE"
fi
fi
unset LIMIT
fi
done
usage="$(basename "$0") [-h] [-v comma separated versions ] [-g grafana port ] [-G path to external dir] [-n grafana container name ] [-p ip:port address of prometheus ] [-j additional dashboard to load to Grafana, multiple params are supported] [-c grafana enviroment variable, multiple params are supported] [-x http_proxy_host:port] [-m alert_manager address] [-a admin password] [ -M scylla-manager version ] [-D encapsulate docker param] [-Q Grafana anonymous role (Admin/Editor/Viewer)] [-S start with a system specific dashboard set] [-P ldap_config_file] -- loads the prometheus datasource and the Scylla dashboards into an existing grafana installation"
while getopts ':hlEg:n:p:v:a:x:c:j:m:G:M:D:A:S:P:L:Q:' option; do
case "$option" in
h) echo "$usage"
exit
;;
v) VERSIONS=$OPTARG
;;
M) MANAGER_VERSION=$OPTARG
;;
g) GRAFANA_PORT=$OPTARG
;;
G) EXTERNAL_VOLUME="-v "`readlink -m $OPTARG`":/var/lib/grafana"
if [ ! -d $OPTARG ]; then
echo "Creating grafana external directory $OPTARG"
mkdir -p $OPTARG
fi
;;
n) GRAFANA_NAME=$OPTARG
;;
p) DATA_SOURCES="$DATA_SOURCES -p $OPTARG"
;;
m) DATA_SOURCES="$DATA_SOURCES -m $OPTARG"
;;
L) DATA_SOURCES="$DATA_SOURCES -L $OPTARG"
;;
l) DOCKER_PARAM="$DOCKER_PARAM --net=host"
;;
P) LDAP_FILE="$OPTARG"
GRAFANA_ENV_ARRAY+=("GF_AUTH_LDAP_ENABLED=true" "GF_AUTH_LDAP_CONFIG_FILE=/etc/grafana/ldap.toml" "GF_AUTH_LDAP_ALLOW_SIGN_UP=true")
LDAP_FILE="-v "`readlink -m $OPTARG`":/etc/grafana/ldap.toml"
GRAFANA_AUTH=true
GRAFANA_AUTH_ANONYMOUS=false
;;
D) DOCKER_PARAM="$DOCKER_PARAM $OPTARG"
;;
Q) ANONYMOUS_ROLE=$OPTARG
;;
a) GRAFANA_ADMIN_PASSWORD=$OPTARG
;;
x) HTTP_PROXY="$OPTARG"
;;
c) GRAFANA_ENV_ARRAY+=("$OPTARG")
;;
j) GRAFANA_DASHBOARD_ARRAY+=("$OPTARG")
;;
A) BIND_ADDRESS="$OPTARG:"
;;
S) SPECIFIC_SOLUTION="-S $OPTARG"
;;
E) RUN_RENDERER="-E"
;;
:) printf "missing argument for -%s\n" "$OPTARG" >&2
echo "$usage" >&2
exit 1
;;
\?) printf "illegal option: -%s\n" "$OPTARG" >&2
echo "$usage" >&2
exit 1
;;
esac
done
if [ -z $GRAFANA_PORT ]; then
GRAFANA_PORT=3000
if [ -z $GRAFANA_NAME ]; then
GRAFANA_NAME=agraf
fi
fi
VERSION=`echo $VERSIONS|cut -d',' -f1`
if [ "$VERSION" = "" ]; then
echo "Scylla-version was not not found, add the -v command-line with a specific version (i.e. -v 2021.1)"
exit 1
fi
if [ "$VERSION" = "latest" ]; then
if [ -z "$BRANCH_VERSION" ] || [ "$BRANCH_VERSION" = "master" ]; then
echo "Default versions (-v latest) is not supported on the master branch, use specific version instead"
exit 1
fi
VERSION=${DEFAULT_VERSION[$BRANCH_VERSION]}
echo "The use of -v latest is deprecated. Use a specific version instead."
fi
if [ -z $GRAFANA_NAME ]; then
GRAFANA_NAME=agraf-$GRAFANA_PORT
fi
docker container inspect $GRAFANA_NAME > /dev/null 2>&1
if [ $? -eq 0 ]; then
printf "\nSome of the monitoring docker instances ($GRAFANA_NAME) exist. Make sure all containers are killed and removed. You can use kill-all.sh for that\n"
exit 1
fi
group_args=()
is_podman="$(docker --help | grep -o podman)"
if [ ! -z "$is_podman" ]; then
group_args+=(--userns=keep-id)
fi
if [ "`id -u`" -ne 0 ]; then
GROUPID=`id -g`
USER_PERMISSIONS="-u $UID:$GROUPID"
fi
proxy_args=()
if [[ -n "$HTTP_PROXY" ]]; then
proxy_args=(-e http_proxy="$HTTP_PROXY")
fi
for val in "${GRAFANA_ENV_ARRAY[@]}"; do
GRAFANA_ENV_COMMAND="$GRAFANA_ENV_COMMAND -e $val"
if [[ $val == GF_USERS_DEFAULT_THEME=* ]]; then
DEFAULT_THEME=""
fi
done
if [[ $DEFAULT_THEME != "" ]]; then
GRAFANA_ENV_COMMAND="$GRAFANA_ENV_COMMAND -e GF_USERS_DEFAULT_THEME=$DEFAULT_THEME"
fi
for val in "${GRAFANA_DASHBOARD_ARRAY[@]}"; do
GRAFANA_DASHBOARD_COMMAND="$GRAFANA_DASHBOARD_COMMAND -j $val"
done
./generate-dashboards.sh -t $SPECIFIC_SOLUTION -v $VERSIONS -M $MANAGER_VERSION $GRAFANA_DASHBOARD_COMMAND
./grafana-datasource.sh $DATA_SOURCES
if [[ ! $DOCKER_PARAM = *"--net=host"* ]]; then
PORT_MAPPING="-p $BIND_ADDRESS$GRAFANA_PORT:3000"
fi
if [[ "$HOME_DASHBOARD" = "" ]]; then
HOME_DASHBOARD="/var/lib/grafana/dashboards/ver_$VERSION/scylla-overview.$VERSION.json"
fi
if [ ! -z "$is_podman" ]; then
if [[ $(uname) == "Linux" ]]; then
DOCKER_HOST=$(hostname -I | awk '{print $1}')
elif [[ $(uname) == "Darwin" ]]; then
DOCKER_HOST=$(ifconfig bridge0 | awk '/inet / {print $2}')
fi
else
if [[ $(uname) == "Linux" ]]; then
DOCKER_HOST=$(ip -4 addr show docker0 | grep -Po 'inet \K[\d.]+')
elif [[ $(uname) == "Darwin" ]]; then
DOCKER_HOST=$(ifconfig bridge0 | awk '/inet / {print $2}')
fi
fi
docker run -d $DOCKER_PARAM ${DOCKER_LIMITS["grafana"]} -i $USER_PERMISSIONS $PORT_MAPPING \
-e "GF_AUTH_BASIC_ENABLED=$GRAFANA_AUTH" \
-e "GF_AUTH_ANONYMOUS_ENABLED=$GRAFANA_AUTH_ANONYMOUS" \
-e "GF_AUTH_ANONYMOUS_ORG_ROLE=$ANONYMOUS_ROLE" \
-e "GF_PANELS_DISABLE_SANITIZE_HTML=true" \
$LDAP_FILE \
"${group_args[@]}" \
-v $PWD/grafana/build:/var/lib/grafana/dashboards:z \
-v $PWD/grafana/plugins:/var/lib/grafana/plugins:z \
-v $PWD/grafana/provisioning:/var/lib/grafana/provisioning:z $EXTERNAL_VOLUME \
-e "GF_PATHS_PROVISIONING=/var/lib/grafana/provisioning" \
-e "GF_SECURITY_ADMIN_PASSWORD=$GRAFANA_ADMIN_PASSWORD" \
-e "GF_ANALYTICS_GOOGLE_ANALYTICS_UA_ID=$UA_ANALTYICS" \
-e "GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=scylladb-scylla-datasource" \
-e "GF_DASHBOARDS_DEFAULT_HOME_DASHBOARD_PATH=$HOME_DASHBOARD" \
$GRAFANA_ENV_COMMAND \
"${proxy_args[@]}" \
--name $GRAFANA_NAME docker.io/grafana/grafana:$GRAFANA_VERSION ${DOCKER_PARAMS["grafana"]} >& /dev/null
if [ $? -ne 0 ]; then
echo "Error: Grafana container failed to start"
echo "For more information use: docker logs $GRAFANA_NAME"
exit 1
fi
# Wait till Grafana API is available
printf "Wait for Grafana container to start."
RETRIES=7
TRIES=0
until $(curl --output /dev/null -f --silent http://localhost:$GRAFANA_PORT/api/org) || [ $TRIES -eq $RETRIES ]; do
printf '.'
((TRIES=TRIES+1))
sleep 5
done
echo
if [ ! "$(docker ps -q -f name=$GRAFANA_NAME)" ]
then
echo "Error: Grafana container failed to start"
echo "For more information use: docker logs $GRAFANA_NAME"
exit 1
fi
if [ -z "$BIND_ADDRESS" ]; then
BIND_ADDRESS="localhost:"
fi
printf "Start completed successfully, check http://$BIND_ADDRESS$GRAFANA_PORT\n"
| true
|
6e2d89754c6ad171fff2c0f53a3c43b0006d879e
|
Shell
|
ray2501/monetdb_fdw
|
/test_monetdb_fdw.sh
|
UTF-8
| 2,404
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/sh
# Integration test for monetdb_fdw: builds the extension via PGXS, recreates a
# throwaway PostgreSQL database, installs the foreign data wrapper, and runs
# queries against a local MonetDB "dbt3" instance (expected on localhost:50000).
PSQL_OPTS=""
# Default PostgreSQL install prefix; override by exporting PGHOME beforehand.
if [ -z "$PGHOME" ]; then
PGHOME=/tmp/pgsql
fi
USE_PGXS=1
PATH=${PGHOME}/bin:$PATH
export PGHOME USE_PGXS PATH
# Out-of-tree rebuild and install of the extension through PGXS.
make clean USE_PGXS=1
make all USE_PGXS=1
make install USE_PGXS=1
# Recreate the scratch test database from scratch on every run.
DBNAME=__fdwtest__
echo dropdb ${PSQL_OPTS} ${DBNAME}
dropdb ${PSQL_OPTS} ${DBNAME}
createdb ${PSQL_OPTS} ${DBNAME}
#psql ${PSQL_OPTS} -f monetdb_fdw.sql ${DBNAME}
# Register the FDW handler/validator C functions and the wrapper itself.
psql ${PSQL_OPTS} ${DBNAME} <<EOF
CREATE FUNCTION monetdb_fdw_handler()
RETURNS fdw_handler
AS 'monetdb_fdw'
LANGUAGE C STRICT;
CREATE FUNCTION monetdb_fdw_validator(text[], oid)
RETURNS void
AS 'monetdb_fdw'
LANGUAGE C STRICT;
CREATE FOREIGN DATA WRAPPER monetdb_fdw
HANDLER monetdb_fdw_handler
VALIDATOR monetdb_fdw_validator;
EOF
# Exercise the wrapper three ways: a table-backed foreign table (nation), a
# query-backed one (customer count), and a larger TPC-H Q12-style aggregate.
psql ${PSQL_OPTS} ${DBNAME} <<EOF
\timing
CREATE SERVER monetdb_server FOREIGN DATA WRAPPER monetdb_fdw;
CREATE USER MAPPING FOR current_user SERVER monetdb_server;
--
-- customer
--
CREATE FOREIGN TABLE nation (
"n_nationkey" INTEGER,
"n_name" CHAR(25),
"n_regionkey" INTEGER,
"n_comment" VARCHAR(152)
) SERVER monetdb_server
OPTIONS (host 'localhost', port '50000', user 'monetdb', passwd 'monetdb', dbname 'dbt3', table 'nation')
;
SELECT * FROM nation;
--
-- customer
--
CREATE FOREIGN TABLE customer (
count text
) SERVER monetdb_server
OPTIONS (host 'localhost', port '50000', user 'monetdb', passwd 'monetdb', dbname 'dbt3', query 'select count(*) from customer')
;
SELECT * FROM customer;
EXPLAIN SELECT * FROM customer;
EXPLAIN ANALYZE SELECT * FROM customer;
ANALYZE customer;
CREATE FOREIGN TABLE q12 (
l_shipmode text,
high_line_count bigint,
low_line_count bigint
) SERVER monetdb_server
OPTIONS (host 'localhost', port '50000', user 'monetdb', passwd 'monetdb', dbname 'dbt3', query '
select l_shipmode,
sum(case when o_orderpriority = ''1-URGENT'' or o_orderpriority = ''2-HIGH'' then 1 else 0 end) as high_line_count,
sum(case when o_orderpriority <> ''1-URGENT'' and o_orderpriority <> ''2-HIGH'' then 1 else 0 end) as low_line_count
from orders, lineitem
where o_orderkey = l_orderkey and l_shipmode in (''TRUCK'', ''REG AIR'')
and l_commitdate < l_receiptdate and l_shipdate < l_commitdate
and l_receiptdate >= date ''1994-01-01''
and l_receiptdate < date ''1995-01-01''
group by l_shipmode
order by l_shipmode;
'
);
SELECT * FROM q12;
EOF
| true
|
26b7c8d10419a9fa8daedfc8ca9d7ce6ba6b8b5b
|
Shell
|
BonJovi1/Bash-Scripting-2
|
/q4.sh
|
UTF-8
| 175
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# Validate a crontab file by asking crontab(1) to install it and checking the
# exit status.  NOTE: on success the file really is installed as the current
# user's crontab — that matches the original "validate by installing" design.
#
# Usage: ./q4.sh <crontab-file>

# Fail early when no file is given: a bare `crontab` would read stdin and
# hang the script instead of reporting an error.
if [ $# -lt 1 ]; then
	echo "Usage: $0 <crontab-file>" >&2
	exit 2
fi

cron=$1
# Quote the path so filenames containing spaces work; silence crontab's own
# diagnostics, we only care about its exit status.
if crontab "$cron" > /dev/null 2>&1
then
	echo "It's properly formatted, should work! :)"
else
	echo "Crontab is Not properly formatted :("
fi
| true
|
45a16de8d53d5cb7b0ad9f7afcb759ff6b9760f9
|
Shell
|
devaradhanm/accessibility-insights-service
|
/packages/resource-deployment/scripts/setup-cosmos-db.sh
|
UTF-8
| 4,267
| 3.84375
| 4
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
#!/bin/bash
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# shellcheck disable=SC1090
# Provision the Cosmos DB account, databases and collections used by the
# service.  Aborts on the first failed command (set -e) or failed pipeline.
set -eo pipefail
export cosmosAccountName
export resourceGroupName
# Deploy the Cosmos DB ARM template and capture the resulting account name
# into the global $cosmosAccountName (via the sourced helper's $resourceName).
createCosmosAccount() {
echo "[setup-cosmos-db] Creating Cosmos DB account..."
resources=$(az group deployment create --resource-group "$resourceGroupName" --template-file "${0%/*}/../templates/cosmos-db.template.json" --parameters "${0%/*}/../templates/cosmos-db.parameters.json" --query "properties.outputResources[].id" -o tsv)
export resourceName
# Sourced helper sets $resourceName from the deployed resource id list.
. "${0%/*}/get-resource-name-from-resource-paths.sh" -p "Microsoft.DocumentDB/databaseAccounts" -r "$resources"
cosmosAccountName="$resourceName"
echo "[setup-cosmos-db] Successfully created Cosmos DB account '$cosmosAccountName'"
}
createCosmosCollection() {
    # Create a Cosmos DB collection unless it already exists.
    #   $1 - collection name
    #   $2 - database name
    #   $3 - default item TTL in seconds (-1 = never expire; empty -> -1)
    #   $4 - provisioned throughput (RU/s)
    local collectionName=$1
    local dbName=$2
    local ttl=$3
    local throughput=$4

    # Fall back to "no expiry" when no TTL was supplied.
    ttl=${ttl:--1}

    echo "[setup-cosmos-db] Checking if collection '$collectionName' exists in db '$dbName' of cosmosAccount '$cosmosAccountName' in resource group '$resourceGroupName'"
    collectionExists=$(az cosmosdb collection exists --collection-name "$collectionName" --db-name "$dbName" --name "$cosmosAccountName" --resource-group-name "$resourceGroupName")

    if [ "$collectionExists" != true ]; then
        echo "[setup-cosmos-db] Creating DB collection '$collectionName'"
        az cosmosdb collection create --collection-name "$collectionName" --db-name "$dbName" --name "$cosmosAccountName" --resource-group-name "$resourceGroupName" --partition-key-path "/partitionKey" --throughput "$throughput" --default-ttl "$ttl" 1>/dev/null
        echo "Successfully created DB collection '$collectionName'"
    else
        echo "[setup-cosmos-db] Collection '$collectionName' already exists"
    fi
}
createCosmosDatabase() {
    # Create the named Cosmos DB database if it is not already present.
    #   $1 - database name
    local dbName=$1

    echo "[setup-cosmos-db] Checking if database '$dbName' exists in Cosmos account '$cosmosAccountName' in resource group '$resourceGroupName'"
    databaseExists=$(az cosmosdb database exists --db-name "$dbName" --name "$cosmosAccountName" --resource-group-name "$resourceGroupName")

    if [ "$databaseExists" != true ]; then
        echo "[setup-cosmos-db] Creating Cosmos DB '$dbName'"
        az cosmosdb database create --db-name "$dbName" --name "$cosmosAccountName" --resource-group-name "$resourceGroupName" 1>/dev/null
        echo "[setup-cosmos-db] Successfully created Cosmos DB '$dbName'"
    else
        echo "[setup-cosmos-db] Database '$dbName' already exists"
    fi
}
# Print usage help and abort the script with status 1.
# (The backslash-newlines inside the quoted string are shell line
# continuations, so the usage prints on a single line.)
exitWithUsageInfo() {
echo "
Usage: $0 \
-r <resource group> \
-e <environment>
"
exit 1
}
# Read script arguments
while getopts ":r:e:" option; do
case $option in
r) resourceGroupName=${OPTARG} ;;
e) environment=${OPTARG} ;;
*) exitWithUsageInfo ;;
esac
done

# Print script usage help when a mandatory parameter is missing.
# Quote the operands: the original unquoted `[ -z $var ]` degenerates into a
# one-argument test when the variable is empty and breaks on values that
# contain whitespace.
if [ -z "$resourceGroupName" ] || [ -z "$environment" ]; then
exitWithUsageInfo
fi

createCosmosAccount

scannerDbName="scanner"
onDemandScannerDbName="onDemandScanner"

createCosmosDatabase "$scannerDbName"
createCosmosDatabase "$onDemandScannerDbName"

# Increase throughput for below collection only in case of prod
# Refer to https://docs.microsoft.com/en-us/azure/cosmos-db/time-to-live for item TTL scenarios
if [ "$environment" = "prod" ]; then
createCosmosCollection "a11yIssues" "$scannerDbName" "-1" "25000"
createCosmosCollection "scanRuns" "$onDemandScannerDbName" "2592000" "20000" # 30 days
createCosmosCollection "scanBatchRequests" "$onDemandScannerDbName" "604800" "2000" # 7 days
createCosmosCollection "scanRequests" "$onDemandScannerDbName" "604800" "20000" # 7 days
createCosmosCollection "systemData" "$onDemandScannerDbName" "-1" "2000"
else
createCosmosCollection "a11yIssues" "$scannerDbName" "-1" "2000"
createCosmosCollection "scanRuns" "$onDemandScannerDbName" "2592000" "2000" # 30 days
createCosmosCollection "scanBatchRequests" "$onDemandScannerDbName" "604800" "2000" # 7 days
createCosmosCollection "scanRequests" "$onDemandScannerDbName" "604800" "2000" # 7 days
createCosmosCollection "systemData" "$onDemandScannerDbName" "-1" "2000"
fi
| true
|
803434b9a0d0fe47f69904c0b730b231e22ebc40
|
Shell
|
webdetails/cbf2
|
/startClients.sh
|
UTF-8
| 3,322
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash
# Lists the clients and starts / delets
# Interactive helper: scans ./clients/<client>/<branch>/<buildno> directories,
# lets the user pick one, then launches it, deletes it, or copies it into a
# running docker container.
BASEDIR=$(dirname $0)
source "$BASEDIR/utils.sh"
cd $BASEDIR

#VERSIONS=()
# NOTE(review): VERSIONS and BOX_URL are set but never used below — possibly
# leftovers from an earlier download step; confirm before removing.
VERSIONS=(6.0-NIGHTLY 6.1.0.0)

BOX_URL=${BOX_URL:-ftp.box.com/CI}

DIR=clients
CLIENTS=()
BRANCHES=()
BUILDNOS=()

# Get list of files
# Exactly three levels deep: client/branch/buildno.
# NOTE(review): `-d 3` looks like BSD find syntax — confirm on the target OS.
DIRS=$(find $DIR -type d -maxdepth 3 -d 3)

echo
echo Clients found:
echo --------------
echo

# Split each path on '/' into (dir, client, branch, buildno) and index the
# parallel arrays by menu number.
n=0;
for i in ${DIRS[@]}; do

	read -a foo <<< $(echo $i | sed 's/\// /g')
	CLIENTS+=(${foo[1]});
	BRANCHES+=(${foo[2]});
	BUILDNOS+=(${foo[3]});

	echo \ [$n] ${CLIENTS[$n]}: ${BRANCHES[$n]}-${BUILDNOS[$n]}
	n=$((n+1))

done

echo

# What ?
read -e -p "Select a client: " clientNo

if [ -z $clientNo ] || [ -z ${CLIENTS[$clientNo]} ]
then
	echo Invalid option: $clientNo
	exit 1
fi

echo
echo You selected ${CLIENTS[$clientNo]}: ${BRANCHES[$clientNo]}-${BUILDNOS[$clientNo]}

read -e -p "What do you want to do? (L)aunch it, (D)elete it, or (S)end to docker? [L]: " operation
operation=${operation:-L}

if ! [ $operation == "L" ] && ! [ $operation == "D" ] && ! [ $operation == "S" ]
then
	echo Invalid selection
	exit 1;
fi

# Are we deleting it?
if [ $operation == "D" ]
then
	rm -rf $DIR/${CLIENTS[$clientNo]}/${BRANCHES[$clientNo]}/${BUILDNOS[$clientNo]}
	echo Removed successfully
	exit 0
fi

# Are we launching it?
# Each product ships its launcher in a differently named subdirectory, so
# dispatch on the client name.
if [ $operation == "L" ]
then

	cd $DIR/${CLIENTS[$clientNo]}/${BRANCHES[$clientNo]}/${BUILDNOS[$clientNo]}

	# This is the part where we need to be specific...
	# PRODUCTS=(pdi-ee-client pdi-ce prd-ee prd-ce pme-ee pme-ce psw-ee psw-ce pad-ee pad-ce)
	if [ ${CLIENTS[$clientNo]} == "pdi-ee-client" ] || [ ${CLIENTS[$clientNo]} == "pdi-ce" ]
	then
		cd data-integration
		# Uncomment the OPT= line in spoon.sh before launching.
		sed -i '' -e 's/^# OPT=/OPT=/' ./spoon.sh
		./spoon.sh
	elif [ ${CLIENTS[$clientNo]} == "prd-ee" ] || [ ${CLIENTS[$clientNo]} == "prd-ce" ]
	then
		cd report-designer
		./report-designer.sh
	elif [ ${CLIENTS[$clientNo]} == "pme-ee" ] || [ ${CLIENTS[$clientNo]} == "pme-ce" ]
	then
		cd metadata-editor
		./metadata-editor.sh
	elif [ ${CLIENTS[$clientNo]} == "psw-ee" ] || [ ${CLIENTS[$clientNo]} == "psw-ce" ]
	then
		cd schema-workbench
		./workbench.sh
	elif [ ${CLIENTS[$clientNo]} == "pad-ee" ] || [ ${CLIENTS[$clientNo]} == "pad-ce" ]
	then
		cd pentaho-aggdesigner-ui
		./startaggregationdesigner.sh
	else
		echo The author was probably lazy enough to not implement what to do with ${CLIENTS[$clientNo]}...
		cd $BASEDIR
		exit 1
	fi
fi

# Docker magic! The reason for this is that in mac, docker host is not on the
# same network of the clients; This means we can't test the big data aspects
# of namely PDI
#
# The spark-client image from github.com:pmalves/hadoop-cluster-docker works
# specially well with this
if [ $operation == "S" ]
then
	# promptUser (from utils.sh) sets $CHOICE from the OPTIONS array.
	read -a OPTIONS <<< $( docker ps --format "{{.Names}}" )
	promptUser "Select a running container " "0"
	container=${OPTIONS[$CHOICE]}

	echo Copying $DIR/${CLIENTS[$clientNo]}-${BRANCHES[$clientNo]}-${BUILDNOS[$clientNo]} to $container:/root/
	cd $DIR/${CLIENTS[$clientNo]}/${BRANCHES[$clientNo]}/${BUILDNOS[$clientNo]}
	docker exec $container rm -rf /root/${CLIENTS[$clientNo]}-${BRANCHES[$clientNo]}-${BUILDNOS[$clientNo]}
	docker cp . $container:/root/${CLIENTS[$clientNo]}-${BRANCHES[$clientNo]}-${BUILDNOS[$clientNo]}
	docker cp ~/.pentaho $container:/root/
fi

echo Done
cd $BASEDIR
exit 0
| true
|
af2a381c9ccdf36195be68f9f577321a8b072a93
|
Shell
|
sa3036/powerstation
|
/powerstation/IDE_plugin/src/main/resources/static-analyzer/single_action.sh
|
UTF-8
| 1,295
| 3.140625
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
# Run the static analyzer end-to-end for one application/action pair.
#
# Usage: single_action.sh <app-name> <controller,action> <input-dir>
#   $1  application name (directory created under ./applications/)
#   $2  comma-separated controller/action spec passed to the analyzers
#   $3  input directory containing app/, calls.txt and db/schema.rb
#
# Requires bash (arrays are used below).
app=$1
c_a=$2
input=$3

output='./applications/'
pre_pro='./preprocess_views'
controller='./controller_model_analysis'

call="$input"/'calls.txt'
schema="$input"/db/'schema.rb'

# Start from a clean copy of the application sources plus the call log and
# DB schema the analyzers need.  (All expansions quoted so paths with spaces
# do not split.)
rm -rf "$output/$app"
echo "remove existing"
mkdir "$output/$app"
echo "move app dir to applications "
cp -r "$input"/app/* "$output/$app/"
cp "$call" "$output/$app"
cp "$schema" "$output/$app"
echo "finished moving"

echo "preprocessing"
echo "$app"
cd "$pre_pro"; pwd; chmod +x check_system.sh;./check_system.sh; ruby main.rb -a "$app"; cd ../
echo "finish preprocessing"

echo "jruby get dataflow"
cd "$output"; pwd; ruby generate_dataflow_log.rb "$app" "$c_a"; cd ../
echo "FINISH dataflow"

echo "run analysis"
cd "$controller"; pwd; ruby main.rb -s "$c_a" -d ../"$output/$app"/
echo "FINISH analysis"
cd ../

file=("loop_invariant.xml" "dead_store.xml" "inefficient_render.xml" "common_subexpression.xml" "redundant_usage.xml")
start=("loopInvariants" "dead_store_queries" "inefficientRenders" "commonSubexprs" "redundantData")
result="$output/$app/results"
# Action spec with commas replaced by underscores names the per-action
# results subdirectory.
ca=$(echo "$c_a" | sed -e "s/,/_/g")
echo "$result"

# Wrap each analyzer output file in its matching XML root element, writing
# the wrapped copy one level up in the results directory.
index=0
for i in "${file[@]}"
do
	fn="$result/$ca/$i"
	re="$result/$i"
	echo "$i"
	s=${start[$index]}
	# Proper arithmetic increment: the original `index=$index+1` built the
	# literal string "0+1+1..." and only worked because bash evaluates
	# array subscripts arithmetically.
	index=$((index+1))
	echo "<$s>" > "$re"
	cat "$fn" >> "$re"
	echo "</$s>" >> "$re"
done
| true
|
ed64c38a17bfc4307c464c36c03cf0f750bd230e
|
Shell
|
inetss/docker-django-app
|
/setup/python_version.sh
|
UTF-8
| 403
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/bash
# Discover which python interpreter a project wants by reading a marker
# comment from requirements.txt, e.g.:
#   # python: python3.5
# On success exports $python as the absolute path to that executable;
# otherwise prints instructions and exits 1.

die() { >&2 echo -e "$@\nAdd python executable to requirements.txt like this:\n# python: python3.5"; exit 1; }

# Pull the interpreter name out of the marker line (grep reads the file
# directly — no useless cat); xargs trims surrounding whitespace.
python=$(grep '^# python: ' requirements.txt | sed -re 's/.*://' | xargs)
[ -n "$python" ] || die "Could not discover python version"

# Resolve to an absolute path.  `command -v` is the POSIX replacement for
# `which`; the trailing `true` keeps a failed lookup from short-circuiting
# before we can report a proper error.
python_exe=$(command -v "$python"; true)
[ -x "$python_exe" ] || die "Could not find python executable '${python}'"

export python=$python_exe
| true
|
5aa0ea2e12ccb3440fb6c066c7c74bafd75c9018
|
Shell
|
mrnechay/mikescripts
|
/turbo2
|
UTF-8
| 1,956
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
#
# A script to run TURBOMOLE
# by Sean Nedd
# improved by Nathan Gallup
#
# Interactive wizard: asks whether the job runs on a cluster, collects node
# count / wall time, generates a script.sh (single-point ridft or jobex
# optimization) if one is missing, then submits via runturbomole2.sh.
echo "TURBOMOLE program"
echo "This TURBOMOLE script is an interactive guide for the setup and running of TURBOMOLE calculations on a cluster or a single node."
echo "Running on a cluster?[Y/n]"
read cluster
if [ "$cluster" = 'Y' -o "$cluster" = 'y' -o "$cluster" = '' ]; then
	# Defaults before prompting; empty answers fall back to these.
	nodes=8
	tyme=24
	iteration=0
	echo "How many nodes?[Default 8]"
	read nodes
	if [ "$nodes" = '' ]; then nodes=8; fi
	echo "How much time (in hours)?[Default 24]"
	read tyme
	if [ "$tyme" = '' ]; then tyme=24; fi
	# Generate script.sh only when the user has not supplied one.
	if [ -f "script.sh" ]; then
		echo "Using \"script.sh\" in the present directory."
	else
		echo "Is this an optimization?[y/N]"
		read check
		if [ "$check" = 'n' -o "$check" = 'N' -o "$check" = '' ]; then
			# Single-point energy run.
			echo "ridft>>ridft.out">> script.sh
			chmod +x script.sh
		else
			if [ "$check" = 'y' -o "$check" = 'Y' ]; then
				# Geometry optimization via jobex.
				echo "Adding \"jobex -c 500 -ri -energy 7 -gcart 4 -gexp 2\" to script.sh"
				echo "jobex -c 500 -ri -energy 7 -gcart 4 -gexp 2">> script.sh
				echo "Change script.sh information as necessary, then re-run \"turbo\""
				chmod +x script.sh
			fi
		fi
	fi
	adddisp3.sh
	# Personal additions
	echo "Enter job name as you'd want it to appear in the Hoffman queue [Default script.sh]"
	read name
	if [ "$name" = '' ]; then name="script.sh"; fi
	#
	echo "Submit job?[Y/n]"
	read submit
	if [ "$submit" = 'Y' -o "$submit" = 'y' -o "$submit" = '' ]; then
		echo "Running: runturbomole2.sh -sub -t $tyme -N $nodes script.sh"
		runturbomole2.sh -sub -t $tyme -N $nodes -a $name script.sh
	else
		if [ "$submit" = 'n' ]; then exit; fi
	fi
else
	if [ "$cluster" = 'n' ]; then
		# NOTE(review): in this single-node branch $tyme and $name were never
		# set, so they expand empty in the submit command below — confirm
		# whether runturbomole2.sh tolerates the missing values.
		echo "Submit job?[Y/n]"
		read submit
		if [ "$submit" = 'Y' -o "$submit" = 'y' -o "$submit" = '' ]; then
			echo "Running: runturbomole2.sh -sub -t $tyme -N 1 -a $name script.sh"
			runturbomole2.sh -sub -t $tyme -N 1 -a $name script.sh
		else
			if [ "$submit" = 'n' ]; then exit; fi
		fi
	fi
fi
exit
| true
|
81ac0d4249d6ec3ac0b326a799098b3a000d377a
|
Shell
|
kayamash/CalcEffTool
|
/compile.sh
|
UTF-8
| 444
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/sh
# Build helper: compiles the project out-of-tree into $TestArea/build.
#
# Usage: ./compile.sh [clean|noCmake]
#   clean   - remove the build directory before building
#   noCmake - skip the cmake configure step and just run make
#
# Requires $TestArea to be set (by setup_grid.sh).

if [ "$TestArea" = "" ] ; then
    echo "TestArea is missing. Please source setup_grid.sh"
    exit 1
fi

if [ "$1" = "clean" ] ; then
    # Remove the out-of-tree build directory.  (The original removed a
    # relative "build", which only worked when run from $TestArea; this
    # targets the directory the script actually creates and builds in.)
    rm -rf "$TestArea/build"
fi

# All $TestArea expansions quoted so paths with spaces work.
if [ -d "$TestArea/build" ] ; then
    echo "Compiled in $TestArea/build"
else
    echo "Creating $TestArea/build ..."
    mkdir -p "$TestArea/build"
fi

if [ "$1" = "noCmake" ] ; then
    cd "$TestArea/build" && make -j8 && cd -
else
    cd "$TestArea/build" && cmake "$TestArea" && make -j8 && cd -
fi
| true
|
1ae4e55fbfdca8c501e5a66cb46cacc55742b569
|
Shell
|
vishwaraja/spring-nom-demo
|
/src/main/resources/docker/16-2/setup_sso.sh
|
UTF-8
| 456
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
# Strip the ##SSO## / ##LDAP## placeholder prefixes from the deployment
# files in place, activating the SSO-related configuration lines.
# NOTE: `sed -i ""` is the BSD/macOS form of in-place editing.

PREFIX_SSO='##SSO##'
PREFIX_LDAP='##LDAP##'

# strip_prefix <pattern> <file> — delete every occurrence of <pattern>.
strip_prefix() {
    sed -i "" "s/$1//g" "$2"
}

echo Preparing... docker-compose.yml
strip_prefix "$PREFIX_LDAP" ./docker-compose.yml
strip_prefix "$PREFIX_SSO" ./docker-compose.yml

echo Preparing... Dockerfile
strip_prefix "$PREFIX_SSO" ./eportal/Dockerfile

echo Preparing... set_ip.sh
strip_prefix "$PREFIX_SSO" ./eportal/set_ip.sh

echo Preparing... supervisord.conf
strip_prefix "$PREFIX_SSO" ./eportal/supervisord.conf

echo Done...
| true
|
46f0c43a0bbfcda2200af1990684bf19dfa65422
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/teamspeak3-server/PKGBUILD
|
UTF-8
| 2,971
| 2.65625
| 3
|
[] |
no_license
|
# Maintainer: Malte Rabenseifner <mail@malte-rabenseifner.de>
# Contributor: EnteEnteEnte <ducksource@duckpond.ch>
# Arch Linux PKGBUILD for the proprietary TeamSpeak 3 server: repackages the
# upstream tarball, installs binaries, DB backends, docs and a systemd unit.

pkgname='teamspeak3-server'
pkgver='3.0.13.8'
pkgrel=1
pkgdesc='A proprietary VoIP conference software'
license=('custom')
arch=('i686' 'x86_64')
url="http://www.teamspeak.com"
depends=('glibc')
optdepends=('mariadb-connector-c: for MariaDB backend')
# Config files preserved across upgrades.
backup=(etc/teamspeak3-server.ini
        etc/tsdns_settings.ini)
install='teamspeak3-server.install'
source=('teamspeak3-server.ini'
        'teamspeak3-server.service')
source_i686=("http://teamspeak.gameserver.gamed.de/ts3/releases/$pkgver/teamspeak3-server_linux_x86-$pkgver.tar.bz2")
source_x86_64=("http://teamspeak.gameserver.gamed.de/ts3/releases/$pkgver/teamspeak3-server_linux_amd64-$pkgver.tar.bz2")
sha256sums=('c678f5d657772920260c4ea4718677e6b00ef28ad74c317e05632a01d33b3ca5'
            'de414c8dffc06b941181007383250bd90500ef8a81bb502a2396118861c7c938')
sha256sums_i686=('c1be39df7ee136c87e0ea1eff4e3aa84074e2d141c66d23f9a66eb5afa2b1876')
sha256sums_x86_64=('460c771bf58c9a49b4be2c677652f21896b98a021d7fff286e59679b3f987a59')

# Upstream names its architecture directories x86/amd64, not i686/x86_64.
if [ "$CARCH" == "x86_64" ]; then
	_TSARCH='amd64'
elif [ "$CARCH" == "i686" ]; then
	_TSARCH='x86'
fi

package() {
	cd "$srcdir"
	# Configuration, systemd unit, server binaries and DB backend plugins.
	install -Dm 644 teamspeak3-server.ini "$pkgdir/etc/teamspeak3-server.ini"
	install -Dm 644 "teamspeak3-server_linux_$_TSARCH/tsdns/tsdns_settings.ini.sample" "$pkgdir/etc/tsdns_settings.ini"
	install -Dm 644 teamspeak3-server.service "$pkgdir/usr/lib/systemd/system/teamspeak3-server.service"
	install -Dm 755 "teamspeak3-server_linux_$_TSARCH/ts3server" "$pkgdir/usr/bin/ts3server"
	install -Dm 755 "teamspeak3-server_linux_$_TSARCH/tsdns/tsdnsserver" "$pkgdir/usr/bin/tsdnsserver"
	install -Dm 644 "teamspeak3-server_linux_$_TSARCH/libts3db_mariadb.so" "$pkgdir/usr/lib/libts3db_mariadb.so"
	install -Dm 644 "teamspeak3-server_linux_$_TSARCH/libts3db_sqlite3.so" "$pkgdir/usr/lib/libts3db_sqlite3.so"
	install -Dm 644 "teamspeak3-server_linux_$_TSARCH/LICENSE" "$pkgdir/usr/share/licenses/teamspeak3-server/LICENSE"

	# Documentation, SQL schema files and the server's runtime directories.
	mkdir -p "$pkgdir/usr/share/doc/teamspeak3-server" \
		"$pkgdir/usr/share/teamspeak3-server" \
		"$pkgdir/var/lib/teamspeak3-server" \
		"$pkgdir/var/log/teamspeak3-server"
	cp -a "teamspeak3-server_linux_$_TSARCH/doc/" "$pkgdir/usr/share/doc/teamspeak3-server/"
	cp -a "teamspeak3-server_linux_$_TSARCH/serverquerydocs/" "$pkgdir/usr/share/doc/teamspeak3-server/"
	cp -a "teamspeak3-server_linux_$_TSARCH/sql/" "$pkgdir/usr/share/teamspeak3-server/"

	# Normalize permissions copied from the upstream tarball.
	find "$pkgdir/usr/share/teamspeak3-server" -type d -exec chmod 755 {} \;
	find "$pkgdir/usr/share/teamspeak3-server" -type f -exec chmod 644 {} \;
	find "$pkgdir/usr/share/doc/teamspeak3-server" -type d -exec chmod 755 {} \;
	find "$pkgdir/usr/share/doc/teamspeak3-server" -type f -exec chmod 644 {} \;
	chmod 750 "$pkgdir/var/lib/teamspeak3-server" \
		"$pkgdir/var/log/teamspeak3-server"
}
| true
|
bd9620da52e3f1bfdf5b8f8f9834b3f253f97a06
|
Shell
|
DigitalPostma/power
|
/power-monitor
|
UTF-8
| 288
| 3.25
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# /etc/init.d/power-monitor
# SysV init script controlling the power monitor daemon (monitor.py).

start_monitor() {
    echo "Starting power monitor..."
    # Launch the monitor detached in the background.
    python /root/power/monitor.py &
}

stop_monitor() {
    echo "Stopping power monitor..."
    killall monitor.py
}

case "$1" in
    start) start_monitor ;;
    stop) stop_monitor ;;
    *)
        echo "Usage: /etc/init.d/power-monitor (start|stop)"
        exit 1
        ;;
esac

exit 0
| true
|
7ca4653dbaf1a5691ceaa421985ca1bbb9ab09af
|
Shell
|
ablin42/push_swap
|
/script.sh
|
UTF-8
| 615
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
#DEBUG MODE: bash -x script.sh
#USAGE: ./script.sh "lowest nb" "highest nb" "maximum nb of moves"
# Stress-tests ./push_swap: repeatedly feeds it random permutations of the
# range [$1..$2] until a run needs more than $3 moves (or the budget of
# $2*100 runs is spent), then reports the last arguments, move count, number
# of runs and the average number of moves.

arg=$(ruby -e "puts ($1..$2).to_a.shuffle.join(' ')")
# $arg is intentionally unquoted: push_swap takes the numbers as separate
# arguments.
nbmove=$(./push_swap $arg | wc -l | tr -d " ")
nbtest='0'
moyenne='0'
limit=$(($2 * 100))

while [[ $nbmove -le $3 && $nbtest -le $limit ]]
do
	arg=$(ruby -e "puts ($1..$2).to_a.shuffle.join(' ')")
	nbmove=$(./push_swap $arg | wc -l | tr -d " ")
	nbtest=$((nbtest+1))
	moyenne=$((moyenne + nbmove))
	echo $nbtest
done

# Guard the division: if the very first run already exceeded the move budget
# the loop body never executed and nbtest is still 0, which crashed the
# original with "division by 0".
if [[ $nbtest -gt 0 ]]; then
	moyenne=$((moyenne / nbtest))
fi

echo "ARGS=[$arg]"
echo "NB OF MOVES=[$nbmove]"
echo "NB OF TESTS=[$nbtest]"
echo "MOYENNE=[$moyenne]"
| true
|
ac83e3c843e60d895bde5e2d57da8f7844e6f01b
|
Shell
|
seattle-biomed/zfstools
|
/zfs-send.sh
|
UTF-8
| 2,126
| 4.03125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/local/bin/bash
# Replicate ZFS volumes from a local pool to a secondary host over SSH,
# using mbuffer for throughput.  Performs a full `zfs send` for volumes the
# secondary has never seen, otherwise an incremental send between the two
# most recent snapshots (SNAP.1 -> SNAP.0).

# Print usage and abort.  (-h is not in the getopts string below; it reaches
# here through the `*)` fallback when getopts reports it as unknown.)
function banner {
  echo "Usage:"
  echo " $0 -h"
  echo " $0 -s SECONDARY -p POOL1 -d POOL2 -n SNAP [ -x EXCLUDE ]"
  echo " SECONDARY - secondary hostname"
  echo " POOL1 - source zpool"
  echo " POOL2 - destination zpool"
  echo " SNAP - snapshot base name"
  echo " EXCLUDE - space-delimited list of file systems not to replicate"
  exit 1
}

while getopts s:p:d:n:x: option; do
  case "${option}" in
    s) SECONDARY=${OPTARG} ;;
    p) POOL1=${OPTARG} ;;
    d) POOL2=${OPTARG} ;;
    n) SNAP=${OPTARG} ;;
    x) EXCLUDE=${OPTARG} ;;
    *) banner ;;
  esac
done

# All four of -s/-p/-d/-n are mandatory.
if [ -z "$SECONDARY" ] ; then
  echo "Must enter a destination host"
  echo
  banner
fi
if [ -z "$POOL1" ] ; then
  echo "Must enter a source zpool"
  echo
  banner
fi
if [ -z "$POOL2" ] ; then
  echo "Must enter a destination zpool"
  echo
  banner
fi
if [ -z "$SNAP" ] ; then
  echo "Must enter a snapshot name"
  echo
  banner
fi

# FreeBSD binary locations, adjust for Solaris/Nexenta/whatever:
AWK='/usr/bin/awk'
DATE='/bin/date'
GREP='/usr/bin/grep'
MBUFFER='/usr/local/bin/mbuffer'
SED='/usr/local/bin/gsed' # Must be GNU sed!
SSH='/usr/bin/ssh'
ZFS='/sbin/zfs'

# Gather volumes on source system. We're interested in the *volumes*
# but not the *pool* - therefore look for a forward slash:
volumes=`$ZFS list -H -r $POOL1 | $AWK '{ print $1 }' | $GREP '/'`

# Drop any excluded file systems from the list (word-boundary match, then an
# end-of-line match to catch a trailing entry).
if [ -n "$EXCLUDE" ] ; then
  for x in $EXCLUDE; do
    volumes=`echo $volumes | $SED "s%\b$x[^\w/]%%g"`
    volumes=`echo $volumes | $SED "s%\b$x$%%g"`
  done
fi

# Send incremental snapshot per-volume:
for v in $volumes; do
  # Check return code of zfs list to see whether we need to do an initial
  # sync, or an incremental:
  $SSH $SECONDARY $ZFS list $v > /dev/null 2>&1
  if [ "$?" -ne "0" ]; then
    # initial transfer:
    $ZFS send ${v}@${SNAP}.0 | $SSH -c arcfour128 $SECONDARY "${MBUFFER} -q -s 128k -m 1G | $ZFS recv -d $POOL2"
  else
    # incremental transfer:
    $ZFS send -i @${SNAP}.1 ${v}@${SNAP}.0 | $SSH -c arcfour128 $SECONDARY "${MBUFFER} -q -s 128k -m 1G | $ZFS recv -F -d $POOL2"
  fi
done
| true
|
bb9513fec9ab86426e98ddd6833757aacd4ff687
|
Shell
|
mamemomonga/apnic-cidr-range
|
/bin/restrict-ssh-jponly.sh
|
UTF-8
| 645
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
# Restrict inbound SSH to Japanese (APNIC JP) source ranges via a dedicated
# iptables chain.  Must be run as root.
set -eu

BASEDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && cd .. && pwd )"
IPTABLES="iptables"

# Default policies: drop inbound and forwarded traffic, allow outbound.
$IPTABLES -t filter -P INPUT DROP
$IPTABLES -t filter -P OUTPUT ACCEPT
$IPTABLES -t filter -P FORWARD DROP
$IPTABLES -t nat -P PREROUTING ACCEPT
$IPTABLES -t nat -P POSTROUTING ACCEPT
$IPTABLES -t nat -P OUTPUT ACCEPT

# Always allow loopback and established/related connections.
$IPTABLES -A INPUT -i lo -j ACCEPT
$IPTABLES -A FORWARD -i lo -j ACCEPT
$IPTABLES -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT

# Recreate the COUNTRY chain.  `-X` fails when the chain does not exist yet,
# and under `set -e` that aborted the whole script before the chain was ever
# created — tolerate the error explicitly.
$IPTABLES -X COUNTRY > /dev/null 2>&1 || true
$IPTABLES -N COUNTRY

# Populate the chain with JP CIDR ranges, then allow SSH SYNs through it.
$BASEDIR/bin/apply-iptables.pl JP eth0 COUNTRY
$IPTABLES -A COUNTRY -p tcp --dport 22 --syn -j ACCEPT
| true
|
46f25e7fddf18351bc1d9f6ec7aa9f88191dfed8
|
Shell
|
lixiang2017/leetcode
|
/count_all.sh
|
UTF-8
| 871
| 3.015625
| 3
|
[] |
no_license
|
#! /bin/bash
# count leetcode.com and leetcode-cn.com, stored files at localhost.
# include accpeted and attempted, the attempted will be a very small number.
# not include `explore` topic in leetcode.com, so the result will less than real sum of accepted number in both websites.
# not include `shell`/`lcp`/`sword2offer`/`CrackingTheCodingInterview` in leetcode-cn.com

# Count distinct problems under the given directories: keep entries whose
# names start with a digit, key on the 4-digit problem-id prefix, then
# de-duplicate adjacent ids before counting.
count_problems() {
	ls "$@" | grep -E '^[0-9]' | awk '{print substr($1, 1, 4)}' | uniq | wc -l
}

echo 'leetcode.com and leetcode-cn.com problems count: '
count_problems problems leetcode-cn

echo 'leetcode.com problems count: '
count_problems problems

echo 'leetcode-cn.com problems count: '
count_problems leetcode-cn

echo 'database count: '
count_problems leetcode-cn/sql
| true
|
3cd8f0f972e50145679068c6726cabafb76b4f79
|
Shell
|
kapliy/hepcode
|
/ana/trunk/CommonAnalysis/RootCore/scripts/svn_retry.sh
|
UTF-8
| 248
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
# Run `svn "$@"`, retrying up to three times.  Prints the output of the last
# attempt and, on failure, propagates svn's exit status.

# Honour $TMPDIR but fall back to /tmp.
if test "$TMPDIR" == ""
then
    TMPDIR=/tmp/
fi

# Use an unpredictable temp name — the original's $TMPDIR/svn-out.$$ is
# guessable and vulnerable to symlink attacks in a shared /tmp — and make
# sure it is removed on every exit path, including signals.
FILE=$(mktemp "${TMPDIR}/svn-out.XXXXXX") || exit 1
trap 'rm -f "$FILE"' EXIT

if svn "$@" >"$FILE" || svn "$@" >"$FILE" || svn "$@" >"$FILE"
then
    cat "$FILE"
else
    # Capture the exit status of the last failed attempt before cat resets $?.
    result=$?
    cat "$FILE"
    exit $result
fi
| true
|
8ddf397f4aca2f7359082c3f368d9ccf071f05b0
|
Shell
|
Hexaware-DevOps-Batch2/demo1
|
/hhernan85/script.sh
|
UTF-8
| 4,145
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
# Deploys application.yml to a remote EC2 host and rewrites its property
# placeholders in place (via ssh + sed) with values gathered from the remote
# machine: network identity, date/time, paths and environment variables.

DIRECTORY="~/my-projects/demo2"
PROPERTIESFILE="properties.txt"
SERVER="ubuntu@ec2-18-219-236-181.us-east-2.compute.amazonaws.com"
KEY="trainingubuntukeypair.pem"
APPFILE="application.yml"
# Status flags for the summary planned at the bottom of the script.
CREATED=true
TRANSFERED=true
UPDATED=false
WRITTEN=true

# Create the target directory on the remote host.  ($DIRECTORY expands
# locally to the literal "~/..." and the remote shell expands the tilde.)
function createDirectory(){
ssh -i $KEY $SERVER "mkdir -p $DIRECTORY"
if [ "$?" -eq "0" ];
then
echo "Directory created"
else
CREATED=false
fi
}

# Copy application.yml to the remote directory; set UPDATED when a previous
# copy already existed there.
function transferFile(){
if ssh -i $KEY $SERVER '[ -e $DIRECTORY/$APPFILE ]'
then
UPDATED=true
fi
scp -i $KEY application.yml $SERVER:$DIRECTORY
if [ "$?" -eq "0" ];
then
echo "File transfered"
else
TRANSFERED=false
fi
}

# Make the deployed file owner-writable, group/other-readable.
function changeFilePerm(){
ssh -i $KEY $SERVER "chmod -R 744 $DIRECTORY/$APPFILE"
}

# Replace property $2 in the remote file with the value of remote
# environment variable $1.  Uses '?' as the sed delimiter since values
# (e.g. $HOME) may contain slashes.
function writeEnvProperties(){
local NEW=$(ssh -i $KEY $SERVER "env |grep '${1}'|cut -d'=' -f2")
#local OLD=$(ssh -i $KEY $SERVER "cat $DIRECTORY/$APPFILE |grep '${2}'|cut -d':' -f1")
local OLD=$(ssh -i $KEY $SERVER "cat $DIRECTORY/$APPFILE |grep '${2}'")
ssh -i $KEY $SERVER "sed -i 's?$OLD?'${2}':$NEW?g' $DIRECTORY/$APPFILE"
if [ "$?" -eq "0" ];
then
echo "Env property replaced"
fi
#sed -i 's/$OLD/"${2}":$NEW/g' $DIRECTORY/$APPFILE
}

# Replace property $1 with the remote date (yy-mm-dd).
function writeDateProperties(){
local NEW=$(ssh -i $KEY $SERVER "date +"%y-%m-%d"")
#local OLD=$(ssh -i $KEY $SERVER "cat $DIRECTORY/$APPFILE |grep '${2}'|cut -d':' -f1")
local OLD=$(ssh -i $KEY $SERVER "cat $DIRECTORY/$APPFILE |grep '${1}'")
ssh -i $KEY $SERVER "sed -i 's/$OLD/'${1}':$NEW/g' $DIRECTORY/$APPFILE"
#sed -i 's/$OLD/"${2}":$NEW/g' $DIRECTORY/$APPFILE
}

# Replace property $1 with the remote time (HH:MM:SS).
function writeTimeProperties(){
local NEW=$(ssh -i $KEY $SERVER "date +"%T"")
#local OLD=$(ssh -i $KEY $SERVER "cat $DIRECTORY/$APPFILE |grep '${2}'|cut -d':' -f1")
local OLD=$(ssh -i $KEY $SERVER "cat $DIRECTORY/$APPFILE |grep '${1}'")
ssh -i $KEY $SERVER "sed -i 's/$OLD/'${1}':$NEW/g' $DIRECTORY/$APPFILE"
#sed -i 's/$OLD/"${2}":$NEW/g' $DIRECTORY/$APPFILE
}

# Replace property $1 with the remote working directory ('?' delimiter
# because the path contains slashes).
function writePathProperties(){
local NEW=$(ssh -i $KEY $SERVER "pwd")
#sed 's?#REPLACE-WITH-PATH?'`pwd`'?'
#local OLD=$(ssh -i $KEY $SERVER "cat $DIRECTORY/$APPFILE |grep '${2}'|cut -d':' -f1")
local OLD=$(ssh -i $KEY $SERVER "cat $DIRECTORY/$APPFILE |grep '${1}'")
ssh -i $KEY $SERVER "sed -i 's?$OLD?'${1}':${NEW}?g' $DIRECTORY/$APPFILE"
#sed -i 's/$OLD/"${2}":$NEW/g' $DIRECTORY/$APPFILE
}

# Replace property $1 with the remote host's IP address.
function writeiPProperties(){
local NEW=$(ssh -i $KEY $SERVER "hostname -i")
#local OLD=$(ssh -i $KEY $SERVER "cat $DIRECTORY/$APPFILE |grep '${2}'|cut -d':' -f1")
local OLD=$(ssh -i $KEY $SERVER "cat $DIRECTORY/$APPFILE |grep '${1}'")
ssh -i $KEY $SERVER "sed -i 's/$OLD/'${1}':$NEW/g' $DIRECTORY/$APPFILE"
#sed -i 's/$OLD/"${2}":$NEW/g' $DIRECTORY/$APPFILE
}

# Replace property $1 with the remote hostname.
function writeHostProperties(){
local NEW=$(ssh -i $KEY $SERVER "hostname")
#local OLD=$(ssh -i $KEY $SERVER "cat $DIRECTORY/$APPFILE |grep '${2}'|cut -d':' -f1")
local OLD=$(ssh -i $KEY $SERVER "cat $DIRECTORY/$APPFILE |grep '${1}'")
ssh -i $KEY $SERVER "sed -i 's/$OLD/'${1}':$NEW/g' $DIRECTORY/$APPFILE"
#sed -i 's/$OLD/"${2}":$NEW/g' $DIRECTORY/$APPFILE
}

#netstat -vatn | grep ESTABLISHED
# Replace property $1 with local ports of established remote connections.
# NOTE(review): this can yield multiple lines, which would break the sed
# expression — likely why the call below is commented out.
function writePortProperties(){
local NEW=$(ssh -i $KEY $SERVER "netstat -vatn | grep ESTABLISHED | cut -d':' -f2 | cut -d ' ' -f 1")
echo $NEW
local OLD=$(ssh -i $KEY $SERVER "cat $DIRECTORY/$APPFILE |grep '${1}'")
ssh -i $KEY $SERVER "sed -i 's/$OLD/'${1}':$NEW/g' $DIRECTORY/$APPFILE"
#sed -i 's/$OLD/"${2}":$NEW/g' $DIRECTORY/$APPFILE
}

# Main flow: create the remote directory, push the file, then fill in every
# server/environment property and tighten permissions.
createDirectory
if ssh -i $KEY $SERVER '[ -d $DIRECTORY ]'
then
transferFile
if ssh -i $KEY $SERVER '[ -e $DIRECTORY/$APPFILE ]'
then
#server
writeiPProperties address
#writePortProperties port
writeHostProperties hostname
writeDateProperties date
writeTimeProperties time
writePathProperties contextPath
#environment
writeEnvProperties USER username
writeEnvProperties HOME homeDirectory
writeEnvProperties LANG lang
changeFilePerm
fi
fi
#if [[$CREATED -eq "true"]] && [[ $TRANSFERED -eq "true" ]]
#print results
#ip-address:{Success|Fail}:Success?{new|update}:FailReason|date time
| true
|
f19c55db34633b48fcc9e7f2fbf6f57b187b0a85
|
Shell
|
Yixf-Self/gatk
|
/scripts/cnv_cromwell_tests/germline/run_cnv_germline_workflows.sh
|
UTF-8
| 3,500
| 3.15625
| 3
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash -l
# Travis CI driver for the GATK germline CNV WDL workflows: builds a GATK
# docker image tagged with the commit under test, injects that tag into the
# workflow JSON inputs, then runs each panel/single-sample/cohort workflow
# (WES and WGS variants) through Cromwell.
set -e

#cd in the directory of the script in order to use relative paths
script_path=$( cd "$(dirname "${BASH_SOURCE}")" ; pwd -P )
cd "$script_path"

# Symlink the WDL sources next to the JSON inputs so relative imports resolve.
ln -fs /home/travis/build/broadinstitute/gatk/scripts/cnv_wdl/cnv_common_tasks.wdl
ln -fs /home/travis/build/broadinstitute/gatk/scripts/cnv_wdl/germline/cnv_germline_panel_workflow.wdl
ln -fs /home/travis/build/broadinstitute/gatk/scripts/cnv_wdl/germline/cnv_germline_single_sample_workflow.wdl
ln -fs /home/travis/build/broadinstitute/gatk/scripts/cnv_wdl/germline/cnv_germline_cohort_workflow.wdl

WORKING_DIR=/home/travis/build/broadinstitute

pushd .
echo "Building docker without running unit tests... ========="
cd $WORKING_DIR/gatk
# IMPORTANT: This code is duplicated in the M2 WDL test.
# For pull requests build from the merge commit (FETCH_HEAD); otherwise from
# the commit Travis checked out.
if [ ${TRAVIS_PULL_REQUEST} != false ]; then
  HASH_TO_USE=FETCH_HEAD
  sudo bash build_docker.sh  -e ${HASH_TO_USE} -s -u -d $PWD/temp_staging/ -t ${TRAVIS_PULL_REQUEST};
else
  HASH_TO_USE=${TRAVIS_COMMIT}
  sudo bash build_docker.sh  -e ${HASH_TO_USE} -s -u -d $PWD/temp_staging/;
fi
echo "Docker build done =========="
popd

# Rewrite the __GATK_DOCKER__ placeholder in every workflow input file with
# the image tag built above.
echo "Inserting docker image into json ========"
CNV_CROMWELL_TEST_DIR="${WORKING_DIR}/gatk/scripts/cnv_cromwell_tests/germline/"
sed -r "s/__GATK_DOCKER__/broadinstitute\/gatk\:$HASH_TO_USE/g" ${CNV_CROMWELL_TEST_DIR}/cnv_germline_cohort_workflow_wes.json >cnv_germline_cohort_workflow_wes_mod.json
sed -r "s/__GATK_DOCKER__/broadinstitute\/gatk\:$HASH_TO_USE/g" ${CNV_CROMWELL_TEST_DIR}/cnv_germline_cohort_workflow_wgs.json >cnv_germline_cohort_workflow_wgs_mod.json
sed -r "s/__GATK_DOCKER__/broadinstitute\/gatk\:$HASH_TO_USE/g" ${CNV_CROMWELL_TEST_DIR}/cnv_germline_panel_workflow_wes.json >cnv_germline_panel_workflow_wes_mod.json
sed -r "s/__GATK_DOCKER__/broadinstitute\/gatk\:$HASH_TO_USE/g" ${CNV_CROMWELL_TEST_DIR}/cnv_germline_panel_workflow_wgs.json >cnv_germline_panel_workflow_wgs_mod.json
sed -r "s/__GATK_DOCKER__/broadinstitute\/gatk\:$HASH_TO_USE/g" ${CNV_CROMWELL_TEST_DIR}/cnv_germline_single_sample_workflow_wes.json >cnv_germline_single_sample_workflow_wes_mod.json
sed -r "s/__GATK_DOCKER__/broadinstitute\/gatk\:$HASH_TO_USE/g" ${CNV_CROMWELL_TEST_DIR}/cnv_germline_single_sample_workflow_wgs.json >cnv_germline_single_sample_workflow_wgs_mod.json

# Run each workflow serially; `set -e` aborts the job on the first failure.
echo "Running ========"
CROMWELL_JAR="cromwell-0.28.jar"
# Panel WES
java -jar ~/${CROMWELL_JAR} run /home/travis/build/broadinstitute/gatk/scripts/cnv_wdl/germline/cnv_germline_panel_workflow.wdl cnv_germline_panel_workflow_wes_mod.json
# Panel WGS
java -jar ~/${CROMWELL_JAR} run /home/travis/build/broadinstitute/gatk/scripts/cnv_wdl/germline/cnv_germline_panel_workflow.wdl cnv_germline_panel_workflow_wgs_mod.json
# Single sample WES calling
java -jar ~/${CROMWELL_JAR} run /home/travis/build/broadinstitute/gatk/scripts/cnv_wdl/germline/cnv_germline_single_sample_workflow.wdl cnv_germline_single_sample_workflow_wes_mod.json
# Single sample WGS calling
java -jar ~/${CROMWELL_JAR} run /home/travis/build/broadinstitute/gatk/scripts/cnv_wdl/germline/cnv_germline_single_sample_workflow.wdl cnv_germline_single_sample_workflow_wgs_mod.json
# Cohort WES calling
java -jar ~/${CROMWELL_JAR} run /home/travis/build/broadinstitute/gatk/scripts/cnv_wdl/germline/cnv_germline_cohort_workflow.wdl cnv_germline_cohort_workflow_wes_mod.json
# Cohort WGS calling
java -jar ~/${CROMWELL_JAR} run /home/travis/build/broadinstitute/gatk/scripts/cnv_wdl/germline/cnv_germline_cohort_workflow.wdl cnv_germline_cohort_workflow_wgs_mod.json
| true
|
670c0312dd09fb9edd30bc3079e62f2488c98dbf
|
Shell
|
ProgrammingLab/koneko-online-judge
|
/server/judge.sh
|
UTF-8
| 875
| 3.0625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Online-judge sandbox runner.  Arguments:
#   $1 - delimiter token wrapped around the emitted time/memory/status fields
#   $2 - per-test time limit, passed to timeout(1)
#   $3 - command line executed (as user nobody) for each test case
#   $4 - submission executable name, moved out of ./koj-workspace
# For every ./input/*.txt test case: wipe the world-writable scratch
# locations, feed the case to the command as input.txt, and print the
# wall time, max RSS and exit status delimited by $1.
cd /tmp >/dev/null 2>/dev/null
# Pull the submitted binary out of the workspace and discard the rest.
mv ./koj-workspace/$4 . >/dev/null 2>/dev/null
chmod 755 $4 >/dev/null 2>/dev/null
rm -rf ./koj-workspace >/dev/null 2>/dev/null
# Lock down this script and the test inputs from the sandboxed user.
chmod 700 $0 >/dev/null 2>/dev/null
chmod 700 -R ./input >/dev/null 2>/dev/null
for var in `ls -1 ./input/*.txt`
do
# Clear everything the previous run may have left in locations writable by
# the unprivileged user (as that user, so only its files are removable).
sudo -u nobody chmod 777 -R /tmp >/dev/null 2>/dev/null
sudo -u nobody rm -rf /tmp/* >/dev/null 2>/dev/null
sudo -u nobody chmod 777 -R /var/tmp >/dev/null 2>/dev/null
sudo -u nobody rm -rf /var/tmp/* >/dev/null 2>/dev/null
sudo -u nobody chmod 777 -R /run/lock >/dev/null 2>/dev/null
sudo -u nobody rm -rf /run/lock/* >/dev/null 2>/dev/null
# Stage the current test case where the submission can read it.
cp ${var} input.txt
chmod 744 input.txt
# Run under time(1) (-f: elapsed seconds and max RSS wrapped in $1) with a
# hard timeout, as the unprivileged nobody user.
/usr/bin/time -f "$1%e %M$1" -- timeout $2 /usr/bin/sudo -u nobody -- /bin/bash -c "$3 <input.txt"
# Emit the command's exit status wrapped in the same delimiter.
echo -n $1$?$1
rm -rf input.txt >/dev/null 2>/dev/null
done
| true
|
6e964802d9885b7db2ac71cb0e2eb7512d092c0f
|
Shell
|
OGSnoop/Security
|
/lockout.sh
|
UTF-8
| 4,500
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
##### BUMPER #####
# Incident-response lockdown, phase 1: snapshot key directories, create a
# rescue admin account, and generate scripts that disable every other login.
# (Shebang moved to line 1 — the original "!#/bin/bash" was inverted and
# not on the first line, so it never took effect.)
# Take the interface down while we work (interface name first, then state).
ifconfig enp3s0 down
# Directories to archive before making changes. Intentionally left
# unquoted below so the list word-splits into separate tar arguments.
DATA="/home /root /etc /var"
tar cfzp "/scratcher.tgz" $DATA --same-owner
# Rescue account that must keep shell access.
useradd picklerick
#echo shindigity | passwd --stdin root
#echo nigguhs | passwd --stdin picklerick
# Prompt interactively for the rescue password; the original
# "passwd --stdin picklerick" had nothing piped in and blocked forever.
passwd picklerick
# Build fish.sh: usermod commands that strip the login shell from every
# /bin/bash user except root (replaced by the script header) and
# picklerick. '@' stands in for '/' so the paths survive sed, then the
# three passes below convert the '@'s back one occurrence per pass.
grep -e /bash /etc/passwd | cut -d: -f1 > pickle.txt
sed s/^/'usermod -s @sbin@nologin '/ pickle.txt > dill.txt
sed s/'usermod -s @sbin@nologin root'/'#!@bin@bash'/ dill.txt > llama.txt
sed 's_@_/_' llama.txt > ugh.txt
sed 's_@_/_' ugh.txt > ugh2.txt
sed 's_@_/_' ugh2.txt > ugh3.txt
grep -v 'picklerick' ugh3.txt > fish.sh
chmod +x fish.sh
# Build cake.sh the same way: lock the password of every account except
# root (header line) and picklerick.
cut -d: -f1 /etc/passwd > death.txt
sed s/^/'passwd -l '/ death.txt > dilly.txt
sed s/'passwd -l root'/'#!@bin@bash'/ dilly.txt > llama2.txt
sed 's_@_/_' llama2.txt > um.txt
sed 's_@_/_' um.txt > um2.txt
sed 's_@_/_' um2.txt > um3.txt
grep -v 'picklerick' um3.txt > cake.sh
chmod +x cake.sh
# Restrict interpreters, compilers and system-info sources to root.
chmod 750 /usr/bin/python3.4
chmod 750 /usr/bin/python2
chmod 750 /usr/bin/python3
chmod 750 /usr/bin/python2.7
chmod 750 /usr/bin/python
chmod 750 /usr/bin/perl
chmod 750 /etc/issue
chmod 750 /etc/issue.net
chmod 750 /usr/bin/gcc
chmod 751 /var/log/
chmod 650 /var/log/lastlog
chmod 650 /var/log/firewalld
chmod 650 /var/log/btmp
chmod 750 /bin/dmesg
chmod 750 /bin/uname
chmod 750 /home/*
# Bring the interface back up (interface name first, then state).
ifconfig enp3s0 up
#### Current installs for fedora
# Tools needed for integrity checking, central logging, the iptables
# firewall configured below, and port scanning.
yum -y install aide rsyslog python3-pip iptables-services.x86_64 nmap.x86_64
pip3 install pycryptodome
# NIC down while services are reconfigured (interface first, then state —
# the original "ifconfig down enp3s0" had the arguments reversed).
ifconfig enp3s0 down
# Root may only log in on the first virtual console.
echo "tty1" > /etc/securetty
# Services with no business on a hardened box. One list drives both the
# immediate stop and the boot-time disable so the two cannot drift apart;
# the variable is deliberately unquoted so it word-splits into names.
UNWANTED_SERVICES="acpid portmap cpuspeed apmd autofs bluetooth hidd \
firstboot cups gpm hplip isdn kudzu kdump mcstrans pcscd readahead_early \
readahead_later setroubleshoot rhnsd xfs yum-updatesd avahi-daemon"
# Stop everything first (same order as the original script) ...
for svc in $UNWANTED_SERVICES; do
    service "$svc" stop
done
# ... then keep it from coming back at boot.
for svc in $UNWANTED_SERVICES; do
    chkconfig "$svc" off
done
# log forwarding
# Point rsyslog at the central collector: rewrite the forwarding rule in a
# scratch copy, then install the result back over /etc/rsyslog.conf.
# (The original redirected into ./rsyslog.conf in the working directory,
# so the edit never took effect.)
cp /etc/rsyslog.conf /etc/copyrsyslog.conf
sed s/"*.* @@remote-host:514"/"*.* @@172.20.241.20:9997"/ /etc/copyrsyslog.conf > /etc/rsyslog.conf
service rsyslog restart
rm -f /etc/copyrsyslog.conf
# Clean up the scratch lists created earlier in the script.
rm -f *.txt
# NIC down while the firewall is rebuilt (interface first, then state).
ifconfig enp3s0 down
# Remote-admin daemons off and disabled ("cockpit.socker" typo fixed so
# the socket actually stays disabled after reboot).
systemctl stop cockpit.socket
systemctl disable cockpit.socket
systemctl stop sshd.service
systemctl disable sshd.service
# Replace firewalld with the plain iptables services.
systemctl stop firewalld.service
systemctl disable firewalld.service
systemctl mask firewalld.service
systemctl enable iptables.service
systemctl start iptables.service
systemctl enable ip6tables.service
systemctl start ip6tables.service
# Flush every IPv4 table and chain, then default-deny all traffic.
iptables -t filter -F
iptables -t filter -X
iptables -t nat -F
iptables -t nat -X
iptables -t mangle -F
iptables -t mangle -X
iptables -t raw -F
iptables -t raw -X
iptables -t security -F
iptables -t security -X
iptables -P INPUT DROP
iptables -P OUTPUT DROP
iptables -P FORWARD DROP
# Allow only NTP (123/udp), SMTP (25), POP3 (110) and the log-forwarder
# port (9997) in both directions.
iptables -t filter -A INPUT -p udp --dport 123 -j ACCEPT
iptables -t filter -A OUTPUT -p udp --sport 123 -j ACCEPT
iptables -t filter -A INPUT -p tcp --dport 25 -j ACCEPT
iptables -t filter -A OUTPUT -p tcp --sport 25 -j ACCEPT
iptables -t filter -A INPUT -p tcp --dport 110 -j ACCEPT
iptables -t filter -A OUTPUT -p tcp --sport 110 -j ACCEPT
iptables -t filter -A INPUT -p tcp --dport 9997 -j ACCEPT
iptables -t filter -A OUTPUT -p tcp --sport 9997 -j ACCEPT
#iptables -t filter -A INPUT -p tcp --dport 143 -j ACCEPT
#iptables -t filter -A OUTPUT -p tcp --sport 143 -j ACCEPT
# Same default-deny for IPv6, with no exceptions at all.
ip6tables -t filter -F
ip6tables -t filter -X
ip6tables -t nat -F
ip6tables -t nat -X
ip6tables -t mangle -F
ip6tables -t mangle -X
ip6tables -t raw -F
ip6tables -t raw -X
ip6tables -t security -F
ip6tables -t security -X
ip6tables -P INPUT DROP
ip6tables -P OUTPUT DROP
ip6tables -P FORWARD DROP
# Persist the rule sets and restart with them active.
service iptables save
service ip6tables save
systemctl restart iptables.service
systemctl restart ip6tables.service
# Interface back up (name first, then state).
ifconfig enp3s0 up
# Second snapshot, for diffing against the pre-lockdown archive.
tar cfzp "/ace.tgz" $DATA --same-owner
####PICKLE RRRRRRRRRRRREEEEEEEEEEEEEEEEEEEEEEEEE
| true
|
4506b5bc05923f5346e9020ca2a8b66b96c71514
|
Shell
|
crlorentzen/dotfiles
|
/bashrc
|
UTF-8
| 212
| 2.609375
| 3
|
[] |
no_license
|
# bashrc
# Personal scripts directory joins the search path.
PATH=${PATH}:${HOME}/scripts
# Pull in optional extras, skipping any that are not installed.
for rc_file in "${HOME}/.bash_aliases" /usr/local/bin/virtualenvwrapper.sh; do
    if [ -f "$rc_file" ]; then
        source "$rc_file"
    fi
done
| true
|
b37586c62cb8189e272dcf4d5bf3e18d6aecf29d
|
Shell
|
kdubovikov/catalyst
|
/teamcity/py_deploy.sh
|
UTF-8
| 1,350
| 2.875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Build the documentation and publish it to the gh-pages branch.
# Requires: TEMP (scratch directory) and GH_TOKEN (GitHub push token).
# Only pushes when the current branch is master.
# Cause the script to exit if a single command fails
set -eo pipefail -v

# Fail early with a clear message instead of copying into "/builds".
: "${TEMP:?TEMP must be set to a scratch directory}"

# Install every requirement set, echoing each step for the build log.
for req in requirements/requirements.txt \
           requirements/requirements-cv.txt \
           requirements/requirements-nlp.txt \
           requirements/requirements-rl.txt \
           docs/requirements.txt; do
    echo "pip install -r ${req}"
    pip install -r "${req}"
done

echo "REMOVE_BUILDS=0 make check-docs"
REMOVE_BUILDS=0 make check-docs

# Reuse the source commit message for the gh-pages commit.
echo "COMMENT=$(git log -1 --pretty=%B)"
COMMENT=$(git log -1 --pretty=%B)

echo "cp -a builds $TEMP/builds"
cp -a builds "$TEMP/builds"

echo "cd $TEMP"
cd "$TEMP"

# NOTE: the token is expanded only in the real command, never in the
# echoed log line (the original leaked $GH_TOKEN into the build log).
echo 'git clone --single-branch --branch gh-pages https://GH_TOKEN:$GH_TOKEN@github.com/catalyst-team/catalyst.git'
git clone --single-branch --branch gh-pages "https://GH_TOKEN:$GH_TOKEN@github.com/catalyst-team/catalyst.git"

echo "copying files"
cd catalyst
# Replace the entire published tree with the freshly built docs.
rm -rf *
cp -a "$TEMP"/builds/* .

echo "git commit and push"
git config --global user.email "teamcity@catalyst.github"
git config --global user.name "Teamcity"
git add .
git commit -m "$COMMENT"

# Publish only from master.
BRANCH=$(git rev-parse --abbrev-ref HEAD)
if [ "$BRANCH" = 'master' ]; then
    git push
fi
| true
|
b2302db9fd8f03c68f96dd132c38269a717279e0
|
Shell
|
mongopa/shell
|
/auto-mv.sh
|
UTF-8
| 279
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# Watch the downloads directory and re-sort its contents every time a
# file in it is closed.
WATCH_DIR="/home/riki/Downloads";
while true; do
    # Block until inotify reports a CLOSE event; --format %f yields the
    # bare file name. $(...) replaces the original backticks.
    filename=$(inotifywait -e CLOSE "${WATCH_DIR}" --format "%f" 2> /dev/null)
    # Delegate the actual moving to the sorter script.
    /home/riki/shell/filemove.sh
    ls -l "${WATCH_DIR}"
    # Print "<dir>,<entry count>" for each subdirectory. Reading line by
    # line (instead of word-splitting `find` output in a for-loop) keeps
    # paths containing spaces intact.
    find "${WATCH_DIR}" -type d | while IFS= read -r d; do
        echo "$d,$(ls "$d" | wc -l)"
    done
done
| true
|
629f5bd01fc6e96046352cc845d6e06a69725234
|
Shell
|
sparkleholic/meta-pelux
|
/meta-intel-extras/recipes-support/grub/grub-efi/grubenv-copy.sh
|
UTF-8
| 168
| 2.609375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Ensure the EFI boot partition is mounted and carries a grubenv file.
# Mount the boot partition unless its EFI directory is already visible.
[ -d /boot/EFI/BOOT/ ] || mount /dev/sda1 /boot
# Seed the default GRUB environment block on first boot only.
[ -f /boot/EFI/BOOT/grubenv ] || cp /usr/share/grubenv /boot/EFI/BOOT/grubenv
| true
|
8a440e8da1e9489c57e03b7cc1743dcf8eb987ee
|
Shell
|
C622/TVShow
|
/strip_color.sh
|
UTF-8
| 108
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
VALUE=$(cat $1 | perl -pe 's/\e([^\[\]]|\[.*?[a-zA-Z]|\].*?\a)//g')
echo "$VALUE" > $1
cat $1
| true
|
e593804f49f883faa6dd23587e898c63ce03ec3e
|
Shell
|
lonkamikaze/bsda2
|
/tests/lst.sh
|
UTF-8
| 15,063
| 3.0625
| 3
|
[
"ISC"
] |
permissive
|
. ../src/lst.sh
#
# Verify record separator ${RS} is set by log/rec/csv but not lst.
#
# Each helper succeeds only when ${RS} holds the separator named in the
# function; lst invokes them method-style ("x" is a dummy array name, the
# suffix after the dot selects the lst.check_* callback), so the helpers
# observe whatever RS the wrapper established.
lst.check_log() { test "${RS}" = $'\n'; }
lst.check_rec() { test "${RS}" = $'\036'; }
lst.check_csv() { test "${RS}" = ,; }
lst.check_col() { test "${RS}" = :; }
# Plain `lst` must pass the caller's RS through untouched: for each
# helper, exactly the matching RS value may make the call succeed.
! RS= lst x.check_log
RS=$'\n' lst x.check_log
! RS=$'\036' lst x.check_log
! RS=, lst x.check_log
! RS=: lst x.check_log
! RS= lst x.check_rec
! RS=$'\n' lst x.check_rec
RS=$'\036' lst x.check_rec
! RS=, lst x.check_rec
! RS=: lst x.check_rec
! RS= lst x.check_csv
! RS=$'\n' lst x.check_csv
! RS=$'\036' lst x.check_csv
RS=, lst x.check_csv
! RS=: lst x.check_csv
! RS= lst x.check_col
! RS=$'\n' lst x.check_col
! RS=$'\036' lst x.check_col
! RS=, lst x.check_col
RS=: lst x.check_col
# The specialised wrappers must force their own RS regardless of the
# environment: log uses newline, rec uses the ASCII record separator
# (octal 036), csv uses comma — and none of them uses colon.
log x.check_log
! rec x.check_log
! csv x.check_log
! log x.check_rec
rec x.check_rec
! csv x.check_rec
! log x.check_csv
! rec x.check_csv
csv x.check_csv
! log x.check_col
! rec x.check_col
! csv x.check_col
#
# lst:unpack
#
lst.check_name_i() { test "$1" = "$3"; }
lst.check_index_i() { test "$2" = "$3"; }
! lst foobar[2+3].check_name fox
lst foobar[2+3].check_name foobar
! lst foobar[2+3].check_index 5
lst foobar[2+3].check_index 2+3
lst foobar['2 + 3'].check_index '2 + 3'
(
#
# a[i] calls lst.get_i
#
array=
index=
lst.get_i() { array="$1"; index="$2"; }
rec a[12]
test "${array}" = a
test "${index}" = 12
rec foo_bar['index += 1']
test "${array}" = foo_bar
test "${index}" = 'index += 1'
#
# a[i]= calls lst.set_i
#
array=
index=
value=
lst.set_i() { array="$1"; index="$2"; value="$3"; }
rec a[12]= 1337
test "${array}" = a
test "${index}" = 12
test "${value}" = 1337
rec foo_bar['index += 1']= faßtföod
test "${array}" = foo_bar
test "${index}" = 'index += 1'
test "${value}" = faßtföod
)
#
# a=
#
log l= foo bar baz '' bam ''
test "$l" = $'foo\nbar\nbaz\n\nbam\n\n'
rec r= foo bar baz '' bam ''
test "$r" = $'foo\036bar\036baz\036\036bam\036\036'
csv c=
test -z "$c"
#
# a[i] aka a[i].get aka a.get_i
#
log a= foo bar 'bird in the sky' 'gold mine'
x=x
! log a[0] x
test "${x}" = x
log a[1] x
test "${x}" = 'foo'
log a[3] x
test "${x}" = 'bird in the sky'
x=x
! log a[5] x
test "${x}" = x
! log a[6] x
test "${x}" = x
log a[-1] x
test "${x}" = 'gold mine'
log a[-4] x
test "${x}" = 'foo'
x=x
! log a[-5] x
test "${x}" = x
! log a[-6] x
test "${x}" = x
v=x
! log a[i=0] v
test "${v}" = 'x'
log a[i+=1] v
test "${v}" = foo
test "${i}" -eq 1
log a[i+=1] v
test "${v}" = bar
test "${i}" -eq 2
log a[i+=1] v
test "${v}" = 'bird in the sky'
test "${i}" -eq 3
log a[i+=1] v
test "${v}" = 'gold mine'
test "${i}" -eq 4
v=x
! log a[i+=1] v
test "${v}" = 'x'
test "${i}" -eq 5
! log a[i+=1] v
test "${v}" = 'x'
test "${i}" -eq 6
log a[i=-1] v
test "${v}" = 'gold mine'
test "${i}" -eq -1
log a[i-=1] v
test "${v}" = 'bird in the sky'
test "${i}" -eq -2
log a[i-=1] v
test "${v}" = 'bar'
test "${i}" -eq -3
log a[i-=1] v
test "${v}" = 'foo'
test "${i}" -eq -4
v=x
! log a[i-=1] v
test "${v}" = 'x'
test "${i}" -eq -5
! log a[i-=1] v
test "${v}" = 'x'
test "${i}" -eq -6
log l= foo bar baz '' bam ''
test "$l" = $'foo\nbar\nbaz\n\nbam\n\n'
log l[-1] x
test "$x" = bam
log l[-2] x
test "$x" = baz
log l[-4] x
test "$x" = foo
! log l[-5] x
test "$x" = foo
test "$l" = $'foo\nbar\nbaz\n\nbam\n\n'
rec r= foo bar baz '' bam ''
test "$r" = $'foo\036bar\036baz\036\036bam\036\036'
x=1
rec r[-1] x
test "$x" = ''
rec r[-2] x
test "$x" = bam
rec r[-4] x
test "$x" = baz
! rec r[-7] x
test "$x" = baz
test "$r" = $'foo\036bar\036baz\036\036bam\036\036'
#
# a[i]= aka a[i].set aka a.set_i
#
log l= foo bar baz '' bam ''
! log l[6]= x
test "$l" = $'foo\nbar\nbaz\n\nbam\n\n'
log l[-1]= x
test "$l" = $'foo\nbar\nbaz\nx\n'
rec r= foo bar baz '' bam ''
test "$r" = $'foo\036bar\036baz\036\036bam\036\036'
rec r[6]= x
test "$r" = $'foo\036bar\036baz\036\036bam\036x\036'
! rec r[0]= x
! rec r[7]= x
test "$r" = $'foo\036bar\036baz\036\036bam\036x\036'
rec r[-2]= y
test "$r" = $'foo\036bar\036baz\036\036y\036x\036'
! rec r[-7]= y
test "$r" = $'foo\036bar\036baz\036\036y\036x\036'
i=0
while rec r[i+=1]= $i; do :; done
test "$r" = $'0\0361\0362\0363\0364\0365\036'
#
# a[i].rm aka a.rm_i
#
log l= foo bar baz '' bam ''
test "$l" = $'foo\nbar\nbaz\n\nbam\n\n'
log l[-2].rm
test "$l" = $'foo\nbar\nbam\n'
rec r= foo bar baz '' bam ''
test "$r" = $'foo\036bar\036baz\036\036bam\036\036'
rec r[2].rm
test "$r" = $'foo\036baz\036\036bam\036\036'
rec r[-1].rm
test "$r" = $'foo\036baz\036\036bam\036'
! rec r[0].rm
test "$r" = $'foo\036baz\036\036bam\036'
! rec r[-5].rm
test "$r" = $'foo\036baz\036\036bam\036'
#
# a.resize
#
rec a= 1 2 3 4
test "$a" = $'1\0362\0363\0364\036'
rec a.resize 6
test "$a" = $'1\0362\0363\0364\036\036\036'
rec a.resize 3
test "$a" = $'1\0362\0363\036'
rec a.resize 5 X
test "$a" = $'1\0362\0363\036X\036X\036'
rec a= '' '' 3 '' 5 ''
test "$a" = $'\036\0363\036\0365\036\036'
rec a.resize 8
test "$a" = $'\036\0363\036\0365\036\036\036\036'
rec a.resize 10 ...
test "$a" = $'\036\0363\036\0365\036\036\036\036...\036...\036'
log a= '' '' 3 '' 5 ''
test "$a" = $'\n\n3\n\n5\n\n'
log a.resize 8
test "$a" = $'3\n5\n\n\n\n\n\n\n'
#
# a.push_front
# a.peek_front
#
log a=
test -z "$a"
log a.push_front
test -z "$a"
! log a.peek_front
log a.push_front 'fire in the hole'
test "$a" = $'fire in the hole\n'
log a.peek_front x
test "$x" = 'fire in the hole'
log a.push_front xXx '...oooOOO(foo)' '(bar)OOOooo...'
test "$a" = $'(bar)OOOooo...\n...oooOOO(foo)\nxXx\nfire in the hole\n'
log a.peek_front x
test "$x" = '(bar)OOOooo...'
log a.push_front '' '' ''
test "$a" = $'\n\n\n(bar)OOOooo...\n...oooOOO(foo)\nxXx\nfire in the hole\n'
log a.peek_front x
test "$x" = ''
log a.push_front x ''
test "$a" = $'\nx\n\n\n\n(bar)OOOooo...\n...oooOOO(foo)\nxXx\nfire in the hole\n'
x=1
log a.peek_front x
test "$x" = ''
#
# a.push_back
# a.peek_back
#
log a=
test -z "$a"
log a.push_back
test -z "$a"
! log a.peek_back
log a.push_back 'fire in the hole'
test "$a" = $'fire in the hole\n'
log a.peek_back x
test "$x" = 'fire in the hole'
log a.push_back xXx '...oooOOO(foo)' '(bar)OOOooo...'
test "$a" = $'fire in the hole\nxXx\n...oooOOO(foo)\n(bar)OOOooo...\n'
log a.peek_back x
test "$x" = '(bar)OOOooo...'
log a.push_back '' '' ''
test "$a" = $'fire in the hole\nxXx\n...oooOOO(foo)\n(bar)OOOooo...\n\n\n\n'
log a.peek_back x
test "$x" = ''
log a.push_back x ''
test "$a" = $'fire in the hole\nxXx\n...oooOOO(foo)\n(bar)OOOooo...\n\n\n\nx\n\n'
log a.peek_back x
test "$x" = ''
#
# a.pop_front
#
log a=
test -z "$a"
log a.pop_front; test $? -eq 1
log a= 'this is' sparta
test "$a" = $'this is\nsparta\n'
log a.pop_front x
test "$x" = 'this is'
test "$a" = $'sparta\n'
log a= 'this is' sparta recursively pop values from array
log a.pop_front x0 x1 x2 x3
test "$x0" = 'this is'
test "$x1" = sparta
test "$x2" = recursively
test "$x3" = pop
test "$a" = $'values\nfrom\narray\n'
log a.pop_front x0 x1 x2 x3; test $? -eq 4
test "$x0" = values
test "$x1" = from
test "$x2" = array
test "$x3" = pop
test -z "$a"
log a= a b '' d e '' g h ''
test "$a" = $'a\nb\n\nd\ne\n\ng\nh\n\n'
log a.pop_front x0 x1 x2 x3
test "$x0" = a
test "$x1" = b
test "$x2" = ''
test "$x3" = d
test "$a" = $'e\n\ng\nh\n\n'
log a.pop_front x0 x1 x2 x3
test "$x0" = e
test "$x1" = ''
test "$x2" = g
test "$x3" = h
test "$a" = $'\n'
log a.pop_front x0 x1 x2 x3; test $? -eq 2
test "$x0" = ''
test "$x1" = ''
test "$x2" = g
test "$x3" = h
test -z "$a"
#
# a.pop_back
#
log a=
test -z "$a"
log a.pop_back; test $? -eq 1
log a= 'this is' sparta
test "$a" = $'this is\nsparta\n'
log a.pop_back x
test "$x" = sparta
test "$a" = $'this is\n'
log a= 'this is' sparta recursively pop values from array
log a.pop_back x0 x1 x2 x3
test "$x0" = array
test "$x1" = from
test "$x2" = values
test "$x3" = pop
test "$a" = $'this is\nsparta\nrecursively\n'
log a.pop_back x0 x1 x2 x3; test $? -eq 4
test "$x0" = recursively
test "$x1" = sparta
test "$x2" = 'this is'
test "$x3" = pop
test -z "$a"
log a= a b '' d e '' g h ''
test "$a" = $'a\nb\n\nd\ne\n\ng\nh\n\n'
log a.pop_back x0 x1 x2 x3
test "$x0" = ''
test "$x1" = h
test "$x2" = g
test "$x3" = ''
test "$a" = $'a\nb\n\nd\ne\n'
log a.pop_back x0 x1 x2 x3
test "$x0" = e
test "$x1" = d
test "$x2" = ''
test "$x3" = b
test "$a" = $'a\n'
log a.pop_back x0 x1 x2 x3; test $? -eq 2
test "$x0" = a
test "$x1" = d
test "$x2" = ''
test "$x3" = b
test -z "$a"
#
# a.rm_first
#
log a=
test -z "$a"
log a.rm_first; test $? -eq 1
log a= a b c '' a b c '' a b c ''
test "$a" = $'a\nb\nc\n\na\nb\nc\n\na\nb\nc\n\n'
log a.rm_first
test "$a" = $'a\nb\nc\na\nb\nc\n\na\nb\nc\n\n'
log a.rm_first '' '' ''; test $? -eq 3
test "$a" = $'a\nb\nc\na\nb\nc\na\nb\nc\n'
log a.rm_first d; test $? -eq 1
test "$a" = $'a\nb\nc\na\nb\nc\na\nb\nc\n'
log a.rm_first a c
test "$a" = $'b\na\nb\nc\na\nb\nc\n'
#
# a.rm_last
#
log a=
test -z "$a"
log a.rm_last; test $? -eq 1
log a= a b c '' a b c '' a b c ''
test "$a" = $'a\nb\nc\n\na\nb\nc\n\na\nb\nc\n\n'
log a.rm_last
test "$a" = $'a\nb\nc\n\na\nb\nc\n\na\nb\nc\n'
log a.rm_last '' '' ''; test $? -eq 3
test "$a" = $'a\nb\nc\na\nb\nc\na\nb\nc\n'
log a.rm_last d; test $? -eq 1
test "$a" = $'a\nb\nc\na\nb\nc\na\nb\nc\n'
log a.rm_last a c
test "$a" = $'a\nb\nc\na\nb\nc\nb\n'
#
# a.count
#
rec a=
test -z "$a"
rec a.count n
test "$n" -eq 0
rec a= 'foo bar' '' baz bam keks dose
test "$a" = $'foo bar\036\036baz\036bam\036keks\036dose\036'
rec a.count n
test "$n" -eq 6
log a=
test -z "$a"
log a.count n
test "$n" -eq 0
log a= 'foo bar' '' baz bam keks dose
test "$a" = $'foo bar\n\nbaz\nbam\nkeks\ndose\n'
log a.count n
test "$n" -eq 5
#
# a.contains
# a.contains_any
# a.contains_all
#
log a=
test -z "$a"
! log a.contains
! log a.contains ''
log a.contains_all
! log a.contains_all ''
! log a.contains_any
! log a.contains_any ''
log a= 'foo bar' '' baz bam keks dose
! log a.contains
log a.contains ''
log a.contains_all
log a.contains_all ''
! log a.contains_any
log a.contains_any ''
! log a.contains foo
! log a.contains_all foo
! log a.contains_any foo
log a.contains keks
! log a.contains_all foo keks
log a.contains_any foo keks
log a.contains 'foo bar'
log a.contains_all 'foo bar' keks
log a.contains_any 'foo bar' keks
! log a.contains_all 'foo bar' koks
log a.contains_any 'foo bar' koks
#
# a.is_defined
# a.is_undefined
# a.is_empty
# a.is_not_empty
#
unset a
! rec a.is_defined
rec a.is_undefined
rec a.is_empty
! rec a.is_not_empty
rec a=
rec a.is_defined
! rec a.is_undefined
rec a.is_empty
! rec a.is_not_empty
rec a.push_back ''
rec a.is_defined
! rec a.is_undefined
! rec a.is_empty
rec a.is_not_empty
rec a.push_back keks dose
rec a.is_defined
! rec a.is_undefined
! rec a.is_empty
rec a.is_not_empty
unset a
! log a.is_defined
log a.is_undefined
log a.is_empty
! log a.is_not_empty
log a=
log a.is_defined
! log a.is_undefined
log a.is_empty
! log a.is_not_empty
log a.push_back ''
log a.is_defined
! log a.is_undefined
! log a.is_empty
log a.is_not_empty
log a.push_back keks dose
log a.is_defined
! log a.is_undefined
! log a.is_empty
log a.is_not_empty
#
# a aka a.print
#
(
rs=
lst_print=
lst.print() { local IFS; IFS=,; rs="${RS}"; lst_print="$*"; }
log a
test "${lst_print}" = a
test "${rs}" = $'\n'
csv b c d
test "${lst_print}" = b,c,d
test "${rs}" = ,
)
log a= foo bar '' baz
test "$a" = $'foo\nbar\n\nbaz\n'
test "$(ORS=, log a)" = foo,bar,baz
log b= bang '' boom bang
test "$(ORS=' ' log a b)" = 'foo bar baz bang boom bang'
rec a= foo bar '' baz
test "$a" = $'foo\036bar\036\036baz\036'
test "$(ORS=- rec a)" = foo-bar--baz
rec b= bang '' boom bang
test "$b" = $'bang\036\036boom\036bang\036'
test "$(ORS=* rec a b)" = foo*bar**baz*bang**boom*bang
test "$(ORS=$'\n' rec a a)" = $'foo\nbar\n\nbaz\nfoo\nbar\n\nbaz'
#
# a.printf
#
rec a= foo bar '' baz
test "$a" = $'foo\036bar\036\036baz\036'
test "$(rec a.printf '[%s]\n')" = $'[foo]\n[bar]\n[]\n[baz]'
log a= foo bar '' baz
test "$a" = $'foo\nbar\n\nbaz\n'
test "$(log a.printf '[%s]\n')" = $'[foo]\n[bar]\n[baz]'
#
# a.append
#
log a= 'foo bar' '' baz bam keks dose
log b= '' '' ''
log c=
log d= 'this is the' end
log a.append b c d
test "$a" = $'foo bar\n\nbaz\nbam\nkeks\ndose\n\n\n\nthis is the\nend\n'
#
# a.set_irs
# a.set_ors
# a.set_ifs
#
unset IRS ORS IFS
log a.set_irs
test "${IRS}" = $'\n'
test -z "${ORS+1}"
test -z "${IFS+1}"
unset IRS ORS IFS
log a.set_ors
test -z "${IRS+1}"
test "${ORS}" = $'\n'
test -z "${IFS+1}"
unset IRS ORS IFS
log a.set_ifs
test -z "${IRS+1}"
test -z "${ORS+1}"
test "${IFS}" = $'\n'
unset IRS ORS IFS
rec a.set_irs
test "${IRS}" = $'\036'
test -z "${ORS+1}"
test -z "${IFS+1}"
unset IRS ORS IFS
rec a.set_ors
test -z "${IRS+1}"
test "${ORS}" = $'\036'
test -z "${IFS+1}"
unset IRS ORS IFS
rec a.set_ifs
test -z "${IRS+1}"
test -z "${ORS+1}"
test "${IFS}" = $'\036'
#
# a=cat aka lst:cat
#
(
lst_cat=
rs=
lst:cat() { local IFS; IFS=,; rs="${RS}" lst_cat="$*"; }
log a=cat foo bar baz
test "${rs}" = $'\n'
test "${lst_cat}" = a,foo,bar,baz
rec a=cat foo bar baz
test "${rs}" = $'\036'
test "${lst_cat}" = a,foo,bar,baz
)
log a= 'foo bar' '' baz bam keks dose
log b= '' '' ''
log c=
log d= 'this is the' end
log e=cat a b c d
test "$e" = $'foo bar\n\nbaz\nbam\nkeks\ndose\n\n\n\nthis is the\nend\n'
#
# lst:convert
#
log a=
test "${a}" = ''
IRS=$'\n' ORS=,: lst:convert a x
test "${x}" = ''
IRS=$'\n' ORS=,: lst:convert a a
test "${a}" = ''
log a= foo bar baz
test "${a}" = $'foo\nbar\nbaz\n'
IRS=$'\n' ORS=,: lst:convert a x
test "${x}" = 'foo,bar,baz,'
IRS=$'\n' ORS=,: lst:convert a a
test "${a}" = 'foo,bar,baz,'
log a= '' foo '' bar baz
test "${a}" = $'\nfoo\n\nbar\nbaz\n'
IRS=$'\n' ORS=,: lst:convert a x
test "${x}" = 'foo,bar,baz,'
IRS=$'\n' ORS=,: lst:convert a a
test "${a}" = 'foo,bar,baz,'
csv c= '' foo '' bar baz
test "${c}" = ',foo,,bar,baz,'
IRS=$',' ORS=$'\n': lst:convert c a
test "${a}" = $'\nfoo\n\nbar\nbaz\n'
#
# lst:cast
#
log a= foo $'bar\tender' baz
lst:cast log:a csv:b
test "${b}" = $'foo,bar\tender,baz,'
lst:cast csv:b log:c
test "${c}" = $'foo\nbar\tender\nbaz\n'
lst:cast csv:b rec:d
test "${d}" = $'foo\036bar\tender\036baz\036'
lst:cast rec:d csv:e
test "${e}" = $'foo,bar\tender,baz,'
rec a= $'text\nwith' $'line\nfeed' $'for the\nlulz'
test "${a}" = $'text\nwith\036line\nfeed\036for the\nlulz\036'
lst:cast rec:a csv:b
test "${b}" = $'text\nwith,line\nfeed,for the\nlulz,'
lst:cast csv:b log:c
test "${c}" = $'text\nwith\nline\nfeed\nfor the\nlulz\n'
lst:cast log:c rec:d
test "${d}" = $'text\036with\036line\036feed\036for the\036lulz\036'
lst:cast log:c csv:e
test "${e}" = $'text,with,line,feed,for the,lulz,'
lst:cast csv:e rec:e
test "${e}" = $'text\036with\036line\036feed\036for the\036lulz\036'
rec a= text with '' empty entries '' ''
test "$a" = $'text\036with\036\036empty\036entries\036\036\036'
lst:cast rec:a log:b
test "$b" = $'text\nwith\n\nempty\nentries\n\n\n'
lst:cast log:b rec:c
test "$c" = $'text\036with\036empty\036entries\036'
| true
|
ab7fbf677ede9eaff86efbf13eb9372a6c43b9c0
|
Shell
|
rafmagns-skepa-dreag/stately-plump-buck
|
/bash/zshrc
|
UTF-8
| 3,478
| 3.109375
| 3
|
[] |
no_license
|
# Path to your oh-my-zsh installation.
export ZSH=/home/$USER/.oh-my-zsh
export TERM=xterm-256color
export TMPDIR=/home/rhanson/tmp
# source pg10 before adding pg13 to the path
source /opt/rh/rh-postgresql10/enable
source /opt/rh/rh-git218/enable
CARGO_BIN=$HOME/.cargo/bin
POETRY=$HOME/.poetry/bin
CUSTOM_BIN=$HOME/.bin
PYENV_BIN=$HOME/.pyenv/bin
TOOLS_BIN=$HOME/tools/bin
CODE=$HOME/VSCode-linux-x64/bin
REMOTE_TOOLS_BIN=$HOME/remote-home/tools/bin
PG13_BIN=/usr/pgsql-13/bin
#export PATH=$PG13_BIN:$CODE:$PYENV_BIN:$POETRY:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin:$PATH
export PATH=$PG13_BIN:$TOOLS_BIN:$REMOTE_TOOLS_BIN:$CODE:$CARGO_BIN:$PYENV_BIN:$POETRY:$CUSTOM_BIN:$HOME/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin:$PATH
export TERMINFO=/usr/share/terminfo
export LD_LIBRARY_PATH=$PG13_BIN/../lib:$HOME/remote-home/tools/lib:$LD_LIBRARY_PATH
ZSH_THEME="miloshadzic"
DISABLE_UNTRACKED_FILES_DIRTY="true"
# Standard plugins can be found in $ZSH/plugins/
# Custom plugins may be added to $ZSH_CUSTOM/plugins/
plugins=(
bazel
git
common-aliases
ripgrep
rust
# pyenv
fzf
fd
wd
redis-cli
docker
docker-compose
)
source $ZSH/oh-my-zsh.sh
# User configuration
export LANG=en_US.UTF-8
export EDITOR=nvim
export PYTHONSTARTUP=~/.pythonrc
export POETRY_VIRTUALENVS_IN_PROJECT=true
export MANPAGER="sh -c 'col -bx | bat -l man -p'"
export BAT_THEME="gruvbox-dark"
unalias fd 2>/dev/null
alias vi=/usr/bin/vim
alias vim=nvim
alias pcr="pre-commit run"
alias ls="exa"
alias la="ls -alh"
alias rga="rg -A 1"
#alias git="git branchless wrap"
alias git-clean="git fetch -p && git branch -vv | rg -v '(\*|\+)' | awk '/: gone]/{print \$1}' | xargs -r git branch -D"
alias cat=bat
alias find=fd
alias z=zenith
# pyenv should be loaded by oh-my-zsh
# eval "$(pyenv init -)"
# eval "$(pyenv virtualenv-init -)"
eval "$(starship init zsh)"
# source /opt/rh/rh-git218/enable
if [ -n "$PYTHONPATH" ]; then
export PYTHONPATH='/home/rhanson/.local/share/pdm/venv/lib64/python3.8/site-packages/pdm/pep582':$PYTHONPATH
else
PYTHONPATH='/home/rhanson/.local/share/pdm/venv/lib64/python3.8/site-packages/pdm/pep582'
fi
[ -f ~/.fzf.zsh ] && source ~/.fzf.zsh
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion" # This loads nvm bash_completion
nvm use --lts
bindkey '^[[1;5D' backward-word
bindkey '^[[1;5C' forward-word
bindkey '^[^[[D' backward-word
bindkey '^[^[[C' forward-word
function find_files() {
rg -ic $1 | sed 's/:[1-9]\+$//g' | xargs exa -lFhs modified
}
function refdb-p() {
curl -X 'POST' 'https://ny5-p-refdb-v00.omc.oldmissioncapital.com/v1/query' -H 'accept: application/json' -H 'Content-Type: application/json' -d "{\"security_type\": \"BOND\",\"where\": {\"join_keyword\": \"OR\",\"items\": [{\"cusip\": [\"$1\"]}]}}" | jq '.data'
}
function refdb() {
curl -X 'POST' "https://ny5-$1-refdb-v00.omc.oldmissioncapital.com/v1/query" -H 'accept: application/json' -H 'Content-Type: application/json' -d "{\"security_type\": \"BOND\",\"where\": {\"join_keyword\": \"OR\",\"items\": [{\"cusip\": [\"$2\"]}]}}" | jq '.data'
}
# Add RVM to PATH for scripting. Make sure this is the last PATH variable change.
export PATH="$PATH:$HOME/.rvm/bin"
[[ -s "$HOME/.rvm/scripts/rvm" ]] && source "$HOME/.rvm/scripts/rvm" # Load RVM into a shell session *as a function*
| true
|
ec8671279d42cc55dea0963a6e96718f053c1b56
|
Shell
|
mclaybaugh/linux-setup
|
/config/zsh-git-prompt-agnoster.zsh
|
UTF-8
| 2,686
| 3.328125
| 3
|
[
"MIT"
] |
permissive
|
# https://gist.github.com/fabienduhamel/df178911bddea12098b2aa06752bbf2a
# Better zsh git prompt with zsh-git-prompt
# Add it to your .zshrc after the plugins(... zsh-git-prompt ...) line
# First, it rewrites git prompt style
# Then overrides git_super_status()
# And overrides build_prompt() from agnoster (depends of what you want)
ZSH_THEME_GIT_PROMPT_PREFIX=""
ZSH_THEME_GIT_PROMPT_SUFFIX=""
ZSH_THEME_GIT_PROMPT_SEPARATOR=" "
ZSH_THEME_GIT_PROMPT_BRANCH="%{$fg[black]%}"
ZSH_THEME_GIT_PROMPT_STAGED="%{$fg[green]%}%{●%G%}"
ZSH_THEME_GIT_PROMPT_CONFLICTS="%{$fg[magenta]%}%{✖%G%}"
ZSH_THEME_GIT_PROMPT_CHANGED="%{$fg[red]%}%{✚%G%}"
ZSH_THEME_GIT_PROMPT_BEHIND="%{$fg[black]%}%{↓%G%}"
ZSH_THEME_GIT_PROMPT_AHEAD="%{$fg[black]%}%{↑%G%}"
ZSH_THEME_GIT_PROMPT_UNTRACKED="%{$fg[black]%}%{…%G%}"
ZSH_THEME_GIT_PROMPT_CLEAN="%{$fg[green]%}%{✔%G%}"
# Build the git portion of the prompt from zsh-git-prompt's cached state.
# Reads the GIT_* counters populated by precmd_update_git_vars and the
# ZSH_THEME_GIT_PROMPT_* symbols defined above; prints nothing when the
# current directory is not inside a git work tree.
git_super_status() {
# Refresh __CURRENT_GIT_STATUS and the GIT_* counters.
precmd_update_git_vars
if [ -n "$__CURRENT_GIT_STATUS" ]; then
STATUS="$ZSH_THEME_GIT_PROMPT_PREFIX$ZSH_THEME_GIT_PROMPT_BRANCH$GIT_BRANCH"
# Only add a space after the branch name when arrows will follow.
if [ "$GIT_BEHIND" -ne "0" ] || [ "$GIT_AHEAD" -ne "0" ]; then
STATUS="$STATUS "
fi
if [ "$GIT_BEHIND" -ne "0" ]; then
STATUS="$STATUS$ZSH_THEME_GIT_PROMPT_BEHIND$GIT_BEHIND"
fi
if [ "$GIT_AHEAD" -ne "0" ]; then
STATUS="$STATUS$ZSH_THEME_GIT_PROMPT_AHEAD$GIT_AHEAD"
fi
STATUS="$STATUS$ZSH_THEME_GIT_PROMPT_SEPARATOR"
# One counter per kind of pending change, each with its own symbol.
if [ "$GIT_STAGED" -ne "0" ]; then
STATUS="$STATUS$ZSH_THEME_GIT_PROMPT_STAGED$GIT_STAGED"
fi
if [ "$GIT_CONFLICTS" -ne "0" ]; then
STATUS="$STATUS$ZSH_THEME_GIT_PROMPT_CONFLICTS$GIT_CONFLICTS"
fi
if [ "$GIT_CHANGED" -ne "0" ]; then
STATUS="$STATUS$ZSH_THEME_GIT_PROMPT_CHANGED$GIT_CHANGED"
fi
if [ "$GIT_UNTRACKED" -ne "0" ]; then
STATUS="$STATUS$ZSH_THEME_GIT_PROMPT_UNTRACKED"
fi
# A checkmark when the work tree is completely clean.
if [ "$GIT_CHANGED" -eq "0" ] && [ "$GIT_CONFLICTS" -eq "0" ] && [ "$GIT_STAGED" -eq "0" ] && [ "$GIT_UNTRACKED" -eq "0" ]; then
STATUS="$STATUS$ZSH_THEME_GIT_PROMPT_CLEAN"
fi
# Re-assert the yellow segment background after the colour resets above
# so agnoster's segment rendering stays intact.
STATUS="$STATUS$ZSH_THEME_GIT_PROMPT_SUFFIX%{${reset_color}%}%{$bg[yellow]%}"
echo "$STATUS"
fi
}
# Override agnoster theme
# Render the current time (zsh prompt escape %* = HH:MM:SS) as a
# grey-on-black agnoster segment.
prompt_time() {
local clock_fmt="%*"
prompt_segment black grey "$clock_fmt"
}
# Show a status dot for the previous command: green on success, red on
# failure. Must run first in build_prompt so $? still holds the exit
# status of the user's last command when it is captured here.
prompt_ret_status () {
local RET_STATUS=$?
# UTF-8 byte escapes for U+25CF BLACK CIRCLE; presumably expanded by
# prompt_segment / print -P downstream — TODO confirm the glyph renders.
local RET_CHAR='\xE2\x97\x8F'
[[ $RET_STATUS -eq 0 ]] && prompt_segment black green "$RET_CHAR" || prompt_segment black red "$RET_CHAR"
}
# Wrap git_super_status in a yellow agnoster segment, but only when the
# cached status says we are inside a git repository.
prompt_git_super_status() {
if [ -n "$__CURRENT_GIT_STATUS" ]; then
prompt_segment yellow black " $(git_super_status)"
fi
}
# Override agnoster's prompt assembly. Order matters: prompt_ret_status
# must come first so it can read the last command's exit status before
# any other helper clobbers $?.
build_prompt() {
prompt_ret_status
prompt_virtualenv
prompt_time
prompt_dir
prompt_git_super_status
prompt_bzr
prompt_hg
prompt_end
}
| true
|
5212f11897bb74e829d0e55cfbbebed7b4ae39dd
|
Shell
|
wincent/wincent-on-rails
|
/script/start
|
UTF-8
| 299
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/sh
# Start the requested service(s); currently only nginx is managed, and
# both "nginx" and "all" select it.
usage() {
echo "usage: $0 [nginx|all]"
exit 1
}

# At least one argument is required.
[ $# = 0 ] && usage

# Consume every argument; any unrecognised word aborts with usage.
while [ $# != 0 ]; do
case "$1" in
nginx|all)
START_NGINX=1
;;
*)
usage
;;
esac
shift
done

if [ -n "$START_NGINX" ]; then
script/nginx
fi
| true
|
c995e51959715a0e5b336351c6eea40a840dba08
|
Shell
|
m0rphtail/Linux-scripts
|
/brute-update.sh
|
UTF-8
| 161
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/sh
# Script to update/download if your internet is slow: keep retrying the
# package upgrade until it finally completes without error.
# (Shebang moved to line 1 — it was on line 2 and therefore inert.)
INPUT_STRING=yes
while [ "$INPUT_STRING" != "no" ]
do
	# A successful upgrade flips the flag and ends the loop.
	sudo eopkg up -y && INPUT_STRING=no
done
| true
|
d792b47a402eb431acfd4feba72a564dce9e6a5f
|
Shell
|
andrusha10t500/InfoPhoneWithQml
|
/CopyDataBases.sh
|
UTF-8
| 339
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
# Pull the Android contacts and SMS databases to the local SMS/ directory.
# Requires a rooted device reachable through adb.
#cd ~/InfoPhone
# Local staging directory for the pulled files (-p: no error if present).
mkdir -p SMS
# Locate the databases on the device (paths vary between Android builds).
# NOTE(review): adb shell output may carry a trailing CR on some devices;
# confirm the paths are clean before relying on this unattended.
DBContacts=$(adb shell su -c find /data/ -name contacts2.db)
DBSMS=$(adb shell su -c find /data/ -name mmssms.db)
# Copy them somewhere adb can read without root, then pull to the host.
# Paths are quoted so a single result with spaces survives intact.
adb shell su -c cp "$DBContacts" /storage/sdcard1/Download/SMS/
adb shell su -c cp "$DBSMS" /storage/sdcard1/Download/SMS/
adb pull /storage/sdcard1/Download/SMS .
| true
|
568b30865ae54f3c9b6cd6a3b7901db718b16ca5
|
Shell
|
shahrzad/phylanx_dist
|
/scripts/old/cpp_one_node.sh
|
UTF-8
| 1,471
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/bash
#SBATCH -N 1
#SBATCH -p medusa
#SBATCH --time=72:00:00
# Benchmark sweep for the distributed conv1d test over HPX thread counts
# and problem sizes; one result file per parameter combination.
#   $1 - SLURM partition / node name to run on
#   $2 - number of localities (nodes) to use
node_name=$1
num_nodes=$2
script_dir="/work/sshirzad/phylanx_dist/scripts"
result_dir="/work/sshirzad/phylanx_dist/results"
phylanx_bin_dir="/home/sshirzad/src/phylanx/build_release_clang_no_hpxmp_medusa/bin"
# Problem-size grid: batch sizes (input1), signal lengths (input2),
# filter lengths (filter1) and output channels (filter2). The commented
# lists are the full sweeps; the active ones are the current subset.
#input1=(10 100 1000 10000 100000 50 500 5000 50000)
#input1=(50 500 5000 50000)
input1=(100000)
#input2=(10 100 1000 50 500 5000 50000)
input2=(500)
filter1=(10)
#filter1=(10 50 5 100 500)
#filter2=(10 50 100)
#filter2=(1 2 5 20 10 50 100)
filter2=(5)
# HPX worker-thread counts to sweep.
thr=(1 4 8 16 24 32 40)
export PATH=${phylanx_bin_dir}:$PATH
#rm ${result_dir}/*.dat
# Keep OpenMP out of the picture; HPX manages the threading.
export OMP_NUM_THREADS=1
for num_cores in ${thr[@]}
do
for i1 in ${input1[@]}
do
for i2 in ${input2[@]}
do
for f1 in ${filter1[@]}
do
# Skip combinations where the filter is longer than the signal.
if [ $i2 -ge $f1 ]
then
for f2 in ${filter2[@]}
do
echo "input ${i1}x${i2}x3 filter ${f1}x${f2}x3"
# touch+rm guarantees any stale result file is gone before srun appends.
touch ${result_dir}/${node_name}_cpp_${i1}_${i2}_${f1}_${f2}_${num_nodes}_${num_cores}.dat
rm ${result_dir}/${node_name}_cpp_${i1}_${i2}_${f1}_${f2}_${num_nodes}_${num_cores}.dat
srun -p ${node_name} -N ${num_nodes} ${phylanx_bin_dir}/conv1d_dist_instrumented_test --batch=${i1} --length=${i2} --channel=3 --filter_length=${f1} --out_channels=${f2} --hpx:localities=${num_nodes} --hpx:threads=${num_cores}>>${result_dir}/${node_name}_cpp_${i1}_${i2}_${f1}_${f2}_${num_nodes}_${num_cores}.dat
done
fi
done
done
done
done
| true
|
24a00ad0a4636861172d4b7f76772e9a92c360d3
|
Shell
|
webfrogs/ToolKit
|
/configs/vscode/vscode-configer.sh
|
UTF-8
| 1,470
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
# Link this repository's VS Code settings, keybindings and snippets into
# the user's VS Code configuration directory, replacing whatever is there.
set -e

ShellFolderPath=$(cd $(dirname $0) && pwd)
cd "${ShellFolderPath}"

# Resolve the per-OS VS Code user-config root.
case "$(uname -s)" in
    Darwin)
        VSCodeConfigPath="$HOME/Library/Application Support/Code"
        ;;
    Linux)
        VSCodeConfigPath="$HOME/.config/Code"
        ;;
    *)
        echo "Error: unsupported OS."
        exit 1
        ;;
esac

echo "VS Code configuration start..."

# link_config <source> <target>
# Remove any existing file, directory or symlink at <target>, make sure
# its parent directory exists, then symlink it to <source>.
# (Replaces three hand-rolled copies of the same remove-and-relink
# sequence; -e covers regular files and directories, -L additionally
# catches dangling symlinks, and rm -rf handles all of them.)
link_config() {
    local src="$1"
    local dst="$2"
    if [[ -e "${dst}" || -L "${dst}" ]]; then
        rm -rf "${dst}"
    fi
    mkdir -p "$(dirname "${dst}")"
    ln -s "${src}" "${dst}"
}

link_config "${ShellFolderPath}/settings.json"    "${VSCodeConfigPath}/User/settings.json"
link_config "${ShellFolderPath}/keybindings.json" "${VSCodeConfigPath}/User/keybindings.json"
link_config "${ShellFolderPath}/snippets"         "${VSCodeConfigPath}/User/snippets"

echo "VS Code configuration end."
| true
|
490be067c2717dc2affffd7be4289538f27ca8d6
|
Shell
|
p4p1/assistance
|
/src/install
|
UTF-8
| 7,179
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
# install.sh
# Created on: Fri, 27 March 2020
# https://p4p1.github.io/#config
# ____ __ ____ __
# ( _ \ /. |( _ \/ )
# )___/(_ _))___/ )(
# (__) (_)(__) (__)
#
# Description:
# Installation script to install and configure the softwares I used the
# most. I also propose a list of packages for different working environements.
#
# Usage:
# ./install.sh -u $(whoami) # basic installation
# ./install.sh -u $(whoami) -a # install all
# ./install.sh -u $(whoami) -p # basic installation w/ programming tools
# ./install.sh -u $(whoami) -s # basic installation w/ pentesting tools
source /opt/assistance/data/vars.sh
# dmenu settings:
L_NO="5"
L_HEIGHT="25"
BG_COLOR="#222222"
FG_COLOR="#bbbbbb"
SBG_COLOR="#ff9d14"
SFG_COLOR="#222222"
BRD_WIDTH="5"
# versions
DWM_VERSION=6.2
ST_VERSION=0.8.2
# Package lists, one package name per line in the data files.
defaults=($(cat $DATA_DIR/packages.txt | tr "\n" " "))
programming=($(cat $DATA_DIR/packages_programming.txt | tr "\n" " "))
# NOTE(review): this reads packages_programming.txt, making "pentest"
# identical to "programming" -- looks like a copy-paste slip; confirm
# whether packages_pentest.txt was intended.
pentest=($(cat $DATA_DIR/packages_programming.txt | tr "\n" " "))
# values for install
#defaults=(vim emacs tmux git openvpn tor ssh docker-compose curl net-tools tar adb fastboot gparted neofetch nmap thefuck feh compton libx11-dev libxinerama-dev libxft-dev gcc make gcc i3lock vifm irssi tig imagemagick blueman)
#programming=(npm python2 python3 gcc valgrind g++ nasm php nodejs python-pip python3-pip clang gdb ghc mingw-w64 fritzing arduino audacity xserver-xephyr golang-go)
#pentest=(aircrack-ng fping exiftool radare2 nikto hydra wireshark dfu-programmer dfu-util whois proxychains traceroute smbclient hashcat sqlmap binwalk steghide)
webbrowser="https://vivaldi.com/download/"
# Fix: the scheme was written as "https:/" (single slash), producing a
# malformed URL that curl cannot reliably resolve.
config="https://p4p1.github.io/backup.tar.xz"
# values for arguments (getopts flags, see command parser below)
p=false
s=false
a=false
# Usage
# Print the per-mode usage summary on stderr and abort with the
# project's error status (84).
function usage () {
  {
    echo -e "\e[1;31mUsage:\e[m"
    echo "$0 -b -> Base install"
    echo "$0 -u \$(whoami) -> default install"
    echo "$0 -u \$(whoami) -p -> programming install"
    echo "$0 -u \$(whoami) -s -> pentest install"
    echo "$0 -u \$(whoami) -a -> programming and pentest install"
    echo "$0 -u \$(whoami) -c -> Change bash config file"
  } 1>&2
  exit 84
}
# install_dwm_st <username>
# Run `make install` in every project directory under the user's
# ~/.source tree (dwm, st, ...), printing a colored :)/:( per project,
# then restore the caller's working directory.
function install_dwm_st() {
SAVED_PWD=$(pwd)
WD=/home/$1/.source
SOURCES=($(ls $WD))
echo "Installing source projects"
for item in ${SOURCES[*]}; do
FILE=$WD/$item
# Only descend into directories; plain files in ~/.source are ignored.
if [ -d "$FILE" ]; then
cd $FILE
echo -n "Running make install on $item -> "
make install &> /dev/null && echo -e "\e[1;34m:)\e[m" ||
echo -e "\e[1;31m:(\e[m"
fi
done
cd $SAVED_PWD
}
# emacs_conf <username>
# Clone the spacemacs distribution into the user's ~/.emacs.d, running
# git as that user so ownership is correct.
function emacs_conf() {
echo -e "Downloading \e[1;31mspacemacs\e[m..."
su $1 -c "git clone https://github.com/syl20bnr/spacemacs ~/.emacs.d"
}
# function to install different packages
function package_install() {
for item in $@; do
echo -n "Checking $item -> "
if hash $item &> /dev/null; then
echo -e "\e[1;34m:)\e[m"
else
echo -e "\e[1;31m:(\e[m"
apt install -y $item
fi
done
}
# function to install the default webbrowser
function webbrowser_install() {
echo -e "Download vivaldi in the \e[1;31msame\e[m directory as this script..."
echo $(pwd)
su $1 -c "firefox '$webbrowser'"
apt install -y ./vivaldi*
}
# function to install my config files
function config_install() {
curl $config --output ./backup.tar.xz
tar xf ./backup.tar.xz
for item in $(ls -a ./backup/ | sed 1,2d); do
echo -e "\e[1;34mAdding\e[m $item in /home/$1/ directory"
[ -f /home/$1/$item ] && rm -rf /home/$1/$item
[ -d /home/$1/$item ] && rm -rf /home/$1/$item
cp -r ./backup/$item /home/$1/$item
chown $1 -R /home/$1/$item
done
rm -rf ./backup.tar.xz
rm -rf ./backup/
}
# function to choose bash config
function pick_bashrc() {
VAL=$(echo -e "My bashrc\nKali linux\nParrot OS\nUbuntu" | dmenu -nb \
$BG_COLOR -nf $FG_COLOR -sb $SBG_COLOR -sf $SFG_COLOR -c -bw \
$BRD_WIDTH -h $L_HEIGHT -l $L_NO)
if [ "$VAL" = "Kali linux" ]; then
cp -r /home/$1/.bash_configs/.bashrc_kali /home/$1/.bashrc
elif [ "$VAL" = "Parrot OS" ]; then
cp -r /home/$1/.bash_configs/.bashrc_parot /home/$1/.bashrc
elif [ "$VAL" = "Ubuntu" ]; then
cp -r /home/$1/.bash_configs/.bashrc_ubuntu /home/$1/.bashrc
fi
chown $1 /home/$1/.bashrc
}
# Command parser: -u user (required), -p programming, -s pentest,
# -a all, -c reconfigure bashrc + reinstall suckless tools and exit.
while getopts "u::psac" o; do
case "${o}" in
u)
u=${OPTARG}
;;
p)
p=true
;;
s)
s=true
;;
a)
a=true
;;
c)
# -c is a shortcut mode: requires -u to have been parsed first.
[ -z "${u}" ] && usage
pick_bashrc $u
install_dwm_st $u
exit 0
;;
*)
usage
;;
esac
done
shift $((OPTIND-1))
# Interactive fallback when no -u was given: prompt for the username
# and the installation type.
if [ -z "${u}" ]; then
echo -n "Please enter your username: "
read u
while true; do
echo -n "Installation type (p: proramming/s: pentesting/a: all): "
read tmp
case "${tmp}" in
p)
p=true
break
;;
s)
s=true
break
;;
a)
a=true
break
;;
*)
echo "Unknown command: $tmp"
;;
esac
done
fi
# Do the basic checks to see if root and on supported systems and if the user exitsts
if [ "$EUID" -ne 0 ]; then
echo -e "\e[1;31mPlease run as root\e[m"
exit 84
fi
# NOTE(review): this tests /home/$1 (first positional arg, usually empty
# after the getopts shift) instead of /home/$u -- looks like a bug; the
# check as written almost always passes. Confirm and switch to $u.
if [ ! -d /home/$1 ]; then
echo -e "\e[1;31mError: /home/$1. Folder not found!\e[m"
exit 84
fi
# beginning of script
echo "Installing Leo's Stuff :)"
apt update
apt -y upgrade
# Base packages always; extra lists depend on the chosen mode.
package_install ${defaults[*]}
if $a; then
package_install ${programming[*]}
package_install ${pentest[*]}
elif $p; then
package_install ${programming[*]}
elif $s; then
package_install ${pentest[*]}
fi
webbrowser_install $u
config_install $u
# Let the user run vim's :PlugInstall interactively.
echo "Now run PlugInstall in vim"
echo "Press [Enter]"
read a
su $u -c "vim /home/$u/.vimrc"
# Generate an ssh keypair if the user has none yet.
echo -n "Checking for shh key -> "
if [ -f /home/$u/.ssh/id_rsa.pub -a -f /home/$u/.ssh/id_rsa ]; then
echo -e "\e[1;34m:)\e[m"
else
echo -e "\e[1;31m:(\e[m"
su $u -c "ssh-keygen"
fi
install_dwm_st $u
pick_bashrc $u
emacs_conf $u
# Ubuntu-specific tweaks: a dwm Xsession entry, libinput touchpad
# tap/scroll settings, and a pulseaudio reset.
OS=$(neofetch | grep "OS" | awk -F":" '{print $2}' | awk -F' ' '{print $2}')
if [ "$OS" = "Ubuntu" ]; then
# Desktop entry for dwm
echo "[Desktop Entry]" > /usr/share/xsessions/dwm.desktop
echo "Encoding=UTF-8" >> /usr/share/xsessions/dwm.desktop
echo "Name=Dwm" >> /usr/share/xsessions/dwm.desktop
echo "Comment=Dynamic window manager" >> /usr/share/xsessions/dwm.desktop
echo "Exec=$(whereis dwm | cut -d ":" -f2 | cut -c 2-)" >> /usr/share/xsessions/dwm.desktop
echo "Type=XSession" >> /usr/share/xsessions/dwm.desktop
# Touchpad config for ubuntu
[ ! -d /etc/X11/xorg.conf.d ] && mkdir -p /etc/X11/xorg.conf.d
echo -e "Section \"InputClass\"" > /etc/X11/xorg.conf.d/90-touchpad.conf
echo -e "\tIdentifier \"touchpad\"" >> /etc/X11/xorg.conf.d/90-touchpad.conf
echo -e "\tMatchIsTouchpad \"on\"" >> /etc/X11/xorg.conf.d/90-touchpad.conf
echo -e "\tDriver \"libinput\"" >> /etc/X11/xorg.conf.d/90-touchpad.conf
echo -e "\tOption \"Tapping\" \"on\"" >> /etc/X11/xorg.conf.d/90-touchpad.conf
echo -e "\tOption \"TappingButtonMap\" \"lrm\"" >> /etc/X11/xorg.conf.d/90-touchpad.conf
echo -e "\tOption \"ScrollMethod\" \"twofinger\"" >> /etc/X11/xorg.conf.d/90-touchpad.conf
echo -e "EndSection" >> /etc/X11/xorg.conf.d/90-touchpad.conf
# Fix for audio
rm -rf /home/$u/.config/pulse
su $u -c "pulseaudio -k"
alsa force-reload
fi
# Minimal .xinitrc so `startx` launches dwm.
[ ! -f /home/$u/.xinitrc ] && echo "exec dwm" > /home/$u/.xinitrc
echo "All done :)"
| true
|
bdf1dec59097e7a3b881fe79a93fb8585ef9d7e1
|
Shell
|
mjp2ff/aid
|
/config.sh
|
UTF-8
| 369
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
# Fetch the WordNet 3.1 dictionary on first run and generate the runnable
# benchmark CSV with absolute paths.

# Get WordNet dictionary
WORDNET_DIR=./wordnet/dict
if [ ! -d $WORDNET_DIR ]; then
    mkdir -p wordnet/
    # Run the download/extract inside a subshell so the `cd wordnet` does
    # not leak into the rest of the script. Fix: previously the script
    # stayed inside wordnet/ after a first-run download, so the relative
    # benchmark/ paths in the sed below pointed at the wrong directory.
    (
        cd wordnet &&
        curl -o dict.tar.gz http://wordnetcode.princeton.edu/wn3.1.dict.tar.gz &&
        tar -xzvf dict.tar.gz &&
        rm dict.tar.gz
    )
fi

# Set up benchmark script: prefix every non-comment line with "$PWD/".
sed "s#^\([^#]\)#$PWD\/\1#g" benchmark/benchmark.csv > benchmark/benchmark-runnable.csv
| true
|
b7503d455503b18dd21198d04be1d9cd7936b9c8
|
Shell
|
ionic3-toolbox/ionic3-app-theming
|
/upgrade/page-upgrade.sh
|
UTF-8
| 1,646
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/bash
# Ionic 3 lazy-loading migration: add @IonicPage() to every page, wrap page
# references in quotes for lazy loading, strip the now-unneeded imports, and
# drop an NgModule file (generated from a template) into each page folder.
# NOTE(review): `sed -i ''` is the BSD/macOS form -- this script will fail on
# GNU sed. Also `**/*.ts` only recurses when `shopt -s globstar` is enabled;
# with bash's default settings it matches one directory level -- confirm.
cd ../src/pages
# Add IonicPage import and decorator
for page in **/*.ts; do
echo "Handle page $page"
# Prepend the import line (cat - writes stdin before the file contents).
echo "import { IonicPage } from 'ionic-angular';" | cat - $page > temp && mv temp $page
sed -i '' 's/@Component/@IonicPage()@Component/g' $page
done
for d in *; do
echo "Handle dir $d"
echo "Create the correct name of the Page"
# Turn a dash-separated folder name into CamelCase (my-page -> MyPage).
parts=$(echo $d | tr "-" "\n")
finalString=""
for part in $parts; do
upperCaseName="$(tr a-z A-Z <<< ${part:0:1})${part:1}"
finalString=$finalString$upperCaseName
done
echo "name=$finalString"
# Remove Page Import from other pages
cd ..
pageName=$finalString"Page"
exclude="pages/$d/$d.ts"
for f in $(find pages -type f -name "*.ts"); do
if [ $f != $exclude ]
then
echo "Replace Page usage with 'Page' for lazy loading"
sed -i '' 's/'$pageName'/'\'$pageName\''/g' "$f"
echo "Remove all imports of the page"
# NOTE(review): this deletes every line merely containing the folder name,
# not just import lines -- risk of over-deletion; confirm.
sed -i '' '/'$d'/d' $f
fi
done
# back to correct folder
cd pages
echo "Copy the template file into the page folder: $d/$d.module.ts"
cp ../../upgrade/page-template.ts "$d/$d.module.ts"
echo "Replace the Placeholder inside the page template"
echo "_PAGENAME_ ==> $finalString"
echo "_FILENAME_ ==> $d"
sed -i '' 's/_PAGENAME_/'$finalString'/g' "$d/$d.module.ts"
sed -i '' 's/_FILENAME_/'$d'/g' "$d/$d.module.ts"
# Remove imports, declarations and entryComponents
echo "Remove imports, declarations and entryComponents"
sed -i '' '/'$pageName'/d' '../app/app.module.ts'
done
| true
|
2c74908d2190b689da7466c7097658ad8b4f416f
|
Shell
|
paususe/spacewalk-centos-errata
|
/errata-sync.sh
|
UTF-8
| 975
| 3.640625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Download latest errata files and import to Spacewalk

## Configure these variables ##
# Spacewalk Username/Password with administrator access
export SPACEWALK_USER='user'
export SPACEWALK_PASS='pass'
# INCLUDE_CHANNELS: Comma separated list of Channel Labels
INCLUDE_CHANNELS='centos7-updates,centos-extras'
## End of Configure varables ##

# Get this script's base path to use for errata-import.pl execution.
# Fix: the previous `echo $0 | sed 's/errata-sync.sh//'` broke whenever the
# script was invoked under another name or through a symlink; dirname is
# robust for any invocation path (trailing "/" kept for the concatenations
# below).
BASE_PATH="$(dirname "$0")/"

# Download updated files only into the same dir as errata-sync.sh
# (wget -N only re-downloads when the remote file is newer).
cd "${BASE_PATH}"
wget -N "http://cefs.steve-meier.de/errata.latest.xml"
wget -N "https://www.redhat.com/security/data/oval/com.redhat.rhsa-all.xml"
cd -

# Import errata (credentials are passed via the exported env vars above)
${BASE_PATH}errata-import.pl --server localhost --errata ${BASE_PATH}errata.latest.xml --rhsa-oval ${BASE_PATH}com.redhat.rhsa-all.xml --include-channels="${INCLUDE_CHANNELS}" --publish

# Unset vars for security
unset SPACEWALK_USER
unset SPACEWALK_PASS
| true
|
b89b280c83bf57c994239c371e9f261d568bc208
|
Shell
|
riwave/operationalForecastDownload
|
/get_gens_c00.sh
|
UTF-8
| 20,782
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/bash
################################################################
#
# Script to fetch Global Ensemble forecasts via CURL and WGET- cycle 00Z
# NCEP (GEFS), Canadian Meteorological Center (CMCE) and
# US Navy Fleet Numerical Meteorology and Ocenography Center (FENS)
# using WGRIB2, NCO, and CDO for post-processing
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is provided at
# http://www.gnu.org/licenses/
#
# Ricardo Campos (REMO/CHM, IST/CENTEC) & Ronaldo Palmeira (IAG/USP)
#
# riwave@gmail.com , https://github.com/riwave
# https://www.linkedin.com/in/ricardo-martins-campos-451a45122/
# https://www.researchgate.net/profile/Ricardo_Campos20
#
# Version 1.0: 07/2016
# Version 2.0: 08/2019
################################################################
source /etc/bash.bashrc
# directory where this code as well as get_grib.pl and get_inv.pl are saved
DIRS=/media/chico/op/ftpget/model/scripts
# directory where directory will be created and filed will be saved
DIR=/media/data/forecast/model/opftp
# e-mail info for automatic messages in case of error during download
emaildest=''
# email source
emailfonte=''
senha=''
# server address
SERVER=https://www.ftp.ncep.noaa.gov
s1="pgrb2a"
s2="pgrb2b"
# limits for domain selection
latmin=-77.5
latmax=90.
lonmin=-102.
lonmax=30.
# Global Ensemble Forecast System (GEFS) https://www.ncdc.noaa.gov/data-access/model-data/model-datasets/global-ensemble-forecast-system-gefs
# https://www.ftp.ncep.noaa.gov/data/nccf/com/gens/prod/gefs.20160803/00/pgrb2/
# gep20.t00z.pgrb2f354.idx
# grep patterns matched against the .idx inventory to select variables.
UGRD=":UGRD:850 mb:|:UGRD:10 m above ground:"
VGRD=":VGRD:850 mb:|:VGRD:10 m above ground:"
GUST=":GUST:surface:"
PRMSL=":PRMSL:mean sea level:"
HGT=":HGT:850 mb:"
VARSGETGEFS1="$UGRD|$VGRD|$PRMSL|$HGT"
VARSGETGEFS2="$GUST"
# Canadian Meteorological Center Ensemble (CMCE) http://nomads.ncep.noaa.gov/txt_descriptions/CMCENS_doc.shtml
# Fleet Numerical Meteorology and Ocenography Ensemble Forecast System (FENS) http://www.nco.ncep.noaa.gov/pmb/products/fens/
# https://www.ftp.ncep.noaa.gov/data/nccf/com/naefs/prod/
VARSGETNAEFS="$UGRD|$VGRD|$PRMSL|$HGT"
# initial date cycle for the ftp
ANO=`date +%Y`
MES=`date +%m`
DIA=`date +%d`
# pa=1
# ANO=`date --date=-$pa' day' '+%Y'`
# MES=`date --date=-$pa' day' '+%m'`
# DIA=`date --date=-$pa' day' '+%d'`
HORA="00" # first cycle 00Z
cd $DIR
# create directory
mkdir -p $DIR/gens.$ANO$MES$DIA$HORA
# all information about fetching the grib2 files will be saved in the log file
echo " " > $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA
# hours of forecast to be dowloaded
# NOTE(review): %02g still prints 3 digits for values >= 100; the hours are
# reformatted with %03d at every use below, so the forecast range is 000-384.
horas=`seq -f "%02g" 0 6 384`
# number of ensembles
ensbl="`seq -f "%02g" 0 1 20`"
ensblf="`seq -f "%02g" 1 1 20`"
# --- GEFS download loop: per forecast hour and per member, retry until the
# grib2 slice reaches a plausible size or 130 attempts are exhausted. ---
for h in $horas;do
for e in $ensbl;do
# GENS GEFS (1X1, 6h) --------------------------------------------------
echo " " >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA
echo " ======== GENS GEFS Forecast: $ANO$MES$DIA$HORA $h ========" >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA
# size TAM and tries TRIES will control the process
TAM=0
TRIES=1
# while file has a lower size the expected or attemps are less than 130 (almos 11 hours trying) it does:
while [ $TAM -lt 800000 ] && [ $TRIES -le 130 ]; do
# sleep 5 minutes between attemps
if [ ${TRIES} -gt 5 ]; then
sleep 300
fi
# --- CURL VIA WWW.FTP.NCEP.NOAA.GOV --- #
if [ ${TAM} -lt 800000 ]; then
echo " " >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA
echo " attempt number: $TRIES" >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA
echo " " >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA
# main line where get_inv.pl and get_grib.pl are used to fech the grib2 file
# (member 00 is the control run "gec00", members 01-20 are "gepNN")
if [ ${e} == 00 ]; then
$DIRS/get_inv.pl $SERVER/data/nccf/com/gens/prod/gefs.$ANO$MES$DIA/$HORA/${s1}p5/gec${e}.t${HORA}z.${s1}.0p50.f"$(printf "%03d" $h)".idx | egrep "($VARSGETGEFS1)" | $DIRS/get_grib.pl $SERVER/data/nccf/com/gens/prod/gefs.$ANO$MES$DIA/$HORA/${s1}p5/gec${e}.t${HORA}z.${s1}.0p50.f"$(printf "%03d" $h)" $DIR/gens.$ANO$MES$DIA$HORA/gefs${e}.t${HORA}z.pgrb2f"$(printf "%03d" $h)".grb2 >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA 2>&1
else
$DIRS/get_inv.pl $SERVER/data/nccf/com/gens/prod/gefs.$ANO$MES$DIA/$HORA/${s1}p5/gep${e}.t${HORA}z.${s1}.0p50.f"$(printf "%03d" $h)".idx | egrep "($VARSGETGEFS1)" | $DIRS/get_grib.pl $SERVER/data/nccf/com/gens/prod/gefs.$ANO$MES$DIA/$HORA/${s1}p5/gep${e}.t${HORA}z.${s1}.0p50.f"$(printf "%03d" $h)" $DIR/gens.$ANO$MES$DIA$HORA/gefs${e}.t${HORA}z.pgrb2f"$(printf "%03d" $h)".grb2 >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA 2>&1
fi
# test if the downloaded file exists
test -f $DIR/gens.$ANO$MES$DIA$HORA/gefs${e}.t${HORA}z.pgrb2f"$(printf "%03d" $h)".grb2 >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA 2>&1
TE=$?
if [ ${TE} -eq 1 ]; then
TAM=0
else
# check size of each file
TAM=`du -sb $DIR/gens.$ANO$MES$DIA$HORA/gefs${e}.t${HORA}z.pgrb2f"$(printf "%03d" $h)".grb2 | awk '{ print $1 }'` >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA 2>&1
fi
fi
TRIES=`expr $TRIES + 1`
done
sleep 2
# --- GEFS surface gust (pgrb2b) for the same member/hour ---
# size TAM and tries TRIES will control the process
TAM=0
TRIES=1
# while file has a lower size the expected or attemps are less than 130 (almos 11 hours trying) it does:
while [ $TAM -lt 100000 ] && [ $TRIES -le 130 ]; do
# sleep 5 minutes between attemps
if [ ${TRIES} -gt 5 ]; then
sleep 300
fi
# --- CURL VIA WWW.FTP.NCEP.NOAA.GOV --- #
if [ ${TAM} -lt 100000 ]; then
echo " " >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA
echo " attempt number: $TRIES" >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA
echo " " >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA
# main line where get_inv.pl and get_grib.pl are used to fech the grib2 file
if [ ${e} == 00 ]; then
$DIRS/get_inv.pl $SERVER/data/nccf/com/gens/prod/gefs.$ANO$MES$DIA/$HORA/${s2}p5/gec${e}.t${HORA}z.${s2}.0p50.f"$(printf "%03d" $h)".idx | egrep "($VARSGETGEFS2)" | $DIRS/get_grib.pl $SERVER/data/nccf/com/gens/prod/gefs.$ANO$MES$DIA/$HORA/${s2}p5/gec${e}.t${HORA}z.${s2}.0p50.f"$(printf "%03d" $h)" $DIR/gens.$ANO$MES$DIA$HORA/gefs_gust${e}.t${HORA}z.pgrb2f"$(printf "%03d" $h)".grb2 >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA 2>&1
else
$DIRS/get_inv.pl $SERVER/data/nccf/com/gens/prod/gefs.$ANO$MES$DIA/$HORA/${s2}p5/gep${e}.t${HORA}z.${s2}.0p50.f"$(printf "%03d" $h)".idx | egrep "($VARSGETGEFS2)" | $DIRS/get_grib.pl $SERVER/data/nccf/com/gens/prod/gefs.$ANO$MES$DIA/$HORA/${s2}p5/gep${e}.t${HORA}z.${s2}.0p50.f"$(printf "%03d" $h)" $DIR/gens.$ANO$MES$DIA$HORA/gefs_gust${e}.t${HORA}z.pgrb2f"$(printf "%03d" $h)".grb2 >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA 2>&1
fi
# test if the downloaded file exists
# NOTE(review): this tests/measures gefs${e}... but the file just written
# is gefs_gust${e}... -- the loop therefore checks the WRONG file and will
# exit after one pass whenever the pgrb2a file already exists; confirm and
# point both checks at gefs_gust.
test -f $DIR/gens.$ANO$MES$DIA$HORA/gefs${e}.t${HORA}z.pgrb2f"$(printf "%03d" $h)".grb2 >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA 2>&1
TE=$?
if [ ${TE} -eq 1 ]; then
TAM=0
else
# check size of each file
TAM=`du -sb $DIR/gens.$ANO$MES$DIA$HORA/gefs${e}.t${HORA}z.pgrb2f"$(printf "%03d" $h)".grb2 | awk '{ print $1 }'` >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA 2>&1
fi
fi
TRIES=`expr $TRIES + 1`
done
sleep 2
done
sleep 5
done
sleep 30
# --- CMCE download loop: same retry scheme, NAEFS/cmce tree; member 00 is
# the control "cmc_gec00", the rest are "cmc_gepNN". ---
for h in $horas;do
for e in $ensbl;do
# GENS CMCE (1X1, 6h) --------------------------------------------------
echo " " >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA
echo " ======== GENS CMCE Forecast: $ANO$MES$DIA$HORA $h ========" >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA
# size TAM and tries TRIES will control the process
TAM=0
TRIES=1
# while file has a lower size the expected or attemps are less than 130 (almos 11 hours trying) it does:
while [ $TAM -lt 700000 ] && [ $TRIES -le 130 ]; do
# sleep 5 minutes between attemps
if [ ${TRIES} -gt 5 ]; then
sleep 300
fi
# --- CURL VIA WWW.FTP.NCEP.NOAA.GOV --- #
if [ ${TAM} -lt 700000 ]; then
echo " " >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA
echo " attempt number: $TRIES" >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA
echo " " >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA
# main line where get_inv.pl and get_grib.pl are used to fech the grib2 file
if [ ${e} == 00 ]; then
$DIRS/get_inv.pl $SERVER/data/nccf/com/naefs/prod/cmce.$ANO$MES$DIA/$HORA/pgrb2ap5/cmc_gec"$e".t"$HORA"z.pgrb2a.0p50.f"$(printf "%03d" $h)".idx | egrep "($VARSGETNAEFS)" | $DIRS/get_grib.pl $SERVER/data/nccf/com/naefs/prod/cmce.$ANO$MES$DIA/$HORA/pgrb2ap5/cmc_gec${e}.t${HORA}z.pgrb2a.0p50.f"$(printf "%03d" $h)" $DIR/gens.$ANO$MES$DIA$HORA/cmce${e}.t${HORA}z.pgrb2f"$(printf "%03d" $h)".grb2 >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA 2>&1
else
$DIRS/get_inv.pl $SERVER/data/nccf/com/naefs/prod/cmce.$ANO$MES$DIA/$HORA/pgrb2ap5/cmc_gep"$e".t"$HORA"z.pgrb2a.0p50.f"$(printf "%03d" $h)".idx | egrep "($VARSGETNAEFS)" | $DIRS/get_grib.pl $SERVER/data/nccf/com/naefs/prod/cmce.$ANO$MES$DIA/$HORA/pgrb2ap5/cmc_gep${e}.t${HORA}z.pgrb2a.0p50.f"$(printf "%03d" $h)" $DIR/gens.$ANO$MES$DIA$HORA/cmce${e}.t${HORA}z.pgrb2f"$(printf "%03d" $h)".grb2 >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA 2>&1
fi
# test if the downloaded file exists
test -f $DIR/gens.$ANO$MES$DIA$HORA/cmce${e}.t${HORA}z.pgrb2f"$(printf "%03d" $h)".grb2 >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA 2>&1
TE=$?
if [ ${TE} -eq 1 ]; then
TAM=0
else
# check size of each file
TAM=`du -sb $DIR/gens.$ANO$MES$DIA$HORA/cmce${e}.t${HORA}z.pgrb2f"$(printf "%03d" $h)".grb2 | awk '{ print $1 }'` >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA 2>&1
fi
fi
TRIES=`expr $TRIES + 1`
done
sleep 2
done
sleep 5
done
sleep 30
# --- FENS download loop: US Navy ensemble, members 01-20 only (no control
# file), single URL scheme. ---
for h in $horas;do
for e in $ensblf;do
# GENS FENS (1X1, 6h) --------------------------------------------------
echo " " >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA
echo " ======== GENS FENS Forecast: $ANO$MES$DIA$HORA $h ========" >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA
# size TAM and tries TRIES will control the process
TAM=0
TRIES=1
# while file has a lower size the expected or attemps are less than 130 (almos 11 hours trying) it does:
# 3000000
while [ $TAM -lt 800000 ] && [ $TRIES -le 130 ]; do
# sleep 5 minutes between attemps
if [ ${TRIES} -gt 5 ]; then
sleep 300
fi
# --- CURL VIA WWW.FTP.NCEP.NOAA.GOV --- #
if [ ${TAM} -lt 800000 ]; then
echo " " >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA
echo " attempt number: $TRIES" >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA
echo " " >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA
$DIRS/get_inv.pl $SERVER/data/nccf/com/naefs/prod/fens.$ANO$MES$DIA/$HORA/pgrb2ap5/ENSEMBLE.halfDegree.MET.fcst_et0$e."$(printf "%03d" $h)".$ANO$MES$DIA$HORA.idx | egrep "($VARSGETNAEFS)" | $DIRS/get_grib.pl $SERVER/data/nccf/com/naefs/prod/fens.$ANO$MES$DIA/$HORA/pgrb2ap5/ENSEMBLE.halfDegree.MET.fcst_et0${e}."$(printf "%03d" $h)".$ANO$MES$DIA$HORA $DIR/gens.$ANO$MES$DIA$HORA/fens${e}.t${HORA}z.pgrb2f"$(printf "%03d" $h)".grb2 >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA 2>&1
sleep 2
# test if the downloaded file exists
test -f $DIR/gens.$ANO$MES$DIA$HORA/fens${e}.t${HORA}z.pgrb2f"$(printf "%03d" $h)".grb2 >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA 2>&1
TE=$?
if [ ${TE} -eq 1 ]; then
TAM=0
else
# check size of each file
TAM=`du -sb $DIR/gens.$ANO$MES$DIA$HORA/fens${e}.t${HORA}z.pgrb2f"$(printf "%03d" $h)".grb2 | awk '{ print $1 }'` >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA 2>&1
fi
fi
TRIES=`expr $TRIES + 1`
done
sleep 2
done
sleep 5
done
sleep 30
# Check the entire download, as a whole, inside the directory
TAMD=`du -sb $DIR/gens.$ANO$MES$DIA$HORA | awk '{ print $1}'` >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA 2>&1
TAMDW=`du -sh $DIR/gens.$ANO$MES$DIA$HORA | awk '{ print $1}'` >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA 2>&1
echo " " >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA
echo " " >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA
echo "---- Final Status ----" >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA
NANO=`date '+%Y'`
NMES=`date '+%m'`
NDIA=`date '+%d'`
NHORA=`date '+%H'`
NMINUTO=`date '+%M'`
# Above ~4 GB the cycle is considered complete; otherwise log the failure
# and send an alert e-mail via ssmtp.
if [ ${TAMD} -gt 4000000000 ]; then
echo " Entire Download successfully completed, no problem found. Total size: $TAMDW" >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA
echo " Local Time: $NANO $NMES $NDIA - $NHORA:$NMINUTO" >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA
else
echo " ATTENTION! Some error has happened during the download. Total size: $TAMDW" >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA
echo " Local Time: $NANO $NMES $NDIA - $NHORA:$NMINUTO" >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA
# Send e-mail in case of problems, see /media/rmc/bento/Library/Manuals_Tips/enviandoemail
cat > email.txt << EOF
To: $emaildest
Subject: GENS Download Problem
ATTENTION! Some error has happened during the GENS download. Total size: $TAMDW
In: $DIR/gens.$ANO$MES$DIA$HORA
Script: $DIRS/get_gens_c00.sh
Local Time: $NANO $NMES $NDIA - $NHORA:$NMINUTO
EOF
/usr/sbin/ssmtp $emaildest -au$emailfonte -ap$senha < email.txt >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA 2>&1
rm -f email*
fi
echo "----------------------" >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA
# Cleaning
# --- Remove directories older than 5 days
cd $DIR
# find gens.?????????? -ctime +4 -type d | xargs rm -rf >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA 2>&1
# Post-processing: select area and reduces resolution, in order to save disk space. ------------------
# Each grib2 slice is converted to NetCDF (wgrib2), cropped to the domain
# (cdo + ncks), given a NaN fill value, size-reduced with lossy packing,
# and the grib2 original is deleted.
# NOTE(review): $dp is never assigned anywhere in this script, so
# "--ppc default=.$dp" expands to "default=." -- confirm whether a
# precision value was meant to be configured above.
echo " " >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA
echo " Post-Processing. select area and reduces resolution " >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA
for h in $horas;do
for e in $ensbl;do
# GEFS
arqn=$DIR/gens.$ANO$MES$DIA$HORA/gefs${e}.t${HORA}z.pgrb2f"$(printf "%03d" $h)"
test -f ${arqn}.grb2 >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA 2>&1
TE=$?
if [ ${TE} -eq 1 ]; then
echo " File ${arqn}.grb2 does not exist. Failed to download " >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA
else
/usr/local/grib2/wgrib2/wgrib2 ${arqn}.grb2 -netcdf ${arqn}.saux.nc
/usr/bin/cdo sellonlatbox,-180,180,-90,90 ${arqn}.saux.nc ${arqn}.saux1.nc
/usr/bin/ncks -4 -L 1 -d latitude,${latmin},${latmax} ${arqn}.saux1.nc ${arqn}.saux2.nc
/usr/bin/ncks -4 -L 1 -d longitude,${lonmin},${lonmax} ${arqn}.saux2.nc ${arqn}.saux3.nc
/usr/bin/ncatted -a _FillValue,,o,f,NaN ${arqn}.saux3.nc
/usr/bin/ncks --ppc default=.$dp ${arqn}.saux3.nc ${arqn}.nc
rm -f ${arqn}.grb2
rm ${arqn}.saux*
echo " Converted file ${arqn} to netcdf with success, and reduced size. " >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA
fi
# CMCE
arqn=$DIR/gens.$ANO$MES$DIA$HORA/cmce${e}.t${HORA}z.pgrb2f"$(printf "%03d" $h)"
test -f ${arqn}.grb2 >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA 2>&1
TE=$?
if [ ${TE} -eq 1 ]; then
echo " File ${arqn}.grb2 does not exist. Failed to download " >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA
else
/usr/local/grib2/wgrib2/wgrib2 ${arqn}.grb2 -netcdf ${arqn}.saux.nc
/usr/bin/cdo sellonlatbox,-180,180,-90,90 ${arqn}.saux.nc ${arqn}.saux1.nc
/usr/bin/ncks -4 -L 1 -d latitude,${latmin},${latmax} ${arqn}.saux1.nc ${arqn}.saux2.nc
/usr/bin/ncks -4 -L 1 -d longitude,${lonmin},${lonmax} ${arqn}.saux2.nc ${arqn}.saux3.nc
/usr/bin/ncatted -a _FillValue,,o,f,NaN ${arqn}.saux3.nc
/usr/bin/ncks --ppc default=.$dp ${arqn}.saux3.nc ${arqn}.nc
rm -f ${arqn}.grb2
rm ${arqn}.saux*
echo " Converted file ${arqn} to netcdf with success, and reduced size. " >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA
fi
# FENS (no member 00, so skip e == 00)
if [ ${e} -gt 0 ]; then
arqn=$DIR/gens.$ANO$MES$DIA$HORA/fens${e}.t${HORA}z.pgrb2f"$(printf "%03d" $h)"
test -f ${arqn}.grb2 >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA 2>&1
TE=$?
if [ ${TE} -eq 1 ]; then
echo " File ${arqn}.grb2 does not exist. Failed to download " >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA
else
/usr/local/grib2/wgrib2/wgrib2 ${arqn}.grb2 -netcdf ${arqn}.saux.nc
/usr/bin/cdo sellonlatbox,-180,180,-90,90 ${arqn}.saux.nc ${arqn}.saux1.nc
/usr/bin/ncks -4 -L 1 -d latitude,${latmin},${latmax} ${arqn}.saux1.nc ${arqn}.saux2.nc
/usr/bin/ncks -4 -L 1 -d longitude,${lonmin},${lonmax} ${arqn}.saux2.nc ${arqn}.saux3.nc
/usr/bin/ncatted -a _FillValue,,o,f,NaN ${arqn}.saux3.nc
/usr/bin/ncks --ppc default=.$dp ${arqn}.saux3.nc ${arqn}.nc
rm -f ${arqn}.grb2
rm ${arqn}.saux*
echo " Converted file ${arqn} to netcdf with success, and reduced size. " >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA
fi
fi
# GEFS Gust only
arqn=$DIR/gens.$ANO$MES$DIA$HORA/gefs_gust${e}.t${HORA}z.pgrb2f"$(printf "%03d" $h)"
test -f ${arqn}.grb2 >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA 2>&1
TE=$?
if [ ${TE} -eq 1 ]; then
echo " File ${arqn}.grb2 does not exist. Failed to download " >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA
else
/usr/local/grib2/wgrib2/wgrib2 ${arqn}.grb2 -netcdf ${arqn}.saux.nc
/usr/bin/cdo sellonlatbox,-180,180,-90,90 ${arqn}.saux.nc ${arqn}.saux1.nc
/usr/bin/ncks -4 -L 1 -d latitude,${latmin},${latmax} ${arqn}.saux1.nc ${arqn}.saux2.nc
/usr/bin/ncks -4 -L 1 -d longitude,${lonmin},${lonmax} ${arqn}.saux2.nc ${arqn}.saux3.nc
/usr/bin/ncatted -a _FillValue,,o,f,NaN ${arqn}.saux3.nc
/usr/bin/ncks --ppc default=.$dp ${arqn}.saux3.nc ${arqn}.nc
rm -f ${arqn}.grb2
rm ${arqn}.saux*
echo " Converted file ${arqn} to netcdf with success, and reduced size. " >> $DIR/gens.$ANO$MES$DIA$HORA/logGENS_$ANO$MES$DIA$HORA
fi
done
done
# Merge all netcdf files: concatenate the per-hour slices of each member
# into one file per (model, member), then delete the slices.
for e in $ensbl;do
/usr/bin/ncecat $DIR/gens.$ANO$MES$DIA$HORA/gefs${e}.t${HORA}z.pgrb2f*.nc -O $DIR/gens.$ANO$MES$DIA$HORA/gefs.$ANO$MES$DIA$HORA.m${e}.${s1}p5.nc
/usr/bin/ncecat $DIR/gens.$ANO$MES$DIA$HORA/cmce${e}.t${HORA}z.pgrb2f*.nc -O $DIR/gens.$ANO$MES$DIA$HORA/cmce.$ANO$MES$DIA$HORA.m${e}.pgrb2ap5.nc
/usr/bin/ncecat $DIR/gens.$ANO$MES$DIA$HORA/fens${e}.t${HORA}z.pgrb2f*.nc -O $DIR/gens.$ANO$MES$DIA$HORA/fens.$ANO$MES$DIA$HORA.m${e}.pgrb2ap5.nc
rm -f $DIR/gens.$ANO$MES$DIA$HORA/gefs${e}.t${HORA}z.pgrb2f*.nc
rm -f $DIR/gens.$ANO$MES$DIA$HORA/cmce${e}.t${HORA}z.pgrb2f*.nc
rm -f $DIR/gens.$ANO$MES$DIA$HORA/fens${e}.t${HORA}z.pgrb2f*.nc
done
for e in $ensbl;do
/usr/bin/ncecat $DIR/gens.$ANO$MES$DIA$HORA/gefs_gust${e}.t${HORA}z.pgrb2f*.nc -O $DIR/gens.$ANO$MES$DIA$HORA/gefs_gust.$ANO$MES$DIA$HORA.m${e}.${s2}p5.nc
rm -f $DIR/gens.$ANO$MES$DIA$HORA/gefs_gust${e}.t${HORA}z.pgrb2f*.nc
done
# permissions and groups
# chgrp amadmin -R $DIR/gfs.$ANO$MES$DIA$HORA
chmod -R 775 $DIR/gens.$ANO$MES$DIA$HORA
| true
|
605bb33dcb978f1ccf6a5f6de812e56604cd2faf
|
Shell
|
BackupTheBerlios/atomicl
|
/portage/sys-kernel/linux-headers/files/generate-asm-sparc
|
UTF-8
| 1,707
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/sh -e
#
# $Header: /home/xubuntu/berlios_backup/github/tmp-cvs/atomicl/Repository/portage/sys-kernel/linux-headers/files/generate-asm-sparc,v 1.1 2005/07/23 02:20:27 sjlongland Exp $
#
# This script generates the files in /usr/include/asm for sparc systems
# during installation of sys-kernel/linux-headers.
# Will no longer be needed when full 64 bit support is used on sparc64
# systems.
#
# Shamefully ripped from Debian
# ----------------------------------------------------------------------
# Idea borrowed from RedHat's kernel package

# Optional argument: target include directory (defaults to /usr/include).
if [ -n "$1" ]; then
if [ ! -d "$1" ]; then
echo "$1" does not exist, or is not a directory
exit 1
fi
cd $1
else
cd /usr/include
fi
if [ ! -d asm-sparc -o ! -d asm-sparc64 ] ; then
echo E: asm-sparc and asm-sparc64 must exist, or you will have problems
exit 1
fi
rm -rf asm
mkdir asm
# For every header present in either arch tree, generate a stub asm/<h>
# that dispatches on __arch64__. The macro guard name is derived from the
# file name (tr maps a-z and "." to A-Z and "_").
for h in `( ls asm-sparc; ls asm-sparc64 ) | grep '\.h$' | sort -u`; do
name=`echo $h | tr a-z. A-Z_`
# common header
cat > asm/$h << EOF
/* All asm/ files are generated and point to the corresponding
 * file in asm-sparc or asm-sparc64. To regenerate, run "generate-asm"
 */
#ifndef __SPARCSTUB__${name}__
#define __SPARCSTUB__${name}__
EOF
# common for sparc and sparc64
if [ -f asm-sparc/$h -a -f asm-sparc64/$h ]; then
cat >> asm/$h <<EOF
#ifdef __arch64__
#include <asm-sparc64/$h>
#else
#include <asm-sparc/$h>
#endif
EOF
# sparc only
elif [ -f asm-sparc/$h ]; then
cat >> asm/$h <<EOF
#ifndef __arch64__
#include <asm-sparc/$h>
#endif
EOF
# sparc64 only
else
cat >> asm/$h <<EOF
#ifdef __arch64__
#include <asm-sparc64/$h>
#endif
EOF
fi
# common footer
cat >> asm/$h <<EOF
#endif /* !__SPARCSTUB__${name}__ */
EOF
done
exit 0
| true
|
c0c0e4076b0b6f368e8b9ddbb231504a03f32e68
|
Shell
|
sheynandr/Starter
|
/starter.sh
|
UTF-8
| 2,694
| 4.125
| 4
|
[] |
no_license
|
#!/bin/bash
############# Variables #############
# Root directory holding one sub-directory per game server (each with a
# start.sh inside).
serverpath=/root/servers
# Servers queued for startup; filled by prepareToRun, consumed by the
# final loop in the Core section.
runServers=()
############# Functions #############
# Print a green "[INFO]" tag followed by all arguments, on stdout.
# BUG FIX: the message is now passed as a printf *argument* rather than
# interpolated into the format string, so '%' characters in it are
# printed literally instead of being treated as format directives.
info() {
printf '\033[0;32m[INFO]\033[0m %s \n' "$*"
}
# Print a red "[WARNING]" tag followed by the first argument, on stdout.
# BUG FIX: the message is now passed as a printf *argument* rather than
# interpolated into the format string, so '%' characters in it are
# printed literally instead of being treated as format directives.
warning() {
printf '\033[0;31m[WARNING]\033[0m %s \n' "$1"
}
# True when the given path exists (file, directory, or any other node).
can_exists() {
  test -e "$1"
}
# Emit the name of every running GNU screen session, one per line.
# Falls back to installing 'screen' via apt-get when it is missing.
parse_screens() {
if ! can_run screen; then
warning "ИМПОСИБРУ!!!!! Пакет screen не установлен, это как так?!!"
if can_run apt-get; then
info "Пробуем установить пакет их репозитория."
sudo apt-get install screen
fi
else
# 'screen -x' lists sessions as "<pid>.<name>\t(...)". The pipeline
# reverses each line, keeps the part before the first '.', reverses
# back, and trims trailing fields -- i.e. it extracts the session name
# after the "pid." prefix.
# NOTE(review): parsing 'screen -x' output is fragile; a session name
# containing '.' would be truncated -- confirm names are dot-free.
screen -x | rev | cut -d . -f1 -s | rev | cut -d " " -f 1 -s | awk -F" " '{print $1}'
fi
}
# Run the given command and join all its output lines with single spaces.
# The sed program accumulates the whole stream in the pattern space and
# then replaces every embedded newline with a space.
clear_newLine() {
  "$@" | sed -e ':loop' -e 'N' -e '$!b loop' -e 's/\n/ /g'
}
# Run the given command and delete every space, tab, newline and
# carriage return from its output.
clear_whitespaces(){
  "$@" | tr -d '[:blank:]\n\r'
}
# Return 0 when the first argument equals any of the remaining ones,
# 1 otherwise.
containsElement () {
  local needle=$1 candidate
  shift
  for candidate in "$@"; do
    if [[ "$candidate" == "$needle" ]]; then
      return 0
    fi
  done
  return 1
}
# Queue the given server for startup and log the decision.
# Appends to the global runServers array consumed by the main loop.
prepareToRun () {
  local queued=$1
  runServers[${#runServers[@]}]=$queued
  info "$queued не найден в списке запущенных, и поставлен в очередь на запуск"
}
# True when $1 is an executable path or a command resolvable via PATH.
# 'command -v' replaces the non-portable external 'which'; the explicit
# -x test preserves the original support for direct paths to executables.
can_run() {
  [ -x "$1" ] || command -v "$1" >/dev/null 2>&1
}
# Start a detached screen session named $1 running command $2.
# NOTE(review): the main loop below invokes this with a single argument,
# so $2 expands empty and screen receives an empty command -- confirm
# whether the start script was meant to be passed here.
createScreen() {
screen -dmS "$1" "$2"
}
# Print the start-script path for server $1 on stdout.
# BUG FIX: the informational message used to go to stdout too, so callers
# using path="$(findServerPath ...)" captured the log text into the path
# variable. The message now goes to stderr; stdout carries only the path.
findServerPath() {
echo "$serverpath/$1/start.sh"
info "Команда запуска отправлена на скрин" >&2
}
# Type "sh <path>" followed by a carriage return (octal \015) into
# window 0 of the screen session named $1, as if entered at its prompt.
# NOTE(review): $1 and $2 are unquoted, so names/paths with spaces would
# word-split -- confirm server names are single words.
startCommand() {
screen -S $1 -p 0 -X stuff "sh $2`echo -ne '\015'`"
}
############# Core #############
# Abort unless the server list file exists next to the script.
if can_exists servers.txt; then
info "Файл со списком серверов был найден. Пробуем собрать массив c серверами."
else
warning "Файл со списком серверов не был найден. Создайте в папке со скриптом файл servers.txt и впишите названия серверов, начиная каждое с новой строки"
exit 1;
fi
# Load one server name per line into the 'servers' array.
# NOTE(review): readarray is used without -t, so every element keeps its
# trailing newline; comparisons below still work only because 'screens'
# is built the same way -- confirm before changing either side.
if readarray servers < servers.txt; then
info "Массив с серверами успешно собран. Количество элементов - ${#servers[@]}"
else
warning "Массив собрать не удалось."
exit 1;
fi
# Collect the names of currently running screen sessions.
if readarray screens < <(parse_screens); then
info "Массив со скринами успешно собран. Количество элементов - ${#screens[@]}"
else
warning "Массив собрать не удалось."
exit 1;
fi
# Queue every listed server that has no matching screen session yet.
total=${#servers[*]}
for (( i=0; i<=$(( $total -1 )); i++ ))
do
if ! containsElement "${servers[$i]}" "${screens[@]}"; then
server=${servers[$i]}
prepareToRun $server
fi
done
# Start each queued server: create its screen session, then send the
# start command into it.
# NOTE(review): createScreen is invoked with a single argument although
# it expects a command as $2 -- confirm the intended invocation.
total=${#runServers[*]}
for (( i=0; i<=$(( $total -1 )); i++ ))
do
server=${runServers[$i]}
path="$(findServerPath $server)"
createScreen $server
startCommand $server $path
done
| true
|
f5acbb355ede5a596b4e4fec06dae716b7834edf
|
Shell
|
Elias-Will/Test
|
/iOS/osx_get_ios_data_with_libimobiledevice.sh
|
UTF-8
| 853
| 3.578125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Pair with a connected iOS device via libimobiledevice, dump its device
# information with 'ideviceinfo', then unpair again.
#
# Usage: script [-ni]
#   -ni   non-interactive mode: skip the "press a key" confirmation.

if [[ "$1" == "-ni" ]]
then
	INTERACTIVE=0
else
	INTERACTIVE=1
fi

# BUG FIX: the confirmation prompt used to be shown when INTERACTIVE
# was 0 (i.e. only when -ni, "non-interactive", was passed) -- exactly
# inverted. It is now shown only in interactive mode.
if [ "$INTERACTIVE" -eq 1 ]
then
	echo "Let only one iOS device plugged on the USB port"
	echo "Unlock the phone"
	echo ""
	echo "Press a key to continue."
	read
fi

# Keep asking to pair until the device trusts this computer.
# (idevicepair exits 1 while the device is locked / untrusted.)
PAIRED=1
idevicepair validate
PAIRED=$?
while [ "$PAIRED" -eq 1 ]
do
	echo -e "Device Not Paired. Unlock the iOS Device. \n Trust the computer if asked."
	idevicepair pair
	PAIRED=$?
	sleep 2
done

echo "Getting the info"
ideviceinfo
echo "Paired Status: $PAIRED"

# Unpair so the device is left in its original trust state.
if [ "$PAIRED" -eq 0 ]
then
	echo "Unpairing ..."
	idevicepair unpair
	PAIRED=$?
	while [ "$PAIRED" -eq 1 ]
	do
		echo -e "Device STILL Paired. Don't unplug it."
		sleep 5
		idevicepair unpair
		PAIRED=$?
	done
	echo "You can now safely unplug the iOS Device"
fi
| true
|
c892f2b0c13b4d99bf850f999153bdc14dde54bd
|
Shell
|
nolfla/ift383
|
/v1/hw2/q3/price.sh
|
UTF-8
| 194
| 3.015625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Demonstrates bash 'declare -l' (force-lowercase) variables and string
# expansion. Note that -l lowercases values on assignment, so the
# sentence is stored entirely in lower case.
declare -l lab='lab ?'
declare -l labs='lab*'
declare -l price='84.5'
declare -l sentence='The price of the book is $'
# Print each value on its own line; quoting suppresses glob expansion
# of the '*' in $labs.
printf '%s\n' "$lab" "$labs" "${sentence}${price}"
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.