blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
a2ab2f000b89abd8bbe59f621e8d81a6bdb8f27f
|
Shell
|
jasonnerothin/robotrobot.io
|
/docker/nginx/run-nginx.sh
|
UTF-8
| 682
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash -x
# Run the nginx container for local development.
# execute this script from the directory containing this script, please
source ./01-localhost.sh
#readonly conf_file=nginx.conf
#source 02-production.sh
readonly nginx_port=80
readonly host_nginx_dir=/tmp/nginx/
# conf_file is expected to be defined by the environment script sourced above.
readonly host_nginx_conf=${host_nginx_dir}${conf_file}
readonly host_nginx_log_files=${host_nginx_dir}logs
# Ensure the log directory exists. Was 'mkdir -pf': mkdir has no -f option,
# so the directory was never created on a fresh machine.
if [ ! -d "${host_nginx_log_files}" ];
then
	mkdir -p "${host_nginx_log_files}" ;
fi
# Stage the config where the container bind-mounts it from.
cp -f "./${conf_file}" "${host_nginx_conf}" ;
set -x
docker run --network=tonowhere -it -p 80:80 -p 1414:1414 --name nginx \
	-v "${host_nginx_conf}":/etc/nginx/nginx.conf:ro \
	-d nginx-debug \
	nginx-debug -g 'daemon off;' ;
set +x
sleep 1 ;
exit 0 ;
| true
|
72018c390284de20e6cf5c3c4eeb5f5793d01229
|
Shell
|
pjam/docker-symfony4-mysql
|
/docker/application/enable-xdebug.sh
|
UTF-8
| 583
| 3.234375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Enable the Xdebug PHP extension inside the container.
set -e

XDEBUG_INI_FILE="/usr/local/etc/php/conf.d/docker-php-ext-xdebug.ini"
DISABLED_EXT="disabled"

# If the ini file was previously renamed aside to disable it, restore it.
if [ -f "${XDEBUG_INI_FILE}.${DISABLED_EXT}" ]; then
    mv "${XDEBUG_INI_FILE}.${DISABLED_EXT}" "${XDEBUG_INI_FILE}"
fi

# First-time setup: enable the extension, then insert each setting after
# line 1 of the generated ini file (same insertion order as before, so the
# resulting file is identical).
if [ ! -f "${XDEBUG_INI_FILE}" ]; then
    docker-php-ext-enable xdebug
    for xdebug_setting in \
        'xdebug.remote_autostart=true' \
        'xdebug.remote_host=host.docker.internal' \
        'xdebug.remote_enable=1' \
        'xdebug.max_nesting_level=400'
    do
        sed -i "1 a ${xdebug_setting}" "${XDEBUG_INI_FILE}"
    done
fi
| true
|
b15348fde967d30ab483a937c9b98b61668b8659
|
Shell
|
benaux/env-pub
|
/moreutils_51j/decutils/decreg
|
UTF-8
| 1,480
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/sh
# Copyright (c) 2020 ben
# Licensed under the MIT license. See License-MIT.txt in project root.
# decreg - register the current directory (holding a decimal file) in ~/r (redir).
HELP='reg - register a decimal in redir'
USAGE="<input decimal dir"
# "${1-}" so an argument-less invocation does not trip 'set -o nounset' below.
input="${1-}"
file_input=
## Disable for dash
set -o errexit -o errtrace # abort on nonzero exitstatus
set -o nounset # abort on unbound variable
set -o pipefail # don't hide errors within pipes
cwd=$(pwd)
cwd_base=$(basename "$cwd")
die () { echo "$@" 1>&2; exit 1; }
warn () { echo "$@" 1>&2; }
# usage [message] - optional warning, then usage line and exit 1.
# Uses an if (not '[ ] &&') so an empty $1 neither trips nounset nor errexit.
usage () { if [ -n "${1-}" ]; then warn "$1"; fi; local app; app=$(basename "$0"); die "usage - $app: $USAGE"; }
help () { warn "Help: $HELP" ; usage ; }
# cmdcheck cmd... - die unless every named command is installed.
cmdcheck () { for c in "$@" ; do
	command -v "$c" >/dev/null 2>&1 || die "Err: no cmd '$c' installed" ;
	done ; }
realpath () { perl -MCwd -le 'print Cwd::realpath($ARGV[0])' "$1"; }
# filename base sep - strip the shortest trailing <sep>* (e.g. extension).
filename () { local base=$1; local sep=$2; echo "${base%${sep}*}" ; }
# fileext base sep - echo the extension after <sep>, or nothing if none.
fileext () { local base=$1; local sep=$2; ext=${base##*${sep}} ; [ "$base" = "$ext" ] || echo "$ext" ; }
cleanup () { echo ok ; }
[ -n "$input" ] || usage
while [ $# -gt 0 ]; do
	arg="$1"
	shift
	case "$arg" in
		-h|--help) help ;;
		-*) die "Err: invalid option use -h for help" ;;
		*) file_input="$arg" ;;
	esac
done
file_path=$(realpath "$file_input")
[ -f "$file_path" ] || die "Err: file path $file_path is invalid"
redir=$HOME/base/redir
mkdir -p "$redir"
rm -f ~/r && ln -s "$redir" ~/r
here=$(pwd)
basehere=$(basename "$here")
rm -f ~/r/"$basehere" && ln -s "$here" ~/r/"$basehere"
# trap "cleanup" EXIT
| true
|
e63669b5ca76fa831e825a3f362bc89624469669
|
Shell
|
igouss/dotfiles
|
/.bash_profile
|
UTF-8
| 1,367
| 2.5625
| 3
|
[] |
no_license
|
# ~/.bash_profile - completions, prompt, history, colors and misc environment.
source ~/.bash_completion.d/git-completion.sh
source ~/.bash_completion.d/todo_completer.sh
source ~/.gitfunctions
source ~/.alias
# append to the history file, don't overwrite it
shopt -s histappend
#export PS1=">"
# Prompt: current dir plus git branch via __git_ps1 (from git-completion above).
export PS1='[\W$(__git_ps1 " (%s)")]\$ '
export PATH=~/opt/git/bin/:$HOME/bin:/opt/local/bin:/opt/local/sbin:$PATH
export CLICOLOR=1
export JAVA_HOME=/Library/Java/Home
# 30 black 31 red 32 green 33 yellow 34 blue 35 purple 36 cyan 37 white
# export GREP_COLOR="38;5;245"
export GREP_COLOR="32"
export GREP_OPTIONS="--color=auto"
export EDITOR=vim
# Large, de-duplicated shell history.
export HISTCONTROL=ignoredups
export HISTFILESIZE=1000000000
export HISTSIZE=1000000
# see http://www.macosxhints.com/article.php?story=20031025162727485
export CLICOLOR=1
#export LSCOLORS=gxfxcxdxbxegedabagacad
export LSCOLORS=DxGxcxdxCxcgcdabagacad
#export LESS=-RX
# Tab completion for the 't' shortcut of todo.sh.
complete -F _todo_sh -o default t
# View man pages in read-only vim (K jumps to the man page under the cursor).
export MANPAGER="/bin/sh -c \"unset PAGER;col -b -x | \
vim -R -c 'set ft=man nomod nolist' -c 'nmap K :Man <C-R>=expand(\\\"<cword>\\\")<CR><CR>' -\""
set -o vi
#run "sudo visudo" and add the line
#Defaults env_keep += "JAVA_HOME"
if [ -f /opt/local/etc/bash_completion ]; then
. /opt/local/etc/bash_completion
fi
# ASCII-art sine-wave banner printed at login.
ruby -e'include Math;(-7..30).each{|y|s="";\
(26..100).each{|x|s<<" .,:+*%xX08@"[ \
((sin(x/8.0)+2+sin(y*x/2.0))** \
(sin(y/7.0)+1)*12/16.0)]};puts s}'
alias
| true
|
db53472e6063bcc35e16c1de36dd8549b65a84f3
|
Shell
|
Skeen/tardigrade_s3fs
|
/entrypoint.sh
|
UTF-8
| 971
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/sh
# Entrypoint: configure the Tardigrade S3 gateway from environment
# variables, then run it.
# Shebang is /bin/sh, so use POSIX [ ] tests (the original used the
# bash-only [[ ]] construct, which breaks under dash/ash).

# Map a region name to a default satellite address; an explicitly set
# SATELLITE_ADDRESS always wins over the region default.
SATELLITE_REGION=${SATELLITE_REGION:-EUROPE}
if [ "${SATELLITE_REGION}" = "EUROPE" ]; then
    SATELLITE_ADDRESS=${SATELLITE_ADDRESS:-europe-west-1.tardigrade.io}
elif [ "${SATELLITE_REGION}" = "ASIA" ]; then
    SATELLITE_ADDRESS=${SATELLITE_ADDRESS:-asia-east-1.tardigrade.io}
elif [ "${SATELLITE_REGION}" = "US" ]; then
    SATELLITE_ADDRESS=${SATELLITE_ADDRESS:-us-central-1.tardigrade.io}
fi
# Required configuration must be present before we attempt setup.
if [ -z "${SATELLITE_ADDRESS}" ]; then
    echo "SATELLITE_ADDRESS is not set" 1>&2
    exit 1
fi
if [ -z "${API_KEY}" ]; then
    echo "API_KEY is not set" 1>&2
    exit 1
fi
if [ -z "${PASSPHRASE}" ]; then
    echo "PASSPHRASE is not set" 1>&2
    exit 1
fi
gateway setup --non-interactive --satellite-address "${SATELLITE_ADDRESS}" --api-key "${API_KEY}" --passphrase "${PASSPHRASE}"
SETUP_STATUS=$?
if [ "${SETUP_STATUS}" -ne 0 ]; then
    echo "Setup failed!" 1>&2
    exit "${SETUP_STATUS}"
fi
gateway run --server.address 0.0.0.0:7777
| true
|
c3f57dd29d25c7156e5c8c5b5eb36894d4553a24
|
Shell
|
NanNaDa/_GIT_TUTORIAL_
|
/_SHELL_SCRIPT_/Lec_01/Lec_01_03.sh
|
UTF-8
| 252
| 3.71875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Create directory Lec_01_03 containing File_01_03.txt with a greeting;
# print an error instead if the directory already exists.
dirname="Lec_01_03"
filename="File_01_03.txt"
# Create and fill $filename inside $dirname.
touch_new_file() {
	# Abort if the cd fails so we never touch a file in the wrong directory.
	cd "$dirname" || return 1
	touch "$filename"
	echo "Hello World" > "$filename"
}
error_message() {
	echo "The directory already exists."
}
# mkdir fails when the directory exists, which routes us to error_message.
mkdir "$dirname" && touch_new_file || error_message
| true
|
3455966388bbe8dcbdabf953b91a3fc08f700da9
|
Shell
|
kevinfra/orga2-tp2
|
/codigo/scripteandoConDosImplementaciones.sh
|
UTF-8
| 4,397
| 3.265625
| 3
|
[] |
no_license
|
# Implementations available per filter (informational).
imp_sepia="asm c"
imp_ldr="asm c"
imp_cropflip="asm c"
# Which filter to run; "all" runs everything, "none" only cleans up.
filtros="all"

# Help text for -h (printed verbatim, matching the old echo sequence).
print_help() {
	cat <<'EOF'

 Script para generar info de test cases. Se calcula la cantidad de ciclos de
 reloj cada 100 ejecuciones por imagen, con hasta 13 imagenes.

 Opciones disponibles:
 -h Imprime este texto de ayuda.
 -f <ldr|sepia|cropflip|cropflip3|all> Filtro a ejecutar (todas por defecto).
 -c Limpia todo.

 Cropflip3 ejecuta las 2 versiones de cropflip en c y la version de asm

EOF
}

while getopts 'chf:' opt; do
	case "$opt" in
		c) filtros="none" ;;
		h) print_help
		   exit 0 ;;
		f) filtros="$OPTARG" ;;
	esac
done

# Clean build before measuring.
make clean
make
# --- sepia: time asm V1 then asm V2 (results in sepiasm / sepiac) ---
if [[ "$filtros" == "sepia" || "$filtros" == "all" ]]; then
	rm -f sepiac    # -f: no error when a previous result file is absent
	rm -f sepiasm
	for (( i = 128; i < 1700; i=i+128 )); do
		echo "corriendo filtro sepia asm v1 para una matriz de $i x $i"
		printf '%i ' $((i*i)) >> sepiasm
		./build/tp2 sepia -i asm ./img/bastachicos.${i}x${i}.bmp -t 100 >>sepiasm
	done
	# Swap in the V2 assembly implementation and rebuild.
	mv filtros/sepia_asm.asm filtros/sepia_asmV1.asm
	mv filtros/sepia_asmV2.asm filtros/sepia_asm.asm
	make clean
	make
	for (( i = 128; i < 1700; i=i+128 )); do
		echo "corriendo filtro sepia asm v2 para una matriz de $i x $i"
		# NOTE: V2 results land in 'sepiac' (legacy name kept for plotting).
		printf '%i ' $((i*i)) >> sepiac
		./build/tp2 sepia -i asm ./img/bastachicos.${i}x${i}.bmp -t 100 >>sepiac
	done
	# Restore the original file layout.
	mv filtros/sepia_asm.asm filtros/sepia_asmV2.asm
	mv filtros/sepia_asmV1.asm filtros/sepia_asm.asm
fi
# --- ldr: time asm V1 then asm V2 (results in ldrasm / ldrc) ---
if [[ "$filtros" == "ldr" || "$filtros" == "all" ]]; then
	rm -f ldrasm    # -f: no error when a previous result file is absent
	rm -f ldrc
	for (( i = 128; i < 1700; i=i+128 )); do
		echo "corriendo filtro ldr asm V1 para una matriz de $i x $i"
		printf '%i ' $((i*i)) >> ldrasm
		./build/tp2 ldr -i asm ./img/bastachicos.${i}x${i}.bmp 100 -t 100 >>ldrasm
	done
	# Swap in the V2 assembly implementation and rebuild.
	mv filtros/ldr_asm.asm filtros/ldr_asmV1.asm
	mv filtros/ldr_asmV2.asm filtros/ldr_asm.asm
	make clean
	make
	for (( i = 128; i < 1700; i=i+128 )); do
		echo "corriendo filtro ldr asm V2 para una matriz de $i x $i"
		# NOTE: V2 results land in 'ldrc' (legacy name kept for plotting).
		printf '%i ' $((i*i)) >> ldrc
		./build/tp2 ldr -i asm ./img/bastachicos.${i}x${i}.bmp 100 -t 100 >>ldrc
	done
	# Restore the original file layout.
	mv filtros/ldr_asm.asm filtros/ldr_asmV2.asm
	mv filtros/ldr_asmV1.asm filtros/ldr_asm.asm
fi
# --- cropflip: time C V1 then C V2 (results in cropasm / cropc) ---
if [[ "$filtros" == "cropflip" || "$filtros" == "all" ]]; then
	rm -f cropasm    # -f: no error when a previous result file is absent
	rm -f cropc
	echo $t
	for (( i = 128; i < 1700; i=i+128 )); do
		echo "corriendo filtro cropflip c V1 para una matriz de $i x $i"
		printf '%i ' $((i*i)) >> cropasm
		# Crop offset must be arithmetic: 't=$i-128' produced the literal
		# string "<i>-128" instead of a number.
		t=$((i-128))
		./build/tp2 cropflip -i c ./img/bastachicos.${i}x${i}.bmp 128 128 $t $t -t 100000 >>cropasm
	done
	# Swap in the V2 C implementation and rebuild.
	mv filtros/cropflip_c.c filtros/cropflip_cV1.c
	mv filtros/cropflip_cV2.c filtros/cropflip_c.c
	make clean
	make
	for (( i = 128; i < 1700; i=i+128 )); do
		echo "corriendo filtro cropflip c V2 para una matriz de $i x $i"
		printf '%i ' $((i*i)) >> cropc
		t=$((i-128))
		./build/tp2 cropflip -i c ./img/bastachicos.${i}x${i}.bmp 128 128 $t $t -t 100000 >>cropc
	done
	# Restore the original file layout.
	mv filtros/cropflip_c.c filtros/cropflip_cV2.c
	mv filtros/cropflip_cV1.c filtros/cropflip_c.c
fi
# --- cropflip3: time C V1, C V2 and the asm version (cropCv1/cropCv2/cropASM1) ---
if [[ "$filtros" == "cropflip3" ]]; then
	rm -f cropASM1    # -f: no error when a previous result file is absent
	rm -f cropCv1
	rm -f cropCv2
	echo $t
	for (( i = 128; i < 1700; i=i+128 )); do
		echo "corriendo filtro cropflip c V1 para una matriz de $i x $i"
		printf '%i ' $((i*i)) >> cropCv1
		# Crop offset must be arithmetic: 't=$i-128' produced the literal
		# string "<i>-128" instead of a number.
		t=$((i-128))
		./build/tp2 cropflip -i c ./img/bastachicos.${i}x${i}.bmp 128 128 $t $t -t 100000 >>cropCv1
	done
	# Swap in the V2 C implementation and rebuild.
	mv filtros/cropflip_c.c filtros/cropflip_cV1.c
	mv filtros/cropflip_cV2.c filtros/cropflip_c.c
	make clean
	make
	for (( i = 128; i < 1700; i=i+128 )); do
		echo "corriendo filtro cropflip c V2 para una matriz de $i x $i"
		printf '%i ' $((i*i)) >> cropCv2
		t=$((i-128))
		./build/tp2 cropflip -i c ./img/bastachicos.${i}x${i}.bmp 128 128 $t $t -t 100000 >>cropCv2
	done
	# Restore the original layout and rebuild for the asm run.
	mv filtros/cropflip_c.c filtros/cropflip_cV2.c
	mv filtros/cropflip_cV1.c filtros/cropflip_c.c
	make clean
	make
	for (( i = 128; i < 1700; i=i+128 )); do
		echo "corriendo filtro cropflip ASM para una matriz de $i x $i"
		printf '%i ' $((i*i)) >> cropASM1
		t=$((i-128))
		./build/tp2 cropflip -i asm ./img/bastachicos.${i}x${i}.bmp 128 128 $t $t -t 100000 >>cropASM1
	done
fi
# Remove the generated test images.
rm -f bastachicos.*
# -c: wipe every generated result file (-f so missing ones don't error).
if [[ "$filtros" == "none" ]]; then
	rm -f sepiac
	rm -f sepiasm
	rm -f ldrc
	rm -f ldrasm
	rm -f cropc
	rm -f cropasm
	# cropflip3 outputs (was 'rm cropASM', a file this script never creates).
	rm -f cropASM1
	rm -f cropCv1
	rm -f cropCv2
	make clean
fi
# done
#python GraficarBarras.py Sepia
| true
|
fd41ab513f2abcce97ed00ef629d5fa8975500a3
|
Shell
|
jrialland/python-brain
|
/release.sh
|
UTF-8
| 1,333
| 3.296875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Release script: run the unit tests, generate python2 sources with 3to2,
# autoformat, clean up, tag the version and upload to PyPI.
set -e

# Run unit tests; fail loudly. (The old 'if [ $? -ne 0 ]' check was dead
# code: under set -e the script had already exited on failure.)
PYTHONPATH=$PYTHONPATH:$(pwd) python tests/all_tests.py || {
    echo 'unit tests failed !' >&2
    exit 1
}

# Fetch and unpack the 3to2 converter on first run only.
if [ ! -d '3to2-1.0' ]; then
    wget "https://bitbucket.org/amentajo/lib3to2/downloads/3to2-1.0.tar.gz"
    tar -zxvf 3to2-1.0.tar.gz
fi

# Generate the python2 variant (*_py2.py) of every *_py3.py module.
for py in $(find ./ -type f -name "*_py3.py"); do
    py2=$(echo "$py" | sed -e s/_py3/_py2/)
    cp "$py" "$py2"
    3to2-1.0/3to2 -w "$py2"
done

# Apply autopep8 formatting to all sources.
for py in $(find ./ -type f -name "*.py"); do
    autopep8 -i "$py"
done

# Clean up temporary files.
find ./ -name "*.pyc" | xargs rm -f
find ./ -name "*.bak" | xargs rm -f
find ./ -type d -name "__pycache__" | xargs rm -rf

# Read the package version (python2 'print' syntax, matching the target interpreter).
version=$(python << '__eof'
import brain
print brain.__version__
__eof
)

git tag "$version" -m "releasing version $version"
git push --tags origin master

#in order to publish to pipy, the account infos (obtained at https://pypi.python.org/pypi?%3Aaction=register_form)
#should written to a .pypirc file for the current user.
#[distutils] # this tells distutils what package indexes you can push to
#index-servers = pypitest
#
#[pypi]
#repository: https://pypi.python.org/pypi
#username=
#password=
#
#[pypitest]
#repository: https://testpypi.python.org/pypi
#username: jrialland
python setup.py register -r pypi
python setup.py sdist upload -r pypi
| true
|
dd24f634e90177803cf5482ba9aed9e97b300675
|
Shell
|
jwarykowski/dotfiles
|
/.zshrc
|
UTF-8
| 909
| 2.5625
| 3
|
[] |
no_license
|
# ~/.zshrc - oh-my-zsh setup, plugins, history options and the pure prompt.
export ZSH_CONFIG="$HOME/.config/zsh"
export ZSH="$HOME/.oh-my-zsh"
TERM=xterm-256color
# Empty theme: the prompt is provided by 'pure' at the bottom instead.
ZSH_THEME=""
# profile
[[ -f "$HOME/.profile" ]] \
&& source "$HOME/.profile"
# oh-my-zsh plugins to load.
plugins=(
bgnotify
command-not-found
git
git-extras
yarn
zsh-autosuggestions
zsh-completions
zsh-syntax-highlighting
z
)
# oh-my-zsh
[[ -f "$ZSH/oh-my-zsh.sh" ]] \
&& source "$ZSH/oh-my-zsh.sh"
# options
setopt hist_ignore_space
setopt append_history
setopt extended_history
# aliases
[[ -f "$ZSH_CONFIG/alias.zsh" ]] \
&& source "$ZSH_CONFIG/alias.zsh"
# fzf
[[ -f "$ZSH_CONFIG/fzf.zsh" ]] \
&& source "$ZSH_CONFIG/fzf.zsh"
# functions
[[ -f "$ZSH_CONFIG/functions.zsh" ]] \
&& source "$ZSH_CONFIG/functions.zsh"
# key_bindings
[[ -f "$ZSH_CONFIG/key_bindings.zsh" ]] \
&& source "$ZSH_CONFIG/key_bindings.zsh"
# prompt
# pure prompt: add its function dir to fpath, then initialise and select it.
fpath+=$HOME/.zsh/pure
autoload -U promptinit; promptinit
prompt pure
| true
|
75c9c5b837f5231d094fe15568c034d7c9043333
|
Shell
|
SuperITMan/android_prebuilts_prebuiltapks
|
/scripts/functions.sh
|
UTF-8
| 1,663
| 3.453125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
set -u -e -o pipefail
#######################################
# Echo the current package version of an available application.
# Arguments:
#   param1 - application name
#   param2 - application package name
#   param3 - location path of the applications
#######################################
getCurrentPackageVersion() {
  local name=$1
  local packageName=$2
  local location=$3
  # Use the named locals consistently (the original assigned them and then
  # read the raw positionals $1/$2/$3 anyway).
  echo $(ls "${location}/${name}" | grep "${packageName}_")
}
#######################################
# Echo the name of the application at position X in the array.
# Arguments:
#   param1 - application index
#######################################
getApplicationName() {
  echo $(node -e "require('$SCRIPTS_LOCATION/index.js').getApplicationName($1)")
}
#######################################
# Echo the package name of the application at position X in the array.
# Arguments:
#   param1 - application index
#######################################
getApplicationPackageName() {
  echo $(node -e "require('$SCRIPTS_LOCATION/index.js').getApplicationPackageName($1)")
}
#######################################
# Echo the latest package version of the application available on internet.
# Arguments:
#   param1 - application package name
#######################################
getApplicationApkName() {
  echo $(node -e "require('$SCRIPTS_LOCATION/index.js').getPackageApkVersion('$1')")
}
#######################################
# Echo the type of the application at position X in the array.
# Arguments:
#   param1 - application index
#######################################
getApplicationType() {
  echo $(node -e "require('$SCRIPTS_LOCATION/index.js').getApplicationType($1)")
}
| true
|
55cf92b44fd576728d763af80bcadc687ecd5b0b
|
Shell
|
lowet84/k8s-config
|
/tools/updateByNamespace
|
UTF-8
| 704
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
# Restart every deployment in a namespace by scaling it to 0 and back to
# its previous replica count.
# Usage: updateByNamespace [namespace]   (no argument: list namespaces)
NAMESPACES=$(kubectl get namespace | cut -d' ' -f1 | awk 'NR>1')
NAMESPACE=$(echo "$NAMESPACES" | grep "$1")
if [ -z "$1" ]; then
  echo "Available namespaces:"
  for NAMESPACE in $NAMESPACES
  do
    echo "$NAMESPACE"
  done
elif [ ! -n "$NAMESPACE" ]; then
  echo "Namespace does not exist"
else
  DEPLOYMENTS=$(kubectl get deploy --namespace="$1" | cut -d' ' -f1 | awk 'NR>1')
  for DEPLOYMENT in $DEPLOYMENTS
  do
    # Current replica count of this deployment. The original queried "$j",
    # an undefined variable, so it listed all deployments and read the
    # wrong column for the wrong row.
    TARGET="$(kubectl get deploy --namespace="$1" "$DEPLOYMENT" | awk 'NR>1' | xargs | cut -d' ' -f2)"
    echo "$DEPLOYMENT: $TARGET"
    kubectl scale --namespace="$1" --replicas=0 deploy "$DEPLOYMENT"
    sleep .5
    kubectl scale --namespace="$1" --replicas="$TARGET" deploy "$DEPLOYMENT"
  done
fi
| true
|
c0f39163986b306f9da382d579a521be3178fefb
|
Shell
|
cHolzberger/kvm-osx
|
/bin/machine-list-running
|
UTF-8
| 500
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
# List running KVM virtual machines: a VM counts as running when the pid
# recorded under its var/pid file still exists in /proc.
# -v also prints memory and VNC port per VM plus a memory total.
VERBOSE=0
if [[ "$1" == "-v" ]]; then
	VERBOSE=1
fi
_sum=0
for MACHINE_PATH in /srv/kvm/vms/*; do
	if [ -e "$MACHINE_PATH/var/pid" ]; then
		p=$(cat "$MACHINE_PATH/var/pid")
		if [ -e "/proc/$p" ]; then
			vm=$(basename "$MACHINE_PATH")
			# config defines per-VM settings; MEM and GFX_VNCPORT are read below.
			source "$MACHINE_PATH/config"
			[[ $VERBOSE == "0" ]] && echo "$vm"
			[[ $VERBOSE == "1" ]] && echo -en "$vm \t $MEM \t $GFX_VNCPORT\n"
			# Strip the G suffix and accumulate total memory ('let' replaced
			# with the clearer arithmetic expansion).
			_sum=$(( _sum + $(echo "$MEM" | sed -e s/G//) ))
		fi
	fi
done
[[ $VERBOSE == "1" ]] && echo -e "Total: \t ${_sum}G"
| true
|
084621c16f81376cc860d689e90a7b9bf04973d9
|
Shell
|
akash99-code/Shell_Programming
|
/Scripts/p20greatestn.sh
|
UTF-8
| 299
| 3.328125
| 3
|
[] |
no_license
|
#
# Write a shell script to find out greatest among n input integers where n is to be input by the user.
#
read -p "Enter number of inputs - " n
read -p "number 1 - " a
i=2
# Compare each remaining input against the running maximum held in $a.
# Operands are quoted so empty input fails cleanly instead of breaking 'test'.
while [ "$i" -le "$n" ]
do
	read -p "number $i - " b
	if [ "$a" -lt "$b" ]
	then
		a=$b
	fi
	i=$((i+1))
done
echo Greatest Number - $a
| true
|
4dce5b5c7e7274cda7a93938e9190d121984397f
|
Shell
|
wkl1990/phasemets
|
/run_AgIn.sh
|
UTF-8
| 638
| 2.515625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Run AgIn methylation-class prediction on every modifications file listed
# in .tmp, writing results to RESULTDIR.
MODDIR=/hpgwork/hacone/P6_7515/Mods
AGINDIR=/work/hacone/AgIn_mercurial/Agin/target
BETA=/work/hacone/AgIn_mercurial/Agin/resources/P6C4.dat
REF=/work/hacone/Reference/hg38_7515_SVs/sequence/hg38_7515_SVs.fasta
RESULTDIR=/hpgwork/hacone/P6_7515/AgInResult
# Create the output directory. The original ran 'mkdir -p ${AgInResult}' —
# an undefined variable — so the result directory was never created.
mkdir -p "${RESULTDIR}"
# -b = P5C3 or LDAVector (Default)
# -g = -1.80 (~P4/C2) or -2.52 (P5/C3) <- 0.80!!
# -g = -0.55 (P6/C4)
# predict methylation class
#ls -S Mods/ > .tmp
while read -r file; do
	JVM_OPTS="-Xmx128G" ${AGINDIR}/dist/bin/launch \
		-i ${MODDIR}/${file} \
		-f ${REF} \
		-o ${RESULTDIR}/${file%%.csv} \
		-g -0.55 -l 40 -c -b ${BETA} predict
done < .tmp
| true
|
c904c60adc5b818ae9b07603041965f433d5f4a7
|
Shell
|
joeygravlin/eos.dotfiles
|
/.profile
|
UTF-8
| 273
| 3.03125
| 3
|
[] |
no_license
|
EDITOR="/usr/bin/vim"
export EDITOR
# export username?!?!?! seriously!?
USERNAME="$(whoami)"
export USERNAME
# Attempt to make zsh run by default
# (stupid chsh says `whoami` don't exist :/)
if [ -x /bin/zsh ]; then
	SHELL="/bin/zsh"
	export SHELL
	# Was 'exec SHELL' — that tries to run a command literally named
	# "SHELL"; expanding the variable actually replaces this shell with zsh.
	exec "$SHELL"
fi
| true
|
3f0d4b1b078e89b553a3ab91b3cba287b2820071
|
Shell
|
cncf/devstatscode
|
/devel/api_com_contrib_repo_grp.sh
|
UTF-8
| 831
| 3.40625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Call the DevStats ComContribRepoGrp API for a project over a time range.
# Positional args (all required, validated in order): project, from, to,
# repository group, period. Exit code equals the index of the missing arg.
descriptions=(
  "project name as a 1st arg"
  "timestamp from as a 2nd arg"
  "timestamp to as a 3rd arg"
  "repository group as a 4th arg"
  "period as a 5th arg"
)
for idx in 1 2 3 4 5
do
  # ${!idx} is indirect expansion: the positional parameter numbered $idx.
  if [ -z "${!idx}" ]
  then
    echo "$0: please specify ${descriptions[$((idx-1))]}"
    exit "$idx"
  fi
done
# Default API endpoint when none is provided via the environment.
if [ -z "$API_URL" ]
then
  API_URL="http://127.0.0.1:8080/api/v1"
fi
project="${1}"
from="${2}"
to="${3}"
repo_group="${4}"
period="${5}"
curl -H "Content-Type: application/json" "${API_URL}" -d"{\"api\":\"ComContribRepoGrp\",\"payload\":{\"project\":\"${project}\",\"from\":\"${from}\",\"to\":\"${to}\",\"repository_group\":\"${repo_group}\",\"period\":\"${period}\"}}" 2>/dev/null | jq
| true
|
8d9505e7108659c80a20574780c62732727bef8f
|
Shell
|
angcrush/tmp
|
/cron-deploy
|
UTF-8
| 360
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
# Cron job: for every repo whose "dirty" flag file exists, remove the flag
# and re-export (deploy) the repository.
#echo "======start======"
#echo ">>>>> cron deploy"
repos=( dogilike ddfw glitter )
for i in "${repos[@]}"; do
	flag="/git/dirty/dirty-$i"
	# echo ">>> check flag for $i"
	if [ -f "$flag" ]; then
		# echo ">> found flag "
		rm "$flag"
		/home/gituser/export "$i"
	fi
	#echo "run by cron $i" >> run.txt
done
| true
|
43604cf0c73e082ba7b46543995145c8e61bb7ba
|
Shell
|
cha63506/RimRoot_Fedora_15_armv5
|
/usr/share/dracut/modules.d/98syslog/parse-syslog-opts.sh
|
UTF-8
| 748
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/sh
# -*- mode: shell-script; indent-tabs-mode: nil; sh-basic-offset: 4; -*-
# ex: ts=8 sw=4 sts=4 et filetype=sh
# Parses the syslog commandline options
#
#Bootparameters:
#syslogserver=ip Where to syslog to
#sysloglevel=level What level has to be logged
#syslogtype=rsyslog|syslog|syslogng
# Don't auto detect syslog but set it
# Pull in dracut-lib.sh for getarg/getargs unless they are already defined.
type getarg >/dev/null 2>&1 || . /lib/dracut-lib.sh
# Each call takes the new-style option name first, the legacy name second.
syslogserver=$(getarg syslog.server syslog)
syslogfilters=$(getargs syslog.filter filter)
syslogtype=$(getarg syslog.type syslogtype)
# Persist any provided values under /tmp for the later syslog module hooks.
[ -n "$syslogserver" ] && echo $syslogserver > /tmp/syslog.server
[ -n "$syslogfilters" ] && echo "$syslogfilters" > /tmp/syslog.filters
[ -n "$syslogtype" ] && echo "$syslogtype" > /tmp/syslog.type
| true
|
9ce631f89b5c353ac1c95e845978486bbd6b72f0
|
Shell
|
cathyatseneca/CourseRepoScripts
|
/clonelog
|
UTF-8
| 771
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
# Clone every team repository listed in the createrepo log file into the
# current directory.
#your github id goes here
yourusername="YourGithbuID"
#generate a personal token from your github settings
#menu-->personal access token. Make sure you have
#repo,delete_repo and admin:org checked for your token
token="YourPersonalAccessToken"
#name of the organization you want repo to go in
organization="YourGithubOrganization"
#the log file is generated by your createrepo script
#name of the file that stores info about your team/repo names
#and id.
#each line of file should look like this:
#|nameofrepo|githubid|teamid|
inputfile="logfile.txt"
# -r keeps backslashes in the input literal instead of treating them as escapes.
while IFS="|" read -r notused realname githubid teamid
do
	# Strip all whitespace from the parsed fields.
	realname=${realname//[[:blank:]]/}
	githubid=${githubid//[[:blank:]]/}
	git clone "git@github.com:$organization/$realname.git"
done < "$inputfile"
| true
|
638dc18752633fdebee936f79e7afc201e277e21
|
Shell
|
RosenZhu/fuzzing-binaries
|
/use_different_containers/compiling.sh
|
UTF-8
| 120
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the auto-compilation script inside each of the ten node directories.
path=$(pwd)
for i in {1..10}
do
	# Abort if a node directory is missing, rather than silently running
	# auto_compiling.py in whatever the previous working directory was.
	cd "$path/node$i/scripts" || exit 1
	python auto_compiling.py
	sleep 5
done
| true
|
1fc1b2ab46f468acac15c01cea08a6bdf589b7d1
|
Shell
|
tcallier/Comp485
|
/create_version.sh
|
UTF-8
| 13,257
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
###################################################################################################
# Script name : create_version.sh #
# Description : this script is designed to create a version history of config file #
# backups created by the create_backup.sh script. these backups include #
# config files for DHCP, DNS and Firewall/IPtables. it is designed to #
# compare the most recent backup to the most recent version created. if #
# file modifications are found, it will create a new version and add it #
# to the history. if no modifications are found, a new version will not #
# be created. it will save up to five unique versions with version one #
# being the newest and version five being the oldest. if five versions #
# exist, version five will be overwritten if a new version is added. #
# Author : Thomas Callier #
###################################################################################################
###################################################################################################
# v variable declaration v #
###################################################################################################
DHCP_LOGFILE=/home/techlab/Comp485/versions/dhcp_versions/versions_create.log # file path of dhcp version log file
DHCP_SOURCE=/home/techlab/backups/dhcp_backups/current_backup # file path of latest dhcp backup (includes time-stamp file)
DHCP_SOURCE_CONTENT=/home/techlab/backups/dhcp_backups/current_backup/content # file path of latest dhcp backup content (excludes time-stamp file)
DHCP_VERSIONS=/home/techlab/Comp485/versions/dhcp_versions # file path to directory holding dhcp version history
DHCP_FIRST_VERSION=/home/techlab/Comp485/versions/dhcp_versions/version_1 # file path to directory containing most recent dhcp version created (includes time-stamp file)
DHCP_FIRST_VERSION_CONTENT=/home/techlab/Comp485/versions/dhcp_versions/version_1/content # file path to directory containing the content of the most recent dhcp version created (exlcludes time-stamp file)
DNS_LOGFILE=/home/techlab/Comp485/versions/dns_versions/versions_create.log # file path of dns version log file
DNS_SOURCE=/home/techlab/backups/dns_backups/current_backup # file path of latest dns backup (includes time-stamp file)
DNS_SOURCE_CONTENT=/home/techlab/backups/dns_backups/current_backup/content # file path of latest dns backup content (excludes time-stamp file)
DNS_VERSIONS=/home/techlab/Comp485/versions/dns_versions # file path to directory holding dns version history
DNS_FIRST_VERSION=/home/techlab/Comp485/versions/dns_versions/version_1 # file path to directory containing most recent dns version created (includes time-stamp file)
DNS_FIRST_VERSION_CONTENT=/home/techlab/Comp485/versions/dns_versions/version_1/content # file path to directory containing the content of the most recent dns version created (excludes time-stamp file)
FIREWALL_LOGFILE=/home/techlab/Comp485/versions/firewall_versions/versions_create.log # file path of firewall version log file
FIREWALL_SOURCE=/home/techlab/backups/firewall_backups/current_backup # file path of latest firewall backup (includes time-stamp file)
FIREWALL_SOURCE_CONTENT=/home/techlab/backups/firewall_backups/current_backup/content # file path of latest firewall backup content (excludes time-stamp file)
FIREWALL_VERSIONS=/home/techlab/Comp485/versions/firewall_versions # file path to directory holding firewall version history
FIREWALL_FIRST_VERSION=/home/techlab/Comp485/versions/firewall_versions/version_1 # file path to directory containing most recent firewall version created (includes time-stamp file)
FIREWALL_FIRST_VERSION_CONTENT=/home/techlab/Comp485/versions/firewall_versions/version_1/content # file path to directory containing the content of the most recent firewall version created (excludes time-stamp file)
DATE=$(date "+%m/%d/%Y %T") # NOTE(review): captured once at startup, so every log entry in one run shares this timestamp
SPACER=------------------------------------------------------------ # used to separate log entries
###################################################################################################
#                                   v version creation v                                          #
###################################################################################################
#######################################
# Create/refresh one rotated version history (5 slots, version_1 newest,
# version_5 oldest; version_5 is overwritten when a new version is added).
# The dhcp/dns/firewall sections were three near-identical copies, so the
# shared logic lives in this single function.
# Globals:
#   DATE   (read) - timestamp captured at script start
#   SPACER (read) - log entry separator
# Arguments:
#   $1 - log file path
#   $2 - latest backup directory (includes time-stamp file)
#   $3 - latest backup content directory (excludes time-stamp file)
#   $4 - directory holding the version history
#   $5 - version_1 directory (most recent version created)
#   $6 - version_1 content directory (excludes time-stamp file)
#######################################
create_version_history() {
	local logfile=$1
	local source_dir=$2
	local source_content=$3
	local versions_dir=$4
	local first_version=$5
	local first_version_content=$6
	local i
	# Make sure the five version slots exist.
	for i in 1 2 3 4 5
	do
		mkdir -p "${versions_dir}/version_$i"
	done
	touch "${logfile}"
	echo -e "$DATE : Starting...\n$DATE : Seeking source directory..." >> "${logfile}"
	if [ ! -d "${source_content}" ]; then
		echo -e "$DATE : Source directory not found! Unable to create new version." >> "${logfile}"
	else
		echo -e "$DATE : Source directory found.\n$DATE : Checking if any files in directory have been modified since last version creation..." >> "${logfile}"
		# Only create a new version when the latest backup content differs
		# from the newest stored version.
		if diff -r "${source_content}" "${first_version_content}" &>/dev/null; then
			echo -e "$DATE : No files in source directory have been modified since last version creation. No new version will be created." >> "${logfile}"
		else
			echo -e "$DATE : Files in source directory have been modified. Creating new version..." >> "${logfile}"
			# Rotate: 4->5, 3->4, 2->3, 1->2, then latest backup -> 1.
			cp -R "${versions_dir}"/version_4/* "${versions_dir}/version_5" 2>> "${logfile}"
			cp -R "${versions_dir}"/version_3/* "${versions_dir}/version_4" 2>> "${logfile}"
			cp -R "${versions_dir}"/version_2/* "${versions_dir}/version_3" 2>> "${logfile}"
			cp -R "${versions_dir}"/version_1/* "${versions_dir}/version_2" 2>> "${logfile}"
			cp -R "${source_dir}"/* "${first_version}" 2>> "${logfile}"
			echo -e "$DATE : New version created!" >> "${logfile}"
		fi
	fi
	echo -e "$DATE : Done.\n$SPACER" >> "${logfile}"
}
# dhcp, dns and firewall version creation (same logic, different paths).
create_version_history "$DHCP_LOGFILE" "$DHCP_SOURCE" "$DHCP_SOURCE_CONTENT" "$DHCP_VERSIONS" "$DHCP_FIRST_VERSION" "$DHCP_FIRST_VERSION_CONTENT"
create_version_history "$DNS_LOGFILE" "$DNS_SOURCE" "$DNS_SOURCE_CONTENT" "$DNS_VERSIONS" "$DNS_FIRST_VERSION" "$DNS_FIRST_VERSION_CONTENT"
create_version_history "$FIREWALL_LOGFILE" "$FIREWALL_SOURCE" "$FIREWALL_SOURCE_CONTENT" "$FIREWALL_VERSIONS" "$FIREWALL_FIRST_VERSION" "$FIREWALL_FIRST_VERSION_CONTENT"
####### gitlab
# Commit the refreshed version history and push it to the remote.
echo "starting push to github..." >> /home/techlab/Comp485/github.log
# NOTE(review): the 2>> redirect applies only to 'git push'; stderr from
# add/commit is not captured in github.log — confirm that is intended.
git add versions && \
git add -u && \
git commit -m "remote commit from 18.04 vm" && \
git push origin HEAD 2>> /home/techlab/Comp485/github.log
# end script
| true
|
c6319acd7124c2a0d04ad85c483844186afcc9c2
|
Shell
|
pivotal-bank/quotes-service
|
/ci/tasks/build-quotes-service/task.sh
|
UTF-8
| 264
| 2.578125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# CI task: build the quotes-service jar and stage the build outputs
# (versioned jar + manifest) for the next pipeline step.
set -e
pushd quotes-service
./gradlew clean assemble
# $() instead of backticks; version-number is written by the build.
VERSION=$(cat version-number)
popd
# Stage artifacts; set -e aborts the task if mkdir or either cp fails.
mkdir build-output/libs && cp "quotes-service/build/libs/$ARTIFACT_ID-$VERSION.jar" build-output/libs/.
cp quotes-service/build/manifest.yml build-output/.
| true
|
fd33730212f4777586101e6e2a4baa5a96a24e16
|
Shell
|
BJGSR47/https---github.com-paulcolby43-scripts
|
/dly_reset.sh
|
UTF-8
| 561
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/ksh
# dly_reset.sh
# Runs the dly_reset 4GL job from /home/prod/4gl, teeing its output to a
# log file. The mail notification at the bottom is currently commented
# out; NOTIFY and SUBJ are kept for when it is re-enabled.
cd /home/prod/4gl
LOG="../logs/dly_reset.log"
#NOTIFY=" barry@tranact.com \
# colby@tranact.com "
NOTIFY=" barry@tranact.com \
scott@tranact.com \
bj@tranact.com "
###############################################################################
# Start the log with a timestamp, then run the job.
date > $LOG
fglgo dly_reset |tee -a ${LOG}
echo "Finished dly_reset.4gl " >> ${LOG}
echo "See ${LOG} for information about process to database..." |tee -a ${LOG}
SUBJ="dly_reset.sh PROCESS LOG `hostname`"
# cat $LOG | awk 'sub("$", "\r")' | \
# mail -s "${SUBJ}" ${NOTIFY} < ${LOG}
exit
| true
|
b176737c6b64ffc8591f891e9f309466ac96e1c7
|
Shell
|
icedevil2001/personal-genomics-course-2017
|
/problem_sets/pset4_ngs/code/run_ps4_bwamem.sh
|
UTF-8
| 628
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
#SBATCH -A csd524
#SBATCH -p compute
#SBATCH -t 1000
#SBATCH --get-user-env
# SLURM batch job: align NA12878 paired-end reads (SRR622457) to the hs37d5
# reference with bwa mem, writing a read-group-tagged SAM file into DATADIR.
# Old Comet/oasis paths, kept for reference:
#DATADIR=/oasis/projects/nsf/csd524/mgymrek/data/ps4/
#INDEX=${DATADIR}/hs37d5.fa
DATADIR=/storage/mgymrek/cse291/ps4data/
INDEX=/storage/resources/dbase/human/hs37d5/hs37d5.fa
FQ1=${DATADIR}/SRR622457_1.fastq.gz
FQ2=${DATADIR}/SRR622457_2.fastq.gz
# -t 5: five alignment threads; -R adds the @RG header line downstream
# variant-calling tools require.
bwa mem -t 5 -R '@RG\tID:NA12878\tSM:NA12878' ${INDEX} ${FQ1} ${FQ2} > ${DATADIR}/NA12878.sam
# Convert to BAM, note sam is truncated but we'll just use a subset?
#cat ${DATADIR}/NA12878.sam | grep -v "SRR622457\.68907082" | \
# samtools view -bS - > ${DATADIR}/NA12878.bam
# Sort
| true
|
25e7f19df807373e8956f74b513d3beecd049b83
|
Shell
|
eqmvii/eqmvii-sys-mon
|
/scriptpractice
|
UTF-8
| 439
| 4.09375
| 4
|
[] |
no_license
|
#!/bin/bash -x
# Bash practice script: default a counter, count up to 5, then classify a
# word read from the user with a case statement.
echo -e "Current Value of Count: $count\n"
# If count is unset or empty, start the countdown at -5 (the original
# comment claimed 0, which contradicted the code). Quoting avoids the
# old unquoted `test -z $count`, which only worked by accident.
[ -z "${count}" ] && count=-5
while [[ "$count" -le 5 ]]; do
    echo "$count"
    count=$((count + 1))
done
read -p "enter a word > "
# Classify the reply: single letter, exactly three chars, *.txt, or other.
case "$REPLY" in
    [[:alpha:]]) echo "It was a single alpha character";;
    ???) echo "It was exactly three characters long";;
    *.txt) echo "is a word ending in '.txt'";;
    *) echo "Is something else!";;
esac
| true
|
172d3228a1d3ec0b9ec553ad858f81fda1fcd1d0
|
Shell
|
mzachariasz/sap-deployment-automation
|
/terraform/modules/terraform-google-lb-internal/examples/simple/nginx_upstream.sh.tpl
|
UTF-8
| 365
| 2.6875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -xe
# Terraform startup-script template: installs nginx and configures it as a
# reverse proxy to the templated UPSTREAM address (substituted by
# Terraform's template rendering before the instance runs this).
apt-get update
apt-get install -y nginx
# Write the proxy vhost; UPSTREAM below is filled in at template-render time.
cat - > /etc/nginx/sites-available/upstream <<EOF
server {
listen 80;
location / {
proxy_pass http://${UPSTREAM};
}
}
EOF
# Swap the default site for the proxy vhost and activate nginx.
unlink /etc/nginx/sites-enabled/default
ln -sf /etc/nginx/sites-available/upstream /etc/nginx/sites-enabled/upstream
systemctl enable nginx
systemctl reload nginx
| true
|
eb6f104d9cdaf8a2e4cbed3d1de6023c51ef8a64
|
Shell
|
notlcry/docker-elk
|
/setup.sh
|
UTF-8
| 1,180
| 2.6875
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
#!/bin/bash
# Bootstrap an ELK stack (Elasticsearch/Logstash/Kibana) via docker-compose,
# then apply initial Kibana index-pattern and Elasticsearch ILM config.
# Elasticsearch needs its data dir writable by the in-container user
# (uid/gid 1000).
echo "create elasticsearch data path"
mkdir -p /opt/es/data/
chown -R 1000:1000 /opt/es/data/
echo "starting elk stack docker containers"
docker-compose up -d
echo "elk stack docker containers finish"
# NOTE(review): a fixed 120s sleep is used to wait for Kibana; polling
# /api/status would be more reliable -- confirm before changing.
echo "sleep 120s for kibana init"
sleep 120
## add index-pattern for logstash
echo "create index pattern"
curl -XPOST -D- 'http://localhost:5601/api/saved_objects/index-pattern' \
    -H 'Content-Type: application/json' \
    -H 'kbn-version: 7.9.0' \
    -d '{"attributes":{"title":"logstash-*","timeFieldName":"@timestamp"}}'
## modify default lifecycle policy for 90day log rotation
echo "modify logstash lifecycle policy"
curl -XPUT "http://localhost:9200/_ilm/policy/logstash-policy" \
    -H 'Content-Type: application/json' \
    -d'{ "policy": { "phases": { "hot": { "min_age": "0ms", "actions": { "rollover": { "max_age": "7d", "max_size": "5gb" }, "set_priority": { "priority": null } } }, "delete": { "min_age": "90d", "actions": { "delete": { "delete_searchable_snapshot": true } } } } }}'
| true
|
6b6630038ee10a04563a26d871ead81ed5e955e5
|
Shell
|
juanmanavella/sa
|
/qcow2_backup.sh
|
UTF-8
| 779
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
## Simple script to backup live mounted qcow2 virtual hard disks
## using qemu-nbd. Just edit the paths below to fit your needs.

LOG=/var/log/105_backup.log
# NOTE(review): LOCAL_BACKUP is defined but unused and the comment below
# mentions rsync, yet only rdiff-backup to CLOUD_BACKUP runs -- confirm.
LOCAL_BACKUP=/mnt/12t/backup/rdiff-backup/105
CLOUD_BACKUP=/mnt/12t/cloud/rsync/105-pdc
VDEVICE=/dev/nbd5
VPARTITION=/dev/nbd5p1
VHDD=/mnt/12t/images/105/vm-105-disk-0.qcow2
LOCAL_MOUNT=/mnt/backup/105/
EXCLUDE=/root/rdiff-exclude.txt

## Runtime: rdiff-backup to a local path and the rsync to any mounted
## cloud backup.
date >> "$LOG"
modprobe nbd >> "$LOG"
# Attach the qcow2 image read-only as a network block device, then mount
# its first partition read-only for the backup run.
qemu-nbd -c "$VDEVICE" "$VHDD" -r >> "$LOG"
mount -o ro "$VPARTITION" "$LOCAL_MOUNT" >> "$LOG"
rdiff-backup --exclude-globbing-filelist "$EXCLUDE" "$LOCAL_MOUNT" "$CLOUD_BACKUP" >> "$LOG"
# BUG FIX: the original ran `umount LOCAL_MOUNT` (literal string, missing
# the $), so the unmount always failed and the image stayed mounted.
umount "$LOCAL_MOUNT" >> "$LOG"
killall qemu-nbd >> "$LOG"
printf "\n\n\n" >> "$LOG"
| true
|
b869eded807ba36420a8779603df8292c5dc73e5
|
Shell
|
Xercoy/my-k8s-stuff
|
/pki/generate-csrs.sh
|
UTF-8
| 2,885
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/bash
# note: use sh instead - https://stackoverflow.com/questions/3411048/unexpected-operator-in-shell-programming
#
# Generate cfssl-style CSR JSON files for the certificates a Kubernetes
# cluster needs (admin, kubelet, control-plane components, service account).
#
# Usage: generate-csrs.sh <cert-type|all> [node-name]
DESIRED_CERT_TYPE=$1
# NOTE(review): NODE_NAME ($2) is accepted but never used below; the
# kubelet branch derives its CN from $HOSTNAME instead -- confirm intended.
NODE_NAME=$2
TYPES="admin, kubelet, kube-controller-manager, kube-proxy, kube-scheduler, kube-api-server, service-account (requires node name as arg #2), kube-controller-manager"
CSR_COMMON_NAME=""
CSR_ORGANIZATION_NAME=""
DEFAULT_CSR=""

#######################################
# Write "<basename>-csr.json" containing a cfssl CSR request.
# Globals:   CSR_COMMON_NAME, CSR_ORGANIZATION_NAME (read)
# Arguments: $1 - output file basename
# Outputs:   echoes the generated JSON; writes <basename>-csr.json
#######################################
generate_csrs() {
    # TODO: make it possible to obtain the config from a file
    CSR_FILE_NAME=$1

    # read -d '' slurps the whole heredoc into CSR; it always returns
    # non-zero at EOF, which is harmless here (the script has no `set -e`).
    read -r -d '' CSR <<EOM
{
  "CN": "${CSR_COMMON_NAME}",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "USA",
      "L": "Los Angeles",
      "O": "${CSR_ORGANIZATION_NAME}",
      "OU": "devops",
      "ST": "California"
    }
  ]
}
EOM

    echo "${CSR}"
    echo "${CSR}" > "${CSR_FILE_NAME}-csr.json"
}

if [[ "${DESIRED_CERT_TYPE}" == "" ]]; then
    echo "desired cert type required; use 'all' to generate all of them. valid types: ${TYPES}"
    # BUG FIX: the original bare `exit` returned status 0, so callers could
    # not detect the misuse; exit non-zero instead.
    exit 1
fi

# admin access (system:masters grants cluster-admin)
if [[ "${DESIRED_CERT_TYPE}" == "admin" || "${DESIRED_CERT_TYPE}" == "all" ]]; then
    CSR_COMMON_NAME="admin"
    CSR_ORGANIZATION_NAME="system:masters"
    # (the old dead CSR_FILE_NAME="admin-csr.json" assignment was removed;
    # the function derives the file name from its argument)
    generate_csrs admin
fi

# kubelet (node certs use the system:node:<host> naming scheme)
if [[ "${DESIRED_CERT_TYPE}" == "kubelet" || "${DESIRED_CERT_TYPE}" == "all" ]]; then
    CSR_COMMON_NAME="system:node:${HOSTNAME}"
    CSR_ORGANIZATION_NAME="system:nodes"
    generate_csrs kubelet
fi

# kube-controller-manager
if [[ "${DESIRED_CERT_TYPE}" == "kube-controller-manager" || "${DESIRED_CERT_TYPE}" == "all" ]]; then
    CSR_COMMON_NAME="system:kube-controller-manager"
    CSR_ORGANIZATION_NAME="system:kube-controller-manager"
    generate_csrs kube-controller-manager
fi

# kube-proxy
if [[ "${DESIRED_CERT_TYPE}" == "kube-proxy" || "${DESIRED_CERT_TYPE}" == "all" ]]; then
    CSR_COMMON_NAME="system:kube-proxy"
    CSR_ORGANIZATION_NAME="system:node-proxier"
    generate_csrs kube-proxy
fi

# kube-scheduler
if [[ "${DESIRED_CERT_TYPE}" == "kube-scheduler" || "${DESIRED_CERT_TYPE}" == "all" ]]; then
    CSR_COMMON_NAME="system:kube-scheduler"
    CSR_ORGANIZATION_NAME="system:kube-scheduler"
    generate_csrs kube-scheduler
fi

# kube-api-server
if [[ "${DESIRED_CERT_TYPE}" == "kube-api-server" || "${DESIRED_CERT_TYPE}" == "all" ]]; then
    CSR_COMMON_NAME="kubernetes"
    CSR_ORGANIZATION_NAME="Kubernetes"
    generate_csrs kubernetes #kube-api-server
fi

# service account
if [[ "${DESIRED_CERT_TYPE}" == "service-account" || "${DESIRED_CERT_TYPE}" == "all" ]]; then
    CSR_COMMON_NAME="service-accounts"
    CSR_ORGANIZATION_NAME="Kubernetes"
    generate_csrs service-account
fi

echo "CSR generation complete"
| true
|
83edfc6367cc1123ea1e6e556de94df2b6bd6864
|
Shell
|
hpe-container-platform-community/kubedirector-lab
|
/packer/install-git-client.sh
|
UTF-8
| 408
| 2.78125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Build and install git 2.27.0 from source on a yum-based system: the
# distro-packaged git is removed and a recent release is compiled into /usr.
yum groupinstall -y 'Development Tools'
yum install -y gettext-devel openssl-devel perl-CPAN perl-devel zlib-devel curl-devel
# Theia requires a recent version of git
yum -y remove git*
yum -y install wget
export VER="2.27.0"
wget https://github.com/git/git/archive/v${VER}.tar.gz
tar -xvf v${VER}.tar.gz
rm -f v${VER}.tar.gz
# Enter the extracted source tree (git-<VER>) and run the standard build;
# --prefix=/usr puts the new git where the system one lived.
cd git-*
make configure
./configure --prefix=/usr
make
make install
| true
|
1f741ad6f0175d5ffef354be9cb6d3b9b027d111
|
Shell
|
tking53/pixie_scan-loaders
|
/094rb-loaders/segment-archiver.bash
|
UTF-8
| 142
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/bash
# Archive each detector segment (1..10) of run 094rb_14 into its own tarball.
for ((seg = 1; seg <= 10; seg++)); do
	prefix="094rb_14-seg-${seg}."
	echo "$prefix"
	archive="${prefix}tar"
	# The glob must stay outside the quotes so it expands to the
	# segment's files when tar runs.
	tar -acvf "$archive" "$prefix"* #--remove-files
done
| true
|
da01df633cf9fe8da1ef097ce5af50f07095bf88
|
Shell
|
stdob/json_to_relation
|
/scripts/backupEdx.sh
|
UTF-8
| 5,673
| 3.40625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
# Copyright (c) 2014, Stanford University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#!/usr/bin/env bash

# Run simple, full backup for all the relevant OpenEdX databases:
#    Edx
#    EdxPrivate
#    EdxForum
#    EdxPiazza
# If used outside of Stanford: change the target disk,
# which at Stanford is /lfs/datastage/1/MySQLBackup/
#
# NOTE: will ask for sudo pwd, which limits running by cron

usage="Usage: $(basename "$0") [-u username][-p[pwd]]"

USERNAME=$(whoami)
PASSWD=''
askForPasswd=false

# ------------------- Process Commandline Option -----------------

# Check whether given -pPassword, i.e. fused -p with a pwd string:
for arg in "$@"
do
    # sed -r enables extended regex; -n prints only on match, so PASSWD
    # stays empty unless this arg looks like -pSomething:
    PASSWD=$(echo "$arg" | sed -r -n 's/-p(.+)/\1/p')
    if [ -z "$PASSWD" ]
    then
        continue
    else
        #echo "Pwd is:"$PASSWD
        break
    fi
done

# Keep track of number of optional args the user provided:
NEXT_ARG=0

while getopts ":u:p" opt
do
  case $opt in
    u) # look in given user's HOME/.ssh/ for mysql_root
      USERNAME=$OPTARG
      NEXT_ARG=$((NEXT_ARG + 2))
      ;;
    p) # ask for mysql root pwd
      askForPasswd=true
      NEXT_ARG=$((NEXT_ARG + 1))
      ;;
    \?)
      # If $PASSWD is set, we *assume* that the unrecognized option was a
      # fused -pMyPassword, and quietly ignore it:
      if [ ! -z "$PASSWD" ]
      then
          continue
      else
          # BUG FIX: this used to echo the undefined variable $USAGE
          # (printing only a blank line); the message lives in $usage.
          echo "$usage"
          exit 1
      fi
      ;;
    :)
      echo "$usage"
      exit 1
      ;;
  esac
done

# Shift past all the optional parms:
shift ${NEXT_ARG}

if $askForPasswd && [ -z "$PASSWD" ]
then
    # The -s option suppresses echo:
    read -s -p "Password for $USERNAME on MySQL server: " PASSWD
    echo
elif [ -z "$PASSWD" ]
then
    if [ "$USERNAME" == "root" ]
    then
        # Get home directory of whichever user will log into MySQL:
        HOME_DIR=$(getent passwd $(whoami) | cut -d: -f6)
        # If the home dir has a readable file called mysql_root in its .ssh
        # subdir, then pull the pwd from there:
        if test -f $HOME_DIR/.ssh/mysql_root && test -r $HOME_DIR/.ssh/mysql_root
        then
            PASSWD=$(cat $HOME_DIR/.ssh/mysql_root)
        fi
    else
        # Get home directory of whichever user will log into MySQL:
        HOME_DIR=$(getent passwd $USERNAME | cut -d: -f6)
        # Same lookup, but non-root users keep the pwd in .ssh/mysql:
        if test -f $HOME_DIR/.ssh/mysql && test -r $HOME_DIR/.ssh/mysql
        then
            PASSWD=$(cat $HOME_DIR/.ssh/mysql)
        fi
    fi
fi

# Create the mysql call password option:
if [ -z "$PASSWD" ]
then
    pwdOption=''
else
    pwdOption=$PASSWD
fi

# Create new directory with name including current date and time, e.g.
# "backupEdx_Fri_Jun_20_08:54:42_PDT_2014". The unquoted $(date) inside
# echo deliberately collapses double spaces (single-digit days) before sed
# turns the remaining spaces into underscores.
newDir=/lfs/datastage/1/MySQLBackup/backupEdx_$(echo $(date) | sed -e 's/[ ]/_/g')
#echo $newDir

# The following will ask for sudo PWD, which limits
# automatic run for now. Need to fix this:
sudo mkdir $newDir

# Use mysqlhotcopy to grab one MySQL db at a time:
echo "Backing up Edx db..."
sudo time mysqlhotcopy --user=$USERNAME --password="$pwdOption" Edx $newDir  # ~3hrs
echo "Backing up EdxForum db..."
sudo time mysqlhotcopy --user=$USERNAME --password="$pwdOption" EdxForum $newDir # instantaneous
echo "Backing up EdxPiazza db..."
sudo time mysqlhotcopy --user=$USERNAME --password="$pwdOption" EdxPiazza $newDir # instantaneous
echo "Backing up EdxPrivate db..."
sudo time mysqlhotcopy --user=$USERNAME --password="$pwdOption" EdxPrivate $newDir # ~3min
| true
|
b29740c7a809f28eb373b9a6f065136d1ae9b602
|
Shell
|
fargusplumdoodle/ICS199-MLIK
|
/moveToDeepblue.sh
|
UTF-8
| 408
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/bash
# Package public_html and deploy it to whichever hosts answer a ping:
# the local mlik box and the deepblue server.
git pull
# NOTE: despite the .gz name this is a plain (uncompressed) tar archive.
tar cvf public_html.tar.gz public_html/

ping -c 1 mlik.ra
# BUG FIX: this used to test `[ 1 -eq 0 ]` (always false), so the local
# mlik host was never deployed to; check ping's exit status like the
# deepblue branch below does.
if [ $? -eq 0 ]
then
    scp public_html.tar.gz root@mlik.ra:public_html.tar.gz
    ssh root@mlik.ra '~/unpack.sh'
else
    echo 'local mlik not pingable'
fi

ping -c 1 deepblue.cs.camosun.bc.ca
if [ $? -eq 0 ]
then
    scp public_html.tar.gz cst166@deepblue.cs.camosun.bc.ca:public_html.tar.gz
    ssh cst166@deepblue.cs.camosun.bc.ca '~/unpack.sh'
else
    echo 'Deepblue not pingable'
fi
| true
|
2bcdb0484de365fbc7a29c79feec90d451c65af8
|
Shell
|
MainShayne233/myspace
|
/bin/deploy
|
UTF-8
| 318
| 2.8125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Deploy the production build to Heroku from a throwaway `deploy` branch:
# build on top of master, force-add the build output, push, then clean up.
set -CEeuo pipefail
IFS=$'\n\t'
shopt -s extdebug

deploy_to_heroku() {
  git switch -c deploy
  git reset --hard master
  bin/build
  # build/ is normally gitignored; --force stages it for the deploy commit.
  git add --force build
  git commit -m "Add production build files"
  git push heroku +deploy:master
  # Return to master and discard the temporary branch.
  git checkout master
  git branch -D deploy
}

deploy_to_heroku "$@"
| true
|
61e5b798023dad20322fc198a244c3a3dcf50fd8
|
Shell
|
debdeepbh/archyhome
|
/.myscr/vim
|
UTF-8
| 478
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/sh
# Launching each vim instance in servermode
# Make sure the path /usr/bin/vim is replaced by the current vim installion
# Editor selector: only 'vim' and 'nvim' are handled, and the choice is
# hardcoded below (the nvim branch is currently unreachable).
choice='vim'
case $choice in
	'vim' )
	path='/usr/bin/vim'
	## Launch vim in server mode
	# --servername lets later invocations target this instance via
	# vim --remote; all script arguments are forwarded.
	exec $path --servername vim "$@"
	;;
	'nvim' )
	path='/usr/bin/nvim'
	## Launch neovim in servermode
	#exec $nvimpath --listen /tmp/nvimserver "$@"
	## No need to start in servermode since it already runs in servermode
	exec $path "$@"
	;;
esac
| true
|
27e07daa760aefb3f7cb4b162d596e79469571ef
|
Shell
|
publiux/aviationCharts
|
/unzip_and_normalize.sh
|
UTF-8
| 3,958
| 4.15625
| 4
|
[] |
no_license
|
#!/bin/bash
# Unzip every aviation-chart .tif from .zip archives under a charts root,
# then copy/rename the georeferenced ones into a "normalized" directory.
set -o errexit
set -o pipefail
set -o nounset
# set -o xtrace
IFS=$(printf '\n\t') # IFS is newline or tab

main() {
    # Get the number of function parameters
    local -r NUMARGS=$#

    # Get the base directory of where source charts are
    local -r chartsRoot=$(readlink -f "$1")

    # Set our destination directories
    local -r UNZIP_DESTINATION_ABSOLUTE_PATH=$(readlink -f "${chartsRoot}/1_all_tifs/")
    local -r NORMALIZED_FILE_DESTINATION_ABSOLUTE_PATH=$(readlink -f "${chartsRoot}/2_normalized/")

    # Unzip every .tif in every .zip, overwriting older files when needed
    unzip_freshen "${chartsRoot}" "${UNZIP_DESTINATION_ABSOLUTE_PATH}"

    # copy/move/link georeferenced tifs another directory with normalized names
    normalize "${UNZIP_DESTINATION_ABSOLUTE_PATH}" "${NORMALIZED_FILE_DESTINATION_ABSOLUTE_PATH}"
}

#######################################
# Unzip (freshen) all *.tif members of every *.zip under a root directory.
# Arguments: $1 - charts root, $2 - unzip destination
#######################################
unzip_freshen() {
    local -r NUMARGS=$#

    # BUG FIX: the validation used to echo and fall through; now it aborts.
    if [ "$NUMARGS" -ne 2 ] ; then
        echo "Bad unzip parameters" >&2
        exit 1
    fi

    local -r chartsRoot="$1"
    local -r UNZIP_DESTINATION_ABSOLUTE_PATH="$2"

    mkdir --parents "${UNZIP_DESTINATION_ABSOLUTE_PATH}"

    # Unzip any .tif file in any .zip file in the supplied directory;
    # -u/-o freshen existing files, -j flattens archive paths.
    echo "Unzipping all .zip files under ${chartsRoot} to ${UNZIP_DESTINATION_ABSOLUTE_PATH}"
    find "${chartsRoot}" \
        -type f \
        -iname "*.zip" \
        -exec unzip -uo -j -d "${UNZIP_DESTINATION_ABSOLUTE_PATH}" "{}" "*.tif" \;
}

#######################################
# Copy georeferenced .tifs into the destination with sanitized names
# (non-alphanumerics -> _, trailing series number stripped).
# Arguments: $1 - unzip destination, $2 - normalized destination
#######################################
normalize() {
    local -r NUMARGS=$#

    # BUG FIX: the validation used to echo and fall through; now it aborts.
    if [ "$NUMARGS" -ne 2 ] ; then
        echo "Bad number of normalize parameters" >&2
        exit 1
    fi

    local -r UNZIP_DESTINATION_ABSOLUTE_PATH="$1"
    local -r NORMALIZED_FILE_DESTINATION_ABSOLUTE_PATH="$2"

    # Where we'll put normalized files
    mkdir --parents "${NORMALIZED_FILE_DESTINATION_ABSOLUTE_PATH}"

    # All of the .tif files in the source directory. The pattern is stored
    # quoted and expands at the unquoted ${CHART_ARRAY[@]} below; the
    # newline/tab IFS keeps names with spaces intact.
    local -r CHART_ARRAY=("${UNZIP_DESTINATION_ABSOLUTE_PATH}/*.tif")

    echo "Normalize and copy"

    for SOURCE_CHART_ABSOLUTE_NAME in ${CHART_ARRAY[@]}
    do
        # Does this file have georeference info?
        if gdalinfo "$SOURCE_CHART_ABSOLUTE_NAME" -noct | grep -q -P 'PROJCS'
        then
            # Replace non-alpha characters with _ and
            # then strip off the series number and add .tif back on
            local SANITIZED_CHART_NAME_WITHOUT_VERSION=($(basename $SOURCE_CHART_ABSOLUTE_NAME |
                sed --regexp-extended 's/\W+/_/g'                |
                sed --regexp-extended 's/_[0-9]+_tif$/\.tif/ig'  |
                sed --regexp-extended 's/_tif$/\.tif/ig'))

            # Copy/move/link this file if it's newer than what is already there
            mv \
                --update \
                --verbose \
                "$SOURCE_CHART_ABSOLUTE_NAME" \
                "${NORMALIZED_FILE_DESTINATION_ABSOLUTE_PATH}/${SANITIZED_CHART_NAME_WITHOUT_VERSION}"
        fi
    done

    echo "Finished Normalize and copy"
}

USAGE() {
    echo "Unzip and normalize chart names"
    # FIX: the script takes exactly one argument (validated below); the old
    # usage text wrongly advertised a second <normalized_directory> arg.
    echo "Usage: $PROGNAME <charts_root_directory>" >&2
    exit 1
}

# The script begins here
# Set some basic variables
declare -r PROGNAME=$(basename "$0")
declare -r PROGDIR=$(readlink -m $(dirname "$0"))
declare -r ARGS="$@"

# Set fonts for Help.
declare -r NORM=$(tput sgr0)
declare -r BOLD=$(tput bold)
declare -r REV=$(tput smso)

#Get the number of remaining command line arguments
NUMARGS=$#

#Validate number of command line parameters
if [ "$NUMARGS" -ne 1 ] ; then
    USAGE
fi

# Call the main routine
main "$@"
exit 0
| true
|
911b7c96ab6d87dc8160647d5f2588081513c64d
|
Shell
|
rsmeurer0/scripts
|
/gdb/continue_after_break.sh
|
UTF-8
| 205
| 2.75
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Insert a gdb `commands / continue / end` stanza after every address
# breakpoint line so gdb logs each hit breakpoint and keeps running.
# Usage: continue_after_break.sh <gdb-script>
FILE="$1"
# \0 replays the matched `break *0x...` line, then the injected block
# follows on new lines. Quote the path so filenames with spaces work.
sed -i -e 's/break \(\*0x[0-9a-f]*\).*/\0\n commands\ncontinue\n end/' "$FILE"
| true
|
04371fa9ca4505c6319bc9bc7e83cdf9c5137e9d
|
Shell
|
Michael137/os
|
/cs246/hw2/quantum/sweep.bash
|
UTF-8
| 1,074
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/bash -e
# Adaptive file names: <name>.PTSIZE_HHRTSIZE
# Parameter sweep: for each table-size exponent 7..13, fill in the Condor
# submit templates (PLACEHOLDER -> size, SCRIPT -> run script, ARGUMENTS ->
# tool args) and submit 1-bit, 2-bit, and adaptive predictor runs against
# the libquantum benchmark. Note -e in the shebang: any failed sed or
# condor_submit aborts the sweep.
for i in `seq 7 13`
do
sed "s|PLACEHOLDER|${i}|g" sweep.condor.template > "sweep.submit.1bit.${i}"
sed -i "s|SCRIPT|run-1bit|g" "sweep.submit.1bit.${i}"
sed -i "s|ARGUMENTS|${i} out/tool_1bit_${i}.out /usr/local/benchmarks/libquantum_O3 400 25|g" "sweep.submit.1bit.${i}"
condor_submit "sweep.submit.1bit.${i}"
sed "s|PLACEHOLDER|${i}|g" sweep.condor.template > "sweep.submit.2bit.${i}"
sed -i "s|SCRIPT|run-2bit|g" "sweep.submit.2bit.${i}"
sed -i "s|ARGUMENTS|${i} out/tool_2bit_${i}.out /usr/local/benchmarks/libquantum_O3 400 25|g" "sweep.submit.2bit.${i}"
condor_submit "sweep.submit.2bit.${i}"
# The adaptive predictor sweeps both table sizes (i x j grid).
for j in `seq 7 13`
do
sed "s|PLACEHOLDER|${i}_${j}|g" sweep.condor.adaptive.template > "sweep.submit.adaptive.${i}_${j}"
sed -i "s|SCRIPT|run-adaptive|g" "sweep.submit.adaptive.${i}_${j}"
sed -i "s|ARGUMENTS|${i} $j out/tool_adaptive_${i}_${j}.out /usr/local/benchmarks/libquantum_O3 400 25|g" "sweep.submit.adaptive.${i}_${j}"
condor_submit "sweep.submit.adaptive.${i}_${j}"
done
done
| true
|
344eabafd196026a08c6dc954a843878e5851a0f
|
Shell
|
sedovandrew/microservices
|
/create-docker-machine4swarm.sh.example
|
UTF-8
| 805
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision a Docker Swarm on Google Compute Engine: one manager plus three
# workers, then init the swarm, join the workers, label the manager, and
# run the environment's deploy script.
MASTER=master-1
WORKERS="worker-1 worker-2 worker-3"
machines="$MASTER $WORKERS"
ENV=DEV

for machine in $machines; do
    # FIX: replace <ENTER-YOUR-PROJECT-NAME> with your GCP project id.
    # (This reminder used to sit after the line-continuation backslash,
    # which split the docker-machine command in two and broke it.)
    docker-machine create --driver google \
        --google-project <ENTER-YOUR-PROJECT-NAME> \
        --google-zone europe-west1-b \
        --google-machine-type g1-small \
        --google-machine-image $(gcloud compute images list --filter ubuntu-1604-lts --uri) \
        $machine
done

# Initialize the swarm on the master and capture the worker join command.
eval $(docker-machine env $MASTER)
docker swarm init
docker_swarm_join=$(docker swarm join-token worker | grep "docker swarm join")

# Point the docker client at each worker in turn and join it to the swarm.
for machine in $WORKERS; do
    eval $(docker-machine env $machine)
    eval $docker_swarm_join
done

# Back on the master: label it for placement constraints and deploy.
eval $(docker-machine env $MASTER)
docker node update --label-add reliability=high $MASTER
./deploy_$ENV.sh
| true
|
38a225ddc85afa64ea6c93ea5214b38f97a5abf6
|
Shell
|
ccsf-dt-service-desk/ccsf-jss
|
/forcePWchange.sh
|
UTF-8
| 1,032
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
#########################################################################################################################################################
#
# ABOUT THIS SCRIPT
#
# NAME
#   forcePWchange.sh
#
# TRIGGER
#   Run by Jamf as sudo
#
#########################################################################################################################################################
#
#
# HISTORY
#
#   REVISION 1.0
#
#   - Jake Bilyak, City & County of San Francisco Department of Technology, 10/16/2018
#
#########################################################################################################################################################

# This script forces the user named in JSS parameter 4 to change their
# password upon next login.

# Define Variable
userToModify=$4 #Enter local username in JSS Parameter Value 4

# Execute PW Policy Command; the username is quoted so values containing
# spaces are passed to pwpolicy as a single argument.
pwpolicy -u "$userToModify" -setpolicy "newPasswordRequired=1"
echo $? # 0 => password policy applied successfully.
| true
|
327d0879a03178caafc34bee5449c6dbdde77312
|
Shell
|
oguzy/crowbar-build
|
/git.sh
|
UTF-8
| 1,228
| 2.546875
| 3
|
[] |
no_license
|
# Crowbar build bootstrap: seed the build cache (ISO + Dell RAID tools),
# clone crowbar, sync its barclamps, and switch to the openstack build branch.
mkdir -p ~/.crowbar-build-cache/iso
cd ~/.crowbar-build-cache/iso
cd ~
# If the Ubuntu installer ISO is already in $HOME, move it into the cache
# and fetch the LSI RAID utilities the dell_raid barclamp needs.
if [ -f ubuntu-12.04.3-server-amd64.iso ]
then
mv ubuntu-12.04.3-server-amd64.iso ~/.crowbar-build-cache/iso
mkdir -p ~/.crowbar-build-cache/barclamps/dell_raid/files/dell_raid/tools/
cd ~/.crowbar-build-cache/barclamps/dell_raid/files/dell_raid/tools/
wget http://www.lsi.com/downloads/Public/Host%20Bus%20Adapters/Host%20Bus%20Adapters%20Common%20Files/SAS_SATA_6G_P16/SAS2IRCU_P16.zip
wget http://www.lsi.com/downloads/Public/MegaRAID%20Common%20Files/8.07.07_MegaCLI.zip
fi
cd ~
git clone https://github.com/crowbar/crowbar.git
cd ~/crowbar
# --no-github: use local clones instead of forking on GitHub.
./dev setup --no-github
git config --global user.name "Oguz Yarimtepe"
git config --global user.email "oguzyarimtepe@gmail.com"
./dev fetch --no-github
# Put every barclamp on master and drop the placeholder README files.
cd barclamps
for bc in *; do (cd "$bc"; git checkout master; git reset HEAD README.empty-branch); done
cd ..
find ./ -name README.empty-branch -delete
./dev sync --no-github
# clean up any .empty-branch files first
cd ~/crowbar/barclamps
for bc in *; do (cd "$bc"; git clean -f -x -d 1>/dev/null 2>&1; git reset --hard 1>/dev/null 2>&1); done
cd ~/crowbar/
./dev switch roxy/openstack-os-build
# run sledge hammer seperatelly
#cd ~/crowbar
#sudo ./build_sledgehammer.sh
| true
|
510f39dead98e59c9f64f190652e9f03dd75e83b
|
Shell
|
brando91/STAN
|
/scripts/deploy/release.sh
|
UTF-8
| 555
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# Build STAN and stage a versioned deploy package under todeploy/<release>.
# Usage: release.sh <release-version>
set -e

relative_path=$(dirname "$0")
root=$(cd "$relative_path/../.."; pwd)
project=$root/stan
release=$1

# Refuse to run without a release name: with an empty $release, $todeploy
# would point at the todeploy/ root and the rm -rf below would wipe it.
if [ -z "$release" ]; then
    echo "usage: $(basename "$0") <release-version>" >&2
    exit 1
fi

todeploy=$root/todeploy/$release

"$root/scripts/build.sh"

echo "******* Preparing Deploy Package $release *******"
rm -rf "$todeploy"
mkdir -p "$todeploy"
cp "$project/stan" "$todeploy"
cp "$project/stan.jar" "$todeploy"
cp -r "$project/templates" "$todeploy"
cp -r "$project/assets" "$todeploy"
cp -r "$project/scripts" "$todeploy"
cp "$project/log4j.properties" "$todeploy"
# Record the release id and build timestamp alongside the artifacts.
echo "$release" "$(date)" > "$todeploy/version"
chmod 775 -R "$todeploy"
echo "******* Done *******"
| true
|
1aa8f768eabf489aa10438eeb5013570a7ca5de6
|
Shell
|
MusicScience37/cpp-cmake-doxygen
|
/test/run_test.sh
|
UTF-8
| 3,210
| 3.046875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh -eu
# Copyright 2019 MusicScience37 (Kenta Kabashima)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# run tests in a docker container
# CI pipeline: build+test with gcc (lcov coverage) and clang (llvm-cov
# coverage), then generate PlantUML diagrams, doxygen docs, and a
# clang-tidy report converted to JUnit/HTML. -eu aborts on any failure.
# move to the test directory
cd $(dirname $0)

echo ""
echo ">> build with cmake and gcc"
echo ""
rm -rf build
mkdir -p build/gcc
cd build/gcc
cmake -DCMAKE_EXPORT_COMPILE_COMMANDS=ON ../..
cmake --build .
ctest -V .

echo ""
echo ">> collect coverage with lcov (and gcov)"
echo ""
COV=./coverage/coverage.info
HTML=./coverage/html
ROOT=$(realpath $(dirname $0))/src
mkdir coverage
# capture, then keep only src/ and drop Test/ before listing + HTML report
lcov --rc lcov_branch_coverage=1 --directory ./ --capture --output-file $COV
lcov --rc lcov_branch_coverage=1 --extract $COV "${ROOT}/*" --output-file $COV
lcov --rc lcov_branch_coverage=1 --remove $COV "*/Test/*" --output-file $COV
lcov --rc lcov_branch_coverage=1 --list $COV
genhtml --rc lcov_branch_coverage=1 --output-directory $HTML $COV

echo ""
echo ">> build with cmake and clang"
echo ""
cd ../
mkdir clang
cd clang
cmake -DCMAKE_EXPORT_COMPILE_COMMANDS=ON \
-DCMAKE_CXX_COMPILER=clang++ ../..
cmake --build .
mkdir coverage
# each test process writes its own .profraw file (%p = pid)
export LLVM_PROFILE_FILE=$(pwd)/coverage/coverage_%p.profraw
ctest -V .

echo ""
echo ">> collect coverage with llvm-profdata llvm-cov"
echo ""
cd coverage
llvm-profdata merge -o coverage.profdata coverage_*.profraw
llvm-cov show -instr-profile=coverage.profdata \
-object ../src/libtest_add.so -object ../src/test_add_test \
-ignore-filename-regex='Test/*' \
> coverage.txt
llvm-cov show -instr-profile=coverage.profdata \
-object ../src/libtest_add.so -object ../src/test_add_test \
-ignore-filename-regex='Test/*' \
-format=html -output-dir=html
llvm-cov report -instr-profile=coverage.profdata \
-object ../src/libtest_add.so -object ../src/test_add_test \
-ignore-filename-regex='Test/*' \
| tee coverage_summary.txt
# last field of the report's final non-empty line is the line-coverage %
line_cov=$(cat coverage_summary.txt | awk '{ if (NF > 0) { last = $NF } } END { print last }')
echo "Line Coverage: $line_cov"
cd ../../..

echo ""
echo ">> PlantUML"
echo ""
mkdir build/uml
java -jar ${PLANTUML_JAR_PATH} uml/test_sequence.puml -o $(pwd)/build/uml -tsvg

echo ""
echo ">> doxygen"
echo ""
doxygen

echo ""
echo ">> clang-tidy"
echo ""
mkdir build/clang-tidy
clang-tidy -checks=* -p=build/gcc/compile_commands.json src/add.cpp \
| tee build/clang-tidy/clang-tidy.log

echo ""
echo ">> python-pip"
echo ""
cd build
git clone https://github.com/PSPDFKit-labs/clang-tidy-to-junit.git clang-tidy-to-junit
cat clang-tidy/clang-tidy.log \
| python3 clang-tidy-to-junit/clang-tidy-to-junit.py $(realpath $(dirname $0)) \
> clang-tidy/clang-tidy-junit.xml
pip3 install junit2html
python3 -m junit2htmlreport clang-tidy/clang-tidy-junit.xml clang-tidy/clang-tidy-junit.html
| true
|
cada3810f95b9df005dc1093c9352ecf25241602
|
Shell
|
dustinrhollar/portfolio
|
/LowPolyTerrainGen/run.sh
|
UTF-8
| 349
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
# Launch the debug build, resolving this script's own directory first so the
# binary is found regardless of the caller's working directory.

# Get the target machine (Linux / Mac). NOTE: machine.sh is resolved
# relative to the caller's cwd, so run this from the project root.
MACHINE=$(./scripts/machine.sh)

if [[ "$MACHINE" == "Linux" ]]; then
    # GNU readlink -f resolves symlinks to an absolute path.
    ME="$(readlink -f "$0")"
    LOCATION="$(dirname "$ME")"
elif [[ "$MACHINE" == "Mac" ]]; then
    # macOS needs GNU readlink from coreutils (greadlink).
    ME="$(greadlink -f "$0")"
    LOCATION="$(dirname "$ME")"
fi

# Quoted so paths containing spaces (common on macOS) don't word-split.
pushd "$LOCATION/bin/debug"
./MapleVk.exe
popd
| true
|
fd26db909d2046213a2ab4304e2a1df65bd9384b
|
Shell
|
open-power/HTX
|
/packaging/ubuntu/DEBIAN/preinst
|
UTF-8
| 439
| 3.140625
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# Debian maintainer script (preinst) for the HTX package: ensure the 'htx'
# group and user exist before the package files are unpacked.
set -e
case "$1" in
install)
# Create group 'htx' sharing gid 0 (-o permits the non-unique gid).
if ! id -g htx > /dev/null 2>&1 ; then groupadd htx -o -g 0 ; fi
sync
sleep 1
# Create user 'htx' (uid 0, non-unique) with home /usr/lpp/htx; the
# temporary home dir is removed again so the package can install into it.
if ! id htx > /dev/null 2>&1 ; then mkdir -p /usr/lpp/htx; useradd -g htx -d /usr/lpp/htx -s /bin/bash -u 0 -o htx ; rm -rf /usr/lpp/htx 2>/dev/null; fi
;;
abort-upgrade|abort-remove|abort-deconfigure)
echo "$1"
;;
*)
# NOTE(review): \$1 is escaped, so the literal text "$1" is printed rather
# than the argument value, and the script exits 0 where the stock Debian
# template exits 1 -- confirm both are intentional before changing.
echo "preinst called with unknown argument \`\$1'" >&2
exit 0
;;
esac
exit 0
| true
|
45e912852a3b57077d0f1988eedb9732ae24b777
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/dcled/PKGBUILD
|
UTF-8
| 799
| 2.875
| 3
|
[] |
no_license
|
# Maintainer: Gunther Schulz <forum (at) guntherschulz.de>
# PKGBUILD for dcled, built from the git master of kost's fork.
pkgname=dcled
pkgver=stable.1.9.kost.r4.gc819fec
_pkgver=1.9
pkgrel=2
pkgdesc='dcled - userland driver for Dream Cheeky (Dream Link?) USB LED Message Board'
url='https://github.com/kost/dcled'
license=('GPL')
arch=('i686' 'x86_64')
depends=('libhid'
)
makedepends=('make'
	     'git')
provides=("$pkgname=$pkgver")
source=("${pkgname}::git://github.com/kost/dcled.git")
md5sums=('SKIP')

# Derive the VCS package version from the latest tag plus commit count/hash,
# normalized to makepkg's allowed characters.
pkgver() {
  cd $pkgname
  git describe --long --tags | sed 's/^final-//;s/\([^-]*-g\)/r\1/;s/-/./g;s/_/./g'
}

build() {
  cd $pkgname
  make
}

# Install the binary plus the udev rule that grants device access.
package() {
  cd $pkgname
  install -Dm755 $srcdir/$pkgname/dcled \
    "$pkgdir/usr/bin/dcled"
  install -Dm644 $srcdir/$pkgname/40-dcled.rules \
    "$pkgdir/etc/udev/rules.d/40-dcled.rules"
}
| true
|
3c059f4f09783af591d62c7cc03bd4573757cc6f
|
Shell
|
aalzubidy/ubuntuInstall
|
/install.sh
|
UTF-8
| 2,029
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
# Post-install setup for an Ubuntu desktop: classic shell, media codecs,
# and a batch of commonly used applications (some steps are interactive
# because several apt-get calls omit -y).
echo "Starting install script"

#Flashback to Classic Ubuntu
echo "Flashback to Classic Ubuntu"
sudo apt-get update
sudo apt-get install gnome-session-flashback

#Disable Recenet Documents
echo "Disable Recenet Documents"
# chattr +i makes the recently-used list immutable so it can't be updated.
sudo chattr +i ~/.local/share/recently-used.xbel
#Enable using sudo chattr -i ~/.local/share/recently-used.xbel

#Install Flux
echo "Flux"
sudo add-apt-repository ppa:nathan-renniewaldock/flux
sudo apt-get update
sudo apt-get install fluxgui

# Enable html5 videos
echo "HTML5"
sudo apt-get install ubuntu-restricted-extras

#Audio manager
echo "Audio Manager"
sudo apt-get install pulseaudio pavucontrol

#UI Root
echo "UI Root"
sudo add-apt-repository ppa:noobslab/apps
# NOTE(review): this `apt-get install` names no package -- it was probably
# meant to be `apt-get update` after adding the PPA; confirm.
sudo apt-get install
sudo apt-get install nautilus-admin

#VPN Secure
echo "VPN Secure"
sudo wget https://www.vpnsecure.me/files/install -O install && sudo bash install

#Gparted
echo "GParted"
sudo apt-get install gparted

#VLC
echo "VLC"
sudo snap install vlc

#gimp
echo "GIMP"
sudo add-apt-repository ppa:otto-kesselgulasch/gimp
sudo apt-get install gimp

#Font scale
# gsettings set org.gnome.desktop.interface text-scaling-factor 1.2

#UI Tweaks
echo "UI Tweaks"
sudo apt install gnome-tweak-tool

#Spotify
echo "Spotify"
sudo snap install spotify

#Disable screen rotation
echo "Disable Screen Rotation"
gsettings set org.gnome.settings-daemon.peripherals.touchscreen orientation-lock true
gsettings set org.gnome.settings-daemon.plugins.orientation active false

#Brave
# Add Brave's signing key and release repo for this Ubuntu codename.
curl -s https://brave-browser-apt-release.s3.brave.com/brave-core.asc | sudo apt-key --keyring /etc/apt/trusted.gpg.d/brave-browser-release.gpg add -
source /etc/os-release
echo "deb [arch=amd64] https://brave-browser-apt-release.s3.brave.com/ $UBUNTU_CODENAME main" | sudo tee /etc/apt/sources.list.d/brave-browser-release-${UBUNTU_CODENAME}.list
sudo apt update
sudo apt install brave-browser

#Git
apt-get install git-core

#NodeJS
curl -sL https://deb.nodesource.com/setup_12.x | sudo -E bash -
sudo apt-get install -y nodejs

echo "Done!"
|
a52cb409690f06daebec3dd92fb31de679af799c
|
Shell
|
bmichalo/performance
|
/hostScripts/virt-install-vm.sh~
|
UTF-8
| 2,019
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision a fresh "master" guest VM with virt-install, kickstarting it from
# a RHEL/Fedora install tree. Any previous master image / libvirt definition
# is removed first. Must run as root (writes under /var/lib/libvirt/images).
# Distro selector; picks the install-tree URL in the case below.
dist=rhel72
vm=master
bridge=virbr0
master_image=master.qcow2
# Kickstart file injected into the installer initrd (see --initrd-inject).
ks=$dist-vm.ks
image_path=/var/lib/libvirt/images/
# Number of per-VM image copies to clean up below (0 = skip the loop).
nr_vms=0
case $dist in
rhel71)
#location="http://download.devel.redhat.com/released/RHEL-7/7.1/Server/x86_64/"
location="http://download.devel.redhat.com/released/RHEL-7/7.1-RC-2/Server/x86_64/os//"
;;
rhel72)
location="http://download.devel.redhat.com/released/RHEL-7/7.2/Server/x86_64/os/"
;;
f22)
location="http://download.devel.redhat.com/released/F-22/GOLD/Server/x86_64/os/"
;;
esac
#location="http://download.eng.rdu2.redhat.com/nightly/RHEL-7.2-20150427.n.1/compose/Server/x86_64/os/"
# Kernel args for the installer: point it at the injected kickstart and use
# the serial console (captured to /tmp/$vm.console below).
extra="ks=file:/$ks console=ttyS0,115200"
echo deleting master image
/bin/rm -f $image_path/$master_image
echo deleting vm image copies
for i in `seq 1 $nr_vms`; do
set -x
/bin/rm -f $image_path/vm*.qcow2
set +x
done
echo creating new master image
# NOTE(review): despite the .qcow2 name the image is created/used as raw.
qemu-img create -f raw $image_path/$master_image 100G
echo undefining master xml
virsh list --all | grep master && virsh undefine master
echo calling virt-install
# normal
#virt-install --name=$vm \
# --virt-type=kvm \
# --disk path=$image_path/$master_image,format=raw \
# --vcpus=2 \
# --ram=2048 \
# --network bridge=$bridge \
# --os-type=linux \
# --os-variant=rhel7 \
# --graphics none \
# --extra-args="$extra" \
# --initrd-inject=/root/$ks \
# --serial pty \
# --serial file,path=/tmp/$vm.console \
# --location=$location \
# --noreboot
# realtime
# Realtime-tuned variant: vCPUs pinned to host CPUs 14-15, memory locked and
# backed by 1G hugepages from NUMA node 1.
virt-install --name=$vm \
--virt-type=kvm \
--disk path=$image_path/$master_image,format=raw,bus=virtio,cache=none,io=threads \
--vcpus=2,cpuset=14,15 \
--numatune=1 \
--memory=1024,hugepages=yes \
--memorybacking hugepages=yes,size=1,unit=G,locked=yes,nodeset=1 \
--network bridge=$bridge \
--os-type=linux \
--os-variant=rhel7 \
--graphics none \
--extra-args="$extra" \
--initrd-inject=/root/$ks \
--serial pty \
--serial file,path=/tmp/$vm.console \
--location=$location \
--noreboot
| true
|
d0e7311c46ee04b0801656eac4d10ac4e3f83dad
|
Shell
|
MGalego/docker_wordpress
|
/_bash_webserver.sh
|
UTF-8
| 148
| 2.765625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Open a root bash shell inside the project's PHP container.
# The container name is read from CONTAINER_NAME=... in ./.env (xargs trims
# surrounding whitespace), then suffixed with "_php".
MY_VAR=$(grep CONTAINER_NAME= .env | xargs)
MY_VAR=${MY_VAR#*=}_php
echo "Bash to $MY_VAR"
# Fix: quote the expansion so an unexpected/empty name cannot word-split
# into extra docker arguments.
docker exec -u root -ti "$MY_VAR" bash
| true
|
4df6d9a1b6db16eb941bb2244bcb389f0ab907be
|
Shell
|
dacapobench/dacapobench
|
/tools/analysis/perf/scrapegc.sh
|
UTF-8
| 388
| 3.421875
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
#!/usr/bin/bash
#
# Gather all the GC stats for all benchmarks into yml files.
# For each benchmark found under $DIR, writes a header plus one
# "<iteration>.0: <parsed gc stats>" line per iteration into the
# benchmark's stats-gc.yml.
DIR=log/mole-2023-06-08-Thu-033509
# Benchmark names: run directories look like <bm>.<i>...1000...; keep the
# part before the first dot. (Fix: use `cd && ls` so a bad $DIR fails the
# listing instead of silently listing the current directory; $() replaces
# backticks; expansions are quoted.)
for bm in $(cd "$DIR" && ls | grep 1000 | cut -d '.' -f1 | sort -u); do
echo "$bm"
out=../../../benchmarks/bms/$bm/stats-gc.yml
cp gc-stats-hdr.txt "$out"
for i in 1 2; do
echo -n "$i.0: " >> "$out"
# The trailing glob selects the iteration's run directory.
./parsegclog.py "$DIR/$bm".$i*.0/gc.log >> "$out"
done
done
| true
|
4c4bbe6ae554c38c9b49ab7ffdd144512fc1f6d5
|
Shell
|
tensorflow/tfjs-models
|
/model-playground/scripts/deploy-dev.sh
|
UTF-8
| 662
| 3.703125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Build and deploy the app to the `model-playground-demo` Google Storage bucket.
set -e

# A timestamp uniquely identifies this deployment.
stamp=$(date '+%Y-%m-%d-%H-%M-%S')
prefix="/model-playground-demo/${stamp}/"

# Compile the angular app against the timestamped URL prefix, then upload
# the build output to the matching bucket folder.
yarn build --base-href ${prefix} --deploy-url ${prefix}
gsutil -m cp dist/* gs://model-playground-demo/${stamp}/

# Print the public URL of the freshly deployed demo.
echo "-------------------------------------------------------------------------"
echo "Demo deployed: https://storage.googleapis.com/model-playground-demo/${stamp}/index.html"
| true
|
da5dfaa8d5cdc9011e3ccdd44c09dbcb089b273e
|
Shell
|
shaan1337/EventStore
|
/src/Scripts/v8/build-v8_x64_release.sh
|
UTF-8
| 343
| 3.296875
| 3
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Build the shared V8 library (x64 release) and copy the binaries and
# public headers into ../libs. Expects to live in the repo at
# src/Scripts/v8 with the v8 checkout two levels up.

# Abort the whole script, preserving the failing command's exit status
# (the original exited silently; now it also reports to stderr).
function err() {
	local status=$?
	echo "build-v8_x64_release: aborting (exit status $status)" >&2
	exit "$status"
}

# Compile V8 as a shared library in x64 release mode.
function build-v8() {
	make x64.release library=shared || err
}

# Copy the built shared objects and public headers into ../libs.
function copy-files() {
	cp out/x64.release/lib.target/* -t ../libs || err
	# Fix: -p makes re-runs idempotent and the failure is now checked
	# (the original unchecked mkdir failed when the dir already existed).
	mkdir -p ../libs/include || err
	cp include/* -t ../libs/include || err
}

pushd $(dirname $0)/../../v8 || err
build-v8 || err
copy-files || err
popd || err
| true
|
c42b5a05ec6dcd29b4acbe32e95d71020a3a0b72
|
Shell
|
MasonIT/git-log-action
|
/entrypoint.sh
|
UTF-8
| 531
| 3.4375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Write a formatted git log to a file.
#   $1 - mode: 'tag'  -> log since the tag named in $2
#              'time' -> log since the date in $2
#              else   -> log since the most recent tag
#   $2 - tag name or date, depending on $1
#   $3 - optional output path (defaults to update.log)
#
# Fixes vs. the original: `[[$3 != '']]` lacked spaces (syntax error), the
# first conditional was closed with `if` instead of `fi`, and the output was
# redirected to a literal file named "path" instead of "$path".
if [[ $3 != '' ]]
then
  path=$3
else
  path=update.log
fi

# Shared log formatting: "- <date> <author> \n <subject>".
format='- %cd %an \n %s\n \n'
datefmt='%Y-%m-%d %H:%M:%S'

if [[ $1 == 'tag' ]]
then
  echo "type is $1"
  git log $2.. --pretty=format:"$format" --date=format:"$datefmt" > "$path"
elif [[ $1 == 'time' ]]
then
  echo "type is $1"
  git log --since "$2" --pretty=format:"$format" --date=format:"$datefmt" > "$path"
else
  tagName=$(git describe --abbrev=0 --tags)
  echo "tag is $tagName"
  git log $tagName.. --pretty=format:"$format" --date=format:"$datefmt" > "$path"
fi
| true
|
457026a75278f13ecc3a62b98b979778763e1622
|
Shell
|
crazyrex/sysadmin
|
/scripts/verify-chameleon.sh
|
UTF-8
| 1,428
| 3.203125
| 3
|
[] |
no_license
|
# Two snippets to compare private/reports-raw at chameleon against private/canned at datacollector
# NOTE(review): these are interactive copy/paste snippets meant to be run on
# two different hosts; for each day the two hosts should print identical
# sha256 sums (computed over a sorted "<filename> <size>" listing).
# at datacollector.infra.ooni.io
cd /data/ooni/private/canned && for date in 2017-09-{01..30}; do
  echo -n "$date -- "
  {
    zcat $date/index.json.gz | jq 'select(.canned != null) | .canned[] | .textname + " " + (.text_size | tostring)'
    zcat $date/index.json.gz | jq 'select(.canned == null) | .textname + " " + (.text_size | tostring)'
  } | sed 's,.*/,,; s,"$,,' | LC_ALL=C sort | sha256sum
done |& less
# at chameleon.infra.ooni.io
cd /data/ooni/private/reports-raw/yaml && for date in 2017-09-{01..30}; do
  ( # subshell instead of noisy `pushd` because of `cd`
  echo -n "$date -- "
  cd $date && find . -type f -printf '%f %s\n' | LC_ALL=C sort | sha256sum
  )
done |& less
# Two snippets to compare public/sanitised at chameleon against public/sanitised-s3-ls at datacollector
# at datacollector.infra.ooni.io
cd /data/ooni/public/sanitised-s3-ls && \
for date in 2017-09-{01..30}; do
  echo -n "$date -- "
  zcat $date.json.gz | jq '.results[] | .file_name + " " + (.file_size | tostring)' | sed 's,.*/,,; s,"$,,' | LC_ALL=C sort | sha256sum
done |& less
# at chameleon.infra.ooni.io
cd /data/ooni/public/sanitised && \
for date in 2017-09-{01..30}; do
  echo -n "$date -- "
  (
  cd $date && find . -type f -printf '%f %s\n' | LC_ALL=C sort | sha256sum
  )
done |& less
| true
|
1765cdea6b13ee54fe2ba23aa7163876e9a020d8
|
Shell
|
barbatoast/chat-server
|
/run.sh
|
UTF-8
| 813
| 3.8125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Run every test binary found under bin/ (paths containing "test") and
# summarise the outcome in results.txt. Exits 1 if any test printed FAIL.

# Execute all test binaries, printing a banner between them.
# Sets ret=1 if any binary exits non-zero.
run_unit_tests() {
    ret=0
    found_tests=0
    for test_bin_path in $(find bin | grep test); do
        found_tests=1
        echo 'Running '${test_bin_path}
        # Fix: the original assignment was written "ret = 1", which is a
        # syntax error (it tries to run a command named "ret"); and it only
        # checked for exit code exactly 1, missing other failure codes.
        if ! "./${test_bin_path}"; then
            ret=1
        fi
        echo '==========================================================================='
    done
    if [ ${ret} = 0 ] && [ ${found_tests} = 1 ]
    then
        echo 'All tests ran'
    fi
    if [ ${ret} = 1 ] && [ ${found_tests} = 1 ]
    then
        echo 'One or more tests failed to run'
    fi
}

run_unit_tests > results.txt;

# A test reports failure by printing FAIL; surface it and exit non-zero.
if grep 'FAIL' results.txt
then
    echo 'One or more tests failed' >> results.txt
    echo 'One or more tests failed'
    exit 1
fi
echo 'All tests passed' >> results.txt
echo 'All tests passed'
| true
|
29b5dd50e7e5d62470a8429efdf011708599c257
|
Shell
|
nuxlli/Bash-Scripts
|
/lib/functions.bash
|
UTF-8
| 266
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/bash
# (Shebang fixed: the original said /bin/sh but the body uses bash arrays.)
# search_array LIST NEEDLE
#   $1 - whitespace-separated list of values
#   $2 - value to look for
# Prints the zero-based index of the first match, or an empty line when the
# value is not present.
function search_array() {
  local -a haystack
  # Intentional word-splitting: $1 is a whitespace-separated list.
  haystack=( $1 )
  local i
  for (( i = 0; i < ${#haystack[@]}; i++ )); do
    if [ "${haystack[$i]}" = "$2" ]; then
      echo "$i"
      return
    fi
  done
  echo ""
}
| true
|
ff4a9033ecef369b915aca8264a0353973b1f186
|
Shell
|
DimitriTchapmi/overview
|
/sites/scripts/init_arbre_pc.sh
|
UTF-8
| 932
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
# Initialise the directory tree and database row for a new PC of a company.
#   $1 - company (entreprise) name
#   $2 - IP address of the PC
#   $3 - database id of the company
nom_epse=$1
ip_pc=$2
id_epse_bdd=$3
chemin_epse="/var/www/overview/projets"
### create the inventory directory ###
sudo mkdir "$chemin_epse/$nom_epse/inventaire/default/$ip_pc"
### create the supervision directories ###
sudo mkdir -p "$chemin_epse/$nom_epse/supervision/$ip_pc/graphes/jours"
sudo mkdir "$chemin_epse/$nom_epse/supervision/$ip_pc/graphes/heures"
sudo mkdir "$chemin_epse/$nom_epse/supervision/$ip_pc/graphes/semaines"
sudo mkdir "$chemin_epse/$nom_epse/supervision/$ip_pc/processus/"
sudo mkdir "$chemin_epse/$nom_epse/supervision/$ip_pc/alertes/"
sudo mkdir "$chemin_epse/$nom_epse/supervision/$ip_pc/bases/"
### create the pc->group mapping file ###
# Fix: "sudo echo ... > file" does not elevate the redirection — the file
# was written by the calling (non-root) shell. sudo tee writes it as root.
echo "$ip_pc default" | sudo tee "$chemin_epse/$nom_epse/inventaire/lien_pc_group.txt" > /dev/null
### register the machine in the database ###
# WARNING(review): credentials are hardcoded and $ip_pc/$id_epse_bdd are
# interpolated directly into SQL — injection risk if arguments are untrusted.
mysql --user='root' --password='overview' << EOF
CONNECT overview;
INSERT INTO machines (nom, entreprise, groupe) VALUES("$ip_pc", $id_epse_bdd, "default");
EOF
| true
|
805bd25b4c785d16a2bdc32aba7a5b3467e46935
|
Shell
|
roeybiran/dotfiles
|
/links/_LINK_.zshrc
|
UTF-8
| 10,205
| 2.640625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/zsh
#### FIG ENV VARIABLES ####
# Please make sure this block is at the start of this file.
[ -s ~/.fig/shell/pre.sh ] && source ~/.fig/shell/pre.sh
#### END FIG ENV VARIABLES ####
# enable vim mode
# bindkey -v
# ENVIRONMENT VARS
export LANG=en_US.UTF-8
# Treat this punctuation as word characters so backward-kill-word stops at "/".
# https://stackoverflow.com/questions/444951/zsh-stop-backward-kill-word-on-directory-delimiter
export WORDCHARS='*?[]~=&;!#$%^(){}<>'
# Homebrew: fixed Brewfile location, no lockfile, no analytics, no implicit updates.
export HOMEBREW_BUNDLE_FILE=~/.Brewfile
export BREW_BUNDLE_NO_LOCK=1
export HOMEBREW_NO_ANALYTICS=1
export HOMEBREW_NO_AUTO_UPDATE=1
# fzf: list files with fd; Tab moves down, list cycles around.
export FZF_DEFAULT_COMMAND='fd --type f'
export FZF_CTRL_T_COMMAND="$FZF_DEFAULT_COMMAND"
export FZF_DEFAULT_OPTS='--bind tab:down --cycle'
# Complete aliases as distinct commands (no alias substitution first).
setopt complete_aliases
# ALIASES
# npm / Homebrew / macOS `defaults` shortcuts.
alias nr="npm run"
alias bci='brew install --cask'
# (a duplicate `bci` definition was removed here)
alias bcr='brew reinstall --cask'
alias bcu='brew uninstall --cask'
alias bi='brew install'
alias bs='brew search'
alias bu='brew uninstall'
alias defd='defaults delete'
alias defre='defaults read'
alias deft='defaults read-type'
# Coloured ls with type markers, including dotfiles.
alias ls='ls -G -F -A'
alias grepi='grep -i'
alias gu='cd ..'
# Mark a path as ignored by Dropbox via its xattr.
alias dbxignore='xattr -w com.dropbox.ignored 1'
alias r='source ~/.zshrc'
# Git: list ignored / untracked / tracked files in the current tree.
alias git-show-ignored='git ls-files . --ignored --exclude-standard --others'
alias git-show-untracked='git ls-files . --exclude-standard --others'
alias git-show-tracked='git ls-tree -r HEAD --name-only'
# PATH
export PATH=$PATH:/usr/local/sbin #brew
# Add Homebrew-provided completion functions when brew is available.
if type brew &>/dev/null; then
  FPATH=$(brew --prefix)/share/zsh/site-functions:$FPATH
fi
# case insensitive path-completion
zstyle ':completion:*' \
  matcher-list \
  'm:{[:lower:][:upper:]}={[:upper:][:lower:]}' \
  'm:{[:lower:][:upper:]}={[:upper:][:lower:]} l:|=* r:|=*' \
  'm:{[:lower:][:upper:]}={[:upper:][:lower:]} l:|=* r:|=*' \
  'm:{[:lower:][:upper:]}={[:upper:][:lower:]} l:|=* r:|=*'
# partial completion suggestions
zstyle ':completion:*' list-suffixes
zstyle ':completion:*' expand prefix suffix
zstyle ':completion:*' select-prompt ''
zstyle ':completion:*' list-prompt ''
# load bashcompinit for some old bash completions
autoload bashcompinit && bashcompinit
### SHELL OPTS
setopt AUTO_CD
setopt NO_CASE_GLOB
# history
HISTFILE=~/.zsh_history
SAVEHIST=5000
HISTSIZE=2000
setopt EXTENDED_HISTORY
# share history across multiple zsh sessions
setopt SHARE_HISTORY
# append to history
setopt APPEND_HISTORY
# adds commands as they are typed, not at shell exit
setopt INC_APPEND_HISTORY
# expire duplicates first
setopt HIST_EXPIRE_DUPS_FIRST
# do not store duplications
setopt HIST_IGNORE_DUPS
# ignore duplicates when searching
setopt HIST_FIND_NO_DUPS
# removes blank lines from history
setopt HIST_REDUCE_BLANKS
# bindings
# Up/Down arrows search history restricted to the current line prefix.
bindkey '^[[A' history-beginning-search-backward
bindkey '^[[B' history-beginning-search-forward
### PROMPT
autoload -Uz promptinit compinit vcs_info
compinit
promptinit
setopt prompt_subst
# Refresh git info before every prompt.
precmd_vcs_info() {
  vcs_info
}
precmd_functions+=(precmd_vcs_info)
# Escaped on purpose: expanded at prompt time via prompt_subst above.
gitprompt=\$vcs_info_msg_0_
# Fix group-writable completion dirs flagged by compaudit.
if compaudit | grep -qE "\w"; then
  # https://stackoverflow.com/questions/13762280/zsh-compinit-insecure-directories
  compaudit | xargs chmod g-w
fi
zstyle ':completion:*' menu select
zstyle ':vcs_info:git:*' formats '%F{240}(%r/%b)%f' # brgreen / # brcyan
zstyle ':vcs_info:*' enable git
pwd_with_blue_underline="%U%F{blue}%~%f%u"
exit_status_bold_and_red_if_0="%B%(?.>.%F{red}x)%f%b"
PROMPT="
$pwd_with_blue_underline $gitprompt
$exit_status_bold_and_red_if_0 "
### PLUGINS
# source ~/Documents/fzf-tab/fzf-tab.plugin.zsh
# zsh autosuggest
source /usr/local/share/zsh-autosuggestions/zsh-autosuggestions.zsh
source /usr/local/share/zsh-syntax-highlighting/zsh-syntax-highlighting.zsh
export ZSH_HIGHLIGHT_HIGHLIGHTERS_DIR=/usr/local/share/zsh-syntax-highlighting/highlighters
### FZF
# https://github.com/junegunn/fzf#tips
[ -f ~/.fzf.zsh ] && source ~/.fzf.zsh
# Use fd (https://github.com/sharkdp/fd) instead of the default find command for listing path candidates.
_fzf_compgen_path() {
  fd --hidden --follow --exclude ".git" . "$1"
}
# Use fd to generate the list for directory completion
_fzf_compgen_dir() {
  fd --type d --hidden --follow --exclude ".git" . "$1"
}
### AUTOJUMP
[ -f /usr/local/etc/profile.d/autojump.sh ] && . /usr/local/etc/profile.d/autojump.sh
# functions
# Jump to the directory autojump resolves for the query;
# fall back to $HOME when the lookup yields nothing usable.
j() {
  local dest
  dest="$(/usr/local/bin/autojump "$1")"
  cd "$dest" || cd
}
# Diff `defaults read` output before/after a manual change, to discover
# which preference key a GUI toggle writes.
defra() {
  cd ~/Desktop || return
  defaults read >a
  printf "%s\n" "Press any key to continue"
  read -r
  defaults read >b
  icdiff -N -H a b
  cd || return
}
# cd to the frontmost Finder window's folder (Desktop when none is open).
cdf() {
  current_path=$(
    osascript <<-EOF
	tell app "Finder"
		try
			POSIX path of (insertion location as alias)
		on error
			POSIX path of (path to desktop folder as alias)
		end try
	end tell
	EOF
  )
  cd "$current_path"
}
# Shadow rm: move to Trash instead of deleting permanently.
rm() {
  trash "$@"
}
tldr() {
  /usr/local/bin/tldr "$@"
}
# Copy ready-to-run `defaults write <app> NSUserKeyEquivalents ...` commands
# for an app's custom keyboard shortcuts to the clipboard, one per line.
keydump() {
  local app="$1"
  if [[ -z "$app" ]]; then
    echo "USAGE: keydump <bundle identifier>"
    return
  fi
  # Strip the surrounding "{ ... }" lines of the plist dump.
  hotkeys="$(defaults read "$app" NSUserKeyEquivalents | sed '1d' | sed '$ d')"
  arr=()
  while IFS=$'\n' read -r hotkey; do
    formatted="$(printf "%s\n" "$hotkey" | sed -E 's/[[:space:]]{2,}/ /' | sed -E 's/^[[:space:]]+//' | sed "s|\"|'|g" | sed 's/ = / -string /g' | sed -E 's/;$//')"
    arr+=("defaults write $app NSUserKeyEquivalents $formatted")
  done <<<"$hotkeys"
  printf "%s\n" "${arr[@]}" | pbcopy
}
# Condensed top: command/cpu/mem columns, 2-second updates.
top() {
  /usr/bin/top -i 10 -stats command,cpu,mem -s 2
}
# Interactively pick processes with fzf (Tab toggles) and SIGKILL them.
fkill() {
  pids=$(ps -r -c -A -o 'command=,%cpu=,pid=' | /usr/local/bin/fzf -m --bind 'tab:toggle' | awk '{ print $NF }')
  while IFS=$'\n' read -A pid; do
    /bin/kill -SIGKILL "$pid"
  done <<<"$pids"
}
# Create a directory (with any missing parents) and change into it.
mkcd() {
  local target="$1"
  mkdir -p "$target"
  cd "$target"
}
# Weekly system maintenance: update package managers (mas/npm/brew), trash
# Dropbox sync conflicts and broken symlinks, and prune stale LaunchBar
# Action Support folders. Records its last run date in a defaults plist.
# Usage: maintain run | maintain --status
maintain() {
  dependencies=(
    /usr/local/bin/trash
  )
  dotfiles_prefs=~/Library/Preferences/com.roeybiran.dotfiles.plist
  weekly_maintenance_dirs=(
    ~/Dropbox
  )
  for f in "${dependencies[@]}"; do
    test ! -e "$f" && echo "Missing depedency: $f. Exiting" && return
  done
  if test -z "$1"; then
    echo "USAGE: maintain [run] [--status]"
    return
  fi
  now="$(date +%s)"
  # --status: report days since the last recorded run, then stop.
  if test "$1" = --status; then
    last_update_date="$(defaults read "$dotfiles_prefs" maintainanceLastRunDate 2>/dev/null)"
    if test -z "$last_update_date"; then
      # first run
      echo "has yet to run."
      return
    fi
    time_elapsed_since_last_update=$(((now - last_update_date) / 86400))
    echo "last run was $time_elapsed_since_last_update days ago."
    return
  fi
  defaults write "$dotfiles_prefs" maintainanceLastRunDate -int "$now"
  # Keep sudo's credential cache warm for the whole run; the background loop
  # exits once this shell ($$) goes away.
  sudo -v
  while true; do
    sudo -n true
    sleep 60
    kill -0 "$$" || exit
  done 2>/dev/null &
  echo "Updating package managers..."
  # mas
  echo ">> mas upgrade"
  mas upgrade
  # npm
  echo ">> updating npm"
  npm install -g npm@latest
  echo ">> updating global npm packages"
  npm update -g
  # brew
  # update brew itself and all formulae
  echo ">> brew update"
  brew update
  # update casks and all unpinned formulae
  echo ">> brew upgrade"
  brew upgrade
  echo ">> brew cleanup"
  brew cleanup
  echo ">> brew autoremove"
  brew autoremove
  echo ">> brew doctor"
  brew doctor
  echo "Trashing sync conflicts and broken symlinks..."
  for dir in "${weekly_maintenance_dirs[@]}"; do
    find "$dir" \( -iname '*conflict*-*-*)*' -or -type l ! -exec test -e {} \; \) -exec trash {} \; -exec echo "Trashed: " {} \;
  done
  # launchbar housekeeping
  # remove logging for all actions
  # for f in "$HOME/Library/Application Support/LaunchBar/Actions/"*".lbaction/Contents/Info.plist"; do
  #   /usr/libexec/PlistBuddy -c "Delete :LBDebugLogEnabled" "$f" 2>/dev/null
  # done
  # Collect the bundle ids of installed actions, then trash every folder in
  # "Action Support" that no longer matches an installed action.
  actions_identifiers=()
  launchbar_dir="$HOME/Library/Application Support/LaunchBar"
  action_support_dir="$launchbar_dir/Action Support"
  lbaction_packages=$(find "$launchbar_dir/Actions" -type d -name "*.lbaction")
  while IFS=$'\n' read -r plist; do
    actions_identifiers+=("$(/usr/libexec/PlistBuddy -c "Print :CFBundleIdentifier" "$plist/Contents/Info.plist" 2>/dev/null)")
  done <<<"$lbaction_packages"
  paths="$(printf "%s\n" "$action_support_dir/"*)"
  while IFS=$'\n' read -r dir; do
    delete=true
    basename="$(basename "$dir")"
    for id in "${actions_identifiers[@]}"; do
      if test "$basename" = "$id"; then
        delete=false
      fi
    done
    if "$delete"; then
      echo "LaunchBar cleanup: $dir"
      trash "$dir"
    fi
  done <<<"$paths"
  # if softwareupdate --all --install --force 2>&1 | tee /dev/tty | grep -q "No updates are available"; then
  #   sudo rm -rf /Library/Developer/CommandLineTools
  #   sudo xcode-select --install
  # fi
}
# Terminate Adobe / Creative Cloud processes and remove their launchd jobs.
adobe_cleanup() {
  pkill -15 -li adobe
  pkill -15 -li "creative cloud"
  for f in ~/Library/LaunchAgents/* /Library/LaunchDaemons/* /Library/LaunchAgents/*; do
    if echo "$f" | grep -iq adobe; then
      sudo rm -rf "$f"
      echo "deleting $f"
    fi
  done
}
# Mark heavyweight build folders under ~/Dropbox as Dropbox-ignored.
dropboxignore() {
  ignorables=(
    -name
    node_modules
    -or
    -name
    .next
  )
  # https://stackoverflow.com/a/1489405
  find ~/Dropbox \( ${ignorables[@]} \) -prune -exec xattr -w com.dropbox.ignored 1 {} \;
}
# Same as dropboxignore, but also prints each path it marks (-print).
# NOTE(review): near-duplicate of dropboxignore — candidates to merge.
dropbox_ignore_all_ignorables() {
  ignorables=(
    -name
    node_modules
    -or
    -name
    .next
  )
  # https://stackoverflow.com/a/1489405
  find ~/Dropbox \( ${ignorables[@]} \) -prune -print -exec xattr -w com.dropbox.ignored 1 {} \;
}
# List installed packages (brew leaves, casks, MAS apps, global npm pkgs),
# printing green the ones declared in the Brewfile or in npm_list.
applist() {
  brewfile_list="$(brew bundle list --all --no-upgrade | grep -v "/")"
  npm_list=(
    np
    npm-check-updates
    vercel
    zx
  )
  mylist="$(printf "%s\n" "$brewfile_list" "${npm_list[@]}")"
  brew="$(brew leaves)"
  cask="$(brew list --cask)"
  # mas output: "<id> <name> (<version>)" — keep just the name.
  mas="$(mas list | cut -d" " -f2- | rev | cut -d" " -f2- | rev | sed -E 's/^[[:space:]]+//' | sed -E 's/[[:space:]]+$//')"
  npm="$(npm list -g --depth=0 2>/dev/null | grep ── | cut -d" " -f2 | sed -E "s/@.+$//" | grep -ve '^npm$')"
  current="$(printf "%s\n" ">> brew" "$brew" ">> cask" "$cask" ">> mas" "$mas" ">> npm" "$npm")"
  while IFS=$'\n' read -r LINE; do
    if echo "$mylist" | grep -q "$LINE"; then
      echo -e "\033[0;32m$LINE"
    else
      echo -e "\033[0m$LINE"
    fi
  done <<<"${current}"
}
# Fuzzy-pick a path under ~/Dropbox and cd to it (or its parent for files).
gt() {
  paths=(
    ~/Dropbox/
  )
  res="$(fd --no-ignore . "${paths[@]}" | fzf)"
  cd "$res" || cd "$(dirname "$res")"
}
# source /usr/local/opt/zsh-vi-mode/share/zsh-vi-mode/zsh-vi-mode.plugin.zsh
#### FIG ENV VARIABLES ####
# Please make sure this block is at the end of this file.
[ -s ~/.fig/fig.sh ] && source ~/.fig/fig.sh
#### END FIG ENV VARIABLES ####
| true
|
244796388a307a5b20e79e3e9d59256699f0b21a
|
Shell
|
faroncoder/falcon-fly
|
/htmls/htmllinksToFiles.sh
|
UTF-8
| 614
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate links.html: one <iframe> per file in the current directory,
# each pointing at ./jsc/media/htmls/<file>.
# Fix: the original grepped $PATH for the literal 'source /usr/local/bin'
# (never matches), so the append always ran; test for the directory itself.
if ! echo "$PATH" | grep -q '/usr/local/bin'; then export PATH=$PATH:/usr/local/bin; fi
source /usr/local/lib/faron_falcon/colors; source /usr/local/lib/faron_falcon/functions; startTime
####################START
> links.html
for line in *; do
# Fix: the old `ls > links.txt` listed its own temp file, producing a bogus
# iframe for links.txt; a glob loop needs no temp file at all.
[ "$line" = "links.html" ] && continue
# Fix: a space was missing between height="285" and frameborder="0".
echo "<iframe src=\"./jsc/media/htmls/$line\" width=\"400\" height=\"285\" frameborder=\"0\" scrolling=\"no\"></iframe>" >> links.html
done
####################STOP
### exit code for clean exit
doneTime
### IGNORE BELOW. THIS IS MEGATAG FOR MY SCRIPTS
### [FILE] htmllinksToFiles.sh [ACTIVE] y
| true
|
17cd46f6ff0ddeac4e3cab2d083a842e6735af5d
|
Shell
|
evzharko/zabbix-templates
|
/php-fpm/php-fpm_value.sh
|
UTF-8
| 4,629
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/bash
# Zabbix user-parameter helper: print one php-fpm status value.
# Usage: php-fpm_value.sh <metric>_<pool>      e.g. accepted_conn_trn
# Pools: trn, osvita, vse10. The status pages are pre-fetched into the
# per-pool log files below. The original duplicated the same 14-way case
# three times (once per pool); the shared helper removes the copy-paste.
LOG_trn="/etc/zabbix/scripts/php-fpm/log_trn"
LOG_osvita="/etc/zabbix/scripts/php-fpm/log_osvita"
LOG_vse10="/etc/zabbix/scripts/php-fpm/log_vse10"

# Print a single metric from a captured php-fpm status page.
#   $1 - path to the captured status page
#   $2 - metric key (pool, accepted_conn, listen_queue, ...)
# Each value is the colon-separated field with all spaces stripped.
metric_value() {
	local log=$1
	case $2 in
	'pool' )
	grep pool "$log" | cut -d":" -f 2 | sed -e 's/ //g';;
	'process_manager' )
	grep 'process manager' "$log" | cut -d":" -f 2 | sed -e 's/ //g';;
	'start_time' )
	# the timestamp itself contains colons, so keep fields 2-5 and
	# re-insert the space before the timezone offset
	grep 'start time' "$log" | cut -d":" -f 2,3,4,5 | sed -e 's/ //g' | sed -e 's/+/\ +/';;
	'start_since' )
	grep 'start since' "$log" | cut -d":" -f 2 | sed -e 's/ //g';;
	'accepted_conn' )
	grep 'accepted conn' "$log" | cut -d":" -f 2 | sed -e 's/ //g';;
	'listen_queue' )
	# exclude the "max listen queue" and "listen queue len" lines
	grep 'listen queue' "$log" | grep -v max | grep -v len | cut -d":" -f 2 | sed -e 's/ //g';;
	'max_listen_queue' )
	grep 'max listen queue' "$log" | cut -d":" -f 2 | sed -e 's/ //g';;
	'listen_queue_len' )
	grep 'listen queue len' "$log" | cut -d":" -f 2 | sed -e 's/ //g';;
	'idle_processes' )
	grep 'idle processes' "$log" | cut -d":" -f 2 | sed -e 's/ //g';;
	'active_processes' )
	grep 'active processes' "$log" | grep -v max | cut -d":" -f 2 | sed -e 's/ //g';;
	'total_processes' )
	grep 'total processes' "$log" | cut -d":" -f 2 | sed -e 's/ //g';;
	'max_active_processes' )
	grep 'max active processes' "$log" | cut -d":" -f 2 | sed -e 's/ //g';;
	'max_children_reached' )
	grep 'max children reached' "$log" | cut -d":" -f 2 | sed -e 's/ //g';;
	'slow_requests' )
	grep 'slow requests' "$log" | cut -d":" -f 2 | sed -e 's/ //g';;
	esac
}

# Dispatch: the Zabbix item key carries the pool name as a suffix.
case $1 in
'ping' )
	# only the trn pool exposes a ping endpoint (answers "pong")
	grep 'pong' "$LOG_trn" | cut -d":" -f 2 | sed -e 's/ //g';;
*_trn )
	metric_value "$LOG_trn" "${1%_trn}";;
*_osvita )
	metric_value "$LOG_osvita" "${1%_osvita}";;
*_vse10 )
	metric_value "$LOG_vse10" "${1%_vse10}";;
esac
| true
|
11b84dec86eaa39581963ed2506f2d5654efa737
|
Shell
|
obu-team/obu-manager
|
/.openshift/action_hooks/start
|
UTF-8
| 430
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/bash
# The logic to start up your application should be put in this
# script. The application will work only if it binds to
# $OPENSHIFT_DIY_IP:8080
# Use the JDK unpacked under $OPENSHIFT_DATA_DIR (symlinked as jdk1.8).
JDK_LINK=jdk1.8
export JAVA_HOME="$OPENSHIFT_DATA_DIR/$JDK_LINK"
export PATH=$JAVA_HOME/bin:$PATH
cd $OPENSHIFT_REPO_DIR
# Launch the Spring Boot jar detached from this hook; bind to the DIY
# cartridge's assigned address and port with the production profile.
nohup java -jar target/obu-app-0.0.1-SNAPSHOT.jar --spring.profiles.active=pro --server.port=${OPENSHIFT_DIY_PORT} --server.address=${OPENSHIFT_DIY_IP} &
| true
|
b57ab993e28b506c133af0bdecc3cbb2456ebda9
|
Shell
|
satarii/microk8s
|
/microk8s-resources/actions/disable.cilium.sh
|
UTF-8
| 1,706
| 3.453125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# microk8s action: disable the Cilium CNI addon and restore flannel.
# Deletes the Cilium workloads, removes its CNI config/binaries/state,
# and restarts kubelet/containerd to use the snap's bundled CNI plugins.
set -e
source $SNAP/actions/common/utils.sh
echo "Disabling Cilium"
# Only act when the cilium CLI symlink exists, i.e. the addon was enabled.
if [ -L "${SNAP_DATA}/bin/cilium" ]
then
  "$SNAP/kubectl" "--kubeconfig=$SNAP_DATA/credentials/client.config" delete -f "$SNAP_DATA/actions/cilium.yaml"
  # Give K8s some time to process the deletion request
  sleep 15
  cilium=$(wait_for_service_shutdown "kube-system" "k8s-app=cilium")
  if [[ $cilium == fail ]]
  then
    echo "Cilium did not shut down on time. Proceeding."
  fi
  cilium=$(wait_for_service_shutdown "kube-system" "name=cilium-operator")
  if [[ $cilium == fail ]]
  then
    echo "Cilium operator did not shut down on time. Proceeding."
  fi
  # Remove Cilium's CNI config, binaries and runtime state.
  sudo rm -f "$SNAP_DATA/args/cni-network/05-cilium-cni.conf"
  sudo rm -f "$SNAP_DATA/opt/cni/bin/cilium-cni"
  sudo rm -rf $SNAP_DATA/bin/cilium*
  sudo rm -f "$SNAP_DATA/actions/cilium.yaml"
  sudo rm -rf "$SNAP_DATA/actions/cilium"
  sudo rm -rf "$SNAP_DATA/var/run/cilium"
  sudo rm -rf "$SNAP_DATA/sys/fs/bpf"
  # Tear down the vxlan interface Cilium created, if present.
  if $SNAP/sbin/ip link show "cilium_vxlan"
  then
    $SNAP/sbin/ip link delete "cilium_vxlan"
  fi
  set_service_expected_to_start flanneld
  echo "Restarting kubelet"
  # Point kubelet back at the snap's bundled CNI plugin directory.
  refresh_opt_in_config "cni-bin-dir" "\${SNAP}/opt/cni/bin/" kubelet
  sudo systemctl restart snap.${SNAP_NAME}.daemon-kubelet
  echo "Restarting containerd"
  # Rewrite containerd's bin_dir from SNAP_DATA back to SNAP if needed.
  if ! grep -qE "bin_dir.*SNAP}\/" $SNAP_DATA/args/containerd-template.toml; then
    sudo "${SNAP}/bin/sed" -i 's;bin_dir = "${SNAP_DATA}/opt;bin_dir = "${SNAP}/opt;g' "$SNAP_DATA/args/containerd-template.toml"
  fi
  sudo systemctl restart snap.${SNAP_NAME}.daemon-containerd
  echo "Restarting flanneld"
  sudo systemctl stop snap.${SNAP_NAME}.daemon-flanneld
  echo "Cilium is terminating"
fi
| true
|
fa871e0e6e09581c96be9b7b110a97a741b36b4f
|
Shell
|
mxq1688/Shell
|
/sbuild/build.sh
|
UTF-8
| 3,637
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/bash
# Build the front-end bundle (npm run build) and deploy dist/index.html plus
# dist/static into one or more target webapp directories.
# Usage: build.sh [relative/target/path ...]
# Targets default to pathArr below; paths are resolved relative to two
# levels above this script.

# Coloured log helpers: echo -e "\033[<bg>;<fg>m label \033[0m ..."
# fg: 30 black 31 red 32 green 33 yellow 34 blue 35 purple 36 cyan 37 white
# bg: 40..47 in the same colour order
LogError(){
	echo -e "\033[41;37m ERROR \033[0m" $(printf %-4s $*) ---$(date "+%Y-%m-%d %H:%M:%S")
}
LogSuccess(){
	echo -e "\033[42;37m DONE \033[0m" $(printf %-4s $*) ---$(date "+%Y-%m-%d %H:%M:%S")
}
LogWarning(){
	echo -e "\033[43;37m ERROR \033[0m" $(printf %-4s $*) ---$(date "+%Y-%m-%d %H:%M:%S")
}
LogInfo(){
	echo -e "\033[44;37m INFO \033[0m" $(printf %-4s $*) ---$(date "+%Y-%m-%d %H:%M:%S")
}
# Blinking tip (attribute 05; has no effect on some terminals).
LogTip(){
	echo -e "\033[33;05m $(printf %-4s 'Tip: ' $*) \033[0m" $(date "+%Y-%m-%d %H:%M:%S")
}

# Default deploy targets, relative to two levels above this script.
pathArr=('neobpback/neobp-web/neobp-web-app/src/main/webapp')
# Directory containing this script (works when invoked from elsewhere).
workdir=$(cd $(dirname $0); pwd)
copyPathArr=()
# Use command-line arguments as targets when given ($# > 0), otherwise
# fall back to pathArr; resolve each to an absolute path.
if (($# > 0)); then
	i=0
	for p in $*; do
		path="${workdir}/../../$p"
		copyPathArr[${i}]=$(cd $path; pwd)
		let i++
	done
else
	for ((i = 0; i < ${#pathArr[*]}; i++)); do
		path="${workdir}/../../${pathArr[${i}]}"
		copyPathArr[${i}]=$(cd $path; pwd)
	done
fi
LogTip ${copyPathArr[@]}

# Fix: time with epoch seconds — the original subtracted "%Y%m%d%H%M%S"
# strings, which is not arithmetic on seconds and breaks across
# minute/hour boundaries.
time1=$(date "+%s")
# Fix: the original called `Log`, which was only defined inside a heredoc
# comment and therefore undefined at runtime.
LogInfo "build to dist "
cd $workdir
npm run build
time2=$(date "+%s")
LogSuccess "build finished! --->>> dist "
timeDiff=$((time2 - time1))
LogTip "打包耗时:$((timeDiff / 60)) Min"
timeDiffMin=$(awk 'BEGIN{printf "%.1f\n",'$timeDiff'/'60'}')
LogTip "打包耗时:$timeDiffMin Min"
echo $(date "+%Y-%m-%d %H:%M:%S") > version.txt

# Remove the previously deployed files from every target.
for path in ${copyPathArr[*]}
do
	file="$path/index.html"
	if [ -e $file ]
	then
		rm $file
		if (($? == 0)); then
			LogSuccess $file'删除成功'
		else
			# fix: the error branch previously logged 删除成功 ("deleted OK")
			LogError $file'删除失败'
		fi
	else
		LogWarning "文件不存在($path/index.html)"
	fi
	dir="$path/static"
	if [ -e $dir ]
	then
		rm -rf $dir
		if (($? == 0)); then
			LogSuccess $dir'删除成功'
		else
			LogError $dir'删除失败'
		fi
	else
		LogWarning "文件夹不存在($dir)"
	fi
done

# Copy the fresh build into every target.
for path in ${copyPathArr[*]}
do
	file="$workdir/dist/index.html"
	if [ -e $file ]
	then
		cp $file $path
		if (($? == 0)); then
			LogSuccess "复制成功($file -->> $path)"
		else
			# fix: the error branch previously logged 复制成功 ("copied OK")
			LogError "复制失败($file -->> $path)"
		fi
	else
		LogWarning "文件不存在($workdir/index.html)"
	fi
	dir="$workdir/dist/static"
	if [ -e $dir ]
	then
		cp -rf $dir $path
		if (($? == 0)); then
			LogSuccess "复制成功($dir -->> $path)"
		else
			LogError "复制失败($dir -->> $path)"
		fi
	else
		LogWarning "文件夹不存在($workdir)"
	fi
done
exit
| true
|
369dcd4954c356ad0bd38e5a39ec72936c525339
|
Shell
|
stkevintan/Cube
|
/src/shell/install.sh
|
UTF-8
| 387
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash -e
# Install a freedesktop launcher for nwMusicBox that points at the directory
# this script lives in.
echo "start installation,Please do NOT run in root!"

# Absolute directory of this script.
base_dir=$(cd "$(dirname "$0")"; pwd)
desktop_file="nwMusicBox.desktop"
apps_dir="$HOME/.local/share/applications/"

# Make sure the per-user applications directory exists.
if [[ ! -d "$apps_dir" ]]; then
    mkdir -p "$apps_dir"
fi

cp -f "$base_dir/$desktop_file" "$apps_dir"
# Substitute the Exec/Icon placeholders with absolute paths.
sed -i "s@__Exec@$base_dir/nwMusicBox@g; s@__Icon@$base_dir/icon.svg@g" "$apps_dir/$desktop_file"
echo "installation completed!"
| true
|
ff6e5d5de8b9951ad8df037bc58e11eb21a1bf1a
|
Shell
|
hogus2037/laravel-server-script
|
/centos-http.sh
|
UTF-8
| 5,225
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/sh
# Build a CentOS LNMP-style web stack from source: nginx 1.13 + PHP 7.1 +
# redis 3.2 (MySQL build steps are present but commented out).
# Assumes: run as root, and all version-pinned tarballs already sit in
# $softwaredir — nothing is downloaded here.  Steps are strictly ordered.
yum -y install gcc gcc-c++ autoconf libjpeg libjpeg-devel libpng libpng-devel freetype freetype-devel libxml2 libxml2-devel zlib zlib-devel glibc glibc-devel glib2 glib2-devel bzip2 bzip2-devel ncurses ncurses-devel curl curl-devel e2fsprogs e2fsprogs-devel krb5 krb5-devel libidn libidn-devel openssl openssl-devel openldap openldap-devel nss_ldap openldap-clients openldap-servers cmake libevent-devel ntp unzip zip git svn
# Directory layout under /data
basedir=/data/
logdir=${basedir}logs
redisdir=${basedir}redis
softwaredir=${basedir}software
webdir=${basedir}www
# Pinned source versions (tarball base names, without .tar.gz)
pcre_ver=pcre-8.35
libiconv=libiconv-1.14
libmcrypt=libmcrypt-2.5.8
mhash=mhash-0.9.9.9
mcrypt=mcrypt-2.6.8
nginx_ver=nginx-1.13.5
php=php-7.1.9
redis=redis-3.2.10
mysql=mysql-5.6.21
cmake=cmake-3.8.0-rc4
mkdir -p $logdir
mkdir -p $redisdir
mkdir -p $softwaredir
mkdir -p $webdir
cd $softwaredir
# Web user/group that nginx and php-fpm run as.
groupadd www
useradd -g www www
# groupadd mysql
# useradd -g mysql mysql
chmod +w $logdir
chmod +w $webdir
chown -R www:www $logdir
chown -R www:www $webdir
# --- pcre (needed by nginx rewrite module) ---
cd $softwaredir
tar zxvf ${pcre_ver}.tar.gz
cd $pcre_ver
./configure
make && make install
# --- nginx, built against the pcre source tree above ---
cd $softwaredir
tar zxvf ${nginx_ver}.tar.gz
cd ${nginx_ver}
./configure --user=www --group=www --prefix=/usr/local/webserver/nginx --with-pcre=${softwaredir}/${pcre_ver} --with-http_stub_status_module --with-http_ssl_module
make && make install
# NOTE(review): nginx's default conf lives in conf/nginx.conf; this points at
# config/nginx.conf — confirm that file is provisioned separately.
/usr/local/webserver/nginx/sbin/nginx -c /usr/local/webserver/nginx/config/nginx.conf
# --- libiconv (charset conversion, linked into PHP below) ---
cd $softwaredir
tar zxvf ${libiconv}.tar.gz
cd ${libiconv}
./configure --prefix=/usr/local
make && make install
# --- libmcrypt + its bundled libltdl ---
cd $softwaredir
tar zxvf ${libmcrypt}.tar.gz
cd ${libmcrypt}
./configure
make
make install
/sbin/ldconfig
cd libltdl/
./configure --enable-ltdl-install
make && make install
# --- mhash ---
cd $softwaredir
tar zxvf ${mhash}.tar.gz
cd ${mhash}
./configure
make && make install
# Symlink the freshly installed crypto libs where the mcrypt/PHP builds
# expect to find them (/usr/lib).
ln -s /usr/local/lib/libmcrypt.la /usr/lib/libmcrypt.la
ln -s /usr/local/lib/libmcrypt.so /usr/lib/libmcrypt.so
ln -s /usr/local/lib/libmcrypt.so.4 /usr/lib/libmcrypt.so.4
ln -s /usr/local/lib/libmcrypt.so.4.4.8 /usr/lib/libmcrypt.so.4.4.8
ln -s /usr/local/lib/libmhash.a /usr/lib/libmhash.a
ln -s /usr/local/lib/libmhash.la /usr/lib/libmhash.la
ln -s /usr/local/lib/libmhash.so /usr/lib/libmhash.so
ln -s /usr/local/lib/libmhash.so.2 /usr/lib/libmhash.so.2
ln -s /usr/local/lib/libmhash.so.2.0.1 /usr/lib/libmhash.so.2.0.1
ln -s /usr/local/bin/libmcrypt-config /usr/bin/libmcrypt-config
# --- mcrypt CLI tool ---
cd $softwaredir
tar zxvf ${mcrypt}.tar.gz
cd ${mcrypt}
/sbin/ldconfig
./configure
make && make install
# LDAP libs live in /usr/lib64 on CentOS; PHP's configure looks in /usr/lib.
cp -frp /usr/lib64/libldap* /usr/lib/
# --- PHP 7.1 with fpm and the extensions needed by the apps ---
cd $softwaredir
tar zxvf ${php}.tar.gz
cd ${php}
./configure --prefix=/usr/local/webserver/php --with-config-file-path=/usr/local/webserver/php/etc --with-iconv-dir=/usr/local --with-freetype-dir --with-jpeg-dir --with-png-dir --with-zlib --with-libxml-dir=/usr --enable-xml --disable-rpath --enable-bcmath --enable-shmop --enable-sysvsem --enable-inline-optimization --with-curl --with-curlwrappers --enable-mbregex --enable-fpm --enable-mbstring --with-mcrypt --with-gd --enable-gd-native-ttf --with-openssl --with-mhash --enable-pcntl --enable-sockets --with-ldap --with-ldap-sasl --with-xmlrpc --enable-zip --enable-soap --without-pear --with-mysqli=mysqlnd --with-pdo-mysql=mysqlnd
make ZEND_EXTRA_LIBS='-liconv'
make install
cp php.ini-development /usr/local/webserver/php/etc/php.ini
cp /usr/local/webserver/php/etc/php-fpm.conf.default /usr/local/webserver/php/etc/php-fpm.conf
# Install the bundled SysV init script for php-fpm.
cp ${softwaredir}/${php}/sapi/fpm/init.d.php-fpm /etc/init.d/php-fpm
chmod 755 /etc/init.d/php-fpm
# --- redis server ---
cd $softwaredir
tar zxvf ${redis}.tar.gz
mv ${redis} /usr/local/webserver/redis
cd /usr/local/webserver/redis/
make && make install
# --- phpredis extension (PHP 7 branch) ---
cd $softwaredir
unzip phpredis-php7.zip
cd phpredis-php7
/usr/local/webserver/php/bin/phpize
./configure --with-php-config=/usr/local/webserver/php/bin/php-config
make && make install
# --- optional components, intentionally disabled ---
# cd $softwaredir
# tar zxvf eaccelerator-eaccelerator-42067ac.tar.gz
# cd eaccelerator-eaccelerator-42067ac
# /usr/local/webserver/php/bin/phpize
# ./configure --with-php-config=/usr/local/webserver/php/bin/php-config
# make && make install
# cd $softwaredir
# tar zxvf ${cmake}.tar.gz
# cd ${cmake}
# ./configure
# make && make install
# cd $softwaredir
# tar zxvf ${mysql}.tar.gz
# cd ${mysql}
# cmake -DCMAKE_INSTALL_PREFIX=/usr/local/webserver/mysql \
# -DMYSQL_UNIX_ADDR=/tmp/mysql.sock \
# -DDEFAULT_CHARSET=utf8 \
# -DDEFAULT_COLLATION=utf8_general_ci \
# -DWITH_EXTRA_CHARSETS:STRING=utf8,gbk \
# -DWITH_MYISAM_STORAGE_ENGINE=1 \
# -DWITH_INNOBASE_STORAGE_ENGINE=1 \
# -DWITH_MEMORY_STORAGE_ENGINE=1 \
# -DENABLED_LOCAL_INFILE=1 \
# -DMYSQL_DATADIR=/usr/local/webserver/mysql/data \
# -DMYSQL_USER=mysql \
# -DMYSQL_TCP_PORT=3306 \
# -DSYSCONFDIR=/etc \
# -DINSTALL_SHAREDIR=share
# make && make install
# chown -R mysql:mysql /usr/local/webserver/mysql
# cd /usr/local/webserver/mysql
# scripts/mysql_install_db --user=mysql --datadir=/usr/local/webserver/mysql/data
# cp support-files/mysql.server /etc/init.d/mysql
# chkconfig mysql on
# cd ${php}
# cd ext/pdo_mysql
# /usr/local/webserver/php/bin/phpize
# ./configure --with-php-config=/usr/local/webserver/php/bin/php-config --with-pdo-mysql=/usr/local/webserver/mysql
# make && make install
echo Complete!
| true
|
9f4218e79906bb13de7152bf37bfa178f4f2d597
|
Shell
|
ngkim/vagrant
|
/one-box/test_scenario/03_2_create_provider_net.sh
|
UTF-8
| 1,428
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/bash
# Create the OpenStack provider networks for the one-box test scenario:
# three VLAN networks (red shared, green, orange) and one flat local net.
# All create_* / print_title helpers and the *_NET/*_PHYSNET/*_VLAN/*_CIDR
# variables come from the sourced files below.
source "./00_check_config.sh"
# Load admin credentials only when not already present in the environment.
if [ -z ${OS_AUTH_URL+x} ]; then
    source ~/admin-openrc.sh
fi
source "$WORK_HOME/include/provider_net_util.sh"
# RED: shared VLAN provider network + subnet.
red() {
    #==================================================================
    print_title "PROVIDER_NET: RED"
    #==================================================================
    create_provider_net_shared $RED_NET $RED_PHYSNET $RED_VLAN
    create_provider_subnet $RED_NET $RED_SBNET $RED_NETWORK_CIDR
}
# GREEN: non-shared VLAN provider network + subnet.
green() {
    #==================================================================
    print_title "PROVIDER_NET: GREEN"
    #==================================================================
    create_provider_net $GRN_NET $GRN_PHYSNET $GRN_VLAN
    create_provider_subnet $GRN_NET $GRN_SBNET $GRN_NETWORK_CIDR
}
# ORANGE: non-shared VLAN provider network + subnet.
orange() {
    #==================================================================
    print_title "PROVIDER_NET: ORANGE"
    #==================================================================
    create_provider_net $ORG_NET $ORG_PHYSNET $ORG_VLAN
    create_provider_subnet $ORG_NET $ORG_SBNET $ORG_NETWORK_CIDR
}
# LOCAL: flat (untagged) provider network + subnet.
localnet() {
    #==================================================================
    print_title "PROVIDER_NET: LOCAL"
    #==================================================================
    create_flat_net $LOC_NET $LOC_PHYSNET
    create_provider_subnet $LOC_NET $LOC_SBNET $LOC_NETWORK_CIDR
}
# Create all four networks in order.
red
green
orange
localnet
| true
|
090faaa5d7ab90df448456ea8d079468ad9f099c
|
Shell
|
ef37/operations-puppet
|
/modules/role/files/toollabs/deploy-proxy.bash
|
UTF-8
| 510
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/bash
# This script does the following:
#
#  1. Stop the K8S Proxy
#  2. Download new versions of Proxy from a central location
#  3. Start the K8S Proxy

set -e

URL_PREFIX="$1"   # base URL the binaries are served from
VERSION="$2"      # release directory under URL_PREFIX

# Fail early with a usage hint instead of stopping the proxy and then
# fetching a bogus URL (the file's TODO asked for error checking).
if [ -z "$URL_PREFIX" ] || [ -z "$VERSION" ]; then
    echo "usage: $0 <url-prefix> <version>" >&2
    exit 1
fi

# Stop all the running services!
service kube-proxy stop

# Download the new things! (quoted so unusual characters in the URL survive)
wget -O /usr/local/bin/kube-proxy "$URL_PREFIX/$VERSION/kube-proxy"

# Make it executable!
chmod u+x /usr/local/bin/kube-proxy

# Start services again, and hope!
service kube-proxy start
| true
|
02e6fa34ad98033a2d0e2069043553050e05c6c1
|
Shell
|
naivekun/os6360_trtest
|
/cpss-ac3/lsp/wnc_tools/diag/diag_code/project/release/wnc-diag/usr/bin/wnc_diag/funcs/sh_funcs/yesno
|
UTF-8
| 269
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/bash
# Ask the user a yes/no question on stdin.
# Returns 0 for y/Y, 1 for n/N (and 1 on EOF); any other input re-prompts.
function yesno() {
    local result=''
    while [ -z "$result" ]; do
        # -r keeps backslashes literal; returning "no" on EOF stops a
        # closed stdin from spinning this loop forever.
        read -r result || return 1
        case "$result" in
            [Yy])
                result=0;;
            [Nn])
                result=1;;
            *)
                echo "Please enter y or n."
                # BUG FIX: reset so the loop actually re-prompts.  Previously
                # any other non-empty answer fell through to 'return $result'
                # with a non-numeric argument.
                result='';;
        esac
    done
    return $result
}
| true
|
f7da4c3b9053829687484b5c690bbacc688f5afa
|
Shell
|
mbkusharki/cornelius
|
/framework/scripts/init.sh
|
UTF-8
| 376
| 2.734375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
################################################################################
# Initialize components of Cornelius: create the expected directory layout,
# install all components, then build the main crate in release mode.
# NOTE(review): util.sh is assumed to define $FRAMEWORK, $LIB, $REPOS and
# $BASE — confirm.
source util.sh

# Create each required directory if it does not yet exist.  Quoted so paths
# with spaces survive; the loop replaces three copy-pasted if-blocks.
for dir in "$FRAMEWORK" "$LIB" "$REPOS"; do
    if [ ! -e "$dir" ]; then
        mkdir -p "$dir"
    fi
done

./install_all.sh
cd "$BASE/cornelius"
cargo build --release
| true
|
c192a32b99b7e57dbec3a4c1405e0e89a4307023
|
Shell
|
clburlison/scripts
|
/clburlison_scripts/munki/exit_on_wireless.sh
|
UTF-8
| 510
| 3.828125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# This will retrieve the primary interface of a Mac and
# exit the munki check flight script if on wireless.

# Interface carrying the default route (e.g. en0).
PORT=$(route get 0.0.0.0 2>/dev/null | awk '/interface: / {print $2}')

# Quoted so an empty/odd $PORT cannot word-split the networksetup call.
IS_WIRELESS=$(networksetup -getairportnetwork "$PORT" | awk 'NR==1')

if [[ "$IS_WIRELESS" == *"is not a Wi-Fi interface"* ]]
then
    echo "Default route is a Wired interface."
else
    # We're on a wireless connection, exit this package install
    echo "Default route is a Wi-Fi interface."
    exit 1
fi
| true
|
8659c09359816af9db1623bc93950b917f63e4d2
|
Shell
|
fakemelvynkim/dotfiles
|
/init/INSTALL.sh
|
UTF-8
| 2,034
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/sh
# Install melvynkim's dotfiles: clone the repo, symlink zsh/vim/git configs
# into $HOME, then install the NeoBundle vim plugin manager.

export DOTFILES_HOME=""

check_requirements() {
  # Offer re-install if a dotfiles directory already exists; die otherwise.
  if [ -d "${DOTFILES_HOME}" ]; then
    echo "dotfiles exist in ${DOTFILES_HOME}"
    # -r keeps backslashes in the answer literal
    read -r -p "Do you wish to re-install dotfiles? [Y]es/[N]o: " prompt_yes_or_no
    case $prompt_yes_or_no in
      [Yy]*)
        echo "Removing existing dotfiles directory.."
        rm -rf "${DOTFILES_HOME}"
        echo "Cloning dotfiles.."
        wget --quiet --no-check-certificate https://raw.github.com/melvynkim/dotfiles/master/INSTALL.sh -O - | sh
        # BUG FIX: the nested installer above performs the full install;
        # without this exit the outer script continued, tried to clone into
        # the now-existing directory, and died with the misleading
        # "git is not installed" message.
        exit 0
        ;;
      [Nn]*)
        die_on_warning "dotfiles not installed."
        ;;
      *)
        die "Enter 'Yes' or 'No'"
        ;;
    esac
  fi
}

clone_dotfiles() {
  echo "Cloning dotfiles to ${DOTFILES_HOME}.."
  # NOTE(review): the || also fires when the clone itself fails, not only
  # when git is missing — the error message can mislead.
  hash git >/dev/null 2>&1 && /usr/bin/env git clone --quiet --recursive "https://github.com/melvynkim/dotfiles" ${DOTFILES_HOME} ||
    die "git is not installed."
}

add_symbolic_links() {
  # zsh
  ln -s "${DOTFILES_HOME}/zshrc.git" "${HOME}/.zsh"
  ln -s "${DOTFILES_HOME}/zshrc.git/.zshrc" "${HOME}/.zshrc"

  # vim
  ln -s "${DOTFILES_HOME}/vimrc.git" "${HOME}/.vim"
  ln -s "${DOTFILES_HOME}/vimrc.git/.vimrc" "${HOME}/.vimrc"

  # git
  ln -s "${DOTFILES_HOME}/git/.gitignore_global" "${HOME}/.gitignore_global"
  ln -s "${DOTFILES_HOME}/git/.gitconfig" "${HOME}/.gitconfig"
  ln -s "${DOTFILES_HOME}/git/.gitattributes" "${HOME}/.gitattributes"
}

install_neobundle_vim() {
  # install NeoBundle
  # https://github.com/Shougo/neobundle.vim
  # NOTE(review): process substitution '<(...)' is a bashism under #!/bin/sh,
  # but the command explicitly invokes bash, so it works as written.
  bash <(curl -s https://raw.githubusercontent.com/Shougo/neobundle.vim/master/bin/install.sh)
}

# Warning-level abort (exit code 2).
die_on_warning() {
  echo "WARNING: $1"
  exit 2
}

# Fatal abort (exit code 1).
die() {
  echo "ERROR: $1"
  echo "Report issues at http://github.com/melvynkim/dotfiles"
  exit 1
}

DOTFILES_HOME="${HOME}/.dotfiles"
check_requirements
clone_dotfiles
add_symbolic_links
install_neobundle_vim
| true
|
1a595992cbea4294d3e96c9eb59fe8e8aef57859
|
Shell
|
openmole/mgo-benchmark
|
/lib/coco/generateInterface.sh
|
UTF-8
| 1,194
| 3.40625
| 3
|
[] |
no_license
|
# Generate the package-prefixed coco C interface source from the original
# CocoJNI template, and move the generated JNI header into $1 (build dir).
BUILD_DIR=$1
# generate coco c interface file from original CocoJNI
ls ../../src/main/scala
# Derive the JNI package prefix from the generated header file name
# ("<Pkg>_CocoJNI.h" -> "<Pkg>_").
# NOTE(review): "sed-gnu" looks like a local alias/wrapper for GNU sed
# (common on macOS where BSD sed lacks -r) — confirm it exists on the
# build machine.
PREFIX=`ls ../../src/main/scala | grep "CocoJNI" | awk -F"_" '{$NF=""; print $0}' | sed-gnu -r 's/[" "]+/_/g'`
echo "Prefix of the header: "$PREFIX
# create the source
rm $BUILD_DIR/"$PREFIX"CocoJNI.c
#cp src/CocoJNI.c src/"$PREFIX"CocoJNI.c
#cat src/CocoJNI.c | gawk -F "_" '{if ($0=="#include \"CocoJNI.h\"") print "#include \"'$PREFIX'CocoJNI.h\""; else if ($1=="JNIEXPORT void JNICALL Java"||$1=="JNIEXPORT jlong JNICALL Java"||$1=="JNIEXPORT jint JNICALL Java"||$1=="JNIEXPORT jdoubleArray JNICALL Java"||$1=="JNIEXPORT jstring JNICALL Java") print $1"_'$PREFIX'"$2"_"$3; else print $0}' > $BUILD_DIR/"$PREFIX"CocoJNI.c
# Rewrite the include line and inject the package prefix into every JNI
# export symbol while copying the template into the build dir.
cat src/CocoJNI.c | gawk -F "_" '{if ($0~"#include \"CocoJNI.h\"") print "#include \"'$PREFIX'CocoJNI.h\""; else if ($1~"JNIEXPORT void JNICALL Java"||$1~"JNIEXPORT jlong JNICALL Java"||$1~"JNIEXPORT jint JNICALL Java"||$1~"JNIEXPORT jdoubleArray JNICALL Java"||$1~"JNIEXPORT jstring JNICALL Java") print $1"_'$PREFIX'"$2"_"$3; else print $0}' > $BUILD_DIR/"$PREFIX"CocoJNI.c
# move the header (needed for compilation)
mv ../../src/main/scala/"$PREFIX"CocoJNI.h $BUILD_DIR
| true
|
669db00e0c5d8df4483cb1504bc490875bb2f7b6
|
Shell
|
jammycakes/dotfiles
|
/setup/github-clone-all.sh
|
UTF-8
| 274
| 3.109375
| 3
|
[
"MIT"
] |
permissive
|
#! /usr/bin/env bash
# Clone every public repo of github.com/jammycakes into ~/abc/github,
# skipping repos that are already present locally.

mkdir -p "$HOME/abc/github"

# Read names line by line instead of word-splitting a $(...) expansion, and
# quote every use so unusual repo names cannot break the paths.
curl -s https://api.github.com/users/jammycakes/repos | jq -r '.[].name' |
while IFS= read -r repo; do
    if [ -e "$HOME/abc/github/$repo" ]; then
        echo "Already cloned $repo"
    else
        git clone "git@github.com:jammycakes/$repo" "$HOME/abc/github/$repo"
    fi
done
| true
|
62bcae9d6e7d85244bbb980fde9d1c36c31c548c
|
Shell
|
ews-ffarella/optiMesh
|
/applications/moveLastToConstant/moveLastToConstant
|
UTF-8
| 210
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# Move the polyMesh of the latest OpenFOAM time directory into
# constant/polyMesh, then delete that time directory (fields are NOT moved).

TIME=`foamListTimes -noFunctionObjects 2> /dev/null | tail -1`

# Guard: with an empty $TIME the original expanded the cp source to
# '/polyMesh/*' and ran 'rm -fr' with no argument.
if [ -z "$TIME" ]; then
    echo "No time directories found; nothing to move." >&2
    exit 1
fi

cp -r -t constant/polyMesh/ "$TIME"/polyMesh/*
echo "Warning: fields are not moved! Everything in $TIME will be deleted"
rm -fr "$TIME"
| true
|
ffa6f03198cd3089205bfe968e0d710bae52bb1f
|
Shell
|
mas178/terraform-dl
|
/provision.sh
|
UTF-8
| 2,368
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision a GPU box for the Kaggle Carvana Image Masking Challenge:
# point Keras at TensorFlow, fetch the code, install Python deps, build
# exiftool, mount a data volume and download the competition data in the
# background.  Usage: provision.sh <kaggle-password>
echo '--------------------'
echo 'Set tensorflow as Keras backend'
echo '--------------------'
/bin/mkdir ~/.keras
echo '{"image_dim_ordering": "tf", "floatx": "float32", "backend": "tensorflow", "epsilon": 1e-07}' > ~/.keras/keras.json
echo '--------------------'
echo 'Import source code'
echo '--------------------'
git clone https://github.com/mas178/Kaggle-Carvana-Image-Masking-Challenge.git
echo '--------------------'
echo 'Install libraries'
echo '--------------------'
python3 --version
pip3 --version
cd Kaggle-Carvana-Image-Masking-Challenge
sudo pip3 install -r requirements.txt
sudo pip3 install kaggle-cli
# Upgrade everything that is outdated, then sanity-check dependencies.
sudo pip3 list --outdated --format=legacy | awk '{print $1}' | xargs sudo pip3 install -U
sudo pip3 check
echo '--------------------'
echo 'Install exiftool'
echo '--------------------'
cd ~
wget https://www.sno.phy.queensu.ca/~phil/exiftool/Image-ExifTool-10.61.tar.gz
tar -xf Image-ExifTool-10.61.tar.gz
rm Image-ExifTool-10.61.tar.gz
echo '--------------------'
echo 'Mount additional volume'
echo '--------------------'
# Format and mount the extra EBS volume as the dataset directory.
sudo mkfs -t ext4 /dev/xvdh
sudo mount /dev/xvdh ~/Kaggle-Carvana-Image-Masking-Challenge/input
sudo chown ubuntu:ubuntu ~/Kaggle-Carvana-Image-Masking-Challenge/input
df -h
echo '--------------------'
echo 'Prepare data'
echo '--------------------'
# $1 is the kaggle password for the kg CLI.
kg config -u minaba -p $1 -c carvana-image-masking-challenge
cd ~/Kaggle-Carvana-Image-Masking-Challenge/input/
# Each dataset piece downloads/unpacks in its own background job; masks are
# additionally converted gif->png and stripped of EXIF metadata.
nohup sh -c 'kg download -f sample_submission.csv.zip && unzip -q sample_submission.csv.zip && rm sample_submission.csv.zip' &
nohup sh -c 'kg download -f train_masks.csv.zip && unzip -q train_masks.csv.zip && rm train_masks.csv.zip' &
nohup sh -c 'kg download -f train_masks.zip && unzip -q train_masks.zip && rm train_masks.zip && cd train_masks/ && mogrify -format png *.gif && rm *.gif && ~/Image-ExifTool-10.61/exiftool -overwrite_original -all= *' &
nohup sh -c 'kg download -f train.zip && unzip -q train.zip && rm train.zip' &
nohup sh -c 'kg download -f test.zip && unzip -q test.zip && rm test.zip' &
nohup sh -c 'kg download -f train_hq.zip && unzip -q train_hq.zip && rm train_hq.zip' &
nohup sh -c 'kg download -f test_hq.zip && unzip -q test_hq.zip && rm test_hq.zip' &
# Give the background jobs a moment to start before the shell exits.
sleep 5
| true
|
4d01f612ce3ef2d565b360d6c47f22a30a101d46
|
Shell
|
atomduan/atm-cache
|
/auto/os/linux/conf
|
UTF-8
| 5,137
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/sh
# Autoconf-style Linux feature-detection script for the atm build system.
# Each probe sets ATM_HAVE_* via the sourced auto/feature helper by
# compiling (and optionally running) the atm_feature_test snippet.
# Ordering and exact CC_AUX_FLAGS values are load-bearing — do not reorder.
have=ATM_LINUX . auto/have_headers
atm_spacer='
'
cc_aux_flags="$CC_AUX_FLAGS"
CC_AUX_FLAGS="$cc_aux_flags -D_GNU_SOURCE -D_FILE_OFFSET_BITS=64"
# Linux kernel version
# Encode "a.b.c" as a*65536 + b*256 + c for numeric comparison.
version=$((`uname -r \
| sed -n -e 's/^\([0-9][0-9]*\)\.\([0-9][0-9]*\)\.\([0-9][0-9]*\).*/ \
\1*256*256+\2*256+\3/p' \
-e 's/^\([0-9][0-9]*\)\.\([0-9][0-9]*\).*/\1*256*256+\2*256/p'`))
version=${version:-0}
# posix_fadvise64() had been implemented in 2.5.60
if [ $version -lt 132412 ]; then
    have=ATM_HAVE_POSIX_FADVISE . auto/nohave
fi
# epoll, EPOLLET version
atm_feature="epoll"
atm_feature_name="ATM_HAVE_EPOLL"
atm_feature_run=yes
atm_feature_incs="#include <sys/epoll.h>"
atm_feature_path=
atm_feature_libs=
atm_feature_test="int efd = 0;
struct epoll_event ee;
ee.events = EPOLLIN|EPOLLOUT|EPOLLET;
ee.data.ptr = NULL;
(void) ee;
efd = epoll_create(100);
if (efd == -1) return 1;"
. auto/feature
if [ $atm_found = yes ]; then
    have=ATM_HAVE_CLEAR_EVENT . auto/have
    BUILD_SRCS="$BUILD_SRCS $EPOLL_SRCS"
    EVENT_MODULES="$EVENT_MODULES $EPOLL_MODULE"
    EVENT_FOUND=YES
    # EPOLLRDHUP appeared in Linux 2.6.17, glibc 2.8
    atm_feature="EPOLLRDHUP"
    atm_feature_name="ATM_HAVE_EPOLLRDHUP"
    atm_feature_run=no
    atm_feature_incs="#include <sys/epoll.h>"
    atm_feature_path=
    atm_feature_libs=
    atm_feature_test="int efd = 0, fd = 0;
struct epoll_event ee;
ee.events = EPOLLIN|EPOLLRDHUP|EPOLLET;
ee.data.ptr = NULL;
epoll_ctl(efd, EPOLL_CTL_ADD, fd, &ee)"
    . auto/feature
    # EPOLLEXCLUSIVE appeared in Linux 4.5, glibc 2.24
    atm_feature="EPOLLEXCLUSIVE"
    atm_feature_name="ATM_HAVE_EPOLLEXCLUSIVE"
    atm_feature_run=no
    atm_feature_incs="#include <sys/epoll.h>"
    atm_feature_path=
    atm_feature_libs=
    atm_feature_test="int efd = 0, fd = 0;
struct epoll_event ee;
ee.events = EPOLLIN|EPOLLEXCLUSIVE;
ee.data.ptr = NULL;
epoll_ctl(efd, EPOLL_CTL_ADD, fd, &ee)"
    . auto/feature
fi
# O_PATH and AT_EMPTY_PATH were introduced in 2.6.39, glibc 2.14
atm_feature="O_PATH"
atm_feature_name="ATM_HAVE_O_PATH"
atm_feature_run=no
atm_feature_incs="#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>"
atm_feature_path=
atm_feature_libs=
atm_feature_test="int fd; struct stat sb;
fd = openat(AT_FDCWD, \".\", O_PATH|O_DIRECTORY|O_NOFOLLOW);
if (fstatat(fd, \"\", &sb, AT_EMPTY_PATH) != 0) return 1"
. auto/feature
# sendfile()
# Probed without _FILE_OFFSET_BITS=64 on purpose (sendfile64 below re-adds it).
CC_AUX_FLAGS="$cc_aux_flags -D_GNU_SOURCE"
atm_feature="sendfile()"
atm_feature_name="ATM_HAVE_SENDFILE"
atm_feature_run=yes
atm_feature_incs="#include <sys/sendfile.h>
#include <errno.h>"
atm_feature_path=
atm_feature_libs=
atm_feature_test="int s = 0, fd = 1;
ssize_t n; off_t off = 0;
n = sendfile(s, fd, &off, 1);
if (n == -1 && errno == ENOSYS) return 1"
. auto/feature
if [ $atm_found = yes ]; then
    BUILD_SRCS="$BUILD_SRCS $LINUX_SENDFILE_SRCS"
fi
# sendfile64()
CC_AUX_FLAGS="$cc_aux_flags -D_GNU_SOURCE -D_FILE_OFFSET_BITS=64"
atm_feature="sendfile64()"
atm_feature_name="ATM_HAVE_SENDFILE64"
atm_feature_run=yes
atm_feature_incs="#include <sys/sendfile.h>
#include <errno.h>"
atm_feature_path=
atm_feature_libs=
atm_feature_test="int s = 0, fd = 1;
ssize_t n; off_t off = 0;
n = sendfile(s, fd, &off, 1);
if (n == -1 && errno == ENOSYS) return 1"
. auto/feature
atm_include="sys/prctl.h"; . auto/include
# prctl(PR_SET_DUMPABLE)
atm_feature="prctl(PR_SET_DUMPABLE)"
atm_feature_name="ATM_HAVE_PR_SET_DUMPABLE"
atm_feature_run=yes
atm_feature_incs="#include <sys/prctl.h>"
atm_feature_path=
atm_feature_libs=
atm_feature_test="if (prctl(PR_SET_DUMPABLE, 1, 0, 0, 0) == -1) return 1"
. auto/feature
# sched_setaffinity()
atm_feature="sched_setaffinity()"
atm_feature_name="ATM_HAVE_SCHED_SETAFFINITY"
atm_feature_run=no
atm_feature_incs="#include <sched.h>"
atm_feature_path=
atm_feature_libs=
atm_feature_test="cpu_set_t mask;
CPU_ZERO(&mask);
sched_setaffinity(0, sizeof(cpu_set_t), &mask)"
. auto/feature
# crypt_r()
atm_feature="crypt_r()"
atm_feature_name="ATM_HAVE_GNU_CRYPT_R"
atm_feature_run=no
atm_feature_incs="#include <crypt.h>"
atm_feature_path=
atm_feature_libs=-lcrypt
atm_feature_test="struct crypt_data cd;
crypt_r(\"key\", \"salt\", &cd);"
. auto/feature
# pcap()
atm_feature="pcap()"
atm_feature_name="ATM_HAVE_PCAP"
atm_feature_run=no
atm_feature_incs="#include <pcap.h>"
atm_feature_path=
atm_feature_libs=
atm_feature_test="pcap_t *pd = NULL;"
. auto/feature
atm_include="sys/vfs.h"; . auto/include
# Restore the canonical flags for the rest of the configure run.
CC_AUX_FLAGS="$cc_aux_flags -D_GNU_SOURCE -D_FILE_OFFSET_BITS=64"
| true
|
0f13bb5cc6db0f11a2c9afcfb44a22713dd3678b
|
Shell
|
rodolfo-picoreti/openstack
|
/base-image/install.sh
|
UTF-8
| 945
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision a demo host: install docker + chrony, point chrony at the local
# NTP server, trust the insecure private registry, and pre-pull all images.

# Re-exec ourselves under sudo (preserving the environment) if not root.
if [[ $EUID != 0 ]]; then
    sudo -E "$0" "$@"
    exit $?
fi

REGISTRY=10.51.0.39:5000   # private docker registry
NTP_IP=10.50.0.3           # local chrony/NTP server

# Mirror all further output into a log file.
logfile=/install.log
exec > "$logfile" 2>&1

apt update
apt install -y docker.io chrony

# Replace the 'iburst' pool line in chrony.conf with the local server.
# NOTE(review): if the file contains several 'iburst' lines, grep -n yields
# multiple line numbers and the sed address breaks — confirm single match.
linenum=`grep iburst -nr /etc/chrony/chrony.conf | cut -d : -f 1`
sed -i "${linenum}s/.*/pool ${NTP_IP} iburst/" /etc/chrony/chrony.conf
service chrony restart

echo '============= Pulling images ==============='
# Allow pulls from the registry over plain HTTP.
echo '{ "insecure-registries" : ["'$REGISTRY'"] }' > /etc/docker/daemon.json
service docker restart
docker pull python:2-alpine
docker pull $REGISTRY/is-rabbitmq:3
docker pull $REGISTRY/camera-gateway:1.1
docker pull $REGISTRY/mjpeg-server:1
docker pull $REGISTRY/aruco:1
docker pull $REGISTRY/sync:1
docker pull $REGISTRY/robot-controller:1
echo '============= Installation finished ==============='
# Simple HTTP file server on :8000 for serving artifacts to other hosts.
docker run -d --network=host -p8000:8000 python:2-alpine python -m SimpleHTTPServer
| true
|
71eb8ee2917a2a7a2dc766e0c8dea58f5baff50e
|
Shell
|
leehonan/meterman-server
|
/meterman_setup.sh
|
UTF-8
| 5,130
| 3.75
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Install/upgrade the meterman server on a Raspberry Pi.
# download with 'sudo wget https://github.com/leehonan/meterman-server/raw/master/meterman_setup.sh'
# then 'sudo chmod +x ./meterman_setup.sh'
# then 'sudo ./meterman_setup.sh -n <network_id> -d (data purge) -c (config purge)''

# Must run as root.
if [ $(/usr/bin/id -u) -ne 0 ]
then
    echo "Not running as sudo!"
    exit
fi

do_data_purge=false
do_config_purge=false
network_id=0

while getopts "n:dc" opt; do
    case $opt in
        n)
            network_id=$OPTARG
            echo "setting network id to $OPTARG" >&2
            ;;
        d)
            do_data_purge=true
            echo "meterman data purge was triggered!" >&2
            ;;
        c)
            do_config_purge=true
            echo "meterman config purge was triggered!" >&2
            ;;
        \?)
            echo "Invalid option: -$OPTARG, options are -n <network_id>, -d (data purge), -c (config purge)" >&2
            ;;
    esac
done

# Stop the service (if installed) before touching its files.
if [ -e /lib/systemd/system/meterman.service ]
then
    systemctl stop meterman.service
fi

if [ -d /home/pi/meterman ]
then
    chown -R root:root /home/pi/meterman
    chmod -R 775 /home/pi/meterman
fi

# BUG FIX: '[ $do_data_purge ]' is true for ANY non-empty string — including
# the literal "false" — so data was purged on every run, with or without -d.
if [ "$do_data_purge" = true ]
then
    echo "purging old meterman data..."
    rm /home/pi/meterman/meterman*
fi

# BUG FIX: same always-true test as above for the config purge.
if [ "$do_config_purge" = true ]
then
    echo "purging old meterman config..."
    rm /home/pi/meterman/config.txt
    rm /usr/local/lib/python3.6/site-packages/meterman/config.txt
fi

echo "Cleaning up from previous runs..."
rm /home/pi/temp/*.service*
rm /home/pi/temp/autoreset*
rm /home/pi/temp/avrdude*
rm /home/pi/temp/firmware*
rm /home/pi/temp/meterman*
rm /home/pi/temp/pishutdown*
rm -R /home/pi/temp/Python*

echo "Fetching prerequisites..."
apt-get update
apt-get install --yes --force-yes wget screen minicom sqlite3 avrdude libffi-dev libssl-dev zlib1g-dev build-essential checkinstall libreadline-gplv2-dev libncursesw5-dev libssl-dev libsqlite3-dev tk-dev libgdbm-dev libc6-dev libbz2-dev
echo "Done\n"

if [ ! -e /home/pi/temp ]
then
    mkdir /home/pi/temp
fi

# Build Python 3.6 from source if not yet installed.
if [ ! -e /usr/local/bin/python3.6 ]
then
    echo "Setting up Python 3.6..."
    cd /home/pi/temp
    wget https://www.python.org/ftp/python/3.6.0/Python-3.6.0.tgz
    tar xzvf Python-3.6.0.tgz
    cd Python-3.6.0/
    ./configure && make -j4 && make install
    echo "Done"
fi

# First-time GPIO serial + gateway flashing tool setup.
if [ ! -e /usr/bin/autoreset ]
then
    echo "Configuring GPIO Serial..."
    # Disable the login getty on whichever serial device exists.
    if [ ! -e /dev/ttyS0 ]
    then
        systemctl stop serial-getty@ttyAMA0.service
        systemctl disable serial-getty@ttyAMA0.service
    else
        systemctl stop serial-getty@ttyS0.service
        systemctl disable serial-getty@ttyS0.service
    fi
    grep -q -F 'enable_uart=1' /boot/config.txt || echo 'enable_uart=1' >> /boot/config.txt
    sed -e 's/console=serial0,115200//g' -i /boot/cmdline.txt
    # NOTE(review): this expression contains unescaped '/' inside the
    # replacement ('/sbin/getty ...') and fails as written — confirm the
    # intended substitution before relying on it.
    sed -e 's/T0:23:respawn:/sbin/getty -L ttyAMA0 115200 vt100//g' -i /etc/inittab
    echo "Setting up gateway firmware tools..."
    cd /home/pi/temp
    wget https://github.com/leehonan/rfm69-pi-gateway/raw/master/src/autoreset
    wget https://github.com/leehonan/rfm69-pi-gateway/raw/master/src/avrdude-autoreset
    wget https://github.com/leehonan/rfm69-pi-gateway/raw/master/firmware.hex
    cp firmware.hex /home/pi/
    cp autoreset /usr/bin/
    cp avrdude-autoreset /usr/bin/
    chmod +x /usr/bin/autoreset
    chmod +x /usr/bin/avrdude-autoreset
    # Wrap avrdude so the gateway is auto-reset before flashing.
    mv /usr/bin/avrdude /usr/bin/avrdude-original
    ln -s /usr/bin/avrdude-autoreset /usr/bin/avrdude
    echo "Done\n"
fi

# Install the pishutdown helper service once.
if [ ! -e /lib/systemd/system/pishutdown.service ]
then
    echo "Setting up gateway shutdown..."
    cd /home/pi/temp
    wget https://github.com/leehonan/rfm69-pi-gateway/raw/master/src/pishutdown.py
    wget https://github.com/leehonan/rfm69-pi-gateway/raw/master/src/pishutdown.service
    cp pishutdown.py /home/pi/
    chown pi:pi /home/pi/pishutdown.py
    chmod +x /home/pi/pishutdown.py
    cp pishutdown.service /lib/systemd/system
    chmod 644 /lib/systemd/system/pishutdown.service
    systemctl daemon-reload
    systemctl enable pishutdown.service
    systemctl start pishutdown.service
    echo "Done\n"
fi

echo "Setting up meterman..."
cd /home/pi/temp
wget https://github.com/leehonan/meterman-server/raw/master/meterman/meterman.service
wget https://github.com/leehonan/meterman-server/raw/master/meterman-0.1.tar.gz
pip3.6 install meterman-0.1.tar.gz --upgrade

# BUG FIX: the original tested '[ network_id != 0 ]' — a literal string
# comparison (missing $) that is always true.
if [ "$network_id" != 0 ]; then
    echo "Changing Network Id to $network_id"
    sed -i "s/network_id = 0.0.1.1/network_id = $network_id/g" -i /usr/local/lib/python3.6/site-packages/meterman/default_config.txt
fi

cp meterman.service /lib/systemd/system
chmod 644 /lib/systemd/system/meterman.service
systemctl daemon-reload
systemctl enable meterman.service
systemctl start meterman.service

echo "Done! Now..."
echo " 1) reboot if first install for GPIO serial to work"
echo " 2) stop service with 'sudo systemctl stop meterman.service'"
echo " 2) Update gateway with... 'sudo avrdude -c arduino -p atmega328p -P /dev/serial0 -b 115200 -U flash:w:/home/pi/firmware.hex'"
echo " 3) configure gateway with minicom (sudo minicom -b 115200 -o -D /dev/serial0), set neti"
echo " 4) edit /home/pi/meterman/config.txt file"
echo " 5) reboot again"
| true
|
0114f05e47540c20af4ed4848b43753fe5abb2f3
|
Shell
|
Componentality/FlexRoad-BuildSys
|
/build.sh
|
UTF-8
| 3,391
| 3.984375
| 4
|
[] |
no_license
|
#!/bin/bash
# FlexRoad build driver — helper section.  Manages three sibling git repos
# (build system, packages, main tree) plus a persisted defaults file.
CDIR="$(pwd)"
FR_BS_PATH="$CDIR"
FR_PK_PATH="$CDIR/../FlexRoad-Packages/"
FR_PATH="$CDIR/../FlexRoad/"
# Persisted settings (REMOTE/BRANCH/BUILD_ID/TARGET) live next to the script.
DEFAULTS=$(pwd)/.defaults
REMOTE=origin
BRANCH=flexroad-dev-1
BUILD_ID=0
TARGET=router
# List the remote names of the repo at $1 (one per line, deduplicated).
git_get_remote() {
    local path="$1"
    cd $path &> /dev/null
    git branch -r | sed "s/^\s\+\([^\/]\+\).*/\1/" | uniq
    cd - &> /dev/null
}
# List the branches of remote $2 in the repo at $1 (remote prefix stripped).
git_get_branch() {
    local path="$1"
    local remote="$2"
    cd $path &> /dev/null
    git branch -r | grep $remote | sed "s/^\s\+[^\/]\+\/\(.*\)/\1/"
    cd - &> /dev/null
}
# Remotes present in ALL three repositories (set intersection via comm).
git_get_allowed_remote() {
    _FIN_=`mktemp`
    TMP_BS=`mktemp`; git_get_remote $FR_BS_PATH | sort > $TMP_BS
    TMP_PK=`mktemp`; git_get_remote $FR_PK_PATH | sort > $TMP_PK
    TMP_FR=`mktemp`; git_get_remote $FR_PATH | sort > $TMP_FR
    # NOTE(review): '&>' also sends comm's stderr into the temp file.
    comm -1 -2 $TMP_BS $TMP_PK &> $_FIN_
    comm -1 -2 $TMP_FR $_FIN_
    rm -f $TMP_BS $TMP_PK $_FIN_ $TMP_FR
}
# Branches of remote $1 present in ALL three repositories.
git_get_allowed_branches() {
    local remote=$1
    _FIN_=`mktemp`
    TMP_BS=`mktemp`; git_get_branch $FR_BS_PATH $remote | sort > $TMP_BS
    TMP_PK=`mktemp`; git_get_branch $FR_PK_PATH $remote | sort > $TMP_PK
    TMP_FR=`mktemp`; git_get_branch $FR_PATH $remote | sort > $TMP_FR
    comm -1 -2 $TMP_BS $TMP_PK &> $_FIN_
    comm -1 -2 $TMP_FR $_FIN_
    rm -f $TMP_BS $TMP_PK $_FIN_ $TMP_FR
}
# Atomically rewrite $DEFAULTS with the current settings (write to a temp
# file first, then move into place).
update_config() {
    local tmp=`mktemp`
    if [ -e $DEFAULTS ]; then
        rm $DEFAULTS
    fi
    echo REMOTE=$REMOTE >> $tmp
    echo BRANCH=$BRANCH >> $tmp
    echo BUILD_ID=$BUILD_ID >> $tmp
    echo TARGET=$TARGET >> $tmp
    mv $tmp $DEFAULTS
}
# Return 0 when branch $3 of remote $2 exists locally in the repo at $1
# (checked via the branch.<name>.remote entry in git config), 1 otherwise.
git_branch_exists() {
    local path="$1"
    local remote=$2
    local branch=$3
    local out=1 # not exists
    cd "$path"
    if git config -l | grep "branch.$branch.remote=$remote" &> /dev/null; then
        out=0
    fi
    cd - &> /dev/null
    return $out
}
# Usage:
# Build targets are derived from the config-<target>.mk files present.
TARGETS=`ls -1 config-*.mk | sed -e "s/config-\(.*\).mk/\1/"`
ACTION=empty
CURR_BRANCH=`git branch | grep '*' | sed "s/\*\s//"`
# Load persisted defaults (REMOTE/BRANCH/BUILD_ID/TARGET) and re-save them.
[ -e $DEFAULTS ] && . $DEFAULTS
update_config
# choose action
case $1 in
    rootfs)
        # Bump the build id and build the root filesystem image.
        BUILD_ID=$(($BUILD_ID+1))
        update_config
        make image TARGET=$TARGET
        ;;
    initramfs)
        update_config
        make initramfs TARGET=$TARGET
        ;;
    listremote)
        git_get_allowed_remote
        ;;
    setremote)
        # Only accept remotes present in all three repositories.
        if git_get_allowed_remote | grep -E "^$2$" &> /dev/null; then
            REMOTE=$2
            update_config
        else
            echo Unknown remote "'"$2"'"
        fi
        ;;
    getremote)
        echo $REMOTE
        ;;
    getbranch)
        echo $BRANCH
        ;;
    listbranches)
        git_get_allowed_branches $REMOTE
        ;;
    setbranch)
        # Only accept branches present in all three repositories.
        if git_get_allowed_branches $REMOTE | grep -E "^$2$" &> /dev/null; then
            BRANCH=$2
            update_config
        else
            echo Unknown branch "'"$2"'"
        fi
        ;;
    switch)
        # Check out $BRANCH in all three repos, creating a local tracking
        # branch when it does not exist yet.
        for path in "$FR_BS_PATH" "$FR_PK_PATH" "$FR_PATH"; do
            git_branch_exists $path $REMOTE $BRANCH
            branch_missing=$?
            # BUG FIX: the original ran 'cd $path' BEFORE testing $?, so
            # cd's (almost always zero) status clobbered the result of
            # git_branch_exists and the '-b' tracking branch was never
            # created.  Capture the status first.
            cd $path
            if [ $branch_missing -eq 1 ]; then
                git checkout -b $BRANCH $REMOTE/$BRANCH
            else
                git checkout $BRANCH
            fi
            cd - &> /dev/null
        done
        ;;
    gettarget)
        echo $TARGET
        ;;
    listtargets)
        echo $TARGETS
        ;;
    settarget)
        # Only accept targets that have a config-<target>.mk file.
        if echo $TARGETS | tr ' ' '\n' | grep -E "^$2$" &> /dev/null; then
            TARGET=$2
            update_config
        else
            echo Unknown target "'"$2"'"
        fi
        ;;
    tag)
        echo tag current configuration [$REMOTE/$BRANCH/$TARGET/$BUILD_ID]
        echo "... not implemented yet ..."
        ;;
    *)
        # Default: rebuild the last configured rootfs if one exists.
        if [ -e $FR_PATH/.config ]; then
            echo rebuild last configuration [$REMOTE/$BRANCH/$TARGET/$BUILD_ID]
            make build_rootfs TARGET=$TARGET
            make copy_rootfs TARGET=$TARGET
        else
            echo Please configure system first.
        fi
        ;;
esac
exit 0
| true
|
c23978043832de198b1a46066f06de744f993767
|
Shell
|
Techfolio/benbalter.github.com
|
/script/bootstrap
|
UTF-8
| 271
| 2.578125
| 3
|
[
"CC-BY-3.0",
"MIT"
] |
permissive
|
#!/bin/sh
#bootstrap the development environment
set -e

echo "cloning into submodules"
git submodule init
git submodule update

# BUG FIX: '==' inside [ ] is a bashism; under #!/bin/sh on dash it fails
# with "unexpected operator".  '=' is the POSIX string-equality operator.
# NOTE(review): Travis CI exports TRAVIS=true (lowercase) — confirm that
# comparing against "TRUE" is intended.
if [ "$TRAVIS" = "TRUE" ]; then
    export NOKOGIRI_USE_SYSTEM_LIBRARIES=true
fi

echo "Bundle Installin'"
gem install bundler
bundle install
| true
|
26eb15c4a10d5ff348622b2bbb866f130891ef02
|
Shell
|
dashohoxha/DocBookWiki
|
/install/install-scripts/00-config.sh
|
UTF-8
| 324
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash -x
# Top-level DocBookWiki installer: run each numbered step in order, then lay
# the overlay files over the root filesystem (-x traces every command).
export DEBIAN_FRONTEND=noninteractive
# Directory this script lives in; the step scripts sit next to it.
cwd=$(dirname $0)
$cwd/10-install-additional-packages.sh
$cwd/20-make-and-install-docbookwiki.sh
$cwd/30-git-clone-docbookwiki.sh
$cwd/40-configure-docbookwiki.sh
### copy overlay files over to the system
# -T: copy the CONTENTS of overlay/ onto / (no /overlay dir is created),
# -d: preserve links, -R: recurse.
cp -TdR $(dirname $cwd)/overlay/ /
$cwd/50-misc-config.sh
| true
|
2ff6b301223db4c9ccad8f7d76688a249a80603d
|
Shell
|
pedrosanmi23/script2
|
/processfeed.sh
|
UTF-8
| 641
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/bash
# Fetch an RSS feed URL (read from stdin), extract each item's <title> and
# <pubDate>, and write a minimal index.html with <h1>title</h1> followed by
# <span>dd-mm-YYYY HH:MM:SS</span> for every entry.

echo "URL del feed rss"
# -r: don't mangle backslashes in the URL.
read -r url

html=""
# Fix: quote the URL so query strings / special characters survive.
wget -nv -cO - "$url" > feedrss

# Line number of the first <item>; everything before it is channel metadata.
linea=$(grep "<item>" feedrss -m 1 -n | cut -f1 -d:)

# Keep only item titles/dates, stripping the tags and CDATA wrappers;
# pairs come out as alternating title/date lines.
sed "1,$linea""d" feedrss | grep -E "<title>|<pubDate>" | sed -e 's/<title>//g' -e 's/<pubDate>//g' | sed -e 's/<\/title>//g' -e 's/<\/pubDate>//g' | sed 's/<!\[CDATA\[//g' | sed 's/\]\]>//g' > salida.txt

CONT=0
while IFS='' read -r linea || [[ -n "$linea" ]]; do
	CONT=$((CONT + 1))
	if [[ $CONT -eq 2 ]]; then
		# Second line of each pair: reformat the RFC-822 date.
		fecha=$(date -d "$linea" +%d-%m-%Y\ %T)
		html+="<span>$fecha</span>"
		CONT=0
	else
		html+="<h1>$linea</h1>"
	fi
done < salida.txt

rm salida.txt
# Fix: the original unquoted 'echo $html' word-split the markup and
# collapsed all whitespace inside titles; quote it and use printf.
printf '%s\n' "$html" > index.html
| true
|
7d0a65fe6a80ef24d1294ebcfdbc6d6d599d7abe
|
Shell
|
etsurin/cmu-script
|
/bin/run_pipeline.sh
|
UTF-8
| 443
| 3.25
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Run a pipeline main class through Maven's exec plugin.
#   $1  - Maven module to run in (-pl)
#   $2  - fully-qualified main class
#   $3+ - arguments forwarded to the program
# Honors $logfile (custom logback configuration) and $MAVEN_OPTS
# (defaults to a 15g heap).

prog_args=${@:3}

export MAVEN_OPTS=${MAVEN_OPTS:-"-Xmx15g"}

if (( $# > 0 )); then
    # Options shared by both invocations below.
    mvn_opts=(-pl "$1" -Dexec.mainClass="$2" -Dexec.args="$prog_args")
    if [[ -z "${logfile}" ]]; then
        echo "logfile not set, will use the default log file"
        mvn exec:java "${mvn_opts[@]}"
    else
        echo "logfile provided at: "${logfile}
        mvn exec:java "${mvn_opts[@]}" -Dlogback.configurationFile=${logfile}
    fi
fi
| true
|
e0e1691d7c4f4b2fe23a22716b4d538722d948b4
|
Shell
|
Roja-B/EvolvingComs
|
/code_v1_recovered/paths.sh
|
UTF-8
| 343
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
# this is for creating the paths
# Runs the base community-evolution pass once, then the path builder for
# thresholds 5, 10, ..., 35, echoing the next threshold after each run.
python communityEvolution.py

for (( step = 5; step < 40; step += 5 )); do
    python communityEvolutionPaths.py $step
    # python CommunityTopicBalatarin.py $step
    echo $(( step + 5 ))
done

# python communityEvolutionPathsTopics.py to get a list of topics for each community on the path of community evolutions
| true
|
517a7937437f68d3a4c205b4ea0c97d0cfc5d2fc
|
Shell
|
FHomps/RichText
|
/build.sh
|
UTF-8
| 3,293
| 3.71875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Cross-platform build/run helper. Dispatches on:
#   $1 (CMD)   - buildrun | build | rebuild | run | buildprod | profile
#                (defaults to buildprod)
#   $2 (BUILD) - Release | Debug (defaults to Release)
#   $3         - "vscode" to skip the PATH overrides below
# Wraps make with coloured status banners; binary name defaults to the
# current directory name (plus .exe on Windows).
CMD=$1
BUILD=$2
VSCODE=$3
# Name of the containing directory — used as the default executable name.
cwd=${PWD##*/}

if [[ $CMD == '' ]]; then
    CMD=buildprod
fi
if [[ $BUILD == '' ]]; then
    BUILD=Release
fi

# Detect the platform from $OSTYPE and pick the output binary name.
if [[ $OSTYPE == 'linux-gnu'* || $OSTYPE == 'cygwin'* ]]; then
    if [[ $OSTYPE == 'linux-gnueabihf' ]]; then
        export PLATFORM=rpi
    else
        export PLATFORM=linux
    fi
    if [[ $NAME == '' ]]; then
        export NAME=$cwd
    fi
elif [[ $OSTYPE == 'darwin'* ]]; then
    export PLATFORM=osx
    if [[ $NAME == '' ]]; then
        export NAME=$cwd
    fi
elif [[ $OSTYPE == 'msys' || $OSTYPE == 'win32' ]]; then
    export PLATFORM=windows
    if [[ $NAME == '' ]]; then
        export NAME=$cwd.exe
    fi
fi

# Outside VS Code, reset PATH to a known-good toolchain location per platform.
if [[ $VSCODE != 'vscode' ]]; then
    export PATH="/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin"
    if [[ $PLATFORM == 'windows' ]]; then
        export PATH="/c/SFML-2.5.1/bin:/c/mingw32/bin:$PATH"
    else
        if [[ $PLATFORM == 'rpi' ]]; then
            export PATH="/usr/local/gcc-8.1.0/bin:$PATH"
        fi
    fi
    echo
    echo build.sh PATH=$PATH
    echo
fi

# Choose the make executable (mingw32-make preferred on Windows).
export MAKE_EXEC=make
if [[ $PLATFORM == 'windows' ]]; then
    if [ $(type -P "mingw32-make.exe") ]; then
        export MAKE_EXEC=mingw32-make.exe
    elif [ $(type -P "make.exe") ]; then
        export MAKE_EXEC=make.exe
    fi
fi

# Any unrecognised build type falls back to Release.
if [[ $BUILD != "Release" && $BUILD != 'Debug' ]]; then
    BUILD=Release
fi

PROF_EXEC=gprof
PROF_ANALYSIS_FILE=profiler_analysis.stats

# Banner decoration ("======").
dec=\=\=\=\=\=\=

# display_styled <tput colour> <message>: bold coloured banner line.
display_styled() {
    tput setaf $1
    tput bold
    echo $dec $2 $dec
    tput sgr0
}

build_success() {
    display_styled 2 "Build Succeeded"
}

build_success_launch() {
    display_styled 2 "Build Succeeded: Launching bin/$BUILD/$NAME"
}

# Red failure banner; terminates the script.
build_fail() {
    display_styled 1 "Build Failed: Review the compile errors above"
    tput sgr0
    exit 1
}

build_prod_error() {
    display_styled 1 "Error: buildprod must be run on Release build."
    tput sgr0
    exit 1
}

launch() {
    display_styled 2 "Launching bin/$BUILD/$NAME"
}

launch_prod() {
    display_styled 2 "Launching Production Build: $NAME"
}

profiler_done() {
    display_styled 2 "Profiler Completed: View $PROF_ANALYSIS_FILE for details"
}

profiler_error() {
    display_styled 1 "Error: Profiler must be run on Debug build."
    tput sgr0
    exit 1
}

profiler_osx() {
    display_styled 1 "Error: Profiling (with gprof) is not supported on Mac OSX."
    tput sgr0
    exit 1
}

# ---- command dispatch ----
tput setaf 4
if [[ $CMD == 'buildrun' ]]; then
    if $MAKE_EXEC BUILD=$BUILD; then
        build_success_launch
        bin/$BUILD/$NAME
    else
        build_fail
    fi
elif [[ $CMD == 'build' ]]; then
    if $MAKE_EXEC BUILD=$BUILD; then
        build_success
    else
        build_fail
    fi
elif [[ $CMD == 'rebuild' ]]; then
    if $MAKE_EXEC BUILD=$BUILD rebuild; then
        build_success
    else
        build_fail
    fi
elif [[ $CMD == 'run' ]]; then
    launch
    bin/$BUILD/$NAME
elif [[ $CMD == 'buildprod' ]]; then
    # Production build only makes sense for Release.
    if [[ $BUILD == 'Release' ]]; then
        if $MAKE_EXEC BUILD=$BUILD buildprod; then
            build_success
        else
            build_fail
        fi
    else
        build_prod_error
    fi
elif [[ $CMD == 'profile' ]]; then
    # Build Debug, run the binary to produce gmon.out, then analyse with gprof.
    if [[ $PLATFORM == 'osx' ]]; then
        profiler_osx
    elif [[ $BUILD == 'Debug' ]]; then
        if $MAKE_EXEC BUILD=$BUILD; then
            build_success_launch
            tput sgr0
            bin/$BUILD/$NAME
            tput setaf 4
            gprof bin/Debug/$NAME gmon.out > $PROF_ANALYSIS_FILE
            profiler_done
        else
            build_fail
        fi
    else
        profiler_error
    fi
else
    tput setaf 1
    tput bold
    echo $dec Error: Command \"$CMD\" not recognized. $dec
    tput sgr0
fi
tput sgr0
| true
|
554153af85355ba1a582b4069d95ee03b86d1267
|
Shell
|
dushaofan/data-analysis
|
/mongo/express/mongo_storeentity_idld.sh
|
UTF-8
| 1,768
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/bash
# Fix: the shebang was "#!bin/sh" (missing leading slash), which fails when
# the script is executed directly; bash is required anyway since it uses
# `source`.
#
# Daily export of the MongoDB StoreEntity collection: dump it locally,
# upload the BSON to HDFS, load it into a Hive incremental table, then
# snapshot into a day-partitioned full table via Spark SQL.

# Date bookkeeping (epoch values in ms; *_8 variants shifted by +8h / UTC+8).
source /www/data-analysis/conf/config.sh
# NOTE(review): the two_start_* variables appear unused here — possibly kept
# for consistency with sibling scripts; confirm before removing.
two_start_date=`date -d -2day "+%Y-%m-%d"`
two_start_time=`date -d -2day "+%Y-%m-%d 00:00:00"`
start_date=`date -d yesterday "+%Y-%m-%d"`
start_time=`date -d yesterday "+%Y-%m-%d 00:00:00"`
today_date=`date "+%Y-%m-%d"`
today_time=`date "+%Y-%m-%d 00:00:00"`
start_date_time=`date -d "$start_time" +%s`000
end_date_time=`date -d "$today_time" +%s`000
begin_time_8=`expr $start_date_time + 28800000`
end_time_8=`expr $end_date_time + 28800000`

# dump path (mongodump writes to ./dump under the current directory)
local_path=`pwd`

# MongoDB connection info, parsed from the shared config file.
connect=`grep 'mongo_express_IP=' $configUrl`
connect_url=${connect##*=}
user=`grep 'mongo_express_USER=' $configUrl`
username=${user##*=}
pw=`grep 'mongo_express_PW=' $configUrl`
password=${pw##*=}
db_name=`grep 'mongo_db=' $configUrl`
database=${db_name##*=}
spark=`grep 'spark_url=' $configUrl`
spark_url=${spark##*=}

table=StoreEntity
hdfs_path=/apps/hive/warehouse/tubobo_express.mongo/${table}/

# Dump the collection from MongoDB to the local path.
source /etc/profile
#mongodump -h ${connect_url} -d express-admin -c StoreEntity
mongodump -h ${connect_url} -u ${username} -p ${password} -d ${database} -c ${table}

# Remove the old HDFS directory for this table.
source /etc/profile
hadoop fs -rmr ${hdfs_path};
# Recreate the temporary HDFS path.
hadoop fs -mkdir ${hdfs_path};
# Upload the local BSON dump into the temporary HDFS path.
hadoop fs -put ${local_path}/dump/${database}/${table}.bson ${hdfs_path}

# Load the HDFS file into the Hive incremental table.
source /etc/profile
hive -f /xinguang/mongo/express/mongo_storeentity_hive_idld.sql

# Write the incremental table into the full table; a full snapshot is
# stored per day partition.
spark-sql --master ${spark_url} -e "
insert overwrite table ds_tubobo_express.bas_storeentity_pd partition (day='${start_date}')
select * from ds_tubobo_express.bas_storeentity_idld;"
| true
|
5cd01f2442538be44910830a4f1bc53b52752fba
|
Shell
|
lixhq/docker-grafana
|
/.buildkite/build.sh
|
UTF-8
| 215
| 2.890625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Buildkite step: build the Grafana Docker image and optionally push it.
set -e

# Load build environment (GRAFANA_VERSION, DOCKER_IMAGE, VERSION, PUSH_IMAGE).
# Fix: quote the $(dirname "$0") expansions so paths with spaces work.
eval "$("$(dirname "$0")/env.sh")"

echo "--- Build image"
"$(dirname "$0")/../build.sh" "$GRAFANA_VERSION" "$DOCKER_IMAGE" "$VERSION"

if [ "$PUSH_IMAGE" == "true" ]; then
  "$(dirname "$0")/push.sh"
fi
| true
|
81a33a5535a4bb78af87879d11ce8408c6202ca7
|
Shell
|
chrismattmann/imagecat
|
/distribution/src/main/resources/bin/ingest
|
UTF-8
| 1,337
| 2.53125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Ingest a directory of products into the OODT file manager by launching
# the crawler with in-place data transfer and Tika metadata extraction.
# Usage: ingest <dir>

export DIR=`dirname $0`
cd $DIR
export DIR_PATH=`pwd`

if [ "$#" -ne 1 ]; then
    echo "Usage: $0 <dir>"
    exit 1
else
    export PRODUCT_PATH=$1
    # In-place transfer: files are catalogued where they already live.
    # NOTE(review): the Tika extractor config path below is machine-specific —
    # confirm it exists on the deployment host.
    $DIR_PATH/../crawler/bin/crawler_launcher \
        --filemgrUrl http://localhost:9000 \
        --operation --launchMetCrawler \
        --clientTransferer org.apache.oodt.cas.filemgr.datatransfer.InPlaceDataTransferFactory \
        --productPath $1 \
        --metExtractor org.apache.oodt.cas.metadata.extractors.TikaCmdLineMetExtractor \
        --metExtractorConfig /home/mattmann/data/exp5/image_catalog/deploy/data/met/tika.conf
fi
| true
|
e2c70cec6c94e0c51540efba43c767e320bb9456
|
Shell
|
kieranajp/colossal
|
/bin/consul-leave.sh
|
UTF-8
| 185
| 2.71875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Gracefully remove this node from the Consul cluster, if Consul is running.

# Fix: the process name was misspelled ("pgrep cosnul"), so the leave
# branch could never run and every node left the cluster ungracefully.
if pgrep consul > /dev/null; then
    echo "* Issuing Consul leave command"
    consul leave
else
    echo "* [warning] Consul is not running can't issue a leave command"
fi
| true
|
adec8566c92e70c1afefc74ae41944223550b2bb
|
Shell
|
nacase/scripts
|
/cpio2tar.sh
|
UTF-8
| 1,151
| 4.46875
| 4
|
[] |
no_license
|
#!/bin/bash
# Convert a CPIO[.gz|.bz2|.xz] archive to TAR format using fakeroot
#
# Author: Nate Case <nacase@gmail.com>

if [ "$2" = "" ] ; then
    echo "usage: $0 <file.cpio[{.gz|.bz2|.xz}]> <file.tar>"
    exit 1
fi

CPIOFN="$1"
TARFN="$2"
CAT="cat"

# Detect the compression wrapper so the right decompressor feeds cpio.
file "${CPIOFN}" | grep -qE "gzip compressed"
if [ $? = 0 ] ; then
    echo "gzip compression detected"
    CAT="zcat"
else
    # Fix: this previously tested the undefined variable $FNAME, so bzip2
    # archives were never detected and fell through to the xz probe.
    file "${CPIOFN}" | grep -qE "bzip2 compressed"
    if [ $? = 0 ] ; then
        echo "bzip2 compression detected"
        CAT="bzcat"
    else
        # No reliable 'file' magic check here: probe xz/lzma by attempting
        # an actual decompression pass, discarding the output.
        cat "${CPIOFN}" | xz -d > /dev/null
        if [ $? = 0 ] ; then
            echo "XZ/LZMA compression detected"
            CAT="lzcat"
        else
            echo "Archive is not compressed"
        fi
    fi
fi

echo "Extracting archive"
# Absolute path, since we pushd into the temp dir before reading it.
CPIOFN=$(readlink -e "${CPIOFN}")
TMPDIR=$(mktemp -d cpio2tar.XXXXXX)

# Extract under fakeroot so device nodes / ownership survive into the tar.
fakeroot bash -c " \
    pushd \"${TMPDIR}\"; \
    ${CAT} \"${CPIOFN}\" | cpio --extract --make-directories --no-absolute-filenames -H newc; \
    popd; \
    tar -c -C \"${TMPDIR}\" -f \"${TARFN}\" ."

echo "Created ${TARFN}"
echo "Cleaning up temporary directory ${TMPDIR}"
# Simplification: remove $TMPDIR directly instead of reconstructing its
# name from a character offset (the old TMPNUM dance).
rm -rf "${TMPDIR}"
| true
|
9499cdc4898d018f8c00f7d361e459675f19b9a7
|
Shell
|
aakashhemadri/shell
|
/examples/cut/cut.sh
|
UTF-8
| 2,452
| 4
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# @author: Aakash Hemadri
# @email:  aakashhemadri123@gmail.com
#
# Example cut commands: runs a series of demonstrative `cut` invocations,
# teeing the output to $OUTPUT/output and stderr to $ERROR.
## --------------------------------------------------------
# Function definitions
## --------------------------------------------------------

# run_command <command string> [description]
# Prints and executes the command, showing the description if given.
function run_command {
    echo "Executing command: $1"
    ## ----------------------------- ##
    #  Prints description if it exists #
    ## ----------------------------- ##
    if [ "${2}" ]; then
        echo "**************"
        echo -e "Description: ${2}"
        echo "**************"
    fi
    # ${1} is intentionally unquoted so the command string splits into words.
    # NOTE: '>' truncates $ERROR each call, so it only keeps the last
    # command's stderr.
    ${1} 2> ${ERROR}
    echo "#######################################"
}

## --------------------------------------------------------
# Constants & Variables
## --------------------------------------------------------
declare -r COMMAND="cut"
declare -r INPUT="data/input"
declare -r OUTPUT="data/output"
declare -r ERROR="data/error.log"
declare -r SPACE=" "

## --------------------------------------------------------
# Exports
## --------------------------------------------------------
export PAGER='/usr/bin/less'

## --------------------------------------------------------
# Program
## --------------------------------------------------------
echo -e "Hello!\nThis is the set of commands that are related to ${COMMAND}!"

## -------- ##
# Clean Logs #
## -------- ##
# Fix: -f so a missing log file on the first run doesn't print an error.
rm -f "data/error.log"

{
    run_command "${COMMAND} -c4 ${INPUT}/file" "Cuts character at position 4."
    run_command "${COMMAND} -c4,6 ${INPUT}/file" "Cuts characters at position 4 & 6."
    run_command "${COMMAND} -c4-7 ${INPUT}/file" "Cuts characters between the range of 4-7."
    run_command "${COMMAND} -c-7 ${INPUT}/file" "Cuts characters until position 7."
    run_command "${COMMAND} -c10- ${INPUT}/file" "Cuts character after position 10."
    run_command "${COMMAND} --delimiter=: -f2 ${INPUT}/file" "Similar to awk, returns second field\nusing the delimeter option -d."
    run_command "${COMMAND} -d: -f1 /etc/passwd" "Retrieves first field of file /etc/passwd"
    run_command "${COMMAND} --delimiter=. -f2 ${INPUT}/filenames" "Retieves the extension of the filenames."
    run_command "${COMMAND} -f 1-2,4- ${INPUT}/data.txt" "Returns range of fields"
    run_command "${COMMAND} -c 3-12 $INPUT/data.txt" "Cuts specific number of characters"
} | tee "${OUTPUT}/output"

echo -e "Output logged at ${OUTPUT}/output"

if [ -s ${ERROR} ]; then
    echo "Check error log at ${ERROR}"
fi
## --------------------------------------------------------
# End Program
## --------------------------------------------------------
| true
|
b5bbc0151003f8091494feec31c8939f506aaf5d
|
Shell
|
LyzV/bash-learn
|
/find-file
|
UTF-8
| 138
| 3.234375
| 3
|
[] |
no_license
|
#! /bin/bash
# Look for an entry named $1 in the current directory (including dotfiles).
# Exit 0 and report when found; exit 1 otherwise.

# NOTE(review): iterating $(ls -a) still word-splits names containing
# whitespace — acceptable for this exercise, but a glob would be safer.
for file in $(ls -a) ; do
	# Fix: quote both sides — the unquoted test broke (or mis-evaluated)
	# when $1 was empty or contained spaces.
	if [ "$file" = "$1" ]; then
		echo "Find file: $file"
		exit 0
	fi
done
echo "Nothing found"
exit 1
| true
|
93efa074d110ea93ae9e162ec880ba9420029ea9
|
Shell
|
pildog/ssl-scripts
|
/bin/create
|
UTF-8
| 980
| 3.625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Create a wildcard Let's Encrypt certificate for *.<domain> via certbot's
# manual DNS challenge, then copy the resulting PEM files into $SSL_PATH.
# Usage: create <email> <domain>

source $(dirname "$0")/../lib.sh

# lib.sh provides colour codes ($RED/$GREEN/$NC) and $CONFIG_FILE.
if [ ! -f $(dirname "$0")/../$CONFIG_FILE ]; then
    echo -e "${RED}Config file not found.${NC}"
    exit 0
fi
source $(dirname "$0")/../$CONFIG_FILE

SCRIPT_NAME=$(basename "$0")

if [ $# -ne 2 ]; then
    echo "$SCRIPT_NAME [E-mail] [Domain]"
    echo "Example: $SCRIPT_NAME your@email.com yourdomain.com"
    exit 1
fi

# Wildcard certs require the ACME v2 endpoint and a DNS-01 challenge.
DOMAIN="*.$2"

echo -e "> ${GREEN}Creating SSL Certificate${NC}."
sudo certbot certonly --manual --preferred-challenges=dns --email "$1" --server https://acme-v02.api.letsencrypt.org/directory --agree-tos -d "${DOMAIN}"

# Copy the live cert/key into the shared folder and hand ownership to the
# invoking user so sync tools (e.g. Dropbox) can read them.
echo -e "> Coping certificate files to Dropbox folder for ${GREEN}$2${NC}"
sudo cp /etc/letsencrypt/live/$2/fullchain.pem "${SSL_PATH}"/wild-$2.fullchain.pem
sudo cp /etc/letsencrypt/live/$2/privkey.pem "${SSL_PATH}"/wild-$2.privkey.pem
sudo chown $(whoami) "${SSL_PATH}"/wild-$2.fullchain.pem
sudo chown $(whoami) "${SSL_PATH}"/wild-$2.privkey.pem

echo -e "> ${GREEN}Job done.${NC}"
| true
|
e78d9880b5bafb16264ef382668c3cb2c16bb6cc
|
Shell
|
aerosol/dotfiles
|
/silos/scripts/bin/switch
|
UTF-8
| 265
| 2.703125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Toggle the X keyboard layout between US and Polish (intl), sending a
# desktop notification on each switch.

current=$(setxkbmap -query | grep -e layout: | cut -c 13-14)

case "$current" in
    us)
        setxkbmap -layout "pl(intl)"
        notify-send "Language" "Polish"
        ;;
    pl)
        notify-send "Language" "English"
        setxkbmap -layout us
        ;;
esac
| true
|
0c25e7dbf8d0fa671d4519323a0db5dc39936b34
|
Shell
|
simonoff/dotfiles
|
/.bashrc
|
UTF-8
| 3,281
| 3.859375
| 4
|
[] |
no_license
|
########################################################################
# Evil bash settings file for Ciaran McCreesh <ciaranm at gentoo.org>
#
# Not many comments here, you'll have to guess how it works. Note that
# I use the same .bashrc on Linux, IRIX and Slowaris, so there's some
# strange uname stuff in there.
#
# Most recent update: Sat Aug 20 20:29:08 2011
#
# Get the latest version from:
#     http://dev.gentoo.org/~ciaranm/configs/bashrc
#
########################################################################

# Cache the kernel name; fall back to "Linux" where uname misbehaves.
export UNAME_S=$(uname -s 2>&1 || echo "Linux" )

# Classify terminal colour capability: "256", "16", or "" (dumb).
if [ "${TERM}" == "rxvt-unicode" ] ; then
    export TERMTYPE="256"
elif [ "${TERM}" != "dumb" ] ; then
    export TERMTYPE="16"
else
    export TERMTYPE=""
fi

# select_by_term <256-colour arg> <16-colour arg> <fallback>
# Echoes (without newline) the argument matching the current TERMTYPE.
select_by_term() {
    if [ "${TERMTYPE}" == "256" ] ; then
        echo -n "$1"
    elif [ "${TERMTYPE}" == "16" ] ; then
        echo -n "$2"
    else
        echo -n "$3"
    fi
}

# Prepend ~/bin and /usr/local/bin to PATH only if not already present
# (the ${PATH/pattern} expansion is empty when the pattern matches).
if [ -n "${PATH/*$HOME\/bin:*}" ] ; then
    export PATH="$HOME/bin:$PATH"
fi

if [ -n "${PATH/*\/usr\/local\/bin:*}" ] ; then
    export PATH="/usr/local/bin:$PATH"
fi

# Prefer less as the pager when available.
if [ -f /usr/bin/less ] ; then
    export PAGER=less
    alias page=$PAGER
    export LESS="--ignore-case --long-prompt"
fi

alias ls="ls --color"
alias ll="ls --color -l -h"
alias la="ls -a --color"
alias pd="pushd"
alias pp="popd"

# More standard stuff
# Keep the terminal/screen title showing user@host:cwd.
case $TERM in
    xterm*|rxvt*|Eterm|eterm)
        PROMPT_COMMAND='echo -ne "\033]0;${USER}@$HOSTNAME:${PWD/$HOME/~}\007"'
        ;;
    screen)
        PROMPT_COMMAND='echo -ne "\033_${USER}@$HOSTNAME:${PWD/$HOME/~}\033\\"'
        ;;
esac

# Bash completion
[ -f /etc/profile.d/bash-completion ] && \
    source /etc/profile.d/bash-completion

# Set up host-specific things
# hostcolour [hostname]: echo the escape sequence colouring this host's
# name in the prompt (per-host colours; orange for unknown hosts).
hostcolour() {
    case ${1:-`hostname`} in
        gentoo)  # teal
            echo $(select_by_term '\033[38;5;22m' '\033[0;36m' '' )
            ;;
        devel)  # magenta
            echo $(select_by_term '\033[38;5;54m' '\033[01;35m' '' )
            ;;
        router)  # green
            echo $(select_by_term '\033[38;5;20m' '\033[01;32m' '' )
            ;;
        *)  # orange
            echo $(select_by_term '\033[38;5;68m' '\033[01;31m' '' )
            ;;
    esac
}

# Colour for the last command's exit status; passes $1 through as status.
ps1_return_colour() {
    if [ "$1" == "0" ] ; then
        echo -e $(select_by_term '\033[0;0m\033[38;5;78m' '\033[0;37m' '' )
    else
        echo -e $(select_by_term '\033[0;0m\033[38;5;64m' '\033[01;31m' '' )
    fi
    return $1
}

# Prompt components: host, user, working dir, last return code.
PS1H="\[$(hostcolour)\]${HOSTNAME}"
PS1U=$(select_by_term '\[\033[38;5;78m\]\u' '\[\033[0;39m\]\u' '\u' )
PS1D=$(select_by_term '\[\033[38;5;38m\]\W' '\[\033[01;34m\]\W' '\W' )
PS1R=$(select_by_term "\\[\\033[00;39m\\]\$?" "\\[\\033[00;39m\\]\$?" "\$?" )
export PS1E=$(select_by_term '\[\033[00m\]' '\[\033[00m\]' '' )
# NOTE(review): PS1L and PS1S are referenced but never defined in this
# chunk — presumably set elsewhere (or intentionally empty); confirm.
export PS1="${PS1U}@${PS1H} ${PS1D} ${PS1R} ${PS1L}${PS1S}${PS1E}$ "

alias cvu="cvs update"
alias cvc="cvs commit"
alias svu="svn update"
alias svs="svn status"
alias svc="svn commit"
alias ssync="rsync --rsh=ssh"
alias ssyncr="rsync --rsh=ssh --recursive --verbose --progress"
alias grab="sudo chown ${USER} --recursive"
alias hmakej="hilite make -j"
alias clean="rm *~"

# toys
makepasswords() {
    # suggest a bunch of possible passwords. not suitable for really early perl
    # versions that don't do auto srand() things.
    perl <<EOPERL
my @a = ("a".."z","A".."Z","0".."9",(split //, q{#@,.<>$%&()*^}));
for (1..10) {
    print join "", map { \$a[rand @a] } (1..rand(3)+7);
    print qq{\n}
}
EOPERL
}

# vim: set noet ts=4 tw=80 :
| true
|
b5048fbaaaee3dcd7a6cd9c4ad920451e32f2103
|
Shell
|
lnbayinen/platform-deployment-master
|
/orchestration/saltstack/prototype/orchestration-bootstrap/scripts/bootstrap-nodeX.sh
|
UTF-8
| 2,767
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
# Script that installs platform infrastructure components for NetWitness node-0
# Notes and TODO
# 1) Missing checking for existing deployment (will be priority post POC)
# 2) Allow script argument passing
# 3) Better error handling
# 4) Debug feature, and better message control on stdout
# 5) Clean up node-0 functionality
# 6) Redo functionality from where the script might have stopped (broke)
# 7) Do shiny things with getopts
#
# Usage: bootstrap-nodeX.sh <salt-master-host>
# Sets up yum repos (SaltStack + EPEL), installs/configures a Salt minion
# pointed at $1, installs the Chef client, and starts the minion service.
# Requires CentOS 7 and root privileges.

##### SETTINGS #####
CHEF_RPM_PACKAGE="chef-12.12.15-1.el7.x86_64.rpm"
SALTMASTER="$1"
####################

# Only root or a user with sudo privilege can execute this script
if [ "$EUID" -ne 0 ]
    then echo "NodeX bootstrap requires root privileges. Aborting script execution."
    exit 1
fi

# Check if system is CentOS 7 (read NAME/VERSION_ID from /etc/os-release,
# stripping surrounding quotes).
if [[ -f /etc/os-release ]]; then
    OS_NAME=`gawk -F= '/^NAME/{print $2}' /etc/os-release | sed -e 's/^"//' -e 's/"$//'`
    OS_VERSION_ID=`gawk -F= '/^VERSION_ID/{print $2}' /etc/os-release | sed -e 's/^"//' -e 's/"$//'`
else
    echo "Unable to determine operating system version. Missing /etc/os-release."
    exit 1
fi

if [[ "${OS_NAME}" != "CentOS Linux" ]] || [[ "${OS_VERSION_ID}" != "7" ]]; then
    echo "Platform ${OS_NAME} version ${OS_VERSION_ID} not supported."
    exit 1
fi

# Install the Chef client RPM straight from packages.chef.io.
function chef_client_deploy {
    echo "Delpoying Chef"
    rpm --quiet -ivh https://packages.chef.io/stable/el/7/${CHEF_RPM_PACKAGE}
}

# Write the SaltStack yum repo definition. The quoted 'EOF' keeps the
# $releasever/$basearch yum variables literal (not shell-expanded).
function saltstack_repo {
    echo "Setting up SaltStack yum repository"
    # The 1> /dev/null is to disable printing out the repo in stdout
    tee /etc/yum.repos.d/saltstack.repo 1> /dev/null <<'EOF'
[saltstack-repo]
name=SaltStack repo for Red Hat Enterprise Linux $releasever
baseurl=https://repo.saltstack.com/yum/redhat/$releasever/$basearch/latest
enabled=1
gpgcheck=1
gpgkey=https://repo.saltstack.com/yum/redhat/$releasever/$basearch/latest/SALTSTACK-GPG-KEY.pub
       https://repo.saltstack.com/yum/redhat/$releasever/$basearch/latest/base/RPM-GPG-KEY-CentOS-7
EOF
}

# This is subject to change. Currently used for the purpose of demo and POC.
function epel_install {
    echo "Installing CentOS 7 EPEL yum repository"
    rpm --quiet -ivh https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
}

function install_saltstack_minion {
    echo "Installing SaltStack Minion"
    yum -y -q install salt-minion
}

# Point the minion at the master passed as $1 (unquoted heredoc so
# ${SALTMASTER} expands).
function configure_saltstack_minion {
    echo "Configuring SaltStack Minion"
    tee /etc/salt/minion 1> /dev/null <<EOF
# Master server
master: ${SALTMASTER}
hash_type: sha256
EOF
}

function start_services {
    systemctl enable salt-minion.service
    systemctl start salt-minion.service
}

# Main Section
saltstack_repo
epel_install
install_saltstack_minion
configure_saltstack_minion
chef_client_deploy
start_services
| true
|
7daf68b0ef70c85146807719c7e9d77ecdabab66
|
Shell
|
larsoncaldwell/larsonGit
|
/racer.sh
|
UTF-8
| 212
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/sh
# Launch the Racer demo in the background and poll once per second,
# printing a sorted listing of the file.* artifacts it produces.

# clean up on ^C (and EXIT/HUP/QUIT/TERM): kill the JVM, remove artifacts.
trap 'pkill java; rm -f file.*; exit' 0 1 2 3 15

java -cp . Racer &

while :; do
    sleep 1
    find . -name "file.*" | sort
    echo "----------------------"
done
| true
|
64655f08b2d4af9e37866b5e6fb0febc0a9f1e23
|
Shell
|
DittoPardo/hootsuite-challenge
|
/test.sh
|
UTF-8
| 163
| 2.625
| 3
|
[] |
no_license
|
# Bring up the compose stack (debug + test overlays) and attach to the
# given container ($1), defaulting to hsc_web.
container="$1"

# Fix: quote the expansion — with an empty $1 the unquoted test expanded
# to '[ -z ]', which only evaluated true by accident.
if [ -z "$container" ]; then
    container=hsc_web
fi

docker-compose -f docker-compose.yml -f debug.yml -f test.yml up -d && docker attach "$container"
| true
|
9876896b0403b8a7f113ea37e49f5e6a8d50857e
|
Shell
|
H1d3r/quiver
|
/modules/qq-notes.zsh
|
UTF-8
| 1,644
| 3.96875
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env zsh

#############################################################
# qq-notes
#############################################################
qq-notes-help() {
    cat << "DOC"
qq-notes
-------
The notes namespace provides searching and reading of markdown notes that are
stored in a directory specified by the __NOTES environment variable (qq-vars-global).

Commands
--------
qq-notes-install:     installs dependencies
qq-notes:             lists all notes in $__NOTES or searches notes by filename if $1 is supplied
qq-notes-content:     list all notes in $__NOTES or searches notes by content if $1 is supplied
qq-notes-menu:        display an interactive menu for reading notes

DOC
}

# Install fzf/ripgrep via the package manager, plus glow (markdown
# renderer, via go get) and bat (preview highlighter).
qq-notes-install() {
    __info "Running $0..."
    __pkgs fzf ripgrep
    qq-install-golang
    go get -u github.com/charmbracelet/glow
    qq-install-bat
}

# Pick a note by filename (optionally filtered by $1) and render it.
qq-notes() {
    __notes-check
    __info "Use \$1 to search file names"
    select note in $(ls -R --file-type ${__NOTES} | grep -ie ".md$" | grep -i "$1")
    do test -n ${note} && break
    exit
    done
    [[ ! -z ${note} ]] && glow ${__NOTES}/${note}
}

# Pick a note whose content matches $1 (whole-word, case-insensitive).
qq-notes-content() {
    __notes-check
    __info "Use \$1 to search content"
    select note in $(grep -rliw "$1" ${__NOTES}/*.md)
    do test -n ${note} && break
    exit
    done
    [[ ! -z ${note} ]] && glow ${note}
}

# Interactive fuzzy menu: first line of each note feeds fzf, bat previews.
qq-notes-menu() {
    __notes-check
    pushd ${__NOTES} &> /dev/null
    rg --no-heading --no-line-number --with-filename --color=always --sort path -m1 "" *.md | fzf --tac --no-sort -d ':' --ansi --preview-window wrap --preview 'bat --style=plain --color=always ${1}'
    popd &> /dev/null
}
|
cf77daf06a9d177a7b4a7d5d62bd7ba77d791223
|
Shell
|
nick-smith8/Unix-Admin
|
/lab1/dashboard.sh
|
UTF-8
| 1,279
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
# One-shot system dashboard: CPU load / free RAM, per-interface network
# byte counters, internet reachability, and user-account statistics.

# NOTE(review): uptime fields $8-$10 are the load averages only for one
# phrasing of uptime's output; positions shift with locale/uptime format.
CPULoad=$(uptime | awk '{print $8 $9 $10}')
# Second-to-last row of "free -m", column 4: free memory in MB.
FreeRAM=$(free -m | awk '{print $4}' | tail -n 2 | head -n 1 )
ActiveUsers=$(uptime | awk '{print $4}')
# Idiom fix: count lines directly instead of "more /etc/passwd | wc -l".
TotalUsers=$(wc -l < /etc/passwd)
# Most common login shell (field 7 of passwd); cut reads the file directly.
MostUsedShell=$(cut -d ":" -f7 /etc/passwd | sort | uniq -c | sort -n | tail -n 1 | awk '{print $2}')

# /proc/net/dev: column 2 = RX bytes, column 10 = TX bytes. The last two
# rows are assumed to be the lo and enp0s3 interfaces.
IoBytesReceived=$(awk '{print $2}' /proc/net/dev | tail -n 1)
EnpBytesReceived=$(awk '{print $2}' /proc/net/dev | tail -n 2 | head -n 1)
IoBytesTransmitted=$(awk '{print $10}' /proc/net/dev | tail -n 1)
EnpBytesTransmitted=$(awk '{print $10}' /proc/net/dev | tail -n 2 | head -n 1)

# One quiet ping decides connectivity; test the exit status directly
# instead of inspecting $? afterwards.
if ping -q -w1 -c1 8.8.8.8 > /dev/null; then
    InternetConnected="yes"
else
    InternetConnected="no"
fi

echo -e CPU AND MEMORY RESOURCES -------------- '\n'CPU Load Average: $CPULoad  Free RAM: $FreeRAM MB'\n'
echo -e NETWORK CONNECTIONS -------------------- '\n'Io Bytes Received: $IoBytesReceived  Bytes Transmitted: $IoBytesTransmitted '\n'enp0s3 Bytes Received: $EnpBytesReceived  Bytes Transmitted: $EnpBytesTransmitted '\n'internet Connectivity: $InternetConnected'\n'
echo -e ACCOUNT INFORMATION --------------------'\n'Total Users: $TotalUsers  Number Active: $ActiveUsers '\n'Most Frequently Used Shell: $MostUsedShell
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.