blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
67a93aca7dd3c1021034fce09ff28cce4c80de3e
|
Shell
|
adithya-sn/misc_scripts
|
/jenkins_slave_alert.sh
|
UTF-8
| 390
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# Check that two Jenkins slaves accept TCP connections and post a Slack
# alert for each one that is unreachable.
# NOTE(review): IP1/IP2/PORT and the webhook path below are placeholders —
# fill in real values before deploying.

# Post a plain-text message ($1) to the Slack incoming webhook.
notify_slack() {
  curl -s -XPOST https://hooks.slack.com/services/ \
    -d '{
"text": "'"$1"'"
}'
}

# nc -vz exits 0 when the TCP port accepts a connection; capture that status.
slave_1_state=$(nc -vz IP1 PORT; echo $?)
slave_2_state=$(nc -vz IP2 PORT; echo $?)

# Quote the states: an empty value would otherwise break the test.
if [ "$slave_1_state" != '0' ]; then
  notify_slack "Slave 1 is down."
fi
if [ "$slave_2_state" != '0' ]; then
  notify_slack "Slave 2 is down."
fi
| true
|
f965a92924601da4838934d2cd00bc29091c5000
|
Shell
|
mahendran8655/Hello
|
/shellscript_practice/shellparam.sh
|
UTF-8
| 296
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
# Demonstration of the shell's special parameters.
# Quoting each message prevents accidental globbing/word-splitting of the
# expanded values.
echo "this is script name $0"
echo "this first argument is $1"
echo "this is second argument is $2"
echo "this is third argument is $3"
echo "$$ PID of the script"
echo "$# total no of arguments"
echo "$? last return code"
# (message fix: $* expands to ALL arguments joined into one string,
# not the number of arguments — that is $#)
echo "$* all of the arguments in a single string"
echo "$0 name of the script"
| true
|
3c6285dc51a21a09b827a35f424b8585028f08c5
|
Shell
|
nbrew/binfiles
|
/bin/tc
|
UTF-8
| 282
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
# Logs comments with a datestamp to a log.
# usage: tc 'JOBNUM|TASK|Comment goes here'
# >> 2012-12-14 14:46:08 | JOBNUM|TASK|Comment goes here
for msg in "$@"; do
  # $() instead of backticks; printf instead of echo so messages containing
  # backslashes or leading dashes are logged verbatim.
  stamp=$(date +'%Y-%m-%d %H:%M:%S') && printf '%s | %s\n' "$stamp" "$msg" >> ~/Documents/timecard.txt
done
exit
| true
|
1aac014ecdbc991212b9de2c6337c1437abe37d5
|
Shell
|
metwork-framework/mfdata
|
/integration_tests/0006_process_a_thousand_files/test.sh
|
UTF-8
| 1,436
| 3.609375
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Create and install plugin foobar6 archiving PNG images
# Inject 1000 PNG files
# Check number of PNG file and corresponding tags files on archive
# Check no tags left in redis

# Test helpers (check_no_tags_left_in_redis, wait_conf_monitor_idle,
# wait_dir, wait_empty_dir) come from the shared support script.
source ../support.sh
check_no_tags_left_in_redis
# Clean up leftovers from a previous run; errors are intentionally ignored.
plugins.uninstall --clean foobar6 >/dev/null 2>&1
DEST_DIR="${MFMODULE_RUNTIME_HOME}/var/archive/$(date +%Y%m%d)"
rm -R "${DEST_DIR}" >/dev/null 2>&1
plugins.install --new-name=foobar6 ${MFDATA_HOME}/share/plugins/archive-*.plugin
# Route every incoming file to this plugin and archive under YYYYMMDD/<name>.
cat >"${MFMODULE_RUNTIME_HOME}/config/plugins/foobar6.ini" <<EOF
[switch_rules:alwaystrue]
* = {{MFDATA_CURRENT_PLUGIN_NAME}}/main
[custom]
dest_basename = %Y%m%d/{ORIGINAL_BASENAME}
EOF
wait_conf_monitor_idle
echo "Injecting 1000 files"
# Copy to a temporary ".t" name first, then rename into place, so the
# watcher never picks up a partially written file.
for ((i=1;i<=1000;i++)); do
cp -f ../data/Example.png "${MFMODULE_RUNTIME_HOME}/var/in/incoming/Example$i.png.t"
mv "${MFMODULE_RUNTIME_HOME}/var/in/incoming/Example$i.png.t" "${MFMODULE_RUNTIME_HOME}/var/in/incoming/Example$i.png"
done
wait_dir "${DEST_DIR}" 10
wait_empty_dir "${MFMODULE_RUNTIME_HOME}/var/in/incoming" 60
# Poll for up to 60s until all 1000 archived PNGs AND their .tags files appear.
n=0
while test $n -lt 60; do
N1=$(ls ${DEST_DIR}/Example*.png |wc -l)
N2=$(ls ${DEST_DIR}/Example*.png.tags |wc -l)
if test "${N1}" = 1000; then
if test "${N2}" = 1000; then
break
fi
fi
sleep 1
n=$(expr $n + 1)
done
# Timed out without both counts reaching 1000.
if test $n -ge 60; then
echo "ERROR: missing files"
exit 1
fi
check_no_tags_left_in_redis
plugins.uninstall --clean foobar6
rm -Rf "${DEST_DIR}"
| true
|
2ce310f0a76473ad9e60663ea3b5c50d8995f1a0
|
Shell
|
Piropa/YAMon-v4-alt
|
/includes/setupIPChains.sh
|
UTF-8
| 6,374
| 3.578125
| 4
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
##########################################################################
# Yet Another Monitor (YAMon)
# Copyright (c) 2013-present Al Caughey
# All rights reserved.
#
# functions to define chains in iptables & optionally ip6tables
#
# History
# 2020-03-20: 4.0.7 - added wait option ( -w -W1) to commands that add entries in iptables;
# - then added _iptablesWait 'cause not all firmware variants support iptables -w...
# 2020-01-03: 4.0.6 - added check for _logNoMatchingMac in SetupIPChains
# 2019-12-23: 4.0.5 - no changes
# 2019-11-24: 4.0.4 - no changes (yet)
# 2019-06-18: development starts on initial v4 release
#
# To Do:
# * allow comma separated list of guest interfaces
# * add ip6 addresses for interfaces
#
##########################################################################
_PRIVATE_IP4_BLOCKS='10.0.0.0/8,172.16.0.0/12,192.168.0.0/16'
_PRIVATE_IP6_BLOCKS='fc00::/7,ff02::/7'
_LOCAL_IP4='255.255.255.255,224.0.0.1,127.0.0.1'
_LOCAL_IP6=''
# Create/verify the YAMon chains in iptables (and ip6tables when
# $ip6Enabled is set), seed them with private-block and local-address
# bypass rules, and hook them into the FORWARD/INPUT/OUTPUT tables.
SetupIPChains(){
# Ensure chain "$YAMON_IPTABLES$ch" exists for the current $cmd; create it if absent.
CheckChains(){
local chain="$YAMON_IPTABLES$ch"
local ce=$(echo "$ipchains" | grep "\b$chain\b")
Send2Log "CheckChain: $chain --> '$ce'"
if [ -z "$ce" ] ; then
Send2Log "CheckChains: Adding $chain in $cmd" 2
eval $cmd -N $chain "$_iptablesWait"
else
Send2Log "CheckChain: $chain exists in $cmd" 1
fi
}
# Ensure exactly one jump to the YAMon "Entry" chain exists in table $tbl.
CheckTables()
{
local rule="${YAMON_IPTABLES}Entry"
local foundRuleinChain=$(eval $cmd -nL "$tbl" | grep -ic "\b$rule\b")
if [ "$foundRuleinChain" == "1" ] ; then
Send2Log "CheckTables: '$cmd' rule $rule exists in chain $tbl" 1
return
elif [ "$foundRuleinChain" -eq "0" ]; then
Send2Log "CheckTables: Created '$cmd' rule $rule in chain $tbl" 2
eval $cmd -I "$tbl" -j "$rule" "$_iptablesWait"
return
fi
#its unlikely you should get here... but added defensively
Send2Log "CheckTables: Found $foundRuleinChain instances of '$cmd' $rule in chain $tbl... deleting entries individually rather than flushing!" 3
local i=1
# Delete the duplicates one rule number at a time, then re-insert a single rule.
while [ "$i" -le "$foundRuleinChain" ]; do
local dup_num=$($cmd -nL "$tbl" --line-numbers | grep -m 1 -i "\b$rule\b" | cut -d' ' -f1)
eval $cmd -D "$tbl" $dup_num "$_iptablesWait"
i=$(($i+1))
done
eval $cmd -I "$tbl" -j "$rule" "$_iptablesWait"
}
# Flush the YAMon chains, then add pairwise src/dst rules for the private
# address blocks so LAN-to-LAN traffic is diverted to the "Local" chain.
AddPrivateBlocks(){
$cmd -F "$YAMON_IPTABLES"
$cmd -F "$ent"
$cmd -F "$loc"
Send2Log "AddPrivateBlocks: $cmd / '$YAMON_IPTABLES' / '$ent' / '$loc' / $ip_blocks" 1
IFS=$','
for iprs in $ip_blocks
do
for iprd in $ip_blocks
do
# NOTE(review): firmware type 0 with ip6tables takes the -j path instead
# of goto (-g) — presumably that firmware's ip6tables lacks -g; confirm.
if [ "$_firmware" -eq "0" ] && [ "$cmd" == 'ip6tables' ] ; then
eval $cmd -I "$ent" -j "RETURN" -s $iprs -d $iprd "$_iptablesWait"
eval $cmd -I "$ent" -j "$loc" -s $iprs -d $iprd "$_iptablesWait"
else
eval $cmd -I "$ent" -g "$loc" -s $iprs -d $iprd "$_iptablesWait"
fi
done
done
eval $cmd -A "$ent" -j "${YAMON_IPTABLES}" "$_iptablesWait"
eval $cmd -I "$loc" -j "RETURN" "$_iptablesWait"
Send2Log "chains --> $cmd / $YAMON_IPTABLES --> $(IndentList "$($cmd -L -vx | grep $YAMON_IPTABLES | grep Chain)")"
}
# Add bypass rules for the addresses in $ip_addresses, matching each one
# as both source and destination.
AddLocalIPs(){
Send2Log "AddLocalIPs: $cmd / '$YAMON_IPTABLES' / '$ent' / '$loc' / $ip_addresses" 1
IFS=$','
for ip in $ip_addresses
do
if [ "$_firmware" -eq "0" ] && [ "$cmd" == 'ip6tables' ] ; then
eval $cmd -I "$ent" -j "RETURN" -s $ip "$_iptablesWait"
eval $cmd -I "$ent" -j "RETURN" -d $ip "$_iptablesWait"
eval $cmd -I "$ent" -j "$loc" -s $ip "$_iptablesWait"
eval $cmd -I "$ent" -j "$loc" -d $ip "$_iptablesWait"
else
eval $cmd -I "$ent" -g "$loc" -s $ip "$_iptablesWait"
eval $cmd -I "$ent" -g "$loc" -d $ip "$_iptablesWait"
fi
done
}
#Main body of function
local commands='iptables'
[ -n "$ip6Enabled" ] && commands='iptables,ip6tables'
# Leading comma gives an empty first element, i.e. the base chain itself.
local chains=",Entry,Local"
local tables="FORWARD,INPUT,OUTPUT"
local loc="${YAMON_IPTABLES}Local"
local ent="${YAMON_IPTABLES}Entry"
IFS=$','
for cmd in $commands
do
Send2Log "SetupIPChains --> $cmd" 1
local ipchains=$(eval "$cmd" -L | grep "Chain $YAMON_IPTABLES")
for ch in $chains
do
CheckChains
done
# Select the v4 or v6 constants for this command.
if [ "$cmd" == 'iptables' ] ; then
local ip_blocks="$_PRIVATE_IP4_BLOCKS"
local ip_addresses="$_LOCAL_IP4"
else
local ip_blocks="$_PRIVATE_IP6_BLOCKS"
local ip_addresses="$_LOCAL_IP6"
fi
AddPrivateBlocks
AddLocalIPs
for tbl in $tables
do
CheckTables
done
# Tail rule: either log packets with no matching MAC, or just return.
if [ "${_logNoMatchingMac:-0}" -eq "1" ] ; then
eval $cmd -A "$YAMON_IPTABLES" -j LOG --log-prefix "YAMon: " "$_iptablesWait"
else
eval $cmd -A "$YAMON_IPTABLES" -j RETURN
fi
done
}
# Discover network interfaces, register their MAC/IP pairs in the user JS
# data, record byte counters from /proc/net/dev, and register the generic
# "No Matching Device" placeholder entries.
AddNetworkInterfaces(){
Send2Log "AddNetworkInterfaces:" 1
listofInterfaces=$(ls /sys/class/net)
#[ -z "$listofInterfaces" ] && "$(ifconfig | grep HWaddr | awk '{print $1}')"
local re_mac='([a-f0-9]{2}:){5}[a-f0-9]{2}'
IFS=$'\n'
interfaceList=''
for inf in $listofInterfaces
do
ifc=$(ifconfig $inf)
# Lower-cased MAC from ifconfig's "HWaddr" field; skip interfaces without one.
mac=$(echo "$ifc" | grep -o 'HWaddr.*$' | cut -d' ' -f2 | tr "[A-Z]" "[a-z]")
[ -z "$mac" ] && continue
# Skip anything that does not look like a well-formed MAC address.
if [ -z "$(echo "$mac" | grep -Ei "$re_mac")" ] ; then
Send2Log "AddNetworkInterfaces: bad mac --> $mac from $ifc" 1
continue
fi
# IPv4/IPv6 addresses as reported by ifconfig; skip if neither exists.
inet4=$(echo "$ifc" | grep 'inet addr' | cut -d: -f2 | awk '{print $1}')
inet6=$(echo "$ifc" | grep 'inet6 addr'| awk '{print $3}')
[ -z "$inet4" ] && [ -z "$inet6" ] && continue
iplist=$(echo -e "$inet4\n$inet6")
Send2Log "AddNetworkInterfaces: $inf --> $mac $(IndentList "$iplist")" 1
for ip in $iplist
do
[ -z "$ip" ] && continue
CheckMAC2IPinUserJS "$mac" "$ip" "$inf"
CheckMAC2GroupinUserJS "$mac" 'Interfaces'
CheckIPTableEntry "$ip" "Interfaces"
done
interfaceList="$interfaceList,$inf"
done
# Strip the leading comma before saving the accumulated list.
interfaceList=${interfaceList#,}
AddEntry "_interfaces" "$interfaceList"
IFS=$'\n'
# Matching lines from /proc/net/dev (interface names joined with '|').
pnd=$(cat "/proc/net/dev" | grep -E "${interfaceList//,/|}")
for line in $pnd
do
# Interface name with '-' replaced by '_' and the trailing ':' removed,
# so it is safe to use as an entry key.
ifn=$(echo "$line" | awk '{ print $1 }' | sed -e 's~-~_~' -e 's~:~~')
# Fields 10 and 2 — presumably TX bytes and RX bytes in the standard
# /proc/net/dev layout; confirm against the consumer of these entries.
AddEntry "interface_${ifn}" "$(echo "$line" | awk '{ print $10","$2 }')"
done
# Placeholder entries used when traffic matches no known device MAC.
CheckMAC2IPinUserJS "$_generic_mac" "$_generic_ipv4" "No Matching Device"
[ "$ip6Enabled" == '1' ] && CheckMAC2IPinUserJS "$_generic_mac" "$_generic_ipv6" "No Matching Device"
CheckMAC2GroupinUserJS "$_generic_mac" "$_defaultGroup"
}
| true
|
956ae68b320f9cb059b41565bf8ed73109ecf925
|
Shell
|
owlxof/batteryguard
|
/brightness_bell.sh
|
UTF-8
| 165
| 2.578125
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#!/usr/bin/bash
# Flash the screen backlight twice (a visual "bell"), then restore the
# previous brightness level.
PREV=$(xbacklight -get)
for i in {1..2}; do
  xbacklight -set 100
  sleep 0.001
  xbacklight -set 0
  sleep 0.001
done
# Quoted so an empty/odd value from xbacklight -get can't word-split.
xbacklight -set "$PREV"
| true
|
ebb7ded320f85aafb9c3b3f42a9cf37d2a492e13
|
Shell
|
vforgione/dotfiles
|
/macos/install-packages.sh
|
UTF-8
| 528
| 3
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Install the macOS toolchain: Xcode command-line tools, Homebrew,
# Brewfile formulae, and asdf.
echo ""
echo "  INSTALLING MACOS PACKAGES"
echo "............................."
if ! command -v brew &>/dev/null; then
  echo "ensuring xcode cmd line tools installed..."
  # FIX: the documented flag is --install; "-install" is not listed in
  # the xcode-select man page.
  xcode-select --install
  echo "downloading and running brew setup script..."
  /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
fi
echo "brew installing formulae..."
brew bundle install
echo "installing asdf..."
git clone https://github.com/asdf-vm/asdf.git ~/.asdf --branch v0.8.0
| true
|
228ea3c11d98dbe2e5bdf14943355ce26ef8ae1c
|
Shell
|
moisesrbd/ngm
|
/scripts/legal_md_to_html.sh
|
UTF-8
| 193
| 3.0625
| 3
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash -e
# Convert each legal/*.md file to HTML with the shared template and write
# the results to dist/legal/.
mkdir -p dist/legal/
template=$(<legal/template.html)
for file in legal/*.md; do
  # Quote the filename so paths with spaces survive.
  html=$(marked -i "$file")
  # Substitute the rendered markdown for every _CONTENT_ placeholder.
  echo "${template//_CONTENT_/$html}" > "dist/${file/.md/.html}"
done
| true
|
ef549d88ea13ff11ad39b1f8a211953bbc420a81
|
Shell
|
mrVanDalo/overlay
|
/travis/emerge/emerge.sh
|
UTF-8
| 3,855
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
#
# emerge a package
set -xe
if [[ "$#" -le 0 ]] || [[ "$#" -gt 2 ]]
then
echo "Usage: $0 =<ebuild category>/<ebuild name>-<version>" >&2
echo "Usage: $0 <ebuild category>/<ebuild name>" >&2
exit 1
fi
PACKAGE=$1
CONFIG=$2
cat <<EOF
-----------------------------------------------
Emerging ${PACKAGE}
${CONFIG}
-----------------------------------------------
EOF
# Print a failure banner naming ${PACKAGE} (and ${CONFIG}) and abort the script.
function die(){
cat <<EOF
!---------------------------------------------!
Failed Build : ${PACKAGE}
${CONFIG}
!---------------------------------------------!
EOF
exit 1
}
# Disable news messages from portage and disable rsync's output
export FEATURES="-news" \
PORTAGE_RSYNC_EXTRA_OPTS="-q"
# Fix repository name
cat >/etc/portage/repos.conf/localOverlay <<EOF
[mrVanDalo]
location = /ebuilds
masters = gentoo
auto-sync = no
EOF
# Update the portage tree
emerge --sync
# Set portage's distdir to /tmp/distfiles
# This is a workaround for a bug in portage/git-r3 where git-r3 can't
# create the distfiles/git-r3 directory when no other distfiles have been
# downloaded by portage first, which happens when using binary packages
# https://bugs.gentoo.org/481434
export DISTDIR="/tmp/distfiles"
# Source ebuild specific env vars
unset ACCEPT_KEYWORDS
unset USE
TRAVIS_EBUILD_HELP_PACKAGES=()
if [[ -z ${CONFIG} ]]
then
echo "no config-file set emerge as default"
else
echo "run config-file ${CONFIG}"
. ${CONFIG}
fi
# Build a single helper package (reusing/producing binary packages, with
# autounmask) so it lands in the Travis cache, then abort this job so it
# can be re-run with a warm cache.
function help_package_emerge_and_exit(){
help_pkg=$1
cat <<EOF
------------------------------------------------------------------------------------
install help package : ${help_pkg}
------------------------------------------------------------------------------------
EOF
emerge \
--verbose \
--quiet-build \
--buildpkg \
--usepkg \
--getbinpkg \
--autounmask=y \
--autounmask-continue=y \
"${help_pkg}" || die
cat <<EOF
------------------------------------------------------------------------------------
This is just a build that is depended to be in the cache
otherwise the real build will not proceed in time.
>>>>> HIT THE REBUILD BUTTON !!! <<<<<<<
------------------------------------------------------------------------------------
EOF
# yes it should exit with success
# otherwise the cache will not be stored by travis
# NOTE(review): the comment above says "exit with success" but the code
# exits 1 — confirm which is intended for Travis's cache-on-failure rules.
exit 1
}
# help travis to fill cache without running in timeouts
for help_pkg in "${TRAVIS_EBUILD_HELP_PACKAGES[@]}"
do
emerge -p --usepkgonly ${help_pkg} || help_package_emerge_and_exit ${help_pkg}
done
# Emerge dependencies first
emerge \
--verbose \
--quiet-build \
--buildpkg \
--usepkg \
--getbinpkg \
--onlydeps \
--autounmask=y \
--autounmask-continue=y \
"${PACKAGE}" || die
# Emerge the ebuild itself
emerge \
--verbose \
--usepkg=n \
--getbinpkg=n \
--buildpkg \
"${PACKAGE}" || die
# Print out some information about dependencies at the end
cat <<EOF
----------------------------------------------------------
RDEPEND information :
----------------------------------------------------------
EOF
cat <<EOF
linked packages :
EOF
qlist -e ${PACKAGE} \
| grep -ve "'" \
| xargs scanelf -L -n -q -F "%n #F" \
| tr , ' ' \
| xargs qfile -Cv \
| sort -u \
| awk '{print $1}' \
| uniq \
| xargs qatom --format "%{CATEGORY}/%{PN}" || exit 0
cat <<EOF
linked virtual packages :
EOF
qlist -e ${PACKAGE} \
| grep -ve "'" \
| xargs scanelf -L -n -q -F "%n #F" \
| tr , ' ' \
| xargs qfile -Cv \
| sort -u \
| awk '{print $1}' \
| uniq \
| xargs qatom --format "%{CATEGORY}/%{PN}" \
| xargs -L1 qdepends --nocolor --name-only --rdepend --query \
| grep ^virtual \
| uniq || exit 0
| true
|
24914f77650f836e570f5979a767b998a89a0266
|
Shell
|
dsantorojr/multegula
|
/cleanup.sh
|
UTF-8
| 992
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
###########################################################
# Multegula - cleanup.sh                                  #
# Kills lingering Multegula processes and helper scripts. #
# Armin Mahmoudi, Daniel Santoro, Garrett Miller, Lunwen He#
###########################################################
echo "Cleaning up processes and sockets..."
# Terminate any running Go/Multegula binaries.
for proc in go multegula bootstrap BootstrapServer; do
  pkill -9 "$proc" 2> /dev/null
done
pkill -9 -f multegulaUI.py 2> /dev/null
# Stop the launcher scripts too.
for script in run_local_multi.sh run.sh; do
  killall "$script" 2> /dev/null
done
####################################
## To be EXTRA clean - maybe remove these for release.
## We don't want users accidentally killing important processes.
####################################
for interp in python python3 Python Python3; do
  pkill -9 -f "$interp" 2> /dev/null
done
#################################
echo "The light is green, the trap is clean."
| true
|
b08b53107ec17b4f6f0527eb1eeebf5f746e165d
|
Shell
|
fecloud/bdsync
|
/bdsync
|
UTF-8
| 2,178
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/sh
### BEGIN INIT INFO
#
# Provides: bdsync
# Required-Start: $local_fs $remote_fs
# Required-Stop: $local_fs $remote_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: initscript
# Description: This file should be used to construct scripts to be placed in /etc/init.d.
#
### END INIT INFO
## Fill in name of program here.
PROG="bdsync"
PROG_PATH="/data/app/$PROG" #::# Not need, but sometimes helpful (if $PROG resides in /opt for example).
VM_RUN="/data/app/jdk1.8.0_60/bin/java"
# JMX remote-debug flags — defined here but not passed to the JVM below.
VM_DEBUG="-Dcom.sun.management.jmxremote.port=8999 -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false -Djava.rmi.server.hostname=10.0.0.8"
VM_ARGS="-Dbdsync.db=$PROG_PATH/resource/$PROG.db -Dbdsync.log.file=/var/log/$PROG.log -Dbdsync.log.priority=DEBUG -Dbdsync.cookie=$PROG_PATH/resource/cookie.json -Dbdsync.downthread=4 -Dbdsync.interval=86400000"
VM_JAR="$PROG_PATH/lib/bdsync.jar"
PROG_ARGS="sync / /hdd -l git bak tmp thum s 影视 TDDOWNLOAD ThunderDB lost+found ipc 6s h3 -c 160 apps h3"
MSG_PREFIX=" *"
USER="linaro"
# Launch the JVM in the background as $USER.
start() {
PROG_STATUS=0
# NOTE(review): "2>&1 >/dev/null" redirects stderr to the *original*
# stdout, then sends stdout to /dev/null — stderr is NOT silenced.
# ">/dev/null 2>&1" would silence both; confirm which was intended.
su -c "$VM_RUN $VM_ARGS -jar $VM_JAR $PROG_ARGS 2>&1 >/dev/null &" $USER
# NOTE(review): this is su's exit status; the java process is backgrounded
# inside su, so this is unlikely to reflect whether the JVM is running.
PROG_STATUS=$?
if [ $PROG_STATUS -eq 1 ]; then
## Program is running, exit with error.
echo "$MSG_PREFIX $PROG is currently running..."
else
echo "$MSG_PREFIX $PROG started"
fi
}
# Ask the running instance to stop (the jar implements the "stop" argument).
stop() {
su -c "$VM_RUN $VM_ARGS -jar $VM_JAR stop 2>&1 >/dev/null" $USER
PROG_STATUS=$?
if [ $PROG_STATUS -eq 0 ]; then
## Program is running, so stop it
echo "$MSG_PREFIX $PROG stopped"
else
## Program is not running, exit with error.
echo "$MSG_PREFIX $PROG not started!"
fi
}
## Check to see if we are running as root first.
# Dispatch on the init action argument.
case "$1" in
start)
start
exit 0
;;
stop)
stop
exit 0
;;
reload|restart|force-reload)
stop
start
exit 0
;;
**)
echo "Usage: $0 {start|stop|reload}" 1>&2
exit 1
;;
esac
| true
|
0df214aaa2979ac40c06885822d13907de8cc095
|
Shell
|
blakezimmerman/scripts
|
/ts-node-init.sh
|
UTF-8
| 1,333
| 3.625
| 4
|
[] |
no_license
|
#!/bin/sh
# Scaffold a minimal TypeScript + ts-node project:
# <name>/package.json, tsconfig.json, .gitignore, src/index.ts, plus the
# dev dependencies installed via npm.
# Usage: ts-node-init.sh <project-name>

# Check if a command is available; abort with a message if not.
checkCommand() {
  command -v "$1" >/dev/null 2>&1 || {
    echo >&2 "I require $1 but it's not installed. Aborting."; exit 1;
  }
}

checkCommand node
checkCommand npm

PROJECT_NAME=$1
# Require a project name — without this check the script would scaffold
# files into the current directory with empty names.
if [ -z "$PROJECT_NAME" ]; then
  echo >&2 "Usage: $0 <project-name>"; exit 1
fi

mkdir "$PROJECT_NAME" || exit 1
# Create package.json
cat << EOF > "$PROJECT_NAME/package.json"
{
"name": "$PROJECT_NAME",
"version": "0.1.0",
"description": "",
"main": "src/index.ts",
"scripts": {
"start": "NODE_PATH=./src ts-node src/index.ts"
},
"keywords": [],
"author": "",
"license": "ISC"
}
EOF
# Create tsconfig.json
cat << EOF > "$PROJECT_NAME/tsconfig.json"
{
"compilerOptions": {
"sourceMap": true,
"alwaysStrict": true,
"strict": true,
"lib": ["esnext"],
"module": "commonjs",
"moduleResolution": "node",
"target": "es2019",
"allowJs": true,
"resolveJsonModule": true,
"allowSyntheticDefaultImports": true,
"esModuleInterop": true,
"baseUrl": "src"
},
"include": ["src/**/*"],
"exclude": ["node_modules"]
}
EOF
# Create .gitignore
cat << EOF > "$PROJECT_NAME/.gitignore"
.DS_Store
node_modules
npm-debug.log
EOF
# Guard the cd so npm can't run in the wrong directory.
cd "$PROJECT_NAME" || exit 1
# Create initial source file
mkdir src
touch src/index.ts
# Install dependencies
npm install --save-dev \
  @types/node \
  ts-node \
  typescript
echo "All done! Have fun writing your Node app! 🎉"
| true
|
225d10edb54fb80173b1ef7ac19822828b5c33de
|
Shell
|
diqidoq/pw
|
/pw
|
UTF-8
| 5,313
| 3.5
| 4
|
[] |
no_license
|
#!/bin/bash
# PW - simple password user account storage management based on CSV.
# Copyright (c) 2016 Digidog (aka. diqidoq) - Released under GPL v3
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

# Print the 19-column layout of the CSV database.
DataTable(){
cat << EOF
--------------
| Full Table |
--------------
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 |
|-----|------|-----|------|------|------|----|-----|---|-------|----|------|------|-------|------|-----|----|----|-----|
| cat | type | srv | http | nick | name | id | crn | @ | login | pw | note | addr | btown | bday | age | Q | A | use |
EOF
}

# Print usage help followed by the column layout.
Help(){
cat << EOF
Type pw <username> or pw <service> <username> or any other combination (e.g. pw <real neme> <cat>) to filter output.
Standart Output:
--------------------------------------------------------
type c2 | service c3| uname c5 | email c9 | password c11
--------------------------------------------------------
EOF
DataTable
cat << EOF
[!] For input you can use both directives (nr or label) to target the input column.
[!] cat stands for categories. you can choose your own tags or a team based tag system for later filtering
-i = input by simple form fields asking you: column=value column=value
-a = add a row of raw csv line, e.g.: cat;type;srv;-;yournick;-;-;-;your@mail.de;loginname;pass;-;-;-;-;-;-;-;-
EOF
}

if [[ "$@" ]] ; then
FILENAME=pw.csv
# Path to the CSV database; overridable via the PWPATH environment variable.
PWPATH=${PWPATH:-/media/digidog/portable/BFK/s/U/ts/pcuserdata/Documents/Registries_Logins/$FILENAME}
case $1 in
-h|--help)
Help
exit 1
;;
-i|--input)
# Interactively collect the 19 column values in order. Blank answers are
# stored as "-" so every row keeps a full set of columns.
unset ROW
declare -a ROW
declare -a PROMPTS=(
"Categorie tags with comma: "
"Type of Service: "
"Service Name: "
"Link to the Login: "
"Nickname/Username: "
"Real Name: "
"ID in your unique Persona system: "
"Customer Reference Number, if any: "
"Email Adress for Login: "
"Login name or Login type (user-login/mail-login): "
"Password: "
"Additional notes/(info to the account: "
"Address if divergent from unique Person of account: "
"Birth town if divergent from unique Person of account: "
"Birthday if divergent from unique Person of account: "
"Age if divergent from unique Person of account: "
"Challenge questions (watch order): "
"Challenge responses (same order): "
"Usage info (like notes, but more specific to history): "
)
i=0
for prompt in "${PROMPTS[@]}" ; do
read -p "$prompt" "ROW[$i]"
[ -z "${ROW[$i]}" ] && ROW[$i]="-"
i=$((i+1))
done
# Join the fields with ';' into one CSV row, then restore the default IFS.
IFS=';' ; ROWSET="${ROW[*]}" ; IFS=$' \t\n'
DataTable
echo "$ROWSET"
echo "-------"
read -p "Please check the entry for possible errors. Does it look ok (y), or do we need to break up (n)? <y/n>: " ynROW
if [ "$ynROW" == "n" ] || [ -z "$ynROW" ] ; then
Help
exit 1
else
echo "$ROWSET" >> "$PWPATH"
fi
;;
# -a|--add)
# shift
# echo "$@" >> $PWPATH
# exit
# ;;
*)
# Filter mode: grep the CSV for one or two case-insensitive terms and
# show columns 2,3,5,9,10,11,12 as an aligned table.
if [[ "$2" ]] ; then
grep --color -i "$1" "$PWPATH" | grep --color -i "$2" | cut -d ';' -f2,3,5,9,10,11,12 | column -ts ';'
else
grep --color -i "$1" "$PWPATH" | cut -d ';' -f2,3,5,9,10,11,12 | column -ts ';'
fi
;;
esac
else
echo "
PW - simple password user account storage management based on CSV.
Type -h or --help for help and options.
"
fi
exit 0
# for index in "${!array[@]}"
# do
# echo "$index ${array[index]}"
# done
| true
|
6c644570f1fcfd18fe82e12fe64269457eb3a0ed
|
Shell
|
mariusungureanu/APD
|
/laboratoare/lab03/test_oets.sh
|
UTF-8
| 1,350
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the "oets" (odd-even transposition sort) binary for several
# N (element count) / P (thread count) combinations and report whether
# each run printed "Sortare corecta" (correct sort).
if [ ! -f "oets" ]
then
  echo "Nu exista binarul oets"
  exit
fi

# One "N P" pair per line, in the original test order.
while read -r N P; do
  if ./oets "$N" "$P" | grep -q "Sortare corecta"; then
    echo "Sortare corecta pentru N = $N P = $P"
  else
    echo "Sortare incorecta pentru N = $N P = $P"
  fi
done <<EOF
100 3
100 5
100 6
100 7
500 7
1000 4
1000 6
1000 7
EOF
| true
|
12aa8913bfb3b4c886ff717aa12db015888dc46c
|
Shell
|
ltoornman/bash_basics
|
/02-add_nums.sh
|
UTF-8
| 439
| 3.484375
| 3
|
[
"Unlicense"
] |
permissive
|
#!/bin/sh
# Read three numbers from the user and print their sum, then ask for an
# image's dimensions and print its total pixel count.
echo "Enter your first number"
read first
echo "Enter your second number"
read second
echo "Enter your third number"
read third
sum=$(( first+second+third ))
echo "The sum is $sum"
# exercise: ask the user for the width and height and present total
# number of pixels
echo "Enter length in Pixels"
read firstpixel
echo "Enter width in Pixels"
read secondpixel
# FIX: total pixels of a length x width image is the product, not the sum.
sum=$(( firstpixel*secondpixel ))
echo "The total is $sum pixels"
| true
|
9d8dbcbacefe0f0ae0d1f7eb81c57a0c4a6003db
|
Shell
|
oldbitpusher/npasswd
|
/V2.0/src/PasswordCheck/SCCS/s.TestSuite.sh
|
UTF-8
| 8,473
| 2.734375
| 3
|
[] |
no_license
|
h38998
s 00346/00000/00000
d D 1.1 98/06/04 08:42:43 clyde 1 0
c date and time created 98/06/04 08:42:43 by clyde
e
u
U
f e 0
t
T
I 1
#! /bin/sh
#
# Copyright 1998, The University of Texas at Austin ("U. T. Austin").
# All rights reserved.
#
# By using this software the USER indicates that he or she has read,
# understood and will comply with the following:
#
# U. T. Austin hereby grants USER permission to use, copy, modify, and
# distribute this software and its documentation for any purpose and
# without fee, provided that:
#
# 1. the above copyright notice appears in all copies of the software
# and its documentation, or portions thereof, and
# 2. a full copy of this notice is included with the software and its
# documentation, or portions thereof, and
# 3. neither the software nor its documentation, nor portions thereof,
# is sold for profit. Any commercial sale or license of this software,
# copies of the software, its associated documentation and/or
# modifications of either is strictly prohibited without the prior
# consent of U. T. Austin.
#
# Title to copyright to this software and its associated documentation
# shall at all times remain with U. T. Austin. No right is granted to
# use in advertising, publicity or otherwise any trademark, service
# mark, or the name of U. T. Austin.
#
# This software and any associated documentation are provided "as is,"
# and U. T. AUSTIN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESSED OR
# IMPLIED, INCLUDING THOSE OF MERCHANTABILITY OR FIMinorESS FOR A PARTICULAR
# PURPOSE, OR THAT USE OF THE SOFTWARE, MODIFICATIONS, OR ASSOCIATED
# DOCUMENTATION WILL NOT INFRINGE ANY PATENTS, COPYRIGHTS, TRADEMARKS OR
# OTHER INTELLECTUAL PROPERTY RIGHTS OF A THIRD PARTY. U. T. Austin, The
# University of Texas System, its Regents, officers, and employees shall
# not be liable under any circumstances for any direct, indirect, special,
# incidental, or consequential damages with respect to any claim by USER
# or any third party on account of or arising from the use, or inability
# to use, this software or its associated documentation, even if U. T.
# Austin has been advised of the possibility of those damages.
#
# Submit commercialization requests to: Office of the Executive Vice
# President and Provost, U. T. Austin, 201 Main Bldg., Austin, Texas,
# 78712, ATMinor: Technology Licensing Specialist.
#
# %W% %G% (cc.utexas.edu)
#
#
# Test suite for password check library
#
# Usage: $0
#
TEMP=/tmp/pwtest.$$
opt_S='-s'
SINK=/dev/null
verbose=false
while [ $# -gt 0 ]; do
case "$1" in
-v) verbose=true
SINK=/dev/tty
[ -c /dev/stdout ] && SINK=/dev/stdout
opt_S=''
;;
*) break;;
esac
shift
done
# Print a message prefixed with "***".
msg()
{
echo "***" $*
}
# Print a message prefixed with "***" and abort the suite.
die()
{
echo "***" $*; exit 1
}
# Indent the given file(s) for readable log output.
spew()
{
sed 's/^/ /' $*
}
# Begin a new major test group; reset the minor/micro counters.
set_test()
{
TestMajor=$1
TestMinor=''
TestMicro=''
}
# Echo the current test id as "major[ minor[.micro]]".
get_test()
{
r=$TestMajor
[ -n "$TestMinor" ] && r="$r $TestMinor"
[ -n "$TestMicro" ] && r="$r.$TestMicro"
echo $r
}
# Advance the minor test number and clear the micro number.
incr_minor()
{
[ -z "$TestMinor" ] && TestMinor=0
TestMinor=`expr $TestMinor + 1`
TestMicro='';
}
# Advance the micro test number, starting it at 1 if unset.
incr_micro()
{
if [ -n "$TestMicro" ]; then
TestMicro=`expr $TestMicro + 1`
else
TestMicro=1
fi
}
# Increment the variable named by $1 (indirect access via eval).
incr()
{
eval "_tt=\$$1"
_t=`expr $_tt + 1`
eval "$1=\$_t"
}
#
# MAIN
#
msg ""
msg This suite verifies the functionality of the
msg top-level password checking routines
msg ""
msg Test groups and the routines they verify are:
msg lexical: pwck_lexical
msg local: pwck_local
msg history: pwck_history
msg ""
msg pwck_passwd is verified by the FascistGecos test suite in ./cracklib
msg pwck_crack is verified by the FascistNpasswd test suite in ./cracklib
msg ""
PROGS="./test_pwck ./test_history"
for what in $PROGS; do
[ ! -f $what ] && die The executable $what is missing - run make
done
#
# Driver for running password checks
#
# $1 = password, $2 = prog, $3 = expected return, $* = config directives
#
# Run one password check: $1 = password, $2 = checker prog, $3 = expected
# exit status ("-" = don't care), remaining args = config directives.
# Increments the global "failures" counter on mismatch.
test_password()
{
pw="$1"; shift
prog=$1; shift
expect=$1; shift
# BUG FIX: reset args on every call. Previously a call WITHOUT directives
# silently reused the -D directives left over from the prior call.
args=''
[ $# -gt 0 ] && args="-D \"$*\""
eval ./test_pwck $opt_S -p $prog -P \"$pw\" $args > $TEMP
status=$?
if [ X$expect = "X-" ]; then # Expect "-" means don't care
spew $TEMP
msg `get_test` DONE
else
if [ $status -eq $expect ]; then
spew $TEMP
msg `get_test` OK
else
spew $TEMP
msg `get_test` FAILED
incr failures
fi
fi
rm -f $TEMP
}
failures=0
#
# Test suite for "pwck_lexical"
#
trap "rm -f $TEMP; exit 1" 1 2 3 15
set_test lexical
# Run the "lexical" group when no group was requested on the command line
# or when it matches $1.
if [ -z "$1" -o "$1" = $TestMajor ]; then
	echo ""
	ofailures=$failures
	msg `get_test` Start test group \"$TestMajor\" `date`
	#
	# Test for password too short
	#
	incr_minor
	msg `get_test` Reject too short
	test_password foo lexical 1 "minpassword 4"
	#
	# Test for password being all numbers
	#
	incr_minor
	msg `get_test` Reject all numbers
	test_password 012345689 lexical 1
	#
	# Test for password being all whitespace
	#
	incr_minor
	msg `get_test` Reject all whitespace
	# Too many levels of shell quoting in test_password - do not use
	./test_pwck $opt_S -p lexical \
		-P ' ' -D "minpassword 3" > $TEMP
	if [ $? -eq 1 ]; then
		spew $TEMP
		msg `get_test` OK
	else
		spew $TEMP
		msg `get_test` FAILED
		incr failures
	fi
	#
	# Test for password being all alphas
	#
	incr_minor
	incr_micro
	msg `get_test` Reject all alpha
	test_password foobarabc lexical 1 "alphaonly no"
	incr_micro
	msg `get_test` Accept all alpha
	test_password foobarabc lexical 0 "alphaonly yes"
	#
	# Test for password having repeated characters
	#
	incr_minor
	msg `get_test` Reject repeated characters
	test_password 'aaabbbccc' lexical 1 "maxrepeat 2"
	#
	# Test for password fitting some patterns
	#
	incr_minor
	incr_micro
	msg `get_test` Reject telephone number lookalikes
	test_password '123-1234' lexical 1
	incr_micro
	msg `get_test` Reject US Social Security number lookalikes
	test_password '123-45-1234' lexical 1
	#
	# Test for password fitting some patterns
	#
	incr_minor
	msg `get_test` Reject single case
	incr_micro
	test_password 'absdcrs' lexical 1 "singlecase no; alphaonly yes"
	incr_micro
	test_password 'ABSDCRS' lexical 1 "singlecase no; alphaonly yes"
	#
	# Test for password being single case alpha
	#
	incr_minor
	incr_micro
	msg `get_test` Accept single case
	test_password 'absdcrs' lexical 0 "singlecase yes; alphaonly yes"
	incr_micro
	test_password 'ABSDCRS' lexical 0 "singlecase yes; alphaonly yes"
	#
	# Test for password having non-printable characters
	#
	# NOTE(review): the passwords below look identical to printable ones;
	# they presumably contained control characters that were lost in a
	# re-encoding of this file - confirm against version history.
	incr_minor
	incr_micro
	msg `get_test` Reject non-printable characters
	test_password 'abcdfg' lexical 1 "printableonly yes"
	incr_micro
	msg `get_test` Allow non-printable characters
	test_password 'abcdfg' lexical 0 "printableonly no"
	incr_micro
	msg `get_test` Reject forbidden non-printable characters
	test_password 'abcdfg' lexical 1 "printableonly no"
	#
	# Test for password having a diversity of character classes
	#
	incr_minor
	msg `get_test` Test character class checks
	test_password 'a1b2c3d4e5' lexical 1 "charclasses 3"
	echo ""
	msg Test group \"$TestMajor\" done - `expr $failures - $ofailures` failures `date`
fi
# Run the "local" group: reject passwords derived from the hostname.
set_test local
if [ -z "$1" -o "$1" = $TestMajor ]; then
	#
	# Test suite for the stock "pwck_local"
	# Add tests here if pwck_local.c has been customized
	#
	# Emit common substitutions of the hostname (i/o -> 1/0, vowels
	# uppercased) that a user might try as a "clever" password.
	permute()
	{
		echo $1 | tr '[io]' '[10]'
		echo $1 | tr '[aeiou]' '[AEIOU]'
	}
	echo ""
	msg `get_test` Start test group \"$TestMajor\" `date`
	ofailures=$failures
	host=`hostname 2>/dev/null`
	[ -z "$host" ] && host=`uname -n`
	# Short hostname: strip everything from the first dot.
	shost=`echo $host | sed 's/\..*//'`
	incr_minor
	msg `get_test` Test for full hostname
	test_password $host local 1
	incr_minor
	msg `get_test` Test for short hostname
	test_password $shost local 1
	incr_minor
	msg `get_test` Test of permuted hostname - some of these will fail
	for s in `permute $shost`; do
		incr_micro
		test_password $s local -
	done
	echo ""
	msg Test group \"$TestMajor\" done - `expr $failures - $ofailures` failures `date`
fi
#
# Test suite for the password history mechanism
#
set_test history
if [ -z "$1" -o "$1" = $TestMajor ]; then
	echo ""
	msg `get_test` Start test group \"$TestMajor\" `date`
	ofailures=$failures
	# Exercise both history back-ends; either sub-script failing counts
	# as one failure.
	sh TestHistory.sh file || incr failures
	sh TestHistory.sh dbm || incr failures
	msg Test group \"$TestMajor\" done - `expr $failures - $ofailures` failures `date`
fi
msg End password check tests - $failures failures `date`
# Exit status is the failure count so callers can detect any failure.
exit $failures
#
# End %M%
E 1
| true
|
3f5718993575ab541a7f242f867e5bf697248163
|
Shell
|
peter50216/dotfiles
|
/bin/unarchive
|
UTF-8
| 3,563
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/bash
# unarchive: extract one or more archives, unwrapping single-entry archives
# and resolving name collisions with numeric suffixes.
set -e
# noclobber makes '>' fail on existing files; atomic_file relies on this.
set -o noclobber

# Temp paths created during extraction; removed by cleanup().
temp_files=()
cleanup() {
  # Remove every path recorded in the global temp_files array.
  local path
  for path in "${temp_files[@]}"; do
    rm -rf "${path}"
  done
}
info() {
  # Print a status message in bold yellow on stdout.
  local message="$*"
  printf '\e[1;33m%s\e[m\n' "${message}"
}
die() {
  # Print a bold red error to stderr, remove temp paths, and abort.
  local message="ERROR: $*"
  printf '\e[1;31m%s\e[m\n' "${message}" >&2
  cleanup
  exit 1
}
atomic_file() {
  # Claim a filename atomically: succeeds only when "$1" did not already
  # exist (relies on the noclobber option set at the top of this script).
  ( : > "$1" ) > /dev/null 2>&1
}
atomic_directory() {
  # Claim a directory name atomically: mkdir fails if it already exists.
  command mkdir -- "$1" > /dev/null 2>&1
}
replace() {
  # Move "$1" into the current directory under the new name "$2", going
  # through a fresh temp directory so the rename happens in one mv.
  # mv -T is not supported on BSD :(
  local temp_dir
  temp_dir=$(mktemp -d tmp.unarchive.XXXXXXXXXX)
  # Quote the expansion: an unquoted array append is subject to word
  # splitting and globbing.
  temp_files+=("${temp_dir}")
  mv -- "$1" "$temp_dir/$2"
  mv -- "$temp_dir/$2" .
}
find_unique_name() {
  # Find and atomically reserve an unused name derived from $1, appending
  # _2, _3, ... before the extension when needed.
  # $1 = desired name, $2 = "file" or "directory" (selects the reservation
  # helper atomic_file / atomic_directory via dynamic dispatch below).
  # Echoes the claimed name on stdout.
  local orig_name="$1"
  local file_type="$2"
  local name="${orig_name}"
  local ext=""
  if [[ ${file_type} == "file" ]]; then
    ext=""
    name="${name%.*}"
    # Only treat the suffix as an extension if stripping it changed the name.
    if [[ "${orig_name}" != "${name}" ]]; then
      ext=".${orig_name##*.}"
    fi
  fi
  if "atomic_${file_type}" "${name}${ext}"; then
    echo "${name}${ext}"
    return
  fi
  # The plain name is taken; probe numbered variants until one succeeds.
  local num=2
  while ! "atomic_${file_type}" "${name}_${num}${ext}"; do
    ((num++))
  done
  echo "${name}_${num}${ext}"
}
# Format-specific extractors: $1 = archive path, $2 = destination directory.
unarchive_zip() {
  unzip -d "$2" -- "$1"
}
unarchive_tgz() {
  # Prefer the parallel gzip implementation when available.
  if type pigz >/dev/null 2>&1; then
    tar --use-compress-program pigz -xvf "$1" -C "$2"
  else
    tar -xvzf "$1" -C "$2"
  fi
}
unarchive_txz() {
  # Prefer the parallel xz implementation when available.
  if type pixz >/dev/null 2>&1; then
    tar --use-compress-program pixz -xvf "$1" -C "$2"
  else
    tar --xz -xvf "$1" -C "$2"
  fi
}
unarchive_tbz2() {
  tar -xvjf "$1" -C "$2"
}
unarchive_7z() {
  7za x -o"$2" -- "$1"
}
unarchive_tar() {
  tar -xvf "$1" -C "$2"
}
unarchive() {
  # Extract archive $1 into the current directory:
  # - a single top-level entry is moved out of its wrapper directory,
  # - multiple entries land in a directory named after the archive.
  # Name collisions are resolved via find_unique_name.
  local archive_orig_path="$1"
  if [[ ! -f "${archive_orig_path}" ]]; then
    die "${archive_orig_path}: archive doesn't exist."
  fi
  local archive
  archive="$(basename "${archive_orig_path}")"
  local archive_type
  local archive_basename="${archive%.*}"
  # Map the suffix to an extractor; double suffixes (.tar.gz etc.) also
  # need the basename recomputed so both extensions are stripped.
  case "${archive}" in
    *.zip)
      archive_type=zip
      ;;
    *.tar)
      archive_type=tar
      ;;
    *.tar.gz)
      archive_type=tgz
      archive_basename="${archive%.tar.gz}"
      ;;
    *.tgz)
      archive_type=tgz
      ;;
    *.tar.xz)
      archive_type=txz
      archive_basename="${archive%.tar.xz}"
      ;;
    *.txz)
      archive_type=txz
      ;;
    *.tar.bz2)
      archive_type=tbz2
      archive_basename="${archive%.tar.bz2}"
      ;;
    *.tbz|*.tbz2)
      archive_type=tbz2
      ;;
    *.7z)
      archive_type=7z
      ;;
    *)
      die "Unknown archive: ${archive_orig_path}"
  esac
  local temp_dir
  temp_dir=$(mktemp -d tmp.unarchive.XXXXXXXXXX)
  # Quote the expansion: an unquoted array append is subject to word
  # splitting and globbing.
  temp_files+=("${temp_dir}")
  "unarchive_${archive_type}" "${archive_orig_path}" "${temp_dir}"
  # Collect the top-level entries of the extracted tree.
  local sub_files=()
  while IFS= read -r -d '' f; do
    sub_files+=("$f")
  done < <(find "${temp_dir}" -maxdepth 1 -mindepth 1 -print0)
  if (( "${#sub_files[@]}" == 0 )); then
    info "${archive_orig_path}: No file in archive!"
    return
  fi
  local target
  if (( "${#sub_files[@]}" == 1 )); then
    # Single entry: promote it directly, avoiding a pointless wrapper dir.
    local file="${sub_files[0]}"
    local file_type
    file_type=$([[ -f "${file}" ]] && echo "file" || echo "directory")
    target="$(find_unique_name "$(basename "${file}")" "${file_type}")"
    replace "${file}" "${target}"
    info "${archive_orig_path}: Single ${file_type}, moved to ${target}"
  else
    target="$(find_unique_name "${archive_basename}" "directory")"
    replace "${temp_dir}" "${target}"
    info "${archive_orig_path}: Extracted to ${target}"
  fi
}
main() {
  # Always remove temp directories, whatever the exit path.
  trap cleanup EXIT
  if (( $# == 0 )); then
    echo "Usage: $0 [archive files]"
    exit 1
  fi
  local archive
  for archive in "$@"; do
    unarchive "${archive}"
  done
}
main "$@"
| true
|
08a3bdeb17f804d5a9a9bca8840ed4cbc3cc5851
|
Shell
|
naivekun/os6360_trtest
|
/cpss-ac3/cpss/cpssEnabler/mainOs/src/gtOs/linux/kernelExt/driver/inst_mod.sh
|
UTF-8
| 1,113
| 3.734375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Install the mvKernelExt kernel module into a target root filesystem and
# register its character device node.
# parameters:
home_rootfs=$1
node_major=$2
node_minor=$3
init_sh=$home_rootfs/etc/init.sh
# Nothing to do when the target rootfs does not exist.
test -d $home_rootfs || exit 0
# copy module
unset MAKEFLAGS
KSUBD=$(make -s -C $DIST_HOME kernelversion)
# If init.sh does not load modules from a per-kernel-version directory,
# install the module flat under /lib/modules instead.
grep -q 'modules.*uname -r' $init_sh || KSUBD=.
test -d $home_rootfs/lib/modules/$KSUBD || mkdir -p $home_rootfs/lib/modules/$KSUBD
cp mvKernelExt.ko $home_rootfs/lib/modules/$KSUBD
# fix init.sh
# Insert an insmod of the module before the 'date' line, but only if
# init.sh does not already mention mvKernelExt/module_f.
# NOTE(review): the GNU-sed 'i\' continuation style below is fragile and
# the final empty '-e' looks like leftover line continuation - confirm the
# inserted text against a known-good init.sh before changing anything here.
if [ "$KSUBD" = "." -a -f "$init_sh" ]; then
	grep -q -e mvKernelExt -e module_f $init_sh ||
	sed -i \
		-e '/date/ i\' \
		-e ' test -f /lib/modules/mvKernelExt.ko &&\' \
		-e ' insmod /lib/modules/mvKernelExt.ko\' \
		-e '' \
		$init_sh
fi
# create node if it not exists yet
if [ -f $home_rootfs/../rootfs_deviceMap ]; then
	dm=$home_rootfs/../rootfs_deviceMap
	grep -q '^\/dev\/mvKernelExt' $dm >/dev/null 2>&1
	if [ $? -ne 0 ]; then
		echo "/dev/mvKernelExt c 640 0 0 $node_major $node_minor 0 0 -" >>$dm
	fi
fi
# Fall back to creating the node directly when the rootfs has a live /dev.
if [ -e $home_rootfs/dev/null ]; then
	cd $home_rootfs/dev
	if [ \! -c mvKernelExt ]
	then
		sudo mknod mvKernelExt c $node_major $node_minor
	fi
fi
| true
|
39c1bc126078a50f86f393ecd86576449aae7ba4
|
Shell
|
mercadopago/cart-magento2
|
/bin/run-sync-files.sh
|
UTF-8
| 317
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
# Sync local module sources (src/) into the magento-php container's
# app/code directory so the running Magento 2 instance picks them up.
#
# Fixes: the original shebang was "#!bin/bash" (missing leading slash, so
# direct execution failed with "bad interpreter"), and it relied on sh-style
# 'echo "\n"' escape interpretation that plain bash echo does not perform.

printf '\n'
printf '🐘🔍 \033[01;33m RUNNING SYNC FILES TO MAGENTO 2 CONTAINER \033[0m\n'
printf '\n'

# The exit status of docker cp decides which banner we print.
if docker cp src/. magento-php:/var/www/html/magento2/app/code; then
  printf '✅ \033[01;32m SYNC EXECUTED SUCCESSFULLY \033[0m\n\n'
else
  printf '🚫 \033[01;31m SYNC FAILED \033[0m\n\n'
fi
| true
|
3f2d4b7941b052799aa4d8238e2767bfe0ccc279
|
Shell
|
apache/impala
|
/testdata/bin/create-load-data.sh
|
UTF-8
| 27,724
| 3.375
| 3
|
[
"Apache-2.0",
"OpenSSL",
"bzip2-1.0.6",
"LicenseRef-scancode-openssl",
"LicenseRef-scancode-ssleay-windows",
"LicenseRef-scancode-google-patent-license-webrtc",
"PSF-2.0",
"BSD-3-Clause",
"dtoa",
"MIT",
"LicenseRef-scancode-mit-modification-obligations",
"Minpack",
"BSL-1.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# This script can be executed in two ways:
# 1) Without any command line parameters - A normal data load will happen where data is
# generated as needed, generally by issuing 'INSERT INTO <table> SELECT *' commands.
# 2) With a command line parameter pointing to a test-warehouse snapshot file - In this
# case the snapshot file contents will be copied into HDFS prior to calling the data load
# scripts. This speeds up overall data loading time because it usually means only the
# table metadata needs to be created.
#
# For more information look at testdata/bin/load-test-warehouse-snapshot.sh and
# bin/load-data.py
set -euo pipefail
. $IMPALA_HOME/bin/report_build_error.sh
setup_report_build_error
. ${IMPALA_HOME}/bin/impala-config.sh > /dev/null 2>&1
. ${IMPALA_HOME}/testdata/bin/run-step.sh
# Environment variables used to direct the data loading process to an external cluster.
# TODO: We need a better way of managing how these get set. See IMPALA-4346
: ${HS2_HOST_PORT=localhost:11050}
: ${HDFS_NN=${INTERNAL_LISTEN_HOST}:20500}
: ${IMPALAD=localhost}
: ${REMOTE_LOAD=}
: ${CM_HOST=}
: ${IMPALA_SERIAL_DATALOAD=}
# We don't expect dataload to take more than 2.5 hours.
: ${TIMEOUT_FOR_CREATE_LOAD_DATA_MINS:= 150}
# Defaults that the command-line parsing below may override.
SKIP_METADATA_LOAD=0
SKIP_SNAPSHOT_LOAD=0
SKIP_RANGER=0
SNAPSHOT_FILE=""
LOAD_DATA_ARGS=""
EXPLORATION_STRATEGY="exhaustive"
export JDBC_URL="jdbc:hive2://${HS2_HOST_PORT}/default;"
HIVE_CMD="beeline -n $USER -u $JDBC_URL"
# For logging when using run-step.
LOG_DIR=${IMPALA_DATA_LOADING_LOGS_DIR}
echo "Executing: create-load-data.sh $@"
# Parse command-line flags; each flag that takes a value consumes the
# following argument via an extra shift.
while [ -n "$*" ]
do
  case $1 in
    -exploration_strategy)
      EXPLORATION_STRATEGY=${2-}
      if [[ -z "$EXPLORATION_STRATEGY" ]]; then
        echo "Must provide an exploration strategy from e.g. core, exhaustive"
        exit 1;
      fi
      shift;
      ;;
    -skip_metadata_load)
      SKIP_METADATA_LOAD=1
      ;;
    -skip_snapshot_load)
      SKIP_SNAPSHOT_LOAD=1
      ;;
    -snapshot_file)
      SNAPSHOT_FILE=${2-}
      if [ ! -f $SNAPSHOT_FILE ]; then
        echo "-snapshot_file does not exist: $SNAPSHOT_FILE"
        exit 1;
      fi
      shift;
      ;;
    -cm_host)
      CM_HOST=${2-}
      shift;
      ;;
    -skip_ranger)
      SKIP_RANGER=1
      ;;
    -timeout)
      TIMEOUT_FOR_CREATE_LOAD_DATA_MINS=${2-}
      shift;
      ;;
    -help|-h|*)
      echo "create-load-data.sh : Creates data and loads from scratch"
      echo "[-skip_metadata_load] : Skips loading of metadata"
      echo "[-skip_snapshot_load] : Assumes that the snapshot is already loaded"
      echo "[-snapshot_file] : Loads the test warehouse snapshot into hdfs"
      echo "[-cm_host] : Address of the Cloudera Manager host if loading to a remote cluster"
      echo "[-skip_ranger] : Skip the set-up for Ranger."
      echo "[-timeout] : The timeout in minutes for loading data."
      exit 1;
      ;;
  esac
  shift;
done
# Ranger setup is never run against a remote cluster.
if [[ -n $REMOTE_LOAD ]]; then
  SKIP_RANGER=1
fi
# Background watchdog that kills this run if it exceeds the timeout.
"${IMPALA_HOME}/bin/script-timeout-check.sh" -timeout $TIMEOUT_FOR_CREATE_LOAD_DATA_MINS \
  -script_name "$(basename $0)" &
TIMEOUT_PID=$!
# NOTE(review): the trailing period on the last line below sits outside the
# quotes; it is still concatenated onto the variable, but looks accidental.
SCHEMA_MISMATCH_ERROR="A schema change has been detected in the metadata, "
SCHEMA_MISMATCH_ERROR+="but it cannot be loaded on Isilon, s3, gcs, cos, oss, obs or "
SCHEMA_MISMATCH_ERROR+="local filesystem, and the filesystem is ${TARGET_FILESYSTEM}".
if [[ $SKIP_METADATA_LOAD -eq 0 && "$SNAPSHOT_FILE" = "" ]]; then
  run-step "Generating HBase data" create-hbase.log \
      ${IMPALA_HOME}/testdata/bin/create-hbase.sh
  run-step "Creating /test-warehouse HDFS directory" create-test-warehouse-dir.log \
      hadoop fs -mkdir -p /test-warehouse
elif [ $SKIP_SNAPSHOT_LOAD -eq 0 ]; then
  run-step "Loading HDFS data from snapshot: $SNAPSHOT_FILE" \
      load-test-warehouse-snapshot.log \
      ${IMPALA_HOME}/testdata/bin/load-test-warehouse-snapshot.sh "$SNAPSHOT_FILE"
  # Don't skip the metadata load if a schema change is detected.
  if ! ${IMPALA_HOME}/testdata/bin/check-schema-diff.sh; then
    if [[ "${TARGET_FILESYSTEM}" == "isilon" || "${TARGET_FILESYSTEM}" == "s3" || \
          "${TARGET_FILESYSTEM}" == "local" || "${TARGET_FILESYSTEM}" == "gs" || \
          "${TARGET_FILESYSTEM}" == "cosn" || "${TARGET_FILESYSTEM}" == "oss" || \
          "${TARGET_FILESYSTEM}" == "obs" || "${TARGET_FILESYSTEM}" == "ozone" ]] ; then
      echo "ERROR in $0 at line $LINENO: A schema change has been detected in the"
      echo "metadata, but it cannot be loaded on isilon, s3, gcs, cos, oss, obs, ozone,"
      echo "or local and the target file system is ${TARGET_FILESYSTEM}. Exiting."
      # Generate an explicit JUnitXML symptom report here for easier triaging
      ${IMPALA_HOME}/bin/generate_junitxml.py --phase=dataload \
          --step=check-schema-diff.sh --error "${SCHEMA_MISMATCH_ERROR}"
      exit 1
    fi
    echo "Schema change detected, metadata will be loaded."
    SKIP_METADATA_LOAD=0
  fi
else
  # hdfs data already exists, don't load it.
  echo Skipping loading data to hdfs.
fi
# Echo the effective configuration for the build logs.
echo "Derived params for create-load-data.sh:"
echo "EXPLORATION_STRATEGY=${EXPLORATION_STRATEGY:-}"
echo "SKIP_METADATA_LOAD=${SKIP_METADATA_LOAD:-}"
echo "SKIP_SNAPSHOT_LOAD=${SKIP_SNAPSHOT_LOAD:-}"
echo "SNAPSHOT_FILE=${SNAPSHOT_FILE:-}"
echo "CM_HOST=${CM_HOST:-}"
echo "REMOTE_LOAD=${REMOTE_LOAD:-}"
function start-impala {
  # Launch the local Impala minicluster with flags appropriate for the
  # target filesystem. Callers may pre-set START_CLUSTER_ARGS to append
  # extra flags.
  : ${START_CLUSTER_ARGS=""}
  # Use a fast statestore update so that DDL operations run faster.
  # Build the flag list as an array from the start; the original code
  # assigned a scalar and then appended with array syntax, relying on
  # bash's implicit scalar-to-array conversion.
  START_CLUSTER_ARGS_INT=("--state_store_args=--statestore_update_frequency_ms=50")
  if [[ "${TARGET_FILESYSTEM}" == "local" ]]; then
    START_CLUSTER_ARGS_INT+=("--impalad_args=--abort_on_config_error=false -s 1")
  else
    START_CLUSTER_ARGS_INT+=("-s 3")
  fi
  START_CLUSTER_ARGS_INT+=("${START_CLUSTER_ARGS}")
  # The unquoted expansion below is intentional: each element may contain
  # several whitespace-separated flags (e.g. "-s 3").
  ${IMPALA_HOME}/bin/start-impala-cluster.py --log_dir=${IMPALA_DATA_LOADING_LOGS_DIR} \
    ${START_CLUSTER_ARGS_INT[@]}
}
function restart-cluster {
  # Bring the whole stack down and back up in order. Restarting only HDFS
  # can break other minicluster components, so everything is bounced.
  echo "Shutting down Impala"
  "${IMPALA_HOME}/bin/start-impala-cluster.py" --kill
  echo "Shutting down the minicluster"
  "${IMPALA_HOME}/testdata/bin/kill-all.sh"
  echo "Starting the minicluster"
  "${IMPALA_HOME}/testdata/bin/run-all.sh"
  echo "Starting Impala"
  start-impala
}
function load-custom-schemas {
  # Stage hand-crafted schema/data files under /test-warehouse.
  # HDFS commandline calls are slow, so consolidate the manipulation into
  # as few calls as possible by populating a temporary directory with the
  # appropriate structure and copying it in a single call.
  TMP_DIR=$(mktemp -d)
  # Cleanup old schemas dir
  hadoop fs -rm -r -f /test-warehouse/schemas
  SCHEMA_SRC_DIR=${IMPALA_HOME}/testdata/data/schemas
  SCHEMA_TMP_DIR="${TMP_DIR}/schemas"
  mkdir ${SCHEMA_TMP_DIR}
  mkdir ${SCHEMA_TMP_DIR}/enum
  # Symlinks are enough here: 'hadoop fs -put' follows them when uploading.
  ln -s ${SCHEMA_SRC_DIR}/zipcode_incomes.parquet ${SCHEMA_TMP_DIR}
  ln -s ${SCHEMA_SRC_DIR}/alltypestiny.parquet ${SCHEMA_TMP_DIR}
  ln -s ${SCHEMA_SRC_DIR}/enum/* ${SCHEMA_TMP_DIR}/enum
  ln -s ${SCHEMA_SRC_DIR}/malformed_decimal_tiny.parquet ${SCHEMA_TMP_DIR}
  ln -s ${SCHEMA_SRC_DIR}/decimal.parquet ${SCHEMA_TMP_DIR}
  ln -s ${SCHEMA_SRC_DIR}/nested/modern_nested.parquet ${SCHEMA_TMP_DIR}
  ln -s ${SCHEMA_SRC_DIR}/nested/legacy_nested.parquet ${SCHEMA_TMP_DIR}
  # CHAR and VARCHAR tables written by Hive
  mkdir -p ${TMP_DIR}/chars_formats_avro_snap \
   ${TMP_DIR}/chars_formats_parquet \
   ${TMP_DIR}/chars_formats_text \
   ${TMP_DIR}/chars_formats_orc_def
  ln -s ${IMPALA_HOME}/testdata/data/chars-formats.avro ${TMP_DIR}/chars_formats_avro_snap
  ln -s ${IMPALA_HOME}/testdata/data/chars-formats.parquet ${TMP_DIR}/chars_formats_parquet
  ln -s ${IMPALA_HOME}/testdata/data/chars-formats.orc ${TMP_DIR}/chars_formats_orc_def
  ln -s ${IMPALA_HOME}/testdata/data/chars-formats.txt ${TMP_DIR}/chars_formats_text
  # File used by CreateTableLikeOrc tests
  ln -s ${IMPALA_HOME}/testdata/data/alltypes_non_acid.orc ${SCHEMA_TMP_DIR}
  # One bulk upload of everything staged above.
  hadoop fs -put -f ${TMP_DIR}/* /test-warehouse
  rm -r ${TMP_DIR}
}
function load-data {
  # Run bin/load-data.py for one workload.
  # $1 = workload name, $2 = exploration strategy (default "core"),
  # $3 = optional table formats, $4 = "force" to force a reload.
  # Returns non-zero (and tails the log) when loading fails.
  WORKLOAD=${1}
  EXPLORATION_STRATEGY=${2:-"core"}
  TABLE_FORMATS=${3:-}
  FORCE_LOAD=${4:-}
  MSG="Loading workload '$WORKLOAD'"
  ARGS=("--workloads $WORKLOAD")
  MSG+=" using exploration strategy '$EXPLORATION_STRATEGY'"
  ARGS+=("-e $EXPLORATION_STRATEGY")
  # NOTE(review): '[ $TABLE_FORMATS ]' relies on the unquoted expansion of a
  # value that must not contain spaces; it is empty-safe but fragile.
  if [ $TABLE_FORMATS ]; then
    MSG+=" in table formats '$TABLE_FORMATS'"
    ARGS+=("--table_formats $TABLE_FORMATS")
  fi
  if [ $LOAD_DATA_ARGS ]; then
    ARGS+=("$LOAD_DATA_ARGS")
  fi
  # functional-query is unique. The dataset name is not the same as the workload name.
  if [ "${WORKLOAD}" = "functional-query" ]; then
    WORKLOAD="functional"
  fi
  # TODO: Why is there a REMOTE_LOAD condition? See IMPALA-4347
  #
  # Force load the dataset if we detect a schema change.
  if [[ -z "$REMOTE_LOAD" ]]; then
    if ! ${IMPALA_HOME}/testdata/bin/check-schema-diff.sh $WORKLOAD; then
      ARGS+=("--force")
      echo "Force loading $WORKLOAD because a schema change was detected"
    elif [ "${FORCE_LOAD}" = "force" ]; then
      ARGS+=("--force")
      echo "Force loading."
    fi
  fi
  ARGS+=("--impalad ${IMPALAD}")
  ARGS+=("--hive_hs2_hostport ${HS2_HOST_PORT}")
  ARGS+=("--hdfs_namenode ${HDFS_NN}")
  # Disable parallelism for dataload if IMPALA_SERIAL_DATALOAD is set
  if [[ "${IMPALA_SERIAL_DATALOAD}" -eq 1 ]]; then
    ARGS+=("--num_processes 1")
  fi
  if [[ -n ${TABLE_FORMATS} ]]; then
    # TBL_FMT_STR replaces slashes with underscores,
    # e.g., kudu/none/none -> kudu_none_none
    TBL_FMT_STR=${TABLE_FORMATS//[\/]/_}
    LOG_BASENAME=data-load-${WORKLOAD}-${EXPLORATION_STRATEGY}-${TBL_FMT_STR}.log
  else
    LOG_BASENAME=data-load-${WORKLOAD}-${EXPLORATION_STRATEGY}.log
  fi
  LOG_FILE=${IMPALA_DATA_LOADING_LOGS_DIR}/${LOG_BASENAME}
  echo "$MSG. Logging to ${LOG_FILE}"
  # Use unbuffered logging by executing with -u
  if ! impala-python -u ${IMPALA_HOME}/bin/load-data.py ${ARGS[@]} &> ${LOG_FILE}; then
    echo Error loading data. The end of the log file is:
    tail -n 50 $LOG_FILE
    return 1
  fi
}
function cache-test-tables {
  # HDFS-cache two small test tables in pool 'testPool'. Uncaching them
  # first makes the operation idempotent across reruns.
  echo CACHING tpch.nation AND functional.alltypestiny
  local shell_cmd="${IMPALA_HOME}/bin/impala-shell.sh"
  local stmt
  for stmt in \
      "alter table functional.alltypestiny set uncached" \
      "alter table tpch.nation set uncached" \
      "alter table tpch.nation set cached in 'testPool'" \
      "alter table functional.alltypestiny set cached in 'testPool'"; do
    "${shell_cmd}" -i ${IMPALAD} -q "${stmt}"
  done
}
function load-aux-workloads {
  # Load optional auxiliary workloads when both aux directories exist;
  # otherwise this is a no-op. Fails (and tails the log) on load errors.
  LOG_FILE=${IMPALA_DATA_LOADING_LOGS_DIR}/data-load-auxiliary-workloads-core.log
  rm -f $LOG_FILE
  # Load all the auxiliary workloads (if any exist)
  if [ -d ${IMPALA_AUX_WORKLOAD_DIR} ] && [ -d ${IMPALA_AUX_DATASET_DIR} ]; then
    echo Loading auxiliary workloads. Logging to $LOG_FILE.
    if ! impala-python -u ${IMPALA_HOME}/bin/load-data.py --workloads all\
        --impalad=${IMPALAD}\
        --hive_hs2_hostport=${HS2_HOST_PORT}\
        --hdfs_namenode=${HDFS_NN}\
        --workload_dir=${IMPALA_AUX_WORKLOAD_DIR}\
        --dataset_dir=${IMPALA_AUX_DATASET_DIR}\
        --exploration_strategy=core ${LOAD_DATA_ARGS} >> $LOG_FILE 2>&1; then
      echo Error loading aux workloads. The end of the log file is:
      tail -n 20 $LOG_FILE
      return 1
    fi
  else
    echo "Skipping load of auxilary workloads because directories do not exist"
  fi
}
function copy-and-load-dependent-tables {
  # Stage partition data in /tmp for tables whose load SQL moves files
  # (e.g. alltypesmixedformat), then run the dependent-tables SQL.
  # COPY
  # TODO: The multi-format table will move these files. So we need to copy them to a
  # temporary location for that table to use. Should find a better way to handle this.
  echo COPYING AND LOADING DATA FOR DEPENDENT TABLES
  hadoop fs -rm -r -f /test-warehouse/alltypesmixedformat \
    /tmp/alltypes_rc /tmp/alltypes_seq /tmp/alltypes_parquet
  hadoop fs -mkdir -p /tmp/alltypes_seq/year=2009 \
    /tmp/alltypes_rc/year=2009 /tmp/alltypes_parquet/year=2009
  # The file written by hive to /test-warehouse will be strangely replicated rather than
  # erasure coded if EC is not set in /tmp
  if [[ -n "${HDFS_ERASURECODE_POLICY:-}" ]]; then
    hdfs ec -setPolicy -policy "${HDFS_ERASURECODE_POLICY}" -path "/tmp/alltypes_rc"
    hdfs ec -setPolicy -policy "${HDFS_ERASURECODE_POLICY}" -path "/tmp/alltypes_seq"
    hdfs ec -setPolicy -policy "${HDFS_ERASURECODE_POLICY}" -path "/tmp/alltypes_parquet"
  fi
  hadoop fs -cp /test-warehouse/alltypes_seq/year=2009/month=2/ /tmp/alltypes_seq/year=2009
  hadoop fs -cp /test-warehouse/alltypes_rc/year=2009/month=3/ /tmp/alltypes_rc/year=2009
  hadoop fs -cp /test-warehouse/alltypes_parquet/year=2009/month=4/ /tmp/alltypes_parquet/year=2009
  # Create a hidden file in AllTypesSmall
  hadoop fs -cp -f /test-warehouse/zipcode_incomes/DEC_00_SF3_P077_with_ann_noheader.csv \
    /test-warehouse/alltypessmall/year=2009/month=1/_hidden
  hadoop fs -cp -f /test-warehouse/zipcode_incomes/DEC_00_SF3_P077_with_ann_noheader.csv \
    /test-warehouse/alltypessmall/year=2009/month=1/.hidden
  # In case the data is updated by a non-super user, make sure the user can write
  # by chmoding 777 /tmp/alltypes_rc and /tmp/alltypes_seq. This is needed in order
  # to prevent this error during data load to a remote cluster:
  #
  #   ERROR : Failed with exception Unable to move source hdfs://cluster-1.foo.cloudera.com:
  #   8020/tmp/alltypes_seq/year=2009/month=2/000023_0 to destination hdfs://cluster-1.foo.
  #   cloudera.com:8020/test-warehouse/alltypesmixedformat/year=2009/month=2/000023_0
  #   [...]
  #   Caused by: org.apache.hadoop.security.AccessControlException:
  #   Permission denied: user=impala, access=WRITE
  #   inode="/tmp/alltypes_seq/year=2009/month=2":hdfs:supergroup:drwxr-xr-x
  #
  # The error occurs while loading dependent tables.
  #
  # See: logs/data_loading/copy-and-load-dependent-tables.log)
  # See also: IMPALA-4345
  hadoop fs -chmod -R 777 /tmp/alltypes_rc /tmp/alltypes_seq /tmp/alltypes_parquet
  # For tables that rely on loading data from local fs test-wareload-house
  # TODO: Find a good way to integrate this with the normal data loading scripts
  beeline -n $USER -u "${JDBC_URL}" -f\
    ${IMPALA_HOME}/testdata/bin/load-dependent-tables.sql
}
function create-internal-hbase-table {
  # Recreate functional_hbase.internal_hbase_table from scratch (drop in
  # both Hive metastore and HBase, then create via beeline).
  # TODO: For some reason DROP TABLE IF EXISTS sometimes fails on HBase if the table does
  # not exist. To work around this, disable exit on error before executing this command.
  # Need to investigate this more, but this works around the problem to unblock automation.
  set +o errexit
  beeline -n $USER -u "${JDBC_URL}" -e\
    "DROP TABLE IF EXISTS functional_hbase.internal_hbase_table;"
  echo "disable 'functional_hbase.internal_hbase_table'" | hbase shell
  echo "drop 'functional_hbase.internal_hbase_table'" | hbase shell
  set -e
  # Used by CatalogTest to confirm that non-external HBase tables are identified
  # correctly (IMP-581)
  # Note that the usual 'hbase.table.name' property is not specified to avoid
  # creating tables in HBase as a side-effect.
  cat > /tmp/create-hbase-internal.sql << EOF
CREATE TABLE functional_hbase.internal_hbase_table(key int, value string)
STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler'
WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,cf1:val");
EOF
  beeline -n $USER -u "${JDBC_URL}" -f /tmp/create-hbase-internal.sql
  rm -f /tmp/create-hbase-internal.sql
}
function load-custom-data {
  # Upload hand-crafted data files used by specific regression tests.
  # Add a sequence file that only contains a header (see IMPALA-362)
  hadoop fs -put -f ${IMPALA_HOME}/testdata/tinytable_seq_snap/tinytable_seq_snap_header_only \
    /test-warehouse/tinytable_seq_snap
  # IMPALA-1619: payload compressed with snappy used for constructing large snappy block
  # compressed file
  hadoop fs -put -f ${IMPALA_HOME}/testdata/compressed_formats/compressed_payload.snap \
    /test-warehouse/compressed_payload.snap
  # Create Avro tables
  beeline -n $USER -u "${JDBC_URL}" -f\
    ${IMPALA_HOME}/testdata/avro_schema_resolution/create_table.sql
  # Delete potentially existing avro data
  hadoop fs -rm -f /test-warehouse/avro_schema_resolution_test/*.avro
  # Upload Avro data to the 'schema_resolution_test' table
  hadoop fs -put ${IMPALA_HOME}/testdata/avro_schema_resolution/records*.avro \
    /test-warehouse/avro_schema_resolution_test
}
function build-and-copy-hive-udfs {
  # Compile the test Hive UDF jar with Maven, then stage the UDF/UDA
  # libraries in HDFS for the test suite.
  pushd "${IMPALA_HOME}/java/test-hive-udfs"
  "${IMPALA_HOME}/bin/mvn-quiet.sh" clean
  "${IMPALA_HOME}/bin/mvn-quiet.sh" package
  popd
  "${IMPALA_HOME}/testdata/bin/copy-udfs-udas.sh" -build
}
# Additional data loading actions that must be executed after the main data is loaded.
function custom-post-load-steps {
  # TODO: Why is there a REMOTE_LOAD condition? See IMPALA-4347
  if [[ -z "$REMOTE_LOAD" ]]; then
    # Configure alltypes_seq as a read-only table. This is required for fe tests.
    # Set both read and execute permissions because accessing the contents of a directory on
    # the local filesystem requires the x permission (while on HDFS it requires the r
    # permission).
    hadoop fs -chmod -R 555 \
      ${FILESYSTEM_PREFIX}/test-warehouse/alltypes_seq/year=2009/month=1 \
      ${FILESYSTEM_PREFIX}/test-warehouse/alltypes_seq/year=2009/month=3
  fi
  hadoop fs -mkdir -p ${FILESYSTEM_PREFIX}/test-warehouse/lineitem_multiblock_parquet \
    ${FILESYSTEM_PREFIX}/test-warehouse/lineitem_sixblocks_parquet \
    ${FILESYSTEM_PREFIX}/test-warehouse/lineitem_multiblock_one_row_group_parquet \
    ${FILESYSTEM_PREFIX}/test-warehouse/lineitem_multiblock_variable_num_rows_parquet
  # The 1 MiB block size below deliberately splits these files across many
  # blocks to exercise the multi-block Parquet scanner paths.
  #IMPALA-1881: data file produced by hive with multiple blocks.
  hadoop fs -Ddfs.block.size=1048576 -put -f \
    ${IMPALA_HOME}/testdata/LineItemMultiBlock/000000_0 \
    ${FILESYSTEM_PREFIX}/test-warehouse/lineitem_multiblock_parquet
  # IMPALA-2466: Add more tests to the HDFS Parquet scanner (Added after IMPALA-1881)
  hadoop fs -Ddfs.block.size=1048576 -put -f \
    ${IMPALA_HOME}/testdata/LineItemMultiBlock/lineitem_sixblocks.parquet \
    ${FILESYSTEM_PREFIX}/test-warehouse/lineitem_sixblocks_parquet
  # IMPALA-2466: Add more tests to the HDFS Parquet scanner (this has only one row group)
  hadoop fs -Ddfs.block.size=1048576 -put -f \
    ${IMPALA_HOME}/testdata/LineItemMultiBlock/lineitem_one_row_group.parquet \
    ${FILESYSTEM_PREFIX}/test-warehouse/lineitem_multiblock_one_row_group_parquet
  # IMPALA-11350: Add tests for row groups with variable num rows.
  hadoop fs -Ddfs.block.size=1048576 -put -f \
    ${IMPALA_HOME}/testdata/LineItemMultiBlock/lineitem_multiblock_variable_num_rows.parquet \
    ${FILESYSTEM_PREFIX}/test-warehouse/lineitem_multiblock_variable_num_rows_parquet
  # IMPALA-3307: Upload test time-zone database
  hadoop fs -Ddfs.block.size=1048576 -put -f ${IMPALA_HOME}/testdata/tzdb \
    ${FILESYSTEM_PREFIX}/test-warehouse/
}
function copy-and-load-ext-data-source {
  # Stage the test data source library in HDFS, then register the data
  # source table through impala-shell.
  "${IMPALA_HOME}/testdata/bin/copy-data-sources.sh"
  "${IMPALA_HOME}/bin/impala-shell.sh" -i ${IMPALAD} -f\
      "${IMPALA_HOME}/testdata/bin/create-data-source-table.sql"
}
function check-hdfs-health {
  # Block until HDFS reports no under-replicated blocks (or, with erasure
  # coding on, no replicated data at all). Restarts the whole cluster when
  # replication stops making progress; fails the load after MAX_FSCK tries.
  if [[ -n "${HDFS_ERASURECODE_POLICY:-}" ]]; then
    # With EC enabled every file should be erasure coded; any replicated
    # bytes indicate misconfigured data load.
    if ! grep "Replicated Blocks:[[:space:]]*#[[:space:]]*Total size:[[:space:]]*0 B"\
        <<< $(hdfs fsck /test-warehouse | tr '\n' '#'); then
      echo "There are some replicated files despite that erasure coding is on"
      echo "Failing the data loading job"
      exit 1
    fi
    return
  fi
  MAX_FSCK=30
  SLEEP_SEC=120
  LAST_NUMBER_UNDER_REPLICATED=-1
  for ((FSCK_COUNT = 0; FSCK_COUNT <= MAX_FSCK; FSCK_COUNT++)); do
    FSCK_OUTPUT="$(hdfs fsck /test-warehouse)"
    echo "$FSCK_OUTPUT"
    NUMBER_UNDER_REPLICATED=$(
        grep -oP "Under-replicated blocks:[[:space:]]*\K[[:digit:]]*" <<< "$FSCK_OUTPUT")
    if [[ "$NUMBER_UNDER_REPLICATED" -eq 0 ]] ; then
      # All the blocks are fully-replicated. The data loading can continue.
      return
    fi
    if [[ $(($FSCK_COUNT + 1)) -eq "$MAX_FSCK" ]] ; then
      echo "Some HDFS blocks are still under-replicated after running HDFS fsck"\
          "$MAX_FSCK times."
      echo "Some tests cannot pass without fully-replicated blocks (IMPALA-3887)."
      echo "Failing the data loading."
      exit 1
    fi
    # No progress since last iteration: replication is stuck, so bounce
    # the whole cluster rather than only HDFS.
    if [[ "$NUMBER_UNDER_REPLICATED" -eq "$LAST_NUMBER_UNDER_REPLICATED" ]] ; then
      echo "There are under-replicated blocks in HDFS and HDFS is not making progress"\
          "in $SLEEP_SEC seconds. Attempting to restart HDFS to resolve this issue."
      # IMPALA-7119: Other minicluster components (like HBase) can fail if HDFS is
      # restarted by itself, so restart the whole cluster, including Impala.
      restart-cluster
    fi
    LAST_NUMBER_UNDER_REPLICATED="$NUMBER_UNDER_REPLICATED"
    echo "$NUMBER_UNDER_REPLICATED under replicated blocks remaining."
    echo "Sleeping for $SLEEP_SEC seconds before rechecking."
    sleep "$SLEEP_SEC"
  done
}
# Runs a few trivial Hive statements serially before the parallel load steps
# start, to initialize long-lived Hive state up front (see IMPALA-8841).
function warm-up-hive {
echo "Running warm up Hive statements"
$HIVE_CMD -e "create database if not exists functional;"
$HIVE_CMD -e "create table if not exists hive_warm_up_tbl (i int);"
# The insert below starts a Tez session (if Hive uses Tez) and initializes
# .hiveJars directory in HDFS, see IMPALA-8841.
$HIVE_CMD -e "insert overwrite table hive_warm_up_tbl values (1);"
}
# ---------------------------------------------------------------------------
# Main data-load sequence.  Behavior is controlled by environment variables
# set earlier in this script (REMOTE_LOAD, TARGET_FILESYSTEM,
# SKIP_METADATA_LOAD, SKIP_RANGER, TIMEOUT_PID, ...).
# ---------------------------------------------------------------------------
# For kerberized clusters, use kerberos
if ${CLUSTER_DIR}/admin is_kerberized; then
LOAD_DATA_ARGS="${LOAD_DATA_ARGS} --use_kerberos --principal=${MINIKDC_PRINC_HIVE}"
fi
# Start Impala
if [[ -z "$REMOTE_LOAD" ]]; then
run-step "Starting Impala cluster" start-impala-cluster.log start-impala
fi
# The hdfs environment script sets up kms (encryption) and cache pools (hdfs caching).
# On a non-hdfs filesystem, we don't test encryption or hdfs caching, so this setup is not
# needed.
if [[ "${TARGET_FILESYSTEM}" == "hdfs" ]]; then
run-step "Setting up HDFS environment" setup-hdfs-env.log \
${IMPALA_HOME}/testdata/bin/setup-hdfs-env.sh
fi
# Full metadata load: functional-query/TPC-H/TPC-DS plus nested, custom and
# dependent data sets.
if [ $SKIP_METADATA_LOAD -eq 0 ]; then
# Using Hive in non-parallel mode before starting parallel execution may help with some
# flakiness during data load, see IMPALA-8841. The problem only occurs in Hive 3
# environment, but always doing the warm up shouldn't hurt much and may make it easier
# to investigate future issues where Hive doesn't work at all.
warm-up-hive
run-step "Loading custom schemas" load-custom-schemas.log load-custom-schemas
# Run some steps in parallel, with run-step-backgroundable / run-step-wait-all.
# This is effective on steps that take a long time and don't depend on each
# other. Functional-query takes about ~35 minutes, and TPC-H and TPC-DS can
# finish while functional-query is running.
run-step-backgroundable "Loading functional-query data" load-functional-query.log \
load-data "functional-query" "exhaustive"
run-step-backgroundable "Loading TPC-H data" load-tpch.log load-data "tpch" "core"
run-step-backgroundable "Loading TPC-DS data" load-tpcds.log load-data "tpcds" "core"
run-step-wait-all
# Load tpch nested data.
# TODO: Hacky and introduces more complexity into the system, but it is expedient.
if [[ -n "$CM_HOST" ]]; then
LOAD_NESTED_ARGS="--cm-host $CM_HOST"
fi
run-step "Loading nested parquet data" load-nested.log \
${IMPALA_HOME}/testdata/bin/load_nested.py \
-t tpch_nested_parquet -f parquet/none ${LOAD_NESTED_ARGS:-}
run-step "Loading nested orc data" load-nested.log \
${IMPALA_HOME}/testdata/bin/load_nested.py \
-t tpch_nested_orc_def -f orc/def ${LOAD_NESTED_ARGS:-}
run-step "Loading auxiliary workloads" load-aux-workloads.log load-aux-workloads
run-step "Loading dependent tables" copy-and-load-dependent-tables.log \
copy-and-load-dependent-tables
run-step "Loading custom data" load-custom-data.log load-custom-data
run-step "Creating many block table" create-table-many-blocks.log \
${IMPALA_HOME}/testdata/bin/create-table-many-blocks.sh -p 1234 -b 1
elif [ "${TARGET_FILESYSTEM}" = "hdfs" ]; then
echo "Skipped loading the metadata."
run-step "Loading HBase data only" load-hbase-only.log \
load-data "functional-query" "core" "hbase/none"
fi
if [[ $SKIP_METADATA_LOAD -eq 1 ]]; then
# Tests depend on the kudu data being clean, so load the data from scratch.
# This is only necessary if this is not a full dataload, because a full dataload
# already loads Kudu functional and TPC-H tables from scratch.
run-step-backgroundable "Loading Kudu functional" load-kudu.log \
load-data "functional-query" "core" "kudu/none/none" force
run-step-backgroundable "Loading Kudu TPCH" load-kudu-tpch.log \
load-data "tpch" "core" "kudu/none/none" force
fi
run-step-backgroundable "Loading Hive UDFs" build-and-copy-hive-udfs.log \
build-and-copy-hive-udfs
run-step-wait-all
run-step "Running custom post-load steps" custom-post-load-steps.log \
custom-post-load-steps
# HDFS-only extras: caching, external data sources, HBase table, health check.
if [ "${TARGET_FILESYSTEM}" = "hdfs" ]; then
# Caching tables in s3 returns an IllegalArgumentException, see IMPALA-1714
run-step "Caching test tables" cache-test-tables.log cache-test-tables
# TODO: Modify the .sql file that creates the table to take an alternative location into
# account.
run-step "Loading external data sources" load-ext-data-source.log \
copy-and-load-ext-data-source
run-step "Creating internal HBase table" create-internal-hbase-table.log \
create-internal-hbase-table
run-step "Checking HDFS health" check-hdfs-health.log check-hdfs-health
# Saving the list of created files can help in debugging missing files.
run-step "Logging created files" created-files.log hdfs dfs -ls -R /test-warehouse
fi
# TODO: Investigate why all stats are not preserved. Theoretically, we only need to
# recompute stats for HBase.
run-step "Computing table stats" compute-table-stats.log \
${IMPALA_HOME}/testdata/bin/compute-table-stats.sh
# IMPALA-8346: this step only applies if the cluster is the local minicluster
if [[ -z "$REMOTE_LOAD" ]]; then
run-step "Creating tpcds testcase data" create-tpcds-testcase-data.log \
${IMPALA_HOME}/testdata/bin/create-tpcds-testcase-files.sh
fi
if [[ $SKIP_RANGER -eq 0 ]]; then
run-step "Setting up Ranger" setup-ranger.log \
${IMPALA_HOME}/testdata/bin/setup-ranger.sh
fi
# Restart the minicluster. This is strictly to provide a sanity check that
# restarting the minicluster works and doesn't impact the tests. This is a common
# operation for developers, so it is nice to test it.
restart-cluster
# Kill the spawned timeout process and its child sleep process.
# There may not be a sleep process, so ignore failure.
pkill -P $TIMEOUT_PID || true
kill $TIMEOUT_PID
| true
|
4aae48fac58cda318ac3d8c084c91334c6029aa8
|
Shell
|
praveenn7/KKN1
|
/p4.sh
|
UTF-8
| 420
| 3.3125
| 3
|
[] |
no_license
|
# Exercise description kept as a no-op here-document (acts as a comment block).
: <<'Q'
write a shell script
step 1: declare and initialize file system details
(filesystemType; filesystemMount ; filePartition)
(ex: xfs /D1 /dev/sda1 )
step 2: using echo command - display filesystem details
Q
# File-system details.
fstype="xfs"        # file-system type
fmount="/D1"        # mount point
fpart="/dev/xvdb1"  # partition device
# Display the details between separator rules, one line each.
rule="--------------------------------"
printf '%s\n' \
  "$rule" \
  "File system type is:$fstype" \
  "File system mount point:$fmount" \
  "Partition is:$fpart" \
  "$rule"
| true
|
63d2cc8dde1b4aa3476ad3978319d7c2137cf42d
|
Shell
|
emg110/wolfservers
|
/scripts/live.sh
|
UTF-8
| 1,821
| 3.0625
| 3
|
[
"MIT"
] |
permissive
|
# Builds a raw Cardano stake-pool registration transaction:
#  1. query the current slot and all UTXOs for payment.addr,
#  2. sum the balances and collect --tx-in arguments,
#  3. build a draft tx to compute the minimum fee,
#  4. rebuild the final tx (tx.raw) with the fee subtracted.
currentSlot=$(cardano-cli query tip --mainnet | jq -r '.slot')
echo Current Slot: $currentSlot
cardano-cli query utxo \
--address $(cat payment.addr) \
--mainnet > fullUtxo.out
# Drop the two header lines and sort by balance (column 3), largest first.
tail -n +3 fullUtxo.out | sort -k3 -nr > balance.out
tx_in=""
total_balance=0
# Accumulate every UTXO: columns are <tx hash> <index> <lovelace balance>.
# The "aa1".."aa4" echoes look like debug tracing left in place.
while read -r utxo; do
echo aa1: ${utxo}
in_addr=$(awk '{ print $1 }' <<< "${utxo}")
echo aa2: ${in_addr}
idx=$(awk '{ print $2 }' <<< "${utxo}")
echo aa3: ${idx}
utxo_balance=$(awk '{ print $3 }' <<< "${utxo}")
total_balance=$((${total_balance}+${utxo_balance}))
# NOTE(review): this label says "TxHash" but prints the running total — verify.
echo TxHash: ${total_balance}
echo ADA: ${utxo_balance}
tx_in="${tx_in} --tx-in ${in_addr}#${idx}"
done < balance.out
echo aa4: ${tx_in}
txcnt=$(cat balance.out | wc -l)
echo Total ADA balance: ${total_balance}
echo Number of UTXOs: ${txcnt}
stakePoolDeposit=$(cat $NODE_HOME/params.json | jq -r '.stakePoolDeposit')
echo stakePoolDeposit: $stakePoolDeposit
# Draft transaction with fee 0, used only to calculate the minimum fee below.
cardano-cli transaction build-raw \
${tx_in} \
--tx-out $(cat payment.addr)+$(( ${total_balance} - ${stakePoolDeposit})) \
--invalid-hereafter $(( ${currentSlot} + 10000)) \
--fee 0 \
--certificate-file pool.cert \
--certificate-file deleg.cert \
--out-file tx.tmp
fee=$(cardano-cli transaction calculate-min-fee \
--tx-body-file tx.tmp \
--tx-in-count ${txcnt} \
--tx-out-count 1 \
--mainnet \
--witness-count 3 \
--byron-witness-count 0 \
--protocol-params-file params.json | awk '{ print $1 }')
echo fee: $fee
# Change output = total - deposit - fee.
txOut=$((${total_balance}-${stakePoolDeposit}-${fee}))
echo txOut: ${txOut}
# Final unsigned transaction body.
cardano-cli transaction build-raw \
${tx_in} \
--tx-out $(cat payment.addr)+${txOut} \
--invalid-hereafter $(( ${currentSlot} + 10000)) \
--fee ${fee} \
--certificate-file pool.cert \
--certificate-file deleg.cert \
--out-file tx.raw
| true
|
5a61e0c714d81086be6f3f449bf72473235df913
|
Shell
|
radarlui/jboss-soe
|
/build/sat-upload.sh
|
UTF-8
| 2,240
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
# Interactively upload RPMs to a Satellite server with rhnpush, either all
# RPMs under RELEASE_DIR (choice 1) or a filtered, menu-selected subset
# (choice 2).  Pushes each RPM to both the RH5 and RH6 channels.
BUILD_DIR=`dirname $0`
HOST="Please enter the Host"
# NOTE(review): placeholder text says "Host" — presumably meant "User"; verify.
USER="Please enter the Host"
CHANNEL_5="Please enter the name of the Channel to upload to"
CHANNEL_6="Please enter the name of the Channel to upload to"
EASYBASHGUI_VERSION=1.3.1
# EasyBashGUI provides `list`, `clean_temp`, ${dir_tmp}/${file_tmp}, etc.
source ${BUILD_DIR}/../tools/EasyBashGUI_${EASYBASHGUI_VERSION}/easybashgui_${EASYBASHGUI_VERSION}
clear
echo "What are you going to do?"
echo "1 - upload all available RPMs"
echo "2 - upload selected RPMs"
echo ""
read UPLOAD
# Root directory holding the RPMs, read from build.properties
# (release.files.rootdir=...), with CR characters stripped.
RELEASE_DIR=`cat build.properties | egrep -v "^#" | grep release.files.rootdir | awk -F"=" '{print $2;}' | sed '{s#\r##;}'`
case $UPLOAD in
1)
clear
echo ""
echo "Please enter password and press Return to continue or Ctrl-C to cancel!"
# Disable terminal echo while the password is typed, then restore it.
stty_orig=`stty -g`
stty -echo
read SECRET
stty $stty_orig
for rpm in `find ${RELEASE_DIR} -name *.rpm`;do
echo "uploading $rpm now on SOE RH 5 repo..."
rhnpush --server ${HOST} --force -u ${USER} -p ${SECRET} -c ${CHANNEL_5} ${rpm}
echo "uploading $rpm now on SOE RH 6 repo..."
rhnpush --server ${HOST} --force -u ${USER} -p ${SECRET} -c ${CHANNEL_6} ${rpm}
done
echo "uploading done."
;;
2)
pushd ${RELEASE_DIR}
FILTER=${1:-\*}
echo ""
echo -e "Please enter a filter criteria and press Return to continue or Ctrl-C to cancel! [${FILTER}]"
read FILTER_TEMP
# Quoted test: unquoted `[ -n $FILTER_TEMP ]` was always true, so pressing
# Return on an empty answer silently replaced the default filter with "".
if [ -n "$FILTER_TEMP" ]; then
FILTER=$FILTER_TEMP
fi
for rpm in `find . -type f -name \*${FILTER}\*.rpm`;do
rpmmenu="$rpmmenu `basename $rpm`"
done
# $rpmmenu is deliberately unquoted: each file name becomes one menu entry.
list $rpmmenu
popd
# NOTE(review): `$(0< file)` may not expand to the file contents like
# bash's `$(< file)` does — confirm against the EasyBashGUI convention.
choice="$(0< "${dir_tmp}/${file_tmp}" )"
if_arg_is_an_empty_variable_then_exit "choice" #bye, bye, user... :)
clear
echo "This will be uploaded to satellite:"
echo "$choice"
echo ""
echo "Please enter password and press Return to continue or Ctrl-C to cancel!"
stty_orig=`stty -g`
stty -echo
read SECRET
stty $stty_orig
for rpm in ${choice}; do
echo "uploading $rpm now on SOE RH 5 repo..."
rhnpush --server ${HOST} --force -u ${USER} -p ${SECRET} -c ${CHANNEL_5} `find ${RELEASE_DIR} -name ${rpm}`
echo "uploading $rpm now on SOE RH 6 repo..."
rhnpush --server ${HOST} --force -u ${USER} -p ${SECRET} -c ${CHANNEL_6} `find ${RELEASE_DIR} -name ${rpm}`
done
echo "uploading done."
;;
*) echo "Wrong selection"
;;
esac
####
clean_temp
| true
|
490d3e0a739a27fd9f742891e2a3086222254773
|
Shell
|
Trietptm-on-Security/el_harden
|
/fixes/repo/account_disable_post_pw_expiration.sh
|
UTF-8
| 426
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/bash
# SID: CCE-27283-1
# Hardening fix: set INACTIVE=<days> in /etc/default/useradd so accounts are
# disabled N days after password expiration (default 30, overridable via
# DISABLE_POST_PW_EXPIRATION).  All output is appended to ${LOGFILE}.
{
: ${DISABLE_POST_PW_EXPIRATION:=30}
var_account_disable_post_pw_expiration="${DISABLE_POST_PW_EXPIRATION}"
# If an INACTIVE line already exists, rewrite it in place ...
grep -q ^INACTIVE /etc/default/useradd && \
sed -i "s/INACTIVE.*/INACTIVE=$var_account_disable_post_pw_expiration/g" /etc/default/useradd
# ... otherwise ($? reflects the grep-&&-sed compound above) append one.
if ! [ $? -eq 0 ]; then
echo "INACTIVE=$var_account_disable_post_pw_expiration" >> /etc/default/useradd
fi
# NOTE(review): this "testing" echo looks like leftover debug output — confirm
# it is intentional before relying on the log contents.
echo "testing"
} &>> ${LOGFILE}
| true
|
3a8c88f16b6d060b600b14d03385d3e147ade5e8
|
Shell
|
regilo76/pButtonsExtract
|
/pbreportlib/graphic_gnuplot.sh
|
UTF-8
| 676
| 2.625
| 3
|
[] |
no_license
|
# Generate charts for every pButtons data file: each file in $DataFolder gets
# a gnuplot script from genchart.sh (executed via bash), then two fixed EPS
# line charts are rendered with plotline.plg.
for f in $DataFolder"/"*; do echo "Processing $f file.."; genchart.sh $f | bash; done
gnuplot -persistent -e "n='4'" -e "filename='$CPUTimeFile'" -e "graphic_title='CPU Time'" -e "max_y_stat=100" /usr/local/bin/plotline.plg > $CPUTimeFile.eps
# NOTE(review): this second chart plots $CPUUtilizationFile but its title is
# still 'CPU Time' — likely a copy-paste slip; confirm intended title.
gnuplot -persistent -e "n='4'" -e "filename='$CPUUtilizationFile'" -e "graphic_title='CPU Time'" -e "max_y_stat=100" /usr/local/bin/plotline.plg > $CPUUtilizationFile.eps
# Remaining chart types are currently disabled.
#genchart.sh $RunBlockedFile | bash
#genchart.sh $TotalProcessFile | bash
#genchart.sh $FreePagesFile | bash
#genchart.sh $PageInFile | bash
#genchart.sh $ContextSwitchFile | bash
#genchart.sh $MgstatFile | bash
| true
|
a4306471ae693c87ca89c357c62aef93ed6bd6d0
|
Shell
|
Wetitpig/termux-packages
|
/scripts/utils/package/package.sh
|
UTF-8
| 1,238
| 3.546875
| 4
|
[
"Apache-2.0"
] |
permissive
|
# shellcheck shell=sh
# shellcheck disable=SC2039,SC2059
# Title: package
# Description: A library for package utils.
##
# Check if package on device builds are supported by checking
# `$TERMUX_PKG_ON_DEVICE_BUILD_NOT_SUPPORTED` value in its `build.sh`
# file.
#
# The command substitution is quoted: previously, when the flag was unset
# or empty, the unquoted expansion collapsed and `[` failed with a syntax
# error (exit 2) instead of reporting "supported".
# .
# **Parameters:**
# `package_dir` - The directory path for the package `build.sh` file.
# .
# **Returns:**
# Returns `0` if supported, otherwise non-zero.
# .
# package__is_package_on_device_build_supported `package_dir`
##
package__is_package_on_device_build_supported() {
    # Source build.sh in a subshell so its variables do not leak into the caller.
    [ "$(. "${1}/build.sh"; echo "$TERMUX_PKG_ON_DEVICE_BUILD_NOT_SUPPORTED")" != "true" ]
    return $?
}
##
# Check if a specific version of a package has been built by inspecting
# the marker file `$TERMUX_BUILT_PACKAGES_DIRECTORY/<package_name>`,
# which records the last-built version string.
# .
# **Parameters:**
# `package_name` - The package name for the package.
# `package_version` - The package version for the package to check.
# .
# **Returns:**
# Returns `0` if built at exactly that version, otherwise `1`.
# .
# package__is_package_version_built `package_name` `package_version`
##
package__is_package_version_built() {
    local marker_file="$TERMUX_BUILT_PACKAGES_DIRECTORY/$1"
    if [ ! -e "$marker_file" ]; then
        return 1
    fi
    [ "$(cat "$marker_file")" = "$2" ]
    return $?
}
| true
|
22c0da9292dfbe3d85cbb75d64f2ec2788673572
|
Shell
|
layeddie/example-nix
|
/pkgs-make/haskell/tools/cabal-new-watch/cabal-new-watch
|
UTF-8
| 1,326
| 3.625
| 4
|
[] |
no_license
|
#!/bin/sh -eu
# Sentinel value piped through the watcher once at startup so the first
# build runs immediately, before any file actually changes.
FIRST_RUN="first run"
# Entry point: run one build immediately (via the FIRST_RUN sentinel), then
# rebuild whenever a relevant source file changes.  All arguments are passed
# straight through to `cabal new-build`.
main()
{
    printf '%s\n' "$FIRST_RUN" | run_cabal_per_line "$@"
    wait_for_change | filter_unrelated_out | run_cabal_per_line "$@"
}
# Stream file-system events for the current directory, one changed path per
# line, using fswatch with a 0.5s latency (-l .5) and dot-paths excluded
# (-E -e '/\.' — extended-regex exclude).  Only the listed event types are
# reported; fswatch's own diagnostics are discarded.
wait_for_change()
{
fswatch -raE -l .5 -e '/\.' \
--event AttributeModified \
--event Created \
--event Removed \
--event Renamed \
--event Updated \
. 2>/dev/null
}
# Keep only Haskell/Cabal source paths (*.hs, *.lhs, *.cabal whose file name
# does not start with '.' or '#'), then drop build output under dist-newstyle/
# and flycheck temp files.  Line-buffered so events flow through immediately.
filter_unrelated_out()
{
    grep --line-buffered -E '/[^.#][^/]*\.(hs|lhs|cabal)$' \
        | grep --line-buffered -E -v '(/dist-newstyle/|/flycheck_)'
}
# For each path read from stdin, rebuild via run_cabal — but only when the
# file's md5 differs from the previous trigger's (debounces duplicate events),
# or when the line is the FIRST_RUN sentinel.  Prints colored SUCCESS/ERROR
# status lines; on failure the captured compiler output is shown in red.
run_cabal_per_line()
{
last_hash=""
while read -r file_triggering
do
# If md5sum fails (file deleted between event and hash), reuse the old
# hash so the comparison below treats it as "unchanged".
new_hash="$(
md5sum "$file_triggering" 2> /dev/null \
|| echo "$last_hash")"
if [ "$new_hash" != "$last_hash" ] \
|| [ "$file_triggering" = "$FIRST_RUN" ]
then
printf '\nTRIGGER: %s\n' "$file_triggering"
last_hash="$new_hash"
if output="$(run_cabal "$@")"
then
printf '\033[34m%s: SUCCESS\033[m\n' "$(date)"
else
printf '\n\033[31m%s\033[m\n' "$output"
printf '\n\033[1;31m%s: ERROR\033[m\n' "$(date)"
fi
fi
done
}
# Invoke cabal with stdout and stderr swapped (3>&2 2>&1 1>&3), so the
# caller's command substitution captures cabal's stderr (the diagnostics)
# while cabal's regular stdout goes to the terminal.
run_cabal()
{
cabal new-build "$@" 3>&2 2>&1 1>&3
}
main "$@"
| true
|
596182350fd7cb286b27a0b966c4d23332e2cdc6
|
Shell
|
vrybas/dotfiles
|
/bin/git-pick
|
UTF-8
| 545
| 3.3125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Usage: git pick
#
# 1. Creates `cherry-pick` branch from current branch
# 2. Saves `git log -p` to a file
# 3. Switches back to current branch
# 4. Opens `git log -p` file in vim
# 5. Now, in another shell, you can `git reset --hard` current branch
# and cherry-pick some commits, looking at the log in a file.
# Remember the starting branch so we can return to it after snapshotting.
current_branch=$(git rev-parse --abbrev-ref HEAD)
out_file=.git/diff.diff
# Recreate the throwaway branch at HEAD.  The delete exits non-zero on the
# first run (branch absent); without `set -e` that failure is harmless.
git branch -D cherry-pick
git checkout -b cherry-pick
# Snapshot the last 100 commits with patches for browsing.
git log -p -100 > $out_file
git checkout $current_branch
vim "+set ft=git" $out_file
| true
|
36120faa613251e44911f22c6673363376a2b336
|
Shell
|
BaiLab/Read-Split-Fly
|
/splitPairs.sh
|
UTF-8
| 2,807
| 3.953125
| 4
|
[
"Apache-2.0",
"Artistic-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
#!/bin/bash
# Resolve the directory containing this script (physical path) and load the
# shared configuration, which provides log/yell/die/try, RSR_PROGRAM,
# RSR_TEMP_DIR, BASENAME_SCRIPT, etc.
BASEDIR=$( cd ${0%/*} >& /dev/null ; pwd -P )
source "${BASEDIR}/config.sh"
OPTSFILE="" # options file path; set in the main section below
OUTPUTFILE="" # results base name; set in the main section below
# options file needs:
# RNA-seq file: $2
# max split distance: $6
# rna sample length: $4
# known gene reference: $3
# reference boundries: $3.intronBoundries.exonsgaps
# supporting read tolerance: $7
# output basename: $(basename $2)
# Builds the options file consumed by the RSW program, one value per line:
# reads file ($2), max split distance ($6), sample length ($3), refFlat
# reference, intron/exon boundary file, min split distance ($5), support
# tolerance ($7), output base name, required supports ($8).
# Derives REFDIR from BOWTIE_INDEX_ROOT when unset, and sets OUTPUTFILE
# (results base name under ${9}) when empty.
function make_options_file() {
if [ -f "$OPTSFILE" ]; then
rm "$OPTSFILE"
fi
# Quoted: the previous unquoted `[ -z $REFDIR ]` broke when REFDIR
# contained whitespace (and only worked by accident when unset).
if [ -z "$REFDIR" ];then
if [ -n "$BOWTIE_INDEX_ROOT" ]; then
REFDIR="$BOWTIE_INDEX_ROOT/$1"
else
die "Panic! dunno where the ref files are!"
fi
fi
# Both reference files must exist for genome $1.
if [ ! -f "${REFDIR}/${1}.refFlat.txt" ]; then
die "Error: cannot find refFlat file for $1"
fi
if [ ! -f "${REFDIR}/${1}.refFlat.txt.intronBoundary.exonsgaps" ]; then
die "Error: cannot find intron/exon boundry file for $1"
fi
touch "$OPTSFILE"
echo "$2" > $OPTSFILE #reads file
echo "$6" >> $OPTSFILE #maxSplitDistance
echo "$3" >> $OPTSFILE #sampleLength
echo "${REFDIR}/${1}.refFlat.txt" >> $OPTSFILE #refFlat
echo "${REFDIR}/${1}.refFlat.txt.intronBoundary.exonsgaps" >> $OPTSFILE #intron/exon boundry
echo "$5" >> $OPTSFILE #minSplitDistance
echo "$7" >> $OPTSFILE #Support tolerance
if [ -z "$OUTPUTFILE" ]; then
OUTPUTFILE="${9}/$(echo "$(python $BASENAME_SCRIPT $2)" | cut -d. -f1)"
fi
echo "$OUTPUTFILE" >> $OPTSFILE #results base name
echo "$8" >> $OPTSFILE #required supports
}
# Debug helper: print the generated options file when it exists; a missing
# file is not an error (exit status stays 0 either way).
dry_run() {
    [ ! -f "$OPTSFILE" ] || cat "$OPTSFILE"
}
# Runs the RSW binary ($RSR_PROGRAM) on the prepared options file, appending
# its stdout to $LOG_FILE (or discarding it when LOG_FILE is empty), and
# aborts the script if the expected ${OUTPUTFILE}.results was not produced.
# Does nothing when no options file exists.
function run_rsw() {
if [ -f "$OPTSFILE" ]; then
log "OUTPUTFILE = $OUTPUTFILE"
if [ -z "$LOG_FILE" ]; then
logfile="/dev/null"
else
logfile="${LOG_FILE}"
fi
try $RSR_PROGRAM "$OPTSFILE" >> $logfile
if [ ! -f "${OUTPUTFILE}.results" ]; then
log "Panic! rsw failed to generate output file. Check stderr."
exit 1
fi
fi
}
# Archive the consumed options file into the results directory ($1).
cleanup() {
    mv "$OPTSFILE" "$1"
}
# Main: validate the 9 required arguments, derive OPTSFILE/OUTPUTFILE names
# (timestamped via `date +%s`), build the options file, run the converter,
# and finally print the results path for the caller to consume.
if (( $# < 9 )); then
yell "usage: $0 genome readsFile readLength minSplitSize minSplitdistance maxSplitdistance regionBuffer requiredSupports pathToSaveFesults"
die "you had $#"
exit 1
fi
date=$(date +%s)
genome=$1
log "fyi: \$2 = $2"
OPTSFILE="$RSR_TEMP_DIR/$(python $BASENAME_SCRIPT $2).${date}.options.txt"
OUTPUTFILE="$9/$(echo "$(python $BASENAME_SCRIPT $2)" | cut -d. -f1)"
make_options_file $*
if [ ! -d "${9}" ];then
log "${9} didn't exist. something wrong?"
mkdir "${9}"
fi
run_rsw
cleanup "${9}"
#dry_run "$OPTSFILE"
# Emit the results file path as the script's output.
echo "${OUTPUTFILE}.results"
| true
|
743dd3c178ecfe862b966d8613d6f5227596e807
|
Shell
|
askrepps/EN605.417.FA
|
/module4/run.sh
|
UTF-8
| 923
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/sh
# Announce and run one arrayXor invocation.
#   $1 - total number of threads
#   $2 - threads per block
runTest()
{
    numThreads=$1
    blockSize=$2
    printf 'Running with %s total threads and a block size of %s threads each...\n' \
        "$numThreads" "$blockSize"
    build/arrayXor "$numThreads" "$blockSize"
}
echo 'Creating build directory...'
if [ ! -d build ]; then
    mkdir build
    if [ $? -ne 0 ]; then
        echo 'Could not create build directory' >&2
        exit 1
    fi
fi
echo 'Compiling...'
nvcc -std=c++11 -o build/arrayXor arrayXor.cu
if [ $? -ne 0 ]; then
    echo 'Compilation failed' >&2
    exit 1
fi
# Sweep total thread counts 512 .. 1048576 (powers of two), running each
# with block sizes of 256 and 512 threads — the same sequence the original
# 24 hand-written runTest calls produced, without the duplication.
for totalThreads in 512 1024 2048 4096 8192 16384 32768 65536 131072 262144 524288 1048576; do
    runTest "$totalThreads" 256
    runTest "$totalThreads" 512
done
| true
|
ff539dda846206e103ccdfa4a4ff103671deb767
|
Shell
|
gitter-badger/solvcon
|
/ground/scvars.sh
|
UTF-8
| 1,740
| 3.203125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#MANPATH=$(manpath -q)
# Prepend $1 to MANPATH (append when $2 is "after"), skipping duplicates.
# Literal matching via `case` replaces the old `echo | egrep` pipeline,
# which treated $1 as a regex (paths containing '.', '+', ... could
# mis-match) and forked two processes per call.
manpathmunge () {
  case ":${MANPATH}:" in
    *":$1:"*) ;;  # already present — nothing to do
    *)
      if [ "$2" = "after" ] ; then
        MANPATH=$MANPATH:$1
      else
        MANPATH=$1:$MANPATH
      fi
      ;;
  esac
  export MANPATH
}
# Prepend $1 to PYTHONPATH (append when $2 is "after"), skipping duplicates.
# Uses literal `case` matching instead of the old egrep regex, which could
# mis-match on paths containing regex metacharacters and forked per call.
pythonpathmunge () {
  case ":${PYTHONPATH}:" in
    *":$1:"*) ;;  # already present
    *)
      if [ "$2" = "after" ] ; then
        PYTHONPATH=$PYTHONPATH:$1
      else
        PYTHONPATH=$1:$PYTHONPATH
      fi
      ;;
  esac
  export PYTHONPATH
}
# Prepend $1 to PATH (append when $2 is "after"), skipping duplicates.
# Literal `case` matching fixes the old egrep check, where $1 was used as
# an unescaped regex (e.g. a '+' in the path defeated the duplicate test),
# and avoids forking echo/egrep on every call.
pathmunge () {
  case ":${PATH}:" in
    *":$1:"*) ;;  # already present
    *)
      if [ "$2" = "after" ] ; then
        PATH=$PATH:$1
      else
        PATH=$1:$PATH
      fi
      ;;
  esac
  export PATH
}
# Prepend $1 to LIBRARY_PATH (append when $2 is "after"), skipping
# duplicates via literal `case` matching (the old egrep regex mis-matched
# paths with metacharacters such as '+', e.g. /opt/g++/lib).
libpathmunge () {
  case ":${LIBRARY_PATH}:" in
    *":$1:"*) ;;  # already present
    *)
      if [ "$2" = "after" ] ; then
        LIBRARY_PATH=$LIBRARY_PATH:$1
      else
        LIBRARY_PATH=$1:$LIBRARY_PATH
      fi
      ;;
  esac
  export LIBRARY_PATH
}
# Prepend $1 to LD_LIBRARY_PATH (append when $2 is "after"), skipping
# duplicates via literal `case` matching instead of the old regex-based
# egrep test (which broke on regex metacharacters and forked per call).
ldpathmunge () {
  case ":${LD_LIBRARY_PATH}:" in
    *":$1:"*) ;;  # already present
    *)
      if [ "$2" = "after" ] ; then
        LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$1
      else
        LD_LIBRARY_PATH=$1:$LD_LIBRARY_PATH
      fi
      ;;
  esac
  export LD_LIBRARY_PATH
}
# Prepend $1 to DYLD_LIBRARY_PATH (append when $2 is "after"), skipping
# duplicates via literal `case` matching instead of the old regex-based
# egrep test (which broke on regex metacharacters and forked per call).
dyldpathmunge () {
  case ":${DYLD_LIBRARY_PATH}:" in
    *":$1:"*) ;;  # already present
    *)
      if [ "$2" = "after" ] ; then
        DYLD_LIBRARY_PATH=$DYLD_LIBRARY_PATH:$1
      else
        DYLD_LIBRARY_PATH=$1:$DYLD_LIBRARY_PATH
      fi
      ;;
  esac
  export DYLD_LIBRARY_PATH
}
# Register the SOLVCON installation (rooted at $SCROOT) on the relevant
# search paths, choosing the Darwin or Linux dynamic-linker variable.
pathmunge $SCROOT/bin
manpathmunge $SCROOT/share/man
libpathmunge $SCROOT/lib
if [ `uname` == "Darwin" ]; then
dyldpathmunge $SCROOT/lib
dyldpathmunge $SCROOT/lib/vtk-5.6
else
ldpathmunge $SCROOT/lib
ldpathmunge $SCROOT/lib/vtk-5.6
fi
# Drop the helper functions so they don't leak into the sourcing shell.
unset manpathmunge
unset pythonpathmunge
unset pathmunge
unset libpathmunge
unset ldpathmunge
unset dyldpathmunge
# vim: sw=2 ts=2 tw=76 et nu ft=sh:
| true
|
50ed39de72c46c59ae5a6c7c0d1e22e95f7925df
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/mkpackage-git/PKGBUILD
|
UTF-8
| 575
| 2.65625
| 3
|
[] |
no_license
|
# Maintainer: Anna Ivanova <kalterfx@gmail.com>
# Arch Linux PKGBUILD for the VCS (git) package of mkpackage; pkgver is
# regenerated at build time from the cloned repository (count.shorthash).
pkgname='mkpackage-git'
pkgver='5.fbe2fd1'
url='https://github.com/kalterfive/mkpackage'
pkgrel='1'
pkgdesc='Collect an Arch package using its information from a local pacman database'
arch=('any')
license=('GPL3')
depends=('bash')
source=("mkpackage::git://github.com/kalterfive/mkpackage")
md5sums=('SKIP')
conflicts=('mkpackage')
provides=('mkpackage')
# makepkg calls this to compute the dynamic version for -git packages.
function pkgver()
{
cd 'mkpackage'
echo "$(git rev-list --count HEAD).$(git rev-parse --short HEAD)"
}
# Install step: delegate to the project's own install script with DESTDIR
# pointed at the staging directory.
function package()
{
cd 'mkpackage/script'
DESTDIR="$pkgdir" ./install.sh
}
| true
|
f2ba774be53167d50b28981de028e41a0663eedd
|
Shell
|
jbustamante35/phytomorph
|
/Aquire/phytoStreams/testStreams~
|
UTF-8
| 1,059
| 2.6875
| 3
|
[] |
permissive
|
#!/bin/bash
# Manual integration test for the phytomorph stream helpers: exercises the
# buffered-copy path first, then the direct (unbuffered) CyVerse path.
echo "#############################################"
echo "Testing local copying via buffers"
echo " This test has a root other than the file(s) root"
echo "Expect to have the file and the extra folder strucuture rendered"
echo "#############################################"
echo "creating cyverse buffer stream"
./createStream testProgram cyverseBuffer 1 1 /iplant/home/nmiller/testStreams/ /mnt/scratch1/phytomorph_dev/
# Queue three files into the buffer, then flush the whole buffer at once.
echo "streaming to buffer"
./streamFile testProgram cyverseBuffer 1 ./test.txt
./streamFile testProgram cyverseBuffer 1 ./test2.txt
./streamFile testProgram cyverseBuffer 1 ./test3.txt
echo "streaming the buffer"
./streamBuffer testProgram cyverseBuffer 1
echo "#############################################"
echo "Testing remote without buffers"
echo "#############################################"
echo "creating cyverse stream"
./createStream testProgram cyverseStream 2 1 /iplant/home/nmiller/testStreams/ /mnt/scratch1/phytomorph_dev/
# Direct stream: the file goes to CyVerse without an intermediate buffer.
echo "streaming to cyverse"
./streamFile testProgram cyverseStream 2 ./test.txt
| true
|
0fb6f444059b527765c85476670629884f6015ca
|
Shell
|
AESD-Course-Project/AESD-Course-Project.github.io
|
/.github/scripts/cibuild
|
UTF-8
| 1,427
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/sh
# CI build for the Jekyll site: install Python diagram tools and Ruby gems
# (with an Alpine-specific vendored-gem path), then build, lint and validate.
set -e
# Install site dependencies
python3 -m pip --no-cache install blockdiag seqdiag actdiag nwdiag graphviz
# On Alpine runners, gems go into a vendored --user-install directory that
# must also be on PATH.
if [ -f "/etc/alpine-release" ]; then
export PATH="$PATH:/home/runner/work/jekyll/jekyll/vendor"
# NOTE(review): jekyll appears twice in this list — presumably redundant.
gem install -n /home/runner/work/jekyll/jekyll/vendor/ jekyll --user-install
gem install -n /home/runner/work/jekyll/jekyll/vendor/ bundler --user-install
gem install -n /home/runner/work/jekyll/jekyll/vendor/ rubocop --user-install
gem install -n /home/runner/work/jekyll/jekyll/vendor/ ffi --platform=ruby --user-install
gem install -n /home/runner/work/jekyll/jekyll/vendor/ kramdown-parser-gfm --user-install
gem install -n /home/runner/work/jekyll/jekyll/vendor/ http_parser.rb --user-install
gem install -n /home/runner/work/jekyll/jekyll/vendor/ jekyll --user-install
gem install -n /home/runner/work/jekyll/jekyll/vendor/ jekyll-diagrams --user-install
else
apt update && apt install python-blockdiag -y
gem install bundler
gem install rubocop
gem install ffi --platform=ruby
gem install kramdown-parser-gfm
gem install http_parser.rb
gem install jekyll
gem install jekyll-diagrams
fi
# Remove Gemfile.lock to avoid write permission issues
rm -f Gemfile.lock
# Execute build process
bundle install && bundle update
bundle exec jekyll build
bundle exec rubocop -D
bundle exec .github/scripts/validate-html
gem build site.gemspec
| true
|
77a162f2185aa3d1c2c1233c9d4b995ac0f64c49
|
Shell
|
ple-utt239/macports-ports
|
/multimedia/mythtv.28/files/preinstall
|
UTF-8
| 652
| 3.0625
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
# MythTV installer support, preinstall script
# Note that all the preinstall scripts are run (as root), then the payload is 'shove'd into place
# After that, all the postinstall scripts are run.
# Script also runs in a sandbox with no access to /Private and other directories
# note that launchctl requires sudo
echo mythtv.28 preinstall script starting
echo if backend loaded, unload before continuing install
# `launchctl list <label>` exits 0 only when the job is loaded.  Output is
# discarded with POSIX redirections; the previous `&> /dev/null` is a
# bash-only form that misparses (backgrounds the command) under plain sh.
if /bin/launchctl list "org.mythtv.mythbackend" > /dev/null 2>&1; then
/bin/launchctl unload "/Library/LaunchDaemons/org.mythtv.mythbackend.plist"
echo ...backend now unloaded
fi
echo mythtv.28 preinstall script finished
| true
|
d34d9d207bc18f33a3c28f518e9fb350d31df658
|
Shell
|
Pr0methean/TacoSpigot
|
/remap.sh
|
UTF-8
| 3,543
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
# Downloads the vanilla Minecraft server jar, verifies its MD5 against the
# BuildData manifest, applies class/member/package mappings with
# SpecialSource, and installs the remapped jar into the local Maven repo.
# Each remap stage is skipped if its output jar already exists.
pushd Paper # TacoSpigot
PS1="$"
basedir=`pwd`
workdir=$basedir/work
# Pull the required values out of work/BuildData/info.json (field values are
# extracted as the 4th double-quote-delimited token of the matching line).
minecraftversion=$(cat work/BuildData/info.json | grep minecraftVersion | cut -d '"' -f 4)
minecrafthash=$(cat work/BuildData/info.json | grep minecraftHash | cut -d '"' -f 4)
accesstransforms=work/BuildData/mappings/$(cat work/BuildData/info.json | grep accessTransforms | cut -d '"' -f 4)
classmappings=work/BuildData/mappings/$(cat work/BuildData/info.json | grep classMappings | cut -d '"' -f 4)
membermappings=work/BuildData/mappings/$(cat work/BuildData/info.json | grep memberMappings | cut -d '"' -f 4)
packagemappings=work/BuildData/mappings/$(cat work/BuildData/info.json | grep packageMappings | cut -d '"' -f 4)
jarpath=$workdir/Minecraft/$minecraftversion/$minecraftversion
minecrafturl=https://theseedmc.com/mirrors/vanilla_1.13.jar
useragent='Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36'
echo "[TacoSpigot/remap.sh] Downloading unmapped vanilla jar from $minecrafturl to $jarpath..."
if [ ! -f "$jarpath.jar" ]; then
mkdir -p "$workdir/Minecraft/$minecraftversion"
curl -A "$useragent" -s -o "$jarpath.jar" "$minecrafturl"
if [ "$?" != "0" ]; then
echo "Failed to download the vanilla server jar. Check connectivity or try again later."
exit 1
fi
fi
# OS X doesn't have md5sum, just md5 -r
if [[ "$OSTYPE" == "darwin"* ]]; then
shopt -s expand_aliases
alias md5sum='md5 -r'
echo "[TacoSpigot/remap.sh] Using an alias for md5sum on OS X"
fi
checksum=$(md5sum "$jarpath.jar" | cut -d ' ' -f 1)
if [ "$checksum" != "$minecrafthash" ]; then
echo "[TacoSpigot/remap.sh] The MD5 checksum of the downloaded server jar ($checksum) does not match the work/BuildData hash ($minecrafthash)."
exit 1
fi
# Stage 1: class mappings -> $jarpath-cl.jar
echo "[TacoSpigot/remap.sh] Applying class mappings..."
if [ ! -f "$jarpath-cl.jar" ]; then
if [ ! -f "$classmappings" ]; then
echo "[TacoSpigot/remap.sh] Class mappings not found!"
exit 1
fi
java -jar work/BuildData/bin/SpecialSource-2.jar map -i "$jarpath.jar" -m "$classmappings" -o "$jarpath-cl.jar"
if [ "$?" != "0" ]; then
echo "[TacoSpigot/remap.sh] Failed to apply class mappings."
exit 1
fi
fi
# Stage 2: member mappings -> $jarpath-m.jar
echo "[TacoSpigot/remap.sh] Applying member mappings..."
if [ ! -f "$jarpath-m.jar" ]; then
if [ ! -f "$membermappings" ]; then
echo "[TacoSpigot/remap.sh] Member mappings not found!"
exit 1
fi
java -jar work/BuildData/bin/SpecialSource-2.jar map -i "$jarpath-cl.jar" -m "$membermappings" -o "$jarpath-m.jar"
if [ "$?" != "0" ]; then
echo "[TacoSpigot/remap.sh] Failed to apply member mappings."
exit 1
fi
fi
# Stage 3: access transforms + package mappings -> $jarpath-mapped.jar
echo "[TacoSpigot/remap.sh] Creating remapped jar..."
if [ ! -f "$jarpath-mapped.jar" ]; then
java -jar work/BuildData/bin/SpecialSource.jar --kill-lvt -i "$jarpath-m.jar" --access-transformer "$accesstransforms" -m "$packagemappings" -o "$jarpath-mapped.jar"
if [ "$?" != "0" ]; then
echo "[TacoSpigot/remap.sh] Failed to create remapped jar."
exit 1
fi
fi
echo "[TacoSpigot/remap.sh] Installing remapped jar..."
cd work/CraftBukkit # Need to be in a directory with a valid POM at the time of install.
mvn install:install-file -q -Dfile="$jarpath-mapped.jar" -Dpackaging=jar -DgroupId=org.spigotmc -DartifactId=minecraft-server -Dversion="$minecraftversion-SNAPSHOT"
if [ "$?" != "0" ]; then
echo "[TacoSpigot/remap.sh] Failed to install remapped jar."
exit 1
fi
popd # TacoSpigot
| true
|
ca5317e6dc17a86e683209479a9dc4cc90587400
|
Shell
|
pisani/icm-util
|
/rm.sh
|
UTF-8
| 347
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash -e
# Tear down an ICM (InterSystems Cloud Manager) container: unprovision its
# cloud resources, remove the container, and delete local artifacts.
# Usage: rm.sh containerName
if [ $# -ne 1 ]; then
echo "rm.sh containerName"
exit 1
fi
icmname=$1
# cleanup cloud resources: unprovision from inside the container (working
# directory is recorded in its dir.txt), then drop the container's logs
docker exec $icmname sh -c 'cd $(cat dir.txt); icm unprovision -cleanUp -force; rm *.log'
docker stop $icmname
docker rm $icmname
# remove backup files
rm -fR ./Backup/$icmname
# remove a ssh key file
rm -f ~/insecure_$icmname
| true
|
ed0671097da14803ee1c49e44b8830e6821420ba
|
Shell
|
webuni/commonmark-twig-renderer
|
/run
|
UTF-8
| 1,154
| 3.296875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env sh
# Task runner entry point: bootstraps the webuni shell-task-runner (fetched
# once into ./.runner) and defines the project's task_* commands.  The
# `()( ... )` form gives each task a subshell body.
set -e
url="https://raw.githubusercontent.com/webuni/shell-task-runner/master/runner"
# Download the runner on first use; try wget, fall back to curl.
[ -f ./.runner ] || wget -q "$url" -O- > .runner || curl -fso .runner "$url"
. ./.runner
# The leading `!` keeps `set -e` from aborting if mkdir fails.
! mkdir -p ~/.composer/cache
if [ ! -f ~/.composer/auth.json ]; then echo "{}" > ~/.composer/auth.json; fi
# Route task execution through docker-compose (hook used by the runner).
_decorator()( _decorator_docker_compose_run "$@" )
# Update dependencies to the latest versions
task_deps__latest()( _run composer update --prefer-source )
# Update dependencies to the lowest versions
task_deps__lowest()( _run composer update --prefer-lowest )
# Run composer
# @service php
task_composer()( composer "$@" )
# Run tests (prefers the phpunit-8 binary when available)
# @service php
task_tests()( "$(__fn phpunit-8 && echo "phpunit-8" || echo "phpunit")" --colors=always "$@" )
# Fix code style
# @service php
task_cs()( php-cs-fixer fix --ansi --allow-risky=yes "$@" )
# Analyse code (psalm runs only when installed)
# @service php
task_analyse()( phpstan analyse --no-progress --ansi; __fn psalm && psalm || echo '' )
# Remove all containers in project (use -v to remove volumes also)
task_clean()( _docker_compose down --remove-orphans "$@" )
# Run shell with tools
# @service php
task_tools()( sh "$@" )
| true
|
3164c7f418641db28447b0d4bd39126a09fa149b
|
Shell
|
nael-fridhi/elastic-stack-course
|
/config/install_logstash_filebeat.sh
|
UTF-8
| 2,089
| 4.03125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Bash output configuration to display message with colors
RED='\033[0;31m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
ORANGE='\e[93m'
# Colored log helpers.  Note: $1 is interpolated into the printf FORMAT
# string, so messages containing % or backslash escapes are interpreted.
info() {
printf "${BLUE}$1${NC}\n"
}
error() {
printf "${RED}$1${NC}\n"
}
warn() {
printf "${ORANGE}$1${NC}\n"
}
# Verify And Install Java
info "#######################################################"
info "Verifying Java Installation..."
info "#######################################################"
# `command -v` reliably detects java on PATH.  The old check ran `java -v`,
# which is not a valid option and exits non-zero even when Java is
# installed, so the install branch always ran.
if ! command -v java > /dev/null 2>&1; then
    warn "Java is not installed"
    sleep 1
    info "Insatlling Java .."
    # NOTE(review): 'java-1.8.0-openjdk' is a yum/RPM package name; on
    # apt-based systems the OpenJDK 8 package is normally named differently
    # (e.g. openjdk-8-jdk) — verify before shipping.
    sudo apt-get install java-1.8.0-openjdk -y
    info "Java Installed"
fi
echo "Do you want to install Logstash ? [y/n]: "
read -r logstash
# String comparison with ==.  The original `[[ $logstash -eq "y"]]` was a
# syntax error (no space before ]]) and used the arithmetic operator -eq,
# which treats any non-numeric answer as 0 -eq 0 (always true).
if [[ "$logstash" == "y" ]]; then
    info "#######################################################"
    info "Installing logstash..."
    info "#######################################################"
    wget -qO - https://artifacts.elastic.co/GPG-KEY-elasticsearch | sudo apt-key add -
    sudo apt-get install apt-transport-https
    echo "deb https://artifacts.elastic.co/packages/7.x/apt stable main" | sudo tee -a /etc/apt/sources.list.d/elastic-7.x.list
    sudo apt-get update && sudo apt-get install -y logstash
    sudo systemctl enable logstash
    sudo systemctl start logstash
    info "#######################################################"
    info "Logstash installed Successfully"
    info "#######################################################"
fi
echo "Do you want to install filebeat ? [y/n]: "
read -r filebeat
if [[ "$filebeat" == "y" ]]; then
    info "#######################################################"
    info "Installing filebeat..."
    info "#######################################################"
    sudo apt-get update && sudo apt-get install -y filebeat
    sudo systemctl enable filebeat
    sudo systemctl start filebeat
    info "#######################################################"
    info "filebeat installed Successfully"
    info "#######################################################"
fi
| true
|
d025df725756362e3602fb81b302d60dd849dc76
|
Shell
|
danielleiszen/rdotnet
|
/build/build_R.NET_nuget.sh
|
UTF-8
| 3,078
| 3.234375
| 3
|
[
"MIT"
] |
permissive
|
# Builds the R.NET solution with xbuild and packs the R.NET, F# and Graphics
# NuGet packages, copying the results into a local package repository.
rdotnet_dir=~/src/github_jm/rdotnet/
MSB=xbuild
# ======= NuGet settings
# Get the nuget tools from nuget.org. There is also one coming with the NuGet plug-on from Visual Studio.
# Section on NuGet.config for nuget update (NOT YET USED - CAME ACCROSS ISSUE WITH NUGET 2.8)
# To limit machine specific issues, we will require an explicit nuget config file.
# Usually you will have a config file %AppData%\NuGet\NuGet.config.
# A gotcha is that even if you have configured your package feed from visual studio, you may need to also add a key to the activePackageSource
# <activePackageSource>
# <add key="per202 nuget tests" value="\\path\to\work\per202\nuget" />
# <add key="nuget.org" value="https://www.nuget.org/api/v2/" />
# </activePackageSource>
# nuget_conf_file=%AppData%\NuGet\NuGet.config
# You can also adapt from the sample NuGet.config.sample in the same directory as this file
# nuget_conf_file=%~d0%~p0\NuGet.config
# if not exist nuget goto Nuget_config_not_found
# The target where we will put the resulting nuget packages.
repo_dir=/home/per202/nuget/
# The xcopy options for the nuget packages (and some other build outputs)
# COPYOPTIONS=/Y /R /D
# ================== code compilation settings
# if not "$BuildConfiguration"=="Release" if not "$BuildConfiguration"=="Debug" BuildConfiguration=Release
BuildConfiguration=Release
# Setting the variable named 'Platform' seems to interfere with the nuget pack command, so
# we deliberately a variable BuildPlatform for use with MSBuild.exe
BuildPlatform="Any CPU"
# Mode=Rebuild
Mode=Build
# ================== Start build process ========================
# EVERYTHING else below this line should use paths relative to the lines above, or environment variables
# build_options="/t:$Mode /p:Configuration=$BuildConfiguration /p:Platform=\"$BuildPlatform\""
build_options="/t:$Mode /p:Configuration=$BuildConfiguration"
common_ng_pack_options="-Verbosity normal -Properties Configuration=$BuildConfiguration"
# ================== R.NET ========================
# package R.NET
# TODO - could not get the F# build script to work.
# @if "%VS120COMNTOOLS%"=="" echo "WARNING - env var VS120COMNTOOLS not found - fsharp interactive may not be found!"
# if not "%VS120COMNTOOLS%" == "" (
# call "%VS120COMNTOOLS%VsDevCmd.bat"
# )
# fsi.exe $rdotnet_dirtools\build.fsx --debug
# error FS0193: internal error: Value cannot be null.
# Parameter name: con
SLN=$rdotnet_dir/RDotNet.Release.sln
$MSB $SLN $build_options
# NOTE(review): "$rdotnet_dir$build" — $build is never assigned in this
# script, so the output directory collapses to $rdotnet_dir; confirm whether
# a "$rdotnet_dir/build" path was intended here and in the cp below.
pack_options="-OutputDirectory $rdotnet_dir$build $common_ng_pack_options"
#if exist $repo_dir/R.NET.1.5.*.nupkg del $repo_dir/R.NET.1.5.*.nupkg
#if "$BuildConfiguration"=="Release" nuspec_file=RDotNet.nuspec
#if "$BuildConfiguration"=="Debug" nuspec_file=RDotNet_debug.nuspec
nuspec_file=RDotNet.nuspec
nuget pack $rdotnet_dir$nuspec_file $pack_options
nuget pack $rdotnet_dir/RDotNet.FSharp.nuspec $pack_options
nuget pack $rdotnet_dir/RDotNet.Graphics.nuspec $pack_options
cp $rdotnet_dir$build/*.nupkg $repo_dir
| true
|
61805d718bd50ae3f589c913875b7bf2e24873b4
|
Shell
|
elppans/ShellTestes
|
/ShellScript_do_zero/Scripts/mv_conv
|
UTF-8
| 24,510
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# mv_conv - converts each ECF's sales data for a given date into a
# TRAB1cx<ECF>.SDF file and collects the results under $path/MV/Vendas<date>.
#
# Usage: mv_conv AAAAMMDD
#
# Fixes over the original script:
#   * 'MV' (uppercase — not a command) replaced by 'mv' in every block.
#   * 'tar file -C dir' had no operation flags and always failed; it is now
#     'tar -xzf file -C dir' (extract the gzipped archive into $LNXPDV).
#   * 'echo msg >> logfile >> /dev/null' sent the message to /dev/null (the
#     last stdout redirection wins), leaving the info files empty; the stray
#     second redirection was removed so the messages are actually logged.
#   * Several 'fi' lines were commented out ('#fi'), leaving the if/fi
#     nesting broken; the control flow below is properly balanced.
#   * The 25 copy-pasted per-ECF blocks were folded into one helper function
#     driven by a loop over the suffixes 001..025 (identical behavior).
data=$(echo $*)
LNXPDV=/Zanthus/Zeus/pdvJava
path=/Zanthus/Zeus/path_comum
BINCONV=lnx_conv_CJ
LCONV=./lnx_conv_CJ   # kept from the original; unused here — TODO confirm it can be dropped
# ECF number taken from the current directory name (second dot-separated
# field of $PWD) — preserved from the original; assumes the script is run
# inside a directory named like "<something>.<NNN>" — TODO confirm.
NECF=$(pwd | cut -d "." -f "2")

# Converts one ECF directory for the requested date.
# $1 - three-digit suffix (001..025) used only to probe for "$data.$1";
#      the conversion itself always enters "$data.$NECF", as in the original.
converte_ecf() {
    local sufixo="$1"
    if ls | grep "$data.$sufixo"
    then
        cd "$data.$NECF"
        # Locate the converter: prefer the copy on /usr/bin, fall back to $path/MV.
        if ls /usr/bin | grep "$BINCONV"
        then
            "$BINCONV" -m
        elif ls "$path/MV" | grep "$BINCONV"
        then
            "$path/MV/$BINCONV" -m
        else
            echo "Nao existe $BINCONV"
            echo "Nao foi possivel criar TRAB1.SDF para ECF $NECF" >> "$path/MV/lnxconv_info"
            echo "O sistema deve conter o aplicativo $BINCONV para fazer as conversoes."
            echo "Saindo do configurador..."
            read -t 5
        fi
        if ls | grep TRAB1.SDF
        then
            # Rename the converter's output per-ECF and collect it for the date.
            mv TRAB1.SDF "TRAB1cx$NECF.SDF"
            mkdir -p "$path/MV/Vendas$data"
            cp -rf "TRAB1cx$NECF.SDF" "$path/MV/Vendas$data"
            cd "$path/MV"
        else
            echo "Nao foi possivel criar TRAB1.SDF para ECF $NECF" >> "$path/MV/SDF_info"
            cd "$path/MV"
        fi
    else
        echo "Nao foi possivel achar a pasta $data.$sufixo" >> "$path/MV/DataECF_info"
    fi
}

if [ -z "$data" ]
then
    echo -e "O comando mv_conv deve conter uma data como parametro no formato AAAAMMDD.\nPor exemplo: mv_conv $(date "+%Y%m%d")"
    read -t 5
elif [ "$data" -lt 1 ]
then
    echo "Digite um numero maior que 0 (Zero)"
    read -t 5
else
    # Make sure the converter binary is reachable via /usr/bin, extracting
    # the update archive if it is not present yet.
    if ls $LNXPDV | grep lnx_conv_CJ
    then
        ln -sf $LNXPDV/lnx_conv_CJ /usr/bin/lnx_conv_CJ
    else
        tar -xzf "$path/VerAtu.tar.gz" -C "$LNXPDV"
        if ls $LNXPDV | grep lnx_conv_CJ
        then
            ln -sf $LNXPDV/lnx_conv_CJ /usr/bin/lnx_conv_CJ
        else
            clear
        fi
    fi
    # Prepare the destination directory for this date.
    if [ -d "$path/MV" ]
    then
        mkdir -p "$path/MV/Vendas$data"
    else
        echo "A pasta $path/MV nao existe"
        read -t 5
    fi
    # Process every possible ECF suffix for the date (originally 25
    # hand-copied blocks, one per suffix).
    for sufixo in $(seq -f "%03g" 1 25)
    do
        converte_ecf "$sufixo"
    done
fi
| true
|
ba225df1be30e132d2161a46f5d9e81c78620c79
|
Shell
|
xf0e/ulogme
|
/logdesktop.sh
|
UTF-8
| 658
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
# Periodically takes a screenshot and saves it to desktopscr/.
# Each filename embeds the unix timestamp at capture time.
# Capturing is skipped while the KDE screensaver reports the screen locked.

# seconds to wait between capture attempts
waittime="60"
# directory + filename prefix for the screenshots
saveprefix="desktopscr/scr"
mkdir -p desktopscr
#------------------------------
while true
do
    # Ask the KDE screensaver over D-Bus; "false" in the reply means unlocked.
    islocked=true; if [[ $(qdbus org.kde.screensaver /ScreenSaver org.freedesktop.ScreenSaver.GetActive) =~ .*false.* ]]; then islocked=false; fi
    if ! $islocked
    then
        # take screenshot into file
        T="$(date +%s)"
        # BUG FIX: the original used "$saveprefix_$T.jpg", which expands the
        # (unset) variable 'saveprefix_' — files ended up named "<ts>.jpg" in
        # the current directory instead of under desktopscr/scr_<ts>.jpg.
        fname="${saveprefix}_$T.jpg"
        # -q is jpeg quality; higher is higher quality
        scrot -q 50 "$fname"
    else
        echo "screen is locked, waiting..."
    fi
    sleep $waittime
done
| true
|
302526e8ba133a690aee832c62d60fe1b0390ab0
|
Shell
|
ezequieldevalais/sisop-4h
|
/bin/IniPro.sh
|
UTF-8
| 9,064
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
#===========================================================
#
# ARCHIVO: IniPro.sh
#
# DESCRIPCION: Prepara el entorno de ejecución del TP
#
# AUTOR: Solotun, Roberto.
# PADRON: 85557
#
#===========================================================
# Appends one entry to the application log by delegating to Glog.sh,
# tagging every entry with this script's name ("IniPro").
# $1 = message text
# $2 = severity tag (INF | WAR | ERR)
function grabarLog {
    local mensaje="$1"
    local tipo="$2"
    Glog.sh "IniPro" "$mensaje" "$tipo"
}
# Ensures every name listed in the global array 'variables' is present in the
# environment; any missing one is loaded from the configuration file through
# setVariablesDeConfiguracion.
function chequearVariables {
    local nombre valor
    for nombre in ${variables[*]}
    do
        valor=$(env | grep $nombre | cut -d"=" -f 2)
        if [ -z "$valor" ]
        then
            setVariablesDeConfiguracion $nombre
        fi
    done
}
# Reads the value of configuration key $1 from "$CONFDIR/$confFile", exports
# it as an environment variable of the same name and echoes it.
# FIX: the key is now anchored ("^$1=") so a key that is a substring of
# another (e.g. BINDIR inside some other line, or a key mentioned in a
# comment) cannot match the wrong line — the original unanchored grep could
# pick up several lines and export a multi-line value.
function setVariablesDeConfiguracion {
    value=$(grep "^$1=" "$CONFDIR/$confFile" | cut -d"=" -f 2)
    export $1="$value"
    echo -e "$value"
}
# Verifies that every script listed in the global array 'comandos' exists
# under "$GRUPO$BINDIR".  A file without the execute bit is repaired with
# chmod 777; a missing file is logged as ERR and flags the global 'error'.
function chequearComandos {
    local cmd ruta
    for cmd in ${comandos[*]}
    do
        ruta="$GRUPO$BINDIR/$cmd"
        if [ ! -f "$ruta" ]
        then
            grabarLog "El comando $ruta no existe." "ERR"
            error=true
        elif ! [ -x "$ruta" ]
        then
            chmod 777 "$ruta"
        fi
    done
}
# Verifies that every master file listed in the global array 'maestros'
# exists under "$GRUPO$MAEDIR".  Files that are not read-only (readable and
# not writable) are forced to mode 444; a missing file is logged as ERR and
# flags the global 'error'.
function chequearMaestros {
    local mae ruta
    for mae in ${maestros[*]}
    do
        ruta="$GRUPO$MAEDIR/$mae"
        if [ ! -f "$ruta" ]
        then
            grabarLog "El maestro $ruta no existe." "ERR"
            error=true
        elif ! ([ -r "$ruta" ] && ! [ -w "$ruta" ])
        then
            chmod 444 "$ruta"
        fi
    done
}
# Verifies that every table listed in the global array 'tablas' exists under
# "$GRUPO$MAEDIR/tab".  Tables that are not read-only (readable and not
# writable) are forced to mode 444; a missing table is logged as ERR and
# flags the global 'error'.
function chequearTablas {
    local tab ruta
    for tab in ${tablas[*]}
    do
        ruta="$GRUPO$MAEDIR/tab/$tab"
        if [ ! -f "$ruta" ]
        then
            grabarLog "La tabla $ruta no existe." "ERR"
            error=true
        elif ! ([ -r "$ruta" ] && ! [ -w "$ruta" ])
        then
            chmod 444 "$ruta"
        fi
    done
}
# Makes sure "$GRUPO$BINDIR" (the directory holding the project's scripts)
# appears in PATH so they can be invoked by name; appends it when missing.
function chequearPaths {
    local encontrado
    encontrado=$(echo "$PATH" | grep "$GRUPO$BINDIR")
    if [ -z "$encontrado" ]
    then
        export PATH="$PATH:$GRUPO$BINDIR"
    fi
}
# Returns 0 when no RecPro.sh process is running, 1 when one is found.
# The listing filters out this shell's own PID, the grep commands themselves
# and gedit (which may merely have the script open).
function chequearRecPro {
    local procesos
    procesos=$(ps ax | grep -v $$ | grep -v "grep" | grep -v "gedit" | grep "RecPro.sh")
    [ -z "$procesos" ]
}
# Asks the operator whether RecPro should be started and keeps prompting
# until a valid answer is given.  Matching is case-insensitive.
# Returns 0 for yes (si / s), 1 for no (no / n).
function lanzarRecPro {
    local respuesta
    echo "“Desea efectuar la activación de RecPro?” Si – No"
    read respuesta
    while true
    do
        case "${respuesta,,}" in
            si|s) return 0 ;;
            no|n) return 1 ;;
        esac
        echo "Ingrese una respuesta valida"
        read respuesta
    done
}
# Prints — and records in the log via grabarLog, severity INF — the summary
# of the initialized environment: each relevant directory with its current
# contents, followed by the system state.
function mostrarMensajeInstalacionFinalizada {
    local dirconf dirbin dirmae dirlog
    dirconf=$(ls "$CONFDIR" | tr "\n" " ")
    dirbin=$(ls "$GRUPO$BINDIR" | tr "\n" " ")
    dirmae=$(ls -R "$GRUPO$MAEDIR" | tr "\n" " ")
    dirlog=$(ls "$GRUPO$LOGDIR" | tr "\n" " ")
    # Helper: log $1 as INF and echo it followed by the literal suffix $2
    # ("" or "\n", interpreted by echo -e exactly as in the original).
    _informar() {
        grabarLog "$1" "INF"
        echo -e "$1$2"
    }
    _informar "Directorio de Configuración: $CONFDIR" ""
    _informar "Archivos: $dirconf" "\n"
    _informar "Directorio de Ejecutables: $GRUPO$BINDIR" ""
    _informar "Archivos: $dirbin" "\n"
    _informar "Directorio de Maestros y Tablas: $GRUPO$MAEDIR" ""
    _informar "Archivos: $dirmae" "\n"
    _informar "Directorio de recepción de documentos para protocolización: $GRUPO$NOVEDIR" "\n"
    _informar "Directorio de Archivos Aceptados: $GRUPO$ACEPDIR" "\n"
    _informar "Directorio de Archivos Rechazados: $GRUPO$RECHDIR" "\n"
    _informar "Directorio de Archivos Protocolizados: $GRUPO$PROCDIR" "\n"
    _informar "Directorio para informes y estadísticas: $GRUPO$INFODIR" "\n"
    _informar "Nombre para el repositorio de duplicados: $GRUPO$DUPDIR" "\n"
    _informar "Directorio para Archivos de Log: $GRUPO$LOGDIR" ""
    _informar "Archivos: $dirlog" "\n"
    _informar "Estado del Sistema: INICIALIZADO" "\n"
}
# Locates the configuration directory: starts at "$PWD/conf" and, while
# "$CONFDIR/$confFile" is missing, rebuilds the candidate by dropping the
# last two path components and appending "/conf" — i.e. it climbs one
# directory per iteration.  Exports the first directory containing the file.
# NOTE(review): if no ancestor contains the file this loops forever, exactly
# like the original — confirm the file is guaranteed to exist somewhere.
function setCONFDIR {
    local candidato separadores campo trozo
    CONFDIR="${PWD}/conf"
    until [ -f "$CONFDIR/$confFile" ]
    do
        candidato="$CONFDIR"
        separadores=$(grep -o "/" <<< "$candidato" | wc -l)
        CONFDIR=""
        for (( campo = 2; campo < separadores; campo++ ))
        do
            trozo=$(echo "$candidato" | cut -d "/" -f$campo)
            CONFDIR="$CONFDIR/$trozo"
        done
        CONFDIR="$CONFDIR/conf"
    done
    export CONFDIR="$CONFDIR"
}
# Main entry point: initializes the environment exactly once per session.
# Declares the expected variables/files/commands, locates CONFDIR, runs the
# chequear* checks and, on success, prints the summary and optionally starts
# RecPro.sh.  Globals written: error, variables, maestros, comandos, tablas,
# confFile, and INICIALIZADO (exported at the end).
function main {
    error=false
    # Environment variables the installation must define.
    variables=(GRUPO BINDIR MAEDIR NOVEDIR ACEPDIR RECHDIR PROCDIR INFODIR DUPDIR LOGDIR LOGSIZE)
    # Master files expected under $GRUPO$MAEDIR.
    maestros=(emisores.mae normas.mae gestiones.mae)
    # Scripts expected under $GRUPO$BINDIR.
    comandos=(Start.sh Stop.sh Mover.sh Glog.sh IniPro.sh RecPro.sh ProPro.sh InfPro.pl)
    # Tables expected under $GRUPO$MAEDIR/tab.
    tablas=(nxe.tab axg.tab)
    confFile=InsPro.conf
    setCONFDIR
    # INICIALIZADO is exported by a successful earlier run, so a second
    # invocation in the same session only warns and does nothing.
    if [ "true" == "`env | grep INICIALIZADO | cut -d"=" -f 2`" ]
    then
        echo -e "Ambiente ya inicializado, si quiere reiniciar termine su sesión e ingrese nuevamente."
        grabarLog "Ambiente ya inicializado, si quiere reiniciar termine su sesión e ingrese nuevamente." "WAR"
    else
        echo -e "Comenzando a inicializar el ambiente.\n"
        chequearVariables
        chequearComandos
        chequearPaths
        chequearMaestros
        chequearTablas
        # The chequear* helpers set error=true on any missing file/command.
        if [ false == $error ]; then
            mostrarMensajeInstalacionFinalizada
            # lanzarRecPro returns 1 when the operator declines.
            lanzarRecPro
            if [ $? == 1 ]; then
                msj="-Usted ha elegido no arrancar RecPro, para hacerlo manualmente debe hacerlo de la siguiente manera: Uso: Start.sh RecPro.sh"
                echo -e $msj
                grabarLog "Se ha elegido no arrancar RecPro" "INF"
            else
                # chequearRecPro returns 0 only when RecPro.sh is NOT running yet.
                chequearRecPro
                if [ $? == 0 ]; then
                    Start.sh "RecPro.sh"
                    msj="-Usted ha elegido arrancar RecPro, para frenarlo manualmente debe hacerlo de la siguiente manera: Uso: Stop.sh RecPro.sh"
                    echo -e $msj
                    # Extract the PID column from the matching ps line.
                    procssid=$(ps -ax | grep -v $$ | grep -v "grep" | grep -v "gedit" | grep "RecPro.sh" | sed 's-\(^ *\)\([0-9]*\)\(.*$\)-\2-g')
                    echo -e "proc: $procssid"
                    grabarLog "proc: $procssid" "INF"
                else
                    msj="-RecPro ya iniciado, para frenarlo manualmente debe hacerlo de la siguiente manera: Uso: Stop.sh RecPro.sh"
                    echo -e $msj
                    grabarLog "RecPro ya iniciado" "ERR"
                    procssid=$(ps -ax | grep -v $$ | grep -v "grep" | grep -v "gedit" | grep "RecPro.sh" | sed 's-\(^ *\)\([0-9]*\)\(.*$\)-\2-g')
                    echo -e "proc: $procssid"
                    grabarLog "RecPro.sh proc: $procssid" "ERR"
                fi
            fi
            export INICIALIZADO="true"
        else
            msj="Error en la inicialización del ambiente. Revise el log para mayor información."
            echo -e $msj
            export INICIALIZADO="false"
        fi
    fi
}
main
| true
|
e08ac0124dd5f0ae746b134ff69bd03bca1458ef
|
Shell
|
MagicGroup/magicinstaller2
|
/rootfs/post_scripts/commands_chroot.sh
|
UTF-8
| 350
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/sh
# Post-install chroot setup: create the login-accounting files expected by
# login/last, fix their group/permissions, and refresh the linker cache.
touch /var/run/utmp
# Brace expansion ({btmp,lastlog,wtmp}) is deliberately avoided: the chroot
# shell is busybox ash, which does not support it.
for logname in btmp lastlog wtmp; do
  touch /var/log/$logname
done
chgrp utmp /var/run/utmp /var/log/lastlog
chmod 664 /var/run/utmp /var/log/lastlog
# Rebuild the ld.so cache only when ldconfig exists inside the chroot.
if [ -x /sbin/ldconfig ]; then
  /sbin/ldconfig
fi
| true
|
727201e04c26f0992233c7a67c7a0784428f17ad
|
Shell
|
hobby/mkx
|
/mkrun
|
UTF-8
| 10,614
| 3.34375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# vim:ts=4:sw=4:expandtab
##################################################################
# NAME
# mkrun - a toy for run target's command
#
# SYNOPSIS
# mkrun
# mkrun [ options ] command-type targets ...
#
# debug=on mkrun ...
#
# OPTIONS
# -h Print this message and exit.
#
# -m MODULENAME Use MODULENAME as module-name for each target.
# -d DEPLOYPATH Use DEPLOYPATH as deploy-path for each target.
#
# AUTHORS
# neiku project <ku7d@qq.com>
#
# SEE ALSO
# mkxrc_modules
# mkxrc_targets
# mkxrc_commands
# mkxrc_targetregs
# mkxrc_commandtpls
#
# VERSION
# 2015/11/21: 支持每个target指定类型的命令(start/stop/restart)
# 2015/11/25: 支持可配置登录方式(目前只支持rsa)
# 支持pre-deploy/post-deploy类型命令
# 支持命令别名(alias)
# 2015/11/28: 支持使用mkm查找target(project/global/system级别)
# 支持使用mkm查找module(project/global/system级别)
# 支持使用mkm查找command(project/global/system级别)
# 2015/12/03: 支持本地部署模块(local module)
# 2015/12/05: 支持基于密码登录的远程部署模块(passwd module)
# 2015/12/11: 支持自定义命令类型
# 2015/12/12: 支持命令模板(可配置不同模块的不同命令模板)
# 2015/12/23: 支持模式匹配target
# 2016/03/18: 支持通过环境变量与用户定义命令(模板)通信
# (废弃基于sed的变量替换命令生成机制)
# 2016/04/23: 支持命令行指定module-name(-m)、deploy-path(-d)
# (方便临时切换部署环境)
# MKX_MODULE 改名为 MKX_MODULENAME (模块名字)
# MKX_HOSTPATH 改名为 MKX_DEPLOYPATH (部署路径)
# 2016/05/01: 支持透传-m/-d选项给mkrun(处理alias命令)
# 支持自定义module(module type = custom)
# (custom-runner 配置自定义命令执行工具)
# 2016/06/18: 支持su module(module type = su)
# 2017/01/16: 支持跨平台
# - 通过env程序+PATH变量动态查询bash,控制权
# 、准确性都得到保障
#
##################################################################
# target(*) <-----> module(1) <-----> destination(1)
# target(1) <-----> command(*)
# Print mkrun usage/help text (8 lines) to stdout.
function help()
{
    printf '%s\n' \
        "Usage: mkrun [ options ] command-type targets ..." \
        "Options:" \
        " -h Print this message and exit." \
        "" \
        " -m MODULENAME Use MODULENAME as module-name for each target." \
        " -d DEPLOYPATH Use DEPLOYPATH as deploy-path for each target." \
        "" \
        "Report bugs to <ku7d@qq.com>"
}
# mkrun [ options ] command-type targets ...
cmdline_modulename=""
cmdline_deploypath=""
# parse cmdline
cmdline="$@"
mklog debug "origin-args:[$@]"
temp=$(getopt -o "hm:d:" --long "" -n "mkrun" -- "$@")
if [ $? != 0 ] ; then
echo "`help`" >&2
exit 1
fi
eval set -- "$temp"
mklog debug "parsed-args:[$temp]"
while true
do
case "$1" in
-h) echo "`help`" >&2; exit 0;;
-m) cmdline_modulename="$2"; shift 2;;
-d) cmdline_deploypath="$2"; shift 2;;
--) shift ; break ;;
*) echo "parse options error!" >&2 ; exit 1 ;;
esac
done
mklog debug "cmdline_modulename:[$cmdline_modulename], cmdline_deploypath:[$cmdline_deploypath]"
if [ $# -lt 2 ] ; then
echo "`help`" >&2;
exit 1
fi
# get command-type
cmdtype="$1"
shift 1
# run target's command with type
for target in $*
do
# .so target is specail
if expr match "$target" ".*\.so$" >/dev/null 2>&1 ; then
if ! expr match "$target" "^lib" >/dev/null 2>&1 ; then
target="lib$target"
fi
fi
# .a target is specail
if expr match "$target" ".*\.a$" >/dev/null 2>&1 ; then
if ! expr match "$target" "^lib" >/dev/null 2>&1 ; then
target="lib$target"
fi
fi
# find target's module
# or use user-defined module-name/deploy-path in cmdline
modulename="$cmdline_modulename";
deploypath="$cmdline_deploypath";
if [ -z "$modulename" -o -z "$deploypath" ] ; then
# find modulename or deploypath by target
eval `mkm find target $target \
| awk '{printf "modulename=%s; deploypath=%s;" \
, $1, $2}'`
if [ -z "$modulename" -o -z "$deploypath" ] ; then
eval `mkm find targetreg $target \
| awk '{printf "modulename=%s; deploypath=%s;" \
, $1, $2}'`
if [ -z "$modulename" -o -z "$deploypath" ] ; then
mklog error "module not found, target:[$target]"
continue
fi
mklog debug "module found in targetreg, target:[$target]"
fi
# use user-defined module name (from cmdline)
if [ -n "$cmdline_modulename" ] ; then
modulename="$cmdline_modulename"
fi
# use user-defined deploy path (from cmdline)
if [ -n "$cmdline_deploypath" ] ; then
deploypath="$cmdline_deploypath"
fi
fi
mklog debug "module:[$modulename], deploypath:[$deploypath], target:[$target]"
# load target's command
command=""
eval `mkm find command $target $cmdtype | while read target type command
do
echo "command='$command';"
done` >/dev/null 2>&1
if [ $? -ne 0 ] ; then
mklog error "load '$cmdtype' command fail, target:[$target]"
continue
fi
mklog debug "target:[$target], cmdtype:[$cmdtype], command:[$command]"
if [ -z "$command" ] ; then
# no command yet, try command tpl
cmdtpl=""
eval `mkm find commandtpl $modulename $cmdtype | while read moduledump typedump commandtpl
do
echo "cmdtpl='$commandtpl';"
done` >/dev/null 2>&1
mklog debug "module:[$modulename], target:[$target], cmdtype:[$cmdtype], cmdtpl:[$cmdtpl]"
if [ -z "$cmdtpl" ] ; then
mklog error "'$cmdtype' command and default '$cmdtype' command tpl not found," \
"module:[$modulename], target:[$target], cmdtype:[$cmdtype]"
continue
fi
mklog debug "module:[$modulename], target:[$target], command tpl:[$cmdtpl]"
command="$cmdtpl"
fi
# maybe command alias
if [ "${command:0:1}" = "@" ] ; then
mklog debug "$target's '$cmdtype' command is alias '${command:1}' command"
mkrun -m"$modulename" -d"$deploypath" "${command:1}" $target
continue
fi
# load module's info
mtype=""; username=""; rsapkey=""; hostname=""; hostport="";
eval `mkm find module $modulename \
| awk '{printf "mtype=%s; username=%s; rsapkey=%s; hostname=%s; hostport=%s;" \
, $2, $3, $4, $5, $6}'`
mklog debug "target:[$target], module:[$modulename], mtype:[$mtype]," \
"username:[$username], rsa-private:[$rsapkey]," \
"hostname:[$hostname], hostport:[$hostport]"
if [ "$mtype" != "rsa" \
-a "$mtype" != "local" \
-a "$mtype" != "passwd" \
-a "$mtype" != "custom" \
-a "$mtype" != "su" ] ; then
mklog error "module type unsupported, type:[$mtype]," \
"supported type:[rsa, local, passwd, custom, su], target:[$target]"
continue
fi
# export env before running command
exportcmd="export"
exportcmd="$exportcmd MKX_MODULENAME='$modulename'"
exportcmd="$exportcmd MKX_TARGET='$target'"
exportcmd="$exportcmd MKX_MTYPE='$mtype'"
exportcmd="$exportcmd MKX_CMDTYPE='$cmdtype'"
exportcmd="$exportcmd MKX_DEPLOYPATH='$deploypath'"
# run command
mklog normal "run '$cmdtype' command for '$target' with command:[$command]"
case "$mtype" in
rsa)
if [ -z "$username" -o -z "$rsapkey" \
-o -z "$hostname" -o -z "$hostport" ]
then
mklog error "rsa module not ok, target:[$target], module:[$modulename]"
continue
fi
ssh -i $rsapkey -p $hostport $username@$hostname "$exportcmd; $command"
;;
local)
eval "$exportcmd; $command"
;;
passwd)
if [ -z "$username" -o -z "$rsapkey" \
-o -z "$hostname" -o -z "$hostport" ]
then
mklog error "passwd module not ok, target:[$target], module:[$modulename]"
exit 1
fi
mkssh $username $rsapkey $hostname $hostport "$exportcmd; $command"
;;
custom)
# custom runner
customrunner="`mkm get config custom-runner`"
if [ -z "$customrunner" ] ; then
mklog error "custom runner not found, target:[$target], module:[$modulename]"
continue
fi
mklog debug "custom-runner:[$customrunner]"
# run runner
export MKX_CUSTOM_TARGETPATH="$makedir/$target" \
MKX_CUSTOM_TARGETNAME="$target" \
MKX_CUSTOM_DEPLOYPATH="$deploypath/" \
MKX_CUSTOM_CONFIG1="$modulename" \
MKX_CUSTOM_CONFIG2="$mtype" \
MKX_CUSTOM_CONFIG3="$username" \
MKX_CUSTOM_CONFIG4="$rsapkey" \
MKX_CUSTOM_CONFIG5="$hostname" \
MKX_CUSTOM_CONFIG6="$hostport" \
MKX_CUSTOM_COMMAND="$exportcmd; $command"
eval "$customrunner"
unset MKX_CUSTOM_TARGETPATH \
MKX_CUSTOM_TARGETNAME \
MKX_CUSTOM_DEPLOYPATH \
MKX_CUSTOM_CONFIG1 \
MKX_CUSTOM_CONFIG2 \
MKX_CUSTOM_CONFIG3 \
MKX_CUSTOM_CONFIG4 \
MKX_CUSTOM_CONFIG5 \
MKX_CUSTOM_CONFIG6 \
MKX_CUSTOM_COMMAND
;;
su)
MKX_MKSU_PASSWORD="$rsapkey" mksu $username "" "$exportcmd; $command"
;;
*)
mklog error "module type unsupported, type:[$mtype]," \
"supported type:[rsa, local, passwd, custom, su]"
exit 1
;;
esac
done
| true
|
5157791ce75a55bb8e6a67a7c5e23c5f60f56421
|
Shell
|
saiprakash774/ConditionalStmtsExsinShell
|
/arthop.sh
|
UTF-8
| 591
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash -x
# Read three integers a, b, c and report the largest of four arithmetic
# expressions. The original nested if/elif chain was incomplete: when
# r1 <= r2 and r2 <= r3 (and in several other branches) nothing was
# printed at all, and the output said "maximun".
echo "enter three numbers a,b,c is to perform arthimatic operations :"
read -p "enter value of a: " a
read -p "enter value of b: " b
read -p "enter value of c: " c
# Note: * / % bind tighter than + inside $(( )).
r1=$(( $a+$b*$c ));
r2=$(( $a%$b+$c ));
r3=$(( $c+$a/$b ));
r4=$(( $a*$b+$c ));
# Scan all four results so every case yields exactly one answer.
max=$r1
for r in $r2 $r3 $r4; do
    if [ "$r" -gt "$max" ]; then
        max=$r
    fi
done
echo "$max is maximum"
| true
|
28765aa7b52020489feda30470d092f735d9000c
|
Shell
|
fishi0x01/vsh
|
/test/util/common.bash
|
UTF-8
| 983
| 3.390625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Shared helpers for vsh integration tests: exports Vault connection settings
# and provides wrappers for executing commands inside the test container.
export VAULT_VERSION=${VAULT_VERSION:-"1.9.2"}
export VAULT_CONTAINER_NAME="vsh-integration-test-vault"
export VAULT_HOST_PORT=${VAULT_HOST_PORT:-"8888"}
export VAULT_TOKEN="root"
export VAULT_ADDR="http://localhost:${VAULT_HOST_PORT}"
# Absolute directory of this script (works when sourced, via BASH_SOURCE).
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
export DIR
# Lowercased OS name (e.g. "linux", "darwin") used in the binary file name.
UNAME=$(uname | tr '[:upper:]' '[:lower:]')
# Map `uname -m` output onto Go-style architecture names.
case "$(uname -m)" in
x86_64)
ARCH=amd64 ;;
arm64|aarch64|armv8b|armv8l)
ARCH=arm64 ;;
arm*)
ARCH=arm ;;
i386|i686)
ARCH=386 ;;
*)
ARCH=$(uname -m) ;;
esac
export ARCH
# Path of the platform-specific vsh binary under test.
export APP_BIN="${DIR}/../../build/vsh_${UNAME}_${ARCH}"
export NO_VALUE_FOUND="No value found at"
# Test teardown hook: remove the Vault test container.
teardown() {
docker rm -f ${VAULT_CONTAINER_NAME} &> /dev/null
}
# Run a shell command inside the Vault container, discarding all output.
vault_exec() {
vault_exec_output "$@" &> /dev/null
}
# Run a shell command inside the Vault container, keeping its output.
vault_exec_output() {
docker exec ${VAULT_CONTAINER_NAME} /bin/sh -c "$1"
}
# Read a single field ($1) from a KV secret ($2); prints nothing on failure.
get_vault_value() {
vault_exec_output "vault kv get -field=\"${1}\" \"${2}\" || true"
}
| true
|
c4f7726bca883d84305064bd21319f8e2cd3a2cd
|
Shell
|
Zaier9/Curso-de-Bash
|
/21_archivoDirectorio.sh
|
UTF-8
| 373
| 3.640625
| 4
|
[
"MIT"
] |
permissive
|
# ! /bin/bash
# Programa para ejemplificar la creacion de archivos y directorios
# Autor: Zaier | veraalber989@gmail.com
#
# Usage: script (d|f) NAME — create a directory (d) or a file (f).
echo "Archivos - Directorios"
# "$1"/"$2" are quoted: the original unquoted [ $1 = "d" ] was a syntax
# error whenever the argument was missing or contained spaces.
if [ "$1" = "d" ]; then
    mkdir -m 755 "$2"
    echo "Directorio creado correctamente"
    ls -la "$2"
elif [ "$1" = "f" ]; then
    touch "$2"
    echo "Archivo creado correctamente"
    ls -la "$2"
else
    echo "No existe esa opcion: $1"
fi
| true
|
c26e7efa4245393b57d9625a36bcc6a9bd015a23
|
Shell
|
karunmatharu/Android-4.4-Pay-by-Data
|
/external/chromium_org/android_webview/build/install_binary
|
UTF-8
| 672
| 3.84375
| 4
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
#!/bin/bash -x
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Copies a possibly stripped binary and a symbol file to installation dirs.
#
# Arguments:
#   $1 - source binary
#   $2 - primary destination for the stripped binary
#   $3 - secondary destination for the stripped binary
#   $4 - primary destination for the unstripped (symbol) copy
#   $5 - secondary destination for the unstripped (symbol) copy
# NOTE(review): assumes $STRIP is exported by the build system — confirm.
if [ "$3" = "" ]
then
echo "Usage: install_binary path/to/binary path/to/target1 path/to/target2 path/to/symbols path/to/symbols2"
exit 1
fi
SOURCE=$1
TARGET=$2
TARGET2=$3
SYMBOLS=$4
SYMBOLS2=$5
mkdir -p $(dirname $SYMBOLS)
# Keep an unstripped copy for later symbolization.
cp $SOURCE $SYMBOLS
# Create a hard link to avoid the additional copy to the secondary location.
ln $SYMBOLS $SYMBOLS2
$STRIP --strip-unneeded $SOURCE -o $TARGET
cp $TARGET $TARGET2
| true
|
81c5855b66695d25d577c72476d65af224dc4ea6
|
Shell
|
xaliq2299/UFAZ-BSc
|
/L0/S1/bash/cs_hw_5/ex2
|
UTF-8
| 126
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/bash
# Announce every integer from 0 through 20 inclusive.
for (( counter = 0; counter <= 20; counter++ )); do
    echo "The counter is $counter"
done
| true
|
917c5a7b090a1c9df993c5a3cc44734fad307e31
|
Shell
|
trowind/my_config
|
/common_conf.sh
|
UTF-8
| 946
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
# Personal shell configuration: colored git-aware prompt, aliases, and
# history settings. Expects JYLEE_CONF_DIR to point at the config directory
# containing .git-prompt.sh (provides __git_ps1).
. ${JYLEE_CONF_DIR}/.git-prompt.sh
# Terminal color/attribute escape sequences via terminfo.
green=$(tput setaf 2)
yellow=$(tput setaf 3)
cyan=$(tput setaf 6)
bold=$(tput bold)
red=$(tput setaf 1)
reset=$(tput sgr0)
# Prompt: timestamp, user@host, cwd, current git branch, then "$ " on a new line.
PS1='\[$cyan\]\D{%Y%m%d %H:%M:%S} \[$green\]\u@\h\[$reset\]\w\[$yellow\]$(__git_ps1)\[$reset\]\n\$ '
## aliass
alias lh='ls -lah'
alias ll='ls -la'
alias ..="cd .."
alias ...="cd ../.."
alias ....="cd ../../../"
# git shortcuts
alias g="git"
alias ga="git add"
alias gap="git add -p"
alias gc="git commit"
alias gs="git status"
alias go="git checkout"
alias gsl='git status | less -REX'
alias gd="git diff --color"
alias gl="git log --graph --decorate --oneline --color --all"
alias gls="git log --graph --decorate --color --name-status"
alias gcred="git config --global credential.helper cache"
alias tmuxjy_new="tmuxjy new -s jooyoung"
# Dedicated history file with timestamps; skip duplicates and noise commands.
HISTFILE="$HOME/.jylee_history"
export HISTTIMEFORMAT="%F %T "
export HISTCONTROL=ignoredups
export HISTIGNORE="pwd:ls:cd:lh"
export HISTSIZE=5000
| true
|
dc39f6c12b1ccf8d166a39311f91bdb382cab5af
|
Shell
|
brettcs/dotfiles
|
/start
|
UTF-8
| 2,008
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/sh
set -e
set -u
# Resolve an XDG base-directory value for class $1 (e.g. CONFIG, DATA,
# STATE): prefer XDG_<class>_DIRS, then XDG_<class>_HOME, and fall back
# to "$HOME/$2".
xdg_dirs() {
    local dir_list dir_home
    # Indirect expansion via eval; the ':-' default keeps this safe
    # under 'set -u'.
    eval "dir_list=\${XDG_$1_DIRS:-}; dir_home=\${XDG_$1_HOME:-}"
    if [ -n "$dir_list" ]; then
        echo "$dir_list"
    elif [ -n "$dir_home" ]; then
        echo "$dir_home"
    else
        echo "$HOME/$2"
    fi
}
HERE="$(dirname "$(readlink -e "$0")")"
SOCKET_DIR="${XDG_RUNTIME_DIR:-/tmp}/Bretts"
EMACS_SOCKET_NAME="$SOCKET_DIR/Emacs"
export PATH="$PATH:$HERE/.local/bin"
export SCREENDIR="$SOCKET_DIR/screen"
export TMUX_TMPDIR="$SOCKET_DIR"
export XDG_CONFIG_DIRS="$HERE/.config:$(xdg_dirs CONFIG .config)"
export XDG_CONFIG_HOME="$HERE/.config"
export XDG_DATA_DIRS="$HERE/data:$(xdg_dirs DATA .local/share)"
export XDG_DATA_HOME="$HERE/data"
export XDG_STATE_DIRS="$HERE/state:$(xdg_dirs STATE .local/state)"
export XDG_STATE_HOME="$HERE/state"
export ZDOTDIR="$XDG_CONFIG_HOME/bcsh"
unset DISPLAY WAYLAND_DISPLAY
if ! SHELL="$(which zsh 2>/dev/null)"; then
SHELL="$HERE/bcsbash"
fi
export SHELL
mkdir -p --mode=0700 "$SCREENDIR" "$XDG_DATA_HOME" "$XDG_STATE_HOME"
umask 077
if command -v emacsclient >/dev/null; then
emacs_eval() {
if [ $# = 0 ]; then
set -- t
fi
emacsclient --socket-name="$EMACS_SOCKET_NAME" --alternate-editor=false \
--eval "$@" >/dev/null 2>&1
}
emacs_load=true
if emacs_eval; then
emacs_load=false
elif [ -e "/var/lib/systemd/linger/$USER" ]; then
systemd-run --user \
--unit="brett-emacs-$(date +%Y-%m-%dT%H:%M)" \
emacs --fg-daemon="$EMACS_SOCKET_NAME" --no-init-file
else
emacs --daemon="$EMACS_SOCKET_NAME" --no-init-file
fi
if $emacs_load; then
while ! emacs_eval; do
sleep .1
done
nohup emacsclient --socket-name="$EMACS_SOCKET_NAME" \
--eval "(load-file \"$XDG_CONFIG_HOME/emacs/init.el\")" \
>>"$SOCKET_DIR/Emacs.log" 2>&1 &
fi
export EMACS_SOCKET_NAME
fi
exec "$HERE/.local/bin/reattach" -C "$HERE"
| true
|
04c04be144548ca13978142827231ef8f481fc78
|
Shell
|
wilderfield/inference_results_v0.7
|
/closed/Lenovo/code/rnnt/tensorrt/download_data.sh
|
UTF-8
| 1,281
| 2.671875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Download and extract the LibriSpeech subsets needed for the RNN-T benchmark.
# Provides download_file().
source code/common/file_downloads.sh
# Data root used by the existence check below.
DATA_DIR=${DATA_DIR:-build/data}
# Skip everything when a sentinel file from dev-clean is already present.
if [[ -e ${DATA_DIR}/LibriSpeech/dev-clean/1272/128104/1272-128104-0000.flac ]]
then
echo "Dataset for RNN-T already exists!"
else
# NOTE(review): the existence check uses DATA_DIR but extraction uses
# MLPERF_SCRATCH_PATH/data — confirm both point at the same tree.
download_file data LibriSpeech http://www.openslr.org/resources/12/dev-clean.tar.gz dev-clean.tar.gz \
&& tar -xzf ${MLPERF_SCRATCH_PATH}/data/LibriSpeech/dev-clean.tar.gz -C ${MLPERF_SCRATCH_PATH}/data
download_file data LibriSpeech http://www.openslr.org/resources/12/train-clean-100.tar.gz train-clean-100.tar.gz \
&& tar -xzf ${MLPERF_SCRATCH_PATH}/data/LibriSpeech/train-clean-100.tar.gz -C ${MLPERF_SCRATCH_PATH}/data
fi
| true
|
a043a9eeab7b4838414789b34c27cce362f9eaff
|
Shell
|
sch3m4/net2ban
|
/net2ban_init.d
|
UTF-8
| 1,188
| 3.65625
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#! /bin/bash
#
# net2ban Start/stop net2ban service
#
### BEGIN INIT INFO
# Provides: net2ban
# Required-Start: $remote_fs $network $named $time $rabbitmq-server
# Required-Stop: $remote_fs $network $named $time $rabbitmq-server
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Starts net2ban service
# Description: Starts net2ban, a service to remotely
# deploy firewall rules
### END INIT INFO
# Service identity and runtime locations.
NAME='net2ban'
DAEMON="/usr/bin/python"
DIR="/usr/share/$NAME"
ACTIONS="$DIR/actions"
ARGS="$DIR/$NAME.py"
PIDFILE="/var/run/$NAME.pid"
USER="$NAME"
GROUP="$NAME"
case "$1" in
start)
# Ensure the actions directory exists before launching the daemon.
[ -d $ACTIONS ] || mkdir -p $ACTIONS
echo "Starting $NAME"
# -b backgrounds the python process; --make-pidfile records its PID.
/sbin/start-stop-daemon --start --pidfile $PIDFILE --user $USER --group $GROUP -b --make-pidfile --chuid $USER --chdir $DIR --exec $DAEMON $ARGS
;;
stop)
echo "Stopping $NAME"
/sbin/start-stop-daemon --stop --pidfile $PIDFILE --verbose
# Remove the stale pidfile; errors (file already gone) are ignored.
rm "$PIDFILE" 2>/dev/null
;;
status)
# Status is inferred from pidfile existence only (process not probed).
if [ -f $PIDFILE ]
then
echo "$NAME status: Running"
else
echo "$NAME status: Stopped"
fi
;;
*)
echo "Usage: /etc/init.d/$NAME {start|stop|status}"
exit 1
;;
esac
exit 0
| true
|
b388f41eb56d72d7d9b2feb90fd07c434de9d69f
|
Shell
|
Brmm/dotfiles
|
/bootstrap.sh
|
UTF-8
| 2,218
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
# Bootstrap a new macOS machine: install packages, build osmctools, copy
# dotfiles, apply macOS defaults, set up node, and configure the Dock.
set -v # echo commands on
set -e # quit on error
sudo -v
## Keep `sudo -v` alive
while true; do sudo -n true; sleep 60; kill -0 "$$" || exit; done 2>/dev/null &
## Install binaries and applications with brew
chmod +x ./brew.sh
./brew.sh
# Helm
# helm plugin install https://github.com/futuresimple/helm-secrets
# compile osmctools
cat files/osmctools/osmconvert.c | cc -x c - -lz -O3 -o osmconvert
mv osmconvert /usr/local/bin/osmconvert
cat files/osmctools/osmfilter.c | cc -x c - -O3 -o osmfilter
mv osmfilter /usr/local/bin/osmfilter
## Copy dotfiles
cp ./home/.??* $HOME
## Run macos script
chmod +x ./macos.sh
./macos.sh
## Install node.js v10
curl -o- https://raw.githubusercontent.com/creationix/nvm/master/install.sh | bash
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion" # This loads nvm bash_completion
nvm install 10
nvm use 10
## NPM
# BUG FIX: the commented-out "create-react-native-app" line used to sit
# inside the backslash continuation, which turned the rest of the package
# list into a comment and then executed "eslint lerna ..." as a separate
# command instead of installing anything. Comments must stay outside the
# continuation. (create-react-native-app intentionally omitted.)
npm install -g \
eslint \
lerna \
npm-check-updates \
prettier \
typescript \
yarn
## Copy iTerm2 settings
# defaults write com.googlecode.iterm2 HotkeyTermAnimationDuration -float 0.00001
cp "./files/com.googlecode.iterm2.plist" ~/Library/Preferences
## Copy f.lux settings
cp "./files/org.herf.Flux.plist" ~/Library/Preferences
## Configure the Dock
dockutil --remove all
dockutil --add "/System/Applications/Mail.app"
dockutil --add "/Applications/iTerm.app" --after "Mail"
dockutil --add "/Applications/Visual Studio Code.app"
dockutil --add "/Applications/Google Chrome.app"
dockutil --add "/Applications/TextMate.app"
dockutil --add "/System/Applications/Messages.app"
dockutil --add "/Applications/WhatsApp.app"
dockutil --add "/Applications/FireFox.app"
dockutil --add "/Applications/Robo 3T.app"
dockutil --add "/Applications/Safari.app"
dockutil --add "/System/Applications/Calendar.app"
dockutil --add "/System/Applications/Notes.app"
open "/Applications/Clipy.app"
open "/Applications/Flux.app"
open "/Applications/Tunnelblick.app"
open "/Applications/iTerm.app"
open "/Applications/Dropbox.app"
open "/Applications/Docker.app"
# set default shell
chsh -s /usr/local/bin/bash
| true
|
4fd89ff16c11b3f6fe779197406d3f56808d56e7
|
Shell
|
HPI-Information-Systems/dynfd
|
/benchmark.sh
|
UTF-8
| 553
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the dynfd benchmark jar for every configuration listed in
# benchmarks.txt, repeating each configuration over a range of batch sizes.
# BUG FIX: shell assignments must not have spaces around '=' — the original
# `filename = "..."` tried to execute a command named "filename".
filename="benchmark.adultfull.csv"
# -t strips the trailing newline from each line so every configuration is a
# clean string (the original kept embedded newlines in each array element).
readarray -t test_versions < ./benchmarks.txt
batch_sizes=(1 10 100 1000 10000 100000)
tests_per_version_and_batch=5
for configuration in "${test_versions[@]}"; do
    for run in $(seq 1 $tests_per_version_and_batch); do
        for i in "${batch_sizes[@]}"; do
            echo "Running test for configuration $configuration, run $run and batchsize $i"
            # $configuration stays unquoted on purpose: a line in
            # benchmarks.txt may contain several whitespace-separated flags.
            java -jar benchmarks/target/benchmarks-1.0-SNAPSHOT-jar-with-dependencies.jar $configuration --batchSize $i
        done
    done
done
exit 0
| true
|
f7a7ee0ca41cf71046414813579fd6d0f2c0122f
|
Shell
|
raycast/script-commands
|
/commands/developer-utils/unix-timestamp.sh
|
UTF-8
| 419
| 2.8125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Required parameters:
# @raycast.schemaVersion 1
# @raycast.title Copy Current Epoch Unix Timestamp
# @raycast.mode silent
# Optional parameters:
# @raycast.author Caleb Stauffer
# @raycast.authorURL https://github.com/crstauf
# @raycast.description Copy the current epoch Unix timestamp.
# @raycast.packageName Developer Utils
# @raycast.icon ⏱️
# Put the current Unix time on the macOS clipboard without a trailing newline.
printf '%s' "$(date +"%s")" | pbcopy
echo "Unix timestamp copied"
| true
|
a7ac65964831f78cf03f5e08f496ca56f2c1b2a4
|
Shell
|
disputed1/Hammer-CSV-report-
|
/mailer.sh
|
UTF-8
| 527
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
# Pull the daily Satellite host report from a remote server, mail it as an
# attachment, and prune old local copies.
#### Variables ####
#email="email"
# NOTE(review): these glob patterns expand only where used below; with more
# than one matching file the [ -f ... ] test breaks — confirm at most one
# report exists per day.
report=/path/satellite_hosts_$(date +%Y%m%d)*
lreport=/path/satellite_hosts_$(date +%Y%m%d)*
#### Puller ####
# Fetch today's report only when no local copy exists yet.
if [ ! -f $lreport ];
then
scp -P 1234 hostname:$report /report_location/
else
:
fi
### Email the reports
# NOTE(review): $email is never assigned ("email=" is commented out above)
# — confirm mutt receives a recipient in the deployed copy.
echo "This report is automatically generated each day" | mutt -x -s "Production Satellite Daily Report -- $(date +%Y%m%d)" $email -a $lreport
### Clean up old reports (remove older than 30 days)
# NOTE(review): -mtime +1 removes files older than ~2 days, not 30 as the
# comment above claims — confirm the intended retention period.
find /reportlocation/ -type f -mtime +1 -exec rm {} \;
| true
|
4100fbd0bd1315e3262bb82b70fd098a89f376fe
|
Shell
|
daniel-1964/msys2
|
/custom-packages/stdman/PKGBUILD
|
UTF-8
| 14,835
| 3.28125
| 3
|
[] |
no_license
|
# Maintainer: Aleksandar Trifunović <akstrfn at gmail dot com>
# Contributor: Marcel Krüger <zauguin at gmail dot com>
pkgbase=stdman
pkgname=${pkgbase}
pkgver=2018.03.11
pkgrel=1
pkgdesc="Formatted C++11/14 stdlib man pages (cppreference)."
arch=(any)
url="https://github.com/jeaye/stdman"
license=('MIT')
source=("${pkgbase}-${pkgver}.tar.gz::$url/archive/$pkgver.tar.gz")
sha256sums=('d29e6b34cb5ba9905360cee6adcdf8c49e7f11272521bf2e10b42917486840e8')
noextract=("${pkgbase}-${pkgver}.tar.gz")
# Rename a man page to its primary name and symlink each alias to it.
#   $1 - original file name
#   $2 - desired primary file name
#   $3… - alias names to create as symlinks pointing at the primary name
# If a destination already exists a warning is printed; for the primary
# name, the original file is kept under its old name instead.
# Note: orig/file/link are intentionally global (matches link_file below).
old_link_file() {
orig="$1"
file="$2"
shift 2
if [ -e "${file}" ]
then
echo File ${file} cannot be created
# Fall back to the original name so the alias links still resolve.
file="${orig}"
else
mv -- "$orig" "$file"
fi
# $* is unquoted on purpose: alias lists are whitespace-split by callers.
for link in $*
do
if [ -e "${link}" ]
then
echo File ${link} cannot be created
else
ln -s -- "${file}" "${link}"
fi
done
}
# Link a file with his depedencies
link_file() {
orig="$1"
file="$2"
shift 2
if [ "${orig}" != "${file}" ]
then
if [ -e "${file}" ]
then
echo File ${file} cannot be created
file="${orig}"
else
mv -- "$orig" "$file"
orig=${orig/[/\\[}
sed -i "1s#${orig/.3/}#${file/.3/}#g" ${file}
fi
fi
names="${file/.3/}"
desc="${file/.3/} \\\\- ${file/.3/}"
for link in $*
do
names="${names},${link/.3/}"
desc="${desc}\\n.br\\n${link/.3/} \\\\- ${link/.3/}"
done
#sed -i "1s#${orig/.3/}#${names}#g" ${file}
sed -i "3d" ${file}
sed -i "3i\\\\${desc}" ${file}
}
# fix some misformed files
fix_files() {
link_file "std::atomic::compare_exchange_weak,std::atomic::compare_exchange_strong.3" \
"std::atomic::compare_exchange_weak.3" \
"std::atomic::compare_exchange_strong.3" \
"std::atomic::compare_exchange_weak_explicit.3" \
"std::atomic::compare_exchange_strong_explicit.3"
link_file "std::atomic_compare_exchange_weak,std::atomic_compare_exchange_strong,.3" \
"std::atomic_compare_exchange_weak.3" \
"std::atomic_compare_exchange_strong.3" \
"std::atomic_compare_exchange_weak_explicit.3" \
"std::atomic_compare_exchange_strong_explicit.3"
link_file "std::equal_to,not_equal_to,less,greater,less_equal,.3" \
"std::equal_to<std::experimental::propagate_const>.3" \
"std::not_equal_to<std::experimental::propagate_const>.3" \
"std::less<std::experimental::propagate_const>.3" \
"std::greater<std::experimental::propagate_const>.3" \
"std::less_equal<std::experimental::propagate_const>.3" \
"std::greater_equal<std::experimental::propagate_const>.3"
link_file "std::experimental::basic_string_view::rbegin,.3" \
"std::experimental::basic_string_view::rbegin.3" \
"std::experimental::basic_string_view::crbegin.3"
link_file "std::experimental::basic_string_view::to_string,.3" \
"std::experimental::basic_string_view::to_string.3" \
"std::experimental::basic_string_view::basic_string.3"
link_file "std::experimental::boyer_moore_horspool_searcher,.3" \
"std::experimental::boyer_moore_horspool_searcher.3" \
"std::experimental::make_boyer_moore_horspool_searcher.3"
link_file "std::experimental::filesystem::begin(directory_iterator),.3" \
"std::experimental::filesystem::begin(directory_iterator).3" \
"std::experimental::filesystem::end(directory_iterator).3"
link_file "std::experimental::filesystem::begin(recursive_directory_iterator),.3" \
"std::experimental::filesystem::begin(recursive_directory_iterator).3" \
"std::experimental::filesystem::end(recursive_directory_iterator).3"
link_file "std::experimental::filesystem::create_directory,.3" \
"std::experimental::filesystem::create_directory.3" \
"std::experimental::filesystem::create_directories.3"
link_file "std::experimental::filesystem::create_symlink,.3" \
"std::experimental::filesystem::create_symlink.3" \
"std::experimental::filesystem::create_directory_symlink.3"
link_file "std::experimental::filesystem::directory_entry::status,.3" \
"std::experimental::filesystem::directory_entry::status.3" \
"std::experimental::filesystem::directory_entry::symlink_status.3"
link_file "std::experimental::filesystem::path::append,.3" \
"std::experimental::filesystem::path::append.3" \
"std::experimental::filesystem::path::operators=.3"
link_file "std::experimental::filesystem::path::c_str,std::experimental::filesystem::path::native,.3" \
"std::experimental::filesystem::path::c_str.3" \
"std::experimental::filesystem::path::native.3" \
"std::experimental::filesystem::path::operatorstring_type.3"
link_file "std::experimental::filesystem::path::concat,.3" \
"std::experimental::filesystem::path::concat.3" \
"std::experimental::filesystem::path::operator+=.3"
link_file "std::experimental::filesystem::path::string,wstring,u8string,....3" \
"std::experimental::filesystem::path::string.3" \
"std::experimental::filesystem::path::wstring.3" \
"std::experimental::filesystem::path::u8string.3" \
"std::experimental::filesystem::path::u16string.3" \
"std::experimental::filesystem::path::u32string.3"
link_file "std::experimental::filesystem::recursive_directory_iterator::operator*,.3" \
"std::experimental::filesystem::recursive_directory_iterator::operator*.3" \
"std::experimental::filesystem::recursive_directory_iterator::operator->.3"
link_file "std::experimental::filesystem::recursive_directory_iterator::operator++,increment.3" \
"std::experimental::filesystem::recursive_directory_iterator::operator++.3" \
"std::experimental::filesystem::recursive_directory_iterator::increment.3"
link_file "std::experimental::is_detected,std::experimental::detected_t,.3" \
"std::experimental::is_detected.3" \
"std::experimental::detected_t.3" \
"std::experimental::detected_or.3"
link_file "std::experimental::parallel::seq,std::experimental::parallel::par,.3" \
"std::experimental::parallel::seq.3" \
"std::experimental::parallel::par.3" \
"std::experimental::parallel::par_vec.3"
link_file "std::experimental::parallel::sequential_execution_policy,.3" \
"std::experimental::parallel::sequential_execution_policy.3" \
"std::experimental::parallel::parallel_execution_policy.3" \
"std::experimental::parallel::parallel_vector_execution_policy.3"
link_file "std::experimental::propagate_const::operator*,.3" \
"std::experimental::propagate_const::operator*.3" \
"std::experimental::propagate_const::operator->.3"
link_file "std::experimental::propagate_const::operatorelement_type*,.3" \
"std::experimental::propagate_const::operatorelement_type*.3" \
"std::experimental::propagate_const::operatorconstelement_type*.3"
link_file "std::hash(std::string,std::wstring,std::u16string,std::u32string).3" \
"std::hash(std::string).3" \
"std::hash(std::wstring).3" \
"std::hash(std::u16string).3" \
"std::hash(std::u32string).3"
link_file "std::hash(std::experimental::string_view,std::experimental::wstring_view,.3" \
"std::hash(std::experimental::string_view).3" \
"std::hash(std::experimental::wstring_view).3" \
"std::hash(std::experimental::u16string_view).3" \
"std::hash(std::experimental::u32string_view).3"
link_file "std::input_iterator_tag,std::output_iterator_tag,std::forward_iterator_tag,.3" \
"std::input_iterator_tag.3" \
"std::output_iterator_tag.3" \
"std::forward_iterator_tag.3" \
"std::bidirectional_iterator_tag.3" \
"std::random_access_iterator_tag.3"
link_file "std::is_copy_assignable,std::is_trivially_copy_assignable,.3" \
"std::is_copy_assignable.3" \
"std::is_trivially_copy_assignable.3" \
"std::is_nothrow_copy_assignable.3"
link_file "std::is_copy_constructible,std::is_trivially_copy_constructible,.3" \
"std::is_copy_constructible.3" \
"std::is_trivially_copy_constructible.3" \
"std::is_nothrow_copy_constructible.3"
link_file "std::is_default_constructible,std::is_trivially_default_constructible,.3" \
"std::is_default_constructible.3" \
"std::is_trivially_default_constructible.3" \
"std::is_nothrow_default_constructible.3"
link_file "std::is_move_assignable,std::is_trivially_move_assignable,.3" \
"std::is_move_assignable.3" \
"std::is_trivially_move_assignable.3" \
"std::is_nothrow_move_assignable.3"
link_file "std::is_move_constructible,std::is_trivially_move_constructible,.3" \
"std::is_move_constructible.3" \
"std::is_trivially_move_constructible.3" \
"std::is_nothrow_move_constructible.3"
link_file "std::mem_fun_ref_t,std::mem_fun1_ref_t,std::const_mem_fun_ref_t,.3" \
"std::mem_fun_ref_t.3" \
"std::mem_fun1_ref_t.3" \
"std::const_mem_fun_ref_t.3" \
"std::const_mem_fun1_ref_t.3"
link_file "std::operator==,!=,<(std::error_code).3" \
"std::operator==(std::error_code).3" \
"std::operator!=(std::error_code).3" \
"std::operator<(std::error_code).3"
link_file "std::remove,std::remove_if.3" \
"std::remove_if.3" \
"std::remove.3"
}
do_link() {
declare -a typedefs=('basic_string'\
'basic_istringstream' 'basic_ostringstream' 'basic_stringstream'\
'basic_istream' 'basic_ostream' 'basic_iostream'\
'basic_ifstream' 'basic_ofstream' 'basic_fstream'\
'basic_stringbuf' 'basic_streambuf' 'basic_ios'\
'basic_filebuf' 'basic_regex'\
'basic_string_view'
)
declare -a alias_basic_string=('string' 'wstring' 'u16string' 'u32string')
declare -a alias_basic_stringstream=('stringstream' 'wstringstream')
declare -a alias_basic_istringstream=('istringstream' 'wistringstream')
declare -a alias_basic_ostringstream=('ostringstream' 'wostringstream')
declare -a alias_basic_istream=('istream' 'wistream')
declare -a alias_basic_ostream=('ostream' 'wostream')
declare -a alias_basic_iostream=('iostream' 'wiostream')
declare -a alias_basic_ifstream=('ifstream' 'wifstream')
declare -a alias_basic_ofstream=('ofstream' 'wofstream')
declare -a alias_basic_fstream=('fstream' 'wfstream')
declare -a alias_basic_stringbuf=('stringbuf' 'wstringbuf')
declare -a alias_basic_streambuf=('streambuf' 'wstreambuf')
declare -a alias_basic_ios=('ios' 'wios')
declare -a alias_basic_filebuf=('filebuf' 'wfilebuf')
declare -a alias_basic_regex=('regex' 'wregex')
declare -a alias_basic_string_view=('string_view' 'wstring_view' 'u16string_view' 'u32string_view')
for td in ${typedefs[@]}
do
# Link class pages (eg. std::string.3)
tmp=alias_${td}[@]
td1=${td}
[ "${td1}" = "basic_string_view" ] && td1="experimental::${td1}"
names=""
# Class file
file="std::${td1}.3"
if [ -f "${file}" ]
then
for al in ${!tmp}
do
out="${file/${td}/${al}}"
names="${names} ${out}"
done
link_file "${file}" "${file}" ${names}
fi
# Link member pages (eg. std::string::erase.3)
for file in std::${td1}::*.3
do
names=""
file1=${file}
if [ "${file1}" = "std::experimental::basic_string_view::to_string,.3" ]
then
file1="std::experimental::basic_string_view::to_string.3"
names="std::experimental::basic_string_view::basic_string.3"
fi
for al in ${!tmp}
do
out="${file1//$td/$al}"
names="${names} ${out}"
[ "${file1}" = "std::experimental::basic_string_view::to_string.3" ] &&
names="${names} std::experimental::${al}::${al/_view/}.3"
done
if [[ "${file1}" = *","* ]]
then
names="${file1#*,} ${names}"
file1="${file1%%,*}".3
fi
link_file "${file}" "${file1}" ${names//,/.3 }
done
done
}
rm_comas() {
for f in *,*
do
orig="${f}"
file="${f/.3/}"
list="${file#*,}"
file="${file%%,*}.3"
names=""
while [ -n "${list}" ]
do
item="${list%%,*}"
if [[ "${item}" != *"::"* ]]
then
if [[ ( "${file}" == *"operator"* ) && ( "${item}" != "operator"* ) ]]
then
prefix="${file%operator*}operator"
else
prefix="${file%::*}::"
fi
else
prefix=""
fi
link="${prefix}${item}.3"
names="${names} ${link}"
list="${list#*,}"
if [ "${item}" = "${list}" ]
then
break
fi
done
link_file "${orig}" "${file}" ${names}
done
}
prepare() {
cd ${srcdir}
tar -a -x -v -f "${pkgbase}-${pkgver}.tar.gz"
}
build() {
cd "${pkgname}-${pkgver}"
#./configure --prefix=/usr --mandir=/usr/share/man
[ -d ${srcdir}/build/man ] && rm -fr ${srcdir}/build/man
mkdir -p ${srcdir}/build/man
cp -a man/* ${srcdir}/build/man/
cd ${srcdir}/build/man
msg2 "Removing symlinks"
find . -type l -delete
msg2 "Fixing permissions"
chmod a-x *.3
msg2 "Fixing file names"
fix_files
msg2 "Linking manual pages"
do_link
msg2 "Removing comas"
rm_comas
# Since prior to 1.7, files could not ordinarly contain ':', man
# and perl were patched to substitute '.' for '::'. However,
# sometimes (e.g. gtk2-perl) man pages created with '::', and
# with 1.7 it works OOTB, but man is still patched and hence
# won't find these.
msg2 "Renaming *::* to *.* to suit Windows finalemes"
find . -name '*::*' -print0 | \
while read -d $'\0' man
do
mv "${man}" "${man//::/.}"
done
}
# PKGBUILD package(): install the staged man pages and license into the
# package tree.
package() {
#cd "${pkgname}-${pkgver}"
#make DESTDIR="$pkgdir/" install
# Replace any previous package contents wholesale.
rm -fr ${pkgdir}/usr
mkdir -p ${pkgdir}/usr/share/man/man3
cp -a ${srcdir}/build/man/* ${pkgdir}/usr/share/man/man3/
install -Dpm644 ${srcdir}/${pkgbase}-${pkgver}/LICENSE -t "$pkgdir/usr/share/licenses/${pkgname}"
}
| true
|
80571c78a8ac3722a50c073aa61ffee0e0303d46
|
Shell
|
bogol243/HW1_DSBDA
|
/start_lab1.sh
|
UTF-8
| 676
| 2.71875
| 3
|
[] |
no_license
|
# Hadoop lab driver: regenerate test input, run the MapReduce job via
# YARN, and fetch/decompress the results.
# clear the hdfs workspace
hdfs dfs -rm -r output
hdfs dfs -rm -r input
#clear the local workspace
[ -d output ] && rm -r output
[ -d input ] && rm -r input
mkdir input
#generate input files
NUM_LINES=100000
NUM_BROKEN=20
DATE_BEGIN="2020-10-10"
DATE_END="2020-10-15"
NUM_FILES=4
./get_data.py $NUM_LINES $NUM_FILES $NUM_BROKEN $DATE_BEGIN $DATE_END
#upload input data to hdfs
hdfs dfs -put input/
#start the yarn job
yarn jar target/lab1-1.0-SNAPSHOT-jar-with-dependencies.jar input output
#download the raw output data to local directory
hdfs dfs -get output
#unpack and download snappy-compressed sequence file
hadoop fs -text output/part-r-00000 > output/uncompressed_res
| true
|
742593ca8aaba0948a614593c1f1c0c9789b6d6a
|
Shell
|
JCavallo/dotfiles
|
/tools/backlight
|
UTF-8
| 874
| 3.484375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Adjust the intel_backlight brightness by one 5% step (max/20).
# Only works for intel_backlight
# Requires:
# sudo usermod -a -G video $LOGNAME
#
# In /etc/udev/rules.d/45-backlight.rules
#
# ACTION=="add", SUBSYSTEM=="backlight" KERNEL=="intel_backlight", RUN+="/bin/chgrp video /sys/class/backlight/intel_backlight/brightness"
# ACTION=="add", SUBSYSTEM=="backlight" KERNEL=="intel_backlight", RUN+="/bin/chmod g+w /sys/class/backlight/intel_backlight/brightness"
backlight_dir=/sys/class/backlight/intel_backlight
limit=$(cat "$backlight_dir/max_brightness")
current=$(cat "$backlight_dir/brightness")
delta=$((limit / 20))
case "$1" in
  +) echo $((current + delta)) > "$backlight_dir/brightness" ;;
  -) echo $((current - delta)) > "$backlight_dir/brightness" ;;
  *) echo "Usage: backlight [+/-]" ;;
esac
| true
|
291fc16469d2bdc7fe2af4fc36bcdd16348bea3a
|
Shell
|
k4ml/booktype
|
/.openshift/action_hooks/post_deploy
|
UTF-8
| 991
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
# This deploy hook gets executed after dependencies are resolved and the
# build hook has been run but before the application has been started back
# up again. This script gets executed directly, so it could be python, php,
# ruby, etc.
# Activate VirtualEnv in order to use the correct libraries
source $OPENSHIFT_HOMEDIR/python-2.6/virtenv/bin/activate
#echo "Executing 'python $OPENSHIFT_REPO_DIR/wsgi/mybooktype/manage.py createsuperuser'"
#python $OPENSHIFT_REPO_DIR/wsgi/mybooktype/manage.py createsuperuser
# Load Booktype's license fixtures into the database.
echo "Executing 'python $OPENSHIFT_REPO_DIR/wsgi/mybooktype/manage.py loaddata documentation_licenses'"
python $OPENSHIFT_REPO_DIR/wsgi/mybooktype/manage.py loaddata documentation_licenses
# Ensure the persistent media directories exist (idempotent).
echo "Creating Data Dirs if they don't exist: mkdir -p $OPENSHIFT_DATA_DIR/books $OPENSHIFT_DATA_DIR/profile_images $OPENSHIFT_DATA_DIR/cover_images"
mkdir -p $OPENSHIFT_DATA_DIR/books
mkdir -p $OPENSHIFT_DATA_DIR/cover_images
mkdir -p $OPENSHIFT_DATA_DIR/profile_images
| true
|
986266a6076e4bfb458f8a88586e71bd7608a752
|
Shell
|
vcatafesta/chili
|
/sh/sql.sh
|
UTF-8
| 2,413
| 3.359375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Scratchpad of sqlite3-from-shell examples: heredocs, one-shot queries,
# capturing results into variables, and checking membership from a list.
# NOTE: shebang changed from /bin/sh to /bin/bash because the script uses
# bash arrays and (( )) arithmetic below.
sqlite3 test.db <<EOF
create table if not exists n (id INTEGER PRIMARY KEY,f TEXT,l TEXT);
insert into n (f,l) values ('john','smith');
select * from n;
EOF
sqlite3 -batch test.db "create table if not exists n (id INTEGER PRIMARY KEY,f TEXT,l TEXT);"
sqlite3 -batch test.db "insert into n (f,l) values ('john','smith');"
sqlite3 -batch test.db "select * from n;"
sqlite3 test.db "create table if not exists n (id INTEGER PRIMARY KEY,f TEXT,l TEXT);"
sqlite3 test.db "insert into n (f,l) values ('john','smith');"
sqlite3 test.db "select * from n";
sqlite3 mydatabase.sqlite "CREATE TABLE if not exists person ( id int, name varchar(30), phone varchar(30) );"
sqlite3 mydatabase.sqlite "INSERT INTO person VALUES (1, 'Jim', '123446223');\
INSERT INTO person VALUES (2, 'Tom', '232124303');\
INSERT INTO person VALUES (3, 'Bill', '812947283');\
INSERT INTO person VALUES (4, 'Alice', '351246233');"
sqlite3 mydatabase.sqlite "SELECT name from person where id=3;"
# WARNING: interpolating $1 straight into SQL is injection-prone with
# untrusted input; fine only for trusted examples like these.
sqlite3 mydatabase.sqlite "SELECT name from person where id=$1;"
sqlite3 mydatabase.sqlite "SELECT id from person where name='$1';"
temp=`cat file_with_temperature_value`
echo "INSERT INTO readings (TStamp, reading) VALUES (datetime(), '$temp');" | sqlite3 mydb
temp=`cat file_with_temperature_value`
sqlite3 mydb "INSERT INTO readings (TStamp, reading) VALUES (datetime(), '$temp');"
# Mark the five oldest unread newsbeuter items as read.
urls="$(
sqlite3 /home/pi/.newsbeuter/cache.db \
'select url from rss_item where unread = 1 limit 5' \
)"
for url in $urls; do
sqlite3 /home/pi/.newsbeuter/cache.db \
"UPDATE rss_item set unread = 0 where url = '$url'"
done
result=$(sqlite3 /media/0CBA-1996/logfiles/SQLite3Database/myDB.db "SELECT energy FROM SmartMeter WHERE Timestamp= date('now') LIMIT 1")
echo $result
result=`sqlite3 /media/0CBA-1996/logfiles/SQLite3Database/myDB.db "SELECT energy FROM SmartMeter WHERE Timestamp= date('now') LIMIT 1" `
echo $result
sqlite3 script.db "insert into notes (note) values (\"Stuff happens.\"||\"$Str1\");"
sqlite3 script.db "insert into notes (note) values ('Stuff happens.$Str1');"
# Check each file named in list.txt against the rememberedFiles table.
files=( $( cat list.txt ) )
count=0   # BUG FIX: index was used below without ever being initialised
for file in "${files[@]}"
do echo "Checking if item ${files[$count]} was already downloaded.."
# BUG FIX: the original tested [ $exists > 0 ], which is a shell
# redirection (always true, creates a file named "0"), and the query's
# quoting was malformed; use -gt plus proper SQL string quoting.
exists=$(sqlite3 sync.db "select count(*) from rememberedFiles where filename='${files[$count]}'")
if [ "$exists" -gt 0 ]
then
echo "It exists!"
else
echo "It doesn't exist!"
fi
((count++))
done
| true
|
4a9f2b0a003ecabe66d55ae383e3153a4af3bdf3
|
Shell
|
SaferSocietyGroup/suab
|
/client/clone.sh
|
UTF-8
| 517
| 3.65625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
## Check out the suab git repository into the current directory.
## The revision comes from the $GIT_COMMIT environment variable, which is
## expected to be passed in via the --env flag to suab; when it is
## omitted, master is checked out by default.
repo_url="https://github.com/SaferSocietyGroup/suab.git"
fallback_revision="master"
checkout_dest="."
set -x
git clone --depth=1 --branch="${GIT_COMMIT:-$fallback_revision}" "$repo_url" "$checkout_dest"
| true
|
e35e706c3d1f4e1ca8bb0bfb8a17caaa65d23fdd
|
Shell
|
maiha/ansible-openstack
|
/files/redis/start
|
UTF-8
| 152
| 2.671875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Start the redis docker-compose stack from this script's directory,
# seeding ./v with the bundled redis.conf first.
set -eu
# Directory containing this script ($0 fallback when BASH_SOURCE unset).
dir=$(dirname ${BASH_SOURCE:-$0})
_olddir=$(pwd)
cd $dir
mkdir -p v
cp -p redis.conf v/
docker compose up -d
# Return the caller to where they started.
cd $_olddir
| true
|
36e6fc9d0b2bd998e7a26f24da6600cc3aaedc5b
|
Shell
|
Dunaiskyi/Project
|
/backup/root/etc/nginx/ssl/acme/dnsapi/dns_desec.sh
|
UTF-8
| 5,411
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# deSEC.io Domain API
#
# Author: Zheng Qian
#
# deSEC API doc
# https://desec.readthedocs.io/en/latest/
REST_API="https://desec.io/api/v1/domains"
######## Public functions #####################
#Usage: dns_desec_add _acme-challenge.foobar.dedyn.io "d41d8cd98f00b204e9800998ecf8427e"
# Add an ACME TXT challenge record via the deSEC REST API.  Merges the
# new value with any TXT records already present on the subname (deSEC
# PUT replaces the whole rrset).  Relies on acme.sh helpers (_info,
# _err, _debug, _desec_rest, _get_root, ...) being in scope.
dns_desec_add() {
fulldomain=$1
txtvalue=$2
_info "Using desec.io api"
_debug fulldomain "$fulldomain"
_debug txtvalue "$txtvalue"
# Credentials come from the environment or the saved account conf.
DEDYN_TOKEN="${DEDYN_TOKEN:-$(_readaccountconf_mutable DEDYN_TOKEN)}"
DEDYN_NAME="${DEDYN_NAME:-$(_readaccountconf_mutable DEDYN_NAME)}"
if [ -z "$DEDYN_TOKEN" ] || [ -z "$DEDYN_NAME" ]; then
DEDYN_TOKEN=""
DEDYN_NAME=""
_err "You did not specify DEDYN_TOKEN and DEDYN_NAME yet."
_err "Please create your key and try again."
_err "e.g."
_err "export DEDYN_TOKEN=d41d8cd98f00b204e9800998ecf8427e"
_err "export DEDYN_NAME=foobar.dedyn.io"
return 1
fi
#save the api token and name to the account conf file.
_saveaccountconf_mutable DEDYN_TOKEN "$DEDYN_TOKEN"
_saveaccountconf_mutable DEDYN_NAME "$DEDYN_NAME"
_debug "First detect the root zone"
# Sets _sub_domain and _domain on success.
if ! _get_root "$fulldomain" "$REST_API/"; then
_err "invalid domain"
return 1
fi
_debug _sub_domain "$_sub_domain"
_debug _domain "$_domain"
# Get existing TXT record
_debug "Getting txt records"
# Start the record list with the new value, JSON-escaped as "\"v\"".
txtvalues="\"\\\"$txtvalue\\\"\""
_desec_rest GET "$REST_API/$DEDYN_NAME/rrsets/$_sub_domain/TXT/"
if [ "$_code" = "200" ]; then
# Extract current values from the JSON response and append each one
# so they survive the full-rrset PUT below.
oldtxtvalues="$(echo "$response" | _egrep_o "\"records\":\\[\"\\S*\"\\]" | cut -d : -f 2 | tr -d "[]\\\\\"" | sed "s/,/ /g")"
_debug "existing TXT found"
_debug oldtxtvalues "$oldtxtvalues"
if [ -n "$oldtxtvalues" ]; then
for oldtxtvalue in $oldtxtvalues; do
txtvalues="$txtvalues, \"\\\"$oldtxtvalue\\\"\""
done
fi
fi
_debug txtvalues "$txtvalues"
_info "Adding record"
body="[{\"subname\":\"$_sub_domain\", \"type\":\"TXT\", \"records\":[$txtvalues], \"ttl\":60}]"
if _desec_rest PUT "$REST_API/$DEDYN_NAME/rrsets/" "$body"; then
if _contains "$response" "$txtvalue"; then
_info "Added, OK"
return 0
else
_err "Add txt record error."
return 1
fi
fi
_err "Add txt record error."
return 1
}
#Usage: fulldomain txtvalue
#Remove the txt record after validation.
# deSEC has no per-record delete: read the existing TXT rrset, filter
# out $txtvalue, and PUT the remaining values back.
dns_desec_rm() {
fulldomain=$1
txtvalue=$2
_info "Using desec.io api"
_debug fulldomain "$fulldomain"
_debug txtvalue "$txtvalue"
DEDYN_TOKEN="${DEDYN_TOKEN:-$(_readaccountconf_mutable DEDYN_TOKEN)}"
DEDYN_NAME="${DEDYN_NAME:-$(_readaccountconf_mutable DEDYN_NAME)}"
if [ -z "$DEDYN_TOKEN" ] || [ -z "$DEDYN_NAME" ]; then
DEDYN_TOKEN=""
DEDYN_NAME=""
_err "You did not specify DEDYN_TOKEN and DEDYN_NAME yet."
_err "Please create your key and try again."
_err "e.g."
_err "export DEDYN_TOKEN=d41d8cd98f00b204e9800998ecf8427e"
_err "export DEDYN_NAME=foobar.dedyn.io"
return 1
fi
_debug "First detect the root zone"
if ! _get_root "$fulldomain" "$REST_API/"; then
_err "invalid domain"
return 1
fi
_debug _sub_domain "$_sub_domain"
_debug _domain "$_domain"
# Get existing TXT record
_debug "Getting txt records"
txtvalues=""
_desec_rest GET "$REST_API/$DEDYN_NAME/rrsets/$_sub_domain/TXT/"
if [ "$_code" = "200" ]; then
# Keep every current value except the challenge value being removed.
oldtxtvalues="$(echo "$response" | _egrep_o "\"records\":\\[\"\\S*\"\\]" | cut -d : -f 2 | tr -d "[]\\\\\"" | sed "s/,/ /g")"
_debug "existing TXT found"
_debug oldtxtvalues "$oldtxtvalues"
if [ -n "$oldtxtvalues" ]; then
for oldtxtvalue in $oldtxtvalues; do
if [ "$txtvalue" != "$oldtxtvalue" ]; then
txtvalues="$txtvalues, \"\\\"$oldtxtvalue\\\"\""
fi
done
fi
fi
# Drop the leading ", " separator left over from the loop above.
txtvalues="$(echo "$txtvalues" | cut -c3-)"
_debug txtvalues "$txtvalues"
_info "Deleting record"
body="[{\"subname\":\"$_sub_domain\", \"type\":\"TXT\", \"records\":[$txtvalues], \"ttl\":60}]"
_desec_rest PUT "$REST_API/$DEDYN_NAME/rrsets/" "$body"
if [ "$_code" = "200" ]; then
_info "Deleted, OK"
return 0
fi
_err "Delete txt record error."
return 1
}
#################### Private functions below ##################################
# Perform one deSEC API request.
#   $1 method, $2 endpoint URL, $3 request body (ignored for GET).
# Side effects: sets $response (normalized JSON body), $_code (HTTP
# status).  Uses acme.sh's _get/_post, which honour the _H1.._H3 header
# variables and write response headers to $HTTP_HEADER.
_desec_rest() {
m="$1"
ep="$2"
data="$3"
export _H1="Authorization: Token $DEDYN_TOKEN"
export _H2="Accept: application/json"
export _H3="Content-Type: application/json"
if [ "$m" != "GET" ]; then
_secure_debug2 data "$data"
response="$(_post "$data" "$ep" "" "$m")"
else
response="$(_get "$ep")"
fi
_ret="$?"
# Last HTTP status line wins (covers redirects/continues).
_code="$(grep "^HTTP" "$HTTP_HEADER" | _tail_n 1 | cut -d " " -f 2 | tr -d "\\r\\n")"
_debug "http response code $_code"
_secure_debug2 response "$response"
if [ "$_ret" != "0" ]; then
_err "error $ep"
return 1
fi
response="$(printf "%s" "$response" | _normalizeJson)"
return 0
}
#_acme-challenge.www.domain.com
#returns
# _sub_domain=_acme-challenge.www
# _domain=domain.com
# Find the registered zone for $1 by progressively stripping labels and
# checking whether the API's domain list ($2) contains the candidate.
_get_root() {
domain="$1"
ep="$2"
i=2
p=1
while true; do
# Candidate zone: the domain with the first (i-1) labels removed.
h=$(printf "%s" "$domain" | cut -d . -f $i-100)
_debug h "$h"
if [ -z "$h" ]; then
#not valid
return 1
fi
if ! _desec_rest GET "$ep"; then
return 1
fi
if _contains "$response" "\"name\":\"$h\"" >/dev/null; then
# Zone found; the leading p labels form the subdomain.
_sub_domain=$(printf "%s" "$domain" | cut -d . -f 1-$p)
_domain=$h
return 0
fi
p=$i
i=$(_math "$i" + 1)
done
# Unreachable: the while-true loop only exits via return.
return 1
}
| true
|
a43661284d3919bd7947ef60ab6cf4850d11467b
|
Shell
|
markcerv/dotfiles-v0
|
/wsl/000-create-wsl-conf.sh
|
UTF-8
| 755
| 3.921875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
#
# Create an /etc/wsl.conf file
#
# The options will allow for you to have file permissions that are NOT all RWX
#
FILE=/etc/wsl.conf
TMP=/tmp/wsl.conf
# Never overwrite an existing config; bail out with status 99.
if [ -f "$FILE" ]; then
echo "Uh-oh, $FILE already exists. Aborting."
exit 99
else
echo "Creating the $FILE file"
# Write the config to a temp file first, then install it with sudo.
cat <<EOF > $TMP
[automount]
enabled = true
options = "metadata,umask=22,fmask=11"
#
# More settings can be found at:
# https://docs.microsoft.com/en-us/windows/wsl/wsl-config#configure-per-distro-launch-settings-with-wslconf
#
EOF
sudo cp $TMP $FILE
# Confirm the install and show the resulting permissions.
[ -f "$FILE" ] && echo "$FILE has been created:"
[ -f "$FILE" ] && ls -al $FILE
[ -f "$FILE" ] && echo " "
# cleaning up
rm $TMP
exit 0
fi
| true
|
a55cd2e5a2b5e6c72b7202fddacc174c459d0e21
|
Shell
|
erlang/docker-erlang-example
|
/advanced_examples/logstash/.travis.sh
|
UTF-8
| 1,501
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# CI smoke test: build the logstash and dockerwatch images, drive the
# counter API over http/https, and verify the requests were logged via
# the GELF driver into logstash.
set -x
./create-certs
docker build -t logstash logstash/
docker run --name logstash -d -p 9600:9600 -p 44622:44622/udp logstash
docker build -t dockerwatch .
docker run --name dockerwatch -d -p 8443:8443 --init --volume="$PWD/ssl:/etc/ssl/certs" --log-driver=gelf --log-opt gelf-address=udp://0.0.0.0:44622 dockerwatch
IP=$(docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' dockerwatch)
# Wait for logstash to finish startup
until curl -s 'localhost:9600/_node'; do sleep 5; echo "waiting for logstash to finish startup"; done
# Create counter via http
curl -H 'Content-Type: application/json' -X POST -d '' http://$IP:8080/cnt
# Increment counter via http
curl -H 'Content-Type: application/json' -X POST -d '{}' http://$IP:8080/cnt
# Read all counters via https
curl --cacert ssl/dockerwatch-ca.pem -H 'Accept: application/json' https://localhost:8443/
# Read the counter `cnt` as json using https
curl --cacert ssl/dockerwatch-ca.pem -H 'Accept: application/json' https://localhost:8443/cnt
# Increment the counter `cnt` by 20 using http
curl -H 'Content-Type: application/json' -X POST -d '{\"value\":20}' http://$IP:8080/cnt
# Read the counter `cnt` as text using http
curl -H 'Accept: text/plain' http://$IP:8080/cnt
# Check the log line count: the assertion below expects 7 lines for the
# six curl commands above (presumably plus one extra line emitted by
# logstash itself -- TODO confirm where the seventh line comes from).
sleep 10
docker exec logstash cat /usr/share/logstash/logs/output.log
test "$(docker exec logstash cat /usr/share/logstash/logs/output.log | wc -l)" = "7"
| true
|
c8578e8171fba49d2bbd1e437e8faf5584b60f9b
|
Shell
|
growlzor/macos_setup_cheatsheet
|
/ip_command_for_mac.sh
|
UTF-8
| 224
| 2.8125
| 3
|
[] |
no_license
|
# macOS `ip` shim: translate the two common Linux invocations
# (`ip route`, `ip a`) into their netstat/ifconfig equivalents, and hand
# any other arguments to ifconfig unchanged.
ip() {
  case "$*" in
    route) command netstat -nr ;;
    a) command ifconfig | grep inet ;;
    *) command ifconfig "$@" ;;
  esac
}
| true
|
c21a44ba07519a9c571b5a008066fee8c46c8788
|
Shell
|
AlecAivazis/dotfiles
|
/zsh/exports.zsh
|
UTF-8
| 1,322
| 2.734375
| 3
|
[] |
no_license
|
# zsh login environment: caches, toolchain homes, PATH, and optional
# integrations (virtualenvwrapper, opam, nvm).
# patch zsh cache
export ZSH_CACHE_DIR=$HOME/.zsh
export SHELL=/bin/zsh
# go stuff
export GOPATH=~/dv/go
export GO111MODULE=on
# env vars for git
export GIT_PAGER="diff-so-fancy | less --tabs=4 -RFX"
# add the user specific bin to the path
export PATH=~/dv/dotfiles/bin/bash:~/bin/bash:~/.dotnet/tools:/usr/local/sbin:${GOPATH}/bin:${PATH}
export PYTHONPATH=~/bin/python
# Setup terminal, and turn on colors
export TERM=xterm-256color
export LSCOLORS=Gxfxcxdxbxegedabagacad
# default editor settings
export EDITOR=/usr/bin/vi
export VISUAL=/usr/bin/vi
# This resolves issues install the mysql, postgres, and other gems with native non universal binary extensions
export ARCHFLAGS='-arch x86_64'
export LESS='--ignore-case --raw-control-chars'
export PAGER='most'
# CTAGS Sorting in VIM/Emacs is better behaved with this in place
export LC_COLLATE=C
# setup virtualenv if its present
if [ -f /usr/local/bin/virtualenvwrapper.sh ]; then
export WORKON_HOME=${HOME}/dv/virtualenvs
export PROJECT_HOME=${HOME}/dv
source /usr/local/bin/virtualenvwrapper.sh
fi
# make sure opam is enabled
. /Users/alec/.opam/opam-init/init.zsh > /dev/null 2> /dev/null || true
# load nvm and its bash completion when installed
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" #
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion"
| true
|
0c76f4716a15ffaca8f95031e8533bbbd8b77023
|
Shell
|
tarmiste/lfspkg
|
/archcore/svnsnap/community/nbtscan/repos/community-x86_64/PKGBUILD
|
UTF-8
| 906
| 2.8125
| 3
|
[] |
no_license
|
# $Id: PKGBUILD 114101 2014-07-01 13:29:38Z spupykin $
# Maintainer: Sergej Pupykin <pupykin.s+arch@gmail.com>
# Contributor: Sergio Rubio <rubiojr@biondofu.net>
# Arch PKGBUILD for nbtscan, a NetBIOS name-information scanner.
pkgname=nbtscan
pkgver=1.5.1
pkgrel=6
pkgdesc="NBTscan is a program for scanning IP networks for NetBIOS name information."
arch=('i686' 'x86_64')
license=('GPL')
depends=(glibc)
url="http://www.inetcat.net/software/nbtscan.html"
source=(http://www.sourcefiles.org/Networking/Tools/Miscellanenous/nbtscan-1.5.1.tar.gz
nbtscan.1.gz)
md5sums=('85bb085077c380b82a6ff73e0de0c154'
'c9b7f61d2be12af5edc120142341c5f7')
build() {
# NOTE(review): the tarball apparently extracts to nbtscan-1.5.1a
# (trailing "a"), hence ${pkgver}a -- verify if pkgver ever changes.
cd $srcdir/$pkgname-${pkgver}a
./configure --prefix=/usr
make
}
package() {
cd $srcdir/$pkgname-${pkgver}a
# Copy the binary
mkdir -p $pkgdir/usr/bin
cp nbtscan $pkgdir/usr/bin
cd $srcdir
# Include the nbtscan debian man page
mkdir -p $pkgdir/usr/share/man/man1
cp nbtscan.1.gz $pkgdir/usr/share/man/man1
}
| true
|
efcc01a15ca370548f4b311c1091966bfeccac64
|
Shell
|
LeGrand-Lab/INMG_SingleCell
|
/scripts/launch_seu.sh
|
UTF-8
| 794
| 3.25
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Launch seurat+FItSNE on homeostasis data (D0)
# each dataset independently
# assumes compressed folders in 'data/'
# Runs from 'scripts/'
# Joha GL 2020
pwd
MYPATH=$(pwd) # expected home/MYHOME/scripts
echo "uncompress data keeping .tar.gz files, move to data"
cd ../data
# NOTE(review): $(ls ...) parsing breaks on filenames with spaces;
# plain globs would be safer -- kept as-is.
for i in $(ls *D0.tar.gz); do
tar -zxvf $i
echo $i
done
echo "return to scripts/"
cd $MYPATH
# Make every D0 analysis script executable.
for i in $(ls seu*D0.R);do
chmod 755 ${i}
echo $i
done
echo "execute D0 seurat analysis +FItSNE visuals"
for i in $(ls seu*D0.R);do
echo """=== ${i} ====="
./${i}
done
echo "cd data again"
cd ../data
# Re-compress any dataset directory that lacks its .tar.gz archive.
for d in */ ;do
if [ ! -f ${d%/}".tar.gz" ]; then
tar -czvf ${d%/}".tar.gz" $d
fi
done
echo "please remember NOT TO commit any voluminous uncompressed content from 'data/'"
echo "END"
| true
|
714b82864455831b3402540c18302a71b5e69165
|
Shell
|
ETHZ-INS/enrichMiR_benchmark
|
/processing/whipple.STAR.sh
|
UTF-8
| 644
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/bash
# BUG FIX: the shebang was "#!/bin/bash/" (trailing slash), which is not
# a valid interpreter path and prevents the script from being executed
# directly.
#
# Run paired-end STAR alignments for every *_1.fastq.gz / *_2.fastq.gz
# pair in the current directory, writing per-sample output under ./STAR.
mkdir STAR
# Alignment for samples SRR10513924-SRR10513935 (paired-end RNA-seq):
for f in *_1.fastq.gz; do
/conda/pkgs/star-2.6.1b-0/bin/STAR --runThreadN 18\
--genomeDir /reference/Mus_musculus/GENCODE/GRCm38.p5/Annotation/Genes/genes_STARIndex\
--genomeLoad LoadAndKeep\
--readFilesIn $f ${f%_1.fastq.gz}_2.fastq.gz\
--readFilesCommand zcat\
--outFileNamePrefix ./STAR/${f%_1.fastq.gz}.\
--quantMode GeneCounts\
--twopassMode None\
--outFilterScoreMin 1\
--chimOutType Junctions SeparateSAMold\
--chimScoreMin 1
done
| true
|
3f14d687bba48b00d849a5a8565bbd7091869746
|
Shell
|
mad3310/jetty-manager
|
/scripts/docker/init/jetty
|
UTF-8
| 1,276
| 4.25
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Description: This shell script takes care of starting and stopping dennis
# Hu Dennis created on Sep. 24th, 2010
#
# SysV init script for a Jetty instance in /opt/letv/jetty, using a
# /var/lock/subsys lockfile to track running state.
#
# Source function library
. /etc/init.d/functions
#the service name for example: dennis
SNAME=jetty
#the full path and name of the daemon program
#Warning: The name of executable file must be identical with service name
# Launch jetty in the background, stdout to /var/log/jetty_stdout.log.
PROG() {
cd /opt/letv/jetty
java -jar start.jar > /var/log/jetty_stdout.log &
}
# NOTE(review): kill -9 via ps|grep is the first (and only) resort here;
# a TERM first and pgrep -f would be gentler -- confirm before changing.
kill_jetty() {
kill -9 `ps -ef | grep -v "grep" | grep "java -jar start.jar" | awk '{print $2}'` &>/dev/null
}
# start function
start() {
#check the daemon status first
if [ -f /var/lock/subsys/$SNAME ]
then
echo "$SNAME is already started!"
exit 0;
else
action "Starting $SNAME ..." PROG
[ $? -eq 0 ] && touch /var/lock/subsys/$SNAME
exit 0;
fi
}
#stop function
stop() {
echo -n "Stopping $SNAME ..."
kill_jetty
success
echo
rm -rf /var/lock/subsys/$SNAME
}
# See how we were called.
case "$1" in
start)
start
;;
stop)
stop
;;
reload|restart)
stop
start
;;
status)
status $SNAME
;;
*)
echo $"Usage: $0 {start|stop|restart|status}"
exit 1
esac
| true
|
bef9b51c139b770a570ee1e6707e1547bc5b1b7d
|
Shell
|
csye6225-rbharti-spring2020/ami
|
/scripts/JdkAndMavenSetup.sh
|
UTF-8
| 678
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision an Ubuntu AMI with default JRE/JDK and Maven, persisting
# JAVA_HOME / PATH / CLASSPATH in /etc/profile.
sudo apt update -y
sudo apt upgrade -y
sudo apt --fix-broken install
#JAVA SETUP
sudo apt install default-jre -y
sudo apt install default-jdk -y
# NOTE(review): update-alternatives --config is interactive and has no
# -y flag; this line likely errors or blocks -- confirm intent.
sudo update-alternatives --config java -y
java --version
# Resolve the real JDK home by following the javac symlink chain.
echo "export JAVA_HOME=$(dirname $(dirname $(readlink $(readlink $(which javac)))))" | sudo tee -a /etc/profile
#import profile
source /etc/profile
#export path
echo "export PATH=$PATH:$JAVA_HOME/bin" | sudo tee -a /etc/profile
echo "export CLASSPATH=.:$JAVA_HOME/jre/lib:$JAVA_HOME/lib:$JAVA_HOME/lib/tools.jar" | sudo tee -a /etc/profile
#import updated profile
source /etc/profile
#install maven
sudo apt update -y
sudo apt install maven -y
mvn --version
| true
|
73d31d1b0eae9bfd4da997682242048301338d98
|
Shell
|
namidairo/cloudflare-ufw
|
/cloudflare-ufw.sh
|
UTF-8
| 1,183
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/sh
# Refresh UFW allow rules and the nginx real_ip configuration with
# Cloudflare's currently published IPv4/IPv6 ranges.
DIR="$(dirname "$(readlink -f "$0")")"
cd "$DIR" || exit 1
# Download into temp files and abort on failure so a broken fetch never
# clobbers the last known-good lists (the original mv'ed unconditionally).
wget https://www.cloudflare.com/ips-v4 -O ips-v4.tmp || exit 1
wget https://www.cloudflare.com/ips-v6 -O ips-v6.tmp || exit 1
mv ips-v4.tmp ips-v4
mv ips-v6.tmp ips-v6
# Allow traffic from every Cloudflare range (line-safe read loops
# instead of unquoted `cat` word-splitting).
while read -r cfip; do ufw allow from "$cfip"; done < ips-v4
while read -r cfip; do ufw allow from "$cfip"; done < ips-v6
ufw reload > /dev/null
# Nginx
# Empty out old config
truncate -s 0 /etc/nginx/conf.d/cloudflare.conf
# Append to config
while read -r cfip; do echo "set_real_ip_from $cfip;" >> /etc/nginx/conf.d/cloudflare.conf; done < ips-v4
while read -r cfip; do echo "set_real_ip_from $cfip;" >> /etc/nginx/conf.d/cloudflare.conf; done < ips-v6
echo "real_ip_header CF-Connecting-IP;" >> /etc/nginx/conf.d/cloudflare.conf;
# Reload nginx
service nginx reload
# OTHER EXAMPLE RULES
# Examples to retrict to port 80
#while read -r cfip; do ufw allow from "$cfip" to any port 80 proto tcp; done < ips-v4
#while read -r cfip; do ufw allow from "$cfip" to any port 80 proto tcp; done < ips-v6
# Examples to restrict to port 443
#while read -r cfip; do ufw allow from "$cfip" to any port 443 proto tcp; done < ips-v4
#while read -r cfip; do ufw allow from "$cfip" to any port 443 proto tcp; done < ips-v6
| true
|
d9ad3bff3fe5caf8fff206a177e4e51d022d5c53
|
Shell
|
eltsc/sen
|
/ansible/metricbeat/install
|
UTF-8
| 611
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
# Install metricbeat from the bundled .deb, refresh its module and
# systemd unit configuration, then re-enable and start the service.

# Run one systemctl verb against the metricbeat unit.
metricbeat_action() {
  systemctl "$1" metricbeat.service
}

# Stop and disable the service before touching its configuration.
turn_off() {
  metricbeat_action stop
  metricbeat_action disable
}

# Enable and start the service once configuration is in place.
turn_on() {
  metricbeat_action enable
  metricbeat_action start
}

# Install the bundled unit file and reload systemd.
service_update() {
  cp metricbeat.service /lib/systemd/system/metricbeat.service
  systemctl daemon-reload
}

# Drop the bundled system module config into place.
upgrade_modules() {
  cp system.yml /etc/metricbeat/modules.d
}

# Install every .deb shipped alongside this script.
install_metricbeat() {
  dpkg -i *.deb
}

# main
install_metricbeat
turn_off
upgrade_modules
service_update
turn_on
| true
|
afd07cf50f921278076403122e8db0ceef677c8d
|
Shell
|
dennis-tmeinc/dvr
|
/zeus6/deploy/ZEUS5_MSS/host/etc_back/init.d/stunneld
|
UTF-8
| 853
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/sh
# chkconfig: 2345 17 81
#
# Decide at boot whether stunnel should front the web server, based on
# the switch file, and DROP the superseded plain/SSL port via iptables.
STUNNEL_SCRIPT="/etc/init.d/stunnel"
SWITCH_FILE="/etc/conf.d/ssl/switch"
PEM="/etc/conf.d/ssl/certificate.pem"
IPTABLES="/usr/sbin/iptables"
# Kept as single strings and deliberately expanded UNQUOTED below so the
# shell splits them into individual iptables arguments.
IP_FLAG="INPUT 1 -i eth0 -p tcp --dport 80 -j DROP"
IP_SFLAG="INPUT 1 -i eth0 -p tcp --dport 443 -j DROP"
# Read the switch value; tolerate a missing file (SWITCH stays empty and
# the else branch below reports the parse failure).
SWITCH=$(cat $SWITCH_FILE 2>/dev/null)
start(){
echo -n "Starting check if set stunnel on "
# BUG FIX: $SWITCH is now quoted and compared with POSIX '=' -- the
# original unquoted '==' test was a [ syntax error whenever the switch
# file was empty or unreadable (and '==' is a bashism under /bin/sh).
if [ "$SWITCH" = "1" ]; then
$STUNNEL_SCRIPT start
$IPTABLES -I $IP_FLAG
elif [ "$SWITCH" = "0" ]; then
#echo "Nothing in stunneld"
# Plain-HTTP mode: still start stunnel if a certificate exists.
if [ -f $PEM ]; then
$STUNNEL_SCRIPT start
fi
$IPTABLES -I $IP_SFLAG
else
echo "Parse stunnel switch fail in stunneld"
fi
echo "."
}
stop(){
echo "Redundant stop(stunneld) ."
}
# See how we were called.
case "$1" in
start)
start
;;
stop)
stop
;;
*)
echo "Usage: $0 {start|stop}"
exit 1
esac
exit 0
| true
|
fd1eebf6e06c798bb318cb30fa2b0afb4938a537
|
Shell
|
AdiPezer/linux
|
/vjezba1/cleanlog.sh
|
UTF-8
| 304
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
# Truncate the system log files in $LOG_DIR.  Must be run as root.
echo "Kao root korinsik potrebno je da ocistimo logove"
LOG_DIR=/var/log
ROOT_UID=0     # only UID 0 has root privileges
E_XCD=86       # exit code: cannot change directory
E_NOTROOT=87   # exit code: not running as root
# Truncating files under /var/log requires root.
if [ "$UID" -ne "$ROOT_UID" ]
then
echo "Must be root to run this script."
exit $E_NOTROOT
fi
# BUG FIX: the original never changed into $LOG_DIR, so it truncated
# ./messages and ./wtmp in whatever the current directory happened to
# be.  Change directory first and fail with E_XCD (previously defined
# but unused) if that is not possible.
cd $LOG_DIR || {
echo "Cannot change to $LOG_DIR."
exit $E_XCD
}
cat /dev/null > messages
cat /dev/null > wtmp
echo "Log files cleaned up."
exit 0
| true
|
4bf76c38de1899d50af0025fa5982d9d891ad7a4
|
Shell
|
dmedme/web_path_web
|
/autodefC.sh
|
UTF-8
| 2,827
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/sh
#
# autodefC.sh automates the generation of a *.def file for a path script
# A *.def file must exist with possible substition values in the second col
# of each row (i.e. in the format of a def file but other than the second
# column the values can be null and are in any case ignored.
# Thus a current *.def file could be used.
#
# This script handles multiple substitutions on the same line of an *.msg file.
#
# The script basedefC.sh can be used to create a suitable *.def based on
# values in one or more *.db files stored within the data directory.
#
# Following generation the *.def file will need to be edited to facilitate
# use of the F directive in the 5th column.
#
# The new *.def will be named *.new. The original *.def will be maintained.
#
# Process each named script directory in turn.
for i in $*
do
#
#if [ -d $i -a -f $i/$i.def ]
#
#then
#
# not a def file so use *.db
#
# fi
#
#
if [ -d $i -a -f $i/$i.msg -a -f $i/$i.def ]
then
#
# PATH_AWK must name the awk implementation to use (set by fdvars.sh).
whichawk="$PATH_AWK"
echo "PATH_AWK= " $whichawk
if [ -z "$whichawk" ]
then
echo "No awk specified in PATH_AWK. Possibly need to run fdvars.sh"
exit 1
fi
#
# First create deduped def file
sort -t"|" -k2,2 $i/$i.def > $i/$i.de1
#
# Keep only the first row for each distinct substitution value (col 2).
$whichawk 'BEGIN {
FS="|"
currentval=""
}
currentval!=$2 {
print $0 > "'$i/$i.de2'"
currentval=$2
}' $i/$i.de1
#
# delete temp file *.de1
rm $i/$i.de1
# Now create *.new with events and subs
#
$whichawk 'function findstring(thestring,theline) {
nooftimes=split(theline,thearray,thestring)
nooftimes--
return
}
BEGIN {
print "------- START of '$i' -------"
FS="|"
# Set counts for script
#
eventscount=0
msglinenum=0
noofsubs=0
# loop used to count no of subs within each loop
loopx=0
loop[loopx]=0
#First read def file and store in an array
deffile="'$i/$i.de2'"
deflinenum=0
while(( getline < deffile) >0) {
if(NF==5) {
deflinenum++
for(y=1;y<6;y++) {
def[deflinenum,y]=$y
}
}
for(x=1;x<=deflinenum;x++) {
# print def[x,1] "|" def[x,2] "|" def[x,3] "|" def[x,4] "|" def[x,5]
}
}
}
{
notpath="Y"
msglinenum++
}
/^\\S[A-Z][A-Z,0-9]:*/ {
# The path e2sync (S) events
notpath="N"
eventscount++
print "  " $0 > "'$i/$i'.new"
if($0~"LOOP") {
loopx++
loop[loopx]=0
}
}
/^\\T[A-Z][A-Z,0-9]:\\/ {
# The path e2sync end (T) events
notpath="N"
}
/^\\W*\\/ {
# The path wait event
notpath="N"
}
notpath=="Y" {
# process script line
for(x=1;x<=deflinenum;x++) {
findstring(def[x,2],$0)
while(nooftimes>0) {
print msglinenum "|" def[x,2] "|" def[x,3] "|" def[x,4] "|N" > "'$i/$i'.new"
nooftimes--
loop[loopx]++
noofsubs++
}
}
}
END {
print "Lines in '$i' = " msglinenum
print "Events in '$i' = " eventscount
# print loop info
print "Total no. of substitutions = " noofsubs
if(loopx>0) {
for(x=0;x<=loopx;x++) {
print "Loop " x " - subs = " loop[x]
}
}
print "-------- END of '$i' --------"
}' $i/$i.msg
# delete temp file *.de2
rm $i/$i.de2
else
echo "not a script directory"
fi
done
| true
|
0e2f01161415ceceb6b4d65e3bdf7675b9c7566b
|
Shell
|
echocloudG/auto-deploy-maven-project
|
/auto-update.sh
|
UTF-8
| 1,088
| 4
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Pull the latest code, rebuild the Maven project, and restart the jar.
APP_NAME=修改为你的项目名
PROJECT_PATH=修改为你的项目路径
PROJECT_JAR_PATH=${PROJECT_PATH}/target
# 检测代码是否有更新
LOG=$(git pull)
if [ "${LOG}" = "Already up-to-date." ]; then
echo "当前版本为最新版本"
exit 1
else
echo "更新到最新版本,开始编译"
# 开始打包程序
mvn clean package -Dmaven.test.skip=true
if [ $? != 0 ]
then
echo "打包时出现错误"
exit 1
fi
fi
# 寻找jar包
for file in $(ls ${PROJECT_JAR_PATH})
do
if [ "${file##*.}" = "jar" ]; then
JAR_NAME=${file}
echo "找到jar包:${JAR_NAME}"
break
fi
done
# Quoted so an unset JAR_NAME is detected correctly ([ -z ] alone is true).
if [ -z "${JAR_NAME}" ];then
echo "未找到jar包"
exit 1
fi
PID=$(ps aux | grep ${APP_NAME} | grep -v grep | awk '{printf $2}')
# BUG FIX: ${PID} must be quoted -- with the original unquoted test, an
# empty PID collapsed the expression to "[ -n ]", which is always true,
# so kill(1) was invoked with no argument and the "not running" branch
# was unreachable.
if [ -n "${PID}" ]; then
kill -9 ${PID}
echo "停止旧程序(pid:${PID})"
else
echo "程序并未运行"
fi
echo "开始启动新程序"
nohup java -jar ${PROJECT_JAR_PATH}/${JAR_NAME} > ${JAR_NAME%.*}.log &
PID=$(ps aux | grep ${APP_NAME} | grep -v grep | awk '{printf $2}')
echo "启动新程序(pid:${PID})"
| true
|
c197fc45fd1b41f79448021d6adaaf6847c7545c
|
Shell
|
paddureddy/Free_elec_BBB_3.8
|
/patch.sh
|
UTF-8
| 5,262
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
# (c) 2009 - 2013 Koen Kooi <koen@dominion.thruhere.net>
# (c) 2012 Robert Nelson <robertcnelson@gmail.com>
# This script will take a set of directories with patches and make a git tree out of it
# After all the patches are applied it will output a SRC_URI fragment you can copy/paste into a recipe
set -e
# don't use builtin 'echo' from /bin/sh
export ECHO="$(which echo)"
DIR="$PWD"
PATCHPATH="${DIR}/patches"
EXPORTPATH="${DIR}/export"
RECIPEDIR="linux-mainline-3.8"
RECIPENAME="linux-mainline_3.8.bb"
# --- Configuration ----------------------------------------------------------
# NOTE(review): ${DIR}, ${RECIPENAME}, ${RECIPEDIR}, ${PATCHPATH},
# ${EXPORTPATH} and ${ECHO} are defined earlier in this script, outside this
# excerpt — confirm their values before editing.
RECIPEFILE="${DIR}/recipes/${RECIPENAME}"
#For TAG, use mainline Kernel tags
TAG="v3.8.13"
EXTRATAG=""
EXTERNAL_TREE="https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git"
EXTERNAL_BRANCH="linux-3.8.y"
EXTERNAL_SHA="dbf932a9b316d5b29b3e220e5a30e7a165ad2992"
# Ordered list of patch-set directories under ${PATCHPATH} to apply.
PATCHSET="dma rtc pinctrl cpufreq adc i2c da8xx-fb pwm mmc crypto 6lowpan capebus arm omap omap_sakoman omap_beagle_expansion omap_beagle omap_panda net drm not-capebus pru usb PG2 reboot iio w1 gpmc mxt ssd130x build hdmi audio resetctrl camera resources pmic pps leds capes proto logibone BeagleLogic fixes machinekit backports"
# Pull the latest tags from the linux-stable mirror; `|| true` keeps the
# script going if the remote is unreachable.
git_kernel_stable () {
	git pull https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git master --tags || true
}
# Pull tags from Torvalds' tree, falling back to linux-stable when the
# requested $TAG is still missing afterwards.
git_pull_torvalds () {
	git pull https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git master --tags || true
	#Maybe, we need a stable tag '3.0.2'?
	git tag | grep ${TAG} >/dev/null || git_kernel_stable
}
# Clone the kernel working copy on first run, otherwise just fetch.
if [ ! -d ${DIR}/kernel ] ; then
	mkdir -p ${DIR}/kernel
fi
cd ${DIR}/kernel
if [ ! -f ./.git/config ] ; then
	git clone https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git .
else
	git fetch
fi
# Reset to a pristine master before (re)applying patches.
git am --abort || echo "Do you need to make sure the patches apply cleanly first?"
# Always return to master, and remove patched branch...
git reset --hard
git checkout master -f
git describe
git branch -D tmp-patching-branch &>/dev/null || true
git branch -D tmp-patching-branch-sha &>/dev/null || true
#Do we have the tag?
git tag | grep ${TAG} | grep -v rc >/dev/null || git_pull_torvalds
git checkout -f ${TAG} -b tmp-patching-branch
if [ "${EXTERNAL_TREE}" ] ; then
	#we are pulling the external tree into 1st branch, and checkout the SHA into a 2nd,
	#which saves a little pain in cleaning up master, when switching between different beagleboard branches
	git pull ${EXTERNAL_TREE} ${EXTERNAL_BRANCH}
	git checkout ${EXTERNAL_SHA} -b tmp-patching-branch-sha
fi
git describe
# newer gits will run 'git gc' after every patch if you don't prune
#git gc
#git prune
# Start from empty export directories each run.
if [ -d ${EXPORTPATH} ] ; then
	rm -rf ${EXPORTPATH} || true
	rm -rf ${EXPORTPATH}-oe || true
fi
# apply patches
# For each patch set: apply its patches in numeric order, re-export them as
# normalized patches, refresh the source patch directory with the export, and
# record an empty marker commit between sets.
for patchset in ${PATCHSET} ; do
	CURRENTCOMMIT="$(git log --oneline --no-abbrev -1 | awk '{print $1}')"
	mkdir -p ${EXPORTPATH}/$patchset
	for patch in $(ls -1 ${PATCHPATH}/$patchset/*.patch | sort -n) ; do
		$ECHO -n "$patch: "
		git am -q $patch && echo applied || exit 1
	done
	NEWCOMMIT="$(git log --oneline --no-abbrev -1 | awk '{print $1}')"
	git format-patch ${CURRENTCOMMIT}..${NEWCOMMIT} -o ${EXPORTPATH}/$patchset
	rm -rf ${PATCHPATH}/$patchset && cp -a ${EXPORTPATH}/$patchset ${PATCHPATH}
	git commit --allow-empty -a -m "${TAG}-${patchset}${EXTRATAG}"
done
# Assemble the OpenEmbedded recipe layer around the exported patches.
mkdir -p ${EXPORTPATH}-oe/recipes-kernel/linux
cp ${RECIPEFILE} ${EXPORTPATH}-oe/recipes-kernel/linux/
if [ "${EXTERNAL_TREE}" ] ; then
	# Substitute the SEDMEREV/SEDMEURI placeholders in the recipe with the
	# real revision and fetch URI.
	sed -i -e s:SEDMEREV:${EXTERNAL_SHA}: ${EXPORTPATH}-oe/recipes-kernel/linux/${RECIPENAME}
	sed -i -e s,SEDMEURI,${EXTERNAL_TREE}\;branch=${EXTERNAL_BRANCH}, ${EXPORTPATH}-oe/recipes-kernel/linux/${RECIPENAME}
	echo >> ${EXPORTPATH}-oe/recipes-kernel/linux/${RECIPENAME}
	echo 'SRC_URI += " \' >> ${EXPORTPATH}-oe/recipes-kernel/linux/${RECIPENAME}
fi
if [ -f ${DIR}/patch_script.sh ] ; then
	rm -rf ${DIR}/patch_script.sh || true
fi
# export patches and output SRC_URI for them
for patchset in ${PATCHSET} ; do
	for patch in $(ls -1 ${EXPORTPATH}/$patchset/*.patch | sort -n) ; do
		patch=${patch##*/}
		echo -e "\tfile://${patchset}/$patch \\" >> ${EXPORTPATH}-oe/recipes-kernel/linux/${RECIPENAME}
		echo " git am \"\${DIR}/patches/${patchset}/$patch\"" >> ${DIR}/patch_script.sh
	done
done
mkdir -p ${EXPORTPATH}-oe/recipes-kernel/linux/${RECIPEDIR}
echo ' file://defconfig \' >> ${EXPORTPATH}-oe/recipes-kernel/linux/${RECIPENAME}
echo ' file://am335x-pm-firmware.bin \' >> ${EXPORTPATH}-oe/recipes-kernel/linux/${RECIPENAME}
if [ -e ${DIR}/logo_linux_clut224.ppm ] ; then
	cp ${DIR}/logo_linux_clut224.ppm ${EXPORTPATH}-oe/recipes-kernel/linux/${RECIPEDIR}/
	echo ' file://logo_linux_clut224.ppm \' >> ${EXPORTPATH}-oe/recipes-kernel/linux/${RECIPENAME}
fi
# Close the SRC_URI += " ... " block opened above.
echo "\"" >> ${EXPORTPATH}-oe/recipes-kernel/linux/${RECIPENAME}
# Copy exported patches and per-board defconfigs into the recipe layer.
cp -a ${EXPORTPATH}/* ${EXPORTPATH}-oe/recipes-kernel/linux/${RECIPEDIR}/
mkdir -p ${EXPORTPATH}-oe/recipes-kernel/linux/${RECIPEDIR}/beaglebone
cp ${DIR}/configs/beaglebone ${EXPORTPATH}-oe/recipes-kernel/linux/${RECIPEDIR}/beaglebone/defconfig
mkdir -p ${EXPORTPATH}-oe/recipes-kernel/linux/${RECIPEDIR}/beagleboard
cp ${DIR}/configs/beagleboard ${EXPORTPATH}-oe/recipes-kernel/linux/${RECIPEDIR}/beagleboard/defconfig
if [ -e ${DIR}/kernel/am335x-pm-firmware.bin ] ; then
	cp ${DIR}/kernel/am335x-pm-firmware.bin ${EXPORTPATH}-oe/recipes-kernel/linux/${RECIPEDIR}/
fi
|
36aa21deacdd8a5f9d275b754a1b0115464550be
|
Shell
|
xzfc/cached-nix-shell
|
/tests/t15-old-nix.sh
|
UTF-8
| 394
| 2.640625
| 3
|
[
"MIT",
"Unlicense"
] |
permissive
|
#!/bin/sh
# `run` and `check_contains` are test helpers provided by the shared library.
. ./lib.sh
# Check that cached-nix-shell works even if the $PATH contains an old derivation
# of nix-shell. https://github.com/xzfc/cached-nix-shell/issues/24
# Prepend nix 2.3 (built from the pinned nixpkgs 21.05 channel) to PATH for
# this single invocation only, then ask the wrapped nix-shell for its version.
PATH=$(nix-build '<old>' -A nix --no-out-link -I \
	old=https://github.com/NixOS/nixpkgs/archive/nixos-21.05.tar.gz
)/bin:$PATH run cached-nix-shell -p --exec nix-shell --version
check_contains '^nix-shell (Nix) 2\.3\.16$'
| true
|
c87c60946e56cdc9443e982cebcd43dd8489a422
|
Shell
|
tonanuvem/kafka-producer-consumer
|
/start_cluster.sh
|
UTF-8
| 218
| 2.96875
| 3
|
[] |
no_license
|
# Bring up the Zookeeper/Kafka stack defined in docker-compose.yml, detached.
# (The "zookeper" spelling is kept: the function name is part of the
# script's interface for callers.)
start_zookeper_kafka() {
    docker-compose up -d
}
# Return 0 once the Kafka broker has logged its startup marker, non-zero
# otherwise.  grep -q's exit status replaces the original
# capture-into-a-variable-and-test-emptiness dance (same contract, one step).
check_control_center_up() {
    docker-compose logs kafka | grep -q "started (kafka.server.KafkaServer)"
}
| true
|
3b1b95efe5438d281dd17a85d2e14272e8746030
|
Shell
|
gbarany/crypto-arbitrage-finder
|
/docker-entrypoint.sh
|
UTF-8
| 904
| 2.734375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Container entrypoint: dispatch FrameworkLive.py with flags selected by the
# MODE environment variable (default: SANDBOX_NETX_KAFKA_AWS).
# Stderr of the framework is captured in ./results/errorlog.txt.
Mode="${MODE:-SANDBOX_NETX_KAFKA_AWS}"
case "$Mode" in
    SANDBOX_NETX_LOCALPOLLER)
        python ./src/FrameworkLive.py --noforex --resultsdir=./results/ --dealfinder=networkx 2> ./results/errorlog.txt
        ;;
    SANDBOX_NEO4J_LOCALPOLLER)
        python ./src/FrameworkLive.py --noforex --resultsdir=./results/ --neo4jmode=aws --dealfinder=neo4j 2> ./results/errorlog.txt
        ;;
    SANDBOX_NETX_KAFKA_AWS)
        python ./src/FrameworkLive.py --noforex --resultsdir=./results/ --dealfinder=networkx --datasource=kafkaaws 2> ./results/errorlog.txt
        ;;
    LIVE_NETX_KAFKA_AWS)
        python ./src/FrameworkLive.py --noforex --resultsdir=./results/ --dealfinder=networkx --datasource=kafkaaws --output=kafkaaws --live 2> ./results/errorlog.txt
        ;;
    *)
        # Previously an unrecognised MODE fell through all nested ifs and the
        # container exited 0 having done nothing; fail loudly instead.
        echo "Unknown MODE: $Mode" >&2
        exit 1
        ;;
esac
| true
|
666e497abc02910d7a6a1a52000516869cecdc2c
|
Shell
|
nagatax/docker-library
|
/apache/hooks/build
|
UTF-8
| 1,115
| 2.734375
| 3
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/sh
##################################################
# Build the Apache httpd Docker image.
##################################################
# Load version/URL/checksum variables (APR*, PCRE*, APACHE*, D_IMAGE).
. ./apache/hooks/env.sh
# Building a docker image
echo "Building a docker image"
# Every expansion is quoted so a value containing whitespace cannot be
# word-split into separate build arguments (ShellCheck SC2086).
docker image build . \
    --build-arg APR="${APR}" \
    --build-arg APR_PAKAGE="${APR_PAKAGE}" \
    --build-arg APR_PAKAGE_FILE="${APR_PAKAGE_FILE}" \
    --build-arg APR_URL="${APR_URL}" \
    --build-arg APR_SHA256="${APR_SHA256}" \
    --build-arg APR_UTIL="${APR_UTIL}" \
    --build-arg APR_UTIL_PAKAGE="${APR_UTIL_PAKAGE}" \
    --build-arg APR_UTIL_PAKAGE_FILE="${APR_UTIL_PAKAGE_FILE}" \
    --build-arg APR_UTIL_URL="${APR_UTIL_URL}" \
    --build-arg APR_UTIL_SHA256="${APR_UTIL_SHA256}" \
    --build-arg PCRE="${PCRE}" \
    --build-arg PCRE_PAKAGE="${PCRE_PAKAGE}" \
    --build-arg PCRE_PAKAGE_FILE="${PCRE_PAKAGE_FILE}" \
    --build-arg PCRE_URL="${PCRE_URL}" \
    --build-arg APACHE="${APACHE}" \
    --build-arg APACHE_PAKAGE="${APACHE_PAKAGE}" \
    --build-arg APACHE_PAKAGE_FILE="${APACHE_PAKAGE_FILE}" \
    --build-arg APACHE_URL="${APACHE_URL}" \
    --build-arg APACHE_SHA256="${APACHE_SHA256}" \
    -t "${D_IMAGE}" \
    -f ./apache/Dockerfile
| true
|
1695a2de6b97038a6e5443ad1cfa0b9c4c31e485
|
Shell
|
NKb03/Hextant
|
/setup/hextup.sh
|
UTF-8
| 3,262
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/sh
# Download a snapshot artifact from Sonatype OSS into the plugins directory.
#   $1 - maven artifact id, $2 - destination file name
# Uses the globals $version and $hextant_home set by the callers.
get_jar() {
    local artifact=$1
    local destination=$2
    wget "https://oss.sonatype.org/service/local/artifact/maven/redirect?r=snapshots&g=com.github.nkb03&a=$artifact&v=$version&t=jar" -O "$hextant_home/plugins/$destination"
}
# Verify a `java` binary is reachable; abort the installer otherwise.
# (Version 11 is required but only requested, not enforced.)
check_java() {
    if ! java --version; then
        echo "java command is not on your PATH. Exiting"
        exit 1
    fi
    echo "Please make sure that your Java JRE has version 11."
}
# Locate an existing JavaFX SDK or download/unpack one, leaving its path in
# the global $javafx_sdk for create_script().
setup_javafx_jdk() {
    echo "Do you have the JavaFX SDK installed? (1) Yes, (2) No"
    read -r option
    if [ "$option" = "1" ]; then
        javafx_sdk="$HOME/lib/javafx-sdk"
        if [ ! -f "$javafx_sdk/lib/javafx.controls.jar" ]; then
            echo "Where is your SDK located?"
            read -r javafx_sdk
            if [ ! -f "$javafx_sdk/lib/javafx.controls.jar" ]; then
                echo "JavaFX SDK not recognized. Exiting"
                exit 1
            fi
        else
            echo "Autodetected JavaFX SDK located at $javafx_sdk"
        fi
    elif [ "$option" = "2" ]; then
        default="$HOME/lib/javafx-sdk"
        # BUG FIX: the prompt was printed AFTER `read`, so the user answered
        # a question they had not seen (also: the closing paren was missing).
        echo "Where should the JavaFX SDK be installed? (Default: $default)"
        read -r javafx_sdk
        if [ -z "$javafx_sdk" ]; then
            # BUG FIX: was `javafx_sdk=default`, which assigned the literal
            # string "default" instead of the default path.
            javafx_sdk=$default
        fi
        mkdir -p "$javafx_sdk"
        wget https://gluonhq.com/download/javafx-11-0-2-sdk-linux/ -O /tmp/javafx.zip
        7z x /tmp/javafx.zip "-o$javafx_sdk"
        rm /tmp/javafx.zip
        # Flatten the versioned archive layout to $javafx_sdk/lib.
        mv "$javafx_sdk/javafx-sdk-11.0.2/lib" "$javafx_sdk"
        rm -r "$javafx_sdk/javafx-sdk-11.0.2"
    else
        echo "Invalid option. Exiting"
        exit 1
    fi
}
# Ask which Hextant version to install; an empty answer selects the latest
# snapshot.  Result is left in the global $version.
query_version() {
    echo "Which version Hextant should be installed? (Default: Latest)"
    read -r version
    version=${version:-1.0-SNAPSHOT}
}
# Ask for the install directory (default: ~/hextant), store it in the global
# $hextant_home and persist it for future shells via ~/.bashrc.
query_home() {
    echo "Location to install Hextant" "Where should Hextant be installed? (Default: $HOME/hextant)"
    read -r hextant_home
    hextant_home=${hextant_home:-$HOME/hextant}
    echo "export HEXTANT_HOME=$hextant_home" >> ~/.bashrc
}
# Download the three runtime jars into $hextant_home/plugins.
get_components() {
    mkdir -p "$hextant_home/plugins"
    local spec
    for spec in "hextant-core-fatjar:core.jar" "hextant-main:main.jar" "hextant-launcher:launcher.jar"; do
        get_jar "${spec%%:*}" "${spec##*:}"
    done
}
# Seed the launcher project definition from the repository template.
create_launcher() {
    mkdir -p "$hextant_home/launcher"
    wget https://raw.githubusercontent.com/NKb03/Hextant/master/setup/launcher-info.json -O "$hextant_home/launcher/project.json"
}
# Write the /bin/hextant launcher script (requires sudo).  Inside the
# double-quoted assignment, backslash-newline is a line continuation, so the
# generated java command must end every wrapped line with a backslash.
create_script() {
    command="#!/bin/sh
java --module-path $javafx_sdk/lib\
 --add-modules javafx.controls\
 --add-opens java.base/jdk.internal.loader=ALL-UNNAMED\
 -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5005\
 -jar $hextant_home/plugins/main.jar \"\$@\"
"
    # BUG FIX: the -agentlib line above had no trailing backslash, so the
    # generated script split the java invocation across two lines and tried
    # to execute "-jar ..." as a separate command.
    # NOTE(review): the JDWP debug agent is left enabled on purpose? confirm.
    sudo sh -c "echo '$command' > /bin/hextant"
    sudo chmod +x /bin/hextant
}
# Full installation: check prerequisites, ask for version/location, download
# components and create the launcher plus the /bin/hextant wrapper.
install() {
    # Blank lines separate our ~/.bashrc additions from existing content.
    echo "" >> ~/.bashrc
    echo "" >> ~/.bashrc
    check_java
    setup_javafx_jdk
    query_version
    query_home
    get_components
    create_launcher
    create_script
    echo "Installation successful. Hextant version is $version."
}
# Update an existing installation in place: reuse the location recorded by a
# previous install (HEXTANT_HOME) and re-download the component jars.
update() {
    hextant_home="$HEXTANT_HOME"
    query_version
    get_components
    echo "Update successful. Hextant version is $version."
}
echo "Welcome to the Hextant setup assistant!"
# An action argument (install|update) is mandatory.
if [ -z "$1" ]; then
    echo "Usage: $0 {install|update}"
    exit 1
fi
case "$1" in
    install)
        install;;
    update)
        update;;
    *)
        echo "Unknown command $1. Valid commands 'install' and 'update'."
        exit 1
        ;;
esac
# Pick up the HEXTANT_HOME export that install/update appended to ~/.bashrc.
. ~/.bashrc
| true
|
b5ea9ac37c0a1ff5e92c946b309626b3dd49685f
|
Shell
|
commercetools/ctp-smoke-tests
|
/.circleci/run_smoke_tests.sh
|
UTF-8
| 339
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/bash
set -euo pipefail
# Start every run from an empty report directory.
rm -rf test-reports && mkdir test-reports
# Smoke-test image built from the master branch.
image=gcr.io/ct-images/ctp-smoke-tests:master
docker pull "$image"
# Reports land in ./test-reports through the bind mount; the CTP credentials
# are forwarded unchanged from the CI environment.
docker run \
  --rm \
  --mount type=bind,source="$(pwd)"/test-reports,destination=/tmp/test-reports,consistency=cached \
  -e AUTH_URL \
  -e CLIENT_ID \
  -e CLIENT_SECRET \
  -e PROJECT_KEY \
  "$image"
| true
|
53b60989b9b350e1bd73dce6961f3e8e9387f9bd
|
Shell
|
mludolph/cloudcomputing
|
/assignment02/alex/spinup-gcp-for-openstack.sh
|
UTF-8
| 1,473
| 2.625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Provision GCP networking, a nested-virtualization image and a VM for an
# OpenStack lab.
# NOTE(review): this script is an unfinished draft — several gcloud flags
# below are missing their values and will fail as written; each is flagged.
# 1. VPC networks create
gcloud compute networks create cc-network1 --subnet-mode=custom
gcloud compute networks create cc-network2 --subnet-mode=custom
# 2. VPC networks create subnets
#? wtf, what networks should I give them
# NOTE(review): --range=/25 is missing its network prefix and
# --secondary-range=RANGE= is missing a name/CIDR — fill in real values.
gcloud compute networks subnets create cc-subnet1 \
    --network=cc-network1 \
    --range=/25 \
    --secondary-range=RANGE=
gcloud compute networks subnets create cc-subnet2 \
    --network=cc-network2 \
    --range=/25
# 4. Create disk based on Ubuntu Server
gcloud compute disks create disk1 \
    --image-project=ubuntu-os-cloud \
    --image-family=ubuntu-1804-lts \
    --zone=europe-west1-d \
    --size=100GB
# 5. Custom image
# The enable-vmx license turns on nested virtualization for instances
# created from this image.
gcloud compute images create nested-vm-image \
    --source-disk=disk1 \
    --source-disk-zone=europe-west1-d \
    --licenses="https://www.googleapis.com/compute/v1/projects/vm-options/global/licenses/enable-vmx"
# 6. create VMs
# NOTE(review): --network-interface and --network are left dangling — this
# command is incomplete.
gcloud compute instances create controller \
    --zone="europe-west1-d" \
    --min-cpu-platform="Intel Haswell" \
    --image=nested-vm-image \
    --tags=cc \
    --machine-type=n2-standard-2 \
    --network-interface=\
    --network
# 7.firewall rule
# NOTE(review): --destination-ranges has no value.
gcloud compute firewall-rules create "cc-ssh-icmp-ingress" \
    --allow=tcp:22,icmp \
    --direction=INGRESS \
    --target-tags="cc" \
    --destination-ranges
# 8. open all openstack ports
|
4b972710f2219490fd88988c493fd09e21b95233
|
Shell
|
tuyen81/self_learning
|
/shell/running_package/testcases/coreutils/uname.sh
|
UTF-8
| 471
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
#==============================================================================
# DESCRIPTION: Test script for the 'uname' command.
# The 'uname' is a command which prints system information like the kernel name
#==============================================================================
# NOTE(review): ${log_file} and assert_passed are supplied by the surrounding
# test harness — presumably sourced before this script runs; confirm there.
# Prints system information like the kernel name
uname -a > ${log_file} 2>&1
# Check result of command uname with expected output
assert_passed $? 0
| true
|
bbefe534007afd499fd4927a69268a30c4dde94d
|
Shell
|
LUGatGT/Scripts
|
/Hacks/Gizoogle/tranzizzle
|
UTF-8
| 450
| 3.375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Send text through gizoogle.net's "textilizer" and print the translation.
# Input comes from stdin when piped (or when no argument is given),
# otherwise from the file named in $1.
INPUT=
# `tty` prints "not a tty" when stdin is not a terminal, i.e. we are being
# piped into.
if [[ $(tty) = 'not a tty' || "$1" = '' ]]; then
    INPUT=$(cat -)
elif [[ -f "$1" ]]; then
    INPUT=$(cat "$1")
else
    echo "Could not find file '$1'"
    exit 2
fi
# POST the text url-encoded to the translator endpoint.
OUTPUT=$(curl -s --data-urlencode "translatetext=$INPUT" \
    --data 'translate=Tranzizzle+Dis+Shiznit' \
    'http://gizoogle.net/textilizer.php' )
# Extract the contents of the response <textarea> (-z lets the Perl regex
# scan across newlines) and strip the leftover tag attributes on line 1.
echo "$OUTPUT" | grep -Pzo '(?s)(?<=textarea).+?(?=</textarea>)' | sed '1 s/.*\/>//'
| true
|
4a8b6371e42c0bbc9e30c04d57bf99720b1fc078
|
Shell
|
PhilyT/SystemDistrib
|
/build.sh
|
UTF-8
| 569
| 2.75
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Clean, package and launch the project with Maven.
# NOTE(review): `set -a` auto-exports every assigned variable; it was likely
# meant to be `set -e`.  Kept as-is — failures are now checked explicitly.
set -a
echo ""
echo "--- Nettoyage de la version précédente de maven..."
# Abort if the clean fails: the environment is broken.
mvn clean || exit 1
echo "--- Nettoyage terminé."
echo ""
echo ""
echo "--- Packaging du projet..."
if [ -f "pom.xml" ]; then
    # BUG FIX: a failed `mvn package` previously went unnoticed and the
    # script happily launched a stale (or missing) jar.
    mvn package || exit 1
else
    echo ""
    echo "Le fichier pom.xml n'existe pas !"
    exit 1;
fi
echo "--- Packaging du projet terminé."
echo ""
echo ""
echo "|---------------------------------------|"
echo "|----- MISE EN PRODUCTION TERMINÉE -----|"
echo "|---------------------------------------|"
echo ""
java -jar annuaire-jar-with-dependencies.jar
| true
|
caaf2f78b18686a4f298b1b1f0e0d0c4a32682f3
|
Shell
|
mpolitano/bounded-exhaustive-api-testgen
|
/scripts/run-inclusion-korat-in-beapi.sh
|
UTF-8
| 1,311
| 3.484375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Compare Korat-generated structures with BEAPI-generated ones for a case
# study, then check inclusion of the former in the latter.
projectsdir=$BE_EXP_SRC
scriptsdir=$projectsdir/scripts
# common.sh / process-results.sh provide shared helpers, including
# process-results-inclusion used at the bottom of this script.
source $scriptsdir/common.sh
source $scriptsdir/process-results.sh
# Wall-clock limit for each generation run (GNU timeout duration syntax).
TO=60m
# Serialize the objects generated by one technique (korat|beapi) for the
# given budget/scope, bounded by the $TO timeout.
function run_serialize() {
    technique=$1
    budget=$2
    cmd="timeout $TO ./run-begen-serialize-experiment.sh $project $casestudy $technique $budget matching builders"
    echo "************"
    echo ">> Executing: $cmd"
    bash -c "$cmd"
    # GNU timeout exits with status 124 when the limit was reached.
    if [ $? -eq 124 ]; then
        echo ">> Execution timed out"
        # BUG FIX: was `break;`, which is invalid outside a loop; report the
        # failure to the caller instead.
        return 1
    fi
}
# Check that every structure serialized by Korat is also produced by BEAPI,
# writing a result log and the set of structures that are missing.
function run_korat_inclusion() {
    # Check inclusion
    koratdir=results-begen-inclusion/$project/$casestudy/korat/$scopeKORAT
    koratstrs=$koratdir/korat-tests/objects.ser
    bestrs=results-begen-inclusion/$project/$casestudy/beapi/matching/builders/$scopeBEAPI/beapi-tests/objects.ser
    config=properties/scope$scopeKORAT.all.canonicalizer.properties
    reslog=$koratdir/inclusion-results-$scopeBEAPI.txt
    diff=$koratdir/structures-not-included-$scopeBEAPI.txt
    echo "************"
    cmd="./check-inclusion.sh $project $koratstrs $bestrs $config $diff > $reslog"
    echo ">> Executing: $cmd"
    bash -c "$cmd"
}
######korat###############
# Positional arguments: project, case-study class, Korat scope, BEAPI scope.
project="$1"
casestudy="$2"
scopeKORAT=$3
scopeBEAPI=$4
# Serialize the objects produced by each technique, then check inclusion of
# the Korat structures in the BEAPI ones.
run_serialize korat $scopeKORAT;
run_serialize beapi $scopeBEAPI;
run_korat_inclusion;
echo "************"
echo "Report"
# process-results-inclusion comes from the sourced process-results.sh.
process-results-inclusion "korat"
echo "************"
|
0e3e8ecaa8a0ee8026b9be78cc10dcd7e3e4864b
|
Shell
|
progr1mmer/big-whale
|
/bin/restart.sh
|
UTF-8
| 306
| 3.203125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Restart the big-whale service: kill any running instance, then start a new
# one with per-day log files.
source /etc/profile
# pgrep's exit status replaces the `wc -l`-and-compare round trip.
if ! pgrep -f big-whale > /dev/null; then
    echo "big-whale is not running"
else
    # NOTE(review): SIGKILL gives the JVM no chance to shut down cleanly —
    # consider trying SIGTERM first.
    pgrep -f big-whale | xargs kill -9
    echo "big-whale stopped"
fi
curr=$(date "+%Y%m%d")
echo "big-whale starting..."
nohup java -jar big-whale.jar >> "application-$curr.log" 2>&1 &
| true
|
b83973ad9f8b3219304e06eb44556b4d388bd7a7
|
Shell
|
monsterzzz/myBash
|
/test/2.sh
|
UTF-8
| 897
| 3.375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Bash practice script: variables, readonly, strings and arrays.  Several
# statements below FAIL on purpose to demonstrate shell error messages.
name="monster"
echo $name
echo ${name}
# read dir file
# shellcheck disable=SC2045
#for i in $(ls /etc) ; do
# echo "$i"
#done
# readOnly var
readOnlyVar="monster"
readonly readOnlyVar # can not change var , because it is read only
# Deliberate error: assigning to a readonly variable prints a diagnostic.
readOnlyVar="monster1"
echo ${readOnlyVar}
# unset delete var
# Deliberate error: a readonly variable cannot be unset either.
unset readOnlyVar # can not delete this var, because it is read only!
echo ${readOnlyVar}
# string add
str=${name}${readOnlyVar}
echo $str
str="hello,${name}"
echo $str
str="hello"",hello"
echo $str
# get string length
echo ${#str}
strLen=${#str}
echo ${str:1:strLen}
echo ${str:1:${#str}}
# find index of string
string="runoob is a great site"
echo ${str}
echo `expr index "$string" io` # prints 4 (1-based index of the first 'i' or 'o')
echo `expr index "$str" s`
# array
arr=("hello1" "hello2" "hello3" "hello4" "hello5")
# shellcheck disable=SC2068
for i in ${arr[@]} ; do
    echo "${i}"
done
| true
|
1c06f4194692e74eee46a97a3a52b83254350be0
|
Shell
|
songweijia/hdfsrs
|
/experiments/readwrite/run/writthp.sh
|
UTF-8
| 1,428
| 2.90625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# $1 - type
# $2 - block size
# $3 - paket size
# $4 - page size
# $5 - write size
# $6 - duration sec
# $7 - snapshot?
# Run one write-throughput measurement.
#   $1 experiment type   $2 block size   $3 packet size   $4 page size
#   $5 write size        $6 duration(s)  $7 snapshot flag
# Writes bytes/second to wt_<params>.
function run(){
    local EXP=$1 BS=$2 PKS=$3 PGS=$4 WS=$5 DUR=$6 SNAP=$7
    #prepare
    ./prepare.sh "$EXP" "$BS" "$PKS" "$PGS"
    #run
    hadoop jar Exp1.jar timeappend /timefile "$WS" "$DUR" "$SNAP"
    #collect
    # $( ) replaces backticks; size is column 5 of the HDFS listing.
    local BYTES
    BYTES=$(hdfs dfs -ls / | grep timefile | awk '{print $5}')
    # Throughput = bytes / duration; $(( )) replaces the legacy `expr`.
    echo $(( BYTES / DUR )) > "wt_${EXP}_${BS}_${PKS}_${PGS}_${WS}_${DUR}_${SNAP}"
}
# Sweep the parameter space; the commented-out alternatives record values
# used in other experiment runs.
for exp in org crtc
# for exp in crtc
do
    for bs in 64M
    do
    # for pks in 16384 32768 65536 131072 262144 524288 1048576 2097152 4194304
    # for pks in 16384 32768 65536 131072 262144 524288 1048576 2097152 4194304
    for pks in 8388608 16777216 33554432
    # for pks in 65536
    # for pks in 16384 32768 65536 131072 262144 524288 1048576 2097152 4194304
    # for pks in 32768 131072 524288 2097152
    do
        for pgs in 4096
        do
        # for ws in 256 1048576
        for ws in 256
        do
            for dur in 60
            do
            # for snap in true false
            for snap in false
            do
                # Run the measurement, then pull the datanode logs for this
                # packet size off the cluster for later inspection.
                run $exp $bs $pks $pgs $ws $dur $snap > clientlog_${exp}_${bs}_${pks}_${pgs}_${ws}_${dur}_${snap}
                scp weijia@compute29:/home/weijia/opt/hadoop/logs/hadoop-weijia-datanode-compute29.out ./hdfs_${pks}_dn.out
                scp weijia@compute29:/home/weijia/opt/hadoop/logs/hadoop-weijia-datanode-compute29.log ./hdfs_${pks}_dn.log
            done
            done
        done
        done
    done
    done
done
| true
|
2f652cc32ab446562ebb8870be6ed400b350eaf8
|
Shell
|
autom8ter/docker-protoc
|
/gwy/generate_gateway.sh
|
UTF-8
| 3,493
| 4.125
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
set -e
# Print usage/help to stdout.
printUsage() {
    echo "protoc-gen-gwy generates a ready-to-build gRPC gateway server."
    echo ""
    echo "Options:"
    echo "-h, --help Show this message."
    echo "-i, --includes INCLUDES Extra includes (optional)."
    echo "-f, --file FILE Relative path to the proto file to build the gateway from."
    echo "-s, --service SERVICE The name of the service to build the gateway for."
    echo "-a, --additional_interfaces The set of additional interfaces to bind to this gateway."
    echo "-o, --out DIRECTORY Optional. The output directory for the gateway. By default, gen/grpc-gateway."
    echo "--go-package-map Optional. Map proto imports to go import paths"
}
# Path to the proto file
FILE=""
# Name of the service.
SERVICE=""
# Name of additional interfaces
ADDITIONAL_INTERFACES=""
# Output directory.
OUT_DIR=""
GO_PACKAGE_MAP=""
# Extra includes.
INCLUDES=""
# Hand-rolled long-option parser: each arm shifts off its flag (and value,
# if any) until the argument list is exhausted.
while test $# -gt 0; do
    case "$1" in
        -h|--help)
            printUsage
            exit 0
            ;;
        -i|--includes)
            shift
            if test $# -gt 0; then
                # Accumulate repeated -i flags for entrypoint.sh.
                INCLUDES="$INCLUDES -i $1"
                shift
            else
                echo "Missing extra include directory name for --includes."
                echo ""
                printUsage
                exit 1
            fi
            ;;
        -f|--file)
            shift
            if test $# -gt 0; then
                FILE=$1
                shift
            else
                echo "Missing file name for --file."
                echo ""
                printUsage
                exit 1
            fi
            ;;
        -s|--service)
            shift
            if test $# -gt 0; then
                SERVICE=$1
                shift
            else
                echo "Missing service name for --service."
                echo ""
                printUsage
                exit 1
            fi
            ;;
        -a|--additional_interfaces)
            shift
            # Value is optional; absence is tolerated silently.
            if test $# -gt 0; then
                ADDITIONAL_INTERFACES=$1
                shift
            fi
            ;;
        -o|--out)
            shift
            if test $# -gt 0; then
                OUT_DIR=$1
                shift
            else
                echo "Missing output directory for --out"
                echo ""
                printUsage
                exit 1
            fi
            ;;
        --go-package-map)
            # Only consume a value if the next token exists and is not
            # another flag; a trailing comma is appended deliberately.
            if [ "$#" -gt 1 ] && [[ $2 != -* ]]; then
                GO_PACKAGE_MAP=$2,
                shift
            fi
            shift
            ;;
        *)
            printUsage
            exit 1
            ;;
    esac
done
# --file and --service are mandatory.
if [[ -z $FILE ]]; then
    echo "Error: You must specify the proto file name"
    printUsage
    exit 1
fi
if [[ -z $SERVICE ]]; then
    echo "Error: You must specify the proto service name"
    printUsage
    exit 1
fi
if [[ -z $OUT_DIR ]]; then
    OUT_DIR="./gen/grpc-gateway"
fi
# Generate the gateway files
PROTO_DIR=$(dirname $FILE)
GEN_PATH=${OUT_DIR}/gen/
entrypoint.sh -d ${PROTO_DIR} -l go --with-gateway -o ${GEN_PATH} --go-package-map ${GO_PACKAGE_MAP} ${INCLUDES}
# Locate the directory containing the generated *.gw.go, relative to OUT_DIR.
GATEWAY_IMPORT_DIR=`find ${GEN_PATH} -type f -name "*.gw.go" -print | head -n 1 | xargs -n1 dirname`
GATEWAY_IMPORT_DIR=${GATEWAY_IMPORT_DIR#"$OUT_DIR/"}
# Find the Swagger file.
PROTO_FILE=$(basename $FILE)
SWAGGER_FILE_NAME=`basename $PROTO_FILE .proto`.swagger.json
# Copy and update the templates.
renderizer --import=${GATEWAY_IMPORT_DIR} --swagger=${SWAGGER_FILE_NAME} /templates/config.yaml.tmpl > $OUT_DIR/config.yaml
renderizer --import=${GATEWAY_IMPORT_DIR} --swagger=${SWAGGER_FILE_NAME} /templates/Dockerfile.tmpl > $OUT_DIR/Dockerfile
MAIN_DIR=${OUT_DIR}/cmd/gateway
mkdir -p ${MAIN_DIR}
renderizer --import=${GATEWAY_IMPORT_DIR} --service=${SERVICE} --additional=${ADDITIONAL_INTERFACES} /templates/main.go.tmpl > $MAIN_DIR/main.go
| true
|
23d6e2a0d8a7b200932bedae70f3c3b1a2a2deb4
|
Shell
|
cuongquay/monbus-restapi
|
/code-push.sh
|
UTF-8
| 1,397
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/bash
# Simplify Automatic Deployment Script
# Project: monbus-restapi
# Deployment: monbus-demo
# Region: eu-west-1
# Profile: simplify-eu
# Version: 0.1.1
INITIAL_DIR=$PWD
node code-init.js --input monbus-demo.json
# code-versions.sh exports per-function settings as
# ENV_<function>_DEPLOYMENT_STAGE / ENV_<function>_ENFORCEMENT_PACKAGE.
source ${INITIAL_DIR}/code-versions.sh ${1}
# Build and publish one lambda function; exits 255 on any npm failure.
# (Replaces three copy-pasted stanzas; settings resolved via indirect
# expansion on the ENV_<name>_* variables.)
deploy_function() {
    local name=$1
    local stage_var="ENV_${name}_DEPLOYMENT_STAGE"
    local package_var="ENV_${name}_ENFORCEMENT_PACKAGE"
    export DEPLOYMENT_STAGE=${!stage_var}
    export ENFORCEMENT_PACKAGE=${!package_var}
    cd "${INITIAL_DIR}/monbusRestapi/${name}" && npm install && npm run build && npm run deploy && cd -
    if [ $? -ne 0 ]; then
        echo "Publishing ${name} (latest) version has failed!"
        exit 255
    fi
}
deploy_function functionForGetTimetables
deploy_function eventFunctionStationsSync
deploy_function functionForStations
| true
|
9bd1b17742b9686dbdef95b4a9fa42f29de084a0
|
Shell
|
cwinux/ducter
|
/server/script/svr_script/search/dcmd_svr_search_homepage.script
|
UTF-8
| 2,814
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
# test
#DCMD_SVR_USER=search
#DCMD_SVR_PATH=/letv/search
#DCMD_SVR_POOL=test_wanxiang_tdxy
###################################################################
PATH=$PATH:/usr/sbin:/sbin:/bin
export PATH
echo $PATH
# Determine the deployment directory from the pool name.
# NOTE(review): when DCMD_SVR_POOL matches neither keyword, SVR_PATH stays
# unset and every function below operates on an empty path — confirm that
# the caller guarantees a matching pool name.
WX_KEYWORD="wanxiang"
GG_KEYWORD="guoguang"
if [[ $DCMD_SVR_POOL == *$WX_KEYWORD* ]];then
    SVR_PATH="/letv/deploy/data_access_proxy_pano"
elif [[ $DCMD_SVR_POOL == *$GG_KEYWORD* ]];then
    SVR_PATH="/letv/deploy/data_access_proxy"
fi
# Create the service user and deployment directories; exits with the status
# of the final sanity check.
prepare(){
    echo "Begin prepare........."
    echo "Action: $1"
    echo "service home: $2"
    echo "prepare enviroment:"
    # Ensure the service user exists.
    id $DCMD_SVR_USER >/dev/null
    if [ $? -ne 0 ];then
        useradd -d $DCMD_SVR_PATH $DCMD_SVR_USER
        id $DCMD_SVR_USER
    fi
    echo "$DCMD_SVR_USER user is ready."
    # Ensure the deployment directories exist (back up a conf symlink first).
    [[ -L $SVR_PATH/conf ]] && cd $SVR_PATH && mv conf conf.bak
    [[ ! -d "$SVR_PATH" ]] && mkdir -p $SVR_PATH
    [[ ! -d "$DCMD_SVR_PATH" ]] && mkdir -p $DCMD_SVR_PATH
    chown -R $DCMD_SVR_USER.$DCMD_SVR_USER $DCMD_SVR_PATH $SVR_PATH
    id $DCMD_SVR_USER >/dev/null && [[ -d "$DCMD_SVR_PATH" ]] && [[ -d "$SVR_PATH" ]]
    exit $?
}
# Start the service via its bundled start.sh.
start(){
    echo "Begin start........."
    echo "Action: $1"
    echo "service home: $2"
    echo "start enviroment:"
    /bin/sh ${SVR_PATH}/sbin/start.sh start
    exit $?
}
# Stop the service; exits 0 if it was never installed.
stop(){
    echo "Begin stop........."
    echo "Action: $1"
    echo "service home: $2"
    echo "stop enviroment:"
    SERVICE_SBIN="${SVR_PATH}/sbin/start.sh"
    if [ -f "${SERVICE_SBIN}" ];then
        /bin/sh $SERVICE_SBIN stop
    else
        echo "don't init."
        exit 0
    fi
    exit $?
}
# Report whether a process of this deployment is running (exit status of the
# process listing pipeline: 0 = found).
check(){
    echo "Begin check........."
    echo "Action: $1"
    echo "service home: $2"
    echo "check enviroment:"
    ps -eaf |egrep "`echo $SVR_PATH|awk -F '/' '{print $NF}'`/" |grep -v grep
    exit $?
}
# Install package and/or environment files into SVR_PATH.
#   $3 install type: all | pkg | env    $4/$5: source paths
install(){
    echo "Begin install........."
    echo "Action: $1"
    echo "service home: $2"
    echo "install type: $3"
    LOG_DIR=$SVR_PATH/log
    [[ ! -d "$LOG_DIR" ]] && /bin/mkdir -p $LOG_DIR
    if [ $3 == "all" ];then
        echo "new_pkg_path: $4"
        echo "new_env_path: $5"
        SERVICE_PKG=$4
        SERVICE_ENV=$5
        /bin/cp -arf $SERVICE_PKG/* $SVR_PATH/
        chmod +x $SVR_PATH/bin/*
        /bin/cp -arf $SERVICE_ENV/* $SVR_PATH/
        echo "----> copy complete!"
    elif [ $3 == "pkg" ];then
        echo "new_pkg_path: $4"
        SERVICE_PKG=$4
        /bin/cp -arf $SERVICE_PKG/* $SVR_PATH/
        chmod +x $SVR_PATH/bin/*
        echo "----> copy complete!"
    elif [ $3 == "env" ];then
        echo "new_env_path: $4"
        SERVICE_ENV=$4
        /bin/cp -arf $SERVICE_ENV/* $SVR_PATH/
        # md5sum $SERVICE_HOME/conf/*
        echo "----> copy complete!"
    else
        echo "invalid install type:$3"
    fi
    echo "End install."
}
# Dispatch on the action verb.
# BUG FIX: the original chain of `[ $1 == "start" ]` tests fails with a
# syntax error when the script is invoked without arguments (unquoted,
# empty $1).  `case "${1:-}"` handles that safely, and "$@" preserves
# argument word boundaries where unquoted $* did not.
case "${1:-}" in
    start)
        start "$@" ;;
    stop)
        stop "$@" ;;
    check)
        check "$@" ;;
    install)
        install "$@" ;;
    prepare)
        prepare "$@" ;;
    *)
        echo "invalid action" ;;
esac
| true
|
375a280bb4a16b5d9f1294535a7f4d81644f40db
|
Shell
|
kelseysbrose/COMP2101
|
/bash/helloworldugly.sh
|
UTF-8
| 707
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# helloworldugly.sh - an exercise in obfuscation
# This script displays the string “Hello World!”
# and then displays its PID
# Function Definitions
# Print all arguments joined by spaces on one line.
function output-string { echo "$*"; }
# Main Script Body
# This is a silly way of creating the output text
# We start with similar text and stream edit it in a pipeline
# This is a trivial form of code obfuscation
# This version might require installing rot13 first
which rot13 >/dev/null || sudo apt install rot13
# rot13 the seed text, then let sed/tr/awk massage the letters until it
# reads "Hello World!" (\x20 is a space, \41 is '!').
output-string $(rot13 <<< "uryo jbyq" |
    sed -e "s/b/o/g" -e "s/l/ll/" -e "s/ol/orl/" |
    tr "h" "H"|tr "w" "W"|
    awk '{print $1 "\x20" $2 "\41"}')
# (($$*4-24)/2+12)/2 simplifies algebraically to $$ — the shell's own PID.
bc <<< "(($$ * 4 - 24)/2 + 12)/2" |
    sed 's/^/I am process # /'
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.