blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
513446d4e6d970ab3f3d73fafc5e5a848fe9a612
|
Shell
|
bala4rtraining/awesome-cka-guide
|
/workspace/SYSDIG/ovn_infrastructure/jenkins-sh/grafanaobj.sh
|
UTF-8
| 8,097
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/sh
#
# File: grafanaobj.sh
# Purpose: 1. Download grafana objects from the grafana server to create a tar.gz file
# 2. Upload grafana objects from tar.gz file replacing any with same filename
# 3. optionally allow for filtering (on import and export) based on filename regex
# 4. retrieve latest tarball from OVNGIT as a separate step,
# (The file is uploaded into OVNGIT by the jenkins release job)
#
# Requires: jq (json cli processor)
#
#Defaults
tarballfile="grafanaobj.tar"
fileselector='*ovn*' # default file selector
# Options
# -s <glob> overrides the default filename selector; anything else prints usage.
while getopts s: opt
do
case $opt in
s) fileselector=$OPTARG
;;
[?])
echo >&2 "Usage: $0 [-s fileselector] upload OVNDEV|OVNTEST|OVNTEST1|OVNTEST2|OVNPROD-KHE|OVNPROD-KHC|OVNPROD-DCI|OVNPROD-TELIN"
echo >&2 " or $0 retrieve OVNDEV|OVNTEST|OVNTEST1|OVNTEST2|OVNPROD-KHE|OVNPROD-KHC|OVNPROD-DCI|OVNPROD-TELIN \t\t\t(get last tarball from OVNGIT)"
echo >&2 " or: $0 maketarball OVNDEV|OVNTEST|OVNTEST1|OVNTEST2|OVNPROD-KHE|OVNPROD-KHC|OVNPROD-DCI|OVNPROD-TELIN \t\t\t(make a tarball)"
echo >&2 " Note: when using a wildcard fileselector, use doublequotes e.g. -s\"s.OVN*\" "
exit 1
;;
esac
done
shift $((OPTIND - 1 )) # remove options leave arguments
# positional parameters
function_type=$1
ovnenvironment=$2
# Map the environment name to its grafana base URL.
# NOTE(review): credentials are hardcoded into every URL below and will leak
# into logs and process listings — consider moving them to curl's --netrc or
# an environment variable.
case $ovnenvironment in
OVNDEV)
grafana_url="https://ovndev:OpenVisaNetDev@ovndev.visa.com/grafana"
;;
OVNTEST)
grafana_url="https://ovndev:OpenVisaNetDev@ovntest.visa.com/grafana"
;;
OVNTEST1)
grafana_url="https://ovndev:OpenVisaNetDev@ovntest.visa.com:8443/grafana-test1"
;;
OVNTEST2)
grafana_url="https://ovndev:OpenVisaNetDev@ovntest.visa.com:8443/grafana-test2"
;;
OVNCERT)
grafana_url="https://ovndev:OpenVisaNetDev@ovncert.visa.com:8443/grafana"
;;
OVNPROD-KHE)
grafana_url="https://ovndev:OpenVisaNetDev@sl73ovnmgp01.visa.com:8443/grafana"
;;
OVNPROD-KHC)
grafana_url="https://ovndev:OpenVisaNetDev@sl73ovnmgp02.visa.com:8443/grafana"
;;
OVNPROD-DCI)
grafana_url="https://ovndev:OpenVisaNetDev@sl73ovnmgp01.visa.com:8444/grafana-dci"
;;
OVNPROD-TELIN)
grafana_url="https://ovndev:OpenVisaNetDev@sl73ovnmgp02.visa.com:8444/grafana-telin"
;;
*)
# Unknown (or missing) environment name: fall back to OVNDEV.
echo "== Environment will default to OVNDEV =="
ovnenvironment="OVNDEV"
grafana_url="https://ovndev:OpenVisaNetDev@ovndev.visa.com/grafana"
;;
esac
# Define the export function which will export objects from grafana (used when backing up)
# Export grafana dashboards into a tarball (used when backing up).
#   $1 - grafana base URL (credentials embedded by the caller)
#   $2 - output tarball path; gzipped on success ($2.gz)
#   $3 - filename glob; only matching objects are kept
# Returns 0 on success, 1 on any curl/jq failure.
# NOTE: uses bash-isms (function keyword, C-style for) although the file's
# shebang is /bin/sh — run under bash.
function exportobjs() {
    url=$1       # grafana_url
    tarball=$2   # output file
    selector=$3  # file selection allowing wildcard
    touch "$tarball"   # start with it empty
    mkdir -p .workdir  # working directory (-p tolerates leftovers of a failed run)
    count=0
    printf "Starting export+download of grafana objects\n"
    printf "\tExport will pull objects from grafana URL:\t%s\n" "$url"
    printf "\tExport will filter using: \t%s\n" "$selector"
    # get the list of dashboards
    curl_resp=$(curl -k -H "Content-Type: application/json" -X GET "$url/api/search" -o .workdir/temp.json)
    rc=$?
    if [ "$rc" -ne 0 ]; then
        printf 'Error retrieving grafana object data from %s rc=%s\n' "$url" "$rc"
        printf 'Msg from Curl: %s\n' "$curl_resp"
        return 1
    fi
    jsondatalist=$(jq . .workdir/temp.json)
    rc=$?
    if [ "$rc" -ne 0 ]; then
        printf 'Error parsing the first JSON object from %s rc=%s\n' "$url" "$rc"
        return 1
    fi
    cp .workdir/temp.json DEBUG.json
    unlink .workdir/temp.json # don't need it anymore
    # Quoted expansion: the old unquoted 'printf "%s " $jsondatalist' word-split
    # the JSON and could corrupt string values before they reached jq.
    num=$(printf '%s' "$jsondatalist" | jq length 2>/dev/null)
    rc=$?
    if [ "$rc" -ne 0 ]; then
        printf 'Error No valid json data returned from curl call to grafana %s\n' "$rc"
        return 1
    fi
    for ((i = 0; i < num; i++))
    do
        # jq -r emits raw strings — replaces the old jq|sed '"'-stripping pipeline.
        uri=$(printf '%s' "$jsondatalist" | jq -r ".[$i].uri")
        type=$(printf '%s' "$jsondatalist" | jq -r ".[$i].type")
        slug="${uri//\//_}"  # flatten '/' so the slug is a plain filename
        case $type in
        'dash-db')
            filename="$slug.json"
            ;;
        *)
            echo "$type: unknown" >&2
            filename="$slug.json"
            ;;
        esac
        curl -k -sS -H "Content-Type: application/json" -XGET "$url/api/dashboards/$uri" -o ".workdir/$filename"
        rc=$?
        if [ "$rc" -ne 0 ]; then
            printf 'Error retrieving grafana object data from %s rc=%s\n' "$url" "$rc"
            return 1
        fi
        # $selector is deliberately unquoted so the glob expands; only the
        # current file is in .workdir at this point.
        sel=$(ls -1 .workdir/$selector 2>/dev/null | wc -l)
        if [ "$sel" -ne 0 ]; then # only add it if it matches the selection criteria
            printf '\tProcessing %s\n' "$filename"
            mv ".workdir/$filename" "$filename"
            tar -uf "$tarball" "$filename"
            count=$((count + 1))
            unlink "$filename"
        else
            unlink ".workdir/$filename"
        fi
    done
    rmdir .workdir
    gzip -f "$tarball"
    printf "Done: %s.gz has been created.(%d files)\n" "$tarball" "$count"
}
# Define the import function which will import objects INTO grafana (used when uploading)
# Import grafana objects from a tar.gz INTO grafana (used when uploading).
#   $1 - grafana base URL
#   $2 - input .tar.gz file
#   $3 - filename glob selecting which archive members to import
# Returns 0 on success, 1 on a curl failure or an unexpected API response.
function importobjs() {
    url=$1      # here is the grafana_url
    tgzinput=$2
    selector=$3
    count=0
    printf "Target grafana URL:\t%s\n" "$url"
    printf "\tImport will read from %s\n" "$tgzinput"
    printf "\tImport will filter using: %s\n" "$selector"
    # Word-splitting of the tar listing is relied on here, so member names
    # must not contain whitespace (export-side slugs never do).
    for tempfile in $(tar -tzf "$tgzinput" --wildcards "$selector")
    do
        tar -xzf "$tgzinput" "$tempfile"
        slug=$(jq -r .meta.slug "$tempfile")
        type=$(jq -r .meta.type "$tempfile")
        printf '\tImporting %s (%s)\n' "$tempfile" "$slug"
        case $type in
        db)
            # The upload API needs id=null so grafana matches on slug, not id.
            dashboard=$(jq '.dashboard' "$tempfile" | jq '.id=null')
            uploadjson=$(printf '{ "overwrite": true, "dashboard" : %s }' "$dashboard")
            http_api='api/dashboards/db'
            ;;
        *)
            echo "$type: unknown" >&2
            return 1
            ;;
        esac
        unlink "$tempfile"
        curl_resp=$(curl -k -H "Content-Type: application/json" -X POST -d "$uploadjson" "$url/$http_api")
        rc=$?
        if [ "$rc" -ne 0 ]; then
            printf 'Error storing grafana object into %s rc=%s\n' "$url" "$rc"
            printf '%s ' "$curl_resp"
            printf 'Exit\n'
            return 1
        elif [ "${curl_resp/success}" = "$curl_resp" ]; then
            # bash substring deletion: an unchanged response means the word
            # "success" was absent from grafana's reply.
            printf 'Unexpected Curl Response:'
            printf '%s ' "$curl_resp"
            printf 'Exit\n'
            return 1
        else
            count=$((count + 1))
        fi
    done
    printf 'Successful uploads: %d\n' "$count"
}
# Define the retrieve function, assumes OVN GIT data was uploaded and tagged using jenkins BUILD conventions
# Fetch the newest grafanaobj.tar.gz for an environment from the OVNGIT
# gitweb UI by scraping its HTML.
#   $1 - OVN environment name, used to pick the matching build/grafanaobj branch
# Writes grafanaobj.tar.gz into the current directory and lists its contents.
function retrieve() {
ovnenvironment=$1 # here is the OVN environment (OVNDEV/OVNTEST/OVNPRODOCE/OVNPRODOCC)
# Scrape the heads page for the newest refs/heads/build/grafanaobj/<env> ref.
# NOTE(review): this greps gitweb HTML, so it is tightly coupled to gitweb's
# markup ("list name" class attributes) — verify after any gitweb upgrade.
lastbranch=$(curl -s -f -XGET "https://sl55ovnapq01.visa.com/git/?p=pod1;a=heads" | grep "list name"| grep 'refs/heads/build/grafanaobj' | grep "$ovnenvironment"| tr '";' "\n\n"| grep refs |head -1)
printf 'OVNGIT last branch: %s\n' $lastbranch
# From that branch's commit page, extract the blob URL of grafanaobj.tar.gz.
bloburl=$(curl -s -f -XGET "https://sl55ovnapq01.visa.com/git/?p=pod1;a=commit;$lastbranch" | grep "list" | tr '"' "\n"| grep grafanaobj.tar.gz| head -1)
# NOTE(review): $retrieve is assigned but never checked; a failed download is
# only detected when tar below fails to read the file.
retrieve=$(curl -s -f -XGET "https://sl55ovnapq01.visa.com$bloburl" -o grafanaobj.tar.gz)
contents=$(tar -tzf grafanaobj.tar.gz)
printf "Contents of grafanaobj.tar.gz:\n"
# $contents is intentionally unquoted: word-splitting prints one name per line.
printf '\t%s\n' $contents
}
# Dispatch on the first positional parameter.
case $function_type in
upload)
importobjs "$grafana_url" "$tarballfile.gz" "$fileselector"
;;
maketarball)
exportobjs "$grafana_url" "$tarballfile" "$fileselector"
;;
retrieve)
retrieve "$ovnenvironment"
;;
*)
# Fix: 'echo "\n..."' only expands \n under some /bin/sh implementations
# (dash) and prints the backslashes literally under bash; printf is portable.
printf "\nERROR: require 'upload', 'maketarball', or 'retrieve' as first parameter\n\n"
exit 1
;;
esac
| true
|
dcbd263198080ca1c6bcda3fc87e1067927dfe93
|
Shell
|
wplib/box-cli2
|
/cmd/util/cmd/get-sha1-checksum/get-sha1-checksum.sh
|
UTF-8
| 411
| 3.84375
| 4
|
[] |
no_license
|
#
# Command: box util get-sha1-checksum <filename>
#
# Returns just the sha1 checksum for a file
# (omits the filename, unlike shasum and sha1sum)
#
# NOTE: sourced by the box-cli framework, which supplies stdErr, hasError and
# setQuiet and sets BOXCLI_PLATFORM; 'return' (not 'exit') is used so the
# caller keeps control on bad arguments.
#
if (( 0 == "$#" )) ; then
  stdErr "No filename passed."
  return 1
fi
filename="$1"
if [ "${BOXCLI_PLATFORM}" = "macOS" ] ; then
  # macOS ships shasum (SHA-1 by default) rather than sha1sum.
  # Fix: the command previously hashed "$(unknown)" — a leftover placeholder —
  # instead of the filename argument.
  sha1="$(shasum "${filename}")"
else
  sha1="$(sha1sum "${filename}")"
fi
# Both tools print "<digest>  <file>"; strip everything after the first space.
echo "${sha1%% *}"
hasError && exit 1
setQuiet
| true
|
914d3431dc0244d2b5ec4486309be8724f98ba98
|
Shell
|
prankard/RetroPie-Randomizers
|
/functions.sh
|
UTF-8
| 6,803
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
###################################################
# Test
#
# Globals:
# None
#
# Arguments:
# $1 system-name (eg. "snes")
# $2 rom filename (eg. "SuperMetroidRandomized.smc")
# $3 image (eg. "image.png")
#
# Returns:
# Nothing
#
# NOTE(review): despite the header, this writes the global $FILENAME (not $3)
# into the <desc> node. Also the quoted "~/..." path will NOT tilde-expand —
# xmlstarlet receives a literal "~" directory; verify against callers.
# Distinct from addGameToXML below: bash function names are case-sensitive.
function addGameToXml()
{
xmlstarlet edit --inplace --update "/gameList/game[path='./$2']/desc" --value "$FILENAME" "~/.emulationstation/gamelists/$1/gamelist.xml"
}
# end test
###################################################
# hasRom
#
# Globals:
# None
#
# Arguments:
# $1 system folder
# $2 rom name
#
# Returns:
# Nothing
#
# NOTE(review): the return convention is inverted relative to the usual shell
# idiom — 0 means the rom is NOT in the gamelist, 1 means it IS present.
# installPlugin relies on this ('if [[ "$?" == "1" ]]'), so do not "fix" it
# without updating every caller.
function hasRom() {
xml_command="xmlstarlet sel -t -c \"count(/gameList/game[name='$2'])\" ~/.emulationstation/gamelists/$1/gamelist.xml"
# eval is needed so the embedded quotes around the XPath survive; $2 is
# expanded twice, so rom names must not contain shell metacharacters.
count=$(eval $xml_command)
if [[ "$count" == "0" ]]; then
return 0
fi
return 1
} # end hasRom
###################################################
# copyRom
#
# Looks a rom's file path up in the gamelist and copies it to a destination
# path, extracting it first when the source is a zip.
#
# Globals:
# FILENAME - set to the name of the rom found inside a zip
#
# Arguments:
# $1 system folder (eg. 'snes')
# $2 rom name (eg. 'Super Metroid', will find filepath from gamelist)
# $3 rom destination path (eg. '~/MyFilePath/SuperMetroidCopy.smc' must include correct extension to find rom in zip folder)
#
# Returns:
# Nothing
#
function copyRom() {
xml_command="xmlstarlet sel -t -v \"/gameList/game[name='$2']/path\" ~/.emulationstation/gamelists/$1/gamelist.xml"
rom_filename=$(eval $xml_command)
# The literal ~ and inner quotes are expanded later by eval, so paths with
# spaces survive the unzip/cp command lines below.
full_path="~/RetroPie/roms/$1/'$rom_filename'"
source_rom_ext="${rom_filename##*.}"
dest_rom_ext="${3#*.}"
echo $source_rom_ext
echo $dest_rom_ext
if [[ "$source_rom_ext" == "zip" ]]; then
line="unzip -d dump $full_path"
eval $line
# Fix: the glob must be unquoted so it actually expands; the original
# quoted "dump/*.$dest_rom_ext", leaving $f the literal pattern.
for f in dump/*.$dest_rom_ext
do
# Fix: keep the full filename. The original called
# basename "$f" ".$des_trom_ext" — a misspelling of dest_rom_ext — and
# the mv below needs the extension left on the name anyway.
FILENAME=$(basename "$f")
break
done
line="mv dump/'$FILENAME' $3"
echo $line
eval $line
echo "unzipped"
rm -r dump
else
line="cp $full_path $3"
eval $line
echo "copied"
fi
## Logic to extract here if a zip file
#cp $full_path $3
} # end test
###################################################
# getRomPathFromName
#
# Globals:
# None
#
# Arguments:
# $1 Rom Game Name (eg. "Super Metroid")
#
# Returns:
# "./path.rom" on stdout
#
function getRomPathFromName() {
path=$(eval xmlstarlet sel -t -v "/gameList/game[name='$1']/path" ~/.emulationstation/gamelists/snes/gamelist.xml)
# Fix: 'return $path' treated the path string as a numeric exit status and
# always failed; shell functions hand strings back on stdout instead.
printf '%s\n' "$path"
} # end test
###################################################
# addGameToXML
#
# Globals:
# None
#
# Arguments:
# $1 Rom Folder System (eg. snes)
# $2 Rom Game Name (eg. "Super Metroid Randomized")
# $3 Rom Game Path (eg. "./SuperMetroidRandomized.smc")
# $4 Rom Description (eg. "Randomized Seed 12345")
# $5 Original Rom Name (eg. "Super Metroid") used to copy all other nodes, eg. image etc
#
# Returns:
# 1 Success
# 0 Fail
#
# NOTE(review): contrary to the header, this function always returns 0 —
# the trailing 'exit 1' is unreachable. Also 'players' is queried below but
# never written into the new node. Flagged rather than fixed because the
# xmlstarlet command string is load-bearing and callers may rely on the
# current output.
function addGameToXML() {
xml_command="xmlstarlet sel -t -c \"/gameList/game[name='${2}']\" ~/.emulationstation/gamelists/$1/gamelist.xml"
#echo $xml_command
xmlNode=$(eval $xml_command)
#xmlNode=$(eval xmlstarlet sel -t -c "/gameList/game[name='${2}']" ~/.emulationstation/gamelists/$1/gamelist.xml)
if [[ -z "$xmlNode" ]]; then
# Copy new from original rom name
xml_command="xmlstarlet sel -t -v \"/gameList/game[name='${5}']/image\" ~/.emulationstation/gamelists/$1/gamelist.xml"
image=$(eval $xml_command)
xml_command="xmlstarlet sel -t -v \"/gameList/game[name='${5}']/rating\" ~/.emulationstation/gamelists/$1/gamelist.xml"
rating=$(eval $xml_command)
xml_command="xmlstarlet sel -t -v \"/gameList/game[name='${5}']/releasedate\" ~/.emulationstation/gamelists/$1/gamelist.xml"
releasedate=$(eval $xml_command)
xml_command="xmlstarlet sel -t -v \"/gameList/game[name='${5}']/developer\" ~/.emulationstation/gamelists/$1/gamelist.xml"
developer=$(eval $xml_command)
xml_command="xmlstarlet sel -t -v \"/gameList/game[name='${5}']/publisher\" ~/.emulationstation/gamelists/$1/gamelist.xml"
publisher=$(eval $xml_command)
xml_command="xmlstarlet sel -t -v \"/gameList/game[name='${5}']/genre\" ~/.emulationstation/gamelists/$1/gamelist.xml"
genre=$(eval $xml_command)
xml_command="xmlstarlet sel -t -v \"/gameList/game[name='${5}']/players\" ~/.emulationstation/gamelists/$1/gamelist.xml"
players=$(eval $xml_command)
# add a new elemnt for the game
# NOTE(review): ${players} is missing from the subnode list below.
xml_command="xmlstarlet ed --inplace --subnode '/gameList' -type elem -n 'game' -s \"/gameList/game[last()]\" -type elem -n 'name' -v \"${2}\" -s \"/gameList/game[last()]\" -type elem -n 'path' -v \"${3}\" -s \"/gameList/game[last()]\" -type elem -n 'desc' -v \"${4}\" -s \"/gameList/game[last()]\" -type elem -n 'image' -v \"${image}\" -s \"/gameList/game[last()]\" -type elem -n 'rating' -v \"${rating}\" -s \"/gameList/game[last()]\" -type elem -n 'releasedate' -v \"${releasedate}\" -s \"/gameList/game[last()]\" -type elem -n 'developer' -v \"${developer}\" -s \"/gameList/game[last()]\" -type elem -n 'publisher' -v \"${publisher}\" -s \"/gameList/game[last()]\" -type elem -n 'genre' -v \"${genre}\" ~/.emulationstation/gamelists/$1/gamelist.xml"
eval $xml_command
echo "Made new node from original node"
else
# update old node element with new values
xml_command="xmlstarlet ed --inplace -u \"/gameList/game[name='${2}']/path\" -v \"$3\" -u \"/gameList/game[name='${2}']/desc\" -v \"$4\" ~/.emulationstation/gamelists/$1/gamelist.xml"
eval $xml_command
echo "Updated old node"
fi
return 0
exit 1
} # end test
#addGameToXML "snes" "Super Metroid Test" "./testromnew.smc" "new description a a a a" "Super Metroid"
# FUNCTION INSTALL GAME
# $1 plugin name
# Checks whether the plugin file's md5 matches the expected checksum.
# Fix: the original if/else had comment-only (empty) branches, which is a
# bash syntax error and made this whole file fail to source.
# TODO(review): $file and the expected checksum look like stub placeholders —
# confirm the intended inputs before relying on the result.
function canInstallPlugin()
{
  # load plugin name functions file
  md5_check="asdfasfdsdff"
  # Only the digest field; md5sum output is "<digest>  <file>".
  md5_file=$(md5sum "$file" 2>/dev/null | cut -d' ' -f1)
  if [ "$md5_file" != "$md5_check" ]; then
    # original author's note: "forgot the not check" — mismatch means not installable
    return 1
  fi
  return 0
  # unload plugin name functions file
}
# $1 plugin_name
# Stub: should report whether the plugin's folder already exists.
# Fix: a comment-only function body is a bash syntax error; ':' keeps the
# stub valid (and returning 0) until it is implemented.
function hasInstalledPlugin()
{
  # check folder exists
  :
}
# $1 plugin_name
# Installs a randomizer plugin: if the base rom exists in the gamelist,
# copies it out and clones the randomizer's git repo.
# NOTE(review): reads globals $systemname, $gamename, $romdestination, $name
# and $git — presumably set by a sourced per-plugin functions file; confirm
# against the caller. Relies on hasRom's inverted convention (1 == found).
function installPlugin()
{
#hasRom "snes" "Super Metroid"
hasRom "$systemname" "$gamename"
if [[ "$?" == "1" ]]; then
# copyRom "snes" "Super Metroid" "rom.smc"
copyRom "$systemname" "$gamename" "$romdestination"
# rm -r -f varia
# mkdir varia
# git clone --depth 1 "git@github.com:theonlydude/RandomMetroidSolver.git" varia
rm -r -f "$name"
mkdir "$name"
git clone --depth 1 "$git" "$name"
fi
}
| true
|
4c7dde34514652319bdd90cf34bd571bf0624c0b
|
Shell
|
Thorlak2202/holberton-system_engineering-devops
|
/0x04-loops_conditions_and_parsing/7-clock
|
UTF-8
| 265
| 3.5
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Print a clock face: for each hour 0-12, print "Hour: N" followed by the
# minutes 1-59, one per line.
h="0"
until [ "$h" -ge 13 ]
do
  echo "Hour: $h"
  i="1"
  until [ "$i" -ge 60 ]
  do
    echo "$i"
    case $i in "59")
      # no-op branch kept from the original; it intentionally does nothing
      ;;
    esac
    i=$((i + 1))
  done
  h=$((h + 1))
done
| true
|
c1b0bf7397c0bc6c5cbfa60d36d5c41199af037e
|
Shell
|
tshr20140816/test20180821
|
/80_install/post_install_main.sh.unrar
|
UTF-8
| 737
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
# Download and build unrar 5.7.1 under /tmp, then copy the binary into ./www.
# Traces every command (set -x) and prints start/end timestamps.
set -x
date
start_date=$(date)
chmod 777 start_web.sh
# Put the freshly built tools first on PATH and tune the toolchain flags.
export PATH="/tmp/usr/bin:${PATH}"
export CFLAGS="-O2 -march=native -mtune=native -fomit-frame-pointer"
export CXXFLAGS="$CFLAGS"
export LDFLAGS="-fuse-ld=gold"
# Toolchain / host diagnostics only; nothing below depends on the output.
gcc --version
g++ -fuse-ld=gold -Wl,--version
gcc -c -Q -march=native --help=target
grep -c -e processor /proc/cpuinfo
# Print one CPU's worth of /proc/cpuinfo (total lines / processor count).
cat /proc/cpuinfo | head -n $(($(cat /proc/cpuinfo | wc -l) / $(grep -c -e processor /proc/cpuinfo)))
mkdir -p /tmp/usr/bin
mkdir -p /tmp/usr/share
pushd /tmp
wget https://www.rarlab.com/rar/unrarsrc-5.7.1.tar.gz
tar xf unrarsrc-5.7.1.tar.gz
ls -lang
pushd unrar
ls -lang
cat makefile
time make -j2 -f makefile
ls -lang
popd
popd
cp /tmp/unrar/unrar ./www
echo ${start_date}
date
| true
|
d2f24028d51945e317215a2b04ba2be42ba19cfc
|
Shell
|
LittleJ0hnny/CloudTemplate
|
/config-service/bootstrap.sh
|
UTF-8
| 157
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
# Block until Logstash accepts TCP connections, then launch the service jar.
# Expects LOGSTASH_HOST/LOGSTASH_PORT and JAVA_OPTS/ARTIFACT_FILE in the env.
until nc -z "$LOGSTASH_HOST" "$LOGSTASH_PORT"; do
    echo "Waiting for the Log Service"
    sleep 5
done
# JAVA_OPTS/ARTIFACT_FILE stay unquoted so multiple options word-split.
java $JAVA_OPTS -jar $ARTIFACT_FILE
| true
|
7f21db6c5397544f7fc02353e185bba48b8fdd6d
|
Shell
|
docopt/docopts
|
/tests/build_doc.bats
|
UTF-8
| 3,378
| 3.890625
| 4
|
[
"MIT",
"LicenseRef-scancode-fsf-notice"
] |
permissive
|
#!/usr/bin/env bash
#
# unit test for helper for building the README.md
#
# Bats test file: loads the functions under test from the repo root.
source ../build_doc.sh
# Split the global $output on newlines into the global array $lines.
# Empty lines are dropped (newline is treated as IFS whitespace here), which
# differs from bats' own $lines handling — some tests rely on that.
output_split_lines() {
  local saved_ifs=$IFS
  local idx=0
  local entry
  IFS=$'\n'
  for entry in $output
  do
    lines[idx]=$entry
    idx=$((idx + 1))
  done
  IFS=$saved_ifs
}
# Bats runs setup() before every @test; 'skip' is a bats builtin.
setup() {
# https://github.com/docopt/docopts/issues/39
if [[ "$OSTYPE" =~ darwin ]] ; then
skip "build_doc.sh skipped on macOS"
fi
}
@test "extract_markup" {
run extract_markup input_for_build_doc.txt
echo "$output"
[[ ${lines[0]} == "2 include test_content.txt" ]]
[[ ${lines[1]} == '14 echo "new outputed content"' ]]
}
@test "parse_input" {
run parse_input input_for_build_doc.txt
echo "$output"
[[ ${lines[0]} == "2@6@include 'test_content.txt'" ]]
[[ ${lines[1]} == '14@16@echo "new outputed content"' ]]
}
@test "to_filename" {
run to_filename "awk '{print \$1}'"
echo "$output"
[[ $output == 'awkprint1' ]]
}
@test "markdown_extract_link" {
run markdown_extract_link "[text](some/path/to/file)"
echo "$output"
[[ $output == 'some/path/to/file' ]]
run markdown_extract_link "[text(some/path/to/file)"
echo "$output"
[[ $output == 'no_match' ]]
}
@test "strpos" {
run strpos "[text](some/path/to/file)" ']'
echo "$output"
[[ $output -eq 5 ]]
run strpos "[text(some/path/to/file)" pipo
echo "$output"
[[ $output == '-1' ]]
}
tester_get_usage() {
cat << EOT | get_usage
some text
Usage:
usage get_usage line 1
usage get_usage line 2
empty line above
some more text
EOT
}
@test "get_usage" {
run tester_get_usage
echo "$output"
[[ $(echo "$output" | wc -l) -eq 2 ]]
[[ ${lines[0]} == " usage get_usage line 1" ]]
}
@test "find_end_content" {
run find_end_content 6 input_for_build_doc.txt
[[ $output -eq 10 ]]
}
@test "include" {
# file doesn't exist
run include "some file" 42
echo "$output"
[[ $status -ne 0 ]]
run include "test_content.txt" 42
echo "$output"
[[ $status -eq 0 ]]
# bats bug #224 (blank line missing in $lines)
readarray -t lines <<<"$output"
[[ ${lines[0]} == "[source test_content.txt](test_content.txt)" ]]
[[ ${lines[1]} == "" ]]
[[ ${lines[2]} == '```' ]]
[[ ${lines[3]} == "some include test content" ]]
}
populate_var_by_printf_v() {
printf -v $1 "%s" $2
test $var == "value"
return $?
}
@test "bats bug fail to printf -v var" {
run populate_var_by_printf_v var value
[[ -z $var ]] # should be $var == "value"
}
test_eval_wrapper_helper() {
eval_wrapper 2 6 "include test_content.txt" our_start our_filename
echo $our_start
echo $our_filename
}
@test "eval_wrapper" {
# it seems that populating variables by printf -v inside function seems not visible by bats
run test_eval_wrapper_helper
# so we read content from stdout
begin_line=${lines[0]}
filename=${lines[1]}
[[ -n $begin_line ]]
[[ -n $filename ]]
[[ $begin_line -eq 4 ]]
}
@test "build_sed_cmd" {
infile=input_for_build_doc.txt
run build_sed_cmd $infile
expect=" -e '3,10 d' -e '11 r /tmp/content.includetest_contenttxt' -e '15,20 d' -e '21 r /tmp/content.echonewoutputedcontent'"
[[ $output == $expect ]]
# apply
eval "sed $output $infile" | diff expect_build_doc.txt -
}
@test "extract_markup free character" {
in_text="[make README.md]: # (grep -F \"():\" \$input)"
run extract_markup <<< "$in_text"
echo $output
[[ $output == '1 grep -F "():" $input' ]]
}
| true
|
1c00b21080b74bb0bd69d1e3320f1d9b31ace490
|
Shell
|
iTKunst/dts
|
/tmpl/smtp/docker/linux/pGO.sh
|
UTF-8
| 1,388
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/bash
# shellcheck disable=SC2086
# Launch the project's SMTP container: copy PROJ_* settings into short local
# names, log them, then 'docker run' the image.
source settings.sh
source bLOG.sh
log_load pGO
source tENV.sh
# Shorthand copies of the PROJ_* configuration from settings/tENV.
CONT=$PROJ_CONT
CONT_DIR=$PROJ_CONT_DIR
CONT_MNT_DIR=$PROJ_CONT_MNT_DIR
HOST=$PROJ_HOST
HOST_DIR=$(pwd)$PROJ_HOST_DIR
HOST_MNT_DIR=$(pwd)$PROJ_HOST_MNT_DIR
IMG=$PROJ_IMG
IP=$PROJ_IP
LABEL=$PROJ_LABEL
MODE=$PROJ_MODE
NET=$PROJ_NET
PASSWORD=$PROJ_PASSWORD
PORT_EXT=$PROJ_PORT_EXT
PORT_INT=$PROJ_PORT_INT
USER=$PROJ_USER
VOL=$PROJ_VOL
SH_ADDRESS=$PROJ_SH_ADDRESS
SH_ALIASES=$PROJ_SH_ALIASES
SH_PORT=$PROJ_SH_PORT
log_var CONT $CONT
log_var CONT_DIR $CONT_DIR
log_var CONT_MNT_DIR $CONT_MNT_DIR
log_var HOST $HOST
log_var HOST_DIR $HOST_DIR
log_var HOST_MNT_DIR $HOST_MNT_DIR
log_var IMG $IMG
log_var IP $IP
log_var LABEL $LABEL
log_var MODE $MODE
log_var NET $NET
# NOTE(review): this writes the smarthost PASSWORD into the log — consider
# masking it.
log_var PASSWORD $PASSWORD
log_var PORT_EXT $PORT_EXT
log_var PORT_INT $PORT_INT
log_var USER $USER
log_var VOL $VOL
log_var SH_ADDRESS $SH_ADDRESS
log_var SH_ALIASES $SH_ALIASES
log_var SH_PORT $SH_PORT
mkdir -p $HOST_MNT_DIR
# -$MODE is typically -d or -it; SMARTHOST_* env vars configure the relay.
docker run \
-$MODE \
-p $HOST:$PORT_EXT:$PORT_INT \
-e SMARTHOST_ADDRESS=$SH_ADDRESS \
-e SMARTHOST_ALIASES=$SH_ALIASES \
-e SMARTHOST_PORT=$SH_PORT \
-e SMARTHOST_PASSWORD=$PASSWORD \
-e SMARTHOST_USER=$USER \
--add-host=$HOST:$IP \
--label=$LABEL \
--name=$CONT \
--network=$NET \
$IMG
log_unload pGO
| true
|
a4fa5366ae4b46a45889ea107d624a28a082b128
|
Shell
|
QualiSystemsLab/Colony-Cloudformation
|
/applications/redirect-vm/init.sh
|
UTF-8
| 618
| 3.25
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Cloud-init style provisioning: install nginx and configure it to 302-redirect
# all traffic to $REDIRECT_URL (expected in the environment).
set -e # Stop Script on Error
# save all env for debugging
#printenv > /var/log/colony-vars-"$(basename "$BASH_SOURCE" .sh)".txt
apt-get update -y
echo '==> Install nginx'
apt-get install nginx -y
echo '==> Configure nginx'
cd /etc/nginx/sites-available/
cp default default.backup # backup default config
# Unquoted EOF: ${REDIRECT_URL} expands now; \$request_uri stays literal so
# nginx itself appends the requested path.
cat << EOF > ./default
server {
listen 80;
server_name _;
location = / {
return 302 http://${REDIRECT_URL};
}
location / {
return 302 http://${REDIRECT_URL}\$request_uri;
}
}
EOF
# restart nginx
echo '==> Restart nginx'
service nginx restart
| true
|
242f4c472dabec6f50fda413f2347ef3fef60b96
|
Shell
|
jlou96/dotfiles
|
/config/autostart-scripts/mouse.sh
|
UTF-8
| 860
| 3
| 3
|
[] |
no_license
|
#!/bin/sh
# Fix: the shebang was "# !/bin/sh" — the embedded space makes it an ordinary
# comment, so the kernel never treated this file as a shell script.
#
# Apply a flat 2x pointer-speed transform to the Logitech G203 mouse.
#
# sed "s/pattern1/pattern2" replaces pattern1 with pattern 2
# ".*id=([0-9]+).*" matches any line containing id=NUMBER
# \1 references the 1st capture group, ([0-9]+), i.e. NUMBER
# in the end, only NUMBER is written to stdout.
# -r enables extended regexp syntax,
# https://www.gnu.org/software/sed/manual/sed.html#ERE-syntax
#
# (A previous variant targeted the "EVGA TORQ X5L Gaming Mouse" with the same
# pipeline and a different grep string.)
xinput list | grep "Logitech G203 Prodigy Gaming Mouse" | sed -r "s/.*id=([0-9]+).*/\1/" | while read -r line ; do
	# $line <- n where "id=n"
	echo "Processing $line"
	# Quoted "$line" so an empty/odd id cannot break the argument list.
	xinput --set-prop "$line" "Coordinate Transformation Matrix" 1 0 0 0 1 0 0 0 2
done
| true
|
6dfc404ba22974d6f394e9fa7c232524d0e5a9e1
|
Shell
|
NOAA-EMC/DATM-MOM6-CICE5
|
/tests/datm_conf/datm_slurm.IN_orion
|
UTF-8
| 1,182
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/sh
# SLURM batch template: @[NAME] placeholders are substituted by the test
# harness before submission — TODO confirm against the generator script.
#SBATCH -e err
#SBATCH -o out
#SBATCH --account=@[ACCNR]
#SBATCH --qos=@[QUEUE]
#SBATCH --partition=@[PARTITION]
### #SBATCH --ntasks=@[TASKS]
#SBATCH --nodes=@[NODES]
#SBATCH --ntasks-per-node=@[TPN]
#SBATCH --time=@[WLCLK]
#SBATCH --job-name="@[JBNME]"
#SBATCH --exclusive
set -eux
set +x
source ./module-setup.sh
module use $( pwd -P )
module load modules.datm
module list
set -x
echo "Model started: " `date`
export MPI_TYPE_DEPTH=20
export OMP_STACKSIZE=512M
export OMP_NUM_THREADS=@[THRD]
export ESMF_RUNTIME_COMPLIANCECHECK=OFF:depth=4
export ESMF_RUNTIME_PROFILE=ON
export ESMF_RUNTIME_PROFILE_OUTPUT=SUMMARY
# Set the stack limit as high as we can.
# Try progressively smaller limits until one is accepted by the system.
if [[ $( ulimit -s ) != unlimited ]] ; then
for try_limit in 20000 18000 12000 9000 6000 3000 1500 1000 800 ; do
if [[ ! ( $( ulimit -s ) -gt $(( try_limit * 1000 )) ) ]] ; then
ulimit -s $(( try_limit * 1000 ))
else
break
fi
done
fi
# Avoid job errors because of filesystem synchronization delays
sync && sleep 1
srun --label -n @[TASKS] ./datm_mom6_cice.exe
echo "Model ended: " `date`
exit
| true
|
b0e0bbfd05d34c742f4d3babfd54a8f40689a01a
|
Shell
|
maxwen/android_device_brcm_rpi4
|
/scripts/mk-raspi64-large-image.sh
|
UTF-8
| 3,220
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/sh
# Build a bootable 8 GiB Raspberry Pi 4 sdcard image from an Android build:
# partitions it, writes system/vendor images, and populates the boot FAT.
# NOTE(review): every missing-argument/missing-file path below uses 'exit 0'
# (success) and the final success path uses 'exit 1' — callers checking the
# exit status get inverted results; confirm before relying on $? here.
#IN_IMAGE_DIR=$OUT_DIR/target/product/rpi4/
#IN_BOOT_FILES=$ANDROID_BUILD_TOP/vendor/brcm/rpi4/proprietary/boot/
#OUT_IMAGE_FILE=$HOME/raspberrypi/omni-$ROM_BUILDTYPE.img
if [ -z $ROM_BUILDTYPE ]; then
echo "missing ROM_BUILDTYPE"
exit 0
fi
# Parse -o/-i/-b/-h via util-linux getopt (handles reordered args).
options=$(getopt -o ho:i:b: -- "$@")
[ $? -eq 0 ] || {
echo "Incorrect options provided"
exit 1
}
eval set -- "$options"
while true; do
case "$1" in
-o)
shift
OUT_IMAGE_FILE=$1
shift
;;
-i)
shift
IN_IMAGE_DIR=$1
shift
;;
-b)
shift
IN_BOOT_FILES=$1
shift
;;
-h)
echo "-i <image folder> -b <boot file dir> -o <image file>"
echo "e.g. -i $OUT_DIR/target/product/rpi4/ -b $ANDROID_BUILD_TOP/vendor/brcm/rpi4/proprietary/boot/ -o /tmp/omni.img"
exit 0
;;
--)
shift
break
;;
esac
done
# Validate that all inputs exist before touching anything.
if [ -z $IN_IMAGE_DIR ]; then
echo "missing -i <image folder>"
exit 0
fi
if [ -z $OUT_IMAGE_FILE ]; then
echo "missing -o <image file>"
exit 0
fi
if [ -z $IN_BOOT_FILES ]; then
echo "missing -b <boot file dir>"
exit 0
fi
if [ ! -f "$IN_IMAGE_DIR/system.img" ]; then
echo "no <input folder>/system.img"
exit 0
fi
if [ ! -f "$IN_IMAGE_DIR/vendor.img" ]; then
echo "no <input folder>/vendor.img"
exit 0
fi
if [ ! -f "$IN_IMAGE_DIR/obj/KERNEL_OBJ/arch/arm64/boot/Image" ]; then
echo "no <input folder>/obj/KERNEL_OBJ/arch/arm64/boot/Image"
exit 0
fi
if [ ! -d $IN_BOOT_FILES ]; then
echo "no <boot file dir>"
exit 0
fi
if [ ! -f "$IN_BOOT_FILES/config.txt" ]; then
echo "no <boot file dir>/config.txt"
exit 0
fi
echo "create: images $IN_IMAGE_DIR + boot files $IN_BOOT_FILES -> $OUT_IMAGE_FILE"
if [ -f $OUT_IMAGE_FILE ]; then
rm $OUT_IMAGE_FILE
fi
echo "create empty image"
dd if=/dev/zero of="$OUT_IMAGE_FILE" bs=1M count=8192
echo "create partitions"
# sfdisk layout (sectors): boot FAT / system / vendor / userdata-to-end.
sudo sfdisk "$OUT_IMAGE_FILE" << EOF
2,262144,0xC,*
264192,4194304,0x83,-
4458496,524288,0x83,-
4982784,,-
EOF
echo "mount partitions"
# NOTE(review): the loop0pN device names below assume kpartx picked loop0 —
# this breaks if another loop device is already in use.
sudo kpartx -av "$OUT_IMAGE_FILE"
echo "create file systems"
sudo mkfs.vfat /dev/mapper/loop0p1 -n boot
sudo mkfs.ext4 /dev/mapper/loop0p2 -L system
sudo mkfs.ext4 /dev/mapper/loop0p3 -L vendor
sudo mkfs.ext4 /dev/mapper/loop0p4 -L userdata
echo "enable project quota"
sudo tune2fs -O project,quota /dev/mapper/loop0p4
echo "write system.img"
sudo dd if="$IN_IMAGE_DIR/system.img" of=/dev/mapper/loop0p2 bs=1M
echo "write vendor.img"
sudo dd if="$IN_IMAGE_DIR/vendor.img" of=/dev/mapper/loop0p3 bs=1M
echo "write boot patition"
sudo mkdir /mnt/tmp
sudo mount /dev/mapper/loop0p1 /mnt/tmp
sudo cp "$IN_IMAGE_DIR/ramdisk.img" /mnt/tmp/
sudo cp "$IN_IMAGE_DIR/obj/KERNEL_OBJ/arch/arm64/boot/Image" /mnt/tmp/Image
sudo cp "$IN_IMAGE_DIR/obj/KERNEL_OBJ/arch/arm64/boot/dts/broadcom/bcm2711-rpi-4-b.dtb" /mnt/tmp/
sudo mkdir /mnt/tmp/overlays/
sudo cp $IN_IMAGE_DIR/obj/KERNEL_OBJ/arch/arm64/boot/dts/overlays/* /mnt/tmp/overlays/
sudo cp $IN_BOOT_FILES/* /mnt/tmp/
sync
echo "unmounting"
sudo umount /mnt/tmp
sudo rm -fr /mnt/tmp
sudo kpartx -dv "$OUT_IMAGE_FILE"
echo "now write $OUT_IMAGE_FILE to a sdcard"
exit 1
| true
|
e15cb00300e8310adbacaf1a326c2bae1ba44164
|
Shell
|
Chapabu/dockerfiles
|
/ubuntu/16.04/usr/local/share/bootstrap/common_functions.sh
|
UTF-8
| 2,506
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/bash
# Source every file in /usr/local/share/env into the current shell.
# set +x keeps any secrets in those files out of an active trace; nullglob
# makes the glob expand to nothing when the directory is empty or missing.
load_env() {
  set +x
  shopt -s nullglob
  set -- /usr/local/share/env/*
  local env_file
  for env_file in "$@"; do
    # shellcheck source=/dev/null
    source "${env_file}"
  done
}
# Print the home directory of user $1 (field 6 of the passwd entry).
# Returns 1 when no user name is supplied.
get_user_home_directory() {
  local account="$1"
  [ -n "$account" ] || return 1
  getent passwd "$account" | cut -d: -f 6
}
# Run a command as another user via sudo.
#   $1 - command string (required; return 1 if empty)
#   $2 - working directory (defaults to /app)
#   $3 - user name (defaults to 'build')
# -E preserves the environment; HOME is overridden to the target user's home.
as_user() {
set +x
local COMMAND="$1"
local WORKING_DIR="$2"
local USER="$3"
if [ -z "$COMMAND" ]; then
return 1;
fi
if [ -z "$WORKING_DIR" ]; then
WORKING_DIR='/app';
fi
if [ -z "$USER" ]; then
USER='build';
fi
USER_HOME="$(get_user_home_directory "$USER")"
set -x
# COMMAND is interpolated into a bash -c string, so it may itself contain
# shell syntax — callers must pass trusted input only.
sudo -u "$USER" -E HOME="$USER_HOME" /bin/bash -c "cd '$WORKING_DIR'; $COMMAND"
}
# Convenience wrappers around as_user for the three well-known accounts.
# $1 = command, $2 = working directory (both forwarded verbatim).
as_build() {
set +x
as_user "$1" "$2" 'build'
}
# Runs as $CODE_OWNER (expected in the environment).
as_code_owner() {
set +x
as_user "$1" "$2" "$CODE_OWNER"
}
# Runs as $APP_USER (expected in the environment).
as_app_user() {
set +x
as_user "$1" "$2" "$APP_USER"
}
# True (0) when the project ships a hem or hobo config file.
is_hem_project() {
  [ -f /app/tools/hem/config.yaml ] || [ -f /app/tools/hobo/config.yaml ]
}
# True when /app is a network/shared-folder mount (nfs, vboxsf or osxfs).
is_app_mountpoint() {
  grep -q -E "/app (nfs|vboxsf|fuse\.osxfs)" /proc/mounts
}
is_chown_forbidden() {
  # Determine if the app directory is an NFS mountpoint, which doesn't allow chowning.
  grep -q -E "/app (nfs|vboxsf)" /proc/mounts
}
# True when /app is a VirtualBox shared folder.
is_vboxsf_mountpoint() {
  grep -q "/app vboxsf" /proc/mounts
}
# Clone the function named $1 under the new name $2 by printing its
# definition with 'declare -f' and re-evaluating it with the name swapped.
alias_function() {
  local source_def
  source_def=$(declare -f "$1")
  eval "$2${source_def#$1}"
}
# Default no-op lifecycle hooks; image-specific bootstrap files are expected
# to override these (':' is the shell no-op builtin).
do_build() {
:
}
do_start() {
:
}
# Install base64-encoded SSH key material into a user's ~/.ssh.
#   $1 - user name (required; return 1 if empty)
#   $2 - key filename (e.g. id_rsa)
#   $3 - base64 private key; nothing happens when empty
#   $4 - base64 public key (optional)
#   $5 - base64 known_hosts (optional)
do_user_ssh_keys() {
set +x
local SSH_USER="$1"
if [ -z "$SSH_USER" ]; then
return 1;
fi
local SSH_FILENAME="$2"
local SSH_PRIVATE_KEY="$3"
local SSH_PUBLIC_KEY="$4"
local SSH_KNOWN_HOSTS="$5"
local SSH_USER_HOME
SSH_USER_HOME=$(get_user_home_directory "$SSH_USER")
if [ -n "$SSH_PRIVATE_KEY" ]; then
echo "Setting up SSH keys for the $SSH_USER user"
# Subshell so the restrictive umask (0600 files) applies only to the
# private key write.
(
umask 0077
mkdir -p "$SSH_USER_HOME/.ssh/"
echo "$SSH_PRIVATE_KEY" | base64 --decode > "$SSH_USER_HOME/.ssh/$SSH_FILENAME"
)
if [ -n "$SSH_PUBLIC_KEY" ]; then
echo "$SSH_PUBLIC_KEY" | base64 --decode > "$SSH_USER_HOME/.ssh/$SSH_FILENAME.pub"
fi
if [ -n "$SSH_KNOWN_HOSTS" ]; then
echo "$SSH_KNOWN_HOSTS" | base64 --decode > "$SSH_USER_HOME/.ssh/known_hosts"
fi
chown -R "$SSH_USER" "$SSH_USER_HOME/.ssh/"
# Scrub the key material from the environment once written to disk.
unset SSH_PRIVATE_KEY
unset SSH_PUBLIC_KEY
unset SSH_KNOWN_HOSTS
fi
set -x
}
| true
|
08a1c39b85c17d17b451379b0f7886f75b9cf88f
|
Shell
|
jcihain/woobuntu-installer
|
/scripts/Students/webgoat.sh
|
UTF-8
| 603
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/sh
# Install WebGoat 7.0.1 under /opt/woobuntu and add a desktop launcher.
sudo rm -rf /opt/woobuntu/webgoat
sudo mkdir -p /opt/woobuntu/webgoat
sudo chmod a+rw /opt/woobuntu/webgoat
cd /opt/woobuntu/webgoat
sudo wget https://github.com/WebGoat/WebGoat/releases/download/7.0.1/webgoat-container-7.0.1-war-exec.jar -O webgoat.jar
# Unquoted EOF: $term expands here — presumably the installer framework sets
# it to the preferred terminal emulator; verify against the calling script.
cat > /tmp/webgoat.desktop <<EOF
[Desktop Entry]
Version=1.0
Type=Application
Name=webgoat
Icon=application-default-icon
Exec=$term -e '/bin/bash -c "cd /opt/woobuntu/webgoat;java -jar webgoat.jar; exec bash"'
NoDisplay=false
Categories=woobuntu;
StartupNotify=true
Terminal=false
EOF
sudo mv /tmp/webgoat.desktop /usr/share/applications
| true
|
b3657e82c616ac13dd7c6280657928dba12334d5
|
Shell
|
XGWang0/Suse_testsuite
|
/tests/qa_test_coreutils/qa_test_coreutils/orig_test_suite/chmod/usage
|
UTF-8
| 2,077
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/sh
# Verify that chmod works correctly with odd option combinations.
if test "$VERBOSE" = yes; then
set -x
chmod --version
fi
# Standard coreutils test prologue: locale reset, per-test temp dir, and
# cleanup traps ($srcdir is provided by the test harness).
. $srcdir/../lang-default
pwd=`pwd`
t0=`echo "$0"|sed 's,.*/,,'`.tmp; tmp=$t0/$$
trap 'status=$?; cd $pwd; chmod -R u+rwx $t0; rm -rf $t0 && exit $status' 0
trap '(exit $?); exit $?' 1 2 13 15
framework_failure=0
mkdir -p $tmp || framework_failure=1
cd $tmp || framework_failure=1
if test $framework_failure = 1; then
echo "$0: failure in testing framework" 1>&2
(exit 1); exit 1
fi
fail=0
# Each line in this list is a set of arguments, followed by :,
# followed by the set of files it will attempt to chmod,
# or empty if the usage is erroneous.
# Many of these test cases are due to Glenn Fowler.
# These test cases assume GNU behavior for "options" like -w.
cases='
-- :
-- -- :
-- -- -- f : -- f
-- -- -w f : -w f
-- -- f : f
-- -w :
-- -w -- f : -- f
-- -w -w f : -w f
-- -w f : f
-- f :
-w :
-w -- :
-w -- -- f : -- f
-w -- -w f : -w f
-w -- f : f
-w -w :
-w -w -- f : f
-w -w -w f : f
-w -w f : f
-w f : f
f :
f -- :
f -w : f
f f :
u+gr f :
ug,+x f :
'
all_files=`echo "$cases" | sed 's/.*://'|sort -u`
# Iterate over $cases one LINE at a time (IFS=newline just for the for-split),
# then restore IFS so expr/word-splitting inside the body behaves normally.
old_IFS=$IFS
IFS='
'
for case in $cases; do
IFS=$old_IFS
# args = text before the ':' separator, files = text after it.
args=`expr "$case" : ' *\(.*[^ ]\) *:'`
files=`expr "$case" : '.*: *\(.*\)'`
case $files in
'')
# Erroneous usage: chmod must FAIL (success sets fail=1).
touch -- $all_files || framework_failure=1
chmod $args 2>/dev/null && fail=1
;;
?*)
# Valid usage: chmod must succeed on exactly these files.
touch -- $files || framework_failure=1
chmod $args || fail=1
for file in $files; do
# Test for misparsing args by creating all $files but $file.
# chmod has a bug if it succeeds even though $file is absent.
rm -f -- $all_files && touch -- $files && rm -- $file \
|| framework_failure=1
chmod $args 2>/dev/null && fail=1
done
;;
esac
done
if test $framework_failure = 1; then
echo "$0: failure in testing framework" 1>&2
fail=1
fi
(exit $fail); exit $fail
| true
|
d38de66a6b305038844e91e83d4cd2686e94f3f9
|
Shell
|
axilleas/humblebundle
|
/pkgbuilds/spaz-hib/PKGBUILD
|
UTF-8
| 2,546
| 3.53125
| 4
|
[] |
no_license
|
# Maintainer: Sam S. <smls75@gmail.com>
# PKGBUILD for the Humble Bundle release of Space Pirates and Zombies.
# The user must supply the game archive; _get_humblebundle_source below
# locates it in the build dir or $LOCAL_PACKAGE_SOURCES.
pkgname=spaz-hib
pkgver=2012_09_18
_hibver=09182012
pkgrel=0
pkgdesc='Space Pirates and Zombies, a top-down space combat game (Humble Bundle version)'
url='http://spacepiratesandzombies.com/'
arch=('i686' 'x86_64')
license=('custom:commercial')
depends=('gcc-libs' 'libgl' 'sdl' 'openal' 'alsa-lib')
optdepends=('alsa-plugins: PulseAudio support'
'libpulse: PulseAudio support')
# The game binary is 32-bit; on x86_64 the lib32 multilib packages are
# required instead of the native ones.
if [ $CARCH == x86_64 ]; then
depends=('lib32-gcc-libs' 'lib32-libgl' 'lib32-sdl' 'lib32-openal' 'lib32-alsa-lib')
optdepends=('lib32-alsa-plugins: PulseAudio support'
'lib32-libpulse: PulseAudio support')
fi
source=('spaz-hib.desktop')
md5sums=('9fc788b38bb5d21f63f97913734d3db0')
# Skip package compression — the game data is large and already packed.
PKGEXT='.pkg.tar'
_gamepkg="spaz-linux-humblebundle-${_hibver}-bin"
package() {
cd $srcdir
# Get installer
_get_humblebundle_source "${_gamepkg}" || {
error "Unable to find the game archive. Please download it from your Humble
Bundle page, and place it into one of the above locations."
exit 1; }
# Extract the installer
unzip -o "${_gamepkg}" || true
# Remove bundled libraries (to force usage of system libs instead)
rm -rf $srcdir/data/libSDL-1.2.so.0
rm -rf $srcdir/data/libopenal.so
install -d "${pkgdir}/opt/SPAZ"
cp -r "$srcdir/data/"* "${pkgdir}/opt/SPAZ/"
# Install desktop entry
install -Dm644 "$pkgname.desktop" \
"$pkgdir/usr/share/applications/$pkgname.desktop"
# Install icon
install -Dm644 "data/SPAZ.png" "$pkgdir/usr/share/pixmaps/spaz.png"
# Create launcher script
install -d "$pkgdir/usr/bin"
echo -e "#!/bin/sh\ncd /opt/SPAZ && ./SPAZ" > "$pkgdir/usr/bin/spaz"
chmod 755 "$pkgdir/usr/bin/spaz"
}
# Locate a game archive from one of the Humble Bundles, and symlink it into $srcdir
_get_humblebundle_source() {
_get_local_source "$1" || return 1;
# TODO: Implement automatic downloading from Humble Bundle site
}
# Locate a file or folder provided by the user, and symlink it into $srcdir
# Searches the PKGBUILD directory first, then $LOCAL_PACKAGE_SOURCES.
_get_local_source() {
msg "Looking for '$1'..."
declare -A _search=(['build dir']="$startdir"
['$LOCAL_PACKAGE_SOURCES']="$LOCAL_PACKAGE_SOURCES")
for _key in "${!_search[@]}"; do local _dir="${_search["$_key"]}"
if [ -z "$_dir" ]; then _dir="<undefined>"; fi
echo -n "  - in $_key ['$_dir'] ... ";
if [ -e "$_dir/$1" ]; then
echo "FOUND"; ln -sfT "$(readlink -f "$_dir/$1")" "$srcdir/$1"; break; fi
echo "NOT FOUND"
done
if [ ! -e "$srcdir/$1" ]; then return 1; fi
}
| true
|
ae07f717e4a94de79e64c4a0bf6f4afbae6f71e5
|
Shell
|
OpenMandrivaAssociation/dovecot
|
/dovecot-init
|
UTF-8
| 1,504
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Init file for Dovecot IMAP daemon
#
# Written by Dag Wieers <dag@wieers.com>.
#
# chkconfig: 345 57 46
# description: Dovecot IMAP Daemon
#
# processname: dovecot
# config: /etc/dovecot.conf
# pidfile: /var/run/dovecot
### BEGIN INIT INFO
# Provides: dovecot
# Required-Start: $network $remote_fs
# Required-Stop: $network $remote_fs
# Should-Start: ntpd
# Default-Start: 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Dovecot IMAP daemon
# Description: The dovecot daemon serves as a master process for the Dovecot \
#	IMAP and POP servers.
### END INIT INFO
# source function library
. /etc/init.d/functions
# Bail out early if the daemon binary or its configuration is missing.
[ -x /usr/sbin/dovecot ] || exit 1
[ -r /etc/dovecot/dovecot.conf ] || exit 1
RETVAL=0
prog="dovecot"
desc="IMAP daemon"
# Start the daemon; record a subsys lock on success so condrestart and
# higher-level tooling can tell the service is running.
start() {
echo -n $"Starting $desc ($prog): "
daemon $prog
RETVAL=$?
echo
[ $RETVAL -eq 0 ] && touch /var/lock/subsys/dovecot
return $RETVAL
}
# Stop the daemon and clear the subsys lock on success.
stop() {
echo -n $"Shutting down $desc ($prog): "
killproc $prog
RETVAL=$?
echo
[ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/dovecot
return $RETVAL
}
# Full stop/start cycle.
restart() {
stop
start
}
# Ask the running master process to reload configuration (SIGHUP).
reload() {
echo -n $"Reloading $desc ($prog): "
killproc $prog -HUP
RETVAL=$?
echo
return $RETVAL
}
case "$1" in
start)
start
;;
stop)
stop
;;
restart)
restart
;;
reload)
reload
;;
condrestart)
# Restart only if the service is currently running (lock present).
# NOTE(review): when the lock is absent, RETVAL captures the failed
# test's status (1) rather than 0 — confirm that exit code is intended.
[ -e /var/lock/subsys/$prog ] && restart
RETVAL=$?
;;
status)
status $prog
RETVAL=$?
;;
*)
echo $"Usage: $0 {start|stop|restart|reload|condrestart|status}"
RETVAL=1
esac
exit $RETVAL
| true
|
e037050e629a75b1324719bf98da7f05a4c9d4f9
|
Shell
|
jazzwang/docker-pool
|
/cloudera-director/bin/director
|
UTF-8
| 986
| 3.609375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Lifecycle wrapper for a single "director" docker container running the
# jazzwang/cloudera-director image.  Subcommand in $1.
# The bracketed sequences below contain literal ESC bytes (ANSI colors).
case "$1" in
new)
# Create (but do not start) the container, mounting the CWD at /vagrant.
echo "[32;1m[INFO] Creating new cloudera-director docker instances ...[0m"
docker create -it --name director -h director.etu.im -v ${PWD}:/vagrant jazzwang/cloudera-director:centos6 /bin/bash
;;
start)
echo "[32;1m[INFO] Starting cloudera-director docker instances ...[0m"
docker start director
;;
login)
# Interactive shell inside the running container.
docker exec -it director /bin/bash
;;
stop)
echo "[32;1m[INFO] Stoping cloudera-director docker instances ...[0m"
docker stop director
;;
status)
# If the container exists, print the Web UI URL using its bridge IP.
docker ps -a | grep "director "
if [ "$?" == "0" ]; then
IP=$(docker inspect --format '{{ .NetworkSettings.IPAddress }}' director)
echo "Please visit http://${IP}:7189 for Cloudera Director Web UI"
else
echo "There is no instance of cloudera-director."
fi
;;
log)
docker logs director
;;
rm)
echo "[32;1m[INFO] Removing cloudera-director docker instances ...[0m"
docker rm director
;;
*)
echo "Usage: $0 [ new | start | login | stop | status | log | rm ]"
;;
esac
| true
|
24fde36c6c1f44e5be3468adc9f90fdb9e985b95
|
Shell
|
vp-sdeniau/vp-ansible
|
/roles/admin_docker/tasks/files/purge_volumes_docker.sh
|
UTF-8
| 135
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/bash
# Remove all dangling (unreferenced) Docker volumes; print "empty" when
# there is nothing to remove.
volumes=$(docker volume ls -qf dangling=true)
if [ -n "$volumes" ] ;then
# $volumes is intentionally unquoted: word-splitting turns the
# newline-separated id list into one argument per volume.
docker volume rm $volumes
else
echo "empty"
fi
| true
|
b59c9fbabd2aaa96bf2c74778ea61dc4470d7ca5
|
Shell
|
cloudfoundry-community/jumpbox-boshrelease
|
/jobs/inventory/templates/bin/run
|
UTF-8
| 752
| 3.203125
| 3
|
[
"TMate",
"MIT"
] |
permissive
|
#!/bin/bash
# Inventory check: verify every tool baked into the jumpbox runs, and
# report each tool's version.  Exits non-zero if any check fails.
set -e
source /var/vcap/jobs/jumpbox/config/bashrc
EXITSTATUS=0
# check CMD ARGS... — print a colored OK/FAILED line for CMD; on success
# also show the command's output, indented two spaces.
# NOTE(review): the command is executed twice (once for the status test,
# once to capture output) — output could differ between the two runs.
check() {
name=$1
echo -en "Checking \e[1;35m${name}\e[0m... "
if ! $* >/dev/null 2>&1; then
echo -e "\e[1;31mFAILED!\e[0m"
EXITSTATUS=1
else
echo -e "\e[1;32mOK\e[0m"
$* 2>&1 | sed -e 's/^/  /g'
fi
echo
}
# Some of the tools below need a writable HOME for caches/config.
export HOME=/var/vcap/data/.home
mkdir -p $HOME
check bbr version
check bosh -v
check cf --version
check credhub --version
check jq --version
check spruce --version
check genesis version
check safe version
check shield -v
check spiff -v
check vault -v
check curl --version
check fly --version
check git --version
check tmux -V
check tree --version
check vim --version
check wget --version
check zip -v
check unzip -v
check go version
exit $EXITSTATUS
| true
|
f2e5ecab3010d20afe3970cd7f7c996072f05335
|
Shell
|
Makaque/Modsenfree
|
/buildcs.sh
|
UTF-8
| 1,551
| 2.65625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Build the Modsenfree C# components (patcher, hook, test mod) with the
# bundled Mono compiler, then stage all outputs into the run directory.
MANAGED="/c/Program Files (x86)/Steam/steamapps/common/Oxenfree/Oxenfree_Data/Managed/"
SYSTEM="$MANAGED/System.Core.dll"
# .NET 3.5 reference assemblies shipped with the bundled Mono.
MONO35=./Libraries/Mono/lib/mono/3.5-api/Microsoft.Build.Engine.dll,./Libraries/Mono/lib/mono/3.5-api/Microsoft.Build.Framework.dll,./Libraries/Mono/lib/mono/3.5-api/Microsoft.Build.Tasks.v3.5.dll,./Libraries/Mono/lib/mono/3.5-api/Microsoft.Build.Utilities.v3.5.dll
RESOURCES="./resources"
HOOK_OUTPUT="./target/modsenfree/hook"
PATCHER_OUTPUT="./target/modsenfree/patcher"
RUN_OUTPUT="./target/scala-2.13"
SRC="./src/main/cs"
mkdir -p $HOOK_OUTPUT
mkdir -p $PATCHER_OUTPUT
mkdir -p $RUN_OUTPUT
# Patcher (exe) needs Cecil for assembly rewriting; Hook (dll) is built
# against C# 2 (-langversion:ISO-2) for Unity compatibility.
./Libraries/Mono/bin/mcs $SRC/Patcher.cs -out:$PATCHER_OUTPUT/Patcher.exe -r:$MONO35,$RESOURCES/Mono.Cecil.dll,$RESOURCES/Mono.Cecil.Inject.dll
./Libraries/Mono/bin/mcs -langversion:ISO-2 $SRC/Hook.cs -out:$HOOK_OUTPUT/Hook.dll -t:library -r:$MONO35,$RESOURCES/0Harmony.dll,$RESOURCES/Assembly-CSharp.dll
# Copy runtime dependencies next to each artifact.
cp $RESOURCES/Mono.Cecil.dll $PATCHER_OUTPUT/
cp $RESOURCES/Mono.Cecil.Inject.dll $PATCHER_OUTPUT/
cp $RESOURCES/0Harmony.dll $HOOK_OUTPUT
#cp $RESOURCES/UnityEngine.dll $OUTPUT/
# Build the sample mod and ship its manifest alongside the dll.
TESTMODDIR="./target/modsenfree/Mods/testmod"
mkdir -p $TESTMODDIR
./Libraries/Mono/bin/mcs $SRC/testmod/TestMod.cs -out:$TESTMODDIR/TestMod.dll -t:library -r:$MONO35,$RESOURCES/0Harmony.dll,$RESOURCES/Assembly-CSharp.dll,$RESOURCES/UnityEngine.dll,$RESOURCES/UnityEngine.UI.dll
cp $SRC/testmod/mod.json $TESTMODDIR
# Mirror everything into the scala run directory for local testing.
cp -r $PATCHER_OUTPUT $RUN_OUTPUT
cp -r $HOOK_OUTPUT $RUN_OUTPUT
cp -r "./target/modsenfree/Mods" $RUN_OUTPUT
| true
|
a56589d25e54ef1a7f0a7ce371345e127e4037e8
|
Shell
|
deep5050/smart-radio
|
/chrome.sh
|
UTF-8
| 742
| 3.375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Install the latest Google Chrome plus a matching ChromeDriver into
# ./driver, starting from a clean slate.
# Remove existing downloads and binaries so we can start from scratch.
rm google-chrome-stable_current_amd64.deb
rm chromedriver_linux64.zip
rm -f driver/*
# Install Latest Chrome.
set -ex
wget https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb
sudo apt install ./google-chrome-stable_current_amd64.deb
# Install Latest ChromeDriver.
# NOTE(review): chromedriver.storage.googleapis.com only serves builds up
# to Chrome 114 — confirm LATEST_RELEASE still matches installed Chrome.
CHROME_DRIVER_VERSION=`curl -sS https://chromedriver.storage.googleapis.com/LATEST_RELEASE`
wget -N https://chromedriver.storage.googleapis.com/$CHROME_DRIVER_VERSION/chromedriver_linux64.zip
unzip ./chromedriver_linux64.zip -d ./driver
rm ./chromedriver_linux64.zip
chmod +x ./driver/chromedriver
# deleting chrome executable
rm google-chrome-stable_current_amd64.deb
| true
|
b65ab4a4dd67a67fc81d06883d23484ac84089af
|
Shell
|
lemonal88/Linux_Study
|
/practise/sh03.sh
|
UTF-8
| 963
| 2.546875
| 3
|
[] |
no_license
|
# sh03.sh
#!/bin/bash
# Program:
#	This progress shows default parameters
#	Demonstrates $0/$1/$2/$@/$# and how `shift` consumes positionals.
# History:
#	2018-05-27 Lvhongbin First Release
echo "The script's name is ==> $0"
echo "The argument[0] is ==> $1"
echo "The argument[1] is ==> $2"
# Quote $@ here; in practice it prints the same quoted or unquoted.
echo "The arguments is ==> '$@'"
echo "The arguments.length is ==> '$#'"
shift && echo -e "shift \n"
echo "The script's name is ==> $0"
echo "The argument[0] is ==> $1"
echo "The argument[1] is ==> $2"
# Quote $@ here; in practice it prints the same quoted or unquoted.
echo "The arguments is ==> '$@'"
echo "The arguments.length is ==> '$#'"
shift 2 && echo -e "shift 2 \n"
echo "The script's name is ==> $0"
echo "The argument[0] is ==> $1"
echo "The argument[1] is ==> $2"
# Quote $@ here; in practice it prints the same quoted or unquoted.
echo "The arguments is ==> '$@'"
echo "The arguments.length is ==> '$#'"
| true
|
20905a71ccbdf7a0069dde6a5a35619a0797af09
|
Shell
|
joybinchen/cmake-generator
|
/compile_commands
|
UTF-8
| 1,657
| 3.21875
| 3
|
[] |
no_license
|
#! /bin/bash
# Wrap a make build with CodeChecker's ldlogger so every compiler
# invocation is captured into a compile_commands.json database.
CC_LOGGER_SCRIPT=$(realpath $0)
CC_LOGGER_DIR=$(dirname ${CC_LOGGER_SCRIPT})
COMPILE_DB=compile_commands.json
# Back up an existing database, tagged with its modification time.
if [[ -e ${COMPILE_DB} ]]; then
BACKUP_TAG=$(stat --printf=%y ${COMPILE_DB} |sed 's| |_|' | cut -d'.' -f1)
mv ${COMPILE_DB} ${COMPILE_DB}.${BACKUP_TAG}.json
fi
#echo CodeChecker log -k -o compile_commands.json -b "/usr/bin/make -j1 $@"
# -j1 keeps compiler invocations sequential so the logger sees them all.
make_command="/usr/bin/make -j1 $@"
#CodeChecker log -k -o compile_commands.json -b "$make_command"
#exit $?
# Intercept exec() calls in the build via the preloaded logger library.
export LD_PRELOAD=ldlogger.so
export LD_LIBRARY_PATH="${CC_LOGGER_DIR}/ldlogger/build/lib/$(uname -m):${LD_LIBRARY_PATH}"
echo LD_LIBRARY_PATH == ${LD_LIBRARY_PATH}
export CC_LOGGER_FILE="${PWD}/compile_commands.json"
export CC_LOGGER_KEEP_LINK=true
# Executable names the logger should treat as compiler-like tools.
export CC_LOGGER_GCC_LIKE="gcc:g++:clang:ar:python"
export CC_LOGGER_JAVA_LIKE="javac"
export CC_LOGGER_CUSTOM_LIKE="moc:uic:rcc:qmake:lrelease:install:msgfmt:dbus-binding-tool:glib-genmarshal"
# Per-tool hints: which flag names the output file / carries options.
export CC_LOGGER_OUTPUT_ARG_uic='-o'
export CC_LOGGER_OUTPUT_ARG_rcc='-o'
export CC_LOGGER_OPTION_ARG_rcc='-name'
export CC_LOGGER_OUTPUT_ARG_moc='-o'
export CC_LOGGER_OPTION_ARG_moc='--include'
export CC_LOGGER_OUTPUT_ARG_qmake='$-1'
export CC_LOGGER_OPTION_ARG_qmake='-install'
export CC_LOGGER_OUTPUT_ARG_lrelease='-qm'
export CC_LOGGER_OUTPUT_ARG_install='$-1'
export CC_LOGGER_OPTION_ARG_install='-m'
export CC_LOGGER_OUTPUT_ARG_msgfmt='-o:--output-file'
export CC_LOGGER_OUTPUT_ARG_dbus_binding_tool='--output'
export CC_LOGGER_OUTPUT_ARG_glib_genmarshal='--output'
# $MAKE_COMMAND, when set, replaces the default make invocation wholesale.
if [ x"$MAKE_COMMAND" != x ]; then
make_command="echo '$MAKE_COMMAND' | bash"
fi
echo $make_command
CodeChecker log -k -o compile_commands.json -b "$make_command"
#$@
| true
|
99b2e5e9311799dde4fd88dc989b338860fe1779
|
Shell
|
tvdtb/ad-docker-ws
|
/99_config_companion/server/docker-cmd.sh
|
UTF-8
| 143
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/sh
# Container entrypoint: dump the given config file every 5 seconds so
# changes (e.g. via a mounted volume) are visible in the container logs.
CONFIG=$1
echo Startup, config file = $CONFIG
# Infinite loop: timestamped separator, then the file's current contents.
while true; do
echo $(date) ----------------------------
cat $CONFIG
sleep 5
done
| true
|
7b145d116dd5ddc4648270215663e2a174417e13
|
Shell
|
flecoqui/azure
|
/azure-quickstart-templates/201-vmss-debian-glusterfs-mysql-autoscale/install-apache.sh
|
UTF-8
| 3,594
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
# This bash file install apache on debian
# Parameter 1 hostname
# Also mounts a GlusterFS share, installs a MySQL client, and writes a
# diagnostic index page plus an Apache vhost for the VM scale set.
wm_hostname=$1
wm_ipaddr=`ip addr show eth0 | grep global`
environ=`env`
echo "Environment before installation: $environ"
echo "Installation script start : $(date)"
echo "Apache Installation: $(date)"
echo "##### wm_hostname: $wm_hostname"
echo "Installation script start : $(date)"
# Detect OS family/version; Debian version drives the Gluster repo pick.
ARCH=$(uname -m | sed 's/x86_//;s/i[3-6]86/32/')
if [ -f /etc/lsb-release ]; then
. /etc/lsb-release
OS=$DISTRIB_ID
VER=$DISTRIB_RELEASE
elif [ -f /etc/debian_version ]; then
OS=Debian # XXX or Ubuntu??
VER=$(cat /etc/debian_version)
elif [ -f /etc/redhat-release ]; then
# TODO add code for Red Hat and CentOS here
# NOTE(review): the literal '...' below is a placeholder and will fail
# with "...: command not found" if this branch ever runs.
...
else
OS=$(uname -s)
VER=$(uname -r)
fi
echo "OS=$OS version $VER $ARCH"
# Apache installation
apt-get -y update
apt-get -y install apache2
# GlusterFS client installation
# Major Debian version (strip the minor ".x" part).
NUM=`echo $VER | sed 's/\(\.\)[0-9]*//g'`
if [ $NUM -eq 7 ];
then
# install a version compliant with Debian 7
wget -O - http://download.gluster.org/pub/gluster/glusterfs/3.7/LATEST/rsa.pub | apt-key add -
echo deb http://download.gluster.org/pub/gluster/glusterfs/3.7/LATEST/Debian/wheezy/apt wheezy main > /etc/apt/sources.list.d/gluster.list
fi
# install a version compliant with Debian 8
if [ $NUM -eq 8 ];
then
wget -O - http://download.gluster.org/pub/gluster/glusterfs/LATEST/rsa.pub | apt-key add -
echo deb http://download.gluster.org/pub/gluster/glusterfs/LATEST/Debian/jessie/apt jessie main > /etc/apt/sources.list.d/gluster.list
fi
apt-get update -y
apt-get install glusterfs-client -y
#install mysql client
apt-get install mysql-client -y
# NOTE(review): hard-coded DB credentials in a provisioning script —
# consider injecting them via template parameters instead.
wm_mysql=`mysql --user=admin --password=VMP@ssw0rd -h 10.0.0.100 -e "show databases;"`
# firewall configuration
iptables -A INPUT -p tcp --dport 80 -j ACCEPT
iptables -A INPUT -p tcp --dport 443 -j ACCEPT
# glusterfs mount
mkdir /shareddata
mount -t glusterfs gfs1vm0:gfs1vol /shareddata
wm_nfs=`df -h /shareddata`
directory=/var/www/html
if [ ! -d $directory ]; then
mkdir $directory
fi
# Diagnostic landing page: shows IP, DB list and mounted share status.
cat <<EOF > $directory/index.html
<html>
<head>
<title>Sample "Hello from Azure Debian VM scaleset $wm_hostname" </title>
</head>
<body bgcolor=white>
<table border="0" cellpadding="10">
<tr>
<td>
<h1>Hello from Azure Debian VM scaleset $wm_hostname</h1>
</td>
</tr>
</table>
<p>This is the home page for the Apache test on Azure VM</p>
<p>Local IP address:</p>
<p> $wm_ipaddr</p>
<p>MYSQL databses:</p>
<p> $wm_mysql</p>
<p>NFS partition:</p>
<p> $wm_nfs</p>
<ul>
<li>To the VM Scale Set: <a href="http://$wm_hostname/html/index.html">http://$wm_hostname/html/index.html</a>
</ul>
<ul>
<li>To <a href="http://www.microsoft.com">Microsoft</a>
<li>To <a href="https://portal.azure.com">Azure</a>
</ul>
</body>
</html>
EOF
# Replace any default vhosts with one serving /var/www for this host.
rm -f /etc/apache2/sites-enabled/*.conf
echo "Configuring Web Site for Apache: $(date)"
cat <<EOF > /etc/apache2/sites-enabled/html.conf
ServerName "$wm_hostname"
<VirtualHost *:80>
ServerAdmin webmaster@localhost
ServerName "$wm_hostname"
DocumentRoot /var/www
<Directory />
Options FollowSymLinks
AllowOverride None
</Directory>
# Possible values include: debug, info, notice, warn, error, crit,
# alert, emerg.
LogLevel warn
ErrorLog /var/log/apache2/evaluation-error.log
CustomLog /var/log/apache2/evaluation-access.log combined
</VirtualHost>
EOF
apachectl restart
exit 0
| true
|
2cab573c96071a7cbf177c4ee5cafe7fe719626f
|
Shell
|
jonasvdd/SPRAAK-docker
|
/convert_tel_data.sh
|
UTF-8
| 769
| 3.265625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Convert a speech database to a telephone-bandwidth version with the
# SPRAAK toolkit: build a SPRAAK corpus file listing every wav, then run
# spr_sigp with the tel_flt.ssp signal-processing chain.
# variables: CHANGE THESE!
dbase=name_of_my_dbase
wav_src=path_to_the_root_of_your_dbase
wav_tel_dst=path_to_where_the_telephone_version_of_the_dbase_must_be_written
# find all wav files and make a corpus file; since we have no orthographic transcription, we use '#' (silence) as transcription
find "${wav_src}" -type f -name '*.wav' -printf '%P\n' | sort | gawk '{C[++ndx]=gensub("\\.wav$","\t#\t0 -1",1,$0);} END {printf(".spr\nDIM1\t%i\nDATA\tCORPUS\nTYPE\tSTRING\nFORMAT\tASCII\n#\n",ndx);for(ndx=0;++ndx in C;printf("%s\n",C[ndx]));}' > "${dbase}.cor"
# process the data (convert to telephone alike data); each file will be processed only once!
spr_sigp --mkdirs -i "${wav_src}" -c "${dbase}.cor" -Si wav -So wav -o "${wav_tel_dst}" -ssp tel_flt.ssp
| true
|
caaecfab8eaed75d9b64254652c4fe681d388a7c
|
Shell
|
nextmovesoftware/CEX
|
/src/doc/man/mansrc/xrefmans.sh
|
UTF-8
| 1,985
| 4.21875
| 4
|
[] |
no_license
|
#!/bin/sh
#
# Cross references one set of man pages against another. This is used to
# add cross-references to the section 5 man pages (object descriptions),
# referring each object to the section 3 man pages (functions) that apply.
#
# Each section 3 .mansrc file has a non-printing <objects> section containing
# a list of the objects to which the function applies. This shell script
# takes each section 5 man page, gets the object's name by striping off the
# .mansrc suffix, and searches the section 3 man pages for functions that
# mention the object's name. When the list is complete, each function is
# appended to the nroff version of the section 5 man page (NOT to .mansrc!).
#
# Takes two parameters: the name of the directory that is to be searched
# for applicable functions ("srcdir"), and the name of the directory to
# which cross references should be added ("destdir").
#
# Modified by Dave Weininger from the Daylight development utility
# `do_man_crossrefs.sh' written by Craig James
#
if [ $# != 2 ] ; then
echo "error - requires two parameters"
echo "usage: $0 srcdir destdir"
exit 1
fi
srcdir=$1
destdir=$2
#----------------------------------------------------------------------------
# Search each source file for its object references and crossreference
#----------------------------------------------------------------------------
(
cd $srcdir
for src in *.mansrc ; do
name=`basename $src .mansrc`
echo "looking for references to objects in "$name" ..."
# Skip everything up to the "<objects>" marker line, then emit the
# remainder of the file (the object list) via the trailing cat.
objects=`cat $src |
(
while true ; do
read line
if [ $? != 0 -o "$line" = "<objects>" ] ; then
break;
fi
done
cat
)`
# Append "name(3cx)" to each referenced object's section 5 page.
for dest in $objects ; do
destfile=$destdir/$dest.5cx
if [ ! -w $destfile ] ; then
echo "Unknown object name: $dest ($destfile)"
else
echo "${name}(3cx)" >>$destfile
fi
done
done
)
| true
|
d6fcfa78da18f63db18e6a933411e83ff78b1cf5
|
Shell
|
cdelatte/puppet-os_docker
|
/support/run-update.sh
|
UTF-8
| 418
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash -xve
# Build the os_docker-updater image and run a config-update script inside
# it, mounting this repository's files for input and output.
# Optional argument "tesora" selects the Tesora variant of the updater.
BASEDIR=$(cd "$(dirname "$0")"; pwd)
docker build -t os_docker-updater .
COMMAND=/update-configs.sh
if [ $# -eq 1 ]; then
if [ "$1" == "tesora" ]; then
COMMAND=/tesora-update-configs.sh
fi
fi
docker run --rm \
--hostname os-docker \
-v $BASEDIR/$COMMAND:/$COMMAND \
-v $BASEDIR/exceptions/:/exceptions/ \
-v $BASEDIR/../files/:/configs \
os_docker-updater \
$COMMAND
| true
|
34eea7d8649f846956d35083696d606a2ea2071d
|
Shell
|
Tsaregorodtsev/rsha
|
/praks2/lisa_kasutaja
|
UTF-8
| 367
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
#
# lisa_kasutaja — create a Linux user account and report the outcome.
# Usage: lisa_kasutaja <username>
if [ $# -ne 1 ]; then
echo "kasutus juhend: $0 kasutajanimi"
else
kasutajanimi=$1
# Create the account with a home directory and a bash login shell.
useradd "$kasutajanimi" -m -s /bin/bash
rc=$?
if [ $rc -eq 0 ]; then
echo "kasutaja nimega $kasutajanimi on lisatud susteemis"
# Show the new passwd entry and the created home directory.
grep "$kasutajanimi" /etc/passwd
ls -la /home/"$kasutajanimi"
else
# Fixed: the original echoed the undefined variable $kasutaja and a
# bare '$' instead of useradd's exit code (read after echo reset $?).
echo "probleem kasutaja $kasutajanimi lisamisega"
echo "probleem on kood $rc"
fi
fi
| true
|
fbbaf75b75e984ee686cca6cd2b92c25da8f578f
|
Shell
|
asokani/docker-base
|
/letsencrypt-cron.sh
|
UTF-8
| 579
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# Cron job: renew a Let's Encrypt certificate with acme_tiny when a CSR
# exists, installing the signed certificate only on success.
if [ -f /etc/secrets/letsencrypt/domain.csr ]
then
SIGNED_CRT="$(mktemp --suffix=signed_crt)"
sudo -u acme date >> /var/log/acme/acme_tiny.log
# acme_tiny answers the HTTP-01 challenge from the acme-dir webroot and
# prints the signed certificate to stdout.
sudo -u acme /usr/bin/python /opt/acme_tiny.py --account-key /etc/secrets-global/letsencrypt/letsencrypt-account.key --csr /etc/secrets/letsencrypt/domain.csr --acme-dir /var/app-cert/.well-known/acme-challenge > $SIGNED_CRT 2>> /var/log/acme/acme_tiny.log
# Install only when acme_tiny produced non-empty output; the openssl
# verify step is intentionally left commented out.
if [ -s $SIGNED_CRT ] # && /usr/bin/openssl verify $SIGNED_CRT
then
chmod 400 $SIGNED_CRT
mv $SIGNED_CRT /etc/secrets/letsencrypt/signed.crt
fi
fi
| true
|
e3092335236e124aeb770516ec764da03682aef3
|
Shell
|
authlete/authlete-jose
|
/bin/jose-generator
|
UTF-8
| 1,371
| 3.046875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# LICENSE
# -------
#
# Copyright (C) 2018 Authlete, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
MAIN_CLASS=com.authlete.jose.tool.JoseGenerator
#--------------------------------------------------
# Entry point
#--------------------------------------------------
# Runs the JoseGenerator CLI via Maven's exec plugin, forwarding all
# script arguments to the Java main class.  Tests and javadoc generation
# are skipped to keep start-up fast; exec replaces this shell with mvn.
__main()
{
# Top directory of this source tree.
local top_dir=$(dirname $(dirname $0))
# Command line to execute.
local command_line=(
mvn -f "${top_dir}/pom.xml"
-q exec:java
-Dexec.mainClass="${MAIN_CLASS}"
-Dexec.args="$*"
-Dmaven.test.skip=true
-Dmaven.javadoc.skip=true
)
# Execute the command line.
exec "${command_line[@]}"
}
#--------------------------------------------------
# S T A R T
#--------------------------------------------------
__main "$@"
| true
|
1be9d0490a3f5de4852e6dde916b51700adbc72a
|
Shell
|
emmanuele-abbenante/transaction-report
|
/src/main/script/load_transactions.sh
|
UTF-8
| 912
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/sh
# Load transaction XML exports into the transaction-report application,
# one YYYY-MM period at a time.
APP_PATH=/home/emmanuele/workspace/java/transaction-report/target/transaction-report-0.0.1-SNAPSHOT-fat.jar
TRANSACTIONS_DIR=/home/emmanuele/Scrivania/Documenti/Finanze\ personali/Movimenti

# load_range FIRST_YEAR LAST_YEAR FILE_SUFFIX TYPE
# Runs the loader for every month of every year in [FIRST_YEAR, LAST_YEAR],
# reading "$TRANSACTIONS_DIR/YYYY-MM<FILE_SUFFIX>" with record type TYPE.
# (Replaces the two copy-pasted loops of the original.)
load_range()
{
for year in $(seq "$1" "$2")
do
	for month in $(seq -w 1 12)
	do
		echo "Period: "$year-$month
		echo
		java -jar $APP_PATH "$TRANSACTIONS_DIR/$year-$month""$3" "$4"
		echo "========================================================================"
	done
done
}

# Loading credit card transactions
load_range 2017 2018 _carta_credito.xml CREDIT_CARD
# Loading current account transactions
load_range 2015 2018 _conto_corrente.xml CURRENT_ACCOUNT
| true
|
3cf54e2f50a38118bb49ada50f4bd23cc7803bad
|
Shell
|
emazzotta/dotfiles
|
/bin/videorotate
|
UTF-8
| 277
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# videorotate — set a rotation metadata flag on a video without
# re-encoding it.  Output is written next to the input as
# <stem>_rotated.<ext>.
#
# Usage: videorotate <file_name> <rotation [90,-90,180,...]>
set -e

# Stem of FILE: everything before the LAST dot.  (The original used
# ${1%%.*} / ${1#*.}, which split at the FIRST dot and mangled
# multi-dot names such as "my.video.mp4".)
stem_of() { printf '%s' "${1%.*}"; }
# Extension of FILE: everything after the LAST dot.
ext_of() { printf '%s' "${1##*.}"; }

if test "${2}"; then
    extension=$(ext_of "${1}")
    name=$(stem_of "${1}")
    # Streams are copied untouched; only the rotate metadata tag changes.
    ffmpeg -i "${1}" -map_metadata 0 -metadata:s:v rotate="${2}" -codec copy "${name}_rotated.${extension}"
else
    echo "usage: $(basename "${0}") <file_name> <rotation [90,-90,180,...]>"
fi
| true
|
6884a647ab40ebe7ac2c7188e603b6e70cfa4efa
|
Shell
|
sye/hax
|
/makeHTML.sh
|
UTF-8
| 377
| 2.859375
| 3
|
[] |
no_license
|
#! /bin/bash
# script to make every cpp/h file to html
# Uses Vim's :TOhtml command to render syntax-highlighted HTML for each
# source file, then publishes the results to the public_html directory.
find . -iname "*.cpp" -exec vim {} -c TOhtml -c wq -c q \; #render them .cpp
find . -iname "*.h" -exec vim {} -c TOhtml -c wq -c q \; #render them .h
cd ~/public_html/cprog/lab3
rm -f *.html #clean the old files
cd ~/cprog10/lab3/hax
find . -iname "*.html" -exec mv {} ~/public_html/cprog/lab3 \; #copy them to homepage
| true
|
d96f8485750d43bca30e2337016b7a03b2fd0f0b
|
Shell
|
necrosato/uoregon-secday
|
/launch_agent_defense/encrypt.sh
|
UTF-8
| 707
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
# Encrypt every iChat/Messages archive in place: each .ichat file gets a
# fresh random AES key; that key is then RSA-encrypted to PUBKEY, and
# all plaintext (archives and keys) is deleted.
PUBKEY=~/.msgprotect/id_rsa.pub
ARCHIVE=~/Library/Containers/com.apple.iChat/Data/Library/Messages/Archive
MESSAGES=~/Library/Messages/Archive
DPATH=''
# Pick whichever archive location exists on this macOS version.
cd ${ARCHIVE}
if [ $? -eq 0 ]; then
DPATH=$ARCHIVE
fi
cd ${MESSAGES}
if [ $? -eq 0 ]; then
DPATH=$MESSAGES
fi
cd ${DPATH}
mkdir keys
find ./* -iname "*.ichat" | while read f
do
KEY=keys/$(basename "${f}").key
# NOTE(review): 'openssl rand' expects options before the byte count
# ('openssl rand -out FILE 32'); confirm this argument order works on
# the target OpenSSL version.
openssl rand 32 -out "$KEY"
# NOTE(review): '-k' takes a literal password string; the 'file:' prefix
# is only honored by '-pass'.  As written the passphrase is the literal
# text "file:<path>" — verify the decryption side matches.
openssl enc -aes-256-cbc -salt -in "${f}" -out "${f}.enc" -k file:"${KEY}"
openssl rsautl -encrypt -pubin -inkey <(ssh-keygen -e -f "${PUBKEY}" -m PKCS8) -in "$KEY" -out "${KEY}.enc" #encrypt key
rm "$KEY" #delete plaintext key
rm "${f}" #delete plaintext archive
done
| true
|
ab6c82512a81ddb341971c3fa9a1079672c77a1a
|
Shell
|
prezi/gradle-profiler
|
/src/main/resources/org/gradle/profiler/perf/configure_sudo_access.sh
|
UTF-8
| 819
| 3.390625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Grant a user passwordless sudo for the commands gradle-profiler needs
# when profiling with Linux perf, via a drop-in under /etc/sudoers.d.
#   $1 - user to configure (defaults to $USER); must not be root
#   $2 - Gradle user home (defaults to $HOME/.gradle-profiler)
realuser=${1:-$USER}
gradle_user_home=${2:-$HOME/.gradle-profiler}
if [ "$realuser" = "root" ]; then
echo "usage: $0 [userid] [gradle_user_home]"
exit 1
fi
# Write the rules to a PID-suffixed temp file first so a broken file is
# never installed directly.
cat >/tmp/sudoers_perf$$ <<EOF
Defaults!/usr/bin/perf use_pty
$realuser ALL=(ALL) NOPASSWD: /usr/bin/perf, /bin/kill, $gradle_user_home/tools/brendangregg/Misc/java/jmaps-updated
EOF
# verify configuration in temp file
sudo visudo -c -f /tmp/sudoers_perf$$ || { echo "sudo configuration couldn't be verified."; exit 1; }
# install to /etc/sudoers.d directory
sudo chmod 0660 /tmp/sudoers_perf$$ \
&& sudo chown root:root /tmp/sudoers_perf$$ \
&& sudo mv /tmp/sudoers_perf$$ /etc/sudoers.d/gradle-profiler_$realuser
# verify configuration
sudo visudo -c -f /etc/sudoers.d/gradle-profiler_$realuser && echo "sudo configuration successful."
| true
|
15b249e88f5ccabac30da6391dfcecbbb82ec162
|
Shell
|
xiangchunyang/pprof
|
/internal/binutils/testdata/build_mac.sh
|
UTF-8
| 573
| 3.546875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -x
# This is a script that generates the test MacOS executables in this directory.
# It should be needed very rarely to run this script. It is mostly provided
# as a future reference on how the original binary set was created.
set -o errexit

# Bug fix: the original wrote the source to /tmp/hello.cc but compiled
# /tmp/hello.c below, so the build either failed or picked up a stale
# file.  The written filename now matches the clang invocation.
cat <<EOF >/tmp/hello.c
#include <stdio.h>
int main() {
  printf("Hello, world!\n");
  return 0;
}
EOF
cat <<EOF >/tmp/lib.c
int foo() {
  return 1;
}
int bar() {
  return 2;
}
EOF
cd $(dirname $0)
rm -rf exe_mac_64* lib_mac_64*
# -g keeps debug info so the binutils tests can resolve symbols.
clang -g -o exe_mac_64 /tmp/hello.c
clang -g -o lib_mac_64 -dynamiclib /tmp/lib.c
| true
|
36775edd80f5e3a2e77e70500a1f749f47d6c578
|
Shell
|
wess/alloy
|
/alloy/__helpers.sh
|
UTF-8
| 246
| 2.71875
| 3
|
[] |
no_license
|
#
# __helpers.sh
# alloy
#
# Author: Wess Cope (you@you.you)
# Created: 11/20/2020
#
# Copywrite (c) 2020 Wess.io
#
#!/usr/bin/env bash

# pause MESSAGE... — display the message and wait for the user to press
# Enter.  -r keeps backslashes in the typed input literal.
pause() {
  read -rp "$*"
}

# quit MESSAGE... — print the message to stderr and exit with status 1.
quit() {
  echo >&2 "$@"
  exit 1
}

# define VAR — read all of stdin (typically a heredoc) into the variable
# named VAR, verbatim.
# Fixes: the original's IFS='\n' set IFS to the two characters '\' and
# 'n' (stripping them from the value's ends); IFS= preserves the text
# exactly.  ${1} is now quoted and -r keeps backslashes literal.
define() { IFS= read -r -d '' "$1" || true; }
| true
|
7e5ead5d7d705b1eb34640d8a11b31c5d1b762fc
|
Shell
|
Woona/src
|
/school/class
|
UTF-8
| 7,516
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
# class — live countdown display for a school block schedule.
# Global flags toggled by the argument parser further down.
debug=false; sday=false; repeat=true; disp=false
# calc: interactively convert an hour/minute pair into total minutes
# since midnight, print the result, and exit the script.
# NOTE(review): read without -r — backslashes in input are interpreted.
function calc {
read -p "Hour:" hour
read -p "minutes:" minute
awk "BEGIN {print $hour*60+$minute}"
exit
}
# graph: render a 20-cell progress bar for percentage $1 into the global
# variable $graph (e.g. "[**********__________]").
# Each '*' cell represents 5 percentage points; remaining cells are '_'.
# Values above 100 are capped at a full bar, negatives at an empty one.
# (Replaces the original 20-branch elif ladder — which was exactly
# floor(pct/5) clamped to [0,20] — with the equivalent arithmetic.)
function graph {
	local pct=$1
	local filled=$(( pct / 5 ))
	if (( filled > 20 )); then filled=20; fi
	if (( filled < 0 )); then filled=0; fi
	local bar="" i
	for (( i = 0; i < 20; i++ )); do
		if (( i < filled )); then
			bar+="*"
		else
			bar+="_"
		fi
	done
	graph="[$bar]"
}
# percent: integer percentage of $1 relative to $2.
# Sets the globals `percent` (the number) and `ptd` (printable "N%").
function percent {
	local done_part=$1 total=$2
	percent=$(( done_part * 100 / total ))
	ptd="${percent}%"
}
# remain: set $rem to "$pe - $hadd minutes remaining".
# NOTE(review): neither $pe nor $hadd is assigned anywhere in this
# script — this helper looks stale/unused; confirm before relying on it.
function remain {
let rem=$pe-$hadd
rem=$(echo $rem minutes remaining)
}
# current: snapshot "now" into globals —
#   seconds: seconds until the next whole minute,
#   minutes: minutes elapsed since midnight,
#   days:    zero-based day of the year.
function current {
seconds=$(awk "BEGIN {print 60-$(date "+%-S")}")
minutes=$(awk "BEGIN {print "$(let date=$(date "+%-H")*60 && echo $date)"+"$(date "+%-M")"}")
days=$(awk "BEGIN {print $(date "+%-j")-1}")
}
# blockk: map the current minute-of-day ($minutes) onto the school
# schedule.  Sets globals: block (slot id), bstart (slot start minute),
# btime (slot length in minutes), bdisplay (countdown label).
# Minute markers: 467=7:47, 480=8:00, 669=11:09, 893=14:53, etc.
function blockk {
if [[ $minutes -ge 467 ]] && [[ $minutes -le 472 ]]; then
block=-1; bstart=467; btime=5; bdisplay="Time till transition to first block"
elif [[ $minutes -ge 472 ]] && [[ $minutes -le 475 ]]; then
block=0; bstart=472; btime=3; bdisplay="Time till announcements"
elif [[ $minutes -ge 475 ]] && [[ $minutes -le 480 ]]; then
block=z5; bstart=475; btime=5; bdisplay="Time till first block"
elif [[ $minutes -ge 480 ]] && [[ $minutes -le 572 ]]; then
block=1; bstart=480; btime=92; bdisplay="Time till transition to second block"
elif [[ $minutes -ge 572 ]] && [[ $minutes -le 577 ]]; then
block=15; bstart=572; btime=5; bdisplay="Time till second block"
elif [[ $minutes -ge 577 ]] && [[ $minutes -le 669 ]]; then
block=2; bstart=577; btime=92; bdisplay="Time till third block and lunch"
elif [[ $minutes -ge 669 ]] && [[ $minutes -le 796 ]]; then
block=3; bstart=669; btime=127; bdisplay="Time till transition to fourth block"
elif [[ $minutes -ge 796 ]] && [[ $minutes -le 801 ]]; then
block=35; bstart=796; btime=5; bdisplay="Time till fourth block"
elif [[ $minutes -ge 801 ]] && [[ $minutes -le 893 ]]; then
block=4; bstart=801; btime=92; bdisplay="Time till school ends"
elif [[ $minutes -ge 893 ]] || [[ $minutes -le 467 ]]; then
# Overnight slot wraps past midnight: shift early-morning times a day
# forward so the countdown math stays monotonic.
block=5; bstart=893; btime=1014; bdisplay="Time till doors open"
if [[ $minutes -le 467 ]]; then
minutes=$(($minutes+1440))
fi
fi
}
# Break the remaining time in the current slot into
# rminutes/rhours/rdays/rweeks/ryears and set the unit separator ($sep).
#
# Call forms:
#   resolve [scientific]                  - use the globals $btime/$bstart
#   resolve <btime> <bstart> [scientific] - override btime/bstart from args
#
# In "scientific" mode each r* variable is the fractional TOTAL in that
# unit; otherwise the values form a mixed breakdown (weeks, days, ...).
# Reads the global $minutes (minutes elapsed since midnight).
function resolve {
    # Any digits left after stripping letters => numeric args were given.
    if [[ -n $(echo $* | sed 's/[A-Z,a-z]//g') ]]; then
        btime=$1; bstart=$2
        if [[ $* = *"scientific"* ]]; then
            s=scientific
        else
            s=nope
        fi
    else
        if [[ $1 = scientific ]]; then
            s=scientific
        else
            s=nope
        fi
    fi
    if [[ $s = scientific ]]; then
        rminutes=$(awk "BEGIN {print $btime-($(awk "BEGIN {print $minutes-$bstart}"))}")
        rhours=$(awk "BEGIN {print $rminutes/60}")
        rdays=$(awk "BEGIN {print $rhours/24}")
        rweeks=$(awk "BEGIN {print $rdays/7}")
        ryears=$(awk "BEGIN {print $rdays/365.242}")
        sep=" total or,"
    else
        rminutes=$(($btime-$(($minutes-$bstart))))
        rhours=$(($rminutes/60)); rminutes=$(($rminutes-$(($rhours*60))))
        rdays=$(($rhours/24)); rhours=$(($rhours-$(($rdays*24))))
        rweeks=$(($rdays/7)); rdays=$(($rdays-$((rweeks*7))))
        # BUGFIX: subtract ryears*365 days; the original subtracted
        # ryears/365, a no-op.  NOTE(review): ryears is computed after the
        # weeks were already removed (rdays < 7 here), so it is effectively
        # always 0 -- the year term looks misplaced; confirm intent.
        ryears=$(($rdays/365)); rdays=$(($rdays-$(($ryears*365))))
        sep=","
    fi
}
# Consume command-line flags.  Recognised flags enable a feature and clear
# the usage hint; anything else requests the usage message.
while [[ -n $* ]]; do
    case $1 in
        scientific|science|s) science=scientific; usage=false ;;
        calc|c)               calc; usage=false ;;
        debug|d)              debug=true; usage=false ;;
        *year|y)              year=true; usage=false ;;
        -dyc)                 science=scientific; debug=true; year=true ;;
        -day)                 sday=true ;;
        e)                    disp=true ;;
        *)                    usage=true ;;
    esac
    shift
done
# Main display loop.  Runs once when $disp is unset/false, or forever
# (refreshing every second) when "e" was passed on the command line.
# NOTE(review): $repeat is not initialised anywhere in this chunk; it is
# presumably set to "true" earlier in the file -- confirm.
while [[ $repeat = true ]]; do
if [[ $disp = true ]]; then
clear
fi
if [[ $usage = true ]]; then
echo "Usage [ class -year -debug -calc -scientific -day ]"; exit
fi
# Refresh the time globals, pick the current schedule slot, then render a
# countdown line plus an ASCII progress bar for that slot.
current; blockk; percent $(($minutes-$bstart)) $btime; graph $percent; resolve $science
echo "$bdisplay: $ryears years$sep $rweeks weeks$sep $rdays days$sep $rhours hours$sep $rminutes minutes, and $seconds seconds, $ptd complete, $(($minutes-$bstart)) minutes gone by so far"
echo $graph
# Unless school is out (block 5), also show the whole-school-day countdown
# (the day spans minute 467 to 893, i.e. 426 minutes).
if [[ "$block" -ne '5' ]]; then
percent $(($minutes-467)) 426; graph $percent; resolve 467 426 $science
echo "Time till school day ends: $ryears years$sep $rdays days$sep $rhours hours$sep $rminutes minutes, and $seconds seconds, $ptd complete, $((minutes-467)) minutes gone by so far"
echo $graph
fi
# Optional whole-school-year countdown; constants are minutes
# (191520 = offset to the start of the year, 399773 = year length).
if [[ $year = true ]]; then
minutes=$(($days*1440+191520+$minutes))
percent $(($minutes-0)) 399773; graph $percent; resolve 0 399773 $science
echo "Time till school year ends: $ryears years$sep $rweeks weeks$sep $rdays days$sep $rhours hours$sep $rminutes minutes, and $seconds seconds, $ptd complete"
echo $graph
fi
if [[ $debug = true ]]; then
echo "in about $SECONDS seconds"
fi
# One-shot unless continuous-display mode ("e") was requested.
if [[ $disp = false ]]; then
repeat=false
else
sleep 1
fi
done
| true
|
207dc7dfcfd72b617899f79d0eb85cc8f0b04de0
|
Shell
|
cuvidk/os-setup
|
/install.sh
|
UTF-8
| 4,449
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/sh
WORKING_DIR="$(realpath "$(dirname "${0}")")"
CONFIG_FILE="${WORKING_DIR}/install.config"
. "${WORKING_DIR}/config/shell-utils/util.sh"
# Print invocation help.
usage() {
    print_msg "Usage: ${0} --config <filename>\n"
}

# Succeed iff running as root (uid 0).  Quoted substitution so an empty
# result cannot break the test.
check_root() {
    test "$(id -u)" -eq 0
}

# Succeed iff archlinux.org answers pings (basic connectivity probe).
check_conn() {
    ping -c 4 archlinux.org
}

# Succeed iff booted in UEFI mode: efivars must be mounted and non-empty.
# Fixed: use `&&` instead of the obsolescent `-a` inside `[`, and $(...)
# instead of backticks (which ran `ls` even when the directory was absent).
check_uefi_boot() {
    [ -d /sys/firmware/efi/efivars ] && [ "$(ls /sys/firmware/efi/efivars | wc -l)" -gt 0 ]
}
# Validate the install config file.  Succeeds iff ${CONFIG_FILE} exists and
# contains a hostname line, a root_pass line, and only well-formed user
# lines (user = name:password:is_admin).
# Fixed: `==` inside `[` is a bashism under this file's #!/bin/sh shebang;
# use the portable `=`.  The final grep's file argument is now quoted.
# NOTE(review): `local` is also non-POSIX but works in dash/bash; left as-is
# to match the rest of the file.
check_config_file() {
    local hostname_regex='^hostname[ \t]*=[ \t]*[[:alnum:]]+$'
    local root_pass_regex='^root_pass[ \t]*=[ \t]*.+$'
    local user_regex='^user[ \t]*=[ \t]*[[:alnum:]]+:.+:[0|1]$'
    [ -f "${CONFIG_FILE}" ] &&
    [ -n "$(grep -E "${hostname_regex}" "${CONFIG_FILE}")" ] &&
    [ -n "$(grep -E "${root_pass_regex}" "${CONFIG_FILE}")" ] &&
    [ "$(grep -c -E "${user_regex}" "${CONFIG_FILE}")" = "$(grep -c -E '^user' "${CONFIG_FILE}")" ]
}
# Refresh the pacman package database.
update_package_database() {
    pacman -Sy
}
# Enable NTP time synchronisation.
update_system_clock() {
    timedatectl set-ntp true
}
# Rank the configured pacman mirrors by speed (keeps a .backup copy of the
# original mirrorlist; the filtered list keeps only active Server lines).
setup_download_mirrors() {
    pacman -S --noconfirm pacman-contrib
    grep -E "Server = .+" /etc/pacman.d/mirrorlist >/etc/pacman.d/mirrorlist.filtered
    cp /etc/pacman.d/mirrorlist /etc/pacman.d/mirrorlist.backup
    rankmirrors /etc/pacman.d/mirrorlist.filtered >/etc/pacman.d/mirrorlist
}
# Bootstrap the base system into /mnt.
install_essentials() {
    pacstrap /mnt base linux linux-firmware
}
# Append a UUID-based fstab for the new system.
generate_fstab() {
    genfstab -U /mnt >>/mnt/etc/fstab
}
# Copy this setup tree into the new root so post_chroot.sh can run there.
prepare_change_root() {
    cp -R "${WORKING_DIR}" /mnt
}
# Run the post-install steps inside the new system.
exec_arch_chroot() {
    arch-chroot /mnt /os-setup/post_chroot.sh
}
# Remove the copied setup tree from the new system.
clean() {
    rm -rf "/mnt/$(basename "${WORKING_DIR}")"
}
# Orchestrate the whole installation:
#   1. parse --config, 2. validate environment, 3. install the base system,
#   4. chroot into it for post-install steps, 5. clean up.
# Returns a distinct non-zero code for each failure stage (1..9).
main() {
    setup_verbosity "${@}"

    # --config is mandatory (the trailing space requires an argument after it).
    [ -z "$(echo ${@} | grep '\-\-config ')" ] && usage && return 1

    # Consume options; only "--config <file>" is recognised.
    while [ $# -gt 0 ];
    do
        local option=${1}
        case ${option} in
            "--config")
                cp "${2}" "${CONFIG_FILE}"
                shift
                shift
                ;;
            *)
                echo "Unknown option ${option}; ignoring"
                shift
                ;;
        esac
    done

    perform_task check_config_file "Checking for valid config file"
    [ $? != 0 ] && print_msg "ERR: Invalid config file.\n" && return 2

    perform_task check_uefi_boot 'Checking if system is booted in UEFI mode '
    [ $? != 0 ] && print_msg 'The installer scripts are limited to UEFI systems.\n' && return 3

    perform_task check_root 'Checking for root '
    [ $? != 0 ] && print_msg 'This script needs to be run as root.\n' && return 4

    perform_task check_conn 'Checking for internet connection '
    [ $? != 0 ] && print_msg 'Unable to reach the internet. Check your connection.\n' && return 5

    perform_task update_package_database 'Updating package database '
    perform_task update_system_clock 'Updating system clock '
    perform_task setup_download_mirrors 'Sorting download mirrors (this will take a while) '

    perform_task install_essentials 'Installing essential arch linux packages '
    local ret=$?
    [ ${ret} != 0 ] && print_msg "ERR: Installing essential packages exit code; ${ret}. \n" && return 6

    # Show the generated fstab to the operator for a sanity check.
    perform_task generate_fstab 'Generating fstab ' &&
    print_msg '################################################\n' &&
    print_msg '################# /mnt/etc/fstab ###############\n' &&
    print_msg '################################################\n' &&
    cat /mnt/etc/fstab >$(tty) &&
    print_msg '################################################\n'
    ret=$?
    [ ${ret} != 0 ] && print_msg "ERR: Generating fstab exit code: ${ret}.\n" && return 7

    perform_task prepare_change_root 'Preparing to chroot into the new system '
    ret=$?
    [ ${ret} != 0 ] && print_msg "ERR: Prepare chroot exit code: ${ret}.\n" && return 8

    print_msg '################################################\n'
    print_msg '#################### chroot ####################\n'
    print_msg '################################################\n'
    perform_task exec_arch_chroot
    # BUGFIX: capture the chroot status here; the original tested the stale
    # ${ret} left over from prepare_change_root above.
    ret=$?
    [ ${ret} != 0 ] && print_msg "ERR: arch-chroot returned ${ret}.\n"
    print_msg '################################################\n'

    perform_task clean 'Removing os setup files from the new system '

    check_for_errors
    if [ $? -eq 1 ]; then
        print_msg "[ WARNING ]: Errors encountered. Check $(log_file_name) for details.\n"
        return 9
    else
        print_msg "[ SUCCESS ]"
        return 0
    fi
}

main "${@}"
| true
|
cbd6c7491e3d03ba081ea8ffee70366bf7121131
|
Shell
|
dockhippie/memcached
|
/latest/overlay/usr/bin/healthcheck
|
UTF-8
| 193
| 2.75
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Container healthcheck for memcached: query the stats endpoint over TCP
# and report healthy only if a "STAT uptime ..." line comes back.
set -eo pipefail
source /usr/bin/entrypoint
# Ask memcached for its stats and keep the uptime line, if any.
# NOTE(review): under `set -e` a non-matching grep makes this assignment
# fail and the script exit 1 immediately -- still "unhealthy", but the
# explicit exit 1 below is then never reached; confirm this is intended.
CHECK="$(echo stats | nc 127.0.0.1 11211 | grep uptime 2>/dev/null)"
# Quoted "STAT" inside =~ is matched as a literal substring, not a regex.
if [[ "${CHECK}" =~ "STAT" ]]; then
exit 0
fi
exit 1
| true
|
a84c56cdcc705a9e49d7a4f384ac103744e2083c
|
Shell
|
akoenig/dotfiles-ng
|
/bin/helpers/profile
|
UTF-8
| 1,127
| 3.53125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
#
# Helper which exposes the path to the respective profile
# configuration file
#
# Author: André König <andre.koenig@posteo.de>
#
# Fixed: all test expansions are quoted so the checks behave correctly when
# variables are empty/unset or contain whitespace (e.g. `[ -z $VAR ]` with
# an unset VAR degenerated to the one-argument test `[ -z ]`).

SHELLTYPE="$(basename "/$SHELL")"

# First choice: the rc file matching the user's login shell.
if [ "$SHELLTYPE" = "bash" ]; then
  if [ -f "$HOME/.bashrc" ]; then
    PROFILE_CONFIGURATION="$HOME/.bashrc"
  elif [ -f "$HOME/.bash_profile" ]; then
    PROFILE_CONFIGURATION="$HOME/.bash_profile"
  fi
elif [ "$SHELLTYPE" = "zsh" ]; then
  PROFILE_CONFIGURATION="$HOME/.zshrc"
fi

# Fallbacks: $PROFILE, then the common rc files in order of preference.
if [ -z "$PROFILE_CONFIGURATION" ]; then
  if [ -f "$PROFILE" ]; then
    PROFILE_CONFIGURATION="$PROFILE"
  elif [ -f "$HOME/.profile" ]; then
    PROFILE_CONFIGURATION="$HOME/.profile"
  elif [ -f "$HOME/.bashrc" ]; then
    PROFILE_CONFIGURATION="$HOME/.bashrc"
  elif [ -f "$HOME/.bash_profile" ]; then
    PROFILE_CONFIGURATION="$HOME/.bash_profile"
  elif [ -f "$HOME/.zshrc" ]; then
    PROFILE_CONFIGURATION="$HOME/.zshrc"
  fi
fi

# Nothing found: report and fail so callers don't get an empty path.
if [ -z "$PROFILE_CONFIGURATION" ]; then
  echo "Your bash profile couldn't be detected.
Log:
Profile $PROFILE_CONFIGURATION
Shelltype: $SHELLTYPE
Please report this issue with the values reported above.
https://github.com/akoenig/dotfiles/issues"
  exit 1
fi
| true
|
2ad8732a760f4ea0a463155fd4b474d30c4fb027
|
Shell
|
nyirock/mg_blast_wrapper
|
/cycle_wrapper_v1.2.bash
|
UTF-8
| 1,186
| 3.46875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Run mg_blast_wrapper over a metagenome repeatedly with an increasing
# shear (chunk) size, writing each run into $dir_name/run_<n>.
dir_name="mg1_shear100_2000"
chunk_size=100
increment=50
iterations=40
# Start from a clean output directory.
rm -rf $dir_name
mkdir $dir_name
#location=$dir_name"/it_1"
format="csv,fasta"
alen="100"
metagenome=IMG\ Data/76969.assembled.fna,IMG\ Data/76969.unassembled_illumina.fna
reference="all_AP_WPS-2_bacterium.fna"
#echo $location
#python mg_blast_wrapper_v1.8_cyclic.py -r all_AP_WPS-2_bacterium.fna -m pp_metagenome3_assembled.fasta -n $location -shear $chunk_size
#parse parameters
# NOTE(review): `-p) filename=;;` assigns an empty string (the option's
# value is never read from $2), and `usage` is not defined in this file --
# both look unfinished; confirm.
while test $# != 0
do
case "$1" in
-p) filename=;;
-c) count_flag=1;;
-h) help_flag=1;;
--) shift; break;;
*) usage ;;
esac
shift # past argument or value
done
if [ "$iterations" -gt "1" ]
then
# NOTE(review): the loop runs iterations+1 times (c goes 0..iterations),
# and chunk_size is updated cumulatively each pass (base += increment*c),
# giving quadratic growth -- if `base + increment*c` was intended, compute
# from the original base instead.  Confirm against the directory name
# ("..._2000" suggests a linear 100..2000 sweep).
for (( c=0; c<=iterations; c++ ))
do
location=$dir_name"/run_"$c
#echo $location
chunk_size=$(($chunk_size+$increment*$c))
#echo $chunk_size
python mg_blast_wrapper_v1.11.py -r $reference -m "$metagenome" -n $location --shear $chunk_size -f $format -a $alen
done
else
# Single run: use defaults rather than the configured reference/metagenome.
location=$dir_name
python mg_blast_wrapper_v1.11.py -r all_AP_WPS-2_bacterium.fna -m pp_metagenome3_assembled.fasta -n $location --shear $chunk_size -f $format -a $alen
fi
| true
|
314a47862690a75c897cd50ba3ab2bfdb78697c0
|
Shell
|
yijunyu/demo
|
/datasets/linux-4.11-rc3/tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
|
UTF-8
| 2,216
| 3.484375
| 3
|
[
"BSD-2-Clause",
"Linux-syscall-note",
"GPL-2.0-only"
] |
permissive
|
#!/bin/bash
#
# Given the results directories for previous KVM-based torture runs,
# check the build and console output for errors.  Given a directory
# containing results directories, this recursively checks them all.
#
# Usage: kvm-recheck.sh resdir ...
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, you can access it online at
# http://www.gnu.org/licenses/gpl-2.0.html.
#
# Copyright (C) IBM Corporation, 2011
#
# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
PATH=`pwd`/tools/testing/selftests/rcutorture/bin:$PATH; export PATH
. tools/testing/selftests/rcutorture/bin/functions.sh
# Each argument is a results directory; every test-instance directory under
# it is identified by its Make.defconfig.out file.
for rd in "$@"
do
firsttime=1
dirs=`find $rd -name Make.defconfig.out -print | sort | sed -e 's,/[^/]*$,,' | sort -u`
for i in $dirs
do
# Print the run banner (first line of the run-level log) once per resdir.
if test -n "$firsttime"
then
firsttime=""
resdir=`echo $i | sed -e 's,/$,,' -e 's,/[^/]*$,,'`
head -1 $resdir/log
fi
# Dispatch to the suite-specific checker (kvm-recheck-rcu.sh etc.).
TORTURE_SUITE="`cat $i/../TORTURE_SUITE`"
kvm-recheck-${TORTURE_SUITE}.sh $i
# NOTE(review): $configfile is referenced below but never set in this
# script -- presumably exported by functions.sh; confirm.
if test -f "$i/console.log"
then
configcheck.sh $i/.config $i/ConfigFragment
if test -r $i/Make.oldconfig.err
then
cat $i/Make.oldconfig.err
fi
parse-build.sh $i/Make.out $configfile
if test "$TORTURE_SUITE" != rcuperf
then
parse-torture.sh $i/console.log $configfile
fi
parse-console.sh $i/console.log $configfile
if test -r $i/Warnings
then
cat $i/Warnings
fi
else
# No console log: distinguish a QEMU failure, a build-only run, and a
# plain build failure.
if test -f "$i/qemu-cmd"
then
print_bug qemu failed
echo " $i"
elif test -f "$i/buildonly"
then
echo Build-only run, no boot/test
configcheck.sh $i/.config $i/ConfigFragment
parse-build.sh $i/Make.out $configfile
else
print_bug Build failed
echo " $i"
fi
fi
done
done
| true
|
bad6f2fd30485c400af70764d1f17e59258753fd
|
Shell
|
vaginessa/Pi-Kitchen
|
/sdcard/pi-kitchen/020-usb-device-module/_RUNONCE/install_0_usbdevicemodule.sh
|
UTF-8
| 509
| 3.375
| 3
|
[] |
no_license
|
#!/bin/sh
# Install the Pi Zero USB device (gadget) module files: extract the
# PiZeroCombined tarball, copy its boot-partition files into /boot and its
# kernel module files into /lib, then remove the extracted tree.
# Fixed: variables are quoted, the extraction directory consistently uses
# $modulefolder (the original hard-coded "PiZeroCombined" twice), and rm's
# options come before its operand (portable ordering).

# Path of the PiZeroCombined archive shipped with this module.
modulesource="/home/pi/recovery/pi-kitchen/020-usb-device-module/module/PiZeroCombined.tar.gz"
modulefolder=PiZeroCombined
# Extract the module to a tmp directory.
tar xvzfC "$modulesource" /tmp/
# Copy the boot partition files to /boot.
sudo cp -R "/tmp/$modulefolder/fat32/"* /boot/
# Copy the root partition module files.
sudo cp -R "/tmp/$modulefolder/ext4/lib/"* /lib/
# Remove extracted files.
sudo rm -Rf "/tmp/$modulefolder"
echo "USB Device Module Installed!"
| true
|
2f37633c52bf7e17a4f87ef6ccc14202dc63ee57
|
Shell
|
balsini/dotfiles
|
/bin/email_sync
|
UTF-8
| 656
| 3.90625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Periodically synchronise the local "work" mailbox with mbsync, retrying
# individual fetch failures.  Environment tunables:
#   POLLTIME_S - seconds between sync rounds (default 180)
#   VERBOSE    - 1 to show mbsync output (default 0)
POLLTIME_S=${POLLTIME_S:-180}
VERBOSE=${VERBOSE:-0}
LOCKFILE=/tmp/email_sync.lock
# Release the lock and terminate.
function clean_up {
rm -f $LOCKFILE
exit
}
# NOTE(review): SIGKILL cannot be trapped -- that entry is ineffective.
# Consider trapping EXIT instead so the lock is always released.
trap clean_up SIGHUP SIGINT SIGTERM SIGKILL
echo "Locking..."
# Take the lock (procmail's lockfile); give up immediately if already held.
lockfile -r 0 $LOCKFILE || exit 1
while [ 1 ]; do
# Trigger an update of the local mailbox
while [ 1 ]; do
if [ $VERBOSE -eq 1 ]; then
echo "Verbose update... "
mbsync work
else
echo "Silent update... "
mbsync work &>/dev/null
fi
# Return when all messages have been fetched without errors
# ($? here is mbsync's status: the last command executed inside the if).
# NOTE(review): this retries only when the status is exactly 1; any other
# non-zero code falls through as success -- confirm mbsync's exit codes.
[ $? -eq 1 ] || break
echo "Fetch errors, retrying... "
sleep 1
done
# Refresh periodically
sleep $POLLTIME_S
done
# NOTE(review): unreachable -- the loop above never terminates normally.
clean_up
| true
|
4b2c1f76e2112efeef55877a9575c0c7e508bcac
|
Shell
|
slaash/scripts
|
/rpi/rsync-ubuntu-iso.sh
|
UTF-8
| 204
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
# Mirror the Ubuntu desktop ISOs for the releases below into /repo/ubuntu/,
# retrying transient rsync failures via the sourced retry helper.
# Fixed: the sourced path is quoted, and the rsync URL is quoted so the
# shell cannot glob-expand the "*" locally before rsync sees it.
myDir="$(dirname "$0")"
source "${myDir}/retry.sh"
for rel in focal jammy
do
  # -s 20: 20s between attempts, -m 3: at most 3 attempts.
  retry -s 20 -m 3 rsync -Lav "rsync://cdimage.ubuntu.com/releases/${rel}/*-desktop-amd64.iso" /repo/ubuntu/
done
| true
|
6f0750568908645b1bd94697107059d51b394be6
|
Shell
|
fusion-research/mapconstruction
|
/algorithms/Ahmed/script_to_run.sh
|
UTF-8
| 535
| 2.59375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#To Compile:
CODE_PATH="MapConstruction/" #path to the MapConstruction folder.
cd $CODE_PATH
# Build all sources into bin/.
javac -d bin/ src/mapconstruction2/*.java
#To Run:
# NOTE(review): INPUT_PATH and OUTPUT_PATH are intentionally left empty and
# must be filled in before running -- MapConstruction will receive empty
# arguments otherwise.
INPUT_PATH="" #path to the folder that constains all input tracks
OUTPUT_PATH="" #path to the folder where output will be written
EPS=150.0 #epsilon in meters
HAS_ALTITUDE=false #if input file has altitude information
ALT_EPS=4.0 #minimum altitude difference in meters between two streets
java -cp bin/ mapconstruction2.MapConstruction $INPUT_PATH $OUTPUT_PATH $EPS $HAS_ALTITUDE $ALT_EPS
| true
|
ae2236360731b7522cb33691fd33e6cddb87960c
|
Shell
|
rh9/blockssh
|
/script/GEO_from_ip.sh
|
UTF-8
| 4,317
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/sh
# Geolocate an IPv4 address ($1): look it up in the local `proxy.geo` MySQL
# table, or scrape ip-whois.net (falling back to webmasters.ru) and insert a
# new row.  On success writes the geo row id to /tmp/ram/geo/geo_data_id.dt.
rm -f /tmp/ram/geo/geo_data_id.dt
# No argument: nothing to do.
if [ -z $1 ]
then
exit 0
fi
GEO_ip=$1
# Reject anything that does not contain a dot (crude IPv4 sanity check).
if [ `echo $GEO_ip | grep '\.' | wc -l` -eq 0 ]
then
exit 0
fi
# otl=1 enables the extra debug logging/copies below.
otl=1
time_live=0
if [ ! -e /tmp/ram/geo ]
then
mkdir /tmp/ram/geo
fi
# Cache hit?  Reuse the existing row id.
geo_data_id=`echo "use proxy; SELECT id from geo where ip='$GEO_ip' order by t limit 1" | mysql | tail -n 1`
if [ -z $geo_data_id ]
then
wget -O /tmp/ram/geo/geo.html "http://ip-whois.net/ip_geo.php?ip=$GEO_ip"
SIZE_FILE=`stat --printf=%s /tmp/ram/geo/geo.html`
# Empty response: wait a minute and retry once.
if [ $SIZE_FILE -eq 0 ]
then
echo `date`" no response from ip-whois.net for ip $GEO_ip, retry after 60 sec " >> /var/log/ssh_failed/log.txt
sleep 60
wget -O /tmp/ram/geo/geo.html "http://ip-whois.net/ip_geo.php?ip=$GEO_ip"
fi
# Neutralise <script> tags, recode to UTF-8, and split on <br> so each
# field ends up on its own line for the greps below.
sed -i -e 's/<script/<!--script/g; s/<\/script>/<\/script-->/g' /tmp/ram/geo/geo.html
iconv -f windows-1251 -t utf8 /tmp/ram/geo/geo.html > /tmp/ram/geo/geo.utf.html
sed -i -e 's/windows-1251/utf8/g' /tmp/ram/geo/geo.utf.html
sed -i -e "s/<br>/<br>\n/g" /tmp/ram/geo/geo.utf.html
if [ $otl ]
then
cp /tmp/ram/geo/geo.utf.html /var/log/ssh_failed/$GEO_ip.geo.utf.html
fi
rm -f /tmp/ram/geo/geo.html
# Scrape the labelled fields ("IP address", "Country", ...).
IP_addr=`cat /tmp/ram/geo/geo.utf.html | grep 'IP адрес:' | awk -F : '{ print $2 }' | awk -F \< '{ print $1 }' | head -n 1`
Country=`cat /tmp/ram/geo/geo.utf.html | grep 'Страна:' | awk -F : '{ print $2 }' | awk -F \< '{ print $1 }' | head -n 1 | awk -F '"' '{ print $1 }' | sed -e "s/'/ /g"`
Region=`cat /tmp/ram/geo/geo.utf.html | grep 'Регион:' | awk -F : '{ print $2 }' | awk -F \< '{ print $1 }' | head -n 1 | awk -F '"' '{ print $1 }' | sed -e "s/'/ /g"`
City=`cat /tmp/ram/geo/geo.utf.html | grep 'Город:' | awk -F : '{ print $2 }' | awk -F \< '{ print $1 }' | head -n 1 | awk -F '"' '{ print $1 }' | sed -e "s/'/ /g"`
latitude=`cat /tmp/ram/geo/geo.utf.html | grep 'Широта:' | awk -F : '{ print $2 }' | awk -F \< '{ print $1 }' | head -n 1`
longitude=`cat /tmp/ram/geo/geo.utf.html | grep 'Долгота:' | awk -F : '{ print $2 }' | awk -F \< '{ print $1 }' | head -n 1`
# No coordinates: fall back to the webmasters.ru lookup.
if [ -z $latitude -a -z $longitude ]
then
echo `date`" latitude and longitude is NULL, use http://webmasters.ru/tools/location?addr=$GEO_ip" >> /var/log/ssh_failed/log.txt
#error get ip, agane
wget -O /tmp/ram/geo/geo.utf.html "http://webmasters.ru/tools/location?addr=$GEO_ip"
# NOTE(review): this sed targets geo.html, which was removed above; the
# fallback download went to geo.utf.html -- the script-tag neutralisation
# is a no-op here.  Probably should operate on geo.utf.html; confirm.
sed -i -e 's/<script/<!--script/g; s/<\/script>/<\/script-->/g' /tmp/ram/geo/geo.html
IP_addr=`cat /tmp/ram/geo/geo.utf.html | grep 'var ip = ' | awk -F = '{ print $2 }' | awk -F \; '{ print $1 }' | head -n 1`
Country=`cat /tmp/ram/geo/geo.utf.html | grep 'var info = ' | awk -F \> '{ print $8 }' | awk -F \< '{ print $1 }' | head -n 1`
#Region=
City=`cat /tmp/ram/geo/geo.utf.html | grep 'Город' | awk -F \> '{ print $7 }' | awk -F \< '{ print $1 }' | head -n 1`
longitude=`cat /tmp/ram/geo/geo.utf.html | grep 'var y = ' | awk -F = '{ print $2 }' | awk -F \; '{ print $1 }' | head -n 1`
latitude=`cat /tmp/ram/geo/geo.utf.html | grep 'var x = ' | awk -F = '{ print $2 }' | awk -F \; '{ print $1 }' | head -n 1`
if [ $otl ]
then
cp /tmp/ram/geo/geo.utf.html /var/log/ssh_failed/$GEO_ip.geo2.utf.html
fi
fi
#IP адрес: 1.179.175.22<br>
#Страна: Australia<br>
#Регион: <br>
#Город: <br>
#Широта: -27<br>
#Долгота: 133<br>
rm -f /tmp/ram/geo/geo.utf.html
if [ ! -z $latitude -o ! -z $longitude ]
then
if [ $otl ]
then
echo "use proxy; INSERT INTO geo (ip,Country,Region,City,latitude,longitude) VALUE ('$GEO_ip','$Country','$Region','$City','$latitude','$longitude')" >> /var/log/ssh_failed/geo.i.sql.txt
fi
# SECURITY(review): scraped values are interpolated directly into SQL.
# Single quotes are stripped above, but this remains injection-prone --
# prefer a parameterised client or strict whitelisting of characters.
geo_data_id=`echo "use proxy; INSERT INTO geo (ip,Country,Region,City,latitude,longitude) VALUE ('$GEO_ip','$Country','$Region','$City','$latitude','$longitude'); select LAST_INSERT_ID();" | mysql | tail -n 1`
if [ $otl ]
then
echo `date`" geo_data_id=$geo_data_id" >> /var/log/ssh_failed/geo.i.sql.txt
fi
echo "$geo_data_id" > /tmp/ram/geo/geo_data_id.dt
else
rm -f /tmp/ram/geo/geo_data_id.dt
fi
else
# Cache hit path: just record the existing row id.
if [ $otl ]
then
echo `date`" r geo_data_id=$geo_data_id" >> /var/log/ssh_failed/geo.i.sql.txt
fi
echo "$geo_data_id" > /tmp/ram/geo/geo_data_id.dt
fi
| true
|
e1432b5f5d1266db44a976076014c253492a9fb7
|
Shell
|
reef-technologies/cookiecutter-rt-django
|
/{{cookiecutter.repostory_name}}/devops/scripts/vars.sh
|
UTF-8
| 455
| 3.203125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
# Resolve per-environment deployment variables; source with the target
# environment ("staging" or "prod") as $1.
[ "$1" != "staging" ] && [ "$1" != "prod" ] && echo "Please provide environment name to deploy: staging or prod" && exit 1;
# Repository root: two levels up from this script.
PROJECT_DIR=`cd "$(dirname "${BASH_SOURCE[0]}")" && pwd`/../../
# Non-prod deployments get a "-<env>" suffix on resource names.
[ "$1" != "prod" ] && APP_SUFFIX="-$1"
# AWS account id of the current credentials.
APP_OWNER=$(aws sts get-caller-identity --query "Account" --output text)
APP_REGION="{{ cookiecutter.aws_region }}"
APP_NAME="{{ cookiecutter.aws_project_name }}${APP_SUFFIX}"
# NOTE(review): APP_NAME already ends with ${APP_SUFFIX}, so the bucket
# name carries the suffix twice (e.g. "app-staging-spa-staging") -- confirm
# this is the intended naming scheme.
CLOUDFRONT_BUCKET="${APP_NAME}-spa${APP_SUFFIX}"
|
a0d011df136e57710b7559add924be92300f1a90
|
Shell
|
JakubBrojacz/MINILang-tests
|
/test.sh
|
UTF-8
| 2,828
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
# Copyright (c) 2020 Tomasz Herman
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Test harness for the MiniCompiler: compiles every valid/prog??? fixture
# to IL and then to an exe, verifies it, runs it against its .in/.out
# files, and checks that every invalid/prog??? fixture fails to compile.
COMPILER="/home/tomasz/Workspace/C#/MiniCompiler/MiniCompiler/bin/Debug/MiniCompiler.exe"
LOG="./test.log"
ERROR_LOG="./error.log"
TEMP="./temp.out"
VALID_DIR="./valid/"
INVALID_DIR="./invalid/"
error=0
# Start both logs with a timestamp.
date | tee "${LOG}" > "${ERROR_LOG}"
echo "***Valid programs test***"
for file in "${VALID_DIR}"prog???; do
# First line of each fixture is its human-readable description.
head -n1 "${file}"
# Stage 1: MiniCompiler source -> IL.
mono "${COMPILER}" "${file}" >> "${LOG}"
if [ $? -ne 0 ]; then
echo "Error couldn't compile ${file} to il" | tee -a "${ERROR_LOG}"
((error=error+1))
continue
fi
# Stage 2: IL -> exe.
ilasm "${file}.il" >> "${LOG}" 2>> "${ERROR_LOG}"
if [ $? -ne 0 ]; then
echo "Error couldn't compile ${file}.il to exe" | tee -a "${ERROR_LOG}"
((error=error+1))
continue
fi
# Stage 3: verify the generated assembly; PIPESTATUS[0] is peverify's
# status (tee would otherwise mask it).
peverify "${file}.exe" | tee -a "${LOG}" "${ERROR_LOG}"
if [ ${PIPESTATUS[0]} -ne 0 ]; then
echo "Error ${file}.exe contains problems" | tee -a "${ERROR_LOG}"
((error=error+1))
continue
fi
# Stage 4: run with the fixture's stdin and diff against expected output.
cat "${file}.in" | mono "${file}.exe" > "${TEMP}"
if cmp "${file}.out" "${TEMP}"; then
echo "OK"
else
echo "Output of ${file}.exe is not identical. See ${file}.diff for details." | tee -a "${ERROR_LOG}"
diff "${file}.out" "${TEMP}" > "${file}.diff"
((error=error+1))
fi
done
echo "***Invalid programs test***"
# Invalid fixtures must be rejected by the compiler.
for file in "${INVALID_DIR}"prog???; do
head -n1 "${file}"
mono "$COMPILER" "${file}" >> "${LOG}"
if [ $? -eq 0 ]; then
echo "Program ${file} compiled, while failure was expected" | tee -a "${ERROR_LOG}"
((error=error+1))
continue
else
echo "OK"
fi
done
rm "${TEMP}"
echo "${COMPILER}"
echo "Total error count: ${error}" | tee -a "${ERROR_LOG}"
| true
|
31626a8ec62f55088a18a35a7c5955b3e45dd02a
|
Shell
|
helm/acceptance-testing
|
/scripts/completion-tests/completionTests.sh
|
UTF-8
| 30,861
| 2.734375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!bash
#
# Copyright The Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script tests different scenarios of completion. The tests can be
# run by sourcing this file from a bash shell or a zsh shell.
source ${COMP_DIR}/lib/completionTests-base.sh
export PATH=${COMP_DIR}/bin:$PATH
# Use the memory driver with pre-defined releases to easily
# test release name completion
export HELM_DRIVER=memory
export HELM_MEMORY_DRIVER_DATA=${COMP_DIR}/releases.yaml
# Helm setup
# Helm v3 keeps its state under the XDG base directories, while Helm v2
# uses $HELM_HOME (and needs `helm init`).  Recreate the relevant trees
# from scratch so every test run starts from a clean slate, and record the
# repo/cache/plugin roots used by the rest of the tests.
if [ ! -z ${ROBOT_HELM_V3} ]; then
export XDG_CACHE_HOME=${COMP_DIR}/cache && rm -rf ${XDG_CACHE_HOME} && mkdir -p ${XDG_CACHE_HOME}
export XDG_CONFIG_HOME=${COMP_DIR}/config && rm -rf ${XDG_CONFIG_HOME} && mkdir -p ${XDG_CONFIG_HOME}
export XDG_DATA_HOME=${COMP_DIR}/data && rm -rf ${XDG_DATA_HOME} && mkdir -p ${XDG_DATA_HOME}
REPO_ROOT=${XDG_CONFIG_HOME}/helm
REPO_CACHE=${XDG_CACHE_HOME}/helm/repository
PLUGIN_ROOT=${XDG_DATA_HOME}/helm/plugins
else
export HELM_HOME=${COMP_DIR}/.helm && rm -rf ${HELM_HOME} && mkdir -p ${HELM_HOME}
helm init --client-only
REPO_ROOT=${HELM_HOME}/repository
REPO_CACHE=${REPO_ROOT}/cache
PLUGIN_ROOT=${HELM_HOME}/plugins
fi
# Setup some repos to allow testing completion of the helm repo command
# We inject the content of the repositories.yaml file directly to avoid requiring
# an internet connection if we were to use 'helm repo add'
mkdir -p ${REPO_ROOT}
cat > ${REPO_ROOT}/repositories.yaml << EOF
apiVersion: v1
generated: "2019-08-11T22:28:44.841141-04:00"
repositories:
- name: nginx
url: https://nginx.example.com
- name: zztest1
url: https://charts.example.com
- name: zztest2
url: https://charts2.example.com
EOF
helm repo list
# Create a some repo content to feed completions
mkdir -p ${REPO_CACHE}
# Repo index file
cat > ${REPO_CACHE}/nginx-index.yaml << EOF
apiVersion: v1
entries:
nginx:
- name: nginx
version: 0.11.0
- name: nginx
version: 0.12.0
- name: nginx
version: 0.13.0
nginx-test:
- name: nginx-test
version: 1.1.1
- name: nginx-test
version: 2.2.2
- name: nginx-test
version: 3.3.3
generated: "2020-06-18T04:08:38.041908903Z"
EOF
# Repo charts file
cat > ${REPO_CACHE}/nginx-charts.yaml << EOF
nginx
nginx-test
EOF
# Setup some plugins to allow testing completion of the helm plugin command
# We inject the content of different plugin.yaml files directly to avoid having
# to install a real plugin which can take a long time.
###########
# Plugin 1
###########
PLUGIN_DIR=${PLUGIN_ROOT}/helm-2to3
mkdir -p ${PLUGIN_DIR}
# The plugin file
cat > ${PLUGIN_DIR}/plugin.yaml << EOF
name: "2to3"
version: "2.5.1+2"
description: "Migrate from helm v2 to helm v3"
EOF
# The plugin's static completion file
cat > ${PLUGIN_DIR}/completion.yaml << EOF
commands:
- name: cleanup
flags:
- r
- label
- cleanup
- s
- storage
- name: convert
flags:
- l
- label
- s
- storage
- t
- name: move
commands:
- name: config
flags:
- dry-run
EOF
# The plugin's dynamic completion file
cat > ${PLUGIN_DIR}/plugin.complete << EOF
#!/usr/bin/env sh
if [ "\$2" = "config" ]; then
echo "case-config"
echo "gryffindor"
echo "slytherin"
echo "ravenclaw"
echo "hufflepuff"
echo ":0"
exit
fi
if [ "\$HELM_NAMESPACE" != "default" ]; then
echo "case-ns"
# Check the namespace flag is not passed
echo "\$1"
# Check plugin variables are set
echo "\$HELM_NAMESPACE"
echo ":4"
exit
fi
if [ "\$2" = -s ]; then
echo "case-flag"
echo "lucius"
echo "draco"
echo "dobby"
echo ":4"
exit
fi
# Check missing directive
echo "hermione"
echo "harry"
echo "ron"
EOF
chmod u+x ${PLUGIN_DIR}/plugin.complete
###########
# Plugin 2
###########
PLUGIN_DIR=${PLUGIN_ROOT}/helm-push-plugin
mkdir -p ${PLUGIN_DIR}
# The plugin file
cat > ${PLUGIN_DIR}/plugin.yaml << EOF
name: "push-plugin"
version: "0.7.1"
description: "Some plugin description"
EOF
###########
# Plugin 3
###########
PLUGIN_DIR=${PLUGIN_ROOT}/helm-push-artifactory
mkdir -p ${PLUGIN_DIR}
# The plugin file
cat > ${PLUGIN_DIR}/plugin.yaml << EOF
name: "push-artifactory"
version: "0.3.0"
description: "Push helm charts to artifactory"
EOF
helm plugin list
# Source the completion script after setting things up, so it can
# take the configuration into consideration (such as plugin names)
# Don't use the new source <() form as it does not work with bash v3
source /dev/stdin <<- EOF
$(helm completion $SHELL_TYPE)
EOF
allHelmCommands="completion create dependency env 2to3 help get history install lint list package plugin pull push push-artifactory push-plugin registry repo rollback search show status template test uninstall upgrade verify version"
if [ "$SHELL_TYPE" = bash ]; then
allHelmLongFlags="--burst-limit --debug --help --kube-apiserver --kube-as-group --kube-as-user --kube-ca-file --kube-context --kube-insecure-skip-tls-verify --kube-tls-server-name --kube-token --kubeconfig --namespace --registry-config --repository-cache --repository-config"
allHelmGlobalFlags="${allHelmLongFlags} -h -n"
else
allHelmGlobalFlags="--debug --kube-apiserver --kube-apiserver --kube-apiserver --kube-context --kube-context --kube-context --kube-token --kube-token --kube-token --kubeconfig --kubeconfig --kubeconfig --namespace --namespace --namespace --registry-config --registry-config --registry-config --repository-cache --repository-cache --repository-cache --repository-config --repository-config --repository-config -n"
allHelmLongFlags="--debug --kube-apiserver --kube-apiserver --kube-apiserver --kube-context --kube-context --kube-context --kube-token --kube-token --kube-token --kubeconfig --kubeconfig --kubeconfig --namespace --namespace --namespace --registry-config --registry-config --registry-config --repository-cache --repository-cache --repository-cache --repository-config --repository-config --repository-config "
fi
#####################
# Static completions
#####################
# Each _completionTests_verifyCompletion call asserts that completing the
# first argument (the partial command line) yields exactly the second
# argument (the expected candidates).
# Basic first level commands (static completion)
_completionTests_verifyCompletion "helm " "$allHelmCommands"
_completionTests_verifyCompletion "helm help " "$allHelmCommands"
_completionTests_verifyCompletion "helm sho" "show"
_completionTests_verifyCompletion "helm help sho" "show"
_completionTests_verifyCompletion "helm --debug " "$allHelmCommands"
_completionTests_verifyCompletion "helm --debug sho" "show"
_completionTests_verifyCompletion "helm -n ns " "$allHelmCommands"
_completionTests_verifyCompletion "helm -n ns sho" "show"
_completionTests_verifyCompletion "helm --namespace ns " "$allHelmCommands"
_completionTests_verifyCompletion "helm --namespace ns sho" "show"
_completionTests_verifyCompletion "helm stat" "status"
_completionTests_verifyCompletion "helm status" "status"
_completionTests_verifyCompletion "helm lis" "list"
# helm v2 additionally has the 'reset' command.
if [ ! -z ${ROBOT_HELM_V3} ]; then
_completionTests_verifyCompletion "helm r" "registry repo rollback"
_completionTests_verifyCompletion "helm re" "registry repo"
else
_completionTests_verifyCompletion "helm r" "registry repo reset rollback"
_completionTests_verifyCompletion "helm re" "repo reset"
fi
# Basic second level commands (static completion)
if [ ! -z ${ROBOT_HELM_V3} ]; then
_completionTests_verifyCompletion "helm get " "all hooks manifest notes values"
else
_completionTests_verifyCompletion "helm get " "all hooks manifest notes values"
fi
_completionTests_verifyCompletion "helm get h" "hooks"
_completionTests_verifyCompletion "helm completion " "bash fish powershell zsh"
_completionTests_verifyCompletion "helm completion z" "zsh"
_completionTests_verifyCompletion "helm plugin " "install list uninstall update"
_completionTests_verifyCompletion "helm plugin u" "uninstall update"
_completionTests_verifyCompletion "helm --debug plugin " "install list uninstall update"
_completionTests_verifyCompletion "helm --debug plugin u" "uninstall update"
_completionTests_verifyCompletion "helm -n ns plugin " "install list uninstall update"
_completionTests_verifyCompletion "helm -n ns plugin u" "uninstall update"
_completionTests_verifyCompletion "helm --namespace ns plugin " "install list uninstall update"
_completionTests_verifyCompletion "helm --namespace ns plugin u" "uninstall update"
_completionTests_verifyCompletion "helm plugin --debug " "install list uninstall update"
_completionTests_verifyCompletion "helm plugin --debug u" "uninstall update"
_completionTests_verifyCompletion "helm plugin -n ns " "install list uninstall update"
_completionTests_verifyCompletion "helm plugin -n ns u" "uninstall update"
_completionTests_verifyCompletion "helm plugin --namespace ns " "install list uninstall update"
_completionTests_verifyCompletion "helm plugin --namespace ns u" "uninstall update"
# With validArgs
_completionTests_verifyCompletion "helm completion " "bash fish powershell zsh"
_completionTests_verifyCompletion "helm completion z" "zsh"
_completionTests_verifyCompletion "helm --debug completion " "bash fish powershell zsh"
_completionTests_verifyCompletion "helm --debug completion z" "zsh"
_completionTests_verifyCompletion "helm -n ns completion " "bash fish powershell zsh"
_completionTests_verifyCompletion "helm -n ns completion z" "zsh"
_completionTests_verifyCompletion "helm --namespace ns completion " "bash fish powershell zsh"
_completionTests_verifyCompletion "helm --namespace ns completion z" "zsh"
# Completion of flags
# (zsh repeats each flag candidate; see the variable definitions above.)
if [ "$SHELL_TYPE" = bash ]; then
_completionTests_verifyCompletion "helm --kube-con" "--kube-context"
_completionTests_verifyCompletion "helm --kubecon" "--kubeconfig"
else
_completionTests_verifyCompletion "helm --kube-con" "--kube-context --kube-context --kube-context"
_completionTests_verifyCompletion "helm --kubecon" "--kubeconfig --kubeconfig --kubeconfig"
fi
if [ ! -z ${ROBOT_HELM_V3} ]; then
_completionTests_verifyCompletion "helm -n" "-n"
if [ "$SHELL_TYPE" = bash ]; then
_completionTests_verifyCompletion "helm --name" "--namespace"
else
_completionTests_verifyCompletion "helm --name" "--namespace --namespace --namespace"
fi
fi
_completionTests_verifyCompletion "helm -" "$allHelmGlobalFlags"
_completionTests_verifyCompletion "helm --" "$allHelmLongFlags"
_completionTests_verifyCompletion "helm show -" "$allHelmGlobalFlags"
_completionTests_verifyCompletion "helm show --" "$allHelmLongFlags"
_completionTests_verifyCompletion "helm -n" "-n"
_completionTests_verifyCompletion "helm show -n" "-n"
# Completion of commands while using flags
_completionTests_verifyCompletion "helm --kube-context prod sta" "status"
_completionTests_verifyCompletion "helm --kubeconfig=/tmp/config lis" "list"
if [ "$SHELL_TYPE" = bash ]; then
_completionTests_verifyCompletion "helm get hooks --kubec" "--kubeconfig"
else
_completionTests_verifyCompletion "helm get hooks --kubec" "--kubeconfig --kubeconfig --kubeconfig"
fi
if [ ! -z ${ROBOT_HELM_V3} ]; then
_completionTests_verifyCompletion "helm --namespace mynamespace get h" "hooks"
_completionTests_verifyCompletion "helm -n mynamespace get " "all hooks manifest notes values"
if [ "$SHELL_TYPE" = bash ]; then
_completionTests_verifyCompletion "helm get --name" "--namespace"
else
_completionTests_verifyCompletion "helm get --name" "--namespace --namespace --namespace"
fi
fi
# Cobra command aliases are purposefully not completed
_completionTests_verifyCompletion "helm ls" ""
_completionTests_verifyCompletion "helm dependenci" ""
# Static completion for plugins
# (driven by the completion.yaml files written in the setup section)
_completionTests_verifyCompletion "helm push-plugin " ""
_completionTests_verifyCompletion "helm 2to3 " "cleanup convert move"
_completionTests_verifyCompletion "helm 2to3 c" "cleanup convert"
_completionTests_verifyCompletion "helm 2to3 move " "config"
_completionTests_verifyCompletion "helm 2to3 cleanup -" "$allHelmGlobalFlags -r -s --label --cleanup --storage"
# For plugin completion, when there are more short flags than long flags, a long flag is created for the extra short flags
# So here we expect the extra --t
_completionTests_verifyCompletion "helm 2to3 convert -" "$allHelmGlobalFlags -l -s -t --t --label --storage"
_completionTests_verifyCompletion "helm 2to3 move config --" "$allHelmLongFlags --dry-run"
#####################
# Dynamic completions
#####################
# For release name completion
# (release names come from the memory driver data configured in the setup)
_completionTests_verifyCompletion "helm status " "athos porthos aramis"
_completionTests_verifyCompletion "helm history a" "athos aramis"
_completionTests_verifyCompletion "helm uninstall a" "athos aramis"
_completionTests_verifyCompletion "helm upgrade a" "athos aramis"
_completionTests_verifyCompletion "helm get manifest -n default " "athos porthos aramis"
_completionTests_verifyCompletion "helm --namespace gascony get manifest " "dartagnan"
_completionTests_verifyCompletion "helm --namespace gascony test d" "dartagnan"
_completionTests_verifyCompletion "helm rollback d" ""
# For the repo command
_completionTests_verifyCompletion "helm repo remove " "nginx zztest1 zztest2"
_completionTests_verifyCompletion "helm repo remove zztest" "zztest1 zztest2"
if [ ! -z ${ROBOT_HELM_V3} ]; then
# Make sure completion works as expected when there are no repositories configured
tmp=$XDG_CONFIG_HOME
XDG_CONFIG_HOME='/invalid/path' _completionTests_verifyCompletion "helm repo remove " ""
XDG_CONFIG_HOME=$tmp
fi
# For the plugin command
_completionTests_verifyCompletion "helm plugin uninstall " "2to3 push-artifactory push-plugin"
_completionTests_verifyCompletion "helm plugin uninstall pu" "push-artifactory push-plugin"
_completionTests_verifyCompletion "helm plugin update " "2to3 push-artifactory push-plugin"
_completionTests_verifyCompletion "helm plugin update pus" "push-artifactory push-plugin"
if [ ! -z ${ROBOT_HELM_V3} ]; then
# Make sure completion works as expected when there are no plugins
tmp=$XDG_DATA_HOME
XDG_DATA_HOME='/invalid/path' _completionTests_verifyCompletion "helm plugin uninstall " ""
XDG_DATA_HOME=$tmp
fi
# For the global --kube-context flag
if [ ! -z ${ROBOT_HELM_V3} ]; then
# Feature not available in v2
_completionTests_verifyCompletion "helm --kube-context " "dev1 dev2 accept prod"
_completionTests_verifyCompletion "helm upgrade --kube-context " "dev1 dev2 accept prod"
_completionTests_verifyCompletion "helm upgrade --kube-context d" "dev1 dev2"
if [ "$SHELL_TYPE" = bash ]; then
_completionTests_verifyCompletion "helm --kube-context=" "dev1 dev2 accept prod"
else
_completionTests_verifyCompletion "helm --kube-context=" "--kube-context=dev1 --kube-context=dev2 --kube-context=accept --kube-context=prod"
fi
fi
# Now requires a real cluster
# # For the global --namespace flag
# if [ ! -z ${ROBOT_HELM_V3} ]; then
# # No namespace flag in v2
# _completionTests_verifyCompletion "helm --namespace " "casterly-rock white-harbor winterfell"
# _completionTests_verifyCompletion "helm --namespace w" "white-harbor winterfell"
# _completionTests_verifyCompletion "helm upgrade --namespace " "casterly-rock white-harbor winterfell"
# _completionTests_verifyCompletion "helm -n " "casterly-rock white-harbor winterfell"
# _completionTests_verifyCompletion "helm -n w" "white-harbor winterfell"
# _completionTests_verifyCompletion "helm upgrade -n " "casterly-rock white-harbor winterfell"
# if [ "$SHELL_TYPE" = bash ]; then
# _completionTests_verifyCompletion "helm --namespace=" "casterly-rock white-harbor winterfell"
# _completionTests_verifyCompletion "helm --namespace=w" "white-harbor winterfell"
# _completionTests_verifyCompletion "helm upgrade --namespace=w" "white-harbor winterfell"
# _completionTests_verifyCompletion "helm upgrade --namespace=" "casterly-rock white-harbor winterfell"
# _completionTests_verifyCompletion "helm -n=" "casterly-rock white-harbor winterfell"
# _completionTests_verifyCompletion "helm -n=w" "white-harbor winterfell"
# _completionTests_verifyCompletion "helm upgrade -n=w" "white-harbor winterfell"
# _completionTests_verifyCompletion "helm upgrade -n=" "casterly-rock white-harbor winterfell"
# else
# _completionTests_verifyCompletion "helm --namespace=" "--namespace=casterly-rock --namespace=white-harbor --namespace=winterfell"
# _completionTests_verifyCompletion "helm --namespace=w" "--namespace=white-harbor --namespace=winterfell"
# _completionTests_verifyCompletion "helm upgrade --namespace=w" "--namespace=white-harbor --namespace=winterfell"
# _completionTests_verifyCompletion "helm upgrade --namespace=" "--namespace=casterly-rock --namespace=white-harbor --namespace=winterfell"
# _completionTests_verifyCompletion "helm -n=" "-n=casterly-rock -n=white-harbor -n=winterfell"
# _completionTests_verifyCompletion "helm -n=w" "-n=white-harbor -n=winterfell"
# _completionTests_verifyCompletion "helm upgrade -n=w" "-n=white-harbor -n=winterfell"
# _completionTests_verifyCompletion "helm upgrade -n=" "-n=casterly-rock -n=white-harbor -n=winterfell"
# fi
# # With override flags
# _completionTests_verifyCompletion "helm --kubeconfig myconfig --namespace " "meereen myr volantis"
# _completionTests_verifyCompletion "helm --kubeconfig=myconfig --namespace " "meereen myr volantis"
# _completionTests_verifyCompletion "helm --kube-context mycontext --namespace " "braavos old-valyria yunkai"
# _completionTests_verifyCompletion "helm --kube-context=mycontext --namespace " "braavos old-valyria yunkai"
# fi
# For the --output flag that applies to multiple commands
if [ ! -z ${ROBOT_HELM_V3} ]; then
# Feature not available in v2
# Also test that the list of outputs matches what the helm message gives.
# This is an imperfect way of detecting if the output format list has changed, but
# the completion wasn't updated to match.
outputFormats=$(helm repo list -h | grep -- --output | cut -d: -f2 | cut -d '(' -f1 | sed s/,//g)
_completionTests_verifyCompletion "helm repo list --output " "${outputFormats}"
_completionTests_verifyCompletion "helm install --output " "${outputFormats}"
_completionTests_verifyCompletion "helm history -o " "${outputFormats}"
_completionTests_verifyCompletion "helm list -o " "${outputFormats}"
fi
# For completing specification of charts
# Temporary files are created so that filename completion candidates can be
# checked alongside repo/chart candidates.
if [ ! -z ${ROBOT_HELM_V3} ]; then
tmpFiles="zztest2file files"
touch $tmpFiles
_completionTests_verifyCompletion "helm show values " "./ / zztest1/ zztest2/ nginx/ oci:// file:// http:// https://"
_completionTests_verifyCompletion "helm show values ht" "http:// https://"
_completionTests_verifyCompletion "helm show values zz" "zztest1/ zztest2/ zztest2file"
_completionTests_verifyCompletion "helm show values zztest2" "zztest2/ zztest2file"
_completionTests_verifyCompletion "helm show values zztest2f" ""
_completionTests_verifyCompletion "helm show values nginx/yyy" ""
_completionTests_verifyCompletion "helm show values nginx/n" "nginx/nginx nginx/nginx-test"
_completionTests_verifyCompletion "helm show values fil" "file:// files"
_completionTests_verifyCompletion "helm show chart zz" "zztest1/ zztest2/ zztest2file"
_completionTests_verifyCompletion "helm show readme zz" "zztest1/ zztest2/ zztest2file"
_completionTests_verifyCompletion "helm show values zz" "zztest1/ zztest2/ zztest2file"
_completionTests_verifyCompletion "helm pull " "zztest1/ zztest2/ nginx/ oci:// file:// http:// https://"
_completionTests_verifyCompletion "helm pull zz" "zztest1/ zztest2/"
_completionTests_verifyCompletion "helm install name " "./ / zztest1/ zztest2/ nginx/ oci:// file:// http:// https://"
_completionTests_verifyCompletion "helm install name zz" "zztest1/ zztest2/ zztest2file"
_completionTests_verifyCompletion "helm install name nginx/n" "nginx/nginx nginx/nginx-test"
_completionTests_verifyCompletion "helm template name " "./ / zztest1/ zztest2/ nginx/ oci:// file:// http:// https://"
_completionTests_verifyCompletion "helm template name zz" "zztest1/ zztest2/ zztest2file"
_completionTests_verifyCompletion "helm template name nginx/n" "nginx/nginx nginx/nginx-test"
_completionTests_verifyCompletion "helm upgrade release " "./ / zztest1/ zztest2/ nginx/ oci:// file:// http:// https://"
_completionTests_verifyCompletion "helm upgrade release zz" "zztest1/ zztest2/ zztest2file"
_completionTests_verifyCompletion "helm upgrade release nginx/n" "nginx/nginx nginx/nginx-test"
_completionTests_verifyCompletion "helm show values ngin" "nginx/"
\rm $tmpFiles
fi
# For completing the --version flag
# (versions come from the repo index file written during setup)
_completionTests_verifyCompletion "helm install name nginx/nginx --version " "0.11.0 0.12.0 0.13.0"
_completionTests_verifyCompletion "helm install name nginx/nginx --version 0.11" "0.11.0"
_completionTests_verifyCompletion "helm install nginx/nginx --generate-name --version " "0.11.0 0.12.0 0.13.0"
_completionTests_verifyCompletion "helm install nginx/nginx --generate-name --version 0.13" "0.13.0"
_completionTests_verifyCompletion "helm template name nginx/nginx --version 0.11" "0.11.0"
_completionTests_verifyCompletion "helm template nginx/nginx --generate-name --version " "0.11.0 0.12.0 0.13.0"
_completionTests_verifyCompletion "helm upgrade name nginx/nginx --version 0.11" "0.11.0"
_completionTests_verifyCompletion "helm upgrade name nginx/nginx --version " "0.11.0 0.12.0 0.13.0"
_completionTests_verifyCompletion "helm pull nginx/nginx --version " "0.11.0 0.12.0 0.13.0"
_completionTests_verifyCompletion "helm pull nginx/nginx --version 0.11" "0.11.0"
_completionTests_verifyCompletion "helm show all nginx/nginx --version " "0.11.0 0.12.0 0.13.0"
_completionTests_verifyCompletion "helm show chart nginx/nginx --version 0.11" "0.11.0"
_completionTests_verifyCompletion "helm show readme nginx/nginx --version 0.11" "0.11.0"
_completionTests_verifyCompletion "helm show values nginx/nginx --version 0.11" "0.11.0"
# Dynamic completion for plugins
# (driven by the plugin.complete script written during setup)
_completionTests_verifyCompletion "helm push-plugin " ""
_completionTests_verifyCompletion "helm 2to3 move config g" "gryffindor"
_completionTests_verifyCompletion "helm 2to3 -n dumbledore convert " "case-ns convert dumbledore"
_completionTests_verifyCompletion "helm 2to3 convert -s flag d" "dobby draco"
_completionTests_verifyCompletion "helm 2to3 convert " "hermione harry ron"
##############################################################
# Completion with helm called through an alias or using a path
##############################################################
# We want to specify a different helm for completion than the one
# that is found on the PATH variable.
# This is particularly valuable to check that dynamic completion
# uses the correct location for helm.
# Copy helm to a location that is not on PATH
TMP_HELM_DIR=$(mktemp -d ${ROBOT_OUTPUT_DIR}/helm-acceptance-temp-bin.XXXXXX)
# Remove the temporary binary directory whenever this script exits.
trap "rm -rf ${TMP_HELM_DIR}" EXIT
mkdir -p $TMP_HELM_DIR
HELM_DIR=$(dirname $(which helm))
cp $HELM_DIR/helm $TMP_HELM_DIR/helm
# Make 'helm' unavailable to make sure it can't be called directly
# by the dynamic completion code, which should instead use the helm
# as called in the completion calls that follow.
alias helm=echo
# Testing with shell aliases is only applicable to bash.
# Zsh replaces the alias before calling the completion function,
# so it does not make sense to try zsh completion with an alias.
if [ "$SHELL_TYPE" = bash ]; then
# Create aliases to helm
# This alias will be created after the variable is expanded
alias helmAlias="${TMP_HELM_DIR}/helm"
# This alias will be created without expanding the variable (because of single quotes)
alias helmAliasWithVar='${TMP_HELM_DIR}/helm'
# Hook these new aliases to the helm completion function.
complete -o default -F $(_completionTests_findCompletionFunction helm) helmAlias
complete -o default -F $(_completionTests_findCompletionFunction helm) helmAliasWithVar
# Completion with normal alias
_completionTests_verifyCompletion "helmAlias lis" "list"
_completionTests_verifyCompletion "helmAlias completion z" "zsh"
_completionTests_verifyCompletion "helmAlias --kubecon" "--kubeconfig"
_completionTests_verifyCompletion "helmAlias get hooks --kubec" "--kubeconfig"
_completionTests_verifyCompletion "helmAlias repo remove zztest" "zztest1 zztest2"
_completionTests_verifyCompletion "helmAlias plugin update pus" "push-plugin push-artifactory"
_completionTests_verifyCompletion "helmAlias upgrade --kube-context d" "dev1 dev2"
# if [ ! -z ${ROBOT_HELM_V3} ]; then
# _completionTests_verifyCompletion "helmAlias --kube-context=mycontext --namespace " "braavos old-valyria yunkai"
# fi
# Completion with alias that contains a variable
_completionTests_verifyCompletion "helmAliasWithVar lis" "list"
_completionTests_verifyCompletion "helmAliasWithVar completion z" "zsh"
_completionTests_verifyCompletion "helmAliasWithVar --kubecon" "--kubeconfig"
_completionTests_verifyCompletion "helmAliasWithVar get hooks --kubec" "--kubeconfig"
_completionTests_verifyCompletion "helmAliasWithVar repo remove zztest" "zztest1 zztest2"
_completionTests_verifyCompletion "helmAliasWithVar plugin update pus" "push-plugin push-artifactory"
_completionTests_verifyCompletion "helmAliasWithVar upgrade --kube-context d" "dev1 dev2"
# if [ ! -z ${ROBOT_HELM_V3} ]; then
# _completionTests_verifyCompletion "helmAliasWithVar --kube-context=mycontext --namespace " "braavos old-valyria yunkai"
# fi
fi
# Completion with absolute path
_completionTests_verifyCompletion "$TMP_HELM_DIR/helm lis" "list"
_completionTests_verifyCompletion "$TMP_HELM_DIR/helm completion z" "zsh"
_completionTests_verifyCompletion "$TMP_HELM_DIR/helm repo remove zztest" "zztest1 zztest2"
_completionTests_verifyCompletion "$TMP_HELM_DIR/helm plugin update pus" "push-plugin push-artifactory"
_completionTests_verifyCompletion "$TMP_HELM_DIR/helm upgrade --kube-context d" "dev1 dev2"
# if [ ! -z ${ROBOT_HELM_V3} ]; then
# _completionTests_verifyCompletion "$TMP_HELM_DIR/helm --kube-context=mycontext --namespace " "braavos old-valyria yunkai"
# fi
if [ "$SHELL_TYPE" = bash ]; then
_completionTests_verifyCompletion "$TMP_HELM_DIR/helm --kubecon" "--kubeconfig"
_completionTests_verifyCompletion "$TMP_HELM_DIR/helm get hooks --kubec" "--kubeconfig"
else
_completionTests_verifyCompletion "$TMP_HELM_DIR/helm --kubecon" "--kubeconfig --kubeconfig --kubeconfig"
_completionTests_verifyCompletion "$TMP_HELM_DIR/helm get hooks --kubec" "--kubeconfig --kubeconfig --kubeconfig"
fi
# Completion with relative path
cd $TMP_HELM_DIR
_completionTests_verifyCompletion "./helm lis" "list"
_completionTests_verifyCompletion "./helm completion z" "zsh"
_completionTests_verifyCompletion "./helm repo remove zztest" "zztest1 zztest2"
_completionTests_verifyCompletion "./helm plugin update pus" "push-plugin push-artifactory"
_completionTests_verifyCompletion "./helm upgrade --kube-context d" "dev1 dev2"
# if [ ! -z ${ROBOT_HELM_V3} ]; then
# _completionTests_verifyCompletion "./helm --kube-context=mycontext --namespace " "braavos old-valyria yunkai"
# fi
if [ "$SHELL_TYPE" = bash ]; then
_completionTests_verifyCompletion "./helm --kubecon" "--kubeconfig"
_completionTests_verifyCompletion "./helm get hooks --kubec" "--kubeconfig"
else
_completionTests_verifyCompletion "./helm --kubecon" "--kubeconfig --kubeconfig --kubeconfig"
_completionTests_verifyCompletion "./helm get hooks --kubec" "--kubeconfig --kubeconfig --kubeconfig"
fi
cd - >/dev/null
# Completion with a different name for helm
mv $TMP_HELM_DIR/helm $TMP_HELM_DIR/myhelm
# Generating the completion script using the new binary name
# should make completion work for that binary name
source /dev/stdin <<- EOF
$(${TMP_HELM_DIR}/myhelm completion $SHELL_TYPE)
EOF
_completionTests_verifyCompletion "$TMP_HELM_DIR/myhelm lis" "list"
_completionTests_verifyCompletion "$TMP_HELM_DIR/myhelm completion z" "zsh"
_completionTests_verifyCompletion "$TMP_HELM_DIR/myhelm repo remove zztest" "zztest1 zztest2"
_completionTests_verifyCompletion "$TMP_HELM_DIR/myhelm plugin update pus" "push-plugin push-artifactory"
_completionTests_verifyCompletion "$TMP_HELM_DIR/myhelm upgrade --kube-context d" "dev1 dev2"
# if [ ! -z ${ROBOT_HELM_V3} ]; then
# _completionTests_verifyCompletion "$TMP_HELM_DIR/myhelm --kube-context=mycontext --namespace " "braavos old-valyria yunkai"
# fi
if [ "$SHELL_TYPE" = bash ]; then
_completionTests_verifyCompletion "$TMP_HELM_DIR/myhelm --kubecon" "--kubeconfig"
_completionTests_verifyCompletion "$TMP_HELM_DIR/myhelm get hooks --kubec" "--kubeconfig"
else
_completionTests_verifyCompletion "$TMP_HELM_DIR/myhelm --kubecon" "--kubeconfig --kubeconfig --kubeconfig"
_completionTests_verifyCompletion "$TMP_HELM_DIR/myhelm get hooks --kubec" "--kubeconfig --kubeconfig --kubeconfig"
fi
# Completion with a different name for helm that is on PATH
mv $TMP_HELM_DIR/myhelm $HELM_DIR/myhelm
_completionTests_verifyCompletion "myhelm lis" "list"
_completionTests_verifyCompletion "myhelm completion z" "zsh"
_completionTests_verifyCompletion "myhelm repo remove zztest" "zztest1 zztest2"
_completionTests_verifyCompletion "myhelm plugin update pus" "push-plugin push-artifactory"
_completionTests_verifyCompletion "myhelm upgrade --kube-context d" "dev1 dev2"
# if [ ! -z ${ROBOT_HELM_V3} ]; then
# _completionTests_verifyCompletion "myhelm --kube-context=mycontext --namespace " "braavos old-valyria yunkai"
# fi
if [ "$SHELL_TYPE" = bash ]; then
_completionTests_verifyCompletion "myhelm --kubecon" "--kubeconfig"
_completionTests_verifyCompletion "myhelm get hooks --kubec" "--kubeconfig"
else
_completionTests_verifyCompletion "myhelm --kubecon" "--kubeconfig --kubeconfig --kubeconfig"
_completionTests_verifyCompletion "myhelm get hooks --kubec" "--kubeconfig --kubeconfig --kubeconfig"
fi
# Restore the real helm command.
unalias helm
# This must be the last call. It allows to exit with an exit code
# that reflects the final status of all the tests.
_completionTests_exit
| true
|
dc72858dc9f54cd6098b05ff749e775748641240
|
Shell
|
zielmicha/sandboxd
|
/example.sh
|
UTF-8
| 155
| 2.515625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Sandbox demonstration script: list a few filesystem locations, confirm
# that privilege escalation is blocked, then simulate a short work loop.
ls /
ls -l /opt
ls -l /data
sudo # should return Operation not permitted
echo "hey!"
# Emit ten "work N" progress lines, one per second.
for (( step = 0; step < 10; step++ )); do
echo "work $step"
sleep 1
done
| true
|
6f71f652a0cf7506b414a33cace8d7f87bd86a83
|
Shell
|
janelia-flyem/libdvid-cpp
|
/conda-recipe/build.sh
|
UTF-8
| 4,666
| 3.765625
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
# Echo every command for build-log debugging.
set -x
# Depending on our platform, shared libraries end with either .so or .dylib
if [[ $(uname) == 'Darwin' ]]; then
DYLIB_EXT=dylib
CXXFLAGS="${CXXFLAGS} -I${PREFIX}/include -stdlib=libc++"
else
DYLIB_EXT=so
CXXFLAGS="${CXXFLAGS} -I${PREFIX}/include"
# Don't specify these -- let conda-build do it.
#CC=gcc
#CXX=g++
fi
# Derive Python version identifiers from the interpreter on PATH:
# CONDA_PY like "39" (unless already provided), PY_VER like "3.9",
# PY_ABI like "3.9" plus any ABI flags (e.g. "m" on older Pythons).
CONDA_PY=${CONDA_PY-$(python -c "import sys; print('{}{}'.format(*sys.version_info[:2]))")}
PY_VER=$(python -c "import sys; print('{}.{}'.format(*sys.version_info[:2]))")
PY_ABIFLAGS=$(python -c "import sys; print('' if sys.version_info.major == 2 else sys.abiflags)")
PY_ABI=${PY_VER}${PY_ABIFLAGS}
BUILD_DIR=${BUILD_DIR-build}
# Optional single argument: --configure-only skips the build/install/test phase.
CONFIGURE_ONLY=0
if [[ $1 != "" ]]; then
if [[ $1 == "--configure-only" ]]; then
CONFIGURE_ONLY=1
else
echo "Unknown argument: $1"
exit 1
fi
fi
# On Mac, you can specify CMAKE_GENERATOR=Xcode if you want.
CMAKE_GENERATOR=${CMAKE_GENERATOR-Unix Makefiles}
CMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE-Release}
export MACOSX_DEPLOYMENT_TARGET=10.9
#
# For some reason cmake's FindPython3 module seems
# to not work properly at all, at least on macOS-10.15 with cmake-3.18.
# WTF? OK, I'm just overriding everything.
#
# Every Python location is pinned explicitly to the conda environment
# under ${PREFIX} so FindPython3 cannot pick up a system interpreter.
PYTHON_CMAKE_SETTINGS=(
-DPython3_ROOT_DIR=${PREFIX}
-DPython3_FIND_FRAMEWORK=NEVER
-DPython3_FIND_VIRTUALENV=ONLY
-DPython3_EXECUTABLE=${PREFIX}/bin/python3
-DPython3_INCLUDE_DIR=${PREFIX}/include/python${PY_ABI}
-DPython3_NumPy_INCLUDE_DIR=${PREFIX}/lib/python${PY_VER}/site-packages/numpy/core/include
-DPython3_LIBRARY=${PREFIX}/lib/libpython${PY_ABI}.${DYLIB_EXT}
-DPython3_LIBRARY_RELEASE=${PREFIX}/lib/libpython${PY_ABI}.${DYLIB_EXT}
)
# CONFIGURE
mkdir -p "${BUILD_DIR}" # Using -p here is convenient for calling this script outside of conda.
cd "${BUILD_DIR}"
cmake ..\
-G "${CMAKE_GENERATOR}" \
-DCMAKE_C_COMPILER=${CC} \
-DCMAKE_CXX_COMPILER=${CXX} \
-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} \
-DCMAKE_INSTALL_PREFIX="${PREFIX}" \
-DCMAKE_PREFIX_PATH="${PREFIX}" \
-DCMAKE_CXX_FLAGS="${CXXFLAGS}" \
-DCMAKE_OSX_SYSROOT="${CONDA_BUILD_SYSROOT}" \
-DCMAKE_OSX_DEPLOYMENT_TARGET="${MACOSX_DEPLOYMENT_TARGET}" \
-DCMAKE_SHARED_LINKER_FLAGS="-L${PREFIX}/lib ${LDFLAGS}" \
-DCMAKE_EXE_LINKER_FLAGS="-L${PREFIX}/lib ${LDFLAGS}" \
-DBOOST_ROOT="${PREFIX}" \
-DBoost_LIBRARY_DIR="${PREFIX}/lib" \
-DBoost_INCLUDE_DIR="${PREFIX}/include" \
-DBoost_PYTHON_LIBRARY="${PREFIX}/lib/libboost_python${CONDA_PY}.${DYLIB_EXT}" \
${PYTHON_CMAKE_SETTINGS[@]} \
-DLIBDVID_WRAP_PYTHON=1 \
-DWITH_JPEGTURBO=1 \
-DWITH_LIBDEFLATE=1 \
##
if [[ $CONFIGURE_ONLY == 0 ]]; then
  ##
  ## BUILD
  ##
  make -j${CPU_COUNT}

  ##
  ## INSTALL
  ##
  # "install" to the build prefix (conda will relocate these files afterwards)
  make install

  # For debug builds, this symlink can be useful...
  #cd ${PREFIX}/lib && ln -s libdvidcpp-g.${DYLIB_EXT} libdvidcpp.${DYLIB_EXT} && cd -

  ##
  ## TEST
  ##
  if [[ -z "$SKIP_LIBDVID_TESTS" || "$SKIP_LIBDVID_TESTS" == "0" ]]; then
    echo "Running build tests. To skip, set SKIP_LIBDVID_TESTS=1"

    # Launch dvid
    echo "Starting test DVID server..."
    dvid -verbose serve ${RECIPE_DIR}/dvid-testserver-config.toml &
    DVID_PID=$!
    sleep 5;

    # BUGFIX: the previous check was 'if [ ! pgrep -x > /dev/null ]', which
    # is invalid test syntax ('[' sees the literal words "pgrep" and "-x"),
    # so the startup failure was never detected.  Test pgrep's exit status
    # directly: fail if no running process is named exactly "dvid".
    if ! pgrep -x dvid > /dev/null; then
      2>&1 echo "*****************************************************"
      2>&1 echo "Unable to start test DVID server! "
      2>&1 echo "Do you already have a server runnining on port :8000?"
      2>&1 echo "*****************************************************"
      exit 2
    fi

    # Kill the DVID server when this script exits
    trap 'kill -TERM $DVID_PID' EXIT

    # This script runs 'make test', which uses the build artifacts in the build directory, not the installed files.
    # Therefore, they haven't been post-processed by conda to automatically locate their dependencies.
    # We'll set LD_LIBRARY_PATH to avoid errors from ld
    if [[ $(uname) == Darwin ]]; then
      export DYLD_FALLBACK_LIBRARY_PATH="${PREFIX}/lib":"${DYLD_FALLBACK_LIBRARY_PATH}"
    else
      export LD_LIBRARY_PATH="${PREFIX}/lib":"${LD_LIBRARY_PATH}"
    fi

    if ! make test; then
      cat Testing/Temporary/LastTest.log
      1>&2 echo "****************************************"
      1>&2 echo "Post-build tests FAILED. See log above."
      1>&2 echo "****************************************"
      exit 1
    fi
  fi
fi
| true
|
aad98f69283dbc634caf62f92ddaf4ddba81f6f2
|
Shell
|
doc22940/tdoku
|
/BENCH.sh
|
UTF-8
| 2,112
| 3.796875
| 4
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
#
# Run multiple benchmark configurations
#
# BENCH.sh <arch_name> <taskset_cpu_mask> <spec_1> ... <spec_n>
#
# Where a spec is compiler_optlevel_{native|sse4.2|avx}[_pgo]
#
# e.g.,
# BENCH.sh i7-4930k 0x20 gcc-6_O3_native gcc-6_O3_sse4.2 clang-8_Ofast_native_pgo
#

# flags to tell BUILD.sh to include all solvers
ALL_SOLVERS="-DMINISAT=on -DBB_SUDOKU=on -DFAST_SOLV_9R2=on -DKUDOKU=on -DNORVIG=on \
             -DJSOLVE=on -DFSSS=on -DFSSS2=on -DJCZSOLVE=on -DSK_BFORCE2=on -DRUST_SUDOKU=on"
# which solvers to run for profile generation
PGO_SOLVERS="norvig,fast_solv_9r2,kudoku,bb_sudoku,jsolve,fsss2,jczsolve,sk_bforce2,tdoku"

PLATFORM=$1
TASKSET_CPU=$2
shift 2

for spec in "$@"
do
    # Split the spec on '_'.  Scoping IFS to the read avoids clobbering the
    # global IFS (the old code permanently reset it to a single space).
    IFS='_' read -ra TOKS <<< "${spec}"
    export CC=${TOKS[0]}
    export CXX=$(echo ${CC} | sed -e "s/gcc/g++/" -e "s/clang/clang++/")
    OPT=${TOKS[1]}
    TARGET=${TOKS[2]}

    SSEFLAG=""
    # BUGFIX: the usage comment (and the example specs) document the target
    # as 'sse4.2', but the old test only matched 'msse4.2'; accept both.
    if [[ "$TARGET" == "sse4.2" || "$TARGET" == "msse4.2" ]]; then
        SSEFLAG="-DSSE4_2=on"
    fi
    if [[ "$TARGET" == "avx" ]]; then
        SSEFLAG="-DAVX=on"
    fi

    PGO=""
    if [[ "${TOKS[3]:-}" == "pgo" ]]; then
        # build for profile generation, profile a test load, move or merge profile, build using profile
        PGO="_pgo"
        rm -rf build/pgodata*
        ./BUILD.sh run_benchmark -DOPT="${OPT}" "${SSEFLAG}" -DARGS="-fprofile-generate=build/pgodata.gen" ${ALL_SOLVERS}
        build/run_benchmark -t15 -w15 -s${PGO_SOLVERS} data/puzzles1_17_clue
        if echo "${CC}" | grep -q gcc; then
            mv build/pgodata.gen build/pgodata.use
        else
            "${CC/clang/llvm-profdata}" merge build/pgodata.gen -output build/pgodata.use
        fi
        ./BUILD.sh run_benchmark -DOPT="${OPT}" "${SSEFLAG}" -DARGS="-fprofile-use=pgodata.use" ${ALL_SOLVERS}
    else
        # build without pgo
        ./BUILD.sh run_benchmark -DOPT="${OPT}" "${SSEFLAG}" ${ALL_SOLVERS}
    fi

    # run benchmarks for this spec, pinned to the requested CPU mask
    benchmarks/bench.sh setarch $(uname -m) -R taskset ${TASKSET_CPU} | tee benchmarks/${PLATFORM}_${CC}_${OPT}_${TARGET}${PGO}
done
| true
|
531b9ff483e7a0c0230507696090bb9d3001fecd
|
Shell
|
clifmo/CloudflareDDNS
|
/cfddns.sh
|
UTF-8
| 20,033
| 4.1875
| 4
|
[] |
no_license
|
#!/bin/sh
#
# update Cloudflare DNS records with current (dynamic) IP address
# Script by Asif Bacchus <asif@bacchus.cloud>
# Last modified: May 10, 2021
# Version 2.2
#
### text formatting presets using tput
# Start with no-op formatting codes and a conservative width, then upgrade
# to real terminal escape sequences when tput is available.
bold='' cyan='' err='' magenta='' norm='' ok='' warn='' yellow=''
width=80
if command -v tput >/dev/null; then
    # tput needs TERM; default it for cron/daemon environments
    [ -z "$TERM" ] && export TERM=xterm
    bold=$(tput bold)
    cyan=$(tput setaf 6)
    err=$(tput bold)$(tput setaf 1)
    magenta=$(tput setaf 5)
    norm=$(tput sgr0)
    ok=$(tput setaf 2)
    warn=$(tput bold)$(tput setaf 3)
    yellow=$(tput setaf 3)
    width=$(tput cols)
fi
### functions
badParam() {
    # Complain about a bad command-line parameter and abort with status 1.
    #   $1 - failure class: 'null' (empty value), 'dne' (missing/empty file),
    #        or 'errMsg' (pre-built message in $2)
    #   $2/$3 - the offending switch and value
    # Unknown classes fall through and return without exiting (as before).
    case "$1" in
    null)
        printf "\n%sERROR: '%s' cannot have a NULL (empty) value.\n" "$err" "$2"
        printf "%sPlease use '--help' for assistance.%s\n\n" "$cyan" "$norm"
        exit 1
        ;;
    dne)
        printf "\n%sERROR: '%s %s'\n" "$err" "$2" "$3"
        printf "file or directory does not exist or is empty.%s\n\n" "$norm"
        exit 1
        ;;
    errMsg)
        printf "\n%sERROR: %s%s\n\n" "$err" "$2" "$norm"
        exit 1
        ;;
    esac
}
exitError() {
    # Map a numeric exit code ($1) to a human-readable message, log it,
    # write the failure footer, and terminate the script with that code.
    # Unknown codes fall through to '*' and exit 99 immediately.
    case "$1" in
    3)
        errMsg="Unable to connect to Cloudflare servers. This is probably a temporary networking issue. Please try again later."
        ;;
    10)
        errMsg="Unable to auto-detect IP address. Try again later or supply the IP address to be used."
        ;;
    20)
        errMsg="Cloudflare authorized email address (cfEmail) is either null or undefined. Please check your Cloudflare credentials file."
        ;;
    21)
        errMsg="Cloudflare authorized API key (cfKey) is either null or undefined. Please check your Cloudflare credentials file."
        ;;
    22)
        errMsg="Cloudflare zone id (cfZoneId) is either null or undefined. Please check your Cloudflare credentials file."
        ;;
    25)
        errMsg="Cloudflare API error. Please review any 'CF-ERR:' lines in this log for details."
        ;;
    26)
        errMsg="${failedHostCount} host update(s) failed. Any 'CF-ERR:' lines noted in this log may help determine what went wrong."
        ;;
    *)
        # unspecified/unknown code: log and exit 99 without reaching the
        # common logging path below
        writeLog error "An unspecified error occurred. (code: 99)"
        printf "%s[%s] -- Cloudflare DDNS update-script: completed with error(s) --%s\n" "$err" "$(stamp)" "$norm" >>"$logFile"
        exit 99
        ;;
    esac
    writeLog error "$errMsg" "$1"
    printf "%s[%s] -- Cloudflare DDNS update-script: completed with error(s) --%s\n" "$err" "$(stamp)" "$norm" >>"$logFile"
    exit "$1"
}
exitOK() {
    # Write the success footer to the log and terminate with status 0.
    printf "%s[%s] -- Cloudflare DDNS update-script: completed successfully --%s\n" "$ok" "$(stamp)" "$norm" >>"$logFile"
    exit 0
}
listCFErrors() {
    # Log every error object contained in a Cloudflare JSON reply ($1).
    # jq emits one code/message per line; joining the lines with '_' lets a
    # POSIX shell (no arrays) walk both lists in lock-step via ${var%%_*}
    # (first element) and ${var#*_} (rest of list).
    codes="$(printf "%s" "$1" | jq -r '.errors | .[] | .code' | tr '\n' '_')"
    messages="$(printf "%s" "$1" | jq -r '.errors | .[] | .message' | tr '\n' '_')"
    until [ -z "$codes" ] || [ -z "$messages" ]; do
        code="${codes%%_*}"
        message="${messages%%_*}"
        codes="${codes#*_}"
        messages="${messages#*_}"
        writeLog cf "$message" "$code"
    done
}
scriptExamples() {
    # Print worked usage examples (invoked via --examples) and exit 0.
    newline
    printf "Update Cloudflare DNS host A/AAAA records with current IP address.\n"
    printf "%sUsage: %s --records host.domain.tld[,host2.domain.tld,...] [parameters]%s\n\n" "$bold" "$scriptName" "$norm"
    textBlock "${magenta}--- usage examples ---${norm}"
    newline
    textBlockSwitches "${scriptName} -r myserver.mydomain.net"
    textBlock "Update Cloudflare DNS records for myserver.mydomain.net with the auto-detected public IP4 address. Credentials will be expected in the default location and the log will be written in the default location also."
    newline
    textBlockSwitches "${scriptName} -r myserver.mydomain.net -6"
    textBlock "Same as above, but update AAAA host records with the auto-detected public IP6 address."
    newline
    textBlockSwitches "${scriptName} -r myserver.mydomain.net,myserver2.mydomain.net -l /var/log/cfddns.log --nc"
    textBlock "Update DNS entries for both listed hosts using auto-detected IP4 address. Write a non-coloured log to '/var/log/cfddns.log'."
    newline
    textBlockSwitches "${scriptName} -r myserver.mydomain.net,myserver2.mydomain.net -l /var/log/cfddns.log --ip6 --ip fd21:7a62:2737:9c3a::a151"
    textBlock "Update DNS AAAA entries for listed hosts using the *specified* IP address. Write a colourful log to the location specified."
    newline
    textBlockSwitches "${scriptName} -r myserver.mydomain.net -c /root/cloudflare.creds -l /var/log/cfddns.log --ip 1.2.3.4"
    textBlock "Update DNS A entry for listed hostname with the provided IP address. Read cloudflare credentials file from specified location, save log in specified location."
    newline
    textBlockSwitches "${scriptName} -r myserver.mydomain.net -c /root/cloudflare.creds -l /var/log/cfddns.log -6 -i fd21:7a62:2737:9c3a::a151"
    textBlock "Exact same as above, but change the AAAA record. This is how you run the script once for IP4 and again for IP6."
    exit 0
}
scriptHelp() {
    # Print the full help screen (invoked via -h/--help/-? or when the
    # script is run with no arguments) and exit 0.
    newline
    printf "Update Cloudflare DNS host A/AAAA records with current IP address.\n"
    printf "%sUsage: %s --records host.domain.tld[,host2.domain.tld,...] [parameters]%s\n\n" "$bold" "$scriptName" "$norm"
    textBlock "The only required parameter is '--records' which is a comma-delimited list of hostnames to update. However, there are several other options which may be useful to implement."
    textBlock "Parameters are listed below and followed by a description of their effect. If a default value exists, it will be listed on the following line in (parentheses)."
    newline
    textBlock "${magenta}--- script related parameters ---${norm}"
    newline
    textBlockSwitches "-c | --cred | --creds | --credentials | -f (deprecated, backward-compatibility)"
    textBlock "Path to file containing your Cloudflare *token* credentials. Please refer to the repo README for more information on format, etc."
    textBlockDefaults "(${accountFile})"
    newline
    textBlockSwitches "-l | --log"
    textBlock "Path where the log file should be written."
    textBlockDefaults "(${logFile})"
    newline
    textBlockSwitches "--nc | --no-color | --no-colour"
    textBlock "Switch value. Disables ANSI colours in the log. Useful if you review the logs using a reader that does not parse ANSI colour codes."
    textBlockDefaults "(disabled: print logs in colour)"
    newline
    textBlockSwitches "--log-console"
    textBlock "Switch value. Output log to console (stdout) instead of a log file. Can be combined with --nc if desired."
    textBlockDefaults "(disabled: output to log file)"
    newline
    textBlockSwitches "--no-log"
    textBlock "Switch value. Do not create a log (i.e. no console, no file). You will not have *any* output from the script if you choose this option, so you will not know if updates succeeded or failed."
    textBlockDefaults "(disabled: output to log file)"
    newline
    textBlockSwitches "-h | --help | -?"
    textBlock "Display this help screen."
    newline
    textBlockSwitches "--examples"
    textBlock "Show some usage examples."
    newline
    textBlock "${magenta}--- DNS related parameters ---${norm}"
    newline
    textBlockSwitches "-r | --record | --records"
    textBlock "Comma-delimited list of hostnames for which IP addresses should be updated in Cloudflare DNS. This parameter is REQUIRED. Note that this script will only *update* records, it will not create new ones. If you supply hostnames that are not already defined in DNS, the script will log a warning and will skip those hostnames."
    newline
    textBlockSwitches "-i | --ip | --ip-address | -a | --address"
    textBlock "New IP address for DNS host records. If you omit this, the script will attempt to auto-detect your public IP address and use that."
    newline
    textBlockSwitches "-4 | --ip4 | --ipv4"
    textBlock "Switch value. Update Host 'A' records (IP4) only. Note that this script can only update either A *or* AAAA records. If you need to update both, you'll have to run the script once in IP4 mode and again in IP6 mode. If you specify both this switch and the IP6 switch, the last one specified will take effect."
    textBlockDefaults "(enabled: update A records)"
    newline
    textBlockSwitches "-6 | --ip6 | --ipv6"
    textBlock "Switch value. Update Host 'AAAA' records (IP6) only. Note that this script can only update either A *or* AAAA records. If you need to update both, you'll have to run the script once in IP4 mode and again in IP6 mode. If you specify both this switch and the IP4 switch, the last one specified will take effect."
    textBlockDefaults "(disabled: update A records)"
    newline
    textBlock "Please refer to the repo README for more detailed information regarding this script and how to automate and monitor it."
    newline
    exit 0
}
# --- small output helpers -------------------------------------------------

stamp() {
    # Timestamp used in log lines: "YYYY-MM-DD HH:MM:SS".
    date "+%F %T"
}

newline() {
    printf "\n"
}

textBlock() {
    # Print $1 word-wrapped to the detected terminal width.
    printf "%s\n" "$1" | fold -w "$width" -s
}

textBlockDefaults() {
    # Default values in help output: yellow.
    printf "%s%s%s\n" "$yellow" "$1" "$norm"
}

textBlockSwitches() {
    # Switch names in help output: cyan.
    printf "%s%s%s\n" "$cyan" "$1" "$norm"
}
writeLog() {
    # Append a formatted record to $logFile.
    #   $1 - record type (see branches)
    #   $2 - message text
    #   $3 - numeric code (only for 'cf' and 'error')
    # 'process' deliberately omits the trailing newline; a following
    # process-done / process-error / process-warning call finishes the line.
    case "$1" in
    cf)
        printf "[%s] CF-ERR: %s (code: %s)\n" "$(stamp)" "$2" "$3" >>"$logFile"
        ;;
    err)
        printf "%s[%s] ERR: %s%s\n" "$err" "$(stamp)" "$2" "$norm" >>"$logFile"
        ;;
    error)
        printf "%s[%s] ERROR: %s (code: %s)%s\n" "$err" "$(stamp)" "$2" "$3" "$norm" >>"$logFile"
        ;;
    process)
        printf "%s[%s] %s... %s" "$cyan" "$(stamp)" "$2" "$norm" >>"$logFile"
        ;;
    process-done)
        printf "%s%s%s\n" "$cyan" "$2" "$norm" >>"$logFile"
        ;;
    process-error)
        printf "%sERROR%s\n" "$err" "$norm" >>"$logFile"
        ;;
    process-warning)
        printf "%s%s%s\n" "$warn" "$2" "$norm" >>"$logFile"
        ;;
    stamped)
        printf "[%s] %s\n" "$(stamp)" "$2" >>"$logFile"
        ;;
    success)
        printf "%s[%s] SUCCESS: %s%s\n" "$ok" "$(stamp)" "$2" "$norm" >>"$logFile"
        ;;
    warn)
        printf "%s[%s] WARN: %s%s\n" "$warn" "$(stamp)" "$2" "$norm" >>"$logFile"
        ;;
    warning)
        printf "%s[%s] WARNING: %s%s\n" "$warn" "$(stamp)" "$2" "$norm" >>"$logFile"
        ;;
    *)
        # untyped: raw message, no stamp
        printf "%s\n" "$2" >>"$logFile"
        ;;
    esac
}
### default variable values
# Resolve the script's own directory robustly (CDPATH cleared, \cd dodges any
# cd alias/function) so default log/credential paths sit beside the script.
scriptPath="$(CDPATH='' \cd -- "$(dirname -- "$0")" && pwd -P)"
scriptName="$(basename "$0")"
# default log: <scriptname>.log next to the script
logFile="$scriptPath/${scriptName%.*}.log"
# default Cloudflare token credentials file (format: see repo README)
accountFile="$scriptPath/cloudflare.credentials"
colourizeLogFile=1
# comma-delimited hostname list; required, supplied via -r/--records
dnsRecords=""
dnsSeparator=","
# IP to publish; empty means auto-detect via the services below
ipAddress=""
ip4=1
ip6=0
ip4DetectionSvc="http://ipv4.icanhazip.com"
ip6DetectionSvc="http://ipv6.icanhazip.com"
# counters reported at exit time
invalidDomainCount=0
failedHostCount=0
### process startup parameters
# no arguments at all: show help and exit
if [ -z "$1" ]; then
    scriptHelp
fi
while [ $# -gt 0 ]; do
    case "$1" in
    -h | -\? | --help)
        # display help
        scriptHelp
        ;;
    --examples)
        # display sample commands
        scriptExamples
        ;;
    -l | --log)
        # set log file location (trailing slash stripped)
        if [ -n "$2" ]; then
            logFile="${2%/}"
            shift
        else
            badParam null "$@"
        fi
        ;;
    --log-console)
        # log to the console instead of a file
        logFile="/dev/stdout"
        ;;
    --no-log)
        # do not log anything
        logFile="/dev/null"
        ;;
    --nc | --no-color | --no-colour)
        # do not colourize log file
        colourizeLogFile=0
        ;;
    -c | --cred* | -f)
        # path to Cloudflare credentials file; must exist and be non-empty
        if [ -n "$2" ]; then
            if [ -f "$2" ] && [ -s "$2" ]; then
                accountFile="${2%/}"
                shift
            else
                badParam dne "$@"
            fi
        else
            badParam null "$@"
        fi
        ;;
    -r | --record | --records)
        # DNS records to update (all spaces removed from the list)
        if [ -n "$2" ]; then
            dnsRecords=$(printf "%s" "$2" | sed -e 's/ //g')
            shift
        else
            badParam null "$@"
        fi
        ;;
    -i | --ip | --ip-address | -a | --address)
        # IP address to use (not parsed for correctness)
        if [ -n "$2" ]; then
            ipAddress="$2"
            shift
        else
            badParam null "$@"
        fi
        ;;
    -4 | --ip4 | --ipv4)
        # operate in IP4 mode (default); last of -4/-6 wins
        ip4=1
        ip6=0
        ;;
    -6 | --ip6 | --ipv6)
        # operate in IP6 mode; last of -4/-6 wins
        ip6=1
        ip4=0
        ;;
    *)
        printf "\n%sUnknown option: %s\n" "$err" "$1"
        printf "%sUse '--help' for valid options.%s\n\n" "$cyan" "$norm"
        exit 1
        ;;
    esac
    shift
done
### pre-flight checks
# curl (API calls) and jq (JSON parsing) are hard requirements
if ! command -v curl >/dev/null; then
    printf "\n%sThis script requires 'curl' be installed and accessible. Exiting.%s\n\n" "$err" "$norm"
    exit 2
fi
if ! command -v jq >/dev/null; then
    printf "\n%sThis script requires 'jq' be installed and accessible. Exiting.%s\n\n" "$err" "$norm"
    exit 2
fi
# at least one record is mandatory
[ -z "$dnsRecords" ] && badParam errMsg "You must specify at least one DNS record to update. Exiting."
# verify credentials file exists and is not empty (default check)
if [ ! -f "$accountFile" ] || [ ! -s "$accountFile" ]; then
    badParam errMsg "Cannot find Cloudflare credentials file (${accountFile}). Exiting."
fi
# turn off log file colourization if parameter is set: blank out every
# formatting code so later printf calls emit plain text
if [ "$colourizeLogFile" -eq 0 ]; then
    bold=""
    cyan=""
    err=""
    magenta=""
    norm=""
    ok=""
    warn=""
    yellow=""
fi
### initial log entries
# Everything in this group is appended to the log via a single redirection.
# IP auto-detection happens here too so the chosen address gets logged.
{
    printf "%s[%s] -- Cloudflare DDNS update-script: starting --%s\n" "$ok" "$(stamp)" "$norm"
    printf "Parameters:\n"
    printf "script path: %s\n" "$scriptPath/$scriptName"
    printf "credentials file: %s\n" "$accountFile"
    if [ "$ip4" -eq 1 ]; then
        printf "mode: IP4\n"
    elif [ "$ip6" -eq 1 ]; then
        printf "mode: IP6\n"
    fi
    # detect and report IP address
    if [ -z "$ipAddress" ]; then
        # detect public ip address
        # NOTE(review): only a curl *transport* failure is caught here; a
        # successful request with an empty body would pass — confirm upstream
        if [ "$ip4" -eq 1 ]; then
            if ! ipAddress="$(curl -s $ip4DetectionSvc)"; then
                printf "ddns ip address:%s ERROR%s\n" "$err" "$norm"
                exitError 10
            fi
        fi
        if [ "$ip6" -eq 1 ]; then
            if ! ipAddress="$(curl -s $ip6DetectionSvc)"; then
                printf "ddns ip address:%s ERROR%s\n" "$err" "$norm"
                exitError 10
            fi
        fi
        printf "ddns ip address (detected): %s\n" "$ipAddress"
    else
        printf "ddns ip address (supplied): %s\n" "$ipAddress"
    fi
    # iterate DNS records to update: strip trailing separators, re-append
    # exactly one so the %%/# expansions can walk the list element-wise
    dnsRecordsToUpdate="$(printf '%s' "${dnsRecords}" | sed "s/${dnsSeparator}*$//")$dnsSeparator"
    while [ -n "$dnsRecordsToUpdate" ] && [ "$dnsRecordsToUpdate" != "$dnsSeparator" ]; do
        record="${dnsRecordsToUpdate%%${dnsSeparator}*}"
        dnsRecordsToUpdate="${dnsRecordsToUpdate#*${dnsSeparator}}"
        if [ -z "$record" ]; then continue; fi
        printf "updating record: %s\n" "$record"
    done
    printf "(end of parameter list)\n"
} >>"$logFile"
### read Cloudflare credentials
writeLog process "Reading Cloudflare credentials"
# Source the credentials file, which must define cfKey and cfZoneId.
# A relative path needs an explicit './' prefix for the '.' builtin.
case "$accountFile" in
/*)
    # absolute path, use as-is
    # shellcheck source=./cloudflare.credentials
    . "$accountFile"
    ;;
*)
    # relative path, rewrite
    # shellcheck source=./cloudflare.credentials
    . "./$accountFile"
    ;;
esac
# NOTE(review): exit code 20 (missing cfEmail) exists in exitError but is
# never triggered here — the Bearer-token API only needs cfKey and cfZoneId.
if [ -z "$cfKey" ]; then
    writeLog process-error
    exitError 21
fi
if [ -z "$cfZoneId" ]; then
    writeLog process-error
    exitError 22
fi
writeLog process-done "DONE"
### connect to Cloudflare and do what needs to be done!
# rebuild the work list (first pass was consumed while logging parameters)
dnsRecordsToUpdate="$dnsRecords$dnsSeparator"
if [ "$ip4" -eq 1 ]; then
    recordType="A"
elif [ "$ip6" -eq 1 ]; then
    recordType="AAAA"
fi
# iterate hosts to update
while [ -n "$dnsRecordsToUpdate" ] && [ "$dnsRecordsToUpdate" != "$dnsSeparator" ]; do
    # peel off the next hostname from the separator-delimited list
    record="${dnsRecordsToUpdate%%${dnsSeparator}*}"
    dnsRecordsToUpdate="${dnsRecordsToUpdate#*${dnsSeparator}}"
    if [ -z "$record" ]; then continue; fi
    writeLog process "Processing ${record}"
    # look up the existing record id/content; exit if curl/network error
    if ! cfLookup="$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones/${cfZoneId}/dns_records?name=${record}&type=${recordType}" \
        -H "Authorization: Bearer ${cfKey}" \
        -H "Content-Type: application/json")"; then
        writeLog process-error
        exitError 3
    fi
    # exit if API error
    # exit here since API errors on GET request probably indicates authentication error which would affect all remaining operations
    # no reason to continue processing other hosts and pile-up errors which might look like a DoS attempt
    cfSuccess="$(printf "%s" "$cfLookup" | jq -r '.success')"
    if [ "$cfSuccess" = "false" ]; then
        writeLog process-error
        listCFErrors "$cfLookup"
        exitError 25
    fi
    resultCount="$(printf "%s" "$cfLookup" | jq '.result_info.count')"
    # skip to next host if cannot find existing host record (this script *updates* only, does not create!)
    if [ "$resultCount" = "0" ]; then
        # warn if record of host not found
        writeLog process-warning "NOT FOUND"
        writeLog warn "Cannot find existing record to update for DNS entry: ${record}"
        invalidDomainCount=$((invalidDomainCount + 1))
        continue
    fi
    objectId=$(printf "%s" "$cfLookup" | jq -r '.result | .[] | .id')
    currentIpAddr=$(printf "%s" "$cfLookup" | jq -r '.result | .[] | .content')
    writeLog process-done "FOUND: IP = ${currentIpAddr}"
    # skip to next hostname if record already up-to-date
    if [ "$currentIpAddr" = "$ipAddress" ]; then
        writeLog stamped "IP address for ${record} is already up-to-date"
        continue
    fi
    # update record: PATCH only the 'content' field, JSON built safely by jq
    writeLog process "Updating IP address for ${record}"
    updateJSON="$(jq -n --arg key0 content --arg value0 "${ipAddress}" '{($key0):$value0}')"
    # exit if curl/network error
    if ! cfResult="$(curl -s -X PATCH "https://api.cloudflare.com/client/v4/zones/${cfZoneId}/dns_records/${objectId}" \
        -H "Authorization: Bearer ${cfKey}" \
        -H "Content-Type: application/json" \
        --data "${updateJSON}")"; then
        writeLog process-error
        exitError 3
    fi
    # note update success or failure
    cfSuccess="$(printf "%s" "$cfResult" | jq '.success')"
    if [ "$cfSuccess" = "true" ]; then
        writeLog process-done "DONE"
        writeLog success "IP address for ${record} updated."
    else
        writeLog process-error
        listCFErrors "$cfResult"
        writeLog err "Unable to update IP address for ${record}"
        # do not exit with error, API error here is probably an update issue specific to this host
        # increment counter and note it after all processing finished
        failedHostCount=$((failedHostCount + 1))
    fi
done
# exit: summarize skipped hosts, then succeed or fail based on update results
if [ "$invalidDomainCount" -ne 0 ]; then
    writeLog warning "${invalidDomainCount} invalid host(s) supplied for updating."
fi
if [ "$failedHostCount" -ne 0 ]; then
    exitError 26
else
    exitOK
fi

### exit return codes
# 0: normal exit, no errors
# 1: invalid or unknown parameter
# 2: cannot find or access required external program(s)
# 3: curl error (probably connection)
# 10: cannot auto-detect IP address
# 20: defined in exitError (missing cfEmail) but currently never triggered
# 21: accountFile has a null or missing cfKey variable
# 22: accountFile has a null or missing cfZoneId variable
# 25: Cloudflare API error
# 26: one or more updates failed
# 99: unspecified error occurred
| true
|
5570b67ec04c80c7e659360006c8f9d25b6412f4
|
Shell
|
ekcomputer/random-wetlands
|
/polsar_pro/editEnviHdr.sh
|
UTF-8
| 669
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/bash
# Script to add georef line to ENVI header
# inputs are .ann and .hdr file name
# note: imaginary .bin files have wrong data type (float instead of complex)

# Validate arguments: without this guard an unset $1 made the old
# 'cat $1 | grep' pipelines read stdin, and '>> $2' was an invalid redirect.
if [ $# -lt 2 ] || [ ! -f "$1" ]; then
    echo "Usage: $(basename "$0") <file.ann> <file.hdr>" >&2
    exit 1
fi

# PARSE: extract upper-left coordinates and pixel sizes from the annotation
# file (the value is the 4th whitespace-separated token on the line).
long=$(grep -e "grd_pwr.col_addr" -- "$1" | awk '{print $4}')
lat=$(grep -e "grd_pwr.row_addr" -- "$1" | awk '{print $4}')
longpx=$(grep -e "grd_pwr.col_mult" -- "$1" | awk '{print $4}')
latpx=$(grep -e "grd_pwr.row_mult" -- "$1" | awk '{print $4}')

# ${var#-} removes a leading minus sign from the pixel measurements
echo map info = {Geographic Lat/Lon, 1, 1, $long, $lat, ${longpx#-}, ${latpx#-}, WGS-84} >> "$2"
echo
echo "Added georef info to header file: $(basename "$2")"
echo
| true
|
0dbdbc33329d39adbf3389d16409d98b95f66433
|
Shell
|
Gravity-Hub-Org/gravity-node-middleware
|
/run-geth.sh
|
UTF-8
| 1,423
| 3.65625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash

address_qty=0

# Start a single dev-mode go-ethereum node in Docker with RPC/WS on :8545.
simple_start () {
    docker run -d --name ethereum-node -v "$HOME/ethereum:/root" \
        -p 8545:8545 -p 30303:30303 \
        ethereum/client-go --dev --rpcapi personal,web3,eth --rpc --rpcaddr '0.0.0.0' \
        --dev.period 5 --rpcport 8545 --ws \
        --wsaddr '0.0.0.0' --wsport 8545 --cache 4096
}

# Start the node and create N additional accounts; N is parsed out of the
# option name itself (e.g. --start-5).
start_multiple () {
    # Quote the sed program: unquoted, '[^0-9]' was subject to globbing.
    address_qty=$(echo "$1" | sed -E 's/[^0-9]//g')
    echo "Number of ETH accounts: $address_qty"
    # Default an empty result to 0: the old unquoted '[ $address_qty -lt 1 ]'
    # was a test syntax error when no digits were present, silently skipping
    # this validation branch.
    if [ "${address_qty:-0}" -lt 1 ]
    then
        echo "Invalid accounts number"
        exit 1
    fi
    echo "Starting ethereum node..."
    simple_start
    eth_node_id=$(bash pure-start.sh --get-eth-node-id)
    sleep 10
    echo "Creating $address_qty additional ETH addresses..."
    # Build the accounts recursively inside the geth JS console.
    address_list=$(
        docker exec -it "$eth_node_id" geth attach http://127.0.0.1:8545 \
            --exec "(function a(i, r) { if (i==$address_qty) { return r } r.push(personal.newAccount('1')); return a(i+1, r) })(0, [])"
    )
    echo "Fetched addresses"
    echo "Address list: $address_list"
}

# trap 'echo "Terminating environment..."; bash pure-start.sh --shutdown' SIGINT

# Dispatch: no arguments -> plain node; otherwise process each option.
main () {
    if [ -z "$1" ]
    then
        simple_start
        exit 0
    fi
    while [ -n "$1" ]
    do
        case "$1" in
            --simple) simple_start ;;
            --start-*) start_multiple "$1" ;;
        esac
        shift
    done
}

# Quote "$@" so arguments survive word-splitting intact.
main "$@"
| true
|
b9a2af988f793709de839fa397a30c9167aa9769
|
Shell
|
kapral/install-couchdb
|
/install-couchdb.sh
|
UTF-8
| 526
| 2.5625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Build and install Apache CouchDB 2.0.0 from source on a Debian/Ubuntu host.
set -e

# 'apt-get update' may fail on stale mirrors; the install step below will
# still surface a real problem thanks to set -e.
sudo apt-get update || true
sudo apt-get --no-install-recommends -y install \
    build-essential pkg-config runit erlang \
    libicu-dev libmozjs185-dev libcurl4-openssl-dev

# BUGFIX: the previous URL pointed at a regional mirror
# (apache-mirror.rbc.ru); mirrors only carry *current* releases, so the
# 2.0.0 tarball must come from the permanent Apache archive.
wget https://archive.apache.org/dist/couchdb/source/2.0.0/apache-couchdb-2.0.0.tar.gz
tar -xvzf apache-couchdb-2.0.0.tar.gz
cd apache-couchdb-2.0.0/
./configure && make release

# Dedicated unprivileged system account for the CouchDB service.
sudo adduser --system \
    --no-create-home \
    --shell /bin/bash \
    --group --gecos \
    "CouchDB Administrator" couchdb
| true
|
29f2aad7b0e3b290353072bcf7b00248e99d5859
|
Shell
|
silverlyra/kubes
|
/push
|
UTF-8
| 1,202
| 3.921875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Publish each types/<version> directory as its own Git branch named after
# the version, committing (via plumbing commands) only when the directory's
# tree content actually changed since the branch tip.
set -euo pipefail

# Refuse to run with uncommitted changes: this script stages and resets files.
if [[ -n "$(git status --porcelain)" ]]; then
    echo >&2 './push can only be run with a clean Git checkout'
    exit 2
fi

for t in types/*; do
    version="$(basename "$t")"
    parent=''
    commit=''

    # Resolve the existing branch tip to use as the parent commit: prefer a
    # local branch, then the remote-tracking ref.
    if git rev-parse --verify "$version" >/dev/null 2>&1; then
        parent="$(git rev-parse --verify "$version")"
    elif git rev-parse --verify "origin/${version}" >/dev/null 2>&1; then
        parent="$(git rev-parse --verify "origin/${version}")"
    fi

    # Stage the directory and snapshot just its contents as a tree object.
    git add --force "$t"
    tree="$(git write-tree --prefix="${t}/")"

    if [[ -n "$parent" && "$(git rev-parse --verify "${parent}^{tree}")" == "$tree" ]]; then
        # branch tip already has exactly this tree: nothing to commit
        echo >&2 "$version already up-to-date: ${parent} (${tree})"
    elif [[ -n "$parent" ]]; then
        # content changed: commit on top of the existing branch and push
        # (update-ref's old-value argument guards against races)
        commit="$(git commit-tree "$tree" -p "$parent" -m "Update types for ${version}")"
        git update-ref "refs/heads/${version}" "$commit" "$parent"
        git push -u origin "$version"
        echo >&2 "Updated $version: ${commit}"
    else
        # no branch yet: create a parentless (root) commit for this version
        commit="$(git commit-tree "$tree" -m "Create types for ${version}")"
        git update-ref "refs/heads/${version}" "$commit"
        git push -u origin "$version"
        echo >&2 "Created $version: ${commit}"
    fi

    # Unstage before processing the next directory.
    git reset HEAD
done
| true
|
61c8412e6362492df5bd5a3b91796f6175c048f8
|
Shell
|
ILCogCompCuratorToHadoop/CuratorHadoopInterface
|
/scripts/directory_restructure.sh
|
UTF-8
| 626
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
# A script to rearrange output from the Ant build of the CuratorHadoopInterface
# into the recommended directory structure specified in the README.
# Previously this had to be run from its original location in the scripts
# directory; it now locates itself so it can be run from anywhere.

# Work relative to the directory this script lives in.
cd "$(dirname -- "$0")" || exit 1

echo "Copying Ant output into recommended directory structure..."

# -p: tolerate pre-existing target directories (safe to re-run)
mkdir -p ../../CuratorHadoopInterface
mkdir -p ../../JobHandler

# Fail loudly if the Ant artifacts are missing instead of continuing.
cp ../out/artifacts/Jar/CuratorHadoopInterface.jar ../../CuratorHadoopInterface || exit 1
cp ../out/artifacts/JobHandler/JobHandler.jar ../../JobHandler || exit 1
cp -R ../lib ../../JobHandler
cp -R ../scripts ../../JobHandler

echo "Directory restructuring is complete!"
| true
|
14267c01826622f739ef8cdfecceca89d188c623
|
Shell
|
pashashiz/hadoop
|
/word-counter/scripts/run-task.sh
|
UTF-8
| 569
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/sh
# Run the word-counter MapReduce job end-to-end: reset HDFS paths, upload the
# input, execute the jar, and pull the results back locally.

# NOTE(review): hard-coded Oracle JDK 8 location — confirm it matches the
# cluster hosts before reuse.
export JAVA_HOME=/usr/lib/jvm/java-8-oracle

# all local paths are relative to this script's parent directory
base=./..

echo 'Cleaning HDFS data...'
# these removals fail harmlessly when the paths do not exist yet
hdfs dfs -rm -r -skipTrash input/word-counter
hdfs dfs -rm -r -skipTrash output/word-counter

echo 'Cleaning local data...'
rm -R -f ${base}/output/*

echo 'Loading input data into HDFS...'
hdfs dfs -mkdir -p input
hdfs dfs -put ${base}/input input/word-counter

echo 'Run word-counter map-reduce programm...'
hadoop jar ${base}/target/word-counter.jar input/word-counter output/word-counter
# fetch the part files back so the result can be shown locally
hdfs dfs -get output/word-counter/* ${base}/output/
echo 'Result:'
cat ${base}/output/*
| true
|
89f479fe67aad19b5a7a621912e5e889813e92c9
|
Shell
|
aurbasica/lfs-pkgbuilds
|
/e2fsprogs/PKGBUILD
|
UTF-8
| 1,068
| 2.921875
| 3
|
[
"MIT"
] |
permissive
|
# e2fsprogs built from the SourceForge release tarball; the build (below)
# disables libblkid/libuuid/uuidd and the fsck wrapper, which this system
# gets from util-linux instead.
pkgname=e2fsprogs
pkgver=1.43.3
pkgrel=1
pkgdesc='Utilities for handling the ext2/3/4 filesystems'
arch=('x86_64')
source=("http://downloads.sourceforge.net/project/$pkgname/$pkgname/v$pkgver/$pkgname-$pkgver.tar.gz")
sha512sums=('77e753f77222a0a7a334d7d93c900ea6cb339ed40af29952e414fc4e45d8c5c01d67771978a941195effe666df965746c2b31977c5f05ff307429b978dac8dea')
build(){
    # Out-of-tree configure + compile: binaries in /bin (empty root prefix),
    # shared ELF libraries enabled, util-linux-provided components disabled.
    cd "$srcdir/$pkgname-$pkgver"
    mkdir -v build
    cd build
    ../configure --prefix=/usr \
        --bindir=/bin \
        --with-root-prefix="" \
        --enable-elf-shlibs \
        --disable-libblkid \
        --disable-libuuid \
        --disable-uuidd \
        --disable-fsck
    make
}
package(){
    # Stage the build into $pkgdir, then fix up the static libs and the
    # libext2fs info documentation.
    cd "$srcdir/$pkgname-$pkgver"
    cd build
    # Quote $pkgdir throughout: an unquoted staging path containing
    # whitespace previously word-split and broke every command below.
    make DESTDIR="$pkgdir" install
    make DESTDIR="$pkgdir" install-libs
    # static libraries are installed read-only; allow later modification
    chmod -v u+w "$pkgdir"/usr/lib/{libcom_err,libe2p,libext2fs,libss}.a
    # register the uncompressed info page in the package's info dir
    gunzip -v "$pkgdir"/usr/share/info/libext2fs.info.gz
    install-info --dir-file="$pkgdir"/usr/share/info/dir "$pkgdir"/usr/share/info/libext2fs.info
}
| true
|
562ca148d45f1d27075f02eb2b83594a059b3520
|
Shell
|
morristech/dotfiles-72
|
/bash/prompt.sh
|
UTF-8
| 1,180
| 3.125
| 3
|
[
"Unlicense"
] |
permissive
|
# those backslashes ARE escaping
# shellcheck disable=SC1117
# ANSI colour escape sequences for prompt construction.  The \[ ... \]
# wrappers mark each sequence as zero-width so readline computes the visible
# prompt length correctly.
readonly COLOR_BLACK="\[\033[0;30m\]"
readonly COLOR_RED="\[\033[0;31m\]"
readonly COLOR_GREEN="\[\033[0;32m\]"
readonly COLOR_BROWN="\[\033[0;33m\]"
readonly COLOR_BLUE="\[\033[0;34m\]"
readonly COLOR_PURPLE="\[\033[0;35m\]"
readonly COLOR_CYAN="\[\033[0;36m\]"
readonly COLOR_LIGHT_GRAY="\[\033[0;37m\]"
readonly COLOR_DARK_GRAY="\[\033[1;30m\]"
readonly COLOR_LIGHT_RED="\[\033[1;31m\]"
readonly COLOR_LIGHT_GREEN="\[\033[1;32m\]"
readonly COLOR_LIGHT_YELLOW="\[\033[1;33m\]"
readonly COLOR_LIGHT_BLUE="\[\033[1;34m\]"
readonly COLOR_LIGHT_PURPLE="\[\033[1;35m\]"
readonly COLOR_WHITE="\[\033[1;37m\]"
# reset-attributes sequence appended at the end of the prompt
readonly COLOR_NONE="\[\e[0m\]"
# Build smart prompt
smart_prompt() {
    # Rebuild PS1 before each prompt: red when the previous command failed,
    # and a distinct marker character when background jobs exist.
    # Capture $? first, before any other command clobbers it.
    # shellcheck disable=SC2181
    local last_status=$?
    if (( last_status )); then
        PS1=$COLOR_RED
    else
        PS1=$COLOR_NONE
    fi

    local job_count=0
    if type jobs &>/dev/null; then
        job_count=$(jobs -p | wc -l)
    fi
    if (( job_count > 0 )); then
        PS1=${PS1}$PROMPT_PS1_JOBS
    else
        PS1=${PS1}$PROMPT_PS1
    fi

    PS1=${PS1}${COLOR_NONE}' '
}
# Characters used as the minimal prompt: '.' normally, ':' when background
# jobs are present.
PROMPT_PS1='.'
PROMPT_PS1_JOBS=':'
# Prepend smart_prompt to PROMPT_COMMAND, but only once (the pattern guard
# keeps re-sourcing this file from appending duplicates).
[[ $PROMPT_COMMAND == *smart_prompt* ]] ||
    PROMPT_COMMAND='smart_prompt'${PROMPT_COMMAND:+;}$PROMPT_COMMAND
| true
|
ee8c51e7b1859d84d82bbbf192ea62e4b09a2f11
|
Shell
|
repomu/rclonebp
|
/bin/compile
|
UTF-8
| 607
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/sh
# Heroku buildpack compile step: vendor rclone into the slug and put it on
# PATH at dyno boot.

# Abort the build on any failure instead of shipping a silently broken slug.
set -e

# Prefix stdin lines for nested build-log output.
indent() {
    sed -u 's/^/ /'
}

echo "-----> Install rclone"

BUILD_DIR="$1"
VENDOR_DIR="vendor"
FILE="rclone-v1.47.0-linux-amd64"
DOWNLOAD_URL="https://github.com/ncw/rclone/releases/download/v1.47.0/$FILE.zip"
echo "DOWNLOAD_URL = " "$DOWNLOAD_URL" | indent

# Quote all paths: $BUILD_DIR is platform-supplied and unquoted expansions
# would word-split.  -p makes the mkdirs idempotent.
cd "$BUILD_DIR"
mkdir -p "$VENDOR_DIR"
cd "$VENDOR_DIR"
mkdir -p rclone
cd rclone

wget -q "$DOWNLOAD_URL"
unzip -qq "$FILE.zip"
mv "$FILE"/* .
rm -rf "$FILE" "$FILE.zip"

echo "exporting PATH" | indent
PROFILE_PATH="$BUILD_DIR/.profile.d/rclone.sh"
mkdir -p "$(dirname "$PROFILE_PATH")"
# Single quotes keep $PATH/$HOME literal so they expand at dyno boot time.
echo 'export PATH="$PATH:${HOME}/vendor/rclone"' >> "$PROFILE_PATH"
|
362b1d798741f1367dbfc91701695ea37a91ae29
|
Shell
|
apache/cloudstack
|
/scripts/storage/qcow2/managesnapshot.sh
|
UTF-8
| 10,829
| 3.640625
| 4
|
[
"GPL-2.0-only",
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"GPL-1.0-or-later",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"LicenseRef-scancode-unknown"
] |
permissive
|
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# $Id: managesnapshot.sh 11601 2010-08-11 17:26:15Z kris $ $HeadURL: svn://svn.lab.vmops.com/repos/branches/2.1.refactor/java/scripts/storage/qcow2/managesnapshot.sh $
# managesnapshot.sh -- manage snapshots for a single disk (create, destroy, rollback, backup)
usage() {
    # Print the four supported invocation forms to stderr and abort with
    # status 2 (create / destroy / rollback / backup).
    local prog
    prog=$(basename "$0")
    printf "Usage: %s: -c <path to disk> -n <snapshot name>\n" "$prog" >&2
    printf "Usage: %s: -d <path to disk> -n <snapshot name>\n" "$prog" >&2
    printf "Usage: %s: -r <path to disk> -n <snapshot name>\n" "$prog" >&2
    printf "Usage: %s: -b <path to disk> -n <snapshot name> -p <dest dir> -t <dest file>\n" "$prog" >&2
    exit 2
}
qemu_img="cloud-qemu-img"
# Pick the qemu-img binary: prefer the CloudStack wrappers, fall back to
# stock qemu-img, and keep the default name when nothing is on PATH.
# ('command -v' replaces the non-portable 'which; if [ $? -gt 0 ]' pattern.)
if ! command -v "$qemu_img" > /dev/null 2>&1; then
    if command -v ccp-qemu-img > /dev/null 2>&1; then
        qemu_img="ccp-qemu-img"
    elif command -v qemu-img > /dev/null 2>&1; then
        qemu_img="qemu-img"
    fi
fi
is_lv() {
    # Decide whether $1 refers to an LVM logical volume.
    # NOTE: the return convention is inverted relative to the name and every
    # caller depends on it: returns 1 for "is an LVM volume" (callers test
    # [ "$islv_ret" == "1" ]) and 0 for "plain file / not LVM".
    # BUGFIX: the symlink test previously read '-L "{1}"' (missing '$'),
    # i.e. it tested the literal path "{1}", so symlinked device paths
    # (e.g. /dev/<vg>/<lv> -> /dev/dm-N) were never considered.
    if [ -b "${1}" -o -L "${1}" ]; then
        # recognized by LVM as a VG or an LV -> report "is LVM" (1)
        lvm vgs "${1}" > /dev/null 2>&1 && return 1
        lvm lvs "${1}" > /dev/null 2>&1 && return 1
    fi
    return 0
}
get_vg() {
    # Print the volume-group name of logical volume $1.
    # 'lvm lvs' is formatted as "<lv>/<vg>" via --separator=/; field 2 = VG.
    lvm lvs --noheadings --unbuffered --separator=/ "${1}" | cut -d '/' -f 2
}

get_lv() {
    # Print the logical-volume name of $1 (field 1 of the same report).
    lvm lvs --noheadings --unbuffered --separator=/ "${1}" | cut -d '/' -f 1
}
double_hyphens() {
    # Escape a VG/LV name for device-mapper: dm device names join the VG and
    # LV with a single '-', so literal hyphens inside either name must be
    # doubled.  Pure-bash expansion replaces the old 'echo ${1} | sed'
    # pipeline, whose unquoted expansion word-split and glob-expanded names.
    printf '%s\n' "${1//-/--}"
}
create_snapshot() {
    # Create a snapshot of a disk.
    #   $1 - path to the disk (LVM logical volume or qcow2 file)
    #   $2 - snapshot name
    # Returns 0 on success, 2 on snapshot failure, 3 for an unknown disk type.
    # NOTE(review): $dmsnapshot is a global set elsewhere in this script
    # (outside this view) selecting device-mapper vs qcow2 snapshots.
    local disk=$1
    local snapshotname="$2"
    local failed=0
    is_lv ${disk}
    islv_ret=$?
    if [ ${dmsnapshot} = "yes" ] && [ "$islv_ret" == "1" ]; then
        # LVM path: build a device-mapper snapshot by hand.
        local lv=`get_lv ${disk}`
        local vg=`get_vg ${disk}`
        local lv_dm=`double_hyphens ${lv}`
        local vg_dm=`double_hyphens ${vg}`
        local lvdevice=/dev/mapper/${vg_dm}-${lv_dm}
        local lv_bytes=`blockdev --getsize64 ${lvdevice}`
        local lv_sectors=`blockdev --getsz ${lvdevice}`
        # COW store sized to the full LV
        lvm lvcreate --size ${lv_bytes}b --name "${snapshotname}-cow" ${vg} >&2 || return 2
        # suspend I/O on the origin while rewiring its dm tables
        dmsetup suspend ${vg_dm}-${lv_dm} >&2
        if dmsetup info -c --noheadings -o name ${vg_dm}-${lv_dm}-real > /dev/null 2>&1; then
            # a '-real' origin device already exists (prior snapshot): just
            # attach a new snapshot target to it
            echo "0 ${lv_sectors} snapshot ${lvdevice}-real /dev/mapper/${vg_dm}-${snapshotname}--cow p 64" | \
                dmsetup create "${vg_dm}-${snapshotname}" >&2 || ( destroy_snapshot ${disk} "${snapshotname}"; return 2 )
            dmsetup resume "${vg_dm}-${snapshotname}" >&2 || ( destroy_snapshot ${disk} "${snapshotname}"; return 2 )
        else
            # first snapshot: clone the origin table to '<lv>-real', point
            # the snapshot at it, then turn the origin into a snapshot-origin
            dmsetup table ${vg_dm}-${lv_dm} | dmsetup create ${vg_dm}-${lv_dm}-real >&2 || ( destroy_snapshot ${disk} "${snapshotname}"; return 2 )
            dmsetup resume ${vg_dm}-${lv_dm}-real >&2 || ( destroy_snapshot ${disk} "${snapshotname}"; return 2 )
            echo "0 ${lv_sectors} snapshot ${lvdevice}-real /dev/mapper/${vg_dm}-${snapshotname}--cow p 64" | \
                dmsetup create "${vg_dm}-${snapshotname}" >&2 || ( destroy_snapshot ${disk} "${snapshotname}"; return 2 )
            echo "0 ${lv_sectors} snapshot-origin ${lvdevice}-real" | \
                dmsetup load ${vg_dm}-${lv_dm} >&2 || ( destroy_snapshot ${disk} "${snapshotname}"; return 2 )
            dmsetup resume "${vg_dm}-${snapshotname}" >&2 || ( destroy_snapshot ${disk} "${snapshotname}"; return 2 )
        fi
        dmsetup resume "${vg_dm}-${lv_dm}" >&2
    elif [ -f "${disk}" ]; then
        # qcow2 path: internal snapshot; roll back (delete) on failure
        $qemu_img snapshot -c "$snapshotname" $disk
        if [ $? -gt 0 ]
        then
            failed=2
            printf "***Failed to create snapshot $snapshotname for path $disk\n" >&2
            $qemu_img snapshot -d "$snapshotname" $disk
            if [ $? -gt 0 ]
            then
                printf "***Failed to delete snapshot $snapshotname for path $disk\n" >&2
            fi
        fi
    else
        failed=3
        printf "***Failed to create snapshot $snapshotname, undefined type $disk\n" >&2
    fi
    return $failed
}
destroy_snapshot() {
    # Destroy snapshot(s) of a disk.
    #   $1 - path to the disk (LVM logical volume or qcow2 file)
    #   $2 - snapshot name (used only on the LVM path; see note below)
    # Returns 0 on success, 2 on qcow2 delete failure, 3 for unknown type.
    local disk=$1
    local snapshotname="$2"
    local failed=0
    is_lv ${disk}
    islv_ret=$?
    if [ "$islv_ret" == "1" ]; then
        # LVM path: tear down the dm snapshot created by create_snapshot
        local lv=`get_lv ${disk}`
        local vg=`get_vg ${disk}`
        local lv_dm=`double_hyphens ${lv}`
        local vg_dm=`double_hyphens ${vg}`
        if [ -e /dev/mapper/${vg_dm}-${lv_dm}-real ]; then
            local dm_refcount=`dmsetup info -c --noheadings -o open ${vg_dm}-${lv_dm}-real`
            if [ ${dm_refcount} -le 2 ]; then
                # last snapshot on this origin: restore the original table
                # and remove the '-real' indirection device entirely
                dmsetup suspend ${vg_dm}-${lv_dm} >&2
                dmsetup table ${vg_dm}-${lv_dm}-real | dmsetup load ${vg_dm}-${lv_dm} >&2
                dmsetup resume ${vg_dm}-${lv_dm}
                dmsetup remove "${vg_dm}-${snapshotname}"
                dmsetup remove ${vg_dm}-${lv_dm}-real
            else
                # other snapshots still reference '-real': drop only ours
                dmsetup remove "${vg_dm}-${snapshotname}"
            fi
        else
            dmsetup remove "${vg_dm}-${snapshotname}"
        fi
        # release the COW store backing the snapshot
        lvm lvremove -f "${vg}/${snapshotname}-cow"
    elif [ -f $disk ]; then
        #delete all the existing snapshots
        # NOTE: the qcow2 path ignores $snapshotname and removes every
        # internal snapshot listed for the disk (rows start at line 3).
        $qemu_img snapshot -l $disk |tail -n +3|awk '{print $1}'|xargs -I {} $qemu_img snapshot -d {} $disk >&2
        if [ $? -gt 0 ]
        then
            failed=2
            printf "Failed to delete snapshot $snapshotname for path $disk\n" >&2
        fi
    else
        failed=3
        printf "***Failed to delete snapshot $snapshotname, undefined type $disk\n" >&2
    fi
    return $failed
}
# Revert a qcow2 disk image to a named internal snapshot.
# Arguments: $1 - disk image path, $2 - snapshot name.
# Returns: 0 on success, 1 if qemu-img could not apply the snapshot.
rollback_snapshot() {
    local target_disk=$1
    local snap="$2"
    if ! $qemu_img snapshot -a $snap $target_disk; then
        printf "***Failed to apply snapshot $snap for path $target_disk\n" >&2
        return 1
    fi
    return 0
}
# Export a named snapshot of a disk to ${destPath}/${destName} as qcow2.
# Three disk types are handled:
#   - dm/LVM snapshot: convert the raw /dev/mapper snapshot device,
#   - standalone volume snapshot file (path contains "/snapshots/"): plain copy,
#   - qcow2 image with internal snapshots: qemu-img convert of one snapshot.
# The -U (force share) flag lets qemu-img read in-use images; older builds
# reject it, so every call retries without the flag on "invalid option -- 'U'".
# Arguments: $1 disk, $2 snapshot name, $3 destination dir, $4 destination file.
# Returns: 0 ok, 1 snapshot not found, 2 backup failed, 3 bad destination/type.
backup_snapshot() {
    local disk=$1
    local snapshotname="$2"
    local destPath=$3
    local destName=$4
    local forceShareFlag="-U"
    if [ ! -d $destPath ]
    then
        mkdir -p $destPath >& /dev/null
        if [ $? -gt 0 ]
        then
            printf "Failed to create $destPath\n" >&2
            return 3
        fi
    fi
    is_lv ${disk}
    islv_ret=$?
    if [ ${dmsnapshot} = "yes" ] && [ "$islv_ret" == "1" ] ; then
        local vg=`get_vg ${disk}`
        local vg_dm=`double_hyphens ${vg}`
        local scriptdir=`dirname ${0}`
        if ! dmsetup info -c --noheadings -o name ${vg_dm}-${snapshotname} > /dev/null 2>&1; then
            printf "Disk ${disk} has no snapshot called ${snapshotname}.\n" >&2
            return 1
        fi
        # Bug fix: the `convert` subcommand was missing here, so qemu-img was
        # invoked with no subcommand at all (and the retry matched the wrong
        # "snapshot:" error prefix).
        qemuimg_ret=$($qemu_img convert $forceShareFlag -f raw -O qcow2 "/dev/mapper/${vg_dm}-${snapshotname}" "${destPath}/${destName}" 2>&1)
        ret_code=$?
        if [ $ret_code -gt 0 ] && [[ $qemuimg_ret == *"convert: invalid option -- 'U'"* ]]
        then
            forceShareFlag=""
            $qemu_img convert $forceShareFlag -f raw -O qcow2 "/dev/mapper/${vg_dm}-${snapshotname}" "${destPath}/${destName}"
            ret_code=$?
        fi
        if [ $ret_code -gt 0 ]
        then
            printf "${qemu_img} failed to create backup of snapshot ${snapshotname} for disk ${disk} to ${destPath}.\n" >&2
            return 2
        fi
    elif [ -f ${disk} ]; then
        if [[ $disk == *"/snapshots/"* ]]; then
            #Backup volume snapshot: the snapshot already lives in its own file.
            cp "$disk" "${destPath}/${destName}"
            ret_code=$?
            if [ $ret_code -gt 0 ]
            then
                printf "Failed to backup $snapshotname for disk $disk to $destPath\n" >&2
                return 2
            fi
        else
            # Backup VM snapshot: verify the internal snapshot exists first.
            qemuimg_ret=$($qemu_img snapshot $forceShareFlag -l $disk 2>&1)
            ret_code=$?
            if [ $ret_code -gt 0 ] && [[ $qemuimg_ret == *"snapshot: invalid option -- 'U'"* ]]; then
                forceShareFlag=""
                qemuimg_ret=$($qemu_img snapshot $forceShareFlag -l $disk)
                ret_code=$?
            fi
            if [ $ret_code -gt 0 ] || [[ ! $qemuimg_ret == *"$snapshotname"* ]]; then
                printf "there is no $snapshotname on disk $disk\n" >&2
                return 1
            fi
            # Newer qemu-img selects the snapshot with -l snapshot.name=…;
            # fall back to the legacy -s flag when -l is not supported.
            qemuimg_ret=$($qemu_img convert $forceShareFlag -f qcow2 -O qcow2 -l snapshot.name=$snapshotname $disk $destPath/$destName 2>&1 > /dev/null)
            ret_code=$?
            if [ $ret_code -gt 0 ] && [[ $qemuimg_ret == *"convert: invalid option -- 'U'"* ]]; then
                forceShareFlag=""
                qemuimg_ret=$($qemu_img convert $forceShareFlag -f qcow2 -O qcow2 -l snapshot.name=$snapshotname $disk $destPath/$destName 2>&1 > /dev/null)
                ret_code=$?
            fi
            if [ $ret_code -gt 0 ] && [[ $qemuimg_ret == *"convert: invalid option -- 'l'"* ]]; then
                $qemu_img convert $forceShareFlag -f qcow2 -O qcow2 -s $snapshotname $disk $destPath/$destName >& /dev/null
                ret_code=$?
            fi
            if [ $ret_code -gt 0 ]; then
                printf "Failed to backup $snapshotname for disk $disk to $destPath\n" >&2
                return 2
            fi
        fi
    else
        printf "***Failed to backup snapshot $snapshotname, undefined type $disk\n" >&2
        return 3
    fi
    return 0
}
# Restore a disk image from a previously backed-up snapshot file.
# Arguments: $1 - path to the snapshot backup image, $2 - destination disk.
# Returns: 0 on success, 2 if the qemu-img conversion failed.
revert_snapshot() {
    local snapshotPath=$1
    local destPath=$2
    # Bug fix: the original `cmd || ( printf …; return 2 )` executed `return`
    # inside a subshell, so the function always returned 0 even on failure.
    if ! ${qemu_img} convert -f qcow2 -O qcow2 "$snapshotPath" "$destPath"; then
        printf "${qemu_img} failed to revert snapshot ${snapshotPath} to disk ${destPath}.\n" >&2
        return 2
    fi
    return 0
}
#set -x
# ---------------------------------------------------------------------------
# Command-line driver: exactly one action flag (-c create, -d destroy,
# -r rollback, -b backup, -v revert) plus -n <snapshot name>; -p/-t give the
# backup destination dir and file name, -f requests directory deletion.
# ---------------------------------------------------------------------------
cflag=
dflag=
rflag=
bflag=
vflag=
nflag=
pathval=
snapshot=
tmplName=
deleteDir=
dmsnapshot=no
dmrollback=no
while getopts 'c:d:r:n:b:v:p:t:f' OPTION
do
  case $OPTION in
  c)    cflag=1
        pathval="$OPTARG"
        ;;
  d)    dflag=1
        pathval="$OPTARG"
        ;;
  r)    rflag=1
        pathval="$OPTARG"
        ;;
  b)    bflag=1
        pathval="$OPTARG"
        ;;
  v)    vflag=1
        pathval="$OPTARG"
        ;;
  n)    nflag=1
        snapshot="$OPTARG"
        ;;
  p)    destPath="$OPTARG"
        ;;
  t)    tmplName="$OPTARG"
        ;;
  f)    deleteDir=1
        ;;
  ?)    usage
        ;;
  esac
done
# Device-mapper snapshot support is only available when the dm-snapshot
# module loads; merge-based rollback additionally needs the snapshot-merge
# target.
if modprobe dm-snapshot; then
    dmsnapshot=yes
    dmsetup targets | grep -q "^snapshot-merge" && dmrollback=yes
fi
[ -z "${snapshot}" ] && usage
# For block devices, hash the snapshot name so it is safe to embed in
# device-mapper names.
[ -b "$pathval" ] && snapshot=`echo "${snapshot}" | md5sum -t | awk '{ print $1 }'`
if [ "$cflag" == "1" ]
then
    create_snapshot $pathval "$snapshot"
    exit $?
elif [ "$dflag" == "1" ]
then
    # NOTE(review): $deleteDir is passed but destroy_snapshot only reads
    # two positional parameters — confirm the third argument is intentional.
    destroy_snapshot $pathval "$snapshot" $deleteDir
    exit $?
elif [ "$bflag" == "1" ]
then
    [ -z "${destPath}" -o -z "${tmplName}" ] && usage
    backup_snapshot $pathval $snapshot $destPath $tmplName
    exit $?
elif [ "$rflag" == "1" ]
then
    # NOTE(review): rollback_snapshot ignores the $destPath argument.
    rollback_snapshot $pathval "$snapshot" $destPath
    exit $?
elif [ "$vflag" == "1" ]
then
    revert_snapshot $pathval $destPath
    exit $?
fi
exit 0
| true
|
bfedfd3408b29dc56c9c88e821b89aef20ff9692
|
Shell
|
arnoldsandoval/dotfiles
|
/brew.sh
|
UTF-8
| 2,543
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
# macOS workstation bootstrap: installs Homebrew, zsh/Oh My Zsh, MongoDB,
# App Store apps (via a Brewfile), nvm, VS Code extensions and npm globals.
# Only the Homebrew step is guarded for idempotence; the rest re-runs freely.

# Homebrew
# `command -v` is the portable builtin way to check for an installed tool
# (replaces the external, less reliable `which`).
if ! command -v brew > /dev/null; then
  echo '📦 Installing Homebrew'
  /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
fi;

# zsh/ohmyzsh
echo '📦 Installing zsh'
brew install zsh
echo '📦 Installing Oh My Zsh!'
sh -c "$(curl -fsSL https://raw.githubusercontent.com/robbyrussell/oh-my-zsh/master/tools/install.sh)"

# MongoDB
echo '📦 Install MongoDB'
brew install mongodb
echo '📦 Create directory where mongo data files will live'
sudo mkdir -p /data/db
echo '📦 Change permissions for data directory'
sudo chown -R `id -un` /data/db
echo '📦 Run mongo daemon'
brew services start mongodb

# Install macOS apps
echo '📦 Installing Mac App Store CLI'
brew install mas
echo '📦 Install OS X apps from Brewfile'
brew bundle install

# Node
echo '📦 Installing Node Version Manager'
curl -o- https://raw.githubusercontent.com/creationix/nvm/v0.33.1/install.sh | bash

# VSCode Extensions
# NOTE: You can always generate this list on any computer by running the following:
# `code --list-extensions | xargs -L 1 echo code --install-extension`
code --install-extension akamud.vscode-theme-onedark
code --install-extension christian-kohler.npm-intellisense
code --install-extension dbaeumer.vscode-eslint
code --install-extension eamodio.gitlens
code --install-extension EditorConfig.EditorConfig
code --install-extension EQuimper.react-native-react-redux
code --install-extension esbenp.prettier-vscode
code --install-extension FallenMax.mithril-emmet
code --install-extension flowtype.flow-for-vscode
code --install-extension formulahendry.auto-close-tag
code --install-extension jaspernorth.vscode-pigments
code --install-extension msjsdiag.debugger-for-chrome
code --install-extension shinnn.stylelint
code --install-extension silvenon.mdx
code --install-extension TimonVS.ReactSnippetsStandard
code --install-extension wayou.vscode-todo-highlight
code --install-extension xabikos.JavaScriptSnippets
code --install-extension zhuangtongfa.Material-theme
code --install-extension Zignd.html-css-class-completion

echo '⌨️ Install Pure Prompt'
npm install --global pure-prompt
echo '⌨️ Install Expo CLI'
npm install --global expo-cli
echo -e "
✅ Setup script complete
\e[1mTo finish setup, you will need to restart Terminal and do the following:\e[0m
- Install latest node: \e[4mnvm install node\e[0m
- Use the newly installed node version: \e[4mnvm use node\e[0m
- Verify the installation version: \e[4mnode -v\e[0m
"
| true
|
c088be38fda91578cacea5b13ecd0ad3c469eaa3
|
Shell
|
bleshik/folders-to-flickr-sets-uploader
|
/download-set.sh
|
UTF-8
| 1,558
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash -
#===============================================================================
#
#          FILE: download-set.sh
#
#         USAGE: ./download-set.sh SET-NAME
#
#   DESCRIPTION: Download every original-size photo of a Flickr photoset
#                (looked up by title via flickcurl) into ./SET-NAME/.
#
#       OPTIONS: ---
#  REQUIREMENTS: flickcurl, wget
#          BUGS: ---
#         NOTES: ---
#        AUTHOR: Alexey Balchunas,
#  ORGANIZATION:
#       CREATED: 12/21/2013 18:06:00 MSK
#      REVISION: ---
#===============================================================================
set -o nounset

SET_NAME=$1

if [ ! -d "$SET_NAME" ] ; then
    mkdir "$SET_NAME"
fi
cd "$SET_NAME"

# Resolve the photoset title to its numeric id.
SETS=$(flickcurl photosets.getList)
SET_ID=$(echo "$SETS" | grep "title: '$SET_NAME'" | head -n1 | sed -e 's/.*ID \([0-9]*\) .*/\1/g')
# Bug fix: \d is not a valid POSIX/GNU BRE class (it matched a literal 'd');
# use an explicit digit class with ERE instead.
PHOTOS=$(flickcurl photosets.getPhotos "$SET_ID" | grep -E "ID [0-9]+" | sed -e 's/.*ID \([0-9]*\).*/\1/g')

for PHOTO_ID in $PHOTOS ; do
    # Assemble the original-size URL from the photo's farm/server/secret.
    PHOTO_INFO=$(flickcurl photos.getInfo "$PHOTO_ID")
    FARM=$(echo "$PHOTO_INFO" | grep farm | sed -e "s/.*value: '\(.*\)'.*/\1/g")
    SERVER=$(echo "$PHOTO_INFO" | grep server | sed -e "s/.*value: '\(.*\)'.*/\1/g")
    ORIGINAL_SECRET=$(echo "$PHOTO_INFO" | grep originalsecret | sed -e "s/.*value: '\(.*\)'.*/\1/g")
    ORIGINAL_FORMAT=$(echo "$PHOTO_INFO" | grep originalformat | sed -e "s/.*value: '\(.*\)'.*/\1/g")
    TITLE=$(echo "$PHOTO_INFO" | grep title | sed -e "s/.*value: '\(.*\)'.*/\1/g")
    URL="http://farm$FARM.staticflickr.com/$SERVER/${PHOTO_ID}_${ORIGINAL_SECRET}_o.$ORIGINAL_FORMAT"
    # Quote the URL and title so spaces/special characters survive.
    if [ -z "$TITLE" ] ; then
        wget "$URL"
    else
        wget "$URL" -O "$TITLE.$ORIGINAL_FORMAT"
    fi
done
| true
|
b57621c37a68e9db9f04b8f7d92ea421eb801e90
|
Shell
|
GiantSpaceRobot/tsRNAsearch
|
/bin/Generate-depthfile-stats.sh
|
UTF-8
| 262
| 2.96875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build a per-feature depth statistics table: given <prefix>_mean-std_sorted.tsv,
# copy it to <prefix>_depth.inf and prepend a header row, producing
# <prefix>_depth_stats.tsv (Header.txt is left behind as a side effect).
# Usage: Generate-depthfile-stats.sh <prefix>_mean-std_sorted.tsv

# Derive the output prefix by stripping the fixed suffix from the input name.
# All expansions are quoted so prefixes with spaces do not break the pipeline.
newname=$(echo "$1" | awk -F '_mean-std_sorted.tsv' '{print $1}')
cp "$1" "${newname}_depth.inf"
echo -e "Feature\tMean\tStandard Deviation\tCoefficient of Variation" \
	> Header.txt
cat Header.txt \
	"${newname}_depth.inf" \
	> "${newname}_depth_stats.tsv"
| true
|
6228859393b0a6cfa73529791dda7c5418d023e2
|
Shell
|
mocha-parallel/mocha-parallel-tests
|
/test/parallel/parallel.sh
|
UTF-8
| 345
| 2.96875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Smoke-check that the parallel test suite actually runs in parallel: run the
# CLI over the parallel test folder and fail if wall-clock time reaches 10s.
TIMESTAMP_START="$(date +%s)"
# Output is captured (and discarded) so the reporter does not pollute CI logs.
OUTPUT=$(dist/bin/cli.js -R spec test/parallel/tests --timeout 10000 --slow 10000)
TIMESTAMP_FINISH="$(date +%s)"
# Arithmetic expansion replaces the deprecated external `expr` invocation.
TIMESTAMP_DIFF=$((TIMESTAMP_FINISH - TIMESTAMP_START))

if [[ $TIMESTAMP_DIFF -lt 10 ]]; then
    exit 0
else
    echo "Tests running time was $TIMESTAMP_DIFF seconds"
    exit 1
fi
| true
|
42d1d0be9a8c57f7f2973d826185e9af3cffb991
|
Shell
|
firstkeaster/BigData_proj
|
/task_freq.sh
|
UTF-8
| 1,050
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
# Interactive launcher: prompt for a query type and a dataset, then run the
# matching Hadoop-streaming or Spark job and merge its output locally.
echo Hello, which type of query do you want?
read varname
echo I got $varname
echo dataset?
read data
echo I got $data

module load python/gnu/3.4.4
/usr/bin/hadoop fs -rm -r -f "task_fftmp.out"

# Dispatch on the requested query type (the two types are mutually
# exclusive, so a single case statement covers both).
case "$varname" in
    find_format)
        /usr/bin/hadoop fs -rm -r -f "task_fftmp.out"
        MAPPER=$(echo "task_ff"/*map*.py)
        REDUCER=$(echo "task_ff"/*reduce*.py)
        /usr/bin/hadoop jar /opt/cloudera/parcels/CDH/lib/hadoop-mapreduce/hadoop-streaming.jar -D mapreduce.job.reduces=1 -files "task_ff/" -mapper "$MAPPER" -reducer "$REDUCER" -input "$data" -output "task_fftmp.out"
        /usr/bin/hadoop fs -getmerge "task_fftmp.out" "task_ff/task_fftmp.out"
        #cat "task_ff/task_fftmp.out" | sort -n > "task_ff/task_fftmp.out"
        ;;
    high_freq)
        /usr/bin/hadoop fs -rm -r "out.csv"
        SPARKCODE=$(echo "task_freq/spark".py)
        spark-submit --conf spark.pyspark.python=/share/apps/python/3.4.4/bin/python "$SPARKCODE" "$data"
        /usr/bin/hadoop fs -getmerge "out.csv" "task_freq/out.csv"
        ;;
esac
| true
|
0ba9f6eb79882e2ded8d267b8c04be3610deb42a
|
Shell
|
voc/cm
|
/bundlewrap/bundles/apt/files/kernel-postinst.d
|
UTF-8
| 413
| 3.09375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# /etc/kernel/postinst.d/unattended-upgrades
# Flag the system as needing a reboot after a kernel (de)installation, and
# record which package triggered it. Skip the linux-image-extra postrm case.
case "$DPKG_MAINTSCRIPT_PACKAGE::$DPKG_MAINTSCRIPT_NAME" in
	linux-image-extra*::postrm) exit 0 ;;
esac

# Nothing to record on systems without /var/run.
[ -d /var/run ] || exit 0
touch /var/run/reboot-required
# Append the package name once (the file may not exist yet).
grep -q "^$DPKG_MAINTSCRIPT_PACKAGE$" /var/run/reboot-required.pkgs 2> /dev/null \
	|| echo "$DPKG_MAINTSCRIPT_PACKAGE" >> /var/run/reboot-required.pkgs
| true
|
8d23ca28f3bc6e86089cf355831ec8e64a237080
|
Shell
|
triton-inference-server/server
|
/qa/L0_backend_python/env/test.sh
|
UTF-8
| 12,111
| 3
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Copyright 2021-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#  * Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#  * Neither the name of NVIDIA CORPORATION nor the names of its
#    contributors may be used to endorse or promote products derived
#    from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

# L0 test for the Python backend's custom execution environments: builds
# several conda-packed environments with different Python/NumPy/TF versions,
# attaches them to Python models via EXECUTION_ENV_PATH, and verifies that
# Triton loads each model with the expected interpreter — locally and from S3.

CLIENT_LOG="./env_client.log"
source ../common.sh
source ../../common/util.sh

SERVER=/opt/tritonserver/bin/tritonserver
BASE_SERVER_ARGS="--model-repository=`pwd`/models --log-verbose=1 --disable-auto-complete-config"
PYTHON_BACKEND_BRANCH=$PYTHON_BACKEND_REPO_TAG
SERVER_ARGS=$BASE_SERVER_ARGS
SERVER_LOG="./env_server.log"

RET=0
rm -fr ./models
rm -rf *.tar.gz
install_build_deps
install_conda

# Tensorflow 2.1.0 only works with Python 3.4 - 3.7. Successful execution of
# the Python model indicates that the environment has been setup correctly.
# Create a model with python 3.7 version
create_conda_env "3.7" "python-3-7"
conda install numpy=1.20.1 -y
conda install tensorflow=2.1.0 -y
conda install -c conda-forge libstdcxx-ng=12 -y
PY37_VERSION_STRING="Python version is 3.7, NumPy version is 1.20.1, and Tensorflow version is 2.1.0"
create_python_backend_stub
conda-pack -o python3.7.tar.gz
# The model config references the packed environment by absolute path.
path_to_conda_pack=`pwd`/python3.7.tar.gz
mkdir -p models/python_3_7/1/
cp ../../python_models/python_version/config.pbtxt ./models/python_3_7
(cd models/python_3_7 && \
          sed -i "s/^name:.*/name: \"python_3_7\"/" config.pbtxt && \
          echo "parameters: {key: \"EXECUTION_ENV_PATH\", value: {string_value: \"$path_to_conda_pack\"}}">> config.pbtxt)
cp ../../python_models/python_version/model.py ./models/python_3_7/1/
cp python_backend/builddir/triton_python_backend_stub ./models/python_3_7
conda deactivate

# Use python-3-7 without conda pack
# Create a model with python 3.7 version and numpy 1.20.3 to distinguish from
# previous test.
# Tensorflow 2.1.0 only works with Python 3.4 - 3.7. Successful execution of
# the Python model indicates that the environment has been setup correctly.
path_to_conda_pack="$PWD/python-3-7-1"
create_conda_env_with_specified_path "3.7" $path_to_conda_pack
conda install numpy=1.20.3 -y
conda install tensorflow=2.1.0 -y
conda install -c conda-forge libstdcxx-ng=12 -y
PY37_1_VERSION_STRING="Python version is 3.7, NumPy version is 1.20.3, and Tensorflow version is 2.1.0"
create_python_backend_stub
mkdir -p models/python_3_7_1/1/
cp ../../python_models/python_version/config.pbtxt ./models/python_3_7_1
(cd models/python_3_7_1 && \
          sed -i "s/^name:.*/name: \"python_3_7_1\"/" config.pbtxt && \
          echo "parameters: {key: \"EXECUTION_ENV_PATH\", value: {string_value: \"$path_to_conda_pack\"}}">> config.pbtxt)
cp ../../python_models/python_version/model.py ./models/python_3_7_1/1/
# Copy activate script to folder
cp $path_to_conda_pack/lib/python3.7/site-packages/conda_pack/scripts/posix/activate $path_to_conda_pack/bin/.
cp python_backend/builddir/triton_python_backend_stub ./models/python_3_7_1
conda deactivate

# Create a model with python 3.6 version
# Tensorflow 2.1.0 only works with Python 3.4 - 3.7. Successful execution of
# the Python model indicates that the environment has been setup correctly.
create_conda_env "3.6" "python-3-6"
conda install -c conda-forge libstdcxx-ng=12 -y
conda install numpy=1.18.1 -y
conda install tensorflow=2.1.0 -y
PY36_VERSION_STRING="Python version is 3.6, NumPy version is 1.18.1, and Tensorflow version is 2.1.0"
conda-pack -o python3.6.tar.gz

# Test relative execution env path
path_to_conda_pack='$$TRITON_MODEL_DIRECTORY/python_3_6_environment.tar.gz'
create_python_backend_stub
mkdir -p models/python_3_6/1/
cp ../../python_models/python_version/config.pbtxt ./models/python_3_6
cp python3.6.tar.gz models/python_3_6/python_3_6_environment.tar.gz
(cd models/python_3_6 && \
          sed -i "s/^name:.*/name: \"python_3_6\"/" config.pbtxt && \
          echo "parameters: {key: \"EXECUTION_ENV_PATH\", value: {string_value: \"$path_to_conda_pack\"}}" >> config.pbtxt)
cp ../../python_models/python_version/model.py ./models/python_3_6/1/
cp python_backend/builddir/triton_python_backend_stub ./models/python_3_6
conda deactivate

# Test conda env without custom Python backend stub This environment should
# always use the default Python version shipped in the container. For Ubuntu 22.04
# it is Python 3.10 and for Ubuntu 20.04 is 3.8
path_to_conda_pack='$$TRITON_MODEL_DIRECTORY/python_3_10_environment.tar.gz'
create_conda_env "3.10" "python-3-10"
conda install -c conda-forge libstdcxx-ng=12 -y
conda install numpy=1.23.4 -y
conda install tensorflow=2.10.0 -y
PY310_VERSION_STRING="Python version is 3.10, NumPy version is 1.23.4, and Tensorflow version is 2.10.0"
conda pack -o python3.10.tar.gz
mkdir -p models/python_3_10/1/
cp ../../python_models/python_version/config.pbtxt ./models/python_3_10
cp python3.10.tar.gz models/python_3_10/python_3_10_environment.tar.gz
(cd models/python_3_10 && \
          sed -i "s/^name:.*/name: \"python_3_10\"/" config.pbtxt && \
          echo "parameters: {key: \"EXECUTION_ENV_PATH\", value: {string_value: \"$path_to_conda_pack\"}}" >> config.pbtxt)
cp ../../python_models/python_version/model.py ./models/python_3_10/1/
conda deactivate
rm -rf ./miniconda

# First pass: load every model once and verify each environment reported its
# expected Python/NumPy/TF versions in the server log.
run_server
if [ "$SERVER_PID" == "0" ]; then
    echo -e "\n***\n*** Failed to start $SERVER\n***"
    cat $SERVER_LOG
    exit 1
fi

kill $SERVER_PID
wait $SERVER_PID

set +e
for EXPECTED_VERSION_STRING in "$PY36_VERSION_STRING" "$PY37_VERSION_STRING" "$PY37_1_VERSION_STRING" "$PY310_VERSION_STRING"; do
    grep "$EXPECTED_VERSION_STRING" $SERVER_LOG
    if [ $? -ne 0 ]; then
        cat $SERVER_LOG
        echo -e "\n***\n*** $EXPECTED_VERSION_STRING was not found in Triton logs. \n***"
        RET=1
    fi
done

# Test default (non set) locale in python stub processes
# NOTE: In certain pybind versions, the locale settings may not be propagated from parent to
# stub processes correctly. See https://github.com/triton-inference-server/python_backend/pull/260.
export LC_ALL=INVALID
grep "Locale is (None, None)" $SERVER_LOG
if [ $? -ne 0 ]; then
    cat $SERVER_LOG
    echo -e "\n***\n*** Default unset Locale was not found in Triton logs. \n***"
    RET=1
fi
set -e
rm $SERVER_LOG

# Test locale set via environment variable in python stub processes
# NOTE: In certain pybind versions, the locale settings may not be propagated from parent to
# stub processes correctly. See https://github.com/triton-inference-server/python_backend/pull/260.
export LC_ALL=C.UTF-8
run_server
if [ "$SERVER_PID" == "0" ]; then
    echo -e "\n***\n*** Failed to start $SERVER\n***"
    cat $SERVER_LOG
    exit 1
fi

kill $SERVER_PID
wait $SERVER_PID

set +e
grep "Locale is ('en_US', 'UTF-8')" $SERVER_LOG
if [ $? -ne 0 ]; then
    cat $SERVER_LOG
    echo -e "\n***\n*** Locale UTF-8 was not found in Triton logs. \n***"
    RET=1
fi
set -e
rm $SERVER_LOG

## Test re-extraction of environment.
SERVER_ARGS="--model-repository=`pwd`/models --log-verbose=1 --model-control-mode=explicit"
run_server
if [ "$SERVER_PID" == "0" ]; then
    echo -e "\n***\n*** Failed to start $SERVER\n***"
    cat $SERVER_LOG
    exit 1
fi

# The environment should be extracted
curl -v -X POST localhost:8000/v2/repository/models/python_3_10/load
# Touching only model.py must NOT trigger a re-extraction of the env archive.
touch -m models/python_3_10/1/model.py
# The environment should not be re-extracted
curl -v -X POST localhost:8000/v2/repository/models/python_3_10/load
touch -m models/python_3_10/python_3_10_environment.tar.gz
# The environment should be re-extracted
curl -v -X POST localhost:8000/v2/repository/models/python_3_10/load

kill $SERVER_PID
wait $SERVER_PID

set +e
PY310_ENV_EXTRACTION="Extracting Python execution env"
if [ `grep -c "${PY310_ENV_EXTRACTION}" ${SERVER_LOG}` != "2" ]; then
    cat $SERVER_LOG
    echo -e "\n***\n*** Python execution environment should be extracted exactly twice. \n***"
    RET=1
fi
set -e

# Test execution environments with S3

# S3 credentials are necessary for this test. Pass via ENV variables
aws configure set default.region $AWS_DEFAULT_REGION && \
    aws configure set aws_access_key_id $AWS_ACCESS_KEY_ID && \
    aws configure set aws_secret_access_key $AWS_SECRET_ACCESS_KEY

# S3 bucket path (Point to bucket when testing cloud storage)
BUCKET_URL="s3://triton-bucket-${CI_JOB_ID}"

# Cleanup and delete S3 test bucket if it already exists (due to test failure)
aws s3 rm $BUCKET_URL --recursive --include "*" && \
    aws s3 rb $BUCKET_URL || true

# Make S3 test bucket
aws s3 mb "${BUCKET_URL}"

# Remove Slash in BUCKET_URL
BUCKET_URL=${BUCKET_URL%/}
BUCKET_URL_SLASH="${BUCKET_URL}/"

# Remove Python 3.7 model because it contains absolute paths and cannot be used
# with S3.
rm -rf models/python_3_7

# Test with the bucket url as model repository
aws s3 cp models/ "${BUCKET_URL_SLASH}" --recursive --include "*"

rm $SERVER_LOG
SERVER_ARGS="--model-repository=$BUCKET_URL_SLASH --log-verbose=1"
run_server
if [ "$SERVER_PID" == "0" ]; then
    echo -e "\n***\n*** Failed to start $SERVER\n***"
    cat $SERVER_LOG
    exit 1
fi

kill $SERVER_PID
wait $SERVER_PID

set +e
grep "$PY36_VERSION_STRING" $SERVER_LOG
if [ $? -ne 0 ]; then
    cat $SERVER_LOG
    echo -e "\n***\n*** $PY36_VERSION_STRING was not found in Triton logs. \n***"
    RET=1
fi
set -e

# Clean up bucket contents
aws s3 rm "${BUCKET_URL_SLASH}" --recursive --include "*"

# Test with EXECUTION_ENV_PATH outside the model directory
sed -i "s/TRITON_MODEL_DIRECTORY\/python_3_6_environment/TRITON_MODEL_DIRECTORY\/..\/python_3_6_environment/" models/python_3_6/config.pbtxt
mv models/python_3_6/python_3_6_environment.tar.gz models
sed -i "s/\$\$TRITON_MODEL_DIRECTORY\/python_3_10_environment/s3:\/\/triton-bucket-${CI_JOB_ID}\/python_3_10_environment/" models/python_3_10/config.pbtxt
mv models/python_3_10/python_3_10_environment.tar.gz models

aws s3 cp models/ "${BUCKET_URL_SLASH}" --recursive --include "*"

rm $SERVER_LOG
SERVER_ARGS="--model-repository=$BUCKET_URL_SLASH --log-verbose=1"
run_server
if [ "$SERVER_PID" == "0" ]; then
    echo -e "\n***\n*** Failed to start $SERVER\n***"
    cat $SERVER_LOG
    exit 1
fi

kill $SERVER_PID
wait $SERVER_PID

set +e
for EXPECTED_VERSION_STRING in "$PY36_VERSION_STRING" "$PY310_VERSION_STRING"; do
    grep "$EXPECTED_VERSION_STRING" $SERVER_LOG
    if [ $? -ne 0 ]; then
        cat $SERVER_LOG
        echo -e "\n***\n*** $EXPECTED_VERSION_STRING was not found in Triton logs. \n***"
        RET=1
    fi
done
set -e

# Clean up bucket contents and delete bucket
aws s3 rm "${BUCKET_URL_SLASH}" --recursive --include "*"
aws s3 rb "${BUCKET_URL}"

if [ $RET -eq 0 ]; then
  echo -e "\n***\n*** Env Manager Test PASSED.\n***"
else
  cat $SERVER_LOG
  echo -e "\n***\n*** Env Manager Test FAILED.\n***"
fi

collect_artifacts_from_subdir

exit $RET
|
0157441b6c75fadf13ec8fa05629739be0caeb09
|
Shell
|
jollywho/Yaya-tan
|
/lib/scripts/eidpl.sh
|
UTF-8
| 739
| 3.296875
| 3
|
[] |
no_license
|
# Wait for the optical drive at /dev/sr0 to become readable, mount it, copy
# any content into a fresh temp directory under the casper DVD archive, then
# unmount and eject the disc.
pend="/dev/sr0: writable, no read permission"
res=$(sudo file -s /dev/sr0)
printf "waiting..."
# Busy-wait until `file -s` stops reporting the unreadable state (i.e. the
# inserted disc has spun up and can be read).
while [[ $res == $pend ]]
do
    res=$(sudo file -s /dev/sr0)
done
printf "done.\n"
sudo mount /dev/sr0 /media/1
# a = number of files on the disc, b = number of directories (root included).
a=$(find /media/1 -type f -printf x | wc -c)
b=$(find /media/1 -type d -printf x | wc -c)
casper_path=/mnt/casper/chishiki/dvds
# Archive only non-empty discs: at least one file, or more than the root dir.
if [[ $a -gt 0 || $b -gt 1 ]]; then
    printf "starting..."
    temp=$(mktemp -d $casper_path/XXXXX) && chmod -R +rw $temp
    sudo cp -rf /media/1/* $temp
    sudo chmod -R +rw $temp
    sudo chown -R chishiki $temp
    printf "done.\n"
    # Green banner, then a magenta full-width separator line.
    tput setaf 2
    banner -C --font=2 '##DONE##'
    tput setaf 5
    printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
    sudo umount /media/1
    eject
else
    printf "not run"
fi
| true
|
24510cf26b8d8ede57d0d6fd3f81bf4da3c7e590
|
Shell
|
bsc-dd/hecuba
|
/cassandra4slurm/scripts/execute.sh
|
UTF-8
| 13,108
| 3.484375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
###############################################################################################################
#                                                                                                             #
#                                        Application Launcher for Slurm                                       #
#                                          Eloy Gil - eloy.gil@bsc.es                                         #
#                                                                                                             #
#                                        Barcelona Supercomputing Center                                      #
#                                                    .-.--_                                                   #
#                                                  ,´,´.´   `.                                                #
#                                                  | | | BSC |                                                #
#                                                  `.`.`. _ .´                                                #
#                                                    `·`··                                                    #
#                                                                                                             #
###############################################################################################################

# Per-user state directory for cassandra4slurm bookkeeping files.
export C4S_HOME=$HOME/.c4s
export CASS_IFACE="-ib0"

# Cleaning old executions (more than a month ago)
`find $C4S_HOME -maxdepth 1 -mtime +30 -type f | grep -v ".cfg" | xargs rm -f`

# Cleaning old jobs (not in the queuing system anymore): keep only joblist /
# appjoblist entries whose job id still appears in squeue.
C4S_JOBLIST=$C4S_HOME/joblist.txt
C4S_SCONTROL=$C4S_HOME/scontrol.txt
C4S_SQUEUE=$C4S_HOME/squeue.txt
APP_JOBLIST=$C4S_HOME/appjoblist.txt
scontrol show job > $C4S_SCONTROL
squeue > $C4S_SQUEUE
touch $C4S_JOBLIST $C4S_HOME/newjoblist.txt $C4S_HOME/newappjoblist.txt
if [ $(squeue | wc -l) -eq 1 ]; then
    # Only the squeue header line: nothing is queued, so reset both lists.
    rm -f $C4S_JOBLIST $APP_JOBLIST; touch $C4S_JOBLIST $APP_JOBLIST
else
    # Iterate line-wise (files hold "<jobid> <name>" per line).
    OLDIFS=$IFS; IFS=$'\n';
    for job_line in $(cat $C4S_JOBLIST); do
        job_id=$(echo $job_line | awk '{ print $1 }')
        if [ $(grep "$job_id " $C4S_SQUEUE | wc -l) -gt 0 ]; then
            echo "$job_line" >> $C4S_HOME/newjoblist.txt
        fi
    done
    for job_line in $(cat $APP_JOBLIST); do
        job_id=$(echo $job_line | awk '{ print $1 }')
        if [ $(grep "$job_id " $C4S_SQUEUE | wc -l) -gt 0 ]; then
            echo "$job_line" >> $C4S_HOME/newappjoblist.txt
        fi
    done
    IFS=$OLDIFS
    mv $C4S_HOME/newjoblist.txt $C4S_JOBLIST
    mv $C4S_HOME/newappjoblist.txt $APP_JOBLIST
fi

CFG_FILE=$C4S_HOME/conf/cassandra4slurm.cfg
source $CFG_FILE
MODULE_PATH=$HECUBA_ROOT/bin/cassandra4slurm
# Random-suffixed default job name; may be overridden by --jobname.
UNIQ_ID="c4app"$(echo $RANDOM | cut -c -3)
DEFAULT_APP_NODES=2
DEFAULT_MAX_TIME="00:30:00"
RETRY_MAX=15
PYCOMPSS_SET=0
usage () {
    # Print the command-line help for the launcher and all its subcommands.
    printf '%s\n' \
        "Usage: ${0} [ -h | -l | RUN [-n=N_APP_NODES] [-c=cluster_id] --appl=PATH [ARGS] [ --pycompss[=PARAMS] ] [ -t=HH:MM:ss ] [ --qos=debug ] [ --jobname=NAME ] [ --logs=DIR ] | KILL [-e=application_id] ]" \
        " " \
        " -h:" \
        "        Prints this usage help." \
        " " \
        " -l | --list:" \
        "        Shows a list of Cassandra clusters and applications." \
        " " \
        " RUN:" \
        "        Starts a new application execution over an existing Cassandra cluster, depending of the optional parameters." \
        "        The flag -n is used to set the number of application nodes to reserver. Default is 2." \
        "        It is used --appl to set the path to the executable and its arguments, if any." \
        "        If the application must be executed using PyCOMPSs the variable --pycompss should contain its PyCOMPSs parameters." \
        "        Using -t it will set the maximum time of the job to this value, with HH:MM:ss format. Default is 30 minutes (00:30:00)." \
        "        Using --qos=debug it will run in the testing queue. It has some restrictions (a single job and 2h max.) so any higher requirements will be rejected by the queuing system." \
        " " \
        " KILL application_id:" \
        "        The application identified by application_id is killed, aborting the process." \
        " "
}
# Derive the per-job helper file paths from the unique job identifier.
# Globals read: UNIQ_ID, C4S_HOME.
# Globals written: JOBNAME (exported), APP_PATH_FILE, PYCOMPSS_FLAGS_FILE.
set_utils_paths () {
    export JOBNAME="${UNIQ_ID}"
    APP_PATH_FILE="${C4S_HOME}/app-${UNIQ_ID}.txt"
    PYCOMPSS_FLAGS_FILE="${C4S_HOME}/pycompss-flags-${UNIQ_ID}.txt"
}
# List the known Cassandra clusters and applications, as recorded in the
# C4S job-list files (one "<jobid> <name>" entry per line).
# Globals read: C4S_JOBLIST, APP_JOBLIST.
show_list_info () {
    num_clusters=$(cat $C4S_JOBLIST | wc -l)
    if [ "$num_clusters" == "0" ]; then
        echo "There are no Cassandra clusters."
    else
        if [ $num_clusters -gt 1 ]; then
            echo "These are the existing Cassandra clusters:"
        else
            echo "This is the existing Cassandra cluster:"
        fi
        echo "JOBID  JOB NAME"
        # Unquoted substitution intentionally flattens the file to one line,
        # matching the historical output format.
        echo $(cat $C4S_JOBLIST)
    fi
    num_apps=$(cat $APP_JOBLIST | wc -l)
    if [ "$num_apps" == "0" ]; then
        echo "There are no applications."
    else
        if [ $num_apps -gt 1 ]; then
            echo "These are the existing applications:"
        else
            echo "This is the existing application:"
        fi
        echo "JOBID  JOB NAME"
        echo $(cat $APP_JOBLIST)
    fi
}
# Look up the c4s Cassandra-cluster job in the SLURM queue.
# Globals written: JOB_INFO (raw matching line(s)), JOB_ID (field 1),
# JOB_STATUS (field 5). All end up empty when no c4s job is queued.
get_job_info () {
    JOB_INFO=$(squeue | grep c4s)
    # Split the (flattened) squeue line into fields, like the old awk calls.
    set -- $JOB_INFO
    JOB_ID=$1
    JOB_STATUS=$5
}
# Resolve the first node of the running Cassandra cluster from the hostlist
# file written at job submission time.
# Globals read: C4S_HOME. Globals written: NODE_ID.
get_cluster_node () {
    local job_name
    job_name=$(squeue | grep c4s | awk '{ print $3 }')
    NODE_ID=$(head -n 1 "$C4S_HOME/hostlist-${job_name}.txt")
}
# Collect the Address column of `nodetool status` for every node in the
# cluster, via ssh to the first node.
# Globals read: NODE_ID, CASS_HOME, CASS_IFACE. Globals written: NODE_IPS.
get_cluster_ips () {
    local remote_cmd="$CASS_HOME/bin/nodetool -h $NODE_ID$CASS_IFACE status"
    # Print field 2 of every line after the "Address" header row.
    NODE_IPS=$(ssh $NODE_ID "$remote_cmd" | awk '/Address/{p=1;next} p {print $2}')
}
# Abort the launcher: no Cassandra cluster job exists.
exit_no_cluster () {
    printf '%s\n' "There is no running Cassandra cluster. Exiting..."
    exit
}
# Validate that a Cassandra cluster exists and that CLUSTERID (optional,
# from -c) resolves to exactly one running cluster; exits on any mismatch.
# Globals read: C4S_JOBLIST, CLUSTERID. Globals written: CLUSTERID.
function test_if_cluster_up () { # Perhaps this function doesnt make sense anymore
    # Checks if there are Cassandra Clusters running, aborting if not
    if [ $(cat $C4S_JOBLIST | wc -l) -eq 0 ]; then
        exit_no_cluster
    elif [ $(cat $C4S_JOBLIST | wc -l) -eq 1 ]; then
        # Exactly one cluster: it must be in state R and, if the user gave a
        # cluster id, it must match.
        CLUSTID=$(cat $C4S_JOBLIST | awk '{ print $2 }')
        CLUSTST=$(squeue | grep "$(cat $C4S_JOBLIST | awk '{ print $1 }') " | awk '{ print $5 }')
        if [ "$CLUSTST" != "R" ]; then
            echo "ERROR: The job status is not running (R). Exiting..."
            squeue
            exit
        fi
        if [ "0$CLUSTERID" != "0" ] && [ "$CLUSTERID" != "$CLUSTID" ]; then
            echo "ERROR: Given Cluster ID ("$CLUSTERID") not found."
            echo "The only available Cluster is "$CLUSTID". Exiting..."
            exit
        else
            CLUSTERID=$CLUSTID
        fi
    elif [ "0$CLUSTERID" == "0" ]; then
        # Several clusters but no -c given: ambiguous, make the user choose.
        echo "ERROR: There are many Cassandra clusters, use -c=cluster_name to specify which one to use."
        echo "JOBID  CLUSTER NAME"
        cat $C4S_JOBLIST
        exit
    elif [ "$(cat $C4S_JOBLIST | grep " $CLUSTERID ")" != $CLUSTERID ]; then
        # NOTE(review): the grep output is "<jobid> <name>", so comparing it
        # against bare $CLUSTERID looks always-unequal — confirm intended.
        echo "ERROR: Given Cluster ID ("$CLUSTERID") not found. The available ones are the following:"
        echo "JOBID  CLUSTER NAME"
        cat $C4S_JOBLIST
        exit
    fi
}
# Poll the cluster state via nodetool: count nodes whose state is UN (up /
# normal) into NODE_COUNTER; bump RETRY_COUNTER when any node is not UN yet.
# Globals read: CASS_HOME, CASS_IFACE, RETRY_COUNTER.
# Globals written: NODE_COUNTER, RETRY_COUNTER (via get_job_info: JOB_ID,
# JOB_STATUS, JOB_INFO; via get_cluster_node: NODE_ID).
function get_nodes_up () {
    get_job_info
    if [ "$JOB_ID" != "" ]
    then
        if [ "$JOB_STATUS" == "R" ]
        then
            get_cluster_node
            # Strip the nodetool header (first 5 lines) and trailing line,
            # leaving one state token (e.g. UN, DN) per node.
            NODE_STATE_LIST=`ssh -q $NODE_ID "$CASS_HOME/bin/nodetool -h $NODE_ID$CASS_IFACE status" | sed 1,5d | sed '$ d' | awk '{ print $1 }'`
            if [ "$NODE_STATE_LIST" != "" ]
            then
                NODE_COUNTER=0
                for state in $NODE_STATE_LIST
                do
                    if [ $state != "UN" ]
                    then
                        RETRY_COUNTER=$(($RETRY_COUNTER+1))
                        break
                    else
                        NODE_COUNTER=$(($NODE_COUNTER+1))
                    fi
                done
            fi
        fi
    fi
}
# ---------------------------------------------------------------------------
# Argument parsing: long-option style flags (--key=value) handled manually,
# followed by the RUN / KILL dispatch.
# ---------------------------------------------------------------------------
for i in "$@"; do
    case $i in
        -h|--help)
            usage
            exit
            ;;
        run|RUN)
            ACTION="RUN"
            echo "Action is RUN."
            shift
            ;;
        -l|--list)
            show_list_info
            exit
            shift
            ;;
        kill|KILL)
            ACTION="KILL"
            shift
            ;;
        -a=*|--appl=*)
            # Path (plus args) of the application binary, persisted for the job.
            # NOTE(review): APP_PATH_FILE is only defined after set_utils_paths
            # runs, which happens later — confirm the write below lands where
            # intended when --appl precedes RUN handling.
            APP="${i#*=}"
            echo $APP > $APP_PATH_FILE
            shift
            ;;
        -p=*|--pycompss=*)
            PYCOMPSS_APP="${i#*=}"
            PYCOMPSS_SET=1
            echo $PYCOMPSS_APP > $PYCOMPSS_FLAGS_FILE
            shift
            ;;
        -c=*|--cluster=*)
            CLUSTERID="${i#*=}"
            shift
            ;;
        -t=*|--time=*)
            JOB_MAX_TIME="${i#*=}"
            shift
            ;;
        -n=*|--number_of_nodes=*)
            APP_NODES="${i#*=}"
            shift
            ;;
        -q=*|--qos=*)
            QUEUE="--qos=""${i#*=}"
            shift
            ;;
        -l=*|--logs=*)
            LOGS_DIR="${i#*=}"
            mkdir -p $LOGS_DIR
            shift
            ;;
        -j=*|--jobname=*)
            # User-chosen job name; warn interactively if already in use.
            UNIQ_ID="${i#*=}"
            UNIQ_ID=$(echo $UNIQ_ID | sed 's+ ++g')
            if [ $(grep " $UNIQ_ID " $C4S_HOME/joblist.txt | wc -l) -gt 0 ]; then
                echo "Jobname "$UNIQ_ID" already in use. Continue? (y/n) "
                read input_jobname
                while [ "$input_jobname" != "y" ] && [ "$input_jobname" != "n" ]; do
                    echo "Wrong option. Continue? (y/n) "
                    read input_jobname
                done
                if [ "$input_jobname" == "n" ]; then
                    echo "Aborted."
                    exit
                fi
            fi
            shift
            ;;
        -e=*|--execution=*)
            EXEC_ID="${i#*=}"
            shift
            ;;
        *)
            # Collect unrecognized flags to report them all at once below.
            UNK_FLAGS=$UNK_FLAGS"${i#=*}"" "
            ;;
    esac
done
if [ "0$UNK_FLAGS" != "0" ]; then
    if [ "$(echo $UNK_FLAGS | wc -w)" -gt 1 ]; then
        MANY_FLAGS="s"
    fi
    echo "ERROR: Unknown flag$MANY_FLAGS: "$UNK_FLAGS
    echo "Check help: ./${0} -h"
    exit
fi

if [ "$ACTION" == "RUN" ]; then
    if [ "0$APP" == "0" ]; then
        echo "ERROR: An application must be specified. Use mandatory flag --appl, or check help for more info. Exiting..."
        exit
    fi
    set_utils_paths
    test_if_cluster_up
    if [ "0$NUM_NODES" == "0" ]; then
        APP_NODES=$DEFAULT_APP_NODES
    fi
    # Check PyCOMPSs pre-condition (at least two nodes, 1 master 1 slave)
    if [ "0$PYCOMPSS_APP" != "0" ] && [ $APP_NODES -lt 2 ]; then
        echo "ERROR: PyCOMPSs executions need at least 2 application nodes. Aborting..."
        exit
    elif [ "0$PYCOMPSS_APP" != "0" ]; then
        echo "[INFO] This execution will use PyCOMPSs in $APP_NODES nodes."
    fi

    #WHILE DEBUGGING...
    echo "EXECUTION SUMMARY:"
    #echo "# of Cassandra nodes: "$CASSANDRA_NODES
    echo "# of application nodes: "$APP_NODES
    #echo "# total of requested nodes: "$TOTAL_NODES
    #DEBUGGING#exit

    echo "Job allocation started..."
    if [ "0$JOB_MAX_TIME" == "0" ]; then
        JOB_MAX_TIME=$DEFAULT_MAX_TIME
    fi
    if [ "0$LOGS_DIR" == "0" ]; then
        #yolandab
        DEFAULT_LOGS_DIR=$(cat $CFG_FILE | grep "LOG_PATH=")
        if [ $? -eq 1 ]; then
            DEFAULT_LOGS_DIR=$PWD
        else
            DEFAULT_LOGS_DIR=$(echo $DEFAULT_LOGS_DIR| sed 's/LOG_PATH=//g' | sed 's/"//g')
        fi
        echo "[INFO] This execution will use $DEFAULT_LOGS_DIR as logging dir"
        #was:
        #DEFAULT_LOGS_DIR=$(cat $CFG_FILE | grep "LOG_PATH=" | sed 's/LOG_PATH=//g' | sed 's/"//g')
        LOGS_DIR=$DEFAULT_LOGS_DIR
    fi
    # Submit the application job; job-app.sh receives the unique id, whether
    # PyCOMPSs is used, and the target cluster id.
    sbatch --job-name=$UNIQ_ID --ntasks=$APP_NODES --ntasks-per-node=1 --time=$JOB_MAX_TIME --exclusive $QUEUE --output=$LOGS_DIR/app-%j.out --error=$LOGS_DIR/app-%j.err $MODULE_PATH/job-app.sh $UNIQ_ID $PYCOMPSS_SET $CLUSTER_ID
    sleep 3
    squeue
elif [ "$ACTION" == "KILL" ] || [ "$ACTION" == "kill" ]; then
    # If there is an application it kills it
    if [ $(cat $APP_JOBLIST | wc -l) -eq 0 ]; then
        echo "ERROR: There is no running application to kill. Exiting..."
    elif [ "0$EXEC_ID" != "0" ]; then
        if [ "0$(cat $APP_JOBLIST | grep $EXEC_ID )" == "0" ]; then
            echo "[ERROR] Application name $EXEC_ID not found, these are the current jobs:"
            cat $APP_JOBLIST
        else
            echo "[INFO] Killing application $EXEC_ID. It may take a while..."
            scancel $(cat $APP_JOBLIST | grep " $EXEC_ID " | awk '{ print $1 }')
            echo "Done."
        fi
    elif [ $(cat $APP_JOBLIST | wc -l) -eq 1 ]; then
        # No -e given but only one application exists: kill it.
        JOBID=$(cat $APP_JOBLIST | awk '{ print $1 }')
        JOBNAME=$(cat $APP_JOBLIST | awk '{ print $2 }')
        echo "INFO: Killing application $JOBNAME. It may take a while..."
        scancel $JOBID
        echo "Done."
    fi
    exit
else
    # There may be an error with the arguments used, also prints the help
    echo "Input argument error. Only an ACTION must be specified."
    usage
    echo "Exiting..."
    exit
fi
|
62019170c8a61fb49d5a62d1415edf3df0c640e8
|
Shell
|
ohtu-rekry/recruitment-tool
|
/scripts/deploy-k8s.sh
|
UTF-8
| 986
| 2.625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Travis CI deploy script: build and push the Docker image, then roll the new
# image out to the GKE cluster. Expects DOCKER_IMAGE, DOCKER_USERNAME,
# DOCKER_PASSWORD, TRAVIS_BUILD_NUMBER, K8S_RESOURCE_KIND, K8S_RESOURCE_NAME
# and the encrypted-key variables in the environment.
set -e
npm prune --production
# Install the Google Cloud SDK if a previous cache did not provide it.
if [ ! -d "${HOME}/google-cloud-sdk/bin" ]; then
  rm -rf "${HOME}/google-cloud-sdk"
  curl https://sdk.cloud.google.com | bash >/dev/null
fi
source "${HOME}/google-cloud-sdk/path.bash.inc"
# NOTE(review): 'gcloud components update kubectl' is the legacy spelling;
# current SDKs use 'gcloud components install kubectl' — confirm SDK version.
gcloud components update kubectl
docker build -t "${DOCKER_IMAGE}:${TRAVIS_BUILD_NUMBER}" .
# --password-stdin keeps the secret out of the process argument list.
echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin
docker push "${DOCKER_IMAGE}:${TRAVIS_BUILD_NUMBER}"
# Decrypt the GCP service-account key baked into the repo by Travis.
openssl aes-256-cbc -K "$encrypted_127facfaf176_key" -iv "$encrypted_127facfaf176_iv" -in ../gke-service-account.json.enc -out gke-service-account.json -d
gcloud auth activate-service-account --key-file=gke-service-account.json
gcloud config set project emblica-212815
gcloud config set compute/region europe-north1
gcloud container clusters get-credentials emblica-cluster-1 --region europe-north1
# Point the running deployment at the freshly pushed image tag.
kubectl set image "${K8S_RESOURCE_KIND}/${K8S_RESOURCE_NAME}" "${K8S_RESOURCE_NAME}=${DOCKER_IMAGE}:${TRAVIS_BUILD_NUMBER}" -n rekrysofta
| true
|
23eec69014a6187ebc533a353f6bc9e980e0159c
|
Shell
|
andry81/tacklelib
|
/bash_tests/unit/02_traplib/01_push_trap/test_1.sh
|
UTF-8
| 1,462
| 3.390625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Unit tests for tacklelib's tkl_push_trap: verify that pushed RETURN/EXIT
# traps fire in the expected order and the expected number of times. The
# expected colon-separated output for each test is the second argument to
# tkl_testmodule_run_test at the bottom.
if [[ -n "$BASH" ]]; then
# Locate and source the tacklelib bootstrap unless it was already sourced.
if [[ -z "$SOURCE_TACKLELIB_BASH_TACKLELIB_SH" || SOURCE_TACKLELIB_BASH_TACKLELIB_SH -eq 0 ]]; then
# builtin search
for BASH_SOURCE_DIR in "/usr/local/bin" "/usr/bin" "/bin"; do
[[ -f "$BASH_SOURCE_DIR/bash_tacklelib" ]] && {
source "$BASH_SOURCE_DIR/bash_tacklelib" || exit $?
break
}
done
fi
tkl_include_or_abort '__init__.sh'
tkl_include_or_abort 'testlib.sh'
# test_1: a RETURN trap pushed only in the inner function (boo) must fire
# once per boo invocation and must not leak to the outer function (foo).
function test_1()
{
foo()
{
tkl_test_echo foo
boo()
{
tkl_push_trap 'tkl_test_echo 2' RETURN
tkl_test_echo boo
}
boo
}
foo
foo
# Dump any RETURN trap still registered at top level (expected: none) to fd 3.
builtin trap -p RETURN >&3
}
# test_2: RETURN traps pushed in both outer and inner functions must each
# fire on their own function's return (inner first, then outer).
function test_2()
{
foo()
{
tkl_push_trap 'tkl_test_echo 1' RETURN
tkl_test_echo foo
boo()
{
tkl_push_trap 'tkl_test_echo 2' RETURN
tkl_test_echo boo
}
boo
}
foo
foo
builtin trap -p RETURN >&3
}
# test_3: EXIT and RETURN traps inside subshells — the inner subshell's
# RETURN/EXIT traps fire per call; the outer subshell's EXIT trap fires last.
function test_3()
{
(
tkl_push_trap 'tkl_test_echo e1' EXIT
tkl_test_echo 1
foo()
{
(
tkl_push_trap 'tkl_test_echo r2' RETURN
tkl_push_trap 'tkl_test_echo e2' EXIT
tkl_test_echo 2
)
}
foo
foo
tkl_test_echo 3
)
}
# Run the suite only when this file is executed directly (not sourced).
if [[ -z "$BASH_LINENO" || BASH_LINENO[0] -eq 0 ]]; then
# Script was not included, then execute it.
tkl_testmodule_init
tkl_testmodule_run_test test_1 foo:boo:2:foo:boo:2
tkl_testmodule_run_test test_2 foo:boo:2:1:foo:boo:2:1
tkl_testmodule_run_test test_3 1:2:r2:e2:2:r2:e2:3:e1
fi
fi
| true
|
2e81099d1a18a25ea7e1ea162e7250fd3e7e10cc
|
Shell
|
phreaker56/dmvps
|
/modulos/gen/VPSbot/TeleBotGen/sources/menu
|
UTF-8
| 2,367
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
# Build the Telegram bot's main menu text into the global bot_retorno and
# render it. Three audiences are distinguished:
#   1) unknown users (not in $permited, not in the $CID file): minimal menu;
#   2) known reseller users (listed in the $CID file): reseller menu with
#      key-usage counters;
#   3) admins (listed in $permited): full admin menu plus generator status.
# Globals read: chatuser, permited, CID, nombrevalue, LINE.
# NOTE(review): "$permited" (sic) and the helper functions msj_fun/menu_print
# are defined elsewhere — confirm spelling matches their definitions.
menu_src () {
bot_retorno=" ⚡ Bienvenido al BotGen ADM-VPS ⚡\n"
if [[ $(echo $permited|grep "${chatuser}") = "" ]]; then
if [[ $(cat ${CID}|grep "${chatuser}") = "" ]]; then
# Unknown user: show only the public commands.
bot_retorno+="$LINE\n"
bot_retorno+=" ⚠️ TENDRAS ACCESO ILIMITADO ⚠️ \n 🚫 Sin TU RESELLER EN LA KEY 🚫"
bot_retorno+="$LINE\n"
bot_retorno+="/ID | Muestra sus ID |\n"
bot_retorno+="/menu | MENU DE COMANDOS |\n"
bot_retorno+="/ayuda | INSTRUCCIONES DE COMANDOS |\n"
bot_retorno+="$LINE\n"
msj_fun
else
# Reseller: resolve the reseller name from a local file, falling back to a
# remote copy, and count keys used/generated for this reseller.
[[ -e /etc/menu_ito ]] && nomkey="$(cat /etc/menu_ito)" || nomkey="$(curl -sSL "https://www.dropbox.com/s/1qe8hraqmfhpwio/menu_credito")"
numkey_used=$(grep -o -i $nombrevalue /etc/gerar-sh-log | wc -l)
numkey_gen=$(grep -o -i $nombrevalue /etc/ADM-db/num-key.cont | wc -l)
bot_retorno+="$LINE\n"
bot_retorno+=" ⚠️ TENDRAS ACCESO ILIMITADO ⚠️ \n 🚫 Sin TU RESELLER EN LA KEY 🚫"
bot_retorno+="$LINE\n"
bot_retorno+="Reseller Actual: $nomkey\n"
bot_retorno+="$LINE\n"
bot_retorno+="/ID | Muestra sus ID |\n"
bot_retorno+="/instal | INSTALADOR OFICIAL |\n"
bot_retorno+="/menu | MENU DE COMANDOS | \n"
bot_retorno+="/ayuda | INSTRUCCIONES DE COMANDOS |\n"
bot_retorno+="/donar | Agradecer Trabajo |\n"
bot_retorno+=" 🔐 『 $numkey_used 』𝙆𝙚𝙮𝙨 𝙐𝙨𝙖𝙙𝙖𝙨 🔓,『 $numkey_gen 』𝙆𝙚𝙮𝙨 𝙂𝙚𝙣𝙚𝙧𝙖𝙙𝙖𝙨 🗝️ \n"
bot_retorno+="$LINE\n"
menu_print
fi
else
# Admin: report whether the key-generator HTTP server process is running
# and how many keys have been consumed, then show the admin commands.
unset PID_GEN
PID_GEN=$(ps x|grep -v grep|grep "http-server.sh")
[[ ! $PID_GEN ]] && PID_GEN='(Offline) ❌' || PID_GEN='(Online) ✅'
unset usadas
usadas="$(cat /etc/http-instas)"
[[ ! $usadas ]] && k_used="0" || k_used="$usadas"
bot_retorno+="Gen $PID_GEN | Keys Used [$k_used]\n"
bot_retorno+="$LINE\n"
bot_retorno+="/infosys | INFORMACION DE SERVIDOR |\n"
bot_retorno+="/list | MUESTRA LISTA DE ID PERMITIDOS\n"
bot_retorno+="/instal | INSTALADOR OFICIAL |\n"
bot_retorno+="/menu | MENU DE ACCIONES |\n"
bot_retorno+="/ayuda | INSTRUCCIONES DE COMANDOS |\n"
bot_retorno+="/cache | OPTIMIZA SERVIDOR |\n"
bot_retorno+="/update | ACTUALIZA BOT |\n"
bot_retorno+="/reboot | REINICIA SERVIDOR VPS |\n"
bot_retorno+="$LINE\n"
menu_print
fi
}
| true
|
37885d059b13e0b9f0114e576bfc99c862a98092
|
Shell
|
LArbys/ubdl
|
/scripts/container_setenv.sh
|
UTF-8
| 1,145
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# LOCATIONS FOR UBDL CONTAINER (FOR USE ON TUFTS TYPICALLY)
# Sets up ROOT, CUDA, OpenCV and libtorch environment variables inside the
# ubdl container. Intended to be sourced.
# ROOT
source /usr/local/root/build/bin/thisroot.sh
# CUDA
# typical location of cuda in ubuntu
export CUDA_HOME=/usr/local/cuda-10.0
# Prepend the CUDA lib dir only if it is not already on LD_LIBRARY_PATH.
[[ ":$LD_LIBRARY_PATH:" != *":${CUDA_HOME}/lib64:"* ]] && export LD_LIBRARY_PATH="${CUDA_HOME}/lib64:${LD_LIBRARY_PATH}"
# OPENCV
export OPENCV_INCDIR=/usr/local/include
export OPENCV_LIBDIR=/usr/local/lib
# LIBTORCH
PYTHON_VERSION=$(python -c 'import sys; print(sys.version_info[0])')
echo "SETUP FOR PYTHON ${PYTHON_VERSION}"
# BUG FIX: was `[ $PYTHON_VERSION='3' ]`, a single-word test that is always
# true. Use a proper equality test. (Both branches currently set identical
# paths, so behavior is unchanged.)
if [ "$PYTHON_VERSION" = "3" ]
then
    # location below is typically where running `pip install torch` will put pytorch
    export LIBTORCH_DIR="/usr/local/lib/python3.5/dist-packages/torch"
    export LIBTORCH_LIBDIR=${LIBTORCH_DIR}/lib
    export LIBTORCH_INCDIR=${LIBTORCH_DIR}/lib/include
else
    # NOTE(review): identical to the python3 branch; presumably this should
    # point at a python2 site-packages path — confirm before relying on it.
    export LIBTORCH_DIR="/usr/local/lib/python3.5/dist-packages/torch"
    export LIBTORCH_LIBDIR=${LIBTORCH_DIR}/lib
    export LIBTORCH_INCDIR=${LIBTORCH_DIR}/lib/include
fi
# Prepend the libtorch lib dir only if not already present.
[[ ":$LD_LIBRARY_PATH:" != *":${LIBTORCH_LIBDIR}:"* ]] && \
    export LD_LIBRARY_PATH="${LIBTORCH_LIBDIR}:${LD_LIBRARY_PATH}"
| true
|
a99b16bf00d3a7a5a89c7b2747c39bd958258b69
|
Shell
|
bigdatalyn/bigdatalyn.github.io
|
/files/Linux/tigerVNC/TigerVNC-server automated installer for OL6.8.txt
|
UTF-8
| 2,292
| 4.03125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#author: alexandru.dobritescu@oracle.com
#date: 10-May-2018
#version: 1.0
# Automated TigerVNC-server installer/configurator for OL6.8. Must run as root;
# prompts interactively for a username and passwords.
#Each VNC desktop is associated with a user account. Change this value according to your needs.
display="1"
port="5901"
#Installing TigerVNC-Server
#this is checking wether TigerVNC-Server is installed or not; if it is not then it will perform the installation
sleep 5
echo "Installing TigerVNC-Server!"
if ! rpm -qa | grep -qw tigervnc-server; then
    yum install tigervnc-server -y
else
    echo "TigerVNC-Server already installed. Nothing to do."
fi
#Installing Desktop environment
#this will install all the necessary packages needed for the graphical user interface
# BUG FIX: `yum groupinstall` used to run unconditionally AFTER this check, so
# the "already installed" message was printed and the install ran anyway. The
# install now happens only in the not-installed branch.
# NOTE(review): `rpm -qa | grep -qw "Desktop"` matches package names, not yum
# groups; a `yum grouplist` based check would be more accurate — confirm.
if ! rpm -qa | grep -qw "Desktop"; then
    echo "Installing Desktop!"
    yum groupinstall "Desktop" -y
else
    echo "Desktop Platform already installed. Nothing to do."
fi
sleep 5
#Creating a username for accessing VNC server
#this will ask you to enter the name ofthe user and input a password for it
echo "Creating an username for VNC access!"
if [ $(id -u) -eq 0 ]; then
    read -p "Enter username : " username
    read -s -p "Enter password : " password
    # Only create the account if it does not already exist.
    grep -E "^$username" /etc/passwd >/dev/null
    if [ $? -eq 0 ]; then
        echo "$username exists!"
    else
        # NOTE(review): crypt() with the fixed salt "password" is weak DES
        # hashing; consider `chpasswd` or openssl passwd -6 instead.
        pass=$(perl -e 'print crypt($ARGV[0], "password")' "$password")
        useradd -m -p "$pass" "$username"
        [ $? -eq 0 ] && echo "User "$username" has been added to system!" || echo "Failed to add a user!"
    fi
else
    echo "Only root may add a user to the system"
    exit 2
fi
sleep 5
#Script to check if the username exists and it is currently used
if [ "$(id -un)" != "$username" ]; then
    echo "Insert vncpasswd for user $username!"
    su - "$username" -c vncpasswd
else
    echo "Insert vncpasswd for user $username!"
    vncpasswd
fi
sleep 5
echo "Configure the display!"
# Register the display/user pair and geometry with the vncservers service config.
echo "VNCSERVERS=\"$display:$username\"" >> /etc/sysconfig/vncservers
echo "VNCSERVERARGS[$display]=\"-geometry 1280x1024 -nolisten tcp\"" >> /etc/sysconfig/vncservers
echo "Enable the "vncserver" service for autostart and start the service."
chkconfig vncserver on
service vncserver start
echo "Creating firewall rules for VNC access!"
iptables -I INPUT -m state --state NEW -p tcp --destination-port $port -j ACCEPT
service iptables save
echo "Installation & Configuration of TigerVNC-Server is now complete!"
| true
|
c498a35a8ac84ff63b649c1facbd21acfeaa63e6
|
Shell
|
gungwald/utils
|
/java/bin/java-set-fonts.sh
|
UTF-8
| 672
| 2.71875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Configure JVM font and HiDPI-scaling options via the JDK_JAVA_OPTIONS
# environment variable (honored by the java launcher in JDK 9+).
FONT_NAME=monofur
FONT_TYPE=plain
FONT_SIZE=25
FONT_SPEC="$FONT_NAME-$FONT_TYPE-$FONT_SIZE"
# Swing fonts - Haven't gotten these to work
# BUG FIX: this was assigned to JDK_JAVA_OPTION (missing trailing S), so the
# Swing options were silently dropped by every later append to JDK_JAVA_OPTIONS.
JDK_JAVA_OPTIONS="-Dswing.aatext=true -Dswing.plaf.metal.controlFont=$FONT_SPEC -Dswing.plaf.metal.userFont=$FONT_SPEC"
# Plastic fonts - Haven't gotten these to work
JDK_JAVA_OPTIONS="$JDK_JAVA_OPTIONS -DWindows.controlFont=$FONT_SPEC -DWindows.menuFont=$FONT_SPEC -DPlastic.controlFont=$FONT_SPEC -DPlastic.menuFont=$FONT_NAME-bold-$FONT_SIZE"
# General scaling
JDK_JAVA_OPTIONS="$JDK_JAVA_OPTIONS -Dsun.java2d.uiScale=2"
JDK_JAVA_OPTIONS="$JDK_JAVA_OPTIONS -Dsun.java2d.uiScale.enabled=true"
export JDK_JAVA_OPTIONS
| true
|
b0d8d149ee218bf079d60dca62da5ebf322d109b
|
Shell
|
ladyson/Weather-and-Crime
|
/Training_Pipeline/AccessData/FormatDataScripts/optimize_sample_ratio.sh
|
UTF-8
| 705
| 2.84375
| 3
|
[] |
no_license
|
# For each zero/one sampling ratio, re-bag and bin the shooting data, archive
# the binned output, and copy the archive to the remote cluster.
# Relies on bag_and_bin.sh and the two pre-split ones/zeroes CSVs in the cwd.
for ratio in 100 50 40 30 20 10 5 4 3 2 1
#for ratio in 1 2 # for testing code
do
# Fresh working dir seeded with the class-split inputs for this ratio.
mkdir shooting
cp WeatherandCrime_Data.shooting.ones.csv shooting
cp WeatherandCrime_Data.shooting.zeroes.csv shooting
# Args: <data csv> <bags> <?> <zero:one ratio> — see bag_and_bin.sh for details.
./bag_and_bin.sh WeatherandCrime_Data.csv 10 1 $ratio
#./bag_and_bin.sh WeatherandCrime_Data.csv 1 1 $ratio #for testing code
# Keep each ratio's results in its own directory; drop the raw inputs so the
# tarball contains only the binned output.
mkdir ratio_$ratio
mv shooting ratio_$ratio
#mv robbery ratio_$ratio
#mv assault ratio_$ratio
rm ratio_$ratio/shooting/WeatherandCrime_Data.shooting.ones.csv
rm ratio_$ratio/shooting/WeatherandCrime_Data.shooting.zeroes.csv
tar -cvf Shooting_$ratio.tar ratio_$ratio/shooting/*binned*
scp Shooting_$ratio.tar mking@fusion.lcrc.anl.gov:~/data
done
| true
|
d6f08d5de1aa31f4469be70d8f9bed35fec1638a
|
Shell
|
IslamAlam/ml-setup
|
/conda_all.sh
|
UTF-8
| 8,395
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision a full Jupyter/conda ML environment into $ROOT_DIR/conda:
# Jupyter (notebook/lab/hub) plus many lab extensions, the scipy stack,
# Julia + IJulia, R + IRkernel, and TensorFlow.
# Globals expected: ROOT_DIR (install prefix), USER; optional TEST_ONLY_BUILD.
NB_USER=$USER
CONDA_DIR=$ROOT_DIR/conda
BASE_DIR=$ROOT_DIR/
# Install Tini
# Pin tini in conda-meta/pinned so later solves cannot change its version.
conda install --quiet --yes 'tini=0.18.0' && \
conda list tini | grep tini | tr -s ' ' | cut -d ' ' -f 1,2 >> $CONDA_DIR/conda-meta/pinned && \
conda clean --all -f -y
# Install Jupyter Notebook, Lab, and Hub
# Generate a notebook server config
# Cleanup temporary files
# Correct permissions
# Do all this in a single RUN command to avoid duplicating all of the
# files across image layers when the permissions change
conda install --quiet --yes \
'notebook=6.0.3' \
'jupyterhub=1.1.0' \
'jupyterlab=2.1.5' \
'nodejs=12.*' \
'zsh' && \
conda clean --all -f -y && \
npm cache clean --force && \
# jupyter notebook --generate-config && \
rm -rf $CONDA_DIR/share/jupyter/lab/staging && \
rm -rf /home/$NB_USER/.cache/yarn
# ################ scipy-notebook
# Install Python 3 packages
# numba update to 0.49 fails resolving deps.
conda install --quiet --yes \
'beautifulsoup4=4.9.*' \
'conda-forge::blas=*=openblas' \
'bokeh=2.0.*' \
'bottleneck=1.3.*' \
'cloudpickle=1.4.*' \
'cython=0.29.*' \
'dask=2.15.*' \
'dill=0.3.*' \
'h5py=2.10.*' \
'hdf5=1.10.*' \
'ipywidgets=7.5.*' \
'ipympl=0.5.*'\
'matplotlib-base=3.2.*' \
'numba=0.48.*' \
'numexpr=2.7.*' \
'pandas=1.0.*' \
'patsy=0.5.*' \
'protobuf=3.11.*' \
'pytables=3.6.*' \
'scikit-image=0.16.*' \
'scikit-learn=0.23.*' \
'scipy=1.4.*' \
'seaborn=0.10.*' \
'sqlalchemy=1.3.*' \
'statsmodels=0.11.*' \
'sympy=1.5.*' \
'vincent=0.4.*' \
'widgetsnbextension=3.5.*'\
'xlrd=1.2.*'
conda clean --all -f -y
pip install --upgrade jupyterlab-git
# Install Debugger in Jupyter Lab
pip install --no-cache-dir xeus-python
jupyter labextension install @jupyterlab/debugger --no-build
# Activate ipywidgets extension in the environment that runs the notebook server
jupyter nbextension enable --py widgetsnbextension --sys-prefix
# Also activate ipywidgets extension for JupyterLab
# Check this URL for most recent compatibilities
# https://github.com/jupyter-widgets/ipywidgets/tree/master/packages/jupyterlab-manager
# All extensions below use --no-build; a single `jupyter lab build` runs later.
jupyter labextension install @jupyter-widgets/jupyterlab-manager@^2.0.0 --no-build
jupyter labextension install @bokeh/jupyter_bokeh@^2.0.0 --no-build
jupyter labextension install jupyter-matplotlib@^0.7.2 --no-build
jupyter labextension install @jupyterlab/toc --no-build
jupyter labextension install jupyterlab_tensorboard --no-build
# jupyterlab-gitplus
pip install --upgrade jupyterlab_gitplus
jupyter labextension install @reviewnb/jupyterlab_gitplus --no-build
jupyter serverextension enable --py jupyterlab_gitplus
# Perspective
pip install --no-cache-dir pyarrow==0.15.1
pip install --no-cache-dir perspective-python
jupyter labextension install @finos/perspective-jupyterlab --no-build
# nbdime Jupyter Notebook Diff and Merge tools
pip install --no-cache-dir nbdime
# jupyterlab_spellchecker
jupyter labextension install @ijmbarr/jupyterlab_spellchecker --no-build
# A JupyterLab extension for standalone integration of drawio / mxgraph into jupyterlab.
jupyter labextension install jupyterlab-drawio --no-build
# jupyterlab-toc A Table of Contents extension for JupyterLab.
# NOTE(review): @jupyterlab/toc was already installed above — duplicate.
jupyter labextension install @jupyterlab/toc --no-build
# Collapsible_Headings
jupyter labextension install @aquirdturtle/collapsible_headings --no-build
# Go to definition extension for JupyterLab
jupyter labextension install @krassowski/jupyterlab_go_to_definition --no-build # JuupyterLab 2.x
# Jupyterlab Code Formatter
jupyter labextension install @ryantam626/jupyterlab_code_formatter --no-build
pip install --upgrade --no-cache-dir jupyterlab_code_formatter black yapf autopep8 isort
jupyter serverextension enable --py jupyterlab_code_formatter
# install jupyterlab git
# NOTE(review): jupyterlab-git was already pip-installed above — duplicate.
jupyter labextension install @jupyterlab/git --no-build
pip install --upgrade --no-cache-dir jupyterlab-git
jupyter serverextension enable --py jupyterlab_git
# jupyterlab_voyager
# A JupyterLab MIME renderer extension to view CSV and JSON data in Voyager 2.
# jupyter labextension install jupyterlab_voyager --no-build
# For Matplotlib: https://github.com/matplotlib/jupyter-matplotlib
jupyter labextension install jupyter-matplotlib --no-build
# ipyleaflet
pip install --upgrade --no-cache-dir ipyleaflet
jupyter labextension install @jupyter-widgets/jupyterlab-manager jupyter-leaflet --no-build
#
pip install --upgrade --no-cache-dir ipympl
jupyter labextension install @jupyter-widgets/jupyterlab-manager --no-build
# Single webpack build for all the extensions queued above, then cleanup.
jupyter lab build -y
jupyter lab clean -y
npm cache clean --force
rm -rf "/home/${NB_USER}/.cache/yarn" && \
rm -rf "/home/${NB_USER}/.node-gyp"
# Install facets which does not have a pip or conda package at the moment
cd /tmp
git clone https://github.com/PAIR-code/facets.git && \
jupyter nbextension install facets/facets-dist/ --sys-prefix && \
rm -rf /tmp/facets
# Import matplotlib the first time to build the font cache.
# ENV XDG_CACHE_HOME="/home/${NB_USER}/.cache/"
python -c "import matplotlib.pyplot"
conda install -c conda-forge jupyter_contrib_nbextensions
# install Julia packages in /opt/julia instead of $HOME
# NOTE(review): JULIA_* variables are assigned but not exported here —
# confirm child julia processes see them as intended.
JULIA_DEPOT_PATH=$ROOT_DIR/julia
JULIA_PKGDIR=$ROOT_DIR/julia
JULIA_VERSION=1.4.1
cd /tmp
# hadolint ignore=SC2046
# Download the pinned Julia tarball, verify its sha256, and unpack it.
mkdir "${JULIA_DEPOT_PATH}-${JULIA_VERSION}" && \
wget -q https://julialang-s3.julialang.org/bin/linux/x64/$(echo "${JULIA_VERSION}" | cut -d. -f 1,2)"/julia-${JULIA_VERSION}-linux-x86_64.tar.gz" && \
echo "fd6d8cadaed678174c3caefb92207a3b0e8da9f926af6703fb4d1e4e4f50610a *julia-${JULIA_VERSION}-linux-x86_64.tar.gz" | sha256sum -c - && \
tar xzf "julia-${JULIA_VERSION}-linux-x86_64.tar.gz" -C "${JULIA_DEPOT_PATH}-${JULIA_VERSION}" --strip-components=1 && \
rm "/tmp/julia-${JULIA_VERSION}-linux-x86_64.tar.gz"
ln -fs ${JULIA_DEPOT_PATH}-*/bin/julia $HOME/.local/bin
# Show Julia where conda libraries are \
# mkdir /etc/julia && \
# echo "push!(Libdl.DL_LOAD_PATH, \"$CONDA_DIR/lib\")" >> /etc/julia/juliarc.jl && \
# # Create JULIA_PKGDIR \
# mkdir "${JULIA_PKGDIR}" && \
# chown "${USER}" "${JULIA_PKGDIR}" && \
# R packages including IRKernel which gets installed globally.
conda install --quiet --yes \
'r-base=3.6.3' \
'r-caret=6.0*' \
'r-crayon=1.3*' \
'r-devtools=2.3*' \
'r-forecast=8.12*' \
'r-hexbin=1.28*' \
'r-htmltools=0.4*' \
'r-htmlwidgets=1.5*' \
'r-irkernel=1.1*' \
'r-nycflights13=1.0*' \
'r-plyr=1.8*' \
'r-randomforest=4.6*' \
'r-rcurl=1.98*' \
'r-reshape2=1.4*' \
'r-rmarkdown=2.1*' \
'r-rsqlite=2.2*' \
'r-shiny=1.4*' \
'r-tidyverse=1.3*' \
'rpy2=3.1*' \
&& \
conda clean --all -f -y
# Add Julia packages. Only add HDF5 if this is not a test-only build since
# it takes roughly half the entire build time of all of the images on Travis
# to add this one package and often causes Travis to timeout.
#
# Install IJulia as jovyan and then move the kernelspec out
# to the system share location. Avoids problems with runtime UID change not
# taking effect properly on the .local folder in the jovyan home dir.
julia -e 'import Pkg; Pkg.update()' && \
(test $TEST_ONLY_BUILD || julia -e 'import Pkg; Pkg.add("HDF5")') && \
julia -e "using Pkg; pkg\"add IJulia\"; pkg\"precompile\"" && \
# move kernelspec out of home \
mv "${HOME}/.local/share/jupyter/kernels/julia"* "${CONDA_DIR}/share/jupyter/kernels/" && \
chmod -R go+rx "${CONDA_DIR}/share/jupyter"
# ######### Jupyter Notebook Data Science Stack
# hadolint ignore=SC2046
# Add Julia packages. Only add HDF5 if this is not a test-only build since
# it takes roughly half the entire build time of all of the images on Travis
# to add this one package and often causes Travis to timeout.
#
# Install IJulia as jovyan and then move the kernelspec out
# to the system share location. Avoids problems with runtime UID change not
# taking effect properly on the .local folder in the jovyan home dir.
# Install Tensorflow
pip install --quiet --no-cache-dir \
'tensorflow==2.2.0'
| true
|
48c7476901f1d3acc4953ce9a26f66a5f25090fa
|
Shell
|
archmangler/botfactory
|
/knowledgebase/makekb.sh
|
UTF-8
| 2,510
| 3.5625
| 4
|
[] |
no_license
|
#!/usr/bin/bash
#Small snippet to create a QnA knowledgebase
#From Microsofts DTO json format
#example: https://raw.githubusercontent.com/microsoft/botbuilder-tools/master/packages/QnAMaker/examples/QnADocumentsDTO.json
#https://github.com/microsoft/botbuilder-tools/tree/master/packages/QnAMaker/examples
# Configuration globals shared by all functions below.
# NOTE(review): qna_subscription_key is empty — it must be populated (or
# injected) before any bf call will succeed.
# NOTE(review): hard-coded /usr/bin/bash shebang is less portable than
# /usr/bin/env bash — confirm target hosts.
kbase_file_path="kbase.json"
qna_subscription_key=""
kbase_name="srebotkb"
kb_id=""
# Create the QnA knowledge base from the DTO JSON file unless one already
# exists (the global kb_id is non-empty when get_kb_id_by_name found a match).
# Reads globals: kb_id, kbase_name, qna_subscription_key, kbase_file_path.
create_kb_from_json() {
  if [ -z "$kb_id" ]; then
    echo "creating knowledge base ${kbase_name}"
    create_result=$(bf qnamaker:kb:create --name="${kbase_name}" --subscriptionKey=${qna_subscription_key} -i ${kbase_file_path})
    echo ${create_result}
  else
    echo "Knowledgebase ${kbase_name} exists as Id ${kb_id}"
  fi
}
# Look up the knowledge base named $kbase_name via `bf qnamaker:kb:list` and
# store its id in the global kb_id (empty when no KB with that name exists).
# Also prints status text plus the id, so callers must read the global rather
# than command-substitute this function's output.
function get_kb_id_by_name () {
echo "checking if >>${kbase_name}<< exists ..."
# (removed an unused local: knowledgebases="\"knowledgebases\"")
kb_id=$(bf qnamaker:kb:list --subscriptionKey=${qna_subscription_key} |jq -r --arg kbase_name "$kbase_name" '.knowledgebases|.[]|select(.name==$kbase_name)|.id')
if [ -n "$kb_id" ]
then
echo "kb exists as: ${kb_id}"
else
echo "kb ${kbase_name} does not exist"
fi
echo "${kb_id}"
}
# Placeholder: saving/(re)training the KB is not implemented yet.
# Does nothing when the global kb_id is empty.
save_and_train_kb() {
  if [[ -z ${kb_id} ]]; then
    return 0
  fi
  #to be implemented in a future version
  echo "save and train not implemented yet ..."
}
# Publish the knowledge base identified by the global kb_id.
# No-op when kb_id is empty (i.e. the KB was never found/created).
publish_kb() {
  if [[ -z ${kb_id} ]]; then
    return 0
  fi
  echo "publishing knowledgebase ${kbase_name}"
  # Echo the exact command first so the operator can see what will run.
  echo bf qnamaker:kb:publish --subscriptionKey=${qna_subscription_key} --kbId="${kb_id}"
  publish_results=$(bf qnamaker:kb:publish --subscriptionKey=${qna_subscription_key} --kbId="${kb_id}" )
  echo "${publish_results}"
}
# Fetch the primary endpoint key for the QnA service and print it.
# Stores it in the global kb_endpoint_key. No-op when kb_id is empty.
get_kb_endpoint_key() {
  if [[ -z ${kb_id} ]]; then
    return 0
  fi
  echo "getting QnA KB configuration details ..."
  kb_endpoint_key=$(bf qnamaker:endpointkeys:list --subscriptionKey=${qna_subscription_key}|jq -r '.primaryEndpointKey')
  echo "${kb_endpoint_key}"
}
# Fetch and print the configured endpoint hostname for the knowledge base.
# Stores it in the global kb_host. No-op when kb_id is empty.
get_kb_endpoint_host() {
  if [[ -z ${kb_id} ]]; then
    return 0
  fi
  echo "getting the configured endpoint host for knowledgebase ${kbase_name}"
  kb_host=$(bf qnamaker:kb:get --subscriptionKey=${qna_subscription_key} --kbId="${kb_id}" | jq -r '.hostName')
  echo "${kb_host}"
}
# Main flow: find-or-create the KB, publish it, then report the connection
# parameters (endpoint key and host) needed by the bot configuration.
#check if the kb exists
get_kb_id_by_name
#create the kb IF NOT ALREADY THERE!
create_kb_from_json
#get the kb id if newly created
get_kb_id_by_name
#publish the kb
publish_kb
#get the QnAmaker KB connection parameters
get_kb_endpoint_key
#get the endpoint host for the kb
get_kb_endpoint_host
| true
|
79d5f428968b4bce659b8204c641da809b3c346d
|
Shell
|
dawlane/cerberus
|
/src/builders/bash/tools.sh
|
UTF-8
| 6,527
| 3.578125
| 4
|
[
"Zlib"
] |
permissive
|
#!/bin/bash
# TOOL BUILDER FUNCTIONS VERSION
# THE SCRIPT IS PART OF THE CERBERUS X BUILER TOOL.
# Function to build transcc
# Build the transcc transpiler for the current HOST (linux via make, macos via
# xcodebuild), placing the binary in $BIN/transcc_$HOST.
# Globals: HOST, COMPILER, C_COMPILER, SRC, BIN, ROOT; sets EXITCODE via execute.
# NOTE(review): the `[ ... ] && { A; } || { B; }` pattern below runs B when the
# LAST command of A fails, not only when the test is false — verify intended.
do_transcc(){
EXITCODE=0
do_info "BUILDING TRANSCC WITH $COMPILER"
# Check for an exisiting transcc
[ -f "$BIN/transcc_$HOST" ] && { rm -f "$BIN/transcc_$HOST"; }
PROJECT_DIR="$SRC/transcc/transcc.build/cpptool"
# Host specific parameters to pass the the C++ compiler
[ $HOST = "linux" ] && {
BUILD_DIR="$PROJECT_DIR/gcc_linux/Release"
ARG=("make")
ARG+=("CXX_COMPILER=$COMPILER" "C_COMPILER=$C_COMPILER" "CCOPTS=-DNDEBUG" "CCOPTS+=-Os")
ARG+=("BUILD_DIR=$BUILD_DIR")
ARG+=("OUT=transcc_linux" "OUT_PATH=$BIN" "LIBOPTS=-lpthread" "LIBOPTS+=-ldl" "LDOPTS=-no-pie" "LDOPTS+=-s");
mkdir -p $BUILD_DIR
cd $SRC/transcc/transcc.build/cpptool/gcc_linux
execute ${ARG[@]}
cd $SRC
clean_build "$BUILD_DIR";
} || {
BUILD_DIR="$PROJECT_DIR/xcode/build"
ARG=("xcodebuild" "-project" "$ROOT/src/transcc/transcc.build/cpptool/xcode/main_macos.xcodeproj")
ARG+=("-configuration" "Release")
ARG+=("CONFIGURATION_BUILD_DIR=$ROOT/bin")
ARG+=("TARGET_NAME=transcc_macos")
cd "$SRC/transcc/transcc.build/cpptool/xcode"
execute ${ARG[@]}
cd "$SRC"
clean_build "$BUILD_DIR";
}
do_build_result
return $EXITCODE
}
# Function to build CServer
# Build CServer via transcc and install the resulting CerberusGame binary as
# $BIN/cserver_$HOST (plus its data directory on Linux).
# Globals: SRC, BIN, TARGET, HOST, EXTENSION; EXITCODE is set by transcc.
do_cserver(){
EXITCODE=0
# Call transcc
transcc "CServer" "Desktop_Game" "cserver"
PROJECT_DIR="$SRC/cserver/cserver.build/glfw3/$TARGET"
# If transcc execution was successful; then update cerver.
[ $EXITCODE -eq 0 ] && {
# Clean out the olds and move new associated CServer files into the Cerberus bin directory.
# If the host system is Linux; then add the data directory if one is not present.
[ $HOST = "linux" ] && {
[ ! -d "$BIN/data" ] && {
mv "$PROJECT_DIR/Release/data" "$BIN/data";
}
[ -f "$BIN/cserver_$HOST" ] && { rm -f "$BIN/cserver_$HOST"; };
} || {
[ -d "$BIN/cserver_$HOST$EXTENSION" ] && { rm -rf "$BIN/cserver_$HOST$EXTENSION"; };
}
# Move the newly built CServer into the Cerberus bin directory.
mv "$PROJECT_DIR/Release/CerberusGame$EXTENSION" "$BIN/cserver_$HOST$EXTENSION"
# Clean up the .build directory.
clean_build "cserver" "dotbuild"
return $EXITCODE;
}
# Clean up the .build directory.
clean_build "cserver" "dotbuild"
return $EXITCODE
}
# Function to build the launcher
# Build the Cerberus launcher: via transcc on Linux (installs $ROOT/Cerberus),
# via xcodebuild on macOS (installs $ROOT/Cerberus.app).
# Globals: HOST, SRC, ROOT, EXTENSION, MACOS_BUNDLE_PREFIX; EXITCODE set by
# transcc/execute.
do_launcher(){
EXITCODE=0
PROJECT_DIR="$SRC/launcher/launcher.build"
# Use transcc to build the launcher on a Linux host
if [ "$HOST" = "linux" ]; then
transcc "Launcher" "C++_Tool" "launcher"
# Only update the launcher if the build was successful.
[ $EXITCODE -eq 0 ] && {
[ -f "$ROOT/Cerberus" ] && { rm -f "$ROOT/Cerberus"; }
mv "$PROJECT_DIR/cpptool/main_$HOST" "$ROOT/Cerberus";
};
else
# Execute xcodebuild
execute xcodebuild "PRODUCT_BUNDLE_IDENTIFIER=$MACOS_BUNDLE_PREFIX.launcher" -scheme Cerberus -configuration release -project $SRC/launcher/xcode/Cerberus.xcodeproj -derivedDataPath $SRC/launcher/launcher.build
# Only update the launcher if the build was successful.
[ $EXITCODE -eq 0 ] && {
[ -d "$ROOT/Cerberus$EXTENSION" ] && { rm -rf "$ROOT/Cerberus$EXTENSION"; }
mv "$PROJECT_DIR/Build/Products/Release/Cerberus.app" "$ROOT/Cerberus.app";
};
fi
# Clean up the .build directory.
clean_build "launcher" "dotbuild"
return $EXITCODE
}
# Function to build makedocs
# Build the makedocs tool via transcc and install it as $BIN/makedocs_$HOST.
# Globals: SRC, BIN, HOST; EXITCODE set by transcc.
do_makedocs(){
EXITCODE=0
# Call transcc to build makedocs.
transcc "Makedocs" "C++_Tool" "makedocs"
# Only update the makedocs if the build was successful.
[ $EXITCODE -eq 0 ] && {
[ -f "$BIN/makedocs_$HOST" ] && { rm -f "$BIN/makedocs_$HOST"; }
mv "$SRC/makedocs/makedocs.build/cpptool/main_$HOST" "$BIN/makedocs_$HOST";
}
# Clean up the .build directory.
clean_build "makedocs" "dotbuild"
return $EXITCODE
}
# Function to build the IDE Ted.
# Build the Ted IDE with Qt: run qmake into a fresh build-ted-Desktop-Release
# directory, then make (make install on Linux when a non-system Qt is used).
# Globals: SRC, BIN, HOST, EXTENSION, QT_SELECTED, MACOS_BUNDLE_PREFIX;
# EXITCODE set by execute. Leaves the shell cd'ed into the build dir.
do_ted(){
EXITCODE=0
PROJECT_DIR="$SRC/build-ted-Desktop-Release"
# As the qmake project expects there to be a directory with the name build-ted-Desktop-Release.
# It's best to make sure any old version is removed and a new one created before running qmake
[ -d "$PROJECT_DIR" ] && { rm -rf "$PROJECT_DIR"; }
mkdir "$PROJECT_DIR"
cd "$PROJECT_DIR"
[ $HOST = "macos" ] && {
MACOS_OPTS="QMAKE_TARGET_BUNDLE_PREFIX=$MACOS_BUNDLE_PREFIX";
}
# Run qmake on the ted project file to create the makefile.
execute qmake CONFIG+=release ../ted/ted.pro $MACOS_OPTS
# If qmake was successfully executed; then proceed to build the IDE.
[ $EXITCODE -eq 0 ] && {
# Not realy required with qmake cleaning out all Qt related binaries.
[ $HOST = "linux" ] && {
[ -f "$BIN/Ted" ] && { rm -f "$BIN/Ted"; };
} || {
[ -d "$BIN/Ted$EXTENSION" ] && { rm -rf "$BIN/Ted$EXTENSION"; };
}
# For Linux. Either
# A system Qt (path under /usr/) builds in place; other Qt kits install too.
[ $HOST = "linux" ] && {
if [[ $QT_SELECTED != *"/usr/"* ]]; then
execute "make" "install"
else
execute "make"
fi;
} || {
execute "make"
}
do_build_result;
}
clean_build "$PROJECT_DIR"
return $EXITCODE
}
# Generate the freedesktop.org launcher entry (Linux only); delegates to
# do_init_linux_desktop, which is defined elsewhere.
do_freedesktop(){
do_init_linux_desktop
}
# Build every tool in order (transcc, cserver, makedocs, launcher, Ted),
# stopping further builds once EXITCODE (set by the sub-builders) is non-zero.
# BUG FIXES vs. the previous `A && {B} || {C}` version:
#  - "NO QT SDK KITS INSTALLED" was also reported when do_ted merely failed;
#    it is now reported only when QT_INSTALLS is actually empty.
#  - do_all used to return 1 whenever DEPLY was empty (the trailing failed
#    test was the function's last status); it now returns EXITCODE.
do_all(){
do_header "\n====== BUILDING ALL TOOLS ======"
do_info "BUILDING TransCC"
do_transcc
if [ $EXITCODE -eq 0 ]; then
  do_info "BUILDING CServer"
  do_cserver
fi
if [ $EXITCODE -eq 0 ]; then
  do_info "BUILDING Makedocs"
  do_makedocs
fi
if [ $EXITCODE -eq 0 ]; then
  do_info "BUILDING Launcher"
  do_launcher
fi
if [ "$HOST" = "linux" ]; then
  do_info "Generating Free Desktop Launcher"
  do_freedesktop
fi
if [[ ${#QT_INSTALLS[@]} -gt 0 ]]; then
  if [ $EXITCODE -eq 0 ]; then
    do_info "BUILDING IDE Ted"
    do_ted
  fi
else
  do_error "NO QT SDK KITS INSTALLED"
fi
# NOTE(review): "DEPLY" looks like a typo for DEPLOY — confirm against where
# the flag is set before renaming.
if [ -n "$DEPLY" ]; then
  do_deploy
fi
return $EXITCODE
}
| true
|
32aea18116747158ece2b50e44dd10078c94ccaf
|
Shell
|
ternandsparrow/wild-orchid-watch-pwa
|
/scripts/rsync-up.sh
|
UTF-8
| 991
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# copies your working directory to some other computer.
# Intended to be used with the docker-shell.sh script when you're tight on
# internet quota. Start a VM in the cloud, use the docker-shell.sh to quickly
# get a dev server running. Continue working on your local machine and use this
# script to rsync to the VM where the dev server will see the changes to the
# volume mounted files and rebuild. Simples.
set -euo pipefail
# Work from the repo root (one level above this script's directory).
# Fixed: was backticks with an unquoted path, which breaks on paths with spaces.
cd "$(dirname "$0")/.."
sshTargetSpec=${1:?first param must be rsync target spec, like user@some.host:/path/to/workspace}
compressParam="-zz" # rsync tells me to use this over --compress
# Mirror the working tree, excluding VCS metadata, build output and local-only
# files; --delete keeps the remote an exact mirror.
rsync \
  --archive \
  --partial \
  --progress \
  "$compressParam" \
  . \
  --exclude=.git/ \
  --exclude=node_modules/ \
  --exclude=dist/ \
  --exclude=tags \
  --exclude=scripts/inat-taxa-cache* \
  --exclude=scripts/transform* \
  --exclude=error-screenshots \
  --exclude=public/wow-taxa-index.json \
  --exclude=.env.local \
  --delete \
  "$sshTargetSpec"
| true
|
4282e61b8482ab22130c6f62a335cf83c48d99e8
|
Shell
|
eleniums/blackjack
|
/machine/scripts/generate_training_data.sh
|
UTF-8
| 523
| 3.125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Run the blackjack simulation to produce machine-learning training data,
# then convert it into the format the trainer expects.
# Any extra arguments are forwarded verbatim to scripts/run.sh.
set -e
iterations=1000
output="./machine/testdata/training.csv"
converted="./machine/testdata/training_converted.csv"
echo "Running simulation to generate training data..."
# "$@" (quoted — was bare $@) preserves forwarded arguments containing spaces.
./scripts/run.sh -standard-ai -random-ai -num-players 0 -num-rounds "$iterations" -generate-training-data -training-data-file "$output" "$@"
echo "Converting training data to an appropriate format..."
go run ./machine/training/convert.go "$output" "$converted"
echo "Training data generated: $output"
echo "Training data converted: $converted"
| true
|
1cb719c96e67d9e6cf71d7e65eb2805e9d4d820a
|
Shell
|
stefanhummel/ADE_Db2_Lift
|
/ADE_sample_data/load_sample_data.sh
|
UTF-8
| 777
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
#################################################################################################
# Licensed Materials - Property of IBM
# (c) Copyright IBM Corporation 2019. All Rights Reserved.
#
# Note to U.S. Government Users Restricted Rights:
# Use, duplication or disclosure restricted by GSA ADP Schedule
# Contract with IBM Corp.
#
#################################################################################################
# Load sample data into the running ADE backend container.
# FIXES: the shebang used to appear after the license block (a shebang only
# takes effect on line 1); the not-found path now exits non-zero; modern $()
# substitution and quoting of the container name.
container_name=$(docker ps | grep ade_backend | awk -F " " '{print $NF}')
# Check if container_name is empty
if [ -z "$container_name" ]; then
  echo "Cannot find ADE backend container; please check that ADE containers are running"
  exit 1
fi
docker exec -w /backend/utils -it "${container_name}" python3 load_sample_data.pyc
| true
|
328edf4c64b47803150e84d529daccdaeea097ae
|
Shell
|
tapis-project/tapis-files
|
/release/release-update.sh
|
UTF-8
| 3,022
| 3.96875
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
# Build, tag and push docker image for the release cycle.
# This is the job run in Jenkins as part of jobs 1_release-start and 2_release-update in
# Jenkins folder TapisJava->2_Release-Service-<service>
# RC version must be the first and only argument to the script
# Existing docker login is used for push
# Docker image is created with a unique tag: tapis/<IMG_NAME>-<VER>-<YYYYmmddHHMM>-<COMMIT>
# - other tags are created and updated as appropriate
#
PrgName=$(basename "$0")
USAGE="Usage: $PrgName { <rc_version> }"
SVC_NAME="files"
REPO="tapis"
# Relative to the service's api module; existence is verified further below.
BUILD_DIR="../api/target"
# Release-candidate version (argument count is validated just below).
RC_VER=$1
# service contains three docker images as part of deployment
IMG1="tapis-files"
IMG2="tapis-files-workers"
# BUG FIX: this line previously reassigned IMG2, leaving IMG3 unset so every
# TAG_*3 built below was malformed (missing the image name).
IMG3="tapis-files-migrations"
# Check number of arguments
if [ $# -ne 1 ]; then
echo $USAGE
exit 1
fi
# Determine absolute path to location from which we are running
# and change to that directory.
export RUN_DIR=$(pwd)
export PRG_RELPATH=$(dirname "$0")
cd "$PRG_RELPATH"/. || exit
export PRG_PATH=$(pwd)
# Make sure service has been built
if [ ! -d "$BUILD_DIR" ]; then
echo "Build directory missing. Please build. Directory: $BUILD_DIR"
exit 1
fi
# Set variables used for build
# Version and git info are read from files produced by the maven build.
# NOTE(review): the TAG_*3 values rely on IMG3 being assigned earlier in this
# script — verify it is actually set.
VER=$(cat classes/tapis.version)
GIT_BRANCH_LBL=$(awk '{print $1}' classes/git.info)
GIT_COMMIT_LBL=$(awk '{print $2}' classes/git.info)
TAG_UNIQ1="${REPO}/${IMG1}:${VER}-$(date +%Y%m%d%H%M)-${GIT_COMMIT_LBL}"
TAG_UNIQ2="${REPO}/${IMG2}:${VER}-$(date +%Y%m%d%H%M)-${GIT_COMMIT_LBL}"
TAG_UNIQ3="${REPO}/${IMG3}:${VER}-$(date +%Y%m%d%H%M)-${GIT_COMMIT_LBL}"
TAG_RC1="${REPO}/${IMG1}:${VER}-rc${RC_VER}"
TAG_RC2="${REPO}/${IMG2}:${VER}-rc${RC_VER}"
TAG_RC3="${REPO}/${IMG3}:${VER}-rc${RC_VER}"
TAG_DEV1="${REPO}/${IMG1}:dev"
TAG_DEV2="${REPO}/${IMG2}:dev"
TAG_DEV3="${REPO}/${IMG3}:dev"
# If branch name is UNKNOWN or empty as might be the case in a jenkins job then
# set it to GIT_BRANCH. Jenkins jobs should have this set in the env.
if [ -z "$GIT_BRANCH_LBL" -o "x$GIT_BRANCH_LBL" = "xUNKNOWN" ]; then
GIT_BRANCH_LBL=$(echo "$GIT_BRANCH" | awk -F"/" '{print $2}')
fi
# Build images from Dockerfiles
echo "Building local images"
echo " VER= ${VER}"
echo " GIT_BRANCH_LBL= ${GIT_BRANCH_LBL}"
echo " GIT_COMMIT_LBL= ${GIT_COMMIT_LBL}"
docker build -f ./deploy/Dockerfile -t "${TAG_UNIQ1}" .
docker build -f ./deploy/Dockerfile.workers -t "${TAG_UNIQ2}" .
docker build -f ./deploy/Dockerfile.migrations -t "${TAG_UNIQ3}" .
# Alias each unique image with its release-candidate and dev tags.
echo "Creating RC and DEV image tags"
docker tag "$TAG_UNIQ1" "$TAG_RC1"
docker tag "$TAG_UNIQ2" "$TAG_RC2"
docker tag "$TAG_UNIQ3" "$TAG_RC3"
docker tag "$TAG_UNIQ1" "$TAG_DEV1"
docker tag "$TAG_UNIQ2" "$TAG_DEV2"
docker tag "$TAG_UNIQ3" "$TAG_DEV3"
echo "Pushing images and tags to docker hub."
# NOTE: Use current login. Jenkins job does login
docker push "$TAG_UNIQ1"
docker push "$TAG_UNIQ2"
docker push "$TAG_UNIQ3"
docker push "$TAG_RC1"
docker push "$TAG_RC2"
docker push "$TAG_RC3"
docker push "$TAG_DEV1"
docker push "$TAG_DEV2"
docker push "$TAG_DEV3"
cd "$RUN_DIR"
| true
|
97f7b507709ad4b581d0eadad8c6d2983ac5bcd0
|
Shell
|
Valuimnee/linux-command-line-networking
|
/execve-hash/execve.txt
|
UTF-8
| 94
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
T=$(sha1sum "$1" | cut -b 1-40)
echo $T
p=/home/calapova_maria/dir
cp "$1" $p/$T
| true
|
9d0946695248d76efdfc4f45526d6622191a76d5
|
Shell
|
ContainerSolutions/core-plans
|
/yarn/plan.sh
|
UTF-8
| 1,126
| 3.21875
| 3
|
[
"Apache-2.0"
] |
permissive
|
pkg_name=yarn
pkg_origin=core
pkg_version=0.22.0
pkg_source="https://yarnpkg.com/downloads/$pkg_version/yarn-v$pkg_version.tar.gz"
pkg_shasum=e295042279b644f2bc3ea3407a2b2fb417a200d35590b0ec535422d21cf19a09
pkg_maintainer="The Habitat Maintainers <humans@habitat.sh>"
pkg_description="Yarn is a package manager for your code. It allows you to use and share code with other developers from around the world. Yarn does this quickly, securely, and reliably so you don’t ever have to worry."
pkg_upstream_url=https://yarnpkg.com/
pkg_license=('BSD-2-Clause')
pkg_bin_dirs=(bin)
pkg_build_deps=(
core/rsync
)
pkg_deps=(
core/coreutils
core/node
core/sed
)
# Yarn unpacks into dist, so fix that
do_unpack() {
build_line "Unpacking $pkg_filename"
pushd "$HAB_CACHE_SRC_PATH" > /dev/null
mkdir -pv "$pkg_dirname"
tar --strip-components=1 --directory="$pkg_dirname" -xf "$pkg_filename"
popd > /dev/null
}
do_build() {
return 0
}
do_install() {
rsync --archive --relative \
--exclude bin/node-gyp \
--exclude bin/yarnpkg \
--exclude bin/yarn.cmd \
--exclude end_to_end_tests \
. "$pkg_prefix"
}
| true
|
e65c1a092a06018935b3edeb6bae811e512e651e
|
Shell
|
MagicalStrangeQuark/Linux
|
/Sample/Case.sh
|
UTF-8
| 266
| 3.328125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/zsh
NUMERO=3;
case $NUMERO in
1)
echo "1";
;;
2||3)
echo "2|3";
;;
4|5|6|7|8|9|10)
echo "4|5|6|7|8|9|10";
;;
*)
echo "Não foi possível encontrar o valor presente";
;;
esac
| true
|
0c536a407e92d270187525c04875bfbeaf6dc647
|
Shell
|
illwieckz/datarepotools
|
/drt-gittest
|
UTF-8
| 584
| 3.4375
| 3
|
[
"ISC"
] |
permissive
|
#! /bin/sh
### Legal
#
# Author: Thomas Debesse <dev@illwieckz.net>
# License: ISC
#
_error () {
printf 'ERROR: %s\n' "${1}" >&2
exit 1
}
if ! [ -x "$(which git)" ]
then
_error 'git not installed'
fi
if ! [ "$(git rev-parse --is-inside-work-tree 2>/dev/null)" = 'true' ]
then
_error 'not within git repository'
fi
if ! [ "$(git status --short 2>/dev/null)" = '' ]
then
_error 'repository has untracked or unstaged files, please commit changes first'
fi
if ! [ "$(git diff --shortstat 2>/dev/null | tail -n1)" = '' ]
then
_error 'repository is dirty, please commit changes first'
fi
#EOF
| true
|
4a259f67e5121d8e5183d973f5bdb3488b3edd44
|
Shell
|
dcycle/drupal_inquicker
|
/scripts/ci.sh
|
UTF-8
| 413
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Run tests on Circle CI.
#
set -e
echo ""
echo " => Running fast tests"
echo ""
./scripts/test.sh
echo ""
echo " => Deploying Drupal 9"
echo ""
./scripts/deploy.sh 9
echo ""
echo " => Running some tests on the running Drupal 8 environment"
echo ""
./scripts/test-running-environment.sh
echo ""
echo " => Destroying Drupal 9"
echo ""
docker-compose down -v
echo ""
echo " => All tests OK!"
echo ""
| true
|
9afa7ddf7923d0b7b29c3a2208dabeb23a4171a1
|
Shell
|
luyongxi/deep_share
|
/experiments/scripts/archive/train_cls_branch_scratch.sh
|
UTF-8
| 744
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
iters=$1
stepsize=$2
base_lr=$3
model=$4
last_low_rank=$5
rounds=$6
aff_type=$7
set -x
set -e
export PYTHONUNBUFFERED="True"
LOG="../logs/train_dynamic_branch_scratch_${aff_type}.txt.`date +'%Y-%m-%d_%H-%M-%S'`"
exec &> >(tee -a "$LOG")
echo Logging output to "$LOG"
cd ../..
time ./tools/train_cls.py --gpu 0 \
--traindb celeba_train \
--valdb celeba_val \
--iters ${iters} \
--base_lr ${base_lr} \
--clip_gradients 20 \
--loss Sigmoid \
--model ${model} \
--last_low_rank ${last_low_rank} \
--use_svd \
--exp ${model}-branch-scratch-${last_low_rank}-${aff_type} \
--num_rounds ${rounds} \
--stepsize ${stepsize} \
--aff_type ${aff_type} \
--share_basis \
--use_bn
| true
|
01dc5a44d4706fd0ddd31bb71001e690f70bd7ff
|
Shell
|
harryrackmil/manhattan-trees
|
/treeCount.sh
|
UTF-8
| 1,101
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/bash
cURL https://data.cityofnewyork.us/api/views/4eh7-xcm8/rows.csv?accessType=DOWNLOAD | grep -v ^1230\" | grep -v \"1230 | sed "s/a\ Circle,/a\ Circle/" | sed "s/\"//" > tmp.man
diff data/ManhattanTree.csv tmp.man > tmp.diff
if [ ! -e data/ManhattanTree.csv -o -s tmp.diff ]
then
rm -r data
mkdir data
mv tmp.man data/ManhattanTree.csv
hdfs dfs -rmr data/trees
hdfs dfs -mkdir data/trees
hdfs dfs -copyFromLocal data/ManhattanTree.csv data/trees/tree.csv
hdfs dfs -rmr output
hdfs dfs -mkdir output
gradle clean jar
hadoop jar ./build/libs/trees.jar data/trees/tree.csv output/specSt output/st output/spec
hdfs dfs -copyToLocal output/specSt/part-00000 data/treesPerStSp.csv
hdfs dfs -copyToLocal output/st/part-00000 data/treesPerSt.csv
hdfs dfs -copyToLocal output/spec/part-00000 data/treesPerSp.csv
grep '[0-9].*,' data/treesPerSt.csv | sed "s/st,/street,/" | grep 'street' | sed "s/[ a-z]//g" > data/stNum.csv
grep '[0-9].*,' data/treesPerSt.csv | grep 'avenue' | sed "s/[ a-z]//g" > data/aveNum.csv
fi
rm tmp.*
/usr/bin/Rscript treePlots.R
| true
|
cd839f36673c584869020e24a049c74b15fa2c91
|
Shell
|
jgarte/adventofcode-2020
|
/11-1.sh
|
UTF-8
| 1,378
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/sh
height="$(wc -l < "$3")"
height="$(( height + 1 ))"
width="$(head -1 "$3")"
width="${#width}"
around() {
r="$1"
c="$2"
head -"$(( r - 1 ))" > /dev/null # smaller rows
head -c"$(( c - 1 ))" > /dev/null # smaller columns
head -c3
head -1 > /dev/null # rest of the line
head -c"$(( c - 1 ))" > /dev/null # smaller columns
head -c1 # left of the marker
head -c1 > /dev/null # the marker
head -c1 # right of the marker
head -1 > /dev/null # rest of the line
head -c"$(( c - 1 ))" > /dev/null # smaller columns
head -c3
}
count() {
tr -cd "$1" | wc -c
}
f1="$(mktemp)"
f2="$(mktemp)"
yes '.' | head -"$(( width + 2 ))" | tr -d '\n' >> "$f2"
echo >> "$f2"
sed 's/.*/.&./' "$3" >> "$f2"
if [ ! -z "$(tail -c1 "$3")" ]; then echo >> "$f2"; fi
yes '.' | head -"$(( width + 2 ))" | tr -d '\n' >> "$f2"
echo >> "$f2"
while ! diff -q "$f1" "$f2"; do
cat "$f2" > "$f1"
{
head -1
for r in $(seq 1 "$height"); do
head -c1
for c in $(seq 1 "$width"); do
case "$(head -c1)" in
'L') if [ "$(around "$r" "$c" < "$f1" | count '#')" -eq 0 ]; then printf '#'; else printf 'L'; fi ;;
'#') if [ "$(around "$r" "$c" < "$f1" | count '#')" -ge 4 ]; then printf 'L'; else printf '#'; fi ;;
'.') printf '.' ;;
esac
done
head -c2 # point and newline
done
head -1
} < "$f1" > "$f2"
cat "$f2"
echo
done
count '#' < "$f1"
rm "$f1" "$f2"
| true
|
47ef83b731c3dbb7996868b2e9bb869cc21a751f
|
Shell
|
dierobotsdie/random
|
/bin/copyhscripts
|
UTF-8
| 1,196
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
hadoophome=~/HADOOP/hadoop-3.0.0-SNAPSHOT
common="hadoop-common-project/hadoop-common/src/main"
hdfs="hadoop-hdfs-project/hadoop-hdfs/src/main"
yarn="hadoop-yarn-project/hadoop-yarn"
mapred="hadoop-mapreduce-project"
kms="hadoop-common-project/hadoop-kms/src/main"
function copier
{
local dest=$1
shift
for i in $@
do
cp $i $dest
chmod a+rx ${dest}/$(basename $i)
done
}
copier ${hadoophome}/bin \
${common}/bin/hadoop ${common}/bin/slaves.sh ${common}/bin/rcc \
${hdfs}/bin/hdfs \
${yarn}/bin/yarn \
${mapred}/bin/mapred
copier ${hadoophome}/libexec \
${common}/bin/hadoop-functions.sh ${common}/bin/hadoop-config.sh \
${hdfs}/bin/hdfs-config.sh \
${mapred}/bin/mapred-config.sh \
${yarn}/bin/yarn-config.sh \
${kms}/libexec/kms-config.sh
copier ${hadoophome}/libexec/shellprofile.d \
${hdfs}/shellprofile.d/* \
${yarn}/shellprofile.d/* \
${mapred}/shellprofile.d/*
copier ${hadoophome}/sbin \
${common}/bin/*-daemon.sh \
${common}/bin/*-daemons.sh \
${hdfs}/bin/start-dfs.sh \
${hdfs}/bin/stop-dfs.sh \
${yarn}/bin/start-yarn.sh \
${yarn}/bin/stop-yarn.sh \
${yarn}/bin/yarn-daemon*.sh \
${kms}/sbin/kms.sh
| true
|
a89db3e35ea709999bedcc2ec54b2f8e8e4906b4
|
Shell
|
sasurau4/9ccs
|
/debug.sh
|
UTF-8
| 165
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
execute() {
input="$1"
./9ccs "$input" > debug.s
cc -o debug debug.s test_func.o
./debug
echo "status: $?"
}
execute ./test/debug.c
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.