blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
00f9f9edd3a035acd3b488aa6f69e5800319aa82
|
Shell
|
mbirth/UnixTools
|
/bzr2git.sh
|
UTF-8
| 571
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/bash
# bzr2git.sh - convert the Bazaar branch in the current working directory
# into a new git repository, installing the bzr fastimport plugin on first
# use.
#
# FIX: the original assigned to PWD (a shell special variable maintained by
# bash) just to compute the directory name; derive it directly instead, and
# use $( ) instead of backticks.
DIRNAME=$(basename "$(pwd)")
if [ ! -d "$HOME/.bazaar/plugins/fastimport" ]; then
  echo "Fastimport plugin not found. Installing..."
  bzr branch lp:bzr-fastimport ~/.bazaar/plugins/fastimport
fi
echo "Processing: $DIRNAME"
# Refuse to run outside a Bazaar branch.
if [ ! -d ".bzr" ]; then
  echo "No .bzr directory here. Is this a BZR branch?"
  exit 1
fi
# Refuse to clobber an existing git repository.
if [ -d ".git" ]; then
  echo ".git already exists. Rename it and try again."
  exit 2
fi
echo "Creating git repo in $DIRNAME"
git init
echo "Ex-/Importing repository..."
bzr fast-export | git fast-import
echo "All done."
| true
|
5353e1482d0f6c798bb72bcccf0393825721ba9d
|
Shell
|
mihaicris/dotfiles
|
/scripts/ee
|
UTF-8
| 1,477
| 3.578125
| 4
|
[] |
no_license
|
#!/usr/bin/env zsh
# Interactively switch the development environment used by the iOS and
# Android apps: present a menu of environments, then rewrite the base URL
# in the iOS URLs.plist files and the Android build.gradle files.
# NOTE: zsh semantics are relied on throughout - unquoted $ENVIRONMENTS
# expands to array elements, arrays are 1-based, and read -A fills an array.
# FIX: corrected the user-facing typo "Environement" in the heading.
heading "Change Development Environment"
ENVIRONMENTS=(
'Production;https://www.adoreme.com'
'DELTA 1;delta-1'
'DELTA 2;delta-2'
'DELTA 3;delta-3'
'GAMMA 1;gamma-1'
'GAMMA 2;gamma-2'
'GAMMA 3;gamma-3'
'SIGMA 1;sigma-1'
'SIGMA 2;sigma-2'
'SIGMA 3;sigma-3'
'OMEGA 1;omega-1'
'OMEGA 2;omega-2'
'OMEGA 3;omega-3'
'ALPHA 1;alpha-1'
'ALPHA 2;alpha-2'
'ALPHA 3;alpha-3'
'CORE 1;core-1'
'CORE 2;core-2'
'CORE 3;core-3'
)
NAMES=()
BASEURLS=()
# Split each "display name;slug" pair into parallel arrays.
for ENVIRONMENT in $ENVIRONMENTS; do
IFS=";" read -r -A arr <<< "${ENVIRONMENT}"
NAMES=("${NAMES[@]}" "${arr[1]}")
BASEURLS=("${BASEURLS[@]}" "${arr[2]}")
done
# Force `select` to render a single column.
export COLUMNS=1
select NAME in $NAMES; do
if [[ -n $NAME ]]; then
break
else
print "Wrong selection."
fi
done
# $REPLY is the 1-based menu index; BASEURLS is 1-based in zsh, so they line up.
ENVIRONMENT=${BASEURLS[$REPLY]}
# Entry 1 is Production, whose stored value is already a full URL;
# every other entry is a slug to be embedded in a staging hostname.
if [[ "$REPLY" -eq "1" ]]; then
BASE_URL="$ENVIRONMENT"
WEB_URL="$ENVIRONMENT"
else
BASE_URL="http://gw-main-ns-${ENVIRONMENT}.stg.adoreme.com"
WEB_URL="http://adoreme-js-ns-${ENVIRONMENT}.stg.adoreme.com"
fi
# NOTE(review): WEB_URL is set but never used below - possibly consumed by
# a sourced helper or a later revision; left as-is. TODO confirm.
#iOS
find ./Sources \
-name "URLs.plist" \
-print \
-exec /usr/libexec/PlistBuddy \
-c "Set :BaseUrl ${BASE_URL}" \
{} \;
# Android
PATTERN="\"BASE_URL\", \'\"\(.*\)\"\'"
REPLACE="\"BASE_URL\", \'\"${BASE_URL}\/\"\'"
find ../../android-app/app \
-name "build.gradle" \
-print \
-exec sed -i "" -e "s|${PATTERN}|${REPLACE}|" \
{} \;
| true
|
1b0696c6f0f7a3f9807e7e93879937bdb0d53ed6
|
Shell
|
CESNET/glite-testsuites
|
/LB/tests/lb-test-statistics.sh
|
UTF-8
| 11,400
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Copyright (c) Members of the EGEE Collaboration. 2004-2010.
# See http://www.eu-egee.org/partners for details on the copyright holders.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# show help and usage
# Name of this script, used in the usage message below.
progname=$(basename "$0")

# Print the test-suite description followed by the usage/options text.
showHelp()
{
cat << EndHelpHeader
Script for testing statistic functions provided by the L&B Service
Prerequisities:
- LB local logger, interlogger, and server running
- environment variables set:
GLITE_LB_SERVER_PORT - if nondefault port (9000) is used
GLITE_LB_LOGGER_PORT - if nondefault port (9002) is used
GLITE_WMS_QUERY_SERVER
GLITE_WMS_LOG_DESTINATION
Tests called:
job registration
event logging
Returned values:
Exit TEST_OK: Test Passed
Exit TEST_ERROR: Test Failed
Exit 2: Wrong Input
EndHelpHeader
printf '%s\n' \
"Usage: $progname [OPTIONS] [event file prefix]" \
"Options:" \
" -h | --help Show this help message." \
" -o | --output 'file' Redirect all output to the 'file' (stdout by default)." \
" -t | --text Format output as plain ASCII text." \
" -c | --color Format output as text with ANSI colours (autodetected by default)." \
" -x | --html Format output as html." \
""
}
# Drive one job from Submitted to Running by logging the full chain of
# intermediate L&B events (NetworkServer -> WorkloadManager ->
# JobController -> LogMonitor) via ${LBLOGEVENT}.
# Arguments: $1 - jobid to log events against.
# Globals:   LBLOGEVENT (logger binary), datestr (stamps the fake CE id).
# Each call prints the next sequence code, which is threaded back through
# EDG_WL_SEQUENCE - the event order is significant; do not reorder.
submitted_to_running () {
EDG_WL_SEQUENCE="UI=000003:NS=0000000000:WM=000000:BH=0000000000:JSS=000000:LM=000000:LRMS=000000:APP=000000:LBS=000000"
EDG_WL_SEQUENCE=`${LBLOGEVENT} -j $1 -c $EDG_WL_SEQUENCE -s NetworkServer -e Accepted --from="UserInterface" --from_host="sending component hostname" --from_instance="sending component instance" --local_jobid="new jobId (Condor Globus ...)"`
EDG_WL_SEQUENCE=`${LBLOGEVENT} -j $1 -c $EDG_WL_SEQUENCE -s NetworkServer -e EnQueued --queue="destination queue" --job="job description in receiver language" --result=OK --reason="detailed description of transfer"`
EDG_WL_SEQUENCE=`${LBLOGEVENT} -j $1 -c $EDG_WL_SEQUENCE -s WorkloadManager -e DeQueued --queue="queue name" --local_jobid="new jobId assigned by the receiving component"`
EDG_WL_SEQUENCE=`${LBLOGEVENT} -j $1 -c $EDG_WL_SEQUENCE -s WorkloadManager -e HelperCall --helper_name="name of the called component" --helper_params="parameters of the call" --src_role=CALLING`
# The Match event records the destination CE; CE$datestr$$ is the synthetic
# CE name later queried by the statistics checks.
EDG_WL_SEQUENCE=`${LBLOGEVENT} -j $1 -c $EDG_WL_SEQUENCE -s WorkloadManager -e Match --dest_id="CE$datestr$$"`
EDG_WL_SEQUENCE=`${LBLOGEVENT} -j $1 -c $EDG_WL_SEQUENCE -s WorkloadManager -e HelperReturn --helper_name="name of the called component" --retval="returned data" --src_role=CALLING`
EDG_WL_SEQUENCE=`${LBLOGEVENT} -j $1 -c $EDG_WL_SEQUENCE -s WorkloadManager -e EnQueued --queue="destination queue" --job="job description in receiver language" --result=OK --reason="detailed description of transfer"`
EDG_WL_SEQUENCE=`${LBLOGEVENT} -j $1 -c $EDG_WL_SEQUENCE -s JobController -e DeQueued --queue="queue name" --local_jobid="new jobId assigned by the receiving component"`
EDG_WL_SEQUENCE=`${LBLOGEVENT} -j $1 -c $EDG_WL_SEQUENCE -s JobController -e Transfer --destination="LRMS" --dest_host="destination hostname" --dest_instance="destination instance" --job="job description in receiver language" --result=OK --reason="detailed description of transfer" --dest_jobid="destination internal jobid"`
EDG_WL_SEQUENCE=`${LBLOGEVENT} -j $1 -c $EDG_WL_SEQUENCE -s LogMonitor -e Accepted --from="JobController" --from_host="sending component hostname" --from_instance="sending component instance" --local_jobid="new jobId (Condor Globus ...)"`
EDG_WL_SEQUENCE=`${LBLOGEVENT} -j $1 -c $EDG_WL_SEQUENCE -s LogMonitor -e Transfer --destination="LRMS" --dest_host="destination hostname" --dest_instance="destination instance" --job="job description in receiver language" --result=OK --reason="detailed description of transfer" --dest_jobid="destination internal jobid"`
EDG_WL_SEQUENCE=`${LBLOGEVENT} -j $1 -c $EDG_WL_SEQUENCE -s LogMonitor -e Running --node="${CE_NODE:-worker node}"`
}
# Drive one job from Running to Done (and Cleared) by logging the final
# LogMonitor events. The hard-coded starting sequence code continues from
# where submitted_to_running left off.
# Arguments: $1 - jobid to log events against.
# Globals:   LBLOGEVENT (logger binary).
running_to_done () {
EDG_WL_SEQUENCE="UI=000003:NS=0000000004:WM=000010:BH=0000000000:JSS=000004:LM=000006:LRMS=000000:APP=000000:LBS=000000"
EDG_WL_SEQUENCE=`${LBLOGEVENT} -j $1 -c $EDG_WL_SEQUENCE -s LogMonitor -e Done --status_code=OK --reason="reason for the change" --exit_code=0`
EDG_WL_SEQUENCE=`${LBLOGEVENT} -j $1 -c $EDG_WL_SEQUENCE -s LogMonitor -e Clear --reason=USER`
}
# Read common definitions and functions shared by the LB test suite
# (provides test_start/test_done/test_failed, setOutput*, check_binaries,
# the $SYS_* tool paths, try_purge, etc.).
COMMON=lb-common.sh
NOOFJOBS=10
SEC_COVERAGE=600
if [ ! -r ${COMMON} ]; then
printf "Common definitions '${COMMON}' missing!"
exit 2
fi
source ${COMMON}
# Default log file is <pid>.tmp; flag records whether -o was given.
logfile=$$.tmp
flag=0
# Parse command-line options; any non-option argument is taken as the
# event file prefix.
while test -n "$1"
do
case "$1" in
"-h" | "--help") showHelp && exit 2 ;;
# NB: 'logfile=$1 flag=1' is two assignments executed in the current
# shell (no command follows), not an env-prefixed command.
"-o" | "--output") shift ; logfile=$1 flag=1 ;;
"-t" | "--text") setOutputASCII ;;
"-c" | "--color") setOutputColor ;;
"-x" | "--html") setOutputHTML ;;
# Undocumented in showHelp: override the number of test jobs.
"-n" | "--noofjobs") shift ; NOOFJOBS=$1 ;;
*) EVENTFILE=$1 ;;
esac
shift
done
# redirecting all output to $logfile (currently disabled, see the
# commented-out redirection after the main block)
#touch $logfile
#if [ ! -w $logfile ]; then
# echo "Cannot write to output file $logfile"
# exit $TEST_ERROR
#fi
DEBUG=2
##
# Starting the test
#####################
# The whole test body runs inside this brace group so its output could be
# redirected to $logfile in one place (redirection currently disabled).
{
test_start
# check_binaries: verify every external tool the test needs is on PATH.
printf "Testing if all binaries are available"
check_binaries $GRIDPROXYINFO $SYS_GREP $SYS_SED $LBJOBREG $SYS_AWK $LBJOBSTATUS $SYS_DATE $LB_STATS $LB_FROMTO $SYS_BC
if [ $? -gt 0 ]; then
test_failed
else
test_done
fi
printf "Testing credentials"
check_credentials_and_generate_proxy
if [ $? != 0 ]; then
test_end
exit 2
fi
# datestr makes the synthetic CE name (CE$datestr$$) unique per run.
datestr=`$SYS_DATE +%Y%m%d%H%M`
# Expand "1 2 ... NOOFJOBS"; eval is needed because brace expansion
# happens before parameter expansion.
SEQUENCE=`eval "echo {1..${NOOFJOBS}}"`
for i in $SEQUENCE
do
# Register job:
jobid[$i]=`${LBJOBREG} -m ${GLITE_WMS_QUERY_SERVER} -s application | ${SYS_GREP} "new jobid" | ${SYS_AWK} '{ print $3 }'`
if [ -z ${jobid[$i]} ]; then
test_failed
print_error "Failed to register job"
fi
done
printf "Test jobs registered."
test_done
# The sleeps space the event batches out in time so the duration
# statistics checked below are non-trivial.
printf "Sleeping for 10 seconds... "
sleep 10
printf "Sending events for all test jobs, Submitted => Running "
for i in $SEQUENCE
do
submitted_to_running ${jobid[$i]}
done
test_done
printf "Sleeping for 10 seconds... "
sleep 10
printf "Sending events for all test jobs, Running => Done "
for i in $SEQUENCE
do
running_to_done ${jobid[$i]}
done
test_done
printf "Sleeping for 10 seconds... "
sleep 10
# printf "Sending events for all test jobs, Running => Done"
# for i in $SEQUENCE
# do
# running_to_done ${jobid[$i]}
# done
# test_done
# Query the job rate for our synthetic CE and check it is positive.
# expected_rate is only informational (jobs per second over the window).
expected_rate=`echo "scale=7;$NOOFJOBS/$SEC_COVERAGE" | bc`
printf "Getting job rate (should be around $expected_rate, testing if > 0): "
#rate=`$LB_STATS -n $SEC_COVERAGE CE$datestr$$ 5 | ${SYS_GREP} "Average" | ${SYS_AWK} '{ print $6 }'`
rate=`$LB_STATS CE$datestr$$ 5 | ${SYS_GREP} "Average" | ${SYS_AWK} '{ print $6 }'`
# bc handles the floating-point comparison; prints 1 when true.
cresult=`$SYS_ECHO "$rate > 0" | $SYS_BC`
printf "$rate"
if [ "$cresult" -eq "1" ]; then
test_done
else
test_failed
print_error "Rate other than expected"
fi
# Average Submitted(1) -> Running(5) duration for our CE; the two sleeps
# above mean it should exceed 10 seconds.
printf "Getting average 'Submitted' -> 'Running' transfer time (should be a number > 10): "
$LB_FROMTO CE$datestr$$ 1 5 > fromto.out.$$
average=`$SYS_CAT fromto.out.$$ | ${SYS_GREP} "Average duration" | ${SYS_AWK} '{ print $5 }'`
cresult=`$SYS_ECHO "$average > 10" | $SYS_BC`
printf "$average"
if [ "$cresult" -eq "1" ]; then
test_done
else
test_failed
print_error "Average value other than expected"
fi
# Dispersion index from the same report must be non-negative.
printf "Getting the dispersion index (should be a number >= 0): "
dispersion=`$SYS_CAT fromto.out.$$ | ${SYS_GREP} "Dispersion index" | ${SYS_AWK} '{ print $3 }'`
cresult=`$SYS_ECHO "$dispersion >= 0" | $SYS_BC`
printf "$dispersion"
if [ "$cresult" -eq "1" ]; then
test_done
else
test_failed
print_error "Dispersion index value other than expected"
fi
$SYS_RM fromto.out.$$
# Average Submitted(1) -> Done(6)/OK(0) duration; reaching Done includes
# the Running phase plus another sleep, so it should exceed 20 seconds.
printf "Getting average 'Submitted' -> 'Done/OK' transfer time (should be a number > 20): "
$LB_FROMTO CE$datestr$$ 1 6 0 > fromto.out.$$
doneaverage=`$SYS_CAT fromto.out.$$ | ${SYS_GREP} "Average duration" | ${SYS_AWK} '{ print $5 }'`
donecresult=`$SYS_ECHO "$doneaverage > 20" | $SYS_BC`
printf "$doneaverage"
if [ "$donecresult" -eq "1" ]; then
test_done
else
test_failed
print_error "Average value other than expected"
fi
# Sanity: Done/OK happens after Running, so its average duration must be
# the larger one.
# FIX: the original progress message stated the comparison backwards
# ("'Submitted' -> 'Running' should take longer ..."); the test itself
# (doneaverage > average) and the error message were already correct.
printf "Comparing. 'Submitted' -> 'Done/OK' should take longer than 'Submitted' -> 'Running': "
donecresult=`$SYS_ECHO "$doneaverage > $average" | $SYS_BC`
if [ "$donecresult" -eq "1" ]; then
printf "OK"
test_done
else
test_failed
print_error "Done earlier than Running"
fi
# Regression for bug #73716: query Submitted -> Running statistics for ALL
# CEs and validate every reported average and dispersion index.
printf "Long term (Regression into bug #73716): Getting average 'Submitted' -> 'Running' transfer times (should be numbers >= 0):"
$LB_FROMTO ALL 1 5 > fromto.out.$$
# Parallel arrays: per-CE averages, the CE names, and dispersion indices,
# all scraped from the same report (so indices line up by row order).
averages=( $($SYS_CAT fromto.out.$$ | ${SYS_GREP} "Average duration" | ${SYS_SED} 's/^.*": //' | ${SYS_SED} 's/ s.*$//') )
$SYS_CAT fromto.out.$$ | ${SYS_GREP} "Average duration" | $SYS_SED 's/":.*$//' | $SYS_SED 's/^.*"//' > fromto.out.ces.$$
dispersions=( $($SYS_CAT fromto.out.$$ | ${SYS_GREP} "Dispersion index" | ${SYS_AWK} '{ print $3 }') )
printf "\n"
let i=0
# NB: the while loop is the last stage of a pipeline, so it runs in a
# subshell; i's increments are only visible inside the loop, which is all
# this code needs.
$SYS_CAT fromto.out.ces.$$ | while read ce; do
printf "$i.\t$ce:\t${averages[$i]}\t${dispersions[$i]}"
cresult=`$SYS_ECHO "${averages[$i]} >= 0" | $SYS_BC`
if [ "$cresult" -ne "1" ]; then
test_failed
print_error "Bad average value"
fi
# Also check dispersion
cresult=`$SYS_ECHO "${dispersions[$i]} >= 0" | $SYS_BC`
if [ "$cresult" -eq "1" ]; then
test_done
else
test_failed
print_error "Bad dispersion value"
fi
let i++
done
$SYS_RM fromto.out.$$
$SYS_RM fromto.out.ces.$$
# Long-term check: query average Running(5) rates over the last 7200 s for
# ALL CEs and validate that every reported rate is non-negative.
printf "Long term: Getting average 'Running' rates (should be numbers >= 0):"
$LB_STATS -n 7200 ALL 5 > rates.out.$$
# Parallel data scraped from the same report: the per-CE rates and the CE
# names (row order keeps the indices aligned).
rates=( $(${SYS_GREP} "Average" rates.out.$$ | ${SYS_SED} 's/^.*": //' | ${SYS_SED} 's/ jobs.*$//') )
$SYS_CAT rates.out.$$ | ${SYS_GREP} "Average" | $SYS_SED 's/":.*$//' | $SYS_SED 's/^.*"//' > rates.out.ces.$$
printf "\n"
let i=0
$SYS_CAT rates.out.ces.$$ | while read ce; do
printf "$i.\t$ce:\t${rates[$i]}"
cresult=`$SYS_ECHO "${rates[$i]} >= 0" | $SYS_BC`
if [ "$cresult" -eq "1" ]; then
test_done
else
test_failed
# FIX: this loop validates *rates*; the original error message ("Bad
# dispersion value") was copy-pasted from the dispersion check above.
print_error "Bad rate value"
fi
let i++
done
$SYS_RM rates.out.$$
$SYS_RM rates.out.ces.$$
# Purge the test jobs from the server so repeated runs don't accumulate.
joblist=$$_jobs_to_purge.txt
for i in $SEQUENCE
do
echo ${jobid[$i]} >> ${joblist}
done
try_purge ${joblist}
test_end
}
# Optional log-file redirection of the whole test block (disabled; pairs
# with the commented-out setup near the option parsing above).
#} &> $logfile
#if [ $flag -ne 1 ]; then
# cat $logfile
# $SYS_RM $logfile
#fi
exit $TEST_OK
| true
|
fc5a2a251c2a289c52360e440cb7f9c4c2a5866c
|
Shell
|
ram-ku/Weekly-Problems
|
/2013–2014/Week 06/Beginner/crooney/quine.sh
|
UTF-8
| 326
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
# A bash quine: running this prints the original script's own source.
# The variables hold the building blocks (q=single quote, r=double quote,
# s='=', n/m=newline escapes, p=the shebang, u=the tail of the program),
# and the echo/printf lines reproduce the assignments and then $u itself.
# NOTE(review): these explanatory comments were added during review; the
# printed output reproduces the original, comment-free script, so the file
# is no longer byte-for-byte self-reproducing. Remove the comments to
# restore the exact quine property. No code tokens were changed.
q="'"
r='"'
s="="
u='echo -e $p$n$n\\q$s$r$q$r$n\\r$s$q$r$q$n\\s$s$r$s$r\necho u$s$q$u$q\necho -e p$s$r$p$r$n\\n$s$r$m$r\necho m$s$q\\\\\\\\n$q\nprintf "$u"'
p="#!/bin/bash"
n="\n"
m='\\n'
echo -e $p$n$n\q$s$r$q$r$n\r$s$q$r$q$n\s$s$r$s$r
echo u$s$q$u$q
echo -e p$s$r$p$r$n\n$s$r$m$r
echo m$s$q\\\\n$q
printf "$u"
| true
|
3362fc3325a3cd0a821768ab30a8700e183eca7c
|
Shell
|
game-forest/Citrus
|
/Examples/Tests/hooks/pre-commit
|
UTF-8
| 226
| 2.9375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Git pre-commit hook: run PngOptimizerCL (lossless, --KeepPixels) on every
# staged .png file and re-stage the optimized result.
# FIX: the original used $(CITRUS_DIRECTORY), which *executes*
# CITRUS_DIRECTORY as a command; ${CITRUS_DIRECTORY} expands the variable.
# FIX: the grep pattern now escapes the dot so names like "foo_png" cannot
# match.
# NOTE(review): $'\n' is a bashism under #!/bin/sh; kept because the
# original relied on it - confirm which shell actually runs the hook.
IFS=$'\n' && for file in `git diff --cached --name-only | grep '\.png$'`
do
if [ -f "$file" ]; then
"${CITRUS_DIRECTORY}/Orange/Toolchain.Win/PngOptimizerCL.exe" --KeepPixels "$file" && git add "$file"
fi
done
| true
|
0d6d47b122ef75b810dc7c9fc040359777694373
|
Shell
|
alishakiba/DPA
|
/run_ipynb2py_versioning.sh
|
UTF-8
| 370
| 3.390625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# create python scripts out of the jupyter notebooks
#
# How-to-run:
#
# cd DPA
# ./run_ipynb2py_versioning.sh
#
# For each notebook in the current directory: derive the stem (text before
# the first dot), convert it to a .py script with jupytext, and move the
# result into jupytext/.
# FIX: the original built the jupytext command with `eval $(echo -e ...)`,
# which word-splits and re-evaluates file names (breaks on spaces and is a
# command-injection hazard); call jupytext directly instead.
for file in *.ipynb; do
echo "$file"
# Stem before the first dot - equivalent to the original
# `echo "${file##*/}" | cut -d'.' -f1` (glob matches contain no slash).
NAME=${file%%.*}
echo "$NAME"
echo "jupytext --output ${NAME}.py ${file}"
jupytext --output "${NAME}.py" "${file}"
mv "${NAME}.py" jupytext/
done
| true
|
737fa9b6e7eba8b5c3e505e3ea0978096ba3c5b9
|
Shell
|
yichao0319/iParking
|
/ml_weka/batch_eval_condor.sh
|
UTF-8
| 9,330
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
## CONDOR
# Generate one tiny shell script + condor submit file per Weka evaluation
# configuration (classifier x month x type x duplication x feature
# selection), chain them all into a DAG, and submit it with
# condor_submit_dag, capped at $num_jobs concurrent jobs.
func="batch_eval"
num_jobs=200
cnt=0
## DAG
# NOTE(review): without -f this prints a (harmless) error on a fresh
# checkout where no old tmp.*.dag files exist.
rm tmp.${func}.dag*
echo "" > tmp.${func}.dag
TYPES=("norm.fix" "fix")
MONTHS=(201504 201505 201506 201507 201508 201509 201510 201511 201512 201601 201604 201605 201608 54-5 54-6 54-7 54-8 54-9)
DUPS=(200 0 100)
## 108 * 10%, 20%, 40%, 80%
FEATURES=(11 22 43 86)
RNG=100
# CLASSIFIERS=("NaiveBayes" "C45" "SVM")
CLASSIFIERS=("C45" "NaiveBayes")
# CLASSIFIERS=("NaiveBayes")
# CLASSIFIERS=("C45")
# CLASSIFIERS=("SVM")
# CLASSIFIERS=("LIBSVM")
SENSOR=""
VALID=""
if [[ ${SENSOR} != "" ]]; then
VALID=".valid"
fi
# Array lengths, used as loop bounds below.
NT=${#TYPES[@]}
NM=${#MONTHS[@]}
ND=${#DUPS[@]}
NF=${#FEATURES[@]}
NC=${#CLASSIFIERS[@]}
for (( nci = 0; nci < ${NC}; nci++ )); do
CLASSIFIER=${CLASSIFIERS[${nci}]}
#######################################################
## Per-month sweep (fixed type t=0 and duplication j=0): plain run, two
## subset-selection runs (FCBF, Cfs/BestFirst), and a Ranker run per
## feature count. Each job: write the eval.sh command to tmp.jobN.sh,
## instantiate the condor template, append a JOB line to the DAG.
i=0 ## MONTHS
j=0 ## DUPS
t=0 ## TYPES
for (( i = 0; i < ${NM}; i++ )); do
FILENAME="weka_${SENSOR}${MONTHS[${i}]}.${TYPES[${t}]}"
echo "-----------------"
echo "FILE=${FILENAME}"
BAL=""
# NOTE(review): [[ ... > 0 ]] is a lexicographic *string* comparison, not
# numeric; it happens to give the right answer for the values "0", "100",
# "200" used here. Numeric intent would be -gt.
if [[ ${DUPS[${j}]} > 0 ]]; then
BAL=".bal${DUPS[${j}]}"
fi
echo " > all"
cnt=$((${cnt} + 1))
echo "bash eval.sh -C=\"${CLASSIFIER}\" -m=\"${MONTHS[${i}]}\" -t=\"${TYPES[${t}]}\" -d=${DUPS[${j}]} -v=\"${VALID}\" -M=\"${MONTHS[${i}]}\" -T=\"${TYPES[${t}]}\" -s=\"${SENSOR}\" -r=${RNG}" > tmp.job${cnt}.sh
sed "s/CODENAME/tmp.job${cnt}.sh/g; s/JOBNAME/job${cnt}/g" condor.${func}.mother.condor > tmp.job${cnt}.condor
echo JOB J${cnt} tmp.job${cnt}.condor >> tmp.${func}.dag
echo " > FCBF"
# bash eval.sh -C=\"${CLASSIFIER}\" -E="SymmetricalUncertAttributeSetEval" -S="FCBFSearch" -t="${FILENAME}${BAL}" -T="${FILENAME}" -r=${RNG} -N=30
cnt=$((${cnt} + 1))
echo "bash eval.sh -C=\"${CLASSIFIER}\" -E=\"SymmetricalUncertAttributeSetEval\" -S=\"FCBFSearch\" -m=\"${MONTHS[${i}]}\" -t=\"${TYPES[${t}]}\" -d=${DUPS[${j}]} -v=\"${VALID}\" -M=\"${MONTHS[${i}]}\" -T=\"${TYPES[${t}]}\" -s=\"${SENSOR}\" -r=${RNG} -N=30" > tmp.job${cnt}.sh
sed "s/CODENAME/tmp.job${cnt}.sh/g; s/JOBNAME/job${cnt}/g" condor.${func}.mother.condor > tmp.job${cnt}.condor
echo JOB J${cnt} tmp.job${cnt}.condor >> tmp.${func}.dag
echo " > Cfs, BesttFirst"
# bash eval.sh -C=\"${CLASSIFIER}\" -E="CfsSubsetEval" -S="BestFirst" -t="${FILENAME}${BAL}" -T="${FILENAME}" -r=${RNG} -N=30
cnt=$((${cnt} + 1))
echo "bash eval.sh -C=\"${CLASSIFIER}\" -E=\"CfsSubsetEval\" -S=\"BestFirst\" -m=\"${MONTHS[${i}]}\" -t=\"${TYPES[${t}]}\" -d=${DUPS[${j}]} -v=\"${VALID}\" -M=\"${MONTHS[${i}]}\" -T=\"${TYPES[${t}]}\" -s=\"${SENSOR}\" -r=${RNG} -N=30" > tmp.job${cnt}.sh
sed "s/CODENAME/tmp.job${cnt}.sh/g; s/JOBNAME/job${cnt}/g" condor.${func}.mother.condor > tmp.job${cnt}.condor
echo JOB J${cnt} tmp.job${cnt}.condor >> tmp.${func}.dag
for (( k = 0; k < ${NF}; k++ )); do
echo " > Ranker: ${FEATURES[${k}]}"
# bash eval.sh -C=\"${CLASSIFIER}\" -E="GainRatioAttributeEval" -S="Ranker" -t="${FILENAME}${BAL}" -T="${FILENAME}" -r=${RNG} -N=${FEATURES[${k}]}
cnt=$((${cnt} + 1))
echo "bash eval.sh -C=\"${CLASSIFIER}\" -E=\"GainRatioAttributeEval\" -S=\"Ranker\" -m=\"${MONTHS[${i}]}\" -t=\"${TYPES[${t}]}\" -d=${DUPS[${j}]} -v=\"${VALID}\" -M=\"${MONTHS[${i}]}\" -T=\"${TYPES[${t}]}\" -s=\"${SENSOR}\" -r=${RNG} -N=${FEATURES[${k}]}" > tmp.job${cnt}.sh
sed "s/CODENAME/tmp.job${cnt}.sh/g; s/JOBNAME/job${cnt}/g" condor.${func}.mother.condor > tmp.job${cnt}.condor
echo JOB J${cnt} tmp.job${cnt}.condor >> tmp.${func}.dag
done
done
###########
## Wrapper -- too slow..
# bash eval.sh -C=\"${CLASSIFIER}\" -E="WrapperSubsetEval" -S="BestFirst" -t="${FILENAME}${BAL}" -T="${FILENAME}" -r=${RNG} -N=30
###########
#######################################################
## Varying Duplications
echo "================================"
echo "Varying Duplications"
i=0 ## MONTHS
j=0 ## DUPS
t=0 ## TYPES
FILENAME="weka_${SENSOR}${MONTHS[${i}]}.${TYPES[${t}]}"
echo "FILE=${FILENAME}"
for (( j = 0; j < ${ND}; j++ )); do
BAL=""
# See string-comparison note above.
if [[ ${DUPS[${j}]} > 0 ]]; then
BAL=".bal${DUPS[${j}]}"
fi
# bash eval.sh -C=\"${CLASSIFIER}\" -t="${FILENAME}${BAL}" -T="${FILENAME}" -r=${RNG}
cnt=$((${cnt} + 1))
echo "bash eval.sh -C=\"${CLASSIFIER}\" -m=\"${MONTHS[${i}]}\" -t=\"${TYPES[${t}]}\" -d=${DUPS[${j}]} -v=\"${VALID}\" -M=\"${MONTHS[${i}]}\" -T=\"${TYPES[${t}]}\" -s=\"${SENSOR}\" -r=${RNG}" > tmp.job${cnt}.sh
sed "s/CODENAME/tmp.job${cnt}.sh/g; s/JOBNAME/job${cnt}/g" condor.${func}.mother.condor > tmp.job${cnt}.condor
echo JOB J${cnt} tmp.job${cnt}.condor >> tmp.${func}.dag
done
#######################################################
## Varying Types: one unbalanced (-d=0) and one balanced (-d=200) job per
## type, month fixed at i=0.
echo "================================"
echo "Varying Types"
i=0 ## MONTHS
j=0 ## DUPS
t=0 ## TYPES
for (( t = 0; t < ${NT}; t++ )); do
FILENAME="weka_${SENSOR}${MONTHS[${i}]}.${TYPES[${t}]}"
echo "FILE=${FILENAME}"
BAL=""
cnt=$((${cnt} + 1))
echo "bash eval.sh -C=\"${CLASSIFIER}\" -m=\"${MONTHS[${i}]}\" -t=\"${TYPES[${t}]}\" -d=0 -v=\"${VALID}\" -M=\"${MONTHS[${i}]}\" -T=\"${TYPES[${t}]}\" -s=\"${SENSOR}\" -r=${RNG}" > tmp.job${cnt}.sh
sed "s/CODENAME/tmp.job${cnt}.sh/g; s/JOBNAME/job${cnt}/g" condor.${func}.mother.condor > tmp.job${cnt}.condor
echo JOB J${cnt} tmp.job${cnt}.condor >> tmp.${func}.dag
BAL=".bal200"
cnt=$((${cnt} + 1))
echo "bash eval.sh -C=\"${CLASSIFIER}\" -m=\"${MONTHS[${i}]}\" -t=\"${TYPES[${t}]}\" -d=200 -v=\"${VALID}\" -M=\"${MONTHS[${i}]}\" -T=\"${TYPES[${t}]}\" -s=\"${SENSOR}\" -r=${RNG}" > tmp.job${cnt}.sh
sed "s/CODENAME/tmp.job${cnt}.sh/g; s/JOBNAME/job${cnt}/g" condor.${func}.mother.condor > tmp.job${cnt}.condor
echo JOB J${cnt} tmp.job${cnt}.condor >> tmp.${func}.dag
done
#######################################################
## Varying Time: train on one month, test on another (cross-validation
## pairs are skipped for the first type, since they were covered above).
echo "================================"
echo "Varying Time"
# TRAIN_MONTHS=(4 5 6 7 45 456 4567)
# TEST_MONTHS=(4 5 6 7)
TRAIN_MONTHS=(201504 201505 201506 201507 201508 201509 201510 201511 201512 201601 201604 201605 201608 54-5 54-6 54-7 54-8 54-9)
TEST_MONTHS=(201504 201505 201506 201507 201508 201509 201510 201511 201512 201601 201604 201605 201608)
TYPES2=("norm.fix")
NT1=${#TRAIN_MONTHS[@]}
NT2=${#TEST_MONTHS[@]}
NT3=${#TYPES2[@]}
j=0 ## DUPS
t=0 ## TYPES
# for (( m = 0; m < ${NT1}; m++ )); do
# for (( n = 0; n < ${NT2}; n++ )); do
# if [[ ${TRAIN_MONTHS[${m}]} == ${TEST_MONTHS[${n}]} ]]; then
# ## cross-validation: should have been done above
# continue
# fi
# FILENAME1="weka_${SENSOR}${TRAIN_MONTHS[${m}]}.${TYPES[${t}]}"
# FILENAME2="weka_${SENSOR}${TEST_MONTHS[${n}]}.${TYPES[${t}]}"
# BAL=""
# if [[ ${DUPS[${j}]} > 0 ]]; then
# BAL=".bal${DUPS[${j}]}"
# fi
# cnt=$((${cnt} + 1))
# # echo "bash eval.sh -C=\"${CLASSIFIER}\" -t=\"${FILENAME1}${BAL}\" -T=\"${FILENAME2}\" -r=${RNG}" > tmp.job${cnt}.sh
# echo "bash eval.sh -C=\"${CLASSIFIER}\" -m=\"${TRAIN_MONTHS[${m}]}\" -t=\"${TYPES[${t}]}\" -d=${DUPS[${j}]} -v=\"${VALID}\" -M=\"${TEST_MONTHS[${n}]}\" -T=\"${TYPES[${t}]}\" -s=\"${SENSOR}\" -r=${RNG}" > tmp.job${cnt}.sh
# sed "s/CODENAME/tmp.job${cnt}.sh/g; s/JOBNAME/job${cnt}/g" condor.${func}.mother.condor > tmp.job${cnt}.condor
# echo JOB J${cnt} tmp.job${cnt}.condor >> tmp.${func}.dag
# done
# done
for (( t = 0; t < ${NT3}; t++ )); do
for (( m = 0; m < ${NT1}; m++ )); do
for (( n = 0; n < ${NT2}; n++ )); do
if [[ ${TRAIN_MONTHS[${m}]} == ${TEST_MONTHS[${n}]} ]]; then
if [[ ${TYPES2[${t}]} == ${TYPES[0]} ]]; then
## cross-validation: should have been done above
continue
fi
fi
FILENAME1="weka_${SENSOR}${TRAIN_MONTHS[${m}]}.${TYPES2[${t}]}"
FILENAME2="weka_${SENSOR}${TEST_MONTHS[${n}]}.${TYPES2[${t}]}"
BAL=""
if [[ ${DUPS[${j}]} > 0 ]]; then
BAL=".bal${DUPS[${j}]}"
fi
cnt=$((${cnt} + 1))
# echo "bash eval.sh -C=\"${CLASSIFIER}\" -t=\"${FILENAME1}${BAL}\" -T=\"${FILENAME2}\" -r=${RNG}" > tmp.job${cnt}.sh
echo "bash eval.sh -C=\"${CLASSIFIER}\" -m=\"${TRAIN_MONTHS[${m}]}\" -t=\"${TYPES2[${t}]}\" -d=${DUPS[${j}]} -v=\"${VALID}\" -M=\"${TEST_MONTHS[${n}]}\" -T=\"${TYPES2[${t}]}\" -s=\"${SENSOR}\" -r=${RNG}" > tmp.job${cnt}.sh
sed "s/CODENAME/tmp.job${cnt}.sh/g; s/JOBNAME/job${cnt}/g" condor.${func}.mother.condor > tmp.job${cnt}.condor
echo JOB J${cnt} tmp.job${cnt}.condor >> tmp.${func}.dag
done
done
done
done
# Report how many jobs were generated vs. the concurrency cap, then submit.
echo $cnt / $num_jobs
condor_submit_dag -maxjobs ${num_jobs} tmp.${func}.dag
# condor_submit_dag tmp.${func}.dag
| true
|
ff23f29865097715f1281554fbc32e4f21dcfd87
|
Shell
|
davidjsanders/studySim
|
/OLD/v3_00/sim1/setup.sh
|
UTF-8
| 2,157
| 3.34375
| 3
|
[] |
no_license
|
#
# Simulation Setup
#
# Start simulation set 1 (with obfuscation): launch the logger, bluetooth,
# location, monitor, notification, and phone containers, wire their logging
# to the central logger, and print a summary of endpoints.
# NOTE(review): run_docker, run_docker_phone, do_delete, config_logging,
# start_message/stop_message, and the *Port/$save_param/$underline
# variables all come from the sourced includes below - semantics assumed,
# confirm against includes/.
if [ "X"$simpath == "X" ]; then
echo "ERROR: simpath is not defined!"
echo ""
echo "Before running this script, ensure that simpath is defined:"
echo ""
echo " export simpath=/path/to/studySim"
echo
exit 1
fi
stage_path="v3_00"
#set -e
source $simpath/includes/check_params.sh
source $simpath/includes/_do_first.sh
#
# Simulation 1 Configuration
#
sim_heading="Simulation set 1 (with obfuscation) setup"
clear
set +e
start_message "${sim_heading}"
set -e
# Logger (started first so the other services can log to it)
run_docker "v3_00" $loggerPort "logger" "Logger" "${save_param}"
sleep 2
# Clear any logs left over from a previous run.
do_delete '{'$genKey'}' $loggerPort '/'$version'/log' "Clear logs."
sleep 1
# Bluetooth
run_docker "v3_00" $bluePort "bluetooth" "Bluetooth" "${save_param}"
# Location Service
run_docker "v3_00" $locPort "location_service" "Location_Service" "${save_param}"
# Monitor App
run_docker "v3_00" $monitorPort "monitor_app" "Monitor_App" "${save_param}"
# Notification Service
run_docker "v3_00" $notesvcPort "notification" "Notification_Service" "${save_param}"
# Start the phone
run_docker_phone "v3_00" "${save_param}"
echo ""
echo -n "Pausing to let services complete start-up: "
sleep 2
echo "done."
echo ""
echo "${underline}Configure logging.${normal}"
echo ""
# Point every service at the central logger.
config_logging $bluePort "Bluetooth" # Bluetooth
config_logging $locPort "Location Service" # Location Service
config_logging $monitorPort "Monitor App" # Monitor App
config_logging $notesvcPort "Notification Service" # Notification Service
config_logging $phonePort "Phone" # Phone
echo ""
echo "Logging configured."
echo ""
echo ${underline}"Summary of services"${normal}
echo "Central logger: "$serverIPName":"$loggerPort"/"$presentAs
echo "Notification service: "$serverIPName":"$notesvcPort"/"$presentAs
echo "Bluetooth service: "$serverIPName":"$bluePort"/"$presentAs
echo "Location service: "$serverIPName":"$locPort"/"$presentAs
echo "Monitor App service: "$serverIPName":"$monitorPort"/"$presentAs
echo "Phone: "$serverIPName":"$phonePort"/"$presentAs
echo
set +e
stop_message "${sim_heading}"
| true
|
953017c1f3101fb2cea6ae90e968e22edb44a596
|
Shell
|
modell-aachen/qwiki-perl-dependency-builder
|
/distros/debian/entrypoint.sh
|
UTF-8
| 457
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
# Container entrypoint: fetch a CPAN distribution with cpanm and build a
# Debian package from it with dh-make-perl.
#
# Arguments:
#   $1 - CPAN module/distribution name
#   $2 - optional version (latest release when omitted)
if [ -z "$2" ]; then
echo "Installing latest"
fqn=$(cpanm --info "$1")
else
echo "Installing Version $2"
fqn=$(cpanm --info "$1@$2")
fi
# Derive the tarball name (strip the author directory prefix) and the
# unpacked folder name (strip the .tar.gz suffix) with parameter expansion
# instead of piping through sed; also quote all expansions.
tarball=${fqn##*/}
package_folder=${tarball%.tar.gz}
rm -rf /opt/build/*
cpanm "$fqn"
# cpanm leaves the downloaded tarball in its work/cache dir; locate it and
# copy it into the build area.
find / -name "$tarball" | xargs cp -t /opt/build/
cd /opt/build/ && tar -pzxf "$tarball"
# Skip the distribution's test suite during the Debian build.
export DEB_BUILD_OPTIONS=nocheck
dh-make-perl --build "/opt/build/$package_folder"
| true
|
09923901946d54d70b9589d6d30c88db4f2fe41e
|
Shell
|
xakon/haskell-docs-cli
|
/test/cram/run.sh
|
UTF-8
| 357
| 2.515625
| 3
|
[] |
no_license
|
#! /bin/bash
# Command to run cram tests
# $TESTDIR is a variable made available by cram that contains the
# path to the directory where the test file lives.
# This means that all files must be in the same directory as this runner.
#
# FIX: abort when the cd fails (previously hdc would silently run against
# the wrong working directory) and quote "$@" plus all paths so arguments
# and directories containing spaces survive intact.
cd "$TESTDIR/../.." || exit 1
"$(stack path --dist-dir)"/build/hdc/hdc \
--data-dir "$TESTDIR/cram-data" \
--cache unlimited \
"$@"
| true
|
9e390aa55facac8b10f17301f387697fc6fa6bfb
|
Shell
|
kanghuanyao/yao_env
|
/yao_bin/bin/yao_rm
|
UTF-8
| 239
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
# yao_rm - "safe rm": instead of deleting, move every argument into
# $HOME/rm/ under a unique timestamp-based name.

# Print a candidate trash path: <epoch-seconds>_<nanoseconds>_<random>.
function get_filename
{
echo "$HOME/rm/$(date +%s_%N)_$RANDOM"
}

# Ensure the trash directory exists, otherwise every mv below would fail.
mkdir -p "$HOME/rm"

while [ "$1" != "" ] ; do
RDN_FILE=$(get_filename)
# BUG FIX: the original tested '! -f $RND_FILE' - the variable name was
# misspelled (RND vs RDN) AND the condition was inverted, so the
# uniqueness check never executed. Regenerate while the candidate name is
# already taken.
while [ -e "$RDN_FILE" ] ; do
RDN_FILE=$(get_filename)
done
test -e "$1" && mv -- "$1" "$RDN_FILE"
shift
done
| true
|
7bd0b08444d3d5e0fb5336a6012e46aa47b9f295
|
Shell
|
shieldproject/shield-addon-postgres-boshrelease
|
/packages/shield-addon-postgres-9.2/packaging
|
UTF-8
| 214
| 2.78125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# BOSH packaging script: build PostgreSQL 9.2.24 from the bundled source
# tarball and install it into ${BOSH_INSTALL_TARGET}.
set -eux

# Parallelize the build across all available cores.
ncpu=$(grep -c ^processor /proc/cpuinfo)

tar -xjf postgres/postgresql-9.2.24.tar.bz2
cd postgresql-9.2.24
./configure --prefix="${BOSH_INSTALL_TARGET}" --with-openssl
make -j "$ncpu"
make install
| true
|
6155ad2584432cfc0136e2d4f14ce0d36d200b39
|
Shell
|
hufman/ShantaePirateExtractions
|
/doit.sh
|
UTF-8
| 2,442
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
# Fetch the extraction tools (quickbms and the MSFHDEx suite) on first run,
# make them executable, and remember absolute paths to them for use after
# the extraction functions cd into temp directories.
BASE=`pwd`
[ -e quickbms.zip ] || wget http://aluigi.altervista.org/papers/quickbms.zip
[ -e quickbms.exe ] || unzip quickbms.zip
[ -x quickbms.exe ] || chmod +x quickbms.exe
[ -e MSFHDEx.zip ] || wget https://dl.dropboxusercontent.com/u/31816885/programs/MSFHDEx.zip
[ -e animEx.exe ] || unzip MSFHDEx.zip
[ -x animEx.exe ] || chmod +x animEx.exe
[ -x imageEx.exe ] || chmod +x imageEx.exe
[ -x volEx.exe ] || chmod +x volEx.exe
# remember where the utils are
animEx=`pwd`/animEx.exe
imageEx=`pwd`/imageEx.exe
# BUG FIX: volEx previously pointed at imageEx.exe (copy-paste error).
volEx=`pwd`/volEx.exe
# extraction functions
# Unpack one WayForward .vol archive with quickbms.
# Arguments: $1 - path to a .vol file under extracted/.
# The output directory mirrors the input path under extractedvol/ (with the
# "<UPPERCASE>/<name>/" path components joined by '-' and the extension
# dropped); an existing output directory means it was done before, so the
# archive is skipped - re-runs are idempotent.
extractVol() {
# assert(pwd==$BASE)
vol="$1"
extractedvol=`echo "$vol" | sed 's|extracted/|extractedvol/|' | sed -E 's|/([A-Z]+)/([^/]+)/|/\1/\2-|' | sed 's|\.[^/.]*$||'`
[ -d "$extractedvol" ] && return
mkdir -p "$extractedvol"
./quickbms.exe wayforward.bms "$vol" "$extractedvol" 2>/dev/null
}
# Extract one .anim file with animEx into a mirrored directory under anim/.
# Arguments: $1 - path to a .anim file under extractedvol/.
# Works in a per-invocation temp dir ($$-suffixed) because animEx writes an
# output/ tree relative to its cwd; skips files whose output dir exists.
extractAnim() {
# assert(pwd==$BASE)
anim="$1"
name=`basename "$anim"`
tmp=/tmp/"$name".$$
output=`echo "$anim" | sed 's|extractedvol/|anim/|' | sed 's|\.[^/.]*$||'`
[ -d "$output" ] && return
mkdir "$tmp"
cp "$anim" "$tmp"
pushd "$tmp" >/dev/null
"$animEx" "$name" 2>/dev/null
popd >/dev/null
mkdir -p "$output"
# animEx writes output/<something>/<frames>; flatten one level into $output.
mv "$tmp"/output/*/* "$output"
rm -r "$tmp"
}
# Extract one .image file with imageEx into a mirrored directory under
# image/. Arguments: $1 - path to a .image file under extractedvol/.
# Same temp-dir pattern as extractAnim; note imageEx is given the full
# $tmp/$name path (unlike animEx, which takes just the name).
extractImage() {
# assert(pwd==$BASE)
image="$1"
name=`basename "$image"`
tmp=/tmp/"$name".$$
output=`echo "$image" | sed 's|extractedvol/|image/|' | sed 's|\.[^/.]*$||'`
[ -d "$output" ] && return
mkdir "$tmp"
cp "$image" "$tmp"
pushd "$tmp" >/dev/null
"$imageEx" "$tmp/$name" 2>/dev/null
popd >/dev/null
mkdir -p "$output"
mv "$tmp"/output/* "$output"
rm -r "$tmp"
}
# Render the PNG frames inside directory $1 into an animated GIF at
# "$1.gif" (4x upscale, background disposal, infinite loop), skipping the
# conversion when the GIF already exists.
dir2gif() {
if [ ! -e "$1.gif" ]; then
convert -scale 400% -dispose Background -loop 0 "$1"/*png "$1.gif"
fi
}
# begin extracting files: top archive -> .vol archives -> .anim/.image
# payloads -> animated GIFs. Each stage is guarded so re-runs only do the
# missing work.
[ -e extracted ] || mkdir extracted
[ -e extracted/ANIM ] || ./quickbms.exe wayforward.bms ShantaeCurse.data extracted 2>/dev/null
[ -e extractedvol ] || mkdir extractedvol
[ -e anim ] || mkdir anim
[ -e image ] || mkdir image
for vol in extracted/*vol extracted/ANIM/*vol extracted/ANIM/*/*vol; do
extractVol "$vol"
done
find extractedvol -name '*.anim' | while read name; do
extractAnim "$name"
done
find extractedvol -name '*.image' | while read name; do
extractImage "$name"
done
# Emit only the *leaf* directories under anim/: a directory is printed when
# it is not a path-prefix of the next entry. Relies on find's depth-first
# output order; the trailing `echo` flushes the final pending entry.
(find anim -type d; echo) | awk 'index($0,prev"/")!=1 && NR!=1 {print prev}
1 {sub(/\/$/,""); prev=$0}' | while read anim; do
dir2gif "$anim"
done
| true
|
03c69601661595f37b20ed6a03171b469cf4f1dd
|
Shell
|
dmvelasco/test-scripts
|
/GATK_1_prep.sh
|
UTF-8
| 2,098
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash -l
# SLURM batch job: prepare reference-genome index/dictionary files for a
# GATK pipeline (BWA index + Picard sequence dictionary).
#SBATCH -D /home/dmvelasc/Projects/Prunus/Data/BAM/
#SBATCH -o /home/dmvelasc/Projects/Prunus/slurm-log/%j-stdout-GATK1prep.txt
#SBATCH -e /home/dmvelasc/Projects/Prunus/slurm-log/%j-stderr-GATK1prep.txt
#SBATCH -p serial
#SBATCH -J GATK1
#SBATCH -n 1
#SBATCH -c 2
#SBATCH -t 4-00:00
#SBATCH --mail-user=dmvelasco@ucdavis.edu
#SBATCH --mail-type=ALL
#SBATCH --mem=3000M
# Fail fast on errors and on use of unset variables.
set -e
set -u
module load samtools/1.3.1
module load bamtools
module load java/1.8
# load GATK dependencies
module load R/3.3.1
module load maven/3.2.3
#module load GATK/3.6
#######################################################################################
### picard verion: 2.9.0 ###
### GATK version: 3.7 ###
#######################################################################################
echo "########## set up directories ##########";
date
###############################
### Set up directories ###
###############################
bwa="/home/dmvelasc/bin/bwa"
echo "########## set up parameters ##########";
date
###############################
### Set up the parameters ###
###############################
# location of picard.jar
picard="/home/dmvelasc/Software/picard/picard.jar"
# location of GenomeAnalysisTK.jar
GATK="/home/dmvelasc/Software/GATK/GenomeAnalysisTK.jar"
# genome reference file location
genome="/home/dmvelasc/Data/references/persica-SCF/Prunus_persica_v1.0_scaffolds.fa"
###############################
### Step 1: Index reference ###
###############################
# Step1: BWA index for reference genome
echo "########## prepare the BWA index and picard/GATK dictionary files ##########";
date
echo "indexing reference genome with BWA";
date
#"$bwa" index -a bwtsw "$genome" #check that options are correct, however, already done so skip step
echo "creating picard/GATK dictionary";
date
# Create the .dict file GATK requires alongside the reference FASTA.
java -Xmx3g -jar "$picard" CreateSequenceDictionary \
R="$genome" \
O=/home/dmvelasc/Data/references/persica-SCF/Prunus_persica_v1.0_scaffolds.dict
| true
|
80b3fff893fdd5ec923013dbc21a063fd687048d
|
Shell
|
coldblaze/docker-x11-korean
|
/x11-korean.sh
|
UTF-8
| 1,134
| 3.359375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Launcher for the coldblaze/x11-korean Docker image.
# Usage: ./x11-korean.sh [pull|run|stop]
# On macOS the DISPLAY points at the host IP (socat/XQuartz); on Linux
# the X11 socket is bind-mounted directly.

##### CONFIGS #####
HOST_VOLUME="/Users/coldblaze/docker_works"
CONTAINER_MNT="/srv"
NAME="x11-korean"
HOST_NAME="x11-korean"
NETWORK_CARD="en1"
##### END OF CONFIGS #####

_IMAGE="coldblaze/x11-korean"
_IMAGE_TAG="18.04.1"

case $1 in
pull)
    docker pull $_IMAGE:$_IMAGE_TAG
    ;;
run)
    # BUGFIX: was ${0##$}, which strips nothing and echoed the full path;
    # ${0##*/} is the basename, matching the usage line below.
    echo ${0##*/}" run"
    # For macOS with socat
    case "$OSTYPE" in
    darwin*)
        _MY_IP=`ipconfig getifaddr $NETWORK_CARD`
        docker run \
            --interactive --tty \
            --rm \
            --hostname $HOST_NAME --name $NAME \
            --volume $HOST_VOLUME:$CONTAINER_MNT:rw \
            --env DISPLAY=$_MY_IP:0 \
            $_IMAGE:$_IMAGE_TAG
        ;;
    linux*)
        # Allow local connections to the X server for the container.
        xhost +local:
        docker run \
            --interactive --tty \
            --rm \
            --hostname $HOST_NAME --name $NAME \
            --volume $HOST_VOLUME:$CONTAINER_MNT:rw \
            --volume /tmp/.X11-unix:/tmp/.X11-unix:rw \
            --env DISPLAY=unix$DISPLAY \
            $_IMAGE:$_IMAGE_TAG
        ;;
    esac
    ;;
stop)
    # BUGFIX: same ${0##$} -> ${0##*/} basename fix as above.
    echo ${0##*/}" stop"
    docker stop $NAME
    docker rm $NAME
    ;;
*)
    echo "Usage: ./"${0##*/}" [run|stop]"
    ;;
esac
| true
|
25b6dd54016d37022578622017d085b32f762a9b
|
Shell
|
defaziogiancarlo/scripts
|
/add_hooks
|
UTF-8
| 901
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/bash
# add the whamcloud hooks to .git hooks for lustre
# commit-msg
# prepare-commit-msg
# if they are already in the correct directory but suffixed
# with deactivation suffix ($SUFFIX) remove the suffix so they become active
# if they aren't in the hooks sirectory at all, get them from
# the lustre source

SUFFIX=deactivated
LUSTRE=~/lustre-release
HOOKS_DIR=$LUSTRE/.git/hooks
CONTRIB_HOOKS_DIR=$LUSTRE/contrib/git-hooks

# add_hook_file NAME — install git hook NAME into $HOOKS_DIR.
# Precedence: an active hook is left alone; a deactivated copy is renamed
# back into place; otherwise the hook is copied from the lustre contrib dir.
add_hook_file() {
# check if hook file exists, if so do nothing
if [ -e "$HOOKS_DIR/$1" ]
then
echo "$1 already exists in $HOOKS_DIR"
# if $SUFFIX activate it
elif [ -e "$HOOKS_DIR/$1.$SUFFIX" ]
then
mv "$HOOKS_DIR/$1.$SUFFIX" "$HOOKS_DIR/$1"
# if non-existent, copy it from the lustre source
else
cp "$CONTRIB_HOOKS_DIR/$1" "$HOOKS_DIR/$1"
fi
}

#
# main
#
add_hook_file "commit-msg"
add_hook_file "prepare-commit-msg"
| true
|
b2e0d299635a8c102bd6e4eff6a3eae98347e16f
|
Shell
|
yohei-yamazaki/dotfiles
|
/setup.sh
|
UTF-8
| 451
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
# Symlink dotfiles, VS Code settings, and Neovim config from ~/dotfiles
# into their expected locations. Existing targets are left untouched
# (ln -s fails on an existing path rather than clobbering it).

DOT_FILES=(.bashrc .bash_profile .zshrc)
VS_CODE_FILES=(settings.json keybindings.json)
NVIM_FILES=(init.vim dein.toml dein_lazy.toml)

# Every expansion is quoted: $HOME may contain spaces, and the VS Code
# target path ("Application Support") always does.
for file in "${DOT_FILES[@]}"
do
    ln -s "$HOME/dotfiles/$file" "$HOME/$file"
done

for file in "${VS_CODE_FILES[@]}"
do
    ln -s "$HOME/dotfiles/vscode/$file" "$HOME/Library/Application Support/Code/User/$file"
done

for file in "${NVIM_FILES[@]}"
do
    ln -s "$HOME/dotfiles/nvim/$file" "$HOME/.config/nvim/$file"
done
| true
|
392b57ad3fd35db91242839df00657ec62651539
|
Shell
|
kcolford/kernel-update-hook
|
/kernel-update-hook
|
UTF-8
| 197
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
# Post-kernel-update hook: (re)load every kernel module recorded in the
# state file, creating the state file on first run.
set -euo pipefail

# One module name per line.
mods=/var/lib/kernel-update-hook/modules

if [ ! -e "$mods" ]; then
    mkdir -p "$(dirname "$mods")"
    touch "$mods"
fi

# Read the file line by line instead of `for i in $(cat ...)`, which
# word-splits and glob-expands each entry.
while IFS= read -r mod; do
    # Skip blank lines so an empty file is a clean no-op.
    [ -n "$mod" ] || continue
    modprobe "$mod"
done < "$mods"
| true
|
aed3b7b97876338f4e2c22c3e122370eb611c07f
|
Shell
|
maxme/mbhttpd
|
/tests/full_fs_test.sh
|
UTF-8
| 142
| 2.546875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Smoke test: start mbhttpd, request every file under the current tree
# (each absolute path doubles as the request URI), then kill the server.
PORT=6543
cd ..
./mbhttpd -p $PORT &
cd -
# All responses and errors are discarded; this only exercises the server.
find $(pwd) -exec wget -O - http://localhost:$PORT{} > /dev/null 2>&1 \;
killall mbhttpd
| true
|
6a6c68142adad1081d8c2392097394e892785a15
|
Shell
|
laughingMan/dotfiles
|
/bin/dot
|
UTF-8
| 296
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/sh
#
# dot
#
# `dot` handles installation, updates, things like that. Run it periodically
# to make sure you're on the latest and greatest.

# Root of the dotfiles checkout; sub-scripts below live under it.
export DOT=$HOME/.dotfiles

# Set OS X defaults
$DOT/osx/set-osx-defaults.sh

# Update homebrew and install packages (stderr folded into stdout).
$DOT/homebrew/install.sh 2>&1
| true
|
0f7d3d22a630935f3d9c6c804050fbf7a9298ab2
|
Shell
|
bingxuanying/DuFu
|
/bin/dufu-server-start.sh
|
UTF-8
| 350
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
# Start the DuFu broker server. Resolves the project root relative to this
# script so it can be invoked from any working directory, verifies the
# entry point exists, then launches it.

SOURCEDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
DUFUMAINDIR="$( dirname "${SOURCEDIR}" )"
MAINFILEDIR="${DUFUMAINDIR}/core/main/BrokerServer.py"

# Bail out early with a diagnostic on stderr when the server file is absent.
if [ ! -f "${MAINFILEDIR}" ]
then
    echo "[ERROR] BrokerServer.py file doesn't exist" >&2
    exit 1
fi

python3 "${MAINFILEDIR}" --show
exit 0
| true
|
a0c48e92c233a7ee05a671066ad8313ca7cf028b
|
Shell
|
cloneko/demo
|
/.zshrc
|
UTF-8
| 1,056
| 2.546875
| 3
|
[] |
no_license
|
#
# .zshrc is sourced in interactive shells.
# It should contain commands to set up aliases,
# functions, options, key bindings, etc.
#

# Enable zsh's completion system.
autoload -U compinit
compinit

#allow tab completion in the middle of a word
setopt COMPLETE_IN_WORD

## keep background processes at full speed
#setopt NOBGNICE
## restart running processes on exit
#setopt HUP

## history
setopt APPEND_HISTORY
## for sharing history between zsh processes
setopt INC_APPEND_HISTORY
setopt SHARE_HISTORY

## never ever beep ever
setopt NO_BEEP

## automatically decide when to page a list of completions
#LISTMAX=0

## disable mail checking
#MAILCHECK=0

# autoload -U colors
#colors

# User-local libraries, binaries and perl modules.
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/home/yonashiro/lib
export PATH=$PATH:/home/yonashiro/bin
export PERL5LIB=/home/yonashiro/lib/perl5/5.8.8/i386-linux-thread-multi/

alias ls='ls -F'
alias la='ls -a'
alias ll='ls -l'

#PROMPT='[%n@%m]%~%# ' # default prompt
PROMPT='[%n@%m %~]%# '
EDITOR=vi

# Toggle an HTTP proxy for apt on/off.
alias aptproxy='export http_proxy=http://172.16.40.1:8888'
alias aptproxyremove='export http_proxy='
| true
|
89dd86f3f2cc22689a2657e902fa7953cab7ade8
|
Shell
|
HSLdevcom/digitransit-tools
|
/avgresponse.sh
|
UTF-8
| 407
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
# Compute the average response time (the last whitespace-separated field of
# each log line) for a given API path, over the last 500 proxy log lines.
# Usage:   ./avgresponse.sh <pod-name> <api-path>
# Example: ./avgresponse.sh digitransit-proxy-6d856c4548-d8f8m geocoding/v1/search
# Output:  the average, printed by bc -l on stdout.

# Stream the log through the pipeline instead of round-tripping through
# temp files in the current directory (the original littered log*.txt).
# `grep --` protects against API paths that start with a dash.
times=$(kubectl logs "$1" | tail -n 500 | grep -- "$2" | grep -oE '[^ ]+$')

# Guard the division: with no matching lines the original fed 0 into bc
# as a divisor and produced a runtime error.
if [ -z "$times" ]; then
    echo "no matching requests in the last 500 log lines" >&2
    exit 1
fi

SUM=$(paste -sd+ <<<"$times" | bc)
COUNT=$(wc -l <<<"$times")
echo "$SUM/$COUNT" | bc -l
| true
|
bd5c2fc1f8bcca25c77fa4a1e35cc2980efe1bb0
|
Shell
|
dougwyu/ArcDyn_2
|
/4_minimap2_samtools/launch_minimap2_samtools__PlatesAB.sh
|
UTF-8
| 5,980
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
# Operational runbook (run interactively on the HPC login node, not as a
# batch job): distributes sample folders across ten BWA* work dirs, stamps
# per-dir job IDs into the bsub templates, and submits minimap2 jobs
# followed — only after they finish — by samtools jobs.
set -e
set -u
set -o pipefail
#######################################################################################
#######################################################################################
# a shell script to launch bsub files
#######################################################################################
#######################################################################################

# upload (scp) the new minimap and samtools sh and bsub files to ArcDyn/PlatesAB
# run in macOS, not hpc
     # scp /Users/Negorashi2011/Dropbox/Working_docs/Roslin_Greenland/2017/bulk_samples/ArcDyn_scripts/4_minimap2_samtools/_loop_minimap2_only_20190115.sh b042@hpc.uea.ac.uk:~/ArcDyn/PlatesAB/PlatesAB_combined/
     # scp /Users/Negorashi2011/Dropbox/Working_docs/Roslin_Greenland/2017/bulk_samples/ArcDyn_scripts/4_minimap2_samtools/_loop_minimap2_only_20190115.bsub b042@hpc.uea.ac.uk:~/ArcDyn/PlatesAB/PlatesAB_combined/
     #
     # scp /Users/Negorashi2011/Dropbox/Working_docs/Roslin_Greenland/2017/bulk_samples/ArcDyn_scripts/4_minimap2_samtools/_loop_samtools_only_20190115.sh b042@hpc.uea.ac.uk:~/ArcDyn/PlatesAB/PlatesAB_combined/
     # scp /Users/Negorashi2011/Dropbox/Working_docs/Roslin_Greenland/2017/bulk_samples/ArcDyn_scripts/4_minimap2_samtools/_loop_samtools_only_20190115.bsub b042@hpc.uea.ac.uk:~/ArcDyn/PlatesAB/PlatesAB_combined/

# ssh hpc
# interactive
# to use parallel without a pathname in bsub scripts
PATH=$PATH:~/scripts/parallel-20170722/bin/

############## by hand, copy 1/10 the sample folders into each BWA folder
cd ~/ArcDyn/PlatesAB/PlatesAB_combined/; ls
# mkdir BWA{01,02,03,04,05,06,07,08,09,10}; ls # BWA is prefix because this was the original mapping software
# there are 192 sample folders:  hand move 19 into each BWA folder (easier than writing a script)

############# copy the minimap and samtools shell and bsub scripts into each BWA folder and edit the jobIDs
MINIMAP2_BSUB="_loop_minimap2_only_20190115.bsub"; echo ${MINIMAP2_BSUB}
MINIMAP2_SH="_loop_minimap2_only_20190115.sh"; echo ${MINIMAP2_SH}
SAMTOOLS_BSUB="_loop_samtools_only_20190115.bsub"; echo ${SAMTOOLS_BSUB}
SAMTOOLS_SH="_loop_samtools_only_20190115.sh"; echo ${SAMTOOLS_SH}

cd ~/ArcDyn/PlatesAB/PlatesAB_combined
ls
# Fan the four template files out to all ten work dirs.
parallel cp ${MINIMAP2_BSUB} BWA{} ::: 01 02 03 04 05 06 07 08 09 10
parallel cp ${MINIMAP2_SH} BWA{} ::: 01 02 03 04 05 06 07 08 09 10
parallel cp ${SAMTOOLS_BSUB} BWA{} ::: 01 02 03 04 05 06 07 08 09 10
parallel cp ${SAMTOOLS_SH} BWA{} ::: 01 02 03 04 05 06 07 08 09 10
ls BWA{01,02,03,04,05,06,07,08,09,10}

#### only run this if i think that i'm not downloading the latest minimap2/bwa files
# # remove previous bwa_output and minimap2_output files
# parallel "rm -rf BWA{}/bwa_outputs/" ::: 01 02 03 04 05 06 07 08 09 10
# parallel "rm -rf BWA{}/minimap2_outputs/" ::: 01 02 03 04 05 06 07 08 09 10

# edit the bsub files so that the correct jobID will show up (i suppose i could have instead run a job array...)
# sed rewrites the generic job name (e.g. mnmploop01) to a per-dir name (mnmpAB01..10).
cd ~/ArcDyn/PlatesAB/PlatesAB_combined/
parallel "sed 's/mnmploop01/mnmpAB{}/g' BWA{}/${MINIMAP2_BSUB} > BWA{}/${MINIMAP2_BSUB}_tmp" ::: 01 02 03 04 05 06 07 08 09 10
parallel "mv BWA{}/${MINIMAP2_BSUB}_tmp BWA{}/${MINIMAP2_BSUB}" ::: 01 02 03 04 05 06 07 08 09 10
head -n 7 BWA{01,02,03,04,05,06,07,08,09,10}/${MINIMAP2_BSUB} # check.  should be mnmpAB01
     # check if i'm using mellanox-ib or short-eth
tail -n 4 BWA{01,02,03,04,05,06,07,08,09,10}/${MINIMAP2_BSUB} # check for correct shell file version

parallel "sed 's/samtools01/samtlsAB{}/g' BWA{}/${SAMTOOLS_BSUB} > BWA{}/${SAMTOOLS_BSUB}_tmp" ::: 01 02 03 04 05 06 07 08 09 10
parallel "mv BWA{}/${SAMTOOLS_BSUB}_tmp BWA{}/${SAMTOOLS_BSUB}" ::: 01 02 03 04 05 06 07 08 09 10
head -n 7 BWA{01,02,03,04,05,06,07,08,09,10}/${SAMTOOLS_BSUB} # check.  should have the correct index number
     # check if i'm using mellanox-ib or short-eth
tail -n 1 BWA{01,02,03,04,05,06,07,08,09,10}/${SAMTOOLS_BSUB} # check.  should have the correct samtools shell filename
ls # BWA* folders should now sort to bottom

####### launch minimap2 scripts #######
# One bsub per work dir; `bjobs` calls are manual progress checks.
cd ~/ArcDyn/PlatesAB/PlatesAB_combined/BWA01; ls
bsub < ${MINIMAP2_BSUB}
bjobs
cd ~/ArcDyn/PlatesAB/PlatesAB_combined/BWA02; ls
bsub < ${MINIMAP2_BSUB}
bjobs
cd ~/ArcDyn/PlatesAB/PlatesAB_combined/BWA03; ls
bsub < ${MINIMAP2_BSUB}
cd ~/ArcDyn/PlatesAB/PlatesAB_combined/BWA04; ls
bsub < ${MINIMAP2_BSUB}
cd ~/ArcDyn/PlatesAB/PlatesAB_combined/BWA05; ls
bsub < ${MINIMAP2_BSUB}
cd ~/ArcDyn/PlatesAB/PlatesAB_combined/BWA06; ls
bsub < ${MINIMAP2_BSUB}
cd ~/ArcDyn/PlatesAB/PlatesAB_combined/BWA07; ls
bsub < ${MINIMAP2_BSUB}
cd ~/ArcDyn/PlatesAB/PlatesAB_combined/BWA08; ls
bsub < ${MINIMAP2_BSUB}
cd ~/ArcDyn/PlatesAB/PlatesAB_combined/BWA09; ls
bsub < ${MINIMAP2_BSUB}
cd ~/ArcDyn/PlatesAB/PlatesAB_combined/BWA10; ls
bsub < ${MINIMAP2_BSUB}
bjobs
ls ~/ArcDyn/PlatesAB/PlatesAB_combined/BWA01/minimap2_outputs # check

###### WAIT FOR THE MINIMAP2 JOBS TO FINISH BEFORE LAUNCHING THE SAMTOOLS SCRIPTS

############# launch samtools scripts #############y
cd ~/ArcDyn/PlatesAB/PlatesAB_combined/BWA01; ls
echo "${SAMTOOLS_BSUB}"
bsub < ${SAMTOOLS_BSUB}
bjobs
ls minimap2_outputs # should start to see the new bam files
cd ~/ArcDyn/PlatesAB/PlatesAB_combined/BWA02; ls
bsub < ${SAMTOOLS_BSUB}
bjobs
cd ~/ArcDyn/PlatesAB/PlatesAB_combined/BWA03; ls
bsub < ${SAMTOOLS_BSUB}
bjobs
cd ~/ArcDyn/PlatesAB/PlatesAB_combined/BWA04; ls
bsub < ${SAMTOOLS_BSUB}
bjobs
cd ~/ArcDyn/PlatesAB/PlatesAB_combined/BWA05; ls
bsub < ${SAMTOOLS_BSUB}
bjobs
cd ~/ArcDyn/PlatesAB/PlatesAB_combined/BWA06; ls
bsub < ${SAMTOOLS_BSUB}
bjobs
cd ~/ArcDyn/PlatesAB/PlatesAB_combined/BWA07; ls
bsub < ${SAMTOOLS_BSUB}
bjobs
cd ~/ArcDyn/PlatesAB/PlatesAB_combined/BWA08; ls
bsub < ${SAMTOOLS_BSUB}
bjobs
cd ~/ArcDyn/PlatesAB/PlatesAB_combined/BWA09; ls
bsub < ${SAMTOOLS_BSUB}
bjobs
cd ~/ArcDyn/PlatesAB/PlatesAB_combined/BWA10; ls
bsub < ${SAMTOOLS_BSUB}
bjobs

cd ~/ArcDyn/PlatesAB/PlatesAB_combined/BWA10; ls
bjobs
ls minimap2_outputs # should show new bam files
| true
|
e1cfd3b7ae3116d4eba22a13097ea08ec1eebbbd
|
Shell
|
saraswat2385/admin
|
/monitor/monit-restart.sh
|
UTF-8
| 291
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
# Watchdog for monit itself: if `service monit status` reports it is not
# running, restart it and e-mail a notification. Intended to run from cron,
# hence the explicit PATH.
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games

# `status` prints e.g. "monit is not running" when the daemon is down.
# grep -q replaces the `grep; if [ $? -eq 0 ]` antipattern and keeps the
# probe output out of cron mail.
if service monit status | grep -q not; then
    service monit restart
    echo "Monit restarted on server $(hostname)" | mail -s "Monit restarted on $(hostname)" sys@rtcamp.com
fi
| true
|
7df676fc12b6d1b45aba645d0edd733b155c816d
|
Shell
|
UJo1vHQvgs/secutils
|
/dec
|
UTF-8
| 1,243
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
# Decryption side of a note vault: derives the storage filename and AES key
# from a subject + password, then decrypts the matching .dat file.

# Cache the OS name once; macOS ships md5/shasum, Linux ships md5sum/sha256sum.
unameThis=$(uname -s)

# hash5 STRING — print the 32-hex-char MD5 digest of STRING.
function hash5 {
	if [ "Darwin" = "${unameThis}" ] ; then
		# Quote "$1" so subjects containing spaces hash as one string.
		echo -n "$1" | md5
	else
		echo -n "$1" | md5sum | cut -c 1-32
	fi
}
# hash256 STRING — print the 64-hex-char SHA-256 digest of STRING.
# Uses shasum on macOS, sha256sum elsewhere (selected via $unameThis,
# set at the top of this script).
function hash256 {
	if [ "Darwin" = "${unameThis}" ] ; then
		# Quote "$1" so inputs containing spaces hash as one string.
		echo -n "$1" | shasum -a 256 | cut -c 1-64
	else
		echo -n "$1" | sha256sum | cut -c 1-64
	fi
}
# b64 STRING — print STRING base64-encoded on a single line.
# GNU base64 wraps at 76 columns by default, hence -w 0 on Linux;
# macOS base64 never wraps.
function b64 {
	if [ "Darwin" = "${unameThis}" ] ; then
		# Quote "$1" so inputs containing spaces encode as one string.
		echo -n "$1" | base64
	else
		echo -n "$1" | base64 -w 0
	fi
}
# --- main: prompt for subject/password, derive the key chain, decrypt ---
echo -n 'Subject: '
read SUBJECT
# Password comes from the ENC_PASSWD env var when set, otherwise from a
# silent (-s) prompt.
if [ "" = "${ENC_PASSWD}" ] ; then
echo -n 'Password: '
read -s PASSWD
else
echo Using env variable ENC_PASSWD
PASSWD=${ENC_PASSWD}
fi
# Key derivation chain: sha256(subject+password) -> md5 -> sha256.
# The md5 stage doubles as the on-disk filename, so the filename leaks
# nothing about the subject without the password.
KEY1=`hash256 ${SUBJECT}${PASSWD}`
SUBJ_HASH=`hash5 ${KEY1}`
AES_KEY=`hash256 ${SUBJ_HASH}`
echo
echo AES_KEY = ${AES_KEY}
# The key is base64-wrapped before being handed to openssl -k.
AES_KEY=`b64 ${AES_KEY}`
echo
echo SUBJ_HASH = ${SUBJ_HASH}
echo AES_KEY = ${AES_KEY}
echo
# Encrypted notes live under $SEC_DAT_DIR (default: current directory).
if [ "" = "${SEC_DAT_DIR}" ] ; then
SEC_DAT_DIR=.
fi
ENC_FILE=${SEC_DAT_DIR}/${SUBJ_HASH}.dat
if [ ! -f ${ENC_FILE} ] ; then
echo
echo Cannot open file ${ENC_FILE}
exit 1
fi
echo
echo '----------------------------------------------------------'
# -md md5 matches the legacy key-derivation used when the file was written.
openssl aes-256-cbc -d -a -md md5 -in ${ENC_FILE} -k ${AES_KEY}
echo
echo '----------------------------------------------------------'
| true
|
6cc06198e3c972e1d4aa8aa1e19e66e19555064b
|
Shell
|
gitGNU/gnu_rtty
|
/agelogs.sh
|
UTF-8
| 1,040
| 2.84375
| 3
|
[
"ISC"
] |
permissive
|
#! /bin/sh
# $Id: agelogs.sh,v 1.4 2001-03-24 21:17:39 vixie Exp $

# Copyright (c) 1996 by Internet Software Consortium.
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM DISCLAIMS
# ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS.  IN NO EVENT SHALL INTERNET SOFTWARE
# CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
# DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
# PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
# SOFTWARE.

# Rotate rtty's per-tty logs with the agelog utility, keeping 7 generations.
# DESTPATH is a placeholder substituted at install time.
agelog=DESTPATH/bin/agelog

# One log (and one pid file) per tty device known to rtty.
cd DESTPATH/dev
for tty in *
do
# -m: rename+signal mode (presumably; see agelog(8)) — the daemon whose
# pid is read from DESTPATH/pid/$tty is notified of the rotation.
$agelog -m DESTPATH/log/$tty \
-p `cat DESTPATH/pid/$tty` \
7 \
DESTPATH/log/.aged
done
exit
| true
|
cc93963c5f925860ee8df15f17390e15b78f9d4f
|
Shell
|
anoyo-lin/aws_related
|
/chg_type/mongo_status.sh
|
UTF-8
| 1,181
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# BUGFIX: shebang was `#/bin/bash` (missing `!`), so the line was a plain
# comment and the script ran under whatever shell invoked it.
# Check (and start if needed) mongod on a target instance, reached via the
# profile's bastion host: local ==> bastion ==> target.
set -x
: 'it need to define the ssh config at ~/.ssh/config
it can start/stop mongodb from local ==> bastion ==> target
example of ssh config file:
Host dev
StrictHostKeyChecking no
ForwardAgent yes
Hostname 54.236.225.95
User ubuntu
ProxyCommand /bin/connect-proxy -H proxy.redacted.net:8080 %h %p
IdentityFile /home/88881782/.ssh/my_key
'

# mongo_kill INSTANCE_ID PROFILE — resolve the instance's private IP via the
# AWS CLI, then (despite the name) ensure mongod is running on it and print
# replica-set status. PROFILE doubles as the ssh config host for the bastion.
function mongo_kill(){
#    if [[ "$2" == 'dev' ]]
#    then
#	    ssh_acc='ubuntu'
#    else
#	    ssh_acc='gene'
#    fi
# Reject unknown instance ids (describe-instances prints an error) or a
# missing profile argument before attempting the IP lookup.
if [[ $( aws ec2 describe-instances --instance-ids=$1 --profile $2 2>&1 | grep -c '.*error.*' ) == 1 ]] || [[ $2 == '' ]]
then
echo 'no matched instance id found, quit!'
echo 'Usage: ./mongo_kill.sh i-7061789c dev'
# BUGFIX: was `exit -1`; exit codes are 0-255, use 1.
exit 1
else
IP=$(aws ec2 describe-instances --instance-ids=$1 --profile $2 --query 'Reservations[0].Instances[0].PrivateIpAddress'|tr -d '"')
fi
# Outer heredoc runs on the bastion ($2); inner quoted heredoc ('END_STOP')
# runs on the target, with \$(...) escaped so it expands remotely.
ssh $2 << EOF
sudo su - ubuntu
ssh ubuntu@${IP} /bin/bash << 'END_STOP'
if [[ \$(ps -ef|grep mongo|grep -v grep) == '' ]]
then
/etc/init.d/mongodb start
sleep 5
/opt/app/mongodb/bin/mongo --quiet --eval 'rs.status()'
else
/opt/app/mongodb/bin/mongo --quiet --eval 'rs.status()'
fi
END_STOP
EOF
}

mongo_kill $1 $2
| true
|
1412113bc6063f96ccc7619343667bf8ebca40bb
|
Shell
|
seanbuchholz/dotfiles
|
/zsh/functions/ff.zsh
|
UTF-8
| 130
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/zsh
# ff PATTERN... — recursively list files (ls -l style plus path) whose name
# contains the joined arguments, case-insensitively, pruning any directory
# the current user cannot read.
ff() {
  find . ! -readable -prune -o -type f -iname '*'"$*"'*' -ls -print
}
| true
|
2f0b3a95edced76e5013c0391d3760a5da3cf523
|
Shell
|
Wei-N-Ning/gdbPit
|
/inspection/prettyprint/graph_printer_gdb.sh
|
UTF-8
| 1,890
| 3.5625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Demo harness: build graph.cpp with debug info and drive GDB over it in
# batch mode so the Python pretty-printer (graph_printer_gdb) formats the
# Graph object. CC/CXX/DBG are overridable via the environment.

# source:
# http://tromey.com/blog/?p=524
# https://www.rethinkdb.com/blog/make-debugging-easier-with-custom-pretty-printers/

# 1) the source code shows a mock-up version of a graph-based computing
#    network that has a flaw causing segfault (documented in source);
#    the developer wants to debug this flaw by stopping the program at
#    each call to the compute() method of the Node object; he then
#    may take advantage of the pretty-print feature implemented in
#    python to inspect the data and track down the offending code;
# 2) this script builds the program and automates the loading and running
#    of SUT in gdb.
# 3) note GDB must "see" the python script therefore PYTHONPATH is
#    modify to enable that; in an environment with a package/shell management
#    system this can be configured in the package or payload
# 4) the GDB command can also be written in a script file that is set up
#    automatically by the environment management system.

CC=${CC-gcc}
CXX=${CXX-g++}
DBG=${DBG-gdb}

set -e

TEMPDIR=/tmp/sut

# Remove the work dir and the build artifacts it may have produced.
tearDown() {
rm -rf ${TEMPDIR} /tmp/_ /tmp/_.* /tmp/__*
}

setUp() {
tearDown
mkdir -p ${TEMPDIR}
}

# $1: additional cxxflags
# Compiles ./graph.cpp into $TEMPDIR/_ and records the path in $sutbin.
sutbin=
buildSUT() {
${CXX} -std=c++14 ${1} -o ${TEMPDIR}/_ ./graph.cpp
sutbin=${TEMPDIR}/_
}

# Make the printer module importable from inside GDB's embedded Python.
modulePath=$( realpath ./graph_printer.py )
modulePath=$( dirname ${modulePath} )
moduleName=simple_printer
export PYTHONPATH=${modulePath}:${PYTHONPATH}

# to show the raw data structure contents:
# print /r g
# note that core::graph::Graph (g) is printed by the new printer
# ::Graph (phony) is still printed by the default printer
# Runs GDB in batch mode over a generated command file: register the
# printer, stop just after g/phony are initialised, print both, quit.
debugSUT() {
cat >${TEMPDIR}/commands.gdb <<"EOF"
py import graph_printer_gdb
py graph_printer_gdb.init()
start
n 3
print g
print phony
quit
EOF
${DBG} -batch -command=${TEMPDIR}/commands.gdb ${sutbin}
}

setUp
buildSUT "-g"
debugSUT
tearDown
| true
|
9620d12256ae2ed641b936eca22d06c5b67d6d9e
|
Shell
|
marekdynowski/bwunicluster_bsps
|
/01_basic_job_script_stdout/01_basic_job_script_stdout.moab
|
UTF-8
| 692
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/bash
# 01_basic_job_script_stdout.pbs
#
#
# Created by Marek Dynowski on 15/12/14.

# MOAB job script for submitting a simple job to the batch system
# of the bwUniCluster. This job writes out the hostname of the
# worker node on which it is executed

### MOAB directives
## Serial job 1 core on one node is used
#MSUB -l nodes=1:ppn=1
## Submitting into class/queue singlenode
#MSUB -q singlenode
## The maximum time the jobs takes is restricted to two minutes
#MSUB -l walltime=00:00:05:00
## Submitting into the reservation created for this course
## (only necessary for the course)
#MSUB -A workshop
#MSUB -l advres=bwhpc-workshop.148

# Run from the directory the job was submitted from, report the worker
# node's hostname, then idle briefly so the job is observable in the queue.
cd ${MOAB_SUBMITDIR}/
hostname
sleep 60
| true
|
ba132e111a7d94b769fab7698107f4455af62fd4
|
Shell
|
fergalmoran/dotfiles
|
/install.sh
|
UTF-8
| 2,214
| 3.515625
| 4
|
[] |
no_license
|
# This script creates symlinks from the home directory to any desired dotfiles in /home/fergalm/dotfiles
# NOTE(review): the home directory is hard-coded throughout instead of using
# $HOME, so the script only works for this one user — confirm intentional.
############################

#ignore

########## Variables

MACHINE_TYPE=`uname -m`
dir=/home/fergalm/dotfiles                    # dotfiles directory
olddir=/home/fergalm/dotfiles_old             # old dotfiles backup directory
files="gitconfig pylint.rc tmux.conf muttrc zshrc bash_aliases bash_functions bash_dirhooks sqliterc Xresources"    # list of files/folders to symlink in homedir

##########

# create dotfiles_old in homedir
echo "Creating $olddir for backup of any existing dotfiles in ~"
mkdir -p $olddir
echo "...done"

# change to the dotfiles directory
echo "Changing to the $dir directory"
cd $dir
echo "...done"

# move any existing dotfiles in homedir to dotfiles_old directory, then create symlinks
for file in $files; do
echo "Moving any existing dotfiles from ~ to $olddir"
mv /home/fergalm/.$file /home/fergalm/dotfiles_old/
echo "Creating symlink to $file in home directory."
ln -s $dir/$file /home/fergalm/.$file
done

echo Setting git stuff
git config --global user.email "fergal.moran@gmail.com"
git config --global user.name "Fergal Moran"

# NOTE(review): everything below this `exit` is dead code — the package
# installation / oh-my-zsh bootstrap tail never runs. Confirm whether it
# was disabled deliberately.
exit
read -p "Press enter to continue"

# merge Xresources
xrdb -merge /home/fergalm/.Xresources

git clone https://github.com/powerline/fonts.git pwfonts
cd pwfonts && ./install.sh

#install tmux plugin manager
git clone https://github.com/tmux-plugins/tpm /home/fergalm/.tmux/plugins/tpm

sudo apt-get install -y nfs-common exuberant-ctags build-essential cmake \
python-dev python3-dev libssl-dev vim-youcompleteme autojump htop  \
ncdu python-pip python3-pip byobu zsh vim-gtk python-setuptools \
tree git-extras cowsay fortune winbind libpq-dev xclip whois \
git-flow

# Setup default locales
sudo locale-gen "en_IE.UTF-8"

sudo pip install livereload speedtest-cli virtualenv virtualenvwrapper

#install git flow completion
OMF=/home/fergalm/.oh-my-zsh/oh-my-zsh.sh
if [ ! -f $OMF ]; then
sh -c "$(curl -fsSL https://raw.githubusercontent.com/robbyrussell/oh-my-zsh/master/tools/install.sh)"
git clone https://github.com/bobthecow/git-flow-completion /home/fergalm/.oh-my-zsh/custom/plugins/git-flow-completion
fi
| true
|
85400eac4f177d8ace509ae8fdd04b414d50b1fa
|
Shell
|
intel-analytics/BigDL
|
/ppml/trusted-python-toolkit/start-scripts/start-python-numpy-performance-sgx.sh
|
UTF-8
| 358
| 2.625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Run the NumPy benchmark twice: once natively as a baseline, then again
# inside an SGX enclave via gramine-sgx.
# Options: -n <size> (default 4096), -t <dtype> (default int).

num=4096
dtype='int'
while getopts "n:t:" opt
do
case $opt in
n)
num=$OPTARG
;;
t)
dtype=$OPTARG
;;
esac
done

cd /ppml
./init.sh

# Native (non-enclave) baseline run.
bash examples/numpy/CLI.sh -n $num -p native -t $dtype

# gramine-sgx picks the enclave command up from the sgx_command env var.
export sgx_command="bash examples/numpy/CLI.sh -n $num -p sgx -t $dtype"
gramine-sgx bash 2>&1
| true
|
55ab15604ee292e51179b35d362b361a1cbb1110
|
Shell
|
ed-barberis/devops-2.0
|
/provisioners/scripts/oracle/install_ol7_oracle_uekr6.sh
|
UTF-8
| 1,480
| 2.765625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh -eux
# install the oracle linux 7 uekr6 kernel.
# Flow: pin repos to UEKR6 -> update -> install kernel + headers ->
# re-run Oracle's yum reconfiguration -> re-pin repos (the reconfig
# rewrites the repo files, so the pinning must be done twice).

# ensure that the uekr6 kernel is enabled by default. ----------------------------------------------
yum-config-manager --enable ol7_UEKR6
yum-config-manager --disable ol7_UEKR5
yum-config-manager --disable ol7_UEKR4
yum-config-manager --disable ol7_UEKR3
yum-config-manager --enable ol7_addons
yum-config-manager --enable ol7_developer_EPEL
yum-config-manager --enable ol7_software_collections
yum-config-manager --enable ol7_optional_latest

# install the latest ol7 updates. ------------------------------------------------------------------
yum -y update

# install kernel development tools and headers for building guest additions. -----------------------
yum -y install kernel-uek-devel
yum -y install kernel-uek

# remove package kit utility to turn-off auto-update of packages. ----------------------------------
yum -y remove PackageKit

# update the yum configuration. --------------------------------------------------------------------
/usr/bin/ol_yum_configure.sh

# re-enable the correct repositories after the yum configuration update. ---------------------------
yum-config-manager --enable ol7_UEKR6
yum-config-manager --disable ol7_UEKR5
yum-config-manager --disable ol7_UEKR4
yum-config-manager --disable ol7_UEKR3
yum-config-manager --enable ol7_addons
yum-config-manager --enable ol7_developer_EPEL
yum-config-manager --enable ol7_software_collections
yum-config-manager --enable ol7_optional_latest
| true
|
f52e78107c01b4158476b8cda2aa744e035d3e30
|
Shell
|
meschnigm/webserver-public
|
/caterva/analysis/BC_Check.sh
|
UTF-8
| 3,573
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
# BC_Check.sh — interactive commissioning checklist for a device (filesystem
# usage, cron jobs, swarm init scripts, router config). This section defines
# the ANSI-color output helpers; green = pass, red = fail, turquoise = info.

# Print a green pass message.
function Echo_ok ()
{
echo -e "\033[0;32m   Check war ok.\033[0m"
}

# Print a red fail message.
function Echo_nok ()
{
echo -e "\033[0;31m   Check war nicht ok.\033[0m"
}

# The Color_* helpers switch the terminal color for subsequent output;
# pair them with Color_off to reset.
function Color_green_on ()
{
echo -e "\033[0;32m "
}

function Color_turkis_on ()
{
echo -e "\033[0;36m "
}

function Color_red_on ()
{
echo -e "\033[0;31m "
}

function Color_off ()
{
echo -e "\033[0m"
}
# Pass if /var/log is below 50% full.
function Check_FS_var_log ()
{
echo -e "\n---Pruefe FS /var/log < 50% Usage"
# df --output=pcent prints e.g. " 42%"; strip the header and the % sign.
VAR_LOG_USAGE_IN_PCT=`df --output=pcent /var/log | tail -1`
VAR_LOG_USAGE=`echo $VAR_LOG_USAGE_IN_PCT | cut -d "%" -f1`
if [ $VAR_LOG_USAGE -lt 50 ] ; then
Echo_ok
else
Echo_nok
fi
}

# Pass if fewer than 200 rotated invoiceLog files exist.
function Check_number_of_invoiceLog ()
{
echo -e "\n---Pruefe Anzahl invoiceLog Dateien < 200 "
NUMBER_OF_invoiceLog=`ls -1 /var/log/invoiceLog.csv.* | wc -l`
if [ $NUMBER_OF_invoiceLog -lt 200 ] ; then
Echo_ok
else
Echo_nok
fi
}

# Command_exist COUNT — shared verdict helper: 0 matches found => fail path
# inverted; callers pass a grep match count, 0 meaning "missing".
function Command_exist ()
{
if [ $1 -eq 0 ] ; then
Echo_ok
else
Echo_nok
fi
}

# Pass if the log-cleanup cron entry runs at exactly 00:00 daily.
function Check_crontab_log_cleanup ()
{
echo -e "\n---Pruefe crontab Job /home/admin/bin/log-cleanup "
CRONTAB_START=`cat /etc/crontab | grep -v "#" | grep "/home/admin/bin/log-cleanup" | cut -d " " -f1-5`
if [ "$CRONTAB_START" = "00 0 * * *" ] ; then
Echo_ok
else
Echo_nok
fi
}

# Check_crontab CMD — verify CMD appears (uncommented) in /etc/crontab.
function Check_crontab ()
{
echo -e "\n---Pruefe crontab Job $1 "
COMMAND_EXISTS=`cat /etc/crontab | grep -v "#" | grep "$1"| wc -l`
Command_exist $COMMAND_EXISTS
}
# Verify /etc/rc.local contains an (uncommented) iptables rule.
function Check_etc_rclocal ()
{
echo -e "\n---Pruefe /etc/rc.local"
COMMAND_EXISTS=`cat /etc/rc.local | grep -v "#" | grep "iptables"| wc -l`
Command_exist $COMMAND_EXISTS
}

# Show the current date for the operator to eyeball; ENTER continues.
function Check_date ()
{
echo -e "\n---Pruefe Datum"
DATE=`date`
echo -e "Pruefe das Datum:\033[0;32m $DATE . ENTER fuer weiter: \033[0m"
read _ANSWER_
}

# Display the head of the swarm-comm init script for manual review.
function Check_swarm_comm ()
{
echo -e "\n---Pruefe Datei /etc/init.d/swarm-comm\n"
Color_turkis_on
head -19 /etc/init.d/swarm-comm
Color_off
echo -e "\nPruefe die Datei. \033[0;32m ENTER fuer weiter: \033[0m"
read _ANSWER_
}

# Display swarm-switch-on for manual review, then verify exactly 8
# runlevel links to it exist under /etc.
function Check_swarm_switch_on ()
{
echo -e "\n---Pruefe Datei /etc/init.d/swarm-switch-on\n"
Color_turkis_on
cat /etc/init.d/swarm-switch-on
Color_off
echo -e "\nPruefe die Datei. \033[0;32m ENTER fuer weiter: \033[0m"
read _ANSWER_
echo -e "\n---Pruefe Verlinkungen auf /etc/init.d/swarm-switch-on"
NUM_FILES=`sudo find /etc -name \*swarm-switch\* -exec ls -1 {} \; | wc -l`
if [ $NUM_FILES -eq 8 ] ; then
Echo_ok
else
Echo_nok
fi
}

# Sony devices only: fetch the router's monitor.config via ssh for manual
# review (ssh password hint is printed for the operator).
function Check_router ()
{
bmmType=$(cat /home/admin/registry/out/bmmType)
if [ "$bmmType" = "sony" ] ; then
echo -e "\n---Pruefe Router Einstellungen\n\033[0;31m Sollte ein Passwort erfragt werden, dann bitte \033[0m admin01 \033[0;31m eingeben."
echo -e "\033[0;32m ENTER fuer weiter: \033[0m"
read _ANSWER_
ssh root@192.168.0.105 cat /root/monitor.config
echo -e "\nPruefe die Router Einstellungen.\033[0;32m \n ENTER fuer weiter: \033[0m"
read _ANSWER_
fi
}

#######################################
# MAIN
# Checks run in fixed order; interactive steps block on ENTER.
Check_FS_var_log
Check_number_of_invoiceLog
Check_crontab_log_cleanup
Check_crontab /home/admin/bin/timeupdate
Check_crontab /home/admin/bin/swarmcomm.sh
Check_etc_rclocal
Check_date
Check_swarm_comm
Check_swarm_switch_on
Check_router
Color_green_on
echo -e "\n  ----Der Check ist beendet----"
Color_off
| true
|
bd289055c6bbe66b4799b0d4b34501f90d187136
|
Shell
|
stevenrbrandt/PhylanxBuilder
|
/versions.sh
|
UTF-8
| 122
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/bash
# Print the date of the most recent commit in each local source checkout.
for v in /hpx /blaze /blaze_tensor /pybind11 ~/phylanx
do
    echo -n "VERSION: $v "
    # Guard the cd: previously a missing checkout left us in the prior
    # directory and silently reported the wrong repository's date.
    if cd "$v" 2>/dev/null; then
        git log -1 --format=%cd
    else
        echo "(checkout not found)"
    fi
done
| true
|
ad52679c86c2a00ea5c77fd98d9cf5c75352bea6
|
Shell
|
blakemcbride/Rand-E-Editor
|
/local.sh
|
UTF-8
| 2,172
| 4.21875
| 4
|
[] |
no_license
|
#!/bin/sh
#   'local.sh' : shell script to update the 'NewVersion' shell script with the
#       correct path to 'bash'.
#       The program name can be specified, if it must be something else than
#       the default 'e'.
#   This shell can run on linux and on AIX, sun Solaris and LynxOS.

#   By Fabien Perriollat CERN/PS <Fabien.Perriollat@cern.ch>
#      last edit : February 2001

# Update the script with the bash path
# up_bash FILE — rewrite FILE's `#!` line to point at the locally detected
# $BASH (set in main below), preserving the file's timestamp via touch -r,
# and ensure it is executable. No-op when the shebang is already correct.
up_bash ()
{
if grep "^#\!$BASH" $1 > /dev/null 2>&1 ; then \
echo "$1 is OK"; \
else \
rm -f $1.new; \
sed "/^#\!/ s|^\(#\!\)\(.*\)|\1$BASH|" $1 > $1.new; \
touch -r $1 $1.new; \
chmod a+x $1.new; \
ls -l $f $1.new; \
mv -f $1.new $1; \
echo "$1 is updated"; \
fi
chmod a+x $1
}
# --- main: parse args, patch the helper scripts, optionally rename PROG ---
synopsis="synopsis : $0 [-h] [--help] [local_program_name]"
if [ $# -gt 0 ]; then
if test $1 = "-h" -o $1 = "--help"; then
echo "Script to update some Rand editor files according to the local environement"
echo $synopsis
exit 0;
else
if [ $# -gt 1 ]; then
echo $0 : Invalid parameters \"$@\"
echo $synopsis
exit 1;
fi;
fi;
fi;

fnv="e19/NewVersion"
fnr="new_release.sh"
fmk="./Makefile"
BASH=`which bash`
OS=`uname -s`

# Point both helper scripts at the local bash.
up_bash $fnv
up_bash $fnr

# Update the NewVersion script
#   (superseded by up_bash above; kept for reference)
#   if ! grep -q -e "^#\!$BASH" $fnv; then \
#	rm -f $fnv.new; \
#	sed "/^#\!/ s|\(^#\!\)\(.*\)|\1$BASH|" $fnv > $fnv.new; \
#	touch -r $fnv $fnv.new; \
#	chmod a+x $fnv.new; \
#	ls -l $f $fnv.new; \
#	mv -f $fnv.new $fnv; \
#	echo "$fnv is updated"; \
#   else \
#	echo "$fnv is OK"; \
#   fi

# No program name given: the shebang fix above is all that was requested.
if [ $# -eq 0 ]; then
exit 0;
fi

# A program name is provided, update PROG in ./Makefile
echo "On $OS the local program name will be \"$1\""
echo "   Are you sure ?"

# The sed script edits only the Makefile section for this OS: it drops any
# existing PROG= line inside the matching ifeq block and inserts the new one
# just before that block's endif.
sed_script="./Rand_sed.script"
rm -f $sed_script
cat > $sed_script <<-EOF
/^ifeq (.*, $OS)/,/^endif/ {
/PROG=.*/ d
/^endif/i\\
PROG=$1
}
EOF
sed -f $sed_script $fmk > $fmk.new
if [ $? -ne 0 ]; then
echo "Error in updating $fmk"
exit 2
fi
rm -f $sed_script

# save previous version of Makefile
rm -f $fmk.ref
mv $fmk $fmk.ref
mv $fmk.new $fmk
echo "On $OS the local program name will be \"$1\""
echo "Previous version on \"$fmk\" is saved in \"$fmk.ref\""
exit 0
| true
|
92ace9049a38a97a6fb40f2e1ec070f6eaab354f
|
Shell
|
Unitech/jquery-youtube-player
|
/build/release.sh
|
UTF-8
| 1,320
| 3.5625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
#
# @description	: this BASH script is used to build the jquery youtube player plugin
# @author	: Richard Willis
# @project	: http://github.com/badsyntax/jquery-youtube-player
# @requirements : curl, zip, git, rhino
#
# Flow: prompt for version -> minify via the Closure Compiler web service ->
# commit the min file -> build + commit a versioned release zip -> git tag.

echo -n "Enter the version for this release: "
read ver

if [ ! $ver ]; then
echo "Invalid version."
exit
fi

#echo "Checking.."
#lint=$(js jslint.js ../js/jquery.youtube.player.js)

echo "Building.."

name=jquery-youtube-player
in=../js/jquery.youtube.player.js
out=../js/jquery.youtube.player.min.js
thedate=$(date)

# The min file starts with the copyright header, version/date substituted in.
cat copywrite | sed "s/\${ver}/$ver/g;s/\${time}/$thedate/g" > $out

# Append the minified source from Google's hosted Closure Compiler.
curl -s \
-d compilation_level=SIMPLE_OPTIMIZATIONS \
-d output_format=text \
-d output_info=compiled_code \
--data-urlencode "js_code@${in}" \
http://closure-compiler.appspot.com/compile \
>> $out

git add $out && git commit -m "added ${ver} min version"

# Stage a clean release directory, zip it, commit the archive, then tag.
rm -rf "${name}-${ver}" && mkdir "${name}-${ver}" && cd "${name}-${ver}"

cp -r ../../js/ .
cp -r ../../css/ .
cp -r ../../examples/ .
cp ../../index.html .
cp ../../README.md .

cd ../

zip -r "${name}-${ver}.zip" "${name}-${ver}"

rm -rf "${name}-${ver}"

git add "${name}-${ver}.zip" && git commit -m "added v${ver} release archive" && git push

cd ../

git tag -a $ver -m "tagged version ${ver}" && git push --tags

echo "done."
| true
|
570ba7938754c4f1ecec11a2b52a62f8e3fefef6
|
Shell
|
tetherless-world/whyis
|
/script/release
|
UTF-8
| 282
| 2.828125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Publish a whyis release: upload the sdist to PyPI, push the Docker images,
# and tag the release in git.
# Check release version in Dockerfile and setup.py
VERSION=$(python whyis/_version.py)
# Guard: without this, an empty version would upload/tag garbage artifacts.
if [ -z "${VERSION}" ]; then
    echo "Could not determine version" >&2
    exit 1
fi
echo "${VERSION}"
twine upload "dist/whyis-${VERSION}.tar.gz"
docker push "tetherlessworld/whyis:${VERSION}"
docker push tetherlessworld/whyis:latest
git tag -f "v${VERSION}"
# NOTE(review): plain `git push` does not publish the tag; `git push --tags`
# may be intended — confirm with the release process before changing.
git push
| true
|
9556e2f91aab33ad3f3a08e392f21fbe677663f5
|
Shell
|
djlabbe/Syntinel
|
/_docs/_scripts/randomResult.sh
|
UTF-8
| 109
| 2.96875
| 3
|
[] |
no_license
|
# Randomly succeed or fail: an even $RANDOM prints a greeting to stdout,
# an odd one prints "error" to stderr.
number=$RANDOM
remainder=$((number % 2))
if [ "$remainder" -eq 0 ]; then
  echo "Hello World"
else
  echo "error" >&2
fi
| true
|
317d717a9de98e2a72b0f7d132b30cc72bc2e3c6
|
Shell
|
chrisoverzero/aws-embedded-metrics-dotnet
|
/scripts/publish-package.sh
|
UTF-8
| 853
| 3.21875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
#
# Build the EMF NuGet packages in Release configuration and push them
# to nuget.org.
#
# usage:
#   export NUGET_API_KEY=...
#   ./publish-package.sh
source ./utils.sh
# publish <package-name>
# Packs src/<package-name> and pushes the resulting .nupkg to nuget.org.
function publish() {
    rootdir=$(git rev-parse --show-toplevel)
    rootdir=${rootdir:-$(pwd)} # in case we are not in a git repository (Code Pipelines)
    package_dir="$rootdir/src/$1"
    output_dir="$package_dir/bin/Release"
    # Quote the paths so directories containing spaces don't break pushd.
    pushd "$package_dir"
    dotnet pack -c Release
    pushd "$output_dir"
    dotnet nuget push *.nupkg --api-key "$NUGET_API_KEY" --source https://api.nuget.org/v3/index.json
    popd
    popd
}
# validate is provided by utils.sh: fails fast when the variable is unset.
validate "$NUGET_API_KEY" "NUGET_API_KEY"
validate "$CODEBUILD_BUILD_NUMBER" "CODEBUILD_BUILD_NUMBER"
publish Amazon.CloudWatch.EMF
publish Amazon.CloudWatch.EMF.Web
| true
|
9059c0721d0c17fe20463dc6c642096539ee14fd
|
Shell
|
mola/pfsense-builder
|
/sign_setup.sh
|
UTF-8
| 285
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/sh
# Generate an RSA signing key pair (repo.key / repo.pub) under /root/sign/
# and record the public key's SHA-256 fingerprint.
mkdir -p /root/sign/
cp sign.sh /root/sign/sign.sh
# Abort if we cannot enter the signing directory, so the key material is
# never written to the wrong place.
cd /root/sign/ || exit 1
openssl genrsa -out repo.key 2048
chmod 0400 repo.key
openssl rsa -in repo.key -out repo.pub -pubout
# Keep command output out of the printf format string; sha256(1) here is the
# BSD digest utility (FreeBSD/pfSense).
printf 'function: sha256\nfingerprint: %s\n' "$(sha256 -q repo.pub)" > fingerprint
chmod +x /root/sign/sign.sh
| true
|
79715a2c89af12cead1b2b170c55259aa990193b
|
Shell
|
dwsyoyo/lambda_functions
|
/sslyze/build_sslyze.sh
|
UTF-8
| 1,105
| 3.515625
| 4
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
#!/bin/bash
# Build an AWS Lambda deployment bundle (sslyze.zip) containing sslyze and
# 18F/domain-scan with their dependencies.
###
# Set up the Python virtual environment
###
VENV_DIR=/venv
python -m venv $VENV_DIR
source $VENV_DIR/bin/activate
###
# Update pip and setuptools
###
pip install --upgrade pip setuptools
###
# Install sslyze
###
# Version is pinned so the produced bundle is reproducible.
pip install --upgrade "sslyze==1.4.1"
###
# Install domain-scan
###
mkdir domain-scan
wget -q -O - https://api.github.com/repos/18F/domain-scan/tarball | tar xz --strip-components=1 -C domain-scan
pip install --upgrade -r domain-scan/lambda/requirements-lambda.txt
###
# Leave the Python virtual environment
###
deactivate
###
# Set up the build directory
###
BUILD_DIR=/build
mkdir -p $BUILD_DIR/bin
###
# Copy all packages, including any hidden dotfiles. Also copy the
# sslyze executable.
###
# NOTE(review): paths assume the venv uses Python 3.6; update if the base
# image's interpreter changes.
cp -rT $VENV_DIR/lib/python3.6/site-packages/ $BUILD_DIR
cp -rT $VENV_DIR/lib64/python3.6/site-packages/ $BUILD_DIR
cp $VENV_DIR/bin/sslyze $BUILD_DIR/bin
###
# Zip it all up
###
OUTPUT_DIR=/output
if [ ! -d $OUTPUT_DIR ]
then
mkdir $OUTPUT_DIR
fi
if [ -e $OUTPUT_DIR/sslyze.zip ]
then
rm $OUTPUT_DIR/sslyze.zip
fi
cd $BUILD_DIR
# -r recurse, -q quiet, -9 best compression.
zip -rq9 $OUTPUT_DIR/sslyze.zip .
| true
|
a1b8e03b7b8bb308c4d4733276731d975515dc3f
|
Shell
|
esantanche/emsreact
|
/scripts/container-git-tasks.sh
|
UTF-8
| 527
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/zsh
# Reminder script for the manual git steps after running create-react-app.
# Must be launched from the repository's scripts/ folder.
if [ "$(basename "$PWD")" != "scripts" ] ; then
	echo "You should be in the scripts folder now"
	# Exit non-zero so callers can detect the wrong-directory error
	# (previously this error path exited 0).
	exit 1
fi
echo "For now it's manual"
echo "After you run the create react app script, you need a git checkout ."
echo "which gets rid of the changes create react app did (apart for node_modules)"
echo "Maybe you need others things"
echo "probably you need to do some git pull"
echo "if there are a lot of changes and untracked files, use git stash -u"
echo "Finished! Next step: container-after-git-pull-tasks.sh"
exit
| true
|
3afde33acf34d8c7787e6cfa053d23bcaf4e7f83
|
Shell
|
hmdlab/frame_diff
|
/scripts/composite.sh
|
UTF-8
| 1,000
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
# Run bin/frame_diff twice over three consecutive frames: first pass writes
# a motion mask, second pass composites onto a base image.
# Take the directory part of ${0}.
cwd=`dirname "${0}"`
# If ${0} is a relative path, cd into that directory and use pwd to resolve
# the absolute path.
expr "${0}" : "/.*" > /dev/null || cwd=`(cd "${cwd}" && pwd)`
# All input/output paths below are relative to the project root.
cd "${cwd}/.." || exit 1
IN1="images/001.bmp"
IN2="images/002.bmp"
IN3="images/003.bmp"
BASE="images/cambridge.bmp"
THETA=10
ALPHA1="out/alpha1vs2.bmp"
ALPHA2="out/alpha2vs3.bmp"
OUT_HIST="out/hist.tsv"
OUT_COMPOSITE="out/composite.bmp"
OUT_MASK="out/mask.bmp"
# First pass: produce the motion mask.
# (The original had a stray trailing backslash after "-out $OUT_MASK" which
# silently spliced the following comment line into this command.)
bin/frame_diff -in1 "$IN1" \
               -in2 "$IN2" \
               -in3 "$IN3" \
               -histogram "$OUT_HIST" \
               -alpha1 "$ALPHA1" \
               -alpha2 "$ALPHA2" \
               -theta "$THETA" \
               -out "$OUT_MASK"
# Second pass: run the binary again to composite onto the base image.
bin/frame_diff -in1 "$IN1" \
               -in2 "$IN2" \
               -in3 "$IN3" \
               -histogram "$OUT_HIST" \
               -alpha1 "$ALPHA1" \
               -alpha2 "$ALPHA2" \
               -theta "$THETA" \
               -out "$OUT_COMPOSITE" \
               -base "$BASE"
| true
|
e4c6c53f955ffbc150411e97bc35b685bcc34189
|
Shell
|
chigui271/teamred
|
/watchdog_sysrq.sh
|
UTF-8
| 593
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/sh
# Sysrq-based watchdog example script for TeamRedMiner. This script
# issues a hard form of reboot through the linux sysrq subsystem. The
# kernel must have support for sysrq to begin with, but that is
# normally the case.
#
# NOTE: the miner must be run as root for the watchdog script to work
# correctly.
# Activate sysrq
# (writing 1 enables all sysrq functions)
echo "1" > /proc/sys/kernel/sysrq
# Sync disks
echo "s" > /proc/sysrq-trigger
/bin/sleep 1
# Remount all disks read-only.
echo "u" > /proc/sysrq-trigger
/bin/sleep 1
# Issue the hard reboot command.
# (immediate reboot, without further syncing or unmounting)
echo "b" > /proc/sysrq-trigger
| true
|
8d63d88d83ecf70d4b9b3e9f6380c568e05c2ddb
|
Shell
|
RedHatInsights/drift-backend
|
/run_unit_tests.sh
|
UTF-8
| 339
| 2.546875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# this script is a wrapper for nosetests. It ensures that
# 'prometheus_multiproc_dir' is set up and torn down.
# $( ) instead of backticks; quote the path when removing it.
TEMPDIR=$(mktemp -d)
export UNLEASH_TOKEN="token"
# Run pytest, then the report generator, each with the scratch dir exported
# per-command; remove the dir afterwards.
# NOTE(review): the temp dir is only removed when both steps succeed; it is
# left behind on failure (possibly intentional, for debugging).
prometheus_multiproc_dir=$TEMPDIR pytest --cov-report xml "$@" && prometheus_multiproc_dir=$TEMPDIR python generate_report.py test_reports.toml && rm -rf "$TEMPDIR"
| true
|
59b3ee7b878c4d37af61d08c97bfa9f6c670e8c2
|
Shell
|
ash12358/ssh
|
/openSakura
|
UTF-8
| 501
| 2.65625
| 3
|
[] |
no_license
|
#! /bin/bash
# Pin the frp server hostname, download the Sakura frp client, and start it,
# retrying on failure.
echo "115.238.185.30 frp.tcotp.cn" >> /etc/hosts
wget https://cdn.tcotp.cn:4443/client/Sakura_frpc_linux_amd64.tar.gz
tar -xzvf Sakura_frpc_linux_amd64.tar.gz
# Tunnel node ids:
# 30 Los Angeles
# 38 Singapore
# 39 Montreal
# 45 New York, USA
chmod +x Sakura_frpc_linux_amd64
# Feed credentials and the node id (45) to the client on stdin.
echo -e 'ash12358\n12358a\n45' | nohup ./Sakura_frpc_linux_amd64 &
# NOTE(review): $? after backgrounding with `&` is the status of launching
# the job (effectively always 0), not of the client itself — this retry
# loop likely never executes; `wait $!` may be what was intended.
while [[ $? -eq 1 ]]
do
echo 'open Sakura failed'
sleep 5s
echo -e 'ash12358\n12358a\n45' | nohup ./Sakura_frpc_linux_amd64 &
done
echo 'open Sakura success'
| true
|
1afab22fd518b50893b207026444c867b9d80e88
|
Shell
|
YilunLiu/CSE120_PA1_Tests
|
/test.sh
|
UTF-8
| 888
| 4.15625
| 4
|
[] |
no_license
|
# Restore the original source file, remove scratch outputs, and exit.
function exitProgram {
cp "$TEST_DIR/pa1f_orginal.c" pa1f.c
rm my_output standard_output
exit
}
# Directory holding the per-test source files.
TEST_DIR="./tests"
FAILED_TEST=0
# For each test source: build it as pa1f, run both the built binary and the
# reference binary, drop the first output line of each, and diff the rest.
for testfile in `ls $TEST_DIR`; do
echo "Testing file: $testfile ..."
cp "$TEST_DIR/$testfile" pa1f.c
make > /dev/null
./pa1f &> tmp
# NOTE(review): `tail +2` is the historic spelling of `tail -n +2`
# (skip the first line); not all tail implementations accept it.
tail +2 tmp > my_output
./pa1f_standard &> tmp
tail +2 tmp > standard_output
rm tmp
diff my_output standard_output > /dev/null
if [ $? -ne 0 ]; then
FAILED_TEST=`expr $FAILED_TEST + 1`
# Show the mismatch interactively and ask whether to keep going.
vimdiff my_output standard_output 2> /dev/null
read -p "Do you wish to continue?[y/n] " yn
case $yn in
[Yy]* ) continue;;
* ) echo "Exiting the program..."; exitProgram;;
esac
else
echo "$testfile passed."
fi
done
if [[ $FAILED_TEST -eq 0 ]]; then
echo "ALL TESTS have passed!"
fi
exitProgram
| true
|
8765cf7ab4a3854955ecb629969812176e15304d
|
Shell
|
nabaco/dotfiles
|
/bash/dot-bashrc
|
UTF-8
| 7,750
| 3.390625
| 3
|
[] |
no_license
|
# vim: foldmethod=marker ft=bash
# ~/.bashrc: executed by bash(1) for non-login shells.
# see /usr/share/doc/bash/examples/startup-files (in the package bash-doc)
# for examples
# If not running interactively, don't do anything
case $- in
*i*) ;;
*) return;;
esac
# Ansi color code variables{{{1
# These are wrapped in \[ \] so bash excludes them from prompt-length
# calculations; they are later expanded with ${var@P}.
red='\[\e[0;91m\]'
blue='\[\e[0;94m\]'
green='\[\e[0;92m\]'
yellow='\[\e[0;33m\]'
# NOTE(review): the leading space inside this value looks unintentional —
# it is emitted into the prompt wherever purple is used; confirm.
purple=' \[\e[0;35m\]'
white='\[\e[0;97m\]'
expand_bg='\[\e[K\]'
blue_bg='\[\e[0;104m\]${expand_bg}'
red_bg='\[\e[0;101m\]${expand_bg}'
green_bg='\[\e[0;102m\]${expand_bg}'
bold='\[\e[1m\]'
uline='\[\e[4m\]'
reset='\[\e[0m\]'
#}}}
# Functions {{{1
# Status glyphs (currently unused by task_indicator below).
TICK="✓"
CROSS="✗"
URGENT="❗"
OVERDUE="☠️"
DUETODAY="😱"
DUETOMORROW="📅"
# Render a compact todoist summary for the prompt, e.g. "[O:1|N:2|D:0|T:1|I:3]"
# (Overdue / Next / Due today / Tomorrow / Inbox). Prints an empty string
# when the todoist CLI is not installed.
function task_indicator {
    local ti="["
    # command -v is the portable replacement for `which`.
    if ! command -v todoist &> /dev/null; then echo ""; return 0; fi
    # Scoped locally instead of unset-then-assign globals; each count is the
    # number of lines `todoist list` prints for the filter.
    local overdue today tomorrow inbox next
    overdue=$(todoist list --filter 'overdue' | wc -l 2> /dev/null)
    today=$(todoist list --filter 'today & ! overdue' | wc -l 2> /dev/null)
    tomorrow=$(todoist list --filter 'tomorrow' | wc -l 2> /dev/null)
    inbox=$(todoist list --filter '#inbox' | wc -l 2> /dev/null)
    next=$(todoist list --filter '@next' | wc -l 2> /dev/null)
    # The commented guards show the intended "hide empty sections" behaviour;
    # currently every section is always rendered.
    # if [ "$overdue" -gt "0" ]; then
    ti+="${red@P}O:$overdue${reset@P} "
    # fi
    # if [ "$urgent" -gt "0" ]; then
    ti+="${purple@P}N:$next${reset@P} "
    # fi
    # if [ "$today" -gt "0" ]; then
    ti+="${yellow@P}D:$today${reset@P} "
    # fi
    # if [ "$tomorrow" -gt "0" ]; then
    ti+="${green@P}T:$tomorrow${reset@P} "
    # fi
    # if [ "$inbox" -gt "0" ]; then
    ti+="${blue@P}I:$inbox${reset@P}]"
    # fi
    # Deliberately unquoted: word-splitting collapses whitespace before tr
    # squeezes the separators into "|".
    echo $ti|tr -s " " "|"
}
# Show the current network-namespace name, or fall back to the prompt
# escape "\u@\h" (user@host) when there is none.
function ns {
    namespace=$(ip netns id)
    if [ -z "$namespace" ]; then
        ns="\u@\h"
    else
        ns=$namespace
    fi
    # ${var@P}: expand the value as a prompt string.
    echo ${ns@P}
}
# -- Improved X11 forwarding through GNU Screen (or tmux).
# If not in screen or tmux, update the DISPLAY cache.
# If we are, update the value of DISPLAY to be that in the cache.
update-x11-forwarding ()
{
# $STY is set by screen, $TMUX by tmux.
# NOTE(review): `-a` inside [ ] is deprecated/ambiguous; [[ ... && ... ]]
# would be more robust here.
if [ -z "$STY" -a -z "$TMUX" ]; then
echo $DISPLAY > ~/.display.txt
else
export DISPLAY=`cat ~/.display.txt`
fi
}
# This is run before every command.
# Installed via the DEBUG trap below, so it fires once per simple command.
preexec() {
# Don't cause a preexec for PROMPT_COMMAND.
# Beware! This fails if PROMPT_COMMAND is a string containing more than one command.
[ "$BASH_COMMAND" = "$PROMPT_COMMAND" ] && return
update-x11-forwarding
# Debugging.
#echo DISPLAY = $DISPLAY, display.txt = `cat ~/.display.txt`, STY = $STY, TMUX = $TMUX
}
trap 'preexec' DEBUG
#}}}
# History Configuration {{{1
# don't put duplicate lines or lines starting with space in the history.
# See bash(1) for more options
HISTCONTROL=ignoreboth
#HISTCONTROL=ignoredups:erasedups
# append to the history file, don't overwrite it
shopt -s histappend
# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
HISTSIZE=10000
HISTFILESIZE=10000
# Append/reload history on every prompt so parallel shells share history.
PROMPT_COMMAND="${PROMPT_COMMAND:+$PROMPT_COMMAND$'\n'}history -a; history -c; history -r"
# }}}
# check the window size after each command and, if necessary,
# update the values of LINES and COLUMNS.
[[ $DISPLAY ]] && shopt -s checkwinsize
# If set, the pattern "**" used in a pathname expansion context will
# match all files and zero or more directories and subdirectories.
shopt -s globstar
# make less more friendly for non-text input files, see lesspipe(1)
[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"
# Fallback prompt; replaced below when color support is detected.
PS1="[\u@\h \W]\$ "
# Prompt Configuration {{{1
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "${debian_chroot:-}" ] && [ -r /etc/debian_chroot ]; then
debian_chroot=$(cat /etc/debian_chroot)
fi
# set a fancy prompt (non-color, unless we know we "want" color)
case "$TERM" in
*-termite|xterm-color|*-256color) color_prompt=yes;;
esac
# uncomment for a colored prompt, if the terminal has the capability; turned
# off by default to not distract the user: the focus in a terminal window
# should be on the output of commands, not on the prompt
#force_color_prompt=yes
if [ -n "$force_color_prompt" ]; then
if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then
# We have color support; assume it's compliant with Ecma-48
# (ISO/IEC-6429). (Lack of such support is extremely rare, and such
# a case would tend to support setf rather than setaf.)
color_prompt=yes
else
color_prompt=
fi
fi
if [ "$color_prompt" = yes ]; then
# Prompt pieces: todoist counts, chroot tag, namespace/user@host, cwd,
# git branch (via __git_ps1 from git-prompt.sh).
PS1='$(task_indicator)${reset@P}${debian_chroot:+($debian_chroot)}${green@P}$(ns)${reset@P}:${blue@P}\w${reset@P}${yellow@P}$(__git_ps1 "(%s)")${reset@P}\n\$ '
if [ -n "$NVIM" ]; then
:
elif [ -z "$TMUX" ]; then
# OSC 1337 for CWD tracking by the terminal
export PS1=${PS1}'\[\e]1337;CurrentDir=${PWD}\a\]'
else
# NOTE(review): this version comparison shells out to bc; it assumes
# bc is installed and TERM_PROGRAM_VERSION looks like "3.3a".
if [ $(echo "$(echo $TERM_PROGRAM_VERSION | sed 's/\([0-9]\.[0-9]\)[a-z]/\1/') < 3.3" | bc) -eq 0 ]; then
# OSC 7 support was introduced in tmux 3.3a
# tmux.conf requires requires:
# set -as terminal-overrides '*:Swd=\E]1337;CurrentDir='
# set -as terminal-features '*:osc7'
# set -g set-titles on
export PS1=${PS1}'\[\e]7;${PWD}\a\]'
else
# Otherwise pass OSC 1337 with tmux passthrough
export PS1=${PS1}'\[\ePtmux;\e\e]1337;CurrentDir=${PWD}\a\e\\\]'
fi
fi
else
PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ '
fi
unset color_prompt force_color_prompt
#}}}
# colored GCC warnings and errors
export GCC_COLORS='error=01;31:warning=01;35:note=01;36:caret=01;32:locus=01:quote=01'
# Add an "alert" alias for long running commands. Use like so:
# sleep 10; alert
alias alert='notify-send --urgency=low -i "$([ $? = 0 ] && echo terminal || echo error)" "$(history|tail -n1|sed -e '\''s/^\s*[0-9]\+\s*//;s/[;&|]\s*alert$//'\'')"'
# enable programmable completion features (you don't need to enable
# this, if it's already enabled in /etc/bash.bashrc and /etc/profile
# sources /etc/bash.bashrc).
if ! shopt -oq posix; then
if [ -f /usr/share/bash-completion/bash_completion ]; then
. /usr/share/bash-completion/bash_completion
elif [ -f /etc/bash_completion ]; then
. /etc/bash_completion
fi
fi
#### End of standard Ubuntu .bashrc ####
#It doesn't work properly with FZF
#set -o vi
# Enter a directory by typing its name
shopt -s autocd
# Integrations {{{1
# Apparish: https://github.com/goedel-gang/bash-utils
# FZF: Expected to source either the first one, or the 2 others, but not all.
# NOTE: the loop below relies on word-splitting this string; paths with
# spaces would break it.
FILES_TO_SOURCE="$HOME/.bashrc.$USER \
$HOME/local/usr/share/bash-completion/completions/task \
$HOME/.config/broot/launcher/bash/br \
$HOME/.cargo/env \
$HOME/.local/bin/appary.sh \
/usr/share/git/completion/git-completion.bash \
$HOME/.local/bin/git-completion.bash \
/usr/share/git/completion/git-prompt.sh \
$HOME/.fzf.bash \
$HOME/.local/bin/fzf/shell/completion.bash \
$HOME/.local/bin/fzf/shell/key-bindings.bash \
/usr/share/fzf/completion.bash \
/usr/share/fzf/key-bindings.bash \
$HOME/.bash_aliases \
$HOME/.local/usr/bin/todoist_functions_fzf_bash.sh \
$HOME/.cargo/env \
$HOME/.local/etc/lfcd.sh"
for file in $FILES_TO_SOURCE; do
if [ -f "$file" ]; then
source "$file"
fi
done
if [ -f ~/.local/usr/bin/bb ]; then
eval "$(~/.local/usr/bin/bb init -)"
fi
#}}}
# Wrap the `tere` file navigator: cd into whatever directory it prints.
tere() {
    # NOTE(review): `local result=$(...)` masks tere's exit status; harmless
    # here since only the (non-empty) output is used.
    local result=$(command tere "$@")
    [ -n "$result" ] && cd -- "$result"
}
| true
|
238c69bd276a4c261cbe57ab079cbc8ed1e9d185
|
Shell
|
s-leonard/WebAppPrivateLink
|
/HubAndSpokeNetwork/CLI/script.sh
|
UTF-8
| 9,536
| 2.875
| 3
|
[
"MIT"
] |
permissive
|
## Pre-Req
az extension add --name azure-firewall
###########################################
# Global Variables
###########################################
LOCATION=centralus
# Random-ish 6-char suffix to keep resource names unique per run.
PREFIX=ws$(date +%s%N | md5sum | cut -c1-6)
SUBID=$(az account list --query "[?isDefault].id" -o tsv)
# NOTE: the {…} placeholders below must be filled in before running.
ADO_ACCOUNTNAME={AzureDevOps Account name}
ADO_PERSONALACCESSTOKEN={AzureDevOps personal access token}
ADO_VMADMINUSER=adminuser
ADO_VMADMINPASSWORD={ADO Agent VM admin password}
###########################################
# Hub & Spoke Networks
###########################################
RG_DEVOPSAGENT=devopsagent
RG_HUB=hub
RG_SPOKE_DEV=devspoke
RG_SPOKE_PROD=prodspoke
VNET_HUB=hubvnet
VNET_SPOKE_DEV=devvnet
VNET_SPOKE_PROD=prodvnet
VNET_HUB_IPRANGE=10.1.0.0/16
VNET_SPOKE_IPRANGE_DEV=10.2.0.0/16
VNET_SPOKE_IPRANGE_PROD=10.3.0.0/16
APPGATEWAY_SUBNET_DEV=appgatewaydev
APPGATEWAY_SUBNET_PROD=appgatewayprod
APPGATEWAY_SUBNET_IPRANGE_DEV=10.2.0.0/24
APPGATEWAY_SUBNET_IPRANGE_PROD=10.3.0.0/24
WEB_SUBNET_DEV=webdev
WEB_SUBNET_PROD=webprod
WEB_SUBNET_IPRANGE_DEV=10.2.1.0/24
WEB_SUBNET_IPRANGE_PROD=10.3.1.0/24
API_SUBNET_DEV=apidev
API_SUBNET_PROD=apiprod
API_SUBNET_IPRANGE_DEV=10.2.2.0/24
API_SUBNET_IPRANGE_PROD=10.3.2.0/24
#Firewall subnet MUST be named AzureFirewallSubnet
FIREWALL_SUBNET=AzureFirewallSubnet
FIREWALL_SUBNET_IPRANGE=10.1.0.0/24
HUB_TO_SPOKE_VNET_PEER_DEV=$(echo $PREFIX)-hub-spoke-peer-dev
SPOKE_TO_HUB_VNET_PEER_DEV=$(echo $PREFIX)-spoke-hub-peer-dev
HUB_TO_SPOKE_VNET_PEER_PROD=$(echo $PREFIX)-hub-spoke-peer-prod
SPOKE_TO_HUB_VNET_PEER_PROD=$(echo $PREFIX)-spoke-hub-peer-prod
AZDO_TO_SPOKE_VNET_PEER_DEV=$(echo $PREFIX)-azdo-spoke-peer-dev
SPOKE_TO_AZDO_VNET_PEER_DEV=$(echo $PREFIX)-spoke-azdo-peer-dev
AZDO_TO_SPOKE_VNET_PEER_PROD=$(echo $PREFIX)-azdo-spoke-peer-prod
SPOKE_TO_AZDO_VNET_PEER_PROD=$(echo $PREFIX)-spoke-azdo-peer-prod
# Resource groups and VNets for hub plus dev/prod spokes.
az group create -n $RG_HUB -l $LOCATION
az group create -n $RG_SPOKE_DEV -l $LOCATION
az group create -n $RG_SPOKE_PROD -l $LOCATION
az network vnet create -n $VNET_HUB -g $RG_HUB --address-prefixes $VNET_HUB_IPRANGE
az network vnet create -n $VNET_SPOKE_DEV -g $RG_SPOKE_DEV --address-prefixes $VNET_SPOKE_IPRANGE_DEV
az network vnet create -n $VNET_SPOKE_PROD -g $RG_SPOKE_PROD --address-prefixes $VNET_SPOKE_IPRANGE_PROD
#Create the Subnets in each vnet
az network vnet subnet create -n $APPGATEWAY_SUBNET_DEV -g $RG_SPOKE_DEV --address-prefixes $APPGATEWAY_SUBNET_IPRANGE_DEV --vnet-name $VNET_SPOKE_DEV
az network vnet subnet create -n $APPGATEWAY_SUBNET_PROD -g $RG_SPOKE_PROD --address-prefixes $APPGATEWAY_SUBNET_IPRANGE_PROD --vnet-name $VNET_SPOKE_PROD
az network vnet subnet create -n $WEB_SUBNET_DEV -g $RG_SPOKE_DEV --address-prefixes $WEB_SUBNET_IPRANGE_DEV --vnet-name $VNET_SPOKE_DEV --service-endpoints "Microsoft.Web"
az network vnet subnet create -n $WEB_SUBNET_PROD -g $RG_SPOKE_PROD --address-prefixes $WEB_SUBNET_IPRANGE_PROD --vnet-name $VNET_SPOKE_PROD --service-endpoints "Microsoft.Web"
az network vnet subnet create -n $API_SUBNET_DEV -g $RG_SPOKE_DEV --address-prefixes $API_SUBNET_IPRANGE_DEV --vnet-name $VNET_SPOKE_DEV --service-endpoints "Microsoft.Web"
az network vnet subnet create -n $API_SUBNET_PROD -g $RG_SPOKE_PROD --address-prefixes $API_SUBNET_IPRANGE_PROD --vnet-name $VNET_SPOKE_PROD --service-endpoints "Microsoft.Web"
az network vnet subnet create -n $FIREWALL_SUBNET -g $RG_HUB --address-prefixes $FIREWALL_SUBNET_IPRANGE --vnet-name $VNET_HUB
#Peer the Vnets
HUBID=$(az network vnet show -g $RG_HUB -n $VNET_HUB --query id -o tsv)
SPOKEID_DEV=$(az network vnet show -g $RG_SPOKE_DEV -n $VNET_SPOKE_DEV --query id -o tsv)
SPOKEID_PROD=$(az network vnet show -g $RG_SPOKE_PROD -n $VNET_SPOKE_PROD --query id -o tsv)
az network vnet peering create -g $RG_HUB -n $HUB_TO_SPOKE_VNET_PEER_DEV --vnet-name $VNET_HUB --remote-vnet $SPOKEID_DEV --allow-vnet-access
az network vnet peering create -g $RG_HUB -n $HUB_TO_SPOKE_VNET_PEER_PROD --vnet-name $VNET_HUB --remote-vnet $SPOKEID_PROD --allow-vnet-access
az network vnet peering create -g $RG_SPOKE_DEV -n $SPOKE_TO_HUB_VNET_PEER_DEV --vnet-name $VNET_SPOKE_DEV --remote-vnet $HUBID --allow-vnet-access
az network vnet peering create -g $RG_SPOKE_PROD -n $SPOKE_TO_HUB_VNET_PEER_PROD --vnet-name $VNET_SPOKE_PROD --remote-vnet $HUBID --allow-vnet-access
###########################################
# Hub Network
###########################################
FWPUBLICIP_NAME=$(echo $PREFIX)-fw-ip
FWNAME=$(echo $PREFIX)-fw
FWROUTE_TABLE_NAME="${PREFIX}fwrt"
FWROUTE_NAME="${PREFIX}fwrn"
FWROUTE_NAME_INTERNET="${PREFIX}fwinternet"
FWIPCONFIG_NAME="${PREFIX}fwconfig"
## Firewall
az network public-ip create -g $RG_HUB -n $FWPUBLICIP_NAME -l $LOCATION --sku "Standard"
az network firewall create -g $RG_HUB -n $FWNAME -l $LOCATION
# Configure Firewall IP Config
az network firewall ip-config create \
-g $RG_HUB \
-f $FWNAME \
-n $FWIPCONFIG_NAME \
--public-ip-address $FWPUBLICIP_NAME \
--vnet-name $VNET_HUB
# Capture Firewall IP Address for Later Use
FWPUBLIC_IP=$(az network public-ip show -g $RG_HUB -n $FWPUBLICIP_NAME --query "ipAddress" -o tsv)
FWPRIVATE_IP=$(az network firewall show -g $RG_HUB -n $FWNAME --query "ipConfigurations[0].privateIpAddress" -o tsv)
# Create UDR and add a route for the web subnet (spoke), this ensures all traffic from the web app goes through the firewall
az network route-table create -g $RG_SPOKE_DEV --name $FWROUTE_TABLE_NAME
az network route-table route create -g $RG_SPOKE_DEV --name $FWROUTE_NAME --route-table-name $FWROUTE_TABLE_NAME --address-prefix 0.0.0.0/0 --next-hop-type VirtualAppliance --next-hop-ip-address $FWPRIVATE_IP --subscription $SUBID
az network route-table create -g $RG_SPOKE_PROD --name $FWROUTE_TABLE_NAME
az network route-table route create -g $RG_SPOKE_PROD --name $FWROUTE_NAME --route-table-name $FWROUTE_TABLE_NAME --address-prefix 0.0.0.0/0 --next-hop-type VirtualAppliance --next-hop-ip-address $FWPRIVATE_IP --subscription $SUBID
#Ensure all traffic to httpbin.org is allowed (highly locked down)
#az network firewall application-rule create \
# --collection-name $FIREWALL_HTTPBIN_APPLICATION_RULE_COLLECTION \
# --name $FIREWALL_HTTPBIN_APPLICATION_RULE \
# --firewall-name $FWNAME \
# -g $RG_HUB \
# --protocols HTTP=80 HTTPS=443 \
# --action Allow \
# --priority 100 \
# --target-fqdns "httpbin.org" \
# --source-addresses "*"
#Add the UDR to the network
az network vnet subnet update -g $RG_SPOKE_DEV --vnet-name $VNET_SPOKE_DEV --name $WEB_SUBNET_DEV --route-table $FWROUTE_TABLE_NAME
az network vnet subnet update -g $RG_SPOKE_DEV --vnet-name $VNET_SPOKE_DEV --name $API_SUBNET_DEV --route-table $FWROUTE_TABLE_NAME
az network vnet subnet update -g $RG_SPOKE_PROD --vnet-name $VNET_SPOKE_PROD --name $WEB_SUBNET_PROD --route-table $FWROUTE_TABLE_NAME
az network vnet subnet update -g $RG_SPOKE_PROD --vnet-name $VNET_SPOKE_PROD --name $API_SUBNET_PROD --route-table $FWROUTE_TABLE_NAME
## Azure Deplopyment Agent using quickstart ARM Template
## Will create VM with Azure Pipelines Agent in a new VNET and register agent to Azure DevOps
az group create --name $RG_DEVOPSAGENT --location $LOCATION
az deployment group create -g $RG_DEVOPSAGENT --name agentdeployment \
--template-uri "https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/201-vm-vsts-agent/azuredeploy.json" \
--parameters publicIPDnsName=$(echo $PREFIX)agent \
_artifactsLocation="https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/201-vm-vsts-agent/" \
vmAdminUser=$ADO_VMADMINUSER \
vmAdminPassword=$ADO_VMADMINPASSWORD \
vmSize=Standard_D1_v2 \
vstsAccount=$ADO_ACCOUNTNAME \
vstsPersonalAccessToken=$ADO_PERSONALACCESSTOKEN \
vstsAgentCount=1 \
modules="[]" \
vstsPoolName=Default
# Get newly created network details (name&id) needed for network peering
VNET_AZDO=$(az network vnet list -g $RG_DEVOPSAGENT --query "[].name" -o tsv)
AZDEVOPSID=$(az network vnet show -g $RG_DEVOPSAGENT -n $VNET_AZDO --query id -o tsv)
# Peer network with Azure DevOps agent to dev & prod spokes so pipelines can deploy
az network vnet peering create -g $RG_DEVOPSAGENT -n $AZDO_TO_SPOKE_VNET_PEER_DEV --vnet-name $VNET_AZDO --remote-vnet $SPOKEID_DEV --allow-vnet-access
az network vnet peering create -g $RG_DEVOPSAGENT -n $AZDO_TO_SPOKE_VNET_PEER_PROD --vnet-name $VNET_AZDO --remote-vnet $SPOKEID_PROD --allow-vnet-access
az network vnet peering create -g $RG_SPOKE_DEV -n $SPOKE_TO_AZDO_VNET_PEER_DEV --vnet-name $VNET_SPOKE_DEV --remote-vnet $AZDEVOPSID --allow-vnet-access
az network vnet peering create -g $RG_SPOKE_PROD -n $SPOKE_TO_AZDO_VNET_PEER_PROD --vnet-name $VNET_SPOKE_PROD --remote-vnet $AZDEVOPSID --allow-vnet-access
###########################################
# Pre-Prod Spoke
###########################################
## Spoke VNet
## Website App Service Subnet
## WebSite App Service Plan / App Service
## Website App Service Private Link
## Website App Service VNet Integration (with UDRs)
## API App Service Subnet
## API App Service Plan / App Service
## API App Service Private Link
## API App Service VNet Integration (with UDRs)
## App Gateway Subnet
## App Gateway
## KeyVault
## KeyVault Access Policies (Website and API)
## Keyvault Secrets
## Website App Service - App Settings (including keyvault)
## Azure SQL
### Azure SQL Connectivity
| true
|
6c67c0a7a8ce3f667a4f728a01f834e108a344d4
|
Shell
|
antonbabenko/pre-commit-terraform
|
/hooks/infracost_breakdown.sh
|
UTF-8
| 5,411
| 3.859375
| 4
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/usr/bin/env bash
set -eo pipefail
# globals variables
# shellcheck disable=SC2155 # No way to assign to readonly variable in separate lines
readonly SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd -P)"
# shellcheck source=_common.sh
. "$SCRIPT_DIR/_common.sh"
# Entry point. Initialises the shared hook machinery, parses the hook's
# command line, exports user-supplied environment variables, then runs the
# breakdown check.
# NOTE(review): the common:: helpers and the HOOK_CONFIG/ARGS/ENV_VARS
# globals come from the sourced _common.sh — see that file for details.
function main {
common::initialize "$SCRIPT_DIR"
common::parse_cmdline "$@"
common::export_provided_env_vars "${ENV_VARS[@]}"
common::parse_and_export_env_vars
# shellcheck disable=SC2153 # False positive
infracost_breakdown_ "${HOOK_CONFIG[*]}" "${ARGS[*]}"
}
#######################################################################
# Wrapper around `infracost breakdown` tool which checks and compares
# infra cost based on provided hook_config
# Environment variables:
#   PRE_COMMIT_COLOR (string) If set to `never` - do not colorize output
# Arguments:
#   hook_config (string with array) arguments that configure hook behavior
#   args (string with array) arguments that configure wrapped tool behavior
# Outputs:
#   Print out hook checks status (Passed/Failed), total monthly cost and
#   diff, summary about infracost check (non-supported resources etc.)
# Note: uses negative array indices and substring slices, so bash 4.3+.
#######################################################################
function infracost_breakdown_ {
local -r hook_config="$1"
local args
read -r -a args <<< "$2"
# Get hook settings
IFS=";" read -r -a checks <<< "$hook_config"
# Suppress infracost color
if [ "$PRE_COMMIT_COLOR" = "never" ]; then
args+=("--no-color")
fi
local RESULTS
RESULTS="$(infracost breakdown "${args[@]}" --format json)"
local API_VERSION
API_VERSION="$(jq -r .version <<< "$RESULTS")"
if [ "$API_VERSION" != "0.2" ]; then
common::colorify "yellow" "WARNING: Hook supports Infracost API version \"0.2\", got \"$API_VERSION\""
common::colorify "yellow" "         Some things may not work as expected"
fi
local dir
dir="$(jq '.projects[].metadata.vcsSubPath' <<< "$RESULTS")"
echo -e "\nRunning in $dir"
local have_failed_checks=false
for check in "${checks[@]}"; do
# $hook_config receives string like '1 > 2; 3 == 4;' etc.
# It gets split by `;` into array, which we're parsing here ('1 > 2' ' 3 == 4')
# Next line removes leading spaces, just for fancy output reason.
# shellcheck disable=SC2001 # Rule exception
check=$(echo "$check" | sed 's/^[[:space:]]*//')
# Drop quotes in hook args section. From:
# -h ".totalHourlyCost > 0.1"
# --hook-config='.currency == "USD"'
# To:
# -h .totalHourlyCost > 0.1
# --hook-config=.currency == "USD"
first_char=${check:0:1}
last_char=${check: -1}
if [ "$first_char" == "$last_char" ] && {
[ "$first_char" == '"' ] || [ "$first_char" == "'" ]
}; then
# ${check:1:-1} strips the single matching quote pair around the check.
check="${check:1:-1}"
fi
# Collect every comparison operator appearing in the expression.
mapfile -t operations < <(echo "$check" | grep -oE '[!<>=]{1,2}')
# Get the very last operator, that is used in comparison inside `jq` query.
# From the example below we need to pick the `>` which is in between `add` and `1000`,
# but not the `!=`, which goes earlier in the `jq` expression
# [.projects[].diff.totalMonthlyCost | select (.!=null) | tonumber] | add > 1000
operation=${operations[-1]}
# Split the check into "<jq expression>" and "<value to compare against>"
# on that operator.
IFS="$operation" read -r -a jq_check <<< "$check"
real_value="$(jq "${jq_check[0]}" <<< "$RESULTS")"
compare_value="${jq_check[1]}${jq_check[2]}"
# Check types
jq_check_type="$(jq -r "${jq_check[0]} | type" <<< "$RESULTS")"
compare_value_type="$(jq -r "$compare_value | type" <<< "$RESULTS")"
# Fail if comparing different types
if [ "$jq_check_type" != "$compare_value_type" ]; then
common::colorify "yellow" "Warning: Comparing values with different types may give incorrect result"
common::colorify "yellow" "    Expression: $check"
common::colorify "yellow" "    Types in the expression: [$jq_check_type] $operation [$compare_value_type]"
common::colorify "yellow" "    Use 'tonumber' filter when comparing costs (e.g. '.totalMonthlyCost|tonumber')"
have_failed_checks=true
continue
fi
# Fail if string is compared not with `==` or `!=`
if [ "$jq_check_type" == "string" ] && {
[ "$operation" != '==' ] && [ "$operation" != '!=' ]
}; then
common::colorify "yellow" "Warning: Wrong comparison operator is used in expression: $check"
common::colorify "yellow" "    Use 'tonumber' filter when comparing costs (e.g. '.totalMonthlyCost|tonumber')"
common::colorify "yellow" "    Use '==' or '!=' when comparing strings (e.g. '.currency == \"USD\"')."
have_failed_checks=true
continue
fi
# Compare values
# jq evaluates the whole user expression against the breakdown JSON and
# prints "true"/"false", which is then run as the true/false builtin below.
check_passed="$(echo "$RESULTS" | jq "$check")"
status="Passed"
color="green"
if ! $check_passed; then
status="Failed"
color="red"
have_failed_checks=true
fi
# Print check result
common::colorify $color "$status: $check\t\t$real_value $operation $compare_value"
done
# Fancy informational output
currency="$(jq -r '.currency' <<< "$RESULTS")"
echo -e "\nSummary: $(jq -r '.summary' <<< "$RESULTS")"
echo -e "\nTotal Monthly Cost:        $(jq -r .totalMonthlyCost <<< "$RESULTS") $currency"
echo "Total Monthly Cost (diff): $(jq -r .projects[].diff.totalMonthlyCost <<< "$RESULTS") $currency"
if $have_failed_checks; then
exit 1
fi
}
# Run main only when executed directly (not when the file is sourced).
[ "${BASH_SOURCE[0]}" != "$0" ] || main "$@"
| true
|
18a305a19750de5d291651a7d96afc3f13382d40
|
Shell
|
KangOl/odoo-ops-tools
|
/git-push-pr
|
UTF-8
| 2,373
| 3.453125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Push the current commit to the odoo-dev fork and open a pull request
# against the matching upstream branch, then ask robodoo to merge it.
# Requires $GHTOKEN, httpie (`http`) and jq.
set -e
if [[ -z "$GHTOKEN" ]]; then
    echo "No \$GHTOKEN environment variable defined." >&2
    exit 1
fi
DEBUG=0
RESET=1
VERBOSE=0
opt=
while getopts 'hdvr' opt; do
    case "$opt" in
        h)
            echo "Usage:"
            echo "  $0 (-h | --help)"
            echo "  $0 [-d] [-r] [<DEST_BRANCH>]"
            echo ""
            echo "Options:"
            echo "  -v  output executed commands"
            echo "  -d  debug mode. imply -v. Do not actually push."
            echo "  -r  do no reset to upsteam"
            exit 0
            ;;
        d)
            DEBUG=1
            ;;
        v)
            VERBOSE=1
            ;;
        r)
            RESET=0
            ;;
        ?) exit 1
            ;;
        *);;
    esac
done
shift $((OPTIND - 1))
if [[ $DEBUG = 1 || $VERBOSE = 1 ]]; then
    set -x
fi
# Sanity-check that origin points at an odoo/* GitHub repository.
REMOTE=$(git remote get-url origin)
HOST=${REMOTE:0:20}
if [[ "${HOST}" != "git@github.com:odoo/" ]]; then
    echo "Invalid remote for 'origin'." >&2
    exit 1
fi
REPO=${REMOTE:20}
REPO=${REPO%.git}
BRANCH=$(git symbolic-ref --short HEAD)
EEC=$((DEBUG==1 ? 0 : 1))
# ensure we have something to push
git rev-list --count --left-right "@{upstream}...HEAD" | awk "
    /0\\t0/        { print \"In sync with ${BRANCH}.\"; exit $EEC }
    /0\\t[0-9]+/   { exit 0 }
    //             { print \"You diverge from ${BRANCH}.\"; exit $EEC }
" >&2
MSG=$(git log -n1 --format=%s)
DT=$(date -j +%y%m%d)
DYNDEVBRANCH="fp-${BRANCH}-${DT}"
DEVBRANCH=${1:-$DYNDEVBRANCH}
# A merge commit with conflict markers in its diff means the forward-port had
# conflicts: lower the robodoo priority in that case.
HADCONFLICTS=$(git diff-tree --cc --pretty= HEAD | wc -l)
PRIORITY=$((HADCONFLICTS > 0 ? 0 : 1))
if [[ $DEBUG = 1 ]]; then
    exit 1
fi
# Build the extra push options as an array so an empty value adds no
# arguments at all (string interpolation would need to stay unquoted).
PUSH_ARGS=()
if [[ $RESET = 0 ]]; then
    git checkout -B "${DEVBRANCH}"
    # Fixed: was the misspelled "--set-upsteam", which made git push fail.
    PUSH_ARGS=(--set-upstream)
fi
git push --quiet --force "${PUSH_ARGS[@]}" dev "HEAD:${DEVBRANCH}"
if [[ $RESET = 1 ]]; then
    git reset --quiet --hard '@{upstream}'
fi
# Open the PR via the GitHub API, then comment to trigger robodoo.
RESP=$(http --json --body --pretty=none "https://api.github.com/repos/odoo/${REPO}/pulls" \
    Accept:application/vnd.github.v3+json \
    "Authorization:token ${GHTOKEN}" \
    title="${MSG}" head="odoo-dev:${DEVBRANCH}" base="${BRANCH}" \
)
PR=$(echo "${RESP}" | jq -r ".number")
http --json --print= --pretty=none "https://api.github.com/repos/odoo/${REPO}/issues/${PR}/comments" \
    Accept:application/vnd.github.v3+json \
    "Authorization:token ${GHTOKEN}" \
    body="robodoo merge review+ priority=$PRIORITY"
URL=$(echo "${RESP}" | jq -r ".html_url")
echo "${URL}"
| true
|
3e9d71c9beebbf1ebc61fdea726a323ae0ed1eac
|
Shell
|
irradia/ci
|
/ci-changelog.sh
|
UTF-8
| 701
| 3.921875
| 4
|
[] |
no_license
|
#!/bin/bash
#------------------------------------------------------------------------
# A script to generate changelogs.
#
# Usage: ci-changelog.sh <changelog.jar> <README_CHANGES.xml>
#------------------------------------------------------------------------
# Utility methods
#

# Print a fatal error to stderr and abort.
fatal()
{
  echo "ci-changelog.sh: fatal: $1" 1>&2
  exit 1
}

# Print an informational message to stderr.
info()
{
  echo "ci-changelog.sh: info: $1" 1>&2
}

#------------------------------------------------------------------------

if [ $# -ne 2 ]
then
  fatal "usage: changelog.jar README_CHANGES.xml"
fi

JAR_NAME="$1"
shift
CHANGE_FILE="$1"
shift

cat <<EOF
Changes since the last production release:
EOF

# Strip "Release:" lines, reformat "Change:" lines as bullet points.
# ('egrep' is deprecated; 'grep -E' is the standard spelling.)
exec java -jar "${JAR_NAME}" write-plain --file "${CHANGE_FILE}" | grep -E -v '^Release: ' | sed 's/^Change: /* /g'
| true
|
7fc3650511ab43602ca5964dc0d00b6d3f40e421
|
Shell
|
qixin5/debloating_study
|
/expt/debaug/benchmark/gzip-1.2.4/testscript/I3/6
|
UTF-8
| 521
| 3.359375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Regression-test driver: compress three fixture files with the system
# gzip, then check whether the binary under test ($BIN) decompresses
# them all with '-d'. All observations are appended to $OUTDIR/o6.
#
# Args: $1 binary under test, $2 log output dir, $3 timeout (seconds),
#       $4 fixture input dir.
BIN=$1
OUTDIR=$2
TIMEOUT=$3
INDIR=$4
# Stage fixtures (one name contains spaces on purpose) and gzip them.
cp -r $INDIR/file ./
cp -r $INDIR/mydocument.odt ./
cp -r $INDIR/"this is my file.png" ./
gzip file
gzip mydocument.odt
gzip "this is my file.png"
# Decompress everything under a hard timeout; record the exit status.
{ timeout -k 9 ${TIMEOUT}s $BIN -d *.gz; } &>$OUTDIR/o6
echo "$?" >>$OUTDIR/o6
# After a successful decompression the .gz files are gone (status 1).
test -f file.gz
echo "$?" >>$OUTDIR/o6
test -f mydocument.odt.gz
echo "$?" >>$OUTDIR/o6
test -f "this is my file.png".gz
echo "$?" >>$OUTDIR/o6
# Append the decompressed contents so the oracle can compare them.
cat file &>>$OUTDIR/o6
cat mydocument.odt &>>$OUTDIR/o6
cat "this is my file.png" &>>$OUTDIR/o6
| true
|
5e8b642fb0de81273979b753c5c95b5e213503b8
|
Shell
|
fredrik-rambris/scripts
|
/log_funcs.sh
|
UTF-8
| 2,794
| 4.4375
| 4
|
[] |
no_license
|
#!/bin/sh
# Uniform logging functions
# by Fredrik Rambris
#
# Use this function to make all log messages look the same way.
#
# Begin with sourcing this file like so (dot included):
# . /etc/scripts/log_funcs.sh
#
# Log your start and finish with
# log_start
# log_finished
#
# Log messages with
# log "Message"
# or add a severity of your message
# log "No rows returned" warning
#
# Log and exit with
# log_abort "Could not open file"
#
# Set LOG_SCREEN to mininum severity to output to stdout (default warning)
# Set LOG_FILE to minimum severity to output to syslog (default info)
# Eg. when developing, only log to screen.
# LOG_SCREEN=debug
# LOG_FILE=panic
#
# Note you do this all in your script
# PRG should be initialized to the scripts name. If it's not we
# try to guess it here.
# Fall back to the invoking script's basename when PRG is unset.
[ -z "$PRG" ] && PRG="$( basename $0 )"

# Minimum levels to log to screen and syslog (file)
[ -z "$LOG_SCREEN" ] && LOG_SCREEN="warning"
[ -z "$LOG_FILE" ] && LOG_FILE="info"
# Returns true if $2 is more severe or equals $1
function check_severity()
{
[ $# -lt 2 ] && return 1
declare LEVELS[7]
LEVELS[1]="debug"
LEVELS[2]="info"
LEVELS[3]="notice"
LEVELS[4]="warning" # Warnings, not errors
LEVELS[5]="err" # Recoverable errors
LEVELS[6]="crit" # Always exit after a crit or more
LEVELS[7]="alert" # Big trouble
LEVELS[8]="panic" # System in a state of chaos
for level in $( seq 1 ${#LEVELS[*]} ) ; do
[ "$1" == ${LEVELS[level]} ] && reference=$level
[ "$2" == ${LEVELS[level]} ] && check=$level
done
[ $check -ge $reference ] && return 0
return 1
}
# Usage log_message "message" [severity]
function log_message()
{
SEVERITY="$2"
case "$2" in
panic|emerg)
SEVERITY="panic"
SEVER="Emergency"
;;
alert)
SEVERITY="alert"
SEVER="Alert"
;;
crit)
SEVERITY="crit"
SEVER="Critical"
;;
err|error)
SEVERITY="err"
SEVER="Error"
;;
warning|warn)
SEVERITY="warning"
SEVER="Warning"
;;
notice)
SEVERITY="notice"
SEVER="Notice"
;;
info)
SEVERITY="info"
SEVER="Info"
;;
debug)
SEVERITY="debug"
SEVER="Debug"
;;
*)
SEVERITY="info"
SEVER="Info";
esac
MESSAGE="$SEVER: $1"
check_severity $LOG_FILE $SEVERITY && logger -t"$PRG[$$]" -p user.$SEVERITY -- "$MESSAGE"
check_severity $LOG_SCREEN $SEVERITY && echo "$(LANG=C date +"%b %d %T") $MESSAGE"
}
# Log script started
function log_start()
{
SCRIPT_START=$( date +%s )
log_message "Started" notice
}
# Log script finished
function log_finished()
{
diff=$(( $( date +%s ) - $SCRIPT_START ))
time=$( date -d "1970-01-01 + $diff seconds" +"%kh %Mm %Ss" )
log_message "Finished $time" notice
}
# Die with a message
function log_abort()
{
[ -z "$1" ] && { log_message "Aborted" notice; exit 1; }
log_message "$1" crit
[ ! -z "$2" ] && exit $2
exit 1
}
| true
|
74416a35fb66ff5903a7426bee31a1b6ed202e7a
|
Shell
|
tob2/OvO
|
/ovo.sh
|
UTF-8
| 2,204
| 3.796875
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Find path of all directories who contain a Makefile, those are the test directory
# If not argument are passed, we look for the 'test_src'folder
# The uniq at the end is needed because user can pass the same folder twice in the arguments
# Emit the unique, sorted set of directories (under the given roots,
# default 'test_src') that contain a Makefile — those are the test dirs.
find_tests_folder() {
    find ${@:-test_src} -type f -name 'Makefile' -printf '%h\n' | sort -u
}
# Run 'make clean' quietly in every discovered test directory.
fclean() {
    local test_dir
    for test_dir in $(find_tests_folder $@); do
        make --silent -C "$test_dir" clean
    done
}
# Compile (in parallel) and run (serially) every discovered test
# directory, teeing compilation/runtime logs plus the toolchain
# environment into $result/<dir>. The global 'result' is set by the
# 'run' command handler below.
frun() {
SYNC=$(make -v | head -n1 | awk '$NF >= 4 {print "--output-sync"}')
# --output-sync needs GNU make >= 4; only then fan out over all cores.
if [ -n "$SYNC" ]; then NPROC=$(nproc 2>/dev/null || sysctl -n hw.ncpu 2>/dev/null || getconf _NPROCESSORS_ONLN 2>/dev/null); fi
export MAKEFLAGS="${MAKEFLAGS:--j${NPROC:-1} $SYNC}"
for dir in $(find_tests_folder $@); do
nresult=$result/${dir#*/}
echo ">> Running $dir | Saving log in $nresult"
mkdir -p "$nresult"
# Record the environment and compiler versions for reproducibility.
{
set -x
env
${CXX:-c++} --version
${FC:-gfortran} --version
set +x
} &> "$nresult"/env.log
# Compile in parallel
make --no-print-directory -C "$dir" exe 2>&1 | tee "$nresult"/compilation.log
# But Run serially
make -j1 --no-print-directory -C "$dir" run 2>&1 | tee "$nresult"/runtime.log
done
}
# Directory containing this script (helpers live under $base/src).
base=$(dirname $0)

#  _
# |_) _. ._ _ o ._   _     /\  ._ _
# |  (_| | _> | | | (_|   /--\ | (_| \/
#                    _|              _|

# Command dispatcher: gen | clean | run [-o name] | report.
# Unknown arguments are skipped; with no recognised command the usage
# text is printed at the end.
while (( "$#" )); do
case "$1" in
gen)
shift; $base/src/gtest.py "$@"; exit
;;
clean)
shift; fclean; exit
;;
run)
shift;
# See if user passed -o as first argument
# and set the global variable 'result' used by frun
if [ "$1" == "-o" ]; then
shift; result="test_result/$1"; shift;
else
# Default result dir: timestamp + hostname.
uuid=$(date +"%Y-%m-%d_%H-%M")
result="test_result/${uuid}_$(hostname)"
fi
fclean $@ && frun $@; exit
;;
report)
shift; $base/src/report.py $@; exit
;;
*)
shift;
;;
esac
done

cat $base/src/template/ovo_usage.txt
exit 0
| true
|
2ce8beaf83aadff589607ae31cb7335883b48896
|
Shell
|
jeslopalo/devkit
|
/execute_specs
|
UTF-8
| 1,873
| 4.125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Test runner: discovers *.bats suites under ./test and executes them
# with bats, optionally filtered by a spec pattern.
source $(dirname ${BASH_SOURCE})/.rc

# override PATH because we want to test commands
# from project's workspace and not from the system PATH
export PATH="$DEVKIT_BIN:$PATH"

# Where the spec files live and how they are named.
specs_root_path="./test"
specs_extension=".bats"
# Print the command-line synopsis for this runner.
usage() {
    printf 'usage: %s [-h/--help] [-p | --tap] -- [spec pattern]\n' "$0"
}
# Run one bats spec file ($1) with the given output mode ($2, default
# "-p"). Returns 1 without running anything when the name is empty.
execute_spec() {
    local -r spec="${1:-}"
    local -r bats_mode="${2:---p}"
    [[ -n $spec ]] && bats $bats_mode $spec
}
# Map a spec file path to a human-readable suite name: strip the specs
# root prefix and extension, and turn the first '__' into '::'.
spec_name() {
    local pretty="${1:-}"
    pretty=${pretty/$specs_root_path\//}
    pretty=${pretty/$specs_extension/}
    pretty=${pretty/__/::}
    echo $pretty
}
# Discover every *.bats suite under $specs_root_path (optionally limited
# to paths containing the slug given after '--'), run each with bats and
# report the suites that failed. Returns the number of failing suites.
execute_specs() {
    local mode=""
    # Scan flags up to the '--' separator; what follows is the slug.
    for arg in "${@}"; do
        shift
        if [[ $arg = "--tap" ]]; then mode="--tap"; fi
        if [[ $arg = "-p" ]]; then mode="-p"; fi
        if [[ $arg = "-h" ]] || [[ $arg = "--help" ]]; then usage; exit 0; fi
        if [[ $arg = "--" ]]; then break; fi
    done

    local -r slug="${1:-}"
    local total_failures=0
    local -a suites_with_failures=()

    while read -d '' filename; do
        # Skip suites whose path does not contain the slug filter.
        if [[ -n $slug ]]; then
            if [[ $filename = ${filename/$slug/} ]]; then
                continue;
            fi
        fi

        printf "\\n[%s] executing <%s>...\\n" "${slug:-all}" "$(spec_name $filename)"
        # BUGFIX: was 'execute_spec "$(unknown)" "${mode}"', which tried
        # to run a nonexistent 'unknown' command instead of passing the
        # discovered spec file.
        execute_spec "$filename" "${mode}"

        if [ $? -gt 0 ]; then
            suites_with_failures=( "${suites_with_failures[@]}" "$(spec_name $filename)" )
            ((total_failures++))
        fi
    done < <(find $specs_root_path -name "*$specs_extension" -not -path "$specs_root_path/test_helper/*" -maxdepth 2000 -type f -print0)

    if [[ $total_failures -gt 0 ]]; then
        printf "\\nWarning, there are %s suites with failures!!\\n" "$total_failures"
        printf "    > %s\\n" ${suites_with_failures[@]}
    fi

    return $total_failures
}
execute_specs "$@"
| true
|
b334fd2154dc181330af8ac780ff2ee77b2bf52a
|
Shell
|
Valle1806/SAT
|
/reducir.sh
|
UTF-8
| 459
| 3.453125
| 3
|
[] |
no_license
|
# Iterate over every SAT instance file in the InstanciasSAT folder.
# (Globbing replaces the original 'ls' parsing, which breaks on spaces.)
for instancia in InstanciasSAT/*
do
	# File name of the SAT instance with the directory prefix stripped
	# (replaces the positional 'cut -c 15-').
	reduccion=${instancia#InstanciasSAT/}
	# Create the output file for the X-SAT instance.
	touch "X-SAT/$reduccion"
	# Translate the DIMACS format into the new file; $2 is the reduction
	# parameter passed to this script.
	python3 Reductor/Reductor.py "$instancia" "$2" > "X-SAT/$reduccion"
	echo "Instancia" "$reduccion" "reducida con éxito."
done
| true
|
a6a8f52d5f0e3b071e4ba8f564f6c6e8bbff82cc
|
Shell
|
alpha-tian/code-count
|
/count-new.sh
|
UTF-8
| 4,504
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/bash
# Count lines of code per file type under directory $2, splitting each
# file's line count into code / comment / blank lines, and write summary
# reports named after project $1. Platform-generated files (Abstract*,
# *VO.java, *BaseSQL*) are tallied separately.
date=`date +%Y%m%d`
if [[ ! $2 ]];then
echo "错误,已退出"
exit 168
else
echo "开始执行..."
echo "$1"
if [ -z "$1" ] || [ -z "$2" ];then
echo "你倒是输入参数啊!"
exit 1
fi
# Remove any leftover reports from a previous run.
if [[ -f $PWD/$1-count.txt ]];then
rm -f $PWD/$1-count.txt
fi
if [[ -f $PWD/$1-$date-count2.txt ]];then
rm -f $PWD/$1-$date-count2.txt
fi
# Detect the SQL dialect from file names ('sql_type_num' works unquoted
# here because [[ ]] evaluates barewords as variables in -gt).
sql_type_num=`find $2 -name "*oracle*" | wc -l`
if [[ sql_type_num -gt 0 ]];then
sqltype="oracle"
else
sqltype="mysql"
fi
# Pass 1: per-extension raw counts for every file of a known type.
for i in `find $2 -type f`
do
end=`ls $i | awk -F "/" '{print $(NF)}' | awk -F"." '{print $(NF)}'`
echo $end | egrep "xml|sql|java|properties|js|gradle|html" > /dev/null
if [[ $? -eq 0 ]];then
sed -n '/^\s*$/p' $i | wc -l >> $2/"$end"-1.txt # blank lines
sed -n "/^\s*\//p" $i | wc -l >> $2/"$end"-2.txt # comments starting with /
sed -n "/^\s*#/p" $i | wc -l >> $2/"$end"-2.txt # comments starting with #
sed -n "/^\s*\*/p" $i | wc -l >> $2/"$end"-2.txt # comments starting with *
grep -Pzo '<!(.|[\r\n])*?>' $i | wc -l >> $2/"$end"-2.txt # comments starting with <!
cat $i | wc -l >> $2/"$end"-3.txt # total file lines
echo $i | xargs ls -l | gawk '{print $5}' >> $2/"$end"-4.txt # file size
echo 1 >> $2/"$end"-5.txt # file count
fi
done
# Pass 2: the same counts for platform-generated file name patterns.
for j in {"Abstract*.java","*VO.java","*BaseSQL.xml","*BaseSQL*$sqltype\.xml"}
do
for k in `find "$2" -name "$j"`
do
sed -n '/^\s*$/p' $k | wc -l >> $2/"$j"-1.txt # blank lines
sed -n "/^\s*\//p" $k | wc -l >> $2/"$j"-2.txt # comments starting with /
sed -n "/^\s*#/p" $k | wc -l >> $2/"$j"-2.txt # comments starting with #
sed -n "/^\s*\*/p" $k | wc -l >> $2/"$j"-2.txt # comments starting with *
grep -Pzo '<!(.|[\r\n])*?>' $k | wc -l >> $2/"$j"-2.txt # comments starting with <!
cat $k | wc -l >> $2/"$j"-3.txt # total file lines
echo $k | xargs ls -l | gawk '{print $5}' >> $2/"$j"-4.txt # file size
echo 1 >> $2/"$j"-5.txt # file count
done
done
# Pass 3: sum the per-file counts into per-category totals (-sum*).
for l in {"xml","sql","java","properties","js","gradle","html","Abstract*.java","*VO.java","*BaseSQL.xml","*BaseSQL*$sqltype\.xml"}
do
if [[ -f $2/"$l"-1.txt ]];then
cat $2/"$l"-1.txt | awk '{sum+=$1} END {print sum}' >> $2/"$l"-sum1.txt # blank-line total
else
echo 0 >> $2/"$l"-sum1.txt
fi
if [[ -f $2/"$l"-2.txt ]];then
cat $2/"$l"-2.txt | awk '{sum+=$1} END {print sum}' >> $2/"$l"-sum2.txt # comment-line total
else
echo 0 >> $2/"$l"-sum2.txt
fi
if [[ -f $2/"$l"-3.txt ]];then
cat $2/"$l"-3.txt | awk '{sum+=$1} END {print sum}' >> $2/"$l"-sum3.txt # file-line total
else
echo 0 >> $2/"$l"-sum3.txt
fi
if [[ -f $2/"$l"-4.txt ]];then
cat $2/"$l"-4.txt | awk '{sum+=$1} END {print sum}' >> $2/"$l"-sum4.txt # file-size total
else
echo 0 >> $2/"$l"-sum4.txt
fi
if [[ -f $2/"$l"-5.txt ]];then
cat $2/"$l"-5.txt | awk '{sum+=$1} END {print sum}' >> $2/"$l"-sum5.txt # file-count total
else
echo 0 >> $2/"$l"-sum5.txt
fi
done
# Pass 4: format the per-category report; code lines = total - comments
# - blanks. Intermediate files are removed as we go.
for m in {"xml","sql","java","properties","js","gradle","html","Abstract*.java","*VO.java","*BaseSQL.xml","*BaseSQL*$sqltype\.xml"}
do
num=`cat $2/"$m"-sum5.txt`
e=`cat $2/"$m"-sum4.txt`
c=`cat $2/"$m"-sum3.txt`
b=`cat $2/"$m"-sum2.txt`
a=`cat $2/"$m"-sum1.txt`
d=$[$c - $b - $a]
echo $d >> $2/$1-$date-code.txt
cat << EOF >> $1-$date-count.txt
"$m"
文件数: $num
文件大小: $e
总代码行数: $c
代码行数: $d
注释行数: $b
空行数: $a
EOF
rm -f $2/"$m"-1.txt $2/"$m"-2.txt $2/"$m"-3.txt $2/"$m"-4.txt $2/"$m"-5.txt $2/"$m"-sum1.txt $2/"$m"-sum2.txt $2/"$m"-sum3.txt $2/"$m"-sum4.txt $2/"$m"-sum5.txt
num=0
e=0
c=0
b=0
a=0
d=0
done
# Final totals: rows 1-6 are hand-written code categories, rows 4-10 the
# platform-generated ones (row 5, html, is dropped first).
sed -i '5d' $2/$1-$date-code.txt
f=`cat $2/$1-$date-code.txt | awk 'NR==1,NR==6{sum+=$1} END {print sum}'`
g=`cat $2/$1-$date-code.txt | awk 'NR==4,NR==10{sum+=$1} END {print sum}'`
echo -e "总代码行数:\t\t$f" > $PWD/$1-$date-count2.txt
echo -e "平台生成代码行数:\t$g" >> $PWD/$1-$date-count2.txt
cat $PWD/$1-$date-count.txt
cat $PWD/$1-$date-count2.txt
rm -f $2/$1-$date-code.txt
fi
| true
|
dae94ef531109f3b7b18c292eec9672d0bdeda03
|
Shell
|
Bobby23Putra/TBG
|
/HTML/cdc/poll_sms.sh
|
UTF-8
| 631
| 2.765625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Fetch the SMS server number and the latest poll value from the local
# 'cdc' MySQL database and send the value by SMS, trying modem 1 first
# and falling back to modem 2.
cd /var/www/cdc/
# Stop any active USB modem connections before using them for SMS.
sudo bash connect.sh "stop" "usb1"
sudo bash connect.sh "stop" "usb2"
# NOTE(review): database credentials are hard-coded here; consider
# moving them to a protected option file (e.g. ~/.my.cnf).
num_server=`mysql -s -N --user="root" --password="root" --database="cdc" --execute="select setting_value from setting where setting_name = 'sms_server'"`
poll_value=`mysql -s -N --user="root" --password="root" --database="cdc" --execute="select poll from log order by id desc limit 0,1"`
# send via modem 1
kirim=`sudo bash content_sms.sh "$num_server" "$poll_value" "1"`
# 'berhasil' ("success" in Indonesian) in the sender output means it
# worked; otherwise retry on the second modem.
if [[ $kirim == *berhasil* ]] ;then
exit
else
# send via modem 2
kirim=`sudo bash content_sms.sh "$num_server" "$poll_value" "2"`
fi
| true
|
2da17500f33a9a5a00f2eedd63b65d504af0d75b
|
Shell
|
vijayendrabvs/geth_perf
|
/inst_geth.sh
|
UTF-8
| 510
| 3.40625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Install Go plus build tooling via apt, then clone and build the geth
# (go-ethereum) miner under ~/work/papers/inst. Requires sudo rights.
echo "Updating apt"
sudo apt update
echo "Installing golang packages"
sudo apt install -y golang-go
# Verify golang is installed ('command -v' is the POSIX check; 'which'
# is non-standard). The test is quoted so an empty result cannot break
# the [ -z ] expression.
go_binary=$(command -v go)
if [ -z "$go_binary" ]; then
	echo "Failed to install go, exiting.."
	exit 1
fi
sudo apt-get install -y build-essential
# Create a working dir; abort if we cannot enter it so the clone/build
# never happens in the wrong directory.
mkdir -p ~/work/papers/inst
cd ~/work/papers/inst || exit 1
# Clone the geth repo.
echo "Cloning geth miner"
git clone https://github.com/ethereum/go-ethereum.git
cd go-ethereum || exit 1
# Build geth.
make geth
| true
|
f069ae2552c2d1c480087c3370891379adc2ff2f
|
Shell
|
jdbarry/cue
|
/cue/templates/install_rabbit.sh.tmpl
|
UTF-8
| 1,113
| 3.234375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Provision a RabbitMQ cluster node. This is a Jinja2 template: the
# {{...}} / {%...%} placeholders are substituted before execution.

# Map every cluster member's hostname to its IP in /etc/hosts.
cat > /etc/hosts << EOF
127.0.0.1 localhost
{%- for node_name, node_ip in rabbit_nodes.iteritems() %}
{{node_ip}} {{node_name}}
{%- endfor %}
EOF

# Bring up the cluster-internal interface via DHCP.
cat > /etc/network/interfaces.d/eth1.cfg << EOF
auto eth1
iface eth1 inet dhcp
EOF

ifup eth1

# Create the rabbitmq system user if it does not exist yet.
if [[ ! "`grep rabbitmq /etc/passwd`" ]]; then
useradd -d /var/lib/rabbitmq -U -m rabbitmq
fi

mkdir -p /etc/rabbitmq /var/lib/rabbitmq

# All nodes of the cluster must share the same Erlang cookie.
echo {{erlang_cookie}} > /var/lib/rabbitmq/.erlang.cookie
chmod 0400 /var/lib/rabbitmq/.erlang.cookie

# Cluster configuration: disc-node list plus the default credentials.
cat > /etc/rabbitmq/rabbitmq.config << EOF
[
 {rabbit, [
   {cluster_nodes, {[
{%- for node_name in rabbit_nodes -%}
'rabbit@{{node_name}}' {%- if not loop.last -%},{%- endif -%}
{%- endfor -%}
], disc}},
   {default_user, <<"{{default_rabbit_user}}">>},
   {default_pass, <<"{{default_rabbit_pass}}">>}
 ]}
].
EOF

chown -R rabbitmq:rabbitmq /var/lib/rabbitmq

apt-get update
apt-get install -y rabbitmq-server

# NOTE(review): this loop re-checks/starts the service up to 5 times and
# keeps looping even after a successful start — presumably intended as a
# wait-until-running retry; confirm before changing.
for i in `seq 5`; do
if [[ ! "`service rabbitmq-server status | grep pid`" ]]; then
sleep 1
service rabbitmq-server start
fi
done
| true
|
14193f85530f2307f03cb274321f6a4b12e1e736
|
Shell
|
lisuke/repo
|
/archlinuxcn/zju-connect-bin/zju-connect.install
|
UTF-8
| 1,025
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# pacman install scriptlet for zju-connect: prints a quick-start guide
# to the terminal once the package has been installed.

# Print usage notes and troubleshooting links.
function whisperer(){
echo "-----------------------------------------------------------------------"
echo "For more info and more troubleshooting visit:"
echo "CC98: https://cc98.org"
echo "Github: https://github.com/Mythologyli/zju-connect"
echo "-----------------------------------------------------------------------"
echo ""
echo "-----------------------------------------------------------------------"
echo "Quick Guide"
echo ""
echo "1. How to run directly?"
echo "    /usr/bin/zju-connect -username <account> -password <password>"
echo ""
echo "2. What's the default port?"
echo "    Socks5: 1080, HTTP: 1081"
echo ""
echo "3. How to run it as a systemd service?"
echo "    a. edit /etc/zju-connect/config.toml"
echo "    b. sudo systemctl enable zju-connect"
echo "    c. sudo systemctl start zju-connect"
echo "-----------------------------------------------------------------------"
}

# pacman hook: runs once after the package files are installed.
post_install()
{
	whisperer
}
| true
|
0bae1a1a9e562d1ce6c060ad31b9e4c93ec609c2
|
Shell
|
sgupt9999/dns
|
/dns-name-caching-server.sh
|
UTF-8
| 1,902
| 3.9375
| 4
|
[] |
no_license
|
#!/bin/bash
##############################################################
##
## Configuring a DNS caching server using bind on Centos 7
##
##############################################################
##############################################################
##
## User Inputs
TRUSTEDCLIENTS=(172.31.17.32 172.31.17.55) # Need at least one entry
SERVERIP=172.31.26.113/32
# firewalld should already be installed, enabled and running
FIREWALL=yes
#FIREWALL=no
##
###############################################################
# Root is required for yum/systemctl/config edits below.
if [[ $EUID != "0" ]]
then
echo "ERROR. Need to run the script as root"
exit 1
fi
PACKAGES="bind"
# Wipe any previous bind installation and its config for a clean start.
if yum list installed bind > /dev/null 2>&1
then
systemctl -q is-active named && {
systemctl -q stop named
systemctl -q disable named
}
yum remove -y -q -e0 $PACKAGES
rm -rf /etc/rndc.key
rm -rf /etc/named*
fi
echo "Installing packages......................"
yum install -y -q -e0 $PACKAGES
echo "Done"
# Restore a pristine named.conf from backup, or create the backup on
# the first run.
if [ -f /etc/named.conf_backup ]
then
cp -f /etc/named.conf_backup /etc/named.conf
else
cp -f /etc/named.conf /etc/named.conf_backup
fi
##################################################################
##
## Create trusted list of client servers
# Insert an acl "trusted" block just above the 'options' section;
# line_number tracks where each subsequent line must be inserted.
line_number=`grep options -n /etc/named.conf | head -n 1 | cut -d":" -f1`
sed -i "${line_number}i\acl \"trusted\" {" /etc/named.conf
for CLIENT in ${TRUSTEDCLIENTS[@]}
do
line_number=`expr $line_number + 1`
sed -i "${line_number}i $CLIENT;" /etc/named.conf
done
line_number=`expr $line_number + 1`
sed -i "${line_number}i};" /etc/named.conf
# Listen on this server's IP only, answer only trusted clients, and
# enable recursion so the server actually caches lookups.
sed -i "s#listen-on port 53.*#listen-on port 53 { $SERVERIP; };#" /etc/named.conf
sed -i "s/allow-query.*/allow-query { trusted; };/" /etc/named.conf
sed -i "s/recursion no/recursion yes/" /etc/named.conf
if [ $FIREWALL == "yes" ]
then
firewall-cmd --permanent --add-service dns
firewall-cmd --reload
fi
systemctl start named
systemctl enable named
| true
|
ca4de532c8cccafa29f431ef4938ff197c7ca3d4
|
Shell
|
msrLi/portingSources
|
/lzo/lzo-2.10/B/generic/build.sh
|
UTF-8
| 2,129
| 3.0625
| 3
|
[
"GPL-2.0-only",
"Apache-2.0"
] |
permissive
|
#! /bin/sh
## vim:set ts=4 sw=4 et:
set -e

# Build liblzo2.a plus (unless freestanding) the example and test
# programs, for a generic POSIX system with a generic C compiler.
# Toolchain and flags are overridable via the environment.
echo "// Copyright (C) 1996-2017 Markus F.X.J. Oberhumer"
echo "//"
echo "// Generic Posix/Unix system"
echo "// Generic C compiler"

# Default top_srcdir to two levels above this script's directory.
test "X${top_srcdir}" = X && top_srcdir=`echo "$0" | sed 's,[^/]*$,,'`../..
test "X${AR}" = X && AR="ar"
test "X${CC}" = X && CC="cc"
test "X${CFLAGS+set}" = Xset || CFLAGS="-O"
# CPPFLAGS, LDFLAGS, LIBS
# LZO_EXTRA_CPPFLAGS, LZO_EXTRA_CFLAGS, LZO_EXTRA_LDFLAGS
# LZO_EXTRA_SOURCES, LZO_EXTRA_OBJECTS, LZO_EXTRA_LIBS

CFI="-I${top_srcdir}/include -I${top_srcdir} -I${top_srcdir}/src"
BNAME=lzo2
BLIB=lib${BNAME}.a
CF="$CPPFLAGS $CFI $CFLAGS"
# info: we restrict ourselves to pure ANSI C library functions for the examples
CF="-DLZO_LIBC_ISOC90=1 $CF"
# info: we do not use _any_ external functions in freestanding mode
test -z "$LZO_CFG_FREESTANDING" || CF="-DLZO_CFG_FREESTANDING=1 $CF"
LF="$LDFLAGS $LZO_EXTRA_LDFLAGS"
LL="$BLIB $LIBS $LZO_EXTRA_LIBS"

. $top_srcdir/B/generic/clean.sh

# Compile every library source file and archive it into $BLIB.
for f in $top_srcdir/src/*.c $LZO_EXTRA_SOURCES; do
    echo $CC $CF $LZO_EXTRA_CPPFLAGS $LZO_EXTRA_CFLAGS -c $f
         $CC $CF $LZO_EXTRA_CPPFLAGS $LZO_EXTRA_CFLAGS -c $f
done
echo $AR rcs $BLIB *.o $LZO_EXTRA_OBJECTS
     $AR rcs $BLIB *.o $LZO_EXTRA_OBJECTS

if test -n "$LZO_CFG_FREESTANDING"; then

echo "//"
echo "// Building $BLIB in freestanding mode was successful. All done."
echo "// Now try 'nm --extern-only $BLIB'"

else

# Build the example programs, the test driver and minilzo.
for f in dict lzopack precomp precomp2 simple; do
    echo $CC $CF $LF -o $f.out $top_srcdir/examples/$f.c $LL
         $CC $CF $LF -o $f.out $top_srcdir/examples/$f.c $LL
done
echo $CC $CF $LF -o lzotest.out $top_srcdir/lzotest/lzotest.c $LL
     $CC $CF $LF -o lzotest.out $top_srcdir/lzotest/lzotest.c $LL

for f in minilzo.c testmini.c; do
    XF="-I$top_srcdir/include/lzo $CF $LZO_EXTRA_CPPFLAGS $LZO_EXTRA_CFLAGS"
    echo $CC $XF -c $top_srcdir/minilzo/$f
         $CC $XF -c $top_srcdir/minilzo/$f
done
echo $CC $CF $LF -o testmini.out testmini.o minilzo.o $LIBS $LZO_EXTRA_LIBS
     $CC $CF $LF -o testmini.out testmini.o minilzo.o $LIBS $LZO_EXTRA_LIBS

echo "//"
echo "// Building LZO was successful. All done."

fi
true
| true
|
aa346429460818b5b2a538cb19c84a1fe6e0fa6c
|
Shell
|
Level0r0s/java-deepspeech
|
/native/scripts/cross-build.sh
|
UTF-8
| 494
| 3.21875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh -e
# Cross-compile driver: parses -b <build-type> and -a <arch>, then runs
# the regular build script inside the matching dockcross container.
# shellcheck source=./env.sh
. "$(dirname "$(realpath "$0")")/env.sh"

BUILD_TYPE="Release"
BUILD_ARCH="default"

while getopts ":b:a:" arg; do
    case ${arg} in
        b)
            BUILD_TYPE="${OPTARG}"
            ;;
        a)
            BUILD_ARCH="${OPTARG}"
            ;;
        ?)
            # getopts in silent mode (leading ':') reports bad flags as '?'.
            echo "Invalid option: -${arg} ${OPTARG}"
            echo
            exit 187
            ;;
    esac
done

# PROJECT_DIR comes from the sourced env.sh.
cd "$PROJECT_DIR"
DOCK_CROSS_IMAGE=$BUILD_ARCH "./scripts/dockcross.sh" "./scripts/build.sh" -b "$BUILD_TYPE" -a "$BUILD_ARCH"
| true
|
5ee68c30bb6e4921878f7187a88384cd86ac216c
|
Shell
|
ohel/dotfiles
|
/scripts/initkeyboard.sh
|
UTF-8
| 410
| 3.21875
| 3
|
[
"Unlicense"
] |
permissive
|
#!/bin/sh
# Sometimes at least the Microsoft Natural Ergonomic 4000 keyboard acts weirdly or hangs up.
# Reconnecting it physically works, but one has to set everything anew. This script solves that.

# Kill any running xbindkeys instance; remember whether to pause so the
# old process is gone before we restart it below.
secs=0
for pid in $(ps -ef | grep "xbindkeys$" | tr -s ' ' | cut -f 2 -d ' ')
do
	kill $pid
	secs=1
done
sleep $secs

# Fall back to the default X display only when DISPLAY is unset/empty.
# BUGFIX: the original '[ !"$DISPLAY" ]' was a single non-empty word —
# always true — so it clobbered an already-set DISPLAY. Exported so the
# X clients below actually see it.
[ -z "$DISPLAY" ] && export DISPLAY=:0.0

# Reapply keyboard layout, key repeat rate and bell, then restart
# xbindkeys.
setxkbmap fi
xset r rate 200 45
xset b off
xbindkeys
| true
|
9b322e474b1caf073833e2096b3e51e48ca7fef0
|
Shell
|
kpkhxlgy0/pbc
|
/tools/build_mac.sh
|
UTF-8
| 280
| 3.25
| 3
|
[
"MIT"
] |
permissive
|
# Configure and build the project for macOS with CMake.
# Usage: build_mac.sh [CMAKE_BUILD_TYPE]   (defaults to Release)
CMAKE_BUILD_TYPE=${1:-Release}

# Absolute directory containing this script.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
DIR_TO="$DIR/build_mac"

# Start from a clean out-of-source build directory. Paths are quoted so
# a checkout path containing spaces cannot break (or mis-target) rm -rf.
rm -rf "$DIR_TO"
mkdir -p "$DIR_TO"
# The original 'cd $DIR_TO && cd $_' entered the same directory twice;
# a single checked cd is equivalent and aborts if it fails.
cd "$DIR_TO" || exit 1
cmake -DCMAKE_BUILD_TYPE="$CMAKE_BUILD_TYPE" ../..
make -j8
|
9580aae43d0e8956e5d1e5b0f5d8a2f1e52f3500
|
Shell
|
clegg89/classon
|
/meta-classon/recipes-example/classon-rootfs/classon-rootfs-1.0.0/etc/init.d/start_uploader_py
|
UTF-8
| 323
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
# SysV init script for the classon uploader daemon (uploader.py).
case "$1" in
	start)
		# Launch the uploader in the background.
		python /home/root/uploader.py &
		;;
	stop)
		echo "TODO: how to stop uploader.py when its been autostarted."
		;;
	force-reload|restart)
		$0 stop
		$0 start
		;;
	status)
		# TODO(review): no status reporting implemented yet.
		;;
	*)
		# BUGFIX: usage message was misspelled "Usag:".
		echo "Usage: /etc/init.d/start_uploader_py {start|stop|restart|force-reload|status}"
		exit 1
		;;
esac
|
20584d8aba76ef43e6f475a9141215d3fd769b15
|
Shell
|
jackgavin/stratux-setup
|
/wifi-ap.sh
|
UTF-8
| 3,935
| 3.40625
| 3
|
[] |
permissive
|
# Copyright (c) 2016 Joseph D Poirier
# Distributable under the terms of The New BSD License
# that can be found in the LICENSE file.

# Configure a Raspberry Pi as the stratux WiFi access point: static IP
# on wlan0, DHCP server for clients, hostapd config, and the post-up
# helper script. Color variables (RED/WHITE/...), REVISION/RPI*REV and
# SCRIPTDIR are expected to be defined by the sourcing installer —
# TODO confirm against the caller.

#### files created and/or modified
# /etc/default/isc-dhcp-server
# /etc/hostapd/hostapd.conf
# /etc/network/interfaces
# /usr/sbin/stratux-wifi.sh

if [ $(whoami) != 'root' ]; then
    echo "${RED}This script must be executed as root, exiting...${WHITE}"
    exit
fi

# Remove any stock hostapd integration; stratux manages hostapd itself.
rm -f /etc/rc*.d/*hostapd
rm -f /etc/network/if-pre-up.d/hostapd
rm -f /etc/network/if-post-down.d/hostapd
rm -f /etc/init.d/hostapd
rm -f /etc/default/hostapd

# what wifi interface, e.g. wlan0, wlan1..., uses the first one found
#wifi_interface=$(lshw -quiet -c network | sed -n -e '/Wireless interface/,+12 p' | sed -n -e '/logical name:/p' | cut -d: -f2 | sed -e 's/ //g')
wifi_interface=wlan0

echo "${MAGENTA}Configuring $wifi_interface interface...${WHITE}"

##############################################################
## Setup DHCP server for IP address management
##############################################################
echo
echo "${YELLOW}**** Setup DHCP server for IP address management *****${WHITE}"

### set /etc/default/isc-dhcp-server
# (cp -n keeps an existing .bak as the pristine backup.)
cp -n /etc/default/isc-dhcp-server{,.bak}

cat <<EOT > /etc/default/isc-dhcp-server
INTERFACES="$wifi_interface"
EOT

### set /etc/dhcp/dhcpd.conf
cp -n /etc/dhcp/dhcpd.conf{,.bak}

cat <<EOT > /etc/dhcp/dhcpd.conf
ddns-update-style none;
default-lease-time 86400; # 24 hours
max-lease-time 172800; # 48 hours
authoritative;
log-facility local7;
subnet 192.168.10.0 netmask 255.255.255.0 {
    range 192.168.10.10 192.168.10.50;
    option broadcast-address 192.168.10.255;
    default-lease-time 12000;
    max-lease-time 12000;
    option domain-name "stratux.local";
    option domain-name-servers 4.2.2.2;
}
EOT

echo "${GREEN}...done${WHITE}"

##############################################################
## Setup /etc/hostapd/hostapd.conf
##############################################################
echo
echo "${YELLOW}**** Setup /etc/hostapd/hostapd.conf *****${WHITE}"

# On Pi 2B / Zero revisions also write an Edimax (rtl871xdrv) variant.
if [ "$REVISION" == "$RPI2BxREV" ] || [ "$REVISION" == "$RPI2ByREV" ] || [ "$REVISION" = "$RPI0xREV" ] || [ "$REVISION" = "$RPI0yREV" ]; then
cat <<EOT > /etc/hostapd/hostapd-edimax.conf
interface=$wifi_interface
driver=rtl871xdrv
ssid=stratux
hw_mode=g
channel=1
wmm_enabled=1
ieee80211n=1
ignore_broadcast_ssid=0
EOT
fi

cat <<EOT > /etc/hostapd/hostapd.conf
interface=$wifi_interface
ssid=stratux
hw_mode=g
channel=1
wmm_enabled=1
ieee80211n=1
ignore_broadcast_ssid=0
EOT

echo "${GREEN}...done${WHITE}"

##############################################################
## Setup /etc/network/interfaces
##############################################################
echo
echo "${YELLOW}**** Setup /etc/network/interfaces *****${WHITE}"

cp -n /etc/network/interfaces{,.bak}

cat <<EOT > /etc/network/interfaces
source-directory /etc/network/interfaces.d
auto lo
iface lo inet loopback
allow-hotplug wlan0
iface wlan0 inet static
    address 192.168.10.1
    netmask 255.255.255.0
    post-up /usr/sbin/stratux-wifi.sh
EOT

echo "${GREEN}...done${WHITE}"

#################################################
## Setup /usr/sbin/stratux-wifi.sh
#################################################
echo
echo "${YELLOW}**** Setup /usr/sbin/stratux-wifi.sh *****${WHITE}"

# we use a slightly modified version to handle more hardware scenarios
chmod 755 ${SCRIPTDIR}/stratux-wifi.sh
cp ${SCRIPTDIR}/stratux-wifi.sh /usr/sbin/stratux-wifi.sh

echo "${GREEN}...done${WHITE}"

#################################################
## Legacy wifiap cleanup
#################################################
echo
echo "${YELLOW}**** Legacy wifiap cleanup *****${WHITE}"

#### legacy file check
if [ -f "/etc/init.d/wifiap" ]; then
    service wifiap stop
    rm -f /etc/init.d/wifiap
    echo "${MAGENTA}legacy wifiap service stopped and file removed... *****${WHITE}"
fi

echo "${GREEN}...done${WHITE}"
| true
|
0b48ae9e1f28e2af74f5fbebee127dded741c6e6
|
Shell
|
jiri-pejchal/dotfiles
|
/bash_aliases
|
UTF-8
| 1,001
| 2.859375
| 3
|
[] |
no_license
|
# Navigation shortcuts.
alias ..='cd ..'
alias ...='cd ../..'
alias cd..='cd ..'
alias -- -='cd -'

# ls / locate / archiving helpers.
alias ll='ls -l'
alias la='ls -la'
alias lt='ls -lrt'
alias li='locate -i'
alias tarp='tar --use-compress-program=pxz'
alias xcopy='xsel -b'

# git
alias g='git status -s'
alias gitroot='cd $(git rev-parse --show-toplevel)'
alias gti='git'

# IP addresses
alias ip='ip --color'
alias ipa='ip -4 --color a'
alias ipb='ip --color --brief'

alias svim='sudo vim'
alias SS='sudo systemctl'

# youtube-dll
alias yt='youtube-dl --add-metadata'
alias yta='youtube-dl -x'

# aptitude
alias u='sudo aptitude -u'
alias au='sudo aptitude upgrade'
alias ai='sudo aptitude install'
alias ag='sudo aptitude safe-upgrade'

alias https='http --default-scheme=https'

# emacs
alias gnus='emacs -f gnus &'

# exit to the current directory
alias mc=". /usr/lib/mc/mc-wrapper.sh"

# import all javase packages
# define print functions
alias jshellj="jshell JAVASE PRINTING"
# Make a directory (including parents) and change into it.
mcd() {
    mkdir -p "$1" && cd "$1"
}
# vim: set ft=sh:
| true
|
951a9085bca05f4a03bd1e7f5c5fd4ea854663ce
|
Shell
|
aefimov/teamcity-deb
|
/etc/init.d/teamcity-agent
|
UTF-8
| 3,544
| 3.75
| 4
|
[] |
no_license
|
#! /bin/sh
### BEGIN INIT INFO
# Provides: teamcity-agent
# Required-Start: $local_fs $remote_fs $network
# Required-Stop: $local_fs $remote_fs $network
# Should-Start: $named $syslog $time
# Should-Stop: $named $syslog $time
# Default-Start: 2 3 4 5
# Default-Stop: S 0 1 6
# Short-Description: initscript for teamcity-agent
# Description: initscript for teamcity-agent
### END INIT INFO
# Author: Alexey Efimov <aefimov-box@ya.ru>
set -e

PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
# Where agent installs live and where their work/temp/system data go.
AGENTS_ROOT="/usr/local/teamcity-agents"
DATA_ROOT="/var/lib/teamcity/BuildAgents"
# Start agent $1 as the 'teamcity' user, with its environment loaded
# from ~/.teamcityrc.
d_start() {
	su - teamcity -c "(. ~/.teamcityrc; cd $AGENTS_ROOT/$1; $AGENTS_ROOT/$1/bin/agent.sh start)"
}
# Stop agent $1 (forcibly, via 'stop kill') as the 'teamcity' user.
d_stop() {
	su - teamcity -c "(. ~/.teamcityrc; cd $AGENTS_ROOT/$1; $AGENTS_ROOT/$1/bin/agent.sh stop kill)"
}
# Download, unpack and configure a new build agent.
# Arguments: $1 agent name, $2 TeamCity server URL.
d_install() {
	NAME=$1
	SERVER_URL=$2
	if [ -z "$NAME" -o -z "$SERVER_URL" ]; then
		echo "Usage /etc/init.d/teamcity-agent install <teamcity-agent-name> <teamcity.server.url>"
		exit 1;
	else
		echo Downloading TeamCity Agent from $SERVER_URL...
		BUILD_AGENT_ZIP="buildAgent.zip"
		wget --no-check-certificate -O $AGENTS_ROOT/$BUILD_AGENT_ZIP $SERVER_URL/update/$BUILD_AGENT_ZIP
		unzip $AGENTS_ROOT/$BUILD_AGENT_ZIP -d $AGENTS_ROOT/$NAME
		chmod +x $AGENTS_ROOT/$NAME/bin/agent.sh

		WORK_DIR="$DATA_ROOT/$NAME/work"
		TEMP_DIR="$DATA_ROOT/$NAME/temp"
		SYSTEM_DIR="$DATA_ROOT/$NAME/system"
		# Allocate a port: one past the highest ownPort of any already
		# installed agent, or 9090 for the first agent.
		OWN_PORT="9090"
		MAX_EXISTS_PORT=$(cat $AGENTS_ROOT/*/conf/buildAgent.properties 2>/dev/null | grep ownPort | cut -d= -f2 | sort -r | head -1)
		if [ ! -z "$MAX_EXISTS_PORT" ]; then
			OWN_PORT=$(expr $MAX_EXISTS_PORT + 1)
		fi
		# Fill in the distributed properties template with this agent's
		# server URL, name, directories and port.
		cat $AGENTS_ROOT/$NAME/conf/buildAgent.dist.properties \
			| sed "s,^serverUrl=.*$,serverUrl=$SERVER_URL,g" \
			| sed "s,^name=.*$,name=$NAME,g" \
			| sed "s,^workDir=.*$,workDir=$WORK_DIR,g" \
			| sed "s,^tempDir=.*$,tempDir=$TEMP_DIR,g" \
			| sed "s,^systemDir=.*$,systemDir=$SYSTEM_DIR,g" \
			| sed "s,^ownPort=.*$,ownPort=$OWN_PORT,g" \
			> $AGENTS_ROOT/$NAME/conf/buildAgent.properties
		chown -R teamcity $AGENTS_ROOT/$NAME
		echo "Agent installed to $AGENTS_ROOT/$NAME with follow options:"
		cat $AGENTS_ROOT/$NAME/conf/buildAgent.properties | egrep -v "^#" | egrep -v "^[[:space:]]*$"
		echo "To start/stop/restart agent use:"
		echo "sudo /etc/init.d/teamcity-agent (start|stop|restart) $NAME"
	fi
}
# The optional second argument selects the agent; default "buildAgent".
teamcity_agent_name="$2"
if [ -z "$teamcity_agent_name" ]; then
	teamcity_agent_name="buildAgent"
fi

case "$1" in
	start)
		echo "Starting TeamCity Agent '$teamcity_agent_name'..."
		d_start $teamcity_agent_name
	;;
	stop)
		echo "Stopping TeamCity Agent '$teamcity_agent_name'..."
		d_stop $teamcity_agent_name
	;;
	restart|force-reload)
		echo "Restarting TeamCity Agent '$teamcity_agent_name'..."
		d_stop $teamcity_agent_name
		sleep 1
		d_start $teamcity_agent_name
	;;
	list)
		# One installed agent name per line, sorted.
		find $AGENTS_ROOT -maxdepth 1 -mindepth 1 -type d -printf '%f\n' | sort
	;;
	install)
		shift
		echo "Installing new agent $1 connected to $2"
		d_install $1 $2
	;;
	*)
		echo "Usage: /etc/init.d/teamcity-agent {start|stop|restart|install} [<teamcity-agent-name>] [<teamcity.server.url>]" >&2
		exit 1
	;;
esac

exit 0
| true
|
9b174422d4879a4c554be65c194d6de921e9e786
|
Shell
|
ccadruvi/dotfiles
|
/bash/aliases/git.sh
|
UTF-8
| 995
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
#set -euo pipefail

# Short git aliases.
alias gs='git status'
alias gd='git diff'
alias gpl='git pull'
alias gps='git push'
alias gco='git checkout'
alias gc='git commit -m'
alias ga='git add'

# NOTE(review): relies on the 'complete-alias' helper providing
# _complete_alias — confirm it is sourced elsewhere in the dotfiles.
complete -F _complete_alias gs
complete -F _complete_alias gd
complete -F _complete_alias gpl
complete -F _complete_alias gps
complete -F _complete_alias gco
complete -F _complete_alias gc
complete -F _complete_alias ga
# List every branch (local and remote) sorted by author date, rendered
# as aligned columns: relative date, author, short ref name.
# Any extra arguments are passed straight through to `git branch`.
gb() {
    # Only emit color codes when stdout is a terminal (not piped).
    if [ -t 1 ]; then COLOR="always"; else COLOR="auto"; fi
    git branch --all --color="$COLOR" --sort=authordate \
        --format="%(color:blue)%(authordate:relative);%(color:red)%(authorname);%(color:white)%(color:bold)%(refname:short)" \
        "$@" | column -s ";" -t
}
# Delete local branches whose upstream is gone: switch to master, prune
# remote-tracking refs, then force-delete every branch marked ': gone'
# in `git branch -vv` output.
function gbd() {
    git checkout master;
    git fetch --prune;
    # 'local' keeps the list out of the sourcing shell's global namespace
    # (this file is meant to be sourced, so globals would leak).
    local branchesToDelete
    branchesToDelete=$(git branch -vv | grep ': gone' | awk '{print $1}')
    # Unquoted expansion is safe here: git branch names cannot contain
    # whitespace.
    for branch in $branchesToDelete; do
        git branch -D "$branch"
    done
}
| true
|
43c5f622ff37df31c6effad8464a0fd54788f938
|
Shell
|
fdev31/archx
|
/cos-installpackages.sh
|
UTF-8
| 1,150
| 3.46875
| 3
|
[] |
no_license
|
#!/usr/bin/env sh
# Main installer script for most packages
# - install pacmanhooks
# - RUN pre-install hooks
# - RUN install hooks
# - RUN install of DISTRO_PACKAGE_LIST
# - RUN distro_install_hook
# - set the BOOT_TARGET
# - RUN post-install hooks
# Guard: must be run from the checkout that contains configuration.sh,
# because the helper files below are sourced by relative path.
if [ ! -e configuration.sh ]; then
    echo "This script must be executed from its own original folder"
    exit 1
fi
# step/step2/run_hooks/install_pkg/distro_install_hook and the variables
# listed below are defined by these sourced helpers -- not in this file.
source ./strapfuncs.sh
source ./coslib.sh
# vars:
# R HOOK_BUILD_FOLDER DISTRIB DISTRO_PACKAGE_LIST BOOT_TARGET
HOOK_BUILD_DIR="$R/$HOOK_BUILD_FOLDER"
# Clean any hook build leftovers from a previous run (best-effort).
$SUDO rm -fr "$HOOK_BUILD_DIR" 2> /dev/null
step "Installing pacman hooks"
$SUDO mkdir -p "$R/etc/pacman.d/hooks"
$SUDO cp -r resources/pacmanhooks "$R/etc/pacman.d/hooks"
step "Triggering install hooks"
run_hooks pre-install
step " Network setup "
run_hooks install
# Install each distro package named in the (whitespace-separated) list.
if [ -n "$DISTRO_PACKAGE_LIST" ]; then
    step2 "Distribution packages"
    for pkg in $DISTRO_PACKAGE_LIST; do
        install_pkg "$pkg"
    done
fi
distro_install_hook
# Select the systemd boot target inside the mounted root.
# NOTE(review): uses bare "sudo" here but "$SUDO" elsewhere -- confirm
# whether that inconsistency is intentional.
sudo systemctl --root "$_MOUNTED_ROOT_FOLDER" set-default ${BOOT_TARGET}.target
run_hooks post-install
$SUDO mkdir -p "$_MOUNTED_ROOT_FOLDER/var/cache/pikaur/"
sudo mv "$R/stdout.log" .
| true
|
5c2f164f9247cb9a4dab4f986ec28631db32160e
|
Shell
|
s-kat/bash-scripts
|
/polynom_calc.sh
|
UTF-8
| 259
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# Evaluate a polynomial modulo 1e9+7.
# input.txt holds whitespace-separated tokens: the first token is the
# evaluation point x; the remaining tokens are coefficients in order of
# ascending power (a0, a1, a2, ...). The result goes to output.txt.
x=-1
grade=1
ans=0
base=$(((10 ** 9) + 7))
# Word-splitting of $(cat ...) is intentional: tokens may be separated
# by spaces or newlines.
for line in $(cat input.txt)
do
	if [[ $x -eq -1 ]]; then
		# First token is x (sentinel: x starts as -1).
		x=$line
	else
		# ans += coeff * x^k (mod base); grade tracks x^k.
		ans=$((($ans + ($line * $grade)%base)%base))
		grade=$((($grade * $x)%base))
	fi
done
# BUG FIX: bash '%' returns a negative remainder for negative operands,
# so normalize into [0, base) before writing the answer.
ans=$((($ans % $base + $base) % $base))
echo $ans > output.txt
| true
|
29b0300dbb552a5cf7eb3715634722c7b5eabbf2
|
Shell
|
wonksing/githooks
|
/hooks/post-receive
|
UTF-8
| 532
| 3.140625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Git post-receive hook: when master or stage is pushed, post a summary
# of the new head commit to a Slack incoming webhook.
# git feeds "<oldrev> <newrev> <refname>" on stdin; oldrev is unused.
read oldrev newrev _branch
REPO=$(basename "$PWD")
# NOTE(review): TARGET_BRANCHES is defined but never used -- the check
# below hard-codes master/stage instead; confirm which set is intended.
TARGET_BRANCHES="^(master|release-*|patch-*)"
# Strip the ref prefix (refs/heads/<name>) down to the bare branch name.
branch=$(echo "$_branch" | sed 's/.*\/\([a-z0-9][a-z0-9]*\)$/\1/')
msg=$(git log -1 --pretty=format:"CommitDate: %ci%nRepo: ${REPO}%nBranch: ${branch}%nCommitHash: %h%nCommitter: %cn%nCommitMsg:%n%B" "$newrev")
if [ "$branch" = "master" ] || [ "$branch" = "stage" ]
then
    curl -H "Content-Type: application/json; charset=utf-8" \
        -X POST \
        -d "{\"text\": \"${msg}\"}" \
        https://hooks.slack.com/services/TF...
fi
# BUG FIX: removed a stray "~" line (vim artifact) that was executed as
# a command and failed on every invocation of the hook.
| true
|
a8f326f5909c644d5be7cf1521653a722e55f73a
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/python-yaql/PKGBUILD
|
UTF-8
| 1,674
| 2.578125
| 3
|
[] |
no_license
|
# Maintainer: Andy Botting <andy@andybotting.com>
# Split PKGBUILD: builds both the Python 3 and Python 2 variants of
# yaql from the same upstream git tag.
_module='yaql'
pkgname=('python-yaql' 'python2-yaql')
pkgver='1.1.3'
pkgrel='2'
pkgdesc='YAQL - Yet Another Query Language'
arch=('any')
url='https://yaql.readthedocs.io'
license=('Apache')
makedepends=('git' 'python-setuptools' 'python2-setuptools')
# Test-only dependencies, mirrored for both Python versions.
checkdepends=('python-pbr' 'python2-pbr'
    'python-dateutil' 'python2-dateutil'
    'python-ply' 'python2-ply'
    'python-six' 'python2-six'
    'python-fixtures' 'python2-fixtures'
    'python-subunit' 'python2-subunit'
    'python-testrepository' 'python2-testrepository'
    'python-testtools' 'python2-testtools')
source=("git+https://git.openstack.org/openstack/${_module}#tag=${pkgver}")
# Git sources are pinned by tag, so no checksum is verified.
md5sums=('SKIP')
# Duplicate the fetched source tree so Python 3 and Python 2 each build
# from an independent copy.
prepare() {
    cp -a "${srcdir}/${_module}" "${srcdir}/${_module}-py2"
}
# Build the module for Python 3 (with a test-suite compatibility patch)
# and then for Python 2 from the copy made in prepare().
build() {
    cd "${srcdir}/${_module}"
    # Fix test function name for Python 3
    # (unittest renamed assertItemsEqual to assertCountEqual).
    sed -i 's/assertItemsEqual/assertCountEqual/g' yaql/tests/*.py
    python setup.py build
    cd "${srcdir}/${_module}-py2"
    python2 setup.py build
}
# Run the test suite (via testrepository) for each Python version.
check() {
    cd "${srcdir}/${_module}"
    python setup.py testr
    cd "${srcdir}/${_module}-py2"
    # Force testrepository to use the Python 2 interpreter.
    PYTHON=python2 python2 setup.py testr
}
# Package the Python 3 variant into $pkgdir.
package_python-yaql() {
    depends=('python-six' 'python-pbr' 'python-babel' 'python-dateutil'
        'python-ply')
    cd "${srcdir}/${_module}"
    python setup.py install --root="${pkgdir}/" --optimize=1
}
# Package the Python 2 variant; the CLI is renamed to yaql2 so the two
# packages can be installed side by side without a file conflict.
package_python2-yaql() {
    depends=('python2-six' 'python2-pbr' 'python2-babel' 'python2-dateutil'
        'python2-ply')
    cd "${srcdir}/${_module}-py2"
    python2 setup.py install --root="${pkgdir}/" --optimize=1
    mv "${pkgdir}"/usr/bin/yaql{,2}
}
# vim:set ts=2 sw=2 et:
| true
|
080092a612755f58d0ba1eaffab04efcd2bcb281
|
Shell
|
aperloff/utilities
|
/PBS_Slurm/changeQueue_Slurm.sh
|
UTF-8
| 1,560
| 4
| 4
|
[] |
no_license
|
#!/bin/bash
# Print this script's help text to stdout.
usage() {
cat <<USAGE_EOF
usage: $0 options
This script will show the current queue status for a selection of users.
OPTIONS:
-d Destination partition where the jobs will end up
-h Show this message
-s Source partition where the pending jobs now reside
-t The new walltime to set
-u Username with the jobs you want to modify
-v Verbose
USAGE_EOF
}
# Parse command-line flags into DESTINATION, SOURCE, TIME, USERNAME and
# VERBOSE. -h (or any unknown flag) prints the help text and exits.
while getopts d:hs:t:u:v OPTION; do
    case "$OPTION" in
        d) DESTINATION=$OPTARG ;;
        h|\?)
            usage
            exit 1
            ;;
        s) SOURCE=$OPTARG ;;
        t) TIME=$OPTARG ;;
        u) USERNAME=$OPTARG ;;
        v) VERBOSE=1 ;;
    esac
done
# Validate required flags, print the effective settings, then move every
# PENDING job of $USERNAME from the source partition to the destination,
# optionally applying a new walltime.
if [ -z "$SOURCE" ]; then
    echo "You must specify a source partition using the -s option."
    exit 1
elif [ -z "$DESTINATION" ]; then
    echo "You must specify a destination partition using the -d option."
    exit 1
fi
if [ -z "$USERNAME" ]; then
    USERNAME=$USER
fi
echo "Settings:"
echo "---------"
echo | awk -v I="${USERNAME}" -v S="$SOURCE" -v D="$DESTINATION" -v T="$TIME" 'BEGIN { format = "\t%-21s = %-25s\n" } { printf format, "username",I} { printf format, "source partition",S} { printf format, "destination partition",D } { printf format, "new walltime",T}'
for jobid in $(squeue -u "$USERNAME" -t PENDING -p "$SOURCE" -o '%A' --noheader); do
    # BUG FIX: the original tested [ -z "$TIME" ] here, so the walltime
    # branch ran only when -t was NOT given (with an empty time=) and the
    # requested walltime was never applied. The test must be -n.
    if [ -n "$TIME" ]; then
        scontrol update job="$jobid" partition="$DESTINATION" qos=grid time="$TIME";
    else
        scontrol update job="$jobid" partition="$DESTINATION";
    fi
done
| true
|
1899ad98392cbfb31ad8c2b745e9ed0ab417caa0
|
Shell
|
316011989/flutter_ijkplayer
|
/ios/switch_local.sh
|
UTF-8
| 603
| 2.75
| 3
|
[
"MIT"
] |
permissive
|
# Switch the podspec (and the bundled IJKMediaFramework) between the
# local dev build and the published pod release.
case "$1" in
    dev)
        rm IJKMediaFramework.framework
        ln -s /Volumes/Samsung-T5/code/media_projects/ijkplayer/flutter_ijk/ios/output/IJKMediaFramework.framework IJKMediaFramework.framework
        rm flutter_ijkplayer.podspec
        ln flutter_ijkplayer.dev.podspec flutter_ijkplayer.podspec
        echo 'switch to local'
        ;;
    prod)
        rm IJKMediaFramework.framework
        rm flutter_ijkplayer.podspec
        ln flutter_ijkplayer.prod.podspec flutter_ijkplayer.podspec
        echo 'switch to pod version'
        ;;
    *)
        echo "Switch dev or product"
        echo "Usage:"
        echo " $0 dev|prod"
        ;;
esac
| true
|
005a402a3a6a8c4f4dd6ce8bbeca96540d92401e
|
Shell
|
databooks/databook
|
/build-tools/docker-build/hooks/post_push
|
UTF-8
| 370
| 3.421875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Tag the latest build with the short git sha. Push the tag in addition
# to the "latest" tag already pushed.
GIT_SHA_TAG=${SOURCE_COMMIT:0:12}
# Quote all expansions (SC2086): an empty or space-containing value
# would otherwise mangle the docker argument list.
docker tag "$IMAGE_NAME" "$DOCKER_REPO:$GIT_SHA_TAG"
docker push "$DOCKER_REPO:$GIT_SHA_TAG"
# Invoke all downstream build triggers.
# NEXT_BUILD_TRIGGERS is a comma-separated list of webhook URLs.
for url in $(echo "$NEXT_BUILD_TRIGGERS" | sed "s/,/ /g")
do
    curl -X POST "$url"
done
| true
|
dcfa98bc7356742722e29a46b69d47ebb8df975d
|
Shell
|
isabella232/presto-yarn
|
/presto-yarn-test/etc/docker/get_docker_ips.sh
|
UTF-8
| 617
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/bash
# Emit a YAML "hosts:" section mapping each Hadoop docker-compose
# container to its eth0 IP address (linux only). Must be run from the
# directory containing docker-compose.yml.
# -e moved from the shebang into 'set' so it still applies when the
# script is invoked as "bash get_docker_ips.sh".
set -e
if test ! -f docker-compose.yml; then
    # BUG FIX: "wiith" -> "with" in the error message.
    echo Please run this script from directory with docker-compose.yml file.
    exit 1
fi
echo "# This section is generated by ../$0"
echo "# You may need to regenerate it per each 'docker-compose up' command"
echo "# NOTE this is only supported on linux"
echo 'hosts:'
for container in hadoop-master hadoop-slave{1,2,3}; do
    container_id=$(docker-compose ps -q "$container")
    # Pull the "inet addr:x.x.x.x" field out of ifconfig's eth0 output.
    container_ip=$(docker exec "$container_id" ifconfig eth0 | grep 'inet addr' | awk '{print $2 }' | cut -d : -f 2)
    echo " $container: $container_ip"
done
echo "# End of generated section"
| true
|
a91fe52fd5e6e51bbfbb5e2f4fe3ea15900bec84
|
Shell
|
swiss-art-research-net/docs
|
/docs/et/archivalCSV/archival.sh
|
UTF-8
| 1,979
| 2.875
| 3
|
[] |
no_license
|
#! /bin/bash
# Archival-data pipeline: clean the working directory, copy in the
# shared conversion scripts, scrape the Airtable views to CSV, then
# convert CSV -> RDF (ttl) -> Mermaid -> PNG.
# Remove artifacts of a previous run.
rm -rf criteria.sh ;
rm -rf csvcut.sh ;
rm -rf mmd.sh ;
rm -rf prefixes.txt ;
rm -rf rename_csv.sh ;
# NOTE(review): this removes two entries, "rename_png" and "copy.sh".
# If the target is one file literally named "rename_png copy.sh"
# (a macOS duplicate), it needs quoting -- confirm.
rm -rf rename_png copy.sh ;
rm -rf rename_png.sh ;
rm -rf src/ ;
rm -rf turtle.sh ;
rm -rf ttl/ ;
rm -rf *.csv ;
# Copy the shared conversion scripts into the working directory.
cp ../scripts/prefixes.txt .
cp ../scripts/csvcut.sh .
cp ../scripts/mmd.sh .
cp ../scripts/criteria.sh .
cp ../scripts/turtle.sh .
cp ../scripts/rename_png.sh .
cp ../scripts/rename_csv.sh .
cp -R ../scripts/src/ src/
# Scrape each shared Airtable view into its CSV file.
python ../airscraper/airscraper/airscraper/airscraper.py https://airtable.com/shrNXdw7WZJf77xO6 > archival_names.csv ;
python ../airscraper/airscraper/airscraper/airscraper.py https://airtable.com/shrBZTbMD2dJJ7Chi > archival_rights.csv ;
python ../airscraper/airscraper/airscraper/airscraper.py https://airtable.com/shrd22DtCX0GG34Jg > archival_location.csv ;
python ../airscraper/airscraper/airscraper/airscraper.py https://airtable.com/shrlSnotc4ugNYIjj > archival_desc.csv ;
python ../airscraper/airscraper/airscraper/airscraper.py https://airtable.com/shrA5yMyUjWkgnajD > archival_docs.csv ;
python ../airscraper/airscraper/airscraper/airscraper.py https://airtable.com/shrw2FJaaT8jrK8M1 > archival_parthood.csv ;
python ../airscraper/airscraper/airscraper/airscraper.py https://airtable.com/shrbkP7XeL2ZkC1Hb > archival_existence.csv ;
python ../airscraper/airscraper/airscraper/airscraper.py https://airtable.com/shr5CqSEquR1GH73U > archival_substance.csv ;
python ../airscraper/airscraper/airscraper/airscraper.py https://airtable.com/shrH0zA2yRwub0LEd > archival_aboutness.csv ;
python ../airscraper/airscraper/airscraper/airscraper.py https://airtable.com/shr1POYwgIiUtUm2a > archival_events.csv ;
python ../airscraper/airscraper/airscraper/airscraper.py https://airtable.com/shrbplxdEnio2noxQ > archival_relation.csv
echo "finished download"
# Conversion chain; each helper script was copied in above and the
# cd calls descend into the directories the helpers create.
./csvcut.sh
cd ttl/
./turtle.sh
echo "converted to RDF"
./criteria.sh
echo "converted to Mermaid"
cd mmd/
./mmd.sh
echo "converted to PNG"
cd png/
./rename_png.sh
echo "done"
| true
|
abc033c234a59dd81b667118039ff448daf8ac61
|
Shell
|
FransTegelmark/aliquid-stratmas
|
/StratmasDispatcher/buildutils/buildjar.sh
|
UTF-8
| 519
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
# Merge every jar on a ':'-separated classpath into one combined jar.
#   $1 BUILDDIR  - scratch directory where jars are exploded and rebuilt
#   $2 CLASSPATH - colon-separated list of jar files
#   $3 JARNAME   - name of the jar to create (manifest read from ../Manifest)
CMD=$(basename "$0")
DIR="$1"
CLASSPATH="$2"
JARNAME="$3"
if [[ ! -d "$DIR" ]]; then
    # BUG FIX: was "2>&1", which pointlessly redirected stderr to stdout;
    # the error message belongs on stderr.
    echo "$CMD: Error: BUILDDIR \"$DIR\" is not directory." >&2
    exit 1;
fi
cd "$DIR" || exit 1;
OLDIFS="$IFS"
IFS=":"
for JAR in $CLASSPATH; do
    # BUG FIX: was IFS="OLDIFS" (missing '$'), which set IFS to the
    # literal characters O,L,D,I,F,S instead of restoring the saved
    # value. The classpath list was already split above, so restoring
    # here is safe.
    IFS="$OLDIFS"
    if [[ -f "$JAR" ]]; then
        echo Processing "$JAR"
        jar xf "$JAR"
    fi
done
IFS="$OLDIFS"
# Recreate META-INF
rm -rf META-INF
echo Building "$JARNAME"
jar cfm "$JARNAME" ../Manifest *
echo "Creating index in " "$JARNAME"
jar -i "$JARNAME"
| true
|
ef4af3ef7c3e2e603beed8ce35e18c7970cdc6b2
|
Shell
|
Ouzii/distributed-string-manipulator
|
/sendMessage.sh
|
UTF-8
| 326
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
#send request to server
# takes 2 params 1st is the type (1 is reverse or 2 is to uppercase), 2nd param is your string
# example usage ./sendMessage.sh 1 kotimato
set -e
# BUG FIX: $2 was spliced in unquoted ('$2' left it outside the single
# quotes), so a string with spaces or glob characters word-split and
# produced a mangled JSON body. Both parameters are now double-quoted.
BODY='{"type":"'"$1"'", "msg":"'"$2"'"}'
RES="$(curl -d "$BODY" -H "Content-Type: application/json" -X POST http://localhost:8080)"
# Quote the response so whitespace is preserved verbatim.
echo "$RES"
| true
|
c4279976c15fc7558aa8f628d90218f13b88b349
|
Shell
|
great-fork/tmoe-linux
|
/share/old-version/share/container/proot/startup
|
UTF-8
| 29,801
| 3.46875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
cat >${TMOE_STARTUP_SCRIPT} <<-ENDOFPROOT
#!/usr/bin/env bash
##################
PROOT_USER="root"
# You can specify a user. Default is root
# 您可以指定用户,例如root或ubuntu,默认为root
# Sie können einen Benutzer angeben
HOME_DIR="default"
# If the value is "default", the home directory will be auto-detected. You can enter an absolute path, for example, "/home/xxx".
WORK_DIR="default"
# Set the initial working directory to path. Some programs expect to be launched from a given directory but do not perform any chdir by them‐selves. This option avoids the need for running a shell and then entering the directory manually.
# 启动容器后进入的目录,默认为用户的主目录。请注意: 若您的默认登录shell为bash/zsh, 则请在“环境变量与登录项管理”处修改entrypoint。
################
PROOT_BIN="default"
# Optional values: "default", "system", "termux", "32", or an absolute path.
# You can specify the path where the binary file of proot is located. If the value is "default", it will use the default path of the system.
# If the value is "32", it will use proot 32bit-version. If your host is arm64, and you want to run an i386/armhf container, then you can modify it to "32".
# You can also enter an absolute path, for example, "\${PREFIX}/bin/proot"
# if (system|default) {bin = "proot"}
# else if (termux) {bin = "${PREFIX}/bin/proot"}
# else if (32) {bin = "\$PROOT_32_TERMUX_BIN"}
# 您可以自行编译proot,并指定其二进制文件所在路径。当此变量值为"default"时,将使用系统默认proot二进制文件所在路径;当其为"termux"时,将使用 "\${PREFIX}/bin/proot"。
# 您也可以输入完整路径,例如"/usr/local/bin/proot"
# Note:若您使用了termux, 宿主机为64位(arm64),容器为32位(i386/armhf),则请使用32位版proot。如果arm64 termux使用了armhf版的proot,那么qemu-user-i386(arm64)也要切换为32位armhf版本。
# if (host = arm64-android && container = i386) { armhf proot + qemu-user-i386(armhf) }
# else if (host = arm64-android && container = amd64) { arm64 proot + qemu-user-x86_64(arm64) }
# else if (host = arm64 && container = arm64) { arm64 proot }
PROOT_32_TERMUX_BIN="${TMOE_LINUX_DIR}/lib32/data/data/com.termux/files/usr/bin/proot"
# PROOT_32_DEBIAN_BIN="${TMOE_LINUX_DIR}/lib32/usr/bin/proot"
PROOT_LIBEXEC_LOADER="default"
PROOT_32_TERMUX_LOADER="${TMOE_LINUX_DIR}/lib32/data/data/com.termux/files/usr/libexec/proot/loader"
LD_LIB_PATH="default"
PROOT_32_TERMUX_LD_LIB_PATH="${TMOE_LINUX_DIR}/lib32/data/data/com.termux/files/usr/lib"
ROOTFS_DIR="${DEBIAN_CHROOT}"
PROOT_PROC_DIR="\${ROOTFS_DIR}/usr/local/etc/tmoe-linux/proot_proc"
################
LOAD_ENV_FILE=true
# Load the environment variable file when starting the container. Default is true.
CONTAINER_ENV_FILE="\${ROOTFS_DIR}/usr/local/etc/tmoe-linux/environment/container.env"
LOAD_PROOT_CONF=true
# Default is true. The priority of the configuration file is higher than the current configuration.
# 当该值为true时,当前配置信息将会被配置文件里的内容所覆盖。
PROOT_CONF_FILE="${CONFIG_FOLDER}/proot_global.conf"
################
KILL_ON_EXIT=true
# Kill all processes on command exit. When the executed command leaves orphean or detached processes around, proot waits until all processes possibly terminate. This option forces the immediate termination of all tracee processes when the main command exits. Default is true.
# 退出容器时, 自动杀掉所有进程, 默认为true
PROOT_SYSVIPC=true
# Handles System V IPC syscalls (shmget, semget, msgget, etc.) syscalls inside proot. IPC is handled inside proot and launching 2 proot instances will lead to 2 different IPC Namespaces. Default is true.
PROOT_L=true
# Correct the size returned from lstat for symbolic links. Default is true.
PROOT_H=false
# Hide files and directories starting with '.proot.'. Default is false.
# If your container is arch, you can modify the value to true.
# If you find that some folders cannot be deleted in the container, please modify the value to false.
PROOT_P=false
# Modify bindings to protected ports to use a higher port number. Default is false.
FAKE_KERNEL=false
# 伪造内核版本信息, 默认为false, 若将此变量值修改为true, 则启用该功能
# Default is false.
KERNEL_RELEASE="5.10.0-7-cloud-${ARCH_TYPE}"
# Make current kernel appear as kernel release *string*.
LINK_TO_SYMLINK=true
# Replace hard links with symlinks, pretending they are really hardlinks. Emulates hard links with symbolic links when SELinux policies do not allow hard links. Default is true.
# export PROOT_NO_SECCOMP=1
PROOT_DEBUG=false
# proot调试输出, 默认为false
# Default is false.
VERBOSE_LEVEL=2
# Set the level of debug information to *value*. The higher the integer value is, the more detailed debug information is printed to the standard error stream. A negative value makes PRoot quiet except on fatal errors.
OLD_ANDROID_VERSION_COMPATIBILITY_MODE=false
# Default is false.
# 旧系统/旧版本兼容模式
# Set the values of PROOT_L, PROOT_P and PROOT_SYSVIPC to false, and set FAKE_KERNEL to true.
################
#qemu
HOST_DISTRO="${LINUX_DISTRO}"
# Optional values: "Android", "linux"
HOST_ARCH="${TRUE_ARCH_TYPE}"
# Host architecture
CONTAINER_DISTRO="${CONTAINER_DISTRO}"
CONTAINER_NAME="${DEBIAN_FOLDER}"
CONTAINER_ARCH="${ARCH_TYPE}"
# 容器架构
# Optional values: "amd64", "i386", "arm64", "armhf", "armel", "mipsel", "mips64el", "ppc64el", "s390x", "riscv64"
QEMU_ARCH="default"
# Optional values: "default", "x86_64", "i386", "aarch64", "aarch64_be", "arm", "armeb", "mipsel", "mips64el", "s390x", "riscv64"
SKIP_QEMU_DETECTION=false
# After skipping, qemu will be called forcibly.
# 跳过qemu版本和架构的检测,并强制调用qemu。如需启用本选项,则请手动修改 QEMU_ARCH 的值为指定架构。
QEMU_USER_STATIC=true
# 当该值为true时,使用静态编译的版本。若该值为false,且遇到了lib库问题,则请挂载/system和/apex等目录。
# If the value is false, please install qemu-user manually, for example, apt install qemu-user-i386 qemu-user-x86_64 qemu-user-aarch64 qemu-user-arm
QEMU_32_ENABLED=false
# If your host is arm64-android, and you are using 32bit proot, then enable qemu32.
QEMU_USER_STATIC_PATH="${TMOE_LINUX_DIR}/lib/usr/bin"
QEMU_USER_STATIC_32_PATH="${TMOE_LINUX_DIR}/lib32/usr/bin"
QEMU_USER_BIN="default"
# 默认会自动根据宿主机架构和容器架构来判断需要调用的qemu版本, 您可以指定特定版本的qemu二进制文件。
# You can enter an absolute path, for example, "\${PREFIX}/bin/qemu-x86_64"
################
EXA_ENABLED=false
# Only for testing. Default is false.
# Only applicable to i386 environment. If you enable it, then QEMU will be automatically disabled. In addition, some mount settings will also be automatically disabled.
# 此功能与qemu冲突,如需启用qemu, 则请将该值修改为false。
# 此功能仅供测试,不建议将该值修改为true。
EXA_PATH="${TMOE_LINUX_DIR}/lib32/usr/bin"
EXA_PREFIX="\${ROOTFS_DIR}"
# FORK_CONTALLER=false
# IPC_EMU_SER=false
# External IPC emulation is used on Android only. Default is false.
VFS_HACKS="tlsasws,tsi,spd"
# pels,ansep,tlsasws,tsi,spd
SOCKET_PATH_SUFFIX=""
# es, ed
VPATHS_LIST="/dev/null"
VFS_KIND="guest-first"
################
# shells
# let mut shells: Vec<&str> = vec!["zsh", "fish", "bash", "ash", "su"];
DEFAULT_LOGIN_SHELL_0="zsh"
# The default login shell is zsh.
# 默认登录shell是zsh
# Die Standard-Login-Shell ist zsh.
DEFAULT_LOGIN_SHELL_1="fish"
DEFAULT_LOGIN_SHELL_2="bash"
DEFAULT_LOGIN_SHELL_3="ash"
DEFAULT_LOGIN_SHELL_4="su"
# The lower the number, the higher the priority.
################
#mounts
MOUNT_SD=true
SD_DIR_0="/storage/self/primary"
SD_DIR_1="/sdcard"
SD_DIR_2="/storage/emulated/0"
SD_DIR_3="\${HOME}/sd"
SD_DIR_4="\${HOME}/Downloads"
SD_DIR_5="\${HOME}/Download"
SD_MOUNT_POINT="/media/sd"
# The lower the number, the higher the priority. The highest priority directory will be mounted to the mount point.
# 挂载sd,默认为true,SD_DIR为宿主机sd目录,SD_MOUNT_POINT为容器内的挂载点。优先级别高,且存在相应目录时,才会被挂载。默认挂载点为容器内部的"/media/sd"
MOUNT_TERMUX=true
TERMUX_DIR="/data/data/com.termux/files"
TERMUX_MOUNT_POINT="/media/termux"
MOUNT_TF=true
TF_CARD_LINK="\${HOME}/storage/external-1"
TF_MOUNT_POINT="/media/tf"
# The value of TF_CARD_LINK is a symbolic link file.
# TF_CARD_LINK的值为一个软链接文件
MOUNT_STORAGE=true
STORAGE_DIR="/storage"
STORAGE_MOUNT_POINT="/storage"
# If the value of "MOUNT_STORAGE" is "false", the relevant directory will not be mounted. Default is true.
MOUNT_GITSTATUS=true
GITSTATUS_DIR="${CONFIG_FOLDER}/gitstatus"
GITSTATUS_MOUNT_POINT="/root/.cache/gitstatus"
MOUNT_TMP=false
TMP_SOURCE_DIR="${TMPDIR}"
TMP_MOUNT_POINT="/tmp"
MOUNT_SYSTEM=true
SYSTEM_DIR="/system"
MOUNT_APEX=true
APEX_DIR="/apex"
MOUNT_SYS=false
SYS_DIR="/sys"
MOUNT_DEV=true
DEV_DIR="/dev"
MOUNT_SHM_TO_TMP=true
MOUNT_URANDOM_TO_RANDOM=true
MOUNT_DEV_FD=true
MOUNT_DEV_STDIN=true
MOUNT_DEV_STDOUT=true
MOUNT_DEV_STDERR=true
MOUNT_DEV_TTY=true
MOUNT_PROC=true
PROC_DIR="/proc"
FAKE_PROOT_PROC=true
# Default is true.
MOUNT_CAP_LAST_CAP=true
CAP_LAST_CAP_SOURCE="/dev/null"
CAP_LAST_CAP_MOUNT_POINT="/proc/sys/kernel/cap_last_cap"
# /dev/null:/proc/sys/kernel/cap_last_cap
#---
NUM_OF_MOUNTS=12
# uint8
# 默认为12, 若您将该值修改为15, 则请手动添加MOUNT_SOURCE_13,MOUNT_SOURCE_14,MOUNT_SOURCE_15,MOUNT_POINT_13,MOUNT_POINT_14 & MOUNT_POINT_15 变量。
# If you want to mount hundreds of directories, then you need to add variables manually.
#---
# MOUNT_SOURCE_1为第一个挂载源,MOUNT_POINT_1 为第一个挂载点,MOUNT_SOURCE_2为第二个挂载源 ...
# 举个例子,假如您想将/storage/emulated/0/Download目录挂载至容器内部的/media/down, 那么MOUNT_SOURCE_1的值为"/storage/emulated/0/Download", MOUNT_POINT_1的值为"/media/down"
# For example, if you want to mount the /storage/emulated/0/DCIM directory to /media/pic, then the value of MOUNT_SOURCE_2 is "/storage/emulated/0/DCIM", and the value of MOUNT_POINT_2 is "/media/pic"
MOUNT_SOURCE_1=""
MOUNT_POINT_1=""
#---
MOUNT_SOURCE_2=""
MOUNT_POINT_2=""
#---
MOUNT_SOURCE_3=""
MOUNT_POINT_3=""
#---
MOUNT_SOURCE_4=""
MOUNT_POINT_4=""
#---
MOUNT_SOURCE_5=""
MOUNT_POINT_5=""
#---
MOUNT_SOURCE_6=""
MOUNT_POINT_6=""
#---
MOUNT_SOURCE_7=""
MOUNT_POINT_7=""
#---
MOUNT_SOURCE_8=""
MOUNT_POINT_8=""
#---
MOUNT_SOURCE_9=""
MOUNT_POINT_9=""
#---
MOUNT_SOURCE_10=""
MOUNT_POINT_10=""
#---
MOUNT_SOURCE_11=""
MOUNT_POINT_11=""
#---
MOUNT_SOURCE_12=""
MOUNT_POINT_12=""
#---
################
TMOE_LOCALE_FILE="${CONFIG_FOLDER}/locale.txt"
DEFAULT_SHELL_CONF="${CONFIG_FOLDER}/default_shell.conf"
PROC_FD_PATH="/proc/self/fd"
HOST_NAME_FILE="\${ROOTFS_DIR}/etc/hostname"
################
main() {
case "\$1" in
i* | -i* | -I*)
tmoe t
exit 0
;;
-vnc* | vnc*) startx11vnc ;;
-n | novnc*) novnc ;;
-x) startxsdl ;;
*) start_tmoe_gnu_linux_container ;;
esac
}
check_qemu_arch() {
TMOE_QEMU=true
case "\${CONTAINER_ARCH}" in
i386)
case \${HOST_ARCH} in
amd64 | i386) TMOE_QEMU=false ;;
*) TMOE_QEMU_ARCH="i386" ;;
esac
;;
amd64) TMOE_QEMU_ARCH="x86_64" ;;
arm64) TMOE_QEMU_ARCH="aarch64" ;;
armhf)
case \${HOST_ARCH} in
arm64 | armhf) TMOE_QEMU=false ;;
*) TMOE_QEMU_ARCH="arm" ;;
esac
;;
armel)
case \${HOST_ARCH} in
arm64 | armhf | armel) TMOE_QEMU=false ;;
*) TMOE_QEMU_ARCH="armeb" ;;
esac
;;
ppc64el) TMOE_QEMU_ARCH="ppc64le" ;;
s390x) TMOE_QEMU_ARCH="s390x" ;;
mipsel) TMOE_QEMU_ARCH="mipsel" ;;
mips64el) TMOE_QEMU_ARCH="mips64el" ;;
riscv64) TMOE_QEMU_ARCH="riscv64" ;;
esac
}
check_qemu32_path() {
case \${QEMU_32_ENABLED} in
true) QEMU_PATH="\${QEMU_USER_STATIC_32_PATH}/" ;;
false) QEMU_PATH="\${QEMU_USER_STATIC_PATH}/" ;;
esac
[[ -e \${QEMU_PATH}qemu-x86_64-static ]] || unset QEMU_PATH
}
check_qemu_bin() {
case \${QEMU_USER_BIN} in
default | "")
case \${QEMU_USER_STATIC} in
true) QEMU_BIN="\${QEMU_PATH}qemu-\${TMOE_QEMU_ARCH}-static" ;;
false) QEMU_BIN="qemu-\${TMOE_QEMU_ARCH}" ;;
esac
;;
*) QEMU_BIN="\${QEMU_USER_BIN}" ;;
esac
}
##############
check_exa_var() {
case \${CONTAINER_ARCH} in
i386) ;;
*) unset EXA_ENABLED ;;
esac
case \${EXA_ENABLED} in
true) unset MOUNT_TERMUX MOUNT_APEX MOUNT_SYSTEM MOUNT_SYS ;;
*) unset EXA_PREFIX ;;
esac
}
##############
start_tmoe_gnu_linux_container() {
if [[ \${LOAD_PROOT_CONF} = true && -r \${PROOT_CONF_FILE} ]]; then
source \${PROOT_CONF_FILE}
fi
unset LD_PRELOAD PROOT_UID PROOT_GID PROOT_HOME CONTAINER_BIN_PATH
check_exa_var
############
case \${PROOT_USER} in
"" | root) ;;
*)
PROOT_UID=\$(grep "^\${PROOT_USER}:" \${ROOTFS_DIR}/etc/passwd | awk -F ':' '{print \$3}')
PROOT_GID=\$(grep "^\${PROOT_USER}:" \${ROOTFS_DIR}/etc/passwd | awk -F ':' '{print \$4}')
;;
esac
if [[ -z \${PROOT_UID} ]]; then
PROOT_UID=0
PROOT_GID=0
fi
case \${HOME_DIR} in
default | "")
case \${PROOT_USER} in
root | "") PROOT_HOME="/root" ;;
*)
PROOT_HOME=\$(grep "^\${PROOT_USER}:" \${ROOTFS_DIR}/etc/passwd | awk -F ':' '{print \$6}')
[[ -n \${PROOT_HOME} ]] || PROOT_HOME="/home/\${PROOT_USER}"
;;
esac
;;
*) PROOT_HOME="\${HOME_DIR}" ;;
esac
if [[ \${PROOT_USER} = root || -z \${PROOT_USER} ]]; then
set -- "\${@}" "--root-id"
else
set -- "\${@}" "--change-id=\${PROOT_UID}:\${PROOT_GID}"
fi
if [[ \${EXA_ENABLED} != true ]]; then
if [[ \${WORK_DIR} = default || -z \${WORK_DIR} ]]; then
set -- "\${@}" "--pwd=\${PROOT_HOME}"
else
set -- "\${@}" "--pwd=\${WORK_DIR}"
fi
else
set -- "\${@}" "--pwd=/"
fi
[[ \${EXA_ENABLED} = true ]] || set -- "\${@}" "--rootfs=\${ROOTFS_DIR}"
if [[ "\${HOST_DISTRO}" = 'Android' ]]; then
if [[ \${MOUNT_SYSTEM} = true ]]; then
if [[ -e "\${SYSTEM_DIR}" ]]; then
set -- "\${@}" "--mount=\${SYSTEM_DIR}"
fi
fi
if [[ \${MOUNT_APEX} = true ]]; then
if [[ -e "\${APEX_DIR}" ]]; then
set -- "\${@}" "--mount=\${APEX_DIR}"
fi
fi
if [[ \${KILL_ON_EXIT} = true ]]; then
set -- "\${@}" "--kill-on-exit"
fi
if [[ \${MOUNT_TF} = true ]]; then
if [[ -L "\${TF_CARD_LINK}" ]]; then
TRUE_TF_CARD=\$(readlink \${TF_CARD_LINK})
if [[ -e "\${TRUE_TF_CARD}" ]]; then
set -- "\${@}" "--mount=\${TRUE_TF_CARD}:\${EXA_PREFIX}\${TF_MOUNT_POINT}"
fi
fi
fi
if [[ \${MOUNT_STORAGE} = true ]]; then
if [[ -e "\${STORAGE_DIR}" ]]; then
set -- "\${@}" "--mount=\${STORAGE_DIR}:\${EXA_PREFIX}\${STORAGE_MOUNT_POINT}"
fi
fi
if [[ \${MOUNT_TERMUX} = true ]]; then
if [[ -e "\${TERMUX_DIR}/home" ]]; then
set -- "\${@}" "--mount=\${TERMUX_DIR}:\${EXA_PREFIX}\${TERMUX_MOUNT_POINT}"
fi
fi
if [[ \${OLD_ANDROID_VERSION_COMPATIBILITY_MODE} = true ]]; then
PROOT_P=false && PROOT_L=false && PROOT_SYSVIPC=false && FAKE_KERNEL=true
fi
[[ \${PROOT_SYSVIPC} != true ]] || set -- "\${@}" "--sysvipc"
[[ \${PROOT_L} != true ]] || set -- "\${@}" "-L"
[[ \${PROOT_H} != true ]] || set -- "\${@}" "-H"
[[ \${PROOT_P} != true ]] || set -- "\${@}" "-p"
[[ \${LINK_TO_SYMLINK} != true ]] || set -- "\${@}" "--link2symlink"
fi
############
[[ \${FAKE_KERNEL} != true ]] || set -- "\${@}" "--kernel-release=\${KERNEL_RELEASE}"
[[ \${PROOT_DEBUG} != true ]] || set -- "\${@}" "--verbose=\${VERBOSE_LEVEL}"
if [[ \${MOUNT_PROC} = true ]]; then
set -- "\${@}" "--mount=\${PROC_DIR}:\${EXA_PREFIX}\${PROC_DIR}"
fi
if [[ \${MOUNT_DEV} = true ]]; then
set -- "\${@}" "--mount=\${DEV_DIR}:\${EXA_PREFIX}\${DEV_DIR}"
[[ \${MOUNT_SHM_TO_TMP} != true ]] || set -- "\${@}" "--mount=\${ROOTFS_DIR}/tmp:\${EXA_PREFIX}/dev/shm"
[[ \${MOUNT_URANDOM_TO_RANDOM} != true ]] || set -- "\${@}" "--mount=\${DEV_DIR}/urandom:\${EXA_PREFIX}/dev/random"
[[ \${MOUNT_DEV_FD} != true ]] || set -- "\${@}" "--mount=\${PROC_FD_PATH}:\${EXA_PREFIX}/dev/fd"
[[ \${MOUNT_DEV_STDIN} != true ]] || set -- "\${@}" "--mount=\${PROC_FD_PATH}/0:\${EXA_PREFIX}/dev/stdin"
[[ \${MOUNT_DEV_STDOUT} != true ]] || set -- "\${@}" "--mount=\${PROC_FD_PATH}/1:\${EXA_PREFIX}/dev/stdout"
[[ \${MOUNT_DEV_STDERR} != true ]] || set -- "\${@}" "--mount=\${PROC_FD_PATH}/2:\${EXA_PREFIX}/dev/stderr"
[[ \${MOUNT_DEV_TTY} != true ]] || set -- "\${@}" "--mount=\${DEV_DIR}/null:\${EXA_PREFIX}/dev/tty0"
fi
if [[ \${MOUNT_SYS} = true ]]; then
if [[ -e "\${SYS_DIR}" ]]; then
set -- "\${@}" "--mount=\${SYS_DIR}"
fi
fi
if [[ \${MOUNT_TMP} = true ]]; then
if [[ -e "\${TMP_SOURCE_DIR}" ]]; then
set -- "\${@}" "--mount=\${TMP_SOURCE_DIR}:\${EXA_PREFIX}\${TMP_MOUNT_POINT}"
fi
fi
if [[ \${MOUNT_GITSTATUS} = true ]]; then
if [[ -e "\${GITSTATUS_DIR}" ]]; then
set -- "\${@}" "--mount=\${GITSTATUS_DIR}:\${EXA_PREFIX}\${GITSTATUS_MOUNT_POINT}"
fi
fi
if [[ \${MOUNT_CAP_LAST_CAP} = true ]]; then
set -- "\${@}" "--mount=\${CAP_LAST_CAP_SOURCE}:\${EXA_PREFIX}\${CAP_LAST_CAP_MOUNT_POINT}"
fi
#############
if [[ \${MOUNT_SD} = true ]]; then
for i in "\${SD_DIR_0}" "\${SD_DIR_1}" "\${SD_DIR_2}" "\${SD_DIR_3}" "\${SD_DIR_4}" "\${SD_DIR_5}"; do
if [[ -e \${i} ]]; then
set -- "\${@}" "--mount=\${i}:\${EXA_PREFIX}\${SD_MOUNT_POINT}"
break
fi
done
fi
for ((i = 1; i <= \${NUM_OF_MOUNTS}; i++)); do
MOUNT_SOURCE="MOUNT_SOURCE_\${i}"
MOUNT_POINT="MOUNT_POINT_\${i}"
if [[ -n \${!MOUNT_SOURCE} && -x \${!MOUNT_SOURCE} ]]; then
set -- "\${@}" "--mount=\${!MOUNT_SOURCE}:\${EXA_PREFIX}\${!MOUNT_POINT}"
fi
done
#######################
#The files in the /proc directory will be automatically detected during installation. If your host does not have permission to read them, there is no " #" before set.
#不同系统对文件权限的限制可能有所区别,以下文件在安装时会自动检测,仅当宿主机无权读取时,才会去除set前的注释符号。
if [[ "\${FAKE_PROOT_PROC}" = 'true' ]]; then
##test01#set -- "\${@}" "--mount=\${PROOT_PROC_DIR}/.tmoe-container.stat:\${EXA_PREFIX}/proc/stat"
##test02#set -- "\${@}" "--mount=\${PROOT_PROC_DIR}/.tmoe-container.version:\${EXA_PREFIX}/proc/version"
if [[ -e "\${PROOT_PROC_DIR}/uptime" ]]; then
printf "%s" ""
##test04#set -- "\${@}" "--mount=\${PROOT_PROC_DIR}/bus:\${EXA_PREFIX}/proc/bus"
##buddyinfo#set -- "\${@}" "--mount=\${PROOT_PROC_DIR}/buddyinfo:\${EXA_PREFIX}/proc/buddyinfo"
##cgroups#set -- "\${@}" "--mount=\${PROOT_PROC_DIR}/cgroups:\${EXA_PREFIX}/proc/cgroups"
##consoles#set -- "\${@}" "--mount=\${PROOT_PROC_DIR}/consoles:\${EXA_PREFIX}/proc/consoles"
##crypto#set -- "\${@}" "--mount=\${PROOT_PROC_DIR}/crypto:\${EXA_PREFIX}/proc/crypto"
##devices#set -- "\${@}" "--mount=\${PROOT_PROC_DIR}/devices:\${EXA_PREFIX}/proc/devices"
##diskstats#set -- "\${@}" "--mount=\${PROOT_PROC_DIR}/diskstats:\${EXA_PREFIX}/proc/diskstats"
##execdomains#set -- "\${@}" "--mount=\${PROOT_PROC_DIR}/execdomains:\${EXA_PREFIX}/proc/execdomains"
##fb#set -- "\${@}" "--mount=\${PROOT_PROC_DIR}/fb:\${EXA_PREFIX}/proc/fb"
##filesystems#set -- "\${@}" "--mount=\${PROOT_PROC_DIR}/filesystems:\${EXA_PREFIX}/proc/filesystems"
##interrupts#set -- "\${@}" "--mount=\${PROOT_PROC_DIR}/interrupts:\${EXA_PREFIX}/proc/interrupts"
##iomem#set -- "\${@}" "--mount=\${PROOT_PROC_DIR}/iomem:\${EXA_PREFIX}/proc/iomem"
##ioports#set -- "\${@}" "--mount=\${PROOT_PROC_DIR}/ioports:\${EXA_PREFIX}/proc/ioports"
##kallsyms#set -- "\${@}" "--mount=\${PROOT_PROC_DIR}/kallsyms:\${EXA_PREFIX}/proc/kallsyms"
##keys#set -- "\${@}" "--mount=\${PROOT_PROC_DIR}/keys:\${EXA_PREFIX}/proc/keys"
##key-users#set -- "\${@}" "--mount=\${PROOT_PROC_DIR}/key-users:\${EXA_PREFIX}/proc/key-users"
##kmsg#set -- "\${@}" "--mount=\${PROOT_PROC_DIR}/kmsg:\${EXA_PREFIX}/proc/kmsg"
##kpageflags#set -- "\${@}" "--mount=\${PROOT_PROC_DIR}/kpageflags:\${EXA_PREFIX}/proc/kpageflags"
##loadavg#set -- "\${@}" "--mount=\${PROOT_PROC_DIR}/loadavg:\${EXA_PREFIX}/proc/loadavg"
##locks#set -- "\${@}" "--mount=\${PROOT_PROC_DIR}/locks:\${EXA_PREFIX}/proc/locks"
##misc#set -- "\${@}" "--mount=\${PROOT_PROC_DIR}/misc:\${EXA_PREFIX}/proc/misc"
##modules#set -- "\${@}" "--mount=\${PROOT_PROC_DIR}/modules:\${EXA_PREFIX}/proc/modules"
##pagetypeinfo#set -- "\${@}" "--mount=\${PROOT_PROC_DIR}/pagetypeinfo:\${EXA_PREFIX}/proc/pagetypeinfo"
##partitions#set -- "\${@}" "--mount=\${PROOT_PROC_DIR}/partitions:\${EXA_PREFIX}/proc/partitions"
##sched_debug#set -- "\${@}" "--mount=\${PROOT_PROC_DIR}/sched_debug:\${EXA_PREFIX}/proc/sched_debug"
##softirqs#set -- "\${@}" "--mount=\${PROOT_PROC_DIR}/softirqs:\${EXA_PREFIX}/proc/softirqs"
##timer_list#set -- "\${@}" "--mount=\${PROOT_PROC_DIR}/timer_list:\${EXA_PREFIX}/proc/timer_list"
##uptime#set -- "\${@}" "--mount=\${PROOT_PROC_DIR}/uptime:\${EXA_PREFIX}/proc/uptime"
##vmallocinfo#set -- "\${@}" "--mount=\${PROOT_PROC_DIR}/vmallocinfo:\${EXA_PREFIX}/proc/vmallocinfo"
##vmstat#set -- "\${@}" "--mount=\${PROOT_PROC_DIR}/vmstat:\${EXA_PREFIX}/proc/vmstat"
##zoneinfo#set -- "\${@}" "--mount=\${PROOT_PROC_DIR}/zoneinfo:\${EXA_PREFIX}/proc/zoneinfo"
fi
fi
###################
unset QEMU_BIN
case \${SKIP_QEMU_DETECTION} in
true)
TMOE_QEMU=true
case \${QEMU_ARCH} in
default | "")
printf "%s\n" "ERROR, please modify the value of QEMU_ARCH in the configuration file."
TMOE_QEMU=false
;;
*)
TMOE_QEMU_ARCH="\${QEMU_ARCH}"
check_qemu32_path
check_qemu_bin
;;
esac
;;
*)
case "\${CONTAINER_ARCH}" in
"\${HOST_ARCH}") TMOE_QEMU=false ;;
*)
check_qemu_arch
check_qemu32_path
check_qemu_bin
;;
esac
;;
esac
if [[ \${TMOE_QEMU} = true && -n \${QEMU_BIN} && \${EXA_ENABLED} != true ]]; then
set -- "\${@}" "--qemu=\${QEMU_BIN}"
fi
#############
if [[ \${EXA_ENABLED} = true && -e \${EXA_PREFIX} ]]; then
set -- "\${@}" "\${EXA_PATH}/exa-i386_armeabi"
set -- "\${@}" "--path-prefix" "\${EXA_PREFIX}"
set -- "\${@}" "--vfs-hacks=\${VFS_HACKS}"
set -- "\${@}" "--vfs-kind" "\${VFS_KIND}"
# [[ \${IPC_EMU_SER} != true ]] || set -- "\${@}" "--ipc-emul-server"
# [[ \${FORK_CONTALLER} != true ]] || set -- "\${@}" "--fork-controller"
[[ -z \${SOCKET_PATH_SUFFIX} ]] || set -- "\${@}" "--socket-path-suffix" "\${SOCKET_PATH_SUFFIX}"
set -- "\${@}" "--vpaths-list" "\${VPATHS_LIST}"
set -- "\${@}" "--tmp-dir" "\${EXA_PREFIX}/tmp" "--"
fi
#############
#SET ENV
HOST_NAME="localhost"
if [[ -r \${HOST_NAME_FILE} ]]; then
HOST_NAME=\$(sed -n p \${HOST_NAME_FILE})
else
[[ ! \$(command -v hostname) ]] || HOST_NAME=\$(hostname -f)
fi
set -- "\${@}" "/usr/bin/env" "-i"
set -- "\${@}" "HOSTNAME=\${HOST_NAME}"
set -- "\${@}" "HOME=\${PROOT_HOME}"
set -- "\${@}" "USER=\${PROOT_USER}"
set -- "\${@}" "TERM=xterm-256color"
set -- "\${@}" "SDL_IM_MODULE=fcitx"
set -- "\${@}" "XMODIFIERS=\@im=fcitx"
set -- "\${@}" "QT_IM_MODULE=fcitx"
set -- "\${@}" "GTK_IM_MODULE=fcitx"
set -- "\${@}" "TMOE_CHROOT=false"
set -- "\${@}" "TMOE_PROOT=true"
set -- "\${@}" "TMPDIR=/tmp"
set -- "\${@}" "DISPLAY=127.0.0.1:0"
set -- "\${@}" "PULSE_SERVER=tcp:127.0.0.1:4713"
if [[ -r "\${TMOE_LOCALE_FILE}" ]]; then
set -- "\${@}" "LANG=\$(head -n 1 \${TMOE_LOCALE_FILE})"
else
set -- "\${@}" "LANG=en_US.UTF-8"
fi
[[ \${EXA_ENABLED} != true ]] || set -- "\${@}" "LD_LIBRARY_PATH=/usr/local/lib:/usr/lib32:/usr/lib:/lib:/usr/lib/i386-linux-gnu:/var/lib:/var/lib/dpkg:/lib/i386-linux-gnu"
#SHELL
[[ ! -r \${DEFAULT_SHELL_CONF} ]] || source \${DEFAULT_SHELL_CONF}
if [[ -z \${TMOE_SHELL} ]]; then
for i in \${DEFAULT_LOGIN_SHELL_0} \${DEFAULT_LOGIN_SHELL_1} \${DEFAULT_LOGIN_SHELL_2} \${DEFAULT_LOGIN_SHELL_3} \${DEFAULT_LOGIN_SHELL_4}; do
if [[ -f \${ROOTFS_DIR}/bin/\${i} || -L \${ROOTFS_DIR}/bin/\${i} ]]; then
TMOE_SHELL="/bin/\${i}"
break
fi
done
fi
set -- "\${@}" "SHELL=\${TMOE_SHELL}"
#LOAD GLOBAL ENV FILE
if [[ -s \${CONTAINER_ENV_FILE} && \${LOAD_ENV_FILE} = true ]]; then
CONTAINER_BIN_PATH=\$(sed -E 's@export\s+@@;/#/d' \${CONTAINER_ENV_FILE} | grep '^PATH=\"' | grep '\${PATH:+:\${PATH}}' | sed 's@\${PATH:+:\${PATH}}\"@:@;s@PATH=\"@@')
OLD_IFS="\${IFS}"
IFS=\$'\n'
CONTAINER_ENV_VAR="\$(sed -E 's@export\s+@@;/#/d;/^PATH=/d' \${CONTAINER_ENV_FILE})"
# Do not use double quotes in CONTAINER_ENV_VAR in the for statement
for i in \${CONTAINER_ENV_VAR}; do
[[ -z \${i} ]] || set -- "\${@}" "\${i}"
done
IFS="\${OLD_IFS}"
fi
#PATH ENV
if [[ \${PROOT_USER} = root || -z \${PROOT_USER} ]]; then
set -- "\${@}" "PATH=\${CONTAINER_BIN_PATH}/usr/local/sbin:/usr/local/bin:/bin:/usr/bin:/sbin:/usr/sbin:/usr/games:/usr/local/games"
else
set -- "\${@}" "PATH=\${CONTAINER_BIN_PATH}/usr/local/bin:/bin:/usr/bin:/usr/games:/usr/local/games"
fi
#LOGIN SHELL
#It should be -l, not --login
set -- "\${@}" "\${TMOE_SHELL}" "-l"
###################
unset TMOE_LD_LIB_PATH
case "\${PROOT_BIN}" in
"" | system | default) PROOT_PROGRAM=proot ;;
termux | prefix) PROOT_PROGRAM="\${PREFIX}/bin/proot" ;;
32)
case \${HOST_DISTRO} in
Android)
PROOT_PROGRAM="\${PROOT_32_TERMUX_BIN}"
PROOT_LOADER="\${PROOT_32_TERMUX_LOADER}"
LD_LIB_PATH="\${PROOT_32_TERMUX_LD_LIB_PATH}"
;;
*) PROOT_PROGRAM=proot ;;
esac
;;
*) PROOT_PROGRAM="\${PROOT_BIN}" ;;
esac
set -- "\${PROOT_PROGRAM}" "\${@}"
case \${PROOT_LOADER} in
"")
case "\${PROOT_LIBEXEC_LOADER}" in
default | system | "") ;;
*) PROOT_LOADER="\${PROOT_LIBEXEC_LOADER}" ;;
esac
;;
esac
case "\${LD_LIB_PATH}" in
default | system | "") ;;
*)
case "\${LD_LIBRARY_PATH}" in
"") TMOE_LD_LIB_PATH="\${LD_LIB_PATH}" ;;
*) TMOE_LD_LIB_PATH="\${LD_LIB_PATH}:\${LD_LIBRARY_PATH}" ;;
esac
;;
esac
if [[ -n \${PROOT_LOADER} && -z \${TMOE_LD_LIB_PATH} ]]; then
set -- "env" "PROOT_LOADER=\${PROOT_LOADER}" "\${@}"
elif [[ -z \${PROOT_LOADER} && -n \${TMOE_LD_LIB_PATH} ]]; then
set -- "env" "LD_LIBRARY_PATH=\${TMOE_LD_LIB_PATH}" "\${@}"
elif [[ -n \${PROOT_LOADER} && -n \${TMOE_LD_LIB_PATH} ]]; then
set -- "PROOT_LOADER=\${PROOT_LOADER}" "\${@}"
set -- "env" "LD_LIBRARY_PATH=\${TMOE_LD_LIB_PATH}" "\${@}"
fi
exec "\${@}"
}
main "\${@}"
ENDOFPROOT
#TMOE_LINUX_DIR & TRUE_ARCH_TYPE & ARCH_TYPE - \$.
# CONFIG_FOLDER & ARCH_TYPE & DEBIAN_CHROOT & TMPDIR - \$
# CONTAINER_DISTRO DEBIAN_FOLDER - \$
case $(uname -o) in
Android) ${TMOE_CHROOT_PREFIX} termux-fix-shebang ${TMOE_STARTUP_SCRIPT} ;;
esac
chmod a+rx ${TMOE_STARTUP_SCRIPT}
ln -sf ${TMOE_SHARE_DIR}/container/debian/debian ${PREFIX}/bin
# 64 | tmoe)
# case \${HOST_DISTRO} in
# Android)
# PROOT_64_TMOE_BIN="${TMOE_LINUX_DIR}/lib/data/data/com.termux/files/usr/bin/proot"
# PROOT_64_TMOE_LOADER="${TMOE_LINUX_DIR}/lib/data/data/com.termux/files/usr/libexec/proot/loader"
# PROOT_PROGRAM="\${PROOT_64_TMOE_BIN}"
# PROOT_LOADER="\${PROOT_64_TMOE_LOADER}"
# ;;
| true
|
8b4f44680f72296db633d2c2ccf45fdc3efbfc7d
|
Shell
|
Leen15/rancher-export
|
/remove-affinity.sh
|
UTF-8
| 271
| 3.375
| 3
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
#!/bin/bash
# Strip every line containing "affinity" from all *.yml files of one
# exported environment (a directory under ./export), then delete the
# sed backup files again.
#
# Usage: ./remove-affinity.sh <environment-name>

# FIX: the original used `[ -x $1 ]` (is-executable file test), which only
# rejected an empty argument by accident; `-z` is the intended emptiness check.
if [ -z "$1" ]; then
    echo "Enter an environment name. It's a directory under 'export'. If unsure, stop now and seek help."
    exit 1
fi

# Abort when the environment directory is missing instead of editing files
# in whatever directory we happen to be in.
cd "./export/$1" || exit 1

# Delete affinity lines in place; the .bak copies sed creates are removed below.
find . -type f -name "*.yml" -exec sed -i.bak '/affinity/d' {} +
find . -type f -name "*.bak" -exec rm {} +
| true
|
03b560b4105c50cca3e1099c04af9c71ea1f35e8
|
Shell
|
ilventu/aur-mirror
|
/blitz-hg/PKGBUILD
|
UTF-8
| 1,300
| 2.765625
| 3
|
[] |
no_license
|
# Maintainer: bastikr <basti.kr@gmail.com>
# PKGBUILD for the Mercurial development snapshot of Blitz++, a C++
# template class library for scientific computing.
pkgname="blitz-hg"
pkgver=1902
pkgrel=1
pkgdesc="C++ class library for scientific computing"
url="http://www.oonumerics.org/blitz/"
# FIX: 'GP' was a typo for 'GPL' (Blitz++ is GPL-licensed).
license=('GPL' 'custom')
arch=('i686' 'x86_64')
depends=('gcc')
makedepends=('mercurial')
provides=('blitz')
conflicts=('blitz')
source=('Makefile.am')
md5sums=('8e0e4a056e76a561b86f7cb55931d67a')
_hgroot="http://blitz.hg.sourceforge.net:8000/hgroot/blitz/blitz"
_hgrepo="blitz"

build() {
  cd "$srcdir"

  msg "Connecting to Mercurial server...."
  # Update an existing checkout when present, otherwise clone a fresh one.
  if [ -d "$_hgrepo" ]; then
    cd "$_hgrepo"
    hg pull -u || return 1
    msg "The local files are updated."
  else
    hg clone "$_hgroot" "$_hgrepo" || return 1
  fi

  msg "Mercurial checkout done or server timeout"
  msg "Starting make..."

  # Build in a pristine copy so the repository checkout itself stays clean.
  rm -rf "$srcdir/$_hgrepo-build"
  cp -r "$srcdir/$_hgrepo" "$srcdir/$_hgrepo-build"
  cp "${srcdir}/Makefile.am" "$srcdir/$_hgrepo-build/blitz/generate/"
  cd "$srcdir/$_hgrepo-build"
  autoreconf -vif

  # Install license (FIX: use $pkgdir, the supported name for the legacy
  # $startdir/pkg location).
  install -D -m644 LICENSE "$pkgdir/usr/share/licenses/$pkgname/LICENSE"

  ./configure CXX=g++ --prefix=/usr --enable-64bit --enable-html-docs=no --enable-doxygen=no --enable-serialization || return 1
  # NOTE(review): an alias has no effect in a non-interactive build script;
  # if python2 is really required it must be arranged via PATH instead.
  alias python='python2'
  make DESTDIR="$pkgdir" install || return 1
  rm -rf "$pkgdir/usr/lib/pkgconfig"
}
| true
|
940095e5b6007189ad1d8be5189ce805440bb53c
|
Shell
|
CodeChillAlluna/code-chill-server
|
/install/deploy.sh
|
UTF-8
| 1,173
| 3.640625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Build, dockerize and (optionally) publish the code-chill server image.
# The positional argument handled at the bottom of the file selects which
# of the stages defined below are run.
docker_path=docker
# Jar artifact that build_dockerfile copies into the Docker build context.
docker_build=$docker_path/codechill-server.jar
# Compile and package the Spring application jar; tests are skipped here
# (presumably CI runs them in a separate stage — TODO confirm).
build_app () {
# Build the app
mvn clean package -DskipTests=true
}
# Assemble the Docker image: copy the freshly built jar into the Docker
# build context, stage the runtime config under $HOME/config, build via
# docker-compose, then remove the copied jar again.
build_dockerfile() {
    # Build dockerfile
    # FIX: guard the cd so a missing context dir cannot make the copies
    # below run in the wrong directory.
    cd "$docker_path" || return 1
    cp ../target/spring*.jar codechill-server.jar
    # FIX: -p keeps the script re-runnable when $HOME/config already exists
    # (plain mkdir failed on the second run).
    mkdir -p "$HOME/config"
    cp config/application.yml "$HOME/config/application.yml"
    docker-compose build
    cd ..
    rm -r "$docker_build"
}
# Tag and push the server image. On master the image is published as
# :latest plus the version recorded in the VERSION file; on any other
# branch it is published under the branch name only.
deploy() {
    local image="codechillaluna/code-chill-server"
    echo "$TRAVIS_BRANCH"
    if [[ "$TRAVIS_BRANCH" == "master" ]]; then
        echo "Master"
        VERSION=$(cat VERSION)
        docker tag "$image" "$image:latest"
        docker push "$image:latest"
    else
        echo "Other branch"
        VERSION=$TRAVIS_BRANCH
        echo "$VERSION"
    fi
    docker tag "$image" "$image:$VERSION"
    docker push "$image:$VERSION"
}
# Stage dispatch:
#   0 -> build jar + image        1 -> image only
#   2 -> image + publish          3 -> publish only
#   anything else -> full pipeline (jar, image, publish)
case "$1" in
    0)
        build_app
        build_dockerfile
        ;;
    1)
        build_dockerfile
        ;;
    2)
        build_dockerfile
        deploy
        ;;
    3)
        deploy
        ;;
    *)
        build_app
        build_dockerfile
        deploy
        ;;
esac
| true
|
2b12e261a73110b4cb354efcfbb9221defc1ca25
|
Shell
|
jonpspri/s390x-docker-builds
|
/chas-kafka/test_kafka.sh
|
UTF-8
| 1,769
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
# Smoke-test the zookeeper and kafka images: start zookeeper, start kafka
# wired to it, print both container IPs, and always tear the containers
# down again via the EXIT trap.

REPOSITORY=${REPOSITORY:-jpspring/s390x-openwhisk}
KAFKA_LABEL=${LABEL:-kafka}
ZOOKEEPER_LABEL=${LABEL:-zookeeper}
# FIX: quote $0 and the realpath result so paths with spaces survive.
KAFKA_DIR=$(dirname "$(realpath -s "$0")")

# Kill and remove whichever containers were started; runs on every exit path.
finish() {
	for id in "$kafka_id" "$zookeeper_id"; do
		if [ -n "$id" ]; then
			echo 'Killing then removing container'
			# FIX: quote the container ids so they cannot be word-split or globbed.
			echo -n 'Kill...'; docker kill "$id"
			echo -n 'Remove...'; docker rm "$id"
		fi
	done
}
trap finish EXIT

#id=$(docker run -v "$CONSULDIR/config:/consul/config" -v "$CONSULDIR/logs:/logs" -p 8500:8500 -d "$REPOSITORY:$LABEL")
zookeeper_id=$(docker run -d "$REPOSITORY:$ZOOKEEPER_LABEL")
if [ -z "$zookeeper_id" ]; then echo "Could not create zookeeper image"; exit 1; fi
zookeeper_ip=$(docker inspect --format '{{ .NetworkSettings.IPAddress }}' "$zookeeper_id")
echo "Successfully started Zookeeper container $zookeeper_id at IP $zookeeper_ip"

kafka_id=$(docker run -e ZOOKEEPER_IP="$zookeeper_ip" -e ZOOKEEPER_PORT=2181 \
	-d "$REPOSITORY:$KAFKA_LABEL")
if [ -z "$kafka_id" ]; then echo "Could not create kafka image"; exit 1; fi
kafka_ip=$(docker inspect --format '{{ .NetworkSettings.IPAddress }}' "$kafka_id")
echo "Successfully started Kafka container $kafka_id at IP $kafka_ip"

#url="http://localhost:8500/v1/kv/consulIsAlive"
#url="http://$ip:8500/v1/kv/consulIsAlive"
#echo "Testing container $id at $url in 3 seconds..."
#sleep 3
# for url in \
# "http://$ip:8500/v1/kv/consulIsAlive" \
# "http://localhost:8500/v1/kv/consulIsAlive" \
# ;\
# do
# echo "Testing at URL $url"
# result=$(curl --connect-timeout 4 --max-time 5 -XPUT "$url")
# if [ "$result" = "true" ]; then echo '.'; else exit 1; fi
#
# result=$(curl --connect-timeout 4 --max-time 5 -XDELETE "$url")
# if [ "$result" = "true" ]; then echo '.';else exit 1; fi
# done
| true
|
25926a73db7f6aa2453ca7370e54e9c256c83562
|
Shell
|
majacQ/ambertools-binary-build
|
/conda-ambertools-python-components/build_ambertools.py
|
UTF-8
| 1,463
| 2.75
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env python
# FIX: this file is Python, not shell; the original "#!/bin/sh" shebang
# would make direct execution fail.
"""Build the AmberTools Python components inside a conda recipe.

Relies on the repo-local ``conda_tools`` helpers (``utils`` and
``copy_ambertools``), expects ``RECIPE_DIR`` and ``PREFIX`` in the
environment, and must be run from the AmberTools source tree.
"""
import os
import sys
from glob import glob
import shutil

THIS_RECIPE = os.getenv('RECIPE_DIR', '')
conda_tools_dir = os.path.join(THIS_RECIPE, '..', 'conda_tools')
print('conda_tools_dir', conda_tools_dir)
# Make the repo-local helper modules importable.
sys.path.insert(0, conda_tools_dir)

import utils  # conda_tools
import copy_ambertools


def main():
    # The helper utilities read AMBERHOME from the environment; point it
    # at the current source tree.
    PREFIX = os.getenv('PREFIX')
    AMBERHOME = os.getcwd()
    os.environ['AMBERHOME'] = AMBERHOME
    copy_ambertools.main()
    # Reuse the single-python (py2.7) package as the build base.
    ATPY2 = utils.get_package_dir(
        conda_recipe=os.path.join(THIS_RECIPE, '..', 'conda-ambertools-single-python'), py=2.7)
    print("ATPY2", ATPY2, 'exists = ', os.path.exists(ATPY2))
    utils.tar_xf(ATPY2)
    utils.patch(os.path.join(THIS_RECIPE, 'patch'))
    utils.update_amber()
    utils.set_compiler_env()
    utils.run_configure()
    os.chdir('AmberTools/src')
    utils.make_python_serial()
    os.chdir(AMBERHOME)
    python_ver = ".".join(map(str, sys.version_info[:2]))
    prefix_bin = os.path.join(PREFIX, 'bin')
    # Install the entry-point scripts into the conda prefix.
    shutil.copy('{}/bin/pdb4amber'.format(AMBERHOME), prefix_bin)
    shutil.copy('{}/bin/parmed'.format(AMBERHOME), prefix_bin)
    for fn in glob('{}/lib/*'.format(AMBERHOME)):
        # only need some libraries for pytraj/libcpptraj
        if os.path.isfile(fn):
            shutil.copy(fn, '{}/lib/'.format(PREFIX))
    utils.sh('cp -rf {}/lib/python{} {}/lib/'.format(AMBERHOME, python_ver, PREFIX))
    shutil.rmtree('./info')


if __name__ == '__main__':
    main()
| true
|
0ac49b99c30f2df984b0fb9f1672b1d9aa4d4827
|
Shell
|
1w3j/1w3j
|
/scripts/preptorrent.sh
|
UTF-8
| 156
| 3.3125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Symlink a local file/directory into ~/Bittorrent so the torrent client
# picks it up. Usage: preptorrent.sh <path>
if [[ -e "$1" ]]; then
    # FIX: quote the realpath argument so paths containing spaces survive.
    echo Linking "$1" -\> ~/Bittorrent && ln -s "$(realpath "$1")" ~/Bittorrent
else
    echo "$1 does not exist"
fi
| true
|
6575121d8466cf4ccbd1368c52541af1f0b1a732
|
Shell
|
sfillwo/emnlp2019-dualgraph
|
/train_LDC2015E86.sh
|
UTF-8
| 1,016
| 2.71875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Train the dual-graph AMR-to-text model on the LDC2015E86 dataset.
# Usage: ./train_LDC2015E86.sh <gpuid> <gnn_type> <gnn_layers> <start_decay_steps> <decay_steps>

if (( $# < 5 )); then
    echo "./train_LDC2015E86.sh <gpuid> <gnn_type> <gnn_layers> <start_decay_steps> <decay_steps>"
    exit 2
fi

mkdir -p data/models

GPUID="$1"
GNNTYPE="$2"
GNNLAYERS="$3"
STARTDECAYSTEPS="$4"
DECAYSTEPS="$5"

DATASET=ldc2015e86
# One validation interval of 840 steps; train for 60 such intervals.
STEPS=840
EPOCHS=$(( STEPS * 60 ))

export CUDA_VISIBLE_DEVICES="${GPUID}"
export OMP_NUM_THREADS=10

python -u train.py -data "data/${DATASET}" \
    -save_model "data/models/${DATASET}-${GNNTYPE}-2" \
    -rnn_size 900 -word_vec_size 300 -train_steps "${EPOCHS}" -optim adam \
    -valid_steps "${STEPS}" \
    -valid_batch_size 1 \
    -encoder_type graph \
    -gnn_type "${GNNTYPE}" \
    -gnn_layers "${GNNLAYERS}" \
    -decoder_type rnn \
    -learning_rate 0.001 \
    -dropout 0.5 \
    -copy_attn -copy_attn_type mlp -coverage_attn -batch_size 20 \
    -save_checkpoint_steps "${STEPS}" \
    -start_decay_steps "${STARTDECAYSTEPS}" \
    -decay_steps "${DECAYSTEPS}" \
    -layers 2 \
    -global_attention mlp \
    -pre_word_vecs_enc "data/${DATASET}.embeddings.enc.pt" \
    -pre_word_vecs_dec "data/${DATASET}.embeddings.dec.pt" \
    -gpu_ranks 0
| true
|
2726130c1fb8cd2fc7efe80101b58673046d5d11
|
Shell
|
cybernetics/summer-of-k8s
|
/week3/ambassador-demo-cluster/install.sh
|
UTF-8
| 12,250
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/bash
# Set up the Ambassador demo: clone the demo app for the chosen stack,
# install its dependencies, install telepresence/kubectl locally when
# missing, then start a shell configured for the demo cluster.

# Default stack; overridden by -s=/--stack= below.
STACK="nodejs"
# Demo application repositories, one per supported stack.
FAST_API_REPOSITORY="https://github.com/datawire/edgey-corp-python-fastapi.git"
FLASK_REPOSITORY="https://github.com/datawire/edgey-corp-python.git"
NODEJS_REPOSITORY="https://github.com/datawire/edgey-corp-nodejs.git"
GOLANG_REPOSITORY="https://github.com/datawire/edgey-corp-go.git"
JAVA_REPOSITORY="https://github.com/datawire/edgey-corp-java.git"
# Download URLs for the tooling; the macOS variants are the defaults and
# are swapped for the Linux ones by the uname check further down.
KUBECTL_MACOS_PATH="https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/amd64/kubectl"
KUBECTL_LINUX_PATH="https://dl.k8s.io/release/v1.20.0/bin/linux/amd64/kubectl"
TELEPRESENCE_MACOS_PATH="https://app.getambassador.io/download/tel2/darwin/amd64/latest/telepresence"
TELEPRESENCE_LINUX_PATH="https://app.getambassador.io/download/tel2/linux/amd64/latest/telepresence"
KUBECTL_BIN_PATH=${KUBECTL_MACOS_PATH}
TELEPRESENCE_BIN_PATH=${TELEPRESENCE_MACOS_PATH}
TELEPRESENCE_MACOS_VERSION_PATH="https://datawire-static-files.s3.amazonaws.com/tel2/darwin/amd64/stable.txt"
TELEPRESENCE_LINUX_VERSION_PATH="https://datawire-static-files.s3.amazonaws.com/tel2/linux/amd64/stable.txt"
TELEPRESENCE_VERSION_PATH=$TELEPRESENCE_MACOS_VERSION_PATH
# Local checkout directory per stack.
FAST_API_DIR="edgey-corp-python-fastapi"
FLASK_DIR="edgey-corp-python-flask"
NODEJS_DIR="edgey-corp-nodejs"
GOLANG_DIR="edgey-corp-go"
JAVA_DIR="edgey-corp-java"
DATA_PROCESSING_DIR="DataProcessingService"
CURRENT_DIRECTORY=$(pwd)
VERBOSE="false"
# Download the latest telepresence binary and overwrite the copy the user
# already has installed (located via `which`). Honors VERBOSE for curl output.
update_telepresence(){
    # FIX: the original compared against the misspelling "flase", so the
    # quiet branch could never run and curl was always verbose.
    if [ "$VERBOSE" = "false" ]; then
        # FIX: run curl directly; the original wrapped the call in $(...),
        # which would have executed curl's stdout as a command.
        sudo curl -s -fL "$TELEPRESENCE_BIN_PATH" -o "$(which telepresence)"
    else
        sudo curl -fL "$TELEPRESENCE_BIN_PATH" -o "$(which telepresence)"
    fi
}
# Extract the installed telepresence version ("X.Y.Z") from the output of
# `telepresence version`, which looks like "Client vX.Y.Z (api vN)".
parse_telepresence_version(){
    local raw marker="Client v"
    raw=$(telepresence version)
    raw=${raw#*$marker}   # drop everything through "Client v"
    raw=${raw%(*}         # drop the trailing "(api ...)" part
    # unquoted on purpose: word splitting trims the stray trailing space
    echo ${raw}
}
# this function will remove all the software installed in the current directory:
# the cloned edgey-* demo apps, the locally installed kubectl/telepresence
# under ./bin, and the Python virtualenv under ./env. Output is silenced
# unless VERBOSE=true (NOTE(review): the > /dev/null redirects are largely
# redundant, since rm -rf prints nothing on success).
clean(){
if [ $VERBOSE = "false" ]; then
# remove demo applications
rm -rf edgey-* > /dev/null
# remove telepresence and kubectl
rm -rf bin > /dev/null
# remove virtualenv
rm -rf env > /dev/null
else
# remove demo applications
rm -rf edgey-*
# remove telepresence and kubectl
rm -rf bin
# remove virtualenv
rm -rf env
fi
}
# Compare an installed telepresence version against LATEST_TELEPRESENCE_VERSION.
# $1 - version string to check (e.g. "2.4.1").
# Prints "true" when $1 is older than the latest release (an update is
# needed) and "false" otherwise (equal or newer).
validate_telepresence_version(){
    local candidate=$1
    if [ "$candidate" = "$LATEST_TELEPRESENCE_VERSION" ]; then
        echo "false"
        return
    fi
    # Split both versions on dots and compare them component-wise.
    local IFS=.
    local i latest=($LATEST_TELEPRESENCE_VERSION) given=($candidate)
    # Pad the latest version with zeros up to the candidate's length.
    for ((i = ${#latest[@]}; i < ${#given[@]}; i++)); do
        latest[i]=0
    done
    for ((i = 0; i < ${#latest[@]}; i++)); do
        # Pad the candidate with zeros as well.
        if [[ -z ${given[i]} ]]; then
            given[i]=0
        fi
        # 10# forces base-10 so components like "08" are not read as octal.
        if ((10#${latest[i]} > 10#${given[i]})); then
            echo "true"
            return
        fi
        if ((10#${latest[i]} < 10#${given[i]})); then
            echo "false"
            return
        fi
    done
    echo "false"
}
# Ask for user confirmation. Needs a parameter to prompt a message for the user.
# Prints the word "true" when the user answers y/Y and "false" for n/N (any
# other key re-prompts); callers capture the word via command substitution.
confirm(){
while true; do
read -r -n 1 -p $'\n'"${1:-Continue?} [y/n]: " REPLY
case $REPLY in
[yY])
echo "true"
return
;;
[nN])
echo "false"
return
;;
esac
done
}
# Clone the repository named by the globals REPO_TO_USE / DIR_TO_USE, wiping
# any previous checkout of that directory first. Quiet unless VERBOSE=true.
clone_repo(){
# Cloning repository to temporal directory and overwritting the specific folder
echo "Cloning repository from $REPO_TO_USE"
if [ $VERBOSE = "false" ]; then
rm -rf ${DIR_TO_USE} > /dev/null
git clone --quiet ${REPO_TO_USE} ${DIR_TO_USE}
else
rm -rf ${DIR_TO_USE}
git clone ${REPO_TO_USE} ${DIR_TO_USE}
fi
}
# Verify go is installed, then (with user consent) fetch the demo's go
# dependencies inside the cloned project. Exits the script on refusal or
# when go is missing. NOTE(review): "continue" shadows the shell builtin
# name as a variable; harmless here but worth renaming eventually.
analyze_dependencies_go(){
if [ -x "$(command -v go)" ]; then
local continue="$(confirm "Several dependencies will be installed using go in the current project without affecting the rest of the system")"
echo " "
if [ $continue = "true" ]; then
cd ${GOLANG_DIR}/${DATA_PROCESSING_DIR}
# Install fresh for auto refresh
if [ $VERBOSE = "false" ]; then
go get github.com/pilu/fresh > /dev/null
else
go get github.com/pilu/fresh
fi
cd $CURRENT_DIRECTORY
else
exit
fi
else
echo "To continue with go as stack please install golang compiler. Go to https://golang.org/dl/ for more information"
exit
fi
}
# Same contract as analyze_dependencies_go, but for the Node.js demo:
# runs npm install inside the cloned DataProcessingService.
analyze_dependencies_nodejs(){
if [ -x "$(command -v npm)" ]; then
local continue="$(confirm "Several dependencies will be installed using npm in the current project without affecting the rest of the system")"
echo " "
if [ $continue = "true" ]; then
cd ${NODEJS_DIR}/${DATA_PROCESSING_DIR}
# install dependencies
if [ $VERBOSE = "false" ]; then
npm install --silent > "/dev/null" 2>&1
else
npm install
fi
cd $CURRENT_DIRECTORY
else
exit
fi
else
echo "To continue with nodejs as stack please install nodejs. Go to https://nodejs.org/en/download/ for more information"
exit
fi
}
# Shared python setup: require python3 + pip3, then create and activate a
# virtualenv in ./env. The activated env stays in effect for the caller
# (flask/fast-api helpers install their packages into it).
analyze_dependencies_python(){
if [ -x "$(command -v python3)" ]; then
if [ -x "$(command -v pip3)" ]; then
local continue="$(confirm "A new virtual environment will be created on the current project and several dependencies will be installed using pip in the current project without affecting the rest of the system")"
echo " "
if [ $continue = "true" ]; then
# create and active a virtual environment
if [ $VERBOSE = "false" ]; then
python3 -m venv env > /dev/null
source env/bin/activate > /dev/null
else
python3 -m venv env
source env/bin/activate
fi
else
exit
fi
else
echo "In order to install dependencies for the demo, please install pip."
exit
fi
else
echo "To continue with python as stack please install python3. Go to https://www.python.org/downloads/ for more information"
exit
fi
}
# Flask stack: set up the virtualenv, then install flask + requests into it.
analyze_dependencies_flask(){
analyze_dependencies_python
# install dependencies
if [ $VERBOSE = "false" ]; then
pip --disable-pip-version-check install --no-warn-script-location flask requests > /dev/null
else
pip install --no-warn-script-location flask requests
fi
}
# FastAPI stack: set up the virtualenv, then install fastapi/uvicorn/requests.
analyze_dependencies_fast_api(){
analyze_dependencies_python
# install dependencies
if [ $VERBOSE = "false" ]; then
pip --disable-pip-version-check install --no-warn-script-location fastapi uvicorn requests > /dev/null
else
pip install --no-warn-script-location fastapi uvicorn requests
fi
}
# Java stack: only checks that javac and mvn exist; nothing is installed here.
analyze_dependencies_java(){
if [ -x "$(command -v javac)" ]; then
if ! [ -x "$(command -v mvn)" ]; then
echo "In order to install dependencies for the demo, please install mvn. Go to https://maven.apache.org/install.html for more information."
exit
fi
else
echo "To continue with java as stack please install java. Go to https://java.com/en/download/ for more information"
exit
fi
}
# Select the platform-specific download URLs (macOS defaults were set above).
if [ "$(uname)" == "Darwin" ]; then
KUBECTL_BIN_PATH=${KUBECTL_MACOS_PATH}
TELEPRESENCE_BIN_PATH=${TELEPRESENCE_MACOS_PATH}
TELEPRESENCE_VERSION_PATH=${TELEPRESENCE_MACOS_VERSION_PATH}
elif [ "$(expr substr $(uname -s) 1 5)" == "Linux" ]; then
KUBECTL_BIN_PATH=${KUBECTL_LINUX_PATH}
TELEPRESENCE_BIN_PATH=${TELEPRESENCE_LINUX_PATH}
TELEPRESENCE_VERSION_PATH=${TELEPRESENCE_LINUX_VERSION_PATH}
fi
# Get specific stack selected by the user, by default will be nodejs
for arg in "$@"
do
case $arg in
-s=*|--stack=*)
STACK="${arg#*=}"
shift
;;
-h|--help)
cat README.txt
exit
;;
-v|--verbose)
VERBOSE="true"
;;
--clean)
clean
exit
;;
*)
echo "No valid argument ${arg}"
exit
;;
esac
done
# Get repository: pick the repo/dir for the chosen stack, clone it, and run
# the matching dependency setup. Any unknown stack aborts the script.
case $STACK in
nodejs)
echo "nodejs selected"
REPO_TO_USE=$NODEJS_REPOSITORY
DIR_TO_USE=$NODEJS_DIR
clone_repo
analyze_dependencies_nodejs
;;
flask)
echo "python with flask selected"
REPO_TO_USE=$FLASK_REPOSITORY
DIR_TO_USE=$FLASK_DIR
clone_repo
analyze_dependencies_flask
;;
fast-api)
echo "python with fast-api selected"
REPO_TO_USE=$FAST_API_REPOSITORY
DIR_TO_USE=$FAST_API_DIR
clone_repo
analyze_dependencies_fast_api
;;
java)
echo " java selected"
REPO_TO_USE=$JAVA_REPOSITORY
DIR_TO_USE=$JAVA_DIR
clone_repo
analyze_dependencies_java
;;
go)
echo "go selected"
REPO_TO_USE=$GOLANG_REPOSITORY
DIR_TO_USE=$GOLANG_DIR
clone_repo
analyze_dependencies_go
;;
*)
echo "Invalid stack selected ${STACK}"
exit
;;
esac
# Get the latest version of telepresence
LATEST_TELEPRESENCE_VERSION="$(curl -s $TELEPRESENCE_VERSION_PATH)"
# Check if telepresence exists, if not install in this folder (./bin).
# If it is already installed system-wide, compare versions and offer an
# in-place update; refusing an available update aborts the setup.
echo " "
if ! [ -x "$(command -v telepresence)" ]; then
if ! [ -f ./bin/telepresence ]; then
echo "Installing telepresence"
if [ $VERBOSE = "false" ]; then
mkdir -p ./bin > /dev/null
curl -s -LO ${TELEPRESENCE_BIN_PATH}
chmod a+x ./telepresence > /dev/null
mv ./telepresence ./bin/telepresence > /dev/null
else
mkdir -p ./bin
curl -LO ${TELEPRESENCE_BIN_PATH}
chmod a+x ./telepresence
mv ./telepresence ./bin/telepresence
fi
echo "Telepresence is installed"
else
echo "Telepresence is installed at current directory"
fi
else
current_version="$(parse_telepresence_version)"
need_update="$(validate_telepresence_version $current_version)"
if [ $need_update = "true" ]; then
install_update=$(confirm "A new version of telepresence is detected. Do you want to install the update.")
echo " "
if [ $install_update = "true" ]; then
update_telepresence
else
echo "The telepresence version installed on this computer needs to be updated. Visit https://www.getambassador.io/docs/telepresence/latest/install/upgrade/ for more details."
exit
fi
else
echo "Telepresence is detected in this computer"
fi
fi
# Check if kubectl exists, if not install in this folder (./bin).
if ! [ -x "$(command -v kubectl)" ]; then
if ! [ -f ./bin/kubectl ]; then
echo "Installing kubectl in current directory"
if [ $VERBOSE = "false" ]; then
mkdir -p ./bin >/dev/null
curl -s -LO ${KUBECTL_BIN_PATH}
chmod a+x ./kubectl > /dev/null
mv ./kubectl ./bin/kubectl > /dev/null
else
mkdir -p ./bin
curl -LO ${KUBECTL_BIN_PATH}
chmod a+x ./kubectl
mv ./kubectl ./bin/kubectl
fi
echo "kubectl is installed"
else
echo "kubectl is installed at current directory"
fi
else
echo "kubectl is detected in this computer"
fi
# Export configuration: point KUBECONFIG at the bundled demo kubeconfig and
# put ./bin on PATH, then hand the user an interactive shell (eval $SHELL)
# that inherits this environment.
env_vars="export KUBECONFIG=${CURRENT_DIRECTORY}/kubeconfig.yaml"
path="export PATH=\"$PATH:${CURRENT_DIRECTORY}/bin\""
eval $env_vars
eval $path
echo ""
echo "Setup complete!"
echo ""
echo "A new shell will now be started with the proper environment variables to allow interactions with the demo cluster."
echo ""
echo "Try running the following to see what's running in your cluster:"
echo " kubectl get all"
echo ""
echo "Try listing the services than can be intercepted with Telepresence:"
echo " telepresence list"
echo ""
echo "To step out of the demo cluster context, run:"
echo " exit"
echo ""
echo "To remove all the applications and files installed by this script in this directory, run the script with the parameter --clean."
echo " ./install.sh --clean"
echo "Before clean be sure to leave any shell instance created by the script using exit command."
echo ""
echo "Visit our docs for more information on how to leverage Telepresence: https://www.getambassador.io/docs/telepresence/latest/quick-start/"
echo "Reach out to us on Slack: https://a8r.io/Slack"
# run a new bash with a configuration set
eval $SHELL
| true
|
5c57260be2064db009697678b3d941ba4b62adcd
|
Shell
|
john-r2/OpenFPC
|
/tools/function_test.sh
|
UTF-8
| 2,021
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash -e
# Quickly test a set of functions to ensure things work before rolling a release
# Leon Ward - leon@rm-rf.co.uk

USER="admin"; # Creds to use for this one test
PASS="admin";
OFPC="openfpc-client -u $USER -p $PASS"
IP=$(hostname --all-ip-addresses)
echo IP is $IP

echo -e "[*] Checking Functions. Have you SVN up and installed? <Press Enter>"
read foo

# Optional full reinstall before testing: ./function_test.sh install
if [ "$1" == "install" ]
then
	echo "[-] About to reinstall <ENTER>"
	read foo
	cd ..
	echo " - Reinstalling ..."
	sudo ./openfpc-install.sh reinstall > /dev/null
	cd tools
	sudo openfpc -a stop -q
	sudo openfpc -a start -q
	echo "[------------------------------]"
	sudo openfpc -a status -q
	echo "[------------------------------]"
fi

# Every summary table type to exercise.
SUMTYPE="top_source_ip_by_connection
top_source_ip_by_volume
top_destination_ip_by_connection
top_destination_ip_by_volume
top_source_tcp_by_connection
top_source_tcp_by_volume
top_destination_tcp_by_connection
top_destination_tcp_by_volume
top_destination_udp_by_connection
top_destination_udp_by_volume
top_source_udp_by_connection
top_source_udp_by_volume"

echo [-] Summary Tables
ARGS="-a summary --summary_type"
for T in $SUMTYPE
do
	CMD="$OFPC $ARGS $T"
	echo " - Table: $T"
	$CMD > /dev/null || echo "ERROR Running $CMD"
done

echo [-] Search Tests
SEARCH="--sip $IP
--dip $IP
--spt 53
--dpt 80"
ARGS="-a search --limit 5 --last 6000"
$OFPC $ARGS --dip $IP || echo "ERROR with $OFPC $ARGS --dip $IP"
$OFPC $ARGS --sip $IP > /dev/null || echo "ERROR with $OFPC $ARGS --sip $IP "
$OFPC $ARGS --dpt 80 > /dev/null || echo "ERROR with $OFPC $ARGS --dpt 80"
$OFPC $ARGS --spt 53 > /dev/null || echo "ERROR with $OFPC $ARGS --spt 53"

echo [-] Fetch PCAP
ARGS="-a fetch --dip $IP --last 600"
$OFPC $ARGS -q
$OFPC $ARGS -q --zip --comment "Testing"

echo [-] Storing pcaps
ARGS="-a store --dip $IP --last 60"
$OFPC $ARGS
ARGS="-a store --dpt 80 --last 60"
$OFPC $ARGS
ARGS="-a store --dpt 22 --last 60"
$OFPC $ARGS
# FIX: the closing double quote was missing here, which swallowed the next
# line into the string and left the file with an unterminated-quote
# syntax error.
ARGS="-a store --sip $IP --last 60"
$OFPC $ARGS --comment "SOME COMMENT"
| true
|
1ca95c6ebeca4bb730fc0bc2fc73f4cc118d9262
|
Shell
|
ryan-williams/ls-helpers
|
/ls-full-t-last-n
|
UTF-8
| 113
| 2.875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Show the last N entries of `ls-full-t` output (default 10).
# Usage: ls-full-t-last-n [N] [args passed through to ls-full-t...]
# FIX: anchor the regex with $ so an argument like "2foo" is passed through
# to ls-full-t instead of being misread as the count (which broke tail).
if [[ "$1" =~ ^[0-9]+$ ]]; then
    num="$1"
    shift
fi
ls-full-t "$@" | tail -n "${num:-10}"
| true
|
3c9732e21f2f6223be91df1697a356f66af6b0d8
|
Shell
|
mikebean233/CSCD439_HW
|
/HW3/TurnIn/runTests.sh
|
UTF-8
| 1,499
| 3.5625
| 4
|
[] |
no_license
|
#! /bin/bash
# Run the CUDA jacobi benchmark across a fixed matrix of
# (n, threadsPerBlock, kernelNo) configurations. Each run writes its stdout
# to n<N>_tpb<TPB>_k<K>.txt; anything the run emits on stderr is reported.
# arg1: n
# arg2: threadsPerBlock
# arg3: kernelNo
function runConfiguration {
# IFS is set to "~" so the stderr text captured below normally lands in a
# single array element instead of being split on whitespace.
OLDIFS="$IFS"
IFS="~"
n=$1
threadsPerBlock=$2
kernelNo=$3
echo "--------- executing configuration: n=$n, threadsPerBlock=$threadsPerBlock, kernelNo=$kernelNo ---------------"
echo ""
# stdout goes to the per-configuration result file; stderr is redirected into
# the command substitution and captured in the error array.
error=($( { ./jacobi "$threadsPerBlock" 10 "$n" "$n" "$kernelNo" 1> "n${n}_tpb${threadsPerBlock}_k${kernelNo}.txt" ; } 2>&1 ))
# $? is the status of the command substitution, i.e. of the ./jacobi run.
exitStatus=$?
if (test "$exitStatus" -ne "0"); then
echo "There was a problem running the configuration: $error" 1>&2
fi
IFS="$OLDIFS"
}
# -------------- n threadsPerBlock kernelNo
runConfiguration 1600 0 0
runConfiguration 3200 0 0
runConfiguration 1600 32 0
runConfiguration 1600 32 1
runConfiguration 1600 128 0
runConfiguration 1600 128 1
runConfiguration 1600 256 0
runConfiguration 1600 256 1
runConfiguration 1600 384 0
runConfiguration 1600 384 1
runConfiguration 1600 512 0
runConfiguration 1600 512 1
runConfiguration 1600 1024 0
runConfiguration 1600 1024 1
runConfiguration 3200 256 0
runConfiguration 3200 256 1
runConfiguration 3200 128 0
runConfiguration 3200 128 1
| true
|
b4208b3fafda4bf79769621e85d491c79490d0b6
|
Shell
|
yuHe1/learnShell
|
/checkport.sh
|
UTF-8
| 201
| 3.375
| 3
|
[] |
no_license
|
#!/bin/sh -ex
# Report whether a local port is in use (via lsof). Exits 1 when occupied.
# Usage: checkport.sh <port>
port=$1
# FIX: with an empty argument, `lsof -i:` would list every open socket and
# wrongly report the port as in use; require an explicit port.
if [ -z "$port" ]; then
echo "usage: $0 <port>"
exit 2
fi
#errors=$( netstat -aon | grep $port )
#echo $errors
if [ "$(lsof -i:"$port")" ]; then # netstat -aon | grep $port #in linux
echo "is in use"
exit 1
else
echo "not used "
fi
| true
|
2cfa7c03cb76a67a7a44e81f4b391a35669782d0
|
Shell
|
metalefty/s3fs-fuse-rpm
|
/fuse-rpm
|
UTF-8
| 359
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/bash
# Rebuild the fuse 2.8.5 source RPM into binary RPMs under the current tree,
# downloading the SRPM first if it is not already cached locally.
if [ ! -f './SRPMS/fuse-2.8.5-99.vitki.01.el5.src.rpm' ]; then
    # FIX: abort when the download fails instead of handing rpmbuild a
    # missing/empty file.
    wget --no-check-certificate 'https://rpm.vitki.net/pub/SRPMS/fuse-2.8.5-99.vitki.01.el5.src.rpm' -O './SRPMS/fuse-2.8.5-99.vitki.01.el5.src.rpm' || exit 1
fi
rpmbuild --define "_topdir $(pwd)" --rebuild 'SRPMS/fuse-2.8.5-99.vitki.01.el5.src.rpm'
echo "Your packages are available at $PWD/RPMS"
| true
|
0cde9719aa2cd02596c7cbd4ed6354ca1097556e
|
Shell
|
hui-liu/rootstock_AlleleSeq
|
/scripts/pileup_samtools.sh
|
UTF-8
| 299
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
# Submit a SAM -> BAM conversion + sort job to the cluster via qsub.
# $1 input SAM, $2 BAM output directory, $3 BAM name, $4 sorted BAM name.
SCRIPT_DIR=$(readlink -f "${0%/*}")
SAM=$1
BAMDIR=$2
OUT=$3
OUT2=$4
# FIX: plain echo does not interpret "\n", so the original printed a literal
# backslash-n; printf gives the intended trailing blank line.
printf 'Take %s and make to %s and then sort and output to %s\n\n' "$SAM" "$OUT" "$OUT2"
echo "SAM is $SAM"
echo "BAMDIR is $BAMDIR"
echo "BAM is $OUT"
echo "SORTED is $OUT2"
qsub "$SCRIPT_DIR/submit_samtools.sh" "$SAM" "$BAMDIR" "$OUT" "$OUT2"
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.