blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
a0c319460ed0343068beae91027d879ac1258194 | Shell | abdulbadii/MatrixMultiplier | /MatrixMultiplier.sh | UTF-8 | 1,624 | 3.203125 | 3 | [] | no_license | mul(){ ##### BEGINNING OF mul, matrix multiplier #####
# Multiply two matrices that are passed by NAME (bash namerefs).  Each matrix
# is a bash array of space-separated numbers in row-major order, with a ","
# token marking the end of every row (e.g. m=(1 2, 3 4) is 2x2).  $3 is the
# printf precision (decimal places) used when formatting the product.
unset IFS; local -n arr1=$1 arr2=$2;
# Precision: ${p=-0} only assigns if p were UNSET; after p=$3 it is always
# set (possibly empty), so an omitted $3 yields "%.f" (0 decimals).
# NOTE(review): the "-0" default therefore never fires -- confirm intent.
p=$3; : ${p=-0}
# CR = column count of the 1st matrix (number of elements before the 1st ",").
R=${arr1[@]}
CR=(${R%%,*}); CR=${#CR[@]}
# r becomes only the ","s of the 2nd matrix; its row count is #commas + 1.
r=${arr2[@]};r=${r//[^,]}
# Dimension check: columns(arr1) must equal rows(arr2), else bail out.
((${#r}+1-CR)) &&{ echo The number of 1st matrix column must be equal to row number of 2nd one;return;}
# R = row count of the 1st matrix (#commas + 1).
R=${R//[^,]}; let R=${#R}+1
# Strip the "," row markers so both arrays become flat row-major lists.
arr1=(${arr1[@]//,/ })
# C = column count of the 2nd matrix.
C=${arr2[@]}
C=(${C%%,*}); C=${#C[@]}
arr2=(${arr2[@]//,/ })
# Classic triple loop: arr[r*C+c] = sum over cr of arr1[r][cr]*arr2[cr][c],
# delegating (possibly fractional) arithmetic to bc.
for((r=0;r<$R;r++)){
for((c=0;c<$C;c++)){ M=0
for((cr=0;cr<$CR;cr++)){
M=`bc<<<"$M+${arr1[r*CR+cr]}*${arr2[cr*C+c]}"`;}
printf -v arr[r*C+c] %.${p}f $M
};}
# Print the product, one row per line, elements separated by single spaces.
for((r=0;r<$R;r++)){ echo
for((c=0;c<$C;c++)){ echo -n ${arr[r*$C+c]}\ ;}
};echo
} ##### ENDING OF mul, matrix #####
rot(){ ##### BEGINNING OF rot, rotation #####
# Rotate a list of 2-D points (an Nx2 matrix passed by NAME, same "," row
# format as mul) by $2 degrees around the origin and print the result with
# $3 decimal places.  Accepted argument forms:
#   rot arr DEGREES [p=PREC]   rot arr p=PREC (45 deg)   rot arr - (-45 deg)
unset IFS; local -n arr1=$1
# p from an explicit "p=N" third argument; D (degrees) from the second.
p=${3#p=};D=$2
[ -z "$p" ] &&{
# No usable 3rd arg: $2 may itself be "p=N" (then rotate by the default
# 45), a lone "-" (rotate by -45), or empty (default 45 degrees).
if [[ "$2" = p=[0-9]* ]] ;then p=${2#p=};D=45
else
p=0
if [ "$2" = - ] ;then D=-45
elif [ -z "$2" ] ;then D=45;fi
fi
}
# Degrees -> radians; bc -l supplies sine s() and cosine c().
d=`bc -l<<<"$D/180*3.14159265358979323844"` #pi
cos=`bc -l<<<"c($d)"`
sin=`bc -l<<<"s($d)"`
minsin=`bc -l<<<"-1*$sin"`
# CR = column count of the input matrix; rotation only works on 2-D points.
R=${arr1[@]}
CR=(${R%%,*}); CR=${#CR[@]}
((CR==2))||{ echo Column of matrix must be 2;return ;}
# R = number of points (rows) = number of "," markers + 1.
R=${R//[^,]}; let R=${#R}+1
arr1=(${arr1[@]//,/ })
# 2x2 rotation matrix [cos -sin; sin cos], stored row-major.
arr2=($cos $minsin $sin $cos)
# arr[r][c] = sum over cr of point[r][cr] * rotation[cr][c], via bc.
for((r=0;r<$R;r++)){
for((c=0;c<2;c++)){ M=0
for((cr=0;cr<$CR;cr++)){
M=`bc<<<"$M+${arr1[r*CR+cr]}*${arr2[cr*2+c]}"`;}
printf -v arr[r*2+c] %.${p}f $M
};}
echo "Rotated by $D degree on cartesian system (start at horizontal line, counterclockwise):"
# One rotated point per line.
for((r=0;r<R;r++)){ echo
for((c=0;c<2;c++)){ echo -n ${arr[r*2+c]}\ ;}
};echo
} ##### ENDING OF rot, matrix #####
| true |
16729109980836038d76be3fa134627a44a364fa | Shell | mjenckel/core | /ocrd/ocrd/lib.bash | UTF-8 | 5,116 | 3.875 | 4 | [
"Apache-2.0"
] | permissive | # BEGIN-INCLUDE ./src/raise.bash
## ### `ocrd__raise`
##
## Raise an error and exit.
ocrd__raise () {
# Report a fatal error on stderr and abort the process with status 127.
printf 'ERROR: %s\n' "$1" >&2
exit 127
}
# END-INCLUDE
# BEGIN-INCLUDE ./src/dumpjson.bash
## ### `ocrd__dumpjson`
##
## Output ocrd-tool.json.
##
## Requires `$OCRD_TOOL_JSON` and `$OCRD_TOOL_NAME` to be set:
##
## ```sh
## export OCRD_TOOL_JSON=/path/to/ocrd-tool.json
## export OCRD_TOOL_NAME=ocrd-foo-bar
## ```
##
ocrd__dumpjson () {
# Emit this tool's section of ocrd-tool.json by delegating to the ocrd CLI.
# Relies on OCRD_TOOL_JSON / OCRD_TOOL_NAME having been exported (see docs).
ocrd ocrd-tool "${OCRD_TOOL_JSON}" tool "${OCRD_TOOL_NAME}" dump
}
# END-INCLUDE
# BEGIN-INCLUDE ./src/usage.bash
## ### `ocrd__usage`
##
## Print usage
##
ocrd__usage () {
# Print the standard OCR-D CLI usage/help text for this tool to stdout.
# The backticks inside the double-quoted block substitute the tool's
# one-line description, read from $OCRD_TOOL_JSON via the ocrd CLI.
echo "
Usage: $OCRD_TOOL_NAME [OPTIONS]
`ocrd ocrd-tool "$OCRD_TOOL_JSON" tool "$OCRD_TOOL_NAME" description`
Options:
-l, --log-level [OFF|ERROR|WARN|INFO|DEBUG|TRACE]
Log level
-J, --dump-json Dump tool description as JSON and exit
-p, --parameter PATH
-g, --page-id TEXT ID(s) of the physical page
-O, --output-file-grp TEXT File group(s) used as output.
-I, --input-file-grp TEXT File group(s) used as input.
-w, --working-dir TEXT Working Directory
-m, --mets TEXT METS URL to validate [required]
--help Show this message and exit.
-V, --version Show version.
"
}
# END-INCLUDE
# BEGIN-INCLUDE ./src/parse_argv.bash
## ### `ocrd__parse_argv`
##
## Expects an associative array ("hash"/"dict") `ocrd__argv` to be defined:
##
## ```sh
## declare -A ocrd__argv=()
## ```
ocrd__parse_argv () {
# Parse the standard OCR-D CLI options from "$@" into the caller-declared
# associative array ocrd__argv, apply defaults/validation, then resolve the
# tool parameters (-p) into the caller-declared associative array params.
# if [[ -n "$ZSH_VERSION" ]];then
# print -r -- ${+ocrd__argv} ${(t)ocrd__argv}
# fi
# Both hashes must already exist in the caller (see ocrd__wrap).
if ! declare -p "ocrd__argv" >/dev/null 2>/dev/null ;then
ocrd__raise "Must set \$ocrd__argv (declare -A ocrd__argv)"
fi
if ! declare -p "params" >/dev/null 2>/dev/null ;then
ocrd__raise "Must set \$params (declare -A params)"
fi
# Consume leading options; each value-taking option shifts its argument,
# the loop's trailing shift consumes the option itself.
while [[ "${1:-}" = -* ]];do
case "$1" in
-l|--log-level) ocrd__argv[log_level]=$2 ; shift ;;
-h|--help|--usage) ocrd__usage; exit ;;
-J|--dump-json) ocrd__dumpjson; exit ;;
-p|--parameter) ocrd__argv[parameter]="$2" ; shift ;;
-g|--page-id) ocrd__argv[page_id]=$2 ; shift ;;
-O|--output-file-grp) ocrd__argv[output_file_grp]=$2 ; shift ;;
-I|--input-file-grp) ocrd__argv[input_file_grp]=$2 ; shift ;;
-w|--working-dir) ocrd__argv[working_dir]=$(realpath "$2") ; shift ;;
-m|--mets) ocrd__argv[mets_file]=$(realpath "$2") ; shift ;;
-V|--version) ocrd ocrd-tool "$OCRD_TOOL_JSON" version; exit ;;
*) ocrd__raise "Unknown option '$1'" ;;
esac
shift
done
# Defaults (assigned via ${var:=...} inside the tests) and validation:
# METS file defaults to ./mets.xml, workdir to the METS directory,
# log level to INFO.
if [[ ! -r "${ocrd__argv[mets_file]:=$PWD/mets.xml}" ]];then
ocrd__raise "METS '${ocrd__argv[mets_file]}' not readable. Use -m/--mets-file to set correctly"
fi
if [[ ! -d "${ocrd__argv[working_dir]:=$(dirname "${ocrd__argv[mets_file]}")}" ]];then
ocrd__raise "workdir '${ocrd__argv[working_dir]}' not a directory. Use -w/--working-dir to set correctly"
fi
if [[ ! "${ocrd__argv[log_level]:=INFO}" =~ OFF|ERROR|WARN|INFO|DEBUG|TRACE ]];then
ocrd__raise "log level '${ocrd__argv[log_level]}' is invalid"
fi
# if [[ ! "${ocrd__argv[input_file_grp]:=OCR-D-IMG}" =~ OCR-D-(GT-)?(IMG|SEG|OCR|COR)(-[A-Z0-9\-]{3,})?(,OCR-D-(GT-)?(IMG|SEG|OCR|COR)(-[A-Z0-9\-]{3,})?)* ]];then
# echo >&2 "WARNING: input fileGrp '${ocrd__argv[input_file_grp]}' does not conform to OCR-D spec"
# fi
# if [[ ! "${ocrd__argv[output_file_grp]:=OCR-D-OCR}" =~ OCR-D-(GT-)?(IMG|SEG|OCR|COR)(-[A-Z0-9\-]{3,})?(,OCR-D-(GT-)?(IMG|SEG|OCR|COR)(-[A-Z0-9\-]{3,})?)* ]];then
# echo >&2 "WARNING: output fileGrp '${ocrd__argv[output_file_grp]}' does not conform to OCR-D spec"
# fi
# Delegate parameter parsing/validation to the ocrd CLI, which prints
# shell assignments for the params hash; eval-ing them populates params.
local params_parsed retval
params_parsed="$(ocrd ocrd-tool "$OCRD_TOOL_JSON" tool $OCRD_TOOL_NAME parse-params -p "${ocrd__argv[parameter]}")"
retval=$?
if [[ $retval != 0 ]];then
echo "Error: Failed to parse parameters (retval $retval):"
echo "$params_parsed"
exit 42 # $retval
fi
eval "$params_parsed"
}
# END-INCLUDE
# BEGIN-INCLUDE ./src/wrap.bash
ocrd__wrap () {
# Entry point for bash-based OCR-D processors:
#   ocrd__wrap /path/to/ocrd-tool.json tool-name "$@"
# Exports the tool metadata globals, initializes the params/ocrd__argv
# hashes, sanity-checks the environment, then parses the remaining CLI.
declare -gx OCRD_TOOL_JSON="$1"
declare -gx OCRD_TOOL_NAME="$2"
shift
shift
# Global, exported associative arrays filled by ocrd__parse_argv.
declare -Agx params
params=()
declare -Agx ocrd__argv
ocrd__argv=()
# The ocrd CLI must be installed and reachable.
if ! which "ocrd" >/dev/null 2>/dev/null;then
ocrd__raise "ocrd not in \$PATH"
fi
if ! declare -p "OCRD_TOOL_JSON" >/dev/null 2>/dev/null;then
ocrd__raise "Must set \$OCRD_TOOL_JSON"
elif [[ ! -r "$OCRD_TOOL_JSON" ]];then
ocrd__raise "Cannot read \$OCRD_TOOL_JSON: '$OCRD_TOOL_JSON'"
fi
# The requested tool must be listed in the ocrd-tool.json descriptor.
if [[ -z "$OCRD_TOOL_NAME" ]];then
ocrd__raise "Must set \$OCRD_TOOL_NAME"
elif ! ocrd ocrd-tool "$OCRD_TOOL_JSON" list-tools|grep -q "$OCRD_TOOL_NAME";then
ocrd__raise "No such command \$OCRD_TOOL_NAME: $OCRD_TOOL_NAME"
fi
ocrd__parse_argv "$@"
}
# END-INCLUDE
| true |
54dda1cf9b244f8b077810911ed6d553de324478 | Shell | acfoltzer/hyper | /capi/gen_header.sh | UTF-8 | 2,626 | 3.9375 | 4 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/usr/bin/env bash
# This script regenerates hyper.h. As of April 2021, it only works with the
# nightly build of Rust.
set -e
CAPI_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
WORK_DIR=$(mktemp -d)
# check if tmp dir was created
if [[ ! "$WORK_DIR" || ! -d "$WORK_DIR" ]]; then
echo "Could not create temp dir"
exit 1
fi
header_file_backup="$CAPI_DIR/include/hyper.h.backup"
function cleanup {
rm -rf "$WORK_DIR"
rm "$header_file_backup" || true
}
trap cleanup EXIT
mkdir "$WORK_DIR/src"
# Fake a library
cat > "$WORK_DIR/src/lib.rs" << EOF
#[path = "$CAPI_DIR/../src/ffi/mod.rs"]
pub mod ffi;
EOF
# And its Cargo.toml
cat > "$WORK_DIR/Cargo.toml" << EOF
[package]
name = "hyper"
version = "0.0.0"
edition = "2018"
publish = false
[dependencies]
# Determined which dependencies we need by running the "cargo rustc" command
# below and watching the compile error output for references to unknown imports,
# until we didn't get any errors.
bytes = "1"
futures-channel = "0.3"
futures-util = { version = "0.3", default-features = false, features = ["alloc"] }
libc = { version = "0.2", optional = true }
http = "0.2"
http-body = "0.4"
tokio = { version = "1", features = ["rt"] }
[features]
default = [
"client",
"ffi",
"http1",
]
http1 = []
client = []
ffi = ["libc", "tokio/rt"]
EOF
cp "$CAPI_DIR/include/hyper.h" "$header_file_backup"
#cargo metadata --no-default-features --features ffi --format-version 1 > "$WORK_DIR/metadata.json"
cd "${WORK_DIR}" || exit 2
# Expand just the ffi module
if ! output=$(RUSTFLAGS='--cfg hyper_unstable_ffi' cargo rustc -- -Z unpretty=expanded 2>&1 > expanded.rs); then
# As of April 2021 the script above prints a lot of warnings/errors, and
# exits with a nonzero return code, but hyper.h still gets generated.
#
# However, on Github Actions, this will result in automatic "annotations"
# being added to files not related to a PR, so if this is `--verify` mode,
# then don't show it.
#
# But yes show it when using it locally.
if [[ "--verify" != "$1" ]]; then
echo "$output"
fi
fi
# Replace the previous copy with the single expanded file
rm -rf ./src
mkdir src
mv expanded.rs src/lib.rs
# Bindgen!
if ! cbindgen \
--config "$CAPI_DIR/cbindgen.toml" \
--lockfile "$CAPI_DIR/../Cargo.lock" \
--output "$CAPI_DIR/include/hyper.h" \
"${@}"; then
bindgen_exit_code=$?
if [[ "--verify" == "$1" ]]; then
echo "diff generated (<) vs backup (>)"
diff "$CAPI_DIR/include/hyper.h" "$header_file_backup"
fi
exit $bindgen_exit_code
fi
exit 0
| true |
10a308cab045b2b29ba822499bb381bf2e367363 | Shell | stevekm/bash_commands | /rename_with_dirname.sh | UTF-8 | 697 | 3.109375 | 3 | [] | no_license | #!/bin/bash
# Copy BaseSpace targeted-RNA FASTQ files into a dated folder, renaming each
# copy with a strand suffix ("+" or "-") inferred from its directory path.
src_root=/Users/kellys04/BaseSpace/TargetedRNAExpression-25587863
dest_dir=/Users/kellys04/BaseSpace/2015-10-14
cd "$src_root" || exit 1
mkdir -p "$dest_dir"
# copy_with_suffix PATTERN SUFFIX: find every *.fastq.gz whose path matches
# PATTERN and copy it to $dest_dir with SUFFIX inserted before ".fastq.gz".
copy_with_suffix() {
local pattern=$1 suffix=$2 f new
# Bug fix vs. the original unquoted `for i in $(find ...)`: iterate over a
# NUL-delimited stream so filenames containing whitespace survive intact.
while IFS= read -r -d '' f; do
new=$(basename "${f%.fastq.gz}")_${suffix}.fastq.gz
# Echo source and target name, mirroring the original's progress output.
echo "$f"
echo "$new"
cp -- "$f" "$dest_dir/$new"
done < <(find "$src_root" -type f -path "$pattern" -name "*.fastq.gz" -print0)
}
# "+" strand reads live under directories containing "+-"; "-" under "--".
copy_with_suffix "*+-*" "+"
copy_with_suffix "*--*" "-"
| true |
ee94c2891ace2434924c1f0172d2c964aeb54340 | Shell | MinimalCompact/thumbor | /tests/nginx-proxy-cors.bats | UTF-8 | 848 | 3.296875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bats
# Bats integration tests: verify that thumbor only emits CORS headers when
# CORS_ALLOW_ORIGIN is set, using the basic docker-compose setup.
BASE=tests/setup/basic
# Tear the compose stack down after every test case.
teardown () {
docker-compose -f $BASE/docker-compose.yml down
}
# Start the stack and block (up to 2 minutes) until /healthcheck returns 200.
load_thumbor () {
docker-compose -f $BASE/docker-compose.yml up -d
timeout 2m bash -c 'while [[ "$(curl -s -o /dev/null -w ''%{http_code}'' localhost:8888/healthcheck)" != "200" ]]; do sleep 5; done' || false
}
@test "no CORS headers by default" {
load_thumbor
# grep exits 1 when the header is absent -- which is what we expect here.
run bash -c "curl -sSL -D - http://localhost:8888/unsafe/500x150/iili.io/H8m6pHv.png -o /dev/null |grep 'Access-Control-Allow-Origin'"
[ $status -eq 1 ]
}
@test "CORS headers based on CORS_ALLOW_ORIGIN value" {
# Exported before load_thumbor so docker-compose passes it to the service.
export CORS_ALLOW_ORIGIN=http://www.xyz
load_thumbor
run bash -c "curl -sSL -D - http://localhost:8888/unsafe/500x150/iili.io/H8m6pHv.png -o /dev/null |grep 'Access-Control-Allow-Origin: http://www.xyz'"
[ $status -eq 0 ]
}
| true |
d64c827d9586d1a21b091ff3b9a0a0d63e4be682 | Shell | mishamosher/backup-restore-webapps | /src/restore-sql.sh | UTF-8 | 888 | 4.0625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Specific requirements: env.sh
# Import shared configs and functions
# env.sh is expected to populate the ARGS array (parsed CLI arguments,
# options stripped) and to define restoreSql.
source "$(dirname "$0")/env.sh"
# Print the CLI help text (single-quoted block, printed verbatim).
showUsage() {
echo 'restore-sql.sh - restores a MySQL database from a local backup (falling back to downloading it)
usage: restore-sql.sh DESTINATION TIMESTAMP [REMOTE]
Parameters:
DESTINATION Required. Name of the database to restore.
TIMESTAMP Required. A timestamp of the backup to restore.
REMOTE Optional. Alternative name for the backup folder (by default, it is the same as DESTINATION).
Options:
--no-warn Skip warnings
example: restore-sql.sh "my_db" "2020-07-23T19-45-41Z"
example: restore-sql.sh "my_db" "2020-07-23T19-45-41Z" "alternative_name"
example: restore-sql.sh "my_db" "2020-07-23T19-45-41Z" "alternative_name" --no-warn'
}
# Require 2 or 3 positional arguments, otherwise show help and stop.
if [ ${#ARGS[*]} -lt 2 ] || [ ${#ARGS[*]} -gt 3 ]; then
showUsage
exit 0
fi
restoreSql "${ARGS[@]}"
| true |
f21e33e61962aa977aade002c4de01a63b2245f4 | Shell | swirepe/personalscripts | /bt/btls | UTF-8 | 2,245 | 3.90625 | 4 | [] | no_license | #!/usr/bin/env bash
# btls -- list torrents / torrent files via transmission-remote.
#transmission-remote snake --auth admin:snake --list
# no arguments: list all torrents
# torrent ids: list files for those ids
# --all-files: list files for all ids
# --bare: show only the files, not the other fluff
if [[ "$1" == "--help" ]]
then
echo -e "$(basename $0)"
echo -e "    Peter Swire - swirepe.com"
echo -e "    Part of a suite of command line tools for manipulating transmission."
$0 --help-oneline
echo -e "$(basename $0) <ids>\tList files for torrents with these ids."
echo -e "$(basename $0) --all-files\tList files for all torrents."
echo -e "$(basename $0) --bare\tShow only files, nothing else."
exit 0
fi
if [[ "$1" == "--help-oneline" ]]
then
echo -e "$(basename $0)\tList all torrents."
exit 0
fi
# Host/auth come from ~/.btrc when present; otherwise fall back to the
# hard-coded "snake" defaults.
if [ -e $HOME/.btrc ]
then
source $HOME/.btrc
else
echo "Using defaults for snake." > /dev/stderr
BT_HOST="snake"
BT_AUTH="admin:snake"
fi
# bare TORRENT_ID: print only the file names for one torrent by scraping
# transmission-remote's table output.  The "Name" column offset is measured
# from the header line, then each data row is cut at that column.
function bare {
TORRENT_ID="$1"
if [[ "$TORRENT_ID" == "-" ]]
then
return
fi
NAME_COL_OFFSET=$(transmission-remote $BT_HOST --auth $BT_AUTH -t $TORRENT_ID --files | grep Name | sed 's/Name.*//' | wc -c)
while read remote_file
do
echo $remote_file
done < <(transmission-remote $BT_HOST --auth $BT_AUTH -t $TORRENT_ID --files | tail -n +3 | cut -c"$NAME_COL_OFFSET"- )
}
# Flag parsing: --bare / --all-files may appear as either of the first two
# arguments (NOTE(review): a flag in position 3+ is silently ignored).
BARE="false"
ALL_FILES="false"
if [[ "$1" == "--bare" ]] || [[ "$2" == "--bare" ]]
then
BARE="true"
fi
if [[ "$1" == "--all-files" ]] || [[ "$2" == "--all-files" ]]
then
ALL_FILES="true"
fi
# Dispatch: no args -> torrent list; --all-files -> files for every id
# reported by the companion `btid` tool; otherwise files for the given ids.
if [[ -z "$1" ]]
then
transmission-remote $BT_HOST --auth $BT_AUTH --list
elif [[ "$ALL_FILES" == "true" ]]
then
if [[ "$BARE" == "true" ]]
then
for torrent in $(btid)
do
bare $torrent
done
else
for torrent in $(btid)
do
transmission-remote $BT_HOST --auth $BT_AUTH -t $torrent --files
done
fi
else
if [[ "$BARE" == "true" ]]
then
for torrent in $@
do
bare $torrent
done
else
for torrent in $@
do
transmission-remote $BT_HOST --auth $BT_AUTH -t $torrent --files
done
fi
fi
| true |
a246e6657f4cef3914048f19593d89afe96b2440 | Shell | ronsm/Assisted-Living-Confluence-Chatbot | /SYSTEM/start_alana.sh | UTF-8 | 3,541 | 2.828125 | 3 | [] | no_license | #!/bin/sh
# Launch the whole Alana bot ensemble inside one tmux session: each window
# hosts one service (hub, bots, mongodb, ...), then attach to the session.
# Requires a config YAML as $1.
if [ -z "$1" ]; then
echo "Usage: $0 <config.yaml>"; exit 1
fi
#added by me, THECYBERSMITH, to do stuff
# Kill any leftover gunicorn workers and (re)create the virtualenv.
pkill gunicorn
python3 -m venv valana
#added by me, THECYBERSMITH, to do stuff
SESSION_NAME="Alana"
BOT_WORKERS=1
HUB_WORKERS=1
#MACOS
#SOURCE="source ~/.bash_profile"
#Linux
#SOURCE="source ~/.bashrc"
# ALANA_PATH and ALANA_ENV should be set
export ALANA_PATH=bot_ensemble
export ALANA_ENV=valana/bin/activate
# Run from the directory containing this script.
cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Only build the session if one with this name does not already exist.
tmux has-session -t ${SESSION_NAME}
if [ $? != 0 ]
then
# Create the session
tmux new-session -s ${SESSION_NAME} -n Alana -d
# First window (0) -- Bucket
tmux send-keys -t ${SESSION_NAME} "source $ALANA_ENV; python alana_main.py --config_file=$1 -cv=critical" C-m
#https://verboselogs.readthedocs.io/en/latest/readme.html#overview-of-logging-levels VERY IMPORTANT!!!!!!
# sleep 2
# Bots (2)
tmux new-window -n "aiml bots" -t ${SESSION_NAME}:2
#tmux send-keys -t ${SESSION_NAME}:2 "source $ALANA_ENV; cd $ALANA_PATH/ProfanityBot; gunicorn --workers=$BOT_WORKERS bot:app --bind 0.0.0.0:5113" C-m
# tmux split-window -h -t ${SESSION_NAME}:2
tmux send-keys -t ${SESSION_NAME}:2 "source $ALANA_ENV; cd $ALANA_PATH/aiml_bots; gunicorn --workers=$BOT_WORKERS bot:app --bind 0.0.0.0:5112" C-m
tmux split-window -v -t ${SESSION_NAME}:2
tmux send-keys -t ${SESSION_NAME}:2.1 'source $ALANA_ENV; cd $ALANA_PATH/aiml_bots/bots/persona; sh rest.sh' C-m
tmux split-window -h -t ${SESSION_NAME}:2
tmux send-keys -t ${SESSION_NAME}:2.2 'source $ALANA_ENV; cd $ALANA_PATH/aiml_bots/bots/rosie_fixed; sh rest.sh' C-m
# clarification/ontology (3)
tmux new-window -n "clarification/ontology" -t ${SESSION_NAME}:3
tmux send-keys -t ${SESSION_NAME}:3 "source $ALANA_ENV; cd $ALANA_PATH/clarification_bot; gunicorn --workers=${BOT_WORKERS} http_bot:app --bind 0.0.0.0:5111" C-m
tmux split-window -v -t ${SESSION_NAME}:3
tmux send-keys -t ${SESSION_NAME}:3.1 "source $ALANA_ENV; cd $ALANA_PATH/ontology_bot; python http_bot.py" C-m
# coherence(4)
tmux new-window -n "intro/coherence" -t ${SESSION_NAME}:4
tmux send-keys -t ${SESSION_NAME}:4 "source $ALANA_ENV; cd $ALANA_PATH/coherence_bot; gunicorn --workers=${BOT_WORKERS} bot:app --bind 0.0.0.0:5115" C-m
# evi (5)
tmux new-window -n "evi" -t ${SESSION_NAME}:5
tmux send-keys -t ${SESSION_NAME}:5 "source $ALANA_ENV; cd $ALANA_PATH/evibot/; gunicorn --workers=${BOT_WORKERS} bot:app --bind 0.0.0.0:5117" C-m
# wiki (6)
#tmux new-window -n "wiki" -t ${SESSION_NAME}:6
#tmux send-keys -t ${SESSION_NAME}:6 "source $ALANA_ENV; cd $ALANA_PATH/wiki_bot_mongo; gunicorn --workers=${BOT_WORKERS} bot:app --bind 0.0.0.0:5222" C-m
# mongodb(6)
tmux new-window -n "mongodb" -t ${SESSION_NAME}:6
tmux send-keys -t ${SESSION_NAME}:6 "mongod --dbpath db_data/" C-m
# NEW_BOT (7)
tmux new-window -n "NEWBOT" -t ${SESSION_NAME}:7
tmux send-keys -t ${SESSION_NAME}:7 "source $ALANA_ENV; cd $ALANA_PATH/NEW_BOT/; sleep 5; gunicorn --workers=${BOT_WORKERS} http_bot:app --bind 0.0.0.0:5557" C-m
tmux split-window -v -t ${SESSION_NAME}:7
tmux send-keys -t ${SESSION_NAME}:7.1 "source $ALANA_ENV; cd $ALANA_PATH/NEW_BOT/Profiler; sleep 2; npm start" C-m
tmux split-window -h -t ${SESSION_NAME}:7
tmux send-keys -t ${SESSION_NAME}:7.2 "source $ALANA_ENV; cd $ALANA_PATH/NEW_BOT/NLU; sleep 10; python2 bot.py" C-m
# Start out on the first window when we attach
tmux select-window -t ${SESSION_NAME}:0
fi
# Clear TMUX so attaching works even when launched from inside tmux.
unset TMUX
tmux a -t ${SESSION_NAME}
| true |
60856ef9ff735e326c34a5ddbe5f0a603245bca8 | Shell | LorenzoCavatorta/Warmy | /Warmy | UTF-8 | 632 | 3.71875 | 4 | [] | no_license | #!/usr/bin/env bash
# Control script for the single Warmy Celery worker:
#   up     -> start it,  down -> stop it gracefully,  status -> report state.
worker_name='the_one_and_only_worker'
if [ "$1" == "up" ]; then
echo 'starting Warmy worker'
celery multi start "${worker_name}" -A Warmy -l info
# Wait for any backgrounded children spawned during startup.
wait
echo 'Warmy worker started'
fi
if [ "$1" == "down" ]; then
echo 'stopping Warmy worker'
celery multi stopwait "${worker_name}" -A Warmy -l info
echo 'Warmy worker stopped'
fi
if [ "$1" == "status" ]; then
# Count "OK" status lines mentioning our worker; >=1 means it responded.
worker_status=$(celery status -A Warmy | grep "${worker_name}" | grep OK | wc -l )
if [ ${worker_status} -ge 1 ]; then
echo -n 'up and running'
else
echo -n 'Warmy worker is not active'
fi
fi
| true |
1f9e1590145f6fa40b05341cd7c7e145a5ca1716 | Shell | dnfcallan/THE-2020-PERSONALIZED-VOICE-TRIGGER-CHALLENGE-BASELINE-SYSTEM | /run.sh | UTF-8 | 1,261 | 3.078125 | 3 | [] | no_license | #!/bin/bash
# PVTC baseline pipeline driver.  `stage` selects where to resume: every
# stage with a number >= $stage runs, each aborting the script on failure.
# /PATH placeholders must be edited to the local dataset locations.
stage=1
# Stage 1: data preparation for train/dev and both test tasks.
if [ $stage -le 1 ];then
local/prepare_all.sh /PATH/official_PVTC/train /PATH/official_PVTC/train_xiaole_time_point /PATH/official_PVTC/dev /PATH/TESTSET/task1 /PATH/TESTSET/task2 || exit 1
fi
# Stage 2: keyword spotting.
if [ $stage -le 2 ];then
#If the first parameter is set as false, we will provide the trained model for testing.
local/run_kws.sh false || exit 1
fi
# Stage 3: speaker verification.
if [ $stage -le 3 ];then
# 6 parameters in this sh. The first `list_pretrain` needs to be created by yourself based on your pre-training data. More details can be found in ./SV_README.md
# If you set the first `list_pretrain` to None, the pre-trained model we provided will be downloaded and used in next steps.
# The second and third parameters should be the path of PVTC train and dev data.
# The fourth and fifth parameters should be the path of MUSAN(SLR17) and RIRs(SLR28) noise.
# If the sixth parameter `whether_finetune` set as None, the finetuned model we provided will also be downloaded instead of fine-tuning on the pre-trained model.
local/run_sv.sh None /PATH/official_PVTC/train /PATH/official_PVTC/dev \
/PATH/musan/ /PATH/RIRS_NOISES/simulated_rirs/ None || exit 1
fi
# Stage 4: score/report results on the dev set.
if [ $stage -le 4 ];then
local/show_results.sh /PATH/official_PVTC/dev || exit 1
fi
exit 0;
| true |
fb243f92d37b7b5150afa36b4bdf3721d1b9cdaa | Shell | pymor/pymor | /.ci/gitlab/test_tutorials.bash | UTF-8 | 495 | 2.734375 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | #!/bin/bash
# Convert every MyST-markdown tutorial into a Jupyter notebook, then run the
# notebook-based tutorial tests under a virtual X server with pymor coverage.
COV_OPTION="--cov=pymor"
THIS_DIR="$(cd "$(dirname ${BASH_SOURCE[0]})" ; pwd -P )"
# Shared CI setup (defines PYMOR_ROOT, COMMON_PYTEST_OPTS, _coverage_xml).
source ${THIS_DIR}/common_test_setup.bash
# tutorial*.md -> ../converted_tutorial*.ipynb next to the sources.
for fn in ${PYMOR_ROOT}/docs/source/tutorial*md ; do
mystnb-to-jupyter -o "${fn}" "${fn/tutorial/..\/converted_tutorial}".ipynb
done
# manually add plugins to load that are excluded for other runs
xvfb-run -a pytest ${COMMON_PYTEST_OPTS} --nb-coverage -s -p no:pycharm \
-p nb_regression --cov=pymor -p notebook docs/test_tutorials.py
# Emit the coverage XML report (helper from common_test_setup.bash).
_coverage_xml
| true |
75a44dac61002326c6d8bc3e11ea57f93ddb5d62 | Shell | dmattek/lap-track | /cleanLAP.sh | UTF-8 | 465 | 2.6875 | 3 | [] | no_license | #!/bin/bash
# Clean LAP tracking output
# Deletes all intermediate/derived tracking artifacts below the current
# directory; each `find ... -exec rm -r {} +` batch-removes its matches.
# remove "track" directory created by MATLAB script
find . -name "track*" -type d -exec rm -r {} +
# remove final "clean tracks" csv created by R script
find . -name "*clean_track*" -type f -exec rm -r {} +
# remove 1-line header CP output created by R script
find . -name "*1line.csv" -type f -exec rm -r {} +
# remove pngs with overlaid track IDs created by python script
find . -name "*over" -type d -exec rm -r {} +
| true |
e451c38b8d8b916931af71eba8e1f1658c4c01a6 | Shell | YaroslavGitHub/DevOps_online_Dnipro_2020Q42021Q1 | /m7/script2.sh | UTF-8 | 1,017 | 3.515625 | 4 | [] | no_license | #!/bin/bash
# Interactive report menu over the web-server access log ./example_log.log
# (assumed combined log format: $1 = client IP, $4 = timestamp, $7 = path).
echo Please select a menu item
echo
echo "1) From which ip were the most requests?"
echo "2) What is the most requested page?"
echo "3) How many requests were there from each ip?"
echo "4) What non-existent pages were clients referred to?"
echo "5) What time did site get the most requests?"
echo "6) What search bots have accessed the site? (UA + IP)"
echo
# -r: keep any backslashes in the typed choice literal.
read -r CHOICE
case $CHOICE in
1) awk '{ print $1 }' example_log.log | sort | uniq -c | sort -n -r | head -n 10;;
2) awk '{ print $7 }' example_log.log | sort | uniq -c | sort -rn | head -n 25;;
3) awk '{print "requests from " $1}' example_log.log | sort | uniq -c | sort;;
4) grep '404' example_log.log | sed 's/, /,/g' | awk '{print $7}' | sort | uniq -c | sort -n -r | head -10;;
5) awk '{print $4}' example_log.log | cut -d: -f1 | uniq -c;;
# Bug fix: the original pattern was negated (!/bots/), so it listed every
# request EXCEPT the search bots, while the menu asks for requests made BY
# the bots.  TODO(review): confirm the log really uses '|' separators for
# the $5/$11 fields this prints.
6) awk -F'|' '/Applebot|Googlebot|bingbot/{print $5 $11}' example_log.log | sort -nr | uniq -c | head -30;;
*) echo You made an invalid selection;;
esac
echo Have a great day! | true |
f631a18120c2a41c137c27895d54e6858cbe18f9 | Shell | TMmichi/pyspacenav | /doc/setup_init.2 | UTF-8 | 810 | 2.875 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# adds LSB to default init script and installs it
rm -f init_script.2
cp init_script init_script.2
sed -i -f - init_script.2 <<- "EOF"
2 i\
### BEGIN INIT INFO \
# Provides: spacenavd \
# Required-Start: $local_fs \
# Required-Stop: $local_fs \
# Default-Start: 2 3 \
# Default-Stop: 0 1 \
# Short-Description: a free user space driver for connexion input devices \
### END INIT INFO \
EOF
rm -f /etc/init.d/spacenavd
install -m 755 init_script.2 /etc/init.d/spacenavd
cd /etc/rc0.d
rm -f K01spacenavd
ln -s ../init.d/spacenavd K01spacenavd
cd /etc/rc1.d
rm -f K01spacenavd
ln -s ../init.d/spacenavd K01spacenavd
cd /etc/rc2.d
rm -f S99spacenavd
ln -s ../init.d/spacenavd S99spacenavd
cd /etc/rc3.d
rm -f S99spacenavd
ln -s ../init.d/spacenavd S99spacenavd
| true |
0d5a8b03c67c9c73d8f57034ae95e522394947f1 | Shell | pombredanne/nasl-animal-farm | /tests/moses/test_all.sh | UTF-8 | 193 | 2.765625 | 3 | [] | no_license | #!/bin/bash
# Run the moses tester over every .inc / .nasl plugin file, appending each
# file's name plus the tester's stdout+stderr to the "xxx" log file.
# Bug fix vs. the original unquoted `for i in $FILES` loop: stream the
# results NUL-delimited so paths containing whitespace survive intact.
find ./plugin_dev -regextype egrep -regex '.*(\.inc|\.nasl)' -print0 |
while IFS= read -r -d '' plugin; do
printf "\n%s\n" "$plugin" >> xxx 2>&1
python3 ./tests/moses/tester.py -f "$plugin" >> xxx 2>&1
done
| true |
429996826fa8b280585ea069a369bfb3b6d8b45b | Shell | theboocock/OtagoGalaxy | /src/ensembl-functgenomics/scripts/run_dump_features.sh | UTF-8 | 3,084 | 3.0625 | 3 | [] | no_license | #!/bin/sh
# Dump Ensembl funcgen feature sets via dump_features.pl, or (when the first
# argument is the literal "merge") merge previously dumped RegulatoryFeature
# GFF chunks into per-set files and link them into the FTP tree.
# Usage: run_dump_features.sh USER [extra dump_features.pl args...]
USER=$1
shift
if [ ! $USER ]; then
echo "You must specific a user argument"
exit
fi
#set_names="K562_SP1_ENCODE_Hudsonalpha_SWEMBL_R015 HepG2_USF1_ENCODE_Hudsonalpha_SWEMBL_R015"
set_names=""
#rset_names="RegulatoryFeatures:MultiCell RegulatoryFeatures:K562 RegulatoryFeatures:CD4 RegulatoryFeatures:IMR90 RegulatoryFeatures:GM06990 RegulatoryFeatures:GM12878 RegulatoryFeatures:H1ESC RegulatoryFeatures:H1ESC RegulatoryFeatures:HepG2 RegulatoryFeatures:NHEK RegulatoryFeatures:HUVEC"
#dbname='homo_sapiens_funcgen_63_37'
#dbhost='ens-staging'
#format='bigwig'
format='GFF'
#feature_type='ProbeFeature'
#feature_type='ResultFeature'
feature_type='RegulatoryFeature'
#feature_type='AnnotatedFeature'
rset_names="RegulatoryFeatures:MultiCell RegulatoryFeatures:ES RegulatoryFeatures:ESHyb RegulatoryFeatures:MEF RegulatoryFeatures:NPC"
#rset_names='AnnotatedFeatures' #Special set name to dump all supportingt sets
dbname='mus_musculus_funcgen_63_37'
dbhost='ens-staging2'
dnadbhost=$dbhost
dnadbuser=$USER
#change output to RegulatoryFeature.ctype rather than RegulatoryFeatures_ctype?
#set_names="IMR90_DNase1_EpiRoadmap_Uw_GSE18927"
#set_names="AnnotatedFeatures"
# Dump mode: one dump_features.pl invocation per regulatory set.
if [[ $USER != merge ]];then
#Only do this loop for reg feat sets
#i.e. we want separate dumps
#remove for loop for AnnotatedFeature dumps i.e. merged dump
for set_names in $rset_names; do
dump_name="-dump_name $set_names"
#dump_name='-dump_name HG-U133A'
dump_params="-feature_sets $set_names $dump_name"
# [30, 65, 130, 260, 450, 648, 950, 1296];
#wsizes="30 65 130 260 450 648 950" # 1296"
#wsizes="1296"
#for wsize in $wsizes; do
#dump_params="-result_sets $set_names -window_size $wsize"
#dump_params="-array $dump_name -vendor AFFY"
bin_dir='/nfs/users/nfs_n/nj1/'
out_dir="/lustre/scratch103/ensembl/funcgen/output/${dbname}/dumps/${format}/${feature_type}"
if [ ! -d $out_dir ]; then
mkdir -m 775 -p $out_dir
fi
# Build and run the dump command; trailing "$@" forwards extra CLI args.
job_cmd="$EFG_SRC/scripts/export/dump_features.pl\
-port 3306\
-format $format\
-dbhost $dbhost\
-dbname $dbname\
-dnadb_host $dnadbhost\
-dnadb_user $dnadbuser\
$dump_params\
-out_root $out_dir \
-bin_dir $bin_dir\
-user $USER\
$@ "
#Add these on cmd line to avoid including them by mistake
#-farm
#-post_process
#-force_local
#-slices chromosome:GRCh37:X:1:421628\
$job_cmd
done
else
#MERGE RegulatoryFeature dumps
# NOTE(review): this only WARNS and then continues for non-RegulatoryFeature
# types, and $out_dir is never assigned in this branch (it is only set
# inside the dump loop above), so `cd $out_dir` below is a no-op -- confirm.
if [[ $feature_type != RegulatoryFeature ]]; then
echo "Can only merge RegulatoryFeature sets at present"
fi
species=$(echo $dbname | sed 's/_funcgen.*//')
ftp_dir=/lustre/scratch103/ensembl/funcgen/output/ftp/current_functional_genomics/$species
start_dir=$PWD
cd $out_dir
# Concatenate each set's per-slice GFF chunks into one file and link it
# into the FTP directory ("RegulatoryFeatures:X" -> "RegulatoryFeatures_X").
for rset in $rset_names; do
file_prefix=$(echo $rset | sed s'/\:/_/')
gunzip ${file_prefix}*gff.gz
rm -f ${file_prefix}.gff
cat ${file_prefix}*gff > ${file_prefix}.gff
if [[ ! -d $ftp_dir ]]; then
mkdir -p $ftp_dir
fi
#Add MD5s here and test?
ln -s $PWD/${file_prefix}.gff ${ftp_dir}/${file_prefix}.gff
done
echo "DON'T FORGET THE READMEs!"
cd $start_dir
fi
#Are the permissions set correctly? 775?
| true |
1f91c07034c4a6e0cc59a94d24f0cd31296406cc | Shell | lrakai/lambda-bash-delete-cloudwatch-log-groups | /handler.sh | UTF-8 | 576 | 3.765625 | 4 | [
"MIT"
] | permissive | function handler () {
# AWS Lambda (bash custom runtime) handler body: deletes EVERY CloudWatch
# log group in the account/region the function runs in.
set -e
# Event Data is sent as the first parameter
EVENT_DATA=$1
# Example of command usage
EVENT_JSON=$(echo $EVENT_DATA | jq .)
# Table output -> 2nd column -> drop blanks -> drop the residual title row,
# then delete each remaining log group name.
aws logs describe-log-groups --query 'logGroups[*].logGroupName' --output table |
awk '{print $2}' |
grep -v ^$ |
tail -n +2 | # skip table title line
while read x; do
echo deleting log group $x
aws logs delete-log-group --log-group-name $x 2>&1
done
# This is the return value because it's being sent to stderr (>&2)
echo "{\"success\": true}" >&2
} | true |
6307b139ed1930ee21e578570072553ce77ab057 | Shell | mshahbaz/CloudStoneSetupOnUbuntuAdvanced | /base-setup.sh | UTF-8 | 2,780 | 3.5625 | 4 | [] | no_license | #!/bin/bash
# CloudStone base provisioning: install common packages, Java/ANT, prepare
# /cloudstone and /tmp, and download/unpack the Olio workload with its MySQL
# connector.  Relies on helpers (logHeader, exportVar) and $startDir from
# functions.sh, and assumes passwordless sudo on an apt-based system.
# Import commonly reused functions
. $startDir/functions.sh
# Log Installation environment metadata
logHeader "Installation Metadata"
echo USER: `whoami`, $USER
echo SYSTEM: `uname -a`
echo CURRENT TIME: `date`
echo START DIR: $startDir
## Make the CloudStone directory and give appropriate access for everyone
logHeader " Setting up the installation directory /cloudstone"
sudo mkdir /cloudstone
sudo chown -R $USER /cloudstone/
sudo chmod -R 777 /cloudstone
cd /cloudstone
logHeader " Setting up common packages"
## Update repositories
sudo apt-get update 1> /dev/null
## Install libc6:amd64 - needed by some systmes
sudo apt-get install -y libc6:amd64 1> /dev/null
## Uninstall all javas if present
sudo apt-get purge -y openjdk-\* icedtea-\* icedtea6-\* 1> /dev/null
## Install unzip utilities
sudo apt-get install -y unzip 1> /dev/null
## Install C/C++ compilers
sudo apt-get install -y build-essential 1> /dev/null
## Install dependencies
sudo apt-get install -y libpcre3 libpcre3-dev libpcrecpp0 libssl-dev zlib1g-dev 1> /dev/null
## Install Java and ANT. Set up Java environment variables
sudo apt-get install -y openjdk-6-jdk 1> /dev/null
## Install libaio1
sudo apt-get install -y libaio1 1> /dev/null
## Install git
sudo apt-get install -y git 1> /dev/null
## Install monitoring tools
sudo apt-get install -y sysstat 1> /dev/null
## Change permission of pem file and set access without prompts
logHeader " Setting SSH access without password"
pemFile=CloudStone.pem
sudo chmod 400 ~/$pemFile
sudo cp -rf ~/config ~/.ssh/config
## Set up Java
logHeader " Setting up Java environment variables"
# Resolve the real JDK directory from the javac symlink.
jdklocation=$(readlink -f /usr/bin/javac | sed "s:/bin/javac::")
jrelocation=$jdklocation/jre
exportVar JAVA_HOME $jdklocation
exportVar JDK_HOME $jdklocation
exportVar PATH "$PATH:$jrelocation/bin"
## Install ANT
logHeader " Setting up ANT"
sudo apt-get install -y ant
## Set up /tmp, /var/log/messages
logHeader " Change permissions and owner of /tmp, create /var/log/messages"
sudo chown -R $USER /tmp/
sudo chmod -R 777 /tmp/
sudo touch /var/log/messages
sudo chmod 777 /var/log/messages
## Download cloudstone, extract Olio and set its mysql connector
logHeader " Download and setup Olio"
wget --no-verbose http://parsa.epfl.ch/cloudsuite/software/web.tar.gz 1> /dev/null
tar xzvf web.tar.gz 1> /dev/null
cp web-release/apache-olio-php-src-0.2.tar.gz .
tar xzvf apache-olio-php-src-0.2.tar.gz 1> /dev/null
exportVar OLIO_HOME "/cloudstone/apache-olio-php-src-0.2"
cp web-release/mysql-connector-java-5.0.8.tar.gz .
tar xzvf mysql-connector-java-5.0.8.tar.gz 1> /dev/null
cp ./mysql-connector-java-5.0.8/mysql-connector-java-5.0.8-bin.jar $OLIO_HOME/workload/php/trunk/lib
sudo chown -R $USER $OLIO_HOME
sudo chmod -R 777 $OLIO_HOME
| true |
c066d24a56ca87c58c5233f5da4de9607cbd219d | Shell | szbjb/7104475_helm | /bitnami-docker-grafana-debian-9/rootfs/run.sh | UTF-8 | 1,373 | 3.3125 | 3 | [] | no_license | #!/bin/bash -e
# Grafana container entrypoint: default the GF_PATHS_* env vars to the
# Bitnami layout (`: "${VAR:=default}"` assigns only when unset/empty),
# seed plugin/db/config files, optionally install plugins listed in
# GF_INSTALL_PLUGINS, then exec grafana-server as PID 1.
: "${GF_PATHS_CONFIG:=/opt/bitnami/grafana/conf/grafana.ini}"
: "${GF_PATHS_DATA:=/opt/bitnami/grafana/data}"
: "${GF_PATHS_LOGS:=/opt/bitnami/grafana/logs}"
: "${GF_PATHS_PLUGINS:=/opt/bitnami/grafana/data/plugins}"
: "${GF_PATHS_PROVISIONING:=/opt/bitnami/grafana/conf/provisioning}"
# Pre-bundle the piechart panel plugin and a baked-in grafana.db/ini.
mkdir -p /opt/bitnami/grafana/data/plugins/grafana-piechart-panel/
cp -avr /opt/grafana-piechart-panel/ /opt/bitnami/grafana/data/plugins/
cp -a /opt/grafana.db /opt/bitnami/grafana/data/
rm -f /opt/bitnami/grafana/conf/grafana.ini
cp -a /opt/grafana.ini /opt/bitnami/grafana/conf/
# GF_INSTALL_PLUGINS is a ','/';'-separated plugin list.
if [[ -n "$GF_INSTALL_PLUGINS" ]]; then
read -r -a gf_plugins_list <<< "$(tr ',;' ' ' <<< "$GF_INSTALL_PLUGINS")"
for plugin in "${gf_plugins_list[@]}"; do
grafana-cli --pluginsDir "$GF_PATHS_PLUGINS" plugins install "$plugin"
done
fi
# exec replaces this shell; extra container args are forwarded via "$@".
exec /opt/bitnami/grafana/bin/grafana-server \
--homepath=/opt/bitnami/grafana/ \
--config="$GF_PATHS_CONFIG" \
cfg:default.log.mode="console" \
cfg:default.paths.data="$GF_PATHS_DATA" \
cfg:default.paths.logs="$GF_PATHS_LOGS" \
cfg:default.paths.plugins="$GF_PATHS_PLUGINS" \
cfg:default.paths.provisioning="$GF_PATHS_PROVISIONING" \
"$@"
| true |
045bc9cf9fe7a895f844e7396853b79cfa8c6d97 | Shell | Bandeira/sps | /releases/rebuild-mingw32.sh | UTF-8 | 159 | 2.578125 | 3 | [] | no_license | #!/bin/bash
# Rebuild the project for Windows with the mingw32 cross-compiler and then
# repackage it via the sibling build script.
SOURCE=../trunk
CURRENT=$(pwd)
# Drop any previously built Windows archives before rebuilding.
rm *windows*.zip
cd ${SOURCE}
make clean
#make distclean
make compiler=mingw32
cd ${CURRENT}
# Sourced (not executed) so it runs in this shell's working directory/env.
. build-mingw32.sh
| true |
8284cb4d2898dd5a384e5ac5db1e1f8ea2ce5540 | Shell | prayagupa/programmer-mattress | /scala.sh | UTF-8 | 999 | 3.65625 | 4 | [] | no_license | DEFAULT_SOURCE_ROOT_JVM="$HOME"
# Root under which JVM tooling tarballs are unpacked.
DEFAULT_INSTALLATION_DEST="/usr/local/"

# Download Scala 2.12.3, unpack it under /usr/local, loosen permissions, and
# append SCALA_HOME/PATH exports to ~/.bash_profile.
# NOTE(review): re-running appends a duplicate block to ~/.bash_profile each
# time, and the `source` below only affects this script's shell, not the
# caller's interactive session — confirm whether that is acceptable.
installScala(){
SCALA_VERSION="2.12.3"
WGET_URL="http://www.scala-lang.org/files/archive/scala-$SCALA_VERSION.tgz"
SCALA_LOCATION_SOURCE="${DEFAULT_SOURCE_ROOT_JVM}/scala-$SCALA_VERSION.tgz"
# --no-clobber: skip the download if the tarball is already present.
wget -O $SCALA_LOCATION_SOURCE --no-clobber $WGET_URL
sudo tar -zxvf ${SCALA_LOCATION_SOURCE} -C ${DEFAULT_INSTALLATION_DEST}
sudo chmod 777 -R ${DEFAULT_INSTALLATION_DEST}/scala-$SCALA_VERSION
# Quoted 'EOF': the block is written literally; $SCALA_HOME/$PATH expand
# later, when ~/.bash_profile is sourced by a login shell.
cat >> ~/.bash_profile <<'EOF'
###############################
########### SCALA #############
###############################
SCALA_HOME=/usr/local/scala-2.12.3
export SCALA_HOME
export PATH=$PATH:$SCALA_HOME/bin
EOF
echo ""
echo "[info] : reloading $HOME/.bash_profile."
echo ""
source ~/.bash_profile
echo "##################################################"
echo "[info] : scala $SCALA_VERSION is installed successfully."
echo "##################################################"
}
# Run the installer immediately when the script is executed.
installScala
| true |
825bd29232c1a7803cb9a0e61dc0310abd695a8b | Shell | oxidecomputer/tock | /tools/build-all-docs.sh | UTF-8 | 2,372 | 4.125 | 4 | [
"MIT",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/usr/bin/env bash
# Builds all of the board documentation into doc/rustdoc.
# The list of boards to build documentation for is generated from
# list_boards.sh.
set -e

# Parse a search-index.js file and print the crate names it indexes, space
# separated on a single line.
# Arguments: $1 - path to a rustdoc search-index.js file
function get_known_crates {
  local index_file=$1
  local found_crates
  # Each documented crate appears as: searchIndex["<crate>"] = ...
  # Fix: the path is now quoted (the original unquoted $FILE broke on paths
  # containing spaces — SC2086).
  found_crates=$(grep -o 'searchIndex\["[a-zA-Z0-9_-]*"\]' "$index_file" | cut -d'"' -f2)
  # Intentionally unquoted: collapses the newline-separated matches into one
  # space-separated line for callers that word-split the result.
  echo $found_crates
}
# Build docs for one board and splice any crates not yet present into the
# combined doc/rustdoc output (files copied, search-index.js entries merged).
# Arguments: $1 - board directory name under boards/
function add_board {
    BOARD=$1
    echo "Building docs for $BOARD"
    pushd boards/$BOARD > /dev/null
    make doc
    TARGET=`make -s show-target`
    popd > /dev/null
    # Crates already merged vs. crates this board's build produced.
    EXISTING_CRATES=$(get_known_crates doc/rustdoc/search-index.js)
    BUILT_CRATES=$(get_known_crates boards/$BOARD/target/$TARGET/doc/search-index.js)
    # Get any new crates: start from the built set (padded with spaces) and
    # delete every crate that already exists, via pattern substitution.
    NEW_CRATES=" ${BUILT_CRATES[*]} "
    for item in ${EXISTING_CRATES[@]}; do
        NEW_CRATES=${NEW_CRATES/ ${item} / }
    done
    # Copy those crates over.
    for item in ${NEW_CRATES[@]}; do
        cp -r boards/$BOARD/target/$TARGET/doc/$item doc/rustdoc/
        # Add the line to the search-index.js file.
        grep "searchIndex\[\"$item\"\]" boards/$BOARD/target/$TARGET/doc/search-index.js >> doc/rustdoc/search-index.js
        # Then need to move `initSearch(searchIndex);` to the bottom.
        #
        # Nothing in-place (i.e. `sed -i`) is safely cross-platform, so
        # just use a temporary file.
        #
        # First remove it.
        grep -v 'initSearch(searchIndex);' doc/rustdoc/search-index.js > doc/rustdoc/search-index-temp.js
        # Then add it again.
        echo "initSearch(searchIndex);" >> doc/rustdoc/search-index-temp.js
        mv doc/rustdoc/search-index-temp.js doc/rustdoc/search-index.js
    done
}
# Build documentation for every board given as an argument. The first board's
# docs become the initial doc/rustdoc tree; the rest are merged in one at a
# time via add_board.
# Arguments: $1..$n - board directory names under boards/
function build_all_docs {
    # Need to build one board to get things started.
    BOARD=$1
    shift
    echo "Building docs for $BOARD"
    pushd boards/$BOARD > /dev/null
    make doc
    TARGET=`make -s show-target`
    popd > /dev/null
    cp -r boards/$BOARD/target/$TARGET/doc doc/rustdoc
    ## Now can do all the rest.
    for BOARD in $*
    do
        echo "Now building for $BOARD"
        add_board $BOARD
    done
}
# Delete any old docs
rm -rf doc/rustdoc
# Get a list of all boards
ALL_BOARDS=$(./tools/list_boards.sh)
# Build documentation for all of them (intentionally unquoted: the board
# list is word-split into individual arguments).
build_all_docs $ALL_BOARDS
# Temporary redirect rule
# https://www.netlify.com/docs/redirects/
cat > doc/rustdoc/_redirects << EOF
# While we don't have a home page :/
/ /kernel 302
EOF
| true |
3fe3bcb886817e1068f08ecdccc0529278344840 | Shell | sgaland/Android-Lab-Devoxx-2013 | /deeper/3-dump_framework.sh | UTF-8 | 712 | 3.21875 | 3 | [] | no_license | #! /bin/bash
# This script retrieve the framework from an Android device and
# decompile it in every format possible.
# Usage: 3-dump_framework.sh <api-level>

# Verify if api-level parameter is present
if [ $# -eq 0 ]
then
    echo "Please invoke this script with api-level argument : [1-17]"
    # Bug fix: $E_NO_ARGS was never defined, so the script exited with
    # status 0 (success) on a usage error. Exit non-zero instead.
    exit 1
fi

# Dump framework .odex from device
adb pull /system/framework/ ./odex

# Decompile .odex in .smali
baksmali --api-level "$1" --deodex ./odex/framework.odex --bootclasspath-dir ./odex/ --output ./smali/

# Recompile .smali in .dex (-p: don't fail on re-runs when dex/ exists)
mkdir -p dex
smali --api-level "$1" --output ./dex/framework.dex ./smali/

# Decompile .dex in .jar
# NOTE(review): the ./jar output directory was never created anywhere in the
# original script; create it so the --output path can be written.
mkdir -p jar
d2j-dex2jar.sh --output ./jar/framework.jar ./dex/framework.dex

# Display .jar in .java
jd-gui ./jar/framework.jar
| true |
ba7e4ec15ceaf41acbf04e582ccd7665493d9887 | Shell | TREND50/gp_daq | /scripts/run_server.sh | UTF-8 | 981 | 3.484375 | 3 | [] | no_license | #!/bin/sh
# Launch (or reuse) a tmux session and start the TREND server inside it.
# Usage: run_server.sh <slc port> <data port> <dump file> <session name>
# Note: this script runs under /bin/sh, so only POSIX constructs are used
# (the original used the bash-only '==' inside '[ ]', which dash rejects).

SELF_PATH=$(realpath "$0")
SELF_DIR=$(dirname "$SELF_PATH")
PROG_DIR=$SELF_DIR/../

if [ $# -ne 4 ]
then
    echo "Usage:" "$0" " <slc port> <data port> <dump file> <session name>"
    # Usage errors now exit non-zero (the original bare `exit` returned 0).
    exit 1
fi

SLC_PORT=$1
DATA_PORT=$2
DUMP_FILE=$3
SESSION_NAME=$4

# Does a tmux session with the requested name already exist?
# stdout is silenced: the original `tmux ls 2>/dev/null` in the `if` leaked
# the session listing to the terminal.
session_exists=0
if tmux ls >/dev/null 2>&1
then
    for i in $(tmux ls | awk -F ':' '{print $1}')
    do
        if [ "$i" = "$SESSION_NAME" ]
        then
            session_exists=1
            #echo "Session exists."
            break
        fi
    done
fi

if [ "$session_exists" -eq 0 ]
then
    # echo "Starting session."
    tmux new -d -s "$SESSION_NAME"
    sleep 0.1   # give the new session a moment to come up
fi

tmux select-pane -t 0
echo "Now starting server."
# Type the cargo command into the pane and press Enter (C-m).
tmux send-keys "cargo run -q --manifest-path $PROG_DIR/Cargo.toml --bin \
trend_server --release -- -a 0.0.0.0 -s ${SLC_PORT} -d ${DATA_PORT} -m 8888 -t ${DUMP_FILE}.yaml -b ${DUMP_FILE}.bin -c ${DUMP_FILE}_data.yaml -v 1" C-m
#tmux send-keys "$PROG_DIR/trend_server 0.0.0.0 ${SLC_PORT} ${DATA_PORT} 8888 $DUMP_FILE" C-m
#sleep .5 # Needed on laptop
| true |
1817a30800f685200368d67b56cf9e93da59c24c | Shell | bowsersenior/cinfes | /deploy/stop-container.sh | UTF-8 | 410 | 3.90625 | 4 | [] | no_license | #!/bin/bash
# Stop and remove the named Docker container; a failure from either command
# simply means there was nothing to clean up, so report it and carry on.
if [[ $# -ne 1 ]]; then
    echo "usage: $(basename $0) <docker container ID or name>"
    echo "    e.g. $(basename $0) foo"
    exit 1
fi

readonly CONTAINER="$1"

if ! docker stop "$CONTAINER" &> /dev/null; then
    echo "No previous container named "$CONTAINER" running"
fi

if ! docker rm "$CONTAINER" &> /dev/null; then
    echo "No previous container named "$CONTAINER" found"
fi
fi | true |
dabaf34eba2b9857d4226ce1a7907302d7f65119 | Shell | FX-Misc/stock | /chart/bin/winRate/simu.sh | UTF-8 | 1,968 | 3.34375 | 3 | [] | no_license | #!/bin/sh
# Run the chart-pattern trade simulations for one stock.
# Usage: simu.sh <stockCode>
stockCode=$1
if [ "$stockCode" = "" ];then
    echo "usage:$0 stockCode"
    exit 0
fi

# calc_simu <rikaku (profit-take)> <songiri (stop-loss)> <stockCode>
# Runs calcSimulation.py for every chart pattern, in both breakout
# directions, over several holding terms. The original listed all twenty
# invocations by hand; this keeps the exact same order via loops.
calc_simu(){
    rikaku=$1
    songiri=$2
    stockCode=$3
    for term in 5 10 15 20;do
        echo $term
        # "<pattern name> <pattern id>" pairs, in the original execution order.
        for pattern in \
            "ascendingTriangle 2" "pennant 3" "descendingTriangle 4" \
            "box 5" "chanelUp 6" "chanelDown 7" \
            "ascendingWedge 9" "descendingWedge 10" \
            "headAndShoulder 11" "headAndShoulderBottom 12";do
            for direction in up down;do
                # $pattern is intentionally unquoted: it expands to two args.
                python calcSimulation.py $pattern $direction $rikaku $songiri $term $stockCode
            done
        done
    done
}

calc_simu 0.05 0.05 $stockCode
calc_simu 0.10 0.10 $stockCode
calc_simu 99999 99999 $stockCode
| true |
e05edde91d38ffc768a73a544d3acac970862bd4 | Shell | Tryweirder/perfmon | /server/bin/ports.sh | UTF-8 | 1,879 | 3.5 | 4 | [] | no_license | cp /etc/resolv.conf "$DESTDIR"/etc/ # fetch doesn't work without this
# Build every port listed in portlist.conf inside the DESTDIR chroot.
# Expects DESTDIR, SLAVEDIR, PORTSDIR, LOGDIR and BUILD_ID to be set by the
# caller, plus a `log <msg> [level]` helper.
cp "$SLAVEDIR"/conf/make.conf "$DESTDIR"/etc # make options for chroot install to work
# A WRKDIRPREFIX=/tmp must be present to the chroot's make.conf

# Copy port config options to DESTDIR, so we can batch-install
mkdir -p "$DESTDIR"/var/db/ports
cp -R "$SLAVEDIR"/ports/options/* "$DESTDIR"/var/db/ports

# Add various mounts for the chroot to work
mount -t devfs devfs "$DESTDIR"/dev # Only needed for jails?
mount -t procfs proc "$DESTDIR"/proc # Only needed for jails?
mkdir -p "$DESTDIR"/usr/ports
mount_nullfs "$PORTSDIR" "$DESTDIR"/usr/ports

# -r: don't let backslashes in the config file be interpreted by read.
while read -r line
do
    # portslist.conf entries must be of the form somecategory/someport
    # Strip comments and leading/trailing whitespaces
    line=`echo $line | sed 's/#.*//; s/^[ \t]*//; s/[ \t]*$//'`
    [ "$line" = "" ] && continue # Skip empty lines and comments
    if [ -d "$PORTSDIR/$line" ]; then
        cd "$PORTSDIR/$line"
        log "Installing $line"
        portname=`echo $line | sed s,.*/,,` # Strip the directory part of the line
        # if ! make install clean DESTDIR="$DESTDIR" > "$LOGDIR/install-$portname-$BUILD_ID.log" 2>&1; then
        # DESTDIR is buggy somehow. Use chroot directly instead.
        if ! chroot "$DESTDIR" /bin/sh -c "cd /usr/ports/$line; WRKDIRPREFIX=/tmp make BATCH=yes install clean; " \
            > "$LOGDIR/install-$portname-$BUILD_ID.log" 2>&1; then
            log "Failed to install $line. See $LOGDIR/install-$portname-$BUILD_ID.log for details." 1
            skip=1
        fi
    else
        # Bug fix: the message referenced $PORTSIR (undefined) instead of
        # $PORTSDIR, so the path was missing from the log line.
        log "$PORTSDIR/$line not found." 3
        umount "$DESTDIR"/dev
        umount "$DESTDIR"/proc
        umount "$DESTDIR"/usr/ports
        exit 1
    fi
done < "$SLAVEDIR"/ports/portlist.conf

# Clean up
# rm -r "$DESTDIR"/tmp/* # make install clean does this?
umount "$DESTDIR"/dev
umount "$DESTDIR"/proc
umount "$DESTDIR"/usr/ports
| true |
c68a64aaf52dac9f599473a47c9ac9c98b26f419 | Shell | danyellw/jenkins-bootstrap-boshrelease | /create-release.sh | UTF-8 | 1,487 | 4.25 | 4 | [] | no_license | #!/bin/bash
# Print command-line help on stderr (plus a trailing blank line on stdout,
# matching the original behaviour) and exit with status 1.
usage() {
    {
        printf '%s\n' "Usage: $(basename $0) <release type> [tarball output filename]"
        printf '%s\n' "    release type must be 'dev' or 'final'"
        printf '%s\n' "    tarball output filename is optional; defaults to:"
        printf '%s\n' "        jenkins-bootstrap-dev.tgz for dev releases"
        printf '%s\n' "        jenkins-bootstrap-final.tgz for final releases"
    } 1>&2
    echo
    exit 1
}
# Print the Jenkins version embedded in the bundled WAR blob's manifest.
# Outputs: the version string on stdout (or an error message).
# NOTE(review): the `exit 1` calls only terminate the command-substitution
# subshell when this is invoked as $(get_jenkins_version) — the caller must
# check the status itself. Also, `$?` below tests the last pipeline stage
# (sed), so a grep miss with sed succeeding is not detected — confirm.
get_jenkins_version() {
    local war=blobs/jenkins/jenkins.war
    local version
    if [[ ! -e ${war} ]]; then
        echo "Jenkins blob file missing; use 'download-blobs' to download"
        exit 1
    fi
    version=$(unzip -p "${war}" META-INF/MANIFEST.MF | grep "^Jenkins-Version: " | sed -e 's#^Jenkins-Version: ##')
    if [[ $? -ne 0 ]]; then
        echo "Jenkins blob file damaged; use 'download-blobs -c' to re-download"
        exit 1
    fi
    # Strip a single trailing whitespace character (manifest lines end in CR).
    version=${version%%[[:space:]]}
    echo "${version}"
}
# Entry point: validate the release type, derive the version string, and run
# `bosh create-release`.
main() {
    if [[ $# -lt 1 ]]; then
        usage "missing arguments"
    fi
    case ${1} in
        dev)
            # Dev releases carry a timestamped version suffix.
            version_suffix="-dev.$(date '+%Y%m%d.%-H%M.%S+%Z')"
            create_args="--force"
            ;;
        final)
            create_args="--final"
            ;;
        -h|--help)
            usage
            ;;
        *)
            echo "ERROR: Invalid release type: ${1}"
            usage
            ;;
    esac
    # Capture the version first so a failure inside get_jenkins_version
    # (which runs in a command-substitution subshell and cannot exit this
    # shell directly) actually aborts the release.
    version="$(get_jenkins_version)" || exit 1
    version="${version}${version_suffix}"
    filename=${2:-jenkins-bootstrap-${1}.tgz}
    # Bug fix: the tarball argument was the garbled "$(unknown)"; it should
    # be the output filename computed above (which was otherwise unused).
    create_args="${create_args} --version=${version} --tarball=${filename}"
    echo "Creating BOSH release with version: ${version}"
    bosh create-release ${create_args}
}

main "$@"
| true |
7c1e3cf68abd54621fda4d7eb4a7b58dbd93c1d6 | Shell | codyn-net/osx | /scripts/bootstrap | UTF-8 | 970 | 2.59375 | 3 | [] | no_license | #!/bin/bash
# Load environment helpers; this defines $brewbuild, $bb, $brew and $b
# (build-brew and runtime-brew checkouts and their binaries) used below.
. $(dirname "$0")/env

# Bootstrap the build-time Homebrew checkout; if no runtime checkout exists
# yet, clone it from the fresh build copy.
if [ ! -f "$brewbuild" ]; then
	git clone https://github.com/mxcl/homebrew "$bb"

	if [ ! -f "$brew" ]; then
		cp -r "$bb" "$b"
	fi
fi

INSTALL=install

# Build-time toolchain, installed as universal (fat i386/x86_64) binaries.
$brewbuild $INSTALL --universal \
	autoconf \
	automake \
	libtool \
	pkg-config \
	xz \
	gettext

$brewbuild link --force autoconf automake libtool pkg-config gettext

if [ ! -f "$brew" ]; then
	git clone https://github.com/mxcl/homebrew "$b"
fi

# Runtime dependencies from the jessevdk/codyn tap; "$@" forwards any extra
# brew flags given to this script.
$brew tap jessevdk/codyn

$brew "$@" $INSTALL --universal \
	xz \
	autoconf \
	automake \
	libtool \
	glib \
	fontconfig \
	freetype \
	libpng \
	python

$brew link --force autoconf automake libtool

$brew "$@" $INSTALL --universal \
	jessevdk/codyn/python3

$brew "$@" $INSTALL --universal --without-x --with-glib cairo
$brew "$@" $INSTALL --universal --with-python3 --with-python jessevdk/codyn/pygobject3

$brew "$@" $INSTALL --universal \
	jessevdk/codyn/json-glib

#$d/scripts/get-sources
| true |
56bf856f91640a18da5101ee7fdbb0b8fdf7cb9d | Shell | effective-shell/dotfiles | /shell.d/git_pull_request.sh | UTF-8 | 1,558 | 4.1875 | 4 | [
"MIT"
] | permissive | # This function pushes the current branch to 'origin'. If a web address is shown
# in the output, it opens it up. On GitHub, GitLab and BitBucket, this means it
# will open the pull request for you!
# Push the current branch to origin, set upstream, open the PR page if possible.
# Inspired by: https://gist.github.com/tobiasbueschel/ba385f25432c6b75f63f31eb2edf77b5
# How to get the current branch: https://stackoverflow.com/questions/1593051/how-to-programmatically-determine-the-current-checked-out-git-branch
# How to open the browser: https://stackoverflow.com/questions/3124556/clean-way-to-launch-the-web-browser-from-shell-script
# Push the current branch to origin and open the PR/MR link the remote
# prints, if any.
gpr() {
    # Colour constants for nicer output (kept local to avoid leaking into
    # the interactive shell this function is sourced into).
    local green='\e[0;32m'
    local reset='\e[0m'

    # Get the current branch name, or use 'HEAD' if we cannot get it.
    local branch
    branch=$(git symbolic-ref -q HEAD)
    branch=${branch##refs/heads/}
    branch=${branch:-HEAD}

    # Pushing take a little while, so let the user know we're working.
    # %b expands the \e colour escapes; %s keeps the branch name literal
    # (fix: the original interpolated data into the printf format string).
    printf 'Opening pull request for %b%s%b...\n' "${green}" "${branch}" "${reset}"

    # Push to origin, grabbing the output but then echoing it back.
    local push_output
    push_output=$(git push origin -u "${branch}" 2>&1)
    printf '\n%s\n' "${push_output}"

    # If there's anything which starts with http, it's a good guess it'll be
    # a link to GitHub/GitLab/Whatever. So open the first link found.
    # Fix: the original `echo ${push_output}` (unquoted) flattened the output
    # onto one line, so 'http.*' also captured everything after the URL.
    local link
    link=$(printf '%s\n' "${push_output}" | grep -o 'http.*' | head -n1 | sed -e 's/[[:space:]]*$//')
    if [ -n "${link}" ]; then
        printf '\nOpening: %b%s%b...\n' "${green}" "${link}" "${reset}"
        python -mwebbrowser "${link}"
    fi
}
| true |
85f58ac6b62e467b282db3403501dece3c1846e2 | Shell | FHead/PhysicsHIJetSDCount | /SDCorrelationStudies/17625_FirstCorrelation/RunJob.sh | UTF-8 | 615 | 2.578125 | 3 | [] | no_license | #!/bin/bash
# Batch-job template: the __DOUBLE_UNDERSCORE__ placeholders (including the
# one inside the mv target below) are substituted by the submission script
# before this file is executed — presumably via sed; confirm with the
# generator in this repo.
WorkDir=__WORKDIR__
SampleName=__SAMPLENAME__
SampleFile=__SAMPLE__
OutputTag=__OUTPUTTAG__

InstallDir=$WorkDir/InstallDir/
SamplesDir=$WorkDir/InstallDir/Samples/$SampleName/smallfiles
ResultDir=$WorkDir/Result/

# Echo the configuration into the job log for easier debugging.
echo "Work dir = $WorkDir"
echo "Sample name = $SampleName"
echo "Sample file = $SampleFile"
echo "Output tag = $OutputTag"
echo "Install dir = $InstallDir"
echo "Sample dir = $SampleDir"
echo "Result dir = $ResultDir"

# Load the analysis environment, run the jet toy, and archive the output.
source $InstallDir/setup.sh
$InstallDir/runFromFile -hard $SamplesDir/$SampleFile -nev 10000
mv JetToyHIResultFromFile.root $ResultDir/Result___OUTPUTTAG__.root
| true |
592a372e0bf2306bf860b9a0dd1e7447491aeee8 | Shell | derrickstolee/git_osx_installer | /assets/uninstall.sh | UTF-8 | 780 | 4.0625 | 4 | [
"MIT"
] | permissive | #!/bin/bash -e
# Refuse to run when the installer's tree is absent — nothing to uninstall.
if [ ! -r "/usr/local/git" ]; then
    echo "Git doesn't appear to be installed via this installer. Aborting"
    exit 1
fi

# Prompt for confirmation unless the caller passed --yes.
if [ "$1" != "--yes" ]; then
    echo "This will uninstall git by removing /usr/local/git/, and symlinks"
    printf "Type 'yes' if you are sure you wish to continue: "
    read response
else
    response="yes"
fi

if [ "$response" == "yes" ]; then
    # remove all of the symlinks we've created
    pkgutil --files com.git.pkg | grep bin | while read f; do
        if [ -L /usr/local/$f ]; then
            sudo rm /usr/local/$f
        fi
    done

    # forget receipts.
    pkgutil --packages | grep com.git.pkg | xargs -I {} sudo pkgutil --forget {}
    echo "Uninstalled"

    # The guts all go here.
    sudo rm -rf /usr/local/git/
else
    echo "Aborted"
    exit 1
fi

exit 0
| true |
e8792adf27eef2b3fa2e76212a70c502056cd3ef | Shell | AlexSonar/prometheus_inegration | /prometheus/installers/install_base_tools.sh | UTF-8 | 4,981 | 3.453125 | 3 | [
"CC0-1.0"
] | permissive | #!/usr/bin/env bash
:'
Automated solution for integrating continuous monitoring systems Prometheus
Ready to go; the example configuration is set up to use Docker-compose
Comprehensive guide available at:
https://alexsonar.github.io/en/continuous-processes/monitoring/prometheus_inegration#top
By default sudo might not installed on Debian adn return the "-bash: sudo: command not found", but you can install it.
First enable su-mode: su -
Copyleft (c) by Alex Sonar 2021
'
# True when a `yum` executable is available on PATH.
function has_yum {
  command -v yum > /dev/null 2>&1
}
# True when an `apt-get` executable is available on PATH.
function has_apt_get {
  command -v apt-get > /dev/null 2>&1
}
# Install the base CLI tooling with whichever package manager the host has;
# exits 1 when neither apt-get nor yum is available.
function install_dependencies {
  # log_info "Installing dependencies"
  # Call the probes directly (the original's `if $(has_apt_get)` relied on
  # the exit status of an empty command substitution).
  if has_apt_get; then
    echo "it is used apt-get"
    # apt-get update
    # apt-get install sudo wget curl nano mc htop vi open-ssh net-tools iputils-ping -y
    # sudo DEBIAN_FRONTEND=noninteractive apt-get install -y awscli curl unzip jq libcap2-bin
    apt update
    apt install apt-file -y
    apt update
    apt install sudo wget -y
    apt install sudo unzip -y
    apt install sudo curl nano vim mc htop net-tools iputils-ping -y
    # apt install systemd -y
  elif has_yum; then
    echo "it is use yum"
    sudo yum update -y
    sudo yum install -y wget nano
    sudo yum install -y curl nano vim mc htop net-tools iputils-ping
    sudo yum install -y awscli curl unzip jq
  else
    # Bug fix: log_error is not defined anywhere in this script, so the
    # error path died with "command not found"; write to stderr directly.
    echo "Could not find apt-get or yum. Cannot install dependencies on this OS." >&2
    exit 1
  fi
}
install_dependencies

# apt install sudo wget curl nano mc htop vi open-ssh net-tools iputils-ping -y
cp /etc/prometheus/installers/.bashrc /root

# Fetch and unpack the Prometheus 2.26.0 release tarball.
wget -O /etc/prometheus/installers/prometheus.tar.gz https://github.com/prometheus/prometheus/releases/download/v2.26.0/prometheus-2.26.0.linux-amd64.tar.gz

sudo mkdir -p /etc/prometheus/installers/bin/
sudo mkdir -p /var/lib/prometheus
# Bug fix: the original used "[ -d ... && ! -L ... ]", which is invalid test
# syntax ('&&' cannot appear inside single brackets), so the test always
# failed and the mkdir branch ran unconditionally.
if [ -d /usr/lib/systemd ]; then echo "Directory /usr/lib/systemd/ exists."; else sudo mkdir /usr/lib/systemd/; fi
if [ -d /usr/lib/systemd/system ]; then echo "Directory /usr/lib/systemd/system/ exists."; else sudo mkdir /usr/lib/systemd/system/; fi
# sudo mkdir /usr/lib/systemd/system/

tar xvfz /etc/prometheus/installers/prometheus*.tar.gz -C /etc/prometheus/installers/bin/
# mv /etc/prometheus/installers/bin/promettheus-* /etc/prometheus/installers/bin/promettheus
# Rename the single extracted "prometheus-<version>" directory to a fixed name.
for x in /etc/prometheus/installers/bin/*;do mv $x /etc/prometheus/installers/bin/prometheus-files;done
# /etc/prometheus/installers/bin/prometheus-files

# Step Create User
sudo groupadd -f prometheus
sudo useradd -g prometheus --no-create-home --shell /bin/false prometheus
# sudo mkdir /etc/prometheus
# sudo mkdir /var/lib/prometheus
sudo chown prometheus:prometheus /etc/prometheus
sudo chown prometheus:prometheus /var/lib/prometheus

# Step Install Prometheus Libraries
sudo cp -r /etc/prometheus/installers/bin/prometheus-files/consoles /etc/prometheus
sudo cp -r /etc/prometheus/installers/bin/prometheus-files/console_libraries /etc/prometheus
sudo cp /etc/prometheus/installers/bin/prometheus-files/prometheus.yml /etc/prometheus/prometheus.yml
sudo cp /etc/prometheus/installers/bin/prometheus-files/prometheus /usr/local/bin/
sudo cp /etc/prometheus/installers/bin/prometheus-files/promtool /usr/local/bin/
# sudo chown -R prometheus:prometheus /etc/prometheus/consoles
# sudo chown -R prometheus:prometheus /etc/prometheus/console_libraries
# sudo chown prometheus:prometheus /etc/prometheus/prometheus.yml
# Fix: create the rules/file-sd directories before chown/chmod — they are
# not part of the release tarball, so the loops below failed on a fresh host.
for i in rules rules.d files_sd; do sudo mkdir -p /etc/prometheus/${i}; done
for i in rules rules.d files_sd; do sudo chown -R prometheus:prometheus /etc/prometheus/${i}; done
for i in rules rules.d files_sd; do sudo chmod -R 775 /etc/prometheus/${i}; done
sudo chown -R prometheus:prometheus /var/lib/prometheus/

# Step 1.6: Setup Service
# sudo vim /usr/lib/systemd/system/prometheus.service
# sudo cp /etc/prometheus/installers/prometheus.service /usr/lib/systemd/system/prometheus.service
# Quoted 'EOF' (bug fix): the unit file must contain a literal $MAINPID and
# literal backslash line continuations; the original unquoted heredoc
# expanded $MAINPID to an empty string at install time.
sudo tee /etc/systemd/system/prometheus.service<<'EOF'
[Unit]
Description=Prometheus
Documentation=https://prometheus.io/docs/introduction/overview/
Wants=network-online.target
After=network-online.target
[Service]
Type=simple
User=prometheus
Group=prometheus
ExecReload=/bin/kill -HUP $MAINPID
ExecStart=/usr/local/bin/prometheus \
  --config.file=/etc/prometheus/prometheus.yml \
  --storage.tsdb.path=/var/lib/prometheus \
  --web.console.templates=/etc/prometheus/consoles \
  --web.console.libraries=/etc/prometheus/console_libraries \
  --web.listen-address=0.0.0.0:9090 \
  --web.external-url=
SyslogIdentifier=prometheus
Restart=always
[Install]
WantedBy=multi-user.target
EOF
# Bug fix: chmod the unit file that was actually written above (the original
# pointed at /usr/lib/systemd/..., which this script never creates).
sudo chmod 664 /etc/systemd/system/prometheus.service

# Step Reload systemd
# sudo systemctl daemon-reload
# sudo systemctl start prometheus
# sudo systemctl status prometheus
service prometheus start
# sudo systemctl enable prometheus.service
# service /usr/lib/systemd/system/prometheus.service start
# . /usr/local/bin/prometheus --config.file=/etc/prometheus/prometheus.yml
| true |
39954e383118dc0ab62e4f6e50f74d21d5b45755 | Shell | Jessicahh7/HiddenHarbor | /herokuUpdate.sh | UTF-8 | 463 | 3.5625 | 4 | [] | no_license | #!/bin/bash
# Interactively commit everything in the working tree and push it to the
# Heroku remote, then report success or failure.
echo "what update would you like to push to heroku?"
read -r message
# Fix: the original's mangled quoting printed "/msg?/"; show the message
# in plain quotes instead.
echo "You are wanting to commit \"$message\"?"
read -r boolean

success=1   # assume failure until the push succeeds
if [ "$boolean" = yes ]
then
    # Fix: the original prefixed each git command with the nonexistent
    # commands cmd1..cmd4, producing "command not found" noise.
    git status
    git add .
    git commit -m "$message"
    git push heroku master
    success=$?   # the push status decides the final report
fi

# Bug fix: the original used "[success = 0]" / "[success > 0]" — missing
# "$", missing spaces and a missing "then" — so neither test ever worked.
if [ "$success" -eq 0 ]
then
    echo "You are all set"
else
    echo "commit unsuccessful"
fi
e75061ce2ef7d7bc965723d7dc5770532ed2f827 | Shell | y010204025/repo | /crawl-tiles/PKGBUILD | UTF-8 | 1,598 | 2.6875 | 3 | [] | no_license | # This is an example PKGBUILD file. Use this as a start to creating your own,
# and remove these comments. For more information, see 'man PKGBUILD'.
# NOTE: Please fill out the license field for your package! If it is unknown,
# then please put 'unknown'.
# Maintainer: Sean Anderson <seanga2@gmail.com>
_srcname=crawl   # upstream repo name; differs from pkgname below
pkgname=crawl-tiles
pkgver=0.21.1
pkgrel=2
epoch=
pkgdesc="Dungeon Crawl Stone Soup with graphical tiles and sound support"
arch=('i686' 'x86_64')
url="https://crawl.develz.org/"
license=('GPL')
# Runtime deps for the SDL2 tiles build with sound.
depends=(
	'sdl2_image'
	'sdl2_mixer'
	'freetype2'
	'lua51'
	'sqlite'
	'glu'
	'ttf-dejavu'
)
makedepends=('advancecomp')
checkdepends=()
optdepends=()
# This package replaces the console-only 'crawl' package.
provides=('crawl')
conflicts=('crawl')
backup=()
options=()
source=("https://github.com/crawl/$_srcname/archive/$pkgver.tar.gz")
md5sums=('634808232f0811c7f16594a8c35d8b72')
prepare() {
	cd "$_srcname-$pkgver/crawl-ref/source"
	# NOTE(review): $_makeflags is never set in this PKGBUILD, so this
	# prints an empty line — confirm it is a leftover.
	echo $_makeflags
	# Crawl reads its release version from this file at build time.
	echo $pkgver > util/release_ver
}

build() {
	cd "$_srcname-$pkgver/crawl-ref/source"
	# TILES=y/SOUND=y select the graphical build; saves go to ~/.crawl.
	make \
		prefix=/usr \
		bin_prefix=bin \
		DESTDIR=$pkgdir \
		SAVEDIR='~/.crawl' \
		LUA_PACKAGE=lua51 \
		TILES=y \
		SOUND=y
}
# Tests cannot be run without a debug build.
# To enable them, add the debug target to build()
#check() {
# cd "$_srcname-$pkgver/crawl-ref/source"
# make -k test \
# prefix=/usr \
# bin_prefix=bin \
# DESTDIR=$pkgdir \
# SAVEDIR='~/.crawl' \
# LUA_PACKAGE=lua51 \
# TILES=y
#}
package() {
	cd "$_srcname-$pkgver/crawl-ref/source"
	# Must repeat the same variables as build() so `make install` lays the
	# files out identically under $pkgdir.
	make install \
		prefix=/usr \
		bin_prefix=bin \
		DESTDIR=$pkgdir \
		SAVEDIR='~/.crawl' \
		LUA_PACKAGE=lua51 \
		TILES=y \
		SOUND=y
}
| true |
ea28622d99a340a431f95218d61eafcb73b2bde2 | Shell | guptarah/bi_addiction_scripts | /get_unique_codes | UTF-8 | 198 | 2.6875 | 3 | [] | no_license | #! /bin/bash
# script to get the unique codes over all the annotations
annotations_dir=$1 # give the name of the directory given by BO
cat $annotations_dir/*JD* | cut -f6 | sort -u > MISC_codes
| true |
fa3504437eb8d3c410d7fbedc2b3624ecc0ae6e7 | Shell | sasmitaA/sagdevops-templates | /templates/sag-um-server/entrypoint.sh | UTF-8 | 1,950 | 3.0625 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | #!/bin/sh
$SAG_HOME/register.sh
UM_INSTANCE_NAME=${UM_INSTANCE_NAME:-default}
UM_REALM=${UM_REALM:-default}
UM_INSTANCE_PORT=${UM_INSTANCE_PORT:-9000}
# Define all of the usual realm server options, except:
# The realm name which is parameterised from the environment
# Logging which goes to stdout in order to be captured by Docker's logging system
# SERVER_OPTS_EXTRA from the environment
SERVER_OPTS="-DDATADIR=server/$UM_INSTANCE_NAME/data
-DREALM=$UM_REALM
-DSERVERDIR=server/$UM_INSTANCE_NAME
-DADAPTER_0=nhp://0.0.0.0:$UM_INSTANCE_PORT
-DLICENCE_DIR=server/license/
-DLICENCE_FILE=licence.xml
-DLOGFILE=/dev/stdout
-Djavax.net.ssl.trustStore=server/$UM_INSTANCE_NAME/bin/nirvanacacerts.jks
-Djavax.net.ssl.trustStorePassword=nirvana
-Djavax.net.ssl.keyStore=server/$UM_INSTANCE_NAME/bin/server.jks
-Djavax.net.ssl.keyStorePassword=nirvana
-Djava.protocol.handler.pkgs=com.sun.net.ssl.internal.www.protocol
-Dcom.sun.management.jmxremote
-Djava.library.path=lib/
-DLOGLEVEL=4
-XX:MaxDirectMemorySize=1G
$SERVER_OPTS_EXTRA"
# The first time the server is run, the data will be a total blank slate. We can live with that, except we want to restore the default *@*
# full ACL.
if [[ ! -e server/$UM_INSTANCE_NAME/data ]]
then
mkdir server/$UM_INSTANCE_NAME/bin
echo '*@*' > server/$UM_INSTANCE_NAME/bin/secfile.conf
SERVER_OPTS="$SERVER_OPTS -DSECURITYFILE=server/$UM_INSTANCE_NAME/bin/secfile.conf"
fi
####
# Starting the realm server in the container in foreground
####
java $SERVER_OPTS com.pcbsys.nirvana.server.Server $@ &
# Ensure that 'docker stop' performs a clean server shutdown
export SERVER_PID=$!
trap "rm server/$UM_INSTANCE_NAME/data/RealmServer.lck; wait $SERVER_PID" SIGTERM
wait $SERVER_PID
| true |
bf1741f930f8ffd1f60ada3168a82c53f38f0467 | Shell | rapel/laurentvanacker.com | /Powershell/Remoting over SSH/2 - From Ubuntu To Windows/1 - Ubuntu/Configure-SSH.sh | UTF-8 | 1,102 | 3.03125 | 3 | [] | no_license | PrivateSSHRSAKey=~/.ssh/id_rsa
PublicSSHRSAKey=${PrivateSSHRSAKey}.pub
WindowsUser=CONTOSO\\administrator
WindowsServer=win10.mshome.net
LinuxUser=whoami
Passphrase=""
#Dedicated authorized file per user
AuthorizedKeys=.ssh/authorized_keys
#shared authorized file for administrators
#AuthorizedKeys="%ProgramData%\ssh\administrators_authorized_keys"
sudo apt install xclip -y
rm $PublicSSHRSAKey, $PrivateSSHRSAKey -f
ssh-keygen -f $PrivateSSHRSAKey -t rsa -q -N "$Passphrase"
#For testing the SSH connection
#ssh -o StrictHostKeyChecking=no $WindowsUser@$WindowsServer
scp -o StrictHostKeyChecking=no $PublicSSHRSAKey $WindowsUser@$WindowsServer:${LinuxUser}_rsa.pub
ssh -o StrictHostKeyChecking=no $WindowsUser@$WindowsServer "type ${LinuxUser}_rsa.pub >> $AuthorizedKeys && net stop sshd && net start sshd && del ${LinuxUser}_rsa.pub"
#Copy the line into the clipboard and just paste it in a PowerShell Core host. It should work like a charm :)
echo "Invoke-Command -ScriptBlock { \"Hello from \$(hostname)\" } -UserName $WindowsUser -HostName $WindowsServer" | xclip -selection clipboard
pwsh | true |
d42e56833209dcce14f39bb01e9aed4f1e964288 | Shell | tudasc/MetaCG | /cgcollector/test/run_format_two_test.sh | UTF-8 | 3,345 | 3.640625 | 4 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/env bash
# Pull in shared helpers and variables (testerExe, cgcollectorExe,
# cgmergeExe, build_dir, applyFileFormatTwo*, ...).
. ./testBase.sh

#if [ command -v $testerExe ]; then
# NOTE(review): '[[ $(type -P x) ]]' is true when the binary IS on PATH, so
# this message fires in the opposite case from what it says — confirm the
# intended condition.
if [[ $(type -P $testerExe) ]]; then
  echo "The CGSimpleTester binary (cgsimpletester) could not be found in path, testing with relative path."
fi

# Fall back to the build-tree binary; abort when it is missing too.
stat ../../${build_dir}/cgcollector/test/mcgtester >>log/testrun.log 2>&1
if [ $? -eq 1 ]; then
  echo "The file seems also non-present in ../${build_dir}/test. Aborting test. Failure! Please build the tester first."
  exit 1
else
  testerExe=../../${build_dir}/cgcollector/test/mcgtester
fi

if [[ $(type -P $cgcollectorExe) ]]; then
  echo "No cgcollector in PATH. Trying relative path ../${build_dir}/tools"
fi

stat ../../${build_dir}/cgcollector/tools/cgcollector >>log/testrun.log 2>&1
if [ $? -eq 1 ]; then
  echo "The file seems also non-present in ../${build_dir}/tools. Aborting test. Failure! Please build the collector first."
  exit 1
else
  cgcollectorExe=../../${build_dir}/cgcollector/tools/cgcollector
fi

if [[ $(type -P $cgmergeExe) ]]; then
  echo "No cgcollector in PATH. Trying relative path ../${build_dir}/tools"
fi

stat ../../${build_dir}/cgcollector/tools/cgmerge >>log/testrun.log 2>&1
if [ $? -eq 1 ]; then
  echo "The file seems also non-present in ../${build_dir}/tools. Aborting test. Failure! Please build the collector first."
  exit 1
else
  cgmergeExe=../../${build_dir}/cgcollector/tools/cgmerge
fi

# Multi-file tests
multiTests=(0042 0043 0044 0050 0053 0060)

# Global failure counter, incremented by every test section below.
fails=0
# Single File
# Helper (DRY fix: the original repeated this loop five times verbatim).
# Runs applyFileFormatTwoToSingleTU over every file matching a glob,
# accumulating failures in the global 'fails' counter, then prints the
# running total. Arguments after the glob are forwarded to the helper.
runSingleTUSuite() {
  local testGlob=$1
  shift
  local tc fail
  # Unquoted on purpose: the glob pattern expands here.
  for tc in ${testGlob}; do
    echo "Running test ${tc}"
    applyFileFormatTwoToSingleTU ${tc} "$@"
    fail=$?
    fails=$((fails + fail))
  done
  echo "Single file test failures: $fails"
}

echo " --- Running single file tests [file format version 1.0]---"
echo " --- Running basic tests ---"
runSingleTUSuite "./input/singleTU/*.cpp"

# Single File and full Ctor/Dtor coverage
echo -e "\n --- Running single file full ctor/dtor tests ---"
runSingleTUSuite "./input/allCtorDtor/*.cpp" "--capture-ctors-dtors"

# Single File and functionPointers
echo -e "\n --- Running single file functionPointers tests ---"
runSingleTUSuite "./input/functionPointers/*.cpp"

# Single File metaCollectors
echo -e "\n --- Running single file metaCollectors tests ---"
runSingleTUSuite "./input/metaCollectors/numStatements/*.cpp"

# Single File virtualCalls
echo -e "\n --- Running single file virtualCalls tests ---"
runSingleTUSuite "./input/virtualCalls/*.cpp"
# Multi File
echo -e "\n --- Running multi file tests ---"
for tc in "${multiTests[@]}"; do
  echo "Running test ${tc}"
  # Input files
  applyFileFormatTwoToMultiTU ${tc}
  fail=$?
  fails=$((fails + fail))
done
# NOTE(review): "failuers"/"occured" typos are in the emitted strings; left
# untouched here in case anything greps the output.
echo "Multi file test failuers: $fails"

echo -e "$fails failures occured when running tests"
# NOTE(review): shell exit codes are mod 256 — 256 failures would read as 0.
exit $fails
| true |
11d78b2e2f834d804f398d2e06d180b24311237f | Shell | d-cameron/scripts | /sbp/check_shallow_runs_status | UTF-8 | 1,064 | 3.25 | 3 | [] | no_license | #!/usr/bin/env bash
YYMMDD=$(date +'%y%m%d')
# Runs already archived here are considered done and skipped.
archive_path="/data/data_archive/shallow_seq_pipelines"
# QC-failed runs downloaded here are reported as DOWNLOADED.
qcfail_path="/data/schuberg/qc_fail_runs"

## query and store
echo "## ShallowSeq Check (${YYMMDD})"
# Fetch "name status" pairs for all ShallowSeq runs from the SBP API and
# classify each; the whole loop's output is sorted at the end. Note the loop
# runs in a pipeline subshell, so no variables escape it.
query_sbp_api -type runs -filter 'ini=ShallowSeq.ini' -json | jq -r '.[] | "\(.name) \(.status)"' | while read line; do
    run_name=$(echo "${line}" | cut -d" " -f1)
    run_status=$(echo "${line}" | cut -d" " -f2)
    # Skip known test/validation runs and anything already archived.
    if [[ "${run_name}" =~ "190314-testR" ]]; then continue
    elif [[ "${run_name}" =~ "_HMFregVAL_" ]]; then continue
    elif [[ -d "${archive_path}/${run_name}" ]]; then continue
    elif [[ "${run_status}" =~ ^(Waiting|Pending|Processing)$ ]]; then echo "## PROCESSING: ${run_name} (status=${run_status})"
    elif [[ "${run_status}" =~ Failed ]]; then echo "## FAILED: ${run_name} (status=${run_status})"
    elif [[ -d "${qcfail_path}/${run_name}" ]]; then echo "## DOWNLOADED: ${run_name} (status=${run_status}, location=${qcfail_path})"
    # Anything else is actionable: print a ready-to-paste command line.
    else echo " process_shallow_run -s ${run_name} # (status=${run_status})"
    fi
done | sort
| true |
15be70be819bab6b8aefcecbe6ad200ac8ee7ef1 | Shell | bayugn/premvps | /menur.sh | UTF-8 | 1,498 | 3.640625 | 4 | [] | no_license | #!/bin/bash
# Debian 9 & 10 64bit
# Ubuntu 18.04 & 20.04 bit
# Centos 7 & 8 64bit
# By GilaGajet
# ==================================================
red='\e[1;31m'
green='\e[0;32m'
NC='\e[0m'
MYIP=$(wget -qO- ifconfig.co);
echo "Checking VPS"
# Look this host's public IP up in the remote allow-list.
IZIN=$( curl https://raw.githubusercontent.com/gilagajet/premvps/main/ipvps.conf | grep "$MYIP" )
# Fix: both sides quoted — the original unquoted test threw a syntax error
# (and fell through) whenever the curl lookup returned an empty string.
if [ "$MYIP" = "$IZIN" ]; then
echo -e "${green}Permission Accepted...${NC}"
else
echo -e "${red}Permission Denied!${NC}";
echo "Only For Premium Users"
exit 0
fi
clear
#!/bin/bash
cat /usr/bin/bannermenu | lolcat
echo -e " ===================================================="| lolcat
echo -e " "| lolcat
echo -e " AutoScript By gilagajet.com" | lolcat
echo -e " "| lolcat
echo -e " [1] Vmess/Vless/Trojan" | lolcat
echo -e " [2] Shadowsocks/ShadowsocksR" | lolcat
echo -e " [3] Wireguard" | lolcat
echo -e " [4] Update Script" | lolcat
echo -e " [x] Exit" | lolcat
echo -e " "| lolcat
read -p " Select From Options [1-4 or x] : " menu
echo -e " "| lolcat
echo -e "[*][*][*]======================================[*][*][*]" | lolcat
clear
case $menu in
1)
vvt
;;
2)
shadowsocks
;;
3)
wireguard
;;
4)
update
;;
x)
clear
exit
;;
*)
# Fix: this message was unreachable after `exit` in the original and there
# was no default branch; invalid input now reports properly.
echo "Please enter an correct number"
;;
esac
| true |
0aa0ca233818150c697e2290b1cb915379e20bf7 | Shell | azuwis/debian-repo | /hooks/A10append-distname-to-debian-version | UTF-8 | 280 | 2.546875 | 3 | [] | no_license | #!/bin/bash
#apt-get install -y "${APTGETOPT[@]}" lsb-release devscripts
# pbuilder hook: runs inside the build chroot before the package is built.
# Record the distribution codename (e.g. "buster") of the build environment.
codename=$(lsb_release --short --codename)
# Enter the unpacked source tree; the glob matches /tmp/buildd/<pkg>-<ver>/.
cd /tmp/buildd/*/debian/..
# Append a maintainer-maintained changelog entry targeting this distribution.
debchange --append --maintmaint --distribution "${codename}" "Build against ${codename}"
# Suffix the version on the first changelog line with ~<codename> so
# per-distribution rebuilds of the same upstream version sort correctly.
sed -i '1s/)/~'${codename}')/' debian/changelog
| true |
8f5e7b9bc01d21f20ea5e4c438bcbcc50f17ecbd | Shell | moodlehq/moodle-local_amos | /jobs/run | UTF-8 | 430 | 3.765625 | 4 | [] | no_license | #!/bin/bash -e
# This is a wrapper to run AMOS jobs from Jenkins. Jenkins is supposed to provide environment variable
# AMOSJOBSROOT with the full path to the folder containing the job scripts.
# A job name is mandatory.
if [[ -z $1 ]]; then
    echo "Usage: $0 [jobname]" >&2
    exit 1;
fi
# Resolve the job script inside AMOSJOBSROOT and make sure it is runnable.
JOBPATH=${AMOSJOBSROOT}/$1
if [[ ! -x $JOBPATH ]]; then
    echo "Job $1 not found or not executable" >&2
    exit 2;
fi
# Run the job as the AMOSRUNAS account, preserving the environment (-E) and
# setting HOME (-H); "${@:2}" forwards every argument after the job name.
sudo -E -H -u ${AMOSRUNAS} $JOBPATH "${@:2}"
| true |
7730d34a795dd965e8dfd909681e28c08992f011 | Shell | djw8605/condor_log_analyze | /CondorAnalyze/makereport.sh | UTF-8 | 480 | 3.21875 | 3 | [] | no_license | #!/bin/sh
# Parse the Condor log ($1) into a LaTeX fragment; abort (non-zero) if parsing
# fails. FIX: the original ran the parser, then tested $? and called a bare
# `exit`, which exited with status 0 (the status of the successful test).
if ! /usr/bin/python ParseLog.py -l "$1" > parse.txt
then
exit 1
fi
# Assemble the report from the template plus the parsed output.
cp report-template.tex report.tex
#echo "\\begin{center} \\textbf{Percentage = $3\\%} \\end{center}" >> report.tex
cat parse.txt >> report.tex
#echo "\end{verbatim} \end{document}" >> report.tex
echo "\end{document}" >> report.tex
# Run LaTeX twice so cross-references settle.
pdflatex report.tex
pdflatex report.tex
# Optional second argument: e-mail address to send the report to.
if [ "$#" -gt 1 ]
then
echo "Report for $1" | mutt -a report.pdf -a SubHist.png -a sites.png -s "Report" "$2"
fi
| true |
6621e9b54fd2e1ad5c91e333ec1b9519cb2ae81f | Shell | arpruss/arpruss-apv | /pdfview/scripts/build-native.sh | UTF-8 | 2,006 | 3.296875 | 3 | [] | no_license | #!/bin/sh
# Cross-compile the native dependencies (freetype, jpeg, mupdf) for Android
# and stage their outputs under jni/, then build the NDK app.
SCRIPTDIR=`dirname $0`
. $SCRIPTDIR/android-gcc-setup.sh
cd $SCRIPTDIR/..
if [ ! -d "deps" ]
then
mkdir -p deps
fi
cd deps
DEPSDIR=$PWD
JNIDIR=$DEPSDIR/../jni
if [ ! -d "$JNIDIR/pdfview2/lib" ]
then
mkdir -p $JNIDIR/pdfview2/lib
fi
if [ ! -d "$JNIDIR/pdfview2/include" ]
then
mkdir -p $JNIDIR/pdfview2/include
fi
cd $DEPSDIR
# Source tarball names and the directories they unpack to.
MUPDFSRC="mupdf-latest.tar.gz"
FTSRC="freetype-2.3.11.tar.bz2"
JPEGSRC="jpegsrc.v8a.tar.gz"
MUPDF=$DEPSDIR/mupdf
FT=$DEPSDIR/freetype-2.3.11
JPEG=$DEPSDIR/jpeg-8a
# Download each tarball only if it is not already present.
echo "Downloading sources."
if [ ! -e "$MUPDFSRC" ]
then
echo "Downloading mupdf..."
wget http://ccxvii.net/mupdf/download/$MUPDFSRC -O $MUPDFSRC
fi
if [ ! -e "$FTSRC" ]
then
echo "Downloading freetype..."
wget http://mirror.lihnidos.org/GNU/savannah/freetype/$FTSRC -O $FTSRC
fi
if [ ! -e "$JPEGSRC" ]
then
echo "Downloading jpeg..."
wget http://www.ijg.org/files/$JPEGSRC -O $JPEGSRC
fi
# Always unpack fresh copies.
rm -rf $MUPDF $FT $JPEG
tar -xzvf $MUPDFSRC
tar -xjvf $FTSRC
tar -xzvf $JPEGSRC
# Build a static freetype with the cross toolchain and stage lib + headers.
cd $FT
sed -i -e '/^FT_COMPILE/s/\$(ANSIFLAGS) //' builds/freetype.mk
./configure --prefix="$FT/install" --host=arm-linux --disable-shared --enable-static
make
make install
\cp install/lib/libfreetype.a $JNIDIR/pdfview2/lib
\cp -rf include/* $JNIDIR/pdfview2/include
# libjpeg is compiled by the NDK build: just stage its sources.
cd $JPEG
\cp *.c $JNIDIR/jpeg
\cp *.h $JNIDIR/jpeg
\cp jconfig.txt $JNIDIR/jpeg/jconfig.h
# Clear the cross-compile variables exported by android-gcc-setup.sh before
# the host-side mupdf tool build below.
unset CFLAGS
unset CC
unset CPPFLAGS
unset LDFLAGS
# Stage the mupdf sources and generate the embedded font files.
cd $MUPDF
\cp mupdf/*.c $JNIDIR/mupdf/mupdf
\cp mupdf/*.h $JNIDIR/mupdf/mupdf
\cp fitz/*.c $JNIDIR/mupdf/fitz
\cp fitz/*.h $JNIDIR/mupdf/fitz
\cp fitzdraw/*.c $JNIDIR/mupdf/fitzdraw
\cp -rf apps $JNIDIR/mupdf
\cp -rf cmaps $JNIDIR/mupdf
\cp -rf fonts $JNIDIR/mupdf
cd $JNIDIR/mupdf/mupdf
make -f APV.mk font_files
cd $DEPSDIR/..
# Register the app with the NDK (assumes $NDK is set by android-gcc-setup.sh
# -- TODO confirm) and point APP_PROJECT_PATH at this checkout.
if [ ! -d "$NDK/apps/pdfview" ]
then
mkdir -p $NDK/apps/pdfview
fi
\cp -rf ndk-app/Application.mk $NDK/apps/pdfview/Application.mk
sed -i -e "/APP_PROJECT_PATH/s|\/cygdrive.*|$PWD|g" $NDK/apps/pdfview/Application.mk
cd $NDK
make APP=pdfview
| true |
f621f3758386a41c8add055cbca8ab385da46941 | Shell | AldaronLau/jl_lib | /compile-scripts/jl_android_.sh | UTF-8 | 2,970 | 3 | 3 | [] | no_license | #!/bin/sh
#printf "[JL/ANDR] exporting your program to android....\n"
#sh androidbuild.sh com.company.app src/*.c
#printf "[J/ANDR] exported your program to android!\n"
args=("$@")
DEBUG=${args[0]}
JLL_PATH=${args[1]}
PROGNAME=${args[2]}
PACKNAME=${args[3]}
USERNAME=${args[4]}
USERPROG=${args[5]}
NDK_PATH=${args[6]}
SDK_PATH=${args[7]}
ANDROID_PROJECT=${args[8]}
ANDROID_HOME=${args[9]}
IS_DEBUG=${args[10]}
export ANDROID_HOME
printf "JLL_PATH = $JLL_PATH\n"
printf "DEBUG = $DEBUG\n"
printf "PROGNAME = $PROGNAME\n"
printf "PACKNAME = $PACKNAME\n"
printf "USERNAME = $USERNAME\n"
printf "USERPROG = $USERPROG\n"
printf "NDK_PATH = $NDK_PATH\n"
printf "SDK_PATH = $SDK_PATH\n"
printf "ANDROID_PROJECT = $ANDROID_PROJECT\n"
printf "ANDROID_HOME = $ANDROID_HOME\n"
cp $JLL_PATH/android-src/AndroidManifest.xml \
$ANDROID_PROJECT/AndroidManifest.xml
rm -f $ANDROID_PROJECT/res/values/strings.xml
cp $JLL_PATH/android-src/strings.xml \
$ANDROID_PROJECT/res/values/strings.xml
cp $JLL_PATH/android-src/build.xml \
$ANDROID_PROJECT/build.xml
sed -i "s|JLR_USERNAME|$USERNAME|g" \
$ANDROID_PROJECT/build.xml
sed -i "s|JLR_APPNAME|$PACKNAME|g" \
$ANDROID_PROJECT/build.xml
sed -i "s|JLR_APPNAME|$PROGNAME|g" \
$ANDROID_PROJECT/res/values/strings.xml
sed -i "s|JLR_USERNAME|$USERNAME|g" \
$ANDROID_PROJECT/AndroidManifest.xml
sed -i "s|JLR_APPNAME|$PACKNAME|g" \
$ANDROID_PROJECT/AndroidManifest.xml
sed -i "s|JLR_IS_DEBUG|$IS_DEBUG|g" \
$ANDROID_PROJECT/AndroidManifest.xml
rm -f -r $ANDROID_PROJECT/src/jlw/
mkdir -p $ANDROID_PROJECT/src/jlw/$USERNAME/$PACKNAME/
cp \
$JLL_PATH/android-src/jl_Activity.java \
$ANDROID_PROJECT/src/jlw/$USERNAME/\
$PACKNAME/jl_Activity.java
sed -i "s|JLR_USERNAME|$USERNAME|g" \
$ANDROID_PROJECT/src/jlw/$USERNAME/\
$PACKNAME/jl_Activity.java
sed -i "s|JLR_APPNAME|$PACKNAME|g" \
$ANDROID_PROJECT/src/jlw/$USERNAME/\
$PACKNAME/jl_Activity.java
printf "[JL/ANDR] setting up files....\n"
if [ ! -e build/android-release-key.keystore ];then
printf "[JL/ANDR] Jarsigner key not found. For android you must create\n"
printf "[JL/ANDR] a key. Create your key for jarsigner:\n"
keytool -sigalg SHA1withRSA -keyalg RSA -keysize 1024 -genkey -keystore build/android-release-key.keystore -alias daliasle -validity 3650
fi
rm -f -r $ANDROID_PROJECT/jni/src/C/gen/src/
mkdir $ANDROID_PROJECT/jni/src/C/gen/src/
cp --recursive -t $ANDROID_PROJECT/jni/src/C/gen/src/ `find src/*`
cp build/android-release-key.keystore $ANDROID_PROJECT/
cp media/icon.png $ANDROID_PROJECT/res/drawable/prgm_icon.png
sudo $SDK_PATH/platform-tools/adb kill-server
printf "[JL/ANDR] compiling....\n"
cd $ANDROID_PROJECT
export NDK_MODULE_PATH="$PWD"/jni
echo $NDK_MODULE_PATH
$NDK_PATH/ndk-build $DEBUG && ant clean release
printf "[JL/ANDR] signing jar with jarsigner....\n"
jarsigner -verbose -tsa http://timestamp.digicert.com -sigalg SHA1withRSA -digestalg SHA1 -keystore android-release-key.keystore bin/jlw.$USERNAME.$PACKNAME-release-unsigned.apk daliasle
| true |
674a29c9066f72ebf8c99f02a455f6d424e99c93 | Shell | macintacos/dotfiles | /setup/tools/kube | UTF-8 | 1,064 | 3.28125 | 3 | [] | no_license | #!/usr/bin/env bash
# Install kubectl, krew (+ ctx/ns plugins) and minikube for macOS arm64.
export PATH="$PWD/setup:$PATH"
log info "Setting up kubernetes things..."
log info "Kubectl first..."
# Download the latest stable kubectl (darwin/arm64) and install it.
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/arm64/kubectl"
chmod +x kubectl
sudo mv ./kubectl /usr/local/bin/kubectl
log info "Krew next (https://krew.sigs.k8s.io/docs/user-guide/setup/install/)"
# Subshell keeps the temp dir / tracing local; the && chain aborts on failure.
(
  set -x
  cd "$(mktemp -d)" &&
    OS="$(uname | tr '[:upper:]' '[:lower:]')" &&
    ARCH="$(uname -m | sed -e 's/x86_64/amd64/' -e 's/\(arm\)\(64\)\?.*/\1\2/' -e 's/aarch64$/arm64/')" &&
    KREW="krew-${OS}_${ARCH}" &&
    curl -fsSLO "https://github.com/kubernetes-sigs/krew/releases/latest/download/${KREW}.tar.gz" &&
    tar zxvf "${KREW}.tar.gz" &&
    ./"${KREW}" install krew
)
# Make the freshly installed krew shims visible to this shell.
export PATH="$HOME/.krew/bin:$PATH"
log info "Now installing some plugins..."
kubectl krew install ctx
kubectl krew install ns
log info "Now installing minikube..."
curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube-darwin-arm64
sudo install minikube-darwin-arm64 /usr/local/bin/minikube
| true |
631a85ca9556c3f37dabdc647bdfb91d11f70c99 | Shell | stevegrunwell/dotfiles | /install.sh | UTF-8 | 5,067 | 4.375 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env zsh
#
# Install the dotfiles into a user's home directory.
#
# USAGE
#
#   install.sh [-d|--debug]
#
# OPTIONS
#
#   -d, --debug  Display debug information.
#   -h, --help   Show the help screen.
#   -u, --unattended  Do not attempt steps that require user interaction.
set -e
# The directory this script lives in (zsh modifiers: :a = absolute path, :h = dirname).
dotfiles_dir=$0:a:h
# The current script name (:t = basename).
script_name=$0:a:t
# Set up colors.
color_cyan="\033[0;36m"
color_green="\033[0;32m"
color_red="\033[0;31m"
color_reset="\033[0;0m"
color_yellow="\033[0;33m"
bold="$(tput bold)"
nobold="$(tput sgr0)"
# Options (toggled by the argument loop below).
debug_mode=0
unattended=0
# Print the usage instructions.
function print_usage {
  # Emit the usage help on stdout; ${bold}/${nobold} and ${script_name}
  # expand inside the (unquoted-delimiter) here-doc.
  cat <<EOT
Install the dotfiles into a user's home directory.
${bold}USAGE${nobold}
  ${script_name} [-d|--debug] [-u|--unattended]
${bold}OPTIONS${nobold}
  -d, --debug       Display debug information.
  -h, --help        Show this help screen.
  -u, --unattended  Do not attempt steps that require user interaction.
EOT
}
# Symlink a local file into the user's home directory, making a backup if the original
# is a regular file.
#
# Usage:
# safe-symlink <source_file> <target_file>
#
safe-symlink() {
  # ${N:?msg} aborts with `msg` when the argument is missing or empty.
  source_file=${1:?Invalid source file}
  target_file=${2:?Invalid target file}
  # If a real file exists, back it up with the current timestamp.
  # `! -h` excludes symlinks, so a previously installed link is simply replaced.
  if [[ -f ~/${target_file} && ! -h ~/${target_file} ]]; then
    backup_file="${target_file}-backup-$(date +"%F_%T")"
    debug "Backing up $HOME/${target_file} to $HOME/${backup_file}"
    mv "$HOME/${target_file}" "$HOME/${backup_file}"
  fi
  # Symlink our version into place (-f overwrites an existing link).
  debug "Symlinking $HOME/${target_file} => ${dotfiles_dir}/${source_file}"
  ln -sf "${dotfiles_dir}/${source_file}" "$HOME/${target_file}"
}
# Output helpers
debug() {
  # Print a cyan DEBUG-prefixed line, but only when --debug was requested.
  if [[ $debug_mode -ne 1 ]]; then
    return 0
  fi
  printf "${color_cyan}DEBUG: %s${color_reset}\n" "$1"
}
error() {
  # Red-tagged error line. The colour codes stay inside the printf format
  # string so their backslash escapes are interpreted.
  local message="$1"
  printf "${color_red}[ERROR]${color_reset} %s\n" "${message}"
}
step() {
  # Announce a top-level installation step with a bold cyan bullet.
  local label="$1"
  printf "${bold}${color_cyan}‣${color_reset} %s${nobold}\n" "${label}"
}
warn() {
  # Non-fatal warning with a yellow [WARNING] tag.
  local message="$1"
  printf "${color_yellow}[WARNING]${color_reset} %s\n" "${message}"
}
# Parse arguments
# Walk the argument list; unknown arguments are silently dropped by the
# default arm so stray values cannot wedge the loop.
while [ $# -gt 0 ]; do
  case "$1" in
    -d|--debug)
      debug_mode=1
      shift
      ;;
    -h|--help)
      print_usage
      exit 0
      ;;
    -u|--unattended)
      unattended=1
      shift
      ;;
    *)
      shift
      ;;
  esac
done
# Create an empty .config directory in the home directory.
mkdir -p ~/.config/git
# Install Oh My Zsh (if not already present)
if [ ! -d ~/.oh-my-zsh ]; then
  step 'Installing Oh My Zsh'
  sh -c "$(curl -fsSL https://raw.githubusercontent.com/ohmyzsh/ohmyzsh/master/tools/install.sh)" "" --unattended
fi
# Replace ~/.zshrc with the version from this repository
safe-symlink oh-my-zsh/.zshrc .zshrc
# Clone external repos.
step 'Downloading third-party Oh My Zsh plugins'
if [ ! -d oh-my-zsh/custom/plugins/zsh-nvm ]; then
  debug 'https://github.com/lukechilds/zsh-nvm'
  git clone https://github.com/lukechilds/zsh-nvm "${dotfiles_dir}/oh-my-zsh/custom/plugins/zsh-nvm"
fi
# Prevent login messages by adding an empty .hushlogin file to the user's home directory.
debug 'Creating an empty ~/.hushlogin file'
touch ~/.hushlogin
# Custom git configuration.
step 'Applying Git configuration'
safe-symlink git/.gitconfig .gitconfig
safe-symlink git/ignore .config/git/ignore
# Composer configuration.
step 'Preparing Composer'
mkdir -p ~/.composer
safe-symlink composer/config.json .composer/config.json
# Node configuration.
step 'Preparing NodeJS + npm'
safe-symlink npm/.npmrc .npmrc
# RubyGems configuration.
step 'Preparing RubyGems'
safe-symlink ruby/.gemrc .gemrc
# App preferences
step 'Applying application preferences'
safe-symlink "Preferences/Code/settings.json" "Library/Application Support/Code/User/settings.json"
# iTerm2 preferences: http://stratus3d.com/blog/2015/02/28/sync-iterm2-profile-with-dotfiles-repository/
debug "Configuring iTerm2 to read preferences from ${dotfiles_dir}/Preferences/iTerm2"
if defaults write com.googlecode.iterm2.plist PrefsCustomFolder -string "${dotfiles_dir}/Preferences/iTerm2" &> /dev/null; then
  defaults write com.googlecode.iterm2.plist LoadPrefsFromCustomFolder -bool true
else
  warn 'Unable to update iTerm preferences'
fi
# Custom sudo configuration
if [[ ! -f /etc/sudoers.d/vagrant_hostsupdater ]]; then
  # Bare `unattended` inside [[ -eq ]] is resolved as a variable name
  # (arithmetic context), i.e. this tests "$unattended" == 0.
  if [[ unattended -eq 0 ]]; then
    step 'Enabling password-less use of vagrant-hostsupdater'
    # cp -i prompts before overwriting; failure is reported but non-fatal.
    sudo cp -i "${dotfiles_dir}/etc/sudoers.d/vagrant_hostsupdater" /etc/sudoers.d/vagrant_hostsupdater \
      || error 'Unable to copy to /etc/sudoers.d/vagrant_hostsupdater'
  else
    warn 'Skipping vagrant-hostsupdater config due to --unattended option'
  fi
else
  debug 'Skipping vagrant-hostsupdater, file exists'
fi
printf "\n${color_green}%s${color_reset}\n" 'Dotfiles have been installed successfully!'
| true |
3b1258f814e036d4d8bdaa57e6510c4da35f321a | Shell | sergmiller/acos_archive | /archive/task_memory/:w | UTF-8 | 774 | 2.78125 | 3 | [] | no_license | #!/bin/bash
#start step test:
declare -i count=4
declare -a list_opt
list_opt=("a_O0" "a_O1" "a_O2" "a_O3" "a_os")
while [ $count -ne 5 ]
do
declare -i array_size=100000000
declare -i step=1000000
echo "size step gallop buff_val" >> resalts_step_test_${list_opt[$count]}.csv
while [ $step -lt 50000000 ]
do
echo "run step test with size: $array_size, step: $step and optimization version: ${list_opt[$count]}"
./${list_opt[$count]} $array_size $step resalts_step_test_${list_opt[$count]}.csv 6 #<-- gallop key used
let "step = step + 1000000"
done
./cg_p resalts_step_test_${list_opt[$count]}.csv #программа для замены всех . на , в файле выходных данных
let "count = count + 1"
done
exit 0
#end step test
| true |
64f2d52be4f0ba30ac47ef600d7666f1141d7dba | Shell | CalebRHicks/DMCBS_Project | /supercomputer_scripts/p-wave.sb | UTF-8 | 1,388 | 2.59375 | 3 | [] | no_license | #!/bin/bash
#SBATCH --time=0-03:59:00
#SBATCH --mem=40G
#SBATCH --nodes=1
#SBATCH --ntasks=28
#SBATCH --cpus-per-task=1
#SBATCH --exclusive
#SBATCH --tasks-per-node=28
cd ${SLURM_SUBMIT_DIR}
# Candidate single-particle momentum vectors, assigned to unpaired states in order.
mom=('0 0 0' '0 0 1' '0 1 0' '1 0 0' '-1 0 0' '0 -1 0' '0 0 -1' '1 1 0' '1 -1 0' '-1 1 0' '-1 -1 0' '1 0 1' '1 0 -1' '-1 0 1' '-1 0 -1' '0 1 1')
# Sweep the number of unpaired up atoms; input.dat is rewritten in place for
# each case, so runs are strictly sequential.
for unpaired in {1..16}
do
num=$unpaired
pairs=0
nup=$num
sed -i "s/.*number of particles.*/$num\t# number of particles/" input.dat
sed -i "s/.*number and mass of up atoms.*/$nup\t1.0\t# number and mass of up atoms/" input.dat
sed -i "s/.*number and mass of down atoms.*/0\t1.0\t# number and mass of down atoms/" input.dat
sed -i "s/.*number of up states.*/$unpaired\t# number of up states/" input.dat
# Drop stale momentum lines, then append one per unpaired state (indices 0..unpaired-1).
sed -i "/unpaired momentum/d" input.dat
for i in $(eval echo "{0..$(( unpaired - 1 ))}")
do
sed -i "/number of up states/a${mom[i]}\t # unpaired momentum" input.dat
done
# VMC pass from lattice sites (output discarded), then DMC pass from the
# resulting walkers with output captured per particle count.
sed -i "s/.*use lattice sites if true.*/.true.\t# use lattice sites if true/" input.dat
sed -i "s/.*type of run.*/0\t# type of run -1=opt, 0=vmc, 1=dmc, local w/" input.dat
mpirun -n 28 ./penmp < input.dat > /dev/null
sed -i "s/.*type of run.*/1\t# type of run -1=opt, 0=vmc, 1=dmc, local w/" input.dat
sed -i "s/.*use lattice sites if true.*/.false.\t# use lattice sites if true/" input.dat
mpirun -n 28 ./penmp < input.dat > ./outputDoubleGauss/'outN'$num.out
done
| true |
68d7d7e62378bd01dcf5866298734234e1fafea1 | Shell | xyongcn/ION | /config_files/ipv6-9nodes/auto_step1.sh | WINDOWS-1252 | 9,635 | 2.65625 | 3 | [] | no_license | #!/bin/sh
# Deploy the ION configuration to nine IPv6 nodes, restart the DTN stack on
# each, then open a 3x3 tmux dashboard tailing node logs.
# Node i (1..9) lives at 2000::i and receives the contact plan "i.txt";
# the per-node copy-paste of the original has been collapsed into loops.
BPCONFIG="bpconfig.conf"
USERNAME="wjbang"
CONFIG_LOC=/home/${USERNAME}/ipv6-9nodes
PASSWORD_FILE="${CONFIG_LOC}/password"
# 1. Create the config directory on every node and push that node's contact
#    plan plus the shared bpadmin config (scp needs [] around IPv6 hosts).
for i in 1 2 3 4 5 6 7 8 9; do
	sshpass -f ${PASSWORD_FILE} ssh -o StrictHostKeyChecking=no ${USERNAME}@2000::${i} "mkdir ${CONFIG_LOC}"
	sshpass -f ${PASSWORD_FILE} scp -o StrictHostKeyChecking=no ${i}.txt ${USERNAME}@[2000::${i}]:${CONFIG_LOC}
	sshpass -f ${PASSWORD_FILE} scp -o StrictHostKeyChecking=no ${BPCONFIG} ${USERNAME}@[2000::${i}]:${CONFIG_LOC}
done
# 2. Stop any running ION instances. Nodes 1-8 stop in the background (the
#    trailing '&' is inside the remote command), node 9 runs in the
#    foreground so the script waits on at least one node.
echo "Stopping IONs if they are already opened..."
for i in 1 2 3 4 5 6 7 8; do
	sshpass -f ${PASSWORD_FILE} ssh -o StrictHostKeyChecking=no -q ${USERNAME}@2000::${i} "ionstop > ${CONFIG_LOC}/scriptlog_exit &"
done
sshpass -f ${PASSWORD_FILE} ssh -o StrictHostKeyChecking=no -q ${USERNAME}@2000::9 "ionstop > ${CONFIG_LOC}/scriptlog_exit"
# Give ionstop a few seconds to settle.
for s in 5 4 3 2 1; do
	echo "Waiting ionstop ${s}.."
	sleep 1
done
# 3. Start ION on every node (same background/foreground split as above).
echo "Starting IONs..."
for i in 1 2 3 4 5 6 7 8; do
	echo "Starting ION${i}"
	sshpass -f ${PASSWORD_FILE} ssh -o StrictHostKeyChecking=no -q ${USERNAME}@2000::${i} "ionstart -I ${CONFIG_LOC}/${i}.txt > ${CONFIG_LOC}/scriptlog &"
done
echo "Starting ION9"
sshpass -f ${PASSWORD_FILE} ssh -o StrictHostKeyChecking=no -q ${USERNAME}@2000::9 "ionstart -I ${CONFIG_LOC}/9.txt > ${CONFIG_LOC}/scriptlog"
for s in 6 5 4 3 2 1; do
	echo "Waiting ionstart ${s}.."
	sleep 1
done
# 4. Load the bundle-protocol configuration on every node, sequentially.
for i in 1 2 3 4 5 6 7 8 9; do
	sshpass -f ${PASSWORD_FILE} ssh -o StrictHostKeyChecking=no ${USERNAME}@2000::${i} "bpadmin ${CONFIG_LOC}/${BPCONFIG}"
done
# 5. Build a 3x3 tmux dashboard. Pane numbering after the splits:
#    0 3 6
#    1 4 7
#    2 5 8
tmux kill-session
tmux kill-session
tmux new-session -d -s 9nodes
tmux split-window -h -p 67
tmux split-window -h
tmux select-pane -t %0
tmux split-window -v -p 67
tmux split-window -v
tmux select-pane -t %1
tmux split-window -v -p 67
tmux split-window -v
tmux select-pane -t %2
tmux split-window -v -p 67
tmux split-window -v
# Panes 1-7 tail the logs of nodes 2-8 (as in the original layout);
# pane 0 is left selected for interactive use.
for p in 1 2 3 4 5 6 7; do
	tmux select-pane -t ${p}
	tmux send-keys "sshpass -f ${PASSWORD_FILE} ssh -o StrictHostKeyChecking=no ${USERNAME}@2000::$((p + 1)) \"tail -f ${CONFIG_LOC}/scriptlog\"" C-m
done
tmux select-pane -t 0
tmux attach
fa5781a23f0b9761e83dc7a31af59027a0375904 | Shell | cwt1/scripts-1 | /projFocus/ceRNA/runs/runSampleinfo.sh | UTF-8 | 357 | 2.640625 | 3 | [] | no_license | #!/bin/bash
#By: J.He
#Desp: Subset clinical_patient_brca.txt to the patients whose barcodes appear
#      in the cnv/meth/snp/expression barcode list.
#TODO:
#$ -cwd
# Seed the output with the clinical header row.
head -1 clinical_patient_brca.txt > TCGA_barcode_all_in_cnv_meth_snp_EXP_clinical.txt
while read -r line
do
# First 12 characters of the barcode form the TCGA patient ID.
# FIX: awk substr() is 1-based; the original substr($1,0,12) returned only
# 11 characters, so full 12-character patient IDs could fail the -w match.
pid=$(echo "$line" | awk '{print substr($1,1,12)}')
echo "$pid"
grep -w "$pid" clinical_patient_brca.txt >> TCGA_barcode_all_in_cnv_meth_snp_EXP_clinical.txt
done < TCGA_barcode_all_in_cnv_meth_snp_EXP.txt
| true |
db3c201617c74036b5628b0a17c52c6b85d8c8fa | Shell | bridgecrew-perf4/Terraform-Pipelines | /tools/install/install.sh | UTF-8 | 91,504 | 3.578125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Abort on errors, unset variables and failed pipeline stages.
set -euo pipefail
# Includes
source lib/http.sh         # HTTP helpers (request_get, ...)
source lib/shell_logger.sh # _information/_error/_success/_debug helpers
usage() {
    # Render the CLI help (via _information, redirected to stderr) and abort.
    # Each dependency line shows a red "(not installed)" marker unless the
    # tool is actually resolvable on PATH.
    local jqStatusMessage="\e[31m(not installed)\e[0m"
    local sedStatusMessage="\e[31m(not installed)\e[0m"
    if [ -x "$(command -v jq)" ]; then
        jqStatusMessage="(installed - you're good to go!)"
    fi
    if [ -x "$(command -v sed)" ]; then
        sedStatusMessage="(installed - you're good to go!)"
    fi
    _helpText="
Usage: install.sh
    -o | --org <AZDO_ORG_NAME> (User if provisioning to Azure DevOps Service)
    -l | --server <Azure DevOps Server and Collection> (Ex. server/collectionName)
                  Must specify the server and collection name
                  Must also use -u parameter to specify the user
    -u | --user specifies the user to use with PAT on Azure DevOps server
    -n | --name <AZDO_PROJECT_NAME>
    -p | --pat <AZDO_PAT>
    -r | --region <REGION>
    -i | --installInternal (Optional: set if attempting to install internal version of CredScan)
    -c | --cloudName (Optional cloud name if service connection is for other cloud 'az cloud list')
       | --subName '<Azure Subscription Name>' (Optional - if included, can be ommitted from -s.)
            ** Note: if the subscription name has spaces, you must use this input parameter. **
       | --metaDataHost The Hostname of the Azure Metadata Service (for example management.azure.com), used to obtain the Cloud Environment when using a Custom Azure Environment.
       | --private This flag indicates that the deployment target is an Azure Private Cloud.
       | --useExistingEnvs This flag indicates that you will use existing env files and it skips generating dev and prod env files and the environments folder.
    -s | --servicePrincipal <SP_INFORMATION>
            Expected Format:
                SUB_NAME='<Azure Subscription Name>' SP_ID=<Service Principal ID> SP_SUBSCRIPTION_ID=<Azure Subscription ID> SP_SECRET=<Service Principal Secret> SP_TENANT_ID=<Service Principal Tenant ID>
            ** Note: if the subscription name has spaces, you must use the --subName parameter. **
    --offline (Optional) Enable project creation without importing source from public repos. This will set up the project with files from this repo and the associated Terraform-Code Repo.
         ** Note: For offline to function correctly, it's expected that the Terraform-Code repo sits alongside this (Terraform-Pipelines) repo.
    --sourceLocalPath (Optional) Root folder of Terraform-Code and Terraform-Pipelines repos. Default ~/tfsource.
         ** Note: Works only with --offline
    -d | --debug Turn debug logging on
    dependencies:
      -jq $jqStatusMessage
      -sed $sedStatusMessage
    "
    _information "$_helpText" 1>&2
    exit 1
}
#Script Parameters (Required) -- populated by the argument loop below.
declare AZDO_ORG_NAME=''
declare AZDO_PROJECT_NAME=''
declare AZURE_CLOUD_ENVIRONMENT='AzureCloud'
declare MANAGEMENT_URI=''
declare AZDO_PAT=''
declare AZDO_USER='AzureUser'
declare SP_RAW=''
declare SP_SUBSCRIPTION_NAME=''
declare SP_ID=''
declare SP_SUBSCRIPTION_ID=''
declare SP_SECRET=''
declare SP_TENANT_ID=''
declare REGION=''
declare INSTALL_INTERNAL_CREDSCAN=false
declare INSTALL_TYPE='PAAS'
declare DEBUG_FLAG=false
declare OFFLINE_INSTALL=false
declare USE_EXISTING_ENVS=false
declare SOURCE_LOCAL_PATH=''
# Defaults AZDO -- derived organisation URIs and template repo locations.
declare AZDO_ORG_URI=''
declare AZDO_EXT_MGMT_URI=''
declare AZDO_PROJECT_PROCESS_TEMPLATE='Agile'
declare AZDO_PROJECT_SOURCE_CONTROL='git'
declare AZDO_PROJECT_VISIBILITY='private'
declare AZDO_SC_AZURERM_NAME='sc-azurerm-sp'
declare TEMPLATE_REPO_BASE='https://csedevops@dev.azure.com/csedevops/terraform-template-public'
declare PIPELINE_REPO_NAME='Terraform-Pipelines'
declare CODE_REPO_NAME='Terraform-Code'
declare TEMPLATE_PIPELINE_REPO=''
declare TEMPLATE_CODE_REPO=''
declare CODE_REPO_ID=''
declare PIPELINE_REPO_ID=''
# Locals -- working state shared between the installation steps.
declare SEARCH_STRING="csedevops@"
declare REPO_GIT_HTTP_URL=''
declare PIPELINE_REPO_GIT_HTTP_URL=''
declare CODE_REPO_GIT_HTTP_URL=''
declare PR_SEED=
declare DEV_SEED=
declare PROD_SEED=
#declare ENVIRONMENT='dev'
declare AZDO_PROJECT_ID=
declare ARM_METADATA_HOST=
declare PRIVATE_CLOUD=false
declare AZURE_ENVIRONMENT_FILEPATH=
# Initialize parameters specified from command line
# Walk the argument list one token per iteration.
# NOTE(review): options that take a value (e.g. -o, -p) read $2 but only
# shift once; the value token is re-examined on the next pass and discarded
# by falling through the case. Harmless unless a value itself looks like an
# option -- confirm before relying on option-like values.
while [[ "$#" -gt 0 ]]
do
  case $1 in
    -h | --help)
      usage
      exit 0
      ;;
    -o | --org )
      # PAAS Azure DevOps
      AZDO_ORG_NAME=$2
      AZDO_ORG_URI="https://dev.azure.com/$2"
      AZDO_EXT_MGMT_URI="https://extmgmt.dev.azure.com/$2"
      INSTALL_TYPE='PAAS'
      ;;
    -l | --server )
      # Azure DevOps Server
      AZDO_ORG_NAME=$2
      AZDO_ORG_URI="https://$2"
      AZDO_EXT_MGMT_URI="https://$2"
      INSTALL_TYPE='SERVER'
      ;;
    -n | --name )
      AZDO_PROJECT_NAME=$2
      ;;
    -u | --user )
      AZDO_USER=$2
      ;;
    -p | --pat )
      AZDO_PAT=$2
      ;;
    -r | --region )
      REGION=$2
      ;;
    -c | --cloudName )
      AZURE_CLOUD_ENVIRONMENT=$2
      ;;
    --subName )
      SP_SUBSCRIPTION_NAME=$2
      ;;
    --metaDataHost )
      ARM_METADATA_HOST=$2
      ;;
    --private)
      PRIVATE_CLOUD=true
      ;;
    --useExistingEnvs)
      USE_EXISTING_ENVS=true
      ;;
    -s | --servicePrincipal )
      SP_RAW=$2
      ;;
    -i | --installInternal )
      INSTALL_INTERNAL_CREDSCAN=true
      ;;
    -t | --templateRepo )
      TEMPLATE_REPO_BASE=$2
      ;;
    --offline )
      OFFLINE_INSTALL=true
      ;;
    --sourceLocalPath )
      SOURCE_LOCAL_PATH=$2
      ;;
    -d | --debug )
      DEBUG_FLAG=true
      ;;
  esac
  shift
done
check_input() {
    # Validate the required CLI inputs, derive the template repo URLs, and
    # verify that the jq / az CLI dependencies are installed.
    _information "Validating Inputs..."
    # SECURITY FIX: the original echoed $AZDO_PAT and $SP_RAW here; both are
    # credentials (the PAT, and SP_RAW embeds SP_SECRET) and must not be
    # written to stdout / CI logs.
    echo "$AZDO_ORG_NAME"
    echo "$AZDO_PROJECT_NAME"
    echo "$REGION"
    #TODO add check for server and check for PAAS
    if [ -z "$AZDO_ORG_NAME" ] || [ -z "$AZDO_PROJECT_NAME" ] || [ -z "$AZDO_PAT" ] || [ -z "$REGION" ] || [ -z "$SP_RAW" ]; then
        _error "Required parameter not set."
        usage
        return 1
    fi
    # Derive the git URLs of the template repositories.
    TEMPLATE_PIPELINE_REPO="${TEMPLATE_REPO_BASE}/_git/${PIPELINE_REPO_NAME}"
    TEMPLATE_CODE_REPO="${TEMPLATE_REPO_BASE}/_git/${CODE_REPO_NAME}"
    echo "ARM_METADATA_HOST: ${ARM_METADATA_HOST}"
    if [ ! -x "$(command -v jq)" ]; then
        _error "jq is not installed! jq is a dependency needed to run the install script.
    Please ensure all requirements from the project installation document are met:
    https://dev.azure.com/csedevops/terraform-template-public/_git/Terraform-Pipelines?path=%2Fdocs%2FPROJECT_INSTALLATION.md&_a=preview&anchor=pre-requisites
    "
        exit 1
    fi
    if [ ! -x "$(command -v az)" ]; then
        _error "az cli is not installed! az cli is a dependency needed to run the install script.
    Please ensure all requirements from the project installation document are met:
    https://dev.azure.com/csedevops/terraform-template-public/_git/Terraform-Pipelines?path=%2Fdocs%2FPROJECT_INSTALLATION.md&_a=preview&anchor=pre-requisites
    "
        exit 1
    fi
}
parse_sp() {
    # Expected Format "SUB_NAME='<Azure Subscription Name>' SP_ID=<Service Principal ID> SP_SUBSCRIPTION_ID=<Azure Subscription ID> SP_SECRET=<Service Principal Secret> SP_TENANT_ID=<Service Principal Tenant ID>"
    # NOTE: format is with quotes ""
    # Splits the raw string ($1) on spaces into KEY=VALUE pairs and assigns
    # them to the corresponding SP_* globals; returns 1 on any format error.
    _information "Parsing Service Principal credentials..."
    # Save/restore IFS around the space-split of the raw parameter string.
    BFS=$IFS
    IFS=' '
    read -ra kv_pairs <<<${1}
    IFS=$BFS
    len=${#kv_pairs[@]}
    # SUB_NAME may instead be supplied via --subName, in which case only four
    # pairs are expected here.
    expectedLength=5
    if [ ! -z "$SP_SUBSCRIPTION_NAME" ]; then
        expectedLength=4
    fi
    if [ $len != $expectedLength ]; then
        _error "SP_RAW contains invalid # of parameters"
        _error "Expected Format SUB_NAME='<Azure Subscription Name>' SP_ID=<Service Principal ID> SP_SUBSCRIPTION_ID=<Azure Subscription ID> SP_SECRET=<Service Principal Secret> SP_TENANT_ID=<Service Principal Tenant ID>"
        usage
        return 1
    fi
    # Split each pair on '=' and route the value to the matching global.
    for kv in "${kv_pairs[@]}"; do
        BFS=$IFS
        IFS='='
        read -ra arr <<<"$kv"
        IFS=$BFS
        k=${arr[0]}
        v=${arr[1]}
        case "$k" in
        "SUB_NAME") SP_SUBSCRIPTION_NAME=$v ;;
        "SP_ID") SP_ID=$v ;;
        "SP_SUBSCRIPTION_ID") SP_SUBSCRIPTION_ID=$v ;;
        "SP_SECRET") SP_SECRET=$v ;;
        "SP_TENANT_ID") SP_TENANT_ID=$v ;;
        *)
            _error "Invalid service principal parameter."
            return 1
            ;;
        esac
    done
    _success "Sucessfully parsed service principal credentials..."
}
set_login_pat(){
    # Expose the PAT under the env var name the Azure DevOps CLI extension reads.
    AZURE_DEVOPS_EXT_PAT="${AZDO_PAT}"
    export AZURE_DEVOPS_EXT_PAT
}
create_project(){
_information "Starting project creation for project ${AZDO_PROJECT_NAME}"
# Refactor
# 1. GET Get all processes to get template id
# AzDo Service : Processes - Get https://docs.microsoft.com/rest/api/azure/devops/core/processes/get?view=azure-devops-rest-5.1
# AzDo Server 2019 : Processes - Get https://docs.microsoft.com/rest/api/azure/devops/core/processes/get?view=azure-devops-server-rest-5.0
# GET https://{instance}/{collection}/_apis/process/processes/{processId}?api-version=5.0
_uri=$(_set_api_version "${AZDO_ORG_URI}/_apis/process/processes?api-version=" '5.1' '5.1')
_debug "Requesting process templates"
_response=$(request_get "${_uri}")
echo $_response > ./temp/pt.json
if [[ "$_response" == *"Access Denied: The Personal Access Token used has expired"* ]]; then
_error "Authentication Error Personal Access Token used has expired!"
exit 1
fi
if [[ "$_response" == *"Azure DevOps Services | Sign In"* ]]; then
_error "Authentication Error Requesting process templates. Please ensure the PAT is valid."
exit 1
fi
if [ -z "$_response" ]; then
_error "Error Requesting process templates. Please ensure the PAT is valid and has not expired."
exit 1
fi
_processTemplateId=$(cat ./temp/pt.json | jq -r '.value[] | select(.name == "'"${AZDO_PROJECT_PROCESS_TEMPLATE}"'") | .id')
# 2. Create Project
# AzDo Service : Projects - Create https://docs.microsoft.com/rest/api/azure/devops/core/projects/create?view=azure-devops-rest-5.1
# AzDo Server 2019 : Projects - Create https://docs.microsoft.com/rest/api/azure/devops/core/projects/create?view=azure-devops-server-rest-5.0
# POST https://{{coreServer}}/{{organization}}/_apis/projects?api-version={{api-version}}
_payload=$(cat "payloads/template.project-create.json" | sed 's~__AZDO_PROJECT_NAME__~'"${AZDO_PROJECT_NAME}"'~' | sed 's~__AZDO_PROJECT_SOURCE_CONTROL__~'"${AZDO_PROJECT_SOURCE_CONTROL}"'~' | sed 's~__AZDO_PROCESS_TEMPLATE_ID__~'"${_processTemplateId}"'~')
_uri=$(_set_api_version "${AZDO_ORG_URI}/_apis/projects?api-version=" '5.1' '5.1')
_debug "Creating project"
# 2. POST Create project
_response=$( request_post \
"${_uri}" \
"${_payload}"
)
echo $_response > ./temp/cp.json
local _createProjectTypeKey=$(echo $_response | jq -r '.typeKey')
if [ "$_createProjectTypeKey" = "ProjectAlreadyExistsException" ]; then
_error "Error creating project in org '${AZDO_ORG_URI}. \nProject repo '${AZDO_PROJECT_NAME}' already exists."
exit 1
fi
_debug_log_post "$_uri" "$_response" "$_payload"
#When going through rest apis, there is a timing issue from project create to querying the repo properties.
sleep 10s
# Fetch The list of projects to get this project's id
# https://docs.microsoft.com/rest/api/azure/devops/core/Projects/List?view=azure-devops-server-rest-5.0
# GET https://{instance}/{collection}/_apis/projects?api-version=5.0
_uri="${AZDO_ORG_URI}/_apis/projects?api-version=5.0"
_response=$(request_get $_uri)
echo $_response > './temp/get-project-id.json'
AZDO_PROJECT_ID=$(cat './temp/get-project-id.json' | jq -r '.value[] | select (.name == "'"${AZDO_PROJECT_NAME}"'") | .id')
# 3. Create Repos
#https://docs.microsoft.com/rest/api/azure/devops/git/repositories/create?view=azure-devops-rest-5.1
_information "Creating ${PIPELINE_REPO_NAME} Repository"
_uri=$(_set_api_version "${AZDO_ORG_URI}/${AZDO_PROJECT_NAME}/_apis/git/repositories/${AZDO_PROJECT_NAME}?api-version=" '5.1' '5.1')
_payload=$(cat "payloads/template.repo-create.json" | sed 's~__AZDO_PROJECT_ID__~'"${AZDO_PROJECT_ID}"'~' | sed 's~__REPO_NAME__~'"${PIPELINE_REPO_NAME}"'~' )
_response=$(request_post "${_uri}" "${_payload}")
echo _response > "./temp/$PIPELINE_REPO_NAME-create-response.json"
_information "Creating ${CODE_REPO_NAME} Repository"
_uri=$(_set_api_version "${AZDO_ORG_URI}/${AZDO_PROJECT_NAME}/_apis/git/repositories/${AZDO_PROJECT_NAME}?api-version=" '5.1' '5.1')
_payload=$(cat "payloads/template.repo-create.json" | sed 's~__AZDO_PROJECT_ID__~'"${AZDO_PROJECT_ID}"'~' | sed 's~__REPO_NAME__~'"${CODE_REPO_NAME}"'~' )
_response=$(request_post "${_uri}" "${_payload}")
echo _response > "./temp/$CODE_REPO_NAME-create-response.json"
# 4. GET Repos Git Url and Repo Id's
# AzDo Service : Repositories - Get Repository https://docs.microsoft.com/rest/api/azure/devops/git/repositories/get%20repository?view=azure-devops-rest-5.1
# AzDo Server 2019 : Repositories - Get Repository https://docs.microsoft.com/rest/api/azure/devops/git/repositories/get%20repository?view=azure-devops-server-rest-5.0
# GET https://dev.azure.com/{organization}/{project}/_apis/git/repositories/{repositoryId}?api-version=5.1
_uri=$(_set_api_version "${AZDO_ORG_URI}/${AZDO_PROJECT_NAME}/_apis/git/repositories/${PIPELINE_REPO_NAME}?api-version=" '5.1' '5.1')
_debug "Fetching ${PIPELINE_REPO_NAME} repository information"
_response=$( request_get ${_uri})
_debug_log_get "$_uri" "$_response"
echo $_response > "./temp/${PIPELINE_REPO_NAME}-ri.json"
PIPELINE_REPO_GIT_HTTP_URL=$(cat "./temp/${PIPELINE_REPO_NAME}-ri.json" | jq -c -r '.remoteUrl')
PIPELINE_REPO_ID=$(cat "./temp/${PIPELINE_REPO_NAME}-ri.json" | jq -c -r '.id')
_debug "$PIPELINE_REPO_GIT_HTTP_URL"
echo "${PIPELINE_REPO_NAME} Git Repo remote URL: "$PIPELINE_REPO_GIT_HTTP_URL
_uri=$(_set_api_version "${AZDO_ORG_URI}/${AZDO_PROJECT_NAME}/_apis/git/repositories/${CODE_REPO_NAME}?api-version=" '5.1' '5.1')
_debug "Fetching ${CODE_REPO_NAME} repository information"
_response=$( request_get ${_uri})
_debug_log_get "$_uri" "$_response"
echo $_response > "./temp/${CODE_REPO_NAME}-ri.json"
CODE_REPO_GIT_HTTP_URL=$(cat "./temp/${CODE_REPO_NAME}-ri.json" | jq -c -r '.remoteUrl')
CODE_REPO_ID=$(cat "./temp/${CODE_REPO_NAME}-ri.json" | jq -c -r '.id')
_debug "$CODE_REPO_GIT_HTTP_URL"
echo "${CODE_REPO_NAME} Git Repo remote URL: "$CODE_REPO_GIT_HTTP_URL
_information "Project '${AZDO_PROJECT_NAME}' created."
}
import_multi_template_repo(){
templateRepoName=$1
if [ -z "$templateRepoName" ]; then
_error "Missing Template Repo Name from import"
exit 1
fi
templateRepoUrl=$2
if [ -z "$templateRepoUrl" ]; then
_error "Missing Template Repo Url from import"
exit 1
fi
_information "Starting Import of template repo (URL: ${templateRepoName})"
# AzDo Service : Import Requests - Create https://docs.microsoft.com/rest/api/azure/devops/git/import%20requests/create?view=azure-devops-rest-5.1
# AzDo Server 2019 : Import Requests - Create https://docs.microsoft.com/rest/api/azure/devops/git/import%20requests/create?view=azure-devops-server-rest-5.0
# POST https://dev.azure.com/{organization}/{project}/_apis/git/repositories/{repositoryId}/importRequests?api-version=5.1-preview.1
_payload=$(cat "payloads/template.import-repo.json" | sed 's~__GIT_SOURCE_URL__~'"${templateRepoUrl}"'~')
_importTemplateUri="${AZDO_ORG_URI}/${AZDO_PROJECT_NAME}/_apis/git/repositories/${templateRepoName}/importRequests?api-version=5.1-preview.1"
_debug "Import POST Request"
_debug "payload: "
_debug_json "${_payload}"
_response=$( request_post \
"${_importTemplateUri}" \
"${_payload}"
)
_debug_log_post "$_importTemplateUri" "$_response" "$_payload"
echo $_response > "./temp/$templateRepoName.impreqrepo.json"
_importRequestId=$(cat "./temp/$templateRepoName.impreqrepo.json" | jq -r '.importRequestId')
if [ "$_importRequestId" != "null" ]; then
echo "Import in progress - Import Request Id:${_importRequestId}"
sleep 5
# AzDo Service : Import Requests - Get https://docs.microsoft.com/rest/api/azure/devops/git/import%20requests/get?view=azure-devops-rest-5.1
# AzDo Server 2019 : Import Requests - Get https://docs.microsoft.com/rest/api/azure/devops/git/import%20requests/get?view=azure-devops-server-rest-5.0
# GET https://dev.azure.com/{organization}/{project}/_apis/git/repositories/{repositoryId}/importRequests/{importRequestId}?api-version=5.1-preview.1
_debug "Checking import status"
_response=$(request_get "${_importTemplateUri}")
_debug_log_get "$_importTemplateUri" "$_response"
_importRequestStatus=$(echo $_response | jq -r .value[].status)
_debug "$_importRequestStatus"
if [ "$_importRequestStatus" = "completed" ]; then
_success "Import Complete from source '${TEMPLATE_REPO_BASE}' into project repo '${AZDO_PROJECT_NAME}'"
fi
else
# Failed to Submit Import Request
_importTypeKey=$(echo $_response | jq -r '.typeKey')
if [ "$_importTypeKey" = "GitImportForbiddenOnNonEmptyRepository" ]; then
_error "Error importing from source '${TEMPLATE_REPO_BASE}'. \nProject repo '${AZDO_PROJECT_NAME}' is not Empty."
elif [ "$_importTypeKey" = "GitRepositoryNotFoundException" ]; then
_error "importing from source '${TEMPLATE_REPO_BASE}'. \nProject repo '${AZDO_PROJECT_NAME}' was not found."
fi
fi
}
_getProjectRootPath(){
scriptPath=$(realpath "$0")
relativePath="tools/install/install.sh"
echo ${scriptPath%/$relativePath}
}
offline_install_template_repo(){
templateRepoName=$1
if [ -z "$templateRepoName" ]; then
_error "Missing Template Repo Name from offline Install"
exit 1
fi
templateRepoUrl=$2
if [ -z "$templateRepoUrl" ]; then
_error "Missing Template Repo Url from offline Install"
exit 1
fi
sourcePath=$3
if [ -z "$sourcePath" ]; then
_error "Missing sourcePath from offline Install"
exit 1
fi
_information "Starting offline set up of template repo (URL: ${templateRepoName})"
_debug "** offline_install_template_repo **"
_debug "templateRepoName: $templateRepoName"
_debug "templateRepoUrl: $templateRepoUrl"
_debug "sourcePath: $sourcePath"
_debug "** /offline_install_template_repo **"
pushd $sourcePath
local _token=$(echo -n ":${AZDO_PAT}" | base64)
local tarFlags="-zxf"
local gitVerbosity="-q"
if [ "${DEBUG_FLAG}" == true ]; then
tarFlags="-zxvf"
gitVerbosity="-v"
fi
git config user.email "installer@terraform-template.com"
git config user.name "Terraform template"
git remote set-url origin ${templateRepoUrl}
git -c http.extraHeader="Authorization: Basic ${_token}" push -u origin --all ${gitVerbosity}
popd
}
_gen_random_seed(){
PR_SEED=$(head -30 /dev/urandom | LC_CTYPE=c tr -dc 'a-z0-9' | fold -w $1 | head -n 1)
DEV_SEED=$(head -30 /dev/urandom | LC_CTYPE=c tr -dc 'a-z0-9' | fold -w $1 | head -n 1)
PROD_SEED=$(head -30 /dev/urandom | LC_CTYPE=c tr -dc 'a-z0-9' | fold -w $1 | head -n 1)
echo "PR SEED CREATED: "$PR_SEED
echo "DEV SEED CREATED: "$DEV_SEED
echo "PRD SEED CREATED: "$PROD_SEED
}
clone_repo() {
local temp_dir=$1
local repoGitHttpUrl=$2
mkdir -p $temp_dir
if [ $INSTALL_TYPE == 'PAAS ' ]; then
SEARCH_STRING="${AZDO_ORG_NAME}@"
echo "Searching for ${SEARCH_STRING} and replacing with ${AZDO_PAT}"
SED_SEARCH_REPLACE="s|${SEARCH_STRING}|${AZDO_PAT}@|"
_debug "$SED_SEARCH_REPLACE"
GIT_REPO_URL=$(echo $repoGitHttpUrl | sed $SED_SEARCH_REPLACE | sed s/\"//g)
#Clone the repo (GIT_CURL_VERBOSE=1 GIT_TRACE=1 if debugging is needed)
git clone $repoGitHttpUrl $TEMP_DIR
else
_debug "Cloning repo from server"
_debug "Git URL: ${repoGitHttpUrl} TEMP_DIR:${TEMP_DIR}"
_token=$(echo -n ":${AZDO_PAT}" | base64)
_debug "$_token"
git -c http.extraHeader="Authorization: Basic ${_token}" clone $repoGitHttpUrl $TEMP_DIR
fi
}
git_push() {
if [ $INSTALL_TYPE == 'PAAS ' ]; then
git push origin master
else
_token=$(echo -n ":${AZDO_PAT}" | base64)
git -c http.extraHeader="Authorization: Basic ${_token}" push origin master
fi
}
create_default_env_files() {
_information "Cloning "$CODE_REPO_GIT_HTTP_URL
TEMP_DIR=~/git_repos/${AZDO_PROJECT_NAME}/${CODE_REPO_NAME}
clone_repo "$TEMP_DIR" "$CODE_REPO_GIT_HTTP_URL"
_information "Creating default environments..."
_gen_random_seed "4"
pushd $TEMP_DIR
#create tmp dir for loggin
if [[ -d "environments" ]]; then
_error "Environments Folder found in Terraform-Code Repo. Please rename the environments, as this template will auto-generate an environments folder."
exit 1
fi
mkdir environments
mkdir environments/pr
mkdir environments/dev
mkdir environments/prod
# Create PR Env Files
cat >./environments/pr/pr.remotestate.env <<EOL
####################
# TERRAFORM values #
####################
## global
TF_VAR_ENVIRONMENT=pr
TF_VAR_NAME=apppr
TF_VAR_SUBSCRIPTION_ID=${SP_SUBSCRIPTION_ID}
TF_VAR_LOCATION=${REGION}
TF_VAR_TENANT_ID=${SP_TENANT_ID}
## 01_Init state storage
TF_VAR_BACKEND_STORAGE_ACCOUNT_NAME=sttfrspr${PR_SEED}
TF_VAR_BACKEND_RESOURCE_GROUP_NAME=tf-remote-state-pr
TF_VAR_BACKEND_CONTAINER_NAME=tfrs
TF_VAR_STORAGE_ACCOUNT_ACCOUNT_TIER=Standard
TF_VAR_STORAGE_ACCOUNT_ACCOUNT_REPLICATION_TYPE=RAGRS
TF_VAR_STORAGE_ACCOUNT_ACCOUNT_KIND=StorageV2
TF_VAR_IDENTITY_TYPE=SystemAssigned
TF_VAR_TAGS_ENVIRONMENT=pr
TF_VAR_TAGS_VERSION=3.0.0
## Remote state backup
TF_VAR_BACKEND_BACKUP_RESOURCE_GROUP_NAME=tf-remote-state-backup-pr
TF_VAR_BACKUP_STORAGE_ACCOUNT_NAME=sttfrsbakpr${PR_SEED}
EOL
cat >./environments/pr/pr.03_webapp.env <<EOL
## 03_webapp
TF_VAR_PLAN_SKU_TIER=Standard
TF_VAR_PLAN_SKU_SIZE=S1
TF_VAR_APP_NAME=appeshoppr
TF_VAR_APP_PLAN_NAME=planpr
TF_VAR_DOCKER_IMAGE_NAME=dariuszporowski/eshopwebmvc:latest
EOL
# Create Dev Env Files
cat >./environments/dev/dev.remotestate.env <<EOL
####################
# TERRAFORM values #
####################
## global
TF_VAR_ENVIRONMENT=dev
TF_VAR_NAME=appdev
TF_VAR_SUBSCRIPTION_ID=${SP_SUBSCRIPTION_ID}
TF_VAR_LOCATION=${REGION}
TF_VAR_TENANT_ID=${SP_TENANT_ID}
## 01_Init state storage
TF_VAR_BACKEND_STORAGE_ACCOUNT_NAME=sttfrsdev${DEV_SEED}
TF_VAR_BACKEND_RESOURCE_GROUP_NAME=tf-remote-state-dev
TF_VAR_BACKEND_CONTAINER_NAME=tfrs
TF_VAR_STORAGE_ACCOUNT_ACCOUNT_TIER=Standard
TF_VAR_STORAGE_ACCOUNT_ACCOUNT_REPLICATION_TYPE=RAGRS
TF_VAR_STORAGE_ACCOUNT_ACCOUNT_KIND=StorageV2
TF_VAR_IDENTITY_TYPE=SystemAssigned
TF_VAR_TAGS_ENVIRONMENT=dev
TF_VAR_TAGS_VERSION=3.0.0
## Remote state backup
TF_VAR_BACKEND_BACKUP_RESOURCE_GROUP_NAME=tf-remote-state-backup-dev
TF_VAR_BACKUP_STORAGE_ACCOUNT_NAME=sttfrsbakdev${DEV_SEED}
EOL
cat >./environments/dev/dev.03_webapp.env <<EOL
## 03_webapp
TF_VAR_PLAN_SKU_TIER=Standard
TF_VAR_PLAN_SKU_SIZE=S1
TF_VAR_APP_NAME=appeshopdev
TF_VAR_APP_PLAN_NAME=plandev
TF_VAR_DOCKER_IMAGE_NAME=dariuszporowski/eshopwebmvc:latest
EOL
# Create Prod Env Files
cat >./environments/prod/prod.remotestate.env <<EOL
####################
# TERRAFORM values #
####################
## global
TF_VAR_ENVIRONMENT=prod
TF_VAR_NAME=appprod
TF_VAR_SUBSCRIPTION_ID=${SP_SUBSCRIPTION_ID}
TF_VAR_LOCATION=${REGION}
TF_VAR_TENANT_ID=${SP_TENANT_ID}
## 01_Init state storage
TF_VAR_BACKEND_STORAGE_ACCOUNT_NAME=sttfrsprod${PROD_SEED}
TF_VAR_BACKEND_RESOURCE_GROUP_NAME=tf-remote-state-prod
TF_VAR_BACKEND_CONTAINER_NAME=tfrs
TF_VAR_STORAGE_ACCOUNT_ACCOUNT_TIER=Standard
TF_VAR_STORAGE_ACCOUNT_ACCOUNT_REPLICATION_TYPE=RAGRS
TF_VAR_STORAGE_ACCOUNT_ACCOUNT_KIND=StorageV2
TF_VAR_IDENTITY_TYPE=SystemAssigned
TF_VAR_TAGS_ENVIRONMENT=prod
TF_VAR_TAGS_VERSION=3.0.0
## Remote state backup
TF_VAR_BACKEND_BACKUP_RESOURCE_GROUP_NAME=tf-remote-state-backup-prod
TF_VAR_BACKUP_STORAGE_ACCOUNT_NAME=sttfrsbakprod${PROD_SEED}
EOL
cat >./environments/prod/prod.03_webapp.env <<EOL
## 03_webapp
TF_VAR_PLAN_SKU_TIER=Standard
TF_VAR_PLAN_SKU_SIZE=S1
TF_VAR_APP_NAME=appeshop
TF_VAR_APP_PLAN_NAME=plan
TF_VAR_DOCKER_IMAGE_NAME=dariuszporowski/eshopwebmvc:latest
EOL
#commit pr.env to the repo
git add ./environments/pr/pr.remotestate.env
git add ./environments/pr/pr.03_webapp.env
#commit dev.env to the repo
git add ./environments/dev/dev.remotestate.env
git add ./environments/dev/dev.03_webapp.env
#commit prod.env to the repo
git add ./environments/prod/prod.remotestate.env
git add ./environments/prod/prod.03_webapp.env
git config user.email "installer@terraform-template.com"
git config user.name "Terraform template"
git commit -m "Initialize environments"
git_push
#before you delete the clone, we need to upload the file to secure files.
popd
#delete local copy of repo
rm -rf $TEMP_DIR
_success "Completed configuring env"
}
declare QUERY_EXTENSION_RESULT=''
query_extension() {
# AzDo Service : Import Requests - Get https://docs.microsoft.com/rest/api/azure/devops/git/import%20requests/get?view=azure-devops-rest-5.1
# AzDo Server 2019 : Import Requests - Get https://docs.microsoft.com/rest/api/azure/devops/git/import%20requests/get?view=azure-devops-server-rest-5.0
# GET https://dev.azure.com/{organization}/{project}/_apis/git/repositories/{repositoryId}/importRequests/{importRequestId}?api-version=5.1-preview.1
_publisherName=$1
_extensionName=$2
_uri=$(_set_api_version "${AZDO_EXT_MGMT_URI}/_apis/extensionmanagement/installedextensionsbyname/${_publisherName}/${_extensionName}?api-version=" '5.1-preview.1' '5.1-preview.1')
_debug "$_uri"
_response=$(request_get $_uri)
echo $_response > ./temp/${1}${2}.json
_debug_log_get "$_uri" "$_response"
_queryExtentionTypeKey=$(cat ./temp/${1}${2}.json | jq -r '.typeKey')
if [ "$_queryExtentionTypeKey" == "InstalledExtensionNotFoundException" ]; then
QUERY_EXTENSION_RESULT='[]'
return 0
fi
QUERY_EXTENSION_RESULT=$_response
}
check_and_install_extension_by_name() {
# AzDo Service : Install Extension By Name - https://docs.microsoft.com/rest/api/azure/devops/extensionmanagement/installed%20extensions/install%20extension%20by%20name?view=azure-devops-rest-5.1
# AzDo Server 2019 : Install Extension By Name - https://docs.microsoft.com/rest/api/azure/devops/extensionmanagement/installed%20extensions/install%20extension%20by%20name?view=azure-devops-server-rest-5.0
# POST https://extmgmt.dev.azure.com/{organization}/_apis/extensionmanagement/installedextensionsbyname/{publisherName}/{extensionName}/{version}?api-version=5.1-preview.1
_publisherName=$1
_extensionName=$2
query_extension "${_publisherName}" "${_extensionName}"
_debug "query result: $QUERY_EXTENSION_RESULT"
if [ "$QUERY_EXTENSION_RESULT" == '[]' ]; then
echo "Installing extension $_extensionName..."
_uri=$(_set_api_version "${AZDO_EXT_MGMT_URI}/_apis/extensionmanagement/installedextensionsbyname/${_publisherName}/${_extensionName}?api-version=" '5.1-preview.1' '5.1-preview.1')
_response=$(request_post $_uri "")
echo $_response > "./temp/install${1}${2}.json"
_debug_log_post "$_uri" "$_response" ""
_installExtensionTypeKey=$(cat "./temp/install${1}${2}.json" | jq -r '.typeKey')
if [ "$_installExtensionTypeKey" == "ExtensionDoesNotExistException" ]; then
_error "The extension $_publisherName.$_extensionName does not exist."
elif [ "$_installExtensionTypeKey" == "AccessCheckException" ]; then
_error "Access Denied."
echo $_response | jq
else
_success "Extension ${_extensionName} from publisher ${_publisherName} was installed in organization."
fi
else
_success "Extension ${_extensionName} from publisher ${_publisherName} already installed in organization.."
fi
}
install_extensions() {
_information "Installing ADO extensions"
check_and_install_extension_by_name "charleszipp" "azure-pipelines-tasks-terraform"
check_and_install_extension_by_name "CSE-DevOps" "RunPipelines"
# Only install credscan in PAAS projects.
if [ "$INSTALL_TYPE" == "PAAS" ]; then
if [ $INSTALL_INTERNAL_CREDSCAN == true ]; then
check_and_install_extension_by_name "securedevelopmentteam" "vss-secure-development-tools"
else
check_and_install_extension_by_name "ms-codeanalysis" "vss-microsoft-security-code-analysis-devops"
fi
fi
}
_get_management_endpoint() {
local _response=$(az cloud show -n ${AZURE_CLOUD_ENVIRONMENT})
echo $_response > "./temp/az-cloud-show-response.json"
if [ "$INSTALL_TYPE" == "PAAS" ]; then
MANAGEMENT_URI=`echo $_response | jq .endpoints.management | sed "s/^\([\"']\)\(.*\)\1\$/\2/g"`
else
MANAGEMENT_URI=`echo $_response | jq .endpoints.resourceManager | sed "s/^\([\"']\)\(.*\)\1\$/\2/g"`
fi
_debug "MANAGEMENT_URI: ${MANAGEMENT_URI}"
}
_create_svc_connection_payload() {
local _payload
if [ "$INSTALL_TYPE" == "PAAS" ]; then
_payload=$(cat "payloads/template.service-connection-create.json" \
| sed 's~__SERVICE_PRINCIPAL_ID__~'"${SP_ID}"'~' \
| sed 's@__SERVICE_PRINCIPAL_KEY__@'"${SP_SECRET}"'@' \
| sed 's~__SERVICE_PRINCIPAL_TENANT_ID__~'"${SP_TENANT_ID}"'~' \
| sed 's~__CLOUD_ENVIRONMENT__~'"${AZURE_CLOUD_ENVIRONMENT}"'~' \
| sed 's~__SUBSCRIPTION_ID__~'"${SP_SUBSCRIPTION_ID}"'~' \
| sed 's~__SUBSCRIPTION_NAME__~'"${SP_SUBSCRIPTION_NAME}"'~' \
| sed 's~__SERVICE_CONNECTION_NAME__~'"${AZDO_SC_AZURERM_NAME}"'~' \
| sed 's~__PROJECT_ID__~'"${AZDO_PROJECT_ID}"'~' \
| sed 's~__PROJECT_NAME__~'"${AZDO_PROJECT_NAME}"'~' \
| sed 's~__MANAGEMENT_URI__~'"${MANAGEMENT_URI}"'~' \
)
else
local targetEnvironment=${AZURE_CLOUD_ENVIRONMENT}
if [ "${PRIVATE_CLOUD}" == true ]; then
targetEnvironment="AzureStack"
fi
_payload=$(cat "payloads/template.service-connection-create-azdovm.json" \
| sed 's~__SERVICE_PRINCIPAL_ID__~'"${SP_ID}"'~' \
| sed 's@__SERVICE_PRINCIPAL_KEY__@'"${SP_SECRET}"'@' \
| sed 's~__SERVICE_PRINCIPAL_TENANT_ID__~'"${SP_TENANT_ID}"'~' \
| sed 's~__CLOUD_ENVIRONMENT__~'"${targetEnvironment}"'~' \
| sed 's~__SUBSCRIPTION_ID__~'"${SP_SUBSCRIPTION_ID}"'~' \
| sed 's~__SUBSCRIPTION_NAME__~'"${SP_SUBSCRIPTION_NAME}"'~' \
| sed 's~__SERVICE_CONNECTION_NAME__~'"${AZDO_SC_AZURERM_NAME}"'~' \
| sed 's~__MANAGEMENT_URI__~'"${MANAGEMENT_URI}"'~' \
)
fi
echo $_payload
}
create_arm_svc_connection() {
# https://docs.microsoft.com/rest/api/azure/devops/serviceendpoint/endpoints/create?view=azure-devops-rest-5.1#endpointauthorization
# https://docs.microsoft.com/rest/api/azure/devops/serviceendpoint/endpoints/create?view=azure-devops-server-rest-5.0
# Create Azure RM Service connection
_information "Creating AzureRM service connection"
# Get the management endpoint for whatever cloud we are provisioning for.
_get_management_endpoint
_uri=$(_set_api_version "${AZDO_ORG_URI}/${AZDO_PROJECT_NAME}/_apis/serviceendpoint/endpoints?api-version=" '5.1-preview.2' '5.1-preview.2')
_payload=$(_create_svc_connection_payload)
echo "${_payload}" > ./temp/casc_payload.json
_response=$( request_post \
"${_uri}" \
"${_payload}"
)
echo $_response > ./temp/casc.json
_debug_log_post "$_uri" "$_response" "$_payload"
sc_id=`cat ./temp/casc.json | jq -r .id`
_debug "Service Connection ID: ${sc_id}"
sleep 10
_uri=$(_set_api_version "${AZDO_ORG_URI}/${AZDO_PROJECT_NAME}/_apis/serviceendpoint/endpoints/${sc_id}?api-version=" '5.1-preview.2' '5.1-preview.1' )
_response=$(request_get $_uri)
echo $_response > ./temp/isready.json
_isReady=$(cat ./temp/isready.json | jq -r '.isReady')
if [ $_isReady != true ]; then
_error "Error creating AzureRM service connection"
fi
# https://docs.microsoft.com/rest/api/azure/devops/build/authorizedresources/authorize%20project%20resources?view=azure-devops-rest-5.1
# https://docs.microsoft.com/rest/api/azure/devops/build/authorizedresources/authorize%20project%20resources?view=azure-devops-server-rest-5.0
# Authorize the service connection for all pipelines.
_information "Authorizing service connection for all pipelines."
_payload=$(cat "payloads/template.authorized-resources.json" | sed 's~__SERVICE_CONNECTION_ID__~'"${sc_id}"'~')
_uri=$(_set_api_version "${AZDO_ORG_URI}/${AZDO_PROJECT_NAME}/_apis/build/authorizedresources?api-version=" '5.1-preview.1' '5.1-preview.1')
_response=$( request_patch \
"${_uri}" \
"${_payload}"
)
_debug_log_patch "$_uri" "$_response" "$_payload"
_success "AzureRM service connection created and authorized"
#az devops service-endpoint update --org ${AZDO_ORG_URI} --project ${AZDO_PROJECT_NAME} --enable-for-all true --id ${scId}
}
list_svc_connection_types() {
# AzDo Service : Service Endpoint Types List - https://docs.microsoft.com/rest/api/azure/devops/serviceendpoint/types/list?view=azure-devops-rest-5.1
# AzDo Server 2019 : Service Endpoint Types List - https://docs.microsoft.com/rest/api/azure/devops/serviceendpoint/types/list?view=azure-devops-server-rest-5.0
# GET https://dev.azure.com/{organization}/_apis/serviceendpoint/types?api-version=5.1-preview.1
request_get "${AZDO_ORG_URI}/_apis/serviceendpoint/types?api-version=5.1-preview.1" | jq '.value[].name'
}
get_svc_connection_type() {
type=$1
# AzDo Service : Service Endpoint - Get https://docs.microsoft.com/rest/api/azure/devops/serviceendpoint/endpoints/get?view=azure-devops-rest-5.1
# AzDo Server 2019 : Service Endpoint - Get https://docs.microsoft.com/rest/api/azure/devops/serviceendpoint/endpoints/get?view=azure-devops-server-rest-5.0
# GET https://dev.azure.com/{organization}/_apis/serviceendpoint/types?type={type}&api-version=5.1-preview.1
# AzDo Service : Service Endpoint - Get https://docs.microsoft.com/rest/api/azure/devops/serviceendpoint/endpoints/get?view=azure-devops-rest-5.1
# AzDo Server 2019 : Service Endpoint - Get https://docs.microsoft.com/rest/api/azure/devops/serviceendpoint/endpoints/get?view=azure-devops-server-rest-5.0
# GET https://dev.azure.com/{organization}/_apis/serviceendpoint/types?type={type}&api-version=5.1-preview.1
_uri=$(_set_api_version "${AZDO_ORG_URI}/_apis/serviceendpoint/types?type=${type}&api-version=" '5.1-preview.2' '5.0-preview.2')
echo $_uri
_response=$(request_get 'https://b2020-server-vm/ProjectCollection/_apis/serviceendpoint/types?type=externaltfs')
echo $_response | jq
}
create_azdo_svc_connection() {
_information "Creating azdo service connection"
# AzDo Service : Service Endpoint - Create https://docs.microsoft.com/rest/api/azure/devops/serviceendpoint/endpoints/create?view=azure-devops-rest-5.1
# AzDo Server 2019 : Service Endpoint - Create https://docs.microsoft.com/rest/api/azure/devops/serviceendpoint/endpoints/create?view=azure-devops-server-rest-5.0
_templateFile=''
if [ "$INSTALL_TYPE" == "PAAS" ]; then
_templateFile='template.sc-ado-paas.json'
else
_templateFile='template.sc-ado-server.json'
fi
_debug "starting payload $_templateFile"
_payload=$( cat "payloads/$_templateFile" | sed 's~__ADO_ORG_NAME__~'"${AZDO_ORG_NAME}"'~' | sed 's~__ADO_ORG_URI__~'"${AZDO_ORG_URI}"'~' | sed 's~__ADO_PAT__~'"${AZDO_PAT}"'~' )
_debug "done payload"
_debug_json "$_payload"
_uri=$(_set_api_version "${AZDO_ORG_URI}/${AZDO_PROJECT_NAME}/_apis/serviceendpoint/endpoints?api-version=" '5.1-preview.1' '5.1-preview.1')
_response=$( request_post \
"${_uri}" \
"${_payload}"
)
echo $_response > ./temp/scado.json
_debug_log_post "$_uri" "$_response" "$_payload"
_scId=$(cat ./temp/scado.json | jq -r '.id')
_isReady=$(cat ./temp/scado.json | jq -r '.isReady')
if [ $_isReady != true ]; then
_error "Error creating azdo service connection"
fi
_success "azdo service connection created. service connection id: ${_scId}"
_payload=$(cat "payloads/template.sc-ado-auth.json" | sed 's~__SC_ADO_ID__~'"${_scId}"'~')
_uri=$(_set_api_version "${AZDO_ORG_URI}/${AZDO_PROJECT_NAME}/_apis/pipelines/pipelinePermissions/endpoint/${_scId}?api-version=" '5.1-preview' '5.1-preview' )
_response=$( request_patch \
"${_uri}" \
"${_payload}"
)
echo $_response > ./temp/sc-ado-auth.json
_debug_log_patch "$_uri" "$_response" "$_payload"
_allPipelinesAuthorized=$(cat ./temp/sc-ado-auth.json | jq -r '.allPipelines.authorized')
if [ $_allPipelinesAuthorized == true ]; then
_success "azdo service connection authorized for all pipelines"
fi
}
create_variable_groups() {
# AzDo Service : Variablegroups - Add https://docs.microsoft.com/rest/api/azure/devops/distributedtask/variablegroups/add?view=azure-devops-rest-5.1
# AzDo Server 2019 : Variablegroups - Add https://docs.microsoft.com/rest/api/azure/devops/distributedtask/variablegroups/add?view=azure-devops-server-rest-5.0
# POST https://dev.azure.com/{organization}/{project}/_apis/distributedtask/variablegroups?api-version=5.1-preview.1
_information "Creating Variable Groups"
local _vgName="tool_versions"
local _tfCloudEnvironment=$(get_tf_azure_clound_env)
local _cloudConfigPayload=""
if [ ! -z "${ARM_METADATA_HOST}" ]; then
_cloudConfigPayload=$_cloudConfigPayload',"ARM_METADATA_HOST":{"value":"'${ARM_METADATA_HOST}'"}'
fi
if [ "${PRIVATE_CLOUD}" == true ];then
_cloudConfigPayload=$_cloudConfigPayload',"ARM_ENVIRONMENT":{"value":"'${_tfCloudEnvironment}'"}'
_cloudConfigPayload=$_cloudConfigPayload',"AZURE_ENVIRONMENT":{"value":"'${_tfCloudEnvironment}'"}'
_cloudConfigPayload=$_cloudConfigPayload',"AZURE_CLOUD_NAME":{"value":"'${AZURE_CLOUD_ENVIRONMENT}'"}'
fi
if [ ! -z "${AZURE_ENVIRONMENT_FILEPATH}" ]; then
_cloudConfigPayload=$_cloudConfigPayload',"AZURE_ENVIRONMENT_FILEPATH":{"value":"'${AZURE_ENVIRONMENT_FILEPATH}'"}'
fi
echo $_cloudConfigPayload
_payload=$(cat "payloads/template.vg.json" | sed 's~__VG_NAME__~'"${_vgName}"'~' | sed 's~__ARM_CLOUD_CONFIGS__~'"${_cloudConfigPayload}"'~')
echo $_payload > temp/vg.payload.json
_uri=$(_set_api_version "${AZDO_ORG_URI}/${AZDO_PROJECT_NAME}/_apis/distributedtask/variablegroups?api-version=" '5.1-preview.1' '5.1-preview.1')
_response=$( request_post \
"${_uri}" \
"${_payload}"
)
echo $_response > ./temp/cvg.json
_debug_log_post "$_uri" "$_response" "$_payload"
_createVgTypeKey=$(cat ./temp/cvg.json | jq -r '.typeKey')
if [ "$_createVgTypeKey" == "VariableGroupExistsException" ]; then
_error "can't add variable group ${_vgName}. Variable group exists"
fi
_vgId=$(cat ./temp/cvg.json | jq -r '.id')
if [ "$_vgId" != null ]; then
# AzDo Service : Authorize Project Resources - https://docs.microsoft.com/rest/api/azure/devops/build/authorizedresources/authorize%20project%20resources?view=azure-devops-rest-5.1
# AzDo Server 2019 : Authorize Project Resources - https://docs.microsoft.com/rest/api/azure/devops/build/authorizedresources/authorize%20project%20resources?view=azure-devops-server-rest-5.0
# PATCH https://dev.azure.com/{organization}/{project}/_apis/build/authorizedresources?api-version=5.1-preview.1
_payload=$(cat "payloads/template.vg-auth.json" | sed 's~__VG_ID__~'"${_vgId}"'~' | sed 's~__VG_NAME__~'"${_vgName}"'~')
_uri=$(_set_api_version "${AZDO_ORG_URI}/${AZDO_PROJECT_NAME}/_apis/build/authorizedresources?api-version=" '5.1-preview.1' '5.1-preview.1')
_response=$( request_patch \
"${_uri}" \
"${_payload}"
)
echo $_response > ./temp/vgauth.json
_debug_log_patch "$_uri" "$_response" "$_payload"
_vgAuthorized=$(cat ./temp/vgauth.json | jq -r --arg _vgId "$_vgId" '.value[] | select( (.type == "variablegroup") and (.id == $_vgId)) | .authorized')
_debug "Variable Group Authrized ${_vgAuthorized}"
if [ $_vgAuthorized == true ]; then
_success "variable group ${_vgName} created and authorized for all pipelines."
fi
fi
}
create_and_upload_pr_state_secfile() {
local _fileName="pr.storage.init.state"
touch ./$_fileName
# POST https://dev.azure.com/{organization}/{project}/_apis/distributedtask/securefiles?api-version=6.0-preview.1&name={fileName}
_information "Uploading PR State File to Secure Files"
_uri=$(_set_api_version "${AZDO_ORG_URI}/${AZDO_PROJECT_NAME}/_apis/distributedtask/securefiles?name=${_fileName}&api-version=" '6.0-preview.1' '5.1-preview.1')
local _response=$(request_post_binary "${_uri}" "${_fileName}")
_debug_log_post_binary "$_uri" "$_response" "$_fileName"
echo $_response > ./temp/usf.json
_id=$(cat ./temp/usf.json | jq -c -r '.id')
_debug "Secure File ID: ${_id}"
# PATCH https://dev.azure.com/{organization}/{project}/_apis/build/authorizedresources?api-version=5.1-preview.1
_uri=$(_set_api_version "${AZDO_ORG_URI}/${AZDO_PROJECT_NAME}/_apis/build/authorizedresources?api-version=" '5.1-preview.1' '5.1-preview.1')
_payload="[{\"authorized\":true,\"id\":\"${_id}\",\"name\":\"${_fileName}\",\"type\":\"securefile\"}]"
_response=$( request_patch \
"${_uri}" \
"${_payload}"
)
echo "${_response}"
_debug_log_patch "$_uri" "$_response" "$_payload"
rm ./$_fileName
}
create_pr_variable_group() {
    # Creates the "pullrequest.state" variable group and authorizes it for
    # use by all pipelines. No-op on Azure DevOps Server installs.
    # Globals read: INSTALL_TYPE, AZDO_ORG_URI, AZDO_PROJECT_NAME
    if [ "${INSTALL_TYPE}" == 'SERVER' ]; then
        _information "Install targets Azure DevOps Server 2020. Skipping PR Variable Group creation."
        return 0
    fi
    # AzDo Service : Variablegroups - Add https://docs.microsoft.com/rest/api/azure/devops/distributedtask/variablegroups/add?view=azure-devops-rest-5.1
    # AzDo Server 2019 : Variablegroups - Add https://docs.microsoft.com/rest/api/azure/devops/distributedtask/variablegroups/add?view=azure-devops-server-rest-5.0
    # POST https://dev.azure.com/{organization}/{project}/_apis/distributedtask/variablegroups?api-version=5.1-preview.1
    _information "Creating PR Variable Group"
    local _vgName="pullrequest.state"
    local _cloudConfigPayload=""
    _payload=$(cat "payloads/template.vg.pr.json" | sed 's~__VG_NAME__~'"${_vgName}"'~')
    echo $_payload > temp/vg.payload.json
    _uri=$(_set_api_version "${AZDO_ORG_URI}/${AZDO_PROJECT_NAME}/_apis/distributedtask/variablegroups?api-version=" '5.1-preview.1' '5.1-preview.1')
    _response=$( request_post \
        "${_uri}" \
        "${_payload}"
    )
    echo $_response > ./temp/cprvg.json
    _debug_log_post "$_uri" "$_response" "$_payload"
    _createVgTypeKey=$(cat ./temp/cprvg.json | jq -r '.typeKey')
    if [ "$_createVgTypeKey" == "VariableGroupExistsException" ]; then
        _error "can't add variable group ${_vgName}. Variable group exists"
    fi
    _vgId=$(cat ./temp/cprvg.json | jq -r '.id')
    # jq prints the literal string "null" when .id is absent.
    if [ "$_vgId" != null ]; then
        # AzDo Service : Authorize Project Resources - https://docs.microsoft.com/rest/api/azure/devops/build/authorizedresources/authorize%20project%20resources?view=azure-devops-rest-5.1
        # AzDo Server 2019 : Authorize Project Resources - https://docs.microsoft.com/rest/api/azure/devops/build/authorizedresources/authorize%20project%20resources?view=azure-devops-server-rest-5.0
        # PATCH https://dev.azure.com/{organization}/{project}/_apis/build/authorizedresources?api-version=5.1-preview.1
        _payload=$(cat "payloads/template.vg-auth.json" | sed 's~__VG_ID__~'"${_vgId}"'~' | sed 's~__VG_NAME__~'"${_vgName}"'~')
        _uri=$(_set_api_version "${AZDO_ORG_URI}/${AZDO_PROJECT_NAME}/_apis/build/authorizedresources?api-version=" '5.1-preview.1' '5.1-preview.1')
        _response=$( request_patch \
            "${_uri}" \
            "${_payload}"
        )
        echo $_response > ./temp/vg.pr.auth.json
        _debug_log_patch "$_uri" "$_response" "$_payload"
        local _vgAuthorized=$(cat ./temp/vg.pr.auth.json | jq -r --arg _vgId "$_vgId" '.value[] | select( (.type == "variablegroup") and (.id == $_vgId)) | .authorized')
        _debug "PR Variable Group Authorized |${_vgAuthorized}|"
        # BUGFIX: quote the expansion — an empty _vgAuthorized previously made
        # this test expand to '[ == true ]', a runtime error.
        if [ "$_vgAuthorized" == true ]; then
            _success "PR variable group created and authorized for all pipelines."
        fi
    fi
}
_list_pipelines() {
    # Lists every build (pipeline) definition in the project and prints the
    # raw JSON response.
    # GET https://dev.azure.com/{organization}/{project}/_apis/build/definitions?api-version=5.1
    _uri="${AZDO_ORG_URI}/${AZDO_PROJECT_NAME}/_apis/build/definitions/?api-version=5.1"
    request_get "${_uri}"
}
_get_agent_pool_queue() {
    # Resolves the agent pool queue to use: prefers a queue whose name
    # contains "Ubuntu", otherwise falls back to the "Default" queue, and
    # prints a small JSON object with the chosen queue's id and name.
    # NOTE(review): this definition is shadowed by a later redefinition of
    # _get_agent_pool_queue in this file (which builds the URI from
    # AZDO_ORG_URI instead of hard-coding dev.azure.com), so this copy is
    # never the one invoked. Consider removing one of the two.
    # https://docs.microsoft.com/rest/api/azure/devops/distributedtask/queues/get%20agent%20queues?view=azure-devops-rest-5.1
    _uri="https://dev.azure.com/${AZDO_ORG_NAME}/${AZDO_PROJECT_NAME}/_apis/distributedtask/queues?api-version=5.1-preview.1"
    _response=$(request_get $_uri)
    _is_ubuntu=$(echo $_response | jq '.value[] | select( .name | contains("Ubuntu") )')
    if [ -z "${_is_ubuntu}" ]; then
        _default_pool=$(echo $_response | jq '.value[] | select( .name | contains("Default") )')
        agent_pool_queue_id=$(echo $_default_pool | jq -r '.id')
        agent_pool_queue_name=$(echo $_default_pool | jq -r '.name')
    else
        agent_pool_queue_id=$(echo $_is_ubuntu | jq -r '.id')
        agent_pool_queue_name=$(echo $_is_ubuntu | jq -r '.name')
    fi
    echo "{\"agent_pool_queue_id\":\"$agent_pool_queue_id\",\"agent_pool_queue_name\":\"$agent_pool_queue_name\"}"
}
_get_agent_pool_queue() {
    # Resolves the agent pool queue to use: prefers a queue whose name
    # contains "Ubuntu", otherwise falls back to the "Default" queue, and
    # prints a JSON object with the chosen queue's id and name.
    # https://docs.microsoft.com/rest/api/azure/devops/distributedtask/queues/get%20agent%20queues?view=azure-devops-rest-5.1
    local _uri="${AZDO_ORG_URI}/${AZDO_PROJECT_NAME}/_apis/distributedtask/queues?api-version=5.1-preview.1"
    _response=$(request_get $_uri)
    _is_ubuntu=$(echo $_response | jq '.value[] | select( .name | contains("Ubuntu") )')
    if [ -n "${_is_ubuntu}" ]; then
        agent_pool_queue_id=$(echo $_is_ubuntu | jq -r '.id')
        agent_pool_queue_name=$(echo $_is_ubuntu | jq -r '.name')
    else
        _default_pool=$(echo $_response | jq '.value[] | select( .name | contains("Default") )')
        agent_pool_queue_id=$(echo $_default_pool | jq -r '.id')
        agent_pool_queue_name=$(echo $_default_pool | jq -r '.name')
    fi
    printf '{"agent_pool_queue_id":"%s","agent_pool_queue_name":"%s"}\n' "$agent_pool_queue_id" "$agent_pool_queue_name"
}
_create_pipeline() {
    # Creates one YAML build definition and authorizes it to use the agent
    # pool queue, the terraform code repo and the terraform pipeline repo.
    # AzDo Service : Definitions - Create https://docs.microsoft.com/rest/api/azure/devops/build/definitions/create?view=azure-devops-rest-5.1
    # AzDo Server 2019 : Definitions - Create https://docs.microsoft.com/rest/api/azure/devops/build/definitions/create?view=azure-devops-server-rest-5.0
    # POST https://dev.azure.com/{organization}/{project}/_apis/build/definitions?api-version=5.1
    # usage: _create_pipeline storageinit "/azure-pipelines/pipeline.storageinit.yml"
    # Args:
    #   1 - pipeline name
    #   2 - path of the YAML file within the repo
    #   3 - definition folder path
    #   4 - JSON fragment with the definition variables
    #   5 - name of the repo holding the YAML
    # Globals written: envCompilePipelineId, prPipelineId (consumed later when
    # those pipelines are queued).
    _information "Creating pipelines..."
    local _template_file="payloads/template.pipeline-create.json"
    local _name="${1}"
    local _yaml_path=${2}
    local _folder_path=${3}
    local _variables=${4}
    local _pipelineRepoName=${5}
    # Resolve the agent pool first so the definition references a valid queue.
    local _agent_queue
    _agent_queue=$(_get_agent_pool_queue)
    local _agent_pool_queue_id
    _agent_pool_queue_id=$(echo $_agent_queue | jq -c -r '.agent_pool_queue_id')
    local _agent_pool_queue_name
    _agent_pool_queue_name=$(echo $_agent_queue | jq -c -r '.agent_pool_queue_name')
    local _branch_name="master"
    local _uri
    _uri=$(_set_api_version "${AZDO_ORG_URI}/${AZDO_PROJECT_NAME}/_apis/build/definitions?api-version=" '5.1' '5.1')
    local _payload=$( cat "${_template_file}" \
        | sed 's~__ADO_PIPELINE_NAME__~'"${_name}"'~' \
        | sed 's~__ADO_PIPELINE_FOLDER_PATH__~'"${_folder_path}"'~' \
        | sed 's~__ADO_PIPELINE_REPO_BRANCH__~'"${_branch_name}"'~' \
        | sed 's~__ADO_PIPELINE_REPO_NAME__~'"${_pipelineRepoName}"'~' \
        | sed 's~__ADO_PIPELINE_YAML_FILE_PATH__~'"${_yaml_path}"'~' \
        | sed 's~__ADO_PIPELINE_VARIABLES__~'"${_variables}"'~' \
        | sed 's~__ADO_POOL_ID__~'"${_agent_pool_queue_id}"'~' \
        | sed 's~__ADO_POOL_NAME__~'"${_agent_pool_queue_name}"'~' \
        | sed 's~__AZDO_ORG_URI__~'"${AZDO_ORG_URI}"'~' \
    )
    local _response
    _response=$(request_post "${_uri}" "${_payload}")
    echo $_payload > ./temp/${_name}-cp-payload.json
    echo $_response > ./temp/${_name}-cp.json
    _debug_log_post "$_uri" "$_response" "$_payload"
    local _createPipelineTypeKey
    _createPipelineTypeKey=$(cat ./temp/${_name}-cp.json | jq -r '.typeKey')
    if [ "$_createPipelineTypeKey" == "DefinitionExistsException" ]; then
        _error "Pipeline ${_name} already exists."
    fi
    local _pipeId
    _pipeId=$(cat ./temp/${_name}-cp.json | jq -r '.id')
    # Authorize the new pipeline to use the agent pool queue.
    _uri=$(_set_api_version "${AZDO_ORG_URI}/${AZDO_PROJECT_NAME}/_apis/pipelines/pipelinePermissions/queue/${_agent_pool_queue_id}?api-version=" '5.1-preview.1' '5.1-preview.1')
    _debug "${_uri}"
    _payload=$( cat "payloads/template.pipeline-authorize.json" \
        | sed 's~__PIPELINE_ID__~'"${_pipeId}"'~' \
    )
    _response=$(request_patch "${_uri}" "${_payload}")
    echo $_payload > ./temp/${_name}-cp-authorize-payload.json
    echo $_response > ./temp/${_name}-cp-authorize.json
    # Authorize Terraform-Code Repo Access for Pipeline
    _uri=$(_set_api_version "${AZDO_ORG_URI}/${AZDO_PROJECT_NAME}/_apis/pipelines/pipelinePermissions/repository/${AZDO_PROJECT_ID}.${CODE_REPO_ID}?api-version=" '5.1-preview.1' '5.1-preview.1')
    _debug "${_uri}"
    _payload=$( cat "payloads/template.pipeline-authorize.json" \
        | sed 's~__PIPELINE_ID__~'"${_pipeId}"'~' \
    )
    _response=$(request_patch "${_uri}" "${_payload}")
    echo $_payload > ./temp/${_name}-cp-authorize-code-repo-payload.json
    echo $_response > ./temp/${_name}-cp-authorize-code-repo.json
    # Authorize Terraform-Pipeline Repo Access for Pipeline
    _uri=$(_set_api_version "${AZDO_ORG_URI}/${AZDO_PROJECT_NAME}/_apis/pipelines/pipelinePermissions/repository/${AZDO_PROJECT_ID}.${PIPELINE_REPO_ID}?api-version=" '5.1-preview.1' '5.1-preview.1')
    _debug "${_uri}"
    _payload=$( cat "payloads/template.pipeline-authorize.json" \
        | sed 's~__PIPELINE_ID__~'"${_pipeId}"'~' \
    )
    _response=$(request_patch "${_uri}" "${_payload}")
    echo $_payload > ./temp/${_name}-cp-authorize-pipeline-repo-payload.json
    echo $_response > ./temp/${_name}-cp-authorize-pipeline-repo.json
    # Record well-known pipeline ids and report success once.
    # BUGFIX: an identical success block previously also ran right after the
    # queue authorization, producing a duplicate "Created Pipeline" message
    # (and setting envCompilePipelineId twice); it has been removed.
    if [ "$_pipeId" != null ]; then
        if [ "${_name}" == "env.compile" ]; then
            envCompilePipelineId=$_pipeId
        fi
        if [ "${_name}" == "pr" ]; then
            prPipelineId=$_pipeId
        fi
        _success "Created Pipeline ${_name} - id:${_pipeId}"
    fi
}
_get_pipeline_var_defintion() {
    # Renders the pipeline-variable JSON template with the supplied name,
    # value and allow-override flag (secret is always false), and prints
    # the resulting JSON fragment on stdout.
    # Args: 1 - variable name, 2 - variable value, 3 - allowOverride (true/false)
    local _varName=${1}
    local _varValue=${2}
    local _varOverride=${3}
    local _rendered
    _rendered=$(sed \
        -e 's~__PIPELINE_VAR_NAME__~'"${_varName}"'~' \
        -e 's~__PIPELINE_VAR_VALUE__~'"${_varValue}"'~' \
        -e 's~__PIPELINE_VAR_IS_SECRET__~'false'~' \
        -e 's~__PIPELINE_ALLOW_OVERRIDE__~'"${_varOverride}"'~' \
        "payloads/template.pipeline-variable.json")
    echo $_rendered
}
get_tf_azure_clound_env() {
    # Maps the Azure cloud environment name (AZURE_CLOUD_ENVIRONMENT) to the
    # value Terraform's azurerm provider expects for "environment".
    # Prints an empty string for unrecognized cloud names.
    local _tf_env=''
    case "$AZURE_CLOUD_ENVIRONMENT" in
        AzureCloud)        _tf_env='public' ;;
        AzureChinaCloud)   _tf_env='china' ;;
        AzureUSGovernment) _tf_env='usgovernment' ;;
        AzureGermanCloud)  _tf_env='german' ;;
    esac
    echo $_tf_env
}
create_pipelines() {
    # Creates every Azure Pipeline used by the solution: PR pipelines (hosted
    # service only), dev/prod environment pipelines, and the shared utility
    # pipelines (compile, tfplan/tfapply/tfdestroy, etc.). Each definition's
    # variables are assembled with _get_pipeline_var_defintion.
    # Globals read: INSTALL_TYPE, AZDO_SC_AZURERM_NAME, PIPELINE_REPO_NAME
    echo "Creating Azure Pipelines "
    local pipelineVariables
    # Create PR pipelines (the PR flow is only supported on the hosted service)
    if [ "${INSTALL_TYPE}" == 'PAAS' ]; then
        pipelineVariables=$(_get_pipeline_var_defintion environment pr false)
        pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion INSTALL_TYPE ${INSTALL_TYPE} false)"
        pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion autoDestroy true true)"
        pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion tfCodeBranch master true)"
        pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion DEPLOYMENT_DIR "" true)"
        pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion NODE_TLS_REJECT_UNAUTHORIZED 0 false)"
        pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion FULL_DEPLOYMENT false false)"
        pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion azureSubscription ${AZDO_SC_AZURERM_NAME} false)"
        pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion gitDiffBaseBranch master true)"
        _create_pipeline pr "/azure-pipelines/pipeline.pr.yml" "infrastructure/utility" "${pipelineVariables}" "${PIPELINE_REPO_NAME}"
        pipelineVariables=$(_get_pipeline_var_defintion environment pr false)
        pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion INSTALL_TYPE ${INSTALL_TYPE} false)"
        pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion tfCodeBranch master true)"
        pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion NODE_TLS_REJECT_UNAUTHORIZED 0 false)"
        pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion azureSubscription ${AZDO_SC_AZURERM_NAME} false)"
        pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion resetTFStateContainer false true)"
        _create_pipeline pr.storageinit "/azure-pipelines/pipeline.storageinit.yml" "infrastructure/utility" "${pipelineVariables}" "${PIPELINE_REPO_NAME}"
        pipelineVariables=$(_get_pipeline_var_defintion environment pr false)
        pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion INSTALL_TYPE ${INSTALL_TYPE} false)"
        pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion NODE_TLS_REJECT_UNAUTHORIZED 0 false)"
        pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion azureSubscription ${AZDO_SC_AZURERM_NAME} false)"
        _create_pipeline pr.backupremotestate "/azure-pipelines/pipeline.backupremotestate.yml" "infrastructure/utility" "${pipelineVariables}" "${PIPELINE_REPO_NAME}"
        pipelineVariables=$(_get_pipeline_var_defintion environment pr false)
        pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion INSTALL_TYPE ${INSTALL_TYPE} false)"
        pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion tfCodeBranch master true)"
        pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion NODE_TLS_REJECT_UNAUTHORIZED 0 false)"
        pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion DEPLOYMENT_DIR "" true)"
        pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion azureSubscription ${AZDO_SC_AZURERM_NAME} false)"
        pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion FULL_DEPLOYMENT false true)"
        pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion gitDiffBaseBranch master true)"
        pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion gitDiffCompareBranch \$\(Build.SourceBranch\) true)"
        _create_pipeline pr.infrastructure "/azure-pipelines/pipeline.infrastructure.yml" "infrastructure" "${pipelineVariables}" "${PIPELINE_REPO_NAME}"
    fi
    # Create Dev pipelines
    pipelineVariables=$(_get_pipeline_var_defintion environment dev false)
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion INSTALL_TYPE ${INSTALL_TYPE} false)"
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion tfCodeBranch master true)"
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion NODE_TLS_REJECT_UNAUTHORIZED 0 false)"
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion azureSubscription ${AZDO_SC_AZURERM_NAME} false)"
    _create_pipeline dev.storageinit "/azure-pipelines/pipeline.storageinit.yml" "infrastructure/utility" "${pipelineVariables}" "${PIPELINE_REPO_NAME}"
    pipelineVariables=$(_get_pipeline_var_defintion environment dev false)
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion INSTALL_TYPE ${INSTALL_TYPE} false)"
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion NODE_TLS_REJECT_UNAUTHORIZED 0 false)"
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion azureSubscription ${AZDO_SC_AZURERM_NAME} false)"
    _create_pipeline dev.backupremotestate "/azure-pipelines/pipeline.backupremotestate.yml" "infrastructure/utility" "${pipelineVariables}" "${PIPELINE_REPO_NAME}"
    pipelineVariables=$(_get_pipeline_var_defintion environment dev false)
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion INSTALL_TYPE ${INSTALL_TYPE} false)"
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion tfCodeBranch master true)"
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion NODE_TLS_REJECT_UNAUTHORIZED 0 false)"
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion DEPLOYMENT_DIR "" true)"
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion azureSubscription ${AZDO_SC_AZURERM_NAME} false)"
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion FULL_DEPLOYMENT false true)"
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion gitDiffBaseBranch master true)"
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion gitDiffCompareBranch \$\(Build.SourceBranch\) true)"
    _create_pipeline dev.infrastructure "/azure-pipelines/pipeline.infrastructure.yml" "infrastructure" "${pipelineVariables}" "${PIPELINE_REPO_NAME}"
    # Create Prod pipelines
    pipelineVariables=$(_get_pipeline_var_defintion environment prod false)
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion INSTALL_TYPE ${INSTALL_TYPE} false)"
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion tfCodeBranch master true)"
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion NODE_TLS_REJECT_UNAUTHORIZED 0 false)"
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion azureSubscription ${AZDO_SC_AZURERM_NAME} false)"
    _create_pipeline prod.storageinit "/azure-pipelines/pipeline.storageinit.yml" "infrastructure/utility" "${pipelineVariables}" "${PIPELINE_REPO_NAME}"
    pipelineVariables=$(_get_pipeline_var_defintion environment prod false)
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion INSTALL_TYPE ${INSTALL_TYPE} false)"
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion NODE_TLS_REJECT_UNAUTHORIZED 0 false)"
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion azureSubscription ${AZDO_SC_AZURERM_NAME} false)"
    _create_pipeline prod.backupremotestate "/azure-pipelines/pipeline.backupremotestate.yml" "infrastructure/utility" "${pipelineVariables}" "${PIPELINE_REPO_NAME}"
    pipelineVariables=$(_get_pipeline_var_defintion environment prod false)
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion INSTALL_TYPE ${INSTALL_TYPE} false)"
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion tfCodeBranch master true)"
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion NODE_TLS_REJECT_UNAUTHORIZED 0 false)"
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion azureSubscription ${AZDO_SC_AZURERM_NAME} false)"
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion FULL_DEPLOYMENT false true)"
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion gitDiffBaseBranch master true)"
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion gitDiffCompareBranch \$\(Build.SourceBranch\) true)"
    _create_pipeline prod.infrastructure "/azure-pipelines/pipeline.infrastructure.yml" "infrastructure" "${pipelineVariables}" "${PIPELINE_REPO_NAME}"
    # Create shared utility pipelines
    pipelineVariables=$(_get_pipeline_var_defintion environment dev true)
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion tfCodeBranch master true)"
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion INSTALL_TYPE ${INSTALL_TYPE} false)"
    _create_pipeline env.compile "/azure-pipelines/pipeline.compile.env.yml" "infrastructure/shared" "${pipelineVariables}" "${PIPELINE_REPO_NAME}"
    pipelineVariables=$(_get_pipeline_var_defintion environment dev true)
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion INSTALL_TYPE ${INSTALL_TYPE} false)"
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion tfCodeBranch master true)"
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion NODE_TLS_REJECT_UNAUTHORIZED 0 false)"
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion azureSubscription ${AZDO_SC_AZURERM_NAME} true)"
    # BUGFIX: the DEPLOYMENT_DIR variable was appended twice here; the
    # duplicate line has been removed.
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion DEPLOYMENT_DIR "XX_layer/XX_deployment" true)"
    _create_pipeline tfapply "/azure-pipelines/pipeline.tfapply.yml" "infrastructure/shared" "${pipelineVariables}" "${PIPELINE_REPO_NAME}"
    pipelineVariables=$(_get_pipeline_var_defintion environment dev true)
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion INSTALL_TYPE ${INSTALL_TYPE} false)"
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion tfCodeBranch master true)"
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion NODE_TLS_REJECT_UNAUTHORIZED 0 false)"
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion azureSubscription ${AZDO_SC_AZURERM_NAME} true)"
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion DEPLOYMENT_DIR "XX_layer/XX_deployment" true)"
    _create_pipeline tfplan "/azure-pipelines/pipeline.tfplan.yml" "infrastructure/shared" "${pipelineVariables}" "${PIPELINE_REPO_NAME}"
    pipelineVariables=$(_get_pipeline_var_defintion environment dev true)
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion INSTALL_TYPE ${INSTALL_TYPE} false)"
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion tfCodeBranch master true)"
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion NODE_TLS_REJECT_UNAUTHORIZED 0 false)"
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion azureSubscription ${AZDO_SC_AZURERM_NAME} true)"
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion DEPLOYMENT_DIR "XX_layer/XX_deployment" true)"
    _create_pipeline tfdestroy "/azure-pipelines/pipeline.tfdestroy.yml" "infrastructure/shared" "${pipelineVariables}" "${PIPELINE_REPO_NAME}"
    pipelineVariables=$(_get_pipeline_var_defintion environment dev true)
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion INSTALL_TYPE ${INSTALL_TYPE} false)"
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion tfCodeBranch master true)"
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion NODE_TLS_REJECT_UNAUTHORIZED 0 false)"
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion azureSubscription ${AZDO_SC_AZURERM_NAME} true)"
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion DEPLOYMENT_DIR "" true)"
    _create_pipeline tfdestroy.full "/azure-pipelines/pipeline.tfdestroy.full.yml" "infrastructure/shared" "${pipelineVariables}" "${PIPELINE_REPO_NAME}"
    pipelineVariables=$(_get_pipeline_var_defintion environment dev true)
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion INSTALL_TYPE ${INSTALL_TYPE} false)"
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion tfCodeBranch master true)"
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion NODE_TLS_REJECT_UNAUTHORIZED 0 false)"
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion azureSubscription ${AZDO_SC_AZURERM_NAME} true)"
    pipelineVariables="$pipelineVariables, $(_get_pipeline_var_defintion DEPLOYMENT_DIR "" true)"
    _create_pipeline generate.tfdestroy.full "/azure-pipelines/pipeline.generate.tfdestroy.full.yml" "infrastructure/utility" "${pipelineVariables}" "${PIPELINE_REPO_NAME}"
}
list_users() {
    # Lists all users in the Azure DevOps organization via the Graph API
    # and prints the raw JSON response.
    _uri=$(_set_api_version "https://vssps.dev.azure.com/${AZDO_ORG_NAME}/_apis/graph/users?api-version=" '5.0-preview.1' '5.0-preview.1')
    request_get "${_uri}"
}
grant_perms_build_svc_account() {
    # Grants the project's Build Service account "Allow Queue" build
    # permission via the AccessControlEntries API. The Graph APIs used here
    # are not available on-prem, so this is a no-op for SERVER installs.
    if [ "${INSTALL_TYPE}" == 'SERVER' ]; then
        _information "Granting the build service account 'ALLOW QUEUE' permissions is not supported on Azure DevOps Server. Skipping..."
        return 0
    fi
    _information "Granting Build Service Account - Allow Queue permissions"
    # AzDo Service : Users - Get https://docs.microsoft.com/rest/api/azure/devops/graph/users/get?view=azure-devops-rest-5.0
    # AzDo Server 2019 : Users - Get - ** Not available for Azure DevOps Server 2019 **
    # GET https://vssps.dev.azure.com/<org>/_apis/graph/users/?api-version=5.0-preview.1
    _uri=$(_set_api_version "https://vssps.dev.azure.com/${AZDO_ORG_NAME}/_apis/graph/users?api-version=" '5.0-preview.1' '5.0-preview.1')
    _response=$(request_get $_uri)
    _debug_log_get "$_uri" "$_response"
    echo $_response > ./temp/getuser.json
    _principalName=$(cat ./temp/getuser.json | jq -c -r '.value[] | select( .displayName == "'"${AZDO_PROJECT_NAME}"' Build Service ('"${AZDO_ORG_NAME}"')" ) | .principalName')
    # AzDo Service : Groups - Get https://docs.microsoft.com/rest/api/azure/devops/graph/groups/get?view=azure-devops-rest-5.1
    # AzDo Server 2019 : Groups - Get ** Not available for Azure DevOps Server 2019 **
    # GET https://vssps.dev.azure.com/{organization}/_apis/graph/groups/{groupDescriptor}?api-version=5.1-preview.1
    _uri=$(_set_api_version "https://vssps.dev.azure.com/${AZDO_ORG_NAME}/_apis/graph/groups?api-version=" '5.1-preview.1' '5.1-preview.1')
    _response=$(request_get $_uri)
    _debug_log_get "$_uri" "$_response"
    echo $_response > ./temp/getgroups.json
    # Extract the GUID embedded in the group's domain string; macOS ships a
    # BSD grep without -P, so GNU grep (ggrep) is required there.
    if [[ "$OSTYPE" == "darwin"* ]]; then
        # Mac OSX
        _groupDomainId=$(cat ./temp/getgroups.json | jq -c -r '.value[] | select( .displayName == "Enterprise Service Accounts" ) | .domain' | ggrep -oP '[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}')
    else
        # All Others
        # https://vssps.dev.azure.com/<org>/_apis/graph/groups?api-version=5.1-preview.1
        _groupDomainId=$(cat ./temp/getgroups.json | jq -c -r '.value[] | select( .displayName == "Enterprise Service Accounts" ) | .domain' | grep -oP '[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}')
    fi
    # AzDo Service : Security Namespaces - Query https://docs.microsoft.com/rest/api/azure/devops/security/security%20namespaces/query?view=azure-devops-rest-5.1
    # AzDo Server 2019 : Security Namespaces - Query https://docs.microsoft.com/rest/api/azure/devops/security/security%20namespaces/query?view=azure-devops-server-rest-5.0
    # GET https://dev.azure.com/<org>/_apis/securitynamespaces?api-version=5.1
    _uri=$(_set_api_version "${AZDO_ORG_URI}/_apis/securitynamespaces?api-version=" '5.1-preview.1' '5.1')
    _response=$(request_get $_uri)
    _debug_log_get "$_uri" "$_response"
    echo $_response > ./temp/getsecnamespaces.json
    _namespaceId=$(cat ./temp/getsecnamespaces.json | jq -c -r '.value[] | select( .name == "Build" ) | .namespaceId')
    _debug "Namespace id: ${_namespaceId}"
    # AzDo Service : Access Control Entries - Set Access Control Entries - https://docs.microsoft.com/rest/api/azure/devops/security/access%20control%20entries/set%20access%20control%20entries?view=azure-devops-server-rest-5.0
    # AzDo Server 2019 : Access Control Entries - Set Access Control Entries - https://docs.microsoft.com/rest/api/azure/devops/security/access%20control%20entries/set%20access%20control%20entries?view=azure-devops-server-rest-5.0
    # POST https://dev.azure.com/<org>/_apis/AccessControlEntries/33344d9c-fc72-4d6f-aba5-fa317101a7e9?api-version=5.1
    _payload=$(cat "payloads/template.perm.json" | sed 's~__PRINCIPAL_NAME__~'"${_principalName}"'~' | sed 's~__GROUP_DOMAIN_ID__~'"${_groupDomainId}"'~')
    echo $_payload > ./temp/perm.json
    _uri=$(_set_api_version "${AZDO_ORG_URI}/_apis/AccessControlEntries/${_namespaceId}?api-version=" '5.1' '5.1')
    # BUGFIX: capture the POST response. Previously the response was dumped
    # to stdout and a stale _response (from the earlier securitynamespaces
    # GET) was written to setace.json.
    _response=$(request_post "$_uri" "$_payload")
    echo $_response > ./temp/setace.json
    _success "\nPermissions granted"
}
grant_perms_build_svc_account_library() {
    # Grants the Build Service account administrator/user rights on the
    # project's variable-group library via the securityroles API. Resolves
    # the account's identity differently for on-prem (SERVER) vs hosted.
    _information "Granting Build Service Account for Lib - Allow Queue permissions"
    _information "AZDO_PROJECT_ID = ${AZDO_PROJECT_ID}"
    # AzDo Service : Users - Get https://docs.microsoft.com/rest/api/azure/devops/graph/users/get?view=azure-devops-rest-5.0
    # AzDo Server 2019 : Users - Get - ** Not available for Azure DevOps Server 2019 **
    # GET https://vssps.dev.azure.com/<org>/_apis/graph/users/?api-version=5.0-preview.1
    local _principalName=''
    local _originId=''
    if [ "${INSTALL_TYPE}" == 'SERVER' ]; then
        # Give the server a moment to provision the build service identity.
        sleep 10
        # BUGFIX: the query string previously read "?api-version" without the
        # trailing '=', so _set_api_version produced a malformed URI.
        _uri=$(_set_api_version "${AZDO_ORG_URI}/_apis/securityroles/scopes/distributedtask.library/roleassignments/resources/${AZDO_PROJECT_ID}%240?api-version=" '5.0-preview.1' '5.0-preview.1')
        _debug "roleassignments uri: $_uri"
        _response=$(request_get $_uri)
        echo $_response > ./temp/getuser.json
        _principalName=${AZDO_PROJECT_ID}
        _originId=$(cat ./temp/getuser.json | jq -c -r '.value[].identity | select( .displayName == "'"${AZDO_PROJECT_NAME} Build Service (ProjectCollection)"'" ) | .id')
    else
        _uri=$(_set_api_version "https://vssps.dev.azure.com/${AZDO_ORG_NAME}/_apis/graph/users?api-version=" '5.0-preview.1' '5.0-preview.1')
        _response=$(request_get $_uri)
        _debug_log_get "$_uri" "$_response"
        echo $_response > ./temp/getuser.json
        _principalName=$(cat ./temp/getuser.json | jq -c -r '.value[] | select( .displayName == "'"${AZDO_PROJECT_NAME}"' Build Service ('"${AZDO_ORG_NAME}"')" ) | .principalName')
        _originId=$(cat ./temp/getuser.json | jq -c -r '.value[] | select( .displayName == "'"${AZDO_PROJECT_NAME}"' Build Service ('"${AZDO_ORG_NAME}"')" ) | .originId')
    fi
    _debug "_principalName: $_principalName"
    _debug "_originId: $_originId"
    _uri=$(_set_api_version "${AZDO_ORG_URI}/_apis/securityroles/scopes/distributedtask.library/roleassignments/resources/${_principalName}%240?api-version=" '5.1-preview.1' '5.1-preview.1')
    _debug $_uri
    _payload=$(cat "payloads/template.build-service-library-permissions.json" | sed 's~__ORIGIN_ID__~'"${_originId}"'~')
    _debug $_payload
    # BUGFIX: quote the arguments — the unquoted JSON payload was previously
    # word-split into many separate arguments to request_put.
    _response=$(request_put "$_uri" "$_payload")
    echo $_response > ./temp/build-service-permission-response.json
    _success "\nPermissions granted"
}
updateAgentPools(){
    # On Azure DevOps Server, rewrites the pipeline YAML in the pipeline repo
    # so that jobs target the on-prem "Default" agent pool instead of the
    # hosted ubuntu-latest VM image, then commits and pushes the changes.
    # No-op on the hosted service (PAAS).
    # Globals read: INSTALL_TYPE, PIPELINE_REPO_GIT_HTTP_URL,
    #               AZDO_PROJECT_NAME, PIPELINE_REPO_NAME
    if [ "$INSTALL_TYPE" == "PAAS" ]; then
        _information "Running on Azure DevOps Service. Skipping Agent Pool Update of yaml."
    else
        _information "Running on Azure DevOps Server! Updating Agent Pool references to use Default agent pool."
        _information "Cloning Project Repo: "$PIPELINE_REPO_GIT_HTTP_URL
        TEMP_DIR=~/git_repos/${AZDO_PROJECT_NAME}/${PIPELINE_REPO_NAME}
        clone_repo $TEMP_DIR $PIPELINE_REPO_GIT_HTTP_URL
        pushd $TEMP_DIR
        # Find every YAML file that references the hosted ubuntu image;
        # '|| true' keeps set -e (if enabled) from aborting when none match.
        yamlFiles=$(grep -lrnw "$TEMP_DIR/azure-pipelines" -e 'vmImage: "ubuntu-latest"' || true)
        if [ ! -z "${yamlFiles}" ]; then
            for file in $yamlFiles
            do
                echo "Updating pipeline yaml for: $file"
                # Order matters: drop the 'pool:' key first, then turn the
                # indented vmImage line into a 'pool: "Default"' entry.
                sed -i 's/pool://g' $file
                sed -i 's/ vmImage: "ubuntu-latest"/pool: "Default"/g' $file
            done
            #commit updated pipelines to the repo
            git add azure-pipelines/*
            git config user.email "installer@terraform-template.com"
            git config user.name "Terraform template"
            git commit -m "Adding updated Pipelines"
            git_push
        else
            _information "No yaml files found with hosted ubuntu agents. Skipping update of agent pool in yaml."
        fi
        popd
        #delete local copy of repo
        rm -rf $TEMP_DIR
    fi
}
_get_project_id() {
    # Looks up the project named by AZDO_PROJECT_NAME and prints its GUID.
    _uri="${AZDO_ORG_URI}/_apis/projects/${AZDO_PROJECT_NAME}?api-version=5.1-preview.2"
    _response=$(request_get "${_uri}")
    _projectId=$(printf '%s\n' "$_response" | jq -c -r '.id')
    echo $_projectId
}
_get_default_team() {
    # Finds the project's default team ("<project> Team") and prints its id.
    _uri="${AZDO_ORG_URI}/_apis/projects/${AZDO_PROJECT_NAME}/teams?api-version=5.1-preview.2"
    _response=$(request_get "${_uri}")
    _defaultTeamId=$(printf '%s\n' "$_response" | jq -c -r '.value[] | select(.name == "'"${AZDO_PROJECT_NAME}"' Team") | .id')
    echo $_defaultTeamId
}
_add_user() {
    # Adds a single user (by UPN) to the project's default team through the
    # user entitlements API.
    # Args: 1 - project id, 2 - default team id, 3 - user principal name
    _projectId=$1
    _defaultTeamId=$2
    _upn=$3
    _payload=$(sed \
        -e 's~__PROJECT_ID__~'"${_projectId}"'~' \
        -e 's~__TEAM_ID__~'"${_defaultTeamId}"'~' \
        -e 's~__UPN__~'"${_upn}"'~' \
        "payloads/template.add-user.json")
    _uri="https://vsaex.dev.azure.com/${AZDO_ORG_NAME}/_apis/UserEntitlements?doNotSendInviteForNewUsers=true&api-version=5.1-preview.3"
    _response=$(request_post "${_uri}" "${_payload}")
    _debug_log_post "$_uri" "$_response" "$_payload"
}
try_add_users() {
    # Adds each user listed in users.csv (one UPN per line) to the project's
    # default team. No-op on Azure DevOps Server or when users.csv is absent.
    # Globals read: INSTALL_TYPE
    if [ "${INSTALL_TYPE}" == 'SERVER' ]; then
        _information "Add users is not supported on Azure DevOps Server. Skipping..."
        return 0
    fi
    _projectId=$(_get_project_id)
    _defaultTeamId=$(_get_default_team)
    INPUT=users.csv
    if [ -f "$INPUT" ]; then
        OLDIFS=$IFS
        # Split on carriage returns so Windows (CRLF) CSVs are handled.
        IFS=$'\r'
        # BUGFIX: use read -r so backslashes in UPNs aren't mangled, and the
        # '|| [ -n "$upn" ]' guard processes a final line that lacks a
        # trailing newline (previously silently skipped).
        while read -r upn || [ -n "$upn" ]; do
            _add_user "$_projectId" "$_defaultTeamId" "$upn"
        done <"$INPUT"
        IFS=$OLDIFS
    fi
}
credScanRemovalForAzDoServer(){
    # On Azure DevOps Server, removes the CredScan task block from the
    # infrastructure stage template (the task is hosted-service only), then
    # commits and pushes the change. No-op on the hosted service (PAAS).
    # Globals read: INSTALL_TYPE, PIPELINE_REPO_GIT_HTTP_URL,
    #               AZDO_PROJECT_NAME, PIPELINE_REPO_NAME
    if [ "$INSTALL_TYPE" != "PAAS" ]; then
        _information "Running on Azure DevOps Server! Removing CredScan Task"
        _information "Cloning Project Repo: "$PIPELINE_REPO_GIT_HTTP_URL
        TEMP_DIR=~/git_repos/${AZDO_PROJECT_NAME}/${PIPELINE_REPO_NAME}
        clone_repo $TEMP_DIR $PIPELINE_REPO_GIT_HTTP_URL
        templatePath="azure-pipelines/templates/template.stage.infrastructure.yml"
        pushd $TEMP_DIR
        #Remove Cred Scan Section (multi-line delete)
        # Deletes the marker line plus the 12 lines that follow it, i.e. the
        # whole CredScan task definition; the count is coupled to the
        # template's layout.
        sed -i '/# Check for Credentials #/,+12 d' $templatePath
        #commit updated pipeline template to the repo
        git add $templatePath
        git config user.email "installer@terraform-template.com"
        git config user.name "Terraform template"
        git commit -m "Removing CredScan Task"
        git_push
        popd
        #delete local copy of repo
        rm -rf $TEMP_DIR
    fi
}
cleanUpPipelineArtifactForAzdoServer(){
    # On Azure DevOps Server, replaces PublishPipelineArtifact usages (not
    # available on-prem): drops the task from the tfplan pipeline and converts
    # it to PublishBuildArtifacts in the infrastructure stage template, then
    # commits and pushes. No-op on the hosted service (PAAS).
    # Globals read: INSTALL_TYPE, PIPELINE_REPO_GIT_HTTP_URL,
    #               AZDO_PROJECT_NAME, PIPELINE_REPO_NAME
    if [ "$INSTALL_TYPE" != "PAAS" ]; then
        _information "Running on Azure DevOps Server! Removing Publish Pipeline Artifacts"
        _information "Cloning Project Repo: "$PIPELINE_REPO_GIT_HTTP_URL
        TEMP_DIR=~/git_repos/${AZDO_PROJECT_NAME}/${PIPELINE_REPO_NAME}
        clone_repo $TEMP_DIR $PIPELINE_REPO_GIT_HTTP_URL
        pushd $TEMP_DIR
        git config user.email "installer@terraform-template.com"
        git config user.name "Terraform template"
        #Remove Publish Pipeline Artifact from Tf Plan pipeline
        _information "Removing PublishPipelineArtifact from pipeline.tfplan.yml"
        file="azure-pipelines/pipeline.tfplan.yml"
        # Deletes the task line plus its 3 parameter lines (count is coupled
        # to the YAML layout).
        sed -i '/task: PublishPipelineArtifact@1/,+3 d' $file
        git add $file
        git commit -m "Removing Publish Artifact from tfPlan pipeline" --allow-empty
        #Remove Publish Pipeline Artifact from Infrastructure Stage
        _information "Converting PublishPipelineArtifact to PublishBuildArtifacts in template.stage.infrastructure.yml"
        file='azure-pipelines/templates/template.stage.infrastructure.yml'
        # Rewrite the task name and its parameters to the on-prem-compatible
        # PublishBuildArtifacts@1 equivalents.
        sed -i 's/task: PublishPipelineArtifact@1/task: PublishBuildArtifacts@1/g' $file
        sed -i "s/targetPath: '\$(Build.ArtifactStagingDirectory)'/PathtoPublish: '\$(Build.ArtifactStagingDirectory)'/g" $file
        sed -i "s/artifact: 'script'/ArtifactName: 'script'/g" $file
        sed -i "s/publishLocation: 'pipeline'/publishLocation: 'Container'/g" $file
        git add $file
        git commit -m "Converting PublishPipelineArtifact to PublishBuildArtifacts in template.stage.infrastructure.yml"
        git_push
        popd
        #delete local copy of repo
        rm -rf $TEMP_DIR
    fi
}
run_env_compile_pipeline(){
# https://${AZDO_ORG_URI}/${AZDO_PROJECT_NAME}/_apis/pipelines/${PIPELINE_ID}/runs
_uri=$(_set_api_version "${AZDO_ORG_URI}/${AZDO_PROJECT_NAME}/_apis/build/builds?api-version=" '5.1' '5.1')
_payload=$(cat "payloads/template.env-compile-run.json" | sed 's~__PIPELINE_ID__~'"${envCompilePipelineId}"'~')
_debug $_uri
_debug $_payload
_response=$(request_post "$_uri" "$_payload")
echo $_response > 'temp/run-env-compile-pipeline-response.json'
_success "Started Env Compile Pipeline"
}
configure_private_cloud() {
if [ "${PRIVATE_CLOUD}" == true ];then
_information "Private Cloud: Setting up configs for private cloud support."
configPath=~/.lucidity_config/
echo "${configPath}"
if [ ! -d "$configPath" ]; then
_debug "Creating ${configPath}"
mkdir "${configPath}"
fi
AZURE_ENVIRONMENT_FILEPATH="${configPath}private.cloud.json"
cp "payloads/private.cloud.json" "${AZURE_ENVIRONMENT_FILEPATH}"
TEMP_DIR=~/git_repos/${AZDO_PROJECT_NAME}/${PIPELINE_REPO_NAME}
clone_repo $TEMP_DIR $PIPELINE_REPO_GIT_HTTP_URL
cp ./payloads/template.pipeline.tfplan-private.yml $TEMP_DIR/azure-pipelines/pipeline.tfplan.yml
cp ./payloads/template.pipeline.tfapply-private.yml $TEMP_DIR/azure-pipelines/pipeline.tfapply.yml
pushd $TEMP_DIR
git config user.email "installer@terraform-template.com"
git config user.name "Terraform template"
git add azure-pipelines/pipeline.tfapply.yml
git commit -m "Adding pipeline.tfapply.yml for private" --allow-empty
git add azure-pipelines/pipeline.tfplan.yml
git commit -m "Adding pipeline.tfplan.yml for private" --allow-empty
git_push
popd
#delete local copy of repo
rm -rf $TEMP_DIR
fi
}
configure_checkout_template(){
_information "Configuring Checkout Template Project Repo: "$PIPELINE_REPO_GIT_HTTP_URL
TEMP_DIR=~/git_repos/${AZDO_PROJECT_NAME}/${PIPELINE_REPO_NAME}
_debug "TEMP_DIR: ${TEMP_DIR}"
clone_repo $TEMP_DIR $PIPELINE_REPO_GIT_HTTP_URL
file="template.step.checkout.terraform-code.yml"
destinationFile="$TEMP_DIR/azure-pipelines/templates/$file"
cp "./payloads/$file" "$TEMP_DIR/azure-pipelines/templates"
pushd $TEMP_DIR
git config user.email "installer@terraform-template.com"
git config user.name "Terraform template"
#Remove Publish Pipeline Artifact from Tf Plan pipeline
_information "Removing PublishPipelineArtifact from pipeline.tfplan.yml"
if [[ "$OSTYPE" == "darwin"* ]]; then
# macOS sed requires extra '' after -i for in place updates.
sed -i '' "s/__AZDO_PROJECT_NAME__/${AZDO_PROJECT_NAME}/g" $destinationFile
else
sed -i "s/__AZDO_PROJECT_NAME__/${AZDO_PROJECT_NAME}/g" $destinationFile
fi
git add -A
git commit -m "Adding checkout template with project name" --allow-empty
git_push
popd
#delete local copy of repo
rm -rf $TEMP_DIR
}
_get_build_policy_id() {
# https://docs.microsoft.com/rest/api/azure/devops/policy/configurations/list?view=azure-devops-rest-5.1
# GET https://dev.azure.com/{organization}/{project}/_apis/policy/configurations?api-version=5.1
local _response=$(request_get "${AZDO_ORG_URI}/${AZDO_PROJECT_NAME}/_apis/policy/types?api-version=5.1")
echo $_response > "temp/policy.types.response.json"
local _buildPolicy=$(cat "temp/policy.types.response.json" | jq -r '.value[] | select(.displayName == "Build") | .id' )
echo $_buildPolicy
}
configure_pr_build_policy() {
if [ ${INSTALL_TYPE} == 'SERVER' ]; then
_information "skipping configure pr build policy. Install targets AzDo Server 2020"
return 0
fi
_information "Creating PR Pipeline Build Policy on Terraform-Code Repo"
# https://docs.microsoft.com/rest/api/azure/devops/policy/configurations/create?view=azure-devops-rest-5.1#build-policy
# POST https://dev.azure.com/{organization}/{project}/_apis/policy/configurations/{configurationId}?api-version=5.1
local _buildPolicyId=$(_get_build_policy_id)
local _uri=$(_set_api_version "${AZDO_ORG_URI}/${AZDO_PROJECT_NAME}/_apis/policy/configurations/?api-version=" '5.0' '5.0')
local _payload=$(cat "payloads/template.build-policy.json" | sed 's~__POLICY_ID__~'"${_buildPolicyId}"'~' | sed 's~__REPOSITORY_ID__~'"${CODE_REPO_ID}"'~' | sed 's~__PIPELINE_ID__~'"${prPipelineId}"'~')
echo $_payload > temp/build-policy.payload.json
local _response=$(request_post "$_uri" "$_payload")
echo $_response > 'temp/build-policy-response.json'
local _importTypeKey=$(echo $_response | jq -r '.typeKey')
}
configure_destroy_full_pipeline(){
_information "Configuring Checkout Template Project Repo: "$PIPELINE_REPO_GIT_HTTP_URL
TEMP_DIR=~/git_repos/${AZDO_PROJECT_NAME}/${PIPELINE_REPO_NAME}
_debug "TEMP_DIR: ${TEMP_DIR}"
clone_repo $TEMP_DIR $PIPELINE_REPO_GIT_HTTP_URL
file="template.pipeline.tfdestroy.full.yml"
destinationFile="$TEMP_DIR/azure-pipelines/pipeline.tfdestroy.full.yml"
sourcePath="../../../Terraform-Code/terraform "
cp "./payloads/$file" "$TEMP_DIR/azure-pipelines/pipeline.tfdestroy.full.yml"
../../azure-pipelines/scripts/generatematrix.sh $destinationFile $sourcePath
pushd $TEMP_DIR
git config user.email "installer@terraform-template.com"
git config user.name "Terraform template"
git add -A
git commit -m "Adding Full Destroy Pipeline yaml" --allow-empty
git_push
popd
#delete local copy of repo
rm -rf $TEMP_DIR
}
grant_perms_build_svc_account_terraform_code_repo() {
_information "Granting Build Service Account - Contributor Access to Terraform-Code Repo"
if [ ${INSTALL_TYPE} == 'SERVER' ]; then
_information "Granting the Granting Build Service Account - Contributor Access to Terraform-Code Repo is not supported on Azure DevOps Server. Skipping..."
return 0
fi
# AzDo Service : Groups - Get https://docs.microsoft.com/rest/api/azure/devops/graph/groups/get?view=azure-devops-rest-5.1
# AzDo Server 2019 : Groups - Get ** Not available for Azure DevOps Server 2019 **
# GET https://vssps.dev.azure.com/{organization}/_apis/graph/groups/{groupDescriptor}?api-version=5.1-preview.1
_uri=$(_set_api_version "https://vssps.dev.azure.com/${AZDO_ORG_NAME}/_apis/graph/groups?api-version=" '5.1-preview.1' '5.1-preview.1')
_response=$(request_get $_uri)
_debug_log_get "$_uri" "$_response"
echo $_response > ./temp/getgroups-tf-code.json
if [[ "$OSTYPE" == "darwin"* ]]; then
# Mac OSX
_groupDomainId=$(cat ./temp/getgroups-tf-code.json | jq -c -r '.value[] | select( .displayName == "Enterprise Service Accounts" ) | .domain' | ggrep -oP '[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}')
else
# All Others
# https://vssps.dev.azure.com/<org>/_apis/graph/groups?api-version=5.1-preview.1
_groupDomainId=$(cat ./temp/getgroups-tf-code.json | jq -c -r '.value[] | select( .displayName == "Enterprise Service Accounts" ) | .domain' | grep -oP '[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}')
fi
# AzDo Service : Security Namespaces - Query https://docs.microsoft.com/rest/api/azure/devops/security/security%20namespaces/query?view=azure-devops-rest-5.1
# AzDo Server 2019 : Security Namespaces - Query https://docs.microsoft.com/rest/api/azure/devops/security/security%20namespaces/query?view=azure-devops-server-rest-5.0
# GET https://dev.azure.com/<org>/_apis/securitynamespaces?api-version=5.1
_uri=$(_set_api_version "${AZDO_ORG_URI}/_apis/securitynamespaces?api-version=" '5.1-preview.1' '5.0')
_response=$(request_get $_uri)
_debug_log_get "$_uri" "$_response"
echo $_response > ./temp/getsecnamespaces-tf-code.json
_namespaceId=$(cat ./temp/getsecnamespaces-tf-code.json | jq -c -r '.value[] | select( .name == "Git Repositories" ) | .namespaceId')
_debug "Git Repositories Namespace id: ${_namespaceId}"
# AzDo Service : Access Control Entries - Set Access Control Entries - https://docs.microsoft.com/rest/api/azure/devops/security/access%20control%20entries/set%20access%20control%20entries?view=azure-devops-server-rest-5.0
# AzDo Server 2019 : Access Control Entries - Set Access Control Entries - https://docs.microsoft.com/rest/api/azure/devops/security/access%20control%20entries/set%20access%20control%20entries?view=azure-devops-server-rest-5.0
# POST https://dev.azure.com/<org>/_apis/AccessControlEntries/33344d9c-fc72-4d6f-aba5-fa317101a7e9?api-version=5.1
_payload=$(cat "payloads/template.perm.tfcoderepo.json" | sed 's~__PROJECT_ID__~'"${AZDO_PROJECT_ID}"'~' | sed 's~__GROUP_DOMAIN_ID__~'"${_groupDomainId}"'~' | sed 's~__REPOSITORY_ID__~'"${PIPELINE_REPO_ID}"'~')
echo $_payload > ./temp/perm-payload-tfcode.json
_uri=$(_set_api_version "${AZDO_ORG_URI}/_apis/AccessControlEntries/${_namespaceId}?api-version=" '5.0' '5.0')
_debug "url: $_uri"
_response=$(request_post $_uri "$_payload")
_debug_log_post "$_uri" "$_response" "$_payload"
echo $_response > ./temp/setace-tfcoderepo.json
_success "\nPermissions granted"
}
#MAIN
mkdir -p ./temp
check_input
parse_sp "${SP_RAW}"
set_login_pat
create_project
if [ "${OFFLINE_INSTALL}" == true ]; then
if [ ${INSTALL_TYPE} == 'SERVER' ]; then
if [ "${SOURCE_LOCAL_PATH}" == "" ]; then
SOURCE_LOCAL_PATH=~/tfsource
fi
offline_install_template_repo "${PIPELINE_REPO_NAME}" "${PIPELINE_REPO_GIT_HTTP_URL}" "${SOURCE_LOCAL_PATH}/Terraform-Pipelines"
offline_install_template_repo "${CODE_REPO_NAME}" "${CODE_REPO_GIT_HTTP_URL}" "${SOURCE_LOCAL_PATH}/Terraform-Code"
else
offline_install_template_repo "${PIPELINE_REPO_NAME}" "${PIPELINE_REPO_GIT_HTTP_URL}" ../../
offline_install_template_repo "${CODE_REPO_NAME}" "${CODE_REPO_GIT_HTTP_URL}" ../../../Terraform-Code
fi
else
import_multi_template_repo "${PIPELINE_REPO_NAME}" "${TEMPLATE_PIPELINE_REPO}"
import_multi_template_repo "${CODE_REPO_NAME}" "${TEMPLATE_CODE_REPO}"
fi
install_extensions
create_arm_svc_connection
create_azdo_svc_connection
configure_private_cloud
create_variable_groups
create_pr_variable_group
create_and_upload_pr_state_secfile
configure_destroy_full_pipeline
create_pipelines
if [ "${USE_EXISTING_ENVS}" == false ]; then
create_default_env_files
fi
try_add_users
updateAgentPools
credScanRemovalForAzDoServer
cleanUpPipelineArtifactForAzdoServer
grant_perms_build_svc_account
configure_checkout_template
configure_pr_build_policy
if [ ${INSTALL_TYPE} == 'SERVER' ]; then
run_env_compile_pipeline # Run the pipeline once in order to generate the build service account role.
fi
grant_perms_build_svc_account_library
run_env_compile_pipeline
grant_perms_build_svc_account_terraform_code_repo
echo ""
_success "**** Successfully Created ${AZDO_PROJECT_NAME} in ${AZDO_ORG_URI}! ****"
if [ $DEBUG_FLAG == false ]; then
rm -rf ./temp
fi | true |
19c2038bccefed6f074fa229ed8882871d533492 | Shell | ttsft/keychain-creds | /keychain-creds.sh | UTF-8 | 1,505 | 4.0625 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#
keychain_location="/path_to/api_credentials.keychain"
keynames=("apiuser") #create additional entries in the list for each key name
get_keychain_username() {
# $1 is the path to the keychain
# $2 is the service name (key name)
security find-generic-password -s $2 -g $1 2>&1 | grep "acct" | cut -d \" -f 4
}
get_keychain_password() {
# $1 is the path to the keychain
# $2 is the service name
security find-generic-password -s $2 -g $1 2>&1 | grep "password" | cut -d \" -f 2
}
get_api_creds() {
# $1 is the service name
security unlock-keychain -p $API_KEYCHAIN_PASS $keychain_location
# obtain the username and password from the keychain
# If obtaining multiple keys, duplicate these two entries to get the user and password for each key
apiuser=$( get_keychain_username $keychain_location $1 )
apipwd=$( get_keychain_password $keychain_location $1 )
apicredentials="${apiuser}:${apipwd}"
echo $apicredentials
}
unlock_keychain() {
# unlock the keychain
if [[ ! $API_KEYCHAIN_PASS ]]; then
echo "Please run the following command to set the environment variables to unlock the keychain :"
echo
echo ". ./set_credentials.sh"
echo
exit 1
fi
}
lock_keychain() {
# lock the keychain
security lock-keychain $keychain_location
}
# Main
apicredentials=[]
unlock_keychain
for name in keynames ; do
apicredentials[$name]=$( get_api_creds $name )
done
lock_keychain | true |
fee399e41ff075ec8157e0df5ad74c3160569712 | Shell | yahaa/Shell | /test4.sh | UTF-8 | 399 | 2.53125 | 3 | [] | no_license | #########################################################################
# File Name: test4.sh
# Author: yahaa
# mail: yuanzihua0@gmail.com
# Created Time: 2016年09月22日 星期四 11时04分53秒
#########################################################################
#!/bin/bash
echo "What is your favourite os ?"
select var in "Linux " "GNU HURD" "FREE BAD" "OTHER";do
break;
done
echo "YOU HAVE $var"
| true |
3d8b3c4b2e41636862410cb1b6ffcfd215443c98 | Shell | TinyChief/dotfiles | /install.sh | UTF-8 | 2,035 | 2.921875 | 3 | [] | no_license | echo 'Let`s install goodies'
zsh --version
git --version
dest=$HOME
conf_dir=$dest/.config
git clone https://github.com/robbyrussell/oh-my-zsh $dest/.oh-my-zsh
git clone https://github.com/zsh-users/zsh-autosuggestions $dest/.oh-my-zsh/plugins/zsh-autosuggestions
git clone https://github.com/zsh-users/zsh-syntax-highlighting.git $dest/.oh-my-zsh/plugins/zsh-syntax-highlighting
cp -r zsh-custom/* $dest/.oh-my-zsh/custom
if test -f $dest/.zshrc; then
echo "$dest/.zshrc exists. Backuping to .zshrc.bak"
cp $dest/.zshrc $dest/.zshrc.bak
fi
cp .zshrc $dest
cp .zshrc-arco $dest
cp -r .bin $dest
### i3-gaps ###
i3_conf=$conf_dir/i3/config
[[ -f $i3_conf ]] && echo "$i3_conf exists. Backuping to config.bak"; cp $i3_conf $i3_conf.bak
cp -r i3 $conf_dir
### polybar ###
polybar_conf=$conf_dir/polybar/config
polybar_launch=$conf_dir/polybar/launch.sh
[[ -f $polybar_conf ]] && echo "$polybar_conf exists. Backuping to config.bak"; cp $polybar_conf $polybar_conf.bak
[[ -f $polybar_launch ]] && echo "$polybar_launch exists. Backuping to launch.sh.bak"; cp $polybar_launch $polybar_launch.bak
cp -r polybar $conf_dir
### rofi ###
rofi_conf=$conf_dir
[[ -d $rofi_conf ]] && echo "$rofi_conf exists. Backuping to rofi.bak"; cp -r $rofi_conf $rofi_conf.bak
cp -r rofi $conf_dir
### vim ###
vimrc=$dest/.vimrc
[[ -f $vimrc ]] && echo "$vimrc exists. Backuping to .vimrc.bak"; cp $vimrc $vimrc.bak
cp .vimrc $dest
nvim_init=$conf_dir/nvim/init.vim
[[ -f $nvim_init ]] && echo "$nvim_init exists. Backuping to $nvim_init.bak"; cp $nvim_init $nvim_init.bak
cp -r nvim $conf_dir
git clone https://github.com/VundleVim/Vundle.vim.git $dest/.vim/bundle/Vundle.vim
### kitty ###
kitty_conf=$conf_dir/kitty/kitty.conf
[[ -f $kitty_conf ]] && echo "$kitty_conf exists. Backuping to $kitty_conf.bak"; cp $kitty_conf $kitty_conf.bak
cp -r kitty $conf_dir
### vifm ###
vifmrc=$conf_dir/vifm/vifmrc;
[[ -f $vifmrc ]] && echo "$vifmrc exists. Backuping to $vifmrc.bak"; cp $vifmrc "$vifmrc.bak"
cp -r vifm $conf_dir
echo 'We are done!'
| true |
8a3043afdd41fd7992871748969e7936d9c977c7 | Shell | kuno/TGCrename | /TGCrename2.0.sh | UTF-8 | 6,773 | 3.84375 | 4 | [] | no_license | #!/bin/bash
#filetype = `ls -1 | head -1 | awk -F '.' '{print $2}'`
if [ $# -lt 1 ]; then
echo "Usage: $0 [y_option] [bulkfile]"
echo "y_option :"
echo " -n Don't trust the renaming scheme and be prompted to rename each file"
echo " -y Trust the renaming a little bit (recommended)"
echo " -yy Trust the renaming fully (not recommended)"
echo "bulkfile (optional) - an optional argument to specify an absolute path to a file containg TGC directory names to run this renaming in bulk"
echo ""
exit
fi
FILETYPE=(.mp4 .wmv .mkv .m4v .avi .flv .AVI .mov .m4a)
ytoall=$1
BULKFILE=$2
LFILES=()
TITLE=$CNAME" (TGC"$CNUM") S01E"
DIR=`pwd`
TGCFILE=$DIR"/AllTGCnameno.csv"
lev=()
answer="y"
i=0
getCourseNameFromDIR() {
COURSENAME=`echo $DIRNAME | sed 's/TTC - //g'`
COURSENAME=`echo $COURSENAME | sed 's/TTC//g'`
COURSENAME=`echo $COURSENAME | sed 's/Video //g'`
COURSENAME=`echo $COURSENAME | sed 's/VIDEO //g'`
COURSENAME=`echo $COURSENAME | sed 's/\[//g'`
COURSENAME=`echo $COURSENAME | sed 's/\]//g'`
COURSENAME=`echo $COURSENAME | sed 's/^[ \t]*//'`
echo "Course Name: $COURSENAME"
}
getMultiFiles() {
while [ "$i" -lt "${#FILETYPE[@]}" ]; do
filetmp=`ls -m | grep "${FILETYPE[$i]}"`
lret=$?
shopt -s nullglob
LFILES=(*${FILETYPE[$i]})
if [ "$lret" -eq "0" ]; then
break
fi
let i++
done
}
levenshtein() {
if [ "$#" -ne "2" ]; then
echo "Usage: $0 word1 word2" >&2
elif [ "${#1}" -lt "${#2}" ]; then
levenshtein "$2" "$1"
else
local str1len=$((${#1}))
local str2len=$((${#2}))
local d i j
for i in $(seq 0 $(((str1len+1)*(str2len+1)))); do
d[i]=0
done
for i in $(seq 0 $((str1len))); do
d[$((i+0*str1len))]=$i
done
for j in $(seq 0 $((str2len))); do
d[$((0+j*(str1len+1)))]=$j
done
for j in $(seq 1 $((str2len))); do
for i in $(seq 1 $((str1len))); do
[ "${1:i-1:1}" = "${2:j-1:1}" ] && local cost=0 || local cost=1
local del=$((d[(i-1)+str1len*j]+1))
local ins=$((d[i+str1len*(j-1)]+1))
local alt=$((d[(i-1)+str1len*(j-1)]+cost))
d[i+str1len*j]=$(echo -e "$del\n$ins\n$alt" | sort -n | head -1)
done
done
echo ${d[str1len+str1len*(str2len)]}
fi
}
findCourseTitle() {
COURSEtmp=`$DIR/levenstein.pl "$COURSENAME" "$TGCFILE"`
COURSE=`echo $COURSEtmp | awk -F "[" '{print $1}'`
COURSENO=`echo $COURSEtmp | awk -F "[" '{print $2}'`
echo "---------------------------------------------- "
echo "Course Name: $COURSE"
echo "Course No.: $COURSENO"
if [ "$ytoall" == "-yy" ]; then
TITLE=$COURSE" (TGC"$COURSENO") S01E"
else
read -p "Is this correct? (y/n): " answer
fi
if [ "$answer" == "y" ]; then
TITLE=$COURSE" (TGC"$COURSENO") S01E"
elif [ "$answer" == "n" ]; then
echo "Enter the Course name and Course No. manually"
echo -ne "Course Name: "
read CNAME
echo -ne "Course No.: "
read CNUM
TITLE=$CNAME" (TGC"$CNUM") S01E"
fi
}
renameLFILES() {
while [ "$i" -lt "${#LFILES[@]}" ]; do
temp=`echo ${LFILES[$i]} | grep -i "s01E[0-9][0-9]"`
result=$?
if [ "$result" -eq "0" ]; then
mv "${LFILES[$i]}" "`echo ${LFILES[$i]} | perl -p -e 's/.*S01E//gi'`"
fi
temp=`echo ${LFILES[$i]} | grep -i "L[0-9]"`
result=$?
if [ "$result" -eq "0" ]; then
mv "${LFILES[$i]}" "`echo ${LFILES[$i]} | perl -p -e 's/^L//gi'`"
fi
temp=`echo ${LFILES[$i]} | grep -i "chapter_"`
result=$?
if [ "$result" -eq "0" ]; then
# mv "${LFILES[$i]}" "`echo ${LFILES[$i]} | sed 's/.*[Ll]ecture-//g'`"
mv "${LFILES[$i]}" "`echo ${LFILES[$i]} | perl -p -e 's/.*chapter_//gi'`"
fi
temp=`echo ${LFILES[$i]} | grep -i "lecture-"`
result=$?
if [ "$result" -eq "0" ]; then
# mv "${LFILES[$i]}" "`echo ${LFILES[$i]} | sed 's/.*[Ll]ecture-//g'`"
mv "${LFILES[$i]}" "`echo ${LFILES[$i]} | perl -p -e 's/.*lecture-//gi'`"
fi
temp=`echo ${LFILES[$i]} | grep -i "lecture [0-9][0-9]"`
result=$?
if [ "$result" -eq "0" ]; then
# mv "${LFILES[$i]}" "`echo ${LFILES[$i]} | sed 's/.*[Ll]ecture //g'`"
mv "${LFILES[$i]}" "`echo ${LFILES[$i]} | perl -p -e 's/.*lecture //gi'`"
fi
temp=`echo ${LFILES[$i]} | grep -i "lecture[0-9][0-9]"`
result=$?
if [ "$result" -eq "0" ]; then
# mv "${LFILES[$i]}" "`echo ${LFILES[$i]} | sed 's/.*[Ll]ecture//g'`"
mv "${LFILES[$i]}" "`echo ${LFILES[$i]} | perl -p -e 's/.*lecture//gi'`"
fi
temp=`echo ${LFILES[$i]} | grep -i "lect[0-9][0-9]"`
result=$?
if [ "$result" -eq "0" ]; then
# mv "${LFILES[$i]}" "`echo ${LFILES[$i]} | sed 's/.*[Ll][Ee][Cc][Tt]/g'`"
mv "${LFILES[$i]}" "`echo ${LFILES[$i]} | perl -p -e 's/.*?lect/\1/i'`"
fi
temp=`echo ${LFILES[$i]} | grep -i "_[0-9]{3,4}"`
result=$?
if [ "$result" -eq "0" ]; then
mv "${LFILES[$i]}" "`echo ${LFILES[$i]} | perl -p -e 's/.*_[0-9]{3,4}//gi'`"
fi
temp=`echo ${LFILES[$i]} | grep -i "session-[0-9][0-9]"`
result=$?
if [ "$result" -eq "0" ]; then
mv "${LFILES[$i]}" "`echo ${LFILES[$i]} | perl -p -e 's/.*session-//gi'`"
fi
temp=`echo ${LFILES[$i]} | grep -i "Lesson "`
result=$?
if [ "$result" -eq "0" ]; then
mv "${LFILES[$i]}" "`echo ${LFILES[$i]} | perl -p -e 's/.*Lesson //gi'`"
fi
temp=`echo ${LFILES[$i]} | grep -i "_[0-9][0-9]\."`
result=$?
if [ "$result" -eq "0" ]; then
mv "${LFILES[$i]}" "`echo ${LFILES[$i]} | perl -p -e 's/.*_//gi'`"
fi
let i++
done
}
finishRenameLFILES() {
while [ "$i" -lt "${#LFILES[@]}" ]; do
if [ "$ytoall" == "-y" ] || [ "$ytoall" == "-yy" ]; then
mv "${LFILES[$i]}" "$TITLE${LFILES[$i]}"
elif [ "$ytoall" == "-n" ]; then
echo -ne "Rename ${LFILES[$i]} to $TITLE${LFILES[$i]} (y/n)"
read rnresp
if [ "$rnresp" == "y" ]; then
mv "${LFILES[$i]}" "$TITLE${LFILES[$i]}"
else
echo -ne "Rename ${LFILES[$i]} to: "
read newname
mv "${LFILES[$i]}" "$newname"
fi
else
echo "I'm unaware of an option: $ytoall"
break
fi
let i++
done
}
if [ ! -z $BULKFILE ]; then
while IFS='' read -u 3 -r path || [[ -n "$path" ]]; do
echo "Changing to directory: $path"
cd "$path"
DIRNAME=${PWD##*/}
getCourseNameFromDIR
findCourseTitle
getMultiFiles
i=0
renameLFILES
i=0
unset LFILES
getMultiFiles
i=0
finishRenameLFILES
done 3< "$BULKFILE"
exit
else
echo -ne "Enter absolute path of the TGC course files: "
read CDIR
echo "changing directory to $CDIR"
cd "$CDIR"
DIRNAME=${PWD##*/}
getCourseNameFromDIR
findCourseTitle
getMultiFiles
i=0
renameLFILES
i=0
unset LFILES
getMultiFiles
i=0
finishRenameLFILES
fi
| true |
6e34df96323899cfa7d780c0e31e0af335e263f7 | Shell | shardulsrivastava/shardulshreya.com | /auto/deploy-website | UTF-8 | 629 | 2.953125 | 3 | [] | no_license | #!/usr/bin/env bash
set -euox pipefail
cd "$(dirname $0)/../website/"
echo "Installing updates"
sudo apt-get update -y && sudo apt-get install awscli -y
export DEFAULT_AWS_REGION=ap-south-1
echo "Region => ${DEFAULT_AWS_REGION}"
export AWS_ACCESS_KEY_ID="${AWS_ACCESS_KEY_ID}"
echo "Access Key Id => ${AWS_ACCESS_KEY_ID}"
export AWS_SECRET_ACCESS_KEY="${AWS_SECRET_ACCESS_KEY}"
echo "Secret Access Key => ${AWS_SECRET_ACCESS_KEY}"
echo "Updating index.html on shardulshreya.com "
aws s3 cp index.html s3://shardulshreya.com
echo "Updating index.html on www.shardulshreya.com"
aws s3 cp index.html s3://www.shardulshreya.com | true |
4b97c13644ad6f2632cd7182b22424103b7fda81 | Shell | SrikanthParsha14/test | /bookshell/chapter25/print_UP_HP-UX.ksh | UTF-8 | 640 | 3.421875 | 3 | [
"MIT"
] | permissive | #!/bin/ksh
#
# SCRIPT: print_UP_HP-UX.ksh
#
# AUTHOR: Randy Michael
# DATE: 03/14/2007
# REV: 1.1.P
#
# PLATFORM: HP-UX Only
#
# PURPOSE: This script is used to enable printing and queuing separately
# on each print queue on an HP-UX system.
#
# REV LIST:
#
# set -x # Uncomment to debug this script
# set -n # Uncomment to check syntax without any execution
lpstat | grep Warning: | while read LINE
do
if (echo $LINE | grep 'is down') > /dev/null
then
enable $(echo $LINE | awk '{print $3}')
fi
if (echo $LINE | grep 'queue is turned off') >/dev/null
then
accept $(echo $LINE | awk '{print $3}')
fi
done
| true |
d0a07929f30f7453c6dd0c53c55cc7a266909c45 | Shell | delkyd/alfheim_linux-PKGBUILDS | /qpdfview-bzr/PKGBUILD | UTF-8 | 1,587 | 2.53125 | 3 | [] | no_license | # Maintainer: Adam Reichold <adam.reichold@t-online.de>
# Contributor: Stefan Husmann <stefan-husmann@t-online.de>
pkgname=qpdfview-bzr
pkgver=2041
pkgrel=1
pkgdesc='A tabbed PDF viewer using the poppler library. (development version)'
arch=('i686' 'x86_64' 'armv7h')
url='https://launchpad.net/qpdfview'
license=('GPL2')
depends=('libcups' 'qt5-svg' 'desktop-file-utils' 'hicolor-icon-theme')
optdepends=('texlive-bin: for shared SyncTeX parser library (required at build time)'
'poppler-qt5: for PDF support (required at build time)'
'libspectre: for PostScript support (required at build time)'
'djvulibre: for DjVu support (required at build time)'
'libmupdf: for PDF support (required at build time)')
makedepends=('bzr' 'qt5-tools')
conflicts=('qpdfview')
source=('qpdfview::bzr+http://bazaar.launchpad.net/~adamreichold/qpdfview/trunk/')
md5sums=('SKIP')
pkgver() {
cd "$srcdir/qpdfview"
bzr revno
}
build() {
cd "$srcdir/qpdfview"
local config="with_lto"
if ! pkg-config --exists poppler-qt5; then
local config="$config without_pdf"
fi
if ! pkg-config --exists libspectre; then
local config="$config without_ps"
fi
if ! pkg-config --exists ddjvuapi; then
local config="$config without_djvu"
fi
if [ -f /usr/lib/libmupdf.a ]; then
local config="$config with_fitz"
fi
lrelease-qt5 qpdfview.pro qpdfview.pro
qmake-qt5 "CONFIG+=$config" qpdfview.pro
make
}
package() {
cd "$srcdir/qpdfview"
make "INSTALL_ROOT=$pkgdir" install
if pkg-config --exists synctex; then
depends=("${depends[@]}" 'texlive-bin')
fi
}
| true |
eb718c81571ee2f7bf047edacae3051907c8a8eb | Shell | lielongxingkong/openstack_logs | /swift/multi-server/ring.sh | UTF-8 | 1,514 | 3.046875 | 3 | [] | no_license | #!/bin/bash
sed -i s/[[:space:]]//g ./Ring.conf
cp ./Ring.conf /etc/swift/
cd /etc/swift
RING_CONF="./Ring.conf"
swift-ring-builder account.builder create 18 3 1
swift-ring-builder container.builder create 18 3 1
swift-ring-builder object.builder create 18 3 1
while read line; do
name=`echo $line|awk -F '=' '{print $1}'`
value=`echo $line|awk -F '=' '{print $2}'`
case $name in
"proxy_ip")
proxy_ip=`echo $value|awk -F '|' '{print $1}'`
proxy_user=`echo $value|awk -F '|' '{print $2}'`
;;
"datanode")
zone=`echo $value|awk -F '|' '{print $1}'`
ip=`echo $value|awk -F '|' '{print $2}'`
swift_path=`echo $value|awk -F '|' '{print $3}'`
weight=`echo $value|awk -F '|' '{print $4}'`
swift-ring-builder account.builder add $zone-$ip:6002/$swift_path $weight
swift-ring-builder container.builder add $zone-$ip:6001/$swift_path $weight
swift-ring-builder object.builder add $zone-$ip:6000/$swift_path $weight
;;
*)
;;
esac
done < $RING_CONF
swift-ring-builder account.builder
swift-ring-builder container.builder
swift-ring-builder object.builder
swift-ring-builder account.builder rebalance
swift-ring-builder container.builder rebalance
swift-ring-builder object.builder rebalance
while read line; do
name=`echo $line|awk -F '=' '{print $1}'`
value=`echo $line|awk -F '=' '{print $2}'`
case $name in
"datanode")
ip=`echo $value|awk -F '|' '{print $2}' `
user=`echo $value|awk -F '|' '{print $5}' `
scp /etc/swift/*.gz $user@$ip:/etc/swift/
;;
*)
;;
esac
done < $RING_CONF
rm /etc/swift/Ring.conf
| true |
bde0a55f7509ff598720b27c5acedb177f1e2662 | Shell | mtezych/cpp | /build_android.sh | UTF-8 | 4,522 | 2.984375 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
#
# BSD 3-Clause License
#
# Copyright (c) 2017, mtezych
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
########################################################
# DO NOT RUN THIS SCRIPT WITHOUT THINKING !!! #
# #
# This script should be thought as an example of steps #
# required to build binaries for the Android OS. #
########################################################
# Android source code
# https://source.android.com/source/downloading.html
# https://source.android.com/source/using-repo.html
# https://source.android.com/source/build-numbers.html
curl https://storage.googleapis.com/git-repo-downloads/repo > repo
chmod +x repo
mkdir android-source
cd android-source
../repo init \
-u https://android.googlesource.com/platform/manifest \
-b android-6.0.1_r59
../repo sync system/core \
hardware/libhardware \
frameworks/native
cd ..
rm repo
# Android sysroot
# https://developer.android.com/studio/command-line/adb.html
mkdir -p android-sysroot/system/lib
adb pull /system/lib/libui.so android-sysroot/system/lib/libui.so
adb pull /system/lib/libgui.so android-sysroot/system/lib/libgui.so
adb pull /system/lib/libutils.so android-sysroot/system/lib/libutils.so
# Android NDK
# https://developer.android.com/ndk/index.html
wget https://dl.google.com/android/repository/android-ndk-r13b-linux-x86_64.zip
unzip android-ndk-r13b-linux-x86_64.zip
rm android-ndk-r13b-linux-x86_64.zip
# Android standalone toolchain
# https://developer.android.com/ndk/guides/standalone_toolchain.html
cd android-ndk-r13b/build/tools
python make_standalone_toolchain.py \
--arch arm \
--api 21 \
--install-dir `pwd`/../../../android-standalone-toolchain
cd ../../..
# CMake 3.7.2
wget https://cmake.org/files/v3.7/cmake-3.7.2-Linux-x86_64.tar.gz
tar -xvf cmake-3.7.2-Linux-x86_64.tar.gz
rm cmake-3.7.2-Linux-x86_64.tar.gz
# CMake & Make
# https://cmake.org/cmake/help/v3.7/manual/cmake-toolchains.7.html
mkdir build
cd build
if [ $# -eq 0 ] # any arguments specified
then
../cmake-3.7.2-Linux-x86_64/bin/cmake .. \
-DCMAKE_SYSTEM_NAME=Android \
-DCMAKE_ANDROID_API=21 \
-DCMAKE_ANDROID_ARCH_ABI=armeabi-v7a \
-DCMAKE_ANDROID_NDK=`pwd`/../android-ndk-r13b \
-DCMAKE_ANDROID_NDK_TOOLCHAIN_VERSION=clang \
-DCMAKE_ANDROID_STL_TYPE=gnustl_shared \
-DANDROID_SOURCE=`pwd`/../android-source \
-DANDROID_SYSROOT=`pwd`/../android-sysroot
else
../../cmake-3.7.2-Linux-x86_64/bin/cmake .. \
-DCMAKE_SYSTEM_NAME=Android \
-DCMAKE_ANDROID_STANDALONE_TOOLCHAIN=`pwd`/../android-standalone-toolchain \
-DANDROID_SOURCE=`pwd`/../android-source \
-DANDROID_SYSROOT=`pwd`/../android-sysroot
fi
make
cd ..
| true |
43d0b40084814b917db8d6711718530a97dd793c | Shell | QwertChinu/dotfiles | /.local/bin/status | UTF-8 | 890 | 3.640625 | 4 | [] | no_license | #!/bin/sh
pkg(){
upkg=$(pacman -Qu | grep -v "\[ignored\]" | wc -l | sed -e "s/^0$//g")
krnl=$(pacman -Qu | grep "linux-lts" | wc -l | sed -e "s/^0$//g")
[[ "$upkg" -eq "" ]] && upkg="U"
[[ "$krnl" -ne "" ]] && krnl="K" || krnl="P"
echo -e "$krnl$upkg"
}
volume(){
[ "$(pulsemixer --get-mute)" = "1" ] && printf "Mute" && exit
vol=$(pulsemixer --get-volume | awk '{print $1}')
echo -e "V$vol%"
}
battery(){
capacity=$(cat /sys/class/power_supply/"BAT0"/capacity)
status=$(cat /sys/class/power_supply/"BAT0"/status)
echo -e "${status:0:1}$capacity%"
}
disk(){
sizeR=$(df | grep /$ | awk '{print $5}')
sizeH=$(df | grep /home$ | awk '{print $5}')
echo -e "H$sizeH R$sizeR"
}
ram(){
echo -e $(free -h | awk '/^Mem:/ {print $3 "/" $2}')
}
xsetroot -name " $(ram) $(battery) $(disk) $(pkg) $(volume) \
$(date +"%a %m/%d %H:%M")"
| true |
51189b1cef8c96b60ad65812094f0485c26fe89e | Shell | marromlam/lhcb-software | /Rec/Rec/ChargedProtoANNPID/job/CopyToCastor.sh | UTF-8 | 594 | 3.34375 | 3 | [] | no_license | #!/bin/bash
export CASTORD=$CASTOR_HOME/ProtoParticlePIDtuples/MC12-Binc-nu2.5
export FILELIST="castor-list.txt"
rm -rf $FILELIST
touch $FILELIST
nsrm -r $CASTORD
nsmkdir $CASTORD
export i=0
for tuple in `find ~/gangadir/workspace/jonrob/LocalXML -name ProtoPIDANN.MC.tuples.root | perl -MList::Util=shuffle -e"print shuffle<>"`; do
export CASTORF=${CASTORD}"/Reco14-"${i}".root"
echo "Copying "$tuple" to castor as "$CASTORF
echo $CASTORF >> $FILELIST
rfcp $tuple $CASTORF
i=`expr $i + 1`
done
echo "Copied "`cat $FILELIST | wc -l`" files to castor. Listed in '"$FILELIST"'"
exit 0
| true |
49182a39daf9769373c4b8b89231925cf7629482 | Shell | fernandopasik/dotfiles | /.zshrc | UTF-8 | 2,629 | 2.96875 | 3 | [
"MIT"
] | permissive | # shellcheck disable=SC2148
# Path to your oh-my-zsh installation
export ZSH="$HOME"/.oh-my-zsh
# Set name of the theme to load. Optionally, if you set this to "random"
# it'll load a random theme each time that oh-my-zsh is loaded.
# See https://github.com/ohmyzsh/ohmyzsh/wiki/Themes
# shellcheck disable=SC2034
ZSH_THEME="agnoster"
# Configure auto update
zstyle ':omz:update' mode auto
zstyle ':omz:update' frequency 7
# Which plugins would you like to load? (plugins can be found in ~/.oh-my-zsh/plugins/*)
# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
# Add wisely, as too many plugins slow down shell startup.
# See https://github.com/ohmyzsh/ohmyzsh/wiki/Plugins
# shellcheck disable=SC2034
plugins=(
  docker
  docker-compose
  emoji
  gem
  gh
  git
  git-prompt
  golang
  httpie
  macos
  minikube
  npm
)
# Only load the kubectl plugin when kubectl is actually installed.
if [ -x "$(command -v kubectl)" ]; then
  plugins+=(kubectl)
fi
# shellcheck source=/dev/null
. "$ZSH"/oh-my-zsh.sh
# Homebrew on macOS installs shared data under /usr/local instead of /usr.
USR_PATH=/usr/
if [ "$(uname -s)" = "Darwin" ]; then
  USR_PATH=/usr/local/
fi
# shellcheck source=/dev/null
. $USR_PATH/share/zsh-syntax-highlighting/zsh-syntax-highlighting.zsh
# ********************************
# ****** User configuration ******
# ********************************
# Default zsh user
DEFAULT_USER=$(whoami)
export DEFAULT_USER
# Bash paths
export PATH="/usr/local/bin:/usr/bin:/bin:/usr/local/sbin:/usr/sbin:/sbin"
# Python path setup
export PATH="$HOME"/.local/bin:$PATH
# Ruby setup
export PATH=/usr/local/opt/ruby/bin:$PATH
# Ruby gem setup
if [ -x "$(command -v gem)" ]; then
  GEM_DIR=$(gem env gemdir)
  export PATH="$GEM_DIR/bin:$PATH"
fi
# Ruby RVM setup
export PATH=$PATH:/opt/rvm/bin:/opt/rvm/sbin
# Go setup
export GOPATH="$HOME"/.go
export PATH="$GOPATH/bin:$PATH"
# NVM setup
export NVM_DIR="$HOME/.nvm"
# This loads nvm
# shellcheck source=/dev/null
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh"
# This loads nvm bash_completion
# shellcheck source=/dev/null
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion"
# VS Code for windows path
# Locate the VS Code Remote server binary (WSL setups) and add it to PATH.
if [ -d "$HOME/.vscode-server" ]; then
  VSCODE_BIN_PATH=$(find "$HOME/.vscode-server" -name "code")
  VSCODE_BIN_DIR=$(dirname "$(realpath --relative-to="$HOME" "$VSCODE_BIN_PATH")")
  export PATH="$HOME/$VSCODE_BIN_DIR:$PATH"
fi
# Setup Pure Prompt
autoload -U promptinit
if [ "$(command -v promptinit)" ]; then
  promptinit
  prompt pure
fi
# Shared helpers live next to the real (symlink-resolved) .zshrc.
DOTFILES_DIR=$(dirname "$(readlink -n "$HOME/.zshrc")")
# shellcheck source=utils.sh
. "$DOTFILES_DIR"/utils.sh
# shellcheck source=/dev/null
. "$HOME"/.profile
| true |
edf99066cbb8c7df1608fdf9b80fd57ca8ea3125 | Shell | jochym/almode-tools | /proc-calcs | UTF-8 | 2,424 | 3.609375 | 4 | [] | no_license | #!/bin/bash
# ALAMODE post-processing driver: fit force constants from VASP displacement
# runs, then compute harmonic (and, if cubic inputs exist, anharmonic)
# phonon dispersions and DOS. $1 is the "<prefix>_gen.in" input file.
i=${1%%_gen.in}
ALM=/opt/bin/alm
ANPH=/opt/bin/anphon
# extract.py below needs ASE; bail out early if it is not importable.
OK=''
python -c 'import ase' 2>/dev/null >/dev/null && OK=1
if [ -z "$OK" ] ; then
    echo "No ASE packages in the environment. Activate python env."
    exit 1
fi
export LANG=C.UTF-8
echo "Process: $i"
# Collect displacements and forces from all calc-disp*/vasprun*.xml runs.
extract.py --VASP=SPOSCAR --get=disp calc-disp*/vasprun*.xml > disp.dat
extract.py --VASP=SPOSCAR --get=force calc-disp*/vasprun*.xml > force.dat
n=`ls -d calc-disp* | wc -l`
echo "NDAT: $n"
# Turn the generator input into a fitting input by rewriting MODE.
sed -e 's/MODE *= *[a-zA-Z]*/MODE = fitting /' < ${i}_gen.in > ${i}_fit.in
if [ -e ${i}_gen_cub.in ]; then
    sed -e 's/MODE *= *[a-zA-Z]*/MODE = fitting /' < ${i}_gen_cub.in > ${i}_fit_cub.in
fi
# Append the &fitting namelist; the heredoc is unquoted so $n expands.
tfn=`mktemp`
cat <<EOF > $tfn
&fitting
 NDATA = $n
 DFILE = disp.dat
 FFILE = force.dat
/
EOF
cat $tfn >> ${i}_fit.in
if [ -e ${i}_fit_cub.in ]; then
    cat $tfn >> ${i}_fit_cub.in
fi
rm -f $tfn
# Run the force-constant fit(s) and report the fit quality.
$ALM ${i}_fit.in > ${i}_fit.log
grep "Residual sum" ${i}_fit.log
if [ -e ${i}_fit_cub.in ]; then
    $ALM ${i}_fit_cub.in > ${i}_fit_cub.log
    grep "Residual sum" ${i}_fit_cub.log
fi
#sed -e 's/MODE *= *[a-zA-Z]*/MODE = phonons ; FCSXML = '"${i}.xml"' /' \
#    -e 's/NAT *= *[0-9]*//' -e 's/^# *MASS/  MASS/' < ${i}_gen.in > ${i}_phon.in
# Optional custom k-path; default is Gamma -> X with 51 points.
if [ -e ${i}.path ]; then
    KPATH=`cat ${i}.path`
else
    KPATH="G 0.0 0.0 0.0 X 0.0 0.5 0.5 51"
fi
# &kpoint namelists: line mode for dispersion, grid mode for DOS.
tfnp=`mktemp`
cat <<EOF > $tfnp
&kpoint
 1   # KPMODE = 1: line mode
 ${KPATH}
/
EOF
tfnd=`mktemp`
cat <<EOF > $tfnd
&kpoint
 2   # KPMODE = 2: grid mode
 20 20 20
/
EOF
# Include Born effective charges when a .born file is present.
BORN=""
if [ -e ${i}.born ] ; then
    BORN=" -b 2 "
fi
make-gen.py phon ${BORN} -p ${i} -n SPOSCAR -e 1 -t 1600 > ${i}_phon.in
cp ${i}_phon.in ${i}_dos.in
cat $tfnp >> ${i}_phon.in
cat $tfnd >> ${i}_dos.in
echo "Calculating harmonic phonons"
$ANPH ${i}_phon.in >${i}_phon.log
mv ${i}.evec ${i}_phon.evec
echo "Calculating harmonic dos"
$ANPH ${i}_dos.in >${i}_dos.log
# Repeat the phonon/DOS calculations with cubic (anharmonic) force constants.
if [ -e ${i}_fit_cub.in ]; then
    make-gen.py phon ${BORN} -p ${i}_cub -n SPOSCAR -e 1 -t 1600 > ${i}_phon_cub.in
    if [ -n "${BORN}" ] ; then
        ln -s ${i}.born ${i}_cub.born
    fi
    cp ${i}_phon_cub.in ${i}_dos_cub.in
    cat $tfnp >> ${i}_phon_cub.in
    cat $tfnd >> ${i}_dos_cub.in
    echo "Calculating anharmonic phonons"
    $ANPH ${i}_phon_cub.in >${i}_phon_cub.log
    mv ${i}_cub.evec ${i}_phon_cub.evec
    echo "Calculating anharmonic dos"
    $ANPH ${i}_dos_cub.in >${i}_dos_cub.log
fi
rm -f $tfnp $tfnd
| true |
baffdee15daaa1cb9a72c18a3a08d700fbafab9f | Shell | Tofull/social_graph | /ToGraph/exportToNeo4j.sh | UTF-8 | 624 | 2.921875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Replay the Cypher statements in file "$1", one per line, into the Neo4j
# browser by injecting X11 keystrokes (xte) and the clipboard (xclip).
# Assumes the Neo4j browser tab is the Alt-Tab target when this starts.
# Go to neo4j tabs
xte "keydown Alt_L" "key Tab" "keyup Alt_L"
sleep 0.1
# Clear the query box: select all, paste, Ctrl+Enter to run.
xte "sleep 0.2" "keydown Control_L" "key a" "keyup Control_L"
xte "sleep 0.2" "keydown Control_L" "key v" "keyup Control_L"
xte "sleep 0.2" "keydown Control_L" "key Return" "keyup Control_L"
# 'mouseclick 1'
# The "|| [[ -n "$line" ]]" clause also processes a final line that has no
# trailing newline.
while IFS='' read -r line || [[ -n "$line" ]]; do
    echo "$line" | xclip -selection clipboard
    sleep 0.2
    # Paste Text
    xte "sleep 0.2" "keydown Control_L" "key v" "keyup Control_L"
    # execute command (Ctrl+Enter)
    xte "sleep 0.2" "keydown Control_L" "key Return" "keyup Control_L"
    # wait 200 ms for the browser to catch up
    sleep 0.2
done < "$1"
| true |
ab320557e590a59e2bb7a09b47a2e3acdd5b4a30 | Shell | cloudogu/CIS-Ubuntu-20.04 | /6-System-Maintenance/6.2.8.sh | UTF-8 | 683 | 3.5625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# CIS 6.2.8 check: for every interactive (non-system) account in /etc/passwd,
# verify that any ~/.netrc file grants no group/other permissions.
# The awk filter drops halt/sync/shutdown and accounts whose shell is
# nologin or false, then emits "user homedir" pairs.
awk -F: '($1!~/(halt|sync|shutdown)/ && $7!~/^(\/usr)?\/sbin\/nologin(\/)?$/ && $7!~/(\/usr)?\/bin\/false(\/)?$/) { print $1 " " $6 }' /etc/passwd |
  while read -r user dir; do
    if [ -d "$dir" ]; then
      file="$dir/.netrc"
      # Skip symlinks; only regular .netrc files are audited.
      if [ ! -h "$file" ] && [ -f "$file" ]; then
        # Chars 4-10 of the mode string are the group/other bits; any
        # non-dash there means excessive permissions.
        if stat -L -c "%A" "$file" | cut -c4-10 | grep -Eq '[^-]+'; then
          echo "FAILED: User: \"$user\" file: \"$file\" exists with permissions: \"$(stat -L -c "%a" "$file")\", remove file or excessive permissions"
        else
          echo "WARNING: User: \"$user\" file: \"$file\" exists with permissions: \"$(stat -L -c "%a" "$file")\", remove file unless required"
        fi
      fi
    fi
  done
| true |
13fa8017eec7e454aef516e8929bb5bce2629093 | Shell | DAP-DarkneSS/obs | /lc-lite.sh | UTF-8 | 814 | 3.59375 | 4 | [
"CC-BY-3.0"
] | permissive | #!/bin/bash
# Compare the leechcraft version in the local git checkout against the
# version packaged on openSUSE Build Service, and show the OBS build status.
NOBS=leechcraft
DGIT=~/lc/$NOBS
NPRJ=home:Reki:leechcraft:masterbranch
SPEC='https://api.opensuse.org:443/public/source/'$NPRJ'/'$NOBS'/'$NOBS'.spec'
MONI='https://api.opensuse.org:443/public/build/'$NPRJ'/_result'
echo -e '\e[0;4mChecking of github version:\e[0m'
# Guard the cd: without it a failed cd would run "git pull" in the wrong
# directory.
cd "$DGIT" || exit 1
git pull
VGIT=$(git describe)
echo -e '\e[0;33m\n'$VGIT'\e[0m'
echo -e "\n"'\e[0;4mChecking of OBS version:\e[0m'
# The packaged version is taken from the "%define LEECHCRAFT ..." spec line.
VOBS=$(curl -s "$SPEC" | grep 'define LEECHCRAFT' | awk '{print $3}')
echo -e '\e[0;33m\n'$VOBS'\e[0m'
echo -e "\n"'\e[0;4mChecking of OBS status:\e[0m'"\n"
# Bug fix: this line used to read "curl -s curl -s $MONI", which made curl
# first try to fetch the literal URL "curl" before the monitor URL.
curl -s "$MONI" | tr '"' ' ' | grep "$NOBS" | awk '{print $5 "\t" $7}'
if [ "$VGIT" == "$VOBS" ]
then
	echo -e "\n"'\e[0;4mNo changes.\e[0m'
else
	git log --date=raw --full-diff --name-only
	echo -e '\e[0;35m\nThe spec could be edited!\e[0m'
fi
| true |
359ac289fbb7fde9772537c40fdcc7020f2917ca | Shell | juliensf/protobuf-mercury | /tests/runtests | UTF-8 | 392 | 3.59375 | 4 | [] | no_license | #!/bin/sh
some_tests_failed="no"
for test in "$@"; do
if ./runtest $test 2> $test.err > $test.out; then
echo PASSED TEST $test
else
echo "*** FAILED TEST $test ***"
some_tests_failed="yes"
fi
done
echo
if [ "$some_tests_failed" = "no" ]; then
echo ALL TESTS PASSED
else
echo SOME TESTS FAILED
exit 1
fi
echo
| true |
75d17522a053bc196ea0be6f480273a80ab6d81b | Shell | YanbingJiang/jpeg_tcm | /mac_parallel_processing/countFiles.sh | UTF-8 | 748 | 3.40625 | 3 | [] | no_license |
# Compare per-directory entry counts between the original data tree and the
# generated tree; each original file is expected to yield 11 generated files.
src_root=/Volumes/DATA/ml/rename_test/
gen_root=/Volumes/DATA/ml/rename_test/

# Count entries in every subdirectory of the source tree.
idx=1
for dir in $src_root*; do
    [ -d "$dir" ] || continue
    src_counts[idx]=$(ls -B "$dir" | wc -l)
    idx=$((idx + 1))
done

# Count entries in every subdirectory of the generated tree.
jdx=1
for dir in $gen_root*; do
    [ -d "$dir" ] || continue
    gen_counts[jdx]=$(ls -B "$dir" | wc -l)
    jdx=$((jdx + 1))
done

# Print "<n>) <src*11> <---> <gen>" for each directory pair.
echo '-----DISPLAY COUNTS-----------'
for ((k = 1; k < idx; k++)); do
    # Unset/whitespace-padded counts are tolerated by the arithmetic context.
    expected=$(( src_counts[k] * 11 ))
    echo $k')' $expected '<--->' ${gen_counts[k]}
done
echo '-----------------------'
28c1ac94dba2ab645e629bc08ce9475a3ce1acd5 | Shell | Sush002/mygenie | /openPutty.sh | UTF-8 | 1,110 | 3 | 3 | [] | no_license | #! /bin/bash
# Look up a host in inventory.csv (falling back to Old_Instance_Sheet.csv)
# by public IP / hostname / VPN IP, then open a PuTTY session to it using
# the per-host private key.
echo "Enter the unique Public IP/Hostname/VPN IP"
read name
# Case-insensitive match of $name against several identifier columns.
# NOTE(review): assumed CSV layout — $7 key name, $4 public IP,
# $14 vanity URL; other matched columns ($5/$16/$17) unverified from here.
read key public_ip vanity_URL <<< $(awk -F',' -v nameA="$name" 'BEGIN{IGNORECASE = 1}{
        if ($14 == nameA || $4 == nameA || $16 == nameA || $5 == nameA || $17 == nameA || $14 == nameA".")
        print $7,$4,$14
        }' inventory.csv)
echo $key
echo $public_ip
echo $vanity_URL
# No hit in the current inventory: try the archived sheet (different column
# layout: $12 key, $7 public IP, $5 vanity URL).
if [ -z "$key" ]
then
    echo $'NOT IN current Inventory..Checking in old inventory\n'
    key=`cat Old_Instance_Sheet.csv | grep -iw $name | awk -F, {'print $12'}`
    public_ip=`cat Old_Instance_Sheet.csv | grep -iw $name | awk -F, {'print $7'}`
    vanity_URL=`cat Old_Instance_Sheet.csv | grep -iw $name | awk -F, {'print $5'}`
    if [ -z $key ]
    then
        echo $'opppsss..NOT in Old Inventory as well.\n Please check manually'
        exit
    fi
fi
# Strip stray spaces that sneak into the CSV fields.
key=`echo $key | sed 's/ //g'`
public_ip=`echo $public_ip | sed 's/ //g'`
echo "Vanity URL: $vanity_URL"
echo "Public IP: $public_ip"
echo "Key  : $key "
# Launch PuTTY (Windows path via the /drives mount) in the background with
# the matching .ppk key on port 22.
/drives/e/Sush/Scripts/OpenPutty/putty.exe pegauser@$public_ip -i "E:\Sush\Scripts\OpenPutty\Keys\/$key.ppk" 22 &
| true |
a3978ef4a71496c517bd306a9dbb4588f5a4afa3 | Shell | stridentbean/bash_profile | /.bash_profile | UTF-8 | 1,331 | 2.796875 | 3 | [] | no_license | export PATH=/bin:/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin:$PATH
export EDITOR='subl -w'
echo "Good to see you again, Michael"
# Git shortcuts.
alias gs="git status"
alias ga="git add"
alias om="origin master"
alias gpum="git pull upstream master"
alias gpom="git push origin master"
alias gcob="git checkout -b"
alias cl='clear'
alias serve='python -m SimpleHTTPServer'
alias prof='vi ~/.bash_profile'
alias reprof='. ~/.bash_profile'
alias testify='. /Users/michaelborglin/Documents/open-source/testify/testify.sh /Users/michaelborglin/Documents/open-source/testify'
alias gpr='git pull --rebase upstream master'
alias npmi='npm install '
alias boweri='bower install '
# Go up one directory and list it.
b () {
  cd ..
  ls
}
# Enter the given directory and list it.
f () {
  cd "$1"
  ls
}
# Rebase the current branch onto upstream/master, then push it to origin.
function grp() {
  branch_name="$(git symbolic-ref HEAD 2>/dev/null)" ||
  branch_name="(unnamed branch)"
  branch_name=${branch_name##refs/heads/}
  git pull --rebase upstream master && git push origin $branch_name
}
export NODE_ENV=development
# Setting PATH for Python 3.4
# The original version is saved in .bash_profile.pysave
PATH="/Library/Frameworks/Python.framework/Versions/3.4/bin:${PATH}"
export PATH
# Setting PATH for Python 2.7
# The original version is saved in .bash_profile.pysave
PATH="/Library/Frameworks/Python.framework/Versions/2.7/bin:${PATH}"
export PATH
. /c/Users/mborglin/Documents/open-source/z/z.sh
| true |
ff3c89bb6a03d3d9c62165e6d015637ff28acf25 | Shell | n1ko-w1ll/docker-hivemq | /docker-install-scripts/install_hivemq.sh | UTF-8 | 482 | 2.71875 | 3 | [] | no_license | #!/bin/sh
################################################
#Install HiveMQ
# 3.3.3
INSTALL_TEMP=/opt/install/hivemq-install-temp
mkdir -p $INSTALL_TEMP
wget -O $INSTALL_TEMP/hivemq.zip https://www.hivemq.com/download.php?token=6264303c0ea115248df151f50a722572
unzip -d $INSTALL_TEMP $INSTALL_TEMP/hivemq.zip
rm $INSTALL_TEMP/hivemq.zip
mv -v $INSTALL_TEMP/hivemq-* /opt/hivemq
adduser -D -h /opt/hivemq hivemq
chown -R hivemq:hivemq /opt/hivemq
chmod +x /opt/hivemq/bin/run.sh
rm -r $INSTALL_TEMP | true |
60826ed9d65daae341ca560bc5f99e6b343ccd8f | Shell | dholm/FS2XPlane | /setup | UTF-8 | 3,771 | 3.078125 | 3 | [] | no_license | #!/bin/sh
# Packaging script for FS2XPlane: builds the Linux RPM, the Debian .deb and
# the signed macOS .app/.zip from one source tree. Version/app names come
# from the project's version.py (Python 2 print syntax).
VERSION=`python -c "from version import appversion; print '%4.2f' % appversion"`
VER=`python -c "from version import appversion; print int(round(appversion*100,0))"`
APPNAME=`python -c "from version import appname; print appname"`
APPLINUX=`python -c "from version import appname; print appname.lower()"`
RELEASE=1
# Remove artifacts from previous builds.
rm -f ${APPNAME}_${VER}_src.zip
rm -f ${APPLINUX}-$VERSION-$RELEASE.noarch.rpm
rm -f ${APPLINUX}_$VERSION-$RELEASE_all.deb
rm -f ${APPNAME}_${VER}_mac.zip
rm -rf ${APPNAME}.app
# File sets shared by all three packages.
PY='fs2xp.py FS2XPlane.py convatc.py convbgl.py convmain.py convmdl.py convobjs.py convphoto.py convtaxi.py convutil.py convxml.py MessageBox.py version.py'
RSRC=`ls Resources/*.{bgl,dds,fac,for,html,lin,obj,png,pol,txt,xml}`
HELP='DSFTool bglunzip bglxml bmp2dds bmp2png fake2004 winever'
# linux
# Lay out an rpmbuild tree under /tmp and a fakeroot install tree under it.
RPM=/tmp/${APPLINUX}
RPMRT=$RPM/root
rm -rf $RPM
mkdir -p $RPM/BUILD
mkdir -p $RPM/SOURCES
mkdir -p $RPM/SPECS
mkdir -p $RPM/RPMS/noarch
mkdir -p $RPMRT/usr/local/bin
mkdir -p $RPMRT/usr/local/lib/${APPLINUX}/Resources
mkdir -p $RPMRT/usr/local/lib/${APPLINUX}/linux
mkdir -p $RPMRT/usr/local/lib/${APPLINUX}/win32
mkdir -p $RPMRT/usr/local/share/applications
mkdir -p $RPMRT/usr/local/share/icons/hicolor/48x48/apps
mkdir -p $RPMRT/usr/local/share/icons/hicolor/128x128/apps
cp -p linux/${APPLINUX}.spec $RPM/SPECS/
cp -p linux/fs2xp $RPMRT/usr/local/bin/
cp -p linux/${APPLINUX} $RPMRT/usr/local/bin/
cp -p linux/${APPLINUX}.desktop $RPMRT/usr/local/share/applications/
cp -p Resources/${APPNAME}.png $RPMRT/usr/local/share/icons/hicolor/48x48/apps/${APPLINUX}.png
cp -p linux/${APPNAME}-128.png $RPMRT/usr/local/share/icons/hicolor/128x128/apps/${APPLINUX}.png
cp -p linux/${APPNAME}-128.png $RPMRT/usr/local/lib/${APPLINUX}/Resources/
for i in $PY; do cp -p "$i" $RPMRT/usr/local/lib/${APPLINUX}/; done
for i in $RSRC; do cp -p "$i" $RPMRT/usr/local/lib/${APPLINUX}/Resources/; done
for i in $HELP; do cp -p "linux/$i" $RPMRT/usr/local/lib/${APPLINUX}/linux/; done
for i in win32/bglunzip.exe win32/fake2004.exe; do cp -p "$i" $RPMRT/usr/local/lib/${APPLINUX}/win32/; done
rpmbuild -bb --buildroot $RPMRT --define "_target_os linux" --define "_target_vendor pc" --define "_topdir $RPM" --define "_unpackaged_files_terminate_build 0" --define "version $VERSION" --define "release $RELEASE" --quiet $RPM/SPECS/${APPLINUX}.spec
mv $RPM/RPMS/noarch/${APPLINUX}-$VERSION-$RELEASE.noarch.rpm .
# Debian/Ubuntu
# Reuse the RPM install tree as the .deb payload; only DEBIAN/ metadata is new.
mkdir -p $RPMRT/DEBIAN
echo Version: $VERSION-$RELEASE> $RPMRT/DEBIAN/control
cat linux/control >> $RPMRT/DEBIAN/control
cp -p linux/postinst $RPMRT/DEBIAN/
mkdir -p $RPMRT/usr/local/share/doc/${APPLINUX}/
cp -p linux/copyright $RPMRT/usr/local/share/doc/${APPLINUX}/
fakeroot "dpkg-deb -b $RPMRT ." # requires gnu-tar
# mac
mkdir -p ${APPNAME}.app/Contents
sed s/appversion/${VERSION}/ <MacOS/Info.plist >${APPNAME}.app/Contents/Info.plist
mkdir -p ${APPNAME}.app/Contents/MacOS
cp -p MacOS/${APPNAME} ${APPNAME}.app/Contents/MacOS/
for i in $PY; do cp -p "$i" ${APPNAME}.app/Contents/MacOS/; done
for i in $HELP; do cp -p "MacOS/$i" ${APPNAME}.app/Contents/MacOS/; done
mkdir -p ${APPNAME}.app/Contents/MacOS/win32
for i in win32/bglunzip.exe win32/fake2004.exe; do cp -p "$i" ${APPNAME}.app/Contents/MacOS/win32/; done
mkdir -p ${APPNAME}.app/Contents/Resources
for i in $RSRC; do cp -p "$i" ${APPNAME}.app/Contents/Resources/; done
cp -p MacOS/*.icns MacOS/*.png ${APPNAME}.app/Contents/Resources/	# overwrite with Mac versions
python2.5 -OO ./bundledeps.py -x wx -o ${APPNAME}.app ${APPNAME}.py	# exclude wx - included with 10.5 and 10.6
python2.7 -OO ./bundledeps.py -o ${APPNAME}.app ${APPNAME}.py
codesign --deep -s "Developer ID Application: Jonathan Harris" ${APPNAME}.app
ditto -ck --keepParent --sequesterRsrc ${APPNAME}.app ${APPNAME}_${VER}_mac.zip
| true |
e681db22e73c6ec29a9b0f4a54b8f7c81f874de2 | Shell | MichelleNear/ParticleChromo3D | /src/utility/tmScoreRunner.bash | UTF-8 | 397 | 2.953125 | 3 | [] | no_license | #!/bin/bash
# All-vs-all structure comparison: run TMScore on every unordered pair of
# the 30 reconstructed chr1 models and append each parsed score to
# tmscore.txt.
chrNum=1
infile="../../Results/gm12878/consistency/500kb/chr1/chr1-"
#infile=../../input-and-models/Input/GM12878_input/KR_1mb/chr${chrNum}_matrix.txt
rm tmscore.txt
# Triangular loop bounds replace the old break-on-equal loops but visit the
# exact same pairs in the same order: (2,1), (3,1), (3,2), ...
for ((a = 2; a <= 30; a++))
do
	for ((b = 1; b < a; b++))
	do
		./TMScore "${infile}${a}.pdb" "${infile}${b}.pdb" | grep "TM-score    = " | cut -c15-20 >> tmscore.txt
	done
done
| true |
319678a31179f37f33d85cc7668d3dbf7b8ab3bd | Shell | johnyin123/private_documents | /kvm/virt_createvol.sh | UTF-8 | 4,101 | 3.96875 | 4 | [] | no_license | #!/usr/bin/env bash
# Create (and resize) a libvirt storage volume on a remote KVM host via
# virsh. Helper functions (virsh_wrap, info_msg, exit_msg, set_sshpass,
# set_loglevel, ...) come from functions.sh next to this script.
readonly DIRNAME="$(readlink -f "$(dirname "$0")")"
readonly SCRIPTNAME=${0##*/}
# DEBUG=1|yes|true enables xtrace into a timestamped log file.
if [[ ${DEBUG-} =~ ^1|yes|true$ ]]; then
    exec 5> "${DIRNAME}/$(date '+%Y%m%d%H%M%S').${SCRIPTNAME}.debug.log"
    BASH_XTRACEFD="5"
    export PS4='[\D{%FT%TZ}] ${BASH_SOURCE}:${LINENO}: ${FUNCNAME[0]:+${FUNCNAME[0]}(): }'
    set -o xtrace
fi
VERSION+=("505ec4e[2023-07-14T10:35:33+08:00]:virt_createvol.sh")
[ -e ${DIRNAME}/functions.sh ] && . ${DIRNAME}/functions.sh || { echo '**ERROR: functions.sh nofound!'; exit 1; }
################################################################################
# Print an optional error message plus usage text, then exit 1.
usage() {
    [ "$#" != 0 ] && echo "$*"
    cat <<EOF
${SCRIPTNAME}
        -K|--kvmhost   <ipaddr>  kvm host address
        -U|--kvmuser   <str>     kvm host ssh user
        -P|--kvmport   <int>     kvm host ssh port
        --kvmpass      <password>kvm host ssh password
        -p|--pool    * <str>     libvirt store pool name
        -n|--name    * <str>     vol name
        -f|--fmt       <str>     default raw, qemu-img format
        -s|--size    * <size>    size, GiB/MiB
        -b|--backing_vol <str>
        -F|--backing_fmt <str>
        -q|--quiet
        -l|--log <int> log level
        -V|--version
        -d|--dryrun dryrun
        -h|--help help
        cat <<EOVOL | virsh vol-create --pool default --file /dev/stdin
        <volume>
          <name>test.img</name>
          <capacity unit="G">1</capacity>
        </volume>
        EOVOL
        virsh vol-path --pool default test.img
EOF
    exit 1
}
main() {
    local kvmhost="" kvmuser="" kvmport="" kvmpass=""
    local pool="" name="" fmt="raw" size="" backing_vol="" backing_fmt=""
    # GNU getopt(1) handles both the short and long option spellings.
    local opt_short="K:U:P:p:f:n:s:b:F:"
    local opt_long="kvmhost:,kvmuser:,kvmport:,kvmpass:,pool:,fmt:,name:,size:,backing_vol:,backing_fmt:,"
    opt_short+="ql:dVh"
    opt_long+="quiet,log:,dryrun,version,help"
    __ARGS=$(getopt -n "${SCRIPTNAME}" -o ${opt_short} -l ${opt_long} -- "$@") || usage
    eval set -- "${__ARGS}"
    while true; do
        case "$1" in
            -K | --kvmhost)     shift; kvmhost=${1}; shift;;
            -U | --kvmuser)     shift; kvmuser=${1}; shift;;
            -P | --kvmport)     shift; kvmport=${1}; shift;;
            --kvmpass)          shift; kvmpass=${1}; shift;;
            -p | --pool)        shift; pool=${1}; shift;;
            -n | --name)        shift; name=${1}; shift;;
            -f | --fmt)         shift; fmt=${1}; shift;;
            -s | --size)        shift; size=${1}; shift;;
            -b | --backing_vol) shift; backing_vol=${1}; shift;;
            -F | --backing_fmt) shift; backing_fmt=${1}; shift;;
            ########################################
            -q | --quiet)       shift; QUIET=1;;
            -l | --log)         shift; set_loglevel ${1}; shift;;
            -d | --dryrun)      shift; DRYRUN=1;;
            -V | --version)     shift; for _v in "${VERSION[@]}"; do echo "$_v"; done; exit 0;;
            -h | --help)        shift; usage;;
            --)                 shift; break;;
            *)                  usage "Unexpected option: $1";;
        esac
    done
    # pool, name and size are mandatory.
    [ ! -z "${pool}" ] && [ ! -z "${name}" ] && [ ! -z "${size}" ] || usage "pool/name/size must input"
    [ -z ${kvmpass} ] || set_sshpass "${kvmpass}"
    info_msg "create vol ${name} on ${pool} size ${size}\n"
    # Make sure the pool is active and its view of the storage is current.
    virsh_wrap "${kvmhost}" "${kvmport}" "${kvmuser}" pool-start ${pool} &>/dev/null || true
    virsh_wrap "${kvmhost}" "${kvmport}" "${kvmuser}" pool-refresh ${pool} || exit_msg "pool-refresh error\n"
    # Create a tiny 1M volume first, then grow it: vol-create-as with a
    # backing volume plus the final size in one call is avoided here.
    virsh_wrap "${kvmhost}" "${kvmport}" "${kvmuser}" vol-create-as --pool ${pool} --name ${name} --capacity 1M --format ${fmt} \
        ${backing_vol:+--backing-vol ${backing_vol} --backing-vol-format ${backing_fmt} } || exit_msg "vol-create-as error\n"
    virsh_wrap "${kvmhost}" "${kvmport}" "${kvmuser}" vol-resize --pool ${pool} --vol ${name} --capacity ${size} || exit_msg "vol-resize error\n"
    local val=$(virsh_wrap "${kvmhost}" "${kvmport}" "${kvmuser}" vol-path --pool "${pool}" "${name}") || error_msg "vol-path error\n"
    info_msg "create ${val} OK\n"
    return 0
}
main "$@"
| true |
f5b6b86f9c22ae41931d2c0783f7008457b2939a | Shell | sanjukh68/Parallel-Computing | /Extra credit/queue_dynamic.sh | UTF-8 | 421 | 3.0625 | 3 | [] | no_license | #!/bin/sh
# Driver for the dynamic reduce benchmark: builds the binary and prints the
# mpirun command line for every (process count, problem size) pair taken
# from params.sh. Must be submitted through qsub, not run on the head node.
set -x
RESULTDIR=result/
h=$(hostname)
# params.sh defines PROC (list of process counts) and N (list of sizes).
. ./params.sh

# Refuse to run computations on the cluster head node.
if [ "$h" = "mba-i1.uncc.edu" ];
then
    echo Do not run this on the headnode of the cluster, use qsub!
    exit 1
fi

if [ ! -d ${RESULTDIR} ];
then
    mkdir ${RESULTDIR}
fi

make reduce_dynamic

# Bug fix: the list was written "${PROC}}", which glued a literal "}" onto
# the last process count and broke that mpirun invocation.
for proc in ${PROC}
do
    for n in ${N}
    do
        # Bug fix: the log name used the whole lists (${N}/${PROC}) instead
        # of the current loop values, so every run targeted the same file.
        echo "mpirun -np ${proc} ./reduce_dynamic ${n} 2> ${RESULTDIR}/reduce_dynamic_${n}_${proc} >/dev/null"
    done
done
71456e45319a9c9c54ce993bf4ee0d15d61b608a | Shell | dse/git-scripts | /bin/clone | UTF-8 | 2,476 | 4.03125 | 4 | [] | no_license | #!/usr/bin/env bash
# Clone a git repository using a per-directory template configured in
# ./.clone.sh: CLONE_TEMPLATE is a URL with <<USER>>/<<REPOS>> placeholders
# and CLONE_USER is the default user.
#
# Usage:
#   clone <repos> [<dir>]            clone <user>/<repos> [<dir>]
#   clone <user> <repos> [<dir>]     ("-" or empty user means CLONE_USER)

# First run: write a template config for the user to edit, then stop.
if ! [[ -e .clone.sh ]] ; then
    cat <<EOF >.clone.sh
# -*- sh -*-
# Uncomment one of the CLONE_TEMPLATE lines below.
# Do not change <<USER>> or <<REPOS>>.
# In the CLONE_USER line below, change <USER> to your username.
# CLONE_TEMPLATE="git@github.com:<<USER>>/<<REPOS>>.git"
# CLONE_TEMPLATE="https://github.com/<<USER>>/<<REPOS>>.git"
# CLONE_TEMPLATE="git@bitbucket.org:<<USER>>/<<REPOS>>.git"
CLONE_USER="<USER>"
EOF
    >&2 echo "I just created a file called '.clone.sh' for you."
    >&2 echo "Please view its contents and edit as instructed."
    exit 0
fi

. .clone.sh

# Validate the sourced configuration.
if ! [[ -v CLONE_TEMPLATE ]] ; then
    >&2 echo "You haven't set CLONE_TEMPLATE."
    exit 1
fi
if ! [[ -v CLONE_USER ]] ; then
    >&2 echo "You haven't set CLONE_USER."
    exit 1
fi
if ! [[ "${CLONE_TEMPLATE}" = *'<<USER>>'* ]] ; then
    >&2 echo "CLONE_TEMPLATE is missing the <<USER>> placeholder."
    exit 1
fi
if ! [[ "${CLONE_TEMPLATE}" = *'<<REPOS>>'* ]] ; then
    >&2 echo "CLONE_TEMPLATE is missing the <<REPOS>> placeholder."
    exit 1
fi

uri="${CLONE_TEMPLATE}"
dir=""

# Robustness fix: reject 0 or more than 3 arguments instead of silently
# cloning a URL with an empty repository name.
if (( $# < 1 || $# > 3 )) ; then
    >&2 echo "usage: clone [<user>/]<repos> [<dir>] | clone <user> <repos> [<dir>]"
    exit 1
fi

if (( $# == 1 )) ; then
    if [[ "$1" = */*/* ]] ; then
        >&2 echo "too many slashes"
        exit 1
    elif [[ "$1" = */* ]] ; then
        # clone <user>/<repos>
        # clone -/<repos>
        # clone /<repos>
        user="${1%%/*}"
        repos="${1#*/}"
    else
        # clone <repos>
        user="${CLONE_USER}"
        repos="$1"
    fi
elif (( $# == 2 )) ; then
    if [[ "$1" = */* ]] ; then
        # clone <user>/<repos> <dir>
        # clone -/<repos> <dir>
        # clone /<repos> <dir>
        user="${1%%/*}"
        repos="${1#*/}"
        if [[ "${user}" = "" ]] || [[ "${user}" = "/" ]] ; then
            user="${CLONE_USER}"
        fi
        dir="$2"
    elif [[ "$2" = */* ]] ; then
        >&2 echo "unwanted slash"
        exit 1
    else
        # Bug fix: these assignments used to run unconditionally after the
        # branch above, clobbering the parsed <user>/<repos> <dir> form.
        # clone <user> <repos>
        # clone - <repos>
        user="$1"
        repos="$2"
    fi
elif (( $# == 3 )) ; then
    if [[ "$1" = */* ]] ; then
        >&2 echo "unwanted slash"
        exit 1
    elif [[ "$2" = */* ]] ; then
        >&2 echo "unwanted slash"
        exit 1
    fi
    # clone <user> <repos> <dir>
    # clone - <repos> <dir>
    user="$1"
    repos="$2"
    dir="$3"
fi

# "-" or empty user selects the configured default.
if [[ "${user}" = "" ]] || [[ "${user}" = "-" ]] ; then
    user="${CLONE_USER}"
fi

uri="${uri//<<USER>>/${user}}"
uri="${uri//<<REPOS>>/${repos}}"

if [[ -n "$dir" ]] ; then
    git clone "${uri}" "${dir}"
else
    git clone "${uri}"
fi
| true |
593a6c8359299f504fd34cbe193f72922843613f | Shell | coreboot/vboot | /scripts/image_signing/ensure_amd_psp_flags.sh | UTF-8 | 3,990 | 3.828125 | 4 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
# Copyright 2022 The ChromiumOS Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Load common constants and variables.
# common.sh also provides make_temp_dir, loopback_partscan,
# mount_loop_image_partition_ro, extract_firmware_bundle and die.
. "$(dirname "$0")/common.sh"
# Abort on error and uninitialized variables.
set -eu
# Per-board masks of soft-fuse bits that MUST be set.
declare -A -r REQUIRED_BIT_MASKS=(
  # Bit 58 - PSP_S0I3_RESUME_VERSTAGE - Run PSP verstage during S0i3 resume.
  # Checks that FW images have not been tampered with when exiting S0i3.
  [guybrush]="$((1 << 58))"
  [zork]="0x0"
)
# Per-board masks of soft-fuse bits that must NOT be set.
declare -A -r FORBIDDEN_BIT_MASKS=(
  [guybrush]="0x0"
  [zork]="0x0"
)
# Grunt uses an old firmware format that amdfwread cannot read.
# See b/233787191 for skyrim.
BOARD_IGNORE_LIST=(grunt skyrim)

usage() {
  echo "$0: Validate AMD PSP soft-fuse flags contained in a ChromeOS image." \
    "These flags can have security implications and control debug features."
  echo "Usage $0 <image> <board>"
}

main() {
  if [[ $# -ne 2 ]]; then
    usage
    exit 1
  fi

  local image="$1"
  local board="$2"

  # Check the ignore list.
  # Matching " board " with spaces avoids substring false positives.
  if [[ " ${BOARD_IGNORE_LIST[*]} " == *" ${board} "* ]]; then
    echo "Skipping ignore-listed board ${board}"
    exit 0
  fi

  # Mount the image.
  # A directory argument is treated as an already-mounted rootfs.
  local loopdev rootfs
  if [[ -d "${image}" ]]; then
    rootfs="${image}"
  else
    rootfs="$(make_temp_dir)"
    loopdev="$(loopback_partscan "${image}")"

    mount_loop_image_partition_ro "${loopdev}" 3 "${rootfs}"
  fi

  local firmware_bundle shellball_dir
  firmware_bundle="${rootfs}/usr/sbin/chromeos-firmwareupdate"
  shellball_dir="$(make_temp_dir)"

  # Extract our firmware.
  if ! extract_firmware_bundle "${firmware_bundle}" "${shellball_dir}"; then
    die "Failed to extract firmware bundle"
  fi

  # Find our images.
  declare -a images
  readarray -t images < <(find "${shellball_dir}" -iname 'bios-*')

  # Validate that all our AP FW images are AMD images.
  # Note: this "local image" deliberately reuses (shadows) the argument name
  # for the per-file loop below.
  local image
  for image in "${images[@]}"; do
    # With no args, amdfwread will just attempt to validate the FW header.
    # On non-AMD FW this will fail, allowing us to skip non-AMD FW images.
    if ! amdfwread "${image}" &> /dev/null; then
      if [[ ! -v "REQUIRED_BIT_MASKS[${board}]" &&
            ! -v "FORBIDDEN_BIT_MASKS[${board}]" ]]; then
        # If we have an invalid FW image and don't have bitsets for this board
        # then this isn't an AMD board, exit successfully.
        exit 0
      else
        die "Found invalid AMD AP FW image"
      fi
    fi
  done

  # Get the board specific bit masks.
  local required_bit_mask forbidden_bit_mask

  if [[ ! -v "REQUIRED_BIT_MASKS[${board}]" ]]; then
    die "Missing PSP required bit mask set for ${board}"
  fi

  if [[ ! -v "FORBIDDEN_BIT_MASKS[${board}]" ]]; then
    die "Missing PSP forbidden bit mask set for ${board}"
  fi

  required_bit_mask="${REQUIRED_BIT_MASKS[${board}]}"
  forbidden_bit_mask="${FORBIDDEN_BIT_MASKS[${board}]}"

  # Check the soft-fuse bits
  for image in "${images[@]}"; do
    local soft_fuse soft_fuse_output forbidden_set missing_set

    if ! soft_fuse_output="$(amdfwread --soft-fuse "${image}")"; then
      die "'amdfwread --soft-fuse ${image}' failed"
    fi

    # Output format from amdfwread is Soft-fuse:value, where value is in hex.
    soft_fuse="$(echo "${soft_fuse_output}" | \
      sed -E -n 's/Soft-fuse:(0[xX][0-9a-fA-F]+)/\1/p')"

    if [[ -z "${soft_fuse}" ]]; then
      die "Could not parse Soft-fuse value from output: '${soft_fuse_output}'"
    fi

    forbidden_set="$((soft_fuse & forbidden_bit_mask))"
    if [[ "${forbidden_set}" != 0 ]]; then
      local forbidden_hex
      forbidden_hex="$(printf %#x "${forbidden_set}")"
      die "${image}: Forbidden AMD PSP soft-fuse bits set: ${forbidden_hex}"
    fi

    missing_set="$((~soft_fuse & required_bit_mask))"
    if [[ "${missing_set}" != 0 ]]; then
      local missing_hex
      missing_hex="$(printf %#x "${missing_set}")"
      die "${image}: Required AMD PSP soft-fuse bits not set: ${missing_hex}"
    fi
  done
}

main "$@"
| true |
59fe1ade8196dc184f2c042a6b2dc9a3e285caac | Shell | shentar/my-scripts | /stopapp.sh | UTF-8 | 627 | 2.625 | 3 | [] | no_license | #!/bin/bash
# Kill (SIGKILL) every process whose full "ps aufx" line contains the given
# string. WARNING(review): this is a substring match over the entire ps
# line (user, args, paths), so short names like "git" can match unrelated
# processes — consider pgrep -x/-f before reusing this pattern.
killoneprocess ()
{
	local processname="$1"
	if [ ! -z ${processname} ]
	then
		result=$(ps aufx |grep "${processname}"|grep -v grep |awk '{print $2}')
		if [ ! -z "${result}" ]
		then
			# $result may hold several PIDs (one per line); xargs
			# passes them all to a single kill.
			echo "$result" |xargs kill -9
		fi
	fi
}
# Stop every service of the NAS/app stack.
killoneprocess nginx
killoneprocess php-fpm
#killoneprocess smbd
killoneprocess vsftpd
killoneprocess ETMDaemon
killoneprocess vod_httpserver
killoneprocess EmbedThunderManager
killoneprocess oraysl
killoneprocess oraynewph
killoneprocess mysql
killoneprocess btsync
killoneprocess git
| true |
1a2c2f14d7b313d492776a7988ee51865c6e5360 | Shell | wannasky/caffe2 | /docker/jenkins/build.sh | UTF-8 | 1,971 | 3.890625 | 4 | [
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] | permissive | #!/bin/bash
set -e

# Catalogue of CI image names this script knows how to build; the chosen
# name is parsed below to derive the docker build arguments.
declare -a valid_images
valid_images=(
  # Primary builds
  py2-cuda8.0-cudnn7-ubuntu16.04
  py3-cuda8.0-cudnn7-ubuntu16.04
  py2-cuda9.0-cudnn7-ubuntu16.04
  py3-cuda9.0-cudnn7-ubuntu16.04
  py2-mkl-ubuntu16.04
  py3-mkl-ubuntu16.04

  # Compiler compatibility
  py2-gcc5-ubuntu16.04
  py2-gcc6-ubuntu16.04
  py2-gcc7-ubuntu16.04
  py2-clang3.8-ubuntu16.04
  py2-clang3.9-ubuntu16.04

  # Build for Android
  py2-android-ubuntu16.04
)

image="$1"
shift

if [ -z "${image}" ]; then
  echo "Usage: $0 IMAGE"
  exit 1
fi

# Extract version components from the image name with small perl regexes;
# each prints its capture group or nothing when the token is absent.
UBUNTU_VERSION="$(echo "${image}" | perl -n -e'/ubuntu(\d+\.\d+)/ && print $1')"
DOCKERFILE="ubuntu/Dockerfile"

PYTHON_VERSION="$(echo "${image}" | perl -n -e'/py(\d+(\.\d+)?)/ && print $1')"

# CUDA images use a different Dockerfile and carry CUDA/cuDNN versions.
if [[ "$image" == *cuda* ]]; then
  CUDA_VERSION="$(echo "${image}" | perl -n -e'/cuda(\d+\.\d+)/ && print $1')"
  CUDNN_VERSION="$(echo "${image}" | perl -n -e'/cudnn(\d+)/ && print $1')"
  DOCKERFILE="ubuntu-cuda/Dockerfile"
fi

if [[ "$image" == *-mkl-* ]]; then
  MKL=yes
fi

if [[ "$image" == *-android-* ]]; then
  ANDROID=yes
fi

if [[ "$image" == *-gcc* ]]; then
  GCC_VERSION="$(echo "${image}" | perl -n -e'/gcc(\d+(\.\d+)?)/ && print $1')"
fi

if [[ "$image" == *-clang* ]]; then
  CLANG_VERSION="$(echo "${image}" | perl -n -e'/clang(\d+(\.\d+)?)/ && print $1')"
fi

# Copy over common scripts to directory containing the Dockerfile to build
cp -a bin common/* "$(dirname ${DOCKERFILE})"

# Build image
# Unset build args expand to empty strings, which the Dockerfile tolerates.
docker build \
       --build-arg EC2=${EC2:-} \
       --build-arg JENKINS=${JENKINS:-} \
       --build-arg UBUNTU_VERSION=${UBUNTU_VERSION} \
       --build-arg PYTHON_VERSION=${PYTHON_VERSION} \
       --build-arg CUDA_VERSION=${CUDA_VERSION} \
       --build-arg CUDNN_VERSION=${CUDNN_VERSION} \
       --build-arg MKL=${MKL} \
       --build-arg ANDROID=${ANDROID} \
       --build-arg GCC_VERSION=${GCC_VERSION} \
       --build-arg CLANG_VERSION=${CLANG_VERSION} \
       "$@" \
       "$(dirname ${DOCKERFILE})"
| true |
ae454ec48d17bb3b6d9955aa0070cfec2cf18cfd | Shell | lantw44m/ftp.isu.edu.tw-pub-Unix-BBS | /BBS/Atlantis/OLD/FreeBSD/Current_Release/ats-1.32_20030526_patch_ok/lib/bbsctl | UTF-8 | 275 | 3.09375 | 3 | [] | no_license | #!/bin/sh
# Editor : drange.bbs@tribe.twbbs.org
# Control helper for the mbbsd BBS daemon: show the recorded PID, stop a
# running instance, or restart it.
case "$1" in
showpid)
	more /home/bbsrs/run/mbbsd.pid
	;;
stop)
	# Robustness fix: require a PID so "kill -9" is never invoked empty.
	[ -n "$2" ] || { echo "Usage: bbsctl <showpid/restart/stop> [PID]"; exit 1; }
	kill -9 "$2"
	;;
restart)
	[ -n "$2" ] || { echo "Usage: bbsctl <showpid/restart/stop> [PID]"; exit 1; }
	kill -9 "$2"
	# Remove the stale PID file before launching a fresh daemon.
	rm -f /home/bbsrs/run/mbbsd.pid
	/home/bbsrs/bin/mbbsd
	;;
*)
	echo "Usage: bbsctl <showpid/restart/stop> [PID]"
	;;
esac
exit 0
8a613f5c6f737154da391bc6cb262bb526b7fb53 | Shell | jwang-a/CTF | /MyChallenges/Pwn/Sentinel/distribute/reset.sh | UTF-8 | 931 | 3.3125 | 3 | [] | no_license | #!/bin/bash
# Rebuild the CTF "Sentinel" challenge environment from scratch: tear down
# all containers and state, stage the chosen binary/flag variant, lock down
# permissions, and bring the service back up. $1 selects "original" or
# "revenge".
###Cleanup
docker rm -f $(docker ps -aq)
# Permissions were locked down on the last run; open them so rm can work.
chmod -R 777 ./tmp/dockerRoot/
chmod -R 777 ./tmp/instances/*
chmod 777 ./share/guest_home
rm -fr ./tmp/dockerRoot
rm -fr ./tmp/instances/*
rm -fr ./share/guest_home/*

###Copy target files into challenge directory
# Hard links (ln) keep the originals in challengeBin/challengeFlag intact.
if [ "$1" = "original" ]
then
	ln ./challengeBin/sentinel ./share/guest_home/sentinel
	ln ./challengeFlag/flag ./share/guest_home/flag
elif [ "$1" = "revenge" ]
then
	ln ./challengeBin/sentinelRevenge ./share/guest_home/sentinel
	ln ./challengeFlag/flagRevenge ./share/guest_home/flag
else
	echo "invalid target"
	exit
fi
ln ./challengeFlag/fakeFlag ./share/guest_home/fakeFlag

###Set permission of all files to prevent changes
chmod 777 ./tmp/instances
chmod 777 ./share/guest_home/flag
chmod 777 ./share/guest_home/fakeFlag
# Binary and home directory are read+execute only for everyone.
chmod 555 ./share/guest_home/sentinel
chmod 555 ./share/guest_home/

###Start service
docker-compose build
docker-compose up -d
| true |
50b3e19ba678cc2d2e9c1b47fd03d219437046cc | Shell | namsnath/dotfiles | /zsh/.zshrc | UTF-8 | 1,488 | 2.734375 | 3 | [] | no_license | # Antibody
# Plugin manager: antibody loads the prompt theme and autosuggestions.
source <(antibody init)
antibody bundle denysdovhan/spaceship-prompt
antibody bundle zsh-users/zsh-autosuggestions

# Packages
autoload -U colors && colors

# Sources
source /usr/share/nvm/init-nvm.sh

# Spaceship prompt configs
SPACESHIP_PROMPT_SEPARATE_LINE=false
SPACESHIP_GIT_PREFIX='| '

# Environment Variables
export DOT=$HOME/dotfiles
# export JAVA_OPTS='-XX:+IgnoreUnrecognizedVMOptions --add-modules java.se.ee'
export JAVA_HOME='/usr/lib/jvm/java-8-openjdk'
# export JAVA_HOME='/usr/lib/jvm/java-14-openjdk'
export ANDROID_SDK_ROOT='/opt/android-sdk'

# History
HISTSIZE=1000
SAVEHIST=1000
HISTFILE=~/.zhistory

# Keybinds
## Ctrl + Right
bindkey "^[[1;5C" forward-word
## Ctrl + Left
bindkey "^[[1;5D" backward-word

# Aliases
alias ls='ls --color=auto'
alias reload='source ~/.zshrc'
alias ssh='kitty +kitten ssh'
alias suspend='systemctl suspend'
alias fast_mirrors='sudo reflector --latest 20 --sort rate --save /etc/pacman.d/mirrorlist'

## Flavours
# Base16 theming helpers; screen_1/screen_2 derive a scheme from the
# wallpaper path recorded in ~/.fehbg.
alias get_foreground=$'flavours info $(flavours current) | sed -n \'8p\' | cut -d " " -f2'
alias gen_flavour_screen_1=$'flavours generate dark $(cat .fehbg | tail -1 | cut -d "\'" -f2) && flavours apply generated'
alias gen_flavour_screen_2=$'flavours generate dark $(cat .fehbg | tail -1 | cut -d "\'" -f4) && flavours apply generated'

## Git
alias git_prune_local='git remote prune origin'
alias git_prune_merged_local="git branch --merged master --no-color | grep -v '^[ *]*master$' | xargs -r git branch -d"
| true |
075ec36f3fb3d52f59d7d9b49ee5f579e7a758dc | Shell | ryomo/vagrant-kivy | /vagrantconf/provision.sh | UTF-8 | 587 | 2.859375 | 3 | [] | no_license | #!/usr/bin/env bash
# `privileged:false` is set in Vagrantfile, so both `whoami` and $HOME are 'vagrant'.
cd ~
# The Kivy team PPA provides the python3-kivy package installed below.
sudo add-apt-repository -y ppa:kivy-team/kivy && sudo apt-get update
#sudo apt-get upgrade -y
# Desktop Environment
sudo apt-get install -y lxde
# Python3, pip3
sudo apt-get install -y python3 python3-pip
mkdir -p ~/.local/bin
# Expose python3 as plain `python` for tools that expect that name.
ln -s /usr/bin/python3 ~/.local/bin/python
#pip3 install --user pip # This causes ImportError. https://github.com/pypa/pip/issues/5599
# Kivy
sudo apt-get install -y python3-kivy
# To run GUI apps via ssh
echo 'export DISPLAY=:0' >> .bashrc
| true |
9d8207135c6a256ec81a7b9c14db5d6be4319217 | Shell | xingmeichen/java-tutorial | /scripts/init.sh | UTF-8 | 625 | 2.65625 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
#################################################################################
# Initialization script for the javatool-server project.
# After this script runs, JavaStack has been cloned into /home/zp/source/JavaStack.
# git must be installed in the environment.
#################################################################################
rm -rf /home/temp
rm -rf /home/zp/source/JavaStack
mkdir -p /home/temp
cd /home/temp
# Fetch the helper that clones a repository at a given branch, then run it.
wget https://raw.githubusercontent.com/dunwu/JavaStack/master/scripts/git-clone.sh
chmod 777 git-clone.sh
./git-clone.sh JavaStack master
chmod 777 -R /home/zp/source/JavaStack
rm -rf /home/temp
| true |
b5fa016acdf059efa55d4af2939349843b9d1481 | Shell | saurabh-mk/thesis | /Codes/gblocks_cleaning_cds2.sh | UTF-8 | 452 | 3 | 3 | [] | no_license | #!/bin/sh
## Clean a multiple-sequence alignment file using Gblocks.
## Arguments: $1 = alignment file basename, $2 = working-tree root, $3 = run directory.
fn_oi=$1
wt_dir=$2
wd_oi=$3
cds_dir="${wd_oi}_cds_alignments/"
# Abort if the run directory cannot be entered; otherwise Gblocks would run
# against whatever the current directory happens to be.
cd "${wt_dir}${wd_oi}/" || exit 1
# Log the exact invocation before running it.
# Fixed: the logged command previously said "-e=aln" while the real call used
# "-e=.aln"; the log line now matches the command actually executed.
echo "Gblocks was called as Gblocks ${cds_dir}${fn_oi}.best.nuc.fas -t=c -b1=12 -b2=12 -b3=2 -b4=10 -b5=n -e=.aln" >> "${cds_dir}gblocks_alignment.log"
Gblocks "${cds_dir}${fn_oi}.best.nuc.fas" -t=c -b1=12 -b2=12 -b3=2 -b4=10 -b5=n -e=.aln >> "${cds_dir}gblocks_alignment.log"
| true |
798231e75121e3a38a64e322755c7251df513db7 | Shell | fckinnocent/spcam-multiscale-extreme-rainfall | /scripts/getHourlyMassfluxDailyFiles.sh | UTF-8 | 2,054 | 3.671875 | 4 | [] | no_license | #!/bin/bash
# Resolve the directory this script lives in, independent of the caller's cwd.
scriptdir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Load modules if necessary
source $scriptdir/loadPlatformSpecific.sh preprocessing
#---------------------------#
#    Experiment variables   #
#---------------------------#
compset="FAMIPC5"
experiment="abrupt4xCO2"
case="bf_"${compset}"_"$experiment
# Pick the simulation-output directory based on which machine we run on.
if [[ "$HOSTNAME" == "jollyjumper" ]]
then
    inputdir="/Users/bfildier/Data/simulations/"${case}
elif [[ "$HOSTNAME" == "edison"* ]] || [[ "$HOSTNAME" == "cori"* ]]
then
    inputdir=${scriptdir%\/*}"/archive/"${case}"/atm/hist"
fi
#-----------------------------------#
#    Time boundaries and functions  #
#-----------------------------------#
# Timestamps are YYYY-MM-DD-SSSSS (seconds of day in the last field).
lowerTimeBound="1850-05-01-03600"
upperTimeBound="1850-05-02-00000"
## Get start and end date
lowerDate=${lowerTimeBound%-*}
upperDate=${upperTimeBound%-*}
## Use start and end times for dividing all days
lowerTime=${lowerTimeBound##*-}
upperTime=${upperTimeBound##*-}
## Define function to increment date in correct format
# Writes a small Python 2 helper (note the bare `print` statement) that adds
# one day to a YYYY-MM-DD date given as its first argument.
cat << EOF > incrementDate
#!/usr/bin/python
import datetime as dt
import sys
newDate = dt.datetime.strptime(sys.argv[1],'%Y-%m-%d')
newDate += dt.timedelta(days=1)
print newDate.isoformat().split("T")[0]
EOF
chmod +x incrementDate
#-----------------------------------------------#
#    Get values for a sequences of variables    #
#-----------------------------------------------#
echo
echo "------------------------------------------------------------------"
# NOTE(review): $varid is never assigned in this script, so this banner prints
# an empty variable name -- confirm whether it should say "mass flux".
echo "    Getting hourly $varid values for $compset and $experiment"
echo "------------------------------------------------------------------"
startDate=$lowerDate
# global startDate
# Walk day by day from lowerDate up to (but not including) upperDate.
while [ "$startDate" != "$upperDate" ]
do
    endDate=`./incrementDate $startDate`
    # std message
    echo
    echo "$(tput bold)  Compute mass flux for ${startDate}-${lowerTime}"\
    "--- ${endDate}-${upperTime}$(tput sgr0)"
    echo
    # One invocation per day window; the Python tool does the actual work.
    python getHourlyGCMMassFlux.py -e $experiment -c $compset -d ${startDate}-${lowerTime} \
    ${endDate}-${upperTime} -i $inputdir
    # Increment dates
    startDate=$endDate
done
exit 0
| true |
c5601e738ce052a77317a49ad39270c8df8370eb | Shell | ColinIanKing/fwts | /scripts/bash-completion/fwts | UTF-8 | 2,860 | 3.234375 | 3 | [] | no_license | #!/bin/bash
#
# FWTS tab completion for bash.
#
# Copyright (C) 2017-2023 Canonical
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# Completion entry point for the `fwts` command.
# If the previous word is an option taking a known argument, complete that
# argument; otherwise offer test names (queried from `fwts --show-tests`)
# and long options (parsed from --help output).
_fwts()
{
	local cur prev
	cur=${COMP_WORDS[COMP_CWORD]}
	prev=${COMP_WORDS[COMP_CWORD-1]}
	case $prev in
	'--arch')
		COMPREPLY=( $(compgen -W "x86 x86_32 x86_64 ia64 arm64 aarch64" -- $cur) )
		compopt -o nosort
		return 0
		;;
	'--dumpfile'|'-k'|'--klog'|'-J'|'--json-data-file'|'--lspci'|'-o'|'--olog'|'--s3-resume-hook'|'-r'|'--results-output')
		# These options take a file name argument.
		_filedir
		return 0
		;;
	'-j'|'--json-data-path'|'-t'|'--table-path')
		# These options take a directory argument.
		local IFS=$'\n'
		compopt -o filenames
		COMPREPLY=( $(compgen -d -- ${cur}) )
		return 0
		;;
	'--log-level')
		COMPREPLY=( $(compgen -W "critical high medium low info all" -- $cur) )
		compopt -o nosort
		return 0
		;;
	'--log-type')
		COMPREPLY=( $(compgen -W "plaintext json xml" -- $cur) )
		return 0
		;;
	'--pm-method')
		COMPREPLY=( $(compgen -W "logind pm-utils sysfs" -- $cur) )
		return 0
		;;
	'--log-filter'|'--log-format'|'-w'|'--log-width'|'-R'|'-rsdp'|\
	'--s3-delay-delta'|'--s3-device-check-delay'|'--s3-max-delay'|'--s3-min-delay'|'--s3-multiple'|\
	'--s3-quirks'|'--s3-resume-time'|'--s3-sleep-delay'|'--s3-suspend-time'|'--s3power-sleep-delay'|\
	'--s4-delay-delta'|'--s4-device-check-delay'|'--s4-max-delay'|'--s4-min-delay'|'--s4-multiple'|'--s4-quirks'|'--s4-sleep-delay'|\
	'-s'|'--skip-test'|'--uefi-get-var-multiple'|'--uefi-query-var-multiple'|'--uefi-set-var-multiple')
		# argument required but no completions available
		return 0
		;;
	'-h'|'--help'|'-v'|'--version'|'-d'|'--dump'|'-s'|'--show-tests'|'--show-tests-full'|'--show-tests-categories'|'--log-fields')
		# all other arguments are noop with these
		return 0
		;;
	esac
	# Default case: complete test names, and long options once typing started.
	local all_tests=`fwts --show-tests | sed '/.*:/d;/^$/d' | awk '{ print $1 }'`
	local all_long_options=$( _parse_help "$1" --help )
	if [ -z "$cur" ]; then
		COMPREPLY=( $( compgen -W "${all_tests}" -- "$cur" ) )
	else
		COMPREPLY=( $( compgen -W "${all_tests} ${all_long_options}" -- "$cur" ) )
	fi
	return 0
}
# load the completion
complete -F _fwts fwts
| true |
87b3400712dbc0c33e8b5f75332fdb59ad2e643e | Shell | iarakshana/hw1 | /salaries.sh | UTF-8 | 1,566 | 3.765625 | 4 | [] | no_license | #!/bin/bash
#f=$1
# Data file is hard-coded for now; the commented line above shows how it could
# be taken from the command line instead.
f="salaries.csv"
echo "(1) TOP SALARIES IN THE CITY!!"
# Remove dollar signs, sort the salary column (8) numerically, show the top few.
grep '\$' salaries.csv | sed 's/\$//g'| sort -k8 -t, -n -r |head
echo "(2) CITY EMPLOYEES"
# Count the data rows while skipping the header line.
# Fixed: this previously ran `sed -i '1d'` on the file in place, which deleted
# one more (data) row every time the script was re-run.
tail -n +2 salaries.csv | wc -l
echo "(3 and 4) :: FULL AND PART-TIME WORKERS"
# Column 5 is the full/part-time flag; skip the header before counting.
# full time workers
tail -n +2 salaries.csv | cut -f5 -d, | grep -c F
# part time workers
tail -n +2 salaries.csv | cut -f5 -d, | grep -c P
echo "(5 and 6) HIGHEST HOURLY WAGES"
# Same approach as the first question, on the hourly-rate column.
grep -i hourly salaries.csv | sort -k9 -t, -n -r | head
echo "(7) POLICE DEPARTMENT"
# Department names do not occur in the header, so no header handling needed.
grep -c "POLICE" salaries.csv
echo "(8) DETECTIVES"
grep "POLICE" salaries.csv | grep -c "DETECTIVE"
echo "(9) THE MODAL FIREMAN"
# Most common fire-department salary: count occurrences of each salary, then
# keep the most frequent one.
# Fixed: the final stage was the garbled "sort -necho"; restored to a numeric
# sort, taking the last (most frequent) entry as the comment describes.
grep "FIRE" salaries.csv | cut -f8 -d, | sort -n -r | uniq -c | sort -n | tail -1
echo "(10) NAMES FOR POLICE OFFICERS"
# First names of police officers, ranked by frequency.
grep -i "police officer" salaries.csv | cut -f2 -d, | awk '{print$1;}' | sort -rn | uniq -c | sort -n
854cc5cf9be324cf73c6f6b70071d4f8a0aa30b4 | Shell | kenzhaoyihui/auto_vm_create.sh | /auto_create_vm.sh | UTF-8 | 1,928 | 3.46875 | 3 | [] | no_license | #!/bin/bash -xe
# Just a example
# Author: yzhao
#
# Batch-create KVM guests from a template qcow2 image and libvirt XML.
# Usage: auto_create_vm <host_count> <host_name_prefix>
setenforce 0
sed -i "s/^SELINUX=.*/SELINUX=disabled/" /etc/selinux/config
systemctl stop firewalld && systemctl disable firewalld
yum install -y virt-manager libvirt qemu-common qemu-kvm qemu-system-x86 libcgroup-tools
systemctl start libvirtd && systemctl enable libvirtd
[ $# -lt 2 ] && echo 'Usage:auto_create_vm $host_num $host_name' && exit 1
image_path="/var/lib/libvirt/images"
xml_path="/var/libvirt/qemu"
# centos-test.qcow2
# System: Fedora 23 or 25
# username:root
# passwd: redhat
# Date: 2017.01.19
template_image="centos_test.qcow2"
# Fixed: a stray bare `cp` line (always an error) was removed, and the
# corrupted `$(unknown)` placeholders below (which would try to execute a
# command named "unknown") were restored to ${filename}.
for i in $(seq $1);do
filename=${2}-${i}
# Per-guest libvirt domain definition, patched with a unique name and UUID.
cp ./xml/centos_test.xml ${xml_path}/${filename}.xml
sed -i "s,<name>.*</name>,<name>${filename}</name>,g" ${xml_path}/${filename}.xml
UUID=`uuidgen`
sed -i "s,<uuid>.*</uuid>,<uuid>${UUID}</uuid>,g" ${xml_path}/${filename}.xml
sed -i "s/UUID=.*$/UUID=${UUID}/g" ./ifcfg/ifcfg-eth0
sed -i "s/ONBOOT=no/ONBOOT=yes/g" ./ifcfg/ifcfg-eth0
# sed "/HWADDR/c HWADDR=${MAC}" ifcfg-eth0
cp -rf ./images/${template_image} ${image_path}/${filename}.qcow2
#sed -i "s,<source file=.*$,<source file='${image_path}/${filename}.qcow2'/>,g" ${filename}.xml
#MAC="fa:95:$(dd if=/dev/urandom count=1 2>/dev/null | md5sum | sed 's/^\(..\)\(..\)\(..\)\(..\).*$/\1:\2:\3:\4/')"
#sed -i "s,<mac address=.*$,<mac address='$MAC'/>,g" ${filename}.xml
#sed -i "s/HWADDR=.*$/HWADDR=${MAC}/g" ifcfg-eth0
#sed -i "s/HOSTNAME=.*$/HOSTNAME=${filename}/g" network
echo $filename > ./hostname/hostname
# Inject network config and hostname into the guest image, then define & boot.
virt-copy-in ./ifcfg/ifcfg-eth0 -a ${image_path}/${filename}.qcow2 /etc/sysconfig/network-scripts/
virt-copy-in ./hostname/hostname -a ${image_path}/${filename}.qcow2 /etc/
virsh define ${xml_path}/${filename}.xml
virsh start ${filename}
done
| true |
1b339e077eab643ef0b227c98dd74517c53f907d | Shell | cjie888/cryptocurrency-quant | /start_data.sh | UTF-8 | 610 | 2.890625 | 3 | [] | no_license | #!/bin/bash
# Rebuild the quant-data service, stop the running JVM, and relaunch it.
git pull
/root/apache-maven-3.5.4/bin/mvn clean compile install -Dmaven.test.skip=true
cd ..
rm -rf quant-data-1.0.0-SNAPSHOT.jar
mv cryptocurrency-quant/quant-data/target/quant-data-1.0.0-SNAPSHOT.jar ./quant-data-1.0.0-SNAPSHOT.jar
# Locate the currently running data process. `grep -v grep` keeps the grep
# process itself out of the match.
PID=$(ps -ef|grep java|grep data|grep -v grep|awk '{printf $2}')
# Fixed: the old code tested `$?` of the assignment, which is the exit status
# of awk and is 0 even when nothing matched; test whether a PID was found.
if [ -n "$PID" ]; then
    echo "process id:$PID"
else
    # Fixed: messages referenced the undefined variable $input1.
    echo "process quant-data not exist"
    exit
fi
kill -9 ${PID}
if [ $? -eq 0 ];then
    echo "kill quant-data success"
else
    echo "kill quant-data fail"
fi
# Relaunch detached; all output is discarded.
nohup java -jar quant-data-1.0.0-SNAPSHOT.jar >/dev/null 2>&1 &
# Fixed: trailing "| true" artifact removed from the final echo.
echo "------success---------"
0c2be47cc1db84a998fab24bd7ffd2d27d036fc9 | Shell | yiyiyang81/COMP-206 | /A2/seeker.sh | UTF-8 | 5,200 | 4.03125 | 4 | [] | no_license | #!/bin/bash
# seeker.sh: locate files whose names contain a pattern.
#   Usage: ./seeker.sh [-c] [-a] pattern [path]
#     -c   print the contents of the matched file(s)
#     -a   report every match instead of only the first
#   path defaults to the current working directory.
#
# Rewritten from one duplicated branch per argument count into a single
# flag-parsing pass. Also fixes: the "tat" typo in one error message, a
# message that printed the script name ($0) where the search directory was
# intended, and inconsistent capitalization of the "Unable to locate" text.

show_contents="no"
show_all="no"

# Consume leading option flags (any combination/order of -c and -a).
while [[ "$1" == "-c" || "$1" == "-a" ]]
do
	if [[ "$1" == "-c" ]]
	then
		show_contents="yes"
	else
		show_all="yes"
	fi
	shift
done

# A pattern argument is mandatory.
if [[ $# -eq 0 ]]
then
	echo "Error missing the pattern argument"
	echo "Usage ./seeker.sh [-c] [-a] pattern [path]"
	exit
fi

pattern=$1
explicit_path=$2
dir=${explicit_path:-$(pwd)}

# The optional path must name an existing directory.
if [[ ! -d "$dir" ]]
then
	echo "Error "$dir" is not a valid directory"
	exit
fi

# Collect the matching file names in the target directory.
matches=($(ls "$dir" | grep "$pattern"))

if [[ ${#matches[@]} -eq 0 ]]
then
	echo "Unable to locate any files that has pattern $pattern in its name in $dir"
	exit
fi

if [[ "$show_contents" == "yes" ]]
then
	if [[ "$show_all" == "yes" ]]
	then
		# -c -a: dump every matching file, each preceded by a header.
		for name in "${matches[@]}"
		do
			echo "==== Contents of: $dir/$name ===="
			cat "$dir/$name"
		done
	else
		# -c alone: dump only the first match.
		echo "==== Contents of: $dir/${matches[0]} ===="
		cat "$dir/${matches[0]}"
	fi
elif [[ "$show_all" == "yes" ]]
then
	# -a alone: list the full path of every match.
	for name in "${matches[@]}"
	do
		echo "$dir/$name"
	done
else
	# No flags: with an explicit path argument, report only the first match
	# (preserving the original behaviour); otherwise list all bare names.
	if [[ -n "$explicit_path" ]]
	then
		echo "$dir/${matches[0]}"
	else
		echo "${matches[@]}"
	fi
fi
| true |
81f0218ea86b2352d5dbdedba281c91d3f3de08e | Shell | mgriffin/mikegriffin | /script/new-post | UTF-8 | 492 | 3.875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
set -e

# Today's date forms the Jekyll post filename prefix.
TODAY=$(date +"%Y-%m-%d")

# Title comes from the arguments, or interactively if none were given.
if [ $# -eq 0 ]; then
  read -p "Post title: " TITLE
else
  TITLE=$*
fi

# Slug: spaces become dashes, everything lower-cased.
SLUG=$(echo "$TITLE" | sed 's/ /-/g' | tr "[:upper:]" "[:lower:]")
FILENAME="_posts/$TODAY-$SLUG.md"

# Only scaffold the file when it does not already exist.
# Fixed: the test used `! -z $FILENAME` (string non-empty -- always true here),
# which silently overwrote an existing post; `! -e` matches the message below.
if [ ! -e "$FILENAME" ]; then
  echo "$FILENAME does not exist"
  # Front-matter template; heredoc is unindented so no tab-stripping is needed.
  cat << EOF > "$FILENAME"
---
layout: post
title: "$TITLE"
date: $TODAY
tags:
---
EOF
fi

# Open at line 7, just below the front matter.
vim +7 "$FILENAME"
| true |
c1acb9283a4088e528554dcbcd9020bbc0879a49 | Shell | byeol3325/NIRRGBmatching | /scripts/unzip_syn.sh | UTF-8 | 1,161 | 3.265625 | 3 | [] | no_license | #!/bin/bash
# Verify that every expected synthetic-dataset archive exists under the
# download root ($1) and, only when the set is complete, unzip them into $dst.
download_root=$1
dst="data/syn"
# Every archive the dataset is split into; all must be present before any
# unzipping starts.
# Fixed: array elements and the iteration loops are now quoted
# ("${files[@]}") so paths containing spaces are no longer word-split.
files=(
    "$download_root/depth_compressed_0.zip"
    "$download_root/depth_compressed_1.zip"
    "$download_root/depth_compressed_2.zip"
    "$download_root/depth_compressed_3.zip"
    "$download_root/depth_compressed_4.zip"
    "$download_root/gated0_10bit_0.zip"
    "$download_root/gated0_10bit_1.zip"
    "$download_root/gated0_10bit_2.zip"
    "$download_root/gated0_10bit_3.zip"
    "$download_root/gated0_10bit_4.zip"
    "$download_root/gated1_10bit_0.zip"
    "$download_root/gated1_10bit_1.zip"
    "$download_root/gated1_10bit_2.zip"
    "$download_root/gated1_10bit_3.zip"
    "$download_root/gated1_10bit_4.zip"
    "$download_root/gated2_10bit_0.zip"
    "$download_root/gated2_10bit_1.zip"
    "$download_root/gated2_10bit_2.zip"
    "$download_root/gated2_10bit_3.zip"
    "$download_root/gated2_10bit_4.zip"
    "$download_root/rgb_left_0.zip"
    "$download_root/rgb_left_1.zip"
    "$download_root/rgb_left_2.zip"
    "$download_root/rgb_left_3.zip"
    "$download_root/rgb_left_4.zip"
)
mkdir -p "$dst"
# First pass: report every missing archive instead of failing part-way through.
all_exists=true
for item in "${files[@]}"
do
    if [[ ! -f "$item" ]]; then
        echo "$item is missing"
        all_exists=false
    fi
done
# Second pass: unzip only when the whole set is available.
if $all_exists; then
    for item in "${files[@]}"
    do
        unzip "$item" -d "$dst"
    done
fi
| true |
ca588341ce6d828b88a9f6616737a8ab4fe58b6c | Shell | SambaEdu/se3master | /usr/share/se3/sbin/se3_group_members.sh | UTF-8 | 1,861 | 3.703125 | 4 | [] | no_license | #!/bin/bash
#
# SambaEdu
#
# $Id$
#
# Print the members (uids) of an LDAP group; without an argument, list the
# available groups and prompt for one.
WWWPATH="/var/www"
# Load LDAP parameters.
. /etc/se3/config_l.cache.sh
# Load cached parameters:
. /etc/se3/config_m.cache.sh
. /etc/se3/config_d.cache.sh
if [ "$1" = "-h" -o "$1" = "--help" ]; then
	echo "Script destine a retourner la liste des membres d'un groupe (uid)."
	echo ""
	echo "USAGE: Passer en parametre le nom du groupe."
	echo "       Sinon, sans parametre, la liste des groupes est proposee."
	echo ""
	echo "       A IMPLEMENTER:"
	echo "       Si un groupe est passe en parametre, on peut donner ensuite,"
	echo "       la liste des attributs a recuperer."
	exit
fi
interactif="y"
groupe=""
# If a group name was given, validate it against the directory.
if [ -n "$1" ]; then
	t=$(ldapsearch -xLLL cn=$1 -b ${groupsRdn},${ldap_base_dn})
	if [ -z "$t" ]; then
		echo "Groupe $1 invalide"
	else
		groupe=$1
		interactif="n"
	fi
fi
# Interactive fallback: keep prompting until a valid group is chosen.
while [ -z "$groupe" ]
do
	# Group selection.
	echo ""
	echo "Voici la liste des groupes existants: "
	ldapsearch -xLLL -b ${groupsRdn},${ldap_base_dn} cn|grep "^cn: "| sed -e "s|^cn:||" | sort | tr -d '\n' | more
	echo ""
	echo -e "Choix du groupe: \c"
	read -e groupe
	if [ -n "$groupe" ]; then
		t=$(ldapsearch -xLLL cn=$groupe -b ${groupsRdn},${ldap_base_dn})
		if [ -z "$t" ]; then
			echo "Groupe $groupe invalide"
			groupe=""
		fi
	fi
done
t=$(ldapsearch -xLLL -b ${groupsRdn},${ldap_base_dn} "(&(cn=$groupe)(objectClass=posixGroup))")
# Fixed: this test was written "[ $n "$t" ]" with an undefined $n; the intent
# (non-empty result) is expressed by the -n operator.
if [ -n "$t" ]; then
	# It's a posixGroup.
	if [ "$interactif" = "y" ]; then
		echo ""
		echo "Liste des membres du groupe $groupe:"
		ldapsearch -xLLL cn=$groupe -b ${groupsRdn},${ldap_base_dn} memberUid | grep "^memberUid: " | sed -e "s|^memberUid: ||" | more
	else
		ldapsearch -xLLL cn=$groupe -b ${groupsRdn},${ldap_base_dn} memberUid | grep "^memberUid: " | sed -e "s|^memberUid: ||"
	fi
else
	# Handle the groupOfNames case as well (not yet implemented).
	echo "Cas groupOfNames a implementer."
	exit
fi
| true |
c6585aade36cdbd0c9885e0161ebf48bc885c15c | Shell | Teamprojekt-HSTrier/Bidirektionales-Kommunikationssystem | /MESSAGE_PRINTER/printMessageParamRotProc.sh | UTF-8 | 1,415 | 3.359375 | 3 | [] | no_license | #!/bin/bash
# Render an HTML message to PNG and send it to an ESC/POS receipt printer.
# Args: $1 source HTML, $2 destination PNG, $3 "-R" to rotate 90 degrees,
#       $4 "-p" to enable png2pos photo processing.
readonly X_LINES_POS=576
readonly SRC_HTML="$1"
readonly DST_PNG="$2"
readonly ROTATE="$3"
readonly PHOTO_PROCESSING="$4"
readonly PNG_EXT=".png"
export PNG2POS_PRINTER_MAX_WIDTH="$X_LINES_POS"
echo "Konvertiere HTML-Datei: $1 zu PNG-Datei: $2 ==> mittels \"wkhtmltoimage\""
# wkhtmltoimage has an internal issue that reports errors; these can be
# ignored but must be redirected to /dev/null.
wkhtmltoimage --width "$X_LINES_POS" --quality 100 "$SRC_HTML" "$DST_PNG" 2> /dev/null &&
if [ "$ROTATE" = "-R" ]
then
# Rotate 90 degrees, resize to the printer width, print the rotated copy.
echo "Rotiere PNG-Datei passe Groesse an: $1 und speichere unter PNG-Datei: ${DST_PNG%.*}-ROT.png ==> mittels \"imagemagick\""
convert "$DST_PNG" -rotate 90 -resize 576 ${DST_PNG%.*}-ROT.png &&
echo "Sende PNG-Datei: ${DST_PNG%.*}-ROT.png über \"png2pos\" an Drucker"
if [ "$PHOTO_PROCESSING" = "-p" ]
then
png2pos -c -p -s 4 ${DST_PNG%.*}-ROT.png | lpr -o raw
else
png2pos -c -s 4 ${DST_PNG%.*}-ROT.png | lpr -o raw
fi
else
# No rotation: just resize to the printer width and print.
echo "PNG-Datei passe Groesse an: $1 und speichere unter PNG-Datei: ${DST_PNG%.*}-ROT.png ==> mittels \"imagemagick\""
convert "$DST_PNG" -resize 576 ${DST_PNG%.*}.png &&
echo "Sende PNG-Datei: ${DST_PNG%.*}.png über \"png2pos\" an Drucker"
if [ "$PHOTO_PROCESSING" = "-p" ]
then
png2pos -c -p -s 4 ${DST_PNG%.*}.png | lpr -o raw
else
png2pos -c -s 4 ${DST_PNG%.*}.png | lpr -o raw
fi
fi
echo "Drucken erfolgreich abgeschlossen!"
| true |
300a090251b54206cf3fcfa2d6c25aa70bf5a6d2 | Shell | eatoin5hrdlu/EvoStat | /web/images/makeplate | UTF-8 | 449 | 2.640625 | 3 | [] | no_license | #!/bin/bash
# makeplate: render an embossed text label ($1, default "darwin") at point
# size $2 (default 128) and mask it with a brush texture into <label>.png.
if [ -z "$1" ]; then
	label='darwin'
else
	label=$1
fi
if [ -z "$2" ]; then
	fsize=128;
else
	fsize=$2;
fi
# Vertical offsets of the blurred base layer and the sharp highlight layer.
yoff=140
yoff2=130
# Capitalise the first letter of the label.
labu=${label^}
# Blurred base text plus a sharp tinted overlay, written to a scratch file.
convert -size 580x170 canvas:none -font Bookman-DemiItalic -pointsize ${fsize} -draw "text 80,$yoff $labu" -channel RGBA -blur 0x6 -fill \#99AADD -stroke \#4444FF -draw "text 90,$yoff2 $labu" outA.png
# Fixed: the output name used "$1.png", which degenerates to just ".png"
# whenever the default label is in effect; use the resolved label instead.
/usr/bin/composite -compose Dst_In outA.png smallbluebrush.jpg -matte "${label}.png"
| true |
a5e65e3357f860e55893f702921cfad2372d0537 | Shell | Originate/dcos-ci-example | /scripts/ci/install-dcos-cli | UTF-8 | 1,079 | 4.15625 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Install or upgrade a pinned version of the DC/OS CLI into a cache directory.
set -euo pipefail

# VERBOSE (any value) turns on command tracing.
if [[ ! -z ${VERBOSE+x} ]]; then
  set -x
fi

BINS="$HOME/.cache/bin"
DCOS_CLI="$BINS/dcos"
DCOS_CLI_VERSION="0.4.16"

# Returns the version of the currently installed DC/OS CLI binary. e.g 0.4.16
installed_version() {
  dcos --version | grep 'dcoscli.version' | cut -d= -f2
}

# Downloads the DC/OS CLI binary for linux with version $DCOS_CLI_VERSION to the cache
install_dcos_cli() {
  mkdir -p "$BINS"
  curl -sSL "https://downloads.dcos.io/binaries/cli/linux/x86-64/$DCOS_CLI_VERSION/dcos" \
    -o "$DCOS_CLI"
  chmod u+x "$DCOS_CLI"
}

# Install the DC/OS CLI if it's missing. If it's present, upgrade it if needed otherwise do nothing
if [ ! -e "$DCOS_CLI" ]; then
  echo "DC/OS CLI not found. Installing"
  install_dcos_cli
else
  INSTALLED_VERSION="$(installed_version)"
  if [ "$DCOS_CLI_VERSION" != "$INSTALLED_VERSION" ]; then
    echo "DC/OS CLI has version $INSTALLED_VERSION, want $DCOS_CLI_VERSION. Upgrading"
    rm -rf "$DCOS_CLI"
    install_dcos_cli
  else
    echo "Using cached DC/OS CLI $INSTALLED_VERSION"
  fi
fi
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.