blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
e82d4a9752c14a769f4ee16a3534997cfeac6086
|
Shell
|
Vilakerer/lzy_cmmt
|
/lzy_cmmt.sh
|
UTF-8
| 263
| 3.453125
| 3
|
[] |
no_license
|
# Lazy commit helper: commit with message $1 and push to the current branch.
# Usage: lzy_cmmt "message" [file ...]
#   With extra arguments, stages only those paths; otherwise stages the
#   whole working tree.
lzy_cmmt(){
  # Current branch name, extracted from the "* name" line of `git branch`.
  branch=$(git branch | sed -n -e 's/^\* \(.*\)/\1/p')
  # Arithmetic comparison: the original `[[ $len > 1 ]]` compared strings
  # lexicographically, which only worked by accident.
  if (( $# > 1 )); then
    for i in "${@:2}"; do
      # Quoted and with `--` so filenames with spaces or leading dashes work.
      git add -- "$i"
    done
  else
    git add .
  fi
  git commit -m "$1"
  git push origin "$branch"
}
| true
|
14949a263b6496b1543eb2e76b86dbc77fca5728
|
Shell
|
mahamot/projet
|
/scripts/scripttp5.2.1.sh
|
UTF-8
| 420
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
#fonction de gestion des prenoms composés
# Capitalize the first letter of every word passed as an argument and
# print the words joined by single spaces (handles compound first names).
prenomcomp(){
prepre=""
for mot in $*
do
prepre="$prepre ${mot^}"
done
echo $prepre
}
# Read "prenom mail" records from stagiaire2.txt, one per line: the first
# field is the (possibly colon-joined compound) first name, the last field
# is the e-mail address.
while read line
do
# First whitespace-separated field = raw first name.
prenom=${line%% *}
# Last whitespace-separated field = e-mail address.
mail=${line##* }
# Compound names are stored as "jean:pierre"; turn ':' into spaces.
prenom=${prenom//:/ }
# Old inline implementation, superseded by the prenomcomp function above:
# prepre=""
# for i in $prenom
# do
# prepre=$prepre" "${i^}
# done
# prepre=${prepre# *}
# Word splitting of the unquoted $prenom is intentional here.
prepre=$(prenomcomp $prenom)
echo "prenom: $prepre et mail: $mail"
done < stagiaire2.txt
| true
|
97c8a2748987d044858c431c37fbe22dd242637a
|
Shell
|
engma/app-autoscaler
|
/bin/deploy.sh
|
UTF-8
| 660
| 3.453125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Deploy the app-autoscaler components to Cloud Foundry.
# Usage: deploy.sh <environment>
#   <environment> selects profiles/<environment>.properties, which must
#   define (at least) apiServerURI.
if [[ "$#" != 1 ]]; then
  echo 'An environment is required'
  echo "for example, $0 myenv"
  exit 1
fi
# Absolute path of the directory containing this script.
basedir=$(cd "$(dirname "$0")" && pwd)
appAutoScaler="${basedir}/.."
envProperties="${appAutoScaler}/profiles/$1.properties"
# Quoted: a profile path with spaces would previously break the test.
if [ ! -f "$envProperties" ]; then
  echo "The file '$envProperties' does not exist"
  exit 1
fi
source "$envProperties"
# Derive the app domain from the API endpoint,
# e.g. https://api.example.com -> example.com
appDomain=${apiServerURI#*://*.}
# The trailing globs must stay unquoted so they expand to the built .war;
# only the directory prefix is quoted.
cf push AutoScaling -p "$appAutoScaler"/server/target/server-*.war -m 512M -d "$appDomain"
cf push AutoScalingAPI -p "$appAutoScaler"/api/target/api-*.war -d "$appDomain"
cf push AutoScalingServiceBroker -p "$appAutoScaler"/servicebroker/target/servicebroker-*.war -d "$appDomain"
| true
|
4209e12f48ba897954083a3bc9c4fe9af8a1b70e
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/ctodo/PKGBUILD
|
UTF-8
| 654
| 2.609375
| 3
|
[] |
no_license
|
# Maintainer: Niels Sonnich Poulsen <niels@nielssp.dk>
# Arch Linux PKGBUILD for ctodo, an ncurses-based task manager (CMake build).
pkgname=ctodo
pkgver=1.3
pkgrel=1
pkgdesc="A simple ncurses-based task manager."
url="http://ctodo.apakoh.dk"
arch=('x86_64' 'i686')
license=('MIT')
depends=('ncurses' 'readline')
makedepends=('cmake')
conflicts=()
replaces=()
backup=()
install=
source=("https://github.com/nielssp/${pkgname}/archive/v${pkgver}.tar.gz")
md5sums=('27f97d2ea65f0f48dd054b19ddcdbb6a')
# Configure with CMake and compile inside the unpacked source tree.
build() {
cd "${srcdir}/${pkgname}-${pkgver}"
cmake ./ -DCMAKE_INSTALL_PREFIX=/usr
make
}
# Install into the fakeroot; MIT licenses must be shipped under
# /usr/share/licenses for Arch packages.
package() {
cd "${srcdir}/${pkgname}-${pkgver}"
make DESTDIR="${pkgdir}" install
install -Dm644 LICENSE "$pkgdir/usr/share/licenses/$pkgname/LICENSE"
}
| true
|
641eb37da38a46e44e9fe679d1df1ddd97771b1e
|
Shell
|
cveligkos/dotfiles
|
/modules/user/zsh/scripts/waybar-dev-reload
|
UTF-8
| 219
| 2.859375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Auto-reload waybar while editing its configuration: restart waybar
# whenever the config or stylesheet changes, and kill it on script exit.

# An array (instead of a space-separated string) keeps each path intact
# even if $HOME contains spaces.
CONFIG_FILES=("$HOME/.config/waybar/config" "$HOME/.config/waybar/style.css")

# Make sure no stray waybar instance survives when this script dies.
trap "pkill waybar" EXIT

while true; do
  waybar &
  # Block until either watched file is (re)created or modified.
  inotifywait -e create,modify "${CONFIG_FILES[@]}"
  pkill waybar
done
| true
|
b5173439f1f79d0ce8d8e9fd3721062a32482181
|
Shell
|
nicktheway/godot-git-helpers
|
/diff_checker.sh
|
UTF-8
| 3,324
| 4.375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# This script checks all the changed tracked files to check if the only difference is in the AnimatedSprite frame number.
# Flags set via getopts below; a non-empty string means "enabled".
DRY_RUN=
VERBOSE=
# Print the command-line usage text to stdout.
function show_usage() {
printf '%s\n' \
"usage: $0 [-n]" \
'' \
' -n: Dry run. Shows the files that will be restored.' \
' -v: Verbose. Explain the process, useful for debugging the script.' \
''
}
# Parse flags. Note: -n (dry run) deliberately also enables verbose output
# so the user can see which files would have been restored.
while getopts 'nv' opt; do
case "$opt" in
n) DRY_RUN=t; VERBOSE=t;;
v) VERBOSE=t ;;
*) show_usage;
exit 1
esac
done
# Echo the arguments when VERBOSE is set; otherwise print nothing and
# return 1.
function log() {
[ -n "$VERBOSE" ] || return 1
echo "$@"
}
# validate_node_type FILE DIFFLINE
# Scan upward from DIFFLINE in the full-context diff of FILE until a blank
# line ends the property block; return 0 if a node header of type
# AnimatedSprite is found on the way (the changed frame belongs to it).
function validate_node_type() {
local filepath="$1"
local line="$2"
local stop=1
local previousline=$((line-1))
while [ $stop -eq 1 ]; do
# NOTE(review): `wc -l FILE` prints "N FILE"; left unquoted this expands
# to `git diff -UN FILE` — full-context diff of that file. Hacky but it
# makes diff line numbers line up with file content; confirm intent.
local linetext=$(git diff -U$(wc -l "$filepath") | sed "${previousline}q;d")
# A blank (or all-spaces) line terminates the node's property block.
echo "$linetext" | grep "^ *$" > /dev/null
stop=$?
echo "$linetext" | grep --extended-regexp "\[node.*\" type=\"AnimatedSprite\"" > /dev/null
if [ $? = 0 ]; then
log ' O Confirmed AnimatedSprite node: ' "$linetext"
return 0
fi
previousline=$((previousline-1))
done
log ' X Not an AnimatedSprite node!'
return 1
}
# validate_playing FILE DIFFLINE
# Scan downward from DIFFLINE in the full-context diff of FILE until a blank
# line; return 0 if a "playing = true" property is found, i.e. the frame
# change came from a running animation and is safe to discard.
function validate_playing() {
local filepath="$1"
local line="$2"
local stop=1
local nextline=$((line+1))
while [ $stop -eq 1 ]; do
# Same full-context diff trick as validate_node_type (unquoted wc -l).
local linetext=$(git diff -U$(wc -l "$filepath") | sed "${nextline}q;d")
# A blank (or all-spaces) line terminates the property block.
echo "$linetext" | grep "^ *$" > /dev/null
stop=$?
echo "$linetext" | grep "playing = true" > /dev/null
if [ $? = 0 ]; then
log ' O Confirmed that the animation is running: ' "$linetext"
return 0
fi
nextline=$((nextline+1))
done
log ' X The animation is not running!'
return 1
}
# validate_file_frame_changes FILE
# For every "+frame = N" line in FILE's full-context diff, verify that it
# belongs to an AnimatedSprite node whose animation is playing. Returns 0
# only when every frame change passes both checks.
function validate_file_frame_changes() {
local filepath="$1"
# Diff line numbers of added frame properties (grep -n prefixes "N:").
local framelines="$(git diff -U$(wc -l "$filepath") | grep -n '^+frame' | cut -d':' -f1)"
for line in $(echo "$framelines"); do
log ' > Verifying frame change on line' $line # for file $filepath
validate_node_type "$filepath" "$line"
if [ $? -ne 0 ]; then
return 1
fi
validate_playing "$filepath" "$line"
if [ $? -ne 0 ]; then
return 1
fi
done
return 0
}
# validate_and_revert_changes FILE...
# Restore (discard changes in) each file whose only modifications are
# harmless AnimatedSprite frame bumps; skip files that appear to contain
# intended changes. In dry-run mode, only report what would be restored.
function validate_and_revert_changes() {
for filepath in "$@"; do
log ""
log "- Validating: " "$filepath"
validate_file_frame_changes "$filepath"
if [ $? -ne 0 ]; then
log ' The script will not revert this file as it looks like it contains intended changes!'
continue
fi
if [ -z "$DRY_RUN" ]; then
git restore -- "$filepath"
log ' Restored changes.'
else
log ' DRY RUN MODE - Did not restore changes.'
fi
done
return 0
}
# Run from the directory containing this script (the repository root).
SCRIPT_DIR=$(cd -P -- "$(dirname -- "$0")" && pwd -P)
cd "$SCRIPT_DIR" || exit 1

# All modified tracked files, one per line.
CHANGED_FILES="$(git diff --name-only | fmt -w1)"
# Keep only Godot scene files. The pattern is quoted so '\.' reaches grep
# intact; unquoted, the shell stripped the backslash and '.' matched any char.
CHANGED_SCENES=$(echo "$CHANGED_FILES" | grep '\.tscn$')

# Collect the scenes whose diff consists solely of "frame = N" changes.
ONLY_FRAME_CHANGED_FILES=()
for FILE in $CHANGED_SCENES; do
# Strip the diff file headers, then drop frame-only lines; if nothing is
# left (grep exits 1), the file contains only frame changes.
git diff -U0 -- "$FILE" | grep '^[+-]' | grep -Ev '^(--- a/|\+\+\+ b/)' | grep --extended-regexp -v "[-+]frame = [0-9]+" > /dev/null
if [ $? -eq 1 ]; then
ONLY_FRAME_CHANGED_FILES+=("$FILE")
fi
done

# Test the element count: `[ -n "$arr" ]` only inspects element 0.
if [ "${#ONLY_FRAME_CHANGED_FILES[@]}" -gt 0 ]; then
log 'Identified files with only frame changes: '
log ''
for file in "${ONLY_FRAME_CHANGED_FILES[@]}"; do log "$file"; done;
validate_and_revert_changes "${ONLY_FRAME_CHANGED_FILES[@]}"
else
log 'No files with only frame changes found.'
fi
| true
|
f0eae539c81f5bdba8a1978b884696e13689467c
|
Shell
|
Bicocca/Calibration
|
/EcalCalibNtuple/test/cron_stability/cronjob_stability.sh
|
UTF-8
| 1,766
| 2.53125
| 3
|
[] |
no_license
|
#! /bin/sh
# Cron job: regenerate the 2012 ECAL E/p stability plots and publish them
# to the ecalmon AFS web area. All paths are CERN/AFS site-specific.
# NOTE(review): 'source' is a bashism under #!/bin/sh — works where /bin/sh
# is bash; confirm on the cron host.
export GROUP=zh
echo "****** INFO ******"
echo "whoami: "
whoami
echo "hostname: "
hostname
echo "pwd: "
pwd
#set the cmssw environment
echo ""
echo "****** SET THE CMSSW ENVIRONMENT ******"
export SCRAM_ARCH=slc5_amd64_gcc462
source /afs/cern.ch/cms/cmsset_default.sh
source /afs/cern.ch/cms/LCG/LCG-2/UI/cms_ui_env.sh
source /afs/cern.ch/cms/ccs/wm/scripts/Crab/crab.sh
cd /afs/cern.ch/user/e/ecalmon/EoPStability/2012/cron_stability/CMSSW_5_3_3_patch1/src/
# Initialize the CMSSW runtime environment in this shell, then return to
# the previous working directory.
eval `scramv1 runtime -sh`
cd -
# launch the plots
echo ""
echo "****** LAUNCH THE PLOTS ******"
cd /afs/cern.ch/user/e/ecalmon/EoPStability/2012/cron_stability/
perl launchStability.pl
# copy the plots
# Remove last run's published directories first so stale files disappear,
# then copy the freshly generated plot directories into the www area.
echo ""
echo "****** COPY THE PLOTS ******"
rm -rf /afs/cern.ch/user/e/ecalmon/www/EoPStability2012/EB_1_1_2012_31_12_2012/
rm -rf /afs/cern.ch/user/e/ecalmon/www/EoPStability2012/EE_1_1_2012_31_12_2012/
rm -rf /afs/cern.ch/user/e/ecalmon/www/EoPStability2012/EB_1_1_2012_31_12_2012_0.00-1.14/
rm -rf /afs/cern.ch/user/e/ecalmon/www/EoPStability2012/EE_1_1_2012_31_12_2012_1.50-2.00/
rm -rf /afs/cern.ch/user/e/ecalmon/www/EoPStability2012/EB_1_1_2012_31_12_2012_1.14-1.50/
rm -rf /afs/cern.ch/user/e/ecalmon/www/EoPStability2012/EE_1_1_2012_31_12_2012_2.00-2.50/
cp -r EB_1_1_2012_31_12_2012 /afs/cern.ch/user/e/ecalmon/www/EoPStability2012/
cp -r EE_1_1_2012_31_12_2012 /afs/cern.ch/user/e/ecalmon/www/EoPStability2012/
cp -r EB_1_1_2012_31_12_2012_0.00-1.14 /afs/cern.ch/user/e/ecalmon/www/EoPStability2012/
cp -r EE_1_1_2012_31_12_2012_1.50-2.00 /afs/cern.ch/user/e/ecalmon/www/EoPStability2012/
cp -r EB_1_1_2012_31_12_2012_1.14-1.50 /afs/cern.ch/user/e/ecalmon/www/EoPStability2012/
cp -r EE_1_1_2012_31_12_2012_2.00-2.50 /afs/cern.ch/user/e/ecalmon/www/EoPStability2012/
| true
|
ff4cf6209ff1e17c8b3398c39cb64c6c86416f60
|
Shell
|
josegonzalez/dokku-global-cert
|
/subcommands/set
|
UTF-8
| 2,287
| 3.8125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# dokku global-cert:set subcommand: installs a global SSL certificate/key
# pair, either from two file-path arguments or from a tar archive on stdin.
source "$(cd "$(dirname "$(dirname "${BASH_SOURCE[0]}")" )" && pwd)/config"
source "$PLUGIN_CORE_AVAILABLE_PATH/common/functions"
source "$PLUGIN_AVAILABLE_PATH/global-cert/internal-functions"
# Fail fast on errors and pipeline failures; DOKKU_TRACE enables xtrace.
set -eo pipefail; [[ $DOKKU_TRACE ]] && set -x
cmd-global-cert-set() {
#E set a global certificate combination using a full path on disk
#E dokku $PLUGIN_COMMAND_PREFIX:set server.crt server.key
#A crt-file, path to the certificate file
#A key-file, path to the key file
declare desc="imports an SSL cert/key combo either on STDIN via a tarball or from specified cert/key filenames"
# Drop the leading subcommand word when dokku passes it along as $1.
local cmd="$PLUGIN_COMMAND_PREFIX:set" argv=("$@"); [[ ${argv[0]} == "$cmd" ]] && shift 1
declare CRT_FILE="$1" KEY_FILE="$2"
if fn-is-file-import "$CRT_FILE" "$KEY_FILE"; then
# importing from file
true
elif fn-is-tar-import; then
# Unpack the stdin tarball into a temp dir; the RETURN trap restores the
# working directory and removes the temp dir on any exit from this function.
local CERTS_SET_TMP_WORK_DIR=$(mktemp -d "/tmp/dokku_certs_set.XXXX")
pushd "$CERTS_SET_TMP_WORK_DIR" &> /dev/null
trap 'popd &> /dev/null || true; rm -rf $CERTS_SET_TMP_WORK_DIR > /dev/null' RETURN
tar xvf - <&0
# Exactly one .crt file must be present in the archive.
local CRT_FILE_SEARCH=$(find . -not -path '*/\.*' -type f | grep ".crt$")
local CRT_FILE_COUNT=$(printf "%s" "$CRT_FILE_SEARCH" | grep -c '^')
if [[ $CRT_FILE_COUNT -lt 1 ]]; then
dokku_log_fail "Tar archive is missing .crt file"
elif [[ $CRT_FILE_COUNT -gt 1 ]]; then
dokku_log_fail "Tar archive contains more than one .crt file"
else
local CRT_FILE=$CRT_FILE_SEARCH
fi
# Exactly one .key file must be present in the archive.
local KEY_FILE_SEARCH=$(find . -not -path '*/\.*' -type f | grep ".key$")
local KEY_FILE_COUNT=$(printf "%s" "$KEY_FILE_SEARCH" | grep -c '^')
if [[ $KEY_FILE_COUNT -lt 1 ]]; then
dokku_log_fail "Tar archive is missing .key file"
elif [[ $KEY_FILE_COUNT -gt 1 ]]; then
dokku_log_fail "Tar archive contains more than one .key file"
else
local KEY_FILE=$KEY_FILE_SEARCH
fi
else
dokku_log_fail "Tar archive containing server.crt and server.key expected on stdin"
fi
# Install the pair with group-readable permissions only.
mkdir -p "$PLUGIN_CONFIG_ROOT"
cp "$CRT_FILE" "$PLUGIN_CONFIG_ROOT/server.crt"
cp "$KEY_FILE" "$PLUGIN_CONFIG_ROOT/server.key"
chmod 750 "$PLUGIN_CONFIG_ROOT"
chmod 640 "$PLUGIN_CONFIG_ROOT/server.crt" "$PLUGIN_CONFIG_ROOT/server.key"
}
cmd-global-cert-set "$@"
| true
|
cf141a0fc0756820fd38d664337506fc673a62b0
|
Shell
|
shiwaforce/NGINX-Demos
|
/packer-terraform-all-active-nginx-plus-lb/terraform/scripts/setup-nginx.sh
|
UTF-8
| 4,177
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
# Slightly modified versions of the following code blocks are currently used
# as startup and shutdown scripts for Google Cloud instance templates
# They provide a good reference on how to query Google Cloud for LB and App IPs
# and update the Nginx upstream API with those values
#
# NOTE: two loops below had comment-only bodies, which bash rejects as a
# syntax error; a ':' no-op keeps each reference block syntactically valid.

# Get list of available upstreams
curl 'http://localhost/upstream_conf?upstream=upstream_app_pool'

# Loop through IPs of available LBs and APPs
gcloud compute instances list --format="value(networkInterfaces[0].accessConfigs[0].natIP)" --regexp=.*lb.* | while read -r lb; do
gcloud compute instances list --format="value(networkInterfaces[0].networkIP)" --regexp=.*app.* | while read -r app; do
# curl -s 'http://'"$lb"'/upstream_conf?add=&upstream=upstream_app_pool&server='"$app"'';
# echo "LB: $lb && APP: $app"
:
done;
done;

# Loop through IPs of available LBs and APPs
lbip=$(gcloud compute instances list --format="value(networkInterfaces[0].accessConfigs[0].natIP)" --regexp=.*lb.*)
# Intentional unquoted expansion: word-splits the IP list into array elements.
arrlb=($lbip)
appip=$(gcloud compute instances list --format="value(networkInterfaces[0].networkIP)" --regexp=.*app.*)
arrapp=($appip)
for (( i=0; i < ${#arrlb[@]}; i++ )); do
for (( j=0; j < ${#arrapp[@]}; j++ )); do
# curl 'http://'"${arrlb[i]}"'/upstream_conf?add=&upstream=upstream_app_pool&server='"${arrapp[j]}"'';
# echo "LB: ${arrlb[i]} && APP: ${arrapp[j]}"
:
done;
done;

# Add all app servers not in the upstream to the current LB server
# Check if app server is already present and if not add to LB
appip=$(gcloud compute instances list --format="value(networkInterfaces[0].networkIP)" --regexp=.*app.*)
arrapp=($appip)
for (( i=0; i < ${#arrapp[@]}; i++ )); do
is_present=false;
upstrlist=$(curl -s 'http://localhost/upstream_conf?upstream=upstream_app_pool' | grep -Eo 'server ([0-9]*\.){3}[0-9]*' | grep -Eo '([0-9]*\.){3}[0-9]*')
upstrarr=($upstrlist)
for (( j=0; j < ${#upstrarr[@]}; j++ )); do
if [ "${arrapp[i]}" = "${upstrarr[j]}" ]; then
is_present=true;
fi;
done;
if [ "$is_present" = false ]; then
curl -s 'http://localhost/upstream_conf?add=&upstream=upstream_app_pool&server='"${arrapp[i]}"'';
fi;
done;

# Get the internal IP of the current app server and add this server to the upstream of all LB instances
# Check if app server is already present and if not add to LB
inip=$(ifconfig | grep -Eo 'inet (addr:)?([0-9]*\.){3}[0-9]*' | grep -Eo '([0-9]*\.){3}[0-9]*' | grep -v '127.0.0.1');
lbip=$(gcloud compute instances list --format="value(networkInterfaces[0].accessConfigs[0].natIP)" --regexp=.*lb.*)
arrlb=($lbip)
for (( i=0; i < ${#arrlb[@]}; i++ )); do
is_present=false;
upstrlist=$(curl -s 'http://'"${arrlb[i]}"'/upstream_conf?upstream=upstream_app_pool' | grep -Eo 'server ([0-9]*\.){3}[0-9]*' | grep -Eo '([0-9]*\.){3}[0-9]*')
upstrarr=($upstrlist)
for (( j=0; j < ${#upstrarr[@]}; j++ )); do
if [ "$inip" = "${upstrarr[j]}" ]; then
is_present=true;
fi;
done;
if [ "$is_present" = false ]; then
curl -s 'http://'"${arrlb[i]}"'/upstream_conf?add=&upstream=upstream_app_pool&server='"$inip"'';
fi;
done;

# Get the internal IP of the current app server and delete this server from the upstream of all LB instances
inip=$(ifconfig | grep -Eo 'inet (addr:)?([0-9]*\.){3}[0-9]*' | grep -Eo '([0-9]*\.){3}[0-9]*' | grep -v '127.0.0.1');
gcloud compute instances list --format="value(networkInterfaces[0].accessConfigs[0].natIP)" --regexp=.*lb.* | while read -r lb; do
for ID in $(curl -s 'http://'"$lb"'/upstream_conf?upstream=upstream_app_pool' | grep -o 'server '"$inip"':80; # id=[0-9]\+' | grep -o 'id=[0-9]\+' | grep -o '[0-9]\+'); do
curl 'http://'"$lb"'/upstream_conf?remove=&upstream=upstream_app_pool&id='"$ID"'';
done;
done;

# Get a list of Upstream servers and loop through them to delete them
gcloud compute instances list --format="value(networkInterfaces[0].accessConfigs[0].natIP)" --regexp=.*lb.* | while read -r lb; do
for ID in $(curl -s 'http://'"$lb"'/upstream_conf?upstream=upstream_app_pool' | grep -o 'id=[0-9]\+' | grep -o '[0-9]\+'); do
curl 'http://'"$lb"'/upstream_conf?remove=&upstream=upstream_app_pool&id='"$ID"'';
done;
done;
| true
|
2ce9ebd14518cec19cd8722862daf66263d79155
|
Shell
|
oliverguenther/docker-osx-dev
|
/docker-osx-dev
|
UTF-8
| 15,310
| 3.921875
| 4
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
#
# A script for running a productive development environment with Docker
# on OS X. See https://github.com/brikis98/docker-osx-dev for more info.
set -e
# Console colors
readonly COLOR_DEBUG='\033[1;36m'
readonly COLOR_INFO='\033[0;32m'
readonly COLOR_WARN='\033[1;33m'
readonly COLOR_ERROR='\033[0;31m'
readonly COLOR_INSTRUCTIONS='\033[0;37m'
readonly COLOR_END='\033[0m'
# Log levels
readonly LOG_LEVEL_DEBUG="DEBUG"
readonly LOG_LEVEL_INFO="INFO"
readonly LOG_LEVEL_WARN="WARN"
readonly LOG_LEVEL_ERROR="ERROR"
readonly LOG_LEVEL_INSTRUCTIONS="INSTRUCTIONS"
readonly LOG_LEVELS=($LOG_LEVEL_DEBUG $LOG_LEVEL_INFO $LOG_LEVEL_WARN $LOG_LEVEL_ERROR $LOG_LEVEL_INSTRUCTIONS)
readonly DEFAULT_LOG_LEVEL="$LOG_LEVEL_INFO"
# Boot2Docker constants
readonly DOCKER_HOST_NAME="dockerhost"
readonly BOOT2DOCKER_SSH_KEY=$(boot2docker cfg | grep "^SSHKey = " | sed -e 's/^SSHKey = "\(.*\)"/\1/')
readonly BOOT2DOCKER_REMOTE_SHELL_CMD="ssh -i $BOOT2DOCKER_SSH_KEY -o StrictHostKeyChecking=no"
readonly BOOT2DOCKER_USER="docker"
readonly BOOT2DOCKER_SSH_URL="$BOOT2DOCKER_USER@$DOCKER_HOST_NAME"
# docker-compose constants
readonly DEFAULT_COMPOSE_FILE="docker-compose.yml"
# Sync and watch constants
readonly DEFAULT_PATHS_TO_SYNC=(".")
readonly DEFAULT_EXCLUDES=(".git")
readonly DEFAULT_IGNORE_FILE=".dockerignore"
readonly RSYNC_FLAGS=("archive" "verbose" "delete" "omit-dir-times" "inplace" "whole-file")
# Global variables. The should only ever be set by the corresponding
# configure_XXX functions.
PATHS_TO_SYNC=()
EXCLUDES=()
CURRENT_LOG_LEVEL="$DEFAULT_LOG_LEVEL"
# The log_* helpers below pin a fixed color/level and forward "$@" to log.
# Helper function to log an INFO message. See the log function for details.
function log_info {
log $COLOR_INFO $LOG_LEVEL_INFO "$@"
}
# Helper function to log a WARN message. See the log function for details.
function log_warn {
log $COLOR_WARN $LOG_LEVEL_WARN "$@"
}
# Helper function to log a DEBUG message. See the log function for details.
function log_debug {
log $COLOR_DEBUG $LOG_LEVEL_DEBUG "$@"
}
# Helper function to log an ERROR message. See the log function for details.
function log_error {
log $COLOR_ERROR $LOG_LEVEL_ERROR "$@"
}
# Helper function to log an INSTRUCTIONS message. See the log function for details.
function log_instructions {
log $COLOR_INSTRUCTIONS $LOG_LEVEL_INSTRUCTIONS "$@"
}
#
# Usage: index_of VALUE ARRAY
#
# Prints the zero-based index of the first occurrence of VALUE in ARRAY,
# or -1 if VALUE does not appear in ARRAY.
#
# Examples:
#
# index_of foo "abc" "foo" "def"
# Returns: 1
#
# index_of foo "abc" "def"
# Returns -1
#
function index_of {
local needle="$1"
shift
local haystack=("$@")
local i
for (( i = 0; i < ${#haystack[@]}; i++ )); do
if [ "${haystack[$i]}" = "$needle" ]; then
echo "$i"
return
fi
done
echo -1
}
#
# Usage: log COLOR LEVEL [MESSAGE ...]
#
# Logs MESSAGE to stdout with color COLOR if the log level is at least LEVEL.
# If no MESSAGE is specified, reads from stdin. The log level is determined by
# the DOCKER_OSX_DEV_LOG_LEVEL environment variable.
#
# Examples:
#
# log $COLOR_INFO $LOG_LEVEL_INFO "Hello, World"
# Prints: "[INFO] Hello, World" to stdout in green.
#
# echo "Hello, World" | log $COLOR_RED $LOG_LEVEL_ERROR
# Prints: "[ERROR] Hello, World" to stdout in red.
#
function log {
if [[ "$#" -gt 2 ]]; then
# Message was passed as arguments.
do_log "$@"
elif [[ "$#" -eq 2 ]]; then
# Only color + level given: log each line read from stdin.
while read message; do
do_log "$1" "$2" "$message"
done
else
echo "Internal error: invalid number of arguments passed to log function: $@"
exit 1
fi
}
#
# Usage: do_log COLOR LEVEL MESSAGE ...
#
# Logs MESSAGE to stdout with color COLOR if the log level is at least LEVEL.
# The log level is determined by the DOCKER_OSX_DEV_LOG_LEVEL environment
# variable.
#
# Examples:
#
# do_log $COLOR_INFO $LOG_LEVEL_INFO "Hello, World"
# Prints: "[INFO] Hello, World" to stdout in green.
#
# NOTE(review): 'local readonly x=...' does not make x read-only — it
# declares an extra local named 'readonly'. Harmless here, but 'local -r'
# is the correct form. Left as-is throughout this script.
function do_log {
local readonly color="$1"
shift
local readonly log_level="$1"
shift
local readonly message="$@"
# Compare the message's severity index against the configured level's
# index; LOG_LEVELS is ordered least to most severe.
local readonly log_level_index=$(index_of "$log_level" "${LOG_LEVELS[@]}")
local readonly current_log_level_index=$(index_of "$CURRENT_LOG_LEVEL" "${LOG_LEVELS[@]}")
if [[ "$log_level_index" -ge "$current_log_level_index" ]]; then
echo -e "${color}[${log_level}] ${message}${COLOR_END}"
fi
}
#
# Usage: find_path_to_sync_parent PATH
#
# Finds the parent folder of PATH from the PATHS_TO_SYNC global variable. When
# using rsync, we want to sync the exact folders the user specified when
# running the docker-osx-dev script. However, when we we use fswatch, it gives
# us the path of files that changed, which may be deeply nested inside one of
# the folders we're supposed to keep in sync. Therefore, this function lets us
# transform one of these nested paths back to one of the top level rsync paths.
#
function find_path_to_sync_parent {
local readonly path="$1"
# greadlink = GNU readlink (Homebrew coreutils); -m normalizes the path
# without requiring it to exist.
local readonly normalized_path=$(greadlink -m "$path")
for path_to_sync in "${PATHS_TO_SYNC[@]}"; do
# Unquoted RHS + '*' makes this a prefix (starts-with) glob match.
if [[ "$normalized_path" == $path_to_sync* ]]; then
echo "$path_to_sync"
return
fi
done
}
#
# Usage: rsync PATH
#
# Uses rsync to sync PATH to the same PATH on the Boot2Docker VM.
#
# Examples:
#
# rsync /foo
# Result: the contents of /foo are rsync'ed to /foo on the Boot2Docker VM
#
function do_rsync {
local readonly path="$1"
local readonly path_to_sync=$(find_path_to_sync_parent "$path")
local readonly parent_folder=$(dirname "$path_to_sync")
# Prefix every flag/exclude with its option marker via pattern expansion.
local readonly flags="${RSYNC_FLAGS[@]/#/--}"
local readonly excludes="${EXCLUDES[@]/#/--exclude }"
local readonly rsh_flag="--rsh=\"$BOOT2DOCKER_REMOTE_SHELL_CMD\""
# Built as a string and eval'd so the quoted --rsh value survives intact.
local readonly rsync_cmd="rsync $flags $excludes $rsh_flag $path_to_sync $BOOT2DOCKER_SSH_URL:$parent_folder"
log_debug "$rsync_cmd"
eval "$rsync_cmd" 2>&1 | log_info
}
#
# Usage: sync [PATHS ...]
#
# Uses rsync to sync PATHS to the Boot2Docker VM. If one of the values in PATHS
# is not valid (e.g. doesn't exist), it will be ignored.
#
# Examples:
#
# rsync /foo /bar
# Result: /foo and /bar are rsync'ed to the Boot2DockerVM
#
# NOTE: this function shadows the system 'sync' command within this script.
function sync {
local readonly paths_to_sync=("$@")
for path in "${paths_to_sync[@]}"; do
do_rsync "$path"
done
}
#
# Usage: join SEPARATOR [ELEMENT ...]
#
# Prints the ELEMENTs joined by the first character of SEPARATOR.
#
# Example:
#
# join , a b c
# Returns: "a,b,c"
#
function join {
local IFS="$1"
shift
echo "$*"
}
#
# Usage: initial_sync PATHS
#
# Perform the initial sync of PATHS to the Boot2Docker VM, including setting up
# all necessary parent directories and permissions.
#
function initial_sync {
local readonly paths_to_sync=("$@")
log_info "Performing initial sync of paths: ${paths_to_sync[@]}"
# Collect the parent dir of each sync path so rsync has a destination
# directory to write into on the VM.
local dirs_to_create=()
for path in "${paths_to_sync[@]}"; do
local readonly parent_dir=$(dirname "$path")
dirs_to_create+=("$parent_dir")
done
local readonly dir_string=$(join " " "${dirs_to_create[@]}")
# Create all parent dirs and hand ownership to the docker user in a
# single SSH round trip.
local readonly mkdir_string="sudo mkdir -p $dir_string"
local readonly chown_string="sudo chown -R $BOOT2DOCKER_USER $dir_string"
local readonly ssh_cmd="$mkdir_string && $chown_string"
log_debug "Creating parent directories in Docker VM: $ssh_cmd"
boot2docker ssh "$ssh_cmd"
sync "${paths_to_sync[@]}"
log_info "Initial sync done"
}
#
# Usage: watch
#
# Watches the paths in the global variable PATHS_TO_SYNC for changes and rsyncs
# any files that changed.
#
function watch {
log_info "Watching: ${PATHS_TO_SYNC[@]}"
local readonly excludes="${EXCLUDES[@]/#/--exclude }"
local readonly fswatch_cmd="fswatch -0 $excludes ${PATHS_TO_SYNC[@]}"
log_debug "$fswatch_cmd"
# fswatch -0 emits NUL-separated paths; read -d "" splits on NUL so file
# names containing whitespace/newlines survive intact.
eval "$fswatch_cmd" | while read -d "" file
do
log_info "Detected change in $file"
sync "$file"
done
}
#
# Usage: watch_and_sync
#
# Syncs the paths in the global variable PATHS_TO_SYNC. Kicks off a file watcher
# that will keep those paths in sync. Never returns (watch loops forever).
#
function watch_and_sync {
initial_sync "${PATHS_TO_SYNC[@]}"
watch
}
#
# Usage: instructions
#
# Prints the usage instructions for this script to stdout.
#
function instructions {
# echo -e enables the \t escapes used to align the option descriptions.
echo -e "Usage: docker-osx-dev [-s PATH] [-e PATH] [-l LOG_LEVEL] [-c COMPOSE_FILE] [-i IGNORE_FILE] [-h]"
echo -e
echo -e "Options, if supplied, have the following meanings:"
echo -e
echo -e "-s PATH\t\t\tSync PATH to the Boot2Docker VM. No wildcards allowed. May be specified multiple times. Default: ${DEFAULT_PATHS_TO_SYNC[@]}"
echo -e "-e PATH\t\t\tExclude PATH while syncing. Wildcards are allowed, but make sure to quote them. May be specified multiple times. Default: ${DEFAULT_EXCLUDES[@]}"
echo -e "-c COMPOSE_FILE\t\tRead in this docker-compose file and sync any volumes within it. Default: $DEFAULT_COMPOSE_FILE"
echo -e "-i IGNORE_FILE\t\tRead in this ignore file and exclude any paths within it while syncing. Default: $DEFAULT_IGNORE_FILE"
echo -e "-l LOG_LEVEL\t\tSpecify the logging level. One of: ${LOG_LEVELS[@]}. Default: ${DEFAULT_LOG_LEVEL}"
echo -e "-h\t\t\tPrint this help text."
echo -e
echo -e "Overview:"
echo -e
echo -e "docker-osx-dev is a script you can use to sync folders to the Boot2Docker VM using rsync."
echo -e "It's an alternative to using VirtualBox shared folders, which are agonizingly slow and break file watchers."
echo -e "For more info, see: https://github.com/brikis98/docker-osx-dev"
echo -e
echo -e "Example workflow:"
echo -e
echo -e "\t> docker-osx-dev -s /host-folder"
echo -e "\t> docker run -v /host-folder:/guest-folder some-docker-image"
echo -e
echo -e "After you run the commands above, /host-folder on OS X will be kept in sync with /guest-folder in some-docker-image."
echo -e
}
#
# Usage: load_paths_from_docker_compose DOCKER_COMPOSE_FILE
#
# Parses out all volumes: entries from the docker-compose file
# DOCKER_COMPOSE_FILE and prints the host-side paths, space separated.
# This is a very hacky function that just uses regex instead of a proper
# yaml parser. If it proves to be fragile, it will need to be replaced.
#
function load_paths_from_docker_compose {
local yaml_file_path="$1"
local in_volumes_block=false
local paths=()
if [[ -f "$yaml_file_path" ]]; then
# Default IFS is kept on purpose so read trims leading indentation
# (the "${line:0:2}" check relies on it); -r stops read from eating
# backslashes in paths, which the original version mangled.
while read -r line; do
if $in_volumes_block; then
if [[ "${line:0:2}" = "- " ]]; then
# "- /host/path:/container/path" -> "/host/path"
# $line is quoted so glob characters in it are not expanded.
local path=$(echo "$line" | sed -ne "s/- \(.*\):.*$/\1/p")
if [ ! -z "$path" ]; then
paths+=("$path")
fi
else
# First non "- " line terminates the volumes block.
in_volumes_block=false
fi
else
if [[ "$line" = "volumes:" ]]; then
in_volumes_block=true
fi
fi
done < "$yaml_file_path"
fi
echo "${paths[@]}"
}
#
# Usage: load_ignore_paths IGNORE_FILE
#
# Parse the paths from IGNORE_FILE that are of the format used by .gitignore
# and .dockerignore: that is, each line contains a single path, and lines that
# start with a pound sign are treated as comments. Prints the paths space
# separated.
#
function load_ignore_paths {
local ignore_file="$1"
local paths=()
if [[ -f "$ignore_file" ]]; then
# -r stops read from interpreting backslashes in ignore patterns,
# which the original version silently corrupted.
while read -r line; do
if [[ "${line:0:1}" != "#" ]]; then
paths+=("$line")
fi
done < "$ignore_file"
fi
echo "${paths[@]}"
}
#
# Usage: configure_log_level LEVEL
#
# Set the logging level to LEVEL. LEVEL must be one of the values in LOG_LEVELS.
# An empty/missing LEVEL falls back to DEFAULT_LOG_LEVEL; an unknown LEVEL
# prints the usage text and exits with status 1.
#
function configure_log_level {
local readonly level="${1:-$DEFAULT_LOG_LEVEL}"
# index_of returns -1 when the level is not in LOG_LEVELS.
local readonly index=$(index_of "$level" "${LOG_LEVELS[@]}")
if [[ "$index" -ge 0 ]]; then
CURRENT_LOG_LEVEL="$level"
else
log_error "Invalid log level specified: $level"
instructions
exit 1
fi
}
#
# Usage: configure_paths_to_sync COMPOSE_FILE [PATHS_FROM_CMD_LINE ...]
#
# Set the paths that should be synced to the Boot2Docker VM. PATHS_FROM_CMD_LINE
# are paths specified as command line arguments and will take precedence. If
# none are specified, this function will try to read the docker-compose file at
# COMPOSE_FILE and load volumes from it. If that fails, this function will fall
# back to the DEFAULT_PATHS_TO_SYNC.
#
# Fixes over the original: ${#arr[@]} (element count) replaces ${#arr}
# (length of the first element), and plain 'local' replaces the ineffective
# 'local readonly' (which declared a stray local named 'readonly').
#
function configure_paths_to_sync {
local compose_file="${1:-$DEFAULT_COMPOSE_FILE}"
shift
local paths_to_sync_from_cmd_line=("$@")
local paths_to_sync_from_compose_file=($(load_paths_from_docker_compose "$compose_file"))
local paths_to_sync=()
if [[ "${#paths_to_sync_from_cmd_line[@]}" -gt 0 ]]; then
paths_to_sync=("${paths_to_sync_from_cmd_line[@]}")
log_debug "Using sync paths from command line args: ${paths_to_sync[@]}"
elif [[ "${#paths_to_sync_from_compose_file[@]}" -gt 0 ]]; then
paths_to_sync=("${paths_to_sync_from_compose_file[@]}")
log_info "Using sync paths from Docker Compose file at $compose_file: ${paths_to_sync[@]}"
else
paths_to_sync=("${DEFAULT_PATHS_TO_SYNC[@]}")
log_debug "Using default sync paths: ${paths_to_sync[@]}"
fi
# Normalize every path to an absolute one before recording it.
for path in "${paths_to_sync[@]}"; do
local normalized_path=$(greadlink -m "$path")
PATHS_TO_SYNC+=("$normalized_path")
done
}
#
# Usage: configure_excludes IGNORE_FILE [EXCLUDE_PATHS_FROM_CMD_LINE ...]
#
# Sets the paths that should be excluded when syncing files to the Boot2Docker
# VM. EXCLUDE_PATHS_FROM_CMD_LINE are paths specified as command line arguments
# and will take precedence. If none are specified, this function will try to
# read the ignore file (see load_ignore_paths) at IGNORE_FILE and use those
# entries as excludes. If that fails, this function will fall back to
# DEFAULT_EXCLUDES.
#
# Fix over the original: ${#arr[@]} (element count) replaces ${#arr},
# which only measured the length of the array's first element.
#
function configure_excludes {
local ignore_file="${1:-$DEFAULT_IGNORE_FILE}"
shift
local excludes_from_cmd_line=("$@")
local excludes_from_ignore_file=($(load_ignore_paths "$ignore_file"))
if [[ "${#excludes_from_cmd_line[@]}" -gt 0 ]]; then
EXCLUDES=("${excludes_from_cmd_line[@]}")
log_debug "Using exclude paths from command line args: ${EXCLUDES[@]}"
elif [[ "${#excludes_from_ignore_file[@]}" -gt 0 ]]; then
EXCLUDES=("${excludes_from_ignore_file[@]}")
log_info "Using excludes from ignore file $ignore_file: ${EXCLUDES[@]}"
else
EXCLUDES=("${DEFAULT_EXCLUDES[@]}")
log_debug "Using default exclude paths: ${EXCLUDES[@]}"
fi
}
#
# Usage handle_command ARGS ...
#
# Parses ARGS to kick off this script. See the output of the instructions
# function for details.
#
function handle_command {
local paths_to_sync=()
local excludes=()
local docker_compose_file=""
local ignore_file=""
local log_level=""
# Leading ':' = silent error mode (we handle ':' and '?' cases ourselves).
# NOTE(review): the doubled colons (s:: etc.) are redundant for the bash
# getopts builtin — 's:' already means "requires an argument". Harmless,
# but likely unintended; confirm before simplifying.
while getopts ":s::e::c::l::i::h" opt; do
case "$opt" in
s)
paths_to_sync+=("$OPTARG")
;;
e)
excludes+=("$OPTARG")
;;
c)
docker_compose_file="$OPTARG"
;;
i)
ignore_file="$OPTARG"
;;
l)
log_level="$OPTARG"
;;
h)
instructions
exit 0
;;
:)
log_error "Option -$OPTARG requires an argument"
instructions
exit 1
;;
\?)
log_error "Invalid option: -$OPTARG"
instructions
exit 1
;;
esac
done
# Apply configuration in dependency order, then block in the watch loop.
configure_log_level "$log_level"
configure_paths_to_sync "$docker_compose_file" "${paths_to_sync[@]}"
configure_excludes "$ignore_file" "${excludes[@]}"
watch_and_sync
}
handle_command "$@"
| true
|
ce4e62677e27a211c08766e4500e58585de65854
|
Shell
|
bwdmonkey/dotfiles
|
/bash_profile
|
UTF-8
| 1,053
| 2.984375
| 3
|
[] |
no_license
|
# Login-shell setup: nvm, git prompt, bashrc/prompt/profile, RVM, Python
# framework PATHs, and iTerm2 shell integration.
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion" # This loads nvm bash_completion
# Source optional helper files only when they exist and are non-empty (-s).
[[ -s "$HOME/.git-prompt.sh" ]] && source "$HOME/.git-prompt.sh"
[[ -s "$HOME/.bashrc" ]] && source "$HOME/.bashrc"
[[ -s "$HOME/.bash_prompt" ]] && source "$HOME/.bash_prompt"
[[ -s "$HOME/.profile" ]] && source "$HOME/.profile" # Load the default .profile
[[ -s "$HOME/.rvm/scripts/rvm" ]] && source "$HOME/.rvm/scripts/rvm" # Load RVM into a shell session *as a function*
export VISUAL=vim
export EDITOR="$VISUAL"
# Setting PATH for Python 3.6
# The original version is saved in .bash_profile.pysave
PATH="/Library/Frameworks/Python.framework/Versions/3.6/bin:${PATH}"
export PATH
# Setting PATH for Python 3.7
# The original version is saved in .bash_profile.pysave
# (prepended after 3.6, so 3.7 wins PATH lookups)
PATH="/Library/Frameworks/Python.framework/Versions/3.7/bin:${PATH}"
export PATH
test -e "${HOME}/.iterm2_shell_integration.bash" && source "${HOME}/.iterm2_shell_integration.bash"
| true
|
6b499b7dcd9cb77628d71122a9f70810f7729c66
|
Shell
|
CyberSys/bifrost-build
|
/all/strace-4.5.20-2/Fetch-source.sh
|
UTF-8
| 1,896
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash
# Fetch the strace 4.5.20 source tarball into /var/spool/src, trying a list
# of mirrors in order until one delivers a file matching the expected MD5.
SRC=strace-4.5.20.tar.bz2
DST=/var/spool/src/"${SRC}"
MD5=64dfe10d9db0c1e34030891695ffca4b

# Nothing to do when the tarball is already present and non-empty.
if [ -s "${DST}" ]; then
  exit 0
fi

# Candidate download URLs, attempted in order (none contain whitespace, so
# plain word-splitting of the list is safe).
mirrors="
http://downloads.sourceforge.net/project/strace/strace/4.5.20/${SRC}
http://distro.ibiblio.org/openwall/Owl/pool/sources/strace/${SRC}
http://ftp.lip6.fr/pub/linux/distributions/slackware/slackware-13.37/source/d/strace/${SRC}
http://ftp.sunet.se/mirror/archive/ftp.sunet.se/pub/Linux/distributions/bifrost/download/src/${SRC}
http://oe-lite.org/mirror/crosstool-ng/${SRC}
https://slackbuilds.org/mirror/slackware/slackware-14.0/source/d/strace/${SRC}
http://repository.timesys.com/buildsources/s/strace/strace-4.5.20/${SRC}
http://mirror.efixo.net/${SRC}
http://mirror.math.princeton.edu/pub/slackware/slackware64-14.0/source/d/strace/${SRC}
http://downloads.aredn.org/sources/${SRC}
http://ftp.acc.umu.se/mirror/archive/ftp.sunet.se/pub/Linux/distributions/bifrost/download/src/${SRC}
http://ponce.cc/mirrors/slackware-13.37/source/d/strace/${SRC}
"
for url in ${mirrors}; do
  ../../wget-finder --checksum "${MD5}" -O "${DST}" "${url}" && exit 0
done

# Last resort: let wget-finder locate the file by name and checksum itself.
../../wget-finder -O "${DST}" "${SRC}:${MD5}"
| true
|
5c1f0bfbf3d08b07dbfe321808a6421ec8f66730
|
Shell
|
martinhynar/.dotfiles
|
/.oh-my-zsh/custom/plugins/oh-my-settings/oh-my-settings.plugin.zsh
|
UTF-8
| 1,646
| 2.609375
| 3
|
[] |
no_license
|
# Machine-local zsh settings: paths, aliases, default apps, and git helpers.
source ~/.dotfiles-private/load-private
# PATHS
WORKBENCH=~/Workbench
PATH=~/.dotfiles/bin:${PATH}
PATH=~/bin:${PATH}
PATH=~/.cache/rebar3/bin:${PATH}
JAVA_HOME=/usr/java/latest/
# NOTE(review): this prepends JAVA_HOME itself, not JAVA_HOME/bin — confirm
# the java binaries really live at this root.
PATH=${JAVA_HOME}:${PATH}
TMP=/tmp; export TMP
TMPDIR=$TMP; export TMPDIR
# GOLANG
export GOPATH=~/Projects/Go
PATH=/usr/local/go/bin:${PATH}
PATH=${GOPATH}/bin:${PATH}
# System paths (note the trailing "." adds the current directory to PATH)
PATH=$PATH:/usr/sbin:/usr/local/bin:/usr/bin:/bin:/usr/local/sbin:.
export PATH
# ALIASES
alias ll='ls -lah --color=auto'
alias w='cd ${WORKBENCH}'
alias sr='sudo bash'
alias gvim="gvim -p --remote-tab-silent"
alias pgrep="pgrep -lf"
alias g="gvim"
alias eeessh="ssh -l root eeebox"
# NOTE(review): "$1" inside a double-quoted alias expands when the alias is
# *defined* (to nothing here), not when it is used; arguments are appended
# after the alias body anyway, so this works but the $1 is misleading.
alias open="xdg-open $1 2> /dev/null"
alias octave="octave -q"
alias ,p="cd /home/mhynar/Projects"
export BROWSER="google-chrome"
export LC_ALL=en_US.utf-8
# JAVA CLASSPATH
# export CLASSPATH
# Default applications
# NOTE(review): these xdg-mime calls run on every new shell; consider a
# one-time setup script instead.
xdg-mime default google-chrome.desktop application/pdf
xdg-mime default google-chrome.desktop text/html
xdg-mime default google-chrome.desktop x-scheme-handler/http
xdg-mime default google-chrome.desktop x-scheme-handler/https
# GIT
# NOTE(review): as with 'open' above, "$@" expands at definition time; the
# aliases still work because arguments are appended at use time.
alias ,gcm="git commit -m $@"
alias ,gcsm="git commit -S -m $@"
alias ,gs="git status"
alias ,gp="git push"
alias ,gpt="git push --tags"
alias ,gmaster="git checkout master"
alias ,gconfig_bitbucket='git config --local user.name "Martin Hynar"; git config --local user.email "martin.hynar@gmail.com"; git config --local --list'
alias ,gconfig_gitlab='git config --local user.name "Martin Hynar"; git config --local user.email "martin.hynar@gmail.com"; git config --local --list'
alias ,ping_google_dns="ping -w 5 8.8.8.8; traceroute 8.8.8.8"
alias ,json="xclip -o | jq '.'"
| true
|
f20eb2557bbc3871a1ef058379ed6f31ff59cb9a
|
Shell
|
ScottByronDarrow/Logistic-Software-LS10-v5
|
/LS10.5/INSTALL/bin/BuildEnv
|
UTF-8
| 210
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/sh
#
# Build LS10 Environment file
#
# Expects PROG_PATH in the environment; "$1" (optional) is passed through to
# the load_env utility. load_env's output is deliberately discarded.
#
LOAD_ENV="$PROG_PATH/BIN/UTILS/load_env"
PSL_ENV_NAME="$PROG_PATH/BIN/LOGISTIC"; export PSL_ENV_NAME
echo "Building LS10 Environment File"
# Quote expansions so paths containing spaces survive; ${1:+"$1"} forwards
# the argument only when one was actually supplied (no empty-string arg).
"$LOAD_ENV" ${1:+"$1"} > /dev/null 2>&1
| true
|
777df0a69ee2edca7910fd8708bf2b4b6133e71c
|
Shell
|
dkhabarov/EtherwayBalanceFetcher
|
/old.sh
|
UTF-8
| 1,282
| 2.625
| 3
|
[] |
no_license
|
#!/bin/sh
###################################
# Etherway balance check script.
# Idea and basis: http://habrahabr.ru/blogs/sysadm/114177/
# Reworked by Saymon21, Sat Sep 10 19:27:55 2011
# admin<at>hub21<dot>ru or ewsaymon<at>yandex<dot>ru
# Requires: wget
###################################
############ Script settings ######
LOGIN="p0000000"
PASSWORD="pass"
USRAGENT='Opera/9.80 (X11; Linux i686; U; ru) Presto/2.9.168 Version/11.51'
############ Script settings ######

# Issue one HTTP request with a persistent cookie jar so the login session
# carries over between calls.
# FIX: "$@" is quoted — the unquoted $@ would word-split arguments such as
# the POST data if the credentials ever contained spaces or globs.
request()
{
	wget \
		--user-agent="$USRAGENT" \
		--load-cookies cookies.txt \
		--save-cookies cookies.txt \
		--keep-session-cookies \
		--quiet \
		"$@"
}

# Prime the session, log in, then fetch the account info page.
request -O index.html \
	'https://lk.etherway.ru/site/login'
request -O form.html \
	--post-data="LoginForm[username]=$LOGIN&LoginForm[password]=$PASSWORD" \
	'https://lk.etherway.ru/site/login'
request -O info.html 'https://lk.etherway.ru/account/info'

# Extract the balance figure from the account page markup (grep reads the
# file directly; the original piped it through cat needlessly).
CUR_BALANCE=$(grep "Баланс" info.html | awk -F"<|>" '{print $11}')
echo "$CUR_BALANCE"

##################### clean up scratch files (cookies.txt is kept on purpose
# so the session can be reused on the next run)
rm -f index.html
rm -f form.html
rm -f info.html
| true
|
6616d5ad29641c413c8b7b592f5f3efe8629c569
|
Shell
|
vi88i/filerail
|
/setup.sh
|
UTF-8
| 710
| 3.359375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Download and build filerail's third-party dependencies under ./deps:
# openssl, kuba--/zip and msgpack-c, each pinned to a known commit.
echo "Grab a coffee, sit back and relax till dependencies are downloaded..."

[ -d deps ] || mkdir deps

# All dependencies are built inside ./deps.
cd deps

# openssl — configure, build, and run its test suite.
if [ ! -d openssl ]; then
	git clone https://github.com/openssl/openssl.git
	cd openssl
	git reset --hard 31a8925
	./Configure
	make
	make test
	cd ..
fi

# zip — source-only library; just pin the commit.
if [ ! -d zip ]; then
	git clone https://github.com/kuba--/zip.git
	cd zip
	git reset --hard 05f412b
	cd ..
fi

# msgpack-c — build the C variant and install it system-wide.
if [ ! -d msgpack-c ]; then
	git clone https://github.com/msgpack/msgpack-c.git
	cd msgpack-c
	git reset --hard 6e7deb8
	git checkout c_master
	cmake .
	make
	sudo make install
	cd ..
fi

# Back to the project root.
cd ..
echo "Done"
| true
|
39fb87aaa6fe5c9e86e9982d616bea97948fa8d3
|
Shell
|
saulgoodenough/Notes
|
/shell_notes/talk.sh
|
UTF-8
| 429
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/sh
# Tiny canned-response "chat" loop: reads a line at a time and replies until
# the user says "bye" or input ends.
echo "Please talk to me ..."
# FIX: loop on read itself rather than `while :` — with the infinite loop,
# hitting EOF made read fail forever while the script spun printing the
# fallback reply. -r keeps backslashes in the input literal.
while read -r INPUT_STRING
do
  case $INPUT_STRING in
    hello)
      echo "Hello yourself!"
      ;;
    "how are you")
      echo "fine,how are you"
      ;;
    "what is your name")
      echo "tiger machine, how about you"
      ;;
    "i am tired")
      echo "take a rest"
      ;;
    bye)
      echo "See you again!"
      break
      ;;
    *)
      echo "Sorry, I don't understand"
      ;;
  esac
done
echo
echo "That's all folks!"
| true
|
44889fdcf6da45cd947d4fa5b6d08817cf8ca214
|
Shell
|
vishnuaggarwal23/quartzscheduler
|
/scripts/clearPreBuildArtifacts.sh
|
UTF-8
| 592
| 2.921875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Remove one module's pre-build artifact directories (build/, out/, bin/).
#
# Globals:   ROOT_PROJECT_PATH - root of the project tree (must be set)
# Arguments: $1 - module directory name (e.g. quartz-rest)
function clearPreBuildArtifacts(){
    cd "$ROOT_PROJECT_PATH" || return 1
    cd "$1" || return 1
    echo "Current Working Directory "
    pwd
    # FIX: the callers pass full module names like "quartz-rest", but the
    # original exact patterns (rest|admin|core) never matched them, so no
    # artifacts were ever deleted. Match on the module-name suffix instead.
    case $1 in
        *rest)
            rm -rf build/
            rm -rf out/
            rm -rf bin/
            ;;
        *admin*)
            rm -rf build/
            rm -rf out/
            rm -rf bin/
            ;;
        *core)
            rm -rf build/
            rm -rf out/
            rm -rf bin/
            ;;
    esac
}
# Clean each module's stale build output before a fresh build.
clearPreBuildArtifacts quartz-rest
clearPreBuildArtifacts quartz-admin-thymeleaf
clearPreBuildArtifacts quartz-core
| true
|
fa62eba9945bcb645014f311ebbf1a6564a9a72b
|
Shell
|
PrincetonUniversity/prsonpipe
|
/scripts/preprocess/run_prep
|
UTF-8
| 23,235
| 3.890625
| 4
|
[] |
no_license
|
#!/bin/bash
#
# June 21, 2017: Miriam Weaverdyck added flags and style standards.
# September 7, 2017: Judith Mildner overhauled structure to more flexible version
# run_prep reads in prep_TSK.par or prep.par pfiles and launches each of the
# steps as efficiently as possible. Sequential steps that use the same software
# will be run together. Every subject is run separately, unless group normalization
# in DARTEL is turned on.
#
################################################################################----------
# Packages used:
# matlab
#
# Files sourced:
# globals.par
# funcs
# prep.par (or prep_TSK.par)
#
# Flags:
# [-h] : help
# [-c] : skip conversion
# [-t] <TSK> : task
# [-p] <pfile> : pfile to be sourced (default = prep.par or prep_TSK.par)
# [-d] <jobID> : dependency jobIDs (e.g. 1111111:2222222:3333333)
#
# Arguments:
# [subs] : subjects to run (keyword 'all' accepted)
#
###############################################################################----------
# Abort immediately if any command fails.
set -e
# Tag prefixed to every log line this script prints.
label='[PREP]'
# Print this script's usage text to stdout (heredoc expands ${TASKS[@]} and
# ${PREP_DIR} from globals.par, which is sourced before -h is handled).
function help_func () {
  cat << END
run_prep [-t <TSK>] [-p <parfile>] [-a <prepdir>] [-d <job(s)>] [-ch] [<subs>]
Description:
------------
Launches preprocessing steps for each subject based on
specified parameters in prep.par
Usage:
------
[-h | --help | -help]
Display this help
[-c]
Skip conversion
[-t <TSK>]
Run task 'TSK' only (default runs all).
Valid tasks: ${TASKS[@]}
[-a <prepdir>]
Does NOT overwrite previously preprocessed data. Uses epi_r##.nii.gz files in
${PREP_DIR}/<TSK>/<prepdir> as raw and runs all steps in pfile on those files
[-p <filename>]
filename of pfile (default is 'prep.par' or 'prep_TSK.par')
[-d <jobIDs>]
jobIDs that these jobs will be dependent on.
Valid form: one dependency '1111111' or multiple '2222222:3333333'
[-f]
Force topup to run, even if output already exists
[<subs>]
Subjects to run
Keywords: 'all', 'new'
END
}
#First, check for help flag (multi-character flags not supported by getopts)
if [[ "$@" =~ -h|--help|-help ]]; then help_func; exit; fi
######################### SOURCE FILES ########################################
# Get the name of the directory this script is in to create full path to globals.par
d="$(dirname -- "$(pwd)")"
# source globals.par
source "${d%scripts*}/scripts/globals.par"
# source functions
source "${SCRIPT_DIR_UTIL}/funcs"
convert=false
keep_prep=false
######################### SET UP & PARSE ARGUMENTS #############################
# Parse flags other than help.
# FIX: 'f' is documented in help_func and handled in the case below, but was
# missing from the optstring, so -f could never be accepted. The leading ':'
# puts getopts in silent mode, which the ':' (missing-argument) branch needs.
while getopts ":cft:p:d:a:" opt; do
  case "$opt" in
    p)
      in_parsfile="$(full_file "${OPTARG}" "${SCRIPT_DIR_PREP}")"
      if [ ! -f "${in_parsfile}" ]; then
        echo "${label} ERROR: pfile does not exist. Use -h for help."
        echo -e "${in_parsfile}"
        exit 1
      fi
      echo "${label} Using pfile: ${in_parsfile}"
      ;;
    c)
      # NOTE(review): -c sets convert=true yet prints "Skipping conversion",
      # and the convert block below runs *when* convert is true — either the
      # message or the logic appears inverted; confirm the intended meaning.
      convert=true
      echo "${label} Skipping conversion."
      ;;
    t)
      tasks="$OPTARG"
      if ! "$(isTSK $tasks)"; then
        echo "$label ERROR: $tasks is not a valid task. Use -h for help."
        exit 1
      fi
      echo "$label Inputted task: $tasks"
      ;;
    d)
      jobIDs="$OPTARG"
      # NOTE(review): isNUM is fed the entire value; a multi-job dependency
      # like '2222222:3333333' (advertised in the help) may be rejected —
      # confirm isNUM's behavior.
      if "$(isNUM ${jobIDs:0})"; then
        first_jobid=":$jobIDs"
      else
        echo "${label} ERROR: $jobIDs is not a valid sbatch dependency. Use -h for help."
        exit 1
      fi
      ;;
    a)
      keep_prep=true
      keep_dir="$OPTARG"
      full_keep_dir="$(full_dir "${keep_dir}" "${PREP_DIR}")"
      if [[ ! -d "${full_keep_dir}" ]]; then
        matching_dirs=$(find "${PREP_DIR}" -maxdepth 2 -path *"${keep_dir}")
        if [[ ! "$(echo "${matching_dirs}" | wc -w)" == 1 ]]; then
          echo "${label} ERROR: ${keep_dir} is not a valid and/or unique (sub)directory."
          echo "${label} Use -h for help."
          exit 1
        else
          prev_wd_dir="${matching_dirs}"
        fi
      else
        prev_wd_dir="${full_keep_dir}"
      fi
      echo "${label} Using ${prev_wd_dir} as prep directory. WARNING: Using preprocessed data, not raw."
      ;;
    f)
      force_topup=true
      ;;
    \?)
      echo "$label ERROR: unknown flag specified: ${opt}. Use -h for help."
      exit 1
      ;;
    : ) #Catch options without arguments
      echo "$label ERROR: -$OPTARG requires an argument. Use -h for help."
      exit 1
  esac
done
#remove used input args
shift $((OPTIND -1))
#leftovers are subject IDs
input_subjects="$@"
#set directory with parameters to globals.par's SCRIPT_DIR_PREP
#pars_dir=${SCRIPT_DIR_PREP}
# If no job dependency is specified, set it to 1 to start immediately
[[ -z "${first_jobid}" ]] && first_jobid=':1'
# Set up standard sbatch flags
mflag="--mail-user=${USER_EMAIL}" # Email address
##############################*** SCRIPT BODY ***##############################
#***** CONVERT NEW DATA *****#
if "${convert}"; then
  # get inputted subjects
  num2subID ${input_subjects}
  # Convert new data from arch/dicom/*tar.gz to raw/TSK/s000/*nii.gz (if not done yet)
  convert_log="${PROJECT_DIR}/raw/LOG_import.txt"
  echo "$label $(date) -- Importing data from dicom --" | tee -a "${convert_log}"
  bash "${SCRIPT_DIR_IMPORT}/run_import_from_labvol" -l "${convert_log}" ${SUBIDS[@]}
  wait_for_it '[IMPORT]' "${convert_log}"
  unset SUBIDS
fi
########################### DEFINE LOCAL FUNCTIONS ############################
#***** add_run_steps function definition *****#
# Queue the current step into the pending batch: appends its name to
# run_steps and its filename extension to run_exts, records the program and
# index, and advances previous_program ('end' once the last step is queued
# so the run loop fires on the next comparison).
function add_run_steps {
  # WARNING: this function uses only global variables, make sure it matches run section
  run_steps=( "${run_steps[@]}" "${step_names[${step_index}]}" )
  run_program="${current_program}"
  run_index="${step_index}"
  run_exts="${run_exts}${step_exts[${step_index}]}"
  # Force a program "change" after the final step so the batch gets submitted.
  if (( "${step_index}"+1 == "${#step_softs[@]}" ));then
    previous_program='end'
  else
    previous_program="${current_program}"
  fi
}
#***** reset_run_parameters function definition *****#
# Drop the just-submitted batch: trims the consumed entries off the step
# arrays (or clears them entirely when everything ran) and resets all run_*
# bookkeeping ready for the next batch.
function reset_run_parameters {
  # WARNING: this function uses only global variables, make sure it matches run section
  # remove the steps we just ran from the step arrays
  if (( "${#run_steps[@]}" == "${#step_softs[@]}" )); then
    unset step_softs step_names step_exts
  else
    step_softs=( "${step_softs[@]:${step_index}}" )
    step_names=( "${step_names[@]:${step_index}}" )
    step_exts=( "${step_exts[@]:${step_index}}" )
  fi
  # clear previous program and run arrays
  unset previous_program run_steps run_index run_exts run_program
}
#***** write_pfile function *****#
# Build a MATLAB parameter file for the batch of steps queued in run_steps by
# filling the program-specific template. Sets the global 'pfile' (path of the
# generated file), which the caller hands to sbatch; everything else it reads
# is global state prepared by the run loop.
function write_pfile {
  # WARNING: this function uses only global variables, make sure it matches run section
  # set working directory
  work_dir="${wd_dir_full}"
  # initialize step switches as 0
  # FIX: the original 'local qa=0;slice_time=0;...' only made qa local — the
  # rest were global assignments; declare them all local so nothing leaks.
  local qa=0 slice_time=0 realign=0 unwarp=0 smooth=0 norm=0 filter=0
  # if SPMW is selected, set some special defaults
  if [[ "${run_program}" == "${SPMW}" ]]; then
    norm='none'
    # FIX: [[ a < b ]] is a lexicographic *string* compare; use arithmetic.
    (( "${#run_steps[@]}" < 2 )) && cleanup=0 || cleanup=3
  # Set more defaults for DARTEL
  elif [[ "${run_program}" == "${DARTEL}" ]]; then
    if [[ -z "${epi_readout_time}" ]]; then
      bppe=$(read_json_value 'BandwidthPerPixelPhaseEncode' \
        "${wd_dir_sub}/epi_r01.json")
      if [[ ${bppe} =~ 'ERROR:' ]]; then
        echo -e "${bppe}"
        exit 1
      fi
      # spm definition of readout time
      epi_readout_time=$( echo "(1/${bppe})*1000" | bc -l)
    fi
    if [[ $(element_in_array 'all' ${no_fieldmap_subs[@]}) == true ]]; then
      no_fieldmap_subs="{'all'}"
    else
      # make this a matlab cell array
      no_fieldmap_subs=$(make_smatlab_array "${no_fieldmap_subs[@]}")
    fi
    # directory in matlab format
    [[ -z "${fieldmap_dir}" ]] && fieldmap_dir="fullfile(PREP_DIR, p.task, 'topup')"
  else
    #fsl definition of readout time
    [[ -z "${epi_readout_time}" ]] && epi_readout_time=$(read_json_value \
      'TotalReadoutTime' "${wd_dir_sub}/epi_r01.json")
    if [[ ${epi_readout_time} =~ 'ERROR:' ]]; then
      echo -e "${epi_readout_time}"
      exit 1
    fi
    #directory in bash format
    [[ -z "${fieldmap_dir}" ]] && fieldmap_dir="${PREP_DIR}/${task}/topup"
  fi
  # Turn on each step in run_steps in the pfile
  for step in "${run_steps[@]}"; do
    case "${step}" in
      SLICE_TIME)
        slice_time=1
        ;;
      REALIGN)
        realign=1
        ;;
      UNWARP)
        unwarp=1
        ;;
      NORM)
        if [[ "${run_program}" == "${SPMW}" ]]; then
          norm="${NORMTYPE}"
        else
          norm=1
        fi
        ;;
      SMOOTH_SOFT)
        smooth="${SMOOTH}" #set to smoothing kernel
        ;;
      FILTER)
        filter=1
        ;;
    esac
  done
  # Write a pfile
  pfile_name="p_${run_program}_${task}${run_exts}_$(date +%y%m%dx%H%M%S).m"
  pfile="${SCRIPT_DIR_PREP}/${pfile_name}"
  # Replace all variables in template pfile with their values and save
  eval "cat << EOF > "${pfile}"
$(<"${SCRIPT_DIR_PREP}/pfile_template_${run_program}.txt")
EOF"
  unset qa slice_time realign unwarp norm smooth filter
}
############################ START PREPROCESSING ##############################
#***** PROCESS EACH TASK *****#
# Select all tasks if none is given
[[ -z "$tasks" ]] && tasks=( "${TASKS[@]}" )
echo "${label} Running task(s): ${tasks[@]}";
# Cycle through each task
for task in "${tasks[@]}"; do
  echo "${label}"
  echo "${label} $(date) >>> Starting task ${task} <<<"
  # Get parameters from prep.par or prep_TSK.par
  if [[ ! -z "${in_parsfile}" ]]; then
    parsfile="${in_parsfile}"
  elif [[ "${PREP_SEP}" -eq 0 ]]; then
    parsfile="${SCRIPT_DIR_PREP}/prep.par"
  else
    parsfile="${SCRIPT_DIR_PREP}/prep_${task}.par"
  fi
  source "${parsfile}"
  # Set path and file names
  # work directory
  wd_dir_full="${PREP_DIR}/${task}/${wd_dir}"
  [[ ! -z "${prev_wd_dir}" ]] && wd_dir_full="${prev_wd_dir}"
  # file to turn steps on/off during processing
  stepfile="${SCRIPT_DIR_PREP}/step.par"
  # logfile (master logfile in wd)
  logfile_wd="${wd_dir_full}/LOG.txt"
  #***** Validate programs chosen in par file *****#
  # trim down the 'none' steps so we don't have to keep looping through them
  for program_index in "${!step_softs[@]}"; do
    # if program is a case-insensitive match to 'none' (grep -i)
    if [[ $( echo "${step_softs[${program_index}]}" | grep -i 'none') ]]; then
      # unset sets the value to '', meaning it keeps the length of the array the same
      unset step_softs["${program_index}"]
      unset step_names["${program_index}"]
      unset step_exts["${program_index}"]
    fi
  done
  # now, remake the arrays to remove the steps that were unset in the previous loop
  step_softs=( "${step_softs[@]}" )
  step_names=( "${step_names[@]}" )
  step_exts=( "${step_exts[@]}" )
  # check if the are not all set to none, which would leave us with an empty array now
  if [[ -z "${step_softs[@]}" ]]; then
    echo "${label} All steps set to 'none'. Moving on to next task..."
    echo "${label} WARNING: All steps set to 'none'"
    continue
  fi
  # loop through all programs specified
  for program_index in "${!step_softs[@]}"; do
    step_name="${step_names[${program_index}]}"
    program="${step_softs[${program_index}]}"
    # check if $program is a real option (i.e. listed in globals.par's PREP_SOFTS)
    if [[ $(element_in_array "${program}" "${PREP_SOFTS[@]}") == 'false' ]]; then
      echo -e "${label} ERROR: ${step_name}=${program} is not a valid software choice. \n\
Each step must be set to one of the following: none ${PREP_SOFTS[@]}"
      exit
    fi
    # check if choices are correct (e.g. unwarp and realign in the same software)
    case "${program}" in
      # For DARTEL, normalization should be on, unwarp requires topup
      "${DARTEL}")
        if [[ "${NORM}" != "${DARTEL}" ]]; then
          echo "${label} ERROR: ${step_name}=${program} but NORM=${NORM}. \
In ${DARTEL}, NORM must always be used.";
          exit
        fi
        if [[ "${UNWARP}" == "${DARTEL}" ]] && [[ "${REALIGN}" != "${DARTEL}" ]]; then
          echo "${label} ERROR: UNWARP=${UNWARP} but REALIGN=${REALIGN}. \
If UNWARP is done in ${DARTEL}, then REALIGN must be as well."
          exit
        fi
        ;;
      # For FSL, realign must be on if unwarp is used
      "${FSL}")
        if [[ "${UNWARP}" == "${FSL}" ]] && [[ "${REALIGN}" != "${FSL}" ]]; then
          echo "${label} ERROR: UNWARP=${UNWARP} but REALIGN=${REALIGN}. In ${FSL}, \
if UNWARP is used, then REALIGN must be as well. Aborting..."
          exit
        fi
        ;;
      # For SPMw, Normalization and realignment must both be used
      "${SPMW}")
        if [[ ${NORM} == ${SPMW} ]] && [[ ${REALIGN} != ${SPMW} ]]; then
          echo "${label} ERROR: NORM=${NORM} but REALIGN=${REALIGN}. In ${SPMW}, if \
NORM is used, then REALIGN must be as well. Aborting..."
          exit
        fi
        ;;
    esac
  done
  # CREATE WORKING DIRECTORY IN PREP
  if [[ ! -d "${wd_dir_full}" ]]; then mkdir "${wd_dir_full}"; fi
  DATE=$(date +%Y%m%d)
  cp "${parsfile}" "${wd_dir_full}/${DATE}_$(basename ${parsfile})"
  # CREATE LIST OF SUBJECT IDS
  get_subs "${task}" "${RAW_DIR}/${task}" "${wd_dir_full}" "${input_subjects}"
  if [[ -z ${SUBS} ]]; then
    echo "${label} No subjects found for task ${task}. Moving on to next task...";
    continue;
  fi
  [[ "${new_template}" == 1 ]] && temp='new' || temp='existing'
  # print out steps and put them in log file
  cat <<- EOM | tee "${logfile_wd}"
${label}
${label} $(date) *** Running subjects ${SUBS[@]} in task ${task} ***
${label} Slice Time Correction = ${SLICE_TIME}
${label} Motion Correction = ${REALIGN}
${label} Unwarping = ${UNWARP}
with FIELDMAP = ${FIELDMAP}
${label} Normalization = ${NORM} (DARTEL template: ${temp})
${label} Smoothing = ${SMOOTH_SOFT} (kernel size: ${SMOOTH})
${label} Bandpass Filter = ${FILTER} (hpf: ${hFilter}, lpf: ${lFilter})
${label}
${label} Writing prep files to ${wd_dir_full}
EOM
  #***** Set up prep for each subject *****#
  for sub_index in "${!SUBS[@]}"; do
    subject="${SUBS[${sub_index}]}"
    echo "${label}"
    echo "${label} $(date) -- setting up preprocessing of ${subject} --" \
      | tee -a "${logfile_wd}"
    # check if subject exists in task
    if [[ -z "${subject}" ]]; then
      echo "${label} WARNING: Can't find subject ${subject}. Skipping..." \
        | tee -a "${logfile_wd}"
      continue;
    fi
    # subject paths and filenames
    wd_dir_sub="${wd_dir_full}/${subject}"
    logfile="${wd_dir_sub}/LOG.txt"
    # set up subject folder in prep
    if [[ "${keep_prep}" == false ]] && [[ -d "${wd_dir_sub}" ]]; then
      # if -a flag was not used and the subject already exists, delete
      echo "${label} $(date) ${wd_dir_sub} already exists. Deleting..."\
        | tee -a "${logfile_wd}";
      rm -rf "${wd_dir_sub}";
    fi
    if [[ ! -d "${wd_dir_sub}" ]]; then
      # copy sub folder from raw/tsk to prep/tsk/wd
      echo "${label} $(date) Copying ${subject}'s raw folder to ${wd_dir_sub}..." \
        | tee -a "${logfile_wd}"
      cp -fr "${RAW_DIR}/${task}/${subject}" "${wd_dir_sub}"
    else
      echo "${label} $(date) Using preprocessed data as raw" \
        | tee -a "${logfile_wd}" "${logfile}"
    fi
    #**** Run topup first if it's on and the subject is not in no_fieldmap_subs *****#
    if [[ "${FIELDMAP}" == "${TOPUP}" ]]; then
      if [[ $(element_in_array "${subject}" "${no_fieldmap_subs[@]}") == 'false' ]]; then
        sbatch_name="FSL_topup_${subject}"
        jflag="-J ${sbatch_name}"
        oflag="--output=${OUT_DIR}/${sbatch_name}_%j.out"
        sbatch_flags="${jflag} ${oflag} --dependency=afterok${first_jobid} ${mflag}"
        [[ ${force_topup} == true ]] && force_flag='-f'
        # NOTE(review): force_flag and the subject directory are passed as a
        # *single* quoted argument — confirm sbatch_prep_TOPUP re-splits it.
        jobid=$(sbatch ${sbatch_flags} -D "${OUT_DIR}" \
          sbatch_prep_TOPUP "${force_flag} ${wd_dir_sub}" | grep -o '[0-9]*')
        sub_jobid["${sub_index}"]="${sub_jobid[${sub_index}]}:${jobid}"
        echo "${label} $(date) Started topup job ${jobid} for ${subject} on task ${task}" \
          | tee -a "${logfile_wd}" "${logfile}"
      fi
    fi
  done
  #***** Run preprocessing until we are out of steps *****#
  while (( "${#step_softs}" > 0 )); do
    init_step_softs=("${step_softs[@]}")
    init_step_names=("${step_names[@]}")
    init_step_exts=("${step_exts[@]}")
    current_program="${init_step_softs[0]}"
    # Are we on group processing (DARTEL) or individual (everyting else)?
    if [[ "${current_program}" == "${DARTEL}" ]]; then
      # do DARTEL stuff
      echo "${label} $(date) -- Running group preprocessing --"
      while [[ ! -z "${step_softs[@]}" ]] && [[ "${current_program}" == "${DARTEL}" ]]; do
        for step_index in "${!step_softs[@]}"; do
          current_program="${step_softs[${step_index}]}"
          # if prev step is empty, pretend it's the same as current
          if [[ -z "${previous_program}" ]]; then previous_program="${current_program}"; fi;
          # Run it if the software changes
          if [[ "${current_program}" != "${previous_program}" ]]; then
            # create the pfile (function relies on run variables specified above)
            # make changes there, and you need to change the function too
            write_pfile
            # set up run/sbatch variables
            run_script="${SCRIPT_DIR_PREP}/sbatch_prep_${run_program}"
            # set number of cores according to number of subjects (max at 20 or 12)
            if [[ "$(hostname | grep -o 'spock')" ]]; then
              maxcores=12
            elif [[ "$(hostname | grep -o 'della')" ]]; then
              maxcores=20
            fi
            (( "${#SUBS[@]}" > "${maxcores}" )) && ncores="${maxcores}" || ncores="${#SUBS[@]}"
            # get the number of times each step runs sequentially (subs/cores)
            # ensure proper rounding by adding denominator-1 to numerator
            n_iterations="$(( (${#SUBS[@]} + ( ${ncores} - 1 ) ) / ${ncores} ))"
            # get the number of processes executed sequentially (iterations*number of steps)
            n_sequential_steps="$(( ${#run_exts} * ${n_iterations} ))"
            # add time if DARTEL is creating a template
            [[ "${new_template}" == 1 ]] && dartel_time=500 || dartel_time=0
            # run time is 60 minutes per step + dartel time if we're making a template
            run_time="$(( 60 * ${n_sequential_steps} + ${dartel_time} ))"
            # sbatch flags
            sbatch_name="${run_program}${run_exts}_prep"
            jflag="-J ${sbatch_name}"
            tflag="-t ${run_time}"
            oflag="--output=${OUT_DIR}/${sbatch_name}_%j.out"
            # Set dependencies
            dependencies="${first_jobid}$(join_by '' ${sub_jobid[@]})${group_jobid}"
            dflag="--dependency=afterok${dependencies}"
            memflag="--mem-per-cpu=3000"
            if [[ "${#SUBS[@]}" -ge "${maxcores}" ]]; then
              coreflag="-c ${maxcores}"
            else
              coreflag="-c ${#SUBS[@]}"
            fi
            flags="${tflag} ${oflag} ${jflag} ${mflag} ${dflag} ${memflag} ${coreflag}"
            # run it
            group_jobid=$(sbatch ${flags} "${run_script}" -p "$pfile" "${SUBS[@]}" \
              | grep -o '[0-9]*')
            # save/print jobid number
            jobinfo_str1="${label} $(date) job ${group_jobid} submitted to run"
            jobinfo_str2="${run_steps[@]} in ${run_program} for subjects ${SUBS[@]}"
            group_jobid=":${group_jobid}"
            echo "${jobinfo_str1} ${jobinfo_str2}" | tee -a "${logfile_wd}"
            # write to each sub's logfiles
            for subject in "${SUBS[@]}"; do
              logfile="${wd_dir_full}/${subject}/LOG.txt"
              # FIX: was "${jobinfo_str}" — an unset variable, so each
              # subject's log only ever received a blank line.
              echo "${jobinfo_str1} ${jobinfo_str2}" >> "${logfile}"
            done
            reset_run_parameters
            break
          # If it's not time to run, add the steps to the run parameters
          else
            add_run_steps
          fi
        done
      done
    elif [[ "${current_program}" != "${DARTEL}" ]]; then
      for sub_index in "${!SUBS[@]}"; do
        subject="${SUBS["${sub_index}"]}"
        echo "${label} $(date) -- Running individual preprocessing for subject ${subject} --"
        prev_jobid="${group_jobid}"
        step_softs=("${init_step_softs[@]}")
        step_names=("${init_step_names[@]}")
        step_exts=("${init_step_exts[@]}")
        # subject paths and filenames
        wd_dir_sub="${wd_dir_full}/${subject}"
        logfile="${wd_dir_sub}/LOG.txt"
        while [[ ! -z "${step_softs[@]}" ]] && [[ "${current_program}" != "${DARTEL}" ]]; do
          for step_index in "${!step_softs[@]}"; do
            current_program="${step_softs[${step_index}]}"
            # if prev step is empty, pretend it's the same as current
            if [[ -z "${previous_program}" ]]; then previous_program="${current_program}"; fi;
            # Run it if the software changes
            if [[ "${current_program}" != "${previous_program}" ]]; then
              # create the pfile (function defined above)
              write_pfile
              ## set up run/sbatch variables
              run_script="${SCRIPT_DIR_PREP}/sbatch_prep_${run_program}"
              # set run_time
              # FIX: was ${#ext} — 'ext' is never set anywhere, so every job
              # got only the 500-minute floor; count the queued step
              # extensions, as the DARTEL branch does.
              run_time="$((60 * ${#run_exts} + 500))"
              # add an hour run if normalization is selected
              if [[ "${run_exts[@]}" =~ w ]]; then
                add_time="$((60*$NRUNS))"
                run_time="$((${run_time}+${add_time}))"
              fi
              # create full list of dependencies (first_jobid, sub_jobid, group_jobid)
              dependencies="${first_jobid}${sub_jobid[${sub_index}]}${group_jobid}"
              # sbatch flags
              sbatch_name="${run_program}${run_exts}_prep_${subject}"
              jflag="-J ${sbatch_name}"
              tflag="-t ${run_time}"
              oflag="--output=${OUT_DIR}/${sbatch_name}_%j.out"
              dflag="--dependency=afterok${dependencies}"
              flags="${tflag} ${oflag} ${jflag} ${mflag} ${dflag}"
              # run it
              jobid=$(sbatch ${flags} "${run_script}" -p "${pfile}" "${subject}" \
                | grep -o '[0-9]*')
              sub_jobid[${sub_index}]="${sub_jobid[${sub_index}]}:${jobid}"
              # save/print jobid number
              jobinfo_str1="${label} $(date) job ${jobid} submitted to run"
              jobinfo_str2="${run_steps[@]} in ${run_program} for subject ${subject}"
              echo "${jobinfo_str1} ${jobinfo_str2}" | tee -a "${logfile_wd}" "${logfile}"
              # Append job ID to list of job IDs DARTEL has to wait for
              # NOTE(review): this appends prev_jobid (the earlier group job),
              # not the job just submitted — confirm that is intended.
              all_jobs+=":${prev_jobid}"
              reset_run_parameters
              break
            # If it's not time to run, add the steps to the run parameters
            else
              add_run_steps
            fi
          done
        done
        # reset current_program for the next subject
        unset current_program
      done
    fi
  done
  unset parsfile sub_jobid group_jobid
  # Write Done. to task logfile
  echo "${label}" | tee -a "${logfile_wd}"
  echo "${label} DONE. $(date)" | tee -a "${logfile_wd}"
  echo "${label}" >> "${logfile_wd}"
done
| true
|
ac74b4602596d97b7257f0f727971e525101f81c
|
Shell
|
nkhorman/spamilter
|
/rc.d/spamilter
|
UTF-8
| 2,832
| 3.625
| 4
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
# FreeBSD rc.d control script for the spamilter milter and its optional
# helper daemons (ipfwmtad, greydbd). The rcorder directives below must keep
# their exact "# KEYWORD:" form.
# PROVIDE: spamilter
# REQUIRE: psql
# BEFORE: LOGIN
# KEYWORD: nojail shutdown
. /etc/rc.subr
name="spamilter"
# rc.conf knobs, with safe defaults when unset.
spamilter_enable=${spamilter_enable:-"NO"}
spamilter_flags=${spamilter_flags:-""}
ipfwmtad_enable=${ipfwmtad_enable:-"NO"}
ipfwmtad_flags=${ipfwmtad_flags:-""}
greydbd_enable=${greydbd_enable:-"NO"}
greydbd_flags=${greydbd_flags:-""}
rcvar=${name}_enable
pidfile="/tmp/spamilter.pid"
extra_commands="status"
# Route start/stop/status through the custom handlers defined below.
start_cmd="spamilter_start"
stop_cmd="spamilter_stop"
status_cmd="spamilter_status"
# Daemon binaries.
proc_spamilter="/usr/local/bin/spamilter"
proc_ipfwmtad="/usr/local/bin/ipfwmtad"
proc_greydbd="/usr/local/bin/greydbd"
# Look up a daemon's pid via its pidfile (rc.subr's check_pidfile).
#
# Arguments: $1 - pidfile path, $2 - process name
# Outputs:   the pid on stdout when running
# Returns:   0 if running, 1 if not
proc_pid_status()
{
	local _pidcmd
	_pidcmd="`check_pidfile ${1} ${2}`"
	# FIX: quote the expansion — the original unquoted [ -z $_pidcmd ] breaks
	# (or silently misbehaves) if check_pidfile ever returns multiple words.
	if [ -z "$_pidcmd" ]; then
		# failure - not running
		return 1
	else
		# success - running
		echo ${_pidcmd}
		return 0
	fi
}
# Report running/stopped state for spamilter and, when enabled, the helper
# daemons. Returns 0 when the main spamilter daemon is up, 1 otherwise.
spamilter_status()
{
	local pid
	rc=0
	pid=`proc_pid_status ${pidfile} ${name}`
	if [ -z "$pid" ]; then
		echo "${name} is not running."
		rc=1
	else
		echo "${name} is running as pid ${pid}."
	fi
	if [ "${ipfwmtad_enable}" = "YES" ]; then
		pid_ipfwmtad=`check_process ${proc_ipfwmtad}`
		if [ -z "$pid_ipfwmtad" ]; then
			echo "ipfwmtad is not running."
		else
			echo "ipfwmtad is running as pid ${pid_ipfwmtad}"
		fi
	fi
	if [ "${greydbd_enable}" = "YES" ]; then
		pid_greydbd=`check_process ${proc_greydbd}`
		if [ -z "$pid_greydbd" ]; then
			echo "greydbd is not running."
		else
			echo "greydbd is running as pid ${pid_greydbd}"
		fi
	fi
	return $rc
}
# Start spamilter plus any enabled helper daemons that are not yet running.
# The psql account's home directory provides LD_LIBRARY_PATH for the daemons
# linked against the PostgreSQL client library.
# Returns 0 if at least one daemon was started, 1 if nothing needed starting.
spamilter_start()
{
	psql_user="psql"
	psql_path=$(grep ${psql_user} /etc/passwd | cut -f 6 -d:)
	rc=1
	if [ -z "$(proc_pid_status ${pidfile} ${name})" ]; then
		LD_LIBRARY_PATH="${psql_path}/lib" ${proc_spamilter} ${spamilter_flags}
		rc=0
	fi
	if [ "${ipfwmtad_enable}" = "YES" ] && [ -z "$(check_process ${proc_ipfwmtad})" ]; then
		${proc_ipfwmtad} ${ipfwmtad_flags}
		rc=0
	fi
	if [ "${greydbd_enable}" = "YES" ] && [ -z "$(check_process ${proc_greydbd})" ]; then
		LD_LIBRARY_PATH="${psql_path}/lib" ${proc_greydbd} ${greydbd_flags}
		rc=0
	fi
	return $rc
}
# Signal every running daemon (spamilter plus enabled helpers) with
# $sig_stop, then wait for the signalled processes to exit.
# Returns 0 when at least one process was signalled, 1 when none were running.
spamilter_stop()
{
	local pids
	pid_spamilter=$(proc_pid_status ${pidfile} ${name})
	if [ -n "$pid_spamilter" ]; then
		kill $sig_stop ${pid_spamilter}
		pids="${pids} ${pid_spamilter}"
	fi
	if [ "${ipfwmtad_enable}" = "YES" ]; then
		pid_ipfwmtad=$(check_process ${proc_ipfwmtad})
		if [ -n "${pid_ipfwmtad}" ]; then
			kill $sig_stop ${pid_ipfwmtad}
			pids="${pids} ${pid_ipfwmtad}"
		fi
	fi
	if [ "${greydbd_enable}" = "YES" ]; then
		pid_greydbd=$(check_process ${proc_greydbd})
		if [ -n "${pid_greydbd}" ]; then
			kill $sig_stop ${pid_greydbd}
			pids="${pids} ${pid_greydbd}"
		fi
	fi
	if [ -n "${pids}" ]; then
		wait_for_pids ${pids}
		return 0
	fi
	return 1
}
# Hand control to the rc.subr framework (start/stop/status/restart dispatch).
load_rc_config $name
run_rc_command "$1"
| true
|
83be1742af90fbf2556b77416ac6252ffa3f15a5
|
Shell
|
tppqt/iosArchiveShell
|
/demo/pid_time
|
UTF-8
| 493
| 3.625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# author wangyingbo
# date:2021-04-23 (morning) 11:03
#
# Print how long a process has been alive, derived from its start time in
# /proc/<pid>/stat and the system uptime (Linux only).
function show_elapsed_time()
{
	local pid=$1
	local user_hz stat_line rest jiffies sys_uptime last_time
	user_hz=$(getconf CLK_TCK) #mostly it's 100 on x86/x86_64
	stat_line=$(cat "/proc/$pid/stat")
	# FIX: the comm field (field 2) is parenthesised and may itself contain
	# spaces (e.g. "(tmux: server)"), which made the original
	# `cut -d" " -f22` pick the wrong column; strip through the last ')'
	# first so plain word counting is reliable.
	rest=${stat_line##*)}
	# starttime is overall field 22; after dropping pid and comm it is the
	# 20th remaining field (clock ticks since boot).
	jiffies=$(echo $rest | cut -d" " -f20)
	sys_uptime=$(cut -d" " -f1 /proc/uptime)
	last_time=$(( ${sys_uptime%.*} - jiffies / user_hz ))
	echo "the process $pid lasts for $last_time seconds."
}
if [ $# -ge 1 ];then
for pid in $@
do
show_elapsed_time $pid
done
fi
while read pid
do
show_elapsed_time $pid
done
| true
|
54688d92e51a524dc4e85ea47c0c0e4db71e45d0
|
Shell
|
vitorAmorims/shellscript
|
/gitBasico.sh
|
UTF-8
| 1,974
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
# FIX: the shebang was "!#/bin/bash" (characters transposed), so the
# kernel never recognised it as a script interpreter line.
# Interactive git tutorial: walks the user through install, config,
# repository creation, branching and merging, pausing between steps.
clear;
echo "etapa 1 - instalação do git"
sleep 1
echo "use o comando sudo apt-get install git-all"
echo "Vamos ver se a pasta git existe?"
# NOTE(review): this tests /bin, not a git directory, so the message
# below is effectively printed on every system -- confirm the intent.
if [ -d /bin ]
then
echo "Diretório git já existe."
fi
sleep 2
clear
echo "etapa 2 - configuração"
sleep 1
echo "use o comando git config --global username"
echo "use o comando git config --global yourEmail"
sleep 1
echo "adiciona use o comando..."
echo "git config --global SeuEditorPreferido"
sleep 1
echo "testando suas configurações..."
sleep 1
echo "comando git config --list"
git config --list
sleep 1
clear
echo "Verificando versão de git instalada."
echo "comando git --version"
echo "."
sleep 1
echo "."
sleep 1
echo "."
sleep 1
echo "."
sleep 1
git --version
sleep 2
clear
echo "********CRIANDO REPOSOTÒRIO**********"
echo "comando git init"
echo "use ls para listar os diretórios"
echo "use ls -la para listar os arquivos e diretórios ocultos"
echo "veja que existe o diretório .git oculto."
echo "Se remover este diretório, perde o repositório."
sleep 2
echo "use o comando cat > form.html e digite Primeira linha de código."
echo "use o comando comando: git status: analisa possíveis alterações no diretório."
echo "Segundo comando: git add form.html, para adicionar o arquivo no git"
echo "use o comando comando: git status: analisa possíveis alterações no diretório."
echo "Terceiro comando: git commit - m form.html estou realizando commit no git"
echo "comando git log para ver logs das alterações no repostório."
echo "Quarto comando: git push - Para enviar o commit ao GitHub"
clear
echo "********************************************"
echo "Criando branch com comando git branch newform"
echo "comando git branch"
echo "comando git checkout newform"
echo "troquei de branch..........."
echo "obs: git add . adiciona tudo dentro da pasta git"
sleep 1
echo "*****************merge***********************"
echo "comando git cheout master"
echo "comando git merge newform"
| true
|
9189f0677d6f7483633da6d08e0817d871023ca5
|
Shell
|
Thecarisma/serenity
|
/Ports/links/links.sh
|
UTF-8
| 311
| 2.5625
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
# SerenityOS port script for the "links" text-mode web browser.
# The run_* helpers and the build driver come from .port_include.sh
# (sourced at the bottom); this file only declares the port metadata
# and the fetch/configure/build/install callbacks it expects.
PORT_DIR=links
fetch() {
run_fetch_web "http://links.twibright.com/download/links-2.19.tar.bz2"
}
configure() {
# Cross-compile with the Serenity toolchain.
run_export_env CC i686-pc-serenity-gcc
run_configure_autotools
}
build() {
run_make
}
install() {
# Stage into the Serenity root filesystem image.
run_make_install DESTDIR="$SERENITY_ROOT"/Root
}
. ../.port_include.sh
| true
|
fc6574a9237a12b52c5f5f4683bf33a179a9fd80
|
Shell
|
AlexRomero10/Scripts
|
/DHCP/libreria.sh
|
UTF-8
| 8,847
| 3.703125
| 4
|
[] |
no_license
|
#Checks (via whoami) whether the current user is root. Returns 0 for
#root; otherwise prints a message and terminates the whole script with
#status 1, since nothing else in this library works unprivileged.
function f_soyroot {
local usuario
usuario=$(whoami)
if [[ $usuario = 'root' ]]
then
return 0
else
echo "No eres root"
exit 1
fi
}
#Takes one argument, a package name. If the package is installed it
#returns 0; otherwise it asks the user whether to install it. The
#installation itself (f_instalar_paquete) requires root privileges.
function f_comprobar_paquete {
# dpkg -s prints the package status only when it is installed, so a
# non-empty capture means "installed".
if [[ $(dpkg -s $1) ]]
then
return 0
else
echo "El paquete no está instalado, ¿quieres instalarlo?(s/n)"
read confirmacion
if [[ $confirmacion = "s" ]]; then
f_instalar_paquete $1
else
# User declined: abort the whole script, the package is required.
exit 1
fi
fi
}
#Installs the single package named in $1 (quoted so unusual names do
#not word-split or glob). apt output is discarded; must run as root.
#Could be extended to install several packages if ever needed.
function f_instalar_paquete {
apt update -y &> /dev/null && apt install -y "$1" &> /dev/null
}
#Prints the name of every network interface, one per line, so the user
#can pick the one the DHCP server should serve. Takes no arguments.
function f_mostrar_interfaces {
# Odd lines of `ip link show` carry "idx: name: <flags>"; field 2
# minus the trailing colon is the interface name.
ip link show | awk 'NR % 2 { print $2 }' | tr -d ':'
}
#Checks whether a network interface exists. The interface may be passed
#as $1; for backward compatibility with existing callers it falls back
#to the global variable $interfaz. Returns 0 if it exists, else non-0.
function f_comprobar_interfaz {
local iface="${1:-$interfaz}"
# FIX: honour the documented argument, and match the whole name (-x)
# so e.g. "eth" no longer matches "eth0" by substring; -q replaces the
# old output redirection.
ip link show | awk 'NR % 2 == 1' | awk '{print $2}' | tr -d ':' | grep -qx -- "$iface"
}
#Detects whether the chosen interface (global $interfaz) is up and
#informs the user. If it is down, offers to bring it up; if the user
#declines, the script terminates.
function f_levantar_interfaz {
# `state DOWN` in the ip output means the link is administratively down.
if [[ $(ip link show | egrep $interfaz | egrep -i 'state down' > /dev/null;echo $?) = 0 ]]; then
echo "La interfaz $interfaz está bajada. ¿Levantárla? (s/n)"
read confirmacion
if [[ $confirmacion = 's' ]]; then
ip link set $interfaz up
echo "Interfaz levantada"
else
# Without a usable interface there is nothing left to configure.
exit 1
fi
fi
}
#Edits /etc/default/isc-dhcp-server, inserting the interface the DHCP
#server should listen on (read from the global $interfaz). Checks first
#whether the interface is already listed so repeated runs do not
#duplicate the entry.
function f_modificar_isc-dhcp-server {
if [[ $(cat /etc/default/isc-dhcp-server | egrep -i "INTERFACESv4" | egrep $interfaz > /dev/null;echo $?) != 0 ]]; then
# Append the interface right after the opening quote of INTERFACESv4=".
sed -i 's/INTERFACESv4="/&'$interfaz' /' /etc/default/isc-dhcp-server
fi
}
#Edits the global DNS options the DHCP server hands out when the subnet
#declares none. It shows the current configuration and asks whether to
#change it; if so it prompts for every parameter and rewrites
#/etc/dhcp/dhcpd.conf accordingly. It also checks whether this server is
#marked authoritative for the network and, if not, offers to make it so.
function f_modificar_configuracion_global {
# "#authoritative" still commented out => not the primary DHCP server.
if [[ $(cat /etc/dhcp/dhcpd.conf | egrep -o "#authoritative" > /dev/null;echo $?) = 0 ]]; then
echo "Este servidor dhcp no esta funcionando como servidor principal de la red"
echo "No es obligatorio que sea principal para que funcione correctamente"
echo "¿Deseas hacerlo principal?(s/n)"
read confirmacion
if [[ $confirmacion = "s" ]];then
sed -i 's/#authoritative/authoritative/' /etc/dhcp/dhcpd.conf
fi
fi
echo "La configuración global de dns es la siguiente:"
cat /etc/dhcp/dhcpd.conf | egrep -m 2 "option domain-name"
echo "¿Desea cambiarla?(s/n)"
read confirmacion
if [[ $confirmacion = "s" ]];then
# Drop the old global options, then re-insert fresh ones right after
# the "# option definitions" marker line of the stock config.
sed -i '/^option domain-name /d' /etc/dhcp/dhcpd.conf
sed -i '/^option domain-name-servers /d' /etc/dhcp/dhcpd.conf
echo "¿Qué nombre de dominio quiere meter en la configuración?"
read dom1
sed -i '/# option definitions/ a option domain-name "'$dom1'";' /etc/dhcp/dhcpd.conf
sed -i '/# option definitions/ a option domain-name-servers ;' /etc/dhcp/dhcpd.conf
echo "¿Cuántos servidores de nombres de dominio quieres meter en la configuración?"
read num1
# First server is appended bare, later ones comma-separated.
for i in $(seq 1 $num1)
do
echo "Dime el nombre del servidor $i"
read serv
if [[ $i = 1 ]]; then
sed -i 's/^option domain-name-servers /&'"$serv"' /' /etc/dhcp/dhcpd.conf
else
sed -i 's/^option domain-name-servers /&'"$serv"', /' /etc/dhcp/dhcpd.conf
fi
done;
fi
}
#Inspects /etc/dhcp/dhcpd.conf for an already-declared subnet. If one
#exists it is shown to the user, who is asked whether to keep it
#(return 1: start the server as-is) or to configure another one
#(return 0: continue with the script).
function f_comprobar_subnet {
# awk extracts everything from a line starting with "subnet" up to the
# closing "}" of that block.
if [[ $(cat /etc/dhcp/dhcpd.conf | awk '/^subnet/,/\}$/' | egrep subnet > /dev/null;echo $?) = 0 ]]; then
echo "Ya tiene creada la siguiente subnet: "
cat /etc/dhcp/dhcpd.conf | awk '/^subnet/,/\}$/'
echo "¿Desea iniciar el servidor con esta configuración? (s/n)"
read respuesta
if [[ $respuesta = 's' ]]; then
echo "De acuerdo, iniciemos el servidor"
return 1
else
echo "De acuerdo, crearemos otra subred"
return 0
fi
fi
}
#Builds the subnet declaration the DHCP server will serve, prompting
#the user for each option (range, gateway, mask, domain search, name
#servers, broadcast, lease times), and stages the result in the scratch
#file axklmldhcp.txt.
#NOTE(review): presumably the caller appends axklmldhcp.txt to
#/etc/dhcp/dhcpd.conf afterwards -- confirm in the main script.
function f_anadir_subnet {
echo "Digame la subnet en notación decimal puntuada (ejemplo: 192.168.0.0):"
read ip
echo "Dígame la mascara de red en notación decimal puntuada: (ejemplo: 255.255.255.0):"
read mascara
# First line (truncating any previous scratch file); every later option
# is appended with `sed '$a ...'`.
echo "subnet $ip netmask $mascara {" > axklmldhcp.txt
echo "Empecemos a configurar la subnet. Dígame el rango inferior de ip que va a repartir el servidor"
read inferior
echo "Ahora dígame el límite superior de ip que va a repartir el servidor: "
read superior
sed -i '$a \ range '$inferior' '$superior';' axklmldhcp.txt
echo "A partir de ahora configuraremos parámetros que son importantes, pero son opcionales, así que no es necesario ponerlos para el funcionamiento del servidor"
echo "¿Desea configurar la puerta de enlace? (s/n)"
read respuesta
if [[ $respuesta = "s" ]];then
echo "Dígame la dirección de la puerta de enlace (ej: 192.168.0.1):"
read puerta
sed -i '$a \ option routers '$puerta';' axklmldhcp.txt
else
echo "De acuerdo"
fi
echo "¿Desea configurar la mascara de red que otorgará el servidor? (s/n)"
read respuesta
if [[ $respuesta = "s" ]];then
echo "Dígame la mascara de red que otorgará el servidor dhcp (ej: 255.255.255.0):"
read submascara
sed -i '$a \ option subnet-mask '$submascara';' axklmldhcp.txt
else
echo "De acuerdo"
fi
echo "¿Desea configurar la búsqueda de dominios? (s/n)"
read respuesta
if [[ $respuesta = "s" ]];then
echo "¿Qué nombre de dominio desea introducir en la configuración?"
read dominio
sed -i '$a \ option domain-search \"'$dominio'\";' axklmldhcp.txt
else
echo "De acuerdo"
fi
echo "¿Desea configurar los sevidores de nombres de dominio? (s/n)"
read respuesta
if [[ $respuesta = "s" ]];then
sed -i '$a \ option domain-name-servers ;' axklmldhcp.txt
echo "¿Cuántos servidores de nombres de dominio quieres meter en la configuración?"
read num1
# Same comma convention as the global configuration: first server
# bare, later ones comma-separated.
for i in $(seq 1 $num1)
do
echo "Dime el nombre del servidor $i"
read serv
if [[ $i = 1 ]]; then
sed -i 's/option domain-name-servers /&'"$serv"' /' axklmldhcp.txt
else
sed -i 's/option domain-name-servers /&'"$serv"', /' axklmldhcp.txt
fi
done;
else
echo "De acuerdo"
fi
echo "¿Desea introducir una dirección de broadcast? (s/n)"
read respuesta
if [[ $respuesta = "s" ]];then
echo "Introduzca la dirección de broadcast (ej: 192.168.0.255):"
read broadcast
sed -i '$a \ option broadcast-address '$broadcast';' axklmldhcp.txt
else
echo "De acuerdo"
fi
echo "¿Cuál será el tiempo de préstamo (lease time) por defecto de la subred (en segundos)?"
read deflease
sed -i '$a \ default-lease-time '$deflease';' axklmldhcp.txt
echo "¿Cuál será el tiempo de préstamo máximo de la subred (en segundos)?"
read maxlease
sed -i '$a \ max-lease-time '$maxlease';' axklmldhcp.txt
sed -i '$a \}' axklmldhcp.txt
echo "Subred configurada"
}
| true
|
e225fabc60719a545bfeb2b5662250e138d3b0da
|
Shell
|
muratayusuke/dot.zsh.d
|
/lib/misc.zsh
|
UTF-8
| 2,177
| 2.71875
| 3
|
[] |
no_license
|
# Miscellaneous zsh options, PATH entries and aliases.
# Do not ring the terminal bell.
setopt no_beep
# Report background-job completion immediately.
# NOTE(review): spelled "no_tify" -- zsh ignores underscores in option
# names, so this resolves to NOTIFY, which matches the comment's intent.
setopt no_tify
# Make cd also push the old directory onto the stack (pushd behaviour).
setopt auto_pushd
# A bare directory name cd's into it.
setopt auto_cd
# Offer spelling correction for mistyped commands.
setopt correct
# reload .zshrc
alias s=". ~/.zshrc"
# Emacs-style key bindings.
bindkey -e
# freemind
export PATH=/usr/local/freemind:$PATH
# ctags environment setup
export PATH=/usr/local/ctags/bin:$PATH
export MANPATH=/usr/local/ctags/share/man:`manpath -q`
# mysql
export PATH=/usr/local/mysql/bin:$PATH
export MANPATH=/usr/local/mysql/man:`manpath -q`
export LD_LIBRARY_PATH=/usr/local/mysql/lib/mysql:$LD_LIBRARY_PATH
# tmux
export PATH=/usr/local/tmux/bin:$PATH
export MANPATH=/usr/local/tmux/man:`manpath -q`
# less color option
export LESS='-g -i -M -R -S -W -x4'
# alias settings (GNU ls on macOS is provided as `gls` by coreutils)
case ${OSTYPE} in
darwin*)
# for Mac
alias ls="gls -lFAh --color"
;;
*)
# other
alias ls="ls -lFAh --color"
;;
esac
alias findf="find . -type f"
alias findfg="find . -type f | xargs grep"
alias ch="chromium-browser"
# tmux: force 256-colour terminal support
alias tmux="tmux -2"
# add path private scripts
export PATH=~/.bin:$PATH
# history
HISTFILE=$HOME/.zsh-history # save history to a file
HISTSIZE=100000 # number of history entries kept in memory
SAVEHIST=100000 # number of history entries saved to the file
setopt extended_history # record timestamps in the history file
function history-all { history -E 1 } # print the entire history
# ack-grep
alias a="ack-grep"
alias aa="ack-grep -a"
# homesick
alias hp="homesick pull"
alias hpa="homesick pull --all"
# cdr: remember recently visited directories for `cdr` completion
autoload -Uz chpwd_recent_dirs cdr add-zsh-hook
add-zsh-hook chpwd chpwd_recent_dirs
zstyle ':chpwd:*' recent-dirs-max 5000
zstyle ':chpwd:*' recent-dirs-default yes
zstyle ':completion:*' recent-dirs-insert both
# cask
export PATH="$HOME/.cask/bin:$PATH"
# prevent sleep
case ${OSTYPE} in
darwin*)
# for Mac
;;
*)
# other
xfce4-power-manager
xset s off
;;
esac
# bin
export PATH=~/.bin:$PATH
export PATH=$PATH:./node_modules/.bin
# nodebrew
export PATH=$HOME/.nodebrew/current/bin:$PATH
| true
|
71d865166dacf71b23643d8d9769704e66d5d674
|
Shell
|
taarimalta/selenium-server-deb-package
|
/src/DEBIAN/postinst
|
UTF-8
| 1,921
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/sh
# postinst script for selenium-server
#
# see: dh_installdeb(1)
set -e
# summary of how this script can be called:
# * <postinst> `configure' <most-recently-configured-version>
# * <old-postinst> `abort-upgrade' <new version>
# * <conflictor's-postinst> `abort-remove' `in-favour' <package>
# <new-version>
# * <postinst> `abort-remove'
# * <deconfigured's-postinst> `abort-deconfigure' `in-favour'
# <failed-install-package> <version> `removing'
# <conflicting-package> <version>
# for details, see http://www.debian.org/doc/debian-policy/ or
# the debian-policy package
case "$1" in
configure)
# Create selenium-server user if it doesn't exist.
# (--system: no login shell aging, uid from the system range.)
if ! id selenium-server > /dev/null 2>&1 ; then
adduser --system --home /var/lib/selenium-server --no-create-home \
--ingroup nogroup --disabled-password --shell /bin/bash \
selenium-server
fi
# directories needed for selenium-server
chown selenium-server:adm /var/lib/selenium-server /var/log/selenium-server
chmod 750 /var/lib/selenium-server /var/log/selenium-server
# make sure selenium-server can delete everything in /var/run/selenium-server to re-explode war
chown -R selenium-server:adm /var/run/selenium-server
chmod -R 750 /var/run/selenium-server
;;
abort-upgrade|abort-remove|abort-deconfigure)
;;
*)
echo "postinst called with unknown argument \`$1'" >&2
exit 1
;;
esac
# dh_installdeb will replace this with shell code automatically
# generated by other debhelper scripts.
# Automatically added by dh_installinit
if [ -x "/etc/init.d/selenium-server" ]; then
update-rc.d selenium-server defaults >/dev/null
invoke-rc.d selenium-server start || exit $?
fi
# End automatically added section
exit 0
| true
|
583ed34f5431c3f076a002a15c43b9b7d9748f1d
|
Shell
|
nishakshatriya/shellprogram
|
/arithmetic/Gambler.sh
|
UTF-8
| 369
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/bash
# Gambler's ruin simulation: start each trial with `stake` dollars, bet
# one dollar on a fair coin flip until broke or the goal is reached,
# then report how many trials ended at the goal.
# (Rewritten: the original had a broken shebang, `while[[` without a
# space, an `if` without `then`, `if ... do ... done`, looped on the
# undefined variable $trial, and never counted wins -- it did not run.)

# run_trials <stake> <goal> <trials>
# Prints "<wins> wins of <trials>" on stdout.
run_trials() {
    local stake=$1 goal=$2 trials=$3
    local wins=0 bets=0 cash t
    for (( t = 0; t < trials; t++ )); do
        cash=$stake
        while (( cash > 0 && cash < goal )); do
            bets=$(( bets + 1 ))
            # RANDOM % 2 is a fair coin: win or lose one dollar.
            if (( RANDOM % 2 )); then
                cash=$(( cash + 1 ))
            else
                cash=$(( cash - 1 ))
            fi
        done
        if (( cash == goal )); then
            wins=$(( wins + 1 ))
        fi
    done
    echo "$wins wins of $trials"
}

read -p "enter the Stake:" stake
read -p "enter the Goal:" goal
read -p "enter the trials:" trials
run_trials "$stake" "$goal" "$trials"
| true
|
25e3541569b119bb07bbe8e867e737a3c9482062
|
Shell
|
hmasmoudi/SyphaxOS
|
/Default/0003-SyphaxOSGnome3/001_BuildPackagesScripts/0121_3-flac/PKGBUILD
|
UTF-8
| 638
| 2.625
| 3
|
[] |
no_license
|
# Maintainer: Hatem Masmoudi <hatem.masmoudi@gmail.com>
# Arch-style PKGBUILD for FLAC; makepkg sources this file and runs
# build() then package() with $srcdir/$pkgdir set.
pkgname=flac
pkgver=1.3.2
pkgrel=4
pkgdesc="FLAC is an audio CODEC similar to MP3, but lossless, meaning that audio is compressed without losing any information."
arch=('x86_64')
url="http://downloads.xiph.org/releases/flac"
license=('GPL')
groups=('desktop')
source=("$url/${pkgname}-${pkgver}.tar.xz")
sha1sums=('2bdbb56b128a780a5d998e230f2f4f6eb98f33ee')
depends=('rootfs')
build() {
cd "$srcdir/${pkgname}-${pkgver}"
# Shared libs only; skip the long self-test suite.
./configure --prefix=/usr --disable-static --disable-thorough-tests
make
}
package() {
cd "$srcdir/${pkgname}-${pkgver}"
# Stage into $pkgdir; makepkg turns that tree into the package.
make DESTDIR="$pkgdir" install
}
| true
|
cfb334e6fab44ff3bf9d2e83a610af46b1f89838
|
Shell
|
J4VJ4R/holberton-system_engineering-devops
|
/0x04-loops_conditions_and_parsing/6-superstitious_numbers
|
UTF-8
| 248
| 3.71875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Print the numbers 1 through 20, announcing the matching superstition
# line just before each culturally unlucky number (5, 10 and 18).
for x in {1..20}
do
case $x in
5) echo "bad luck from China";;
10) echo "bad luck from Japan";;
18) echo "bad luck from Italy";;
esac
echo "$x"
done
| true
|
f576e76142e1c3be343439a4d386295b78b1f42f
|
Shell
|
miroi/My_scripts
|
/grid_runs/virtual_organizations/enmr_eu/1node/test_run/run_on_CE_test.sh
|
UTF-8
| 2,363
| 3.734375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#######################################################################################################
#
#
# Script for downloading, upacking and launching the DIRAC4Grid suite on some grid CE of given VO.
#
# FIX: the shebang used to be /bin/sh although the script relies on
# bash-only features ([[ ]] tests, `echo -e`); on CEs where /bin/sh is
# dash it aborted with a syntax error.
#
#######################################################################################################
# check the parameter - VO name
if [[ $1 != "voce" && $1 != "compchem" && $1 != "isragrid" && $1 != "osg" && $1 != "sivvp.slovakgrid.sk" && $1 != "enmr.eu" ]]; then
echo -e "\n wrong parameter - VO name : $1 "
exit 12
else
VO=$1
echo -e "\n OK, you specified properly the VO=$VO, continuing \n"
fi
# include all external functions from file copied onto current CE
if [ -e "UtilsCE.sh" ]
then
source ./UtilsCE.sh
else
echo -e "\n Source file UtilsCE not found! Error exit 13 ! \n"
exit 13
fi
# name of Dirac package distributed over grid clusters
package="DIRAC4Grid_suite.tgz"
print_CE_info
querry_CE_attributes $VO
#check_file_on_SE $VO $package
# download & unpack tar-file onto CE - MUST be successfull or exit
#download_from_SE $VO $package
# get number of procs #
unset nprocs
get_nprocs_CE nprocs
#RETVAL=$?; [ $RETVAL -ne 0 ] && exit 5
echo -e "\n Number of #CPU obtained from the function: $nprocs \n"
#
# Unpack the downloaded DIRAC tar-ball
#
#unpack_DIRAC $package
#RETVAL=$?; [ $RETVAL -ne 0 ] && exit 6
#-----------------------------------------------
# specify the scratch space for DIRAC runs #
#-----------------------------------------------
#echo "--scratch=\$PWD/DIRAC_scratch" > ~/.diracrc
#echo -e "\n\n The ~/.diracrc file was created, containing: "; cat ~/.diracrc
##########################################
# set build dirs and paths #
##########################################
# take care of unique nodes ...
UNIQUE_NODES="`cat $PBS_NODEFILE | sort | uniq`"
UNIQUE_NODES="`echo $UNIQUE_NODES | sed s/\ /,/g `"
echo -e "\n Unique nodes for parallel run (from PBS_NODEFILE): $UNIQUE_NODES"
echo "PBS_NODEFILE=$PBS_NODEFILE"
echo "PBS_O_QUEUE=$PBS_O_QUEUE"
echo "PBS_O_WORKDIR=$PBS_O_WORKDIR"
##############################################################
echo -e "\n --------------------------------- \n ";
#############################################
#### flush out some good-bye message ... ####
#############################################
final_message
exit 0
| true
|
2844c22adf7eb159554564c49c416a50cea6046d
|
Shell
|
bartman/dot-uzbl
|
/scripts/load_url_from_bookmarks.sh
|
UTF-8
| 649
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
#NOTE: it's the job of the script that inserts bookmarks to make sure there are no dupes.
# Presents the uzbl bookmark file in dmenu and tells the running uzbl
# instance (socket name passed as $5 by uzbl) to load the chosen URL.
file=$HOME/.uzbl/data/bookmarks
# NOTE(review): $file is a non-empty constant, so this test never
# fires; probably `[ -r "$file" ]` was intended.
[ -z "$file" ] && exit 1
# dmenu colours: normal bg/fg, selected bg/fg.
COLORS=" -nb #303030 -nf khaki -sb #CCFFAA -sf #303030"
# Detect the "vertical list" dmenu patch by probing its usage text;
# with it we can show tags and keep only the URL column afterwards.
if dmenu --help 2>&1 | grep -q '\[-rs\] \[-ni\] \[-nl\] \[-xs\]'
then
DMENU="dmenu -i -xs -rs -l 10" # vertical patch
# show tags as well
goto=`$DMENU $COLORS < $file | awk '{print $1}'`
else
DMENU="dmenu -i"
# because they are all after each other, just show the url, not their tags.
goto=`awk '{print $1}' $file | $DMENU $COLORS`
fi
#[ -n "$goto" ] && echo "uri $goto" > $4
[ -n "$goto" ] && uzblctrl -s $5 -c "uri $goto"
| true
|
39354901252aa403663f662a64abba9020833afd
|
Shell
|
lonkamikaze/bsda2
|
/tests/bsda_container.sh
|
UTF-8
| 3,615
| 3.140625
| 3
|
[
"ISC"
] |
permissive
|
# Test driver for bsda:container: exercises the Array and Map classes
# (construction, element access, push/pop, copy, serialise/deserialise,
# delete) and verifies via before/after `set` dumps that only the
# framework's object-ID counters leak into the shell environment.
. ../src/bsda_container.sh
. ../src/bsda_test.sh
. ../src/bsda_util.sh
NL=$'\n'
# Comma separated fields
IFS=","
# Lambda, produce comma separated list of key→value pairs
lambda() {
kvs="${kvs:+$kvs,}$(printf "%s→%s" "$@")"
}
# Create a memory dump
memdump0="$(true;set)"
#
# Check bsda:container:Array
#
# Create array
bsda:container:Array arr a b c d e
kvs=
$arr.foreach lambda
test "0→a,1→b,2→c,3→d,4→e" = "$kvs"
# Check item count
$arr.getCount val && test "$val" = 5
# Value retrieval
$arr.[ 3 ] val && test "$val" = d
# Value assignment
$arr.[ 3 ]= D
$arr.[ 3 ] val && test "$val" = D
# Pop/Push
$arr.pop val && test "$val" = e
$arr.getCount val && test "$val" = 4
$arr.push E
$arr.getCount val && test "$val" = 5
# Verify
kvs=
$arr.foreach lambda
test "0→a,1→b,2→c,3→D,4→E" = "$kvs"
# Copy
$arr.copy arr1
kvs=
$arr1.foreach lambda
test "0→a,1→b,2→c,3→D,4→E" = "$kvs"
# Check serialise/deserialise
$arr1.serialise arr1s
# Restore modified object from serialised string
$arr1.[ 9 ]= X
$arr1.getCount val && test "$val" = 10
bsda:obj:deserialise arr1 "$arr1s"
$arr1.getCount val && test "$val" = 5
kvs=
$arr1.foreach lambda
test "0→a,1→b,2→c,3→D,4→E" = "$kvs"
# Delete and restore
$arr1.delete
unset arr1
bsda:obj:deserialise arr1 "$arr1s"
kvs=
$arr1.foreach lambda
test "0→a,1→b,2→c,3→D,4→E" = "$kvs"
# Clean up
$arr1.delete
$arr.delete
unset arr1 arr1s arr kvs val
# Create a new memory dump
memdump1="$(unset memdump0;true;set)"
# Compare before and after memory dumps, the only thing allowed to have
# changed are object ID counters.
diff01="$(echo "$memdump1" | grep -vFx "$memdump0")"
bsda:test:xmatch "$diff01" all:any "$bsda_obj_frameworkPrefix*_nextId=[0-9]*"
#
# Check bsda:container:Map
#
# Create map
bsda:container:Map map A a B b C c D d E e
kvs=
$map.foreach lambda
bsda:util:join kvs "$NL" $kvs
bsda:test:xmatch "$kvs" all:once A→a B→b C→c D→d E→e
# Check item count
$map.getCount val && test "$val" = 5
# Value retrieval
$map.[ D ] val && test "$val" = d
# Value assignment
$map.[ D ]= D
$map.[ D ] val && test "$val" = D
# Value removal
$map.[ E ]x
$map.getCount val && test "$val" = 4
$map.[ E ] val && test -z "$val"
# Special character assignment
$map.[ '%$foo&&{' ]= '$bar}'
$map.getCount val && test "$val" = 5
$map.[ '%$foo&&{' ] val && test "$val" = '$bar}'
# Verify
kvs=
$map.foreach lambda
bsda:util:join kvs "$NL" $kvs
bsda:test:xmatch "$kvs" all:once A→a B→b C→c D→D '%$foo&&{→$bar}'
# Copy
$map.copy map1
kvs=
$map1.foreach lambda
bsda:util:join kvs "$NL" $kvs
bsda:test:xmatch "$kvs" all:once A→a B→b C→c D→D '%$foo&&{→$bar}'
# Check serialise/deserialise
$map1.serialise map1s
# Restore modified object from serialised string
$map1.[ Z ]= X
$map1.getCount val && test "$val" = 6
bsda:obj:deserialise map1 "$map1s"
$map1.getCount val && test "$val" = 5
kvs=
$map1.foreach lambda
bsda:util:join kvs "$NL" $kvs
bsda:test:xmatch "$kvs" all:once A→a B→b C→c D→D '%$foo&&{→$bar}'
# Delete and restore
$map1.delete
unset map1
bsda:obj:deserialise map1 "$map1s"
kvs=
$map1.foreach lambda
bsda:util:join kvs "$NL" $kvs
bsda:test:xmatch "$kvs" all:once A→a B→b C→c D→D '%$foo&&{→$bar}'
# Clean up
$map1.delete
$map.delete
unset map1 map1s map kvs val
# Create a new memory dump
memdump2="$(unset diff01 memdump0 memdump1;true;set)"
# Compare before and after memory dumps, the only thing allowed to have
# changed are object ID counters.
diff12="$(echo "$memdump2" | grep -vFx "$memdump1")"
bsda:test:xmatch "$diff12" all:any "$bsda_obj_frameworkPrefix*_nextId=[0-9]*"
| true
|
ea96c82b4cdbe38875360bb30042f9c9d6c8a157
|
Shell
|
cha63506/maemo
|
/gadget/xprootfs
|
UTF-8
| 916
| 4.25
| 4
|
[] |
no_license
|
#!/bin/sh
#
# xprootfs -- extract a rootfs.tar.gz
#
# This utility helps you to unpack a rootfs tarball such that you can
# customize it, and create a customize rootfs image from it.
#
# Synopsis:
# xprootfs [-x] [[<rootfs.tar.gz>] [<target-dir>]]
#
# will create <target-dir> ("rootfs" by default) and extract <rootfs.tar.gz>
# in it, preserving file owners, modes and everything. -x blows <target-dir>
# before anything else.
#
# We need to be root to extract the tarball faithfully.
# NOTE(review): $UID is a bash/zsh variable, not POSIX sh, and "$0 $*"
# re-splits any argument containing spaces -- confirm the target shell.
[ $UID -eq 0 ] || exec su -c "$0 $*";
rootfs="arm-rd-rootfs.tgz";
root="rootfs";
kill="no";
# Parse the command line.
if [ "x$1" = "x-x" ];
then
kill="yes";
shift;
fi
if [ $# -gt 0 ];
then
rootfs="$1";
shift;
if [ $# -gt 0 ];
then
root="$1";
shift;
fi
fi
# Create $root (first deleting it when -x was given); set -e makes any
# failure from here on fatal.
set -e;
[ "$kill" = "yes" ] && rm -rf "$root";
mkdir "$root";
# Run tar: --numeric-owner keeps uid/gid values instead of remapping
# them through the host's passwd/group databases.
exec tar xzf "$rootfs" -C "$root" --numeric-owner;
# End of xprootfs
| true
|
49db817b23a245117a6c012d691eb49525ea175d
|
Shell
|
batya239/rg-graph
|
/phi4/graphs/run_SDN_all_tp.sh
|
UTF-8
| 338
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/sh
# Submits one PBS job (rundiagN-tp.sh) per diagram listed in
# _to_tpclusterN.txt, throttling so that no more than NQ-tp.txt jobs of
# user mkompan sit in the queue at once.
# NOTE(review): MODEL is read but never used below -- possibly consumed
# by rundiagN-tp.sh through some other channel; confirm.
MODEL=`cat modelN.txt`
NPOINTS=1000000
cd $PWD
for i in `cat _to_tpclusterN.txt`; do
# Poll the queue every 10 s until our job count drops below the limit
# (the limit file is re-read each pass so it can be tuned live).
TT=`qstat -a @pbs-tp.hpc.cc.spbu.ru|grep mkompan|wc -l`
M=`cat NQ-tp.txt`
while [ $TT -gt $M ]; do
#echo $TT
sleep 10
M=`cat NQ-tp.txt`
TT=`qstat -a @pbs-tp.hpc.cc.spbu.ru|grep mkompan|wc -l`
done
echo $i
sh rundiagN-tp.sh $i $NPOINTS
done
| true
|
2825d83fa28c1eb4bbe259f2a4f1b42bd4cd85dd
|
Shell
|
durgeshmishra24x7/Quick_Tutorial
|
/Shell Script Example
|
UTF-8
| 2,354
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
#Author : Durgesh Mishra
#Date Created : 11-06-2021
#Description :This script will output Control file data into .csv format
SOURCE="D:/Shell_scri[t"
#echo "My source file is $SOURCE"
#This Function will UNZIP the folder
# $1 - path of the zip archive; extracted into the current directory.
function unzipp() {
#echo "Calling Function - " $1
#locate $1
echo " "
echo "UNZIPPING........................................................../"
unzip "$1"
}
#This Function will identify Control file
function identify_control_file(){
TARGET_DIR="$PWD"
echo "Our target directory is $TARGET_DIR"
locate $TARGET_DIR
if [ -d "$TARGET_DIR" ]; then
echo "$TARGET_DIR directory is VALID."
#This Loop will search CONTROL file in the SOURCE directory
for file in "$TARGET_DIR"/*
do
for sub_files in "$file"/*
do
#echo "SubFoler -----> $sub_files"
for control_file in "$sub_files"/*
do
echo "$control_file"
if [[ "$control_file" == *"Control_file.txt"* ]]; then
echo "$control_file file is a Control file"
#EXPORT DATA INTO ,CSV/.JSON
value=`$control_file`
echo $value
#value = $control_file
#echo value
#grep value | tr "\\t" "," > output_file.csv
else
echo " "
echo "$control_file file is not a CONTROL file"
fi
done
done
done
else
myscript [unix] (04:55 11/06/2021) 45,1-8 Top
"myscript" [unix] 91L, 2111B
| true
|
5dd783239e95df016ab6bd9a4598b2f12f8111cd
|
Shell
|
mglaman/drupal-puphpet
|
/scripts/setup-commerce-deploy.sh
|
UTF-8
| 499
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/sh
# Check out (or update) the Commerce Deploy distribution under
# profiles/, then build a fresh copy of it into sites/commerce_deploy.
ROOT_DIR="$( cd "$( dirname "$( dirname "$0" )" )" && pwd )"
REPO_DIR=$ROOT_DIR/profiles/commerce_deploy
SITE_DIR=$ROOT_DIR/sites/commerce_deploy
echo "Checking out latest of Commerce Deploy distribution..."
if [ -d "$REPO_DIR" ]; then
  cd "$REPO_DIR" && git pull
else
  # BUG FIX: the clone used to target $SITE_DIR, but $SITE_DIR is wiped
  # below before every build, destroying the checkout and making the
  # later `cd $REPO_DIR` fail. Clone into the repository dir instead.
  git clone --branch 7.x-1.x http://git.drupal.org/project/commerce_deploy.git "$REPO_DIR"
fi
echo "Building development version of Commerce Deploy distribution..."
rm -rf "$SITE_DIR"
cd "$REPO_DIR" && sh build.sh "$SITE_DIR"
| true
|
6f1eb4c837547ceac266f0ba2cbab80b9439a871
|
Shell
|
freenas/iocage-plugin-nextcloud
|
/overlay/usr/local/bin/generate_letsencrypt_certificates
|
UTF-8
| 968
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/sh
# Replace the jail's self-signed certificate with a Let's Encrypt one:
# back up the old cert, run certbot in webroot mode against the
# Nextcloud docroot, then regenerate the nginx config and restart it.
set -eu
. load_env
domain_name=${1:-}
admin_email=${2:-}
if [ "$domain_name" = "" ]
then
echo "Please provide a domain name: generate_letsencrypt_certificates <domain_name> <admin_email>"
exit 1
fi
if [ "$admin_email" = "" ]
then
echo "Please provide a admin email: generate_letsencrypt_certificates <domain_name> <admin_email>"
exit 1
fi
# Move self-signed certificates
# (randomized /tmp path so repeated runs never collide)
tmp_backup="/tmp/$(openssl rand --hex 8)"
mkdir -p "$tmp_backup"
mv /usr/local/etc/letsencrypt/live/truenas "$tmp_backup"
echo "Old certificates moved to: $tmp_backup"
# Ask letsencrypt for some certificates
# --force-renewal reissues even if an unexpired cert already exists.
certbot certonly \
--rsa-key-size 4096 \
--cert-name truenas \
--non-interactive \
--webroot \
--webroot-path /usr/local/www/nextcloud \
--force-renewal \
--agree-tos \
--email "$admin_email" \
--domain "$domain_name"
# Refresh nginx configuration to use 443 as HTTPS port
sync_configuration
# Restart nginx
service nginx restart
| true
|
8604e0f5fa2f7a1061b24aafa482795480886007
|
Shell
|
hongchan2/LinuxFileExplore
|
/main.sh
|
UTF-8
| 15,328
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
# Curses-style file explorer: draws a bordered multi-pane UI with tput
# and tracks the current listing / cursor state in the globals below.
declare -a files # store all files in current directory
declare -i INDEX=0 # files array index
declare -i x_current
declare -i y_current
declare -i x_print
declare -i y_print
declare -i index_num # total index number
declare -a dir_path
declare -a file_path
declare -a file_origin_path # store original path
declare -i print_tree_cnt=0 # tree-line counter, used to cap output height
call_print_SecondWin="false" # to Optimization program ( add 2nd project )
call_print_ThirdWin="false" # to Optimization program ( add 2nd project )
zip="*.zip" # glob pattern used to flag compressed files
gz="*.gz" # glob pattern used to flag compressed files
trashbin="2015202065-TrashBin" # name of the trash-bin directory
trashbin_path="${HOME}/${trashbin}" # store trashbin path
in_trash="false" # whether we are currently browsing the trash bin
firstPrint() { # draw a horizontal border segment: (47 + $1) '=' signs, no newline
declare -i total=$((47 + $1))
for (( i = 0; i < total; i++ )); do
printf '='
done
}
secondPrint() { # draw a vertical border segment: (6 + $1) lines, each a single '|'
declare -i rows=$((6 + $1))
for (( i = 0; i < rows; i++ )); do
printf '|\n'
done
}
secondPrintDraw(){ # draw a vertical '|' separator at column $1, rows 2..29
# Uses tput cup (row, column) to position each '|' individually.
for (( i=2; i<30; i++ ))
do
tput cup $i $1
echo "|"
done
}
printBorder() { # draw the full UI frame: title bar, pane headers/separators, totals, END bar
# Title row.
tput cup 0 0
firstPrint "0"
echo -n " 2015202065 HongChan Yun "
firstPrint "0"
echo ""
# "List" header row.
firstPrint "10"
echo -n " List "
firstPrint "10"
echo ""
# Left border plus the three vertical pane separators (cols 29/76/119).
secondPrint "22"
secondPrintDraw "29"
secondPrintDraw "76"
secondPrintDraw "119"
firstPrint "6"
echo -n " Information "
firstPrint "7"
echo ""
secondPrint "0"
firstPrint "9"
echo -n " Total "
firstPrint "10"
echo -e "\n|"
firstPrint "10"
echo -n " END "
firstPrint "11"
}
sortDir() { # keep only the directories of the array passed by name (e.g. sortDir arr[@])
# The result replaces the global `files`; `arr` is (re)assigned as a
# global, matching the original behaviour.
arr=("${!1}")
declare -a keep=()
for (( i = 0; i < ${#arr[@]}; i++ )); do
if [ -d "${arr[i]}" ]; then
keep+=("${arr[i]}")
fi
done
files=("${keep[@]}")
}
sortFile() { # append the plain files of the array passed by name to global `files`
# Complements sortDir: it is expected to run after sortDir so `files`
# ends up as directories first, then regular files.
declare -a fileArr
declare -i j=0
arr=("${!1}")
for (( i=0; i<${#arr[@]}; i++ ))
do
# FIX: quote the expansion -- unquoted, any file name containing
# whitespace word-split and broke (or mis-targeted) the -f test.
if [ -f "${arr[i]}" ]
then
fileArr[j]=${arr[i]}
j=${j}+1
fi
done
files=("${files[@]}" "${fileArr[@]}")
}
firstWindow_Exception() { # truncate an over-wide entry in the list pane
# Overwrites column 26 of the current row with "..." and redraws the
# pane's right border at column 29. Relies on the caller having set
# the global y_print to the row being rendered.
tput cup ${y_print} 26
echo -n "..."
tput cup ${y_print} 29
echo -n "| "
}
printFirst_Window() { # function to print ordered all current directory and files
parr=("${!1}")
y_print=2
for (( i=0; i<${#parr[@]}; i++ ))
do
tput cup ${y_print} 1
if [ ${#parr[$i]} -gt 27 ]
then
if [ -d ${parr[$i]} ]
then
echo [34m "${parr[$i]}" [0m
firstWindow_Exception
elif [ -f ${parr[$i]} ]
then
if [ -x ${parr[$i]} ]
then
echo [32m "${piarr[$i]}" [0m
firstWindow_Exception
elif [ ${parr[$i]} = ${zip} ] || [ ${parr[$i]} = ${gz} ]
then
echo [31m "${parr[$i]}" [0m
firstWindow_Exception
else
tput cup ${y_print} 2
echo "${parr[$i]}"
firstWindow_Exception
fi
fi
else
if [ -d ${parr[$i]} ]
then
if [ ${parr[$i]} = ${trashbin} ] # add 2nd project
then
echo [33m "${parr[$i]}" [0m
else
echo [34m "${parr[$i]}" [0m
fi
elif [ -f ${parr[$i]} ]
then
if [ -x ${parr[$i]} ]
then
echo [32m "${parr[$i]}" [0m
elif [ ${parr[$i]} = ${zip} ] || [ ${parr[$i]} = ${gz} ]
then
echo [31m "${parr[$i]}" [0m
else
tput cup ${y_print} 2
echo "${parr[$i]}"
fi
fi
fi
if [ ${y_print} -gt 29 ] # to deal exception (too many directories and files)
then
tput cup 29 2
echo -n " "
tput cup 29 2
echo -n "..."
echo
tput cup 30 0
echo -n "============================="
break
fi
y_print=${y_print}+1
done
}
getcurrentTreeDir() {
# Collects every entry of the cwd (no ".." entry here, unlike
# getcurrentDir) and hands the array to the tree-view sorters.
# NOTE(review): sort_TreeDir / sort_TreeFile are defined elsewhere in
# this file (distinct from sortDir/sortFile) -- confirm they exist.
# Index deliberately starts at 1, leaving arr[0] unset.
declare -a arr
declare -i i=1
for var in *
do
arr[i]=$var
i=$i+1
done
sort_TreeDir arr[@]
sort_TreeFile arr[@]
}
getcurrentDir() { # rebuild the global $files list: ".." followed by every CWD entry
  # Fix: the original assigned files=(${arr[@]}) unquoted, so any name
  # containing whitespace was split into multiple list entries. The quoted
  # form keeps one array element per directory entry.
  local -a listing=("..")
  local entry
  for entry in *
  do
    [ -e "$entry" ] || [ -L "$entry" ] || continue # empty dir: skip literal '*'
    listing+=("$entry")
  done
  files=("${listing[@]}")
  # sortDir listing[@]
  # sortFile listing[@]
}
secondWindow_Exception() { # function to deal exception (about long width)
# Redraw the preview pane's right border ("| " at col 76) on all 28 rows so
# long file lines printed by printSecond_Window cannot bleed past the pane.
# NOTE(review): y_print=${y_print}+1 only increments if y_print carries the
# integer attribute (declare -i) somewhere earlier in the file — confirm.
x_print=76
y_print=2
for (( i=0; i<28; i++ ))
do
tput cup ${y_print} ${x_print}
echo "| "
y_print=${y_print}+1
done
}
printSecond_Window() { # render the first 28 lines of the selected file in the preview pane
  # The selected entry is files[INDEX] (globals set by inputKey).
  # Improvement: one sequential read of the file instead of re-scanning it
  # with `more +N | head -1` once per line (O(n^2) -> O(n)); also shows a
  # final line that lacks a trailing newline, which `wc -l` used to miss.
  x_print=30
  y_print=2
  local line
  local -i row=0
  tput cup ${y_print} ${x_print}
  while IFS= read -r line || [ -n "$line" ]
  do
    if [ $row -gt 27 ] # pane is 28 rows tall
    then
      break
    fi
    printf '%s\n' "$line"
    y_print=$((y_print+1))
    tput cup ${y_print} ${x_print}
    row+=1
  done < "${files[$INDEX]}"
  secondWindow_Exception
}
search_Tree(){
# Recursively print a coloured tree of ${2}/${1} into the third (right-hand)
# pane, one entry per row, indenting "...." per depth level. Depth is capped
# at 5 and total printed rows at 29 (via the global print_tree_cnt). Names
# too wide for the remaining column budget are abbreviated to "...".
# NOTE(review): entries are enumerated with `ls` and word-splitting, so names
# containing whitespace will be mangled — confirm acceptable for this tool.
x_print=77
y_print=${y_print}+1
tput cup ${y_print} ${x_print}
local name=$1 # define local variable, because it is recursive function
local path=$2 # define local variable, because it is recursive function
declare -i depth=$3
declare -i depth_min=4*depth # all variables at bottom declared for treat exception
declare -i max=38-${depth_min}
# declare -i max="39-4*depth"
declare -i lenght=${#name}
declare -i result=${max}-${lenght}
declare -i f_result=${max}-${lenght}+3
print_tree_cnt=${print_tree_cnt}+1
if [ "${print_tree_cnt}" -ge "29" ] # to deal exception (too long tree <long lenght>) 29
then
return
fi
if [ ${depth} -eq 6 ] # to print 5 depth tree
then
depth=0
y_print=${y_print}-1
return
fi
for (( i=0; i<${depth}; i++ )) # to print tree structure
do
echo -n "...."
done
# Width-budget exceeded: print a colour-coded "..." instead of the name.
if [ -d "${path}/${name}" ]
then
if [ ${result} -lt 1 ]
then
echo -n " -" [34m " ..." [0m # to treat exception (too long width)
return
fi
elif [ -f "${path}/${name}" ]
then
if [ ${f_result} -lt 1 ]
then
if [ -x "${path}/${name}" ]
then
echo -n [32m " ..." [0m
elif [ ${name} = ${zip} ] || [ ${name} = ${gz} ]
then
echo -n [31m " ..." [0m
else
echo -n " ..."
fi
return
fi
fi
# Name fits: print it (blue dir, green executable, red archive, plain file).
if [ -d "${path}/${name}" ]
then
echo -n " -" [34m "${name}" [0m
elif [ -f "${path}/${name}" ]
then
if [ -x "${path}/${name}" ]
then
echo -n [32m "${name}" [0m
elif [ ${name} = ${zip} ] || [ ${name} = ${gz} ]
then
echo -n [31m "${name}" [0m
else
echo -n " ${name}"
fi
fi
if [ -d "${path}/${name}" ] # recursive call to print tree directory structure
then
for next in `ls ${path}/${name}`
do
search_Tree "${next}" "${path}/${name}" "${depth}+1"
done
fi
}
printAccessTime(){ # print "Access time: Mon DD HH:MM:SS YYYY" for files[INDEX]
  # Improvement: a single `stat -c %x` call (the original invoked stat four
  # times) and substring slicing of its "YYYY-MM-DD HH:MM:SS.…" output.
  local ts year month day time
  ts=$(stat -c %x "${files[$INDEX]}")
  year=${ts:0:4}
  month=${ts:5:2}
  day=${ts:8:2}
  time=${ts:11:8}
  case "${month}"
  in
  01) month="Jan";;
  02) month="Feb";;
  03) month="Mar";;
  04) month="Apr";;
  05) month="May";;
  06) month="Jun";;
  07) month="Jul";;
  08) month="Aug";;
  09) month="Sep";;
  10) month="Oct";;
  11) month="Nov";;
  12) month="Dec";;
  esac
  echo "Access time: ${month} ${day} ${time} ${year}"
}
printFourth_Window() { # function to print directory or file's information
# Info panel (rows 31-36): name, colour-coded type, human-readable size,
# access time, octal permissions and absolute path of files[INDEX].
# Cursor is restored to (y_current, x_current) afterwards so the selection
# highlight stays where inputKey left it.
y_print=31
tput cup ${y_print} 1
echo "file name : ${files[$INDEX]}"
y_print=${y_print}+1
tput cup ${y_print} 1
if [ -d ${files[$INDEX]} ]
then
if [ ${files[$INDEX]} = ${trashbin} ]
then
echo "File type :" [33m "TrashBin" [0m
else
echo "File type :" [34m "Directory" [0m # change for 2nd project (upper alpabet)
fi
elif [ -f ${files[$INDEX]} ]
then
if [ -x ${files[$INDEX]} ]
then
echo "File type :" [32m "Execute file" [0m
elif [ ${files[$INDEX]} = ${zip} ] || [ ${files[$INDEX]} = ${gz} ]
then
echo "File type :" [31m "Compressed file" [0m
else
echo "file type : Normal file"
fi
fi
y_print=${y_print}+1
tput cup ${y_print} 1
echo "File size : `du -sh ${files[$INDEX]} | cut -f 1`" # change for 2nd project (human readable size)
y_print=${y_print}+1
tput cup ${y_print} 1
printAccessTime
y_print=${y_print}+1
tput cup ${y_print} 1
echo "Permission : `stat -c %a ${files[$INDEX]}`"
y_print=${y_print}+1
tput cup ${y_print} 1
echo "Absolute path : $PWD/${files[$INDEX]}"
tput cup ${y_current} ${x_current} # reset current x and y
}
printFifth_Window() { # function to print current directory's total information
# Status row (row 38): total entry count, directory count, "special" files
# (executables + archives matching $zip/$gz), normal files, and the
# human-readable size of the current directory. The trashbin directory is
# excluded from both the total and the directory count.
x_print=20
y_print=38
declare -i cnt_total=`ls | wc -w`
declare -i cnt_dir=`ls -lF | grep / | wc -l`
declare -i cnt_sfile=0
declare -i cnt_allfile=0
declare -i cnt_file=0
for (( i=0; i<${#files[@]}; i++ )) # to count files and Sfiles
do
if [ -d ${files[$i]} ]
then
if [ ${files[$i]} = ${trashbin} ] # add 2nd project
then
cnt_total=${cnt_total}-1
cnt_dir=${cnt_dir}-1
fi
elif [ -f ${files[$i]} ]
then
cnt_allfile=${cnt_allfile}+1
if [ -x ${files[$i]} ] || [ ${files[$i]} = ${zip} ] || [ ${files[$i]} = ${gz} ]
then
cnt_sfile=${cnt_sfile}+1
fi
fi
done
cnt_file=${cnt_allfile}-${cnt_sfile}
tput cup ${y_print} ${x_print}
echo "Total: ${cnt_total}, Directory: ${cnt_dir}, SFile: ${cnt_sfile}, NFile: ${cnt_file}, Size: `du -sh | cut -f 1`"
# change for 2nd project (upper alpabet and human readable size)
}
clearFirst_Window() { # blank the listing pane: 28 rows (2-29) x 28 cols from col 1
  # Improvement: one printf of 28 spaces per row instead of 28 cursor moves
  # and single-space echos per row (784 tput calls -> 28).
  x_print=1
  y_print=2
  for (( i=0; i<28; i++ ))
  do
    tput cup ${y_print} ${x_print}
    printf '%28s' ''
    y_print=$((y_print+1))
  done
}
clearThird_Window() { # blank the tree pane: 28 rows (2-29) x 42 cols from col 77
  # Improvement: one printf of 42 spaces per row instead of per-cell echos.
  x_print=77
  y_print=2
  for (( i=0; i<28; i++ ))
  do
    tput cup ${y_print} ${x_print}
    printf '%42s' ''
    y_print=$((y_print+1))
  done
}
clearSecond_Window() { # blank the preview pane: 28 rows (2-29) x 46 cols from col 30
  # Improvement: one printf of 46 spaces per row instead of per-cell echos.
  x_print=30
  y_print=2
  for (( i=0; i<28; i++ ))
  do
    tput cup ${y_print} ${x_print}
    printf '%46s' ''
    y_print=$((y_print+1))
  done
}
clearFourth_Window() { # blank the info panel: 6 rows (31-36) x 118 cols from col 1
  # Improvement: one printf of 118 spaces per row instead of per-cell echos.
  x_print=1
  y_print=31
  for (( i=0; i<6; i++ ))
  do
    tput cup ${y_print} ${x_print}
    printf '%118s' ''
    y_print=$((y_print+1))
  done
}
clearFifth_Window() { # blank the single status row (row 38, cols 1-110)
  # Improvement: one printf of 110 spaces instead of 110 cursor moves/echos.
  # (x_print is left at 1 rather than the original's 111; every consumer of
  # x_print re-assigns it before use.)
  x_print=1
  y_print=38
  tput cup ${y_print} ${x_print}
  printf '%110s' ''
}
remove_trash() { # permanently delete the selected entry (used while inside the trashbin)
  # Directories need the recursive flag; everything else is removed directly.
  local target=${files[$INDEX]}
  if [ -d "$target" ]
  then
    rm -r "$target"
  else
    rm "$target"
  fi
}
get_dir_file_path(){ # function to store all directory and file path
# Recursively walks ${2}/${1} and records, for every descendant:
#   dir_path[]          - relative subtree path (used to mkdir in the trash)
#   file_path[]         - relative file path (used as the copy destination)
#   file_origin_path[]  - absolute source path (used as the copy source)
# $3 accumulates the relative prefix across recursive calls.
# NOTE(review): enumeration uses `ls` with word-splitting, so names with
# whitespace will break the walk — confirm acceptable for this tool.
local name=$1 # define local variable, because it is recursive function
local path_origin=$2 # define local variable, because it is recursive function
local path=$3 # define local variable, because it is recursive function
if [ -d "${path_origin}/${name}" ]
then
dir_path=("${dir_path[@]}" "${path}/${name}") # store path
elif [ -f "${path_origin}/${name}" ]
then
file_path=("${file_path[@]}" "${path}/${name}") # store path
file_origin_path=("${file_origin_path[@]}" "${path_origin}/${name}") # store absoulte path
fi
if [ -d "${path_origin}/${name}" ]
then
for next in `ls ${path_origin}/${name}`
do
get_dir_file_path "${next}" "${path_origin}/${name}" "${path}/${name}" # recursively call
done
fi
}
remove_to_trash(){ # function to remove to trashbin
# Moves files[INDEX] into ${trashbin_path} by reconstruction: for a
# directory, get_dir_file_path collects the subtree, mkdir -p rebuilds the
# directory skeleton and cat copies each file; for a plain file only the
# cat copy is needed. The original is then deleted.
# NOTE(review): trashbin_path is a global defined elsewhere in this file —
# confirm it is set before the first delete.
if [ -d ${files[$INDEX]} ] # mkdir to make all subdir and cat to copy all files
then
get_dir_file_path "${files[$INDEX]}" "${PWD}"
for (( i=0; i<${#dir_path[@]}; i++ ))
do
mkdir -p ${trashbin_path}${dir_path[$i]}
done
for (( i=0; i<${#file_path[@]}; i++ ))
do
cat ${file_origin_path[$i]} > ${trashbin_path}${file_path[$i]}
done
dir_path=() # reset directory path array
file_path=() # reset file path array
file_origin_path=() # reset file original path array
rm -r ${files[$INDEX]}
fi
if [ -f ${files[$INDEX]} ]
then
cat ${files[$INDEX]} > ${trashbin_path}/${files[$INDEX]} # copy to trashbin
rm ${files[$INDEX]}
fi
}
remove() { # function to remove file and directory
# Delete the selected entry: inside the trashbin ($in_trash) the delete is
# permanent, otherwise the entry is moved into the trashbin first. Then
# redraw the listing and status panes and reset the selection to row 0.
if [ ${in_trash} = "true" ]
then
remove_trash
else
remove_to_trash
fi
clearFirst_Window # clear first, fifth and print new first fifth window
clearFifth_Window
getcurrentDir
printFirst_Window files[@]
printFifth_Window
INDEX=0
x_current=2
y_current=2
tput cup ${y_current} ${x_current}
}
inputKey() {
# Main event loop: draw the listing, then repeatedly read one key and act:
#   Up/Down  - move the selection (INDEX / y_current)
#   Enter    - descend into a directory (recursing into inputKey) or preview
#              a non-archive file in the second window
#   d        - delete the selection (to/inside the trashbin)
#   t        - draw the directory tree of the selection in the third window
# NOTE(review): key_up/key_down hold the tails of ANSI arrow sequences and
# key_enter likely holds a literal control byte that this listing cannot
# display — confirm the raw bytes survive copy/paste.
key_up="[A"
key_down="[B"
key_enter=""
key_delete="d"
key_tree="t"
# key_space=" "
# key_return="r"
getcurrentDir
printFirst_Window files[@]
printFifth_Window # print current directory's information
index_num=${#files[@]}-1
while [ 1 ]
do
printFourth_Window # print directory or file's information
read -sn 1 key # change for 2nd project (to get one key)
if [ "${key}" = "" ]
then
read -sn 2 key_two
key="${key}${key_two}" # to treat three key
fi
if [ "${call_print_SecondWin}" = "true" ] # optimize program
then
clearSecond_Window
call_print_SecondWin="false"
fi
if [ "${call_print_ThirdWin}" = "true" ] # optimize program
then
clearThird_Window
call_print_ThirdWin="false"
fi
if [ "${key}" = "${key_up}" ] # if input key is up direction
then
INDEX=${INDEX}-1
y_current=${y_current}-1
elif [ "${key}" = "${key_down}" ] # if input key is down direction
then
if [ ${INDEX} -eq ${index_num} ] # to deal exception (no more file)
then
INDEX=${INDEX}
y_current=${y_current}
else
INDEX=${INDEX}+1
y_current=${y_current}+1
fi
elif [ "${key}" = "${key_enter}" ] # if input key is enter
then
if [ -d ${files[$INDEX]} ]
then
# Entering the trashbin switches delete semantics (see remove()).
if [ "${files[$INDEX]}" = "${trashbin}" ]
then
in_trash="true"
else
in_trash="false"
fi
cd ${files[$INDEX]}
clearFourth_Window
clearFirst_Window
clearFifth_Window
x_current=2
y_current=2
INDEX=0
tput cup ${y_current} ${x_current}
inputKey
elif [ -f ${files[$INDEX]} ]
then
# Archives ($zip/$gz) are not previewed; everything else is.
if [ ${files[$INDEX]} != ${zip} ] && [ ${files[$INDEX]} != ${gz} ]
then
printSecond_Window
call_print_SecondWin="true"
fi
fi
elif [ "${key}" = "${key_delete}" ] # if input key is d
then
remove # to remove file and directory
elif [ "${key}" = "${key_tree}" ] # if input key is t
then
if [ -d ${files[$INDEX]} ]
then
y_print=1
search_Tree "${files[$INDEX]}" "${PWD}" "0"
call_print_ThirdWin="true" # to clean third window
print_tree_cnt=0 # reset cnt
x_print=77
y_print=30
tput cup ${y_print} ${x_print}
echo -n "=============================="
echo -n "============="
fi
fi
if [ ${y_current} -eq 1 ] # to deal exception (invade up area)
then
y_current=2
INDEX=0
fi
if [ ${y_current} -eq 30 ] # to deal exception (invade down area)
then
y_current=29
INDEX=${INDEX}-1
fi
clearFourth_Window
tput cup ${y_current} ${x_current}
done
}
main() { # function to initialize first screen
# Entry point: clear the terminal, draw the static border, home the cursor
# on the first listing row (2,2), then hand control to the key loop.
clear
x_current=2
y_current=2
printBorder # print border
tput cup ${y_current} ${x_current}
inputKey
}
main
| true
|
a3d5df4a8503556a32e383e9ba6baac047873889
|
Shell
|
AdamSvetec/statera-dataprocessing
|
/scripts/data_processing.sh
|
UTF-8
| 2,168
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
#Script for end to end data run
data_folder="/home/ubuntu/BulkData/"
source_folder="/home/ubuntu/379SeniorProject/"
fortune_n_file="fortune_500.csv"
bill_scores_file="bill_scores.csv"
issues_file="issues.csv"
votes_folder="votes/"
cf16_folder="CampaignFin16/"
r_command="R --vanilla -q --slave -f"
r_script_folder=$source_folder"r_dp/"
read -s -p "Please enter password: " local_pass
echo ""
echo "Begining end to end data processing run..."
#Copying user generated data files to data directory
cp -fp -- $source_folder"extraneous/"$issues_file $data_folder
cp -fp -- $source_folder"extraneous/"$bill_scores_file $data_folder
cp -fp -- $source_folder"extraneous/"$fortune_n_file $data_folder
#Preliminary
echo "Performing preliminary check"
$r_command $r_script_folder"preliminary.R"
#Data Upload
echo "Begining data upload"
echo "Uploading user defined issues"
$r_command $r_script_folder"issues_upload.R"
echo "Uploading legislators from govtrack"
$r_command $r_script_folder"legislator_upload.R"
echo "Uploading opensecrets legislator data"
$r_command $r_script_folder"campaignfin16_upload.R"
echo "Uploading opensecrets contribution data"
sudo mysql --password=$local_pass --verbose dataprocessing < ../db_utils/indivs_read.sql
echo "Uploading govtrack voting records"
$r_command $r_script_folder"voting_upload.R"
#Filter out companies not in fortune 500
echo "Reducing organizations to fortune 500 companies"
$r_command $r_script_folder"fortune_n_reduce.R"
#Upload Bill Scores
echo "Uploading user defined bill leans"
$r_command $r_script_folder"bill_score_upload.R"
#Legislator Mapping
echo "Merging legislator data from govtrack and OpenSecrets"
$r_command $r_script_folder"legislator_merge.R"
#Scoring
echo "Scoring legislators"
$r_command $r_script_folder"legislator_scoring.R"
echo "Scoring organizations"
$r_command $r_script_folder"organization_scoring.R"
#sudo mysql --password=$local_pass --verbose dataprocessing < ../db_utils/org_scoring.sql
#Output results
echo "Creating results file"
$r_command $r_script_folder"output_results.R"
#Collect Metrics
echo "Collecting Metrics"
$r_command $r_script_folder"overall_metric_score.R"
| true
|
e93275dde483e1b1c69472629e270c888882bd22
|
Shell
|
startupheroes/startupheroes-scripts
|
/bin/react-app.sh
|
UTF-8
| 1,002
| 2.703125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
npm install
npm install -g code-push-cli
code-push login --accessKey ${CODE_PUSH_ACCESS_KEY}
code-push release-react ${CODE_PUSH_REPO_NAME} android -d "Staging" --dev false
cd android
./gradlew dependencies
./gradlew assembleInternalRelease crashlyticsUploadDistributionInternalRelease
./gradlew assembleProductionRelease crashlyticsUploadDistributionProductionRelease
cd ..
mkdir /tmp/bugsnag
node node_modules/react-native/local-cli/cli.js bundle \
--platform android \
--dev false \
--entry-file index.android.js \
--bundle-output /tmp/bugsnag/index.android.bundle \
--sourcemap-output /tmp/bugsnag/index.android.map
find android/app/build/outputs/apk/*-release.apk | awk -F '-' '{print $3}' | while read line; do
curl https://upload.bugsnag.com/ \
-F apiKey=${BUGSNAG_API_KEY} \
-F appVersion=$line \
-F minifiedUrl="index.android.bundle" \
-F sourceMap=@/tmp/bugsnag/index.android.map \
-F minifiedFile=@/tmp/bugsnag/index.android.bundle \
-F overwrite=true
echo
done
| true
|
074d60eb30f5f579473d8714290598f7160df965
|
Shell
|
ownport/microk8s-snippets
|
/scripts/microk8s.sh
|
UTF-8
| 2,405
| 3.953125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# exit immediately on error
set -e
# fail on undeclared variables
set -u
# Grab the directory of the scripts, in case the script is invoked from a different path
SCRIPTS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
# Useful routines in common.sh
. "${SCRIPTS_DIR}/common.sh"
MICROK8S_CHANNEL="1.18/stable"
# ====================================================
# Help screen
# ----------------------------------------------------
function help() {
  # Usage text for the $@ command dispatcher at the bottom of this script.
  cat <<'USAGE'
./manage.sh <command>
 help - this help
 create_aliases - create aliases: kubectl
 fix_flannel_subnet_conf - fix flannel subnet configuration
 install - install microk8s
USAGE
}
# ====================================================
# Create aliases
# ----------------------------------------------------
function create_aliases() {
  # Append the kubectl alias to ~/.bash_aliases exactly once, creating the
  # file if missing, then source it.
  # Fix: the original used `[ -z "$(cat … | grep kubectl)" ] && { … }`, which
  # returns 1 when the alias already exists — and this script runs under
  # `set -e`, so a second invocation aborted the whole script. `if ! grep -q`
  # is both correct under errexit and avoids the useless cat.
  if [ -f ~/.bash_aliases ]; then
    if ! grep -q kubectl ~/.bash_aliases; then
      echo "alias kubectl='microk8s kubectl'" >> ~/.bash_aliases
    fi
  else
    echo "alias kubectl='microk8s kubectl'" > ~/.bash_aliases
  fi
  source ~/.bash_aliases
}
# ====================================================
# Fix flannel subnet configuration
# ----------------------------------------------------
function fix_flannel_subnet_conf() {
# Overwrite flannel's subnet env file with a fixed 10.5.0.0/16 network.
# The first `tee` truncates the file; the following `tee -a` calls append.
FLANNEL_SUBNET_CONF="/var/snap/microk8s/common/run/flannel/subnet.env"
sudo mkdir -p /var/snap/microk8s/common/run/flannel/ && \
echo "FLANNEL_NETWORK=10.5.0.0/16" | sudo tee ${FLANNEL_SUBNET_CONF} && \
echo "FLANNEL_SUBNET=10.5.72.1/24" | sudo tee -a ${FLANNEL_SUBNET_CONF} && \
echo "FLANNEL_MTU=1450" | sudo tee -a ${FLANNEL_SUBNET_CONF} && \
echo "FLANNEL_IPMASQ=false" | sudo tee -a ${FLANNEL_SUBNET_CONF}
}
# ====================================================
# Install microk8s
# ----------------------------------------------------
function install() {
# Install the pinned microk8s snap, grant a local user non-root access, and
# enable the base add-on set (dns, helm3, storage, registry).
# NOTE(review): the username "drey" is hardcoded — confirm it should not be
# derived from $USER/$SUDO_USER before reuse on another machine.
info "Install microk8s, 1.18/stable" && \
sudo snap install microk8s --classic --channel ${MICROK8S_CHANNEL} && \
info "Update user rights/permissions for local use" && \
sudo usermod -a -G microk8s drey && \
sudo chown -f -R drey ~/.kube
info "Activate microk8s plugins" && \
sudo microk8s enable \
dns helm3 storage registry
}
$@
| true
|
b0a49ae54967b1f57d259f399ffdb6640d9cbd74
|
Shell
|
locochris/dotfiles
|
/.bin/startdwm
|
UTF-8
| 162
| 2.625
| 3
|
[] |
no_license
|
#!/bin/sh
# Launch dwm: feed each line of conky output into the root window name (the
# dwm status bar) in the background, and restart dwm in a loop so that
# quitting or crashing re-execs it without ending the X session.
dwm_dir=${HOME}/.dwm
conky -c ${dwm_dir}/conkyrc | while read -r; do xsetroot -name "$REPLY"; done &
while true; do dwm 2> ${dwm_dir}/dwm.err; done;
| true
|
b49b177175c53d66cea021753f237d77107db7a2
|
Shell
|
jorgeflores742/JF_AT08
|
/task5/script.sh
|
UTF-8
| 423
| 2.546875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
echo "~ Installing java 8 ~"
#install java (OpenJDK 8, required by Jenkins)
sudo apt update
sudo apt-get install openjdk-8-jdk -y
echo "~ Installing jenkins ~"
#install jenkins 2.150.3 (pinned in the apt-get line below)
wget -q -O - https://pkg.jenkins.io/debian/jenkins-ci.org.key | sudo apt-key add -
sudo sh -c 'echo deb http://pkg.jenkins.io/debian-stable binary/ > /etc/apt/sources.list.d/jenkins.list'
sudo apt-get update
sudo apt-get install jenkins=2.150.3 -y
| true
|
506d3ac1fb7e8d738edaea5c8507a6a2339b9381
|
Shell
|
kobylinski/baset
|
/src/baset/vendor.sh
|
UTF-8
| 3,232
| 4.28125
| 4
|
[] |
no_license
|
baset_vendor_install() {
local root=$(baset_root)
local path=$(baset_path)
local vendor=$(baset_vendor)
# Ensure that .baset file exists
if [ ! -f "$path" -a -w "$root" ]; then
touch "$path"
fi
# Ensure that vendor directory already exists
if [ ! -d "$vendor" ]; then
mkdir "$vendor"
fi
local repo=$(cat $path)
local line data i response
# List of selected repositories to install
local installNames=() # Repository name represents target directory or file in vendor directory
local installUrls=() # Repository source url
local installType=() # Repository type: http file or git repository
# Installation status
# 0 - ready
# 1 - already installet
# 2 - not available
# 3 - invalid type
local installStatus=()
# Loop through the manifest file
while IFS=',' read -ra line; do
installNames+=("${line[0]}")
installUrls+=("${line[1]}")
case "${line[1]}" in
# Git repository adapter
git@*)
installType+=("git")
if [ -d "$vendor/${line[0]}" ]; then
installStatus+=(1)
else
git ls-remote -h "${line[1]}" --exit-code &> /dev/null
response=$?
if [ "$response" -eq 0 ]; then
installStatus+=(0)
else
installStatus+=(2)
fi
fi
;;
# Remote file adapter
http*)
installType+=("file")
# Omit if file exists
if [ ! -f "$vendor/${line[0]}" ]; then
# Check if file is available by remote
file_http_available "${line[1]}"
if [ $? -eq 0 ]; then
installStatus+=(0)
else
installStatus+=(2)
fi
else
installStatus+=(1)
fi
;;
*)
installType+=("unknown")
installStatus+=(3)
;;
esac
done <<< "$repo"
# Format response raport to be nice and readable
local vLength=$(strlen "${installNames[@]}") name;
i=0
for name in "${installNames[@]}"; do
printf "\033[${FPF}mInstalling \033[${FN}m%-${vLength}s " "$name"
case "${installStatus[$i]}" in
0)
case "${installType[$i]}" in
git)
git clone "${installUrls[$i]}" $vendor/$name &> /dev/null
if [ -d "$vendor/$name/.git" ]; then
printf " \033[${FS}m done \033[${FN}m\n"
else
printf " \033[${FE}m error during loading \033[${FN}m\n"
fi
;;
file)
file_http_get "${installUrls[$i]}" "$vendor/$name"
if [ $? -eq 0 ]; then
printf " \033[${FS}m done \033[${FN}m\n"
else
printf " \033[${FE}m error during loading \033[${FN}m\n"
fi
;;
esac
;;
1)
printf " \033[${FS}m already installed \033[${FN}m\n"
;;
2)
printf " \033[${FE}m repository not available \033[${FN}m\n"
;;
3)
printf " \033[${FW}m invalit type \033[${FN}m\n"
;;
*)
printf " \033[${FW}m invalid status \033[${FN}m\n"
;;
esac
i=$(($i+1))
done
}
| true
|
4668f2be2cde8c2b0dace22cb363c6c7477f5f98
|
Shell
|
jcstr/arch4edu
|
/raspi-config/PKGBUILD
|
UTF-8
| 794
| 2.96875
| 3
|
[] |
no_license
|
# Maintainer: Jefferson Gonzalez <jgmdev@gmail.com>
pkgname=raspi-config
pkgver=r2.aa2dfd1
pkgrel=1
pkgdesc="Raspberry pi raspi-config utility adapted for ArchLinux ARM."
arch=('armv6h' 'armv7h' 'aarch64')
url="https://github.com/jgmdev/alarm-raspi-config"
license=('MIT')
depends=('xorg-xrandr' 'libnewt')
provides=('raspi-config')
source=("git://github.com/jgmdev/alarm-raspi-config.git")
md5sums=('SKIP')
_gitname=alarm-raspi-config
pkgver() {
# Derive a VCS-based version string: "r<commits>.<shorthash>" via
# `git describe`, falling back to rev-list count + log parsing when the
# repository has no tags.
cd "${srcdir}/${_gitname}"
( set -o pipefail
git describe --long 2>/dev/null | sed 's/\([^-]*-g\)/r\1/;s/-/./g' ||
printf "r%s.%s" \
"$(git rev-list --count HEAD)" \
"$(git log | head -n 1 | cut -d" " -f2 | awk '{print substr($0,0,7)}')"
)
}
package() {
# Install the built files into the packaging root via the project Makefile.
cd "${srcdir}/$_gitname"
make DESTDIR="${pkgdir}" install
}
makedepends=('git')
| true
|
41813e970286c06eb1ba98e23091ab02b874c808
|
Shell
|
ospray/ospray_studio
|
/gitlab/run-dev-img-cmp.sh
|
UTF-8
| 1,678
| 3.109375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -x
## Copyright 2015 Intel Corporation
## SPDX-License-Identifier: Apache-2.0
set -e
OSPRAY_VER="devel"
export LD_LIBRARY_PATH=$CACHE_DIR/ospray-$OSPRAY_VER/build/install/lib:$LD_LIBRARY_PATH
SCRIPT_DIR=$(pwd)/$(dirname "$0")
echo $SCRIPT_DIR
cd ./build
#IMG_DIFF_TOOL=$CACHE_DIR/../tools/img_diff/img_diff
pip3 install --user --no-warn-script-location scikit-image argparse numpy sewar reportlab imagecodecs
model_fns=(bunny.sg hairball.sg Peonies.sg sponza.sg)
model_dirs=(bunny hairball Peonies sponza)
models=(bunny hairball Peonies sponza)
cam_cnt=(2 1 1 1)
mse=(0.000001 0.000001 0.1 0.000001)
results="model-results"
mkdir -p ${results}
for i in "${!models[@]}";do
#copy cameras file to app location
cp $CACHE_DIR/datasets/${model_dirs[i]}/cams.json .
./ospStudio batch --format png --denoiser --spp 32 \
--resolution 1024x1024 --image ${results}/c-${models[i]} \
--loadCams cams.json \
$CACHE_DIR/datasets/${model_dirs[i]}/${model_fns[i]}
rm cams.json
echo "model ${model_dirs[i]}/${model_fns[i]} -> c-${models[i]} CI exit code $?"
#$IMG_DIFF_TOOL $CACHE_DIR/datasets/${models[i]}.png ${results}/c-${models[i]}.00000.png ${mse[i]}
#echo "MSE c-${models[i]} CI exit code $?"
#using an sg means it can't have --forceRewrite, always move 00000 to -----
set -e
for ((j=0; j<${cam_cnt[i]}; j++));
do
mv ${results}/c-${models[i]}.0000$j.png ${results}/c-${models[i]}.----$j.png
python3 $SCRIPT_DIR/image-comparison.py --reference $CACHE_DIR/datasets/${models[i]}/${models[i]}_$j\_gold.png --candidate ${results}/c-${models[i]}.----$j.png --mse ${mse[i]}
done
done
| true
|
7fb18315993dc26abcd0098d6b8c05e14d1e452a
|
Shell
|
TheBlackParrot/blockland-dedi-linux-guide
|
/assets/scripts/bl-ded
|
UTF-8
| 1,463
| 3.90625
| 4
|
[] |
no_license
|
#!/bin/sh
export WINEDLLOVERRIDES="mscoree,mshtml="
## Blockland Dedicated Server Launcher
## Written by Greek2me.
# Define defaults.
attach_screen=false
game_mode="Custom"
server_mode="dedicated"
server_number=-1
server_path="/blockland/server"
# Parse arguments.
OPTIND=1
while getopts "ac:g:ln:p:" opt; do
case "$opt" in
a) attach_screen=true
;;
c) server_customArgs=$OPTARG
;;
g) game_mode=$OPTARG
;;
l) server_mode="dedicatedLAN"
;;
n) server_number=$OPTARG
;;
p) server_path=$OPTARG
;;
esac
done
shift $((OPTIND-1))
[ "$1" = "--" ] && shift
# Validate the server path.
if [ ! -f "$server_path/Blockland.exe" ]; then
echo "No Blockland server was found at $server_path."
exit 1
fi
# Determine which server number this is.
if [ "$server_number" -lt 0 ]; then
server_number=$(screen -list | grep -c blockland-server)
fi
# Launch the server.
screen -dmS blockland-server$server_number xvfb-run wineconsole --backend=curses $server_path/Blockland.exe ptlaaxobimwroe -$server_mode -gamemode $game_mode $server_customArgs
sleep 1
# Check that the server exists.
if screen -list | grep -q "blockland-server$server_number"; then
echo "Starting server $server_number"
echo " > Running game mode $game_mode"
else
echo "Failed to start server."
exit 1
fi
# Attach to the server.
if [ "$attach_screen" = true ]; then
screen -x blockland-server$server_number
if [ $? -gt 0 ]; then
echo "Failed to attach to server."
fi
fi
| true
|
b258d9a77ed5c4e27fb3dd53b972ee71da948e8d
|
Shell
|
wd5m/echoirlp
|
/filemirror/scripts/dtmfregen
|
UTF-8
| 508
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# Plays DTMF digits with check for COS, key, .25 second delay, unkey
# Requires DTMF_REGENERATION=YES in the environment; $1 is the digit string.
if [ "$DTMF_REGENERATION" != "YES" ] ; then exit 1; fi
# Check for user defined commands (a custom_regen hook may fully handle the
# request; a non-zero hook status means "handled, stop here")
if [ -f "$CUSTOM"/custom_regen ]; then
if ! "$CUSTOM"/custom_regen $1 $2 ; then exit 0; fi
fi
if [ "$#" = "0" ] ; then exit 1; fi
# Silence any audio players before keying the transmitter.
killall ispeaker &>/dev/null
killall sfswrapper &>/dev/null
$BIN/coscheck
$BIN/key
usleep ${TXDELAY:-250000}
$BIN/dial "$1"
$BIN/unkey
# Restart the sound wrapper if a connection is active.
if [ -f $LOCAL/active ] ; then
$SCRIPT/sfswrapper
fi
exit 0
| true
|
dad82c538c3816e4ceace83dda4d4338ce83952c
|
Shell
|
drozdowsky/rc-files
|
/.local/bin/interface
|
UTF-8
| 342
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/sh
#
# http://github.com/mitchweaver/bin
#
# get correct wifi interface
#
# Prints the first interface whose name contains "wl" (wlan0, wlp3s0, …),
# stripping ifconfig decorations; OpenBSD's ifconfig layout differs, hence
# the separate parsing pipeline.
case $(uname) in
Linux|FreeBSD)
ifconfig -a | grep 'wl' | sed 's/Link//' | awk '{print $1}' | head -n 1 | sed 's/://'
;;
OpenBSD)
ifconfig -a | grep -B 3 wl | head -1 | awk '{print $1}' | head -n 1 | sed 's/://'
;;
esac
| true
|
448e99800b15fd3f3edfb20a16c21f350b19e7f3
|
Shell
|
gustavorobertux/install_golang_parrot
|
/golang_install.sh
|
UTF-8
| 283
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/bash
# Install golang and git, then wire up GOROOT/GOPATH/PATH in ~/.bashrc.
sudo apt install golang -y
# Install git
sudo apt install git -y
echo "export GOROOT=/usr/lib/go" >> "$HOME/.bashrc"
echo "export GOPATH=\$HOME/go" >> "$HOME/.bashrc"
# Fix: the PATH line must be written with literal $GOPATH/$GOROOT/$PATH so
# they expand when .bashrc is *sourced*. The original double-quoted them, so
# they expanded immediately — and since GOPATH/GOROOT are not set in this
# shell, a broken PATH line was written to .bashrc.
echo 'export PATH=$GOPATH/bin:$GOROOT/bin:$PATH' >> "$HOME/.bashrc"
source "$HOME/.bashrc"
| true
|
9e5ab4a8cfe95371b9544cd10ba7922119a421e6
|
Shell
|
maxbert/EnvairoBlinkUp
|
/deployment/bin/setup_deploy_keys.sh
|
UTF-8
| 493
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
set -e
# Copy the generated public deploy key to a remote host's authorized_keys.
function add_key() {
ssh-copy-id -i ./deployment/config/deploy_rsa.pub $1
}
echo "Setting up deploy key..."
# Generate the Travis deploy keypair, encrypt the private half for CI, and
# authorize the public half on the deployment target.
ssh-keygen -t rsa -b 4096 -C "build@travis-ci.org" -f ./deployment/config/deploy_rsa
travis encrypt-file ./deployment/config/deploy_rsa --add
mv deploy_rsa.enc ./deployment/config/deploy_rsa.enc
add_key "ec2-user@dashboard.envairo.com"
echo "Deploy key created. Please save your private key in a safe place, then remove it from the repository."
echo "E.g., rm deployment/config/deploy_rsa"
| true
|
21f33d0506b0e1b30d0620481e7e1fdba14f04d4
|
Shell
|
KarelWintersky/BodyBuildingShop
|
/cron/backup.db.sh
|
UTF-8
| 1,494
| 3.359375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
SCRIPT="${0}"
SCRIPT_BASEDIR="$(dirname ${SCRIPT})"
if [[ ${SCRIPT_BASEDIR} != '.' ]]; then
SCRIPT_PATH=`echo ${SCRIPT_BASEDIR}`
else
SCRIPT_PATH=`echo ${PWD}`
fi
. ${SCRIPT_PATH}/_config.conf
CLOUD_CONTAINER="BBS_SQL"
DATABASES=(
bodybuildingshop
)
for DB in "${DATABASES[@]}"
do
FILENAME_SQL=${DB}_${NOW}.sql
FILENAME_GZ=${DB}_${NOW}.gz
mysqldump -Q --single-transaction -h "$MYSQL_HOST" "$DB" | pigz -c > ${TEMP_PATH}/${FILENAME_GZ}
rclone delete --config ${RCLONE_CONFIG} --min-age ${MIN_AGE_DAILY} ${RCLONE_ACCOUNT}:${CLOUD_CONTAINER}/DAILY
rclone copy --config ${RCLONE_CONFIG} -L -u -v "$TEMP_PATH"/"$FILENAME_GZ" ${RCLONE_ACCOUNT}:${CLOUD_CONTAINER}/DAILY
# if it is a sunday (7th day of week) - make store weekly backup (42 days = 7*6 + 1, so we storing last six weeks)
if [[ ${NOW_DOW} -eq 1 ]]; then
rclone delete --config ${RCLONE_CONFIG} --min-age ${MIN_AGE_WEEKLY} ${RCLONE_ACCOUNT}:${CLOUD_CONTAINER}/WEEKLY
rclone copy --config ${RCLONE_CONFIG} -L -u -v "$TEMP_PATH"/"$FILENAME_GZ" ${RCLONE_ACCOUNT}:${CLOUD_CONTAINER}/WEEKLY
fi
# backup for first day of month
if [[ ${NOW_DAY} == 01 ]]; then
rclone delete --config ${RCLONE_CONFIG} --min-age ${MIN_AGE_MONTHLY} ${RCLONE_ACCOUNT}:${CLOUD_CONTAINER}/MONTHLY
rclone copy --config ${RCLONE_CONFIG} -L -u -v "$TEMP_PATH"/"$FILENAME_GZ" ${RCLONE_ACCOUNT}:${CLOUD_CONTAINER}/MONTHLY
fi
rm "$TEMP_PATH"/"$FILENAME_GZ"
done
| true
|
e0b4675edb789002edfa9b0792ea022cadd6e3e5
|
Shell
|
jiangkui/linux
|
/bin/publish42.sh
|
UTF-8
| 2,398
| 3.5
| 4
|
[] |
no_license
|
#!/bin/bash
#
#-----------------版权--------------------
# 名称:发布静态文件
# 版本:1.0
# 语言:bash shell
# 日期:2016年01月04日11:39:53
# 作者:ljk
# 邮件:1013939150@qq.com
#-----------------环境--------------------
# Linux 3.13.0
# GNU Bash 4.3.11
#-----------------参数--------------------
tryAgain=$1
serverMt="root@192.168.1.42"
serverLvs="work@lvs"
#-----------------数组--------------------
#serverPublishWarPathBin["one_production"]=". ~/bin/go-onebaby; pwd"
#-----------------方法-------------------
function printLog(){
    # Print the message framed by '=' rules, padded with blank lines.
    # ($1 is intentionally left unquoted to match the original behaviour:
    # runs of whitespace in the argument collapse to single spaces.)
    local rule="================================================"
    printf '\n%s\n' "$rule"
    echo $1
    printf '%s\n\n' "$rule"
}
function uploadWar(){
# Copy mt.zip from the mt host to the lvs host (via a remote scp hop) and
# verify the transfer by comparing md5sums; retry up to 3 times on mismatch.
# Relies on globals: serverMt, serverLvs, serverFile, tryAgain.
localMd5=$(ssh ${serverMt} "cd /data/work/nginx/www; md5sum mt.zip" | awk '{print $1}')
serverMd5=""
uploadNum=0
while [[ ${localMd5} != ${serverMd5} ]]; do
((uploadNum=uploadNum + 1))
if [[ ${uploadNum} -gt 3 ]] ; then
echo "上传次数超过3次,上传失败!"
exit 0;
fi
echo "第 ${uploadNum} 次上传。。。"
echo "远程执行 scp /data/work/nginx/www/mt.zip ==> ${serverLvs}:${serverFile} 请等待。。。"
ssh ${serverMt} "scp /data/work/nginx/www/mt.zip ${serverLvs}:${serverFile}"
serverMd5=$(ssh ${serverLvs} "md5sum ${serverFile}" | awk '{print $1}')
echo "localFileMd5:${localMd5}"
echo "serverWarMd5:${serverMd5}"
if [[ ${localMd5} != ${serverMd5} ]]; then
echo "md5sum 不等,文件上传过程中有损坏!"
# Mismatch: ask for confirmation unless "y" was passed on the command line.
if [[ ${tryAgain} != "y" ]]; then
read -p "请输入 y/n 重新尝试上传!" tryAgain
if [[ ${tryAgain} != "y" ]]; then
exit 0;
fi
fi
else
echo "md5sum 相等,文件上传成功!"
fi
done
}
#-----------------main-------------------
echo "$(printLog "打 zip 包")"
echo "$(ssh ${serverMt} "cd /data/work/nginx/www; ./go.sh")"
echo "$(printLog "打包完成")"
#获取公网路径
serverFile=$(ssh ${serverLvs} ". ~/bin/go-money; pwd")"/mt.zip"
uploadWar
read -p "是否远程执行自动发布脚本?(lvs auto-mt.sh) 请输入 y/n " autoMt
if [[ ${autoMt} == "y" ]];
then
echo $(ssh ${serverLvs} ". ~/bin/go-money; . ./auto-mt.sh")
fi
| true
|
b35c06b14c99bbe269925bea623b069bba3ca393
|
Shell
|
archoncap/FrameworkBenchmarks
|
/toolset/setup/linux/systools/gcc-4.9.sh
|
UTF-8
| 293
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/bash
# Install GCC/G++ 4.9 from the ubuntu-toolchain-r PPA, guarded by a marker
# file so repeated runs are no-ops.
# NOTE(review): the bare `return 0` only works when this script is *sourced*
# by the framework toolchain (not executed) — confirm before running alone.
RETCODE=$(fw_exists ${IROOT}/gcc-4.9.installed)
[ ! "$RETCODE" == 0 ] || { \
source $IROOT/gcc-4.9.installed
return 0; }
sudo add-apt-repository ppa:ubuntu-toolchain-r/test -y
sudo apt-get -yq update
sudo apt-get install -qqy gcc-4.9 g++-4.9
touch $IROOT/gcc-4.9.installed
| true
|
742c8e72786981cead84857473dfc40fb2a82b38
|
Shell
|
brucewu16899/email2db-php
|
/bin/m2dctl
|
UTF-8
| 5,697
| 3.703125
| 4
|
[] |
no_license
|
#! /bin/sh
### BEGIN INIT INFO
# Provides: m2dctl
# Required-Start: $local_fs $remote_fs $network $syslog
# Required-Stop: $local_fs $remote_fs $network $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# X-Interactive: true
# Short-Description: mail2db initscript
# Description: This file should be used to construct scripts to be
# placed in /etc/init.d and /usr/sbin
### END INIT INFO
# 16/07/2009 Copyright Igor Moiseev
# PATH should only include /usr/* if it runs after the mountnfs.sh script
PATH=/sbin:/usr/sbin:/bin:/usr/bin
PWD=`pwd`
# verbosity dirrective
VERBOSE='--verbose'
#VERBOSE='' # verbosity dirrective
MODE='750' # directory mode creation
# GENERAL Commands
LN='/bin/ln' LS='/bin/ls'
ECHO='/bin/echo' CP='/bin/cp'
RM='/bin/rm' CHMOD='/bin/chmod'
# Folders
ROOT="" # modify for production: use Root ""
VAR=${ROOT}'/var'
ETC=${ROOT}'/etc'
RUN=${VAR}'/run'
LOG=${VAR}'/log'
SBIN=${ROOT}'/usr/sbin'
INITD=${ETC}'/init.d'
NAMEP='mail2db'
SECR=${ROOT}'/root/security'
CONF=${ETC}/${NAMEP} # Configurations folder
LOGS=${LOG} # Folder for logs
PIDS=${RUN}/${NAMEP} # Folder for pids
HOME=${VAR}/${NAMEP} # Home folder
USERN='mail2faxsystem' # User name
NAME='mail2db' # Daemon name and file name to execute
DESC="Mail2DB daemon: "
DAEMON=${HOME}/$NAME
DAEMON_ARGS=""
PIDFILE=${PIDS}/${NAME}.pid
LOGFILE=${LOGS}/${NAME}.log
SCRIPTNAME=/etc/init.d/m2dctl
# Exit if the package is not installed
[ -f "$DAEMON" ] || exit 0
# Read configuration variable file if it is present
[ -r /etc/default/$NAME ] && . /etc/default/$NAME
# Load the VERBOSE setting and other rcS variables
. /lib/init/vars.sh
# Define LSB log_* functions.
# Depend on lsb-base (>= 3.0-6) to ensure that this file is present.
. /lib/lsb/init-functions
#
# Function that starts the daemon/service
#
# Start the daemon unless it is already running (probed with --test).
do_start()
{
	# Return
	#   0 if daemon has been started
	#   1 if daemon was already running
	#   2 if daemon could not be started
	start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON --test > /dev/null \
		|| return 1
	start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON -- \
		$DAEMON_ARGS > /dev/null\
		|| return 2
	# Add code here, if necessary, that waits for the process to be ready
	# to handle requests from services started subsequently which depend
	# on this one.  As a last resort, sleep for some time.
}
#
# Function that stops the daemon/service
#
# Stop the daemon: TERM with a 30s grace period, then KILL; finally make
# sure no stray $DAEMON processes remain and remove the stale pidfile.
do_stop()
{
	# Return
	#   0 if daemon has been stopped
	#   1 if daemon was already stopped
	#   2 if daemon could not be stopped
	#   other if a failure occurred
	start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE
	RETVAL="$?"
	[ "$RETVAL" = 2 ] && return 2
	# Wait for children to finish too if this is a daemon that forks
	# and if the daemon is only ever run from this initscript.
	# If the above conditions are not satisfied then add some other code
	# that waits for the process to drop all resources that could be
	# needed by services started subsequently.  A last resort is to
	# sleep for some time.
	start-stop-daemon --stop --quiet --oknodo --retry=0/30/KILL/5 --exec $DAEMON
	[ "$?" = 2 ] && return 2
	# Many daemons don't delete their pidfiles when they exit.
	rm -f $PIDFILE
	return "$RETVAL"
}
#
# Function that sends a SIGHUP to the daemon/service
#
do_check()
{
	#
	# If the daemon can reload its configuration without
	# restarting (for example, when it is sent a SIGHUP),
	# then implement that here.
	#
	# --signal 1 == SIGHUP; the exit status also doubles as an
	# "is it running?" probe (used by the 'check' action below).
	start-stop-daemon --stop --signal 1 --quiet --pidfile $PIDFILE
	RETVAL="$?"
	return "$RETVAL"
}
do_status()
{
	# sends the SIGUSR1 to master daemon CSD
	# and prints out the status of the processes
	# (--signal 10 == SIGUSR1; the daemon is expected to write its
	# status report, read back by the 'status' action below).
	start-stop-daemon --stop --signal 10 --quiet --pidfile $PIDFILE
	RETVAL="$?"
	return "$RETVAL"
}
# Print a banner line plus the supported sub-commands (usage to stderr).
usage()
{
	# Keep the unquoted expansion: word-splitting collapses the
	# trailing blank in $DESC exactly as the original output did.
	echo $DESC $NAME
	printf 'Usage: %s {start|stop|reload|restart|status|tree|log}\n' "$SCRIPTNAME" >&2
	printf '\n'
}
# Action dispatcher. LSB convention: log_daemon_msg announces the action,
# log_end_msg 0/1 reports success/failure of the do_* helper above.
case "$1" in
  start)
	log_daemon_msg "Starting $DESC" "$NAME"
	do_start
	case "$?" in
		0|1) log_end_msg 0 ;;   # started, or already running
		2) log_end_msg 1 ;;     # could not start
	esac
	;;
  stop)
	log_daemon_msg "Stopping $DESC" "$NAME"
	do_stop
	case "$?" in
		0|1) log_end_msg 0 ;;   # stopped, or was not running
		2) log_end_msg 1 ;;     # could not stop
	esac
	;;
  check)
	# crontab check "/etc/init.d/m2dctl check || /etc/init.d/m2dctl start"
	#log_daemon_msg "Checking running status for $DESC" "$NAME"
	# Silent probe for cron: exit status of the SIGHUP delivery only.
	do_check
	RETVAL="$?"
	#log_end_msg $RETVAL
	exit $RETVAL
	;;
  restart)
	log_daemon_msg "Restarting $DESC" "$NAME"
	do_stop
	case "$?" in
	  0|1)
		do_start
		case "$?" in
			0) log_end_msg 0 ;;
			1) log_end_msg 1 ;; # Old process is still running
			*) log_end_msg 1 ;; # Failed to start
		esac
		;;
	  *)
		# Failed to stop
		log_end_msg 1
		;;
	esac
	;;
  status)
	log_daemon_msg "Status call for $DESC" "$NAME"
	do_status
	case "$?" in
		0)
			# Give the daemon a moment to write its report.
			/bin/sleep 1
			if [ -f "/tmp/status.tmp" ]; then
				log_end_msg 0
				/bin/cat /tmp/status.tmp
				# NOTE(review): the report is read from /tmp but
				# removed from ${HOME}/tmp — one of the two paths
				# looks wrong; confirm where the daemon writes it.
				$RM ${HOME}/tmp/status.tmp
				exit 2
			else
				log_end_msg 1
			fi
			;;
		*) log_end_msg 1 ;;
	esac
	;;
  tree)
	# Show the daemon's process tree (users, PIDs, args).
	/usr/bin/pstree -nulap | grep mail2db
	;;
  log)
	if [ ! -f "$PIDFILE" ]; then
		echo "No service running"
		exit 2
	fi
	# Follow the daemon log until interrupted.
	tail -f $LOGFILE
	;;
  *)
	usage
	# NOTE(review): plain echo does not expand \n — the literal two
	# characters are printed; probably meant echo -e or a blank echo.
	if [ ! -f "$PIDFILE" ]; then
		echo "No service running\n"
	fi
	exit 3
	;;
esac
:
| true
|
4191577e64f229efd75801b03f3e806355ecbd40
|
Shell
|
stg-tud/SecQL
|
/distributed-benchmarks/aws-setup/lib/aws-sd-create-namespace.sh
|
UTF-8
| 467
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
# Create a private DNS namespace in AWS Cloud Map (service discovery)
# and poll until the asynchronous operation reports SUCCESS.
#
# Usage: aws-sd-create-namespace.sh VPC_ID NAMESPACE
if [ $# -ne 2 ]; then
  echo "Usage: $0 VPC_ID NAMESPACE" >&2
  exit 1
fi

echo "Creating namespace $2 in VPC $1"

echo ">> Request namespace creation"
OPID=$(aws servicediscovery create-private-dns-namespace \
  --name "$2" \
  --vpc "$1" \
  | sed -nE '/"OperationId": "/{s/.*:\s*"(.*)"/\1/p;q}')

# Quote the command substitution: if the status line is absent the
# substitution expands to nothing, and the unquoted original aborted
# with "[: !=: unary operator expected" instead of retrying.
while [ "SUCCESS" != "$(aws servicediscovery get-operation \
  --operation-id "$OPID" | sed -nE '/"Status": "/{s/.*:\s*"(.*)",/\1/p;q}')" ]; do
  echo ">> Not created yet. Retrying in 2 seconds..."
  sleep 2
done
echo ">> Created successfully"
| true
|
e181b55414f988cf48a7c2a0ae2717a9b8af7e70
|
Shell
|
rsignell-usgs/cloud-gchp-paper
|
/scripts/download_data/fix_GMI.sh
|
UTF-8
| 506
| 2.609375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# HEMCO/GMI has some softlinks that are ignored by S3 -- recreate them
# after syncing the data down.
#
# Usage: fix_GMI.sh /path/to/ExtData
if [ -n "${1:-}" ]; then
    DATA_ROOT=$1
else
    echo 'Must specify path to ExtData/ directory'
    exit 1
fi

# Abort if the directory is missing: without this check a failed cd
# would silently create the symlinks in the current working directory.
cd "$DATA_ROOT/HEMCO/GMI/v2015-02" || exit 1

# -f makes reruns idempotent (replaces an existing link in place).
ln -sf gmi.clim.PMN.geos5.2x25.nc gmi.clim.IPMN.geos5.2x25.nc
ln -sf gmi.clim.PMN.geos5.2x25.nc gmi.clim.NPMN.geos5.2x25.nc
ln -sf gmi.clim.RIP.geos5.2x25.nc gmi.clim.RIPA.geos5.2x25.nc
ln -sf gmi.clim.RIP.geos5.2x25.nc gmi.clim.RIPB.geos5.2x25.nc
ln -sf gmi.clim.RIP.geos5.2x25.nc gmi.clim.RIPD.geos5.2x25.nc
| true
|
5760c8aa8546a3556896db74c477ddcb347a18de
|
Shell
|
NovaKe1n/null
|
/bin/wifi
|
UTF-8
| 2,043
| 4.3125
| 4
|
[] |
no_license
|
#!/usr/local/bin/bash
# wi-fi tool
# 0.8-b1
# wi-fi tool for OSX to control your wifi, antenna and sniff packets
#
# NOTE(review): the color variables used below (Green, Yellow, Red, NC)
# are never defined in this script -- presumably exported by the user's
# shell profile; if unset they simply expand to empty strings.
host="8.8.8.8"    # host pinged to verify connectivity
timeout=10        # seconds to wait after powering the antenna on
# Currently configured SSID, parsed out of networksetup's output.
network="$(networksetup -getairportnetwork en0 | cut -d':' -f2 | tr -d ' ')"
# Power the antenna on, wait, then either confirm connectivity (ping)
# or try to join the remembered network.
connect()
{
  networksetup -setairportpower en0 on
  sleep $timeout
  if ping -c1 "$host" &> /dev/null; then
    printf "Connected to ${Yellow}$network${Green}\n"
  else
    printf "Connecting to ${Yellow}$network${Green}...\n"
    networksetup -setairportnetwork en0 "$network" #2>&1 /dev/null
  fi
}
# Program: flag dispatcher. One action per invocation.
case "$1" in
  '-s')  # scan visible networks
    printf "${Green}Scanning networks...${NC}\n"
    /System/Library/PrivateFrameworks/Apple80211.framework/Versions/A/Resources/airport -s
    ;;
  '-c')  # connect to the SSID given as $2
    if [[ -n "$2" ]]; then
      printf "${Green}Connecting to:${Yellow} $2\n"
      networksetup -setairportpower en0 on
      sleep 10
      networksetup -setairportnetwork en0 "$2"
    else
      printf "${Green}Give me a network!\n"
    fi
    ;;
  '-r')  # bounce the antenna and rejoin the remembered network
    printf "${Green}Reconnecting to:${Yellow} $network\n"
    printf "${Green}Network down...\n"
    networksetup -setairportpower en0 off
    printf "Network up...\n"
    connect
    ;;
  '-d')
    printf "${Green}Disconnecting...${NC}\n"
    sudo networksetup -setnetworkserviceenabled Wi-Fi off
    ;;
  '-S')
    printf "${Green}Shutdown...${NC}\n"
    networksetup -setairportpower en0 off
    ;;
  '-P')
    printf "${Green}Power Up...${NC}\n"
    sudo networksetup -setnetworkserviceenabled Wi-Fi on
    networksetup -setairportpower en0 on
    ;;
  '-a')  # sniff packets on the channel given as $2
    if [[ -n "$2" ]]; then
      # Fixed: ${Yhellow} was a typo for ${Yellow} (undefined variable).
      printf "${Green}Sniffing ${Yellow}en0 ${Green}on channel ${Yellow}$2${NC}\n"
      airport en0 sniff "$2"
    else
      printf "${Red}Please give me a channel.\n"
    fi
    ;;
  *) printf """
${Green}$(basename "$0")${NC} is a network tool.
${Yellow}Usage:${NC}
	-s	scan		Scan networks
	-c	connect		Connect to a network
	-d	disconnect	Disconnect from network
	-S	shutdown	Disable the Wi-Fi
	-P	power up	Enable the Wi-Fi
	-a	(sniff) CHANNEL]	Sniff packets on given channel
${NC}
""" ;;
esac
| true
|
73fbb7aa1d321070620ca0bafc917b3456188a7e
|
Shell
|
alexandre1985/bin
|
/cloudflare-purge-all
|
UTF-8
| 404
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/bash
# Purge the entire Cloudflare cache for the zone below.
# Required environment: LESSPASS_EMAIL, CLOUDFLARE_KEY.
EMAIL=${LESSPASS_EMAIL}
AUTH_KEY=${CLOUDFLARE_KEY}
ZONE_ID=77a6c2ae28ca89c89d6d077510dce3fe
API_LINK=https://api.cloudflare.com/client/v4/zones/${ZONE_ID}/purge_cache

# Purge all
CMD=$(curl --silent -X POST "${API_LINK}" -H "X-Auth-Email: ${EMAIL}" -H "X-Auth-Key: ${AUTH_KEY}" -H "Content-Type: application/json" --data '{"purge_everything":true}')

# Quote the response: unquoted $CMD was word-split and glob-expanded
# before being handed to jq, which can mangle the JSON.
echo "$CMD" | jq -r '.success'
# NOTE(review): exit 0 masks API failures; callers only see the printed
# "true"/"false".
exit 0
| true
|
a26f74077e0f340c359b5793e31adddcf2b9271a
|
Shell
|
guangminghe/openstack-installer-rdo
|
/launch-instance.sh
|
UTF-8
| 1,367
| 2.75
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Boot a minimal OpenStack demo: flat provider network + subnet, a tiny
# flavor, keypair and security-group rules, then a CirrOS instance.
set -x
source config/config.sh
source ../openstack_installer_temp/admin-openrc
# Address pool the provider subnet may hand out; site-specific values.
START_IP="192.168.1.120"
END_IP="192.168.1.150"
DNS="211.137.130.3"
GATEWAY="192.168.1.1"
# ${SUBNET} below is expected to come from config/config.sh -- TODO confirm.
openstack network create  --share --external --provider-physical-network provider --provider-network-type flat provider
openstack subnet create --network provider --allocation-pool start=${START_IP},end=${END_IP} --dns-nameserver ${DNS} --gateway ${GATEWAY} --subnet-range ${SUBNET} provider
openstack flavor create --id 0 --vcpus 1 --ram 64 --disk 1 m1.nano
# Switch to the unprivileged demo project for the instance itself.
source ../openstack_installer_temp/demo-openrc
echo -e "\n" | ssh-keygen -q -N ""
openstack keypair create --public-key ~/.ssh/id_rsa.pub mykey
openstack keypair list
openstack security group rule create --proto icmp default
openstack security group rule create --proto tcp --dst-port 22 default
openstack flavor list
openstack image list
openstack network list
openstack security group list
# NOTE(review): grep/cut over table output is fragile -- it breaks if any
# other network name contains "provider"; consider `-f value -c ID`.
NET_ID=$(openstack network list | grep provider | cut -d ' ' -f 2)
openstack server create --flavor m1.nano --image cirros --nic net-id=${NET_ID} --security-group default --key-name mykey  provider-instance
openstack server list
openstack console url show provider-instance
# ping -c 4 192.168.1.130
# ssh cirros@192.168.1.130
# Teardown (original comment: 删除 = "delete"):
# openstack subnet delete provider
# openstack network delete provider
| true
|
1edf5ed3ad2441ad32ebec5711c93622c07eb0d3
|
Shell
|
Catamondium/scratch
|
/test.sh
|
UTF-8
| 649
| 3.859375
| 4
|
[
"Unlicense"
] |
permissive
|
#!/bin/bash
#GNU bash, version 4.3.48(1)-release (x86_64-pc-linux-gnu)
: '
# Function testing
function gettime () {
response=`addtime $1 $2`
echo -e "gettime: $*\n$response"
return 0
}
start=3:30
length=60
gettime $start $length
gettime 0:00 60
echo "Returned: $?"
'
# Boolean/conditionals testing
# Bash has no $true variable: the original `bool=$true` assigned the
# empty string, and `[ $bool -eq $true ]` degenerated to the one-argument
# test `[ -eq ]`, which succeeds only by accident. Store the literal
# string and compare it explicitly instead.
bool=true
num="10"
str="cat"
if [[ "$bool" == true ]] && (( num > 0 )) && [[ "$str" = "cat" ]]; then
	echo "Correct"
else
	echo "Error"
fi
: '
# Array testing
arr=(`seq 5 -1 1`)
echo -e "Array:\t${arr[*]}\nlength:\t$arr"
# Input testing
i=0
for v in "$@"; do
echo -e "loop: $v\t index $i"
((i++))
done
echo "back to main"
echo "$# params"
'
| true
|
1f1997b0264f0286bacd9a431e03636a9155fb13
|
Shell
|
shiftuya/Bioinformatics
|
/run.sh
|
UTF-8
| 269
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# QC + alignment check for one sample.
# Usage: run.sh SAMPLE  (expects SAMPLE.fastq.gz and ref.fna in the cwd)
#
# All uses of $1 are quoted so sample names containing spaces or glob
# characters cannot break the pipeline.
fastqc "$1".fastq.gz
mv "$1"_fastqc.html "$1".html
rm "$1"_fastqc.zip
bwa index ref.fna
bwa mem ref.fna "$1".fastq.gz > "$1".sam
# Pass when the flagstat percentage exceeds 90.
# NOTE(review): flagstat prints several percentages; the echo flattens
# them onto one line before bc — confirm the first one is the intended
# mapped-read rate.
if (( $(echo $(samtools flagstat "$1".sam | grep -o -P '[0-9]*(\.[0-9]*)?(?=%)')">90" | bc -l) )); then
  echo "OK"
else
  echo "Not OK"
fi
| true
|
65448f3a6d6cd6f3ac1e898575c0a52aad894e5c
|
Shell
|
prasenforu/openshift-origin-aws
|
/install-aws-cli.sh
|
UTF-8
| 996
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
# This script will install AWS CLI tool
# Create by Prasenjit Kar (prasenforu@hotmail.com)
# Version 0.1

# Check AWS CLI installed or not
# If not installed it will start download and install
#
# POSIX 'command -v' replaces capturing 'type' output: only the exit
# status matters here, and 'type' printed "not found" noise to stderr.
if ! command -v aws > /dev/null 2>&1; then
  echo "Installing AWS ..."
  echo "Downloading AWS CLI package and unzip"
  wget https://s3.amazonaws.com/aws-cli/awscli-bundle.zip
  unzip awscli-bundle.zip
  sudo ./awscli-bundle/install -i /usr/local/aws -b /usr/local/bin/aws
  echo "Execute below command with root priviledge in different terminal"
  echo ""
  echo ""
  echo ""
  echo "sudo ./awscli-bundle/install -i /usr/local/aws -b /usr/local/bin/aws"
  echo ""
  echo ""
  echo "Create Security file in your user ID"
  # -p makes a re-run safe if ~/.aws already exists.
  mkdir -p ~/.aws
  touch ~/.aws/config
  cat <<EOF > ~/.aws/config
[default]
aws_access_key_id=< PUT YOUR ACCESS KEY >
aws_secret_access_key=< PUT YOUR SECRET ACCESS KEY >
region=ap-southeast-2
output=text
EOF
else
  echo "AWS CLI is already Installed."
fi
| true
|
af6a9c7a9c139e6469c17d3a08f3d69e1eb6ae6f
|
Shell
|
demichele/install-hornet-1.5
|
/install.sh
|
UTF-8
| 1,008
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/bash
# Run all commands needed to install Hornet (Chrysalis testnet node).
apt update && apt dist-upgrade -y
# -y keeps the install non-interactive; without it apt prompts and the
# script hangs when run unattended.
apt install -y build-essential git

# Install Goland 1.16
wget https://golang.org/dl/go1.16.linux-amd64.tar.gz
tar -C /usr/local -xzf go1.16.linux-amd64.tar.gz
export PATH=$PATH:/usr/local/go/bin
cd /opt || exit 1

# Open dash and TCP ports
ufw allow 8081/tcp
ufw allow 15600/tcp

# Clone Hornet from develop branch
git clone -b develop --single-branch https://github.com/gohornet/hornet
cd hornet || exit 1
go build
mkdir /opt/hornet-testnet
cp /opt/hornet/hornet /opt/hornet-testnet
cp /opt/hornet/config_chrysalis_testnet.json /opt/hornet-testnet
cp /opt/hornet/peering.json /opt/hornet-testnet
cp /opt/hornet/profiles.json /opt/hornet-testnet

# Add Hornet as Service
wget https://raw.githubusercontent.com/demichele/install-hornet-1.5/main/hornet-testnet.service
mv hornet-testnet.service /lib/systemd/system/hornet-testnet.service
systemctl enable hornet-testnet.service
systemctl start hornet-testnet && journalctl -u hornet-testnet -f
| true
|
a49b39b6fc3e8f20f78e1b345f4e3ebc2ac56282
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/python2-xmltramp/PKGBUILD
|
UTF-8
| 569
| 2.640625
| 3
|
[] |
no_license
|
# Arch PKGBUILD for xmltramp, a single-file Pythonic XML API (Python 2).
pkgname=python2-xmltramp
pkgver=2.18
pkgrel=2
pkgdesc="xmltramp is a simple Pythonic API for working with XML documentation"
arch=('i686' 'x86_64')
makedepends=('python2-distribute')
url="http://www.aaronsw.com/2002/xmltramp/"
license=('GPL2')
depends=('python2')
# Upstream ships a bare module, not a tarball; it is installed directly.
source=('http://www.aaronsw.com/2002/xmltramp/xmltramp.py')
md5sums=('12d232c0bd6ef8ffbd16da760ad0ba6f')

package() {
  # Resolve python2's site-packages path and install the module there.
  python2_sitelib=$(python2 -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")
  install -Dm644 "$srcdir/xmltramp.py" "$pkgdir/${python2_sitelib}/xmltramp.py"
}
| true
|
56ec92f62696f9a8b8adbf358bb44b65a8131da1
|
Shell
|
b0elter/mke-police-blotter
|
/scraper/assets/opt/mke-pd-blt/forever.sh
|
UTF-8
| 398
| 3.3125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Wait for Postgres to accept TCP connections, initialize the schema,
# then poll the data source forever.
SLEEP=120

# Probe ${PGHOST}:${PGPORT} with curl and report curl's exit status.
# stdout/stderr are discarded so that nothing but the numeric status
# reaches the caller -- any body curl printed would corrupt the
# comparison in the while loop below.
database_ready() {
  curl --silent --connect-timeout 1 "http://${PGHOST}:${PGPORT}" >/dev/null 2>&1
  echo $?
}

echo "Waiting for database to be ready..."
# curl exits 52 ("empty reply from server") once it reaches a listener
# that is not speaking HTTP -- i.e. Postgres is up and accepting TCP.
while [ "$(database_ready)" -ne 52 ]; do
  sleep 1
done

set -e

echo "Run schema initialization..."
node init-schema.js

echo "Begin polling data source..."
while true; do
  node scraper.js
  sleep "${SLEEP}"
done
| true
|
8633def0ce99c69093a1a3dfc26cd8149885e5f2
|
Shell
|
yuksiy/setup_tools_options_for_master
|
/setup_pkg_list_local_make_debian.sh
|
UTF-8
| 4,508
| 4.1875
| 4
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/sh
# ==============================================================================
# 機能
# パッケージリスト(ローカル)を作成する (Debian)
# 構文
# USAGE 参照
#
# Copyright (c) 2011-2017 Yukio Shiiya
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
# ==============================================================================
######################################################################
# 基本設定
######################################################################
trap "" 28 # TRAP SET
trap "POST_PROCESS;exit 1" 1 2 15 # TRAP SET
SCRIPT_NAME="`basename $0`"
PID=$$
######################################################################
# 変数定義
######################################################################
# ユーザ変数
METHOD="http"
REMOTEHOST="ftp.jp.debian.org"
ROOTDIR="debian"
DIST="stable"
SECTIONS="main contrib non-free"
ARCH="amd64"
CUT_DIRS_NUM=3
# システム環境 依存変数
# プログラム内部変数
#DEBUG=TRUE
TMP_DIR="/tmp"
SCRIPT_TMP_DIR="${TMP_DIR}/${SCRIPT_NAME}.${PID}"
######################################################################
# 関数定義
######################################################################
PRE_PROCESS() {
	# Create the temporary working directory (original: 一時ディレクトリの作成)
	mkdir -p "${SCRIPT_TMP_DIR}"
}
POST_PROCESS() {
	# Remove the temporary directory (original: 一時ディレクトリの削除);
	# kept for inspection when DEBUG is set.
	if [ ! ${DEBUG} ];then
		rm -fr "${SCRIPT_TMP_DIR}"
	fi
}
USAGE() {
cat <<- EOF 1>&2
Usage:
setup_pkg_list_local_make_debian.sh [OPTIONS ...] PKG_LIST
ARGUMENTS:
PKG_LIST : Specify an output package list.
OPTIONS:
-d DIST
Specify the distribution of Debian.
-a ARCH
Specify the architecture of Debian.
--help
Display this help and exit.
EOF
}
. cmd_v_function.sh
######################################################################
# メインルーチン
######################################################################
# オプションのチェック
CMD_ARG="`getopt -o d:a: -l help -- \"$@\" 2>&1`"
if [ $? -ne 0 ];then
echo "-E ${CMD_ARG}" 1>&2
USAGE ${ACTION};exit 1
fi
eval set -- "${CMD_ARG}"
while true ; do
opt="$1"
case "${opt}" in
-d) DIST="$2" ; shift 2;;
-a) ARCH="$2" ; shift 2;;
--help)
USAGE;exit 0
;;
--)
shift 1;break
;;
esac
done
# 第1引数のチェック
if [ "$1" = "" ];then
echo "-E Missing PKG_LIST argument" 1>&2
USAGE;exit 1
else
PKG_LIST=$1
# パッケージリスト格納ディレクトリのチェック
PKG_LIST_DIR=`dirname "${PKG_LIST}"`
if [ ! -d "${PKG_LIST_DIR}" ];then
echo "-E \"${PKG_LIST_DIR}\" not a directory" 1>&2
USAGE;exit 1
fi
# パッケージリストのチェック
if [ -e "${PKG_LIST}" ];then
echo "-E PKG_LIST already exists -- \"${PKG_LIST}\"" 1>&2
USAGE;exit 1
fi
fi
# 作業開始前処理
PRE_PROCESS
cd "${SCRIPT_TMP_DIR}"
# Release ファイルのダウンロード
uri="${METHOD}://${REMOTEHOST}/${ROOTDIR}/dists/${DIST}/Release"
CMD_V "LANG=C wget -N -x -nH --cut-dirs=${CUT_DIRS_NUM} ${uri}"
if [ $? -ne 0 ];then
echo "-E Command has ended unsuccessfully." 1>&2
POST_PROCESS;exit 1
fi
# Packages ファイルのダウンロード
for section in ${SECTIONS} ; do
uri="${METHOD}://${REMOTEHOST}/${ROOTDIR}/dists/${DIST}/${section}/binary-${ARCH}/Packages.gz"
CMD_V "LANG=C wget -N -x -nH --cut-dirs=${CUT_DIRS_NUM} ${uri}"
if [ $? -ne 0 ];then
echo "-E Command has ended unsuccessfully." 1>&2
POST_PROCESS;exit 1
fi
done
# Packages ファイルの展開
for file in `find . -name '*.gz' | sort` ; do
CMD_V "gzip -d ${file}"
if [ $? -ne 0 ];then
echo "-E Command has ended unsuccessfully." 1>&2
POST_PROCESS;exit 1
fi
done
cd "${OLDPWD}"
# Release ファイルのVersion フィールドの表示
echo
echo -n "-I Version field of Release file: "
(cd "${SCRIPT_TMP_DIR}"; cat Release) \
| sed -n 's#^Version: \(.*\)$#\1#p'
# パッケージリストの作成
cat <<- EOF >> "${PKG_LIST}"
# pkg_group pkg_name
EOF
echo
for priority in required important standard ; do
echo "-I Now processing: priority=${priority}"
for section in ${SECTIONS} ; do
(cd "${SCRIPT_TMP_DIR}"; cat ${section}/binary-${ARCH}/Packages) \
| grep-dctrl -n -s Package -F Priority -e "${priority}" \
| awk -v priority=${priority} '{printf("%s\t%s\n",priority,$0)}'
done | LANG=C sort | uniq >> "${PKG_LIST}"
echo >> "${PKG_LIST}"
done
# 作業終了後処理
POST_PROCESS;exit 0
| true
|
5972b11c172215b0211f20654dc0e28ec706f4a5
|
Shell
|
ndrean/pg-doc
|
/sinatra/setup.sh
|
UTF-8
| 231
| 2.671875
| 3
|
[] |
no_license
|
#!/usr/bin/env sh
# Container entrypoint helper: block until PostgreSQL accepts
# connections, then exec the container command ("$@").
set -ex
# Fail fast with a clear message when the host is not configured: an
# unset/empty value made the unquoted original drop the -h argument, so
# pg_isready silently probed the default local socket instead.
: "${POSTGRES_HOST:?POSTGRES_HOST must be set}"
echo "Waiting PostgreSQL:$POSTGRES_HOST to start on 5432..."
until pg_isready -h "$POSTGRES_HOST" -p 5432; do
  sleep 1
done
echo "PostgreSQL started"
exec "$@"
# exec bundle exec rackup --host 0.0.0.0
| true
|
5a6ff17190c945481e3958d169f5e5365efcacd5
|
Shell
|
webclinic017/Termux
|
/installs/de-apt-xfce4.sh
|
UTF-8
| 1,452
| 2.984375
| 3
|
[] |
no_license
|
#!/data/data/com.termux/files/usr/bin/sh
# Install the Xfce4 desktop plus a VNC server inside a Termux-hosted
# Debian/Ubuntu environment, then start the server on display :1.
echo "Get the necessary components"
apt-get update -y
apt-get install -y xfce4
apt-get install -y xfce4-terminal
apt-get install -y tightvncserver
apt-get install -y xfe
apt-get clean
echo "Setup the necessary files"
mkdir ~/.vnc
# xstartup and the start/stop helpers come from the AnLinux resources repo.
wget https://raw.githubusercontent.com/EXALAB/AnLinux-Resources/master/Scripts/DesktopEnvironment/Apt/Xfce4/xstartup -P ~/.vnc/
wget https://raw.githubusercontent.com/EXALAB/AnLinux-Resources/master/Scripts/DesktopEnvironment/Apt/Xfce4/vncserver-start -P /usr/local/bin/
wget https://raw.githubusercontent.com/EXALAB/AnLinux-Resources/master/Scripts/DesktopEnvironment/Apt/Xfce4/vncserver-stop -P /usr/local/bin/
chmod +x ~/.vnc/xstartup
chmod +x /usr/local/bin/vncserver-start
chmod +x /usr/local/bin/vncserver-stop
echo " "
echo "You can now start vncserver by running vncserver-start"
echo " "
echo "It will ask you to enter a password when first time starting it."
echo " "
echo "The VNC Server will be started at 127.0.0.1:5901"
echo " "
echo "You can connect to this address with a VNC Viewer you prefer"
echo " "
echo "Connect to this address will open a window with Xfce4 Desktop Environment"
echo " "
echo " "
echo " "
echo "Running vncserver-start"
echo " "
echo " "
echo " "
echo "To Kill VNC Server just run vncserver-stop"
echo " "
echo " "
echo " "
# Appends DISPLAY=:1 to every future login shell. NOTE(review): the
# quoting "export DISPLAY=":1"" still concatenates to export DISPLAY=:1,
# and /etc/profile exists only inside the proot distro -- verify there.
echo "export DISPLAY=":1"" >> /etc/profile
source /etc/profile
vncserver-start
| true
|
f390568f3f69257083c13390cf7a9e031a2d50d6
|
Shell
|
kdave/xfstests
|
/tests/xfs/063
|
UTF-8
| 748
| 2.8125
| 3
|
[] |
no_license
|
#! /bin/bash
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
#
# FS QA Test No. 063
#
# xfsdump/xfsrestore with EAs
#
# xfstests harness: dump/restore a filesystem populated with extended
# attributes and verify the EAs survive the round trip.
. ./common/preamble
_begin_fstest dump attr auto quick

# Override the default cleanup function.
_cleanup()
{
	# Remove dump artifacts before the generic temp-file cleanup.
	_cleanup_dump
	cd /
	rm -f $tmp.*
}

# Import common functions.
. ./common/filter
. ./common/dump
. ./common/attr

# real QA test starts here
_supported_fs xfs
_require_attrs trusted user
_require_scratch

_scratch_mkfs_xfs >>$seqres.full || _fail "mkfs failed"
_scratch_mount

# create files with EAs
_create_dumpdir_fill_ea
_do_dump_file
_do_restore_file

# need to compare EAs
# so need to get them back
_diff_compare_eas

# success, all done
status=0
exit
| true
|
d2b361597423bc8c3360cfd1f79fe3d50d4a01e6
|
Shell
|
purdue-tlt/latex2sympy
|
/scripts/test.sh
|
UTF-8
| 554
| 3.46875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Run the project's unit tests from the repository root, inside the
# project virtualenv (.env).

# Get relative path of the root directory of the project.
# $(...) replaces backticks, and the expansion is quoted so repository
# paths containing spaces no longer break the cd below.
rdir=$(git rev-parse --git-dir) || exit 1
rel_path=$(dirname "$rdir")

# Change to that path and run the file
cd "$rel_path" || exit 1

# Activate virtual environment
echo "activating venv..."
if test -f .env/bin/activate
then . .env/bin/activate && echo "venv activate (bin)"
elif test -f .env/Scripts/activate
then . .env/Scripts/activate && echo "venv activated (Scripts)"
else exit 1
fi
echo ''

# Run unit tests
echo "starting tests..."
if pytest tests
then echo "tests finished"
else exit 1
fi
exit 0
| true
|
0c9c223b1b289fa899cdc2cc0f41a58ee555aaab
|
Shell
|
pine/dotfiles-1
|
/install-for-ubuntu.sh
|
UTF-8
| 1,367
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# Set up a dev machine: fish shell, tmux-powerline, rbenv, crenv, NeoBundle.

# The original `if [ ! \`type fish\` ]` always succeeded: with output
# discarded the test collapses to `[ ! ]`, a one-argument string test on
# "!", so fish was reinstalled on every run. Test the exit status instead.
if ! type fish >/dev/null 2>&1; then
    # install fish shell
    sudo apt-add-repository -y ppa:fish-shell/release-2
    sudo apt-get update
    sudo apt-get -y install fish
    chsh -s /usr/bin/fish
    echo "installed fish shell"
fi

if [ ! -e $HOME/.tmux/tmux-powerline ]; then
    [ ! -e $HOME/.tmux ] && mkdir $HOME/.tmux
    # install tmux-powerline
    git clone https://github.com/erikw/tmux-powerline.git $HOME/.tmux
    echo "installed tmux-powerline"
fi

if [ ! -e $HOME/.rbenv ]; then
    # install rbenv
    sudo apt-get -y install build-essential bison libreadline6-dev curl git-core zlib1g-dev libssl-dev libyaml-dev libsqlite3-dev sqlite3 libxml2-dev libxslt1-dev autoconf libncurses5-dev
    git clone git://github.com/sstephenson/rbenv.git ~/.rbenv
    git clone https://github.com/sstephenson/ruby-build.git ~/.rbenv/plugins/ruby-build
    echo "installed rbenv"
fi

if [ ! -e $HOME/.crenv ]; then
    # install crenv
    curl -L https://raw.github.com/pine613/crenv/master/install.sh | bash
    git clone https://github.com/pine613/crystal-build.git ~/.crenv/plugins/crystal-build
    echo "installed crenv"
fi

# install neobundle
if [ ! -e ~/.vim/bundle ]; then
    mkdir -p ~/.vim/bundle
    git clone git://github.com/Shougo/neobundle.vim ~/.vim/bundle/neobundle.vim
    echo "you should run following command to setup plugins -> vim -c ':NeoBundleInstall'"
fi
| true
|
f615b252c114f15efc4664a3ebfb36f7b004f265
|
Shell
|
Sumner1185/ecsd-scripting-workshop
|
/AWK/task_1.sh
|
UTF-8
| 214
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
# Read whitespace-separated integers from stdin, write the even ones to
# even.txt and the odd ones to odd.txt, then display the even numbers.
#
# Rewritten from the original pseudo-code: `Echo` is not a command,
# `for (x in y)` / bare `number >> file` are not bash syntax.
declare -a numbers
echo "Enter numbers to be split..."
read -r -a numbers
# Start from empty output files so reruns do not accumulate results.
: > even.txt
: > odd.txt
for number in "${numbers[@]}"; do
    if (( number % 2 == 0 )); then
        echo "$number" >> even.txt
    else
        echo "$number" >> odd.txt
    fi
done
cat even.txt
| true
|
467f27c3aa4c73e53184dff58a41f065d05fb52e
|
Shell
|
GildasLepennetier/black_ip_matters
|
/script.fail2ban.report.sh
|
UTF-8
| 739
| 3.34375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
set -e
# Merge the latest blocklist.de aggregate IP list into the local database
# file and print a one-line report of how many new addresses were added.
# check the website
DATABASE="blacklist.fail2ban-report.all.txt"
WEBSITE="https://lists.blocklist.de/lists/all.txt"
NAME_DISPLAY="[fail2ban-report]"
# NOTE(review): %Y-%d-%m puts day before month -- confirm that ordering
# is intentional before changing the report format.
DATE=$(date +"%Y-%d-%m %T" )
# Make the very first run work: without the database file the line count
# below would fail and 'set -e' would abort the script.
touch "${DATABASE}"
COUNT1=$(wc -l < "${DATABASE}")
wget -q "$WEBSITE" -O blacklist.fail2ban-report.all.LATEST.txt 2> /dev/null
# Extract anything that looks like an IPv4 address (grep reads the file
# directly -- no need to pipe it through cat).
grep -E -o "([0-9]{1,3}[\.]){3}[0-9]{1,3}" blacklist.fail2ban-report.all.LATEST.txt > "${DATABASE}.new"
# Merge and de-duplicate (sort -u == sort | uniq).
sort -u "${DATABASE}" "${DATABASE}.new" > "${DATABASE}.tmp"
mv "${DATABASE}.tmp" "${DATABASE}"
COUNT2=$(wc -l < "${DATABASE}")
#rm blacklist.fail2ban-report.all.LATEST.txt
rm "${DATABASE}.new"
# Random sample entry for the report line.
EXEMPLE=$(shuf -n 1 "${DATABASE}")
echo -e "$NAME_DISPLAY\t+$(($COUNT2-$COUNT1)) IP\t[total: $COUNT2]\t$DATE\t\tex:$EXEMPLE"
| true
|
ad8ade38136f7e742503e11c4ae564a181fb2cab
|
Shell
|
intfrr/Dotfiles-3
|
/scripts/markdown_link_clipboard.sh
|
UTF-8
| 230
| 3.390625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Build a Markdown link "[text](url)" from the URL on the clipboard.
# Text comes from $1, or from stdin when no argument is given.
set -e
# ~/.bin/safepaste and ~/.bin/urls are user-local helpers (not in this
# repo) -- presumably safepaste prints the clipboard and `urls -b`
# extracts a URL from it; verify against those scripts.
url=$(~/.bin/safepaste | ~/.bin/urls -b)
if [[ -z "$url" ]]; then
  echo "No URL found on the clipboard" >&2
  exit 1
fi
text="$1"
if [[ -z "$text" ]]; then
  text=$(cat)
fi
# No trailing newline so the result can be pasted inline.
echo -n "[$text]($url)"
| true
|
c6952eb9b68fcd3852215e81f70fb1bcaaf639ac
|
Shell
|
make7love/sunlight-env-install
|
/rpm/mysql/install_mariadb.sh
|
UTF-8
| 4,102
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
if [ -z "$base_path" ];then
base_path=$(cd `dirname $0`;pwd)
opt_path="$base_path"
else
opt_path="$base_path/rpm/mysql"
fi
# Stop any running mysqld, escalating: service stop -> mysqladmin
# shutdown -> SIGTERM (killall) -> SIGKILL (pkill -9).
function kill_mysql()
{
	# Count mysqld processes; only act when at least one is running.
	check_mysql_pid=$(ps -ef|grep mysqld | grep -v grep | wc -l)
	if [ $check_mysql_pid -gt 0 ];then
		systemctl stop mysql.service
		mysqladmin shutdown
		killall mysqld
		sleep 3
		# Force-kill anything that survived the graceful attempts.
		pkill -9 mysqld
		sleep 3
	fi
}
function clean_sys_for_mysql()
{
local clean_target=("libmysqlclient18", "mariadb")
}
if [ -z "$base_server_role" ];then
echo "Sooory! Cann't detect this server role...,Install terminate!!!"
exit 1
fi
if [ $(rpm -qa | grep "Maria" | wc -l ) -gt 0 ];then
echo "MariaDB Has Been Installed ~!"
echo "Skipping Install MariaDB......"
elif [[ $base_server_role -ne 1 && $base_server_role -ne 2 && $base_server_role -ne 5 ]];then
send_info "This server need not to install mysql server, skipped..."
else
echo "Checking Mysql Pid......"
if [ $(ps -ef | grep mysqld | grep -v grep | wc -l) -gt 0 ];then
kill_mysql
fi
if [ $(rpm -qa|grep mariadb | wc -l) -gt 0 ];then
rpm -e `rpm -qa|grep mariadb`
fi
if [ $(rpm -qa|grep libmysqlclient18 | wc -l) -gt 0 ];then
rpm -e `rpm -qa|grep libmysqlclient18`
fi
if [ ! -d /var/log/mysql ];then
mkdir /var/log/mysql
chmod 755 /var/log/mysql
fi
echo "Begin To Install MariaDB......"
echo "Copy mysql lib files......"
if [ ! -f /usr/lib64/libssl.so.0.9.8 ];then
cp -f "$opt_path/lib/libssl.so.0.9.8" /usr/lib64/
fi
if [ ! -f /usr/lib64/libcrypto.so.0.9.8 ];then
cp -f "$opt_path/lib/libcrypto.so.0.9.8" /usr/lib64/
fi
if [ ! -f /usr/lib64/libmysqlclient.so.16 ]; then
cp -f "$opt_path/lib/libmysqlclient.so.16.0.0" /usr/lib64/
cp -f "$opt_path/lib/libmysqlclient_r.so.16.0.0" /usr/lib64/
ln -s /usr/lib64/libmysqlclient.so.16.0.0 /usr/lib64/libmysqlclient.so.16
ln -s /usr/lib64/libmysqlclient_r.so.16.0.0 /usr/lib64/libmysqlclient_r.so.16
fi
if [ ! -d /usr/lib64/mysql/plugin ];then
mkdir -p /usr/lib64/mysql/plugin
fi
if [ ! -f /usr/lib64/mysql/plugin/lib_mysqludf_sys.so ];then
cp -f "$opt_path/lib/lib_mysqludf_sys.so" /usr/lib64/mysql/plugin/lib_mysqludf_sys.so
fi
rpm -ivh "$opt_path/maria/galera-25.3.19-1.sles12.sle12.x86_64.rpm"
rpm -ivh "$opt_path/maria/MariaDB-10.1.22-sles12-x86_64-common.rpm"
rpm -ivh "$opt_path/maria/MariaDB-10.1.22-sles12-x86_64-shared.rpm"
rpm -ivh "$opt_path/maria/MariaDB-10.1.22-sles12-x86_64-devel.rpm"
rpm -ivh "$opt_path/maria/MariaDB-10.1.22-sles12-x86_64-client.rpm"
rpm -ivh "$opt_path/maria/MariaDB-10.1.22-sles12-x86_64-cracklib-password-check.rpm"
rpm -ivh "$opt_path/maria/MariaDB-10.1.22-sles12-x86_64-server.rpm"
if [ "$base_server_role" == "2" ];then
this_node_ip=$app_private_ip
this_cluster_ip=$app_cluster_private_ip
fi
if [ "$base_server_role" == "5" ];then
this_node_ip=$db_private_ip
this_cluster_ip=$db_cluster_private_ip
fi
if [ "$base_server_role" == "1" ];then
cp -rf "$opt_path/conf/mysql-server-manage.cnf" /etc/my.cnf.d/server.cnf
systemctl enable mysql.service
systemctl start mysql
sleep 2
mysql < "$opt_path/conf/install_mysql_udf_functions.sql"
else
cp -rf "$opt_path/conf/mysql-server.cnf" /etc/my.cnf.d/server.cnf
sed -i "/^wsrep_node_address=/c\wsrep_node_address=\"$this_node_ip\"" /etc/my.cnf.d/server.cnf
sed -i "/^wsrep_cluster_address=/c\wsrep_cluster_address=\"gcomm://$this_cluster_ip\"" /etc/my.cnf.d/server.cnf
echo "Checking Mysql Pid......"
if [ $(ps -ef | grep mysqld | grep -v grep | wc -l) -gt 1 ];then
kill_mysql
fi
echo "Is this the first node to start?[Y/n]"
read x
if [ "$x" == "Y" -o "$x" == 'y' ]; then
galera_new_cluster
else
systemctl start mysql
fi
sleep 3
mysql < "$opt_path/conf/all_host_privileges.sql"
mysql < "$opt_path/conf/install_mysql_udf_functions.sql"
fi
if [ $(grep mysql /etc/passwd | wc -l) -gt 1 ];then
if [ -d /var/log/mysql ];then
chown -R mysql:mysql /var/log/mysql
chmod -R 700 /var/log/mysql
fi
fi
send_success "****** Mysql Server Has Been Installed ******"
send_info "------------------------------------------"
fi
| true
|
d199be0b2d0390ff28354a998fa99b76e1e45294
|
Shell
|
AndyA/RemarcTools
|
/media/tools/prune.sh
|
UTF-8
| 302
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
# For every file under ./incoming, list files in the mirrored directory
# under ./output that share the same basename (with any extension).
incoming="incoming"
output="output"
outabs="$PWD/$output"
# Bail out if the incoming tree is missing, instead of scanning the wrong dir.
cd "$incoming" || exit 1
# IFS= read -r preserves leading/trailing whitespace and literal backslashes
# in filenames (plain `read src` would mangle both).
find . -type f -not -name '.*' | while IFS= read -r src; do
  dir="$outabs/$( dirname "$src" )"
  base="$( basename "$src" )"
  pat="${base%.*}.*"   # drop the last extension, then match any extension
  find "$dir" -maxdepth 1 -name "$pat"
done
# vim:ts=2:sw=2:sts=2:et:ft=sh
| true
|
48721acd2ca56bb868f57d8fc43783ba02d3bf25
|
Shell
|
JPGOMEZP/Linux_Core_Kernel
|
/otc_lck_gdc_mx_test_suite-lck_suite/LCK-Test/testcases/scripts/rtc/rtc_read_time_date.sh
|
UTF-8
| 3,565
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
###############################################################################
#
# Copyright (C) 2016 Intel - http://www.intel.com/
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation version 2.
#
# This program is distributed "as is" WITHOUT ANY WARRANTY of any
# kind, whether express or implied; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
###############################################################################
############################ CONTRIBUTORS #####################################
# Author: Juan Carlos Alonso <juan.carlos.alonso@intel.com>
#
# May, 2016. Juan Carlos Alonso <juan.carlos.alonso@intel.com>
#       - Initial draft
############################ DESCRIPTION ######################################
# This script reads time and date from '/proc/driver/rtc',
# '/sys/class/rtc/rtc0/', 'date' command and 'hwclock' command.
############################ FUNCTIONS ########################################
############################ DO THE WORK ######################################
# common.sh provides test_print_trc, do_cmd, die, and the PROC_RTC /
# SYS_RTC path variables used below.
source "common.sh"
# NOTE(review): in every `do_cmd "VAR=$(...)"` below, the $(...) command
# substitution is expanded by THIS shell before do_cmd runs, so the
# following `$?` check reflects do_cmd's status, not the read itself —
# confirm this matches do_cmd's intended contract.
# READ TIME WITH `date`
test_print_trc "=== Read time with 'date' command ==="
test_print_trc "date +%H:%M:%S"
do_cmd "TIME=$(date +%H:%M:%S)"
if [ $? -eq 0 ]; then
test_print_trc "Time is $TIME"
else
die "Cannot read time with 'date' command"
fi
sleep 2
# READ DATE WITH `date`
test_print_trc "=== Read date with 'date' command ==="
test_print_trc "date +%Y-%m-%d"
do_cmd "DATE=$(date +%Y-%m-%d)"
if [ $? -eq 0 ]; then
test_print_trc "Date is $DATE"
else
die "Cannot read date with 'date' command"
fi
sleep 2
# READ TIME WITH `hwclock`
test_print_trc "=== Read time with 'hwclock' command ==="
test_print_trc "hwclock -r | awk '{print \$5}'"
do_cmd "TIME=$(hwclock -r | awk '{print $5}')"
if [ $? -eq 0 ]; then
test_print_trc "Time is $TIME"
else
die "Cannot read time with 'hwclock' command"
fi
sleep 2
# READ DATE WITH `hwclock`
test_print_trc "=== Read date with 'hwclock' command ==="
test_print_trc "hwclock -r | awk '{print \$2 \$3 \$4}'"
do_cmd "DATE=$(hwclock -r | awk '{print $2 $3 $4}')"
if [ $? -eq 0 ]; then
test_print_trc "Date is $DATE"
else
die "Cannot read date with 'hwclock' command"
fi
sleep 2
# READ TIME IN '/proc/driver/rtc'
test_print_trc "=== Read time in '$PROC_RTC' file ==="
test_print_trc "cat $PROC_RTC | grep rtc_time"
do_cmd "TIME=$(cat $PROC_RTC | grep "rtc_time" | awk '{print $3}')"
if [ $? -eq 0 ]; then
test_print_trc "RTC TIME is $TIME"
else
die "Cannot read RTC time in '$PROC_RTC' file"
fi
sleep 2
# READ DATE IN '/proc/driver/rtc'
test_print_trc "=== Read date in '$PROC_RTC' file ==="
test_print_trc "cat $PROC_RTC | grep rtc_date"
do_cmd "DATE=$(cat $PROC_RTC | grep "rtc_date" | awk '{print $3}')"
if [ $? -eq 0 ]; then
test_print_trc "RTC DATE is $DATE"
else
die "Cannot read RTC date in '$PROC_RTC' file"
fi
sleep 2
# READ TIME IN '/sys/class/rtc/rtc0'
test_print_trc "=== Read time in '$SYS_RTC' ==="
test_print_trc "cat $SYS_RTC/time"
do_cmd "TIME=$(cat $SYS_RTC/time)"
if [ $? -eq 0 ]; then
test_print_trc "RTC TIME is $TIME"
else
die "Cannot read RTC time in '$SYS_RTC'"
fi
sleep 2
# READ DATE IN '/sys/class/rtc/rtc0'
test_print_trc "=== Read date in '$SYS_RTC' file ==="
test_print_trc "cat $SYS_RTC/date"
do_cmd "DATE=$(cat $SYS_RTC/date)"
if [ $? -eq 0 ]; then
test_print_trc "RTC DATE is $DATE"
else
die "Cannot read RTC date in '$SYS_RTC' file"
fi
| true
|
5dc06fe50e021c761726975d2c7c554230540fb6
|
Shell
|
ivan-claire/RPL_Extension
|
/runSimTwoOF.sh
|
UTF-8
| 3,444
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
# Sweep link quality (pdr 0.1 .. 1.0) for two RPL objective functions (OF0 and
# MRHOF) by rewriting CONNECTIVITY_MATRIX_LINK_QUALITY in each simulator's
# Connectivity.py with perl -i, running the simulation, and collecting the
# mean PDR into columns 2 and 3 of output.csv.
# NOTE(review): the header string below contains "\O" (likely meant "\t" or
# "/") — left as-is since it is runtime output; confirm intended separator.
echo "Bash version ${BASH_VERSION}..."
printf "Link_Quality\Objective_Function\tOF0\tMRHOF\n" >> output.csv
# Run simulation for OF0
# Reset the baseline pdr 0.5 to 0.0 so the loop's +0.1 substitutions chain.
perl -p -i -e "s/CONNECTIVITY_MATRIX_LINK_QUALITY = \{\'pdr\' : 0.5, \'rssi\' : -30\}/CONNECTIVITY_MATRIX_LINK_QUALITY = \{\'pdr\' : 0.0, \'rssi\' : -30\}/g" ./OrignalSimulator/SimEngine/Connectivity.py
for percent in {0..9..1}
do
# Emit the row label (e.g. "10", "20", ...) that awk fills in later.
printf "$((percent+1))0\t\t\n" >> output.csv
if [ $percent -lt 9 ]
then
perl -p -i -e "s/CONNECTIVITY_MATRIX_LINK_QUALITY = \{\'pdr\' : 0.$percent, \'rssi\' : -30\}/CONNECTIVITY_MATRIX_LINK_QUALITY = \{\'pdr\' : 0.$((percent+1)), \'rssi\' : -30\}/g" ./OrignalSimulator/SimEngine/Connectivity.py
else
perl -p -i -e "s/CONNECTIVITY_MATRIX_LINK_QUALITY = \{\'pdr\' : 0.$percent, \'rssi\' : -30\}/CONNECTIVITY_MATRIX_LINK_QUALITY = \{\'pdr\' : 1.00, \'rssi\' : -30\}/g" ./OrignalSimulator/SimEngine/Connectivity.py
fi
echo "Objective Function: OF0, percent: $((percent+1))"
cd ./OrignalSimulator/bin/
python runSim.py
cd ..
cd ..
file="./OrignalSimulator/bin/pdr_value/mean_pdr.txt"
pdr=$(cat "$file")
echo "pdr value of OF0, link quality $((percent+1))0 %: $pdr"
# Write the measured pdr into column 2 of the matching row of output.csv.
awk -v value=$pdr -v row=$((percent+2)) -v col=2 'BEGIN{FS=OFS="\t"} NR==row {$col=value}1' output.csv > new.csv
mv new.csv output.csv
done
# After the loop $percent is 9, so the else branch restores 1.00 -> 0.5.
if [ $percent -lt 9 ]
then
perl -p -i -e "s/CONNECTIVITY_MATRIX_LINK_QUALITY = \{\'pdr\' : 0.$((percent+1)), \'rssi\' : -30\}/CONNECTIVITY_MATRIX_LINK_QUALITY = \{\'pdr\' : 0.5, \'rssi\' : -30\}/g" ./OrignalSimulator/SimEngine/Connectivity.py
else
perl -p -i -e "s/CONNECTIVITY_MATRIX_LINK_QUALITY = \{\'pdr\' : 1.00, \'rssi\' : -30\}/CONNECTIVITY_MATRIX_LINK_QUALITY = \{\'pdr\' : 0.5, \'rssi\' : -30\}/g" ./OrignalSimulator/SimEngine/Connectivity.py
fi
# Run simulation for MRHOF
# Same sweep as above, against the MRHOF simulator tree, filling column 3.
perl -p -i -e "s/CONNECTIVITY_MATRIX_LINK_QUALITY = \{\'pdr\' : 0.5, \'rssi\' : -30\}/CONNECTIVITY_MATRIX_LINK_QUALITY = \{\'pdr\' : 0.0, \'rssi\' : -30\}/g" ./MrHofSimulators/SimEngine/Connectivity.py
for percent1 in {0..9..1}
do
if [ $percent1 -lt 9 ]
then
perl -p -i -e "s/CONNECTIVITY_MATRIX_LINK_QUALITY = \{\'pdr\' : 0.$percent1, \'rssi\' : -30\}/CONNECTIVITY_MATRIX_LINK_QUALITY = \{\'pdr\' : 0.$((percent1+1)), \'rssi\' : -30\}/g" ./MrHofSimulators/SimEngine/Connectivity.py
else
perl -p -i -e "s/CONNECTIVITY_MATRIX_LINK_QUALITY = \{\'pdr\' : 0.$percent1, \'rssi\' : -30\}/CONNECTIVITY_MATRIX_LINK_QUALITY = \{\'pdr\' : 1.00, \'rssi\' : -30\}/g" ./MrHofSimulators/SimEngine/Connectivity.py
fi
echo "Objective Function: MRHOF, percent: $((percent1+1))"
cd ./MrHofSimulators/bin/
python runSim.py
cd ..
cd ..
file1="./MrHofSimulators/bin/pdr_value/mean_pdr.txt"
pdr1=$(cat "$file1")
echo "pdr value of MRHOF, link quality $((percent1+1))0 %: $pdr1"
awk -v value=$pdr1 -v row=$((percent1+2)) -v col=3 'BEGIN{FS=OFS="\t"} NR==row {$col=value}1' output.csv > new.csv
mv new.csv output.csv
done
# Restore the MRHOF simulator's pdr back to the 0.5 baseline.
if [ $percent1 -lt 9 ]
then
perl -p -i -e "s/CONNECTIVITY_MATRIX_LINK_QUALITY = \{\'pdr\' : 0.$((percent1+1)), \'rssi\' : -30\}/CONNECTIVITY_MATRIX_LINK_QUALITY = \{\'pdr\' : 0.5, \'rssi\' : -30\}/g" ./MrHofSimulators/SimEngine/Connectivity.py
else
perl -p -i -e "s/CONNECTIVITY_MATRIX_LINK_QUALITY = \{\'pdr\' : 1.00, \'rssi\' : -30\}/CONNECTIVITY_MATRIX_LINK_QUALITY = \{\'pdr\' : 0.5, \'rssi\' : -30\}/g" ./MrHofSimulators/SimEngine/Connectivity.py
fi
echo ""
echo "Simulation Results"
cat output.csv
| true
|
70abd58c2244ce752e25c7ec525cdbf452df37a6
|
Shell
|
munair/ddex-price-reservation-alert
|
/monitor.bash
|
UTF-8
| 694
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# Install a per-minute cron watchdog: if the poller process disappears, run
# informer.py once and restore the original crontab (removing the watchdog).
# backup the cron tables
crontab -l > /tmp/crontab.original
# use them as the basis for new cron tables
cp /tmp/crontab.original /tmp/crontab.informer
# make a temporary BASH script to check for polling activity
# (the \$ escapes keep the expansions literal here so they are evaluated
# when cron runs /tmp/informer.bash, not when this heredoc is written)
cat << EOF > /tmp/informer.bash
activepoller=\$(/bin/ps auwx | grep poller.py | wc -l)
if [[ \$activepoller -lt 2 ]]
then
/usr/bin/python3 /home/ubuntu/ddex-price-reservation-alert/informer.py
crontab /tmp/crontab.original
fi
EOF
# update cron to monitor the poller process with informer.bash
echo "* * * * * /bin/bash /tmp/informer.bash 1>/tmp/informer.out 2>/tmp/informer.err" >> /tmp/crontab.informer
crontab /tmp/crontab.informer && rm /tmp/crontab.informer
| true
|
23630af151795092094a577cb84bd62c28af31fd
|
Shell
|
dsivkov/bs_bench_idp
|
/set_python_envs.sh
|
UTF-8
| 2,234
| 2.859375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash -x
# Copyright (c) 2017, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#     * Redistributions of source code must retain the above copyright notice,
#       this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of Intel Corporation nor the names of its contributors
#       may be used to endorse or promote products derived from this software
#       without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#export ACCEPT_INTEL_PYTHON_EULA=yes
# Bootstrap Miniconda3 under $HOME/miniconda3 (if missing) and create two
# benchmark environments: "intel3" (Intel-channel conda packages) and
# "pip3" (pip-installed equivalents). Each step is skipped when its target
# already exists, so the script is safe to re-run.
DIR=$HOME/miniconda3
CONDA=$DIR/bin/conda
mkdir -p $DIR
cd $DIR
# Download and run the Miniconda installer only when conda is not yet present;
# the subshell exits non-zero if the install still leaves no conda binary.
[ -x $CONDA ] || (
[ -f Miniconda3-latest-Linux-x86_64.sh ] || curl -O https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh
bash ./Miniconda3-latest-Linux-x86_64.sh -b -p $DIR -f
[ -x $CONDA ] || exit 1
)
[ -d $DIR/envs/intel3 ] || $CONDA create -y -n intel3 -c intel python=3 numpy numexpr scipy tbb dask numba cython
[ -d $DIR/envs/pip3 ] || (
$CONDA create -y -n pip3 -c intel python=3 pip llvmlite cython
$DIR/envs/pip3/bin/pip install numpy scipy scikit-learn toolz numexpr
$DIR/envs/pip3/bin/pip install dask numba
)
| true
|
380370eedc02a1a3abc8e628884f38e201543b12
|
Shell
|
fionnan/BlockchainInfoParser
|
/parse.sh
|
UTF-8
| 194
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/bash
# Extract selected cell values from the <td> rows of an HTML table dump.
# Usage: parse.sh <file>   -> writes the result to <file>_parsed
# Quoting "$file" fixes the original bug where an empty/whitespace-containing
# argument made the unquoted expansion disappear and grep read from stdin.
file=$1
grep '<td>' "$file" | awk -F'>' '{if ((NR % 5) == 1 ) print $3; if ((NR % 5) == 3) print $2 ; if ((NR % 5) == 4 ) print $4 }' | awk -F '<' '{print $1}' > "${file}_parsed"
| true
|
3cc2d646b1f89811ea4a0f56270a24dcdcfa89b1
|
Shell
|
ymarion/geometry
|
/Tests/timePerformance.sh
|
UTF-8
| 1,533
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
# Time how long ./geometry takes to LOAD files of increasing figure counts.
# Each iteration generates a fresh temp.txt with ./generator, then times a
# LOAD+EXIT session (output discarded; `time` output goes to stdout).
echo "Testing the LOADing of different # of figures."
printf "Note: Generating time not counted.\n\n\n"
# Deduplicated: the original repeated this block verbatim for each size.
for count in 10 50 100 500 1000 5000 10000 50000 100000 500000 1000000; do
  printf "%s figures" "$count"
  ./generator "$count" > temp.txt
  { time printf "LOAD temp.txt\nEXIT\n" | ./geometry > /dev/null; } 2>&1
  echo
done
rm temp.txt
| true
|
d4246a08629d29b4ea7415f1e6d3d37cbd081561
|
Shell
|
plomlompom/config
|
/bin/setup_starttls.sh
|
UTF-8
| 796
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/sh
# Configure Postfix for server-side STARTTLS. Optionally takes an existing
# key ($1) and certificate ($2); otherwise generates a private key and a
# 10-year self-signed certificate for the Postfix hostname.
set -x
set -e
key=$1
cert=$2
# Root is required to write under /etc/postfix and restart the service.
if [ ! "$(id -u)" -eq "0" ]; then
echo "Must be run as root."
exit 1
fi
key_target=/etc/postfix/key.pem
if [ ! -n "$key" ]; then
if [ ! -f "${key_target}" ]; then
# umask 077 inside the subshell keeps the generated key readable by root only.
(umask 077; openssl genrsa -out "${key_target}" 2048)
fi
else
cp "$key" "${key_target}"
fi
fqdn=$(postconf -h myhostname)
cert_target=/etc/postfix/cert.pem
if [ ! -n "$cert" ]; then
if [ ! -f "${cert_target}" ]; then
# Self-signed cert with CN = Postfix's own hostname, valid ~10 years.
openssl req -new -key "${key_target}" -x509 -subj "/CN=${fqdn}" -days 3650 -out "${cert_target}"
fi
else
cp "$cert" "${cert_target}"
fi
# NOTE(review): this appends unconditionally, so re-running the script adds
# duplicate smtpd_tls_* entries to main.cf — confirm whether that is acceptable.
cat >> /etc/postfix/main.cf << EOF
# Enable server-side STARTTLS.
smtpd_tls_cert_file = /etc/postfix/cert.pem
smtpd_tls_key_file = /etc/postfix/key.pem
smtpd_tls_security_level = may
EOF
service postfix restart
| true
|
367dce79b71c3041f7fbd75d3b50ba7e2cd2519d
|
Shell
|
mmathesius/stream-module-testing
|
/create-new-branches.sh
|
UTF-8
| 1,187
| 4
| 4
|
[] |
no_license
|
#!/bin/bash
# Usage: create-new-branches.sh namespace/component#ref [ ... ]
# For each "namespace/component#ref" argument, clone $scm_base/temp/<component>
# into a temp dir and push a new orphan branch <ref> containing a single empty
# "Initialize" commit. <ref> defaults to "master" when no "#ref" suffix is given.
dry_run=no
scm_base="ssh://git@gitlab.com/redhat/centos-stream"
# "$@" (quoted) keeps arguments with unusual characters intact; the original
# unquoted $@ would have word-split them.
for arg in "$@"
do
    echo "Processing $arg"
    base="${arg%%\#*}"        # everything before the first '#'
    if [ "$base" == "$arg" ]
    then
        # no suffix
        ref="master"
    else
        ref="${arg#*\#}"
    fi
    comp="${base##*/}"        # component = last path segment
    if [ "$comp" == "$base" ]
    then
        ns="unknown"
    else
        ns="${base%/*}"
    fi
    # trim any trailing :stream from module component name
    comp="${comp%%\:*}"
    echo ns="$ns"
    echo comp="$comp"
    echo ref="$ref"
    tmpdir=$(mktemp -d)
    echo tmpdir="$tmpdir"
    scm="$scm_base/temp/$comp"
    echo scm="$scm"
    git clone "$scm" "$tmpdir"
    if [ $? -ne 0 ]
    then
        echo "clone failed, skipping"
        continue
    fi
    pushd "$tmpdir" >/dev/null
    git checkout --orphan "$ref"
    git rm -rf --ignore-unmatch .
    git commit --allow-empty -m "Initialize $ref branch"
    if [ "$dry_run" == "no" ]
    then
        git push --set-upstream origin "$ref"
    else
        git push --dry-run --set-upstream origin "$ref"
    fi
    popd >/dev/null
    rm -rf "$tmpdir"
done
| true
|
1d6e749a98ac2d3d95a2ef083c8aeda19e69ed51
|
Shell
|
havenden/web-backup
|
/backup.sh
|
UTF-8
| 2,527
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
# Back up website configuration, databases and files, then mirror the backups
# to a set of peer servers over FTP. (Comments translated from Chinese.)
web_path='/data/htdocs/' # website files path
conf_path='/usr/local/nginx/conf/vhost/' # nginx vhost config path
username='mysql-user' # database user
password='mysql-password' # database password
target_path='/data/wwwbak/backup_files/' # backup destination; created automatically
# Servers back each other up.
# FTP credentials of the remote peer servers (parallel arrays, one per host).
ftp_ip=('120.1.21.62' '116.2.134.201' '120.15.18.116')
ftp_port=('21' '21' '21')
ftp_username=('backup' 'backup_user' 'backup_xxx')
ftp_password=('bakpwd' 'bakpwd' 'bakpwd')
# Backup directory, one per day.
back_path=$target_path`date +%Y-%m-%d/`
[ ! -d $back_path ]&&mkdir -p $back_path
# Back up nginx vhost configuration files.
function backup_conf(){
\cp -R ${conf_path}* $back_path
}
# Back up each site's database: read the DB name from the site's
# common.inc.php, dump it, and drop the dump file if mysqldump failed.
function backup_database()
{
webs=$(ls -l $web_path |awk '/^d/ {print $NF}')
for i in $webs
do
dbname=$(cat $web_path$i'/data/common.inc.php' 2>>error.log|grep 'cfg_dbname'|awk -F "'" '{print $2}')
mysqldump -u$username -p$password -B $dbname -lF>$back_path$dbname".sql" 2>>error.log
[ $? -ne 0 ]&&rm -rf $back_path$dbname".sql" 2>>error.log
done
}
# Back up website files: prune old/empty backup dirs, reset each site's
# template cache, drop oversized files, then tar each site directory.
function backup_website(){
#Delete backup files for more than 7 days
find $target_path -type d -mtime +5 | xargs rm -rf
#Delete empty directory
target_directory=$(ls $target_path)
for j in $target_directory
do
file_count=$(ls $target_path$j|wc -l)
[ $file_count -eq 0 ]&&rm -rf $target_path$j
done
webs=$(ls -l $web_path |awk '/^d/ {print $NF}')
for i in $webs
do
rm -rf ${web_path-/tmp/}${i}/data/tplcache 2>>error.log
mkdir ${web_path}${i}/data/tplcache 2>>error.log
chmod -R 777 ${web_path}${i}/data/tplcache 2>>error.log
find ${web_path-/tmp/}$i -size +15M -exec rm -rf {} \;
tar -zcf ${back_path}$i".tar.gz" -C ${web_path} $i 2>>error.log
done
}
# Upload files ($5, a glob) from $back_path to one FTP peer ($1 host, $2 port,
# $3 user, $4 password), into a directory named after this host's eth1 IP.
function upftp(){
local_ip=`cat /etc/sysconfig/network-scripts/ifcfg-eth1|grep IPADDR|awk -F "=" '{print $2}'`
ftp -v -n $1 $2 <<EOF
user $3 $4
binary
mkdir $local_ip
cd $local_ip
lcd $back_path
prompt
mput $5
close
bye
EOF
}
# Upload to every configured FTP peer; defaults to uploading everything ("*").
function upload(){
for((i=0;i<`echo ${#ftp_ip[@]}`;i++)){
upftp ${ftp_ip[$i]} ${ftp_port[$i]} ${ftp_username[$i]} ${ftp_password[$i]} ${1-"*"}
}
}
# "ftp" argument = connectivity test only; otherwise run the full backup.
if [ "$1" == 'ftp' ];then
touch ${back_path}test.file
upload "test.file"
else
backup_conf
backup_database
backup_website
upload
fi
| true
|
e11c5e99d5ac70dbb3c0172ae7f71252d7374440
|
Shell
|
combs/reposearch
|
/repoindex
|
UTF-8
| 333
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
# Build a gzipped full-text index of a git repository for later searching.
# Usage: repoindex [dir]   (defaults to the current repository's top level)
DIR=$1
if [ "$DIR" == "" ]
then
  DIR=$(git rev-parse --show-toplevel)
fi
REPONAME=$(basename "$DIR")
INDEX="$DIR/../.index.$REPONAME.gz"
echo Indexing "$REPONAME" to "$INDEX"
# Abort rather than indexing the wrong directory if cd fails.
cd "$DIR" || exit 1
# Match every line of every text file (-I skips binaries), excluding VCS
# metadata, minified assets and vendored dependencies.
grep -IR '$' * --exclude-dir=.git --exclude=app.\* --exclude=\*.min.\* --exclude-dir=\*/vendor --exclude-dir=\*/node_modules | gzip -9 > "$INDEX"
| true
|
014c4e074065d345c0d74172f05c3668ef7b6653
|
Shell
|
nshttpd/mikrotik-exporter
|
/scripts/build-armhf.sh
|
UTF-8
| 290
| 2.59375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Cross-compile the exporter for linux/arm and build+push the armhf Docker
# image, tagged with $VERSION (defaults to the short git SHA).
set -e
set -x
DIR=$(pwd)
NAME=$(basename "${DIR}")
SHA=$(git rev-parse --short HEAD)
VERSION=${VERSION:-$SHA}
GOOS=linux GOARCH=arm go build .
docker build -t "nshttpd/${NAME}:${VERSION}-armhf" -f Dockerfile.armhf .
docker push "nshttpd/${NAME}:${VERSION}-armhf"
# Remove the locally built binary; the artifact of interest is the image.
rm mikrotik-exporter
| true
|
7486a630b6b188825227645a032844a27f738f4e
|
Shell
|
mapbox/mason
|
/scripts/icu/58.1/script.sh
|
UTF-8
| 4,036
| 3.78125
| 4
|
[
"BSD-2-Clause"
] |
permissive
|
#!/usr/bin/env bash
# Build ICU common package (libicuuc.a) with data file separate and with support for legacy conversion and break iteration turned off in order to minimize size
MASON_NAME=icu
MASON_VERSION=58.1
MASON_LIB_FILE=lib/libicuuc.a
#MASON_PKGCONFIG_FILE=lib/pkgconfig/icu-uc.pc
. ${MASON_DIR}/mason.sh
MASON_BUILD_DEBUG=0 # Enable to build library with debug symbols
MASON_CROSS_BUILD=0
function mason_load_source {
mason_download \
https://download.icu-project.org/files/icu4c/58.1/icu4c-58_1-src.tgz \
ad6995ba349ed79dde0f25d125a9b0bb56979420
mason_extract_tar_gz
export MASON_BUILD_PATH=${MASON_ROOT}/.build/${MASON_NAME}
}
function mason_prepare_compile {
if [[ ${MASON_PLATFORM} == 'ios' || ${MASON_PLATFORM} == 'android' || ${MASON_PLATFORM_VERSION} != `uname -m` ]]; then
mason_substep "Cross-compiling ICU. Starting with host build of ICU to generate tools."
pushd ${MASON_ROOT}/..
env -i HOME="$HOME" PATH="$PATH" USER="$USER" ${MASON_DIR}/mason build icu ${MASON_VERSION}
popd
# TODO: Copies a bunch of files to a kind of orphaned place, do we need to do something to clean up after the build?
# Copying the whole build directory is the easiest way to do a cross build, but we could limit this to a small subset of files (icucross.mk, the tools directory, probably a few others...)
# Also instead of using the regular build steps, we could use a dedicated built target that just builds the tools
mason_substep "Moving host ICU build directory to ${MASON_ROOT}/.build/icu-host"
rm -rf ${MASON_ROOT}/.build/icu-host
cp -R ${MASON_BUILD_PATH}/source ${MASON_ROOT}/.build/icu-host
fi
}
function mason_compile {
if [[ ${MASON_PLATFORM} == 'ios' || ${MASON_PLATFORM} == 'android' || ${MASON_PLATFORM_VERSION} != `uname -m` ]]; then
MASON_CROSS_BUILD=1
fi
mason_compile_base
}
function mason_compile_base {
pushd ${MASON_BUILD_PATH}/source
# Using uint_least16_t instead of char16_t because Android Clang doesn't recognize char16_t
# I'm being shady and telling users of the library to use char16_t, so there's an implicit raw cast
ICU_CORE_CPP_FLAGS="-DU_CHARSET_IS_UTF8=1 -DU_CHAR_TYPE=uint_least16_t"
ICU_MODULE_CPP_FLAGS="${ICU_CORE_CPP_FLAGS} -DUCONFIG_NO_LEGACY_CONVERSION=1 -DUCONFIG_NO_BREAK_ITERATION=1"
CPPFLAGS="${CPPFLAGS} ${ICU_CORE_CPP_FLAGS} ${ICU_MODULE_CPP_FLAGS} -fvisibility=hidden $(icu_debug_cpp)"
#CXXFLAGS="--std=c++0x"
echo "Configuring with ${MASON_HOST_ARG}"
./configure ${MASON_HOST_ARG} --prefix=${MASON_PREFIX} \
$(icu_debug_configure) \
$(cross_build_configure) \
--with-data-packaging=archive \
--enable-renaming \
--enable-strict \
--enable-static \
--enable-draft \
--disable-rpath \
--disable-shared \
--disable-tests \
--disable-extras \
--disable-tracing \
--disable-layout \
--disable-icuio \
--disable-samples \
--disable-dyload || cat config.log
# Must do make clean after configure to clear out object files left over from previous build on different architecture
make clean
make -j${MASON_CONCURRENCY}
make install
popd
}
function icu_debug_cpp {
if [ ${MASON_BUILD_DEBUG} ]; then
echo "-glldb"
fi
}
function icu_debug_configure {
if [ ${MASON_BUILD_DEBUG} == 1 ]; then
echo "--enable-debug --disable-release"
else
echo "--enable-release --disable-debug"
fi
}
function cross_build_configure {
# Building tools is disabled in cross-build mode. Using the host-built version of the tools is the whole point of the --with-cross-build flag
if [ ${MASON_CROSS_BUILD} == 1 ]; then
echo "--with-cross-build=${MASON_ROOT}/.build/icu-host --disable-tools"
else
echo "--enable-tools"
fi
}
function mason_cflags {
echo "-I${MASON_PREFIX}/include -DUCHAR_TYPE=char16_t"
}
function mason_ldflags {
    # Static archive is linked directly by consumers; no extra linker flags.
    printf '%s\n' ""
}
mason_run "$@"
| true
|
839f614a235f3fcb2fc1708be2da0fd316b323fc
|
Shell
|
wavefrontHQ/wavefront-proxy
|
/macos_proxy_notarization/github_workflow_wrapper_for_notarization.sh
|
UTF-8
| 8,150
| 3.703125
| 4
|
[
"Apache-2.0",
"CC0-1.0",
"GPL-2.0-or-later",
"Apache-1.1",
"CDDL-1.1",
"bzip2-1.0.6",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.1-or-later",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"LGPL-3.0-only",
"EPL-2.0",
"CDDL-1.0",
"CC-PDDC",
"LicenseRef-scancode-unknown-license-reference",
"CC-BY-2.5",
"LicenseRef-scancode-generic-export-compliance",
"LicenseRef-scancode-philippe-de-muyter",
"LicenseRef-scancode-other-permissive",
"MIT",
"SunPro",
"EPL-1.0",
"Classpath-exception-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"GPL-2.0-only",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
# Print the positional CLI contract of this wrapper script.
# FIX: the GitHub API token is read from $2 in the main body below, but the
# original help text labelled it "3" — corrected to "2".
usage () {
    set +x
    echo "command line parameters"
    echo "1 | proxy version | required=True | default='' | example: '11.1.0'"
    echo "2 | github API token | required=True | default='' | example: 'ghp_xxxxx'"
    echo "3 | release type | required=False | default='proxy-test' | example: 'proxy-snapshot' / 'proxy-GA'"
    echo "4 | github org | required=False | default='wavefrontHQ' | example: 'wavefrontHQ' / 'sbhakta-vmware' (for forked repos)"
    echo "5 | debug | required=False | default='' | example: 'debug'"
    echo "Example command:"
    echo "$0 11.1.0 ghp_xxxx proxy-snapshot sbhakta-vmware debug"
    echo "$0 11.1.0 ghp_xxxx proxy-snapshot debug # uses wavefrontHQ org"
}
# Kick off the notarization workflow via the GitHub Actions workflow_dispatch
# REST API, then give GitHub a few seconds to register the run.
# FIX: the original -d payload was missing the closing brace of the top-level
# JSON object ('{"ref":...,"inputs":{...}'), producing malformed JSON.
trigger_workflow() {
    curl -X POST -H "Accept: application/vnd.github.v3+json" -H "Authorization: token ${github_token}" "https://api.github.com/repos/${github_org}/${github_repo}/actions/workflows/${github_notarization_workflow_yml}/dispatches" -d '{"ref":"'$github_branch'","inputs":{"proxy_version":"'$proxy_version'","release_type":"'$release_type'"}}'
    sleep 5
}
# Poll the GitHub Actions jobs endpoint until the run's first job completes
# successfully, fails, or the overall time budget is exceeded.
#   $1 - jobs_url of the workflow run to watch.
# Reads global: github_token. Exits 1 on failure or timeout.
check_jobs_completion() {
    jobs_url=$1
    # Safeguard against an infinite loop:
    # allowed_loops=100 x sleep_between_runs=15s => 1500s (25min) budget
    # (a normal run takes ~15min).
    allowed_loops=100
    current_loop=0
    sleep_between_runs=15
    total_allowed_loop_time=`expr $allowed_loops \* $sleep_between_runs`
    # start checking status of our workflow run until it succeeds/fails/times out
    while true;
    do
        # increment current loop count
        ((current_loop++))
        # infinite loop safeguard
        if [[ $current_loop -ge $allowed_loops ]]; then
            echo "Total allowed time exceeded: $total_allowed_loop_time sec! Workflow taking too long to finish... Quitting!!"
            exit 1
        fi
        echo "Checking status and conclusion of the running job...."
        # Query status/conclusion of the first (assumed only) job of the run;
        # tr strips the surrounding quotes from jq's JSON string output.
        status=`curl -s -H "Accept: application/vnd.github.v3+json" -H "Authorization: token ${github_token}" $jobs_url | jq '.jobs[0].status' | tr -d '"'`;
        conclusion=`curl -s -H "Accept: application/vnd.github.v3+json" -H "Authorization: token ${github_token}" $jobs_url | jq '.jobs[0].conclusion' | tr -d '"'`;
        echo "### status=$status & conclusion=$conclusion"
        if [[ ( "$status" == "completed" ) && ( "$conclusion" == "success" ) ]]; then
            echo "Job completed successfully"
            break
        elif [[ ("$status" == "in_progress") || ("$status" == "queued") ]]; then
            echo "Still in progress or queued. Sleep for $sleep_between_runs sec and try again..."
            echo "loop time so far / total allowed loop time = `expr $current_loop \* $sleep_between_runs` / $total_allowed_loop_time"
            sleep $sleep_between_runs
        else # everything else (completed+failure, cancelled, ...)
            echo "Job did not complete successfully"
            exit 1
        fi
    done
}
#######################################
############# MAIN ####################
#######################################
# Print usage on request before any validation.
if [[ "$1" == "--help" || "$1" == "-h" ]]; then
    usage
    exit 0
fi

# command line args
proxy_version=$1
github_token=$2
release_type=$3
github_org=$4
debug=$5

# constants
github_repo='wavefront-proxy'
github_notarization_workflow_yml='mac_tarball_notarization.yml'
github_branch='master'

if [[ -z $proxy_version ]]; then
    # FIX: the old example string ('11.1.0-proxy-snapshot') mixed the version
    # with the release type; usage() documents plain '11.1.0'.
    echo "proxy version is required as 1st cmd line argument. example: '11.1.0'. Exiting!"
    usage
    exit 1
fi

if [[ -z $release_type ]]; then
    release_type='proxy-test'
fi

if [[ -z $github_token ]]; then
    # FIX: the token is read from $2, but the old message said "3rd".
    echo "github token is required as 2nd cmd line argument. Exiting!"
    usage
    exit 1
fi

if [[ -z $github_org ]]; then
    github_org='wavefrontHQ'
fi

# Any non-empty 5th argument enables shell tracing.
if [[ ! -z $debug ]]; then
    set -x
fi

# print all variables for reference
echo "proxy_version=$proxy_version"
echo "release_type=$release_type"
echo "github_org=$github_org"
echo "github_repo=$github_repo"
echo "github_branch=$github_branch"
echo "github_notarization_workflow_yml=$github_notarization_workflow_yml"

# Timestamp used later to filter workflow runs created after this script
# started. NOTE(review): '%Y-%d-%m' puts day before month and '%H-%M-%S'
# uses dashes, neither of which is ISO 8601 as the GitHub 'created' filter
# expects — confirm the filter actually takes effect.
format_date=`date +'%Y-%d-%m'`
format_current_time=`date +'%H-%M-%S'`
date_str=$format_date'T'$format_current_time
echo "date_str=$date_str"
# trigger the workflow
trigger_workflow

# Poll for our run to appear: workflow_dispatch runs can take several seconds
# to show up in the API, so retry the in-progress count a few times.
max_retries=4
sleep_between_retries=15
for retry in $(seq 1 $max_retries); do
    current_running_jobs=`curl -s -H "Accept: application/vnd.github.v3+json" -H "Authorization: token ${github_token}" "https://api.github.com/repos/${github_org}/${github_repo}/actions/workflows/${github_notarization_workflow_yml}/runs?status=in_progress&created>=${date_str}" | jq '.total_count'`
    echo "### total runs right now=$current_running_jobs"
    if [[ $current_running_jobs == 0 ]]; then
        echo "No currently running jobs found. sleep for $sleep_between_retries sec and retry! ${retry}/$max_retries"
        sleep $sleep_between_retries
    else # current runs are > 0
        break
    fi
done

# if no current running jobs, exit
if [[ $current_running_jobs == 0 ]]; then
    echo "No currently running jobs found. retry=${retry}/$max_retries.. Exiting"
    exit 1
fi
# Uniquely identify our triggered run among all in-progress runs:
# 1. loop through all runs in progress
# 2. get the "jobs_url" for each
# 3. GET the jobs_url and inspect the workflow step names
#    - steps may take time to appear; retry when missing
# 4. the workflow names its steps after the version passed in, so the step
#    name uniquely identifies our run
# 5. keep that jobs_url and poll it for completion
jobs_url=''
found_jobs_url=False
for i in $(seq 0 $((current_running_jobs-1))); do
    jobs_url=`curl -s -H "Accept: application/vnd.github.v3+json" -H "Authorization: token ${github_token}" "https://api.github.com/repos/${github_org}/${github_repo}/actions/runs?status=in_progress&created>=$date_str" | jq '.workflow_runs['"$i"'].jobs_url' | tr -d '"'`;
    echo "### jobs_url=$jobs_url"
    # FIX: the original tested [[ jobs_url != '' ]] — a literal string with a
    # missing '$', which is always true. Also skip jq's literal "null" output.
    if [[ $jobs_url != '' && $jobs_url != 'null' ]]; then
        for retry_step_name in $(seq 1 3); do
            # assuming only 1 run inside a job, get the 2nd step name, which
            # carries the unique version associated with it.
            step_name=`curl -s -H "Accept: application/vnd.github.v3+json" -H "Authorization: token ${github_token}" "$jobs_url" | jq '.jobs[0].steps[1].name'`
            echo "### step_name=$step_name"
            # FIX: jq prints the literal string "null" (not an empty string)
            # for a missing step, so the original '-z' check never fired;
            # treat "null" the same as empty and retry.
            if [[ -z $step_name || $step_name == 'null' ]]; then
                echo "Step_name is null, sleep and rety again!!!"
                sleep 10
                continue
            # verify the step name has the version passed as cmd line to this script
            elif [[ $step_name =~ .*$proxy_version.* ]]; then
                echo "We've found our running job for proxy_version:$proxy_version. Final jobs_url below..."
                found_jobs_url=True
                break;
            # this may not be the correct job_url we're looking for
            else
                echo "Reset jobs_url"
                jobs_url=''
            fi
        done
    fi
    if [[ $found_jobs_url == True ]]; then
        break
    fi
done

# check if we found the correct jobs_url for our running job
if [[ $jobs_url == '' ]]; then
    echo "no jobs_url found for proxy_version:$proxy_version.. quitting"
    exit 1
fi

echo "Confirmed jobs_url=$jobs_url"
check_jobs_completion $jobs_url
set +x
| true
|
ff71ca6608f9825dfad3d8acb464a6e51e50b587
|
Shell
|
paieer/pubkey_channel
|
/pubkey_dns_client/commands_loop.sh
|
UTF-8
| 2,690
| 4.5625
| 5
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
commands="" # Command line to run each iteration (filled by parse_input)
seconds=5   # Delay between executions; default 5 seconds
total=0     # Number of executions; 0 means run forever
# Print CLI usage. echo -e interprets the embedded \n and \t escapes; the
# trailing backslashes continue the single echo across source lines.
show_help()
{
    echo -e "Options:\n"\
    " -c '\"<cmd>\"'\tCommands to execute, content needs to\n"\
    "\t\tbe enclosed in quotation marks: (\\\") or ('\").\n"\
    " -s <num>\tSeconds between each execution.\n"\
    " -t <num>\tVariable number of loops.\n"\
    " --help\tDisplay this information."
}
# Walk the global args[] array (filled by parse_input) and populate the
# globals $seconds, $total and $commands.
# NOTE(review): "$#" here is this function's own argument count, used
# interchangeably with the args[] length — they match because parse_input
# forwards $@ unquoted.
parse_input_loop()
{
    for ((i = 0; i < $#; i++))
    do
        arg=${args[$i]}
        if [ "$arg" == "-s" ]; then
            ((i++))
            seconds=${args[$i]} && continue
        fi
        if [ "$arg" == "-t" ]; then
            ((i++))
            total=${args[$i]} && continue
        fi
        if [ "$arg" == "-c" ]; then
            ((i++))
            arg=${args[$i]}
            # The command string must open with a double quote...
            if ! echo $arg | grep -Eq "^\""; then
                echo "Invalid commands. Try: $0 --help"
                exit 1
            fi
            commands+=$arg
            # ...and is accumulated word by word until the closing quote
            # (or the end of the argument list).
            while ! echo $arg | grep -Eq "\"$" && ((i < $#))
            do
                ((i++))
                arg=${args[$i]}
                commands+=" "$arg
            done
            continue
        fi
        if [ "$arg" == "--help" ] || [ "$arg" == "-h" ]; then
            show_help
            exit 0
        fi
        # NOTE(review): unknown arguments exit with status 0 — arguably this
        # should be a non-zero failure; confirm before changing.
        echo Invalid input \"$arg\". Try \"$0 --help\" && exit 0
    done
}
# Validate the raw CLI input, copy it into the global args[] array, delegate
# option handling to parse_input_loop, then sanity-check the parsed values.
parse_input()
{
    if [ $# -lt 1 ]; then
        echo "Missing args. Try: $0 --help" && exit 1
    fi
    # NOTE(review): $arg_idx is never initialised — relies on arithmetic
    # context treating an unset variable as 0.
    for arg in $@
    do
        args[$arg_idx]=$arg
        ((arg_idx++))
    done
    parse_input_loop $@
    # Remove quotation marks (strip the first and last character of $commands)
    intercept_len=$(( ${#commands} - 2 ))
    commands=${commands: 1:$intercept_len}
    echo -e "\n\tCommands: $commands"
    # I'm too lazy to do more checks, the user is responsible for it
    result=`echo $total | grep "[^0-9]"`
    if [ ! -z $result ]; then
        echo "Invalid total number '$total'."
        exit 1
    fi
    if [ $total -gt 0 ]; then
        echo -e "\tTotal number of executions: $total"
    fi
    result=`echo $seconds | grep "[^0-9]"`
    if [[ ! -z $result ]] || [[ $seconds -lt 1 ]]; then
        echo "Invalid seconds '$seconds'."
        exit 1
    fi
    echo -e "\tSleep seconds: $seconds\n"
}
# Launch $commands in the background every $seconds seconds.
# A positive $total bounds the number of launches; zero means run forever.
doing_work()
{
    i=0
    while [ $total -eq 0 ] || [ $i -lt $total ]
    do
        # echo run: $commands
        $commands &
        sleep $seconds
        i=$((i+1))
    done
}
# Entry point: parse CLI options into the globals, then start looping.
main()
{
    # NOTE(review): $@ is deliberately left unquoted — parse_input re-splits
    # the words to rebuild the quoted command string; do not "fix" the quoting
    # without reworking the parser.
    parse_input $@
    doing_work
}
main $@
| true
|
ec3025819d5421325e7f0d20387ec311c8d2ac74
|
Shell
|
huww98/initialize-server
|
/targets/nvidia-docker
|
UTF-8
| 559
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# Target definition: install nvidia-docker2 on top of docker + nvidia-driver.
required-target docker || return 1
required-target nvidia-driver || return 1
# Package list consumed by the generic package-target helper sourced below.
PACKAGES=(nvidia-docker2)
package-prepare-install() {
    # Register NVIDIA's apt signing key and the distro-specific repo list.
    curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add - || return 1
    # e.g. "ubuntu18.04"; sourced in a subshell so os-release vars don't leak.
    distribution=$(. /etc/os-release;echo $ID$VERSION_ID)
    # NOTE(review): this redirection writes as the invoking user, not via
    # sudo — presumably the whole script runs as root; confirm.
    curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list > /etc/apt/sources.list.d/nvidia-docker.list || return 1
}
package-after-install() {
    # Restart so the daemon picks up the newly registered nvidia runtime.
    systemctl restart docker.service
}
. utils/package-target nvidia-docker || return 1
| true
|
255102230e94dd1f39aabc952faa8d314cdbee12
|
Shell
|
YuzhouStar/DevOps
|
/Linux/print_input_process.sh
|
UTF-8
| 318
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
# Draw a progress bar that advances two percent for every space-bar press
# read from stdin; Enter and all other keys are ignored.
function Proceess(){
    spa=''
    i=0
    while [ $i -le 100 ]
    do
        # -n1: return after a single keystroke (Enter yields an empty $input).
        # On EOF read fails; bail out instead of spinning forever.
        read -n1 input || break
        # FIX: the original compared with -eq, which evaluates both sides
        # arithmetically — " " and any non-numeric key both collapse to 0, so
        # EVERY keystroke advanced the bar. Use string comparison instead.
        if [[ $input == " " ]]; then
            printf "[%-50s] %d%% \r" "$spa" "$i";
            sleep 0.5
            ((i=i+2))
            spa+='#'
        else
            # Enter (empty $input) and any other key: ignore and re-read.
            continue
        fi
    done
    echo
}
Proceess
| true
|
562623761de5897dabe5d60932b0e319d8e905cd
|
Shell
|
The-Bioinformatics-Group/Lake_Eukaryota_Metagenomics_Project
|
/01_Trimming/trim_cutadapt_noAdapt.sge
|
UTF-8
| 955
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
#$ -cwd
#$ -q node0
#$ -S /bin/bash

## Trimming fixed amount of bases from the right using cutadapt
## More is trimmed from R2 reads due to poorer mean quality
## Uses the dataset where primers have already been removed, creates new directory for trimmed files
## One of two method used for trimming the data. Less stringent than trimTails

#module load cutadapt/v1.10

DIR=../00_Data #Root of all data
NoAdapDIR=$DIR/AdaptersRemoved #Directory of files to be trimmed

mkdir -p $DIR/trim_cutadapt #Creating directory for new fastq files

##Looping over all files in directory of files to be trimmed
##Checking if file is R1 och R2 and cutting accordingly
##writes trimmed sequences to new files
for f in $NoAdapDIR/*.fastq.gz
do
	# NOTE(review): ${f#$NoAdapDIR} keeps the leading '/', so the output path
	# contains a double slash — harmless, but basename would be cleaner.
	FILE=${f#$NoAdapDIR}
	if [[ $FILE =~ R1 ]]
	then
		# R1: remove the last 20 bases (cutadapt -u with a negative count).
		cutadapt -u -20 -o $DIR/trim_cutadapt/$FILE $f
	elif [[ $FILE =~ R2 ]]
	then
		# R2: remove the last 90 bases (poorer quality toward the 3' end).
		cutadapt -u -90 -o $DIR/trim_cutadapt/$FILE $f
	fi
done
| true
|
bbfc9fa7fcf68b8bc81235b05b590ec348980eb6
|
Shell
|
glenonmateus/bacula-sd
|
/run
|
UTF-8
| 617
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
# Render /etc/bacula/bacula-sd.conf from its @@PLACEHOLDER@@ template using
# environment overrides (falling back to the defaults below), verify the
# result, then run the Bacula storage daemon in the foreground.

: ${BACULA_SDNAME:="bacula-sd"}
: ${BACULA_SDPASSWORD:="password"}
: ${BACULA_DIRNAME:="bacula"}
: ${BACULA_MONNAME:="${BACULA_DIRNAME}-mon"}
: ${BACULA_MONSDPASSWORD:="${BACULA_SDPASSWORD}"}
: ${BACULA_DEBUG:="50"}

# Variables substituted into the config template.
CONFIGS_VARS=(
  BACULA_SDNAME
  BACULA_SDPASSWORD
  BACULA_DIRNAME
  BACULA_MONNAME
  BACULA_MONSDPASSWORD
)

for c in "${CONFIGS_VARS[@]}"; do
  # FIX: use ${!c} indirect expansion instead of "$(eval echo \$$c)" — eval
  # re-parses the value (word splitting, globbing, potential injection).
  # NOTE(review): values containing ',' or '&' would still confuse sed.
  sed -i "s,@@${c}@@,${!c}," /etc/bacula/bacula-sd.conf
done

echo "==> Verifying Bacula SD configuration"
bacula-sd -t -c /etc/bacula/bacula-sd.conf

echo "==> Starting Bacula SD"
bacula-sd -f -d ${BACULA_DEBUG} -c /etc/bacula/bacula-sd.conf
| true
|
5a3264828aeeb99f544ea6320d26cb332445acfe
|
Shell
|
opnfv/opnfv-ravello-demo
|
/joid/ci/odl/cloud-sh-odl/openstack.sh
|
UTF-8
| 4,863
| 2.75
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh -ex
# Print the agent-state of juju machine $1, parsed from "juju status" YAML
# via inline python; stderr is silenced so callers can poll quietly.
agentState()
{
	juju status | python -c "import yaml; import sys; print yaml.load(sys.stdin)[\"machines\"][\"$1\"][\"agent-state\"]" 2> /dev/null
}
# Print the agent-state of unit $2 of service $1, parsed from "juju status"
# YAML via inline python; stderr is silenced so callers can poll quietly.
agentStateUnit()
{
	juju status | python -c "import yaml; import sys; print yaml.load(sys.stdin)[\"services\"][\"$1\"][\"units\"][\"$1/$2\"][\"agent-state\"]" 2> /dev/null
}
# Emit an OpenStack RC (credentials) file on stdout.
#   $1 user, $2 password, $3 tenant, $4 auth URL, $5 region.
configOpenrc()
{
	printf 'export OS_USERNAME=%s\n' "$1"
	printf 'export OS_PASSWORD=%s\n' "$2"
	printf 'export OS_TENANT_NAME=%s\n' "$3"
	printf 'export OS_AUTH_URL=%s\n' "$4"
	printf 'export OS_REGION_NAME=%s\n' "$5"
}
# Print the public address of unit $2 of service $1 from "juju status".
unitAddress()
{
	juju status | python -c "import yaml; import sys; print yaml.load(sys.stdin)[\"services\"][\"$1\"][\"units\"][\"$1/$2\"][\"public-address\"]" 2> /dev/null
}
# Print the machine number hosting unit $2 of service $1 from "juju status".
unitMachine()
{
	juju status | python -c "import yaml; import sys; print yaml.load(sys.stdin)[\"services\"][\"$1\"][\"units\"][\"$1/$2\"][\"machine\"]" 2> /dev/null
}
# Block until every machine listed in the arguments reports agent-state
# "started", polling every 5 seconds.
waitForMachine()
{
	for machine; do
		until [ "$(agentState $machine)" = started ]; do
			sleep 5
		done
	done
}
# Block until unit 0 of every listed service reports agent-state "started",
# polling every 5 seconds.
waitForService()
{
	for service; do
		while [ "$(agentStateUnit "$service" 0)" != started ]; do
			sleep 5
		done
	done
}
# Optionally source an environment file of CHARM_* overrides.
if [ $# -ne 0 ]; then
	. "$1"
fi

juju bootstrap
waitForMachine 0

# Reserve 5 cores for infrastructure and hand out 3/4 of the remainder,
# split between the controller machine (extra_cpus) and nova-compute
# (spare_cpus).
spare_cpus=$(($(grep processor /proc/cpuinfo | wc -l) - 5))
if [ $spare_cpus -gt 0 ]; then
	spare_cpus=$(((spare_cpus * 3) / 4))
else
	spare_cpus=0
fi
extra_cpus=0
[ $spare_cpus -ne 0 ] && extra_cpus=$((1 + (((spare_cpus - 1) * 3) / 4))) && spare_cpus=$((spare_cpus - extra_cpus))
juju add-machine --constraints "cpu-cores=$((1 + extra_cpus)) mem=8G root-disk=20G" --series $DEFAULT_SERIES

juju deploy --constraints mem=1G $CHARM_NEUTRON_GATEWAY_DEPLOY_OPTS "${CHARM_NEUTRON_GATEWAY:-quantum-gateway}" neutron-gateway
juju deploy --constraints "cpu-cores=$((1 + spare_cpus)) mem=4G root-disk=20G" $CHARM_NOVA_COMPUTE_DEPLOY_OPTS "${CHARM_NOVA_COMPUTE:-nova-compute}"

# Machine 1 hosts the LXC containers for the control-plane services.
waitForMachine 1
juju scp lxc-network.sh 1:
juju run --machine 1 "sudo ./lxc-network.sh"
juju deploy --to lxc:1 $CHARM_MYSQL_DEPLOY_OPTS "${CHARM_MYSQL:-mysql}"
juju deploy --to lxc:1 $CHARM_RABBITMQ_SERVER_DEPLOY_OPTS "${CHARM_RABBITMQ_SERVER:-rabbitmq-server}"
juju deploy --to lxc:1 $CHARM_KEYSTONE_DEPLOY_OPTS "${CHARM_KEYSTONE:-keystone}"
juju deploy --to lxc:1 $CHARM_NOVA_CLOUD_CONTROLLER_DEPLOY_OPTS "${CHARM_NOVA_CLOUD_CONTROLLER:-nova-cloud-controller}"
juju deploy --to lxc:1 $CHARM_NEUTRON_API_DEPLOY_OPTS "${CHARM_NEUTRON_API:-neutron-api}"
juju deploy --to lxc:1 $CHARM_GLANCE_DEPLOY_OPTS "${CHARM_GLANCE:-glance}"
juju deploy --to lxc:1 $CHARM_OPENSTACK_DASHBOARD_DEPLOY_OPTS "${CHARM_OPENSTACK_DASHBOARD:-openstack-dashboard}"
# opendaylight
juju deploy --to lxc:1 $CHARM_ODL_CONTROLLER_DEPLOY_OPTS "${CHARM_ODL_CONTROLLER:-odl-controller}"
juju deploy $CHARM_NEUTRON_ODL_DEPLOY_OPTS "${CHARM_NEUTRON_ODL:-neutron-odl}"

# relation must be set first
# no official way of knowing when this relation hook will fire
waitForService mysql keystone
juju add-relation keystone mysql
sleep 60
waitForService rabbitmq-server nova-cloud-controller glance
juju add-relation nova-cloud-controller mysql
juju add-relation nova-cloud-controller rabbitmq-server
juju add-relation nova-cloud-controller glance
juju add-relation nova-cloud-controller keystone
sleep 60
waitForService neutron-api
juju add-relation neutron-api mysql
juju add-relation neutron-api rabbitmq-server
juju add-relation neutron-api keystone
juju add-relation neutron-api nova-cloud-controller
sleep 60
waitForService openstack-dashboard neutron-gateway nova-compute
juju add-relation neutron-gateway mysql
juju add-relation neutron-gateway:amqp rabbitmq-server:amqp
juju add-relation neutron-gateway nova-cloud-controller
juju add-relation neutron-gateway neutron-api
juju add-relation nova-compute:shared-db mysql:shared-db
juju add-relation nova-compute:amqp rabbitmq-server:amqp
juju add-relation nova-compute glance
juju add-relation nova-compute nova-cloud-controller
juju add-relation glance mysql
juju add-relation glance keystone
juju add-relation openstack-dashboard keystone
sleep 60
# opendaylight
waitForService odl-controller
juju add-relation neutron-api odl-controller
juju add-relation neutron-gateway odl-controller
juju add-relation nova-compute neutron-odl
juju add-relation neutron-odl odl-controller
sleep 60

# enable kvm on compute
machine=$(unitMachine nova-compute 0)
juju scp compute.sh $machine:
juju run --machine $machine "sudo ./compute.sh"

# Write local admin credentials, then run post-install setup scripts on the
# controller and glance machines.
mkdir -m 0700 -p cloud
controller_address=$(unitAddress keystone 0)
configOpenrc admin password Admin http://$controller_address:5000/v2.0 RegionOne > cloud/admin-openrc
chmod 0600 cloud/admin-openrc
machine=$(unitMachine nova-cloud-controller 0)
juju scp cloud-setup.sh cloud/admin-openrc ~/.ssh/id_rsa.pub $machine:
juju run --machine $machine ./cloud-setup.sh
machine=$(unitMachine glance 0)
juju scp glance.sh cloud/admin-openrc $machine:
juju run --machine $machine ./glance.sh
| true
|
674dd71af544b70c8db1f0bbe51ce2a33a0bbe50
|
Shell
|
djimenezjerez/bo_captcha_reader
|
/setup.sh
|
UTF-8
| 639
| 3.84375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Load distro identifiers (NAME, ID, ...) used below to build search URLs.
source /etc/os-release
# Tools whose presence is verified.
commands=(python3 firefox geckodriver)
# Check whether tool $1 is installed; print a green check mark on success or
# a red cross plus a Google search URL ("<distro name>+<tool>") on failure.
# NOTE(review): return codes are inverted relative to shell convention
# (0 = missing, 1 = installed); main() ignores them — confirm before relying
# on the status.
function verifyCommand {
  search=${NAME// /+}
  if ! [ -x "$(command -v $1)" ];
  then
    search="${search}+$1"
    echo -e "$(tput setaf 1)\xF0\x9F\x97\x99$(tput sgr0) Debe instalar la herramienta $1: $(tput setaf 4)http://www.google.com/search?q=$search$(tput sgr0)"
    return 0
  else
    echo -e "$(tput setaf 2)\xE2\x9C\x93$(tput sgr0) La herramienta $1 se ha instalado correctamente"
    return 1
  fi
}
# Verify every required tool, printing per-tool install guidance.
# FIX: the original passed a stray, never-defined "$i" as a second argument
# to verifyCommand; unquoted it expanded to nothing, so dropping it is safe.
function main() {
  for command in "${commands[@]}"
  do
    verifyCommand $command
  done
}

main
| true
|
6a13c40a6d1589de9d91eab48c8a9e22af917dc6
|
Shell
|
sahara-labs/rig-client
|
/test/resources/Control/slow-cat.sh
|
UTF-8
| 1,182
| 4.28125
| 4
|
[] |
no_license
|
#!/bin/bash
##
## BatchRunnerTest.sh
##
## SLOOOOW 'cat' that prints the lines of a text file in the following format:
##
## <Percentage complete>[space]<Text file line>
##
## Author: Michael Diponio <mdiponio@eng.uts.edu.au>
##
## Usage: slow-cat.sh <file> <sleep seconds> <stdout|stderr> [output]

if [ $# -lt 3 ]; then
	echo "Incorrect number of arguments, should be <file reference> <sleep time> [stdout|stderr]"
	exit 10
fi

FILE=$1
if [ ! -e "$FILE" ]; then
	echo "Requested file does not exist"
	exit 2
fi

SLEEP=$2
if [ "$SLEEP" -lt 1 ]; then
	echo "Sleep time must be greater then 1 second."
	exit 3
fi

# File descriptor the progress lines are written to (1=stdout, 2=stderr).
FD=0
case "$3" in
	stdout)
		FD=1
		;;
	stderr)
		FD=2
		;;
	*)
		echo "Redirection should be to either 'stdout' or 'stderr'"
		exit 4
		;;
esac

# FIX: $4 and $OUTPUT were tested unquoted; with fewer than four arguments
# '[ $4 == "output" ]' expanded to the invalid test '[ == output ]', printing
# a syntax error on every run (and again per loop iteration for $OUTPUT).
if [ "${4:-}" = "output" ]; then
	OUTPUT="true"
	OUTFILE=$(pwd)/results-$(date +%s).txt
fi

# Line count of the file; reading from stdin avoids the "<n> <name>" output
# format of 'wc -l FILE' (and the fragile cut on platform-dependent spacing).
RUNTIME=$(wc -l < "$FILE")

I=1
TOTAL=$((SLEEP * RUNTIME))
while [ "$I" -lt "$RUNTIME" ]; do
	PERCENTAGE=$((I * SLEEP * 100 / TOTAL))
	echo "$PERCENTAGE $(head -n $I "$FILE" | tail -n 1)" >&$FD
	if [ $FD -eq 2 ]; then
		echo "$PERCENTAGE <Random Junk>"
	fi
	if [ "${OUTPUT:-}" = "true" ]; then
		# NOTE(review): '>' truncates, so the file only ever holds the most
		# recent line; if results should accumulate this wants '>>'. Kept
		# as-is to preserve existing behaviour.
		echo "$(head -n $I "$FILE" | tail -n 1)" > "$OUTFILE"
	fi
	sleep "$SLEEP"
	I=$((I + 1))
done
exit 0
| true
|
96c06ea2814405bb10e83540db98921beb6beb6c
|
Shell
|
vegayours/icfpc2015
|
/scripts/push_solutions.sh
|
UTF-8
| 559
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the solver over every problem JSON and POST each result to the ICFP
# contest submission endpoint.
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
PROBLEMS_DIR="$DIR/../problems"
BUILD_DIR="$DIR/../release"
for problem in $(ls $PROBLEMS_DIR/*.json); do
    echo $(basename $problem)
    # Problem id is the middle token of "problem_<ID>.json".
    ID=$(echo "${problem%.*}" | awk -F "_" '{print $2}')
    RESULT_FN="result_${ID}.json"
    $BUILD_DIR/icfpc/solver $problem $ID $RESULT_FN
    # NOTE(review): the API token is hard-coded in source — consider moving
    # it to an environment variable before sharing this repository.
    curl --user :TzEFNFCbeYwqpgcDPfz0xXuynPZkXdCvIFbGFe9X1y4= -X POST -H "Content-Type: application/json" \
        --data-binary @$RESULT_FN https://davar.icfpcontest.org/teams/223/solutions
    echo ""
done
| true
|
69ae3785ae8db8e2e3c049d19f4a4f1a3d8f0dd0
|
Shell
|
ColdShadow80/MAD
|
/scripts/migrate_to_rocketmap.sh
|
UTF-8
| 5,028
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
########################################################################################################
# This is the Monocle/RDM to Rocketmap migration script. Before you run this you should run #
# OSM-rocketmap and let it configure its database. After it has built its empty database you can run #
# this script to populate your rocketmap database with the gym and pokestop info from your monocle/RDM #
# database. If you were using Monocle with MAD spawnpoints do not change, so I dump that table from #
# your monocle db and import it to your rocketmap db for you. If you have old spawnpoint info from #
# before MAD then you want to use import_allspawns.sh as well. This script does not import things like #
# controlling team/mons, or ex status, because MAD will fill this in after 1 scan. #
# #
# If you were already scanning in MAD using your Monocle database, be sure to remove version.json #
# so MAD will update your new rocketmap schema. #
# #
# Blank RocketMap schema created via https://github.com/cecpk/OSM-Rocketmap properly working with MAD #
# https://gist.github.com/sn0opy/fb654915180cfbd07d5a30407c286995i #
# #
# If you get an error like: #
# "ERROR 1364 (HY000) at line 1: Field 'enabled' doesn't have a default value #
# Then run this in mysql: SET GLOBAL sql_mode='' and run this script again. #
########################################################################################################
# Old database format (valid options: monocle/rdm)
dbtype=""
# Old (source) database connection, Monocle or RDM schema:
# old database IP:
olddbip="127.0.0.1"
# old database username:
olduser=""
# old database pass:
oldpass=""
# old database name:
olddbname=""
# old database port:
oldport="3306"
# New (destination) database connection, RocketMap schema:
# Rocketmap database IP:
newdbip="127.0.0.1"
# Rocketmap database username:
newuser=""
# Rocketmap database pass:
newpass=""
# Rocketmap database name:
newdbname=""
# Rocketmap database port:
newport="3306"
########################################################################################################
###################################HAPPY HUNTING#####KRZTHEHUNTER#######################################
########################################################################################################
# You should not edit below here unless you know what you're doing #
########################################################################################################
########################################################################################################
# Build the per-source SELECT queries and, for Monocle, carry the
# MAD-specific trs_spawn / trs_quest tables across via dump + restore.
case "$dbtype" in
	monocle) gymquery="select external_id, lat, lon, name, url, park from forts"
		stopquery="select external_id, lat, lon, name, url from pokestops"
		mysqldump -h "$olddbip" -u "$olduser" -p"$oldpass" -P "$oldport" "$olddbname" trs_spawn > /tmp/trs_spawn.sql
		mysql -NB -h "$newdbip" -u "$newuser" -p"$newpass" -P "$newport" "$newdbname" < /tmp/trs_spawn.sql
		rm /tmp/trs_spawn.sql
		mysqldump -h "$olddbip" -u "$olduser" -p"$oldpass" -P "$oldport" "$olddbname" trs_quest > /tmp/trs_quest.sql
		mysql -NB -h "$newdbip" -u "$newuser" -p"$newpass" -P "$newport" "$newdbname" < /tmp/trs_quest.sql
		rm /tmp/trs_quest.sql
		;;
	rdm) gymquery="select id, lat, lon, name, url from gym"
		stopquery="select id, lat, lon, name, url from pokestop"
		;;
	*) echo "you need to configure this script before running it" && exit
		;;
esac
# Run SQL $1 against the source (Monocle/RDM) DB; -NB = no header, raw
# tab-separated output.
oldquery(){
	mysql -NB -h "$olddbip" -u "$olduser" -p"$oldpass" -P "$oldport" "$olddbname" -e "$1"
}
# Run SQL $1 against the destination (RocketMap) DB; -NB = no header, raw
# tab-separated output.
newquery(){
	mysql -NB -h "$newdbip" -u "$newuser" -p"$newpass" -P "$newport" "$newdbname" -e "$1"
}
# Rewrite every single quote in the arguments as ' "'" ' so the value can be
# embedded inside a single-quoted MySQL string literal.
fix_quotes(){
	local rewritten
	rewritten=$(sed -e "s/'/' \"'\" '/g" <<<"$*")
	# Unquoted echo reproduces the original whitespace-collapsing behaviour.
	echo $rewritten
}
# Import gyms: the sed turns the query's tab separators into ';' so read can
# split the fields. Rows whose id already exists are skipped; otherwise a gym
# row plus its gymdetails row are inserted.
while IFS=';' read -r eid lat lon name url ;do
	[[ $(newquery "select gym_id from gym where gym_id='$eid'") == "$eid" ]] && continue
	newquery "insert into gym set gym_id='$eid', latitude='$lat', longitude='$lon'" && \
	newquery "insert into gymdetails set gym_id='$eid', name='$(fix_quotes "$name")', url='$url'"
done < <(oldquery "$gymquery"|sed 's/\x09/;/g')
# Import pokestops the same way (a single table in the RocketMap schema).
while IFS=';' read -r eid lat lon name url ;do
	[[ $(newquery "select pokestop_id from pokestop where pokestop_id='$eid'") == "$eid" ]] && continue
	newquery "insert into pokestop set pokestop_id='$eid', latitude='$lat', longitude='$lon', name='$(fix_quotes "$name")', image='$url'"
done < <(oldquery "$stopquery"|sed 's/\x09/;/g')
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.