blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
d065a62118ca85f12bd1e020294007640aec6745 | Shell | Leo1003/NCTU_CS_Coding | /SystemAdministraion/HW2-2/_submenu_file.sh | UTF-8 | 963 | 3.765625 | 4 | [] | no_license | cwd="$(pwd)"
# Interactive file-browser loop: repeatedly shows a sim_dialog menu of the
# current directory, descends into selected directories and opens selected
# files until the dialog is cancelled (non-zero exit status).
# NOTE(review): relies on $SIM_TITLE plus the helpers `rdslash` and
# `script_dir` being defined by the including script — confirm at caller.
dlg_ret=0
while [ $dlg_ret -eq 0 ]; do
    # Base dialog invocation; menu item pairs are appended below.
    dlg_args=(sim_dialog --title "'$SIM_TITLE -- File Browser'" --menu "Current Directory: $cwd" 27 80 20)
    # Word-split `ls -a` into an array (breaks on filenames with spaces).
    files=($(ls -a $cwd))
    if [ -z "$files" ]; then
        # Can't get any files
        dlg_args+=('.' 'inode/directory')
        dlg_args+=('..' 'inode/directory')
        dlg_args+=('###' '(Cannot read the directory!)')
    else
        # Add each entry as a menu item
        for i in "${files[@]}"; do
            dlg_args+=("$i" "$(file -b --mime-type "$cwd/$i")")
        done
    fi
    # Run the dialog; its stdout is the name of the selected entry.
    dlg_sel="$("${dlg_args[@]}")"
    dlg_ret=$?
    if [ $dlg_ret -eq 0 ]; then
        selpath="$(rdslash "$cwd/$dlg_sel")"
        if [ -d "$selpath" ]; then
            # Check if we can enter the directory
            ncwd="$(cd "$selpath" && pwd)"
            cwd=${ncwd:-$cwd}
        elif [ -e "$selpath" ]; then
            bash "$(script_dir)/_applet_file.sh" "$selpath"
        fi
    fi
done
| true |
636ab6dd7cbb1b2c343f58705d082c35fb15fef8 | Shell | rajeshm7910/devportal-binary | /devportal-binary-bundle-Redhat-6-x86_64/install-from-rpm-bundle.sh | UTF-8 | 4,882 | 4.1875 | 4 | [] | no_license | #!/bin/bash
###
# This is the start script for a non-networked install.
##
# Refuse to run unprivileged: every later step writes system files.
if [[ $( whoami ) != 'root' ]] ; then
    echo "$0 must be run as root (or run via sudo)."
    exit 1
fi
has_network=0
drush_home=/usr/local/share/drush
# Get directory this script is running in and put it in SCRIPT_PATH
source="${BASH_SOURCE[0]}"
while [ -h "$source" ]; do
    # resolve $SOURCE until the file is no longer a symlink
    DIR="$( cd -P "$( dirname "$source" )" && pwd )"
    source="$(readlink "$source")"
    # if $SOURCE was a relative symlink, we need to resolve it relative to the
    # path where the symlink file was located
    [[ $source != /* ]] && source="$DIR/$source"
done
script_path="$( cd -P "$( dirname "$source" )" && pwd )"
repo_path="${script_path}/devportal-repo"
# Load command line args script
source ${script_path}/lib/bash_cmd_args.sh
# Load function library
source ${script_path}/lib/bash_toolkit.sh
# NOTE(review): script_initialize, display_h1, display, $release_file and
# $script_rundate come from the sourced libraries above — not visible here.
script_initialize
# Get OS and version information.
source ${script_path}/lib/detect-os.sh
# -----------------------------------------------------
# Starting Installation
# -----------------------------------------------------
display_h1 "Starting non-networked installation ${script_rundate}"
# First line of the OS release file, for the banner.
os_info="$( cat $release_file | head -n 1 )"
display "${os_info}"
# Configure the repo
# Registers the on-disk package bundle ($repo_path) as a local package
# repository for whichever package manager this platform uses.
display_h1 "Step 1: Configure local package repository"
case $platform_variant in
    rhel)
        if [[ -f /etc/yum.repos.d/devportal.repo ]] ; then
            echo "The Dev Portal repo is already configured... "
        else
            echo "Configuring Dev Portal repo."
            # Write a file:// yum repo definition pointing at the bundle.
            (
                echo "[devportal]"
                echo "name=Dev Portal Installation Repository"
                echo "baseurl=file://${repo_path}"
                echo "enabled=1"
            ) > /etc/yum.repos.d/devportal.repo
            # yum clean all fails if enabled=0 and no other repos are available
            yum clean all
            # Disable again after the cache refresh; later installs enable it
            # explicitly (presumably via --enablerepo) — TODO confirm.
            sed -i 's/enabled=1/enabled=0/g' /etc/yum.repos.d/devportal.repo
        fi
        ;;
    suse)
        # Third column of `zypper repos` is the repo alias.
        repos=`zypper repos | tail -n +3 | cut -d "|" -f3`
        if [[ $(echo "${repos}" | grep -c devportal) -eq 1 ]]; then
            echo "The Dev Portal repo is already configured... "
        else
            echo "Configuring Dev Portal repo."
            zypper addrepo --no-gpgcheck ${repo_path} devportal
        fi
        ;;
    debian)
        # NOTE(review): `-gt 1` means a single existing entry still appends a
        # duplicate line; `-gt 0` looks intended — verify before changing.
        if [[ $( grep -c "^deb file:${repo_path}" /etc/apt/sources.list ) -gt 1 ]] ; then
            echo "The Dev Portal repo is already configured... "
        else
            echo "Configuring Dev Portal repo."
            (
                echo
                echo "## Software bundled expressly for Dev Portal installation"
                echo "deb file:${repo_path} ./"
            ) >> /etc/apt/sources.list
        fi
        ;;
esac
# Steps 2-7: install packages, drush, database, Apache, the Drupal webroot
# and security settings, then print the post-install summary.
display_h1 "Step 2: Install Apache and PHP Software Packages"
source ${script_path}/lib/install-required-pkgs.sh
source ${script_path}/lib/configure-php.sh
display_h1 "Step 3: Install drush"
# Remove any previous drush install (directory and /usr/local/bin link).
[[ -d $drush_home ]] && rm -rf $drush_home
[[ -f /usr/local/bin/drush || -h /usr/local/bin/drush ]] && rm -f /usr/local/bin/drush
mkdir -p $drush_home
tar -C $drush_home -xf ${script_path}/drush.tar >> $logfile 2>&1
ln -s ${drush_home}/drush /usr/local/bin/drush >> $logfile 2>&1
display_h1 "Step 4: Install and Configure Database"
source ${script_path}/lib/install-mysqld.sh
display_h1 "Step 5: Configure Apache Web Server"
source ${script_path}/lib/configure-apache.sh
display_h1 "Step 6: Installing Dev Portal Drupal files"
# Replace the webroot wholesale; second cp picks up dotfiles (.htaccess etc.)
# which `cp -r dir/*`-style globs would miss.
[[ -d $devportal_install_dir ]] && rm -rf $devportal_install_dir
cp -r ${script_path}/devportal-webroot $devportal_install_dir >> $logfile 2>&1
cp -r ${script_path}/devportal-webroot/.[!.]* $devportal_install_dir >> $logfile 2>&1
display "Setting Dev Portal permissions..."
webroot=${devportal_install_dir}
is_installer_running=1
source ${script_path}/lib/configure-apache-webroot-permissions.sh
# Make sure settings.php is writable
chmod 660 ${webroot}/sites/default/settings.php >> $logfile 2>&1
display_h1 "Step 7: Modifying security settings to allow incoming HTTP connections."
source ${script_path}/lib/configure-security.sh
# Final user-facing summary; ${db_*} vars come from install-mysqld.sh.
display_multiline "
--------------------------------------------------------------------------------
Dev Portal Installation Complete
--------------------------------------------------------------------------------
You are ready to configure your Dev Portal by
going to the following URL using your local web browser:
http://${portal_hostname}
Keep the following information for the rest of the install and for future
reference.
Apache Configuration
--------------------
Dev Portal URL: http://${portal_hostname}
Dev Portal web root: ${devportal_install_dir}
Database Configuration
----------------------
Dev Portal database hostname: ${db_host}
Dev Portal database port: ${db_port}
Dev Portal database name: ${db_name}
Dev Portal database user: ${db_user}
Dev Portal database password: ******* (not shown)
--------------------------------------------------------------------------------
"
| true |
a3dc266c7fccd68475d60cade0cd1bf8cfe7f307 | Shell | Miaouf/Projects | /network/src/fakeTracker/tracker.sh | UTF-8 | 196 | 3.015625 | 3 | [] | no_license | #!/bin/bash
# Minimal command server: netcat listens on localhost:8090 as a coprocess;
# the loop reads one-letter commands from the socket and replies on it.
coproc nc -l localhost 8090
# COPROC[0] is nc's stdout (data received from the client),
# COPROC[1] is nc's stdin (data sent back to the client).
while read -r cmd; do
    case $cmd in
        d) date ;;          # 'd' -> reply with the current date
        q) break ;;         # 'q' -> stop serving
        *) echo 'What?'     # anything else -> error reply
    esac
done <&"${COPROC[0]}" >&"${COPROC[1]}"
# Tear down the listener once the loop ends.
kill "$COPROC_PID"
| true |
6223b189ebc69adc14d036018632329728e7d01a | Shell | pjgg/go-rest-skel | /ci/build.env.sh | UTF-8 | 799 | 3.015625 | 3 | [] | no_license | #!/bin/bash
# Derive build metadata from git and the CI environment, then export the
# variables consumed by the rest of the build scripts:
#   GIT_TAG / GIT_BRANCH / GIT_COMMIT / GIT_COMMIT_SHORT
#   BASE_VERSION / VERSION / JOB_NAME / DOCKER_IMAGE / DOCKER / GCLOUD
#
# Fixes vs. previous revision:
#   * GIT_COMMIT and GIT_COMMIT_SHORT had their `git rev-parse` forms
#     swapped (the *_SHORT variable held the full 40-char hash).
#   * Quoted test operands so empty/odd values cannot word-split.
#   * Docker probe uses `if cmd` instead of checking $? afterwards, and
#     silences stderr so a missing docker binary does not spam the log.
export GIT_TAG=$(git describe --abbrev=0 --tags 2> /dev/null)
export GIT_BRANCH=$(basename "$(git branch -r --contains HEAD)")
export GIT_COMMIT=$(git rev-parse HEAD)
export GIT_COMMIT_SHORT=$(git rev-parse --short HEAD)

# No tag reachable -> fall back to a seed version.
if [ -z "$GIT_TAG" ]
then
    export BASE_VERSION="0.0.1";
else
    export BASE_VERSION=$GIT_TAG
fi

# Only master builds publish release versions; everything else is a snapshot.
if [ "$GIT_BRANCH" = "master" ]
then
    export VERSION=$BASE_VERSION
else
    export VERSION=$BASE_VERSION"-SNAPSHOT"
fi

# JOB_NAME is normally injected by the CI server (e.g. Jenkins).
if [ -z "${JOB_NAME:-}" ]
then
    export JOB_NAME="default"
fi

export DOCKER_IMAGE="pjgg/go-rest-skel/$JOB_NAME:$VERSION"

# Check if can run docker without sudo
if docker ps > /dev/null 2>&1; then
    export DOCKER="docker"
    export GCLOUD="sudo gcloud"
else
    export DOCKER="sudo docker"
    export GCLOUD="sudo gcloud"
fi
| true |
ece6f729519bd9ef011ad76bac4dd344f1c07cc0 | Shell | nine/osm-austria-building-coverage | /scripts/update-all.sh | UTF-8 | 2,072 | 4.09375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# current_time — emit a sortable timestamp (YYYY-MM-DD_HH:MM:SS),
# used to prefix every log line written by this script.
current_time()
{
    date '+%Y-%m-%d_%H:%M:%S'
}
# Validate CLI arguments: four positional paths/names are required.
if [ "$#" -ne 4 ]; then
    echo "Usage: ./update-all.sh <log-directory> <db-working-directory> <tiles-root-directory> <database-name>"
    exit 1
fi
log_directory=$1
log_file=${log_directory}update-all.log
# Mirror both stdout and stderr into the log file while still printing them.
exec > >(tee -a ${log_file})
exec 2> >(tee -a ${log_file} >&2)
echo "$(current_time) Starting update of OSM Austria database, building coverage tiles and coverage statistics"
# Directory this script lives in, so sibling scripts can be called reliably.
scriptdir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
db_working_directory=$2
tiles_root_directory=$3
database_name=$4
municipality_tiles_path=${tiles_root_directory}municipalities-colored/
basemap_tiles_path=${tiles_root_directory}basemap-buildings-extracted/
osm_tiles_path=${tiles_root_directory}osm-buildings-only-current/
# Pipeline: DB update -> tile update -> coverage scores -> refresh views.
# Each later stage only runs if the previous stage exited 0.
echo "$(current_time) Incremental database update: Starting."
${scriptdir}osm-db-update/osm-update-austria-incremental.sh ${db_working_directory}
exit_code=$?
echo "$(current_time) Incremental database update: Process finished with exit code ${exit_code}."
if [ ${exit_code} -eq 0 ]; then
    echo "$(current_time) Incremental tile update and scaling: Starting."
    ${scriptdir}mapnik/update-tiles.sh ${tiles_root_directory}
    exit_code=$?
    echo "$(current_time) Incremental tile update and scaling: Process finished with exit code ${exit_code}."
    if [ ${exit_code} -eq 0 ]; then
        echo "$(current_time) Incremental coverage scores update: Starting."
        ${scriptdir}coverage-scores/update-coverage.py -m ${municipality_tiles_path} -b ${basemap_tiles_path} -o ${osm_tiles_path} -d ${database_name}
        exit_code=$?
        echo "$(current_time) Incremental coverage scores update: Process finished with exit code ${exit_code}."
        # Refresh the coverage score views
        echo "$(current_time) Refreshing materialized views..."
        ${scriptdir}coverage-scores/refresh-materialized-views.py ${database_name}
        exit_code=$?
        echo "$(current_time) Refresh materialized views: Process finished with exit code ${exit_code}."
    fi
fi
| true |
3ba1ca439de274a3ed5a4cc67bbbde66b6e1e848 | Shell | gene-git/Arch-SKM | /certs-local/dkms/kernel-sign.sh | UTF-8 | 490 | 3.21875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# SPDX-License-Identifier: MIT
# Copyright (c) 2020-2023 Gene C
#
# Installed in /etc/dkms/kernel-sign.sh
#
# This is called via POST_BUILD for each module
# We use this to sign in the dkms build directory.
#
# $kernelver and $arch are presumably exported into the environment by dkms
# for POST_BUILD hooks — TODO confirm against dkms(8).
#
# Fix: quote all expansions so a path containing spaces (or an unset
# $kernelver) cannot word-split or glob in the test / command line.
SIGN="/usr/lib/modules/$kernelver/build/certs-local/sign_module.py"
if [ -f "$SIGN" ]; then
    # Sign every module produced by this dkms build.
    "$SIGN" -d "../$kernelver/$arch/module/"
else
    echo "kernel $kernelver doesn't have out of tree module signing tools"
    echo "skipping signing out of tree modules"
fi
# Propagate the status of the last command (signing result, or 0 after echo).
exit
| true |
69b08b0b012ac09cd52cf771c10e6fcf5e725f57 | Shell | sjrd/scalajs-benchmarks | /common/benchmark-runner.sh | UTF-8 | 2,864 | 3.8125 | 4 | [
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/sh
# __
# ________ ___ / / ___ __ ____ Scala.js Benchmarks
# / __/ __// _ | / / / _ | __ / // __/ (c) 2013, Jonas Fonseca
# __\ \/ /__/ __ |/ /__/ __ |/_// /_\ \
# /____/\___/_/ |_/____/_/ | |__/ /____/
# |/____/
#
# Run a benchmark against a JavaScript VM.
# set -x
# Script-wide defaults: directory of this script, repo root (via git),
# supported JS engines and build modes, and a newline separator used when
# accumulating lists in run_benchmark.
RUN_DIR="$(dirname "$0")"
ROOT_DIR="./$(git rev-parse --show-cdup)"
ENGINES="d8 node"
MODES="fastopt fullopt js"
# SEP is a literal newline (list items are joined one-per-line).
SEP='
'
# Exit cleanly on hangup/interrupt/terminate.
trap "exit" SIGHUP SIGINT SIGTERM
# die MESSAGE... — print the message on stderr and abort with status 1.
die() {
    printf '%s\n' "$*" >&2
    exit 1
}
# info LABEL [VALUE...] — print "LABEL (padded to 25 cols) : VALUE...".
# With only a label, prints the prefix and no newline (caller finishes line).
# NOTE(review): in that one-argument case the function's status is 1
# (failed `test`); callers here ignore it, but beware under `set -e`.
info()
{
    printf "%-25s : " "$1"; shift
    test $# -gt 0 && echo "$@"
}
# print_option WORD... — render a usage alternative like "[a|b|c]".
# Every space in the joined argument list becomes a '|'.
print_option()
{
    local joined="$*"
    printf '%s\n' "[${joined// /|}]"
}
# find_binary VERSION_OPTS ENGINE [CANDIDATE...]
#   Search $PATH for the first candidate binary; on success print an info
#   line with its version and set the global variable "<ENGINE>_bin" to the
#   resolved path (via eval). Prints a not-found info line otherwise.
#   Note: the first candidate doubles as the engine name.
find_binary()
{
    version_options="$1"; shift
    engine="$1"
    for bin in $@; do
        path="$(which "$bin" 2>/dev/null)"
        if test -n "$path"; then
            # Run "<path> <version_options>" to capture a version string.
            info "$engine" "$($path $version_options) [$path]"
            # Dynamic global assignment, e.g. d8_bin=/usr/bin/d8.
            eval "${engine}_bin=$path"
            return
        fi
    done
    info "$engine" "No binary found while searching \$PATH for $@"
}
# detect_engine ENGINE — locate the binary for one JS engine, choosing the
# right version flag and candidate binary names per engine.
# NOTE(review): the early-return guard tests the literal variable
# `engine_bin`, while find_binary sets e.g. `d8_bin` — this looks like a
# broken caching check that is effectively always false; confirm intent.
detect_engine()
{
    engine="$1"
    test -n "$engine_bin" && return
    case "$engine" in
    d8)	find_binary "-e print(version())" d8 ;;
    node)	find_binary "-v" node nodejs js ;;
    phantomjs)
        find_binary "-v" phantomjs ;;
    *)	die "Unknown engine: $engine"
    esac
}
# detect_engines ENGINE... — detect each requested engine in turn.
# $@ is deliberately unquoted: callers pass a newline-separated list in a
# single argument and rely on word-splitting here.
detect_engines()
{
    for engine in $@; do
        detect_engine "$engine"
    done
}
# run_benchmark_mode ENGINE BENCHMARK MODE
#   Assemble a single runnable JS file for (benchmark, mode) by
#   concatenating engine stubs, the compiled benchmark (or the reference JS
#   implementation), and the start harness — then execute it on ENGINE.
#   Silently skips engines whose binary was not detected.
run_benchmark_mode()
{
    engine="$1" benchmark="$2" mode="$3"
    # Output dir differs between sbt layouts (".js" vs "js" subproject).
    test -d "$ROOT_DIR/$benchmark/.js" && \
        out_dir="$ROOT_DIR/$benchmark/.js/target/scala-2.12" || \
        out_dir="$ROOT_DIR/$benchmark/js/target/scala-2.12"
    lib_dir="$ROOT_DIR/common"
    js="$out_dir/$benchmark.$engine-$mode.js"
    # Resolve the dynamic global set by find_binary (e.g. $d8_bin).
    engine_bin=$(eval echo \$"${engine}_bin")
    test -z "$engine_bin" && return
    {
        test -e "$lib_dir/$engine-stubs.js" &&
            cat "$lib_dir/$engine-stubs.js"
        case "$mode" in
        js) cat "$lib_dir/reference/bench.js" \
            "$lib_dir/reference/$benchmark.js" ;;
        fullopt) cat "$out_dir/$benchmark-opt.js" ;;
        fastopt) cat "$out_dir/$benchmark-fastopt.js" ;;
        *) die "Unknown mode: $mode"
        esac
        cat "$lib_dir/start-benchmark.js"
    } > "$js"
    info "$benchmark [$mode] $engine"
    # Remove benchmark prefix (e.g. DeltaBlue:) and squelch
    # PhantomJS warning
    "$engine_bin" "$js" 2>&1 | sed 's/[^:]*:\s//' | grep -v phantomjs
}
# run_benchmark [ENGINE|MODE]...
#   Entry point: benchmark name is taken from the directory this script is
#   invoked from; arguments select engines and modes (defaults: d8 +
#   fullopt). Runs every (mode, engine) combination.
run_benchmark()
{
    benchmark="$(basename "$(cd "$RUN_DIR" && pwd)")"
    engines=
    modes=
    # Classify each argument as a mode or an engine name.
    while test $# != 0; do
        arg="$1"; shift
        case "$arg" in
        fastopt|fullopt|js)
            modes="$modes$SEP$arg" ;;
        d8|node|phantomjs)
            engines="$engines$SEP$arg" ;;
        *)
            die "Usage: $0 $(print_option $ENGINES) $(print_option $MODES)"
        esac
    done
    # Apply defaults, and deduplicate the newline-separated lists.
    test -z "$engines" && engines="d8" ||
        engines="$(echo "$engines" | sort -u)"
    test -z "$modes" && modes="fullopt" ||
        modes="$(echo "$modes" | sort -u)"
    detect_engines "$engines"
    for mode in $modes; do
        for engine in $engines; do
            run_benchmark_mode "$engine" "$benchmark" "$mode"
        done
    done
}
| true |
105b987fd899949859dd095b233929a09745eaa3 | Shell | HiteshReddy/AuthenticationAPI | /key_extract.sh | UTF-8 | 2,169 | 4.125 | 4 | [] | no_license | #!/usr/bin/env bash
# CLI option values, filled in by the argument loop below.
KEYSTORE_PATH=""      # --storepath: JCEKS keystore file
KEYSTORE_PASS=""      # --storepass: keystore password
KEYSTORE_KEY_PASS=""  # --keypass: private-key password
CERT_ALIAS=""         # --certalias: alias of the cert/key entry
OUTPUT_NAME=""        # --outname: basename for generated .pem/.key files
# print_help — write the command-line usage synopsis to stdout.
print_help() {
    local usage='USAGE: key_extract.sh --storepath PATH_TO_JKS --storepass JKS_PASSWORD --keypass PRIVATE_KEY_PASSWORD --certalias CERTIFICATE_ALIAS --outname OUTPUT_NAME'
    printf '%s\n' "$usage"
}
# check_required_parameter FLAG VALUE
#   Guard clause: returns successfully when VALUE is non-empty; otherwise
#   reports the missing FLAG, prints usage help and aborts the script.
check_required_parameter() {
    [ -n "${2:-}" ] && return 0
    echo "The '$1' parameter must be provided!"
    print_help
    exit 1
}
# No arguments at all -> show usage and quit.
if [ $# -eq 0 ]; then
    print_help
    exit 1
fi

# Manual long-option parser (getopts only handles short flags); unknown
# arguments are stashed in POSITIONAL and restored afterwards.
POSITIONAL=()
while [[ $# -gt 0 ]]
do
    key="$1"
    case $key in
        --storepath)
            KEYSTORE_PATH="$2"
            shift # past argument
            shift # past value
            ;;
        --storepass)
            KEYSTORE_PASS="$2"
            shift # past argument
            shift # past value
            ;;
        --keypass)
            KEYSTORE_KEY_PASS="$2"
            shift # past argument
            shift # past value
            ;;
        --certalias)
            CERT_ALIAS="$2"
            shift # past argument
            shift # past value
            ;;
        --outname)
            OUTPUT_NAME="$2"
            shift # past argument
            shift # past value
            ;;
        *) # unknown option
            POSITIONAL+=("$1") # save it in an array for later
            shift # past argument
            ;;
    esac
done
set -- "${POSITIONAL[@]}" # restore positional parameters

# Every option is mandatory; abort with usage on the first missing one.
check_required_parameter "--storepath" $KEYSTORE_PATH
check_required_parameter "--storepass" $KEYSTORE_PASS
check_required_parameter "--keypass" $KEYSTORE_KEY_PASS
check_required_parameter "--certalias" $CERT_ALIAS
check_required_parameter "--outname" $OUTPUT_NAME
# Step 1: export the certificate from the JCEKS keystore in DER form.
keytool -export \
    -storetype JCEKS \
    -alias $CERT_ALIAS \
    -file $OUTPUT_NAME.der \
    -keystore $KEYSTORE_PATH \
    -storepass $KEYSTORE_PASS

# Step 2: convert the DER certificate to PEM.
openssl x509 \
    -inform der \
    -in $OUTPUT_NAME.der \
    -out $OUTPUT_NAME.pem

# Step 3: copy the key entry into an intermediate PKCS12 keystore
# (openssl cannot read JCEKS directly).
keytool -importkeystore \
    -srcstoretype JCEKS \
    -srckeystore $KEYSTORE_PATH \
    -srcalias $CERT_ALIAS \
    -srcstorepass $KEYSTORE_PASS \
    -srckeypass $KEYSTORE_KEY_PASS \
    -destkeystore $OUTPUT_NAME.p12 \
    -deststoretype PKCS12 \
    -deststorepass $KEYSTORE_KEY_PASS \
    -destkeypass $KEYSTORE_KEY_PASS \
    -noprompt

# Step 4: extract the unencrypted private key (-nodes) from the PKCS12.
openssl pkcs12 \
    -in $OUTPUT_NAME.p12 \
    -nodes \
    -nocerts \
    -out $OUTPUT_NAME.key \
    -password pass:$KEYSTORE_KEY_PASS

# Remove intermediate artifacts; .pem and .key remain.
rm $OUTPUT_NAME.der
rm $OUTPUT_NAME.p12
| true |
fb1d8c0ae786e7bb054e992a025d5565fe5191f8 | Shell | viswanc/ezt-try2 | /dep/probe/resetProbe.sh | UTF-8 | 2,115 | 4.03125 | 4 | [
"MIT"
] | permissive | #!/bin/sh
# A util script to get a list of running pods of the given service.
# Run from the script's own directory so relative paths below resolve.
cd $(dirname "$0")
# Data
service_name="$1"
service_dir="./config/$service_name"
# Imports
source ../utils/helpers.sh
# Helpers
# reset_pod POD_ID SERVICE_NAME
#   Push fresh Envoy config into a pod, bounce the Envoy sidecar, then
#   restart the service container itself. Each stop is polled until the
#   target process disappears from `ps` before moving on.
reset_pod() {
    pod_id="$1"
    service_name="$2"
    echo "Resetting: $pod_id" >&2
    # #Note: For Envoy config changes to be reloaded, the configMap has to be updated.
    kubectl cp $service_dir/res/envoy-config.yaml $pod_id:/app/probe/envoy-config.yaml -c envoy-sidecar # #Note: ConfigMaps aren't used as they couldn't be reloaded after changes.
    echo "Killing Envoy..." >&2
    # Poll until no envoy process remains in the sidecar container.
    # NOTE(review): `filter` presumably comes from helpers.sh — confirm.
    while [ ! -z "$(kubectl exec -it $pod_id --tty=false -c envoy-sidecar -- ps -A | filter envoy)" ]
    do
        kubectl exec -it $pod_id --tty=false -c envoy-sidecar -- pkill -x envoy > /dev/null 2>&1
        sleep 1
    done
    echo "Starting Envoy..." >&2
    # Launch Envoy in the background with the freshly-copied config.
    kubectl exec -it $pod_id --tty=false -c envoy-sidecar -- /usr/local/bin/envoy -c /app/probe/envoy-config.yaml --service-cluster p-$service_name -l debug --log-path /app/probe/envoy.log > /dev/null 2>&1 &
    echo "Waiting for Envoy..." >&2
    # Block until the envoy process shows up.
    while [ -z "$(kubectl exec -it $pod_id --tty=false -c envoy-sidecar -- ps -A | filter envoy)" ]
    do
        sleep 1
    done
    echo "Restarting the service..." >&2
    kubectl cp ./res/bg.sh $pod_id:/app/probe/bg.sh -c p-$service_name
    sh $service_dir/copyFiles.sh $pod_id $service_name
    echo "Stopping the service..."
    while [ "$(kubectl exec -it $pod_id --tty=false -c p-$service_name -- ps -A | filter $service_name)" ]
    do
        kubectl exec -it $pod_id --tty=false -c p-$service_name -- pkill -f $service_name > /dev/null 2>&1
        sleep 1
    done
    echo "Starting the service..."
    kubectl exec -it $pod_id --tty=false -c p-$service_name -- /bin/sh /app/probe/bg.sh /app/probe/service.log /bin/$service_name > /dev/null 2>&1 & # #Note: The markers at the end are to discard the STD streams and to background the process.
}
# Main
# Reset every pod of service "p-$1", one at a time; the background-then-wait
# pair keeps the loop's stdin/stdout clean while still serializing the work.
n=1
for pod_id in `sh ../utils/getPods.sh p-$1` # #Note: while command breaks in case of an error in the loop.
do
    echo "Pod: $n"
    n="$(($n+1))"
    reset_pod $pod_id $service_name &
    wait
done
| true |
4d5ab116f47e6d76719f6da6850ae771796558a6 | Shell | packeted/OpenSpeechPlatform-UCSD | /Software/Build-Scripts/libosp | UTF-8 | 434 | 2.796875 | 3 | [
"BSD-2-Clause"
] | permissive | #!/bin/bash
echo
echo
echo "This script will use 'sudo' throughout at various times, invoke this"
echo "as a user with sudo priveleges and enter your user password when prompted"
echo
echo
OS=$(uname)
echo Build and install librtmha
# NOTE(review): `cd -` jumps to $OLDPWD, which is unset/odd when this script
# is run fresh — looks like a leftover; presumably the intent was to stay in
# the repo root. Verify before removing.
cd -
# Out-of-source CMake build + install for librtmha.
pushd librtmha
mkdir -p build
cd build
cmake ..
make && sudo make install
popd
echo Build and install osp process
# Same build/install dance for the RTMHA process.
pushd RTMHA
mkdir -p build
cd build
cmake ..
make && sudo make install
popd
| true |
bacd6616d39ce20bde82678c4290afb11a1793c7 | Shell | kgeil/Alienvault-Demo_scripts | /pcaps/inject_pcaps.sh | UTF-8 | 335 | 2.78125 | 3 | [] | no_license | #!/bin/bash
# JS
# Replay demo pcaps forever onto a dummy interface so a sensor (AlienVault)
# sees "live" traffic; addresses are rewritten into the demo subnets.
#Just in case....
modprobe dummy
ifconfig dummy0 up
ifconfig dummy0 promisc
# Work from the directory containing this script (and the pcaps).
SCRIPTPATH=$( cd $(dirname $0) ; pwd -P )
cd $SCRIPTPATH
# Endless loop: replay each pcap at 10 packets/sec, then idle 20 minutes.
while true
do
    for pcap in `ls *.pcap`
    do
        tcpreplay-edit -T nano -N '10.0.0.0/8:192.168.100.76/30,192.168.0.0/16:192.168.100.74/28' -i dummy0 --pps=10 $pcap
    done
    sleep 1200
done
| true |
bcc880fcadeceb638ffca09569c98148d7a4ad84 | Shell | kenanigans2/bashenv | /bash_profile | UTF-8 | 3,394 | 3.859375 | 4 | [] | no_license | #!/usr/bin/env bash
# bash_profile
#
#-:LOAD SHELL CONFIG
# Source the login profile first (if readable) so PATH/env land before
# the bashenv-specific setup below.
#shellcheck source=/Users/kend/.profile
[ -r ~/.profile ] && . ~/.profile
# __setBashEnvVariablesAndOptions — configure shell options and the shared
# constants used by the rest of this profile.
#
# Fix: the color constants were previously created with `declare -r`, which
# *inside a function* makes them local — so __reportErr (which reads
# ERROR_CLR/CLR_RST) always saw empty strings. `declare -gr` creates them as
# global read-only constants as apparently intended; the `[[ -v ]]` guards
# keep re-sourcing the profile from tripping over the readonly flag.
#
# NOTE(review): the color literals may have lost their leading ESC byte in
# transit; expected values look like $'\e[31;1m' / $'\e[0;0m' — verify.
__setBashEnvVariablesAndOptions () {
	# cdable_vars: `cd varname`; extglob: extended patterns; globstar: `**`
	shopt -s cdable_vars extglob globstar
	[[ -v ERROR_CLR ]] || declare -gr ERROR_CLR='[31;1m'
	[[ -v CLR_RST ]] || declare -gr CLR_RST='[0;0m'
	# fc/history editor falls back to vi when $EDITOR is unset.
	export FCEDIT=${EDITOR:-vi}
}
# __getBashEnvDir — print the directory containing the *target* of this
# sourced file. Resolves a symlink by parsing the " -> " arrow out of
# `ls -l` output (awk with " -> " as field separator); when the file is not
# a symlink, $NF is the whole ls line and dirname degrades accordingly.
__getBashEnvDir () {
	dirname "$(find "${BASH_SOURCE[0]}" -exec /bin/ls -l {} \; | awk -F" -> " '{print $NF}')"
	return
}
# __printHeading MESSAGE...
#   Print the message surrounded by blank lines, every line indented two
#   spaces. `echo -e` is deliberate: callers embed literal "\n" sequences
#   that must expand. Errors out (via ${..:?}) when called with no message.
__printHeading () {
	local heading="$*"
	echo -e "\n${heading:?}\n" | sed 's/^/  /'
	return
}
# __printParagraph [-l] [-t] [-i LEVEL] [-b CHAR] MESSAGE...
#   Format a paragraph: indent each line LEVEL times (two spaces per level,
#   optionally prefixed with bullet CHAR), strip leading/trailing blank
#   lines, and optionally add a leading (-l) / trailing (-t) empty line.
#   Returns 1 on unknown option, 2 on missing option argument or no message.
__printParagraph () {
	#
	#-:SET INDENT LEVEL
	#	- default=1, option `-i' or `-l' sets to `$OPTARG'
	#
	local msg bulletChar
	local -i OPTIND INDENT_LEVEL LEADING_SPACE TRAILING_SPACE
	msg="${*}" bulletChar=''
	# LEADING_SPACE/TRAILING_SPACE: 0 means "emit the blank line".
	OPTIND=1 INDENT_LEVEL=1 LEADING_SPACE=1 TRAILING_SPACE=1
	while getopts :lti:b: opt; do
		case ${opt} in
			b)
				bulletChar="${OPTARG}"
				;;
			i)
				#
				#-:SPECIFY INDENT LEVEL
				#	- otherwise, default is 1
				#
				INDENT_LEVEL=${OPTARG}
				;;
			l)
				#
				#-:SPECIFY LEADING EMPTY LINE
				LEADING_SPACE=0
				;;
			t)
				#
				#-:SPECIFY TRAILING EMPTY LINE
				TRAILING_SPACE=0
				;;
			:)
				return 2
				;;
			\?)
				return 1
				;;
		esac
	done \
		&& shift $((OPTIND-1))
	#
	#-:IF NO ARGS AFTER OPTIONS, ERROR
	(( $# < 1)) && return 2
	#
	#-:SET MSG TO REST OF ARGS
	msg="${*}"
	#
	#-:STRIP LEADING & TRAILNG EMPTY LINES
	[[ "$(echo "${msg:?}" | sed -n '$p')" =~ ^[[:blank:]]*$ ]] \
		&& msg="$(echo "${msg}" | sed '$d')"
	[[ "$(echo "${msg:?}" | sed -n '1p')" =~ ^[[:blank:]]*$ ]] \
		&& msg="$(echo "${msg}" | sed '1d')"
	#
	#-:PROCESS `$msg' WITH INDENTS
	# Each pass prepends two spaces (plus optional bullet) to every line.
	while ((INDENT_LEVEL > 0)); do
		#shellcheck disable=2001
		msg="$(echo "${msg}" | sed "s:^:  ${bulletChar:+$bulletChar }:")"
		((INDENT_LEVEL-=1))
	done
	#
	#-:FORMAT LEADING/TRAILING EMPTY LINES
	(( LEADING_SPACE == 0 )) && msg="\\n${msg}"
	(( TRAILING_SPACE == 0 )) && msg="${msg}\\n"
	#
	#-:OUTPUT `$msg'
	# -e so the \n markers added above (and any in the input) expand.
	echo -e "${msg}"
	return
}
# __reportErr MESSAGE... — print a colored "ERROR:" heading plus the message
# on stderr via __printHeading; returns 1 when called with no message.
# NOTE(review): ERROR_CLR/CLR_RST are declared with `declare -r` *inside*
# __setBashEnvVariablesAndOptions, which makes them local to that function —
# here they expand empty, so the coloring silently does nothing. Confirm
# and promote them to globals if color was intended.
__reportErr () {
	local msg
	msg="${*:-NULL}"
	if [[ "${msg}" != 'NULL' ]]; then
		# Literal \n survives into the string; __printHeading's echo -e expands it.
		msg="${ERROR_CLR}ERROR:${CLR_RST}\\n${msg:?}"
		__printHeading "${msg}" >&2
	else
		return 1
	fi
	return
}
#
#-:SET SHELL VARIABLES
{
	__setBashEnvVariablesAndOptions
	# Resolve and freeze the bashenv directory once per session.
	if [[ -z "${bashenv:-}" ]]; then
		#shellcheck disable=2155
		declare -xr bashenv="$(__getBashEnvDir)"
	fi
}
#
#-:LOAD SHARED CONFIG IN BASHRC
{
	# Only follow ~/.bashrc when it is the expected readable symlink.
	#shellcheck source=/Users/kend/.bashrc
	[[ -L ~/.bashrc && -r ~/.bashrc ]] \
		&& . ~/.bashrc
}
#
#-:PATH
{
	export PS1='[\[\e[35;1m\]\W\[\[\e[0;0m\]]\$ '
	# Prepend /usr/local/bin only when it exists and is not already on PATH.
	if
		! echo -e "${PATH//:/\\n}" | grep -q "\/usr\/local\/bin" \
			&& [[ -d /usr/local/bin ]]
	then
		PATH="/usr/local/bin${PATH:+:$PATH}"
	fi
}
#
#-:HANDLE VERBOSE OUTPUT
{
	# Verbosity 0 triggers the `servers` report, then the knob is cleared.
	# NOTE(review): `servers` and `hr` are external/aliased commands not
	# defined in this file — confirm they exist before login shells run this.
	if [[ -n ${bashenv_debug_verbosity} ]] \
		&& (( bashenv_debug_verbosity == 0 )); then
		servers
	fi
	[[ -n "${bashenv_debug_verbosity:-}" ]] \
		&& unset bashenv_debug_verbosity
}
hr -t
| true |
5e9ab0ee4a572384e9e118899d6e1e59670d97ef | Shell | wskplho/ariatemplates | /scripts/attester-nophantom.sh | UTF-8 | 256 | 2.59375 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | #!/bin/bash
# runAttester BROWSER — run the attester test suite with PhantomJS disabled,
# driving the given robot browser instead.
runAttester() {
    node node_modules/attester/bin/attester.js test/attester-nophantom.yml --env package.json --phantomjs-instances 0 --robot-browser "$@"
}
# CI (Travis) has Firefox available; local runs default to Chrome.
if [ "$TRAVIS" = "true" ]; then
    runAttester "Firefox"
else
    runAttester "Chrome"
fi
| true |
44bccb2b38164130144f0b50988aba373ac3e879 | Shell | bitnodesnet/daemons | /services/linux/zetacoin | UTF-8 | 2,171 | 3.65625 | 4 | [
"Unlicense"
] | permissive | #!/bin/sh
#
# $FreeBSD: $
#
# PROVIDE: zetacoin
# REQUIRE: LOGIN
# KEYWORD: shutdown
#
# Add the following lines to /etc/rc.conf.local or /etc/rc.conf
# to enable this service:
#
# zetacoin_enable (bool): Set to NO by default.
# Set it to YES to enable zetacoin.
# zetacoin_config (path): Set to /usr/local/etc/zetacoin.conf
# by default.
# zetacoin_user: The user account zetacoin daemon runs as
# It uses 'root' user by default.
# zetacoin_group: The group account zetacoin daemon runs as
# It uses 'wheel' group by default.
# zetacoin_datadir (str): Default to "/var/db/zetacoin"
# Base data directory.

# Standard FreeBSD rc.d boilerplate: pull in the framework, name the
# service, and load rc.conf overrides.
. /etc/rc.subr

name=zetacoin
rcvar=zetacoin_enable

load_rc_config $name

# Defaults, overridable from rc.conf via the variables documented above.
: ${zetacoin_enable:=NO}
: ${zetacoin_config=/data/nodes/db/zetacoin/zetacoin.conf}
: ${zetacoin_datadir=/data/nodes/db/zetacoin}
: ${zetacoin_user="root"}
: ${zetacoin_group="wheel"}

required_files=${zetacoin_config}

# rc.subr hooks: daemon binary, its CLI companion, pidfile and overrides.
command=/usr/local/bin/zetacoind
cli_command=/usr/local/bin/zetacoin-cli

zetacoin_chdir=${zetacoin_datadir}
pidfile="${zetacoin_datadir}/zetacoind.pid"
stop_cmd=zetacoin_stop
command_args="-conf=${zetacoin_config} -datadir=${zetacoin_datadir} -noupnp -daemon -pid=${pidfile}"
start_precmd="${name}_prestart"

# Extra "reindex" subcommand in addition to start/stop/etc.
reindex_cmd=zetacoin_reindex
extra_commands="reindex"
# Create the data directory, hand it to the service user/group, and add the
# ~/.zetacoin symlink the daemon expects.
zetacoin_create_datadir()
{
	echo "Creating data directory"
	# eval lets rc.conf values containing variable references expand.
	eval mkdir -p ${zetacoin_datadir}
	[ $? -eq 0 ] && chown -R ${zetacoin_user}:${zetacoin_group} ${zetacoin_datadir}
	ln -s ${zetacoin_datadir} /.zetacoin
}
# start_precmd hook: make sure the data directory exists before the daemon
# launches; abort the start (return 1) if it cannot be created.
zetacoin_prestart()
{
	if [ ! -d "${zetacoin_datadir}/." ]; then
		zetacoin_create_datadir || return 1
	fi
}
# Guard: exit unless the pidfile names a live zetacoind process.
# check_pidfile (from rc.subr) prints the pid; empty/"0" means not running.
zetacoin_requirepidfile()
{
	if [ ! "0`check_pidfile ${pidfile} ${command}`" -gt 1 ]; then
		echo "${name} not running? (check $pidfile)."
		exit 1
	fi
}
# stop_cmd override: ask the daemon to shut down via its RPC CLI (clean
# database close) instead of signalling it, then wait for the pid to exit.
zetacoin_stop()
{
	zetacoin_requirepidfile

	echo "Stopping ${name}."
	eval ${cli_command} -conf=${zetacoin_config} -datadir=${zetacoin_datadir} stop
	wait_for_pids ${rc_pid}
}
# "reindex" subcommand: stop a running daemon (unless `onereindex`-style
# fast mode), then relaunch it with -reindex to rebuild the block index.
zetacoin_reindex()
{
	if [ -z "$rc_fast" -a -n "$rc_pid" ]; then
		zetacoin_stop
	fi

	echo "Reindexing ${name} blockchain."
	command_args="${command_args} -reindex"
	eval ${command} ${command_args}
}

# Dispatch start/stop/reindex/etc. through the rc.subr framework.
run_rc_command "$1"
| true |
5be3e9b6d5d48f9d1424c5fbe1c6159fd1b5215c | Shell | waqasalam/xorp.ct | /other/testbed/tools/xorp.sh | UTF-8 | 1,107 | 3.921875 | 4 | [] | no_license | #!/bin/sh
# xorp
#
# $XORP: other/testbed/tools/xorp.sh,v 1.1.1.1 2002/12/11 23:55:13 hodson Exp $
#
# Installed in /usr/local/etc/rc.d/ on testbed machines.
#
# NOTE(review): `echo -e` under #!/bin/sh is not portable — it happens to
# work on the BSD sh these testbeds run; confirm before reusing elsewhere.
# Set the time from www.icir.org
echo -e "\nXORP testbed configuration"
if [ -e /usr/sbin/ntpdate ]
then
    echo "Setting time time"
    /usr/sbin/ntpdate -b www.icir.org
fi
TESTBEDDIR="/usr/local/xorp/testbed"
if [ -d ${TESTBEDDIR} ]
then
    cd ${TESTBEDDIR}
    PATH=$PATH:/usr/local/bin
    # Tooling that must all be present before interfaces are configured.
    PYTHON=/usr/local/bin/python
    SCRIPT=xtifset.py
    UTILS=xtutils.py
    XML=xtxml.py
    VARS=xtvars.py
    CONFIG=config.xml
    HOSTS_TEMPLATE=hosts.template
    HOSTS=/etc/hosts
    if [ -x ${PYTHON} -a -x ${SCRIPT} -a -f ${XML} -a -f ${UTILS} -a \
        -f ${CONFIG} -a -f ${VARS} ]
    then
        echo -e "Configure testbed interfaces"
        HOST=`uname -n`
        # xtifset -n checks whether this host is managed by the testbed config.
        if ! ./${SCRIPT} -n $HOST -c ${CONFIG}
        then
            echo -e "This host is too smart to be configured"
        else
            # -i: configure interfaces; -r: configure routes.
            ./${SCRIPT} -i -c ${CONFIG}
            ./${SCRIPT} -r -c ${CONFIG}
        fi
        if [ -f ${HOSTS_TEMPLATE} ]
        then
            echo "Creating /etc/hosts"
            # Template plus generated host entries (-H) become /etc/hosts.
            ./${SCRIPT} -H -c ${CONFIG} | \
                cat ${HOSTS_TEMPLATE} - > ${HOSTS}
        fi
    fi
fi
| true |
eb59efb50c09eba634f6e0c37bbc4f7fe34bf177 | Shell | Snoop05/raspberrypi | /boot-archlinux.sh | UTF-8 | 418 | 3.25 | 3 | [] | no_license | #!/bin/bash
# Switch the Raspberry Pi boot partition to the Arch Linux config files.
# Flow: when already root (EUID 0) do the work; when not root, the second
# block re-invokes this same script as root via su.
if (($EUID < 1)); then
    # Boot partition is mounted read-only by default; make it writable,
    # back up the current config, then install the Arch variants.
    mount -o remount,rw /flash
    cp /flash/config.txt /flash/config.bak
    cp /flash/cmdline.txt /flash/cmdline.bak
    cp /flash/config-archlinux.txt /flash/config.txt
    cp /flash/cmdline-archlinux.txt /flash/cmdline.txt
fi
if (($EUID != 0)); then
    echo "This script must be run as root!"
    echo "Type your root password (default is 'root')"
    su root -c "sh ${0}"
fi
| true |
3e74a3a300a26722788c5a498dff71b767624cdf | Shell | rupang818/cs246 | /project2/p2_test.sh | UTF-8 | 4,991 | 3.046875 | 3 | [] | no_license | #!/bin/bash
# Grading harness configuration: benchmark queries (URL-encoded), scratch
# directory, and the files every submission must contain.
queries=( "information%20retrieval" "the%20matrix" "algebra" "elasticity" "elizabeth" )
TMP_DIR=/tmp/project2/
REQUIRED_FILES="build.sh build.gradle install-plugin.sh task3a.sh"

# Expected elasticsearch score lines for task 3a, one group per query above;
# compared with diff -w against the student's task3a.sh output.
task3a='"max_score" : 89.30481,
"_score" : 89.30481,
"_score" : 86.686066,
"_score" : 86.29083,
"_score" : 86.09051,
"_score" : 85.67467,
"_score" : 85.61694,
"_score" : 84.22537,
"_score" : 83.28606,
"_score" : 82.99002,
"_score" : 80.8033,
"max_score" : 40.151104,
"_score" : 40.151104,
"_score" : 38.99555,
"_score" : 38.77317,
"_score" : 38.37591,
"_score" : 36.38686,
"_score" : 36.263447,
"_score" : 36.263447,
"_score" : 36.249115,
"_score" : 36.196007,
"_score" : 36.13217,
"max_score" : 91.52402,
"_score" : 91.52402,
"_score" : 90.47272,
"_score" : 75.96907,
"_score" : 71.13267,
"max_score" : 140.12831,
"_score" : 140.12831,
"_score" : 100.34806,
"_score" : 77.94006,
"max_score" : 98.82846,
"_score" : 98.82846,
"_score" : 81.944954,
"_score" : 79.832344,
"_score" : 79.41739,
"_score" : 78.47116,
"_score" : 78.304855,
"_score" : 78.25241,
"_score" : 78.155525,
"_score" : 77.41552,
"_score" : 77.25188,'
# usage
if [ $# -ne 1 ]
then
    echo "Usage: $0 project1.zip"
    exit
fi

# Grading must happen inside the class VM for reproducible scores.
if [ `hostname` != "cs246" ]; then
    echo "ERROR: You need to run this script within the class virtual machine"
    exit
fi

ZIP_FILE=$1

# clean any existing files
rm -rf ${TMP_DIR}
mkdir ${TMP_DIR}

# unzip the submission zip file
if [ ! -f ${ZIP_FILE} ]; then
    echo "ERROR: Cannot find $ZIP_FILE"
    rm -rf ${TMP_DIR}
    exit 1
fi
unzip -q -d ${TMP_DIR} ${ZIP_FILE}
if [ "$?" -ne "0" ]; then
    echo "ERROR: Cannot unzip ${ZIP_FILE} to ${TMP_DIR}"
    rm -rf ${TMP_DIR}
    exit 1
fi

# change directory to the grading folder
cd ${TMP_DIR}

# check the existence of the required files
for FILE in ${REQUIRED_FILES}
do
    if [ ! -f ${FILE} ]; then
        echo "ERROR: Cannot find ${FILE} in the root folder of your zip file"
    fi
done
# Fetch the corpus, benchmark relevance data, and benchmark driver.
mkdir data
echo "Getting files..."
curl -s "http://oak.cs.ucla.edu/classes/cs246/projects/project2/simplewiki-abstract.json" > data/simplewiki-abstract.json
curl -s "http://oak.cs.ucla.edu/classes/cs246/projects/project2/benchmark.txt" > data/benchmark.txt
curl -s "http://oak.cs.ucla.edu/classes/cs246/projects/project2/benchmark.sh" > benchmark.sh

# Start from a clean elasticsearch state.
echo "Deleting any old indexes..."
curl -s -XDELETE 'localhost:9200/*?pretty' &> /dev/null
echo

echo "Running gradle assemble..."
gradle assemble
if [ $? -eq 0 ]
then
    echo "SUCCESS!!"
else
    echo "Error: Gradle build FAILED."
    rm -rf ${TMP_DIR}
    exit 1
fi

# Remove any previously-installed plugins, then install the submission's.
echo "Installing plugin..."
PLUGINS=`/usr/share/elasticsearch/bin/elasticsearch-plugin list`
for PLUGIN in ${PLUGINS}
do
    echo "password" | sudo -S /usr/share/elasticsearch/bin/elasticsearch-plugin remove ${PLUGIN}
done
chmod +x install-plugin.sh
echo "password" | sudo -S ./install-plugin.sh

# Poll the REST endpoint for up to 3 minutes while elasticsearch restarts.
echo "Waiting for elasticsearch to restart..."
for i in `seq 1 180`;
do
    curl -s 'localhost:9200' &> /dev/null
    if [ $? -eq 0 ]; then
        break;
    else
        sleep 1;
    fi
done
curl -s 'localhost:9200' &> /dev/null
if [ $? -ne 0 ]; then
    echo "Error: Elasticsearech is not responding for 3 minutes."
    rm -rf ${TMP_DIR}
    exit 1
fi

# Build the index with the submission's build.sh, then flush to disk.
echo "Running build.sh..."
chmod +x build.sh
./build.sh &> /dev/null
curl -s -XPOST 'localhost:9200/_flush?pretty' > /dev/null

chmod +x benchmark.sh
echo
# Each task is graded by running benchmark.sh and grepping for the exact
# expected MAP ("Average") score; any mismatch aborts with a cleanup.
echo "Testing task1a..."
./benchmark.sh task1a | grep Average | grep 0.2551 &> /dev/null
if [ $? -eq 0 ]
then
    echo "SUCCESS!!"
else
    echo "Error: Results from task1a are incorrect."
    rm -rf ${TMP_DIR}
    exit 1
fi

echo
echo "Testing task1b..."
./benchmark.sh task1b | grep Average | grep 0.2579 &> /dev/null
if [ $? -eq 0 ]
then
    echo "SUCCESS!!"
else
    echo "Error: Results from task1b are incorrect."
    rm -rf ${TMP_DIR}
    exit 1
fi

echo
echo "Testing task2..."
./benchmark.sh task2 | grep Average | grep 0.2569 &> /dev/null
if [ $? -eq 0 ]
then
    echo "SUCCESS!!"
else
    echo "Error: Task 2 parameters are not optimal."
    rm -rf ${TMP_DIR}
    exit 1
fi

chmod +x task3a.sh
echo
echo "Testing task3a..."
# Collect score lines for every query and diff (whitespace-insensitive)
# against the expected $task3a transcript.
for query in "${queries[@]}"
do
    ./task3a.sh $query | grep score &>> task3a.txt
done
diff -w task3a.txt <(echo "$task3a") &> /dev/null
if [ $? -eq 0 ]
then
    echo "SUCCESS!"
else
    echo "ERROR: Query rankings from task3a incorrect."
    rm -rf ${TMP_DIR}
    exit 1
fi

echo
echo "Testing task3b..."
./benchmark.sh task3b | grep Average | grep 0.2227 &> /dev/null
if [ $? -eq 0 ]
then
    echo "SUCCESS!!"
else
    echo "Error: Results from task3b are incorrect."
    rm -rf ${TMP_DIR}
    exit 1
fi

# clean up
rm -rf ${TMP_DIR}
exit 0
| true |
7076ea0cc6e83180ccf409faf3f64ec4b5c89a64 | Shell | mpavlase/python-talk | /_packages/oneliner/demo.sh | UTF-8 | 413 | 3.109375 | 3 | [] | no_license | #!/bin/bash
# Demo: the python `oneliner` module as an awk/sed replacement.
# Inside -ne expressions, `_` is the current line and NR the line number.
# we all know 'nl'
nl example.py
# like 'nl'
python -m oneliner -ne '"%6d %s" % (NR, _)' example.py
# print filename before line
python -m oneliner -ne '"%s %6d %s" % (FN, NR, _)' example.py
# make output UPPERCASE
python -m oneliner -ne '"%6d %s" % (NR, _.upper())' example.py
# replace numbers to NUMBER
python -m oneliner -m re -ne "'%6d %s' % (NR, re.sub('[0-9]+', 'NUMBER', _))" example.py
| true |
5450309c97289c25308210967d671182e44314d6 | Shell | FauxFaux/debian-control | /v/vigor/vigor_0.016-27_amd64/postinst | UTF-8 | 447 | 3.09375 | 3 | [] | no_license | #! /bin/sh
# Debian maintainer script (postinst): abort on any command failure.
set -e

# Drop one vi-style alternative registration for /usr/bin/vigor.
remove_alternative () {
    update-alternatives --remove $1 /usr/bin/vigor
}

# Upgrading from a pre-0.016-26 package: those versions registered vigor
# as vi/view/editor alternatives; clean them up. lt-nl = "earlier than,
# and not a fresh install".
if [ "$1" = configure ] && dpkg --compare-versions "$2" lt-nl 0.016-26; then
    remove_alternative vi
    remove_alternative view
    remove_alternative editor
fi

# Automatically added by dh_installmenu/11.5.3
if [ "$1" = "configure" ] && [ -x "`which update-menus 2>/dev/null`" ]; then
    update-menus
fi
# End automatically added section

exit 0
| true |
4e5736bb14ebf642c9b4433d8c40a6e827d2deac | Shell | bergwolf/kata-containers | /tools/packaging/static-build/firecracker/build-static-firecracker.sh | UTF-8 | 1,378 | 3.4375 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0

# Download the pinned firecracker release tarball for this architecture,
# verify its published sha256, and unpack it. URL/version default to the
# values recorded in the kata versions database (get_from_kata_deps).
set -o errexit
set -o nounset
set -o pipefail

script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "${script_dir}/../../scripts/lib.sh"

config_dir="${script_dir}/../../scripts/"

firecracker_url="${firecracker_url:-}"
firecracker_dir="firecracker"
firecracker_version="${firecracker_version:-}"
arch=$(uname -m)

[ -n "$firecracker_url" ] ||firecracker_url=$(get_from_kata_deps "assets.hypervisor.firecracker.url")
[ -n "$firecracker_url" ] || die "failed to get firecracker url"

[ -n "$firecracker_version" ] || firecracker_version=$(get_from_kata_deps "assets.hypervisor.firecracker.version")
[ -n "$firecracker_version" ] || die "failed to get firecracker version"

# GitHub-style release URL layout: <repo>/releases/download/<ver>/<file>.
firecracker_tarball_url="${firecracker_url}/releases/download"

file_name="firecracker-${firecracker_version}-${arch}.tgz"
download_url="${firecracker_tarball_url}/${firecracker_version}/${file_name}"

info "Download firecracker version: ${firecracker_version} from ${download_url}"
curl -o ${file_name} -L $download_url

# Fetch and check the matching sha256 file before extracting.
sha256sum="${file_name}.sha256.txt"
sha256sum_url="${firecracker_tarball_url}/${firecracker_version}/${sha256sum}"

info "Download firecracker ${sha256sum} from ${sha256sum_url}"

curl -o ${sha256sum} -L $sha256sum_url
sha256sum -c ${sha256sum}

tar zxvf ${file_name}
466a40054620af5a077e39927790087591f95f67 | Shell | rharriszzz/madweave | /convert | UTF-8 | 572 | 2.890625 | 3 | [] | no_license | #! /bin/bash
# Rasterize SVG file(s) to PNG with Batik at a fixed DPI and page size.
dpi=600
# 75 150 300 600 900 1200 1500 1800 2100 2400
#comment out the next line for transparent background
extra="-bg 255.255.255.255"
widthInInches=10
heightInInches=8

# Allow Ctrl-C to stop the (potentially long) render cleanly.
control_c()
{
    exit
}
trap control_c SIGINT

jar=/Users/rharris/Downloads/batik-1.7/batik-rasterizer.jar

# Output pixel dimensions and a JVM heap sized roughly with dpi^2.
widthInDots=$(($widthInInches * $dpi))
heightInDots=$(($heightInInches * $dpi))
mem=$((32 + ( $dpi * $dpi ) / 1000 ))

type_name=png
mime_type=image/png
d=.

# Any script arguments (SVG inputs, extra flags) are passed through.
java -Xmx${mem}M -jar "${jar}" -d "${d}" -m ${mime_type} -h ${heightInDots} -w ${widthInDots} -dpi ${dpi} ${extra} "$@"
2aa4c5027bbf9d549568ebd75a500f6c0571dac3 | Shell | ramrock93/karapace | /launcher.sh | UTF-8 | 884 | 2.828125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
set -o errexit # abort on nonzero exitstatus
set -o nounset # abort on unbound variable
set -o xtrace # echo commands after variable expansion
registry_host="${KAFKA_SCHEMA_REGISTRY%:*}" # Drop port part
registry_port="${KAFKA_SCHEMA_REGISTRY##*:}" # Drop host part
cat /app/karapace_config.json
jq --null-input \
--arg advertised_hostname "${NAIS_APP_NAME}.${NAIS_NAMESPACE}" \
--arg bootstrap_uri "${KAFKA_BROKERS}" \
--arg client_id "$(hostname)" \
--arg group_id "${NAIS_CLIENT_ID}" \
--arg registry_host "${registry_host}" \
--arg registry_port "${registry_port}" \
--arg ssl_cafile "${KAFKA_CA_PATH}" \
--arg ssl_certfile "${KAFKA_CERTIFICATE_PATH}" \
--arg ssl_keyfile "${KAFKA_PRIVATE_KEY_PATH}" \
--from-file "/app/karapace_config.json" \
> "/app/config.json"
cat /app/config.json
exec /app/.local/bin/karapace /app/config.json
| true |
d23468f0c10ba8ebe7675dec62ec11410ef1b195 | Shell | kohkimakimoto/omnibus-supervisor | /package-scripts/supervisor/postinst | UTF-8 | 1,388 | 3.640625 | 4 | [] | no_license | #!/bin/sh
#
# Perform necessary supervisor setup steps
# after package is installed.
#
PROGNAME=`basename $0`
error_exit()
{
echo "${PROGNAME}: ${1:-"Unknown Error"}" 1>&2
exit 1
}
ln -fs /opt/supervisor/bin/supervisorctl /usr/bin/supervisorctl
ln -fs /opt/supervisor/bin/supervisord /usr/bin/supervisord
if [ -d "/etc/logrotate.d/" ]; then
cp -f /opt/supervisor/etc/logrotate.d/supervisor /etc/logrotate.d/supervisor
fi
cp -f /opt/supervisor/etc/supervisord.conf /etc/supervisord.conf
if [ ! -e "/etc/supervisord.d" ]; then
mkdir -p /etc/supervisord.d
fi
chown -R root:root /etc/supervisord.d
if [ ! -e "/var/log/supervisor" ]; then
mkdir -p /var/log/supervisor
fi
chown -R root:root /var/log/supervisor
if [ ! -e "/var/run/supervisor" ]; then
mkdir -p /var/run/supervisor
fi
chown -R root:root /var/run/supervisor
if [ -f "/bin/systemctl" ]; then
# systemd (el7)
mkdir -p /usr/lib/systemd/system
cp -f /opt/supervisor//usr/lib/systemd/system/supervisord.service /usr/lib/systemd/system/supervisord.service
/bin/systemctl daemon-reload
sleep 2
/bin/systemctl enable supervisord.service
elif [ -f "/etc/redhat-release" -o -f "/etc/fedora-release" -o -f "/etc/system-release" ]; then
# init (el5, el6)
cp -f /opt/supervisor/etc/init.d/supervisord /etc/init.d/supervisord
/sbin/chkconfig --add supervisord
/sbin/chkconfig supervisord on
fi
exit 0
| true |
d9cccea319e3f79f8f2abcb3f73c50ed7c51b25d | Shell | marcoguido/pocker | /docker/workspace/conf/.bashrc | UTF-8 | 1,736 | 3.875 | 4 | [
"MIT"
] | permissive | export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh"
nvm alias default $NVM_DEFAULT_VERSION;
find-up () {
path=$(pwd)
while [[ "$path" != "" && ! -e "$path/$1" ]]; do
path=${path%/*}
done
echo "$path"
}
cdnvm(){
cd "$@";
nvm_path=$(find-up .nvmrc | tr -d '[:space:]')
# If there are no .nvmrc file, use the default nvm version
if [[ ! $nvm_path = *[^[:space:]]* ]]; then
declare default_version;
default_version=$(nvm version default);
# If there is no default version, set it to `node`
# This will use the latest version on your machine
if [[ $default_version == "N/A" ]]; then
nvm alias default node;
default_version=$(nvm version default);
fi
# If the current version is not the default version, set it to use the default version
if [[ $(nvm current) != "$default_version" ]]; then
nvm use default;
fi
elif [[ -s $nvm_path/.nvmrc && -r $nvm_path/.nvmrc ]]; then
declare nvm_version
nvm_version=$(<"$nvm_path"/.nvmrc)
# Add the `v` suffix if it does not exists in the .nvmrc file
if [[ $nvm_version != v* ]]; then
nvm_version="v""$nvm_version"
fi
# If it is not already installed, install it
if [[ $(nvm ls "$nvm_version" | tr -d '[:space:]') == "N/A" ]]; then
nvm install "$nvm_version";
fi
if [[ $(nvm current) != "$nvm_version" ]]; then
nvm use "$nvm_version";
fi
else
echo ".nvmrc found, installing or using this version.";
nvm install;
fi
}
# Shell aliases
alias cd='cdnvm'
alias pa='php artisan'
| true |
2ea85f0a5ad40d30fdd916864c85dda5dd86ef8e | Shell | primeXXIX/unix-and-bash | /bash-scripting/variables-tutorial/act-1.2.sh | UTF-8 | 270 | 2.796875 | 3 | [
"MIT"
] | permissive | length=$(($1-1))
echo $length
if [[ $length ]]; then
result=$(head -n $RANDOM /usr/share/dict/words | grep -o -w -E "\b\w[A-Za-z0-9]{${length}}\b" | tail -1)
else
result=$(head -n $RANDOM /usr/share/dict/words | tail -1)
fi
echo $result
export result
# ./output.sh
| true |
6301256f14d183823c4df093ffc9ed8d120e5a6b | Shell | int10/CodeLab | /Qt/ext4fstools/ext4_utils/mkuserimg.sh | UTF-8 | 1,487 | 3.890625 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#
# To call this script, make sure make_ext4fs is somewhere in PATH
function usage() {
cat<<EOT
Usage:
mkuserimg.sh [-s] SRC_DIR OUTPUT_FILE EXT_VARIANT MOUNT_POINT SIZE
[-T TIMESTAMP] [-C FS_CONFIG] [-B BLOCK_LIST_FILE] [FILE_CONTEXTS]
EOT
}
ENABLE_SPARSE_IMAGE=
if [ "$1" = "-s" ]; then
ENABLE_SPARSE_IMAGE="-s"
shift
fi
if [ $# -lt 5 ]; then
usage
exit 1
fi
SRC_DIR=$1
if [ ! -d $SRC_DIR ]; then
echo "Can not find directory $SRC_DIR!"
exit 2
fi
OUTPUT_FILE=$2
EXT_VARIANT=$3
MOUNT_POINT=$4
SIZE=$5
shift; shift; shift; shift; shift
TIMESTAMP=-1
if [[ "$1" == "-T" ]]; then
TIMESTAMP=$2
shift; shift
fi
FS_CONFIG=
if [[ "$1" == "-C" ]]; then
FS_CONFIG=$2
shift; shift
fi
BLOCK_LIST=
if [[ "$1" == "-B" ]]; then
BLOCK_LIST=$2
shift; shift
fi
FC=$1
case $EXT_VARIANT in
ext4) ;;
*) echo "Only ext4 is supported!"; exit 3 ;;
esac
if [ -z $MOUNT_POINT ]; then
echo "Mount point is required"
exit 2
fi
if [ -z $SIZE ]; then
echo "Need size of filesystem"
exit 2
fi
OPT=""
if [ -n "$FC" ]; then
OPT="$OPT -S $FC"
fi
if [ -n "$FS_CONFIG" ]; then
OPT="$OPT -C $FS_CONFIG"
fi
if [ -n "$BLOCK_LIST" ]; then
OPT="$OPT -B $BLOCK_LIST"
fi
MAKE_EXT4FS_CMD="make_ext4fs $ENABLE_SPARSE_IMAGE -T $TIMESTAMP $OPT -l $SIZE -a $MOUNT_POINT $OUTPUT_FILE $SRC_DIR"
echo $MAKE_EXT4FS_CMD
$MAKE_EXT4FS_CMD
if [ $? -ne 0 ]; then
exit 4
fi
| true |
a096efc1df6636f202dc8ca49ffa6481bdcddea7 | Shell | Microbrewit/Microbrewit.Api | /src/Microbrewit.Api/docker/entrypoint.sh | UTF-8 | 584 | 2.75 | 3 | [] | no_license | #!/bin/bash
set -e
sed -i \
-e 's,${POSTGRES_DB},'"${POSTGRES_DB}"',g' \
-e 's,${POSTGRES_USER},'"${POSTGRES_USER}"',g' \
-e 's,${POSTGRES_PASSWORD},'"${POSTGRES_PASSWORD}"',g' \
-e 's,${ELASTICSEARCH_INDEX},'"${ELASTICSEARCH_INDEX}"',g' \
-e 's,${MICROBREWIT_APIURL},'"${MICROBREWIT_APIURL}"',g' \
./docker/appsettings.json
# Remove default config and replace with environment variable based config.
rm ./appsettings.json
mv ./docker/appsettings.json ./appsettings.json
echo "START ALL THE THINGS!"
# Exec docker run invokers original command
dotnet run
| true |
88f8b1da20a8c7a18db820d03f454ab91d401e3b | Shell | piogrzej/scripts | /cpu_freq.sh | UTF-8 | 221 | 3.453125 | 3 | [] | no_license | #!/bin/bash
cpuFreq=$(lscpu | grep "CPU MHz" | awk '{print $3}' | cut -f1 -d".")
if [ $cpuFreq -ge 1000 ]
then
cpu=$(echo $cpuFreq | cut -c1).$(echo $cpuFreq | cut -c2)GHz
else
cpu=${cpuFreq}MHz
fi
printf "%s" $cpu
| true |
796f850297cc640be6c510028d7f922715a22700 | Shell | scalacommunitybuild/playframework | /scripts/scriptLib | UTF-8 | 1,476 | 3.453125 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# Copyright (C) Lightbend Inc. <https://www.lightbend.com>
# Lib for CI scripts
set -e
set -o pipefail
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
BASEDIR=$DIR/..
export DOCUMENTATION=$BASEDIR/documentation
printMessage() {
echo "[info]"
echo "[info] ---- $1"
echo "[info]"
}
runSbt() {
sbt -jvm-opts "$BASEDIR/.travis-jvmopts" 'set concurrentRestrictions in Global += Tags.limitAll(1)' "$@" | grep --line-buffered -v 'Resolving \|Generating '
}
# Runs code formating validation in the current directory
scalafmtValidation() {
printMessage "VALIDATE SCALA CODE FORMATTING"
runSbt +scalafmtCheckAll scalafmtSbtCheck || (
echo "[error] ERROR: Scalafmt test failed for $1 source."
echo "[error] To fix, format your sources using 'sbt scalafmtAll scalafmtSbt' before submitting a pull request."
false
)
}
# Runs code formating validation in the current directory
javafmtValidation() {
printMessage "VALIDATE JAVA CODE FORMATTING"
setJavafmtIntegrationTests "$1"
runSbt javafmt test:javafmt $JAVAFMT_INTEGRATION_TESTS
git diff --exit-code || (
echo "[error] ERROR: javafmt check failed for $1 source, see differences above."
echo "[error] To fix, format your sources using 'sbt javafmt test:javafmt' before submitting a pull request."
false
)
}
setJavafmtIntegrationTests() {
JAVAFMT_INTEGRATION_TESTS=""
if [ "$1" == "framework" ]; then
JAVAFMT_INTEGRATION_TESTS="it:javafmt"
fi
}
| true |
3476b6200996cded80832afd194bb61716a3ab00 | Shell | pazeshun/dotfiles-1 | /install_scripts/install_vim.trusty.manual.sh | UTF-8 | 1,352 | 2.828125 | 3 | [] | no_license | #!/bin/bash
if [ "$(uname)" != "Linux" -o "$(lsb_release -sr)" != "14.04" ]; then
exit 0
fi
set -x
sudo apt-get remove --purge -qq -y vim vim-runtime vim-gnome vim-tiny vim-common vim-gui-common
sudo apt-get build-dep -qq -y vim-gnome
sudo apt-get install aptitude
sudo aptitude install -qq -y liblua5.1-dev luajit libluajit-5.1 python-dev ruby-dev libperl-dev mercurial libncurses5-dev libgnome2-dev libgnomeui-dev libgtk2.0-dev libatk1.0-dev libbonoboui2-dev libcairo2-dev libx11-dev libxpm-dev libxt-dev
sudo rm -rf /usr/local/share/vim
sudo rm /usr/bin/vim
sudo mkdir /usr/include/lua5.1/include
sudo mv /usr/include/lua5.1/*.h /usr/include/lua5.1/include/
sudo ln -s /usr/bin/luajit-2.0.0-beta9 /usr/bin/luajit
TMPDIR=$(mktemp -d)
cd $TMPDIR
git clone https://github.com/vim/vim.git -b v8.0.0075
cd vim/src
make distclean
./configure --with-features=huge \
--enable-multibyte \
--without-x \
--enable-rubyinterp \
--enable-largefile \
--disable-netbeans \
--enable-pythoninterp \
#--with-python-config-dir=/usr/lib/python2.7/config \
--enable-perlinterp \
--enable-luainterp \
--with-luajit \
--enable-gui=auto \
--enable-fail-if-missing \
--with-lua-prefix=/usr/include/lua5.1 \
--enable-cscope
make -j
sudo make install
cd
rm -rf $TMPDIR
sudo apt-get install -qq -y vim-gtk
| true |
96defd815cdc8df1881c5c9ea2d80e09ba109ed7 | Shell | boubech/postgresql-backup-azure-blob | /backup.sh | UTF-8 | 840 | 3.578125 | 4 | [] | no_license | #!/bin/bash
echo "Job started: $(date)"
DATE=$(date $FILENAME_DATE_FORMAT_SUFFIX)
FILE="$FILENAME_PREFIX-$DATE.sql"
echo "Start postgresql dump in file $FILE.."
export PGPASSWORD=${PG_PASSWORD}
export PGPASSFILE=${PG_PASSFILE}
echo "PG_HOST=$PG_HOST"
echo "PG_USER=$PG_USER"
echo "PG_PORT=$PG_PORT"
echo "PG_DB=$PG_DB"
echo "PG_EXTRA_OPTS=$PG_EXTRA_OPTS"
pg_dump -h "$PG_HOST" -p "$PG_PORT" -U "$PG_USER" -f "$FILE" -d "$PG_DB" $PG_EXTRA_OPTS || exit 1
unset PGPASSWORD
echo "Make a tar file.."
tar -czvf $FILE.tar.gz $FILE || exit 1
echo "Push tar file in Azure Blob Storage.."
azcopy copy "$FILE.tar.gz" "${AZ_BLOB_SAS_URL}" || exit 1
echo "Job finished: $(date)"
azcopy list ${AZ_BLOB_SAS_URL} > existing_file || exit 1
[ "${MAX_BACKUP_RETENTION_IN_SECONDS}" == "" ] && echo "No purge configured" && exit 0
./purge.sh
exit $? | true |
3ab98bd1c169a59f73b59c37bd45e4e18ad74596 | Shell | timkphd/examples | /tims_tools/piubuntu/mpiinstalls | UTF-8 | 1,929 | 3.09375 | 3 | [
"Unlicense"
] | permissive | #!/usr/bin/bash
sudo mkdir /nopt
sudo chmod 777 /nopt
cd ~/piubuntu
##### install openmpi
wget https://download.open-mpi.org/release/open-mpi/v4.1/openmpi-4.1.0.tar.gz
tar -xzf openmpi-4.1.0.tar.gz
cd openmpi-4.1.0
./configure --prefix=/nopt/mpi/openmpi/4.1.0 --enable-orterun-prefix-by-default
make -j 4
make install
## openmpi module
mkdir -p /nopt/mods/mpi/openmpi
cat << END > /nopt/mods/mpi/openmpi/4.1.0.lua
help([[
"OpenMPI 4.1.0"
"
]])
whatis("Name: OpenMPI")
whatis("Version: 4.1.0")
local base = "/nopt/mpi/openmpi/4.1.0"
setenv("OPENMPI_ROOT_DIR", base)
prepend_path("PATH", pathJoin(base, "bin"))
prepend_path("MANPATH", pathJoin(base, "share/man"))
prepend_path("LD_LIBRARY_PATH", pathJoin(base, "lib"))
prepend_path("LIBRARY_PATH", pathJoin(base, "lib"))
prepend_path("CPATH", pathJoin(base, "include"))
prepend_path("PKG_CONFIG_PATH", pathJoin(base, "lib/pkgconfig"))
prepend_path("CMAKE_PREFIX_PATH", base)
END
##### install mpich
cd ~/piubuntu
wget http://www.mpich.org/static/downloads/3.4.1/mpich-3.4.1.tar.gz
tar -xzf mpich-3.4.1.tar.gz
cd mpich-3.4.1
if gcc -v 2>&1 | tail -1 | grep 10 ; then
export FFLAGS="-w -fallow-argument-mismatch -O2"
echo gcc is 10.x
else
echo gcc not 10.x
fi
./configure --prefix=/nopt/mpi/mpich/3.4.1 --with-device=ch3
make -j 4
make install
## mpich module
mkdir -p /nopt/mods/mpi/mpich
cat << END > /nopt/mods/mpi/mpich/3.4.1.lua
help([[
"mpich 3.4.1"
"
]])
whatis("Name: MPICH")
whatis("Version: 3.4.1")
local base = "/nopt/mpi/mpich/3.4.1"
setenv("MPICH_ROOT_DIR", base)
prepend_path("PATH", pathJoin(base, "bin"))
prepend_path("MANPATH", pathJoin(base, "share/man"))
prepend_path("LD_LIBRARY_PATH", pathJoin(base, "lib"))
prepend_path("LIBRARY_PATH", pathJoin(base, "lib"))
prepend_path("CPATH", pathJoin(base, "include"))
prepend_path("PKG_CONFIG_PATH", pathJoin(base, "lib/pkgconfig"))
prepend_path("CMAKE_PREFIX_PATH", base)
END
| true |
91625cb1273ae05932bfc6f66147405071b9670d | Shell | dylanwu/snippts | /shell/ggd/ggd | UTF-8 | 898 | 3.90625 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
if [ $# -lt 1 ]; then
echo "usage: ggd ~/.gg.conf"
exit 0;
fi
i=1
conf_file=$1
touch ${conf_file}
here=`pwd`
valid_here=${here//\//\\\/}
already_exist=`grep "^${here}$" ${conf_file}`
declare -a godirs
while true; do
[[ "$2"x = "a"x ]] && [[ x"${already_exist}" = x"" ]] && echo ${here} >> ${conf_file}
[[ "$2"x = "a"x ]] && break
[[ "$2"x = "d"x ]] && [[ x"${already_exist}" != x"" ]] && sed -i "/^${valid_here}$/d" ${conf_file}
[[ "$2"x = "d"x ]] && break
while read line; do
if [ "$line"x = ""x ]; then
continue
fi
[[ -d $line ]] || continue
echo -e "\033[`expr 35 + $i % 3`m" $i: $line
godirs[$i]=$line
let i=$i+1
done < ${conf_file};
echo -e "\033[0m"
echo -n 'where to go? '
read -n1 -t30 n
echo
[[ x"${godirs[${n}]}" != x"" ]] && cd ${godirs[${n}]}
break
done
| true |
379cf76952e06775eb2a917aef8ac53ec8c784de | Shell | anianruoss/StatPhys | /chapter5/ex5/calculate.sh | UTF-8 | 402 | 3.359375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# prerequisites:
# - current directory contains executables
# * mdatom
DIR=calculations_dir
rm -rf $DIR
mkdir $DIR
cd $DIR
for TT in 0.003 0.01 0.03 0.1 0.3 1.0
do
# Create parameter file
PARAM_FILE=$TT.inp
sed "s/_CouplingTime_/$TT/" < ../params.template > $PARAM_FILE
# Perform calculations
OUTPUT_FILE=${TT}.out
./../mdatom $PARAM_FILE ../coord.inp > $OUTPUT_FILE
done
cd ..
| true |
2cf4449e5fd9631cb95a6adfbeba78abdf2e879b | Shell | sunilthitme/BasicShellScripting | /Assignment/Day 5 Assignment/selection practice problems with if and else/leapyear.sh | UTF-8 | 213 | 3.328125 | 3 | [] | no_license | #/bin/bash
echo "Enter the year : "
read y
a=$(( $y % 4 ))
b=$(( $y % 100 ))
c=$(( $y % 400 ))
if [ $a -eq 0 ] && [ $b -ne 0 ] || [ $c -eq 0 ]
then
echo "$y is a leap year"
else
echo "$y is a not leap year"
fi
| true |
4068e426f0645f34f4152645b82f47cece98c4c4 | Shell | lundlab/RiboMeth-seq-public | /pipeline.sh | UTF-8 | 5,252 | 3.171875 | 3 | [] | no_license | # set the variables ############################################################
# set the variables ############################################################
source /opt/anaconda3/bin/activate # optional
conda activate ribomethseq # optional
cd /home/disat/data/PROJECT-rRNA-RMS
cwd=$(pwd)
# set the experiment name and barcodes
expname="pipe_test"
barcode1="ACAATG"
barcode2="CAAGAG"
barcode3="GGACTT"
#fastqfile="R_2019_06_27_11_44_21_user_proton1-241-Disa_run_4_Auto_user_proton1-241-Disa_run_4_338.fastq"
fastqfile="RMS.test.fastq"
# set the ref genome sequence and raw data files
#dataDir="/home/disat/data/raw_Data/RMS_fastq"
dataDir="/home/disat/data/PROJECT-rRNA-RMS/raw_data"
genomeDir="/home/disat/data/ref_genomes/"
refgenome="hsa_sno+sn+rRNA" # put the basename here i.e leave out ".fa"
path_to_cutadapt2_6="/home/disat/.local/bin" # I need this since I had to install cutadapt 2.6 in my local bing
#### these are more or less standard for each project tree
workDir="${cwd}/analysis"
scriptDir="${cwd}/scripts"
cd ${workDir}
mkdir ${expname} # make the expname dir
mkdir ${expname}/results
resultDir="${workDir}/${expname}/results"
##################################################################### end variables
cp ${scriptDir}/pipeline.sh ${expname}/pipeline.${expname}.sh # copy the pipeline with all the correct variables
##### START ANALYSIS
# sort and separate reads based on the barcode
date +"%d %b-%y %T ...... sortNtrim "
mkdir ${expname}/trimmed
for var in ${!barcode@}; do
python ${scriptDir}/sortNtrim.py ${dataDir}/${fastqfile} ${!var} > ${expname}/trimmed/${!var}.data.BC.fq
done
# remove adapters
date +"%d %b-%y %T ...... cutadapt "
for n in $(ls ${expname}/trimmed/*BC.fq); do
/home/disat/.local/bin/cutadapt -a ATCACCGAC -m 15 -j 18 --discard-untrimmed ${n} > ${n}.CA_disc.fq 2>> ${expname}/trimmed/log.cutadapt.${expname}.txt
done
# bowtie2 mapping
date +"%d %b-%y %T ...... bowtie2 "
mkdir ${expname}/bamfiles # make this dir to put sam to bam files later for saving
for n in $(ls ${expname}/trimmed/*CA_disc.fq); do
mfile=${n##*/}
cd ${genomeDir}/${refgenome}/
bowtie2 -k 10 --threads 18 -x index/${refgenome} -U ${workDir}/${n} -S ${workDir}/${expname}/${mfile}.sam 2>> ${workDir}/${expname}/bamfiles/log.bowtie2.${expname}.txt
done
cd ${workDir}
## PYTHON SCRIPTS multimap + readcount + seqnumbering + SAMTOOLS mileup + pilecount + map%
date +"%d %b-%y %T ...... calculating RMS scores and SNP analysis"
for n in $(ls ${expname}/*.sam); do
cp ${genomeDir}/${refgenome}/${refgenome}.fa.fai seqs_to_perform_RMS.txt
#remove multimappers (Ulf's script)
python ${scriptDir}/remove_multiple_map_hits.py ${n} > ${n}.best_map.sam
# count read ends
python ${scriptDir}/readcount_5and3.py ${n}.best_map.sam seqs_to_perform_RMS.txt > ${n}.best_map.sam.count
#count the RMS scores
python ${scriptDir}/ms_scores.py ${n}.best_map.sam.count > ${n}.best_map.sam.count.scores
# seq numbering
python ${scriptDir}/seq_numbering.py ${genomeDir}/${refgenome}/${refgenome}.fa ${n}.best_map.sam.count.scores > ${n}.best_map.sam.count.scores.seq
# sam to sorted bam files
samtools view -u -bS ${n}.best_map.sam | samtools sort -o ${n}.best_map.sam.sort.bam
# mileup for SNP analysis
samtools mpileup -d 10000000 -f ${genomeDir}/${refgenome}/${refgenome}.fa ${n}.best_map.sam.sort.bam > ${n}.best_map.sam.sort.bam.sort.pileup
# pilecpunt for SNP analysis
python ${scriptDir}/pilecount_v5.py ${n}.best_map.sam.sort.bam.sort.pileup seqs_to_perform_RMS.txt > ${n}.best_map.sam.sort.bam.sort.pileup.snp
# get mapping percentages
python ${scriptDir}/map_percent.py ${n} >> ${expname}/bamfiles/map_percent.${expname}.txt
# sam to sorted bam files
samtools view -u -bS ${n} | samtools sort -o ${n%.sam}.sorted.bam
done
# Combining replicates
date +"%d %b-%y %T ...... running comb_RMS.py"
replicates=$(echo $(ls ${expname}/*.seq))
python ${scriptDir}/comb_RMS.py ${replicates} > ${resultDir}/${expname}_RMS_comb.txt
date +"%d %b-%y %T ...... running comb_SNP.py"
replicates=$(echo $(ls ${expname}/*.snp))
python ${scriptDir}/comb_SNP.py ${replicates} > ${resultDir}/${expname}_SNP_comb.txt
date +"%d %b-%y %T ...... running transcript_count.py"
replicates=$(echo $(ls ${expname}/*.best_map.sam))
python ${scriptDir}/transcript_count.py ${replicates} ${genomeDir}/${refgenome}/${refgenome}.fa > ${resultDir}/${expname}_expressed.txt
# making excel files in results Dir
cd ${resultDir}
date +"%d %b-%y %T ...... converting to xslx"
python ${scriptDir}/txt2xlsx_RMS.py ${expname}_RMS_comb.txt
python ${scriptDir}/txt2xlsx_SNP.py ${expname}_SNP_comb.txt
python ${scriptDir}/txt2xlsx_expressed.py ${expname}_expressed.txt
date +"%d %b-%y %T ...... Cleaning up"
cd ${workDir}/${expname}
mkdir countfiles
rm trimmed/*.BC.fq
rm *.best_map.sam.sort.bam
rm *.seq
rm *.scores
rm *.pileup
rm *.snp
rm *.sam
mv *.bam bamfiles/
mv *.count countfiles/
mv ../seqs_to_perform_RMS.txt seqs_to_perform_RMS.txt
date +"%d %b-%y %T ...... end script"
### can do this if possible
date +"%d %b-%y %T ...... Running R script"
# run QC script in R: arguments are: "expname" "path_to_RMS.anno" "path_to_output"
nice Rscript ${scriptDir}/Rscript_QC_for_RMS_pipeline.R ${expname} ${scriptDir}/helpfiles ${resultDir}
conda deactivate
conda deactivate
| true |
c1b7bab825f040b89b1e06d3948b659921fcc039 | Shell | jizongFox/DGA1033 | /parameterSearch/viewer_script_generator.sh | UTF-8 | 573 | 3.046875 | 3 | [] | no_license | #!/usr/bin/env bash
input=$1
echo $input
inputfolder=$2
echo $inputfolder
showalllamda=$3
echo $showalllamda
inputzip=$input'.zip'
echo 'inputzip: '$inputzip
#
if [ ! -d "parameterSearch/$input" ]; then
# Control will enter here if $DIRECTORY exists.
echo ">>> unzip from $inputzip to $input "
unzip -x -q parameterSearch/$inputzip -d parameterSearch/
fi
RES=$(python parameterSearch/viewer_wraper.py --csv_path=parameterSearch/$input/prostate/$inputfolder/prostate.csv \
--img_source=admm_research/dataset/PROSTATE/train/Img \
$showalllamda)
echo $RES
eval $RES | true |
cbe2b92d2c3e58a56ce6663eb40be3fefb17e221 | Shell | nik-kor/blog | /bin/render.sh | UTF-8 | 410 | 3.71875 | 4 | [] | no_license | #!/usr/bin/env bash
set -Eeuo pipefail
list=""
for i in $(ls tech | sort -r); do
date=${i:0:10}
title=$(head -1 "tech/$i" | sed 's/# //g')
# title=$(echo "$i" | sed "s/$date-//" | sed "s/.md//")
list+="- [$date $title](./tech/$i)\n";
done
echo -e "\n[//]: # (RENDERED BASED ON README-template.md)" > README.md
sed "s:%tech-posts-list%:$list:g" README-template.md >> README.md
cat README.md
| true |
009021d6e1a63bae2995b6b4375367be960a7592 | Shell | DigGe/tools | /memsum | UTF-8 | 261 | 3.078125 | 3 | [] | no_license | #!/bin/bash
mem=`cat /proc/meminfo | grep MemTotal | awk '{print $2}'`
a=`cat /proc/meminfo | grep MemTotal | awk '{print $3}'`
e=0
for num in `sudo ps --no-headers xao %mem`
do
e=`calc $e+$num`
done
echo Total Usage: `calc 2 $e*$mem/100` / $mem kB \($e%\)
| true |
f17fd9e4dbef2375336080908c9d1a43f41c9081 | Shell | ekimekim/scripts | /spawn | UTF-8 | 184 | 2.890625 | 3 | [] | no_license | #!/bin/bash
if [ "$#" -eq 0 ]; then
echo "$0 COMMAND {ARGS} - Execute command completely divorced from the current session" >&2
exit 1
fi
setsid "$@" <>/dev/null >&0 2>&0 &
disown
| true |
6d08430f9547bd854d527c756787eccb2c74f2b7 | Shell | nathanchere/dot | /desktop/Monster/status.sh | UTF-8 | 1,168 | 2.90625 | 3 | [] | no_license | #too lazy to put interpreter
mus() {
music="$(mpc current -f "%artist% - [%title%|%file%]")"
if [ -z "$music" ]; then music="stopped" mstat=""
else
mstat="$(mpc | sed -rn '2s/\[([[:alpha:]]+)].*/\1/p')"
[ "$mstat" == "paused" ] && mstat="" || mstat=""
fi
echo "%{B#FF272727}%{U#FF364069}%{+u} $mstat %{-u}%{B} $music "
}
vol() {
if [ "$(amixer get Master | sed -nr '$ s:.*\[(.+)]$:\1:p')" == "off" ]
then vol="[m]" vstat=""
else
vol="$(amixer get PCM | sed -nr '$ s:.*\[(.+%)].*:\1:p')"
if [ "${vol%\%}" -le 10 ]; then vstat=""
elif [ "${vol%\%}" -le 20 ]; then vstat=""; else vstat=""; fi
fi
echo $vstat $vol
}
bat() {
bat="$(acpi | awk '{print $4}' | sed 's/,//g' )"
if [ "$(acpi | awk '{print $3}')" == "Discharging," ]
then bstat=""
else
bstat=""
fi
echo "%{B#FF272727} $bstat %{F#FF364069}◀%{F}%{B#FF364069} pow: $bat %{B}"
}
while :; do
date="$(date +"%a, %b %d %R")" dstat=""
echo "%{r} $(mus) $(bat)%{B#FF272727} $date %{B}"
sleep 1
done | bar -p -d -g 1066x17+300+0 -f '-*-lemon-*-*-*-*-10-*-*-*-*-*-*-*,-*-stlarch-*-*-*-*-10-*-*-*-*-*-*-*' -u 3 -B '#FF161616' -F "FF9A9A9A"
| true |
cc3b6d15d4cf2f24751a3a5f74c3cf3ec6d65631 | Shell | ayyi/libwaveform | /shaders/make_shaders | UTF-8 | 734 | 3.421875 | 3 | [] | no_license | #!/bin/bash
shaders=(peak peak_nonscaling horizontal vertical hires hires_ng ruler ass lines cursor);
out=shaders.c
if [[ -s $out ]]; then
#file already exists, check age
need_regen=false
for s in "${shaders[@]}"; do
if [[ $s.frag -nt $out ]]; then
need_regen=true
fi
if [[ $s.vert -nt $out ]]; then
need_regen=true
fi
done
if [[ $need_regen == false ]]; then exit; fi;
fi;
echo "regenerating shaders.c ..."
echo "" > $out
function a {
echo '' >> $out
echo 'AGlShaderText '$1'_text = {" \' >> $out
cpp -P $1.vert | sed -e "s/$/ \\\/" >> $out
echo '",' >> $out
echo '" \' >> $out
cpp -P $1.frag | sed -e "s/$/ \\\/" >> $out
echo '"' >> $out
echo '};' >> $out
}
for s in "${shaders[@]}"; do
a $s
done
| true |
bab1d4e515937535a4523f0be3e3a41412a51eb4 | Shell | dianqiji/contrail | /tools/patroni/build_patroni.sh | UTF-8 | 878 | 3.71875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
patroni_version="1.5.1"
[[ "$(docker images -q patroni)" == "" ]] || { echo "Patroni image already exists. Skipping building docker image." ; exit 0; }
tmpdir=$(mktemp -d -t patroni-repository-XXXXXX) || { echo "Failed to create temporary directory" ; exit 1; }
echo "Downloading patroni"
(cd $tmpdir && curl -LO "https://github.com/zalando/patroni/archive/v$patroni_version.zip" --connect-timeout 60) || { echo "Failed to download patroni repository" ; exit 1; }
echo "Upacking repository archive"
unzip -q "$tmpdir/v$patroni_version.zip" -d $tmpdir && rm "$tmpdir/v$patroni_version.zip" || { echo "Failed to exctract repository archive" ; exit 1; }
echo "Archive unpacked"
docker build -t patroni "$tmpdir/patroni-$patroni_version" || { echo "Failed to build docker image" ; exit 1; }
rm -rf $tmpdir || { echo "Failed to remove temporary directory" ; exit 1; }
| true |
49493b356b4896b0af2e83fd9c9bd995c178dc71 | Shell | okertanov/pinguin | /src/examples/gpio.sh | UTF-8 | 892 | 3.390625 | 3 | [] | no_license | #!/bin/sh
# see http://elinux.org/RPi_Low-level_peripherals
# GPIO numbers should be from this list
# 0, 1, 4, 7, 8, 9, 10, 11, 14, 15, 17, 18, 21, 22, 23, 24, 25
# Note that the GPIO numbers that you program here refer to the pins
# of the BCM2835 and *not* the numbers on the pin header.
# So, if you want to activate GPIO7 on the header you should be
# using GPIO4 in this script. Likewise if you want to activate GPIO0
# on the header you should be using GPIO17 here.
# Set up GPIO 4 and set to output
echo "4" > /sys/class/gpio/export
echo "out" > /sys/class/gpio/gpio4/direction
# Set up GPIO 7 and set to input
echo "7" > /sys/class/gpio/export
echo "in" > /sys/class/gpio/gpio7/direction
# Write output
echo "1" > /sys/class/gpio/gpio4/value
# Read from input
cat /sys/class/gpio/gpio7/value
# Clean up
echo "4" > /sys/class/gpio/unexport
echo "7" > /sys/class/gpio/unexport
| true |
1a44602a42e1460581889afba4369e3ccac944c9 | Shell | laristra/portage | /app/portageapp/test/portageapp_rgmd_jali_tjunction_2d_swept.sh | UTF-8 | 761 | 2.765625 | 3 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | #!/bin/bash
: <<'END'
This file is part of the Ristra portage project.
Please see the license file at the root of this repository, or at:
https://github.com/laristra/portage/blob/master/LICENSE
END
# Exit on error
set -e
# Echo each command
set -x
DATA_DIR=.
mpirun -np 1 ${TESTAPPDIR}/portageapp_rgmd_jali \
--problem=tjunction \
--dim=2 \
--nsourcecells=10 \
--ntargetcells=10 \
--material_fields="1,x,y" \
--intersect=n \
--perturb_source=pseudorandom \
--source_convex_cells=n \
--output_meshes=y \
--remap_order=2 \
--field_filename="tjunction_2d_swept"
# Compare the values for the field
$CMPAPPDIR/apptest_cmp GOLD_portageapp_rgmd_jali_tjunction_2d_swept.gmv tjunction_2d_swept0_iteration_0.gmv 1e-9
| true |
5bb096dab5deba59823085ea5ba0f203a33386e1 | Shell | ECS-GDP2-1516/ml-data | /set-2015-11-18/apply-mlp.sh | UTF-8 | 583 | 2.5625 | 3 | [] | no_license | #!/bin/bash
linefile="$1"
ls reduced | sort -V | sed 's/hz//' |
while read freq
do
for n in {0..10}
do
for seed in {0..0}
do
java -cp ~/university/weka-3-6-13/weka.jar weka.classifiers.functions.MultilayerPerceptron -L 0.3 -M 0.2 -N 50 -V 0 -S "$seed" -E 20 -H "$n" -B -C -I -t "reduced/${freq}hz/all.arff" -m /tmp/3-class.cost | grep 'Average Cost' | tail -n1 | awk '{print $3}'
done |
paste -sd+ - | bc | awk '{print '"$freq"' " " '"$n"' " " $1/1}'
done
echo ''
done
| true |
d7c065e5219bb4f779f78fe7798fc010ddc6fd5d | Shell | TimFenwick15/Pi0-Media-Controller | /setup.sh | UTF-8 | 571 | 2.765625 | 3 | [] | no_license | #!/bin/bash
### NOT YET TESTED - lines only performed manually ###
# From the repo directory, call:
# $ sudo ./setup.sh
echo "dtoverlay=dwc2" | tee -a /boot/config.txt
echo "dwc2" | tee -a /etc/modules
echo "libcomposite" | tee -a /etc/modules
# Add thses lines to /etc/rc.local above the exit 0 (uncomment the line first)
# /usr/bin/isticktoit_usb # libcomposite configuration
# /usr/bin/media
cp bin/isticktoit_usb /usr/bin/isticktoit_usb
chmod +x /usr/bin/isticktoit_usb
cp bin/media_keys /usr/bin/media_keys
chmod +x /usr/bin/media_keys
echo Done, now reboot
| true |
6fc27dabd981f06f588620726075464edf2851d4 | Shell | csitd/shell-utils | /showip.sh | UTF-8 | 1,080 | 3.625 | 4 | [
"MIT"
] | permissive | #!/bin/sh
hex_to_ip(){
CACHE="$1"
COUNT="0"
REMOV=""
while [ "$COUNT" -lt 4 ]
do
REMOV=${CACHE%*??}
printf "%d" "0x${CACHE##$REMOV}" 2>/dev/null
COUNT=$(( COUNT + 1 ))
[ "$COUNT" -eq 4 ] && break
printf "."
CACHE="$REMOV"
done
}
f1(){ printf "$1\n" ;};
f2(){ printf "$2\n" ;};
f3(){ printf "$3\n" ;};
f8(){ printf "$8\n" ;};
while read -r i
do
if [ $(f3 $(printf '%s' "$i")) = 00000000 ]
then if [ $(f8 $(printf '%s' "$i")) != 00000000 ]
then
printf "%s" "Interface: ";
f1 $(printf '%s' "$i")
printf "%s" "Network: "
hex_to_ip $(f2 $(printf '%s' "$i"))
printf "\n"
printf "%s" "Netmask: "
hex_to_ip $(f8 $(printf '%s' "$i"))
printf "\n"
fi
fi
done < /proc/net/route
for j in /proc/net/udp /proc/net/tcp
do
while read i
do IFS=': '
if [ "$(f2 $(printf "$i \n" ) )" != 00000000 ]
then HOLD=$(hex_to_ip $(f2 $(printf "$i\n" ) ) )
fi
done < "$j"
done
printf "Address: $HOLD\n"
| true |
795e31ddb114b826e0fd8d77b7cb2e7494b2d341 | Shell | baiqj/backupsystem | /server/do_backup | UTF-8 | 2,731 | 3.5 | 4 | [] | no_license | #!/bin/bash
# Load site configuration (directories, $Sendmail, $Administrators, ...)
# and shared helper functions.
. "$(dirname $0)/backupconfig.sh"
. "$UtilsDir/utils.sh"
# One log file per month (yy-mm).
Logfile="$LogfileDir/$(date +%y-%m)"
Launch_time=$(date)
# Process one backup request file.
#   $1 (P)       - request file; its first word is the module name
#                  (git/sftp/rsync), the remainder is the source URL
#   $2 (node)    - node directory this request belongs to
#   $3 (totalog) - log file that always receives the transcript
#   $4 (failog)  - log file receiving the transcript on failure
#                  ("" disables failure logging)
# Returns 0 on success, 1 for an invalid rsync URL, 2 for an unknown module.
function ProcessRequest {
	local P="$1"
	local node="$2"
	local totalog="$3"
	local failog="$4"
	# per-request transcript, merged into totalog/failog afterwards
	local t1=$(mktemp)
	local module=$(cut -d " " -f 1 "$P")
	local cmd=""
	local args=""
	echo "$(date) ${P} =======>" >> "$t1"
	cat "$P" >> "$t1"
	echo "End of ${P} <=======" >> "$t1"
	case "$module" in
	'git')
		url=$(cut -d " " -f 2- "$P")
		store_dir="${Datadir}/$(basename ${node})/git"
		store_to="${store_dir}/$(basename ${url})"
		test -e "$store_dir" || mkdir -p "$store_dir"
		cmd="${Cmdir}/git.sh"
		args=("$url" "$store_to")
		;;
	'sftp')
		# NOTE(review): timestamp is assigned but never used below — confirm.
		timestamp=$(cut -d " " -f 2 "$P")
		url=$(cut -d " " -f 3- "$P")
		store_dir="${Datadir}/$(basename ${node})/sftp"
		# destination name: request name minus its first char + date + basename
		store_to="${store_dir}/${P:1} $(date +%y-%m-%d_%H_%M_%S) $(basename ${url})"
		test -e "$store_dir" || mkdir -p "$store_dir"
		cmd="${Cmdir}/sftp.sh"
		args=("$url" "$store_to")
		;;
	'rsync')
		timestamp=$(cut -d " " -f 2 "$P")
		url=$(cut -d " " -f 3- "$P")
		# strip the host (and optional "://" / ":") prefix to get the path part
		dir_path=$(echo "$url" | sed -n 's/^[^:\/]*\(:\/\|[:\/]\)//p')
		if test -z "$dir_path"; then
			test -z "$failog" || echo "Invalid url '$url'" >>"$failog"
			return 1
		fi
		store_dir="${Datadir}/$(basename ${node})/rsync"
		store_to="${store_dir}/${dir_path}"
		test -e "$store_to" || mkdir -p "$store_to"
		cmd="${Cmdir}/rsync.sh"
		args=("$url/" "$store_to")
		;;
	*)
		echo "unknown module: \"${module}\""
		return 2
		;;
	esac
	# Run the module script in a subshell; on failure append the transcript
	# (plus a blank separator) to failog when one was given.
	if test -z "$failog"; then
		(. "$cmd" "${args[@]}") 1>>"$t1" 2>&1
	else
		(. "$cmd" "${args[@]}") 1>>"$t1" 2>&1 || (cat "$t1" >> "$failog"; echo >> "$failog")
	fi
	cat "$t1" >> "$totalog"; echo >> "$totalog"
	rm -f "$t1"
}
# Main loop, run inside a subshell holding an exclusive non-blocking
# lock on $TODOdir/.lock (fd 300) so only one instance runs at a time.
# Request-file naming convention: P* = pending, C* = queued, F* = finished.
(
flock -xn 300 || exit 0
cd "$TODOdir"
for N in *
do
	if test -d "$N"; then
		cd "$N" || continue
		# First pass: resume P* requests left over from a previous run;
		# their combined transcript is mailed to the administrators.
		resumelog=$(mktemp)
		for P in P*
		do
			test -e "$P" || break
			ProcessRequest "$P" "$N" "$resumelog" ""
			mv -f "$P" "F${P:1}"
		done
		if test -s "$resumelog"; then
			"$Sendmail" -i $Administrators <<EOF
Subject: [${Host}][${Launch_time}] resume backup
$(cat $resumelog)
EOF
		fi
		rm -f "$resumelog"
		# Second pass: take each queued C* request, rename it to P*
		# (pending) while it runs, then mark it F* (finished).
		failog=$(mktemp)
		for C in C*
		do
			test -e "$C" || break
			P="P${C:1}"
			mv -f "$C" "$P"
			ProcessRequest "$P" "$N" "$Logfile" "$failog"
			mv -f "$P" "F${P:1}"
		done
		if test -s "$failog"; then
			echo "Some error happens in backup ${Host} => $TODOdir/$N, notifies $Administrators..."
			"$Sendmail" -i $Administrators <<EOF
Subject: [${Host}][${Launch_time}] errors in backup
$(cat $failog)
EOF
		fi
		rm -f "$failog"
		# Clean up finished markers.
		for F in F*
		do
			test -e "$F" || break
			rm -f "$F"
		done
		cd "$TODOdir"
	fi
done
)300>"${TODOdir}/.lock"
| true |
24c155c05aa6d1f7059420bdaf9c8b64b30a2175 | Shell | alexbuczynsky/arrow | /scripts/project-build.sh | UTF-8 | 615 | 3.078125 | 3 | [
"BSD-3-Clause",
"Apache-2.0",
"MIT"
] | permissive | #!/bin/bash
set -e
export JAVA_OPTS="-Xms512m -Xmx1024m"
# commons.sh presumably defines addLocalRepository/useLocalGenericConf
# used below — TODO confirm. Requires $BASEDIR to be set by the caller.
. $BASEDIR/arrow/scripts/commons.sh
# Build and test project $1 against locally installed arrow + arrow-test;
# arrow-test itself gets a simpler build (see else branch).
if [ "$1" != "arrow-test" ]; then
    $BASEDIR/arrow/scripts/project-install.sh $1
    cd $BASEDIR/arrow
    addLocalRepository generic-conf.gradle
    cd $BASEDIR
    # fetch and install arrow-test so $1 can resolve it locally
    git clone https://github.com/arrow-kt/arrow-test.git
    cd arrow-test
    useLocalGenericConf gradle.properties
    $BASEDIR/arrow/scripts/project-install.sh arrow-test
    cd $BASEDIR/$1
    useLocalGenericConf gradle.properties
    ./gradlew test
    ./gradlew check
else
    # TODO
    $BASEDIR/arrow/scripts/project-simple-build.sh $1
fi
| true |
52313c0c93232e03283be239d7755068a55cbb0a | Shell | Wolox/aws-lambdas-bootstrap | /script/bootstrap | UTF-8 | 834 | 3.125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Bootstrap the project: install tooling via helpers defined in
# install.sh (apex, nvm, node, npm packages), create one config per
# environment, then print usage hints.
source "script/common/install.sh"
echo ""
echo " Bootstrapping AWS Lambdas Bootstrap"
echo ""
install_apex
install_nvm
install_node
install_npm_packages
script/configure_env 'local'
script/configure_env dev
script/configure_env stage
script/configure_env prod
echo ""
echo " Project successfully bootstrapped"
echo ""
echo " Run 'script/server' to start a local development server."
echo " Run 'script/deploy FUCTION_NAME [ENV]' to deploy a function."
echo " RUN 'script/invoke' FUNCTION_NAME [ENV] to invoke a function."
echo ""
echo ""
echo " IMPORTANT!!!"
echo " ------------"
echo ""
echo " Make sure that your AWS credentials are properly configured"
echo " for Apex to be able to run. Check Apex's documentation"
echo " for more information: "
echo ""
echo " http://apex.run/#aws-credentials"
echo ""
echo ""
| true |
4fe56e3adf29181af3cdb6d0f29c9398acb9be21 | Shell | tedle/uitabot | /scripts/pre-commit | UTF-8 | 306 | 2.78125 | 3 | [
"ISC"
] | permissive | #!/bin/sh
# Git pre-commit hook: run the Python linters/type checker and the
# web-client JS linter from the repository root; block the commit if
# either fails. All output is redirected to stderr so git shows it.
exec 1>&2
# Quote the substitution and fail early: the original unquoted backtick
# form silently linted the wrong directory if the cd failed.
cd "$(git rev-parse --show-toplevel)" || exit 1
(cd bot; python -m flake8 uitabot.py uita test type-stubs && mypy)
PYLINT_EXIT_CODE=$?
(cd web-client; npm run --silent lint -- -f unix)
JSLINT_EXIT_CODE=$?
if [ $PYLINT_EXIT_CODE -ne 0 ] || [ $JSLINT_EXIT_CODE -ne 0 ]; then
    exit 1
fi
exit 0
| true |
b6ddb72d6ddb50608b87566963e0117ec6acdc8c | Shell | bzimage-it/pegaso | /t/bash/snippets.sh | UTF-8 | 1,685 | 3.4375 | 3 | [] | no_license | #!/bin/bash
# Self-test for the pegaso bash snippet/utility library.
tROOT="$(dirname $0)"
# header.sh is expected to set PEGASO_ROOT / PEGASO_PARENT_ROOT.
source $tROOT/../../snippets/bash/header.sh || exit 2
# sanity-check that the detected roots point where this test expects
test "$(basename $PEGASO_ROOT)" == snippets || exit 3
test "$(basename $PEGASO_PARENT_ROOT)" == pegaso || exit 4
# library under test
tU=$PEGASO_PARENT_ROOT/lib/bash/utils.lib.sh
source $tU || exit 5
# fixture values referenced by the generated test scripts below
tX=ciccio
tY=pluto
tZ=paperino
# scratch file each generated test case is written to
# NOTE(review): tempfile(1) is Debian-specific and deprecated; mktemp(1)
# is the portable replacement.
t=$(tempfile)
# Report a fatal test failure with the given message and terminate the
# script with the dedicated test-abort exit code (10).
function tabort() {
    printf 'tabort called: %s\n' "$1"
    exit 10
}
# Run one generated test script and assert on its exit status.
#   $1 - file containing the script
#   $2 - expected exit status
#   $3 - message passed to tabort on mismatch
# The script is also echoed to stderr (tee) and traced with bash -x so
# the executed commands are visible in the test output.
function myexec () {
    local file=$1
    local expect=$2
    local abort_msg=$3
    cat $file | tee -a /dev/stderr | bash -x
    test $? == $expect || tabort "$abort_msg"
}
# Each case writes a small script to $t that sources the utils library,
# runs one or more pegaso assertions and ends with "exit 0"; myexec then
# verifies the script's exit status.
# ABRA: a failing string assertion must exit with PEGASO_EXIT_CODE_ON_ASSERT.
ID=ABRA
cat <<EOF > $t
source $tU
pegaso_assert "aa == ab" "abort $ID"
exit 0
EOF
myexec $t ${PEGASO_EXIT_CODE_ON_ASSERT} $ID
# ABRA2: a passing assertion lets the script reach its "exit 0".
ID=ABRA2
cat <<EOF > $t
source $tU
pegaso_assert "aa == aa" "abort $ID"
exit 0
EOF
myexec $t 0 $ID
# CADA: assertions over expanded variables (note the escaped \$ so the
# variables expand inside the generated script, not here).
ID=CADA
cat <<EOF > $t
source $tU
X=$tX
Y=$tY
Z=$tZ
pegaso_assert "\$X == ciccio" "abort $ID - 1"
pegaso_assert "\$X != pluto" "abort $ID - 2"
pegaso_assert "\$X != \$Y" "abort $ID - 3"
pegaso_assert "\$X != \$Y -a \$Z != \$Y" "abort $ID - 4"
exit 0
EOF
myexec $t 0 $ID
# EMALE / EMALE2: pegaso_assert_eq on equal and unequal operands.
ID=EMALE
cat <<EOF > $t
source $tU
pegaso_assert_eq 1 1 "abort $ID - 1"
pegaso_assert_eq ciccio ciccio "abort $ID - 2"
exit 0
EOF
myexec $t 0 $ID
ID=EMALE2
cat <<EOF > $t
source $tU
pegaso_assert_eq 1 2 "abort $ID - 1"
exit 0
EOF
myexec $t ${PEGASO_EXIT_CODE_ON_ASSERT} $ID
# DEF1: pegaso_assert_def passes on a non-empty value ...
ID=DEF1
cat <<EOF > $t
source $tU
pegaso_assert_def defined "abort $ID - 1"
exit 0
EOF
myexec $t 0 $ID
# ... and the empty-value case is expected to exit 1.
ID=DEF1
cat <<EOF > $t
source $tU
undef=
pegaso_assert_def "\$undef" "abort $ID - 1"
exit 0
EOF
myexec $t 1 $ID
echo EXIT SUCCESSFULL
exit 0 | true |
117cd15aec2a7cfd6b312f0dcaa995f97de9bc89 | Shell | erik-morgan/code | /Bash/missing-p65.sh | UTF-8 | 969 | 3.328125 | 3 | [] | no_license | #!/bin/bash
# For every document number N listed in MISSING.txt, locate matching
# source files under /Users/HD6904/D*, classify them via Spotlight
# metadata (mdls) as InDesign or PageMaker, and copy the newest one into
# the MISSING folder as N.indd / N.p65 (macOS-specific: mdls, stat -f).
while IFS= read -r N; do
    FILES=$(find /Users/HD6904/D* -type f \( -iname "$N *" -o -iname "$N *.p??" \) -not -name "*.pdf" -not -name "*.??" -not -name "*TOC*" -print)
    # split the find output on newlines so paths with spaces survive
    IFS=$'\n'
    for F in $FILES; do
        F_KIND=$(mdls -name 'kMDItemKind' "$F")
        if [[ "$F_KIND" =~ .*InDesign.* ]]; then
            TARGET_F="/Users/HD6904/Erik/PM2INDD/MISSING/${N}.indd"
        elif [[ "$F_KIND" =~ .*PageMaker.* ]]; then
            TARGET_F="/Users/HD6904/Erik/PM2INDD/MISSING/${N}.p65"
        fi
        if [ -n "$TARGET_F" ]; then
            if [ -e "$TARGET_F" ]; then
                # only overwrite when the candidate is at least as new
                TARGET_MDATE=$(stat -f %m "$TARGET_F")
                F_MDATE=$(stat -f %m "$F")
                if [ $F_MDATE -ge $TARGET_MDATE ]; then
                    cp -a "$F" "$TARGET_F"
                fi
            else
                cp -a "$F" "$TARGET_F"
            fi
        fi
        # reset so an unclassified next file does not reuse this target
        TARGET_F=""
    done
done < /Users/HD6904/Erik/PM2INDD/MISSING.txt | true |
1abb950451ffbcf701530f3fe9e0092684a149bb | Shell | stephen-knutter/infrastructure-as-code-on-aws | /virtual-server-with-vpn/vpn.sh | UTF-8 | 776 | 2.59375 | 3 | [] | no_license | #!/bin/bash -ex
# Create the awsinaction chapter-5 VPN CloudFormation stack in the first
# VPC/subnet returned by the API, with randomly generated IPSec shared
# secret and VPN password, then print the stack outputs.
VpcId=$(aws ec2 describe-vpcs --query Vpcs[0].VpcId --output text)
SubnetId=$(aws ec2 describe-subnets --filters Name=vpc-id,Values=$VpcId --query Subnets[0].SubnetId --output text)
# 30 random bytes, base64-encoded
SharedSecret=$(openssl rand -base64 30)
Password=$(openssl rand -base64 30)
# NOTE(review): assumes an EC2 key pair named "mykey" exists — confirm.
aws cloudformation create-stack --stack-name vpn --template-url https://s3.amazonaws.com/awsinaction/chapter5/vpn-cloudformation.json --parameters ParameterKey=KeyName,ParameterValue=mykey ParameterKey=VPC,ParameterValue=$VpcId ParameterKey=Subnet,ParameterValue=$SubnetId ParameterKey=IPSecSharedSecret,ParameterValue=$SharedSecret ParameterKey=VPNUser,ParameterValue=vpn ParameterKey=VPNPassword,ParameterValue=$Password
aws cloudformation describe-stacks --stack-name vpn --query Stacks[0].Outputs
| true |
b694bac23e698bb6e8ac9f5d1d126cb7a5bdca6e | Shell | Azure/AgentBaker | /pkg/agent/testdata/convert.sh | UTF-8 | 373 | 3.328125 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | # !/bin/bash
# Regenerate the decoded helper scripts for one test-case folder: find
# every "content: !!binary" marker in <folder>/CustomData, then
# base64-decode + gunzip the line that follows it into <folder>/lineN.sh.
if [[ $# -ne 1 ]]; then
    echo "Usage: $0 <test-case-data-folder>"
    exit 1;
fi
# drop previously generated files
rm $1/line*.sh
file="./$1/CustomData"
#echo "Processing $file"
# line numbers of the "content: !!binary" markers
lineNumber=`grep "content: \!\!binary" -n $file | cut -d':' -f1`
for i in $lineNumber; do
    # the payload sits on the line after the marker
    c=$((i+1));
    #echo "Working on line $c";
    z=`sed -n ${c}p $file`
    echo $z | base64 --decode | gunzip > $1/line${c}.sh
done | true |
cc8483d6a30aa08bd0b7c3cda001a726ce462ccf | Shell | expnch/sms-snail-racing | /tools/clear_hooks.sh | UTF-8 | 499 | 2.65625 | 3 | [
"CC0-1.0"
] | permissive | set -euo pipefail
# Delete every Zipwhip webhook registered for the current session.
# Requires ZW_SESSION_KEY (set by env.sh), curl and jq.
source env.sh
# List all webhooks and extract their ids.
# The original used "-request POST", which curl parses as the bundled
# short options -r/-e/-q/..., not as the long option --request.
ids=$(curl -s --request POST 'https://api.zipwhip.com/webhook/list' \
  --header 'Content-Type: application/x-www-form-urlencoded' \
  --data-urlencode "session=$ZW_SESSION_KEY" | jq -r '.response[].webhookId')
for id in $ids; do
  echo "$id"
  curl --location --request POST 'https://api.zipwhip.com/webhook/delete' \
    --header 'Content-Type: application/x-www-form-urlencoded' \
    --data-urlencode "session=$ZW_SESSION_KEY" \
    --data-urlencode "webhookId=$id"
done
| true |
56bad14699ff68e9542c7f60f54d5f40efb9463b | Shell | igorbelitei/dotfiles | /bashrc/convox.sh | UTF-8 | 1,515 | 2.703125 | 3 | [] | no_license | alias cx="convox"
# Convox CLI shortcuts. cxs/cxp/cxeu switch the active app + host (and,
# for prod, rack) files under ~/.convox, then print the selected host.
alias cxs="echo formapi > ~/code/docspring/.convox/app \
    && cp ~/.convox/host.staging ~/.convox/host \
    && rm -f ~/.convox/rack \
    && printf 'Switched to Staging host: ' \
    && cat ~/.convox/host"
alias cxp="echo formapi > ~/code/docspring/.convox/app \
    && cp ~/.convox/host.prod ~/.convox/host \
    && cp ~/.convox/rack.prod ~/.convox/rack \
    && printf 'Switched to Prod host: ' \
    && cat ~/.convox/host"
alias cxeu="echo docspring > ~/code/docspring/.convox/app \
    && cp ~/.convox/host.eu ~/.convox/host \
    && rm -f ~/.convox/rack \
    && printf 'Switched to EU host: ' \
    && cat ~/.convox/host"
# Plain subcommand abbreviations.
alias cxr="convox rack"
alias cxe="convox exec"
alias cxru="convox run"
alias cxd="convox deploy --wait"
alias cxl="convox logs"
alias cxrs="convox resources"
alias cxsc="convox scale"
alias cxi="convox instances"
alias cxrl="convox releases"
alias cxb="convox builds"
alias cxps="convox ps"
# Easy way to switch between versions.
# Install v3: https://docs.convox.com/installation/cli
# Install v2: https://github.com/convox/docs/blob/8b3dfeef207711d442295afdb08c88491b0869a4/docs/introduction/installation.md
# See also: https://community.convox.com/t/what-happened-to-gen-2-cli-downloads/741
# https://docsv2.convox.com/introduction/installation
# Upgrading: https://docs.convox.com/help/upgrading
alias convox2="set -x && sudo ln -fs /usr/local/bin/convox2 /usr/local/bin/convox && set +x"
alias convox3="set -x && sudo ln -fs /usr/local/bin/convox3 /usr/local/bin/convox && set +x"
| true |
8002d006dd0e8ad0a44fccc49a8aa3bb752253d4 | Shell | Nexenta/nza-userland | /nza-userland/components/zones/dpkg/src/usr/lib/brand/dpkg/uninstall | UTF-8 | 2,989 | 3.6875 | 4 | [] | no_license | #!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# get script name (bname) and path (dname)
#
bname=`basename $0`
#
# common shell script functions
#
. /usr/lib/brand/dpkg/common.ksh
. /usr/lib/brand/shared/uninstall.ksh
#
# options processing
#
zonename=$1
if [ -z "$zonename" ]; then
printf "$f_abort\n" >&2
exit $ZONE_SUBPROC_FATAL
fi
zonepath=$2
if [ -z "$zonepath" ]; then
printf "$f_abort" >&2
exit $ZONE_SUBPROC_FATAL
fi
shift 2
options="FhHnv"
options_repeat=""
options_seen=""
opt_F=""
opt_n=""
opt_v=""
# check for bad or duplicate options
OPTIND=1
while getopts $options OPT ; do
case $OPT in
\? ) usage_err ;; # invalid argument
: ) usage_err ;; # argument expected
* )
opt=`echo $OPT | sed 's/-\+//'`
if [ -n "$options_repeat" ]; then
echo $options_repeat | grep $opt >/dev/null
[ $? = 0 ] && break
fi
( echo $options_seen | grep $opt >/dev/null ) &&
usage_err
options_seen="${options_seen}${opt}"
;;
esac
done
# check for a help request
OPTIND=1
while getopts :$options OPT ; do
case $OPT in
h|H ) usage
esac
done
# process options
OPTIND=1
while getopts :$options OPT ; do
case $OPT in
F ) opt_F="-F" ;;
n ) opt_n="-n" ;;
v ) opt_v="-v" ;;
esac
done
shift `expr $OPTIND - 1`
[ $# -gt 0 ] && usage_err
#
# main
#
zoneroot=$zonepath/root
nop=""
if [[ -n "$opt_n" ]]; then
nop="echo"
#
# in '-n' mode we should never return success (since we haven't
# actually done anything). so override ZONE_SUBPROC_OK here.
#
ZONE_SUBPROC_OK=$ZONE_SUBPROC_FATAL
fi
#
# We want uninstall to work in the face of various problems, such as a
# zone with no delegated root dataset or multiple active datasets, so we
# don't use the common functions. Instead, we do our own work here and
# are tolerant of errors.
#
# get_current_gzbe
CURRENT_GZBE=`/sbin/beadm list -H | /bin/nawk -F\; '{
# Field 3 is the BE status. 'N' is the active BE.
if ($3 ~ "N")
# Field 2 is the BE UUID
print $2
}'`
if [ -z "$CURRENT_GZBE" ]; then
print "$f_no_gzbe"
fi
uninstall_get_zonepath_ds
uninstall_get_zonepath_root_ds
# find all the zone BEs datasets associated with this global zone BE.
unset fs_all
(( fs_all_c = 0 ))
if [ -n "$CURRENT_GZBE" ]; then
/sbin/zfs list -H -t filesystem -o $PROP_PARENT,name \
-r $ZONEPATH_RDS |
while IFS=" " read uid fs; do
# only look at filesystems directly below $ZONEPATH_RDS
[[ "$fs" != ~()($ZONEPATH_RDS/+([^/])) ]] &&
continue
# match by PROP_PARENT uuid
[[ "$uid" != ${CURRENT_GZBE} ]] &&
continue
fs_all[$fs_all_c]=$fs
(( fs_all_c = $fs_all_c + 1 ))
done
fi
destroy_zone_datasets
exit $ZONE_SUBPROC_OK
| true |
6320f1b4a9b48a7411f6da299abcdd02b44da598 | Shell | amiga909/deluge-synthstrom-utils | /multisample/xml.sh | UTF-8 | 3,795 | 3.125 | 3 | [] | no_license | #! /bin/sh
## Create Deluge multisample XML preset file from folder of samples with named wavs
## Params
## 1 = Root folder where samples are stored on the SD Card
# remember the original IFS; split only on newlines so filenames with
# spaces survive the for-loops below (restored at the end of the script)
OIFS="$IFS"
IFS=$'\n'
# soxi (from SoX) is needed to read sample counts from the wav files
if ! [ -x "$(command -v soxi)" ]; then
    echo 'Error: soxi (sox.sourceforge.net) is not installed.'
    exit 1
fi
# optional $1 overrides the sample-root path written into the presets
if [ ! -z "$1" ]; then
    DELUGE_SAMPLES_ROOT="$1"
else
    DELUGE_SAMPLES_ROOT="SAMPLES/_stTest________"
fi
WORKING_DIR="./"
# prefix for generated preset file names ("l.<instrument>.XML")
DELUGE_PRESET_NAMESPACE="l"
# map Deluge 0-50, fixh format, https://docs.google.com/document/d/11DUuuE1LBYOVlluPA9McT1_dT4AofZ5jnUD5eHvj7Vs/edit
paramVals=(0x80000000 0x851EB851 0x8A3D70A2 0x8F5C28F3 0x947AE144 0x99999995 0x9EB851E6 0xA3D70A37 0xA8F5C288 0xAE147AD9 0xB333332A 0xB851EB7B 0xBD70A3CC 0xC28F5C1D 0xC7AE146E 0xCCCCCCBF 0xD1EB8510 0xD70A3D61 0xDC28F5B2 0xE147AE03 0xE6666654 0xEB851EA5 0xF0A3D6F6 0xF5C28F47 0xFAE14798 0x00000000 0x051EB83A 0x0A3D708B 0x0F5C28DC 0x147AE12D 0x1999997E 0x1EB851CF 0x23D70A20 0x28F5C271 0x2E147AC2 0x33333313 0x3851EB64 0x3D70A3B5 0x428F5C06 0x47AE1457 0x4CCCCCA8 0x51EB84F9 0x570A3D4A 0x5C28F59B 0x6147ADEC 0x6666663D 0x6B851E8E 0x70A3D6DF 0x75C28F30 0x7AE14781 0x7FFFFFD2)
# Release-time presets, indexed into the paramVals table above; which one
# is used depends on the instrument-name prefix (see "apply params").
RELEASE_TIME="${paramVals[10]}"
RELEASE_TIME_BASS="${paramVals[8]}"
RELEASE_TIME_BASS_SHORT="${paramVals[1]}"
RELEASE_TIME_LEAD="${paramVals[15]}"
RELEASE_TIME_PAD="${paramVals[26]}"
RELEASE_TIME_FX="${paramVals[18]}"
echo "-----------------------------"
# 1) Cut bad recordings (sometimes lowest and hightes octaves sound very bad)
# One preset per immediate sub-directory of the working dir.
for instrument in $(find "$WORKING_DIR/" -type d -mindepth 1 -maxdepth 1 | sort ); do
    instName=$(basename "$instrument")
    # instrument key = folder name up to the first '-'
    instNameRaw="${instName/-*/}"
    sampleRangesStr=""
    hasValidFilenames=false
    # a folder only qualifies if every wav matches "<midi>.<note>.wav"
    for wav in $(find "$instrument" -type f -maxdepth 1 -iname '*.wav' | sort -V ); do
        hasValidFilenames=true
        #pattern: 104.g#7.wav, 60.c4.wav,
        if ! [[ $wav =~ [0-9]+\.[a-zA-Z]{1}\#?[0-9]{1}\.wav$ ]]; then
            #echo "$wav has an invalid format"
            hasValidFilenames=false
        fi
    done
    if [ "$hasValidFilenames" = true ]; then
        # first pass: count the wavs (the last one gets no rangeTopNote)
        wavTotalCount=0
        for wav in $(find "$instrument" -type f -maxdepth 1 -iname '*.wav' | sort -V ); do
            wavTotalCount=$(($wavTotalCount + 1))
        done
        wavCount=0
        # second pass: build one <sampleRange> per wav
        for wav in $(find "$instrument" -type f -maxdepth 1 -iname '*.wav' | sort -V ); do
            wavCount=$(($wavCount + 1))
            name=$(basename "$wav")
            rangeTopNote=0
            fileName="$DELUGE_SAMPLES_ROOT/$instName/$name"
            transpose=0
            startSamplePos=0
            # total number of samples in the wav (soxi -s)
            endSamplePos="$(soxi -s $wav)"
            # MIDI note number = filename up to the first dot
            midiNo="${name/.*/}"
            midiNo=$((midiNo + 0))
            rangeTopNote=$(($midiNo)) # omit if last
            transpose=$((60 - $midiNo)) # if 0 omit
            rangeTopNoteStr=$(cat <<EOF
rangeTopNote="$rangeTopNote"
EOF)
            if [[ $wavCount = $wavTotalCount ]]; then
                rangeTopNoteStr=""
            fi
            sampleRangesStr=$(cat <<EOF
$sampleRangesStr <sampleRange
    $rangeTopNoteStr
    fileName="$fileName"
    transpose="$transpose">
    <zone startSamplePos="$startSamplePos" endSamplePos="$endSamplePos" />
    </sampleRange>
EOF)
        done
        # apply params
        # NOTE(review): a quoted pattern in [[ =~ ]] matches a literal
        # substring, so e.g. any name containing "b." anywhere selects
        # the bass release time — confirm this is intended.
        if [[ $instNameRaw =~ "b." ]]; then
            RELEASE_TIME="$RELEASE_TIME_BASS"
        elif [[ $instNameRaw =~ "bs." ]]; then
            RELEASE_TIME="$RELEASE_TIME_BASS_SHORT"
        elif [[ $instNameRaw =~ "l." ]]; then
            RELEASE_TIME="$RELEASE_TIME_LEAD"
        elif [[ $instNameRaw =~ "p." ]]; then
            RELEASE_TIME="$RELEASE_TIME_PAD"
        elif [[ $instNameRaw =~ "x." ]]; then
            RELEASE_TIME="$RELEASE_TIME_FX"
        fi
        # substitute the placeholders in template.XML and write the preset
        template=$(<template.XML)
        template="${template/__SAMPLE_RANGES__/$sampleRangesStr}"
        template="${template/__RELEASE_TIME__/$RELEASE_TIME}"
        delugePresetName="$DELUGE_PRESET_NAMESPACE.$instNameRaw.XML"
        printf "%s" "$template" > "$delugePresetName"
        echo "Created $delugePresetName for folder $instNameRaw"
    else
        echo "folder $instNameRaw: no XML generated, invalid files"
    fi
done
IFS="$OIFS"
| true |
7d14643f7e4572432a31a00256e29fe2d48942b5 | Shell | AutomateCompliance/complianceascode-content | /build-oval510/ubuntu2004/fixes/bash/grub2_audit_backlog_limit_argument.sh | UTF-8 | 952 | 3.296875 | 3 | [
"BSD-3-Clause"
] | permissive | # platform = multi_platform_rhel,multi_platform_fedora,multi_platform_ol,multi_platform_rhv,multi_platform_ubuntu,multi_platform_sle
# Remediation is applicable only in certain platforms
# (not inside docker/podman containers, and only when grub2-common is
# installed according to dpkg)
if [ ! -f /.dockerenv ] && [ ! -f /run/.containerenv ] && { dpkg-query --show --showformat='${db:Status-Status}\n' 'grub2-common' 2>/dev/null | grep -q installed; }; then
# Correct the form of default kernel command line in GRUB
if grep -q '^GRUB_CMDLINE_LINUX=.*audit_backlog_limit=.*"' '/etc/default/grub' ; then
# modify the GRUB command-line if an audit_backlog_limit= arg already exists
sed -i 's/\(^GRUB_CMDLINE_LINUX=".*\)audit_backlog_limit=[^[:space:]]*\(.*"\)/\1 audit_backlog_limit=8192 \2/' '/etc/default/grub'
else
# no audit_backlog_limit=arg is present, append it
sed -i 's/\(^GRUB_CMDLINE_LINUX=".*\)"/\1 audit_backlog_limit=8192"/' '/etc/default/grub'
fi
# regenerate grub.cfg so the new kernel argument takes effect
update-grub
else
>&2 echo 'Remediation is not applicable, nothing was done'
fi | true |
83491e1750c2b1444efec351012b03d69f1745a6 | Shell | xinyuzhao/dotfiles | /confs/.zshrc | UTF-8 | 835 | 2.796875 | 3 | [] | no_license | source ~/.zplug/init.zsh
# Interactive zsh configuration: zplug-managed plugins, key bindings,
# starship prompt, and local rc overrides.
# zplug "larkery/zsh-histdb"
zplug "zsh-users/zsh-autosuggestions"
zplug "zsh-users/zsh-history-substring-search"
# Install plugins if there are plugins that have not been installed
if ! zplug check --verbose; then
    printf "Install? [y/N]: "
    # read -q: zsh builtin, accepts a single y/n keystroke
    if read -q; then
        echo; zplug install
    fi
fi
zplug load
# arrow up/down
bindkey '^[[A' history-substring-search-up
bindkey '^[[B' history-substring-search-down
# =============================================================================
# make completion work for keybase
zstyle ':completion:*' accept-exact-dirs true
# allows interactive comments
set -k
# enable starship
# NOTE(review): hard-codes the Apple-silicon Homebrew path; this fails on
# machines where starship lives elsewhere — confirm.
eval "$(/opt/homebrew/bin/starship init zsh)"
source $HOME/.rc
# optional machine-specific overrides
if [ -f $HOME/.rc.local ]; then
    source $HOME/.rc.local
fi
[ -f ~/.fzf.zsh ] && source ~/.fzf.zsh
| true |
999e423fd63d41e25e6507d27614f0f8102fc44b | Shell | LukasHanot/Portfolio | /Projecten/Linux/Scripts/EindeLes.sh | UTF-8 | 774 | 3.546875 | 4 | [] | no_license | #!/bin/bash
#Lukas Hanot INF 102A
# Countdown until the end of the current lesson: asks for the end time
# (hh:mm, assumed to be today), prints the remaining time every 15
# seconds and sounds an "alarm" once the time is up.
TELLER=6000                        # iteration cap; at 15 s each this outlasts any lesson
echo "fill in time when this lesson ends as followed (hh:mm)"
INPUT1=$(date "+%Y-%m-%d")         # a lesson never runs into the next day, so the date is always today
read INPUT2                        # end time entered by the user
INPUT="$INPUT1 $INPUT2"            # date and time combined for the computations below
while [ "$TELLER" -ge 0 ]
do
	TIJD=$(date +%s)                    # seconds since the UNIX epoch, now
	TOTTIJD=$(date -d "$INPUT" +%s)     # end of the lesson in epoch seconds (GNU date -d)
	NOGTEGAAN=$((TOTTIJD - TIJD))       # seconds still to go
	INMINUTEN=$((NOGTEGAAN / 60 + 1))   # rounded up to whole minutes
	echo "You'll have to survive for another" $INMINUTEN minutes or $NOGTEGAAN seconds
	echo ""
	if [ "$NOGTEGAAN" -le 0 ]           # -le = less or equal
	then
		echo ALARM!! ALARM!! ALARM!!
		read x                          # wait for a keypress before quitting
		exit 0
	fi
	sleep 15
	TELLER=$((TELLER - 1))
done
| true |
8a7bb4b0ce4f540d7eefa84d127cb995b63af933 | Shell | mkoskar/dotfiles | /bin/a | UTF-8 | 293 | 2.84375 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
set -eu
# Countdown alarm: sleep for the given duration (default 5m, anything
# sleep(1) accepts), then "beep" via a critical desktop notification and
# a looped freedesktop alarm sound.
d=${1:-5m}
ts=$(date -R)
printf '%s ... alarm in %s\n' "$ts" "$d"
sleep "$d"
echo Beep...
# "notify" is presumably a user wrapper around notify-send — TODO confirm
notify -u critical "Beep... Time's up!"
# play the alarm sound 5 extra times, suppressing mpv chatter
mpv --no-config \
    --msg-level=all=error \
    --loop-file=5 \
    /usr/share/sounds/freedesktop/stereo/alarm-clock-elapsed.oga
| true |
d62509085c71a193273e9a5346e16cceac70d550 | Shell | dawn-0531china/dawn-0531china.github.io | /commit.sh | UTF-8 | 531 | 3.046875 | 3 | [] | no_license | #!/bin/bash
# Avoid the "LF will be replaced by CRLF" warning raised by git add
# NOTE(review): "auto.crlf" looks like a typo for the standard
# "core.autocrlf" key — confirm.
echo '执行命令:git config auto.crlf true\n'
git config auto.crlf true
# Stage every change
echo '执行命令:git add -A\n'
git add -A
# Commit the staged files
echo "执行命令:git commit -m 'update gitbook'\n"
git commit -m 'update gitbook'
# Push the local repository to the remote
echo '执行命令:git push origin master\n'
git push origin master
# Return to the previous working directory
echo "回到刚才工作目录"
cd -
| true |
6dd27b8dfb9c48eb3be31f13b887c8e605d2be92 | Shell | sibomarie/batstests | /t1.1.0.bats | UTF-8 | 1,465 | 3.03125 | 3 | [] | no_license | #!/usr/local/bin/bats
# bats suite: sanity checks for an OpenStack controller node (hostname,
# connectivity, SELinux/firewall state, core services). Each test passes
# when its last command exits 0.
@test "1.1.1 - Hostname is set correctly" {
 # NOTE(review): 'controller'*[1-2]* is an unquoted shell glob, not part
 # of the grep pattern — grep 'controller[1-2]' was probably intended.
 hostname | grep 'controller'*[1-2]*
}
@test "1.1.2 - controller connects to internet" {
 ping -q -c1 google.com
}
@test "1.1.3 - SElinux is disabled" {
 sestatus | grep 'Mode from config file' | egrep disabled
}
@test "1.1.4 - firewall is disabled" {
 # NOTE(review): the systemd unit is normally named "firewalld";
 # "firewall" may never resolve — confirm.
 systemctl status firewall | grep inactive
}
@test "1.1.5 - chronyd is running" {
 systemctl status chronyd.service
}
@test "1.1.10 - The database are started and running" {
 systemctl status mariadb
}
@test "1.1.16 - RABBITMQ is running and cluster is up" {
 systemctl status rabbitmq-server.service
 # NOTE(review): ${hostname} is a shell variable that is usually unset;
 # $(hostname) or $HOSTNAME was probably intended — confirm.
 rabbitmqctl cluster_status | grep running_nodes | grep rabbit@${hostname}
 rabbitmqctl list_users | grep openstack
}
@test "1.1.17 - the appropriate openstack services are active" {
 status=$(openstack-status)
 echo $status | grep "nova-api:[ ]*active"
 echo $status | grep "nova-neutron-server:[ ]*active"
 echo $status | grep "nova-neutron-dhcp-agent:[ ]*active"
 echo $status | grep "nova-scheduler:[ ]*active"
 echo $status | grep "openstack-dashboard:[ ]*active"
 echo $status | grep "dbus:[ ]*active"
 echo $status | grep "memcached:[ ]*active"
 echo $status | grep "openstack-cinder-api:[ ]*active"
 echo $status | grep "openstack-cinder-scheduler:[ ]*active"
 echo $status | grep "openstack-glance-registry:[ ]*active"
 echo $status | grep "openstack-glance-api:[ ]*active"
}
| true |
ad44f961d0974a32bf369ce1621964157fb2d20c | Shell | kclejeune/RegistrationBot | /macSetup.sh | UTF-8 | 904 | 3.28125 | 3 | [] | no_license | #!/bin/bash
# check if homebrew is installed and install if not
# The original used "if [[ ! command -v brew > /dev/null ]]", which is a
# [[ ]] conditional-expression syntax error ('command' is a word, '-v'
# then has no valid position), so the install branch never ran.
if ! command -v brew > /dev/null; then
    echo "Installing Homebrew"
    # run the installer from https://brew.sh
    /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
    # handle the m1 case: Apple-silicon Homebrew lives under /opt/homebrew
    if [[ -d /opt/homebrew ]]; then
        eval "$(/opt/homebrew/bin/brew shellenv)"
    fi
fi

# same fix as above for the nix check
if ! command -v nix > /dev/null; then
    echo "Installing Nix"
    # run the nix installer from https://nixos.org
    sh <(curl -L https://nixos.org/nix/install) --daemon --darwin-use-unencrypted-nix-store-volume
fi

# install browsers
brew install firefox

# change time server to match SIS
sudo /usr/sbin/systemsetup -setnetworktimeserver "tick.usno.navy.mil"
sudo /usr/sbin/systemsetup -setusingnetworktime on

echo "Installation complete. Refer to README.md for running instructions."
| true |
e65031e170657de7cce894b8980353dddf2b2b24 | Shell | mpiko/scripts | /functs/funcProcess.sh | UTF-8 | 3,030 | 4.09375 | 4 | [] | no_license | #!/bin/bash -x
logevent() {
	# Conditionally append a timestamped message to a log file.
	# Usage:   logevent y|n LOGFILE message...
	# Example: if logevent "$LOG" "$LOGFILE" "script failed"; then exit 1; fi
	# Returns 0 when the message was logged, 1 when logging is disabled.
	local LOGEVENT=$1
	local LOGEVENTFILE=$2
	shift 2
	if [ "$LOGEVENT" = 'y' ]
	then
		TODAY=$(getDate)
		TIME=$(getTime)
		# ${TODAY}_... — the original "$TODAY_$TIME" expanded the unset
		# variable TODAY_ and silently dropped the date from every entry.
		echo "${TODAY}_${TIME}: $*" >> "$LOGEVENTFILE"
		return 0
	fi
	return 1
}
usage() {
	# Print the usage clause embedded in the calling script ($ME).
	# The clause is the first contiguous run of lines beginning with the
	# marker "#-#"; the marker is replaced by a space on output.
	# The original read the file with a for-loop and tested the match
	# with an unquoted [ ! -z $TMP ], which broke ("binary operator
	# expected") as soon as a clause line contained whitespace.
	local SCRIPT LINE
	local FOUND="FALSE"
	SCRIPT=$(command -v "$ME") || return 1
	while IFS= read -r LINE
	do
		case $LINE in
		'#-#'*)
			FOUND="TRUE"
			printf '%s\n' "$LINE" | sed 's/^#-#/ /'
			;;
		*)
			# the clause is contiguous: stop at the first
			# non-marker line once it has started
			[ "$FOUND" = "TRUE" ] && break
			;;
		esac
	done < "$SCRIPT"
}
isRunning() {
	# Return 0 if a process with exactly the given PID exists, 1 otherwise.
	# Usage: if isRunning "$PID"; then ...
	local PID=$1
	[ -z "$PID" ] && return 1
	# Ask ps for that exact PID. The original grepped "^$PID" against the
	# whole process list, so PID 12 also matched 120, 1234, etc.
	ps -p "$PID" > /dev/null 2>&1
}
dieIfNotEnoughArgs() {
	# Exit the script unless exactly NEEDED arguments were supplied.
	# Usage:   dieIfNotEnoughArgs x $#
	# Example: dieIfNotEnoughArgs 3 $#
	local NEEDED=$1
	local SUPPLIED=$2
	if [ -z "$NEEDED" ] || [ -z "$SUPPLIED" ]
	then
		echo "Invalid use of dieIfNotEnoughArgs"
		echo Usage dieIfNotEnoughArgs x \$#
		# the original bare "exit" took the status of the echo above,
		# i.e. 0 — a fatal usage error must exit non-zero
		exit 2
	fi
	if [ "$SUPPLIED" -ne "$NEEDED" ]
	then
		echo "$ME: Not enough args supplied"
		exit 1
	fi
}
enoughArgs() {
	# Test whether enough arguments were supplied.
	# Usage:    if enoughArgs 3 $#; then ...
	#           if enoughArgs +3 $#; then ...
	# A leading '+' means "this many or more"; otherwise the count must
	# match exactly. Returns 0 on success, 1 on failure; exits the script
	# on misuse (missing parameters).
	local want=$1
	local got=$2
	if [ -z "$want" ] || [ -z "$got" ]
	then
		echo "Invalid use of checkEnoughArgs"
		echo Usage checkEnoughArgs x \$#
		exit
	fi
	case $want in
	+*)
		# "at least N" form: strip the '+' and compare with -ge
		want=${want#+}
		if [ "$got" -ge "$want" ]
		then
			return 0
		fi
		;;
	esac
	# exact-count form (also the fallback when the '+' test failed)
	if [ "$got" -eq "$want" ]
	then
		return 0
	fi
	return 1
}
# Echo (space-separated) every hostname flagged for update in
# ~/bin/allhosts.master. Per the fields used here and in getUpdateUser:
# column 2 = hostname, column 3 = remote user, column 4 = update flag
# ("yes" to include); lines whose first column starts with '#' are skipped.
getUpdateHosts() {
    # get a list of hosts to be updated
    local FILE=~/bin/allhosts.master
    local FILELEN=$(wc -l $FILE| awk '{print $1}')
    local COUNTER=1
    while [ $COUNTER -le $FILELEN ]
    do
        # NOTE(review): head|tail re-reads the file once per line (O(n^2));
        # a single "while read" pass would be cheaper.
        local LINE=$(head -$COUNTER $FILE | tail -1)
        COUNTER=$( expr $COUNTER + 1)
        [ -z "$LINE" ] && continue
        # skip comment lines
        local SKIP=$(echo $LINE | awk '{print $1}' | grep '^#')
        [ ! -z $SKIP ] && continue
        UPDATE=$(echo $LINE | awk '{print $4}')
        if [ $UPDATE == "yes" ]
        then
            HNAME=$(echo $LINE | awk '{print $2}')
            SYNCHOSTS="$SYNCHOSTS $HNAME"
        fi
    done
    echo $SYNCHOSTS
}
getUpdateUser(){
	# Look up the remote user (column 3) for the given hostname in
	# ~/bin/allhosts.master and echo it.
	#   $1 - hostname to look up
	local FILE=~/bin/allhosts.master
	local HNAME=$1
	local RUSER
	# quote the arguments: the original unquoted grep/awk broke on empty
	# or pattern-like hostnames, and leaked FILE/RUSER as globals
	RUSER=$(grep -- "$HNAME" "$FILE" | awk '{print $3}')
	echo "$RUSER"
}
| true |
5657f77a8fae474ff55b4870d7c0306e8e588e8c | Shell | Primos-tn/backend-app | /push.sh | UTF-8 | 451 | 3.484375 | 3 | [] | no_license | #!/bin/bash
# copyright
# Parse -c <comment> and call push(); intended to stage, commit and push
# the working tree, but the actual git commands are still commented out.
comment=
push (){
	#git add .
	#git commit -am $comment
	echo "ok for $comment"
}
# -c takes a mandatory argument: the commit comment
while getopts "c:" option
do
	case $option in
		c)
		comment=$OPTARG
		;;
	esac
done
# if comment is empty
if [[ -z "$comment" ]]; then
	echo "I don't know what you are asking for
	usage ./push.sh -c [comment]"
else
	push
fi
# restart | true |
2faa4911e762f716f8c76d107c1b932f882d8d49 | Shell | MarthavBirapa/FIA | /1503322H/7.8 | UTF-8 | 349 | 3.40625 | 3 | [] | no_license | #!/bin/bash
#7.8 — Ensure no account in /etc/shadow has an empty password field;
# lock the account given as $1 if any empty field is found (needs root).
# Bold heading; the original format string "1m..." was missing its \e[ prefix.
printf "\e[1mEnsuring password fields are not empty: \e[0m"
# Accounts whose password field (column 2) is empty.
# The original line was "PFieldsNEmpty= $(...)": the space after '='
# assigned an empty string and then tried to EXECUTE the awk output.
PFieldsNEmpty=$(awk -F: '($2 == "" ) { print $1 " does not have a password "}' /etc/shadow)
echo "$PFieldsNEmpty"
if [ -z "$PFieldsNEmpty" ]; then
	printf "\e[1mNo remediation is needed\e[0m\n"
else
	# NOTE(review): only $1 is locked even if several accounts matched —
	# confirm that is the intended remediation.
	/usr/bin/passwd -l "$1"
	printf "\e[1mRemediation has been completed\e[0m\n"
fi
| true |
716e3172f035e8bd0793a87492335b33b5a060b8 | Shell | terence/aws-automation | /iam/cx.sh | UTF-8 | 4,557 | 3.515625 | 4 | [] | no_license | # AWS Scripts Command-line Assistant
#================================================================
# Interactive wrapper around common AWS CLI commands (S3, STS, IAM,
# Organizations, CloudFormation). The selection may be passed as $1 and
# an alternative profile as $2 for non-interactive use.
#================================================================
clear
#source ./vars.sh
# NOTE: the original "PWD=pwd" clobbered the shell-maintained $PWD with
# the literal string "pwd"; bash keeps $PWD up to date by itself, so no
# assignment is needed.
# DEFAULTS
PROFILE="ipadev"
REGION="ap-southeast-2"
OUTPUT="json"
STACK_NAME="IPA-BIA-stack1"
echo =============================================================
echo Hi $USER@$HOSTNAME. You are in $PWD directory.
echo -------------------------------------------------------------
echo 001 : AWS Configure
echo 002 : AWS S3 List
echo 003 : AWS STS Assume Role
echo ----------------------------------------------
echo 010 : AWS IAM List Users
echo 011 : AWS IAM List Roles
echo 012 : AWS IAM List Groups
echo 013 : AWS IAM List Policies
echo 014 : AWS IAM List access-keys
echo ----------------------------------------------
echo 020 : AWS ORGANIZATIONS List Accounts
echo 021 : AWS ORGANISATIONS List Roots
echo ----------------------------------------------
echo 200 : AWS CloudFormation create-stack Add Users
echo 201 : AWS CloudFormation delete-stack Delete Users
echo ----------------------------------------------
echo 210 : AWS CloudFormation create-stack Add Groups
echo 211 : AWS CloudFormation delete-stack Delete Groups
echo ----------------------------------------------
# quoted so the [Selection] glob cannot match a single-letter file name
echo "Enter [Selection] to continue"
echo =============================================================
# Command line selection
if [ -n "$1" ]; then
    SELECTION=$1
else
    read -n 3 SELECTION
fi
if [ -n "$2" ]; then
    PROFILE=$2
fi
echo Your selection is : $SELECTION.
echo Your profile is : $PROFILE.
case "$SELECTION" in
"001" )
    echo "===== AWS Configure - Setup"
    aws configure
    ;;
"002" )
    echo "===== AWS S3 List:" $PROFILE
    aws s3 ls --profile $PROFILE
    echo "Count:"
    aws s3 ls --profile $PROFILE | wc -l
    #aws s3 ls s3://bucketname
    #aws s3 cp
    # aws s3 sync local s3://remote
    ;;
"003" )
    echo "===== AWS Assume Role:" $PROFILE
    # aws sts assume-role --role-arn "arn:aws:iam::xxxxxxxxxxxx:role/AWSAdmin" --role-session-name AWSCLI-Session
    # aws sts get-caller-identity --profile ipadev
    ;;
"010" )
    echo "===== AWS IAM List Users:" $PROFILE
    aws iam list-users \
        --profile $PROFILE \
        --output $OUTPUT
    ;;
"011" )
    echo "===== AWS IAM List Roles:" $PROFILE
    aws iam list-roles \
        --profile $PROFILE \
        --output $OUTPUT
    ;;
"012" )
    echo "===== AWS IAM List Groups:" $PROFILE
    aws iam list-groups \
        --profile $PROFILE \
        --output $OUTPUT
    ;;
"013" )
    echo "===== AWS IAM List Policies:" $PROFILE
    aws iam list-policies \
        --profile $PROFILE \
        --output $OUTPUT
    ;;
"014" )
    echo "===== AWS IAM List Access Keys:" $PROFILE
    # the original was missing the line continuation after --user-name,
    # so --profile/--output ran as a separate (failing) command.
    # NOTE(review): the user name is hard-coded — consider a variable.
    aws iam list-access-keys \
        --user-name terence.chia \
        --profile $PROFILE \
        --output $OUTPUT
    ;;
"020" )
    echo "===== AWS ORGANIZATIONS List Accounts:" $PROFILE
    aws organizations list-accounts \
        --profile $PROFILE \
        --output $OUTPUT
    ;;
"021" )
    echo "===== AWS ORGANIZATIONS List Roots:" $PROFILE
    aws organizations list-roots \
        --profile $PROFILE \
        --output $OUTPUT
    ;;
"200" )
    echo "===== AWS CF create-stack Add Users:" $PROFILE
    STACK_NAME="terence-iam-stack1"
    aws cloudformation create-stack \
        --stack-name $STACK_NAME \
        --template-body file://stacks/iam-users-cf1.yaml \
        --capabilities CAPABILITY_NAMED_IAM
    # --role-arn arn:aws:iam::832435373672:role/Git2S3
    # --role-arn $STACK_ROLE \
    ;;
"201" )
    echo "===== AWS CF delete-stack Add Users:" $PROFILE
    STACK_NAME="terence-iam-stack1"
    aws cloudformation delete-stack \
        --stack-name $STACK_NAME \
        --profile $PROFILE \
        --output $OUTPUT
    ;;
"210" )
    echo "===== AWS CF create-stack Add Groups:" $PROFILE
    STACK_NAME="terence-iam-stack2"
    aws cloudformation create-stack \
        --stack-name $STACK_NAME \
        --template-body file://stacks/iam-groups-cf1.yaml \
        --capabilities CAPABILITY_NAMED_IAM \
        --role-arn arn:aws:iam::832435373672:role/Git2S3
    # --role-arn $STACK_ROLE \
    ;;
"211" )
    echo "===== AWS CF delete-stack Delete Groups:" $PROFILE
    STACK_NAME="terence-iam-stack2"
    aws cloudformation delete-stack \
        --stack-name $STACK_NAME \
        --profile $PROFILE \
        --output $OUTPUT
    ;;
# Attempt to cater for ESC
# NOTE(review): neither pattern matches a real ESC byte — "\x1B" is a
# four-character literal and "^[" is caret+bracket; compare against
# $'\x1b' instead.
"\x1B" )
    echo ESC1
    exit 0
    ;;
"^[" )
    echo ESC2
    exit 0
    ;;
# ------------------------------------------------
# GIT
# ------------------------------------------------
* )
    # Default option.
    # Empty input (hitting RETURN) fits here, too.
    echo
    echo "Not a recognized option."
    ;;
esac
| true |
57f35a5009a09d4a290168e01898d9da03dd3537 | Shell | PennLINC/xcpEngine | /utils/sfilter | UTF-8 | 7,078 | 3.59375 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
###################################################################
# ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ ⊗ #
###################################################################
###################################################################
# Generalised function for spatially filtering 4D BOLD timeseries
# data
###################################################################
###################################################################
# Constants
###################################################################
source ${XCPEDIR}/core/constants
source ${XCPEDIR}/core/functions/library.sh
###################################################################
# Usage function
###################################################################
Usage(){
# Print the command-line help; everything between the here-doc markers
# is emitted verbatim.
cat << endstream
___________________________________________________________________
Usage: sfilter -i <input> -o <output> <options>
Compulsory arguments:
 -i : Input timeseries
      The 4D timeseries to which the spatial filter is to
      be applied.
 -o : Output timeseries
      The path to the file where the filtered timeseries
      will be written.
Optional arguments:
 -s : Spatial filter mode [default gaussian]
      Input can either be 'gaussian', 'uniform', or 'susan'.
      * Gaussian smoothing [default] applies the same
        Gaussian smoothing kernel across the entire volume.
      * SUSAN-based smoothing restricts mixing of signals
        from disparate tissue classes (Smith and Brady,
        1997).
      * Uniform smoothing applies smoothing to all voxels
        until the smoothness computed at every voxel
        attains the target value.
      * Uniform smoothing may be used as a compensatory
        mechanism to reduce the effects of subject motion
        on the final processed image (Scheinost et al.,
        2014).
 -k : Spatial smoothing kernel [default 4]
      The FWHM of the smoothing kernel, in mm
 -m : Mask
      Smoothing is computed only within the specified
      binary mask.
 -u : USAN or MASTER
      * This option is required only for SUSAN-based
        smoothing. SUSAN uses the USAN to identify
        boundaries across which smoothing should be limited.
        For functional image processing, this is often an
        example volume extracted from the BOLD timeseries.
      * If this option is specified for UNIFORM smoothing,
        then smoothing is applied to the MASTER until it is
        uniformly smooth. The smoothing regime computed for
        the MASTER is then applied to the input dataset.
 -h : Hard segmentation USAN
      Set this flag if the USAN that you are providing is
      a hard segmentation of the image and you wish to
      disallow smoothing between parcels.
 -t : Trace
      If this flag is set, then any commands called by the
      sfilter routine will be explicitly printed to the
      console or log.
endstream
}
###################################################################
# Define defaults
###################################################################
filter=gaussian      # spatial filter mode: gaussian | uniform | susan
kernel=4             # smoothing kernel FWHM, in mm
hardseg=0            # 1 => treat the USAN as a hard segmentation
###################################################################
# Parse arguments
#
# Option arguments are quoted when handed to the validators so that
# paths containing spaces do not word-split.
###################################################################
while getopts "i:o:s:k:m:u:th" OPTION
   do
   case "${OPTION}" in
   i)
      # input 4D timeseries; must be a readable image
      image=${OPTARG}
      ! is_image "${image}" && Usage && exit
      ;;
   o)
      # output path for the filtered timeseries
      out=${OPTARG}
      ;;
   s)
      # filter mode: only the three supported values are accepted
      filter=${OPTARG}
      if    [[ ${filter} != gaussian ]] \
         && [[ ${filter} != susan ]] \
         && [[ ${filter} != uniform ]]
         then
         echo "Unrecognised filter mode: ${filter}."
         Usage
         exit
      fi
      ;;
   k)
      # kernel FWHM in mm; is+numeric is a library validator
      # (the '+' is part of its name, not a typo)
      kernel=${OPTARG}
      ! is+numeric "${kernel}" && Usage && exit
      ;;
   m)
      # binary mask image restricting the smoothing domain
      mask=${OPTARG}
      ! is_image "${mask}" && Usage && exit
      ;;
   u)
      # USAN / master image (used by susan and uniform modes)
      usan=${OPTARG}
      ! is_image "${usan}" && Usage && exit
      ;;
   h)
      hardseg=1
      ;;
   t)
      # trace mode: echo all subsequent commands
      set -x
      ;;
   *)
      echo "Option not recognised: ${OPTARG}"
      Usage
      exit
   esac
done
###################################################################
# Ensure that all compulsory arguments have been defined
###################################################################
[[ -z ${image} ]] && Usage && exit
[[ -z ${out} ]] && Usage && exit
[[ -z ${filter} ]] && Usage && exit
[[ -z ${kernel} ]] && Usage && exit
# Dispatch on the requested smoothing strategy.
case ${filter} in
gaussian)
   subroutine @u.1
   ################################################################
   # Convert filter kernel from mm to sigma
   # (SIGMA is defined in core/constants; presumably the FWHM->sigma
   # factor ~2.355 -- TODO confirm against core/constants)
   ################################################################
   kernel=$(arithmetic ${kernel}/${SIGMA})
   ################################################################
   # Add a mask argument, if a mask has been specified
   ################################################################
   [[ ! -z ${mask} ]] && mask="-mas ${mask}"
   # Isotropic Gaussian smoothing via fslmaths -s (sigma in mm).
   exec_fsl \
      fslmaths ${image} \
      -s ${kernel} \
      ${mask} \
      ${out}
   ;;
uniform)
   subroutine @u.2
   # 3dBlurToFWHM refuses to overwrite its -prefix target.
   rm -rf ${out}
   ################################################################
   # * If a mask has been specified, then use it for smoothing.
   # * Otherwise, automatically generate a mask using AFNI's
   #   3dAutomask tool. This may not perform as well on demeaned
   #   or detrended data.
   # * If a master dataset has been specified, then use it for
   #   smoothing.
   ################################################################
   [[ -n ${mask} ]] && mask="-mask ${mask}"
   [[ -z ${mask} ]] && mask="-automask"
   [[ -n ${usan} ]] && master="-blurmaster ${usan}"
   ################################################################
   # Use AFNI's 3dBlurToFWHM to obtain the smoothed image.
   # Smoothing is iterated until the target FWHM is attained
   # uniformly across the volume.
   ################################################################
   exec_afni \
      3dBlurToFWHM -quiet \
      -input ${image} \
      -prefix ${out} \
      -FWHM ${kernel} \
      -detrend \
      -temper \
      ${mask} \
      ${master} 2>/dev/null
   ;;
susan)
   subroutine @u.3
   ################################################################
   # Convert filter kernel from mm to sigma
   ################################################################
   kernel=$(arithmetic ${kernel}/${SIGMA})
   ################################################################
   # Use 75 percent of the median as the SUSAN brightness threshold
   ################################################################
   perc50=$(exec_fsl \
      fslstats ${usan} \
      -k ${mask} \
      -p 50)
   bt=$(arithmetic ${perc50}\*3/4)
   # A near-zero threshold disallows any mixing across parcel
   # boundaries when the USAN is a hard segmentation.
   [[ ${hardseg} == 1 ]] && bt=0.01
   # susan args: input, brightness threshold, sigma, dimensionality=3,
   # use median=1, number of USANs=1, USAN image + its threshold, output.
   exec_fsl \
      susan \
      ${image} \
      ${bt} \
      ${kernel} \
      3 1 1 \
      ${usan} \
      ${bt} \
      ${out}
   # Re-apply the mask: SUSAN can bleed signal outside it.
   [[ -n ${mask} ]] && exec_fsl \
      fslmaths \
      ${out} \
      -mul ${mask} \
      ${out}
   # Remove the auxiliary USAN-size image SUSAN leaves behind.
   is_image ${out}_usan_size.nii.gz \
      && exec_sys rm -f ${out}_usan_size.nii.gz
   ;;
esac
| true |
1c0fa1dd8a74602db35129afc40c24b79db7df0c | Shell | jtorres-dev/systems-programming-concepts | /scriptinginbash/secretfinder.sh | UTF-8 | 718 | 3.625 | 4 | [] | no_license | #!/bin/bash
###################################
#                                 #
#    Jonathan Torres              #
#    October 8, 2019              #
#                                 #
###################################
# creates a secretfiles directory
mkdir secretfiles
# changes directory to secretfiles; abort if that fails so we never
# extract into -- or delete files from -- the wrong directory
cd secretfiles || exit 1
# copies grep.tar from location to the current directory
# cp /home/CSCI2467/labs/misc/grep.tar .
# extracts the contents of grep.tar
tar -xf ../grep.tar
# searches for the string 'SECRET AUTH CODE' within all of the files in secretfiles
# the result is redirected into authcode.txt
grep 'SECRET AUTH CODE' * > authcode.txt
# removes all 1000 extracted files and any local grep.tar copy, then
# reports success. The original chained these with pipes
# ("rm ... | rm ... | echo ..."), but rm does not read stdin, so the
# pipeline only masked failures; run the commands sequentially instead.
# -f keeps cleanup quiet when a file (e.g. a local grep.tar) is absent.
rm -f f*==
rm -f grep.tar
echo "Found the secret auth code! Saved to authcode.txt"
| true |
6d3232d9d03ed7c102590ebd7ba7555fa1369188 | Shell | atbjones/cse130-pa-s20 | /asgn1/test/client/script | UTF-8 | 506 | 2.6875 | 3 | [] | no_license | #!/bin/sh
# Exercise the HTTP server on localhost:8080 with PUT, HEAD and GET
# requests. Transferred files are diffed against the originals in the
# parent directory; each diff's output lands in d_<file>.
clear

put_files="cb@d cbig csmall cbin c-_-"
probe_files="s_bad s_big s_f@il s_small s_dne"

echo "PUT tests"
for f in $put_files; do
	echo "Processing $f"
	curl -T "$f" "http://localhost:8080/$f"
	diff "../$f" "$f" > "d_$f"
done

echo "HEAD tests"
for f in $probe_files; do
	echo "Processing $f"
	curl -I "http://localhost:8080/$f"
done

echo "GET tests"
for f in $probe_files; do
	echo "Processing $f"
	curl "http://localhost:8080/$f" --output "$f"
	diff "../$f" "$f" > "d_$f"
done
| true |
0abef071518de4dc9a0843b2724838731dd2fb91 | Shell | andreafrancia/ely-itaca | /count-files | UTF-8 | 157 | 2.734375 | 3 | [] | no_license | #!/bin/bash
# Count occurrences of each record base name listed in the input file
# ($1). Trailing "C.ACC.ASC" / "X.ACC.ASC" suffixes are stripped so the
# two components of a record collapse onto one name; `uniq -c` then
# counts adjacent duplicates (like the original, this assumes the input
# is already grouped/sorted by name).
filename="$1"

# Emit one stripped name per line read from stdin. Reading line-by-line
# (instead of the original `for x in $(cat ...)`) preserves each line
# verbatim and avoids word-splitting and glob expansion of its contents.
strip_suffixes() {
    while IFS= read -r line; do
        line="${line%%C.ACC.ASC}"
        line="${line%%X.ACC.ASC}"
        echo "$line"
    done
}

strip_suffixes < "$filename" | uniq -c
| true |
a815afdcb5cec55628286a4e4ac56bcdaa519880 | Shell | airtonix/concourse-resource-samba | /test/run | UTF-8 | 374 | 2.5625 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# Local test harness: set up the environment variables Concourse would
# normally provide, then hand control to one of the resource scripts.
source /opt/resource/lib/log
set_log_prefix "test/run"
log "start"

# Fake Concourse build metadata expected by the resource scripts.
export SRC_DIR=$PWD
export BUILD_ID=1
export BUILD_NAME='my-build'
export BUILD_JOB_NAME='my-build-job'
export BUILD_PIPELINE_NAME='my-build-pipeline'
export ATC_EXTERNAL_URL='127.0.0.1/atc'

# First argument selects the resource script (default: check);
# the remaining arguments are passed through to it.
COMMAND="/opt/resource/${1:-check}"
log "command" "$COMMAND"

# exec replaces this shell with $COMMAND, so nothing after this line can
# ever run -- the original's trailing `log "end"` was unreachable dead
# code and has been removed. Quoting "$COMMAND" and "${@:2}" keeps
# paths/arguments containing spaces intact.
exec "$COMMAND" "${@:2}"
| true |
2a63184c37f8fb5b9981d5ebaaaf99fc25553493 | Shell | amitv87/EC20CEFAG | /ql-ol-sdk/ql-ol-rootfs/etc/init.d/quectel-gps-handle | UTF-8 | 1,060 | 3.4375 | 3 | [] | no_license | #! /bin/sh
# ---------------------------------------------------------------------------
# Copyright (c) 2015 Quectel Wireless Solutions Co., Ltd. All Rights Reserved.
# Quectel Wireless Solutions Proprietary and Confidential.
# ---------------------------------------------------------------------------
#
# init.d script for quectel-gps-handle
# Manages the quectel-gps-handle daemon via start-stop-daemon.
# NOTE(review): `set -x` traces every command and `set -e` aborts on the
# first failure -- unusual for an init script, but preserved as-is.
set -x
set -e
case "$1" in
  start)
        # Launch the handler detached in the background (-b) with the
        # vendor default profile ("-default" after "--" is passed to
        # the daemon itself, not to start-stop-daemon).
        echo -n "Starting quectel-gps-handle(uart-ddp): "
        echo "cp quectel-uart-ddp quectel-gps-handle done"
        start-stop-daemon -S -b -a /usr/bin/quectel-gps-handle -- -default
        echo "done"
        ;;
  stop)
        # Stop by process name (-n).
        echo -n "Stopping quectel gps handle: "
        start-stop-daemon -K -n quectel-gps-handle
        echo "done"
        ;;
  restart)
        # Re-invoke this script for stop then start; under `set -e` a
        # failing stop aborts the restart.
        $0 stop
        $0 start
        ;;
  *)
        echo "Usage: quectel-gps-handle { start | stop | restart }" >&2
        exit 1
        ;;
esac
exit 0
| true |
a4581fe17595cdfc46fdcda1a2e7685a580c6e6c | Shell | CarlosANovo/pgre | /tux61.sh | UTF-8 | 5,311 | 3.453125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Script to configure tux61
# Provisions the host end-to-end: web (Apache), NTP, FTP (vsftpd),
# DNS (BIND9), mail (Postfix + Dovecot), webmail (Rainloop) and users.
AUTHOR='CarlosANovo'
# Must run as root: the script installs packages and edits /etc.
if ! [ $(id -u) = 0 ]; then
    echo "This script must be run as root"
    exit 1
fi
echo 'Updating and installing necessary packages...'
apt update
apt install -y curl wget
echo 'Downloading files'
# Fetch the configuration bundle from the author's GitHub repository.
wget https://github.com/${AUTHOR}/pgre/archive/master.zip
echo 'Unpacking files'
unzip master.zip
echo ' === Installing Apache Web Server === '
apt install -y apache2
# Document root for the pgre.fe.up.pt virtual host.
mkdir -p /var/www/pgre.fe.up.pt/html
chown -R $USER:$USER /var/www/pgre.fe.up.pt/html
# Recursively set rwxr-xr-x. The original used `chmod -r 755`, but
# chmod's recursive flag is uppercase -R (matching the chown -R above);
# lowercase -r is parsed as a symbolic mode and made the command fail.
chmod -R 755 /var/www/pgre.fe.up.pt
echo 'Inserting content into the Web page...'
cp -r pgre-master/tux61/html/* /var/www/pgre.fe.up.pt/html
cp pgre-master/tux61/pgre.fe.up.pt.conf /etc/apache2/sites-available/pgre.fe.up.pt.conf
echo 'Enabling the Vitual Host...'
a2ensite pgre.fe.up.pt
systemctl restart apache2
systemctl enable apache2
echo ' === Installing the NTP server === '
apt install -y ntp
# Keep a backup of the stock config before replacing it.
cp /etc/ntp.conf{,.backup}
cp pgre-master/tux61/ntp.conf /etc/ntp.conf
systemctl restart ntp
systemctl enable ntp
echo ' === Installing FTP server === '
apt install -y vsftpd
# Same backup-then-replace pattern for the FTP daemon config.
cp /etc/vsftpd.conf{,.backup}
cp pgre-master/tux61/vsftpd.conf /etc/vsftpd.conf
systemctl restart vsftpd
systemctl enable vsftpd
echo ' === Installing DNS server === '
apt install -y bind9 bind9utils
cp pgre-master/tux61/bind9 /etc/default/bind9
# Backup-then-replace the BIND option and zone-declaration files.
cp /etc/bind/named.conf.options{,.backup}
cp pgre-master/tux61/named.conf.options /etc/bind/named.conf.options
cp /etc/bind/named.conf.local{,.backup}
cp pgre-master/tux61/named.conf.local /etc/bind/named.conf.local
echo 'Making the databases...'
mkdir /etc/bind/zones
# Reverse (172.16.2.0/24) and forward (pgre.fe.up.pt) zone databases.
cp pgre-master/tux61/db.2.16.172 /etc/bind/zones/db.2.16.172
cp pgre-master/tux61/db.pgre.fe.up.pt /etc/bind/zones/db.pgre.fe.up.pt
echo 'Checking correct configuration...'
named-checkconf
named-checkzone pgre.fe.up.pt /etc/bind/zones/db.pgre.fe.up.pt
named-checkzone 2.16.172.in-addr.arpa /etc/bind/zones/db.2.16.172
echo 'Restarting and enabling DNS server...'
# The Debian service unit for the bind9 package is "bind9" (the script
# itself uses /etc/default/bind9 above); the original's "bind" named a
# non-existent unit and both commands failed.
systemctl restart bind9
systemctl enable bind9
echo ' === Installing e-mail server === '
apt install -y net-tools wget lsof postfix mailutils
# Backup-then-replace the Postfix main configuration.
cp /etc/postfix/main.cf{,.backup}
cp pgre-master/tux61/main.cf /etc/postfix/main.cf
echo 'Restarting and enabling mail server...'
systemctl restart postfix
systemctl enable postfix
echo 'Installing IMAP agent...'
apt install -y dovecot-core dovecot-imapd
# Replace Dovecot's main config plus the auth/mail/master conf.d
# snippets, keeping a .backup of each original.
cp /etc/dovecot/dovecot.conf{,.backup}
cp pgre-master/tux61/dovecot.conf /etc/dovecot/dovecot.conf
cp /etc/dovecot/conf.d/10-auth.conf{,.backup}
cp pgre-master/tux61/10-auth.conf /etc/dovecot/conf.d/10-auth.conf
cp /etc/dovecot/conf.d/10-mail.conf{,.backup}
cp pgre-master/tux61/10-mail.conf /etc/dovecot/conf.d/10-mail.conf
cp /etc/dovecot/conf.d/10-master.conf{,.backup}
cp pgre-master/tux61/10-master.conf /etc/dovecot/conf.d/10-master.conf
echo 'Restarting IMAP agent...'
systemctl restart dovecot.service
systemctl enable dovecot.service
echo 'Installing Webmail service...'
apt install -y php7.0 libapache2-mod-php7.0 php7.0-curl php7.0-xml
mkdir -p /var/www/mail.pgre.fe.up.pt/html
chown -R $USER:$USER /var/www/mail.pgre.fe.up.pt/html
# Recursive chmod: uppercase -R (the original's lowercase -r is not a
# recursion flag and made the command fail).
chmod -R 755 /var/www/mail.pgre.fe.up.pt
# Rainloop's installer must run from the target document root.
cd /var/www/mail.pgre.fe.up.pt/html
curl -sL https://repository.rainloop.net/installer.php | php
cd -
cp pgre-master/tux61/mail.pgre.fe.up.pt.conf /etc/apache2/sites-available/mail.pgre.fe.up.pt.conf
a2ensite mail.pgre.fe.up.pt
systemctl restart apache2
echo 'Webmail service installed, please configure.'
# ADD USERS AND MAKE THEIR FOLDERS
echo ' === Adding users === '
# Point every user's MAIL at a Maildir in their home; single quotes keep
# $HOME unexpanded until login-time evaluation of /etc/profile.
echo 'export MAIL=$HOME/.maildir' >> /etc/profile
addgroup webadminsgroup
# Let members of webadminsgroup edit the web content tree.
chgrp -R webadminsgroup /var/www
chmod -R g+w /var/www
echo 'webadmin...'
useradd -m -c "Web Administrator" -s /bin/bash -G webadminsgroup webadmin
echo -e "internet123\ninternet123" | passwd webadmin
# Whitelist the user for vsftpd logins.
echo "webadmin" | tee -a /etc/vsftpd.userlist
# Chroot-style FTP layout: ~/ftp is root-owned and read-only; writable
# content lives only in its subdirectories.
mkdir /home/webadmin/ftp
chown nobody:nogroup /home/webadmin/ftp
chmod a-w /home/webadmin/ftp
mkdir /home/webadmin/ftp/files
chown -R webadmin:webadmin /home/webadmin/ftp/files
chmod -R 0770 /home/webadmin/ftp/files/
mkdir /home/webadmin/ftp/www
chown -R webadmin:webadmin /home/webadmin/ftp/www
chmod -R 0770 /home/webadmin/ftp/www/
# Expose the web root inside webadmin's FTP tree via a bind mount.
mount /var/www /home/webadmin/ftp/www -o bind
echo 'john...'
useradd -m -c "John" -s /bin/bash john
echo -e "john.1980\njohn.1980" | passwd john
echo "john" | tee -a /etc/vsftpd.userlist
mkdir /home/john/ftp
chown nobody:nogroup /home/john/ftp
chmod a-w /home/john/ftp
mkdir /home/john/ftp/files
chown -R john:john /home/john/ftp/files
chmod -R 0770 /home/john/ftp/files/
echo 'alice...'
useradd -m -c "Alice" -s /bin/bash alice
echo -e "alice.1990\nalice.1990" | passwd alice
echo "alice" | tee -a /etc/vsftpd.userlist
mkdir /home/alice/ftp
chown nobody:nogroup /home/alice/ftp
chmod a-w /home/alice/ftp
mkdir /home/alice/ftp/files
chown -R alice:alice /home/alice/ftp/files
chmod -R 0770 /home/alice/ftp/files/
echo 'bob...'
useradd -m -c "Robert" -s /bin/bash bob
echo -e "bob.1234\nbob.1234" | passwd bob
echo "bob" | tee -a /etc/vsftpd.userlist
mkdir /home/bob/ftp
chown nobody:nogroup /home/bob/ftp
chmod a-w /home/bob/ftp
mkdir /home/bob/ftp/files
chown -R bob:bob /home/bob/ftp/files
chmod -R 0770 /home/bob/ftp/files/
# Route admin@ mail to root, then rebuild the alias database.
echo "admin: root" >> /etc/aliases
newaliases
# Clean up the downloaded bundle.
rm -r pgre-master/*
rm master.zip
rm -d pgre-master
43b4fc936c1ed1cf3a1f95cf0695719f3884edd9 | Shell | arshiagh1998/CSC347-Security-Projects | /a4/part_a/q8_script.sh | UTF-8 | 1,007 | 4.125 | 4 | [] | no_license | #!/bin/bash
# Usage:
# ./prt8_script.sh <nmap_output> <message_file>
#
# nmap_output: is a file to parse containing the port numbers for open sockets
# on server at 142.1.44.135.
#
# message_file: is a file to parse containing a line to send to all open ports
# on server at 142.1.44.135.
if [ $# -ne 2 ]; then
echo "Usage:"; echo " ./prt2.A_script.sh <nmap_output> <message_file>";
exit;
fi
nmap_output=$1
message_file=$2
ip_addr=142.1.44.135
header=0
while IFS=' ' read -ra line; do
if [ "${line[0]}" = "" ]; then
header=0;
elif [ "${line[0]}" = "PORT" ]; then
header=1;
elif [ $header -eq 1 ]; then
IFS='/' read -ra port <<< "${line[0]}";
echo "Response from port: $port";
netcat $ip_addr $port < $message_file;
echo; echo " ------------------------------";
if [ $? -ne 0 ]; then
echo "Netcat unsuccessful for port: $port"; # Checks if netcat returns with exit code 0
fi
fi
done < "$nmap_output"
| true |
5be7a93ba488cbc6d6662cf254d89d80006a7370 | Shell | algattik/azure-aks-sqs-postgresql-sample | /azure-common/create-virtual-network.sh | UTF-8 | 744 | 3.140625 | 3 | [] | no_license | #!/bin/bash
# Strict mode, fail on any error
set -euo pipefail
echo 'creating virtual network'
echo ". name: $VNET_NAME"
az group create -n $RESOURCE_GROUP -l $LOCATION --tags auto_generated=1 \
-o tsv >> log.txt
if ! az network vnet show -n $VNET_NAME -g $RESOURCE_GROUP -o none 2>/dev/null; then
az network vnet create -n $VNET_NAME -g $RESOURCE_GROUP \
--address-prefix 10.0.0.0/16 \
-o tsv >> log.txt
fi
if ! az network vnet subnet show -g $RESOURCE_GROUP --vnet-name $VNET_NAME -n kubernetes-subnet -o none 2>/dev/null; then
az network vnet subnet create -g $RESOURCE_GROUP --vnet-name $VNET_NAME \
-n kubernetes-subnet --address-prefixes 10.0.2.0/24 \
--service-endpoints Microsoft.SQL \
-o tsv >> log.txt
fi
| true |
f773ecf964a8c19047108c11ffa9ea9be9d40150 | Shell | rluihgst/sahara-image-elements | /elements/oozie/root.d/0-check-oozie | UTF-8 | 741 | 3.390625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Pre-install sanity check: for the selected Hadoop version, at least
# one of the matching Oozie source variables (download URL or local
# file) must be defined, otherwise Oozie cannot be installed.
if [ "${DIB_DEBUG_TRACE:-0}" -gt 0 ]; then
    set -x
fi
set -eu
set -o pipefail

case "$DIB_HADOOP_VERSION" in
    "2.6.0")
        if [ -z "${OOZIE_HADOOP_V2_6_DOWNLOAD_URL:-}" ] && [ -z "${OOZIE_HADOOP_V2_6_FILE:-}" ]; then
            echo "OOZIE_HADOOP_V2_6_FILE and OOZIE_HADOOP_V2_6_DOWNLOAD_URL are not set. Impossible to install Oozie. Exit"
            exit 1
        fi
        ;;
    "2.7.1")
        if [ -z "${OOZIE_HADOOP_V2_7_1_DOWNLOAD_URL:-}" ] && [ -z "${OOZIE_HADOOP_V2_7_1_FILE:-}" ]; then
            echo "OOZIE_HADOOP_V2_7_1_FILE and OOZIE_HADOOP_V2_7_1_DOWNLOAD_URL are not set. Impossible to install Oozie. Exit"
            exit 1
        fi
        ;;
    *)
        echo "Unknown Hadoop version selected. Aborting"
        exit 1
        ;;
esac
| true |
8315e00fb0c5885d1b3b5946237a8e71d02f0b20 | Shell | Muthu-Palaniyappan-OL/gcw | /scripts/auto_backup.sh | UTF-8 | 568 | 2.625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Dump the gcw_db database to a self-contained SQL file that recreates
# and populates the database when replayed.
backup_file=/var/www/html/admin/backup/auto_setup_mysql.sql
echo "Backing Up..."
sudo rm -f "$backup_file"
# NOTE: the original used `sudo echo ... >> file`, but a redirection is
# performed by the *calling* shell, not by sudo, so the appends ran
# unprivileged and failed on the root-owned directory. `sudo tee`
# performs the write with elevated rights instead.
echo "/* CFREATING DATABASE AND USING IT */" | sudo tee "$backup_file" > /dev/null
sudo chmod -R 777 "$backup_file"
echo "CREATE DATABASE gcw_db;" | sudo tee -a "$backup_file" > /dev/null
echo "USE gcw_db;" | sudo tee -a "$backup_file" > /dev/null
echo "" | sudo tee -a "$backup_file" > /dev/null
# Append the actual dump; mysqldump runs under sudo as before, and tee
# handles the privileged append.
sudo mysqldump gcw_db | sudo tee -a "$backup_file" > /dev/null
echo "Done"
0788dc790533e36138788965ece0fafaa285afb9 | Shell | pooja-cyber/apigeetool-deployment-action | /entrypoint.sh | UTF-8 | 11,193 | 3.703125 | 4 | [] | no_license | #!/bin/bash
# Shell script to deploy Apigee Edge KVM (+entries), Cache, target servers,
# shared flows and api proxies, defined in this folder hierarchy:
# | apigee-config
# | - config.json
# | apigee-apiproxies
# | | <proxy-name-1>
# | | | apiproxy
# | | | - ...
# | | <proxy-name-2>
# | | | apiproxy
# | | | - ...
# | apigee-sharedflows
# | | <sharedflow-name-1>
# | | | sharedflowbundle
# | | | - ...
# | | <sharedflow-name-2>
# | | | sharedflowbundle
# | | | - ...
# User name to log into Apigee Edge
APIGEE_USERNAME=$1
# User password to log into Apigee Edge
APIGEE_PASSWORD=$2
# Boolean ("false", "true") to request script exit if an error occured
# during configuration objects deployment
APIGEETOOL_ERROR_STOP=$3
# Overall exit status: stays 0 unless a deployment step fails while
# APIGEETOOL_ERROR_STOP is "false".
RC=0
# Default to fail-fast. The expansion is quoted so `test -z` receives a
# single (possibly empty) argument instead of relying on the unquoted
# `[ -z ]` accident of the original.
if [ -z "$APIGEETOOL_ERROR_STOP" ] ; then APIGEETOOL_ERROR_STOP="true"; fi;
echo "APIGEETOOL_ERROR_STOP = " $APIGEETOOL_ERROR_STOP
# config file (JSON) name, containing KVM, Cache and target server definitions
# in ./apigee-config directory
config_file='config.json'
# ---------------------------------------------------------------------
# Load up .env, initialize Organization, environment and proxy names
# ---------------------------------------------------------------------
# allexport marks every variable assigned while sourcing .env for export.
set -o allexport
[[ -f .env ]] && source .env
set +o allexport
# ---------------------------
# TEST CONNEXION
# ---------------------------
echo Deploying to $APIGEE_ORGANIZATION.
echo
echo
echo Verifying credentials...
# Probe the management API; only the HTTP status code is kept.
# NOTE(review): $url is presumably supplied via .env -- confirm it is set.
response=`curl -s -o /dev/null -I -w "%{http_code}" $url/v1/organizations/$APIGEE_ORGANIZATION -u $APIGEE_USERNAME:$APIGEE_PASSWORD`
# 401 => bad credentials, 403 => valid user but wrong organization;
# anything else is treated as success.
if [ $response -eq 401 ]
then
    echo "Authentication failed!"
    echo "Please re-run the script using the right username/password."
    echo --------------------------------------------------
    exit 126
elif [ $response -eq 403 ]
then
    echo "Organization $APIGEE_ORGANIZATION is invalid!"
    echo "Please re-run the script using the right Organization."
    echo --------------------------------------------------
    exit 126
else
    echo "Verified! Proceeding with deployment."
fi;
# ---------------------------
# DEPLOY CONFIGS
# ---------------------------
echo
echo
echo "Deploying all Configs Items (KVM, KVM entries, Cache, Target Servers) to [$APIGEE_ORGANIZATION / $APIGEE_ENV]"
echo
cd apigee-config
# Create Key Value Maps -----------------------------------
# One JSON object per KVM is read from the .KVM array of config.json;
# the apigeetool command line is assembled flag by flag.
while IFS= read -r line; do
    mapName=$(jq -r '.mapName // empty' <<< "$line");
    encrypted=$(jq -r '.encrypted // empty' <<< "$line");
    environment=$(jq -r '.environment // empty' <<< "$line");
    api=$(jq -r '.api // empty' <<< "$line");
    APIGEETOOL_COMMAND="apigeetool createKVMmap -u $APIGEE_USERNAME -p $APIGEE_PASSWORD -o $APIGEE_ORGANIZATION"
    if [ ! -z "$environment" ]
    then
        APIGEETOOL_COMMAND="$APIGEETOOL_COMMAND --environment $environment"
    fi;
    if [ ! -z "$api" ]
    then
        APIGEETOOL_COMMAND="$APIGEETOOL_COMMAND --api $api"
    fi;
    if [ $encrypted ]
    then
        APIGEETOOL_COMMAND="$APIGEETOOL_COMMAND --encrypted"
    fi;
    if [ ! -z "$mapName" ]
    then
        APIGEETOOL_COMMAND="$APIGEETOOL_COMMAND --mapName $mapName"
    else
        echo "Cannot create KVM Map: mapName is missing!"
        echo "Please correct and re-run the script."
        echo
        echo "APIGEETOOL_ERROR_STOP = " $APIGEETOOL_ERROR_STOP
        # Fixed: the original read `[ $APIGEETOOL_ERROR_STOP == "true"]`
        # (no space before the closing bracket), so `test` errored out
        # and the fail-fast exit never fired. The variable is quoted so
        # an empty value cannot break the comparison either.
        if [ "$APIGEETOOL_ERROR_STOP" == "true" ]
        then
            exit 126
        else
            RC=126
        fi;
    fi;
    echo $APIGEETOOL_COMMAND
    # Intentionally unquoted so the assembled string word-splits back
    # into a command plus its arguments.
    $APIGEETOOL_COMMAND &> out.log || {
        head -1 out.log
        echo "Please correct and re-run the script."
        echo
        if [ "$APIGEETOOL_ERROR_STOP" == "true" ]
        then
            exit 126
        else
            RC=126
        fi;
    }
done < <(echo $(cat "${config_file}") | jq -c '.KVM[]?')
# Add KVM Entries -----------------------------------------
# Each .KVMentry object must carry entryName and entryValue; mapName is
# also required. environment/api are optional scoping flags.
while IFS= read -r line; do
    mapName=$(jq -r '.mapName // empty' <<< "$line");
    environment=$(jq -r '.environment // empty' <<< "$line");
    api=$(jq -r '.api // empty' <<< "$line");
    entryName=$(jq -r '.entryName' <<< "$line");
    entryValue=$(jq -r '.entryValue' <<< "$line");
    APIGEETOOL_COMMAND="apigeetool addEntryToKVM -u $APIGEE_USERNAME -p $APIGEE_PASSWORD -o $APIGEE_ORGANIZATION"
    if [ ! -z "$environment" ]
    then
        APIGEETOOL_COMMAND="$APIGEETOOL_COMMAND --environment $environment"
    fi;
    if [ ! -z "$api" ]
    then
        APIGEETOOL_COMMAND="$APIGEETOOL_COMMAND --api $api"
    fi;
    if [ ! -z "$mapName" ]
    then
        APIGEETOOL_COMMAND="$APIGEETOOL_COMMAND --mapName $mapName"
    else
        echo "Cannot add entry to KVM Map: mapName is missing!"
        echo "Please correct and re-run the script."
        echo
        if [ $APIGEETOOL_ERROR_STOP == "true" ]
        then
            exit 126
        else
            RC=126
        fi;
    fi;
    if [ ! -z "$entryName" ]
    then
        APIGEETOOL_COMMAND="$APIGEETOOL_COMMAND --entryName $entryName"
    else
        echo "Cannot add entry to KVM Map: entryName is missing!"
        echo "Please correct and re-run the script."
        echo
        if [ $APIGEETOOL_ERROR_STOP == "true" ]
        then
            exit 126
        else
            RC=126
        fi;
    fi;
    if [ ! -z "$entryValue" ]
    then
        APIGEETOOL_COMMAND="$APIGEETOOL_COMMAND --entryValue $entryValue"
    else
        echo "Cannot add entry to KVM Map: entryValue is missing!"
        echo "Please correct and re-run the script."
        echo
        if [ $APIGEETOOL_ERROR_STOP == "true" ]
        then
            exit 126
        else
            RC=126
        fi;
    fi;
    echo $APIGEETOOL_COMMAND
    # Run the assembled command; on failure, surface its first output
    # line and either abort (fail-fast) or record RC=126 and continue.
    $APIGEETOOL_COMMAND &> out.log || {
        head -1 out.log
        echo "Please correct and re-run the script."
        echo
        if [ $APIGEETOOL_ERROR_STOP == "true" ]
        then
            exit 126
        else
            RC=126
        fi;
    }
done < <(echo $(cat "${config_file}") | jq -c '.KVMentry[]?')
# Add Caches ----------------------------------------------
# Each .Cache object needs a cacheName (-z flag) and an environment.
while IFS= read -r line; do
    cacheName=$(jq -r '.cacheName' <<< "$line");
    environment=$(jq -r '.environment // empty' <<< "$line");
    APIGEETOOL_COMMAND="apigeetool createcache -u $APIGEE_USERNAME -p $APIGEE_PASSWORD -o $APIGEE_ORGANIZATION"
    if [ ! -z "$cacheName" ]
    then
        APIGEETOOL_COMMAND="$APIGEETOOL_COMMAND -z $cacheName"
    else
        echo "Cannot create Cache: cacheName is missing!"
        echo "Please correct and re-run the script."
        echo
        if [ $APIGEETOOL_ERROR_STOP == "true" ]
        then
            exit 126
        else
            RC=126
        fi;
    fi;
    if [ ! -z "$environment" ]
    then
        APIGEETOOL_COMMAND="$APIGEETOOL_COMMAND --environment $environment"
    else
        echo "Cannot create Cache: environment is missing!"
        echo "Please correct and re-run the script."
        echo
        if [ $APIGEETOOL_ERROR_STOP == "true" ]
        then
            exit 126
        else
            RC=126
        fi;
    fi;
    echo $APIGEETOOL_COMMAND
    # Run; on failure, print the first line of output and abort or flag.
    $APIGEETOOL_COMMAND &> out.log || {
        head -1 out.log
        echo "Please correct and re-run the script."
        echo
        if [ $APIGEETOOL_ERROR_STOP == "true" ]
        then
            exit 126
        else
            RC=126
        fi;
    }
done < <(echo $(cat "${config_file}") | jq -c '.Cache[]?')
# Add Target Servers --------------------------------------
# Each .TargetServer object requires environment, targetServerName,
# targetHost and targetPort; targetSSL/targetEnabled are boolean flags.
while IFS= read -r line; do
    targetServerName=$(jq -r '.targetServerName' <<< "$line");
    targetHost=$(jq -r '.targetHost' <<< "$line");
    targetPort=$(jq -r '.targetPort' <<< "$line");
    targetSSL=$(jq -r '.targetSSL' <<< "$line");
    targetEnabled=$(jq -r '.targetEnabled' <<< "$line");
    environment=$(jq -r '.environment // empty' <<< "$line");
    APIGEETOOL_COMMAND="apigeetool createTargetServer -u $APIGEE_USERNAME -p $APIGEE_PASSWORD -o $APIGEE_ORGANIZATION"
    if [ ! -z "$environment" ]
    then
        APIGEETOOL_COMMAND="$APIGEETOOL_COMMAND --environment $environment"
    else
        echo "Cannot create Target Server: environment is missing!"
        echo "Please correct and re-run the script."
        echo
        if [ $APIGEETOOL_ERROR_STOP == "true" ]
        then
            exit 126
        else
            RC=126
        fi;
    fi;
    if [ ! -z "$targetServerName" ]
    then
        APIGEETOOL_COMMAND="$APIGEETOOL_COMMAND --targetServerName $targetServerName"
    else
        echo "Cannot create Target Server: targetServerName is missing!"
        echo "Please correct and re-run the script."
        echo
        if [ $APIGEETOOL_ERROR_STOP == "true" ]
        then
            exit 126
        else
            RC=126
        fi;
    fi;
    if [ ! -z "$targetHost" ]
    then
        APIGEETOOL_COMMAND="$APIGEETOOL_COMMAND --targetHost $targetHost"
    else
        echo "Cannot create Target Server: targetHost is missing!"
        echo "Please correct and re-run the script."
        echo
        if [ $APIGEETOOL_ERROR_STOP == "true" ]
        then
            exit 126
        else
            RC=126
        fi;
    fi;
    if [ ! -z "$targetPort" ]
    then
        APIGEETOOL_COMMAND="$APIGEETOOL_COMMAND --targetPort $targetPort"
    else
        echo "Cannot create Target Server: targetPort is missing!"
        echo "Please correct and re-run the script."
        echo
        if [ $APIGEETOOL_ERROR_STOP == "true" ]
        then
            exit 126
        else
            RC=126
        fi;
    fi;
    # `[ $var ]` is true for any non-empty value, so jq's literal
    # "false" would also enable the flag -- NOTE(review): confirm the
    # config only ever sets these keys when they should be true.
    if [ $targetSSL ]
    then
        APIGEETOOL_COMMAND="$APIGEETOOL_COMMAND --targetSSL"
    fi;
    if [ $targetEnabled ]
    then
        APIGEETOOL_COMMAND="$APIGEETOOL_COMMAND --targetEnabled"
    fi;
    echo $APIGEETOOL_COMMAND
    $APIGEETOOL_COMMAND &> out.log || {
        head -1 out.log
        echo "Please correct and re-run the script."
        echo
        if [ $APIGEETOOL_ERROR_STOP == "true" ]
        then
            exit 126
        else
            RC=126
        fi;
    }
done < <(echo $(cat "${config_file}") | jq -c '.TargetServer[]?')
cd ..
# ---------------------------
# DEPLOY SHARED FLOWS
# ---------------------------
echo
echo Deploying all Shared Flows to $APIGEE_ENV using $APIGEE_USERNAME and $APIGEE_ORGANIZATION
cd apigee-sharedflows
# Every subdirectory of apigee-sharedflows is deployed as a shared flow
# named after the directory.
for sharedflowdir in *; do
    if [ -d "${sharedflowdir}" ]; then
        #../tools/deploy.py -n $proxydir -u $username:$password -o $org -e $env -p / -d $proxydir -h $url
        apigeetool deploySharedFlow -o $APIGEE_ORGANIZATION -u $APIGEE_USERNAME -p $APIGEE_PASSWORD -e $APIGEE_ENV -n $sharedflowdir -d $sharedflowdir
        if [ $? -ne 0 ]
        then
            if [ $APIGEETOOL_ERROR_STOP == "true" ]
            then
                exit 126
            else
                RC=126
            fi;
        fi;
    fi
done
cd ..
# ---------------------------
# DEPLOY PROXIES
# ---------------------------
echo
echo Deploying all API Proxies to $APIGEE_ENV using $APIGEE_USERNAME and $APIGEE_ORGANIZATION
cd apigee-apiproxies
# Every subdirectory of apigee-apiproxies is deployed as an API proxy
# named after the directory.
for proxydir in *; do
    if [ -d "${proxydir}" ]; then
        #../tools/deploy.py -n $proxydir -u $username:$password -o $org -e $env -p / -d $proxydir -h $url
        apigeetool deployproxy -o $APIGEE_ORGANIZATION -u $APIGEE_USERNAME -p $APIGEE_PASSWORD -e $APIGEE_ENV -n $proxydir -d $proxydir
        if [ $? -ne 0 ]
        then
            if [ $APIGEETOOL_ERROR_STOP == "true" ]
            then
                exit 126
            else
                RC=126
            fi;
        fi;
    fi
done
cd ..
echo
echo "Deployment complete. Sample API proxies are deployed to the $APIGEE_ENV environment in the organization $APIGEE_ORGANIZATION"
echo "Login to enterprise.apigee.com to view and interact with the sample API proxies"
# Publish the accumulated status as a GitHub Actions output. Note the
# script's own exit code is still 0 here; callers must inspect RC.
# NOTE(review): the ::set-output workflow command is deprecated upstream.
echo "::set-output name=RC::$RC"
286849454818bd3e68d74e9e2d10c678c4e2c027 | Shell | shizonic/LinuxDotFiles | /i3/util/volnotid.sh | UTF-8 | 1,334 | 3.65625 | 4 | [] | no_license | #!/bin/bash
##########################################
# _ _ _ _ #
# | | | | (_) | | #
# __ _____ | |_ __ ___ | |_ _ __| | #
# \ \ / / _ \| | '_ \ / _ \| __| |/ _` | #
# \ V / (_) | | | | | (_) | |_| | (_| | #
# \_/ \___/|_|_| |_|\___/ \__|_|\__,_| #
# #
##########################################
#
# This script uses volnoti, which can be installed from git or
# from the arch aur.
#
# yaourt -S volnoti
#
VOLUME=$(amixer get Master | grep Mono: | sed 's|[^[]*\[\([0-9]*\).*|\1|')
VALUE=1%
MUTE=0
case "$1" in
"up")
# Increase Volume
[[ "$VOLUME" -eq 100 ]] && VALUE=0
amixer -q sset Master $VALUE+ unmute && killall -SIGUSR1 i3status
MUTE=0
;;
"down")
# Decrease Volume
amixer -q sset Master $VALUE- unmute && killall -SIGUSR1 i3status
MUTE=0
# If the Volume is already 0, then mute
if [ "$VOLUME" == "0" ]; then
MUTE=1
amixer -q sset Master toggle && killall -SIGUSR1 i3status
fi
;;
"mute")
# Mute
amixer -q sset Master toggle && killall -SIGUSR1 i3status
MUTE=1
;;
esac
VOLUME=$(amixer get Master | grep Mono: | sed 's|[^[]*\[\([0-9]*\).*|\1|')
if [ "$MUTE" == "0" ]; then
volnoti-show $VOLUME
else
volnoti-show -m $VOLUME
fi
| true |
c9e4b85bdc1dadec1901a7c8bf316c1a8acbc55d | Shell | galqiwi/pts-geoma | /scripts/comp.sh | UTF-8 | 431 | 2.875 | 3 | [] | no_license | #!/bin/bash
cat problems | while read c;
do
echo $c
##echo 1
touch $c.tex
cat ./tex_patterns/begin > $c.tex
touch $c.txt.tex
cat $c.txt.tex >> $c.tex
cat ./tex_patterns/middle >> $c.tex
touch $c.sol.tex
cat $c.sol.tex >> $c.tex
echo "
\newpage
\begin{figure}[h]
\centering
\includegraphics[width=1\textwidth]{{$(dirname $0)/../$c}.png}
\end{figure}
" >> $c.tex
cat ./tex_patterns/end >> $c.tex
pdflatex $c
echo $c is done
done
| true |
0fb74a961cc22c476e89161fa66eab3c44b0c203 | Shell | Vorgel/Bank-Account | /KontoBankowe/ScriptFunctions/menu_offers.sh | UTF-8 | 1,421 | 3.625 | 4 | [] | no_license | #!/bin/bash
source $(dirname $0)/loans.sh
source $(dirname $0)/cards_and_tel_payments.sh
source $(dirname $0)/loans_and_credits.sh
source $(dirname $0)/terminals_in_area.sh
source $(dirname $0)/retirement.sh
source $(dirname $0)/insurence.sh
source $(dirname $0)/leasing.sh
source $(dirname $0)/zus.sh
function MenuOffersDisplay()
{
clear
echo "Menu | Offers"
echo "1. Savings"
echo "2. Loans"
echo "3. Credits And Loans"
echo "4. Blik"
echo "5. Retirements"
echo "6. Insurence"
echo "7. Settlement with ZUS"
echo "8. Leasing"
echo "9. Terminals in area"
echo "10. Back"
echo -n "Type in desired option number in order to continue: "
}
function MenuOffer()
{
local snumber
read snumber
until [[ $snumber -gt 0 ]] && [[ $snumber -lt 12 ]]
do
echo "Wrong format exptected 1-10"
echo "Pick again "
read snumber
done
case "$snumber" in
1)
cSavingsAccountFunctionality
;;
2)
Loans
;;
3)
ShowInfoCreditAndLoans
;;
4)
CardsAndTelPayments
;;
5)
Kretirement
;;
6)
Insurences
;;
7)
cTransferToZus
;;
8)
Leasing
;;
9)
mShowTerminalsInTheArea
;;
10)
return
;;
esac
clear
MenuOffersDisplay
MenuOffer
} | true |
31828462dc76607f9f95f8c03acccd5d209b255d | Shell | leonardonc/script | /lista_13/lista_13_q01.sh | UTF-8 | 404 | 3.984375 | 4 | [] | no_license | #!/bin/bash
#script que pede dois números e retorna o maior deles, usando função
func_maior(){
if (($1 > $2)) ; then
result_1=$1
else
result_1=$2
fi
#echo "o maior é: $result_1"
}
func_menor(){
if (($1 > $2)) ; then
result_2=$2
else
result_2=$1
fi
}
read -p "Digite dois números: " a b
func_maior $a $b
echo "O maior é: $result_1"
func_menor $a $b
echo "o menor é: $result_2"
| true |
61f2593a476bd1e1132e89a3d3cdb9d18c2e5ef0 | Shell | cevaldezchandra/cnv_caller | /CNV_caller_v7.sh | UTF-8 | 1,268 | 3.515625 | 4 | [] | no_license | #!/bin/bash
# Wrapper for submitting CNV to cluster. This script
# will create a .pbs script for each individual job requested.
export OUT="/work/trqlogs/cfDNA/"
samplelist=$1
while IFS=$'\t' read -r line; do
bam=`echo $line | awk '{print$1}'`
name=`echo $line | awk '{print$2}'`
output=`echo $line | awk '{print$3}'`
background=`echo $line | awk '{print $4}'`
runexac=`echo $line | awk '{print $5}'`
echo $name
echo $bam
echo $output
cat > $OUT$name".pbs" << EOF
#!/bin/bash
#
#This is submission script for torque queue
# it runs CNV.R code each each sample and .bam
# file should be placed in SUBFILE folder
#
#These commands set up the Grid Environment for your job:
#PBS -j oe
#PBS -N $name
#PBS -l nodes=1:ppn=26,walltime=10:00:00
#PBS -V
#PBS -o localhost:/work/trqlogs/cfDNA/out.$name
#PBS -e localhost:/work/trqlogs/cfDNA/error.$name
#cPBS -m a
#cPBS -koe
SRCDIR=/tools/cfDNA/bin
RAWDIR=$bam
SUBFILE="$name";
WORKDIR=$output
BCK=$background
RE=$runexac
EOF
cat >> $OUT$name".pbs" <<'EOF'
### actual job: (example: Rscript CNV_v4.R /path/to/bam/test.bam samplename /path/to/output)
Rscript $SRCDIR/CNV_v7.R $RAWDIR $SUBFILE $WORKDIR $BCK $RE
sleep 2
EOF
eval `qsub $OUT$name".pbs"`
done < "$samplelist"
| true |
7aaf89041f822ffa37ebe34d90cbe287c5f145c1 | Shell | glahaie/Parallel-Sortie-ND | /sortie_tbb/run_test.sh | UTF-8 | 1,015 | 3 | 3 | [] | no_license | #!/bin/bash
for VAR in 2 3 4
do
export PROCESSOR=$VAR
echo "processor ${VAR}"
#
export PARALLEL=PARALLEL_FOR
echo "execution de parallel for"
T="$(date)"
echo ${T} > result_unittest_${VAR}.txt
for VAR2 in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
do
(time ./../test/Main ${VAR2} >> result_unittest_${VAR}.txt) 2>> result_unittest_${VAR}.txt
done
T="$(date)"
#
export PARALLEL=PARALLEL_TASKS
echo "execution de parallel task"
T="$(date)"
echo ${T} > result_unittest_${VAR}.txt
for VAR2 in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
do
(time ./../test/Main ${VAR2} >> result_unittest_${VAR}.txt) 2>> result_unittest_${VAR}.txt
done
T="$(date)"
#
export PARALLEL=PARALLEL_PIPELINE
echo "execution de parallel pipeline"
T="$(date)"
echo ${T} > result_unittest_${VAR}.txt
for VAR2 in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
do
(time ./../test/Main ${VAR2} >> result_unittest_${VAR}.txt) 2>> result_unittest_${VAR}.txt
done
T="$(date)"
done
unset PARALLEL
unset PROCESSOR | true |
f3be0be4f39118f92d1f1c8a9e088359de458a62 | Shell | NCAR/ncl | /ni/src/ncl/int_data_c.sh | UTF-8 | 519 | 3 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/sh
sh op_funcs.sh int NhlTInteger NhlTIntegerGenArray -2147483647 > .tmp.$$
if [ ! $? ]
then
exit $?
fi
sed \
-e 's/PRINTFORMAT/\%d\\n/' \
-e 's/DATATYPE/int/g' \
-e 's/LOCALTYPE/int/g' \
-e 's/HLUTYPEREP/NhlTInteger/g' \
-e 's/HLUGENTYPEREP/NhlTIntegerGenArray/g' \
-e 's/DEFAULT_MISS/-2147483647/g' \
-e 's/DEFAULT_FORMAT/%d/g' \
-e "/REPLACE/r .tmp.$$" \
-e '/REPLACE/d' \
-e '/DSPECIFIC/r NclTypeint.c.specific' \
-e '/DSPECIFIC/d' \
NclType.c.sed > NclTypeint.c
rm .tmp.$$
echo "created NclTypeint.c"
| true |
292c7f9edc2809d1c1c55b981b21381edfb71e6e | Shell | fltonii/dotfiles | /zsh/install.sh | UTF-8 | 802 | 3.53125 | 4 | [] | no_license | #!/usr/bin/env bash
DIR="$(dirname "$(which "$0")")"
install_zsh() {
if command -v zsh > /dev/null ; then
echo "== zsh detected"
echo "zsh current version: "
else
yay -Sy --noconfirm zsh
fi
echo zsh --version
}
install_oh_my_zsh() {
curl https://raw.githubusercontent.com/robbyrussell/oh-my-zsh/master/tools/install.sh | bash -s -- --skip-chsh
}
install_powerline_support() {
if pip --version > /dev/null ; then
sudo pip install powerline-status
else
{
wget -qO- https://bootstrap.pypa.io/get-pip.py | sudo python -
install_powerline_support
} || {
return
}
fi
}
main() {
git clone https://github.com/zsh-users/zsh-syntax-highlighting.git ~/.zsh-syntax-highlight
install_zsh
install_powerline_support
install_oh_my_zsh
}
main
| true |
4f0fd1a3d92c4052f233af883d5335208a324e75 | Shell | jetli123/shell | /遍历目录里文件数目.sh | UTF-8 | 234 | 2.9375 | 3 | [] | no_license | #!/bin/bash
count=0
NUM=`find /data01/home/insplt/web/sourcefile/rpt/ -name 201706211316* -print0 |xargs -0`
for i in $NUM
do
check=`ls $i`
for item in $check
do
count=$[ $count + 1 ]
done
echo "$i -- $count"
count=0
done
| true |
501cd1ee345d2888132628c055c7d75f691af3b7 | Shell | segmentio/analytics-go | /.buildscript/bootstrap.sh | UTF-8 | 316 | 3.109375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
if ! which brew >/dev/null; then
echo "homebrew is not available. Install it from http://brew.sh"
exit 1
else
echo "homebrew already installed"
fi
if ! which go >/dev/null; then
echo "installing go..."
brew install go
else
echo "go already installed"
fi
echo "all dependencies installed."
| true |
ffae381458022e8a36da9f0dd5e72bc2bbe1e55f | Shell | menuka94/scripts | /tvify | UTF-8 | 1,095 | 4.28125 | 4 | [
"Unlicense",
"LicenseRef-scancode-public-domain"
] | permissive | #!/usr/bin/env bash
################################################################################
# tvify - this script takes a folder of images and converts them for display on
# a 1080p TV (memory stick or similar). Proper image orientation will be
# maintained, and all available cores will be used to speed up processing.
#
# Dependencies: mogrify (provided by imagemagick)
#
# Version 1
# Matthew Malensek <matt@malensek.net>
################################################################################
# Print a one-line usage synopsis naming this script.
print_usage() {
    printf 'Usage: %s <image_dir>\n' "$(basename "${0}")"
}
# First, make sure we actually have the utility we need.
if ! command -v mogrify &> /dev/null; then
    echo "Couldn't find 'mogrify' command!"
    echo "Please install imagemagick: http://www.imagemagick.org"
    exit 1
fi

dir="${1-}"
if [[ ! -d "${dir}" ]]; then
    print_usage
    exit 1
fi

# Use every online core; fall back to 1 if getconf fails, returns nothing,
# or reports nonsense (an empty value would previously crash the [[ test).
cpus=$(getconf _NPROCESSORS_ONLN)
if [[ -z "${cpus}" || ${cpus} -le 0 ]]; then
    echo "Couldn't determine the number of CPUs. Falling back on 1."
    cpus=1
fi

# Resize in place, preserving EXIF orientation.  Only regular files are
# passed, NUL-delimited, so names containing spaces/newlines are safe (the
# original `find | xargs -n1` broke on such names and also fed the
# directory entries themselves to mogrify).
find "${dir}" -type f -print0 | xargs -0 -t -n1 -P"${cpus}" \
    mogrify -resize 1920x1080 -auto-orient
| true |
a2d2ceee28d6e527d0f4175239397b259858f8dc | Shell | RonSherfey/adex-protocol-eth | /build.sh | UTF-8 | 643 | 2.78125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
set -e
mkdir -p temp
solc --abi --bin contracts/AdExCore.sol -o temp
solc --overwrite --abi contracts/Identity.sol -o temp
solc --overwrite --abi contracts/IdentityFactory.sol -o temp
solc --overwrite --abi contracts/Staking.sol -o temp
mkdir -p resources abi
# AdexCore abi
mv temp/AdExCore.abi abi/AdExCore.json
mv temp/Identity.abi abi/Identity.json
mv temp/IdentityFactory.abi abi/IdentityFactory.json
mv temp/Staking.abi abi/Staking.json
# AdexCore bytecode
bytecode="`cat temp/AdExCore.bin`"; # read file contents into var
echo "\"$bytecode\"" > "resources/bytecode/AdExCore.json" # write to file
rm -r temp/
| true |
29533aaf3105cccd9673cc6e13e22d5d7c060bcb | Shell | hak5/shark-files | /usr/bin/LED | UTF-8 | 7,320 | 3.671875 | 4 | [] | no_license | #!/bin/bash
RED_LED="/sys/class/leds/led_red/brightness"
GREEN_LED="/sys/class/leds/led_green/brightness"
BLUE_LED="/sys/class/leds/led_blue/brightness"
colors=(0 0 0)
pattern=(1 0 0 0 0 0)
# Convert a delay in milliseconds to a fractional-seconds string suitable
# for `sleep`, clamping anything under 20 ms up to 0.02 s.
function convert() {
    local ms="${1}"
    if [ "$ms" -lt 20 ]; then
        echo 0.02
    else
        awk -v ms="$ms" 'BEGIN { print ms / 1000 }'
    fi
}
# Translate a single-letter colour code (R G B Y C M W) into the global
# colors=(R G B) on/off triple.  Returns 1 and leaves `colors` untouched
# for any unrecognised code.
function parse_color() {
    local rgb
    case "$1" in
        R) rgb="1 0 0" ;;
        G) rgb="0 1 0" ;;
        B) rgb="0 0 1" ;;
        Y) rgb="1 1 0" ;;   # amber
        C) rgb="0 1 1" ;;   # light blue
        M) rgb="1 0 1" ;;   # violet/purple
        W) rgb="1 1 1" ;;
        *) return 1 ;;
    esac
    # Deliberately unquoted: split the triple into three array elements.
    colors=($rgb)
}
# Populate the global `pattern` array from a named blink pattern, or from a
# raw period in milliseconds.  Array layout (consumed by run_led/blink_loop):
#   [0] solid flag (1 = steady, no blinking)
#   [1] inverted flag (1 = mostly-on; the short blinks turn the LED OFF)
#   [2] number of blinks per burst
#   [3] on/off duration in seconds
#   [4] pause between bursts in seconds
#   [5] repeat flag (1 = blink indefinitely, 0 = one-shot; see blink_loop)
function parse_pattern() {
  local INVERTED="0"
  # An "I"-prefixed name (ISINGLE, IDOUBLE, ...) selects the inverted form.
  [[ "$(echo ${1} | head -c1)" == "I" ]] && {
    INVERTED="1"
  }
  case $1 in
    "SLOW")
      pattern=(0 0 1 $(convert 1000) $(convert 1000) 1)
      ;;
    "FAST")
      pattern=(0 0 1 $(convert 100) $(convert 100) 1)
      ;;
    "VERYFAST")
      pattern=(0 0 1 $(convert 10) $(convert 10) 1)
      ;;
    "ISINGLE" | "SINGLE")
      pattern=(0 $INVERTED 1 $(convert 100) $(convert 1000) 1)
      ;;
    "IDOUBLE" | "DOUBLE")
      pattern=(0 $INVERTED 2 $(convert 100) $(convert 1000) 1)
      ;;
    "ITRIPLE" | "TRIPLE")
      pattern=(0 $INVERTED 3 $(convert 100) $(convert 1000) 1)
      ;;
    "IQUAD" | "QUAD")
      pattern=(0 $INVERTED 4 $(convert 100) $(convert 1000) 1)
      ;;
    "IQUIN" | "QUIN")
      pattern=(0 $INVERTED 5 $(convert 100) $(convert 1000) 1)
      ;;
    "SUCCESS")
      # One-shot (repeat flag 0): very fast blink, then solid.
      pattern=(0 0 1 $(convert 10) $(convert 10) 0)
      ;;
    *)
      # A bare integer is a custom symmetric blink period in ms; anything
      # else (including "SOLID" or no argument) falls back to solid-on.
      [[ $1 =~ ^-?[0-9]+$ ]] && pattern=(0 0 1 $(convert $1) $(convert $1) 1) || pattern=(1 0 0 0 0 0)
      ;;
  esac
  return 0
}
# Translate a named payload state (SETUP, FAIL2, STAGE3, ...) into the
# corresponding colour + pattern globals via parse_color/parse_pattern.
# Returns 1 for an unknown state so run_led can fall back to COLOR/PATTERN.
function parse_state() {
  local STATENUM="1"
  # A trailing digit (FAIL2, STAGE4, SPECIAL5, ...) selects a variant.
  [[ $1 =~ ^[A-Z]+[1-5]$ ]] && {
    STATENUM="${1: -1}"
  }
  case $1 in
    "LINKSETUP")
      parse_color "M"
      parse_pattern "SLOW"
      ;;
    "SETUP")
      parse_color "M"
      parse_pattern "SOLID"
      ;;
    "FAIL" | FAIL[1-3])
      parse_color "R"
      parse_pattern "SLOW"
      # Truncating "1000" to (5 - N) chars yields 1000/100/10 ms for
      # FAIL(1)/FAIL2/FAIL3, i.e. progressively faster red blinking.
      pattern[3]=$(convert "$(echo -n 1000 | head -c $((5-STATENUM)))")
      ;;
    "ATTACK" | STAGE[1-5])
      parse_color "Y"
      parse_pattern "SINGLE"
      # Blink N times per burst for STAGE N.
      pattern[2]=$STATENUM
      ;;
    "SPECIAL" | SPECIAL[1-5])
      parse_color "C"
      parse_pattern "ISINGLE"
      pattern[2]=$STATENUM
      ;;
    "CLEANUP")
      parse_color "W"
      parse_pattern "FAST"
      ;;
    "FINISH")
      parse_color "G"
      parse_pattern "SUCCESS"
      ;;
    "OFF")
      # Leave the defaults: colors all zero + solid pattern == LED off.
      ;;
    *)
      return 1
      ;;
  esac
  return 0
}
# Drive all three colour channels low (LED fully off).
function clear_led() {
    local channel
    for channel in "$RED_LED" "$GREEN_LED" "$BLUE_LED"; do
        echo 0 > "$channel" 2>&1
    done
}
# Drive each colour channel from the global colors array (R, G, B order).
function light_led() {
    local channels=("$RED_LED" "$GREEN_LED" "$BLUE_LED")
    local i
    for i in 0 1 2; do
        echo "${colors[$i]}" > "${channels[$i]}" 2>&1
    done
}
# Blink the LED according to the global `pattern` array.  Each cycle emits
# pattern[2] blinks of pattern[3] seconds on + pattern[3] seconds off
# (reversed when pattern[1]==1), then pauses for pattern[4] seconds.
# With pattern[5]==1 `sc` never advances, so the until-loop runs until the
# (backgrounded) process is killed; with pattern[5]==0 it stops after 9
# cycles and leaves the LED lit solid (the SUCCESS/FINISH effect).
function blink_loop() {
  local sc=1
  until [[ "$sc" == "10" ]]; do
    for i in $(seq 1 ${pattern[2]}); do
      if [ "${pattern[1]}" == "0" ];then
        light_led
      else
        clear_led
      fi
      sleep "${pattern[3]}"
      if [ "${pattern[1]}" == "0" ];then
        clear_led
      else
        light_led
      fi
      sleep "${pattern[3]}"
    done
    sleep "${pattern[4]}"
    [[ "${pattern[5]}" == "0" ]] && sc=$((sc+1))
  done
  [[ "${pattern[5]}" == "0" ]] && light_led
}
# Resolve the CLI arguments into colors/pattern and start driving the LED
# in the background, so the script can exit while the LED keeps going.
# A named STATE is tried first; otherwise the args are COLOR [PATTERN].
# Returns 1 (caller shows usage) when nothing parses.
function run_led() {
  parse_state "${1}" || {
    parse_color "${1}" || return 1
    [[ "$#" == "2" ]] && parse_pattern "${2}"
  }
  if [ "${pattern[0]}" == "1" ];then
    # Solid: a single write suffices (still backgrounded for symmetry).
    light_led &
    return 0
  else
    blink_loop &
    return 0
  fi
  return 1  # unreachable: both branches above return
}
# Print the full CLI reference.  The here-doc delimiter is unquoted but the
# text contains no shell expansions; do not add comments inside it, as they
# would become part of the output.
function show_usage() {
  cat << EOF
Usage: LED [COLOR] [PATTERN] or LED [STATE]

COLORS:
  R       Red
  G       Green
  B       Blue
  Y, R G      Yellow (Commonly known as Amber)
  C, G B      Cyan (Commonly known as Light Blue)
  M, R B      Magenta (Commonly known as Violet or Purple)
  W, R G B    White (Combination of R + G + B)

PATTERNS:
  SOLID       *Default. No blink. Used if pattern argument is omitted
  SLOW        Symmetric 1000ms ON, 1000ms OFF, repeating
  FAST        Symmetric 100ms ON, 100ms OFF, repeating
  VERYFAST    Symmetric 10ms ON, 10ms OFF, repeating
  SINGLE      1 100ms blink(s) ON followed by 1 second OFF, repeating
  DOUBLE      2 100ms blink(s) ON followed by 1 second OFF, repeating
  TRIPLE      3 100ms blink(s) ON followed by 1 second OFF, repeating
  QUAD        4 100ms blink(s) ON followed by 1 second OFF, repeating
  QUIN        5 100ms blink(s) ON followed by 1 second OFF, repeating
  ISINGLE     1 100ms blink(s) OFF followed by 1 second ON, repeating
  IDOUBLE     2 100ms blink(s) OFF followed by 1 second ON, repeating
  ITRIPLE     3 100ms blink(s) OFF followed by 1 second ON, repeating
  IQUAD       4 100ms blink(s) OFF followed by 1 second ON, repeating
  IQUIN       5 100ms blink(s) OFF followed by 1 second ON, repeating
  SUCCESS     1000ms VERYFAST blink followed by SOLID
  #       Custom value in ms for continuous symmetric blinking

STATES:
  In addition to the combinations of COLORS and PATTERNS listed above,
  these standardized LED STATES may be used to indicate payload status:

  SETUP       M SOLID     Magenta solid
  FAIL        R SLOW      Red slow blink
  FAIL1       R SLOW      Red slow blink
  FAIL2       R FAST      Red fast blink
  FAIL3       R VERYFAST  Red very fast blink
  ATTACK      Y SINGLE    Yellow single blink
  STAGE1      Y SINGLE    Yellow single blink
  STAGE2      Y DOUBLE    Yellow double blink
  STAGE3      Y TRIPLE    Yellow triple blink
  STAGE4      Y QUAD      Yellow quadruple blink
  STAGE5      Y QUIN      Yellow quintuple blink
  SPECIAL     C ISINGLE   Cyan inverted single blink
  SPECIAL1    C ISINGLE   Cyan inverted single blink
  SPECIAL2    C IDOUBLE   Cyan inverted double blink
  SPECIAL3    C ITRIPLE   Cyan inverted triple blink
  SPECIAL4    C IQUAD     Cyan inverted quadruple blink
  SPECIAL5    C IQUIN     Cyan inverted quintuple blink
  CLEANUP     W FAST      White fast blink
  FINISH      G SUCCESS   Green very fast blink followed by SOLID
  OFF         Turns the LED off

Examples:
  LED Y SINGLE
  LED M 500
  LED SETUP
EOF
}
#so pgrep/pkill exclude their own pid, but not their parent
#the parent is this script, which we don't want to kill
# Kill every process matching $1 except this script and its parent.
# Bug fixed: the original quoted the whole tr-joined PID list as a SINGLE
# argument (e.g. kill "123 456 "), which `kill` rejects as an invalid
# process id, so stale LED instances were never actually terminated.
kill_other_instances() {
    local pids
    pids="$(pgrep -f "$1" | grep -vE "$$|${PPID}")"
    if [ -n "$pids" ]; then
        # shellcheck disable=SC2086 -- intentional word splitting of the PID list
        kill $pids > /dev/null 2>&1
    fi
}
kill_other_instances LED
kill_other_instances DO_A_BARREL_ROLL

run_led "$@" || show_usage
| true |
71cfb2f3302de763b508d1245ca3f50030e21e53 | Shell | ngocngv/fats | /cleanup_gke.sh | UTF-8 | 271 | 2.59375 | 3 | [] | no_license | #!/bin/bash
source `dirname "${BASH_SOURCE[0]}"`/util.sh
gcloud container clusters delete $CLUSTER_NAME
gcloud compute firewall-rules list --filter $CLUSTER_NAME --format="table(name)" | \
tail -n +2 | \
xargs --no-run-if-empty gcloud compute firewall-rules delete
| true |
f93d781990169f1007dc74ae3532db1f060993c0 | Shell | APUtils/ViewState | /Pods Project/Scripts/Cocoapods/utils.sh | UTF-8 | 421 | 2.890625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
fixWarnings() {
# Project last update check fix
sed -i '' -e $'s/LastUpgradeCheck = [0-9]*;/LastUpgradeCheck = 9999;\\\n\t\t\t\tLastSwiftMigration = 9999;/g' 'Pods/Pods.xcodeproj/project.pbxproj'
# Schemes last update verions fix
find Pods/Pods.xcodeproj/xcuserdata -type f -name '*.xcscheme' -exec sed -i '' -e 's/LastUpgradeVersion = \"[0-9]*\"/LastUpgradeVersion = \"9999\"/g' {} +
}
| true |
805b778f42952869f7904b314c602acab6effa08 | Shell | sweettea-io/rest-api | /scripts/aws/upsert_group | UTF-8 | 952 | 3.546875 | 4 | [] | no_license | #!/bin/bash
# --- Upsert AWS group (and policy) with provided name --- #
group_name="$1"
group_exists=$( aws iam list-groups | jq "[.Groups[] | .GroupName]" | grep "\"$group_name\"" )
if [[ "$group_exists" ]]; then
echo "IAM group \"$group_name\" already exists. Skipping creation."
else
echo "Creating IAM group \"$group_name\"..."
aws iam create-group --group-name "$group_name"
aws iam attach-group-policy --policy-arn arn:aws:iam::aws:policy/AmazonEC2FullAccess --group-name "$group_name"
aws iam attach-group-policy --policy-arn arn:aws:iam::aws:policy/AmazonRoute53FullAccess --group-name "$group_name"
aws iam attach-group-policy --policy-arn arn:aws:iam::aws:policy/AmazonS3FullAccess --group-name "$group_name"
aws iam attach-group-policy --policy-arn arn:aws:iam::aws:policy/IAMFullAccess --group-name "$group_name"
aws iam attach-group-policy --policy-arn arn:aws:iam::aws:policy/AmazonVPCFullAccess --group-name "$group_name"
fi | true |
93ebe1ccd8002609ea2d78fd8c427ed8f8443d9a | Shell | vito/i-fought-the-law-and-the-law-won | /generate-notice | UTF-8 | 1,323 | 3.390625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -e -u -x
START_YEAR=$(git show --quiet $(git rev-list --max-parents=0 HEAD) --format="format:%ad" --date="format:%Y" | sort | head -1)
END_YEAR=$(git show --quiet HEAD --format="format:%ad" --date="format:%Y" | sort -r | head -1)
# Print the given message to stderr and terminate the script with status 1.
function abort() {
    printf '%s\n' "$*" >&2
    exit 1
}
# Refuse to continue if either end of the year range could not be derived.
[ -z "$START_YEAR" ] && abort "failed to determine start date"
[ -z "$END_YEAR" ] && abort "failed to determine end date"

# Collapse a same-year range ("2015-2015") down to the single year.
RANGE=""
if [ "$START_YEAR" = "$END_YEAR" ]; then
  RANGE="$START_YEAR"
else
  RANGE="${START_YEAR}-${END_YEAR}"
fi

# Always regenerate against an up-to-date master branch.
git checkout master
git pull --ff-only

# Word the commit message by whether NOTICE.md already exists.
action=""
if [ -e NOTICE.md ]; then
  action="update"
else
  action="create"
fi

# Render the Apache-2.0 notice.  The unquoted EOF delimiter is intentional:
# $RANGE must expand into the copyright line.
cat > NOTICE.md <<EOF
Copyright $RANGE Alex Suraci, Chris Brown, and Pivotal Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License. You may obtain a copy of the
License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
EOF

git add NOTICE.md
git commit -m "$action NOTICE.md"
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.