blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
9e6aefacc801980e9491d3a79a1939239346dbe2
|
Shell
|
dedowsdi/dotfiles
|
/.ddd/script/readtagsi
|
UTF-8
| 1,940
| 4.09375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Query ctags tag files for all tags that live inside a given scope.
set -eu -o pipefail
show_usage()
{
echo "
Usage: ${0##*/} [-t TAGFILE]... [-k KIND]... <scope_name>
Read from everything that have a scope. Your tag fields must include
\"Z\" for this to work.
Options:
-h Show this help.
-t TAGFILE Add tag file, can be used zero or multiple times. Use ./tags if
none is provided.
-k KIND Restrict kind, can be specified zero or multiple times.
-o Only print names.
All other options except -Q are forwarded to readtags.
"
}
tags=()
kinds=()
extra_options=()
while getopts ":ht:k:Q:o" opt; do
case $opt in
h)
show_usage
exit 0
;;
t)
tags+=("$OPTARG")
;;
k)
kinds+=("$OPTARG")
;;
o)
# the mere existence of the variable is the flag; tested with [[ -v ]] below
name_only=
;;
Q)
echo -e "\n Can't pass extra -Q to readtags.\n"
# fix: this called 'usage', which does not exist — the function is show_usage
show_usage
exit 1
;;
*)
# with a leading ':' in the optstring, OPTARG holds the unknown option
# character; forward it to readtags untouched
extra_options+=("-$OPTARG")
;;
esac
done
shift $((OPTIND-1))
scope_name=${1:-}
if [[ -z "$scope_name" ]]; then
show_usage
exit 1
fi
# use tags if no -t exists
if [[ ${#tags[@]} -eq 0 ]]; then
tags=(tags)
fi
# TODO use (member $kind list) instead of or, don't know how to construct list.
# With no -k given this yields (prefix? $kind ""), which matches every kind.
kinds_condition=$(printf "(prefix? \$kind \"%s\") " "${kinds[@]}")
condition="(and
(or $kinds_condition)
(eq? \$scope-name \"$scope_name\")
)"
for tagfile in "${tags[@]}" ; do
# need to test result as readtags exit with 0 when nothing found
mapfile -t results < <(readtags "${extra_options[@]}" -t "$tagfile" -Q "${condition}" -l)
if [[ ${#results[@]} -gt 0 ]]; then
if [[ -v name_only ]]; then
printf "%s\n" "${results[@]}" | cut -f 1
else
printf "%s\n" "${results[@]}"
fi
exit 0
fi
done
| true
|
bff965f6ba2f3fb9e2facb69f61aff0d4197bed3
|
Shell
|
chenqiangzhishen/Shell
|
/downloadAugusta/downloadAugusta.sh
|
UTF-8
| 10,011
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/bash
#
# The downloadAugusta script downloads the specified Augusta VM image.
#
# FEATURES:
# - download up to 2 times faster
# - only changed image blocks are downloaded
# - sparse image file is used to minimize space
# - automatic daily image download via crontab
# - keeps multiple daily images
# - automatically deletes older images
#
# DEFAULTS:
# - Default download directory = ~/augusta
# - Number of daily images to keep = 7
#
# LOCAL TESTING:
# 1) Make mock test directory:
# mkdir -p /tmp/augusta/2013-11-12/KVM/
# 2) Copy augusta.qcow2 image and install.sh files to test directory
# 3) Run local download:
# downloadAugusta -h localhost -s /tmp/augusta -d 2013-11-12 kvm
#
# Author: Dave Christenson
#
# Uncomment this line to enable debug mode
#set -x
# Script name
g_script_name=downloadAugusta
# Global variables - default values
g_user=$USER
g_password="" # filled in by a silent stdin prompt inside main()
g_host=gsax-pro.labs.lenovo.com
g_source_dir=/ifs/rtpgsa/projects/a/augusta
g_dest_dir="$HOME/augusta"
g_build_fork=1.0
g_build_subdir="/" # "/" matches any build; replaced by the selected build dir
g_rsync_options="-av --progress --sparse"
g_max_image_files=7 # Keep up to 7 image files
g_hidden_current_image_dir='.current_image' # rsync target reused across daily downloads
g_protocol="" # lftp (first download) or rsync (subsequent); auto-detected when empty
# Print help text and exit successfully.
usage() {
	cat <<EOF
usage: $g_script_name [options] image_type

 image_type 'kvm', 'vmware' or 'vhd'

OPTIONS:
 -f Select the build fork 0.1 or 2.0. Default is $g_build_fork.
 -b Select build image sub-directory. Default is to select most recent build.
 -t Target (destination) directory. Default is ${g_dest_dir}.
 -s Server's source directory. Default is ${g_source_dir}.
 -k Number of daily images to keep. Default is ${g_max_image_files}.
 -u User name. Default is ${g_user}.
 -h Host name. Default is ${g_host}.
 -p Protocol. Values are lftp or rsync.

EXAMPLES:
 $g_script_name kvm # Download kvm image to ~/download directory
 $g_script_name -k 10 vmware # Download VMware image, keeping last 10 images
 $g_script_name -b 1.0-298 vmware # Download VMware image for build 1.0-298
 $g_script_name -f 2.0 vmware # Download latest VMware image from fork 2.0
 echo mypassword | $g_script_name kvm # Run without password prompt

CRONTAB:
 Download daily image at 4 AM every day:
 $ crontab -e
 0 4 * * * echo password | $HOME/bin/$g_script_name vmware >> $HOME/augusta/downloadAugusta.log 2>&1
EOF
	exit 0
}
# main function: parse options, locate the newest matching build on the
# server, download it (lftp first time, rsync afterwards), archive a dated
# copy, and prune old image directories.
main() {
if (( $# == 0 )); then
usage
fi
# Parse options. Properly option values with spaces.
# Each -x flag records in g_xxx_ref the NAME of the global that the next
# non-option argument(s) should be stored into; the *) arm assigns through
# that indirect reference, appending space-separated values.
g_xxx_ref=
while (( $# > 1 )) || [[ $1 = -* ]]; do
case "$1" in
-t) g_xxx_ref=g_dest_dir; g_dest_dir=;;
-s) g_xxx_ref=g_source_dir; g_source_dir=;;
-k) g_xxx_ref=g_max_image_files; g_max_image_files=;;
-u) g_xxx_ref=g_user; g_user=;;
-h) g_xxx_ref=g_host; g_host=;;
-b) g_xxx_ref=g_build_subdir; g_build_subdir=;;
-f) g_xxx_ref=g_build_fork; g_build_fork=;;
-p) g_xxx_ref=g_protocol; g_protocol=;;
-*) usage;;
# save argument value using indirect reference to g_xxx variable
# NOTE(review): 'declare' inside a function creates function-local
# variables; this works here only because all later reads happen in
# this same function — confirm before refactoring.
*) if [ -z "${!g_xxx_ref}" ]; then
declare $g_xxx_ref="$1" # save first or only argument value
else
declare $g_xxx_ref+=" $1" # save space separated argument value
fi;;
esac
shift
done
local temp_server=0
if [ "$g_host" == 10.240.80.241 ] || [ "$g_host" == ait-move-backup.labs.lenovo.com ]; then
temp_server=1
fi
# Append fork to source directory path
# (the fork sub-directory is only present in the GSA path and not on the temporary build image server)
if [ "$g_build_fork" != "" ] && [ $temp_server == 0 ]; then
g_source_dir+=/$g_build_fork
fi
# Get 'image_type' positional parameter
local image_type=$1
local image_type_subdir=
if [ "$image_type" == kvm ]; then
image_type_subdir=KVM
elif [ "$image_type" == vmware ]; then
image_type_subdir=OVA
elif [ "$image_type" == vhd ]; then
image_type_subdir=VHD
else
echo "Invalid image type '$image_type'. Must be 'kvm', 'vmware' or 'vhd'." > /dev/stderr
exit 1
fi
# Verify that destination directory exists
if [ ! -d "$g_dest_dir" ]; then
echo "Destination directory '$g_dest_dir' not found."
exit 1
fi
# No protocol specified?
if [ "$g_protocol" == "" ]; then
# Subsequent download of image?
if [ -d "$g_dest_dir/$g_hidden_current_image_dir" ]; then
g_protocol=rsync
else
g_protocol=lftp
fi
fi
# Invalid protocol specified?
if [ "$g_protocol" != rsync ] && [ "$g_protocol" != lftp ]; then
echo "Invalid -p option value '$g_protocol'. Valid values are 'rsync' or 'lftp'." > /dev/stderr
exit 1
fi
# Read password from stdin
read -p "${g_user}@${g_host}'s password: " -s g_password
echo ""
# Do wildcard match on daily build directory
local output=''
# Testing with localhost?
if [ "$g_host" == localhost ]; then
output=$(ls .)
else
if [ $temp_server == 1 ]; then
output=$(run_expect "$g_password" "lftp -e 'cls -1 --sort=date -q ${g_source_dir} | grep $g_build_subdir; exit' sftp://${g_user}@${g_host}")
else
# host name contains dash?
if echo $g_host | grep '-' > /dev/null; then
local g_host=$(host $g_host)
g_host=${g_host##* }
fi
output=$(run_expect $g_password "lftp -e 'cls -1 --sort=date -q ${g_source_dir} | grep /$g_build_fork/ | grep $g_build_subdir; exit' ftp://${g_user}@${g_host}")
fi
# NOTE(review): '(( $? ))' consumes the status being tested, so the
# trailing 'exit $?' exits with the status of (( )) itself — which is 0
# exactly when the original status was non-zero. Looks like a bug; the
# intent was probably 'rc=$?; (( rc )) && exit $rc'. Confirm.
(( $? )) && exit $? # Exit on bad return code from expect
fi
# Select most recent matching build
local build_dir
while read -r line; do
echo $line
# This is an output line from lftp cls?
if echo $line | grep $g_source_dir/ >/dev/null; then
build_dir=${line%\/*} # Remove slash at end of directory
break
fi
done <<< "$output"
# Image directory with matching build found?
if echo $build_dir | grep $g_build_subdir >/dev/null; then
g_source_dir="$build_dir"
g_build_subdir=${build_dir##*\/}
echo "Selected image directory: $g_host:$g_source_dir"
else
echo "No image directory match for '$g_build_subdir' found in '${g_host}:${g_source_dir}'." > /dev/stderr
exit 1
fi
# Download the most recent Augusta image
# Use rsync if this is a subsequent download?
if [ "$g_protocol" == rsync ]; then
run_expect "$g_password" "TIME='Download Time=%E' time rsync $g_rsync_options ${g_user}@${g_host}:${g_source_dir}/$image_type_subdir/ '$g_dest_dir/$g_hidden_current_image_dir/$image_type_subdir/'"
else # use lftp on first download
mkdir -p "$g_dest_dir/$g_hidden_current_image_dir/$image_type_subdir"
if [ $temp_server == 1 ]; then
run_expect $g_password "TIME='Download Time=%E' time lftp -e 'mirror --use-pget-n=5 ${g_source_dir}/$image_type_subdir $g_dest_dir/$g_hidden_current_image_dir/$image_type_subdir; exit' sftp://${g_user}@${g_host}"
else
run_expect $g_password "TIME='Download Time=%E' time lftp -e 'mirror --use-pget-n=5 ${g_source_dir}/$image_type_subdir $g_dest_dir/$g_hidden_current_image_dir/$image_type_subdir; exit' ftp://${g_user}@${g_host}"
fi
fi
# Copy image file so it isn't overlaid on the next scheduled rsync
mkdir -p "$g_dest_dir/$g_build_subdir/$image_type_subdir"
echo -e "\nCopying image file to $g_dest_dir/$g_build_subdir/$image_type_subdir..."
# cp -u will skip files that are newer than the downloaded file.
# We don't want to update an VM image file that is currently running.
cp -ur --sparse=never "$g_dest_dir/$g_hidden_current_image_dir/$image_type_subdir/"* "$g_dest_dir/$g_build_subdir/$image_type_subdir/"
# KVM needs read/write/execute permission on all directories
# Otherwise, it will complain that it doesn't have search permission
find "$g_dest_dir/$g_build_subdir/$image_type_subdir" -type d -exec chmod +rwx {} \;
find "$g_dest_dir/$g_build_subdir/$image_type_subdir" -type f -exec chmod +rw {} \;
find "$g_dest_dir/$g_build_subdir/$image_type_subdir" -type f -name install.sh -exec chmod +x {} \;
# Clean up old image files
local dirs=''
for dir in $(ls -tr "$g_dest_dir"); do
# This is a directory and not a file?
if [ -d "$g_dest_dir/$dir" ]; then
dirs+=" $g_dest_dir/$dir"
fi
done
local image_dir_array=($dirs)
local i=0
# ls -tr puts the oldest directories first; delete from the front until
# only g_max_image_files remain.
while (( ${#image_dir_array[@]} > $g_max_image_files )); do
dir=${image_dir_array[$i]}
unset image_dir_array[$i]
((i++))
if [ -d "$dir" ]; then
echo "Deleting old image directory '$dir'"
# Try to delete without root authority first...
if ! rm -r "$dir"; then
sudo rm -r "$dir"
fi
fi
done
echo "Image files downloaded to $g_dest_dir/$g_build_subdir/$image_type_subdir"
}
# Run a command under expect, automatically answering its password prompt.
# $1 - password
# $2 - command line (executed via 'bash -c')
# Exits the whole script when expect reports a non-zero spawned status.
run_expect() {
# input arguments
password=$1
command=$2
# Export environment that must be accessed by expect
# (read via $::env(...) inside the here-doc, so the secrets never appear
# on an argv visible to 'ps')
export password
export command
# Use expect to automatically enter the password
expect <<- DONE
set timeout -1
spawn bash -c "$::env(command)"
match_max 100000
# Look for passwod prompt
expect "*?assword:*"
# Send password $::env(password)
send -- "$::env(password)\r"
# send blank line (\r) before eof
send -- "\r"
expect eof
set waitval [wait]
set exval [lindex $waitval 3]
exit $exval
DONE
# Bad exit code from expect?
rc=$?
if [ "$rc" != 0 ]; then
echo "Expect error exit code $rc" >/dev/stderr
exit $rc
fi
}
# fix: quote "$@" — unquoted $@ re-splits arguments containing spaces,
# defeating main()'s space-joined option value handling
main "$@"
| true
|
1b383fe816fd22b3310837d7ed1304e9b1ab352f
|
Shell
|
Sensei-Dad/dotfiles
|
/functions/hgrep
|
UTF-8
| 123
| 3.0625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Grep the history for something
# Usage:
# hgrep <string>
hgrep() {
    # fix: quote "$1" (unquoted patterns with spaces/globs broke the search)
    # and use -- so patterns starting with '-' are not taken as options.
    # The trailing 'grep -v grep' drops the hgrep invocation itself,
    # but also hides any history line containing the word 'grep'.
    history | grep -- "$1" | grep -v grep;
}
| true
|
65bff814da1e72d7f948fd43bd0f8a60130964fa
|
Shell
|
openshift/openshift-azure
|
/hack/aad.sh
|
UTF-8
| 1,900
| 3.75
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -e
# Create an AAD application and assign it to an owning service principal.
# NOTE(review): '-e' in the shebang is lost when invoked as 'bash aad.sh';
# consider 'set -e' in the body.
usage() {
cat <<EOF >&2
usage:
$0 app-create name owner
Examples:
$0 app-create user-$USER-aad aro-team-shared
EOF
exit 1
}
# Meaning of the below magic numbers
# "00000002-0000-0000-c000-000000000000" -> AAD Graph API
# "5778995a-e1bf-45b8-affa-663a9f3f4d04" -> Read directory data
# "311a71cc-e848-46a1-bdf8-97ff7156d8e6" -> Sign in and read user profile
# "type": "Role" -> application permission
# "type": "Scope" -> delegated permission
case "$1" in
app-create)
if [[ "$#" -ne 3 ]]; then
usage
fi
# Resolve the owner service principal's object id; empty output = not found.
OWNER_ID=$(az ad sp list --display-name "$3" --query [0].objectId --output tsv)
if [[ "$OWNER_ID" == "" ]]; then
echo "owner $3 not found" >&2
exit 1
fi
# A freshly generated UUID doubles as the client secret (key-type password).
AZURE_AAD_CLIENT_SECRET=$(uuidgen)
# Create the app; the JSON here-doc grants the Graph API permissions listed
# above, and 'tr' strips the quotes around the returned appId.
AZURE_AAD_CLIENT_ID=$(az ad app create \
--display-name "$2" \
--homepage "http://$2/" \
--identifier-uris "http://$2/" \
--key-type password \
--password "$AZURE_AAD_CLIENT_SECRET" \
--query appId \
--reply-urls "http://$2/" \
--required-resource-accesses @- <<'EOF' | tr -d '"'
[
{
"resourceAppId": "00000002-0000-0000-c000-000000000000",
"resourceAccess": [
{
"id": "5778995a-e1bf-45b8-affa-663a9f3f4d04",
"type": "Role"
},
{
"id": "311a71cc-e848-46a1-bdf8-97ff7156d8e6",
"type": "Scope"
}
]
}
]
EOF
)
az ad app owner add --id $AZURE_AAD_CLIENT_ID --owner-object-id $OWNER_ID
cat >&2 <<EOF
Note: ask an administrator to grant your application's permissions. Until this
is done the application will not work.
To use this application with an OpenShift cluster, add the following line to
your env file and source it:
export AZURE_AAD_CLIENT_ID=$AZURE_AAD_CLIENT_ID
export AZURE_AAD_CLIENT_SECRET=$AZURE_AAD_CLIENT_SECRET
EOF
;;
*)
usage
;;
esac
| true
|
9179f09ffb69141fffa3618409dbfe42a722bc59
|
Shell
|
clp-research/slurk-bots
|
/audio_video/setup_audio_video_room.sh
|
UTF-8
| 1,626
| 3.453125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
set -eu
# export SLURK_OPENVIDU_URL and SLURK_OPENVIDU_SECRET before starting the script

# Print all arguments on stderr.
errcho() {
    >&2 echo "$@"
}

# Run the given command, re-emit its stdout, and abort the script when the
# command produced no output (the helper scripts signal failure that way).
check_response() {
    local out
    out=$("$@")
    if [[ -z $out ]]; then
        errcho "Unexpected error for call to: $1"
        exit 1
    fi
    printf '%s\n' "$out"
}
export SLURK_DOCKER=slurk
# run slurk
# (assumes this script is launched from the slurk-bots checkout with the
# slurk server checkout as a sibling directory — TODO confirm layout)
cd ../slurk
docker build --tag="slurk/server" -f Dockerfile .
check_response scripts/start_server_with_openvidu.sh
sleep 5
# create admin token
SLURK_TOKEN=$(check_response scripts/read_admin_token.sh)
echo "Admin Token:"
echo $SLURK_TOKEN
# create task room layout
TASK_ROOM_LAYOUT=$(check_response scripts/create_layout.sh ../slurk-bots/audio_video/simple_av_room_layout.json | jq .id)
echo "Task Room Layout Id:"
echo $TASK_ROOM_LAYOUT
# create an OpenVidu session; sed strips the JSON double quotes around the id
SESSION=$(check_response scripts/create_openvidu_session.sh)
SESSION_ID=$(echo $SESSION | jq .id | sed 's/^"\(.*\)"$/\1/')
echo "Session Id:"
echo $SESSION_ID
# create demo task room
TASK_ROOM=$(check_response scripts/create_room.sh $TASK_ROOM_LAYOUT $SESSION_ID | jq .id)
echo "Task Room Id:"
echo $TASK_ROOM
# create two users (same permissions file, one token each)
USER1=$(check_response scripts/create_room_token.sh $TASK_ROOM ../slurk-bots/audio_video/user_permissions.json 1 | jq .id | sed 's/^"\(.*\)"$/\1/')
echo "User Token:"
echo $USER1
USER2=$(check_response scripts/create_room_token.sh $TASK_ROOM ../slurk-bots/audio_video/user_permissions.json 1 | jq .id | sed 's/^"\(.*\)"$/\1/')
echo "User Token:"
echo $USER2
echo "Use the two tokens to log into slurk at localhost:5000, using either private browsing sessions or two different browsers."
| true
|
4cf0d5bdffe7a16e61c46697fa35b4d4043e2987
|
Shell
|
openshift/openshift-docs
|
/scripts/analytics_fix.sh
|
UTF-8
| 895
| 2.6875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set -ev
NC='\033[0m' # No Color
GREEN='\033[0;32m'
# Download analytics related files and move them to correct folders
echo -e "${GREEN}==== Download files with removed analytics ====${NC}"
# All files come from the same upstream directory; loop instead of
# repeating the wget command five times.
base_url=https://raw.githubusercontent.com/openshift/openshift-docs/main/scripts/ocpdocs
# Template partials land in _templates/
for f in _analytics_other.html _footer_other.html.erb _topnav_other.html; do
	wget "${base_url}/${f}" -O "_templates/${f}"
done
# Landing pages land in the repository root
for f in index-commercial.html search-commercial.html; do
	wget "${base_url}/${f}" -O "${f}"
done
| true
|
655a635cf43615dcc094d9a2c6a42222c673e2e4
|
Shell
|
cloudcafetech/bridget
|
/bridget.sh
|
UTF-8
| 7,369
| 4.03125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Path the generated CNI configuration is written to; overridable via env.
CNI_CONFIG="${CNI_CONFIG:-/etc/cni/net.d/10-bridget.conf}"
# Print the supported environment variables and a workflow summary.
usage() {
cat <<EOF
Available variables:
- BRIDGE (example: cbr0)
- VLAN (example: 100)
- IFACE (example: eth0)
- MTU (default: 1500)
- CHECK_SLAVES (default: 1)
- POD_NETWORK (default: 10.244.0.0/16)
- DEBUG (default: 0)
Short workflow:
* If the bridge exists it will be used, otherwise it will be created
* If VLAN and IFACE are set, the following chain will be created:
IFACE <-- VLAN <-- BRIDGE
* IP-address will be set automatically. This IP-address
will be used as default gateway for containers
to make kubernetes services work.
EOF
}
# Print a timestamped error to stderr, show usage, and abort.
# fix: 'echo -en' is a bashism — under #!/bin/sh (e.g. dash) echo has no
# -e/-n options and would print them literally; printf is portable.
error() {
	>&2 printf '[%s] ERROR:\t' "$(date '+%Y-%m-%d %H:%M:%S')"
	>&2 printf '%s\n' "$1"
	>&2 usage
	exit 1
}
# Print a timestamped informational message to stdout.
# fix: printf replaces the non-portable 'echo -en' (see error()).
log() {
	printf '[%s] INFO:\t' "$(date '+%Y-%m-%d %H:%M:%S')"
	printf '%s\n' "$1"
}
# Print a timestamped debug message to stderr, but only when DEBUG=1.
# fix: printf replaces the non-portable 'echo -en' (see error()).
debug() {
	if [ "${DEBUG:-0}" = 1 ]; then
		>&2 printf '[%s] DEBUG:\t' "$(date '+%Y-%m-%d %H:%M:%S')"
		>&2 printf '%s\n' "$1"
	fi
}
# Print the IPv4 address immediately above $1, carrying across octets
# (dotted quad -> 32-bit hex -> +1 -> dotted quad).
next_ip() {
	local as_hex incremented
	as_hex=$(printf '%.2X%.2X%.2X%.2X\n' $(echo "$1" | sed -e 's/\./ /g'))
	incremented=$(printf %.8X $((0x$as_hex + 1)))
	printf '%d.%d.%d.%d\n' $(echo "$incremented" | sed -r 's/(..)/0x\1 /g')
}
# Print the IPv4 address immediately below $1, borrowing across octets
# (dotted quad -> 32-bit hex -> -1 -> dotted quad).
prev_ip() {
	local as_hex decremented
	as_hex=$(printf '%.2X%.2X%.2X%.2X\n' $(echo "$1" | sed -e 's/\./ /g'))
	decremented=$(printf %.8X $((0x$as_hex - 1)))
	printf '%d.%d.%d.%d\n' $(echo "$decremented" | sed -r 's/(..)/0x\1 /g')
}
# Query the Kubernetes API for node $1 and print its pod CIDR (.spec.podCIDR).
# Uses the pod's mounted service-account credentials; requires curl and jq.
# NAMESPACE is read but not used by the request — kept as-is.
getnodecidr() {
CA_CERT=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)
NAMESPACE=$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace)
# KUBERNETES_PORT looks like tcp://host:port; strip the scheme for the URL
curl -sS -m 5 --cacert $CA_CERT -H "Authorization: Bearer $TOKEN" "https://${KUBERNETES_PORT#*//}/api/v1/nodes/$1" | jq -r .spec.podCIDR
}
# ------------------------------------------------------------------------------------
# Configure bridge
# ------------------------------------------------------------------------------------
log "Starting bridge configuration"
[ -z "$BRIDGE" ] && error "BRIDGE variable is not defined"
[ -z "$NODE_NAME" ] && error "NODE_NAME variable is not defined"
# Check if bridge interface exist
if ! ip link show "$BRIDGE" 1>/dev/null 2>/dev/null; then
log "Adding new bridge $BRIDGE"
ip link add dev "$BRIDGE" type bridge
export CHECK_SLAVES=1
else
log "Bridge $BRIDGE already exist, use it"
fi
log "Setting bridge $BRIDGE up"
ip link set "$BRIDGE" up
# ------------------------------------------------------------------------------------
# Configure vlan
# ------------------------------------------------------------------------------------
if ([ ! -z "$VLAN" ] || [ ! -z "$IFACE" ]) && [ "${CHECK_SLAVES:-1}" = 1 ]; then
log "Starting VLAN configuration"
[ -z "$IFACE" ] && error "IFACE variable is not defined"
if [ ! -z "$VLAN" ]; then
# check if vlan interface exist
if ip link show "$IFACE.$VLAN" 1>/dev/null 2>/dev/null; then
log "VLAN interface $IFACE.$VLAN already exist"
else
log "Adding new VLAN interface $IFACE.$VLAN"
# NOTE(review): MTU has no default at this point (the 1500 fallback is only
# applied in the CNI config below) — if MTU is unset this passes an empty
# argument to ip; confirm MTU is always exported alongside VLAN.
ip link add link "$IFACE" name "$IFACE.$VLAN" mtu "${MTU}" type vlan id "$VLAN"
fi
log "Setting vlan $IFACE.$VLAN up"
ip link set dev "$IFACE.$VLAN" up
fi
fi
# ------------------------------------------------------------------------------------
# Configure slaves
# ------------------------------------------------------------------------------------
if ([ ! -z "$VLAN" ] || [ ! -z "$IFACE" ]) && [ "${CHECK_SLAVES:-1}" = 1 ]; then
log "Starting configuring slave interfaces"
if [ ! -z "$VLAN" ]; then
SLAVEIF="$IFACE.$VLAN"
else
SLAVEIF="$IFACE"
fi
if ! ip link show "$SLAVEIF" 1>/dev/null 2>/dev/null; then
error "$SLAVEIF does not exist"
fi
# check if slave interface contains right master
MASTERIF="$(ip -o link show "$SLAVEIF" | grep -o -m1 'master [^ ]\+' | cut -d' ' -f2)"
case "$MASTERIF" in
"$BRIDGE") log "$SLAVEIF already member of $BRIDGE" ;;
"" ) log "Adding $SLAVEIF as member to $BRIDGE"
ip link set "$SLAVEIF" master "$BRIDGE" ;;
* ) error "interface $SLAVEIF have another master" ;;
esac
fi
# ------------------------------------------------------------------------------------
# Retrive network parameters
# ------------------------------------------------------------------------------------
log "Starting retriving parameters"
POD_NETWORK="${POD_NETWORK:-10.244.0.0/16}"
NODE_NETWORK="$(getnodecidr "${NODE_NAME}")"
if [ -z "$NODE_NETWORK" ] || [ "$NODE_NETWORK" = "null" ]; then
error "Failed to get node cidr"
fi
# ipcalc prints KEY=VALUE lines, so each export below materializes the
# prefixed variable named in the trailing comment.
set -e
export "POD_$(ipcalc -b "$POD_NETWORK")" # POD_BROADCAST
export "POD_$(ipcalc -p "$POD_NETWORK")" # POD_PREFIX
export "POD_$(ipcalc -n "$POD_NETWORK")" # POD_NETWORK
export "NODE_$(ipcalc -p "$NODE_NETWORK")" # NODE_PREFIX
export "NODE_$(ipcalc -b "$NODE_NETWORK")" # NODE_BROADCAST
export "NODE_$(ipcalc -n "$NODE_NETWORK")" # NODE_NETWORK
export "NODE_IP=$(next_ip "$NODE_NETWORK")" # NODE_IP
set +e
debug "POD_BROADCAST=$POD_BROADCAST"
debug "POD_PREFIX=$POD_PREFIX"
debug "POD_NETWORK=$POD_NETWORK"
debug "NODE_PREFIX=$NODE_PREFIX"
debug "NODE_BROADCAST=$NODE_BROADCAST"
debug "NODE_NETWORK=$NODE_NETWORK"
debug "NODE_IP=$NODE_IP"
# ------------------------------------------------------------------------------------
# Configure IP-address
# ------------------------------------------------------------------------------------
log "Configuring $NODE_IP/$POD_PREFIX on $BRIDGE"
ip -o addr show "$BRIDGE" | grep -o 'inet [^ ]\+' | while read _ IP; do
# Remove bridge addresses from the same subnet, don't touch other addresses
if [ "$(ipcalc -b "$IP")" = "BROADCAST=${POD_BROADCAST}" ] && [ "$IP" != "$NODE_IP/$POD_PREFIX" ]; then
ip addr del "$IP" dev "$BRIDGE"
fi
done
ip addr change "$NODE_IP/$POD_PREFIX" dev "$BRIDGE"
# ------------------------------------------------------------------------------------
# Configure cni
# ------------------------------------------------------------------------------------
log "Starting generating CNI configuration"
set -e
GATEWAY="${NODE_IP}"
SUBNET="${POD_NETWORK}/${POD_PREFIX}"
FIRST_IP="$(next_ip "${NODE_IP}")"
LAST_IP="$(prev_ip "${NODE_BROADCAST}")"
set +e
debug "GATEWAY=$GATEWAY"
debug "SUBNET=$SUBNET"
debug "FIRST_IP=$FIRST_IP"
debug "LAST_IP=$LAST_IP"
log "Writing $CNI_CONFIG"
cat >$CNI_CONFIG <<EOT
{
"name": "bridget",
"cniVersion": "0.2.0",
"type": "bridge",
"bridge": "${BRIDGE}",
"ipMasq": true,
"mtu": ${MTU:-1500},
"ipam": {
"type": "host-local",
"subnet": "${SUBNET}",
"rangeStart": "${FIRST_IP}",
"rangeEnd": "${LAST_IP}",
"gateway": "${GATEWAY}",
"routes": [
{ "dst": "0.0.0.0/0" }
]
}
}
EOT
# Display config
cat "$CNI_CONFIG"
# ------------------------------------------------------------------------------------
# Sleep gently
# ------------------------------------------------------------------------------------
# Keep the container alive forever so kubernetes does not restart it.
exec tail -f /dev/null
| true
|
3eea942a228cd996f38baa7caf40550d35bbc7a1
|
Shell
|
noirgif/CORDS
|
/systems/tikv/recover_all.sh
|
UTF-8
| 177
| 2.65625
| 3
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
#!/bin/bash
# Restore every pd/tikv data directory from its pristine snapshot.
echo Recovering everything ...
for f in tidb-docker-compose/workdir/{pd,tikv}? ; do
	# fix: quote expansions so unusual paths cannot word-split or re-glob
	# (SC2086), and use -- so nothing is mistaken for an rm option
	sudo rm -rf -- "$f/data"
	cp -r "$f/data.snapshot" "$f/data"
	# print just the component name (was the useless-echo 'echo $(basename $f)')
	basename "$f"
done
| true
|
05842ca7596dee895ef283628c52d5e5ea20f761
|
Shell
|
NNemec/DIYkit
|
/unchecked/ietl.diy
|
UTF-8
| 436
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/bash
# DIYkit package: ???
# Build recipe for the IETL library; the uppercase helpers below
# (DEPENDS_ON, DOWNLOAD_ARCHIVE, UNPACK_ARCHIVE) and the SRCDIR/PREFIX
# variables come from the sourced DIYkit-include.sh — TODO confirm.
NAME=ietl
VERSION=2.2
DISTFILE=${NAME}-${VERSION}.tar.gz
MASTER_SITE=http://www.comp-phys.org/software/${NAME}
ARCHIVE_URL=${MASTER_SITE}/${DISTFILE}
. $DIYkit/DIYkit-include.sh
DEPENDS_ON atlas
DOWNLOAD_ARCHIVE
UNPACK_ARCHIVE
cd $SRCDIR
# configure against blitz/boost under $PREFIX and the user's ATLAS build
./configure --prefix $PREFIX --with-blitz=$PREFIX --with-boost=$PREFIX/include/boost-1_32 --with-atlas-dir=$HOME/lib/atlas
make
make install
| true
|
2c2527309912287922e22635eb0ed7c64e1cbe19
|
Shell
|
mineiro25/matrixmult-analysis
|
/code-cuda/search.sh
|
UTF-8
| 561
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
#PBS -N AA_CUDA
#PBS -l walltime=03:00:00
#PBS -l nodes=1:r662:k20:ppn=48
# PBS batch job: build the CUDA matrix-multiply benchmarks and run each
# kernel over several problem sizes, logging into resultadosCUDA.csv.
source /share/apps/intel/parallel_studio_xe_2019/compilers_and_libraries_2019/linux/bin/compilervars.sh intel64
module purge
module load papi/5.4.1
module load gcc/7.2.0
module load cuda/7.0.28
export CUDA=yes
make
# fix: 'StringVal = "..."' (spaces around =) is not an assignment — the shell
# tried to run a command named 'StringVal', and the loop variable was empty
string_val="dotProductCUDA dotProductBlockCUDA useCUDA"
for i in 32 128 1024 2048; do
	for func in $string_val; do
		echo "Size=" "$i" >> "resultadosCUDA.csv"
		# fix: plain echo printed a literal '\n'; printf emits the intended newline
		printf '\nFunction=  %s\n' "$func" >> "resultadosCUDA.csv"
		./bin/simple "$func" "$i" time
	done
done
| true
|
b7de54e649e3c312ede462ce8a50f2a1995df95d
|
Shell
|
zalkdo/vagrant
|
/centos/scripts/install.sh
|
UTF-8
| 806
| 2.671875
| 3
|
[] |
no_license
|
# Vagrant provisioning script: update the box, enable password SSH,
# install docker and k3s. Runs unattended — every yum call needs -y.
echo "yum updating......."
sudo yum update -y
echo "ssh PasswordAuthentication enable"
sudo sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/g' /etc/ssh/sshd_config
sudo systemctl restart sshd
echo "docker installing......"
sudo yum install -y yum-utils
sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
# fix: without -y this install prompts for confirmation and hangs provisioning
sudo yum install -y docker-ce docker-ce-cli containerd.io
sudo systemctl start docker
sudo systemctl enable docker
sudo usermod -aG docker vagrant
echo "k3s installing......."
curl -sfL https://get.k3s.io | sh -
# fix: ~/.kube may not exist on a fresh box; create it before copying
mkdir -p ~/.kube
# NOTE: cp -i can still prompt if config-k3s already exists — confirm intent
sudo cp -i /etc/rancher/k3s/k3s.yaml ~/.kube
mv ~/.kube/k3s.yaml ~/.kube/config-k3s
export KUBECONFIG=$HOME/.kube/config-k3s:$HOME/.kube/config:$KUBECONFIG
echo "k3s restart......."
sudo systemctl daemon-reload
sudo systemctl restart k3s
| true
|
ac6245a29e833776297e3b632c1dee39faeb7c2e
|
Shell
|
renantmagalhaes/workstation
|
/utils/1password/install-1password.sh
|
UTF-8
| 1,213
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
# Print how command $1 resolves (path/builtin); status 1 when unavailable.
check_cmd() {
	local name=$1
	command -v "$name" 2> /dev/null
}
# Detect the system's package manager, then download and install the
# matching 1Password package.
if check_cmd apt-get; then # FOR DEB SYSTEMS
	wget https://downloads.1password.com/linux/debian/amd64/stable/1password-latest.deb -O /tmp/1password-latest.deb
	sudo dpkg -i /tmp/1password-latest.deb
	# pull in any dependencies dpkg could not satisfy on its own
	sudo apt-get -f install
elif check_cmd dnf; then # FOR RPM SYSTEMS
	wget https://downloads.1password.com/linux/rpm/stable/x86_64/1password-latest.rpm -O /tmp/1password-latest.rpm
	sudo rpm -i /tmp/1password-latest.rpm
elif check_cmd zypper; then # FOR SUSE SYSTEMS
	wget https://downloads.1password.com/linux/rpm/stable/x86_64/1password-latest.rpm -O /tmp/1password-latest.rpm
	sudo rpm -i /tmp/1password-latest.rpm
else
	# fix: failing to detect a package manager is an error — report it on
	# stderr and exit non-zero (this previously did 'exit 0')
	echo "Not able to identify the system" >&2
	exit 1
fi
# Fix vivaldi: register extra browsers allowed to talk to 1Password
sudo mkdir -p /etc/1password
sudo cp custom_allowed_browsers /etc/1password/custom_allowed_browsers
sudo chown root:root /etc/1password/custom_allowed_browsers && sudo chmod 755 /etc/1password/custom_allowed_browsers
# Auto start entry for the desktop session
# fix: ensure the autostart directory exists before writing into it
mkdir -p ~/.config/autostart
bash -c 'cat << EOF > ~/.config/autostart/1password.desktop
[Desktop Entry]
Type=Application
Name=1password
Exec=/usr/bin/1password --silent
EOF'
| true
|
e0a19f4e0086ec011f3860267694c8aabf4dbd74
|
Shell
|
pklimov10/CMauto
|
/zabbix/verssion kontrok/ver.sh
|
UTF-8
| 821
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
# Zabbix check: fetch the deployed version string from every host below and
# print 0 when all hosts agree on one version, 1 otherwise.
# Path to the Wildfly home, e.g. /opt/wildfly (currently unused here — TODO confirm)
WFHOME=/u01/CM/wildfly
# user login (credentials for the cmj-info endpoint)
login=irshk
pass=1
# server URL
# b = expected count of duplicated version lines (see comparison below)
b=1
in=(SGO-SED-REP101
SGO-SED-AP102
SGO-SED-TECH102
SGO-SED-MRM102
SGO-SED-AP101
SGO-SED-MRM101
SGO-SED-AP107
SGO-SED-AP108
SGO-SED-VIP101
SGO-SED-AP109
SGO-SED-AP103
SGO-SED-AP105
SGO-SED-AP104
SGO-SED-AP106
SGO-SED-TECH101)
# Query each host; keep the line that follows the '<br>Version :' marker.
# NOTE(review): the temp file is appended to, not truncated — repeated runs
# accumulate old results; confirm whether an external cron cleans it up.
for ADDR in ${in[*]}
do
ver=`curl -u $login:$pass $ADDR:8080/ssrv-war/api/cmj-info --silent |grep -A 1 '<br>Version :' |grep -v '<br>Version :'`
echo $ver >> /u01/CM/Error/tmp_zabbix_log.txt
done
# uniq -cd lists only duplicated lines; wc -l counts how many distinct
# version strings are shared — exactly 1 means all hosts report the same one.
cat /u01/CM/Error/tmp_zabbix_log.txt | sort | uniq -cd |wc -l > /u01/CM/Error/tmp_zabbix_log_wc.txt
a=`cat /u01/CM/Error/tmp_zabbix_log_wc.txt`
if [ "$a" -eq "$b" ]
then
echo 0
else
echo 1
fi
| true
|
97b3f1bbc7367924b7c2a611385a7b68cdb0c760
|
Shell
|
Fennec89/PHS-scripts
|
/bash/advanced.sh
|
UTF-8
| 701
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
#########################
# Author: Gustav Fahlén #
# Date: 2012-09-28 #
# @: PHS #
#########################
# Make a midpoint screenshot for every .avi found under $1 (folder),
# creating one directory per clip beneath $2 (target).
folder=$1
target=$2
if [ $2 ];then
# list .avi files, keeping only the 5th path component
# (assumes a fixed directory depth under $folder — TODO confirm)
arr=`find ${folder} -iname '*.avi' | cut -d'/' -f5`
for line in ${arr}
do
echo ${line}
mkdir -p "${target}${line}"
# clip length in seconds from mplayer's identify output, rounded to int
length=`mplayer -vo null -ao null -identify -frames 0 ${folder}${line} 2> /dev/null | grep 'ID_LENGTH' | cut -d'=' -f2`
len=`echo ${length} | awk '{printf("%d\n",$1 + 0.5)}'`
mid=$((${len}/2))
# dump a single jpeg frame from the middle of the clip into the cwd
mplayer -vo jpeg -ao null -frames 1 -ss ${mid} ${folder}${line}
# NOTE(review): the jpeg is moved into "./${line}/", not the
# "${target}${line}" directory created above — looks like a bug; confirm
# the intended destination before changing it.
mv *.jpg "${line}/${line}.jpg"
done
else
echo "You must select a target dir!"
fi
| true
|
1606db30f1a49cb7fc3697ac5c890c4c828a9024
|
Shell
|
handicraftsman/wunderlistux
|
/bin/install.sh
|
UTF-8
| 1,067
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/bash
# Wunderlistux installer: pick the prebuilt archive matching this machine,
# unpack it under /opt and register a desktop shortcut. Must run as root.
ARCH=`uname -m`
KERNEL=`uname -s`
if [[ $EUID -ne 0 ]]; then
# NOTE(review): '2>&1' redirects stderr to stdout here; '>&2' (send the
# message to stderr) was probably intended — confirm.
echo 'You need to run installer as root!' 2>&1
exit 1
fi
# 64-bit Linux gets the 0.0.5 x64 build; everything else falls back to ia32
if [ $KERNEL = 'Linux' ] && [ $ARCH = 'x86_64' ]; then
URL='https://github.com/edipox/wunderlistux/archive/0.0.5-linux-x64.tar.gz'
else
URL='https://github.com/edipox/wunderlistux/archive/0.0.4-linux-ia32.tar.gz'
fi
# download built application and extract into the /opt directory
function install {
	echo 'Downloading package...'
	# quote $URL (SC2086) — harmless today, correct always
	wget -q -O /tmp/wunderlistux.tar.gz "$URL"
	echo 'Extracting package...'
	# fix: -p so re-running the installer over an existing /opt/wunderlistux
	# does not abort on 'File exists'
	mkdir -p /opt/wunderlistux
	tar -zxf /tmp/wunderlistux.tar.gz -C /opt/wunderlistux --strip-components=1
}
# fix paths in .desktop file
# (the shipped file points at path/to/Wunderlistux-linux-x64; rewrite every
# occurrence to the actual install location /opt/wunderlistux)
function fix_desktop {
sed -i 's/path\/to\/Wunderlistux-linux-x64/opt\/wunderlistux/g' /opt/wunderlistux/wunderlistux.desktop
}
# link .desktop file to all users
function create_desktop {
	echo 'Creating shortcut...'
	# fix: -f replaces a stale link from a previous install instead of
	# failing with 'File exists' when the installer is re-run
	ln -sf /opt/wunderlistux/wunderlistux.desktop /usr/share/applications/
}
# Run the installation steps in order: fetch+unpack, patch the .desktop
# file, then publish the shortcut.
install
fix_desktop
create_desktop
echo 'Wunderlistux has been installed successfully :)'
| true
|
1fa9e560d6a3e6a8715c5dc3b1dbc41ac4eac2ca
|
Shell
|
vash15/magick-installer
|
/magick-installer.sh
|
UTF-8
| 2,616
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/sh
# Build ImageMagick 6.8.9 and its image-format dependencies under /opt/local.
set -e

# Fetch $1 with curl unless the file already exists locally.
# (was: 'function download' with [[ ]] — bashisms under #!/bin/sh; now POSIX)
download() {
	url=$1
	base=$(basename "$url")
	if [ ! -e "$base" ]; then
		echo "curling $url"
		curl -O -L "$url"
	fi
}

# Common build recipe shared by most packages: unpack $1, enter $2,
# configure with the remaining arguments, then build and install.
build_install() {
	archive=$1
	srcdir=$2
	shift 2
	tar xzvf "$archive"
	cd "$srcdir"
	./configure "$@"
	make clean
	make
	sudo make install
	cd ..
}

# mkdir magick-installer
cd magick-installer

download http://ftp.gnu.org/pub/gnu/libiconv/libiconv-1.14.tar.gz
download http://download.savannah.gnu.org/releases/freetype/freetype-2.5.3.tar.gz
download http://freefr.dl.sourceforge.net/project/libpng/libpng16/1.6.12/libpng-1.6.12.tar.gz
# fix: this used to fetch jpegsrc.v9.tar.gz while the build step below
# extracted jpegsrc.v9a.tar.gz — the script could never build libjpeg
download http://www.ijg.org/files/jpegsrc.v9a.tar.gz
download http://download.osgeo.org/libtiff/tiff-4.0.3.tar.gz
download http://downloads.sourceforge.net/lcms/lcms2-2.5.tar.gz
# download http://downloads.ghostscript.com/public/ghostscript-9.10.tar.gz
download https://ghostscript.googlecode.com/files/ghostscript-fonts-std-8.11.tar.gz
download http://www.imagemagick.org/download/ImageMagick-6.8.9-5.tar.gz

#
# Compile under /opt/local
#
# libiconv is special: only its libcharset subdirectory is built, with no
# 'make clean' step, so it stays inline instead of using build_install.
tar xzvf libiconv-1.14.tar.gz
cd libiconv-1.14
cd libcharset
./configure --prefix=/opt/local
make
sudo make install
cd ../..

build_install freetype-2.5.3.tar.gz freetype-2.5.3 --prefix=/opt/local
build_install libpng-1.6.12.tar.gz libpng-1.6.12 --prefix=/opt/local
build_install jpegsrc.v9a.tar.gz jpeg-9a --enable-shared --prefix=/opt/local
build_install tiff-4.0.3.tar.gz tiff-4.0.3 --prefix=/opt/local
# lcms configures with the default prefix (as in the original recipe)
build_install lcms2-2.5.tar.gz lcms2-2.5

# Does not work in OS X
# build_install ghostscript-9.10.tar.gz ghostscript-9.10 --prefix=/opt/local

# Ghostscript standard fonts are just unpacked and moved into place.
tar zxvf ghostscript-fonts-std-8.11.tar.gz
sudo mkdir -p /opt/local/share/ghostscript/fonts
sudo mv -f fonts/* /opt/local/share/ghostscript/fonts

# ImageMagick itself needs the /opt/local headers/libs plus custom flags,
# so it is built inline rather than through build_install.
tar xzvf ImageMagick-6.8.9-5.tar.gz
cd ImageMagick-6.8.9-5
export CPPFLAGS=-I/opt/local/include
export LDFLAGS=-L/opt/local/lib
./configure --prefix=/opt/local --disable-static --without-fontconfig --with-modules --without-perl --without-magick-plus-plus --with-quantum-depth=8 --with-gs-font-dir=/opt/local/share/ghostscript/fonts --disable-openmp
make clean
make
sudo make install
cd ..

sudo ln -s /opt/local/include/ImageMagick/wand /opt/local/include/wand
sudo ln -s /opt/local/include/ImageMagick/magick /opt/local/include/magick
# I have problem width convert launch width daemon or ngix. Ad example: convert test.ai test.png
sudo ln -s /opt/local/bin/gs /usr/bin/gs
echo "ImageMagick successfully installed!"
| true
|
17c376eac59ca5a9af8f637aaa6f69bc67533873
|
Shell
|
gassara-kys/aws-check-cli
|
/util/convert_admin_json2csv.sh
|
UTF-8
| 416
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/bash -eu
# Convert admin.json (AWS IAM admin-privilege report) into admin.csv.
# Both files live next to this script.
DIR_NAME=$(dirname "$0")
# Write the CSV header row (column names mirror the JSON keys below,
# including the original spellings).
echo "user_arn, user_name, access_key_id_1, access_key_id_2, has_user_admin, has_grorup_admin, enable_permission_boundory" > "${DIR_NAME}/admin.csv"
# Flatten every record into one CSV row. jq reads the file directly
# (the original 'cat file | jq' was a useless use of cat), and the
# paths are quoted against whitespace.
jq -r '.[][] | [.user_arn, .user_name, .access_key_id[0], .access_key_id[1], .has_user_admin, .has_grorup_admin, .enable_permission_boundory]|@csv' "${DIR_NAME}/admin.json" >> "${DIR_NAME}/admin.csv"
| true
|
a2bcc284deae8d2426060b38bb79b99a11d14cb5
|
Shell
|
yagamy4680/yapps-scripts
|
/.bashrc
|
UTF-8
| 606
| 3.296875
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Prepend the board identity (serial number, profile env and version) to
# the shell prompt, then cd into the active profile's log directory.
#   $1 - path to a yac config file: lines of "<key><TAB><value>".
# Each lookup is a single awk pass (replaces the original
# 'cat | grep -P | awk' pipelines, which also required GNU grep).
function setup_prompt_yac {
	local CONFIG=$1
	local PROFILE=$(awk '$1 == "profile" {print $2}' "${CONFIG}")
	local BOARD_PROFILE_ENV=$(awk '$1 == "profile_env" {print $2}' "${CONFIG}")
	local BOARD_PROFILE_VERSION=$(awk '$1 == "profile_version" {print $2}' "${CONFIG}")
	local BOARD_SERIAL_NUMBER=$(awk '$1 == "sn" {print $2}' "${CONFIG}")
	# Cyan env, magenta version; \[ \] keep readline's prompt-width math correct.
	export PS1="${BOARD_SERIAL_NUMBER} \[\e[36m\]${BOARD_PROFILE_ENV}\[\e[m\]#\[\e[35m\]${BOARD_PROFILE_VERSION}\[\e[m\] ${PS1}"
	cd /${PROFILE}/current/logs
}
# Only activate on boards carrying the ttt system marker file.
[ -f "/tmp/ttt_system" ] && setup_prompt_yac "/tmp/ttt_system"
| true
|
30be4e022f605d205cc085a1ffe6f5df2a9761ca
|
Shell
|
pomadchin/geotrellis-pdal-benchmark
|
/scripts/emr/bootstrap-pdal.sh
|
UTF-8
| 2,931
| 2.5625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# This scripts bootstraps each node in the the EMR cluster to install PDAL.
# Builds, in order: laz-perf, LASzip, proj.4, libgeotiff, jsoncpp, GDAL,
# then PDAL itself plus its JNI bindings. Each dependency is fetched into
# /mnt, built, and installed system-wide with 'sudo make install'.
# Ensure that Spark knows where to find things.
# sudo aws s3 cp s3://geotrellis-test/pdal-test/environment /etc/environment
# Install minimal explicit dependencies.
sudo yum -y install git geos-devel libcurl-devel cmake libtiff-devel
# laz-perf (LAZ point-cloud compression)
cd /mnt
git clone https://github.com/verma/laz-perf.git laz-perf
cd laz-perf
cmake .
make
sudo make install
# laszip
cd /mnt
git clone https://github.com/LASzip/LASzip.git laszip
cd laszip
git checkout e7065cbc5bdbbe0c6e50c9d93d1cd346e9be6778 # Yes this is necessary. See https://github.com/PDAL/PDAL/issues/1205
cmake .
make
sudo make install
# proj4 (coordinate projections)
cd /mnt
wget https://github.com/OSGeo/proj.4/archive/4.9.3.zip
unzip 4.9.3.zip
cd proj.4-4.9.3
cmake .
make
sudo make install
# libgeotiff
cd /mnt
wget http://download.osgeo.org/geotiff/libgeotiff/libgeotiff-1.4.2.zip
unzip libgeotiff-1.4.2.zip
cd libgeotiff-1.4.2
cmake .
make
sudo make install
# jsoncpp
cd /mnt
wget https://github.com/open-source-parsers/jsoncpp/archive/1.7.7.zip
unzip 1.7.7.zip
cd jsoncpp-1.7.7
cmake . -DBUILD_SHARED_LIBS=ON # Need BUILD_SHARED_LIBS or pdal fails.
make
sudo make install
# Compile/install GDAL (built from the default branch — not pinned)
cd /mnt
git clone https://github.com/OSGeo/gdal.git
cd gdal/gdal
./configure
make
sudo make install
# Compile/install PDAL (fork with the JNI bindings branch)
cd /mnt
git clone https://github.com/pomadchin/PDAL.git pdal
cd pdal
git checkout feature/pdal-jni
cmake . -DWITH_APPS=ON -DWITH_LAZPERF=ON -DWITH_GEOTIFF=ON -DWITH_LASZIP=ON -DCMAKE_BUILD_TYPE=Release
make -j4
sudo make install
# Compile the JNI bindings ourselves.
cd /mnt/pdal/java
./sbt native/nativeCompile
sudo cp /mnt/pdal/java/native/target/native/x86_64-linux/bin/libpdaljni.1.4.so /usr/local/lib/
# load demo data
# hadoop fs -mkdir -p whitestare/test/lidar
# hdfs dfs -cp -p s3n://5827c3f4-ab3e-11e6-b689-3c15c2ddc9be/GRM_Lidar/Lidar_201609/Classified_LAS/ whitestare/test/lidar/
# dfs dfs -ls whitestare/test/lidar/Classified_LAS
# hadoop fs -mkdir -p whitestare/test/lidar && hdfs dfs -cp -p s3n://5827c3f4-ab3e-11e6-b689-3c15c2ddc9be/GRM_Lidar/Lidar_201609/Classified_LAS/ whitestare/test/lidar/
# hadoop fs -mkdir -p whitestare/test/lidar1/Classified_LAS && hdfs dfs -cp whitestare/test/lidar/Classified_LAS/Goonyella_14B_20160913_AMG66z55.las whitestare/test/lidar1/Classified_LAS/
# Copy prebuilt JNI bindings from S3.
# cd /mnt
# aws s3 cp s3://geotrellis-test/pdal-test/geotrellis-pdal-assembly-0.1.0-SNAPSHOT.jar /tmp/geotrellis-pdal-assembly-0.1.0-SNAPSHOT.jar
# sudo aws s3 cp s3://geotrellis-test/pdal-test/libpdaljni.1.4.so /usr/local/lib/libpdaljni.1.4.so
# spark-submit --conf spark.driver.extraJavaOptions="-Djava.library.path=/usr/local/lib/" --conf spark.executor.extraJavaOptions="-Djava.library.path=/usr/local/lib/" --class com.azavea.PackedPointCount /tmp/geotrellis-pdal-assembly-0.1.0-SNAPSHOT.jar whitestare/test/lidar/Classified_LAS/
| true
|
01e556e9c0bee8ab6e2316844b7fb837b296fda5
|
Shell
|
maxfortun/PoolController
|
/bin/pool.cgi
|
UTF-8
| 2,686
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
# CGI entry point for the pool controller web UI.
# Renders a status/actions table for the pump, drain, solar heater and
# fill subsystems, and dispatches an action script when the request URL
# carries ?action=<subsystem>/<action>.
# CGI response header; the bare echo emits the blank separator line.
echo 'Content-type: text/html'
echo
# Directory of this script; each subsystem's helpers live in a subdirectory.
SWD=$(dirname $(readlink -f $0))
# Query every subsystem for its current status and the actions it offers
# in that status.
pumpStatus=$($SWD/pump/status.sh)
pumpAction=$($SWD/pump/actions.sh "$pumpStatus")
drainStatus=$($SWD/drain/status.sh)
drainAction=$($SWD/drain/actions.sh "$drainStatus")
solarStatus=$($SWD/solar_heater/status.sh)
solarAction=$($SWD/solar_heater/actions.sh "$solarStatus")
fillStatus=$($SWD/fill/status.sh)
fillAction=$($SWD/fill/actions.sh "$fillStatus")
# Multi-line description is turned into HTML line breaks.
fillDescription=$($SWD/fill/description.sh "$fillStatus" | sed -z 's#\n#<br/>#g')
# Split QUERY_STRING on '&', then each pair on '=', exporting
# QUERY_<name>=<value>.
# NOTE(review): '$PARAMS' expands only the first array element, so only
# the first query parameter is processed — works today because the UI
# only ever sends 'action'; confirm before adding more parameters.
OFS=$IFS IFS="&" PARAMS=($QUERY_STRING) IFS=$OFS
for PARAM in $PARAMS; do
	OFS=$IFS IFS== PARAM=($PARAM) IFS=$OFS
	export QUERY_${PARAM[0]}=${PARAM[1]}
done
# Static page head; the inline JS tracks whether the page was freshly
# loaded across show/hide/blur events (the reload itself is disabled).
cat <<_EOT_
<html>
<head>
<meta name="viewport" content="width=device-width, initial-scale=1" />
<script>
window.addEventListener('load', (e) => {
	window.isRefreshed = true;
	console.log("load", window.isRefreshed);
});
window.addEventListener('pageshow', (e) => {
	console.log("pageshow", window.isRefreshed);
	if(!window.isRefreshed) {
		//window.location.reload();
	}
});
window.addEventListener('pagehide', (e) => {
	window.isRefreshed = false;
	console.log("pagehide", window.isRefreshed);
});
window.addEventListener('blur', (e) => {
	window.isRefreshed = false;
	console.log("blur", window.isRefreshed);
});
</script>
<title>Pool Control</title>
</head>
<body>
<center>
<h1>Pool Controller</h1>
_EOT_
# If a valid action was requested, run it in the background and have the
# client strip the query string from the URL so a reload does not repeat it.
# NOTE(review): QUERY_action comes straight from the URL; the -x check
# restricts execution to existing executables, but a traversal value such
# as '../x' could still escape $SWD — verify upstream sanitisation.
action="$SWD/$QUERY_action.sh"
if [ -x "$action" ]; then
cat <<_EOT_
<h2>Executing $action - please wait.</h2>
<script>
location.href = location.href.replace(/\?.*$/g,"");
</script>
_EOT_
	$action >/dev/null 2>/dev/null &
fi
# Status table: one row per subsystem; every available action is a link
# that re-invokes this CGI with ?action=<subsystem>/<action>.
cat <<_EOT_
<table>
<tr>
<th></th>
<th>Status</th>
<th>Actions</th>
<th>Description</th>
</tr>
<tr>
<th>Pump</th>
<td>$pumpStatus</td>
<td>
_EOT_
for action in $pumpAction; do
	echo "	<a href="?action=pump/$action">$action</a>"
done
cat <<_EOT_
</td>
</tr>
<tr>
<th>Drain</th>
<td>$drainStatus</td>
<td>
_EOT_
for action in $drainAction; do
	echo "	<a href="?action=drain/$action">$action</a>"
done
cat <<_EOT_
</td>
</tr>
<tr>
<th>Solar heater</th>
<td>$solarStatus</td>
<td>
_EOT_
for action in $solarAction; do
	echo "	<a href="?action=solar_heater/$action">$action</a>"
done
cat <<_EOT_
</td>
</tr>
<tr>
<th>Fill</th>
<td>$fillStatus</td>
<td>
_EOT_
for action in $fillAction; do
	echo "	<a href="?action=fill/$action">$action</a>"
done
cat <<_EOT_
</td>
<td>$fillDescription</td>
</tr>
</table>
</center>
</body>
</html>
_EOT_
| true
|
9e45a557174bf8e28466577c6bfb1a361894f8a2
|
Shell
|
timesong/nginx-rp
|
/rpu
|
UTF-8
| 1,299
| 3.53125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Generate an nginx reverse-proxy config from Docker Swarm services.
# Polls each manager endpoint in NP_SVC_ADDRS for services whose name
# matches the NP_SVC_KWD pattern, writes a 'location' block per reachable
# service, and reloads nginx whenever the service set changes. Runs forever.
#   $1 - prefix for the generated config file; the script writes to
#        "$1proxy.conf", so $1 presumably ends in '/' — confirm with caller.
echo "Create proxy.conf."
# nginx location template; <svc> and <port> are substituted per service.
# Upgrade/Connection headers enable websocket proxying.
tmpl='\t\tlocation /<svc> {\n\t\t\tproxy_pass http://<svc>:<port>/;\n\t\t\tproxy_http_version 1.1;\n\t\t\tproxy_set_header Upgrade $http_upgrade;\n\t\t\tproxy_set_header Connection $connection_upgrade;\n\t\t}'
keyword=`printenv NP_SVC_KWD`
addrs=`printenv NP_SVC_ADDRS`
old_svcs=""
# Main poll loop: never exits; sleeps between fetch attempts.
while :
do
	# Try each manager endpoint (comma-separated list) in turn.
	for addr in ${addrs//,/ }
	do
		data=`curl --silent http://$addr/services`
		svcs=""
		# Collect matching service names, keeping only those that answer a ping
		# (i.e. are resolvable/reachable on the overlay network).
		for s in `echo $data|jq -r -C '.[] | .Spec | select(.Name|test("'"${keyword}"'")) | .Name' | sort`
		do
			ping -q -c1 $s > /dev/null
			if [ $? -eq 0 ]
			then
				svcs="$svcs $s"
			fi
		done
		# Nothing reachable yet: wait and retry this endpoint list.
		if [ -z "$svcs" ]
		then
			echo "Waiting for next feching..."
			sleep 10s
			continue
		fi
		# Service set changed: regenerate the config and reload nginx.
		if [ "$old_svcs" != "$svcs" ]
		then
			echo "" > $1proxy.conf
			# One location block per service: name plus its lowest target port.
			for item in `echo $data | jq -r -C '.[] | .["Spec"] | select(.Name|test("'"${keyword}"'")) | [.Name, {"p": [.EndpointSpec.Ports[].TargetPort] | sort}]|[.[0], .[1].p[0]|tostring]| join(",")'`
			do
				# echo $item
				v1=`echo $item|cut -d , -f 1`
				v2=`echo $item|cut -d , -f 2`
				new=${tmpl//<svc>/${v1}}
				new=${new//<port>/${v2}}
				echo -e $new
				echo -e $new >> $1proxy.conf
			done
			echo "Reloading..."
			nginx -s reload
			old_svcs=$svcs
			break
		fi
	done
	echo "Waiting..."
	sleep 10s
done
| true
|
bef444d05f086e1597cad7c259d870859433c163
|
Shell
|
anderssandstrom/ecmc_bifrost_vac_tank_sat
|
/tests/scripts/ecmcScaleOffsetLines.bash
|
UTF-8
| 751
| 3.90625
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Scale, then offset, the trailing numeric value of every camonitor line.
#
# Usage: ecmcScaleOffsetLines.bash [FILE] SCALE OFFSET
#   FILE    Optional input file; stdin is read when omitted.
#   SCALE   Multiplier applied to the last field of each line.
#   OFFSET  Value added after scaling.
#
# Prints the complete camonitor lines (PV name and timestamp intact)
# with the last field rescaled.
#
# Example: scale all lines from stdin by 10 and add 2
#          bash ecmcScaleOffsetLines.bash 10 2
#
# Author: Anders Sandström, anders.sandstrom@esss.se
#
case "$#" in
  2)
    # stdin input: SCALE OFFSET
    FILE="-"
    SCALE=$1
    OFFSET=$2
    ;;
  3)
    # file input: FILE SCALE OFFSET
    FILE=$1
    SCALE=$2
    OFFSET=$3
    ;;
  *)
    echo "ecmcScaleOffsetLines: Wrong arg count..."
    exit 1
    ;;
esac
# awk rewrites only the last field ($NF); CONVFMT=%.17g preserves full
# double precision when the modified field is converted back to text.
DATA=$(cat ${FILE} | awk -v CONVFMT=%.17g -v scale=${SCALE} -v offset=${OFFSET} '{$NF*=scale;$NF+=offset; print $0}')
echo "${DATA}"
| true
|
c3e647b7f3ae0b8bc55ba166a83134f92c5eaf03
|
Shell
|
anecsulea/LncEvoDevo
|
/gene_overlaps/extract.intergenic.regions.sh
|
UTF-8
| 1,113
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
# Extract intergenic regions for one species from either the Ensembl or
# the StringTie transcript annotation.
#   $1 - species name (directory key under the data/results trees)
#   $2 - annotation source: "Ensembl" or "StringTie"
export sp=$1
export annot=$2
#####################################################################
export path=LncEvoDevo
export pathEnsembl=${path}/data/ensembl_annotations/${sp}
export pathStringTie=${path}/results/stringtie_assembly/${sp}/combined
export pathResults=${path}/results/gene_overlaps/${sp}
export pathScripts=${path}/scripts/gene_overlaps
export release=94
#####################################################################
# Select the annotation directory and the transcript-set file stem.
if [ ${annot} = "Ensembl" ]; then
    export pathAnnot=${pathEnsembl}
    export filename=FilteredTranscripts_Ensembl${release}
fi
if [ ${annot} = "StringTie" ]; then
    export pathAnnot=${pathStringTie}
    export filename=FilteredTranscripts_StringTie_Ensembl${release}
fi
#####################################################################
# BUG FIX: both file paths previously interpolated $(unknown), which
# would try to run a nonexistent 'unknown' command and produce empty
# names. ${filename}, set above and otherwise unused, is the intended
# value — confirm against the companion scripts in this directory.
perl ${pathScripts}/extract.intergenic.regions.pl --pathExonBlocks=${pathAnnot}/ExonBlocks_${filename}.txt --minDistance=5000 --minSize=1000 --pathOutput=${pathAnnot}/IntergenicRegions_MinDistance5kb_MinSize1kb_${filename}.txt
#####################################################################
| true
|
7d87444472721db6e4328c200326357c43973427
|
Shell
|
snabbco/snabb
|
/src/program/wall/tests/bench.sh
|
UTF-8
| 863
| 3.546875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# A benchmark script for 'snabb wall filter'
#
# Run this script with three arguments. The first two are the PCI addresses for the
# two NICs that are connected. The third is the CPU to pin the filter.
PCI1=$1
PCI2=$2
CPU=$3
DURATION=10
ITERS=10
echo "Firewall on $PCI1 (CPU $CPU). Packetblaster on $PCI2."
# Run ITERS rounds: start the firewall on PCI1 (pinned to CPU, dropping
# BITTORRENT traffic), replay the given pcap at it from PCI2, then print
# the firewall's throughput summary line.
#   $1 - pcap file name, relative to program/wall/tests/data/
# NOTE(review): the mktemp output file is never removed, so one temp
# file leaks per benchmark invocation.
function benchmark {
    output=`mktemp`
    echo "BENCH ($1, $ITERS iters, $DURATION secs)"
    for (( i=1; i<=$ITERS; i++ ))
    do
        # run the filter
        ./snabb wall filter --cpu $CPU -p -e "{ BITTORRENT = 'drop', default = 'accept' }" -D $DURATION -4 192.168.0.1 -m "01:23:45:67:89:ab" pci $PCI1 > $output &
        # blast with pcap traffic
        ./snabb packetblaster replay -D $DURATION program/wall/tests/data/$1 $PCI2 > /dev/null
        grep "bytes:.*packets:.*bps:" $output
    done
}
benchmark BITTORRENT.pcap
benchmark rtmp_sample.cap
| true
|
9df28d875e073efa005bd17a8d58b7c913a8dbe1
|
Shell
|
DasJott/ScreenRotate
|
/toggleScreenRotation.sh
|
UTF-8
| 484
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/sh
# Toggle the built-in panel (eDP1) between normal and inverted rotation,
# disabling the keyboard and touchpad while inverted (tablet-style use).
# Device IDs are looked up by name via xinput.
touchpad=$(xinput | grep "SYNA2B23:00 06CB:2714" | sed 's/^.*id=\([0-9]*\).*$/\1/')
keyboard=$(xinput | grep "AT Translated Set 2 keyboard" | sed 's/^.*id=\([0-9]*\).*$/\1/')
# Current rotation of eDP1 ("normal", "inverted", ...).
current=$(xrandr -q --verbose | grep "eDP1" | cut -d ' ' -f6)
# BUG FIX: $current was unquoted; when xrandr reports no eDP1 the test
# degenerated to "[ = normal ]" and errored. Quoting keeps it well-formed.
if [ "$current" = "normal" ];
then
  xrandr --output eDP1 --rotate inverted
  xinput disable "$keyboard"
  xinput disable "$touchpad"
else
  xrandr --output eDP1 --rotate normal
  xinput enable "$keyboard"
  xinput enable "$touchpad"
fi
exit 0
| true
|
b6f78adb2f557923317943c8a78d8ff234168ae4
|
Shell
|
wso2/testgrid-jenkins-library
|
/scripts/deployment-handler.sh
|
UTF-8
| 5,180
| 3.625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# -------------------------------------------------------------------------------------
# Copyright (c) 2022 WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
#
# WSO2 Inc. licenses this file to you under the Apache License,
# Version 2.0 (the "License"); you may not use this file except
# in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# --------------------------------------------------------------------------------------
# Validates and deploys the CloudFormation templates for one testgrid
# deployment, then records common parameters into deployment.properties.
#   $1   - deployment name
#   $2.. - CloudFormation template locations (may arrive as a
#          bracketed, comma-separated list)
deploymentName=$1; shift
cloudformationFileLocations=$@
# Strip '[', ']' and ',' from the list representation.
cloudformationFileLocations=$(echo $cloudformationFileLocations | tr -d '[],')
# NOTE(review): the sed below replaces commas, but tr already deleted
# them — the actual splitting here happens via word splitting in the
# array assignment. Verify the intended input format.
cloudformationFileLocations=(`echo $cloudformationFileLocations | sed 's/,/\n/g'`)
currentScript=$(dirname $(realpath "$0"))
deploymentDirectory="${WORKSPACE}/deployment/${deploymentName}"
parameterFilePath="${deploymentDirectory}/parameters.json"
outputFile="${deploymentDirectory}/deployment.properties"
# Shared helpers: extractParameters, updateJsonFile, log_info, log_error.
source ${currentScript}/common-functions.sh
product=$(extractParameters "Product" ${parameterFilePath})
testType=$(extractParameters "TestType" ${parameterFilePath})
echo "-----------"
echo "Deployment Directory: "${deploymentDirectory}
echo "CloudFormation Locations: "${cloudformationFileLocations[*]}
echo "-----------"
# Validate every template with the AWS API before deploying anything;
# on the first invalid template, run post-actions cleanup and abort.
function cloudformationValidation() {
    ## Validate the CFN file before deploying
    for cloudformationFileLocation in ${cloudformationFileLocations[@]}
    do
        log_info "Validating cloudformation script ${cloudformationFileLocation}!"
        cloudformationResult=$(aws cloudformation validate-template --template-body file://${cloudformationFileLocation})
        if [[ $? != 0 ]];
        then
            log_error "Cloudformation Template Validation failed!"
            bash ${currentScript}/post-actions.sh ${deploymentName}
            exit 1
        else
            log_info "Cloudformation template is valid!"
        fi
    done
}
# The output locations in S3 bucket will be created seperately for each deployment
# Therefore the output location which was written at the beginning will be changed
function changeCommonLogPath(){
    local s3OutputBucketLocation=$(extractParameters "S3OutputBucketLocation" ${parameterFilePath})
    local stackName=$(extractParameters "StackName" ${parameterFilePath})
    local deployementLogPath="${s3OutputBucketLocation}/${stackName}/test-outputs"
    updateJsonFile "S3OutputBucketLocation" ${deployementLogPath} ${parameterFilePath}
}
# Dispatch to the product/test-type specific deploy script. Integration
# ("intg") tests use a per-product intg-deploy.sh (apim is an alias for
# wso2am); everything else uses the product's deploy.sh.
function cloudformationDeployment(){
    log_info "Executing product specific deployment..."
    log_info "Running ${product} deployment.."
    if [[ ${testType} == "intg" ]];
    then
        if [[ ${product} == "wso2am" ]];
        then
            bash ${currentScript}/apim/intg/intg-deploy.sh ${deploymentName} ${cloudformationFileLocations[@]}
        else
            bash ${currentScript}/${product}/intg/intg-deploy.sh ${deploymentName} ${cloudformationFileLocations[@]}
        fi
    else
        bash ${currentScript}/${product}/deploy.sh ${deploymentName} ${cloudformationFileLocations[@]}
    fi
    if [[ $? != 0 ]];
    then
        # If deployment fails the handler should also fail
        exit 1
    fi
}
# Append one "name=value" line to deployment.properties.
# Two calling modes, selected by $3:
#   $3 == true : $1 is a key to read from parameters.json, $2 the output name
#   otherwise  : $1 is the output name, $2 the literal value
function writeCommonVariables(){
    extractRequired=$3
    if [[ ${extractRequired} = true ]];
    then
        getVariable=$1
        variableName=$2
        variableValue=$(extractParameters $getVariable ${parameterFilePath})
    else
        variableName=$1
        variableValue=$2
    fi
    outputEntry="${variableName}=${variableValue}"
    echo "${outputEntry}" >> ${outputFile}
}
# Copy the standard set of parameters (credentials, repo info, test
# configuration) from parameters.json into deployment.properties.
function addCommonVariables(){
    writeCommonVariables "S3OutputBucketLocation" "S3OutputBucketLocation" true
    writeCommonVariables "Product" "Product" true
    writeCommonVariables "ProductVersion" "ProductVersion" true
    writeCommonVariables "WUMUsername" "WUMUsername" true
    writeCommonVariables "WUMPassword" "WUMPassword" true
    writeCommonVariables "GithubUserName" "GithubUserName" true
    writeCommonVariables "GithubPassword" "GithubPassword" true
    writeCommonVariables "ProductRepository" "ProductRepository" true
    writeCommonVariables "ProductTestBranch" "ProductTestBranch" true
    writeCommonVariables "ProductTestScriptLocation" "ProductTestScriptLocation" true
    writeCommonVariables "S3AccessKeyID" "s3accessKey" true
    writeCommonVariables "S3SecretAccessKey" "s3secretKey" true
    writeCommonVariables "TESTGRID_EMAIL_PASSWORD" "testgridEmailPassword" true
    writeCommonVariables "CustomURL" "CustomURL" true
    writeCommonVariables "UpdateType" "UpdateType" true
    writeCommonVariables "TestType" "TestType" true
    writeCommonVariables "SurefireReportDir" "SurefireReportDir" true
}
# Orchestrate: retarget log path, validate, deploy, record parameters.
function main(){
    changeCommonLogPath
    cloudformationValidation
    cloudformationDeployment
    addCommonVariables
}
main
| true
|
9cd46b29ef194bc64f42932a49cf66892e59204d
|
Shell
|
gorkinovich/UNIX
|
/UPSAM/Practica-4.3b.sh
|
UTF-8
| 349
| 3.328125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#####################################################################
# Report time jumps in the uptime history file: print every record
# whose 7th field grew by more than 6 relative to the previous record.
# FIX: $DIR is now quoted — it is built from $HOME, which may contain
# spaces and would otherwise word-split the awk file argument.
DIR="$HOME/uptime_history"
echo "Numero de saltos de tiempo: "
awk '
{
	if (NR == 1)
	{
		# First record: just remember the baseline value.
		ant = $7
	}
	else
	{
		# Report a jump when field 7 advanced by more than 6 units.
		if (($7 - ant) > 6)
		{
			printf ("Salto (%d): %f -> %f\n", NR, ant, $7)
		}
		ant = $7
	}
}' "$DIR" #|wc -l
exit 0
| true
|
917d7c255b7121191b4f3d15fdd3f7c98ffd3219
|
Shell
|
Sword-Smith/EggsML
|
/concieggs/cmds/ugrundlov
|
UTF-8
| 281
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/sh
#
# Giv noget Markov-kædet grundlov. Brug: ugrundlov [ANTAL ORD]
# (Emit a Markov-chained snippet of the Danish constitution.)
#   $1 - optional word count; defaults to 40.
antal_ord="$1"
if ! [ "$antal_ord" ]; then
	antal_ord=40;
fi
# FIX: removed an unused 'tmpfile=$(mktemp)' that leaked one temp file
# per invocation; the variable was never referenced.
# Random paragraph/section prefix, then the Markov-generated text.
# printf replaces the non-portable 'echo -n' under /bin/sh.
printf '%s' "§$(random 1 89), stk. $(random 1 5). "
markov "$antal_ord" grundlov "$CONCIEGGS_DB_DIR/grundloven/grundlov-markov-ready" | withSpaces
| true
|
9f5e44aff8ff8f870ca90db67c92a884703b37cf
|
Shell
|
gokul-sarath07/CodinClub-BootCamp
|
/day8_assignment/month.sh
|
UTF-8
| 847
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
# Fill month[0..49] with uniformly random month names, then print a
# frequency table sorted by descending count.
# Month names indexed 0..11; RANDOM%12 picks one uniformly, which is
# exactly what the original 12-arm if/elif ladder over RANDOM%12+1 did.
names=(January February March April May June July August September October November December)
declare -a month
i=0
while [[ $i -lt 50 ]]
do
	month[$i]=${names[$((RANDOM % 12))]}
	((i++))
done
# One name per line -> count duplicates -> most frequent first.
printf '%s\n' "${month[@]}" | sort | uniq -c | sort -nr
| true
|
4b36592fcbd809b1f5a1baaec7f7d21806f3fd3f
|
Shell
|
BucknellFilmSearch/FilmMediaDatabase
|
/scripts/remote/deploy.sh
|
UTF-8
| 1,343
| 3.921875
| 4
|
[] |
no_license
|
#!/bin/bash
# Build the web client (with yarn, or npm as a fallback) and deploy the
# result to the EC2 host configured in configuration/ec2/config.json
# via ssh + tar, restarting the app with pm2.
# Print $1 in terminal color index $2.
# BUG FIX: this function was defined twice verbatim; the duplicate
# definition has been removed.
colorize() {
  echo -e "$(tput setaf $2)$1$(tput sgr0)";
}
#put me in the right dir (repo root, two levels above this script)
pushd "$(dirname "$0")/../../" &> /dev/null
rm -rf build
pushd src/server/ &> /dev/null
build_tool=''
# Check for either npm or yarn
type yarn &> /dev/null
if [[ $? != 0 ]]; then
  type npm &> /dev/null
  if [[ $? != 0 ]]; then
    # Ask user to install yarn or npm if neither is found
    echo $(colorize "✘ Please install https://yarnpkg.com/en/ or https://www.npmjs.com/ and then run this script again..." 1)
    exit 1
  else
    build_tool='npm'
    echo $(colorize '✔ Found npm install:' 2) $(npm --version)
  fi
else
  build_tool='yarn'
  echo $(colorize '✔ Found yarn install:' 2) $(yarn --version)
fi
# Run the build, and install libraries
echo "Running build script with $build_tool"
$build_tool run build \
  && popd &> /dev/null
# Remote connection details come from the EC2 config file.
user=$(./scripts/utils/read.py configuration/ec2/config.json user)
addr=$(./scripts/utils/read.py configuration/ec2/config.json addr)
# Deploy files using ssh and tar
echo "Copying to remote"
tar -zcf - ./build | ssh -i configuration/ec2/default_cred.pem $user@$addr 'sudo tar -zxf - --no-same-owner -C /var/www --strip-components=2; cd /var/www; npm install; pm2 startOrRestart pm2_config.yml'
# success
popd &> /dev/null
exit 0
| true
|
547395f1ee7b51a3a2163c06929f94319e356ad4
|
Shell
|
mavelyc/bash-scripts
|
/lab3/minor3_2
|
UTF-8
| 138
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# Usage: minor3_2 INFILE OUTFILE PATTERN1 PATTERN2
# Write every line of INFILE matching PATTERN1 or PATTERN2 to OUTFILE,
# then append a literal "done" line.
if [ $# -ne 4 ]
then
	echo "My guy, what's with the constant brickage"
	exit
fi
# FIX: 'egrep' is deprecated in favor of 'grep -E', and the file
# arguments are now quoted against whitespace in paths.
grep -E "$3|$4" "$1" > "$2"
echo "done" >> "$2"
| true
|
f315c88fa85e080f180227e537e53b377623f865
|
Shell
|
jagguli/arch-ppa
|
/src/solaar-git/PKGBUILD
|
UTF-8
| 1,008
| 2.53125
| 3
|
[
"MIT"
] |
permissive
|
# Maintainer: Maxime Poulin <maxpoulin64@gmail.com>
# Contributor: Arnaud Taffanel <dev@taffanel.org>
# Contributor: Victor Häggqvist <victor@snilius.com>
# PKGBUILD for the development (git HEAD) build of Solaar, the Logitech
# Unifying Receiver device manager.
pkgname=solaar-git
pkgver=r985.b852903
pkgrel=1
pkgdesc="Linux devices manager for the Logitech Unifying Receiver."
arch=('any')
url="http://pwr.github.io/Solaar/"
license=('GPL2')
depends=('python' 'python-pyudev' 'python-gobject' 'pygtk' 'python-six')
optdepends=('libappindicator-gtk3')
makedepends=('git')
provides=('solaar')
conflicts=('solaar')
install='solaar.install'
source=('git+https://github.com/pwr/Solaar.git' 'solaar.install')
md5sums=('SKIP' '4057d7179fe2ae9718b8aac4607a2c47')
# Derive the VCS version: r<commit count>.<short hash> of the checkout.
pkgver() {
  cd "$srcdir/Solaar"
  printf "r%s.%s" "$(git rev-list --count HEAD)" "$(git rev-parse --short HEAD)"
}
# Install via setuptools into the package root, plus the udev rule that
# grants device-node permissions for the Unifying receiver.
package() {
  cd "$srcdir/Solaar"
  python3 setup.py install --root="$pkgdir/" --optimize=1
  install -D -m0644 rules.d/42-logitech-unify-permissions.rules \
    "$pkgdir/etc/udev/rules.d/42-logitech-unify-permissions.rules"
}
# NOTE(review): post_install() normally lives in the .install file
# (solaar.install, referenced above), not in the PKGBUILD — makepkg will
# not invoke this function from here; confirm it is not dead code.
post_install() {
  xdg-icon-resource forceupdate
  update-desktop-database -q
}
| true
|
a09218ac280eba9cdec231bc9aad8feaef592639
|
Shell
|
OpenVnmrJ/OpenVnmrJ
|
/src/scripts/writeacct.sh
|
UTF-8
| 405
| 2.953125
| 3
|
[
"Apache-2.0",
"GPL-3.0-only"
] |
permissive
|
#!/bin/sh
#writeacct.sh
# The acct macro needs to be able to append to the acccounting file
# as root. acct can call this script using sudo to append to the file.
# Usage: "writeacct file_path operator owner start/done time"
#   $1 - accounting file path   $2 - operator   $3 - owner
#   $4 - event key (start/done) $5 - epoch time
# The line should look something like the following:
# account="" operator="vnmr1" owner="vnmr1" done=1259097053
# FIX: quote the echoed record and the redirect target so a path or
# name containing spaces cannot word-split or glob.
echo "account=\"\" operator=\"$2\" owner=\"$3\" $4=$5" >> "$1"
| true
|
3be8c2e7474bf64eda70d0bb28a300fc5c84214c
|
Shell
|
neilellis/easydeploy
|
/remote/install-component.sh
|
UTF-8
| 18,999
| 2.984375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -eux
chmod 755 ~/bin/*
error() {
echo "**** EASYDEPLOY-COMPONENT-INSTALL-FAILED ****"
sourcefile=$1
lineno=$2
code=$3
echo "$1:$2" $3
set +e
exit $3
}
trap 'error "${BASH_SOURCE}" "${LINENO}" "$?"' ERR
cd $(dirname $0)
DIR=$(pwd)
echo "Setting defaults"
export DATACENTER=$1
shift
export COMPONENT=$1
shift
export DEPLOY_ENV=$1
shift
export PROJECT=$1
shift
export BACKUP_HOST=$1
shift
export MACHINE_NAME=$1
shift
export TARGET_COMPONENT=$1
shift
export EASYDEPLOY_REMOTE_IP_RANGE=$1
shift
export APP_ARGS="$@"
export EASYDEPLOY_PRIMARY_ADMIN_SERVER=
export EASYDEPLOY_SECONDARY_ADMIN_SERVER=
export EASYDEPLOY_PORTS=
export EASYDEPLOY_PRIMARY_PORT=
export EASYDEPLOY_UPDATE_CRON="0 4 * * *"
export EASYDEPLOY_PACKAGES=
export EASYDEPLOY_STATE="stateful"
export EASYDEPLOY_PROCESS_NUMBER=1
export EASYDEPLOY_EXTERNAL_PORTS=
export EASYDEPLOY_SERVICE_CHECK_INTERVAL=300s
export EASYDEPLOY_UPDATE_CRON=none
export DEBIAN_FRONTEND=noninteractive
export SERF_VERSION=0.6.3
export CONSUL_VERSION=0.4.0
echo "Creating directories"
sudo [ -d /home/easydeploy/bin ] || mkdir /home/easydeploy/bin
sudo [ -d /home/easydeploy/usr/bin ] || mkdir -p /home/easydeploy/usr/bin
sudo [ -d /home/easydeploy/usr/etc ] || mkdir -p /home/easydeploy/usr/etc
sudo [ -d /var/log/easydeploy ] || mkdir /var/log/easydeploy
sudo [ -d /var/easydeploy ] || mkdir /var/easydeploy
sudo [ -d /var/easydeploy/.install ] || mkdir /var/easydeploy/.install
sudo [ -d /var/easydeploy/share ] || mkdir /var/easydeploy/share
sudo [ -d /var/easydeploy/share/tmp ] || mkdir /var/easydeploy/share/tmp
sudo [ -d /var/easydeploy/share/tmp/hourly ] || mkdir /var/easydeploy/share/tmp/hourly
sudo [ -d /var/easydeploy/share/tmp/daily ] || mkdir /var/easydeploy/share/tmp/daily
sudo [ -d /var/easydeploy/share/tmp/monthly ] || mkdir /var/easydeploy/share/tmp/monthly
sudo [ -d /var/easydeploy/share/backup ] || mkdir /var/easydeploy/share/backup
sudo [ -d /var/easydeploy/share/sync ] || mkdir /var/easydeploy/share/sync
sudo [ -d /var/easydeploy/share/sync/global ] || mkdir /var/easydeploy/share/sync/global
sudo [ -d /var/easydeploy/share/sync/discovery ] || mkdir /var/easydeploy/share/sync/discovery
sudo [ -d /var/easydeploy/share/sync/env ] || mkdir /var/easydeploy/share/sync/env
sudo [ -d /var/easydeploy/share/.config/ ] || mkdir /var/easydeploy/share/.config/
sudo [ -d /var/easydeploy/share/.config/sync/discovery ] || mkdir -p /var/easydeploy/share/.config/sync/discovery
[ -d /ezlog ] || sudo ln -s /var/log/easydeploy /ezlog
[ -d /ezshare ] || sudo ln -s /var/easydeploy/share /ezshare
[ -d /ez ] || sudo ln -s /var/easydeploy /ez
[ -d /ezbin ] || sudo ln -s /home/easydeploy/bin /ezbin
[ -d /ezubin ] || sudo ln -s /home/easydeploy/usr/bin /ezubin
[ -d /ezuetc ] || sudo ln -s /home/easydeploy/usr/etc /ezuetc
[ -d /ezsync ] || sudo ln -s /var/easydeploy/share/sync /ezsync
[ -d /ezbackup ] || sudo ln -s /var/easydeploy/share/backup /ezbackup
[ -d /eztmp ] || sudo ln -s /var/easydeploy/share/tmp /eztmp
if /sbin/ifconfig | grep "eth0 "
then
/sbin/ifconfig eth0 | sed -En 's/127.0.0.1//;s/.*inet (addr:)?(([0-9]*\.){3}[0-9]*).*/\2/p' > /var/easydeploy/share/.config/ip
else
/sbin/ifconfig p1p1 | sed -En 's/127.0.0.1//;s/.*inet (addr:)?(([0-9]*\.){3}[0-9]*).*/\2/p' > /var/easydeploy/share/.config/ip
fi
export EASYDEPLOY_HOST_IP=$(</var/easydeploy/share/.config/ip)
sudo cp -f ~/remote/*.sh /home/easydeploy/bin
[ -d ~/bin/ ] || mkdir ~/bin; cp -f ~/remote/bin/* ~/bin
chmod 755 ~/bin/*
mv -f ~/remote/bash_profile ~/.bash_profile
sudo cp -f ~/.dockercfg /home/easydeploy/
[ -d /home/easydeploy/project/ezd/bin/ ] || mkdir -p /home/easydeploy/project/ezd/bin/
[ -d /home/easydeploy/project/ezd/etc/ ] || mkdir -p /home/easydeploy/project/ezd/etc/
cp -rf ~/project/* /home/easydeploy/project/
[ -d ~/user-scripts ] && sudo cp -rf ~/user-scripts/* /home/easydeploy/project/ezd/bin/
[ -d ~/user-config ] && sudo cp -rf ~/user-config/* /home/easydeploy/project/ezd/etc/
sudo chown easydeploy:easydeploy /home/easydeploy/.dockercfg ;
sudo chown -R easydeploy:easydeploy /home/easydeploy/project
sudo chmod 700 /home/easydeploy/.dockercfg
sudo chmod 755 /home/easydeploy/bin/*
sudo chmod 755 /home/easydeploy/project/ezd/bin/* ||
echo "Setting up deployment project"
sudo su - easydeploy <<EOF
set -eu
cd /home/easydeploy
chmod 600 ~/.ssh/*
chmod 700 ~/.ssh
EOF
echo ${EASYDEPLOY_HOST_IP} > /var/easydeploy/share/.config/ip
echo "Reading config"
. /home/easydeploy/project/ezd/etc/ezd.sh
#store useful info for scripts
echo "Saving config"
echo ${EASYDEPLOY_STATE} > /var/easydeploy/share/.config/edstate
echo ${APP_ARGS} > /var/easydeploy/share/.config/app_args
echo ${COMPONENT} > /var/easydeploy/share/.config/component
echo ${DEPLOY_ENV} > /var/easydeploy/share/.config/deploy_env
echo ${PROJECT} > /var/easydeploy/share/.config/project
echo ${BACKUP_HOST} > /var/easydeploy/share/.config/backup_host
echo ${MACHINE_NAME} > /var/easydeploy/share/.config/hostname
echo ${TARGET_COMPONENT} > /var/easydeploy/share/.config/target
cp ~/serf_key /var/easydeploy/share/.config/serf_key
sudo chown easydeploy:easydeploy /var/easydeploy/share
[ -f machines.txt ] && cp -f machines.txt /var/easydeploy/share/.config/machines.txt
#Install additional host packages, try to avoid that and keep them in
#the Dockerfile where possible.
if [ ! -z "${EASYDEPLOY_PACKAGES}" ]
then
echo "Installing custom packages ${EASYDEPLOY_PACKAGES}"
sudo apt-get -q install -y ${EASYDEPLOY_PACKAGES}
fi
#Sync between nodes using btsync
echo "Installing Bit Torrent sync"
if [ ! -f /usr/local/bin/btsync ]
then
curl http://download.getsyncapp.com/endpoint/btsync/os/linux-x64/track/stable > /tmp/btsync.tgz
tar -zxvf /tmp/btsync.tgz
mv btsync /usr/local/bin/btsync
chmod 755 /usr/local/bin/btsync
sudo apt-get install -q -y rhash
fi
export EASYDEPLOY_GLOBAL_SYNC_SECRET="$(cat /home/easydeploy/.ssh/id_rsa | sed -e 's/0/1/g' | rhash --sha512 - | cut -c1-64 )"
export EASYDEPLOY_COMPONENT_SYNC_SECRET="$(cat /home/easydeploy/.ssh/id_rsa /var/easydeploy/share/.config/component /var/easydeploy/share/.config/project /var/easydeploy/share/.config/deploy_env | rhash --sha512 - | cut -c1-64)"
export EASYDEPLOY_ENV_SYNC_SECRET="$(cat /home/easydeploy/.ssh/id_rsa /var/easydeploy/share/.config/deploy_env /var/easydeploy/share/.config/project | rhash --sha512 - | cut -c1-64)"
known_hosts="\"localhost\""
for m in $(cat ~/ machines.txt | cut -d: -f2 | tr '\n' ' ')
do
known_hosts="${known_hosts},\"${m}\""
done
sudo cat > /etc/btsync.conf <<EOF
{
"device_name": "$EASYDEPLOY_HOST_IP",
"listening_port": 9595,
"check_for_updates": false,
"storage_path":"/var/easydeploy/.btsync",
"use_upnp": false,
"download_limit": 0,
"upload_limit": 0,
"shared_folders": [
{
"secret": "$EASYDEPLOY_GLOBAL_SYNC_SECRET",
"dir": "/var/easydeploy/share/sync/global",
"use_relay_server": true,
"use_tracker": true,
"use_dht": false,
"search_lan": true,
"use_sync_trash": true
},
{
"secret": "$EASYDEPLOY_COMPONENT_SYNC_SECRET",
"dir": "/var/easydeploy/share/sync/component",
"use_relay_server": true,
"use_tracker": true,
"use_dht": false,
"search_lan": true,
"use_sync_trash": true
},
{
"secret": "$EASYDEPLOY_ENV_SYNC_SECRET",
"dir": "/var/easydeploy/share/sync/env",
"use_relay_server": true,
"use_tracker": true,
"use_dht": false,
"search_lan": true,
"use_sync_trash": true
},
{
"secret": "$EASYDEPLOY_ENV_SYNC_SECRET",
"dir": "/var/easydeploy/share/.config/sync",
"use_relay_server": true,
"use_tracker": true,
"use_dht": false,
"search_lan": true,
"use_sync_trash": true,
"known_hosts": [
$known_hosts
]
}
]
}
EOF
sudo chown -R easydeploy:easydeploy /var/easydeploy/share/sync
sudo chown easydeploy:easydeploy /etc/btsync.conf
#Serf is used for service discovery and admin tasks
if [ ! -f /var/easydeploy/.install/serf ]
then
echo "Installing serf for node discovery and communication"
sudo apt-get -q install -y unzip
[ -f ${SERF_VERSION}_linux_amd64.zip ] || wget -q https://dl.bintray.com/mitchellh/serf/${SERF_VERSION}_linux_amd64.zip
unzip ${SERF_VERSION}_linux_amd64.zip
sudo mv -f serf /usr/local/bin
[ -d /etc/serf ] || sudo mkdir /etc/serf
sudo cp -f ~/remote/serf-event-handler.sh /etc/serf/event-handler.sh
[ -d /etc/serf ] || sudo mkdir /etc/serf
[ -d /etc/serf/handlers ] && sudo rm -rf /etc/serf/handlers
sudo cp -rf ~/remote/serf-handlers /etc/serf/handlers
sudo chmod 755 /etc/serf/handlers/*
sudo chmod 755 /etc/serf/event-handler.sh
touch /var/easydeploy/.install/serf
fi
consul_server=true
[ -z "$EASYDEPLOY_ADMIN_SERVER" ] && consul_server=false
[ -d /etc/consul.d ] || sudo mkdir /etc/consul.d
cat > /etc/consul.d/server.json <<EOF
{
"datacenter": "${DATACENTER}",
"data_dir": "/var/easydeploy/.consul",
"log_level": "INFO",
"bootstrap_expect": 3,
"rejoin_after_leave" : true,
"leave_on_terminate" : true,
"server": ${consul_server},
"domain" : "consul.",
"encrypt" :"$(cat /var/easydeploy/share/.config/serf_key)",
"leave_on_terminate" : true
}
EOF
if [ ! -f /var/easydeploy/.install/consul ]
then
echo "Installing consul for service discovery and communication"
sudo apt-get install -y unzip
[ -f ${CONSUL_VERSION}_linux_amd64.zip ] || wget https://dl.bintray.com/mitchellh/consul/${CONSUL_VERSION}_linux_amd64.zip
unzip ${CONSUL_VERSION}_linux_amd64.zip
sudo mv -f consul /usr/local/bin
touch /var/easydeploy/.install/consul
fi
if [ ! -f /var/easydeploy/.install/consul_ui ]
then
[ -f ${CONSUL_VERSION}_web_ui.zip ] || wget https://dl.bintray.com/mitchellh/consul/${CONSUL_VERSION}_web_ui.zip
mkdir webziptmp
unzip -d webziptmp ${CONSUL_VERSION}_web_ui.zip
rm -rf /usr/local/consul_ui
mv webziptmp/dist /usr/local/consul_ui
rm -rf webziptmp
touch /var/easydeploy/.install/consul_ui
fi
ports=( ${EASYDEPLOY_PRIMARY_PORT} ${EASYDEPLOY_PORTS} ${EASYDEPLOY_EXTERNAL_PORTS} )
if [ ! -z "$ports" ]
then
primary_port=${ports[0]}
cat > /etc/consul.d/component.json <<EOF
{
"service": {
"name": "${MACHINE_NAME}",
"port": ${primary_port},
"check": {
"script": "/home/easydeploy/bin/consul_health_check.sh",
"interval": "30s"
}
}
}
EOF
fi
cat > /etc/bind/ezd.conf <<EOF
zone "ezd" IN {
type master;
file "/etc/bind/ezd.zone";
};
EOF
cat > /etc/bind/ezd.zone <<'EOF'
$ORIGIN ezd.
$TTL 5
ezd. IN SOA localhost. support.cazcade.com. (
2001062501 ; serial
5 ; refresh after 5 secs
5 ; retry after 5 secs
5 ; expire after 5 secs
5 ) ; minimum TTL of 5 secs
;
;
ezd. IN NS 127.0.0.1
EOF
cat > /etc/bind/consul.conf <<EOF
zone "consul" IN {
type forward;
forward only;
forwarders { 127.0.0.1 port 8600; };
};
EOF
cat > /etc/bind/named.conf.options <<EOF
options {
listen-on port 53 { any;};
listen-on-v6 port 53 { ::1; };
directory "/var/cache/bind";
allow-query { any; };
recursion yes;
dnssec-enable no;
dnssec-validation no;
version "none of your business";
};
include "/etc/bind/ezd.conf";
include "/etc/bind/consul.conf";
EOF
ports=( ${EASYDEPLOY_PRIMARY_PORT} ${EASYDEPLOY_PORTS} ${EASYDEPLOY_EXTERNAL_PORTS} )
if [ ! -z "$ports" ]
then
primary_port=${ports[0]}
fi
#Logstash is used for log aggregation
if [ ! -f /var/easydeploy/.install/logstash ] && [[ -n "$INSTALL_LOGSTASH_FLAG" ]]
then
if [ ! -d /usr/local/logstash ]
then
wget -q https://download.elasticsearch.org/logstash/logstash/logstash-1.4.0.tar.gz
tar -zxvf logstash-1.4.0.tar.gz
mv logstash-1.4.0 /usr/local/logstash
fi
touch /etc/logstash.conf
chown easydeploy:easydeploy /etc/logstash.conf
touch /var/easydeploy/.install/logstash
fi
if [ ! -f /var/easydeploy/.install/sysdig ] && [[ -n "$INSTALL_SYSDIG_FLAG" ]]
then
echo "Adding sysdig for diagnostics"
curl -s https://s3.amazonaws.com/download.draios.com/stable/install-sysdig | sudo bash
touch /var/easydeploy/.install/sysdig
fi
if [[ ! -z "${EASYDEPLOY_UPDATE_CRON}" ]]
then
echo $pathline > /etc/cron.d/update
echo "${EASYDEPLOY_UPDATE_CRON} root /bin/bash -l -c '/home/easydeploy/bin/update.sh $[ ( $RANDOM % 3600 ) + 1 ]s &> /var/log/easydeploy/update.log'" >> /etc/cron.d/update
fi
chmod 755 /etc/cron.d/*
sudo su - easydeploy -c "crontab" <<EOF2
0 * * * * find /var/easydeploy/share/tmp/hourly -mmin +60 -exec rm {} \;
0 3 * * * find /var/easydeploy/share/tmp/daily -mtime +1 -exec rm {} \;
0 4 * * * find /var/easydeploy/share/tmp/monthly -mtime +31 -exec rm {} \;
EOF2
cd
if [[ ! -f /var/easydeploy/.install/docker ]]
then
echo "Installing Docker"
# sudo apt-get install -y docker.io
curl -sSL https://get.docker.io/ubuntu/ | sudo sh
# sudo ln -sf /usr/bin/docker.io /usr/local/bin/docker
#sudo addgroup worker docker
sudo addgroup easydeploy docker
sudo chmod a+rwx /var/run/docker.sock
sudo chown -R easydeploy:easydeploy /home/easydeploy/
grep "limit nofile 65536 65536" /etc/init/docker.conf || echo "limit nofile 65536 65536" >> /etc/init/docker.conf
sudo service docker start || true
touch /var/easydeploy/.install/docker
fi
# Install the weave networking helpers on first run.
if [ ! -f /usr/local/bin/weave ]
then
# Fix: 'sudo curl ... > file' performs the redirection as the *invoking*
# user, not root, so the write to /usr/local/bin could fail; pipe through
# 'sudo tee' so the privileged process does the writing.
curl -fsSL https://raw.githubusercontent.com/zettio/weave/master/weaver/docker-ns | sudo tee /usr/local/bin/docker-ns > /dev/null
curl -fsSL https://raw.githubusercontent.com/dpw/weave/87_multicast_route/weaver/weave | sudo tee /usr/local/bin/weave > /dev/null
sudo chmod +x /usr/local/bin/docker-ns
sudo chmod a+x /usr/local/bin/weave
# sudo wget -O /usr/local/bin/weave https://raw.githubusercontent.com/zettio/weave/master/weaver/weave
# sudo chmod a+x /usr/local/bin/weave
fi
# Install fig (the docker-compose predecessor) on first run.
if [ ! -f /usr/local/bin/fig ]
then
# Fix: 'sudo curl ... > file' redirected as the invoking user; use sudo tee
# so the write into /usr/local/bin is actually privileged. -fsSL keeps the
# original -L redirect-following and fails on HTTP errors instead of saving
# an error page as the binary.
curl -fsSL https://github.com/docker/fig/releases/download/0.5.2/linux | sudo tee /usr/local/bin/fig > /dev/null
sudo chmod +x /usr/local/bin/fig
fi
sudo chown -R easydeploy:easydeploy /var/easydeploy
# Fix: 'sudo [ -d ... ] && rm -rf ...' elevated only the test; the rm ran as
# the invoking user. Run the test locally and elevate the destructive rm.
[ -d /home/easydeploy/modules ] && sudo rm -rf /home/easydeploy/modules
sudo cp -r ~/remote/modules /home/easydeploy
sudo chown easydeploy:easydeploy /var/log/easydeploy
sudo chown easydeploy:easydeploy /var/easydeploy
#Pre installation custom tasks
[ -f /home/easydeploy/project/ezd/bin/pre-install.sh ] && sudo bash /home/easydeploy/project/ezd/bin/pre-install.sh
if [ -f /home/easydeploy/project/ezd/bin/pre-install-user.sh ]
then
sudo su - easydeploy <<EOF
set -eu
bash /home/easydeploy/project/ezd/bin/pre-install-user.sh
EOF
fi
sudo chmod a+rwx /var/run/docker.sock
echo "Configuring firewall"
sudo ufw allow 22 #ssh
sudo ufw allow 7946 #serf
sudo ufw allow 17123 #???
sudo ufw allow 1888 #status check for lb
sudo ufw allow 9595 #btsync
sudo ufw allow 8300 #consul
sudo ufw allow 8301 #consul
sudo ufw allow 8302 #consul
sudo ufw allow 6783 #weave
sudo ufw allow 37582 #weave
sudo ufw allow from 172.16.0.0/12 #docker network
sudo ufw allow from 10.0.0.0/8 #weave network
#sudo ufw allow from 172.16.0.0/12 to any port 53 #dns from containers
#sudo ufw allow from 172.16.0.0/12 to any port 8125 #statsd from containers
if [ ! -z "$EASYDEPLOY_REMOTE_IP_RANGE" ]
then
ufw allow proto udp from $EASYDEPLOY_REMOTE_IP_RANGE to any port 60000:60050 # mosh
ufw allow from $EASYDEPLOY_REMOTE_IP_RANGE to any port 8500
ufw allow from $EASYDEPLOY_REMOTE_IP_RANGE to any port 8400
ufw allow from $EASYDEPLOY_REMOTE_IP_RANGE to any port 8600
fi
for port in ${EASYDEPLOY_PORTS} ${EASYDEPLOY_EXTERNAL_PORTS}
do
sudo ufw allow ${port}
done
yes | sudo ufw enable
sudo iptables -I FORWARD -i weave -o weave -j ACCEPT
sudo bash -c "iptables-save > /etc/iptables.rules"
if [[ -n "$INSTALL_SQUID_FLAG" ]] && [[ ! -f /etc/squid3/squid.conf ]]
then
#Squid
sudo apt-get -q install -y squid3
cat > /etc/squid3/squid.conf <<EOF
acl all src all
http_port 3128
http_access allow all
# We recommend you to use at least the following line.
hierarchy_stoplist cgi-bin ?
# Uncomment and adjust the following to add a disk cache directory.
cache_dir ufs /var/spool/squid3 10000 16 256
# Leave coredumps in the first cache dir
coredump_dir /var/spool/squid3
EOF
[ -d /var/spool/squid3 ] || mkdir /var/spool/squid3
squid3 -z
chown -R proxy:proxy /var/spool/squid3
fi
sudo [ -d /home/easydeploy/template ] || mkdir /home/easydeploy/template
sudo cp ~/remote/template-run.conf /home/easydeploy/template/
if [ ! -f /var/easydeploy/.install/supervisord ]
then
echo "Installing supervisor for process monitoring"
sudo apt-get install -q -y supervisor timelimit
touch /var/easydeploy/.install/supervisord
fi
sudo /bin/bash <<EOF
export COMPONENT=${COMPONENT}
export EASYDEPLOY_HOST_IP=$EASYDEPLOY_HOST_IP
export DEPLOY_ENV=$DEPLOY_ENV
export EASYDEPLOY_PROCESS_NUMBER=${EASYDEPLOY_PROCESS_NUMBER}
envsubst < ~/remote/template-run.conf > /etc/supervisor/conf.d/run.conf
EOF
sudo cp -f ~/remote/rc.local /etc
sudo chmod 755 /etc/rc.local
sudo /etc/rc.local
#Monitoring
echo "Adding Monitoring"
# Install New Relic server monitoring, only when a license key file is present
# and the install has not already been stamped.
if [ -f /home/easydeploy/project/ezd/etc/newrelic-license-key.txt ] && [ ! -f /var/easydeploy/.install/newrelic ]
then
echo "Adding New Relic support"
# Fix: 'sudo echo ... >> file' ran the redirection as the invoking user;
# tee -a under sudo makes the append itself privileged.
echo "deb http://apt.newrelic.com/debian/ newrelic non-free" | sudo tee -a /etc/apt/sources.list.d/newrelic.list > /dev/null
# apt-key must also run as root to modify the keyring.
wget -O- https://download.newrelic.com/548C16BF.gpg | sudo apt-key add -
sudo apt-get -qq update
sudo apt-get -q install -y newrelic-sysmond
sudo nrsysmond-config --set license_key=$(cat /home/easydeploy/project/ezd/etc/newrelic-license-key.txt)
sudo /etc/init.d/newrelic-sysmond start
touch /var/easydeploy/.install/newrelic
fi
if ! which dstat
then
sudo apt-get -q install -y dstat
fi
#Security (always the last thing hey!)
if [ ! -f /var/easydeploy/.install/hardened ]
then
echo "Hardening"
#sudo apt-get install -y denyhosts
sudo apt-get -q install -y fail2ban
touch /var/easydeploy/.install/hardened
fi
. /home/easydeploy/bin/env.sh
if [[ $EASYDEPLOY_STATE == "stateless" ]]
then
docker kill $(docker ps -q -a) || :
service docker stop || :
rm -rf /var/lib/docker || :
service docker start
fi
[ -f /home/easydeploy/project/ezd/bin/post-install.sh ] && sudo bash /home/easydeploy/project/ezd/bin/post-install.sh
[ -f /home/easydeploy/project/ezd/bin/post-install-userland.sh ] && sudo su easydeploy "cd; bash /home/easydeploy/project/ezd/bin/post-install-userland.sh"
echo "Done"
exit 0
| true
|
c4fcbb5a0eede232a38ab0722b71c345f7bf2d17
|
Shell
|
adobe/XMP-Toolkit-SDK
|
/build/cmake.command
|
UTF-8
| 978
| 3.78125
| 4
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# =================================================================================================
# ADOBE SYSTEMS INCORPORATED
# Copyright 2013 Adobe Systems Incorporated
# All Rights Reserved
#
# NOTICE: Adobe permits you to use, modify, and distribute this file in accordance with the terms
# of the Adobe license agreement accompanying it.
# =================================================================================================
# Get the absolut path of the script
abspath=$(cd ${0%/*} && echo $PWD/${0##*/})
# to get the path only - not the script name
path_only=$(dirname "$abspath")
#Set XMPROOT for further reference
XMPROOT=$path_only/..
# defines some handy function used over in several scripts
source "$XMPROOT/build/shared/CMakeUtils.sh"
# generate projects in above file defined function
GenerateBuildProjects "$@"
if [ $? -ne 0 ]; then
echo "cmake.command failed";
exit 1;
else
echo "cmake.command Success";
exit 0;
fi
| true
|
616f32eaf3c9567335639ed351e7780bf5e3fe2a
|
Shell
|
zhrq95/Learning
|
/Shell/根据不同的菜单选择,分别显示当前时间、登录用户和当前工作目录.sh
|
UTF-8
| 340
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/bash
# Simple interactive menu: D = current date/time, U = logged-in users,
# W = current working directory. Anything else reports an invalid selection.
echo -e "\n Command MENU\n"
echo "D.Current data and time"
echo "U.Users currently logged in"
echo -e "W.Name of the working directory\n"
echo "Enter D,U or W:"
# Fix: -r keeps backslashes in the reply literal; without it 'read' mangles them.
read -r answer
echo
case "$answer" in
D | d)
date
;;
U | u)
who
;;
W | w)
pwd
;;
*)
echo "There is no selection:$answer"
;;
esac
| true
|
b9e6349dd23328d24523189d3c5c526b5442419d
|
Shell
|
jacebenson/workflow
|
/bin/beta-release
|
UTF-8
| 320
| 2.5625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Clone mermaid-js/docs parallel with the mermaid live editor.
# git clone git@github.com:mermaid-js/docs.git
# git clone https://github.com/mermaid-js/docs.git
yarn release
pushd .
cp -r docs/* ../docs/mermaid-live-editor-beta
cd ../docs
git add .
git commit -m "Beta updated with latest changes"
git push
popd
| true
|
a4c00a7b9e1c6a03f0589a6cd47bb563583f0466
|
Shell
|
legacy-codedigger/aix-4.1.3
|
/aix-4.1.3/bldenv/rastools/probeidsbld.sh
|
UTF-8
| 1,874
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/sh
# @(#)15 1.1 src/bldenv/rastools/probeidsbld.sh, cmderrlg, bos412, GOLDA411a 10/20/93 14:29:41
# COMPONENT_NAME: CMDERRLG
#
# FUNCTIONS: tool to genenerate /usr/include/sys/probeids.h
#
# ORIGINS: 27
#
# IBM CONFIDENTIAL -- (IBM Confidential Restricted when
# combined with the aggregated modules for this product)
# SOURCE MATERIALS
# (C) COPYRIGHT International Business Machines Corp. 1993
# All Rights Reserved
#
# US Government Users Restricted Rights - Use, duplication or
# disclosure restricted by GSA ADP Schedule Contract with IBM Corp.
# Input:
# The first file in sequence is probeids.h.
# The second is probeids.desc.
#
# First, get the existing labels and their ids from probeids.h.
# Then get the labels and comments from probeids.desc.
# Recreate probeids.h with the labels in the order they are in
# probeids.desc. Any label not in probeids.h will get a new id.
# Any label left over (i.e.), in probeids.h but not in probeids.desc
# will retain its id but get a comment of "UNUSED".
#
# The format of probeids.desc is:
# label comment
# Where comment is a one-liner
PATH=${ODE_TOOLS}/bin:${ODE_TOOLS}/usr/bin
export PATH
if [ $# != 2 -o $1 = "-?" ]
then # syntax error
echo "Usage: probeidsbld probeids.h probeids.desc" >&2
exit 1
fi
# Quit if we can't read the database
if [ ! -r "$2" ]
then echo "Can't read database $2" >&2
exit 2
fi
# Recreate $1
PROLOGS=/afs/austin/local/bin/prologs
tmpf=/tmp/probeids.$$
touch $tmpf
$PROLOGS -C CMDERRLG -O 27 -D 1993 -P 2 -T header $tmpf
# create a new probeids.h file in tmpf
awkf=$0.awk
touch $1 # Just in case it doesn't exist at all.
awk -f $awkf $* >>$tmpf
# Build the output file $1
# Put out #ifndef and prolog.
echo "#ifndef _h_PROBEIDS" >$1
echo "#define _h_PROBEIDS" >>$1
cat $tmpf >>$1
rm $tmpf
# Put on the #endif
echo "#endif /* _h_PROBEIDS */" >>$1
exit 0
| true
|
6b557217859e3ba99dcf703d33b61e9f73d139f8
|
Shell
|
ayredkklc/myportfolio
|
/prod-test.sh
|
UTF-8
| 292
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
# Smoke-test production: GET every path listed in endpoint.txt against the
# server and require HTTP 200 from each; exit 1 on the first failure.
# Fix: shebang changed from /bin/sh — the script uses [[ ]], a bashism that
# fails under dash/ash.
server="https://deryakilic.duckdns.org/"
while read -r endpoint;
do
# Quote the URL so whitespace/globs in an endpoint line cannot split it.
response=$(curl --write-out '%{http_code}' --silent --output /dev/null "$server$endpoint")
if [[ "$response" == "200" ]]; then
echo "Success /$endpoint"
else
echo "Error /$endpoint"
exit 1
fi
done < endpoint.txt
exit 0
| true
|
9c567c99fb8696074df89968c3b248e5f9695a4c
|
Shell
|
xiaostar518/chopper
|
/sh/changeEthX.sh
|
UTF-8
| 854
| 3.53125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Save the PCI slot (first lspci column) of every Ethernet device to
# /etc/1.txt; updateDevice later replays this list to hand out ethN names
# in PCI bus order.
# NOTE(review): 'exit 0' terminates the whole script, not just the function —
# presumably intended when invoked from the select menu below; confirm.
dumpSeries(){
lspci|grep Eth|awk '{print $1}'>/etc/1.txt
exit 0
}
# Rename NICs so ethN numbering follows PCI bus order, in two passes:
#   pass 1: park every ethN under a temporary name eN (interfaces are downed
#           first because nameif cannot rename an interface that is up);
#   pass 2: walk the PCI slot list saved in /etc/1.txt (see dumpSeries) and
#           assign eth0..ethN in that order, bringing each back up.
# Assumes at most 32 interfaces (the hard-coded loop bound).
updateDevice(){
for ((i=0; i<32; i++))
do
ifconfig eth${i} down
mac=`ifconfig eth${i}|grep ether|awk '{print $2}'`
nameif e${i} ${mac}
done
n=0
for j in `cat /etc/1.txt`
do
for ((i=0; i<32; i++))
do
# Match the temporary interface whose driver reports PCI slot $j.
m=`ethtool -i e${i} 2>/dev/null|grep ${j}`
if [ -n "$m" ]; then
mac=`ifconfig e${i}|grep ether|awk '{print $2}'`
nameif eth${n} ${mac}
ifconfig eth${n} up
n=` expr ${n} + 1`
fi
done
done
}
# Interactive dispatcher: let the operator pick one of the two actions.
# Fixes: array elements are whitespace-separated — the original comma created
# a single element "dumpSeries,updateDevice"; '${$cmdList[@]}' was invalid
# expansion syntax; and '$(${cmd})' executed the command's *output* rather
# than the command itself.
cmdList=("dumpSeries" "updateDevice")
select cmd in "${cmdList[@]}";
do
"${cmd}"
done
| true
|
c87b8bbd4b700ab071c320a77445d7aaa6335226
|
Shell
|
udzura/docker-builds-haconiwa
|
/deb/builddeb.sh
|
UTF-8
| 1,353
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
set -xe
export DEBIAN_FRONTEND=noninteractive
export LANG=C
VERSION=${1:-0.8.5}
MAINTAINER=${2:-'udzura@udzura.jp'}
MAINTAINER_NAME=${3:-'Uchio Kondo'}
if test -d /build/haconiwa
then
mkdir /build/haconiwa-$VERSION
rsync -a /build/haconiwa/ /build/haconiwa-$VERSION/
cd /build/haconiwa-$VERSION
rake clean
cd -
else
git clone https://github.com/haconiwa/haconiwa.git /build/haconiwa-$VERSION
cd /build/haconiwa-$VERSION
git checkout $(git rev-parse v$VERSION)
cd -
fi
if test -f /build/build_config.rb
then
cp -f /build/build_config.rb /build/haconiwa-$VERSION/build_config.rb
fi
cd /build/haconiwa-$VERSION
if ! grep -q 'VERSION = "'$VERSION'"' mrblib/haconiwa/version.rb; then
sed -i.old 's/.*VERSION.*/VERSION = "'$VERSION'"/' mrblib/haconiwa/version.rb
cp packages/deb/debian/changelog /tmp/.changelog
cat <<EOLOG > packages/deb/debian/changelog
haconiwa ($VERSION-1) unstable; urgency=medium
* Customized deb build
-- $MAINTAINER_NAME <$MAINTAINER> $(date "+%a, %e %b %Y %H:%M:%S %z")
EOLOG
cat /tmp/.changelog >> packages/deb/debian/changelog
fi
rake mruby
dh_make -y -s -e $MAINTAINER --createorig -y
cp -v packages/deb/debian/* debian/
rm -rf debian/*.ex debian/*.EX
debuild --no-tgz-check -uc -us
cd ../
cp -v *.deb /out/pkg
| true
|
ddfbb6dcfd12870df6be8ced17a8a9ea27321ff1
|
Shell
|
Dmytro10/env-vagrant
|
/scripts/finish-config.sh
|
UTF-8
| 433
| 3.390625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Final Vagrant provisioning step: report the box's external IP and whether
# the OpenVPN tunnel interface (tun0) is up.
# NOTE(review): a bare 'sudo su' spawns an interactive root shell and the rest
# of this script only runs after that shell exits — presumably unintended
# (did the author mean to elevate the following commands?); confirm.
sudo su
# Hach for Windows
ping -c 5 192.168.25.1
# Constants
RED='\033[0;31m'
GR='\033[0;32m'
NC='\033[0m' # No Color
# Non-empty only when tun0 exists, i.e. the VPN is connected.
OVPNS=$(ifconfig tun0)
# External IP as seen from the internet.
REALIP=$(wget http://ipinfo.io/ip -qO -)
echo "You're good now :)"
echo -e "${GR}Your output IP:${RED} $REALIP ${NC}"
if [[ -z "$OVPNS" ]]; then
echo -e "${RED}Dont connect to OpenVPN Server!${NC}"
else
echo -e "${GR}Status connect to OpenVPN: OK!${NC}
$OVPNS"
fi
| true
|
6a88d7f37d52f98efb9afd358e7d3dbd84d2b305
|
Shell
|
kmader/studio_home
|
/app/userstart.sh
|
UTF-8
| 707
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Deep Learning Studio - GUI platform for designing Deep Learning AI without programming
#
# Copyright (C) 2016-2017 Deep Cognition Labs, Skiva Technologies Inc.
#
# All rights reserved.
#
# Per-user entrypoint: configure the Python/CUDA environment, ensure the
# user's deploy directory exists, then start the app and the olympus proxy.
export PYTHONPATH=/home/app
export HOME=/home/app
export LD_LIBRARY_PATH="/usr/local/nvidia/lib:/usr/local/nvidia/lib64:"
# Fix: quote $USERID so an empty or space-containing value cannot change the
# path that is created/exported.
if [ ! -d "/data/$USERID/deploy" ]; then
mkdir -p "/data/$USERID/deploy"
fi
export DEPLOY_DIR="/data/$USERID/deploy"
if [ -d "/usr/local/nvidia/bin" ]; then
# Fix: the unquoted '********Detected ... path********' words were glob
# patterns and could expand against files in the working directory.
echo "********Detected CUDA path********"
export PATH="/usr/local/nvidia/bin:/usr/local/cuda/bin:$PATH"
fi
# Prefer the compiled app when present; fall back to the Python source.
if [ -f app.so ]; then
./app.so &
else
python app.py &
fi
olympus up --no-debug --port 6666 --host 127.0.0.1 &
| true
|
f200187b9599950daa5ce9532b622bdba57df9dd
|
Shell
|
epasham/bigdata-ansible
|
/filebeat-prd/symbolic_link.sh
|
UTF-8
| 414
| 2.828125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Create the /root/filebeat/filebeat symlink on every host in the serverlist
# for cluster $1, pointing at that cluster's filebeat build.
# Fix: the script's directory was derived from "$this", a variable that is
# never set anywhere; use "$0" (the script path) as the sibling scripts do.
bin=`dirname "$0"`
bin=`cd "$bin"; pwd`
export SERVERLIST="${bin}/serverlist/serverlist-$1"
# Provides $ES_VERSION used below.
. ../elasticsearch-prd/mpalyes/script/env.sh
for server in `cat "$SERVERLIST"`; do
# Fix: the inner quotes of the original echo were unescaped and silently
# terminated the string; escape them so the logged command reads correctly.
echo "ssh root@$server \"ln -s /root/filebeat/filebeat-$ES_VERSION-linux-x86_64-$1 filebeat\""
ssh root@$server "ln -s /root/filebeat/filebeat-$ES_VERSION-linux-x86_64-$1 /root/filebeat/filebeat"
done
wait
|
ad518729a675d965c97c25242ad66cf586c45922
|
Shell
|
DigitalPacific/corekube
|
/cloud-config/get_discovery_interface_ip.sh
|
UTF-8
| 265
| 2.90625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Record the IPv4 address of the discovery interface in /run/IP, one line per
# address, each formatted as "IP=<addr>".
# %discovery_net_interface% is substituted by the cloud-config templating step.
DISCOVERY_IF=%discovery_net_interface%
/usr/bin/ip -4 addr show $DISCOVERY_IF \
  | /usr/bin/awk '/inet/ {sub(/\/.*/, "", $2); print "IP=" $2}' > /run/IP
| true
|
fbcb8fbe84d6657d3afbc545ed0f7e4712f16c5a
|
Shell
|
lyokato/myenv
|
/roles/zsh/files/.zshrc
|
UTF-8
| 938
| 2.75
| 3
|
[] |
no_license
|
# Zsh interactive configuration; plugins are managed by zgen.
source "${HOME}/.zgen/zgen.zsh"
# Regenerate the static init script only when no saved one exists.
if ! zgen saved; then
zgen oh-my-zsh
zgen oh-my-zsh plugins/git
zgen oh-my-zsh plugins/sudo
zgen oh-my-zsh themes/gallois
zgen load zsh-users/zsh-syntax-highlighting
zgen load zsh-users/zsh-completions src
zgen load zsh-users/zsh-history-substring-search
zgen save
fi
autoload -Uz colors
colors
autoload -U compinit
compinit
setopt auto_cd
setopt correct
setopt auto_pushd
setopt pushd_ignore_dups
setopt share_history
setopt histignorealldups
HISTFILE=~/.zsh_history
HISTSIZE=10000
SAVEHIST=10000
set NO_BEEP
PATH=/usr/local/bin:/bin:/usr/bin:/usr/local/opt/coreutils/libexec/gnubin:${PATH}
export PATH
alias ls='ls --color=auto'
eval "$(dircolors ~/.zsh/dircolors-solarized/dircolors.ansi-universal)"
if [ -n "$LS_COLORS" ]; then
zstyle ':completion:*' list-colors ${(s.:.)LS_COLORS}
fi
# Show a long listing automatically after every cd.
chpwd() { ls -ltr --color=auto }
# Fix: the original line ended with a stray ']' which was passed to 'source'
# as an extra argument; the conditional needs no closing bracket here.
[ -e ~/.zshrc-after ] && source ~/.zshrc-after
| true
|
c0e7b38a0c466fb9a247c4b51a6b6d6734654525
|
Shell
|
kenmanheimer/ShellUtils
|
/envelope
|
UTF-8
| 5,222
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
# Execute a command, intercepting output and emailing if errors occur.
# With many options to tailor behavior - 'envelope -?' for details.
# Todo:
# - Convert argument parsing to getopts.
wrap="$0 $*"
recipient=$LOGNAME
scriptNm=$(basename $0)
subjLine="$scriptNm RESULT for: COMMAND"
alwaysSend=""; showEnv=""; from=""; onOutputSend=""; verbose=""; cmd=""
grepFor=""; replyTo=""; DEBUG=""
sendmail=/usr/sbin/sendmail
if [ -x /bin/uname ]; then
uname=/bin/uname
elif [ -x /usr/bin/uname ]; then
uname=/usr/bin/uname
else
uname=uname
fi
domainname=/bin/domainname
UsageMsg () {
echo "Usage: ${scriptNm} [ -r recip ] [ -f reply-to ] [ -s subj ] [ -a | -n | -o ] [ -g string ] [ -v ] [ -e ] CMD [ cmd-args... ]
CMD output is intercepted and is emailed to recip, by default if the command
concludes with an error status, or other conditions per some of these options:
-r - Email to recipient; defaults to job owner.
-f - Set reply-to address.
-s - Email subject line. Any occurences of the token
'RESULT' are replaced by 'error' or 'output', depending on
cmd status. 'COMMAND' is replaced by the command text and args.
Default subject line is: $subjLine
-a - Send mail notification, regardless of error status.
-n - Do not send mail notification, regardless of error status.
-o - Send mail if there is any non-null output, stdout _or_ stderror.
-g - Send mail if output includes grep match for string
-v - Verbose - emit results on stdout, as well as via mail.
-e - Include process environment in notification.
-d - Debug - execute CMD, but present results and sendmail cmd, don't send"
}
setupLogFiles () {
export logFile="$(mktemp /tmp/envelope-log-XXXXXX)"
touch $logFile
rmLogFile () { rm -f $logFile; }
trap rmLogFile 0
export preLogFile="$(mktemp /tmp/envelope-pre-XXXXXX)"
touch $preLogFile
rmPreLogFile () { rm -f $preLogFile; }
trap rmPreLogFile 0
}
PrepSubjLine () {
preppedSubjLine=`echo "$subjLine" \
| sed -e sCOMMAND"$cmd"g -e sRESULT"$result"g`
}
preppedFromLine=""
PrepFromLine () {
host=`$uname -n`
courtesy="$LOGNAME@$host"
preppedFromLine="$LOGNAME (envelope)"
}
SendEMail () {
# Put address and subject in head of message:
PrepFromLine
PrepSubjLine
echo "Subject: $preppedSubjLine" >> $preLogFile
if [ -n "$replyTo" ]; then
echo "Reply-To: $replyTo" >> $preLogFile
fi
echo "" >> $preLogFile
echo "$scriptNm run...
command: $cmd
for: $LOGNAME@$host
result status: $status
invoked as:
$wrap" >> $preLogFile
echo "" >> $preLogFile
if [ -z "$showEnv" ]; then
echo "execution PATH:" >> $preLogFile
wasIFS="$IFS"; IFS=":$IFS"
for dir in $PATH; do
echo $dir
done | \
xargs -s75 echo | awk '{printf("\t")
for (i=1; i<=NF; i++) printf(":%s", $i)
print ""}' >> $preLogFile
IFS="$wasIFS"
echo "" >> $preLogFile
else
echo " Process environment:" >> $preLogFile
env >> $preLogFile
fi
echo "Output (stdout and stderr):" >> $preLogFile
echo "--------------------------" >> $preLogFile
if [ "$DEBUG" != "t" ]; then
cat $preLogFile $logFile | $sendmail -F "$LOGNAME (enveloped)" $recipient
else
cat $preLogFile $logFile
echo $sendmail -F "$LOGNAME (envelope)" $recipient
fi
rm -f $preLogFile
}
InferQuotes () {
  # Echo the argument list, wrapping any element that contains a space in
  # double quotes so the result can be re-parsed as the original words.
  local acc="" arg
  for arg in "$@"; do
    if [[ "$arg" == *" "* ]]; then
      acc+=" \"$arg\""
    else
      acc+=" $arg"
    fi
  done
  # Strip the leading separator introduced by the first append.
  echo "${acc:1}"
}
# Parse the command line, inferring quotes:
command_with_quotes="$(InferQuotes $0 "$@")"
while [ "$*" != "" ]; do
case "$1" in
-a ) alwaysSend=t; shift;;
-d ) DEBUG=t; shift;;
-e ) showEnv=t; shift;;
-f ) shift; replyTo="$1"; shift;;
-g ) shift; grepFor="$1"; shift;;
-n ) neverSend=t; shift;;
-o ) onOutputSend=t; shift;;
-r ) shift; recipient="$1"; shift;;
-s ) shift; subjLine="$1"; shift;;
-v ) verbose=t; shift;;
-h* | -? | --help ) UsageMsg; exit 0;;
-* ) echo "${scriptNm}: unknown flag $1" 1>&2
UsageMsg
date
exit 1;;
* ) if [ -z "$recipient" ]; then
shift; recipient="$1" # Garner the recipient
else
cmd="$(InferQuotes "$@")"
break # Leave the rest in $@
fi;;
esac
done
if [ -z "$*" ]; then
echo "${scriptNm}: non-empty command required" 1>&2
UsageMsg
exit 1
fi
setupLogFiles
status=0
sh -c "$*" > $logFile 2>&1; status=$?
if [ $status != 0 ]; then
result="err $status"
else
result=output
fi
if [ -n "$neverSend" ]; then
doSend=""
elif [ -n "$alwaysSend" -o $status != 0 ]; then
doSend=t
if [ ! -s $logFile ]; then
if [ $status = 0 ]; then result="(null) $result"
else result="$result (null output)"
fi
fi
elif [ -n "$onOutputSend" -a -s $logFile ]; then
doSend=t
elif [ -n "$grepFor" -a -s $logFile ]; then
if grep -s "$grepFor" $logFile > /dev/null; then
doSend=t
fi
fi
if [ -n "$doSend" ]; then
SendEMail
fi
if [ -n "$verbose" ]; then
cat $logFile
rm -f $logFile
exit $status
else
rm -f $logFile
exit 0
fi
| true
|
66e38e95dd6a7461d6b16c4c8446f5ab8e26c7c4
|
Shell
|
harvard-library/librarycloud_deploy
|
/vagrant-api/puppet-bootstrap-centos.sh
|
UTF-8
| 661
| 3.90625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# This bootstraps Puppet on CentOS 7.x
# It has been tested on CentOS 7.0 64bit
set -e
# rpm/yum below require root.
if [ "$EUID" -ne "0" ]; then
  echo "This script must be run as root." >&2
  exit 1
fi
# Fix: 'command -v' replaces the non-portable 'which' for the installed check.
if command -v puppet > /dev/null 2>&1; then
  echo "Puppet is already installed."
  exit 0
fi
# Install puppet labs repo.  The 'sudo' prefixes were dropped: the script has
# already verified it is running as root, so they were redundant.
echo "Configuring PuppetLabs repo..."
rpm -ivh http://yum.puppetlabs.com/puppetlabs-release-el-7.noarch.rpm
# Install Puppet...
echo "Installing puppet"
yum install -y yum-utils
yum-config-manager --add-repo http://mirror.centos.org/centos/7/os/x86_64/
yum install --nogpgcheck -y puppet > /dev/null
echo "Puppet installed!"
|
0f3035e8b006f23fc6db8869aaf0625bac5fc788
|
Shell
|
nachtmaar/androlyze
|
/docker/mongodb/mongodb.sh
|
UTF-8
| 673
| 2.875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Container entrypoint for the AndroLyze MongoDB service: run the init and
# SSL setup scripts, then exec mongod as the mongodb user.
# `/sbin/setuser memcache` runs the given command as the user `memcache`.
# If you omit that part, the command will be run as root.
#/bin/chown -R mongodb:root /var/log/mongodb/ /var/lib/mongodb
# Fix: quote the path — an unset or space-containing ANDROLYZE_UTIL would
# otherwise word-split / silently source nothing.
source "$ANDROLYZE_UTIL"
echo "configuring mongodb ..."
/etc/androlyze_init/mongodb_init.sh
echo "configuring mongodb [done]
"
echo "configuring ssl ..."
/etc/androlyze_init/mongodb_ssl_init.sh
echo "configuring ssl [done]"
echo "starting mongodb ..."
# exec so mongod replaces the shell and receives container signals directly.
# Fix: a space now separates 'mongodb' from the line continuation (the
# original 'mongodb\' risked joining with the next token).
exec /sbin/setuser mongodb \
    /usr/bin/mongod --smallfiles --dbpath /data/db/ \
    --sslWeakCertificateValidation --sslPEMKeyFile /etc/ssl/private/mongodb.pem --sslOnNormalPorts --sslCAFile "$ANDROLYZE_SSL_CA_CERT"
|
b6cd3a67c967f69aa106b34a3e56fb4b556660f4
|
Shell
|
itsmina1/OS10
|
/os.10/party.sh
|
UTF-8
| 287
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
# Decide whether someone may attend the party: age >= 18, or they hold an
# invitation letter.
u_have_the_letter=true
echo "say ur age plz"
# -r keeps backslashes in the reply literal.
read -r x
# Fixes: '[' and ']' require surrounding spaces, '||' cannot join two bare
# bracket expressions written as '[a]||[b]' without spaces, and
# "greater than or equal" is simply -ge.
if [ "$x" -ge 18 ]
then
echo "you may go to the party"
elif $u_have_the_letter
then
echo "you may go to the party"
else
echo "you may not go to the party"
fi
| true
|
3f353cd7f308f33ba4b43eaaa923cb9942d44eca
|
Shell
|
kaiwan/L5_debug_trg
|
/stresstest/stressor.sh
|
UTF-8
| 500
| 3.28125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# stressor.sh
# Stress system
# with stress(1) : CPU, I/O, VM, disk
# Wrapper over stress(1) and stress-ng(1)
[ $# -ne 1 ] && {
 echo "Usage: $0 duration-in-seconds (for both IO and n/w)"
 exit 1
}
#sudo stress -v --cpu 4 --io 4 --vm 4 --hdd 4 &
# IO, disk
# Fix: the banner said 'stress' but the command actually run is stress-ng.
echo "1. sudo stress-ng -v --io 8 --hdd 8 --timeout $1"
sudo stress-ng -v --io 8 --hdd 8 --timeout $1
# net
# Fix: the bare '?' is a shell glob and would expand against any
# single-character filename in the CWD; quote it so stress-ng receives it
# literally (it lists the stressors in the class).
echo "2. sudo stress-ng -v --all 0 --class network '?' --timeout 3"
sudo stress-ng -v --all 0 --class network '?' --timeout 3
|
a4423a28182e1b7c790b8ac2c19a244cbd14a8ad
|
Shell
|
rhiswell/15418
|
/assignment1/prog2_vecintrin/collect.sh
|
UTF-8
| 1,358
| 3.875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
VECTOR_WIDTH=( 2 4 8 16 )
default_vector_width=4
# Rewrite the VECTOR_WIDTH macro in the intrinsics header in place.
#   $1 - new vector width
inject_vector_width()
{
    local width=$1
    local header="./CMU418intrin.h"
    sed -i \
        "s/#define VECTOR_WIDTH [0-9]\+/#define VECTOR_WIDTH $width/g" \
        "$header"
}
# Run the benchmark once and capture its statistics into stat_<width>.log.
# Line 1 of the log is the vector width itself; the remaining lines are the
# values after ':' in the first '*'-delimited stats block of myexp's output.
# merge_stat later pastes these logs against title.csv, whose first row is
# "Vector Width", so the header line must survive.
function collect_stat
{
    local vector_width=$1
    local app_bin="./myexp"
    local app_opt="-s 10000"
    echo "$vector_width" >stat_$vector_width.log
    # Fix: this redirection used '>' and truncated the header written just
    # above; append instead so the width stays on line 1.
    $app_bin $app_opt | grep -A5 -m 1 "*" | awk -F':[ ]*' 'NR>1 {print $2}' \
        >>stat_$vector_width.log
}
# Combine every per-width stat_*.log into merged_stat.csv: column 1 is the
# row-label file (title.csv), following columns are the logs in sorted order.
# Consumes and deletes the logs and the temporary title file.
function merge_stat
{
    printf "Vector Width\nTotal Vector Instructions\nVector Utilization" \
        >title.csv
    printf "\nUtilized vector lanes\nTotal Vector Lanes" \
        >>title.csv
    # paste joins the files line-by-line with commas; the sed then strips '%'
    # signs so the CSV holds plain numbers.
    paste -d',' title.csv `find . -name "*.log" | sort` >merged_stat.csv && \
    sed -i "s/%//g" merged_stat.csv
    rm *.log title.csv
}
# Entry point — subcommands:
#   make     : build only
#   stat     : build at the default width and collect one stat log
#   stat_all : sweep every width in VECTOR_WIDTH, collect, and merge to CSV
case $1 in
"make" )
make >/dev/null
;;
"stat" )
inject_vector_width $default_vector_width
make >/dev/null
collect_stat $default_vector_width
;;
"stat_all" )
# Each iteration patches the header, rebuilds, and collects that width's log.
for vector_width in ${VECTOR_WIDTH[@]}; do
inject_vector_width $vector_width
make >/dev/null
collect_stat $vector_width
done
merge_stat
;;
* )
echo unknown subcommand
;;
esac
| true
|
496a35023b3c1b4de7f5f2a82871ffea259076b5
|
Shell
|
corsec00/Azure500Stuff
|
/az500/Getting-Started-Vault/m4/m4-tokenwrapping.sh
|
UTF-8
| 1,827
| 2.796875
| 3
|
[
"MIT"
] |
permissive
|
#Export the Vault server running in AD environment
export VAULT_ADDR=http://127.0.0.1:8200
export VAULT_TOKEN=AddYourVaultTokenHere
#For Windows
$env:VAULT_ADDR = "http://127.0.0.1:8200"
$env:VAULT_TOKEN = "AddYourVaultTokenHere"
$headers = @{
"X-Vault-Token" = $env:VAULT_TOKEN
}
#Log into Vault server as root
vault login
#Store a new application secret
vault kv put secret/app-server api-key=123456
#Create a wrapping token for 5 minutes
vault kv get -wrap-ttl=300 secret/app-server
#Retrieve the secret
curl --header "X-Vault-Token: WRAPPING_TOKEN" --request POST \
$VAULT_ADDR/v1/sys/wrapping/unwrap | jq
$wrapper_header = @{
"X-Vault-Token" = "WRAPPING_TOKEN"
}
Invoke-WebRequest -Method Post -Uri $env:VAULT_ADDR/v1/sys/wrapping/unwrap `
-UseBasicParsing -Headers $wrapper_header
#Add the webkv store if you haven't already
vault secrets enable -path=webkv kv
#Add a secret to webkv
vault kv put webkv/app-server api-key=123456
#Add a web policy if you haven't already
vault policy write web webpol.hcl
#Create a token for an account using the web policy and wrap it
vault token create -policy=web -wrap-ttl=300
#Retrieve the app token
curl --header "X-Vault-Token: WRAPPING_TOKEN" --request POST \
$VAULT_ADDR/v1/sys/wrapping/unwrap | jq
$wrapper_header = @{
"X-Vault-Token" = "WRAPPING_TOKEN"
}
Invoke-WebRequest -Method Post -Uri $env:VAULT_ADDR/v1/sys/wrapping/unwrap `
-UseBasicParsing -Headers $wrapper_header
#Retrieve a secret from webkv using new token
#For Linux
curl --header "X-Vault-Token: APP_TOKEN" $VAULT_ADDR/v1/webkv/app-server | jq
#For Windows
$app_header = @{
"X-Vault-Token" = "APP_TOKEN"
}
Invoke-WebRequest -Method Get -Uri $env:VAULT_ADDR/v1/webkv/app-server `
-UseBasicParsing -Headers $app_header
| true
|
f3d3ee94ceae04a4afde9d1c2d1840dd5f55b92a
|
Shell
|
FlintInspiresRealEngineers/Attendance
|
/nightly_refresh.sh
|
UTF-8
| 1,777
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
# Nightly maintenance: rebuild the history cache, cycle the server log and
# (optionally) zip the data directory onto a backup drive, deleting the
# oldest backups until the new archive fits under SIZE_LIMIT.
# Fixes vs. original: $( ) instead of backticks, $(( )) instead of expr,
# quoted expansions, and an error check on the initial cd.

#Config
REPO_PATH='/home/attendance/Attendance'
DATA_PATH='/home/attendance/Attendance_data'
MAKE_BACKUP=true
BACKUP_PATH='/backup-usb'
SIZE_LIMIT='28672' #in megabytes

# log MESSAGE -- append a timestamped line to the refresh log.
log () {
    echo "[$(date)]" "$1" >> "$DATA_PATH/logs/refreshlog.log"
}

#Generate history cache
log 'Creating history cache'
python3.8 "$REPO_PATH/cache_history.py"

# Cycle server log: archive the current log under a timestamped name, then
# truncate it in place.
log 'Cycling server log'
cd "$DATA_PATH/logs" || { log 'ERROR: cannot cd to log directory'; exit 1; }
cp 'serverlog.log' ./serverlogs/"$(date)".log
echo '' > 'serverlog.log'

if [ "$MAKE_BACKUP" = true ]; then
    #Create zip of the whole data directory
    cd ..
    log 'Creating zip file'
    zip -r ../backup.zip ./* > /dev/null
    cd ..

    # Size of the new archive and the space that must remain on the drive.
    dataSize=$(du -sb ./backup.zip | awk '{print $1}')
    targetSize=$((SIZE_LIMIT * 1048576 - dataSize))

    # Current usage of the backup drive.
    size=$(du -sb "$BACKUP_PATH" | awk '{print $1}')

    while [ "$size" -gt "$targetSize" ]; do
        # Delete oldest backup; names start with an epoch timestamp so
        # reverse lexical order puts the oldest last.
        filename=$(ls -1r "$BACKUP_PATH" | tail -n 1)
        #Check if folder empty
        if [ "$filename" == '' ]; then
            log 'Cannot create backup - capacity too small'
            exit 1
        fi
        log "Deleting backup '$filename'"
        rm -r "$BACKUP_PATH"/"$filename"
        #Get new size
        size=$(du -sb "$BACKUP_PATH" | awk '{print $1}')
    done

    log 'Copying data'
    currentTime=$(date '+%s - %m-%d-%Y')
    cp -r --no-preserve=mode ./backup.zip "$BACKUP_PATH"/"$currentTime".zip
    log 'Running disk sync'
    sync
    log 'Deleting zip file'
    rm ./backup.zip
fi

log 'Finished refresh'

#Reboot once a week (Sunday, %u == 7)
if [ "$(date +%u)" == 7 ]; then
    log 'Rebooting'
    sudo reboot
fi
| true
|
19b4fa54f538a3ee4a1bde2ba6ab35e22b53959c
|
Shell
|
osresearch/opensgx
|
/opensgx
|
UTF-8
| 1,883
| 3.828125
| 4
|
[] |
no_license
|
#! /bin/bash
# Driver script for OpenSGX: generate keys, compile, measure, sign and run
# SGX enclave programs via the bundled sgx-tool and sgx loader.
ROOT=$(dirname "$0")
SGXTOOL=$ROOT/user/sgx-tool
SGX=$ROOT/sgx
DEVICEKEY=$ROOT/user/conf/device.key
# Generate a 3072-bit RSA signing key into ./sign.key.
key_gen() {
FILENAME=sign.key
$SGXTOOL -k 3072 > $FILENAME
}
# Build <dir>/<name>.sgx from the source path in $1 via the user/ Makefile.
# Note: changes the working directory to user/ and does not change back.
compile_code() {
BASEDIR=$(dirname $1)
SUBDIR=${BASEDIR#*/}
BASENAME=$(basename $1)
NAME="${BASENAME%.*}"
cd user
make $SUBDIR/$NAME.sgx
}
# Run the .sgx binary corresponding to the source path in $1, passing the
# original path as the program argument.
run_enclave() {
BASEDIR=$(dirname $1)
BASENAME=$(basename $1)
NAME="${BASENAME%.*}"
EXE=$BASEDIR/$NAME.sgx
$SGX $EXE $1
}
# Extract the enclave layout of binary $1 (file size, .enc_text section
# offset, ENCT/ENCD symbol ranges, entry point) with stat/readelf/nm and
# feed it to sgx-tool -m to print a measurement.
# The repeated `array=($var)` pattern word-splits tool output so individual
# columns can be picked out by index.
measure() {
size=$(stat -c%s $1)
offset=$(readelf -S $1 | grep .enc_text)
array=($offset)
offset=${array[4]}
code_start=$(nm $1 | grep ENCT_START)
array=($code_start)
code_start=${array[0]}
code_end=$(nm $1 | grep ENCT_END)
array=($code_end)
code_end=${array[0]}
data_start=$(nm $1 | grep ENCD_START)
array=($data_start)
data_start=${array[0]}
data_end=$(nm $1 | grep ENCD_END)
array=($data_end)
data_end=${array[0]}
entry=$(nm $1 | grep enclave_start)
array=($entry)
entry=${array[0]}
SZ="--size="
CO="--offset="
CS="--code_start="
CE="--code_end="
DS="--data_start="
DE="--data_end="
EN="--entry="
$SGXTOOL -m $1 $SZ$size $CO$offset $CS$code_start $CE$code_end $DS$data_start $DE$data_end $EN$entry
}
# Measure and sign binary $1 with RSA key $2: produces <name>.conf holding
# the signature plus a launch token bound to the device key. Intermediate
# measurement/sig/token files are removed afterwards.
sign() {
BASEDIR=$(dirname $1)
BASENAME=$(basename $1)
NAME="${BASENAME%.*}"
MEASURE=$BASEDIR/$NAME-measurement.conf
SIG=$BASEDIR/$NAME-sig.conf
TOKEN=$BASEDIR/$NAME-token.conf
CONF=$BASEDIR/$NAME.conf
touch $CONF
measure $1 > $MEASURE
$SGXTOOL -S $MEASURE > $SIG
$SGXTOOL -s $SIG --key=$2 > $CONF
$SGXTOOL -E $CONF > $TOKEN
$SGXTOOL -M $TOKEN --key=$DEVICEKEY >> $CONF
rm $MEASURE $SIG $TOKEN
}
# Dispatch on the first flag; anything unrecognised is treated as an
# enclave binary path and executed.
case "$1" in
-k|--key)
key_gen
;;
-c|--compile)
compile_code $2
;;
-m|--measure)
measure $2
;;
-s|--sign)
case "$3" in
-k|--key)
sign $2 $4
;;
esac
;;
*)
run_enclave $1
;;
esac
| true
|
72466af16d24e5349065b9cec608a3ab47aac6ff
|
Shell
|
wangaguo/ossf_openfoundry_projects
|
/services/rt/sync_group.sh
|
UTF-8
| 789
| 3.125
| 3
|
[] |
no_license
|
#!/bin/sh
# Sync RT group membership from the OpenFoundry master: fetch a JSON dump
# over HTTP, run the sync script once to apply and once to verify, and log
# both passes. Prints "good"/"bad" depending on whether the second pass
# still reports pending changes.

CHECKOUT_PATH="/usr/local/rt36"
# NOTE(review): shared secret embedded in the script; consider moving it to
# a root-only config file.
SYNC_SECRET="5f12117baa04ce58"
SYNC_HOST="192.168.3.80:80"
DUMP_LINK="http://${SYNC_HOST}/openfoundry/foundry_sync?secret=${SYNC_SECRET}&module=rt"
SYNC_SCRIPT=group.pl
JSON_DUMP=b.json
RESULT_LOG=group_result.txt
CHECK_LOG=group_result2.txt

#utils... (command -v is the POSIX replacement for `which`)
PERL=$(command -v perl)
DATE=/bin/date
FETCH=$(command -v fetch)
GREP=/usr/bin/grep

cd "${CHECKOUT_PATH}" || exit 1

# Stamp the result log with a dated header.
echo "#####################" >> "${RESULT_LOG}"
$DATE >> "${RESULT_LOG}"
echo "#####################" >> "${RESULT_LOG}"

if $FETCH -o "$JSON_DUMP" "${DUMP_LINK}" ; then
    # BUG FIX: was '>', which truncated away the header appended above.
    $PERL "$SYNC_SCRIPT" >> "${RESULT_LOG}"
    # Second pass should be a no-op; any remaining 'should' lines mean the
    # sync did not converge.
    $PERL "$SYNC_SCRIPT" > "${CHECK_LOG}"
    if $GREP 'should' "$CHECK_LOG" ; then
        echo "bad"
    else
        echo "good"
    fi
else
    # BUG FIX: was misspelled ${RESULE_LOG}, which expanded empty/unset and
    # sent the message to the wrong place instead of the result log.
    echo "fetch failed!!!!!!!!!!!!!!!!!!!!" >> "${RESULT_LOG}"
fi
| true
|
446d07608f0a40a9913e5a478c5537baebd52e42
|
Shell
|
ActianCorp/spark-vector
|
/deployment/docker/spark-provider/build_image.sh
|
UTF-8
| 1,885
| 3.828125
| 4
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Build a Docker image containing Spark plus the Spark Vector Provider jar
# and the GCS connector.
#
#REQUIREMENTS: curl, sbt and docker have to be on PATH
#ARG 1: Tag of the to-be-created docker image
set -e

# Fail early with a usage message instead of building an image tagged "".
if [[ -z "${1:-}" ]]; then
    echo "Usage: $0 <image-tag>" >&2
    exit 1
fi
IMAGE_TAG=$1

SPARK_VERSION=3.1.1
HADOOP_VERSION=3.2
JAVA_VERSION=8
GCS_VERSION=2.1.5

SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
# Strip everything from /deployment onward to get the repo root
# (parameter expansion instead of echo|sed).
PROJECT_ROOT="${SCRIPT_DIR%%/deployment*}"
PATH_TO_PROVIDER_DIR=$PROJECT_ROOT/provider/target

SPARK_URL=https://archive.apache.org/dist/spark/spark-$SPARK_VERSION/
SPARK_FOLDER=spark-$SPARK_VERSION-bin-hadoop$HADOOP_VERSION
SPARK_PKG=tgz
JAVA_IMAGE=$JAVA_VERSION-jre-slim
# shaded version contains all dependencies
GCS_CONNECTOR_URL=https://repo1.maven.org/maven2/com/google/cloud/bigdataoss/gcs-connector/hadoop3-$GCS_VERSION
GCS_CONNECTOR_JAR=gcs-connector-hadoop3-$GCS_VERSION-shaded.jar

#Build the Spark Vector Provider jar file
cd "$PROJECT_ROOT"
sbt provider/assembly
cd "$SCRIPT_DIR"

#Download Spark distribution
curl "$SPARK_URL$SPARK_FOLDER.$SPARK_PKG" -o "$SPARK_FOLDER.$SPARK_PKG"
mkdir "$SPARK_FOLDER"
tar xfvz "$SPARK_FOLDER.$SPARK_PKG" --directory="$SPARK_FOLDER" --strip 1
rm "$SPARK_FOLDER.$SPARK_PKG"

#Download GCS jar and add it to Spark's jar folder
curl "$GCS_CONNECTOR_URL/$GCS_CONNECTOR_JAR" -o "./$SPARK_FOLDER/jars/$GCS_CONNECTOR_JAR"
chmod 644 "./$SPARK_FOLDER/jars/$GCS_CONNECTOR_JAR"

#Add Spark Vector Provider to Spark's jar folder
if [[ -d "$PATH_TO_PROVIDER_DIR" ]]; then
    find "$PATH_TO_PROVIDER_DIR" -regex ".*spark_vector_provider.*.jar" -exec cp {} "./$SPARK_FOLDER/jars/spark_vector_provider.jar" \;
fi

#Add additional jar files to Spark's jar folder
if [[ -d "jars" ]]; then
    find jars -name "*.jar" -exec cp {} "./$SPARK_FOLDER/jars/" \;
fi

#Build the final Docker image
cd "$SPARK_FOLDER"
# BUG FIX: `sed -i ''` is BSD/macOS-only; GNU sed parses the empty string as
# the sed script and fails. Write to a temp file and move it -- portable on
# both GNU and BSD sed.
sed -e "s/\(.*ARG java_image_tag=\).*/\1$JAVA_IMAGE/" \
    kubernetes/dockerfiles/spark/Dockerfile > Dockerfile.patched
mv Dockerfile.patched kubernetes/dockerfiles/spark/Dockerfile
docker build -t "$IMAGE_TAG" -f kubernetes/dockerfiles/spark/Dockerfile .
cd ..
rm -rf "$SPARK_FOLDER"
| true
|
c8d9755fb2980b685e938e66b3ebd0f9fee765ae
|
Shell
|
DQuomsieh/random-scripts
|
/cli_contact_manager/modifycontacts
|
UTF-8
| 5,306
| 3.75
| 4
|
[] |
no_license
|
#!/bin/bash
option=0
until [ "$option" = "4" ]; do
echo "1.) Delete a contact"
echo "2.) Delete all contacts"
echo "3.) Modify contact info"
echo "4.) Return"
echo -n "Please enter your choice: "
read option
echo ""
case $option in
1)
clear
echo "Please enter ID number"
echo " "
read id
if [[ ${#id} -eq 4 && $id =~ ^[0-9]+$ ]]; then
if grep -Fq "Contact ID: $id" contacts; then
sed -i.bak -e "/$id/I,+3 d" contacts >> contacts
echo -e "Contact with id: $id has been deleted\n"
else
echo -e "ERROR: ID could not be found or entered incorrectly!\n"
fi
else
echo -e "ERROR: Enter correct 4 digit ID number\n!"
fi
;;
2)
cp contacts contacts.bak
> contacts
clear
echo -e "All contacts deleted!\n"
;;
3)
option2=0
until [ "$option2" = "3" ]; do
clear
echo "1.) Phone Number"
echo "2.) Last Name"
echo "3.) Return"
echo -n "Please enter your choice: "
read option2
echo ""
case $option2 in
1)
clear
echo -e "Please enter ID number of user to change\n"
read id2
if [[ ${#id2} -eq 4 && $id2 =~ ^[0-9]+$ ]]; then
if grep -Fq "Contact ID: $id2" contacts; then
oldnum=$(grep -n2 "$id2" contacts | grep Phone | cut -d: -f2 | xargs | cut -d' ' -f2)
echo -e "Enter new Phone number in this format xxx-xxxx\n"
read number
if [[ "${number:3:1}" == "-" && ${number//-} =~ ^[0-9]+$ ]]; then
sed -i.bak "s/$oldnum/$number/g" contacts
echo -e "Phone number has been updated to $number\n!"
else
echo -e "ERROR: Incorrect format for phone\n"
fi
else
echo -e "ERROR: ID number does not exist!\n"
fi
else
echo -e "ERROR: Enter correct 4 digit ID number!\n"
fi
;;
2)
clear
echo "Please enter ID number of user to change"
read id2
if [[ ${#id2} -eq 4 && $id2 =~ ^[0-9]+$ ]]; then
if grep -Fq "Contact ID: $id2" contacts; then
echo Please enter Last name
fullname=$(cat contacts | grep -n1 $id2 | tail -1 | cut -d: -f2 | xargs)
firstname=$(cat contacts | grep -n1 $id2 | tail -1 | cut -d: -f2 | xargs | cut -d' ' -f1)
read lname
if [[ $lname =~ ^[A-Za-z]+$ ]]; then
newfullname="$firstname $lname"
cat contacts | sed -e "s/$fullname/$newfullname/" > tempcontacts
mv tempcontacts contacts
echo -e "Last name has been updated!\n"
else
echo -e "ERROR: Last name must contain only character!\n"
fi
else
echo -e "ERROR: ID number does not exist!\n"
fi
else
echo -e "ERROR: Enter correct 4 digit ID number!\n"
fi
;;
3)
break
;;
esac
done
;;
4)
break
;;
*) tput setf 3;echo "Please enter 1, 2 or 3";tput setf 3;
esac
done
| true
|
c17a3d849106ebe02d90fb0410c5292dbfdd0d09
|
Shell
|
bmink/redissms
|
/check_sender.sh
|
UTF-8
| 231
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Install this script in crontab to run every minute.
# Starts redissms_sender if no instance is currently running.
#
if [[ ! $(pidof redissms_sender) ]]; then
    logger "Starting redissms_sender"
    # BUG FIX: the original used '2>&1 >/dev/null', which points stderr at
    # the *old* stdout (the terminal / cron mail) and only then silences
    # stdout. '>/dev/null 2>&1' discards both streams as intended.
    REDIS_ADDR="xx.xx.xx.xx" \
    nohup /path/to/redissms_sender >/dev/null 2>&1 &
fi
| true
|
6110fd06052a159974710f251ec2f20f137270a5
|
Shell
|
kzsh/dotfiles
|
/common/scripts/bash/git/git-op.test.sh
|
UTF-8
| 96
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/bash
# Manual smoke test for git-op.sh: for every git repository found, print a
# marker line and list the contents of its .git directory.
. git-op.sh
# Callback invoked by forEachGitRepo -- presumably with each repo as the
# working directory (confirm against forEachGitRepo in git-op.sh).
operation() {
echo "is git"
ls .git
}
forEachGitRepo "operation"
| true
|
3e20a1f9a0314f7cb59e333d233def24c9321484
|
Shell
|
frizinak/provision.sh
|
/src/provision/go/init.sh
|
UTF-8
| 518
| 2.890625
| 3
|
[] |
no_license
|
#! /bin/bash
# provision.sh module: basic Go 1.12 install for the web user.
# Relies on helpers supplied by the provision framework (help_text, include,
# ensure_dir, set_line, fix_user_perms) and on ${webuser} being defined by
# the sourcing environment -- this file is not meant to run standalone.
help_text <<EOF
Basic golang install.
EOF
include web
# Install the toolchain only once; /usr/local/go acts as the sentinel.
if [ ! -d /usr/local/go ]; then
curl 'https://storage.googleapis.com/golang/go1.12.linux-amd64.tar.gz' | \
tar -C /usr/local -xzf -
fi
ensure_dir /home/${webuser}/web/go
ln -sf /usr/local/go/bin/go /usr/local/bin/go
# set_line presumably ensures the given line exists in the file exactly
# once -- TODO confirm against the framework implementation.
set_line "/home/${webuser}/.bashrc" 'export GOPATH=~/web/go; export GOBIN="$GOPATH/bin"; export GOROOT="/usr/local/go";'
set_line "/home/${webuser}/.bashrc" 'export PATH="$PATH:$GOBIN";'
fix_user_perms "${webuser}"
| true
|
21661d484385b5096ff387714e77477aa6055ee4
|
Shell
|
SODALITE-EU/xopera-rest-api
|
/xOpera-rest-blueprint/prerequisites.sh
|
UTF-8
| 1,946
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
# Install prerequisites for the xOpera REST API blueprint: pip3, xOpera in a
# virtualenv, required Ansible roles, the IaC modules repo, and a
# self-signed TLS key/certificate pair.
# NOTE(review): this script uses top-level `return` and activates a venv
# with `. .venv/bin/activate`, both of which only take effect when the
# script is *sourced* rather than executed -- confirm intended invocation.
PIP_INSTALLED=$(which pip3)
if [ -z "$PIP_INSTALLED" ]; then
echo
echo
# Any answer starting with y/Y counts as yes (prefix-strip comparison).
read -p "pip3 is not installed. Do you wish to update and install pip? " ynp
if [ "$ynp" != "${ynp#[Yy]}" ] ;then
echo
echo "Installing pip3"
else
echo
echo "Abort."
return
fi
sudo apt update
sudo apt install -y python3 python3-pip
fi
OPERA_INSTALLED=$(pip3 show opera)
if [ -z "$OPERA_INSTALLED" ]; then
echo
echo
read -p "xOpera is not installed. Do you wish to update and install xOpera and required packages? " yn
if [ "$yn" != "${yn#[Yy]}" ] ;then
echo
echo "Installing xOpera"
else
echo
echo "Abort."
return
fi
sudo apt update
sudo apt install -y python3-venv python3-wheel python-wheel-common
sudo apt install -y ansible
# Create and activate the venv in the caller's shell (requires sourcing).
python3 -m venv --system-site-packages .venv && . .venv/bin/activate
pip3 install opera
fi
echo
echo "Installing required Ansible roles"
ansible-galaxy install geerlingguy.docker --force
ansible-galaxy install geerlingguy.pip --force
ansible-galaxy install geerlingguy.repo-epel --force
echo
echo "Cloning modules"
rm -r -f modules/
git clone -b 3.4.1 https://github.com/SODALITE-EU/iac-modules.git modules/
echo "Please enter email for SODALITE certificate: "
read EMAIL_INPUT
export SODALITE_EMAIL=$EMAIL_INPUT
echo "Checking TLS key and certificate..."
# Key/cert are only generated if absent, so re-running keeps existing ones.
FILE_KEY=modules/docker/artifacts/ca.key
if [ -f "$FILE_KEY" ]; then
echo "TLS key file already exists."
else
echo "TLS key does not exist. Generating..."
openssl genrsa -out $FILE_KEY 4096
fi
FILE_CRT=modules/docker/artifacts/ca.crt
if [ -f "$FILE_CRT" ]; then
echo "TLS certificate file already exists."
else
echo "TLS certificate does not exist. Generating..."
openssl req -new -x509 -key $FILE_KEY -out $FILE_CRT -subj "/C=SI/O=XLAB/CN=$SODALITE_EMAIL" 2>/dev/null
fi
unset SODALITE_EMAIL
| true
|
8314dafe05c601d9bf6b938af3c5b5581b6aa2f4
|
Shell
|
personal-distro-configurator/personal-distro-configurator
|
/pdc/sh/steps/_execute.sh
|
UTF-8
| 361
| 3.171875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# shellcheck disable=SC2154
# Dispatch each configured execution line to its registered executor.
# For every entry in pdcyml_execute, its first word is matched against
# pdcyml_executors__command[i]; on a match, the paired function
# pdcyml_executors__function[i] is called with the rest of the line (the
# command word plus one space stripped via ${execution#...}).
# The pdcyml_* arrays are defined by the sourcing PDC framework, hence the
# SC2154 suppression above.
for execution in "${pdcyml_execute[@]}"; do
for i in ${!pdcyml_executors__command[*]}; do
if [ "${pdcyml_executors__command[$i]}" = "$(cut -d ' ' -f1 <<< "$execution")" ]; then
"${pdcyml_executors__function[$i]}" "${execution#${pdcyml_executors__command[$i]} }"
fi
done
done
| true
|
a511915782a543e04ec23471fc3b7fb911ce1275
|
Shell
|
DARMA-tasking/vt
|
/ci/clean_cpp.sh
|
UTF-8
| 174
| 2.90625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
# CI helper: clean the vt build tree.
#   $1 - source directory (exported as VT for the build system)
#   $2 - build directory root; the vt build lives in $2/vt
set -ex
source_dir=${1}
build_dir=${2}
export VT=${source_dir}
export VT_BUILD=${build_dir}/vt
pushd "$VT_BUILD"
# Drive the generator's 'clean' target through CMake (generator-agnostic).
cmake --build . --target clean
popd
| true
|
81ae79f9e930e3a55789effca96b14a2a2a6ae17
|
Shell
|
tortious/dotfiles-9
|
/install_scripts/install_hub.sh
|
UTF-8
| 735
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/sh
# Install GitHub's `hub` CLI from the release tarball (Linux only).
# Exits immediately if hub is already installed or on macOS (handled
# elsewhere, e.g. Homebrew).
# Fixes vs. original: abort on any failure (set -e), check the cd into the
# temp dir before extracting/removing, quote all expansions.
set -e

if [ -f /usr/local/bin/hub ]; then
  exit 0
elif [ "$(uname)" = "Darwin" ]; then
  exit 0
fi

sudo mkdir -p /usr/local/share/man/man1

VERSION='2.5.1'
TMPDIR=$(mktemp -d)
cd "$TMPDIR" || exit 1

URL="https://github.com/github/hub/releases/download/v$VERSION/hub-linux-amd64-$VERSION.tgz"
wget -q "$URL" -O "hub-linux-amd64-$VERSION.tgz"
tar zxf "hub-linux-amd64-$VERSION.tgz"

sudo cp "hub-linux-amd64-$VERSION/bin/hub" /usr/local/bin/hub
sudo cp "hub-linux-amd64-$VERSION/etc/hub.bash_completion.sh" /etc/bash_completion.d/hub.bash_completion.sh
sudo cp "hub-linux-amd64-$VERSION/etc/hub.zsh_completion" /usr/local/share/zsh/site-functions/_hub
sudo cp "hub-linux-amd64-$VERSION/share/man/man1/hub.1" /usr/local/share/man/man1/hub.1

# Leave the temp dir before deleting it.
cd /
rm -rf "$TMPDIR"
| true
|
cce67df59d34312d213409f645342b2d31fd53f2
|
Shell
|
Prometeo/scripts_post_installing_fedora
|
/script.sh
|
UTF-8
| 1,035
| 2.96875
| 3
|
[] |
no_license
|
#! /bin/bash
# Fedora post-install bootstrap: update the system, add RPM Fusion, install
# Ansible and run the post_install playbook, then set up a zsh environment
# (oh-my-zsh, antibody, powerline) and make zsh the login shell.
LIST_OF_APPS="ansible"
# update system
echo "Updating ..."
sudo dnf -y update
# install development tools
# sudo dnf -y groupinstall "Development Tools"
# install development libraries
# sudo dnf -y groupinstall "Development Libraries"
# add rpmfusion (free + nonfree), versioned to the running Fedora release
echo "Adding rpmfusion repository ..."
sudo dnf install https://download1.rpmfusion.org/free/fedora/rpmfusion-free-release-$(rpm -E %fedora).noarch.rpm https://download1.rpmfusion.org/nonfree/fedora/rpmfusion-nonfree-release-$(rpm -E %fedora).noarch.rpm
#install ansible
echo "Installing Ansible ..."
sudo dnf install -y $LIST_OF_APPS
echo "Running ansible playbook ..."
ansible-playbook -K post_install.yml
# oh-my-zsh
# NOTE(review): the two installers below pipe remote scripts straight into a
# shell; fine for a personal setup script, but it runs unreviewed code.
echo "Installing oh-my-zsh ..."
sh -c "$(curl -fsSL https://raw.github.com/robbyrussell/oh-my-zsh/master/tools/install.sh)"
echo "Installing antibody .."
curl -sL git.io/antibody | sh -s
# installing powerline zsh
echo "Installing powerline with pip ..."
pip install powerline-status --user
echo "Setting zsh as default shell ..."
chsh -s /bin/zsh
| true
|
e53b13b75ad7a52fdbbcf69039769dd293e4d5ab
|
Shell
|
soda-research/soda-poc
|
/kolla/etc/kolla/config/trove/cloudinit/mariadb.cloudinit
|
UTF-8
| 1,889
| 3.171875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Trove guest cloud-init for MariaDB instances.
# <...> tokens (SSH keys, OSC IPs) are template placeholders substituted
# before this script runs.
# Steps: install SSH keys, sync the trove code from the OSC node, restart
# the guestagent with the synced tree, wait for DB preparation, then set up
# monasca monitoring.

# add osc public key
mkdir -p /home/trove/.ssh/
cat <<EOT >> /home/trove/.ssh/authorized_keys
<SSH_PUBLIC_KEY>
EOT

# add osc private key
mkdir -p /root/.ssh/
cat <<EOT >> /root/.ssh/id_rsa
<SSH_PRIVATE_KEY>
EOT
chmod 600 /root/.ssh/id_rsa

# link guestagent config
ln -s /etc/trove/conf.d/trove-guestagent.conf /etc/trove/trove-guestagent.conf

# synchronize trove code from osc
rsync \
  -e "ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" -a \
  --exclude .git \
  --exclude-from /opt/trove/.gitignore \
  root@<OSC_INTERNAL_IP>:/opt/stack/trove/ /opt/trove-rsync
chown -R trove /opt/trove-rsync

# swap in the synced tree while the guestagent is stopped
systemctl stop trove-guest
if [[ -d /opt/trove-rsync ]]; then
  mv /opt/trove /opt/trove-orig
  mv /opt/trove-rsync /opt/trove
fi
systemctl start trove-guest

# wait until guestagent completes preparing database service
while [ ! -f /home/trove/.guestagent.prepare.end ]; do sleep 2; done

# create user for monitoring agent
mysql -e "CREATE USER 'monitoring'@'localhost' IDENTIFIED BY 'monitoring';"
# grant access for monitoring agent
mysql -e "GRANT SELECT, REPLICATION CLIENT, SHOW DATABASES, SUPER, PROCESS ON *.* TO 'monitoring'@'localhost';"

# fetch trove instance id
guest_id=$(awk -F "=" '/guest_id/ {print $2}' /etc/trove/conf.d/guest_info.conf)

# init monasca agent
monasca-setup \
  --username monitoring \
  --password monitoring \
  --project_name monasca_control_plane \
  --project_domain_name default \
  --user_domain_name default \
  --keystone_url http://<OSC_INTERNAL_VIP>:5000/v3 \
  --monasca_url http://<OSC_INTERNAL_VIP>:8070/v2.0 \
  --system_only \
  --service trove \
  --dimensions resource_id:${guest_id},resource_type:trove.instance \
  --verbose

# setup mysql monitoring plugin
# BUG FIX: the '--detection_args' line was missing its trailing backslash,
# so '--verbose' was executed as a separate (failing) command and never
# reached monasca-setup.
monasca-setup \
  --detection_plugins mysql \
  --detection_args 'host=127.0.0.1 user=monitoring password=monitoring' \
  --verbose
| true
|
c294db3082acc0a1af52f03a677687e941534cbd
|
Shell
|
roadhump/dotfiles
|
/zsh/aliases.zsh
|
UTF-8
| 2,733
| 3.015625
| 3
|
[] |
no_license
|
# zsh aliases and helpers (sourced by the zsh config; several rely on
# macOS-only tools such as `open` and `pbcopy`).
alias reload!=src
alias r!='src'
alias c='clear'
alias pubkey="more ~/.ssh/id_rsa.pub | pbcopy | echo '=> Public key copied to pasteboard.'"
alias sourcetree='open -a SourceTree'
alias chrome="open -a google\ chrome"
alias finder='open -a Finder'
alias gitme="open 'http://github.com/roadhump'"
alias vscode="'/Applications/Visual Studio Code.app/Contents/Resources/app/bin/code'"
# Case-insensitive filename search under the current directory.
function findfile() {
find . -iname "*$1*" -ls
}
# Open args (or the current directory) in a new Sublime Text window.
function subln() {
if [ $# -eq 0 ]; then
subl -n .
else
subl -n "$@"
fi
}
# Open args (or the current directory) in SourceTree.
function sourcetreen() {
if [ $# -eq 0 ]; then
sourcetree .
else
sourcetree "$@"
fi
}
alias st=sourcetreen
# mcd DIR -- create DIR (with any missing parents) and change into it.
mcd() {
  mkdir -p "$1" && cd "$1"
}
# Build the git segment of the zsh prompt from the zsh-git-prompt plugin's
# GIT_* variables (populated by precmd_update_git_vars) and the
# ZSH_THEME_GIT_PROMPT_* theme symbols. Emits nothing outside a git repo.
git_super_status() {
precmd_update_git_vars
if [ -n "$__CURRENT_GIT_STATUS" ]; then
STATUS="$ZSH_THEME_GIT_PROMPT_PREFIX$ZSH_THEME_GIT_PROMPT_BRANCH$GIT_BRANCH%{${reset_color}%}"
# Commits behind/ahead of upstream.
if [ "$GIT_BEHIND" -ne "0" ]; then
STATUS="$STATUS$ZSH_THEME_GIT_PROMPT_BEHIND$GIT_BEHIND %{${reset_color}%}"
fi
if [ "$GIT_AHEAD" -ne "0" ]; then
STATUS="$STATUS$ZSH_THEME_GIT_PROMPT_AHEAD$GIT_AHEAD %{${reset_color}%}"
fi
STATUS="$STATUS$ZSH_THEME_GIT_PROMPT_SEPARATOR"
# Working-tree counters: staged, conflicted, changed, untracked.
if [ "$GIT_STAGED" -ne "0" ]; then
STATUS="$STATUS$ZSH_THEME_GIT_PROMPT_STAGED$GIT_STAGED%{${reset_color}%}"
fi
if [ "$GIT_CONFLICTS" -ne "0" ]; then
STATUS="$STATUS$ZSH_THEME_GIT_PROMPT_CONFLICTS$GIT_CONFLICTS%{${reset_color}%}"
fi
if [ "$GIT_CHANGED" -ne "0" ]; then
STATUS="$STATUS$ZSH_THEME_GIT_PROMPT_CHANGED$GIT_CHANGED%{${reset_color}%}"
fi
if [ "$GIT_UNTRACKED" -ne "0" ]; then
STATUS="$STATUS$ZSH_THEME_GIT_PROMPT_UNTRACKED$GIT_UNTRACKED%{${reset_color}%}"
fi
# All counters zero -> show the "clean" symbol instead.
if [ "$GIT_CHANGED" -eq "0" ] && [ "$GIT_CONFLICTS" -eq "0" ] && [ "$GIT_STAGED" -eq "0" ] && [ "$GIT_UNTRACKED" -eq "0" ]; then
STATUS="$STATUS$ZSH_THEME_GIT_PROMPT_CLEAN"
fi
STATUS="$STATUS%{${reset_color}%}$ZSH_THEME_GIT_PROMPT_SUFFIX"
echo "$STATUS"
fi
}
# wh NAME -- describe NAME like `which`, but for shell functions also print
# the function body. Uses zsh-only `type` flags (-w/-s/-f); not bash-portable.
function wh() {
local input="$1";
local inputType="$(type -w $input)"
if [[ $inputType == *"function" ]]; then
type -s $input && type -f $input
else
type -s $input
fi
}
alias dotenv=source_env
alias de=source_env
alias lh='ls -lAth'
alias lah='ls -lAth'
alias docker-gc='docker run --rm -v /var/run/docker.sock:/var/run/docker.sock -v /etc:/etc spotify/docker-gc'
alias venv='source .venv/bin/activate'
# Print the total size/object count summary for an S3 prefix ($1).
function aws-s3-total() {
aws s3 ls --summarize --human-readable --recursive $1 | grep 'Total'
}
alias ssh-init='ssh-add ~/.ssh/id_rsa'
| true
|
aa50b89c0b841fad0effd423eb2fadf5f6122b15
|
Shell
|
olethrosdc/beliefbox
|
/src/algorithms/tests/scripts/pareto_ucb.sh
|
UTF-8
| 1,428
| 2.6875
| 3
|
[] |
no_license
|
#! /bin/bash
#PBS -S /bin/bash
# use one node with 8 cores:
#PBS -lnodes=1:ppn=8
# job requires at most 16 hours, 0 minutes
# and 0 seconds wallclock time
#PBS -lwalltime=48:00:00
# cd to the directory where the program is to be called:
exc=$HOME/projects/beliefbox/src/algorithms/tests/bin/pareto_ucb
actions=16
outcomes=4
horizon=1000000
runs=10
for outcomes in 4 16 256
do
for actions in 4 16 256
do
resdir=$HOME/results/pareto_ucb/dispersed/${actions}A_${outcomes}S_${horizon}T_WUCB/
mkdir -p $resdir
results=$resdir/regret.out
errors=$resdir/errors.out
echo $actions $outcomes $horizon $runs $epsilon
time ${exc} $actions $outcomes $horizon $runs 2 0.0 > $results 2> $errors
resdir=$HOME/results/pareto_ucb/dispersed/${actions}A_${outcomes}S_${horizon}T_HUCB/
mkdir -p $resdir
results=$resdir/regret.out
errors=$resdir/errors.out
echo $actions $outcomes $horizon $runs $epsilon
time ${exc} $actions $outcomes $horizon $runs 3 0.0 > $results 2> $errors
for epsilon in 0.0 0.1 1.0
do
resdir=$HOME/results/pareto_ucb/dispersed/${actions}A_${outcomes}S_${horizon}T_${epsilon}E/
mkdir -p $resdir
results=$resdir/regret.out
errors=$resdir/errors.out
echo $actions $outcomes $horizon $runs $epsilon
time ${exc} $actions $outcomes $horizon $runs 1 $epsilon > $results 2> $errors
done
done
done
| true
|
5837c6250afd1ae85b61f9f3b070f11146ace2d3
|
Shell
|
kangdazhi/pci-passthrough
|
/code/misc/sum_vm_exit_percentage_by_reason.sh
|
UTF-8
| 350
| 4.09375
| 4
|
[] |
no_license
|
#!/bin/bash
# For a given VM-exit reason, sum up its percentages across a KVM profile.
#
# Usage: sum_vm_exit_percentage_by_reason.sh <VM EXIT REASON> <KVM PROFILE>
# Lines matching the reason (case-insensitively) are expected to carry the
# percentage, e.g. "12.5%", in their first field.

# Get the command line arguments.
if [ $# -ne 2 ]; then
    echo "Usage: $0 <VM EXIT REASON> <KVM PROFILE>" >&2
    exit 1
fi

reason=$1
kvm_profile=$2

# Sum the percentage of VM exits.
# Fixes: quote "${kvm_profile}" (was unquoted -> word splitting/globbing),
# and strip the '%' and sum in a single awk pass instead of
# grep | awk | tr | awk.
grep -ie "${reason}" "${kvm_profile}" | awk '{sub(/%/, "", $1); sum += $1} END {print sum}'
| true
|
c13622274399239aad997abf5f16a788836fa52a
|
Shell
|
richardc/marionette-collective
|
/ext/aio/suse/mcollective.init
|
UTF-8
| 3,978
| 3.703125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
#
# mcollective Application Server for STOMP based agents
#
# chkconfig: 345 24 76
#
# description: mcollective lets you build powerful Stomp compatible middleware clients in ruby without having to worry too
# much about all the setup and management of a Stomp connection, it also provides stats, logging and so forth
# as a bonus.
#
### BEGIN INIT INFO
# Provides: mcollective
# Required-Start: $remote_fs
# Required-Stop: $remote_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Start daemon at boot time
# Description: Enable service provided by daemon.
### END INIT INFO
# Shell functions sourced from /etc/rc.status:
# rc_check check and set local and overall rc status
# rc_status check and set local and overall rc status
# rc_status -v ditto but be verbose in local rc status
# rc_status -v -r ditto and clear the local rc status
# rc_failed set local and overall rc status to failed
# rc_reset clear local rc status (overall remains)
# rc_exit exit appropriate to overall rc status
# Pull in SUSE rc helper functions and optional per-host overrides.
[ -f /etc/rc.status ] && . /etc/rc.status
[ -f /etc/sysconfig/mcollective ] && . /etc/sysconfig/mcollective
# Defaults; each can be overridden via /etc/sysconfig/mcollective above.
desc=${DESC:-mcollective daemon}
daemon=${DAEMON:-/opt/puppetlabs/puppet/bin/mcollectived}
name=${NAME:-mcollectived}
pidfile=${PIDFILE:-/var/run/puppetlabs/mcollectived.pid}
daemon_opts=${DAEMON_OPTS:---pid ${pidfile}}
# First reset status of this service
rc_reset
# Return values acc. to LSB for all commands but status:
# 0	  - success
# 1       - misc error
# 2       - invalid or excess args
# 3       - unimplemented feature (e.g. reload)
# 4       - insufficient privilege
# 5       - program not installed
# 6       - program not configured
#
# Note that starting an already running service, stopping
# or restarting a not-running service as well as the restart
# with force-reload (in case signalling is not supported) are
# considered a success.
case "$1" in
start)
echo -n "Starting ${desc}: "
## Start daemon with startproc(8). If this fails
## the echo return value is set appropriate.
# startproc should return 0, even if service is
# already running to match LSB spec.
startproc -p $pidfile $daemon $daemon_opts
# Remember status and be verbose
rc_status -v
;;
stop)
echo -n "Stopping ${desc}: "
## Stop daemon with killproc(8) and if this fails
## set echo the echo return value.
killproc -p $pidfile $daemon && rm -f ${pidfile}
# Remember status and be verbose
rc_status -v
;;
try-restart|force-reload)
## Stop the service and if this succeeds (i.e. the
## service was running before), start it again.
$0 status &> /dev/null
if test $? = 0; then
$0 restart
else
rc_reset	# Not running is not a failure.
fi
# Remember status and be quiet
rc_status
;;
restart)
## Stop the service and regardless of whether it was
## running or not, start it again.
$0 stop
sleep 1
$0 start
# Remember status and be quiet
rc_status
;;
status)
echo -n "Checking ${desc}: "
## Check status with checkproc(8), if process is running
## checkproc will return with exit status 0.
# Status has a slightly different for the status command:
# 0 - service running
# 1 - service dead, but /var/run/  pid  file exists
# 2 - service dead, but /var/lock/ lock file exists
# 3 - service not running
# NOTE: checkproc returns LSB compliant status values.
checkproc -p ${pidfile} ${daemon}
rc_status -v
;;
*)
echo "Usage: $0 {start|stop|status|try-restart|restart|force-reload}"
exit 1
;;
esac
rc_exit
| true
|
685cf168729a70bb63d31d98153f8abb1b8f26c1
|
Shell
|
BrunoSilvaFreire/polybar-module-news
|
/install.sh
|
UTF-8
| 438
| 3.046875
| 3
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/sh
# this script installs the polybar module news
# Copies the module's config files, scripts and feed list into
# ~/.config/polybar/scripts/news, then appends the module definition to the
# user's polybar config (or prints it for manual addition if none exists).
destdir=${HOME}/.config/polybar/scripts/news
polybar_conf=${HOME}/.config/polybar/config
# install -d creates the destination dir; -b keeps backups of replaced files.
install -d "${destdir}"
install -b -m 644 ./*.conf ./*.py rss.feeds "${destdir}"
install -m 554 news.sh "${destdir}"
if [ -f "${polybar_conf}" ]; then
cat polybar.conf >> "${polybar_conf}"
else
echo "Add the following lines to your polybar configuration:"
cat polybar.conf
fi
| true
|
fa05e96ac0058f37ad20034c290b058ed16436a4
|
Shell
|
Hardeep18/shell_script_01
|
/env.sh
|
UTF-8
| 339
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
# Print a short summary of the current shell environment
# (user, home, history settings, terminal type).
clear
printf '%s\n' "This script will give us the enviornment Information"
printf '%s\n' "===================================================="
printf '\n'
printf '%s\n' "Hello username: $USER"
printf '\n'
printf '%s\n' "Your home Directory is $HOME"
printf '\n'
printf '%s\n' "Your History file will Ignore: $HISTCONTROL"
printf '\n'
printf '%s\n' "Your Terminal session type is $TERM"
printf '\n'
| true
|
6f321c364926c3ae8ee5bf070057790c87d476fa
|
Shell
|
anlzou/linux-shell_test
|
/centos-shell/ruanyif/shell_variable.sh
|
UTF-8
| 8,466
| 3.84375
| 4
|
[] |
no_license
|
#环境变量是 Bash 环境自带的变量,进入 Shell 时已经定义好了,可以直接使用。它们通常是系统定义好的,也可以由用户从父 Shell 传入子 Shell。
#env命令或printenv命令,可以显示所有环境变量。
#下面是一些常见的环境变量。
#BASHPID:Bash 进程的进程 ID。
#BASHOPTS:当前 Shell 的参数,可以用shopt命令修改。
#DISPLAY:图形环境的显示器名字,通常是:0,表示 X Server 的第一个显示器。
#EDITOR:默认的文本编辑器。
#HOME:用户的主目录。
#HOST:当前主机的名称。
#IFS:词与词之间的分隔符,默认为空格。
#LANG:字符集以及语言编码,比如zh_CN.UTF-8。
#PATH:由冒号分开的目录列表,当输入可执行程序名后,会搜索这个目录列表。
#PS1:Shell 提示符。
#PS2: 输入多行命令时,次要的 Shell 提示符。
#PWD:当前工作目录。
#RANDOM:返回一个0到32767之间的随机数。
#SHELL:Shell 的名字。
#SHELLOPTS:启动当前 Shell 的set命令的参数,参见《set 命令》一章。
#TERM:终端类型名,即终端仿真器所用的协议。
#UID:当前用户的 ID 编号。
#USER:当前用户的用户名。
#很多环境变量很少发生变化,而且是只读的,可以视为常量。由于它们的变量名全部都是大写,所以传统上,如果用户要自己定义一个常量,也会使用全部大写的变量名。
#注意,Bash 变量名区分大小写,HOME和home是两个不同的变量。
#查看单个环境变量的值,可以使用printenv命令或echo命令。
#printenv PATH
# 或者
#echo $PATH
#自定义变量是用户在当前 Shell 里面自己定义的变量,必须先定义后使用,而且仅在当前 Shell 可用。一旦退出当前 Shell,该变量就不存在了。
#set命令可以显示所有变量(包括环境变量和自定义变量),以及所有的 Bash 函数。
#set
#2.创建变量
#用户创建变量的时候,变量名必须遵守下面的规则。
#1.字母、数字和下划线字符组成。
#2.第一个字符必须是一个字母或一个下划线,不能是数字。
#3.不允许出现空格和标点符号。
#变量声明的语法如下。
#variable=value
##上面命令中,等号左边是变量名,右边是变量。注意,等号两边不能有空格
##如果变量的值包含空格,则必须将值放在引号中。
##Bash 没有数据类型的概念,所有的变量值都是字符串。
#下面是一些自定义变量的例子:
#a=z # 变量 a 赋值为字符串 z
#b="a string" # 变量值包含空格,就必须放在引号里面
#c="a string and $b" # 变量值可以引用其他变量的值
#d="\t\ta string\n" # 变量值可以使用转义字符
#e=$(ls -l foo.txt) # 变量值可以是命令的执行结果
#f=$((5 * 7)) # 变量值可以是数学运算的结果
##变量可以重复赋值,后面的赋值会覆盖前面的赋值。
#3.读取变量
#读取变量的时候,直接在变量名前加上$就可以了
#a=anlzou
#b=$a
#echo $b
##anlzou
#如果变量的值本身也是变量,可以使用${!varname}的语法,读取最终的值。
#user=USER
#echo ${!user}
##root
#读取变量的时候,变量名也可以使用花括号{}包围,比如$a也可以写成${a}。这种写法可以用于变量名与其他字符连用的情况。
#a=foo
#echo ${a}_file
##foo_file
#4.删除变量
#unset命令用来删除一个变量。
#unset NAME
#这个命令不是很有用。因为不存在的 Bash 变量一律等于空字符串,所以即使unset命令删除了变量,还是可以读取这个变量,值为空字符串。
#所以,删除一个变量,也可以将这个变量设成空字符串。
#foo=''
#or
#foo=
#5.输出变量,export 命令
#用户创建的变量仅可用于当前 Shell,子 Shell 默认读取不到父 Shell 定义的变量。为了把变量传递给子 Shell,需要使用export命令。这样输出的变量,对于子 Shell 来说就是环境变量。
#export命令用来向子 Shell 输出变量
#NAME=foo
#export NAME
#or
#export NAME=value
##上面命令执行后,当前 Shell 及随后新建的子 Shell,都可以读取变量$NAME。
##子 Shell 如果修改继承的变量,不会影响父 Shell。
# 输出变量 $foo
# export foo=bar
# 新建子 Shell
# bash
# 读取 $foo
# echo $foo
##bar
# 修改继承的变量
# foo=baz
# 退出子 Shell
## exit
# 读取 $foo
# echo $foo
##bar
#6.特殊变量
#Bash 提供一些特殊变量。这些变量的值由 Shell 提供,用户不能进行赋值。
#(1)$?为上一个命令的退出码,用来判断上一个命令是否执行成功。返回值是0,表示上一个命令执行成功;如果是非零,上一个命令执行失败。
#echo $?
#(2)$$为当前 Shell 的进程 ID。
#echo $$
#这个特殊变量可以用来命名临时文件。
#LOGFILE=/tmp/output_log.$$
#(3)$_为上一个命令的最后一个参数。
#grep dictionary /usr/share/dict/words
##dictionary
#echo $_
##/usr/share/dict/words
#(4)$!为最近一个后台执行的异步命令的进程 ID。
#firefox &
##[1] 11064
#echo $!
##11064
#(5)$0为当前 Shell 的名称(在命令行直接执行时)或者脚本名(在脚本中执行时)。
#echo $0
##bash
#(6)$-为当前 Shell 的启动参数。
#echo $-
##himBH
#(7)$@和$#表示脚本的参数数量
#7.变量的默认值
#Bash 提供四个特殊语法,跟变量的默认值有关,目的是保证变量不为空。
#(1)
#${varname:-word}
#上面语法的含义是,如果变量varname存在且不为空,则返回它的值,否则返回word。它的目的是返回一个默认值,比如${count:-0}表示变量count不存在时返回0。
#(2)
#${varname:=word}
#上面语法的含义是,如果变量varname存在且不为空,则返回它的值,否则将它设为word,并且返回word。它的目的是设置变量的默认值,比如${count:=0}表示变量count不存在时返回0,且将count设为0。
#(3)
#${varname:+word}
#上面语法的含义是,如果变量名存在且不为空,则返回word,否则返回空值。它的目的是测试变量是否存在,比如${count:+1}表示变量count存在时返回1(表示true),否则返回空值。
#(4)
#${varname:?message}
#上面语法的含义是,如果变量varname存在且不为空,则返回它的值,否则打印出varname: message,并中断脚本的执行。如果省略了message,则输出默认的信息“parameter null or not set.”。它的目的是防止变量未定义,比如${count:?"undefined!"}表示变量count未定义时就中断执行,抛出错误,返回给定的报错信息undefined!。
#上面四种语法如果用在脚本中,变量名的部分可以用到数字1到9,表示脚本的参数。
#filename=${1:?"filename missing."}
#上面代码出现在脚本中,1表示脚本的第一个参数。如果该参数不存在,就退出脚本并报错。
#8.declare 命令
#declare命令可以声明一些特殊类型的变量,为变量设置一些限制,比如声明只读类型的变量和整数类型的变量。
#declare OPTION VARIABLE=value
#declare命令的主要参数(OPTION)如下:
#-a:声明数组变量。
#-f:输出所有函数定义。
#-F:输出所有函数名。
#-i:声明整数变量。
#-l:声明变量为小写字母。
#-p:查看变量信息。
#-r:声明只读变量。
#-u:声明变量为大写字母。
#-x:该变量输出为环境变量。
##declare命令如果用在函数中,声明的变量只在函数内部有效,等同于local命令。
#不带任何参数时,declare命令输出当前环境的所有变量,包括函数在内,等同于不带有任何参数的set命令。
##详情:https://wangdoc.com/bash/variable.html#declare-%E5%91%BD%E4%BB%A4
#9.readonly 命令
#readonly命令等同于declare -r,用来声明只读变量,不能改变变量值,也不能unset变量。
#readonly命令有三个参数:
#-f:声明的变量为函数名。
#-p:打印出所有的只读变量。
#-a:声明的变量为数组。
#10.let 命令
#let命令声明变量时,可以直接执行算术表达式。
#let命令的参数表达式如果包含空格,就需要使用引号。
#let可以同时对多个变量赋值,赋值表达式之间使用空格分隔。
#let foo=1+2
#let "v1 = 1" "v2 = v1++"
| true
|
0ab7cb4647f2e4385b5a0be2c39602f5af0f150b
|
Shell
|
BarneyBuffet/docker-btc-rpc-explorer
|
/entrypoint.sh
|
UTF-8
| 12,784
| 3.890625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Container entrypoint for btc-rpc-explorer: templates the env-file config
# from environment variables before launching the app.
set -eo pipefail
# Set config file variable.
# CONFIG_DIR is expected to be provided by the image environment -- TODO
# confirm it is always set (set -u is not enabled, so an empty value would
# silently yield "/btc-rpc-explorer.env").
EXP_CONFIG_FILE=${CONFIG_DIR}/btc-rpc-explorer.env
##############################################################################
## Map container runtime PUID & PGID
##############################################################################
# Adjust the `nonroot` account to the uid/gid requested via the PUID/PGID
# env vars (defaults 10000/10001), then normalise ownership and permissions
# on the config/app directories so the service can read and write them.
map_user(){
    ## Based on:
    ## https://github.com/magenta-aps/docker_user_mapping/blob/master/user-mapping.sh
    ## https://github.com/linuxserver/docker-baseimage-alpine/blob/3eb7146a55b7bff547905e0d3f71a26036448ae6/root/etc/cont-init.d/10-adduser
    ## https://github.com/haugene/docker-transmission-openvpn/blob/master/transmission/userSetup.sh

    ## Set puid & pgid to run container, fallback to defaults
    PUID=${PUID:-10000}
    PGID=${PGID:-10001}

    ## If uid or gid is different to existing modify nonroot user to suit
    if [ ! "$(id -u nonroot)" -eq "$PUID" ]; then usermod -o -u "$PUID" nonroot ; fi
    if [ ! "$(id -g nonroot)" -eq "$PGID" ]; then groupmod -o -g "$PGID" nonroot ; fi
    # BUG FIX: message previously said "Tor" (copy-paste from another image);
    # this is the btc-rpc-explorer entrypoint.
    echo "btc-rpc-explorer set to run as nonroot with uid:$(id -u nonroot) & gid:$(id -g nonroot)"

    ## Make sure volumes directories match nonroot
    # ${CONFIG_DIR} is now quoted: unquoted it would word-split/glob if the
    # path ever contained spaces or special characters.
    chown -R nonroot:nonroot \
        "${CONFIG_DIR}" \
        /etc/btc-rpc-explorer \
        /app
    echo "Enforced ownership of ${CONFIG_DIR} to nonroot:nonroot"

    ## Make sure volume permissions are correct
    chmod -R go=rX,u=rwX \
        "${CONFIG_DIR}" \
        /etc/btc-rpc-explorer \
        /app
    echo "Enforced permissions for ${CONFIG_DIR} to go=rX & u=rwX"

    ## Export to the rest of the bash script
    export PUID
    export PGID
}
##############################################################################
## Display generated btc-rpc-explorer config in log
##############################################################################
# Dump the templated env file between START/END banners so the effective
# configuration is visible in `docker logs`.
echo_config(){
    echo -e "\\n====================================- START ${EXP_CONFIG_FILE} -====================================\\n"
    # BUG FIX: quote the expansion -- unquoted, a config path containing
    # spaces or glob characters would be split/expanded before cat saw it.
    cat "$EXP_CONFIG_FILE"
    echo -e "\\n=====================================- END ${EXP_CONFIG_FILE} -=====================================\\n"
}
##############################################################################
## TEMPLATE CONFIG
## Template config file based on environmental variations
##############################################################################
## Helper: enable one setting in $EXP_CONFIG_FILE when its override is set.
##   $1 - value supplied via the environment (empty/unset -> leave untouched)
##   $2 - config key; the commented line "#<key>=..." becomes "<key>=<value>"
##   $3 - human-readable label used in the log line
apply_setting(){
if [[ -n "$1" ]]; then
sed -i "/#$2=.*/c\\$2=$1" "$EXP_CONFIG_FILE"
echo "Updated $3..."
fi
}
template_config(){
## Optional logging
apply_setting "${DEBUGGER}" "DEBUG" "debug setting"
## The explorer base url
apply_setting "${BASEURL}" "BTCEXP_BASEURL" "baseurl"
## The active coin
apply_setting "${COIN}" "BTCEXP_COIN" "coin"
## Explorer host IP for binding
apply_setting "${HOST}" "BTCEXP_HOST" "explorer host IP for binding"
## Explorer host port for binding
apply_setting "${PORT}" "BTCEXP_PORT" "explorer host port for binding"
## Bitcoin core RPC IP address
apply_setting "${BITCOIND_HOST}" "BTCEXP_BITCOIND_HOST" "bitcoin core rpc host IP"
## Bitcoin core RPC port
apply_setting "${BITCOIND_PORT}" "BTCEXP_BITCOIND_PORT" "bitcoin core rpc port"
## Bitcoin core RPC username
apply_setting "${BITCOIND_USER}" "BTCEXP_BITCOIND_USER" "bitcoin core rpc username"
## Bitcoin core RPC password
apply_setting "${BITCOIND_PASS}" "BTCEXP_BITCOIND_PASS" "bitcoin core rpc password"
## Bitcoin core RPC cookie location
apply_setting "${BITCOIND_COOKIE}" "BTCEXP_BITCOIND_COOKIE" "bitcoin core rpc cookie location"
## Bitcoin core RPC timeout on call
apply_setting "${BITCOIND_RPC_TIMEOUT}" "BTCEXP_BITCOIND_RPC_TIMEOUT" "bitcoin core rpc timeout"
## Bitcoin API to use when looking up tx lists and balances
apply_setting "${ADDRESS_API}" "BTCEXP_ADDRESS_API" "address api for tx lists and balances"
## Optional Electrum Protocol Servers
apply_setting "${ELECTRUM_SERVERS}" "BTCEXP_ELECTRUM_SERVERS" "optional Electrum Protocol Servers"
apply_setting "${ELECTRUM_TXINDEX}" "BTCEXP_ELECTRUM_TXINDEX" "optional Electrum TX Index"
## Number of concurrent RPC requests
apply_setting "${RPC_CONCURRENCY}" "BTCEXP_RPC_CONCURRENCY" "Bitcoin Core api concurrency"
## Disable app's in-memory RPC caching to reduce memory usage
apply_setting "${NO_INMEMORY_RPC_CACHE}" "BTCEXP_NO_INMEMORY_RPC_CACHE" "in-memory caching option"
## Optional redis server for RPC caching; the URL is built from host + port
if [[ -n "${REDIS_HOST}" ]]; then
apply_setting "redis://${REDIS_HOST}:${REDIS_PORT}" "BTCEXP_REDIS_URL" "redis server location"
fi
## Cookie hash
apply_setting "${COOKIE_SECRET}" "BTCEXP_COOKIE_SECRET" "cookie secret"
## Whether public-demo aspects of the site are active
apply_setting "${DEMO}" "BTCEXP_DEMO" "demo setting"
## Set to false to enable resource-intensive features
apply_setting "${SLOW_DEVICE_MODE}" "BTCEXP_SLOW_DEVICE_MODE" "slow device mode"
## Privacy mode disables: Exchange-rate queries, IP-geolocation queries
apply_setting "${PRIVACY_MODE}" "BTCEXP_PRIVACY_MODE" "privacy mode"
## Don't request currency exchange rates
apply_setting "${NO_RATES}" "BTCEXP_NO_RATES" "no rates setting"
## Password protection for site via basic auth (enter any username, only the password is checked)
apply_setting "${BASIC_AUTH_PASSWORD}" "BTCEXP_BASIC_AUTH_PASSWORD" "basic authentication password"
## File containing a SSO token
apply_setting "${SSO_TOKEN_FILE}" "BTCEXP_SSO_TOKEN_FILE" "SSO token location"
## URL of an optional external SSO provider
apply_setting "${SSO_LOGIN_REDIRECT_URL}" "BTCEXP_SSO_LOGIN_REDIRECT_URL" "SSO redirect url"
## Enable to allow access to all RPC methods
apply_setting "${RPC_ALLOWALL}" "BTCEXP_RPC_ALLOWALL" "allow all RPC methods"
## Custom RPC method blacklist
apply_setting "${RPC_BLACKLIST}" "BTCEXP_RPC_BLACKLIST" "RPC method blacklist"
## Google analytics API key
apply_setting "${GANALYTICS_TRACKING}" "BTCEXP_GANALYTICS_TRACKING" "Google analytics tracking key"
## Sentry URL and API key
apply_setting "${SENTRY_URL}" "BTCEXP_SENTRY_URL" "Sentry URL API and key"
## IP Stack API Key
apply_setting "${IPSTACK_APIKEY}" "BTCEXP_IPSTACK_APIKEY" "IP Stack API key"
## Map Box API Key
apply_setting "${MAPBOX_APIKEY}" "BTCEXP_MAPBOX_APIKEY" "Map Box API key"
## Optional value for a directory for filesystem caching
apply_setting "${FILESYSTEM_CACHE_DIR}" "BTCEXP_FILESYSTEM_CACHE_DIR" "cache directory"
## Optional analytics: domain and script URL are only applied together
if [[ -n "${PLAUSIBLE_ANALYTICS_DOMAIN}" ]] && [[ -n "${PLAUSIBLE_ANALYTICS_SCRIPT_URL}" ]]; then
sed -i "/#BTCEXP_PLAUSIBLE_ANALYTICS_DOMAIN=.*/c\BTCEXP_PLAUSIBLE_ANALYTICS_DOMAIN=${PLAUSIBLE_ANALYTICS_DOMAIN}" "$EXP_CONFIG_FILE"
sed -i "/#BTCEXP_PLAUSIBLE_ANALYTICS_SCRIPT_URL=.*/c\BTCEXP_PLAUSIBLE_ANALYTICS_SCRIPT_URL=${PLAUSIBLE_ANALYTICS_SCRIPT_URL}" "$EXP_CONFIG_FILE"
echo "Updated Plausible analytics..."
fi
## Optional value for "max_old_space_size"
apply_setting "${OLD_SPACE_MAX_SIZE}" "BTCEXP_OLD_SPACE_MAX_SIZE" "max old space"
## The number of recent blocks to search for transactions when txindex is disabled
apply_setting "${NOTXINDEX_SEARCH_DEPTH}" "BTCEXP_NOTXINDEX_SEARCH_DEPTH" "recent block search depth"
## UI theme
apply_setting "${UI_THEME}" "BTCEXP_UI_THEME" "UI theme"
## Set the number of recent blocks shown on the homepage.
apply_setting "${UI_HOME_PAGE_LATEST_BLOCKS_COUNT}" "BTCEXP_UI_HOME_PAGE_LATEST_BLOCKS_COUNT" "recent block count"
## Set the number of blocks per page on the browse-blocks page.
apply_setting "${UI_BLOCKS_PAGE_BLOCK_COUNT}" "BTCEXP_UI_BLOCKS_PAGE_BLOCK_COUNT" "browse blocks page count"
}
##############################################################################
## Initialise docker image
##############################################################################
# First-run setup: copy the bundled .env template(s) from the image into the
# persistent config volume, then apply environment-variable overrides.
init(){
echo -e "\\n====================================- INITIALISING BTC-RPC-EXPLORER -====================================\\n"
## Copy config file into bind-volume
# NOTE(review): ${CONFIG_DIR} is unquoted here — confirm it can never
# contain whitespace, otherwise cp receives extra arguments.
cp /tmp/btc-rpc-explorer.env* ${CONFIG_DIR}
## Don't remove tmp files incase we want to overwrite
echo "Copied .env into ${CONFIG_DIR}..."
# Apply env-var overrides to the freshly copied config.
template_config
echo "Templated config..."
}
##############################################################################
## Main shell script function
##############################################################################
## Entry point: template the .env once (guarded by a lock file or forced via
## OVERWRITE_CONFIG), map the runtime user, link the config into the app's
## expected location and optionally dump it to the log.
main() {
## Initialise container if there is no lock file
if [[ ! -e $EXP_CONFIG_FILE.lock ]] || $OVERWRITE_CONFIG; then
init
echo "Only run init once. Delete this file to re-init .env templating on container start up." > "$EXP_CONFIG_FILE.lock"
else
echo ".env already configured. Skipping config templating..."
fi
map_user
## Symbolic link config file.
## -sfn: on a container restart the link already exists and plain `ln -s`
## would fail with "File exists"; force-replace it without dereferencing.
ln -sfn "${EXP_CONFIG_FILE}" /etc/btc-rpc-explorer/.env
## Echo config to log if set true
if $LOG_CONFIG; then
echo_config
fi
}
## Call main function
main
echo -e "\\n====================================- STARTING BTC-RPC-EXPLORER -====================================\\n"
echo ''
## Execute dockerfile CMD as nonroot alternate gosu
## su-exec replaces this shell (PID 1) with the container CMD, dropping
## privileges to the PUID:PGID that map_user exported above.
su-exec "${PUID}:${PGID}" "$@"
| true
|
9beaa02e92ee3ee9bdd2aae5c7b52201955b493c
|
Shell
|
martin-der/gradle-shell-plugin
|
/test/tool/test_runner.sh
|
UTF-8
| 383
| 3.0625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
source "$(dirname "${BASH_SOURCE[0]}")/extra_assertions.sh"
# Create the directory that collects generated test artefacts.
# Reads MDU_SHELLTEST_TEST_GENERATED_DIRECTORY from the environment;
# mkdir -p makes this a no-op when the directory already exists.
mkTestResultsDir() {
	local results_dir="${MDU_SHELLTEST_TEST_GENERATED_DIRECTORY}"
	mkdir -p "${results_dir}"
}
# Run the shunit2 suite and map its outcome onto the configured exit codes.
# Sourcing the shunit2 executable runs every test function defined so far;
# if the source itself fails we exit with the "execution error" code.
runTests() {
source "${MDU_SHELLTEST_TESTUNIT_SHUNIT2_EXEC}" || exit ${MDU_SHELLTEST_TEST_EXECUTION_ERROR_EXIT_CODE}
# __shunit_testsFailed is shunit2's internal failure counter.
# NOTE(review): it is expanded unquoted — confirm it is always numeric.
[ ${__shunit_testsFailed} -gt 0 ] && exit ${MDU_SHELLTEST_TEST_ASSERTION_FAILURE_EXIT_CODE}
exit 0
}
| true
|
ca4e6e9bb8efccc2a71f5717de93f207ee5e8f11
|
Shell
|
gravity-platform/vagrant-centos-7-php
|
/vendor-wrapper.sh
|
UTF-8
| 456
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/sh
# (fixed: the original shebang was "#/bin/sh", missing the "!", so the script
#  was executed by whatever shell happened to invoke it)
# Simple wrapper for potentially vendorized bins
#
# This script is always called through a symlink. ie 'phpunit -> vagrant-centos-7-php-wrapper.sh'.
#
# @author Lucas Bickel <lucas.bickel@swisscom.com>
# @license GPL
# @link http://swisscom.com
REQUESTED_BIN=$(basename "${0}")
# Prefer a project-local vendored binary; fall back to the global composer one.
# [ ... ] instead of [[ ... ]]: this script declares /bin/sh, which need not
# support the bash-only [[ compound command.
if [ -f "${PWD}/vendor/bin/${REQUESTED_BIN}" ]; then
    FINAL_BIN=${PWD}/vendor/bin/${REQUESTED_BIN}
else
    FINAL_BIN=${HOME}/.composer/vendor/bin/${REQUESTED_BIN}
fi
# Quote the target and forward arguments verbatim ("$@" preserves word
# boundaries; the unquoted $@ in the original split arguments with spaces).
exec "$FINAL_BIN" "$@"
| true
|
55895cb198e01c9dcadcc3ae6a7c28103d6590c9
|
Shell
|
shengyongniu/bulk_rna_seq_tophat
|
/RNA_flagstat.sh
|
UTF-8
| 279
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/bash -l
# Collect `samtools flagstat` summaries for every tophat output BAM into a
# single report file; each BAM path is written immediately before its stats.
module load samtools
report="RNA_merge_170415_NB501164_170506_NB501164_flagstat.txt"
for file in *_thout/accepted_hits.bam ; do
    echo "${file}" >> "$report"
    # Quote "$file" (the original passed it unquoted to samtools) and append
    # the output directly instead of buffering it in a variable first.
    samtools flagstat "$file" >> "$report"
done
| true
|
8522e9ad645a2e4b71dcdfab08a484f863913012
|
Shell
|
exaltedmt/PA2
|
/autograder/test.sh
|
UTF-8
| 263
| 2.96875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Grade ./a.out: feed it two names on stdin and award one point for each
# name that appears in its output. The exit status carries the score.
tmpoutput=$(echo -e freddy '\n' susan | ./a.out)
CORRECT=0
# grep -q's own status drives the if; the original captured an always-empty
# command substitution into f1 and tested $? afterwards.
if echo "$tmpoutput" | grep -q 'freddy'; then
CORRECT=$((CORRECT+1))
fi
if echo "$tmpoutput" | grep -q 'susan'; then
CORRECT=$((CORRECT+1))
fi
# The autograder reads the score from the exit status (0, 1 or 2).
exit $CORRECT
| true
|
aec3bf796170485a197f817ce262ea1c3a42f95f
|
Shell
|
nuitrcs/examplejobs
|
/alphafold/v2.1.1/example_submit_cpu_part.sh
|
UTF-8
| 2,227
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash
#SBATCH --account=pXXXX ## YOUR ACCOUNT pXXXX or bXXXX
#SBATCH --partition=short ### PARTITION (buyin, short, normal, etc)
#SBATCH --nodes=1 ## how many computers do you need
#SBATCH --ntasks-per-node=12 ## how many cpus or processors do you need on each computer
#SBATCH --time=04:00:00 ## how long does this need to run (remember different partitions have restrictions on this param)
#SBATCH --mem=85G ## how much RAM do you need per CPU (this effects your FairShare score so be careful to not ask for more than you need))
#SBATCH --job-name=run_AlphaFold ## When you run squeue -u NETID this is how you can identify the job
#SBATCH --output=AlphaFold-CPU.log ## standard out and standard error goes to this file
#########################################################################
### PLEASE NOTE: ###
### The above CPU and Memory resources have been selected based ###
### on the computing resources that alphafold was tested on ###
### which can be found here: ###
### https://github.com/deepmind/alphafold#running-alphafold) ###
### It is likely that you do not have to change anything above ###
### besides your allocation, and email (if you want to be emailed). ###
#########################################################################
module purge
module load alphafold/2.1.1-only-msas-flag-addition
# To run alphafold more efficiently,
# we split the CPU and GPU parts of the pipeline into two separate submissions.
# Below we provide a way to run the CPU part of alpahfold-multimer and alphafold-monomer
# --only_msas=true restricts each run to the CPU-bound MSA generation stage;
# results are written under ./out relative to the submission directory.
# real example monomer (takes about 3 hours and 15 minutes)
alphafold-monomer --fasta_paths=/projects/intro/alphafold/T1050.fasta \
--max_template_date=2020-05-14 \
--model_preset=monomer \
--db_preset=full_dbs \
--only_msas=true \
--output_dir=$(pwd)/out
# real example multimer (takes about 2 hours and 40 minutes)
alphafold-multimer --fasta_paths=/projects/intro/alphafold/6E3K.fasta \
--max_template_date=2020-05-14 \
--model_preset=multimer \
--db_preset=full_dbs \
--only_msas=true \
--output_dir=$(pwd)/out
| true
|
41cd02b2c5a81867105412fa390c6397304044ab
|
Shell
|
anirvanr/scripts
|
/start-mongo-shard.sh
|
UTF-8
| 4,235
| 3.390625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
set -e
# This script will start three shards, a config server and a mongos, all on the current machine.
# This script is only for testing purposes. In production, you would run each of these servers on a different machine.
# It was written for Mongo v4.0.6
# Port layout: shard0 replicas 37017-19, shard1 47017-19, shard2 57017-19,
# config servers 57040-42, mongos 27018.
# NOTE(review): shard2 and the config servers share the 57xxx range — confirm
# this is deliberate and the specific ports never collide.
# NOTE(review): --smallfiles is an MMAPv1-era option; confirm the mongod
# 4.0.6 build in use still accepts it with its configured storage engine.
# clean everything up
echo "killing mongod and mongos"
pkill -u $UID mongod || true
pkill -u $UID mongos || true
echo "removing data files"
rm -rf data/config-*
rm -rf data/shard*
sleep 1
# start a replica set and tell it that it will be shard0
rm s0-r0.log s0-r1.log s0-r2.log || true
mkdir -p data/shard0/rs0 data/shard0/rs1 data/shard0/rs2
mongod --replSet s0 --logpath "s0-r0.log" --dbpath data/shard0/rs0 --port 37017 --fork --shardsvr --smallfiles
mongod --replSet s0 --logpath "s0-r1.log" --dbpath data/shard0/rs1 --port 37018 --fork --shardsvr --smallfiles
mongod --replSet s0 --logpath "s0-r2.log" --dbpath data/shard0/rs2 --port 37019 --fork --shardsvr --smallfiles
sleep 5
# connect to one server and initiate the set
mongo --port 37017 << 'EOF'
config = { _id: "s0", members:[
{ _id : 0, host : "localhost:37017" },
{ _id : 1, host : "localhost:37018" },
{ _id : 2, host : "localhost:37019" }]};
rs.initiate(config)
EOF
# start a replicate set and tell it that it will be a shard1
rm s1-r0.log s1-r1.log s1-r2.log || true
mkdir -p data/shard1/rs0 data/shard1/rs1 data/shard1/rs2
mongod --replSet s1 --logpath "s1-r0.log" --dbpath data/shard1/rs0 --port 47017 --fork --shardsvr --smallfiles
mongod --replSet s1 --logpath "s1-r1.log" --dbpath data/shard1/rs1 --port 47018 --fork --shardsvr --smallfiles
mongod --replSet s1 --logpath "s1-r2.log" --dbpath data/shard1/rs2 --port 47019 --fork --shardsvr --smallfiles
sleep 5
mongo --port 47017 << 'EOF'
config = { _id: "s1", members:[
{ _id : 0, host : "localhost:47017" },
{ _id : 1, host : "localhost:47018" },
{ _id : 2, host : "localhost:47019" }]};
rs.initiate(config)
EOF
# start a replicate set and tell it that it will be a shard2
rm s2-r0.log s2-r1.log s2-r2.log || true
mkdir -p data/shard2/rs0 data/shard2/rs1 data/shard2/rs2
mongod --replSet s2 --logpath "s2-r0.log" --dbpath data/shard2/rs0 --port 57017 --fork --shardsvr --smallfiles
mongod --replSet s2 --logpath "s2-r1.log" --dbpath data/shard2/rs1 --port 57018 --fork --shardsvr --smallfiles
mongod --replSet s2 --logpath "s2-r2.log" --dbpath data/shard2/rs2 --port 57019 --fork --shardsvr --smallfiles
sleep 5
mongo --port 57017 << 'EOF'
config = { _id: "s2", members:[
{ _id : 0, host : "localhost:57017" },
{ _id : 1, host : "localhost:57018" },
{ _id : 2, host : "localhost:57019" }]};
rs.initiate(config)
EOF
# now start 3 config servers
rm cfg-a.log cfg-b.log cfg-c.log || true
mkdir -p data/config/config-a data/config/config-b data/config/config-c
mongod --replSet cs --logpath "cfg-a.log" --dbpath data/config/config-a --port 57040 --fork --configsvr --smallfiles
mongod --replSet cs --logpath "cfg-b.log" --dbpath data/config/config-b --port 57041 --fork --configsvr --smallfiles
mongod --replSet cs --logpath "cfg-c.log" --dbpath data/config/config-c --port 57042 --fork --configsvr --smallfiles
sleep 5
mongo --port 57040 << 'EOF'
config = { _id: "cs", configsvr: true, members:[
{ _id : 0, host : "localhost:57040" },
{ _id : 1, host : "localhost:57041" },
{ _id : 2, host : "localhost:57042" }]};
rs.initiate(config)
EOF
# now start the mongos on port 27018
rm mongos-1.log || true
sleep 5
mongos --port 27018 --logpath "mongos-1.log" --configdb cs/localhost:57040,localhost:57041,localhost:57042 --fork
echo "Waiting 60 seconds for the replica sets to fully come online"
sleep 60
echo "Connnecting to mongos and enabling sharding"
# add shards and enable sharding on the test db
mongo --port 27018 << 'EOF'
db.adminCommand( { addshard : "s0/"+"localhost:37017" } );
db.adminCommand( { addshard : "s1/"+"localhost:47017" } );
db.adminCommand( { addshard : "s2/"+"localhost:57017" } );
db.adminCommand({enableSharding: "test"});
EOF
sleep 5
echo "Done setting up sharded environment on localhost"
echo "================================================"
# Show the mongod/mongos processes that were started above.
ps aux | grep " mongo[ds] "
| true
|
b24f35aebc262b4f8fbba4a818a09d176837f312
|
Shell
|
navalekar-juhi/Network-Structure-Cloud-Computing
|
/Infrastructure/cloudformation/aws-cf-create-stack.sh
|
UTF-8
| 1,600
| 3.359375
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Create the csye6225 networking CloudFormation stack.
# Args: $1 stack name, $2-$4 three distinct availability zones in us-east-1.
STACK_NAME=$1
AWSREGION1=$2
AWSREGION2=$3
AWSREGION3=$4
VPC_NAME=${STACK_NAME}-csye6225-vpc
SUBNET1_NAME=${STACK_NAME}-csye6225-subnet1
SUBNET2_NAME=${STACK_NAME}-csye6225-subnet2
SUBNET3_NAME=${STACK_NAME}-csye6225-subnet3
IGNAME=${STACK_NAME}-csye6225-InternetGateway
PUBLIC_ROUTE_TABLE=${STACK_NAME}-csye6225-public-route-table
## Refuse to proceed when a stack with the same name already exists
## (the command's own exit status drives the if; no $? juggling needed).
if aws cloudformation list-stack-resources --stack-name "$STACK_NAME"
then
echo "Stack already exists with this name"
exit 1
fi
## All three zone arguments must live inside us-east-1 (prefix check).
## || chains replace the deprecated, ambiguous test operators -o / -a.
if [ "${AWSREGION1:0:9}" != "us-east-1" ] || [ "${AWSREGION2:0:9}" != "us-east-1" ] || [ "${AWSREGION3:0:9}" != "us-east-1" ];
then
echo "Zone entered maybe outside us-east-1"
exit 1
fi
## The three zones must be pairwise distinct.
if [ "$AWSREGION1" == "$AWSREGION2" ] || [ "$AWSREGION1" == "$AWSREGION3" ] || [ "$AWSREGION2" == "$AWSREGION3" ]
then
echo "Two zones entered maybe similar"
exit 1
fi
aws cloudformation create-stack --stack-name "$STACK_NAME" --template-body file://csye6225-cf-networking.json --parameters ParameterKey=VPCName,ParameterValue="$VPC_NAME" ParameterKey=SubnetName1,ParameterValue="$SUBNET1_NAME" ParameterKey=SubnetName2,ParameterValue="$SUBNET2_NAME" ParameterKey=SubnetName3,ParameterValue="$SUBNET3_NAME" ParameterKey=PubicRouteTableName,ParameterValue="$PUBLIC_ROUTE_TABLE" ParameterKey=AWSREGION1,ParameterValue="$AWSREGION1" ParameterKey=AWSREGION2,ParameterValue="$AWSREGION2" ParameterKey=AWSREGION3,ParameterValue="$AWSREGION3" ParameterKey=IGName,ParameterValue="$IGNAME"
## Block until CloudFormation reports the final stack state.
if aws cloudformation wait stack-create-complete --stack-name "$STACK_NAME"
then
echo "Creation of Stack Success"
else
echo "Creation of Stack failed"
fi
| true
|
ea5985a68c181792141728add23a276b703c6066
|
Shell
|
kversl/bash-snippets
|
/CaptureOneCatalogAddExifContent/setGeoOnImage.sh
|
UTF-8
| 4,236
| 3.90625
| 4
|
[] |
no_license
|
#!/bin/bash
#
# required: ##########################
# CaptureOne on MacOSX
# ####################################
# VARS:
# - Image name (part of)
# - overwrite?
# - latitude/longitude
# - path to CaptureOne Catalog
######################################
# NOTE(review): $CC and $0 inside USAGE are interpolated when the string is
# *defined*, i.e. before the preferences file is sourced — on a first run
# the "remembers ... cocatalog" line will show an empty path.
USAGE="
====================================
adds EXIF coordinates to an IMAGE
inside a CaptureOne COCATALOG
or in a COSESSION
====================================
prerequisites:
- remembers CaptureOne cocatalog: "$CC"
- do not replace existing coordinates
- limit to one IMAGE, if not a range
====================================
USAGE:
$0 49.231/10.9234 DCC0333
optional parameters
-c \"path/to/CaptureOne catalog\"
====================================
"
PREFERENCESDIR="${HOME}/.config"
# NOTE(review): $0 usually contains slashes, so the preferences path becomes
# ${PREFERENCESDIR}/<full-script-path>.pref — confirm that nesting is intended.
PREFERENCESFILE="$0.pref"
# Glob used to locate the catalog database inside the .cocatalog bundle.
CCDBPATTERN="*.cocatalogdb"
# Ensure the preferences directory exists before anything writes into it.
# mkdir -p is a no-op when the directory is already present.
prefsdirCheck(){
	mkdir -p "$PREFERENCESDIR"
}
# Persist the currently selected catalog path so later runs remember it.
writeprefs(){
	prefsdirCheck
	printf 'CC=%s\n' "${CC}" > "${PREFERENCESDIR}/${PREFERENCESFILE}"
}
# read preferences
if [ -r "${PREFERENCESDIR}/${PREFERENCESFILE}" ]; then
. "${PREFERENCESDIR}/${PREFERENCESFILE}"
fi
# NOTE(review): this hard-coded path overwrites both the value just loaded
# from the preferences file and the -c/--catalog option (CATALOGBUNDLE is
# parsed below but never copied into CC) — looks like leftover debug code.
CC="/Users/klaus8/Pictures/Nonrail.cocatalog"
CCDB=$(find "${CC}" -name '*.cocatalogdb' -maxdepth 1 -print0 | xargs -0 -n1 )
# import functions
. ./sql.sh ####### executesql(){} ########
. ./latlong.sh ### geo-functions llsplit(), lat2exif(), long2exif()
# Manual option parsing: known flags are consumed, everything else is kept
# in POSITIONAL and restored as $1.. afterwards.
POSITIONAL=()
while [[ $# -gt 0 ]]; do
key="$1"
case $key in
    -c|--catalog)
    CATALOGBUNDLE="$2"
    WRITEPREFS=1
    shift
    shift
    ;;
    -h|--help)
	echo "$USAGE"
	exit 0
	;;
    *)
    POSITIONAL+=("$1")
    shift
    ;;
esac
done
set -- "${POSITIONAL[@]}" # restore positional parameters
#======================================
# With fewer than two positional args, switch to interactive prompting.
if [[ ${#POSITIONAL[@]} -lt 2 ]]; then
PROMPTUSER=1
echo "$USAGE"
else
if [[ "${POSITIONAL[1]}" == *?-?* ]]; then
# imagename contains a hyphen
echo "image name contains a hyphen. Range not implemented yet."
exit
else
echo "IMAGEPATTERN=${POSITIONAL[1]};"
IMAGEPATTERN="${POSITIONAL[1]}"
fi
LLSTRING="${POSITIONAL[0]}"
echo "LLSTRING=${POSITIONAL[0]};"
fi
#=====================================
#########################################
## Task: image pattern and geo string
## 1. exactly one image found?
## 2. geo not yet set?
## 3. if OK, then write.
#########################################
## 1.
[[ $PROMPTUSER -eq 1 ]] && read -p "Enter Image PATTERN: " IMAGEPATTERN
SQL_CountImagesOnPattern="SELECT ZDISPLAYNAME FROM ZIMAGE WHERE ZDISPLAYNAME LIKE '%_IMAGEPATTERN_%';"
dnamescsv=$(executesql "${SQL_CountImagesOnPattern/_IMAGEPATTERN_/${IMAGEPATTERN}}" -csv)
# NOTE(review): '>' inside [[ ]] compares lexicographically, not numerically;
# -gt is presumably intended here — confirm.
if [[ ${#dnamescsv} > 2 ]]; then
linesplit_a=()
linesplit "$dnamescsv"
else
echo "SQL RETURNED TOO LESS:${#dnamescsv}; CHARACTERS"
echo "GEFUNDEN TOO LESS:${#linesplit_a[@]};"
fi
echo "SQL RETURNED:${#dnamescsv}; CHARACTERS"
echo "GEFUNDEN:${#linesplit_a[@]};"
# Exactly one match is required; otherwise abort.
if [[ ${#linesplit_a[@]} -eq 1 ]] ; then
DISPLAYNAME="${linesplit_a[0]}"
else
echo "PATTERN matches ${#linesplit_a[@]} images. Only one is accepted. Exit now. These images: ${linesplit_a[@]}"
exit;
fi
## 2.
SQL_LatLongFromPattern="SELECT ZGPSLATITUDE, ZGPSLONGITUDE FROM ZIMAGE WHERE ZDISPLAYNAME LIKE '%_IMAGEPATTERN_%';"
llIMG=$(executesql "${SQL_LatLongFromPattern/_IMAGEPATTERN_/${IMAGEPATTERN}}" -csv )
# NOTE(review): the quoted ",,*" right-hand side is compared literally, not
# as a glob, and the message below says "Exit now" but no exit follows — the
# script continues and overwrites existing coordinates. Confirm intent.
if [[ "$llIMG" = "," ]] || [[ "$llIMG" = ",,*" ]] ; then
echo "Contains no Coordinates. Continue."
else
echo "image already contains Coordinates:${llIMG}. Exit now"
fi
## 3.
llstring="${LLSTRING}"
[[ $PROMPTUSER -eq 1 ]] && read -p "Enter Lat/Long: " llstring
# Split "lat/long" and convert each half to the EXIF representation.
llsplit_a=()
llsplit $llstring
lat2exif_v=0
lat2exif ${llsplit_a[0]}
long2exif_v=0
long2exif ${llsplit_a[1]}
SQL_SetLatLongOnImage="UPDATE ZIMAGE SET ZGPSLATITUDE = '_LAT_', ZGPSLONGITUDE = '_LONG_' WHERE ZDISPLAYNAME = '_DISPLAYNAME_';"
SQL_SetLatLongOnImage="${SQL_SetLatLongOnImage/_LAT_/$lat2exif_v}"
SQL_SetLatLongOnImage="${SQL_SetLatLongOnImage/_LONG_/$long2exif_v}"
llIMG=$(executesql "${SQL_SetLatLongOnImage/_DISPLAYNAME_/${DISPLAYNAME}}" -csv )
echo sql replied:"${llIMG};"
#======================================
# Remember the catalog location for the next invocation when -c was given.
if [[ $WRITEPREFS -eq 1 ]]; then
writeprefs
fi
| true
|
ba917bf631c376750c96d55f152e136cca380de1
|
Shell
|
B-Rich/autoscripts-for-cpeg657
|
/plot/enum.sh
|
UTF-8
| 184
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
# For every (i, j) pair — i taken from the first line of xy.txt, j from the
# last — print "[i, j]: <line>", consuming input.txt one line per pair.
cnt=1
for i in $(head -n 1 xy.txt)
do
for j in $(tail -n 1 xy.txt)
do
# Pick line $cnt of input.txt (head|tail kept so that running past the end
# of the file repeats the last line, matching the original behaviour).
line=$(head -n "$cnt" input.txt | tail -n 1)
echo "[$i, $j]: $line"
cnt=$((cnt+1))
done
done
| true
|
eb3c10bed898d7fdd6aa2f7ad3c0da3c2a637643
|
Shell
|
mj/dotfiles
|
/.zsh/mj.zsh-theme
|
UTF-8
| 2,186
| 3.71875
| 4
|
[] |
no_license
|
#
# Based on the awesome agnoster theme - https://gist.github.com/3712874
#
# zsh prompt theme: draws powerline-style coloured segments. Relies on
# zsh-only features (print with %F{..}/%K{..} prompt escapes, vcs_info,
# zstyle, add-zsh-hook) — not portable to bash.
# CURRENT_BG tracks the background colour of the segment currently open so
# the separator between segments can be drawn in the right colours.
CURRENT_BG='NONE'
PRIMARY_FG=black
# Characters (powerline/private-use glyphs and unicode symbols)
SEGMENT_SEPARATOR="\ue0b0"
PLUSMINUS="\u00b1"
BRANCH="\ue0a0"
DETACHED="\u27a6"
LIGHTNING="\u26a1"
# Begin a segment
# Takes two arguments, background and foreground.
# Optional $3 is text printed inside the segment.
# %{...%} marks the escapes as zero-width so zsh sizes the prompt correctly.
prompt_segment() {
local bg="%K{$1}"
local fg="%F{$2}"
if [[ $CURRENT_BG != 'NONE' && $1 != $CURRENT_BG ]]
then
print -n "%{$bg%F{$CURRENT_BG}%}$SEGMENT_SEPARATOR%{$fg%}"
else
print -n "%{$bg%}%{$fg%}"
fi
CURRENT_BG=$1
[[ -n $3 ]] && print -n $3
}
# End the prompt, closing any open segments
prompt_end() {
if [[ -n $CURRENT_BG ]]
then
print -n "%{%k%F{$CURRENT_BG}%}$SEGMENT_SEPARATOR"
else
print -n "%{%k%}"
fi
print -n "%{%f%}"
CURRENT_BG=''
}
# Git: branch/detached head, dirty status
# Reads $vcs_info_msg_0_ populated by vcs_info in the precmd hook below.
prompt_git() {
local color ref
is_dirty() {
test -n "$(git status --porcelain --ignore-submodules)"
}
ref="$vcs_info_msg_0_"
test -z "$ref" && return
if is_dirty
then
color=yellow
ref="${ref} $PLUSMINUS"
else
color=green
ref="${ref} "
fi
# "..." in the ref marks a detached HEAD (see actionformats below).
if [[ "${ref/.../}" == "$ref" ]]
then
ref="$BRANCH $ref"
else
ref="$DETACHED ${ref/.../}"
fi
prompt_segment $color $PRIMARY_FG
print -Pn " $ref"
}
## Main prompt
prompt_mj_main() {
RETVAL=$?
CURRENT_BG='NONE'
# am I root?
[[ $UID -eq 0 ]] && prompt_segment $PRIMARY_FG default "%{%F{red}%}$LIGHTNING "
# user@host
prompt_segment NONE default "%(!.%{%F{yellow}%}.)%n@%m "
# CWD
prompt_segment cyan $PRIMARY_FG ' %~ '
# NOTE(review): git status is skipped under /mnt/ — presumably to avoid slow
# network/WSL mounts; confirm.
[[ $PWD == *"/mnt/"* ]] || prompt_git
prompt_end
}
# Rebuild PROMPT before every prompt display (registered as a precmd hook).
prompt_mj_precmd() {
vcs_info
PROMPT='%{%f%b%k%}$(prompt_mj_main) '
}
# One-time setup: register the precmd hook and configure vcs_info for git.
prompt_mj_setup() {
autoload -Uz add-zsh-hook
autoload -Uz vcs_info
prompt_opts=(cr subst percent)
add-zsh-hook precmd prompt_mj_precmd
zstyle ':vcs_info:*' enable git
zstyle ':vcs_info:*' check-for-changes false
zstyle ':vcs_info:git*' formats '%b'
zstyle ':vcs_info:git*' actionformats '%b (%a)'
}
prompt_mj_setup "$@"
| true
|
238228c451ab4e219f567b32f2a146e8a8d14d5b
|
Shell
|
moogates/yarmproxy
|
/test/redis/set7.sh
|
UTF-8
| 470
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# Regression test: replay the set7 redis command stream through yarmproxy
# and check that the number of reply lines matches the number of commands.
YARMPROXY_PORT=11311
if [ $# -gt 0 ]; then
YARMPROXY_PORT=$1
fi
gunzip -c ./set7.data.gz | ../yarmnc 127.0.0.1 "$YARMPROXY_PORT" > set7.tmp
# cat set7.tmp
# Every command in the data stream starts with a '*' multi-bulk header;
# grep -c counts them directly (replaces grep | wc -l | awk).
expected_count=$(gunzip -c set7.data.gz | grep -c "^*")
count=$(wc -l < set7.tmp)
if [ "$count" -ne "$expected_count" ]; then
echo -e "\033[33mFail $count/$expected_count.\033[0m"
exit 1
else
echo -e "\033[32mPass $count/$expected_count.\033[0m"
fi
| true
|
79d2da5158cdc3da1d637144782ccdb1b7f86628
|
Shell
|
kinoulink/infra
|
/stacks/manager/rsync/rsync
|
UTF-8
| 456
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
# Container entrypoint for an rsync daemon: fix volume ownership, write a
# default /etc/rsyncd.conf on first start, then run rsync in the foreground.
OWNER=${OWNER:-www-data}
GROUP=${GROUP:-www-data}
# NOTE(review): VOLUME is expected from the environment — confirm it is
# always set; an empty value makes chown fail here.
chown "${OWNER}:${GROUP}" "${VOLUME}"
# Generate the default config only when none was mounted into the container.
[ -f /etc/rsyncd.conf ] || cat <<EOF > /etc/rsyncd.conf
uid = ${OWNER}
gid = ${GROUP}
use chroot = yes
log file = /dev/stdout
reverse lookup = no
[server]
read only = false
path = /opt/ktv/shares/server
[infra]
read only = false
path = /opt/ktv/shares/infra
EOF
# --no-detach keeps rsync in the foreground (suitable as PID 1);
# any extra container args are forwarded to the daemon.
exec /usr/bin/rsync --no-detach --daemon --config /etc/rsyncd.conf "$@"
| true
|
dc9cabbfc1e95de69f186a5d75f56d6e6662d787
|
Shell
|
swesterfeld/bfsync
|
/src/link-del-test.sh
|
UTF-8
| 655
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# Benchmark bfsync pull times while many files are created and deleted.
# $1 - scratch directory that will hold master/clone repos and mounts.
usage()
{
echo "link-del-test.sh <dir>"
exit 1
}
[ -d "$1" ] || usage;
# All expansions of $1 are quoted so a scratch path with spaces cannot
# word-split (the original passed it unquoted everywhere).
mkdir -p "$1/mnt-a"
mkdir -p "$1/mnt-b"
bfsync init "$1/master"
bfsync clone "$1/master" "$1/repo-b"
bfsync clone "$1/master" "$1/repo-a"
bfsyncfs "$1/repo-a" "$1/mnt-a"
bfsyncfs "$1/repo-b" "$1/mnt-b"
for i in $(seq 10)
do
for n in $(seq 1 "${i}000")
do
touch "$1/mnt-a/$n"
done
# Guard the cd: committing/removing in the wrong directory would be destructive.
cd "$1/mnt-a" || exit 1
bfsync commit -m "add links"
rm "$1"/mnt-a/[0-9]*
bfsync commit -m "del links"
bfsync push
cd "$1/mnt-b" || exit 1
/usr/bin/time -f "$i link-del-time %e" bfsync pull 2>&1
done
cd /
fusermount -u "$1/mnt-a"
fusermount -u "$1/mnt-b"
rm -rf "$1/mnt-a" "$1/mnt-b" "$1/master" "$1/repo-a" "$1/repo-b"
| true
|
c6bc5a2b27b2fe5bfb2987565b0344a0e99ae85f
|
Shell
|
VicRef/Bash_Game_Simple_Snake
|
/catch_arrow_keys.sh
|
UTF-8
| 157
| 2.890625
| 3
|
[] |
no_license
|
# Translate arrow-key presses on stdin into direction words, forever.
# Arrow keys arrive as the three-byte escape sequence ESC [ A|B|C|D;
# reading one raw, unechoed character at a time (-sn1) means the final
# byte selects the direction while the ESC and '[' bytes fall through
# the case statement silently.
while true
do
read -r -sn1 t
case $t in
A) echo up ;;
B) echo down ;;
C) echo right ;;
D) echo left ;;
esac
done
| true
|
80f71a7c55e7cc653a722cc412eadde1e4e9d11e
|
Shell
|
artur-cabral/BashScript_Project
|
/animatedGif
|
UTF-8
| 989
| 3.859375
| 4
|
[] |
no_license
|
#!/bin/bash
echo "============================================================"
echo "Author: Artur Cabral"
echo "running animatedGif script File..."
#Summary: The goal for this script is to transform .gif pictures into one animated gif
# taking the arguments given by the user for the name, the starting number, and the ending
# number of the pictures sequence that will become a animated gif
# prompt the user to give the animated gif a name:
echo "Give a name to your animated gif:"
# -r stops read from mangling backslashes in the entered name
read -r gifName
#assign the arguments to the correct variable names
name=$1
number1=$2
number2=$3
# NOTE(review): "$name"*.gif globs EVERY frame, while the comment said only
# the first image seeds the animation — confirm which is intended.
convert "$name"*.gif "$gifName.gif"
# append frames number1+1 .. number2 one at a time (via a temp gif, since
# convert cannot safely read and write the same file in one call)
for ((i = number1 + 1; i < number2 + 1; i++))
do
convert "$gifName.gif" "$name$i.gif" temp.gif
convert temp.gif "$gifName.gif"
done
# let the user know the animated gif is ready
echo "Your $name animated gif is now available as $gifName.gif!"
echo "============================================================"
| true
|
6ba5d285ca7c2036f51c4d509f04ff9c264c955a
|
Shell
|
sjyoo/coexpression
|
/deps/WGCNA/install-r-packages.sh
|
UTF-8
| 1,872
| 3.546875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Installs R (>= 3.2) plus the WGCNA package stack for the coexpression
# service. Mutates system apt sources and removes any existing R install,
# so it requires sudo rights (targets Ubuntu trusty).
dest=${TARGET-/usr/}
runt=${KB_RUNTIME-/usr}/bin
srcd="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
echo "using $dest as installation directory";
echo "using $runt as kb_runtime directory";
echo "using $srcd as package directory";
###
# Warning: the following installation package assumes the latest version installation is stable on WGCNA,
# which might not be the best if any new piece is broken.
# TODO: Fix all the version of the dependencies
# Add the CRAN apt repository only if it is not already configured.
c=$(grep "deb http://cran.rstudio.com/bin/linux/ubuntu trusty" /etc/apt/sources.list | wc -l);
if [ "$c" == 0 ]; then
echo "Upgrading R version to 3.2 or latest"
sudo sh -c 'echo "deb http://cran.rstudio.com/bin/linux/ubuntu trusty/" >> /etc/apt/sources.list'
gpg --keyserver keyserver.ubuntu.com --recv-key E084DAB9
gpg -a --export E084DAB9 | sudo apt-key add -
sudo apt-get update
sudo apt-get -y remove r-base-core r-base-dev
# ensure the installed packages gone for clean install
# by default, R would not update the installed packages with the latest version,
# which causes version compatibility issues.
sudo rm -rf /usr/lib/R
sudo rm -rf /usr/local/lib/R
sudo apt-get -y install r-base
fi
mkdir -p $dest/lib/R/library # for sanity and it actually does not use the created folder
# the kb runtime execution environment has the following variable was set
export R_LIBS=$dest/lib
# NOTE(review): tpage is the Template Toolkit CLI; it substitutes the rlib
# path into the R scripts before piping them to R — confirm it is installed.
if [ -e $runt/R ]; then
# /kb/runtime case
tpage --define rlib=$dest/lib "$srcd/r-packages.R" | $runt/R --vanilla --slave
tpage --define rlib=$dest/lib "$srcd/r-wgcna-packages.R" | $runt/R --vanilla --slave
else # docker does not have R on /kb/runtime
# system default case
tpage --define rlib=$dest/lib "$srcd/r-packages.R" | /usr/bin/R --vanilla --slave
tpage --define rlib=$dest/lib "$srcd/r-wgcna-packages.R" | /usr/bin/R --vanilla --slave
fi
| true
|
e8738b390ffef48fd6484f0351ede930987dd7c3
|
Shell
|
Zemotacqy/hazelcast-simulator
|
/dist/src/main/dist/jdk-install/jdk-oracle-6-64.sh
|
UTF-8
| 211
| 2.765625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Download and install Oracle JDK 6u45 (64-bit) into the user's home directory.
# Relies on helpers from jdk-support.sh: installPackage, addJavaHome, $BASE_URL.
set -e
source jdk-support.sh
installPackage wget
JDK_FILE="jdk-6u45-linux-x64.bin"
cd ~
# -N: only re-download when the remote file is newer than a local copy
wget --no-verbose -N "$BASE_URL/$JDK_FILE"
chmod +x "${JDK_FILE}"
"./${JDK_FILE}"
# FIX: a tilde inside double quotes is never expanded by the shell, so the
# original literal "~/jdk1.6.0_45" pointed at a nonexistent path; use $HOME.
addJavaHome "$HOME/jdk1.6.0_45"
| true
|
e7766a0d658cf75e26d9d50ec2c77e2921da6ab8
|
Shell
|
andrewsoong/EnKF_crtm
|
/DA/module_enkf.sh
|
UTF-8
| 9,496
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# EnKF (Ensemble Kalman Filter) analysis step of a WRF cycling DA workflow.
# Loads settings from $CONFIG_FILE, waits for upstream modules, runs enkf.mpi
# per domain, optionally replaces the ensemble mean / environment, and links
# the analysis files under $WORK_DIR/fc/$DATE.
# NOTE(review): wait_for_module and watch_log are helper functions provided
# by the sourced configuration environment -- not visible in this file.
. $CONFIG_FILE
rundir=$WORK_DIR/run/$DATE/enkf
# First visit: create the run directory and mark the step as waiting.
if [[ ! -d $rundir ]]; then mkdir -p $rundir; echo waiting > $rundir/stat; fi
cd $rundir
# Idempotence: skip the whole step if a previous run already finished.
if [[ `cat stat` == "complete" ]]; then exit; fi
#Check dependency
wait_for_module ../../$PREVDATE/wrf_ens ../obsproc ../icbc
# At lateral-boundary update times the perturbed-IC module must finish first.
if [[ $DATE == $LBDATE ]]; then
wait_for_module ../perturb_ic
fi
#Run EnKF
echo running > stat
echo " Running EnKF..."
# Domain indices 1..MAX_DOM (formatted as d01, d02, ... below).
domlist=`seq 1 $MAX_DOM`
#link files
# Stage every input the filter needs inside $rundir/$dm.
# Fortran unit-number convention (inferred from usage below -- TODO confirm):
#   fort.800xx = prior member files, fort.900xx = posterior (updated in place),
#   index NUM_ENS+1 = ensemble mean, fort.600xx = an extra prior-mean copy.
for n in $domlist; do
dm=d`expr $n + 100 |cut -c2-`
if [[ ! -d $dm ]]; then mkdir -p $dm; fi
# Skip domains whose filter run already produced a finish flag (restart support).
if [ -f $dm/${DATE}.finish_flag ]; then continue; fi
cd $dm
# Lustre striping: single stripe for the many small-ish files in this dir.
lfs setstripe -c 1 $rundir/$dm
echo " Linking files for domain $dm"
for NE in `seq 1 $NUM_ENS`; do
id=`expr $NE + 1000 |cut -c2-`
ln -fs $WORK_DIR/fc/$PREVDATE/wrfinput_${dm}_`wrf_time_string $DATE`_$id $WORK_DIR/fc/$DATE/wrf_enkf_input_${dm}_$id
# With inflation estimation the prior is copied (it will be modified in place);
# otherwise a symlink to the previous cycle's forecast suffices.
if $USE_ESTIMATE_INF; then
cp -L $WORK_DIR/fc/$PREVDATE/wrfinput_${dm}_`wrf_time_string $DATE`_$id fort.`expr 80010 + $NE`
else
ln -fs $WORK_DIR/fc/$PREVDATE/wrfinput_${dm}_`wrf_time_string $DATE`_$id fort.`expr 80010 + $NE`
fi
# Seed the posterior file from the prior; copies run in the background.
cp -L fort.`expr 80010 + $NE` fort.`expr 90010 + $NE` >> link.log 2>&1 &
done
wait
# Ensemble-mean slot (index NUM_ENS+1): prior, posterior, and a spare copy.
cp -L $WORK_DIR/fc/$PREVDATE/wrfinput_${dm}_`wrf_time_string $DATE` fort.`expr 80011 + $NUM_ENS`
cp -L fort.`expr 80011 + $NUM_ENS` fort.`expr 90011 + $NUM_ENS`
cp -L fort.`expr 80011 + $NUM_ENS` fort.`expr 60011 + $NUM_ENS`
ln -fs $WRF_DIR/run/LANDUSE.TBL .
# coefficients for CRTM
ln -fs $CRTM_DIR/crtm_wrf/coefficients .
# Empirical Localization Function
#ln -fs $ELF_DIR/elf .
#Observations
#LITTLE_R format from obsproc
ln -fs $DATA_DIR/obs/${DATE:0:4}/obs_gts_`wrf_time_string $DATE`.3DVAR obs_3dvar_${DATE}00
#airborne radar superobs
ln -fs $DATA_DIR/so/${DATE:0:4}/${DATE}_all.so_ass airborne_${DATE}_so
ln -fs $DATA_DIR/radiance/radiance_${dm}_${DATE}_so radiance_${DATE}_so
# updating non-Q variables only every 1-hour
# On the hour use the full filter binary; off-hour cycles use the hydro-only one.
if [[ ${DATE:10:2} == '00' ]]; then
ln -fs $ENKF_DIR/enkf.mpi .
else
#ln -fs $ENKF_DIR/enkf.mpi .
ln -fs $ENKF_DIR/enkf_hydro.mpi enkf.mpi
fi
# multiplicative inflation
if $USE_ESTIMATE_INF; then
ln -fs $ENKF_DIR/cal_inflate.mpi .
# carry the inflation parameters estimated in the previous cycle forward
ln -sf $WORK_DIR/run/$PREVDATE/enkf/$dm/parameters_update${PREVDATE} parameters_update
fi
# Generate the per-domain filter namelist.
$SCRIPT_DIR/namelist_enkf.sh $n > namelist.enkf
cd ..
done
# replacing prior mean with deterministic forecast
# (only after the first cycle; recentering the prior ensemble around the
# deterministic forecast before the filter runs)
if [ $DATE != $DATE_CYCLE_START ]; then
if $REPLACE_MEAN; then
if [[ $REPLACE_MEAN_WITH == "prior_forecast" ]]; then
# tid/nn/nt implement round-robin batching: launch background MPI jobs at
# staggered task offsets and 'wait' after every nt launches to cap node usage.
tid=0
nn=$((($enkf_ntasks+$HOSTPPN-$enkf_ntasks%$HOSTPPN)/$HOSTPPN))
nt=$(($total_ntasks/$HOSTPPN/$nn))
for n in $domlist; do
dm=d`expr $n + 100 |cut -c2-`
cd $dm
if [[ ! -d replace_mean ]]; then mkdir -p replace_mean; fi
cd replace_mean
echo " Replacing ens mean with $REPLACE_MEAN_WITH for domain $dm"
# Move priors into the work dir; replace_mean.exe updates the 900xx copies.
for NE in `seq 1 $((NUM_ENS+1))`; do
mv ../fort.`expr 80010 + $NE` fort.`expr 80010 + $NE`
cp -L fort.`expr 80010 + $NE` fort.`expr 90010 + $NE`
done
# fort.70010 = target mean field the ensemble is recentered on.
if [[ $REPLACE_MEAN_WITH == "prior_forecast" ]]; then
ln -fs $WORK_DIR/fc/$PREVDATE/wrfinput_${dm}_`wrf_time_string $DATE` fort.70010
fi
ln -fs $ENKF_DIR/replace_mean.exe .
# Temporarily claim the full node layout for ibrun, then restore it.
export SLURM_TASKS_PER_NODE="$HOSTPPN(x$SLURM_NNODES)"
ibrun -n $enkf_ntasks -o $((tid*$enkf_ntasks)) ./replace_mean.exe $NUM_ENS >& replace_mean.log &
export SLURM_TASKS_PER_NODE="$((SLURM_NTASKS/$SLURM_NNODES))(x$SLURM_NNODES)"
tid=$((tid+1))
if [[ $tid == $nt ]]; then
tid=0
wait
fi
cd ../..
done
# Second pass: confirm success and move recentered members back as priors.
for n in $domlist; do
dm=d`expr $n + 100 |cut -c2-`
cd $dm/replace_mean/
watch_log replace_mean.log Successful 1 $rundir
for NE in `seq 1 $((NUM_ENS+1))`; do
mv fort.`expr 90010 + $NE` ../fort.`expr 80010 + $NE`
cp ../fort.`expr 80010 + $NE` ../fort.`expr 90010 + $NE`
done
cd ../..
done
fi
fi
fi
# inflate prior members
# (multiplicative inflation, estimated adaptively; only on the hour)
if $USE_ESTIMATE_INF; then
if [[ ${DATE:10:2} == '00' ]]; then
tid=0
nn=$((($enkf_ntasks+$enkf_ppn-$enkf_ntasks%$enkf_ppn)/$enkf_ppn))
nt=$(($total_ntasks/$HOSTPPN/$nn))
for n in $domlist; do
dm=d`expr $n + 100 |cut -c2-`
# Skip domains already inflated in a previous (restarted) attempt.
if [ -f $dm/${DATE}.inffin_flag ]; then continue; fi
cd $dm
echo " Running inflation for domain $dm"
$SCRIPT_DIR/job_submit.sh $enkf_ntasks $((tid*$enkf_ntasks)) $enkf_ppn ./cal_inflate.mpi >& inflate.log &
tid=$((tid+1))
if [[ $tid == $nt ]]; then
tid=0
wait
fi
cd ..
done
wait
#Check output
for n in $domlist; do
dm=d`expr $n + 100 |cut -c2-`
# watch_log $dm/enkf.log Successful 5 $rundir
watch_log $dm/${DATE}.inffin_flag _ 5 $rundir
done
fi
fi
#run enkf.mpi
# Launch the filter per domain in batched background jobs (same tid/nt
# round-robin scheme as above), then block until every domain's finish flag
# appears.
tid=0
nn=$((($enkf_ntasks+$enkf_ppn-$enkf_ntasks%$enkf_ppn)/$enkf_ppn))
nt=$(($total_ntasks/$HOSTPPN/$nn))
for n in $domlist; do
dm=d`expr $n + 100 |cut -c2-`
if [ -f $dm/${DATE}.finish_flag ]; then continue; fi
cd $dm
echo " Running enkf.mpi for domain $dm"
$SCRIPT_DIR/job_submit.sh $enkf_ntasks $((tid*$enkf_ntasks)) $enkf_ppn ./enkf.mpi >& enkf.log &
tid=$((tid+1))
if [[ $tid == $nt ]]; then
tid=0
wait
fi
cd ..
done
#Check output
for n in $domlist; do
dm=d`expr $n + 100 |cut -c2-`
# watch_log $dm/enkf.log Successful 5 $rundir
watch_log $dm/${DATE}.finish_flag _ 5 $rundir
done
# replacing mean with first guess (GFS/FNL) reanalysis
# (posterior recentering: same structure as the prior-side pass, but it
# operates on the 900xx posterior files produced by the filter)
if $REPLACE_MEAN; then
if [[ $REPLACE_MEAN_WITH != "prior_forecast" ]]; then
tid=0
nn=$((($enkf_ntasks+$HOSTPPN-$enkf_ntasks%$HOSTPPN)/$HOSTPPN))
nt=$(($total_ntasks/$HOSTPPN/$nn))
for n in $domlist; do
dm=d`expr $n + 100 |cut -c2-`
cd $dm
if [[ ! -d replace_mean ]]; then mkdir -p replace_mean; fi
cd replace_mean
echo " Replacing ens mean with $REPLACE_MEAN_WITH for domain $dm"
for NE in `seq 1 $((NUM_ENS+1))`; do
mv ../fort.`expr 90010 + $NE` fort.`expr 80010 + $NE`
cp fort.`expr 80010 + $NE` fort.`expr 90010 + $NE`
done
# fort.70010 = replacement mean source: deterministic forecast or GFS analysis.
if [[ $REPLACE_MEAN_WITH == "forecast" ]]; then
ln -fs $WORK_DIR/fc/$PREVDATE/wrfinput_${dm}_`wrf_time_string $DATE` fort.70010
elif [[ $REPLACE_MEAN_WITH == "gfs" ]]; then
ln -fs $WORK_DIR/rc/$DATE/wrfinput_$dm fort.70010
fi
ln -fs $ENKF_DIR/replace_mean.exe .
export SLURM_TASKS_PER_NODE="$HOSTPPN(x$SLURM_NNODES)"
ibrun -n $enkf_ntasks -o $((tid*$enkf_ntasks)) ./replace_mean.exe $NUM_ENS >& replace_mean.log &
export SLURM_TASKS_PER_NODE="$((SLURM_NTASKS/$SLURM_NNODES))(x$SLURM_NNODES)"
tid=$((tid+1))
if [[ $tid == $nt ]]; then
tid=0
wait
fi
cd ../..
done
# Verify and move the recentered posteriors back up to the domain directory.
for n in $domlist; do
dm=d`expr $n + 100 |cut -c2-`
cd $dm/replace_mean/
watch_log replace_mean.log Successful 1 $rundir
for NE in `seq 1 $((NUM_ENS+1))`; do
mv fort.`expr 90010 + $NE` ../
done
cd ..
cd ..
done
fi
fi
### replacing outside with first guess (GFS/FNL) reanalysis
# At lateral-boundary update times, blend each member's environment (outside
# the storm region, per tcvitals) back to the GFS first guess. Runs serially
# per member; tid/nt only throttles how many run before a 'wait'.
if $REPLACE_ENVIRONMENT; then
nt=$((total_ntasks/$HOSTPPN))
if [ $DATE == $LBDATE ]; then
tid=0
for n in $domlist; do
# for n in `seq 1 $((MAX_DOM-1))`; do
dm=d`expr $n + 100 |cut -c2-`
cd $dm
if [[ ! -d replace_environment ]]; then mkdir -p replace_environment; fi
cd replace_environment
echo " Replacing environment with GFS for domain $dm"
for NE in `seq 1 $((NUM_ENS+1))`; do
id=`expr $NE + 1000 |cut -c2-`
if [[ ! -d $id ]]; then mkdir $id; fi
# Restart support: skip members whose log already reports success.
if [[ `tail -n2 ${id}/replace_environment.log |grep Successful` ]]; then continue; fi
cd $id
ln -sf $ENKF_DIR/replace_environment_by_gfs.exe .
# Storm location/track record for this cycle (defines the storm region).
ln -sf $TCVITALS_DIR/${DATE:0:4}/${DATE}.${STORM_ID}-tcvitals.dat tcvitals.dat
mv ../../fort.`expr 90010 + $NE` wrfinput
# Member NUM_ENS+1 is the ensemble mean: its first guess is the unperturbed GFS.
if [[ $NE == `expr $((NUM_ENS+1))` ]]; then
ln -sf $WORK_DIR/rc/$DATE/wrfinput_${dm} wrfinput_gfs
else
ln -sf $WORK_DIR/rc/$DATE/wrfinput_${dm}_$id wrfinput_gfs
fi
./replace_environment_by_gfs.exe >& replace_environment.log
tid=$((tid+1))
if [[ $tid == $nt ]]; then
tid=0
wait
fi
cd ..
done
cd ../..
done
# Verify every member and move the blended files back into the 900xx slots.
for n in $domlist; do
# for n in `seq 1 $((MAX_DOM-1))`; do
dm=d`expr $n + 100 |cut -c2-`
cd $dm
for NE in `seq 1 $((NUM_ENS+1))`; do
id=`expr $NE + 1000 |cut -c2-`
watch_log replace_environment/$id/replace_environment.log Successful 1 $rundir
mv replace_environment/$id/wrfinput fort.`expr 90010 + $NE`
done
cd ..
done
fi
fi
###
# Publish results: link priors/posteriors into $WORK_DIR/fc/$DATE under the
# names the rest of the workflow expects, then mark the step complete.
for n in $domlist; do
dm=d`expr $n + 100 |cut -c2-`
for NE in `seq 1 $NUM_ENS`; do
id=`expr $NE + 1000 |cut -c2-`
ln -sf $rundir/$dm/fort.`expr 90010 + $NE` $WORK_DIR/fc/$DATE/wrf_enkf_output_${dm}_$id
# cp $dm/fort.`expr 90010 + $NE` $WORK_DIR/fc/$DATE/wrf_enkf_output_${dm}_$id
ln -fs $WORK_DIR/fc/$DATE/wrf_enkf_output_${dm}_$id $WORK_DIR/fc/$DATE/wrfinput_${dm}_$id
if [ ! -f $WORK_DIR/fc/$DATE/wrf_enkf_input_${dm}_$id ]; then
ln -fs $WORK_DIR/fc/$PREVDATE/wrfinput_${dm}_`wrf_time_string $DATE`_$id $WORK_DIR/fc/$DATE/wrf_enkf_input_${dm}_$id
fi
done
# Ensemble-mean prior/posterior copies plus the canonical wrfinput link.
cp $dm/fort.`expr 80011 + $NUM_ENS` $WORK_DIR/fc/$DATE/wrf_enkf_input_${dm}_mean
cp $dm/fort.`expr 90011 + $NUM_ENS` $WORK_DIR/fc/$DATE/wrf_enkf_output_${dm}_mean
ln -fs $WORK_DIR/fc/$DATE/wrf_enkf_output_${dm}_mean $WORK_DIR/fc/$DATE/wrfinput_${dm}
done
echo complete > stat
| true
|
6987130fe08bf1b25fe982acf918541a256c72fd
|
Shell
|
theodore/olympiads
|
/HOJ/1001/run.sh
|
UTF-8
| 153
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash -e
# Randomized differential test: generate $1 cases and compare the candidate
# solver (./rabbit) against the reference (./spfa) on each one.
# Usage: run.sh <iterations> <gen-arg1> <gen-arg2> <gen-arg3>
# Shebang options are lost when invoked as 'bash run.sh', so set -e explicitly.
set -e
for (( i = 0; i < $1; ++i ))
do
echo "test case #$i"
# feed a random seed plus the generator parameters to the case generator
echo "$RANDOM" "$2" "$3" "$4" | ./data
./spfa
./rabbit < "in" > "out"
# any difference makes diff return non-zero, aborting the run via set -e
diff ans out
done
| true
|
8350c7a3de646c49f7649d6aacb209dbc41ea458
|
Shell
|
TianZongyang/fastdfs_DockerFile
|
/start.sh
|
UTF-8
| 1,483
| 3.484375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#set -e
# Container entry point for a FastDFS node: patch config files from the
# environment, start the tracker/storage daemon (per $FASTDFS_MODE), start
# nginx for storage nodes, then tail the daemon log to stay in the foreground.

# Override the daemon listening port.
if [ -n "$PORT" ] ; then
  sed -i "s|^port=.*$|port=${PORT}|g" /etc/fdfs/"$FASTDFS_MODE".conf
fi
# Override the HTTP port in both the daemon config and nginx.
if [ -n "$WEB_PORT" ] ; then
  sed -i "s|^http\.server_port=.*$|http\.server_port=${WEB_PORT}|g" /etc/fdfs/"$FASTDFS_MODE".conf
  sed -i "s|listen 80|listen ${WEB_PORT}|g" /usr/local/nginx/conf/nginx.conf
fi
# Override the storage group name (storage config + nginx location).
if [ -n "$GROUP_NAME" ] ; then
  sed -i "s|^group_name=.*$|group_name=${GROUP_NAME}|g" /etc/fdfs/storage.conf
  sed -i "s|group1|${GROUP_NAME}|g" /usr/local/nginx/conf/nginx.conf
fi
# Substitute the data path placeholder in the nginx config.
sed -i "s|#fastdfsPath#|${FASTDFS_BASE_PATH}|g" /usr/local/nginx/conf/nginx.conf
# Point the storage daemon and the client at the tracker server(s).
if [ -n "$TRACKER_SERVER" ] ; then
  sed -i "s|^tracker_server=.*$|tracker_server=${TRACKER_SERVER}|g" /etc/fdfs/storage.conf
  sed -i "s|^tracker_server=.*$|tracker_server=${TRACKER_SERVER}|g" /etc/fdfs/client.conf
fi
FASTDFS_LOG_FILE="${FASTDFS_BASE_PATH}/logs/${FASTDFS_MODE}d.log"
PID_NUMBER="${FASTDFS_BASE_PATH}/data/fdfs_${FASTDFS_MODE}d.pid"
echo "尝试启动 $FASTDFS_MODE 节点..."
# Remove a stale log so the tail below only shows this boot.
if [ -f "$FASTDFS_LOG_FILE" ]; then
  rm "$FASTDFS_LOG_FILE"
fi
# start the fastdfs node.
fdfs_"${FASTDFS_MODE}"d /etc/fdfs/"${FASTDFS_MODE}".conf start
if [ "$FASTDFS_MODE" == "storage" ] ; then
  /usr/local/nginx/sbin/nginx
fi
# wait for pid file(important!),the max start time is 5 seconds,if the pid number does not appear in 5 seconds,start failed.
TIMES=5
while [ ! -f "$PID_NUMBER" ] && [ "$TIMES" -gt 0 ]
do
  sleep 1s
  TIMES=$((TIMES - 1))
done
tail -f "$FASTDFS_LOG_FILE"
| true
|
c40d41e8510cad28d2921220acc6c733214d02d6
|
Shell
|
MrVortexx/exercicios-shell-script
|
/exercicio4
|
UTF-8
| 142
| 2.84375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# For every .txt file in the current directory, create a .bak copy and
# replace every occurrence of "the" with "an" inside the copy.
# FIX: original shebang was missing the leading '/', 'actual_name= echo $"$i"'
# was a no-op, and 'cp | sed ... $echo' piped unrelated commands together.
for i in *.txt; do
  [ -e "$i" ] || continue   # no matches: the unexpanded glob would remain
  cp -- "$i" "$i.bak"
  sed -i 's,the,an,g' "$i.bak"
done
| true
|
7a24420a059aa2645e28660d7be48fd916319628
|
Shell
|
ibrahimaycan/Shell-Scripts
|
/myprog4.sh
|
UTF-8
| 608
| 3.921875
| 4
|
[] |
no_license
|
# Print, in hexadecimal, every prime number from 2 up to the single numeric
# command-line argument.
num=$1
#--------------------------------------
# require exactly one argument
if [ $# -ne 1 ]
then
    echo "please enter 1 argument"
    exit
fi
#--------------------------------------checks if argumant is a number
re='^[0-9]+$'
if ! [[ $1 =~ $re ]] ;
then
    echo "argument must be a number" >&2; exit
fi
#---------------------------------------
# Trial division for each candidate c in 2..num (shell arithmetic instead of
# forking 'expr' on every iteration; break early once a divisor is found).
c=2
while [ "$c" -le "$num" ]
do
    flag=1
    i=2
    while [ "$i" -lt "$c" ]
    do
        if [ $((c % i)) -eq 0 ]
        then
            flag=0
            break   # composite: no need to test further divisors
        fi
        i=$((i + 1))
    done
    if [ "$flag" -ne 0 ]
    then
        # keep the variable out of the printf format string
        printf 'Hexadecimal of %d is' "$c"
        printf " %x\n" "$c"
    fi
    c=$((c + 1))
done
| true
|
e5aea9d1fe9ebda0be6ecd8ee81b454a68830e11
|
Shell
|
jenkins-x-apps/jx-app-jenkins
|
/plugins/updateplugins.sh
|
UTF-8
| 445
| 3.21875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Read the plugin ids from plugins.txt (one per line) and inject them into
# values.schema.json as a comma-separated list of JSON string literals,
# replacing the "PLUGINS_INJECTED_HERE" placeholder in the template.
echo "updating the plugins to the latest verions"
#updatebot pull -k plugins

echo "adding plugins to ../jx-app-jenkins/values.schema.json"
filename="plugins.txt"
pluginstxt=
# -r keeps backslashes literal; IFS= preserves surrounding whitespace
while IFS= read -r p; do
    pluginstxt="$pluginstxt\"$p\","
done < "$filename"
# trim the trailing , (POSIX parameter expansion instead of echo|sed)
pluginstxt=${pluginstxt%,}
sed "s/\"PLUGINS_INJECTED_HERE\"/${pluginstxt}/" ../jx-app-jenkins/values.schema.tmpl.json > \
  ../jx-app-jenkins/values.schema.json
| true
|
d05fa9a484c4940cc1c0029ef8ff3ec2f7a9eddc
|
Shell
|
foobarlab/funtoo-stage3-packer
|
/etc/build.conf
|
UTF-8
| 1,677
| 2.90625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash -uea
# vim: ts=4 sw=4 et
# User-editable build configuration. NOTE(review): this file appears to be
# sourced by the build scripts rather than executed -- the shebang options
# only apply if it is run directly; confirm against bin/config.sh.
set -a
# set -a auto-exports every variable assigned below, so sourcing this file
# populates the build environment.
# customized user configuration
# overrides defaults in bin/config.sh
# run `make config` to see actual settings
BUILD__HELP="see file 'etc/build.conf' for customizing settings"
# ----------------------------! customize settings below !----------------------------
# Everything below is commented out; uncomment a line to override its default.
## memory/cpu/disk used for final box:
#BUILD_BOX_CPUS="2"
#BUILD_BOX_MEMORY="2048"
#BUILD_GUEST_DISKSIZE="20480" # dynamic disksize in MB, e.g. 20480 => 20 GB
## Funtoo 1.4 (current more or less stable)
#BUILD_BOX_FUNTOO_VERSION="1.4"
#BUILD_RELEASE="${BUILD_BOX_FUNTOO_VERSION}-release-std"
#BUILD_RELEASE_VERSION_ID="2021-12-25"
#BUILD_FUNTOO_ARCHITECTURE="x86-64bit/intel64-nehalem"
#BUILD_FUNTOO_STAGE3="stage3-intel64-nehalem-${BUILD_BOX_FUNTOO_VERSION}-release-std"
#BUILD_GUEST_ADDITIONS=true # set to 'true' to install virtualbox guest additions
# Funtoo next (experimental next-release, see: https://forums.funtoo.org/topic/4970-announcing-next-release/)
#BUILD_BOX_FUNTOO_VERSION="0"
#BUILD_RELEASE="next"
#BUILD_RELEASE_VERSION_ID="2021-12-25" # FIXME parse xml from https://build.funtoo.org/index.xml to get version
#BUILD_FUNTOO_ARCHITECTURE="x86-64bit/generic_64" # FIXME arch/cpu into separate vars
#BUILD_FUNTOO_STAGE3="stage3-generic_64-next" # FIXME build string from cpu + release
#BUILD_GUEST_ADDITIONS=false # set to 'true' to install virtualbox guest additions
## enable custom overlay?
#BUILD_CUSTOM_OVERLAY=true
#BUILD_CUSTOM_OVERLAY_NAME="foobarlab-stage3"
#BUILD_CUSTOM_OVERLAY_BRANCH="stage3"
#BUILD_CUSTOM_OVERLAY_URL="https://github.com/foobarlab/foobarlab-overlay.git"
| true
|
61915db8e4ceed5d225dc7e5ede1d114bc55836e
|
Shell
|
omarkaramat/bash
|
/variables.sh
|
UTF-8
| 131
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/bash
# Demonstrate two built-in shell variables: the current script's PID ($$)
# and the six elements of the BASH_VERSINFO version array.
echo "$$" # pid of current bash script
for (( n = 0; n <= 5; n++ ))
do
echo "BASH_VERSINFO[$n] = ${BASH_VERSINFO[$n]}"
done
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.