blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
761505516a6c417f6666aee579fdbd17b9fb86bc | Shell | jeffreydurieux/neurohack_dl_gan | /oasis_png_loop.sh | UTF-8 | 845 | 3.578125 | 4 | [
"MIT"
] | permissive |
# create scan list file
# NOTE(review): the two rm calls run in the script's starting directory,
# while the lists are (re)created inside ../OASIS3_T1 after the cd calls
# below — confirm the intended working directory.
rm scan_list.txt
rm subj_list.txt
cd ..
cd OASIS3_T1
# Derive unique subject IDs by truncating each entry at "_MR".
ls | sed 's/_MR*/ /' | awk '{print $1}' | uniq >> subj_list.txt
# first loop: extract directory of first T1w image for each unique subject
for subj in $(cat 'subj_list.txt') ; do
    echo "running script on $subj"
    # First line of a multi-directory listing is the "<dir>:" heading.
    a=$(ls "$subj"_MR_d*/* | awk 'NR==1{print $1}')
    a=$(echo "${a::-1}")   # drop the trailing ':' from the heading
    # Second entry inside that session directory (presumably the T1w scan
    # folder — TODO confirm the ordering assumption).
    b=$(ls $a | awk 'NR==2{print $1}')
    echo "$a"/"$b">> scan_list.txt
done
# second loop: for each first unique T1w, run pngify script
for scan in $(cat 'scan_list.txt') ; do
    a=$scan
    # Subject ID without the "sub-" prefix becomes the output folder name.
    outputFolder=$(echo $a | sed 's/_ses*/ /' | awk '{print $1}' | sed 's/sub-//')
    # FIX: was 'print', which is not a Bash builtin (ksh/zsh only) and
    # failed with "command not found" on every iteration.
    echo $outputFolder
    outDir=/home/ubuntu/data/oasis_pngimages/$outputFolder
    mkdir -p $outDir
    # run script with input individual T1w image and output to
    python convert_nii_to_png.py /home/ubuntu/OASIS3_T1/$scan $outDir
done
| true |
0a453bc9dd4152e91dd9816cdd09d0818dddfb94 | Shell | bsc-wdc/compss | /builders/specs/sc/install | UTF-8 | 6,451 | 4.125 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash -e
####################################################################################################
# Name: install
# Description: Script to install COMPSs
# Parameters: <targetFolder> Folder where to install COMPSs
# ATTENTION: The target Folder will be completely removed and created again to avoid
# conflicts between installations. Please save any configuration files.
# Support: support-compss@bsc.es
####################################################################################################
####################
# FUNCTIONS
####################
# Print the installer's help text and terminate the script.
#
# Arguments:
#   $1 - exit status to finish with (0 when help was requested explicitly,
#        non-zero when printed from an error path).
# Note: exitValue is intentionally left global, matching the original code.
usage() {
  exitValue=$1
  echo " "
  echo " Usage: install [options] <targetFolder> [<queues_cfg>]"
  echo " "
  echo " Options:"
  echo " --help, -h Print this help message"
  echo " --no-bindings, -B Disable bindings installation"
  echo " --no-tracing, -T Disable tracing system installation"
  echo " --no-c-binding, -C Disable C++ binding installation"
  echo " --nothing, -N Disable all previous options"
  echo " "
  echo " Parameters:"
  echo " - targetFolder : Target folder of the COMPSs installation."
  echo " - queues_cfg : Queue configuration file to be copied as default into targetFolder/Runtime/scripts/queues/cfgs/"
  echo " "
  echo "ATTENTION: The COMPSs folder inside the target folder will be completely removed to avoid"
  echo " conflicts between installations. Please save any configuration files."
  echo " "
  echo "SUPPORT: support-compss@bsc.es"
  echo " "
  exit "$exitValue"
}
# Displays parsing arguments errors
# Report a command-line parsing error and exit through usage() with code 1.
#
# Arguments:
#   $1 - error message to print before the usage text.
display_error() {
  echo "$1"
  echo " "
  usage 1
}
# Parse the installer's command-line options and positional parameters.
#
# Globals written: bindings, tracing, c_binding, targetDir, queues_cfg
# Calls usage() / display_error() (defined above) on bad input.
#
# FIX: 'N' was accepted by the case statement below but missing from the
# getopts optstring ("hvBTAC-:"), so the documented '-N' flag was rejected
# as an unknown option. ('v' and 'A' stay in the optstring for backward
# compatibility even though no case arm handles them.)
get_args() {
  # Parse COMPSs Options
  while getopts hvBTACN-: flag; do
    # Treat the argument
    case "$flag" in
      h)
        # Display help
        usage 0
        ;;
      B)
        # Custom bindings value
        bindings=false
        ;;
      T)
        # Custom tracing value
        tracing=false
        ;;
      C)
        # Custom C++ binding value
        c_binding=false
        ;;
      N)
        # Disables all flags
        tracing=false
        bindings=false
        c_binding=false
        ;;
      -)
        # Long options arrive as '-' with the option name in OPTARG
        case "$OPTARG" in
          help)
            # Display help
            usage 0
            ;;
          no-bindings)
            # Custom bindings value
            bindings=false
            ;;
          no-tracing)
            # Custom tracing value
            tracing=false
            ;;
          no-c-binding)
            c_binding=false
            ;;
          nothing)
            # Disables all flags
            tracing=false
            bindings=false
            c_binding=false
            ;;
          *)
            # Flag didn't match any pattern. End of COMPSs flags
            display_error "${INCORRECT_PARAMETER}"
            break
            ;;
        esac
        ;;
      *)
        # Flag didn't match any pattern. End of COMPSs flags
        display_error "${INCORRECT_PARAMETER}"
        break
        ;;
    esac
  done
  # Shift option arguments
  shift $((OPTIND-1))
  # Get parameters
  if [ $# -lt 1 ] || [ $# -gt 2 ]; then
    echo "Incorrect number of parameters ($#)"
    usage 1
  fi
  targetDir=$1
  queues_cfg=$2
}
####################
# MAIN
####################
# Default feature toggles; get_args may switch them to false.
tracing=true
bindings=true
c_binding=true
system_os=$(uname)
# Extrae-based tracing is not supported on macOS.
if [[ "$system_os" == "Darwin" ]]; then
  tracing=false
  # NOTE(review): autoparallel is assigned here but never read in this
  # script — possibly consumed by a sub-script; confirm.
  autoparallel=false
fi
get_args "$@"
# WARN MESSAGE and log parameters
echo " "
echo "Options:"
echo " - Tracing: $tracing"
echo " - Bindings: $bindings"
echo " - C_Binding: $c_binding"
echo " "
echo "Parameters:"
echo " - Target Installation Folder = $targetDir"
echo " - SC CFG file = $queues_cfg"
echo " "
echo "ATTENTION: The target folder will be completely removed to avoid"
echo " conflicts between installations. Please save any configuration files."
echo " "
echo " You can abort the installation within 5s..."
# NOTE(review): the grace-period sleep is commented out, so the 5s warning
# above is currently not honored.
#sleep 5
# Begin installation
echo " "
echo "Beginning COMPSs installation..."
echo " "
# Define script variables
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Deploy COMPSs
echo "- Deploy COMPSs files"
rm -rf "${targetDir}"
mkdir -p "${targetDir}"
cp -r "${SCRIPT_DIR}"/* "${targetDir}"
# Rewrite the packaged default prefix /opt/COMPSs/ to the chosen target.
sed -i -e 's#/opt/COMPSs/#'"${targetDir}"'#g' "${targetDir}"/Runtime/configuration/xml/projects/default_project.xml
rm -rf "${targetDir}"/Bindings/*
echo " Success"
if [ "${bindings}" == "true" ]; then
  # Install bindings-common
  echo "- Install bindings-common"
  cd "${SCRIPT_DIR}"/Bindings/bindings-common/
  ./install_common "${targetDir}"/Bindings/bindings-common
  cd "${SCRIPT_DIR}"
  echo " Success"
  # Install C-binding (only meaningful when bindings are enabled at all)
  if [ "${c_binding}" == "true" ]; then
    echo "- Install C-binding"
    cd "${SCRIPT_DIR}"/Bindings/c
    ./install "${targetDir}"/Bindings/c true
    mkdir -p "${targetDir}"/Runtime/scripts/system/c/
    cp "${targetDir}"/Bindings/c/bin/* "${targetDir}"/Runtime/scripts/system/c
    cp ./compss_build_app "${targetDir}"/Runtime/scripts/user/
    cd "${SCRIPT_DIR}"
    echo " Success"
  fi
  # Install Python-binding
  echo "- Install Python binding"
  cd "${SCRIPT_DIR}"/Bindings/python
  ./install "${targetDir}"/Bindings/python false python3
  cd "${SCRIPT_DIR}"
  echo " Success"
fi
if [ "${tracing}" == "true" ]; then
  # Install extrae
  echo "- Install extrae"
  cd "${SCRIPT_DIR}"/Dependencies
  ./install_extrae.sh "${SCRIPT_DIR}"/Dependencies/extrae "${targetDir}"/Dependencies/extrae true
  cd "${SCRIPT_DIR}"
  echo " Success"
fi
# Set permissions
echo "- Set COMPSs permissions"
chmod -R 755 "${targetDir}"
chmod -R 777 "${targetDir}"/Runtime/configuration/
# Copy the queue.cfg as default.cfg if it is defined
if [ -n "${queues_cfg}" ]; then
  cp "${targetDir}"/Runtime/scripts/queues/supercomputers/"${queues_cfg}" "${targetDir}"/Runtime/scripts/queues/supercomputers/default.cfg
fi
# End
echo " "
echo "Congratulations!"
echo "COMPSs Successfully installed!"
echo " "
echo "To use COMPSs please source the ${targetDir}/compssenv file into the users .bashrc"
echo " "
exit 0
| true |
7640ba883b8ff478b5692ae6ac6295985c4a9834 | Shell | mikelangelo-project/vTorque | /src/components/iocm/iocm-common.sh | UTF-8 | 3,140 | 3.234375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#
# Copyright 2016-2017 HLRS, University of Stuttgart
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#=============================================================================
#
# FILE: iocm-common.sh
#
# USAGE: source iocm-common.sh
#
# DESCRIPTION: Constants, configuration and functions for the IOcm integration.
# OPTIONS: ---
# REQUIREMENTS: IOcm must be installed.
# BUGS: ---
# NOTES: ---
# AUTHOR: Nico Struckmann, struckmann@hlrs.de
# COMPANY: HLRS, University of Stuttgart
# VERSION: 0.1
# CREATED:
# REVISION: ---
#
# CHANGELOG
# v0.2:
#
#=============================================================================
#
set -o nounset;
shopt -s expand_aliases;
# source the config and common functions
# NOTE(review): VTORQUE_DIR is presumably exported by the profile script
# sourced on the next line — confirm.
source /etc/profile.d/99-mikelangelo-hpc_stack.sh;
source "$VTORQUE_DIR/common/const.sh" $@;
source "$VTORQUE_DIR/common/config.sh";
source "$VTORQUE_DIR/common/root-functions.sh";
#
# happens in case of manual debugging
# (LOG_FILE / VM_JOB_DIR normally come from the sourced files above; fall
# back to harmless defaults so interactive use does not break.)
#
if [ ! -f $LOG_FILE ]; then
  # prevents dir to be created as root
  LOG_FILE=/dev/null;
fi
if [ ! -d $VM_JOB_DIR ]; then
  # prevents IOcm to fail, the task template needs to be written
  VM_JOB_DIR=/tmp/;
fi
#============================================================================#
#                                                                            #
#                                  CONFIG                                    #
#                                                                            #
#============================================================================#
#
# iocm config file (per-node, inside the job directory)
#
IOCM_JSON_CONFIG="$VM_JOB_DIR/$LOCALHOST/iocm-config.json";
#
# Name of the network interface to be used for i/o operations
#
IOCM_INTERFACE_NAME="ib0";
#
# Debug log for iocm
#
IOCM_LOG_FILE="$VM_JOB_DIR/$LOCALHOST/iocm.log";
#============================================================================#
#                                                                            #
#                                FUNCTIONS                                   #
#                                                                            #
#============================================================================#
#---------------------------------------------------------
#
# Ensures all environment variables are in place.
# If not it aborts with an error.
#
# FIX: '$kernelVersion' was interpolated into the success log message
# without ever being assigned; because this file runs under
# 'set -o nounset' (see top of file), that aborted the sourcing script
# whenever the IOcm module WAS present. It is now populated from
# 'uname -r' (presumably the IOcm-patched kernel release — TODO confirm).
#
checkIOcmPreconditions() {
  local kernelVersion
  kernelVersion="$(uname -r)"
  # check for kernel mod 'stats' (substring match kept from the original)
  if lsmod | grep -q stats; then
    logDebugMsg "IOcm Kernel detected, version: $kernelVersion";
  else
    logErrorMsg "No IOcm kernel available.";
  fi
}
| true |
f01d8d613ab2fcbb867a0ae3dcc8cfe405b43ea6 | Shell | dawmlight/vendor_oh_fun | /hihope_neptune-oh_hid/00_src/v0.3/prebuilts/lite/sysroot/build/thirdparty_headers.sh | UTF-8 | 2,465 | 3.265625 | 3 | [
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-other-permissive",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-musl-exception",
"BSD-2-Clause",
"MIT"
] | permissive | #!/bin/bash
#Copyright (c) 2020-2021 Huawei Device Co., Ltd.
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
# This script is used to prepare header files for musl's libc.so.
set -e

# Resolve paths relative to this script's expected location four levels
# below the project root (prebuilts/lite/sysroot/build); the relative PWD
# arithmetic requires running it from that directory.
PRJ_ROOT="${PWD}/../../../../"
LINUX_HDR="${PRJ_ROOT}/prebuilts/lite/sysroot/thirdparty/linux_headers_install"
TMP_DIR_ORI="${PRJ_ROOT}/prebuilts/lite/sysroot/ohos_tmp_ori"
TMP_DIR="${PRJ_ROOT}/prebuilts/lite/sysroot/ohos_tmp"

# Optional argument: the musl tree that receives the prepared headers.
if [ $# -eq 1 ]; then
    MUSL_DIR="$1/include"
else
    MUSL_DIR="${PWD}/musl/include"
fi
export Z_BUILD_TOP="${PRJ_ROOT}"

if [ ! -d "${LINUX_HDR}/asm" ] || [ ! -d "${LINUX_HDR}/asm-generic" ] || [ ! -d "${LINUX_HDR}/linux" ];then
    echo "please install headers first!!!"
    # NOTE(review): SYSROOT is never defined in this script, so the path
    # below prints as "/../../thirdparty/README" — confirm the intended
    # variable (likely PRJ_ROOT-relative).
    echo "see guide at ${SYSROOT}/../../thirdparty/README"
    exit 1
fi

mkdir -p "${TMP_DIR_ORI}"
mkdir -p "${TMP_DIR}"

# Copy the minimal set of kernel UAPI headers musl needs, preserving the
# uapi/<subdir> layout. (Replaces twelve copy-pasted mkdir/cp lines; the
# expansions are now quoted per ShellCheck SC2086.)
for hdr in \
    asm-generic/bitsperlong.h \
    asm-generic/int-ll64.h \
    asm-generic/posix_types.h \
    asm/bitsperlong.h \
    asm/posix_types.h \
    asm/types.h \
    linux/capability.h \
    linux/posix_types.h \
    linux/stddef.h \
    linux/types.h ; do
    mkdir -p "${TMP_DIR_ORI}/uapi/${hdr%/*}"
    cp "${LINUX_HDR}/${hdr}" "${TMP_DIR_ORI}/uapi/${hdr%/*}"
done

# Generate a stub linux/compiler.h that defines away the kernel's __user
# annotation. (Replaces four echo appends; identical bytes are produced.)
cat >> "${TMP_DIR_ORI}/uapi/linux/compiler.h" <<'EOF'
#ifndef _UAPI_LINUX_COMPILER_H
#define _UAPI_LINUX_COMPILER_H
#define __user
#endif
EOF

# Run the header post-processor from the project root.
pushd "${PRJ_ROOT}"
python prebuilts/lite/sysroot/build/update_headers.py 2>/dev/null
popd

# Install the processed headers into the musl tree and clean up.
cp -rf "${TMP_DIR}"/uapi/* "${MUSL_DIR}/"
rm -rf "${TMP_DIR_ORI}"
rm -rf "${TMP_DIR}"
| true |
886e016f913233c41d4d9741a1f458c10609f7e2 | Shell | vipoo/msxrc2014 | /msxsys-build/tools/sf | UTF-8 | 922 | 3.96875 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
set -e

# "sf" (symlink file): creates symlinks under ${WRK_DIR} pointing back at
# source files, recursing over lists and glob patterns by re-invoking
# itself — so this script must be reachable on $PATH as "sf".
# NOTE(review): WRK_DIR is taken from the environment and may be empty, in
# which case links land directly under $PWD — confirm that is intended.

PWD=$(pwd)   # shadows the shell-maintained PWD with a fixed snapshot

# Mode 1: "sf -s <src_dir> <file...>" — link each listed file of src_dir.
if [[ "$1" == "-s" ]]; then
  SRC_DIR=$2
  for file in "${@:3}"
  do
    sf "${SRC_DIR}/${file}"
  done
  exit 0
fi

SRC_FILE=$1
DEST_FILE=$2

# Mode 2: the source argument contains a literal '*' — expand it here and
# link each match under its lower-cased basename.
PATTERN="\*"
if [[ "${SRC_FILE}" =~ $PATTERN ]]; then
  for file in ${PWD}/${SRC_FILE}
  do
    [ -e "$file" ] || continue   # skip when the glob matched nothing
    target=$(basename "$file")
    src=${file#"$PWD/"}
    sf "$src" "${target,,}"     # ${var,,} lower-cases (Bash 4+)
  done
  exit 0
fi

# Mode 3: single file; the destination defaults to the source basename.
if [[ ! -f "${SRC_FILE}" ]]; then
  echo "Source file not found ${SRC_FILE}"
  exit 1
fi
if [[ "${DEST_FILE}" == "" ]]; then
  DEST_FILE=$(basename $SRC_FILE)
fi

# "NEWFILE" sentinel: readlink fails when the link does not exist yet.
CURRENT_LINK=$(readlink ${PWD}/${WRK_DIR}/${DEST_FILE} || echo "NEWFILE")
if [[ "$CURRENT_LINK" == 'NEWFILE' ]]; then
  ln -s "${PWD}/$SRC_FILE" "${PWD}/${WRK_DIR}/${DEST_FILE}"
  exit 0
fi

# An existing link pointing elsewhere is reported as an error, not replaced.
if [[ "$CURRENT_LINK" != "${PWD}/$SRC_FILE" ]]; then
  echo "Redirecting existing linked file $CURRENT_LINK, ${PWD}/${WRK_DIR}/${DEST_FILE}"
  exit 1
fi
| true |
6a981fbfac4c60c6c62d41ffa52c4325f81e70e5 | Shell | andriipanchuk/test | /nagiosxi/fullinstall | UTF-8 | 4,867 | 3.984375 | 4 | [] | no_license | #!/bin/bash -e
# Set up system variables
# NOTE(review): usage(), run_sub(), path_is_ok() and the $distro/$version/
# $architecture/$mysqld/$proddir variables used below are provided by the
# files sourced here — confirm.
./init.sh
. ./xi-sys.cfg
. ./functions.sh

# Explicitly set umask
umask 0022

# Install log
log="install.log"

# Installation is interactive by default
export INTERACTIVE="True"

# INSTALL_PATH is current dir for use in making install dir independent
export INSTALL_PATH=`pwd`

# we wont tune mysql unless we're the ones installing it
export TUNE_MYSQL="False"

# Force the install even if the XI directory exists
export FORCE=0

# GNOME-based installs sometimes ship a user PATH without the sbin dirs.
if ! path_is_ok; then
  echo "Your system \$PATH does not include /sbin and /usr/sbin. This is usually the result of installing GNOME rather than creating a clean system."
  echo "Adding /sbin and /usr/sbin to \$PATH."
  PATH="$PATH:/usr/sbin:/sbin"
fi

# Parse command line
# Added offline install option - SR
while [ -n "$1" ]; do
  case "$1" in
    -h | --help)
      usage
      exit 0
      ;;
    -v | --version)
      # Extract the "full=..." line from the packaged xiversion file.
      sed -n '/full/ s/.*=//p' "${0%/*}/nagiosxi/basedir/var/xiversion"
      exit 0
      ;;
    -n | --non-interactive)
      export INTERACTIVE="False"
      ;;
    -p | --mysql-password)
      # Takes a value: record it and persist it via the xivar helper.
      mysqlpass="$2"
      ./xivar mysqlpass "$2"
      shift
      ;;
    -o | --offline-install)
      export OFFLINE="TRUE"
      touch ./offline
      ;;
    -f | --force)
      export FORCE=1
      ;;
    *)
      echo "Unknown option: $1" >&2
      usage >&2
      exit 1
  esac
  shift
done

# Verify that XI is not already installed
if [ -d /usr/local/nagiosxi/html ]; then
  if [ $FORCE -eq 0 ]; then
    echo "Error: It looks like Nagios XI is already installed in /usr/local/nagiosxi. If you know what"
    echo "you're doing you can run the installer with -f or --force to run the install."
    exit 1;
  fi
fi

if [ "$INTERACTIVE" = "True" ]; then
  # CentOS, RedHat, Raspbian, Ubuntu, Debian, openSUSE, or SUSE Enterprise
  # Word-wrap the banner to the current terminal width.
  fmt -s -w $(tput cols) <<-EOF
========================
Nagios XI Full Installer
========================
This script will do a complete install of Nagios XI by executing all necessary sub-scripts.
IMPORTANT: This script should only be used on a 'clean' install of CentOS, RHEL, Ubuntu LTS, Debian, or Oracle. Do NOT use this on a system that has been tasked with other purposes or has an existing install of Nagios Core. To create such a clean install you should have selected only the base package in the OS installer.
EOF
fi
echo "Checking MySQL credentials..."
# Check Mysql root password if MySQL is already installed and running...
if service $mysqld status &>/dev/null; then
  # Test for null MySQL root password
  if mysqlshow -u root &>/dev/null; then
    echo "After installation your MySQL root password will be set to 'nagiosxi' (without quotes)."
  elif mysqlshow -u root -p"$mysqlpass" &>/dev/null; then
    echo "Stored MySQL password validated."
  else
    # Up to three interactive attempts to obtain a working root password.
    for i in 1 2 3; do
      if [ "$INTERACTIVE" = "True" ]; then
        echo "Enter the MySQL root password to continue..."
        read -p "MySQL Root Password: " pass
      fi
      # Test the password
      if mysqlshow -u root -p"$pass" &>/dev/null; then
        echo "Password validated."
        mysqlpass="$pass"
        # Update xi-sys.cfg with MySQL password for later use by subcomponent install
        if ! ./xivar mysqlpass "$mysqlpass"; then
          echo "ERROR: Failed to update xi-sys.cfg with MySQL password - exiting." >&2
          exit 1
        fi
        break
      else
        echo "Password failed." >&2
        [ $i -eq 3 ] && exit 1
      fi
    done
  fi
else
  echo "MySQL not yet installed - that's okay."
  export TUNE_MYSQL="True"
fi

# Initialize install.log
cat >>"$log" <<-EOF
Nagios XI Installation Log
==========================
DATE: $(date)
DISTRO INFO:
$distro
$version
$architecture
EOF

# Record whether this is a fresh install or an upgrade of an existing tree.
{
  if [ ! -f "$proddir/var/xiversion" ]; then
    echo "THIS IS A NEW INSTALL!"
  else
    echo "THIS IS AN UPGRADE!"
    echo
    echo "OLD VERSION:"
    grep -v "#" "$proddir/var/xiversion"
  fi
  echo
  echo "INSTALLING:"
  grep -v "#" nagiosxi/basedir/var/xiversion
  echo
} >>"$log"

# Install the subcomponents
# NOTE(review): steps 7, 10 and 12 are absent from this sequence — confirm
# they are intentionally retired.
run_sub ./0-repos noupdate
run_sub ./1-prereqs
run_sub ./2-usersgroups
run_sub ./3-dbservers
run_sub ./4-services
run_sub ./5-sudoers
run_sub ./6-firewall
run_sub ./8-selinux
run_sub ./9-dbbackups
run_sub ./11-sourceguardian
run_sub ./13-phpini
run_sub ./A-subcomponents
run_sub ./A0-mrtg
run_sub ./B-installxi
run_sub ./C-cronjobs
run_sub ./D-chkconfigalldaemons
run_sub ./E-importnagiosql
run_sub ./F-startdaemons
run_sub ./Z-webroot

echo >>"$log"
echo "Install complete!" >>"$log"

# Get IP address
# First global IPv4; falls back to a bracketed IPv6, then a placeholder.
ip=$(ip addr | grep global | grep -m 1 'inet' | awk '/inet[^6]/{print substr($2,0)}' | sed 's|/.*||')
if [ "$ip" == "" ]; then
  ip=$(ip addr | grep global | grep -m 1 'inet' | awk '/inet6/{print substr($2,0)}' | sed 's|/.*||')
  if [ "$ip" == "" ];then
    ip="<HOSTNAME>"
  else
    ip="[$ip]"
  fi
fi

cat <<-EOF
Nagios XI Installation Complete!
--------------------------------
You can access the Nagios XI web interface by visiting:
http://${ip}/nagiosxi/
EOF
| true |
d812c5c5fc5157da3ab1df5f42eef11908d6f221 | Shell | joshuarubin/dotfiles | /private_dot_config/yabai/executable_yabairc | UTF-8 | 4,098 | 2.765625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env sh
# Yabai tiling-window-manager configuration (macOS); executed at startup.

# the scripting-addition must be loaded manually if
# you are running yabai on macOS Big Sur. Uncomment
# the following line to have the injection performed
# when the config is executed during startup.
#
# for this to work you must configure sudo such that
# it will be able to run the command without password
#
# see this wiki page for information:
# - https://github.com/koekeishiya/yabai/wiki/Installing-yabai-(latest-release)
#
sudo yabai --load-sa
# Re-inject the scripting addition whenever the Dock (its host) restarts.
yabai -m signal --add event=dock_did_restart action="sudo yabai --load-sa"
# global settings
# set mouse follows focus mode (default: off)
yabai -m config mouse_follows_focus on
# set focus follows mouse mode (default: off, options: off, autoraise, autofocus)
yabai -m config focus_follows_mouse autoraise
# specify which display a newly created window should be managed in (default:
# default, options: default, focused, cursor)
yabai -m config window_origin_display focused
# New window spawns to the right if vertical split, or bottom if horizontal split
yabai -m config window_placement second_child
# floating windows are always on top (default: off)
yabai -m config window_topmost off
# modify window shadows (default: on, options: on, off, float)
# example: show shadows only for floating windows
yabai -m config window_shadow on
# window opacity (default: off)
# example: render all unfocused windows with 90% opacity
yabai -m config window_opacity off
yabai -m config active_window_opacity 1.0
yabai -m config normal_window_opacity 0.90
# window borders (disabled; width/colors below only apply when turned on)
yabai -m config window_border off
yabai -m config window_border_width 6
yabai -m config active_window_border_color 0xff775759
yabai -m config normal_window_border_color 0xff555555
# Floating point value between 0 and 1 (default: 0.5)
yabai -m config split_ratio 0.618 # golden ratio
# Balance the window tree upon change, so that all windows occupy the same area
# on or off (default: off)
yabai -m config auto_balance off
# set mouse interaction modifier key (default: fn, options: cmd, alt, shift, ctrl, fn)
yabai -m config mouse_modifier alt
# set modifier + left-click drag to move window (default: move)
yabai -m config mouse_action1 move
# set modifier + right-click drag to resize window (default: resize)
yabai -m config mouse_action2 resize
# action performed when a bsp-managed window is dropped in the center of some
# other bsp-managed window.
yabai -m config mouse_drop_action swap
# general space settings
# bsp or float (default: float, options: bsp, stack, float)
yabai -m config layout bsp
# Set all padding and gaps to 20pt (default: 0)
# NOTE(review): comment says 20pt but the values below are all 0 — the
# comment appears stale.
yabai -m config top_padding 0
yabai -m config bottom_padding 0
yabai -m config left_padding 0
yabai -m config right_padding 0
yabai -m config window_gap 0
# Custom app rules (exclude utility/popup apps from tiling)
yabai -m rule --add app="^Fantastical Helper$" manage=off border=off
yabai -m rule --add app="^System Preferences$" manage=off
yabai -m rule --add app="^Parcel$" manage=off
yabai -m rule --add app="^1Password$" manage=off
yabai -m rule --add label="Firfox PIP" app="^Firefox$" title="^(Picture-in-Picture)$" manage=off
# Destroy empty spaces when leaving them
# (queries the just-left space via yabai's JSON output + jq, destroys it if
# it holds no windows, then refreshes the Hammerspoon status bar)
yabai -m signal --add event=space_changed action='(RSINDEX=$(yabai -m query --spaces | jq -re ".[] | select(.id == $YABAI_RECENT_SPACE_ID).index") && [ "$(yabai -m query --spaces --space $RSINDEX | jq -re ".windows | length")" -eq 0 ] && yabai -m space $RSINDEX --destroy); /usr/local/bin/hs -c "YabaiBar:update()"'
# for when the current display changes
yabai -m signal --add event=display_changed action="/usr/local/bin/hs -c 'YabaiBar:update()'"
# display_changed does not trigger when the display changes
# but the front-most application stays the same
yabai -m signal --add event=application_front_switched action="/usr/local/bin/hs -c 'YabaiBar:update()'"
echo "yabai configuration loaded.."
# animations have been disabled to prevent a race with the window manager
# defaults write com.apple.finder DisableAllAnimations -bool true
# killall Finder
# re-enable with
# defaults delete com.apple.finder DisableAllAnimations
# killall Finder
| true |
88b628eafac332cac135a27bd11a24a7c9daef99 | Shell | mdholloway/mediawiki-vagrant | /setup.sh | UTF-8 | 415 | 3.09375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Abort early with download guidance when Vagrant is missing.
# FIX: 'command -v' replaces the non-standard 'which' (ShellCheck SC2230);
# behavior is otherwise identical.
if ! command -v vagrant > /dev/null; then
    echo "Vagrant doesn't seem to be installed. Please download and install it"
    echo "from http://www.vagrantup.com/downloads.html and re-run setup.sh."
    exit 1
fi

# Interactive first-time configuration, then point the user at next steps.
vagrant config --required

echo
echo "You're all set! Simply run \`vagrant up\` to boot your new environment."
echo "(Or try \`vagrant config --list\` to see what else you can tweak.)"
| true |
43f4f50352a8dc7cbd0c167d5c77380750671779 | Shell | kaihenzler/rapid-prototyping-nestjs | /docker/db/entrypoint.sh | UTF-8 | 700 | 3.5625 | 4 | [] | no_license | #!/bin/bash
# start db in background to execute query and create db if needed
# (SQL Server must be up before the sqlcmd probe below can connect.)
/opt/mssql/bin/sqlservr &
# Terminate one or more processes and block until each has fully exited.
#
# Arguments:
#   $1 - whitespace/newline-separated PID list (e.g. the output of
#        `lsof -t`, which prints one PID per line when several processes
#        share a port).
#
# FIX: the original passed the whole list to `tail --pid=$1`, so with more
# than one PID tail treated the extra PIDs as file names. Iterating handles
# each PID individually; an empty list is now a no-op.
killAndWait() {
  local pid
  for pid in $1; do
    echo "shutting down process $pid"
    # `tail --pid=PID -f /dev/null` (GNU coreutils) returns once PID dies.
    kill "$pid" && tail --pid="$pid" -f /dev/null
    echo "finished shutting down $pid"
  done
}
# Probe whether the application login already works; exit code 0 from the
# trivial query means the database was created on a previous run.
# NOTE(review): credentials are hardcoded and there is no wait-for-startup
# loop before this probe, so it may race the server start — confirm this
# is acceptable for a dev/test container.
echo "Testing if database was created ..."
/opt/mssql-tools/bin/sqlcmd -S localhost -U more -P "s4fePassword" -Q "SELECT 1"
test_query_result=$?
if [[ "$test_query_result" -ne 0 ]]; then
  echo "Create database"
  /opt/mssql-tools/bin/sqlcmd -S localhost -U sa -P "s4fePassword" -i /var/opt/mssql/testdata/create_db.sql -r
fi

# kill the background db and start into foreground to keep container alive
killAndWait "$(lsof -i:1433 -t)" && /opt/mssql/bin/sqlservr
| true |
f5c8bbc2312aafa3c7f44c18d7b0215eb69ef7b9 | Shell | Thaodan/pkg_diff | /src/lib/options/5-debug.in.sh | UTF-8 | 255 | 2.9375 | 3 | [] | no_license | #!/bin/sh
# Option module for the surrounding getopts-style framework.
# NOTE(review): inferred from the variable names — the sourcing script
# reads $args (flag characters to register) and $arg_func (handler name),
# and sets $arg before each handler call; confirm against the framework.
#help_begin
# -D : debug enable debug mode
# -V : enable verbose mode
# -m : be chatty
#help_end
args=DVm
arg_func=option_debug

# Translate the current flag ($arg, set by the caller) into its toggle.
option_debug()
{
	case $arg in
	    D) debug=true;;
	    V) verbose=true;;
	    m) msg=true;;
	esac
}
| true |
1209ba6118c40a33fc5db29639ed18549aa329f6 | Shell | Nicholas-Kron/Kron_Cohort77_CoExpression_Analysis | /scripts/bash/MakeAllSalmonBBDukScripts.sh | UTF-8 | 1,619 | 2.78125 | 3 | [] | no_license | #! /usr/bin/env bash
projdir="/scratch/projects/fieberlab/Batch77"
salmondir="/nethome/n.kron/local/salmon/0.11.2/bin"
project="fieberlab"
transcriptome="${projdir}/Genome/GCF_000002075.1_AplCal3.0_rna.fna.gz"
index="${projdir}/Genome/AplCal3.0_salmon_index"
if [ ! -d ${projdir}/salmon ]
then
mkdir ${projdir}/salmon
fi
for samp in `cat ${projdir}/samples.txt`
do
###salmon mapping Scripts
echo "making salmon script for ${samp}"
echo '#!/bin/bash' > ${projdir}/salmon/${samp}_salmon.job
echo '#BSUB -P '$project'' >> ${projdir}/salmon/${samp}_salmon.job
echo '#BSUB -J '$samp'_quant' >> ${projdir}/salmon/${samp}_salmon.job
echo '#BSUB -e '$samp'_quant.err' >> ${projdir}/salmon/${samp}_salmon.job
echo '#BSUB -o '$samp'_quant.out' >> ${projdir}/salmon/${samp}_salmon.job
echo '#BSUB -W 12:00' >> ${projdir}/salmon/${samp}_salmon.job
echo '#BSUB -n 8' >> ${projdir}/salmon/${samp}_salmon.job
echo '#BSUB -R "span[ptile=8]"' >> ${projdir}/salmon/${samp}_salmon.job
#echo '#BSUB -R "rusage[mem=128]"' >> ${projdir}/salmon/${samp}_salmon.job
echo '#BSUB -u n.kron@umiami.edu' >> ${projdir}/salmon/${samp}_salmon.job
echo '#BSUB -q general' >> ${projdir}/salmon/${samp}_salmon.job
echo ''${salmondir}'/salmon quant \
-i '${index}' \
-l ISR \
-1 '${projdir}'/bbduk_reads/'${samp}'_READ1_clean.fastq.gz \
-2 '${projdir}'/bbduk_reads/'${samp}'_READ2_clean.fastq.gz \
-p 8 \
--rangeFactorizationBins 4 \
--validateMappings \
--seqBias \
--gcBias \
-o '${projdir}'/salmon/'${samp}'_quant
rm '${projdir}'/salmon/'${samp}'_salmon.job' >> ${projdir}/salmon/${samp}_salmon.job
bsub < ${projdir}/salmon/${samp}_salmon.job
done
| true |
39bcdcab9045b17ca921824ea8a06aa513839270 | Shell | mbelanabel/repositorio-scripts-alumnos | /36_JuanTonda/2018/36_2018_JuanT_cambiarT.sh | UTF-8 | 663 | 3.1875 | 3 | [] | no_license | #!bin/bash
# JUAN TONDA. Mayo 2018
# cambiar el tamaño de un disco virtual
# solicita el fichero que queremos cambiar
# vboxmanage modifyhd [fichero_vdi] --resize [tamaño]
# fichero_vdi-> sustituir por el que correspondar a extender
# tamaño -> p.e. (para 40 GB.) - 40960 (indicado en bytes)
clear
# solicitamos el fichero que queremos cambiar
echo " "
read -p " ¿ Qué archivo VDI quieres cambiar su UUID ? : " archivo
echo " "
read -p " ¿ Tamaño de ampliación (indicado en bytes 40GB - 40960) ? : " cantidad
if test -f $archivo; then
vboxmanage modifyhd $archivo --resize $cantidad
else
echo "No existe el archivo VDI: " $archivo
fi
| true |
35c0412d01c78cd6552b270b9507e56775853649 | Shell | mindis/hillview | /bin/rebuild.sh | UTF-8 | 325 | 2.796875 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | #!/bin/bash
# A small shell script which rebuilds both projects that compose Hillview
mydir="$(dirname "$0")"
if [[ ! -d "$mydir" ]]; then mydir="$PWD"; fi
source $mydir/config.sh
# Bail out on first error
set -e
export MAVEN_OPTS="-Xmx2048M"
pushd $mydir/../platform
mvn install
popd
pushd $mydir/../web
mvn package
popd
| true |
7a9b9256cff2b5dac333d8cb478e45b3bfd4956e | Shell | fcrespo82/dotfiles | /functions.d/git | UTF-8 | 189 | 3.078125 | 3 | [] | no_license | #!/usr/bin/env sh
# Walk the current tree for every git repository (.git entries) and, in
# each repo's directory, print a colored "repo: <name>" label followed by
# its short status.
# NOTE(review): relies on find's -execdir and spawns bash despite the sh
# shebang upstream; hyphenated function names are a bashism — confirm this
# file is only sourced by bash/zsh.
git-status-recursive() {
    find . -name .git -execdir bash -c 'echo -en "\033[1;31m"repo: "\033[1;34m"; basename "`git rev-parse --show-toplevel`"; git status -s' \;
}
6ae98a279358e5b3f75394d1145c1d5e12c8e04c | Shell | Alpha666/SoalShiftSISOP20_modul1_T13 | /soal1.sh | UTF-8 | 1,538 | 3 | 3 | [] | no_license | #!/bin/bash
# Analyses Sample-Superstore.csv. Field usage below suggests $13 = region,
# $11 = state, $NF/$21 = profit, $17 = product name — inferred from the
# awk programs; confirm against the CSV header.
# NOTE(review): the file's shebang reads "#!bin/bash" (missing the leading
# '/'), so the script only runs when invoked explicitly via "bash soal1.sh".
echo "Region yang memiliki profit paling sedikit : "
# Sum profit per region, sort ascending, keep the region with the minimum.
poina=`awk -F "," 'FNR>1{(seen[$13]+=$NF)}END{for(i in seen)print i, seen[i]}' Sample-Superstore.csv | sort -n | awk 'FNR<2{print $1}'`
echo "$poina"
echo ""
echo "2 negara bagian yang memiliki profit paling sedikit dari poin A "
# Lowest-profit state within that region -> susah.txt; second -> njir.txt.
poinb=`awk -F "," -v a=$poina 'FNR>1{if(a~$13)seen[$11]+=$NF}END{for(i in seen)print i, seen[i]}' Sample-Superstore.csv | sort -nk2 | awk 'FNR<2{print $1}' > susah.txt`
poinb2=`awk -F "," -v a=$poina 'FNR>1{if(a~$13)seen[$11]+=$NF}END{for(i in seen)print i, seen[i]}' Sample-Superstore.csv | sort -nk2 | awk 'NR==2{print $1}' > njir.txt`
cat "susah.txt"
varsusah=$(cat "susah.txt")
cat "njir.txt"
varnjir=$(cat "njir.txt")
echo ""
# NOTE(review): poinc/poinc2 capture nothing because the pipelines redirect
# into files; only the generated .txt files carry data and apaini.txt alone
# is read back (into varpusing, which is itself unused below).
poinc=`awk -F "," -v susah="$varsusah" 'FNR>1{if(susah~$11)seen[$NF]}END{for(i in seen)print i}' Sample-Superstore.csv | sort -n | awk 'NR==1,NR==10{print $1}' > apaini.txt `
poinc2=`awk -F "," -v njir="$varnjir" 'FNR>1{if(njir~$11)seen[$NF]}END{for(i in seen)print i}' Sample-Superstore.csv | sort -n | awk 'NR==1,NR==10{print $1}' > pusinggua.txt `
echo "10 barang dari negara bagian $varsusah"
varpusing=$(cat "apaini.txt")
# Ten lowest profit values with their product names for the first state.
hasilc=`awk -F "\"*,\"*" -v susah="$varsusah" 'FNR>1{if($11~susah)printf "%f %s\n",$21,$17}' Sample-Superstore.csv | sort -g| awk 'NR<11{print $0}'`
echo "$hasilc"
echo ""
echo "10 barang dari negara bagian $varnjir"
# Same report for the second state.
hasilc2=`awk -F "\"*,\"*" -v njir="$varnjir" 'FNR>1{if($11~njir)printf "%f %s\n",$21,$17}' Sample-Superstore.csv | sort -g | awk 'NR<11{print $0}'`
echo "$hasilc2"
f2bcbc105b4bd2f536b2dacb24e8c0635b057d7a | Shell | EdrianI/openeew-detection | /detector | UTF-8 | 1,176 | 3.203125 | 3 | [
"LicenseRef-scancode-dco-1.1",
"Apache-2.0"
] | permissive | #!/usr/bin/env sh
set -em

# Marker file created after first-run setup so restarts skip it.
init_marker="/opt/openeew/.initialized"

if [ ! -f "${init_marker}" ]; then
    echo "⏲ Initializing..."
    # With both credentials present, provision a mosquitto password file
    # and disable anonymous access; otherwise run the broker open.
    if [ -n "${username}" ] && [ -n "${password}" ]; then
        echo "🔑 Using authentication"
        touch /opt/openeew/mosquitto_passwords
        mosquitto_passwd -b /opt/openeew/mosquitto_passwords "${username}" "${password}"
        {
            echo "allow_anonymous false"
            echo "password_file /opt/openeew/mosquitto_passwords"
        } >> /opt/openeew/mosquitto.conf
    else
        echo "⚠ Not using authentication"
    fi
    touch "${init_marker}"
    echo "✅ Initialized"
else
    echo "✅ Already initialized, skipping initialization"
fi

# Start the broker as a daemon, then block (at most one minute) until it
# accepts connections on the default MQTT port.
mosquitto -c /opt/openeew/mosquitto.conf -d
timeout 1m sh -c "until nc -z 127.0.0.1 1883; do sleep 1; done"
echo "🚀 Mosquitto is ready"

# Periodic log rotation in the background.
watch -n 600 /usr/sbin/logrotate /etc/logrotate.d/* &

# Ingestion helpers run in the background; the detector stays in the
# foreground and keeps the container alive.
python3 /opt/openeew/receive_devices.py --username "${username}" --password "${password}" &
python3 /opt/openeew/receive_earthquakes.py --username "${username}" --password "${password}" &
python3 /opt/openeew/detection.py --username "${username}" --password "${password}"
| true |
1b067ddb9554a544b3b3dc42e0fef74682332f1d | Shell | schanur/extreme-defaults | /scripts/max-optipng | UTF-8 | 584 | 3.796875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
set -o errexit -o nounset -o pipefail

# Compress a PNG in place using optipng's slowest, most thorough preset.
# Fixes vs. original: unquoted "${1}" broke on paths with spaces or a
# leading '-', the obsolescent '-o' test operator is replaced with '||',
# non-standard 'which' is replaced with 'command -v' (ShellCheck SC2230),
# and the "archieve" typo in the description is corrected.
desc()  { echo "Runs optipng with slowest settings to achieve maximum PNG file compression"; }
usage() { echo "Usage: $(basename "${0}") PNG_INPUT_FILE"; }

# Help flag handling.
if [ ${#} -eq 1 ]; then
    if [ "${1}" = "-h" ] || [ "${1}" = "--help" ]; then desc; echo; usage; exit 0; fi
fi

# Dependency check.
if ! command -v optipng > /dev/null; then echo "optipng not found"; exit 1; fi

# Argument count check.
if [ ${#} -ne 1 ]; then echo "Wrong number of arguments."; echo; usage; exit 1; fi

# '--' stops option parsing so filenames beginning with '-' are handled.
optipng -o7 -- "${1}"
| true |
6ea4c0a33905da37e99ef3cdb3db803c1091df8b | Shell | reanisz/dotfiles | /nvim/setup_nvim-dein.sh | UTF-8 | 345 | 3.015625 | 3 | [] | no_license | #!/bin/sh
init_target nvim-dein
process_nvim-dein(){
directory .cache
if [ $command = "setup" ] ; then
exec_cmd curl https://raw.githubusercontent.com/Shougo/dein.vim/master/bin/installer.sh > /tmp/deininstaller.sh
exec_cmd sh /tmp/deininstaller.sh $HOME/.cache/dein/
exec_cmd rm /tmp/deininstaller.sh
fi
}
| true |
39181957c37dd18d505191c842769893fb0fdf12 | Shell | kimprado/challenge-payment-processor-api | /configure | UTF-8 | 5,238 | 3.640625 | 4 | [] | no_license | #!/bin/bash
set -e
CURDIR=`pwd`
DIR=".tmp-configure"
DOCKER_GROUP="docker"
GO_VERSION="go1.13.4"
GO_PKG="$GO_VERSION.linux-amd64.tar.gz"
GO_BASE="/usr/local"
GO_ROOT="$GO_BASE/go"
GO_ROOT_VERSION="$GO_BASE/$GO_VERSION"
JMETER_VERSION="apache-jmeter-5.1.1"
JMETER_PKG="$JMETER_VERSION.tgz"
JMETER_BASE="/usr/local"
JMETER_ROOT="$JMETER_BASE/jmeter"
JMETER_ROOT_VERSION="$JMETER_BASE/$JMETER_VERSION"
if [ "$USER" == "root" ]; then
echo "Aborting: can't use root"
exit 1
fi
if [ ! -e "./.vscode/settings.json" ]; then
cp "./.vscode/settings-sample.json" "./.vscode/settings.json"
fi
if [ ! -e "./.vscode/launch.json" ]; then
cp "./.vscode/launch-sample.json" "./.vscode/launch.json"
fi
if [ ! -e "./configs/config-dev.json" ]; then
cp "./configs/config-dev-sample.json" "./configs/config-dev.json"
fi
if [ ! -e "./configs/config.env" ]; then
cp "./configs/config-sample.env" "./configs/config.env"
fi
if [ ! -e "./configs/config-dev.env" ]; then
cp "./configs/config-dev-sample.env" "./configs/config-dev.env"
fi
OS_DEBIAN="/etc/debian_version"
if [ -e "$OS_DEBIAN" ]; then
sudo apt-get -y install \
curl \
git gitk git-gui \
make
fi
if [ ! -d "$PWD/$DIR" ]; then
echo "Creating folder $PWD/$DIR"
mkdir -p "$PWD/$DIR"
fi
cp ./third_party/pcurl.sh $DIR/pcurl.sh
cd $DIR
PATH_COMPOSER=`whereis -b docker-compose | awk -F": " '{ print $2 }'`
if [ "$PATH_COMPOSER" == "" ] && [ ! -e "$PATH_COMPOSER" ]; then
echo "Donwloading docker-compose"
curl -L "https://github.com/docker/compose/releases/download/1.23.2/docker-compose-$(uname -s)-$(uname -m)" -o ./docker-compose
sudo chmod +x ./docker-compose
sudo cp ./docker-compose /usr/local/bin/
else
echo "Existing Composer: $PATH_COMPOSER ... OK"
fi
PATH_DOCKER=`whereis -b dockerd | awk -F": " '{ print $2 }'`
if [ "$PATH_DOCKER" == "" ] && [ ! -e "$PATH_DOCKER" ]; then
echo "Donwloading docker"
curl -L "https://download.docker.com/linux/static/stable/$(uname -m)/docker-18.09.2.tgz" -o ./docker-18.09.2.tgz
tar zxvf ./docker-18.09.2.tgz
sudo cp ./docker/* /usr/local/bin/
else
echo "Existing Docker: $PATH_DOCKER ... OK"
fi
case "$(getent group $DOCKER_GROUP | awk -F' ' '{ print $1 }' | wc -w)" in
0) echo "Creating $DOCKER_GROUP group ..."
sudo /usr/sbin/groupadd $DOCKER_GROUP
echo "$DOCKER_GROUP group ... OK"
;;
1) echo "$DOCKER_GROUP group exists ... OK"
;;
esac
case "$(pidof dockerd | wc -w)" in
0) echo "Starting Docker ..."
sudo dockerd &
echo "Docker running pid: $(pidof dockerd) ... OK"
;;
1) echo "Docker running pid: $(pidof dockerd) ... OK"
;;
esac
PATH_GO=`whereis -b go | awk -F": " '{ print $2 }'`
if [ "$PATH_GO" == "" ] && [ ! -e "$PATH_GO" ]; then
if [ -d "$GO_ROOT" -o -e "$GO_ROOT" ]; then
sudo rm -rf "$GO_ROOT""_bkp_old"
sudo mv "$GO_ROOT" "$GO_ROOT""_bkp_old"
fi
if [ -d "$GO_ROOT_VERSION" -o -e "$GO_ROOT_VERSION" ]; then
sudo rm -rf "$GO_ROOT_VERSION""_bkp_old"
sudo mv "$GO_ROOT_VERSION" "$GO_ROOT_VERSION""_bkp_old"
fi
if [ ! -d "$GO_ROOT_VERSION" ]; then
echo "Creating Installation dir $GO_ROOT_VERSION"
sudo mkdir -p "$GO_ROOT_VERSION"
fi
if [ ! -d "$GO_ROOT" ]; then
echo "Creating GOROOT link $GO_ROOT to $GO_ROOT_VERSION"
sudo ln -s "$GO_ROOT_VERSION" "$GO_ROOT"
fi
echo "Donwloading Go"
curl -L "https://dl.google.com/go/$GO_PKG" -o "./$GO_PKG"
sudo tar zxf "./$GO_PKG" --strip-components=1 -C "$GO_ROOT"
if [ ! -e "$HOME/.profile" ]; then
touch "$HOME/.profile"
fi
echo -e "\nexport GOPATH=\$HOME/go\nexport PATH=\$PATH:$GO_ROOT/bin:\$GOPATH/bin" >> "$HOME/.profile"
echo "Donwloading wire"
$($GO_ROOT/bin/go get github.com/google/wire/cmd/wire@v0.4.0)
else
echo "Existing `whereis -b go` ... OK"
echo "Donwloading wire"
go get github.com/google/wire/cmd/wire@v0.4.0
fi
PATH_JMETER=`whereis -b jmeter | awk -F": " '{ print $2 }'`
if [ "$PATH_JMETER" == "" ] && [ ! -e "$PATH_JMETER" ]; then
if [ -d "$JMETER_ROOT" -o -e "$JMETER_ROOT" ]; then
sudo rm "$JMETER_ROOT"
fi
if [ -d "$JMETER_ROOT_VERSION" -o -e "$JMETER_ROOT_VERSION" ]; then
sudo rm -rf "$JMETER_ROOT_VERSION""_bkp_old"
sudo mv "$JMETER_ROOT_VERSION" "$JMETER_ROOT_VERSION""_bkp_old"
fi
if [ ! -d "$JMETER_ROOT_VERSION" ]; then
echo "Creating Installation dir $JMETER_ROOT_VERSION"
sudo mkdir -p "$JMETER_ROOT_VERSION"
fi
if [ ! -d "$JMETER_ROOT" ]; then
echo "Creating JMETER_ROOT link $JMETER_ROOT to $JMETER_ROOT_VERSION"
sudo ln -s "$JMETER_ROOT_VERSION" "$JMETER_ROOT"
fi
sudo rm -rf /usr/local/bin/jmeter
echo "Donwloading JMeter"
./pcurl.sh "https://archive.apache.org/dist/jmeter/binaries/$JMETER_PKG"
sudo tar -zxf "./$JMETER_PKG" --strip-components=1 -C "$JMETER_ROOT"
sudo ln -s "$JMETER_ROOT/bin/jmeter" /usr/local/bin/jmeter
else
echo "Existing `whereis -b jmeter` ... OK"
fi
sudo /usr/sbin/usermod -a -G $DOCKER_GROUP $USER
echo "user $USER added in $DOCKER_GROUP group ... OK"
cd $CURDIR
| true |
f5d8bb60f8cd2ce02fda20aac2950c8a952c3a8d | Shell | nsaeki/utils | /s3bucketsize | UTF-8 | 825 | 3.5625 | 4 | [] | no_license | #!/bin/bash
# Print S3 bucketsize
# Usage: s3bucketsize profile-name bucketname
PROFILE=$1
BUCKET=$2
if date -d yesterday > /dev/null 2>&1; then
START_DATE=`date -d yesterday +%Y-%m-%dT00:00:00+09:00`
else
START_DATE=`date -v -1d +%Y-%m-%dT00:00:00+09:00`
fi
END_DATE=`date +%Y-%m-%dT00:00:00+09:00`
OPTS=
if [ "X$PROFILE" != "X" ]; then
OPTS="--profile $PROFILE"
fi
OPTS="$OPTS --namespace AWS/S3"
OPTS="$OPTS --metric-name BucketSizeBytes"
# OPTS="$OPTS --metric-name NumberOfObjects"
OPTS="$OPTS --statistics Average"
OPTS="$OPTS --start-time $START_DATE"
OPTS="$OPTS --end-time $END_DATE"
OPTS="$OPTS --period 86400"
OPTS="$OPTS --dimensions Name=BucketName,Value=${BUCKET} Name=StorageType,Value=StandardStorage"
echo aws cloudwatch get-metric-statistics $OPTS
aws cloudwatch get-metric-statistics $OPTS
| true |
9e43dfa5a7b27ca49b9ef72a76cb0544fc00bf62 | Shell | vibhorrawal/Lab-work | /sh/q6.sh | UTF-8 | 280 | 3.078125 | 3 | [] | no_license | #!/bin/bash/
echo "Enter number of rows to be printed: "
read n
i=$n
clear
k=0
while [ $i -gt 0 ]
do
j=$i
m=$k
while [ $m -ge 0 ]
do
echo -n " "
m=$(($m-1))
done
k=$(($k+1))
while [ $j -gt 0 ]
do
echo -n "* "
j=$(($j-1))
done
echo
i=$(($i-1))
done
| true |
c3b42f6b795e1eae926fc9162dbfa31751b087d1 | Shell | Writtic/dotfiles | /zsh/.zshrc.d/bump | UTF-8 | 248 | 3.125 | 3 | [] | no_license | #!/bin/bash
function bumps {
svc="$1"
part="${2:-0}"
#git tag|grep "${svc}-v"|sed "s/${svc}-//"|sort|tail -1|bump -stdin -part="$part"|sed "s/^/${svc}-/"
git tag|grep "${svc}-v"|sort|tail -1|bump -prefix "${svc}-v" -stdin -part $part
}
| true |
5da39e5b495805826d4e792f1558a810c8da9eda | Shell | satinderiitd/Linux-Learning | /process-status-check.sh | UTF-8 | 128 | 2.515625 | 3 | [] | no_license | #!/bin/bash
for vm in server1 server2 server3;
do
echo -e "\n$vm"
ssh $vm
sudo systemctl status docker | head -n3
done
| true |
33ea7df9ea9479344fe503ffef567245a51466c5 | Shell | tammersaleh/github-local-sync | /missing | UTF-8 | 798 | 3.75 | 4 | [] | no_license | #!/usr/bin/env bash
set -e
if [[ $# != 1 ]]; then
echo " Usage: $(basename $0) <DIRECTORY>"
echo " Example: $(basename $0) $HOME/code"
echo
echo "Examines the git repositories in \`DIRECTORY\` and outputs all which _aren't_ starred on github."
echo
echo "Assumes that the repositories in \`DIRECTORY\` mimic the github \`org/repo\` pattern, as follows:"
echo
echo " ~/DIRECTORY $ ls -d */*"
echo " daneden/animate.css"
echo " discourse/discourse"
echo " golangcookbook/golangcookbook.github.io"
echo " jnunemaker/nunes"
echo " ..."
exit 3
fi
base_dir=$1
starred=$(jq -r '.[] | .full_name' starred.json)
cd $base_dir
locals=$(\ls -d */* | cat)
for name in $locals; do
if ! echo "$starred" | grep -xq "$name"; then echo "$name"; fi
done
| true |
5c3fb2e8288ffe700a4d7d81781cc2f25f1d682e | Shell | CSGO-Analysis/linuxgameservers | /functions/fn_validateserver | UTF-8 | 721 | 3.1875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# LGSM fn_validateserver function
# Author: Daniel Gibbs
# Website: http://danielgibbs.co.uk
# Version: 010115
# Description: Runs a server validation.
local modulename="Validate"
fn_rootcheck
fn_syscheck
fn_printwarn "Validating may overwrite some customised files."
sleep 1
echo -en "\n"
echo -en "https://developer.valvesoftware.com/wiki/SteamCMD#Validate"
sleep 5
echo -en "\n"
fn_printdots "Checking server files"
sleep 1
fn_printok "Checking server files"
fn_scriptlog "Checking server files"
sleep 1
cd "${rootdir}"
cd "steamcmd"
./steamcmd.sh +login "${steamuser}" "${steampass}" +force_install_dir "${filesdir}" +app_update "${appid}" validate +quit|tee -a "${scriptlog}"
fn_scriptlog "Checking complete" | true |
f2cc5edc429ce197d1f530c1838113064f103a3c | Shell | nikitadurasov/emacs-config | /reload.sh | UTF-8 | 955 | 3.78125 | 4 | [] | no_license | #!/usr/bin/bash
# get newest version of configs
cd $(dirname $BASH_SOURCE)
git pull origin master
# check if everything went fine
getExitCode=$?
if [[ $getExitCode != 0 ]]; then
exit $getExitCode
fi
function clean() {
git clean -nx
read -p "Clean the above files (y/n)" -n 1
echo
echo "**** Processing ****"
if [[ $REPLY =~ ^[Yy]$ ]]; then
git clean -fx
fi
}
function loadFilesInRoot() {
for i in $(ls -a); do
if [ $i != '.' -a $i != '..' -a $i != '.git' -a $i != '.DS_Store' -a $i != 'reload.sh' -a $i != 'README.md' -a $i != '.gitignore' -a $i != '.gitmodules' ]; then
echo "$i"
cp -r "$i" "$HOME/"
fi
done
}
clean
if [ "$1" == "--force" -o "$1" == "-f" ]; then
loadFilesInRoot
else
read -p "This may overwrite existing files in your home directory. Are you sure? (y/n) " -n 1
echo
if [[ $REPLY =~ ^[Yy]$ ]]; then
loadFilesInRoot
fi
fi
unset clean
unset doIt
| true |
abbdb8645642b779c3e9a9ec38be06b23ff9ee90 | Shell | jtstorck/proxy-nifi-docker | /scripts/start.sh | UTF-8 | 686 | 2.921875 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# http://stackoverflow.com/questions/59895/can-a-bash-script-tell-which-directory-it-is-stored-in#answer-246128
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
REPO_DIR=$SCRIPT_DIR/..
# clone Sandeep's repo, which creates docker containers to compile Knox,
# start an LDAP, and start Knox
git clone https://github.com/moresandeep/knox-dev-docker.git $REPO_DIR/knox-dev-docker
# copy the NiFi topology into the cloned knox-dev-docker repo
cp $REPO_DIR/topologies/nifi.xml $REPO_DIR/knox-dev-docker/topologies
docker-compose -f $REPO_DIR/knox-dev-docker/docker-compose.yml up -d
docker-compose -f $REPO_DIR/docker-compose-anonymous-nifi.yaml up -d
| true |
65d9474e56b05d21263855454712228f3c4c625f | Shell | billsioros/MPIsuite | /MPIs/mpis-profile | UTF-8 | 3,228 | 3.78125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
. "${MPIS_UI}"
REQUIRED=( MPIS_OUTPUT_ROOT MPIS_STDOUT_EXTENSION MPIS_STDERR_EXTENSION MPIS_USER_ID MPIS_COMPILER MPIS_SCHEDULER MACRO VALUES PROCESSES THREADS_PER_PROCESS TIME_PATTERN )
RESULTS="results.csv"
function max
{
list=""
for value in $*
do
list="$value, $list"
done
python -c "print(max([${list}]))"
}
if [ "$#" -lt 2 ]
then
log "ERROR" "usage: $( basename "$0" ) [SOURCE] [DESCRIPTION]"; exit 1
fi
if [ ! -r "$1" ]
then
log "ERROR" "'$1' is not readable"; exit 1
fi
if [ ! -r "$2" ]
then
log "ERROR" "'$2' is not readable"; exit 1
fi
. "$2"
for value in "${REQUIRED[@]}"
do
if [[ -z "${!value// }" ]]
then
log "ERROR" "'$value' has not been specified"; exit 1
fi
done
exe="$( basename "$1" )"
exe="${exe%.*}.x"
DIR="$( date +"%d_%m_%Y" )/$( date +"%H_%M_%S" )"
for value in "${VALUES[@]}"
do
if ! "$MPIS_COMPILER" "$1" "$MACRO" "$value"
then
exit 1
fi
for processes in "${PROCESSES[@]}"
do
export SCHEDULE_DIRECTORY="${DIR}/${value}/${processes}"
if ! "$MPIS_SCHEDULER" "$exe" "$processes" "$THREADS_PER_PROCESS"
then
exit 1
fi
done
while true
do
running="$( qstat | grep "$MPIS_USER_ID" )"
if [ -z "$running" ]
then
break
fi
done
done
find . -maxdepth 1 -name "*job.sh" -delete
find . -maxdepth 1 -name "*.x" -delete
for file in $(find "${MPIS_OUTPUT_ROOT}" -name "*.mpiP")
do
mv "$file" "$(dirname "$file")/mpiP.log"
done
declare -A measurements
for value in "${VALUES[@]}"
do
for processes in "${PROCESSES[@]}"
do
for file in $( find "${MPIS_OUTPUT_ROOT}/${DIR}/${value}/${processes}" -name "*.${MPIS_STDERR_EXTENSION}" | tr '\n' ' ' )
do
values="$( cat "$file" | grep -Po "$TIME_PATTERN" | tr '\n' ' ' )"
if [[ -z "${values// }" ]]
then
log "WARNING" "No matches for '${TIME_PATTERN}' in '$file'"
if [[ ! -z "${MPIS_EDITOR// }" ]] && [[ "$( command -v "$MPIS_EDITOR" )" ]]
then
"$MPIS_EDITOR" "$MPIS_EDITOR_ARGS" "$file" "${file%%.*}.${MPIS_STDERR_EXTENSION}"
fi
measurements["$value, $processes"]="-1"
else
measurements["$value, $processes"]="$( max "$values" )"
fi
done
done
done
log "MESSAGE" "Saving measurements to '${MPIS_OUTPUT_ROOT}/${DIR}/${RESULTS}'"
echo "${MACRO}, Processes, Time, Speed Up, Efficiency" > "${MPIS_OUTPUT_ROOT}/${DIR}/${RESULTS}"
for value in "${VALUES[@]}"
do
for ((processes = 1; processes <= 64; processes *= 2))
do
speedup="$( python -c "print(${measurements[$value, 1]} / ${measurements[$value, $processes]})" )"
efficiency="$( python -c "print(${speedup} / ${processes})" )"
echo "${value}, ${processes}, ${measurements[$value, $processes]}, ${speedup}, ${efficiency}"
done
done >> "${MPIS_OUTPUT_ROOT}/${DIR}/${RESULTS}"
if [[ ! -z "${MPIS_EDITOR// }" ]] && [[ "$( command -v "$MPIS_EDITOR" )" ]]
then
"$MPIS_EDITOR" "$MPIS_EDITOR_ARGS" "${MPIS_OUTPUT_ROOT}/${DIR}/${RESULTS}"
fi
| true |
ffab4515f23a91d2fc7a3182257bad58d2c9836c | Shell | sy250/getqiitaitems | /get_qiita_items.sh | UTF-8 | 954 | 3.1875 | 3 | [] | no_license | #!/bin/bash
MAX_PER_PAGE=100
items_count=$(./get_authenticated_user.sh | jq -r '.items_count')
# echo "items_count""${items_count}"
# Header print
echo likes_count,page_views_count,title,id,created_at
page_count=1
per_page=$items_count
page_left=$items_count
if [ $page_left -gt $MAX_PER_PAGE ]; then
per_page=$MAX_PER_PAGE
fi
while [ 0 -lt "${page_left}" ]
do
# echo 'befor page_count='"$page_count"
# echo 'per_page'"$per_page"
# echo 'page_left'"$page_left"
./get_authenticated_user_items.sh "${page_count}" "${per_page}" | \
jq '[.likes_count, .page_views_count, .title, .id, .created_at]' | \
jq -r '@csv'
if [ $page_left -gt $MAX_PER_PAGE ]; then
page_left=$(($page_left - $MAX_PER_PAGE))
per_page=$MAX_PER_PAGE
page_count=$((page_count + 1))
else
page_left=$(($page_left - $MAX_PER_PAGE))
per_page=$page_left
page_count=$((page_count + 1))
fi
done
exit 0 | true |
2bd759ec5b5be862a0a5c56a7ade22ad065faad3 | Shell | mf-collinhayden/mongotools | /install_mongomem.bash | UTF-8 | 5,531 | 3.234375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
echo "Starting setup...."
echo ""
echo ""
# Install necessary pre-requisite packages
sudo apt-get install git
sudo apt-get install python-pip
sudo apt-get install build-essential python-dev
# Get the repo files
cd /home/ubuntu
mkdir mongomem_source
cd mongomem_source
git clone https://github.com/ContextLogic/mongotools.git
# Now changes what's needed and run the install
cd mongotools/src
# Here doc out the file w/ the change that's not in the git repo yet
cat <<EOE >mongomem.py
import pymongo
import argparse
import os.path
import resource
from ftools import fincore
import glob
import sys
from collections import defaultdict
def main():
parser = argparse.ArgumentParser(
description="Gives information about collection memory usage in Mongo")
parser.add_argument('--connection', '-c', default='localhost',
help='pymongo connection string to mongos')
parser.add_argument('--dbpath', '-p', default='/var/lib/mongodb',
help='path to data dir')
parser.add_argument('--directoryperdb', action='store_true',
help='path to data dir')
parser.add_argument('--num', '-n', default=10, help='number of collections')
parser.add_argument('--username', '-u', default=None, help='admin DB username')
parser.add_argument('--password', default=10, help='admin DB password')
args = parser.parse_args()
#conn = pymongo.Connection(args.connection)
conn = pymongo.MongoClient(args.connection)
if args.username:
result = conn.admin.authenticate(args.username, args.password)
if not result:
print "Failed to authenticate to admin DB with those credentials"
return False
dbpath = args.dbpath
if not os.path.exists(dbpath):
print "dbpath %s does not appear to exist" % dbpath
return False
DB_FILE_PTRN = '{0}/{1}/{1}.[0-9]*' if args.directoryperdb else \
'{0}/{1}.[0-9]*'
ns_resident_ratios = {}
ns_resident_pages = {}
ns_total_pages = {}
ns_extents = {}
total_pages = 0
total_resident_pages = 0
PAGE_SIZE = resource.getpagesize()
MB_PER_PAGE = float(PAGE_SIZE) / float(1024 * 1024)
for db in conn.database_names():
# load fincore details for all of that DB's files
files = glob.glob(DB_FILE_PTRN.format(os.path.abspath(dbpath), db))
# dictionary of file num => set of resident pages
resident_pages = defaultdict(set)
for f in files:
_, filenum = f.rsplit('.', 1)
filenum = int(filenum)
fd = file(f)
vec = fincore(fd.fileno())
fd.close()
for i, pg in enumerate(vec):
if ord(pg) & 0x01:
resident_pages[filenum].add(i)
total_resident_pages += 1
total_pages += 1
print "Examining %s [%d pages]" % (f, len(vec))
for collection in conn[db].collection_names():
ns = "%s.%s" % (db, collection)
# figure out extent details
stats = conn[db].command('collStats', collection, verbose=True)
extent_info = stats['extents']
col_pages = []
ns_extents[ns] = len(extent_info)
for extent in extent_info:
loc = extent['loc: ']
if loc['offset'] % PAGE_SIZE != 0:
print "Extent not page-aligned!"
if extent['len'] % PAGE_SIZE != 0:
print "Extent length not multiple of page size (%d)!" \
% extent['len']
for i in xrange(extent['len'] / PAGE_SIZE):
col_pages.append((loc['file'],
(loc['offset'] / PAGE_SIZE) + i))
# map extents against fincore results
total_col_pages = len(col_pages)
in_mem_pages = sum([1 for pg in col_pages \
if pg[1] in resident_pages[pg[0]]])
ns_resident_ratios[ns] = float(in_mem_pages) / \
float(total_col_pages) if total_col_pages else 0
ns_resident_pages[ns] = in_mem_pages
ns_total_pages[ns] = total_col_pages
# sort & output
num_cols = int(args.num)
biggest_ns = sorted(ns_resident_pages, key=ns_resident_pages.get,
reverse=True)
if num_cols != 0:
biggest_ns = biggest_ns[:num_cols]
print "\n\n---------\nResults\n---------\nTop collections:"
for ns in biggest_ns:
print "%s %d / %d MB (%f%%) [%d extents]" % (ns,
ns_resident_pages[ns] * MB_PER_PAGE,
ns_total_pages[ns] * MB_PER_PAGE,
ns_resident_ratios[ns] * 100,
ns_extents[ns])
print "\n"
total_page_ratio = float(total_resident_pages) / float(total_pages) \
if total_pages else 0
print "Total resident pages: %d / %d MB (%f%%)" % \
(total_resident_pages * MB_PER_PAGE,
total_pages * MB_PER_PAGE,
total_page_ratio * 100)
return True
if __name__ == "__main__":
if not main():
sys.exit(1)
EOE
# go up a directory and setup the binary file
cd ..
sudo python setup.py install
echo "Done with the setup...."
exit 0
| true |
aea611a204a25283b2f13dc0a2e17913284046c6 | Shell | geku/workshop-vm | /provision/install-packages.sh | UTF-8 | 1,328 | 2.546875 | 3 | [
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-other-permissive",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | echo Installing some packages using apt-get...
whoami
# add repository for microsoft visualstudio code packages
curl -sS https://packages.microsoft.com/keys/microsoft.asc | gpg --dearmor > microsoft.gpg
mv microsoft.gpg /etc/apt/trusted.gpg.d/microsoft.gpg
echo "deb [arch=amd64] http://packages.microsoft.com/repos/vscode stable main" > /etc/apt/sources.list.d/vscode.list
echo apt-get update
apt-get update || while pgrep aptd; do pkill -e aptd; sleep 5; done && apt-get update
# remove some discouraged packages
apt-get purge -y --auto-remove libreoffice-common unity-webapps-common thunderbird transmission-gtk transmission-common simple-scan deja-dup shotwell cheese
# remove games
apt-get purge -y --auto-remove aisleriot gnome-sudoku mahjongg ace-of-penguins gnomine gbrainy gnome-mines
# install some packages
echo apt-get install/update some packages
apt-get install -y dos2unix vim virtualbox-guest-additions-iso code
# install openbox, a spartanic and resource-saving window manager (to start it, logout and choose window manager)
#apt-get install -y openbox pcmanfm
# clean
echo apt-get autoremove -y
apt-get autoremove -y
echo apt-get autoclean -y
apt-get autoclean -y
echo ...apt-get done.
# do upgrade
export DEBIAN_FRONTEND=noninteractive
sudo grub-install /dev/sda
apt-get upgrade -yq --auto-remove
| true |
37e925dfde726f6f4d6e34c17a9ba05405986742 | Shell | simbadSid/cubeRemapper_perfBenchmark | /srcAlignedMemAlloc/compileAndRunTest.sh | UTF-8 | 1,054 | 3.109375 | 3 | [] | no_license | #!/bin/bash
progDir="src"
prog="mem_shell"
testFile="test"
outputTestFile="output"
testFileList=`ls $testFile/*.in`
cd $progDir
make clean
make
cd ../
cd $outputTestFile/test
rm -f *
cd ../../
for file in $testFileList
do
./$progDir/$prog < $file > $outputTestFile/$file
done
echo "Différence between the expected and created output: alloc1:"
diff output/test/alloc1.in test/alloc1.out.expected
echo ""
echo ""
echo ""
echo ""
echo "Différence between the expected and created output: alloc2:"
diff output/test/alloc2.in test/alloc2.out.expected
echo ""
echo ""
echo ""
echo ""
echo "Différence between the expected and created output: alloc3:"
diff output/test/alloc3.in test/alloc3.out.expected
echo ""
echo ""
echo ""
echo ""
echo "Différence between the expected and created output: alloc4:"
diff output/test/alloc4.in test/alloc4.out.expected
echo ""
echo ""
echo ""
echo ""
echo "Différence between the expected and created output: alloc5:"
diff output/test/alloc5.in test/alloc5.out.expected
echo ""
echo ""
echo ""
echo ""
| true |
f739dcec65684a9425f8bf9626d41e0eaf692804 | Shell | poojasri6799/Assignment | /Gambler.sh | UTF-8 | 414 | 3.65625 | 4 | [] | no_license | #!/bin/bash
read -p "enter amount to start: " n
if [[ $n -gt "100" && $n -lt "200" ]]
then
won=0
loss=0
while(( $n > 0 && $n <= 200 ))
do
random=$(( $RANDOM%2 ))
case $random in
1) n=$(($n +1 ))
echo -n " $n"
won=$(($won + 1 ))
;;
0) n=$(( $n - 1 ))
echo -n " $n"
loss=$(($loss + 1 ))
;;
esac
done
echo "won = " $won
echo "loss = " $loss
else
echo "enter amount range in between 100-200"
fi
| true |
6e6e6d8d247668157989739765adc5bdda3a4606 | Shell | alanoakes/Repentance | /DataPreprocessing.sh | UTF-8 | 1,114 | 3.4375 | 3 | [] | no_license | #!/bin/bash
# Constructing CSV Format
cat Data/Scripture_Raw.txt |\
sed 's/ KJV\] /",/g' |\
sed 's/\[/"/' |\
sed 's/\s/","/' |\
sed 's/\:/","/' |\
sed -e '1i"Book","Chapter","Verse","Text"\' \
> Data/Scripture_TextAndStrongs.csv
# Constructing Scripture Text Only CSV Format
cat Data/Scripture_TextAndStrongs.csv |\
sed -e 's/\[[^][]*\]//g' \
> Data/Scripture_Text.csv
# Constructing Strongs Numbers only CSV Format
cat Data/Scripture_TextAndStrongs.csv |\
sed 's/\(^\|]\)[^][]*\(\[\|$\)/\1\2/g' |\
sed 's/\]\[/,/g' |\
sed 's/\[/"/g' |\
sed 's/\]/"/g' |\
sed -e '1d' |\
sed -e '1i"Strongs"\' \
> Data/Scripture_StrongsA.csv
# Concat scripture refs into strongs numbers
cut -d "," -f 1-3 Data/Scripture_TextAndStrongs.csv > Data/Scripture_Refs.csv
paste -d "," Data/Scripture_Refs.csv Data/Scripture_StrongsA.csv |\
awk -F',' '{print $0}' \
> Data/Scripture_StrongsB.csv
# Construct kjv word repent paired with its strongs number
cat Data/Scripture_TextAndStrongs.csv |\
awk -F',' '{print $4}' |\
sed 's/.*repent\(.*\)\]/\1/' |\
awk -F']' '{print $1}' |\
sed 's/^/repent/g' \
> Data/test
| true |
c79df4897be2b0756d1e0089aafa5c3c3c81ce66 | Shell | protoben/erlang_junk | /crypto/euclid.sh | UTF-8 | 335 | 3.65625 | 4 | [] | no_license | #!/bin/bash
function usage {
cat << EOF
Usage: $0 m n
m,n in Z
EOF
}
function gcd {
local m n gcd temp
m=$1
n=$2
gcd=1
[ $m -eq $n ] && {
echo $m
return
}
while [ $n -ne 0 ]; do
temp=$n
n=$(($m % $n))
m=$temp
done
echo $m
}
[ $# -ne 2 ] && {
usage
exit 1
}
GCD=$(gcd $1 $2)
echo $GCD
| true |
7b6acf339174680de075f8c3ee568b868108a90f | Shell | mzimmerm/flutter_charts | /tool/make_new_chart_type_structure.sh | UTF-8 | 469 | 3.859375 | 4 | [
"BSD-2-Clause-Views"
] | permissive | #!/usr/bin/env bash
# Creates a set of directories and empty dart files for a new chart type.
echo Usage: $0 newChartType
chartType=${1:-UNSET}
if [[ -z "$chartType" ]]; then
echo Invalid chartType="$chartType", exiting
exit 1
fi
echo chartType = $chartType
mkdir {$chartType}
for file in \
$chartType/chart.dart \
$chartType/container.dart \
$chartType/options.dart \
$chartType/painter.dart \
$chartType/presenters.dart
do
echo "" >> $file
done | true |
09b665d818dfd9fe85ad332f07757f30e07c4f4f | Shell | kalrish/cxx-ucem | /install.bash | UTF-8 | 2,924 | 3.921875 | 4 | [
"MIT"
] | permissive | declare -A -r default_installation_dir_variables=(
[prefix]='/usr/local'
[exec_prefix]='${installation_dir_variables[prefix]}'
[includedir]='${installation_dir_variables[prefix]}/include'
[libdir]='${installation_dir_variables[exec_prefix]}/lib'
)
declare -A installation_dir_variables not_explicitly_set_installation_dir_variables
for argument in "$@" ; do
if [[ "$argument" =~ ^([^= ]+)=(.+)$ ]] ; then
variable="${BASH_REMATCH[1]}"
value="${BASH_REMATCH[2]}"
if [[ -n "${default_installation_dir_variables[$variable]}" ]] ; then
installation_dir_variables[$variable]="$value"
elif [[ $variable == 'DESTDIR' ]] ; then
DESTDIR="$value"
else
errors=1
echo "$0: error: '${variable}' is not a supported installation directory variable"
fi
else
errors=1
invalid_arguments=1
echo "$0: error: invalid argument '${argument}'"
fi
done
if [[ -z $errors ]] ; then
if [[ -t 1 ]] && type 'tput' &>/dev/null ; then
# Raw
declare -r tput_sgr0="$(tput sgr0)"
declare -r tput_bold="$(tput bold)"
declare -r tput_setaf_4="$(tput setaf 4)"
declare -r tput_setaf_6="$(tput setaf 6)"
declare -r tput_setab_6="$(tput setab 6)"
# Conceptual
declare -r style_variable="${tput_bold}"
declare -r style_value="${tput_setaf_6}"
declare -r style_DESTDIR="${tput_bold}"
declare -r style_DESTDIR_value="${tput_setab_6}"
declare -r style_command="${tput_setaf_4}"
fi
function print_n_run()
{
echo "${style_command}${tput_bold}$@${tput_sgr0}"
$@
}
for variable in "${!default_installation_dir_variables[@]}" ; do
if [[ -z "${installation_dir_variables[$variable]}" ]] ; then
not_explicitly_set_installation_dir_variables[$variable]=1
installation_dir_variables[$variable]="${default_installation_dir_variables[$variable]}"
fi
done
for variable in "${!not_explicitly_set_installation_dir_variables[@]}" ; do
while [[ "${installation_dir_variables[$variable]}" =~ \$\{installation_dir_variables\[[^\]]+]} ]] ; do
installation_dir_variables[$variable]="$(eval echo "${installation_dir_variables[$variable]}")"
done
done
for variable in "${!installation_dir_variables[@]}" ; do
echo "${style_variable}${variable}${tput_sgr0}=${style_value}${installation_dir_variables[${variable}]}${tput_sgr0}"
done
echo
echo "${style_DESTDIR}DESTDIR${tput_sgr0}=${style_DESTDIR_value}${DESTDIR}${tput_sgr0}"
echo
print_n_run install -v -d -- {"${DESTDIR}${installation_dir_variables[includedir]}","${DESTDIR}${installation_dir_variables[libdir]}"}/unicode-character-encoding-model/{coded_character_sets,coded_character_set_translators,character_encoding_forms,character_encoding_schemes}
print_n_run install -v -t "${DESTDIR}${installation_dir_variables[includedir]}/unicode-character-encoding-model" -- src/*.{hpp,tpp}
exit 0
else
if [[ -n $invalid_arguments ]] ; then
echo "$0: valid arguments are of the form \"variable=value\""
fi
exit 1
fi | true |
605515ed83e19d00a28cfa80bcf96b2f91e262cb | Shell | trawick/emptyhammock-project-template | /deploy/install_roles.sh | UTF-8 | 929 | 3.765625 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
cleanup() {
exit_code=$?
rm -f "${TEMPFILE}"
exit $exit_code
}
TEMPFILE=$(mktemp /tmp/install_roles.XXXXXX)
trap "cleanup" INT TERM EXIT
if ls -l roles | grep ^l >/dev/null; then
echo "**************************************************************************"
echo "* At least one role is installed via symlink; skipping role installation *"
echo "**************************************************************************"
sleep 2
exit 0
fi
if ! ansible-galaxy install -r requirements.yml 2>&1 | tee "${TEMPFILE}"; then
exit 1
fi
if grep "WARNING" "${TEMPFILE}" >/dev/null 2>&1; then
echo "" 1>&2
echo "Out of date packages:" 1>&2
echo "" 1>&2
grep "WARNING" "${TEMPFILE}" 1>&2
echo "" 1>&2
echo "Unless you have made local changes to the role, remove those directories" 1>&2
echo "from ./deploy/roles and try again." 1>&2
exit 1
fi
| true |
9051e4e85a66afc472f2deb0a18340869e020453 | Shell | tacow/setup | /utils/History | UTF-8 | 96 | 3.203125 | 3 | [] | no_license | #!/bin/sh
if [ $# -lt 1 ]
then
history
else
PATTERN=$1
history | grep ${PATTERN}
fi
| true |
4e98460eedcbee747210c8574ea36cc2868d4cb7 | Shell | 1313ou/sqlunet-browser | /wordNet/artwork-relations/make-artwork.sh | UTF-8 | 3,578 | 2.78125 | 3 | [] | no_license | #!/bin/bash
thisdir="`dirname $(readlink -m $0)`"
thisdir="$(readlink -m ${thisdir})"
dirres=../src/main/res
dirassets=../src/main/assets
dirapp=..
RED='\u001b[31m'
GREEN='\u001b[32m'
YELLOW='\u001b[33m'
BLUE='\u001b[34m'
MAGENTA='\u001b[35m'
CYAN='\u001b[36m'
RESET='\u001b[0m'
list_fore="
base_dom
base_pos
base
"
list_fore="
relations
adjderived
also
antonym
attribute
causes
caused
derivation
entails
entailed
holonym
hypernym
hyponym
instance_hypernym
instance_hyponym
member_holonym
member_meronym
meronym
other
part_holonym
participle
part_meronym
pertainym
pos_a
pos_n
pos_r
pos_s
pos
pos_v
similar
substance_holonym
substance_meronym
synonym
verb_group
"
list_fore_dom="
domain
domain_member
domain_topic
domain_member_topic
domain_region
domain_member_region
domain_term
domain_member_term
exemplifies
exemplified
"
list_fore_pos="
pos_a
pos_n
pos_r
pos_s
pos
pos_v
"
# Unused per-size density tables kept for reference (16/24/32/48/144 dp).
#declare -A res_icon16
#res_icon16=([mdpi]=16 [hdpi]=24 [xhdpi]=32 [xxhdpi]=48 [xxxhdpi]=64)
#declare -A res_icon24
#res_icon24=([mdpi]=24 [hdpi]=36 [xhdpi]=48 [xxhdpi]=72 [xxxhdpi]=96)
#declare -A res_icon32
#res_icon32=([mdpi]=32 [hdpi]=48 [xhdpi]=64 [xxhdpi]=96 [xxxhdpi]=128)
#declare -A res_icon48
#res_icon48=([mdpi]=48 [hdpi]=72 [xhdpi]=96 [xxhdpi]=144 [xxxhdpi]=192)
#declare -A res_icon144
#res_icon144=([mdpi]=144 [hdpi]=192 [xhdpi]=288 [xxhdpi]=384 [xxxhdpi]=576)
# res
# Active map: Android density bucket -> pixel size for the generated PNGs.
declare -A res
res=([mdpi]=16 [hdpi]=24 [xhdpi]=32 [xxhdpi]=48 [xxxhdpi]=64)
# Pixel size used for the web/assets copies.
webres=16
# Non-empty -> regenerate PNGs from the SVGs with inkscape; empty -> reuse.
to_png=true
#to_png=
# base dir
# Scratch directory for intermediate PNGs; removed at the end of the script.
bdir="./temp"
mkdir -p ${bdir}
# A S S E T S
# Generate the fixed-size (webres) copies shipped under assets/images.
# NOTE(review): $r is not assigned until the RESOURCES loop below, so it
# expands empty in this banner — confirm it was meant to be here.
echo -e "${MAGENTA}WEBRESOLUTION${RESET} ${BLUE}$r ${webres}${RESET}"
# base
# Rasterize every SVG in the current directory to ${bdir}/_<name>.png.
if [ ! -z "${to_png}" ]; then
for svg in *.svg; do
echo "to png:${svg}"
png="_${svg%.svg}.png"
echo -e "${svg} -> ${bdir}/${png} @ resolution ${BLUE}${webres}${RESET}"
inkscape ${svg} --export-png=${bdir}/${png} -w ${webres} -h${webres} > /dev/null 2> /dev/null
done
fi
# composite
# Overlay each foreground (_ic_<f>.png) on its background (_<b>.png).
# NOTE(review): the echo shows _${f}.png but composite actually uses
# _ic_${f}.png — the log message understates the real input file name.
d="${dirassets}/images/wordnet"
mkdir -p ${d}
b="base"
for f in ${list_fore}; do
p="${f}"
echo "${bdir}/_${b}.png + ${bdir}/_${f}.png -> ${d}/${p}.png"
composite ${bdir}/_ic_${f}.png ${bdir}/_${b}.png ${d}/${p}.png
done
b="base_dom"
for f in ${list_fore_dom}; do
p="${f}"
echo "${bdir}/_${b}.png + ${bdir}/_${f}.png -> ${d}/${p}.png"
composite ${bdir}/_ic_${f}.png ${bdir}/_${b}.png ${d}/${p}.png
done
b="base_pos"
for f in ${list_fore_pos}; do
p="${f}"
echo "${bdir}/_${b}.png + ${bdir}/_${f}.png -> ${d}/${p}.png"
composite ${bdir}/_ic_${f}.png ${bdir}/_${b}.png ${d}/${p}.png
done
# R E S O U R C E S
# Same pipeline as the ASSETS section, once per Android density bucket:
# rasterize the SVGs at that bucket's pixel size, then composite the
# ic_<f>.png drawables into res/drawable-<bucket>/.
for r in ${!res[@]}; do
echo -e "${MAGENTA}RESOLUTION${RESET} ${BLUE}$r ${res[$r]}${RESET}"
# base
if [ ! -z "${to_png}" ]; then
for svg in *.svg; do
echo "to png:${svg}"
png="_${svg%.svg}.png"
echo -e "${svg} -> ${bdir}/${png} @ resolution ${BLUE}${res[$r]}${RESET}"
inkscape ${svg} --export-png=${bdir}/${png} -w ${res[$r]} -h${res[$r]} > /dev/null 2> /dev/null
done
fi
# composite
d="${dirres}/drawable-${r}"
mkdir -p ${d}
b="base"
for f in ${list_fore}; do
p="ic_${f}"
echo "${bdir}/_${b}.png + ${bdir}/_${f}.png -> ${d}/${p}.png"
composite ${bdir}/_ic_${f}.png ${bdir}/_${b}.png ${d}/${p}.png
done
b="base_dom"
for f in ${list_fore_dom}; do
p="ic_${f}"
echo "${bdir}/_${b}.png + ${bdir}/_${f}.png -> ${d}/${p}.png"
composite ${bdir}/_ic_${f}.png ${bdir}/_${b}.png ${d}/${p}.png
done
b="base_pos"
for f in ${list_fore_pos}; do
p="ic_${f}"
echo "${bdir}/_${b}.png + ${bdir}/_${f}.png -> ${d}/${p}.png"
composite ${bdir}/_ic_${f}.png ${bdir}/_${b}.png ${d}/${p}.png
done
done
# Drop the scratch directory with the intermediate PNGs.
rm -fR ./temp
| true |
d563ace42b313e4775ae65aa2e02684f8be73440 | Shell | hg2c/hellogrpc | /scripts/golang.sh | UTF-8 | 708 | 3.453125 | 3 | [] | no_license | #!/usr/bin/env bash
# Fail fast: abort on any command error or use of an unset variable.
set -eu
# In-container Go workspace path for the package under build
# (APP_PACKAGE is expected to be set by the caller/environment).
DOCKER_WORKDIR=/go/src/$APP_PACKAGE
# Image used for all containerized Go builds.
DOCKER_BUILD_IMAGE="hg2c/golang:alpine"
# Run an interactive, throwaway container from $DOCKER_BUILD_IMAGE with the
# project mounted at the in-container Go workspace. Any arguments become the
# command to execute (defaulting to bash). Relies on the external "run"
# helper and $PROJECT_ROOT, both defined elsewhere in this project.
golang::docker::run() {
run docker run --rm -ti \
-w ${DOCKER_WORKDIR} \
-v ${PROJECT_ROOT}:${DOCKER_WORKDIR} \
$DOCKER_BUILD_IMAGE \
${@:-bash}
}
# Cross-build the Go package for every "os/arch" pair listed in
# $APP_PLATFORMS, writing binaries to ./build/<name>-<os>-<arch>.
# $1 - app name (defaults to $APP_NAME); $2 - package (defaults to $APP_PACKAGE).
# LDFLAGS and the "run" helper come from the surrounding project.
golang::build() {
local APP_NAME=${1:-$APP_NAME}
local APP_PACKAGE=${2:-$APP_PACKAGE}
local OUTPUT=./build
for PLATFORM in ${APP_PLATFORMS}; do
# Split "GOOS/GOARCH": strip from the last '/' resp. up to the first '/'.
local GOOS=${PLATFORM%/*}
local GOARCH=${PLATFORM#*/}
local TARGET=${OUTPUT}/${APP_NAME}-${GOOS}-${GOARCH}
# NOTE(review): CGO_ENABLED=1 while cross-compiling requires a matching C
# cross-toolchain inside the build image — confirm that is available.
run CGO_ENABLED=1 GOOS=$GOOS GOARCH=$GOARCH go build -o ${TARGET} -ldflags \"${LDFLAGS}\" ${APP_PACKAGE}
done
}
| true |
cb96143e9fc417c83a39644bd2b6ff20d3fcea23 | Shell | systemkern/Hermes | /HermesFtp/src/test/resources/entrypoint.sh | UTF-8 | 408 | 2.609375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Abort immediately if any provisioning step fails.
set -e

# Make the FTP home tree read-only for everyone.
chmod -R 0555 /home/ftpusers/hermes

# Create the virtual FTP user "hermes" (system user: ftpuser), reading its
# password from the staged temp file.
# Fix: discard stdout to /dev/null — the original redirected to a relative
# file literally named "dev/null" in the working directory.
pure-pw useradd hermes -u ftpuser -d /home/ftpusers/hermes < /home/ftpusers/hermes/hermespasswords.tmp > /dev/null

# Rebuild pure-ftpd's user database so the new account takes effect.
pure-pw mkdb

# Remove the plaintext password file now that the account exists.
rm /home/ftpusers/hermes/hermespasswords.tmp

echo "connect to FTP using hermes/hermes on {DOCKER_MACHINE_IP}:21"
/usr/sbin/pure-ftpd -c 50 -C 10 -l puredb:/etc/pure-ftpd/pureftpd.pdb -E -j -R -P $PUBLICHOST -p 30000:30009 | true |
ead40d48b569a04997840e6c3d565bd9a59662be | Shell | floscr/old-vimrc-dotfiles | /home/.dotfiles/functions/video_and_audio.zsh | UTF-8 | 792 | 4.0625 | 4 | [] | no_license | #!/usr/bin/env bash
# Download a YouTube video (via youtube-dl) and send a desktop notification.
alias y='__youtube_dl_notify'
# Download the given YouTube URL(s) into ~/Movies/Youtube, then pop a macOS
# notification (terminal-notifier) showing the video title.
# Arguments: passed straight through to youtube-dl (URL plus any flags).
function __youtube_dl_notify() {
  dst="$HOME/Movies/Youtube"
  # Create the download directory on first use.
  if [[ ! -d "$dst" ]]; then
    mkdir -p "$dst"
  fi
  # Guard the cd so we never download into the wrong directory.
  cd "$dst" || return 1
  # Fix: quote "$@" so URLs/flags containing spaces or '&' survive intact.
  youtube-dl "$@"
  title="$(youtube-dl --get-title "$@")"
  terminal-notifier \
    -title "Video Downloaded" \
    -message "$title" \
    -sound Submarine
}
# Preset downloader for Binding of Isaac YouTube channel videos:
# format 18 (video) muxed with format 140 (audio).
function isac() {
  local fmt='18+140'
  youtube-dl -f "$fmt" "$@"
}
# Download a YouTube clip as an mp3 into the current directory and print the
# path of the most recently modified matching audio file.
# $1 - YouTube URL (required).
function youtube-mp3() {
  if [[ $# -eq 0 ]]; then
    echo "Pass an youtube url!" >&2
    # Fix: 'exit' here would terminate the interactive shell that sourced
    # this file (and with status 0, i.e. success); fail with 'return 1'.
    return 1
  fi
  youtube-dl --extract-audio --audio-format mp3 "$1"
  # NOTE(review): this searches for *.m4a although the download above
  # converts to mp3 — confirm the intended extension. 'stat -f' is the
  # BSD/macOS form of stat.
  file="$(find . -type f -name "*.m4a" -print0 | xargs -0 stat -f "%m %N" | sort -rn | head -1 | cut -f2- -d " ")"
  echo "$file"
}
| true |
619710e7548c96128ef7eb15a30543f897da01cc | Shell | kboom/iga-adi-giraph | /results-external-extraction/extract-entry.sh | UTF-8 | 3,150 | 3.125 | 3 | [] | no_license | #!/usr/bin/env bash
# Height of the solver's elimination tree; overridable via the environment.
DAG_HEIGHT=${DAG_HEIGHT:-11}
# Number of supersteps that make up one full time step of the solver.
SUPERSTEPS_IN_TIME_STEP=$(echo "4 * ${DAG_HEIGHT} + 6" | bc)
# First superstep of the LAST time step (IGA_STEPS must be set by the caller).
LAST_SUPERSTEP_BEGINNING=$(echo "${SUPERSTEPS_IN_TIME_STEP} + 1 + (${IGA_STEPS} - 2) * (${SUPERSTEPS_IN_TIME_STEP}+2)" | bc)
# Landmark supersteps within that last time step, derived from the tree height;
# the main loop below classifies each logged superstep against these.
INIT_SUPERSTEP="${LAST_SUPERSTEP_BEGINNING}"
FIRST_ROOT_SUPERSTEP=$(echo "${LAST_SUPERSTEP_BEGINNING} + ${DAG_HEIGHT} + 1" | bc)
TRANSPOSE_MAP_SUPERSTEP=$(echo "${LAST_SUPERSTEP_BEGINNING} + 2*${DAG_HEIGHT} + 3" | bc)
TRANSPOSE_REDUCE_SUPERSTEP=$(echo "${LAST_SUPERSTEP_BEGINNING} + 1" | bc)
SECOND_ROOT_SUPERSTEP=$(echo "${LAST_SUPERSTEP_BEGINNING} + 3*${DAG_HEIGHT} + 5" | bc)
# With DEBUG set, dump the computed landmarks for inspection.
if [[ -n "${DEBUG}" ]]; then
printf "\n=====================================================\n"
printf "SUPERSTEPS_IN_TIME_STEP=${SUPERSTEPS_IN_TIME_STEP}\n"
printf "INIT_SUPERSTEP=${INIT_SUPERSTEP}\n"
printf "FIRST_ROOT_SUPERSTEP=${FIRST_ROOT_SUPERSTEP}\n"
printf "TRANSPOSE_MAP_SUPERSTEP=${TRANSPOSE_MAP_SUPERSTEP}\n"
printf "TRANSPOSE_REDUCE_SUPERSTEP=${TRANSPOSE_REDUCE_SUPERSTEP}\n"
printf "SECOND_ROOT_SUPERSTEP=${SECOND_ROOT_SUPERSTEP}"
printf "\n=====================================================\n"
fi
# Regexes over the (Giraph-style) log lines; the capture groups hold the
# number of seconds taken. SUPERSTEP_R also captures the superstep index.
INPUT_R='^.*input superstep: Took ([0-9]*\.[0-9]*) seconds'
SUPERSTEP_R="^.*superstep ([0-9]*): Took ([0-9]*\.[0-9]*) seconds"
TOTAL_R='^.*total: Took ([0-9]*\.[0-9]*)'
SHUTDOWN_R='^.*shutdown: Took ([0-9]*\.[0-9]*) seconds'
# Accumulators for the per-phase timings printed as CSV at the end.
INPUT_TIME=0.0
INIT_TIME=0.0
FACTORIZATION_TIME=0.0
BACKWARDS_SUBSTITUTION_TIME=0.0
TRANSPOSE_MAP_TIME=0.0
TRANSPOSE_REDUCE_TIME=0.0
STEP_SOLUTION_TIME=0.0
SHUTDOWN_TIME=0.0
TOTAL_TIME=0.0
# Scan the log (file given as $1, else stdin) line by line and bucket each
# timed superstep of the LAST time step into a solver phase. All float
# arithmetic is delegated to bc.
while read -r line;
do
if [[ "$line" =~ $INPUT_R ]]; then
INPUT_TIME="${BASH_REMATCH[1]}"
elif [[ "$line" =~ $SHUTDOWN_R ]]; then
SHUTDOWN_TIME="${BASH_REMATCH[1]}"
elif [[ "$line" =~ $TOTAL_R ]]; then
TOTAL_TIME="${BASH_REMATCH[1]}"
elif [[ "$line" =~ $SUPERSTEP_R ]]; then
thisStep="${BASH_REMATCH[1]}"
thisTime="${BASH_REMATCH[2]}"
# Only supersteps of the last time step count toward the solution time;
# earlier ones are skipped entirely.
if [[ $thisStep -ge "${INIT_SUPERSTEP}" ]]; then
STEP_SOLUTION_TIME=$(echo "${STEP_SOLUTION_TIME} + ${thisTime}" | bc)
else
continue
fi
# Landmark supersteps get their own dedicated accumulators.
if [[ $thisStep = "${INIT_SUPERSTEP}" ]]; then
INIT_TIME=$thisTime
continue
elif [[ $thisStep = "${TRANSPOSE_MAP_SUPERSTEP}" ]]; then
TRANSPOSE_MAP_TIME=$thisTime
continue
elif [[ $thisStep = "${TRANSPOSE_REDUCE_SUPERSTEP}" ]]; then
TRANSPOSE_REDUCE_TIME=$thisTime
continue
fi
# Two factorization windows: up to the first root, and from the transpose
# reduce up to the second root.
if [[ ($thisStep -gt "${INIT_SUPERSTEP}" && $thisStep -le "${FIRST_ROOT_SUPERSTEP}") || ($thisStep -gt "${TRANSPOSE_REDUCE_SUPERSTEP}" && $thisStep -le "${SECOND_ROOT_SUPERSTEP}") ]]; then
FACTORIZATION_TIME=$(echo "${FACTORIZATION_TIME} + ${thisTime}" | bc)
fi
# Backward substitution: after each root, excluding the transpose map step.
if [[ ($thisStep -gt "${FIRST_ROOT_SUPERSTEP}" && $thisStep -lt "${TRANSPOSE_MAP_SUPERSTEP}") || $thisStep -gt "${SECOND_ROOT_SUPERSTEP}" ]]; then
BACKWARDS_SUBSTITUTION_TIME=$(echo "${BACKWARDS_SUBSTITUTION_TIME} + ${thisTime}" | bc)
fi
fi
done < "${1:-/dev/stdin}"
echo "${INPUT_TIME},${SHUTDOWN_TIME},${STEP_SOLUTION_TIME},${TOTAL_TIME},${INIT_TIME},${FACTORIZATION_TIME},${BACKWARDS_SUBSTITUTION_TIME},${TRANSPOSE_MAP_TIME},${TRANSPOSE_REDUCE_TIME}" | true |
44352bcace2da2118e07aeb6542757d025c49450 | Shell | anthony-chu/build-tool | /nightly.sh | UTF-8 | 2,408 | 3.734375 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
source bash-toolbox/init.sh
include app.server.version.AppServerVersion
include calendar.util.CalendarUtil
include command.validator.CommandValidator
include curl.util.CurlUtil
include file.util.FileUtil
include help.message.HelpMessage
include logger.Logger
include props.reader.util.PropsReaderUtil
include repo.Repo
@description downloads_a__tomcat_snapshot_bundle_for_the_specified_branch
# Downloads the latest snapshot bundle for ${branch}, unpacks it into
# ${bundleDir}, prunes/refreshes the bundle files and re-archives them as a
# timestamped 7z. Relies on globals set by main() (branch, bundleDir,
# appServer, snapshotProps, _log) and on the bash-toolbox "classes" included
# above (PropsReaderUtil, CurlUtil, AppServerVersion, CalendarUtil, FileUtil).
get(){
if [[ ${branch} == *-private || ${branch} == ee-* ]]; then
local propKey=snapshot.private.url
else
local propKey=snapshot.public.url
fi
local baseUrl=$(PropsReaderUtil getValue ${snapshotProps} ${propKey})
cd ${bundleDir}
${_log} info "downloading_${branch}_snapshot_bundle..."
# Rewrite the public URL to go through the internal mirror host.
local url=$(echo ${baseUrl} | \
sed "s#https\?://#http://mirrors/#g")/snapshot-${branch}/latest
local snapshotFile=liferay-portal-${appServer}-${branch}.7z
CurlUtil getFile ${url}/${snapshotFile}
# NOTE(review): the command substitution below spans two lines with no
# backslash continuation, so "AppServerVersion" and "getAppServerVersion ..."
# execute as two separate commands inside $() — a trailing '\' was probably
# lost; confirm against the original repository.
local appServerVersion=$(AppServerVersion
getAppServerVersion ${appServer} ${branch})
local appServerRelativeDir=${appServer}-${appServerVersion}
local appServerDir=${bundleDir}/${appServerRelativeDir}
# Bundle entries that are wiped before extraction and re-zipped afterwards.
local filePaths=(
data
deploy
logs
license
osgi
${appServerRelativeDir}
tools
work
.githash
.liferay-home
)
${_log} info "cleaning_up_bundle_files..."
rm -rf ${filePaths[@]}
${_log} info "extracting_${branch}_snapshot_bundle..."
7z x ${snapshotFile} > /dev/null
# Promote the freshly extracted entries out of the archive's top directory.
for filePath in ${filePaths[@]}; do
if [[ -e liferay-portal-${branch}/${filePath} ]]; then
mv liferay-portal-${branch}/${filePath} .
fi
done
rm -rf liferay-portal-${branch} ${snapshotFile}
${_log} info "zipping_up_${branch}_snapshot_bundle..."
# NOTE(review): same suspected missing '\' continuation inside $() here.
local zipFile=liferay-portal-${appServer}-${branch}-$(CalendarUtil
getTimestamp date)$(CalendarUtil getTimestamp clock).7z
filePaths+=(portal-ext.properties)
FileUtil compress ${zipFile} filePaths
${_log} info "completed."
}
# Entry point: seeds the per-run globals (branch, bundle dir, logger) and
# then treats every remaining CLI token that is not the app server name or
# the branch name as a command (function) to run from ${baseDir}.
main(){
local appServer="tomcat"
local baseDir=$(pwd)
local branch=$(Repo getBranch $@)
local bundleDir=$(Repo getBundleDir ${branch})
local snapshotProps=build.snapshot.properties
local _log="Logger log"
# No arguments at all: print usage and stop.
if [[ ! ${1} ]]; then
HelpMessage printHelpMessage
return
fi
until [[ ! ${1} ]]; do
if [[ ${1} == ${appServer} || ${1} == ${branch} ]]; then
shift
else
cd ${baseDir}
CommandValidator validateCommand ${0} ${1}
${1}
fi
# NOTE(review): this shift runs after BOTH branches above, so a matched
# app-server/branch token is shifted twice and swallows the following
# argument as well — confirm this double shift is intended.
shift
done
}
main $@ | true |
a7bfe68d54576c5d9c9f76e2feeaf4889ffa9c25 | Shell | Diogo-Paulico/os_bootstrap_installer | /install.sh | UTF-8 | 2,050 | 2.921875 | 3 | [] | no_license | PREINSTALL=./pre_install.sh
# Require root: everything below uses apt/dpkg/snap.
if [ "$EUID" -ne 0 ]; then
  echo -e "\e[31mThis script needs root privileges! Please run with sudo!\e[0m"
  # Fix: exit non-zero so callers can detect the failure (a plain 'exit'
  # returned the status of the echo, i.e. success).
  exit 1
fi
# If a pre-install script exists, stage the config files for the invoking
# user, run it, and stop (propagating the pre-install script's exit status).
if test -f "$PREINSTALL"; then
  cp -R ../configurator_taker /home/"$SUDO_USER"/
  bash "$PREINSTALL"
  exit
fi
#remove libreoffice and geary
sudo apt remove --purge libreoffice*
sudo apt remove geary
sudo snap install spotify
sudo snap install gitkraken --classic
sudo snap install discord
snap connect discord:system-observe
#VS CODE
sudo snap install code --classic
#MS TEAMS
sudo snap install teams
#MailSpring
sudo snap install mailspring
#FlameShot
sudo snap install flameshot
#Plank
sudo add-apt-repository ppa:ricotz/docky
sudo apt-get update && sudo apt-get install plank
#Password Safe
wget -O pswsafe.deb https://sourceforge.net/projects/passwordsafe/files/Linux/1.13.0/passwordsafe-ubuntu20-1.13-amd64.deb/download
sudo dpkg -i pswsafe.deb
rm -f pswsafe.deb
#NOTION
sudo snap install notion-snap
#papirus icon pack
wget -O papirus.deb https://launchpad.net/~papirus/+archive/ubuntu/papirus/+files/papirus-icon-theme_20210401-5189+pkg21~ubuntu20.04.1_all.deb
sudo dpkg -i papirus.deb
rm -f papirus.deb
#gnome-tweaks -> enable minimize and maximize and choose correct icon pack
sudo apt install gnome-tweaks
gnome-tweaks
#import all alias from alias file
echo ". /home/$SUDO_USER/configurator_taker/alias" >> /home/"$SUDO_USER"/.bashrc
#docker
sudo apt-get remove docker docker-engine docker.io containerd runc
sudo apt-get install \
apt-transport-https \
ca-certificates \
curl \
gnupg \
lsb-release
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
echo \
"deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
sudo apt-get update
sudo apt-get install docker-ce docker-ce-cli containerd.io
| true |
dfcb9cad6165f3f356f8db6fcb0482c1deb0c8f8 | Shell | puppetlabs/puppetlabs-splunk_hec | /tasks/examples/cleanup_tokens.sh | UTF-8 | 477 | 2.6875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Paths to the Puppet and curl binaries (exported for child processes).
declare -x PUPPET='/opt/puppetlabs/bin/puppet'
declare -x CURL='/bin/curl'
# The Puppet master's SSL directory and its own certname, used below to
# authenticate the RBAC API call with the master's client certificate.
SSLDIR=$($PUPPET config print ssldir --section master)
CERTNAME=$($PUPPET config print certname --section master)
# Task parameter: the user whose RBAC tokens should be revoked.
USERNAME="$PT_username"
$CURL -X DELETE "https://$CERTNAME:4433/rbac-api/v2/tokens" \
--tlsv1 \
--cacert $SSLDIR/certs/ca.pem \
--cert $SSLDIR/certs/$CERTNAME.pem \
--key $SSLDIR/private_keys/$CERTNAME.pem \
-d "{\"revoke_tokens_by_usernames\": [\"$USERNAME\"]}" | true |
d6abbcfbf789607d2c5ea729b0ad4fce50aa7809 | Shell | demeritcowboy/github-periodic-summary | /github-summary.sh | UTF-8 | 2,216 | 3.359375 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# For this to work the cron script has to cd to our folder first.
. ./github-summary.cfg
# Provides GITHUB_SUMMARY_LASTCURSOR, the pagination cursor reached last run.
. $GITHUB_SUMMARY_PATH/github-summary-lastcursor.cfg
# format our query in the weirdo way that the api wants
# (GraphQL: up to 100 pull requests updated after the saved cursor,
# oldest first, with number/title/updatedAt/state/body and each edge cursor).
read -r -d '' query << ENDQUERY
{
"query": "query { repository(owner:\"$GITHUB_REPO_OWNER\", name:\"$GITHUB_REPO_NAME\") { pullRequests(after:\"$GITHUB_SUMMARY_LASTCURSOR\", first:100, orderBy:{field:UPDATED_AT, direction:ASC}) { edges { node { number title updatedAt state bodyText } cursor } } } }"
}
ENDQUERY
# Run query and store results in a file (the quoting weirdness is weird enough below, so we use a file to at least avoid dealting with single quotes that are in the returned result).
curl -s -H "Authorization: bearer $GITHUB_API_TOKEN" -H "Content-type: application/json" -H "Accept: application/json" -X POST -d "$query" https://api.github.com/graphql > $GITHUB_SUMMARY_PATH/github-summary.tmp
# bash doesn't do json, so we call out to php.
# Was hoping this script was simple enough to do all in bash, but at this point maybe should just do this whole script in php.
read -r -d '' phpparam << ENDPHP
\$js = file_get_contents('$GITHUB_SUMMARY_PATH/github-summary.tmp');
\$results = json_decode(\$js);
\$cursor = NULL;
\$gs_tz = '$GITHUB_SUMMARY_TIMEZONE';
if (empty(\$gs_tz)) {
\$tz = new DateTimeZone(date_default_timezone_get());
} else {
\$tz = new DateTimeZone(\$gs_tz);
}
foreach(\$results->data->repository->pullRequests->edges as \$r) {
\$dt = new DateTime(\$r->node->updatedAt);
\$dt->setTimezone(\$tz);
echo "{\$r->node->title}\\n";
echo "https://github.com/civicrm/civicrm-core/pull/{\$r->node->number}\\n";
echo "Status: {\$r->node->state}\\n";
echo "Updated: " . \$dt->format('Y-m-d H:i') . "\\n";
echo "{\$r->node->bodyText}\\n\\n========================\\n\\n";
\$cursor = \$r->cursor;
}
if (\$cursor) {
file_put_contents('$GITHUB_SUMMARY_PATH/github-summary-lastcursor.cfg', "GITHUB_SUMMARY_LASTCURSOR=\$cursor");
}
ENDPHP
msg=$( php -r "$phpparam" );
mail -s "Github Summary" -S "from=$GITHUB_SUMMARY_EMAIL" $GITHUB_SUMMARY_EMAIL << ENDMSG
$msg
ENDMSG
rm -f $GITHUB_SUMMARY_PATH/github-summary.tmp
| true |
4327322f1213702171aa29e156c3223b134f08d0 | Shell | uwplse/verdi-raft | /extraction/vard-debug/scripts/start-tmux.sh | UTF-8 | 527 | 2.703125 | 3 | [
"BSD-2-Clause"
] | permissive | #!/bin/bash
# Build the command line that launches vard replica $1.
# Replica i listens on port 800i and stores its database under
# /tmp/vard-800i; the cluster membership (nodes 1-3 on 9001-9003) is fixed.
function start-vard-command {
  PORT="800${1}"
  local cmd="./vard.native -dbpath \"/tmp/vard-$PORT\" -port \"$PORT\""
  cmd+=" -node 1,localhost:9001 -node 2,localhost:9002 -node 3,localhost:9003"
  cmd+=" -me \"$1\""
  echo -e "$cmd"
}
# Start a detached tmux session named 'vard' and launch the three replicas,
# one per pane: one horizontal split, then two vertical splits sized (66%/50%)
# so the three right-hand panes end up with roughly equal height.
tmux new-session -d -s 'vard'
tmux split-window -h "$(start-vard-command 1)"
tmux split-window -v -p 66 "$(start-vard-command 2)"
tmux split-window -v -p 50 "$(start-vard-command 3)"
tmux select-pane -L
# Replace this shell with the attached tmux client.
exec tmux attach
| true |
9c5e749c98dc20caf41010553dca54f329580f31 | Shell | rpappalax/deploy-tools | /autopush/watch.sh | UTF-8 | 619 | 3.171875 | 3 | [] | no_license | source config.sh
URL_STATUS="https://$HOST_UPDATES/status"
clear;
JSON=`curl -s "$URL_STATUS"`
VERS_OLD=`echo $JSON | /usr/local/bin/jq '.version' ;`
say $VERS_OLD
while :; do
clear;
JSON=`curl -s "$URL_STATUS"`
VERS=`echo $JSON | /usr/local/bin/jq '.version' ; sleep 5;`
if [ "$VERS_OLD" == "$VERS" ]; then
MSG="still same version: $VERS"
FLAG=0
else
MSG="Attention! Attention! Attention! Attention! DNS has changed! New version is: $VERS"
FLAG=1
fi
echo $MSG
say $MSG
if [ $FLAG == 1 ]; then
echo $MSG
break;
fi
echo $MSG
say $MSG
done
| true |
d9ea1eaaf8f7909e01012d5020311cdb906468b6 | Shell | theappleman/ci-roles | /run.sh | UTF-8 | 364 | 3.484375 | 3 | [] | no_license | #!/bin/bash
# Parse options: -c <type> becomes ansible-playbook's connection flag;
# -K, -s and -v are forwarded to ansible-playbook verbatim.
while getopts c:vKs opt; do
case $opt in
c) transport="-c $OPTARG"
;;
K|s|v) exargs="$exargs -$opt"
;;
esac
done
shift $((OPTIND-1))
# First positional argument is the hosts pattern; the rest are role names.
hosts=$1
shift
# Build a throwaway playbook in the current directory that applies the
# requested roles to the requested hosts, run it, then clean up.
playbook=$(mktemp -p .)
echo -e "---\n- hosts: $hosts\n  roles:" > $playbook
for role in $@; do echo "  - $role"; done >> $playbook
ansible-playbook $exargs $transport $playbook
rm "$playbook"
| true |
effa55147a408942e5581c0c277cb4854bb6142f | Shell | yaolongli520/rootfs | /unit_tests/BAT/bat-uart.sh | UTF-8 | 2,989 | 4.125 | 4 | [] | no_license | #!/bin/bash
#
# Tests for the uart ports:
# - check that data is correctly transmitted and received in loopback mode
# - check that multiple bytes data is correctly transmitted and received in
# loopback mode for various baud rates
#
# This test tries to be board-independent
#
# Abort the whole BAT run on the first failing command.
set -e
# Directory of this script, resolved through symlinks.
batdir=$(dirname $(readlink -f "${BASH_SOURCE[0]}"))
. $batdir/bat_utils.sh
# Per-port overrides: stress_test[<port>]="disable" skips the stress runs.
declare -A stress_test
# RXD pin is shared with NAND (see MLK-12482)
machine=`cat /sys/devices/soc0/machine`
case $machine in
"Freescale i.MX6 "*" SABRE Automotive Board")
stress_test["ttymxc2"]="disable"
;;
esac
# EXIT-trap handler: resumes (SIGCONT) any processes that were stopped while
# testing the current port. Reads the globals $pids and $port maintained by
# the main loop below.
function cleanup
{
if [ "$pids" != "" ]; then
echo "Resume processes using /dev/${port}: $pids"
signal_processes $pids CONT
fi
}
# returns list of pids that are using file $1
# $1: file name to search for
function lsof()
{
	filename="$1";
	# Candidate pids are the numeric directories directly under /proc.
	all_pids=$(find /proc -maxdepth 1 -name "[0-9]*")
	for pid in $all_pids; do
		# A process uses the file if one of its fd symlinks points at it
		# (permission errors on other users' processes are suppressed).
		# Fix: grep for the requested file name; the original
		# grep -q "$(unknown)" ran a nonexistent command, yielding an
		# empty pattern that matched every process.
		if ls -l ${pid}/fd 2>/dev/null | grep -q "$filename"; then
			echo "${pid#/proc/}"
		fi
	done
}
# sends signal $2 to given list of processes $1
# $1: list of pids (whitespace-separated; deliberately left unquoted below
#     so it word-splits into individual pids)
# $2: signal
# Note: assigns the globals $pids and $signal (cleanup() also reads $pids).
function signal_processes()
{
pids="$1"
signal="$2"
for pid in $pids; do
kill -${signal} $pid
done
}
current_pid=$$
test_baud_rates="9600 19200 115200 576000 1152000 3000000"
# Test on all uart
uart_ports=$(find /sys/class/tty \( -iname ttymxc* -o -iname ttyLP* \) -printf '%f\n')
# Make sure we restore stopped processes at error
trap cleanup EXIT
# Transfer data in loopback mode
for port in $uart_ports; do
pids=$(lsof /dev/${port})
driver=$(basename $(readlink -f "/sys/class/tty/$port/device/driver"))
echo "checking uart $port driver $driver"
# Don't run test from serial console
for pid in $pids; do
if [ "$current_pid" == "$pid" ]; then
echo "Cannot test port /dev/${port} while using it as console."\
"Run test using SSH."
trap - EXIT
exit 1
fi
done
# pause processes using this uart
if [ "$pids" != "" ]; then
echo "Pause processes using /dev/${port}: $pids"
signal_processes $pids STOP
# disable stress test for console uart
stress_test[$port]="disable"
fi
# Run simple loopback test
echo "Test: loopback test for /dev/${port}"
$batdir/../UART/mxc_uart_test.out /dev/${port}
# Run test with various baud rates. Don't use more then FIFO size
# chunks as the loopback test does not use flow control.
if [ "${stress_test[$port]}" != "disable" -a "$driver" == "imx-uart" ]; then
for baud in $test_baud_rates; do
echo "Test: loopback test for /dev/${port} at baud $baud"
$batdir/../UART/mxc_uart_stress_test.out /dev/${port} $baud D L 5 31 N
done
fi
# resume processes using this uart
if [ "$pids" != "" ]; then
echo "Resume processes using /dev/${port}: $pids"
signal_processes $pids CONT
fi
done
| true |
b1e4ebbf0e586a61b00e872541dd7488ee2367d8 | Shell | DavidAlphaFox/haystack | /bin/package | UTF-8 | 1,519 | 2.6875 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# Copyright (c) 2012-2016 Peter Morgan <peter.james.morgan@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
DISTRIBUTOR=$(lsb_release --id --short | perl -ne 'print lc')
CODENAME=$(lsb_release --codename --short | perl -ne 'print lc')
ARCH=amd64
PROJECT=shortishly/haystack
URL=https://github.com/${PROJECT}
PREFIX=/opt
echo ${DISTRIBUTOR}
echo ${CODENAME}
echo ${ARCH}
fpm --verbose \
-s dir \
-t deb \
-C _rel \
--url ${URL} \
--architecture ${ARCH} \
--name $(bin/app) \
-v $(bin/version) \
--prefix ${PREFIX} .
package_cloud yank \
${PROJECT}/${DISTRIBUTOR}/${CODENAME} \
$(bin/app)_$(bin/version)_${ARCH}.deb
package_cloud push \
${PROJECT}/${DISTRIBUTOR}/${CODENAME} \
$(bin/app)_$(bin/version)_${ARCH}.deb
curl -H "Content-Type: application/json" \
--data '{"source_type": "Branch", "source_name": "master"}' \
-X POST \
https://registry.hub.docker.com/u/shortishly/haystack/trigger/${DOCKER_TRIGGER}/
| true |
39b99bc7a65718bc648b8a4258fa801e4760afca | Shell | hostersecurity/automated-tests | /intrusiontest.sh | UTF-8 | 1,898 | 3.484375 | 3 | [] | no_license | MAGIC_FILE="magic_hostersecurity.php"
function runtestcommand()
{
echo "START TEST $4 FOR $1"
echo "#################################" > tmptest/$3/$4.test
echo " TEST CASE NAME : $4" >> tmptest/$3/$4.test
echo " COMMAND : $2" >> tmptest/$3/$4.test
echo " URL : $1" >> tmptest/$3/$4.test
echo "#################################" >> tmptest/$3/$4.test
curl --silent -k "$1$MAGIC_FILE" --data-urlencode "command=$2" >> tmptest/$3/$4.test
echo "#################################" >> tmptest/$3/$4.test
}
function runphpinfo()
{
curl --silent -k "$1$MAGIC_FILE" --data-urlencode "command=phpinfo" >> tmptest/$3/phpinfo.html
}
function compiletestresults()
{
cat tmptest/$1/*.test > $1.intrusion
cat tmptest/$1/phpinfo.html > $1.phpinfo.html
}
function runtests()
{
runtestcommand $1 "ls" $2 "test_ls"
runtestcommand $1 "ls -laR /home/" $2 "test_ls_home"
runtestcommand $1 "ls -l /etc/" $2 "test_ls_etc"
runtestcommand $1 "ls -lR /var/" $2 "test_ls_var"
runtestcommand $1 "cat /etc/shadow 2>&1" $2 "test_cat_shadow"
runtestcommand $1 "cat /etc/passwd 2>&1" $2 "test_cat_passwd"
runtestcommand $1 "uname -a" $2 "test_uname"
runtestcommand $1 "whoami" $2 "test_whoami"
runtestcommand $1 "groups" $2 "test_my_groups"
runtestcommand $1 "cat /etc/group" $2 "test_cat_group"
runtestcommand $1 "ps -fix" $2 "test_process"
runphpinfo $1 "phpinfo" $2 "test_get_phpinfo"
compiletestresults $2
}
while read URL; do
STATUS=$(curl -k -s -o /dev/null -w '%{http_code}' "$URL$MAGIC_FILE")
DOMAIN=$(echo $URL | awk -F/ '{print $3}')
mkdir -p tmptest/$DOMAIN
if [ $STATUS -eq 200 ] ; then
echo "Magic script found ... starting tests"
runtests $URL $DOMAIN
else
echo "No magic script found"
fi
rm -rf tmptest/$DOMAIN
mv tmptest intrusion_tests
if [ -f *.intrusion ]; then
mv *.intrusion intrusion_tests
mv *.phpinfo.html intrusion_tests
fi
done < websites.txt
| true |
edeb107a97f6010741f0e534184de23a7dc35d90 | Shell | tathagata/moneymotion | /process | UTF-8 | 807 | 2.796875 | 3 | [] | no_license | #!/bin/bash
awk '
BEGIN{
while( getline < "categories") list[$2]=$1;
}
function trim(s) {gsub(/[[:blank:]]/,"_",s);return s}
{
FS=",";
NR>7
if($13=="Debit"){
pos=trim($3)
if (pos in list){
expense[list[pos]]+=$4;
print "[\47"list[pos]"\47,new Date("substr($2,1,10)"),"expense[list[pos]]"],"
#print pos"-"list[pos]"-"expense[list[pos]]
}else{
list[pos]=ghost
expense[ghost]+=$4
print pos >> "categories"
print Added new category to categories file
}
}
}' download_1274854364185.csv | tr '-' ',' > results
#awk -F , '{if($13=="Debit"){total+=$4; print "["$3",new Date("substr($2,1,10)"),"$4","total"],";}}' download_1274486431674.csv | tr '-' ','
#This is a comment in the experiment branch
cat top.html > page.html
cat results >> page.html
cat bottom.html >> page.html
| true |
d17dd52d52f4fd4decc29b1b7bfdab4ed97f192a | Shell | hassoon1986/repo | /archlinuxcn/oce-git/PKGBUILD | UTF-8 | 1,155 | 2.546875 | 3 | [] | no_license | # Maintainer: Yichao Yu <yyc1992@gmail.com>
# Contributor: Giuseppe Borzi <gborzi@ieee.org>
# Contributor: Brice M<E9>alier <mealier_brice@yahoo.fr>
# Contributor: Michele Mocciola <mickele>
pkgname=oce-git
pkgver=0.17.0.304.g9fa6390e2
pkgrel=1
pkgdesc="Open CASCADE community edition, 3D modeling & numerical simulation"
arch=('i686' 'x86_64')
url="http://www.opencascade.org"
license=('custom')
depends=(freeimage freetype2 mesa libgl glu opencl-icd-loader libx11 tk)
makedepends=(cmake git opencl-headers)
optdepends=(java-runtime)
provides=("opencascade=6.8.0" 'oce')
conflicts=("opencascade" 'oce')
options=(!libtool debug)
source=(git://github.com/tpaviot/oce)
md5sums=('SKIP')
pkgver() {
cd oce
git describe | sed -e 's/^[^0-9]*//' -e 's/-/.0./' -e 's/-/./g'
}
build() {
cd oce
mkdir -p build
cd build
export CFLAGS+=' -DGLX_GLXEXT_LEGACY'
export CXXFLAGS+=' -DGLX_GLXEXT_LEGACY'
cmake .. -DOCE_INSTALL_PREFIX=/usr -DOCE_WITH_OPENCL=On \
-DOCE_WITH_FREEIMAGE=On
make
}
package() {
cd oce/build
make install DESTDIR="${pkgdir}"
install -dm755 "$pkgdir/usr/share/licenses/$pkgname/"
install -m644 ../OCCT_LGPL_EXCEPTION.txt "$pkgdir/usr/share/licenses/$pkgname"
}
| true |
caf0f50bbc466d319c8ca67b11b4c02a65353be5 | Shell | mfkiwl/jet | /tools/CtsTest/jenkinsCTSflash.sh | UTF-8 | 2,657 | 4.09375 | 4 | [] | no_license | #!/bin/bash
#obtain workspace directory from argument
WORKSPACE=$1
#Number of tries allowed to enable ADB throughout the while script
TRIES=5
#JET mounting location
MNTPNT="/mnt/JET"
#JET enable adb location
JETADBLOCATION="/mnt/JET/ReconApps/LispXML/Input/adb.lxl"
#JET auto-generated file for adb
JETADBOUTLOCATION="/mnt/JET/ReconApps/LispXML/Output/adb.lxl"
#location of the adb-enabling lxl file
ADBLOCATION="$1/tools/CtsTest/adb.lxl"
#function to enable adb
enableADB ()
{
echo
echo "Enabling Android ADB through MTP...."
#check if JET is available
if [[ $(mtp-detect) == *"No raw devices found"* ]];
then
echo "no JET detected... FAIL"
exit 1
else
echo "JET detected..."
fi
#check if adb is enabled already
if [ $(adb get-state) == "device" ];
then
echo "ADB already enabled"
return
fi
#check if mounting point is created already
if [ ! -e "$MNTPNT" ];
then
echo
echo "Mounting point for JET has not been created..."
echo "Creating mounting point \"$MNTPNT\"..."
sudo mkdir $MNTPNT
fi
#mount JET
echo
echo "Mounting JET to $MNTPNT...."
sudo mtpfs -o allow_other $MNTPNT
if [ -s "$MNTPNT" ];
then
echo "Mount FAILED..."
echo "EXIT...."
exit 1
fi
#enable adb with the .lxl file
echo "Enabling ADB...."
rm $JETADBLOCATION $JETADBOUTLOCATION
cp $ADBLOCATION $JETADBLOCATION
#unmount adb
echo "Unmounting JET...."
sudo umount -l $MNTPNT
if [ ! -s "$MNTPNT" ];
then
echo
echo "WARNING! UNMOUNT WAS UNSUCCESSFUL!!!"
echo
fi
#delay for 2 sec then check if adb is enabled
#if not, sleep for 10 sec restart the process
#maximum of retries allowed are specified by the variable $TRIES
sleep 2
if [ $(adb get-state) != "device" ];
then
if (( TRIES > 0 ));
then
echo "Android ADB could not find device...."
echo "RETRY..."
sleep 10
(( TRIES-- ))
enableADB
else
echo "Android ADB could not be enables..."
exit 1
fi
fi
}
echo "Target Workspace: $WORKSPACE"
enableADB
echo
echo "Flashing JET with new build...."
#flash JET
adb reboot bootloader
cd $WORKSPACE/omap4_emmc_files_jet/
sudo ./flash_new.sh
#wait for jet to boot completely
sleep 60
while true
do
if [[ $(mtp-detect) == *"No raw devices found"* ]];
then
sleep 30
else
break
fi
done
#enable ADB after flashing master build
sleep 10
enableADB
echo "SUCCESS...."
| true |
a7ab78e0aa2405267a5df5d39d0245357aed7aee | Shell | LiamLombard/dots | /applyconfigs.sh | UTF-8 | 178 | 2.921875 | 3 | [] | no_license | #!/bin/sh
# Copy the contents of directory $1 into directory $2 (force-overwrite).
copyfolder()
{
    # Fix: quote the arguments so paths containing spaces survive
    # word-splitting; the glob stays outside the quotes so it still expands.
    cp -Rf "$1"/* "$2"
}
# Copy file (or tree) $1 to $2 (force-overwrite).
copyfile()
{
    # Fix: quote the arguments so paths containing spaces survive intact.
    cp -Rf "$1" "$2"
}
copyfolder "polybar" "/home/liam/.config/polybar"
copyfolder "openbox" "/home/liam/.config/openbox" | true |
ea45a843b5326b24085a627ec7451a6216137217 | Shell | dougllcooper/dotfiles | /.local/bin/remaps | UTF-8 | 822 | 2.734375 | 3 | [] | no_license | #!/bin/sh
# This script is called on startup to remap keys.
# Increase key speed via a rate change
xset r rate 300 50
# Map the caps lock key to super...
#setxkbmap -option caps:super
# But when it is pressed only once, treat it as escape.
#killall xcape 2>/dev/null ; xcape -e 'Super_L=Escape'
# Changed - press one is Esc, hold is Ctrl
setxkbmap -option caps:ctrl_modifier
# But when it is pressed only once, treat it as escape.
# Kill any running xcape first so the remaps are not applied twice on re-run.
killall xcape 2>/dev/null
#xcape -e 'Caps_Lock=Escape'
# xcape -e 'Control_L=Escape'
# xcape -t 250 -e "Shift_L=parenleft;Shift_R=parenright;Control_L=Escape"
# Tap Shift -> paren; tap Caps -> Escape (holds keep the setxkbmap behavior).
xcape -t 250 -e "Shift_L=parenleft;Shift_R=parenright;Caps_Lock=Escape"
echo "This is run from .profile"
# Map the menu button to right super as well.
# Only useful if you have a right super.
# xmodmap -e 'keycode 135 = Super_R'
| true |
b8ac4456f4d8b7fc74cbf4a5ddbaf0e0c2211ead | Shell | davidddw/imageBuilder | /centos_7.2_kvm_livecloud/build_kvm.sh | UTF-8 | 682 | 3.65625 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -x
: ${BUILD_VERSION:="v$(date +'%Y%m%d%H%M%S')"}
: ${BUILD_NAME:="CentOS_7.2-x86_64"}
: ${VM_NAME:="centos_7.2"}
export BUILD_NAME
export VM_NAME
export BUILD_VERSION
PWD=`pwd`
FILENAME=${VM_NAME}
PACKER=/usr/bin/packer
if [ -e "${PWD}/disk" ];
then
rm -rf ${PWD}/disk
fi
if [ ! -e "${PWD}/final_images" ];
then
mkdir -pv ${PWD}/final_images
fi
$PACKER build template_kvm.json
cd disk
qemu-img convert -c -O qcow2 $FILENAME ${BUILD_NAME}-${BUILD_VERSION}.qcow2
cd -
mv ${PWD}/disk/${BUILD_NAME}-${BUILD_VERSION}.qcow2 ${PWD}/final_images
rm -rf ${PWD}/disk
echo "==> Generate files:"
find ${PWD}/final_images -type f -printf "==> %f\n"
echo "Done" | true |
94a2eb2592deaa1db279d5ea9d2d203e582f4575 | Shell | PaddlePaddle/paddle-ce-latest-kpis | /ce_cloud_models/paddleNLP/linux/scripts/lexical_analysis/infer.sh | UTF-8 | 1,291 | 3.21875 | 3 | [] | no_license | #unset http_proxy
HTTPPROXY=$http_proxy
HTTPSPROXY=$https_proxy
unset http_proxy
unset https_proxy
#外部传入参数说明
# $1: $XPU = gpu or cpu
#获取当前路径
cur_path=`pwd`
model_name=${PWD##*/}
echo "$model_name 模型样例测试阶段"
#路径配置
root_path=$cur_path/../../
code_path=$cur_path/../../models_repo/examples/lexical_analysis/
log_path=$root_path/log/$model_name/
mkdir -p $log_path
#临时环境更改
cd $root_path/models_repo
#访问RD程序
cd $code_path
print_info(){
if [ $1 -ne 0 ];then
cat ${log_path}/$2.log
echo "exit_code: 1.0" >> ${log_path}/$2.log
else
echo "exit_code: 0.0" >> ${log_path}/$2.log
fi
}
# Run inference on the requested device, log output, and record a pass/fail
# marker via print_info.
# NOTE(review): the gpu and cpu branches run the exact same command — the
# branch only matters through ${DEVICE}; the if/else could be collapsed.
DEVICE=$1
if [[ ${DEVICE} == "gpu" ]]; then
python predict.py --data_dir ./lexical_analysis_dataset_tiny \
        --init_checkpoint ./save_dir/model_100.pdparams \
        --batch_size 32 \
        --device ${DEVICE} > $log_path/infer_${DEVICE}.log 2>&1
print_info $? infer_${DEVICE}
else
python predict.py --data_dir ./lexical_analysis_dataset_tiny \
        --init_checkpoint ./save_dir/model_100.pdparams \
        --batch_size 32 \
        --device ${DEVICE} > $log_path/infer_${DEVICE}.log 2>&1
print_info $? infer_${DEVICE}
fi
# Restore the proxies saved at the top of the script.
export http_proxy=$HTTPPROXY
export https_proxy=$HTTPSPROXY
| true |
87285419a4151fc9a4a3477ad50ee4a3471efc60 | Shell | dimitrov570/uni-operating-systems-course | /exam_prep/shell/task13.sh | UTF-8 | 193 | 3.3125 | 3 | [] | no_license | #!/bin/bash
# Usage: task13.sh <directory>
# Print the paths of broken/unresolvable symlinks under <directory>.
# Exit codes: 1 = wrong argument count, 2 = argument is not a directory.
if [ $# -ne 1 ]; then
exit 1;
fi
if [ ! -d $1 ]; then
exit 2;
fi
# find's %Y directive prints the type of a symlink's target: 'N' means the
# target does not exist, 'L' a symlink loop, '?' a stat error — i.e. the link
# is broken. The awk filter keeps only those paths (%p).
find "$1" -type l -printf "%Y %p\n" 2> /dev/null | awk '{if ($1 == "N" || $1 == "L" || $1 == "?") print $2}'
| true |
dd32bfafdd55193fb2f6ad5c2bf781431a22ecdf | Shell | soundarrk/az-ip-fwd | /ip_fwd.sh | UTF-8 | 3,028 | 4.15625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
#--------------------------------------------------------------------------
usage() {
    # Print command-line help. The help body goes to stderr; the surrounding
    # yellow/reset color escapes go to stdout, exactly as before.
    printf '\033[33m\n'
    {
        echo "usage: ${0} [-i <eth_interface>] [-f <frontend_port>] [-a <dest_ip_addr>] [-b <dest_port>]"
        echo "where:"
        echo "<eth_interface>: Interface on which packet will arrive and be forwarded"
        echo "<frontend_port>: Frontend port on which packet arrives"
        echo "<dest_port>    : Destination port to which packet is forwarded"
        echo "<dest_ip_addr> : Destination IP which packet is forwarded"
    } 1>&2
    printf '\033[0m\n'
}
# Parse and validate the four required options; any missing value prints the
# usage text and aborts.
if [[ $# -eq 0 ]]; then
echo -e "\e[31mERROR: no options given\e[0m"
usage
exit 1
fi
while getopts 'i:f:a:b:' OPTS; do
case "${OPTS}" in
i)
echo -e "\e[32mUsing ethernet interface ${OPTARG}\e[0m"
ETH_IF=${OPTARG}
;;
f)
echo -e "\e[32mFrontend port is ${OPTARG}\e[0m"
FE_PORT=${OPTARG}
;;
a)
echo -e "\e[32mDestination IP Address is ${OPTARG}\e[0m"
DEST_HOST=${OPTARG}
;;
b)
echo -e "\e[32mDestination Port is ${OPTARG}\e[0m"
DEST_PORT=${OPTARG}
;;
*)
usage
exit 1
;;
esac
done
# All four options are mandatory; getopts cannot enforce that on its own.
if [ -z ${ETH_IF} ]; then
echo -e "\e[31mERROR: ethernet interface not specified!!!\e[0m"
usage
exit 1
fi
if [ -z ${FE_PORT} ]; then
echo -e "\e[31mERROR: frontend port not specified!!!\e[0m"
usage
exit 1
fi
if [ -z ${DEST_HOST} ]; then
echo -e "\e[31mERROR: destination IP not specified!!!\e[0m"
usage
exit 1
fi
if [ -z ${DEST_PORT} ]; then
echo -e "\e[31mERROR: destination port not specified!!!\e[0m"
usage
exit 1
fi
#1. Make sure you're root
echo -e "\e[32mChecking whether we're root...\e[0m"
# NOTE(review): in bash UID is a readonly variable that is always set, so the
# fallback assignment below would fail (harmlessly); it only matters for
# shells that do not provide UID.
if [ -z ${UID} ]; then
UID=$(id -u)
fi
if [ "${UID}" != "0" ]; then
echo -e "\e[31mERROR: user must be root\e[0m"
exit 1
fi
#2. Make sure IP Forwarding is enabled in the kernel
echo -e "\e[32mEnabling IP forwarding...\e[0m"
echo "1" > /proc/sys/net/ipv4/ip_forward
#3. Check if IP or hostname is specified for destination IP
# Dotted-quad input is used verbatim; anything else is resolved via host(1).
if [[ ${DEST_HOST} =~ ^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
DEST_IP=${DEST_HOST}
else
DEST_IP=$(host ${DEST_HOST} | grep "has address" | awk '{print $NF}')
fi
echo -e "\e[32mUsing Destination IP ${DEST_IP}\e[0m"
#4. Get local IP
# First IPv4 address bound to the chosen interface, with the /prefix stripped.
LOCAL_IP=$(ip addr ls ${ETH_IF} | grep -w inet | awk '{print $2}' | awk -F/ '{print $1}')
echo -e "\e[32mUsing Local IP ${LOCAL_IP}\e[0m"
#4. Do DNAT
# Rewrite inbound TCP on ETH_IF:FE_PORT to DEST_IP:DEST_PORT.
echo -e "\e[32mCreating DNAT rule from ${LOCAL_IP}:${FE_PORT} to ${DEST_IP}:${DEST_PORT}...\e[0m"
iptables -t nat -A PREROUTING -p tcp -i ${ETH_IF} --dport ${FE_PORT} -j DNAT --to ${DEST_IP}:${DEST_PORT}
#4. Do SNAT
# MASQUERADE is used instead of the explicit SNAT rule kept commented out
# below, so replies use whatever address ETH_IF currently has.
echo -e "\e[32mCreating SNAT rule from ${DEST_IP}:${DEST_PORT} to ${LOCAL_IP}:${FE_PORT}...\e[0m"
#iptables -t nat -A POSTROUTING -p tcp -o ${ETH_IF} --dport ${DEST_PORT} -j SNAT -d ${DEST_IP} --to-source ${LOCAL_IP}:${FE_PORT}
iptables -t nat -A POSTROUTING -o ${ETH_IF} -j MASQUERADE
echo -e "\e[32mDone!\e[0m"
| true |
0014757113f6fd4687ef2e951de66c681eec2737 | Shell | abijith-kp/os_lab | /lab2/odd_even.sh | UTF-8 | 182 | 3.734375 | 4 | [] | no_license | #check wheather the given number is odd or even
#!/bin/bash
# Read an integer from the user and report whether it is odd or even.
read -p "enter a no.: " n
# Remainder of n / 2 decides parity (legacy `expr`; $((n % 2)) is the modern form).
chk=`expr $n % 2`
if [ $chk == 0 ];
then
echo "$n is even"
# Even case handled — stop so the odd message below is not also printed.
exit
fi
echo "$n is odd"
exit
| true |
a6e2e8e87d59bbdbea4e9535e999eade6e8f8a96 | Shell | ArshamR/CS480-Group-Project | /detect.sh | UTF-8 | 1,121 | 3.796875 | 4 | [] | no_license | #!/bin/bash
# Usage: detect.sh <output file> <log file>
# Scans a whitespace-separated connection log for bursts of 10+ consecutive
# lines sharing the same timestamp, destination port and destination IP, and
# records the attacking source IPs in <output file>.
if [ $# -ne 2 ]
then
echo "Usage: <output file>, <log file> (Don't include path, file must be in same directory) "
exit 1;
fi
if [ ! -e $2 ]
then
echo "Log file does not exist"
exit
fi
# Start from a fresh results file on every run.
if [ -e $1 ]
then
rm $1
fi
results=$1
declare -a sourceIps
count=0
while read p;
do
# Assumed field layout: 3=timestamp, 7=dest port, 8=source IP, 9=dest IP
# -- TODO confirm against the actual log format.
timeS=`echo "$p" | awk '{ print $3 }'`
port=`echo "$p" | awk '{ print $7 }'`
ip=`echo "$p" | awk '{ print $9 }'`
sourceIp=`echo "$p" | awk '{ print $8 }'`
# Same time/port/target as the previous line: extend the current burst.
if [ "$timeS" = "$timePrev" -a "$port" = "$portPrev" -a "$ip" = "$ipPrev" ]
then
sourceIps[$count]=$sourceIp
count=`expr $count + 1`
# Burst just ended; 10 or more repeated hits is treated as an attack.
elif [ $count -ge 10 ]
then
echo "**************DDOS ATTACK DETECTED*******"
echo "Targeted IP: $ipPrev"
echo "Targeted Port: $portPrev"
echo "Time: $timePrev"
echo "Number of hits $count"
# Record format: source IPs, then port, time, target IP, then a '*' separator.
printf '%s\n' "${sourceIps[@]}" >> $results
printf '%s\n' "$portPrev" >> $results
printf '%s\n' "$timePrev" >> $results
printf '%s\n' "$ipPrev" >> $results
printf '%s\n' "*" >> $results
let count=0
sourceIps=()
else
# Short burst: reset the accumulator and keep scanning.
let count=0
sourceIps=()
fi
timePrev=$timeS
portPrev=$port
ipPrev=$ip
done < $2
b54c546d0c001ec841bb2478bea95772c12d07e0 | Shell | freyes/pinhole | /tests/run.sh | UTF-8 | 330 | 2.859375 | 3 | [] | no_license | #!/bin/bash -x
# Start gunicorn in the background, run the CasperJS browser tests against it,
# then kill gunicorn and propagate the test suite's exit status.
echo "launching gunicorn"
gunicorn pinhole.common.app --access-logfile access.log --log-file - --log-level error &
GPID=$!
# NOTE(review): $? here is the status of the GPID assignment (always 0), so
# this branch can never fire; backgrounding with '&' does not report whether
# gunicorn actually started.
if [ "x$?" != "x0" ]; then
echo "Error running gunicorn"
exit 1
fi
pushd tests/
echo "running casperjs"
casperjs test *.js
EX=$?
echo "killing gunicorn: $GPID"
kill $GPID
exit $EX
| true |
e4a16e7881c5e2c44fb68ae3232a7c695d57b36b | Shell | Desenho2018-1/simian | /scripts/sh/docker-deploy.sh | UTF-8 | 641 | 3.421875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Represents the latest version of the project according to setup.py file
VERSION=$(python setup.py --version)
echo "Latest Simian version is $VERSION";
# double validation in script and in .travis.yml
if [[ "${TRAVIS_BRANCH}" == "master" ]]; then
echo "Deploying to Docker registry latest Simian...";
docker login -u "$DOCKER_USERNAME" -p "$DOCKER_PASSWORD";
docker build -f /home/travis/build/Desenho2018-1/simian/scripts/docker/Dockerfile -t simian:$VERSION .;
docker tag simian:$VERSION $DOCKER_USERNAME/simian:$VERSION;
docker push $DOCKER_USERNAME/simian:$VERSION;
else
echo "Skipping Docker registry deploy";
fi;
| true |
366a6c1dd5ec396bdbe5fe2f43b43394b94ea18f | Shell | pivotal-sadubois/pcfconfig | /demos/pas-demo-articulate/demo_articulate_cleanup.sh | UTF-8 | 771 | 2.734375 | 3 | [
"Apache-2.0"
] | permissive | cf unmap-route articulate-v2 apps.pcfone.io --hostname articulate-workshop
cf routes > /tmp/cfroutes
for host in articulate-workshop articulate-temp articulate-attendee-service; do
cnt=$(egrep -c " $host " /tmp/cfroutes)
if [ $cnt -gt 0 ]; then
dom=$(egrep " $host " /tmp/cfroutes | awk '{ print $3 }')
echo "cf delete-route $dom --hostname $host -f"
fi
done
rm -f /tmp/cfroutes
cf delete articulate -f
cf delete articulate-v2 -f
cf delete attendee-service -f
cf delete-service attendee-service -f
cf delete-service attendee-mysql -f
dmn=$(cf routes | grep " articulate-workshop " | awk '{ print $3 }')
cf delete-route $dmn --hostname articulate-workshop -f
cf delete-route $dmn --hostname articulate-attendee-service -f
cf apps
cf services
cf routes
| true |
5e7b82a3349f41af67d9cd487b2329f12548a2d9 | Shell | monch1962/wilee | /demo/jsonplaceholder.typicode.com/010-postman-to-wilee.sh | UTF-8 | 1,422 | 3.328125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
POSTMAN_TESTS=./test-cases/postman-collections/*.json
for postmanfile in ${POSTMAN_TESTS}
do
TEST_ID=`cat $postmanfile | jq -r '.info._postman_id'`
TEST_DESCRIPTION=`cat $postmanfile | jq '.info.name'`
#echo "TEST_ID: " $TEST_ID
#echo "TEST_DESCRIPTION: " $TEST_DESCRIPTION
POSTMAN_HOST=`cat $postmanfile | jq -r '.item[0].request.url.raw'`
#echo $POSTMAN_HOST
# extract the protocol
proto="$(echo $POSTMAN_HOST | grep :// | sed -e's,^\(.*://\).*,\1,g')"
#echo "PROTO: " $proto
# remove the protocol
url="$(echo ${POSTMAN_HOST/$proto/})"
echo "URL: " $url
# extract the path (if any)
path="$(echo $url | grep / | cut -d/ -f2- | sed -es,^,/,)"
#echo "PATH: " $path
hostname="$(echo ${POSTMAN_HOST/$path/})"
echo "HOSTNAME: " $hostname
REQUEST_VERB=`cat $postmanfile | jq -r '.item[0].request.method'`
cat $postmanfile \
| jq --arg tc $postmanfile '._comment |= $tc' \
| jq '.test_info.tags[0] |= "postman"' \
| jq -r --arg testid $TEST_ID '.test_info.id = $testid' \
| jq -r --arg targethost $hostname '.test_info.postman_host = $targethost' \
| jq --arg requestverb $REQUEST_VERB '.request.verb = $requestverb' \
| jq --arg url $path '.request.url = $url' \
| jq 'del(.info,.item)'
#| jq --arg testdescription $TEST_DESCRIPTION '.test_info.description |= $testdescription'
#echo "TEST_ID:" $TEST_ID
#cat $tc | jq '.info.name'
done
| true |
ebc3c0ff711b7f897d642833d0178aca115b1ee3 | Shell | concourse/ci | /tasks/scripts/fly-build | UTF-8 | 775 | 3.40625 | 3 | [] | no_license | #!/bin/bash
# vim: set ft=sh
# Build the Concourse `fly` CLI for the host GOOS/GOARCH and package it as
# fly-<platform>-<arch>.tgz (plus a .sha1 checksum) under ./fly-<platform>.
set -e -x
export GOPATH=$PWD/gopath
export PATH=$PWD/gopath/bin:$PATH
platform="$(go env GOOS)"
arch="$(go env GOARCH)"
output="$PWD/fly-${platform}"
ldflags=""
# If a pinned release version was provided as a build input, stamp it into
# the binary via the linker.
if [ -e final-version/version ]; then
final_version="$(cat final-version/version)"
ldflags="-X github.com/concourse/concourse.Version=${final_version}"
fi
tags=""
platform_flags=""
pushd concourse
# Force a statically linked binary.
ldflags+=' -extldflags "-static"'
# Darwin builds enable cgo plus the osusergo tag and disable VCS stamping —
# presumably for macOS user lookups; confirm against concourse CI docs.
if [[ "$platform" == "darwin" ]]; then
export CGO_ENABLED=1
tags+=' osusergo'
platform_flags+='-buildvcs=false'
fi
go build -a -tags "$tags" -ldflags "$ldflags" $platform_flags -o $output/fly ./fly
popd
pushd $output
archive=fly-$platform-$arch.tgz
tar -czf $archive fly
shasum "$archive" > "${archive}.sha1"
popd
| true |
93223cab2b993370928b17633f2f34d0c26c51aa | Shell | moravianlibrary/mapseries | /catalog/docker.sh | UTF-8 | 203 | 2.5625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -e
# Static-content directory inside the JBoss installation; JBOSS_HOME is
# expected in the environment (container image) — TODO confirm.
TARGET_DIR=$JBOSS_HOME/static
cd /build/catalog
# Build the front-end (npm dependencies + gulp pipeline), then publish dist/.
npm install
gulp
mkdir -p $TARGET_DIR/catalog
cp -r dist/* $TARGET_DIR/catalog
# clean up after yourself
cd /
rm -rf /build/catalog
| true |
432a0e29702b00488d95e86469716ea2bf6ec4b4 | Shell | jimstedman/prezto | /runcoms/zshrc | UTF-8 | 537 | 2.90625 | 3 | [
"MIT"
] | permissive | #
# Executes commands at the start of an interactive session.
#
# Authors:
# Sorin Ionescu <sorin.ionescu@gmail.com>
#
# Source Prezto.
if [[ -s "${ZDOTDIR:-$HOME}/.zprezto/init.zsh" ]]; then
source "${ZDOTDIR:-$HOME}/.zprezto/init.zsh"
fi
# Customize to your needs...
wd() {
. ~/bin/wd/wd.sh
}
alias ll="ls -altrh"
# now vim can use !s and !q
alias vim="stty stop '' -ixoff ; vim"
ttyctl -f
# remap caps to ESC
xmodmap -e 'clear Lock' -e 'keycode 0x42 = Escape'
wd() {
. ~/bin/wd/wd.sh
}
export GOPATH=~/dev/gocode
export PATH=$PATH:$GOPATH/bin
| true |
67c70866ea22426c7e210a5cd8479c38137bae50 | Shell | uva-bi-sdad/iarpa_embers | /code/consolidate-score-threshold1.sh | UTF-8 | 639 | 2.671875 | 3 | [] | no_license | #!/bin/bash
#PBS -lwalltime=25:00:00
#PBS -lnodes=1:ppn=1
#PBS -W group_list=ndssl
#PBS -q ndssl_q
#PBS -j oe
#PBS -o pbs.log
## Calculate the number of processors requested so we can
## automatically fill in the "np" parameter for mpirun commands
cd $PBS_O_WORKDIR
NUM_PROCS=`/bin/cat $PBS_NODEFILE | /usr/bin/wc -l | /bin/sed "s/ //g"`
. /etc/profile.d/modules.sh
module add ndssl/networkx/1.6
# Consolidate scores for every twitter input file. ${f:73} strips a fixed
# 73-character path prefix so only the bare filename feeds the "-cons"
# output name — presumably the length of the code/ directory path; TODO
# confirm the offset if paths change.
for f in /home/gkorkmaz/git/iarpa_embers/code/twitt*
do
python /home/gkorkmaz/git/iarpa_embers/code/consolidateScores.py /home/gkorkmaz/git/iarpa_embers/code/hashtags.txt "$f" "/home/gkorkmaz/git/iarpa_embers/code/${f:73}-cons"
done
| true |
8b691e1f5d86f43ba29ec698adae151c1579e282 | Shell | petronny/aur3-mirror | /sks-hg/sks.install | UTF-8 | 716 | 3.265625 | 3 | [] | no_license | post_install() {
echo -n "adding sks system group... "
groupadd -r sks && echo -n "done."
echo
echo -n "adding sks system user... "
useradd -c "Synchronizing OpenPGP Key Server" -r -d /var/lib/sks -g sks -s /bin/bash sks && echo -n "done."
echo
mkdir -p /var/run/sks
chown sks:sks /var/run/sks
chmod 775 /var/run/sks
mkdir -p /var/log/sks
chown sks:sks /var/log/sks
mkdir -p /var/lib/sks
chown sks:sks /var/lib/sks
mkdir -p /var/spool/sks
chown sks:sks /var/spool/sks
}
post_remove() {
    # Uninstall hook: drop the sks system account and its runtime directory.
    # Log, spool and data directories are deliberately kept for inspection
    # (see the final message).
    printf '\nremoving sks system user... '
    if userdel sks; then
        echo "done."
    fi
    rm -r /var/run/sks
    echo "Not removing tmp and log directories"
}
op=$1
shift
$op $*
# vim: ft=sh ts=2 sw=2
| true |
84372e175d78e31f60895fb1326d8ac362e31d5c | Shell | mlutfy/nagios-plugins | /test/check_ddos.bats | UTF-8 | 7,312 | 2.8125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bats
load test_helper
@test 'Test check_ddos.sh when ok' {
[[ $OS != 'Linux' ]] && skip 'Skip - not on Linux'
local netstat_output='Active Internet connections (servers and established)
Proto Recv-Q Send-Q Local Address Foreign Address State
tcp 0 0 0.0.0.0:111 0.0.0.0:* LISTEN
tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN
tcp 0 0 127.0.0.1:25 0.0.0.0:* LISTEN
tcp 0 0 0.0.0.0:36313 0.0.0.0:* LISTEN
tcp 0 0 10.0.2.15:22 10.0.2.2:56870 ESTABLISHED
tcp6 0 0 :::22 :::* LISTEN
tcp6 0 0 ::1:25 :::* LISTEN
udp 0 0 0.0.0.0:68 0.0.0.0:*
udp 0 0 0.0.0.0:58567 0.0.0.0:*
udp 0 0 0.0.0.0:111 0.0.0.0:*
udp 0 0 0.0.0.0:756 0.0.0.0:*
udp 0 0 10.0.2.15:123 0.0.0.0:*
udp 0 0 127.0.0.1:123 0.0.0.0:*
udp 0 0 0.0.0.0:123 0.0.0.0:*
udp6 0 0 fe80::a00:27ff:fe06:123 :::*
udp6 0 0 ::1:123 :::*
udp6 0 0 :::123 :::*
Active UNIX domain sockets (servers and established)
Proto RefCnt Flags Type State I-Node Path
unix 2 [ ACC ] STREAM LISTENING 3299 /var/run/acpid.socket
unix 2 [ ] DGRAM 1923 @/org/kernel/udev/udevd
unix 6 [ ] DGRAM 3260 /dev/log
unix 3 [ ] STREAM CONNECTED 33200
unix 3 [ ] STREAM CONNECTED 33199
unix 2 [ ] DGRAM 33198
unix 2 [ ] DGRAM 3590
unix 2 [ ] DGRAM 3354
unix 2 [ ] DGRAM 3296
unix 3 [ ] DGRAM 1928
unix 3 [ ] DGRAM 1927'
stub netstat "$netstat_output"
run check_ddos.sh -w 42 -c 1337
[ "$status" -eq 0 ]
echo "$output" | grep 'No DDOS detected (0 / 42)'
}
@test 'Test check_ddos.sh when warning' {
[[ $OS != 'Linux' ]] && skip 'Skip - not on Linux'
local netstat_output='Active Internet connections (servers and established)
Proto Recv-Q Send-Q Local Address Foreign Address State
tcp 0 0 0.0.0.0:111 0.0.0.0:* LISTEN
tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN
tcp 0 0 127.0.0.1:25 0.0.0.0:* LISTEN
tcp 0 0 0.0.0.0:36313 0.0.0.0:* LISTEN
tcp 0 0 10.0.2.15:22 10.0.2.2:56870 ESTABLISHED
tcp 0 0 10.0.2.15:22 10.0.2.2:56871 SYN_RECV
tcp 0 0 10.0.2.15:22 10.0.2.2:56872 SYN_RECV
tcp6 0 0 :::22 :::* LISTEN
tcp6 0 0 ::1:25 :::* LISTEN
udp 0 0 0.0.0.0:68 0.0.0.0:*
udp 0 0 0.0.0.0:58567 0.0.0.0:*
udp 0 0 0.0.0.0:111 0.0.0.0:*
udp 0 0 0.0.0.0:756 0.0.0.0:*
udp 0 0 10.0.2.15:123 0.0.0.0:*
udp 0 0 127.0.0.1:123 0.0.0.0:*
udp 0 0 0.0.0.0:123 0.0.0.0:*
udp6 0 0 fe80::a00:27ff:fe06:123 :::*
udp6 0 0 ::1:123 :::*
udp6 0 0 :::123 :::*
Active UNIX domain sockets (servers and established)
Proto RefCnt Flags Type State I-Node Path
unix 2 [ ACC ] STREAM LISTENING 3299 /var/run/acpid.socket
unix 2 [ ] DGRAM 1923 @/org/kernel/udev/udevd
unix 6 [ ] DGRAM 3260 /dev/log
unix 3 [ ] STREAM CONNECTED 33200
unix 3 [ ] STREAM CONNECTED 33199
unix 2 [ ] DGRAM 33198
unix 2 [ ] DGRAM 3590
unix 2 [ ] DGRAM 3354
unix 2 [ ] DGRAM 3296
unix 3 [ ] DGRAM 1928
unix 3 [ ] DGRAM 1927'
stub netstat "$netstat_output"
run check_ddos.sh -w 2 -c 4
[ "$status" -eq 1 ]
echo "$output" | grep 'DDOS attack !
Top 10 SYN_RECV sources :
2 SYN_RECV'
}
@test 'Test check_ddos.sh when critical' {
[[ $OS != 'Linux' ]] && skip 'Skip - not on Linux'
local netstat_output='Active Internet connections (servers and established)
Proto Recv-Q Send-Q Local Address Foreign Address State
tcp 0 0 0.0.0.0:111 0.0.0.0:* LISTEN
tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN
tcp 0 0 127.0.0.1:25 0.0.0.0:* LISTEN
tcp 0 0 0.0.0.0:36313 0.0.0.0:* LISTEN
tcp 0 0 10.0.2.15:22 10.0.2.2:56870 ESTABLISHED
tcp 0 0 10.0.2.15:22 10.0.2.2:56871 SYN_RECV
tcp 0 0 10.0.2.15:22 10.0.2.2:56872 SYN_RECV
tcp6 0 0 :::22 :::* LISTEN
tcp6 0 0 ::1:25 :::* LISTEN
udp 0 0 0.0.0.0:68 0.0.0.0:*
udp 0 0 0.0.0.0:58567 0.0.0.0:*
udp 0 0 0.0.0.0:111 0.0.0.0:*
udp 0 0 0.0.0.0:756 0.0.0.0:*
udp 0 0 10.0.2.15:123 0.0.0.0:*
udp 0 0 127.0.0.1:123 0.0.0.0:*
udp 0 0 0.0.0.0:123 0.0.0.0:*
udp6 0 0 fe80::a00:27ff:fe06:123 :::*
udp6 0 0 ::1:123 :::*
udp6 0 0 :::123 :::*
Active UNIX domain sockets (servers and established)
Proto RefCnt Flags Type State I-Node Path
unix 2 [ ACC ] STREAM LISTENING 3299 /var/run/acpid.socket
unix 2 [ ] DGRAM 1923 @/org/kernel/udev/udevd
unix 6 [ ] DGRAM 3260 /dev/log
unix 3 [ ] STREAM CONNECTED 33200
unix 3 [ ] STREAM CONNECTED 33199
unix 2 [ ] DGRAM 33198
unix 2 [ ] DGRAM 3590
unix 2 [ ] DGRAM 3354
unix 2 [ ] DGRAM 3296
unix 3 [ ] DGRAM 1928
unix 3 [ ] DGRAM 1927'
stub netstat "$netstat_output"
run check_ddos.sh -w 1 -c 2
[ "$status" -eq 2 ]
echo "$output" | grep 'DDOS attack !
Top 10 SYN_RECV sources :
2 SYN_RECV'
}
| true |
a291cd2544a8391882952505a134c0df85550368 | Shell | codeb2cc/zsh | /zshrc | UTF-8 | 3,772 | 3.1875 | 3 | [] | no_license | # Lines configured by zsh-newuser-install
HISTFILE=~/.histfile
HISTSIZE=20480
SAVEHIST=20480
bindkey -e
# End of lines configured by zsh-newuser-install
# The following lines were added by compinstall
zstyle :compinstall filename '/home/codeb2cc/.zshrc'
autoload -Uz compinit
compinit
# End of lines added by compinstall
# Search history
bindkey "^[[A" history-beginning-search-backward
bindkey "^[[B" history-beginning-search-forward
# Alias
alias ..='cd ..'
alias vi='vim'
alias ls='ls --color=auto'
alias ll='ls -alF'
alias la='ls -A'
alias l='ls -CF'
alias c='clear'
alias rm='rm -i'
alias mv='mv -i'
alias du='du -a -h --max-depth=1'
alias df='df -h'
alias grep='grep --color'
alias sudo='sudo '
# Path shorcuts
cdpath=(~)
# Go Lang
GOROOT=$HOME/Local/lib/go
# User PATH
PATH=$HOME/Local/bin:$HOME/Local/sbin:$HOME/.local/bin:$HOME/.local/sbin:$GOROOT/bin:$PATH
# Key Binding
bindkey "^[[1~" beginning-of-line
bindkey "^[[4~" end-of-line
bindkey "^[[2~" overwrite-mode
bindkey "^[[3~" delete-char
bindkey "\e[1~" beginning-of-line # Home
bindkey "\e[4~" end-of-line # End
bindkey "\e[5~" beginning-of-history # PageUp
bindkey "\e[6~" end-of-history # PageDown
bindkey "\e[2~" quoted-insert # Ins
bindkey "\e[3~" delete-char # Del
bindkey "\e[5C" forward-word
bindkey "\e[5D" backward-word
bindkey "\e\e[C" forward-word
bindkey "\e\e[D" backward-word
bindkey "\e[Z" reverse-menu-complete # Shift+Tab
# Colored man pages
export LESS='-R'
man() {
env \
LESS_TERMCAP_mb=$(printf "\e[1;31m") \
LESS_TERMCAP_md=$(printf "\e[1;31m") \
LESS_TERMCAP_me=$(printf "\e[0m") \
LESS_TERMCAP_se=$(printf "\e[0m") \
LESS_TERMCAP_so=$(printf "\e[1;44;33m") \
LESS_TERMCAP_ue=$(printf "\e[0m") \
LESS_TERMCAP_us=$(printf "\e[1;32m") \
man "$@"
}
# Helpers
function git_prompt_info() {
ref=$(git symbolic-ref --short HEAD 2> /dev/null) || return
echo " git:${ref}"
}
function hg_prompt_info() {
id=$(hg id -b 2> /dev/null) || return
echo " hg:${id}"
}
function virtual_env_info() {
env=$(basename $VIRTUAL_ENV 2> /dev/null) || return
echo "[$env]"
}
function ip_info() {
ip=$(/sbin/ifconfig eth0 | ack 'inet ([0-9\.]+)' --output="\$1")
echo "$ip"
}
# Prompt configuration
# {
function precmd {
local TERMWIDTH
(( TERMWIDTH = ${COLUMNS} - 1 ))
# Truncate the path if it's too long.
PR_FILLBAR=""
PR_PWDLEN=""
PR_IP=$(ip_info)
local promptsize=${#${(%):- %n@%m:%l -}}
local pwdsize=${#${(%):-%~}}
if [[ "$promptsize + $pwdsize" -gt $TERMWIDTH ]]; then
((PR_PWDLEN=$TERMWIDTH - $promptsize))
else
PR_BARCHAR=" "
PR_FILLBAR="\${(l.(($TERMWIDTH - ($promptsize + $pwdsize)))..${PR_BARCHAR}.)}"
fi
PR_GIT=$(git_prompt_info)
PR_HG=$(hg_prompt_info)
PR_ENV=$(virtual_env_info)
}
setprompt () {
# Need this so the prompt will work.
setopt prompt_subst
# See if we can use colors.
autoload colors zsh/terminfo
if [[ "$terminfo[colors]" -ge 8 ]]; then
colors
fi
for color in RED GREEN YELLOW BLUE MAGENTA CYAN WHITE; do
eval PR_$color='%{$terminfo[bold]$fg[${(L)color}]%}'
eval PR_LIGHT_$color='%{$fg[${(L)color}]%}'
(( count = $count + 1 ))
done
PR_NO_COLOUR="%{$terminfo[sgr0]%}"
PROMPT='$PR_GREEN┌ %(!.%SROOT%s.%n)$PR_GREEN@%m:%l $PR_CYAN\
${(e)PR_FILLBAR}$PR_CYAN%$PR_PWDLEN<...<%~%<<$PR_CYAN\
$PR_GREEN└ %D{%H:%M:%S}\
$PR_YELLOW$PR_GIT$PR_HG\
%(?.. $PR_LIGHT_RED%?)\
$PR_LIGHT_CYAN %(!.$PR_RED.$PR_WHITE)%# $PR_NO_COLOUR'
RPROMPT=' $PR_MAGENTA$PR_ENV$PR_CYAN$PR_NO_COLOUR'
PS2='($PR_LIGHT_GREEN%_$PR_CYAN)$PR_NO_COLOUR '
}
setprompt
# }
# Virtualenvwrapper setting
WORKON_HOME=~/Virtual
source /usr/bin/virtualenvwrapper.sh
| true |
08a27f6a5e5d4b126f2a06b5dcf0bd5b2b929475 | Shell | gianose/gitpullrepo | /test/test_gpeclr.sh | UTF-8 | 10,031 | 3.34375 | 3 | [] | no_license | #!/usr/bin/env bash
# Author: Gregory Rose
# Createdi: 20170410
# Name: test_gpeclr.sh
# Relative Working Directory: ${NAMESPACE}/test/test_gpeclr.sh
declare TST_GPECLR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
. ${TST_GPECLR_DIR}/../lib/const.sh
. ${NAMESPACE}lib/unittest.sh
. ${NAMESPACE}lib/excp.sh
################################################################################
# In order utilize `test_gpeclr.sh` to properly test `gpeclr` please follow #
# the instruction provided here '' in order to add your ssh key to gitlab #
# and to create a test repository. Following that set 'TST_GPECLR_SSH_REPO' #
# to the ssh URL of your test repo, and 'TST_GPECLR_REPO' to the http URL of #
# your test repo #
################################################################################
declare TST_GPECLR_REPO='https://gitlab.ins.risk.regn.net/RoseGr01/tst_gpeclr.git'
declare TST_GPECLR_SSH_REPO='git@gitlab.ins.risk.regn.net:RoseGr01/tst_gpeclr.git'
#################################################################################
# Futhermore please set the following variable 'TST_GPECLR_CONTACT' to your #
# email address. #
#################################################################################
declare TST_GPECLR_CONTACT='gregory.rose@lexisnexis.com'
#################################################################################
declare TST_GPECLR="${NAMESPACE}bin/gpeclr"
declare TST_GPECLR_BAD_REPO='https://gitlab.ins.risk.regn.net/FakeUser01/NotReal.git'
declare TST_GPECLR_BRANCH='master'
declare TST_GPECLR_BAD_BRANCH='foo'
declare TST_GPECLR_USAGE_OUT="${NAMESPACE}tmp/tst_gpeclr_usage.out"
declare TST_GPECLR_CONFIG="${NAMESPACE}config/gpeclr.cnf"
declare TST_GPECLR_CONFIG_BACKUP="${TST_GPECLR_CONFIG}.backup"
declare TST_GPECLR_DEST="${NAMESPACE}tmp/good"
declare TST_GPECLR_REPO_DEST="${NAMESPACE}tmp/repo"
declare TST_GPECLR_BAD_CLONE_DEST="${NAMESPACE}tmp/bad_clone"
declare TST_GPECLR_BAD_PULL_DEST="${NAMESPACE}tmp/bad_pull"
$TST_GPECLR > ${TST_GPECLR_USAGE_OUT}
declare TST_GPECLR_USAGE_MD5=$(md5sum ${TST_GPECLR_USAGE_OUT} | awk '{print $1}')
declare -a TST_GPECLR_PUT_USAGE=(
"gpeclr::${TST_GPECLR} - Zero input params;0;"
"gpeclr::${TST_GPECLR} - Just the repo;0;-r;${TST_GPECLR_REPO}"
"gpeclr::${TST_GPECLR} - Just the repo and the branch;0;-r;${TST_GPECLR_REPO};-b;${TST_GPECLR_BRANCH}"
"gpeclr::${TST_GPECLR} - Just the log directory;0;-l;${NAMESPACE}logs"
"gpeclr::${TST_GPECLR} - '-C' option followed by a additional param;0;-C;foo"
"gpeclr::${TST_GPECLR} - '-h' option followed by a additional param;0;-h;bar"
)
declare -a TST_GPECLR_THROW_ERR=(
"gpeclr::${TST_GPECLR} - The destination option (-d) with no directory following;113;-d"
"gpeclr::${TST_GPECLR} - The log option (-l) with no directory following;113;-l"
"gpeclr::${TST_GPECLR} - The repository option (-r) with no url following;113;-r"
"gpeclr::${TST_GPECLR} - The branch option (-b) with no branch following;113;-b"
"gpeclr::${TST_GPECLR} - The destination option (-d) with a non existant destination;113;-d;/not/real/dir"
"gpeclr::${TST_GPECLR} - The log option (-l) with a non existant log directory;113;-l;/not/real/dir"
"gpeclr::${TST_GPECLR} - The repo option (-r) with a non existant url;113;-r;${TST_GPECLR_BAD_REPO}"
"gepclr::${TST_GPECLR} - The branch option (-b) without the a corresponding repo;113;-r;${TST_GPECLR_BAD_BRANCH}"
"gpeclr::${TST_GPECLR} - The branch option (-b) with a non existant branch;113;-r;${TST_GPECLR_REPO};-b;${TST_GPECLR_BAD_BRANCH}"
)
declare -a TST_GPECLR_FATAL_ERR=(
"gpeclr::${TST_GPECLR} - The config option (-C) with gpeclr.cnf missing;107;-C"
"gpeclr::${TST_GPECLR} - The destination option (-d) with a valid destination, but gpeclr.cnf missing;107;-d ${TST_GPECLR_DEST}"
"gpeclr::${TST_GPECLR} - Attempt to clone to non-empty destination dir;107;-d ${TST_GPECLR_BAD_CLONE_DEST} -r ${TST_GPECLR_REPO}"
"gpeclr::${TST_GPECLR} - Attempt to pull to inaccessible local repo;107;-d ${TST_GPECLR_BAD_PULL_DEST} -r ${TST_GPECLR_REPO}"
)
tst_gpeclr_neg() {
#printf "%s\n" "USAGE"
#runCustom TST_GPECLR_PUT_USAGE[@] tst_gpeclr_put_usage
#printf "%s\n" "ERROR InvalidArgument"
#runMultiInput TST_GPECLR_THROW_ERR[@]
#printf "%s\n" "CORRECT"
tst_gpeclr_prep
#printf "%s\n" "ERROR FatalError"
#runMultiInput TST_GPECLR_FATAL_ERR[@]
#tst_gpeclr_clean
#printf "%s\n" "CONFIG ERROR InvalidArgument"
#tst_gpeclr_cnf_arg_err
}
tst_gpeclr_put_usage() {
declare -a params=("${!1}")
local _f=${params[0]%% - *}; _f=${_f##*::}
[ -s ${TST_GPECLR_USAGE_OUT} ] && truncate -s 0 ${TST_GPECLR_USAGE_OUT}
case ${#params[@]} in
2) $($_f &> ${TST_GPECLR_USAGE_OUT})
;;
3) $($_f ${params[2]} &> ${TST_GPECLR_USAGE_OUT})
;;
4) $($_f ${params[2]} ${params[3]} &> ${TST_GPECLR_USAGE_OUT})
;;
5) $($_f ${params[2]} ${params[3]} ${params[4]} &> ${TST_GPECLR_USAGE_OUT})
;;
6) $($_f ${params[2]} ${params[3]} ${params[4]} ${params[5]} &> ${TST_GPECLR_USAGE_OUT})
;;
esac
local _md5=$(md5sum ${TST_GPECLR_USAGE_OUT} | awk '{print $1}')
[ ${_md5} == ${TST_GPECLR_USAGE_MD5} ] && return ${?}
return 1
}
tst_gpeclr_prep() {
_bad_clone_dest() {
[ -d "${TST_GPECLR_BAD_CLONE_DEST}" ] || {
mkdir "${TST_GPECLR_BAD_CLONE_DEST}"
touch "${TST_GPECLR_BAD_CLONE_DEST}/tst_gpeclr.txt"
}
}
case "${FUNCNAME[1]}" in
'tst_gpeclr_neg' )
[ -s ${TST_GPECLR_CONFIG} ] && mv "${TST_GPECLR_CONFIG}" "${TST_GPECLR_CONFIG_BACKUP}"
_bad_clone_dest
[ -d "${TST_GPECLR_BAD_PULL_DEST}" ] || {
mkdir ${TST_GPECLR_BAD_PULL_DEST}
${TST_GPECLR} -d ${TST_GPECLR_BAD_PULL_DEST} -r ${TST_GPECLR_REPO}
chmod -w ${TST_GPECLR_BAD_PULL_DEST}
sleep .2
}
[ -d "${TST_GPECLR_DEST}" ] || {
mkdir "${TST_GPECLR_DEST}"
${TST_GPECLR} -d ${TST_GPECLR_DEST} -r ${TST_GPECLR_REPO}
assertEquals "gpeclr::${TST_GPECLR} - Cloning the repo '${TST_GPECLR_REPO}' to '${TST_GPECLR_DEST}'" 0 ${?}
sleep .2
}
tst_gpeclr_alt_repo
${TST_GPECLR} -d ${TST_GPECLR_DEST} -r ${TST_GPECLR_REPO}
assertEquals "gpeclr::${TST_GPECLR} - Pull changes from repo '${TST_GPECLR_REPO}' to '${TST_GPECLR_DEST}'" 0 ${?}
;;
esac
}
tst_gpeclr_alt_repo() {
local _dir
[ -d "${TST_GPECLR_REPO_DEST}" ] || {
mkdir "${TST_GPECLR_REPO_DEST}"
${TST_GPECLR} -r ${TST_GPECLR_SSH_REPO} -d ${TST_GPECLR_REPO_DEST} && {
echo "#### Changed" >> "${TST_GPECLR_REPO_DEST}/README.md"
[[ ${TST_GPECLR_REPO_DEST:$((${#TST_GPECLR_REPO_DEST}-1))} == "/" ]] && _dir=${TST_GPECLR_REPO_DEST}'.git' || _dir=${TST_GPECLR_REPO_DEST}'/.git'
git --git-dir=${_dir} --work-tree=${TST_GPECLR_REPO_DEST} add ${TST_GPECLR_REPO_DEST}/README.md
git --git-dir=${_dir} --work-tree=${TST_GPECLR_REPO_DEST} commit -m 'Maked Change to README.md' --quiet
git --git-dir=${_dir} --work-tree=${TST_GPECLR_REPO_DEST} push origin master --quiet
}
}
}
tst_gpeclr_clean() {
case "${FUNCNAME[1]}" in
'tst_gpeclr_neg' )
[ -s "${TST_GPECLR_USAGE_OUT}" ] && rm "${TST_GPECLR_USAGE_OUT}"
[ -s "${TST_GPECLR_CONFIG_BACKUP}" ] && mv "${TST_GPECLR_CONFIG_BACKUP}" "${TST_GPECLR_CONFIG}"
[ -d "${TST_GPECLR_BAD_CLONE_DEST}" ] && rm -rf "${TST_GPECLR_BAD_CLONE_DEST}"
[ -d "${TST_GPECLR_BAD_PULL_DEST}" ] && {
chmod +w ${TST_GPECLR_BAD_PULL_DEST}
rm -rf ${TST_GPECLR_BAD_PULL_DEST}
}
[ -d "${TST_GPECLR_DEST}" ] && rm -rf "${TST_GPECLR_DEST}"
[ -d "${TST_GPECLR_REPO_DEST}" ] && rm -rf "${TST_GPECLR_REPO_DEST}"
;;
esac
}
tst_gpeclr_cnf_arg_err() {
declare -a _original
local _cnf=$(cat ${TST_GPECLR_CONFIG} | grep -P -i '^\w+:' | tr '\r\n' '|')
IFS='|' read -r -a _original <<< ${_cnf}
declare -a _bad_params=(
"^contact:.*;contact: ${TST_GPECLR_CONTACT}"
"^logs:.*;logs: /not/real/log/dir;gpeclr::${TST_GPECLR} - The directory for logs set in config/gpeclr.cnf is nonexistent."
"^destination:.*;destination: /not/rea/dest/dir;gpeclr::${TST_GPECLR} - The directory for destination set in config/gpeclr.cnf is nonexistent."
"^repository:.*;repository: ${TST_GPECLR_BAD_REPO}/;gpeclr::${TST_GPECLR} - The URL provided for the repo in config/gpeclr.cnf is incorrect."
"^branch:.*;branch: foo;gpeclr::${TST_GPECLR} - The branch provided for the repo in config/gpeclr.cnf is incorrect."
)
for i in "${!_bad_params[@]}"; do
declare -a _params
IFS=';' read -r -a _params <<< ${_bad_params[$i]}
sed -ie "s~${_params[0]}~${_params[1]}~" "${TST_GPECLR_CONFIG}"
[ ${i} -eq 0 ] && { _regex=${_params[0]}; _contact=${_params[1]}; continue; }
${TST_GPECLR} -C
assertEquals "${_params[2]}" 113 ${?}
sleep .2
sed -ie "s~${_params[0]}~${_original[${i}]}~" ${TST_GPECLR_CONFIG}
done
sed -ie "s~${_regex}~${_contact}~" ${TST_GPECLR_CONFIG}
}
tst_gpeclr_cnf_ftl_err() {
declare -a _original
# Backup the original configuration file.
local _cnf=$(cat ${TST_GPECLR_CONFIG} | grep -P -i '^\w+:' | tr '\r\n' '|')
IFS='|' read -r -a _original <<< ${_cnf}
# Replace the repo currently in the config file with the test repo.
sed -ie "s~^repository:.*~repository: ${TST_GPECLR_REPO}~" ${TST_GPECLR_CONFIG}
declare -a _cnf_cor=(
"gpeclr::${TST_GPECLR} - Cloning the repo '${TST_GPECLR_REPO}' to '${TST_GPECLR_DEST}';0;${TST_GPECLR_DEST}"
)
# Replace the repo in the config file with the orginal repo.
sed -ie "s~^repository:.*~${_original[3]}~" ${TST_GPECLR_CONFIG}
}
tst_gpeclr_cnf_work() {
declare -a _params
IFS=';' read -r -a _params <<< ${1}
[ -d "${_params[2]}" ] || mkdir "${_params[2]}"
sed -ie "s~^destination:.*~destination: ${_params[2]}~" ${TST_GPECLR_CONFIG}
[ ${_params[3]} ] && $_params[3]
${TST_GPECLR} -C
assertEquals "${_params[0]}" ${_params[1]} ${?}
sed -ie "s~^destination:.*~destination: ${_original[2]}~" ${TST_GPECLR_CONFIG}
}
# 1. unless TST_GPECLR_REPO_DEST exist, make DIR TST_GPECLR_REPO_DEST
# 2. Call tst_gpeclr_alt_repo with TST_GPECLR_REPO_DEST
# 3.
#tst_gpeclr_neg
tst_gpeclr_cnf_ftl_err
| true |
0d0a11681d16ac36a0476b619c73d6809ce959e5 | Shell | pawelma/ubob_bot | /run.sh | UTF-8 | 237 | 3.0625 | 3 | [] | no_license | #!/bin/bash
# Run the bot only when a settings file exists; otherwise tell the user how
# to create one from the shipped example.
if [ -e configuration/settings.rb ];
then
echo "Configuration found, running ubob bot"
ruby app.rb 2>&1
else
echo "No configuration found! adjust & cp configuration/settings.rb.example -> configuration/settings.rb"
fi
| true |
f19f97adea2e3c762dfc487f85cd71a367266b05 | Shell | emphanos/quickstart-build | /quickimage-clean.sh | UTF-8 | 1,090 | 3.609375 | 4 | [] | no_license | #!/bin/bash
echo "*** Cleaning $1"
# ############################################## Get settings
# settings.sh presumably defines QS_OUTPUT and the QUICK{TEST,PROD,DEV}_*
# names used below — verify.
. settings.sh
# ############################################## Functions
## Destroy a vm in Virtualbox. Suppress "does not exist" errors.
qs_vbox_clean() {
echo "** Removing output virtualbox image: $1 ..."
# Power the VM off first and give VirtualBox a moment before unregistering.
vboxmanage controlvm "$1" poweroff 2> /dev/null; sleep 5
vboxmanage unregistervm "$1" --delete 2> /dev/null
echo "** ... Done"
}
## Clean output files
qs_output_clean() {
    # Delete the packaged .box and .ova artifacts for image "$1" from the
    # $QS_OUTPUT directory (provided by settings.sh).
    local base="$1"
    local artifact
    echo "** Removing output files:" "$QS_OUTPUT/$base.box" "$QS_OUTPUT/$base.ova"
    for artifact in "$QS_OUTPUT/$base.box" "$QS_OUTPUT/$base.ova"; do
        rm -f -- "$artifact"
    done
    echo "** ... Done"
}
# ############################################## Clean Build is cleaning vagrant
# Dispatch on the requested flavor; each case removes both the packaged
# artifacts and the corresponding VirtualBox VM.
if [ "$1" == "test" ]; then
qs_output_clean "$QUICKTEST_FILEBASE"
qs_vbox_clean "$QUICKTEST_VBOX"
elif [ "$1" == "prod" ]; then
qs_output_clean "$QUICKPROD_FILEBASE"
qs_vbox_clean "$QUICKPROD_VBOX"
elif [ "$1" == "dev" ]; then
qs_output_clean "$QUICKDEV_FILEBASE"
qs_vbox_clean "$QUICKDEV_VBOX"
else
echo " Usage: $0 [ test | prod | dev ]"
fi
| true |
47db2c712241ff7aaf42973e158f83945bfb98de | Shell | arxaqapi/licence-info | /S6/secu_info/ping_securise.sh | UTF-8 | 7,042 | 3.53125 | 4 | [] | no_license | #! /bin/bash
set -o pipefail
DIR=$(pwd)
my_id=""
declare -A NoncePour
# --- stdin/stdout pipeline helpers -------------------------------------------
# Each helper is a thin filter (reads stdin, writes stdout) so they can be
# chained with '|'.

# Decode a plain hex dump back to raw bytes.
function hex_to_binary () {
xxd -r -p
}
# Encode raw bytes from stdin as a plain hex dump.
function binary_to_hex () {
xxd -p
}
# Base64-encode stdin (openssl wraps output lines at 64 characters).
function to_base64 () {
openssl base64
}
# Decode base64 from stdin.
function from_base64 () {
openssl base64 -d
}
# Strip every newline, e.g. to flatten wrapped base64 onto one line.
function remove_newlines() {
tr -d '\n'
}
# Re-wrap a long single line at 64 columns (inverse of remove_newlines for
# base64/PEM-style payloads).
function add_newline_format() {
fold -w 64
}
# Base64-encode stdin and wrap each output line in braces, so the payload can
# be embedded in ':'-separated messages without clashing with the separator.
function group () {
to_base64 | sed -e 's/\(.*\)/{\1}/'
}
# Strip the braces added by group (first {...} per line) and decode.
function ungroup () {
sed -e 's/{\([^}]*\)}/\1/' | from_base64
}
function hash () {
sha256sum | awk '{print $1}'
}
function clefs_rsa () {
if [ -z "${1}" ] ; then
pub="clef_publique"
priv="clef_privee"
else
pub="${1}_pub"
priv="${1}"
fi
openssl genrsa -F4 -out "${priv}.pem"
openssl rsa -in "${priv}.pem" -out "${pub}.pem" -pubout
echo "la clef publique est dans ${pub}.pem"
echo "la clef privée est dans ${priv}.pem"
}
function ajoute_clef_publique () {
local message
message=(${1//:/ })
echo "-----BEGIN PUBLIC KEY-----" > "${message[0]}.pem"
echo "${message[1]}" | fold -w 64 >> "${message[0]}.pem"
echo "-----END PUBLIC KEY-----" >> "${message[0]}.pem"
}
function rsa_chiffre_pour () {
openssl rsautl -encrypt -oaep -inkey "${1}.pem" -pubin | to_base64 | remove_newlines
}
function rsa_dechiffre () {
from_base64 | openssl rsautl -decrypt -oaep -inkey clef_privee.pem
}
function rsa_signe () {
openssl rsautl -sign -inkey clef_privee.pem | to_base64
}
function rsa_signature_de () {
from_base64 | openssl rsautl -verify -inkey "${1}.pem" -pubin
}
# Join the whitespace-separated words read from stdin into one ':'-separated
# string, e.g. `echo a b c | concatenate` -> "a:b:c".
# Fix: the original declared `local -a other_args` but read into `otherargs`,
# silently creating a global and leaving the local unused; expansions are now
# quoted and `read -r` keeps backslashes literal.
function concatenate () {
    local res part
    local -a words
    read -ra words
    res="${words[0]}"
    for part in "${words[@]:1}" ; do
        res="${res}:${part}"
    done
    echo "${res}"
}
# Turn a ':'-separated record from stdin into space-separated fields
# (inverse of concatenate, for word-splitting into an array).
function projection () {
    tr ':' ' '
}
# Signature over the SHA-256 digest of stdin, as one-line base64.
function rsa_digest_creation () {
hash | rsa_signe | remove_newlines
}
# Recover the digest from a signature allegedly produced by $1.
function rsa_digest_valide_origine () {
rsa_signature_de "${1}"
}
# Read one message line from stdin and emit "{b64(message)}@<signed digest>".
function rsa_signature () {
local message
read message
echo "$(echo ${message} | group | remove_newlines)@$(echo "${message}" | rsa_digest_creation )"
}
# Verify a "{b64}@sig" record read from stdin against $1's public key and
# report (in French, on stdout) whether $1 signed the embedded message.
function rsa_signature_verification () {
local origine Mesg resultat somme_hash somme_hash_signee
origine=$1
IFS='@' read -ra Mesg
somme_hash=$(echo ${Mesg[0]} | ungroup | hash )
somme_hash_signee=$(echo "${Mesg[1]}" | rsa_digest_valide_origine "${origine}")
if [ "${somme_hash}" = "${somme_hash_signee}" ] ; then
echo "${origine} a signé le message: $(echo ${Mesg[0]} | ungroup)"
else
echo "${origine} n'a pas signé ce message"
fi
}
# Print the "ajoute_clef_publique name:key" command a peer must run to
# import our public key; also records $1 as our identity (my_id).
function publie_clef_publique() {
local id
id=$1
my_id=${id}
echo "ajoute_clef_publique ${id}:$(sed '1d; $d' clef_publique.pem | remove_newlines)"
}
# 16 random bytes as hex -- used as a nonce.
function fresh() {
xxd -l 16 -p /dev/urandom
}
# Log a protocol step as "SENDER -> RECEIVER : PAYLOAD".
# Fix: the arguments were interpolated into printf's *format* string, so any
# '%' or backslash in a message was interpreted; they are now passed as %s
# data, which prints them verbatim.
function AlicenBobPrintf() {
    printf '%s -> %s : %s\n' "${1}" "${2}" "${3}"
}
# Step 1 (initiator $1): send enc_pub($2){$1, Na} to peer $2 and log it.
function ping_securise_message1() {
local message
NoncePour[${2}]=$(fresh)
echo "envoi du nonce ${NoncePour[${2}]} à ${2}"
# message=$(echo "${id}" "${nonce}" | concatenate | rsa_chiffre_pour "${destinataire}" )
if [ ! -e "${2}.pem" ] ; then
echo "vous devez d'abord récupérer sa clef publique."
return 1
fi
message=$(echo "${1}" "${NoncePour[${2}]}" | concatenate | rsa_chiffre_pour "${2}" )
echo ${message}
# NOTE(review): the payload is encrypted a second time here, so the logged
# ciphertext differs from the one echoed above (OAEP padding is presumably
# randomized) -- confirm this duplication is intended.
AlicenBobPrintf "${1}" "${2}" $(echo "${1}" "${NoncePour[${2}]}" | concatenate | rsa_chiffre_pour "${2}" )
1>&2 printf "nonce utilisé: ${NoncePour[${2}]}\n"
return 0
}
# Step 2 (responder $1): decrypt $3, check the claimed sender $2 against the
# identity inside the message, then answer with enc_pub($2){Na, Nb}.
function ping_securise_message2 () {
local -a donnees
donnees=($(echo ${3} | rsa_dechiffre | projection ) )
if [ "${donnees[0]}" != "${2}" ] ; then
echo "émetteur annoncé: ${2}"
echo "émetteur réel: ${donnees[0]}"
return 1
fi
NoncePour[${2}]=$(fresh)
# message=$(echo "${donnees[1]}" "${nonce}" | concatenate | rsa_chiffre_pour "${2}" )
printf "émetteur: %s\nnonce : %s\nréponse (avec nonce ${NoncePour[${2}]}):\n" "${donnees[0]}" "${donnees[1]}"
AlicenBobPrintf "${1}" "${2}" $(echo "${donnees[1]}" "${NoncePour[${2}]}" | concatenate | rsa_chiffre_pour "${2}" )
# NOTE(review): ${nonce} is never assigned in this function, so this echoes
# an empty (or stale global) value -- looks like a leftover; confirm.
echo "nonce renvoyé (à vérifier avant de conclure): ${nonce}"
return 0
}
# Step 3 (initiator): decrypt the responder's {Na, Nb} from $3 and send Nb
# back.  The automatic nonce check is commented out below, so no
# verification happens here.
function ping_securise_message3 () {
local -a donnees
donnees=($(echo $3 | rsa_dechiffre | projection ))
# IFS=":" read -ra donnees <<< "${clair}"
# IFS=" "
# echo "nonce renvoyé (à vérifier avant de répondre): ${donnees[0]}"
# if [ "${donnees[0]}" != "${NoncePour[${2}]}" ] ; then
# echo "le nonce n'est pas bon"
# return 1
# fi
# message=$(echo "${donnees[1]}" | rsa_chiffre_pour "${2}" )
AlicenBobPrintf "${1}" "${2}" $(echo "${donnees[1]}" | rsa_chiffre_pour "${2}" )
return 0
}
# Final step (responder): decrypt the returned nonce and display it for a
# manual check; the automatic comparison against NoncePour is commented out.
# NOTE(review): `messsage` in the locals list is a typo and is unused.
function ping_securise_conclusion () {
local donnees clair messsage
clair=$(echo $3 | rsa_dechiffre )
echo "nonce renvoyé par $2 (à vérifier avant de conclure): ${clair}"
# if [ "${clair}" != "${NoncePour[${2}]}" ] ; then
# echo "le nonce n'est pas bon"
# return 1
# fi
return 0
}
# Interactive driver for the initiator side: asks who to talk to, sends
# message 1, then repeatedly reads the peer's pasted reply and runs step 3.
function ping_securise_initiateur () {
local id correspondant msg reponse
if [ -z "${my_id}" ] ; then
echo "Quelle est votre identité ?"
read -e my_id
fi
echo "Avec qui voulez-vous communiquer ?"
read -e correspondant
if [ ! -e "${correspondant}.pem" ] ; then
echo "Il faut d'abord récupérer sa clef publique."
else
if ! ping_securise_message1 "${my_id}" "${correspondant}" ; then
return 1
fi
echo "Quelle est la réponse ?"
read -a Msg
# Msg holds a pasted "A -> B : payload" line; the payload is Msg[4].
until ping_securise_message3 "${my_id}" "${correspondant}" "${Msg[4]}" ; do
echo -n "Erreur dans le message reçu. Entrez 'n' pour arrêter la session ?"
read reponse
if [ "${reponse}" = "n" ] ; then
return 1
fi
echo "Quelle est la réponse ?"
read -a Msg
done
fi
echo "Exécution avec ${correspondant} terminée avec succès."
return 0
}
# Interactive driver for the responder side: reads the initiator's pasted
# message, answers with step 2, then validates the final reply.
function ping_securise_repondeur () {
local id correspondant reponse
declare -a Msg
echo "Quel message vous a été adressé ?"
read -a Msg
# Msg[0]: initiator, Msg[2]: me, Msg[4]: message
if [ -z "${my_id}" ] ; then
my_id=${Msg[2]}
fi
if [ ! -e "${Msg[0]}.pem" ] ; then
echo "Il faut d'abord récupérer la clef publique de ${Msg[0]}."
return 1
fi
correspondant=${Msg[0]}
if [ "${Msg[2]}" != "${my_id}" ] ; then
echo "Ce message ne vous est pas adressé (ou changez votre identité)"
return 1
fi
echo "message recu: ||${Msg[4]}|| de ||${Msg[0]}||"
ping_securise_message2 "${Msg[2]}" "${Msg[0]}" "${Msg[4]}"
echo "Quelle est la réponse ?"
read -a Msg
until ping_securise_conclusion "${Msg[2]}" "${Msg[0]}" "${Msg[4]}" ; do
echo -n "Erreur dans le message reçu. Entrez 'n' pour arrêter la session: "
read reponse
if [ "${reponse}" = "n" ] ; then
return 1
fi
echo "Quelle est la réponse ?"
read -a Msg
done
echo "Exécution avec ${correspondant} terminée avec succès."
}
| true |
90335b5e67f4036914275c1ff1b35447e86fcefc | Shell | wmthu/paper_hard_to_measure_well | /code/git_hooks/setup_hooks.sh | UTF-8 | 476 | 3.1875 | 3 | [] | no_license | #!/bin/bash
set -euf -o pipefail
# Install the gitinfo2 hooks shipped next to this script into the repo's
# .git/hooks directory and make them executable.
# See directions in http://mirrors.ctan.org/macros/latex/contrib/gitinfo2/gitinfo2.pdf
script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
hooks_dir=$script_dir/../../.git/hooks
for hook in post-checkout post-commit post-merge; do
    cp -pi "$script_dir/$hook" "$hooks_dir/"
    chmod +x "$hooks_dir/$hook"
done
| true |
0f85748f19cb513f84bbbd100761570fe4dfc397 | Shell | AyazKhuraishi/springboot-keycloak-swagger | /service-orchestration/src/main/docker/press-release/run.sh | UTF-8 | 633 | 2.703125 | 3 | [] | no_license | #!/bin/sh
echo "********************************************************"
echo "Starting OAuth 2.0 Service on $OAUTH20_HOST:$OAUTH20_PORT"
echo "********************************************************"
# Block until the OAuth provider accepts TCP connections.
# Fix: the original wrapped nc in backticks, so the loop condition executed
# nc's (empty) *stdout* as a command instead of testing nc's exit status.
while ! nc -z "$OAUTH20_HOST" "$OAUTH20_PORT"; do sleep 3; done
echo "******* The OAuth 2.0 Provider has started"
echo "********************************************************"
echo "Starting Press Release Service on $PRESS_RELEASE_HOST:$PRESS_RELEASE_PORT"
echo "********************************************************"
java -Dserver.port="$PRESS_RELEASE_PORT" -jar /usr/share/service-orchestration/press-release-management.jar
| true |
a99198b0c1c188438f0347b07a52e34298f1add7 | Shell | ntarmos/eXO | /bin/run.sh | UTF-8 | 277 | 2.703125 | 3 | [] | no_license | #!/bin/sh
# Locate the project root relative to this launcher script.
PROJECT_ROOT=$(dirname "$0")/..
EXOROOT=$PROJECT_ROOT
MAINCLASS=ceid.netcins.exo.Frontend
EXOCP=$EXOROOT/classes:$EXOROOT/jars/freepastry:$EXOROOT/jars/eXO
# Append every jar under lib/ to the classpath.  Building the list with
# `find -exec printf` keeps paths containing spaces intact (the original
# word-split a backtick command substitution).
EXOCP=$EXOCP$(find "$EXOROOT/lib" -type f -name '*.jar' -exec printf ':%s' {} \;)
# "$@" (unlike $*) preserves the caller's argument boundaries.
java -cp "$EXOCP" "$MAINCLASS" "$@"
| true |
8d82303cec5dfd436d54157e31a53f148677403f | Shell | sanjan/shell_tools | /hsbcscript.sh | UTF-8 | 1,062 | 2.921875 | 3 | [] | no_license | #!/bin/bash
# Per-thread SMPP submit latency report from an smpp log.
# Usage: hsbcscript.sh <smpp log file (gz)>
file=$1
threads=/opt/mobileway/tmp/threads.txt
wrkdir=/opt/mobileway/tmp
#zgrep 'submit:\|submit_resp' /opt/mobileway/swiftmq/log/smppsvr_common99/smpp.log.2011-03-09.gz | sort -k3,3 | awk '{print $2" "$3" "$7}' | less
# Collect the distinct thread ids (field 3, brackets stripped) that appear
# on submit/submit_resp lines.
zgrep 'submit:\|submit_resp' "$file" | awk '{print $3}' | sort -u | tr -d "[]" > "$threads"
while read -r line
do
    # Raw submit lines for this thread, kept for manual inspection.
    zgrep "$line" "$file" | grep 'submit:' > "$wrkdir/${line}_log.txt"
    # Convert each HH:MM:SS,mmm timestamp (field 2) into milliseconds.
    zgrep "$line" "$file" | grep 'submit:\|submit_resp' | sed 's/,/:/' | awk '{print $2}' | awk -F":" '{ hour = $1 *3600000; min = $2 * 60000; sec = $3 * 1000; sum = hour + min + sec + $4; print sum }' > "$wrkdir/$line.txt"
    : > "$wrkdir/${line}_result.txt"
    # Fix: the original nested a second `while read` inside this one without a
    # matching `done` (a syntax error); a single pass computing successive
    # timestamp differences is what was clearly intended.
    value1=0
    while read -r value
    do
        expr "$value" - "$value1" >> "$wrkdir/${line}_result.txt"
        value1=$value
    done < "$wrkdir/$line.txt"
    echo "$line"
    # head -2 | tail -1 picks the 2nd largest delta, skipping the bogus first
    # difference taken against the initial value1=0.
    echo max delay = `sort -nr "$wrkdir/${line}_result.txt" | head -2 | tail -1`
    echo min delay = `sort -n "$wrkdir/${line}_result.txt" | head -1`
    echo ""
done < "$threads"
| true |
c1a138ffa134b70e6974c20a56c7f86bcc88490c | Shell | Dc-cpu-arch/holberton-system_engineering-devops | /0x04-loops_conditions_and_parsing/3-until_holberton_school | UTF-8 | 126 | 3.265625 | 3 | [] | no_license | #!/usr/bin/env bash
# prints Holberton School 10 times
i=0
while [ "$i" -lt 10 ]
do
    echo "Holberton School"
    i=$((i + 1))
done
| true |
58fc1b876b6ed208e977953990a2309068823fc1 | Shell | thomasdelhomenie/swf-scripts | /jenkins/translation/build-translation-branches.sh | UTF-8 | 3,637 | 3.046875 | 3 | [] | no_license | #!/bin/bash -eu
# Script to build Translation branches:
# * X-x.x-translation
# * X-x.x-translation-jipt
set -e
mkdir -p ${WORKSPACE}/sources
# Product repositories cloned from the exodev organisation, each checked out
# on a local copy of $GIT_PLATFORM_SOURCE_BRANCH.
arr=("gatein-portal" "platform-ui" "commons" "ecms" "social" "wiki" "forum" "calendar" "integration" "platform")
for project in "${arr[@]}"
do
echo "Clone git repo $project of origin repository"
cd ${WORKSPACE}/sources && /usr/bin/git clone git@github.com:exodev/$project.git
cd ${WORKSPACE}/sources/$project && /usr/bin/git checkout -b ${GIT_PLATFORM_SOURCE_BRANCH} origin/${GIT_PLATFORM_SOURCE_BRANCH}
done
# distributions projects in exoplatform
arr=("platform-public-distributions" "platform-private-distributions")
for project in "${arr[@]}"
do
echo "Clone git repo $project of origin repository"
cd ${WORKSPACE}/sources && /usr/bin/git clone git@github.com:exoplatform/$project.git
cd ${WORKSPACE}/sources/$project && /usr/bin/git checkout -b ${GIT_PLATFORM_SOURCE_BRANCH} origin/${GIT_PLATFORM_SOURCE_BRANCH}
done
# A \001 byte used as the sed delimiter below, so the version patterns may
# contain '/' without escaping.
SEP="`echo | tr '\n' '\001'`"
# replaceInPom <old> <new>: substitute <old> -> <new> in every pom.xml under
# sources/, skipping build output under target/.
replaceInPom(){
find ${WORKSPACE}/sources -name pom.xml -not -wholename "*/target/*" -exec sed -i "s${SEP}$1${SEP}$2${SEP}g" {} \;
}
# Rewrite the version properties in every pom from the "in Git" versions to
# the versions being built (same substitutions as before, in the same order,
# expressed as data rather than repeated calls).
replaceInPom "<version>${GATEIN_VERSION_IN_GIT}</version>" "<version>${GATEIN_VERSION_TO_BUILD}</version>"
replaceInPom "<version>${PLATFORM_VERSION_IN_GIT}</version>" "<version>${PLATFORM_VERSION_TO_BUILD}</version>"
replaceInPom "<org.gatein.portal.version>${GATEIN_VERSION_IN_GIT}</org.gatein.portal.version>" "<org.gatein.portal.version>${GATEIN_VERSION_TO_BUILD}</org.gatein.portal.version>"
for property in \
    org.exoplatform.platform-ui.version \
    org.exoplatform.commons.version \
    org.exoplatform.ecms.version \
    org.exoplatform.social.version \
    org.exoplatform.wiki.version \
    org.exoplatform.forum.version \
    org.exoplatform.calendar.version \
    org.exoplatform.integ.version \
    org.exoplatform.platform.version \
    org.exoplatform.platform.distributions.version
do
    replaceInPom "<${property}>${PLATFORM_VERSION_IN_GIT}</${property}>" "<${property}>${PLATFORM_VERSION_TO_BUILD}</${property}>"
done
| true |
96952ae49eb4afe125570fe52a8d701e0ed7f99f | Shell | platrum/module-bundler | /bundle.sh | UTF-8 | 208 | 2.59375 | 3 | [] | no_license | #!/usr/bin/env bash
set -e
# Absolute path of the directory containing this script (kept for parity
# with the original; not referenced below -- TODO confirm nothing sources
# this file and relies on it).
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Bundle the module inside the webpack image.  Quoting the mount paths and
# using "${@:2}" keeps directories/arguments containing spaces intact; every
# argument after the first is forwarded to the bundler.
docker run --rm \
  -v "$SOURCE_DIR":/app/src \
  -v "$TARGET_DIR":/assets \
  -w /app \
  webpack-module-bundle "${@:2}"
| true |
89b01d3aa7a6fbede705a9a978a106bff133cf69 | Shell | LiamBao/TextEngineExample | /bin/te_mergeindex.sh | UTF-8 | 477 | 2.65625 | 3 | [] | no_license | LIB="../trunk/lib"
TE_LIB="../trunk/dist/te.jar"
CORE_LIB="../../CICCore/trunk/dist/ciccore.jar"
TECONF="../conf"
HADOOPCONF="../hadoop_conf"
TARGET_INDEX="/home/textd/data/IndexRepo_2007"
SRC_INDEX="/home/textd/data/IndexRepo_history/"
# Build the classpath from every jar under $LIB.  `find -exec printf` keeps
# paths containing spaces intact (the original word-split a backtick
# command substitution in a for loop).
CLASSPATH=$CLASSPATH$(find "$LIB" -type f -name "*.jar" -exec printf ':%s' {} \;)
CLASSPATH=$CLASSPATH:$TE_LIB:$CORE_LIB:$TECONF:$HADOOPCONF
# "$@" forwards all caller arguments; the original enumerated $1..$9 and so
# silently dropped any arguments past the ninth.
java -Xmx512m -cp "$CLASSPATH" com.cic.textengine.IndexMerger "$TARGET_INDEX" "$@"
| true |
188b259fe7fa5f55576c2295702719c6ad91a160 | Shell | sabrehagen/dotfiles-i3 | /.config/i3/move-to-next-window-of-type.sh | UTF-8 | 823 | 3.25 | 3 | [] | no_license | WINDOW_CLASS=$1
CURRENT_WORKSPACE=$(i3-msg -t get_workspaces | jq '.[] | select(.focused==true).num')
PREVIOUS_WORKSPACES=$(i3-msg -t get_workspaces | jq '.[].num' | sort -n | awk -v current=$CURRENT_WORKSPACE '$1 < current')
NEXT_WORKSPACES=$(i3-msg -t get_workspaces | jq '.[].num' | sort -n | awk -v current=$CURRENT_WORKSPACE '$1 > current')
WORKSPACE_NUMBERS="$NEXT_WORKSPACES $PREVIOUS_WORKSPACES $CURRENT_WORKSPACE"
FOCUSED_WINDOW=$(xdotool getactivewindow 2>/dev/null || echo no-focused-window)
for WORKSPACE_NUMBER in $WORKSPACE_NUMBERS; do
MATCHING_WORKSPACE_WINDOW=$(xdotool search --desktop $(( $WORKSPACE_NUMBER - 1 )) --class $WINDOW_CLASS | grep -v $FOCUSED_WINDOW | head -1)
if [ ! -z "$MATCHING_WORKSPACE_WINDOW" ]; then
xdotool windowactivate $MATCHING_WORKSPACE_WINDOW
break
fi
done
| true |
df40441f9201fe1cc7f74ede5c2dfde79a10d67a | Shell | tracyvierra/vigilant-pancake | /bash scripting/scripts/temp_convert.sh | UTF-8 | 661 | 3.921875 | 4 | [] | no_license | #!/bin/bash
# Author: Tracy Vierra
# Date Created: 2/8/2022
# Date Modified: 2/8/2022
# Description: convert temperatures with bc (2 decimal places):
#   -c <temp>  Celsius -> Fahrenheit
#   -f <temp>  Fahrenheit -> Celsius
# Usage:
# ./temp_convert.sh -c <temp>
# ./temp_convert.sh -f <temp>
# Fix: the original ended in "donewhile getopts ..." -- a second, half-pasted
# copy of the option loop fused onto the closing `done`, which made the whole
# script a syntax error -- and its fallback branch invoked the non-existent
# command `Echo`.
while getopts "c:f:" opt; do
    case "$opt" in
        c) # convert from celsius to fahrenheit
            result=$(echo "scale=2; ($OPTARG * (9 / 5)) + 32" | bc)
            ;;
        f) # convert from fahrenheit to celsius
            result=$(echo "scale=2; ($OPTARG - 32) * (5/9)" | bc)
            ;;
        \?)
            echo "Invalid option provided"
            ;;
    esac
    echo "$result"
done
| true |
abc299aa73e2142400fe29e4cb3765df7d898fd3 | Shell | toddgator/tiberius | /library/rhel7/_hold/S30-real-time-updates.sh | UTF-8 | 1,725 | 3.015625 | 3 | [] | no_license | #!/bin/bash
# 201402071200
##
# pps_real_time_updates configuration
##
export APP_NAME=$(basename $0 | sed "s/\.sh$//g" | sed "s/^S..-//g")
##
# Memory Settings
##
##
# SDI application configuration
##
echo "$(grep JVM /opt/sdi/sdi_service_scripts/templates/sdi.${APP_NAME}.conf)
PORT=$(cat /opt/sdi/sdi_service_scripts/supplemental/ports.txt | grep ${APP_NAME} | grep ${ENVIRONMENT} | grep -v "^#" | awk ' { print $3 } ')
DEBUGPORT=$(cat /opt/sdi/sdi_service_scripts/supplemental/ports.txt | grep ${APP_NAME} | grep ${ENVIRONMENT} | grep -v "^#" | awk ' { print $5 } ')
JMXPORT=$(cat /opt/sdi/sdi_service_scripts/supplemental/ports.txt | grep ${APP_NAME} | grep ${ENVIRONMENT} | grep -v "^#" | awk ' { print $4 } ')
# URL where the current production build lives
$(grep URL /opt/sdi/sdi_service_scripts/templates/sdi.${APP_NAME}.conf)
BASEDIR=/opt
APP_NAME=${APP_NAME}
" > /etc/sdi/sdi.${APP_NAME}.conf
. /etc/sdi/sdi.${APP_NAME}.conf
ln -s /opt/sdi/sdi_service_scripts/init/sdi.${APP_NAME} /etc/init.d/sdi.${APP_NAME}
. /etc/credentials/jenkins.credfile
ARCHIVE=real-time-updates-1.0.0.jar
## Create directories for logs.thig.com to samba into
## Tested working on 11.26.13 CMH
useradd -d "/opt/${APP_NAME}" "${APP_NAME}"
mkdir -p "/opt/${APP_NAME}/logs" "/opt/${APP_NAME}/bin"
cd "/opt/${APP_NAME}/bin"
wget -q --user=${JENKINS_USER} --password=${JENKINS_PASSWORD} -O "${ARCHIVE}" "${APP_URL}"
mkdir /var/log/${APP_NAME}
grep "/var/log/${APP_NAME}" /etc/fstab || echo "/opt/${APP_NAME}/logs /var/log/${APP_NAME} bind defaults,bind 0 0" >> /etc/fstab
chown -R ${APP_NAME}:${APP_NAME} /opt/${APP_NAME}/
chmod 755 /opt/${APP_NAME}
chkconfig sdi.real-time-updates on
| true |
0b3046ad2bcf8e5cfcae5aed269404346db8cb15 | Shell | mhus/mhus-docker | /pi-hole/debian-root/etc/cont-init.d/20-start.sh | UTF-8 | 1,593 | 3.609375 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/with-contenv bash
set -e
# Default launcher for start.sh; PH_VERBOSE>0 adds shell tracing to both
# this script and the child.
bashCmd='bash -e'
if [ "${PH_VERBOSE:-0}" -gt 0 ] ; then
set -x ;
bashCmd='bash -e -x'
fi
# Optional override of the container's resolver.
if [ "${PH_NAMESERVER:-0}" != 0 ] ; then
echo nameserver ${PH_NAMESERVER} > /etc/resolv.conf
fi
$bashCmd /start.sh
# Gotta go fast, no time for gravity
if [ -n "$PYTEST" ]; then
sed -i 's/^gravity_spinup$/#gravity_spinup # DISABLED FOR PYTEST/g' "$(which gravity.sh)"
fi
gravityDBfile="/etc/pihole/gravity.db"
config_file="/etc/pihole/pihole-FTL.conf"
# make a point to mention which config file we're checking, as breadcrumb to revisit if/when pihole-FTL.conf is succeeded by TOML
echo "  Checking if custom gravity.db is set in ${config_file}"
# Honor a custom GRAVITYDB= path from pihole-FTL.conf, else keep the default.
if [[ -f "${config_file}" ]]; then
gravityDBfile="$(grep --color=never -Po "^GRAVITYDB=\K.*" "${config_file}" 2> /dev/null || echo "/etc/pihole/gravity.db")"
fi
# Schedule a gravity update at boot unless explicitly skipped -- and even
# then, force it when the gravity DB is missing (fresh volume).
if [ -z "$SKIPGRAVITYONBOOT" ] || [ ! -e "${gravityDBfile}" ]; then
if [ -n "$SKIPGRAVITYONBOOT" ];then
echo "  SKIPGRAVITYONBOOT is set, however ${gravityDBfile} does not exist (Likely due to a fresh volume). This is a required file for Pi-hole to operate."
echo "  Ignoring SKIPGRAVITYONBOOT on this occaision."
fi
echo '@reboot root PATH="$PATH:/usr/sbin:/usr/local/bin/" pihole updateGravity >/var/log/pihole_updateGravity.log || cat /var/log/pihole_updateGravity.log' > /etc/cron.d/gravity-on-boot
else
echo "  Skipping Gravity Database Update."
[ ! -e /etc/cron.d/gravity-on-boot ] || rm /etc/cron.d/gravity-on-boot &>/dev/null
fi
pihole -v
echo " Container tag is: ${PIHOLE_DOCKER_TAG}" | true |
955e41a6f982b05ff70f15748dd253426d3fa721 | Shell | joseph-long/dotfiles | /setup_workspace.sh | UTF-8 | 3,789 | 3.40625 | 3 | [] | no_license | #!/bin/bash
# Usage: curl -OL [...]/raw/setup_workspace.sh && bash setup_workspace.sh
# or: BASEDIR=/groups/jrmales/josephlong bash setup_workspace.sh
source ~/.profile
set -xo pipefail
# cd
# NOTE(review): this assignment clobbers bash's automatic OSTYPE variable;
# harmless here, but confusing -- consider a different name.
OSTYPE=$(uname)
case "$OSTYPE" in
Darwin) platform=MacOSX ;;
Linux) platform=Linux ;;
*) exit 1 ;;
esac
# Based on Linux convention
# https://unix.stackexchange.com/questions/316765/which-distributions-have-home-local-bin-in-path
source paths.sh
cd $BASEDIR
./setup_dotfiles.sh
# --- macOS: install GUI applications via Homebrew casks, each guarded so
# --- re-running the script is a no-op for already-installed apps.
if [[ $platform == "MacOSX" ]]; then
if ! [ -x "$(command -v brew)" ]; then
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)"
fi
# NOTE(review): this guard checks for `code` (VS Code's CLI) but installs
# Signal -- presumably a copy-paste slip; confirm the intended check.
if ! [ -x "$(command -v code)" ]; then
brew install --cask signal
fi
if ! [ -x "$(command -v code)" ]; then
brew install --cask visual-studio-code
fi
if ! [ -e "/Applications/Firefox.app" ]; then
brew install --cask firefox
fi
if ! [ -e "/Applications/iTerm.app" ]; then
brew install --cask iterm2
fi
if ! [ -e "/Applications/Transmission.app" ]; then
brew install --cask transmission
fi
if ! [ -e "/Applications/Slack.app" ]; then
brew install --cask slack
fi
if ! [ -e "/Applications/Utilities/XQuartz.app" ]; then
brew install --cask xquartz
fi
if ! [ -e "/Applications/Spotify.app" ]; then
brew install --cask spotify
fi
if ! [ -d "/Applications/TeX" ]; then
brew install --cask mactex
fi
if ! [ -e "/Applications/Zoom.app" ]; then
brew install --cask zoom
fi
fi
# --- Linux desktop (X11 session only): install .deb packages and apt repos.
if [[ $platform == "Linux" && ${XDG_SESSION_TYPE:-0} == x11 ]]; then
cd Downloads
# Vagrant
if ! [ -x "$(command -v vagrant)" ]; then
curl -OL https://releases.hashicorp.com/vagrant/2.2.5/vagrant_2.2.5_x86_64.deb
sudo dpkg -i vagrant_2.2.5_x86_64.deb
fi
# Slack
if ! [ -x "$(command -v slack)" ]; then
curl -OL https://downloads.slack-edge.com/linux_releases/slack-desktop-4.0.1-amd64.deb
sudo dpkg -i slack-desktop-4.0.1-amd64.deb
sudo apt --fix-broken install -y
fi
# VSCode
if ! [ -x "$(command -v code)" ]; then
curl -L https://go.microsoft.com/fwlink/?LinkID=760868 > vscode.deb
sudo dpkg -i vscode.deb
fi
# Spotify
if ! [ -x "$(command -v spotify)" ]; then
curl -sS https://download.spotify.com/debian/pubkey.gpg | sudo apt-key add -
echo "deb http://repository.spotify.com stable non-free" | sudo tee /etc/apt/sources.list.d/spotify.list
sudo apt-get update
sudo apt-get install -y spotify-client
fi
# Docker
if ! [ -x "$(command -v docker)" ]; then
sudo apt-get install -y \
apt-transport-https \
ca-certificates \
curl \
gnupg-agent \
software-properties-common
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
sudo add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) \
stable"
sudo apt-get update
sudo apt-get install -y docker-ce docker-ce-cli containerd.io
fi
# yarn
if ! [ -x "$(command -v yarn)" ]; then
curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | sudo apt-key add -
echo "deb https://dl.yarnpkg.com/debian/ stable main" | sudo tee /etc/apt/sources.list.d/yarn.list
sudo apt-get update && sudo apt-get install -y yarn
fi
cd
fi
# --- Both platforms: install Mambaforge under $BASEDIR if absent.
if [[ ! -e $BASEDIR/mambaforge ]]; then
curl -L -O https://github.com/conda-forge/miniforge/releases/latest/download/Mambaforge-$(uname)-$(uname -m).sh
bash Mambaforge-$(uname)-$(uname -m).sh -b -p $BASEDIR/mambaforge
fi
| true |
fa24d4f24e1473ef0eb9242d09624b5ea527b606 | Shell | phebous/promscale-bosh-addon-release | /jobs/promscale/templates/promscale-ctl.erb.old | UTF-8 | 928 | 3.359375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# BOSH job control script.  This file is an ERB template: the <%= p(...) %>
# placeholders are substituted from job properties before the script is
# rendered to disk, so it is not valid bash as-is.
RUN_DIR=/var/vcap/sys/run/promscale
LOG_DIR=/var/vcap/sys/log/promscale
# Holds the PID of the backgrounded promscale process.
PIDFILE=${RUN_DIR}/pid
case $1 in
start)
mkdir -p $RUN_DIR $LOG_DIR
chown -R vcap:vcap $RUN_DIR $LOG_DIR
cd /var/vcap/packages/promscale/bin
# Launch promscale in the background, appending stdout to the job log.
/var/vcap/packages/promscale/bin/promscale -db-host <%= p('promscale.db-host') %> \
-db-port <%= p('promscale.db-port') %> \
-db-name <%= p('promscale.db-name') %> \
-db-password <%= p('promscale.db-password') %> \
-db-user <%= p('promscale.db-user') %> \
-leader-election-pg-advisory-lock-id <%= p('promscale.advisory-lock-id') %> \
-leader-election-pg-advisory-lock-prometheus-timeout <%= p('promscale.prometheus-timeout') %> \
>> $LOG_DIR/promscale.stdout.log &
# Record the background PID for the stop branch.
echo $! > $PIDFILE
;;
stop)
# NOTE(review): SIGKILL as the first resort gives promscale no chance to
# shut down cleanly -- consider sending TERM first; confirm before changing.
kill -9 `cat $PIDFILE`
rm -f $PIDFILE
;;
*)
echo "Usage: ctl {start|stop}"
;;
esac
| true |
0a53398ad80ba5b9630f24884454d58d559b1c6e | Shell | dettrace/dettrace | /test/samplePrograms/procdump.sh | UTF-8 | 179 | 3.421875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# set -eEuo pipefail
# Print "name: <md5sum output>" for every entry directly under /proc so two
# runs can be compared; non-regular entries are reported as "not a file".
# Fix: iterate a glob instead of word-splitting `ls` output, and feed md5sum
# via redirection instead of a useless `cat | md5sum`.
for path in /proc/*; do
    f=${path##*/}
    printf '%s: ' "$f"
    if [ -f "$path" ]; then
        md5sum < "$path"
    else
        echo "not a file"
    fi
done
| true |
eb3afe7a8ff7ad045d64c802b7d7bd10e8b14e3f | Shell | wzzrd/ocp36-azure-simple | /deploy.sh | UTF-8 | 4,443 | 4.125 | 4 | [] | no_license | #!/bin/bash
# Shell script to deploy OpenShift 3.6 on Microsoft Azure
# Magnus Glantz, sudo@redhat.com, 2017
# --- Load and validate deploy.cfg; OK=1 marks a missing required setting.
OK=0
if [ -f ./deploy.cfg ]; then
. ./deploy.cfg
if test -z $RHN_ACCOUNT; then
OK=1
elif test -z $OCP_USER; then
OK=1
elif test -z $OCP_PASSWORD; then
OK=1
elif test -z $SUBSCRIPTION_POOL; then
OK=1
elif test -z $LOCATION; then
OK=1
elif test -z $RHN_PASSWORD; then
# Password may be left out of the config and typed interactively instead.
echo "Please type your red hat password, finish with [enter]:"
read -s
RHN_PASSWORD=$REPLY
fi
else
OK=1
fi
if [ "$OK" -eq 1 ]; then
echo "Missing variable values: Edit the deploy.cfg file"
exit 1
fi
# --- Obtain a public SSH key, generating a key pair when none exists.  The
# --- PUBLIC_SSH_KEY placeholder check keeps re-runs from re-injecting it.
if [ -f ~/.ssh/id_rsa.pub ]; then
if grep "PUBLIC_SSH_KEY" azuredeploy.parameters.json >/dev/null; then
PUBLIC_SSH_KEY="$(cat ~/.ssh/id_rsa.pub)"
echo "Your public key at ~/.ssh/id_rsa.pub is:"
echo "$PUBLIC_SSH_KEY"
fi
else
echo "No SSH key found in ~/.ssh/id_rsa.pub. Generating key."
ssh-keygen
PUBLIC_SSH_KEY="$(cat ~/.ssh/id_rsa.pub)"
echo "Your key is:"
echo "$PUBLIC_SSH_KEY"
fi
if test -n "$PUBLIC_SSH_KEY"; then
echo "Do you want to use this key to access your azure VMs? (y) :"
read
if [ "$REPLY" == "y" ]; then
# awk -v passes the key as data, avoiding sed-escaping issues.
awk -v key="$PUBLIC_SSH_KEY" '{sub(/PUBLIC_SSH_KEY/,key)}1' azuredeploy.parameters.json >azuredeploy.parameters.json.new
mv azuredeploy.parameters.json.new azuredeploy.parameters.json
else
echo "Edit azuredeploy.parameters.json and paste your public ssh key into the value for sshPublicKey."
exit 1
fi
fi
# Assign first argument to be Azure Resource Group
GROUP=$1
# Test group variable
if test -z $GROUP; then
echo "Usuage: $0 <unique name for Azure resource group>"
exit 1
else
# Refuse names whose master DNS record already resolves (name collision).
if dig ${GROUP}master.${LOCATION}.cloudapp.azure.com|grep -v ";"|grep "IN A"|awk '{ print $5 }'|grep [0-9] >/dev/null; then
echo "Error: ${GROUP}master.${LOCATION}.cloudapp.azure.com already exists. Select other name than $GROUP."
exit 1
fi
# Substitute the placeholders in the parameters file with real values.
# NOTE(review): passwords end up in a sed command line here (visible in ps)
# and in the parameters file -- acceptable for this tool? confirm.
cat azuredeploy.parameters.json|sed -e "s/REPLACE/$GROUP/g" -e "s/RHN_ACCOUNT/$RHN_ACCOUNT/" -e "s/RHN_PASSWORD/$RHN_PASSWORD/" -e "s/OCP_USER/$OCP_USER/" -e "s/OCP_PASSWORD/$OCP_PASSWORD/" -e "s/SUBSCRIPTION_POOL_ID/$SUBSCRIPTION_POOL/" >azuredeploy.parameters.json.new
mv azuredeploy.parameters.json.new azuredeploy.parameters.json
fi
echo "Deploying OpenShift Container Platform."
# Create Azure Resource Group
azure group create $GROUP $LOCATION
# Create Keyvault in which we put our SSH private key
azure keyvault create -u ${GROUP}KeyVaultName -g $GROUP -l $LOCATION
# Put SSH private key in key vault
azure keyvault secret set -u ${GROUP}KeyVaultName -s ${GROUP}SecretName --file ~/.ssh/id_rsa
# Enable key vault to be used for deployment
azure keyvault set-policy -u ${GROUP}KeyVaultName --enabled-for-template-deployment true
# Launch deployment of cluster, after this it’s just waiting for it to complete.
# azuredeploy.parameters.json needs to be populated with valid values first, before you run this.
azure group deployment create --name ${GROUP} --template-file azuredeploy.json -e azuredeploy.parameters.json --resource-group $GROUP --nowait
# Restore the placeholders so the parameters file holds no secrets on disk.
cat azuredeploy.parameters.json|sed -e "s/$GROUP/REPLACE/g" -e "s/$RHN_ACCOUNT/RHN_ACCOUNT/" -e "s/$RHN_PASSWORD/RHN_PASSWORD/" -e "s/$OCP_USER/OCP_USER/" -e "s/$OCP_PASSWORD/OCP_PASSWORD/" -e "s/$SUBSCRIPTION_POOL/SUBSCRIPTION_POOL_ID/" >azuredeploy.parameters.json.new
mv azuredeploy.parameters.json.new azuredeploy.parameters.json
echo
echo "Deployment initiated. Allow 40-50 minutes for a deployment to succeed."
echo "The cluster will be reachable at https://${GROUP}master.${LOCATION}.cloudapp.azure.com:8443"
echo
echo "Waiting for Bastion host IP to get allocated."
# Poll until the bastion's public IP shows a numeric address.
while true; do
if azure network public-ip show $GROUP bastionpublicip|grep "IP Address"|cut -d':' -f3|grep [0-9] >/dev/null; then
break
else
sleep 5
fi
done
echo "You can SSH into the cluster by accessing it's bastion host: ssh $(azure network public-ip show $GROUP bastionpublicip|grep "IP Address"|cut -d':' -f3|grep [0-9]|sed 's/ //g')"
echo "Once your SSH key has been distributed to all nodes, you can then jump passwordless from the bastion host to all nodes."
echo "To SSH directly to the master, use port 2200: ssh ${GROUP}master.${LOCATION}.cloudapp.azure.com -p 2200"
echo "For troubleshooting, check out /var/lib/waagent/custom-script/download/[0-1]/stdout or stderr on the nodes"
| true |
deffcf68db68cadbc435bb2f01a4d4a259b5cf2e | Shell | michaelcunningham/oracledba | /adhoc/gather_stats_npdb530.sh | UTF-8 | 2,001 | 3.0625 | 3 | [] | no_license | #!/bin/sh
# Gather Oracle optimizer statistics for the databases hosted on npdb530 and
# append progress to a per-server log file.
target_server=npdb530
this_server=`uname -n | cut -f1 -d.`
# Refuse to run on any host other than the intended one.
if [ "$this_server" != "$target_server" ]
then
    echo "You are trying to run this script on the wrong server."
    echo "It is intended to only run on the $target_server server."
    exit
fi
log_date=`date +%a`
adhoc_dir=/dba/adhoc
log_file=$adhoc_dir/log/gather_stats_$target_server.log
echo "Gather stats started on $target_server at `date`." > $log_file
echo >> $log_file
echo "  Gather stats for apex : `date`." >> $log_file
/dba/admin/gather_sys_stats.sh apex
for schema in tdce dmmaster
do
    /dba/admin/gather_schema_stats_auto_degree_8.sh apex $schema
done
echo "  Starting gather stats for itqa : `date`." >> $log_file
for schema in inforepqa inforepqa_srv
do
    /dba/admin/gather_schema_stats_100.sh itqa $schema
done
echo "  Starting gather stats for tdccpy : `date`." >> $log_file
for schema in ignite43 novaprd npic rein security tdcglobal vistaprd
do
    /dba/admin/gather_schema_stats_100.sh tdccpy $schema
done
echo "  Starting gather stats for tdcdv3 : `date`." >> $log_file
# novaprd was commented out for tdcdv3 in the original script; kept skipped.
for schema in fpicusr ignite43 security tdcglobal vistaprd
do
    /dba/admin/gather_schema_stats_100.sh tdcdv3 $schema
done
echo >> $log_file
echo "Gather stats finished on $target_server at `date`." >> $log_file
echo '' >> $log_file
echo '' >> $log_file
echo 'This report created by : '$0' '$* >> $log_file
#mail -s "Database statistics for "$target_server mcunningham@thedoctors.com < $log_file
#mail -s "Database statistics for "$target_server swahby@thedoctors.com < $log_file
| true |
faff9bcf61c3723d1ed3c6476a26571f65214a67 | Shell | josephvoss/dotfiles | /.local/bin/wal-set | UTF-8 | 635 | 3.0625 | 3 | [] | no_license | #!/usr/bin/env bash
#
# Script to set colors generated by 'wal'
# https://github.com/dylanaraps/wal
# Source generated colors.
# NOTE(review): HOME is hardcoded to one user's directory -- presumably so
# the script finds the wal cache when run without the user's env; confirm.
export HOME=/home/jvoss14
source "$HOME/.cache/wal/colors.sh"
# Restart dunst with the wal palette: color0 drives the backgrounds, color15
# the foregrounds.  ':-' only substitutes a fallback, while ':=' also
# *assigns* the default to color15 / DUNST_FONT / DUNST_SIZE.
reload_dunst() {
pkill dunst
dunst \
-lb "${color0:-#F0F0F0}" \
-nb "${color0:-#F0F0F0}" \
-cb "${color0:-#F0F0F0}" \
-lf "${color15:=#000000}" \
-bf "${color15:=#000000}" \
-cf "${color15:=#000000}" \
-nf "${color15:=#000000}" \
-fn "${DUNST_FONT:=Roboto Mono for Powerline 10}" \
-geometry "${DUNST_SIZE:=300x50+0+27}" &
}
main() {
reload_dunst &
}
main
| true |
1e96c8b0ae8d63fa2b77ae76ceaaf032d1397dba | Shell | doronbehar/msmtpq | /msmtpq | UTF-8 | 10,645 | 3.859375 | 4 | [] | no_license | #!/bin/sh
# Abort on the first failing command.
set -e
# Per-user configuration directory named after this script, honoring
# $XDG_CONFIG_DIR when set (note: the XDG spec calls this XDG_CONFIG_HOME --
# TODO confirm which variable callers actually export).
config_dir="${XDG_CONFIG_DIR:-${HOME}/.config}"/"$(basename "$0")"
# Active config file: $MSMTPQ_CONFIG override, else <config_dir>/default.conf.
config_file="${MSMTPQ_CONFIG:-${config_dir}/default.conf}"
# Print the command-line help: a usage line, then each option with its
# description aligned in a common column and word-wrapped so lines stay
# within the terminal width / the configured maximum length.
_usage() {
  local options=(
    '-c,--config <FILE>'
    '-l,--list'
    '-r,--run'
    '-v,--verbose'
    '-n,--dry-run'
    '-h,--help'
  )
  # Descriptions are index-aligned with the options above.
  local descriptions=(
    "Use the given configuration file instead of the default one (${config_file}), can also be set with \`\$MSMTPQ_CONFIG\`"
    "List all queued emails. If used with \`--verbose\`, the full emails are printed"
    "Try to send all the emails in the queue"
    "Turn on verbose output"
    "don't store emails in queue but show only what actions would have been taken if $(basename "$0") would have run normally (with or without --run), implies --verbose"
    "display help"
  )
  # Layout: left margin before every option, gap between the longest
  # option and the description column, and a hard wrap limit.
  local options_offset=3
  local descriptions_offset_after_longest_option=5
  local maximum_descriptions_length=80
  # COLUMNS is only set by interactive shells; without a fallback the
  # numeric comparisons below would see an empty value (treated as 0) and
  # wrap the help after every single word.
  local term_width=${COLUMNS:-80}

  echo "Usage: $(basename "$0") [OPTION]..."

  # The longest option determines where all descriptions start.
  local opt max_option_length=1
  for opt in "${options[@]}"; do
    (( ${#opt} > max_option_length )) && max_option_length=${#opt}
  done
  # Column where wrapped description lines continue.
  local descriptions_new_line_offset=$(( max_option_length + options_offset + descriptions_offset_after_longest_option ))

  local i j current_char_index current_description_offset description_words
  for (( i = 0; i < ${#options[@]}; i++ )); do
    # Left margin, then the option itself.
    printf -- '%*s' "${options_offset}"
    printf -- '%s' "${options[$i]}"
    # Track the cursor column to decide when to wrap.
    current_char_index=$(( options_offset + ${#options[$i]} ))
    # Pad so this description starts in the shared column.
    current_description_offset=$(( max_option_length - ${#options[$i]} + descriptions_offset_after_longest_option ))
    printf -- '%*s' "${current_description_offset}"
    current_char_index=$(( current_char_index + current_description_offset ))
    # Emit the description word by word, wrapping before either limit.
    IFS=' ' read -r -a description_words <<< "${descriptions[$i]}"
    for (( j = 0; j < ${#description_words[@]}; j++ )); do
      current_char_index=$(( current_char_index + ${#description_words[$j]} + 1 ))
      if (( current_char_index <= term_width && current_char_index <= maximum_descriptions_length )); then
        printf -- '%s ' "${description_words[$j]}"
      else
        # Continue on a fresh line, indented to the description column.
        printf -- '\n%*s%s ' "${descriptions_new_line_offset}" '' "${description_words[$j]}"
        current_char_index=$(( descriptions_new_line_offset + ${#description_words[$j]} ))
      fi
    done
    # Blank line separator handling: one newline after each option entry.
    printf '\n'
  done
}
# List queued mails: a banner per *.mail file; with --verbose print the
# whole message, otherwise only the From/To/Subject headers (falling back
# to the full file when none of those headers are present).
_list() {
  # Attach the error path with || — under 'set -e' a failed cd would exit
  # before a separate $? check could ever run.
  cd "${QUEUEDIR}" || {
    echo "Couldn't cd into queue dir (${QUEUEDIR})" > /dev/stderr
    exit 4
  }
  set +e
  local i line
  for i in *.mail; do
    # Without nullglob an empty queue leaves the literal '*.mail' pattern.
    [[ -e "$i" ]] || continue
    # Banner: the filename framed by dashes of the same width.
    printf -v line '%*s' "${#i}"
    echo "${line// /-}"
    echo "$i"
    echo "${line// /-}"
    if [[ $verbose == "true" ]]; then
      cat "$i"
    else
      # grep -E replaces the deprecated egrep; -s silences read errors.
      grep -E -s --color=auto -h '(^From:|^To:|^Subject:)' "$i" || cat "$i"
    fi
  done
  set -e
}
# Flush the queue: send every *.mail with the msmtp options stored in the
# matching *.msmtp file, deleting both on success. A lock file prevents two
# queue runs from racing; a failed send leaves the pair in place.
_run() {
  if [[ -f ${LOCKFILE} ]]; then
    cat <<EOM
Cannot use queue dir (${QUEUEDIR}) since it contains a lockfile
If you are sure that no other instance of this script is
running, then delete the lock file.
EOM
    exit 4
  fi
  # Error paths are attached with || — under 'set -e' a failed touch/cd
  # would exit before the original separate $? checks could run.
  touch "${LOCKFILE}" || {
    echo "couldn't create a lockfile (${LOCKFILE})" > /dev/stderr
    exit 4
  }
  cd "${QUEUEDIR}" || {
    echo "Couldn't cd into queue dir (${QUEUEDIR})" > /dev/stderr
    exit 4
  }
  set +e
  local MAILFILE MSMTPFILE
  for MAILFILE in *.mail; do
    # Without nullglob an empty queue leaves the literal '*.mail' pattern.
    [[ -e "$MAILFILE" ]] || continue
    # Swap only the suffix (the old sed 's/mail/msmtp/' also rewrote a
    # 'mail' occurring earlier in the basename).
    MSMTPFILE="${MAILFILE%.mail}.msmtp"
    if [ ! -f "$MSMTPFILE" ]; then
      echo "No corresponding file $MSMTPFILE found" > /dev/stderr
      continue
    fi
    # The stored line ends in '-- <recipients>'; show just the recipients.
    echo "*** Sending $MAILFILE to $(sed -e 's/^.*-- \(.*$\)/\1/' "$MSMTPFILE") ..."
    # Word-splitting of the stored msmtp options is intentional here.
    if msmtp $(cat "$MSMTPFILE") < "$MAILFILE"; then
      rm "$MAILFILE" "$MSMTPFILE"
      echo "$MAILFILE sent successfully"
    else
      echo "FAILURE"
    fi
  done
  rm -f "$LOCKFILE"
  set -e
}
# Queue one mail: store the msmtp command line ("$@") and the message body
# (read from stdin) under a unique timestamp-based basename in ${QUEUEDIR}.
_enqueue() {
  # Attach the error path with || — under 'set -e' a failed cd would exit
  # before a separate $? check could ever run.
  cd "${QUEUEDIR}" || {
    echo "Couldn't cd into queue dir (${QUEUEDIR})" > /dev/stderr
    exit 4
  }
  # Queue files may contain private mail; restrict their permissions.
  umask "${UMASK}"
  # Create new unique filenames of the form
  #   MAILFILE:  ccyy-mm-dd-hh.mm.ss[-x].mail
  #   MSMTPFILE: ccyy-mm-dd-hh.mm.ss[-x].msmtp
  # where x is only appended if more than one mail is queued per second.
  BASE="$(date +"${DATEFORMAT}")"
  if [[ -f "$BASE.mail" || -f "$BASE.msmtp" ]]; then
    local stem="$BASE"
    local i=1
    while [[ -f "$stem-$i.mail" || -f "$stem-$i.msmtp" ]]; do
      i=$((i + 1))
    done
    BASE="$stem-$i"
  fi
  MAILFILE="$BASE.mail"
  MSMTPFILE="$BASE.msmtp"
  # Save the command line for _run; printf (unlike echo) cannot mistake a
  # leading '-n'/'-e' argument for one of its own options.
  printf -- '%s\n' "$*" > "$MSMTPFILE" || exit 1
  # The mail itself comes from stdin.
  cat > "$MAILFILE" || exit 1
}
# Command-line parsing. Exactly one action is allowed per invocation; it
# defaults to 'enqueue' (read a mail from stdin and store it in the queue).
# Any arguments left after the known options fall through to the action
# dispatch below (they become the stored msmtp options for enqueue).
action=enqueue
while [[ $# -gt 0 ]]; do
key="$1"
case "$key" in
# -c/--config accepts either a path to a config file or the bare name of
# a <name>.conf inside the config directory.
"--config"|"-c")
if [[ -f "$2" ]]; then
config_file="$2"
elif [[ -f "${config_dir}"/"$2".conf ]]; then
config_file="${config_dir}"/"$2".conf
else
echo given configuration file $2 not found > /dev/stderr
exit 1
fi
shift 2
continue
;;
# --list and --run each claim the single action slot; specifying both
# (or either twice) is rejected.
"--list"|"-l")
if [[ "${action}" == "enqueue" ]]; then
action=list
else
echo "Please don't specify multiple actions together" > /dev/stderr
exit 2
fi
shift
continue
;;
"--run"|"-r")
if [[ "${action}" == "enqueue" ]]; then
action=run
else
echo "Please don't specify multiple actions together" > /dev/stderr
exit 2
fi
shift
continue
;;
"--verbose"|"-v")
verbose=true
shift
continue
;;
# NOTE(review): dry_run is set here but never consulted later in the
# script — the option currently has no effect; confirm intent.
"--dry-run"|"-n")
dry_run=true
shift
continue
;;
"--help"|"-h")
action=usage
exit_code=0
shift
break
;;
# First unrecognized argument ends option parsing; the remainder is
# passed through to the selected action.
*)
break
;;
esac
done
# Load the user configuration, if one exists. The error path is attached
# with || because under 'set -e' a failure while sourcing would exit the
# script before a separate $? check could ever run.
if [[ -f "${config_file}" ]]; then
    source "${config_file}" || {
        echo "there appeared to be some errors when sourcing the configuration file (${config_file}), exiting" > /dev/stderr
        exit 3
    }
fi
# Built-in defaults for anything neither the configuration file nor the
# environment provided; each fallback is announced in verbose mode.
if [[ -z ${QUEUEDIR} ]]; then
    QUEUEDIR="${HOME}/.msmtpq"
    if [[ $verbose == "true" ]]; then
        echo "QUEUEDIR is not set in configuration file or environment, using default (${QUEUEDIR})" > /dev/stderr
    fi
fi
if [[ -z ${DATEFORMAT} ]]; then
    DATEFORMAT="%Y-%m-%d-%H.%M.%S"
    if [[ $verbose == "true" ]]; then
        echo "DATEFORMAT is not set in configuration file or environment, using default (${DATEFORMAT})" > /dev/stderr
    fi
fi
# LOCKFILE default depends on QUEUEDIR, so it is resolved after it.
if [[ -z ${LOCKFILE} ]]; then
    LOCKFILE="${QUEUEDIR}/.lock"
    if [[ $verbose == "true" ]]; then
        echo "LOCKFILE is not set in configuration file or environment, using default (${LOCKFILE})" > /dev/stderr
    fi
fi
if [[ -z ${UMASK} ]]; then
    UMASK=077
    if [[ $verbose == "true" ]]; then
        echo "UMASK is not set in configuration file or environment, using default (${UMASK})" > /dev/stderr
    fi
fi
# Dispatch on the selected action. For 'enqueue' the remaining positional
# arguments are the msmtp options stored alongside the queued message.
case "${action}" in
usage)
_usage
# exit_code is set by the --help branch of the option parser above.
exit ${exit_code}
;;
list)
_list
;;
run)
_run
;;
enqueue)
_enqueue "$@"
;;
esac
| true |
cdd2f754020d08458c571612a45b6203bd0d2012 | Shell | uvt/blackarch | /packages/libowfat/PKGBUILD | UTF-8 | 555 | 2.65625 | 3 | [] | no_license | pkgname=libowfat
# Arch Linux PKGBUILD metadata for libowfat (pkgname defined above).
pkgver=0.29
pkgrel=2
pkgdesc="GPL reimplementation of libdjb"
arch=(i686 x86_64)
url=http://www.fefe.de/libowfat/
makedepends=(dietlibc)
license=(GPL)
install=libowfat.install
source=(http://dl.fefe.de/$pkgname-$pkgver.tar.bz2)
md5sums=('1187c6acf11429e7adb9ebe180f644bb')
# Compile the library.
# NOTE(review): DIET= clears the upstream Makefile's diet wrapper variable,
# which presumably forces a glibc build despite the dietlibc makedepend —
# confirm against the upstream Makefile.
build() {
cd $srcdir/$pkgname-$pkgver
DIET= make
}
# Install into the package staging directory ($pkgdir, set by makepkg).
package() {
cd $srcdir/$pkgname-$pkgver
DIET= make prefix=$pkgdir/usr/ LIBDIR=$pkgdir/usr/lib MAN3DIR=$pkgdir/usr/share/man/man3 install
cd $pkgdir/usr/share/man/man3
# Rename the buffer(3) manpage — presumably to avoid a file conflict with
# another package shipping buffer.3; verify which package that is.
mv buffer.3 buffer-libowfat.3
}
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.