blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
277fd954b2c71b7154aaf8072bc0d6441cabeb8b | Shell | hfujikawa/Scripts | /call_py_from_bash.sh | UTF-8 | 209 | 2.671875 | 3 | [] | no_license | #!/bin/bash
# script for testing: call a Python helper from bash and check its reply.
clear
echo "............script started............"
sleep 1
# $(...) replaces the legacy backticks; the "hi" argument is forwarded to
# the Python script and its stdout is captured in $result.
result=$(python py_from_bash.py "hi")
# NOTE(review): assumes py_from_bash.py answers "Salaam" for input "hi".
# POSIX "=" replaces the bash-only "==" inside [ ].
if [ "$result" = "Salaam" ]; then
    echo "script return correct response"
fi
| true |
93eeb9ed6470e5bc00cdd0ae0631836552bc7a22 | Shell | orenlivne/ober | /primal/src/code/impute/batch/po/idcoef.sh | UTF-8 | 1,312 | 3.875 | 4 | [] | no_license | #!/bin/bash
#-------------------------------------------------------------------------
# Calculate detailed identity coefficients from a haplotype IBD
# segment index.
#
# Author: Oren E. Livne
# Date: 01-DEC-2012
#-------------------------------------------------------------------------
#-----------------
# Input parameters
#-----------------
# Local imputed data output directory
segment_index="$1" # Location of segment index
out="$2" # Output directory
start_chr="$3"
stop_chr="$4"
num_jobs="$5" # #jobs to run in parallel
# Calculate identity state counts for a single chromosome.
# $1 = segment index location, $2 = output directory, $3 = chromosome number.
# Requires $OBER_CODE in the environment; writes ${out}/idcoefs-chr<N>.txt.
function do_chrom
{
    # Location of segment index
    local segment_index="$1"
    # Output directory
    local out="$2"
    # Chromosome number
    local chrom="$3"
    echo "Chromosome ${chrom}"
    ${OBER_CODE}/impute/impute/poo/idcoef.py -c ${chrom} ${segment_index} ${out}/idcoefs-chr${chrom}.txt
}
#---------------------
# Main program
#---------------------
mkdir -p ${out}
# Export the function so the sub-shells spawned by GNU parallel can call it.
export -f do_chrom
# Sample resource usage every 30s in the background for the whole run.
monitor-usage 30 false >& ${out}/monitor-usage.out &
if [ ${num_jobs} -eq 1 ]; then
    # Serial run
    for chrom in `seq ${start_chr} ${stop_chr}`; do
	do_chrom ${segment_index} ${out} ${chrom}
    done
else
    # Parallel run (parallel appends each chromosome number as the last argument)
    seq ${start_chr} ${stop_chr} | parallel -j ${num_jobs} do_chrom ${segment_index} ${out}
fi
| true |
e0322f02d0440b9794798e26597d4460f07f7b7c | Shell | issta21-109/F-detector | /test/test_cases/exiv2Insert.sh | UTF-8 | 1,152 | 2.703125 | 3 | [] | no_license | export output_dir_name="exiv2Insert"
# Test case: trace exiv2's "insert" feature, then profile, merge, identify
# and patch the feature-specific code paths.
# NOTE(review): $tracer, $profiler, $merge, $identifier, $retracer, $patcher
# and the *_path variables are assumed to come from ../config.sh -- confirm.
source ../config.sh
# NOTE(review): the quoted "~" below is never tilde-expanded by the shell,
# so $program contains a literal "~/" -- verify this path resolves.
program="~/Others/exiv2/build/bin/exiv2"
traces=$traces_path/*
target="insert"
# Variables specific for tracing the program #
#############################################################
imgfiles=$test/test_imgs
# Runs that execute the target feature #
#############################################################
$tracer -f $target -i "colors" -r "1" -o $traces_path -- $program in $imgfiles"/colors.tiff"
$tracer -f $target -i "colors" -r "2" -o $traces_path -- $program -i a $imgfiles"/colors.tiff"
# Runs that DONT execute the target feature #
#############################################################
$tracer -f "print" -i "colors" -r "1" -o $traces_path -- $program pr $imgfiles"/colors.tiff"
$tracer -f "remove" -i "colors" -r "1" -o $traces_path -- $program -P E $imgfiles"/colors.tiff"
# echo $output_path
$profiler $traces $output_path
$merge $output_path
$identifier $output_path $target
$retracer -d $output_path -- $program in $imgfiles"/colors.tiff"
$patcher $program $test/results/$output_dir_name/identified
| true |
3f3621abcf2dc5fa7dbda24aae5b6f7319584934 | Shell | jpacitto/dotfiles-1 | /archLinux/change_gaps.sh | UTF-8 | 869 | 3.796875 | 4 | [] | no_license | #!/bin/bash
# this script is to allow i3 gap params to be changed
#
# Usage: change_gaps.sh work|play          (preset profiles)
#        change_gaps.sh <inner> <outer>    (explicit pixel values)
# Rewrites the "gaps inner"/"gaps outer" lines in ~/.config/i3/config
# (following symlinks) and reloads i3.

if [[ $# -gt 2 ]] || [[ $# -eq 0 ]]; then
    echo "Invalid number of arguments!"
    exit 1
fi

if [[ $# -eq 1 ]]; then # only one arg, it's a special one :)
    if [[ $1 = "work" ]]; then
        inner=0
        outer=0
    elif [[ $1 = "play" ]]; then
        inner=40
        outer=2
    else
        echo "If you want to use one argument, it must be 'work' or 'play'"
        exit 1
    fi
elif [[ $# -eq 2 ]]; then # default to using the first and second args
    # Reject non-numeric values: they would otherwise be spliced verbatim
    # into the sed replacement text below.
    if ! [[ $1 =~ ^[0-9]+$ && $2 =~ ^[0-9]+$ ]]; then
        echo "Gap sizes must be non-negative integers"
        exit 1
    fi
    inner=$1
    outer=$2
fi

echo "Creating backup config at ~/.config/i3/config.bak"
# Abort if the config directory is missing; otherwise the sed -i calls
# below would run against files in whatever directory we happen to be in.
cd ~/.config/i3 || exit 1
cp config config.bak #make our own backup
echo "i3: Setting inner gap to $inner and outer gap to $outer"
sed -E "/gaps inner/s/[0-9]+/$inner/" -i config --follow-symlinks
sed -E "/gaps outer/s/[0-9]+/$outer/" -i config --follow-symlinks
i3-msg reload # reload i3 so the config takes effect
| true |
bfa1f5f4ab9aaaa89069adb505a9984680017777 | Shell | Decipher/drupalcampmel | /tests/drupal_ti/scripts/before_script.sh | UTF-8 | 1,388 | 3.390625 | 3 | [] | no_license | #!/bin/bash
# Simple script to install drupal for travis-ci running.
# NOTE(review): uses bare "return" at top level, so this file is expected to
# be *sourced* by the drupal_ti harness rather than executed directly.
set -e $DRUPAL_TI_DEBUG
# Ensure the right Drupal version is installed.
if [ -d "$DRUPAL_TI_DRUPAL_DIR" ]
then
	return
fi
# HHVM env is broken: https://github.com/travis-ci/travis-ci/issues/2523.
PHP_VERSION=`phpenv version-name`
if [ "$PHP_VERSION" = "hhvm" ]
then
	# Create sendmail command, which links to /bin/true for HHVM.
	BIN_DIR="$TRAVIS_BUILD_DIR/../drupal_travis/bin"
	mkdir -p "$BIN_DIR"
	ln -s $(which true) "$BIN_DIR/sendmail"
	export PATH="$BIN_DIR:$PATH"
fi
# Create database and install Drupal.
mysql -e "create database $DRUPAL_TI_DB"
mkdir -p "$DRUPAL_TI_DRUPAL_BASE"
cd $DRUPAL_TI_DRUPAL_BASE
drush make --no-recursion "$TRAVIS_BUILD_DIR/stub.make" "$DRUPAL_TI_DRUPAL_BASE/drupal"
rm -rf drupal/profiles/$DRUPAL_TI_MODULE_NAME
# Point project into the drupal installation.
ln -sf "$TRAVIS_BUILD_DIR" "drupal/profiles/$DRUPAL_TI_MODULE_NAME"
cd "$TRAVIS_BUILD_DIR"
drush make --no-core --contrib-destination=./ ./drupal-org.make -y
cd "$DRUPAL_TI_DRUPAL_BASE/drupal"
# Site install runs non-interactively; sendmail is stubbed with /bin/true so
# the installer cannot hang trying to send mail.
php -d sendmail_path=$(which true) ~/.composer/vendor/bin/drush.php --yes site-install $DRUPAL_TI_MODULE_NAME --db-url="$DRUPAL_TI_DB_URL"
drush use $(pwd)#default
# Clear caches and run a web server.
drupal_ti_clear_caches
drupal_ti_run_server
# Start xvfb and selenium.
drupal_ti_ensure_xvfb
drupal_ti_ensure_webdriver
| true |
a483147c8fde816459723d42f96dfbc81cacaf10 | Shell | ProyectoDavid/PapelyLapiz | /pyl5/pyl5/pyl5a/pyl5p/VideoGenerator/production/scripts/validateJobs.sh | UTF-8 | 306 | 2.890625 | 3 | [] | no_license | #!/bin/bash
# Wavemage: validate every render job description under $PEACHROOT/jobs.
cd / || exit 1
# PEACHROOT=~/production  # use wavemages account
PEACHROOT=/media/data/peach/production # Normal users
REND=/shared/render
# Stream job paths straight from find; this also copes with whitespace in
# file names, which word-splitting an unquoted $JOBS variable would not.
find "$PEACHROOT/jobs" -iname "*.job" | while read -r j
do
    # Announce the job being validated. (The original "echo $blen" printed
    # an undefined variable, i.e. always a blank line.)
    echo "$j"
    python "$PEACHROOT/scripts/validateJob.py" "$j" "$REND"
done
| true |
66f62952b3cb4aa9f738ed4fbfd81b2bc71ae1ca | Shell | dfangx/linux_config | /bin/notes/notes | UTF-8 | 2,194 | 3.609375 | 4 | [] | no_license | #!/bin/sh
dir=$HOME/notes
case $1 in
-n | --new)
optdir="$2"
"$EDITOR" "$dir/$optdir/$(date '+%Y%m%d%H%M%S').md"
;;
-t | --tags)
RG="rg \
--no-heading \
--smart-case \
--line-number \
--column \
--color=always"
TAGS_RG="rg \
--no-heading -r '\$1' \
-INo \"tags: \[((\w+)((?:,) (\w+))*)\]\" "$dir"\
| sed 's/, /\n/g' \
| sort -u \
| sed '/^\$/d'"
q=$2
f=false
while ! $f ; do
f=false
FZF_DEFAULT_COMMAND="cat $dir/tags"
result="$(fzf \
--multi \
--ansi \
--query "$q" \
--delimiter : \
--prompt "Tags Search> " \
)"
[ -z "$result" ] && break
tags="${result/$'\n'/'|'}"
echo "$tags"
files="$($RG -t md --color never -lw "$tags" $dir)"
[ $(echo "$files" | wc -l) -eq 1 ] && result="$files" && break
# files="${files//$'\n'/' '}"
# FZF_DEFAULT_COMMAND="$RG -H $(printf %q $q) $files"
result="$(echo "$files" | fzf \
--multi \
--ansi \
--delimiter : \
--preview "bat -pp --color=always {}" \
--preview-window 'wrap,up,60%,+{2}+3/3,~1' \
)"
[ -n "$result" ] && f=true
done
[ -n "$result" ] && $EDITOR $(echo "$result" | cut -f1 -d':')
;;
*)
RG="rg \
--no-heading \
--smart-case \
--line-number \
--column \
--color=always"
FZF_DEFAULT_COMMAND="$RG -H $(printf %q $q) $dir"
result="$(fzf \
--multi \
--ansi \
--query "$q" \
--delimiter : \
--prompt "Tags Search> " \
--preview "bat -pp --color=always {1} --highlight-line {2}" \
--preview-window 'wrap,up,60%,+{2}+3/3,~3'\
)"
[ -n "$result" ] && $EDITOR $(echo "$result" | cut -f1 -d':')
;;
esac
| true |
1740416eb728a010b9420e514e1a125af500606a | Shell | malfunct0r/dotfiles-1 | /config/install.sh | UTF-8 | 2,069 | 2.96875 | 3 | [] | no_license | #!bin/sh
#
# Automates initial config of MihailDono's files.
# Installs apt packages, deploys .zshrc, sets up oh-my-zsh plus plugins,
# then installs global npm tools and emacs via snap.  The commented block
# at the bottom is the manual macOS variant of the same steps.
echo "Powering up systems...\n"
sleep 1s
echo "\nInstalling packages via apt...\n"
sudo apt install git terminator zsh silversearcher-ag markdown -y
echo "\nMoving .zshrc...\n"
cp config/.zshrc ~/
echo "\nSetuping terminal...\n"
wget -O zshinstall.sh https://github.com/robbyrussell/oh-my-zsh/raw/master/tools/install.sh
# Prevent the script from stopping.
# NOTE(review): the three sed lines below look corrupted: 's:::g' is a no-op,
# the second sed has an incomplete expression and no input file, and the
# '111d' call lacks -i so it never modifies the file.  Verify against the
# original installer-patching commands.
sed -i.tmp 's:::g' zshinstall.sh
sed -i.tmp 's:'
sed '111d' zshinstall.sh
chmod +x zshinstall.sh
./zshinstall.sh
rm zshinstall.sh
# Add zsh plugins
git clone https://github.com/zsh-users/zsh-autosuggestions ~/.oh-my-zsh/custom/plugins/zsh-autosuggestions
git clone https://github.com/romkatv/powerlevel10k.git ~/.oh-my-zsh/custom/themes/powerlevel10k
git clone https://github.com/zsh-users/zsh-syntax-highlighting.git ~/.oh-my-zsh/custom/plugins/zsh-syntax-highlighting
git clone https://github.com/lukechilds/zsh-nvm ~/.oh-my-zsh/custom/plugins/zsh-nvm
echo "\nInstalling npm packages...\n"
npm install -g typescript typescript-language-server # --allow-root --unsafe-perm=true
echo "\nInstalling emacs27...\n"
snap install emacs --classic
echo "\nHappy Hacking!\n"
# MacOS
# Install brew
# /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
# Install brew packages
# Install zsh
# wget -O zshinstall.sh https://github.com/robbyrussell/oh-my-zsh/raw/master/tools/install.sh
# brew install autojump markdown the_silver_searcher aspell
# Add zsh plugins
# git clone https://github.com/zsh-users/zsh-autosuggestions ~/.oh-my-zsh/custom/plugins/zsh-autosuggestions
# git clone https://github.com/romkatv/powerlevel10k.git ~/.oh-my-zsh/custom/themes/powerlevel10k
# git clone https://github.com/zsh-users/zsh-syntax-highlighting.git ~/.oh-my-zsh/custom/plugins/zsh-syntax-highlighting
# git clone https://github.com/lukechilds/zsh-nvm ~/.oh-my-zsh/custom/plugins/zsh-nvm
# echo "\nInstalling npm packages...\n"
# npm install -g typescript typescript-language-server # --allow-root --unsafe-perm=true
| true |
5600e56a6a835428d50cdb709b67b5314780736e | Shell | accordproject/fabric-samples | /fabric-ca/scripts/start-intermediate-ca.sh | UTF-8 | 742 | 3.21875 | 3 | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | #!/bin/bash
#
# Copyright IBM Corp. All Rights Reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Boot an intermediate Fabric CA: wait for the root CA, initialise against
# it, publish the CA chain, register org affiliations, start the server.
source $(dirname "$0")/env.sh
initOrgVars $ORG
set -e
# Wait for the root CA to start
waitPort "root CA to start" 60 $ROOT_CA_LOGFILE $ROOT_CA_HOST 7054
# Initialize the intermediate CA
fabric-ca-server init -b $BOOTSTRAP_USER_PASS -u $PARENT_URL
# Copy the intermediate CA's certificate chain to the data directory to be used by others
cp $FABRIC_CA_SERVER_HOME/ca-chain.pem $TARGET_CHAINFILE
# Add the custom orgs
for o in $FABRIC_ORGS; do
   aff=$aff"\n          $o: []"
done
aff="${aff#\\n   }"
# Insert the affiliation entries after the "affiliations:" key in the YAML.
sed -i "/affiliations:/a \\ $aff" \
   $FABRIC_CA_SERVER_HOME/fabric-ca-server-config.yaml
# Start the intermediate CA
fabric-ca-server start
| true |
1babc83a582d5551fd05351cb3891b1553a19b1e | Shell | keithballdotnet/tum_microservices_session | /src/tum/partvi/2_build_docker.sh | UTF-8 | 555 | 2.78125 | 3 | [
"MIT"
] | permissive |
# Stage each service's Dockerfile next to its build artifacts (docker build
# contexts are relative), build both images inside minikube's docker daemon,
# then list the images to confirm they exist.
for svc in hello world; do
    cp "src/tum/partvi/$svc/Dockerfile" "./bin/partvi/$svc"
done

# Point the docker CLI at the daemon running inside minikube.
eval $(minikube docker-env)
# eval $(minikube docker-env -u)

# Build and tag tum/hello and tum/world.
for svc in hello world; do
    docker build -t "tum/$svc:latest" "./bin/partvi/$svc"
done

# verify the images are there
docker images

# to use local docker daemon run...
# eval $(minikube docker-env -u)
758d0cd370f8e5ec22e43f16ee6cca89b10b1489 | Shell | thewilkybarkid/dotfiles | /setup.sh | UTF-8 | 328 | 3.109375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
set -eo pipefail
# Bootstrap Homebrew if it is not installed yet (installer is downloaded to
# a temporary brew.sh and removed afterwards), then update it and install
# the packages declared in the local Brewfile via `brew bundle`.
if [[ ! -x /usr/local/bin/brew ]]; then
    echo "Installing Homebrew"
    wget --output-document=brew.sh https://raw.githubusercontent.com/Homebrew/install/master/install.sh
    bash brew.sh
    rm brew.sh
fi
echo "Updating Homebrew"
brew update
echo "Installing Homebrew packages"
brew bundle
| true |
19c59c731b3e4ef9936db1cf3e06eac70025f282 | Shell | binary-c/menu | /settings_sub_editmenu.sh | UTF-8 | 229 | 2.859375 | 3 | [] | no_license | #/bin/bash
# Prompt for a menu name and open its <name>_menu.sh in vim if it exists.
source system_sub_header.sh 'Edit Menu'
# -r keeps backslashes in the typed name literal.
read -r -p 'Menu Name:' MENU
if [ -f "${MENU}_menu.sh" ]
then
    vim "${MENU}_menu.sh"
else
    # Error goes to stderr; fixed typo in the message ("exit" -> "exist").
    echo "can not edit ${MENU}_menu.sh because it does not exist" >&2
fi
source system_sub_pause.sh
| true |
d0273a5847bacb5c9b4e3c06fe9975fe07b78c43 | Shell | m-e-leypold/glitzersachen-demos | /generix/v0-2011/lsdtools-datatypes.shlib | UTF-8 | 4,567 | 4.375 | 4 | [] | no_license | #!/bin/bash
#
# -- Quoting strings and arguments ----------------------------------------
#
quote_stdin(){
    # Shell-quote standard input, output to standard output: wrap the text
    # in single quotes and escape every embedded single quote as '\''.
    # The original sed lacked the /g flag, so only the FIRST quote on each
    # line was escaped and multi-quote lines produced invalid quoting.
    echo -n "'"
    sed "s/'/'\\\\''/g"
    echo -n "'"
}
quote_args(){
    # Shell quote all command line arguments, output to stdout.
    # Relies on GNU getopt echoing the arguments back shell-quoted after a
    # " -- " separator; cut -c5- strips that 4-character prefix.
    getopt -o '' -- "$@" | cut -c5-
}
write_var(){
    # Write variable to standard output as assignment (in shell
    # syntax). Useful for saving variables and for passing variables
    # to parent processes.
    #
    # $1 - Name of variable
    # $2 - Name to be used in assignment
    echo -n "${2:-${1}}="
    # eval expands the variable *named by* $1; its value is then quoted.
    eval 'echo -n "$'"$1"'"' | quote_stdin
    # Example: Z="Karl's place"; write_var Z LOCATION results in the
    # following output:
    #
    # LOCATION='Karl'\''s place'
}
write_vars(){
    # Write multiple variables to standard output as assignments (in a
    # form that can be sourced or evaluated by the shell to restore
    # the variables).
    #
    # Usage: write_vars [-s <suffix>] [-p <prefix>] <variable-names ...>
    #
    # <prefix> or <suffix> will be prepended or appended to the
    # variable names in the output. Variable content will be quoted
    # properly.
    # The whole body runs in a subshell so TMP_* never leak to the caller.
    (
	TMP_PREFIX=""
	TMP_SUFFIX=""
	while test "$#" -gt 0; do
	    case "$1" in
		-p) TMP_PREFIX="$2"; shift 2;;
		-s) TMP_SUFFIX="$2"; shift 2;;
		*) break;;
	    esac
	done
	for TMP_VAR in $@; do
	    write_var "$TMP_VAR" "$TMP_PREFIX""$TMP_VAR""$TMP_SUFFIX"
	    echo
	done
    )
    # Example: write_vars -s _old HOME PATH will result in output like
    #
    # HOME_old='/home/user'
    # PATH_old='/usr/local/bin:/usr/bin:/bin:/usr/local/games:/usr/games'
    #
}
args_print(){
    # Print a list representation of $@.
    #
    # $1 ... - Items to print
    echo -n "{";
    while test $# -gt 1; do
	echo -n "$1" | quote_stdin
	echo -n ", "
	shift
    done
    if test "$#" -gt 0; then
	echo -n "$1" | quote_stdin
    fi
    echo -n "}";
}
#
# -- list data type -------------------------------------------------------------
#
# Internal representation: a list variable holds a single string in GNU
# getopt output format, e.g. " -- 'a' 'b c'" -- a "--" sentinel followed by
# the shell-quoted items.  Accessors re-parse it with eval/set; list_items
# strips the leading " --" (3 characters) with cut -c4-.
list_init(){
    #
    # Initialize list variable (to the empty-list sentinel "--")
    #
    # $1 - Name of list variable
    eval "$1"'="--"'
}
list_set(){
    #
    # Set items in list (getopt re-quotes each item safely)
    #
    # $1 - Name of list variable
    # $2 ... - Items to be assigned to list
    eval "$1"'="$(shift; getopt -o" " "$@")"'
}
list_count(){
    #
    # Count items in list
    #
    # $1 - Name of list variable
    ( eval 'eval '"'"'set -- '"'"' "$'"$1"'"; shift; echo $#' ) || return 1
}
list_items(){
    #
    # Output quoted items in list (without the leading "--" sentinel)
    #
    # $1 - Name of list variable
    eval 'echo $'"$1"' | cut -c4-'
}
list_shift(){
# Shift list $2 items to the left
#
# $1 - Name of list variable
# $2 - Item count by which to shift the list
eval 'eval '"'"'set '"'"'"$'"$1"'"'"'"';
shift '"'"'"${2:-}"'"'"';
list_set '"$1"' "$@"'"'"'
'
# shifting by more items than there are items in the list hast the
# result that the shift is silently ignored. Trace shows that
# e.g. shift 9 is performed, but apparently it has no effekt on
# $@. There is also no error message. I think this is a bug in the
# shell that error handling in nested evals is flakey
}
list_print(){
# Print a list represenation (not in shell syntax)
#
# $1 - Name of list variable
# $2 - optional, flag -n to supress linefeed after printing
eval '
echo -n "'"$1"'={";
eval "set $'"$1"'";
while test $# -gt 1; do
echo -n "$1" | quote_stdin
echo -n ", "
shift
done
if test "$#" -gt 0; then
echo -n "$1" | quote_stdin
fi
echo '"${2:-}"' "}"
'
# Example: list_set T 1 2 "a'c" 3 ; list_print T produces the
# following output:
#
# T={'1', '2', 'a'\''c', '3'}
}
list_append(){
    #
    # Append items to list (items are re-quoted via quote_args)
    #
    # $1 - Name of list variable
    # $2 ... - Items to be appended.
    eval "$1"'="$'"$1"' $(shift 1; quote_args "$@")"'
}
list_prepend(){
    #
    # Prepend items to list (rebuilds the "--" sentinel, then the new items,
    # then the existing items)
    #
    # $1 - Name of list variable
    # $2 ... - Items to be prepended.
    eval "$1"'=" -- $(shift 1; quote_args "$@") $(list_items '"$1"')"'
}
list_apply(){
    #
    # Apply list items to command, i.e. run "$2 ... <item1> <item2> ..."
    #
    # $1 - Name of list variable
    # $2 ... - Command line to which append the list variables
    eval "$(shift 1; quote_args "$@") $(list_items $1)"
}
| true |
36023838781386ebdcc2e8a818c839749715f6fe | Shell | knead/code | /ell | UTF-8 | 472 | 3.40625 | 3 | [
"BSD-2-Clause"
] | permissive | #!/usr/bin/env bash
# usage: sh ell
# Fetch any missing dotfiles into <repo>/etc, stage them with git, and start
# an interactive bash using the downloaded .bashrc.

GET=$(which wget)
# Was ADD=$(git): that ran git with no arguments (usage text goes to
# stderr), so ADD was always empty and the script always bailed out.
ADD=$(which git)
# The closing brackets below were stray '[' characters in the original,
# which made both tests syntax errors.
if [ -z "$GET" ]; then echo "Missing: wget"; exit; fi
if [ -z "$ADD" ]; then echo "Missing: git"; exit; fi

FROM=https://raw.githubusercontent.com/se4ai/code/master/etc
export Ell=$(git rev-parse --show-toplevel)

mkdir -p "$Ell/etc"
for f in .bashrc .vimrc .tmuxrc; do
  if [ ! -f "$Ell/etc/$f" ]; then
    # Download the file, then stage it in the repository.
    "$GET" -O "$Ell/etc/$f" "$FROM/$f"
    "$ADD" add "$Ell/etc/$f"
  fi
done

bash --init-file "$Ell/etc/.bashrc" -i
39f1edf47f0d2c4c1db9957d7b620a9749a75e14 | Shell | Barathan-Somasundram/Epitech | /Tek2/B3-semestre/SHL_2014_bdsh/bdsh.sh | UTF-8 | 1,466 | 3.578125 | 4 | [] | no_license | #!/bin/sh
# Global state shared by the bdsh functions below:
#   is_k    - set to 1 by init_args when -k is passed
#   df_file - default data-base file, used when no -f <db_file> is given
#   file    - active data-base file (filled in by init_args / main code)
#   val/key - scratch variables (unused in the visible code)
is_k=0
df_file="./sh.db"
file=""
val=""
key=""
# Emit the diagnostic selected by the numeric code in $1 on stderr:
#   1 -> full usage text          2 -> "Syntax Error"
#   3 -> too many parameters      4 -> too few parameters
#   5 -> generic "Error"          anything else -> silence
msg_error()
{
    case "$1" in
        1)
            echo "Usage: ./bdsh.sh [-k] [-f <db_file>] (put (<clef> | $<clef>) (<valeur> | $<clef>) |" 1>&2
            echo "       del (<clef> | $<clef>) [<valeur> | $<clef>] |" 1>&2
            echo "       select [<expr> | $<clef>] |" 1>&2
            echo "       flush" 1>&2
            ;;
        2) echo "Syntax Error" 1>&2 ;;
        3) echo "Error: too many parameters" 1>&2 ;;
        4) echo "Error: too few parameters" 1>&2 ;;
        5) echo "Error" 1>&2 ;;
    esac
}
# Validate the argument count ($1 is expected to be $#) and, when no -f
# file was given, check that the default data base exists.  Prints the
# relevant error plus usage and exits 1 on failure.
verif()
{
    # Arithmetic comparison: the original [[ $1 > 6 ]] compared *strings*,
    # so e.g. 10 arguments were not rejected ("10" sorts before "6").
    if [[ $1 -gt 6 ]]; then
        msg_error 3
        msg_error 1
        exit 1
    elif [[ $1 -eq 0 ]]; then
        msg_error 4
        msg_error 1
        exit 1
    fi
    if [ "$file" = "" ]; then
        if [ ! -e "$df_file" ]; then
            echo "Error: No such file (data base)"
            exit 1
        fi
    fi
}
# Parse global options from the full argument list:
#   -k        -> sets is_k=1
#   -f <file> -> sets is_f=1 and stores the *following* argument in $file
# Results are returned through the globals is_k, is_f and file.
init_args()
{
    local expect_file=0
    # Initialising is_f avoids the "[ = 1 ]" unary-operator errors the
    # original hit whenever it was tested before -f had been seen.
    is_f=0
    for i in "$@"
    do
        if [ "$i" = "-k" ]; then
            is_k=1
        fi
    done
    for i in "$@"
    do
        if [ "$i" = "-f" ]; then
            is_f=1
            expect_file=1
        elif [ "$expect_file" = 1 ]; then
            # Only the argument immediately after -f is the db file; the
            # original kept overwriting $file with every later argument.
            file=$i
            expect_file=0
        fi
    done
}
# Truncate the active data-base file.  The original hard-coded "sh.db" in
# the current directory, ignoring both the -f option and the ./sh.db
# default path stored in $file.
flush()
{
    echo -n "" > "$file"
}
# Data-base operations: not implemented yet -- each stub just prints an
# empty line.
put_db()
{
    echo ""
}
select_db()
{
    echo ""
}
del_db()
{
    echo ""
}
# --- main ---
# Validate the argument count, parse -k/-f, resolve the db file and dump
# the parsed options (debug output; no db command is dispatched yet).
echo "$#"
verif $#
init_args $*
# NOTE(review): if -f was not passed, is_f may be unset here (depending on
# init_args), making this test run as "[ = 1 ]" and print an error -- verify.
if [ $is_f = 1 ]; then
    if [ ! -e $file ]; then
        echo "Error: No such file (data base)"
        exit 1
    fi
else
    file=$df_file
fi
echo "is_k = $is_k"
echo "-f = $file"
exit 0
| true |
fbe05d2878bb8e9138f888186a6ba555627e97cf | Shell | tsalo/phaseprep | /container/05.install_phaseprep.sh | UTF-8 | 414 | 2.796875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Container build step: install a patched nipype fork from GitHub, then
# install phaseprep and its requirements from the CFMM GitLab.
# Git
DIR=/opt/git
mkdir $DIR
cd $DIR
git clone https://github.com/ostanley/nipype.git
cd $DIR/nipype
pip3 install .
# Sanity check: import the freshly installed nipype and print its version.
python3 -c "import nipype; print(nipype.__version__)"
cd $DIR
git clone https://git.cfmm.robarts.ca/nipype/phaseprep.git
cd $DIR/phaseprep
# Install requirements starting with fmriprep dependencies
pip3 install -r requirements.txt
# add phaseprep to path
python3 setup.py install
| true |
3dc51402a85c4a1ccd9452fa17a27553f901b678 | Shell | tabulon-ext/zeesh | /plugins/theme/solace.zsh | UTF-8 | 604 | 3.328125 | 3 | [
"MIT"
] | permissive | export VIRTUAL_ENV_DISABLE_PROMPT=true
# Build the zsh right-hand prompt string: the last command's non-zero exit
# code (red), the active virtualenv's directory name, and vcs_info output.
_rprompt() {
	local s=''
	# print return code if non-zero
	local rc=$?
	if [[ $rc != 0 ]]; then
		s="%F{red}$rc!%f "
	fi
	# print virtualenv name if active
	if [ $VIRTUAL_ENV ]; then
		# ${${(s:/:)VIRTUAL_ENV}[-1]} splits the path on "/" and keeps
		# the last component (the env's directory name).
		s="$s%K{black}${${(s:/:)VIRTUAL_ENV}[-1]}%k"
	fi
	# display vcs info
	if [ "$vcs_info_msg_0_" ]; then
		[ $VIRTUAL_ENV ] && s="$s "
		s="$s%K{black}$vcs_info_msg_0_%k"
	fi
	echo -e $s
}
# Left prompt: user@host and cwd; right prompt re-runs _rprompt each time
# (single quotes defer the command substitution until prompt expansion).
PROMPT='%B%F{magenta}%n%b%f%F{magenta}@%f%B%F{magenta}%m%b%f %F{blue}%B${PWD/$HOME/~}%b%F{magenta} ›%f '
RPROMPT='$(_rprompt)'
| true |
ced5f8b114318de9fbaf090f0255234a441bd09a | Shell | slochower/scaling-octo-garbanzo | /build/build.sh | UTF-8 | 3,784 | 3.328125 | 3 | [
"CC-BY-4.0",
"LicenseRef-scancode-unknown-license-reference",
"CC0-1.0"
] | permissive | set -o errexit
# Set timezone used by Python for setting the manuscript's date
export TZ=Etc/UTC
# Default Python to read/write text files using UTF-8 encoding
export LC_ALL=en_US.UTF-8
# Generate reference information
echo "Retrieving and processing reference metadata"
manubot \
--content-directory=content \
--output-directory=output \
--cache-directory=ci/cache \
--log-level=INFO
BUILD_HTML="false"
BUILD_PDF="false"
BUILD_DOCX="false"
BUILD_LATEX="true"
BUILD_PDF_VIA_LATEX="true"
# pandoc settings
# Exports so that we can convert and resize figures.
CSL_PATH=build/assets/journal-of-chemical-theory-and-computation.csl
DOCX_PATH=build/assets/pandoc.docx
BIBLIOGRAPHY_PATH=output/references.json
INPUT_PATH=output/manuscript.md
# Make output directory
mkdir -p output
# Create HTML output
# http://pandoc.org/MANUAL.html
if [ "$BUILD_HTML" = "true" ];
then
echo "Exporting HTML manuscript"
pandoc --verbose \
--from=markdown \
--to=html5 \
--filter=pandoc-fignos \
--filter=pandoc-eqnos \
--filter=pandoc-tablenos \
--bibliography=$BIBLIOGRAPHY_PATH \
--csl=$CSL_PATH \
--metadata link-citations=true \
--mathjax \
--css=github-pandoc.css \
--include-in-header=build/assets/analytics.html \
--include-after-body=build/assets/anchors.html \
--include-after-body=build/assets/hypothesis.html \
--output=output/manuscript.html \
$INPUT_PATH
fi
# Create PDF output
if [ "$BUILD_PDF" = "true" ];
then
echo "Exporting PDF manuscript"
ln -s content/images images
pandoc \
--from=markdown \
--to=html5 \
--pdf-engine=weasyprint \
--pdf-engine-opt=--presentational-hints \
--filter=pandoc-fignos \
--filter=pandoc-eqnos \
--filter=pandoc-tablenos \
--bibliography=$BIBLIOGRAPHY_PATH \
--csl=$CSL_PATH \
--metadata link-citations=true \
--webtex=https://latex.codecogs.com/svg.latex? \
--css=webpage/github-pandoc.css \
--output=output/manuscript.pdf \
$INPUT_PATH
rm -r images
fi
# Create DOCX output when user specifies to do so
if [ "$BUILD_DOCX" = "true" ];
then
echo "Exporting DOCX manuscript"
pandoc \
--from=markdown \
--to=docx \
--filter=pandoc-fignos \
--filter=pandoc-eqnos \
--filter=pandoc-tablenos \
--filter=pandoc-img-glob \
--bibliography=$BIBLIOGRAPHY_PATH \
--csl=$CSL_PATH \
--reference-doc=$DOCX_PATH \
--metadata link-citations=true \
--resource-path=.:content \
--output=output/manuscript.docx \
$INPUT_PATH
fi
if [ "$BUILD_LATEX" = "true" ];
then
echo "Exporting LATEX manuscript"
pandoc \
--from=markdown \
--to=latex \
--filter=pandoc-fignos \
--filter=pandoc-eqnos \
--filter=pandoc-tablenos \
--filter=pandoc-img-glob \
--bibliography=$BIBLIOGRAPHY_PATH \
--csl=$CSL_PATH \
--template=build/assets/nih4.tex \
--metadata link-citations=true \
--number-sections \
--resource-path=.:content \
-s --output=output/manuscript.tex \
$INPUT_PATH
fi
if [ "$BUILD_PDF_VIA_LATEX" = "true" ];
then
echo "Exporting LATEX (PDF) manuscript"
FONT="Helvetica"
COLORLINKS="true"
pandoc \
--from=markdown \
--filter=pandoc-eqnos \
--filter=pandoc-tablenos \
--filter=pandoc-img-glob \
--filter=pandoc-chemfig \
--filter=pandoc-fignos \
--lua-filter=build/latex-color.lua \
--bibliography=$BIBLIOGRAPHY_PATH \
--csl=$CSL_PATH \
--template=build/assets/nih4.tex \
--metadata link-citations=true \
--resource-path=.:content:../content \
--pdf-engine=xelatex \
--variable mainfont="${FONT}" \
--variable sansfont="${FONT}" \
--variable colorlinks="${COLORLINKS}" \
--output=output/manuscript.pdf \
$INPUT_PATH
fi
echo "Build complete"
| true |
d8de7322542fe9d9c1d3f23d45f582a04430e5b2 | Shell | Teaspot-Studio/Urho3D-Haskell | /Urho3D/setup-hook.sh | UTF-8 | 169 | 2.515625 | 3 | [
"MIT"
] | permissive | addUrho3DPath () {
addToSearchPath PKG_CONFIG_PATH $1/lib/pkgconfig
export URHO3D_PREFIX_PATH="$1/share/Urho3D/Resources"
}
addEnvHooks "$hostOffset" addUrho3DPath
| true |
1b112a7459358858669ea387aa21705299546b28 | Shell | bridder/factory | /init/init.d/remount_tmp_as_exec.sh | UTF-8 | 585 | 2.5625 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/sh
# Copyright 2019 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# In http://crbug.com/936818 and also http://crrev.com/c/1494694, /tmp is
# mounted as noexec by default, and we won't remount it to executable at
# test image anymore.
# This change break factory stuff and cause b/138982809. Also, it may break the
# factory tests, factory services..., etc. We would like to re-enabled it at
# factory environment.
main() {
  # -n avoids updating /etc/mtab; "remount" keeps the existing /tmp mount
  # but re-enables exec and suid on it.
  mount -n -o remount,exec,suid /tmp
}
main "$@"
| true |
804438d808eec9551f6f616fd035e0c2df218fc2 | Shell | rochecirclecipoc/simpleR | /.circleci/install_R_pkgs.sh | UTF-8 | 1,052 | 3.640625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
set -eo pipefail
# Build one install.packages(c("pkg1", "pkg2", ...)) call from the package
# list file, run it, then print the installed version/path of each package.
# information about packages file
# r-pkg-deps.txt file ends with empty line
# contains list of R-packages to be installed, one per line
# (NOTE(review): the trailing empty line matters -- "while read" drops a
# final line that has no newline terminator.)
#
result_line=""
file="./.circleci/r-pkg-deps.txt"
if [ -f "$file" ]; then
  while read -r line
  do
    echo "line = $line"
    # Join package names with ", " and wrap each in escaped quotes so the
    # accumulated string is a valid R character vector body.
    if [ -n "$result_line" ]; then
      result_line="$result_line, "
    fi
    result_line="$result_line \"$line\""
  done < "$file"
  if [ -n "$result_line" ]; then
    Rscript --vanilla -e 'options(repos = c(CRAN = "https://cloud.r-project.org"))' \
            -e '.libPaths()' \
            -e "install.packages(c($result_line))"
    echo "information about R-packages from list"
    while read -r line
    do
      echo "details about package: $line"
      Rscript --vanilla -e 'installed.packages() -> d1' \
              -e "print(d1[rownames(d1) == '$line', c('Version', 'LibPath')])"
    done < "$file"
  fi
else
  echo "R packages dependencies file ($file) does not exist"
fi
| true |
53dbdafab61917b584daab405b952f21c3e1e376 | Shell | hungnguyenm/dotfiles | /zsh_custom/func_ssh.zsh | UTF-8 | 6,265 | 3.65625 | 4 | [] | no_license | [ -r ~/.ssh/config ] && _ssh_config=($(cat ~/.ssh/config | sed -ne 's/Host[=/t ]\([^\*]\)/\1/p')) || _ssh_config=()
## rsync ##
alias rsync-copy-sudo='rsync -avz --progress -h -e ssh --rsync-path="sudo rsync"'
alias rsync-move-sudo='rsync -avz --progress -h --remove-source-files -e ssh --rsync-path="sudo rsync"'
alias rsync-update-sudo='rsync -avzu --progress -h -e ssh --rsync-path="sudo rsync"'
alias rsync-synchronize-sudo='rsync -avzu --delete --progress -h -e ssh --rsync-path="sudo rsync"'
## ssh ##
# improved ssh to send client host name env variable
function ssh() {
if (( ${#} == 1 )); then
if [[ $_ssh_config =~ (^|[[:space:]])$1($|[[:space:]]) ]]; then
command ssh -t "$1" "if type zsh >/dev/null 2>&1; then SSH_CLIENT_SHORT_HOST="${PREFER_HOST_NAME:-${SHORT_HOST}}" zsh; elif type $SHELL >/dev/null 2>&1; then $SHELL; else bash; fi;"
else
command ssh "$@"
fi
else
command ssh "$@"
fi
}
# ssh with forwarding port for VNC
function ssh-tunnel() {
if [[ -r ~/.ssh/config ]]; then
if [[ -n $1 ]] && [[ $_ssh_config =~ (^|[[:space:]])$1($|[[:space:]]) ]]; then
_port_list=$(ssh "$1" 'bash -s' < $DOTFILES_DIR/scripts/get_tunnel_ports.sh)
if [[ -n "_port_list" ]]; then
echo "Tunneling..."
command ssh "$1" 'bash -s' < $DOTFILES_DIR/scripts/get_tunnel_info.sh | \
sed -e "s/\(spice - \).*\(127.0.0.1:\)\([0-9]*\)/\1950\3 forward to \2590\3/g;s/\(vnc - \).*\(127.0.0.1:\)\([0-9]*\)/\1950\3 forward to \2590\3/g"
_ssh_options=$1
while read i; do
[[ -z $i ]] && continue
nc -z localhost "95${i: -2}" 2> /dev/null && echo "Port 95${i: -2} is in used!" && return 1
_ssh_options="$_ssh_options -L 95${i: -2}:localhost:$i"
done <<< "$_port_list"
command ssh $=_ssh_options -t "SSH_CLIENT_SHORT_HOST="${PREFER_HOST_NAME:-${SHORT_HOST}}-tunnel" '$SHELL'"
else
echo "No VNC port available!"
fi
else
echo "fatal: ssh-tunnel only works with hosts defined in ~/.ssh/config\n\rUsage: ssh-tunnel host"
fi
else
echo "fatal: ~/.ssh/config doesn't exist"
fi
}
# copy authorized keys to host
_auth_profile="default"
function ssh-copy-auth() {
if [[ -r ~/.ssh/config ]]; then
if [[ -n $1 ]] && [[ $_ssh_config =~ (^|[[:space:]])$1($|[[:space:]]) ]]; then
if [[ -n $2 ]] && [[ $_auth_profile =~ (^|[[:space:]])$2($|[[:space:]]) ]]; then
git_clone_private
command ssh "$1" 'mkdir -p ~/.ssh'
command $PRIVATE_FOLDER/data/"$2"/authorized_keys scp "$1":~/.ssh/
git_remove_private
else
echo "fatal: invalid profile\n\rAvailable profiles: default"
fi
else
echo "fatal: ssh-copy-auth only works with hosts defined in ~/.ssh/config\n\rUsage: ssh-copy-auth host"
fi
else
echo "fatal: ~/.ssh/config doesn't exist"
fi
}
# copy public key to authorized_keys at host
function ssh-copy-pubkey() {
auth="~/.ssh/authorized_keys"
if [[ -n $1 ]] && [[ -n $2 ]]; then
if [[ -f $1 ]] && [ ${1: -4} = ".pub" ]; then
key=$(<$1)
cmd="mkdir -p ~/.ssh;touch $auth;echo $key | tee -a $auth"
command ssh "$2" "$cmd"
else
echo "fatal: invalid pubkey"
fi
else
echo "Usage: ssh-copy-pubkey path-to-file.pub hostname/ipaddr"
fi
}
## routers ##
_routers="erxh erxw"
function rb-vtun4() {
if [[ -r ~/.ssh/config ]]; then
if [[ -n $1 ]] && [[ $_routers =~ (^|[[:space:]])$1($|[[:space:]]) ]]; then
ssh $1 "sudo /config/scripts/restart_vtun4.sh"
else
echo "fatal: rb-vtun4 only works with erxh erxw"
fi
else
echo "fatal: ~/.ssh/config doesn't exist"
fi
}
function erx-clear-ip() {
if [[ -r ~/.ssh/config ]]; then
if [[ -n $1 ]] && [[ $_routers =~ (^|[[:space:]])$1($|[[:space:]]) ]]; then
local _ip=$2
if [[ -n $_ip ]] && [[ $_ip != ${_ip#*[0-9].[0-9]} ]]; then
ssh $1 "clear dhcp lease ip $2"
else
echo "fatal: erx-clear-ip requires an ip address\n\rusage: erx-clear-ip router ip"
fi
else
echo "fatal: erx-clear-ip only works with erxh erxw\n\rusage: erx-clear-ip router ip"
fi
else
echo "fatal: ~/.ssh/config doesn't exist"
fi
}
## sshfs ##
# fs: mount remote ssh $HOST to ~/remote/$HOST folder
function fs() {
if [[ -r ~/.ssh/config ]]; then
if [[ -n $1 ]] && [[ $_ssh_config =~ (^|[[:space:]])$1($|[[:space:]]) ]]; then
echo "Mounting remote host "$1":"$2""
mkdir -p ~/remote/"$1"
if [[ -n $2 ]] ; then
sshfs "$1":"$2" ~/remote/"$1"
else
sshfs "$1": ~/remote/"$1"
fi
else
echo "fatal: fs only works with hosts defined in ~/.ssh/config\n\rUsage: fs host OR fs host path"
fi
else
echo "fatal: ~/.ssh/config doesn't exist"
fi
}
# fsu: unmount sshfs
function fsu() {
if [[ -r ~/.ssh/config ]]; then
if [[ -n $1 ]] && [[ $_ssh_config =~ (^|[[:space:]])$1($|[[:space:]]) ]]; then
echo "Unmounting remote host "$1""
case `uname` in
Darwin) umount ~/remote/"$1"
;;
Linux) fusermount -u ~/remote/"$1"
;;
esac
else
echo "fatal: fsu only works with hosts defined in ~/.ssh/config\n\rUsage: fsu host"
fi
else
echo "fatal: ~/.ssh/config doesn't exist"
fi
}
# fsc: cd to sshfs mounted folder
function fsc() {
if [[ -r ~/.ssh/config ]]; then
if [[ -n $1 ]] && [[ $_ssh_config =~ (^|[[:space:]])$1($|[[:space:]]) ]]; then
cd ~/remote/"$1"
else
echo "fatal: fsc only works with hosts defined in ~/.ssh/config\n\rUsage: fsc host"
fi
else
echo "fatal: ~/.ssh/config doesn't exist"
fi
}
# fsl: list all sshfs mounted folders
function fsl() {
mount | sed -ne 's/\(\/remote\/\)/\1/p'
}
# fso: mount if not mounted then open sshfs folder in Finder/Nautilus
function fso() {
if [[ -n $1 ]]; then
if ! (mount | grep remote/"$1" > /dev/null); then
if [[ -n $2 ]]; then
fs "$1" "$2"
else
fs "$1"
fi
fi
case `uname` in
Darwin)
ofd ~/remote/"$1"
;;
Linux)
nautilus ~/remote/"$1"
;;
esac
else
echo "Usage: fso host OR fso host path"
fi
}
compctl -k "($_ssh_config)" fs fsu fsc fso ssh-tunnel ssh-copy-auth rb-vtun4 | true |
404d473cdcb18d1030dd8f567064b78d667d99b6 | Shell | JeffpanUK/MLSALT11Speech-master | /pre-task/evaluate_inter_eval.sh | UTF-8 | 635 | 2.578125 | 3 | [] | no_license | mainpath='/home/jp697/Major'
# Rescore HTK lattices with the interpolated LM for every entry listed in
# $store/temp2, wait for the rescored MLF to appear, then score eval03.
devpath='/home/jp697/Major/lattices'
store='/home/jp697/Major/exp/temp_file'
task1path="${mainpath}/exp/task1"
#evaluate the performance of the merged lm_eval
echo 'Evaluate the performance of WER of 5 LM using eval03 shows'
while read line
do
${mainpath}/scripts/lmrescore.sh $line lattices decode lm_int_eval plp-tglm_int_eval FALSE
done < "$store/temp2"
#score eval03 of all LMs
echo "receiving the mfl"
# Busy-wait until the rescored MLF exists.  NOTE(review): this loop spins
# at full CPU; a short sleep per iteration would be kinder.
while(true)
do
test -e "./plp-tglm_int_eval/eval03_DEV013-20010220-XX2000/rescore/rescore.mlf" && break
done
echo 'scoring all the files'
./scripts/score.sh plp-tglm_int_eval eval03 rescore
| true |
2eed6f16a645537b979c219dd529692ff4583458 | Shell | hupili/utility | /recycled/tex-spell.sh | UTF-8 | 876 | 3.53125 | 4 | [] | no_license | #!/bin/bash
# hupili
# 2011-10-07
# Check a LaTeX source file with 'detex' + 'aspell' and simply list the
# misspelled words (deduplicated, one per line).
# =========== 2012-07-06
echo "This script is deprecated."
echo "Read the script file for explanation."
echo "Press Ctrl+C to end"
echo "Press any key to continue..."
# -r keeps read from mangling backslashes; the value itself is discarded,
# we only wait for the user to press Enter.
read -r tmp
# It is known that the cascaded detex | aspell pipeline generates many
# non-meaningful entries (human names, abbreviations, ...). A bigger
# problem is that the words are reported without any location in the
# article: you need to find them yourself with your editor's search.
# VIM's built-in spell checking is the advocated replacement:
#
#   :set spell
#
# For more information:
#   http://vimdoc.sourceforge.net/htmldoc/spell.html
# ===========
# Require exactly one argument: the LaTeX source file to check.
if [[ $# -eq 1 ]] ; then
	file="$1"
else
	echo "usage: $0 {file}"
	exit 255
fi
# Strip TeX markup, list misspelled words, deduplicate.
detex "$file" | aspell list | sort -u
exit 0
| true |
df20ced90f9c64303c292d4fd5b20a88377657ea | Shell | gitter-lab/SINGE | /tests/standalone_branching_test.sh | UTF-8 | 1,032 | 3.65625 | 4 | [
"GPL-2.0-only",
"MIT",
"GPL-1.0-or-later"
] | permissive | #!/bin/bash
# Run SINGE on a branching dataset in standalone mode using the Docker image
set -o errexit

# Only one set of hyperparameters because SINGE has to run three times
# (once per branch) for each hyperparameter combination.
# Keep only the third line of the hyperparameters file.
hyperparams=tests/example_hyperparameters.txt
reducedhyperparams=tests/reduced_hyperparameters.txt
sed -n '3p' "$hyperparams" > "$reducedhyperparams"

# Run SINGE on the example data and inspect the output
echo Testing SINGE standalone mode in Dockerized SINGE.sh with a branching dataset
output=branching_output
docker run -v "$(pwd)":/SINGE -w /SINGE agitter/singe:tmp \
	standalone data1/X_BranchTest.mat data1/gene_list.mat "$output" "$reducedhyperparams"
ls -l "$output"
rm "$reducedhyperparams"

# Run the tests to evaluate the SINGE outputs from the standalone script
docker run -v "$(pwd)":/SINGE -w /SINGE --entrypoint "/bin/bash" agitter/singe:tmp -c \
	"source ~/.bashrc; conda activate singe-test; tests/compare_branching_output.sh $output"
| true |
d9cc72c97fd962181b4f4ede7c50b6b9bb57cc2d | Shell | RavitejaAmruthapu/Demo | /read-file.sh | UTF-8 | 199 | 3.6875 | 4 | [] | no_license | echo -e "Enter a file name:\c"
# Read the file name typed at the prompt (-r keeps backslashes literal).
read -r fname
# Nothing entered: exit quietly.
if [ -z "$fname" ]
then
	exit
fi
# Remember the controlling terminal so stdin can be restored afterwards.
terminal=$(tty)
# Take standard input from the chosen file (quoted: names may have spaces).
exec < "$fname"
count=1
# Number and print each line; the `|| [ -n "$line" ]` clause keeps a
# final line without a trailing newline from being silently dropped.
while read -r line || [ -n "$line" ]
do
	echo "$count.$line"
	count=$((count + 1))
done
exec < $terminal | true |
fd39ab3566cf19112a84708447881327b25f4db4 | Shell | PHuhn/RaspberryPI | /Nagios/libexec/check_state_statusjson.sh | UTF-8 | 5,293 | 3.75 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# 2019-01-30 V1.0.0 Phil of Northern Software Group
# Return the current state/status of the service or escalates from warning to
# critical state.
# this uses the following to obtain state/status information
# 1) curl localhost/nagios/cgi-bin/statusjson.cgi
# 2) awk script against nagios/var/status.dat
# ============================================================================
# 2019-02-05 V1.0.8 Phil
# 2019-03-12 V1.0.9 Phil empty PASSWD forces only awk script
# 2019-03-16 V1.0.10 Phil Lint with www.shellcheck.net
#
# program values
PROGNAME=$(basename "$0")
REVISION="1.0.10"
# exit codes
STATE_OK=0
STATE_WARNING=1
STATE_CRITICAL=2
STATE_UNKNOWN=3
# parameter values
HOST=localhost
SERVICE=unknown
USERID=nagiosadmin
PASSWD=
ESCALATE=true
LOGGING=true
VERBOSE=false
#
# Print the plugin's version banner followed by full usage/option help.
# NOTE: the here-document below is emitted verbatim (with ${...} expanded
# to the current defaults), so keep any new commentary outside of it.
print_help() {
	print_version
	cat <<EOF
Usage: ${PROGNAME} [options]
 Options
  -H  service hostname, default value: ${HOST}
  -S  service description, default value: ${SERVICE}
  -U  cgi user name, default value: ${USERID}
  -P  cgi password, default value: ${PASSWD}
  -e  escalate to critical, default value: ${ESCALATE}
  -l  logging to /tmp, default value: ${LOGGING}
  -v  logging to stdout, default value: ${VERBOSE}
 Example: ${PROGNAME} -H SensorHost -S "sensor-19" -U nagiosuser -P passw0rd
EOF
}
print_version() {
echo "Script: ${PROGNAME}, version: ${REVISION}"
}
#
# Information options
case "${1}" in
--help)
print_help
exit "${STATE_OK}"
;;
-h)
print_help
exit "${STATE_OK}"
;;
--version)
print_version
exit "${STATE_OK}"
;;
-V)
print_version
exit "${STATE_OK}"
;;
esac
#
while getopts ":e:H:l:S:U:P:v:" option
do
case "${option}"
in
H) HOST=${OPTARG};;
S) SERVICE=${OPTARG};;
U) USERID=${OPTARG};;
P) PASSWD=${OPTARG};;
e) ESCALATE=$(echo "${OPTARG}" | tr '[:upper:]' '[:lower:]');;
l) LOGGING=$(echo "${OPTARG}" | tr '[:upper:]' '[:lower:]');;
v) VERBOSE=$(echo "${OPTARG}" | tr '[:upper:]' '[:lower:]');;
esac
done
#
if [ "${LOGGING}" == "true" ]; then
LOG_FILE=/tmp/${PROGNAME}.log
if [ ! -f "${LOG_FILE}" ]; then
echo "$$ ${PROGNAME} initializing ..." >> "${LOG_FILE}"
chmod 666 "${LOG_FILE}"
fi
else
# if don't want LOG_FILE then change to /dev/null
LOG_FILE=/dev/null
fi
# This overrides LOGGING value
if [ "${VERBOSE}" == "true" ]; then
LOG_FILE=/dev/stdout
fi
echo "$$ ${PROGNAME} starting at $(date '+%Y-%m-%d %H:%M:%S') ..." >> "${LOG_FILE}"
#
QUERY="query=service&hostname=${HOST}&servicedescription=${SERVICE}"
echo "$$ ${PROGNAME} ${QUERY}" >> "${LOG_FILE}"
FILE=/tmp/check_${SERVICE}_$$
STATUS_FILE=/usr/local/nagios/var/status.dat
STATE=
OUTPUT=
# can test with wrong password
if [ "X${PASSWD}" != "X" ]; then
curl -v "http://${USERID}:${PASSWD}@localhost/nagios/cgi-bin/statusjson.cgi?${QUERY}" 1> "${FILE}" 2> /dev/null
if [ -s "${FILE}" ]; then
grep "last_hard_state.:" "${FILE}" >/dev/null 2>&1
if [ $? == "0" ]; then
echo "$$ ${PROGNAME} processing statusjson.cgi" >> "${LOG_FILE}"
STATE=$(grep "last_hard_state.:" "${FILE}" | cut -d ":" -f 2 | tr -cd "[:digit:]") 2> /dev/null
OUTPUT=$(grep "\"plugin_output.:" "${FILE}" | tr -s ' ' | tr -d '"' | sed -e 's/ plugin_output: //' -e 's/,$//') 2> /dev/null
fi
else
echo "$$ ${PROGNAME} statusjson.cgi empty, awk command against ${STATUS_FILE}" >> "${LOG_FILE}"
fi
fi
if [ "${STATE}X" == "X" ]; then
awk -v FS='\n' -v RS='\n\n' -v h_name="${HOST}" -v s_name="${SERVICE}" 'BEGIN {host="host_name="h_name; service="service_description="s_name; print host", "service;}{ if (match($0,host) && match($0,service)) { print "##" $0; } }' ${STATUS_FILE} > "${FILE}"
STATE=$(grep "last_hard_state=" "${FILE}" | cut -d "=" -f 2 | tr -cd "[:digit:]") 2> /dev/null
OUTPUT=$(grep "plugin_output=" "${FILE}" | cut -d "=" -f 2 | sed 's/,$//') 2> /dev/null
fi
chmod 666 "${FILE}"
#
echo "$$ ${PROGNAME} state: ${STATE}, output: ${OUTPUT}" >> "${LOG_FILE}"
# move output file to a single 'last' file
cp -f "${FILE}" "/tmp/check_${SERVICE}_last"
rm "${FILE}"
#
if [ "${STATE}X" != "X" ]; then
if [ "${STATE}" == "${STATE_OK}" ]; then
if [ "${OUTPUT}X" != "X" ]; then
echo "${OUTPUT}"
else
echo "OK"
fi
exit "${STATE_OK}"
elif [ "${STATE}" == "${STATE_WARNING}" ]; then
if [ "${OUTPUT}X" != "X" ]; then
echo "${OUTPUT}"
else
if [ "X${ESCALATE}" == "Xtrue" ]; then
echo "CRITICAL"
else
echo "WARNING"
fi
fi
if [ "X${ESCALATE}" == "Xtrue" ]; then
exit "${STATE_CRITICAL}"
fi
exit "${STATE_WARNING}"
elif [ "${STATE}" == "${STATE_CRITICAL}" ]; then
if [ "${OUTPUT}X" != "X" ]; then
echo "${OUTPUT}"
else
echo "CRITICAL"
fi
exit "${STATE_CRITICAL}"
else
echo "UNKNOWN"
exit "${STATE_UNKNOWN}"
fi
else
echo "UNKNOWN"
exit "${STATE_UNKNOWN}"
fi
# == end-of-script ==
| true |
ec780e08fe471e2b95410e68aea41cf3cd07f36b | Shell | ekeyme/bash-toggle-runner | /toggle-run | UTF-8 | 1,579 | 3.84375 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# display usage
usage() {
cat <<EOF
Run your app in Run-Pause-Run-Pause toggle model.
Usage:
$APP_NAME [-a i|ri|run] [-p path] [your_app[+id]] [-- app args...]
Option:
-a, action, default run without initial. i: initial, run first; ri, reverse initial, pause in the first round.
-p, directory to store toggle status, default $marker_store_path.
EOF
}
# config
marker_store_path=$HOME/bin/tmp/toggle-run
status_toggle_bin=$HOME/bin/status-toggle.sh
APP_NAME=$(basename $0)
action='run' # default action
# read the options
TEMP=$(getopt -o 'ha:p:' -l 'help' -n $APP_NAME -- "$@")
eval set -- "$TEMP"
while true; do
case $1 in
-a)
case $2 in
i|ri|run) action=$2; shift 2
;;
*) echo "${APP_NAME}: '$2': invalid action" 2>&1; exit 1
;;
esac
;;
-p) marker_store_path=$2; shift 2
;;
-h|--help) usage&&exit 0
;;
--) shift; break
;;
esac
done
[[ -z $1 ]] && echo "${APP_NAME}: Missing app." 1>&2 && exit 1
if [[ $1 = *+* ]]; then
app=${1%+*}
app_id=${1##*+}
else
app=$1
fi
shift
[[ ! -w $marker_store_path ]] && echo "Permission denied: directory '$marker_store_path'" 1>&2 && exit 1
job_identifer=$marker_store_path/$(basename $app)${app_id}
if [[ $action = i ]]; then
$status_toggle_bin -ai $job_identifer
elif [[ $action = ri ]]; then
$status_toggle_bin -ari $job_identifer
else
$status_toggle_bin $job_identifer && exec $app "$@"
fi | true |
1be33fab1d78a4b46efcf50e50d84a7aa2904db1 | Shell | cyverseuk/ontologizer | /wrapper/wrapper.sh | UTF-8 | 1,777 | 3.328125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# debug: collect artifacts for post-mortem inspection.
# Moves every *.sh file and the "lib" directory named in $rmthis (a
# snapshot of `ls` output taken at script start) into ./.debug.
function debug {
	echo "creating debugging directory"
	mkdir .debug
	# NOTE(review): $rmthis is split on whitespace, so names containing
	# spaces would break -- presumably acceptable for this work directory.
	for word in ${rmthis}
	do
		case "${word}" in
			*.sh|lib) mv "${word}" .debug ;;
		esac
	done
}
rmthis=`ls`
echo ${rmthis}
ARGSU=" ${calculation} ${dot} ${ignore} ${mtc} ${maxAlpha} ${maxBeta} ${mcmcSteps} ${annotation} ${resamplingsteps} ${sizetolerance} "
ASSOCIATIONU="${association}"
GOU="${go}"
POPULATIONU="${population}"
STUDYSETU="${studyset}"
INPUTSU="${ASSOCIATIONU}, ${GOU}, ${POPULATIONU}, ${STUDYSETU} "
echo "Association file is " "${ASSOCIATIONU}"
echo "GO file is " "${GOU}"
echo "Population file is " "${POPULATIONU}"
echo "Study set is " "${STUDYSETU}"
echo "Input files are " "${INPUTSU}"
echo "Arguments are " "${ARGSU}"
CMDLINEARG="ontologizer ${ARGSU} --association ${ASSOCIATIONU} --go ${GOU} --population ${POPULATIONU} --studyset ${STUDYSETU} --outdir output"
echo ${CMDLINEARG};
chmod +x launch.sh
echo universe = docker >> lib/condorSubmitEdit.htc
echo docker_image = cyverseuk/ontologizer:v2.1 >> lib/condorSubmitEdit.htc ######
echo executable = ./launch.sh >> lib/condorSubmitEdit.htc #####
echo arguments = ${CMDLINEARG} >> lib/condorSubmitEdit.htc
echo transfer_input_files = ${INPUTSU}, launch.sh >> lib/condorSubmitEdit.htc
#echo transfer_output_files = output >> lib/condorSubmitEdit.htc
cat /mnt/data/apps/ontologizer/lib/condorSubmit.htc >> lib/condorSubmitEdit.htc
less lib/condorSubmitEdit.htc
jobid=`condor_submit -batch-name ${PWD##*/} lib/condorSubmitEdit.htc`
jobid=`echo $jobid | sed -e 's/Sub.*uster //'`
jobid=`echo $jobid | sed -e 's/\.//'`
#echo $jobid
#echo going to monitor job $jobid
condor_tail -f $jobid
debug
exit 0
| true |
5890ad34e6e96b57229006b224816e32642e82d6 | Shell | foster-gabe/Pfal-eQTL | /pfalsrc/cisQTLrun_QTLtools.sh | UTF-8 | 877 | 2.6875 | 3 | [] | no_license | #!/bin/sh
#Let's run some QTLLLLLLLLLLLLLL
mkdir ciseQTL
# For each timepoint run the entire analysis:
for k in T4 T30 T44
do
# For each # inferred covariates being tested:
for j in 0 1 2 3 4 5 10 15 20 25 30 nobatch nocrc
do
QTLtools cis --vcf $k\_norm.vcf.gz --bed $k\_norm.bed.gz --cov PEERdata/$k\_norm_$j.PEER_covariates.txt --out ciseQTL/$k\_cisnom_$j.txt --nominal 1 --window 1000000
QTLtools cis --vcf $k\_norm.vcf.gz --bed $k\_norm.bed.gz --cov PEERdata/$k\_norm_$j.PEER_covariates.txt --out ciseQTL/$k\_cisperm_$j.txt --permute 1000 10000 --window 1000000
# For each significance level:
for i in 0.05 0.10 0.25
do
Rscript runFDR_cis.R ciseQTL/$k\_cisperm_$j.txt $i ciseQTL/$k\_FDRcuts_$i\_$j.txt
Rscript NomFilter.R ciseQTL/$k\_cisnom_$j.txt ciseQTL/$k\_FDRcuts_$i\_$j.txt.thresholds.txt ciseQTL/$k\_cisnom_sig_$j.txt
done
done
done
| true |
1cebffcc285b8144062cdb16ebeed6d63bc4be71 | Shell | makehappen/launch | /src/server/launch.sh | UTF-8 | 739 | 2.65625 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# call script with
# LAUNCH_CONFIG=... && . src/server/launch.sh
# load config
. aws/servers/launch-config/$LAUNCH_CONFIG.sh
# create ec2 instance from owned image
aws ec2 run-instances \
--profile $AWS_PROFILE \
--image-id $AWS_IMAGE_ID \
--count 1 \
--instance-type $AWS_INSTANCE_TYPE \
--key-name $AWS_KEY_NAME \
--associate-public-ip-address \
--tag-specifications "ResourceType=instance,Tags=[{Key=Name,Value=$AWS_SERVER_NAME}]" \
--subnet-id $AWS_SUBNET_ID \
--security-group-ids $AWS_SECURITY_GROUPS_IDS \
--block-device-mappings "[{\"DeviceName\":\"/dev/sda1\",\"Ebs\":{\"VolumeSize\":$AWS_VOLUME_SIZE,\"DeleteOnTermination\":true,\"Encrypted\":true,\"VolumeType\": \"gp2\"}}]"
| true |
22de7667d853d917b3cd803774658391afd28c37 | Shell | wanglin1983/sh | /color.sh | UTF-8 | 2,105 | 3.421875 | 3 | [] | no_license | NONE="\033[0m"
BLACK="\033[0;30m"
DARK_GRAY="\033[1;30m"
BLUE="\033[0;34m"
LIGHT_BLUE="\033[1;34m"
GREEN="\033[0;32m"
LIGHT_GREEN="\033[1;32m"
CYAN="\033[0;36m"
LIGHT_CYAN="\033[1;36m"
RED="\033[0;31m"
LIGHT_RED="\033[1;31m"
PURPLE="\033[0;35m"
LIGHT_PURPLE="\033[1;35m"
BROWN="\033[0;33m"
YELLOW="\033[1;33m"
LIGHT_GRAY="\033[0;37m"
WHITE="\033[1;37m"
# colored: emit color code $1, text $2 and the terminal reset code, with
# no trailing newline (-n); -e makes echo expand the \033 escapes stored
# in the color variables above.
# NOTE(review): $1/$2 are unquoted, so runs of whitespace in the text
# collapse to single spaces -- confirm callers rely on/tolerate this.
colored()
{
    echo -ne $1$2$NONE
}
# color_echo: like colored() but terminates the output with a newline.
# $1 is the color escape, $2 the text; the reset code is always appended.
function color_echo()
{
    echo -e $1$2$NONE;
}
# log_info: print an informational message in yellow.
log_info() { color_echo "$YELLOW" "$@"; }
# log_error: print an error message in red.
log_error() { color_echo "$RED" "$@"; }
# log_debug: print a debug message in light green.
log_debug() { color_echo "$LIGHT_GREEN" "$@"; }
# show_color_table: print a matrix of ANSI foreground/background color
# combinations using the sample text $T, one row per foreground style.
# NOTE(review): relies on `echo` expanding \033 escapes by default
# (zsh/dash behaviour); under bash's builtin echo without -e the header
# lines print the escapes literally -- confirm which shell sources this.
function show_color_table()
{
    M='m'
    T='RGB' # The test text
    echo "\033[ ; m"
    echo " default 40 41 42 43 44 45 46 47 "
    ## FGs ...(foreground)., BG ...(background).
    # Each entry is an SGR prefix "style;foreground" (blank = default).
    for FGs in ' ' ' 1' ' 6' ' 7'\
        ' 30' '1;30' '6;30' '7;30'\
        ' 31' '1;31' '6;31' '7;31'\
        ' 32' '1;32' '6;32' '7;32'\
        ' 33' '1;33' '6;33' '7;33'\
        ' 34' '1;34' '6;34' '7;34'\
        ' 35' '1;35' '6;35' '7;35'\
        ' 36' '1;36' '6;36' '7;36'\
        ' 37' '1;37' '6;37' '7;37'
    do
        # Strip the padding spaces to get the raw SGR parameter string.
        FG=$(echo $FGs|tr -d ' ')
        echo -en " $FGs \033[$FG$M $T "
        # One cell per background color, each reset afterwards.
        for BG in 40m 41m 42m 43m 44m 45m 46m 47m;
        do
            echo -en " \033[$FG;$BG $T \033[0m"
        done
        echo
    done
    echo
}
# show_color_table_ex: exhaustively print every style/foreground/background
# SGR combination, rendering each escape sequence in its own colors.
function show_color_table_ex()
{
    echo
    for STYLE in 0 1 2 3 4 5 6 7; do
        for FG in 30 31 32 33 34 35 36 37; do
            for BG in 40 41 42 43 44 45 46 47; do
                CTRL="\033[${STYLE};${FG};${BG}m"
                # Activate the attributes, print the sequence text, reset.
                echo -en "${CTRL}"
                echo -n "${CTRL}"
                echo -en "\033[0m"
            done
            echo
        done
        echo
    done
    # Reset
    echo -e "\033[0m"
}
alias cecho='color_echo'
alias sct='show_color_table'
alias scte='show_color_table_ex'
| true |
79dcf695e5bdab82b736f94277c5033973eb65f2 | Shell | eikek/sitebag | /src/main/dist/bin/start-sitebag.sh | UTF-8 | 1,676 | 3.84375 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/sh
# Print command-line help for this launcher to stdout.
# NOTE: the multi-line string is printed verbatim; keep comments outside it.
usage() {
	echo "Usage: $0 [OPTIONS]
Options
  -c <file>  The configuration file, default is etc/sitebag.conf.
  -l <file>  The logback configuration file. Default is etc/logback.xml.
  -a         Create an initial admin account (username, password: admin).
"
}
# find utf8 locale and set it
LOC=$(locale -a | grep -i utf8 | head -n1)
if [ -n "$LOC" ]; then
export LC_ALL=${LOC}
fi
# find working dir and cd into it
cd $(dirname $0)/..
# create classpath param
CPATH=""
SCALALIB=""
for f in lib/*; do
if [[ ${f} =~ scala-library.* ]]; then
SCALALIB="$f"
else
CPATH=${CPATH}:$f
fi
done
JCLASSPATH=${SCALALIB}:${CPATH#?}:plugins/*
if [ -z "$SITEBAG_JVM_OPTS" ]; then
SITEBAG_JVM_OPTS="-server -Xmx512M -Djava.awt.headless=true "
fi
while getopts :c:l:ah flag; do
case $flag in
c) SITEBAG_JVM_OPTS="$SITEBAG_JVM_OPTS -Dconfig.file=$OPTARG";;
l) SITEBAG_JVM_OPTS="$SITEBAG_JVM_OPTS -Dlogback.configurationFile=$OPTARG";;
a) SITEBAG_JVM_OPTS="$SITEBAG_JVM_OPTS -Dsitebag.create-admin-account=true";;
h) usage; exit 0;;
esac
done
if [[ ! $SITEBAG_JVM_OPTS =~ config.file.* ]] && [ -r etc/sitebag.conf ]; then
SITEBAG_JVM_OPTS="$SITEBAG_JVM_OPTS -Dconfig.file=etc/sitebag.conf"
fi
if [[ ! $SITEBAG_JVM_OPTS =~ logback.* ]] && [ -r etc/logback.xml ]; then
SITEBAG_JVM_OPTS="$SITEBAG_JVM_OPTS -Dlogback.configurationFile=etc/logback.xml"
fi
# use env for java command
JAVA=java
if [ -n "$JAVA_HOME" ]; then
JAVA=$JAVA_HOME/bin/java
elif [ -n "$JDK_HOME" ]; then
JAVA=$JDK_HOME/bin/java
fi
$JAVA ${SITEBAG_JVM_OPTS} -Xbootclasspath/a:${JCLASSPATH} -jar lib/sitebag.jar
| true |
25be7de38dc9b086978663cb5fd96b999e237bc6 | Shell | justineuro/mdgBookSVGKit | /mdg2midRndN-svg.sh | UTF-8 | 2,474 | 3.921875 | 4 | [
"CC0-1.0"
] | permissive | #!/bin/bash
#===================================================================================
#
# FILE: mdg2midRndN-svg.sh
#
# USAGE: mdg2midRndN-svg.sh <num>
#
# where <num> is the number of random MDG minuets to be generated, e.g., 50.
# *** NOTE: This script has to be in the same directory as mdg2mid.sh. ***
#
# DESCRIPTION: Used for generating <num> ABC files, each a Musical Dice Game (MDG)
# minuet based on the rules given in K. 516f or K. 294d or K. Anh. C 30.01
# (1792 publication attributed to W.A. Mozart by his publisher, Nikolaus
# Simrock), and the corresponding MIDI files.
#
# AUTHOR: J.L.A. Uro (justineuro@gmail.com)
# VERSION: 1.0.2
# LICENSE: Creative Commons Attribution 4.0 International License (CC-BY)
# CREATED: 2017.08.12 14:30:55 +8
# REVISION: 2017.11.23 08:46:21
#==================================================================================
#----------------------------------------------------------------------------------
# define the function genS() that randomly chooses an integer from 2 to 12, inclusive
#----------------------------------------------------------------------------------
# genS: print a uniformly distributed integer in 2..12 (the outcome of a
# two-dice throw column index). $RANDOM yields 0..32767; values >= 32758
# are rejected so the remaining 32758 = 2978 * 11 outcomes map uniformly
# onto the 11 residues of "% 11". The original used the deprecated
# `$[ ]` arithmetic form and tested the range twice per draw.
genS() {
	local rnd=32768
	# Rejection sampling: redraw until the value is inside 0..32757.
	while [ "$rnd" -ge 32758 ]
	do
		rnd=$RANDOM
	done
	# Map 0..32757 uniformly onto 2..12.
	echo $(( rnd % 11 + 2 ))
}
#----------------------------------------------------------------------------------
# declare the variables "diceS" as an array
# diceS - array containing the 16 outcomes from input line
#----------------------------------------------------------------------------------
declare -a diceS
#----------------------------------------------------------------------------------
# generate the <num> random minuets
#----------------------------------------------------------------------------------
i=1
while [ $i -le $1 ]; do
#----------------------------------------------------------------------------------
# generate the random 16-sequence of outcomes of the 16 throws of two dice
#----------------------------------------------------------------------------------
for j in {0..15}; do
diceS[$j]=`genS`
done
#----------------------------------------------------------------------------------
# generate a minuet in ABC notation and corresponding MIDI for the current diceS
# using mdg2mid.sh
#----------------------------------------------------------------------------------
./mdg2mid-svg.sh ${diceS[*]}
i=`expr $i + 1`
done
#
##
####
| true |
d6dc1d05d925396e2a055eb1988016b1d0fd08d4 | Shell | hamishm/haiku | /src/bin/leak_analyser.sh | UTF-8 | 3,330 | 3.828125 | 4 | [] | no_license | #!/bin/bash
if [ ! -f "$1" ]
then
cat << EOF
$(basename $0) <leaksFile> [<options>] [<excludePatterns>]
<leaksFile>
A file containing the allocations with stack traces from
the guarded heap output.
To generate such a file run a program with the following
environment variables prefixed and pipe the output to a file:
LD_PRELOAD=libroot_guarded.so MALLOC_DEBUG=es50 program > file
The number after the "s" is the stack trace depth. Note that
there is an implementation defined maximum.
--no-default-excludes
Do not exclude known statics and globals. By default a list of
excludes is used that removes known allocations that are never
freed by the system libraries.
--no-exclude-empty
Do not exclude allocations with no stack trace. By default
allocations without a stack trace are excluded. This should
only happen for very early allocations where the stack trace
setting has not yet been applied. The program should not be
able to generate such allocations.
<excludePatterns>
Exclude allocations that match a regular expression. The
expression is matched against the whole text block of the
allocation, so can match in the header line as well as any
stack trace lines. Note that the whole block is on a single
line and newlines have been replaced with the caret (^)
character.
Multiple exclude patterns can be specified as one argument each
and they will be ored to form the final expression.
EOF
exit 1
fi
FILENAME="$1"
shift
DONE_PARSING_OPTIONS=
NO_DEFAULTS=
NO_EXCLUDE_EMPTY=
while [ -z "$DONE_PARSING_OPTIONS" ]
do
case "$1" in
--no-default-excludes)
NO_DEFAULTS=yes
shift
;;
--no-exclude-empty)
NO_EXCLUDE_EMPTY=yes
shift
;;
--*)
echo "unrecognized option \"$1\" ignored"
shift
;;
*)
DONE_PARSING_OPTIONS=yes
;;
esac
done
# Append $1 to the global EXCLUDE_PATTERN, joining alternatives with '|'
# so the result can be fed to egrep as a single expression.
function append_pattern {
	if [ -n "$EXCLUDE_PATTERN" ]; then
		EXCLUDE_PATTERN="${EXCLUDE_PATTERN}|$1"
	else
		EXCLUDE_PATTERN="$1"
	fi
}
EXCLUDE_PATTERN=""
if [ -z "$NO_DEFAULTS" ]
then
declare -a DEFAULT_EXCLUDE_LIST=( \
"<libroot.so> __cxa_atexit " \
"<libroot.so> BPrivate::Libroot::LocaleBackend::LoadBackend" \
"<libbe.so> initialize_before " \
"<libbe.so> initialize_after " \
"<libbe.so> _control_input_server_" \
"<libbe.so> BApplication::_InitGUIContext" \
"<libbe.so> BApplication::_InitAppResources" \
"<libbe.so> BResources::LoadResource" \
"<libbe.so> BClipboard::_DownloadFromSystem" \
"<libbe.so> BToolTipManager::_InitSingleton" \
"<libbe.so> BPrivate::WidthBuffer::StringWidth" \
"<libtracker.so> _init " \
"<libtranslation.so> BTranslatorRoster::Default" \
"Translator> " \
"<libicui18n.so.54> icu::" \
"<libicuuc.so.54> icu::" \
)
for EXCLUDE in "${DEFAULT_EXCLUDE_LIST[@]}"
do
append_pattern "$EXCLUDE"
done
fi
if [ -z "$NO_EXCLUDE_EMPTY" ]
then
append_pattern "^[^^]*\^$"
fi
while [ $# -gt 0 ]
do
append_pattern "$1"
shift
done
ALLOCATIONS=$(cat "$FILENAME" | egrep "^allocation: |^ " | tr '\n' '^' \
| sed 's/\^a/~a/g' | tr '~' '\n' | sed 's/$/^/' | c++filt)
if [ ! -z "$EXCLUDE_PATTERN" ]
then
ALLOCATIONS=$(echo "$ALLOCATIONS" | egrep -v "$EXCLUDE_PATTERN")
fi
if [ -z "$ALLOCATIONS" ]
then
COUNT=0
else
COUNT=$(echo "$ALLOCATIONS" | wc -l)
fi
echo "$ALLOCATIONS^total leaks: $COUNT" | tr '^' '\n' | less
| true |
2f578d782ebd24f403d6d303b19236ad5b4d0b66 | Shell | shaomingfu/perfecttranscriptome | /bin2/download.sh | UTF-8 | 280 | 3.15625 | 3 | [] | no_license | #!/bin/bash
if [ "$#" != "2" ]; then
echo "usage list1 list2"
exit
fi
bin=`pwd`
listdir=/home/mshao/data/repositories/perfecttranscriptome/data/SRAdb/homo.sapiens.paired.lists
list1=$1
list2=$2
for list in `seq $list1 $list2`
do
$bin/download.single.py $listdir/$list
done
| true |
270777ae96b96633b79478ebe29b5b7f3672466d | Shell | BushraAloraini/SAST_Warnings | /scripts/extract_fpp_rates.sh | UTF-8 | 7,289 | 3.25 | 3 | [] | no_license | #!/bin/bash
#
# This script is to extract TFP, FFP1, and FFP2 rates from
# "FFP_warnings" file that includes traced line of code
# and save the output into "FP_rates" file
#
#
#
WDIR=$(pwd)
declare -a arrayname=("RATS" "Flawfinder" "cppcheck" "clang" "PVS-Studio" "Parasoft")
tracing_list="FFP_warnings.csv"
output="FP_rates"
IVR="Input Validation and Representation"
API="API Abuse"
SF="Security Features"
CQ="Code Quality"
TS="Time and State"
Err="Errors"
ENC="Encapsulation"
ENV="Environment"
for i in "${tool[@]}"
do
tool=${arrayname[$i]}
echo "" >> $output
echo "Tool= $tool " >> $output
echo "">> $output
echo " class, TFP, FFP1, FFP2, FFP3, total, OR" >> $output
IVR_TFP=$(awk -F"," -v var="$tool" '($4 ~ var) {print $0}' $tracing_list | awk -F"," '($15 ~ "TFP") {print $0}'| cut -d',' -f16- | grep -c "$IVR" )
IVR_FFP1=$( awk -F"," -v var="$tool" '($4 ~ var) {print $0}' $tracing_list | awk -F"," '($15 ~ "FFP1") {print $0}' | cut -d',' -f16- | grep -c "$IVR" )
IVR_FFP2=$( awk -F"," -v var="$tool" '($4 ~ var) {print $0}' $tracing_list | awk -F"," '($15 ~ "FFP2") {print $0}' | cut -d',' -f16- | grep -c "$IVR" )
IVR_FFP3=$( awk -F"," -v var="$tool" '($4 ~ var) {print $0}' $tracing_list | awk -F"," '($15 ~ "FFP3") {print $0}' | cut -d',' -f16- | grep -c "$IVR" )
IVR_total=$( awk -F"," -v var="$tool" '($4 ~ var) {print $0}' $tracing_list | cut -d',' -f16- | grep -c "$IVR" )
echo "IVR, $IVR_TFP, $IVR_FFP1, $IVR_FFP2, $IVR_FFP3, $IVR_total, " >> $output
API_TFP=$( awk -F"," -v var="$tool" '($4 ~ var) {print $0}' $tracing_list | awk -F"," '($15 ~ "TFP") {print $0}' | cut -d',' -f16- | grep -c "$API" )
API_FFP1=$( awk -F"," -v var="$tool" '($4 ~ var) {print $0}' $tracing_list | awk -F"," '($15 ~ "FFP1") {print $0}' | cut -d',' -f16- | grep -c "$API" )
API_FFP2=$( awk -F"," -v var="$tool" '($4 ~ var) {print $0}' $tracing_list | awk -F"," '($15 ~ "FFP2") {print $0}' | cut -d',' -f16- | grep -c "$API" )
API_FFP3=$( awk -F"," -v var="$tool" '($4 ~ var) {print $0}' $tracing_list | awk -F"," '($15 ~ "FFP3") {print $0}' | cut -d',' -f16- | grep -c "$API" )
API_total=$( awk -F"," -v var="$tool" '($4 ~ var) {print $0}' $tracing_list | cut -d',' -f16- | grep -c "$API" )
echo "API, $API_TFP, $API_FFP1 , $API_FFP2, $API_FFP3, $API_total, " >> $output
SF_TFP=$( awk -F"," -v var="$tool" '($4 ~ var) {print $0}' $tracing_list | awk -F"," '($15 ~ "TFP") {print $0}' | cut -d',' -f16- | grep -c "$SF" )
SF_FFP1=$( awk -F"," -v var="$tool" '($4 ~ var) {print $0}' $tracing_list | awk -F"," '($15 ~ "FFP1") {print $0}' | cut -d',' -f16- | grep -c "$SF" )
SF_FFP2=$( awk -F"," -v var="$tool" '($4 ~ var) {print $0}' $tracing_list | awk -F"," '($15 ~ "FFP2") {print $0}' | cut -d',' -f16- | grep -c "$SF" )
SF_FFP3=$( awk -F"," -v var="$tool" '($4 ~ var) {print $0}' $tracing_list | awk -F"," '($15 ~ "FFP3") {print $0}' | cut -d',' -f16- | grep -c "$SF" )
SF_total=$( awk -F"," -v var="$tool" '($4 ~ var) {print $0}' $tracing_list | cut -d',' -f16- | grep -c "$SF" )
echo "SF, $SF_TFP, $SF_FFP1 , $SF_FFP2, $SF_FFP3, $SF_total, " >> $output
CQ_TFP=$( awk -F"," -v var="$tool" '($4 ~ var) {print $0}' $tracing_list | awk -F"," '($15 ~ "TFP") {print $0}' | cut -d',' -f16- | grep -c "$CQ")
CQ_FFP1=$( awk -F"," -v var="$tool" '($4 ~ var) {print $0}' $tracing_list | awk -F"," '($15 ~ "FFP1") {print $0}' | cut -d',' -f16- | grep -c "$CQ")
CQ_FFP2=$( awk -F"," -v var="$tool" '($4 ~ var) {print $0}' $tracing_list | awk -F"," '($15 ~ "FFP2") {print $0}' | cut -d',' -f16- | grep -c "$CQ")
CQ_FFP3=$( awk -F"," -v var="$tool" '($4 ~ var) {print $0}' $tracing_list | awk -F"," '($15 ~ "FFP3") {print $0}' | cut -d',' -f16- | grep -c "$CQ")
CQ_total=$( awk -F"," -v var="$tool" '($4 ~ var) {print $0}' $tracing_list | cut -d',' -f16- | grep -c "$CQ")
echo "CQ, $CQ_TFP, $CQ_FFP1 , $CQ_FFP2, $CQ_FFP3, $CQ_total, " >> $output
TS_TFP=$( awk -F"," -v var="$tool" '($4 ~ var) {print $0}' $tracing_list | awk -F"," '($15 ~ "TFP") {print $0}' | cut -d',' -f16- | grep -c "$TS" )
TS_FFP1=$( awk -F"," -v var="$tool" '($4 ~ var) {print $0}' $tracing_list | awk -F"," '($15 ~ "FFP1") {print $0}' | cut -d',' -f16- | grep -c "$TS" )
TS_FFP2=$( awk -F"," -v var="$tool" '($4 ~ var) {print $0}' $tracing_list | awk -F"," '($15 ~ "FFP2") {print $0}' | cut -d',' -f16- | grep -c "$TS" )
TS_FFP3=$( awk -F"," -v var="$tool" '($4 ~ var) {print $0}' $tracing_list | awk -F"," '($15 ~ "FFP3") {print $0}' | cut -d',' -f16- | grep -c "$TS" )
TS_total=$( awk -F"," -v var="$tool" '($4 ~ var) {print $0}' $tracing_list | cut -d',' -f16- | grep -c "$TS" )
echo "TS, $TS_TFP, $TS_FFP1 , $TS_FFP2, $TS_FFP3, $TS_total, " >> $output
ERR_TFP=$( awk -F"," -v var="$tool" '($4 ~ var) {print $0}' $tracing_list | awk -F"," '($15 ~ "TFP") {print $0}' | cut -d',' -f16- | grep -c "$ERR" )
ERR_FFP1=$( awk -F"," -v var="$tool" '($4 ~ var) {print $0}' $tracing_list | awk -F"," '($15 ~ "FFP1") {print $0}' | cut -d',' -f16- | grep -c "$ERR" )
ERR_FFP2=$( awk -F"," -v var="$tool" '($4 ~ var) {print $0}' $tracing_list | awk -F"," '($15 ~ "FFP2") {print $0}' | cut -d',' -f16- | grep -c "$ERR" )
ERR_FFP3=$( awk -F"," -v var="$tool" '($4 ~ var) {print $0}' $tracing_list | awk -F"," '($15 ~ "FFP3") {print $0}' | cut -d',' -f16- | grep -c "$ERR" )
ERR_total=$( awk -F"," -v var="$tool" '($4 ~ var) {print $0}' $tracing_list | cut -d',' -f16- | grep -c "$ERR" )
echo "ERR, $ERR_TFP, $ERR_FFP1 , $ERR_FFP2, $ERR_FFP3, $ERR_total, " >> $output
ENC_TFP=$( awk -F"," -v var="$tool" '($4 ~ var) {print $0}' $tracing_list | awk -F"," '($15 ~ "TFP") {print $0}' | cut -d',' -f16- | grep -c "$ENC" )
ENC_FFP1=$( awk -F"," -v var="$tool" '($4 ~ var) {print $0}' $tracing_list | awk -F"," '($15 ~ "FFP1") {print $0}' | cut -d',' -f16- | grep -c "$ENC" )
ENC_FFP2=$( awk -F"," -v var="$tool" '($4 ~ var) {print $0}' $tracing_list | awk -F"," '($15 ~ "FFP2") {print $0}' | cut -d',' -f16- | grep -c "$ENC" )
ENC_FFP3=$( awk -F"," -v var="$tool" '($4 ~ var) {print $0}' $tracing_list | awk -F"," '($15 ~ "FFP3") {print $0}' | cut -d',' -f16- | grep -c "$ENC" )
ENC_total=$( awk -F"," -v var="$tool" '($4 ~ var) {print $0}' $tracing_list | cut -d',' -f16- | grep -c "$ENC" )
echo "ENC, $ENC_TFP , $ENC_FFP1 , $ENC_FFP2, $ENC_FFP3, $ENC_total, " >> $output
ENV_TFP=$( awk -F"," -v var="$tool" '($4 ~ var) {print $0}' $tracing_list | awk -F"," '($15 ~ "TFP") {print $0}' | cut -d',' -f16- | grep -c "$ENV" )
ENV_FFP1=$( awk -F"," -v var="$tool" '($4 ~ var) {print $0}' $tracing_list | awk -F"," '($15 ~ "FFP1") {print $0}' | cut -d',' -f16- | grep -c "$ENV" )
ENV_FFP2=$( awk -F"," -v var="$tool" '($4 ~ var) {print $0}' $tracing_list | awk -F"," '($15 ~ "FFP2") {print $0}' | cut -d',' -f16- | grep -c "$ENV" )
ENV_FFP3=$( awk -F"," -v var="$tool" '($4 ~ var) {print $0}' $tracing_list | awk -F"," '($15 ~ "FFP3") {print $0}' | cut -d',' -f16- | grep -c "$ENV" )
ENV_total=$( awk -F"," -v var="$tool" '($4 ~ var) {print $0}' $tracing_list | cut -d',' -f16- | grep -c "$ENV" )
echo "ENV, $ENV_TFP, $ENV_FFP1 , $ENV_FFP2, $ENV_FFP3, $ENV_total, " >> $output
done
| true |
df0ee1c3aa593eaf298d6a3e1283e87a6e998257 | Shell | ODEX-TOS/packages | /colord/repos/extra-x86_64/PKGBUILD | UTF-8 | 1,657 | 2.828125 | 3 | [
"GPL-1.0-or-later",
"MIT"
] | permissive | # Maintainer: Jan Alexander Steffens (heftig) <heftig@archlinux.org>
# Contributor: Ionut Biru <ibiru@archlinux.org>
pkgbase=colord
pkgname=(colord colord-sane)
pkgver=1.4.5
pkgrel=1
pkgdesc="System daemon for managing color devices"
url="https://www.freedesktop.org/software/colord"
arch=(x86_64)
license=(GPL2)
depends=(lcms2 libgusb polkit sqlite dconf dbus libgudev shared-mime-info systemd-libs udev)
makedepends=(gobject-introspection vala sane bash-completion argyllcms git meson gtk-doc systemd
docbook-xsl)
options=(!emptydirs)
_commit=0563117371f82420616e5e40b6a75a0b34c697c7 # tags/1.4.5^0
source=("git+https://github.com/hughsie/colord#commit=$_commit")
sha256sums=('SKIP')
validpgpkeys=('163EB50119225DB3DF8F49EA17ACBA8DFA970E17') # Richard Hughes
# Derive pkgver from the checked-out git tag, transliterating '-' to '+'
# as required for valid pacman version strings.
pkgver() {
  cd colord
  git describe --tags | sed 'y/-/+/'
}
# prepare() currently only enters the source checkout; kept as a
# placeholder for future patching steps (makepkg runs it after extraction).
prepare() {
  cd colord
}
# Configure and compile with meson. arch-meson wraps `meson setup` with
# the Arch packaging defaults; the -D options enable the libcolordcompat
# ABI-compatibility library, the SANE sensor, Vala bindings and printer
# profiles, and make the daemon run as the dedicated 'colord' user.
build() {
  arch-meson colord build \
    -D libcolordcompat=true \
    -D sane=true \
    -D vapi=true \
    -D print_profiles=true \
    -D daemon_user=colord
  # Compile everything in the configured build directory.
  meson compile -C build
}
# Run the project's meson test suite; print logs for any failing tests.
check() {
  meson test -C build --print-errorlogs
}
# Main colord package: daemon, libraries, data files and sysusers entry.
package_colord() {
  optdepends=('argyllcms: color profiling'
              'colord-sane: SANE support')
  replaces=(shared-color-profiles)
  DESTDIR="$pkgdir" meson install -C build
  # Declare the unprivileged 'colord' system user via systemd-sysusers.
  echo 'u colord - "Color management daemon" /var/lib/colord' |
    install -Dm644 /dev/stdin "$pkgdir/usr/lib/sysusers.d/colord.conf"
  ### Split colord-sane
  # Stage the SANE helper and sensor plugin outside $pkgdir; they are
  # re-packaged by package_colord-sane() below.
  mkdir -p colord-sane/usr/lib/colord-plugins
  mv {"$pkgdir",colord-sane}/usr/lib/colord-sane
  mv {"$pkgdir",colord-sane}/usr/lib/colord-plugins/libcolord_sensor_sane.so
}
# Split package holding the SANE sensor plugin staged by package_colord().
package_colord-sane() {
  pkgdesc+=" (SANE support)"
  depends=("colord=$pkgver-$pkgrel" sane)
  mv colord-sane/* "$pkgdir"
}
# vim:set sw=2 et:
| true |
b614338ebe191be08b77f10c23906ccafc2d9ac2 | Shell | delkyd/alfheim_linux-PKGBUILDS | /glcorearb/PKGBUILD | UTF-8 | 343 | 2.640625 | 3 | [] | no_license | # Maintainer: Lubosz Sarnecki <lubosz@gmail.com>
pkgname=glcorearb
pkgver=1
pkgrel=1
pkgdesc="glcorearb Header"
arch=('any')
url="http://www.opengl.org/registry"
license=('custom')
# Install the glcorearb.h OpenGL core-profile header under /usr/include/GL.
# NOTE(review): downloading inside package() bypasses makepkg's source
# array and checksum verification and fails for offline builds -- the URL
# should move into source=() with a checksum. Also license=('custom') is
# declared but no license file is installed.
package() {
  wget "http://www.opengl.org/registry/api/GL/glcorearb.h"
  install -m755 -d "${pkgdir}/usr/include/GL"
  install -m644 "glcorearb.h" "${pkgdir}/usr/include/GL/"
}
| true |
d8b34af9a86e3d6898c03db27804fff514d98399 | Shell | drocon11/lw-build | /build-L-SMASH-Works-ffmpeg-AviSynth.sh | UTF-8 | 999 | 2.703125 | 3 | [] | no_license | #!/bin/bash -eux
# Build script for the L-SMASH-Works AviSynth plugin against a locally
# built ffmpeg, cross-compiled for Windows via an MSVC project wrapper.
export PKG_CONFIG_PATH=/mingw/lib/pkgconfig

# Install the previously built ffmpeg into the mingw prefix.
cd ffmpeg
make install
cd ..

# Fetch (or update) the L-SMASH-Works sources.
if [ ! -d L-SMASH-Works-ffmpeg ]; then
    mkdir L-SMASH-Works-ffmpeg
fi
cd L-SMASH-Works-ffmpeg
if [ ! -d .git ]; then
    git clone -v --progress --config core.autocrlf=false https://github.com/VFR-maniac/L-SMASH-Works.git ./
fi
git pull -v --progress
cd AviSynth

# Generate a Windows batch file that sets MSVC include/link paths and then
# invokes msbuild on the project.  Heredoc content is written verbatim to
# the .bat file — the %%1 / %SystemRoot% tokens are cmd.exe syntax.
cat <<__END_OF_TEXT__ >"LSMASHSourceVCX.vcxproj.bat"
set CL=/I..\..\..\..\..\include /I..\..\msinttypes
set LINK="libpthread.a" "libiconv.a" "libswresample.a" "libmsvcrt.a" /LIBPATH:..\..\..\..\..\i686-w64-mingw32\lib /LIBPATH:..\..\..\..\..\lib\gcc\i686-w64-mingw32\4.8.2 /LIBPATH:..\..\bzip2 /LIBPATH:..\..\..\..\..\lib
@for /d %%1 in (%SystemRoot%\Microsoft.NET\Framework\v*) do @if exist "%%~1\msbuild.exe" set "MSBUILD=%%~1\msbuild.exe"
"%MSBUILD%" LSMASHSourceVCX.vcxproj /target:Rebuild /property:Configuration=Release;Platform=Win32;PlatformToolset=v100
__END_OF_TEXT__

# Run the generated batch file under cmd.exe (MSYS environment).
cmd.exe "/c "LSMASHSourceVCX.vcxproj.bat""
echo End of $0
ls
| true |
ca51d4c1e9575410601e31e4fa2f0129b7814efd | Shell | dritter/zulu | /commands/install | UTF-8 | 3,391 | 4.28125 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env zsh
###
# Output usage information for `zulu install`.
# Relies on the _zulu_color helper (defined elsewhere) for the heading.
###
function _zulu_install_usage() {
  echo $(_zulu_color yellow "Usage:")
  echo " zulu install <packages...>"
}
###
# Install a single package by cloning its repository.
#
# Arguments:
#   $1 - package name (must exist in the index)
# Globals (set by the caller, _zulu_install):
#   $base   - zulu root directory
#   $index  - package index directory
#   $config - zulu config directory
# Returns:
#   1 if the package is already installed, otherwise git clone's status.
###
function _zulu_install_package() {
  local package json repo dir file link packagetype
  local -a dependencies

  package="$1"

  # Check if the package is already installed
  # NOTE(review): 'root' is not declared local, so it leaks into the caller's
  # scope — confirm this is intentional before changing.
  root="$base/packages/$package"
  if [[ -d "$root" ]]; then
    echo $(_zulu_color red "Package '$package' is already installed")
    return 1
  fi

  # Get the JSON from the index
  json=$(cat "$index/$package")

  # Get the repository URL from the JSON
  repo=$(jsonval $json 'repository')

  # Clone the repository
  cd "$base/packages"
  git clone --recursive $repo $package

  # Record the package in the user's package file unless already listed
  # (exact-name match, anchored).
  packagefile="$config/packages"
  in_packagefile=$(cat $packagefile | grep -e '^'${package}'$')
  if [[ "$in_packagefile" = "" ]]; then
    echo "$package" >> $packagefile
  fi

  return
}
###
# Zulu command to handle package installation.
#
# Parses options, validates that every requested package exists in the
# index, installs each package's dependencies first, then the package
# itself, linking each one after a successful install.
#
# Fix: the success check previously read `$?` *after* `_zulu_revolver stop`,
# so it reported the spinner's exit status rather than the install's.  The
# install status is now captured immediately after _zulu_install_package.
###
function _zulu_install() {
  local base index packages out

  # Parse options
  zparseopts -D h=help -help=help

  # Output help and return if requested
  if [[ -n $help ]]; then
    _zulu_install_usage
    return
  fi

  # Set up some variables
  base=${ZULU_DIR:-"${ZDOTDIR:-$HOME}/.zulu"}
  config=${ZULU_CONFIG_DIR:-"${ZDOTDIR:-$HOME}/.config/zulu"}
  index="${base}/index/packages"
  packages=($@)

  # Make sure the user's package list file exists
  packagefile="$config/packages"
  if [[ ! -f $packagefile ]]; then
    touch $packagefile
  fi

  # If no package name is passed, throw an error
  if [[ ${#packages} -eq 0 ]]; then
    echo $(_zulu_color red "Please specify a package name")
    echo
    _zulu_install_usage
    return 1
  fi

  # Do a first loop, to ensure all packages exist
  for package in "$packages[@]"; do
    if [[ ! -f "$index/$package" ]]; then
      echo $(_zulu_color red "Package '$package' is not in the index")
      return 1
    fi
  done

  # Do a second loop, to do the actual install
  for package in "$packages[@]"; do
    # Get the JSON from the index
    json=$(cat "$index/$package")

    # Get the list of dependencies from the index
    dependencies=($(echo $(jsonval $json 'dependencies') | tr "," "\n" | sed 's/\[//g' | sed 's/\]//g'))

    # If there are dependencies in the list
    if [[ ${#dependencies} -ne 0 ]]; then
      # Loop through each of the dependencies
      for dependency in "$dependencies[@]"; do
        # Check that the dependency is not already installed
        if [[ ! -d "$base/packages/$dependency" ]]; then
          _zulu_revolver start "Installing dependency $dependency..."
          out=$(_zulu_install_package "$dependency" 2>&1)
          # Capture the install status *before* stopping the spinner,
          # otherwise $? would reflect `_zulu_revolver stop` instead.
          local dep_status=$?
          _zulu_revolver stop
          if [ $dep_status -eq 0 ]; then
            echo "$(_zulu_color green '✔') Finished installing dependency $dependency "
            zulu link $dependency
          else
            echo "$(_zulu_color red '✘') Error installing dependency $dependency "
            echo "$out"
          fi
        fi
      done
    fi

    _zulu_revolver start "Installing $package..."
    out=$(_zulu_install_package "$package" 2>&1)
    # As above: preserve the install status across the spinner shutdown.
    local install_status=$?
    _zulu_revolver stop

    if [ $install_status -eq 0 ]; then
      echo "$(_zulu_color green '✔') Finished installing $package "
      zulu link $package
    else
      echo "$(_zulu_color red '✘') Error installing $package "
      echo "$out"
    fi
  done
}
| true |
9b13ca61e06763099060315dd037272e1bb96d07 | Shell | ttsz/addon-tasmoadmin | /tasmoadmin/rootfs/etc/cont-init.d/13-persistent-data.sh | UTF-8 | 960 | 3.546875 | 4 | [
"MIT"
] | permissive | #!/usr/bin/with-contenv bash
# ==============================================================================
# Community Hass.io Add-ons: TasmoAdmin
# Ensures data is stored in a persistent location
# ==============================================================================
# shellcheck disable=SC1091
source /usr/lib/hassio-addons/base.sh

# First run: seed /data/tasmoadmin from the image's bundled data directory
# and normalize ownership/permissions for the nginx user.
if ! hass.directory_exists "/data/tasmoadmin"; then
    hass.log.debug 'Data directory not initialized, doing that now...'

    # Setup structure
    cp -R /var/www/tasmoadmin/tasmoadmin/data /data/tasmoadmin

    # Ensure file permissions (0644 for files, 0755 for directories)
    chown -R nginx:nginx /data/tasmoadmin
    find /data/tasmoadmin -not -perm 0644 -type f -exec chmod 0644 {} \;
    find /data/tasmoadmin -not -perm 0755 -type d -exec chmod 0755 {} \;
fi

# Replace the in-container data directory with a symlink to the persistent
# storage so app writes survive container rebuilds.
hass.log.debug 'Symlinking data directory to persistent storage location...'
rm -f -r /var/www/tasmoadmin/tasmoadmin/data
ln -s /data/tasmoadmin /var/www/tasmoadmin/tasmoadmin/data
| true |
e7e209a930399f3b89f50716d25ea16cc797e572 | Shell | Jpocas3212/aur | /wizorb-hib/wizorb.sh | UTF-8 | 229 | 2.65625 | 3 | [] | no_license | #!/usr/bin/env bash
# Launcher for Wizorb (Humble Indie Bundle build).
#
# The game expects its save directory at "$HOME/Tribute Games"; the real
# data lives in "$HOME/.Tribute Games" and is exposed via a symlink for
# the duration of the run.
confdir="$HOME/.Tribute Games"
[[ -d "$confdir" ]] || install -d "$confdir"

# Only create the compatibility symlink when it does not already exist, and
# remember whether this run created it — previously the script removed
# "$HOME/Tribute Games" unconditionally, deleting a user's own pre-existing
# symlink.
created_link=0
if [[ ! -h "$HOME/Tribute Games" ]]; then
    ln -s "$confdir" "$HOME/Tribute Games"
    created_link=1
fi

cd /opt/wizorb
mono Wizorb.exe

# Clean up the symlink only if this script created it.
if [[ $created_link -eq 1 ]]; then
    rm "$HOME/Tribute Games"
fi
| true |
beb9fd1bd69ea3e331abf539065f35cc5c6b8f3f | Shell | rms1000watt/aiserver | /plumbing/trunk/ec2/scripts/bootstrap.sh | UTF-8 | 2,882 | 4.0625 | 4 | [] | no_license | #!/bin/bash
# EC2 instance bootstrap script
# This script automates initialization of the EC2 instance.
# The script should be passed as a parameter to ec2-run-instances.
#
# ec2-run-instances [AMI] -k [KEYPAIR] -f bootstrap.sh -t [MACHINE_TYPE] -z [AVAILABILITY_ZONE]
#
# ec2-run-instances ami-9759bffe -k gsg-keypair -f bootstrap.sh -t m1.large -z us-east-1a
#
# The EBS_VOLUME variable must be set to the correct EBS Volume ID to be associated with
# the instance to be run.
EBS_VOLUME="vol-b1e20bd8"
#EBS_VOLUME="vol-191cf070"

# Instance ID is fetched from the EC2 instance metadata service.
INSTANCE_ID=`curl http://169.254.169.254/latest/meta-data/instance-id 2> /dev/null`

export JAVA_HOME=/usr
export EC2_HOME=/usr/local/ec2/apitools
export EC2_PRIVATE_KEY=/mnt/pk_fimco.pem
export EC2_CERT=/mnt/cert_fimco.pem
PATH=/usr/local/ec2/apitools/bin:$PATH

if [ -z $EBS_VOLUME ]
then
    echo "EBS_VOLUME is not defined"
    exit 1
fi

# The X.509 certificate and private key are embedded at the bottom of this
# very script (after 'exit 0'); extract them by line number into the
# locations the EC2 API tools expect.
# create the certificate
LINE_BEG=`cat $0 | grep -n "BEGIN CERTIFICATE" | tail -1 | cut -d':' -f1`
LINE_END=`cat $0 | grep -n "END CERTIFICATE" | tail -1 | cut -d':' -f1`
cat $0 | sed -n "$LINE_BEG,$LINE_END p" > $EC2_CERT

# create the private key
LINE_BEG=`cat $0 | grep -n "BEGIN PRIVATE KEY" | tail -1 | cut -d':' -f1`
LINE_END=`cat $0 | grep -n "END PRIVATE KEY" | tail -1 | cut -d':' -f1`
cat $0 | sed -n "$LINE_BEG,$LINE_END p" > $EC2_PRIVATE_KEY

# get the status of the volume (column 6 of ec2-describe-volumes output)
echo "Checking volume status..."
VOLUME_STAT=`ec2-describe-volumes $EBS_VOLUME | cut -f6`

# check if the volume is available
if [ $VOLUME_STAT = "available" ]
then
    echo "Volume is available"
elif [ $VOLUME_STAT = "in-use" ]
then
    echo "Volume is already in-use."
    exit 2
else
    echo "Volume does not exist."
    exit 2
fi

# attach the volume, then poll its attachment state (up to 12 x 5s = 60s)
echo "Attaching volume..."
VOLUME_STAT=`ec2-attach-volume $EBS_VOLUME -i $INSTANCE_ID -d /dev/sdh 2> /dev/null | cut -f5`
RETRIES=0
while [ ! $VOLUME_STAT = "attached" ]
do
    if [ $RETRIES -ge 12 ]
    then
        echo "Volume is taking too long to attach."
        exit 3
    fi

    # sleep 5 seconds
    sleep 5
    VOLUME_STAT=`ec2-describe-volumes $EBS_VOLUME 2> /dev/null | grep ATTACHMENT | cut -f5`

    # increment no. of retries
    RETRIES=`expr $RETRIES + 1`
done
echo "Volume attached."

# mount volume partitions to our known mount points
echo "Creating /ebs directory"
mkdir -p /ebs
echo "Mounting /dev/sdh1 to /ebs... this might take a few minutes"
mount /dev/sdh1 /ebs
echo "Mounting /dev/sdh2 to /home... this might take a few minutes"
mount /dev/sdh2 /home

# run the volume-provided initialization script if it exists
if [ -f /ebs/admin/init.sh ] && [ -x /ebs/admin/init.sh ]
then
    echo "Running initialization script..."
    /ebs/admin/init.sh
fi

# keep a copy of this bootstrap (including embedded credentials) on /mnt
echo "Copying bootstrap script to /mnt..."
cp -f $0 /mnt/bootstrap.sh

echo "*** BOOTSTRAP COMPLETE ***"
exit 0
-----BEGIN CERTIFICATE-----
MIICdzCCAeCgAwIBAgIGAPF9w/+bMA0GCSqGSIb3DQEBBQUAMFMxCzAJBgNVBAYT
AlVTMRMwEQYDVQQKEwpBbWF6b24uY29tMQwwCgYDVQQLEwNBV1MxITAfBgNVBAMT
GEFXUyBMaW1pdGVkLUFzc3VyYW5jZSBDQTAeFw0wOTAyMTgxNTA1NDBaFw0xMDAy
MTgxNTA1NDBaMFIxCzAJBgNVBAYTAlVTMRMwEQYDVQQKEwpBbWF6b24uY29tMRcw
FQYDVQQLEw5BV1MtRGV2ZWxvcGVyczEVMBMGA1UEAxMMeW54NnkxZnRnMmRuMIGf
MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCKh70DtYaR7DPWJKUfhxNLeOX35bhm
i8qQgvrGojWguFBDxsN7ydQkERvkTXmBh+xAnCWak985RZ7+KmjDLbXDvPJQ1MSj
j+rHiHSL6o7BvXqW44T7ZiFtCOVo6BfjUTh3kgmH22buJBr3xMmXVksd50TX25wr
uQ7aieN+zpDaqQIDAQABo1cwVTAOBgNVHQ8BAf8EBAMCBaAwFgYDVR0lAQH/BAww
CgYIKwYBBQUHAwIwDAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQUOoFCixofnTyXgwXV
SeCHSD4oyHswDQYJKoZIhvcNAQEFBQADgYEAn3qp14qeJvw9xFvfGIV62i8SNMc3
/i60bgpryVpD1A9W4/gaC435IrDX1fY7w5vPbxYI1Dnjn1HufsAOAJii/GNJ+XcN
G/tmf5G90J8Wr8d0yBmygK/IubF+ulBa3ewBB3QWehN6Jjn3xRQm6QzAQKdlZ9J0
EtiP9Em/h+Dxo9o=
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
MIICdQIBADANBgkqhkiG9w0BAQEFAASCAl8wggJbAgEAAoGBAIqHvQO1hpHsM9YkpR+HE0t45ffl
uGaLypCC+saiNaC4UEPGw3vJ1CQRG+RNeYGH7ECcJZqT3zlFnv4qaMMttcO88lDUxKOP6seIdIvq
jsG9epbjhPtmIW0I5WjoF+NROHeSCYfbZu4kGvfEyZdWSx3nRNfbnCu5DtqJ437OkNqpAgMBAAEC
gYAmA5maqvWClY6j9Opa/HYO/94baK5xdWrgvRCT8W9F604bSy/ZiEjunMNKovf005fBIxgukuVu
kexPUtPsu15lMCovU51QoAEaovX0Kh3hxLmbLT+g0W+wQNDoczER/4mtwV0WziRC64koJC+4whkC
Sf/GaNUCXz9KoZjjnsxNAQJBANNJnzVOUfk9bHtsdvtO48q84UBInbLuPOCrxDHa37u0ZxMuo3V4
e8t/VyHJbjHxcE7NBzt2HtE94XoqXV3ChdcCQQCn2Ie/Kxkki7lI7Gvv51Pjxl2gHwND0a8C2zg9
nmupKiN4Us3rAyhT4X8xsWZiDC2NgZpL8YGvTwKdVqo2upN/AkBIYx/FjoitIGsrOfTlkpieW+m8
MWS96bs3qgF0py0hzOPHgaIE2/tls8HxVGaJe9NjXAEPUR+rxkyaoysLtVpPAkAEKWlMQyxPbKt+
dGZEv46j8jI2Gy7Air11K6xcUsZGnoXcoOj8L8rbMZcuy0BHpBepD5Kc2XMmvqXI8vIrgzrPAkBr
IEEvjWSL7YXkTFmG9ryn0/TqbbmUzVK1k/h2kvaW4ZQC5m7KZX/0L38jqv2gBPVVIV/ERN2Erkz3
0ESBDVPY
-----END PRIVATE KEY-----
| true |
2b84136767fa3187395d0e0f72469fd61db507e0 | Shell | adnrbp/L-rails | /blogapi/compose/local/rails/start | UTF-8 | 1,412 | 3.265625 | 3 | [] | no_license | #!/bin/sh
# Entrypoint for the Rails container: wait for PostgreSQL, run migrations,
# then start the development server.
set -x

# Only check network
# while !</dev/tcp/postgres/5432; do sleep 1; done;

# Required: postgres-client
# until psql -h $POSTGRES_HOST -U $POSTGRES_USER -d $POSTGRES_DB -c "select 1" > /dev/null 2>&1 || [ $RETRIES -eq 0 ]; do
#   echo "Waiting for postgres server, $((RETRIES-=1)) remaining attempts..."
#   sleep 1
# done

# until psql -h $POSTGRES_HOST -U $POSTGRES_USER -d $POSTGRES_DB -c "select 1" > /dev/null 2>&1 || [ $RETRIES -eq 0 ]; do
#   echo "Waiting for postgres server to start, $((RETRIES)) remaining attempts..."
#   RETRIES=$((RETRIES-=1))
#   sleep 1
# done

# # Required: Netcat
# # wait for postgresql
# until nc -vz $POSTGRES_HOST 5432; do
#   echo "Postgresql is not ready, sleeping..."
#   sleep 1
# done
# echo "Postgresql is ready, starting Rails."

# Probe the database with an inline Ruby script using the 'pg' gem; exits 0
# once a connection succeeds, 1 otherwise.  Connection parameters come from
# the POSTGRES_* environment variables.
postgres_ready() {
ruby << END
require 'pg'
begin
conn = PG::Connection.open(:host => ENV["POSTGRES_HOST"],
:dbname => ENV["POSTGRES_DB"],
:user => ENV["POSTGRES_USER"])
rescue PG::Error => e
exit 1
ensure
conn.close if conn
end
exit 0
END
}

# Block until the database accepts connections.
until postgres_ready; do
  >&2 echo 'Waiting for PostgreSQL to become available...'
  sleep 1
done
>&2 echo 'PostgreSQL is available'

# Migrate, then serve on 0.0.0.0:8002 (create/seed intentionally disabled).
#RAILS_ENV=development bundle exec rake db:create
RAILS_ENV=development bundle exec rake db:migrate
#RAILS_ENV=development bundle exec rake db:seed
RAILS_ENV=development bundle exec rails s -p 8002 -b '0.0.0.0'
| true |
a95a91e298ce5f050b8dbd5efed09f93feaf9b51 | Shell | Jessy142/shutdown-kvm-guests | /debian/shutdown-kvm-guest/etc/init.d/shutdown-kvm-guests.sh | UTF-8 | 1,484 | 3.859375 | 4 | [
"BSD-2-Clause"
] | permissive | #!/bin/sh
### BEGIN INIT INFO
# Provides: shutdown-kvm-guests
# Required-Start:
# Required-Stop: shutdown-kvm-guests $remote_fs
# Should-Stop:
# Default-Start:
# Default-Stop: 0 1 6
# Short-Description: Cleanly shut down all running KVM domains.
# Description:
### END INIT INFO

# Inspired by https://bugs.launchpad.net/ubuntu/+source/libvirt/+bug/350936.

# Configure timeout (in seconds) for the graceful-shutdown phase.
TIMEOUT=300

VIRSH=/usr/bin/virsh

# List the names of all currently running libvirt domains.
list_running_domains() {
    $VIRSH list | grep running | awk '{ print $2}'
}

case "$1" in
    # Fix: the original pattern used commas ("start,reload,...") which case
    # treats as one literal string, so none of these actions ever matched.
    # Alternatives must be separated with '|'.
    start|reload|restart|force-reload)
        # We don't do anything here.
        ;;
    stop)
        echo "Try to cleanly shut down all running KVM domains..."

        # Create some sort of semaphore.
        touch /tmp/shutdown-kvm-guests

        # Try to shutdown each domain, one by one.
        list_running_domains | while read -r DOMAIN; do
            # Try to shutdown given domain.
            $VIRSH shutdown $DOMAIN
        done

        # Wait until all domains are shut down or timeout has reached.
        END_TIME=$(date -d "$TIMEOUT seconds" +%s)
        while [ $(date +%s) -lt $END_TIME ]; do
            # Break while loop when no domains are left.
            test -z "$(list_running_domains)" && break
            # Wait a litte, we don't want to DoS libvirt.
            sleep 1
        done

        # Clean up left over domains, one by one.
        list_running_domains | while read -r DOMAIN; do
            # Try to shutdown given domain.
            $VIRSH destroy $DOMAIN
            # Give libvirt some time for killing off the domain.
            sleep 3
        done
        ;;
esac
| true |
d4354485c55e4e182f8ea11a7bbe7abc3bc42a06 | Shell | amacgregor/docker-phpfpm | /5.4.43/config/phpfarm/src/options.sh | UTF-8 | 1,493 | 3.671875 | 4 | [] | no_license | #!/bin/bash
# You can override config options very easily.
# Just create a custom options file in the custom/ directory.
# It may be version specific:
# - custom/options.sh
# - custom/options-5.sh
# - custom/options-5.4.sh
# - custom/options-5.4.1.sh
#
# Don't touch this file here - it would prevent you from just
# "svn update"'ing your phpfarm source code.

# Full version plus its numeric components, as passed by the caller.
version=$1
vmajor=$2
vminor=$3
vpatch=$4

# Base ./configure options applied to every PHP build.
configoptions="\
--disable-short-tags \
--with-layout=GNU \
--enable-bcmath \
--enable-calendar \
--enable-exif \
--enable-ftp \
--enable-mbstring \
--enable-pcntl \
--enable-soap \
--enable-sockets \
--enable-wddx \
--enable-zip \
--enable-cli \
--enable-fpm \
--with-openssl \
--with-zlib \
--with-gettext \
--with-mysql \
--with-pdo-mysql \
--with-gd \
--with-zlib \
--with-pear \
--with-curl \
--with-mcrypt \
--with-pdo-mysql \
--with-jpeg-dir \
--with-png-dir \
"

# --enable-sqlite-utf8 was removed starting with PHP 5.4.0.
# (Replaces the deprecated `test ... -a ...; if [ $? -eq 0 ]` idiom with a
# direct, quoted compound condition.)
if [ "$vmajor" -eq 5 ] && [ "$vminor" -lt 4 ]; then
    configoptions="\
$configoptions \
--enable-sqlite-utf8 \
"
fi

echo $version $vmajor $vminor $vpatch

# Source any matching custom option files, most-specific last, passing the
# version components along.  Track the newest mtime so callers can detect
# configuration changes.
configure=$(stat -c '%Y' "options.sh")
for suffix in "" "-$vmajor" "-$vmajor.$vminor" "-$vmajor.$vminor.$vpatch" "-$version"; do
    custom="custom/options$suffix.sh"
    if [ -e "$custom" ]; then
        tstamp=$(stat -c '%Y' "$custom")
        if [ "$tstamp" -gt "$configure" ]; then
            configure=$tstamp
        fi
        source "$custom" "$version" "$vmajor" "$vminor" "$vpatch"
    fi
done
| true |
5e71e4ba9146c81f0634289dbe1aa858ec65f4a2 | Shell | demisto/content | /Tests/scripts/pylint.sh | UTF-8 | 796 | 3.203125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Run pylint over the .py files of each directory given on the command line,
# printing only real findings and failing the build if any are reported.

# Checks intentionally disabled for this codebase.
pylint_disabled_errors=C0103,C0114,C0115,C0116,C0122,C0301,C0302,C0325,C0411,C0412,C0413,C0415,E1136,E1205,F0001,F0010,R0201,R0205,R0401,R0801,R0902,R0903,R0904,R0912,R0913,R0914,R0915,R1702,R1705,R1710,R1721,R1725,W0105,W0150,W0212,W0401,W0404,W0511,W0603,W0612,W0613,W0621,W0622,W0703,W1202,W1203

echo "Starting pylint run"
# Initialize explicitly instead of relying on an unset variable defaulting
# to 0 inside [[ ... ]].
errors=0
# Fix: iterate "$@" (not unquoted $*) so directory paths containing spaces
# are handled correctly.
for dir in "$@"; do
    pylint_out=$(python3 -m pylint --disable="$pylint_disabled_errors" 2>&1 "$dir"/*.py)
    if [[ $? -ne 0 ]]; then
        # Show each unique "<file>: <CODE>: <msg>" finding once.
        echo -e "$pylint_out" | sort | uniq | grep ": [A-Z][0-9]*: "
        if [[ $? -eq 0 ]]; then
            errors=1 # some errors founded by grep
        fi
    fi
done

if [[ $errors -ne 0 ]]; then
    echo "*** Finished pylint run, please fix the above errors ***"
    exit 1
fi
echo "Finished pylint run - no errors were found"
| true |
25bf0c1694e48828d866a5c85348f4b24660d77c | Shell | timlinux/docker-projects | /run_production_site.sh | UTF-8 | 1,166 | 3.328125 | 3 | [
"Unlicense"
] | permissive | #!/bin/bash
# Bring up the inter-related docker containers for a production site:
# a PostGIS database and a TileMill instance linked to it.
#
# NOTE(review): the single-dash long flags used below (-name, -expose,
# -link) are legacy Docker syntax; verify against the Docker version in use.
echo "Run this as sudo!"
# Run various inter-related docker containers for a production site.
# If you have problems connecting to the postgis container, try doing
# sudo rm -rf /var/docker-data/postgres-data/
# being aware obviously that it will DESTROY ALL YOUR DATA
# Then run the commands below

# Start postgis container, ensuring a postgres user 'qgis' with pass 'qgis' exists
docker run -name="postgis" -expose 5432 -p 54322:5432 -e USERNAME=qgis -e PASS=qgis -d -v /var/docker-data/postgres-data:/var/lib/postgresql -t timlinux/postgis:2.1 /start.sh

# Start tilemill, linked to the postgis site so it can access it
# using details from env-vars $PG_PORT_5432_TCP_ADDR (for the ip address)
# and $PG_PORT_5432_TCP_PORT (for the port number).
# Note that you will only see these env vars for PORT and ADDR in the initial shell,
# (e.g. docker run -t -i -name app ubuntu /bin/bash)
# but they dont show up in env when you ssh into the container
docker run -t -d -name="tilemill" -link postgis:pg -p 2222:22 -v /home/gisdata:/home/gisdata -v /home/timlinux/Documents/MapBox:/Documents/MapBox linfiniti/tilemill supervisord -n

docker ps -a
| true |
9613ccc035d558ceca7af9c0f61c208edab8a186 | Shell | ixc/docker | /bin/migrate.sh | UTF-8 | 750 | 3.71875 | 4 | [] | no_license | #!/bin/bash
# Print a banner identifying who/where/how this script was invoked.
cat <<EOF
#
# `whoami`@`hostname`:$PWD$ migrate.sh $@
#
# Apply Django migrations, if they are out of date. Chainable.
#
EOF
set -e
cd "$PROJECT_DIR"

# For Django 1.6 and below, we can't tell if apps without migrations need to be
# synced, so we always run the 'syncdb' management command.
# NOTE(review): this is a lexicographic string comparison, not a numeric
# version compare (e.g. "1.10" sorts before "1.7") — confirm acceptable for
# the Django versions in use.
if [[ "$(django-admin.py --version)" < 1.7 ]]; then
    python manage.py syncdb
fi

# Compare the current migration listing against the checksum recorded on the
# previous run; only apply migrations when the listing has changed.
touch var/migrate.md5
python manage.py migrate --list > var/migrate.txt
if md5sum -c --status var/migrate.md5; then
    echo '# Migrations are already up to date. Skip.'
else
    echo '# Migrations are out of date. Apply.'
    python manage.py migrate --noinput
    # Re-list and record the new checksum so the next run can skip.
    python manage.py migrate --list > var/migrate.txt
    md5sum var/migrate.txt > var/migrate.md5
fi

# Chain to whatever command was passed as arguments.
exec "$@"
| true |
cc9638e3614a7c9de4ba9d02891f9119bd4cccba | Shell | mindspore-ai/mindspore | /mindspore/lite/test/st/scripts/run_benchmark_server_inference_tensorrt_cloud.sh | UTF-8 | 10,956 | 3.265625 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-proprietary-license",
"MPL-1.0",
"OpenSSL",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause-Open-MPI",
"MIT",
"MPL-2.0-no-copyleft-exception",
"NTP",
"BSD-3-Clause",
"GPL-1.0-or-later",
"0BSD",
"MPL-2.0",
"LicenseRef-scancode-free-unknown",
"AGPL-3.0-only",
"Libpng",
"MPL-1.1",
"IJG",
"GPL-2.0-only",
"BSL-1.0",
"Zlib",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-python-cwi",
"BSD-2-Clause",
"LicenseRef-scancode-gary-s-brown",
"LGPL-2.1-only",
"LicenseRef-scancode-other-permissive",
"Python-2.0",
"LicenseRef-scancode-mit-nagy",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense"
] | permissive | #!/bin/bash
source ./scripts/base_functions.sh
source ./scripts/run_benchmark_python.sh
# Run converter on x86 platform:
# Unpacks the release tarball, sets up library paths, and converts every
# model listed in the server-inference config files into .mindir models
# under ${ms_models_path}.  Relies on globals set by the main flow
# (x86_path, version, ms_models_path, models_path, log/result file paths)
# and on the Convert helper sourced from base_functions.sh.
function Run_Converter() {
    cd ${x86_path} || exit 1
    tar -zxf mindspore-lite-${version}-linux-x64.tar.gz || exit 1
    cd ${x86_path}/mindspore-lite-${version}-linux-x64/ || exit 1
    cp tools/converter/converter/converter_lite ./ || exit 1
    export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:./tools/converter/lib/:./tools/converter/third_party/glog/lib

    # Start from a clean output directory.
    rm -rf ${ms_models_path}
    mkdir -p ${ms_models_path}

    echo ${models_server_inference_cfg_file_list[*]}
    # Convert models:
    # $1:cfgFileList; $2:inModelPath; $3:outModelPath; $4:logFile; $5:resultFile;
    Convert "${models_server_inference_cfg_file_list[*]}" $models_path $ms_models_path $run_converter_log_file $run_converter_result_file $run_fail_not_return
    # Note: bare 'convert_status' inside [[ ... -ne ... ]] is evaluated as a
    # variable in the arithmetic context.
    convert_status=$?
    if [[ convert_status -ne 0 ]]; then
        echo "run server inference convert failed."
        exit 1
    fi
}
# Run every converted .mindir model through the benchmark tool on the
# TensorRT (GPU) backend.  Parses each config line of the form
#   model_info accuracy_limit
# where model_info is "name;input_info;input_shapes;...;mode".  Appends a
# pass/fail line per model to ${run_benchmark_result_file} and returns 1 on
# the first failure.
function Run_TensorRT() {
    source /etc/profile.tensorrt8.5.1
    # cd ${tensorrt_path} || exit 1
    # tar -zxf ${x86_path}/cloud_fusion/mindspore-lite-${version}-linux-x64.tar.gz || exit 1
    # tar -zxf mindspore-lite-${version}-linux-x64.tar.gz || exit 1
    # cd ${tensorrt_path}/mindspore-lite-${version}-linux-x64/ || exit 1
    echo 'cd '${x86_path}'/mindspore-lite-'${version}'-linux-*'
    cd ${x86_path}/mindspore-lite-${version}-linux-*/ || exit 1

    export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./runtime/lib:./tools/converter/lib/
    export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./runtime/third_party/glog
    export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./runtime/third_party/dnnl

    cp tools/benchmark/benchmark ./ || exit 1

    local line_info model_info spec_acc_limit model_name input_num input_shapes \
            mode model_file input_files output_file data_path acc_limit enableFp16 \
            run_result

    # Prepare the config file list
    for cfg_file in ${models_server_inference_cfg_file_list[*]}; do
        cfg_file_name=${cfg_file##*/}
        while read line; do
            line_info=${line}
            # Skip comments and blank lines.
            if [[ $line_info == \#* || $line_info == "" ]]; then
                continue
            fi

            # model_info accuracy_limit run_mode
            model_info=`echo ${line_info} | awk -F ' ' '{print $1}'`
            accuracy_info=`echo ${line_info} | awk -F ' ' '{print $2}'`
            spec_acc_limit=`echo ${accuracy_info} | awk -F ';' '{print $1}'`

            # model_info detail
            model_name=`echo ${model_info} | awk -F ';' '{print $1}'`
            input_info=`echo ${model_info} | awk -F ';' '{print $2}'`
            input_shapes=`echo ${model_info} | awk -F ';' '{print $3}'`
            mode=`echo ${model_info} | awk -F ';' '{print $5}'`
            input_num=`echo ${input_info} | sed 's/:/;/' | awk -F ';' '{print $1}'`
            if [[ ${model_name##*.} == "caffemodel" ]]; then
                model_name=${model_name%.*}
            elif [[ ${cfg_file_name} =~ "_posttraining" ]]; then
                model_name=${model_name}"_posttraining"
            fi

            # converter for distribution models
            if [[ ${spec_acc_limit} == "CONVERTER" ]]; then
                echo "Skip ${model_name} ......"
                continue
            fi

            use_parallel_predict="false"
            if [[ ${mode} =~ "parallel_predict" ]]; then
                use_parallel_predict="true"
            fi

            echo "Benchmarking ${model_name} ......"
            model_file=${ms_models_path}'/'${model_name}'.mindir'

            # Build the input/output data file paths; multi-input models use
            # a comma-separated list suffixed _1.._N.
            input_files=""
            output_file=""
            data_path=${models_path}'/input_output/'
            if [[ ${input_num} == "" || ${input_num} == 1 ]]; then
                input_files=${data_path}'input/'${model_name}'.bin'
            else
                for i in $(seq 1 $input_num)
                do
                    input_files=${input_files}${data_path}'input/'${model_name}'.bin_'$i','
                done
            fi
            output_file=${data_path}'output/'${model_name}'.out'

            # set accuracy limitation (default 0.5; fp16 default 5)
            acc_limit="0.5"
            if [[ ${spec_acc_limit} != "" ]]; then
                acc_limit="${spec_acc_limit}"
            elif [[ ${mode} == "fp16" ]]; then
                acc_limit="5"
            fi
            # whether enable fp16
            enableFp16="false"
            if [[ ${mode} == "fp16" ]]; then
                enableFp16="true"
            fi

            # Echo the exact command before running it, for the CI log.
            echo 'CUDA_VISIBLE_DEVICES='${cuda_device_id}' ./benchmark --enableParallelPredict='${use_parallel_predict}' --modelFile='${model_file}' --inputShapes='${input_shapes}' --inDataFile='${input_files}' --benchmarkDataFile='${output_file}' --enableFp16='${enableFp16}' --accuracyThreshold='${acc_limit}' --device=GPU'
            CUDA_VISIBLE_DEVICES=${cuda_device_id} ./benchmark --enableParallelPredict=${use_parallel_predict} --modelFile=${model_file} --inputShapes=${input_shapes} --inDataFile=${input_files} --benchmarkDataFile=${output_file} --enableFp16=${enableFp16} --accuracyThreshold=${acc_limit} --device=GPU

            # Record pass/fail; abort the whole run on first failure.
            if [ $? = 0 ]; then
                if [[ ${mode} =~ "parallel_predict" ]]; then
                    run_result='TensorRT: '${model_name}' parallel_pass'; echo ${run_result} >> ${run_benchmark_result_file}
                else
                    run_result='TensorRT: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file}
                fi
            else
                if [[ ${mode} =~ "parallel_predict" ]]; then
                    run_result='TensorRT: '${model_name}' parallel_failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1
                else
                    run_result='TensorRT: '${model_name}' failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1
                fi
            fi
        done < ${cfg_file}
    done
}
# Print start msg before run testcase
# (table header for the result listing printed by Print_Benchmark_Result)
function MS_PRINT_TESTCASE_START_MSG() {
    echo ""
    echo -e "-------------------------------------------------------------------------------------------------------------------------"
    echo -e "env Testcase Result "
    echo -e "--- -------- ------ "
}
# Print closing rule after the testcase result listing.
function MS_PRINT_TESTCASE_END_MSG() {
    echo -e "-------------------------------------------------------------------------------------------------------------------------"
}
# Pretty-print the benchmark result file ($1) as a padded three-column
# table ("env testcase result"), framed by the MS_PRINT_TESTCASE_* rules.
#
# Fix: the original stored the whole line as a single array element
# (arr=("${line}")) and then relied on *unquoted* ${arr[0]} word-splitting
# to feed printf — which only worked by accident.  Split the line into
# fields explicitly and quote the expansions.
function Print_Benchmark_Result() {
    MS_PRINT_TESTCASE_START_MSG
    while read -r line; do
        # Split the result line into whitespace-separated fields.
        read -r -a arr <<< "${line}"
        printf "%-20s %-90s %-7s\n" "${arr[0]}" "${arr[1]}" "${arr[2]}"
    done < "$1"
    MS_PRINT_TESTCASE_END_MSG
}
# ---- Main flow: option parsing, converter run, benchmark run, reporting ----
# Example:sh run_benchmark_gpu.sh -r /home/temp_test -m /home/temp_test/models -d "8KE5T19620002408" -e arm_cpu
while getopts "r:m:d:e:l:" opt; do
    case ${opt} in
        r)
            release_path=${OPTARG}
            echo "release_path is ${OPTARG}"
            ;;
        m)
            models_path=${OPTARG}
            echo "models_path is ${OPTARG}"
            ;;
        d)
            # -d takes "device_ip:cuda_device_id"
            device_ip=`echo ${OPTARG} | cut -d \: -f 1`
            cuda_device_id=`echo ${OPTARG} | cut -d \: -f 2`
            echo "device_ip is ${device_ip}, cuda_device_id is ${cuda_device_id}."
            ;;
        e)
            backend=${OPTARG}
            echo "backend is ${backend}"
            ;;
        l)
            level=${OPTARG}
            echo "level is ${OPTARG}"
            ;;
        ?)
            echo "unknown para"
            exit 1;;
    esac
done

run_fail_not_return="OFF"
basepath=$(pwd)
echo "NVIDIA TensorRT, basepath is ${basepath}"
x86_path=${release_path}/centos_x86/cloud_fusion # ../release_pkg/lite
#tensorrt_path=${x86_path}/server/tensorrt/cuda-11.1]

# Determine the release version from the tarball name
# (mindspore-lite-<version>-linux-x64.tar.gz -> third dash-separated field).
cd ${x86_path}
file_name=$(ls *-linux-*.tar.gz)
if [[ $backend == "all" || $backend == "server_inference_x86_cloud_gpu" ]]; then
    cd ${x86_path} || exit 1
    file_name=$(ls *-linux-*.tar.gz)
fi
IFS="-" read -r -a file_name_array <<< "$file_name"
version=${file_name_array[2]}
cd -

# cd ${basepath}
# rm -rf ./*

# Set models config filepath
config_folder="config_level0"
if [[ ${level} == "level1" ]]; then
    config_folder="config_level1"
fi
# cp ${basepath}/../${config_folder}/models_server_inference_tensorrt_cloud.cfg ./
models_server_inference_config=${basepath}/../${config_folder}/models_server_inference_tensorrt_cloud.cfg

ms_models_path=${basepath}/ms_models

# Write converter result to temp file
run_converter_log_file=${basepath}/run_converter_log.txt
echo ' ' > ${run_converter_log_file}

run_converter_result_file=${basepath}/run_converter_result.txt
echo ' ' > ${run_converter_result_file}

models_server_inference_cfg_file_list=()
models_server_inference_cfg_file_list=("$models_server_inference_config")

# Run converter
echo "start Run converter ..."
Run_Converter
Run_converter_status=$?

# Check converter result and return value
Print_Converter_Result $run_converter_result_file
if [[ ${Run_converter_status} = 0 ]];then
    echo "Run converter success"
else
    echo "Run converter failed"
    cat ${run_converter_log_file}
    exit 1
fi

# Empty config file is allowed, but warning message will be shown
if [[ $(Exist_File_In_Path ${ms_models_path} ".mindir") != "true" ]]; then
    echo "No mindir model found in ${ms_models_path}, please check if config file is empty!"
    exit 1
fi

# Write benchmark result to temp file
run_benchmark_result_file=${basepath}/run_benchmark_result.txt
echo ' ' > ${run_benchmark_result_file}

# Copy the MindSpore models:
echo "Push files and run benchmark"
benchmark_test_path=${basepath}/benchmark_test
rm -rf ${benchmark_test_path}
mkdir -p ${benchmark_test_path}
cp -a ${ms_models_path}/*.mindir ${benchmark_test_path} || exit 1

backend=${backend:-"all"}
isFailed=0

# Run the TensorRT benchmark for the GPU backend.
if [[ $backend == "all" || $backend == "server_inference_x86_cloud_gpu" ]]; then
    echo "start Run ..."
    Run_TensorRT
    Run_x86_status=$?
fi

if [[ $backend == "all" || $backend == "server_inference_x86_cloud_gpu" ]]; then
    if [[ ${Run_x86_status} != 0 ]];then
        echo "run x86 server inference failed"
        isFailed=1
    fi
fi

Print_Benchmark_Result ${run_benchmark_result_file}

# run python ST
if [[ $backend == "all" || $backend == "server_inference_x86_cloud_gpu" ]]; then
    models_python_config=${basepath}/../config_level0/models_python_gpu.cfg
    models_python_cfg_file_list=("$models_python_config")
    Run_python_ST ${basepath} ${x86_path} ${ms_models_path} ${models_path} "${models_python_cfg_file_list[*]}" "GPU"
    Run_python_status=$?
    if [[ ${Run_python_status} != 0 ]];then
        echo "Run_python_status failed"
        isFailed=1
    fi
fi

echo "run x86_gpu_server_inference is ended"
exit ${isFailed}
| true |
b74f5ed2b0f27de9a089ce6cf5818b4f3fb19072 | Shell | mdibl/package_downloads | /gitExportRepos.sh | UTF-8 | 2,517 | 4.5625 | 5 | [] | no_license | #!/bin/sh
#This script exports tags from public git repositories
#
# Assumption: None
#
# Input:
# 1) Owner/Organization name
# 2) Repository name
# 3) Tag
# 4) Install path
#
# What it does:
# 1) Set path to git tag
# 2) wget repos tag
# 3) Create local directory for the new tag
# 4) Untar new tag tar
# 5) Remove the downloaded tar file
#
# Author: lnh
# Date : 8/1/2017
# Modification Date : February/2018
#
# Resolve the external tools used below.
WGET=`which wget`
TAR=`which tar`
#setup the log file
SCRIPT_NAME=`basename $0`
TOP=`dirname $0`
WORKING_DIR=`realpath $TOP`
#Check the number of arguments
if [ $# -lt 4 ]
then
    echo ""
    echo "***********************************************"
    echo "Bad usage ---"
    echo "Usage: ./$SCRIPT_NAME ORGANIZATION/OWNER REPO_NAME GIT_TAG INSTALL_DIR"
    echo "Example1: ./$SCRIPT_NAME mdibl data_downloads v1.1.0 /usr/local/biocore"
    echo "Example1: ./$SCRIPT_NAME mdibl biocore_misc master /usr/local/biocore/admin"
    echo ""
    echo "***********************************************"
    echo ""
    exit 1
fi
##
ORG=$1
REPO=$2
TAG=$3
INSTALL_DIR=$4

# GitHub API tarball URL for the requested tag of this repository.
GIT_URL=https://api.github.com/repos/$ORG/$REPO/tarball/$TAG
#Local tag directory (basename strips any path components from the tag)
tag_base=`basename $TAG`
TAG_DIR=$REPO-$tag_base
LOG_DIR=$WORKING_DIR/logs
if [ ! -d $LOG_DIR ]
then
    mkdir $LOG_DIR
fi
# Fresh per-run log file named after script + tag directory.
LOG=$LOG_DIR/$SCRIPT_NAME.$TAG_DIR.log
rm -f $LOG
touch $LOG
#Results tar file
TAG_TAR_FILE=$TAG_DIR.tar.gz
date | tee -a $LOG
echo "The path to logs is $LOG_DIR" | tee -a $LOG
echo "wget path: $WGET" | tee -a $LOG
echo "tar path: $TAR"| tee -a $LOG
echo "Tag: $TAG"| tee -a $LOG
echo "Repository: $REPO"| tee -a $LOG
echo "Organization: $ORG"| tee -a $LOG
echo "Git url: $GIT_URL"| tee -a $LOG
echo "This product will be installed under: $INSTALL_DIR" | tee -a $LOG
date | tee -a $LOG
cd $INSTALL_DIR
#clean previous download of this tag: keep one backup as <tag>.old
if [ -d $TAG_DIR ]
then
    echo "The tag $TAG_DIR is already installed under $INSTALL_DIR"
    if [ -d $TAG_DIR.old ]
    then
        rm -rf $TAG_DIR.old
    fi
    mv $TAG_DIR $TAG_DIR.old
fi
#execute the command (download the tag tarball)
echo Cammand: $WGET -O $TAG_TAR_FILE "$GIT_URL" | tee -a $LOG
$WGET -O $TAG_TAR_FILE "$GIT_URL" 2>&1 | tee -a $LOG
#Create local directory for this tag
mkdir $TAG_DIR
#Untar the new archive; --strip-components 1 drops GitHub's top-level
#"<org>-<repo>-<sha>/" wrapper directory.
echo "Untar $TAG_TAR_FILE" | tee -a $LOG
echo "Command: $TAR -xzvf $TAG_TAR_FILE -C $TAG_DIR --strip-components 1"
$TAR -xzvf $TAG_TAR_FILE -C $TAG_DIR --strip-components 1
#Remove the tar file
rm -f $TAG_TAR_FILE
date
echo "Program complete"
exit 0
| true |
d2caa2b35f1153928af1b82a73110c32753b75f9 | Shell | acidanthera/OpenCorePkg | /Staging/EnableGop/vBiosInsert.sh | UTF-8 | 8,862 | 4 | 4 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
#
# Copyright © 2023 Mike Beaton. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Insert EFI into AMD or Nvidia VBIOS.
# Tested back to Mac OS X 10.11 El Capitan.
#
usage() {
  # Print the command-line help to stdout; ${SELFNAME} is this script's name.
  cat <<USAGE_EOF
Usage: ./${SELFNAME} [args] {rom-file} {efi-file} {out-file}
Args:
 -a : AMD
 -n : Nvidia
 -o {GOP offset} : GOP offset (auto-detected if Homebrew grep is installed)
 Can specify 0x{hex} or {decimal}
 -t {temp dir} : Specify temporary directory, and keep temp files
Examples:
 ./${SELFNAME} -n -o 0xFC00 nv.rom EnableGop.efi nv_mod.rom
 ./${SELFNAME} -n nv.rom EnableGop.efi nv_mod.rom
 ./${SELFNAME} -a amd.rom EnableGop.efi amd_mod.rom

USAGE_EOF
}
SELFNAME="$(/usr/bin/basename "${0}")"
# External tools this script shells out to; fail early if any is missing.
commands=(
  "EfiRom"
  "UEFIRomExtract"
  "hexdump"
  "grep"
  )
FOUND=1
# The loop variable shadows the name "command", but "command -v" is a shell
# builtin, so the availability lookup still works.
for command in "${commands[@]}"; do
  if ! command -v "$command" 1>/dev/null ; then
    echo "${command} not available!"
    FOUND=0
  fi
done
if [ "$FOUND" -eq 0 ] ; then
  exit 1
fi
AMD=0
AMD_SAFE_SIZE="0x20000"
GOP_OFFSET="-"
NVIDIA=0
POS=0
while true; do
if [ "$1" = "-a" ] ; then
AMD=1
NVIDIA=0
shift
elif [ "$1" = "-n" ] ; then
AMD=0
NVIDIA=1
shift
elif [ "$1" = "-o" ] ; then
shift
if [ "$1" != "" ] && ! [ "${1:0:1}" = "-" ] ; then
GOP_OFFSET=$1
shift
else
echo "No GOP offset specified" && exit 1
fi
elif [ "$1" = "-s" ] ; then # semi-secret option to modify AMD safe size
shift
if [ "$1" != "" ] && ! [ "${1:0:1}" = "-" ] ; then
AMD_SAFE_SIZE=$1
shift
else
echo "No AMD safe size specified" && exit 1
fi
elif [ "$1" = "-t" ] ; then
shift
if [ "$1" != "" ] && ! [ "${1:0:1}" = "-" ] ; then
TEMP_DIR=$1
shift
else
echo "No temp dir specified" && exit 1
fi
elif [ "${1:0:1}" = "-" ] ; then
echo "Unknown option: ${1}" && exit 1
elif [ "$1" != "" ] ; then
case "$POS" in
0 )
ROM_FILE="$1"
;;
1 )
EFI_FILE="$1"
;;
2 )
OUT_FILE="$1"
;;
* )
echo "Too many filenames specified" && exit 1
;;
esac
POS=$(($POS+1))
shift
else
break
fi
done
if [ "$ROM_FILE" = "" ] ||
[ "$EFI_FILE" = "" ] ||
[ "$OUT_FILE" = "" ] ; then
usage
exit 0
fi
if [ "$AMD" -eq 0 ] && [ "$NVIDIA" -eq 0 ] ; then
echo "Must specify -a or -n" && exit 1
fi
if [ "$TEMP_DIR" != "" ] ; then
mkdir -p "$TEMP_DIR" || exit 1
tmpdir="$TEMP_DIR"
else
# https://unix.stackexchange.com/a/84980/340732
tmpdir=$(mktemp -d 2>/dev/null || mktemp -d -t 'vbios') || exit 1
fi
ORIGINAL_SIZE=$(stat -f%z "$ROM_FILE") || exit 1
if [ "$AMD" -eq 1 ] ; then
if [ "$ORIGINAL_SIZE" -lt "$((AMD_SAFE_SIZE))" ] ; then
echo " - File size of ${ORIGINAL_SIZE} bytes must be at least safe size of $((AMD_SAFE_SIZE)) bytes; use -s or check file" && exit 1
fi
dd bs=1 if="$ROM_FILE" of="$tmpdir/modify_part.rom" count=$(($AMD_SAFE_SIZE)) 2>/dev/null || exit 1
dd bs=1 if="$ROM_FILE" of="$tmpdir/keep_part.rom" skip=$(($AMD_SAFE_SIZE)) 2>/dev/null || exit 1
else
cp "$ROM_FILE" "$tmpdir/modify_part.rom" || exit 1
echo -n > "$tmpdir/keep_part.rom" || exit 1
fi
if [ "$GOP_OFFSET" = "-" ] ; then
echo "Auto-detecting GOP offset..."
# nicer techniques which do not assume nice alignment of what is being searched for do not work on older Mac OS X
OUTPUT=$(hexdump -C "$tmpdir/modify_part.rom" | grep '55 aa .. .. f1 0e 00 00' | head -1)
# Make macOS bash to split as expected:
# shellcheck disable=SC2206
GOP_ARRAY=($OUTPUT)
GOP_OFFSET=${GOP_ARRAY[0]}
if [ "$GOP_OFFSET" != "" ] ; then
GOP_OFFSET="0x${GOP_OFFSET}"
GOP_OFFSET=$(($GOP_OFFSET))
else
GOP_OFFSET=-1
fi
if [ "$GOP_OFFSET" -eq -1 ] ; then
echo " - No GOP found in ROM!" && exit 1
fi
fi
dd bs=1 if="$tmpdir/modify_part.rom" of="$tmpdir/original_first_part.rom" count=$(($GOP_OFFSET)) 2>/dev/null || exit 1
dd bs=1 if="$tmpdir/modify_part.rom" of="$tmpdir/original_last_part.rom" skip=$(($GOP_OFFSET)) 2>/dev/null || exit 1
echo "Compressing EFI using EfiRom..."
if [ "$AMD" -eq 1 ] ; then
EfiRom -o "$tmpdir/insert.rom" -ec "$EFI_FILE" -f 0xAAAA -i 0xBBBB -l 0x30000 -p || exit 1
else
EfiRom -o "$tmpdir/insert.rom" -ec "$EFI_FILE" -f 0xAAAA -i 0xBBBB -l 0x30000 || exit 1
fi
if [ "$NVIDIA" -eq 1 ] ; then
echo "Adding Nvidia header..."
dd bs=1 if="$tmpdir/insert.rom" of="$tmpdir/insert_first_part" count=$((0x38)) 2>/dev/null || exit 1
dd bs=1 if="$tmpdir/insert.rom" of="$tmpdir/insert_last_part" skip=$((0x38)) 2>/dev/null || exit 1
# TODO: truncation logic must be fixed for when there is not enough spare padding in output of EfiRom
INSERT_SIZE=$(stat -f%z "$tmpdir/insert.rom") || exit 1
# add NPDE from original GOP
dd bs=1 if="$tmpdir/original_last_part.rom" of="$tmpdir/insert_first_part" skip=$((0x38)) seek=$((0x38)) count=$((0x18)) 2>/dev/null || exit 1
cat "$tmpdir/insert_first_part" "$tmpdir/insert_last_part" > "$tmpdir/insert_oversize.rom" || exit 1
# Note: `truncate` command is not present by default on macOS
dd bs=1 if="$tmpdir/insert_oversize.rom" of="$tmpdir/insert_fixed.rom" count="$INSERT_SIZE" 2>/dev/null || exit 1
# patch size in NPDE
dd bs=1 if="$tmpdir/insert.rom" of="$tmpdir/insert_fixed.rom" skip=$((0x2)) seek=$((0x48)) count=1 conv=notrunc 2>/dev/null || exit 1
else
cp "$tmpdir/insert.rom" "$tmpdir/insert_fixed.rom" || exit 1
fi
# patch with vendor and device id from original GOP
dd bs=1 if="$tmpdir/original_last_part.rom" of="$tmpdir/insert_fixed.rom" skip=$((0x20)) seek=$((0x20)) count=4 conv=notrunc 2>/dev/null || exit 1
if [ "$NVIDIA" -eq 1 ] ; then
# patch size in PCIR
dd bs=1 if="$tmpdir/original_last_part.rom" of="$tmpdir/insert_fixed.rom" skip=$((0x16)) seek=$((0x16)) count=1 conv=notrunc 2>/dev/null || exit 1
# patch end marker in NPDE in fixed ROM (leave PCIR correct and EFI extractable from fixed ROM)
echo -n -e '\x00' | dd bs=1 of="$tmpdir/insert_fixed.rom" seek=$((0x4A)) conv=notrunc 2>/dev/null || exit 1
fi
echo "Combining..."
cat "$tmpdir/original_first_part.rom" "$tmpdir/insert_fixed.rom" "$tmpdir/original_last_part.rom" > "$tmpdir/combined.rom" || exit 1
TRUNCATE=0
if [ "$AMD" -eq 1 ] ; then
TRUNCATE=1
TRUNCATE_SIZE="$AMD_SAFE_SIZE"
else
printf '%x' "$ORIGINAL_SIZE" | grep -q "000$" && TRUNCATE=1
if [ "$TRUNCATE" -eq 1 ] ; then
echo "Detected standard ROM size, truncating to original size..."
TRUNCATE_SIZE="$ORIGINAL_SIZE"
fi
fi
if [ "$TRUNCATE" -eq 1 ] ; then
dd bs=1 if="$tmpdir/combined.rom" of="$tmpdir/truncated.rom" count="$TRUNCATE_SIZE" 2>/dev/null || exit 1
COUNT=$(hexdump -v -e '1/8 " %016X\n"' "$tmpdir/truncated.rom" | tail -n 8 | grep "FFFFFFFFFFFFFFFF" | wc -l)
if [ "$COUNT" -ne 8 ] ; then
# Some Nvidia ROMs, at least, incorrectly have 00000000 padding after active contents
# (it is incorrect, since writing only active contents using nvflash resets the rest to ffffffff).
# May also be relevant if we ever have any truly 00000000 default ROM images.
COUNT=$(hexdump -v -e '1/8 " %016X\n"' "$tmpdir/truncated.rom" | tail -n 8 | grep "0000000000000000" | wc -l)
fi
if [ "$COUNT" -ne 8 ] ; then
echo " - Not enough space within $((TRUNCATE_SIZE / 1024))k limit - aborting!" && exit 1
fi
cat "$tmpdir/truncated.rom" "$tmpdir/keep_part.rom" > "$OUT_FILE" || exit 1
else
cp "$tmpdir/combined.rom" "$OUT_FILE" || exit 1
fi
# patch end marker in PCIR in out file
echo -n -e '\x00' | dd bs=1 of="$OUT_FILE" seek=$(($GOP_OFFSET + 0x31)) conv=notrunc 2>/dev/null || exit 1
printf "Verifying (starting at 0x%X)...\n" "$GOP_OFFSET"
dd bs=1 if="$OUT_FILE" of="$tmpdir/out_efi_part.rom" skip=$(($GOP_OFFSET)) 2>/dev/null || exit 1
# UEFIRomExtract error messages are on stdout, so we cannot suppress unwanted normal output here
UEFIRomExtract "$tmpdir/out_efi_part.rom" "$tmpdir/extracted.efi" || exit 1
ERROR=0
diff "$tmpdir/extracted.efi" "$EFI_FILE" 1>/dev/null || ERROR=1
if [ "$ERROR" -ne 0 ] ; then
echo " - Failure comparing extracted EFI to original!"
fi
OLD_EFI_COUNT=$(EfiRom -d "$tmpdir/original_last_part.rom" | grep "0x0EF1" | wc -l) || exit 1
OLD_EFI_COUNT=$(($OLD_EFI_COUNT)) || exit 1
NEW_EFI_COUNT=$(EfiRom -d "$tmpdir/out_efi_part.rom" | grep "0x0EF1" | wc -l) || exit 1
NEW_EFI_COUNT=$(($NEW_EFI_COUNT)) || exit 1
if [ "$NEW_EFI_COUNT" -ne $(($OLD_EFI_COUNT + 1)) ] ; then
echo " - ${OLD_EFI_COUNT} EFI parts in original ROM, and detected ${NEW_EFI_COUNT} EFI parts in modified ROM, expected $(($OLD_EFI_COUNT + 1))!"
ERROR=1
fi
if [ "$ERROR" -eq 0 ] ; then
echo "SUCCESS."
else
echo "*** WARNING - FAIL ***"
fi
if [ "$TEMP_DIR" = "" ] ; then
rm -rf "$tmpdir" || exit 1
fi
echo "Done."
| true |
c744d98eb826016fa90f6678de1f960aa7adef89 | Shell | BianXinda/xinda_scripts | /oneClickPPTP.sh | UTF-8 | 1,273 | 2.75 | 3 | [] | no_license | #!/bin/bash
# One-click PPTP VPN server setup (Debian/Ubuntu): installs pptpd, adds a
# default account, enables IPv4 forwarding and NAT, and persists the
# firewall rules in /etc/rc.local.
# Prefer IPv4 by uncommenting the gai.conf precedence line. (The original
# replacement wrote "96i" -- a typo producing an invalid gai.conf entry.)
sudo sed -i "s/#precedence ::ffff:0:0\/96 100/precedence ::ffff:0:0\/96 100/g" /etc/gai.conf
echo ---sudo apt-get update---
sudo apt-get update
echo ---sudo apt-get upgrade---
sudo apt-get upgrade
echo ---sudo apt-get install pptpd---
sudo apt-get install pptpd
# Write privileged files via "sudo tee -a": with plain ">>" the redirection
# is performed by the (possibly unprivileged) calling shell, not by sudo.
sudo tee -a /etc/pptpd.conf > /dev/null <<EOF
localip 192.168.0.1
remoteip 192.168.0.100-200
EOF
sudo tee -a /etc/ppp/pptpd-options > /dev/null <<EOF
ms-dns 8.8.8.8
ms-dns 8.8.4.4
EOF
# Default VPN credentials -- change these before exposing the server.
user1="test"
password1="123456"
echo "$user1 pptpd $password1 *" | sudo tee -a /etc/ppp/chap-secrets > /dev/null
sudo /etc/init.d/pptpd restart
# IPv4 forwarding
sudo sed -i "s/#net.ipv4.ip_forward=1/net.ipv4.ip_forward=1/g" /etc/sysctl.conf
sudo sed -i "s/net.ipv4.ip_forward=0/net.ipv4.ip_forward=1/g" /etc/sysctl.conf
sudo sysctl -p
# Use 192.168.0 for its PPTP subnet. The second rule adjusts the MTU size
sudo iptables -t nat -A POSTROUTING -s 192.168.0.0/24 -o eth0 -j MASQUERADE
sudo iptables -A FORWARD -p tcp --syn -s 192.168.0.0/24 -j TCPMSS --set-mss 1356
# Drop the trailing "exit 0" so appended commands actually run at boot.
# (The original sed was missing -i and never modified the file.)
sudo sed -i "s/exit 0//g" /etc/rc.local
sudo tee -a /etc/rc.local > /dev/null <<EOF
iptables -t nat -A POSTROUTING -s 192.168.0.0/24 -o eth0 -j MASQUERADE
iptables -A FORWARD -p tcp --syn -s 192.168.0.0/24 -j TCPMSS --set-mss 1356
/etc/init.d/pptpd restart
EOF
#reboot to enable the setting
| true |
2e0b929d7242aa7a3f4bbe2726678de67a27b99f | Shell | cleberar/Development-Tools | /devel-install-centos6.sh | UTF-8 | 3,511 | 3.484375 | 3 | [] | no_license | #!/bin/bash
##
## Instalacao Ambiente de Desenvolvimento
##
# nome do ambiente
environment=$1
if [ $(whoami) != "root" ]; then
echo "Tem que executar como root."
exit 2
fi
##
## Configuracoes Basicas de Desenvolvimento
##
ConfigureDevel()
{
    # Disable SELinux (keeps a .backup copy of the original config).
    [ -f /etc/selinux/config ] && sed -i.backup -e 's/^SELINUX=.*$/SELINUX=disabled/' /etc/selinux/config

    # Configure the Subversion editor, preferring vim when available.
    if ! grep -q "SVN_EDITOR" /etc/bashrc ; then
        if [ -x /usr/bin/vim ] ; then
            printf 'export SVN_EDITOR=/usr/bin/vim\n'>> /etc/bashrc
        else
            printf 'export SVN_EDITOR=/usr/bin/vi\n'>> /etc/bashrc
        fi
    fi

    # Improve the bash prompt. $environment is intentionally expanded now
    # (install time), but $(id -u) is escaped so the root/non-root check
    # runs at every shell startup. The original unquoted heredoc expanded
    # $(id -u) while writing the snippet, freezing the test to the
    # installer's own uid.
    cat << SETVAR >> /etc/bashrc
if [ "\$(id -u)" != "0" ] ; then
    PS1="\n(\e[31;1m\u\e[m - \w @\e[32;1m\t\e[m - Devel :: $environment) \n\H: "
else
    PS1="\n(\e[34;1m\u\e[m - \w @\e[32;1m\t\e[m - Devel :: $environment) \n\H: "
fi
SETVAR
}
##
## Instala os pacotes necessarios para um ambiente de desenvolvimento
##
# Install the base development packages (EPEL repo, CentOS 6 era).
InstallPackages() {
    printf "\n Instalando Pacotes "
    printf "\n------------------------------------------------------------\n"
    # Install the EPEL repository.
    wget "http://download.fedoraproject.org/pub/epel/6/i386/epel-release-6-7.noarch.rpm" -qO "/tmp/epel.rpm"
    rpm -ivh /tmp/epel.rpm
    # Packages required for development; each array element holds several
    # space-separated package names (one element spans two lines).
    yumList=("php-pear curl gettext make man man-pages python-crypto python-hashlib
        python-nose python-simplejson python-twisted python-uuid rpm-build"
        "selinux-policy selinux-policy-targeted subversion sudo syslinux"
        "vim-enhanced wget yum-changelog yum-security yum-utils screen"
        "automake make rpm rpm-build rpm-devel curl-devel openssl-devel mysql"
        "mysql-server httpd gettext php php-devel php-mbstring php-mysql autoconf "
        "php-pdo php-xml php-gd php-pear php-pear-PHP-CodeSniffer php-pear-PHPUnit"
        "mod_ssl python python-twisted python-simplejson python-pycurl python-hashlib");
    # printf "%s " keeps a separator between elements. The original
    # printf "%s" glued the last package of one element onto the first of
    # the next (e.g. "rpm-buildselinux-policy"), so those packages were
    # never resolvable by yum.
    yum install $(printf "%s " "${yumList[@]}") -y
    # Developer tools group.
    yum groupinstall -y 'Development Tools'
}
##
## Node nao e querido, asm valido ter
##
# Download, build and install Node.js 0.8.1 from source, then clean up.
InstallNode() {
    printf " INSTALADO Node JS, aguarde isto vai demorar ... \n\n";
    wget "http://nodejs.org/dist/v0.8.1/node-v0.8.1.tar.gz" -O "node-v0.8.1.tar.gz"
    tar -vzxf node-v0.8.1.tar.gz
    # Build from source, then return to the original directory before
    # cleaning up: the original stayed inside node-v0.8.1, so its cleanup
    # paths pointed at files that do not exist there.
    cd node-v0.8.1; ./configure; make; make install
    cd ..
    # "rm -rf" -- the original "rm-rf" (missing space) was not a command.
    rm -rf node-v0.8.1
    rm -f node-v0.8.1.tar.gz
}
##
## Instalando Softwares para Integracao continua em PHP
##
InstallICPHP() {
    # Install PHP continuous-integration tooling via PEAR channels.
    # installing CodeSniffer
    pear channel-update pear.php.net
    pear install PHP_CodeSniffer-1.4.1
    # installing phpDocumentor
    pear channel-discover pear.phpdoc.org
    pear install phpdoc/phpDocumentor-alpha
    # PHP Unit
    pear channel-discover pear.phpunit.de
    pear channel-discover components.ez.no
    pear channel-discover pear.symfony-project.com
    pear channel-update pear.phpunit.de
    pear install pear.phpunit.de/PHPUnit
    pear install pear.phpunit.de/phpcpd
    pear install pear.phpunit.de/phploc
    pear install pear.phpunit.de/PHP_CodeCoverage
    # php depend
    pear channel-discover pear.pdepend.org
    pear channel-update pear.pdepend.org
    pear install pdepend/PHP_Depend-beta
}
# execute the list of actions
# run package installation
InstallPackages;
# configure the development environment
ConfigureDevel;
# continuous integration tooling
InstallICPHP;
InstallNode;
223ee63ed7211da58e50c98c60496927c11bc491 | Shell | ayushi-s/DCC | /utils/download_tools.sh | UTF-8 | 364 | 2.859375 | 3 | [
"BSD-2-Clause"
] | permissive | #!/usr/bin/env bash
# change to directory $DIR where this script is stored
# NOTE(review): pushd saves the caller's directory but there is no matching
# popd, so the directory stack entry is never consumed.
pushd .
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
cd $DIR
# Fetch and build the COCO Python API.
git clone https://github.com/pdollar/coco.git
cd coco/PythonAPI
make
echo "Finished downloading coco tools."
# NOTE(review): "cd .." only moves up to coco/, not back to $DIR, so
# coco-caption is cloned INSIDE the coco checkout -- confirm intended.
cd ..
git clone https://github.com/tylin/coco-caption.git
echo "Finished downloading caption eval tools"
| true |
f8fa01222bcae138c98c148328d57a3a9261c1fa | Shell | zweifisch/ppa-collection | /README.md | UTF-8 | 8,155 | 2.765625 | 3 | [] | no_license | #!/bin/bash
## install
install(){
curl https://raw.githubusercontent.com/zweifisch/ppa-collection/master/README.md \
| sudo tee /usr/bin/ppa-collection
sudo chmod +x /usr/bin/ppa-collection
}
## packages
### cmake
cmake() {
sudo add-apt-repository ppa:george-edison55/cmake-3.x
sudo apt-get update
sudo apt-get install cmake
}
### crystal
crystal(){
curl http://dist.crystal-lang.org/apt/setup.sh | sudo bash
sudo apt-key adv --keyserver keys.gnupg.net --recv-keys 09617FD37CC06B54
echo deb http://dist.crystal-lang.org/apt crystal main \
| sudo tee /etc/apt/sources.list.d/crystal.list
sudo apt-get install crystal
}
### docker
docker(){
[ -e /usr/lib/apt/methods/https ] || {
sudo apt-get update
sudo apt-get install apt-transport-https
}
sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 \
--recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9
echo deb https://get.docker.com/ubuntu docker main \
| sudo tee /etc/apt/sources.list.d/docker.list
sudo apt-get update
sudo apt-get install lxc-docker
}
### elixir
elixir(){
wget http://packages.erlang-solutions.com/erlang-solutions_1.0_all.deb \
-O /tmp/erlang-solutions.deb && sudo dpkg -i /tmp/erlang-solutions.deb
sudo apt-get update
sudo apt-get install elixir
}
### emacs
emacs(){
sudo apt-add-repository -y ppa:adrozdoff/emacs
sudo apt update
sudo apt install emacs25
}
### erlang
erlang(){
wget http://packages.erlang-solutions.com/erlang-solutions_1.0_all.deb \
-O /tmp/erlang-solutions.deb && sudo dpkg -i /tmp/erlang-solutions.deb
sudo apt-get update
sudo apt-get install erlang
}
### firefox
firefox(){
sudo add-apt-repository ppa:ubuntu-mozilla-daily/firefox-aurora
sudo apt-get update
sudo apt-get install firefox
}
remove-firefox() {
sudo apt-add-repository --remove ppa:ubuntu-mozilla-daily/firefox-aurora
}
### fish
fish() {
sudo apt-add-repository ppa:fish-shell/release-2
sudo apt-get update
sudo apt-get install fish
}
### fsharp
fsharp() {
sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF
echo "deb http://download.mono-project.com/repo/debian wheezy main" | sudo tee /etc/apt/sources.list.d/mono-xamarin.list
sudo apt-get update
sudo apt-get install mono-complete fsharp
}
### git
git() {
sudo add-apt-repository ppa:git-core/ppa
sudo apt-get update
sudo apt-get install git
}
### go
go() {
sudo add-apt-repository ppa:gophers/archive
sudo apt-get update
sudo apt-get install golang
}
### java
java(){
sudo add-apt-repository ppa:webupd8team/java
sudo apt-get update
sudo apt-get install oracle-java7-installer
}
### kivy
kivy(){
sudo add-apt-repository ppa:kivy-team/kivy
sudo apt-get update
sudo apt-get install kivy
}
### mongodb
mongodb(){
echo deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen \
| sudo tee /etc/apt/sources.list.d/mongo.list
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv 7F0CEB10
sudo apt-get update
sudo apt-get install mongodb-10gen
}
### neo4j
neo4j(){
echo deb http://debian.neo4j.org/repo stable \
| sudo tee /etc/apt/sources.list.d/neo4j.list
wget -O - http://debian.neo4j.org/neotechnology.gpg.key | sudo apt-key add -
sudo apt-get update
sudo apt-get install neo4j
}
### neovim
neovim(){
sudo apt-add-repository ppa:neovim-ppa/unstable
sudo apt-get update
sudo apt-get install neovim
}
### nginx
nginx(){
sudo apt-add-repository ppa:nginx/stable
sudo apt-get update
sudo apt-get install nginx
}
### nodejs
nodejs(){
sudo apt-add-repository ppa:chris-lea/node.js
sudo apt-get update
sudo apt-get install nodejs
}
### ocaml
ocaml(){
sudo add-apt-repository ppa:avsm/ppa
sudo apt-get update
sudo apt-get install ocaml ocaml-native-compilers camlp4-extra opam
}
### phantomjs
phantomjs(){
sudo add-apt-repository ppa:tanguy-patte/phantomjs
sudo apt-get update
sudo apt-get install phantomjs
}
### php
php(){
sudo add-apt-repository ppa:ondrej/php5
sudo apt-get update
sudo apt-get install php5
}
### postgresql
postgresql(){
wget -qO - https://www.postgresql.org/media/keys/ACCC4CF8.asc \
| sudo apt-key add -
echo deb http://apt.postgresql.org/pub/repos/apt/ trusty-pgdg main \
| sudo tee /etc/apt/sources.list.d/postgresql.list
sudo apt-get update
sudo apt-get install postgresql
}
### rabbitmq
rabbitmq(){
echo deb http://www.rabbitmq.com/debian/ testing main \
| sudo tee /etc/apt/sources.list.d/rabbitmq.list
wget -O - http://www.rabbitmq.com/rabbitmq-signing-key-public.asc \
| sudo apt-key add -
sudo apt-get update
sudo apt-get install rabbitmq-server
}
### redis
redis(){
sudo apt-add-repository ppa:chris-lea/redis-server
sudo apt-get update
sudo apt-get install redis-server
}
### rethinkdb
rethinkdb(){
sudo add-apt-repository ppa:rethinkdb/ppa
sudo apt-get update
sudo apt-get install rethinkdb
}
### riak
riak(){
echo deb http://apt.basho.com precise main \
| sudo tee /etc/apt/sources.list.d/basho.list
wget -O - http://apt.basho.com/gpg/basho.apt.key | sudo apt-key add -
sudo apt-get update
sudo apt-get install riak
}
### elasticsearch
elasticsearch(){
wget -qO - http://packages.elasticsearch.org/GPG-KEY-elasticsearch | sudo apt-key add -
echo deb http://packages.elasticsearch.org/elasticsearch/1.3/debian stable main \
| sudo tee /etc/apt/sources.list.d/elasticsearch.list
sudo apt-get update
sudo apt-get install elasticsearch
}
### tmux
tmux(){
sudo add-apt-repository ppa:pi-rho/dev
sudo apt-get update
sudo apt-get install tmux
}
### racket
racket(){
sudo add-apt-repository ppa:plt/racket
sudo apt-get update
sudo apt-get install racket
}
### rust
rust(){
curl -s https://static.rust-lang.org/rustup.sh | sudo sh
}
### stack
stack(){
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 575159689BEFB442
echo "deb http://download.fpcomplete.com/ubuntu/$(lsb_release -cs) stable main" \
| sudo tee /etc/apt/sources.list.d/fpco.list
sudo apt-get update && sudo apt-get install stack -y
}
### syncthing
syncthing(){
curl -s https://syncthing.net/release-key.txt | sudo apt-key add -
echo deb http://apt.syncthing.net/ syncthing release \
| sudo tee /etc/apt/sources.list.d/syncthing-release.list
sudo apt-get update
sudo apt-get install syncthing
}
### zeal
zeal(){
sudo add-apt-repository ppa:jerzy-kozera/zeal-ppa
sudo apt-get update
sudo apt-get install zeal
}
## update
update(){
curl https://raw.githubusercontent.com/zweifisch/ppa-collection/master/README.md \
| sudo tee /usr/bin/ppa-collection
}
## usage
usage(){
    # Print help text; the package list is generated from this script's own
    # "### <name>" section headers.
    cat <<USAGE_HEAD
usage:

    $0 <package>

packages:

USAGE_HEAD
    sed -n 's/^###/   /p' $0
    cat <<USAGE_TAIL

update:

    $0 update

USAGE_TAIL
}
## main
# Dispatch: no argument prints help; otherwise run the matching installer
# function. Checking with "declare -F" first prevents executing arbitrary
# commands that are not functions defined in this script.
if [[ $# = 0 ]]; then
    usage
elif declare -F -- "$1" > /dev/null; then
    "$1"
else
    usage
fi
# vim: set ft=shell:
| true |
26ce5cffcb22d5721586d2306c33e344468666ec | Shell | willrogers/tracy-3.5 | /gnuplot/dynap_err.sh | UTF-8 | 2,937 | 2.609375 | 3 | [] | no_license | #!/bin/sh
prm1=${1-0}
gnuplot << EOP
ps = $prm1; eps = 0; phys_app = 0;
if (!ps) set terminal x11;
if (ps && !eps) \
set terminal postscript enhanced color solid lw 2 "Times-Roman" 20;
if (ps && eps) \
set terminal postscript eps enhanced color solid lw 2 "Times-Roman" 20;
set grid;
set style line 1 lt 1 lw 1 lc rgb "blue";
set style line 2 lt 1 lw 1 lc rgb "green";
set style line 3 lt 1 lw 1 lc rgb "red";
# draw projection of mechanical aperture
Ax = 17.5; Ay = 12.5;
beta_max_y = 25.5; beta_inj_y = 3.1;
if (phys_app) \
x_hat = Ax; y_hat = Ay*sqrt(beta_inj_y/beta_max_y); \
set arrow from -x_hat, 0.0 to -x_hat, y_hat nohead \
lt 1 lw 1 lc rgb "black"; \
set arrow from -x_hat, y_hat to x_hat, y_hat nohead \
lt 1 lw 1 lc rgb "black"; \
set arrow from x_hat, y_hat to x_hat, 0.0 nohead \
lt 1 lw 1 lc rgb "black";
if (ps) set output "dynap_err_1.ps"
set title "Dynamic Aperture\n";
set xlabel "x [mm]"; set ylabel "y [mm]";
#set xrange [-20:20];
#set yrange [0:4];
plot "DA_bare_0.00.out" using 1:2 title "bare" with linespoints ls 1, \
"DA_real_0.00.out" using 1:2 notitle with points ls 3;
if (!ps) pause mouse "click on graph to cont.\n";
unset arrow;
if (ps && !eps) \
set terminal postscript portrait enhanced color solid lw 2 "Times-Roman" 20;
if (ps) set output "dynap_err_2.ps"
set multiplot;
set size 1.0, 0.5; set origin 0.0, 0.5;
set title "Horizontal Momentum Aperture\n";
set xlabel "{/Symbol d} [%]"; set ylabel "x^ [mm]";
set yrange [0:];
plot "DA_bare.out" using 1:5 title "bare" with linespoints ls 2, \
"DA_real.out" using 1:11:13 title "w errors" with errorbars ls 1, \
"DA_real.out" using 1:11 notitle with lines ls 1;
set origin 0.0, 0.0;
set title "Vertical Momentum Aperture\n";
set xlabel "{/Symbol d} [%]"; set ylabel "y^ [mm]";
set yrange [0:];
plot "DA_bare.out" using 1:6 title "bare" with linespoints ls 2, \
"DA_real.out" using 1:14:16 title "w errors" with errorbars ls 3, \
"DA_real.out" using 1:14 notitle with lines ls 3;
unset multiplot;
if (!ps) pause mouse "click on graph to cont.\n";
if (ps) set output "dynap_err_3.ps"
set multiplot;
set size 1.0, 0.5; set origin 0.0, 0.5;
set title "Horizontal Momentum Acceptance\n";
set xlabel "{/Symbol d} [%]"; set ylabel "A_x [mm{/Symbol \327}mrad]";
set yrange [0:];
plot "DA_bare.out" using 1:3 title "bare" with linespoints ls 2, \
"DA_real.out" using 1:5:7 title "w errors" with errorbars ls 1, \
"DA_real.out" using 1:5 notitle with lines ls 1;
set origin 0.0, 0.0;
set title "Vertical Momentum Acceptance\n";
set xlabel "{/Symbol d} [%]"; set ylabel "A_y [mm{/Symbol \327}mrad]";
set yrange [0:];
plot "DA_bare.out" using 1:4 title "bare" with linespoints ls 2, \
"DA_real.out" using 1:8:10 title "w errors" with errorbars ls 3, \
"DA_real.out" using 1:8 notitle with lines ls 3;
unset multiplot;
if (!ps) pause mouse "click on graph to cont.\n";
EOP | true |
66850bd0ee7dce474afd8bad1e31cf94017a7eb4 | Shell | jurinva/convertiozzo | /topac.sh | UTF-8 | 931 | 3.171875 | 3 | [] | no_license | #!/bin/bash
# Convert an inventory-style host list ($1) into a PAC (Perl Auto Connector)
# YAML connection-group export, one connection per input line.
file=$1
echo $file
#grep tli- inventories_hosts.tli\ \(1\).ini | grep -v ';' | grep -v '\[' | grep -v 'vip\|prefix' > tli
# Lower-cased UUIDs: one group GUID plus one fresh UUID per host.
uuid=$(uuidgen)
uuid=${uuid,,}
guid=$(uuidgen)
guid=${guid,,}
echo $uuid
# Truncate the per-file output.
> ./$file.yml
while read -r I; do
uuid=$(uuidgen); uuid=${uuid,,}
echo "        $uuid: 1" >> ./uuid.txt;
# Each input line looks like "<host>...=<ip>"; resolve via DNS otherwise.
desc=`echo $I | cut -d" " -f1`
ip=`echo $I | cut -d"=" -f2`
# NOTE(review): the ";" means the host lookup ALWAYS overwrites $ip;
# "[[ $ip == '' ]] && ip=..." was probably intended -- confirm.
[[ $ip == '' ]]; ip=`host $desc | cut -d" " -f4`
cat ./pac-template.txt | sed -e "s/{uuid}/$uuid/g" | sed -e "s/{desc}/$desc/g" | sed -e "s/{ip}/$ip/g" | sed -e "s/{port}/22/g" | sed -e "s/{parent}/72b7d72b-605d-4bd3-a6f6-d02a6c8e6119/g" | sed -e "s/{password}//g"
# NOTE(review): loop output is appended to literal "./file.yml" while the
# file truncated above is "./$file.yml" -- one of the two looks like a bug.
done < ./$file >> ./file.yml
# NOTE(review): this header block goes to stdout, not to the YAML file;
# a ">> $file.yml" redirect appears to be missing -- confirm.
echo -e "__PAC__EXPORTED__:
  children:
    $guid: 1
  $guid:
    _is_group: 1
    _protected: 0
    children:"
cat uuid.txt >> $file.yml
echo -e "    description: Connection group 'TLI'
    name: TLI
    parent: __PAC__EXPORTED__
    " >> $file.yml
" >> $file.yml | true |
89cc14638cf1b6ccd22ea19e47332091ab7bdff5 | Shell | alejandroq/terra-incognita.blog | /hugo | UTF-8 | 199 | 2.640625 | 3 | [] | no_license | #!/usr/bin/env bash
# Run the project's hugo Docker image with the current directory mounted
# at /srv, forwarding the command-line arguments to the container.
NAME=hugo
REGISTRY=alejandroq
IMAGE="${REGISTRY}/${NAME}"
# The base command to run the hugo container.
# Quote $(pwd) so paths containing spaces mount correctly, and pass "$@"
# (not $1) so multi-argument invocations work; single-argument callers
# behave exactly as before.
docker run --rm \
    -v "$(pwd)":/srv \
    --workdir=/srv \
    "$IMAGE" "$@"
| true |
e0afadf16ba7094e34c977cd5811c10b11c2bb67 | Shell | swallenstein/d-shibsp | /install/scripts/start.sh | UTF-8 | 1,683 | 3.71875 | 4 | [] | no_license | #!/bin/bash
function main {
    # Entry point: verify privileges, clear stale runtime state, then start
    # shibd followed by httpd (httpd runs in the foreground, keeping the
    # container alive).
    # transition from root to daemon user is handled by shibd/httpd; must start as root
    if [ $(id -u) -ne 0 ]; then
        echo "must start shibd and httpd as root"
        exit 1
    fi
    cleanup_and_prep
    start_shibd
    start_httpd
}
function cleanup_and_prep {
    # Remove leftovers from a previous, incompletely shut-down container so
    # shibd/httpd do not refuse to start thinking they are already running.
    # correct ownership (docker run will reset the ownership of volumes at creation time).
    # Only a problem with /etc/shibboleth, where mod_shib needs to have access with the httpd id
    # Make sure we're not confused by old, incompletely-shutdown shibd or httpd
    # context after restarting the container. httpd/shibd won't start correctly if thinking it is already running.
    rm -rf /var/lock/subsys/shibd
    su - shibd -c '[ -e /run/shibboleth/shibd.sock ] && rm /run/shibboleth/shibd.*'
}
function start_shibd {
    # Launch the Shibboleth daemon as user "shibd" using its bundled libs.
    echo "starting shibd"
    export LD_LIBRARY_PATH=/opt/shibboleth/lib64
    /usr/sbin/shibd -u shibd -g root -p /var/run/shibboleth/shib.pid
}
function start_httpd {
    # Validate the Apache config (-t), then run httpd in the foreground so
    # the container stays up and error output reaches stderr.
    echo "starting httpd"
    # `docker run` 1.12.6 will reset ownership and permissions on /run/httpd; therefore it need to be done again:
    # do not start with root to avoid permission conflicts on log files
    #su - $HTTPDUSER -c 'rm -f /run/httpd/* 2>/dev/null || true'
    #su - $HTTPDUSER -c 'httpd -t -d /etc/httpd/ -f conf/httpd.conf'
    #su - $HTTPDUSER -c 'httpd -DFOREGROUND -d /etc/httpd/ -f conf/httpd.conf'
    # logging to stderr requires httpd to start as root (inside docker as of 17.05.0-ce)
    rm -f /run/httpd/* 2>/dev/null || true
    httpd -t -d /etc/httpd/ -f conf/httpd.conf
    httpd -DFOREGROUND -d /etc/httpd/ -f conf/httpd.conf
}
main
| true |
9b6f1553c5e152ff2a32125e0a0524868383b45f | Shell | AffDk/dd-genomics | /archived/v0/code/drop_table.sh | UTF-8 | 446 | 3.78125 | 4 | [
"Apache-2.0"
] | permissive | #! /bin/sh
#
# Drop the specified table
#
# First argument is the database name
# Second argument is the table to drop
#
if [ $# -ne 2 ]; then
  echo "$0: ERROR: wrong number of arguments" >&2
  echo "$0: USAGE: $0 DB TABLE" >&2
  exit 1
fi
# Create the temp SQL file and guarantee its removal on every exit path;
# the original leaked the file whenever psql failed.
SQL_COMMAND_FILE=$(mktemp /tmp/dft.XXXXX) || exit 1
trap 'rm -f "${SQL_COMMAND_FILE}"' EXIT
# Quote expansions so names with unusual characters do not word-split.
# NOTE(review): $2 is still interpolated into SQL unescaped -- callers must
# pass a trusted table name.
echo "DROP TABLE IF EXISTS $2 CASCADE;" >> "${SQL_COMMAND_FILE}"
psql -X --set ON_ERROR_STOP=1 -d "$1" -f "${SQL_COMMAND_FILE}" || exit 1
| true |
0bc5c9ba1fcbf76d54815f6b99ed388a52191474 | Shell | octobot-dev/react-boilerplate | /scripts/go-script-bash/lib/prompt | UTF-8 | 5,822 | 3.953125 | 4 | [
"MIT",
"ISC",
"LicenseRef-scancode-free-unknown"
] | permissive | #! /usr/bin/env bash
#
# User input prompts
#
# Exports:
# @go.read_prompt_response
# Reads a line, trims leading/trailing space, and sets a default if empty
#
# @go.prompt_for_input
# Prompts the user for a line of input
#
# @go.prompt_for_safe_input
# Prompts the user for a line of input, then validates it isn't dangerous
#
# @go.prompt_for_yes_or_no
# Prompts the user for a yes or no response
#
# @go.select_option
# Prompts the user to select one item from a list of options
. "$_GO_USE_MODULES" 'strings' 'validation'
# Reads a line, trims leading/trailing space, and sets a default if empty
#
# Arguments:
# var_name: Name of the caller's variable into which to read value
# default: (Optional) Default value if the input line is empty
@go.read_prompt_response() {
  # Aborts if "$1" is not a valid shell identifier (it is used with printf -v).
  @go.validate_identifier_or_die 'Input prompt response variable name' "$1"
  read -r "$1"
  # Strip leading/trailing whitespace from the response in place.
  @go.trim "$1"
  # Fall back to the optional default ($2) when the trimmed line is empty.
  printf -v "$1" -- '%s' "${!1:-$2}"
}
# Prompts the user for a line of input
#
# If the prompt doesn't end with a whitespace character, a space will be added
# between the prompt and the input cursor. Otherwise the existing character will
# be preserved.
#
# If a default value is specified, a space will be added to the prompt, followed
# by the default value in square brackets; the caller should not add the default
# value to the prompt directly. If the prompt ends with a whitespace character,
# it will be preserved and added after the default value.
#
# Arguments:
# result_var Name of the caller-declared variable for the result
# prompt Text prompt for user input
# default (Optional) Default value if response is empty
# fail_msg (Optional) Failure message if empty input isn't valid
@go.prompt_for_input() {
  @go.validate_identifier_or_die 'Input prompt response variable name' "$1"
  # Print the prompt (plus optional "[default: ...]") to stderr, preserving
  # any trailing whitespace character of the prompt after the suffix.
  if [[ "$2" =~ [[:space:]]$ ]]; then
    @go.printf '%s%s%s' "${2%?}" "${3:+ [default: $3]}" "${BASH_REMATCH[0]}" >&2
  else
    @go.printf '%s %s' "$2" "${3:+[default: $3] }" >&2
  fi
  @go.read_prompt_response "$1" "$3"
  # An empty response is only an error when a failure message ($4) was given.
  if [[ -z "${!1}" && -n "$4" ]]; then
    @go.printf '%s\n' "$4" >&2
    return 1
  fi
}
# Prompts the user for a line of input, then validates it isn't dangerous
#
# With the exception of the `--or-die` option, the arguments are the same as the
# underlying `@go.prompt_for_input`. Useful if the input value may be used to
# construct a command or query.
#
# Options (must be specified before arguments):
# --or-die <desc> Print description and stack and exit with error if invalid
#
# Arguments:
# result_var Name of the caller-declared variable for the result
# prompt Text prompt for user input
# default (Optional) Default value if response is empty
# fail_msg (Optional) Failure message if empty input isn't valid
@go.prompt_for_safe_input() {
  local or_die
  local description
  # Optional leading flag: --or-die <desc> makes validation failures fatal.
  if [[ "$1" == '--or-die' ]]; then
    or_die='true'
    description="$2"
    shift 2
  fi
  @go.validate_identifier_or_die 'Input prompt response variable name' "$1"
  if ! @go.prompt_for_input "$@"; then
    return 1
  elif [[ "$or_die" == 'true' ]]; then
    # Fatal path: prints the description plus a stack trace, then exits.
    @go.validate_input_or_die "$description" "${!1}"
  elif ! @go.validate_input "${!1}"; then
    # Non-fatal path: report the unsafe response and fail.
    @go.printf '"%s" is an invalid response, as it contains %s.\n' \
      "${!1}" 'unescaped shell metacharacters or control operators' >&2
    return 1
  fi
}
# Prompts the user for a yes or no response
#
# Arguments:
# prompt Text prompt for user input
# default (Optional) Default response; must be 'yes' or 'no'
#
# Returns:
# Zero on 'y' or 'yes' (case- and space- insensitive), nonzero otherwise
@go.prompt_for_yes_or_no() {
  local prompt="$1"
  local default="$2"
  local response
  # Capitalize the default choice in the prompt; any other default value is
  # a programming error and terminates the script with a stack trace.
  case "$default" in
    yes)
      @go.printf '%s [Y/n] ' "$prompt" >&2
      ;;
    no)
      @go.printf '%s [y/N] ' "$prompt" >&2
      ;;
    '')
      @go.printf '%s [y/n] ' "$prompt" >&2
      ;;
    *)
      @go.printf 'Invalid `default` parameter "%s" for %s at:\n' \
        "$default" "$FUNCNAME" >&2
      @go.print_stack_trace '1' >&2
      exit 1
      ;;
  esac
  # Re-prompt until a recognizable y(es)/n(o) answer is read.
  while true; do
    @go.read_prompt_response 'response' "$default"
    if [[ "$response" =~ ^[Yy]([Ee][Ss])?$ ]]; then
      return 0
    elif [[ "$response" =~ ^[Nn]([Oo])?$ ]]; then
      return 1
    else
      if [[ -n "$response" ]]; then
        @go.printf '\n"%s" is an invalid response.\n' "$response" >&2
      fi
      @go.printf '\nPlease answer Y(es) or N(o): ' >&2
    fi
  done
}
# Prompts the user to pick a single item from a list of options.
#
# Thin convenience wrapper around the `select` builtin for straightforward,
# single-option prompts; use `select` directly for anything fancier.
#
# The chosen option is returned via the caller-declared variable named by
# `result_var`. Invalid entries are reported and the prompt repeats. If the
# user terminates input (EOF / Ctrl-D), `result_var` is left untouched and a
# nonzero status is returned.
#
# Globals:
#   PS3  selection prompt used by the `select` builtin
#
# Arguments:
#   result_var: Name of the caller-declared variable used to store the option
#   ...:        Strings representing the options offered to the user
#
# Returns:
#   zero if `result_var` contains the user's selection, nonzero otherwise
@go.select_option() {
  @go.validate_identifier_or_die 'Input selection variable name' "$1"
  local __go_selected_option

  select __go_selected_option in "${@:2}"; do
    if [[ -z "$__go_selected_option" ]]; then
      # `select` leaves an unrecognized entry in REPLY and prompts again.
      @go.printf '"%s" is not a valid option.\n' "$REPLY" >&2
    else
      printf -v "$1" -- '%s' "$__go_selected_option"
      break
    fi
  done

  # On EOF the loop exits without a selection; report failure to the caller.
  [[ -n "$__go_selected_option" ]]
}
| true |
b6274a7efec5f3061640612158f130c9350a9d85 | Shell | friendbear/bashr_scripts_utf8 | /chap05/find_by_date.sh | UTF-8 | 1,271 | 4.59375 | 5 | [] | no_license | #!/bin/sh
# find_by_date.sh - find files that were modified on a given date
# If GNU date is installed under the name gnudate, prefer it
gnudate=`which gnudate`
[ -z "$gnudate" ] && gnudate="date"
# cleanup
# Runs when the script is interrupted or exits; removes the marker files
cleanup() {
[ -n "$start_tmp" ] && rm "$start_tmp"
[ -n "$end_tmp" ] && rm "$end_tmp"
}
# Show usage when the argument count is wrong
if [ $# -ne 2 ]; then
echo "usage: find_by_date.sh dir date"
exit 1
fi
# Pick up the argument values
dir="$1"
start_date=`$gnudate -d "$2" +%Y%m%d`0000 # the given date
end_date=`$gnudate -d "1 day $2" +%Y%m%d`0000 # the given date + 1 day
# Arrange for cleanup to run on exit or interrupt
trap cleanup EXIT
trap exit INT # needed for Solaris /bin/sh
# Create temporary files whose mtimes mark the date boundaries
start_tmp=`mktemp /tmp/fbd.XXXXXXXX` || exit 1
end_tmp=`mktemp /tmp/fbd.XXXXXXXX` || exit 1
touch -t $start_date $start_tmp
touch -t $end_date $end_tmp
# Find files newer than "date" but not newer than "date + 1 day"
find "$dir" -newer $start_tmp -not -newer $end_tmp
| true |
fc85a03017976f1485a708db34b2462af307806f | Shell | merase/github-move | /Automate/R00.00.07.28/scripts/lib/10-helper_yum.sh | UTF-8 | 4,181 | 3.703125 | 4 | [] | no_license | #!/bin/sh
: <<=cut
=script
This script contains simple helper functions which are related to the
installation command. Newly done by yum (most generic name), but it is
also used for the backwards compatibility with rpm.
commands.
=version $Id: 10-helper_yum.sh,v 1.2 2017/06/08 11:45:10 fkok Exp $
=author Frank.Kok@newnet.com
=feat make use of yum in RHEL7 environments
In RHEL7 R&D started to use yum and a repository to install and upgrade
packages. This in stead of the more loosely coupled rpm approach. A smoothly
integration is supplied by this module.
=cut
# Repo definition file for the Automate yum repository ($OS_yum_repos is
# provided by the surrounding framework).
readonly YUM_aut_repo="$OS_yum_repos/newnet-automate.repo"
# 0/1 flag: whether yum may be used on this host. Set once by
# init_yum_support() below; treat as read-only everywhere else.
YUM_supported=0 # Will hold support, will stay read/write don't write yourself
: <<=cut
=func_frm
Will set the proper install command based on the OS and if the package actually
supports yum. This anticipates that in the early transition not all packages
can be done using yum. And if if they would then this is the location where
it smoothly moves on.
To-do this it check the following:
- Is yum enabled (and thus support in ISO release
- Does this package for this version requires checking
- If checking required then check_yum_support function is called to determine
=remark
it is preparing for a future as the future is behind is lacking behind.
=set CMD_install
=set CMD_ins_freshen
=set CMD_ins_upgrade
=set CMD_uninstall
=ret
1 if yum is selected otherwise 0
=cut
# NOTE(review): "comands" is a typo, but the name is part of the public
# interface used by other scripts; renaming would break callers.
function set_install_comands() {
local name="$1" # (O) The name of the package, if none then cmds set to yum support
local ver="$2" # (O) The version to check (if known)
local force_rpm="$3" # (O) If set then always use the older rpm.
# NOTE(review): '[ -z help ]' tests whether the literal string "help" is
# empty, which is always false, so these show_short/show_trans assignments
# never run when executed; presumably the framework re-parses this file to
# extract help text - confirm before changing.
[ -z help ] && [ "$name" == '' ] && [ "$force_rpm" == '' ] && show_short="Set the proper install commands based on generic YUM support"
[ -z help ] && [ "$name" != '' ] && [ "$force_rpm" == '' ] && show_short="Set the proper install commands using generic and package needs '$name:$ver'"
[ -z help ] && [ "$force_rpm" != '' ] && show_short="Forcing install commands to use RPM"
[ -z help ] && show_trans=0
# Start from the host-wide yum capability; forcing rpm overrides it.
local yum=$YUM_supported
[ "$force_rpm" != '' ] && yum=0
# Only consult the package itself when yum is possible and a name was given.
if [ $yum != 0 ] && [ "$name" != '' ]; then
find_install "$name" 'opt'
if [ "$install_ent" != '' -a "$install_options" != '' ] ; then
is_substr "$CFG_opt_check_yum_sup" "$install_options" ','
if [ $? == 1 ]; then
YUM_check_support=0
func "*$name" check_yum_support "$ver" # Try calling the optional function itself
[ $? != 0 ] && yum=$YUM_check_support || yum=0 # Only overrule if func defined, otherwise none support (easier upgrade)
fi
fi
fi
# This will only alter the RPM/YUM commands. It is no use at this point
# to alter the RPM query commands
if [ $yum == 0 ]; then
CMD_install="$CMD_rpm_install"
CMD_ins_freshen="$CMD_rpm_ins_freshen"
CMD_ins_upgrade="$CMD_rpm_ins_upgrade"
CMD_uninstall="$CMD_rpm_uninstall"
else
CMD_install="$CMD_yum_install"
CMD_ins_freshen="$CMD_yum_ins_freshen"
CMD_ins_upgrade="$CMD_yum_ins_upgrade"
CMD_uninstall="$CMD_yum_uninstall"
fi
# The exit status doubles as the result: 1 = yum selected, 0 = rpm selected.
return $yum
}
: <<=cut
=func_int
Checks if yum is supported,(call once), needs;
- RH7
- Yum installed (on RH7 it should or exit)
=set
YUM_supported [0|1], make it read-only afterward so call once
=cut
function init_yum_support() {
# NOTE(review): 'not_found' captures $? but is never used afterwards;
# looks like leftover debug code - confirm before removing.
local not_found=$?
# yum support requires RHEL 7 or newer (OS_* globals come from the framework).
if [ $OS_prefix != 'RH' ] || [ $OS_ver_numb -lt 70 ]; then
YUM_supported=0
log_info "No support for yum, wrong OS version: $OS_version, [$OS_prefix,$OS_ver_numb]"
else
# We should have yum at this stage check it!
[ "$CMD_yum" == '' ] && prg_err "This is RH7+ but no CMD_yum defined"
add_cmd_require $CMD_yum
[ ! -d "$OS_yum_repos" ] && log_exit "Did not find yum repo directory ($OS_yum_repos), check yum installation"
YUM_supported=1
fi
log_info "init_yum_support : YUM_supported=$YUM_supported"
# Initialize the CMD_* install commands to match the detected capability.
set_install_comands
}
init_yum_support # And init it once
# Sourced library: return (not exit) with success.
return 0
| true |
8a8d957c220b27d5896fcf2bae024769a92e9a8c | Shell | 4orbit/pg-londiste-logical-checks | /test-londiste3-replication.sh | UTF-8 | 2,692 | 3 | 3 | [] | no_license | #!/bin/sh -ex
# End-to-end smoke test for londiste3 logical replication:
# builds a throwaway master (port 15432) and standby (port 25432) cluster,
# replicates pgbench_accounts from master to standby, runs a pgbench load,
# and waits for the 'compare' checksum to confirm the tables are in sync.
export PATH=/usr/pgsql-12/bin:$PATH
export PGDATA_MASTER=/tmp/master
export PGDATA_STANDBY=/tmp/standby
#export PYTHONPATH=/usr/local/lib/python2.7/site-packages:/usr/lib/python2.7/site-packages:/usr/local/lib64/python2.7/site-packages
## kill pgqd and londiste proc
#kill -9 $(cat $PGDATA_MASTER/londiste_master.pid | head -n 1)
#kill -9 $(cat $PGDATA_MASTER/pgqd.pid | head -n 1)
#kill -9 $(cat $PGDATA_STANDBY/londiste_standby.pid | head -n 1)
# Tear down any clusters left over from a previous run.
pg_ctl stop -D $PGDATA_MASTER || echo "ok"
pg_ctl stop -D $PGDATA_STANDBY || echo "ok"
rm -rf $PGDATA_MASTER
rm -rf $PGDATA_STANDBY
# setup master
initdb -D $PGDATA_MASTER
cat <<EOF >>$PGDATA_MASTER/postgresql.conf
port=15432
EOF
pg_ctl -D $PGDATA_MASTER start
pgbench -i -s 10 -p 15432
# setup standby
initdb -D $PGDATA_STANDBY
cat <<EOF >>$PGDATA_STANDBY/postgresql.conf
port=25432
EOF
pg_ctl -D $PGDATA_STANDBY start
# The standby needs a matching empty table; londiste copies data, not DDL.
psql -p 25432 <<SQL
CREATE TABLE public.pgbench_accounts (
    aid integer NOT NULL,
    bid integer,
    abalance integer,
    filler character(84)
)
WITH (fillfactor='100');
ALTER TABLE ONLY public.pgbench_accounts
    ADD CONSTRAINT pgbench_accounts_pkey PRIMARY KEY (aid);
SQL
# Configure and start the londiste root worker on the master.
cat <<EOF >>$PGDATA_MASTER/londiste_master.ini
[londiste]
job_name = master_table
db = dbname=postgres port=15432
queue_name = replication_queue
logfile = $PGDATA_MASTER/londiste_master.log
pidfile = $PGDATA_MASTER/londiste_master.pid
EOF
londiste $PGDATA_MASTER/londiste_master.ini create-root master 'dbname=postgres port=15432'
londiste -d $PGDATA_MASTER/londiste_master.ini worker
# Configure and start the leaf worker on the standby, fed by the master.
cat <<EOF >>$PGDATA_STANDBY/londiste_standby.ini
[londiste]
job_name = standby_table
db = dbname=postgres port=25432
queue_name = replication_queue
logfile = $PGDATA_STANDBY/londiste_standby.log
pidfile = $PGDATA_STANDBY/londiste_standby.pid
EOF
londiste $PGDATA_STANDBY/londiste_standby.ini create-leaf standby 'dbname=postgres port=25432' --provider='dbname=postgres port=15432'
londiste -d $PGDATA_STANDBY/londiste_standby.ini worker
# pgqd is the ticker daemon that drives the event queue on the master.
cat <<EOF >>$PGDATA_MASTER/pgqd.ini
[pgqd]
base_connstr = port=15432
logfile = $PGDATA_MASTER/pgqd.log
pidfile = $PGDATA_MASTER/pgqd.pid
EOF
pgqd -d $PGDATA_MASTER/pgqd.ini
londiste $PGDATA_MASTER/londiste_master.ini add-table public.pgbench_accounts
londiste $PGDATA_STANDBY/londiste_standby.ini add-table public.pgbench_accounts
# 'compare' only reports a checksum once the initial copy has caught up.
while [[ `londiste $PGDATA_STANDBY/londiste_standby.ini compare 2>&1| grep -c 'checksum'` = 0 ]]; do
sleep 1
echo "wait sync"
done
echo "sync complete"
# Apply two minutes of concurrent write load, then wait for sync again.
pgbench -T 120 -c 40 -p 15432
while [[ `londiste $PGDATA_STANDBY/londiste_standby.ini compare 2>&1| grep -c 'checksum'` = 0 ]]; do
sleep 1
echo "wait sync"
done
echo "sync complete"
| true |
69e5ed6f2602ab3e688070ab90b8f4047aa274cd | Shell | Gnome-ls/script | /CEAVEM/instalacionMonitorCEAVEM.sh | UTF-8 | 3,363 | 3.34375 | 3 | [] | no_license | #!/bin/bash
# After choosing the directory where the monitor will be installed.
# /mnt can be swapped for another mount with larger storage capacity.
cd /home/engine/
var1=$(pwd)
echo "estas en ====> $var1"
# Creates the ManageEngine target directory.
# NOTE(review): this pipes a hard-coded password into sudo's stdin, but sudo
# only reads a password from stdin when invoked with -S - confirm this works
# in the target environment, and consider moving the credential out of the
# script.
function crea_directorio {
echo "creando carpeta"
echo -e "evo100518\n" | sudo mkdir ManageEngine
}
# Downloads the ManageEngine Applications Manager installer into the current
# directory and makes it executable.
function ManageEngine_descarga {
echo "====> Descargando...."
sudo wget https://www.manageengine.com/products/applications_manager/54974026/ManageEngine_ApplicationsManager_64bit.bin
sudo chmod +x ManageEngine_ApplicationsManager_64bit.bin
}
# Writes the Applications Manager console password file (password.txt).
# NOTE(review): the '> password.txt' redirection is performed by the invoking
# shell, not by sudo, so the current user must be able to write here - confirm.
function archivoPassword {
sudo cat > password.txt <<- EOF
appmanager
EOF
}
# Renames the bundled 'amdb' database directory to 'amdb_old' (run from the
# pgsql data directory).
function renombrarAmdb {
sudo mv amdb amdb_old
echo "amdb renombrado"
}
# Ask whether the target directory already exists
if [ -d /home/engine/ManageEngine ]; then
# if it exists, just report it
echo "====> Existe la carpeta ManageEngine"
# otherwise create it and report that it was created
else
crea_directorio
fi
cd /home/engine/ManageEngine
direc=$(pwd)
echo "estas en ====> $direc"
# Go back up to the root of the install tree
cd /home/engine/
# Path where the script lives and where the installer will be downloaded
cd /home/engine/Descargas
ruta=$(pwd)
echo "estas en ====> $ruta"
# Download link
if [ -f ManageEngine_ApplicationsManager_64bit.bin ]; then
echo "====> Ya se encuentra descargado Manage Engine"
else
ManageEngine_descarga
fi
if [ -d /home/engine/ManageEngine/AppManager14 ]; then
echo "Existe la carpeta AppManager14 "
else
echo "Instalando"
tmp=/home/engine/ManageEngine
# Feed the console installer its interactive answers (accept defaults,
# license, install path $tmp) via stdin.
echo -e "\n\n\n\n\n\n\n\n\ny\n1\n1\n1\n\n\n\n1\n$tmp\ny\n\nn\n\n\n\n" | sudo ./ManageEngine_ApplicationsManager_64bit.bin -i console
fi
cd /home/engine/
sleep 3s
var2=$(pwd)
echo "estas en ====> $var2"
cd /home/engine/ManageEngine/AppManager14
var3=$(pwd)
echo "estas en ====> $var3"
ls
# Start the application once so it creates its working directories.
echo "Iniciando aplicación"
sleep 30s
sudo nohup sh startApplicationsManager.sh &
sleep 30s
#echo "c"
cd /home/engine/ManageEngine/AppManager14/working/bin
var4=$(pwd)
echo "estas en ====> $var4"
ls
cd /home/engine/ManageEngine/AppManager14/working/bin
if [ -f /home/engine/ManageEngine/AppManager14/working/bin/password.txt ]; then
echo "====> El archivo password.txt ya se encuentra"
else
echo "====> Creando el archivo password.txt ..."
archivoPassword
fi
ls
cd /home/engine/ManageEngine/AppManager14/
var5=$(pwd)
echo "estas en ====> $var5"
ls
cd conf/
var6=$(pwd)
echo "estas en ====> $var6"
ls
# Move the bundled DB off port 15432, presumably to avoid a port clash with
# another service - confirm the intended port.
sudo sed -i 's/am.db.port=15432/am.db.port=15434/g' "AMServer.properties"
echo "AMServer modificado"
cd /home/engine/ManageEngine/AppManager14/
var8=$(pwd)
echo "estas en ====> $var8"
# Shut the application down before touching its database files.
echo "Bajar aplicación"
sleep 30s
sudo ./shutdownApplicationsManager.sh -force
sleep 30s
cd /home/engine/ManageEngine/AppManager14/working/pgsql/data/
var7=$(pwd)
echo "estas en ====> $var7"
# NOTE(review): amdb_old is a directory (result of 'mv amdb amdb_old'), so
# the '-f' (regular file) test below is always false and renombrarAmdb runs
# on every execution; '-d' was probably intended - confirm.
if [ -f amdb_old ]; then
echo "=====> ya está amdb_old"
else
renombrarAmdb
fi
cd /home/engine/ManageEngine/AppManager14/
var9=$(pwd)
echo "estas en ====> $var9"
# Recreate an empty logs directory.
sudo rm -rf logs
echo "carpeta logs eliminada"
sudo mkdir logs
echo "carpeta logs creada"
cd /home/engine/ManageEngine/AppManager14/
var10=$(pwd)
# Start the application again and follow its output.
sudo nohup sh startApplicationsManager.sh &
sudo tail -F nohup.out
echo "estas en ====> $var10"
#cd /home/engine/
#var11=$(pwd)
#echo "estas en ====> $var11"
| true |
3bf62ef5a99cb3b8b7a627889e713afba773e51d | Shell | MuhammadAsif1/OSP_Test_Cases | /nfv-auto/tempest_barbican.sh | UTF-8 | 1,523 | 3.734375 | 4 | [] | no_license | #!/bin/bash
# Runs every Barbican tempest test case one at a time, echoing each run's
# banner and output to stdout and appending the same to barbican_tempest.log.

# Global test-case counter, incremented by test_execuation.
counter=0

# test_execuation TEST_NAME
# Runs a single tempest test (matched by regex) and logs its banner plus the
# full tempest output.
test_execuation()
{
counter=$((counter + 1))
echo "======================================= Test Case Number : $counter ==============================================="
echo "*** $1 ***"
echo "============================================================================================================="
echo "==================================== Test Case Number : $counter ============================================" >> barbican_tempest.log
echo "========== $1 ============" >> barbican_tempest.log
echo "=============================================================================================================" >> barbican_tempest.log
output=$(tempest run --regex "($1)")
echo "$output"
echo "$output" >> barbican_tempest.log
echo "============================================================================================================="
echo "=============================================================================================================" >> barbican_tempest.log
}
##############################
echo "================================ Barbican Tempest Log File =====================" > barbican_tempest.log
# List the available tests and keep only the barbican ones; the awk strips
# everything from the opening '[' (the tag list), leaving the bare test name.
# NOTE(review): the /4/ filter only keeps lines containing the character "4" -
# verify this is intentional and not a leftover.
output=$(ostestr -l | grep barbican)
barbican_test_cases_list=$(echo "$output" | awk 'BEGIN { FS="[" } /4/ { print $1}')
echo "$barbican_test_cases_list" > barbican_test_cases_list.txt
filename="barbican_test_cases_list.txt"
while read -r line; do
# Quote "$line" so each test name is passed as a single argument.
test_execuation "$line"
done < "$filename"
| true |
6941efa37421fed583735f1feb7b7fa739fdf3e8 | Shell | nabbar/gotools | /prepare | UTF-8 | 1,502 | 3.265625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Prepares a build host for the gotools scripts: sets up GOPATH, installs the
# required packages (via apk or apt-get, whichever exists), fetches the latest
# snyk release, runs gotools, and links the produced binaries into
# /usr/local/bin. Privileged steps are retried through sudo when the direct
# attempt fails.
export PACKAGE_BIN=/usr/src/tools
export SUDOCMD=$(which sudo)
export CURDIR=$(pwd)
export RUNDIR=$(dirname ${0})
export GOROOT=$(echo ${GOROOT})
export GOPATH=$(echo ${GOPATH})
GOPATH=${GOPATH}
# Default GOPATH when the environment does not provide one.
[[ "${GOPATH}" != "" ]] || export GOPATH=/go
mkdir -vp ${GOPATH}/src ${GOPATH}/bin ${GOPATH}/pkg || ${SUDOCMD} mkdir -vp ${GOPATH}/src ${GOPATH}/bin ${GOPATH}/pkg || exit 1
chown -R ${USER}:users ${GOPATH} || ${SUDOCMD} chown -R ${USER}:users ${GOPATH} || exit 1
chmod -R ug+rw ${GOPATH} || ${SUDOCMD} chmod -R ug+rw ${GOPATH} || exit 1
[[ -e ${PACKAGE_BIN} ]] || ${SUDOCMD} mkdir -vp ${PACKAGE_BIN} || exit 1
[[ "${USER}" = "root" ]] || [[ "${UID}" = "0" ]] || ${SUDOCMD} chown -vR ${USER}:users ${PACKAGE_BIN} || exit 1
# No apt-get means an Alpine host: use apk instead.
if [[ "$(which apt-get)" = "" ]]
then
${SUDOCMD} $(which apk) add --no-cache wget curl openssl git tar jq || exit 1
else
DEBIAN_FRONTEND=noninteractive ${SUDOCMD} $(which apt-get) --assume-yes update || exit 1
DEBIAN_FRONTEND=noninteractive ${SUDOCMD} $(which apt-get) --assume-yes -o Dpkg::Options::="--force-confold" --no-install-recommends install wget curl openssl git tar jq || exit 1
fi
echo -e "\n\n\n\t >>> get last release of 'github.com/snyk/snyk'... "
${RUNDIR}/github_get_last_release 'snyk/snyk' 'linux' 'sha|arm' || exit 1
echo -e "\n\n\n\t >>> run gotools... "
${RUNDIR}/gotools || exit 1
${SUDOCMD} chmod -v +x ${PACKAGE_BIN}/*
ln -svf ${PACKAGE_BIN}/* /usr/local/bin/ || ${SUDOCMD} ln -svf ${PACKAGE_BIN}/* /usr/local/bin/ || exit 1
| true |
25f5c8550cd52259572fd8085c2096926b0fea38 | Shell | WikipediaLibrary/TWLight | /tests/shunit/twlight_i18n_lint_test.sh | UTF-8 | 774 | 2.796875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# shunit2 tests for twlight_i18n_lint.pl: files in the bad_* fixture sets
# must make the linter fail, files in the good_* set must pass.
# Requires TWLIGHT_HOME to point at the project root.
shopt -s expand_aliases

# Every bad-newline fixture must trigger a lint error.
testBadPyNewlines() {
prefix=${TWLIGHT_HOME}/tests/shunit/data/bad_i18n_newline_
for i in ${prefix}*.py
do
# Fixed: the message referenced the undefined ${file}; ${i} is the loop var.
assertFalse "${i} should cause an error." "${TWLIGHT_HOME}/bin/twlight_i18n_lint.pl ${i}"
done
}

# Every good fixture must pass the linter cleanly.
testGoodPy() {
prefix=${TWLIGHT_HOME}/tests/shunit/data/good_i18n_
for i in ${prefix}*.py
do
assertTrue "${i} should not cause an error." "${TWLIGHT_HOME}/bin/twlight_i18n_lint.pl ${i}" ||:
done
}

# Every bad-comment fixture must trigger a lint error.
testBadPyComments() {
prefix=${TWLIGHT_HOME}/tests/shunit/data/bad_i18n_comment_
for i in ${prefix}*.py
do
assertFalse "${i} should cause an error." "${TWLIGHT_HOME}/bin/twlight_i18n_lint.pl ${i}"
done
}

. ${TWLIGHT_HOME}/tests/shunit/shunit2
| true |
d101e145327bf40a402e3614e3f79313ff4e8112 | Shell | uridr/RDF-TextGeneration | /models/run_model.sh | UTF-8 | 2,929 | 3.84375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
#SBATCH --gres=gpu:2
#SBATCH --cpus-per-task 2
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
#SBATCH --mem=64G
#SBATCH --job-name=fairseq
#SBATCH --output=logs/fairseq_%j.log

# Dispatches a fairseq training run: validates the requested architecture,
# config file and data path, then invokes the architecture script, optionally
# with pretrained embeddings (-s glove|wiki2vec, -d dimension) and fp16.

# POSITIONAL=()
while [[ "$#" -gt 0 ]]; do
    case "$1" in
        # Parse the architecture to be used
        -a|--architecture) ARCH="$2"; shift 2;;
        -a=*|--architecture=*) ARCH="${1#*=}"; shift 1;;
        # Parse the configuration file to be used
        -c|--config-file) CONF="$2"; shift 2;;
        -c=*|--config-file=*) CONF="${1#*=}"; shift 1;;
        # Define the relative path to the data to be used
        -p|--data-path) DATAPATH="$2"; shift 2;;
        # Fixed: the "--opt=value" form must take the value from "$1"
        # ("${1#*=}"); it was wrongly read from "$2".
        -p=*|--data-path=*) DATAPATH="${1#*=}"; shift 1;;
        # Parse embeddings source
        -s|--emb-source) SOURCE="$2"; shift 2;;
        -s=*|--emb-source=*) SOURCE="${1#*=}"; shift 1;;
        # Parse embedding dimension
        -d|--emb-dimension) DIMENSION="$2"; shift 2;;
        -d=*|--emb-dimension=*) DIMENSION="${1#*=}"; shift 1;;
        # Use optimized fp16 precision training
        -fp16|--fp16) FP16="True"; shift 1;;
        # Handle unknown parameters
        -*|--*|*) echo "Unknown parameter: $1"; exit 2;;
        # Store positional arguments
        # *) POSITIONAL+=("$1"); shift 1;;
    esac
done

# Determine architecture
ARCHITECTURE=architectures/$ARCH.sh

# Check if model architecture is defined
if [ ! -f "$ARCHITECTURE" ]; then
    echo "Unknown architecture: $ARCH"
    echo "Available architectures are:"
    ls architectures/
    exit 2
fi

# Whether to use optimized training or not
if [ ! -z "$FP16" ]; then
    FP16="$FP16"
else
    FP16="False"
fi

# Determine config file
CONFIG=config_files/$ARCH/$CONF.config
if [ ! -f "$CONFIG" ]; then
    echo "Unknown configuration file: $CONF"
    echo "Available configuration files are:"
    ls config_files/$ARCH/
    exit 2
fi

# Determine data to be used
if [ ! -z "$DATAPATH" ]; then
    FORMAT="$DATAPATH"
else
    echo "Datapath must be specified!"
    exit 2
fi

# Check if data path exists
if [ ! -d "$FORMAT" ]; then
    echo "Unknown data format: $FORMAT"
    exit 2
fi

# Determine checkpoints directory
CHECKPOINTS=checkpoints/$ARCH/new
# Create it (and any missing parent directories) if it does not exist.
# Fixed: plain mkdir fails when checkpoints/$ARCH does not exist yet.
if [ ! -d "$CHECKPOINTS" ]; then
    mkdir -p "$CHECKPOINTS"
fi

# Output some useful information
echo "Training model: $ARCHITECTURE"
echo "Configuration file: $CONFIG"
echo "Data: $FORMAT"
echo "Saving checkpoints at: $CHECKPOINTS"
echo "Using optimized fp16 training: $FP16"

# Train with pretrained embeddings
if [ ! -z "$SOURCE" ]; then
    EMB="../embeddings/$SOURCE"
    if [ "$SOURCE" == "glove" ]; then
        EMB="$EMB/glove.6B.${DIMENSION}d.txt"
    else
        EMB="$EMB/enwiki_20180420_${DIMENSION}d.txt"
    fi
    # Check if embeddings exist
    if [ ! -f "$EMB" ]; then
        echo "Unknown pretrained embeddings path: $EMB"
        exit 2
    fi
    echo "Pretrained embeddings: $EMB"
    sh $ARCHITECTURE -c $CONFIG -f $FORMAT -e $EMB -d $DIMENSION --checkpoints $CHECKPOINTS --fp16 $FP16
# Train without pretrained embeddings
else
    sh $ARCHITECTURE -c $CONFIG -f $FORMAT --checkpoints $CHECKPOINTS --fp16 $FP16
fi
| true |
25349031d2518dedb2f77da4cb9a90d79d628ab7 | Shell | Rutuja211998/Shell_script | /Selection_practice/leap_year.sh | UTF-8 | 163 | 3.34375 | 3 | [] | no_license | #!/bin/bash -x
# Read a four-digit year from stdin and report whether it is a leap year.
echo "Enter a year (YYYY)"
read -r year
# Leap year: divisible by 4 and not by 100, or divisible by 400.
# Plain shell arithmetic replaces the fragile '[[ $year%4 -eq 0 ]]' trick,
# which relied on [[ ]] arithmetically evaluating the string "$year%4".
# Inside (( )), && binds tighter than ||, so no extra grouping is needed.
if (( year % 4 == 0 && year % 100 != 0 || year % 400 == 0 ))
then
echo "leap year"
else
echo "not a leap"
fi
| true |
08341c13e5cd31671fd926ccb2f5ad9f0302f300 | Shell | d9k/bio-essay | /regen-css.sh | UTF-8 | 427 | 2.859375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Rebuilds the theme stylesheet from its LESS source and strips the absolute
# site path out of the generated CSS comments.
SITE_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
THEME_PATH="${SITE_PATH}/assets/themes/the-minimum"
INPUT_PATH=${THEME_PATH}/css/style.less
OUTPUT_PATH=${THEME_PATH}/css/style.css
lessc --line-numbers=comments ${INPUT_PATH} ${OUTPUT_PATH}
#TODO debug lessc instead of this script
#remove site path from output css
# NOTE(review): `sed -i ''` is the BSD/macOS form; GNU sed would treat '' as
# the sed program. This script appears to target macOS - confirm.
sed -i '' -e "s:${SITE_PATH}/::g" "${OUTPUT_PATH}"
ls -hal ${THEME_PATH}/css/style.css
| true |
6269fa97f0bc7edfc04bbcb9194d1cb89bdc0143 | Shell | nsasikumar/mariadb55 | /mariadb-setrootpassword | UTF-8 | 3,543 | 4.0625 | 4 | [] | no_license | #!/bin/bash
# Secures a fresh MariaDB installation: generates a random root password,
# waits for the server to come up, applies the usual mysql_secure_installation
# steps (drop test DB, drop anonymous users), creates a local backup user,
# and - when client certificates exist under /etc/ssl/mysql/root - stores the
# root password encrypted for those certificates and creates matching
# certificate-bound admin accounts. Must run with privileges that allow
# chown on /var/lib/mysql.
set -u
MSA=/usr/bin/mysqladmin
# generate long random password
MARIADB_ROOT_PW=$(openssl rand -base64 32)
# make sure ownership of data dir is OK
chown -R mysql:mysql /var/lib/mysql
/usr/bin/mysqld_safe &
sleep 5 # wait for mysqld_safe to rev up, and check for port 3306
port_open=0
while [ "$port_open" -eq 0 ]; do
/bin/nc -z -w 5 127.0.0.1 3306
if [ $? -ne 0 ]; then
# NOTE: by the time this echo runs, $? is the status of the successful
# [ test above, so the printed "result" is always 0.
echo "Sleeping waiting for port 3306 to open: result " $?
sleep 1
else
echo "Port 3306 is open"
port_open=1
fi
done
# Secure the installation
done=0
count=0
maxtries=10
# Retry setting the root password until it sticks or maxtries is exceeded.
while [ $done -eq 0 ]; do
${MSA} -u root password ${MARIADB_ROOT_PW}
if [ $? -ne 0 ]; then
count=$((${count} + 1))
if [ $count -gt $maxtries ]; then
echo "Maximum tries at setting password exceeded. Giving up"
exit 1
else
echo "Root password set failed. Sleeping, then retrying"
sleep 1
fi
else
echo "Root Password set successfully"
done=1
fi
done
# this code mimics the secure install script, which was originally
# scripted via expect. I found that unreliable, hence this section
# drop test database
echo "Dropping test DB"
DROP="DROP DATABASE IF EXISTS test"
echo "$DROP" | mysql -u root --password="$MARIADB_ROOT_PW" mysql
echo "Cleaning test db privs"
# remove db privs for test
DELETE="DELETE FROM mysql.db Where Db='test' OR Db='test\\_%'"
echo "$DELETE" | mysql -u root --password="$MARIADB_ROOT_PW" mysql
echo "Deleting anon db users"
# remove anon users
DELETE="DELETE FROM mysql.user WHERE User=''"
echo "$DELETE" | mysql -u root --password="$MARIADB_ROOT_PW" mysql
echo "create mysql user"
# create mysql@localhost user for xtrabackup
CUSER="CREATE USER 'mysql'@'localhost'"
echo "$CUSER" | mysql -u root --password="$MARIADB_ROOT_PW" mysql
GRANT="GRANT ALL PRIVILEGES ON *.* TO 'mysql'@'localhost'"
echo "${GRANT}" | mysql -u root --password="$MARIADB_ROOT_PW" mysql
# now set the root passwords given the certificates available
ROOTCERTS=/etc/ssl/mysql/root
if [ -d ${ROOTCERTS} ]; then
clist=$(find ${ROOTCERTS} -type f -a -name \*.pem -print)
if [ -z "${clist}" ]; then
echo "No certificates available to encrypt root pw" >&2
exit 1
fi
# dumping encrypted root password to disk - make sure that
# only owner can read/write it
oldu=$(umask)
umask 0077
echo -n $MARIADB_ROOT_PW | openssl smime -encrypt -aes256 \
-out /var/lib/mysql/rootpw.pem \
${clist}
umask ${oldu}
# One admin account per certificate, bound to that cert's subject/issuer.
for c in ${clist}; do
filename=$(basename "${c}")
username=${filename%.*}
# extract subject and issuer
subject=$(openssl x509 -noout -subject -in ${c} | sed -e "s/^subject= //ig")
printf -v qsubject "%q" "${subject}"
issuer=$(openssl x509 -noout -issuer -in ${c} | sed -e "s/^issuer= //ig" -e "")
printf -v qissuer "%q" "${issuer}"
CUSER="CREATE USER '${username}'@'%'"
echo "${CUSER}" | mysql -u root --password="$MARIADB_ROOT_PW" mysql
GRANT="GRANT ALL PRIVILEGES ON *.* TO '${username}'@'%' \
REQUIRE SUBJECT '${qsubject}' AND \
ISSUER '${qissuer}' \
WITH GRANT OPTION"
echo "${GRANT}" | mysql -u root --password="$MARIADB_ROOT_PW" mysql
done
fi
echo "FLUSH PRIVILEGES" | mysql -u root --password="$MARIADB_ROOT_PW" mysql
echo "Shutting down MySQL server"
${MSA} -uroot -p${MARIADB_ROOT_PW} shutdown
echo "---> MariaDB installation secured with root password" >&2
| true |
2bc3ec0c40902dec42a54265f3a526bede608d9c | Shell | simenbkr/tdt4237-2019-group09 | /setup.sh | UTF-8 | 1,162 | 2.890625 | 3 | [] | no_license | #!/bin/bash
# NB: This script assumes that we are in the directory
# /srv/www-data/group09/
# If we are not, the configuration files for the nginx and group09 services must be edited.
# Provisioning script for the group09 Django site: installs nginx/python,
# sets up a virtualenv, configures Django, wires up the systemd + nginx
# units, and starts everything. Must run as root (apt/systemctl/chown).
DOMAIN='progsexy.flyktig.no' # Update this shizzle.
PORT=4009
apt update && apt install nginx python3 python3-pip -y
systemctl stop nginx
pip3 install virtualenv
virtualenv -p python3 venv
source venv/bin/activate
pip3 install -r requirements.txt
pip3 install argon2 django[argon2] #OMEGALUL
# Point Django at the static/media roots under the deployment directory.
echo 'STATIC_ROOT = "/srv/www-data/group09/static/"
MEDIA_ROOT = "/srv/www-data/group09/media/"' > sec/sec/local_settings.py
python sec/manage.py makemigrations
python sec/manage.py migrate
python sec/manage.py loaddata init.json
python sec/manage.py collectstatic
mv static sec
# Install the service unit and the nginx vhost as symlinks.
ln -fs $PWD/group09.service /etc/systemd/system/group09.service
ln -fs $PWD/nginx-configuration-file /etc/nginx/sites-enabled/default
chown -R www-data:www-data .
chmod 600 $PWD/sec/db.sqlite3
python sec/manage.py clearsessions
# Register the public domain/port in Django's sites table.
sqlite3 sec/db.sqlite3 "update django_site set name='$DOMAIN:$PORT',domain='$DOMAIN:$PORT';"
systemctl daemon-reload
systemctl start group09
systemctl start nginx
| true |
8fa0aba89b53f8ad7a757066afaef684a480285b | Shell | chakralinux/desktop | /qxneur/PKGBUILD | UTF-8 | 1,058 | 2.671875 | 3 | [] | no_license | # Maintainer: Inkane <0inkane@googlemail.com>
# Contributor: Aleksey Ksenzov aka KsenZ <aksenzov@gmail.com>
# Arch Linux PKGBUILD for qxneur, a Qt frontend for xneur. The source tarball
# is regenerated manually from git via mksource(); makepkg then builds and
# installs it through package().
pkgname=qxneur
pkgver=20121115
pkgrel=1
pkgdesc="Qt frontend for xneur"
arch=('i686' 'x86_64')
url="https://gitorious.org/qxneur"
license=('GPLv2')
depends=('qt' 'xneur' 'libxmu')
makedepends=('gcc' 'make' 'cmake' 'git' 'pkgconfig')
source=("qxneur-${pkgver}.tar.xz")
md5sums=("7c7e903fcc366a44fce892b0675d8193")
__gitroot="git://gitorious.org/~ksenz/qxneur/ksenz-qxneur.git"
__gitname="ksenz-qxneur"
# Maintainer helper (not run by makepkg): snapshots the git HEAD into the
# versioned source tarball and prints its md5 for the md5sums array.
mksource() {
  git clone ${__gitroot}
  cd ${__gitname}
  git archive --prefix="${pkgname}-${pkgver}/" --format=tar HEAD\
  | xz > "../${pkgname}-${pkgver}.tar.xz"
  cd ..
  md5sum "${pkgname}-${pkgver}.tar.xz"
}
# NOTE(review): the '|| return 1' idiom is the legacy pre-makepkg-4 error
# style; modern makepkg aborts on failure automatically.
package() {
  #build qxneur from git
  cd $srcdir/${pkgname}-${pkgver}
  ./configure --prefix=/usr --mode=release || return 1
  make || return 1
  install -Dm 755 qxneur $pkgdir/usr/bin/qxneur || return 1
  install -Dm 644 qxneur_ru.qm $pkgdir/usr/share/qxneur/translations/qxneur_ru.qm || return 1
  cp -R images $pkgdir/usr/share/qxneur || return 1
}
| true |
3d26a78c7c502913c93e7fa6daa65530c4b7238d | Shell | yurydelendik/wasmtime-py | /build-wheels.sh | UTF-8 | 833 | 3.03125 | 3 | [
"LLVM-exception",
"Apache-2.0"
] | permissive | #!/bin/bash
# Builds and repairs manylinux wheels for wasmtime-py inside a CentOS/RHEL
# container: installs a nightly Rust toolchain, builds the wheel with each
# listed python, then runs auditwheel to produce manylinux wheels.
set -ex
curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain nightly -y
#curl https://www.python.org/ftp/python/3.7.3/Python-3.7.3.tgz | tar xz && \
#    cd Python-3.7.3 && ./configure --enable-optimizations --prefix=$HOME/.py37 && make altinstall && cd .. && \
#    rm -rf Python-3.7.3 && ln $HOME/.py37/bin/python3.7 $HOME/.py37/bin/python && ln $HOME/.py37/bin/pip3.7 $HOME/.py37/bin/pip
export PATH="$HOME/.cargo/bin:$PATH"
# One wheel per interpreter (currently just the SCL python36).
for PYBIN in /opt/rh/rh-python36/root/usr/bin; do
    export PYTHON_SYS_EXECUTABLE="$PYBIN/python"
    sudo "${PYBIN}/pip" install -U pip setuptools wheel==0.31.1 setuptools-rust auditwheel
    "${PYBIN}/python" setup.py bdist_wheel
done
mkdir wheelhouse
export PATH="/opt/rh/rh-python36/root/usr/bin:$PATH"
# Repair each wheel into a manylinux-tagged one under wheelhouse/.
for whl in dist/*.whl; do
    auditwheel repair "$whl" -w wheelhouse/
done
| true |
08935612cadb299acd3be4f4f3b651cfc422d645 | Shell | hashbang/aosp-build | /scripts/build | UTF-8 | 2,112 | 3.421875 | 3 | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | #!/bin/bash
# AOSP build driver. Must run inside the project's build container; all
# configuration comes from the environment emitted by the `environment`
# helper. ${VAR?} aborts with an error if a required variable is unset.
[ -f /.dockerenv ] || { echo "please run in supplied container"; exit 1; }
set -e; eval "$(environment)"
device="${DEVICE?}"
base_dir="${BASE_DIR?}"
build_type="${BUILD_TYPE?}"
build_id="${BUILD_ID?}"
build_variant="${BUILD_VARIANT?}"
external_dir="${EXTERNAL_DIR?}"
patch_dir="${PATCH_DIR?}"
# NOTE(review): build_id is assigned twice (also above); the duplicate looks
# accidental but is harmless.
build_id="${BUILD_ID?}"
platform_patches="${PLATFORM_PATCHES?}"
public_key_dir="${PUBLIC_KEY_DIR?}"
# Injects the platform signing key hash into F-Droid Privileged Extension's
# client whitelist (when present) and applies all configured platform
# patches from ${patch_dir} inside ${base_dir}.
function apply_patches(){
cd "${base_dir}"
local fdroid_org="packages/apps/F-DroidPrivilegedExtension/app/src/main/java/org"
local fdroid_whitelist="${fdroid_org}/fdroid/fdroid/privileged/ClientWhitelist.java"
# The platform certificate must be readable to compute its SHA-256.
test -r "${public_key_dir}/platform.x509.pem" || { echo "${public_key_dir}/platform.x509.pem not readable" 1>&2; exit 1; }
local platform_key_hash; platform_key_hash=$( \
openssl x509 -in "${public_key_dir}/platform.x509.pem" -outform DER \
| sha256sum | cut -c1-64; \
)
echo "Platform Key Hash: ${platform_key_hash}"
if [ -f "${base_dir}/${fdroid_whitelist}" ]; then
echo "patching file ${fdroid_whitelist}"
# Replace any 64-hex-digit constant in the whitelist with our key hash.
sed -i \
"s/[a-f0-9]\\{64\\}/${platform_key_hash}/g" \
"${base_dir}/${fdroid_whitelist}"
fi
# PLATFORM_PATCHES is a comma-separated list; expand it into words.
for patch in ${platform_patches//,/ }; do
echo "Applying patch: $patch_dir/${patch}"
patch -p1 --no-backup-if-mismatch < "${patch_dir}/${patch}"
done
cd -
}
# Copies the pre-built vendor output into the AOSP tree (building it first
# via build-vendor if missing), then builds the kernel.
# ${build_id,,} lowercases the build id for the vendor output path.
function build_external(){
local vendor_dist="${external_dir}/vendor/out/${device}/${build_id,,}/vendor"
local vendor_dest="${base_dir}/vendor/"
[ -d "${vendor_dist}" ] || build-vendor
cp -R "${vendor_dist}/." "${vendor_dest}"
build-kernel
}
# Configures and runs the AOSP platform build. "user" variant builds also
# produce the target-files package and OTA tooling needed for release signing.
function build_platform(){
cd "$base_dir"
# NOTE(review): BUILD_ID is unset before choosecombo - presumably so the
# build system derives its own; confirm the intent.
unset BUILD_ID
choosecombo "${build_type}" "aosp_${device}" "${build_variant}"
# Not reliable on Android 11?:
# (cd "$base_dir/external/icu/tools" && ./updatecldrdata.py && ./updateicudata.py)
# (cd "$base_dir/external/icu/tools/srcgen" && ./generate_android_icu4j.sh)
clear
if [ "$build_variant" == "user" ]; then
(set -o xtrace; m target-files-package brillo_update_payload otatools-package)
else
(set -o xtrace; m)
fi
cd -
}
# Entry point: patch the sources, stage vendor/kernel output, then build.
function main(){
apply_patches
build_external
build_platform
}; main
| true |
baa48a77bd24a85a2b622233ec398f5d1a1b5a51 | Shell | tusind/Bash-Cookbook | /chapter_03/07_data-maker.sh | UTF-8 | 3,166 | 4.3125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Test-data generator defaults; all overridable via the command-line options
# parsed at the bottom of the script (-t, -n, -l, -u).
N_FILES=3
TYPE=binary
DIRECTORY="qa-data"
NAME="garbage"
EXT=".bin"
UNIT="M"
# Seed bash's RNG with our PID (re-seeded per call inside get_random_number).
RANDOM=$$
TMP_FILE="/tmp/tmp.datamaker.sh"
## replace "qa-data for your own directory"
if [ ! -d ./qa-data ]; then
mkdir -v qa-data;
fi
# get_random_number STEP LOWER UPPER
# Echoes one value drawn uniformly at random from the arithmetic sequence
# LOWER, LOWER+STEP, ..., UPPER.
#
# Fixes vs. the original:
#   * it used to echo the random *index* into the sequence rather than the
#     sequence member itself (so a call with bounds [2,10] could yield 0,
#     which later breaks `dd bs=0M` in create_binary_files);
#   * the per-call `sleep 3` is gone - the nanosecond clock already gives
#     each call a distinct RNG seed.
function get_random_number() {
  # Re-seed bash's RNG from the nanosecond clock so successive calls diverge.
  RANDOM=$(($(date +%s%N) / 100000))
  local step=$1
  local lower=$2
  local upper=$3
  local -a values=()
  local n
  for n in $(seq "${lower}" "${step}" "${upper}"); do
    values+=("$n")
  done
  # Pick a uniformly random member of the sequence.
  echo "${values[RANDOM % ${#values[@]}]}"
}
# create_binary_files NUM_FILES LOWER UPPER
# Creates NUM_FILES zero-filled files named ${NAME}<n>.bin in ${DIRECTORY},
# each with a randomly chosen dd block size between LOWER and UPPER ${UNIT}.
function create_binary_files(){
EXT=".bin"
local NUM_FILES=$1
local LOWER=$2
local UPPER=$3
local COUNTER=0
while [ $COUNTER -lt "${NUM_FILES}" ]; do
# Random size within the requested bounds.
R_SIZE=$(get_random_number 1 "${LOWER}" "${UPPER}")
echo "Creating file... please wait..."
dd if=/dev/zero of="${DIRECTORY}/${NAME}${COUNTER}${EXT}" bs="${R_SIZE}${UNIT}" count=1 2> /dev/null
((COUNTER=COUNTER+1))
done
}
# Create NUM_FILES text files in ${DIRECTORY}, each filled with a random
# slice of ${TMP_FILE}.
#   $1 - number of files to create
#   $2 - VARIANCE, used both as an upper-bound input and to derive LOWER
#   $3 - UPPER bound for the random end offset
function create_text_files() {
    EXT=".txt"
    local NUM_FILES=$1
    local VARIANCE=$2
    local MIDDLE=$(($3 / 4))
    local UPPER=$3
    local LOWER=$(("$MIDDLE" - "$VARIANCE"))
    # Clamp a negative lower bound to a small positive value.
    if [ $LOWER -lt 0 ]; then
        LOWER=$(("$LOWER" * -1))
        LOWER=$(("$LOWER" + 1))
    fi
    local TRUE=0
    local COUNTER=0
    while [ $COUNTER -lt "${NUM_FILES}" ]; do
        # Draw END/START offsets and retry until END > START.
        END=$(get_random_number 1 "${VARIANCE}" "${UPPER}")
        START=$(get_random_number 1 "${VARIANCE}" "${LOWER}")
        TRUE=0
        while [ $TRUE -eq 0 ]; do
            if [ "$END" -gt "$START" ]; then
                TRUE=1
            else
                echo "Generating random values... please wait..."
                END=$(get_random_number 1 "${VARIANCE}" "${UPPER}")
                continue
            fi
        done
        echo "Creating file... please wait..."
        # NOTE(review): seek= offsets the *output* file (producing a sparse
        # leading hole), whereas skip= would offset the input; confirm which
        # was intended before relying on the file contents.
        dd if="${TMP_FILE}" of="${DIRECTORY}/${NAME}${COUNTER}${EXT}" seek="${START}" bs=1 count=$(("${END}" -"${START}")) 2> /dev/null
        ((COUNTER=COUNTER+1))
    done
}
# Parse command-line options.  Validation failures are counted in OPT_ERROR
# (arithmetic add — the original's OPT_ERROR+=1 performed *string* append)
# and the script aborts after parsing completes.
OPT_ERROR=0
while getopts ":t:n:l:u:" opt; do
    case $opt in
    t)
        TYPE="$OPTARG"
        # Message fixed: it previously claimed "-t must be set to line or
        # size", which matched neither the flag nor the accepted values.
        if [[ "${TYPE}" != "binary" && "${TYPE}" != "text" ]]; then
            echo "ERROR: -t must be set to binary or text"
            OPT_ERROR=$((OPT_ERROR+1))
        fi
        ;;
    n)
        N_FILES="$OPTARG"
        if [ "$N_FILES" -le 0 ]; then
            echo "ERROR: -n must be greater than 0"
            OPT_ERROR=$((OPT_ERROR+1))
        fi
        ;;
    l)
        LOWER="$OPTARG"
        if [ "$LOWER" -le 0 ]; then
            echo "ERROR: -l must be greater than 0"
            OPT_ERROR=$((OPT_ERROR+1))
        fi
        ;;
    u)
        UPPER="$OPTARG"
        if [ "$UPPER" -le 0 ]; then
            echo "ERROR: -u must be greater than 0"
            OPT_ERROR=$((OPT_ERROR+1))
        fi
        ;;
    \?)
        echo "Invalid option: -$OPTARG" >&2
        OPT_ERROR=$((OPT_ERROR+1))
        ;;
    :)
        echo "Option -$OPTARG requires an argument." >&2
        OPT_ERROR=$((OPT_ERROR+1))
        ;;
    esac
done
# Exit if we have any errors, otherwise continue to run the splitting
# functionality :)
if [ ${OPT_ERROR} -ne 0 ]; then
    exit 1
fi
# Dispatch to the requested generator.  NOTE: LOWER/UPPER are only set when
# -l/-u were supplied; the default arm was previously ":)" (a literal colon
# pattern), which could never match and left bad TYPE values silent.
case "$TYPE" in
binary)
    create_binary_files "$N_FILES" "$LOWER" "$UPPER"
    ;;
text)
    create_text_files "$N_FILES" "$LOWER" "$UPPER"
    ;;
*)
    echo "Unknown type of operation"
    ;;
esac
echo "DONE!"
exit 0
| true |
e1d76f4736f15baef5e161f95975ef3172840115 | Shell | dmetzger57/bin | /per_worker_smem | UTF-8 | 2,899 | 3.84375 | 4 | [] | no_license | #!/bin/bash
###
# Processes smem(1) output, converts to CSV for analysis.
###
###
# Source Data File Format:
#
# NAME, PID, PSS, RSS, USS, VSS, SWAP
#
# Which is the output format of the get_smem data collection tool. Typically
# we run get_smem on a system to capture the smem(1) data for later
# analysis, typically via per_worker_pss.
###
# Determine the dataset version label: first CLI arg, else ../VERSION,
# prompting interactively when that file just says "master".
if [ $# -eq 1 ]
then
    VERSION=$1
else
    VERSION=`cat ../VERSION`
    if [ "${VERSION}" == "master" ]
    then
        echo -e "Version: \c"
        read VERSION
    fi
fi
# Working/output file locations, all relative to the data directory.
TMP_SMEM_CSV="../tmp_smem.csv"
SMEM_CSV="../smem.csv"
PROCESS_NAME_PIDS="../process_names_pids.txt"
WORKERS_ROOT="../../workers"
if [ ! -d ${WORKERS_ROOT} ]
then
    mkdir -p ${WORKERS_ROOT}
fi
rm -f ${TMP_SMEM_CSV} ${SMEM_CSV} ${PROCESS_NAME_PIDS}
# Build the unique "<name> <pid>" list from every smem.201* capture file.
cat smem.201* | grep -v "NAME, PID" | cut -d',' -f1,2 | sort -k2 -t"," | uniq | sort -f | sed 's/,//' > ${PROCESS_NAME_PIDS}
echo -e "Date, Time, Elapsed, \c" >${SMEM_CSV}
# Emit one CSV header column per process and seed the per-worker files.
cat ${PROCESS_NAME_PIDS} | while read N P
do
    # Abbreviate long manager class names for readable column headers.
    ShortName=`echo ${N} | sed -e 's/InfraManager:://' -e 's/Openstack::NetworkManager/OSNM/' -e 's/Openstack::CloudManager/OSCM/' -e 's/StorageManager::CinderManager/SMCM/' -e 's/StorageManager::SwiftManager/SMSM/'`
    echo -e "${ShortName}-${P},\c" >>${SMEM_CSV}
    echo "${VERSION}" >${WORKERS_ROOT}/${N}-${P}-pss-${VERSION}.csv
    echo "${VERSION}" >${WORKERS_ROOT}/${N}-${P}-rss-${VERSION}.csv
    echo "${VERSION}" >${WORKERS_ROOT}/${N}-${P}-uss-${VERSION}.csv
    echo "${VERSION}" >${WORKERS_ROOT}/${N}-${P}-vss-${VERSION}.csv
done
echo "" >>${SMEM_CSV}
Elapsed=0
# Walk capture files in timestamp order (names look like smem.YYYYMMDD_HHMMSS).
ls smem* | cut -d'.' -f2 | cut -d' ' -f1 | sort | while read T
do
    echo "${T}"
    TIME=`echo ${T} | cut -d'_' -f2`
    HOUR=`echo ${TIME} | cut -c1-2`
    MINUTE=`echo ${TIME} | cut -c3-4`
    SECOND=`echo ${TIME} | cut -c5-6`
    DATE=`echo ${T} | cut -d'_' -f1`
    YEAR=`echo ${DATE} | cut -c1-4`
    MONTH=`echo ${DATE} | cut -c5-6`
    DAY=`echo ${DATE} | cut -c7-8`
    echo -e "${YEAR}/${MONTH}/${DAY},${HOUR}:${MINUTE}:${SECOND},${Elapsed},\c" >>${TMP_SMEM_CSV}
    cat ${PROCESS_NAME_PIDS} | while read N P
    do
        PSS=`grep " ${P}," smem.${T} | sed -e 's/ //g' | cut -d',' -f3`
        # NOTE(review): $? here is the exit status of the pipeline's last
        # stage (cut), which is normally 0 even when grep found nothing, so
        # the else branch below may be unreachable — confirm against data.
        if [ $? -eq 0 ]
        then
            RSS=`grep " ${P}," smem.${T} | sed -e 's/ //g' | cut -d',' -f4`
            USS=`grep " ${P}," smem.${T} | sed -e 's/ //g' | cut -d',' -f5`
            VSS=`grep " ${P}," smem.${T} | sed -e 's/ //g' | cut -d',' -f6`
        else
            PSS="0"
            RSS="0"
            USS="0"
            VSS="0"
        fi
        echo -e "${PSS},\c" >>${TMP_SMEM_CSV}
        echo "${PSS}" >>${WORKERS_ROOT}/${N}-${P}-pss-${VERSION}.csv
        echo "${RSS}" >>${WORKERS_ROOT}/${N}-${P}-rss-${VERSION}.csv
        echo "${USS}" >>${WORKERS_ROOT}/${N}-${P}-uss-${VERSION}.csv
        echo "${VSS}" >>${WORKERS_ROOT}/${N}-${P}-vss-${VERSION}.csv
    done
    echo "" >>${TMP_SMEM_CSV}
    Elapsed=$((Elapsed + 1))
done
# Fill empty cells with 0 (two passes catch adjacent empties) and append.
cat ${TMP_SMEM_CSV} | sed -e 's/,,/,0,/g' -e 's/,,/,0,/g' >>${SMEM_CSV}
rm ${TMP_SMEM_CSV}
exit 0
| true |
33ccf940d85af09a5ced9c8dfd4fbc07874cb936 | Shell | cloudwan/gohan | /packager/debian/postinst | UTF-8 | 1,158 | 3.453125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/sh
set -e
# summary of how this script can be called:
#        * <postinst> `configure' <most-recently-configured-version>
#        * <old-postinst> `abort-upgrade' <new version>
#        * <conflictor's-postinst> `abort-remove' `in-favour' <package>
#          <new-version>
#        * <postinst> `abort-remove'
#        * <deconfigured's-postinst> `abort-deconfigure' `in-favour'
#          <failed-install-package> <version> `removing'
#          <conflicting-package> <version>
# for details, see http://www.debian.org/doc/debian-policy/ or
# the debian-policy package
APP_NAME="gohan"
CLI="${APP_NAME}"
APP_USER="gohan"
APP_GROUP="gohan"
APP_ETC="/etc/${APP_NAME}"
APP_CONFIG="${APP_ETC}/gohan.yaml"
APP_OPT="/opt/gohan"
# source debconf library
. /usr/share/debconf/confmodule
case "$1" in
    configure)
    mkdir -p "${APP_ETC}"
    # "user:group" is the standard chown separator; the historical
    # "user.group" form is deprecated in GNU coreutils.
    chown "${APP_USER}:${APP_GROUP}" "${APP_ETC}"
    # Seed the config directory only on first install.
    [ -f "${APP_CONFIG}" ] || cp -R /opt/gohan/etc/* "${APP_ETC}"
    cp "${APP_OPT}/packager/gohan.conf" /etc/init
    ;;
    abort-upgrade|abort-remove|abort-deconfigure)
        exit 0
    ;;
    *)
        echo "postinst called with unknown argument \`$1'" >&2
        exit 1
    ;;
esac
d1340b2a7988a71fb77e6f310faf02bbf4a6ba0a | Shell | nelsnelson/stardate | /install.sh | UTF-8 | 711 | 3.578125 | 4 | [
"MIT"
] | permissive | #! /usr/bin/env sh
# Install or update the stardate server under ~/.stardate and run it
# against the current working directory.
stardate_repository='https://gitlab.com/nelsnelson/stardate.git';
target_directory="${HOME}/.stardate";
server='stardate.py';
wd=`pwd`;
python=`command -v python`;
git=`command -v git`;
# "command -v" prints nothing when the tool is missing, so check for an
# empty result.  The original "[ ! -f ${python} ]" collapses to the
# two-argument test "[ ! -f ]" when the variable is empty, which is always
# false — a missing interpreter was never detected.
if [ -z "${python}" ]; then
    echo 'fatal: python is not installed';
    exit 1;
fi
if [ -z "${git}" ]; then
    echo 'fatal: git is not installed';
    exit 1;
fi
if [ -d "${target_directory}" ]; then
    # Existing checkout: hard-reset to the upstream master branch.
    # NOTE(review): pushd/popd are bash builtins despite the sh shebang —
    # confirm the interpreter actually used.
    pushd "${target_directory}" &>/dev/null;
    ${git} fetch origin
    ${git} reset --hard origin/master;
    popd &>/dev/null;
else
    ${git} clone "${stardate_repository}" "${target_directory}";
fi
# TODO Install stardate.sh as a systemd service
# Run the server directly.  The original wrapped this line in backticks,
# which executed the program and then tried to run its *output* as a
# command.
"${python}" "${target_directory}/${server}" --project-path "${wd}";
| true |
2ae87498180c069aeed6f267a6ff6b9f19ae461e | Shell | raghunathan-r/UNIX_Lab | /Part_A/1_File_and_directory_handing_commands/1_cat_command_with_attributes.sh | UTF-8 | 560 | 3.125 | 3 | [] | no_license | #!bin/sh
# Demonstration of cat(1) flags against catExample* files.
# NOTE(review): despite the messages, no files are created here — the
# creation line below is commented out, so the catExample* files must
# already exist in the working directory.
# NOTE(review): the "\n" escapes only render as newlines when echo
# interprets backslash escapes (e.g. dash's echo); under bash they print
# literally — confirm the intended interpreter.
echo "\n\nCreating file catExample1.txt with contents :-\nHello friend\n\nNice to meet you\n"
#cat catExample1.txt >> "\nHello friend\n\nNice to meet you\n"
echo "\n\nCreating file catExample2.txt with contents :- \nThis is a follow up text file content\n"
echo "\n=> Displaying cat catExample* command :-\n"
cat catExample*
# -n numbers every output line.
echo "\n=> Diplaying cat -n catExample* command :-\n"
cat -n catExample*
# -b numbers non-blank lines only.
echo "\n=> Diplaying cat -b catExample* command :-\n"
cat -b catExample*
# -e marks line ends with $ (and shows non-printing characters).
echo "\n=> Diplaying cat -e catExample* command :-\n"
cat -e catExample*
c4a15992e3c3324aee7da53964ad52e9a1af2db0 | Shell | Cate-Michalska/csc221 | /hw3/sort_files.sh | UTF-8 | 415 | 3.09375 | 3 | [
"CC0-1.0"
] | permissive | #!/bin/bash
# Homework helper: make one directory per word, then move every file in
# part1/ whose entire contents exactly match that word into its directory.
cd ~/Documents/Github/csc221/hw3/
words=(
dawdy
loudness
reallots
salopette
special
specificity
submultiple
swum
unprejudicated
vivificate
)
for i in "${words[@]}"
do
    mkdir $i
    echo made $i
done
# Snapshot the file list once, before anything is moved.
files=(part1/*)
for i in "${words[@]}"
do
    for j in "${files[@]}"
    do
        # -F literal, -x whole-line match, -q quiet: true when some line of
        # $j is exactly the word $i.
        if grep -Fxq "$i" $j
        then
            mv $j $i
            echo moved $j to $i
        else
            echo no $i in $j
        fi
    done
done
| true |
8d866e5e7ca481c807b30da19f7f85e3d3ee76a5 | Shell | bluethundr/bash_scripts | /get_memory.sh | UTF-8 | 140 | 2.71875 | 3 | [] | no_license | #!/bin/bash
# For each hostname in the first column of ./hosts, ssh in (4-second
# timeout, quiet mode) and print the host plus its MemTotal line from
# /proc/meminfo as aligned columns.
for h in $(awk '{print $1}' hosts); do printf "%-24s %s %s %s\n" "$h" $(timeout 4 ssh -q $h grep MemTotal /proc/meminfo); done
| true |
a3c48b31081a35dc6baadd99955af593b2522a7b | Shell | hmasmoudi/SyphaxOS | /Default/0003-SyphaxOSGnome3/001_BuildPackagesScripts/0128-upower/PKGBUILD | UTF-8 | 728 | 2.578125 | 3 | [] | no_license | # Maintainer: Hatem Masmoudi <hatem.masmoudi@gmail.com>
# PKGBUILD metadata for the upower power-management daemon.
pkgname=upower
pkgver=0.99.7
pkgrel=2
pkgdesc="The UPower package provides an interface to enumerating power devices, listening to device events and querying history and statistics."
arch=('x86_64')
url="http://upower.freedesktop.org/releases"
license=('GPL')
groups=('desktop')
source=("$url/${pkgname}-${pkgver}.tar.xz")
md5sums=('SKIP')
depends=('rootfs')
# Configure and compile inside the unpacked source tree.
build() {
  cd "$srcdir/${pkgname}-${pkgver}"
  ./configure --prefix=/usr       \
              --sysconfdir=/etc   \
              --localstatedir=/var \
              --enable-deprecated \
              --disable-static
  make
}
# Install into the staging directory provided by makepkg.
package() {
  cd "$srcdir/${pkgname}-${pkgver}"
  make DESTDIR="$pkgdir" install
}
| true |
f3d12b8b830cc6a311e2ce16e4cda23a8939d735 | Shell | andysim/helpme | /tools/check_style.sh | UTF-8 | 2,516 | 4.3125 | 4 | [
"BSD-3-Clause",
"Intel",
"GPL-1.0-or-later",
"BSL-1.0",
"LicenseRef-scancode-free-unknown"
] | permissive | #!/bin/bash
# BEGINLICENSE
#
# This file is part of helPME, which is distributed under the BSD 3-clause license,
# as described in the LICENSE file in the top level directory of this project.
#
# Author: Andrew C. Simmonett
#
# ENDLICENSE
# This script will check a predefined list of C++ source and header files, using clang-format.
# By default, the clang-format in the path will be used. To use a version from a different
# location, simply set the environmental variable CLANG_FORMAT_DIR before calling this, e.g.
#
# export CLANG_FORMAT_DIR=/usr/local/bin/
#
# with the trailing forward slash present. The return code is set to zero if no errors are
# found, else 1, so this script can be used in CI setups to automatically check formatting.
# clang-format binary; CLANG_FORMAT_DIR (with trailing slash) may override
# the lookup location, otherwise PATH resolution is used.
CLANGFORMAT=${CLANG_FORMAT_DIR}clang-format
# Make sure we're in the top level directory
cd `dirname $0`
cd ..
declare -a extensions=("*.h" "*.cc" "*.cpp" "*.hpp")
declare -a directories=("src" "test" "test/unittests")
# Make a temporary file, and ensure it's nuked even if interrupted
tmpfile=$(mktemp)
trap 'rm -f -- "$tmpfile"' INT TERM HUP EXIT
# nullglob: unmatched patterns expand to nothing instead of themselves.
shopt -s nullglob
if [[ "$1" == "--fix" ]]
then
    echo "Fixing C++ file formatting..."
else
    echo "Checking C++ file formatting..."
fi
returncode=0
for dir in "${directories[@]}"
do
    for ext in "${extensions[@]}"
    do
        for file in ${dir}/${ext}
        do
            # Format into the temp file and diff against the original to
            # detect any formatting drift.
            $CLANGFORMAT --style=file $file > "$tmpfile"
            diff $file "$tmpfile" > /dev/null
            if [ $? -ne 0 ]
            then
                if [[ "$1" == "--fix" ]]
                then
                    echo "${CLANGFORMAT} --style=file -i $file"
                    ${CLANGFORMAT} --style=file -i $file
                else
                    # Report mode: remember the failure and show the diff.
                    returncode=1
                    echo
                    echo "****************************************************"
                    echo "Formatting problem detected. Run"
                    echo
                    echo "${CLANGFORMAT} --style=file -i $file"
                    echo
                    echo "from the top directory, or apply the following diff:"
                    echo "----------------------------------------------------"
                    diff $file "$tmpfile"
                    echo "****************************************************"
                    echo
                    echo
                fi
            fi
        done
    done
done
if [ $returncode -eq 0 ]
then
    echo "C++ file formats are good!"
fi
# Non-zero when any file needed reformatting, for CI gating.
exit $returncode
| true |
8e5aad6f53de240f2a0f875b4de05b3fe73be471 | Shell | Ali1729/githooks | /tests/step-088.sh | UTF-8 | 862 | 3.203125 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# Test:
#   Cli tool: manage single install configuration
# Each failure path exits with a distinct code so the harness can report
# exactly which step broke.
if ! sh /var/lib/githooks/install.sh; then
    echo "! Failed to execute the install script"
    exit 1
fi
# Single install configuration
mkdir -p /tmp/test088 && cd /tmp/test088 || exit 2
# Setting "single" outside a git repo must fail.
! sh /var/lib/githooks/cli.sh config set single || exit 3
git init || exit 4
# Unknown sub-command must fail.
! sh /var/lib/githooks/cli.sh config unknown single || exit 5
# After "set", the printed state must NOT contain "NOT"; after "reset" it must.
sh /var/lib/githooks/cli.sh config set single &&
    sh /var/lib/githooks/cli.sh config print single | grep -v 'NOT' || exit 6
sh /var/lib/githooks/cli.sh config reset single &&
    sh /var/lib/githooks/cli.sh config print single | grep 'NOT' || exit 7
# Check the Git alias
git hooks config set single &&
    git hooks config print single | grep -v 'NOT' || exit 10
git hooks config reset single &&
    git hooks config print single | grep 'NOT' || exit 11
| true |
eb4b780198298d1ec21c3edaf21dc082f558e775 | Shell | raminou/mgrep | /mgrep.sh | UTF-8 | 2,796 | 4.28125 | 4 | [] | no_license | #!/bin/bash
args=($0 "$@")
PROGNAME=$(basename ${args[0]})
PATTERN=""
EXCLUDE_FILE_ARRAY=()
EXCLUDE_STRING=""
INCLUDE_FILE_ARRAY=()
INCLUDE_STRING=""
EXCLUDE=0
CASE=1
BIN=1
usage()
{
echo "${PROGNAME} PATTERN [-e|--exclude PARAMS ...] [-i|--include PARAMS ...] [-h|--help] [-v|--version] [--case] [--nobin]"
echo -e "\t--exclude to exclude some filename pattern"
echo -e "\t--include to accept only some filename patter"
echo -e "\t--help to display this help"
echo -e "\t--version to display the version"
echo -e "\t--case to be case sensitive"
echo -e "\t--nobin to refuse binary file"
echo
echo "Display the list of file which contains the PATTERN"
echo ""
}
if [[ ${#args[*]} -lt 2 ]] ; then
echo "Provide a PATTERN"
usage
exit 1
fi
LENGTH=$((${#args[*]} - 1))
for i in $(seq 1 $LENGTH) ; do
if [[ ${args[$i]} != "" ]] ; then
case ${args[$i]} in
-h|--help) usage
exit 0;;
-v|--version) echo "1"
exit 0;;
-e|--exclude)
EXCLUDE=1
INCLUDE=0;;
-i|--include)
INCLUDE=1
EXCLUDE=0;;
--case)
CASE=0;;
--nobin)
BIN=0;;
*)
if [[ $EXCLUDE -eq 1 ]] ; then
EXCLUDE_FILE_ARRAY+=(${args[$i]})
EXCLUDE_STRING+="| grep -v \"${args[$i]}\""
elif [[ $INCLUDE -eq 1 ]] ; then
INCLUDE_FILE_ARRAY+=(${args[$i]})
INCLUDE_STRING+="| grep \"${args[$i]}\""
else
if [[ ${PATTERN} = "" ]] ; then
PATTERN=${args[$i]}
else
echo "PATTERN already given"
exit 1
fi
fi
;;
esac
fi
done
GREP_OPT="-Rl"
if [[ ${CASE} -eq "1" ]] ; then
GREP_OPT+="i"
fi
if [[ ${BIN} -eq "0" ]] ; then
GREP_OPT+="I"
fi
for f in $(grep ${GREP_OPT} "${PATTERN}" $PWD) ; do
valid=1
for exclude_f in ${EXCLUDE_FILE_ARRAY[@]} ; do
res=$(echo $f | grep -v "${exclude_f}")
status=$?
if [[ "$status" -ne 0 ]] ; then
valid=0
break
fi
done
for include_f in ${INCLUDE_FILE_ARRAY[@]} ; do
res=$(echo $f | grep ${include_f})
if [[ $? -ne 0 ]] ; then
valid=0
break
fi
done
if [[ ${valid} -eq 1 ]] ; then
echo -e "\033[1m$f\033[0m"
if [[ ${CASE} -eq "1" ]] ; then
grep -i -n --color=ALWAYS "${PATTERN}" "$f"
else
grep -n --color=ALWAYS "${PATTERN}" "$f"
fi
echo
fi
done
| true |
e2343135e09dfe1d91d59449ac1ff28fb09da25d | Shell | shun8/local_envs | /vagrant/app/recursive_import_sqlserver.sh | UTF-8 | 1,232 | 4.09375 | 4 | [] | no_license | #!/bin/bash
# Print usage and exit with status 2.
function usage {
    cat <<EOM
Usage: $(basename "$0") [OPTION]...
    -h             Display help
    -j Config JSON <string>    Config JSON file path (Default ~/sqlserver_config.json)
    -m Month  <string>         yyyymm (Default Last month)
EOM
    exit 2
}
# Resolve the directory this script lives in.
script_dir=$(cd $(dirname ${0}) && pwd)
# Default values: config file path and last month as yyyymm.
json_file=~/sqlserver_config.json
yyyymm=$(date -d "$(date +'%Y-%m-01') 1 month ago" +'%Y%m')
# Parse command-line options.
while getopts ":j:m:h" OPTKEY; do
    case ${OPTKEY} in
        j)
            # Convert the given path to an absolute path.
            json_file=$(cd $(dirname ${OPTARG}) && pwd)/$(basename ${OPTARG})
            ;;
        m)
            yyyymm=${OPTARG}
            ;;
        '-h'|'--help'|* )
            usage
            ;;
    esac
done
# Iterate over every entry of the JSON config array, importing each table.
len=$(jq length ${json_file})
for i in $( seq 0 $(($len - 1)) ); do
    file_path=$(jq -r .[$i].file_path ${json_file})
    table=$(jq -r .[$i].table ${json_file})
    vars=$(jq -c -r .[$i].vars ${json_file})
    # Optional per-entry variables are forwarded via -v (spaces stripped).
    if [ "${vars}" != "null" ] ; then
        vars_op=" -v "$(echo ${vars} | tr -d " ")
    else
        vars_op=""
    fi
    ${script_dir}/import_from_sqlserver.sh -s ${file_path} -t ${table} -m ${yyyymm}${vars_op}
    result=$?
    # Abort on the first failed import, propagating its exit code.
    if [ ${result} -ne 0 ] ; then
        exit ${result}
    fi
done
exit 0
| true |
d207b740c4c9da7e5f326d116c20308ee70b36da | Shell | shikarukitake/Corewar | /assembler/help.sh | UTF-8 | 1,408 | 3.46875 | 3 | [] | no_license | #!/bin/sh
# Compare our Corewar assembler's .cor output against the reference
# assembler for a given .s map; with -v, run ours under valgrind first.
SUBJECT_ASM=subject_resources/asm
MY_ASM=./asm
VALGRIND_MY_ASM="valgrind --log-file="valgrindout" ./asm"
if [ $# -eq 1 ]
then
	SUBSTR=$(echo "$1" | cut -d'.' -f 1)
	NAME=${SUBSTR}.cor
	NAME_COPY=${SUBSTR}_copy.cor
	make
	echo "\033[1;34mMY ASM:\033[0m"
	$MY_ASM "$1"
	if [ $? -eq 0 ]; then
		# Ours succeeded: keep a copy, rebuild with the reference
		# assembler, then diff both binaries (raw and hexdumped).
		cp "$NAME" "$NAME_COPY"
		echo "\033[1;34mSUBJECT ASM:\033[0m"
		$SUBJECT_ASM "$1"
		echo "\033[1;34mDIFF:\033[0m"
		diff $NAME $NAME_COPY
		xxd $NAME > bin_temp1
		xxd $NAME_COPY > bin_temp2
		diff bin_temp1 bin_temp2
		rm bin_temp1 bin_temp2
	else
		echo "\033[1;34mSUBJECT ASM:\033[0m"
		$SUBJECT_ASM "$1"
	fi
elif [ $# -eq 2 ]
then
	if [ "$2" = "-v" ]
	then
		SUBSTR=$(echo "$1" | cut -d'.' -f 1)
		NAME=${SUBSTR}.cor
		NAME_COPY=${SUBSTR}_copy.cor
		make
		echo "\033[1;34mMY ASM:\033[0m"
		$VALGRIND_MY_ASM "$1"
		# Capture the assembler's status immediately; the original read
		# $? only after rm, so it actually tested rm's exit code.
		status=$?
		# Show the leak summary from the valgrind log.  The original ran
		# "echo valgrindout | grep lost", grepping the literal string
		# instead of the log file's contents.
		grep lost valgrindout
		rm valgrindout
		if [ $status -eq 0 ]; then
			cp "$NAME" "$NAME_COPY"
			echo "\033[1;34mSUBJECT ASM:\033[0m"
			$SUBJECT_ASM "$1"
			echo "\033[1;34mDIFF:\033[0m"
			diff $NAME $NAME_COPY
			xxd $NAME > bin_temp1
			xxd $NAME_COPY > bin_temp2
			diff bin_temp1 bin_temp2
			rm bin_temp1 bin_temp2
		else
			echo "\033[1;34mSUBJECT ASM:\033[0m"
			$SUBJECT_ASM "$1"
		fi
	else
		echo "USAGE: ./help.sh map [-v]"
	fi
else
	echo "USAGE: ./help.sh map [-v]"
fi
8d6f4919c12a6500d5c2b976cc1e2c77de7aa903 | Shell | WeijiaChe/quorum-aws | /terraform/scripts/install/start-quorum.sh | UTF-8 | 670 | 2.8125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -euo pipefail
# Node index (written by provisioning into ./node-id) selects this node's
# port triple: p2p 21000+i, RPC 22000+i, raft 50400+i.
gid=$(cat node-id)
p2p_port=$((21000 + $gid))
rpc_port=$((22000 + $gid))
raft_port=$((50400 + $gid))
echo "starting geth ${gid}"
ARGS="--rpcapi admin,db,eth,debug,miner,net,shh,txpool,personal,web3,quorum --emitcheckpoints"
# Launch the quorum container detached, publishing the three ports and
# bind-mounting the data directory and the account-unlock password file.
sudo docker run -d -p $p2p_port:$p2p_port -p $rpc_port:$rpc_port -p $raft_port:$raft_port -v /home/ubuntu/datadir:/datadir -v /home/ubuntu/password:/password -e PRIVATE_CONFIG='/datadir/constellation.toml' quorum --datadir /datadir $ARGS --port $p2p_port --rpcport $rpc_port --raftport $raft_port --verbosity 3 --nodiscover --rpc --rpccorsdomain "'*'" --rpcaddr '0.0.0.0' --raft --unlock 0 --password /password
| true |
f44be7fcdbbe6a682c5e733d7c3e53f3c578d17e | Shell | micjagga/Dotfiles | /zsh/more_pref.zsh | UTF-8 | 1,303 | 3.328125 | 3 | [
"ISC",
"LicenseRef-scancode-mit-taylor-variant"
] | permissive | export GREP_COLOR='1;33'
#Load themes from yadr and from user's custom prompts (themes) in ~/.zsh.prompts
autoload promptinit
fpath=($HOME/.dotfiles/zsh/themes $HOME/.home/.zsh.prompts $fpath)
promptinit
#Fasd
# only init if installed.
fasd_cache="$HOME/.fasd-init-bash"
# Regenerate the cache when the fasd binary is newer than it (or it is empty).
if [ "$(command -v fasd)" -nt "$fasd_cache" -o ! -s "$fasd_cache" ]; then
  eval "$(fasd --init posix-alias zsh-hook zsh-ccomp zsh-ccomp-install zsh-wcomp zsh-wcomp-install)" >| "$fasd_cache"
fi
source "$fasd_cache"
unset fasd_cache
# jump to recently used items
alias a='fasd -a' # any
alias s='fasd -si' # show / search / select
alias d='fasd -d' # directory
alias f='fasd -f' # file
alias z='fasd_cd -d' # cd, same functionality as j in autojump
alias zz='fasd_cd -d -i' # interactive directory jump
# Use Ctrl-x,Ctrl-l to get the output of the last command
zmodload -i zsh/parameter
# ZLE widget: re-run the previous history entry and append its output to
# the current command line buffer.
insert-last-command-output() {
LBUFFER+="$(eval $history[$((HISTCMD-1))])"
}
zle -N insert-last-command-output
bindkey "^X^L" insert-last-command-output
# Override rm -i alias which makes rm prompt for every action
alias rm='nocorrect rm'
# Use zmv, which is amazing
autoload -U zmv
alias zmv="noglob zmv -W"
# Functions
#
# (f)ind by (n)ame
# usage: fn foo
# to find all files containing 'foo' in the name
function fn() { ls **/*$1* }
| true |
c1769f2b8264d774a5092a06260f74bfbcb43556 | Shell | pazeshun/dotfiles-1 | /bashrc | UTF-8 | 2,599 | 3.265625 | 3 | [] | no_license | # vim: set ft=sh:
OS=$(uname)
if [ "$OS" = "Darwin" ]; then
if [ -f $(brew --prefix)/etc/bash_completion ]; then
. $(brew --prefix)/etc/bash_completion
fi
fi
export PATH=$HOME/.local/bin:$PATH
type wstool_cd.sh &>/dev/null && source `which wstool_cd.sh`
type pycd.sh &>/dev/null && source `which pycd.sh`
# encoding
export LC_CTYPE='en_US.UTF-8'
# terminal color
export TERM=xterm-256color
# prompt setup
parse_branch() {
local branch
branch=`git branch 2>/dev/null | sed -e '/^[^*]/d' -e 's/* \(.*\)/\1/'`
[ "$branch" != "" ] && printf "\e[0m on \e[0;35m$branch\e[0m"
}
show_venv() {
if [ ! -z $CONDA_PREFIX ]; then
printf " workon \e[0;34mconda:$CONDA_DEFAULT_ENV\e[0m"
fi
}
PS1='${debian_chroot:+($debian_chroot)}\e[0;35m\u\e[0m at \e[0;33m\h\e[0m in \e[0;32m\w\e[0m tm \e[0;37m$(date +%H:%M)$(parse_branch)\e[0m$(show_venv)\n$ '
plugins=(
$HOME/.sh/plugins/browse.sh
$HOME/.sh/plugins/git.sh
$HOME/.sh/plugins/ros.sh
)
for plugin in ${plugins[@]}; do
source $plugin
done
# -------------------------------
# alias
# -------------------------------
# source common aliases
source $HOME/.sh/rc/alias.sh
source $HOME/.bash/rc/alias.sh
# google command
google () {
search=""
for term in $@; do
search="$search%20$term"
done
open "http://www.google.com/search?q=$search"
}
#------------------------------------------------
# alias
# -----------------------------------------------
# Basics
alias h=history
alias md='mkdir -p'
alias rd=rmdir
alias d='dirs -v | head -10'
# cd aliases
- () {
cd -
}
alias ..='cd ..'
alias ...='cd ../..'
alias cd..='cd ..'
alias cd...='cd ../..'
alias cd....='cd ../../..'
alias cd.....='cd ../../../..'
cd () {
if [[ "x$*" = "x..." ]]
then
cd ../..
elif [[ "x$*" = "x...." ]]
then
cd ../../..
elif [[ "x$*" = "x....." ]]
then
cd ../../../..
elif [[ "x$*" = "x......" ]]
then
cd ../../../../..
elif [ -d ~/.autoenv ]
then
source ~/.autoenv/activate.sh
autoenv_cd "$@"
else
builtin cd "$@"
fi
}
# git aliases
git_current_branch() {
git branch 2> /dev/null | sed -e '/^[^*]/d' -e 's/* \(.*\)/\1/'
}
alias current_branch=git_current_branch
export VIM_COLORSCHEME='default'
export EDITOR='vim'
export LESS='--tabs=4 --LONG-PROMPT --ignore-case --RAW-CONTROL-CHARS'
# history search bindkey
_replace_by_history() {
local l=$(HISTTIMEFORMAT= history | tac | sed -e 's/^\s*[0-9]\+\s\+//' | percol --query "$READLINE_LINE")
READLINE_LINE="$l"
READLINE_POINT=${#l}
}
bind -x '"\C-r": _replace_by_history'
alias rm='rm -i'
alias cp='cp -i'
alias mv='mv -i'
| true |
ba78c57260c813465e376b6a88276743f2ba8d5c | Shell | siaimes/docker-cache | /ssl/get_ssl.sh | UTF-8 | 586 | 2.9375 | 3 | [] | no_license | FileName=$1
# $2 - IsDomain: 1 when the target above is a DNS name, anything else
#      (typically 0) for an IP address, which needs a subjectAltName.
IsDomain=$2
# Fail fast with usage instead of crashing later in "[ -eq ]" when the
# second argument is missing.
if [ -z "${FileName}" ] || [ -z "${IsDomain}" ]; then
    echo "usage: $0 <name-or-ip> <is-domain (1|0)>" >&2
    exit 1
fi
# 4096-bit RSA key; -des3 encrypts it and prompts for a passphrase.
openssl genrsa -des3 -out "${FileName}.key" 4096
openssl req -new -subj "/C=US/ST=Washington/CN=${FileName}" -key "${FileName}.key" -out "${FileName}.csr"
# Strip the passphrase so servers can load the key unattended; keep the
# encrypted original as *.origin.key.
mv "${FileName}.key" "${FileName}.origin.key"
openssl rsa -in "${FileName}.origin.key" -out "${FileName}.key"
# Self-sign for 10 years.
if [ "${IsDomain}" -eq 1 ]; then
    openssl x509 -req -days 3650 -in "${FileName}.csr" -signkey "${FileName}.key" -out "${FileName}.crt"
else
    # IP certificates must carry a subjectAltName of type IP.
    echo "subjectAltName = IP:${FileName}" > extfile.cnf
    openssl x509 -req -days 3650 -in "${FileName}.csr" -signkey "${FileName}.key" -extfile extfile.cnf -out "${FileName}.crt"
fi
| true |
f04cc0359ea76d409d7f7bf89864e1fa4fa7a87d | Shell | Juici/dotfiles | /zsh/.zsh/zi/plugins/_local---functions/functions/jump | UTF-8 | 3,424 | 3.5625 | 4 | [] | no_license | #!/usr/bin/env zsh
# -*- mode: sh; sh-indentation: 4; indent-tabs-mode: nil; sh-basic-offset: 4; -*-
# Set the base and typically useful options
emulate -LR zsh
setopt extendedglob warncreateglobal typesetsilent noshortloops rcquotes
# Run as script? ZSH_SCRIPT is a Zsh 5.3 addition
if [[ $0 != jump || -n $ZSH_SCRIPT ]]; then
    # Handle $0 according to the Zsh Plugin Standard:
    # http://zdharma.org/Zsh-100-Commits-Club/Zsh-Plugin-Standard.html
    0=${${ZERO:-${0:#$ZSH_ARGZERO}}:-${(%):-%N}}
    0=${${(M)0##/*}:-$PWD/$0}
    # Such global variable is expected to be typeset'd -g in the plugin.zsh
    # file. Here it's restored in case of the function being run as a script.
    typeset -gA Plugins
    Plugins[FUNCTIONS_DIR]=${0:h}
    # A script runs in a subprocess, so any cd it performs is lost.
    print -P '%F{yellow}warning%f: jump cannot change directory when run as a script' >&2
fi
# The jump points file specifies directories and patterns for directories that
# can be jumped to.
# Read the jump-points file ($1, default ~/.jump_points), split its content
# into shell words with comment stripping ((Z:Cn:)), and print the entries
# quoted ((@q)) so the caller can safely re-split them.
.get_jump_points() {
    local jump_points_file="${1:-"$HOME/.jump_points"}"
    local -a jump_points
    if [[ -r "$jump_points_file" ]]; then
        jump_points=( ${(@Z:Cn:)"$(<"$jump_points_file")"} )
    fi
    print -r -- "${(@q)jump_points}"
}
# jump [-l|--list] [-e|--edit] [--] TARGET
# cd to a directory whose basename matches TARGET among the directories
# named by ~/.jump_points (patterns allowed).  --list prints all candidates
# as name=path; --edit opens the jump-points file in $EDITOR.
jump() {
    emulate -L zsh
    setopt extendedglob warncreateglobal typesetsilent noshortloops rcquotes
    setopt autopushd nonomatch
    local -A opts
    zparseopts -D -E -M -A opts - l=-list -list e::=-edit -edit::
    # --list and --edit are mutually exclusive.
    if (( ${#opts} > 1 )); then
        print -r "error: conflicting arguments: ${(kj:, :)opts}"
        return 1
    fi
    # Flags.
    integer list=${+opts[--list]}
    integer edit=${+opts[--edit]}
    local jump_points_file="$HOME/.jump_points"
    if (( edit )); then
        ${EDITOR:-vim} "$jump_points_file"
        return 0
    fi
    # Shift arguments if first argument is '-' or '--'.
    if [[ "$1" = -(#c1,2) ]]; then
        shift
    fi
    # Target directory.
    local target="$1"
    # If target is empty and --list flag not enabled, error.
    if [[ -z "$target" ]] && (( ! list )); then
        print 'error: no jump target' >&2
        return 1
    fi
    local -A targets
    # Add directories at jump points.
    local jump_point
    for jump_point in ${(@Q)${(@z)"$(.get_jump_points "$jump_points_file")"}}; do
        local dir
        # (N/) glob qualifier: expand only to existing directories.
        for dir in ${~jump_point}(N/); do
            targets+=( "${dir:t}" "$dir" )
        done
    done
    # Is the --list flag enabled.
    if (( list )); then
        local -a lines
        # List of lines to print.
        local k v
        for k v in "${(@kv)targets}"; do
            lines+=( "${k}=${v}" )
        done
        (( ${#lines} > 0 )) && print -r -- "${(F)lines}"
        return 0
    fi
    local resolved="${(QQ)targets[$target]}"
    # If the resolved path is empty, try searching case-insensitively.
    if [[ -z "$resolved" ]]; then
        resolved="${(vQQ)targets[(i)(#i)$target]}"
    fi
    # Check that a resolved target was found and that target isn't a directory.
    if [[ -z "$resolved" ]]; then
        print -r "error: could not find target: $target"
        return 1
    elif [[ ! -d "$resolved" ]]; then
        print -r "error: target '$target' is not a directory: $resolved"
        return 1
    fi
    cd -- "$resolved"
}
jump "$@"
# Use alternate marks [[[ and ]]] as the original ones can confuse nested
# substitutions, e.g.: ${${${VAR}}}
# vim:ft=zsh:tw=80:sw=4:sts=4:et:foldmarker=[[[,]]]
| true |
869a727bbf93d86afafc4b371967fa73c325c312 | Shell | binaryage/cljs-devtools | /examples/lein/scripts/compare-advanced-builds.sh | UTF-8 | 696 | 3.34375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
set -e -o pipefail
# Compiled output locations for the three advanced-build flavors being
# compared (cljs-devtools conditional/unconditional/no install).
COMPILED_PATH="resources/public/_compiled"
CONDITIONAL_INSTALL_OUTPUT="$COMPILED_PATH/advanced-conditional-install/devtools_sample.js"
UNCONDITIONAL_INSTALL_OUTPUT="$COMPILED_PATH/advanced-unconditional-install/devtools_sample.js"
NO_INSTALL_OUTPUT="$COMPILED_PATH/advanced-no-install/devtools_sample.js"
# Print the file label followed by a count of each distinct munged
# "devtools$..." symbol surviving in an advanced-compiled output file.
#   $1 - path to the compiled JavaScript file
function keywords {
    local compiled_file="$1"
    printf '  file: %s\n' "${compiled_file}"
    # Split fused "$$"/"<digits>$" boundaries onto new lines, keep only the
    # devtools$ symbols, and histogram them.
    perl -pe 's/(\$|\d+)\$/\1\$\n/g' < "${compiled_file}" \
        | grep -o 'devtools\$.*' \
        | sort \
        | uniq -c
}
# Report the devtools symbol histogram for each build flavor in turn.
echo "no-install:"
keywords "$NO_INSTALL_OUTPUT"
echo ""
echo ""
echo "conditional-install:"
keywords "$CONDITIONAL_INSTALL_OUTPUT"
echo ""
echo ""
echo "unconditional-install:"
keywords "$UNCONDITIONAL_INSTALL_OUTPUT"
| true |
23a39c78d97e86c2d5c43d24f22b4f43057c3d98 | Shell | myourshaw/pypeline | /scripts/ensembl_table_reload.sh | UTF-8 | 2,133 | 3.8125 | 4 | [] | no_license | #!/bin/bash
#ensembl_table_reload.sh
#assumes databases and tables exist
USAGE="usage: $0 <ensembl current_mysql directory> <host> <user> <password> <list of database.table names>"
if [[ $1 == "?" || $1 == "-h" || $1 == "--help" || $1 == "help" ]]; then echo ${USAGE}; exit; fi
#get path and parent directory of this script
#http://hintsforums.macworld.com/archive/index.php/t-73839.html
# if the call came via symlink, then use its target instead:
arg=$0; [[ -L $0 ]] && arg=$(stat -L -c'%n' "$0");
script_path=$(2>/dev/null cd "${arg%/*}" >&2; echo "`pwd -P`/${arg##*/}");
script_dir=$(dirname "$script_path");
HERE=${script_dir};
wd=`pwd`;
# Require the five fixed args and an existing data directory.
if (( $# < 5 )) || [[ ! -d ${1} ]]; then echo ${USAGE}; exit; fi
ENSEMBL_DIR=${1}; shift;
HOST=${1}; shift;
USER=${1}; shift;
PASSWORD=${1}; shift;
# Remaining args are "database.table" pairs.
TABLES=$@;
MYSQL="/usr/bin/mysql -h ${HOST} --user=${USER} -p${PASSWORD}";
MYSQLIMPORT="/usr/bin/mysqlimport -h ${HOST} --user=${USER} -p${PASSWORD}";
# Unique run suffix for the SGE job name and output directory.
SUFFIX="`date +%Y%m%d%H%M%S`-$RANDOM"
qout=${wd}/qout_${SUFFIX};
mkdir -p ${qout};
QSUB='qsub -q all.q@compute-4*'" -cwd -V -e ${qout} -o ${qout} -l excl=true -N ensembl_table_reload_${SUFFIX}";
cmds="${qout}/ensembl_table_reload_${SUFFIX}.cmds";
if [[ -f "$cmds" ]]; then rm -f "$cmds"; fi
#truncate tables
for dt in ${TABLES}; do
    db=${dt%.*};
    d=${ENSEMBL_DIR}/${db};
    t=${dt#*.};
    # Only touch tables whose schema dump exists in the Ensembl dump dir.
    if [[ -d ${d} ]] && [[ -e ${d}/${db}.sql ]]; then
        echo "truncating table ${db}.${t}";
        echo "TRUNCATE TABLE ${db}.${t};" | ${MYSQL};
    else
        echo "${d}/${db}.sql does not exist";
    fi
done
echo "creating job array to import data";
#import data
for dt in ${TABLES}; do
    db=${dt%.*};
    d=${ENSEMBL_DIR}/${db};
    t=${dt#*.};
    f=${d}/${t}.txt
    if [[ -d ${d} && -e ${f} ]]; then
        N="${db}.`basename ${f%.txt}`";
        # One mysqlimport command per line of the job-array command file.
        cmd="${MYSQLIMPORT} -L --fields_escaped_by=\\ ${db} ${f}";
        echo ${cmd} >> ${cmds};
    else
        echo "${f} does not exist";
    fi
done
# Submit an SGE array job; each task runs the line of ${cmds} matching its
# SGE_TASK_ID.
if [[ -f "$cmds" ]]; then
    echo '$(head -n $SGE_TASK_ID' ${cmds} '| tail -n 1)' | ${QSUB} -t 1-$(cat ${cmds} | wc -l);
fi
echo "running cluster jobs to import data";
echo "job array is in ${cmds}";
echo "STDOUT and STDERR will be in ${qout}";
cd ${wd};
| true |
8efb990965ad015946b46d91fc918ea7b9534d63 | Shell | zwffff2015/shell | /full-distributed/removeHadoopEnv.sh | UTF-8 | 234 | 2.65625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Tear down the Hadoop test environment created by the companion setup
# scripts: remove the hadoop user, its home, the install tree, and the
# host entries that were appended to /etc/hosts.
username="hadoop"
userdel "$username"
rm -rf "/home/$username"
chown -R root:root expectScriptLogin.sh
rm -rf /usr/local/hadoop/
# delete the last three rows
A=$(sed -n '$=' /etc/hosts)
# Guard: if the line count came back empty (or the file has fewer than
# three lines) the unquoted sed address would be malformed and could
# corrupt /etc/hosts.
if [ -n "$A" ] && [ "$A" -ge 3 ]; then
    sed -i "$(($A-3+1)),${A}d" /etc/hosts
fi
| true |
8a47daf3d4ac5c58ac71c34201f4358c092e49ec | Shell | dreibh/planetlab-lxc-vsys-scripts | /root-context/exec/pfmount | UTF-8 | 399 | 3.296875 | 3 | [] | no_license | #!/bin/sh
# Mount the planetflow directory in a slice
# FIXME: if touch operation failes
# $1 is the slice (vserver) name.  Marking the slice with "nonamespace"
# requires a restart before the bind mount is visible inside it.
if [ ! -e /etc/vservers/$1/nonamespace ]; then
	touch /etc/vservers/$1/nonamespace
	if [ $? -eq 0 ]; then
		vserver $1 stop
		vserver $1 start
	fi
fi
# NOTE(review): VERSION is computed but never used below — possibly a
# leftover from an older kernel-version branch; confirm before removing.
VERSION=`uname -r | awk -F"-" '{print $1;}' | awk -F"." '{print $3}'`
DEST="/vservers/$1/pf"
# Read-only bind mount of the fprobe flow data into the slice.
mount --bind /var/local/fprobe -o ro $DEST
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.