blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
0e28ccadcc0096ec1078a90f2b4e2063884e1c8b
|
Shell
|
cnyuanh/malaria_mitogenome
|
/765G/sam.sh
|
UTF-8
| 1,437
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
#SBATCH --time=00:01:00
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --job-name=sam
#SBATCH --mem=10000
#SBATCH --output=sam.log
#Description: Subset the assembly from the putative mitochondrial reads
#Written by: Jorge Eduardo Amaya Romero
#Last revision: 14-10-2016
module purge
module load SAMtools/1.3.1-foss-2016a
module load BEDTools/2.25.0-foss-2016a
module load parallel/20160622-foss-2016a
module load Python/2.7.11-foss-2016a
module load picard/1.130-Java-1.7.0_80
module load Java/1.8.0_92
#Link the file with the recommended sampling rates. Check the how to produce this file by reading SamplingTest/README.md (Refurbish: Change to path to the directory that contains your data)
ln -s /path/to/SamplingTest/Results/Scores.csv Data/.
# Parallel arrays: sample ids (column 1) and their sampling rates (column 2).
ids=( $( cut -d ',' -f1 Data/Scores.csv) )
rate=( $( cut -d ',' -f2 Data/Scores.csv) )
mkdir -p Results/SAM_to_FASTQ/Data
mkdir -p Results/SAM_to_FASTQ/Results
cp -r Code/ Results/SAM_to_FASTQ/.
cp *.sh Results/SAM_to_FASTQ/.
# Abort rather than submit jobs from the wrong directory if the cd fails.
cd Results/SAM_to_FASTQ || exit 1
#Link the data sets (Refurbish: Change to path to the directory that contains your data)
#ln -s /path/to/Sequences/* Data/.
len=${#ids[@]}
jobids=()
# Submit one subordinate job per sample; the job id is the 4th field of
# sbatch's "Submitted batch job <id>" output.
for (( i = 0; i < len; i++ )); do
jobids+=( "$(sbatch subordinate_sam.sh "${ids[$i]}" "${rate[$i]}" | cut -d ' ' -f 4)" )
done
# Build the colon-separated job id list required by --dependency=afterok.
txt=$(printf ":%s" "${jobids[@]}")
txt=${txt:1}
cd ../../ || exit 1
#Call the next script as a dependency
sbatch --dependency=afterok:"$txt" ass.sh | cut -d ' ' -f 4
| true
|
df883b5c603a2009767ccca55ca02e741dbc9cc2
|
Shell
|
farbodsalimi/istio-util
|
/bin/helpers/os.sh
|
UTF-8
| 176
| 3.140625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# cmd_open TARGET
# Open TARGET (file path or URL) with the platform's default handler,
# dispatching on $OSTYPE. Unknown platforms just report the value.
# Fix: quote "$1" so targets containing spaces are passed as one argument.
function cmd_open() {
  case "$OSTYPE" in
    darwin*) open "$1" ;;       # macOS
    linux*) xdg-open "$1" ;;    # Linux desktop
    msys*) rundll32 "$1" ;;     # Git-Bash / MSYS on Windows
    *) echo "unknown os: $OSTYPE" ;;
  esac
}
"$@"
| true
|
e80d1ecc1c75d99cf2278c55fecd1467c357877b
|
Shell
|
paulwratt/ShellBASIC-bin
|
/BASIC/PEN
|
UTF-8
| 260
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/sh
# PEN - created with 'mkbasic'
# Set the terminal foreground ("pen") colour, BASIC-style: PEN n  (n >= 1)
. .SETUP
# Require the colour number argument.
if [ "$1" = "" ]; then
echo "Error: PEN n"
exit 1
fi
# Reject colour numbers below 1 (non-numeric input also fails this test).
# Fix: quote "$1" so the test cannot be broken by word-splitting.
if [ "$1" -lt 1 ]; then
echo "Error: PEN T_INTEGER"
exit 2
fi
FC=$1
# Persist the colour for other BASIC commands to read back.
echo "$1" > "${BASIC_TEMP}.HCOLOR"
export bas_FC=$FC
# Apply the foreground colour to the terminal.
tput setaf "$FC"
unset FC
exit 0
| true
|
2c99be238c3b41a7a60155410e36e073404312eb
|
Shell
|
thisboyiscrazy/fontem
|
/make-all
|
UTF-8
| 1,201
| 3.59375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
#
# This file is distributed under the terms of the MIT License.
# See the LICENSE file at the top of this tree, or if it is missing a copy can
# be found at http://opensource.org/licenses/MIT
#
# Make everything
# Always build from the directory this script lives in.
dir=$(dirname $0)
cd "${dir}"
# Prefer GNU make when installed under the name 'gmake' (BSDs, macOS).
MAKE=make
which gmake >/dev/null 2>&1 && MAKE=gmake
export MAKE
# Clear LD_LIBRARY_PATH so the build links against system libraries only.
export LD_LIBRARY_PATH=
#MAKE_PARALLEL=-j4 # TODO: make this depend on if it's in CI
# Decide whether the autotools bootstrap step needs to run.
do_bootstrap=
if [ -f configure ]; then
# Find if any autotools input is newer than configure
if find . -type f '(' -name 'Makefile.*' -or -name 'configure.*' -or -name '*.in' ')' -newer configure | grep -q "."; then
echo "- autotools out of date; bootstrap required"
do_bootstrap=y
fi
# Find if any autotools output file is missing
# NOTE(review): this evaluates the ac_config_files= assignment embedded in
# the generated configure script to learn which files it should produce.
outputs=$(eval $(grep ^ac_config_files= configure); echo $ac_config_files)
for i in ${outputs}; do
if [ ! -f "$i" ]; then
echo "- '$i' is missing; bootstrap required"
do_bootstrap=y
fi
done
else
echo "- 'configure' is missing; bootstrap required"
do_bootstrap=y
fi
# Bootstrap: clean stale products, regenerate configure, run debug configure.
if [ "${do_bootstrap}" ]; then
[ -f Makefile ] && $MAKE $MAKE_PARALLEL clean
./bootstrap
./do-configure-debug
$MAKE $MAKE_PARALLEL clean
fi
# Replace this shell with make so its exit status becomes the script's.
exec $MAKE $MAKE_PARALLEL
| true
|
5cc17754e0e3e9d2fd740df0c5d95b7e2d27cdf7
|
Shell
|
matthewcosgrove/cf-scripts
|
/pivnet/download-products.sh
|
UTF-8
| 6,604
| 3.6875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
set -e
echo "About to download products.."
# Show the requested product=version pairs from products.json for visibility.
jq -r "to_entries|map(\"\(.key)=\(.value|tostring)\")|.[]" products.json
# Associative arrays need Bash 4+.
# NOTE(review): this only warns and carries on; 'declare -A' below will still
# fail on Bash 3 - confirm whether this should exit instead.
if [[ $BASH_VERSION != 4* ]] ; then
echo "Error: Use of associative arrays requires Bash 4. Detected version $BASH_VERSION"
echo "NB: Might be error prone when Bash 5 is released ;-)"
fi
# Required environment: PivNet API token and download destination directory.
: ${CF_PIVNET_TOKEN:?"Need to set CF_PIVNET_TOKEN non-empty where token can be retrieved from edit-profile page of network.pivotal.io"}
: ${CF_BINARY_STORE:?"Need to set CF_BINARY_STORE non-empty"}
# Load products.json into arr: key = product name (with optional alias),
# value = requested version.
declare -A arr
while IFS="=" read -r key value
do
arr[$key]="$value"
done < <(jq -r "to_entries|map(\"\(.key)=\(.value)\")|.[]" products.json)
# Directory containing this script, used to locate helpers.sh.
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
# helpers.sh is expected to provide extractTileName/extractProductName/
# getProductRelease/generateFileName used below.
if [[ -f $DIR/helpers.sh ]] ; then
source "$DIR/helpers.sh"
echo "Sourced $DIR/helpers.sh"
else
echo "Skipping source of helper script"
fi
echo "*****************************************************************************************"
echo "* Validating config and PivNet API e.g. checking versions exist and urls are consistent *"
echo "*****************************************************************************************"
# Pass 1: validate every requested product/version against the PivNet API
# before downloading anything, so bad config fails fast.
for product_name_with_alias in "${!arr[@]}"
do
product_version=${arr[$product_name_with_alias]}
echo "Checking $product_name_with_alias = $product_version"
name=$(extractTileName $product_name_with_alias)
product_name=$(extractProductName $product_name_with_alias)
product_release=$(getProductRelease $product_name $product_version)
# A release object without an "id" key means the version was not found.
hasVersion=$(echo $product_release | jq '. | has("id")')
if [[ "$hasVersion" != "true" ]] ; then
releases="https://network.pivotal.io/api/v2/products/$product_name/releases"
echo
echo "Please check version $product_version of $product_name exists at $releases and try again"
exit 1
fi
link_product_files=$(echo $product_release | jq -r ._links.product_files.href)
echo "Product files for tile with tile name as $name and product name as $product_name will be assessed.."
echo $link_product_files
product_files_response=$(curl -sfS "$link_product_files")
product_files_array=$(echo "$product_files_response" | jq [.product_files[]])
product_files_array_size=$(echo $product_files_array | jq '. | length')
echo "No. of product files found is $product_files_array_size"
# No files at all: nothing downloadable for this release.
if [[ $product_files_array_size = 0 ]] ; then
echo $link_product_files
echo "the above link has no product files for $product_name $product_version, raise a support case at support.pivotal.io"
echo "In the meantime, try a different version or remove from input and download manually. NOTE: Rabbit MQ is not available under pivotal-rabbitmq but pivotal-rabbitmq-service (See https://network.pivotal.io/api/v2/products/pivotal-rabbitmq-service/releases)"
echo "Apologies, but we need to exit, please retry again once you have dealt with this issue"
exit 1
fi
# Multiple files: only supported when the product is special-cased below or
# the .pivotal file sits at index 0 of the array.
if [[ $product_files_array_size > 1 ]] ; then
echo "Multiple product files detected. Need to do some convoluted stuff with the response array.."
if [[ $product_name = "elastic-runtime" || $product_name = "ops-metrics" ]] ; then
echo "Specific support for $product_name has been added (it has array size $product_files_array_size), so the product will be processed"
else
nameOfZeroIndex=$(echo $product_files_array | jq -r .[0].aws_object_key)
echo "Product files array size = $nameOfZeroIndex"
if [[ $nameOfZeroIndex == *.pivotal ]] ; then
echo "$product_name has array size $product_files_array_size but the .pivotal file is in expected location of index 0, so the product will be processed"
else
echo "$product_name $product_version not supported as .pivotal file not at index 0 as expected. Please update script or remove product from products.json input file"
exit 1
fi
fi
fi
# Resolve the download link the same way pass 2 will, to prove it resolves.
link_product_files=$(echo $product_release | jq -r ._links.product_files.href)
product_files_response=$(curl -sfS "$link_product_files")
if [[ $product_name = "elastic-runtime" ]] ; then
link_product_download=$(echo "$product_files_response" | jq [.product_files[]] | jq --arg name "PCF Elastic Runtime" '.[] | select(.name==$name)' | jq -r ._links.download.href)
elif [[ $product_name = "ops-metrics" ]] ; then
link_product_download=$(echo "$product_files_response" | jq [.product_files[]] | jq --arg name "PCF JMX Bridge" '.[] | select(.name==$name)' | jq -r ._links.download.href)
else
link_product_download=$(echo "$product_files_response" | jq -r .product_files[0]._links.download.href)
fi
echo "Will be downloading from $link_product_download"
done
echo "************************************************"
echo "* Validation complete. Downloads commencing... *"
echo "************************************************"
# Pass 2: download each validated product into CF_BINARY_STORE, skipping
# files already present on disk.
for product_name_with_alias in "${!arr[@]}"
do
product_version=${arr[$product_name_with_alias]}
tile_name=$(extractTileName $product_name_with_alias)
# Fix: previously echoed $name, a stale variable left over from the
# validation loop above; report the tile name extracted on this iteration.
echo "Extracted Tile Name as $tile_name"
file_name=$(generateFileName ${tile_name} ${product_version})
file_loc_and_name=$CF_BINARY_STORE/"$file_name"
echo "File destination determined as $file_loc_and_name"
if [[ -f $file_loc_and_name ]] ; then
echo "File $file_loc_and_name already downloaded so skipping"
continue
fi
echo "Handling $product_name_with_alias = $product_version"
product_name=$(extractProductName $product_name_with_alias)
echo "Extracted Product Name as $product_name"
product_release=$(getProductRelease $product_name $product_version)
echo
echo "Product release is at $product_release"
echo
# The EULA must be accepted via the API before the download link is usable.
link_eula=$(echo $product_release | jq -r ._links.eula_acceptance.href)
echo "Accepting EULA at $link_eula"
curl -s -X POST ${link_eula} --header "Authorization: Token ${CF_PIVNET_TOKEN}"
echo
echo
link_product_files=$(echo $product_release | jq -r ._links.product_files.href)
product_files_response=$(curl -sfS "$link_product_files")
# Special-cased products select the file by display name; otherwise the
# .pivotal file is expected at index 0 (verified during validation).
if [[ $product_name = "elastic-runtime" ]] ; then
link_product_download=$(echo "$product_files_response" | jq [.product_files[]] | jq --arg name "PCF Elastic Runtime" '.[] | select(.name==$name)' | jq -r ._links.download.href)
elif [[ $product_name = "ops-metrics" ]] ; then
link_product_download=$(echo "$product_files_response" | jq [.product_files[]] | jq --arg name "PCF JMX Bridge" '.[] | select(.name==$name)' | jq -r ._links.download.href)
else
link_product_download=$(echo "$product_files_response" | jq -r .product_files[0]._links.download.href)
fi
echo "Downloading from $link_product_download"
mkdir -p $CF_BINARY_STORE
wget -O $file_loc_and_name --post-data="" --header="Authorization: Token ${CF_PIVNET_TOKEN}" ${link_product_download}
done
| true
|
af399ec744d6f2d5290f3bf88eec8f3e67872855
|
Shell
|
stormentt/bigtmp
|
/bigtmp.mount
|
UTF-8
| 756
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
# Mount an ephemerally-encrypted scratch area at /bigtmp.
# /etc/bigtmp.env must define BIGTMP_PART (block device) and BIGTMP_USERS
# (whitespace-separated user names).
source /etc/bigtmp.env
if [ -z "${BIGTMP_PART}" ]; then
echo "BIGTMP_PART not set"
exit 1
fi
if [ -z "${BIGTMP_USERS}" ]; then
echo "BIGTMP_USERS not set"
exit 1
fi
# Refuse to run twice.
if grep -qs "/bigtmp" /proc/mounts; then
echo "bigtmp already mounted"
exit 1
fi
mkdir -p /bigtmp
# Plain dm-crypt keyed from /dev/urandom: contents become unrecoverable once
# the mapping is closed, which is the point of this scratch area.
cryptsetup plainOpen --key-file /dev/urandom "${BIGTMP_PART}" crBigTmp
# -F twice forces mkfs even if the device appears in use (see mke2fs(8)).
mkfs.ext4 -F -F /dev/mapper/crBigTmp
mount /dev/mapper/crBigTmp /bigtmp
# Per-user private directories, accessible only by their owner (700).
for i in ${BIGTMP_USERS[@]}; do
mkdir -p "/bigtmp/${i}/Downloads"
mkdir -p "/bigtmp/${i}/tmp"
chown -R "${i}:${i}" "/bigtmp/${i}"
find "/bigtmp/${i}" -type d -exec chmod 700 {} \;
done
mkdir -p /bigtmp/shared
#1777 for the restricted deletion bit and rwx for all
chmod 1777 /bigtmp/shared
| true
|
316283b776ccdb945b370b74d2e89a045fc9e7c9
|
Shell
|
leejo/battling_a_legacy_schema_with_dbic
|
/examples/db/rebuild.sh
|
UTF-8
| 211
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
# Rebuild resorts.db from scratch by replaying every update script in updates/.
set -e -x -u
# Fix: use -f so the first-ever run (no resorts.db yet) doesn't abort under
# set -e when rm finds nothing to delete.
rm -f resorts.db
# Iterate the directory with a glob instead of parsing ls output.
for path in updates/*
do
# With nullglob unset an empty directory leaves the literal pattern; skip it.
[ -e "$path" ] || continue
file=${path##*/}
echo $file
# AUTO_INC in the file so we can use with mysqlworkbench
sed 's/AUTO_INC/AUTOINC/g' "updates/$file" | sqlite3 resorts.db
done
| true
|
e33feaad225a4a4a3d0b89121a69070d1e6bce74
|
Shell
|
fishuyo/kodama
|
/init.sh
|
UTF-8
| 805
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# Bootstrap: fetch git submodules, ensure sbt-launch.jar is present next to
# this script, then run the sbt tasks that fetch the project's libraries.
git submodule update --init
if [ -e "`dirname $0`/sbt-launch.jar" ]
then
echo ""
else
echo "Downloading sbt-launch.jar..."
wget "http://repo.typesafe.com/typesafe/ivy-releases/org.scala-sbt/sbt-launch//0.12.3/sbt-launch.jar" > /dev/null 2>&1
# Fall back to curl when wget is unavailable or failed.
if [ $? != 0 ]
then
curl -O "http://repo.typesafe.com/typesafe/ivy-releases/org.scala-sbt/sbt-launch//0.12.3/sbt-launch.jar"
if [ $? != 0 ]
then
echo "Failed to get sbt-launch.jar, please install wget or curl"
fi
fi
fi
# NOTE(review): wget/curl download into the *current* directory while the
# existence check looks next to the script - confirm the script is always
# invoked from its own directory.
java -Xms512M -Xmx2048M -Xss1M -XX:+CMSClassUnloadingEnabled -XX:MaxPermSize=384M -jar `dirname $0`/sbt-launch.jar "project seer-core" download-libs
java -Xms512M -Xmx2048M -Xss1M -XX:+CMSClassUnloadingEnabled -XX:MaxPermSize=384M -jar `dirname $0`/sbt-launch.jar "project seer-core" update-gdx
| true
|
3c30a2a4a52da3377d24da18138fa9c482ab2f4b
|
Shell
|
zombiecalypse/config-files
|
/bin/mkproj
|
UTF-8
| 145
| 2.953125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# mkproj NAME - scaffold a new git project under ~/Projects with a README
# committed as the initial commit.
# Fix: fail fast with a usage hint when no name is given (an empty $1
# previously made the script operate on ~/Projects itself).
DIR=~/Projects/${1:?usage: mkproj <project-name>}
set -e
# No -p: creating a project that already exists should fail loudly.
mkdir "$DIR"
cd "$DIR"
git init
touch README.md
# Let the user write the initial README interactively.
vim README.md
git add README.md
git commit -m 'Initial commit'
| true
|
2d28e5a3cd6a65d65293200b06322c40623afac4
|
Shell
|
psivesely/project-euler
|
/scripts/stats.sh
|
UTF-8
| 777
| 3.859375
| 4
|
[
"WTFPL"
] |
permissive
|
#!/bin/bash
# Prints out solutions per language, and which solutions have been solved in
# which language.
echo "Solutions Per Language"
echo "======================"
# Each top-level directory (except data/ and scripts/) is one language.
for dir in *; do
if [[ $dir = 'data' || $dir = 'scripts' ]]; then continue; fi
if [ ! -d "$dir" ]; then continue; fi
cd "$dir"
# Drop build artifacts so they are not counted as solutions.
make clean > /dev/null 2>&1
num=$(ls | wc -l)
solns=""
# num - 1 assumes one non-solution entry (the Makefile) per directory.
echo -e -n "$dir ($((num - 1))):\t"
# Extra tab keeps the short 'c' column aligned with longer language names.
if [ "$dir" = 'c' ]; then echo -e -n "\t"; fi
# Collect everything except build support files as solution names.
for f in *; do
if [ "$f" = 'Makefile' ]; then continue; fi
if [ "$f" = 'a.out' ]; then continue; fi
if [ "$f" = 'lib' ]; then continue; fi
solns="$solns $f "
done
# Strip extensions and the 'P' problem prefix, then sort numerically.
solns=$(echo -n "$solns" | sed 's/\.[^ ]*\s/ /g' | sed 's/P//g')
solns=$(echo $solns | tr ' ' '\n' | sort -n | tr '\n' ' ')
echo $solns
cd ..
done
| true
|
e54b7d4efac0df3e0e894d873dcba0383f0dbd80
|
Shell
|
mikey-/dotfiles
|
/.export.d/diary.sh
|
UTF-8
| 154
| 2.515625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# shellcheck disable=SC2155
# Root directory holding all diary entries.
export DIARY_BASE_DIR="${HOME}/diary.d"
# Today's entry directory, e.g. $DIARY_BASE_DIR/2024/01/31.
export DIARY_CURRENT_DIR="${DIARY_BASE_DIR}/$(date +%Y/%m/%d)"
| true
|
139481b74ed07700583e81e91744d31dd8416159
|
Shell
|
inutano/chip-atlas
|
/script/chip-atlas
|
UTF-8
| 704
| 3.3125
| 3
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
### BEGIN INIT INFO
# Provides: chip-atlas
# Required-Start: $local_fs $network $named $time $syslog
# Required-Stop: $local_fs $network $named $time $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Description: Start ChIP-Atlas web application
### END INIT INFO
# SysV init script: start/stop/restart the ChIP-Atlas unicorn app server.
ARG=$1
RBENV_HOME=/home/ubuntu/.rbenv
WORKDIR=/home/ubuntu/chip-atlas
start(){
# Put rbenv shims on PATH, then daemonize unicorn in production mode.
# NOTE(review): 'bundle exe' relies on bundler accepting abbreviated command
# names - confirm 'bundle exec' was intended.
PATH=${RBENV_HOME}/bin:${RBENV_HOME}/shims:${PATH}; cd ${WORKDIR}; bundle exe unicorn -c ${WORKDIR}/unicorn.rb -E production -D
}
stop(){
# Kill the daemon via the pid file unicorn writes on startup.
cat ${WORKDIR}/tmp/pids/unicorn.pid | xargs kill
}
# Dispatch on the action argument; unknown actions are silently ignored.
case $ARG in
"start")
start
;;
"stop")
stop
;;
"restart")
stop
start
;;
esac
| true
|
d2ccded0aa416f2a8f38894689e8a5a01e540a71
|
Shell
|
nghiat/myconfig
|
/unix/.bashrc
|
UTF-8
| 821
| 2.765625
| 3
|
[] |
no_license
|
# Interactive bash configuration.
export PATH="/bin:/usr/bin:$PATH";
export EDITOR="vim"
# Bold/underlined/inverse prompt showing user:cwd$
export PS1="\[\e[1;4;7m\]\u:\w$ \[\e[0m\]"
# Avoid duplicates
export HISTCONTROL=ignoredups:erasedups
export HISTSIZE=100000
export HISTFILESIZE=100000
# Share history across sessions: append, clear, re-read on every prompt.
export PROMPT_COMMAND="history -a; history -c; history -r;"
# If not running interactively, do not do anything
[[ $- != *i* ]] && return
# Autocorrect typos when using cd
shopt -s cdspell;
# Append history
shopt -s histappend
# Check the window size after each command and, if neccessary,
# update the value of LINES and COLUMNS
shopt -s checkwinsize
# Disable Ctrl-S
stty -ixon
force_color_prompt=yes
# This has to be sourced before running tmux
if [ -f ~/.ez_colors ]; then
source ~/.ez_colors
fi
if [ -f ~/.ez_local ]; then
source ~/.ez_local
fi
# Auto-attach tmux for interactive shells not already inside one.
[[ -z "$TMUX" && -x "$(command -v tmux)" ]] && exec tmux
| true
|
42bcbc8e358bd12dd3e01ea052a4aab3ccdee4d1
|
Shell
|
johncoleman83/generate-testing-files
|
/execute.sh
|
UTF-8
| 2,802
| 4.375
| 4
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
#!/usr/bin/env bash
# executes tests based on file type
# RE matches digits only; used to validate menu input.
RE='^[0-9]+$'
# Print the colour menu of generator choices (ANSI: \033[37m light,
# \033[30m dark foreground).
function description_output() {
echo -ne "\033[37m"
echo "* *********************************************************** *"
echo "* *"
echo "* Please Select Testing File Type: *"
echo -n "* "
echo -ne "\033[30m(1) PYTHON "
echo -e "\033[37m*"
echo -n "* "
echo -ne "\033[30m(2) PYTHON + SQL "
echo -e "\033[37m*"
echo -n "* "
echo -ne "\033[30m(3) C "
echo -e "\033[37m*"
echo -n "* "
echo -ne "\033[30m(4) C Header (.h) file "
echo -e "\033[37m*"
echo "* *"
echo "* *********************************************************** *"
echo -ne "\033[30m"
}
# Read one keystroke and validate it is a digit; exits with status 52 on
# non-numeric input. The chosen digit is handed back via the function's
# return status (callers read it from $? immediately after the call).
function request_user_input() {
read -p "Type your choice number or anything else to quit " -n 1 -r REPLY
echo
if ! [[ $REPLY =~ $RE ]] ; then
echo "error: Not a number" >&2; exit 52
fi
return "$REPLY"
}
# Announce that the chosen generator script is about to run.
function execution_confirmation_output() {
echo "executing file generation script..."
}
# Run the generator matching the user's menu choice; any other number says
# goodbye and exits (or returns 1 when the script is sourced).
function init_selected_script() {
case "$1" in
1)
execution_confirmation_output
./generate_pyfile.py
;;
2)
execution_confirmation_output
./generate_sqlfile.py
;;
3)
execution_confirmation_output
./generate_cfile.py
;;
4)
execution_confirmation_output
./generate_cheader.py
;;
*)
echo "...Goodbye"
# Exit when executed directly, return when sourced.
[[ "$0" = "$BASH_SOURCE" ]] && exit 1 || return 1
esac
}
# Print the cleanup yes/no menu.
function cleanup_output() {
echo -ne "\033[37m"
echo "Would you like to cleanup?"
echo -e "\033[30m(1) YES"
echo -e "(2) NO"
echo -e "\033[37m or anything else to quit"
echo -ne "\033[30m"
}
# Offer cleanup; choice 1 deletes the generated workspace, anything else
# says goodbye and exits (or returns 1 when sourced).
function prompt_and_init_cleanup() {
cleanup_output
request_user_input
local REPLY=$?
case "$REPLY" in
1)
clean_up
;;
*)
echo "...Goodbye"
[[ "$0" = "$BASH_SOURCE" ]] && exit 1 || return 1
esac
}
# Destructive: removes the generator scripts, this script itself, and docs
# from the current directory.
function clean_up() {
rm -rf generate*
rm -rf shared
rm execute.sh
rm intrapage.txt
rm README.md
}
# Show the menu, run the chosen generator, then offer cleanup.
function main() {
description_output
request_user_input
local REPLY=$?
init_selected_script "$REPLY"
prompt_and_init_cleanup
}
# make sure script is not being sourced but instead executed
if [[ "$0" = "$BASH_SOURCE" ]]; then
main
fi
| true
|
dbeb8e1011e06d5704e1034cd80538d0c99abb87
|
Shell
|
ncsuandrew12/notk-bot
|
/bin/prepCommit.bash
|
UTF-8
| 218
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
# Stage everything, dump the staged diff to diff.diff, and show status -
# a pre-commit review helper.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
source "${DIR}/common.bash"
set -x
cd ${ROOT_DIR}
# 'git add *' skips dotfiles, so .gitignore is added explicitly after it.
git add *
git add .gitignore
# From here on, abort on failure so a bad diff isn't silently written.
set -e
git diff --cached > diff.diff
git status
| true
|
92c90b1120993f95437431ce16add00169107a1b
|
Shell
|
acookin/emissary
|
/build-aux/bin-sh/copy-ifchanged.sh
|
UTF-8
| 117
| 2.640625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Copyright 2019 Datawire. All rights reserved.
if ! cmp -s "$1" "$2"; then
cp -f "$1" "$2"
fi
| true
|
ac0438cdb7fb02fc32a74417e9c233adefa533b4
|
Shell
|
fiedl/dot-zsh
|
/oh-my-zsh-customization/plugins/gnuplot/gnuplot.plugin.zsh
|
UTF-8
| 345
| 3.4375
| 3
|
[] |
no_license
|
# Use gnuplot to plot something and display it right inside the terminal
# using `imgcat`.
#
# Examples:
#
# plot "sin(x)"
# cat data.txt |plot
#
alias imgcat="~/.iterm2/imgcat"
# Plot an expression (or piped data) with gnuplot and render it inline in
# the terminal via imgcat.
#   plot "sin(x)"        - plot an expression
#   cat data.txt | plot  - plot data from stdin ('<cat' reads stdin)
function plot() {
# Fix: keep the expression local so repeated calls don't leak a global
# plot_expression variable into the interactive shell.
local plot_expression="$*"
[[ -z $plot_expression ]] && plot_expression="'<cat'"
gnuplot -e "set term png; plot $plot_expression" |imgcat
}
| true
|
21d25f4da4d76640e1c27b43213f1cb5efa11292
|
Shell
|
phanirajl/CassandraKeyspaceSize
|
/CassandraTableSize.sh
|
UTF-8
| 1,890
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
#--- We need nrjmx-1.0.1-jar-with-dependencies.jar for this script to load JMX Metrics
# Usage: CassandraTableSize.sh <keyspace> <table>
# Fix: shebang changed from /bin/sh to bash - the script uses bash-only
# features ('function' keyword, [[ ]], let, local -i, arrays).
if [ "$1" != "" ]; then
echo "for keyspace $1 "
else
echo "Positional parameter 1 is empty"
fi
if [ "$2" != "" ]; then
echo "for table $2 "
else
# Fix: this branch checks $2, so report parameter 2 (was: "parameter 1").
echo "Positional parameter 2 is empty"
fi
# Start from a clean report file.
if [ -e /home/table.txt ]
then
rm -rf /home/table.txt
fi
# Convert a raw byte count into a human-readable size string.
# Uses ceiling division, so any remainder rounds the displayed value up.
# $1 - integer number of bytes; prints e.g. "500B", "2KB", "1GB".
function bytes_to_gb {
local -i size=$1
local -i kb=1024 mb=1048576 gb=1073741824 tb=1099511627776
if (( size < kb )); then
echo "${size}B"
elif (( size < mb )); then
echo "$(( (size + kb - 1) / kb ))KB"
elif (( size < gb )); then
echo "$(( (size + mb - 1) / mb ))MB"
elif (( size < tb )); then
echo "$(( (size + gb - 1) / gb ))GB"
else
echo "$(( (size + tb - 1) / tb ))TB"
fi
}
# Cluster name from cassandra.yaml; labels the report lines and the mail.
clustername=`cat /etc/cassandra/conf/cassandra.yaml |grep cluster_name |cut -f2 -d :`
keyspaces=("$1")
table=("$2")
for keyspace in "${keyspaces[@]}"; do
# Addresses of all nodes currently Up/Normal according to nodetool.
nodes=`nodetool status | grep UN | awk '{print $2}'|sort`
let SUMVAL=0
for node in $nodes; do
# Query the table's TotalDiskSpaceUsed MBean on each node via nrjmx and
# carve the numeric value out of the response with cut.
# NOTE(review): positional cut parsing is fragile - confirm against the
# exact nrjmx output format.
value=`echo "org.apache.cassandra.metrics:type=Table,keyspace=$keyspace,scope=$table,name=TotalDiskSpaceUsed" | java -jar /home/nrjmx-1.0.1-jar-with-dependencies.jar -hostname $node -port 7199 -username cassandra -password cassandra|cut -f5 -d ,|cut -f2 -d :|cut -f1 -d "}"`
let SUMVAL=$SUMVAL+$value
#echo $SUMVAL,$value
#echo $SUMVAL
gb="$(bytes_to_gb "$SUMVAL")"
#echo $gb,$keyspace,$clustername >> /home/table.txt
done
# One CSV report line per keyspace: size,keyspace,table,cluster.
echo $gb,$keyspace,$table,$clustername >> /home/table.txt
done
echo $'---------------------------------------------------' >> /home/table.txt
echo $'\n' >> /home/table.txt
#diskspace
file=/home/table.txt
# Mail the report via sendmail; the heredoc is the full message.
mailalert(){
/sbin/sendmail -F Cassandra -it <<END_MESSAGE
To: xxxx
Subject: Cassandra table size $clustername
$(cat $file)
END_MESSAGE
}
mailalert
| true
|
cda74faf72fc69626d900b4b7397fdd2c180e32c
|
Shell
|
weaviate/weaviate
|
/tools/dev/run_dev_server.sh
|
UTF-8
| 18,289
| 2.6875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Launch a local development weaviate server; $1 selects a preset config
# (default: local-development).
CONFIG=${1:-local-development}
# Jump to root directory
cd "$( dirname "${BASH_SOURCE[0]}" )"/../.. || exit 1
export GO111MODULE=on
# Defaults below can all be overridden from the caller's environment.
export LOG_LEVEL=${LOG_LEVEL:-"debug"}
export LOG_FORMAT=${LOG_FORMAT:-"text"}
export PROMETHEUS_MONITORING_ENABLED=${PROMETHEUS_MONITORING_ENABLED:-"true"}
export GO_BLOCK_PROFILE_RATE=${GO_BLOCK_PROFILE_RATE:-"20"}
export GO_MUTEX_PROFILE_FRACTION=${GO_MUTEX_PROFILE_FRACTION:-"20"}
export PERSISTENCE_DATA_PATH=${PERSISTENCE_DATA_PATH:-"./data"}
export ORIGIN=${ORIGIN:-"http://localhost:8080"}
export QUERY_DEFAULTS_LIMIT=${QUERY_DEFAULTS_LIMIT:-"20"}
export QUERY_MAXIMUM_RESULTS=${QUERY_MAXIMUM_RESULTS:-"10000"}
export TRACK_VECTOR_DIMENSIONS=true
export CLUSTER_HOSTNAME=${CLUSTER_HOSTNAME:-"node1"}
# Run a go command with the current git short hash baked into the binary
# via -ldflags, so the dev server reports its build revision.
function go_run() {
GIT_HASH=$(git rev-parse --short HEAD)
go run -ldflags "-X github.com/weaviate/weaviate/usecases/config.GitHash=$GIT_HASH" "$@"
}
case $CONFIG in
debug)
CONTEXTIONARY_URL=localhost:9999 \
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true \
DEFAULT_VECTORIZER_MODULE=text2vec-contextionary \
ENABLE_MODULES="text2vec-contextionary" \
dlv debug ./cmd/weaviate-server -- \
--scheme http \
--host "127.0.0.1" \
--port 8080 \
--read-timeout=600s \
--write-timeout=600s
;;
local-development)
CONTEXTIONARY_URL=localhost:9999 \
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true \
DEFAULT_VECTORIZER_MODULE=text2vec-contextionary \
BACKUP_FILESYSTEM_PATH="${PWD}/backups" \
ENABLE_MODULES="text2vec-contextionary,backup-filesystem" \
CLUSTER_GOSSIP_BIND_PORT="7100" \
CLUSTER_DATA_BIND_PORT="7101" \
LOG_LEVEL="trace" \
go_run ./cmd/weaviate-server \
--scheme http \
--host "127.0.0.1" \
--port 8080 \
--read-timeout=600s \
--write-timeout=600s
;;
second-node)
GRPC_PORT=50052 \
CONTEXTIONARY_URL=localhost:9999 \
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true \
PERSISTENCE_DATA_PATH="./data-node2" \
BACKUP_FILESYSTEM_PATH="${PWD}/backups-node2" \
CLUSTER_HOSTNAME="node2" \
CLUSTER_GOSSIP_BIND_PORT="7102" \
CLUSTER_DATA_BIND_PORT="7103" \
CLUSTER_JOIN="localhost:7100" \
CONTEXTIONARY_URL=localhost:9999 \
DEFAULT_VECTORIZER_MODULE=text2vec-contextionary \
ENABLE_MODULES="text2vec-contextionary,backup-filesystem" \
go_run ./cmd/weaviate-server \
--scheme http \
--host "127.0.0.1" \
--port 8081 \
--read-timeout=600s \
--write-timeout=600s
;;
third-node)
GRPC_PORT=50053 \
CONTEXTIONARY_URL=localhost:9999 \
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true \
PERSISTENCE_DATA_PATH="${PERSISTENCE_DATA_PATH}-node3" \
CLUSTER_HOSTNAME="node3" \
CLUSTER_GOSSIP_BIND_PORT="7104" \
CLUSTER_DATA_BIND_PORT="7105" \
CLUSTER_JOIN="localhost:7100" \
CONTEXTIONARY_URL=localhost:9999 \
DEFAULT_VECTORIZER_MODULE=text2vec-contextionary \
ENABLE_MODULES="text2vec-contextionary" \
go_run ./cmd/weaviate-server \
--scheme http \
--host "127.0.0.1" \
--port 8082 \
--read-timeout=600s \
--write-timeout=600s
;;
fourth-node)
GRPC_PORT=50054 \
CONTEXTIONARY_URL=localhost:9999 \
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true \
PERSISTENCE_DATA_PATH="${PERSISTENCE_DATA_PATH}-node4" \
CLUSTER_HOSTNAME="node4" \
CLUSTER_GOSSIP_BIND_PORT="7106" \
CLUSTER_DATA_BIND_PORT="7107" \
CLUSTER_JOIN="localhost:7100" \
CONTEXTIONARY_URL=localhost:9999 \
DEFAULT_VECTORIZER_MODULE=text2vec-contextionary \
ENABLE_MODULES="text2vec-contextionary" \
go_run ./cmd/weaviate-server \
--scheme http \
--host "127.0.0.1" \
--port 8083 \
--read-timeout=600s \
--write-timeout=600s
;;
local-transformers)
CONTEXTIONARY_URL=localhost:9999 \
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true \
DEFAULT_VECTORIZER_MODULE=text2vec-transformers \
TRANSFORMERS_INFERENCE_API="http://localhost:8000" \
ENABLE_MODULES="text2vec-transformers" \
go_run ./cmd/weaviate-server \
--scheme http \
--host "127.0.0.1" \
--port 8080 \
--read-timeout=600s \
--write-timeout=600s
;;
local-transformers-passage-query)
CONTEXTIONARY_URL=localhost:9999 \
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true \
DEFAULT_VECTORIZER_MODULE=text2vec-transformers \
TRANSFORMERS_PASSAGE_INFERENCE_API="http://localhost:8006" \
TRANSFORMERS_QUERY_INFERENCE_API="http://localhost:8007" \
ENABLE_MODULES="text2vec-transformers" \
go_run ./cmd/weaviate-server \
--scheme http \
--host "127.0.0.1" \
--port 8080 \
--read-timeout=600s \
--write-timeout=600s
;;
local-qna)
CONTEXTIONARY_URL=localhost:9999 \
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true \
DEFAULT_VECTORIZER_MODULE=text2vec-contextionary \
QNA_INFERENCE_API="http://localhost:8001" \
ENABLE_MODULES="text2vec-contextionary,qna-transformers" \
go_run ./cmd/weaviate-server \
--scheme http \
--host "127.0.0.1" \
--port 8080 \
--read-timeout=600s \
--write-timeout=600s
;;
local-sum)
CONTEXTIONARY_URL=localhost:9999 \
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true \
DEFAULT_VECTORIZER_MODULE=text2vec-contextionary \
SUM_INFERENCE_API="http://localhost:8008" \
ENABLE_MODULES="text2vec-contextionary,sum-transformers" \
go_run ./cmd/weaviate-server \
--scheme http \
--host "127.0.0.1" \
--port 8080 \
--read-timeout=600s \
--write-timeout=600s
;;
local-image)
CONTEXTIONARY_URL=localhost:9999 \
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true \
DEFAULT_VECTORIZER_MODULE=text2vec-contextionary \
IMAGE_INFERENCE_API="http://localhost:8002" \
ENABLE_MODULES="text2vec-contextionary,img2vec-neural" \
go_run ./cmd/weaviate-server \
--scheme http \
--host "127.0.0.1" \
--port 8080 \
--read-timeout=600s \
--write-timeout=600s
;;
local-ner)
CONTEXTIONARY_URL=localhost:9999 \
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true \
DEFAULT_VECTORIZER_MODULE=text2vec-contextionary \
NER_INFERENCE_API="http://localhost:8003" \
ENABLE_MODULES="text2vec-contextionary,ner-transformers" \
go_run ./cmd/weaviate-server \
--scheme http \
--host "127.0.0.1" \
--port 8080 \
--read-timeout=600s \
--write-timeout=600s
;;
local-spellcheck)
CONTEXTIONARY_URL=localhost:9999 \
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true \
DEFAULT_VECTORIZER_MODULE=text2vec-contextionary \
SPELLCHECK_INFERENCE_API="http://localhost:8004" \
ENABLE_MODULES="text2vec-contextionary,text-spellcheck" \
go_run ./cmd/weaviate-server \
--scheme http \
--host "127.0.0.1" \
--port 8080 \
--read-timeout=600s \
--write-timeout=600s
;;
local-clip)
CONTEXTIONARY_URL=localhost:9999 \
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true \
DEFAULT_VECTORIZER_MODULE=multi2vec-clip \
CLIP_INFERENCE_API="http://localhost:8005" \
ENABLE_MODULES="multi2vec-clip" \
go_run ./cmd/weaviate-server \
--scheme http \
--host "127.0.0.1" \
--port 8080 \
--read-timeout=600s \
--write-timeout=600s
;;
local-bind)
CONTEXTIONARY_URL=localhost:9999 \
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true \
DEFAULT_VECTORIZER_MODULE=multi2vec-bind \
BIND_INFERENCE_API="http://localhost:8011" \
ENABLE_MODULES="multi2vec-bind" \
go_run ./cmd/weaviate-server \
--scheme http \
--host "127.0.0.1" \
--port 8080 \
--read-timeout=600s \
--write-timeout=600s
;;
local-oidc)
CONTEXTIONARY_URL=localhost:9999 \
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=false \
AUTHENTICATION_OIDC_ENABLED=true \
AUTHENTICATION_OIDC_ISSUER=http://localhost:9090/auth/realms/weaviate \
AUTHENTICATION_OIDC_USERNAME_CLAIM=email \
AUTHENTICATION_OIDC_GROUPS_CLAIM=groups \
AUTHENTICATION_OIDC_CLIENT_ID=demo \
AUTHORIZATION_ADMINLIST_ENABLED=true \
AUTHORIZATION_ADMINLIST_USERS=john@doe.com \
DEFAULT_VECTORIZER_MODULE=text2vec-contextionary \
go_run ./cmd/weaviate-server \
--scheme http \
--host "127.0.0.1" \
--port 8080
;;
local-apikey)
AUTHENTICATION_APIKEY_ENABLED=true \
AUTHENTICATION_APIKEY_ALLOWED_KEYS=my-secret-key \
AUTHENTICATION_APIKEY_USERS=john@doe.com \
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=false \
AUTHORIZATION_ADMINLIST_ENABLED=true \
AUTHORIZATION_ADMINLIST_USERS=john@doe.com \
DEFAULT_VECTORIZER_MODULE=none \
go_run ./cmd/weaviate-server \
--scheme http \
--host "127.0.0.1" \
--port 8080
;;
local-wcs-oidc-and-apikey)
# WCS-style config: API-key auth and OIDC enabled together, anonymous off.
# Fix: the OIDC issuer line previously ended in "SeMI\" with no space
# before the continuation backslash, which glued the next assignment onto
# the issuer value and dropped AUTHENTICATION_OIDC_USERNAME_CLAIM entirely.
AUTHENTICATION_APIKEY_ENABLED=true \
AUTHENTICATION_APIKEY_ALLOWED_KEYS=my-secret-key,my-secret-read-only-key \
AUTHENTICATION_APIKEY_USERS=etienne@semi.technology,etienne+read-only@semi.technology \
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=false \
AUTHENTICATION_OIDC_ENABLED=true \
AUTHENTICATION_OIDC_ISSUER=https://auth.wcs.api.weaviate.io/auth/realms/SeMI \
AUTHENTICATION_OIDC_USERNAME_CLAIM=email \
AUTHENTICATION_OIDC_GROUPS_CLAIM=groups \
AUTHENTICATION_OIDC_CLIENT_ID=wcs \
AUTHORIZATION_ADMINLIST_ENABLED=true \
AUTHORIZATION_ADMINLIST_USERS=etienne@semi.technology \
AUTHORIZATION_ADMINLIST_READONLY_USERS=etienne+read-only@semi.technology \
DEFAULT_VECTORIZER_MODULE=none \
go_run ./cmd/weaviate-server \
--scheme http \
--host "127.0.0.1" \
--port 8080
;;
local-multi-text)
CONTEXTIONARY_URL=localhost:9999 \
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true \
DEFAULT_VECTORIZER_MODULE=text2vec-contextionary \
TRANSFORMERS_INFERENCE_API=http://localhost:8000 \
CLIP_INFERENCE_API=http://localhost:8005 \
ENABLE_MODULES=text2vec-contextionary,text2vec-transformers,multi2vec-clip \
go_run ./cmd/weaviate-server \
--scheme http \
--host "127.0.0.1" \
--port 8080
;;
local-openai)
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true \
DEFAULT_VECTORIZER_MODULE=text2vec-openai \
ENABLE_MODULES="text2vec-openai" \
go_run ./cmd/weaviate-server \
--scheme http \
--host "127.0.0.1" \
--port 8080 \
--read-timeout=600s \
--write-timeout=600s
;;
local-qna-openai)
CONTEXTIONARY_URL=localhost:9999 \
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true \
DEFAULT_VECTORIZER_MODULE=text2vec-contextionary \
QNA_INFERENCE_API="http://localhost:8001" \
CLUSTER_HOSTNAME="node1" \
ENABLE_MODULES="text2vec-contextionary,qna-openai" \
go_run ./cmd/weaviate-server \
--scheme http \
--host "127.0.0.1" \
--port 8080 \
--read-timeout=600s \
--write-timeout=600s
;;
local-generative-openai)
CONTEXTIONARY_URL=localhost:9999 \
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true \
DEFAULT_VECTORIZER_MODULE=text2vec-contextionary \
QNA_INFERENCE_API="http://localhost:8001" \
CLUSTER_HOSTNAME="node1" \
ENABLE_MODULES="text2vec-contextionary,generative-openai" \
go_run ./cmd/weaviate-server \
--scheme http \
--host "127.0.0.1" \
--port 8080 \
--read-timeout=600s \
--write-timeout=600s
;;
local-all-openai)
CONTEXTIONARY_URL=localhost:9999 \
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true \
DEFAULT_VECTORIZER_MODULE=text2vec-contextionary \
QNA_INFERENCE_API="http://localhost:8001" \
CLUSTER_HOSTNAME="node1" \
ENABLE_MODULES="text2vec-contextionary,qna-openai,generative-openai,text2vec-openai" \
go_run ./cmd/weaviate-server \
--scheme http \
--host "127.0.0.1" \
--port 8080 \
--read-timeout=600s \
--write-timeout=600s
;;
local-all-palm)
CONTEXTIONARY_URL=localhost:9999 \
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true \
DEFAULT_VECTORIZER_MODULE=text2vec-contextionary \
QNA_INFERENCE_API="http://localhost:8001" \
CLUSTER_HOSTNAME="node1" \
ENABLE_MODULES="text2vec-contextionary,generative-palm,text2vec-palm" \
go_run ./cmd/weaviate-server \
--scheme http \
--host "127.0.0.1" \
--port 8080 \
--read-timeout=600s \
--write-timeout=600s
;;
local-all-openai-cohere-palm)
CONTEXTIONARY_URL=localhost:9999 \
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true \
DEFAULT_VECTORIZER_MODULE=text2vec-contextionary \
QNA_INFERENCE_API="http://localhost:8001" \
CLUSTER_HOSTNAME="node1" \
ENABLE_MODULES="text2vec-contextionary,generative-palm,text2vec-palm,qna-openai,generative-openai,text2vec-openai,generative-cohere,text2vec-cohere,reranker-cohere" \
go_run ./cmd/weaviate-server \
--scheme http \
--host "127.0.0.1" \
--port 8080 \
--read-timeout=600s \
--write-timeout=600s
;;
local-huggingface)
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true \
DEFAULT_VECTORIZER_MODULE=text2vec-huggingface \
ENABLE_MODULES="text2vec-huggingface" \
go_run ./cmd/weaviate-server \
--scheme http \
--host "127.0.0.1" \
--port 8080 \
--read-timeout=600s \
--write-timeout=600s
;;
local-no-modules)
CLUSTER_GOSSIP_BIND_PORT="7100" \
CLUSTER_DATA_BIND_PORT="7101" \
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true \
DEFAULT_VECTORIZER_MODULE=none \
go_run ./cmd/weaviate-server \
--scheme http \
--host "127.0.0.1" \
--port 8080 \
--read-timeout=3600s \
--write-timeout=3600s
;;
local-centroid)
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true \
ENABLE_MODULES="ref2vec-centroid" \
go_run ./cmd/weaviate-server \
--scheme http \
--host "127.0.0.1" \
--port 8080 \
--read-timeout=3600s \
--write-timeout=3600s
;;
local-s3)
CONTEXTIONARY_URL=localhost:9999 \
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true \
DEFAULT_VECTORIZER_MODULE=text2vec-contextionary \
BACKUP_S3_ENDPOINT="localhost:9000" \
BACKUP_S3_USE_SSL="false" \
BACKUP_S3_BUCKET="weaviate-backups" \
AWS_ACCESS_KEY_ID="aws_access_key" \
AWS_SECRET_KEY="aws_secret_key" \
ENABLE_MODULES="text2vec-contextionary,backup-s3" \
CLUSTER_GOSSIP_BIND_PORT="7100" \
CLUSTER_DATA_BIND_PORT="7101" \
go_run ./cmd/weaviate-server \
--scheme http \
--host "127.0.0.1" \
--port 8080 \
--read-timeout=600s \
--write-timeout=600s
;;
local-gcs)
CONTEXTIONARY_URL=localhost:9999 \
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true \
DEFAULT_VECTORIZER_MODULE=text2vec-contextionary \
GOOGLE_CLOUD_PROJECT=project-id \
STORAGE_EMULATOR_HOST=localhost:9090 \
BACKUP_GCS_ENDPOINT=localhost:9090 \
BACKUP_GCS_BUCKET=weaviate-backups \
ENABLE_MODULES="text2vec-contextionary,backup-gcs" \
CLUSTER_GOSSIP_BIND_PORT="7100" \
CLUSTER_DATA_BIND_PORT="7101" \
go_run ./cmd/weaviate-server \
--scheme http \
--host "127.0.0.1" \
--port 8080 \
--read-timeout=600s \
--write-timeout=600s
;;
local-azure)
CONTEXTIONARY_URL=localhost:9999 \
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true \
DEFAULT_VECTORIZER_MODULE=text2vec-contextionary \
BACKUP_AZURE_CONTAINER=weaviate-container \
AZURE_STORAGE_CONNECTION_STRING="DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://127.0.0.1:10000/devstoreaccount1;" \
ENABLE_MODULES="text2vec-contextionary,backup-azure" \
CLUSTER_GOSSIP_BIND_PORT="7100" \
CLUSTER_DATA_BIND_PORT="7101" \
go_run ./cmd/weaviate-server \
--scheme http \
--host "127.0.0.1" \
--port 8080 \
--read-timeout=600s \
--write-timeout=600s
;;
local-cohere)
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true \
DEFAULT_VECTORIZER_MODULE=text2vec-cohere \
ENABLE_MODULES="text2vec-cohere" \
go_run ./cmd/weaviate-server \
--scheme http \
--host "127.0.0.1" \
--port 8080 \
--read-timeout=600s \
--write-timeout=600s
;;
local-all-cohere)
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true \
DEFAULT_VECTORIZER_MODULE=text2vec-cohere \
ENABLE_MODULES="text2vec-cohere,reranker-cohere,generative-cohere" \
go_run ./cmd/weaviate-server \
--scheme http \
--host "127.0.0.1" \
--port 8080 \
--read-timeout=600s \
--write-timeout=600s
;;
local-reranker-transformers)
CONTEXTIONARY_URL=localhost:9999 \
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true \
DEFAULT_VECTORIZER_MODULE=text2vec-contextionary \
RERANKER_INFERENCE_API="http://localhost:8009" \
ENABLE_MODULES="text2vec-contextionary,reranker-transformers" \
go_run ./cmd/weaviate-server \
--scheme http \
--host "127.0.0.1" \
--port 8080 \
--read-timeout=600s \
--write-timeout=600s
;;
local-gpt4all)
AUTHENTICATION_ANONYMOUS_ACCESS_ENABLED=true \
DEFAULT_VECTORIZER_MODULE=text2vec-gpt4all \
GPT4ALL_INFERENCE_API="http://localhost:8010" \
ENABLE_MODULES="text2vec-gpt4all" \
go_run ./cmd/weaviate-server \
--scheme http \
--host "127.0.0.1" \
--port 8080 \
--read-timeout=600s \
--write-timeout=600s
;;
*)
echo "Invalid config" 2>&1
exit 1
;;
esac
| true
|
a0d83b6b35726838aeb9b57dfcb0fabfcdeb925c
|
Shell
|
jnayak1/kmp-mpc
|
/tests/test_client.sh
|
UTF-8
| 789
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
# Benchmark the MPC string-matching client: sweep text length with a fixed
# small pattern, then sweep pattern length against a fixed large text.
# Each ./a.out run appends one CSV row to the corresponding results file.
sleep_time=1
host="localhost"

# --- Sweep text length (pattern fixed at 4) ---------------------------------
rm -f text_length.csv                     # -f: do not fail if absent
touch text_length.csv
echo "match, pattern length, text length, yao gate count, time" >> text_length.csv
tlen=8
for i in $(seq 1 14); do
    MAX_PATTERN_LENGTH=4 MAX_TEXT_LENGTH=$tlen make
    ./a.out 2345 "$host" text.txt >> text_length.csv
    # BUG FIX: 'tlen=$tlen*2' concatenated strings ("8*2", "8*2*2", ...),
    # so MAX_TEXT_LENGTH never actually doubled. Use arithmetic expansion.
    tlen=$((tlen * 2))
    sleep "$sleep_time"
done

# --- Sweep pattern length (text fixed at 8192) ------------------------------
rm -f pattern_length.csv
touch pattern_length.csv
echo "match, pattern length, text length, yao gate count, time" >> pattern_length.csv
plen=1
for i in $(seq 1 8); do
    MAX_PATTERN_LENGTH=$plen MAX_TEXT_LENGTH=8192 make
    ./a.out 2345 "$host" text.txt >> pattern_length.csv
    # BUG FIX: same string-concatenation bug as above.
    plen=$((plen * 2))
    sleep "$sleep_time"
done
| true
|
e7824872d02b619688d713c278a12657f189f7fb
|
Shell
|
ddeswart/vRA
|
/SoftwareComponents/Kubernetes-kubeadm/K8S-InstallDashboard.sh
|
UTF-8
| 629
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
# Source: https://virtualhobbit.com/2018/02/05/deploying-kubernetes-with-vrealize-automation/
# Dimitri de Swart - VMguru
# Version 1.0
# 19 July 2018
#
# Installs the Kubernetes Dashboard when the vRA property $dashboard is true.
# Expects in the environment: $dashboard (flag) and $url (dashboard manifest).

# Log $PATH
echo "Intial PATH = $PATH"

# Update PATH
export PATH=$PATH:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin/:/root/bin
echo "New PATH = $PATH"

# Checks to see if Kubernetes Dashboard is required.
# BUG FIX: the unquoted '[ $dashboard == true ]' aborted with a syntax error
# whenever $dashboard was unset or empty; quote it and default to "false".
if [ "${dashboard:-false}" = "true" ]; then
	echo "Kubernetes Dashboard is required, installing"

	# Set admin variable
	export KUBECONFIG=/etc/kubernetes/admin.conf

	# Deploy the dashboard
	kubectl create -f "$url"
else
	echo "Kubernetes Dashboard is not required"
fi
| true
|
604c3edc8c009e67243acd47fc4f2fa13228d443
|
Shell
|
pawelk82/HackerRank
|
/Text_Processing/Head_of_a_Text_File_2
|
UTF-8
| 181
| 2.8125
| 3
|
[] |
no_license
|
#! /bin/bash
#In this challenge, we practice using the head command to display the first n characters of a text file.
#Display the first 20 characters of an input file.
# head -c N emits the first N *bytes* of stdin (bytes == characters for
# ASCII input); no trailing newline is added unless one falls in range.
head -c 20
| true
|
734ddd929e4f4d780e0c05407c431f313fb91da7
|
Shell
|
lazaropower/practicas-ISO
|
/practicas/practica3/ejemplo_break.sh
|
UTF-8
| 167
| 3.375
| 3
|
[] |
no_license
|
#! /bin/bash -u
# Print each positional parameter in turn; stop as soon as one equals "f".
# (Example script demonstrating `break`.)
while [ $# -gt 0 ]
do
	# BUG FIX: quote "$1" - the bare '[ $1 = "f" ]' was a syntax error for
	# empty arguments and mis-parsed arguments containing spaces.
	if [ "$1" = "f" ]
	then
		break
	fi
	echo "Parámetro: $1"
	shift
done
| true
|
09ae535e4fd29fdfc20011f5ad73b08669e9baa2
|
Shell
|
Ignotus/langdetect
|
/highway_to_hell/test.sh
|
UTF-8
| 497
| 3.53125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Evaluate trigram_calc language detection against the labelled test set.
# Test files are named testset/<lang>-<name>.txt; the expected <lang> is
# compared with the detector's answer and a running accuracy is printed.

# Unpack a fresh copy of the test set.
rm -rf testset
mkdir testset
cp ../test.zip testset
pushd testset
unzip test.zip
rm test.zip
popd

correct=0
processed=0
for file in testset/*.txt; do
    # $(( )) replaces the deprecated 'let' builtin.
    processed=$((processed + 1))
    # Expected language: second whitespace token once "/" and "-" are
    # replaced, i.e. the <lang> part of "testset/<lang>-<name>.txt".
    # "$file" is quoted so paths with spaces do not word-split.
    LAN1=$(echo "$file" | sed 's/[\/-]/ /g' | awk '{print tolower($2)}')
    LAN2=$(./trigram_calc -detect-one-answer "$file")
    echo "$LAN1" "$LAN2"
    if [ "$LAN1" == "$LAN2" ]; then
        correct=$((correct + 1))
    fi
    # Running accuracy so far (bc -l for decimal division).
    echo "$correct/$processed" | bc -l
done
echo "Result: "
echo "$correct/1000" | bc -l
| true
|
66ffcb59e1f22f04c2b43659714d9b405c2d0bc3
|
Shell
|
kviktorman/clientOnline
|
/BashScripts/client-online.sh
|
UTF-8
| 1,550
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
# Report this client's identity (MAC/IP) to the maintainer server.
# Usage: client-online.sh <interface>

#set interface
interface=$1

#get interface MAC and IP address
addressIP=$(ifconfig "$interface" | grep "inet addr" | cut -d ':' -f 2 | cut -d ' ' -f 1)
addressMAC=$(cat "/sys/class/net/$interface/address")

#read configuration settings
CONFIGURATION=$(cat ./configuration/server.json)
# FIX: jq -r emits raw strings, so the fragile ${VAR:1:-1} quote-stripping
# hack (which broke on empty/short values) is no longer needed.
SERVERURL=$(echo "$CONFIGURATION" | jq -r ".maintainerURL")
LOGFILE=$(echo "$CONFIGURATION" | jq -r ".clientLogFile")

# Build the JSON identification payload.
messageRequest="{\"messageName\":\"Identification\","
messageRequest="$messageRequest \"addressMAC\":\"$addressMAC\","
messageRequest="$messageRequest \"addressIP\":\"$addressIP\","
messageRequest="$messageRequest \"idHW\":\"xxxx-xxxx-xxxx\""
messageRequest="$messageRequest}"

echo "[$(date +'%Y.%m.%d %T') - client-online] MAC: $addressMAC" >> "$LOGFILE"
echo "[$(date +'%Y.%m.%d %T') - client-online] IP: $addressIP" >> "$LOGFILE"

#send for server folder list and version.json if exist
identification=$(curl -H "Accept: application/json" -H "Content-Type: application/json" -X POST -d "$messageRequest" "$SERVERURL")

status=$(echo "$identification" | jq ".status")
# FIX: "$status" quoted - the bare test errored out when the response had
# no .status field (empty string).
if [ "$status" -eq 0 ]; then
	echo "[$(date +'%Y.%m.%d %T') - client-online] identification sent" >> "$LOGFILE"
else
	errorMSG=$(echo "$identification" | jq ".msg")
	echo "[$(date +'%Y.%m.%d %T') - client-online] Error code: $status" >> "$LOGFILE"
	echo "[$(date +'%Y.%m.%d %T') - client-online] Error message: $errorMSG" >> "$LOGFILE"
fi
exit 0
| true
|
a65164eb38712842ff5cc0acdc15f0203ee6f06a
|
Shell
|
dubanoze/actility_gw
|
/lrr/com/cmd_shells/restart.sh
|
UTF-8
| 190
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/sh
# Schedule an lrr restart after a delay.
# Usage: restart.sh [delay_seconds]   (default: 15)
DELAY="${1:-15}"
echo "lrr will be restarted in $DELAY sec"
# Detach the helper so this script can return immediately.
nohup $ROOTACT/lrr/com/cmd_shells/restart_pending.sh $DELAY > /dev/null 2>&1 &
exit 0
| true
|
423ef82c1fa40e3003ab601e6956e1751d9a1522
|
Shell
|
Thomaash/SnakeOn7Segment
|
/Graphics/game/leds/downscale.sh
|
UTF-8
| 347
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/zsh
# Generate downscaled copies (120/80/40 px) of every LED image in the
# 160px master set, one output directory per target size.
cd "/home/tom/soubory/Workplace/IDEA/SnakeOn7Segment/Graphics/game/leds"
src="160"
cd "$src"
for file in *; do
	for width in 120 80 40; do
		target="../$width"
		mkdir -p "$target"
		# Names starting with "h" are horizontal strips: constrain by
		# width; everything else is constrained by height ("x<size>").
		if [[ "$file" == h* ]]; then
			geometry="$width"
		else
			geometry="x$width"
		fi
		convert "$file" -resize "$geometry" -quality 0 "$target/$file"
	done
done
|
2daa4bd8150c49ae604c85764f321325f3b8f630
|
Shell
|
Slackwaremx/slackbuilds
|
/libraries/lapack/lapack.SlackBuild
|
UTF-8
| 4,117
| 3.625
| 4
|
[] |
no_license
|
#!/bin/sh
# Slackware build script for LAPACK
# Copyright 2014 Kyle Guinn <elyk03@gmail.com>, USA
# All rights reserved.
#
# Redistribution and use of this script, with or without modification, is
# permitted provided that the following conditions are met:
#
# 1. Redistributions of this script must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
PRGNAM=lapack
VERSION=${VERSION:-3.5.0}
BUILD=${BUILD:-1}
TAG=${TAG:-_SBo}
# Auto-detect ARCH when the caller did not export one.
if [ -z "$ARCH" ]; then
case "$(uname -m)" in
i?86) ARCH=i486 ;;
arm*) ARCH=arm ;;
*) ARCH=$(uname -m) ;;
esac
fi
CWD=$(pwd)
TMP=${TMP:-/tmp/SBo}
PKG=$TMP/package-$PRGNAM
OUTPUT=${OUTPUT:-/tmp}
DOCS="LICENSE README"
# Per-arch compiler flags and library directory suffix (lib vs lib64).
if [ "$ARCH" = "i486" ]; then
SLKCFLAGS="-O2 -march=i486 -mtune=i686"
LIBDIRSUFFIX=""
elif [ "$ARCH" = "i686" ]; then
SLKCFLAGS="-O2 -march=i686 -mtune=i686"
LIBDIRSUFFIX=""
elif [ "$ARCH" = "x86_64" ]; then
SLKCFLAGS="-O2 -fPIC"
LIBDIRSUFFIX="64"
else
SLKCFLAGS="-O2"
LIBDIRSUFFIX=""
fi
# Abort on any command failure from here on.
set -e
# Fresh extraction of the source tarball with sane ownership/permissions.
rm -rf $PKG
mkdir -p $TMP $PKG $OUTPUT
cd $TMP
rm -rf $PRGNAM-$VERSION
tar xvf $CWD/$PRGNAM-$VERSION.tgz
cd $PRGNAM-$VERSION
chown -R root:root .
chmod -R u+w,go-w,a+rX-st .
# `make install` compiles and installs tmglib, along with many extra tests,
# so install files manually instead.
#
# Avoid adding an RPATH entry to the shared lib. It's unnecessary, and
# since we're not running `make install` we don't want to end up with
# a temporary RPATH that refers to the build directory.
#
# WARNING: The SONAME is "liblapack.so", not "liblapack.so.0" nor
# "liblapack.so.$MAJOR" as crafted in previous versions of this script.
# I don't know if this is intentional or an omission by upstream.
mkdir -p shared
cd shared
cmake \
-DCMAKE_Fortran_FLAGS:STRING="$SLKCFLAGS" \
-DCMAKE_INSTALL_PREFIX=/usr \
-DLIB_SUFFIX=${LIBDIRSUFFIX} \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_RULE_MESSAGES=OFF \
-DCMAKE_VERBOSE_MAKEFILE=TRUE \
-DUSE_OPTIMIZED_BLAS=ON \
-DBUILD_SHARED_LIBS=ON \
-DCMAKE_SKIP_RPATH=YES \
..
make lapack/fast
install -D -m0755 -s lib/liblapack.so $PKG/usr/lib${LIBDIRSUFFIX}/liblapack.so
install -D -m0644 lapack.pc $PKG/usr/lib${LIBDIRSUFFIX}/pkgconfig/lapack.pc
cd ..
# cmake doesn't appear to let us build both shared and static libs
# at the same time, so build it twice. This may build a non-PIC library
# on some architectures, which should be faster.
mkdir -p static
cd static
cmake \
-DCMAKE_Fortran_FLAGS:STRING="$SLKCFLAGS" \
-DCMAKE_INSTALL_PREFIX=/usr \
-DLIB_SUFFIX=${LIBDIRSUFFIX} \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_RULE_MESSAGES=OFF \
-DCMAKE_VERBOSE_MAKEFILE=TRUE \
-DUSE_OPTIMIZED_BLAS=ON \
..
make lapack/fast
install -D -m0644 lib/liblapack.a $PKG/usr/lib${LIBDIRSUFFIX}/liblapack.a
cd ..
# Fix the pkg-config file:
# 1. Version is wrong.
# 2. -L is hard-coded when a variable exists.
sed -i \
-e "/^Version:/cVersion: ${VERSION}" \
-e "/^Libs:/s/-L[^ ]*/-L\${libdir}/" \
$PKG/usr/lib${LIBDIRSUFFIX}/pkgconfig/lapack.pc
# Install docs, a copy of this build script, and the package description.
mkdir -p $PKG/usr/doc/$PRGNAM-$VERSION
cp -a $DOCS $PKG/usr/doc/$PRGNAM-$VERSION
cat $CWD/$PRGNAM.SlackBuild > $PKG/usr/doc/$PRGNAM-$VERSION/$PRGNAM.SlackBuild
mkdir -p $PKG/install
cat $CWD/slack-desc > $PKG/install/slack-desc
# Assemble the final Slackware package.
cd $PKG
/sbin/makepkg -l y -c n $OUTPUT/$PRGNAM-$VERSION-$ARCH-$BUILD$TAG.${PKGTYPE:-tgz}
| true
|
de838afcff6b94021628320315cff221620b263b
|
Shell
|
derFunk/awscli-versionized-docker-images
|
/build_and_push_all.sh
|
UTF-8
| 2,895
| 4.09375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Build and push a Docker image for every released aws-cli version that is
# not yet present in the derfunk/awscli-versionized Docker Hub repository.
PUSH_EXISTING=${PUSH_EXISTING:-false}
REPO=derfunk/awscli-versionized
set -e

# Get all versioned aws cli tags already uploaded (paged Docker Hub API).
i="1"
echo -n > awscli-versioned-versions.txt
while true; do
    echo "Fetching page ${i} of already existing versionized aws cli tags..."
    if ! curl -fs -o awscli-versioned-tmp.json https://hub.docker.com/v2/repositories/${REPO}/tags/\?page_size\=100\&page\=${i}; then
        echo "Page ${i} did not exist anymore, continuing..."
        break;
    fi
    # FIX: '[0-9]' replaces the non-portable '\d' (undefined in POSIX/GNU
    # BRE); the dots are escaped so "1.10.55" is matched literally.
    jq -r ".results | .[].name" awscli-versioned-tmp.json | grep '[0-9]\+\.[0-9]\+\.[0-9]\+' >> awscli-versioned-versions.txt
    # FIX: $(( )) replaces the long-deprecated $[ ] arithmetic syntax.
    i=$((i + 1))
done
rm -f awscli-versioned-tmp.json

# Get current .changes tree SHA
JSON_MASTER=$(curl -fs https://api.github.com/repos/aws/aws-cli/git/trees/master)
CHANGES_JSON_URL=$(echo "${JSON_MASTER}" | jq -r '.tree[] | select(.path==".changes" and .type=="tree") | .url')

# Get all official aws cli versions (one changelog JSON per release).
JSON_CHANGES=$(curl -fs "${CHANGES_JSON_URL}")
echo "${JSON_CHANGES}" | jq -r ".tree | .[].path" | grep '[0-9]\+.*\.json' | sed "s/\.json//g" > awscli-versions_unsorted.txt

# Sort versions numerically per component, newest first.
sort -t. -k 1,1nr -k 2,2nr -k 3,3nr -k 4,4nr awscli-versions_unsorted.txt > awscli-versions.txt
rm awscli-versions_unsorted.txt

BASE_BUILT=false
LATEST_TAGGED=false
PUSHED_IMAGES=
IFS=$'\n'
# FIX: read -r (keep backslashes literal); the loop now reads the file
# directly instead of the useless '<<< $(cat ...)'.
while read -r version
do
    # We know that awscli version 1.10.55 is broken in pip.
    if [ "${version}" = "1.10.55" ]; then continue; fi

    # only push to the versioned aws cli repo if it's not available online yet
    if [ "${PUSH_EXISTING}" = "true" ] || ! grep -q "^${version}$" awscli-versioned-versions.txt; then
        if [ "${BASE_BUILT}" = "false" ]; then
            echo "Creating local base image..."
            docker build --force-rm -t awscli-versionized-base:latest -f Dockerfile.base .
            BASE_BUILT=true
        fi

        echo "Building and pushing aws-cli version ${version}..."
        docker build --compress --build-arg AWSCLI_VERSION=${version} -t ${REPO}:${version} -f Dockerfile .
        docker push ${REPO}:${version}

        if [ "${LATEST_TAGGED}" = "false" ]; then
            # After we sorted the versions file, we know that the first version in it must be the latest version.
            docker tag ${REPO}:${version} ${REPO}:latest \
                && docker push ${REPO}:latest \
                && docker rmi ${REPO}:latest
            LATEST_TAGGED=true
        fi

        docker rmi ${REPO}:${version}
        PUSHED_IMAGES="${PUSHED_IMAGES} ${version}"
    else
        echo "Not pushing aws-cli version ${version} because it's already present."
    fi
done < awscli-versions.txt

if [ "${BASE_BUILT}" = "true" ]; then
    echo "Cleaning up..."
    docker rmi -f awscli-versionized-base:latest || true
fi

[ -n "${PUSHED_IMAGES}" ] && {
    echo "Pushed images: ${PUSHED_IMAGES}";
    [ -e "./push_git.sh" ] && { ./push_git.sh; }
}
| true
|
9fd7684db1de6050a7aa5106a58fb989fce6427e
|
Shell
|
gmaher/svbuild
|
/scripts/get_freetype.sh
|
UTF-8
| 301
| 2.75
| 3
|
[] |
no_license
|
# Download and build FreeType into $EXTERNALS_DIR.
# Expects FREETYPE_URL, FREETYPE_DIR, FREETYPE_CHECKOUT, EXTERNALS_DIR and
# BUILD_DIR to be exported by the calling build script.
sudo apt-get install libtool
sudo apt-get install libpng-dev

echo "Downloading FREETYPE $FREETYPE_CHECKOUT, building into $FREETYPE_DIR"
git clone "$FREETYPE_URL"
# FIX: quote expansions and abort on a failed cd - the unquoted originals
# broke on paths with spaces, and a failed cd would have run the build (and
# later `make install`) in the wrong directory.
cd "$FREETYPE_DIR" || exit 1
git checkout "$FREETYPE_CHECKOUT"
./autogen.sh
./configure --prefix="$EXTERNALS_DIR"
make -j6
make install
cd "$BUILD_DIR" || exit 1
|
8059e7ea896c7109932721839a6ab75698f02e87
|
Shell
|
zkokaja/dotfiles
|
/init
|
UTF-8
| 902
| 2.765625
| 3
|
[] |
no_license
|
#!/usr/bin/env sh
# Make sure your XDG paths are set before running this
# TODO: make idempotent, interactive
# TODO: move old config/local into new location
# NOTE(review): `source` is not POSIX sh - presumably this runs under
# bash/zsh despite the sh shebang; confirm.

# Link zshenv first so the XDG_* variables used below get defined.
# FIX: quote "$PWD"/"$HOME" consistently (first few expansions were bare).
ln -sfv "$PWD/config/zsh/zshenv" "$HOME/.zshenv"
source "$HOME/.zshenv"

# setup config home
mv "$HOME/.config" "$HOME/config.old"
ln -sfv "$PWD"/config "$XDG_CONFIG_HOME"
source "$XDG_CONFIG_HOME"/zsh/zprofile

# setup local home
mv "$HOME"/.local "$HOME/local.old"
ln -sfv "$PWD"/local "$HOME/.local"

# ensure dirs exist
mkdir -p "$XDG_DATA_HOME"
mkdir -p "$XDG_CACHE_HOME"
mkdir -p "$XDG_STATE_HOME"

# Install plugin managers
curl -fLo "$XDG_CONFIG_HOME"/vim/autoload/plug.vim --create-dirs \
    https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim
git clone https://github.com/tmux-plugins/tpm "$XDG_CONFIG_HOME"/tmux/plugins/tpm
"$XDG_CONFIG_HOME"/tmux/plugins/tpm/bin/install_plugins
# TODO - install vim plugins dynamically
| true
|
c2f4ae0d8585240c9af4328dc88e256d005bb2ba
|
Shell
|
GrimirCZ/maturitni-projekt-dokumentace
|
/compile.sh
|
UTF-8
| 560
| 3.09375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Build the LaTeX documentation: copy src/ into a scratch build/ directory,
# run vlna (Czech non-breaking-space fixer) on every .tex file, then
# compile with latexmk; the final PDF lands at ../out.pdf.
rm -rf build
cp -rf src build
# FIX: abort if the scratch dir is missing instead of compiling in-place.
cd build || exit 1

echo Running vlna to correct czech spelling
touch all.log
for filename in *.tex; do
    #iconv -f UTF-8 -t ISO-8859-2 "$filename" -o "$filename" >> all.log 2>&1
    vlna -l -m -n "$filename" >> all.log 2>&1
    #iconv -f ISO-8859-2 -t UTF-8 "$filename" -o "$filename" >> all.log 2>&1
    # Remove BOM
    # NOTE(review): `sed -i ''` is the BSD/macOS form; GNU sed would treat
    # '' as a filename - confirm the target platform.
    sed -i '' -e "s/\xEF\xBB\xBF//" "$filename"
done
# FIX: -f so a clean tree (no editor backup files) does not raise an error.
rm -f *~

echo Compiling latex to pdf
time latexmk -pdf -pdflatex -f -bibtex -interaction=nonstopmode >> all.log 2>&1
mv *.pdf ../out.pdf
| true
|
868f1938d650b4e02d8ad6e044c4a669d491b5d8
|
Shell
|
lao-tseu-is-alive/tensorflow2-tutorial
|
/scripts/list_id_documents.sh
|
UTF-8
| 71
| 2.609375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Print the IDs (basenames) of all *.jpeg files in the current directory,
# sorted numerically.
# FIX: iterate the glob directly instead of parsing `ls` output, which
# broke on filenames containing whitespace.
for f in *.jpeg; do
	[ -e "$f" ] || continue   # no-match guard: skip the literal pattern
	basename "$f" .jpeg
done | sort -n
| true
|
baad612310730fc535d94c59a4867408a6ffb008
|
Shell
|
juju-solutions/layer-swarm
|
/actions/swarmbench
|
UTF-8
| 2,492
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# Juju action: run swarm-bench against the Swarm cluster leader and parse
# the results. Configurable action params: concurrency, requests, image.
set -eu

RUNTIME_IMAGE='lazypower/swarm-bench'

BENCHMARK_CONCURRENCY=$(action-get concurrency)
BENCHMARK_REQUESTS=$(action-get requests)
BENCHMARK_IMAGE=$(action-get image)
SWARM_LEADER=$(leader-get swarm_manager)

if [ -z "$SWARM_LEADER" ]; then
    action-fail "Swarm is still setting up. Skipping benchmark run."
    exit 0
fi

if [ ! -d "$CHARM_DIR/swarm_credentials" ]; then
    # This may need additional validation. Just because we have the dir doesn't
    # mean we have the certificates.
    action-fail "This is, um, awkward, I swear this never happens. But I can't find the SSL certificates for Docker Server auth."
    exit 0
fi

# The yaml sets defaults, but you can override these to None values. We dont want that.
if [ -z "$BENCHMARK_CONCURRENCY" ]; then
    BENCHMARK_CONCURRENCY=5
fi
if [ -z "$BENCHMARK_REQUESTS" ]; then
    BENCHMARK_REQUESTS=100
fi
if [ -z "$BENCHMARK_IMAGE" ]; then
    BENCHMARK_IMAGE='ubuntu'
fi

# Pull the benchmark runner image if not cached yet.
# BUG FIX: the original put DOCKER_TLS_VERIFY/DOCKER_HOST/DOCKER_CERT_PATH
# on separate assignment lines (so they never reached docker's environment)
# and misspelled DOCKER_HOST as "DOKER_HOST"; prefix them onto the command.
if [ -z "$(docker images -q $RUNTIME_IMAGE)" ]; then
    DOCKER_TLS_VERIFY=1 DOCKER_HOST=$SWARM_LEADER DOCKER_CERT_PATH="$CHARM_DIR/swarm_credentials" \
        docker pull $RUNTIME_IMAGE
fi

# Pull the image the benchmark will launch, if missing.
# BUG FIX: the original checked $RUNTIME_IMAGE again and then pulled the
# (empty) $BENCH_IMAGE variable; the benchmark image was clearly intended.
if [ -z "$(docker images -q $BENCHMARK_IMAGE)" ]; then
    DOCKER_TLS_VERIFY=1 DOCKER_HOST=$SWARM_LEADER DOCKER_CERT_PATH="$CHARM_DIR/swarm_credentials" \
        docker pull $BENCHMARK_IMAGE
fi

# Cleanup from any stale runs
set +e
docker rm $(docker ps -a -q --filter label=com.juju.benchmark) > /dev/null 2>&1
set -e

status-set waiting "Benchmarking swarm - launching $BENCHMARK_IMAGE $BENCHMARK_REQUESTS times"

benchmark-start

# if this fails, and throws a panic, see issue:
# https://github.com/aluzzardi/swarm-bench/issues/4
# https://github.com/docker/swarm/pull/2190
docker run -v $CHARM_DIR/swarm_credentials:/docker_certs \
    -e DOCKER_TLS_VERIFY=1 \
    -e DOCKER_HOST=$SWARM_LEADER \
    -e DOCKER_CERT_PATH=/docker_certs \
    --label=com.juju.benchmark=swarmbench \
    $RUNTIME_IMAGE -c $BENCHMARK_CONCURRENCY -n $BENCHMARK_REQUESTS -i $BENCHMARK_IMAGE

benchmark-finish

CONTAINER_ID=$(docker ps -q -a -f "label=com.juju.benchmark")

# parse results goes here
python3 actions/parse_swarmbench_output.py $CONTAINER_ID

# This only cleans up the runner. This does not clean up any of the containers
# after the bench run
status-set waiting "Cleaning up after benchmark run"
docker rm $(docker ps -a -q --filter label=com.juju.benchmark)
status-set active
| true
|
0c2f5bbd247312790e2a95cec1e9b0c2ef49c582
|
Shell
|
hidetoshing/shell-scripts
|
/regrep
|
UTF-8
| 1,201
| 4.09375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# regrep - interactively find files containing a pattern, then replace the
# pattern across them after a y/n confirmation.
# NOTE(review): the shebang is /bin/sh but the `function` keyword below is a
# bash-ism - presumably this runs under bash; confirm.
# if color message, uncomment following.
#
source ~/dotfiles/lib/lib/color.sh
#
# Default file glob to operate on; override with --type/-t.
filetype="*.php"
# Show the files containing $1, ask for confirmation, replace $1 with $2.
# NOTE(review): ${1}/${2} are interpolated unquoted into grep and into the
# sed program "s#..#..#g"; arguments containing '#', spaces or regex
# metacharacters will break - treat as a known limitation.
function regrep {
echo "${fgb_blue}ReGrep${fg_default}"
echo "${1} => ${2}"
echo "find for ${filetype}"
echo "${fgb_blue}Target files${fg_default}"
find . -name ${filetype} | grep -rl ${1} *
echo "\n"
echo "${fg_green}Replace text? (y/n)${fg_default}"
read response
case ${response} in
"yes" | "y")
find . -name "${filetype}" | grep -rl ${1} * | xargs sed -i -e "s#${1}#${2}#g"
exit 0
;;
*)
echo "${fg_yellow}Canceled replace${fg_default}"
exit 0
;;
esac
}
# Print usage help (the here-doc body is program output - left untouched).
function usage {
cat << EOF
$(basename ${0}) is a tool for Replace Grep
Usage:
  $(basename ${0}) [<options>] src dist
Options:
  --type, -t     file type
  --version, -v  show version information
  --help, -h     show this message.
EOF
}
# Print the tool version.
function version {
echo "$(basename ${0}) version 0.0.1"
}
# options loop
if [ $# -gt 0 ]; then
while [ $# -gt 0 ];
do
case ${1} in
--help|-h)
usage
exit 0
;;
--version|-v)
version
exit 0
;;
--type|-t)
filetype=${2}
shift
;;
*)
# First non-option argument: treat ${1} ${2} as <src> <dist> and run.
regrep ${1} ${2}
exit 0
;;
esac
shift
done
else
usage
exit 0
fi
| true
|
597904830cdb9685f838a904ad4c9d0dccd9ee6a
|
Shell
|
gongchanghao/gongchanghao.github.io
|
/docker-entrypoint.sh
|
UTF-8
| 722
| 2.703125
| 3
|
[
"BSL-1.0"
] |
permissive
|
#!/bin/bash
# On first run (empty lib/), download the Apple Music APK and extract the
# two native libraries matching this CPU architecture, then start the
# anisette server.
if [ -z "$(ls -A lib/ 2> /dev/null)" ]; then
	curl https://apps.mzstatic.com/content/android-apple-music-apk/applemusic.apk -O
	case "$(uname -m)" in
		aarch64)
			unzip applemusic.apk 'lib/arm64-v8a/libstoreservicescore.so' 'lib/arm64-v8a/libCoreADI.so'
			;;
		arm*)
			unzip applemusic.apk 'lib/armeabi-v7a/libstoreservicescore.so' 'lib/armeabi-v7a/libCoreADI.so'
			;;
		x86_64)
			unzip applemusic.apk 'lib/x86_64/libstoreservicescore.so' 'lib/x86_64/libCoreADI.so'
			;;
		i686)
			unzip applemusic.apk 'lib/x86/libstoreservicescore.so' 'lib/x86/libCoreADI.so'
			;;
	esac
	rm applemusic.apk
fi
/opt/anisette_server
| true
|
f258ea8d88afe5e6ba632e9e26e0900def8b6b22
|
Shell
|
ronioncloud/dorothy-1
|
/commands/echo-dir
|
UTF-8
| 126
| 2.671875
| 3
|
[
"LicenseRef-scancode-public-domain",
"Unlicense"
] |
permissive
|
#!/usr/bin/env bash
source "$DOROTHY/sources/strict.bash"

# Print the argument if it is a directory; otherwise fail with status 1.
if ! "$DOROTHY/commands/is-dir" "$1"; then
	exit 1
fi
echo "$1"
| true
|
442d54adad5b099ba08e6660735af7fef03c1dcb
|
Shell
|
kdraun/Open.HD_Image_Builder
|
/stages/02-Kernel/04-run.sh
|
UTF-8
| 1,146
| 3.21875
| 3
|
[
"MIT"
] |
permissive
|
# Stage 02-Kernel step 4: cross-compile the Raspberry Pi 4 (bcm2711/v7l)
# kernel and install kernel, modules and DTBs into the mounted image at
# ${STAGE_WORK_DIR}/mnt. Expects DISTRO, STAGE_WORK_DIR, STAGE_DIR,
# LINUX_DIR, KERNEL_BRANCH and J_CORES plus a `log` helper from the
# surrounding build framework.
set -e
if [[ "${DISTRO}" == "buster" ]]; then
# Do this to the WORK folder of this stage
pushd ${STAGE_WORK_DIR}
MNT_DIR="${STAGE_WORK_DIR}/mnt"
log "Compile kernel for Pi 4"
pushd ${LINUX_DIR}
log "Copy Kernel config"
cp "${STAGE_DIR}/FILES/.config-${KERNEL_BRANCH}-v7l" ./.config || exit 1
make clean
#KERNEL=kernel7l ARCH=arm CROSS_COMPILE=arm-linux-gnueabihf- make bcm2711_defconfig
# `yes ""` auto-accepts defaults for any config symbols newer than the
# shipped .config.
yes "" | KERNEL=kernel7l ARCH=arm CROSS_COMPILE=arm-linux-gnueabihf- make -j $J_CORES zImage modules dtbs
log "Saving kernel as ${STAGE_WORK_DIR}/kernel7l.img"
cp arch/arm/boot/zImage "${MNT_DIR}/boot/kernel7l.img" || exit 1
log "Copy the kernel modules for Pi 4"
make -j $J_CORES ARCH=arm CROSS_COMPILE=arm-linux-gnueabihf- INSTALL_MOD_PATH="$MNT_DIR" modules_install
log "Copy the DTBs for Pi 4"
sudo cp arch/arm/boot/dts/*.dtb "${MNT_DIR}/boot/" || exit 1
sudo cp arch/arm/boot/dts/overlays/*.dtb* "${MNT_DIR}/boot/overlays/" || exit 1
sudo cp arch/arm/boot/dts/overlays/README "${MNT_DIR}/boot/overlays/" || exit 1
# out of linux
popd
#return
popd
fi
| true
|
64345f1c13ec659333b7bd46f5b2e830360c3f6a
|
Shell
|
avvo/docker-ruby
|
/ruby-libv8-phantomjs/build.sh
|
UTF-8
| 611
| 3.40625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build the ruby-libv8-phantomjs Docker image and push it to the registry.
# Usage: build.sh [version-tag]   (defaults to "latest")
# .dockerbuild defines $REPO and $NAME.
VERSION=$1
source .dockerbuild

# FIX: idiomatic empty-string test replaces the '[ "x$VERSION" == "x" ]'
# workaround (== inside [ is a bashism anyway).
if [ -z "$VERSION" ]; then
    VERSION="latest"
fi

# Print an error and abort the build.
function buildfail() {
    # FIX: "$*" joins the arguments into one word for echo (SC2145);
    # "$@" inside a quoted string expands oddly.
    echo "ERROR: Build Failure - $*"
    exit 1
}

IMAGE="$REPO/$NAME:$VERSION"

docker build --rm -t "$IMAGE" . || buildfail "Could not build image."

## These commands like to complain about the registry ssl cert.
## May have to add --insecure-registry registry.docker.prod.avvo.com
## to your *docker daemon* args for this to work.
#docker tag -f $NAME:$VERSION registry.docker.prod.avvo.com/$NAME

docker push "$IMAGE" || buildfail "Build succeeded but could not push image to remote repo."
| true
|
d5aa24f342baf324bfaed37aa6d01bf029fca2ad
|
Shell
|
gpenghe/linuxconfig
|
/snippets/LinuxCommand_ffmpeg_encode_for_windows.sh
|
UTF-8
| 611
| 3.796875
| 4
|
[] |
no_license
|
#!/bin/bash
# bash (not sh): the `read -s -n` options used below are bash-only.
#
# Re-encode a video into an AVI suitable for Windows playback.
# Usage: <script> <video_file> <avi_file>
if [ $# -ne 2 ]; then
	echo "Usage:"
	echo "  $0 <video_file> <avi_file>"
	exit 0
fi

if [ -e "$2" ]; then
	echo "$2 already exists. Overwrite it? (Y/N)"
	# BUG FIX: the original `read -d'' -s -n1` collapsed to `read -d`,
	# which consumed "-s" as the delimiter argument. Read one silent
	# keypress into REPLY instead.
	read -r -s -n 1
	# '[:lower:]'/'[:upper:]' are quoted classes; the bare [a-z] [A-Z]
	# were subject to filename globbing.
	key=$(echo "$REPLY" | tr '[:lower:]' '[:upper:]')
	echo
	# BUG FIX: $key was unquoted; an empty reply (e.g. Enter) made `[`
	# fail with "unary operator expected".
	if [ "$key" != "Y" ]; then
		echo "Not overwriting..."
		exit 0
	fi
	# OK to delete it
	rm -f "$2"
fi

bitrate="4000K"
echo "Encoding $1 to $2 with $bitrate bitrate ..."
ffmpeg -i "$1" -b:v "$bitrate" "$2" &> /dev/null

# A more complex method. Not needed
# ffmpeg -r 30 -i $1 -c:v mpeg4 -flags:v +qscale -global_quality:v 0 -codec:a libmp3lame $2
| true
|
8cede50f2e8cb9a6a6cd73bfbb371b86d8ed3422
|
Shell
|
treyhakanson/omaha
|
/src/get-players.sh
|
UTF-8
| 319
| 3.375
| 3
|
[] |
no_license
|
#! /usr/bin/env bash
# Download every player page listed in player-links.txt from
# pro-football-reference.com into ../raw/players/, naming each file after
# the link path with "/" replaced by ".".
BASE_URL="https://www.pro-football-reference.com"
ROOT_DIR="../raw/players"

# FIX: a while/read loop (unlike `for ... $(cat file)`) does not word-split
# or glob-expand the links; -r keeps backslashes literal.
while IFS= read -r link; do
	[ -n "$link" ] || continue
	echo "Retrieving $BASE_URL$link"
	fname_raw=${link:1}            # strip leading char (was: echo | cut -c 2-)
	fname="${fname_raw////.}"      # replace every "/" with "."
	curl "$BASE_URL$link" > "$ROOT_DIR/$fname"
	echo ""
done < ../raw/misc/player-links.txt
| true
|
e03f72a2b49db971e89214d0e869dcff29f29781
|
Shell
|
ikaritw/iot
|
/led/go.sh
|
UTF-8
| 312
| 2.515625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Jazz
# Cycle the LED animation scripts n1.py .. n6.py a thousand times,
# blanking the lights between frames and once more at the end.
for (( x = 1; x <= 1000; x++ )); do
	python lightoff.py
	# Frames repeat with period 6: x=1 -> n1.py, ..., x=6 -> n6.py,
	# x=7 -> n1.py again (same mapping as the original case statement).
	python "n$(( (x - 1) % 6 + 1 )).py"
done
python lightoff.py
exit 0
| true
|
e9712156bcdd326447f6e07b9fe3f38ff77f55fd
|
Shell
|
sugarsweetrobotics/vrep_mcl_test
|
/conf/Localization_YMCL0.conf
|
UTF-8
| 7,474
| 2.53125
| 3
|
[] |
no_license
|
# -*- sh -*-
#------------------------------------------------------------
# Configuration file for Localization_YMCL
#
# This configuration file name should be specified in rtc.conf (or other
# configuration file specified by -f option) by "config_file" property.
#
# Navigation.Localization_YMCL.config_file: Localization_YMCL.conf
# or
# Navigation.Localization_YMCL0.config_file: Localization_YMCL0.conf
# Navigation.Localization_YMCL1.config_file: Localization_YMCL1.conf
# Navigation.Localization_YMCL2.config_file: Localization_YMCL2.conf
#
# ------------------------------------------------------------
# An example configuration file for Localization_YMCL
#
# See details in the following reference manual
#
# Execution context configuration
# exec_cxt.periodic.type: PeriodicExecutionContext
# exec_cxt.periodic.rate: 1000
#
#
# configuration.active_config: mode0
# Additional configuration-set example named "mode0"
#
# conf.mode0.int_param0: 0
# conf.mode0.int_param1: 1
# conf.mode0.double_param0: 0.99
# conf.mode0.double_param1: -0.99
# conf.mode0.str_param0: default
# conf.mode0.str_param1: default set in conf file
# conf.mode0.vector_param0: 0.0,0.1,0.2,0.3,0.4,0.5,0.6
#
# Other configuration set named "mode1"
#
# conf.mode1.int_param0: 0
# conf.mode1.int_param1: 1
# conf.mode1.double_param0: 0.99
# conf.mode1.double_param1: -0.99
# conf.mode1.str_param0: default
# conf.mode1.str_param1: default set in conf file
# conf.mode1.vector_param0: 0.0,0.1,0.2,0.3,0.4,0.5,0.6
##============================================================
## Component configuration reference
##
##============================================================
## Active configuration-set
##============================================================
##
## Initial active configuration-set. The following "mode0" is a
## configuration-set name. A configuration-set named "mode0" should be
## appear in this configuration file as follows.
##
## configuration.active_config: mode0
##
# conf.mode0.param0: hoge
# conf.mode0.param1: fuga
# conf.mode0.param2: munya
##============================================================
## GUI control option for RTSystemEditor
##============================================================
## Available GUI control options [__widget__]:
##
## - text: text box [default].
## - slider.<step>: Horizontal slider. <step> is step for the slider.
## A range constraints option is required.
## - spin: Spin button. A range constraitns option is required.
## - radio: Radio button. An enumeration constraints is required.
## - checkbox: Checkbox control. An enumeration constraints is
## required. The parameter has to be able to accept a
## comma separated list.
## - orderd_list: Orderd list control. An enumeration constraint is
## required. The parameter has to be able to accept a
## comma separated list. In this control, Enumerated
## elements can appear one or more times in the given list.
##
## Available GUI control constraint options [__constraints__]:
##
## - none: blank
## - direct value: 100 (constant value)
## - range: <, >, <=, >= can be used.
## - enumeration: (enum0, enum1, ...)
## - array: <constraints0>, ,constraints1>, ... for only array value
## - hash: {key0: value0, key1:, value0, ...}
##
# conf.__widget__.initial_pose_x, text
# conf.__widget__.initial_pose_y, text
# conf.__widget__.initial_pose_phi, text
# conf.__widget__.initial_particle_min_x, text
# conf.__widget__.initial_particle_max_x, text
# conf.__widget__.initial_particle_min_y, text
# conf.__widget__.initial_particle_max_y, text
# conf.__widget__.initial_particle_min_phi, text
# conf.__widget__.initial_particle_max_phi, text
# conf.__widget__.initial_particle_count, text
# conf.__widget__.poseTimeOut, text
# conf.__widget__.motion_model, radio
# conf.__widget__.motion_alpha1, text
# conf.__widget__.motion_alpha2, text
# conf.__widget__.motion_alpha3, text
# conf.__widget__.motion_alpha4, text
# conf.__widget__.motion_std_XY, text
# conf.__widget__.motion_std_PHI, text
# conf.__widget__.LM_likelihoodMethod, text
# conf.__widget__.LM_enableLikelihoodCache, radio
# conf.__widget__.LM_LF_decimation, text
# conf.__widget__.LM_LF_stdHit, text
# conf.__widget__.LM_LF_zRandom, text
# conf.__widget__.LM_LF_alternateAverageMethod, radio
# conf.__widget__.LM_LF_zHit, text
# conf.__widget__.LM_LF_maxCorrsDistance, text
# conf.__widget__.LM_LF_maxRange, text
# conf.__widget__.LM_MI_exponent, text
# conf.__widget__.LM_MI_ratio_max_distance, text
# conf.__widget__.LM_MI_skip_rays, text
# conf.__widget__.LM_consensus_pow, text
# conf.__widget__.LM_consensus_takeEachRange, text
# conf.__widget__.LM_rayTracing_stdHit, text
# conf.__widget__.LM_rayTracing_decimation, text
# conf.__widget__.LM_rayTracing_useDistanceFilter, radio
# conf.__widget__.PF_algorithm, radio
# conf.__widget__.PF_resamplingMethod, radio
# conf.__widget__.PF_BETA, text
# conf.__widget__.PF_powFactor, text
# conf.__widget__.PF_sampleSize, text
# conf.__widget__.PF_adaptiveSampleSize, text
# conf.__widget__.PF_max_loglikelihood_dyn_range, text
# conf.__widget__.PF_AuxFilterOptimal_MaximumSearchSamples, text
# conf.__widget__.PF_AuxFilterStandard_FirstStageWeightsMonteCarlo, radio
# conf.__widget__.PF_AuxFilterOptimal_MLE, radio
# conf.__widget__.KLD_binSize_PHI, text
# conf.__widget__.KLD_binSize_XY, text
# conf.__widget__.KLD_delta, text
# conf.__widget__.KLD_epsilon, text
# conf.__widget__.KLD_maxSampleSize, text
# conf.__widget__.KLD_minSampleSize, text
# conf.__widget__.KLD_minSamplesPerBin, text
# conf.__constraints__.motion_model, (Thrun,Gausian)
# conf.__constraints__.LM_likelihoodMethod, (lmLikelihoodField_Thrun,lmLikelihoodField_II,lmRayTracing,lmCellsDifference,lmConsensus,lmConsensusOWA,lmMeanInformation)
# conf.__constraints__.LM_enableLikelihoodCache, (true,false)
# conf.__constraints__.LM_LF_alternateAverageMethod, (true,false)
# conf.__constraints__.LM_rayTracing_useDistanceFilter, (true,false)
# conf.__constraints__.PF_algorithm, (pfStandardProposal,pfAuxiliaryPFStandard,pfOptimalProposal,pfAuxiliaryPFOptimal)
# conf.__constraints__.PF_resamplingMethod, (prMultinomial,prResidual,prStratified,prSystematic)
# conf.__constraints__.PF_AuxFilterStandard_FirstStageWeightsMonteCarlo, (true,false)
# conf.__constraints__.PF_AuxFilterOptimal_MLE, (true,false)
##============================================================
## Execution context settings
##============================================================
##
## Periodic type ExecutionContext
##
## Other availabilities in OpenRTM-aist
##
## - ExtTrigExecutionContext: External triggered EC. It is embedded in
## OpenRTM library.
## - OpenHRPExecutionContext: External triggred paralell execution
## EC. It is embedded in OpenRTM
## library. This is usually used with
## OpenHRP3.
## - RTPreemptEC: Real-time execution context for Linux
##                        RT-preempt patched kernel.
## - ArtExecutionContext: Real-time execution context for ARTLinux
## (http://sourceforge.net/projects/art-linux/)
##
# exec_cxt.periodic.type: PeriodicExecutionContext
##
## The execution cycle of ExecutionContext
##
exec_cxt.periodic.rate:1000.0
| true
|
161d9b3bf1e6fbd5e56a78f6ed6d5253113100d9
|
Shell
|
bismog/leetcode
|
/sh/add-numbered-heading.sh
|
UTF-8
| 674
| 3.59375
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Add hierarchical numbering (e.g. "1.1.2 ") to markdown headings, in place.
#srcfile=./nnh.md
# Markdown file to renumber, given as the first argument.
srcfile=$1
replace()
{
    # Prefix every heading of the given level in $srcfile with a running
    # number, e.g. level-2 "## title" -> "##1.2 title".
    local level=$1
    # A run of $level '#' characters, e.g. "##" for level 2.
    local sharps=$(printf '%0.s#' $(seq 1 $level))
    local base_numbered_string=''
    # Parent prefix "1.1...". NOTE(review): assumes every parent section is
    # numbered 1; true hierarchical numbering would need the parent counter.
    [[ $level != '1' ]] && base_numbered_string=$(printf '%0.s1.' $(seq 1 $((level - 1))))
    local last_number=0
    while true; do
        # Any un-numbered heading of this level left? ("#... " + space)
        local out
        out=$(sed -n -e '/^'"${sharps}"' /p' "${srcfile}")
        [[ $out ]] || return
        # BUGFIX: the counter used to start at 1 and was incremented before
        # the first substitution, so numbering began at 2 instead of 1.
        last_number=$((last_number + 1))
        # Number the first remaining heading (GNU sed 0,/re/ address range).
        sed -i -e '0,/^'"${sharps}"' /s/^'"${sharps}"' /'"${sharps}"''"${base_numbered_string}"''"${last_number}"' /' "${srcfile}"
    done
}
# Renumber headings for every markdown level, 1 through 6.
for heading_level in 1 2 3 4 5 6; do
    replace "$heading_level"
done
| true
|
bd16feb55a4818dfb74a66be8b0bbce1cc23d2f1
|
Shell
|
bmule/ehkmt-integration
|
/etc/ssl/import-signed-certif.bash
|
UTF-8
| 583
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/bash
#set -xv
# Import the signed PHRS core certificate into the local Java keystore.
alias_name="phrs_core"
keystore_filename="srfg-phrs-core-keystore.ks"
certificate_filename="srfg-phrs-corecertificate.csr"
# BUGFIX: the success message used to be printed unconditionally, even when
# keytool failed; report success only if the import actually succeeded.
if "$JAVA_HOME/bin/keytool" -import -alias "$alias_name" -keystore "$keystore_filename" -file "$certificate_filename"; then
    echo "The Certificate $certificate_filename based on the keystore file $keystore_filename was successfuly imported in to the local trust store."
else
    echo "Importing certificate $certificate_filename into keystore $keystore_filename failed." >&2
    exit 1
fi
# echo "Actaul certificates lists"
# $JAVA_HOME/bin/keytool -list -v -keystore srfg-phrs-core-keystore.ks
# $JAVA_HOME/bin/keytool -list -v -keystore $JAVA_HOME/lib/security/cacerts
| true
|
a1cd92df3552e30c03354962125c98b3f1d16efd
|
Shell
|
icometrix/dicom2nifti
|
/push_release.sh
|
UTF-8
| 557
| 3.4375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Tag, push, build, and upload a dicom2nifti release, then print the tarball
# SHA256 needed for the conda-forge recipe.
echo "Provide version number: "
# BUGFIX: read -r keeps a pasted tag literal (no backslash mangling, SC2162).
read -r GIT_TAG
if [ -z "${GIT_TAG}" ]; then
    echo "No version number given. !!!EXITING!!!"
    exit 1
fi
# Check if tag exists
echo "Check if tag exists ${GIT_TAG}"
if git rev-parse "${GIT_TAG}" >/dev/null 2>&1
then
    echo "Duplicate tag found. !!!EXITING!!!"
    exit 1
else
    echo "Creating and pushing tag ${GIT_TAG}."
    git tag "${GIT_TAG}"
    git push --tags git@github.com:icometrix/dicom2nifti.git
fi
# Build and upload the PyPI release.
rm -Rf dist
python -m build
twine upload -r pypi dist/*
echo "Generating SHA256 for conda forge."
curl -sL "https://github.com/icometrix/dicom2nifti/archive/${GIT_TAG}.tar.gz" | openssl sha256
| true
|
e1faad4cf2d9b095c2d8177b05a583d4a242363c
|
Shell
|
pdelre/dotfiles
|
/zsh/bindkey.zsh
|
UTF-8
| 2,100
| 2.953125
| 3
|
[
"MIT"
] |
permissive
|
# Zsh key bindings: normalise key codes coming from different terminals/modes
# so the editing keys behave the same everywhere, then wire up history search.
# https://www.reddit.com/r/zsh/comments/eblqvq/del_pgup_and_pgdown_input_in_terminal/fb7337q/
# If NumLock is off, translate keys to make them appear the same as with NumLock on.
bindkey -s '^[OM' '^M' # enter
bindkey -s '^[Ok' '+'
bindkey -s '^[Om' '-'
bindkey -s '^[Oj' '*'
bindkey -s '^[Oo' '/'
bindkey -s '^[OX' '='
# If someone switches our terminal to application mode (smkx), translate keys to make
# them appear the same as in raw mode (rmkx).
bindkey -s '^[OH' '^[[H' # home
bindkey -s '^[OF' '^[[F' # end
bindkey -s '^[OA' '^[[A' # up
bindkey -s '^[OB' '^[[B' # down
bindkey -s '^[OD' '^[[D' # left
bindkey -s '^[OC' '^[[C' # right
# TTY sends different key codes. Translate them to regular.
bindkey -s '^[[1~' '^[[H' # home
bindkey -s '^[[4~' '^[[F' # end
# Bind Home, End and a bunch of other standard things
bindkey '^?' backward-delete-char # bs delete one char backward
bindkey '^[[3~' delete-char # delete delete one char forward
bindkey '^[[H' beginning-of-line # home go to the beginning of line
bindkey '^[[F' end-of-line # end go to the end of line
bindkey '^[[1;5C' forward-word # ctrl+right go forward one word
bindkey '^[[1;5D' backward-word # ctrl+left go backward one word
bindkey '^H' backward-kill-word # ctrl+bs delete previous word
bindkey '^[[3;5~' kill-word # ctrl+del delete next word
bindkey '^J' backward-kill-line # ctrl+j delete everything before cursor
bindkey '^[[D' backward-char # left move cursor one char backward
bindkey '^[[C' forward-char # right move cursor one char forward
bindkey '^[[A' up-line-or-beginning-search # up prev command in history
bindkey '^[[B' down-line-or-beginning-search # down next command in history
# Register the search widgets with zle; binding by name above resolves when
# the key is pressed, so defining them after the bindkey calls is fine.
autoload -Uz up-line-or-beginning-search
autoload -Uz down-line-or-beginning-search
zle -N up-line-or-beginning-search
zle -N down-line-or-beginning-search
| true
|
8686d6be7c383788a08694a53911058adcaec714
|
Shell
|
taget/random_app
|
/tests/functional/functions.sh
|
UTF-8
| 1,917
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
#set -x
# Helper functions for functional tests against the random-string REST API
# served on 127.0.0.1:5001. All helpers parse the JSON replies with awk.
CURL='curl -s'
function get {
# GET random string resource $1 and check the reply looks like a JSON object
# whose first quoted token is "code"; return 1 otherwise.
# NOTE(review): this echoes the literal key "code", not the code's value —
# post() extracts field 4 (the value); confirm whether $2 vs $4 is intended.
local ID=$1
local RET=$($CURL -H "Content-Type: application/json" -X GET 127.0.0.1:5001/random/$ID)
# Second "-delimited field = the first JSON key.
RET=$(echo $RET | awk -F\" '{print $2}')
if [[ $RET != "code" ]]; then
return 1
fi
echo $RET
}
function post {
    # POST a request for a random string of length $1 with timeout $2.
    # Echoes the returned code; returns 1 when the code length is wrong.
    local len=$1
    local timeout=$2
    local response
    response=$($CURL -H "Content-Type: application/json" -X POST 127.0.0.1:5001/random -d "{\"length\": $len, \"time_out\": $timeout}")
    # Fourth "-delimited field = the value of the first JSON key.
    local code
    code=$(echo $response | awk -F\" '{print $4}')
    if ! verify_length $code $len; then
        return 1
    fi
    echo $code
    return 0
}
function post_with_version {
    # Same as post(), but also sends an "X-Version: $3" header.
    local len=$1
    local timeout=$2
    local version=$3
    local response
    response=$($CURL -H "Content-Type: application/json" -H "X-Version: $version" -X POST 127.0.0.1:5001/random -d "{\"length\": $len, \"time_out\": $timeout}")
    local code
    code=$(echo $response | awk -F\" '{print $4}')
    if ! verify_length $code $len; then
        return 1
    fi
    echo $code
    return 0
}
function delete {
    # DELETE resource $1; succeed only when the service answers with "{}".
    local id=$1
    local reply
    reply=$($CURL -H "Content-Type: application/json" -X DELETE 127.0.0.1:5001/random/$id)
    [[ $reply == "{}" ]] || return 1
    return 0
}
function verify_length {
    # Return 0 iff string $1 is exactly $2 characters long.
    local code=$1
    local expected=$2
    if [[ ${#code} -eq $expected ]]; then
        return 0
    fi
    return 1
}
# $1 message
# $2 option , default is 0; set to 1 to invert the expectation
function check_error {
    # Report on the exit status of the command that ran just before this
    # call. Default ($2 unset/0): a non-zero status is fatal (exit 1).
    # Inverted ($2 != 0): a ZERO status is fatal — for "must fail" cases.
    local status=$?
    local invert=${2-0}
    if [[ $status -ne 0 && $invert -eq 0 ]] || [[ $status -eq 0 && $invert -ne 0 ]]; then
        echo "!!!! Failed out: $1 failed!"
        exit 1
    fi
    echo ":-) $1 Done!"
}
| true
|
9898ed1ff71c3389fbf690f61d05838a2e91606f
|
Shell
|
kampka/dockerfiles
|
/postgres/setup.sh
|
UTF-8
| 429
| 2.65625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Build-time setup for an Arch-based PostgreSQL container image: install
# packages, copy init/service files from /build, then trim the image.
set -e
# Provide /dev/fd (some tools expect it inside the container).
ln -sf /proc/self/fd /dev
pacman -Sy --noconfirm --needed postgresql postgresql-old-upgrade grep
mkdir -p /data
mkdir -p /etc/initrc.d
cp -v /build/initrc.d/* /etc/initrc.d
mkdir -p /services
cp -rv /build/services/* /services
rm -rf /build
# Orphaned dependency packages; `|| true` keeps set -e happy when none exist.
REMOVABLES=$(pacman -Qqtd) || true
if [ -n "$REMOVABLES" ]; then
# Intentionally unquoted: $REMOVABLES is a whitespace-separated package list.
pacman -Rns --noconfirm $REMOVABLES
fi
# Clean the package cache ("-S -cc" is equivalent to "-Scc").
pacman -S -cc --noconfirm
rm -rf /var/cache/pacman/pkg
| true
|
c720802d85f6ea70d5db94764f6ee396f4b42114
|
Shell
|
Jeasonxiu/Direct_Aniso_inversion
|
/kernel/Example/DirectKernel.sh
|
UTF-8
| 2,430
| 3.0625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Driver for the direct anisotropy kernel computation: fans out per-latitude
# DKernel jobs, collects their outputs, and merges them with DKReader.
exe=../bin
DK=$exe/DKernel
DKReader=$exe/DKReader
# path
path=`pwd`
#
# infold=$path/infile
infold=$path/infile
# output=$path/output
output=$path/outputRV
# Sub-folders for the various DKernel by-products.
kernel_fold=$output/kernel
Gramdd_fold=$output/Gramdd
mode_fold=$output/Sph_mode
mdl_fold=$output/pro_mdl
Lon_fold=$path/LonKernel
if [ ! -d $Lon_fold ]; then
mkdir $Lon_fold
fi
# Create the remaining output folders; mkdir -p is a no-op for existing dirs.
# BUGFIX: the original also tested the never-defined variable $out_fold and
# then ran a bare "mkdir" with an empty argument — that stray block is gone.
mkdir -p "$output" "$kernel_fold" "$Gramdd_fold" "$mode_fold" "$mdl_fold"
# 1.-------------------------------------------------
# Stage 1: run DKernel once per latitude index, as parallel background jobs.
echo "_______________RUN DK LON CYCLE_____________"
# Stdin layout expected by DKernel:
#MOD c: input iso-Vs model file
#2 2 9 c: nx ny nz (grid number in lat lon and depth direction)
#1 c: ny_clc 1:ny
#3 c: 'minthk' = layerthick / sublayernum, can be 1, 2 ,3
#2 c: kmaxRc (followed by periods)
#40 50
#PREMQL6ic_12742u_intp_2km.card c: reference Mineos format model
nx=16
ny=16
nz=4
minthk=2
kmaxRc=36
mdl=$infold/MOD
refmdl=$infold/PREMQL6ic_12742u_intp_2km.card
#ny=1
# One detached DKernel job per ny index; each logs to log_DK_$i.
for ((i=1;i<=$ny;i=i+1));do
echo "ny_cycle=" $i
(echo "$mdl
$nx $ny $nz
$i
$minthk
$kmaxRc
5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40
$refmdl" | nohup $DK >log_DK_$i 2>&1 ) &
done
# 2.---------------------------------------------
# zip
# Stage 2: poll until every per-longitude job has produced its Lon*.dat file,
# then sort the by-products into their folders.
sw=0
#last=Lon_*"$ny"_SenG_Vs_Gsc.dat
while [[ $sw -ne $ny ]];do
sw=`find ./ -name "Lon*.dat" | wc -l `
echo $sw
# Poll once a minute until all $ny outputs exist.
sleep 1m
done
mv *.Gdused $Gramdd_fold
mv *.fre $mode_fold
mv *.card $mdl_fold
mv *dL_dA* $kernel_fold
rm *.split
Lonfile=Lonlist.txt
ls Lon*.dat > $Lonfile
# 3.-----------------------------------------
# Read_Dkernel
# Stage 3: merge all per-longitude kernels with DKReader (runs detached).
# input list
#2 2 9 c: nx ny nz (grid number in lat lon and depth direction)
#2 c: kmaxRc (followed by periods)
#Lonlist.txt c: Lonfile
#Sen_dcdL_dcdA.dat c: output kernel file
(echo "$nx $ny $nz
$kmaxRc
$Lonfile" | nohup $DKReader >log_Read 2>&1 ) &
sw=0
# Wait (10-minute poll) until the merged kernel file appears.
while [[ $sw -ne 1 ]];do
sw=`find ./ -name "Sen_dcdL_dA_dC_dF.dat" | wc -l `
sleep 10m
done
mv *Lon*.dat log_DK* $Lon_fold
echo "DK finished!"
| true
|
b119719079a5f5585b3157ec28e0045f3a085001
|
Shell
|
woojiahao/govid-19
|
/setup.sh
|
UTF-8
| 286
| 2.78125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Interactively collect database connection settings and append them to .env.
read -rp "Database user >>> " user
read -rp "Database password >>> " password
read -rp "Database name >>> " name
read -rp "Host >>> " host
# Append (not overwrite) so any existing .env entries are preserved.
{
    printf 'POSTGRES_DB=%s\n' "$name"
    printf 'POSTGRES_USER=%s\n' "$user"
    printf 'POSTGRES_PASSWORD=%s\n' "$password"
    printf 'HOST=%s\n' "$host"
} >> .env
| true
|
f36dd62dc77715773501c3a51472905e10e58bae
|
Shell
|
jhadvig/source-to-image
|
/hack/build-release.sh
|
UTF-8
| 1,521
| 3.9375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# This script generates release zips into _output/releases. It requires the openshift/sti-release
# image to be built prior to executing this command.
set -o errexit
set -o nounset
set -o pipefail
STI_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${STI_ROOT}/hack/common.sh"
# Go to the top of the tree.
cd "${STI_ROOT}"
# Scratch directory that becomes the docker build context.
context="${STI_ROOT}/_output/buildenv-context"
# clean existing output
rm -rf "${STI_ROOT}/_output/local/releases"
rm -rf "${STI_ROOT}/_output/local/go/bin"
rm -rf "${context}"
mkdir -p "${context}"
mkdir -p "${STI_ROOT}/_output/local"
# generate version definitions
sti::build::get_version_vars
sti::build::save_version_vars "${context}/sti-version-defs"
# create the input archive
git archive --format=tar -o "${context}/archive.tar" HEAD
tar -rf "${context}/archive.tar" -C "${context}" sti-version-defs
gzip -f "${context}/archive.tar"
# build in the clean environment
# Stream the source archive into the release container, then copy the built
# artifacts back out via the container id recorded in ${context}/cid.
cat "${context}/archive.tar.gz" | docker run -i --cidfile="${context}/cid" openshift/sti-release
docker cp $(cat ${context}/cid):/go/src/github.com/openshift/source-to-image/_output/local/releases "${STI_ROOT}/_output/local"
# copy the linux release back to the _output/go/bin dir
releases=$(find _output/local/releases/ -print | grep 'source-to-image-.*-linux-' --color=never)
# BUGFIX: the count was computed with an unquoted $(echo $releases | wc -l),
# which collapses newlines so the result was always 1 and multiple matching
# tarballs were never detected.
if [[ $(echo "$releases" | wc -l) -ne 1 ]]; then
  echo "There should be exactly one Linux release tar in _output/local/releases"
  exit 1
fi
bindir="_output/local/go/bin"
mkdir -p "${bindir}"
# 'm' resets mtimes on extraction so make-style staleness checks re-run.
tar mxzf "${releases}" -C "${bindir}"
| true
|
0afe6267f167183ad142211bbe7bc493e1df5765
|
Shell
|
imclab/archivesphere
|
/script/deploy_qa.sh
|
UTF-8
| 2,164
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
#
# deploy script for scholarsphere-qa
# Runs as the tomcat user on the QA hosts; redeploys the Rails app, restarts
# the resque pool, and warms the site with a final curl.
HHOME="/opt/heracles"
WORKSPACE="${HHOME}/archivesphere/archivesphere-qa"
RESQUE_POOL_PIDFILE="${WORKSPACE}/tmp/pids/resque-pool.pid"
# ANSI colour escape sequences used by banner() and the error messages.
DEFAULT_TERMCOLORS="\e[0m"
HIGHLIGHT_TERMCOLORS="\e[33m\e[44m\e[1m"
ERROR_TERMCOLORS="\e[1m\e[31m"
HOSTNAME=$(hostname -s)
function anywait {
    # Block until every given PID has exited (polls with kill -0 every 0.5s).
    # BUGFIX: suppress kill's stderr so we don't print "No such process" on
    # the final poll after the process has already gone away.
    for pid in "$@"; do
        while kill -0 "$pid" 2>/dev/null; do
            sleep 0.5
        done
    done
}
function banner {
    # Print a highlighted progress banner for step $1 (expands the colour
    # escape sequences stored in the *_TERMCOLORS globals).
    printf '%b\n' "${HIGHLIGHT_TERMCOLORS}=-=-=-=-= $0 ? $1 ${DEFAULT_TERMCOLORS}"
}
banner "checking username"
# Deployment must run as the tomcat service account.
[[ $(id -nu) == "tomcat" ]] || {
    echo -e "${ERROR_TERMCOLORS}*** ERROR: $0 must be run as tomcat user ${DEFAULT_TERMCOLORS}"
    exit 1
}
banner "exit if not as1qa"
# BUGFIX: the second alternative duplicated "as1qa", so ss2qa (named in the
# error message) was never accepted.
[[ $HOSTNAME == "as1qa" || $HOSTNAME == "ss2qa" ]] || {
    echo -e "${ERROR_TERMCOLORS}*** ERROR: $0 must be run on as1qa or ss2qa ${DEFAULT_TERMCOLORS}"
    exit 1
}
banner "source ${HHOME}/.bashrc"
source ${HHOME}/.bashrc
banner "source /etc/profile.d/rvm.sh"
source /etc/profile.d/rvm.sh
banner "cd ${WORKSPACE}"
cd ${WORKSPACE}
banner "source ${WORKSPACE}/.rvmrc"
source ${WORKSPACE}/.rvmrc
banner "bundle install"
bundle install
# stop Resque pool early
banner "resque-pool stop"
# SIGINT the pool and wait for the process to exit before redeploying.
[ -f $RESQUE_POOL_PIDFILE ] && {
PID=$(cat $RESQUE_POOL_PIDFILE)
kill -2 $PID && anywait $PID
}
banner "passenger-install-apache2-module -a"
passenger-install-apache2-module -a
# Database migrations run on the primary QA host only.
[[ $HOSTNAME == "as1qa" ]] && {
banner "rake db:migrate"
RAILS_ENV=production bundle exec rake db:migrate
}
banner "rake assets:precompile"
RAILS_ENV=production bundle exec rake assets:precompile
banner "resque-pool start"
bundle exec resque-pool --daemon --environment production start
banner "rake archivesphere:generate_secret"
bundle exec rake archivesphere:generate_secret
#[[ $HOSTNAME == "as1qa" ]] && {
# banner "rake archivesphere:resolrize"
# RAILS_ENV=production bundle exec rake archivesphere:resolrize
#}
# Touch restart.txt so Passenger reloads the app, then hit the site once to
# warm it; the curl status becomes this script's exit status.
banner "touch ${WORKSPACE}/tmp/restart.txt"
touch ${WORKSPACE}/tmp/restart.txt
banner "curl -s -k -o /dev/null --head https://..."
curl -s -k -o /dev/null --head https://$(hostname -f)
retval=$?
banner "finished $retval"
exit $retval
#
# end
| true
|
90394d2df3c4da273d37af83af391bb86f37ae50
|
Shell
|
Marco9412/TelegramMQTTBot
|
/build.sh
|
UTF-8
| 357
| 2.9375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Package ./app into a self-executing Python zipapp at out/TelegramMQTTBot:
# zip the sources and prepend a python3 shebang line.
EXECUTABLENAME="TelegramMQTTBot"
mkdir -p out
echo "Cleaning old files"
rm -f "out/$EXECUTABLENAME" "out/$EXECUTABLENAME.zip"
echo "Building"
# BUGFIX: abort when the app directory is missing instead of silently
# zipping the repository root from the wrong working directory.
cd app || exit 1
zip -r "../out/$EXECUTABLENAME.zip" *
cd ..
echo '#!/usr/bin/env python3' | cat - "out/$EXECUTABLENAME.zip" > "out/$EXECUTABLENAME"
chmod +x "out/$EXECUTABLENAME"
rm "out/$EXECUTABLENAME.zip"
| true
|
2482e9dff64ac61f89613eda68d7fdb8a2faf169
|
Shell
|
delicb/dotfiles
|
/scripts/debian.sh
|
UTF-8
| 639
| 3.28125
| 3
|
[] |
no_license
|
set -o errexit -o nounset
prepare() {
# Prepare a Debian 11 container: base tooling, UTC timezone, locales, and
# the openSUSE shells:fish repository for fish 3.
apt update
TZ=UTC
ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
DEBIAN_FRONTEND=noninteractive
apt install -y software-properties-common curl gnupg locales
# NOTE(review): locale-gen normally expects "en_US.UTF-8"; "en-us.UTF-8" may
# not match any entry in /etc/locale.gen — confirm this actually generates.
locale-gen en-us.UTF-8
echo 'deb http://download.opensuse.org/repositories/shells:/fish:/release:/3/Debian_11/ /' | tee /etc/apt/sources.list.d/shells:fish:release:3.list
curl -fsSL https://download.opensuse.org/repositories/shells:fish:release:3/Debian_11/Release.key | gpg --dearmor | tee /etc/apt/trusted.gpg.d/shells_fish_release_3.gpg > /dev/null
apt update
}
install_pkg() {
    # Install the given packages non-interactively.
    # BUGFIX: quote "$@" so each argument is forwarded verbatim instead of
    # being re-split on whitespace (shellcheck SC2068).
    apt install -y "$@"
}
|
69d399d9c5328a65db095580265aeee088259a89
|
Shell
|
ekinanp/scripts
|
/util/helpers.sh
|
UTF-8
| 3,141
| 3.953125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# utility routines
fjq() {
    # Rewrite file $2 in place with the output of jq filter $1.
    local filter="$1"
    local target="$2"
    jq -r --compact-output "${filter}" "${target}" > "${target}.tmp" && mv "${target}.tmp" "${target}"
}
fsed() {
    # Rewrite file $2 in place with the output of extended-regex sed script $1.
    local script="$1"
    local target="$2"
    sed -E "${script}" "${target}" > "${target}.tmp" && mv "${target}.tmp" "${target}"
}
fawk() {
    # Rewrite file $2 in place with the output of awk program $1.
    local program="$1"
    local target="$2"
    awk "${program}" "${target}" > "${target}.tmp" && mv "${target}.tmp" "${target}"
}
# read as "fawk" after first match
fawk_afm() {
# Run ${code} (awk statements) immediately after the FIRST line matching
# ${regex}, rewriting ${file} in place via fawk; later matches are ignored.
local regex="$1"
local code="$2"
local file="$3"
# Code skeleton obtained from:
# https://stackoverflow.com/questions/32007152/insert-multiple-lines-of-text-before-specific-line-using-sed
#
# TODO: Refactor this to be more "awky"
# NOTE(review): ${regex} and ${code} are interpolated into the awk program
# unescaped — a "/" in the regex or quotes in the code will break it.
fawk "
BEGIN {
matched=0
}
{ print }
/${regex}/ {
if ( matched == 0 ) {
${code}
matched=1
}
}
" "${file}"
}
repo_url() {
    # Print the SSH clone URL for $1's copy of repository $2 on GitHub.
    local owner="$1"
    local project="$2"
    printf '%s\n' "git@github.com:${owner}/${project}.git"
}
validate_arg() {
    # Exit 1 with a usage message ($1) when the required argument ($2) is
    # empty or missing; otherwise do nothing.
    local usage="$1"
    local value="$2"
    [[ -n "${value}" ]] && return 0
    echo "USAGE: ${usage}"
    exit 1
}
clone_repo() {
# Fresh-clone ${repo} from ${fork_user}'s fork into ${workspace}, removing
# any existing copy, then run ${checkout} inside the clone.
# NOTE: ${checkout} is a full command string executed unquoted (word-split).
local workspace="$1"
local fork_user="$2"
local repo="$3"
local checkout="$4"
pushd "${workspace}"
rm -rf "${repo}"
git clone `repo_url ${fork_user} ${repo}` "${repo}"
pushd "${repo}"
${checkout}
popd
popd
}
clone_clean_repo() {
# Fresh-clone ${repo} from ${fork_user}'s fork, add the puppetlabs repo as
# "upstream", and reset branch ${branch} (local and origin) to upstream's.
local workspace="$1"
local fork_user="$2"
local repo="$3"
local branch="$4"
pushd "${workspace}"
rm -rf "${repo}"
git clone `repo_url ${fork_user} ${repo}` "${repo}"
pushd "${repo}"
git remote add upstream `repo_url puppetlabs ${repo}`
git fetch upstream
git checkout -b "${branch}" "upstream/${branch}"
# Force-push so origin's branch exactly mirrors upstream.
git push --set-upstream origin "${branch}" --force
popd
popd
}
lazy_clone_clean_repo() {
    # Clone a clean copy of ${repo} only when it is not already present in
    # the given workspace.
    local workspace="$1"
    local fork_user="$2"
    local repo="$3"
    local branch="$4"
    # BUGFIX: this used to test and forward the global ${WORKSPACE} instead
    # of the ${workspace} parameter, silently ignoring the caller's argument.
    if [[ ! -d "${workspace}/${repo}" ]]; then
        clone_clean_repo "${workspace}" "${fork_user}" "${repo}" "${branch}"
    fi
}
checkout_repo() {
    # Ensure ${repo} exists under ${workspace} (cloning ${fork_user}'s fork
    # when missing), then fetch and check out the requested ref.
    local workspace="$1"
    local fork_user="$2"
    local repo="$3"
    local ref="$4"
    pushd "${workspace}"
    if [[ ! -d "${repo}" ]]; then
        git clone "$(repo_url ${fork_user} ${repo})" "${repo}"
    fi
    pushd "${repo}"
    git fetch
    git checkout "${ref}"
    popd
    popd
}
make_host_json() {
# Provision one VM of ${host_type}, trying vmpooler first then nspooler, and
# print a one-line JSON record {hostname,type,engine}. Returns 1 when
# neither pool can supply a host.
local host_type="$1"
for host_engine in "vmpooler" "nspooler"; do
# The two services report the hostname under different JSON shapes.
if [[ "${host_engine}" == "vmpooler" ]]; then
jq_cmd=".\"${host_type}\""
vm=`${VMFLOATY} get "${host_type}" 2>&1`
else
jq_cmd=".\"${host_type}\".hostname"
vm=`${VMFLOATY} get "${host_type}" --service ns 2>&1`
fi
# NOTE(review): $? here is the status of the last command run inside the
# if/else — the backtick assignment, i.e. vmfloaty's exit code. Subtle but
# it works: on failure, try the next engine.
if [[ "$?" -ne 0 ]]; then
continue
fi
host_name=`echo "${vm}" | jq -r "${jq_cmd}"`
echo "{\"hostname\":\"${host_name}\",\"type\":\"${host_type}\",\"engine\":\"${host_engine}\"}"
return 0
done
return 1
}
query_vmfloaty() {
# Run "floaty <query>" against both the default service and nspooler (ns).
# ${cmd} is expanded unquoted on purpose so the query string word-splits
# into separate arguments.
local query="$1"
local cmd="${VMFLOATY} ${query}"
${cmd} && ${cmd} --service ns
}
# Common constants that may be shared throughout scripts
# Name of the vmfloaty CLI used by make_host_json/query_vmfloaty.
VMFLOATY="floaty"
| true
|
3fa572fefae163368f7bac916231f3756694d5d2
|
Shell
|
tmtaybah/dotfiles
|
/install.sh
|
UTF-8
| 1,461
| 4.0625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Bootstrap a macOS dev environment from this dotfiles checkout.
# Usage: install.sh <home_directory>
if [ "$#" -ne 1 ]; then
    echo "Usage: install.sh <home_directory>"
    exit 1
fi
# set paths
homedir=$1
dotfiledir=${homedir}/dotfiles
# change to the dotfiles directory
echo "Changing to the ${dotfiledir} directory"
# BUGFIX: abort when the directory is missing instead of carrying on and
# running every subsequent step from the wrong working directory.
cd "${dotfiledir}" || exit 1
# install brew
echo "Installing Homebrew ..."
/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
# run the brew script (installs most applications)
echo "Installing packages using brew ..."
./brew.sh
# change shell to zsh
echo "changing shell from bash to zsh "
# BUGFIX: the path was missing the binary name ("/usr/local/bin/"), so the
# brew-installed zsh was never registered in /etc/shells.
SHELL=/usr/local/bin/zsh # brew version of zsh
echo $SHELL | sudo tee -a /etc/shells
chsh -s $(which zsh)
source ~/.zshrc
# install oh-my-zsh
echo "Installing oh-my-zsh ..."
sh -c "$(wget https://raw.githubusercontent.com/robbyrussell/oh-my-zsh/master/tools/install.sh -O -)"
# install pure theme
echo "Installing pure theme ..."
npm install --global pure-prompt
# download iterm2 one-dark theme
git clone https://github.com/anunez/one-dark-iterm.git
# list of files/folders to symlink in ${homedir}
files="zshrc vimrc"
# create symlinks (will overwrite old dotfiles)
echo "Creating symbolic link for dotfiles"
for file in ${files}; do
    echo "Creating symlink to $file in home directory."
    ln -sf "${dotfiledir}/.${file}" "${homedir}/.${file}"
done
echo "...done"
# install essential python packages
echo "Installing python essentials ... "
pip3 install -r py_essentials.txt
| true
|
9ba3be6c70eebf5ef2867b8d4cedf3426a6b2f9b
|
Shell
|
oxedions/CLI-tools
|
/Source/run-tests.sh
|
UTF-8
| 413
| 3.421875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Run each CLI tool's test suite under both Python 3 and Python 2 and
# report a coloured PASSED/FAILED line per combination.
PY2=python2.7
PY3=python3.7
function cprint {
    # Render colour-markup text via the colour_text module (always Python 3).
    "${PY3}" -m colour_text "$@"
}
# Each tool directory ships its own run-test.sh; run it once per interpreter.
for tool in colour_text colour_filter ssh_wait smart_find; do
    for interpreter in "${PY3}" "${PY2}"; do
        if "${tool}/run-test.sh" "${interpreter}"; then
            cprint "<>green PASSED<> ${interpreter} ${tool}"
        else
            cprint "<>red FAILED<> ${interpreter} ${tool}"
        fi
    done
done
| true
|
8a4075c2e5836a20ebe0c6e69a74da9b1ce771ae
|
Shell
|
craigevil/Minecraft
|
/setupMC.sh
|
UTF-8
| 2,622
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/sh -e
# Determine whether the host OS is 64-bit (arm64) or 32-bit (armhf) by
# inspecting the init binary.
if [ -n "$(file "$(readlink -f "/sbin/init")" | grep 64)" ]; then
    MACHINE='aarch64'
elif [ -n "$(file "$(readlink -f "/sbin/init")" | grep 32)" ]; then
    MACHINE='armv7l'
else
    echo "Failed to detect OS CPU architecture! Something is very wrong."
    # BUGFIX: previously execution continued with $MACHINE unset and the
    # script silently fell through to the 32-bit install path.
    exit 1
fi
DIR=~/Minecraft
# Create the working folders; mkdir -p tolerates pre-existing directories.
mkdir -p "$DIR"
cd "$DIR"
pwd
if [ "$MACHINE" = "aarch64" ]; then
    echo "Raspberry Pi OS (64 bit)"
    mkdir -p ~/lwjgl3arm64
else
    echo "Raspberry Pi OS (32 bit)"
    mkdir -p ~/lwjgl3arm32
    mkdir -p ~/lwjgl2arm32
fi
# download minecraft launcher
if [ ! -f launcher.jar ]; then
wget https://launcher.mojang.com/v1/objects/eabbff5ff8e21250e33670924a0c5e38f47c840b/launcher.jar
fi
# download java
# Fetch the prebuilt OpenJDK 16 tarball matching this architecture
# (skipped when already downloaded).
if [ "$MACHINE" = "aarch64" ]; then
if [ ! -f OpenJDK16U-jdk_aarch64_linux_hotspot_2021-05-08-12-45.tar.gz ]; then
wget https://github.com/chunky-milk/Minecraft/releases/download/2021-05-08-12-45/OpenJDK16U-jdk_aarch64_linux_hotspot_2021-05-08-12-45.tar.gz
fi
else
if [ ! -f OpenJDK16U-jdk_arm_linux_hotspot_2021-05-08-12-45.tar.gz ]; then
wget https://github.com/chunky-milk/Minecraft/releases/download/2021-05-08-12-45/OpenJDK16U-jdk_arm_linux_hotspot_2021-05-08-12-45.tar.gz
fi
fi
# download lwjgl3arm*
# LWJGL native bundles: arm64 needs lwjgl3 only; arm32 needs lwjgl3 + lwjgl2.
if [ "$MACHINE" = "aarch64" ]; then
if [ ! -f lwjgl3arm64.tar.gz ]; then
wget https://github.com/chunky-milk/Minecraft/raw/main/lwjgl3arm64.tar.gz
fi
else
if [ ! -f lwjgl3arm32.tar.gz ]; then
wget https://github.com/chunky-milk/Minecraft/raw/main/lwjgl3arm32.tar.gz
fi
if [ ! -f lwjgl2arm32.tar.gz ]; then
wget https://github.com/chunky-milk/Minecraft/raw/main/lwjgl2arm32.tar.gz
fi
fi
# Extract the downloaded JDK (OpenJDK 16 — the old "oracle java 8" comment
# predated the JDK switch) into /opt, plus OpenJDK 11 for the launcher.
echo Extracting java ...
if [ "$MACHINE" = "aarch64" ]; then
sudo tar -zxf OpenJDK16U-jdk_aarch64_linux_hotspot_2021-05-08-12-45.tar.gz -C /opt
# install opnjdk for launcher.jar and optifine install
sudo apt install openjdk-11-jdk -y
else
sudo tar -zxf OpenJDK16U-jdk_arm_linux_hotspot_2021-05-08-12-45.tar.gz -C /opt
# install openjdk for launcher and optifine if needed
sudo apt install openjdk-11-jdk -y
fi
# extract lwjgl*
echo Extracting lwjgl...
if [ "$MACHINE" = "aarch64" ]; then
tar -zxf lwjgl3arm64.tar.gz -C ~/lwjgl3arm64
else
tar -zxf lwjgl3arm32.tar.gz -C ~/lwjgl3arm32
tar -zxf lwjgl2arm32.tar.gz -C ~/lwjgl2arm32
fi
echo end setupMC
| true
|
5753b170baaeb49cda5386c3ef64224240e7400e
|
Shell
|
lucharo/HDATDS
|
/BuildConvert.sh
|
UTF-8
| 757
| 3.28125
| 3
|
[
"MIT"
] |
permissive
|
# Build the conda package, convert it for all platforms, and upload the
# linux builds to Anaconda.
OUTPUT="$(conda build .)"
echo "${OUTPUT}" > build.txt
# Extract the built package path from the "TEST START: <path>" log line:
# grep selects the line, sed strips everything up to the final ": ".
storepath="$(grep 'TEST START:' build.txt | sed 's/^.*: //')"
# NOTE(review): assumes the package path has exactly six '/'-separated
# components; fragile if the conda-bld location ever changes.
filename="$(echo $storepath | awk -F'/' '{print $6}')"
# grep 'potato:' file.txt | sed 's/^.*: //'
# grep looks for any line that contains the string potato:, then, for each of these lines, # sed replaces (s/// - substitute) any character (.*) from the beginning of the line #(^)until the last occurrence of the sequence : (colon followed by space) with the empty #string (s/...// - substitute the first part with the second part, which is empty)
conda convert --platform all $storepath -o /opt/anaconda3/conda-bld
for VARIABLE in linux-64 linux-32
do
anaconda upload -i /opt/anaconda3/conda-bld/$VARIABLE/$filename
done
rm build.txt
|
832c4511f8a6898f8f3e38a2c9195072f8c276c5
|
Shell
|
iamfauz/cpen442-miner
|
/verify_coin.sh
|
UTF-8
| 524
| 3.53125
| 4
|
[] |
no_license
|
#! /bin/bash
# Recompute a CPEN 442 coin's MD5 proof hash from the miner id, the base64
# coin blob, and the previous coin hash, and print it for verification.
echo "Verify a Coin Script"
# Both helper tools must be available.
if ! command -v base64 > /dev/null; then
    echo "base64 command not found!"
    exit 1
fi
if ! command -v md5sum > /dev/null; then
    echo "md5sum command not found!"
    exit 1
fi
# BUGFIX: use read -r so backslashes in pasted values stay literal (SC2162).
read -rp "Miner ID String:" minerid
read -rp "Coin Base64 Blob:" coinblob
read -rp "Previous Coin:" previouscoin
# hash = md5(preamble + previous coin + decoded blob + miner id)
cat <(printf 'CPEN 442 Coin2019') \
    <(printf '%s' "$previouscoin") \
    <(printf '%s' "$coinblob" | base64 -d) \
    <(printf '%s' "$minerid") |
    md5sum |
    cut -d ' ' -f 1
| true
|
87ef6cc750d21445ee259ec00e4931dc11a53d62
|
Shell
|
chralp/splashr
|
/splashr.sh
|
UTF-8
| 1,604
| 3.890625
| 4
|
[] |
no_license
|
#!/bin/bash
# splashr: fetch random Unsplash wallpapers, rotate the desktop background,
# and keep the i3lock/login images in sync.
# Settings
collection=https://source.unsplash.com/random
# Folders
imgFolder=".splashr"
rootFolder=$HOME/$imgFolder
i3lockFolder="i3lock/"
bgFolder="bg/"
loginFolder="/usr/share/images/login"
tempFolder="temp"
function downloadImages {
# Fetch 15 random wallpapers at the screen's resolution; jpegs land in
# temp/ (for later png conversion) and are copied to bg/ for backgrounds.
# Get resolution for search. So we only get image that fits screen
size=$(xdpyinfo | grep dimensions | awk '{print $2}')
for i in {1..15};
do
# Setting sleep 5 sec here, so we get a better chance of random image.
sleep 5;
curl $collection/$size --location --output $rootFolder/$tempFolder/$i.jpeg
cp $rootFolder/$tempFolder/$i.jpeg $rootFolder/$bgFolder/$i.jpeg
done
}
function convertImages {
    # Convert every downloaded jpeg to png (the format i3lock needs) and
    # move the results into the i3lock folder.
    find "$rootFolder/$tempFolder/." -name "*.jpeg" -exec mogrify -format png {} \;
    mv "$rootFolder/$tempFolder"/*.png "$rootFolder/$i3lockFolder/"
}
function changeBackground {
    # Pick one random downloaded wallpaper and set it as the X background.
    local pick
    pick=$(find "$rootFolder/$bgFolder/" -name "*.jpeg" | shuf -n1)
    DISPLAY=:0 feh --bg-scale "$pick"
}
function checkLoginBackground {
    # Refresh the login-screen wallpaper when it is missing or over a day old.
    # BUGFIX: when login.jpeg did not exist yet, find errored out and the
    # login image was never initialised; a missing file now counts as stale.
    if [ ! -f "$loginFolder/login.jpeg" ] || [ "$(find "$loginFolder/login.jpeg" -mtime +1 | wc -l)" -gt 0 ]; then
        cp "$(find "$rootFolder/$bgFolder/" -name "*.jpeg" | shuf -n1)" "$loginFolder/login.jpeg"
        exit
    fi
}
# First run: create the folder tree and populate it.
if [ ! -d "$rootFolder" ]; then
mkdir $rootFolder
mkdir $rootFolder/$i3lockFolder
mkdir $rootFolder/$bgFolder
mkdir $rootFolder/$tempFolder
downloadImages
convertImages
changeBackground
exit
fi
# Refresh the image pool when the cached downloads are older than two days.
if [ $(find "$rootFolder/$tempFolder" -mtime +2 | wc -l) -gt 0 ]; then
downloadImages
convertImages
changeBackground
exit
fi
# Otherwise just rotate the background and maybe update the login image.
changeBackground
checkLoginBackground
exit
| true
|
33c272293faae0d02473eeb069cd51846bbff707
|
Shell
|
mike-zueff/euphoria
|
/lib/lib_kernel.sh
|
UTF-8
| 661
| 3.28125
| 3
|
[
"Apache-2.0"
] |
permissive
|
function kernel_assert
{
# Assertion dispatcher: ${1} = subject (e.g. a process pattern), ${2} = the
# check kind. Active only when E_ASSERTS_ENABLED equals E_LITERAL_TRUE.
logger_tee_function_begin
if [[ ${E_ASSERTS_ENABLED} == ${E_LITERAL_TRUE} ]]
then
case "${2}" in
"is_not_in_top" )
# Fail when this user already runs a process matching ${1}.
[[ `pgrep --full --uid ${UID} "${1}"` ]] && kernel_assert_exit "${*}"
;;
esac
fi
logger_tee_function_end
}
function kernel_assert_exit
{
    # Log the failed assertion ("subject", "check-kind") and terminate.
    logger_tee_function_begin
    logger_tee_notice "Assert: \"${1}\", \"${2}\"."
    exit ${E_EXIT_STATUS_FAILURE}
    # BUGFIX: removed an unreachable logger_tee_function_end after exit.
}
function kernel_handle_interrupt_ctrl_c
{
# SIGINT handler: log the interrupt and shut the application down cleanly.
logger_tee_function_begin
logger_tee_notice "Catched Ctrl-c interrupt."
ipc_terminate_euphoria ${E_EXIT_STATUS_SUCCESS}
# NOTE(review): if ipc_terminate_euphoria exits the process, the line below
# never runs — confirm whether the end marker is expected in the log.
logger_tee_function_end
}
| true
|
aeb60c50dc9583f46da504fb0e36af4b56b9601c
|
Shell
|
Hughes-Genome-Group/CCseqBasicM
|
/bin/serial/subroutines/hubbers.sh
|
UTF-8
| 55,961
| 3.359375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
##########################################################################
# Copyright 2017, Jelena Telenius (jelena.telenius@imm.ox.ac.uk) #
# #
# This file is part of CCseqBasic5 . #
# #
# CCseqBasic5 is free software: you can redistribute it and/or modify #
# it under the terms of the MIT license.
#
#
# #
# CCseqBasic5 is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# MIT license for more details.
# #
# You should have received a copy of the MIT license
# along with CCseqBasic5.
##########################################################################
doTrackExist(){
# Set the global trackExists to the number of bigDataUrl entries in the
# sample's tracks.txt whose path ends in ${fileName} (0 when absent).
# NEEDS THESE TO BE SET BEFORE CALL :
#trackName=""
if [ -s "${publicPathForCCanalyser}/${sampleForCCanalyser}_${CCversion}_tracks.txt" ]; then
# NOTE(review): the grep pipeline is written into a throwaway ./temp.command
# script and executed — a plain command substitution would also work.
echo -e "grep bigDataUrl ${publicPathForCCanalyser}/${sampleForCCanalyser}_${CCversion}_tracks.txt | grep -c \"${fileName}\$\" " > temp.command
chmod u=rwx temp.command
trackExists=$(( $(./temp.command) ))
rm -f temp.command
else
trackExists=0
fi
}
doMultiWigParent(){
    # Append a multiWig container ("parent") track stanza to TEMP2_tracks.txt.
    # Expects these globals to be set by the caller:
    #   trackName, longLabel, overlayType, windowingFunction, visibility
    {
        echo ""
        echo "#--------------------------------------"
        echo ""
        echo "track ${trackName}"
        echo "container multiWig"
        echo "shortLabel ${trackName}"
        echo "longLabel ${longLabel}"
        echo "type bigWig"
        echo "visibility ${visibility}"
        echo "aggregate ${overlayType}"
        echo "showSubtrackColorOnUi on"
        echo "windowingFunction ${windowingFunction}"
        echo "configurable on"
        echo "dragAndDrop subtracks"
        echo "autoScale on"
        echo "alwaysZero on"
        echo ""
    } >> TEMP2_tracks.txt
}
doMultiWigChild(){
    # Append one bigWig subtrack stanza to TEMP2_tracks.txt, but only when
    # the data file exists and is non-empty. Expects these globals:
    #   parentTrack, trackName, fileName, trackColor, trackPriority,
    #   bigWigSubfolder, PublicPath, ServerAndPath, JamesUrl, Sample, CCversion
    if [ -s "${PublicPath}/${bigWigSubfolder}/${fileName}" ]; then
        {
            echo "track ${trackName}"
            echo "parent ${parentTrack}"
            echo "bigDataUrl ${ServerAndPath}/${bigWigSubfolder}/${fileName}"
            echo "shortLabel ${trackName}"
            echo "longLabel ${trackName}"
            echo "type bigWig"
            echo "color ${trackColor}"
            echo "html http://${JamesUrl}/${Sample}_${CCversion}_description"
            echo "priority ${trackPriority}"
            echo ""
        } >> TEMP2_tracks.txt
    else
        echo "Cannot find track ${PublicPath}/${bigWigSubfolder}/${fileName} - not writing it into ${parentTrack} track " >> "/dev/stderr"
    fi
}
doOneColorChild(){
    # Append a per-capture-site subtrack stanza to TEMP2_tracks.txt.
    # $file is "<name>_<colourNumber>.bw"; uses globals file, parentname,
    # trackPriority, Sample, CCversion, and the color[] palette (setColors).
    # Name: the capture-site (REfragment) name without "_<number>.bw".
    # This parse works upto 99 colors.
    name=$(echo $file | sed 's/_.\.bw//' | sed 's/_..\.bw//')
    # BUGFIX: the number extraction used a malformed sed expression
    # ('s\.bw///' — '\' taken as the delimiter, a sed syntax error);
    # strip the ".bw" suffix correctly before isolating the number.
    number=$(echo $file | sed 's/\.bw//' | sed 's/.*_//' )
    echo track ${name} >> TEMP2_tracks.txt
    echo parent ${parentname} >> TEMP2_tracks.txt
    echo bigDataUrl ${file} >> TEMP2_tracks.txt
    echo shortLabel ${name} >> TEMP2_tracks.txt
    echo longLabel ${name} >> TEMP2_tracks.txt
    echo type bigWig >> TEMP2_tracks.txt
    # BUGFIX: the colour index used integer DIVISION (number/paletteSize),
    # which is 0 for every number below the palette size; cycle through the
    # 1-based palette with modulo instead.
    # NOTE(review): confirm cycling (not clamping) is the intended mapping.
    echo color ${color[ (number - 1) % ${#color[@]} + 1 ]} >> TEMP2_tracks.txt
    echo html ${Sample}_${CCversion}_description >> TEMP2_tracks.txt
    echo priority ${trackPriority} >> TEMP2_tracks.txt
    echo >> TEMP2_tracks.txt
}
setColors(){
    # Fill the global color[] palette (indices 1..19) with "R,G,B" strings.
    # BUGFIX: bash array assignments must not have spaces around '=';
    # "color[1] = '...'" is executed as a command (and fails), leaving the
    # palette empty for every caller.
    color[1]='162,57,91'    # violet
    color[2]='193,28,23'    # red
    color[3]='222,80,3'     # orange
    color[4]='226,122,29'
    color[5]='239,206,16'   # yellow
    color[6]='172,214,42'   # green
    color[7]='76,168,43'
    color[8]='34,139,34'
    color[9]='34,159,110'
    color[10]='32,178,170'  # turquoise
    color[11]='96,182,202'  # blue
    color[12]='127,145,195'
    color[13]='87,85,151'   # violet
    color[14]='80,46,114'
    color[15]='128,82,154'
    color[16]='166,112,184'
    color[17]='166,80,160'
    color[18]='166,53,140'
    color[19]='166,53,112'
}
doRegularTrack(){
# Append one standalone bigWig/bigBed track stanza to the sample's
# tracks.txt — only when the data file is non-empty and the track is not
# already listed (checked via doTrackExist).
# NEEDS THESE TO BE SET BEFORE CALL
# trackName=""
# longLabel=""
# fileName=".bw"
# trackColor=""
# trackPriority=""
# visibility=""
# trackType="bb" "bw"
# Is this track already written to the tracks.txt file?
doTrackExist
if [ "${trackExists}" -eq 0 ] ; then
# Does this track have data file which has non-zero size?
if [ -s "${publicPathForCCanalyser}/${fileName}" ] ; then
echo "" >> ${publicPathForCCanalyser}/${sampleForCCanalyser}_${CCversion}_tracks.txt
echo "#--------------------------------------" >> ${publicPathForCCanalyser}/${sampleForCCanalyser}_${CCversion}_tracks.txt
echo "" >> ${publicPathForCCanalyser}/${sampleForCCanalyser}_${CCversion}_tracks.txt
echo "track ${trackName}" >> ${publicPathForCCanalyser}/${sampleForCCanalyser}_${CCversion}_tracks.txt
# These are super long paths. using relative paths instead !
#echo "bigDataUrl ${ServerAndPath}/${bigWigSubfolder}/${fileName}" | sed 's/\/\//\//g' >> ${publicPathForCCanalyser}/${sampleForCCanalyser}_${CCversion}_tracks.txt
echo "bigDataUrl ${fileName}" >> ${publicPathForCCanalyser}/${sampleForCCanalyser}_${CCversion}_tracks.txt
echo "shortLabel ${trackName}" >> ${publicPathForCCanalyser}/${sampleForCCanalyser}_${CCversion}_tracks.txt
echo "longLabel ${longLabel}" >> ${publicPathForCCanalyser}/${sampleForCCanalyser}_${CCversion}_tracks.txt
# Track type: "bb" = bigBed, "bb4" = bigBed with item names, default bigWig.
if [ "${trackType}" = "bb" ] ; then
echo "type bigBed" >> ${publicPathForCCanalyser}/${sampleForCCanalyser}_${CCversion}_tracks.txt
# If we want item name as well.."
elif [ "${trackType}" = "bb4" ] ; then
echo "type bigBed 4" >> ${publicPathForCCanalyser}/${sampleForCCanalyser}_${CCversion}_tracks.txt
# Defaults to "bw"
else
echo "type bigWig" >> ${publicPathForCCanalyser}/${sampleForCCanalyser}_${CCversion}_tracks.txt
#echo "color ${trackColor}" >> ${publicPathForCCanalyser}/${sampleForCCanalyser}_${CCversion}_tracks.txt
fi
echo "color ${trackColor}" >> ${publicPathForCCanalyser}/${sampleForCCanalyser}_${CCversion}_tracks.txt
echo "visibility ${visibility}" >> ${publicPathForCCanalyser}/${sampleForCCanalyser}_${CCversion}_tracks.txt
echo "priority ${trackPriority}" >> ${publicPathForCCanalyser}/${sampleForCCanalyser}_${CCversion}_tracks.txt
echo "windowingFunction maximum" >> ${publicPathForCCanalyser}/${sampleForCCanalyser}_${CCversion}_tracks.txt
echo "autoScale on" >> ${publicPathForCCanalyser}/${sampleForCCanalyser}_${CCversion}_tracks.txt
echo "alwaysZero on" >> ${publicPathForCCanalyser}/${sampleForCCanalyser}_${CCversion}_tracks.txt
echo "" >> ${publicPathForCCanalyser}/${sampleForCCanalyser}_${CCversion}_tracks.txt
else
echo "TRACK DESCRIPTION NOT CREATED - track ${trackName} does not have size in ${publicPathForCCanalyser}/${fileName}" >> "/dev/stderr"
fi
else
echo -n ""
# echo "TRACK DESCRIPTION NOT CREATED - track ${trackName} already exists in ${publicPathForCCanalyser}/${sampleForCCanalyser}_${CCversion}_tracks.txt"
fi
}
copyPreCCanalyserLogFilesToPublic(){
# Copies the pre-CCanalyser stage logs (fastqc report folders, trimming /
# flashing / RE-digestion / bowtie logs) from folder
# F1_beforeCCanalyser_<Sample>_<CCversion> into "<PublicPath>/<Sample>_logFiles".
# Relies on globals : Sample, CCversion, PublicPath, REenzyme, CapturesiteFile,
# runDir - and on helpers checkCdSafety / printToLogFile (defined elsewhere).
# Go into output folder..
cdCommand='cd F1_beforeCCanalyser_${Sample}_${CCversion}'
cdToThis="F1_beforeCCanalyser_${Sample}_${CCversion}"
checkCdSafety
cd F1_beforeCCanalyser_${Sample}_${CCversion}
# Making a public folder for log files
printThis="Making a public folder for log files"
printToLogFile
mkdir -p "${PublicPath}/${Sample}_logFiles"
# Copying log files
printThis="Copying log files to public folder"
printToLogFile
cp -rf READ1_fastqc_ORIGINAL "${PublicPath}/${Sample}_logFiles"
cp -rf READ2_fastqc_ORIGINAL "${PublicPath}/${Sample}_logFiles"
cp -rf READ1_fastqc_TRIMMED "${PublicPath}/${Sample}_logFiles"
cp -rf READ2_fastqc_TRIMMED "${PublicPath}/${Sample}_logFiles"
cp -rf FLASHED_fastqc "${PublicPath}/${Sample}_logFiles"
cp -rf NONFLASHED_fastqc "${PublicPath}/${Sample}_logFiles"
cp -rf FLASHED_REdig_fastqc "${PublicPath}/${Sample}_logFiles"
cp -rf NONFLASHED_REdig_fastqc "${PublicPath}/${Sample}_logFiles"
# The fastqc stderr stream is published under a .log name; flash's out.hist
# is published as flash.hist.
cp -f ./fastqcRuns.err "${PublicPath}/${Sample}_logFiles/fastqcRuns.log"
cp -f ./bowties.log "${PublicPath}/${Sample}_logFiles/."
cp -f ./read_trimming.log "${PublicPath}/${Sample}_logFiles/."
cp -f ./flashing.log "${PublicPath}/${Sample}_logFiles/."
cp -f ./out.hist "${PublicPath}/${Sample}_logFiles/flash.hist"
cp -f ./NONFLASHED_${REenzyme}digestion.log "${PublicPath}/${Sample}_logFiles/."
cp -f ./FLASHED_${REenzyme}digestion.log "${PublicPath}/${Sample}_logFiles/."
# Publish the capture-site file : first 4 columns, with "chr" prefixed to
# the chromosome column.
cat ${CapturesiteFile} | cut -f 1-4 | awk '{print $1"\tchr"$2"\t"$3"\t"$4}' > "${PublicPath}/${Sample}_logFiles/usedCapturesiteFile.txt"
# Back to the run folder (checkCdSafety guards the cd target).
cdCommand='cd ${runDir}'
cdToThis="${runDir}"
checkCdSafety
cd ${runDir}
}
copyFastqSummaryLogFilesToPublic(){
# Copies the fastq summary stage outputs (multiqc reports, fastq round success
# log, multiQC run logs, the parsed capture-site file, and - for tiled runs -
# the preliminary bam count reports) into the sample's public folder.
# Relies on globals : PublicPath, Sample, CCversion, CapturesiteFile, TILED,
# B_FOLDER_BASENAME - and on printToLogFile (defined elsewhere).
# Making a public folder for log files
# Fixed : this message previously said "bam combine log files" - it had been
# swapped with the message in copyBamCombiningLogFilesToPublic.
printThis="Making a public folder for fastq summary log files"
printToLogFile
mkdir -p "${PublicPath}/${Sample}_logFiles"
# Copying log files
printThis="Copying log files to public folder"
printToLogFile
# Capture-site file : first 4 columns, "chr" prefixed to the chromosome column.
cat ${CapturesiteFile} | cut -f 1-4 | awk '{print $1"\tchr"$2"\t"$3"\t"$4}' > "${PublicPath}/${Sample}_logFiles/usedCapturesiteFile.txt"
cp -fr ../${B_FOLDER_BASENAME}/multiqcReports "${PublicPath}/."
cp -f ../${B_FOLDER_BASENAME}/fastqRoundSuccess.log "${PublicPath}/."
cp -f ../${B_FOLDER_BASENAME}/multiQCrunlog.err "${PublicPath}/."
cp -f ../${B_FOLDER_BASENAME}/multiQCrunlog.out "${PublicPath}/."
if [ "${TILED}" -eq 1 ]; then
cp -f FLASHED_REdig_report11_${CCversion}.txt "${PublicPath}/."
cp -f NONFLASHED_REdig_report11_${CCversion}.txt "${PublicPath}/."
fi
}
copyBamCombiningLogFilesToPublic(){
# Copies the bam combining stage outputs (bam listing folder, combine success
# log) into the public folder, and publishes each capture-site bunch file with
# "chr" prefixed to the chromosome column.
# Relies on globals : PublicPath, C_FOLDER_BASENAME - and on printToLogFile
# (defined elsewhere).
# Making a public folder for log files
# Fixed : this message previously said "fastq summary log files" - it had been
# swapped with the message in copyFastqSummaryLogFilesToPublic.
printThis="Making a public folder for bam combine log files"
printToLogFile
mkdir -p "${PublicPath}"
# Copying log files
printThis="Copying log files to public folder"
printToLogFile
cp -fr ../${C_FOLDER_BASENAME}/bamlistings "${PublicPath}/."
cp -f ../${C_FOLDER_BASENAME}/bamcombineSuccess.log "${PublicPath}/."
mkdir -p "${PublicPath}/CAPTURESITEbunches"
for capturesiteTxtFile in ../CAPTURESITEbunches/DIVIDEDcapturesites/capturesitefile_sorted_BUNCH_*.txt
do
TEMPbasename=$(basename ${capturesiteTxtFile})
cat ${capturesiteTxtFile} | cut -f 1-4 | awk '{print $1"\tchr"$2"\t"$3"\t"$4}' > "${PublicPath}/CAPTURESITEbunches/${TEMPbasename}"
done
}
updateHub_part2c(){
# Links the description html page to every existing track : inserts an
# "html <url>/<Sample>_description" line after each "alwaysZero on" line in
# the sample's tracks.txt.
# Relies on globals : JamesUrl, Sample, publicPathForCCanalyser,
# sampleForCCanalyser, CCversion. Requires GNU sed (-i, \n in replacement).
# Escape the slashes in the url so it can live inside a sed s/// replacement.
seddedUrl=$( echo ${JamesUrl} | sed 's/\//\\\//g' )
# Log the exact command (the original wrote it to an executable temp.command
# file and ran it; running sed directly avoids relying on an executable cwd
# and a non-colliding temp file name).
echo "sed -i 's/alwaysZero on/alwaysZero on\nhtml http\:\/\/${seddedUrl}\/${Sample}_description/' ${publicPathForCCanalyser}/${sampleForCCanalyser}_${CCversion}_tracks.txt "
sed -i 's/alwaysZero on/alwaysZero on\nhtml http\:\/\/'"${seddedUrl}"'\/'"${Sample}"'_description/' "${publicPathForCCanalyser}/${sampleForCCanalyser}_${CCversion}_tracks.txt"
}
updateHub_part3(){
# Tell the user where the generated data hub lives. The prefix before the
# first underscore of the sample name decides the layout : the intermediate
# stages (RAW / PREfiltered / FILTERED / COMBINED) live in a stage-named
# subfolder, anything else sits directly in the folder root.
TEMPname=$( echo ${sampleForCCanalyser} | sed 's/_.*//' )
echo
case "${TEMPname}" in
RAW|PREfiltered|FILTERED|COMBINED)
echo "Generated a data hub in : ${ServerAndPath}/${TEMPname}/${sampleForCCanalyser}_${CCversion}_hub.txt"
;;
*)
echo "Generated a data hub in : ${ServerAndPath}/${sampleForCCanalyser}_${CCversion}_hub.txt"
;;
esac
echo 'How to load this hub to UCSC : http://userweb.molbiol.ox.ac.uk/public/telenius/CaptureCompendium/CCseqBasic/DOCS/HUBtutorial_AllGroups_160813.pdf'
}
updateHub_part3p(){
# Report the location of the generated description html page.
printf 'Generated a html page in : %s/%s_description.html\n' "${ServerAndPathForPrintingOnly}" "${sampleForCCanalyser}"
}
updateHub_part3final(){
# Print the locations of all generated stage-wise data hubs (RAW,
# PREfiltered, FILTERED, COMBINED) plus the catenated hub of all of them,
# and a pointer to the UCSC hub-loading tutorial.
cat <<ENDOFHUBLISTING

Generated a data hub for RAW data in : ${ServerAndPath}/RAW/RAW_${Sample}_${CCversion}_hub.txt
( pre-filtered data for DEBUGGING purposes is here : ${ServerAndPath}/PREfiltered/PREfiltered_${Sample}_${CCversion}_hub.txt )
Generated a data hub for FILTERED data in : ${ServerAndPath}/FILTERED/FILTERED_${Sample}_${CCversion}_hub.txt
Generated a data hub for FILTERED flashed+nonflashed combined data in : ${ServerAndPath}/COMBINED/COMBINED_${Sample}_${CCversion}_hub.txt

Generated a COMBINED data hub (of all the above) in : ${ServerAndPath}/${Sample}_${CCversion}_hub.txt
How to load this hub to UCSC : http://userweb.molbiol.ox.ac.uk/public/telenius/CaptureCompendium/CCseqBasic/DOCS/HUBtutorial_AllGroups_160813.pdf
ENDOFHUBLISTING
}
updateHub_part3pfinal(){
# Report the location of the generated summary html page.
printf 'Generated a summary html page in : %s/%s_description.html\n' "${ServerAndPathForPrintingOnly}" "${sampleForCCanalyser}"
}
updateCCanalyserReportsToPublic(){
# Copies the CCanalyser run reports (report, report2, report3, report4 - each
# for FLASHED, NONFLASHED and COMBINED reads) into the public log file folder.
# Only reports which exist and are non-empty are copied.
# Relies on globals : publicPathForCCanalyser, sampleForCCanalyser, CCversion.
#
# Fixed : the hand-unrolled original guarded the NONFLASHED_REdig_report copy
# with a test on the FLASHED_REdig_report file; each copy is now guarded by
# the existence of its own file.
mkdir -p ${publicPathForCCanalyser}/${sampleForCCanalyser}_logFiles
local reportSuffix
local readType
local reportFile
for reportSuffix in "" 2 3 4
do
for readType in FLASHED_REdig NONFLASHED_REdig COMBINED
do
# File names follow the pattern <readType>_report<N>_<CCversion>.txt,
# where N is empty for the first report.
reportFile="${sampleForCCanalyser}_${CCversion}/${readType}_report${reportSuffix}_${CCversion}.txt"
if [ -s "${reportFile}" ]
then
cp -f "${reportFile}" "${publicPathForCCanalyser}/${sampleForCCanalyser}_logFiles/."
fi
done
done
}
updateCCanalyserDataHub(){
printThis="Updating the public folder with analysis log files.."
printToLogFile
temptime=$( date +%d%m%y )
updateCCanalyserReportsToPublic
#samForCCanalyser="F1_${Sample}_pre${CCversion}/Combined_reads_REdig.sam"
#samBasename=$( echo ${samForCCanalyser} | sed 's/.*\///' | sed 's/\_FLASHED.sam$//' | sed 's/\_NONFLASHED.sam$//' )
# Make the bigbed file from the bed file of capture-site (REfragment) coordinates and used exlusions ..
tail -n +2 "${CapturesiteFile}" | sort -T $(pwd) -k1,1 -k2,2n > tempBed.bed
bedOrigName=$( echo "${CapturesiteFile}" | sed 's/\..*//' | sed 's/.*\///' )
bedname=$( echo "${CapturesiteFile}" | sed 's/\..*//' | sed 's/.*\///' | sed 's/^/'${Sample}'_/' )
# Capturesite coordinates
tail -n +2 "${sampleForCCanalyser}_${CCversion}/${bedOrigName}.bed" | awk 'NR%2==1' | sort -T $(pwd) -k1,1 -k2,2n > tempBed.bed
bedToBigBed -type=bed9 tempBed.bed ${ucscBuild} "${sampleForCCanalyser}_${CCversion}/${bedname}_capturesite.bb"
rm -f tempBed.bed
# Exclusion fragments
tail -n +2 "${sampleForCCanalyser}_${CCversion}/${bedOrigName}.bed" | awk 'NR%2==0' | sort -T $(pwd) -k1,1 -k2,2n > tempBed.bed
bedToBigBed -type=bed9 tempBed.bed ${ucscBuild} "${sampleForCCanalyser}_${CCversion}/${bedname}_exclusion.bb"
rm -f tempBed.bed
thisLocalData="${sampleForCCanalyser}_${CCversion}"
thisLocalDataName='${sampleForCCanalyser}_${CCversion}'
isThisLocalDataParsedFineAndMineToMeddle
thisPublicFolder="${publicPathForCCanalyser}"
thisPublicFolderName='${publicPathForCCanalyser}'
isThisPublicFolderParsedFineAndMineToMeddle
mv -f "${sampleForCCanalyser}_${CCversion}/${bedname}_capturesite.bb" ${publicPathForCCanalyser}
mv -f "${sampleForCCanalyser}_${CCversion}/${bedname}_exclusion.bb" ${publicPathForCCanalyser}
fileName=$( echo ${publicPathForCCanalyser}/${bedname}_capturesite.bb | sed 's/^.*\///' )
trackName=$( echo ${fileName} | sed 's/\.bb$//' )
longLabel="${trackName}_coordinates"
trackColor="133,0,122"
trackPriority="1"
visibility="full"
trackType="bb"
doRegularTrack
fileName=$( echo ${publicPathForCCanalyser}/${bedname}_exclusion.bb | sed 's/^.*\///' )
trackName=$( echo ${fileName} | sed 's/\.bb$//' )
longLabel="${trackName}_coordinates"
trackColor="133,0,0"
trackPriority="2"
visibility="full"
trackType="bb"
doRegularTrack
# Add the missing tracks - if the hub was not generated properly in the perl..
for file in ${publicPathForCCanalyser}/*.bw
do
fileName=$( echo ${file} | sed 's/^.*\///' )
trackName=$( echo ${fileName} | sed 's/\.bw$//' )
longLabel=${trackName}
trackColor="0,0,0"
trackPriority="200"
visibility="hide"
trackType="bw"
bigWigSubfolder=${bigWigSubfolder}
doRegularTrack
done
updateHub_part2c
if [ -s ${runDir}/${sampleForCCanalyser}_${CCversion}/FLASHED_REdig_report_${CCversion}.txt ]
then
echo "##############################################"
echo "Report of FLASHED reads CCanalyser results :"
echo
cat "${runDir}/${sampleForCCanalyser}_${CCversion}/FLASHED_REdig_report_${CCversion}.txt"
echo
fi
if [ -s ${runDir}/${sampleForCCanalyser}_${CCversion}/NONFLASHED_REdig_report_${CCversion}.txt ]
then
echo "##############################################"
echo "Report of NONFLASHED reads CCanalyser results :"
echo
cat "${runDir}/${sampleForCCanalyser}_${CCversion}/NONFLASHED_REdig_report_${CCversion}.txt"
echo
fi
if [ -s ${runDir}/${sampleForCCanalyser}_${CCversion}/COMBINED_report_${CCversion}.txt ]
then
echo "##############################################"
echo "Report of COMBINED reads CCanalyser results :"
echo
cat "${runDir}/${sampleForCCanalyser}_${CCversion}/COMBINED_report_${CCversion}.txt"
echo
fi
updateHub_part3
}
generateSummaryCounts(){
# Builds the statistics summary counts for the run : creates folder
# F7_summaryFigure_<Sample>_<CCversion> and runs the counts script there,
# writing its output to counts.py (published later as counts.txt).
# Relies on globals : Sample, CCversion, REenzyme, runDir, CapturePlotPath,
# countsFromCCanalyserScriptname - and on helpers checkCdSafety /
# printToLogFile (defined elsewhere).
printThis="Generating summary counts for the data.."
printToLogFile
beforeSummaryCountDir=$( pwd )
mkdir F7_summaryFigure_${Sample}_${CCversion}
cdCommand='cd F7_summaryFigure_${Sample}_${CCversion}'
cdToThis="F7_summaryFigure_${Sample}_${CCversion}"
checkCdSafety
cd F7_summaryFigure_${Sample}_${CCversion}
# Marker file explaining the folder's purpose to anyone browsing the run dir.
echo "summaryFigure is folder where the statistics summary counts (and possibly also a summary figure) is generated" > a_folder_containing_statistics_summary_counts
# Log the exact command line to stderr, then run it.
echo "${CapturePlotPath}/${countsFromCCanalyserScriptname} ${runDir} ${Sample} ${CCversion} ${REenzyme}" >> "/dev/stderr"
${CapturePlotPath}/${countsFromCCanalyserScriptname} ${runDir} ${Sample} ${CCversion} ${REenzyme} > counts.py
# Return to where we started from.
cdCommand='cd ${beforeSummaryCountDir}'
cdToThis="${beforeSummaryCountDir}"
checkCdSafety
cd ${beforeSummaryCountDir}
}
generateSummaryFigure(){
# Builds the summary figure : runs the percentages and figure python scripts
# inside F7_summaryFigure_<Sample>_<CCversion>, stores the png dimensions in
# the global figureDimensions (used by the description html), and publishes
# summary.png and counts.txt to the public folder.
# Relies on globals : Sample, CCversion, CapturePlotPath,
# percentagesFromCountsScriptname, figureFromPercentagesScriptname,
# publicPathForCCanalyser - and on checkCdSafety / printToLogFile.
printThis="Generating summary figure for the data.."
printToLogFile
beforeSummaryCountDir=$( pwd )
cdCommand='cd F7_summaryFigure_${Sample}_${CCversion}'
cdToThis="F7_summaryFigure_${Sample}_${CCversion}"
checkCdSafety
cd F7_summaryFigure_${Sample}_${CCversion}
printThis="python ${CapturePlotPath}/${percentagesFromCountsScriptname}"
printToLogFile
python ${CapturePlotPath}/${percentagesFromCountsScriptname} > percentages.txt 2> percentages.log
printThis="python ${CapturePlotPath}/${figureFromPercentagesScriptname}"
printToLogFile
# Surface the script logs both in stdout and stderr streams.
cat percentages.log
cat percentages.log >> "/dev/stderr"
python ${CapturePlotPath}/${figureFromPercentagesScriptname} 2> figure.log
cat figure.log
cat figure.log >> "/dev/stderr"
# Parse 'width="W" height="H"' out of the file(1) description of summary.png,
# ready for direct use inside the <img> tag of the description html.
figureDimensions=$( file summary.png | sed 's/\s//g' | tr ',' '\t' | cut -f 2 | sed 's/^/width=\"/' | sed 's/x/\" height=\"/' | sed 's/$/\"/' )
cp summary.png ${publicPathForCCanalyser}/.
cp counts.py ${publicPathForCCanalyser}/counts.txt
cdCommand='cd ${beforeSummaryCountDir}'
cdToThis="${beforeSummaryCountDir}"
checkCdSafety
cd ${beforeSummaryCountDir}
}
generateCombinedDataHub(){
# Builds the top-level combined data hub : writes hub.txt and genomes.txt,
# catenates the RAW / PREfiltered / FILTERED stage tracks.txt files, turns
# the stage-wise bigWigs into red/orange/green multiWig overlay tracks, adds
# the COMBINED tracks and capture-site bigBeds, and writes the description page.
# Relies on globals : Sample, CCversion, PublicPath, ServerAndPath, GENOME,
# publicPathForCCanalyser, sampleForCCanalyser, ucscBuild - and on helpers
# printToLogFile, isThisPublicFolderParsedFineAndMineToMeddle,
# checkParseEnsureNoSlashes, doMultiWigParent, doMultiWigChild (appending to
# TEMP2_tracks.txt), doRegularTrack, writeDescriptionHtml,
# isThisLocalDataParsedFineAndMineToMeddle, updateHub_part2c, updateHub_part3.
#
# Fixed : the three per-stage child-track loops set fileName="$(unknown)" -
# which runs a nonexistent command and leaves fileName empty - while the
# parsed ${filename} was never used. They now use fileName="${filename}".
printThis="Updating the public folder with analysis log files.."
printToLogFile
temptime=$( date +%d%m%y )
# PINK GREEN (default)
redcolor="255,74,179"
orangecolor="255,140,0"
greencolor="62,176,145"
# Here add :
# Generate the hub itself, as well as genomes.txt
#${publicPathForCCanalyser}/${Sample}_${CCversion}_hub.txt
# cat MES0_CC2_hub.txt
#hub MES0_CC2
#shortLabel MES0_CC2
#longLabel MES0_CC2_CaptureC
#genomesFile http://userweb.molbiol.ox.ac.uk/public/mgosden/Dilut_Cap/MES0_CC2_genomes.txt
#email james.davies@trinity.ox.ac.uk
echo "hub ${Sample}_${CCversion}" > ${PublicPath}/${Sample}_${CCversion}_hub.txt
echo "shortLabel ${Sample}_${CCversion}" >> ${PublicPath}/${Sample}_${CCversion}_hub.txt
echo "longLabel ${Sample}_${CCversion}_CaptureC" >> ${PublicPath}/${Sample}_${CCversion}_hub.txt
echo "genomesFile ${ServerAndPath}/${Sample}_${CCversion}_genomes.txt" >> ${PublicPath}/${Sample}_${CCversion}_hub.txt
echo "email jelena.telenius@gmail.com" >> ${PublicPath}/${Sample}_${CCversion}_hub.txt
#${publicPathForCCanalyser}/${Sample}_${CCversion}_genomes.txt
# cat MES0_CC2_genomes.txt
#genome mm9
#trackDb http://sara.molbiol.ox.ac.uk/public/mgosden/Dilut_Cap//MES0_CC2_tracks.txt
#echo "genome ${GENOME}" > ${ServerAndPath}/${Sample}_${CCversion}_genomes.txt
#echo "trackDb ${ServerAndPath}/${Sample}_${CCversion}_tracks.txt" >> ${ServerAndPath}/${Sample}_${CCversion}_genomes.txt
echo "genome ${GENOME}" > TEMP_genomes.txt
echo "trackDb ${ServerAndPath}/${Sample}_${CCversion}_tracks.txt" >> TEMP_genomes.txt
# thisPublicFolder="${PublicPath}/${Sample}_${CCversion}"
# thisPublicFolderName='${PublicPath}/${Sample}_${CCversion}'
# isThisPublicFolderParsedFineAndMineToMeddle
# ABove : some issues when the tested thing is file not a folder. Not always, just sometimes ..
thisPublicFolder="${PublicPath}"
thisPublicFolderName='${PublicPath}'
isThisPublicFolderParsedFineAndMineToMeddle
checkThis="${Sample}_${CCversion}"
checkedName='${Sample}_${CCversion}'
checkParseEnsureNoSlashes
mv -f TEMP_genomes.txt ${PublicPath}/${Sample}_${CCversion}_genomes.txt
# Catenate the tracks.txt files to form new tracks.txt
tracksTxt="${Sample}_${CCversion}_tracks.txt"
# [telenius@deva run15]$ cat /public/telenius/capturetests/test_040316_O/test_040316_O/CC4/RAW/RAW_test_040316_O_CC4_tracks.txt | grep track
# [telenius@deva run15]$ cat /public/telenius/capturetests/test_040316_O/test_040316_O/CC4/PREfiltered/PREfiltered_test_040316_O_CC4_tracks.txt | grep track
# [telenius@deva run15]$ cat /public/telenius/capturetests/test_040316_O/test_040316_O/CC4/FILTERED/FILTERED_test_040316_O_CC4_tracks.txt | grep track
cat ${PublicPath}/RAW/RAW_${tracksTxt} ${PublicPath}/PREfiltered/PREfiltered_${tracksTxt} ${PublicPath}/FILTERED/FILTERED_${tracksTxt} > TEMP_tracks.txt
# echo
# echo
# cat TEMP_tracks.txt
# echo
# echo
# Make proper redgreen tracks based on the RAW and FILTERED tracks..
#doMultiWigParent
# NEEDS THESE TO BE SET BEFORE CALL :
#longLabel=""
#trackName=""
#overlayType=""
#windowingFunction=""
#visibility=""
#doMultiWigChild
# NEEDS THESE TO BE SET BEFORE CALL
# parentTrack=""
# trackName=""
# fileName=".bw"
# trackColor=""
# trackPriority=""
rm -f TEMP2_tracks.txt
trackList=$( cat TEMP_tracks.txt | grep track | grep RAW | sed 's/^track RAW_//' )
filenameList=$( cat TEMP_tracks.txt | grep bigDataUrl | grep RAW | sed 's/^bigDataUrl .*RAW\///' )
cat TEMP_tracks.txt | grep track | grep RAW | sed 's/^track RAW_//' | sed 's/^track win_RAW_/win_/' > TEMP_trackList.txt
cat TEMP_tracks.txt | grep bigDataUrl | grep RAW | sed 's/^bigDataUrl .*RAW\///' > TEMP_bigDataUrlList.txt
# Pair each track name with its bigWig file name : "trackname,filename".
list=$( paste TEMP_trackList.txt TEMP_bigDataUrlList.txt | sed 's/\s/,/' )
echo
echo RAW track list
paste TEMP_trackList.txt TEMP_bigDataUrlList.txt
echo
rm -f TEMP_trackList.txt TEMP_bigDataUrlList.txt
for track in $list
do
echo $track
trackname=$( echo $track | sed 's/,.*//' )
filename=$( echo $track | sed 's/.*,//')
longLabel="CC_${trackname} all mapped RED, duplicate-filtered ORANGE, dupl+ploidy+blat-filtered GREEN"
trackName="${trackname}"
overlayType="solidOverlay"
windowingFunction="maximum"
visibility="hide"
doMultiWigParent
parentTrack="${trackname}"
trackName="${trackname}_raw"
fileName="${filename}"
bigWigSubfolder="RAW"
trackColor="${redcolor}"
trackPriority="100"
doMultiWigChild
done
cat TEMP_tracks.txt | grep track | grep PREfiltered | sed 's/^track PREfiltered_//' | sed 's/^track win_PREfiltered_/win_/' > TEMP_trackList.txt
cat TEMP_tracks.txt | grep bigDataUrl | grep PREfiltered| sed 's/^bigDataUrl .*PREfiltered\///' > TEMP_bigDataUrlList.txt
list=$( paste TEMP_trackList.txt TEMP_bigDataUrlList.txt | sed 's/\s/,/' )
echo
echo PREfiltered track list
paste TEMP_trackList.txt TEMP_bigDataUrlList.txt
echo
rm -f TEMP_trackList.txt TEMP_bigDataUrlList.txt
for track in $list
do
echo $track
trackname=$( echo $track | sed 's/,.*//')
filename=$( echo $track | sed 's/.*,//')
parentTrack="${trackname}"
trackName="${trackname}_PREfiltered"
fileName="${filename}"
bigWigSubfolder="PREfiltered"
trackColor="${orangecolor}"
trackPriority="110"
doMultiWigChild
done
cat TEMP_tracks.txt | grep track | grep FILTERED | sed 's/^track FILTERED_//' | sed 's/^track win_FILTERED_/win_/' > TEMP_trackList.txt
cat TEMP_tracks.txt | grep bigDataUrl | grep FILTERED | sed 's/^bigDataUrl .*FILTERED\///' > TEMP_bigDataUrlList.txt
list=$( paste TEMP_trackList.txt TEMP_bigDataUrlList.txt | sed 's/\s/,/' )
echo
echo FILTERED track list
paste TEMP_trackList.txt TEMP_bigDataUrlList.txt
echo
rm -f TEMP_trackList.txt TEMP_bigDataUrlList.txt
for track in $list
do
echo $track
trackname=$( echo $track | sed 's/,.*//')
filename=$( echo $track | sed 's/.*,//')
parentTrack="${trackname}"
trackName="${trackname}_filtered"
fileName="${filename}"
bigWigSubfolder="FILTERED"
trackColor="${greencolor}"
trackPriority="120"
doMultiWigChild
done
rm -f TEMP_tracks.txt
# Adding the combined files and the capture-site (REfragment) tracks
# Here used to be also sed 's/visibility hide/visibility full/' : to set only the COMBINED tracks visible.
# As multi-capture samples grep more frequent, this was taken out of the commands below.
cat ${PublicPath}/COMBINED/COMBINED_${tracksTxt} | sed 's/color 0,0,0/color '"${greencolor}"'/' \
| sed 's/priority 200/windowingFunction maximum\npriority 10/' \
| sed 's/bigDataUrl .*COMBINED\//bigDataUrl COMBINED\//' | grep -v "^html" \
> TEMP3_tracks.txt
cp ${PublicPath}/COMBINED/*.bb ${PublicPath}/.
cat TEMP2_tracks.txt TEMP3_tracks.txt > TEMP4_tracks.txt
rm -f TEMP2_tracks.txt TEMP3_tracks.txt
# Move over..
thisPublicFolder="${PublicPath}"
thisPublicFolderName='${PublicPath}'
isThisPublicFolderParsedFineAndMineToMeddle
checkThis="${tracksTxt}"
checkedName='${tracksTxt}'
checkParseEnsureNoSlashes
# SOme issues when the below is a file not a folder.
# thisPublicFolder="${PublicPath}/${tracksTxt}"
# thisPublicFolderName='${PublicPath}/${tracksTxt}'
# isThisPublicFolderParsedFineAndMineToMeddle
mv -f TEMP4_tracks.txt ${PublicPath}/${tracksTxt}
# Adding the bigbed track for BLAT-filter-marked RE-fragments (if there were any) :
if [ -s filteringLogFor_PREfiltered_${Sample}_${CCversion}/BlatPloidyFilterRun/BLAT_PLOIDY_FILTERED_OUTPUT/blatFilterMarkedREfragments.bed ]; then
cat filteringLogFor_PREfiltered_${Sample}_${CCversion}/BlatPloidyFilterRun/BLAT_PLOIDY_FILTERED_OUTPUT/blatFilterMarkedREfragments.bed | sort -T $(pwd) -k1,1 -k2,2n > tempBed.bed
bedToBigBed -type=bed4 tempBed.bed ${ucscBuild} ${sampleForCCanalyser}_${CCversion}_blatFilterMarkedREfragments.bb
rm -f tempBed.bed
thisLocalData="${sampleForCCanalyser}_${CCversion}_blatFilterMarkedREfragments.bb"
thisLocalDataName='${sampleForCCanalyser}_${CCversion}_blatFilterMarkedREfragments.bb'
isThisLocalDataParsedFineAndMineToMeddle
thisPublicFolder="${publicPathForCCanalyser}"
thisPublicFolderName='${publicPathForCCanalyser}'
isThisPublicFolderParsedFineAndMineToMeddle
mv -f ${sampleForCCanalyser}_${CCversion}_blatFilterMarkedREfragments.bb ${publicPathForCCanalyser}
fileName=$( echo ${publicPathForCCanalyser}/${sampleForCCanalyser}_${CCversion}_blatFilterMarkedREfragments.bb | sed 's/^.*\///' )
trackName=$( echo ${fileName} | sed 's/\.bb$//' )
longLabel="${trackName}"
trackColor="133,0,122"
trackPriority="1"
visibility="pack"
trackType="bb4"
bigWigSubfolder=""
doRegularTrack
fi
writeDescriptionHtml
# Moving the description file
thisLocalData="${sampleForCCanalyser}_description.html"
thisLocalDataName='${sampleForCCanalyser}_description.html'
isThisLocalDataParsedFineAndMineToMeddle
thisPublicFolder="${publicPathForCCanalyser}"
thisPublicFolderName='${publicPathForCCanalyser}'
isThisPublicFolderParsedFineAndMineToMeddle
mv -f "${sampleForCCanalyser}_description.html" "${publicPathForCCanalyser}/."
updateHub_part2c
updateHub_part3
}
writeBeginningOfDescription(){
# Starts the description html : writes the static begin.html skeleton, then
# the run metadata, fastq listing and summary figure sections into
# temp_description.html (which later edits keep appending to).
# Relies on globals : hubNameList, GENOME, CapturePipePath, REenzyme,
# ServerAndPath, Sample, CCversion, Read1, Read2, figureDimensions,
# onlyFastqPartHtmls.
# Note : target="_blank" sits inside double-quoted echos, so the inner quotes
# are eaten by the shell and the output reads target=_blank - kept as-is.
# Write the beginning of the html file
echo "<!DOCTYPE HTML PUBLIC -//W3C//DTD HTML 4.01//EN" > begin.html
echo "http://www.w3.org/TR/html4/strict.dtd" >> begin.html
echo ">" >> begin.html
echo " <html lang=en>" >> begin.html
echo " <head>" >> begin.html
echo " <title> ${hubNameList[0]} data hub in ${GENOME} </title>" >> begin.html
echo " </head>" >> begin.html
echo " <body>" >> begin.html
# Generating TimeStamp
TimeStamp=($( date | sed 's/[: ]/_/g' ))
DateTime="$(date)"
echo "<p>Data produced ${DateTime} with CapC pipeline (coded by James Davies, pipelined by Jelena Telenius, located in ${CapturePipePath} )</p>" > temp_description.html
echo "<hr />" >> temp_description.html
echo "Restriction enzyme and genome build : ( ${REenzyme} ) ( ${GENOME} )" >> temp_description.html
echo "<hr />" >> temp_description.html
echo "Capturesite coordinates given to the run : <br>" >> temp_description.html
echo "<a target="_blank" href=\"${ServerAndPath}/${Sample}_logFiles/usedCapturesiteFile.txt\" >${Sample}_usedCapturesiteFile.txt</a>" >> temp_description.html
echo "<hr />" >> temp_description.html
# echo "<p>User manual - to understand the pipeline and the output : <a target="_blank" href=\"http://sara.molbiol.ox.ac.uk/public/jdavies/MANUAL_for_pipe/PipeUserManual.pdf\" >CapturePipeUserManual.pdf</a></p>" >> temp_description.html
echo "<hr />" >> temp_description.html
echo "<p>Data located in : $(pwd)</p>" >> temp_description.html
echo "<p>Sample name : ${Sample}, containing fastq files : <br>" >> temp_description.html
if [ -s ../PIPE_fastqPaths.txt ]; then
# Parallel runs will find this :
echo "<pre>" >> temp_description.html
cat ../PIPE_fastqPaths.txt | sed 's/\s/\n/g' >> temp_description.html
echo "</pre>" >> temp_description.html
else
# Serial runs will find these :
echo "${Read1} and ${Read2}" >> temp_description.html
fi
echo "</p>" >> temp_description.html
# These have to be listed for all runs (RAW and PREfiltered and FILTERED)
# Summary figure and report ------------------------------------------------------------
# figureDimensions is the 'width="W" height="H"' string set by generateSummaryFigure.
echo "<hr />" >> temp_description.html
echo "<img" >> temp_description.html
echo "src=${ServerAndPath}/summary.png" >> temp_description.html
echo "${figureDimensions}" >> temp_description.html
echo "alt=\"Summary of the analysis\"/>" >> temp_description.html
echo "<hr />" >> temp_description.html
echo "<li>Above figure as READ COUNTS in text file :<a target="_blank" href=\"${ServerAndPath}/counts.txt\" >readCounts.txt</a></li>" >> temp_description.html
echo "<hr />" >> temp_description.html
if [ "${onlyFastqPartHtmls}" -ne 1 ]; then
echo "<li><b>Final counts summary files</b> :" >> temp_description.html
echo " ( <a target="_blank" href=\"${ServerAndPath}/COMBINED/COMBINED_${Sample}_logFiles/COMBINED_report4_${CCversion}.txt\" >Final REPORTER counts</a> ) " >> temp_description.html
echo " ( <a target="_blank" href=\"${ServerAndPath}/COMBINED/COMBINED_${Sample}_logFiles/COMBINED_report3_${CCversion}.txt\" >Final counts</a> ) " >> temp_description.html
echo "</li>" >> temp_description.html
echo "<hr />" >> temp_description.html
fi
# [telenius@deva CC4]$ ls RAW/RAW_test_040316_J_logFiles/
# FLASHED_REdig_report_CC4.txt NONFLASHED_REdig_report_CC4.txt
# [telenius@deva CC4]$ ls PREfiltered/PREfiltered_test_040316_J_logFiles/
# FLASHED_REdig_report2_CC4.txt FLASHED_REdig_report_CC4.txt NONFLASHED_REdig_report2_CC4.txt NONFLASHED_REdig_report_CC4.txt
}
writeFastqwiseReports(){
# Appends the per-fastq QC sections to temp_description.html : links to the
# fastqc reports (original / trimmed / flashed / RE-digested reads) and to the
# trimming / flashing / RE-digestion / mapping log files.
# Relies on globals : ServerAndPath, Sample, REenzyme.
# Note : target="_blank" inside the double-quoted echos loses its inner
# quotes on output (becomes target=_blank) - kept as-is.
# FASTQ reports ------------------------------------------------------------
echo "<h4>FASTQC results here : </h4>" >> temp_description.html
echo "<li>FastQC results (untrimmed) : <a target="_blank" href=\"${ServerAndPath}/${Sample}_logFiles/READ1_fastqc_ORIGINAL/fastqc_report.html\" >READ1_fastqc_ORIGINAL/fastqc_report.html</a> , and " >> temp_description.html
echo " <a target="_blank" href=\"${ServerAndPath}/${Sample}_logFiles/READ2_fastqc_ORIGINAL/fastqc_report.html\" >READ2_fastqc_ORIGINAL/fastqc_report.html</a> </li>" >> temp_description.html
echo "<li>FastQC results (trimmed) : <a target="_blank" href=\"${ServerAndPath}/${Sample}_logFiles/READ1_fastqc_TRIMMED/fastqc_report.html\" >READ1_fastqc_TRIMMED/fastqc_report.html</a> , and " >> temp_description.html
echo " <a target="_blank" href=\"${ServerAndPath}/${Sample}_logFiles/READ2_fastqc_TRIMMED/fastqc_report.html\" >READ2_fastqc_TRIMMED/fastqc_report.html</a> </li>" >> temp_description.html
echo "<li>FastQC results (flash-combined) - before RE digestion: <a target="_blank" href=\"${ServerAndPath}/${Sample}_logFiles/FLASHED_fastqc/fastqc_report.html\" >FLASHED_fastqc/fastqc_report.html</a> </li>" >> temp_description.html
echo "<li>FastQC results (non-flash-combined) - before RE digestion: <a target="_blank" href=\"${ServerAndPath}/${Sample}_logFiles/NONFLASHED_fastqc/fastqc_report.html\" >NONFLASHED_fastqc/fastqc_report.html</a> </li>" >> temp_description.html
echo "<li>FastQC results (flash-combined) - after RE digestion: <a target="_blank" href=\"${ServerAndPath}/${Sample}_logFiles/FLASHED_REdig_fastqc/fastqc_report.html\" >FLASHED_REdig_fastqc/fastqc_report.html</a> </li>" >> temp_description.html
echo "<li>FastQC results (non-flash-combined) - after RE digestion: <a target="_blank" href=\"${ServerAndPath}/${Sample}_logFiles/NONFLASHED_REdig_fastqc/fastqc_report.html\" >NONFLASHED_REdig_fastqc/fastqc_report.html</a> </li>" >> temp_description.html
echo "<p>" >> temp_description.html
echo "FastQC run error logs : <a target="_blank" href=\"${ServerAndPath}/${Sample}_logFiles/fastqcRuns.log\" >fastqcRuns.log</a>" >> temp_description.html
echo "</p>" >> temp_description.html
echo "<hr />" >> temp_description.html
# TRIMMING FLASHING RE-DIGESTION reports ------------------------------------------------------------
echo "<h4>Trimming/flashing/RE-digestion/mapping log files here : </h4>" >> temp_description.html
echo "<li>Harsh trim_galore trim : <a target="_blank" href=\"${ServerAndPath}/${Sample}_logFiles/read_trimming.log\" >read_trimming.log</a> </li>" >> temp_description.html
echo "<li>Flashing : <a target="_blank" href=\"${ServerAndPath}/${Sample}_logFiles/flashing.log\" >flashing.log</a> </li>" >> temp_description.html
echo "<li>Histogram of flashed reads : <a target="_blank" href=\"${ServerAndPath}/${Sample}_logFiles/flash.hist\" >flash.hist</a> </li>" >> temp_description.html
echo "<li>RE digestion for of flash-combined reads : <a target="_blank" href=\"${ServerAndPath}/${Sample}_logFiles/FLASHED_${REenzyme}digestion.log\" >FLASHED_${REenzyme}digestion.log</a> </li>" >> temp_description.html
echo "<li>RE digestion for of non-flash-combined reads : <a target="_blank" href=\"${ServerAndPath}/${Sample}_logFiles/NONFLASHED_${REenzyme}digestion.log\" >NONFLASHED_${REenzyme}digestion.log</a> </li>" >> temp_description.html
echo "<li>Bowtie mapping for flashed and non-flash-combined reads : <a target="_blank" href=\"${ServerAndPath}/${Sample}_logFiles/bowties.log\" >bowties.log</a> </li>" >> temp_description.html
echo "<hr />" >> temp_description.html
}
writeBamcombineReports(){
# Appends the bam combining section to temp_description.html : links to the
# published capture-site bunch files, the per-bunch FLASHED / NONFLASHED bam
# listing files, and the combine success log.
# Relies on globals : publicPathForCCanalyser. The href targets are relative
# paths, so they resolve against the page's own public folder.
# BAM combining logs ------------------------------------------------------------
echo "<h4>Capturesite bunches here : </h4>" >> temp_description.html
for capturesiteTxtFile in ${publicPathForCCanalyser}/CAPTURESITEbunches/capturesitefile_sorted_BUNCH_*.txt
do
TEMPbasename=$(basename ${capturesiteTxtFile})
echo "<li><a target="_blank" href=\"CAPTURESITEbunches/${TEMPbasename}\" > ${TEMPbasename} </a></li> " >> temp_description.html
done
echo "<hr />" >> temp_description.html
echo "<h4>BAM combining logs here : </h4>" >> temp_description.html
for bamTxtFile in ${publicPathForCCanalyser}/bamlistings/bamlisting_FLASHED_BUNCH_*.txt
do
TEMPbasename=$(basename ${bamTxtFile})
echo "<li><a target="_blank" href=\"bamlistings/${TEMPbasename}\" > ${TEMPbasename} </a></li> " >> temp_description.html
done
echo "<br>" >> temp_description.html
for bamTxtFile in ${publicPathForCCanalyser}/bamlistings/bamlisting_NONFLASHED_BUNCH_*.txt
do
TEMPbasename=$(basename ${bamTxtFile})
echo "<li><a target="_blank" href=\"bamlistings/${TEMPbasename}\" > ${TEMPbasename} </a></li> " >> temp_description.html
done
echo "<br>" >> temp_description.html
echo "<li><a target="_blank" href=\"bamcombineSuccess.log\" >bamcombineSuccess.log</a></li> " >> temp_description.html
echo "<hr />" >> temp_description.html
echo "<br>" >> temp_description.html
echo "<br>" >> temp_description.html
}
# Appends links to the final bam-count summaries and the top-level
# qsub.out / qsub.err run logs to temp_description.html.
# Globals read: ServerAndPath, CCversion.
writeQsubOutToDescription(){
echo "<li><b>Final counts summary files</b> (these bam counts will enter further analysis) :" >> temp_description.html
echo " ( <a target="_blank" href=\"${ServerAndPath}/FLASHED_REdig_report11_${CCversion}.txt\" >FLASHED bam counts</a> ) " >> temp_description.html
echo " ( <a target="_blank" href=\"${ServerAndPath}/NONFLASHED_REdig_report11_${CCversion}.txt\" >NONFLASHED bam counts</a> ) " >> temp_description.html
echo "</li>" >> temp_description.html
echo "<hr />" >> temp_description.html
echo "Run log files available in : <a target="_blank" href=\"${ServerAndPath}/qsub.out\" >qsub.out</a> , and <a target="_blank" href=\"${ServerAndPath}/qsub.err\" >qsub.err</a>" >> temp_description.html
echo "<br>" >> temp_description.html
}
# Appends the bunch-division (tile-wise, no-dedup) CCanalyser report links
# to temp_description.html.
# Globals read: ServerAndPath, Sample, QSUBOUTFILE, QSUBERRFILE, CCversion.
# Fix: the "Flashed reads" colon was outside the quoted string
# (echo "<li>Flashed reads " :), which rendered as "reads  :" - now it
# matches the "Non-flashed reads : " line below.
writeBunchDivisionpartsOfDescription(){
# CC-analyser reports ------------------------------------------------------------
echo "Run log files available in : <a target="_blank" href=\"${ServerAndPath}/${Sample}_logFiles/${Sample}_$(basename ${QSUBOUTFILE})\" >${QSUBOUTFILE}</a> , and <a target="_blank" href=\"${ServerAndPath}/${Sample}_logFiles/${Sample}_$(basename ${QSUBERRFILE})\" >${QSUBERRFILE}</a>" >> temp_description.html
echo "<br>" >> temp_description.html
echo "<hr />" >> temp_description.html
echo "<b>Preliminary tile-wise counters : </b>" >> temp_description.html
echo "- from CCanalyser capture-site (REfragment)-bunch-wise division reports (no duplicate filtering)" >> temp_description.html
echo "<li>Flashed reads : " >> temp_description.html
echo " ( <a target="_blank" href=\"${ServerAndPath}/RAW_${Sample}_logFiles/FLASHED_REdig_report_${CCversion}.txt\" >FULL report</a> ) " >> temp_description.html
echo "</li>" >> temp_description.html
echo "<li>Non-flashed reads : " >> temp_description.html
echo " ( <a target="_blank" href=\"${ServerAndPath}/RAW_${Sample}_logFiles/NONFLASHED_REdig_report_${CCversion}.txt\" >FULL report</a> ) " >> temp_description.html
echo " ( <a target="_blank" href=\"${ServerAndPath}/RAW_${Sample}_logFiles/NONFLASHED_REdig_report2_${CCversion}.txt\" >RE cut report</a> ) " >> temp_description.html
echo "</li>" >> temp_description.html
echo "<hr />" >> temp_description.html
}
# Appends the full step-by-step CCanalyser report section (Red / Orange /
# Green graph log links) to temp_description.html.
# Globals read: ServerAndPath, Sample, QSUBOUTFILE, QSUBERRFILE, CCversion, REenzyme.
# Fix: the "Red graph, flashed reads" header line was missing its
# ">> temp_description.html" redirect, so it leaked to stdout and was
# absent from the generated page.
# NOTE(review): this function links ${Sample}_${QSUBOUTFILE} directly,
# while writeBunchDivisionpartsOfDescription uses $(basename ...) - confirm
# both forms resolve to the same published filename.
writeCCanalysispartsOfDescription(){
# CC-analyser reports ------------------------------------------------------------
echo "<h4>Step-by-step analysis reports (in \"chronological order\" ) : </h4>" >> temp_description.html
echo "Run log files available in : <a target="_blank" href=\"${ServerAndPath}/${Sample}_logFiles/${Sample}_${QSUBOUTFILE}\" >${QSUBOUTFILE}</a> , and <a target="_blank" href=\"${ServerAndPath}/${Sample}_logFiles/${Sample}_${QSUBERRFILE}\" >${QSUBERRFILE}</a>" >> temp_description.html
echo "<br>" >> temp_description.html
echo "<b>RE digestion of flash-combined reads</b> - throws out all reads with no RE cut : <a target="_blank" href=\"${ServerAndPath}/${Sample}_logFiles/${Sample}_FLASHED_${REenzyme}digestion.log\" >FLASHED_${REenzyme}digestion.log</a> " >> temp_description.html
echo "<br>" >> temp_description.html
echo "<br>" >> temp_description.html
# Red graph: duplicate filter OFF, before blat+ploidy filtering.
echo "<b>Red graph</b> - CCanalyser results with duplicate filter turned OFF (before blat+ploidy filter) " >> temp_description.html
echo "<br>" >> temp_description.html
echo "<li><b>Red graph, flashed reads</b> Capture script log files : <br>" >> temp_description.html
echo " ( <a target="_blank" href=\"${ServerAndPath}/RAW/RAW_${Sample}_logFiles/FLASHED_REdig_report4_${CCversion}.txt\" >Final REPORTER counts</a> ) " >> temp_description.html
echo " ( <a target="_blank" href=\"${ServerAndPath}/RAW/RAW_${Sample}_logFiles/FLASHED_REdig_report3_${CCversion}.txt\" >Final counts</a> ) " >> temp_description.html
echo " ( <a target="_blank" href=\"${ServerAndPath}/RAW/RAW_${Sample}_logFiles/FLASHED_REdig_report2_${CCversion}.txt\" >RE cut report</a> ) " >> temp_description.html
echo " ( <a target="_blank" href=\"${ServerAndPath}/RAW/RAW_${Sample}_logFiles/FLASHED_REdig_report_${CCversion}.txt\" >FULL report</a> ) " >> temp_description.html
echo "</li>" >> temp_description.html
echo "<li><b>Red graph, non-flashed reads</b> Capture script log files : <br>" >> temp_description.html
echo " ( <a target="_blank" href=\"${ServerAndPath}/RAW/RAW_${Sample}_logFiles/NONFLASHED_REdig_report4_${CCversion}.txt\" >Final REPORTER counts</a> ) " >> temp_description.html
echo " ( <a target="_blank" href=\"${ServerAndPath}/RAW/RAW_${Sample}_logFiles/NONFLASHED_REdig_report3_${CCversion}.txt\" >Final counts</a> ) " >> temp_description.html
echo " ( <a target="_blank" href=\"${ServerAndPath}/RAW/RAW_${Sample}_logFiles/NONFLASHED_REdig_report2_${CCversion}.txt\" >RE cut report</a> ) " >> temp_description.html
echo " ( <a target="_blank" href=\"${ServerAndPath}/RAW/RAW_${Sample}_logFiles/NONFLASHED_REdig_report_${CCversion}.txt\" >FULL report</a> ) " >> temp_description.html
echo "</li>" >> temp_description.html
echo "<br>" >> temp_description.html
echo "<br>" >> temp_description.html
# Orange graph: duplicate filter ON, before blat+ploidy filtering.
echo "<b>Orange graph</b> - CCanalyser results with duplicate filter turned ON (before blat+ploidy filter)" >> temp_description.html
echo "<br>" >> temp_description.html
echo "<li><b>Orange graph, flashed reads</b> Capture script log files : <br>" >> temp_description.html
echo " ( <a target="_blank" href=\"${ServerAndPath}/PREfiltered/PREfiltered_${Sample}_logFiles/FLASHED_REdig_report4_${CCversion}.txt\" >Final REPORTER counts</a> ) " >> temp_description.html
echo " ( <a target="_blank" href=\"${ServerAndPath}/PREfiltered/PREfiltered_${Sample}_logFiles/FLASHED_REdig_report3_${CCversion}.txt\" >Final counts</a> ) " >> temp_description.html
echo " ( <a target="_blank" href=\"${ServerAndPath}/PREfiltered/PREfiltered_${Sample}_logFiles/FLASHED_REdig_report_${CCversion}.txt\" >FULL report</a> ) " >> temp_description.html
echo "</li>" >> temp_description.html
echo "<li><b>Orange graph, non-flashed reads</b> Capture script log files : <br>" >> temp_description.html
echo " ( <a target="_blank" href=\"${ServerAndPath}/PREfiltered/PREfiltered_${Sample}_logFiles/NONFLASHED_REdig_report4_${CCversion}.txt\" >Final REPORTER counts</a> ) " >> temp_description.html
echo " ( <a target="_blank" href=\"${ServerAndPath}/PREfiltered/PREfiltered_${Sample}_logFiles/NONFLASHED_REdig_report3_${CCversion}.txt\" >Final counts</a> ) " >> temp_description.html
echo " ( <a target="_blank" href=\"${ServerAndPath}/PREfiltered/PREfiltered_${Sample}_logFiles/NONFLASHED_REdig_report_${CCversion}.txt\" >FULL report</a> ) " >> temp_description.html
echo "</li>" >> temp_description.html
echo "<li><b>Non-flashed reads coordinate adjustment in duplicate filter</b> : <a target="_blank" href=\"${ServerAndPath}/PREfiltered/PREfiltered_${Sample}_logFiles/NONFLASHED_REdig_report2_${CCversion}.txt\" >dupl_filtered_nonflashed_report2_${CCversion}.txt</a> </li>" >> temp_description.html
echo "<br>" >> temp_description.html
echo "<br>" >> temp_description.html
echo "Ploidy+Blat-filtering log file : <a target="_blank" href=\"${ServerAndPath}/PREfiltered/filtering.log\" >filtering.log</a>" >> temp_description.html
echo "<br>" >> temp_description.html
echo "<br>" >> temp_description.html
# Green graph: fully filtered data (duplicate, blat, ploidy).
echo "<b>Green graph</b> - CCanalyser results for filtered data (duplicate, blat, ploidy filtered)" >> temp_description.html
echo "<br>" >> temp_description.html
echo "<li><b>Green graph in flashed reads overlay track</b> Capture script log files : <br>" >> temp_description.html
echo " ( <a target="_blank" href=\"${ServerAndPath}/FILTERED/FILTERED_${Sample}_logFiles/FLASHED_REdig_report4_${CCversion}.txt\" >Final REPORTER counts</a> ) " >> temp_description.html
echo " ( <a target="_blank" href=\"${ServerAndPath}/FILTERED/FILTERED_${Sample}_logFiles/FLASHED_REdig_report3_${CCversion}.txt\" >Final counts</a> ) " >> temp_description.html
echo " ( <a target="_blank" href=\"${ServerAndPath}/FILTERED/FILTERED_${Sample}_logFiles/FLASHED_REdig_report_${CCversion}.txt\" >FULL report</a> ) " >> temp_description.html
echo "</li>" >> temp_description.html
echo "<li><b>Green graph in non-flashed reads overlay track</b> Capture script log files : <br>" >> temp_description.html
echo " ( <a target="_blank" href=\"${ServerAndPath}/FILTERED/FILTERED_${Sample}_logFiles/NONFLASHED_REdig_report4_${CCversion}.txt\" >Final REPORTER counts</a> ) " >> temp_description.html
echo " ( <a target="_blank" href=\"${ServerAndPath}/FILTERED/FILTERED_${Sample}_logFiles/NONFLASHED_REdig_report3_${CCversion}.txt\" >Final counts</a> ) " >> temp_description.html
echo " ( <a target="_blank" href=\"${ServerAndPath}/FILTERED/FILTERED_${Sample}_logFiles/NONFLASHED_REdig_report_${CCversion}.txt\" >FULL report</a> ) " >> temp_description.html
echo "</li>" >> temp_description.html
echo "<br>" >> temp_description.html
echo "<li><b>Green graph, combined flashed and nonflashed filtered reads</b> Capture script log files : <br>" >> temp_description.html
echo " ( <a target="_blank" href=\"${ServerAndPath}/COMBINED/COMBINED_${Sample}_logFiles/COMBINED_report4_${CCversion}.txt\" >Final REPORTER counts</a> ) " >> temp_description.html
echo " ( <a target="_blank" href=\"${ServerAndPath}/COMBINED/COMBINED_${Sample}_logFiles/COMBINED_report3_${CCversion}.txt\" >Final counts</a> ) " >> temp_description.html
echo " ( <a target="_blank" href=\"${ServerAndPath}/COMBINED/COMBINED_${Sample}_logFiles/COMBINED_report_${CCversion}.txt\" >FULL report</a> ) " >> temp_description.html
echo "</li>" >> temp_description.html
}
# Appends the Multi-QC report links and one link per per-fastq description
# page to temp_description.html.
# Globals read: publicPathForCCanalyser, Sample.
writeEachFastqHtmlAddress(){
echo "<hr />" >> temp_description.html
echo "<b>Multi-QC reports</b> here :" >> temp_description.html
echo "<br>" >> temp_description.html
echo "<li><a target="_blank" href=\"multiqcReports/ORIGINAL_READ1_report.html\" >ORIGINAL_READ1_report.html</a></li> " >> temp_description.html
echo "<li><a target="_blank" href=\"multiqcReports/ORIGINAL_READ2_report.html\" >ORIGINAL_READ2_report.html</a></li> " >> temp_description.html
echo "<br>" >> temp_description.html
echo "<li><a target="_blank" href=\"multiqcReports/TRIMMED_READ1_report.html\" >TRIMMED_READ1_report.html</a></li> " >> temp_description.html
echo "<li><a target="_blank" href=\"multiqcReports/TRIMMED_READ2_report.html\" >TRIMMED_READ2_report.html</a></li> " >> temp_description.html
echo "<br>" >> temp_description.html
echo "<li><a target="_blank" href=\"multiqcReports/FLASHED_report.html\" >FLASHED_report.html</a></li> " >> temp_description.html
echo "<li><a target="_blank" href=\"multiqcReports/NONFLASHED_report.html\" >NONFLASHED_report.html</a></li> " >> temp_description.html
echo "<br>" >> temp_description.html
echo "<li><a target="_blank" href=\"multiqcReports/NONFLASHED_REdig_report.html\" >NONFLASHED_REdig_report.html</a></li> " >> temp_description.html
echo "<li><a target="_blank" href=\"multiqcReports/FLASHED_REdig_report.html\" >FLASHED_REdig_report.html</a></li> " >> temp_description.html
echo "<br>" >> temp_description.html
echo "<li><a target="_blank" href=\"multiQCrunlog.out\" >multiQCrunlog.out</a></li> " >> temp_description.html
echo "<li><a target="_blank" href=\"multiQCrunlog.err\" >multiQCrunlog.err</a></li> " >> temp_description.html
echo "<hr />" >> temp_description.html
echo "<b>Fastq-wise reports</b> here :" >> temp_description.html
echo "<br>" >> temp_description.html
# One link per per-fastq description page found under fastqWise/.
for htmlPage in ${publicPathForCCanalyser}/fastqWise/fastq_*/${Sample}_description.html
do
TEMPbasename=$(basename $(dirname ${htmlPage}))
echo "<li><a target="_blank" href=\"fastqWise/${TEMPbasename}/${Sample}_description.html\" > ${TEMPbasename} </a></li> " >> temp_description.html
done
echo "<br>" >> temp_description.html
echo "<li><a target="_blank" href=\"fastqRoundSuccess.log\" >fastqRoundSuccess.log</a></li> " >> temp_description.html
echo "<hr />" >> temp_description.html
}
# Finalise the description page: append the closing divider, build the
# closing-tag fragment, stitch begin + body + end into the published file,
# then remove the intermediate fragments.
# Globals read: sampleForCCanalyser.
writeEndOfDescription(){
printf '%s\n' "<hr />" >> temp_description.html
printf '%s\n' "</body>" "</html>" > end.html
cat begin.html temp_description.html end.html > "${sampleForCCanalyser}_description.html"
rm -f begin.html temp_description.html end.html
}
# Builds the full description html for a run: header, fastq-wise reports,
# CCanalyser sections, closing tags. Relies on helpers defined elsewhere
# in this file (printToLogFile, writeBeginningOfDescription,
# writeFastqwiseReports).
writeDescriptionHtml(){
printThis="Writing the description html-document"
printToLogFile
writeBeginningOfDescription
writeFastqwiseReports
writeCCanalysispartsOfDescription
writeEndOfDescription
}
# Builds the description html for a single fastq: header, bunch-division
# reports, fastq-wise reports, closing tags.
writeDescriptionHtmlFastqonly(){
printThis="Writing the description html-document (for single fastq)"
printToLogFile
writeBeginningOfDescription
writeBunchDivisionpartsOfDescription
writeFastqwiseReports
writeEndOfDescription
}
# Builds the summary description html covering all fastqs of a parallel
# run: header, qsub summaries, per-fastq page links, bam-combine reports,
# closing tags.
writeDescriptionHtmlParallelFastqcombo(){
printThis="Writing the description html-document (for summary of all fastqs)"
printToLogFile
writeBeginningOfDescription
writeQsubOutToDescription
writeEachFastqHtmlAddress
writeBamcombineReports
writeEndOfDescription
}
# Generates and publishes the per-fastq description page, then refreshes
# the hub. The *Name variables are deliberately single-quoted: the
# isThis*ParsedFineAndMineToMeddle validators use them as literal labels
# in their error messages.
generateFastqwiseDescriptionpage(){
onlyFastqPartHtmls=1
writeDescriptionHtmlFastqonly
# Moving the description file
thisLocalData="${sampleForCCanalyser}_description.html"
thisLocalDataName='${sampleForCCanalyser}_description.html'
isThisLocalDataParsedFineAndMineToMeddle
thisPublicFolder="${publicPathForCCanalyser}"
thisPublicFolderName='${publicPathForCCanalyser}'
isThisPublicFolderParsedFineAndMineToMeddle
mv -f "${sampleForCCanalyser}_description.html" "${publicPathForCCanalyser}/."
updateHub_part3p
}
# Generates and publishes the combined (all-fastqs) description page.
# Unlike generateFastqwiseDescriptionpage, this does not update the hub.
# The *Name variables are deliberately single-quoted literals used by the
# validators in their error messages.
generateCombinedFastqonlyDescriptionpage(){
onlyFastqPartHtmls=1
writeDescriptionHtmlParallelFastqcombo
# Moving the description file
thisLocalData="${sampleForCCanalyser}_description.html"
thisLocalDataName='${sampleForCCanalyser}_description.html'
isThisLocalDataParsedFineAndMineToMeddle
thisPublicFolder="${publicPathForCCanalyser}"
thisPublicFolderName='${publicPathForCCanalyser}'
isThisPublicFolderParsedFineAndMineToMeddle
mv -f "${sampleForCCanalyser}_description.html" "${publicPathForCCanalyser}/."
}
# Default: build the complete description page (0). The generate* helpers
# above set this to 1 when only the fastq-level parts are wanted.
onlyFastqPartHtmls=0
| true
|
63dd52d18f217784e74b3b59db2fef72c6c43604
|
Shell
|
iPlantCollaborativeOpenSource/crushbone
|
/src/09_pip_install_atmo_requirements.sh
|
UTF-8
| 919
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash -x
# Installs the Atmosphere python requirements into its virtualenv.
# Usage: <script> LOCATIONOFATMOSPHERE VIRTUAL_ENV_ATMOSPHERE JENKINS TEST
PS4='$(date "+%s.%N ($LINENO) + ")'
##############################
# Usage
#
# LOCATIONOFATMOSPHERE -$1
# VIRTUAL_ENV_ATMOSPHERE - $2
##############################
this_filename=$(basename "$BASH_SOURCE")
output_for_logs="logs/$this_filename.log"
# Make sure the log directory exists before touching the log file.
mkdir -p "$(dirname "$output_for_logs")"
touch "$output_for_logs"
main(){
LOCATIONOFATMOSPHERE=$1
VIRTUAL_ENV_ATMOSPHERE=$2
JENKINS=$3
TEST=$4
"$VIRTUAL_ENV_ATMOSPHERE/bin/pip" install -U -r "$LOCATIONOFATMOSPHERE/requirements.txt" >> "$output_for_logs"
if [[ "$JENKINS" = "true" || "$TEST" = "true" ]]; then
# Required to run jenkins OR a test
"$VIRTUAL_ENV_ATMOSPHERE/bin/pip" install -U -r "$LOCATIONOFATMOSPHERE/dev_requirements.txt" >> "$output_for_logs"
fi
"$VIRTUAL_ENV_ATMOSPHERE/bin/wheel" install-scripts celery
}
if [ "$#" -ne 4 ]; then
# Fix: the original used 2>> / 2> here, redirecting stderr of echo, so
# the messages went to stdout and the log file was left empty/truncated.
echo "Illegal number of parameters" >> "$output_for_logs"
echo "$@" >> "$output_for_logs"
exit 1
else
main "$@"
fi
| true
|
3030ba291a5e83e070de4e558959f482627be97e
|
Shell
|
iuri/docker
|
/po5-debian/src/install.sh
|
UTF-8
| 7,407
| 3.390625
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#!/bin/sh -e
# installation script for po5-debian 9
# See the README file for details
# Copyright (c) 2018 Iuri Sampaio <iuri@iurix.com>
# This code is provided under the GNU GPL licenses.
# NOTE(review): 'set +e' below cancels the -e from the shebang, so the
# script continues past failing commands on purpose.
set +e
# comment out to debug
#set -x
# ----------------------------------------------------------------------------------------------
# Script to install ]project-open[ V5.0 in a Docker image for Debian 9
# ----------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------
# Enable systemd
# ----------------------------------------------------------------------------------------------
# Strip all default unit wants except tmpfiles setup, so the container
# boots a minimal systemd.
# Fix: the comparison used '==', which is a bashism; under this script's
# /bin/sh (dash on Debian) 'test' rejects '==', so the || branch fired for
# every entry and deleted systemd-tmpfiles-setup.service as well.
(
cd /lib/systemd/system/sysinit.target.wants/
for i in *; do
[ "$i" = systemd-tmpfiles-setup.service ] || rm -f "$i"
done
)
rm -f /lib/systemd/system/multi-user.target.wants/*
rm -f /etc/systemd/system/*.wants/*
rm -f /lib/systemd/system/local-fs.target.wants/*
rm -f /lib/systemd/system/sockets.target.wants/*udev*
rm -f /lib/systemd/system/sockets.target.wants/*initctl*
rm -f /lib/systemd/system/basic.target.wants/*
# rm -f /lib/systemd/system/anaconda.target.wants/*
# ----------------------------------------------------------------------------------------------
# Update repository info and installed packages
# (maybe the base image was created long time ago)
# ----------------------------------------------------------------------------------------------
apt-get -y update
# ----------------------------------------------------------------------------------------------
# Install packages required by project-open
# ----------------------------------------------------------------------------------------------
# basic administration tools required by project-open
# NOTE(review): the symlink is created before graphviz (which provides
# /usr/bin/dot) is installed - symlink creation does not need the target,
# but verify the ordering is intentional.
ln -s /usr/bin/dot /usr/local/bin/dot
apt-get install -y net-tools git-core emacs unzip zip make nginx ldap-utils ghostscript gsfonts imagemagick graphviz libreoffice-writer libreoffice-draw libreoffice-java-common wget cvs htmldoc
# ----------------------------------------------------------------------------------------------
# Create user projop
# ----------------------------------------------------------------------------------------------
# super-directory for all Web servers /web/ by default
mkdir -p /web && mkdir -p /web/projop
# create a group called "projop"
groupadd projop
# create user "projop" with home directory /web/projop
useradd -d /web/projop -g projop -m -s /bin/bash projop
# assign ownership to projop user on projop directory
chown -R projop:projop /web/projop/
# ----------------------------------------------------------------------------------------------
# Install and setup PostgreSQL 9.6
# ----------------------------------------------------------------------------------------------
# install the required packages
# postgresql-libs will be automatically installed
# NOTE(review): /var/lib/pgsql is the RHEL layout; on Debian the cluster
# normally lives under /var/lib/postgresql - confirm these pg_ctl paths.
apt-get install -y libpq5 postgresql-9.6 postgresql-client-9.6 postgresql-client-common postgresql-common postgresql-contrib-9.6 ssl-cert postgresql-doc postgresql-doc-9.6 libdbd-pg-perl libdbi-perl sysstat
su - postgres <<_EOT_
# initialize the database cluster
/usr/bin/pg_ctl -D "/var/lib/pgsql/data" -l /var/lib/pgsql/initdb.log initdb
# Enable remote connections
echo "host all all 0.0.0.0/0 md5" >> /var/lib/pgsql/data/pg_hba.conf
echo "listen_addresses='*'" >> /var/lib/pgsql/data/postgresql.conf
# start the database
/usr/bin/pg_ctl -D "/var/lib/pgsql/data" -l /var/lib/pgsql/install.log start
sleep 2
# database user "projop" with admin rights
createuser -s projop
_EOT_
# ----------------------------------------------------------------------------------------------
# Initialize the projop database
# ----------------------------------------------------------------------------------------------
su - projop <<_EOT_
createdb --encoding=utf8 --owner=projop projop
_EOT_
# Configure PostgreSQL start with server start
systemctl enable postgresql
# ----------------------------------------------------------------------------------------------
# Install the naviserver distribution
# ----------------------------------------------------------------------------------------------
cd /usr/local
# extract the NaviServer binary 64 bit
tar xzf /usr/src/projop/naviserver-4.99.8.tgz
# fix ownerships and permissions
chown -R root:projop /usr/local/ns
chown nobody:projop /usr/local/ns/logs
find /usr/local/ns -type f -exec chmod 0664 {} \;
chmod 0775 /usr/local/ns/bin/*
# ----------------------------------------------------------------------------------------------
# Setup the /web/projop folder
# ----------------------------------------------------------------------------------------------
# Runs as projop: unpack aux files + product source, then load the DB dump.
su - projop <<_EOT_
cd /web/projop
# extract auxiliary files
tar xzf /usr/src/projop/web_projop-aux-files.5.0.0.0.0.tgz
# extract the ]po[ product source code - latest
tar xzf /usr/src/projop/project-open-Update-5.0.2.4.0.tgz
# enable PlPg/SQL, may already be installed
#createlang plpgsql projop
psql -f /web/projop/pg_dump.5.0.2.4.0.sql > /web/projop/import.log 2>&1
_EOT_
# ----------------------------------------------------------------------------------------------
# Automate NaviServer Startup
# ----------------------------------------------------------------------------------------------
cp /usr/src/projop/projop.service /usr/lib/systemd/system
systemctl enable projop.service
# ----------------------------------------------------------------------------------------------
# Configure Automatic Backups
# ----------------------------------------------------------------------------------------------
# install the backup script in /root/bin/export-dbs
mkdir /root/bin
cp /usr/src/projop/export-dbs /root/bin
chmod ug+x /root/bin/export-dbs
# create a /var/backup directory for ]project-open[ database backups
mkdir /var/backup
chown projop:postgres /var/backup
chmod g+w /var/backup
# create a /var/log/postgres directory for ]project-open[ database related logs
mkdir /var/log/postgres
chown postgres:postgres /var/log/postgres
chmod g+w /var/log/postgres
# install a cron script to automate the backups
cp /usr/src/projop/crontab /root/bin
crontab /root/bin/crontab
# ----------------------------------------------------------------------------------------------
# Setup the directory to hold all the persistent data
# ----------------------------------------------------------------------------------------------
# PODATA/runtime will hold everything that must survive container rebuilds.
PODATA=/var/lib/docker-projop
mkdir "$PODATA"
mkdir "$PODATA/runtime"
# Relocate the path given as $2 into the persistent runtime tree under
# the name $1, leaving a symlink behind at the original location so the
# rest of the system keeps working unchanged.
# Globals read: PODATA (root of the persistent-data tree).
setup_path() {
local target source parent
target="$PODATA/runtime/$1"
source="$2"
parent=$(dirname "$target")
mkdir -p "$parent"
mv "$source" "$target"
ln -s "$target" "$source"
}
# project-open related volumes (configuration, filestorage and logs)
setup_path projop/etc /web/projop/etc
setup_path projop/filestorage /web/projop/filestorage
setup_path projop/log /web/projop/log
# PostgreSQL related volumes (everything contained in a single path)
setup_path postgresql /var/lib/pgsql
# Automatic backups related volumes (backup databases and logs)
setup_path backups/data /var/backup
setup_path backups/logs /var/log/postgres
# Save the initial persistent data
# The .initialized marker lets the container entrypoint detect a fresh
# (empty) runtime volume and restore this snapshot into it.
touch "$PODATA/runtime/.initialized"
tar -czf "$PODATA/initial.tar.gz" -C "$PODATA/runtime" .
# The docker documentation says that, on Windows platforms,
# mounted volumes must be non-existent or empty
rm -rf "$PODATA/runtime"
mkdir "$PODATA/runtime"
| true
|
817eb1e04f07f5a3678f9f74e6d81efbe19299a7
|
Shell
|
weizhanguio/ABM_results
|
/Hexagon/Optimized-ABM/MPI/mpi.pbs
|
UTF-8
| 883
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
# PBS job script: runs the optimized ABM MPI binary on 32 cores.
# The commented aprun lines below are alternative CPU-pinning layouts
# kept for reference.
#
# Give the job a name
#PBS -N "32procs"
#
# Specify the project the job belongs to
#PBS -A nn2849k
#
# Request 5 hours of walltime:
#PBS -l walltime=5:00:00
#
#
# Send me an email on a=abort, b=begin, e=end
#
#
# Use this email address (check that it is correct):
#
#
# Write the standard output of the job to file 'mpijob.out' (optional)
#
#
# Write the standard error of the job to file 'mpijob.err' (optional)
#
#
# Make sure I am in the correct directory
#PBS -l mppwidth=32
cd /work/weizhang/ABM/Optimized-ABM/MPI
#aprun -B -cc 0,10 ./app 0.1 200000
#aprun -B -cc 0,9 ./app 0.1 20000
#aprun -B -cc 0,1,2,3,10,11,12,13 ./app 0.1 200000
#aprun -B -cc 0,4,8,12,16,20,24,28 ./app 0.1 200000
#aprun -B -cc 0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30 ./app 0.1 200000
# Active run: pin to cores 0-31, args: 0.1 200000 (meaning defined by ./app).
aprun -B -cc 0-31 ./app 0.1 200000
| true
|
6f5a48e0c5c8f91760a3d81bcba22cc5edc4f79f
|
Shell
|
omertuc/rhods-entitled-mirror
|
/build.sh
|
UTF-8
| 311
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/bash
# Build the rhods-mirror "sync" and "serve" container images with podman.
set -euxo pipefail
# Resolve the directory this script lives in, so relative build contexts work.
SCRIPT_DIR=$(dirname "$(readlink -f "$0")")
pushd "${SCRIPT_DIR}"
podman build -f Containerfile.sync containers/sync/ -t quay.io/otuchfel/rhods-mirror-sync:latest
podman build -f Containerfile.serve containers/serve/ -t quay.io/otuchfel/rhods-mirror-serve:latest
echo "Done"
| true
|
8e24b6de22b34864cd7e4e82fcdc1752ada45f68
|
Shell
|
pkorduan/kvwmap-server
|
/config/config-default
|
UTF-8
| 896
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
#settings
DEBUG=false
OS_USER="gisadmin"
# Fix: default to false and flip to true only when getent actually finds
# the account. The original initialised this to true, so the getent check
# could never report a missing user.
OS_USER_EXISTS=false
getent passwd $OS_USER >/dev/null 2>&1 && OS_USER_EXISTS=true
USER_DIR=/home/$OS_USER
SCRIPT=$(readlink -f "$0")
SCRIPTPATH=$(dirname "$SCRIPT")
export SERVER_NAME=$(hostname)
echo "Folgenden Hostname ermittelt: $SERVER_NAME"
export DOMAIN_NAME=$(hostname)
echo "Folgenden Domainname ermittelt: $DOMAIN_NAME"
export IP_EXTERN=$(hostname -I | awk '{print $1}')
echo "Folgende IP ermittelt: $IP_EXTERN"
CARGOPATH="$SCRIPTPATH/cargo-enabled"
CARGOS=()
CARGO_WEB_LINKS=
CARGO_PGSQL_LINKS=
KVWMAP_SERVER_VERSION=2.2.3
PROD_APP="kvwmap"
# Network settings
# NOTE(review): network_name is expected to be set by the caller before
# sourcing this file - confirm against the scripts that include it.
case "$network_name" in
  kvwmap_prod)
    NETWORK_IP_RANGE="172.0.10.0/24"
    ;;
  kvwmap_dev)
    NETWORK_IP_RANGE="172.0.20.0/24"
    ;;
  kvwmap_test)
    NETWORK_IP_RANGE="172.0.30.0/24"
    ;;
  *)
    # usage
    # exit
    ;;
esac
NETWORK_SUBNET=$NETWORK_IP_RANGE
|
165997c63d206a87581c3c3650b268e89c89be3a
|
Shell
|
benjamincharity-dev/terminus-oss
|
/tools/ci/set-should-run-flag.sh
|
UTF-8
| 310
| 3.4375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
. ~/.bashrc
# Decide whether CI should run, based on the commit message passed as $1.
raw_msg=$1
echo "Original commit Message: $raw_msg"
# Drop any double quotes the caller may have wrapped around the message.
clean_msg=$(echo "$raw_msg" | tr -d '"')
echo "Cleaned commit message: $clean_msg"
# A "skip ci" / "ci skip" marker anywhere in the message disables the run.
case "$clean_msg" in
  *"skip ci"*|*"ci skip"*) echo "SHOULD_RUN=false" >> "$GITHUB_ENV" ;;
  *) echo "SHOULD_RUN=true" >> "$GITHUB_ENV" ;;
esac
| true
|
81637d6b8547eee35a7e5449883220f9858acaa0
|
Shell
|
wormintrude/sos2dbook
|
/bak-sos2dbook.sh
|
UTF-8
| 19,524
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
# Script para parsear reportes SOS y generar documentos en publican [DocBook]
# TO DO
# -----
# - [DONE] Que pasa si el dbook_title ya existe como DocBook - Lo borramos ? Abortamos ?
# - Las variables en autores_file no soportan espacios [fixme]
# - Los paths a los sources estan hardcodeados, deberian pasar como argumentos [fixme]
# - [DONE] En doctype_header el % BOOK_ENTITIES SYSTEM tiene que reflejar el nombre del Book
# - El brand esta harcodeado, deberia pasarse como argumento [fixme]
# - El dbook_title esta harcodeado, deberia pasarse como argumento [fixme]
# - [DONE] Los 'headers' puede que sean solamente aplicables al Authors_Group.xml
# - El metodo dbook_create() no tiene validacion - IMPORTANTE! [fixme]
# - Metodo para descomprimir solo contempla .tar.xz - IMPORTANTE! [fixme]
# - Los SOS reports no tienen /etc/passwd para sacar usuarios agregados [addme]
# - Los SOS reports no tienen una manera de ver software instalado _puntualmente_ para ese deployment / proyecto [addme]
# - Diagrama de la solucion por separado o metido adentro de alguna otra funcion ?
# - Bonds y cualquier tipo de red != ethX [addme]
## Definiciones
# Nombre del DocBook | Es el nombre del cliente para que quede 'bonito' el titulo
dbook_title="Von_Braun_Rockets" # Idealmente seria $1
# Headers: the generic doctype_header carries a <replaceme> placeholder
# that each specific header substitutes with its root element name.
xml_header="<?xml version='1.0' encoding='utf-8' ?>"
doctype_header="<!DOCTYPE <replaceme> PUBLIC \"-//OASIS//DTD DocBook XML V4.5//EN\" \"http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd\" [<!ENTITY % BOOK_ENTITIES SYSTEM \"$dbook_title.ent\"> %BOOK_ENTITIES;]>"
autores_doctype_header=`echo $doctype_header | sed 's/<replaceme>/authorgroup/'`
bookinfo_doctype_header=`echo $doctype_header | sed 's/<replaceme>/bookinfo/'`
book_doctype_header=`echo $doctype_header | sed 's/<replaceme>/book/'`
chapter_doctype_header=`echo $doctype_header | sed 's/<replaceme>/chapter/'`
preface_doctype_header=`echo $doctype_header | sed 's/<replaceme>/preface/'`
# Fix: this line read $doctupe_header (typo), which is unset, so the
# revision-history doctype header was always empty.
revhist_doctype_header=`echo $doctype_header | sed 's/<replaceme>/appendix/'`
title_doctype_header=`echo $doctype_header | sed 's/<replaceme>/book/'`
# Definiciones del publican.cfg
xml_lang=en-US
dbook_type=Book
brand=RedHat
# PATHs
publican_test_dir=/home/$(whoami)/Documents/publican-test
dbook_root=$publican_test_dir/$dbook_title
dbook_data_dir=$publican_test_dir/$dbook_title/$xml_lang
dbook_build_dir=$publican_test_dir/$dbook_title/tmp
dbook_autores_file=$dbook_data_dir/Author_Group.xml
dbook_info_file=$dbook_data_dir/Book_Info.xml
dbook_chapter_file=$dbook_data_dir/Chapter.xml
dbook_preface_file=$dbook_data_dir/Preface.xml
dbook_revisionh_file=$dbook_data_dir/Revision_History.xml
dbook_ent_file=$dbook_data_dir/$dbook_title.ent
dbook_include_file=$dbook_data_dir/$dbook_title.xml
autores_file=./autores.lista
sos_dir=./sosreports
## Tratamiento de los reportes SOS
# Unpack every SOS report tarball found in $sos_dir, in place.
# Fix: iterates the glob directly instead of parsing `ls` output, so
# filenames with spaces survive and an empty directory is a clean no-op.
decompress_sos(){
for i in "$sos_dir"/*.tar.xz; do
[ -e "$i" ] || continue
echo "Descomprimiendo $i"
tar xf "$i" -C "$sos_dir"/ &>/dev/null
done
}
# Bundle every original SOS tarball into a single gzipped archive
# (./sos-tarball.tar.gz in the current directory), discarding tar chatter.
archive_sos(){
tar -czvf ./sos-tarball.tar.gz $sos_dir/*.tar.xz >/dev/null 2>&1
}
# Delete the original .tar.xz archives once they have been extracted
# and backed up. Fix: $sos_dir is quoted so a path containing spaces
# does not word-split into multiple rm arguments.
remove_sos_xz(){
rm -f "$sos_dir"/*.tar.xz &>/dev/null
}
## Elementos dinamicos del DocBook
# Creacion del DocBook
dbook_create(){
create_cmd="publican create"
cd $publican_test_dir &>/dev/null
$create_cmd --name $dbook_title --lang $xml_lang --brand $brand --type $dbook_type &>/dev/null
cd - >/dev/null
}
# XML Autores
# Emit the Author_Group.xml content to stdout, built from $autores_file.
# The file is expected to hold "AutorN" marker lines followed (within the
# next 5 lines) by "first = X", "last = X", "orgname = X", "orgdiv = X",
# "email = X" entries; awk '{print $3}' picks the value after the '='.
# NOTE(review): grep -A 5 Autor$1 is a prefix match - with 10 or more
# authors, "Autor1" also matches "Autor10"; underscores in orgname/orgdiv
# are turned into spaces, so values cannot contain literal underscores.
autores_create(){
# Helper: field N's "first" value.
s_first(){
local first=$(grep -A 5 Autor$1 $autores_file | grep -v Autor | grep first | awk '{print $3}')
echo $first
}
s_last(){
local last=$(grep -A 5 Autor$1 $autores_file | grep -v Autor | grep last | awk '{print $3}')
echo $last
}
s_orgname(){
local orgname=$(grep -A 5 Autor$1 $autores_file | grep -v Autor | grep orgname | awk '{print $3}' | sed 's/_/ /g')
echo $orgname
}
s_orgdiv(){
local orgdiv=$(grep -A 5 Autor$1 $autores_file | grep -v Autor | grep orgdiv | awk '{print $3}' | sed 's/_/ /g')
echo $orgdiv
}
s_email(){
local email=$(grep -A 5 Autor$1 $autores_file | grep -v Autor | grep email | awk '{print $3}')
echo $email
}
echo $xml_header
echo $autores_doctype_header
echo "<authorgroup>"
# One <author> element per non-comment "Autor" line in the list file.
for ((i=1; i<=$(grep "Autor" $autores_file | grep -v "#" | wc -l); i++)) ; do
echo " <author>
<firstname>$(s_first $i)</firstname>
<surname>$(s_last $i)</surname>
<affiliation>
<orgname>$(s_orgname $i)</orgname>
<orgdiv>$(s_orgdiv $i)</orgdiv>
</affiliation>
<email>$(s_email $i)</email>
</author>"
done
echo "</authorgroup>"
}
# Generamos el XML de informacion del DocBook
# Emit the Book_Info.xml content to stdout.
# Globals read: xml_header, bookinfo_doctype_header, dbook_title.
# The locals marked "temp" are placeholders the author intended to
# parameterise later.
book_info_create(){
local dbook_desc="DID (Detailed Implementation Document)" # temp
local product_name="Red Hat Global Professional Services" # temp
local product_number="" # temp
local edition=0.1 # temp
local pubsnumber=1 # temp - random
local abstract="Esto es un abstract del problema" # temp
echo $xml_header
echo $bookinfo_doctype_header
# The id swaps '-' for '_' in the title to keep it a valid XML id token.
echo "<bookinfo id=\"book-$dbook_title-$(echo $dbook_title | sed 's/-/_/g')\">
<title>$(echo $dbook_title | sed 's/_/ /g')</title>
<subtitle>$dbook_desc</subtitle>
<productname>$product_name</productname>
<productnumber>$product_number</productnumber>
<edition>$edition</edition>
<pubsnumber>$pubsnumber</pubsnumber>
<abstract>
<para>
$abstract
</para>
</abstract>
<corpauthor>
<inlinemediaobject>
<imageobject>
<imagedata fileref=\"Common_Content/images/title_logo.svg\" format=\"SVG\" />
</imageobject>
</inlinemediaobject>
</corpauthor>
<xi:include href=\"Common_Content/Legal_Notice.xml\" xmlns:xi=\"http://www.w3.org/2001/XInclude\" />
<xi:include href=\"Author_Group.xml\" xmlns:xi=\"http://www.w3.org/2001/XInclude\" />
</bookinfo>"
}
# Generamos el diagrama de la solucion
# Generate Diagrama.xml (the solution-diagram chapter) in the DocBook
# data directory, copying the SVG diagram into its images/ folder.
# NOTE(review): the SVG source path is hardcoded to /home/reaper/... -
# this matches the script's own "[fixme]" list and should become an
# argument.
sos_gen_diagrama(){
local diag_img="images/exolgan-diagrama.svg"
cp /home/reaper/Documents/EXOLGAN/exolgan-diagrama.svg $dbook_data_dir/images/
local chapter_name="Diagrama"
echo "Generando $chapter_name.xml"
touch $dbook_data_dir/$chapter_name.xml
echo $xml_header >> $dbook_data_dir/$chapter_name.xml
echo $chapter_doctype_header >> $dbook_data_dir/$chapter_name.xml
echo "<chapter id=\"$chapter_name\">
<title>Diagrama de la Solucion</title>
<section id=\"$chapter_name.grafico\">
<title>$chapter_name</title>
<para>
<informalfigure>
<graphic fileref=\"$diag_img\" scalefit=\"1\" width=\"100%\" contentdepth=\"100%\"/>
</informalfigure>
</para>
</section>
</chapter>" >> $dbook_data_dir/$chapter_name.xml
}
# Generate one DocBook chapter XML per SOS report found in $sos_dir.
# Each chapter summarises a server: identity, disk partitions, LVM, network,
# DNS/NTP servers, and verbatim dumps of key /etc files from the sosreport.
# Reads globals: sos_dir, dbook_data_dir, xml_header, chapter_doctype_header.
sos_gen_caps(){
for i in `ls $sos_dir`; do
# Chapter name is the host part of the report dir (text before the first '-').
local chapter_name=$(echo $i | cut -d - -f1)
echo "Generando $chapter_name.xml"
touch $dbook_data_dir/$chapter_name.xml
# Paths of the interesting config files inside this sosreport extraction.
local sysctl=$sos_dir/$i/etc/sysctl.conf
local hosts=$sos_dir/$i/etc/hosts
local rc_local=$sos_dir/$i/etc/rc.d/rc.local
local profile=$sos_dir/$i/etc/profile
local chkconfig=$sos_dir/$i/chkconfig
local ntp_conf=$sos_dir/$i/etc/ntp.conf
local dns_conf=$sos_dir/$i/etc/resolv.conf
# table_gen() works for both NTP and DNS config files: one table row per
# "server" line. Its loop variable shadows the outer $i, but every call
# happens inside a $(...) subshell below, so the outer $i survives.
table_gen(){
for i in $(cat $1 | grep -v \# | grep server | awk '{print $2}') ; do
echo "<row><entry>Server</entry><entry>$i</entry></row>"
done
}
local netconf_dir=$sos_dir/$i/etc/sysconfig/network-scripts
# One table row per ifcfg-eth* file: device, VLAN flag, IP, netmask, onboot.
netconf_gen(){
for i in $(ls $netconf_dir/ifcfg-eth*) ; do
echo "<row><entry>$(grep DEVICE $i | sed 's/DEVICE=//g' | sed 's/"//g')</entry><entry>$(grep VLAN $i)</entry><entry>$(grep IPADDR $i | sed 's/IPADDR=//g')</entry><entry>$(grep NETMASK $i | sed 's/NETMASK=//g')</entry><entry>$(egrep 'ONBOOT|ONPARENT' $i | sed 's/ONBOOT=//g' | sed 's/ONPARENT=//g')</entry></row>"
done
}
local hostname=$sos_dir/$i/hostname
local ifconfig=$sos_dir/$i/ifconfig
local lsb_release=$sos_dir/$i/lsb-release
local selinux=$sos_dir/$i/etc/selinux/config
local partitions=$sos_dir/$i/proc/partitions
local mounts=$sos_dir/$i/proc/mounts
local etc_lvm_path=$sos_dir/$i/etc/lvm/backup
# is_lvm - MAY FAIL - ugly hack. NOTE(review): pv_list prefixes
# $etc_lvm_path with $sos_dir/$1 again (double prefix), but it is never
# actually called below — the LVM table is hard-coded placeholder rows.
pv_list(){
cat $sos_dir/$1/$etc_lvm_path/* | grep -A 2 pv | grep device | awk '{print $3}' | sed 's/"//g'| sed 's/\/dev\///g'
}
# One row per partition from /proc/partitions. The 'sed /\<sd[a-z]\>/g'
# blanks whole-disk lines (sda) while keeping partition lines (sda1),
# and the empty lines are then dropped.
diskpart_info(){
for i in $(cat $partitions | egrep -v 'major|dm|cciss|emc' | sed '/\<sd[a-z]\>/g' | sed '/^$/d' | awk '{print $4}'); do
echo "<row><entry>$i</entry><entry>$(cat $partitions | egrep -v 'major|dm|cciss|emc'| grep $i | awk '{print $3}')</entry><entry>$(grep $i $mounts | awk '{print $2}')</entry><entry></entry></row>"
done
}
echo $xml_header >> $dbook_data_dir/$chapter_name.xml
echo $chapter_doctype_header >> $dbook_data_dir/$chapter_name.xml
# Emit the whole chapter as one quoted string; the $(...) substitutions run
# the helpers defined above against this report's files.
echo "<chapter id=\"$chapter_name\">
<title>$chapter_name</title>
<section id=\"$chapter_name.summary\">
<title>Servidor</title>
<para>
<table>
<title>Servidor</title>
<tgroup cols='2'>
<thead>
<row>
<entry></entry>
<entry></entry>
</row>
</thead>
<tbody>
<row><entry>Nombre</entry><entry>$(cat $hostname)</entry></row>
<row><entry>Direccion IP</entry><entry>$(cat $ifconfig | grep inet | awk '{print $2}' | sed 's/addr://g' | grep -v 127.0.0.1)</entry></row>
<row><entry>Root Password</entry><entry>redhat</entry></row>
<row><entry>Sistema Operativo</entry><entry>$(if [ -e $lsb_release ] ; then cat $lsb_release | awk '{$1=""; print $0}'; else echo "Undefined" ; fi)</entry></row>
<row><entry>Firewall</entry><entry>$(cat $chkconfig | grep iptables | awk '{if ( $5 == "3:on" && $7 == "5:on" ){print "Enabled"} else {print "Disabled"}}')</entry></row>
<row><entry>SELinux</entry><entry>$(if [ -e $selinux ]; then cat $selinux | egrep -v '#|TYPE' | sed '/^$/d' | sed 's/SELINUX=//' | sed 's/\([a-z]\)\([a-zA-Z0-9]*\)/\u\1\2/g'; else echo "No hay /etc/selinux/config"; fi)</entry></row>
</tbody>
</tgroup>
</table>
</para>
</section>
<section id=\"$chapter_name.diskpart\">
<title>Particionado de Discos</title>
<para>
<table>
<title>Particionado de Discos</title>
<tgroup cols='4'>
<thead>
<row>
<entry>Slice</entry>
<entry>Size</entry>
<entry>Mount</entry>
<entry>FS</entry>
</row>
</thead>
<tbody>
$(diskpart_info)
</tbody>
</tgroup>
</table>
</para>
</section>
<section id=\"$chapter_name.lvmconf\">
<title>Configuracion de LVM</title>
<para>
<table>
<title>Configuracion de LVM</title>
<tgroup cols='6'>
<thead>
<row>
<entry>PV</entry>
<entry>VG</entry>
<entry>LV</entry>
<entry>Size</entry>
<entry>Mount</entry>
<entry>FS</entry>
</row>
</thead>
<tbody>
<row><entry>PV</entry><entry>PV</entry><entry>PV</entry><entry>PV</entry><entry>PV</entry><entry>PV</entry></row>
</tbody>
</tgroup>
</table>
</para>
</section>
<section id=\"$chapter_name.netconf\">
<title>Configuracion de Red</title>
<para>
<table>
<title>Configuracion de Red</title>
<tgroup cols='5'>
<thead>
<row>
<entry>Interfaz</entry>
<entry>VLAN Tag</entry>
<entry>Direccion IP</entry>
<entry>Mascara</entry>
<entry>OnBoot</entry>
</row>
</thead>
<tbody>
$(netconf_gen | tr " " "\n")
</tbody>
</tgroup>
</table>
</para>
</section>
<section id=\"$chapter_name.dns\">
<title>DNS Server</title>
<para>
<table>
<title>DNS Server</title>
<tgroup cols='2'>
<thead>
<row>
<entry>Campo</entry>
<entry>Valor</entry>
</row>
</thead>
<tbody>
$(table_gen $dns_conf | tr " " "\n")
</tbody>
</tgroup>
</table>
</para>
</section>
<section id=\"$chapter_name.ntp\">
<title>NTP Server</title>
<para>
<table>
<title>NTP Server</title>
<tgroup cols='2'>
<thead>
<row>
<entry>Campo</entry>
<entry>Valor</entry>
</row>
</thead>
<tbody>
$(table_gen $ntp_conf | tr " " "\n")
</tbody>
</tgroup>
</table>
</para>
</section>
<section id=\"$chapter_name.uname\">
<title>Sysctl.conf</title>
<para>
<code>/etc/sysctl.conf:</code>
<programlisting><![CDATA[$(cat $sysctl | grep -v \# | sed '/^$/d') ]]></programlisting>
</para>
</section>
<section id=\"$chapter_name.hosts\">
<title>Hosts</title>
<para>
<code>/etc/hosts:</code>
<programlisting><![CDATA[$(cat $hosts) ]]></programlisting>
</para>
</section>
<section>
<title>rc.local</title>
<para>
<code>/etc/rc.d/rc.local:</code>
<programlisting><![CDATA[$(cat $rc_local) ]]></programlisting>
</para>
</section>
<section id=\"$chapter_name.profile\">
<title>Profile</title>
<para>
<code>/etc/profile:</code>
<programlisting><![CDATA[$(if [ -e $sos_dir/$i/etc/profile ] ; then cat $profile ; else echo "No se modifico el archivo" ; fi)]]></programlisting>
</para>
</section>
<section id=\"$chapter_name.chkconfig\">
<title>Startup Services</title>
<para>
<programlisting><![CDATA[$(cat $chkconfig | awk '{print$1}')]]></programlisting>
</para>
</section>
</chapter>" >> $dbook_data_dir/$chapter_name.xml
done
# Chapter.xml is just the template left behind by publican's 'create'; drop it.
if [ -e $dbook_data_dir/Chapter.xml ]; then
rm -f $dbook_data_dir/Chapter.xml &>/dev/null
fi
}
# Emit (to stdout) the master <book> document that xi:includes every generated
# XML, in the order publican should process them: book info, preface, diagram
# chapter, then one chapter per SOS report, then the index.
dbook_include(){
echo $xml_header
echo $book_doctype_header
# status="draft" marks the document as work in progress
echo "<book status=\"draft\">"
printf '%s\n' \
' <xi:include href="Book_Info.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>' \
' <xi:include href="Preface.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />' \
' <xi:include href="Diagrama.xml" xmlns:xi="http://www.w3.org/2001/XInclude" />'
# One include per SOS report; chapters are named after the host part of the
# report directory (text before the first dash).
for capitulo in $(ls $sos_dir | cut -d - -f1); do
echo "<xi:include href=\"$capitulo.xml\" xmlns:xi=\"http://www.w3.org/2001/XInclude\" />"
done
echo " <index />"
echo "</book>"
}
# Run publican over the DocBook tree, producing html, html-single, pdf and
# epub output for $xml_lang. Tool output and cd noise are discarded; the
# caller checks the produced files to see which formats succeeded.
dbook_build(){
build_cmd="publican build"
echo "Buildeando DocBook" # debug
cd $dbook_root &>/dev/null
$build_cmd --langs=$xml_lang --formats=html,html-single,pdf,epub &>/dev/null
cd - &>/dev/null
}
## 'Commit' - there is no validation here [because we live ON THE EDGE MWAHAHA]
# Abort if a book with this title already exists in the publican test dir.
if [ -e $publican_test_dir/$dbook_title ] ; then
echo "$dbook_title ya exite. Renombrar este documento o borrar $dbook_title"
exit 1
fi
# Unpack and archive the SOS reports (helpers defined earlier in this file).
decompress_sos
archive_sos
remove_sos_xz
# Create the DocBook skeleton.
echo "Generando Templates"
dbook_create
## Generate the individual XML files.
# Author_Group.xml
echo "Generando Author_Group.xml"
autores_create > $dbook_autores_file
# Book_Info.xml
echo "Generando Book_Info.xml"
book_info_create > $dbook_info_file
# One chapter per SOS report, plus one for the solution diagram.
sos_gen_diagrama
sos_gen_caps
# Generate the 'index' of XMLs to be included.
echo "Generando $dbook_title.xml"
dbook_include > $dbook_include_file
# Build the document in every output format, then report what was produced.
dbook_build
if [ -e $dbook_build_dir/$xml_lang/html/index.html ] ; then
echo "DocBook en formato html generado."
else
echo "No se genero el DocBook en formato html."
fi
# NOTE(review): '[ -e dir/*.epub ]' only behaves when the glob expands to
# exactly one file; zero or multiple matches misbehave — confirm. Same for
# the *.pdf test below.
if [ -e $dbook_build_dir/$xml_lang/*.epub ] ; then
echo "DocBook en formato epub generado."
else
echo "No se genero el DocBook en formato epub."
fi
if [ -e $dbook_build_dir/$xml_lang/html-single/index.html ] ; then
echo "DocBook en formato html-single generado."
else
echo "No se genero el DocBook en formato html-single."
fi
if [ -e $dbook_build_dir/$xml_lang/pdf/*.pdf ] ; then
echo "DocBook en formato PDF generado."
else
echo "No se genero el DocBook en formato PDF."
fi
| true
|
bb88c5b6a214b098d03bc50b6607bc34f0b3efd4
|
Shell
|
Super-LeoJayZhang/TSMP
|
/bldsva/machines/GENERIC_X86/build_interface_GENERIC_X86.ksh
|
UTF-8
| 2,713
| 2.75
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
#! /bin/ksh
# Per-machine defaults for the GENERIC_X86 build: third-party library
# locations, compiler optimization, profiling and batch-queue settings.
# All variables are intentionally global — the build framework reads them.
getMachineDefaults(){
route "${cblue}>> getMachineDefaults${cnormal}"
# --- third-party library install prefixes ---
defaultMpiPath="/opt/mpich-3.2.1"
defaultTclPath="/opt/tcl8.6.9"
defaultHyprePath="/opt/hypre-2.11.2"
defaultSiloPath="/opt/silo-4.10"
defaultGrib1Path="/opt//DWD-libgrib1_20110128/lib"
defaultNcdfPath="/usr"
defaultGribapiPath="/usr"
defaultPncdfPath="/usr"
# libraries without a dedicated install on this machine
defaultJasperPath=""
defaultLapackPath=""
# --- default compiler/linker optimization ---
defaultOptC="-O2"
# --- profiling hooks (only "no" and "scalasca" are listed) ---
profilingImpl=" no scalasca "
profComp=""
profRun=""
profVar=""
# --- default processor/queue settings (none on a generic workstation) ---
defaultwtime=""
defaultQ=""
route "${cblue}<< getMachineDefaults${cnormal}"
}
# Machine finalization hook: nothing to do on the generic x86 target, so the
# function only emits the entry/exit trace used by the surrounding framework
# (same route pattern as getMachineDefaults/createRunscript).
finalizeMachine(){
route "${cblue}>> finalizeMachine${cnormal}"
route "${cblue}<< finalizeMachine${cnormal}"
}
# Build the job runscript ($rundir/tsmp_slm_run.bsh): computes the MPI task
# layout, picks the mpiexec command line (PDAF data-assimilation binary vs.
# coupled COSMO/ParFlow/CLM MPMD run) and, for multi-instance OASIS-MCT runs,
# writes one line per rank into $rundir/instanceMap.txt.
createRunscript(){
route "${cblue}>> createRunscript${cnormal}"
# Total MPI ranks over all component models and ensemble instances.
mpitasks=$((numInst * ($nproc_cos + $nproc_clm + $nproc_pfl + $nproc_oas)))
# Node count rounded up: bc does the division, perl does the ceil.
nnodes=`echo "scale = 2; $mpitasks / $nppn" | bc | perl -nl -MPOSIX -e 'print ceil($_);'`
#DA: with PDAF all models run inside the single tsmp-pdaf binary
if [[ $withPDAF == "true" ]] ; then
srun="mpiexec -n $mpitasks ./tsmp-pdaf -n_modeltasks $(($numInst-$startInst)) -filtertype 2 -subtype 1 -delt_obs $delta_obs -rms_obs 0.03 -obs_filename swc_crp"
else
# MPMD launch: COSMO, ParFlow and CLM in one mpiexec invocation
srun="mpiexec -np $nproc_cos ./lmparbin_pur : -np $nproc_pfl ./parflow $pflrunname : -np $nproc_clm ./clm"
fi
# Append the run wrapper; variables are expanded now, at creation time.
cat << EOF >> $rundir/tsmp_slm_run.bsh
#!/bin/bash
cd $rundir
date
echo "started" > started.txt
rm -rf YU*
export $profVar
$srun
date
echo "ready" > ready.txt
exit 0
EOF
counter=0
# Rank ranges of each component (OAS, then COS, then PFL, then CLM),
# used to order the instance map file entries below.
#for mapfile
start_oas=$counter
end_oas=$(($start_oas+$nproc_oas-1))
start_cos=$(($nproc_oas+$counter))
end_cos=$(($start_cos+($numInst*$nproc_cos)-1))
start_pfl=$(($numInst*$nproc_cos+$nproc_oas+$counter))
end_pfl=$(($start_pfl+($numInst*$nproc_pfl)-1))
start_clm=$((($numInst*$nproc_cos)+$nproc_oas+($numInst*$nproc_pfl)+$counter))
end_clm=$(($start_clm+($numInst*$nproc_clm)-1))
# NOTE(review): the '[[ ... ]] then' lines (no ';') below are ksh-style and
# a syntax error under bash; '>' inside [[ ]] compares lexicographically,
# and '{$startInst..$(...)}' brace ranges do not expand with variables —
# kept verbatim, confirm against the ksh actually used.
if [[ $numInst > 1 && $withOASMCT == "true" ]] then
# one map line per COSMO rank of every instance
for instance in {$startInst..$(($startInst+$numInst-1))}
do
for iter in {1..$nproc_cos}
do
if [[ $withCOS == "true" ]] then ; echo $instance >> $rundir/instanceMap.txt ;fi
done
done
# then one line per ParFlow rank of every instance
for instance in {$startInst..$(($startInst+$numInst-1))}
do
for iter in {1..$nproc_pfl}
do
if [[ $withPFL == "true" ]] then ; echo $instance >> $rundir/instanceMap.txt ;fi
done
done
# and finally one line per CLM rank of every instance
for instance in {$startInst..$(($startInst+$numInst-1))}
do
for iter in {1..$nproc_clm}
do
if [[ $withCLM == "true" ]] then ; echo $instance >> $rundir/instanceMap.txt ;fi
done
done
fi
comment " change permission of runscript and mapfile"
chmod 755 $rundir/tsmp_slm_run.bsh >> $log_file 2>> $err_file
check
route "${cblue}<< createRunscript${cnormal}"
}
| true
|
778e61d0f280c6ed1a6d1e1b4ecdb14475d82727
|
Shell
|
BaderLab/netDx
|
/inst/extdata/INSTALL/INSTALL_Unix.sh
|
UTF-8
| 2,399
| 3.359375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Script to automate dependency install for netDx:
# Unix packages (apt), Java/R presence checks, CRAN and Bioconductor
# packages, then 'R CMD INSTALL netDx'. Must run with apt privileges.
echo "* Installing Unix dependencies"
# BUGFIX: the original line was Dockerfile syntax ("ENV TZ 'America/New York'"),
# which is not a shell command, and the IANA zone name was missing its
# underscore. Export TZ so the tzdata install below stays non-interactive.
export TZ='America/New_York'
echo $TZ > /etc/timezone && \
apt-get update && \
apt-get install -y tzdata && \
apt-get install -y --no-install-recommends \
gfortran \
r-base \
openjdk-8-jre \
gcc make g++ \
zlib1g-dev libssl-dev libssh2-1-dev libcurl4-openssl-dev \
liblapack-dev liblapack3 libopenblas-base libopenblas-dev \
libxml2-dev
echo "* Checking if Java installed ..."
# NOTE(review): modern OpenJDK banners read "openjdk version", which this
# grep will not match — confirm against the target JDK.
if java -version 2>&1 > /dev/null | grep -q "java version" ; then
echo -e "\tdone."
else {
echo -e "*** ERROR: Java not found; install (https://www.java.com/en/download/) or add to path"
# BUGFIX: exit non-zero on error (was 'exit 0')
exit 1;
}
fi
echo "* Checking if R installed ..."
if R --version | grep -q "R version" ;
then
ver=`R --version | grep "R version" | cut -f 3 -d " "`
echo -e "\tversion found: $ver"
ver1=`echo $ver | cut -f1 -d"."`
ver2=`echo $ver | cut -f2 -d"."`
# BUGFIX: require (major > 3) OR (major == 3 AND minor >= 6). The original
# 'ver1 -ge 3 && ver2 -ge 6' wrongly rejected R 4.0-4.5.
if [ $ver1 -gt 3 ] || { [ $ver1 -eq 3 ] && [ $ver2 -ge 6 ]; }; then
echo -e "\tdone"
else {
echo ""
echo -e "\t*** ERROR: Version 3.6+ of R required. Install from https://cran.r-project.org/, or add to path"
echo -e "\t*** If upgrading, install r-base and r-base-dev"
echo -e "\t*** Visit https://cran.r-project.org/bin/linux/ubuntu/README.html for details"
# BUGFIX: exit non-zero on error (was 'exit 0')
exit 1
}
fi
else {
echo -e "\t*** ERROR: R not found. Install R 3.6+ from https://cran.r-project.org/, or add to path"
# BUGFIX: exit non-zero on error (was 'exit 0')
exit 1;
}
fi
# install R packages
echo "* Installing R dependencies"
echo "r <- getOption('repos'); r['CRAN'] <- 'http://cran.us.r-project.org'; options(repos = r);" > ~/.Rprofile
declare -a PKGS=( devtools curl bigmemory foreach combinat doParallel ROCR pracma RColorBrewer reshape2 ggplot2 caroline rmarkdown igraph glmnet );
# Each package is only installed when not already available.
for p in ${PKGS[@]};do
echo -e "\t* Checking for $p"
Rscript -e "if(!requireNamespace(\"$p\",quietly=TRUE)){ install.packages(\"$p\")}"
done
echo "* Installing BioConductor if required"
Rscript -e 'if (!requireNamespace("BiocManager", quietly = TRUE)){install.packages("BiocManager")}'
echo "* Installing BioConductor dependencies if required"
declare -a PKGS=( GenomicRanges RCy3 );
for p in ${PKGS[@]};do
echo -e "\t* Checking for $p"
Rscript -e "if(!requireNamespace(\"$p\",quietly=TRUE)){ BiocManager::install(\"$p\")}"
done
cd ..
echo "* Installing netDx"
R CMD INSTALL netDx
| true
|
a70a16c081f9554289d5546af544f7fcf24584fd
|
Shell
|
lgoetschius/MRtrix_Connectomes_Analysis
|
/check_cluster_processing.sh
|
UTF-8
| 1,092
| 3.453125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
##### MRtrix: check which subjects completed the cluster processing #####
# For every subject listed in the subject list, echo a glob line that shows
# any completed stage-3 output files under the data directory.
##########################################################################
stage3_prefix=stage3_10M_pm
datapath=/Volumes/lsa-csmonk/bbox/FF_Data/Working_Files/MRTrix_Connectomes
sublist_path=/Volumes/lsa-csmonk/bbox/FF_Data/Working_Files/MRTrix_Connectomes/MRtrix_scripts/sublist.txt
##########################################################################
# You should not need to edit below this line.
##########################################################################
echo "This command was run on" $(date)
echo "Reading in subject list from " ${sublist_path}
echo "Checking for whether subjects completed " ${stage3_prefix}
# Slurp the whole subject list into one variable (one subject per word).
sublist=$(<${sublist_path})
cd ${datapath}
# The unquoted glob expands to the matching stage-3 files, if any exist.
for sub in ${sublist} ; do
echo ${sub}/${stage3_prefix}*
done
| true
|
308c892f9659345b6bb23a889fa635827c929eb2
|
Shell
|
alesgenova/openchemistry-demo
|
/scripts/copy_web_components.sh
|
UTF-8
| 470
| 2.921875
| 3
|
[] |
no_license
|
#! /bin/bash
# Copy the pre-built web-component bundles out of node_modules into the
# app's public/ tree so they can be served statically.
components="@openchemistry/molecule-vtkjs
@openchemistry/vibrational-spectrum
@openchemistry/molecule-menu
split-me
"
components_dir=public/components
# Start from a clean destination so stale bundles never survive an upgrade.
if [ -d "$components_dir" ]; then
  rm -rf "$components_dir"
fi
# BUGFIX: use -p so this also works when public/ does not exist yet
# (plain mkdir failed and the copies below landed nowhere).
mkdir -p "$components_dir"
# $components is intentionally unquoted: one package name per word.
for component in ${components}; do
  from_dir=node_modules/${component}/dist
  to_dir=${components_dir}/${component}
  # Only packages that actually ship a dist/ build are copied.
  if [ -d "$from_dir" ]; then
    mkdir -p "$to_dir"
    cp -R "$from_dir"/* "$to_dir"/.
  fi
done
| true
|
023caf2e761da52b363474ff546a30c04c3e1250
|
Shell
|
q-a-z/Synapse
|
/Scripts/lweb
|
UTF-8
| 207
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
#lweb = linux web
# Serve the current directory over HTTP on the VPN interface (tun0) and print
# ready-made wget commands for each file, for copy/paste on the remote side.
# Second field of the first 'inet' line of ifconfig output; NOTE(review): on
# old net-tools this field is "addr:<ip>" rather than the bare IP — confirm.
tun0=$(/sbin/ifconfig tun0 | grep 'inet' | head -1 | awk '{print $2}')
port=9000
# One wget hint per file in the current directory.
for f in *; do
echo "wget http://$tun0:$port/$f"
done
# NOTE(review): SimpleHTTPServer is Python 2 only ('python3 -m http.server'
# on Python 3). This call blocks, serving until interrupted.
python -m SimpleHTTPServer $port
| true
|
ea78a9dc520c4c12d18ced6788b005a3d67278cb
|
Shell
|
gap-system/gap-distribution
|
/testing/plots.sh
|
UTF-8
| 9,659
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
## The following commands, adjusted as instructed in the comments,
## should be performed in Jenkins job before calling this script
#
# rm -rf *
#
## absolute path to Jenkins workspace (without trailing "/")
#
# export JPATH="/mnt/raid/hudson-slave/workspace"
#
## name of the Jenkins job to look at this workspace
#
# export JJOB="GAP-minor-release-test"
#
## label of the machine, e.g. "32bit", "64bit", "graupius", "fruitloop"
## (see test logs to see which is used)
#
# export JLABEL="64bit"
#
## relative path to the location of tests logs from the JLABEL directory
## Adjust the path after the major release
#
# export JLOGS="gap4r9/dev/log"
#
# Copy one plot data file out of the Jenkins workspace, if it exists.
#   $1 - word size (32|64)
#   $2 - GMP flavour (gmp|nogmp)
#   $3 - build target directory under GAPTARGET (install|standard|manuals|...)
#   $4 - source file name inside the log directory
#   $5 - destination file name in the current directory
copy_plot() {
  local src="${JPATH}/${JJOB}/GAPCOPTS/${1}build/GAPGMP/${2}/GAPTARGET/${3}/label/${JLABEL}/${JLOGS}/${4}"
  if [ -f "$src" ]; then
    cp "$src" "$5"
  fi
}

# Copy the "<suffix>_count" plot files of one target for every word size and
# GMP flavour. $2 is a list of "suffix:label" pairs, e.g.
# "1:nopackages A:autopackages 2:allpackages".
copy_counts() {
  local pair suffix label
  for GBITS in 32 64; do
    for GMP in nogmp gmp; do
      for pair in $2; do
        suffix=${pair%%:*}
        label=${pair#*:}
        copy_plot $GBITS $GMP $1 plot${1}${suffix}_count.txt ${GBITS}bit-${GMP}-test${1}-${label}-count
      done
    done
  done
}

# Timing and count plots for the two main test targets.
for GTEST in install standard; do
  for GBITS in 32 64; do
    for GMP in nogmp gmp; do
      copy_plot $GBITS $GMP $GTEST plot${GTEST}1_time.txt ${GBITS}bit-${GMP}-test${GTEST}-nopackages
      copy_plot $GBITS $GMP $GTEST plot${GTEST}2_time.txt ${GBITS}bit-${GMP}-test${GTEST}-allpackages
      copy_plot $GBITS $GMP $GTEST plot${GTEST}1_count.txt ${GBITS}bit-${GMP}-test${GTEST}-nopackages-count
      copy_plot $GBITS $GMP $GTEST plot${GTEST}2_count.txt ${GBITS}bit-${GMP}-test${GTEST}-allpackages-count
    done
  done
done

# Per-testfile timing plots collected by the "standard" target.
export GTEST="standard"
for ONETEST in arithlst hash2 primsan xgap grppcnrm grpmat grpperm matrix grplatt bugfix grpprmcs grpconst; do
  for GBITS in 32 64; do
    for GMP in nogmp gmp; do
      copy_plot $GBITS $GMP $GTEST plot${ONETEST}1.txt ${GBITS}bit-${GMP}-${ONETEST}-nopackages
      copy_plot $GBITS $GMP $GTEST plot${ONETEST}2.txt ${GBITS}bit-${GMP}-${ONETEST}-allpackages
    done
  done
done

# Count plots for manuals (no/auto/all packages), packages (no/all) and
# package loading (no/auto).
export GTEST="manuals"
copy_counts $GTEST "1:nopackages A:autopackages 2:allpackages"
export GTEST="packages"
copy_counts $GTEST "1:nopackages 2:allpackages"
export GTEST="packagesload"
copy_counts $GTEST "1:nopackages A:autopackages"

# Failure logs for package loading.
export GTEST="packagesload"
for GBITS in 32 64; do
  for GMP in nogmp gmp; do
    copy_plot $GBITS $GMP $GTEST plot${GTEST}fail.txt ${GBITS}bit-${GMP}-test${GTEST}fail
  done
done
| true
|
9cb98537a2bcd49ad75246c98758e0c6941f051f
|
Shell
|
moy/unix-training
|
/treasure-hunt/generate-step-F2.sh
|
UTF-8
| 2,931
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/bash
# Generates the "etape-F2" step of the treasure hunt: a script that teaches
# job control (suspend/resume with Ctrl-Z / fg) and end-of-file (Ctrl-D).
# The generated script is base64-encoded so players cannot trivially read it.
. ./treasure-setup.sh
. ./spy-lib.sh
. ./i18n-lib.sh
dest=$(gettext etape)-F2.sh
# This may be too easy to "decode", but beeing able to do that may
# also indicate that the current exercice is not useful..
# From here on, stdout goes into the generated script file.
exec > "$dest"
printf "%s\n" '#!/bin/bash
exec 4<&1
# We need bash here to be able to read from 4th fd directly...
# RedHat bug with base64 -d => use -i as a workaround
# https://bugzilla.redhat.com/show_bug.cgi?id=719317
base64 -di <<EOF | exec /bin/bash -s'
# Encode script (note we should take care that it does not generate any dollar sign).
# The here-document below is expanded NOW (gettext/envsubst run here); the
# base64 output becomes the payload of the generated script's own here-doc.
base64 <<EOF
#!/bin/bash
# execute 'exec 4<&1' before exec'ing this script!
retry () {
if [ "\$1" != "" ]; then
hint=" (\$1)."
else
hint=""
fi
echo "$(gettext "Non ...")" "\${hint}"
echo "$(gettext 'Rejoue !')";
}
# cancel () {
# echo "Ok j'arrête. Mais il faudra recommencer !";
# exit 1; # mouaif
# }
ok () {
echo "$(gettext "Bravo ! fin de l'étape...
L'étape suivante se trouve sur le serveur \${auxiliarymachine}. Elle est
dans le fichier
~\${auxiliary_user}/etape-G1.txt
Récupérez-la via sftp (cf.
http://ensiwiki.ensimag.fr/index.php/Travailler_a_distance pour 1001
façons de faire cela) pour continuer.
" | envsubst)"
exit 0;
}
retry_eof () {
retry "$(gettext 'cette action envoie un caractere de fin de fichier au processus')"
}
retry_int () {
retry "$(gettext 'cette action aurait pu tuer le processus')"
}
wait_eof () {
oneof () { ok; }
onstp () { :; }
oncont () { :; }
onint () { retry_int; }
onquit () { retry; }
echo "$(gettext "Ok, je me suspends. Relance-moi en avant-plan pour continuer.
A tout de suite ...")";
}
wait_stp () {
oneof () { retry_eof; }
onstp () {
wait_eof; kill -STOP \$\$;
echo "$(gettext "Me revoila. J'attends maintenant un caractere de fin de fichier.
Si la commande avait été lancée avec une entree redirigee
(comme './etape-F2.sh < un-fichier' ou bien 'commande | ./etape-F2.sh'),
le caractere de fin de fichier aurait ete recu en arrivant
a la fin du fichier ou de la commande d'entree. Ici, l'entree de
etape-F2.sh est le clavier. On peut simuler une fin de fichier avec
Control-d.
")" ; }
oncont () { :; }
onint () { retry_int; }
onquit () { retry; }
echo "$(gettext 'Suspends moi...')";
}
# wait_quit () {
# oneof () { retry; }
# onstp () { :; }
# oncont () { :; }
# onint () { retry; }
# onquit () { wait_stp; }
# echo 'SIGQUIT ?';
# }
# wait_int () {
# oneof () { retry; }
# onstp () { retry; }
# oncont () { :; }
# onint () { wait_quit; }
# onquit () { retry; }
# echo 'SIGINT ?';
# }
wait_stp;
trap 'onint' INT;
trap 'onstp' TSTP;
trap 'oncont' CONT;
trap 'onquit' QUIT;
$(monitor_step_cmd F2)
while true; do
while read -u 4 -r var; do :; done;
oneof;
done;
EOF
# Terminator for the generated script's own 'base64 -di <<EOF' here-doc.
echo 'EOF'
chmod u+x "$dest"
# echo "$dest genere." >/dev/stderr
| true
|
5101d3bd443603164f307025b6b6529148af50ae
|
Shell
|
potyt/scripts
|
/ssh-cache-keys
|
UTF-8
| 240
| 2.859375
| 3
|
[] |
no_license
|
#! /usr/bin/env bash
# Load every ~/.ssh/id_rsa-*.pub key pair into the ssh agent, feeding each
# passphrase from the 'pass' password store (entry ssh/<key-id>).
# in case keychain has been restarted
. ~/.env.keychain
for pk in ~/.ssh/id_rsa*.pub; do
# private key path: strip the ".pub" suffix
k=${pk%.pub}
# key id: basename without the "id_rsa-" prefix, used as the pass entry name
id=$(basename $k)
id=${id#id_rsa-}
# First line of 'pass ssh/<id>' is the passphrase; tty-pipe is a local
# helper that presumably answers ssh-add's prompt (lines matching
# "^.+: *$") with it — confirm against tty-pipe's docs.
pass ssh/$id | head -n 1 | tty-pipe 1 "^.+: *$" ssh-add $k
done
| true
|
29c0583f1d3dff8749b7a64882dd41080cb9e8cd
|
Shell
|
prenaux/ham
|
/bin/ham-toolset-do-import.sh
|
UTF-8
| 1,220
| 3.625
| 4
|
[
"Jam",
"MIT",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
# Toolset import helper for ham. Meant to be *sourced* (it uses 'return'):
# imports toolset "$1" by sourcing its setup-toolset.sh, guarding against
# double imports via the HAM_IMPORTS_TOOLSETS hash (ni-hget/ni-hput).
# Usage: source ham-toolset-do-import.sh [force [silent]] <toolset>
if [ "$1" == "force" ]; then
shift
if [ "$1" == "silent" ]; then
shift
else
ALREADY_IMPORTED=`ni-hget HAM_IMPORTS_TOOLSETS $1`
if [[ "$ALREADY_IMPORTED" = "1" ]]; then
echo "W/ham-toolset-import.sh: toolset already imported '$1', force reimported."
fi
fi
else
# Without "force", a second import of the same toolset is an error.
ALREADY_IMPORTED=`ni-hget HAM_IMPORTS_TOOLSETS $1`
if [[ "$ALREADY_IMPORTED" = "1" ]]; then
echo "E/ham-toolset-import.sh: toolset already imported '$1'."
return 1
fi
fi
# Locate the toolset's setup script under the global specs directory.
FOUND_SETUP_SCRIPT=no
if [ "$FOUND_SETUP_SCRIPT" == "no" ]; then
export DIR="${HAM_HOME}/specs/toolsets/$1"
export SETUP_SCRIPT="$DIR/setup-toolset.sh"
if [ -f "$SETUP_SCRIPT" ]; then
FOUND_SETUP_SCRIPT="from GLOBAL."
fi
fi
if [ "$FOUND_SETUP_SCRIPT" == "no" ]; then
echo "E/Can't find the toolset '$1'"
return 1
fi
# NOTE(review): 'export PATH=$PATH' presumably just (re)marks PATH as
# exported before the setup script mutates it — confirm it is needed.
export PATH=$PATH
HAM_DIE_SHOULD_RETURN=yes source "$SETUP_SCRIPT"
if [ $? != 0 ]; then
echo "E/Toolset '$1' import failed !"
return 1
else
# Record the successful import in both the list and the hash.
if [[ -z $HAM_IMPORTED_TOOLSETS ]]; then
export HAM_IMPORTED_TOOLSETS="$1"
else
export HAM_IMPORTED_TOOLSETS="$HAM_IMPORTED_TOOLSETS $1"
fi
ni-hput HAM_IMPORTS_TOOLSETS $1 1
echo -e "I/Imported toolset '$1' ${FOUND_SETUP_SCRIPT}"
fi
| true
|
a345937ca0f3006c15112f73a2613075f6e700ee
|
Shell
|
cms-lpc-llp/llp_analyzer
|
/scripts/SplitRunEventMapByLumi.sh
|
UTF-8
| 418
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
# For every "run lumi" pair listed in $1, scan the RunDataRunEventIndexing
# files under $2 and append the matching lines into one map file per pair
# under $3 (RunEventMap_<run>_<lumi>.txt).
inputLumisFile=$1
inputDir=$2
outputDir=$3
echo ${inputDir}
echo ${outputDir}
# Split input lines on single spaces only.
IFS=' '
while read -r pairline; do
  # pairline is "run lumi"; split it into its two fields.
  read -a fields <<< "${pairline}"
  echo "Processing (run,lumi) (${fields[0]} , ${fields[1]})"
  for indexfile in ${inputDir}/RunDataRunEventIndexing_*.*; do
    cat ${indexfile} | grep "${pairline}" >> ${outputDir}/RunEventMap_${fields[0]}_${fields[1]}.txt
  done
done < $inputLumisFile
| true
|
4a9fb8523107831969b86259db307932a8cbfc78
|
Shell
|
kdave/xfstests
|
/tests/btrfs/123
|
UTF-8
| 1,415
| 2.859375
| 3
|
[] |
no_license
|
#! /bin/bash
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2016 Fujitsu. All Rights Reserved.
#
# FS QA Test 123
#
# Test if btrfs leaks qgroup numbers for data extents
#
# Due to balance code is doing trick tree block swap, which doing
# non-standard extent reference update, qgroup can't handle it correctly,
# and leads to corrupted qgroup numbers.
#
# All _-prefixed helpers and $SCRATCH_MNT/$seqres come from the xfstests
# harness sourced below (common/preamble, common/filter).
. ./common/preamble
_begin_fstest auto quick qgroup balance
# Import common functions.
. ./common/filter
# real QA test starts here
# Modify as appropriate.
_supported_fs btrfs
_require_scratch
_require_btrfs_qgroup_report
_scratch_mkfs >/dev/null
# Need to use inline extents to fill metadata rapidly
_scratch_mount "-o max_inline=2048"
# create 64K inlined metadata, which will ensure there is a 2-level
# metadata. Even for maximum nodesize(64K)
for i in $(seq 32); do
_pwrite_byte 0xcdcdcdcd 0 2k $SCRATCH_MNT/small_$i | _filter_xfs_io
done
# then a large data write to make the quota corruption obvious enough
_pwrite_byte 0xcdcdcdcd 0 32m $SCRATCH_MNT/large | _filter_xfs_io
sync
# enable quota and rescan to get correct number
_run_btrfs_util_prog quota enable $SCRATCH_MNT
_run_btrfs_util_prog quota rescan -w $SCRATCH_MNT
# now balance data block groups to corrupt qgroup
_run_btrfs_balance_start -d $SCRATCH_MNT >> $seqres.full
_scratch_unmount
# qgroup will be check at _check_scratch_fs() by fstest
# success, all done
status=0
exit
| true
|
5129064b3441f6ce4be099382dac3fb39500b795
|
Shell
|
raycast/script-commands
|
/commands/apps/todoist/get-tasks.template.sh
|
UTF-8
| 947
| 3.5625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Required parameters:
# @raycast.author Faris Aziz
# @raycast.authorURL https://github.com/farisaziz12
# @raycast.schemaVersion 1
# @raycast.title Get Tasks
# @raycast.mode fullOutput
# @raycast.packageName Todoist
# @raycast.description Gets All Todoist tasks
# @raycast.needsConfirmation false
# Dependency: requires jq (https://stedolan.github.io/jq/)
# Install via Homebrew: `brew install jq`
# Optional parameters:
# @raycast.icon images/todoist-logo.png
# Get your API Token from: https://todoist.com/prefs/integrations
API_TOKEN=

# Bail out early when prerequisites are missing.
command -v jq > /dev/null 2>&1 || {
  echo "jq is required (https://stedolan.github.io/jq/).";
  exit 1;
}
[ -n "$API_TOKEN" ] || {
  echo "Todoist API token is missing.";
  exit 1;
}

# Fetch the full task list from the Todoist REST API.
TASKS=$(curl -s -X GET https://api.todoist.com/rest/v1/tasks \
  -H "Authorization: Bearer $API_TOKEN")

# Print every task's content, then a summary count.
jq '.[] | .content' <<< "$TASKS"
echo
echo "You have $(jq 'length' <<< "$TASKS") tasks"
| true
|
39c93d481f27c7c35327b2b8581c00d8dcf64775
|
Shell
|
ryo1107/make_layer_zip
|
/make_layer_zip.sh
|
UTF-8
| 197
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/sh
# Build an AWS-Lambda-layer style zip for a single pip package.
# Usage: make_layer_zip.sh <package-name>
# Produces <package-name>.zip (containing a python/ directory with the
# installed package) in the current working directory.
pkg="$1"
if [ -z "$pkg" ]; then
echo "usage: $0 <package-name>" >&2
exit 1
fi
echo "start_install $pkg"
# Abort on any failed step: previously a failed mkdir/cd silently let the
# following commands (including the final rm -rf) run in the wrong directory.
mkdir "$pkg" || exit 1
cd "$pkg" || exit 1
mkdir python || exit 1
cd python || exit 1
pip install "$pkg" -t . || exit 1
echo install_finish
cd ../ || exit 1
echo zip_making
zip -r "$pkg.zip" ./* || exit 1
cp "$pkg.zip" ../ || exit 1
cd ../ || exit 1
# Remove the staging directory, keeping only the zip next to it.
rm -rf "$pkg"
echo finish!
| true
|
d1b0a2c71f395c05e08f87cd54763a881c27fea3
|
Shell
|
Clay-Ferguson/quantizr
|
/scripts/build-dev-javaonly.sh
|
UTF-8
| 478
| 3.03125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build the Java side of the app and force-restart its docker service.
# checkFunctions / verifySuccess and the PRJROOT / mvn_profile variables are
# presumably provided by setenv-dev.sh sourced below -- confirm there.
clear
# show commands as they are run.
# set -x
# Set all environment variables
source ./setenv-dev.sh
checkFunctions
# Build the application from source
cd ${PRJROOT}
mvn -T 1C package -DskipTests=true -P${mvn_profile}
verifySuccess "Maven Build"
# to get all services: `docker service ls``
cd ${PRJROOT}
QUANTA_SERVICE_ID=$(docker service ls --filter name=quanta-stack-dev_quanta-dev --quiet)
# --force triggers a rolling restart of the service even without spec changes.
docker service update --force ${QUANTA_SERVICE_ID}
echo "done!"
| true
|
814e099c0bd29fa1ba1faa40a985b56adcd5a91f
|
Shell
|
Limnogirl90/Weather.Olcott
|
/README
|
UTF-8
| 690
| 2.609375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Fetch one month of daily climate data by running runme.rb once per date,
# saving each day's output under data/<YYYYMMDD>.out.
# Kept for reference -- the raw NWS request that runme.rb automates:
#curl --data "printer_friendly=&product=CLI&station=BUF&recent=no&date=20140421&prodDesc=DAILY+CLIMATE+REPORT+-+issued+daily:Detailed+daily+weather+statistics+(usually+for+yesterday),+including+temperature,+precipitation,+degree+days,+wind,+humidity,+sunrise/sunset,+and+record+temperature+data+for+the+following+day.++Precipitation+data+includes+both+calendar+year+and+water+year+totals,+percent+of+normal+values,+and+comparisons+to+normal.++This+product+is+available+for+up+to+2+months.++++++++" http://www.nws.noaa.gov/climate/getclimate.php\?wfo\=buf
seq -w 20141001 20141031 | while read -r day; do
  bundle exec ruby runme.rb "$day" > "data/${day}.out" 2>/dev/null
done
| true
|
70c55f4d3aaebf229b4a5598f9a34bf86a76fc9b
|
Shell
|
sbryngelson/dotfiles
|
/.aliasrc
|
UTF-8
| 5,239
| 3.4375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# LS commands
# Prefer GNU ls (gls, from coreutils via Homebrew) when available.
if ! command -v gls &> /dev/null
then
alias ls='ls -GFh --color'
else
alias ls='gls -GFhN --color=always'
fi
alias l='ls'
alias la='ls -a'
alias ll='ls -lhtr'
# python
alias python='python3'
# Disk usage of everything in the CWD, sorted by size.
alias dus='du -hs * | sort -h'
# OS-specific stuff
if [ "$(uname -s)" == "Darwin" ]; then
# macOS: GUI app launchers and clipboard helper.
alias skim='open -a Skim'
alias visit='/Applications/VisIt.app/Contents/MacOS/VisIt'
alias tec360='/Applications/Tecplot\ 360\ EX\ 2018\ R1/Tecplot\ 360\ EX\ 2018\ R1.app/Contents/MacOS/Tecplot\ 360\ EX\ 2018\ R1'
alias paraview='/Applications/ParaView-5.4.1.app/Contents/MacOS/paraview'
alias copy="pbcopy"
# eval "$(rbenv init -)"
alias ls='gls -GFhN --color=always --group-directories-first'
else
# Linux: clipboard via xclip when installed.
if command -v xclip &> /dev/null
then
alias copy="xclip -selection c"
fi
# Pick the BSD (date -v) or GNU (date --date) form of "14 days ago".
if date -v -1d > /dev/null 2>&1; then
DATE='date -v -14d "+ %Y-%m-%d"'
else
DATE='date --date="14 days ago" +%Y-%m-%d'
fi
# Slurm helpers, only where sacct exists.
if command -v sacct &> /dev/null
then
export SLURM_TIME_FORMAT=relative
alias q='squeue -S "i" --format="%.9i %.11P %.30j %.8u %.8T %.10M %.12l %.6D %C" | grep --color=auto "$(whoami)\|$"'
alias myq="sacct -u $(whoami) --format=jobid,jobname%30,alloccpus,state,elapsed,end%15 -X --starttime=$(eval $DATE) | grep --color=auto 'RUNNING\|$' | (head -n 2 && tail -n 40)"
fi
fi
# Override cd: after a successful directory change, immediately list the
# new directory's contents. Exit status mirrors `builtin cd`/`ls`.
cd() {
  builtin cd "$@" && ls
}
# Directory navigation
alias ..='cd ..'
alias ...='cd ../..'
alias ....='cd ../../..'
alias .....='cd ../../../..'
# Common commands
alias grep='grep --color=auto'
alias untar='tar -zxvf '
alias tarup='tar -zcvf'
alias rs='rsync -avz '
# Clear screen AND scrollback (the \e[3J escape).
alias c="clear && printf '\e[3J'"
alias ccat='highlight --out-format=ansi --force'
alias v='vim'
# Count entries in the current directory.
alias cnt='ls -1 | wc -l'
# Jekyll site helpers.
alias jek='bundle exec jekyll serve'
alias jekcheck="bundle exec jekyll build; bundle exec htmlproofer ./_site --alt-ignore '/.*/' --http_status_ignore='999,403,301,302' --assume-extension"
alias pngtompeg="ffmpeg -y -f image2 -r 20 -i D/img_%04d.png -b 5000k movie.mpeg"
if command -v colordiff &> /dev/null
then
alias diff='colordiff'
fi
# FZF command
# NOTE: relies on fd-find being installed as `fdd` (see installfd below).
alias ff="fdd --no-ignore-vcs -H -t f . | fzf"
# Interactive directory jump: pick a directory with fzf (previewing its
# contents), then cd into it. Shadows the fd-find binary, which is why the
# real binary is installed as `fdd` elsewhere in this file.
function fd() {
local dir="$(fzf --reverse --preview '
__cd_nxt="$(echo {})";
__cd_path="$(echo ${__cd_nxt} | sed "s;//;/;")";
echo $__cd_nxt;
echo;
ls ${__cd_path};
')"
cd "$dir"
}
# Fuzzy history search: pick a history entry with fzf and splice the
# corresponding `!N` event designator into the current readline buffer.
__fzf_history ()
{
__ehc $(history | fzf --tac --tiebreak=index | perl -ne 'm/^\s*([0-9]+)/ and print "!$1"')
}
# Insert $1 into the readline buffer at point, then rebind \er/\e^ so the
# line is redrawn and the history designator is expanded (magic-space).
# Only meaningful in an interactive bash session.
__ehc()
{
if
[[ -n $1 ]]
then
bind '"\er": redraw-current-line'
bind '"\e^": magic-space'
READLINE_LINE=${READLINE_LINE:+${READLINE_LINE:0:READLINE_POINT}}${1}${READLINE_LINE:+${READLINE_LINE:READLINE_POINT}}
READLINE_POINT=$(( READLINE_POINT + ${#1} ))
else
bind '"\er":'
bind '"\e^":'
fi
}
# Hard-sync the checkout to origin/master (destructive to local commits).
alias gf='git fetch --all && git reset --hard origin/master'
# Servers
# user@host exports, usable as e.g. `scp file $SUMMIT:`.
export GOLUB="bryngel2@golub.campuscluster.illinois.edu"
export LEGOLAS="bryngel2@legolas2.mechse.illinois.edu"
export RICHARDSON="spencer@richardson.caltech.edu"
export COMET='bryngel@comet.sdsc.xsede.org'
export BRIDGES='bryngel@bridges2.psc.edu'
export STAMPEDE='bryngel@stampede2.tacc.utexas.edu'
export CALTECH='spencer@login.hpc.caltech.edu'
export ASCENT='sbryngelson@login1.ascent.olcf.ornl.gov'
export EXPANSE='bryngel@login.expanse.sdsc.edu'
export SUMMIT='sbryngelson@summit.olcf.ornl.gov'
export PHOENIX='sbryngelson3@login-phoenix-slurm.pace.gatech.edu'
export WOMBAT='sbryngelson@wombat-login1.ccs.ornl.gov'
export WINGTIP='sbryngelson3@wingtip-gpu3.cc.gatech.edu'
export ROUGE='sbryngelson3@rg-login.crnch.gatech.edu'
export YELLOWSTONE='sbryngel@yellowstone-login.stanford.edu'
export CRUSHER='sbryngelson@crusher.olcf.ornl.gov'
# One-word ssh shortcuts (with X forwarding where -Y is given).
alias golub='ssh -Y bryngel2@golub.campuscluster.illinois.edu'
alias legolas='ssh -Y bryngel2@legolas2.mechse.illinois.edu'
alias richardson='ssh -Y spencer@richardson.caltech.edu'
alias comet='ssh -Y bryngel@comet.sdsc.xsede.org'
alias bridges='ssh -Y bryngel@bridges2.psc.edu'
alias stampede='ssh -Y bryngel@stampede2.tacc.utexas.edu'
alias xsede='ssh -l bryngel login.xsede.org'
alias caltech='ssh -Y spencer@login.hpc.caltech.edu'
alias ascent='ssh -Y sbryngelson@login1.ascent.olcf.ornl.gov'
alias expanse='ssh -Y bryngel@login.expanse.sdsc.edu'
alias summit='ssh -Y sbryngelson@summit.olcf.ornl.gov'
alias phoenix='ssh -Y sbryngelson3@login-phoenix-slurm.pace.gatech.edu'
alias cocice='ssh -Y sbryngelson3@login-ice.pace.gatech.edu'
alias wombat='ssh -Y sbryngelson@wombat-login1.ccs.ornl.gov'
alias wingtip='ssh -Y sbryngelson3@wingtip-gpu3.cc.gatech.edu'
alias rouge='ssh -Y sbryngelson3@rg-login.crnch.gatech.edu'
alias yellowstone='ssh -Y sbryngel@yellowstone-login.stanford.edu'
alias swan='ssh -Y e63252@cy-gw1.hpcrb.rdlabs.ext.hpe.com'
alias delta='ssh bryngel@login.delta.ncsa.illinois.edu'
alias crusher='ssh -Y sbryngelson@crusher.olcf.ornl.gov'
alias sandia='ssh shbryng@srngate.sandia.gov'
# Dotfile stuff (some of this is deprecated?)
alias dotpush="cd ~/ && git add ~/.bash_profile ~/.aliasrc ~/.inputrc ~/.gnuplotrc_x11 ~/.gnuplotrc_qt ~/.vimrc .vim/colors/SHB.vim && git commit -m 'update to dotfiles' && git push -u origin master"
alias dotpull="cd ~/ && rm -rf dotfiles && git clone https://github.com/sbryngelson/dotfiles.git && mv -f ~/dotfiles/.*rc ./ && mv -f ~/dotfiles/.bash_profile ./ && rm -fr .vim/colors && mkdir .vim/colors && mv ~/dotfiles/.gnuplotrc* ./ && rm -rf .vim/undodir && mkdir .vim/undodir && mv ~/dotfiles/.vim/colors/SHB.vim ~/.vim/colors/ && rm -rf ~/dotfiles ~/.git && source ~/.bash_profile && vim +PluginInstall +qall"
alias vundle="git clone https://github.com/VundleVim/Vundle.vim.git ~/.vim/bundle/Vundle.vim"
alias installfzf="git clone --depth 1 https://github.com/junegunn/fzf.git ~/.fzf && ~/.fzf/install"
# Installs fd-find as `fdd` so the fd() function above can keep the name.
alias installfd="cd ~ && curl https://sh.rustup.rs -sSf | sh && source $HOME/.cargo/env && cargo install fd-find && mv .cargo/bin/fd .cargo/bin/fdd"
alias nvvp="/Applications/nvvp/bin/nvvp -vm /Library/Java/JavaVirtualMachines/zulu8.23.0.3-jdk8.0.144-macosx_x64/bin/java"
| true
|
13573c69981963855ab95822b4d8b45c7bf85cad
|
Shell
|
waj615/allmake
|
/bin/amc
|
UTF-8
| 5,207
| 3.890625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
###############################################################################
# Name: amc
# Description: amc is the client tool for simplifying shell commands
# Author: John Deng (john.deng@outlook.com)
# Updated: 2017-02-11
###############################################################################
export ver="0.0.1"
# clap: command-line parser; logging: log() helper. Presumably clap turns
# each "key=value"/"word" argument into a same-named shell variable (the
# rest of this script reads $pv, $size, $cmd, ... as globals) -- confirm
# against the clap implementation.
source $(which clap)
source $(which logging)
app="amc"
# CMD_LINE is presumably populated by clap with the raw command line.
echo "cmd: $CMD_LINE"
# Print a blank line, the tool version ($ver is set at the top of the
# script), and a one-line description.
show_version() {
  printf '%s\n' "" "amc version: $ver" "amc is the client tool for simplifying shell commands"
}
# Print version banner plus usage examples for the pv/pvc subcommands.
# The "'"'"..."'"'" sequences embed literal double quotes in the output.
function show_pv_usage() {
show_version
echo "usages: "
echo "	${app} create pv size=10 "
echo "	${app} delete pv=pv-name"
echo "	${app} edit pv size=8 cmd="'"'"chmod 644 {}"'"'" "
echo "	${app} edit pv size=8 cmd="'"'"chown 27:27 {}"'"'" "
echo "	${app} edit pv size=8 uid=27 pvc name=mysql-pvc "
echo "	${app} delete pv pvc name="'"'"nexus-data"'"'" "
}
# Print version banner plus usage examples for the exec subcommand.
function show_exec_usage() {
show_version
echo "usages: "
echo "	${app} exec pod=the-pod-id cmd="'"'"ls -l"'"'" "
echo "	${app} exec pod=the-pod-id cmd="'"'"any-shell-command"'"'" "
}
#######################################
# Run a shell command on every brick path of a PV's gluster volume, inside
# each glusterfs pod. "{}" in the command is replaced by the brick path.
# Arguments:
#   $1 - PV name
#   $2 - command template (e.g. "chmod 777 {}")
# Side effects: switches the active oc project to "default" and back;
# sets the globals current_project, volume, cmd, volumes, glusterfs_pods.
#######################################
function exec_command_on_volume() {
echo "execute command on pv: $1 cmd: "'"'"$2"'"'" "
if [ "$2" == "" ]; then
show_pv_usage
exit
fi
# NOTE: assume that glusterfs is deployed on default project
current_project=$(oc projects | grep "*" | awk '{print $2}')
if [ "$current_project" != "default" ]; then
oc project default
fi
# Gluster volume id = the PV's Path with the leading "vol_" stripped.
volume=$(oc describe pv $1 | grep Path | awk '{print $2}' | sed s/vol_//g)
cmd=$2
echo volume: $volume
# get all glusterfs pods
glusterfs_pods=$(oc get po -n default | grep glusterfs | awk '{print $1}')
if [ "$volume" != "" ] ; then
# get all volume path
volumes=$(awk "/${volume}/,/Name/" <<< "$(heketi-cli topology info)" | grep Path | awk '{print $2}')
# get all glusterfs pods
glusterfs_pods=$(oc get po -n default | grep glusterfs | awk '{print $1}')
# In each pod, run the command on every brick path that exists locally.
# Unescaped $vars below expand now (locally); \$-escaped ones expand
# inside the pod's shell.
while read p; do
oc rsh $p <<EOF
echo pod: $p
echo "$volumes" | while read vol; do
if [ -d "\${vol}" ]; then
final_cmd="$(echo ${cmd} | sed 's/{}/\${vol}/g')"
echo "command: \${final_cmd}"
\${final_cmd}
fi
done
EOF
done <<< "$glusterfs_pods"
fi
# Restore whatever project was active before.
oc project $current_project
}
#######################################
# Run a shell command inside a pod via `oc rsh`.
# Arguments:
#   $1 - pod name (for the batch call path this is $pods)
#   $2 - command to execute
# BUGFIX: the body previously used the globals $pod/$cmd instead of its own
# arguments, which broke the batch invocation `exec_command "$pods" ...`
# (it always targeted $pod). It also printed the pv usage text on error.
#######################################
function exec_command() {
local target_pod="$1"
local target_cmd="$2"
echo "execute command on pod: $target_pod cmd: $target_cmd"
if [ "$target_pod" == "" ] || [ "$target_cmd" == "" ]; then
show_exec_usage
exit
fi
oc rsh $target_pod <<EOF
echo ${target_cmd}
${target_cmd}
EOF
# NOTE(review): current_project is only set by exec_command_on_volume;
# when exec_command is called on its own this restores an empty project.
oc project $current_project
}
#######################################
# Delete the gluster volume backing PV $pv, then the PV object itself.
# Reads the globals $delete (guard, must equal "delete") and $pv.
#######################################
function delete_pv() {
if [ "$delete" == "delete" ]; then
echo deleting pv $pv
# Gluster volume id = the PV's Path minus the "vol_" prefix.
volume=$(oc describe pv $pv | grep Path | awk '{print $2}' | sed s/vol_//g)
heketi-cli volume delete $volume
oc delete pv $pv
fi
}
#######################################
# Create a gluster-backed PV of $size GB via heketi, then fix the
# permissions/ownership on its bricks.
# Reads globals: $size (GB), $cmd (optional chmod-style template),
# $uid (optional; overrides $cmd with a chown template).
#######################################
function create_pv() {
echo creating pv size=$size
# heketi emits the PV manifest; pipe it straight into `oc create`.
result=$(heketi-cli volume create --size=$size --persistent-volume --persistent-volume-endpoint=heketi-storage-endpoints | oc create -f - )
# `oc create` output looks like: persistentvolume "NAME" created
new_pv=$(echo $result | awk '{print $2}' | sed s/\"//g)
if [ "$cmd" == "" ]; then
cmd="chmod 777 {}"
fi
if [ "$uid" != "" ]; then
cmd="chown ${uid}:${uid} {}"
fi
exec_command_on_volume "${new_pv}" "${cmd}"
}
#######################################
# Create a PersistentVolumeClaim named $name requesting $size Gi.
# Reads globals: $name, $size, $mode (access mode, default ReadWriteMany).
#######################################
function create_pvc() {
echo "create pvc name: ${name} size: ${size}"
if [ "${mode}" == "" ]; then
mode="ReadWriteMany"
fi
oc create -f - <<_EOF_
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: ${name}
spec:
  accessModes:
  - ${mode}
  resources:
    requests:
      storage: ${size}Gi
status: {}
_EOF_
}
###############################################################################
# main entry
# Dispatches on the globals set by the clap argument parser
# ($version, $pv, $pvc, $exec, $create, $delete, $edit, $size, $cmd, ...).
function main() {
#log "main loop ..."
if [ "$version" == "version" ]; then
show_version
exit
fi
if [ "$pv" != "" ]; then
if [ "$help" == "help" ]; then
show_pv_usage
exit
fi
# Heketi route lives in the default project; pick the vpn route if asked.
network_option=" -l network=default "
if [ "$net" == "vpn" ] || [ "$vpn" == "vpn" ] ; then
network_option=" -l network=vpn "
fi
export HEKETI_CLI_SERVER="http://$(oc get route -n default $network_option | grep heketi | awk '{print $2}')"
echo connecting to heketi server $HEKETI_CLI_SERVER
if [ "$delete" == "delete" ] && [ "$pv" != "pv" ]; then
delete_pv
# BUGFIX: was `[ $size > 0 ]` -- inside `[ ]` the `>` is an output
# redirection (it created a file named "0" and the test degenerated to
# "is $size non-empty"). Use a real numeric comparison instead.
elif [ "$create" == "create" ] && [ "${size:-0}" -gt 0 ] ; then
create_pv
elif [ "$edit" == "edit" ] && [ "$cmd" != "" ] ; then
exec_command_on_volume "$pv" "$cmd"
fi
fi
if [ "$pvc" != "" ]; then
# Same `>` redirection bug fixed here: use -gt, not >.
if [ "$create" == "create" ] && [ "${size:-0}" -gt 0 ] ; then
create_pvc
elif [ "$delete" == "delete" ]; then
echo "delete pvc, name: $name"
if [ "$pv" = "pv" ]; then
pv=$(oc get pvc | grep nexus-postgres-data | awk '{print $3}')
if [ "$pv" != "" ]; then
delete_pv
fi
fi
oc delete pvc $name
fi
fi
if [ "$exec" != "" ]; then
log "debug"
if [ "$help" == "help" ]; then
log "debug"
show_exec_usage
elif [ "$cmd" != "" ] && [ "$pod" != "" ] ; then
log "debug"
exec_command "$pod" "$cmd"
elif [ "$cmd" != "" ] && [ "$pods" != "" ] ; then
log "debug"
exec_command "$pods" "$cmd" "batch"
fi
fi
}
main
| true
|
3ae363fbe7fb7b5e95984cb0bf4772bf44b95c3e
|
Shell
|
jontg/docker-ulfhedinn
|
/images/tt-rss/start.sh
|
UTF-8
| 738
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
# Container entrypoint: bootstrap the tt_rss MySQL database, optionally
# restore the newest backup, then fix cache directory permissions.
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
# Give MySQL a few seconds to start up
sleep 5
# NOTE(review): this runs the init commands when `use tt_rss` SUCCEEDS,
# i.e. when the database already exists and the password already works --
# that looks inverted (should it be `if ! mysql ...`?). Confirm the
# intended first-boot behavior before changing.
if mysql -u root -ptt-rss -e 'use tt_rss'; then
echo "World appears to be uninitialized; creating table and setting the password"
mysqladmin -u root password 'tt-rss'
mysqladmin -u root -ptt-rss create tt_rss
fi
# Restore only when both env toggles are set; picks the newest file in /backups.
if [ ! -z $START_FROM_BACKUP ]; then
if [ ! -z $DO_RESTORE ]; then
FILE=$(ls -t /backups | head -n1)
if [ -z $FILE ]; then
echo "No file found!"
else
echo "Restoring from /backups/$FILE"
mysql -u root -ptt-rss tt_rss < /backups/$FILE
fi
fi
fi
# tt-rss needs write access to its cache/upload/lock directories.
chmod -R 777 /var/www/tt-rss/{cache/images,cache/upload,cache/export,cache/js,feed-icons,lock}
echo DONE
| true
|
8ecd0f2d9eb2ae864a6183196ab13ecb861876c3
|
Shell
|
cj-mclaughlin/Darkflow_Setup
|
/CUDA/install_cuda
|
UTF-8
| 702
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
# Install the CUDA 10.0 toolkit from a local .deb fetched out of S3.
# Pass -r to first purge any existing cuda/nvidia packages.
GREEN='\033[1;32m'
RESET='\033[0m'
DEB=cuda-repo-ubuntu1804-10-0-local-10.0.130-410.48_1.0-1_amd64.deb
# Optionally remove all nvidia-related packages prior to install with -r option
if [ "$1" == -r ]; then
  sudo apt-get --purge -y remove 'cuda*'
  sudo apt-get --purge -y remove 'nvidia*'
fi
# Fetch the repo package, register it, and install the toolkit.
aws s3 cp "s3://cuda-install-deps/$DEB" .
echo -e "${GREEN}Installing Cuda 10.0 toolkit with local deb file.${RESET}"
sudo dpkg -i "$DEB"
sudo apt-key add /var/cuda-repo-10-0-local-10.0.130-410.48/7fa2af80.pub
sudo apt-get update
sudo apt-get install cuda
sudo rm "$DEB"
exit 0
| true
|
f5a0a1d7c704ec30e22b1ed5b972b53a005a1213
|
Shell
|
jason-work/system-config
|
/bin/Linux/after-check-out.sh
|
UTF-8
| 1,795
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
# Post-checkout provisioning for the system-config repo: keyboard/locale
# setup, package tweaks, info pages, postfix, and a final compile step.
# Helpers like upd_system / do_compile presumably come from the sourced
# ~/system-config/.bashrc and download-external.sh -- confirm there.
~/system-config/bin/after-co-ln-s.sh
. ~/system-config/.bashrc
touch ~/.cache/system-config/.where ~/.cache/system-config/.where.lock
# Personal machine only: Dvorak-programmer layout and CN/US locales.
if test $(whoami) = bhj; then
sudo perl -npe 's/^XKBVARIANT=.*/XKBVARIANT="dvp"/;' -i /etc/default/keyboard
cat <<EOF | sudo tee /etc/locale.gen
zh_CN.GBK GBK
zh_CN.UTF-8 UTF-8
en_US.UTF-8 UTF-8
EOF
sudo locale-gen
echo LANG=en_US.UTF-8 |sudo tee /etc/default/locale
fi
sudo touch /etc/console-setup/* || true
sudo touch /etc/default/* || true # setupcon may fail when the timestamp of
                                  # these files are messed up by debian
                                  # installation (time zone or ntp not available
                                  # because we are not connected to the
                                  # Internet).
sudo setupcon
sudo usermod -a -G fuse $USER
# Enable contrib/non-free Debian components.
sudo perl -npe 's/ main$/ main contrib non-free/' -i /etc/apt/sources.list
. ~/system-config/bin/Linux/download-external.sh
download_external >/dev/null 2>&1 &
# From here on, any failure aborts the script.
set -e
export PATH=~/system-config/bin/Linux/config:~/system/bin/Linux:~/system/bin:$PATH
#update the system
upd_system
sudo usermod -a -G dialout $(whoami) || true
sudo perl -npe 's/^#user_allow_other/user_allow_other/' -i /etc/fuse.conf
mkdir -p ~/src/github
emacs-install-packages || true
if test ! -d /usr/local/share/info; then
sudo mkdir -p /usr/local/share/info
fi
# Register the bundled bash info page with the system info index.
sudo ln -s ~/system-config/doc/bash.info.gz /usr/local/share/info/ -f
(
cd /usr/local/share/info/
sudo ginstall-info bash.info.gz /usr/local/share/info/dir
)
# Deliver local mail to per-user Maildirs instead of a mailbox command.
sudo postconf -e "home_mailbox = Maildir/$(whoami).localhost/"
sudo postconf -e "mailbox_command = "
sudo /etc/init.d/postfix restart
(mozroots --import --sync&)
# Compile some software
do_compile
echo 'Full config OK'
| true
|
e318f1b6db1f6342d552852356a08c9e7ebeb7c1
|
Shell
|
petronny/aur3-mirror
|
/angryanimals/PKGBUILD
|
UTF-8
| 1,209
| 2.734375
| 3
|
[] |
no_license
|
# Maintainer: Simone Sclavi 'Ito' <darkhado@gmail.com>
# Arch PKGBUILD: packages the Angry Animals flash game (an .swf run via
# flashplayer-standalone) with an icon and a desktop launcher.
pkgname=angryanimals
pkgver=27.03_01
pkgrel=1
arch=('any')
pkgdesc='Launch the animals into the building to destroy it! (Angry Birds-like game)'
url='http://www.free-games.net/play/angry-animals'
license=('custom')
depends=('flashplayer-standalone')
makedepends=('imagemagick')
source=("http://content.free-games.net/games/${pkgname}.swf"
'http://www.angrybirdspc.it/games/images/angry-animals.jpg')
md5sums=('a9523aaeedef77266375fa91606a5653'
'05ed55216a18d335ab19264ce32880d1')
package(){
# Install icon
# convert (imagemagick) turns the downloaded jpg into a png for pixmaps.
convert angry-animals.jpg $pkgname.png
install -D -m644 $pkgname.png ${pkgdir}/usr/share/pixmaps/${pkgname}.png
# Install the .swf file
install -D -m644 ${pkgname}.swf ${pkgdir}/usr/share/${pkgname}/${pkgname}.swf
# Make desktop launcher and install it
DESKTOP=angryanimals.desktop
cat > $DESKTOP << EOF
[Desktop Entry]
Type=Application
Terminal=false
Exec=flashplayer /usr/share/${pkgname}/${pkgname}.swf
Name=Angry Animals
Comment=Launch the animals into the building to destroy it!
Icon=${pkgname}.png
Categories=Game;ActionGame;
EOF
install -D -m644 $DESKTOP ${pkgdir}/usr/share/applications/$DESKTOP
}
| true
|
a10ebdf5f52249005d7f9735bc6dc0879e8afae9
|
Shell
|
latifkabir/Computation_using_C
|
/st_io/st_io_prb.sh
|
UTF-8
| 458
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
# Compile, link, run, and clean up the st_io_prb test program; its output
# is captured in st_io_prb_output.txt.
#
# Compile (headers live under $HOME/include). Quoting fixes word-splitting
# if $HOME contains spaces; the old "/$HOME" double slash was redundant.
if ! gcc -c -I"$HOME/include" st_io_prb.c; then
echo "Errors compiling st_io_prb.c"
# BUGFIX: was a bare `exit`, which reported SUCCESS (status 0) on failure.
exit 1
fi
#
# Link against the prebuilt architecture-specific st_io object.
if ! gcc st_io_prb.o "$HOME/libc/$ARCH/st_io.o" -lm; then
echo "Errors linking and loading st_io_prb.o."
exit 1
fi
#
rm st_io_prb.o
#
mv a.out st_io_prb
# Run the program, capturing stdout.
if ! ./st_io_prb > st_io_prb_output.txt; then
echo "Errors running st_io_prb."
exit 1
fi
rm st_io_prb
#
echo "Program output written to st_io_prb_output.txt"
| true
|
a11fe2e20c844e1f5d55ec66ba95de25afacfc21
|
Shell
|
scerry91/project
|
/modules/clean_log_file/templates/cleaning_script.sh.epp
|
UTF-8
| 2,681
| 4.28125
| 4
|
[] |
no_license
|
#!/bin/bash
###########################################################
# Bash script to clean up the oldest logs from filesystem #
# How to use: #
# - Enter log path (wildcard) for example: #
# a) /var/logs/log-*.gzip #
# b) /var/log/httpd/error-20*.gzip #
# - Enter maximum percentage #
#---------------------------------------------------------#
# What it does: #
# - Check if log exists #
# - Make a loop of all logs #
# - Get the oldest log #
# - Remove the oldest log #
#---------------------------------------------------------#
# Tested on: #
# - Ubuntu buster/sid #
# - RedHat 7.7 #
###########################################################
# NOTE: this is a Puppet EPP template; the <%= ... %> tags are substituted
# from the $log_file hash at catalog compile time.
# Log location with wildcard
# EXAMPLE:
# log_directory=/var/logs/log-*.gzip
log_directory=<%= $log_file['log_directory'] %>
# What is maximum percentage takes on the system
# EXAMPLE:
# max_percentage_taken=95
max_percentage_taken=<%= $log_file['max_percentage_taken'] %>
# Variable for the loop
# The unquoted assignment expands the wildcard into an array of matches.
log_directory_for_loop=($log_directory)
# Keep deleting the oldest matching log, one per iteration, until disk
# usage drops to the threshold (break 2 exits this outer loop).
while true
do
# Check if at least the first glob match is a regular file
if [[ -f ${log_directory_for_loop[0]} ]]
then
# Make a loop of log files
for name in $log_directory
do
# Check how much disk space it is using
space_used=$(df -h $(dirname $name) | tail -1 | awk '{ print $5 }' | sed 's/%//g')
# Check if we are running out of disk space
if [ $space_used -gt $max_percentage_taken ]
then
# Find the oldest log (sort by mtime ascending, take the first)
latest_log_file=$(find $name -type f -printf '%T+\t%p\n' | sort | head -n 1 | awk '{print $2}')
echo "Removing $latest_log_file"
# Remove the oldest log
rm -f $latest_log_file
echo "We are using $space_used% out of $max_percentage_taken%"
else
# Finish if disk space looks ok
echo "Disk space looks ok, currently we are using $space_used% out of $max_percentage_taken%"
break 2
fi
done
else
# Finish if there is no directory or logs to remove
echo "There is no logs in $log_directory to remove"
# TO DO
# SEND AN EMAIL
break 2
fi
done
| true
|
427af55e41b6079f6b05b25c49b7c600cd6913c6
|
Shell
|
selvaganeshwari/scripts
|
/bash/git_remoterepo.sh
|
UTF-8
| 912
| 3.5
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
#
# Copyright homoacademycus@github.com
# MIT License
#
# Interactively configure the "origin" remote of the current repo, then
# stage everything, commit, and push to master.
echo -n "set git protocol
 1. ssh://user@host.com
 2. git://host.com
 3. https://host.com
 4. file://
 * github url
 git@github.com:user/reponame.git
 --> select number (1~4) :
"
read -r sel
case $sel in
1)
echo -n "enter user account :"
read -r user
echo -n "enter host domain name :"
read -r domain
remoteurl="ssh://$user@$domain"
;;
2)
echo -n "enter host domain name :"
read -r domain
remoteurl="git://$domain"
;;
3)
echo -n "enter host domain name :"
read -r domain
remoteurl="https://$domain"
;;
4)
echo -n "enter file path :"
read -r filepath
remoteurl="file://$filepath"
;;
*)
echo "you typed wrong input. please try again."
# BUGFIX: was `break`, which is only valid inside a loop -- the shell
# warned and fell through, running the git commands below with an empty
# $remoteurl. Exit with an error instead.
exit 1
esac
git remote add origin "$remoteurl"
git remote set-url origin "$remoteurl"
git remote -v
git add ./*
git commit -m 'set remote repo..'
git push --set-upstream origin master
|
8eb2340c9baeb2539925aeb5331bf7acc1508d0d
|
Shell
|
tonyhoo/shellkoans
|
/section8.sh
|
UTF-8
| 1,183
| 4.53125
| 5
|
[] |
no_license
|
#!/bin/bash
# Exercises for section 8: generating syslog messages from shell scripts.
# (Shebang changed from /bin/sh: this script uses the bash-only `function`
# keyword, `local`, and `read -p`.)
EXERCISE_NUMBER=0
TOTAL_ALLOWED_EXERCISE=2
# Exercise 1:
# Write a shell script that displays one random number to the screen and also generates a syslog
# message with that random number. Use the "user" facility and the "info" facility for your
# messages.
# Hint: Use $RANDOM
# NOTE(review): the exercise text asks for the "info" level but the code
# logs at user.notice -- confirm which is intended before changing.
function e1() {
local number=$RANDOM
local message="Writing $number to the log"
echo "$message"
logger -p user.notice "$message"
}
# Exercise 2:
# Modify the previous script so that it uses a logging function. Additionally tag each syslog
# message with "randomly" and include the process ID. Generate 3 random numbers.
# NOTE(review): the exercise says "generate 3 random numbers" but this emits
# only one -- left as-is to preserve behavior; confirm before changing.
function e2() {
local number=$RANDOM
local message="Writing $number to the log"
echo "$message"
logger -p user.notice -t "randomly" -i "$message"
}
# Prompt for (or accept as $1) an exercise number and dispatch to it.
function run() {
if [ $# -ne 1 ]
then
echo "No exercise selected!"
read -p "Please pass in the exercise number[1-${TOTAL_ALLOWED_EXERCISE}]: " EXERCISE_NUMBER
else
EXERCISE_NUMBER=$1
fi
run_exercise "$EXERCISE_NUMBER"
}
# Validate the exercise number and invoke the matching eN function.
function run_exercise() {
# BUGFIX: also reject 0/negative/non-numeric input -- previously only the
# upper bound was checked, so e.g. 0 tried to run a nonexistent e0.
if [ "$1" -ge 1 ] 2>/dev/null && [ "$1" -le "$TOTAL_ALLOWED_EXERCISE" ]
then
FUNCTION_NAME="e$1"
$FUNCTION_NAME
else
echo "Wrong exercise number passed: $1"
fi
}
run "$@"
|
16b4834c4619ead7b9597edac68319a955821d3d
|
Shell
|
mrsabath/mytechnotes
|
/k8s/code/xqueuejob_controller_deployment_cluster_resource_update.sh
|
UTF-8
| 10,031
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
# Licensed Materials - Property of IBM
# (c) Copyright IBM Corporation 2018, 2019. All Rights Reserved.
# Note to U.S. Government Users Restricted Rights:
# Use, duplication or disclosure restricted by GSA ADP Schedule
# Contract with IBM Corp.
###############
# configuration
###############
# Positional parameters; initialize() below fills in defaults for any that
# are empty.
deployment_name=$1
config=$2
namespace=$3
certPath=$4
frequency=$5
k8s_api_ip_port=$6
######
# vars
######
# Cluster-wide free resources, published as labels on the deployment.
available_cpu="0"
available_memory="0"
# NOTE(review): the backticks around `default` inside this double-quoted
# string are command substitution, not literal quoting -- they try to run a
# command named "default". Confirm and escape them if literal backticks
# were intended.
echo -e "
------ Introduction phase for the user-----
this script takes 6 input parameters, e.g.:
Usage: ${0##*/} ~/.kube/conf default 5
- parameter 1, XQueueJob deployment name, the default XQueueJob
- parameter 2, is the KUBECONFIG file path: ~/.kube/conf
- parameter 3, is the namespace where the CRD instances are created, the default namespace is `default`.
- parameter 4, is the path to the folder of the TLS certs to authenticate with k8s api server
- parameter 5, is the frequency at which we check over for CRD created, the default is 10 seconds
- parameter 6, is the ip:port of the k8s api server, the default is 192.168.10.2:6443
-------------------------------------------
"
sleep 1
#################################################
# initialize
#################################################
# Validate/default the six script parameters, export KUBECONFIG, and zero
# the deployment's available_cpu/available_memory labels.
# NOTE(review): reads its own $1..$6, so it must be called with the script
# arguments forwarded (e.g. `initialize "$@"`) -- the call site is outside
# this file chunk; confirm.
initialize(){
echo -e "
------ initializing phase-----
setting the kube config and frequency values
------------------------------
"
if [ "$2" = "" ]; then
echo "no KUBCONFIG is provided, using default '$HOME/.kube/conf'"
# NOTE(review): the tilde inside quotes is NOT expanded, so KUBECONFIG
# below gets the literal string "~/.kube/conf"; use "$HOME/.kube/conf".
config="~/.kube/conf"
else
echo "using the following config: $config"
fi
if [ "$4" = "" ]; then
# Default the cert path to a certs/ dir next to this script.
SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )"
echo "no cert path, using default '$SCRIPTPATH/certs', this may not be correct!!!"
certPath="$SCRIPTPATH/certs"
else
echo "using the following certPath: $certPath"
fi
if [ "$3" = "" ]; then
echo "no namespace is provided, using 'default'"
namespace="default"
else
echo "using the following config: $namespace"
fi
if [ "$5" = "" ]; then
frequency=10
echo "using the default frequency: $frequency seconds"
else
echo "using the following frequency: $frequency seconds"
fi
if [ "$6" = "" ]; then
k8s_api_ip_port="192.168.10.2:6443"
echo "WARNING: the api-server ip:port is not provided using a default that may not be accurate: $k8s_api_ip_port"
else
echo "using the following api-server ip:port: $k8s_api_ip_port"
fi
if [[ "$1" = "" ]]
then
deployment_name=XQueueJob
echo "using the default deployment name: $deployment_name"
else
echo "using the following deployment name: $deployment_name"
fi
export KUBECONFIG=$config
# Reset the resource labels to 0 if the target deployment exists.
deployment_exists=$(kubectl get deployment -n $namespace $deployment_name)
if [[ "$deployment_exists" != "" ]]
then
kubectl label --overwrite deployment $deployment_name -n $namespace available_cpu="0"
kubectl label --overwrite deployment $deployment_name -n $namespace available_memory="0"
#debug
deployment_label_cpu=$(kubectl get deployment -n $namespace $deployment_name -o jsonpath="{.metadata.labels.available_cpu}")
deployment_label_memory=$(kubectl get deployment -n $namespace $deployment_name -o jsonpath="{.metadata.labels.available_memory}")
echo "Cluster $deployment_name has $deployment_label_cpu CPU and $deployment_label_memory MEMORY available."
else
echo "WARNING: The deployment named $deployment_name must be running."
fi
echo -e "
------ initializing completed-----
"
}
#################################################
# get available resources
#################################################
set_available_resources(){
echo -e "
------ get available resources -----
Calculate available resources
"
available_cpu="0"
available_memory="0"
# Stanzas
local allocatable_flag=false
local allocated_flag=false
# Allocatable stanza resources
local allocatable_cpu_raw=""
local allocatable_memory_raw=""
local allocatable_cpu_flag=false
local allocatable_memory_flag=false
# Allocatable stanza resources
local allocated_cpu_raw=""
local allocated_memory_raw=""
local allocated_cpu_flag=false
local allocated_memory_flag=false
local IFS_orig="${IFS}"
IFS=' :'
nodes=$(kubectl get nodes --output=custom-columns=NAME:.metadata.name --no-headers=true | tr '\n' ' ')
for node in $nodes
do
# Allocatable stanza resources
allocatable_cpu_raw=""
allocatable_memory_raw=""
allocatable_cpu_flag=false
allocatable_memory_flag=false
# Allocatable stanza resources
allocated_cpu_raw=""
allocated_memory_raw=""
allocated_cpu_flag=false
allocated_memory_flag=false
describe_out_raw=$(kubectl describe node $node | grep -A 5 "Allocated\|Allocatable:")
echo "${describe_out_raw}"
describe_out=$(echo ${describe_out_raw} | tr '\n' ' ')
for wd in $describe_out
do
#echo "word=${wd}."
# Get allocated_memory_raw (used)
if [[ "$allocated_memory_flag" = "true" ]]
then
#echo "allocated memory flag true if found."
allocated_memory_raw="${wd}"
allocated_memory_flag=false
continue
fi
# Get allocated_cpu_raw (used)
if [[ "$allocated_cpu_flag" = "true" ]]
then
#echo "allocated cpu flag true if found."
allocated_cpu_raw="${wd}"
allocated_cpu_flag=false
continue
fi
# Get allocatable_memory_raw (capacity)
if [[ "$allocatable_memory_flag" = "true" ]]
then
echo "allocatable memory flag true if found."
allocatable_memory_raw="${wd}"
allocatable_memory_flag=false
continue
fi
# Get allocatable_cpu_raw (capacity)
if [[ "$allocatable_cpu_flag" = "true" ]]
then
#echo "allocatable cpu flag true if found."
allocatable_cpu_raw="${wd}"
allocatable_cpu_flag=false
continue
fi
# Find usage stanza
if [[ "$allocated_flag" = "false" ]]
then
if [[ "${wd}" = "Allocated" ]]
then
allocatable_flag=false
allocated_flag=true
continue
fi
fi
# Find capacity stanza
if [[ "$allocatable_flag" = "false" ]]
then
if [[ "$wd" = "Allocatable" ]]
then
allocatable_flag=true
allocated_flag=false
continue
fi
fi
# Found resource flag. Determine if flag is for allocated or allocatable.
if [[ "$wd" = "memory" ]]
then
if [[ "$allocated_flag" = "true" ]] && [[ "$allocated_memory_raw" = "" ]]
then
echo "setting allocated memory flag true."
allocated_memory_flag=true
elif [[ "$allocatable_flag" = "true" ]] && [[ "$allocatable_memory_raw" = "" ]]
then
#echo "setting allocatable memory flag true."
allocatable_memory_flag=true
fi
continue
fi
# Found resource flag. Determine if flag is for allocated or allocatable.
if [[ "$wd" = "cpu" ]]
then
if [[ "$allocated_flag" = "true" ]] && [[ "$allocated_cpu_raw" = "" ]]
then
#echo "setting allocated cpu flag true."
allocated_cpu_flag=true
elif [[ "$allocatable_flag" = "true" ]] && [[ "$allocatable_cpu_raw" = "" ]]
then
#echo "setting allocatable cpu flag true."
allocatable_cpu_flag=true
fi
continue
fi
done
# Remove units from values and normalize
allocatable_cpu=$(($allocatable_cpu_raw * 1024))
allocatable_memory=${allocatable_memory_raw%"Ki"}
m_units=$(echo $allocated_cpu_raw | grep m)
if [[ "$m_units" != "" ]]
then
allocated_cpu=${allocated_cpu_raw%"m"}
else
allocated_cpu=$(($allocated_cpu_raw * 1024))
fi
meg_units=$(echo $allocated_memory_raw | grep Mi)
if [[ "$meg_units" != "" ]]
then
allocated_memory=${allocated_memory_raw%"Mi"}
allocated_memory=$(($allocated_memory * 1024))
else
gig_units=$(echo $allocated_memory_raw | grep Gi)
if [[ "$gig_units" != "" ]]
then
allocated_memory=${allocated_memory_raw%"Gi"}
allocated_memory=$(($allocated_memory * 1024000))
else
allocated_memory=$(($allocated_memory_raw * 1))
fi
fi
# Calculate free space per node
node_available_cpu=$(($allocatable_cpu - $allocated_cpu))
node_available_memory=$(($allocatable_memory - $allocated_memory))
# debug
echo "allocatable cpu for node ${node} = ${allocatable_cpu}."
echo "allocatable memory for node ${node} = ${allocatable_memory}."
echo "allocated cpu for node ${node} = ${allocated_cpu}."
echo "allocated memory for node ${node} = ${allocated_memory}."
# Sum free space per cluster
available_cpu=$(($available_cpu + $node_available_cpu))
available_memory=$(($available_memory + $node_available_memory))
echo "suming available cpu for cluster = ${available_cpu}."
echo "suming available memory for cluster = ${available_memory}."
done
# Calculate free space and update cluster labels
kubectl label --overwrite deployment $deployment_name -n $namespace available_cpu=${available_cpu}
kubectl label --overwrite deployment $deployment_name -n $namespace available_memory=${available_memory}
# debug
deployment_label_cpu=$(kubectl get deployment -n $namespace $deployment_name -o jsonpath="{.metadata.labels.available_cpu}")
deployment_label_memory=$(kubectl get deployment -n $namespace $deployment_name -o jsonpath="{.metadata.labels.available_memory}")
echo "Cluster $deployment_name has $deployment_label_cpu CPU and $deployment_label_memory MEMORY available."
IFS="${IFS_orig}"
echo -e "
------ get available resources completed -----
"
}
#################################################
# ctrl_c:
#   SIGINT handler (installed via `trap ctrl_c INT` in Main).
#   Prints a cowsay-style farewell banner and terminates the
#   calculator loop with exit status 1.
#   NOTE(review): `echo -e` leaves the backslashes in the art
#   untouched (only \n, \t, etc. are escapes), so the picture
#   prints as written.
#################################################
function ctrl_c() {
echo -e "
____________________________________________
| stopping the cluster calculator, good bye! |
--------------------------------------------
\ ^__^
\ (oo)\_______
(__)\ )\/\
|| ||
"
exit 1
}
#################################################
# Main
#   Install the Ctrl-C banner handler, run one-time setup
#   with the first five CLI arguments, then recompute the
#   cluster's free-resource labels forever, sleeping 10s
#   between passes.
#################################################
trap ctrl_c INT
initialize $1 $2 $3 $4 $5
while :
do
  set_available_resources
  sleep 10
done
| true
|
2e34f471fdf46c27cd3f7da928f3b78372efed32
|
Shell
|
astralhpi/dotfiles
|
/zsh/zshrc
|
UTF-8
| 166
| 2.671875
| 3
|
[] |
no_license
|
# Bootstrap machine-local zsh configuration:
# ensure an (initially empty) per-host override file exists, load it,
# then load the shared, version-controlled config via its symlink.
local_rc="${ZDOTDIR:-$HOME}/.zshrc_local"
if [[ ! -s "$local_rc" ]]; then
  # Missing or empty: create it so the source below never errors.
  touch "$local_rc"
fi
source "$local_rc"
# Quoted (was unquoted): protects against word-splitting/globbing
# should $HOME contain spaces or glob characters.
source "$HOME/.zshrc_symlink"
| true
|
6bad2e6e3014e3153c2294587c090e04fa967b48
|
Shell
|
activescott/tiddlywiki-docker
|
/stop.sh
|
UTF-8
| 201
| 2.5625
| 3
|
[
"ISC"
] |
permissive
|
#!/usr/bin/env sh
# Best-effort stop of the TiddlyWiki container; always exits 0.
MYNAME=$(basename -- "$0")
MYFULLNAME="$PWD/$MYNAME"
MYDIR=$(dirname -- "$MYFULLNAME")
# Load settings only when present. Uses the POSIX '.' builtin
# ('source' is a bashism unavailable under dash/sh), and guards the
# file: a strict POSIX shell aborts the whole script when '.' is
# given a missing file, which would skip the stop below.
if [ -f ./.env ]; then
  . ./.env
fi
docker container stop tw
# always return 0 because we don't care if this succeeds
exit 0
| true
|
39f748801a299d010900d1688c1313a77e478016
|
Shell
|
particleman314/ShellLibrary
|
/test/filemgt/make_lockfile.sh
|
UTF-8
| 946
| 2.984375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Exercises make_lockfile: default lock name, on-demand directory
# creation, custom lock-file names, and cleanup scheduling.
work_dir="${SUBSYSTEM_TEMPORARY_DIR}"

# Default name, silent (-s), path reported back via -r.
result=$( make_lockfile -s -r --directory "${work_dir}" )
assert_success $?
assert_not_empty "${result}"
assert_equals "${work_dir}/.lockfile" "${result}"
schedule_for_demolition "${work_dir}/.lockfile"

# A directory that does not yet exist should be created on demand.
nested_dir="${work_dir}/LOCKFILES"
make_lockfile --directory "${nested_dir}"
assert_success $?
assert_is_file "${nested_dir}/.lockfile"

# Custom lock-file name, path echoed back via -r.
result=$( make_lockfile -r --directory "${nested_dir}" --lock-file '.mylock' )
assert_success $?
assert_is_file "${nested_dir}/.mylock"
assert_equals "${nested_dir}/.mylock" "${result}"

# Disabled: permissions/message variant (verifiable only as root).
#result=$( make_lockfile -r --directory "${nested_dir}" --lock-file '.mylock2' --msg 'This is a sample message' --permissions 000 )
#assert_success $?
#[ $( get_user_id ) == 'root' ] && assert_is_file "${nested_dir}/.mylock2"
#[ -n "${CANOPUS_DETAIL}" ] && [ "${CANOPUS_DETAIL}" -gt 0 ] && \cat "${nested_dir}/.mylock2"
schedule_for_demolition "${nested_dir}"
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.