blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
8307678445bd877a82ce274367b6044ea2b3df70
|
Shell
|
Python3pkg/GenomeKey
|
/pilot-study/generate_tiny_bams.sh
|
UTF-8
| 1,021
| 3.5
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# generate an arbitrary number BAM files from an input BAM
# with different RG (read group) and SM (sample tags)
# Usage:
# generate_tiny_bams.sh <num-bams> <source.bam>
END=$1
INPUT_BAM=$2
OUTPUT_BAM_BASENAME=$(basename "${INPUT_BAM}")
OUTPUT_BAM_PREFIX=${OUTPUT_BAM_BASENAME%.bam}
# get unique "SM" (sample) tag to rewrite (kept for the commented-out
# reheader approach below; unused by the picard path)
ID=$(samtools view -H "${INPUT_BAM}" |grep "SM:"|cut -f3|cut -d':' -f2|sort|uniq)
for i in $(seq 1 "$END")
do
# zero-pad output; $(printf ...) is POSIX-sh compatible, unlike the
# bash-only "printf -v" the script used before (broken under #!/bin/sh)
j=$(printf "%02d" "$i")
#cmd="samtools view -H ${INPUT_BAM} | sed \"s/${ID}/${j}/g\" | samtools reheader - ${INPUT_BAM} > tinytestbam-${j}.bam"
OUTPUT_BAM=tinytestbam-${j}.bam
cmd="java -jar /groups/cbi/WGA/tools/picard-tools-1.99/AddOrReplaceReadGroups.jar I=${INPUT_BAM} O=${OUTPUT_BAM} SORT_ORDER=coordinate RGPU=barcode RGID=${j} RGLB=${j} RGPL=Illumina RGSM=${j} CREATE_INDEX=True VALIDATION_STRINGENCY=LENIENT"
echo "$cmd"
eval "$cmd"
cmd="/groups/cbi/WGA/tools/samtools index ${OUTPUT_BAM}"
echo "$cmd"
eval "$cmd"
done
| true
|
5b057a4561c8c408d0cd326bac9b2abd8a47e1dd
|
Shell
|
brwnj/ggd-recipes
|
/recipes/genomics/Homo_sapiens/hg38/hg38-reference-genome-ucsc-v1/recipe.sh
|
UTF-8
| 358
| 2.734375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# "pipefail" is a bash option; the original "#!/bin/sh" shebang fails on
# shells (e.g. dash) that do not support it, so bash is required here.
set -eo pipefail -o nounset
## Get the hg38 reference genome (patch 12) from UCSC.
wget --quiet -O - http://hgdownload.soe.ucsc.edu/goldenPath/hg38/bigZips/p12/hg38.p12.fa.gz \
| gzip -dc \
| bgzip -c > hg38-reference-genome-ucsc-v1.fa.gz
## Index the fasta file using samtools
samtools faidx hg38-reference-genome-ucsc-v1.fa.gz
| true
|
e71a8a66291acc32a0a042f4c30f467ee9bb47d1
|
Shell
|
dmeekabc/tagaProtoAnalysis
|
/tagaScripts/tagaScriptsUtils/collect.sh
|
UTF-8
| 1,451
| 3.4375
| 3
|
[] |
no_license
|
#####################################################
# Copyright 2016 IBOA Corp
# All Rights Reserved
#####################################################
# Collect test result files (named after $TEST_DESCRIPTION) from every host
# in $targetList into $outputDir, removing them from the source hosts.
# $targetList, $MYIP, $MYLOGIN_ID, $TEST_DESCRIPTION and
# $TAGA_LOCAL_MODE_FLAG_FILE all come from the sourced config file.
TAGA_DIR=~/scripts/taga
TAGA_CONFIG_DIR=$TAGA_DIR/tagaConfig
source $TAGA_CONFIG_DIR/config
# Destination directory is the single positional argument.
outputDir=$1
for target in $targetList
do
echo
echo processing, collecting files from $target start:`date | cut -c12-20`
# if we are in local mode and target == MYIP , do not use ssh or scp
# (local mode is signalled by a "1" inside $TAGA_LOCAL_MODE_FLAG_FILE)
if cat $TAGA_LOCAL_MODE_FLAG_FILE 2>/dev/null | grep 1 >/dev/null ; then
if [ $target == $MYIP ]; then
echo A: processing, collecting files from $target start:`date | cut -c12-20`
# Local host: plain cp/rm, no network round-trip.
cp /tmp/$TEST_DESCRIPTION* $outputDir
rm /tmp/$TEST_DESCRIPTION* 2>/dev/null
else
echo B: processing, collecting files from $target start:`date | cut -c12-20`
# Remote host in local mode: fetch then delete at the source.
scp $MYLOGIN_ID@$target:/tmp/$TEST_DESCRIPTION* $outputDir
ssh -l $MYLOGIN_ID $target rm /tmp/$TEST_DESCRIPTION* 2>/dev/null
fi
# normal mode
else
echo C: processing, collecting files from $target start:`date | cut -c12-20`
scp $MYLOGIN_ID@$target:/tmp/$TEST_DESCRIPTION* $outputDir
ssh -l $MYLOGIN_ID $target rm /tmp/$TEST_DESCRIPTION* 2>/dev/null
fi
echo D: processing, collecting files from $target stop :`date | cut -c12-20`
done
echo
# Summary: total file and line counts of everything collected.
echo `basename $0` : Total File Count: `ls $outputDir | wc -l` Total Line Count: `cat $outputDir/* | wc -l`
echo
| true
|
acb1832b102cd23737117b16937a3ceccf3487e4
|
Shell
|
PFigs/wm-config
|
/modules/main.sh
|
UTF-8
| 7,627
| 3.4375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Copyright 2019 Wirepas Ltd
# wm_config_load_settings
#
# Sources the default and custom file to retrieve the parameter values
function wm_config_load_settings
{
# Load the user's custom settings when present; otherwise seed the custom
# file from the packaged template so it exists on the next run.
if [[ -f "${WM_CFG_SETTINGS_CUSTOM}" ]]
then
web_notify "read settings from ${WM_CFG_SETTINGS_CUSTOM}"
# allexport: every variable assigned while sourcing is exported.
set -o allexport
host_ensure_linux_lf "${WM_CFG_SETTINGS_CUSTOM}"
source "${WM_CFG_SETTINGS_CUSTOM}"
set +o allexport
else
web_notify "could not find ${WM_CFG_SETTINGS_CUSTOM}"
mkdir -p "${WM_CFG_SETTINGS_PATH}"
cp "${WM_CFG_INSTALL_PATH}/environment/custom.env" "${WM_CFG_SETTINGS_CUSTOM}"
fi
# Default settings are mandatory: abort the whole script when missing.
if [[ -f "${WM_CFG_SETTINGS_DEFAULT}" ]]
then
web_notify "read settings from ${WM_CFG_SETTINGS_DEFAULT}"
set -o allexport
source "${WM_CFG_SETTINGS_DEFAULT}"
set +o allexport
else
web_notify "could not find ${WM_CFG_SETTINGS_DEFAULT}"
exit 1
fi
}
# wm_config_feature_selection
#
# Handles the feature selection customization
function wm_config_feature_selection
{
# Load feature flags (WM_HOST_*) from the packaged feature environment.
# NOTE(review): the notification mentions ${WM_CFG_SETTINGS_DEFAULT} but
# what is sourced is feature.env — looks like a copy/paste slip; confirm.
web_notify "read settings from ${WM_CFG_SETTINGS_DEFAULT}"
set -o allexport
source "${WM_CFG_INSTALL_PATH}/environment/feature.env"
set +o allexport
# In debug mode, dump every WM_HOST_* flag as "NAME enabled? VALUE".
if [[ "${WM_CFG_FRAMEWORK_DEBUG}" == "true" ]]
then
echo "WM-CONFIG FEATURE SELECTION:"
for key in $(env | grep WM_HOST_)
do
echo "$key" | awk -F"=" '{print $1 " enabled? " $2 ;}'
done
fi
}
# wm_config_session_init
#
# Meant to initiate an execution round of the wm_config
function wm_config_session_init
{
    # Optionally delay startup.
    # BUGFIX: the original test used -z, which tried to sleep precisely when
    # WM_CFG_STARTUP_DELAY was *empty* (making `sleep ""` fail and never
    # delaying when a delay was actually configured); -n sleeps only when a
    # delay value is set.
    if [[ -n "${WM_CFG_STARTUP_DELAY}" ]]
    then
        web_notify "delaying startup for ${WM_CFG_STARTUP_DELAY}"
        sleep "${WM_CFG_STARTUP_DELAY}"
    fi

    # Prefer the bundled python virtual environment when it exists.
    if [[ -d "${WM_CFG_PYTHON_VIRTUAL_ENV}" ]]
    then
        source "${WM_CFG_PYTHON_VIRTUAL_ENV}/bin/activate" || true
        web_notify "python virtual environment: $(command -v python)"
    else
        web_notify "using system's python environment: $(command -v python) (not found: ${WM_CFG_PYTHON_VIRTUAL_ENV})"
    fi

    # Start this run from an empty session scratch directory.
    rm -fr "${WM_CFG_SESSION_STORAGE_PATH}" || true
    mkdir -p "${WM_CFG_SESSION_STORAGE_PATH}" || true

    # Host-level preparation steps (each helper is defined elsewhere).
    host_clock_management
    host_systemd_management
    host_keyboard_management
    host_ip_management
    host_dependency_management
    host_tty_management
    host_ssh_daemon_management
    host_filesystem_management
    host_user_management
    host_hostname_management
    host_wifi_management
    host_support_management
    host_dbus_management
    host_docker_daemon_management

    # framework updates and device enumeration
    wm_config_update
    wm_config_bitrate_configuration
    wm_config_device_enumeration
}
# wm_config_session_end
#
# Meant to perform any operation prior the execution end
function wm_config_session_end
{
# exits the python venv; "|| true" keeps the script alive when no venv
# was activated (deactivate is then undefined).
deactivate || true
}
# wm_config_update
#
# update routine, which pull a docker container
# with the next release files
function wm_config_update
{
# When WM_CFG_FRAMEWORK_UPDATE is "true", refresh the installed entrypoint
# from this package, clear the update flag, and (on RPi hosts) reboot.
if [[ "${WM_CFG_FRAMEWORK_UPDATE}" == "true" ]]
then
docker_fetch_settings
web_notify "I am updating the base program and scheduling a job restart"
sudo cp --no-preserve=mode,ownership \
             "${WM_CFG_INSTALL_PATH}/bin/wm-config.sh" "${WM_CFG_ENTRYPOINT}"
sudo chmod +x "${WM_CFG_ENTRYPOINT}"
sudo chown root:root "${WM_CFG_ENTRYPOINT}"
# Persist the flag flip so the update runs only once.
wm_config_set_entry "WM_CFG_FRAMEWORK_UPDATE" "false"
if [[ "${WM_CFG_HOST_IS_RPI}" == "true" ]]
then
host_reboot 0
exit 0
fi
else
web_notify "skipping update pull"
fi
}
# wirepas_add_entry
#
# sets the input argument to false
function wm_config_set_entry
{
    # Set (or overwrite) a KEY=VALUE entry in the custom settings file.
    local _KEY=${1}
    local _NEW_VALUE=${2:-}
    local _SCRATCH="${WM_CFG_SESSION_STORAGE_PATH}/.custom.tmp"

    web_notify "set setting entry: ${_KEY}=${_NEW_VALUE}"

    # Drop any previous line mentioning the key from the settings file.
    sed -i "/${_KEY}/d" "${WM_CFG_SETTINGS_CUSTOM}"

    # Rebuild via a scratch copy, append the new entry, then move the
    # result back without preserving mode/ownership.
    cp "${WM_CFG_SETTINGS_CUSTOM}" "${_SCRATCH}"
    echo "${_KEY}=${_NEW_VALUE}" >> "${_SCRATCH}"
    cp --no-preserve=mode,ownership "${_SCRATCH}" "${WM_CFG_SETTINGS_CUSTOM}"
    rm "${_SCRATCH}"
}
# wm_config_template_copy
#
# copies and fills in the template by default the target
# file is replace. Pass in an optional operator as a 3rd argument
function wm_config_template_copy
{
# Render a template through shell expansion: the template body is wrapped
# in a here-doc script that is then sourced, so $variables inside the
# template are expanded by this shell into _OUTPUT_PATH.
local _TEMPLATE_NAME
local _OUTPUT_PATH
local _OPERATOR
local _TEMPLATE
# input name is basename
_TEMPLATE_NAME=${1:-"defaults"}
_OUTPUT_PATH=${2:-"template.output"}
# Redirection operator: ">" (replace, default) or e.g. ">>" to append.
_OPERATOR=${3:-">"}
# if set, changes the output filename
mkdir -p "${WM_CFG_TEMPLATE_PATH}"
_TEMPLATE=${WM_CFG_TEMPLATE_PATH}/${_TEMPLATE_NAME}.template
web_notify "generating ${_OUTPUT_PATH} based on ${_TEMPLATE}"
rm -f "${_OUTPUT_PATH}.tmp"
# Build a script of the form "cat <<EOF > target ... EOF" and source it.
( echo "cat <<EOF ${_OPERATOR} ${_OUTPUT_PATH}" && \
cat "${_TEMPLATE}" && \
echo "EOF" \
) > "${_OUTPUT_PATH}.tmp"
. "${_OUTPUT_PATH}.tmp"
rm "${_OUTPUT_PATH}.tmp"
}
# wm_config_bitrate_configuration
#
# creates a bitrate list to be index by the device id.
#
function wm_config_bitrate_configuration
{
    # Build the per-sink bitrate array (indexed by device id).
    export WM_GW_SINK_BITRATE_CONFIGURATION

    if [[ -z "${WM_GW_SINK_BITRATE_CONFIGURATION}" ]]
    then
        # Nothing configured: default all eleven slots (ids 0..10)
        # to 125000 bps.
        WM_GW_SINK_BITRATE_CONFIGURATION=()
        local _SLOT
        for _SLOT in {0..10}
        do
            WM_GW_SINK_BITRATE_CONFIGURATION+=( "125000" )
        done
    else
        # Split the space-separated user-supplied string into an array.
        WM_GW_SINK_BITRATE_CONFIGURATION=($(echo "${WM_GW_SINK_BITRATE_CONFIGURATION}" | tr " " "\\n"))
    fi
}
# wm_config_device_enumeration
#
# creates a list of tty ports. If they are blacklisted
# the ports wont be added to the list.
#
function wm_config_device_enumeration
{
    # Build WM_GW_SINK_LIST from the configured port rule, skipping
    # blacklisted devices; falls back to the single configured UART port
    # when enumeration is disabled.
    export WM_GW_SINK_LIST
    WM_GW_SINK_LIST=( )

    local _SINK_ENUMERATION_PATTERN
    local _SINK_ENUMERATION_IGNORE
    local _DEVICE
    local _BLACKLISTED
    local _SKIP_DEVICE

    # multi sink support
    if [[ "${WM_GW_SINK_ENUMERATION}" == "true" ]]
    then
        _SINK_ENUMERATION_PATTERN=($(echo "${WM_GW_SINK_PORT_RULE}" | tr " " "\\n"))
        for _DEVICE in "${_SINK_ENUMERATION_PATTERN[@]}"
        do
            # An entry still containing '*' is an unexpanded glob, i.e.
            # no device matched the rule.
            if [[ -z "${_DEVICE}" || "${_DEVICE}" == *"*"* ]]
            then
                web_notify "Could not find any device under ${_DEVICE}"
                continue
            fi

            # BUGFIX: the previous version executed "continue"
            # unconditionally whenever a blacklist existed, so *every*
            # device was skipped (blacklisted or not) and the sink list
            # stayed empty. Only an actual blacklist match skips now.
            _SKIP_DEVICE="false"
            if [[ ! -z "${WM_GW_SINK_BLACKLIST}" ]]
            then
                _SINK_ENUMERATION_IGNORE=($(echo "${WM_GW_SINK_BLACKLIST}" | tr " " "\\n"))
                for _BLACKLISTED in "${_SINK_ENUMERATION_IGNORE[@]}"
                do
                    if [[ "${_BLACKLISTED}" == "${_DEVICE}" ]]
                    then
                        web_notify "Device is blacklisted, skipping it (list=${_BLACKLISTED} == device=${_DEVICE})"
                        _SKIP_DEVICE="true"
                        break
                    fi
                done
            fi
            if [[ "${_SKIP_DEVICE}" == "true" ]]
            then
                continue
            fi

            WM_GW_SINK_LIST+=("${_DEVICE}")
        done
    else
        web_notify "skipping device enumeration - setting sink port to ${WM_GW_SINK_UART_PORT}"
        WM_GW_SINK_LIST=( "${WM_GW_SINK_UART_PORT}" )
    fi
}
# call wm_config_main
function wm_config_main
{
# Entry point: load settings, apply feature flags, then run the session
# lifecycle (init -> main -> end).
wm_config_load_settings
wm_config_feature_selection
web_notify ":wirepas:-config ${WM_CFG_VERSION}/${WM_CFG_HOST_ARCH}"
web_notify "ip addresses: $(hostname -I)"
wm_config_parser "$@"
wm_config_session_init
wm_config_session_main
wm_config_session_end
# NOTE(review): "${?}" is the status of wm_config_session_end, not of the
# main session — confirm this is the intended exit code.
exit "${?}"
}
| true
|
3d8082616f3ffd189eb04633f69593f91e9a8387
|
Shell
|
damoon/ceph-docker
|
/travis-builds/purge_cluster.sh
|
UTF-8
| 144
| 2.75
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# FUNCTIONS
# Stop every running container and wipe all Ceph state from the host.
# NOTE(review): intended for throwaway CI hosts only — the rm -rf calls are
# destructive, and "docker stop" errors when no containers are running.
function purge_ceph {
docker stop $(docker ps -q)
rm -rf /var/lib/ceph/*
rm -rf /etc/ceph
}
# MAIN
purge_ceph
| true
|
ccff2765cf1ef8676cc080fcd32e59ae51902c92
|
Shell
|
weikunzz/test_cases
|
/libvirt-ci/libvirt_ci/data/jobs/scripts/report_result_to_jira.sh
|
UTF-8
| 782
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/bash
#####################################
# Report failed test result to JIRA #
#####################################
# NOTE: the {curly-brace} tokens ({enable-report}, {fail-priority}, ...)
# are Jenkins Job Builder template parameters substituted before this
# script runs; they are not shell syntax.
set -xe
# Skip reporting for manual builds (unless forced) or when reporting is
# disabled via the {enable-report} parameter.
if [ $ROOT_BUILD_CAUSE = MANUALTRIGGER ] && [ $FORCE_REPORT != true ] || [ {enable-report} != true ]
then
echo "Skipping reporting to Jira"
else
# Report each junit result file that exists; reporting failures are
# logged but never fail the build.
if [ -f result_rhev.xml ]
then
ci report-to-jira --junit result_rhev.xml \
--fail-priority {fail-priority} --skip-priority {skip-priority} \
--fail-threshold {failure-thld} || echo "Report to JIRA failed, failure ignored"
fi
if [ -f result_rhel.xml ]
then
ci report-to-jira --junit result_rhel.xml \
--fail-priority {fail-priority} --skip-priority {skip-priority} || echo "Report to JIRA failed, failure ignored"
fi
fi
| true
|
d5e7d96113422db9b7a481a6828d2f71c37b288a
|
Shell
|
CraigJPerry/dotfiles
|
/dotfiles/bash_logout
|
UTF-8
| 225
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash # Never actually run directly, helps syntax highlighter though
# .bash_logout
# Wipe the screen when the login shell exits, but leave nested shells and
# xterm-style terminals alone. The "0" prefix makes an unset SHLVL compare
# as zero instead of raising an arithmetic error.
if [[ "0$SHLVL" -le 1 && "$TERM" != xterm* ]]; then
    clear
fi
| true
|
d17beebc4aba614c3f6c9aeb1e1e1979ffb03f30
|
Shell
|
ausov/helm-gs
|
/bin/main.sh
|
UTF-8
| 693
| 3.890625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
#
#? helm gs - Google Storage plugin for Helm
##? Usage: helm gs <command>
##?
##? Commands:
##? publish - Publish charts repository to Google Storage
#
# NOTE: bash (not plain sh) is required: "set -o pipefail", "function"
# and "local" are not POSIX sh features.
set -euo pipefail

# Self-documentation: "#?" lines hold the version banner, "##?" the help.
script_version=$(grep "^#?" "$0" | cut -c 4-)
script_help=$(grep "^##?" "$0" | cut -c 5-)

# fail MESSAGE... - print an error and terminate with status 1.
function fail() {
  local msg=${@}
  echo "ERROR: $msg"
  exit 1
}

# help - print the version banner followed by usage text.
function help() {
  echo "$script_version"
  echo
  echo "$script_help"
  echo
}

# BUGFIX: the original trap string was missing the closing double quote of
# its message, making the handler a syntax error when it fired. SIGKILL was
# also listed, but KILL can never be trapped, so it is dropped.
trap 'fail "caught signal!"' HUP QUIT

case "${1:-}" in
  publish)
    shift
    "$HELM_PLUGIN_DIR/bin/publish.sh" "$@"
    ;;
  --help|'')
    help
    ;;
  *)
    echo "Error: unknown command \"$1\" for \"helm gs\""
    echo "Run 'helm gs --help' for usage."
    # Report failure for unknown commands instead of silently succeeding.
    exit 1
    ;;
esac
exit 0
| true
|
83d93dc8f100c97b9c1ecf88739a3c9895f2f329
|
Shell
|
va2ron1/vector-datasource
|
/scripts/docker_boostrap.sh
|
UTF-8
| 1,324
| 2.96875
| 3
|
[] |
permissive
|
# Bootstrap an OSM tile database: enable extensions, import a metro
# extract with osm2pgsql, then load shapefiles and run SQL updates.
set -e
set -x
[ -z "$POSTGRES_PASSWORD" ] && echo "Need to set POSTGRES_PASSWORD" && exit 1;
export PGPASSWORD="$POSTGRES_PASSWORD"
METRO_EXTRACT_NAME="${METRO_EXTRACT_NAME:-new-york_new-york}"
# Ensure the postgis/hstore extensions exist before importing.
psql -h "${POSTGRES_HOST:-postgres}" \
-p "${POSTGRES_PORT:-5432}" \
-U "${POSTGRES_USER:-osm}" \
-d "${POSTGRES_DB:-osm}" \
-c "create extension if not exists postgis; create extension if not exists hstore;"
/usr/bin/wget https://s3.amazonaws.com/metro-extracts.mapzen.com/${METRO_EXTRACT_NAME}.osm.pbf
# Slim mode keeps intermediate tables on disk; 1024 MB node cache.
osm2pgsql --slim \
--cache 1024 \
--style osm2pgsql.style \
--hstore-all \
${METRO_EXTRACT_NAME}.osm.pbf \
-H "${POSTGRES_HOST:-postgres}" \
-P "${POSTGRES_PORT:-5432}" \
-U "${POSTGRES_USER:-osm}" \
-d "${POSTGRES_DB:-osm}"
rm ${METRO_EXTRACT_NAME}.osm.pbf
# Fetch and load auxiliary shapefile data from the data/ directory.
cd data
/usr/bin/python2.7 bootstrap.py
/usr/bin/make -f Makefile-import-data
./import-shapefiles.sh | \
psql -h "${POSTGRES_HOST:-postgres}" \
-p "${POSTGRES_PORT:-5432}" \
-U "${POSTGRES_USER:-osm}" \
-d "${POSTGRES_DB:-osm}"
./perform-sql-updates.sh \
-h "${POSTGRES_HOST:-postgres}" \
-p "${POSTGRES_PORT:-5432}" \
-U "${POSTGRES_USER:-osm}" \
-d "${POSTGRES_DB:-osm}"
/usr/bin/make -f Makefile-import-data clean
cd ..
| true
|
e04a193a8f6cecef0efa7589b29a03a628a21b33
|
Shell
|
bcntec-learning/bigdata-with-hadoop
|
/hadoop/env/scripts/init.sh
|
UTF-8
| 283
| 2.640625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Initialize a Hadoop environment: load optional local settings, verify
# the required variables, format the namenode and start HDFS + YARN.

# Pull in local overrides when present.
if [ -f env.sh ]; then
    . env.sh
fi

if [[ -z $HADOOP_VERSION ]]; then
    echo "HADOOP_VERSION required"
    # Exit non-zero so callers can detect the missing configuration.
    exit 1
fi

# BUGFIX: the original test read "[[ -z HADOOP_HOME ]]" (missing "$"),
# which checked the literal string and therefore never triggered.
if [[ -z $HADOOP_HOME ]]; then
    echo "HADOOP_HOME required"
    exit 1
fi

hdfs namenode -format
start-dfs.sh
sleep 10
start-yarn.sh
| true
|
968f239671f857c2766d7f0c78fe0024a3b0dabc
|
Shell
|
AtticFantastic/emqttd-docker
|
/src/docker-entrypoint.sh
|
UTF-8
| 771
| 3.625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set -e
EMQTTD_DIR=/opt/emqttd
# Overlay any files mounted in the config volume onto emqttd's etc dir.
_copy_config() {
CONFIG_VOLUME=/etc/emqttd/config
if [ "$(ls -A $CONFIG_VOLUME)" ]; then
cp -ur $CONFIG_VOLUME/* $EMQTTD_DIR/etc
fi
}
_configure_plugins() {
# copy autoload list if provided
PLUGINS_LIST=$EMQTTD_DIR/etc/plugins.load
if [ -f $PLUGINS_LIST ]; then
cp $PLUGINS_LIST $EMQTTD_DIR/data/loaded_plugins
fi
# copy custom plugin configuration if provided
PLUGINS_VOLUME=/etc/emqttd/plugins
if [ "$(ls -A $PLUGINS_VOLUME)" ]; then
cp -ur $PLUGINS_VOLUME/* $EMQTTD_DIR/etc/plugins
fi
}
# When launched as "emqttd" by root: stage configuration, fix ownership,
# then re-exec this entrypoint as the unprivileged emqttd user via gosu.
# NOTE(review): "-a" inside [ ] is obsolescent; [ x ] && [ y ] is preferred.
if [ "$1" = 'emqttd' -a "$(id -u)" = '0' ]; then
_copy_config
_configure_plugins
chown -R emqttd:emqttd .
exec gosu emqttd "$0" "$@"
fi
# Any other command line is executed verbatim.
exec "$@"
| true
|
ada9b985b7087144c1d9d344a8ad910635e9c468
|
Shell
|
vimm0/auto-script
|
/tech/linux/commandline/zsh/.zshrc
|
UTF-8
| 774
| 3.3125
| 3
|
[
"MIT"
] |
permissive
|
# Custom zsh command.
# Below .zshrc paste the following script.
# Temporary Python Virtual Environment
SRC_DIRECTORY="$HOME/envs"
RED='\033[0;31m'
# Create a new virtualenv named $1 under $SRC_DIRECTORY and activate it.
# NOTE(review): naming this "env" shadows the standard env(1) command for
# this shell — consider renaming (e.g. "mkenv").
env(){
if [ $1 ]
then
virtualenv $SRC_DIRECTORY/$1
source $SRC_DIRECTORY/$1/bin/activate;
else
echo "${RED}Warning: Please, provide new directory name as parameter!"
fi
}
# Activate an existing virtualenv by name.
go(){
if [ $1 ]
then
source $SRC_DIRECTORY/$1/bin/activate;
else
echo "${RED}Warning: Please, provide destination name for source!"
fi
}
# Deactivate the current virtualenv and return to $HOME.
nogo(){
deactivate
cd ~
}
# Django Aliases
alias d='django-admin.py'
alias dsa='django-admin.py startapp'
alias dsp='django-admin.py startproject'
alias runserver='python manage.py runserver'
alias makemigrations='python manage.py makemigrations'
alias migrate='python manage.py migrate'
| true
|
603af2cfad39bedd1c0de964b4bf1639ad658341
|
Shell
|
dbburgess/dotfiles
|
/scripts/install.sh
|
UTF-8
| 245
| 3.4375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
#
# Run all dotfiles installers.
set -e
# Work from the repository root (parent of this script's directory).
# Quoting "$0" keeps this working when the checkout path contains spaces.
cd "$(dirname "$0")/.."
# Find other installers and run them iteratively, skipping this script's
# own directory and homebrew. IFS=/-r keep odd filenames intact.
find . -name install.sh | grep -v "scripts\|homebrew" | while IFS= read -r installer ; do sh -c "${installer}" ; done
| true
|
9a09c5256afe93e5398ec75e8e179b9c96a0664b
|
Shell
|
damiendart/toolbox
|
/.profile
|
UTF-8
| 1,276
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/sh
#
# Damien Dart's cross-shell configuration file for login shells.
#
# This file was written by Damien Dart, <damiendart@pobox.com>. This is
# free and unencumbered software released into the public domain. For
# more information, please refer to the accompanying "UNLICENCE" file.
# shellcheck disable=SC2034
# NOTE(review): "IPS" looks like a typo for "IFS" (hence the unused-variable
# suppression above) — confirm before changing.
IPS="\n"
export EDITOR='vim'
export GOPATH="$HOME/.go"
export NPM_CONFIG_PREFIX="$HOME/.npm"
export NOTES_ROOT="$HOME/Syncthing/Notes"
export SNIPPET_PATH="$HOME/Shed/snippets/snippets:$NOTES_ROOT/templates"
# shellcheck disable=SC2155
export TOOLBOX_ROOT="$(dirname "$(readlink "$HOME/.profile")")"
# Append each directory listed in the here-doc below to PATH, but only if
# it exists and is not already present.
while read -r ITEM; do
if [ -d "$ITEM" ]; then
# The following is a POSIX-compatible method of preventing duplicate
# entries in the PATH environmental variable; it is based on a
# snippet from <https://unix.stackexchange.com/a/32054>.
case ":$PATH:" in
*:$ITEM:*) ;;
*) export PATH="$PATH:$ITEM" ;;
esac
fi
done <<PATHS
$GOPATH/bin
$HOME/.cargo/bin
$HOME/.local/bin
$HOME/.local/share/JetBrains/Toolbox/scripts
$HOME/Library/Python/3.7/bin
$NPM_CONFIG_PREFIX/bin
$TOOLBOX_ROOT/bin
/usr/local/go/bin
/usr/local/node/bin
/usr/local/python/bin
PATHS
# Per-machine overrides, when present.
if [ -f ~/.machine.profile ]; then
# shellcheck disable=SC1090
. ~/.machine.profile
fi
| true
|
1f3c250b3372be0a1156d613dcc19b43a952758e
|
Shell
|
elbosso/elbosso.github.io
|
/resources/startxprawithxephyrondisplay77orconnect.sh
|
UTF-8
| 4,993
| 2.828125
| 3
|
[
"LicenseRef-scancode-public-domain",
"Unlicense"
] |
permissive
|
#!/bin/sh
#Copyright (c) 2012-2019.
#
#Juergen Key. Alle Rechte vorbehalten.
#
#Weiterverbreitung und Verwendung in nichtkompilierter oder kompilierter Form,
#mit oder ohne Veraenderung, sind unter den folgenden Bedingungen zulaessig:
#
# 1. Weiterverbreitete nichtkompilierte Exemplare muessen das obige Copyright,
#die Liste der Bedingungen und den folgenden Haftungsausschluss im Quelltext
#enthalten.
# 2. Weiterverbreitete kompilierte Exemplare muessen das obige Copyright,
#die Liste der Bedingungen und den folgenden Haftungsausschluss in der
#Dokumentation und/oder anderen Materialien, die mit dem Exemplar verbreitet
#werden, enthalten.
# 3. Weder der Name des Autors noch die Namen der Beitragsleistenden
#duerfen zum Kennzeichnen oder Bewerben von Produkten, die von dieser Software
#abgeleitet wurden, ohne spezielle vorherige schriftliche Genehmigung verwendet
#werden.
#
#DIESE SOFTWARE WIRD VOM AUTOR UND DEN BEITRAGSLEISTENDEN OHNE
#JEGLICHE SPEZIELLE ODER IMPLIZIERTE GARANTIEN ZUR VERFUEGUNG GESTELLT, DIE
#UNTER ANDEREM EINSCHLIESSEN: DIE IMPLIZIERTE GARANTIE DER VERWENDBARKEIT DER
#SOFTWARE FUER EINEN BESTIMMTEN ZWECK. AUF KEINEN FALL IST DER AUTOR
#ODER DIE BEITRAGSLEISTENDEN FUER IRGENDWELCHE DIREKTEN, INDIREKTEN,
#ZUFAELLIGEN, SPEZIELLEN, BEISPIELHAFTEN ODER FOLGENDEN SCHAEDEN (UNTER ANDEREM
#VERSCHAFFEN VON ERSATZGUETERN ODER -DIENSTLEISTUNGEN; EINSCHRAENKUNG DER
#NUTZUNGSFAEHIGKEIT; VERLUST VON NUTZUNGSFAEHIGKEIT; DATEN; PROFIT ODER
#GESCHAEFTSUNTERBRECHUNG), WIE AUCH IMMER VERURSACHT UND UNTER WELCHER
#VERPFLICHTUNG AUCH IMMER, OB IN VERTRAG, STRIKTER VERPFLICHTUNG ODER
#UNERLAUBTE HANDLUNG (INKLUSIVE FAHRLAESSIGKEIT) VERANTWORTLICH, AUF WELCHEM
#WEG SIE AUCH IMMER DURCH DIE BENUTZUNG DIESER SOFTWARE ENTSTANDEN SIND, SOGAR,
#WENN SIE AUF DIE MOEGLICHKEIT EINES SOLCHEN SCHADENS HINGEWIESEN WORDEN SIND.
start() {
# Capture the local keyboard map and push it to the remote host so the
# remote nested display uses identical key bindings.
DISPLAY=:0 xmodmap -pke > /tmp/my_xmodmap
scp /tmp/my_xmodmap "$XPRA_SSH_USER"@"$XPRA_SSH_SERVER":/tmp
# If remote display :78 already exists (xmodmap succeeds), just attach to
# xpra display :77; otherwise bootstrap the remote session first.
if ssh "$XPRA_SSH_USER"@"$XPRA_SSH_SERVER" 'DISPLAY=:78 xmodmap /tmp/my_xmodmap' > /dev/null 2>&1; then
echo "user $XPRA_SSH_USER can connect to display $DISPLAY"
xpra attach --clipboard=yes --clipboard-direction=both ssh/"$XPRA_SSH_USER"@"$XPRA_SSH_SERVER"/77&
pkill -f ".*/tmp/start_xpra_remote.sh.*"
else
echo "user $XPRA_SSH_USER cannot connect to display $DISPLAY"
# Generate the remote bootstrap script (xpra :77 hosting a nested Xephyr
# :78 plus a window manager). The heredoc is unquoted: escaped \$ survive
# for remote-side expansion.
cat <<EOM >/tmp/start_xpra_remote.sh
rm -fr ~/.Xauthority-*
xpra start :77 --clipboard=yes --clipboard-direction=both --start="Xephyr -ac -keybd ephyr,xkbmodel=pc105,xkblayout='de(nodeadkeys)',xkbrules=evdev,xkboption=grp:alts_toogle -screen 1680x1050 -br :78"
DISPLAY=:78 xterm -iconic -e "exit"
while [ \$? -ne 0 ] ;do
echo "waiting "\$?
sleep 1
DISPLAY=:78 xterm -iconic -e "exit"
done
echo "trying openbox"
DISPLAY=:78 xterm &
# DISPLAY=:78 cairo-dock &
# DISPLAY=:78 plank&
#just install a minimal lightweight window manager i thought...
#turns out that full-fledged web browssers (firefox as well as chromium-browser)
#open ghost windows that dont get redrawn and can not be closed except for
#a restart of the window manager. So i had to resort to fluxbox where this
# is not so big a problem because those windows open iconified there...
# The browsers mentioned above were not the real problem - the script
#start_xpra_remote running amok was it!
# DISPLAY=:78 metacity &
# DISPLAY=:78 mutter &
DISPLAY=:78 blackbox &
# DISPLAY=:78 openbox &
# DISPLAY=:78 fluxbox &
DISPLAY=:78 setxkbmap -model pc105 -layout de -variant ,nodeadkeys,
EOM
# Ship the bootstrap script, run it, apply the keymap, then attach.
scp /tmp/start_xpra_remote.sh "$XPRA_SSH_USER"@"$XPRA_SSH_SERVER":/tmp
ssh "$XPRA_SSH_USER"@"$XPRA_SSH_SERVER" '/bin/bash /tmp/start_xpra_remote.sh > /dev/null &2>1'
echo "user $XPRA_SSH_USER can connect to display $DISPLAY"
ssh -Y "$XPRA_SSH_USER"@"$XPRA_SSH_SERVER" 'DISPLAY=:78 xmodmap /tmp/my_xmodmap'
xpra attach --clipboard=yes --clipboard-direction=both ssh/"$XPRA_SSH_USER"@"$XPRA_SSH_SERVER"/77&
pkill -f ".*/tmp/start_xpra_remote.sh.*"
fi
}
detach() {
DISPLAY=:77
# Probe the remote side first; only detach when the ssh probe succeeds.
if ! ssh "$XPRA_SSH_USER"@"$XPRA_SSH_SERVER" 'DISPLAY=:78 xterm -iconic -e "exit"' > /dev/null 2>&1; then
return
fi
echo "user $XPRA_SSH_USER can connect to display $DISPLAY"
xpra detach ssh/"$XPRA_SSH_USER"@"$XPRA_SSH_SERVER"/77&
}
stop() {
# Shut down the remote xpra display, but only if the server is reachable.
ssh "$XPRA_SSH_USER"@"$XPRA_SSH_SERVER" 'DISPLAY=:78 xterm -iconic -e "exit"' > /dev/null 2>&1 || return 0
echo "user $XPRA_SSH_USER can connect to display $DISPLAY"
ssh "$XPRA_SSH_USER"@"$XPRA_SSH_SERVER" 'xpra stop :77'
}
status() {
DISPLAY=:77
# Report whether the remote session answers an X client probe on :78.
if ssh "$XPRA_SSH_USER"@"$XPRA_SSH_SERVER" 'DISPLAY=:78 xterm -iconic -e "exit"' > /dev/null 2>&1; then
echo "running"
else
echo "not running"
fi
}
### main logic ###
# Service-style dispatch on the verb given as $1.
case "$1" in
start)
start
;;
stop)
stop
;;
detach)
detach
;;
status)
status
;;
restart|reload)
stop
start
;;
*)
echo "Usage: $0 {start|stop|detach|restart|status}"
exit 1
esac
exit 0
| true
|
c0ad93580d33ceb951a26341b8434ce5c92074f5
|
Shell
|
linxuyalun/drone-git
|
/posix/clone
|
UTF-8
| 2,193
| 3.296875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh [0/533]
# Drone CI clone step: write git credentials (.netrc + ssh key), set the
# commit identity, then dispatch to the per-event clone sub-script.
# NOTE: "$${VAR}" is Drone's escape for a literal "${VAR}" expanded at
# container runtime, not a shell typo. The [[ ]] tests are bashisms under
# "#!/bin/sh"; the CI image's sh evidently tolerates them — confirm.
if [[ ! -z "${DRONE_WORKSPACE}" ]]; then
cd ${DRONE_WORKSPACE}
fi
# if the netrc enviornment variables exist, write
# the netrc file.
cat <<EOF > /root/.netrc
machine $${GIT_REPO_MACHINE_IP}
login $${MACHINE_USERNAME}
password $${MACHINE_PASSWORD}
EOF
# if the ssh_key environment variable exists, write
# the ssh key and add the netrc machine to the
# Do remember to put your own ssh key in the repo.
mkdir /root/.ssh
cat /ssh_rsa > /root/.ssh/id_rsa
chmod 600 /root/.ssh/id_rsa
touch /root/.ssh/known_hosts
chmod 600 /root/.ssh/known_hosts
ssh-keyscan -t rsa -p $${GIT_REPO_MACHINE_PORT} $${GIT_REPO_MACHINE_IP} > /root/.ssh/known_hosts 2> /dev/null
# configure git global behavior and parameters via the
# following environment variables:
echo "SSH to server to verify private key..."
ssh -T -p $${GIT_REPO_MACHINE_PORT} git@$${GIT_REPO_MACHINE_IP} -i /root/.ssh/id_rsa
# Default the commit identity when the build event carries none.
if [[ -z "${DRONE_COMMIT_AUTHOR_NAME}" ]]; then
export DRONE_COMMIT_AUTHOR_NAME=drone
fi
if [[ -z "${DRONE_COMMIT_AUTHOR_EMAIL}" ]]; then
export DRONE_COMMIT_AUTHOR_EMAIL=drone@localhost
fi
export GIT_AUTHOR_NAME=${DRONE_COMMIT_AUTHOR_NAME}
export GIT_AUTHOR_EMAIL=${DRONE_COMMIT_AUTHOR_EMAIL}
export GIT_COMMITTER_NAME=${DRONE_COMMIT_AUTHOR_NAME}
export GIT_COMMITTER_EMAIL=${DRONE_COMMIT_AUTHOR_EMAIL}
# invoke the sub-script based on the drone event type.
# TODO we should ultimately look at the ref, since
# we need something compatible with deployment events.
CLONE_TYPE=$DRONE_BUILD_EVENT
case $DRONE_COMMIT_REF in
refs/tags/* ) CLONE_TYPE=tag ;;
refs/pull/* ) CLONE_TYPE=pull_request ;;
refs/pull-request/* ) CLONE_TYPE=pull_request ;;
refs/merge-requests/* ) CLONE_TYPE=pull_request ;;
esac
case $CLONE_TYPE in
pull_request)
clone-pull-request
;;
tag)
clone-tag
;;
*)
clone-commit
;;
esac
| true
|
bf3cb6c518cb7fc6756c7aec8b52523927ce5014
|
Shell
|
ahungry/scratch
|
/composed-types/rt-bench-maker.sh
|
UTF-8
| 846
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate a JavaScript benchmark to stdout: a deep chain of type-checked
# composed functions (add2..add5000) built on a tiny runtime-type system.
echo 'const Num = Symbol("n")'
echo 'const Str = Symbol("s")'
echo 'const fn = (i, o, f) => ({ i, o, f })'
echo 'const invoke = ({ f }) => (x) => f(x)'
echo 'const in_type = ({ i }) => i'
echo 'const out_type = ({ o }) => o'
echo 'const err = (_i, _o) => { throw new Error("Incompatible types!") }'
echo 'const assert_types = (f, g) => in_type(f) === out_type(g) ? true : err(in_type(f), out_type(g))'
echo 'const comp = (f, g) => assert_types(f, g) ?'
echo ' fn(out_type(g), in_type(f), (x) => invoke(f)(invoke(g)(x))) :'
echo ' false'
echo 'const add_one = fn(Num, Num, n => n + 1)'
echo 'const add2 = comp(add_one, add_one)'
echo 'const add3 = comp(add_one, add2)'
gen_fun () {
  # Emit one composed-function definition: add<N> = add_one ∘ add<N-1>.
  printf '%s\n' 'const add'"$1"' = comp(add_one, add'"$(($1 - 1))"')'
}
# Emit add4..add5000, then invoke the deepest composition on 1.
for i in {4..5000}; do
gen_fun $i
done
# NOTE: $i retains its final loop value (5000) after the loop ends.
echo 'console.log(invoke(add'$i')(1))'
| true
|
2950261849cd3ad258b8cd7c1bbe882cc796c667
|
Shell
|
sfinktah/bash
|
/makefunction.sh
|
UTF-8
| 369
| 3.78125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# makes function from source file
# $1 - function name
# $2 - file name, or blank for stdin
makefunction() {
	# Define a shell function named $1 whose body is the contents of file
	# $2 (or stdin when $2 is omitted — $2 stays intentionally unquoted so
	# an absent argument makes "cat" read stdin rather than fail on "").
	# if [ $2 == "-" ]; then
	# cat >
	# Use an unpredictable mktemp file instead of the original /tmp/$$
	# (predictable name, symlink-attack prone).
	local tmpfile
	tmpfile=$(mktemp) || return 1
	printf '%s() {\n' "$1" > "$tmpfile"
	cat $2 >> "$tmpfile"
	echo "}" >> "$tmpfile"
	. "$tmpfile"
	rm -f "$tmpfile"
}
runmakefunction() {
	# Wrap stdin in a uniquely named function, then run it once.
	fn="_$RANDOM"
	makefunction $fn
	$fn
}
makeloop() {
	# Source the given file in an endless loop (live-reload helper);
	# terminate with Ctrl-C.
	while :; do
		. $1
	done
}
| true
|
f957dd21ffb73d10c5863500e1cd1f571cea2322
|
Shell
|
bbonsign/.dotfiles
|
/zshrc
|
UTF-8
| 711
| 2.53125
| 3
|
[] |
no_license
|
# zsh interactive configuration: completions, prompt, colours and aliases.
fpath=(/usr/local/share/zsh-completions $fpath)
eval "$(starship init zsh)"
# Render man pages through bat for syntax highlighting.
export MANPAGER="sh -c 'col -bx | bat -l man -p'"
# Get ls output with colors
# Zsh to use the same colors as ls
#LS_COLORS='no=00;37:fi=00:di=1;33:ln=04;36:pi=40;33:so=01;35:bd=40;33;01:'
LSCOLORS='Exfxcxdxbxcxdxbhagacad'
export LSCOLORS
zstyle ':completion:*' list-colors ${(s.:.)LSCOLORS}
alias ls='ls -GH'
alias ll='ls -al'
# Load Git completion
zstyle ':completion:*:*:git:*' script ~/.zsh/git-completion.bash
fpath=(~/.zsh $fpath)
autoload -Uz compinit && compinit
# fzf key bindings, when installed.
[ -f ~/.fzf.zsh ] && source ~/.fzf.zsh
# Bare-repo dotfiles helper.
alias gdot='/usr/bin/git --git-dir=$HOME/.dotfiles.git/ --work-tree=$HOME'
export PATH="$HOME/doom-emacs/bin/:$PATH"
| true
|
3e6e9618dc9bdd7f2aa9c09d8f8483c6cc9eef16
|
Shell
|
lightcube/LightCube-OS
|
/old-rpm-specs/mkinitramfs/init.in
|
UTF-8
| 2,235
| 3.984375
| 4
|
[] |
no_license
|
#!/bin/sh
# Initramfs /init: mount the real root filesystem and hand control over to
# the system init. Recognised kernel command-line parameters (parsed in
# the main section): init=, root=, rootdelay=, rootfstype=, rootflags=,
# ro, rw.
PATH=/bin:/usr/bin:/sbin:/usr/sbin
export PATH
# Defaults, possibly overridden by /proc/cmdline below.
init=/sbin/init
root=
rootdelay=
rootfstype=auto
ro="ro"
rootflags=
device=
# Drop the operator into a rescue shell; returns when that shell exits.
problem() {
cat <<"EOF"
Encountered a problem!
Dropping you to a shell.
EOF
sh
}
do_mount_root() {
# Mount the real root filesystem on /.root, resolving /dev/* and UUID=
# forms of the root= parameter and honouring rootflags/ro/rw.
mkdir /.root
[ -n "$rootflags" ] && rootflags="$rootflags,"
rootflags="$rootflags$ro"
case "$root" in
/dev/*)
device=$root
;;
UUID=*)
# "eval" turns the UUID=<value> word into a UUID variable.
eval $root
device="/dev/disk/by-uuid/$UUID"
;;
"")
echo "No root device specified."
problem
;;
esac
# Loop through rescue shells until the block device node exists.
while [ ! -b "$device" ] ; do
cat <<EOF
The device $device, which is supposed to contain the
root file system, does not exist.
Please fix this problem and exit this shell.
EOF
problem
done
if ! mount -n -t "$rootfstype" -o "$rootflags" "$device" /.root ; then
cat <<EOF
Could not mount device $device
Sleeping forever. Please reboot and fix the kernel command line.
Maybe the device is formatted with an unsupported file system?
Or maybe filesystem type autodetection went wrong, in which case
you should add the rootfstype=... parameter to the kernel command line.
Available partitions:
EOF
cat /proc/partitions
# Unrecoverable: park forever so the operator sees the message above.
while true ; do sleep 10000 ; done
else
echo "Successfully mounted device $root"
fi
}
# Bring up pseudo-filesystems and the minimal device nodes udev needs.
mount -n -t proc proc /proc
mount -n -t sysfs sysfs /sys
mount -t tmpfs tmpfs /dev
mknod -m 640 /dev/console c 5 1
mknod -m 664 /dev/null c 1 3
mount -t tmpfs tmpfs /run
# Parse the kernel command line into the variables declared above.
read -r cmdline </proc/cmdline
for param in $cmdline ; do
case $param in
init=*)
init=${param#init=}
;;
root=*)
root=${param#root=}
;;
rootdelay=*)
rootdelay=${param#rootdelay=}
;;
rootfstype=*)
rootfstype=${param#rootfstype=}
;;
rootflags=*)
rootflags=${param#rootflags=}
;;
ro)
ro="ro"
;;
rw)
ro="rw"
;;
esac
done
# Let udev populate /dev and settle before touching the root device.
/lib/udev/udevd --daemon
/sbin/udevadm trigger --action=add --type=subsystems
/sbin/udevadm trigger --action=add --type=devices
/sbin/udevadm settle
# Assemble RAID arrays if configured, honour rootdelay, then mount root.
if [ -f /etc/mdadm.conf ] ; then mdadm -As ; fi
if [ -n "$rootdelay" ] ; then sleep "$rootdelay" ; fi
do_mount_root
killall udevd 2>/dev/null
# Move a fresh /dev onto the new root and switch to the real init.
mount -n -t tmpfs tmpfs /.root/dev
exec switch_root /.root "$init" "$@"
| true
|
c88e7600e47733c92297c84133a4c115353c52c2
|
Shell
|
DamolAAkinleye/big-data-experiments
|
/clusterScripts/docker-cluster-update-version.sh
|
UTF-8
| 689
| 2.96875
| 3
|
[] |
no_license
|
# Snapshot every container of the Hadoop test cluster into images tagged
# "build-<version>". Usage: docker-cluster-update-version.sh <version>
version=$1
if [ -z "$version" ]
then
echo "Version is empty"
# BUGFIX: a bare "exit" returned 0 here, reporting success even though
# nothing was committed; callers/CI need a failure status.
exit 1
else
echo "${version}"
docker commit -a anrisu ubuntu-zookeeper "ubuntu-zookeeper:build-$version"
docker commit -a anrisu ubuntu-hadoop-master "ubuntu-hadoop-master:build-$version"
docker commit -a anrisu ubuntu-hadoop-edge "ubuntu-hadoop-edge:build-$version"
docker commit -a anrisu ubuntu-hadoop-datanode-01 "ubuntu-hadoop-datanode-01:build-$version"
docker commit -a anrisu ubuntu-hadoop-datanode-02 "ubuntu-hadoop-datanode-02:build-$version"
docker commit -a anrisu ubuntu-hadoop-datanode-03 "ubuntu-hadoop-datanode-03:build-$version"
docker commit -a anrisu ubuntu-mysql-server "ubuntu-mysql-server:build-$version"
fi
| true
|
a53b2746f8f874f38b87557caaea9ef86ff231b8
|
Shell
|
maigbodi/laptop
|
/node.sh
|
UTF-8
| 360
| 2.96875
| 3
|
[] |
no_license
|
# Installing and updating nvm, node and npm to the most recent versions
# NOTE: fancy_echo/green_echo are helpers defined by the surrounding
# laptop-setup scripts; this file is meant to be sourced by them.
fancy_echo "Updating NVM, Node and NPM..."
export NVM_DIR="$HOME/.nvm"
# Load nvm from its Homebrew install location.
source $(brew --prefix nvm)/nvm.sh
nvm install node
node --version
npm update npm -g
npm update -g
fancy_echo "Installing global NPM packages..."
npm install -g nodemon
green_echo "Done with Node/NPM installs!"
| true
|
b7327d392ee59f745402b3c7a4fe59c9aa76aa1d
|
Shell
|
KieranJamess/KJ-Repo
|
/AZURE-PuppetMaster/files/basic_setup_puppet.sh
|
UTF-8
| 1,121
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# Bootstrap a Puppet Enterprise master on Ubuntu: download the installer,
# render a minimal pe.conf, install, run the agent twice and restart the
# core PE services.
HOME=/root
WORKDIR="/tmp"
SERVERVERSION="18.04"
PUPPETFILE="puppet-enterprise-2019.8.5-ubuntu-$SERVERVERSION-amd64"
PUPPETURL="https://pm.puppet.com/cgi-bin/download.cgi?dist=ubuntu&rel=$SERVERVERSION&arch=amd64&ver=latest"
echo "PUPPETFILE: $PUPPETFILE"
echo "PUPPETURL: $PUPPETURL"
# Installer answer file; java heaps are capped for low-memory machines.
cat > /tmp/pe.conf << FILE
"console_admin_password": "kieran"
"puppet_enterprise::puppet_master_host": "%%{::trusted.certname}"
# Configure for low memory use on local machines
"puppet_enterprise::profile::master::java_args": {"Xmx": "256m", "Xms": "256m"}
"puppet_enterprise::profile::puppetdb::java_args": {"Xmx": "256m", "Xms": "256m"}
"puppet_enterprise::profile::console::java_args": {"Xmx": "256m", "Xms": "256m"}
"puppet_enterprise::profile::orchestrator::java_args": {"Xmx": "256m", "Xms": "256m"}
FILE
mkdir -p /etc/puppetlabs/puppet/
# NOTE(review): curl downloads into the current directory while tar reads
# $PUPPETFILE.tar.gz relative to it — this assumes the cwd is /tmp; confirm.
curl -JLO $PUPPETURL
tar -xvzf $PUPPETFILE.tar.gz -C $WORKDIR
/tmp/$PUPPETFILE/puppet-enterprise-installer -c /tmp/pe.conf
# Two agent runs to converge, then restart the PE services.
puppet agent -t
puppet agent -t
systemctl restart pe-puppetserver.service
systemctl restart pe-puppetdb.service
systemctl restart pe-nginx.service
| true
|
55713c32ac4ea23d3cc4fed02dc21190cd83e7b7
|
Shell
|
conda-forge/hdf4-feedstock
|
/recipe/build.sh
|
UTF-8
| 1,595
| 3.125
| 3
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# conda-forge build script for HDF4: configure, build, optionally test,
# install, and rename tools that clash with netCDF.
set -x
# Get an updated config.sub and config.guess
cp $BUILD_PREFIX/share/gnuconfig/config.* .
# The compiler flags interfere with the build and we need to override them :-/
if [[ $(uname) == Darwin ]]; then
unset CPPFLAGS
export CPPFLAGS="-Wl,-rpath,$PREFIX/lib -I${PREFIX}/include"
unset LDFLAGS
export LDFLAGS="-L$PREFIX/lib -Wl,-rpath,$PREFIX/lib -headerpad_max_install_names"
fi
autoreconf -vfi
# The --enable-silent-rules is needed because Travis CI dies on the long output from this build.
./configure --prefix=${PREFIX}\
--host=$HOST \
--enable-linux-lfs \
--enable-silent-rules \
--enable-shared \
--with-ssl \
--with-zlib \
--with-jpeg \
--disable-netcdf \
--disable-fortran
make
# ncgen segfaults on macOS
if [[ $(uname) != Darwin ]]; then
# Skip the test suite when cross-compiling without an emulator.
if [[ "${CONDA_BUILD_CROSS_COMPILATION:-}" != "1" || "${CROSSCOMPILING_EMULATOR}" != "" ]]; then
# The following tests seem to fail on ppc64le when running on azure with emulation
# Testing reading of netCDF file using the SDxxx interface (tnetcdf.c)
# *** Routine netCDF Read Test 1. SDstart failed on file test1.nc FAILED at line 176 ***
# *** Routine SDstart FAILED at line 83 ***
if [[ "${target_platform}" != "linux-ppc64le" ]]; then
make check
fi
fi
fi
make install
# Remove man pages.
rm -rf ${PREFIX}/share
# Avoid clashing names with netcdf.
mv ${PREFIX}/bin/ncdump ${PREFIX}/bin/h4_ncdump
mv ${PREFIX}/bin/ncgen ${PREFIX}/bin/h4_ncgen
# People usually Google these.
rm -rf ${PREFIX}/examples
| true
|
d82eeb9a24b5626ee28c8328db54f2a35a646b93
|
Shell
|
jboxberger/synology-gitea-jboxberger
|
/src/scripts/start-stop-status
|
UTF-8
| 865
| 3.1875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Copyright (c) 2000-2015 Synology Inc. All rights reserved.
# DSM package start-stop-status hook: controls the Gitea docker container
# via the Synology web API and (un)links the package UI.  Exit codes follow
# the DSM convention: 0 = success/running, non-zero = failure/stopped.
. "$(dirname $0)"/common
UI_PATH="/usr/syno/synoman/webman/3rdparty/$PKG_NAME"
case "$1" in
start)
# Expose the package UI, then start the container.
[ -d "$TARGET_PATH"/ui ] && ln -s "$TARGET_PATH"/ui "$UI_PATH"
"$SYNO_WEBAPI" --exec api=SYNO.Docker.Container version=1 method=start name="$GITEA_NAME" && exit 0
exit 1
;;
stop)
rm "$UI_PATH"
"$SYNO_WEBAPI" --exec api=SYNO.Docker.Container version=1 method=stop name="$GITEA_NAME" || exit 1
exit 0
;;
status)
[ -e "$DOCKER_BIN" ] || exit 1
docker_version=$(synopkg version Docker | cut -d '-' -f 2)
# Docker package <= build 0344 reports state under "SynoStatus";
# newer builds use the upstream "Status" key.
if [ "$docker_version" -le 0344 ]; then
"$DOCKER_BIN" inspect "$GITEA_NAME" | grep -q "\"SynoStatus\": \"running\"," || exit 1
else
"$DOCKER_BIN" inspect "$GITEA_NAME" | grep -q "\"Status\": \"running\"," || exit 1
fi
exit 0
;;
*)
exit 1
;;
esac
| true
|
87014f1eeac87fe70690bc4ee6ddc587a1e1b234
|
Shell
|
cscetbon/cassandrasidecar-go-client
|
/regenerate.sh
|
UTF-8
| 880
| 3.015625
| 3
|
[] |
no_license
|
#! /bin/bash
# Regenerate the Cassandra Sidecar Go client from the OpenAPI spec, then
# flatten the generated tree, move docs/go.mod/README to the repo root and
# rewrite the module path.
# NOTE(review): the exported wv_* variables appear to be consumed by
# generate_api_client.sh -- do not rename them without checking that script.
export wv_go_path="module github.com/cscetbon/cassandrasidecar-go-client"
export workspace_dir=$PWD
export wv_tmp_dir=${workspace_dir}/pkg
echo Generating Cassandra Sidecar Go client
rm -fr ${workspace_dir}/pkg && mkdir -p ${workspace_dir}/pkg/cassandrasidecar
cd resources/client_gen
./generate_api_client.sh
cd -
echo Cleaning Cassandra Sidecar Go client
# Hoist the generated client one level up and drop generator artifacts.
mv ${wv_tmp_dir}/cassandrasidecar/go-experimental-client/* ${wv_tmp_dir}/cassandrasidecar && rm -rf ${wv_tmp_dir}/cassandrasidecar/go-experimental-client
rm -f ${wv_tmp_dir}/cassandrasidecar/go.sum
rm -f ${wv_tmp_dir}/cassandrasidecar/git_push.sh
echo Moving documentations files to root dirs
rm -fr docs && mv ${wv_tmp_dir}/cassandrasidecar/docs .
mv ${wv_tmp_dir}/cassandrasidecar/README.md .
mv ${wv_tmp_dir}/cassandrasidecar/go.mod .
# Replace line 1 of go.mod with our module path and pin the Go version.
sed -i.bak "1s#.*#$wv_go_path#" go.mod && echo "go 1.14" >> go.mod
| true
|
062d1ff6a2f66a4d2e5d4f13781733684503a27b
|
Shell
|
Sydney-Informatics-Hub/Bootstrapping-for-BQSR
|
/bsv-R1-S9_bqsr_apply_make_input.sh
|
UTF-8
| 5,612
| 3.5
| 4
|
[] |
no_license
|
#!/bin/bash
#########################################################
#
# Platform: NCI Gadi HPC
# Description: make inputs file for parallel exectuion of GATK ApplyBQSR
# Details:
# Job can be run as separate tumour/normal jobs, or as one job.
# The contigs do take longer to print for tumour compared to normal re
# more data to print, but the impact of input sample size on effiency is
# lower than for other jobs, as there are many more tasks than CPUs for
# this job and the walltime discrepancies among tasks are somewhat absorbed
# by the large number of tasks. The walltime is capped by the time to print
# chromosome 1, so the inputs are sorted by contig size so that the largest
# contigs are processed first. If no binomial grouping is desired, change
# group=true to group=false. Assumes all non-cancer samples have suffix '-N',
# all other phenotype IDs are assigned to tumour.
# Sample info is read from <cohort>.config
#
# Author: Cali Willet
# cali.willet@sydney.edu.au
# Date last modified: 14/10/2020
#
# If you use this script towards a publication, please acknowledge the
# Sydney Informatics Hub (or co-authorship, where appropriate).
#
# Suggested acknowledgement:
# The authors acknowledge the scientific and technical assistance
# <or e.g. bioinformatics assistance of <PERSON>> of Sydney Informatics
# Hub and resources and services from the National Computational
# Infrastructure (NCI), which is supported by the Australian Government
# with access facilitated by the University of Sydney.
#
#########################################################
# <cohort>/<dict> are placeholders substituted before running -- TODO confirm.
cohort=<cohort>
config=${cohort}.config
group=false
# Output inputs-lists: tumour-only, normal-only, and combined.
t_input=./Inputs/bqsr_apply.inputs-tumour
n_input=./Inputs/bqsr_apply.inputs-normal
input=./Inputs/bqsr_apply.inputs
rm -f $t_input
rm -f $n_input
rm -f $input
dict=<dict>
tasks=7 # Need to work out a way of automating this, but for now ths is decided manually: use the dict file to determine the optimal number of tasks, which will usually be the number of autosomes + X plus 1,
# ie do all the large contigs as separate tasks, then 1 extra task to do the remaining contigs (Y if present, MT, unplaced) plus the unmapped in one list
# Doing it this way instead of all contigs as their own parallel tasks will not speed up this job, but will increase speed and lower RAM usage for the subsequent merge job.
### 3/9/20: submitted ticket to GATK re -L unmapped not working for Devils. For now. must run unmapped as it's own task and use f12 extracted BAM and no -L flag, so intervals has been increased from 7 to 8
### 7/10/20: the issue with the -L unmapped not working seems to be due to the large chrs (of course!) as for read pairs where the inferred insert size is massive, ValidateSamFile throws an error.
((tasks--)) # reduce the value of 'tasks' to be zero-based to match the contigs array
# Contig names are taken from the @SQ SN: fields of the sequence dictionary.
contigs=$(awk '$2~/^SN/ {print $2}' $dict | sed 's/SN\://')
contigs=($contigs)
#contigs+=( "unmapped" )
# Append "<sample>,<intervals_file>" to the combined list, or to the
# tumour/normal list (by '-N' suffix) when binomial grouping is enabled.
# Uses the caller's $labSampleID and $intervals_file globals.
function print_inputs {
if [[ $group = true ]]
then
if [[ $labSampleID = *-N ]]
then
printf "${labSampleID},${intervals_file}\n" >> $n_input
else
printf "${labSampleID},${intervals_file}\n" >> $t_input
fi
else
printf "${labSampleID},${intervals_file}\n" >> $input
fi
}
# One task per large contig per sample, plus a catch-all task for the
# remaining small contigs and a separate task for unmapped reads.
awk 'NR>1' ${config} | while read CONFIG
do
labSampleID=`echo $CONFIG | cut -d ' ' -f 2`
intervals_file=./Inputs/bqsr_apply_${labSampleID}.intervals
for ((i=0;i<${#contigs[@]};i++)) # make intervals file
do
#chrs=$(expr $tasks - 2) ###hack re unmapped issue - was $i -lt $tasks
#if [[ $i -le $chrs ]] # the first 6 chrs
if [[ $i -lt $tasks ]]
then
intervals="-L ${contigs[i]}"
intervals_file=./Inputs/bqsr_apply_${labSampleID}.${i}.intervals
echo ${intervals} > ${intervals_file}
print_inputs
intervals=''
#elif [[ ${contigs[$i]} != 'unmapped' ]] # contigs 7 - 105. hack for unmapped issue - was just 'else'
else
#label=$(expr $tasks - 1) # yet another hack
#intervals_file=./Inputs/bqsr_apply_${labSampleID}.${label}.intervals #hak - label var was tasks
intervals_file=./Inputs/bqsr_apply_${labSampleID}.${tasks}.intervals
intervals+=" -L ${contigs[i]}"
#echo ${intervals} > ${intervals_file}
#print_inputs
#else # the unmapped hack - run unmapped as it's own private task
#intervals_file=./Inputs/bqsr_apply_${labSampleID}.${tasks}.intervals
#intervals=''
#touch ${intervals_file}
#print_inputs
fi
done
# Flush the accumulated small-contig interval list as the final -L task.
echo ${intervals} > ${intervals_file}
print_inputs
last=$(expr $tasks + 1) # hack to add on the unmapped as a separate task instead of being at the end of the long list interval
intervals_file=./Inputs/bqsr_apply_${labSampleID}.${last}.unmapped.intervals
touch $intervals_file # just to make it easy to parse to run sh script
print_inputs
done
# Sort each produced list by the numeric task index (field 3 of the
# dot-separated filename) so the largest contigs are dispatched first.
if [ -f $input ]
then
tasks=`wc -l < $input`
printf "Number of ApplyBQSR tasks to run: ${tasks}\n"
sort -t '.' -n -k3 $input > ./Inputs/bqsr_apply_reordered.input
mv ./Inputs/bqsr_apply_reordered.input $input
fi
if [ -f $n_input ]
then
tasks=`wc -l < $n_input`
printf "Number of ApplyBQSR normal sample tasks to run: ${tasks}\n"
sort -t '.' -n -k3 $n_input > ./Inputs/bqsr_apply_reordered_normal.input
mv ./Inputs/bqsr_apply_reordered_normal.input $n_input
fi
if [ -f $t_input ]
then
tasks=`wc -l < $t_input`
printf "Number of ApplyBQSR tumour sample tasks to run: ${tasks}\n"
sort -t '.' -n -k3 $t_input > ./Inputs/bqsr_apply_reordered_tumour.input
mv ./Inputs/bqsr_apply_reordered_tumour.input $t_input
fi
| true
|
8ecbddbd140c90b786741b85132bca7d57908f7e
|
Shell
|
leitu/jobs
|
/05_daily/setup.sh
|
UTF-8
| 396
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
# Register the daily hubcron job for root in the OS-appropriate crontab.
# Linux uses the system crontab (with a user field); SunOS uses root's
# per-user crontab.  Exits 1 on an unrecognised OS.
os=$(uname -s)
if [ "$os" = "Linux" ]; then
  echo "23 23 * * * root test -x /var/hubcron/cron-daily && /var/hubcron/cron-daily >/dev/null 2>&1" >> /etc/crontab
elif [ "$os" = "SunOS" ]; then
  echo "23 23 * * * test -x /var/hubcron/cron-daily && /var/hubcron/cron-daily >/dev/null 2>&1" >> /var/spool/cron/crontabs/root
else
  echo "Unable to detect OS"
  exit 1
fi
| true
|
41d4366197909e0a788419cfa28a0fbf0b63d519
|
Shell
|
ionian/scripts
|
/dwmscripts/dwmbattery
|
UTF-8
| 333
| 2.90625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# dwm status-bar battery segment.  Prints "BAT <pct>%" while discharging
# (colour escape \x03 when below 15%, \x04 otherwise) or "AC <pct>%" when
# on mains power, reading sysfs directly.
bat(){
    local ac_online pct
    ac_online="$(</sys/class/power_supply/ADP1/online)"
    pct="$(</sys/class/power_supply/BAT1/capacity)"
    if [[ $ac_online -ne 0 ]]; then
        echo -e "AC \x06$pct%\x01"
    elif [[ $pct -ge 15 ]]; then
        echo -e "BAT \x04$pct%\x01"
    else
        echo -e "BAT \x03$pct%\x01"
    fi
}
|
083cb5218d254a0e66e95b6589eb1ca36e06221c
|
Shell
|
GsSqueak/GsSqueak
|
/deprecated/scripts/parseTodeitOutput
|
UTF-8
| 139
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
# Scan stdin (tode-it output) line by line; exit 7 as soon as a line
# reports a load error or an interactive "Proceed" prompt, else exit 0.
# The matching line is echoed (grep's stdout) for the caller's log.
# FIX: read -r, so lines containing backslashes are not mangled before
# matching (plain `read` interprets backslash escapes, SC2162).
while IFS= read -r line; do
  echo "$line" | grep -E "errors while loading|Press 'Proceed' to continue\." && exit 7
done
exit 0
| true
|
83aa5e00e094ebbac8ec403279e161c2ae5d5d98
|
Shell
|
kdave/xfstests
|
/tests/xfs/503
|
UTF-8
| 1,963
| 3.234375
| 3
|
[] |
no_license
|
#! /bin/bash
# SPDX-License-Identifier: GPL-2.0+
# Copyright (c) 2019 Oracle, Inc. All Rights Reserved.
#
# FS QA Test No. 503
#
# Populate a XFS filesystem and ensure that metadump, mdrestore, and copy
# all work properly.
#
# Helper functions (_begin_fstest, _scratch_*, _check_scratch_fs, ...)
# come from the sourced xfstests common/ libraries.
. ./common/preamble
_begin_fstest auto copy metadump
_register_cleanup "_cleanup" BUS
# Override the default cleanup function.
_cleanup()
{
cd /
rm -rf $tmp.* $testdir
}
# Import common functions.
. ./common/filter
. ./common/populate
testdir=$TEST_DIR/test-$seq
# real QA test starts here
_supported_fs xfs
_require_command "$XFS_MDRESTORE_PROG" "xfs_mdrestore"
_require_xfs_copy
_require_scratch_nocheck
_require_populate_commands
_xfs_skip_online_rebuild
_xfs_skip_offline_rebuild
echo "Format and populate"
_scratch_populate_cached nofill > $seqres.full 2>&1
mkdir -p $testdir
# One metadump per flag combination: plain, -a (full metadata),
# -g (show progress), and -a -g together.
metadump_file=$testdir/scratch.md
metadump_file_a=${metadump_file}.a
metadump_file_g=${metadump_file}.g
metadump_file_ag=${metadump_file}.ag
copy_file=$testdir/copy.img
echo metadump
_scratch_xfs_metadump $metadump_file >> $seqres.full
echo metadump a
_scratch_xfs_metadump $metadump_file_a -a >> $seqres.full
echo metadump g
_scratch_xfs_metadump $metadump_file_g -g >> $seqres.full
echo metadump ag
_scratch_xfs_metadump $metadump_file_ag -a -g >> $seqres.full
# xfs_copy out to an image, verify it, then copy back and verify the
# scratch device still checks clean and mounts.
echo copy
$XFS_COPY_PROG $SCRATCH_DEV $copy_file >> $seqres.full
_check_scratch_fs $copy_file
echo recopy
$XFS_COPY_PROG $copy_file $SCRATCH_DEV >> $seqres.full
_scratch_mount
_check_scratch_fs
_scratch_unmount
# Restore each metadump variant over the scratch device and verify.
echo mdrestore
_scratch_xfs_mdrestore $metadump_file
_scratch_mount
_check_scratch_fs
_scratch_unmount
echo mdrestore a
_scratch_xfs_mdrestore $metadump_file_a
_scratch_mount
_check_scratch_fs
_scratch_unmount
echo mdrestore g
_scratch_xfs_mdrestore $metadump_file_g
_scratch_mount
_check_scratch_fs
_scratch_unmount
echo mdrestore ag
_scratch_xfs_mdrestore $metadump_file_ag
_scratch_mount
_check_scratch_fs
_scratch_unmount
# success, all done
status=0
exit
| true
|
520c4891f27d1be58f024d156bc13ae7251f7f03
|
Shell
|
ibm-apiconnect/pot-onprem-core
|
/env-setup/liberty/setup.sh
|
UTF-8
| 5,285
| 3.3125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Lab environment bootstrap: wipes any previous Liberty/IHS install, downloads
# the packages, installs the Liberty collective controller + member via npm,
# then installs and registers IBM HTTP Server with the collective.
# Must be run as root; performs user-level steps via `sudo -u student`.
# PRE-REQS
# Install Node 4.4.x using instructions at https://nodejs.org/en/download/package-manager/#debian-and-ubuntu-based-linux-distributions
# Include node install build tools: sudo apt-get install -y build-essential
# Package Locations
# ihs.tgz = https://ibm.box.com/shared/static/z3fy8hf0okmas0qczvbqisox6fhkurlv.tgz
# plugin.tgz = https://ibm.box.com/shared/static/wuzrq1tzauylyylnmhthq96ihsp3srup.tgz
# apiconnect-collective-member-1.0.78.tgz = https://ibm.box.com/shared/static/6fpdnotlfdcni9axgulrkf96fv4k21t1.tgz
# apiconnect-collective-member-1.0.78-deps.json = https://ibm.box.com/shared/static/04kssbcn5bty86h8ztt9ef4ly2qla2om.json
# apiconnect-collective-controller-linux-x86_64-1.0.8.tgz = https://ibm.box.com/shared/static/2d5r8tv8ny1qke77yxcjivt64xp5g6yx.tgz
# apiconnect-collective-controller-linux-x86_64-1.0.8-deps.json = https://ibm.box.com/shared/static/8zbrh3n6gt8pm76hz6sd46u1wzxt3hv2.json
# Set Variables
ihs_fn="ihs.tgz"
ihs_url="https://ibm.box.com/shared/static/z3fy8hf0okmas0qczvbqisox6fhkurlv.tgz"
plugin_fn="plugin.tgz"
plugin_url="https://ibm.box.com/shared/static/wuzrq1tzauylyylnmhthq96ihsp3srup.tgz"
liberty_member_fn="member.tgz"
liberty_member_url="https://ibm.box.com/shared/static/6fpdnotlfdcni9axgulrkf96fv4k21t1.tgz"
liberty_member_deps_fn="member-deps.json"
liberty_member_deps_url="https://ibm.box.com/shared/static/04kssbcn5bty86h8ztt9ef4ly2qla2om.json"
liberty_controller_fn="controller.tgz"
liberty_controller_url="https://ibm.box.com/shared/static/2d5r8tv8ny1qke77yxcjivt64xp5g6yx.tgz"
liberty_controller_deps_fn="controller-deps.json"
liberty_controller_deps_url="https://ibm.box.com/shared/static/8zbrh3n6gt8pm76hz6sd46u1wzxt3hv2.json"
download_dir="/home/student/Downloads/apic-liberty-packages"
# Clean up Liberty and IHS
# Best-effort teardown: `|| true` / killall keep the script going even when
# nothing is running from a previous install.
clear
printf "\nCleaning Config...\n"
wlpn-controller stop || true
/opt/IBM/HTTPServer/bin/apachectl -k stop
killall -9 node || true
killall -9 java || true
rm -rf ~/wlpn
rm -rf ~/.liberty
rm -rf /opt/IBM
rm -rf $download_dir
echo `npm uninstall -g apiconnect-collective-controller`
echo `npm uninstall -g apiconnect-collective-member`
# Install OpenSSH Server
printf "\nInstalling SSH Server...\n"
apt-get install openssh-server
# Download Packages...
sudo -u student mkdir $download_dir
printf "\nDownloading IHS Package...\n"
echo `curl -k -L -o "$download_dir/$ihs_fn" "$ihs_url"`
printf "\nDownloading IHS Plugin Package...\n"
echo `curl -k -L -o "$download_dir/$plugin_fn" "$plugin_url"`
printf "\nDownloading Liberty Member Package...\n"
echo `curl -k -L -o "$download_dir/$liberty_member_fn" "$liberty_member_url"`
printf "\nDownloading Liberty Member Deps JSON...\n"
echo `curl -k -L -o "$download_dir/$liberty_member_deps_fn" "$liberty_member_deps_url"`
printf "\nDownloading Liberty Controller Package...\n"
echo `curl -k -L -o "$download_dir/$liberty_controller_fn" "$liberty_controller_url"`
printf "\nDownloading Liberty Controller Deps JSON...\n"
echo `curl -k -L -o "$download_dir/$liberty_controller_deps_fn" "$liberty_controller_deps_url"`
echo `chown -R student:student "$download_dir"`
# Install Liberty Packages
printf "\nInstalling Liberty Controller Package, this may take a minute...\n"
echo `npm install -g --unsafe-perm "$download_dir/$liberty_controller_fn"`
printf "\nInstalling Liberty Member Package, this may take a minute...\n"
echo `npm install -g --unsafe-perm "$download_dir/$liberty_member_fn"`
# Configure Liberty
printf "\nStarting Liberty Controller...\n"
sudo -u student wlpn-controller setup --password=Passw0rd!
sudo -u student wlpn-controller start
printf "\nConfiguring Liberty Collective...\n"
sleep 5
sudo -u student wlpn-collective updateHost xubuntu-vm --user=student --password=Passw0rd! --port=9443 --host=xubuntu-vm --rpcUser=student --rpcUserPassword=Passw0rd! --autoAcceptCertificates
# Install IHS
printf "\nInstalling IHS...\n"
mkdir -p /opt/IBM
cd /opt/IBM/
echo `tar xf "$download_dir/$ihs_fn"`
echo `tar xf "$download_dir/$plugin_fn"`
# Configure IHS
printf "\nConfiguring IHS...\n"
cd /home/student/.liberty/wlp/bin/
sudo -u student wlpn-controller ihsSetup --host=xubuntu-vm --port=9443 --user=student --password=Passw0rd! --keystorePassword=Passw0rd! --pluginInstallRoot="/opt/IBM/WebSphere/Plugins/" --webServerNames=webserver1
printf "\nCopying IHS Keys...\n"
sleep 5
cp plugin-cfg.xml /opt/IBM/WebSphere/Plugins/config/webserver1/
cp plugin-key.jks /opt/IBM/WebSphere/Plugins/config/webserver1/
printf "\nRegistering IHS with Liberty...\n"
sudo -u student wlpn-controller ihsRegister --host=xubuntu-vm --port=9443 --user=student --password=Passw0rd! --ihsIp=xubuntu-vm --ihsPort=80
# Convert the generated JKS keystore to CMS format for IHS and stash the
# password, then mark the "default" certificate as the default entry.
printf "\nConverting IHS Keys...\n"
cd /opt/IBM/HTTPServer/bin/
rm /opt/IBM/WebSphere/Plugins/config/webserver1/plugin-key.kdb
./gskcmd -keydb -convert -pw Passw0rd! -db /opt/IBM/WebSphere/Plugins/config/webserver1/plugin-key.jks -old_format jks -target /opt/IBM/WebSphere/Plugins/config/webserver1/plugin-key.kdb -new_format cms -stash
sleep 5
./gskcmd -cert -setdefault -pw Passw0rd! -db /opt/IBM/WebSphere/Plugins/config/webserver1/plugin-key.kdb -label default
sleep 5
# Start IBM HTTP Server
printf "\nStarting IHS...\n"
/opt/IBM/HTTPServer/bin/apachectl -k start
printf "\nSetup Complete!\n"
| true
|
be3da92f470461d0ea6dadd5730ae752f98733fa
|
Shell
|
NeuroStat/cluster-stability-m
|
/00_nulldata/02_2nd_stab/s6/02_all20_OLS_threshold
|
UTF-8
| 7,402
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
# Bootstrap cluster-stability analysis for one selection of 20 subjects.
# Usage: <script> <selection-index S> <number-of-bootstraps NBOOT>
# Runs an OLS group analysis (FSL flameo), thresholds it at several
# cluster-defining p-values, then repeats the analysis on NBOOT bootstrap
# resamples and averages the binarised cluster maps into stability maps.
S=$1
NBOOT=$2
rstart=1
START=$(date +%s)
simid=$(printf 'sel%04d' $S)
selection=$(printf 'n20%04d' $S)
# Data folder where the data is supposed to be stored. cope/varcopes.
# DDir="./Onderzoek/doctoraat/scripts/Paper04/01_data/02_emotion/02_all/00_rawData"
# tmpdir="./Onderzoek/doctoraat/scripts/Paper04/01_data/02_emotion/01_smallN/tmp/"
# HDIR="./Onderzoek/doctoraat/scripts/Paper04/02_scripts/02_emotion/01_groupsof10"
# SDir="./Onderzoek/doctoraat/scripts/Paper04/01_data/02_emotion/01_smallN/"
DDir=./Onderzoek/01_fcon/
tmpdir=./Onderzoek/Paper04/01_data/00_null/00_tmp_s6_20/
SDir=./Onderzoek/Paper04/01_data/00_null_s6/
HDIR=./Onderzoek/Paper04/02_scripts/03_null/02_2nd_stab/s6
SDIR=$SDir$selection
sDir=$tmpdir$simid
echo "delete previous selection "
[ -d $SDIR ] && rm -r $SDIR || echo 'Directory was not found'
mkdir -p $SDIR
echo "delete previous tmp files folder "
[ -d $sDir ] && rm -r $sDir || echo 'Directory was not found'
mkdir -p $sDir
cd $sDir
# select the individuals to take in the sample e.g. 1-10
python $HDIR/02_sel20.py $S 198 > $SDIR/tmp
mkdir $sDir/data
# Copy the selected subjects' data locally (indices in tmp are zero-based,
# folder names are one-based and zero-padded).
for i in $(cat $SDIR/tmp)
do
j=$(expr $i + 1)
foldid=$(printf '%03d' $j)
echo $foldid
cp -r $DDir$foldid $sDir/data
done
DDir=$sDir/data
# merge the COPE/VARCOPE/MASK for the group analysis and obtain a group map.
cd $DDir
fslmerge -t $sDir/copeREG `
for i in $(cat $SDIR/tmp)
do
j=$(expr $i + 1)
foldid=$(printf '%03d' $j)
echo ./$foldid/02_s6/cope1
done
` 2> $SDIR/null
fslmerge -t $sDir/varcopeREG `
for i in $(cat $SDIR/tmp)
do
j=$(expr $i + 1)
foldid=$(printf '%03d' $j)
echo ./$foldid/02_s6/varcope1
done
` 2> $SDIR/null
fslmerge -t $sDir/MASK `
for i in $(cat $SDIR/tmp)
do
j=$(expr $i + 1)
foldid=$(printf '%03d' $j)
echo ./$foldid/02_s6/*mask*
done
` 2> $SDIR/null
#for efficiency, everything is copied to the tmp folder. As such this does not cause read/write problems on the cluster,
#when e.g. 2 run access the file simultaneously.
cd $sDir
cp $HDIR/02_design* $sDir
cp $HDIR/cluster.R $sDir
cp $HDIR/detMINclus.R $sDir
cp $HDIR/02_RandomNR.py $sDir
cp $HDIR/00_results.R $sDir
# copy the design files
$FSLDIR/bin/flameo --cope=./copeREG --mask=MASK --dm=02_design.mat --tc=02_design.con --cs=02_design.grp --runmode=ols --ld=$SDIR/output
# make sure the mask is properly passed to the bootstrap samples
cp $SDIR/output/mask* $sDir
# threshold original image
# Estimate smoothness (DLH, volume) of the residuals for RFT thresholding.
smoothest -d 19 -r $SDIR/output/res4d.nii.gz -m $SDIR/output/mask.nii.gz -V > $SDIR/smoothness$selection
smoothest -d 19 -r $SDIR/output/res4d.nii.gz -m $SDIR/output/mask.nii.gz | awk '/DLH/ {print $2}' > $SDIR/thresh_zstat1.dlh
smoothest -d 19 -r $SDIR/output/res4d.nii.gz -m $SDIR/output/mask.nii.gz | awk '/VOLUME/ {print $2}' > $SDIR/thresh_zstat1.vol
# z = 3.090232 corresponds to a one-sided p of 0.001.
$FSLDIR/bin/cluster -i $SDIR/output/zstat1 -t 3.090232 -d $(cat $SDIR/thresh_zstat1.dlh) -p 1.0 --volume=$(cat $SDIR/thresh_zstat1.vol) --oindex=$SDIR/cl > $SDIR/filecon
# detMINclus.R: minimum cluster extent for each cluster-level alpha.
Rscript $sDir/detMINclus.R 0.001 $SDIR/output/mask.nii.gz $SDIR/smoothness$selection > $SDIR/VAR
var001=$(cat $SDIR/VAR)
Rscript $sDir/detMINclus.R 0.005 $SDIR/output/mask.nii.gz $SDIR/smoothness$selection > $SDIR/VAR
var005=$(cat $SDIR/VAR)
Rscript $sDir/detMINclus.R 0.01 $SDIR/output/mask.nii.gz $SDIR/smoothness$selection > $SDIR/VAR
var01=$(cat $SDIR/VAR)
Rscript $sDir/detMINclus.R 0.05 $SDIR/output/mask.nii.gz $SDIR/smoothness$selection > $SDIR/VAR
var05=$(cat $SDIR/VAR)
Rscript $sDir/cluster.R $SDIR/filecon $SDIR $var001
minVAR001=$(awk 'FNR == 1 {print $1}' $SDIR/cltmp)
Rscript $sDir/cluster.R $SDIR/filecon $SDIR $var005
minVAR005=$(awk 'FNR == 1 {print $1}' $SDIR/cltmp)
Rscript $sDir/cluster.R $SDIR/filecon $SDIR $var01
minVAR01=$(awk 'FNR == 1 {print $1}' $SDIR/cltmp)
Rscript $sDir/cluster.R $SDIR/filecon $SDIR $var05
minVAR05=$(awk 'FNR == 1 {print $1}' $SDIR/cltmp)
# Binarise the cluster-index image at each alpha's minimum cluster index.
fslmaths $SDIR/cl.nii.gz -thr $minVAR001 -bin $SDIR/out_$simid\_001
fslmaths $SDIR/cl.nii.gz -thr $minVAR005 -bin $SDIR/out_$simid\_005
fslmaths $SDIR/cl.nii.gz -thr $minVAR01 -bin $SDIR/out_$simid\_01
fslmaths $SDIR/cl.nii.gz -thr $minVAR05 -bin $SDIR/out_$simid\_05
# compute stability
for ((r=$rstart; r<=$NBOOT; r++ ))
do
#statements
id=$(printf 'p%04d' $r)
echo $id
foldtmp=$sDir/$id
[ -d $foldtmp ] && rm -r $foldtmp || echo 'Directory was not found'
mkdir -p $foldtmp
cd $foldtmp
echo "python time"
# Draw a bootstrap resample (with replacement) of the 20 subjects.
python $sDir/02_RandomNR.py $SDIR/tmp $S $r 20 > ran$r
# merge the cope files
fslmerge -t $foldtmp/cREG `
for i in $(cat ran$r)
do
j=$(expr $i + 1)
foldid=$(printf '%03d' $j)
echo $DDir/$foldid/02_s6/cope1
done
` 2> $SDIR/null
fslmerge -t $foldtmp/vcREG `
for i in $(cat ran$r)
do
j=$(expr $i + 1)
foldid=$(printf '%03d' $j)
echo $DDir/$foldid/02_s6/varcope1
done
` 2> $SDIR/null
#copy the design information in the saving directory
cp $sDir/02_design* $foldtmp
# Actual analysis
cd $foldtmp
$FSLDIR/bin/flameo --cope=$foldtmp/cREG --varcope=$foldtmp/vcREG --mask=$sDir/mask --dm=$foldtmp/02_design.mat --tc=$foldtmp/02_design.con --cs=$foldtmp/02_design.grp --runmode=ols --ld=$foldtmp/output > $SDIR/null
# save smoothness FWE
# NOTE(review): $S_$r parses as the (unset) variable "S_" followed by $r,
# so the file is named "smoothness<r>"; likely ${S}_$r was intended.
smoothest -d 19 -r $foldtmp/output/res4d.nii.gz -m $foldtmp/output/mask.nii.gz > $SDIR/smoothness$S_$r
# Threshold the image to binary
$FSLDIR/bin/cluster -i $foldtmp/output/zstat1 -t 3.090232 --oindex=$foldtmp/cl > $foldtmp/filecon
Rscript $sDir/cluster.R $foldtmp/filecon $foldtmp $var001 > $SDIR/null
minVAR001=$(awk 'FNR == 1 {print $1}' $foldtmp/cltmp)
Rscript $sDir/cluster.R $foldtmp/filecon $foldtmp $var005 > $SDIR/null
minVAR005=$(awk 'FNR == 1 {print $1}' $foldtmp/cltmp)
Rscript $sDir/cluster.R $foldtmp/filecon $foldtmp $var01 > $SDIR/null
minVAR01=$(awk 'FNR == 1 {print $1}' $foldtmp/cltmp)
Rscript $sDir/cluster.R $foldtmp/filecon $foldtmp $var05 > $SDIR/null
minVAR05=$(awk 'FNR == 1 {print $1}' $foldtmp/cltmp)
fslmaths $foldtmp/cl.nii.gz -thr $minVAR001 -bin $foldtmp/out_$simid\_$id\_var001
fslmaths $foldtmp/cl.nii.gz -thr $minVAR005 -bin $foldtmp/out_$simid\_$id\_var005
fslmaths $foldtmp/cl.nii.gz -thr $minVAR01 -bin $foldtmp/out_$simid\_$id\_var01
fslmaths $foldtmp/cl.nii.gz -thr $minVAR05 -bin $foldtmp/out_$simid\_$id\_var05
# save zmap en thesholded zmap.
#cp $foldtmp/output/zstat1* $SDIR/zstat_$simid\_$id\.nii.gz
cp $foldtmp/out_$simid* $SDIR/
rm -r $foldtmp
done
cd $SDIR
# Accumulate the binarised bootstrap maps, starting from zeroed images.
fslmaths ./output/zstat1.nii.gz -mul 0 OUT001.nii.gz
fslmaths ./output/zstat1.nii.gz -mul 0 OUT005.nii.gz
fslmaths ./output/zstat1.nii.gz -mul 0 OUT01.nii.gz
fslmaths ./output/zstat1.nii.gz -mul 0 OUT05.nii.gz
for ((r=1; r<=$NBOOT; r++ ))
do
#statements
# generate random noise and random set of numbers 1...10 and simulate noise saved in folder with resulst
id=$(printf 'p%04d' $r)
fslmaths OUT001 -add out_$simid\_$id\_var001 OUT001
fslmaths OUT005 -add out_$simid\_$id\_var005 OUT005
fslmaths OUT01 -add out_$simid\_$id\_var01 OUT01
fslmaths OUT05 -add out_$simid\_$id\_var05 OUT05
done
# calculate the stability -map
fslmaths OUT001 -div $NBOOT OUT001
fslmaths OUT005 -div $NBOOT OUT005
fslmaths OUT01 -div $NBOOT OUT01
fslmaths OUT05 -div $NBOOT OUT05
# analyze results and combine results in one table
Rscript $sDir/00_results.R OUT001 OUT005 OUT01 OUT05
rm out_$simid\_*
rm -r $sDir
cp $SDIR/output/zstat* $SDIR
cp $SDIR/output/mask* $SDIR
rm -r $SDIR/output
rm $SDIR/null
# decide what voxels to incorporate for loop 1
echo "job finished in"
END=$(date +%s)
DIFF=$(echo "scale=3; ( $END - $START ) / 60.00" | bc)
echo $DIFF
| true
|
7c4e67e0534163062bb2081be6ac0ccbbe59562d
|
Shell
|
geniorgeous/ShellUtils
|
/rmfiles.sh
|
UTF-8
| 447
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
# Removes all the regular files (not the directories) directly inside the
# directory given as $1.
# INIT: arguments number checking
if [ $# != 1 ]
then
	echo "USAGE: `basename $0` <directory_name>
DESCRIPTION: removes all the files (not the directories) in the directory \$1
"
	exit
fi
# INIT: check directory $1 exists
if [ ! -e "$1" ]
then
	echo "directory \"$1\" does not exist"
	exit
fi
# FIX: previously this ran `find .` -- it validated $1 but then deleted the
# files of the *current* directory.  Also replaced the sourced IFS-toggling
# hack with find -exec, which is safe for any filename (spaces, globs, -).
find "$1" -maxdepth 1 -type f -exec rm -- {} +
| true
|
9f540f4c9d2ddaeaf756fa6abf117aab8f9d5cc0
|
Shell
|
andrejsim/lambda-quick
|
/package-lambda-quick.sh
|
UTF-8
| 947
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/bash
# Package the lambda-quick function into a zip, upload it to S3 and point
# the existing Lambda function at the new object.  Requires the aws CLI
# with credentials configured; exits non-zero on any failed step.

# log <msg...> -- timestamped progress line on stdout.
function log {
    echo "> $(date +%T) $*"
}

name="dummy-lambda-quick"
bucket="datafabric-nonprod-lambdas"
lambdafile="lambda_function.py"

workdir=python
# FIX: -p so reruns don't abort when the staging directory already exists;
# also dropped a stray `touch ${zipfile}` that left an empty file behind.
mkdir -p "${workdir}"
log "${workdir}"
log "${bucket}"
log "${lambdafile}"

zipfile="${name}.zip"
log "creating zipfile: ${zipfile}"
cd "${workdir}" || exit
ls -la
zip "${zipfile}" ./*

key=${zipfile}
log "Starting upload to S3"
aws s3 cp "${zipfile}" "s3://${bucket}/${key}"

log "checking if function exists"
if ! aws lambda get-function --function-name "${name}" &> /dev/null; then
    echo "Function doesn\'t exist yet, please create it first"
    echo
    exit 1
fi

log "Updating function code"
# Capture aws's output; the `if !` tests the command's own exit status.
if ! result=$(aws lambda update-function-code --function-name "${name}" --s3-bucket "${bucket}" --s3-key "${key}" 2> /dev/null); then
    log "Function update failed"
    echo "${result}"
    # FIX: previously fell through and logged success after a failure.
    exit 1
fi
log "Function updated successfully"
| true
|
1ddb741fe69b57f71bca2ff23adce266d714669e
|
Shell
|
salilkanitkar/branch_predictor
|
/gshare_graphs.sh
|
UTF-8
| 688
| 2.875
| 3
|
[] |
no_license
|
#!/bin/sh
# Sweep the gshare branch predictor over history lengths i=7..12 and index
# widths j=2..i (step 2) for three traces, collecting "i j mispred_rate"
# points per trace into gshare.points.<trace> for plotting.
# NOTE(review): uses bash-only ((...)) loops under #!/bin/sh -- only works
# where /bin/sh is bash; confirm or change the shebang.
make clean ; make
# Remove stale per-run outputs and old point files from a previous sweep.
for ((i=7 ; i<=12 ; i++)) ; do
for ((j=2 ; j<=$i ; j=j+2)) ; do
rm -f op.$i.$j
done;
done;
for file in gcc jpeg perl ; do
rm -f gshare.points.$file
done;
for file in gcc jpeg perl ; do
for ((i=7 ; i<=12 ; i++)) ; do
for ((j=2 ; j<=$i ; j=j+2)) ; do
trace_file="traces/"$file"_trace.txt"
./sim gshare $i $j $trace_file > op.$i.$j
# Pull the percentage out of the simulator's "misprediction rate" line.
mispred_rate=`cat op.$i.$j | grep "misprediction rate" | awk '{print $3}' | cut -d '%' -f 1`
echo "$i $j $mispred_rate" >> gshare.points.$file
# echo "$i $j $trace_file"
done;
done;
done;
# Clean up the intermediate op.* files and build artifacts.
for ((i=7 ; i<=12 ; i++)) ; do
for ((j=2 ; j<=$i ; j=j+2)) ; do
rm -f op.$i.$j
done;
done;
make clean ;
| true
|
9df2bead6f9ae3d6164fc37b2852ad6ea68b6425
|
Shell
|
Jhingun1/repo
|
/archlinuxcn/nodejs-jshint/PKGBUILD
|
UTF-8
| 581
| 2.59375
| 3
|
[] |
no_license
|
# Maintainer: Caleb Maclennan <caleb@alerque.com>
# Contributor: John D Jones III (jnbek) <https://aur.archlinux.org/account/jnbek>
# PKGBUILD sourced by makepkg: packages the jshint npm tarball for Arch.
_npmname=jshint
pkgname=nodejs-"$_npmname"
pkgver=2.10.2
pkgrel=1
pkgdesc='Static analysis tool for JavaScript'
arch=('any')
url='http://jshint.com/'
license=('MIT')
depends=('nodejs')
makedepends=('nodejs' 'npm')
source=("https://registry.npmjs.org/$_npmname/-/$_npmname-$pkgver.tgz")
sha256sums=('250fbe74b61670d7ca77dbba53724bec4d78a2c27bff58afcf5c1c006728f013')
# Let npm consume the tarball directly instead of makepkg extracting it.
noextract=("${source[@]##*/}")
# Install the downloaded tarball into the package root via npm's global prefix.
package() {
npm install -g --prefix "$pkgdir/usr" "${source[@]##*/}"
}
| true
|
e2c1024c9652f6cac144bd3d3b0e0a527accbcc7
|
Shell
|
natashayuhimuk/SipService
|
/SipService/files/sip_ test_calls/dadscript
|
UTF-8
| 1,989
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/bash
mainCatalogName="test_$(date)"
count=0
mkdir "$mainCatalogName"
echo "START TESTING"
echo "-------------------------------------------------------------------------------"
mkdir "$mainCatalogName"/FaildTests
mkdir "$mainCatalogName"/Tests
function createDirectory {
(( count++ ))
mkdir "$mainCatalogName"/Tests/test_$csvFile
touch "$mainCatalogName"/Tests/test_$csvFile/result.txt
}
function movingLogsToFolder {
if [[ -e uas_messages.log || -e uac_messages.log ]];
then
mv *messages.log "$mainCatalogName"/Tests/test_$csvFile
fi
if [[ -e uas_error.log || -e uac_error.log ]];
then
errors=1
mv *error.log "$mainCatalogName"/Tests/test_$csvFile
echo TestFaild
else
errors=0
echo TestPassed
fi
}
function testValidation {
if [[ $uasExitCod -eq 99 && $uacExitCod -eq 0 && $errors -eq 0 ]]
then
echo "0" > "$mainCatalogName"/Tests/test_$csvFile/result.txt
echo "-------------------------------------------------------------------------------"
else
echo "1" > "$mainCatalogName"/Tests/test_$csvFile/result.txt
echo "-------------------------------------------------------------------------------"
fi
}
# If result.txt flags a failure ("1"), copy the whole test directory
# into the FaildTests folder for later inspection.
# Globals read: mainCatalogName, csvFile.
function movingFaildTestToFolder {
    local failed_src="$mainCatalogName"/Tests/test_$csvFile
    if grep -q '1' "$failed_src"/result.txt
    then
        cp -r "$failed_src" "$mainCatalogName"/FaildTests
    fi
}
while read uas && read uac && read csvFile
do
createDirectory
echo "Start $csvFile $count test"
./sipp -bg -sf $uas -m 1 -nr -p 5066 -i 127.0.0.1 -rsa 127.0.0.1:5060 -trace_msg -message_file uas_messages.log -trace_err -error_file uas_error.log > /dev/null
uasExitCod=$?
./sipp -sf $uac -m 1 -inf $csvFile -p 5065 -trace_msg -message_file uac_messages.log -trace_err -error_file uac_error.log -i 127.0.0.1 -rsa 127.0.0.1:5060 127.0.0.1:5066 > /dev/null
uacExitCod=$?
sleep 1
tail -n 15 /home/igor/Oracle/Middleware/user_projects/domains/base_domain/sipservice-log.log
sleep 2
killall -9 sipp &> /dev/null
movingLogsToFolder
testValidation
movingFaildTestToFolder
done < "sipp_console"
| true
|
4a709ce151eea3a327eede2f633808d488171d2c
|
Shell
|
dobrindtlab/shell_scripts
|
/dependencies/blast_Venn-groups_vs_control-DBs.sh
|
UTF-8
| 1,791
| 3.5
| 4
|
[] |
no_license
|
#!/bin/bash
set -e
### script for the automatic analyse of extracted Venn group proteins of different Prokka runs.
if [ $# -lt 3 ]; then
echo "Usage: $0 input_folder[extracted-proteins] inputfolder[BLAST DB] output-path[Blast]"
exit 1
fi
[ -d "$3" ] || mkdir "$3"
for file in "$1"/*proteins.faa; do
prefix="${file%*_proteins.faa}"
name="${prefix##*/}"
### blast extracted proteins against control DBs.
echo "---------- BLAST "$name"-specific proteins against Database ----------"
blastp -task blastp -query "$file" -db "$2"/non-patho/non-patho -evalue 1e-9 -num_threads 32 -parse_deflines -outfmt '7 qseqid qacc sseqid pident length mismatch gapopen qstart qend sstart send evalue bitscore qcovs' -out "$3""$name"_vs_non-patho.out
blastp -task blastp -query "$file" -db "$2"/IPEC/IPEC -evalue 1e-9 -num_threads 32 -parse_deflines -outfmt '7 qseqid qacc sseqid pident length mismatch gapopen qstart qend sstart send evalue bitscore qcovs' -out "$3""$name"_vs_IPEC.out
blastp -task blastp -query "$file" -db "$2"/ExPEC/ExPEC -evalue 1e-9 -num_threads 32 -parse_deflines -outfmt '7 qseqid qacc sseqid pident length mismatch gapopen qstart qend sstart send evalue bitscore qcovs' -out "$3""$name"_vs_ExPEC.out
blastp -task blastp -query "$file" -db "$2"/IPEC_eae-pos/IPEC_eae-pos -evalue 1e-9 -num_threads 32 -parse_deflines -outfmt '7 qseqid qacc sseqid pident length mismatch gapopen qstart qend sstart send evalue bitscore qcovs' -out "$3""$name"_vs_IPEC-eae-pos.out
blastp -task blastp -query "$file" -db "$2"/IPEC_ST11_eae-pos/IPEC_ST11_eae-pos -evalue 1e-9 -num_threads 32 -parse_deflines -outfmt '7 qseqid qacc sseqid pident length mismatch gapopen qstart qend sstart send evalue bitscore qcovs' -out "$3""$name"_vs_IPEC-ST11.out
done
echo "---------- done ----------"
| true
|
e3c301ad6a6ffc9fc321545ba1b83293bd80ead9
|
Shell
|
thanksPei/ubuntu-scripts
|
/image/image-rotate
|
UTF-8
| 3,893
| 3.515625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# --------------------------------------------
# Rotate a list of image files
#
# Depends on :
# * mimetype (libfile-mimeinfo-perl package)
# * exiftool (libimage-exiftool-perl package)
# * pngtopnm, pnmtopng, giftopnm, ppmtogif, tifftopnm, pnmtotiff and pnmflip (netpbm package)
#
# Revision history :
# 09/10/2017, V1.0 - Creation by N. Bernaerts
# 01/03/2018, V1.1 - Jpeg lossless rotation with exiftool
# ---------------------------------------------------
# --------------------------
# check tools availability
# --------------------------
command -v mimetype >/dev/null 2>&1 || { zenity --error --text="Please install mimetype [libfile-mimeinfo-perl]"; exit 1; }
command -v exiftool >/dev/null 2>&1 || { zenity --error --text="Please install exiftool [libimage-exiftool-perl]"; exit 1; }
command -v pngtopnm >/dev/null 2>&1 || { zenity --error --text="Please install pngtopnm [netpbm]"; exit 1; }
command -v pnmtopng >/dev/null 2>&1 || { zenity --error --text="Please install pnmtopng [netpbm]"; exit 1; }
command -v giftopnm >/dev/null 2>&1 || { zenity --error --text="Please install giftopnm [netpbm]"; exit 1; }
command -v ppmtogif >/dev/null 2>&1 || { zenity --error --text="Please install ppmtogif [netpbm]"; exit 1; }
command -v tifftopnm >/dev/null 2>&1 || { zenity --error --text="Please install tifftopnm [netpbm]"; exit 1; }
command -v pnmtotiff >/dev/null 2>&1 || { zenity --error --text="Please install pnmtotiff [netpbm]"; exit 1; }
command -v pnmflip >/dev/null 2>&1 || { zenity --error --text="Please install pnmflip [netpbm]"; exit 1; }
# ----------------------
# JPEG rotation tables
# ----------------------
# no tag : no change
# 1 : no change
# 2 : horizontal mirror
# 3 : 180
# 4 : horizontal mirror + 180
# 5 : horizontal mirror + left
# 6 : right
# 7 : horizontal mirror + right
# 8 : left
ARR_LEFT=( "8" "8" "5" "6" "7" "4" "1" "2" "3" )
ARR_RIGHT=( "6" "6" "7" "8" "5" "2" "3" "4" "1" )
ARR_UPDOWN=( "3" "3" "4" "1" "2" "7" "8" "5" "6" )
ARR_MIRROR=( "2" "2" "1" "4" "3" "8" "7" "6" "5" )
# loop to retrieve arguments
while test $# -gt 0
do
case "$1" in
"--right") ARR_ROTATE=( "${ARR_RIGHT[@]}" ); PNM_ANGLE="270"; shift; ;;
"--left") ARR_ROTATE=( "${ARR_LEFT[@]}" ); PNM_ANGLE="90"; shift; ;;
"--up-down") ARR_ROTATE=( "${ARR_UPDOWN[@]}" ); PNM_ANGLE="180"; shift; ;;
*) ARR_FILE=( "${ARR_FILE[@]}" "$1" ); shift; ;;
esac
done
# check rotation is selected
[ "${ARR_ROTATE}" = "" ] && { zenity --error --text="Please select rotation parameter"; exit 1; }
# -------------------------------------
# loop thru PDF files to convert them
# -------------------------------------
# create temporary file
TMP_PNM=$(mktemp --tmpdir XXXXXXXX-org.pnm)
TMP_ROTATE=$(mktemp --tmpdir XXXXXXXX-rot.pnm)
# loop thru files
for FILE in "${ARR_FILE[@]}"
do
# remove temporary file
rm -f "${TMP_ROTATE}"
# rotate file according to mimetype
TYPE_FILE=$(mimetype --brief "${FILE}")
case "${TYPE_FILE}" in
"image/jpeg")
# read rotation tag
ANGLE=$(exiftool -args -Orientation -n "${FILE}" | cut -d'=' -f2)
[ "${ANGLE}" = "" ] && ANGLE="0"
# rotate image
exiftool -overwrite_original -Orientation=${ARR_ROTATE[${ANGLE}]} -n "${FILE}"
;;
"image/png")
# convert image to PNM
pngtopnm "${FILE}" > "${TMP_PNM}"
# rotate image
pnmflip -r${PNM_ANGLE} "${TMP_PNM}" | pnmtopng - > "${TMP_ROTATE}"
;;
"image/gif")
# convert image to PNM
giftopnm "${FILE}" > "${TMP_PNM}"
# rotate image
pnmflip -r${PNM_ANGLE} "${TMP_PNM}" | ppmtogif - > "${TMP_ROTATE}"
;;
"image/tiff")
# convert image to PNM
tifftopnm "${FILE}" > "${TMP_PNM}"
# rotate image
pnmflip -r${PNM_ANGLE} "${TMP_PNM}" | pnmtotiff - > "${TMP_ROTATE}"
;;
*)
;;
esac
# replace original file
[ -s "${TMP_ROTATE}" ] && mv --force "${TMP_ROTATE}" "${FILE}"
done
# remove temporary file
rm -f "${TMP_PNM}" "${TMP_ROTATE}"
| true
|
c8c3c34fd8dc5242aa09f455818e1a4623076c8c
|
Shell
|
ODEX-TOS/packages
|
/curl/trunk/PKGBUILD
|
UTF-8
| 1,399
| 2.65625
| 3
|
[
"GPL-1.0-or-later",
"MIT"
] |
permissive
|
# Maintainer: Dave Reisner <dreisner@archlinux.org>
# Contributor: Angel Velasquez <angvp@archlinux.org>
# Contributor: Eric Belanger <eric@archlinux.org>
# Contributor: Lucien Immink <l.immink@student.fnt.hvu.nl>
# Contributor: Daniel J Griffiths <ghost1227@archlinux.us>
pkgname=curl
pkgver=7.74.0
pkgrel=1
pkgdesc="An URL retrieval utility and library"
arch=('x86_64')
url="https://curl.haxx.se"
license=('MIT')
depends=('ca-certificates' 'krb5' 'libssh2' 'libssh2.so' 'openssl' 'zlib'
'libpsl' 'libpsl.so' 'libnghttp2' 'libidn2' 'libidn2.so' 'zstd')
provides=('libcurl.so')
source=("https://curl.haxx.se/download/$pkgname-$pkgver.tar.gz"{,.asc})
sha512sums=('4b61a23463315dce5529aa5f1dc7d21d7876347912c68855c9cfcb01e06492af1510975fafb213c67ac7b9764287767da69043a79376a4be366aa23ace09f163'
'SKIP')
validpgpkeys=('27EDEAF22F3ABCEB50DB9A125CC908FDB71E12C2') # Daniel Stenberg
build() {
cd "$pkgname-$pkgver"
./configure \
--prefix=/usr \
--mandir=/usr/share/man \
--disable-ldap \
--disable-ldaps \
--disable-manual \
--enable-ipv6 \
--enable-versioned-symbols \
--enable-threaded-resolver \
--with-gssapi \
--with-libssh2 \
--with-random=/dev/urandom \
--with-ca-bundle=/etc/ssl/certs/ca-certificates.crt
make
}
package() {
cd "$pkgname-$pkgver"
make DESTDIR="$pkgdir" install
make DESTDIR="$pkgdir" install -C scripts
# license
install -Dt "$pkgdir/usr/share/licenses/$pkgname" -m644 COPYING
}
| true
|
e81586c30961874ffca90cf0624d0b3ce38054b3
|
Shell
|
FollowMeDown/mailbsd
|
/functions/postgresql.sh
|
UTF-8
| 9,064
| 3.453125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# -------------------------------------------------------
# -------------------- PostgreSQL -----------------------
# -------------------------------------------------------
# NOTE: MailBSD will force all clients to send encrypted password
# after configuration completed and SQL data imported.
# Reference: functions/cleanup.sh, function cleanup_pgsql_force_password().
pgsql_initialize()
{
ECHO_DEBUG "Initialize PostgreSQL databases."
# Init db
if [ X"${DISTRO}" == X'OPENBSD' ]; then
mkdir -p ${PGSQL_DATA_DIR} >> ${INSTALL_LOG} 2>&1
chown ${SYS_USER_PGSQL}:${SYS_GROUP_PGSQL} ${PGSQL_DATA_DIR}
su - ${SYS_USER_PGSQL} -c "initdb -D ${PGSQL_DATA_DIR} -U ${SYS_USER_PGSQL} -A trust" >> ${INSTALL_LOG} 2>&1
fi
backup_file ${PGSQL_CONF_PG_HBA} ${PGSQL_CONF_POSTGRESQL}
if [ -f ${PGSQL_CONF_POSTGRESQL} ]; then
ECHO_DEBUG "Make sure PostgreSQL binds to local address: ${SQL_SERVER_ADDRESS}."
perl -pi -e 's#.*(listen_addresses.=.)(.).*#${1}${2}$ENV{LOCAL_ADDRESS}${2}#' ${PGSQL_CONF_POSTGRESQL}
ECHO_DEBUG "Set client_min_messages to ERROR."
perl -pi -e 's#.*(client_min_messages =).*#${1} error#' ${PGSQL_CONF_POSTGRESQL}
# SSL is enabled by default on Ubuntu.
[ X"${DISTRO}" == X'FREEBSD' ] && \
perl -pi -e 's/^#(ssl.=.)off(.*)/${1}on${2}/' ${PGSQL_CONF_POSTGRESQL}
fi
ECHO_DEBUG "Copy MailBSD SSL cert/key with strict permission."
backup_file ${PGSQL_DATA_DIR}/server.{crt,key}
rm -f ${PGSQL_DATA_DIR}/server.{crt,key} >> ${INSTALL_LOG} 2>&1
cp -f ${SSL_CERT_FILE} ${PGSQL_SSL_CERT} >> ${INSTALL_LOG} 2>&1
cp -f ${SSL_KEY_FILE} ${PGSQL_SSL_KEY} >> ${INSTALL_LOG} 2>&1
chown ${SYS_USER_PGSQL}:${SYS_GROUP_PGSQL} ${PGSQL_SSL_CERT} ${PGSQL_SSL_KEY} >> ${INSTALL_LOG} 2>&1
chmod 0600 ${PGSQL_SSL_CERT} ${PGSQL_SSL_KEY} >> ${INSTALL_LOG} 2>&1
ln -s ${PGSQL_SSL_CERT} ${PGSQL_DATA_DIR}/server.crt >> ${INSTALL_LOG} 2>&1
ln -s ${PGSQL_SSL_KEY} ${PGSQL_DATA_DIR}/server.key >> ${INSTALL_LOG} 2>&1
ECHO_DEBUG "Start PostgreSQL server and sleep 5 seconds for initialization"
service_control stop ${PGSQL_RC_SCRIPT_NAME} >> ${INSTALL_LOG} 2>&1
sleep 5
service_control start ${PGSQL_RC_SCRIPT_NAME} >> ${INSTALL_LOG} 2>&1
sleep 5
# Note: we must reset `postgres` password first, otherwise all connections
# will fail, because we cannot set/change passwords at all, so we're trying
# to connect with a wrong password.
ECHO_DEBUG "Setting password for PostgreSQL admin: (${PGSQL_ROOT_USER})."
su - ${SYS_USER_PGSQL} -c "psql -d template1" >> ${INSTALL_LOG} 2>&1 <<EOF
ALTER USER ${PGSQL_ROOT_USER} WITH ENCRYPTED PASSWORD '${PGSQL_ROOT_PASSWD}';
EOF
ECHO_DEBUG "Update pg_hba.conf to force local users to authenticate with md5."
perl -pi -e 's/^(local.*)/#${1}/g' ${PGSQL_CONF_PG_HBA}
perl -pi -e 's/^(host.*)/#${1}/g' ${PGSQL_CONF_PG_HBA}
if [ X"${PGSQL_VERSION}" == X'8' ]; then
echo "local all ${SYS_USER_PGSQL} ident" >> ${PGSQL_CONF_PG_HBA}
else
echo "local all ${SYS_USER_PGSQL} peer" >> ${PGSQL_CONF_PG_HBA}
fi
echo 'local all all md5' >> ${PGSQL_CONF_PG_HBA}
echo 'host all all 0.0.0.0/0 md5' >> ${PGSQL_CONF_PG_HBA}
ECHO_DEBUG "Restart PostgreSQL server and sleeping for 5 seconds."
service_control stop ${PGSQL_RC_SCRIPT_NAME} >> ${INSTALL_LOG} 2>&1
sleep 5
service_control start ${PGSQL_RC_SCRIPT_NAME} >> ${INSTALL_LOG} 2>&1
sleep 5
ECHO_DEBUG "Generate ${PGSQL_DOT_PGPASS}."
cat > ${PGSQL_DOT_PGPASS} <<EOF
*:*:*:${PGSQL_ROOT_USER}:${PGSQL_ROOT_PASSWD}
*:*:*:${VMAIL_DB_BIND_USER}:${VMAIL_DB_BIND_PASSWD}
*:*:*:${VMAIL_DB_ADMIN_USER}:${VMAIL_DB_ADMIN_PASSWD}
*:*:*:${IREDAPD_DB_USER}:${IREDAPD_DB_PASSWD}
*:*:*:${IREDADMIN_DB_USER}:${IREDADMIN_DB_PASSWD}
*:*:*:${SOGO_DB_USER}:${SOGO_DB_PASSWD}
*:*:*:${RCM_DB_USER}:${RCM_DB_PASSWD}
*:*:*:${AMAVISD_DB_USER}:${AMAVISD_DB_PASSWD}
EOF
chown ${SYS_USER_PGSQL}:${SYS_GROUP_PGSQL} ${PGSQL_DOT_PGPASS}
chmod 0600 ${PGSQL_DOT_PGPASS} >> ${INSTALL_LOG} 2>&1
cat >> ${TIP_FILE} <<EOF
PostgreSQL:
* Admin user: ${PGSQL_ROOT_USER}, Password: ${PGSQL_ROOT_PASSWD}
* Bind account (read-only):
- Name: ${VMAIL_DB_BIND_USER}, Password: ${VMAIL_DB_BIND_PASSWD}
* Vmail admin account (read-write):
- Name: ${VMAIL_DB_ADMIN_USER}, Password: ${VMAIL_DB_ADMIN_PASSWD}
* Database stored in: ${PGSQL_DATA_DIR}
* RC script: ${PGSQL_RC_SCRIPT}
* Config files:
* ${PGSQL_CONF_POSTGRESQL}
* ${PGSQL_CONF_PG_HBA}
* Log file: /var/log/postgresql/
* See also:
- ${PGSQL_INIT_SQL_SAMPLE}
- ${PGSQL_DOT_PGPASS}
EOF
echo 'export status_pgsql_initialize="DONE"' >> ${STATUS_FILE}
}
pgsql_import_vmail_users()
{
ECHO_DEBUG "Generate sample SQL templates."
cp -f ${SAMPLE_DIR}/postgresql/sql/init_vmail_db.sql ${PGSQL_DATA_DIR}/
cp -f ${SAMPLE_DIR}/mailbsd/mailbsd.pgsql ${PGSQL_DATA_DIR}/mailbsd.sql
cp -f ${SAMPLE_DIR}/postgresql/sql/add_first_domain_and_user.sql ${PGSQL_DATA_DIR}/
cp -f ${SAMPLE_DIR}/postgresql/sql/grant_permissions.sql ${PGSQL_DATA_DIR}/
perl -pi -e 's#PH_VMAIL_DB_NAME#$ENV{VMAIL_DB_NAME}#g' ${PGSQL_DATA_DIR}/*.sql
perl -pi -e 's#PH_VMAIL_DB_BIND_USER#$ENV{VMAIL_DB_BIND_USER}#g' ${PGSQL_DATA_DIR}/*.sql
perl -pi -e 's#PH_VMAIL_DB_BIND_PASSWD#$ENV{VMAIL_DB_BIND_PASSWD}#g' ${PGSQL_DATA_DIR}/*.sql
perl -pi -e 's#PH_VMAIL_DB_ADMIN_USER#$ENV{VMAIL_DB_ADMIN_USER}#g' ${PGSQL_DATA_DIR}/*.sql
perl -pi -e 's#PH_VMAIL_DB_ADMIN_PASSWD#$ENV{VMAIL_DB_ADMIN_PASSWD}#g' ${PGSQL_DATA_DIR}/*.sql
perl -pi -e 's#PH_DOMAIN_ADMIN_EMAIL#$ENV{DOMAIN_ADMIN_EMAIL}#g' ${PGSQL_DATA_DIR}/*.sql
perl -pi -e 's#PH_FIRST_DOMAIN#$ENV{FIRST_DOMAIN}#g' ${PGSQL_DATA_DIR}/*.sql
perl -pi -e 's#PH_TRANSPORT#$ENV{TRANSPORT}#g' ${PGSQL_DATA_DIR}/*.sql
perl -pi -e 's#PH_DOMAIN_ADMIN_PASSWD_HASH#$ENV{DOMAIN_ADMIN_PASSWD_HASH}#g' ${PGSQL_DATA_DIR}/*.sql
perl -pi -e 's#PH_DOMAIN_ADMIN_MAILDIR_HASH_PART#$ENV{DOMAIN_ADMIN_MAILDIR_HASH_PART}#g' ${PGSQL_DATA_DIR}/*.sql
perl -pi -e 's#PH_DOMAIN_ADMIN_NAME#$ENV{DOMAIN_ADMIN_NAME}#g' ${PGSQL_DATA_DIR}/*.sql
if [ X"${PGSQL_VERSION}" == X'8' ]; then
perl -pi -e 's#^(-- )(CREATE LANGUAGE plpgsql)#${2}#g' ${PGSQL_DATA_DIR}/mailbsd.sql
fi
perl -pi -e 's#^-- \\c#\\c#g' ${PGSQL_DATA_DIR}/mailbsd.sql
# Modify default SQL template, set storagebasedirectory, storagenode.
perl -pi -e 's#(.*storagebasedirectory.*DEFAULT..)(.*)#${1}$ENV{STORAGE_BASE_DIR}${2}#' ${PGSQL_DATA_DIR}/mailbsd.sql
perl -pi -e 's#(.*storagenode.*DEFAULT..)(.*)#${1}$ENV{STORAGE_NODE}${2}#' ${PGSQL_DATA_DIR}/mailbsd.sql
chmod 0755 ${PGSQL_DATA_DIR}/*sql
ECHO_DEBUG "Create roles (${VMAIL_DB_BIND_USER}, ${VMAIL_DB_ADMIN_USER}) and database: ${VMAIL_DB_NAME}."
su - ${SYS_USER_PGSQL} -c "psql -d template1 -f ${PGSQL_DATA_DIR}/init_vmail_db.sql" >> ${INSTALL_LOG} 2>&1
ECHO_DEBUG "Create tables in ${VMAIL_DB_NAME} database."
su - ${SYS_USER_PGSQL} -c "psql -d template1 -f ${PGSQL_DATA_DIR}/mailbsd.sql" >> ${INSTALL_LOG} 2>&1
ECHO_DEBUG "Grant permissions."
su - ${SYS_USER_PGSQL} -c "psql -d template1 -f ${PGSQL_DATA_DIR}/grant_permissions.sql" >> ${INSTALL_LOG} 2>&1
ECHO_DEBUG "Add first domain and postmaster@ user."
su - ${SYS_USER_PGSQL} -c "psql -U ${VMAIL_DB_ADMIN_USER} -d template1 -f ${PGSQL_DATA_DIR}/add_first_domain_and_user.sql" >> ${INSTALL_LOG} 2>&1
mv ${PGSQL_DATA_DIR}/*sql ${RUNTIME_DIR}
chmod 0700 ${RUNTIME_DIR}/*sql
cat >> ${TIP_FILE} <<EOF
SQL commands used to initialize database and import mail accounts:
- ${RUNTIME_DIR}/*.sql
EOF
echo 'export status_pgsql_import_vmail_users="DONE"' >> ${STATUS_FILE}
}
pgsql_cron_backup()
{
pgsql_backup_script="${BACKUP_DIR}/${BACKUP_SCRIPT_PGSQL_NAME}"
ECHO_INFO "Setup daily cron job to backup PostgreSQL databases with ${pgsql_backup_script}"
[ ! -d ${BACKUP_DIR} ] && mkdir -p ${BACKUP_DIR} >> ${INSTALL_LOG} 2>&1
backup_file ${pgsql_backup_script}
cp ${TOOLS_DIR}/${BACKUP_SCRIPT_PGSQL_NAME} ${pgsql_backup_script}
chown ${SYS_ROOT_USER}:${SYS_ROOT_GROUP} ${pgsql_backup_script}
chmod 0500 ${pgsql_backup_script}
perl -pi -e 's#^(export SYS_USER_PGSQL=).*#${1}"$ENV{SYS_USER_PGSQL}"#' ${pgsql_backup_script}
perl -pi -e 's#^(export BACKUP_ROOTDIR=).*#${1}"$ENV{BACKUP_DIR}"#' ${pgsql_backup_script}
# Add cron job
cat >> ${CRON_FILE_ROOT} <<EOF
# ${PROG_NAME}: Backup PostgreSQL databases on 03:01 AM
1 3 * * * ${SHELL_BASH} ${pgsql_backup_script}
EOF
echo 'export status_pgsql_cron_backup="DONE"' >> ${STATUS_FILE}
}
# Top-level PostgreSQL configuration entry point: run each setup stage
# once (check_status_before_run skips stages already marked DONE in
# STATUS_FILE), then record overall completion.
pgsql_setup()
{
ECHO_INFO "Configure PostgreSQL database server."
# Stages run in dependency order: init server -> import schema/users -> cron backup.
check_status_before_run pgsql_initialize
check_status_before_run pgsql_import_vmail_users
check_status_before_run pgsql_cron_backup
# Persist completion so a re-run of the installer skips this whole step.
echo 'export status_pgsql_setup="DONE"' >> ${STATUS_FILE}
}
| true
|
d923f8fea65695ad4570b4cc06b2dda58229c9ad
|
Shell
|
mrseanryan/ubuntu-scripts
|
/extra-configure-ssh-server.sh
|
UTF-8
| 725
| 2.90625
| 3
|
[
"MIT"
] |
permissive
|
echo "!this script needs to run with sudo!"
CONFIG=/etc/ssh/sshd_config
echo "# BEGIN CUSTOM CONFIG" >> $CONFIG
echo "# yes - for VC remote" >> $CONFIG
echo AllowTcpForwarding yes >> $CONFIG
echo X11Forwarding no >> $CONFIG
# This script run under sudo, so actually uses root for $USER, which is not useful
echo "# TODO - AllowUsers $USER" >> $CONFIG
echo ! TODO - adjust AllowUsers in $CONFIG !
echo Banner /etc/issue.net >> $CONFIG
echo PubkeyAuthentication yes >> $CONFIG
echo RSAAuthentication yes >> $CONFIG
# cat $CONFIG
cat config/login-banner.txt >> /etc/issue.net
# reduce permissions to match open-ssh standard:
chmod go-w ~/
chmod 700 ~/.ssh
chmod 600 ~/.ssh/authorized_keys
sudo systemctl restart ssh
| true
|
0089591548a2d9c4d4c55847b583396b49b0eb6f
|
Shell
|
rickard-von-essen/dotfiles
|
/.config/yadm/bootstrap
|
UTF-8
| 2,242
| 3.421875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
set -o nounset
system_type=$(uname -s)
export system_type
export PATH=/usr/local/sbin:/usr/sbin:/sbin:/usr/local/bin:/usr/bin:/bin
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
# -- OS / Distribution specific bootstrap --
if [ "$system_type" = "Darwin" ]; then
"$DIR/bootstrap.darwin"
fi
if [ "$system_type" = "Linux" ]; then
dist=$(grep ^ID= /etc/os-release | cut -d= -f2)
if [ "$dist" = "fedora" ]; then
"$DIR/bootstrap.fedora"
fi
if [ "$dist" = "debian" ]; then
"$DIR/bootstrap.debian"
fi
# "$DIR/bootstrap.linux"
fi
# -- Generic bootstrap --
mkdir -p "$HOME/local"
mkdir -p "$HOME/bin"
mkdir -p "$HOME/go"
# install homebrew if it's missing
if ! command -v brew >/dev/null 2>&1; then
echo "Installing homebrew"
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
fi
. ~/.bash_profile
if [ -f "$HOME/.Brewfile" ]; then
echo "Updating homebrew bundle"
brew bundle --global
fi
echo "Installing Tmux Plugin Manager"
if [ ! -d "$HOME/.config/tmux/plugins/tpm" ]; then
mkdir -p "$HOME/.config/tmux/plugins"
git clone https://github.com/tmux-plugins/tpm "$HOME/.config/tmux/plugins/tpm"
fi
echo "Installing Kitty Themse"
if [ ! -d "$HOME/.config/kitty/kitty-themes" ]; then
mkdir -p "$HOME/.config/kitty"
git clone git@github.com:dexpota/kitty-themes.git "$HOME/.config/kitty/kitty-themes"
fi
echo "Installing Vim plug and Vim plugins"
if [ ! -d "$HOME/.vim/autoload/plug.vim" ]; then
curl -fLo ~/.vim/autoload/plug.vim --create-dirs \
https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim
vim +PluginInstall
fi
echo "Installing fisherman"
if [ ! -f "$HOME/.config/fish/functions/fisher.fish" ]; then
curl -Lo "$HOME/.config/fish/functions/fisher.fish" --create-dirs https://git.io/fisher
fish -c 'fisher'
fi
echo "Python 3 Modules"
cat $HOME/.pkg.py3 | xargs pip3 install --user
echo "Installing ASDF VM"
git clone https://github.com/asdf-vm/asdf.git $HOME/.asdf --branch v0.10.2
source $HOME/.asdf/asdf.fish
cat $HOME/.asdf-plugins | xargs -I= asdf plugin add =
echo "Updating the yadm repo origin URL"
yadm remote set-url origin "git@github.com:rickard-von-essen/dotfiles.git"
| true
|
7c30c51ffdf7532975ed3964d489a96957aae577
|
Shell
|
excalibur44/vps-shell-script
|
/install-vlmcsd.sh
|
UTF-8
| 1,082
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
#
# install-vlmcsd.sh
# A shell script for installing vlmcsd(KMS Emulator in C).
# Tested system:
# Debian 8
# Usage:
# bash <(curl -L -s https://raw.githubusercontent.com/excalibur44/vps-shell-script/master/install-vlmcsd.sh)
URL="https://github.com/Wind4/vlmcsd/releases/latest/download/binaries.tar.gz"
systemctl disable vlmcsd.service
systemctl stop vlmcsd.service
rm /usr/bin/vlmcsd /etc/systemd/system/vlmcsd.service
systemctl daemon-reload
wget -O vlmcsd.tar.gz $URL
tar zxf vlmcsd.tar.gz
cp binaries/Linux/intel/static/vlmcsd-x64-musl-static /usr/bin/vlmcsd
rm -rf vlmcsd.tar.gz binaries/
cat << EOF > /etc/systemd/system/vlmcsd.service
[Unit]
Description=KMS Emulator in C
After=network.target
Wants=network.target
[Service]
Type=forking
PIDFile=/var/run/vlmcsd.pid
ExecStart=/usr/bin/vlmcsd -p /var/run/vlmcsd.pid
ExecStop=/bin/kill -HUP $MAINPID
PrivateTmp=True
Restart=always
[Install]
WantedBy=multi-user.target
EOF
systemctl enable vlmcsd.service
systemctl start vlmcsd.service
systemctl daemon-reload
sleep 2s
systemctl status vlmcsd.service
| true
|
04fc91988dac16268c41c10b8a73e0792652fc18
|
Shell
|
CogRob/intel-aero-notes
|
/yocto_compile/22-install-extra.sh
|
UTF-8
| 777
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
# http://stackoverflow.com/questions/59895/can-a-bash-script-tell-which-directory-it-is-stored-in
CURRENT_SCRIPT="${BASH_SOURCE[0]}"
while [ -h "$CURRENT_SCRIPT" ]; do # resolve $CURRENT_SCRIPT until the file is no longer a symlink
CURRENT_SCRIPT_PATH="$( cd -P "$( dirname "$CURRENT_SCRIPT" )" && pwd )"
CURRENT_SCRIPT="$(readlink "$CURRENT_SCRIPT")"
# if $CURRENT_SCRIPT was a relative symlink, we need to resolve it relative to the path where the symlink file was located
[[ $CURRENT_SCRIPT != /* ]] && CURRENT_SCRIPT="$CURRENT_SCRIPT_PATH/$CURRENT_SCRIPT"
done
CURRENT_SCRIPT_PATH="$( cd -P "$( dirname "$CURRENT_SCRIPT" )" && pwd )"
cd $CURRENT_SCRIPT_PATH/intel_aero
echo 'CORE_IMAGE_EXTRA_INSTALL += "openvpn vim zsh"' >> poky/build/conf/auto.conf
| true
|
4219392f01dd074834e6b439da70d278143c5472
|
Shell
|
danielpalstra/dev-host
|
/install.sh
|
UTF-8
| 749
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
REPO_URL=https://github.com/danielpalstra/dev-host.git
set -x
echo "Starting installation of all kind of crap to get a development host"
echo "Installing dependencies"
sudo apt update -y && \
sudo apt install -y git
# move to tmp to install from
cd /tmp
# First clone te complete git repo
git clone $REPO_URL /tmp/dev-host && \
cd /tmp/dev-host
# install ansible
echo "Installing latest ansible"
sudo apt-get install -y software-properties-common
sudo apt-add-repository -y ppa:ansible/ansible
sudo apt-get update -y
sudo apt-get install -y ansible
# install required playbooks
ansible-galaxy install -r requirements.yml
echo "Running ansible playbook"
# run playbook
ansible-playbook -i hosts dev-host.yml
echo "Finished..."
| true
|
264a978c13570726015719458f6b16cf632df7e2
|
Shell
|
Verduranti/ParallelEdison
|
/connectPhone.sh
|
UTF-8
| 665
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/sh
#Connect to Android
#Start up video feed
#This assumes that activateWifi.sh was successfully run
#MotoX's MAC address
#14:1a:a3:63:d4:d1 -> bluetooth?
#14:1a:a3:63:d4:d2 -> normal wifi
#14:1a:a3:63:d4:d3 -> Wifi direct
# Galaxy S3 MAC
#8a:32:9b:03:0b:6b -> Wifi Direct
#Join after being invited.
#wpa_cli -ip2p-dev-wlan0 p2p_connect 14:1a:a3:63:d4:d3 pbc join
#wpa_cli -ip2p-dev-wlan0 p2p_connect 14:1a:a3:63:d4:d3 pbc persistent \
#go_intent=10
stdout_log="/var/log/edicam.log"
stderr_log="/var/log/edicam.err"
#Start the web server
cd ../edi-cam/web/server
nohup node server.js >> "$stdout_log" 2>> "$stderr_log" &
echo $! > ~/server.pid
exit 0
| true
|
0d9ffd3bc1dfdd447ae950fef94b2c2c7e861ef0
|
Shell
|
snorrebrandstadmoen/haproxy-workshop
|
/docker_tail.sh
|
UTF-8
| 343
| 2.921875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Tail the logs of two Docker containers side by side in a tmux session
# with synchronized, evenly split panes.
# Usage: docker_tail.sh <container-id-1> <container-id-2>

# Print a message (plus a blank line) to stderr and abort.
die () {
	echo >&2 "$@"
	echo
	exit 1
}

[ "$#" -eq 2 ] || die "Eksempel: docker_tail.sh 9ad 4de"

# FIX: the layout commands were written as bare shell lines
# ("split-window", "select-layout", ...) after the tmux call, so they ran
# as shell commands and failed with "command not found". They must be
# chained onto the single tmux invocation with '\;' so tmux executes them
# inside the new session.
tmux -2 new-session -n 'Logger' "docker logs -f $1; read -p 'Ferdig'" \; \
	split-window "docker logs -f $2; read -p 'Ferdig'" \; \
	select-layout even-vertical \; \
	set-window-option synchronize-panes on \; \
	select-window \; \
	attach-session
| true
|
8709ff0fce7c84614b2817866d2605456e9857a1
|
Shell
|
mterron/elasticsearch-autopilot
|
/bin/prestart.sh
|
UTF-8
| 5,223
| 3
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/ash
log() {
	# Emit each argument on stdout prefixed with the script's INFO tag
	# (printf re-applies the format once per argument).
	local fmt='[INFO] preStart: %s\n'
	printf "$fmt" "$@"
}
loge() {
	# Emit each argument prefixed with the script's ERR tag
	# (note: goes to stdout, matching the original implementation).
	local fmt='[ERR] preStart: %s\n'
	printf "$fmt" "$@"
}
# Update configuration file
# Rewrite elasticsearch.yml in place for this node: cluster/node identity,
# data/log paths, bind address, master quorum and the unicast master list.
# Globals read: ES_CLUSTER_NAME, HOSTNAME, ES_NODE_MASTER, ES_NODE_DATA,
#   ES_ENVIRONMENT, CONSUL_HTTP_ADDR, MASTER (JSON array of master addresses).
# Globals written: MASTER (rewritten with ":9300" suffixes), QUORUM, NUM_MASTERS.
update_ES_configuration() {
# Uncomment-and-set cluster.name / node.name via sed on the stock config.
REPLACEMENT_CLUSTER="s/^#.*cluster\.name:.*/cluster.name: ${ES_CLUSTER_NAME}/"
sed -i "${REPLACEMENT_CLUSTER}" /usr/share/elasticsearch/config/elasticsearch.yml
REPLACEMENT_NAME="s/^#.*node\.name:.*/node.name: ${HOSTNAME}/"
sed -i "${REPLACEMENT_NAME}" /usr/share/elasticsearch/config/elasticsearch.yml
#REPLACEMENT_NODE_MASTER="s/^#.*node\.master:.*/node.master: ${ES_NODE_MASTER}/"
#sed -i "${REPLACEMENT_NODE_MASTER}" /usr/share/elasticsearch/config/elasticsearch.yml
# node.master / node.data are appended rather than substituted (the sed
# variants above are intentionally kept commented out).
printf "node.master: ${ES_NODE_MASTER}\n" >> /usr/share/elasticsearch/config/elasticsearch.yml
#REPLACEMENT_NODE_DATA="s/^#.*node\.data:.*/node.data: ${ES_NODE_DATA}/"
#sed -i "${REPLACEMENT_NODE_DATA}" /usr/share/elasticsearch/config/elasticsearch.yml
printf "node.data: ${ES_NODE_DATA}\n" >> /usr/share/elasticsearch/config/elasticsearch.yml
REPLACEMENT_PATH_DATA='s/^#.*path\.data:.*/path.data: \/elasticsearch\/data/'
sed -i "${REPLACEMENT_PATH_DATA}" /usr/share/elasticsearch/config/elasticsearch.yml
REPLACEMENT_PATH_LOGS='s/^#.*path\.logs:.*/path.logs: \/elasticsearch\/log/'
sed -i "${REPLACEMENT_PATH_LOGS}" /usr/share/elasticsearch/config/elasticsearch.yml
# Only lock memory in production environments.
if [ "$ES_ENVIRONMENT" = "prod" ]; then
REPLACEMENT_BOOTSTRAP_MLOCKALL='s/^#.*bootstrap\.memory_lock:\s*true/bootstrap.memory_lock: true/'
sed -i "${REPLACEMENT_BOOTSTRAP_MLOCKALL}" /usr/share/elasticsearch/config/elasticsearch.yml
fi
# Bind to the container's eth0 IPv4 address.
REPLACEMENT_NETWORK_HOST='s/^#.*network\.host:.*/network.host: _eth0:ipv4_/'
sed -i "${REPLACEMENT_NETWORK_HOST}" /usr/share/elasticsearch/config/elasticsearch.yml
# Quorum: majority of known masters; a master node counts itself (+1).
NUM_MASTERS=$(echo $MASTER| jq -r -e 'unique | length')
if [ "$ES_NODE_MASTER" = 'true' ]; then
NEW_QUORUM=$(( ((NUM_MASTERS+1)/2)+1 ))
else
NEW_QUORUM=$(( (NUM_MASTERS/2)+1 ))
fi
# Cross-check against the master count Consul currently reports and keep
# the larger of the two quorum values.
QUORUM=$(curl -E /etc/tls/client_certificate.crt -Ls --fail "${CONSUL_HTTP_ADDR}/v1/health/service/elasticsearch-master"|jq -r -e '[.[].Service.Address] | unique | length // 1')
if [ "$NEW_QUORUM" -gt "${QUORUM}" ]; then
QUORUM="$NEW_QUORUM"
fi
REPLACEMENT_ZEN_MIN_NODES="s/^#.*discovery\.zen\.minimum_master_nodes:.*/discovery.zen.minimum_master_nodes: ${QUORUM}/"
sed -i "${REPLACEMENT_ZEN_MIN_NODES}" /usr/share/elasticsearch/config/elasticsearch.yml
#REPLACEMENT_ZEN_MCAST='s/^#.*discovery\.zen\.ping\.multicast\.enabled:.*/discovery.zen.ping.multicast.enabled: false/'
#sed -i "${REPLACEMENT_ZEN_MCAST}" /usr/share/elasticsearch/config/elasticsearch.yml
# Append the transport port (:9300) to each master address for unicast discovery.
MASTER=$(echo $MASTER | jq -e -r -c 'unique | [.[]+":9300"]')
REPLACEMENT_ZEN_UNICAST_HOSTS="s/^#.*discovery\.zen\.ping\.unicast\.hosts.*/discovery.zen.ping.unicast.hosts: ${MASTER}/"
sed -i "${REPLACEMENT_ZEN_UNICAST_HOSTS}" /usr/share/elasticsearch/config/elasticsearch.yml
# printf "discovery.zen.ping.retries: 6\n" >> /usr/share/elasticsearch/config/elasticsearch.yml
}
# Get the list of ES master nodes from Consul
# Query Consul for the addresses of Elasticsearch master nodes.
# On success: sets MASTER (JSON array), writes the node config and
# TERMINATES THE SCRIPT with exit 0 (not a return!).
# On failure: unsets MASTER and returns 1 so callers can retry in a loop.
get_ES_Master() {
MASTER=$(curl -E /etc/tls/client_certificate.crt -Ls --fail "${CONSUL_HTTP_ADDR}/v1/health/service/elasticsearch-master"| jq -r -e -c '[.[].Service.Address]')
# Both an empty JSON array and an empty string count as "no master found".
if [[ $MASTER != "[]" ]] && [[ -n $MASTER ]]; then
log "Master found ${MASTER}, joining cluster."
update_ES_configuration
exit 0
else
unset MASTER
return 1
fi
}
#------------------------------------------------------------------------------
# Check that CONSUL_HTTP_ADDR environment variable exists
if [[ -z ${CONSUL_HTTP_ADDR} ]]; then
loge "Missing CONSUL_HTTP_ADDR environment variable"
exit 1
fi
# Wait up to 2 minutes for Consul to be available
log "Waiting for Consul availability..."
n=0
until [ $n -ge 120 ]||(curl -E /etc/tls/client_certificate.crt -fsL --connect-timeout 1 "${CONSUL_HTTP_ADDR}/v1/status/leader" &> /dev/null); do
sleep 5s
n=$((n+5))
done
if [ $n -ge 120 ]; then
loge "Consul unavailable, aborting"
exit 1
fi
log "Consul is now available [${n}s], starting up Elasticsearch"
get_ES_Master
# Data-only or client nodes can only wait until there's a master available
if [ "$ES_NODE_MASTER" = false ]; then
log "Client or Data only node, waiting for master"
until get_ES_Master; do
sleep 10s
done
else
# A master+data node will retry for 5 minutes to see if there's
# another master in the cluster in the process of starting up. But we
# bail out if we exceed the retries and just bootstrap the cluster
if [ "$ES_NODE_DATA" = true ]; then
log "Master+Data node, waiting up to 5m for master"
n=0
until [ $n -ge 300 ]; do
until (curl -E /etc/tls/client_certificate.crt -Ls --fail "${CONSUL_HTTP_ADDR}/v1/health/service/elasticsearch-master?passing" | jq -r -e '.[0].Service.Address' >/dev/null); do
sleep 5s
n=$((n+5))
done
curl -NLsS --fail -o /dev/null "http://elasticsearch-master.service.consul:9200/_cluster/health?timeout=5s"
get_ES_Master
n=$((n+5))
done
log "Master not found. Proceed as master"
fi
# for a master-only node (or master+data node that has exceeded the
# retry attempts), we'll assume this is the first master and bootstrap
# the cluster
log "MASTER node, bootstrapping..."
MASTER="[]"
update_ES_configuration
fi
| true
|
8802dffef3ed08b55e00ac966d049687b2de508b
|
Shell
|
cyber-dyne/pjkit.sh
|
/lib/install.sh
|
UTF-8
| 378
| 3.71875
| 4
|
[] |
no_license
|
from . import env
: ${InstallBinDir:=$SystemBinDir}
# Symlink one binary into $InstallBinDir; the optional second argument
# overrides the link name. Runs in a subshell so the locals and any
# future shell-option changes never leak into the caller.
install_bin()
{(
    local source_path="$1"
    local link_name="${2:-$(basename "$source_path")}"
    mkdir -p "$InstallBinDir"
    ln -sf "$source_path" "$InstallBinDir/$link_name"
)}
install_bins()
{
find "$1" -type l -o -type f -maxdepth 1 | while IFS= read -r bin; do
install_bin "$bin"
done
}
| true
|
ead18ea98057609ed74fe19388a2a70babdd104d
|
Shell
|
celiaxiao/cyberbrick
|
/docker-base/mongodb/create_unique_index.sh
|
UTF-8
| 670
| 3.015625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Create the MongoDB admin user and a unique (elementId, date) index on each
# collection, by piping mongo-shell commands into the "mongodb" container.
# Requires: a .env file defining MONGO_INITDB_*, ADMIN_*, DB_NAME and
# DB_COLLECTION_NAME; a running container named "mongodb".
source .env
# NOTE(review): this assigns a single scalar into dbnames[0]; if
# DB_COLLECTION_NAME is meant to hold several space-separated collection
# names, the loop below still sees only one element — confirm intent.
export -a dbnames=${DB_COLLECTION_NAME}
#create user
docker exec -i mongodb mongo -u ${MONGO_INITDB_ROOT_USERNAME} -p ${MONGO_INITDB_ROOT_PASSWORD} ${MONGO_INITDB_DATABASE} << EOF
db.createUser( {user: "${ADMIN_USERNAME}",pwd: "${ADMIN_PASSWORD}",roles: [ { role: "userAdminAnyDatabase", db: "${MONGO_INITDB_DATABASE}" } ]});
EOF
#create unique index
for coll in "${dbnames[@]}"
do
echo "creating index for ${coll}"
# The unquoted EOF delimiter lets the shell expand ${DB_NAME}/${coll}
# inside the mongo-shell script before it reaches the container.
docker exec -i mongodb mongo -u ${MONGO_INITDB_ROOT_USERNAME} -p ${MONGO_INITDB_ROOT_PASSWORD} ${MONGO_INITDB_DATABASE} << EOF
use ${DB_NAME};
db.${coll}.createIndex({elementId:1,date:1},{unique:true});
EOF
done
| true
|
07c56a1db6be9e5838e2f4f7aab42685b73ff8af
|
Shell
|
FestiveAkp/website
|
/deploy.sh
|
UTF-8
| 608
| 3.34375
| 3
|
[] |
no_license
|
#!/usr/bin/env sh
# abort on error
set -e
# build static assets
rm -rf dist
npm run build
# write and stage files
cd dist
git init
echo "akash.dev" > CNAME
cat << EOL > README.md
# festiveakp.github.io
This repo holds the static build output for my personal website, which is deployed through GitHub Pages.
You can find the actual source repo [here](https://github.com/FestiveAkp/website).
EOL
git add -A
git commit -m "deploy: $(date +"%F %r")"
# push to repo festiveakp.github.io
git push -f "https://github.com/FestiveAkp/festiveakp.github.io.git" master
cd ..
echo ""
echo ">> Deploy successful"
| true
|
e57e0e59f46d1e71b4bd2aa723aabcbb3b24365f
|
Shell
|
staswiner/bash_scripts
|
/CompleteGenerator/completion.bash
|
UTF-8
| 989
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
# Append $1 to COMPREPLY when it matches the word currently being completed.
# FIX: removed the unused 'previous' local and quoted $current so an empty
# or glob-containing current word cannot mis-expand before compgen sees it.
_add_option ()
{
    local option=$1;
    local current="${COMP_WORDS[COMP_CWORD]}";
    COMPREPLY+=($(compgen -W "$option" -- "$current"))
}
# Completion helper for a flag that takes a value:
#  - if the previous word equals the flag ($1), replace COMPREPLY with the
#    matching candidate values (remaining arguments) and return 1;
#  - otherwise offer the flag itself as a candidate and return 0.
_add_multi_option ()
{
    local flag="$1"; shift
    local values="$*"
    local cur="${COMP_WORDS[COMP_CWORD]}"
    local prev="${COMP_WORDS[COMP_CWORD-1]}"
    if [[ $prev = $flag ]]; then
        COMPREPLY=($(compgen -W "$values" -- $cur))
        return 1
    fi
    COMPREPLY+=($(compgen -W "$flag" -- $cur))
    return 0
}
# Top-level completion function (bound via `complete -F`): builds COMPREPLY
# for ./script.sh. Each _add_multi_option call returns 1 when it has just
# completed that flag's value, in which case we stop adding further flags.
_compose_options()
{
COMPREPLY=();
local current="${COMP_WORDS[COMP_CWORD]}";
# Intentionally unquoted $projects / $master_ips: the helper expects the
# candidate values as separate arguments.
projects="kafka kafka-test"
_add_multi_option "--project" $projects
[[ $? = 1 ]] && return 0
_add_multi_option "--lama" "very much"
[[ $? = 1 ]] && return 0
master_ips="zose-master01 zose-master02"
_add_multi_option "--master-ip" $master_ips
[[ $? = 1 ]] && return 0
#_add_option "-h"
}
complete -F _compose_options ./script.sh
| true
|
536cb998f0447a3275b1e2d390bb80feec74f825
|
Shell
|
Redcarred2/PiBoy-Configurator2
|
/menus/osd_configuration.sh
|
UTF-8
| 5,262
| 3.3125
| 3
|
[
"CC0-1.0"
] |
permissive
|
#!/bin/bash
#####################################################################
#Project : Retropie_PiBoy_Configurator
#####################################################################
# Global vars
PIBOYCONF_ROOT_FOLDER="$HOME/PiBoy-Configurator"
# common.sh is expected to define $piboyconf_version, dialog colors, etc.
# — TODO confirm; its output is deliberately silenced.
source $PIBOYCONF_ROOT_FOLDER/common.sh >/dev/null 2>&1
# Cached "yes"/"no" display state for each OSD element; "Undefined" until
# update_variables has run.
THROTTLE="Undefined"
BLUETOOTH="Undefined"
WIFI="Undefined"
BATTERY="Undefined"
TEMPERATURE="Undefined"
function update_variables() {
    # Refresh the cached "yes"/"no" flags for every OSD element by asking the
    # config-file reader for each element's "comment" state.
    # Improvements over the original: backticks replaced with $(...), the
    # reader path hoisted into a quoted local instead of being repeated
    # unquoted ten times.
    local reader="$PIBOYCONF_ROOT_FOLDER/readers/osd_config_file.py"
    THROTTLE=$(sudo python "$reader" throttle comment get)
    BLUETOOTH=$(sudo python "$reader" bluetooth comment get)
    WIFI=$(sudo python "$reader" wifi comment get)
    BATTERY=$(sudo python "$reader" battery comment get)
    TEMPERATURE=$(sudo python "$reader" temperature comment get)
    VOLUME=$(sudo python "$reader" volumeicon comment get)
    LOAD=$(sudo python "$reader" load comment get)
    VOLTAGE=$(sudo python "$reader" voltage comment get)
    CURRENT=$(sudo python "$reader" current comment get)
    CPU=$(sudo python "$reader" cpu comment get)
    #getVolumeImagesRessourcesName
}
function getVolumeImagesRessourcesName() {
    # Derive the VOLUME flag from the icon files in $OSD_RESSOURCES_FOLDER:
    # for every file whose name contains $RESSOURCE_VOLUME_IMAGE_CLR, a "__"
    # marker in the name means the icon is disabled ("no"), otherwise it is
    # enabled ("yes").  The last matching file wins.
    for entry in "$OSD_RESSOURCES_FOLDER"/*; do
        [[ $entry == *"$RESSOURCE_VOLUME_IMAGE_CLR"* ]] || continue
        case "$entry" in
            *__*) VOLUME="no" ;;
            *)    VOLUME="yes" ;;
        esac
    done
}
function manage_choice() {
    # Toggle the OSD element selected in the menu: "yes" becomes "no",
    # anything else (including "Undefined") becomes "yes", then the cached
    # flags are refreshed.
    # NOTE(review): callers invoke this as `manage_choice choice` (a literal
    # word).  The original ignored the argument and read the caller's $choice
    # via bash dynamic scoping; that behavior is preserved here.
    # Replaces the original ten near-identical if/elif branches with one
    # table mapping menu ids to config keys.
    local key current_state
    case "$choice" in
        show_throttle)    key="throttle";    current_state=$THROTTLE ;;
        show_bt)          key="bluetooth";   current_state=$BLUETOOTH ;;
        show_wifi)        key="wifi";        current_state=$WIFI ;;
        show_battery)     key="battery";     current_state=$BATTERY ;;
        show_temperature) key="temperature"; current_state=$TEMPERATURE ;;
        show_volume)      key="volumeicon";  current_state=$VOLUME ;;
        show_load)        key="load";        current_state=$LOAD ;;
        show_voltage)     key="voltage";     current_state=$VOLTAGE ;;
        show_current)     key="current";     current_state=$CURRENT ;;
        show_cpu)         key="cpu";         current_state=$CPU ;;
        *)                key="" ;;
    esac
    if [ -n "$key" ]; then
        local new_state="yes"
        if [ "$current_state" = "yes" ]; then
            new_state="no"
        fi
        sudo python "$PIBOYCONF_ROOT_FOLDER/readers/osd_config_file.py" "$key" comment set "$new_state"
    fi
    update_variables
}
# Show the OSD settings menu in a loop: each selection toggles one element,
# Cancel/Back (dialog exit status 1) leaves the script.
function main_menu() {
    local choice
    while true; do
        choice=$(dialog --backtitle "PiBoy Configurator v.$piboyconf_version" --title "OSD Configuration" \
        --ok-label "Select" --cancel-label "Back" --no-tags \
        --menu "Settings" 25 75 20 \
        "show_throttle" "1 Show throttle : $THROTTLE" \
        "show_bt" "2 Show Bluetooth : $BLUETOOTH" \
        "show_wifi" "3 Show Wifi : $WIFI" \
        "show_battery" "4 Show Battery : $BATTERY" \
        "show_temperature" "5 Show Temperature : $TEMPERATURE" \
        "show_volume" "6 Show Volume : $VOLUME" \
        "show_load" "7 Show Load : $LOAD" \
        "show_voltage" "8 Show Voltage : $VOLTAGE" \
        "show_current" "9 Show Current : $CURRENT" \
        "show_cpu" "10 Show CPU : $CPU" \
        2>&1 > /dev/tty)
        # $? here is dialog's exit status (the assignment preserves it).
        opt=$?
        [ $opt -eq 1 ] && exit
        # NOTE(review): "choice" is passed as a literal word; manage_choice
        # actually reads this function's $choice via bash dynamic scoping.
        manage_choice choice
    done
}
# Entry point: populate the cached flags, then run the menu loop.
update_variables
main_menu
| true
|
c06bda6e8a27a2cc97151de59d5ada30299fcffa
|
Shell
|
xianxiaoxian/letkf-speedy
|
/common/timeinc.sh
|
UTF-8
| 1,098
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/sh
# Advance a timestamp given as four arguments (yyyy mm dd hh) by exactly
# 6 hours, handling day/month/year rollover, and print the result as a
# single zero-padded "yyyymmddhh" string.
# NOTE(review): leap years are detected with (year % 4) only, not the full
# Gregorian rule — century years like 2100 would be treated as leap years;
# confirm this is acceptable for the intended date range.
function timeinc6hr
{
  if test $# -ne 4
  then
    echo "USAGE: $0 yyyy mm dd hh"
    return 1
  fi
  local YYYY
  local MM
  local DD
  local HH
  local ITMP
  YYYY=$1
  MM=$2
  DD=$3
  HH=$4
# Increment date
  HH=`expr $HH + 6`
  if test $HH -lt 10
  then
    # re-pad to two digits
    HH=0$HH
  elif test $HH -gt 23
  then
    # past midnight: wrap the hour and carry into the day
    HH=00
    DD=`expr $DD + 1`
    if test $DD -lt 10
    then
      DD=0$DD
    elif test $DD -eq 29
    then
      # 'expr' exits 1 when its result is 0, so the '|| test 1 -eq 1'
      # neutralizes that status for set -e callers.
      ITMP=`expr $YYYY % 4` || test 1 -eq 1
      # Feb 29 in a non-leap year does not exist -> roll to Mar 01
      if test $MM -eq 02 -a $ITMP -ne 0
      then
        DD=01
        MM=03
      fi
    elif test $DD -eq 30
    then
      ITMP=`expr $YYYY % 4` || test 1 -eq 1
      # day after Feb 29 in a leap year -> Mar 01
      if test $MM -eq 02 -a $ITMP -eq 0
      then
        DD=01
        MM=03
      fi
    elif test $DD -eq 31
    then
      # 30-day months: Apr/Jun (re-padded) and Sep/Nov
      if test $MM -eq 04 -o $MM -eq 06
      then
        DD=01
        MM=`expr $MM + 1`
        MM=0$MM
      elif test $MM -eq 09 -o $MM -eq 11
      then
        DD=01
        MM=`expr $MM + 1`
      fi
    elif test $DD -eq 32
    then
      # day after the 31st of a 31-day month
      DD=01
      MM=`expr $MM + 1`
      if test $MM -lt 10
      then
        MM=0$MM
      fi
    fi
    if test $MM -gt 12
    then
      # December rollover -> next year
      MM=01
      YYYY=`expr $YYYY + 1`
    fi
  fi
#
# Outputs
#
  echo $YYYY$MM$DD$HH
  return 0
}
| true
|
4e2e615b6022fdacc25f6e351ddebe66dce02f7f
|
Shell
|
tomekaq/CORSIKAscripts
|
/divideByID.sh
|
UTF-8
| 720
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
# Author: Tomasz Antonik
# Split particle records by their ID.
# Reads a text data file (output of an earlier script) and copies every line
# whose "id: <n>" field matches the user-supplied ID into a separate file.
# Fixes over the original: a line without an "id:" field no longer reuses the
# stale BASH_REMATCH from the previous match (which could mis-count lines);
# variables are quoted and 'read -r' is used so backslashes survive.
i=0
echo "Enter the name of file read the data:"
read -r readFile
echo "Enter ID of particle to separate:"
read -r ID
patternid='id: ([0-9]+)'
echo "Enter the name of file to which save the data:"
read -r fileID
# truncate/create the output file
> "$fileID"
while IFS= read -r line
do
    # ${BASH_REMATCH[0]:3} drops the leading "id:" from the full match;
    # [[ ... -eq ... ]] then compares the operands arithmetically.
    if [[ $line =~ $patternid ]] && [[ ${BASH_REMATCH[0]:3} -eq "$ID" ]]
    then
        ((i = i + 1))
        echo "$line" >> "$fileID"
    fi
done < "$readFile"
# report how many matching lines were written
echo "$i"
| true
|
7bcc44b43ea1c90f9fdba138adc12853399cf581
|
Shell
|
oceanscan/imcjava
|
/mvn_update.sh
|
UTF-8
| 278
| 2.546875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Publish IMCJava to the local Maven repository, then copy the published
# artifacts for the current version to OMST's remote Maven repository.
gradle publishToMavenLocal
# 'gradle printVersion' emits the version string on its 3rd output line
# — TODO confirm this holds across Gradle versions.
version=$(gradle printVersion | sed -n 3p)
echo ""
echo "Publishing ICMJava v$version to omst's Maven repository..."
sleep 1
scp -r ~/.m2/repository/pt/lsts/imcjava/"$version" omst@192.168.61.1:/srv/data/www/maven/pt/lsts/imcjava/
| true
|
b3c83539156288dd75bdd714f760854698c668b3
|
Shell
|
jaswanth-gorripati/FileMetaStore
|
/hlf-network/scripts/fileHashExec.sh
|
UTF-8
| 6,435
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/bash
# Demo driver for the metarecords chaincode: exercises invoke/query
# scenarios against the filesmetastore channel.
# utils.sh / envVar.sh provide setGlobals, parsePeerConnectionParameters,
# verifyResult and the color variables used below — TODO confirm.
source scripts/utils.sh
FABRIC_CFG_PATH=$PWD/../config/
. scripts/envVar.sh
CHANNEL_NAME="filesmetastore"
CC_NAME="metarecords"
# Scenario 1: invoke StoreFileMetadata with a fresh file hash — expected to
# succeed.  Output of the peer CLI is captured in log.txt and echoed.
successInvokeTx() {
    setGlobals 1
    parsePeerConnectionParameters 1 2
    res=$?
    verifyResult $res "Invoke transaction failed on channel '$CHANNEL_NAME' due to uneven number of peer and org parameters "
    #set -x
    fcn_call='{"function":"StoreFileMetadata","Args":["69dde88229fdcb24c05a10e2be2c1e54fb6ed9b36dab733de997d36c63576c3f","{\"DPIHeight\":72,\"Depth\":8,\"ColorModel\":\"RGB\",\"DPIWidth\":72,\"PixelHeight\":800,\"PixelWidth\":532,\"JFIF\":{\"DensityUnit\":1,\"YDensity\":2,\"JFIFVersion\":[1,1],\"XDensity\":72},ProfileName\":\"sRGBIEC61966-2.1\"}"]}'
    echo -e ""
    echo -e "Scenario 1 : ${C_YELLOW}Valid Invoke Transactions${C_RESET}"
    echo ""
    echo -e "  ${C_BLUE}Function${C_RESET} : 'StoreFileMetadata'"
    echo ""
    echo -e '     args : ["69dde88229fdcb24c05a10e2be2c1e54fb6ed9b36dab733de997d36c63576c3f","{\"DPIHeight\":72,\"Depth\":8,\"ColorModel\":\"RGB\",\"DPIWidth\":72,\"PixelHeight\":800,\"PixelWidth\":532,\"JFIF\":{\"DensityUnit\":1,\"YDensity\":2,\"JFIFVersion\":[1,1],\"XDensity\":72},ProfileName\":\"sRGBIEC61966-2.1\"}"]'
    echo ""
    echo -e "  ${C_BLUE}Command${C_RESET} : "
    # set -x/-set +x bracket the invoke so the exact command is printed
    set -x
    peer chaincode invoke -o localhost:7050 --ordererTLSHostnameOverride orderer.example.com --tls --cafile $ORDERER_CA -C $CHANNEL_NAME -n ${CC_NAME} $PEER_CONN_PARMS -c ${fcn_call} >&log.txt
    { set +x; } 2>/dev/null
    echo ""
    echo ""
    echo -e "  ${C_BLUE}Output${C_RESET} : ${C_GREEN}$(cat log.txt)${C_RESET}"
}
# Scenario 3: invoke StoreFileMetadata with the SAME hash as scenario 1 —
# the chaincode is expected to reject the duplicate.
DuplicateInvokeTx() {
    # setGlobals 1
    # parsePeerConnectionParameters 1 2
    #set -x
    fcn_call='{"function":"StoreFileMetadata","Args":["69dde88229fdcb24c05a10e2be2c1e54fb6ed9b36dab733de997d36c63576c3f","{\"DPIHeight\":72,\"Depth\":8,\"ColorModel\":\"RGB\",\"DPIWidth\":72,\"PixelHeight\":800,\"PixelWidth\":532,\"JFIF\":{\"DensityUnit\":1,\"YDensity\":2,\"JFIFVersion\":[1,1],\"XDensity\":72},ProfileName\":\"sRGBIEC61966-2.1\"}"]}'
    echo -e ""
    echo -e "Scenario 3 : ${C_YELLOW}Duplicate invoke Transactions -- should result in error${C_RESET}"
    echo ""
    echo -e "  ${C_BLUE}Function${C_RESET} : 'StoreFileMetadata'"
    echo ""
    echo -e '     args : ["69dde88229fdcb24c05a10e2be2c1e54fb6ed9b36dab733de997d36c63576c3f","{\"DPIHeight\":72,\"Depth\":8,\"ColorModel\":\"RGB\",\"DPIWidth\":72,\"PixelHeight\":800,\"PixelWidth\":532,\"JFIF\":{\"DensityUnit\":1,\"YDensity\":2,\"JFIFVersion\":[1,1],\"XDensity\":72},ProfileName\":\"sRGBIEC61966-2.1\"}"]'
    echo ""
    echo -e "  ${C_BLUE}Command${C_RESET} : "
    set -x
    peer chaincode invoke -o localhost:7050 --ordererTLSHostnameOverride orderer.example.com --tls --cafile $ORDERER_CA -C $CHANNEL_NAME -n ${CC_NAME} $PEER_CONN_PARMS -c ${fcn_call} >&log.txt
    { set +x; } 2>/dev/null
    echo ""
    echo ""
    echo -e "  ${C_BLUE}Output${C_RESET} : ${C_GREEN}$(cat log.txt)${C_RESET}"
}
# Scenario 2: query GetFileMetadata for the hash stored in scenario 1 —
# expected to return the stored metadata.
successChaincodeQuery() {
    echo -e ""
    echo -e "Scenario 2 : ${C_YELLOW}Query to get File Metadata Transactions${C_RESET}"
    echo ""
    echo -e "  ${C_BLUE}Function${C_RESET} : 'GetFileMetadata'"
    echo ""
    echo -e "  ${C_BLUE}args${C_RESET} : ['69dde88229fdcb24c05a10e2be2c1e54fb6ed9b36dab733de997d36c63576c3f']"
    echo ""
    echo -e "  ${C_BLUE}Command${C_RESET} : "
    set -x
    peer chaincode query -C $CHANNEL_NAME -n ${CC_NAME} -c '{"Args":["GetFileMetadata","69dde88229fdcb24c05a10e2be2c1e54fb6ed9b36dab733de997d36c63576c3f"]}' >&log.txt
    { set +x; } 2>/dev/null
    echo ""
    echo ""
    echo -e "  ${C_BLUE}Output${C_RESET} : ${C_GREEN}$(cat log.txt)${C_RESET}"
}
# Scenario 4: query GetFileMetadata for a hash that was never stored —
# the chaincode is expected to return an error.
failedChaincodeQuery() {
    echo -e ""
    echo -e "Scenario 4 : ${C_YELLOW}Invalid Hash Query -- should result in error${C_RESET}"
    echo ""
    echo -e "  ${C_BLUE}Function${C_RESET} : 'GetFileMetadata'"
    echo ""
    echo -e "  ${C_BLUE}args${C_RESET} : ['74cde88229fdcb24c05a10e2be2c1e54fb6ed9b36dab733de997d36c63576c3f']"
    echo ""
    echo -e "  ${C_BLUE}Command${C_RESET} : "
    set -x
    peer chaincode query -C $CHANNEL_NAME -n ${CC_NAME} -c '{"Args":["GetFileMetadata","74cde88229fdcb24c05a10e2be2c1e54fb6ed9b36dab733de997d36c63576c3f"]}' >&log.txt
    { set +x; } 2>/dev/null
    echo ""
    echo ""
    echo -e "  ${C_BLUE}Output${C_RESET} : ${C_GREEN}$(cat log.txt)${C_RESET}"
}
# Run the four scenarios in order: store, read back, duplicate store
# (rejected), read of an unknown hash (rejected).  Sleeps give the ledger
# time to commit between steps.
echo "-------------------------------------------------------------"
echo "--------------- Blockchain Transactions ---------------------"
echo "-------------------------------------------------------------"
successInvokeTx
# echo "-------------------------------------------------------------"
# echo "-------------------------------------------------------------"
echo ""
echo ""
echo "------------------------------------------------------------------------------"
sleep 2
successChaincodeQuery
echo ""
echo ""
echo "------------------------------------------------------------------------------"
sleep 2
DuplicateInvokeTx
echo ""
echo ""
echo "------------------------------------------------------------------------------"
sleep 2
failedChaincodeQuery
echo ""
echo ""
echo "------------------------------ END --------------------------------------------"
# echo "-------------------------------------------------------------"
# echo "----- Successfull Chaincode Query to get Has Metadata -------"
# echo "-------------------------------------------------------------"
# successChaincodeQuery
# echo "-------------------------------------------------------------"
# echo "-------------------------------------------------------------"
# echo ""
# echo ""
# sleep 1
# echo "-------------------------------------------------------------"
# echo "--------- Duplicate Hash Transaction invokation -------------"
# echo "-------------------------------------------------------------"
# DuplicateInvokeTx
# echo "-------------------------------------------------------------"
# echo "-------------------------------------------------------------"
# sleep 2
# echo "-------------------------------------------------------------"
# echo "--------- Failed Chaincode Query ----------------"
# echo "-------------------------------------------------------------"
# failedChaincodeQuery
# echo "-------------------------------------------------------------"
# echo "------------------- END ------------------------------------"
| true
|
60619ff48cfec370e130a30e66231cd339fe627e
|
Shell
|
mmmarq/raspberry_setup
|
/bin/motion_uploader.sh
|
UTF-8
| 1,133
| 3.921875
| 4
|
[] |
no_license
|
#!/bin/sh
# REFERENCE
# http://acd-cli.readthedocs.io/en/latest/usage.html
# http://xmodulo.com/access-amazon-cloud-drive-command-line-linux.html
# Move finished motion-capture recordings from the camera spool into the
# server share, sorted into per-date folders.  A lock file with a 30-minute
# staleness window prevents overlapping runs.
# Fixes over the original: SIGKILL (9) removed from the trap list (it can
# never be caught), backticks replaced with $(...), and expansions quoted.
TEMP_FILE="/tmp/motion_uploader.tmp"
trap cleanup 1 2 3 6 15
cleanup()
{
    echo "Caught Signal ... cleaning up."
    rm -f "$TEMP_FILE"
    echo "Done cleanup ... quitting."
    exit 1
}
if [ -f "$TEMP_FILE" ]; then
    echo "Motion uploader lock file found!"
    CURR_AGE=$(date +%s)
    LOCK_AGE=$(stat -L --format %Y "$TEMP_FILE")
    AGE=$((CURR_AGE - LOCK_AGE))
    if [ "$AGE" -lt 1800 ]; then
        echo "Motion uploader running. Do no run another task in parallel!"
        exit 0
    else
        echo "Motion uploader lock file looks like too old. Lets remove it and run again!"
        echo $$ > "$TEMP_FILE"
    fi
else
    echo $$ > "$TEMP_FILE"
fi
# CREATE CURRENT DATA FOLDER
CURR_DATE=$(date +%Y-%m-%d)
sudo mkdir -p "/mnt/server/share/motion/camera1/$CURR_DATE"
# Only files untouched for 10+ minutes, so in-progress recordings are skipped.
# NOTE(review): the for-loop relies on word-splitting of find's output and
# assumes camera file names contain no whitespace — confirm.
FILES=$(find /mnt/cameras/camera1 -type f -mmin +10)
for file in $FILES; do
    # Derive the recording date (YYYY-MM-DD) from the file name prefix.
    FILE_DATE=$(echo "$file" | sed 's/-[0-9]*_.*$//g' | sed 's/^.*\///g')
    sudo mv "$file" "/mnt/server/share/motion/camera1/$FILE_DATE"
    sudo rm -f "$file"
done
rm -f "$TEMP_FILE"
| true
|
27a4be8bff83dfbe930fb79b7fb836b25a1b8847
|
Shell
|
hexcores/server-installer
|
/mongodb.sh
|
UTF-8
| 539
| 2.765625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Install MongoDB from the 10gen apt repository and enable the PHP 5
# mongo extension (Ubuntu upstart era; apt-key/php5 tooling is legacy).
#echo ">>> Installing MongoDB"
# Get key and add to sources
sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10
echo 'deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen' | sudo tee /etc/apt/sources.list.d/mongodb.list
# Update
sudo apt-get -y update
sudo apt-get install -y mongodb-org
sudo pecl install mongo
# add extencion file and restart service
echo 'extension=mongo.so' | sudo tee /etc/php5/mods-available/mongo.ini
sudo php5enmod mongo
sudo service apache2 restart
| true
|
b74a95618614b45edbf577ce6d9be5e70786b1cb
|
Shell
|
Ethns/bash_basic
|
/pathexistence.sh
|
UTF-8
| 171
| 3.625
| 4
|
[] |
no_license
|
# Prompt for a path (relative to the current folder) and report whether it
# exists.  Fixes over the original: "$filepath" is quoted — the unquoted
# form made '[ -e ]' a one-argument (always-true) test on empty input, so an
# empty answer wrongly reported "File exists"; 'read -r' keeps backslashes.
read -r -p 'Input the path of the file you want to check in current folder: ' filepath
if [ -e "$filepath" ]
then
    echo 'File exists'
else
    echo 'File does not exist'
fi
| true
|
e7e7893c739c55c4490fc2c3d2234881c591310f
|
Shell
|
monokoo/patches4lede
|
/apply_patch.sh
|
UTF-8
| 941
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/sh
# Apply the local patch set to the LEDE source tree, choosing extra patches
# based on the target selected in .config (Phicomm K3, x86_64, or other).
sh /home/lede/patches4lede/clean_patch.sh
cd /home/lede/lede-source
patch -p1 -i /home/lede/patches4lede/personal-modify.patch
# K3 target selected in .config -> apply the K3-only patch
[ -n "$(cat /home/lede/lede-source/.config | grep phicomm-k3)" ] && {
	echo "apply patches for K3..."
	patch -p1 -i /home/lede/patches4lede/just_for_k3.patch
	#patch -p1 -i /home/lede/patches4lede/mac80211-patches-k3.patch
}
if [ -n "$(cat /home/lede/lede-source/.config | grep x86_64)" ]; then
	echo "apply patches for x86_64..."
	#patch -p1 -i /home/lede/patches4lede/just_for_x64.patch
	#patch -p1 -i /home/lede/patches4lede/mac80211-patches-k3.patch
else
	echo "apply patches for other archs..."
	patch -p1 -i /home/lede/patches4lede/just_for_other_arch.patch
fi
# Drop minidlna from the build if it is selected.
[ -n "$(cat /home/lede/lede-source/.config | grep CONFIG_PACKAGE_minidlna=y)" ] && {
	echo "disselect PACKAGE minidlna..."
	sed -i 's/CONFIG_PACKAGE_minidlna=y/# CONFIG_PACKAGE_minidlna is not set/' /home/lede/lede-source/.config
}
| true
|
f59dd9989caf7d0712e68c6d9bd00e4c7c995060
|
Shell
|
raid-7/RaidRace
|
/run.sh
|
UTF-8
| 515
| 3.484375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Launcher for RaidRace: start either the client ("c") or the server ("s")
# from the project's build output, forwarding the remaining arguments.
# Fix over the original: "${@:2}" is quoted so arguments containing spaces
# are forwarded to the JVM intact.
cd "$(dirname "$0")"
usage() {
    echo "Usage:
Run client
    c host [port=25565]
or run server
    s [port=25565] [num_of_players=2]"
    exit 1
}
if (( $# == 0 )); then
    usage
fi
if [[ "$1" == "c" ]]; then
    # host port
    if (( $# == 1 )); then
        usage
    fi
    java -cp target/main-1.0-SNAPSHOT.jar ru.raid_7.raidrace.client.RaidRace "${@:2}"
fi
if [[ "$1" == "s" ]]; then
    # port players map
    java -cp target/main-1.0-SNAPSHOT.jar ru.raid_7.raidrace.server.Main "${@:2}"
fi
| true
|
9cdc92732c17764e1cbb306378e6c3003184532f
|
Shell
|
JussiPakkanen/sdk-build-tools
|
/buildqt5.sh
|
UTF-8
| 9,615
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
#
# This script builds dynamic and static versions of Qt5 into
# subdirectories in the parent directory of the Qt source directory.
#
# Qt5 sources must be found from the current user's home directory
# $HOME/invariant/qt-everywhere-opensource-src-5.5.0 or in in case of
# Windows in C:\invariant\qt-everywhere-opensource-src-5.5.0.
#
# To build a version of Qt other than 5.5.0, change the value of the
# QT_SOURCE_PACKAGE variable.
#
# Copyright (C) 2014 Jolla Oy
# Contact: Juha Kallioinen <juha.kallioinen@jolla.com>
# All rights reserved.
#
# You may use this file under the terms of BSD license as follows:
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Jolla Ltd nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Platform detection and path setup: Linux/macOS build under $HOME/invariant,
# everything else (Windows/MSYS) under C:\invariant with MSVC2012 suffixes.
export LC_ALL=C
UNAME_SYSTEM=$(uname -s)
UNAME_ARCH=$(uname -m)
QT_SOURCE_PACKAGE=qt-everywhere-opensource-src-5.5.0
if [[ $UNAME_SYSTEM == "Linux" ]] || [[ $UNAME_SYSTEM == "Darwin" ]]; then
    BASEDIR=$HOME/invariant
    SRCDIR_QT=$BASEDIR/$QT_SOURCE_PACKAGE
    # the padding part in the dynamic build directory is necessary in
    # order to accommodate rpath changes at the end of building Qt
    # Creator, which reads Qt resources from this directory.
    DYN_BUILD_DIR=$BASEDIR/$QT_SOURCE_PACKAGE-build
    STATIC_BUILD_DIR=$BASEDIR/$QT_SOURCE_PACKAGE-static-build
    ICU_INSTALL_DIR=$BASEDIR/icu-install
else
    BASEDIR="/c/invariant"
    SRCDIR_QT="$BASEDIR/$QT_SOURCE_PACKAGE"
    DYN_BUILD_DIR="$BASEDIR/$QT_SOURCE_PACKAGE-build-msvc2012"
    STATIC_BUILD_DIR="$BASEDIR/$QT_SOURCE_PACKAGE-static-build-msvc2012"
    ICU_INSTALL_DIR=$BASEDIR/icu
fi
# common options for unix/windows dynamic build
# the dynamic build is used when building Qt Creator
COMMON_CONFIG_OPTIONS="-release -nomake examples -nomake tests -no-qml-debug -qt-zlib -qt-libpng -qt-libjpeg -qt-pcre -no-sql-mysql -no-sql-odbc -developer-build -confirm-license -opensource -skip qtandroidextras"
LINUX_CONFIG_OPTIONS="-no-eglfs -no-linuxfb -no-kms"
# add these to the COMMON_CONFIG_OPTIONS for static build
# the static build is required to build Qt Installer Framework
COMMON_STATIC_OPTIONS="-static -skip qtwebkit -skip qtxmlpatterns -no-dbus -skip qt3d"
# Windows dynamic build: generates a .bat that sets up the MSVC2012
# environment and configures/builds Qt with jom.  Heredoc content is the
# literal batch script (shell vars inside it still expand).
build_dynamic_qt_windows() {
    [[ -z $OPT_DYNAMIC ]] && return
    rm -rf $DYN_BUILD_DIR
    mkdir -p $DYN_BUILD_DIR
    pushd $DYN_BUILD_DIR
    cat <<EOF > build-dyn.bat
@echo off
if DEFINED ProgramFiles(x86) set _programs=%ProgramFiles(x86)%
if Not DEFINED ProgramFiles(x86) set _programs=%ProgramFiles%
set PATH=c:\windows;c:\windows\system32;%_programs\windows kits\8.0\windows performance toolkit;%_programs%\7-zip;C:\invariant\bin;c:\python27;c:\perl\bin;c:\ruby193\bin;c:\invariant\icu\bin;C:\invariant\\$QT_SOURCE_PACKAGE\gnuwin32\bin;%_programs%\microsoft sdks\typescript\1.0;c:\windows\system32\wbem;c:\windows\system32\windowspowershell\v1.0;c:\invariant\bin
call "%_programs%\microsoft visual studio 12.0\vc\vcvarsall.bat"
set MAKE=jom
call c:\invariant\\$QT_SOURCE_PACKAGE\configure.bat -make-tool jom $COMMON_CONFIG_OPTIONS -icu -I c:\invariant\icu\include -L c:\invariant\icu\lib -angle -platform win32-msvc2012 -prefix
call jom /j 1
EOF
    cmd //c build-dyn.bat
    popd
}
# Run Qt's configure for the STATIC build (used for the Installer
# Framework); Linux adds xcb/gtk options, other platforms use the minimal set.
configure_static_qt5() {
    if [[ $UNAME_SYSTEM == "Linux" ]]; then
        $SRCDIR_QT/configure $COMMON_CONFIG_OPTIONS $LINUX_CONFIG_OPTIONS $COMMON_STATIC_OPTIONS -optimized-qmake -qt-xcb -qt-xkbcommon -gtkstyle -no-gstreamer -no-icu -skip qtsvg -no-warnings-are-errors -no-compile-examples
    else
        $SRCDIR_QT/configure $COMMON_CONFIG_OPTIONS $COMMON_STATIC_OPTIONS -optimized-qmake -no-gstreamer -no-warnings-are-errors
    fi
}
# Run Qt's configure for the DYNAMIC build (used for Qt Creator), after
# commenting out qtwebkit's 'video' feature in features.prf.
configure_dynamic_qt5() {
    # The argument to '-i' is mandatory for compatibility with mac
    sed -i~ '/^[[:space:]]*WEBKIT_CONFIG[[:space:]]*+=.*\<video\>/s/^/#/' \
        $SRCDIR_QT/qtwebkit/Tools/qmake/mkspecs/features/features.prf
    if [[ $UNAME_SYSTEM == "Linux" ]]; then
        $SRCDIR_QT/configure $COMMON_CONFIG_OPTIONS $LINUX_CONFIG_OPTIONS -optimized-qmake -qt-xcb -qt-xkbcommon -gtkstyle -no-gstreamer -I $ICU_INSTALL_DIR/include -L $ICU_INSTALL_DIR/lib -icu -no-warnings-are-errors -no-compile-examples
    else
        $SRCDIR_QT/configure $COMMON_CONFIG_OPTIONS -optimized-qmake -no-gstreamer
    fi
}
# Unix dynamic build: configure and compile in a clean build directory.
# No-op unless -d/--dynamic was requested.
build_dynamic_qt() {
    [[ -z $OPT_DYNAMIC ]] && return
    rm -rf $DYN_BUILD_DIR
    mkdir -p $DYN_BUILD_DIR
    pushd $DYN_BUILD_DIR
    configure_dynamic_qt5
    make -j$(getconf _NPROCESSORS_ONLN)
    # no need to make install with -developer-build option
    # make install
    popd
}
# Windows static build: same .bat technique as the dynamic variant, but with
# the static option set and -static-runtime.
build_static_qt_windows() {
    [[ -z $OPT_STATIC ]] && return
    rm -rf $STATIC_BUILD_DIR
    mkdir -p $STATIC_BUILD_DIR
    pushd $STATIC_BUILD_DIR
    cat <<EOF > build-dyn.bat
@echo off
if DEFINED ProgramFiles(x86) set _programs=%ProgramFiles(x86)%
if Not DEFINED ProgramFiles(x86) set _programs=%ProgramFiles%
set PATH=c:\windows;c:\windows\system32;%_programs\windows kits\8.0\windows performance toolkit;%_programs%\7-zip;C:\invariant\bin;c:\python27;c:\perl\bin;c:\ruby193\bin;c:\invariant\icu\bin;C:\invariant\\$QT_SOURCE_PACKAGE\gnuwin32\bin;%_programs%\microsoft sdks\typescript\1.0;c:\windows\system32\wbem;c:\windows\system32\windowspowershell\v1.0;c:\invariant\bin
call "%_programs%\microsoft visual studio 12.0\vc\vcvarsall.bat"
set MAKE=jom
call c:\invariant\\$QT_SOURCE_PACKAGE\configure.bat -make-tool jom $COMMON_CONFIG_OPTIONS $COMMON_STATIC_OPTIONS -angle -platform win32-msvc2012 -static-runtime -prefix
call jom /j 1
EOF
    cmd //c build-dyn.bat
    popd
}
# Unix static build: configure and compile in a clean build directory.
# No-op unless -s/--static was requested.
build_static_qt() {
    [[ -z $OPT_STATIC ]] && return
    rm -rf $STATIC_BUILD_DIR
    mkdir -p $STATIC_BUILD_DIR
    pushd $STATIC_BUILD_DIR
    configure_static_qt5
    make -j$(getconf _NPROCESSORS_ONLN)
    # no need to make install with -developer-build option
    # make install
    popd
}
fail() {
    # Print a "FAIL:" message built from all arguments and abort the script.
    printf 'FAIL: %s\n' "$*"
    exit 1
}
# Print the help text; any argument (conventionally "quit") makes it exit 1.
usage() {
    cat <<EOF
Build dynamic and static versions of Qt5
Required directories:
   $BASEDIR
   $SRCDIR_QT
Usage:
   $(basename $0) [OPTION]
Options:
   -d | --dynamic          build dynamic version (default)
   -s | --static           build static version
   -y | --non-interactive  answer yes to all questions presented by the script
   -h | --help             this help
EOF
    # exit if any argument is given
    [[ -n "$1" ]] && exit 1
}
# handle commandline options
while [[ ${1:-} ]]; do
    case "$1" in
        -d | --dynamic ) shift
            OPT_DYNAMIC=1
            ;;
        -s | --static ) shift
            OPT_STATIC=1
            ;;
        -y | --non-interactive ) shift
            OPT_YES=1
            ;;
        -h | --help ) shift
            usage quit
            ;;
        * )
            usage quit
            ;;
    esac
done
if [[ -z $OPT_DYNAMIC ]] && [[ -z $OPT_STATIC ]]; then
    # default: build dynamic only
    OPT_DYNAMIC=1
fi
# Summarize what will be built and ask for confirmation unless -y was given.
echo "Using sources from [$SRCDIR_QT]"
[[ -n $OPT_DYNAMIC ]] && echo "- Build [dynamic] version of Qt5"
[[ -n $OPT_STATIC ]] && echo "- Build [static] version of Qt5"
# confirm
if [[ -z $OPT_YES ]]; then
    while true; do
        read -p "Do you want to continue? (y/n) " answer
        case $answer in
            [Yy]*)
                break ;;
            [Nn]*)
                echo "Ok, exiting"
                exit 0
                ;;
            *)
                echo "Please answer yes or no."
                ;;
        esac
    done
fi
# Sanity-check the required directories before starting a multi-hour build.
if [[ ! -d $BASEDIR ]]; then
    fail "directory [$BASEDIR] does not exist"
fi
if [[ ! -d $SRCDIR_QT ]]; then
    fail "directory [$SRCDIR_QT] does not exist"
fi
pushd $BASEDIR || exit 1
# stop in case of errors
set -e
# record start time
BUILD_START=$(date +%s)
if [[ $UNAME_SYSTEM == "Linux" ]] || [[ $UNAME_SYSTEM == "Darwin" ]]; then
    if [[ $UNAME_SYSTEM == "Linux" ]]; then
        export LD_LIBRARY_PATH=$ICU_INSTALL_DIR/lib
    fi
    build_dynamic_qt
    build_static_qt
else
    build_dynamic_qt_windows
    build_static_qt_windows
fi
# record end time
BUILD_END=$(date +%s)
popd
# Report elapsed wall-clock time as HH:MM:SS.
time=$(( BUILD_END - BUILD_START ))
hour=$(( $time / 3600 ))
mins=$(( $time / 60 - 60*$hour ))
secs=$(( $time - 3600*$hour - 60*$mins ))
echo Time used for Qt5 build: $(printf "%02d:%02d:%02d" $hour $mins $secs)
# Local Variables:
# indent-tabs-mode:nil
# tab-width:4
# End:
# For VIM:
# vim:set softtabstop=4 shiftwidth=4 tabstop=4 expandtab:
| true
|
e1e6c3fa3adb0f47b2737c0582ef18958408ae7f
|
Shell
|
macisamuele/docker-images
|
/scripts/docker-push.sh
|
UTF-8
| 804
| 3.625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Push the Docker image for <service-name> on <build-platform>, rebuilding
# it first if it is not present locally.
if [ $# -ne 2 ]; then
    echo "Usage: $0 <build-platform> <service-name>" > /dev/stderr
    exit 1
fi
set -euo pipefail -o posix -o functrace
# common.sh provides docker_login_and_enable_experimental_cli,
# git_root_directory, assert_supported_platform, docker_image_name, $DOCKER.
# shellcheck source=scripts/common.sh
source "$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/common.sh"
docker_login_and_enable_experimental_cli
echo "Set base directory to git root directory"
cd "$(git_root_directory)"
BUILD_PLATFORM="$1"
SERVICE_NAME="$2"
assert_supported_platform "${BUILD_PLATFORM}"
image_name="$(docker_image_name "${SERVICE_NAME}" "${BUILD_PLATFORM}")"
# grep -q '.' tests whether 'docker images -q' produced any output at all.
if ! ${DOCKER} images -q "${image_name}" | grep -q '.'; then
    echo "Image is not present. Rebuilding it"
    bash "$(git_root_directory)/scripts/docker-build.sh" "$1" "$2"
fi
${DOCKER} push "$(docker_image_name "${SERVICE_NAME}" "${BUILD_PLATFORM}")"
| true
|
698ecfd3ae4e79d1de1cbd42b70d81615f41722c
|
Shell
|
StackArch/pkg-oslo.policy
|
/PKGBUILD
|
UTF-8
| 1,531
| 2.515625
| 3
|
[] |
no_license
|
# Maintainer: BigfootACA <bigfoot@classfun.cn>
# Arch PKGBUILD for python-oslo-policy (OpenStack Oslo Policy library).
_pyname=oslo.policy
_pycname=${_pyname/./-}
pkgname=python-${_pycname}
pkgver=3.8.2
pkgrel=1
pkgdesc="Oslo Policy library"
arch=(any)
url="https://docs.openstack.org/oslo.policy/latest/"
license=(Apache)
depends=(
	python
	python-pbr
	python-requests
	python-oslo-config
	python-oslo-context
	python-oslo-i18n
	python-oslo-serialization
	python-pyaml
	python-stevedore
	python-oslo-utils
)
makedepends=(
	python-setuptools
	python-sphinx
	python-sphinxcontrib-apidoc
	python-openstackdocstheme
	python-reno
)
checkdepends=(
	python-oslotest
	python-requests-mock
	python-stestr
	python-sphinx
	python-coverage
)
options=('!emptydirs')
source=(https://pypi.io/packages/source/${_pyname::1}/$_pyname/$_pyname-$pkgver.tar.gz)
md5sums=('511a86e6bf9ffd4e8f5fb041d2262d7a')
sha256sums=('233030f9acbc3cb894c66943fd71406ec12825776021f5dda4afab6f1762837f')
sha512sums=('6daea44a93f53608529314b5eb780d999a1ffd65c3c54f05d58341254f99754c94e177ad01bb37aa036f98f3211bb3705c499ad6244a68165ae56fa59943c79a')
# pbr derives the version from this instead of git metadata
export PBR_VERSION=$pkgver
build(){
	cd $_pyname-$pkgver
	export PYTHONPATH="$PWD"
	python setup.py build
	# plain-text docs are shipped under /usr/share/doc below
	sphinx-build -b text doc/source doc/build/text
}
check(){
	cd $_pyname-$pkgver
	stestr run
}
package(){
	cd $_pyname-$pkgver
	python setup.py install --root="$pkgdir/" --optimize=1
	install -Dm644 LICENSE "$pkgdir"/usr/share/licenses/$pkgname/LICENSE
	mkdir -p "$pkgdir/usr/share/doc"
	cp -r doc/build/text "$pkgdir/usr/share/doc/$pkgname"
	# sphinx build cache is not useful in the package
	rm -r "$pkgdir/usr/share/doc/$pkgname/.doctrees"
}
| true
|
d5b38f853ed93f1a65a13b1128b17b4bdfc8ece8
|
Shell
|
nicolerg/format_refseq
|
/compress_move.sh
|
UTF-8
| 493
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
# Archive every subdirectory of $1 (except fasta files, genome_length and
# existing archives) as .tar.gz and rsync the archives — plus the
# concatenated genome lengths — to $2, using $3 parallel jobs.
# Fix over the original: expansions are quoted so paths with unusual
# characters do not word-split.
tmpdir=$1
outdir=$2
cores=$3
set -e
cd "$tmpdir"
mkdir -p "$outdir"
tar_folder () {
    local folder=$1
    local outdir=$2
    tar -czvf "${folder}.tar.gz" "$folder"
    rsync -Ptvh "${folder}.tar.gz" "$outdir"
}
# exported so GNU parallel's subshells can call it
export -f tar_folder
folders=$(ls | grep -v -E '\.fna|genome_length|gz')
# NOTE: $(echo $folders) is intentionally unquoted so each folder name
# becomes a separate parallel job argument (names must not contain spaces).
parallel --verbose --jobs "$cores" tar_folder ::: $(echo $folders) ::: "$outdir"
cat genome_length/*gl.txt > genome_length/all_lengths.txt
rsync -Ptvh genome_length/all_lengths.txt "$outdir"
| true
|
ec74e501e4b78ab13ed69b879914a07bc31ba7b2
|
Shell
|
xujun10110/pentest-console
|
/console_interface/scripts/ptc-copyProject.sh
|
UTF-8
| 3,087
| 3.9375
| 4
|
[] |
no_license
|
#!/bin/bash
## Moves all assets from one project to another
##
# Copies the SVN repository and Trac environment of an engagement to a new
# name, rewrites trac.ini, and resyncs Trac against the copied repo.
# Usage
if [ $# -ne 2 ]; then
	echo "USAGE: $0 [from-engagement] [to-engagement]"
	exit 1
fi
# Get Arguments
# TODO - Sanitization
FROM=$1
TO=$2
# Check if client already exists
if [ -e /var/trac/$TO ] ; then
	echo "The client name already exists. Archive the old engagement or think of a new name variant."
	exit 2
fi
echo "all good. starting."
if [ -d /var/svn/$1 ]; then
	if [ -d /var/svn/$2 ]; then
		echo "el problemo, senior, svn destination exists. not copying svn."
	else
		echo "copying svn..."
		cp -rf /var/svn/$1 /var/svn/$2
	fi
else
	echo "yo, that svn project doesn't exist compadre"
fi
if [ -d /var/trac/$1 ]; then
	if [ -d /var/trac/$2 ]; then
		echo "el problemo, senior, trac destination exists. not copying trac."
	else
		echo "copying trac..."
		cp -rf /var/trac/$1 /var/trac/$2
	fi
else
	echo "yo, that trac project doesn't exist compadre"
fi
# Set configuration - replace project specifics in the configuration files for this project
# NOTE(review): this sed replaces every occurrence of the old name anywhere
# in trac.ini, not only the repo path — confirm that is intended.
echo "modding trac.ini"
sed -e "s/$1/$2/g" /var/trac/$1/conf/trac.ini > /var/trac/$2/conf/trac.ini
## set perms - shouldn't be necessary if we're running as PTC user... -- need to make sure primary group is set properly, then things are all good.
##sudo chown www-data:users /var/trac/$2 -R
##sudo chown www-data:users /var/svn/$2 -R
## resync, now that we've changed the repo directory (modding trac.ini)
trac-admin /var/trac/$2 resync
### ## not sure wtf i was thinking...
###
### # If it doesn't exist, create the new (to-)engagement
### ./startEngagement.sh $TO
###
### echo "startEngagement called"
###
### ###### TEMPORARY STORAGE ######
### # Create a working temp-folder name
### TMPFOLDER=`uuidgen`
### TMPPATH=/tmp/$TMPFOLDER
###
### echo "working in $TMPPATH"
###
### # Check that the temp-folder doesn't exist
### if [ -d $TMPPATH ]; then
### rm -rf $TMPPATH;
### fi
###
### # Create the temp-folder
### mkdir $TMPPATH
###
### ###### WIKI #######
### #TODO - Split out into its own script
### mkdir $TMPATH/wiki
###
### trac-admin /var/trac/$FROM wiki dump $TMPPATH/wiki
### trac-admin /var/trac/$TO wiki load $TMPPATH/wiki
###
### echo "wiki copied from $FROM to $TO\n"
###
### #### TICKETS ######
### #TODO - create
### mkdir $TMPATH/tickets
###
### echo "can't do tickets from the commandline atm\n"
###
### #### SVN #####
### #TODO - Split out into its own script
### mkdir $TMPATH/svn
###
### # check out the new project
### svn co file:///var/svn/$FROM/trunk $TMPPATH/svn
###
### # add the old files to our new svn dir
### svn export file:///var/svn/$TO/trunk $TMPPATH/svn
###
### #necessarry?
### ##mv -f TEMPLATEpentest/trunk/* .
### ##rm -Rf TEMPLATEpentest
###
### #add all copied files to svn
### svn add $TMPPATH/svn
###
### #check in to new project
### svn ci $TMPPATH/svn -m "copied from $TO" *
###
### echo "svn copied from $FROM to $TO"
###
### ###### Cleanup temp-storage ######
### #TODO -wipe?
###
### rm -rf $TMPPATH
###
### echo "done"
| true
|
0ea39fc405ce0b8bf5461b8239f1ddb64b4cdaed
|
Shell
|
dang/scripts
|
/cfg/.bashrc.d/actions/aliases
|
UTF-8
| 242
| 2.515625
| 3
|
[] |
no_license
|
# vim: ft=sh:
# Local aliases
# Sourced from .bashrc.d; records its own resolved path so `new` can
# re-source it later via $ALIASFILES.
me=$(readlink --canonicalize --no-newline $BASH_SOURCE)
# $SCRIPTS is expected to be exported by the surrounding bashrc — TODO confirm.
source "${SCRIPTS}/functions.sh"
#source "${SCRIPTS}/flavor/${FLAVOR}/functions.sh"
#
# Make new source this too
export ALIASFILES="${ALIASFILES} ${me}"
| true
|
514a783b82601d16ea372801fdd5cfda3e9dab73
|
Shell
|
GeRDI-Project/Bamboo-Scripts_Utils
|
/helper-scripts/git-utils.sh
|
UTF-8
| 5,024
| 3.921875
| 4
|
[] |
no_license
|
#!/bin/bash
# Copyright © 2018 Robin Weiss (http://www.gerdi-project.de/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script offers helper functions that concern Git.
# Returns the repository slug of a Git repository.
# The slug is a HTTP encoded identifier of a repository.
# Arguments:
# 1 - a link to the repository (default: the first repository of the bamboo plan)
#
GetRepositorySlugFromCloneLink() {
  local cloneLink="${1-$bamboo_planRepository_1_repositoryUrl}"

  # Keep everything after the last '/', then drop a trailing '.git'.
  local slug="${cloneLink##*/}"
  echo "${slug%.git}"
}
# Returns the project identifier of a Git repository.
# Arguments:
# 1 - a link to the repository (default: the first repository of the bamboo plan)
#
GetProjectIdFromCloneLink() {
  local cloneLink="${1-$bamboo_planRepository_1_repositoryUrl}"

  # Drop the trailing "/<repo>.git" segment, then keep the last
  # remaining path segment (the project identifier).
  local withoutRepo="${cloneLink%/*}"
  echo "${withoutRepo##*/}"
}
# Returns the number of unstaged files of the current git directory.
# Arguments: -
#
GetNumberOfUnstagedChanges() {
# 'git diff --numstat' prints one line per file with unstaged changes,
# so the line count is the number of changed files.
git diff --numstat | wc -l
}
# Clones a Git repository to the current directory.
# Arguments:
# 1 - a Bitbucket user name
# 2 - the login password that belongs to argument 1
# 3 - the ID of the project to which the repository belongs
# 4 - the identifier of the repository
#
CloneGitRepository() {
# Percent-encode '@' so an email-style user name survives inside the URL.
local userName="$(echo "$1" | sed -e "s/@/%40/g")"
local password="$2"
local projectId="$3"
local repositorySlug="$4"
local gitCredentials
gitCredentials="$userName:$password"
echo "Cloning repository code.gerdi-project.de/scm/$projectId/$repositorySlug.git" >&2
local response
# Clone into the current directory ('.'); credentials are embedded in
# the URL. NOTE(review): this exposes the password to 'ps' while the
# clone runs — consider a credential helper if that matters.
response=$(git clone -q "https://$gitCredentials@code.gerdi-project.de/scm/$projectId/$repositorySlug.git" .)
# $? here is the exit status of the command substitution (git clone).
if [ $? -ne 0 ]; then
echo "Could not clone repository:" >&2
echo "$response" >&2
# Terminates the whole calling script, not just this function.
exit 1
fi
}
# Creates a remote Git branch of the current repository.
# Arguments:
# 1 - the name of the branch
#
# Creates a remote Git branch of the current repository.
# FIX: the previous `echo $(git …) >&2` collapsed the command output's
# whitespace and discarded the git exit status; run the commands
# directly with stdout redirected to stderr, and quote the branch name.
# Arguments:
#  1 - the name of the branch
#
CreateGitBranch() {
  local branchName="$1"

  git checkout -b "$branchName" >&2
  git push -q --set-upstream origin "$branchName" >&2
}
# Adds, commits, and pushes all files to Git.
# Arguments:
# 1 - the full name of the user that pushes the files
# 2 - the email address of the user that pushes the files
# 3 - the commit message
#
# Adds, commits, and pushes all files to Git.
# Arguments:
#  1 - the full name of the user that pushes the files
#  2 - the email address of the user that pushes the files
#  3 - the commit message
#
PushAllFilesToGitRepository() {
  local userDisplayName="$1"
  local userEmailAddress="$2"
  local commitMessage="$3"

  echo "Adding files to Git" >&2
  git add -A

  echo "Committing files to Git" >&2
  # BUG FIX: the display name and email address were assigned to the
  # wrong config keys (user.email got the name, user.name the email).
  git config user.name "$userDisplayName"
  git config user.email "$userEmailAddress"
  git commit -m "$commitMessage"

  echo "Pushing files to Git" >&2
  git push -q
}
# Retrieves a list of all relative file paths of files that have been added in a specified commit.
# Renamed files are NOT listed.
#
# Arguments:
# 1 - the commit hash of the commit that possibly added new files
# 2 - the path to the targeted local git directory (default: current directory)
#
GetNewFilesOfCommit() {
# Use only the short (7-char) hash prefix.
local commitId="${1:0:7}"
local gitDir="${2-.}"
local diff
# Diff against the parent commit; for a root commit the first command
# fails (no parent) and we fall back to 'git show'.
diff=$((cd "$gitDir" && git diff $commitId~ $commitId) \
|| (cd "$gitDir" && git show $commitId))
# Join the diff into one tab-separated line, then extract the "a/<path>"
# of every "diff --git" header that is immediately followed by a
# "new file mode" marker (i.e. files added in this commit).
echo "$diff" | tr '\n' '\t' | grep -oP '(?<=diff --git a/)([^\t]+)(?= b/\1\tnew file mode)'
}
# Retrieves a list of all relative file paths of files that have been changed in a specified commit.
# Renamed files are listed with their new name.
#
# Arguments:
# 1 - the commit hash of the commit that possibly changed files
# 2 - the path to the targeted local git directory (default: current directory)
#
GetChangedFilesOfCommit() {
# Use only the short (7-char) hash prefix.
local commitId="${1:0:7}"
local gitDir="${2-.}"
local diff
# Diff against the parent commit; fall back to 'git show' for a root
# commit that has no parent.
diff=$((cd "$gitDir" && git diff $commitId~ $commitId) \
|| (cd "$gitDir" && git show $commitId))
# Join the diff into one tab-separated line and pick the "b/<path>" of
# every file header that is followed by an "index " line — i.e. every
# touched file; renamed files appear under their new name.
echo "$diff" | tr '\n' '\t' | grep -oP '(?<= b/)[^\t]+(?=\tindex )'
}
# Retrieves a list of all relative file paths of files that have been deleted in a specified commit.
# Renamed files are NOT listed.
#
# Arguments:
# 1 - the commit hash of the commit that possibly deleted files
# 2 - the path to the targeted local git directory (default: current directory)
#
GetDeletedFilesOfCommit() {
# Use only the short (7-char) hash prefix.
local commitId="${1:0:7}"
local gitDir="${2-.}"
local diff
# Diff against the parent commit; fall back to 'git show' for a root
# commit that has no parent.
diff=$((cd "$gitDir" && git diff $commitId~ $commitId) \
|| (cd "$gitDir" && git show $commitId))
# Same single-line trick as GetNewFilesOfCommit, but match headers
# followed by "deleted file mode" (files removed in this commit).
echo "$diff" | tr '\n' '\t' | grep -oP '(?<=diff --git a/)([^\t]+)(?= b/\1\tdeleted file mode)'
}
| true
|
66d0398c4e4208e06162a154886cbb3378a3e4d6
|
Shell
|
krishnodey/Shell-Scripting-Exercises
|
/27.sh
|
UTF-8
| 168
| 2.96875
| 3
|
[] |
no_license
|
# Print every prime in [2, 100], one per line.
# Improvements over the original: trial division stops at sqrt(p)
# instead of p-1, and the dead "$p != 1" test is gone (p starts at 2).

# is_prime N -> exit status 0 iff N is prime
is_prime() {
  local n=$1 d
  (( n >= 2 )) || return 1
  for (( d = 2; d * d <= n; d++ )); do
    if (( n % d == 0 )); then
      return 1
    fi
  done
  return 0
}

for (( p = 2; p <= 100; p++ )); do
  if is_prime "$p"; then
    echo "$p"
  fi
done
| true
|
e281305dab3fb8b620118356bdc5977a8600c594
|
Shell
|
davidsbatista/TREMoSSo
|
/batch-evaluation.sh
|
UTF-8
| 1,293
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
# Grid search over MinHash-LSH parameters: for every combination of
# signature size, band count and k (k-NN), index the training data,
# classify, evaluate, and collect the results in <s>.<b>.<k>/results.txt.

# Make sure REDIS is running
# sudo /etc/init.d/redis-server start

N_SIGS=(100 200 300 400 500 600)
N_BANDS=(25 50 75 100 150)
KNN=(1 3 5 7)

# Iterate the declared arrays (previously the loop bounds were duplicated
# as literals, inviting drift between the arrays and the loops).
for s in "${N_SIGS[@]}"; do
    for b in "${N_BANDS[@]}"; do
        for k in "${KNN[@]}"; do
            DIR=${s}.${b}.${k}
            echo ${DIR}
            mkdir ${DIR}
            cd ${DIR}
            # clear REDIS tables
            redis-cli flushall
            # link to file with generated features
            ln -s ../features.txt
            # index training data
            # BUG FIX: the original passed ${N_BANDS} ${N_SIGS} ${KNN},
            # which expand to the FIRST array element only — every run
            # used identical parameters despite the per-run directory
            # names. Use the current loop values instead.
            ../MinHashClassifier.py index ../training_data.txt "$b" "$s"
            # classifiy training data
            ../MinHashClassifier.py classify2 ../set_b_shingles.txt "$b" "$s" "$k"
            # create a separate file for each relationship type
            ../musico2evaluation.sh
            # link to files needed by evaluation framework
            for i in ../superset_*; do ln -vs ${i}; done
            for i in ../*high_pmi*; do ln -sv ${i}; done
            # evaluate each relationship type
            ../evaluate-musico.sh
            # output results to single file
            for i in *_results.txt; do echo ${i}; tail -n 5 ${i}; done >> results.txt
            cd ..
        done
    done
done
| true
|
440d43841a2e42af7a8842c97bc5ff3dd299c6db
|
Shell
|
OrquestraCD/multicluster-scheduler
|
/test/e2e/test_argo.sh
|
UTF-8
| 1,840
| 3.171875
| 3
|
[
"Apache-2.0"
] |
permissive
|
# e2e test: run an Argo workflow across two kind clusters via the
# multicluster scheduler. Requires k1/k2 kubectl aliases (sourced below).
set -euo pipefail
source test/e2e/aliases.sh
# Install Argo v2.2.1 into cluster1 and prepare both clusters
# (service accounts, argo CLI, pre-pulled executor image).
setup_argo() {
# Install Argo in cluster1
k1 create ns argo
k1 apply -n argo -f https://raw.githubusercontent.com/argoproj/argo/v2.2.1/manifests/install.yaml
# kind uses containerd not docker so we change the argo executor (default: docker)
# TODO modify install.yaml instead
k1 patch cm -n argo workflow-controller-configmap --patch '{"data":{"config":"{\"containerRuntimeExecutor\":\"kubelet\"}"}}'
k1 delete pod --all -n argo # reload config map
k1 apply -f examples/argo-workflows/_service-account.yaml
# the workflow service account must exist in the other cluster
k2 apply -f examples/argo-workflows/_service-account.yaml
# TODO download only if not present or version mismatch
curl -Lo argo https://github.com/argoproj/argo/releases/download/v2.2.1/argo-linux-amd64
chmod +x argo
# speed up container creations
docker pull argoproj/argoexec:v2.2.1 # may already be on host
kind load docker-image argoproj/argoexec:v2.2.1 --name cluster1
kind load docker-image argoproj/argoexec:v2.2.1 --name cluster2
}
# Undo setup_argo (reverse order of the apply steps).
tear_down_argo() {
k2 delete -f examples/argo-workflows/_service-account.yaml
k1 delete -f examples/argo-workflows/_service-account.yaml
k1 delete -n argo -f https://raw.githubusercontent.com/argoproj/argo/v2.2.1/manifests/install.yaml
k1 delete ns argo
}
# Submit the blog "scenario A" workflow from cluster1 and verify that at
# least one workflow pod was scheduled onto cluster2.
test_blog_scenario_a_multicluster() {
k1 label ns default multicluster-scheduler=enabled
KUBECONFIG=kubeconfig-cluster1 ./argo submit --serviceaccount argo-workflow --wait examples/argo-workflows/blog-scenario-a-multicluster.yaml
# wc -l counts the kubectl header line too, hence "> 1" means >= 1 pod.
if [ $(k2 get pod -l multicluster.admiralty.io/workflow | wc -l) -gt 1 ]; then
echo "SUCCESS"
else
echo "FAILURE"
exit 1
fi
KUBECONFIG=kubeconfig-cluster1 ./argo delete --all
k1 label ns default multicluster-scheduler-
}
| true
|
cd1de1ec281e9a68732c76e895eae4f31b54c332
|
Shell
|
Mahboub99/CMP303_A6
|
/lab1/1_1_7.sh
|
UTF-8
| 751
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
# Classroom exercise: build lab1/ from words.txt + numbers.txt and walk
# through paste/sort/uniq/tr/grep/pr steps. The numbered #N comments map
# to the assignment's step numbers; step order is significant (step 11
# is EXPECTED to fail because step 9 removed read permission).
rm -rf lab1 #1
mkdir lab1 #2
cp words.txt numbers.txt lab1 #3
cd lab1
paste words.txt numbers.txt > MergedContent.txt #4
head -3 MergedContent.txt #5
sort MergedContent.txt > SortedMergedContent.txt #6
echo "The sorted file is :" #7
cat SortedMergedContent.txt #8
chmod u-r SortedMergedContent.txt #9
cat MergedContent.txt | sort | uniq #10
# This tr fails with "Permission denied" on purpose (see step 12's echo).
tr < SortedMergedContent.txt '[a-z]' '[A-Z]' #11
echo 'Permission denied as it is prevented anyone from reading the file MergedSortedContent ' #12
chmod u+r SortedMergedContent.txt #13
tr < SortedMergedContent.txt '[a-z]' '[A-Z]'
grep -n '^w.*[0-9]' MergedContent.txt #14
# NOTE(review): 'paste' here has no file arguments, so it just passes
# tr's stdout through — likely a leftover from the exercise wording.
tr < MergedContent.txt 'i' 'o'|paste > NewMergedContent.txt #15
pr -m -t MergedContent.txt NewMergedContent.txt #16
| true
|
8fc9947e1dad152aa3e6fcaf237b0bec56f6029d
|
Shell
|
williamloures/Random-scripts
|
/split-mkv.sh
|
UTF-8
| 245
| 2.9375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Split every MKV in the current directory at its chapter boundaries,
# then extract each chapter's audio as 16-bit FLAC and delete the video.

# Pass 1: one numbered MKV per chapter; remove the source on success.
for source in *.mkv; do
    mkvmerge -o output.mkv --split chapters:all "$source" && rm "$source"
done

# Pass 2: characters 9-10 of each chapter file name (the number mkvmerge
# appended) become the FLAC file name.
for chapter in *.mkv; do
    TRACK=${chapter:8:2}
    ffmpeg -i "$chapter" -vn -c:a flac -sample_fmt s16 "$TRACK.flac"
    rm "$chapter"
done
| true
|
63e9f89af3aa08bb16ac7968c8f3a18384a57ada
|
Shell
|
dpb587/upstream-blob-mirror
|
/bin/all
|
UTF-8
| 798
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
# Refresh every mirrored metalink via bin/watch, rebuild index.html,
# commit the result, and exit non-zero if any watch run failed.

set -eu -o pipefail ; export SHELLOPTS

DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/.."

cd "$DIR"

exit=0

for metalink in $( ( find . -name metalink ; find . -name 'metalink-*' ) | cut -c3- | sort ); do
  # Temporarily drop -e so a failing watch doesn't abort the loop.
  set +e
  ./bin/watch "$S3_BLOBS" "$( dirname "$metalink" )" "$( basename "$metalink" )"
  watch=$?
  set -e

  # BUG FIX: this previously tested "$?", which is the exit status of
  # `set -e` (always 0), so failures were never recorded. Test the
  # captured status instead.
  if [[ "0" != "$watch" ]]; then
    exit=1
  fi
done

cp index.html.header index.html

for repo in $( find . -name metalink | cut -c3- | xargs -n1 dirname | sort ); do
  version=$( meta4-repo filter -n1 --format=version "file://./$repo" )
  echo " <li><a href=\"$repo/index.xml\">$repo</a> (<a href=\"$repo/v$version.meta4\">v$version</a>)</li>" >> index.html
done

cat index.html.footer >> index.html

git add .
git commit -m 'bump' || true

exit "$exit"
| true
|
b7c68027bbcfbe6349022250aa07c6f2cdb798e7
|
Shell
|
lenik/uni
|
/devel/start2work/stwork.in
|
UTF-8
| 1,102
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# stwork: launch all bin/st.* helper tools of the dev environment.
# The @…@ placeholders are substituted at build/install time; the
# cliboot framework (sourced below) supplies option/help/quit/boot/_log1.
: ${RCSID:=$Id: - @VERSION@ @DATE@ @TIME@ - $}
: ${PACKAGE:=@PACKAGE@}
: ${PROGRAM_TITLE:=Start dev environ with all their tools}
: ${PROGRAM_SYNTAX:=[OPTIONS] [--] ...}
. shlib-import cliboot
option -q --quiet
option -v --verbose
option -h --help
option --version
sysconfdir=@sysconfdir@
libdir=@libdir@
pkgdatadir=@pkgdatadir@
# If the placeholders were NOT substituted (running from the source
# tree), fall back to conventional system paths.
if [ "@bindir@" = '@'bindir'@' ]; then
sysconfdir=/etc
libdir=/usr/lib
pkgdatadir=/usr/share/start2work
fi
# Candidate stwork config directories, in lookup order.
# NOTE(review): currently unused in this file — presumably consumed by
# the sourced framework or the st.* tools; confirm before removing.
stdirs=(
$libdir/stwork
$pkgdatadir/stwork
$sysconfdir/stwork
$HOME/.config/stwork
)
# Option callback invoked by the cliboot framework for each parsed flag.
function setopt() {
case "$1" in
-h|--help)
help $1; exit;;
-q|--quiet)
LOGLEVEL=$((LOGLEVEL - 1));;
-v|--verbose)
LOGLEVEL=$((LOGLEVEL + 1));;
--version)
show_version; exit;;
*)
quit "invalid option: $1";;
esac
}
# Start every bin/st.* tool in the background.
function main() {
for st in bin/st.*; do
_log1 "load $st..."
"$st" &
done
}
boot "$@"
| true
|
3021bdd33112e1c67653c18356cb8d1da28bce37
|
Shell
|
shugaoye/x86qemu
|
/recovery/root/sbin/network_start.sh
|
UTF-8
| 1,916
| 3.296875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/sbin/sh
# Android/QEMU guest network bring-up: configures the emulator's fixed
# 10.0.2.x addressing, RIL mode, extra DNS entries, boot animation, and
# an optional second NIC — all driven by ro.kernel.* boot properties.
# this was kanged from /system/etc/init.goldfish.sh
# we run it in the init.rc and it is required to get
# the network running in order for adb to work in
# emulator
# Setup networking when boot starts
ifconfig eth0 10.0.2.15 netmask 255.255.255.0 up
route add default gw 10.0.2.2 dev eth0
# ro.kernel.android.qemud is normally set when we
# want the RIL (radio interface layer) to talk to
# the emulated modem through qemud.
#
# However, this will be undefined in two cases:
#
# - When we want the RIL to talk directly to a guest
# serial device that is connected to a host serial
# device by the emulator.
#
# - We don't want to use the RIL but the VM-based
# modem emulation that runs inside the guest system
# instead.
#
# The following detects the latter case and sets up the
# system for it.
#
qemud=`getprop ro.kernel.android.qemud`
case "$qemud" in
"")
radio_ril=`getprop ro.kernel.android.ril`
case "$radio_ril" in
"")
# no need for the radio interface daemon
# telephony is entirely emulated in Java
setprop ro.radio.noril yes
stop ril-daemon
;;
esac
;;
esac
# Setup additionnal DNS servers if needed
# (the emulator exposes its DNS forwarders at 10.0.2.3-10.0.2.6)
num_dns=`getprop ro.kernel.ndns`
case "$num_dns" in
2) setprop net.eth0.dns2 10.0.2.4
;;
3) setprop net.eth0.dns2 10.0.2.4
setprop net.eth0.dns3 10.0.2.5
;;
4) setprop net.eth0.dns2 10.0.2.4
setprop net.eth0.dns3 10.0.2.5
setprop net.eth0.dns4 10.0.2.6
;;
esac
# disable boot animation for a faster boot sequence when needed
boot_anim=`getprop ro.kernel.android.bootanim`
case "$boot_anim" in
0) setprop debug.sf.nobootanimation 1
;;
esac
# set up the second interface (for inter-emulator connections)
# if required
my_ip=`getprop net.shared_net_ip`
case "$my_ip" in
"")
;;
*) ifconfig eth1 "$my_ip" netmask 255.255.255.0 up
;;
esac
| true
|
2305e76b7ed57cb20af59283366d35c8d7357609
|
Shell
|
timberline-secondary/hackerspace-control-repo
|
/site/profile/files/guest_account/post_login_default.sh
|
UTF-8
| 809
| 3.75
| 4
|
[] |
no_license
|
#!/bin/sh
# /etc/gdm3/PostLogin/Default
# Create a temporary home drive for the guest user
# https://unix.stackexchange.com/questions/258544/create-guest-account-in-gnome-3-x-on-arch-linux

guestuser="guest"

## Set up guest user session
if [ "$USER" = "$guestuser" ]; then
    # create home drive
    mkdir /tmp/"$guestuser"
    # FIX: the original `cp /etc/skel/.*` glob also matched '.' and '..'
    # (cp errors on directories without -r) and silently skipped
    # dot-directories such as .config; copying the directory contents
    # recursively handles regular files and nested directories alike.
    cp -r /etc/skel/. /tmp/"$guestuser"
    chown -R "$guestuser":"$guestuser" /tmp/"$guestuser"
    # show temp message -- DOESN'T WORK HERE
    # notify-send 'Temporary Guest Session' 'All data created during this guest session will be deleted when you log out, and settings will be reset to defaults. Please save files on an external device like a USB stick, or in the cloud like Google Drive or Teams, or email the files to yourself, if you want to access them later.'
fi

exit 0
| true
|
7f1e314b4e8d514280deed46c6f42e5e76aff139
|
Shell
|
ndollar/misc
|
/bash/bashrc-extras.sh
|
UTF-8
| 838
| 3.90625
| 4
|
[] |
no_license
|
# Idempotently install a createnv() helper into ~/.bashrc: if no
# createnv is defined in the current shell, append the function below
# (captured verbatim via a quoted heredoc — "EOF" prevents expansion),
# then re-source ~/.bashrc either way.
if [ -z "$(type -t createnv)" ]; then
echo "Adding createnv() to ~/.bashrc";
# read -d '' slurps the whole heredoc (up to NUL / EOF) into $createnv.
read -d '' createnv <<- "EOF"
# Usage: createnv [virtualenv directory name]
# Wraps call to virtualenv with extra arguments
# and places into .env directory.
createnv() {
if [[ ! $1 ]]; then
echo "Must provide top-level virtualenv directory";
return 1;
fi;
mkdir -p $1;
if [[ $? -gt 0 ]]; then
return $?;
fi
cd $1;
virtualenv --no-site-packages --python=/usr/bin/python2.7 --distribute --prompt="($(basename $1))" .env
if [[ $? -gt 0 ]]; then
return $?;
fi
ln -s .env/bin/activate activate
}
EOF
echo "$createnv" >> $HOME/.bashrc
else
echo "createnv() already exists ... skipping";
fi
# Reload .bashrc
. $HOME/.bashrc
| true
|
abf5604ecceb52cdcdb0d413b43c524105644bba
|
Shell
|
truenas/ports
|
/security/ruby-bitwarden/files/rubywarden-api.in
|
UTF-8
| 1,187
| 3.21875
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
#
# Created by: Mark Felder <feld@FreeBSD.org>

# FreeBSD rc(8) service script for the Rubywarden (Bitwarden-compatible)
# API: runs rackup in production mode as the configured user, logging
# through syslog. Tunables: rubywarden_api_{enable,host,port,signups,
# user,group,chdir} in /etc/rc.conf.

# PROVIDE: rubywarden-api
# REQUIRE: LOGIN
# KEYWORD: shutdown
#
# Add the following line to /etc/rc.conf to enable `rubywarden-api':
#
# rubywarden_api_enable="YES"

. /etc/rc.subr

name=rubywarden_api
rcvar=rubywarden_api_enable

load_rc_config ${name}

# Defaults, applied only when not set in rc.conf.
: ${rubywarden_api_enable:=NO}
: ${rubywarden_api_host:=localhost}
: ${rubywarden_api_port:=4567}
: ${rubywarden_api_signups:=NO}
: ${rubywarden_api_user:=www}
: ${rubywarden_api_group:=www}
: ${rubywarden_api_chdir=/usr/local/www/rubywarden}

pidfile="/var/run/rubywarden/${name}.pid"
# %%…%% placeholders are substituted by the FreeBSD ports framework.
procname=%%RUBY_WITH_SUFFIX%%
command="%%PREFIX%%/bin/rackup"
command_args="-P ${pidfile} -o ${rubywarden_api_host} -p ${rubywarden_api_port} -E production config.ru 2>&1 | logger -t rubywarden &"

start_precmd="start_precmd"

# Before starting: ensure the pidfile directory exists with the right
# ownership, and export ALLOW_SIGNUPS when signups are enabled.
start_precmd()
{
if [ ! -e /var/run/rubywarden ] ; then
install -d -o ${rubywarden_api_user} -g ${rubywarden_api_group} /var/run/rubywarden;
fi
checkyesno rubywarden_api_signups
if [ "$?" -eq 0 ]; then
export ALLOW_SIGNUPS=1
echo "Bitwarden Signups Enabled"
fi
}

run_rc_command "$1"
| true
|
0f072de400418915a1e17352f9ef78a104606754
|
Shell
|
CarlosAugustoPO/gutodotfiles
|
/.bin/classicode
|
UTF-8
| 3,046
| 3.625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# classicode [hoj|ama|ont|seg|ter|qua|qui|sex|sab|dom]
# Scrapes testapramim.com.br and prints the McDonald's "classico do dia"
# for the requested weekday (default: today). Requires ag (the_silver_searcher)
# and curl. User-facing strings are intentionally in Portuguese.
#FG Colours
color_fg_none="\e[0m"
color_fg_lightGreen="\e[38;5;47m"
color_fg_lightBlue="\e[38;5;75m"
color_fg_lightYellow="\e[38;5;229m"
color_fg_lightPink="\e[38;5;211m"
color_fg_lightPurple="\e[38;5;163m"
color_fg_red="\e[38;5;196m"
#BG Colours
color_bg_purple="\e[48;5;54m"
# English weekday abbreviation of tomorrow, e.g. "Tue".
amanha=$(date -d "+1 days" | awk '{print $1}' )
dia="$@"
if [[ $dia == "" ]]; then dia="hoj"; fi
# Lookup table mapping Portuguese day names to date(1) abbreviations.
echo -e "\
amanha $amanha
segunda Mon
terça Tue
quarta Wed
quinta Thu
sexta Fri
sabado Sat
domingo Sun" > /tmp/.queryclassicode;
# Anything other than the known 3-letter codes prints the help text.
if [[ "${dia}" != "hoj" ]] && [[ "${dia}" != "ama" ]] && [[ "${dia}" != "ont" ]] && [[ "${dia}" != "seg" ]] && [[ "${dia}" != "ter" ]] && [[ "${dia}" != "qua" ]] && [[ "${dia}" != "qui" ]] && [[ "${dia}" != "sex" ]] && [[ "${dia}" != "sab" ]] && [[ "${dia}" != "dom" ]]; then
echo -e "\
$color_fg_lightYellow Por favor especifique um dia
$color_fg_lightBlue Opções:
$color_fg_lightGreen hoj $color_fg_lightPink #Exibe o classico hoje ($(date | awk '{print $1}'))
$color_fg_lightGreen ama $color_fg_lightPink #Exibe o classico de amanhã ($(date -d "+1 days" | awk '{print $1}'))
$color_fg_lightGreen ont $color_fg_lightPink #Exibe o classico de ontem ($(date -d "-1 days" | awk '{print $1}'))
$color_fg_lightGreen seg $color_fg_lightPink #Exibe o classico de Segunda
$color_fg_lightGreen ter $color_fg_lightPink #Exibe o classico de Terça
$color_fg_lightGreen qua $color_fg_lightPink #Exibe o classico de Quarta
$color_fg_lightGreen qui $color_fg_lightPink #Exibe o classico de Quinta
$color_fg_lightGreen sex $color_fg_lightPink #Exibe o classico de Sexta
$color_fg_lightGreen sab $color_fg_lightPink #Exibe o classico de Sábado
$color_fg_lightGreen dom $color_fg_lightPink #Exibe o classico de Domingo
$color_fg_lightBlue Ex:$color_fg_lightYellow$ classicode$color_fg_lightGreen qua $color_fg_none
"
else
# Resolve the requested code to an English day abbreviation (diaen).
if [ "$dia" == "ama" ]; then
diaen=$(date -d "+1 days" | awk '{print $1}' )
elif [ "$dia" == "ont" ];
then
diaen=$(date -d "-1 days" | awk '{print $1}' )
elif [ "$dia" == "hoj" ];
then
diaen=$(date | awk '{print $1}' )
else
diaen=$(cat /tmp/.queryclassicode | ag -i --nocolor $dia | awk '{ print $2 }')
fi
# Build the regex used to locate this day's entry on the scraped page.
if [[ $diaen == "Mon" ]]; then
varquery='segunda\-feira\s.*:';
elif [[ $diaen == "Tue" ]]; then
varquery='terça\-feira\s.*:';
elif [[ $diaen == "Wed" ]]; then
varquery='quarta\-feira\s.*:';
elif [[ $diaen == "Thu" ]]; then
varquery='quinta\-feira\s.*:';
elif [[ $diaen == "Fri" ]]; then
varquery='sexta\-feira\s.*:';
elif [[ $diaen == "Sat" ]]; then
varquery='sábado\s.*:';
elif [[ $diaen == "Sun" ]]; then
varquery='domingo\s.*:';
else
echo "Falha ao obter dia da semana";
fi
# Scrape the page and cut out the dish name (columns 13-100 before ':').
curl -s https://www.testapramim.com.br/mcdonalds-do-dia/ | ag -i $varquery | cut -f1 -d: | cut -c 13-100 > /tmp/.classicododia
echo -e "$color_fg_lightPink (McDonalds)$color_fg_lightYellow $( cat /tmp/.classicododia ) $color_fg_none"
fi
| true
|
00e7f8c10ac6c8c77f0029b2546189b595cb09b1
|
Shell
|
tringn/AgeGenderPrediction
|
/compile2Movidius.sh
|
UTF-8
| 537
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/sh
# Compile the newest AgeNet and GenderNet Caffe snapshots into Movidius
# NCS graph files under ./graph, then convert the mean file.

# mkdir -p is a no-op when the directory already exists, so the explicit
# existence test the script used to carry is unnecessary.
mkdir -p ./graph

# Pick the most recently modified snapshot of each model.
# NOTE: `ls -t | head` breaks on filenames containing whitespace; the
# snapshot names produced by Caffe contain none.
AGENET_MODEL=$(ls -t models/AgeNet/caffenet_age_train*.caffemodel | head -n 1)
AGENET_DEPLOY=prototxt/AgeNet_deploy.prototxt
mvNCCompile -s 12 -w "$AGENET_MODEL" "$AGENET_DEPLOY" -o graph/AgeNet.graph

GENDERNET_MODEL=$(ls -t models/GenderNet/caffenet_gender_train*.caffemodel | head -n 1)
GENDERNET_DEPLOY=prototxt/GenderNet_deploy.prototxt
mvNCCompile -s 12 -w "$GENDERNET_MODEL" "$GENDERNET_DEPLOY" -o graph/GenderNet.graph

# Convert mean file
python3 utils/convert_mean.py
| true
|
1fd5eb26906aa81ced672f55883c00af28d845fa
|
Shell
|
abelardojarab/ase-testing
|
/test_afus/ccip_vtp_nlb_all/SW/run.sh
|
UTF-8
| 2,571
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# Waits for the ASE simulator to signal readiness, then builds and runs
# the Hello_ALI_VTP_NLB sample against it.

# Wait for readiness
echo "##################################"
echo "# Waiting for .ase_ready #"
echo "##################################"
while [ ! -f $ASE_WORKDIR/.ase_ready.pid ]
do
  sleep 1
done

# Simulator PID (value after "pid=" in the ready file)
ase_pid=`cat $ASE_WORKDIR/.ase_ready.pid | grep pid | cut -d "=" -s -f2-`

## Copy BDX/SKX version
cp $ASEVAL_GIT/test_afus/ccip_vtp_nlb_all/SW/HelloALIVTPNLB.cpp.$RELCODE $BBB_GIT/BBB_cci_mpf/sample/Hello_ALI_VTP_NLB/SW/HelloALIVTPNLB.cpp

echo "######################################"
echo "# Compiling Hello_ALI_VTP_NLB #"
echo "######################################"
cd $BBB_GIT/BBB_cci_mpf/sample/Hello_ALI_VTP_NLB/SW/
make clean
make prefix=$MYINST_DIR CFLAGS="-I $BBB_GIT/BBB_cci_mpf/sw/include/"

echo "######################################"
echo "# Testing Hello_ALI_VTP_NLB #"
echo "######################################"
cd $BBB_GIT/BBB_cci_mpf/sample/Hello_ALI_VTP_NLB/SW/
timeout 3600 ./helloALIVTPnlb
if [[ $? != 0 ]];
then
  # BUG FIX: the message was a bare string (executed as a command,
  # yielding "command not found"); it needs an echo.
  echo "helloALIVTPnlb timed out -- FAILURE EXIT !!"
  exit 1
fi
#######################################################################
## For SKX1 release
# if [ $RELCODE == "SKX1" ]
# then
# echo "##########################################################"
# echo "# Testing SKX1 NLB+MPF with fpgadiag in lpbk1 mode #"
# echo "##########################################################"
# ## Listing options
# fpgadiag_rdvc_arr="--rva --rvl0 --rvh0 --rvh1"
# fpgadiag_wrvc_arr="--wva --wvl0 --wvh0 --wvh1"
# fpgadiag_mcl_arr="1 2 4"
# fpgadiag_cnt_arr="64 1024 8192"
# fpgadiag_rdtype_arr="--rds --rdi"
# fpgadiag_wrtype_arr="--wt --wb"
# ## Run options
# cd $MYINST_DIR/bin
# for rdvc_set in $fpgadiag_rdvc_arr ; do
# for wrvc_set in $fpgadiag_wrvc_arr ; do
# for mcl_set in $fpgadiag_mcl_arr ; do
# for cnt_set in $fpgadiag_cnt_arr ; do
# for rd_set in $fpgadiag_rdtype_arr ; do
# for wr_set in $fpgadiag_wrtype_arr ; do
# date
# if ps -p $ase_pid > /dev/null
# then
# echo "./fpgadiag --target=ase --mode=lpbk1 --begin=$cnt_set $rd_set $wr_set --mcl=$mcl_set $rdvc_set $wrvc_set"
# timeout 1800 ./fpgadiag --target=ase --mode=lpbk1 --begin=$cnt_set $rd_set $wr_set --mcl=$mcl_set $rdvc_set $wrvc_set
# if [[ $? != 0 ]] ;
# then
# "fpgadiag timed out -- FAILURE EXIT !!"
# exit 1
# fi
# else
# echo "** Simulator not running **"
# exit 1
# fi
# done
# done
# done
# done
# done
# done
# fi
| true
|
303323173eeec093fcbeee0782d905c2d575995d
|
Shell
|
Gali-Madhan-Kumar/AssignmentSolutions
|
/Day-6 Solutions/factorial.sh
|
UTF-8
| 181
| 3.578125
| 4
|
[] |
no_license
|
#! /bin/bash -x
# Print the factorial of a number given as $1 or, if absent, read
# interactively (original behavior). Empty input now defaults to 0
# instead of triggering an arithmetic syntax error.

# factorial N -> prints N! (iterative; 0! == 1)
factorial() {
    local n=$1 result=1 i
    for (( i = 2; i <= n; i++ )); do
        result=$(( result * i ))
    done
    echo "$result"
}

number=${1:-}
if [ -z "$number" ]; then
    # -r keeps backslashes literal in the typed input.
    read -r -p "Enter the number to find the factorial " number
fi
number=${number:-0}

fact=$(factorial "$number")
echo "Factorial of $number is " $fact
| true
|
ba76e56eefc13a4dcdd6bb1bbb072412e1e24035
|
Shell
|
patchkit-net/patchkit-development-kit
|
/src/install_vars.sh
|
UTF-8
| 253
| 2.734375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# install_vars <platform>
#
# Meant to be sourced: exports PDK_INSTALL_TEMP_DIR and
# PDK_INSTALL_PLATFORM_DIR for the platform named in $1, both resolved
# relative to this script's parent directory.

# Absolute directory containing this script.
SRC_INSTALL_VARS_SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

# temp<platform> scratch directory and the platform's install directory.
export PDK_INSTALL_TEMP_DIR="$SRC_INSTALL_VARS_SCRIPT_DIR/../temp$1"
export PDK_INSTALL_PLATFORM_DIR="$SRC_INSTALL_VARS_SCRIPT_DIR/../$1"
| true
|
6169c82ecaceb64d686c086692fae15f65a69b02
|
Shell
|
open-estuary/test-definitions
|
/auto-test/middleware/database/mysql-utilities/mysql/createdb.sh
|
UTF-8
| 414
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
# create database db1 db2
# Creates MySQL database $1 if it does not exist.
# NOTE(review): root credentials are hardcoded on the command line
# (visible in ps); consider --defaults-extra-file.
db1=$1
mysql -uroot -proot -e "create database if not exists $db1"
# Dead code: the whole block below is guarded by `if false` and never
# runs. It is an earlier expect(1)-based variant (the first line is
# leftover Tcl, not shell). Kept for reference.
if false ;then
set dbname [lindex $argv 0]
EXPECT=$(which expect)
$EXPECT << EOF | tee out.log
set timeout 500
spawn mysql -u root -p
expect "password:"
send "root\r"
expect "mysql>"
send "create database $dbname;\r"
expect "OK"
send "show databases;\r"
expect "$dbname"
send "exit\r"
expect eof
EOF
fi
| true
|
e44b749fcb2bfcc8907abb80f505ec32dff5fc13
|
Shell
|
buzzy/linux.base
|
/sysroot/etc/network/if-pre-up.d/wpasupplicant
|
UTF-8
| 348
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/sh
# if-pre-up hook: for wireless interfaces configured with the "manual"
# method, bring the link up and start wpa_supplicant (plus an event
# handler via wpa_cli) before ifup continues.
# FIX: `==` is a bashism — POSIX test uses `=` and dash fails on `==`
# with "unexpected operator". Also quote $IFACE and the config path.
if [ "$METHOD" = "manual" ] && [ "$PHASE" = "pre-up" ] && [ -n "$IF_WIRELESS" ]; then
    IF_WPA_CONF="${IF_WPA_CONF:-/etc/wpa_supplicant/wpa_supplicant.conf}"
    ip link set dev "$IFACE" up
    wpa_supplicant -B -i "$IFACE" -s -c "$IF_WPA_CONF" -P "/var/run/wpa_supplicant-$IFACE.pid"
    wpa_cli -i "$IFACE" -B -a /etc/wpa_supplicant/event_handler.sh
fi
| true
|
78099cdf707f144fc74b3ac6fb3004998b3be511
|
Shell
|
alpinelinux/aports
|
/main/iproute2-qos/setup-qos
|
UTF-8
| 2,076
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/sh
# Alpine setup-qos: interactively configure and enable the iproute2-qos
# service. libalpine.sh supplies echon/default_read/rc_add/apk_add.
PREFIX=
. "$PREFIX/lib/libalpine.sh"
# $ROOT allows operating on a chroot; the qos service reads this file.
conf="$ROOT/etc/conf.d/qos"
# Print the value field of every config line whose key begins with $1
# (key and value separated by '='); prints nothing if no line matches.
cfgval() {
	awk -v key="$1" -F '=' '$0 ~ ("^" key) { print $2 }' "$conf" 2>/dev/null
}
# Set key $1 to value $2 in $conf: rewrite an existing (possibly
# '#'-commented) "key=" line in place, or append a new one.
setcfg() {
	local key=$1 value=$2

	sed -i "s/^\\(\\#\\)*$key=.*/$key=$value/" "$conf"
	grep -q "^$key=" "$conf" || echo "$key=$value" >> "$conf"
}
apk_add iproute2
# Seed the prompts with the currently configured values, if any.
if [ -f "$conf" ] ; then
_UPLINK_RATE=$(cfgval UPLINK_RATE)
_DOWNLINK_RATE=$(cfgval DOWNLINK_RATE)
_RATE_SUB_PERCENT=$(cfgval RATE_SUB_PERCENT)
else
echo "Configuration file '$conf' not found"
exit 1
fi
echo "**********************************************************************"
echo "Since ISPs tend to overestimate the speeds they offer, it would probably be best"
echo " if you measure this on a free line to set values very precisely."
echo "**********************************************************************"
echo
# echon/default_read come from libalpine.sh: prompt without newline,
# then read with the shown default kept on empty input.
echon "Specify the upload speed of your internet connection (mbps, mbit, kbit, kbps, bps): [$_UPLINK_RATE] "
default_read _UPLINK_RATE $_UPLINK_RATE
echo
echon "Specify the download speed of your internet connection (mbps, mbit, kbit, kbps, bps): [$_DOWNLINK_RATE] "
default_read _DOWNLINK_RATE $_DOWNLINK_RATE
echo
echo "**********************************************************************"
echo "In order to prevent traffic queuing at the ISP side or in your modem,"
echo " you should set a slightly lower rate than real one."
echo "This way the bottleneck is the router,"
echo " not the ISP or modem, which allows to control the queue."
echo "**********************************************************************"
echo
echon "Specify amount of percents: [$_RATE_SUB_PERCENT] "
default_read _RATE_SUB_PERCENT $_RATE_SUB_PERCENT
echon "Start QoS? (y/n) [y] "
default_read startqos "y"
case "$startqos" in
[Yy]*) /etc/init.d/qos start;;
esac
echon "Make QoS to be started on boot? (y/n) [y] "
default_read bootstartqos "y"
case "$bootstartqos" in
[Yy]*) rc_add qos;;
esac
# Persist the (possibly updated) answers back to the config file.
setcfg UPLINK_RATE $_UPLINK_RATE
setcfg DOWNLINK_RATE $_DOWNLINK_RATE
setcfg RATE_SUB_PERCENT $_RATE_SUB_PERCENT
| true
|
aa67372aba453f6d1aa906c5d221c8b0f941d272
|
Shell
|
lgannoaa/global-workflow
|
/parm/config/config.wave
|
UTF-8
| 4,162
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/ksh -x

########## config.wave ##########
# Wave steps specific
# Sourced (not executed) by workflow jobs; expects CDUMP, FHMAX/FHMAX_GFS,
# gfs_cyc, assim_freq, restart_interval_gfs and optionally RUNMEM to be
# set by earlier configs.

echo "BEGIN: config.wave"

# Parameters that are common to all wave model steps

# System and version
export wave_sys_ver=v1.0.0

# This config contains variables/parameters used in the fcst step
# Some others are also used across the workflow in wave component scripts

# General runtime labels
export CDUMPwave="${CDUMP}wave"

# In GFS/GDAS, restart files are generated/read from gdas runs
export CDUMPRSTwave="gdas"

# Grids for wave model
# GFSv16
export waveGRD='gnh_10m aoc_9km gsh_15m'
export waveGRDN='1 2 3' # gridnumber for ww3_multi
export waveGRDG='10 20 30' # gridgroup for ww3_multi

# ESMF input grid
export waveesmfGRD='glox_10m' # input grid

# Grids for input fields
export WAVEICE_DID=sice
export WAVEICE_FID=glix_10m
export WAVECUR_DID=rtofs
export WAVECUR_FID=glix_10m
# Wind input is unused (empty) because the atm coupling supplies winds
# (see WW3ATMINP='CPL' below).
export WAVEWND_DID=
export WAVEWND_FID=

# Grids for output fields (used in all steps)
export waveuoutpGRD=points
export waveinterpGRD='glo_15mxt at_10m ep_10m wc_10m glo_30m' # Grids that need to be interpolated from native
# in POST will generate grib unless gribOK not set
export wavesbsGRD='' # side-by-side grids generated as wave model runs, writes to com
export wavepostGRD='gnh_10m aoc_9km gsh_15m' # Native grids that will be post-processed (grib2)

# The start time reflects the number of hindcast hours prior to the cycle initial time
if [ "$CDUMP" = "gdas" ]; then
export FHMAX_WAV=${FHMAX:-9}
else
export FHMAX_WAV=$FHMAX_GFS
fi
export WAVHINDH=${WAVHINDH:-0}
export FHMIN_WAV=${FHMIN_WAV:-0}
export FHOUT_WAV=${FHOUT_WAV:-3}
export FHMAX_HF_WAV=${FHMAX_HF_WAV:-120}
export FHOUT_HF_WAV=${FHOUT_HF_WAV:-1}

# gridded and point output rate (seconds)
export DTFLD_WAV=`expr $FHOUT_HF_WAV \* 3600`
export DTPNT_WAV=3600
export FHINCP_WAV=`expr $DTPNT_WAV / 3600`

# Selected output parameters (gridded)
export OUTPARS_WAV="WND HS FP DP PHS PTP PDIR"

# Restart file config
if [ "$CDUMP" = "gdas" ]; then
WAVNCYC=4
WAVHCYC=${assim_freq:-6}
FHMAX_WAV_CUR=${FHMAX_WAV_CUR:-48} # RTOFS forecasts only out to 8 days
elif [ ${gfs_cyc} -ne 0 ]; then
FHMAX_WAV_CUR=${FHMAX_WAV_CUR:-192} # RTOFS forecasts only out to 8 days
WAVHCYC=${assim_freq:-6}
else
WAVHCYC=0
FHMAX_WAV_CUR=${FHMAX_WAV_CUR:-192} # RTOFS forecasts only out to 8 days
fi
export FHMAX_WAV_CUR WAVHCYC WAVNCYC

# Restart timing business
if [ "${CDUMP}" != gfs ]; then # Setting is valid for GDAS and GEFS
export RSTTYPE_WAV='T' # generate second tier of restart files
export DT_1_RST_WAV=10800 # time between restart files, set to DTRST=1 for a single restart file
export DT_2_RST_WAV=43200 # restart stride for checkpointing restart
export RSTIOFF_WAV=0 # first restart file offset relative to model start
else # This is a GFS run
rst_dt_gfs=$(( restart_interval_gfs * 3600 ))
export RSTTYPE_WAV='F' # generate second tier of restart files
if [ $rst_dt_gfs -gt 0 ]; then export RSTTYPE_WAV='T' ; fi
export DT_1_RST_WAV=${rst_dt_gfs:-0} # time between restart files, set to DTRST=1 for a single restart file
export DT_2_RST_WAV=${rst_dt_gfs:-0} # restart stride for checkpointing restart
export RSTIOFF_WAV=0 # first restart file offset relative to model start
fi
#
# Set runmember to default value if not GEFS cpl run
# (for a GFS coupled run, RUNMEN would be unset, this should default to -1)
export RUNMEM=${RUNMEM:--1}
# Set wave model member tags if ensemble run
# -1: no suffix, deterministic; xxxNN: extract two last digits to make ofilename prefix=gwesNN
if [ $RUNMEM = -1 ]; then
# No suffix added to model ID in case of deterministic run
export waveMEMB=
else
# Extract member number only
export waveMEMB=`echo $RUNMEM | grep -o '..$'`
fi

# Determine if wave component needs input and/or is coupled
export WW3ATMINP='CPL'
export WW3ICEINP='YES'
export WW3CURINP='YES'

# Determine if input is from perturbed ensemble (T) or single input file (F) for all members
export WW3ATMIENS='F'
export WW3ICEIENS='F'
export WW3CURIENS='F'

echo "END: config.wave"
| true
|
60c76f0655b0e96da0b914a5e7efc0cef80bfdc4
|
Shell
|
smuthuswamy/spinnaker-the-hard-way
|
/stop-core.sh
|
UTF-8
| 1,171
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/bash
# Stop the core Spinnaker microservices one by one and report whether
# each service's port has been released. Factored into a helper: the
# original repeated the same stanza six times — and the orca stanza had
# lost its lsof check along the way (now restored).

SCRIPTS=~/dev/spinnaker/scripts

# stop_service NAME PORT DESCRIPTION
# Runs the service's stop script, waits briefly, then lists any process
# still holding the service port (lsof prints nothing once released).
stop_service() {
  local name=$1 port=$2 desc=$3

  echo "Stopping $name ($desc)..."
  "$SCRIPTS/$name-stop.sh"
  echo
  sleep 1
  echo "Checking if $name port is released..."
  sudo lsof -t -i:"$port"
  echo "---"
  echo
}

stop_service deck        9000 "for ui"
stop_service clouddriver 7002 "for interacting with clouds"
stop_service front50     8080 "for persisting stuff to gcs"
stop_service fiat        7003 "for authc/authz"
stop_service gate        8084 "for gating all api calls"
# FIX: the original never ran the lsof check for orca's port (8083).
stop_service orca        8083 "for orchestrating the pipelines"

echo "Core Spinnaker Microservices have been stopped!"
| true
|
1c603544aa09f6914a4b547d05d09d3e0bedb4a1
|
Shell
|
nimbix/jardoc
|
/docs/recipe/jarvice-filemanager-plugin.sh
|
UTF-8
| 1,283
| 3.59375
| 4
|
[] |
no_license
|
# Print the supported <jarvice_job_plugin_options> to stdout.
function jarvice_job_plugin_usage {
    echo "Available <jarvice_job_plugin_options>:"
    echo -e " --up <src> <dst>\tUpload local <src> file to remote <dst>"
    echo -e " --down <src> <dst>\tDownload remote <src> file to local <dst>"
}

# Transfer files to/from a JARVICE job via its ownCloud WebDAV endpoint.
# Globals (read): jarvice_job_address, jarvice_job_password
# Arguments: --up <src> <dst> and/or --down <src> <dst>; anything else
# prints usage and returns.
function jarvice_job_plugin {
    # BUGFIX: declare and initialize locals so a previous invocation in
    # the same shell cannot leak stale src/dst values into this call.
    local upload_src= upload_dst= download_src= download_dst=
    local webdav_url curl
    while [ $# -gt 0 ]; do
        case "$1" in
        --up)
            upload_src="$2"
            upload_dst="$3"
            shift; shift; shift
            ;;
        --down)
            download_src="$2"
            download_dst="$3"
            shift; shift; shift
            ;;
        *)
            jarvice_job_plugin_usage
            return
            ;;
        esac
    done
    webdav_url="https://$jarvice_job_address/owncloud/remote.php/webdav/"
    # Retry for up to ~30s while the job's endpoint comes up.
    curl="curl -u nimbix:$jarvice_job_password -s -k"
    curl+=" --retry-delay 1 --retry 30 --retry-connrefused"
    # Use && instead of the deprecated/ambiguous '-a' inside [ ].
    if [ -n "$upload_src" ] && [ -n "$upload_dst" ]; then
        echo "Uploading $upload_src to $upload_dst..."
        $curl --upload-file "$upload_src" "$webdav_url$upload_dst"
    fi
    if [ -n "$download_src" ] && [ -n "$download_dst" ]; then
        echo "Downloading $download_src to $download_dst..."
        $curl --output "$download_dst" "$webdav_url$download_src"
    fi
}
| true
|
3ffb25a5e0bb960e3cc36c66032e6a55946dcb2f
|
Shell
|
MonetDB/eanthony
|
/eanthony.sh
|
UTF-8
| 3,680
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
#set -x
# Continuous R + MonetDB test runner: in an endless loop, pull the
# config repo, (re)build the pinned R version if needed, install/update
# R packages, then launch every listed test script in parallel
# subshells, logging each run under $BASEDIR/logs/<unix-timestamp>.
BASEDIR=$EABASEDIR
if [ -z $BASEDIR ] || [ ! -d $BASEDIR ]; then
echo "you need to set an existing basedir in \$EABASEDIR"
exit -1
fi
# Cygwin detection: 'uname' contains CYGWIN on Windows, so grep -v
# prints nothing and exits non-zero there -> ISWIN=1 on Cygwin, 0 else.
uname | grep -v CYGWIN > /dev/null
ISWIN=$?
while :
do
# RUNID doubles as the per-iteration log directory name.
RUNID=`date +%s`
echo $RUNID
date
git -C $BASEDIR pull
# these files need to exist and denote R version and MonetDB HG branch to use
RTAG=`cat $BASEDIR/r-tag`
RUNTESTS=$BASEDIR/runtests
if [ -z $RTAG ] ; then
echo "need to define R version in $RTAG"
exit -1
fi
if [ ! -f $RUNTESTS ] ; then
echo "need to define tests to run in $RUNTESTS"
exit -1
fi
RSRCDIR=$BASEDIR/r-source-$RTAG
RINSTALLDIR=$BASEDIR/r-install-$RTAG
RBIN=$RINSTALLDIR/bin/R
if [ $ISWIN -eq 1 ]; then
RBIN=$RBIN.exe
fi
# R packages should be reinstalled with new R version
RLIBDIR=$BASEDIR/r-packages-$RTAG
RTMPDIR=$BASEDIR/r-tmp
RWDDIR=$BASEDIR/r-wd
LOGDIR=$BASEDIR/logs/$RUNID
# Rtmp* dirs may have restrictive modes; chmod before removing them.
find $RTMPDIR -name "Rtmp*" -type d -exec chmod -R 700 {} + -exec rm -rf {} +
rm -rf $RWDDIR/*
# NOTE(review): no -f, so the very first iteration (when logs/current
# does not exist yet) prints an rm error — harmless but noisy.
rm $BASEDIR/logs/current
ln -s $LOGDIR $BASEDIR/logs/current
mkdir -p $RSRCDIR $RLIBDIR $RTMPDIR $RWDDIR $LOGDIR $RINSTALLDIR
# NOTE(review): $MINSTALLDIR is never assigned — this looks like a typo
# for $RINSTALLDIR, so the check below can never fire; verify intent.
if [ ! -d $MINSTALLDIR ]; then
echo "Unable to make install dirs."
exit -1
fi
# build R
# TODO: remove old R source/install/packages dirs
# TODO: auto-install new R versions on Windows?
echo $RTAG > $LOGDIR/r-version
# Build R from source only on non-Windows hosts and only if the pinned
# version is not already installed.
if [ ! -f $RBIN ] && [ $ISWIN -eq 0 ] ; then
RTBURL="https://cran.r-project.org/src/base/R-3/R-$RTAG.tar.gz"
curl -s $RTBURL | tar xz -C $RSRCDIR --strip-components=1
cd $RSRCDIR
./configure --prefix=$RINSTALLDIR --with-x=no --without-recommended-packages > $LOGDIR/r-configure.log 2>&1
make DEBUG=T > $LOGDIR/r-make.log 2>&1
# make R install without latex
touch $RSRCDIR/doc/NEWS.pdf
touch $RSRCDIR/doc/RESOURCES
touch $RSRCDIR/doc/FAQ
make install >> $LOGDIR/r-make.log 2>&1
cd ..
fi
if [ ! -f $RBIN ] ; then
echo "Still no R. FML."
exit -1
fi
export R_LIBS=$RLIBDIR PATH=$RINSTALLDIR/bin:/cygdrive/c/Rtools/bin:$PATH TMP=$RTMPDIR TEMP=$RTMPDIR
# install/update various packages
$RBIN -f $BASEDIR/packages.R > $LOGDIR/packages.log 2>&1
# record versions of installed packages
$RBIN --slave -e "write.table(installed.packages(lib.loc='$RLIBDIR')[, c('Package','Version')], '$LOGDIR/package-versions', sep='\t', quote=F, row.names=F, col.names=F)"
if [ $? != 0 ]; then
echo "Package installation failure"
continue
else
touch $LOGDIR/packages-success
fi
sleep 1
cp $RUNTESTS $LOGDIR
# Shuffle the test list so long-running tests don't always start first.
RTESTS=`cat $RUNTESTS | shuf`
for RSCRIPT in $RTESTS ; do
if [ -z $RSCRIPT ] || [ ! -f $BASEDIR/$RSCRIPT-setup.R ] ; then
echo "Could not run $RSCRIPT"
continue
fi
# Each test runs in a backgrounded subshell with its own TMP and
# working directory; 'wait' below acts as the barrier.
(echo "running $RSCRIPT"
touch $LOGDIR/$RSCRIPT-started
export RWD=$RWDDIR/$RSCRIPT-$RUNID
export TMP=$RTMPDIR/$RSCRIPT TEMP=$RTMPDIR/$RSCRIPT
mkdir -p $TMP
mkdir -p $RWD
set -o pipefail
# timeout: hard limit 30h; -k 40h sends SIGKILL 40h after the initial
# signal if the process has not exited. awk timestamps every log line.
timeout -k 40h 30h $RBIN -f $BASEDIR/$RSCRIPT-setup.R 2>&1 | awk '{print strftime("%Y-%m-%d %H:%M:%S"), $0; fflush(); }' > $LOGDIR/$RSCRIPT.log
SETUPRC=$?
echo -e "\nsetup return code=$SETUPRC" >> $LOGDIR/$RSCRIPT.log
if [ $SETUPRC != 0 ]; then
echo "$RSCRIPT setup fail"
else
touch $LOGDIR/$RSCRIPT-setup-success
timeout -k 40h 30h $RBIN -f $BASEDIR/$RSCRIPT-test.R 2>&1 | awk '{print strftime("%Y-%m-%d %H:%M:%S"), $0; fflush(); }' >> $LOGDIR/$RSCRIPT.log
TESTRC=$?
echo -e "\ntest return code=$TESTRC" >> $LOGDIR/$RSCRIPT.log
if [ $TESTRC != 0 ]; then
echo "$RSCRIPT test fail"
else
touch $LOGDIR/$RSCRIPT-test-success
fi
fi
touch $LOGDIR/$RSCRIPT-complete
rm -rf $RWD
) &
sleep 1
done
wait
touch $LOGDIR/complete
sleep 10
done
| true
|
dcb8464c05d96c369b844a65e50d1ee834456053
|
Shell
|
CMSROMA/Timing-TOFPET
|
/connect_TOFPET.sh
|
UTF-8
| 252
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
# Remove stale daqd IPC artifacts (interactively, via rm -i) and then
# start the DAQ daemon for the TOFPET system over Gigabit Ethernet.
for stale in "/tmp/d.sock" "/dev/shm/daqd_shm"; do
    if [ -f "$stale" ]; then
        echo "removing $stale"
        rm -i "$stale"
    fi
done
./daqd --socket-name=/tmp/d.sock --daq-type=GBE
| true
|
14ce5b512fd3d3b7e10c360ae80fbab4bd35acb9
|
Shell
|
notlixiang/robotics_setup
|
/cmake_source.sh
|
UTF-8
| 833
| 3.296875
| 3
|
[
"Apache-2.0"
] |
permissive
|
# source: https://gist.github.com/phatblat/1713458
# Build and install CMake from source (release branch) on Linux or macOS.

# Save script's current directory so we can return to it at the end.
DIR=$(pwd)

set -e
set -u
set -x

echo "############################"
echo "# CMake - cmake.org"
echo "############################"
echo ""
echo "CMake, the cross-platform, open-source build system. https://cmake.org"
echo ""
echo "https://github.com/Kitware/CMake/"

# os specific setup
OS=`uname`
case $OS in
  'Linux')
    sudo apt-get update
    sudo apt-get install -y libtool pkg-config build-essential autoconf automake pkg-config libncurses5-dev
    OS='Linux'
    ;;
  'Darwin')
    OS='Mac'
    ;;
  # BUGFIX: the catch-all arm must come last; it previously appeared
  # before 'Darwin', which made the macOS arm unreachable.
  *) ;;
esac

# Ensure the source checkout location exists before cd-ing into it
# (under set -e a missing ~/src would abort the script).
mkdir -p ~/src
cd ~/src/
if [ ! -d ~/src/CMake ]
then
  git clone https://github.com/Kitware/CMake.git -b release
fi
cd CMake
git pull
./bootstrap
# make -j will build faster, but may run out of memory
# sudo make -j install
sudo make install
cd $DIR
| true
|
f4bb01b4888251a1dce624a39e3d6bb657c93945
|
Shell
|
alexhillc/dotfiles
|
/prefs.sh
|
UTF-8
| 10,964
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash
# Personal macOS preference bootstrap: sets system, Finder, Dock,
# input-device, power, and per-app defaults via `defaults write` /
# PlistBuddy. Intended for a fresh machine; several steps need sudo.
# Most settings take effect after the relevant app or the OS restarts.

################ GENERAL ############################

# Disable the sound effects on boot
sudo nvram SystemAudioVolume=" "

# Set computer name (as done via System Preferences → Sharing)
sudo scutil --set ComputerName "Alex's MacBook Pro"
sudo scutil --set HostName "Alex's MacBook Pro"
sudo scutil --set LocalHostName "Alex's MacBook Pro"
sudo defaults write /Library/Preferences/SystemConfiguration/com.apple.smb.server NetBIOSName -string "Alex's MacBook Pro"

# Avoid creating .DS_Store files on network volumes
defaults write com.apple.desktopservices DSDontWriteNetworkStores -bool true

# Prevent Time Machine from prompting to use new hard drives as backup volume
defaults write com.apple.TimeMachine DoNotOfferNewDisksForBackup -bool true

# Use scroll gesture with the Ctrl (^) modifier key to zoom
defaults write com.apple.universalaccess closeViewScrollWheelToggle -bool true
defaults write com.apple.universalaccess HIDScrollZoomModifierMask -int 262144
# Follow the keyboard focus while zoomed in
defaults write com.apple.universalaccess closeViewZoomFollowsFocus -bool true

#######################################################

################ FINDER PREFERENCES ###################

# Finder: show status bar
defaults write com.apple.finder ShowStatusBar -bool true

# Always open everything in Finder's column view.
# NOTE(review): this clmv setting is overridden further down, where
# FXPreferredViewStyle is set again to "Nlsv" (list view) — the later
# write wins; remove one of the two to avoid confusion.
defaults write com.apple.Finder FXPreferredViewStyle clmv

# Desktop prefs
defaults write com.apple.finder ShowHardDrivesOnDesktop -bool true
defaults write com.apple.finder ShowExternalHardDrivesOnDesktop -bool true
defaults write com.apple.finder ShowRemovableMediaOnDesktop -bool true
defaults write com.apple.finder ShowMountedServersOnDesktop -bool true
defaults write com.apple.finder WarnOnEmptyTrash -bool false

# Show item info near icons on the desktop and in other icon views
/usr/libexec/PlistBuddy -c "Set :DesktopViewSettings:IconViewSettings:showItemInfo true" ~/Library/Preferences/com.apple.finder.plist
/usr/libexec/PlistBuddy -c "Set :FK_StandardViewSettings:IconViewSettings:showItemInfo true" ~/Library/Preferences/com.apple.finder.plist
/usr/libexec/PlistBuddy -c "Set :StandardViewSettings:IconViewSettings:showItemInfo true" ~/Library/Preferences/com.apple.finder.plist

# Show item info to the bottom of the icons on the desktop
/usr/libexec/PlistBuddy -c "Set DesktopViewSettings:IconViewSettings:labelOnBottom false" ~/Library/Preferences/com.apple.finder.plist

# Enable snap-to-grid for icons on the desktop and in other icon views
/usr/libexec/PlistBuddy -c "Set :DesktopViewSettings:IconViewSettings:arrangeBy kind" ~/Library/Preferences/com.apple.finder.plist
/usr/libexec/PlistBuddy -c "Set :FK_StandardViewSettings:IconViewSettings:arrangeBy kind" ~/Library/Preferences/com.apple.finder.plist
/usr/libexec/PlistBuddy -c "Set :StandardViewSettings:IconViewSettings:arrangeBy kind" ~/Library/Preferences/com.apple.finder.plist

# Increase grid spacing for icons on the desktop and in other icon views
/usr/libexec/PlistBuddy -c "Set :DesktopViewSettings:IconViewSettings:gridSpacing 100" ~/Library/Preferences/com.apple.finder.plist
/usr/libexec/PlistBuddy -c "Set :FK_StandardViewSettings:IconViewSettings:gridSpacing 100" ~/Library/Preferences/com.apple.finder.plist
/usr/libexec/PlistBuddy -c "Set :StandardViewSettings:IconViewSettings:gridSpacing 100" ~/Library/Preferences/com.apple.finder.plist

# Increase the size of icons on the desktop and in other icon views
/usr/libexec/PlistBuddy -c "Set :DesktopViewSettings:IconViewSettings:iconSize 52" ~/Library/Preferences/com.apple.finder.plist
/usr/libexec/PlistBuddy -c "Set :FK_StandardViewSettings:IconViewSettings:iconSize 52" ~/Library/Preferences/com.apple.finder.plist
/usr/libexec/PlistBuddy -c "Set :StandardViewSettings:IconViewSettings:iconSize 52" ~/Library/Preferences/com.apple.finder.plist

# Set the size of text on the desktop and in other icon views
/usr/libexec/PlistBuddy -c "Set :DesktopViewSettings:IconViewSettings:textSize 12" ~/Library/Preferences/com.apple.finder.plist
/usr/libexec/PlistBuddy -c "Set :FK_StandardViewSettings:IconViewSettings:textSize 12" ~/Library/Preferences/com.apple.finder.plist
/usr/libexec/PlistBuddy -c "Set :StandardViewSettings:IconViewSettings:textSize 12" ~/Library/Preferences/com.apple.finder.plist

# Expand save panel by default
defaults write NSGlobalDomain NSNavPanelExpandedStateForSaveMode -bool true
defaults write NSGlobalDomain NSNavPanelExpandedStateForSaveMode2 -bool true

# Expand print panel by default
defaults write NSGlobalDomain PMPrintingExpandedStateForPrint -bool true
defaults write NSGlobalDomain PMPrintingExpandedStateForPrint2 -bool true

# Enable sound beep
defaults write -g "com.apple.sound.beep.feedback" -int 1

# Automatically quit printer app once the print jobs complete
defaults write com.apple.print.PrintingPrefs "Quit When Finished" -bool true

# Disable the “Are you sure you want to open this application?” dialog
defaults write com.apple.LaunchServices LSQuarantine -bool false

# Always show scrollbars
defaults write NSGlobalDomain AppleShowScrollBars -string "Always"
# Possible values: `WhenScrolling`, `Automatic` and `Always`

# Use list view in all Finder windows by default
# Four-letter codes for the other view modes: `icnv`, `clmv`, `Flwv`
defaults write com.apple.finder FXPreferredViewStyle -string "Nlsv"

# Enable AirDrop over Ethernet and on unsupported Macs running Lion
defaults write com.apple.NetworkBrowser BrowseAllInterfaces -bool true

###############################################################################
# Trackpad, mouse, keyboard, Bluetooth accessories, and input                 #
###############################################################################

# Disable “natural” (Lion-style) scrolling
defaults write NSGlobalDomain com.apple.swipescrolldirection -bool false

# Disable press-and-hold for keys in favor of key repeat
defaults write NSGlobalDomain ApplePressAndHoldEnabled -bool false

# Set a blazingly fast keyboard repeat rate
# NOTE(review): on newer macOS the supported minimum is 1 or 2; a value
# of 0 may be ignored or behave inconsistently — confirm on the target OS.
defaults write NSGlobalDomain KeyRepeat -int 0

# Disable auto-correct
defaults write NSGlobalDomain NSAutomaticSpellingCorrectionEnabled -bool false

###############################################################################
# SSD-specific tweaks                                                         #
###############################################################################

# Disable local Time Machine snapshots
# NOTE(review): `tmutil disablelocal` was removed in macOS 10.13+ —
# this line fails harmlessly on newer systems.
sudo tmutil disablelocal

# Disable hibernation (speeds up entering sleep mode)
sudo pmset -a hibernatemode 0

# Remove the sleep image file to save disk space
sudo rm /private/var/vm/sleepimage
# Create a zero-byte file instead…
sudo touch /private/var/vm/sleepimage
# …and make sure it can’t be rewritten
sudo chflags uchg /private/var/vm/sleepimage

# Disable the sudden motion sensor as it’s not useful for SSDs
sudo pmset -a sms 0

###############################################################################
# Screen                                                                      #
###############################################################################

# Require password immediately after sleep or screen saver begins
defaults write com.apple.screensaver askForPassword -int 1
defaults write com.apple.screensaver askForPasswordDelay -int 0

###############################################################################
# Dock, Dashboard, and hot corners                                            #
###############################################################################

# Set the icon size of Dock items to 42 pixels
defaults write com.apple.dock tilesize -int 42

# Minimize windows into their application’s icon
defaults write com.apple.dock minimize-to-application -bool true

# Enable spring loading for all Dock items
defaults write com.apple.dock enable-spring-load-actions-on-all-items -bool true

# Show indicator lights for open applications in the Dock
defaults write com.apple.dock show-process-indicators -bool true

# Wipe all (default) app icons from the Dock
# This is only really useful when setting up a new Mac, or if you don’t use
# the Dock to launch apps.
defaults write com.apple.dock persistent-apps -array

# Disable Dashboard
defaults write com.apple.dashboard mcx-disabled -bool true

# Don’t show Dashboard as a Space
defaults write com.apple.dock dashboard-in-overlay -bool true

# Automatically hide and show the Dock
defaults write com.apple.dock autohide -bool true

# Disable the Launchpad gesture (pinch with thumb and three fingers)
#defaults write com.apple.dock showLaunchpadGestureEnabled -int 0

# Reset Launchpad, but keep the desktop wallpaper intact
find "${HOME}/Library/Application Support/Dock" -name "*-*.db" -maxdepth 1 -delete

# Add iOS Simulator to Launchpad
sudo ln -sf "/Applications/Xcode.app/Contents/Developer/Applications/iOS Simulator.app" "/Applications/iOS Simulator.app"

###############################################################################
# Messages                                                                    #
###############################################################################

# Disable automatic emoji substitution (i.e. use plain text smileys)
defaults write com.apple.messageshelper.MessageController SOInputLineSettings -dict-add "automaticEmojiSubstitutionEnablediMessage" -bool false

# Disable continuous spell checking
defaults write com.apple.messageshelper.MessageController SOInputLineSettings -dict-add "continuousSpellCheckingEnabled" -bool false

###############################################################################
# Transmission.app                                                            #
###############################################################################

# Use `~/Documents/Torrents` to store incomplete downloads
defaults write org.m0k.transmission UseIncompleteDownloadFolder -bool true
defaults write org.m0k.transmission IncompleteDownloadFolder -string "${HOME}/Documents/Torrents"

# Don’t prompt for confirmation before downloading
defaults write org.m0k.transmission DownloadAsk -bool false

# Trash original torrent files
defaults write org.m0k.transmission DeleteOriginalTorrent -bool true

# Hide the donate message
defaults write org.m0k.transmission WarningDonate -bool false
# Hide the legal disclaimer
defaults write org.m0k.transmission WarningLegal -bool false

################################################################################
# Terminal settings
################################################################################

# Make zsh the login shell for the current user.
chsh -s /bin/zsh

################ APPLESCRIPT OS X PREFS ###############

osascript -e 'tell application "System Events"
	tell screen saver preferences
		set delay interval to 0
		set show clock to false
	end tell
end tell'

#######################################################

echo "Restart required to implement some of these changes"
| true
|
0b26255b078f7ef9c5ce4798deb5d3022941b6f4
|
Shell
|
kblomqvist/my-bash-scripts
|
/dups3.sh
|
UTF-8
| 2,671
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/sh
#
# Backuping
# ---------
# dups3 backup
#
# Restoring
# ---------
# dups3 restore /home/my/foo.txt ~/restore # Restore a file
# dups3 restore /home/my ~/restore # Restore a directory
#
# todo:
# dups3 restore -t 2010-09-22T01:10:00 ~/restore # Restore everything from a point in time
#
# References
# ----------
# [1] (http://icelab.com.au/articles/easy-server-backups-to-amazon-s3-with-duplicity/)

# ---------------------------------------------------------------------
# Settings
# ---------------------------------------------------------------------

# Amazon S3 bucket name and folder if any, for example 'mybucket'
# or 'mybucket/backups'
BUCKET=

# Amazon AWS key id and secret key
export AWS_ACCESS_KEY_ID=
export AWS_SECRET_ACCESS_KEY=

# Your GPG key which to use
GPG_KEY=

# Your GPG passphrase for the key defined above
export PASSPHRASE=

# MySQL access parameters. Be sure that you have MySQL privilege
# 'LOCK TABLES', otherwise database backup won't work.
#
# How to give 'LOCK TABLES' privilege for root:
# GRANT LOCK TABLES ON *.* TO 'root'@'localhost';
#
MYSQL_DB_USER=
MYSQL_DB_PASSWORD=
MYSQL_DB_HOST=localhost

# Destination where to store MySQL dump file. The file will be named
# as 'mysqldump.sql'.
MYSQL_DUMP_DEST_DIR=/var/dbdumps

# ---------------------------------------------------------------------
# Program starts here
# ---------------------------------------------------------------------

if [ "$#" -eq 1 ] && [ "$1" = "backup" ]; then
    # Prepare MySQL dump.
    # NOTE(review): -p$PASSWORD exposes the password on the process
    # list; consider a --defaults-extra-file instead.
    MYSQL_ACCESS_PARAMS="-h $MYSQL_DB_HOST -u $MYSQL_DB_USER -p$MYSQL_DB_PASSWORD"
    MYSQL_DUMP_FILE="$MYSQL_DUMP_DEST_DIR/mysqldump.sql"
    mkdir -p "$MYSQL_DUMP_DEST_DIR"
    mysqldump $MYSQL_ACCESS_PARAMS --all-databases > "$MYSQL_DUMP_FILE" \
        && chmod 600 "$MYSQL_DUMP_FILE"

    # Run backups (full backup at least monthly, incremental otherwise)
    duplicity \
        --full-if-older-than 1M \
        --s3-use-new-style \
        --s3-european-buckets \
        --encrypt-key=${GPG_KEY} \
        --sign-key=${GPG_KEY} \
        --include=/home \
        --include=/etc \
        --include=/root \
        --include=${MYSQL_DUMP_DEST_DIR} \
        --exclude=/** / "s3+http://${BUCKET}"

    # Remove backups which are older than 6 month
    duplicity remove-older-than 6M --force "s3+http://${BUCKET}"
elif [ "$#" -eq 3 ] && [ "$1" = "restore" ]; then
    # Quote the user-supplied source/target so paths with spaces work.
    duplicity --file-to-restore "$2" "s3+http://${BUCKET}" "$3"
else
    echo "Usage:"
    echo " dups3 backup # Run backups"
    echo " dups3 restore /home/my/foo.txt ~/restore # Restore a file"
    echo " dups3 restore /home/my ~/restore # Restore a directory"
fi

# Reset the ENV variables. Don't need them sitting around
export AWS_ACCESS_KEY_ID=
export AWS_SECRET_ACCESS_KEY=
export PASSPHRASE=

exit 0
| true
|
7bd92b02e4184c2a9f059233007d2bf8ef11ebbc
|
Shell
|
jianyongchen/oor
|
/vagrant/install-oracle-java8.sh
|
UTF-8
| 511
| 3.09375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Install Oracle Java 8 on Debian/Ubuntu via the WebUpd8 PPA installer.
set -e
set -o pipefail
# Non-interactive apt: never prompt during install/upgrade.
export DEBIAN_FRONTEND=noninteractive
echo "Adding WebUpd8 PPA for Oracle Java 8 ..."
add-apt-repository -y ppa:webupd8team/java 2>/dev/null
# Pre-accept the Oracle license via debconf so the installer does not
# block on an interactive license screen.
echo oracle-java8-installer shared/accepted-oracle-license-v1-1 select true | /usr/bin/debconf-set-selections
apt-get -y -q update
# --force-confnew: take the package maintainer's version of changed conffiles.
apt-get -y -q -o Dpkg::Options::="--force-confnew" upgrade
echo "Downloading and installing Oracle Java 8 ..."
apt-get -y install oracle-java8-installer >/dev/null
echo "Installing Oracle Java 8 done."
| true
|
c3db5811319e4614ee385e90d9c2691c144cc8fc
|
Shell
|
nkyo/MBScript
|
/mbmenu/bin/autocheck/check_ram
|
UTF-8
| 433
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# If overall RAM usage exceeds MEM_THRESHOLD percent, kill the process
# currently using the most memory; otherwise exit quietly.
MEM_THRESHOLD=90 #percent
MEM_TOTAL=$(free | grep "Mem:" | awk '{print $2}')
MEM_REMAINIG=$(free | grep "Mem:" | awk '{print $4}')
# Integer percentage of memory currently in use.
MEM_CURRENT=$(echo "scale=0;100-$MEM_REMAINIG*100/$MEM_TOTAL" | bc -l)
if [ "$MEM_CURRENT" -gt "$MEM_THRESHOLD" ] ; then
  # Largest memory consumer. BUGFIX: sort numerically (-n); the original
  # lexical 'sort -k 2 -r' could rank "9.5" above "10.2" and pick the
  # wrong process. --no-headers replaces the grep -v header hack, and
  # 'pid,%mem' replaces the nonstandard repeated -eo flags.
  PIDMEM=$(ps -eo pid,%mem --no-headers | sort -k2 -nr | head -n 1 | awk '{print $1}')
  # BUGFIX: 'KILL' was capitalized and is not a command; use kill.
  # NOTE(review): SIGKILL gives the process no chance to clean up —
  # consider sending SIGTERM first.
  kill -9 "$PIDMEM"
else
  exit 0
fi
echo '-----------------'
echo "$MEM_CURRENT"
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.