blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
cdda4f740fb08413f3377a0747d6076ab5997563 | Shell | albertinator/deploy-scripts | /express-2-doks/deploy.sh | UTF-8 | 1,798 | 3.6875 | 4 | [] | no_license | #!/bin/bash
# Pre-requisites:
# Install doctl:
# $ brew install doctl
# Install Kubectl:
# $ brew install kubectl
# Install envsubst command:
# $ brew install gettext
# $ echo 'export PATH="/usr/local/opt/gettext/bin:$PATH"' >> ~/.bash_profile
# Login
doctl auth init
# Prep variables
export REGISTRY="registry_name"
export CLUSTER_NAME="cluster_name"
export VM_ID="image_name"
export REGION="sfo2"
export VERSION="$(git rev-parse --short HEAD)"
export DOMAIN="api.domain.com"
export IMAGE_NAME=registry.digitalocean.com/${REGISTRY}/${VM_ID}:${VERSION}
# Select DOKS cluster
doctl kubernetes cluster kubeconfig save ${CLUSTER_NAME}
kubectl get nodes
# Build and upload image to DigitalOcean Container Registry
doctl registry login
export IMAGE_STATUS="$(doctl registry repository list-tags ${VM_ID} --format Tag --no-header | grep ^${VERSION}$ > /dev/null 2>&1 && echo OK || echo FAILED)"
if [ "$IMAGE_STATUS" = "FAILED" ] # only if image doesn't already exist
then
docker build -t ${IMAGE_NAME} .
docker push ${IMAGE_NAME}
echo "$(tput setaf 2)Pushed image to DigitalOcean Container Registry: $(tput setab 4)${IMAGE_NAME}$(tput sgr0)"
fi
echo "$(tput setaf 2)Applying all ENV secrets for deployment...$(tput sgr0)"
cat k8s/deployment-secret.yaml | envsubst | kubectl apply -f -
kubectl get secrets
echo "$(tput setaf 2)Applying deployment ${VM_ID}...$(tput sgr0)"
cat k8s/deployment.yaml | envsubst | kubectl apply -f -
kubectl get deployments
echo "$(tput setaf 2)Applying service (of type NodePort) ${VM_ID}...$(tput sgr0)"
cat k8s/service.yaml | envsubst | kubectl apply -f -
kubectl get services
echo "$(tput setaf 2)Applying ingress for NodePort ${VM_ID}...$(tput sgr0)"
cat k8s/ingress.yaml | envsubst | kubectl apply -f -
kubectl get ingress
# Check
open https://${DOMAIN}
| true |
3df98d9b5e0d1fa944c502ca81ee15d678186782 | Shell | brancz/tracee | /libbpfgo/selftest/perfbuffers/run.sh | UTF-8 | 769 | 3.53125 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
GREEN=' \033[0;32m'
RED=' \033[0;31m'
NC='\033[0m'
VERSION_LIMIT=4.3
CURRENT_VERSION=$(uname -r | cut -d '.' -f1-2)
# Check that kernel version is big enough
if [ $(echo "$CURRENT_VERSION"$'\n'"$VERSION_LIMIT" | sort -V | head -n1) != "$VERSION_LIMIT" ]; then
echo -e "${RED}[*] OUTDATED KERNEL VERSION${NC}"
exit 1
fi
make -f $PWD/Makefile
if [ $? -ne 0 ]; then
echo -e "${RED}[*] MAKE FAILED"
exit 2
else
echo -e "${GREEN}[*] MAKE RAN SUCCESFULLY${NC}"
fi
timeout 5 $PWD/self
RETURN_CODE=$?
if [ $RETURN_CODE -eq 124 ]; then
echo -e "${RED}[*] SELFTEST TIMEDOUT${NC}"
exit 3
fi
if [ $RETURN_CODE -ne 0 ]; then
echo -e "${RED}[*] ERROR IN SELFTEST${NC}"
exit 4
fi
echo -e "${GREEN}[*] SUCCESS${NC}"
exit 0
| true |
5d41060252a9881daf46f6092a3f50c0e870bed7 | Shell | dozmorovlab/PDX-HiC_processingScripts | /5b.runHiCExplorer/hicBuildMatrix.sh | UTF-8 | 921 | 2.609375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
#PBS -S /bin/bash
#PBS -l nodes=1:ppn=8
#PBS -N "hicexplorer"
#PBS -j oe
#PBS -q workq
#PBS -V
cd $PBS_O_WORKDIR
source activate HiCExplorer
export HDF5_USE_FILE_LOCKING='FALSE'
bampath=/PATHTO/${SAMPLE} # Set a path to R1 and R2 alignments
BAMR1=$bampath/${SAMPLE}*_R1.bam
BAMR2=$bampath/${SAMPLE}*_R2.bam
restrfile=/PATHTO/GATC_hg38.bed # This file was made with hicFindRestSite tool on hg38 reference genome using GATC as a restriction site.
# calc contact matrix based on specified binsize
hicBuildMatrix -s $BAMR1 $BAMR2 \
--binSize 10000 \
--restrictionSequence GATC \
--danglingSequence GATC \
--QCfolder ${SAMPLE}_binSizeQCs \
--threads 8 \
--outBam ${SAMPLE}_binSize.bam \
--outFileName ${SAMPLE}_binSize.h5 \
-rs $restrfile \
--inputBufferSize 400000
| true |
2830da6852b4b67abcda6b39e78350fd41dcaca7 | Shell | AustinSaintAubin/linux-bash-scripts-library-core | /downloaders/CAD-Serialized_Downloader.sh | UTF-8 | 3,135 | 3.28125 | 3 | [
"MIT"
] | permissive | # CAD Serialized Downloader By Date - 2011/06/09 - v4 - By: Austin Saint Aubin
# sh "$(nvram get usb_disk_main)/Tomato/Scripts/Downloaders/CAD-Serialized_Downloader.sh"
# ===============================================
#URL=http://www.cad-comic.com/cad/20021023
NAME="CAD - Ctrl+Alt+Del"
URL=http://www.cad-comic.com/cad/
fileTypes="png|jpg|gif"
URLFilerFind="comics"
startYear=2002
startMonth=10
startDay=23
endYear=2011
endMonth=06
endDay=10
# ======================
#FlashDrvDIR=$(nvram get usb_disk_main)
#ComDIR="Podcasts"
#PodsDIR="$FlashDrvDIR/$ComDIR"
PodsDIR="/volume1/Photo/Comics/"
# Loading Functions for Castget Loader
logIt() { echo "$@"; logger -t CAD-Downer "$@"; }
checkFolder() { [ -d "$1" ] && logIt "Folder Exsits: $1" || (logIt "Making Folder: $1"; mkdir -p "$1"); }
checkFolder "$PodsDIR/$NAME"
cd "$PodsDIR/$NAME"
logIt "Starting Serialized Downloader By Date"
logIt "============================"
logIt "Name:" $NAME
logIt "URL:" $URL
logIt "============================"
logIt "Start Year:" $startYear
logIt "End Year:" $endYear
logIt "Start Month:" $startMonth
logIt "End Month:" $endMonth
logIt "Start Day:" $startDay
logIt "End Day:" $endDay
logIt "============================"
YEAR=$startYear
while [ $YEAR -le $endYear ]; do
Folder="$PodsDIR/$NAME/$(printf "%.4d" $YEAR)"
checkFolder "$Folder"
cd "$Folder"
if [ -z $MONTH ]; then
MONTH=$startMonth
else
MONTH=01
fi
# while [ $MONTH -le $endMonth ]; do
while [ $MONTH -le 12 ]; do
#checkFolder "$PodsDIR/$NAME/$(printf "%.4d" $YEAR)/$(printf "%.2d" $MONTH)"
#cd "$PodsDIR/$NAME/$(printf "%.4d" $YEAR)/$(printf "%.2d" $MONTH)"
if [ -z $DAY ]; then
DAY=$startDay
else
DAY=01
fi
# while [ $DAY -le $endDay ]; do
while [ $DAY -le 32 ]; do
fileName=cad-$(printf "%.4d" $YEAR)$(printf "%.2d" $MONTH)$(printf "%.2d" $DAY)
logIt "Year: $(printf "%.4d" $YEAR)"
logIt "Month: $(printf "%.2d" $MONTH)"
logIt "Day: $(printf "%.2d" $DAY)"
logIt "~ - - - - - - - "
logIt "Folder: $Folder"
logIt "FileName: $fileName*"
#[ -f $fileName ] && logIt "File already exists." || (logIt "File does not exists, Downloading it."; FullURL=$(curl --silent $URL$NUM | egrep -i -o '(url|src|href)''="[^"]*' | grep -o '[^"]*$' | egrep -i -E '\.''('$fileTypes')' | grep -i "comic" ); echo $FullURL; wget -O $fileName $FullURL)
#if [ -f "$fileName" ]; then
if [ $(find -name "$fileName*") ]; then
logIt "File already exists."
else
logIt "File does not exists, Downloading it."
FullURL=$(curl --silent $URL$(printf "%.4d" $YEAR)$(printf "%.2d" $MONTH)$(printf "%.2d" $DAY) | egrep -i -o '(url|src|href)''="[^"]*' | grep -o '[^"]*$' | egrep -i -E '\.''('$fileTypes')' | grep -i "$URLFilerFind" )
if [ $FullURL ]; then
logIt URL: $URL$(printf "%.4d" $YEAR)$(printf "%.2d" $MONTH)$(printf "%.2d" $DAY)
logIt Download URL: $FullURL
# wget -U "Firefox" -O "$fileName" "$FullURL"
wget -U "Firefox" "$FullURL"
else
logIt "No downloadable content found"
fi
fi
let DAY++
logIt "------------------------------------------"
done
let MONTH++
done
let YEAR++
done | true |
32f0de23a42e00d1a135a66e0ed6f6001b898630 | Shell | gvsurenderreddy/ipv6-dhclient-script | /templates/CentOS6/etc_init.d_ipv6-dhclient | UTF-8 | 1,225 | 3.796875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
#
# ipv6-dhclient Created by https://github.com/outime/ipv6-dhclient-script
# chkconfig: 2345 11 89
# description: ipv6-dhclient startup script
# Source function library.
. /etc/init.d/functions
RETVAL=0
prog="ipv6-dhclient"
LOCKFILE=/var/lock/subsys/$prog
INTERFACE={{INTERFACE}}
BLOCK_ADDR={{BLOCK_ADDR}}
BLOCK_SUBNET={{BLOCK_SUBNET}}
start() {
echo -n "Starting $prog: "
/sbin/dhclient -cf /etc/dhcp/dhclient6.conf -pf /var/run/dhclient6.$INTERFACE.pid -6 -P $INTERFACE && \
/sbin/ifconfig $INTERFACE inet6 add $BLOCK_ADDR/$BLOCK_SUBNET && \
success || failure
RETVAL=$?
[ $RETVAL -eq 0 ] && touch $LOCKFILE
echo
return $RETVAL
}
stop() {
echo -n "Shutting down $prog: "
/usr/bin/killall dhclient && \
/sbin/ifconfig $INTERFACE inet6 del $BLOCK_ADDR/$BLOCK_SUBNET && \
success || failure
RETVAL=$?
[ $RETVAL -eq 0 ] && rm -f $LOCKFILE
echo
return $RETVAL
}
case "$1" in
start)
start
;;
stop)
stop
;;
status)
;;
restart)
stop
start
;;
*)
echo "Usage: $prog {start|stop|status|restart}"
exit 1
;;
esac
exit $RETVAL
| true |
b92d75bbd1b83573177b7da7762c8ee01599eb7a | Shell | lkomoro/Marine_Turtle_Rapture_Methods | /Associated Scripts/Rapture_bwa_baitref.sh | UTF-8 | 378 | 3.109375 | 3 | [] | no_license | #!/bin/bash
#run script in directory where files are, or change path accordingly below; would need to also change cut command accordingly
bwa index Rapture_reference.fasta
for file in ./*_RA.fastq
do
echo $file
sample=`echo $file |cut -f1,2,3,4 -d "_"`
echo $sample
bwa mem -t 10 ./Rapture_reference.fasta \
"$sample"_RA.fastq \
>"$sample".sam 2> "$sample".stderr
done
| true |
a03e9a610ef59d289b780a1bbef21b0a145defb1 | Shell | kissthink/ports | /fonts/font_ttf_gw/font_ttf_gw.build | UTF-8 | 1,565 | 3.234375 | 3 | [] | no_license | #!/bin/bash
#
# Maintainer: Christoph J. Thompson <cjsthompson@gmail.com>
source /usr/src/ports/Build/build.sh
NAME=font_ttf_gw
VERSION=1.0
ARCH=noarch
BUILD=1
# Description
cat > ${PKG}/install/slack-desc <<EOF
# HOW TO EDIT THIS FILE:
# The "handy ruler" below makes it easier to edit a package description. Line
# up the first '|' above the ':' following the base package name, and the '|'
# on the right side marks the last column you can put a character in. You must
# make exactly 11 lines for the formatting to be correct. It's also
# customary to leave one space after the ':'.
$(padd)|-----handy-ruler------------------------------------------------------|
${NAME}: font_ttf_gw
${NAME}:
${NAME}: A set of free UNICODE True Type fonts made by George Williams.
${NAME}:
${NAME}:
${NAME}:
${NAME}:
${NAME}:
${NAME}:
${NAME}:
${NAME}:
EOF
cat >> ${PKG}/install/doinst.sh <<EOF
#!/bin/sh
# Update the X font indexes:
if [ -x /usr/bin/mkfontdir ]; then
(
cd ${SYS_DIR[share]}/fonts/TTF
mkfontscale .
mkfontdir .
)
fi
if [ -x /usr/bin/fc-cache ]; then
/usr/bin/fc-cache -f
fi
EOF
# Sources
SRCNAME[0]=gw-fonts-ttf
SRCVERS[0]=${VERSION}
SRCPACK[0]=http://mirror.fsf.org/gnewsense/gnewsense/pool/universe/g/gw-fonts-ttf/${SRCNAME[0]}_${SRCVERS[0]}.orig.tar.gz
SRCROOT[0]=${SRCNAME[0]}-${SRCVERS[0]}
SRCCOPY[0]="BSD2"
build0()
{
gunzip *.gz
for archive in *.zip; do
src.unpack ${archive}
done
mv CUPOU___.TTF CupolaUnicode.ttf
mv CUPOULI_.TTF CupolaUnicodeLightItalic.ttf
install.dir ${PKG}${SYS_DIR[share]}/fonts/TTF
install.dat *.ttf ${PKG}${SYS_DIR[share]}/fonts/TTF
}
| true |
8c0b63877bf5fdd728ef0e0c768edc533187617d | Shell | TarasZakus/pre-commit-php | /pre_commit_hooks/php-cpd.sh | UTF-8 | 1,472 | 4.21875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
################################################################################
#
# Bash PHP Copy Paste Detector
#
# This will prevent a commit if the tool has detected duplicate code
#
# Exit 0 if no errors found
# Exit 1 if errors were found
#
# Requires
# - php
#
################################################################################
# Plugin title
title="PHP Copy Paste Detector"
# Possible command names of this tool
local_command="phpcpd.phar"
vendor_command="vendor/bin/phpcpd"
global_command="phpcpd"
# Print a welcome and locate the exec for this tool
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source $DIR/helpers/colors.sh
source $DIR/helpers/formatters.sh
source $DIR/helpers/welcome.sh
source $DIR/helpers/locate.sh
# Build our list of files, and our list of args by testing if the argument is
# a valid path
args=""
files=""
for arg in ${*}
do
if [ -e $arg ]; then
files+=" $arg"
else
args+=" $arg"
fi
done;
# Run the command with the full list of files
echo -e "${txtgrn} $exec_command --no-interaction --ansi${args}${files}${txtrst}"
OUTPUT="$($exec_command${args}${files})"
RETURN=$?
if [ $RETURN -ne 0 ]; then
echo -en "\n${txtylw}${title} found copied lines in the following files:${txtrst}\n "
echo -en "$OUTPUT" | awk -v m=3 -v n=2 'NR<=m{next};NR>n+m{print line[NR%n]};{line[NR%n]=$0}'
echo -en "\n${bldred}Please review and commit.${txtrst}\n"
exit 1
fi
exit 0
| true |
02cc331dcbabedfbfde2e58491c751ab5fe5b87a | Shell | centreon/centreon | /centreon/libinstall/CentWeb.sh | UTF-8 | 31,864 | 3.40625 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
#----
## @Synopsis Install script for Centreon Web Front (CentWeb)
## @Copyright Copyright 2008, Guillaume Watteeux
## @Copyright Copyright 2008-2020, Centreon
## @license GPL : http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
## Install script for Centreon Web Front (CentWeb)
#----
#----
## Centreon is developed with GPL Licence 2.0
##
## GPL License: http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
##
## Developed by : Julien Mathis - Romain Le Merlus
## Contributors : Guillaume Watteeux - Maximilien Bersoult
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License
## as published by the Free Software Foundation; either version 2
## of the License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## For information : infos@centreon.com
# debug ?
#set -x
echo -e "\n$line"
echo -e "\t$(gettext "Gorgone module Installation")"
echo -e "$line"
# locate gorgone
locate_gorgone_varlib
locate_gorgone_config
check_gorgone_user
check_gorgone_group
echo -e "\n$line"
echo -e "\t$(gettext "Start CentWeb Installation")"
echo -e "$line"
###### check space of tmp dir
check_tmp_disk_space
if [ "$?" -eq 1 ] ; then
if [ "$silent_install" -eq 1 ] ; then
purge_centreon_tmp_dir "silent"
else
purge_centreon_tmp_dir
fi
fi
###### Mandatory step
## Create install_dir_centreon
locate_centreon_installdir
# Create an examples directory to save all important templates and config
[ ! -d $INSTALL_DIR_CENTREON/examples ] && \
mkdir -p $INSTALL_DIR_CENTREON/examples
## locate or create Centreon log dir
locate_centreon_logdir
locate_centreon_etcdir
locate_centreon_bindir
locate_centreon_generationdir
locate_centreon_varlib
## Config pre-require
# define all necessary variables.
locate_rrdtool
locate_mail
locate_cron_d
locate_logrotate_d
locate_php_bin
locate_pear
locate_perl
## Check PHP version
check_php_version
if [ "$?" -eq 1 ] ; then
echo_info "\n\t$(gettext "Your php version does not meet the requirements")"
echo -e "\t$(gettext "Please read the documentation available here") : documentation.centreon.com"
echo -e "\n\t$(gettext "Installation aborted")"
purge_centreon_tmp_dir
exit 1
fi
## Check composer dependencies (if vendor directory exists)
check_composer_dependencies
if [ "$?" -eq 1 ] ; then
echo_info "\n\t$(gettext "You must first install the composer's dependencies")"
echo -e "\n\t$(gettext "composer install --no-dev --optimize-autoloader")"
echo -e "\t$(gettext "Please read the documentation available here") : documentation.centreon.com"
echo -e "\n\t$(gettext "Installation aborted")"
purge_centreon_tmp_dir
exit 1
fi
## Check frontend application (if www/static directory exists)
check_frontend_application
if [ "$?" -eq 1 ] ; then
echo_info "\n\t$(gettext "You must first build the frontend application")"
echo -e "\n\t$(gettext "Using npm install and then npm build")"
echo -e "\t$(gettext "Please read the documentation available here") : documentation.centreon.com"
echo -e "\n\t$(gettext "Installation aborted")"
purge_centreon_tmp_dir
exit 1
fi
## Config apache
check_httpd_directory
check_user_apache
check_group_apache
## Config PHP FPM
check_php_fpm_directory
## Ask for centreon user
check_centreon_group
check_centreon_user
## Ask for monitoring engine user
check_engine_user
## Ask for monitoring broker user
check_broker_user
## Ask for plugins directory
locate_monitoringengine_log
locate_plugindir
locate_centreon_plugins
## Add default value for centreon engine connector
if [ -z "$CENTREON_ENGINE_CONNECTORS" ]; then
if [ "$(uname -i)" = "x86_64" ]; then
CENTREON_ENGINE_CONNECTORS="/usr/lib64/centreon-connector"
else
CENTREON_ENGINE_CONNECTORS="/usr/lib/centreon-connector"
fi
fi
add_group "$WEB_USER" "$CENTREON_GROUP"
add_group "$MONITORINGENGINE_USER" "$CENTREON_GROUP"
get_primary_group "$MONITORINGENGINE_USER" "MONITORINGENGINE_GROUP"
add_group "$WEB_USER" "$MONITORINGENGINE_GROUP"
add_group "$CENTREON_USER" "$MONITORINGENGINE_GROUP"
add_group "$CENTREON_USER" "$WEB_GROUP"
## Config Sudo
# I think this process move on CentCore install...
configureSUDO "$INSTALL_DIR_CENTREON/examples"
## Config Apache
configureApache "$INSTALL_DIR_CENTREON/examples"
## Ask for fpm-php service
configure_php_fpm "$INSTALL_DIR_CENTREON/examples"
## Create temps folder and copy all src into
copyInTempFile 2>>$LOG_FILE
## InstallCentreon
# change rights centreon_log directory
log "INFO" "$(gettext "Modify rights on") $CENTREON_LOG"
$INSTALL_DIR/cinstall $cinstall_opts \
-u "$CENTREON_USER" -g "$CENTREON_GROUP" -d 775 \
"$CENTREON_LOG" >> "$LOG_FILE" 2>&1
check_result $? "$(gettext "Modify rights on") $CENTREON_LOG"
# change rights on successful installations files
log "INFO" "$(gettext "Modify rights on") $CENTREON_VARLIB/installs"
$INSTALL_DIR/cinstall $cinstall_opts \
-u "$CENTREON_USER" -g "$CENTREON_GROUP" -d 775 \
"$CENTREON_VARLIB/installs" >> "$LOG_FILE" 2>&1
chmod -R g+rwxs $CENTREON_VARLIB/installs
check_result $? "$(gettext "Modify rights on") $CENTREON_VARLIB/installs"
# change rights on centreon etc
log "INFO" "$(gettext "Modify rights on") $CENTREON_ETC"
$INSTALL_DIR/cinstall $cinstall_opts \
-u "$CENTREON_USER" -g "$CENTREON_GROUP" -d 775 \
"$CENTREON_ETC" >> "$LOG_FILE" 2>&1
check_result $? "$(gettext "Modify rights on") $CENTREON_ETC"
# change rights on centreon cache folder
log "INFO" "$(gettext "Modify rights on") $CENTREON_CACHEDIR"
$INSTALL_DIR/cinstall $cinstall_opts \
-u "$CENTREON_USER" -g "$CENTREON_GROUP" -d 775 \
"$CENTREON_CACHEDIR" >> "$LOG_FILE" 2>&1
check_result $? "$(gettext "Modify rights on") $CENTREON_CACHEDIR"
## Copy Web Front and Backend Sources in final folder
log "INFO" "$(gettext "Copy CentWeb and GPL_LIB in temporary final directory")"
cp -Rf $TMP_DIR/src/api $TMP_DIR/final
cp -Rf $TMP_DIR/src/www $TMP_DIR/final
cp -Rf $TMP_DIR/src/GPL_LIB $TMP_DIR/final
cp -Rf $TMP_DIR/src/config $TMP_DIR/final
mv $TMP_DIR/src/config/centreon.config.php.template $TMP_DIR/src/config/centreon.config.php
cp -f $TMP_DIR/src/container.php $TMP_DIR/final
cp -f $TMP_DIR/src/bootstrap.php $TMP_DIR/final
cp -f $TMP_DIR/src/composer.json $TMP_DIR/final
cp -f $TMP_DIR/src/package.json $TMP_DIR/final
cp -f $TMP_DIR/src/pnpm-lock.yaml $TMP_DIR/final
cp -f $TMP_DIR/src/pnpm-workspace.yaml $TMP_DIR/final
cp -f $TMP_DIR/src/.env $TMP_DIR/final
cp -f $TMP_DIR/src/.env.local.php $TMP_DIR/final
cp -Rf $TMP_DIR/src/src $TMP_DIR/final
## Prepare and copy composer module
OLDPATH=$(pwd)
cd $TMP_DIR/src/
log "INFO" "$(gettext "Copying composer dependencies...")"
cp -Rf vendor $TMP_DIR/final/
cd "${OLDPATH}"
## Build frontend app
OLDPATH=$(pwd)
cd $TMP_DIR/src/
log "INFO" "$(gettext "Copying frontend application...")"
cp -Rf www/index.html www/static $TMP_DIR/final/www/
cd "${OLDPATH}"
## Create temporary directory
mkdir -p $TMP_DIR/work/bin >> $LOG_FILE 2>&1
mkdir -p $TMP_DIR/work/www/install >> "$LOG_FILE" 2>&1
mkdir -p $TMP_DIR/work/cron/reporting >> "$LOG_FILE" 2>&1
mkdir -p $TMP_DIR/work/data >> "$LOG_FILE" 2>&1
mkdir -p $TMP_DIR/final/bin >> $LOG_FILE 2>&1
mkdir -p $TMP_DIR/final/cron/reporting >> "$LOG_FILE" 2>&1
mkdir -p $TMP_DIR/final/libinstall >> "$LOG_FILE" 2>&1
mkdir -p $TMP_DIR/final/data >> "$LOG_FILE" 2>&1
## Ticket #372 : add functions/cinstall fonctionnality
cp -Rf $TMP_DIR/src/libinstall/{functions,cinstall,gettext} \
$TMP_DIR/final/libinstall/ >> "$LOG_FILE" 2>&1
## Prepare insertBaseConf.sql
## Change Macro on sql file
log "INFO" "$(gettext "Change macros for insertBaseConf.sql")"
${SED} -e 's|@INSTALL_DIR_CENTREON@|'"$INSTALL_DIR_CENTREON"'|g' \
-e 's|@BIN_MAIL@|'"$BIN_MAIL"'|g' \
-e 's|@CENTREON_ETC@|'"$CENTREON_ETC"'|g' \
-e 's|@CENTREON_LOG@|'"$CENTREON_LOG"'|g' \
-e 's|@CENTREON_VARLIB@|'"$CENTREON_VARLIB"'|g' \
-e 's|@BIN_RRDTOOL@|'"$BIN_RRDTOOL"'|g' \
$TMP_DIR/src/www/install/insertBaseConf.sql > \
$TMP_DIR/work/www/install/insertBaseConf.sql
check_result $? "$(gettext "Change macros for insertBaseConf.sql")"
## Copy in final dir
log "INFO" "$( gettext "Copying www/install/insertBaseConf.sql in final directory")"
cp $TMP_DIR/work/www/install/insertBaseConf.sql \
$TMP_DIR/final/www/install/insertBaseConf.sql >> "$LOG_FILE" 2>&1
### Change Macro for sql update file
macros="@CENTREON_ETC@,@CENTREON_CACHEDIR@,@CENTPLUGINSTRAPS_BINDIR@,@CENTREON_LOG@,@CENTREON_VARLIB@,@CENTREON_ENGINE_CONNECTORS@"
find_macros_in_dir "$macros" "$TMP_DIR/src/" "www" "Update*.sql" "file_sql_temp"
log "INFO" "$(gettext "Apply macros")"
flg_error=0
${CAT} "$file_sql_temp" | while read file ; do
log "MACRO" "$(gettext "Change macro for") : $file"
[ ! -d $(dirname $TMP_DIR/work/$file) ] && \
mkdir -p $(dirname $TMP_DIR/work/$file) >> $LOG_FILE 2>&1
${SED} -e 's|@CENTREON_ETC@|'"$CENTREON_ETC"'|g' \
-e 's|@CENTREON_CACHEDIR@|'"$CENTREON_CACHEDIR"'|g' \
-e 's|@CENTPLUGINSTRAPS_BINDIR@|'"$CENTPLUGINSTRAPS_BINDIR"'|g' \
-e 's|@CENTREON_VARLIB@|'"$CENTREON_VARLIB"'|g' \
-e 's|@CENTREON_LOG@|'"$CENTREON_LOG"'|g' \
-e 's|@CENTREON_ENGINE_CONNECTORS@|'"$CENTREON_ENGINE_CONNECTORS"'|g' \
$TMP_DIR/src/$file > $TMP_DIR/work/$file
[ $? -ne 0 ] && flg_error=1
log "MACRO" "$(gettext "Copy in final dir") : $file"
cp -f $TMP_DIR/work/$file $TMP_DIR/final/$file >> $LOG_FILE 2>&1
done
check_result $flg_error "$(gettext "Change macros for sql update files")"
### Step 2.0: Modify rights on Centreon WebFront and replace macros
## create a random APP_SECRET key
HEX_KEY=($(dd if=/dev/urandom bs=32 count=1 status=none | $PHP_BIN -r "echo bin2hex(fread(STDIN, 32));"))
log "INFO" "$(gettext "Generated a random key") : $HEX_KEY"
## use this step to change macros on php file...
macros="@CENTREON_ETC@,@CENTREON_CACHEDIR@,@CENTPLUGINSTRAPS_BINDIR@,@CENTREON_LOG@,@CENTREON_VARLIB@,@CENTREONTRAPD_BINDIR@,@PHP_BIN@"
find_macros_in_dir "$macros" "$TMP_DIR/src/" "www" "*.php" "file_php_temp"
find_macros_in_dir "$macros" "$TMP_DIR/src/" "bin" "*" "file_bin_temp"
log "INFO" "$(gettext "Apply macros on php files")"
flg_error=0
${CAT} "$file_php_temp" "$file_bin_temp" | while read file ; do
log "MACRO" "$(gettext "Change macro for") : $file"
[ ! -d $(dirname $TMP_DIR/work/$file) ] && \
mkdir -p $(dirname $TMP_DIR/work/$file) >> $LOG_FILE 2>&1
${SED} -e 's|@CENTREON_ETC@|'"$CENTREON_ETC"'|g' \
-e 's|@CENTREON_CACHEDIR@|'"$CENTREON_CACHEDIR"'|g' \
-e 's|@CENTPLUGINSTRAPS_BINDIR@|'"$CENTPLUGINSTRAPS_BINDIR"'|g' \
-e 's|@CENTREONTRAPD_BINDIR@|'"$CENTREON_BINDIR"'|g' \
-e 's|@CENTREON_VARLIB@|'"$CENTREON_VARLIB"'|g' \
-e 's|@CENTREON_LOG@|'"$CENTREON_LOG"'|g' \
-e 's|@PHP_BIN@|'"$PHP_BIN"'|g' \
$TMP_DIR/src/$file > $TMP_DIR/work/$file
[ $? -ne 0 ] && flg_error=1
log "MACRO" "$(gettext "Copy in final dir") : $file"
cp -f $TMP_DIR/work/$file $TMP_DIR/final/$file >> $LOG_FILE 2>&1
done
check_result $flg_error "$(gettext "Change macros for php files")"
macros="@CENTREON_ETC@,@CENTREON_CACHEDIR@,@CENTPLUGINSTRAPS_BINDIR@,@CENTREON_LOG@,@CENTREON_VARLIB@,@CENTREONTRAPD_BINDIR@,%APP_SECRET%"
find_macros_in_dir "$macros" "$TMP_DIR/src" "config" "*.php*" "file_php_config_temp"
find_macros_in_dir "$macros" "$TMP_DIR/src/" "." ".env*" "file_env_temp"
log "INFO" "$(gettext "Apply macros on env and config files")"
flg_error=0
${CAT} "$file_php_config_temp" "$file_env_temp" | while read file ; do
log "MACRO" "$(gettext "Change macro for") : $file"
[ ! -d $(dirname $TMP_DIR/work/$file) ] && \
mkdir -p $(dirname $TMP_DIR/work/$file) >> $LOG_FILE 2>&1
${SED} -e 's|@CENTREON_ETC@|'"$CENTREON_ETC"'|g' \
-e 's|@CENTREON_CACHEDIR@|'"$CENTREON_CACHEDIR"'|g' \
-e 's|@CENTPLUGINSTRAPS_BINDIR@|'"$CENTPLUGINSTRAPS_BINDIR"'|g' \
-e 's|@CENTREONTRAPD_BINDIR@|'"$CENTREON_BINDIR"'|g' \
-e 's|@CENTREON_VARLIB@|'"$CENTREON_VARLIB"'|g' \
-e 's|@CENTREON_LOG@|'"$CENTREON_LOG"'|g' \
-e 's|%APP_SECRET%|'"$HEX_KEY"'|g' \
$TMP_DIR/src/$file > $TMP_DIR/work/$file
[ $? -ne 0 ] && flg_error=1
log "MACRO" "$(gettext "Copy in final dir") : $file"
cp -f $TMP_DIR/work/$file $TMP_DIR/final/$file >> $LOG_FILE 2>&1
done
check_result $flg_error "$(gettext "Change macros for php env and config file")"
### Step 2.1 : replace macro for perl binary
## use this step to change macros on perl file...
macros="@CENTREON_ETC@,@CENTREON_CACHEDIR@,@CENTPLUGINSTRAPS_BINDIR@,@CENTREON_LOG@,@CENTREON_VARLIB@,@CENTREONTRAPD_BINDIR@"
find_macros_in_dir "$macros" "$TMP_DIR/src" "bin/" "*" "file_perl_temp"
log "INFO" "$(gettext "Apply macros")"
flg_error=0
${CAT} "$file_perl_temp" | while read file ; do
log "MACRO" "$(gettext "Change macro for") : $file"
[ ! -d $(dirname $TMP_DIR/work/$file) ] && \
mkdir -p $(dirname $TMP_DIR/work/$file) >> $LOG_FILE 2>&1
${SED} -e 's|@CENTREON_ETC@|'"$CENTREON_ETC"'|g' \
-e 's|@CENTREON_CACHEDIR@|'"$CENTREON_CACHEDIR"'|g' \
-e 's|@CENTPLUGINSTRAPS_BINDIR@|'"$CENTPLUGINSTRAPS_BINDIR"'|g' \
-e 's|@CENTREONTRAPD_BINDIR@|'"$CENTREON_BINDIR"'|g' \
-e 's|@CENTREON_VARLIB@|'"$CENTREON_VARLIB"'|g' \
-e 's|@CENTREON_LOG@|'"$CENTREON_LOG"'|g' \
$TMP_DIR/src/$file > $TMP_DIR/work/$file
[ $? -ne 0 ] && flg_error=1
log "MACRO" "$(gettext "Copy in final dir") : $file"
cp -f $TMP_DIR/work/$file $TMP_DIR/final/$file >> $LOG_FILE 2>&1
done
check_result $flg_error "$(gettext "Change macros for perl binary")"
### Step 3: Modify rights on monitoring engine /etc/centreon folder
log "INFO" "$(gettext "Modify rights on") $MONITORINGENGINE_ETC"
flg_error=0
$INSTALL_DIR/cinstall $cinstall_opts \
-g "$MONITORINGENGINE_GROUP" -d 775 \
"$MONITORINGENGINE_ETC" >> "$LOG_FILE" 2>&1
[ $? -ne 0 ] && flg_error=1
find "$MONITORINGENGINE_ETC" -type f -print | \
xargs -I '{}' ${CHMOD} 775 '{}' >> "$LOG_FILE" 2>&1
[ $? -ne 0 ] && flg_error=1
find "$MONITORINGENGINE_ETC" -type f -print | \
xargs -I '{}' ${CHOWN} "$MONITORINGENGINE_USER":"$MONITORINGENGINE_GROUP" '{}' >> "$LOG_FILE" 2>&1
[ $? -ne 0 ] && flg_error=1
check_result $flg_error "$(gettext "Modify rights on") $MONITORINGENGINE_ETC"
### Modify rights to broker /etc/centreon-broker folder
log "INFO" "$(gettext "Modify rights on ") $BROKER_ETC"
flg_error=0
if [ -z "$BROKER_USER" ]; then
BROKER_USER=$MONITORINGENGINE_USER
get_primary_group "$BROKER_USER" "BROKER_GROUP"
else
get_primary_group "$BROKER_USER" "BROKER_GROUP"
add_group "$WEB_USER" "$BROKER_GROUP"
add_group "$MONITORINGENGINE_USER" "$BROKER_GROUP"
add_group "$BROKER_USER" "$CENTREON_GROUP"
fi
## Configure Gorgone user and group
add_group "$CENTREON_USER" "$GORGONE_GROUP"
add_group "$WEB_USER" "$GORGONE_GROUP"
add_group "$GORGONE_USER" "$CENTREON_GROUP"
add_group "$GORGONE_USER" "$BROKER_GROUP"
add_group "$GORGONE_USER" "$MONITORINGENGINE_GROUP"
add_group "$GORGONE_USER" "$WEB_GROUP"
if [ "$MONITORINGENGINE_ETC" != "$BROKER_ETC" ]; then
$INSTALL_DIR/cinstall $cinstall_opts \
-g "$BROKER_GROUP" -d 775 \
"$BROKER_ETC" >> "$LOG_FILE" 2>&1
[ $? -ne 0 ] && flg_error=1
find "$BROKER_ETC" -type f -print | \
xargs -I '{}' ${CHMOD} 775 '{}' >> "$LOG_FILE" 2>&1
[ $? -ne 0 ] && flg_error=1
find "$BROKER_ETC" -type f -print | \
xargs -I '{}' ${CHOWN} "$BROKER_USER":"$BROKER_GROUP" '{}' >> "$LOG_FILE" 2>&1
[ $? -ne 0 ] && flg_error=1
check_result $flg_error "$(gettext "Modify rights on") $BROKER_ETC"
fi
if [ "$upgrade" = "1" ]; then
echo_info "$(gettext "Disconnect users from WebUI")"
php $INSTALL_DIR/clean_session.php "$CENTREON_ETC" >> "$LOG_FILE" 2>&1
check_result $? "$(gettext "All users are disconnected")"
fi
### Step 4: Copy final stuff in system folder
echo_info "$(gettext "Copy CentWeb in system directory")"
$INSTALL_DIR/cinstall $cinstall \
-u "$CENTREON_USER" -g "$CENTREON_GROUP" -d 775 \
$INSTALL_DIR_CENTREON/www >> "$LOG_FILE" 2>&1
$INSTALL_DIR/cinstall $cinstall_opts \
-u "$CENTREON_USER" -g "$CENTREON_GROUP" -d 755 -m 644 \
-p $TMP_DIR/final/www \
$TMP_DIR/final/www/* $INSTALL_DIR_CENTREON/www/ >> "$LOG_FILE" 2>&1
check_result $? "$(gettext "Install CentWeb (web front of centreon)")"
cp -Rf $TMP_DIR/final/src $INSTALL_DIR_CENTREON/ >> "$LOG_FILE" 2>&1
$CHOWN -R $WEB_USER:$WEB_GROUP $INSTALL_DIR_CENTREON/src
cp -Rf $TMP_DIR/final/api $INSTALL_DIR_CENTREON/ >> "$LOG_FILE" 2>&1
$CHOWN -R $WEB_USER:$WEB_GROUP $INSTALL_DIR_CENTREON/api
log "INFO" "$(gettext "Modify rights for install directory")"
$CHOWN -R $WEB_USER:$WEB_GROUP $INSTALL_DIR_CENTREON/www/install/
check_result $? "$(gettext "Modify rights for install directory")"
[ ! -d "$INSTALL_DIR_CENTREON/www/modules" ] && \
$INSTALL_DIR/cinstall $cinstall_opts \
-u "$CENTREON_USER" -g "$CENTREON_GROUP" -d 755 \
$INSTALL_DIR_CENTREON/www/modules >> "$LOG_FILE" 2>&1
[ ! -d "$INSTALL_DIR_CENTREON/www/img/media" ] && \
$INSTALL_DIR/cinstall $cinstall_opts \
-u "$CENTREON_USER" -g "$CENTREON_GROUP" -d 775 \
$INSTALL_DIR_CENTREON/www/img/media >> "$LOG_FILE" 2>&1
cp -f $TMP_DIR/final/bootstrap.php $INSTALL_DIR_CENTREON/bootstrap.php >> "$LOG_FILE" 2>&1
$CHOWN $WEB_USER:$WEB_GROUP $INSTALL_DIR_CENTREON/bootstrap.php
cp -f $TMP_DIR/final/.env $INSTALL_DIR_CENTREON/.env >> "$LOG_FILE" 2>&1
$CHOWN $WEB_USER:$WEB_GROUP $INSTALL_DIR_CENTREON/.env
cp -f $TMP_DIR/final/.env.local.php $INSTALL_DIR_CENTREON/.env.local.php >> "$LOG_FILE" 2>&1
$CHOWN $WEB_USER:$WEB_GROUP $INSTALL_DIR_CENTREON/.env.local.php
cp -f $TMP_DIR/final/container.php $INSTALL_DIR_CENTREON/container.php >> "$LOG_FILE" 2>&1
$CHOWN $WEB_USER:$WEB_GROUP $INSTALL_DIR_CENTREON/container.php
cp -Rf $TMP_DIR/final/vendor $INSTALL_DIR_CENTREON/ >> "$LOG_FILE" 2>&1
$CHOWN -R $WEB_USER:$WEB_GROUP $INSTALL_DIR_CENTREON/vendor
cp -f $TMP_DIR/final/composer.json $INSTALL_DIR_CENTREON/composer.json >> "$LOG_FILE" 2>&1
$CHOWN $WEB_USER:$WEB_GROUP $INSTALL_DIR_CENTREON/composer.json
cp -f $TMP_DIR/final/package.json $INSTALL_DIR_CENTREON/package.json >> "$LOG_FILE" 2>&1
$CHOWN $WEB_USER:$WEB_GROUP $INSTALL_DIR_CENTREON/package.json
cp -f $TMP_DIR/final/pnpm-lock.yaml $INSTALL_DIR_CENTREON/pnpm-lock.yaml >> "$LOG_FILE" 2>&1
$CHOWN $WEB_USER:$WEB_GROUP $INSTALL_DIR_CENTREON/pnpm-lock.yaml
cp -f $TMP_DIR/final/pnpm-workspace.yaml $INSTALL_DIR_CENTREON/pnpm-workspace.yaml >> "$LOG_FILE" 2>&1
$CHOWN $WEB_USER:$WEB_GROUP $INSTALL_DIR_CENTREON/pnpm-workspace.yaml
$INSTALL_DIR/cinstall $cinstall \
-u "$CENTREON_USER" -g "$CENTREON_GROUP" -d 775 \
$INSTALL_DIR_CENTREON/config >> "$LOG_FILE" 2>&1
cp -Rf $TMP_DIR/final/config/* $INSTALL_DIR_CENTREON/config/ >> "$LOG_FILE" 2>&1
$CHOWN -R $WEB_USER:$WEB_GROUP $INSTALL_DIR_CENTREON/config
$INSTALL_DIR/cinstall $cinstall_opts \
-u "$CENTREON_USER" -g "$CENTREON_GROUP" -d 775 \
$CENTREON_CACHEDIR/config >> "$LOG_FILE" 2>&1
$INSTALL_DIR/cinstall $cinstall_opts \
-u "$CENTREON_USER" -g "$CENTREON_GROUP" -d 775 \
$CENTREON_CACHEDIR/config/engine >> "$LOG_FILE" 2>&1
$INSTALL_DIR/cinstall $cinstall_opts \
-u "$CENTREON_USER" -g "$CENTREON_GROUP" -d 775 \
$CENTREON_CACHEDIR/config/broker >> "$LOG_FILE" 2>&1
$INSTALL_DIR/cinstall $cinstall_opts \
-u "$CENTREON_USER" -g "$CENTREON_GROUP" -d 775 \
$CENTREON_CACHEDIR/config/export >> "$LOG_FILE" 2>&1
$INSTALL_DIR/cinstall $cinstall_opts \
-u "$CENTREON_USER" -g "$CENTREON_GROUP" -d 775 \
$CENTREON_CACHEDIR/symfony >> "$LOG_FILE" 2>&1
log "INFO" "$(gettext "Copying GPL_LIB")"
$INSTALL_DIR/cinstall $cinstall_opts \
-u "$CENTREON_USER" -g "$CENTREON_GROUP" -d 755 -m 644 \
$TMP_DIR/final/GPL_LIB/* $INSTALL_DIR_CENTREON/GPL_LIB/ >> "$LOG_FILE" 2>&1
check_result $? "$(gettext "Install libraries")"
log "INFO" "$(gettext "Add rights for Smarty cache and compile")"
$CHMOD -R g+w $INSTALL_DIR_CENTREON/GPL_LIB/SmartyCache
check_result $? "$(gettext "Write rights to Smarty Cache")"
## Cron stuff
## need to add stuff for Unix system... (freeBSD...)
log "INFO" "$(gettext "Change macros for centreon.cron")"
${SED} -e 's|@PHP_BIN@|'"$PHP_BIN"'|g' \
-e 's|@PERL_BIN@|'"$BIN_PERL"'|g' \
-e 's|@CENTREON_ETC@|'"$CENTREON_ETC"'|g' \
-e 's|@INSTALL_DIR_CENTREON@|'"$INSTALL_DIR_CENTREON"'|g' \
-e 's|@CENTREON_LOG@|'"$CENTREON_LOG"'|g' \
-e 's|@CENTREON_USER@|'"$CENTREON_USER"'|g' \
-e 's|@WEB_USER@|'"$WEB_USER"'|g' \
$BASE_DIR/tmpl/install/centreon.cron > $TMP_DIR/work/centreon.cron
check_result $? "$(gettext "Change macros for centreon.cron")"
cp $TMP_DIR/work/centreon.cron $TMP_DIR/final/centreon.cron >> "$LOG_FILE" 2>&1
log "INFO" "$(gettext "Install centreon.cron")"
$INSTALL_DIR/cinstall $cinstall_opts \
-m 644 \
$TMP_DIR/final/centreon.cron $CRON_D/centreon >> "$LOG_FILE" 2>&1
check_result $? "$(gettext "Install Centreon cron.d file")"
## cron binary
cp -R $TMP_DIR/src/cron/ $TMP_DIR/final/
log "INFO" "$(gettext "Change macros for centAcl.php")"
${SED} -e 's|@CENTREON_ETC@|'"$CENTREON_ETC"'|g' \
-e 's|@PHP_BIN@|'"$PHP_BIN"'|g' \
$TMP_DIR/src/cron/centAcl.php > $TMP_DIR/work/cron/centAcl.php
check_result $? "$(gettext "Change macros for centAcl.php")"
cp -f $TMP_DIR/work/cron/centAcl.php \
$TMP_DIR/final/cron/centAcl.php >> "$LOG_FILE" 2>&1
log "INFO" "$(gettext "Change macros for downtimeManager.php")"
${SED} -e 's|@CENTREON_ETC@|'"$CENTREON_ETC"'|g' \
-e 's|@CENTREON_VARLIB@|'"$CENTREON_VARLIB"'|g' \
-e 's|@PHP_BIN@|'"$PHP_BIN"'|g' \
$TMP_DIR/src/cron/downtimeManager.php > $TMP_DIR/work/cron/downtimeManager.php
check_result $? "$(gettext "Change macros for downtimeManager.php")"
cp -f $TMP_DIR/work/cron/downtimeManager.php \
$TMP_DIR/final/cron/downtimeManager.php >> "$LOG_FILE" 2>&1
log "INFO" "$(gettext "Change macros for centreon-backup.pl")"
${SED} -e 's|@CENTREON_ETC@|'"$CENTREON_ETC"'|g' \
-e 's|@PHP_BIN@|'"$PHP_BIN"'|g' \
$TMP_DIR/src/cron/centreon-backup.pl > $TMP_DIR/work/cron/centreon-backup.pl
check_result $? "$(gettext "Change macros for centreon-backup.pl")"
cp -f $TMP_DIR/work/cron/centreon-backup.pl \
$TMP_DIR/final/cron/centreon-backup.pl >> "$LOG_FILE" 2>&1
log "INFO" "$(gettext "Install cron directory")"
$INSTALL_DIR/cinstall $cinstall_opts \
-u "$CENTREON_USER" -g "$CENTREON_GROUP" -d 755 -m 644 \
$TMP_DIR/final/cron/* $INSTALL_DIR_CENTREON/cron/ >> "$LOG_FILE" 2>&1
check_result $? "$(gettext "Install cron directory")"
log "INFO" "$(gettext "Modify rights for eventReportBuilder")"
${CHMOD} 755 $INSTALL_DIR_CENTREON/cron/eventReportBuilder >> "$LOG_FILE" 2>&1
check_result $? "$(gettext "Modify rights for eventReportBuilder")"
log "INFO" "$(gettext "Modify rights for dashboardBuilder")"
${CHMOD} 755 $INSTALL_DIR_CENTREON/cron/dashboardBuilder >> "$LOG_FILE" 2>&1
check_result $? "$(gettext "Modify rights for dashboardBuilder")"
log "INFO" "$(gettext "Modify rights for centreon-backup.pl")"
${CHMOD} 755 $INSTALL_DIR_CENTREON/cron/centreon-backup.pl >> "$LOG_FILE" 2>&1
check_result $? "$(gettext "Modify rights for centreon-backup.pl")"
log "INFO" "$(gettext "Modify rights for centreon-backup-mysql.sh")"
${CHMOD} 755 $INSTALL_DIR_CENTREON/cron/centreon-backup-mysql.sh >> "$LOG_FILE" 2>&1
check_result $? "$(gettext "Modify rights for centreon-backup-mysql.sh")"
## Logrotate
log "INFO" "$(gettext "Change macros for centreon.logrotate")"
${SED} -e 's|@CENTREON_LOG@|'"$CENTREON_LOG"'|g' \
$TMP_DIR/src/logrotate/centreon > $TMP_DIR/work/centreon.logrotate
check_result $? "$(gettext "Change macros for centreon.logrotate")"
cp $TMP_DIR/work/centreon.logrotate $TMP_DIR/final/centreon.logrotate >> "$LOG_FILE" 2>&1
log "INFO" "$(gettext "Install centreon.logrotate")"
$INSTALL_DIR/cinstall $cinstall_opts \
-m 644 \
$TMP_DIR/final/centreon.logrotate $LOGROTATE_D/centreon >> "$LOG_FILE" 2>&1
check_result $? "$(gettext "Install Centreon logrotate.d file")"
## Install traps insert binary
log "INFO" "$(gettext "Prepare centFillTrapDB")"
cp $TMP_DIR/src/bin/centFillTrapDB \
$TMP_DIR/final/bin/centFillTrapDB >> "$LOG_FILE" 2>&1
check_result $? "$(gettext "Prepare centFillTrapDB")"
log "INFO" "$(gettext "Install centFillTrapDB")"
$INSTALL_DIR/cinstall $cinstall_opts \
-m 755 \
$TMP_DIR/final/bin/centFillTrapDB \
$CENTREON_BINDIR/centFillTrapDB >> $LOG_FILE 2>&1
check_result $? "$(gettext "Install centFillTrapDB")"
## Install centreon_trap_send
log "INFO" "$(gettext "Prepare centreon_trap_send")"
cp $TMP_DIR/src/bin/centreon_trap_send \
$TMP_DIR/final/bin/centreon_trap_send >> "$LOG_FILE" 2>&1
check_result $? "$(gettext "Prepare centreon_trap_send")"
log "INFO" "$(gettext "Install centreon_trap_send")"
$INSTALL_DIR/cinstall $cinstall_opts \
-m 755 \
$TMP_DIR/final/bin/centreon_trap_send \
$CENTREON_BINDIR/centreon_trap_send >> $LOG_FILE 2>&1
check_result $? "$(gettext "Install centreon_trap_send")"
## Install centreon_check_perfdata
log "INFO" "$(gettext "Prepare centreon_check_perfdata")"
cp $TMP_DIR/src/bin/centreon_check_perfdata \
$TMP_DIR/final/bin/centreon_check_perfdata >> "$LOG_FILE" 2>&1
check_result $? "$(gettext "Prepare centreon_check_perfdata")"
log "INFO" "$(gettext "Install centreon_check_perfdata")"
$INSTALL_DIR/cinstall $cinstall_opts \
-m 755 \
$TMP_DIR/final/bin/centreon_check_perfdata \
$CENTREON_BINDIR/centreon_check_perfdata >> $LOG_FILE 2>&1
check_result $? "$(gettext "Install centreon_check_perfdata")"
## Install centreonSyncPlugins
log "INFO" "$(gettext "Prepare centreonSyncPlugins")"
cp $TMP_DIR/src/bin/centreonSyncPlugins \
$TMP_DIR/final/bin/centreonSyncPlugins >> "$LOG_FILE" 2>&1
check_result $? "$(gettext "Prepare centreonSyncPlugins")"
log "INFO" "$(gettext "Install centreonSyncPlugins")"
$INSTALL_DIR/cinstall $cinstall_opts \
-m 755 \
$TMP_DIR/final/bin/centreonSyncPlugins \
$CENTREON_BINDIR/centreonSyncPlugins >> $LOG_FILE 2>&1
check_result $? "$(gettext "Install centreonSyncPlugins")"
## Install centreonSyncArchives
log "INFO" "$(gettext "Prepare centreonSyncArchives")"
cp $TMP_DIR/src/bin/centreonSyncArchives \
$TMP_DIR/final/bin/centreonSyncArchives >> "$LOG_FILE" 2>&1
check_result $? "$(gettext "Prepare centreonSyncArchives")"
log "INFO" "$(gettext "Install centreonSyncArchives")"
$INSTALL_DIR/cinstall $cinstall_opts \
-m 755 \
$TMP_DIR/final/bin/centreonSyncArchives \
$CENTREON_BINDIR/centreonSyncArchives >> $LOG_FILE 2>&1
check_result $? "$(gettext "Install centreonSyncArchives")"
## Install generateSqlLite
log "INFO" "$(gettext "Install generateSqlLite")"
$INSTALL_DIR/cinstall $cinstall_opts \
-m 755 \
$TMP_DIR/final/bin/generateSqlLite \
$CENTREON_BINDIR/generateSqlLite >> $LOG_FILE 2>&1
check_result $? "$(gettext "Install generateSqlLite")"
## Install changeRrdDsName
log "INFO" "$(gettext "Install changeRrdDsName.pl")"
$INSTALL_DIR/cinstall $cinstall_opts \
-m 755 \
$TMP_DIR/final/bin/changeRrdDsName.pl \
$CENTREON_BINDIR/changeRrdDsName.pl >> $LOG_FILE 2>&1
check_result $? "$(gettext "Install changeRrdDsName.pl")"
## Install binaries for check indexes
log "INFO" "$(gettext "Install export-mysql-indexes")"
$INSTALL_DIR/cinstall $cinstall_opts \
-m 755 \
$TMP_DIR/final/bin/export-mysql-indexes \
$CENTREON_BINDIR/export-mysql-indexes >> $LOG_FILE 2>&1
check_result $? "$(gettext "Install export-mysql-indexes")"
log "INFO" "$(gettext "Install import-mysql-indexes")"
$INSTALL_DIR/cinstall $cinstall_opts \
-m 755 \
$TMP_DIR/final/bin/import-mysql-indexes \
$CENTREON_BINDIR/import-mysql-indexes >> $LOG_FILE 2>&1
check_result $? "$(gettext "Install import-mysql-indexes")"
# Install Centreon CLAPI command line
log "INFO" "$(gettext "Install clapi binary")"
$INSTALL_DIR/cinstall $cinstall_opts \
-m 755 \
$TMP_DIR/final/bin/centreon \
$CENTREON_BINDIR/centreon >> $LOG_FILE 2>&1
check_result $? "$(gettext "Install clapi binary")"
# Install centreon perl lib
$INSTALL_DIR/cinstall $cinstall_opts -m 755 \
$TMP_DIR/src/lib/perl/centreon/common/ \
$PERL_LIB_DIR/centreon/common/ >> $LOG_FILE 2>&1
$INSTALL_DIR/cinstall $cinstall_opts -m 755 \
$TMP_DIR/src/lib/perl/centreon/script.pm \
$PERL_LIB_DIR/centreon/script.pm >> $LOG_FILE 2>&1
$INSTALL_DIR/cinstall $cinstall_opts -m 755 \
$TMP_DIR/src/lib/perl/centreon/reporting/ \
$PERL_LIB_DIR/centreon/reporting/ >> $LOG_FILE 2>&1
$INSTALL_DIR/cinstall $cinstall_opts -m 755 \
$TMP_DIR/src/lib/perl/centreon/script/dashboardBuilder.pm \
$PERL_LIB_DIR/centreon/script/dashboardBuilder.pm >> $LOG_FILE 2>&1
$INSTALL_DIR/cinstall $cinstall_opts -m 755 \
$TMP_DIR/src/lib/perl/centreon/script/eventReportBuilder.pm \
$PERL_LIB_DIR/centreon/script/eventReportBuilder.pm >> $LOG_FILE 2>&1
$INSTALL_DIR/cinstall $cinstall_opts -m 755 \
$TMP_DIR/src/lib/perl/centreon/script/logAnalyser.pm \
$PERL_LIB_DIR/centreon/script/logAnalyser.pm >> $LOG_FILE 2>&1
$INSTALL_DIR/cinstall $cinstall_opts -m 755 \
$TMP_DIR/src/lib/perl/centreon/script/logAnalyserBroker.pm \
$PERL_LIB_DIR/centreon/script/logAnalyserBroker.pm >> $LOG_FILE 2>&1
echo_success "$(gettext "Centreon Web Perl lib installed")" "$ok"
log "INFO" "$(gettext "Centreon Web Perl lib installed")"
# End
# Install libraries for Centreon CLAPI
$INSTALL_DIR/cinstall $cinstall_opts -m 755 \
$TMP_DIR/src/lib/Centreon/ \
$INSTALL_DIR_CENTREON/lib/Centreon/ >> $LOG_FILE 2>&1
## Prepare to install all pear modules needed.
# use check_pear.php script
echo -e "\n$line"
echo -e "\t$(gettext "Pear Modules")"
echo -e "$line"
pear_module="0"
first=1
while [ "$pear_module" -eq 0 ] ; do
check_pear_module "$INSTALL_VARS_DIR/$PEAR_MODULES_LIST"
if [ "$?" -ne 0 ] ; then
if [ "${PEAR_AUTOINST:-0}" -eq 0 ]; then
if [ "$first" -eq 0 ] ; then
echo_info "$(gettext "Unable to upgrade PEAR modules. You seem to have a connection problem.")"
fi
yes_no_default "$(gettext "Do you want to install/upgrade your PEAR modules")" "$yes"
[ "$?" -eq 0 ] && PEAR_AUTOINST=1
fi
if [ "${PEAR_AUTOINST:-0}" -eq 1 ] ; then
upgrade_pear_module "$INSTALL_VARS_DIR/$PEAR_MODULES_LIST"
install_pear_module "$INSTALL_VARS_DIR/$PEAR_MODULES_LIST"
PEAR_AUTOINST=0
first=0
else
pear_module="1"
fi
else
echo_success "$(gettext "All PEAR modules")" "$ok"
pear_module="1"
fi
done
#----
## Gorgone specific tasks
#----
echo "$line"
echo -e "\t$(gettext "Achieve gorgone's module integration")"
echo "$line"
## Copy pollers SSH keys (in case of upgrade) to the new "user" gorgone
if [ "$upgrade" = "1" ]; then
copy_ssh_keys_to_gorgone
fi
## Create gorgone's configuration structure
create_gorgone_configuration_structure
echo "$line"
echo -e "\t$(gettext "Create configuration and installation files")"
echo "$line"
## Create configfile for web install
createConfFile
## Write install config file
createCentreonInstallConf
## wait sql inject script....
| true |
ec30c3e959a0d1e42c09b7f5b43901faf25094a3 | Shell | duchenpaul/cmd_send_mail | /cmd_send_mail_auto.sh | UTF-8 | 1,597 | 4.1875 | 4 | [] | no_license | #!/bin/bash
SOURCE=`basename $0 .sh`
LOG_PATH=./script_logs
LOG=${LOG_PATH}/${SOURCE}_`date +"%Y%m%d"`.log
SEND_MAIL_SCRIPT=./send_mail.py
usage(){
echo "Sending notification with the result of command."
echo "$0 <CMD>"
}
send_mail(){
check_count=1
while [ ${check_count} -le 3 ]; do
sudo timeout 20m python3 ${SEND_MAIL_SCRIPT} "${subject}" "${content}"
rtncode=$?
echo "send mail return code: ${rtncode},tried $check_count time(s)"
if [ ${rtncode} -eq 0 ]; then
break
fi
check_count=$(($check_count + 1))
done
}
exit_process(){
exit_code=$1
case $exit_code in
0 )
subject="[Success] Script has completed at `date '+%b %d %T'`"
content="Script ${cmd} has completed at `date`, elapsed $(($duration / 60)) minutes and $(($duration % 60)) seconds "
send_mail
;;
1 )
subject="[Fail] Script has failed at `date '+%b %d %T'`"
content="Script ${cmd} has failed at `date`, elapsed $(($duration / 60)) minutes and $(($duration % 60)) seconds "
send_mail
;;
* )
echo "Undefined Return Code!"
;;
esac
echo ${content}
echo -e "$0 ended at `date`\n\n"
exit $exit_code
}
check_result()
{
duration=$SECONDS
return_status=$?
if [ $return_status -ne 0 ]; then
echo -e "`date '+%F %X'`: Failed! Return Status = $return_status"
exit_process 1
else
echo -e "`date '+%F %X'`: Done!"
fi
}
if [ $# -ne 1 ]; then
usage
exit 1
fi
cmd=$1
mkdir ${LOG_PATH}
# echo "" > ${LOG}
exec >> ${LOG} 2>&1
echo "$0 started at `date '+%b %d %T'` \n"
echo "`date '+%F %X'`: Executing ${cmd}... \n"
SECONDS=0
${cmd}
check_result
exit_process 0
| true |
d92b0d067a2ed0b8b31f946f67f1b11cc0e26143 | Shell | Kevin-Bridonneau/Formation-Dev-GO | /pool_c_d02/ex_09/createDir.sh | UTF-8 | 157 | 3.25 | 3 | [] | no_license | #!/bin/bash
for i in `seq 1 $1`;
do
if [ ! -d ex_0${i} ]
then
if [ ${i} -lt 10 ]
then
mkdir ex_0${i}
else
mkdir ex_${i}
fi
fi
done
| true |
71b4a4dfb840ea32a82140e86246c26108f6361c | Shell | ahri/dotfiles | /.config/Code/User/install-extensions.sh | UTF-8 | 113 | 2.625 | 3 | [] | no_license | #!/bin/sh
set -ue
cd "`dirname "$0"`"
while read ext; do code --install-extension $ext; done < extensions.list
| true |
79760a90d50adac46f871b1e9a0868a655d1803f | Shell | reven-tang/shell | /mongodb.sh | UTF-8 | 17,758 | 3.671875 | 4 | [] | no_license | #!/bin/bash
##############################################################################
# 脚本名称: mongodb.sh
# 版本:3.00
# 语言:bash shell
# 日期:2017-09-30
# 作者:Reven
# QQ:254674563
##############################################################################
# 颜色定义
red='\e[91m'
green='\e[92m'
yellow='\e[93m'
none='\e[0m'
# 定义脚本环境变量
PACKAGE_NAME=${1}
PACKAGES_DIR="/usr/local/script"
PACKAGE_DIR="/usr/local/script/${1}"
DOWNLOAD_URL="http://192.168.124.169:86/software/${1}"
ENV_DIR="/etc/profile"
ACTIVE=1 # 1:部署 2:卸载 3:回滚
ACTIVE_TIME=`date '+%Y-%m-%d'`
MENU_CHOOSE=$2
IS_DOWNLOAD=$3
INSTALL_DIR=$4
MONGODB_CONF="$INSTALL_DIR/mongodb/conf"
MONGODB_DATA=$5
MONGODB_LOGS="$INSTALL_DIR/mongodb/logs"
SHARD_PORTS=$6
CONFIG_PORT=$7
MONGOS_PORT=$8
OPLOGSIZE=$9
SHARDS_NUM=${10}
MONGODB1_IP=${11}
MONGODB2_IP=${12}
MONGODB3_IP=${13}
#--------------------------------- 基础模块 ---------------------------------#
# 检查命令是否正确运行
check_ok() {
if [ $? != 0 ] ; then
echo -e "${red}[*] Error! Error! Error! Please check the error info. ${none}"
exit 1
fi
}
# 如果包已经安装,则提示并跳过安装.
myum() {
if ! rpm -qa | grep -q "^$1" ; then
yum install -y $1
check_ok
else
echo $1 already installed
fi
}
# 添加用户
create_user() {
if ! grep "^$1:" /etc/passwd ; then
useradd $1
echo "$1" | passwd "$1" --stdin &>/dev/null
check_ok
else
echo $1 already exist!
fi
}
# 确保目录存在
dir_exists() {
[ ! -d "$1" ] && mkdir -p $1
}
pkg_download() {
if [[ ${IS_DOWNLOAD} = "Y" || ${IS_DOWNLOAD} = "y" ]]; then
echo -e "${green}正在下载软件,请稍等...${none}"
# 创建介质存放目录
mkdir -p ${PACKAGE_DIR}
wget -P ${PACKAGE_DIR} -r -np -nd -nH -R index.html -q ${DOWNLOAD_URL}"/"
check_ok
fi
}
#--------------------------------- 创建配置文件 ---------------------------------#
# 创建单实例配置文件
create_mongod_conf() {
cat > ${MONGODB_CONF}/mongod.yml << EOF
systemLog:
destination: file
path: "${INSTALL_DIR}/mongodb/logs/mongod.log"
logAppend: true
processManagement:
fork: true
pidFilePath: "${INSTALL_DIR}/mongodb/logs/mongod.pid"
net:
bindIp: 0.0.0.0
port: ${MONGOS_PORT}
storage:
dbPath: "${INSTALL_DIR}/mongodb/data/mongod"
journal:
enabled: true
# directoryPerDB: true
# engine: wiredTiger
wiredTiger:
engineConfig:
cacheSizeGB: 1
operationProfiling:
slowOpThresholdMs: 100
mode: slowOp
EOF
echo "创建数据存放目录"
dir_exists ${INSTALL_DIR}/mongodb/data/mongod
}
# 创建shard分片配置文件
create_shard_conf() {
cat > ${MONGODB_CONF}/shard${SHARD_NUM}.yml << EOF
systemLog:
destination: file
path: "${INSTALL_DIR}/mongodb/logs/shard${SHARD_NUM}.log"
logAppend: true
processManagement:
fork: true
pidFilePath: "${INSTALL_DIR}/mongodb/logs/shard${SHARD_NUM}.pid"
net:
bindIp: 0.0.0.0
port: ${SHARD_PORT}
storage:
dbPath: "${INSTALL_DIR}/mongodb/data/shard${SHARD_NUM}"
journal:
enabled: true
# directoryPerDB: true
# engine: wiredTiger
wiredTiger:
engineConfig:
cacheSizeGB: 1
operationProfiling:
slowOpThresholdMs: 100
mode: slowOp
replication:
replSetName: sRS${SHARD_NUM}
oplogSizeMB: ${OPLOGSIZE}
secondaryIndexPrefetch: all
sharding:
clusterRole: shardsvr
EOF
}
create_shards_conf() {
for ((i=0; i<${SHARDS_NUM}; i++)); do
SHARD_PORT=`expr ${SHARD_PORTS} + ${i}`
SHARD_NUM=`expr 1 + ${i}`
create_shard_conf
sleep 1
done
}
# 创建config配置文件
create_config_conf() {
cat > ${MONGODB_CONF}/config.yml << EOF
systemLog:
destination: file
path: "${INSTALL_DIR}/mongodb/logs/config.log"
logAppend: true
processManagement:
fork: true
pidFilePath: "${INSTALL_DIR}/mongodb/logs/config.pid"
net:
bindIp: 0.0.0.0
port: ${CONFIG_PORT}
storage:
dbPath: "${INSTALL_DIR}/mongodb/data/config"
journal:
enabled: true
replication:
replSetName: csRS
sharding:
clusterRole: configsvr
EOF
}
# 创建mongos路由配置文件
create_mongos_conf() {
cat > ${MONGODB_CONF}/mongos.yml << EOF
systemLog:
destination: file
path: "${INSTALL_DIR}/mongodb/logs/mongos.log"
logAppend: true
processManagement:
fork: true
pidFilePath: "${INSTALL_DIR}/mongodb/logs/mongos.pid"
net:
bindIp: 0.0.0.0
port: ${MONGOS_PORT}
sharding:
configDB: csRS/${MONGODB1_IP}:${CONFIG_PORT},${MONGODB2_IP}:${CONFIG_PORT},${MONGODB3_IP}:${CONFIG_PORT}
EOF
}
#--------------------------------- 创建启动服务 ---------------------------------#
# 创建单实例启动脚本
create_mongod_server() {
cat > /etc/init.d/mongod << EOF
#!/bin/bash
# Name:
# Version Number: 1.0.0
# Type: Shell
# Language: bash shell
# Date: 2017-09-30
# Author: Reven
# Email: 254674563@qq.com
################################################
# chkconfig: 2345 10 90
# description: mongod
################################################
EXEC=${INSTALL_DIR}/mongodb/bin/mongod
PIDFILE=${INSTALL_DIR}/mongodb/logs/mongod.pid
CONF="-f ${INSTALL_DIR}/mongodb/conf/mongod.yml"
PORT=${MONGOS_PORT}
################################################
case "\$1" in
start)
if [ -f \$PIDFILE ]
then
echo "\$PIDFILE exists, process is already running or crashed"
else
/usr/bin/numactl --interleave=all \$EXEC \$CONF &
# \$EXEC \$CONF &
echo -e "Starting MongoDB server... \033[1;32m[ O K ]\033[0m"
fi
;;
stop)
if [ ! -f \$PIDFILE ]
then
echo "\$PIDFILE does not exist, process is not running"
else
PID=\$(cat \$PIDFILE)
\$EXEC --port \$PORT \$CONF --shutdown
while [ -x /proc/\${PID} ]
do
echo "Waiting for MongoDB to shutdown ..."
sleep 1
done
echo -e "Stopped MongoDB server... \033[1;32m[ O K ]\033[0m"
rm -f \${PIDFILE}
fi
;;
status)
if [ ! -f \${PIDFILE} ]
then
echo 'MongoDB is not running'
else
PID=\$(cat \$PIDFILE)
echo "MongoDB is running (\$PID)"
fi
;;
restart)
if [ ! -f \${PIDFILE} ]
then
echo 'MongoDB is not running'
\$0 start
else
\$0 stop
\$0 start
fi
;;
*)
echo \$"Usage: \$0 { start | stop | restart | status }"
;;
esac
EOF
}
# 创建shard分片启动脚本
create_shard_server() {
cat > /etc/init.d/mongod_shard${SHARD_NUM} << EOF
#!/bin/bash
# Name:
# Version Number: 1.0.0
# Type: Shell
# Language: bash shell
# Date: 2017-09-30
# Author: Reven
# Email: 254674563@qq.com
################################################
# chkconfig: 2345 10 90
# description: mongod
################################################
EXEC=${INSTALL_DIR}/mongodb/bin/mongod
PIDFILE=${INSTALL_DIR}/mongodb/logs/shard${SHARD_NUM}.pid
CONF="-f ${INSTALL_DIR}/mongodb/conf/shard${SHARD_NUM}.yml"
PORT=${SHARD_PORT}
################################################
case "\$1" in
start)
if [ -f \$PIDFILE ]
then
echo "\$PIDFILE exists, process is already running or crashed"
else
/usr/bin/numactl --interleave=all \$EXEC \$CONF &
# \$EXEC \$CONF &
echo -e "Starting MongoDB server... \033[1;32m[ O K ]\033[0m"
fi
;;
stop)
if [ ! -f \$PIDFILE ]
then
echo "\$PIDFILE does not exist, process is not running"
else
PID=\$(cat \$PIDFILE)
\$EXEC --port \$PORT \$CONF --shutdown
while [ -x /proc/\${PID} ]
do
echo "Waiting for MongoDB to shutdown ..."
sleep 1
done
echo -e "Stopped MongoDB server... \033[1;32m[ O K ]\033[0m"
rm -f \${PIDFILE}
fi
;;
status)
if [ ! -f \${PIDFILE} ]
then
echo 'MongoDB is not running'
else
PID=\$(cat \$PIDFILE)
echo "MongoDB is running (\$PID)"
fi
;;
restart)
if [ ! -f \${PIDFILE} ]
then
echo 'MongoDB is not running'
\$0 start
else
\$0 stop
\$0 start
fi
;;
*)
echo \$"Usage: \$0 { start | stop | restart | status }"
;;
esac
EOF
}
create_shards_server() {
for ((i=0; i<${SHARDS_NUM}; i++)); do
SHARD_PORT=`expr ${SHARD_PORTS} + ${i}`
SHARD_NUM=`expr 1 + ${i}`
# 创建分片服务
create_shard_server
# 创建数据存放目录
dir_exists $MONGODB_DATA/shard${SHARD_NUM}
# 启动分片服务器
chmod 755 /etc/init.d/mongod_shard${SHARD_NUM}
/etc/init.d/mongod_shard${SHARD_NUM} start
sleep 1
done
}
# 创建config启动脚本
create_config_server() {
cat > /etc/init.d/mongod_config << EOF
#!/bin/bash
# Name:
# Version Number: 1.0.0
# Type: Shell
# Language: bash shell
# Date: 2017-09-30
# Author: Reven
# Email: 254674563@qq.com
################################################
# chkconfig: 2345 10 90
# description: mongod
################################################
EXEC=${INSTALL_DIR}/mongodb/bin/mongod
PIDFILE=${INSTALL_DIR}/mongodb/logs/config.pid
CONF="-f ${INSTALL_DIR}/mongodb/conf/config.yml"
PORT=${CONFIG_PORT}
################################################
case "\$1" in
start)
if [ -f \$PIDFILE ]
then
echo "\$PIDFILE exists, process is already running or crashed"
else
\$EXEC \$CONF &
echo -e "Starting MongoDB server... \033[1;32m[ O K ]\033[0m"
fi
;;
stop)
if [ ! -f \$PIDFILE ]
then
echo "\$PIDFILE does not exist, process is not running"
else
PID=\$(cat \$PIDFILE)
\$EXEC --port \$PORT \$CONF --shutdown
while [ -x /proc/\${PID} ]
do
echo "Waiting for MongoDB to shutdown ..."
sleep 1
done
echo -e "Stopped MongoDB server... \033[1;32m[ O K ]\033[0m"
rm -f \${PIDFILE}
fi
;;
status)
if [ ! -f \${PIDFILE} ]
then
echo 'MongoDB is not running'
else
PID=\$(cat \$PIDFILE)
echo "MongoDB is running (\$PID)"
fi
;;
restart)
if [ ! -f \${PIDFILE} ]
then
echo 'MongoDB is not running'
\$0 start
else
\$0 stop
\$0 start
fi
;;
*)
echo \$"Usage: \$0 { start | stop | restart | status }"
;;
esac
EOF
}
# 创建mongos路由启动脚本
create_mongos_server() {
cat > /etc/init.d/mongod_route << EOF
#!/bin/bash
# Name:
# Version Number: 1.0.0
# Type: Shell
# Language: bash shell
# Date: 2017-09-30
# Author: Reven
# Email: 254674563@qq.com
################################################
# chkconfig: 2345 10 90
# description: mongod
################################################
EXEC=${INSTALL_DIR}/mongodb/bin/mongos
PIDFILE=${INSTALL_DIR}/mongodb/logs/mongos.pid
CONF="-f ${INSTALL_DIR}/mongodb/conf/mongos.yml"
PORT=${MONGOS_PORT}
################################################
case "\$1" in
start)
if [ -f \$PIDFILE ]
then
echo "\$PIDFILE exists, process is already running or crashed"
else
\$EXEC \$CONF &
echo -e "Starting MongoDB Route server... \033[1;32m[ O K ]\033[0m"
fi
;;
stop)
if [ ! -f \$PIDFILE ]
then
echo "\$PIDFILE does not exist, process is not running"
else
PID=\$(cat \$PIDFILE)
#\$EXEC --port \$PORT \$CONF --shutdown
/bin/kill -9 \$PID
while [ -x /proc/\${PID} ]
do
echo "Waiting for MongoDB Route to shutdown ..."
sleep 1
done
echo -e "Stopped MongoDB Route server... \033[1;32m[ O K ]\033[0m"
rm -f \${PIDFILE}
fi
;;
status)
if [ ! -f \${PIDFILE} ]
then
echo 'MongoDB Route is not running'
else
PID=\$(cat \$PIDFILE)
echo "MongoDB Route is running (\$PID)"
fi
;;
restart)
if [ ! -f \${PIDFILE} ]
then
echo 'MongoDB Route is not running'
\$0 start
else
\$0 stop
\$0 start
fi
;;
*)
echo \$"Usage: \$0 { start | stop | restart | status }"
;;
esac
EOF
}
#--------------------------------- 程序模块 ---------------------------------#
# 开始安装
install_mongodb() {
echo "开始安装mongodb..."
pkg_download
dir_exists ${INSTALL_DIR}
cd ${PACKAGE_DIR}
tar -zxvf ${PACKAGE_DIR}/mongodb-linux-*[0-9]*.tgz -C ${INSTALL_DIR}
check_ok
cd ${INSTALL_DIR}
mv mongodb-linux-*[0-9] mongodb
check_ok
echo "创建mongodb的配置、数据、日志目录"
mkdir -p ${MONGODB_CONF} ${MONGODB_DATA} ${MONGODB_LOGS}
}
# 修改系统环境变量
set_env() {
echo "添加mongodb环境变量"
cat >> ${ENV_DIR} << EOF
####MongoDB...
export PATH=${INSTALL_DIR}/mongodb/bin:\$PATH
EOF
# 生效环境变量
source ${ENV_DIR}
check_ok
}
#----------------------------- Mongodb单实例 ------------------------------#
single_conf() {
echo "创建配置文件"
create_mongod_conf
echo "创建启动脚本"
create_mongod_server
chmod 755 /etc/init.d/mongod
echo "启动mongodb"
/etc/init.d/mongod start
check_ok
}
#--------------------------- Mongodb分片复制集 ----------------------------#
replset_shard_conf() {
echo "创建配置文件"
create_shards_conf
create_config_conf
create_mongos_conf
echo "创建启动脚本"
create_shards_server
create_config_server
create_mongos_server
chmod 755 /etc/init.d/mongod*
# 创建comfig配置数据存放目录
dir_exists $MONGODB_DATA/config
}
shard1_replset() {
# 要确保其他节点分片均已启动正常
echo "创建shardsvr的副本集"
SHARD_PORT=`expr ${SHARD_PORTS} + 0`
CREATE_SHARDONE="rs.initiate({_id:\"sRS1\", members:[{_id:0,host:\"${MONGODB1_IP}:${SHARD_PORT}\"},{_id:1,host:\"${MONGODB2_IP}:${SHARD_PORT}\"},{_id:2,host:\"${MONGODB3_IP}:${SHARD_PORT}\",arbiterOnly:true}]})"
echo "$CREATE_SHARDONE" | mongo --host ${MONGODB1_IP} --port ${SHARD_PORT} admin --shell
check_ok
}
shard2_replset() {
echo "创建shardsvr的副本集"
SHARD_PORT=`expr ${SHARD_PORTS} + 1`
CREATE_SHARDONE="rs.initiate({_id:\"sRS1\", members:[{_id:0,host:\"${MONGODB2_IP}:${SHARD_PORT}\"},{_id:1,host:\"${MONGODB3_IP}:${SHARD_PORT}\"},{_id:2,host:\"${MONGODB1_IP}:${SHARD_PORT}\",arbiterOnly:true}]})"
echo "$CREATE_SHARDONE" | mongo --host ${MONGODB2_IP} --port ${SHARD_PORT} admin --shell
check_ok
}
shard3_replset() {
echo "创建shardsvr的副本集"
SHARD_PORT=`expr ${SHARD_PORTS} + 2`
CREATE_SHARDONE="rs.initiate({_id:\"sRS1\", members:[{_id:0,host:\"${MONGODB3_IP}:${SHARD_PORT}\"},{_id:1,host:\"${MONGODB1_IP}:${SHARD_PORT}\"},{_id:2,host:\"${MONGODB2_IP}:${SHARD_PORT}\",arbiterOnly:true}]})"
echo "$CREATE_SHARDONE" | mongo --host ${MONGODB3_IP} --port ${SHARD_PORT} admin --shell
check_ok
}
configsvr_replset() {
echo "启动配置服务"
/etc/init.d/mongod_config start
check_ok
# 要确保其他节点配置服务均已启动正常。
echo "创建configsvr的副本集"
CREATE_REPLISET="rs.initiate({_id:\"csRS\", configsvr:true, members:[{_id:0,host:\"${MONGODB1_IP}:${CONFIG_PORT}\"},{_id:1,host:\"${MONGODB2_IP}:${CONFIG_PORT}\"},{_id:2,host:\"${MONGODB3_IP}:${CONFIG_PORT}\"}]})"
echo "$CREATE_REPLISET" | mongo --host ${MONGODB1_IP} --port ${CONFIG_PORT} admin --shell
check_ok
}
addshardtocluste() {
echo "启动mongos路由服务"
/etc/init.d/mongod_route start
check_ok
# 要确保其他节点路由服务均已启动正常。
echo "分别将三个shard分片添加集群"
for ((i=0; i<${SHARDS_NUM}; i++)); do
SHARD_PORT=`expr ${SHARD_PORTS} + ${i}`
SHARD_NUM=`expr 1 + ${i}`
ADD_SHARD="db.runCommand( { addshard : \"sRS1/${MONGODB1_IP}:${SHARD_PORT},${MONGODB2_IP}:${SHARD_PORT},${MONGODB3_IP}:${SHARD_PORT}\",maxSize: 0,name: \"shard${SHARD_NUM}\"})"
echo "$ADD_SHARDONE" | mongo --host ${MONGODB1_IP} --port ${MONGOS_PORT} admin --shell
check_ok
sleep 1
done
}
#--------------------------------- 部署选择 ---------------------------------#
case "$MENU_CHOOSE" in
1|Single)
install_mongodb
set_env
single_conf
;;
2|ShardReplset)
install_mongodb
set_env
replset_shard_conf
;;
3|shard1_replset)
shard1_replset
;;
4|shard2_replset)
shard1_replset
;;
5|shard3_replset)
shard1_replset
;;
6|configsvr_replset)
configsvr_replset
;;
7|addshardtocluste)
addshardtocluste
;;
*)
echo "only 1(Single) or 2(ShardReplset)"
exit 1
;;
esac | true |
f22a4d8b7c591eeee6fcfc92d5a6813a87007f83 | Shell | slachiewicz/docker-rackspace-cloud-backup | /container-files/run.sh | UTF-8 | 2,542 | 4.125 | 4 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/sh
set -u
# Internal variables
CONFIG_FILE="/etc/driveclient/bootstrap.json"
LOG_FILE="/var/log/driveclient.log"
WAIT_TIME=30
PID=0
#########################################################
# Fill the config with provided values via ENV variables
# Globals:
# API_HOST
# API_KEY
# ACCOUNT_ID
# USERNAME
#########################################################
function update_config() {
sed -i "s/API_HOST/$API_HOST/g" $CONFIG_FILE
sed -i "s/API_KEY/$API_KEY/g" $CONFIG_FILE
sed -i "s/ACCOUNT_ID/$ACCOUNT_ID/g" $CONFIG_FILE
sed -i "s/USERNAME/$USERNAME/g" $CONFIG_FILE
echo "driveclient config updated:"
cat $CONFIG_FILE && echo
}
#########################################################
# Start the driveclient daemon, wait a bit,
# and check the status.
# Globals:
# PID
# WAIT_TIME
#########################################################
function start_driveclient() {
if [[ $PID != 0 ]]; then
echo "Shutting down driveclient..." && kill $PID && sleep 3
fi
: > $LOG_FILE && echo # truncate the log file, so we can grep always the current version
driveclient &
PID=$!
echo "Backup agent (driveclient) started, pid=$PID"
sleep $WAIT_TIME && echo
# In case of these strings were not found, the script will exit (set -e)
if [[ $(grep -i "HTTP connection error 400" $LOG_FILE) != "" ]]; then echo "Error: connection error. Check the routing/connection. Are you sure you run this container with --net=host option?" && exit 1; fi
if [[ $(grep -i "Could not register the agent" $LOG_FILE) != "" ]]; then echo "Error: Could not register the agent. Check your account id, username and/or api key (password)." && exit 1; fi
if [[ $(grep -i "Successfully authenticated the agent" $LOG_FILE) == "" ]]; then echo "Error: Could not authenticate the agent." && exit 1; fi
if [[ $(grep -i "Configuration parsed and loaded" $LOG_FILE) == "" ]]; then echo "Error: Could not parse the config file." && exit 1; fi
}
# tail the log to stdout (in the background), so it can be easily inspected via `docker logs`
touch $LOG_FILE && tail -F $LOG_FILE &
# Generate the config and start the deamon
update_config
start_driveclient
# In case we spot any problems, do restart until they disappear...
while [[ $(grep -i "HTTP(s) error code = 403" $LOG_FILE) != "" ]] || [[ $(grep -i "Could not post an event" $LOG_FILE) != "" ]]; do
echo "Problems with agent detected. Restaring..."
start_driveclient
done
echo "Backup agent (driveclient) positive status verified, pid=$PID"
wait $PID
| true |
1b7048099cf4560e10e932cd64b0656ca6713ba2 | Shell | capitalaslash/lisa-script | /select-compiler | UTF-8 | 1,649 | 2.9375 | 3 | [] | no_license | #!/bin/bash
# select-compiler -- meant to be *sourced*: exports the compiler
# configuration (COMPILER, C/CXX/Fortran flags, FPIC_* paths, MYAR,
# FORTRAN_LIB) into the caller's environment and aborts with `return`.
# The compiler family is deduced from $MYCOMPILER when set, otherwise
# from the current working directory (a path containing "gnu" or "intel").

# Refuse to run on the login node ("lagrange"): use a compile node.
# (`grep -q` replaces the old `if $(... >/dev/null)` command-substitution
# trick, which only worked by accident of bash's exit-status rules.)
if uname -a | grep -q "lagrange"
then
  echo "You must be on a compile node, use the command compile_node"
  return 1
fi

# Default MYCOMPILER to the current directory when not provided.
if [ "${MYCOMPILER}x" = "x" ]
then
  MYCOMPILER=${PWD}
else
  echo $MYCOMPILER
fi

if echo "${MYCOMPILER}" | grep -q "gnu"
then
  export COMPILER="gnu"
  export CXX_FLAGS_OPT="-O3 -msse3 -ansi"
  export C_FLAGS_OPT=${CXX_FLAGS_OPT}
  export Fortran_FLAGS_OPT=${CXX_FLAGS_OPT}
  export CXX_FLAGS_DBG="-O0 -g"
  export C_FLAGS_DBG=${CXX_FLAGS_DBG}
  export Fortran_FLAGS_DBG=${CXX_FLAGS_DBG}
  export FPIC_BLAS=fPIC_option/lib
  export FPIC_LAPACK=fPIC_Option/lib
  export MYAR="/usr/bin/ar"
  export FORTRAN_LIB="gfortran"
elif echo "${MYCOMPILER}" | grep -q "intel"
then
  export COMPILER="intel"
  export CXX_FLAGS_OPT="-O3 -xHOST -ansi"
  export C_FLAGS_OPT=${CXX_FLAGS_OPT}
  export Fortran_FLAGS_OPT="-O3 -xHOST"
  export CXX_FLAGS_DBG="-O0 -g"
  export C_FLAGS_DBG=${CXX_FLAGS_DBG}
  export Fortran_FLAGS_DBG=${CXX_FLAGS_DBG}
  export FPIC_BLAS=lib_fpic_option
  export FPIC_LAPACK=${FPIC_BLAS}
#  export AR="/usr/bin/ar"
  # Intel's xiar wrapper must be used instead of plain ar.
  export MYAR="/data/apps/bin/Intel/cltoolkit/Compiler/11.1/073/bin/intel64/xiar"
  export FORTRAN_LIB="ifcore"
else
  echo "Compiler not recognized"
  unset MYCOMPILER
  return 1
fi

unset MYCOMPILER
echo "${COMPILER} compiler selected"

# Load the matching OpenMPI environment for the selected compiler.
source ${HOME}/script/xopenmpi_${COMPILER}.sh

#export LINK_OPTION=" -Wl,--start-group  ${MKLROOT}/lib/em64t/libmkl_intel_lp64.a \
#                    ${MKLROOT}/lib/em64t/libmkl_intel_thread.a \
#                    ${MKLROOT}/lib/em64t/libmkl_core.a -Wl,--end-group -openmp -lpthread"
| true |
919c9c700635aa41cba15bfef4c4b26fbb72a2b8 | Shell | zarnold/webdev | /comics/gen_index.sh | UTF-8 | 6,110 | 4.0625 | 4 | [] | no_license | #/bin/sh
# Your comics folder structure should be like :
# - ./bdd/sharable/<SERIE_NAME>/<EPISODE_NAME>/
# - image in this folder
# Your images should be named in alphabetical order aka a.jpg, b.jpg, c.jpg,...
# -- ERROR CODE
E_BDD_INVALID=2
E_UNSPECIFIED=1
# ===== DEBUG and Helpers ====
# Log an informational message: append "[+] [timestamp] -- msg" to
# $LOG_PATH and echo the identical line to stdout.
printLog ()
{
  local ts line
  ts=$(date +%Y%m%d_%H%M%S)
  line="[+] [${ts}] -- $*"
  echo "$line" >> "$LOG_PATH"
  echo "$line"
}
# Log an error message: append "[X] [timestamp] [ERROR] -- msg" to
# $LOG_PATH and echo the identical line to stdout.
printError ()
{
  local ts line
  ts=$(date +%Y%m%d_%H%M%S)
  line="[X] [${ts}] [ERROR] -- $*"
  echo "$line" >> "$LOG_PATH"
  echo "$line"
}
# Parse command-line options into the O_* globals:
#   -r         human-readable JSON (newlines between entries)
#   -v LEVEL   verbosity level
# Defaults: O_HUMAN_READABLE=FALSE, O_VERBOSE=1.
checkOption ()
{
  # -- OPTION DEFAULT VALUE
  O_HUMAN_READABLE=FALSE
  O_VERBOSE=1
  printLog "Checking options $@"
  # Reset getopts state so the function is safe to call more than once.
  OPTIND=1
  # BUGFIX: the option string used to be wrapped in Unicode curly quotes
  # (“rv:”), which silently made the quote characters part of the
  # accepted option set.
  while getopts "rv:" OPTION
  do
    case $OPTION in
      r)
        O_HUMAN_READABLE=TRUE
        ;;
      v)
        O_VERBOSE=$OPTARG
        ;;
    esac
  done
  printLog "Human Readable : ${O_HUMAN_READABLE}"
  printLog "Verbose : ${O_VERBOSE}"
}
# ===== JSON Syntax related =====
# Append printf-formatted text to the JSON file being built ($INDEX_PATH).
# NOTE: the first argument is used as the printf *format* string -- callers
# rely on escape expansion (e.g. `putJson '\n'` emits a newline) and must
# escape any literal '%'.  The redirection target is now quoted so paths
# containing spaces do not break the append.
putJson ()
{
  printf "$@" >> "${INDEX_PATH}"
}
initiateJson ()
{
putJson '{'
}
initiateArray ()
{
putJson '['
}
closeArray ()
{
putJson ']'
}
initiateObject()
{
putJson '{'
}
closeObject ()
{
putJson '}'
}
closeJson ()
{
putJson '}'
}
# Emit a JSON element separator: always a comma, plus a newline when the
# -r (human readable) option was given.
putSeparator ()
{
  putJson ','
  case "$O_HUMAN_READABLE" in
    TRUE) putJson '\n' ;;
  esac
}
# Post-process $INDEX_PATH in place to turn the appended fragments into
# (closer to) valid JSON:
#   pass 1: drop commas sitting directly before a closing ']' or '}'
#   pass 2: drop a trailing comma on the last line
#   pass 3: separate back-to-back objects: '}{' -> '},\n{'
#   pass 4: put a newline after every '['
# NOTE(review): relies on GNU sed's `-i` without a backup suffix; BSD/macOS
# sed would need `-i ''` -- confirm the target platform.
sanitizeJson ()
{
  sed -i -e 's/,]/]\n/g' -e 's/,}/}\n/g' ${INDEX_PATH}
  sed -i -e '$s/,$//' ${INDEX_PATH}
  sed -i -e 's/}{/},\n{/g' ${INDEX_PATH}
  sed -i -e 's/\[/[\n/g' ${INDEX_PATH}
}
# ===== Index Builder ====
# Emit the "authors" JSON array for the current ${serie}/${episode} and
# create per-author symlink folders under ${BDD_PATH}/../authors/.
# Reads the optional infos.txt; falls back to the default author "spunch".
# Relies on the globals BDD_PATH, serie and episode set by the caller.
extractInfos ()
{
  authors=( spunch )
  infoFile=${BDD_PATH}/${serie}/${episode}/infos.txt
  if [ -f ${infoFile} ]
  then
    # Clean
    printLog "Found info file in ${infoFile}"
    # NOTE(review): this strips *all* spaces from infos.txt in place, not
    # just separators -- confirm that is intended.
    sed -i -e 's/ //g' ${infoFile}
    # Take the value after "Author:" (several spellings accepted) and
    # split the comma-separated list into array elements.
    authors=( $(cat ${infoFile} | grep [aA]ut[eh][uo]r | cut -d ":" -f 2 |sed 's/,/ /g') )
    # ${authors} without a subscript expands to the first element only.
    printLog "Authors are ${authors}"
  else
    printError "info File not found in ${infoFile}. Using default"
  fi
  putJson "\"authors\":"
  initiateArray
  for author in "${authors[@]}"
  do
    printLog "Creating folder for ${author}"
    [ ! -d ${BDD_PATH}/../authors/${author} ] && mkdir ${BDD_PATH}/../authors/${author}
    # Symlink the episode into the author's folder (-T is GNU coreutils -- confirm).
    ln -sfT ${BDD_PATH}/${serie}/${episode} ${BDD_PATH}/../authors/${author}/${serie}-${episode}
    initiateObject
    putJson "\"nom\": \"${author}\""
    closeObject
    putSeparator
  done
  closeArray
}
buildSerie ()
{
for abs_serie in ${BDD_PATH}/*
do
printLog "now in ${abs_serie}"
if [ -d "${abs_serie}" ]
then
extractName ${abs_serie}
serie=${NAME}
INDEX_PATH=${BDD_PATH}/${serie}/index.json
[ -f ${BDD_PATH}/${serie}/index.json ] && rm ${BDD_PATH}/${serie}/index.json
[ -f ${BDD_PATH}/${serie}/index.txt ] && rm ${BDD_PATH}/${serie}/index.txt
hr_serie=$(echo ${serie^} |sed -e 's/[-_]/ /g')
initiateObject
printLog "Serie was $NAME and now is $hr_serie"
putJson "\"nom\": \"${hr_serie}\""
putSeparator
putJson "\"slug\": \"${serie}\""
putSeparator
putJson "\"episodes\":"
initiateArray
buildEpisodes
INDEX_PATH=${BDD_PATH}/${serie}/index.json
closeArray
closeObject
sanitizeJson
cat ${BDD_PATH}/${serie}/index.txt >> ${BDD_PATH}/index.json
else
printError "${serie} is not a folder"
fi
done
}
buildEpisodes ()
{
for abs_episode in ${BDD_PATH}/${serie}/*
do
if [ -d "${abs_episode}" ]
then
extractName ${abs_episode}
episode=$NAME
hr_ep=$(echo ${episode^} |sed -e 's/\+//g' -e 's/[-_]/ /g')
INDEX_PATH=${BDD_PATH}/${serie}/${episode}/index.json
[ -f ${BDD_PATH}/${serie}/${episode}/index.json ] && rm ${BDD_PATH}/${serie}/${episode}/index.json
[ -f ${BDD_PATH}/${serie}/${episode}/index.txt ] && rm ${BDD_PATH}/${serie}/${episode}/index.txt
initiateObject
extractInfos
putSeparator
putJson "\"serie\": \"${hr_serie}\""
putSeparator
putJson "\"episode\": \"${hr_ep}\""
putSeparator
putJson "\"episodeslug\": \"${episode}\""
putSeparator
putJson "\"slug\": \"${serie}\""
putSeparator
#Le tableau des cases
putJson "\"cases\":"
initiateArray
for abs_image in ${BDD_PATH}/${serie}/${episode}/{*.jpg,*.gif,*.png}
do
extractName ${abs_image}
image=$NAME
#Warn :globbing pritn stars if no file found
if [[ ! "${image}" =~ \*.* ]]
then
initiateObject
# reaplce with your own server url
putJson "\"url\": \"http://my_server.com/${lang_folder}/${serie}/${episode}/${image}\""
closeObject
putSeparator
fi
done
closeArray
closeObject
putSeparator
sanitizeJson
printLog "Puttin ${INDEX_PATH} to ${BDD_PATH}/${serie}/index.json"
#valid json
cat ${INDEX_PATH} >> ${BDD_PATH}/${serie}/index.json
# List only
cat ${INDEX_PATH} >> ${BDD_PATH}/${serie}/index.txt
fi
done
}
# Set the global NAME to the last path component (basename) of $1.
# BUGFIX: replaces `printf $@ | sed 's/^.*\///g'`, which used the unquoted
# arguments as a printf *format* string and broke on '%' or whitespace.
extractName ()
{
  #TODO = filter name for valid url
  NAME=${1##*/}
}
#------------------------------------------------------
printLog "Starting Index Generation"
checkOption $@
# Build the whole catalog: (re)create ${BDD_PATH}/index.json by walking
# every serie/episode under ${BDD_PATH} and concatenating their JSON.
# Exits with E_BDD_INVALID when ${BDD_PATH} is not a directory.
do_gen()
{
  INDEX_PATH="${BDD_PATH}/index.json"
  printLog "Discarding ${INDEX_PATH}"
  # Keep one backup of the previous index.
  if [ -f ${INDEX_PATH} ]
  then
    printLog "Already an old index, moving it"
    mv ${INDEX_PATH} ${INDEX_PATH}.previous
  fi
  #rm -rf ${BDD_PATH}/../authors/*
  printLog "Checking the BDD"
  if [ ! -d ${BDD_PATH} ]
  then
    printError "${BDD_PATH} is not a valid BDD for spunch maker"
    printError "Leaving mutafuckaz"
    exit ${E_BDD_INVALID}
  fi
  printLog "Building Catalog"
  # Open the top-level object and its "series" array; buildSerie appends
  # one object per serie directory.
  initiateJson
  putJson "\"series\":"
  initiateArray
  buildSerie
  # buildSerie redirects INDEX_PATH to per-serie files; point it back at
  # the top-level index before closing the JSON.
  INDEX_PATH="${BDD_PATH}/index.json"
  closeArray
  closeJson
  sanitizeJson
  # NOTE(review): gen_authors is not defined anywhere in this script --
  # confirm it is provided elsewhere, otherwise this call fails.
  gen_authors
}
printLog "Generate index"
lang_folder="sharable"
BDD_PATH="./bdd/sharable"
do_gen
| true |
f7f15581d51976db92b7a82aa7f268810b470d3a | Shell | arnavgupta180/ImageDuplicate | /Source/ImageAssets.sh | UTF-8 | 1,531 | 3.1875 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# ImageAssets.sh
# shellScriptStringGeneration
#
# Created by Arnav Gupta on 06/12/19.
# Copyright © 2019 Arnav. All rights reserved.
#
# Xcode build-phase script: finds every *.imageset in the project and
# generates AVImageConstants.swift listing the asset names and paths.

#configure input variables for script here ($SRCROOT / $PODS_ROOT come from Xcode)
projectDir="$SRCROOT"
scriptDir="$PODS_ROOT/ImageDuplicate"

# this ensure correct permissions are automatically added to any new shell script added to build-phase directory
# NOTE(review): entries are listed from $scriptDir but chmod'ed under
# $projectDir -- confirm which directory was intended.
ls "$scriptDir" | while read line; do
path="$projectDir/$line";
chmod u+x "$path";
done

outputFile="$scriptDir/AVImageConstants.swift"
# Quoted: $SRCROOT commonly contains spaces on macOS.
cd "$projectDir"

# remove any previously created temp files
rm txcassets*.* 2>/dev/null

# find all imageset names
find . -name '*.imageset' > txcassets1.sh
chmod 777 txcassets1.sh
# wrap each path in quotes with a trailing comma (Swift array elements)
sed -i -e 's/^/"/;s/$/",/' txcassets1.sh
cat txcassets1.sh > txcassets2.sh
# build `basename <path> .imageset` commands, then run them to get bare names
sed -i -e 's/^/basename /' txcassets2.sh
sed -i -e 's/$/ .imageset/' txcassets2.sh
sh txcassets2.sh > txcassets3.sh
sed -i -e 's/^/ "/;s/$/"/' txcassets3.sh
sed -i -e 's/.imageset,"/",/' txcassets3.sh

# assemble the generated Swift source
touch txcassets5.sh
echo "// This file is autogenerated, do not edit here" >> txcassets5.sh
echo "" >> txcassets5.sh
echo "public class AVImageConstants {" >> txcassets5.sh
echo " static let imageNames: [String] = [" >> txcassets5.sh
cat txcassets3.sh >> txcassets5.sh
echo " ]" >> txcassets5.sh
echo " static let imagePaths: [String] = [" >> txcassets5.sh
cat txcassets1.sh >> txcassets5.sh
echo " ]" >> txcassets5.sh
echo "}" >> txcassets5.sh

# -f: do not print an error on the very first run, when no output exists yet
rm -f "$outputFile"
mv txcassets5.sh "$outputFile"

# remove any temp files created
rm txcassets*.* 2>/dev/null
| true |
cf1d3146c2102f2dfe47fe40fdaa2f1930f73aaf | Shell | zarfleen/dotfiles | /zsh/prompt.zsh | UTF-8 | 1,433 | 3.109375 | 3 | [
"MIT"
] | permissive | autoload -U promptinit colors
promptinit
colors
#------------------------------
# Prompt
#------------------------------
# Build the zsh prompt: define PR_* colour aliases from terminfo/zsh colour
# arrays, pick colours by UID (root vs normal user) and by SSH presence,
# then assemble PS1.  prompt_subst lets the PR_* variables expand at
# display time.
setprompt () {
    # load some modules
    autoload -U colors zsh/terminfo # Used in the colour alias below
    colors
    setopt prompt_subst
    # make some aliases for the colours: (coud use normal escap.seq's too)
    #for color in RED GREEN YELLOW BLUE MAGENTA CYAN WHITE; do
    #  eval PR_$color='%{$fg[${(L)color}]%}'
    #done
    # Define PR_RED / PR_LIGHT_RED etc.; %{...%} marks zero-width escapes.
    for color in RED GREEN YELLOW BLUE MAGENTA CYAN WHITE; do
        eval PR_LIGHT_$color='%{$terminfo[bold]$fg[${(L)color}]%}'
        eval PR_$color='%{$fg[${(L)color}]%}'
        # NOTE(review): $count is never initialized or read elsewhere --
        # looks like leftover debris; confirm before removing.
        (( count = $count + 1 ))
    done
    PR_NO_COLOR="%{$terminfo[sgr0]%}"
    # Check the UID
    if [[ $UID -ge 500 ]]; then # normal user
        eval PR_USER='${PR_LIGHT_GREEN}%n${PR_NO_COLOR}'
        eval PR_USER_OP='${PR_LIGHT_GREEN}%#${PR_NO_COLOR}'
    elif [[ $UID -eq 0 ]]; then # root
        eval PR_USER='${PR_LIGHT_RED}%n${PR_NO_COLOR}'
        eval PR_USER_OP='${PR_LIGHT_RED}%#${PR_NO_COLOR}'
    fi
    # Check if we are on SSH or not
    if [[ -n "$SSH_CLIENT" || -n "$SSH2_CLIENT" || -n "$SSH_CONNECTION" ]]
    then
        eval PR_HOST='${PR_LIGHT_YELLOW}%M${PR_NO_COLOR}' #SSH
    else
        eval PR_HOST='${PR_LIGHT_BLUE}%M${PR_NO_COLOR}' # no SSH
    fi
    # set the prompt
    eval PR_DECORATION='${PR_LIGHT_BLUE}'
    PS1=$'${PR_DECORATION}[${PR_USER}${PR_DECORATION}@${PR_HOST}${PR_DECORATION}:%~${PR_DECORATION}]${PR_USER_OP} '
    #PS2=$'%_>'
    unset PS2
}
setprompt
| true |
d45de39098a4d4cf140743a4d9e84281c6557c66 | Shell | haveneer/libKriging | /docs/dev/envs/ubuntu20/install.sh | UTF-8 | 1,453 | 2.875 | 3 | [
"Apache-2.0"
] | permissive | # To execute as root
# Provision an Ubuntu 20.04 build environment for libKriging.
# Must run as root (inside a container); installs toolchain, Octave, R
# and convenience tools, then creates a non-root working user.
apt update
# apt install -y curl
# ln -snf /usr/share/zoneinfo/$(curl https://ipapi.co/timezone) /etc/localtime
# DEBIAN_FRONTEND="noninteractive" apt install -y tzdata
# Pre-seed tzdata answers so the install below never prompts.
echo 'tzdata tzdata/Areas select Europe' | debconf-set-selections
echo 'tzdata tzdata/Zones/Europe select Paris' | debconf-set-selections
DEBIAN_FRONTEND="noninteractive" apt install -y tzdata
# for ubuntu:18, the equivalent of liboctave-dev was octave-pkg-dev
apt install -y build-essential g++ cmake git python3 python3-pip octave liboctave-dev r-base liblapack-dev gfortran
apt install -y lcov valgrind # advanced tools
apt install -y ccache ninja-build vim curl # convenient tools
# only required for ubuntu:18
## commands from linux-macos/install.sh
## add kitware server signature cf https://apt.kitware.com
#apt-get install -y apt-transport-https ca-certificates gnupg software-properties-common
#curl -s https://apt.kitware.com/keys/kitware-archive-latest.asc | gpg --dearmor - | sudo tee /etc/apt/trusted.gpg.d/kitware.gpg >/dev/null
#apt-add-repository 'deb https://apt.kitware.com/ubuntu/ bionic main'
#apt-get install -y cmake # requires cmake ≥3.13 for target_link_options
# When used inside a docker container, a good thing is to
# add non-root user for working (root is an unsafe user for working)
apt install -y sudo
# NOTE(review): `passwd` is given no user name here, so this sets the
# *root* password to "password" -- probably `passwd user` was intended;
# confirm.
useradd -m user --shell /bin/bash && yes password | passwd
echo "user ALL=NOPASSWD: ALL" | EDITOR='tee -a' visudo
b36f98f61e2859b9ef77760984e53c83d5ebe539 | Shell | priyankagoma/rhcsa-learnings | /trainings/user_groups/scripts/create_groups_users.sh | UTF-8 | 841 | 3.171875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
## Script for creating users and groups ## <NAME OF THE SCRIPT>
## Created by (c) Priyanka Goma ## <COPYRIGHT STATEMENT>
## Created on 30 Sept 2021 ## <CREATED ON>
## Automates one of the user/group provisioning questions in the RHCSA exam ## <PURPOSE OF SCRIPT>
#-----------------------------------------------------------------------#
## Group required by the exercise ##
groupadd sysadm
## Create harry, natasha and sarah ##
for u in harry natasha sarah; do
  useradd "$u"
done
## Set each password (interactive prompts, same order as creation) ##
for u in harry natasha sarah; do
  passwd "$u"
done
## harry and natasha join sysadm as a supplementary group ##
for u in harry natasha; do
  usermod -aG sysadm "$u"
done
## sarah must not be able to log in interactively ##
usermod -s /sbin/nologin sarah
| true |
00722fac7dca7cd6be1b2046b1b009676ea0e1b5 | Shell | CloudPadovana/caos-collector | /build_release.sh | UTF-8 | 2,097 | 3.5 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
################################################################################
#
# caos-collector - CAOS collector
#
# Copyright © 2017 INFN - Istituto Nazionale di Fisica Nucleare (Italy)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Author: Fabrizio Chiarello <fabrizio.chiarello@pd.infn.it>
#
################################################################################
set -e
PROJECT_DIR=$(dirname $(readlink -f $0))
source ${PROJECT_DIR}/ci-tools/common.sh
GIT_SHA=$(git rev-parse --verify HEAD)
DOCKER_BUILD_IMAGE="python:2.7"
releases_dir=releases
if [ ! -d ${releases_dir} ] ; then
say "Creating %s" ${releases_dir}
mkdir -p ${releases_dir}
fi
container_id=$(docker run -t -d -v $(readlink -e $(pwd)):/origin:ro -v /build -w /build ${DOCKER_BUILD_IMAGE})
say "Started container: %s\n" ${container_id}
# Run a command inside the build container with the CI environment set.
# Usage: docker_exec <container_id> <cmd...>
# Dies with "Docker error" when docker exec reports failure; die() comes
# from ci-tools/common.sh sourced at the top of this script -- confirm.
# NOTE(review): the script runs under `set -e`, so a failing `docker exec`
# normally aborts before the explicit status check below is reached.
function docker_exec () {
    docker exec \
           -e "CI_PROJECT_DIR=/build" \
           -e "CI_COMMIT_SHA=${GIT_SHA}" \
           "$@"
    if [ $? != 0 ] ; then
        die "Docker error"
    fi
}
docker_exec ${container_id} git clone --no-checkout --no-hardlinks /origin /build
docker_exec ${container_id} git checkout -f ${GIT_SHA}
docker_exec ${container_id} ci-tools/prepare.sh
docker_exec ${container_id} ci-tools/release-build.sh
fname=${container_id}:/build/releases/caos_collector-$(CI_PROJECT_DIR=. CI_COMMIT_SHA=${GIT_SHA} ci-tools/git-semver-pbr.sh)-py2-none-any.whl
docker cp ${fname} ${releases_dir}/
docker stop ${container_id}
say "Stopped container: %s\n" ${container_id}
docker rm ${container_id}
say "Removed container: %s\n" ${container_id}
| true |
1b28ec0fb2a5d25569ef5101cf7070771cd3af6c | Shell | vmitris/ASC-DTRMV | /exec_script2.sh | UTF-8 | 178 | 2.703125 | 3 | [] | no_license | #!/bin/bash
# Benchmark driver: build target `build2` with gcc and run ./dtrmv
# (variant 2) for sizes 1000..25000 in steps of 2500.
# Default to gcc when the caller did not choose a compiler.
[[ -z $COMPILER ]] && COMPILER="gcc"
# BUGFIX: this used to read [[ $COMPILER="gcc" ]] -- a single non-empty
# string, hence always true.  The comparison below is what was intended.
if [[ $COMPILER == "gcc" ]]; then
	make -f Makefile build2;
	for i in {1000..25000..2500}
	do
		./dtrmv 2 $i
	done
fi
| true |
39cdd8fb26130cca040a6db4d3747acbedccc419 | Shell | mdibl/biocore_misc | /shell/stopJenkins.sh | UTF-8 | 591 | 3.6875 | 4 | [] | no_license | #!/usr/bin/sh
###############################################
# Shell to stop the running instance of Jenkins
#
# Exit status: 0 if Jenkins was not running or was killed,
#              1 if the process survived the kill signal.
#####
# pgrep -f matches the full command line ("java -jar jenkins.war").
# BUGFIX: the old `pgrep -fl ... | cut -d ' ' -f1` pipeline produced a
# multi-line value when several PIDs matched, which then made the
# numeric test `[ $PID -ge 0 ]` error out.
PID=$(pgrep -f jenkins.war)
if [ "$PID" = "" ]
then
   echo "Jenkins was not running"
   exit 0
fi
echo "Stopping Jenkins process ID : $PID"
# $PID is intentionally unquoted: it may hold several PIDs.
kill -9 $PID
#
# Check that the process was killed
# if not exit with error
#
NPID=$(pgrep -f jenkins.war)
if [ "$NPID" = "" ]
then
  echo "Jenkins process ID : $PID killed successfully "
  exit 0
else
  echo "Failed to kill Jenkins process $NPID"
  exit 1
fi
| true |
350308b808d9a8590216eafbb1df70adeef73d47 | Shell | romkatv/advent-of-code-2019 | /27/solve | UTF-8 | 957 | 3.28125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env zsh
emulate -L zsh
setopt no_aliases err_exit no_unset extended_glob pipe_fail warn_create_global
local -A reactions
() {
local line match=() mbegin=() mend=()
while read -r line; do
[[ $line == (#b)(<1->' '[A-Z]##(', '<1->' '[A-Z]##)#)' => '(<1->)' '([A-Z]##) ]]
[[ ! -v reactions[$match[4]] && ! -v stock[$match[4]] ]]
reactions[$match[4]]="$match[3] ${match[1]//,}"
done
}
local -i res
local -A stock
# Recursively consume `count` units of `chemical`, drawing on any surplus
# recorded in the global $stock map and adding the total ORE cost to the
# global $res (Advent of Code 2019, day 14 style nanofactory).
function consume() {
  local -i count=$1
  local chemical=$2
  # ORE is primitive: just tally its cost and stop recursing.
  if [[ $chemical == ORE ]]; then
    (( res += count ))
    return
  fi
  # Draw from stock; a negative balance is the deficit we must produce.
  count=$((stock[$chemical] -= count))
  (( count < 0 )) || return 0
  # reaction layout: (batch_size  count1 input1  count2 input2 ...)
  local reaction=(${=reactions[$chemical]})
  # Batches needed to cover the deficit (ceiling division).
  local -i n=$(((reaction[1] - count - 1) / reaction[1]))
  count=$((stock[$chemical] += n * reaction[1]))
  # Sanity assertion; under the script's err_exit a violation aborts.
  (( count >= 0 && count < reaction[1] ))
  # Recurse into each reaction input, scaled by the batch count.
  for count chemical in ${reaction:1}; do
    consume $((n * count)) $chemical
  done
}
consume 1 FUEL
echo -E - $res
| true |
2e46b54a1366f77a16451c14512bdea66a040e32 | Shell | we87/docker-pritunl | /start.sh | UTF-8 | 866 | 3.3125 | 3 | [] | no_license | #!/usr/bin/env bash
set -e
# Link with mongo
if [ ! -z "$MONGO_PORT_27017_TCP_ADDR" ]; then
MONGO_URI="mongodb://${MONGO_PORT_27017_TCP_ADDR}:${MONGO_PORT_27017_TCP_PORT}/pritunl"
fi
# Set MONGO URI
if [ ! -z "$MONGO_URI" ]; then
PRITNUL_SERVER_PORT=${PRITNUL_SERVER_PORT:-443}
cat << EOF >/etc/pritunl.conf
{
"mongodb_uri": "$MONGO_URI",
"log_path": "/var/log/pritunl.log",
"static_cache": true,
"temp_path": "/tmp/pritunl_%r",
"bind_addr": "0.0.0.0",
"debug": false,
"www_path": "/usr/share/pritunl/www",
"local_address_interface": "auto",
"port": ${PRITNUL_SERVER_PORT}
}
EOF
fi
# Set Server state
if [ ! -z "$PRITNUL_UUID" ]; then
echo -n "$PRITNUL_UUID" >/var/lib/pritunl/pritunl.uuid
fi
if [ ! -z "$PRITNUL_SETUP_KEY" ]; then
echo -n "$PRITNUL_SETUP_KEY" >/var/lib/pritunl/setup_key
fi
# Start
pritunl start
| true |
f2685f188a72e44eef29596e1c912655f74e7886 | Shell | vvranjek/ubuntu-start | /scripts/mount_remotes.sh | UTF-8 | 7,360 | 3.84375 | 4 | [] | no_license | #! /bin/bash
USER_SET=""
USER=${SUDO_USER:-${USER}}
while getopts u:d:p:f: option
do
case "${option}"
in
u) USER=${OPTARG};USER_SET=true;;
esac
done
# Info
echo
echo "Don't forget to forward port $PORT! Press any key..."
read nothing
# Check for user
echo User: $USER
if [[ -z "$USER_SET" ]]; then
echo "You can set user with [-u user]"
#exit 1;
fi
# Check if root
if [[ $EUID -ne 0 ]]; then
echo "Please run as root"
#exit 1
fi
############### Get the directory of original script, not the link
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
DIR="$( cd -P "$( dirname "$SOURCE" )" >/dev/null && pwd )"
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
DIR="$( cd -P "$( dirname "$SOURCE" )" >/dev/null && pwd )"
echo Script location: $DIR
#MNT_DOWNLAODS=/media/$USER/NAS/Downloads
MNT_NAS=/media/$USER/NAS/
mkdir -p $MNT_NAS
#################### Variables #############################
FSTAB=/etc/fstab
SECRET_FILE="/etc/davfs2/secrets"
PORT="88"
URL=vidcloud.myqnapcloud.com
WEBDEV_URL="http://$URL"
WEBDAV_MOUNT="davfs defaults,_netdev,auto,user 0 0"
CIFS_MOUNT="cifs defaults,rw,credentials=$DIR/.smbcredentials"
SSH_MOUNT="fuse allow_other,noatime,follow_symlinks,delay_connect,defaults,auto,user 0 0"
#CIFS_MOUNT="cifs defaults,rw,credentials=$DIR/.smbcredentials,iocharset=utf8,sec=ntlm 0 0"
#CIFS_MOUNT="cifs defaults,rw,credentials=$DIR/.smbcredentials uid=1000,gid=46,dir_mode=0777,file_mode=0777 0 0"
#CIFS_MOUNT="cifs rw,user,credentials=$DIR/.smbcredentials,uid=1000,gid=1000,iocharset=utf8 0 0
# Fstab comment
# Replace the line if it already exists
if grep -q "QNAP vidcloud configuration" "$FSTAB"; then
echo "Entry \'QNAP vidcloud configuration\' exists"
else
echo "Adding \'QNAP vidcloud configuration\' to /etc/fstab"
sudo printf '\n\n# QNAP vidcloud configuration\n' >> $FSTAB
fi
############### sshfs #############################
# sshfs#someuser@remote.com:/remote_dir /media/remote_dir/ fuse auto,_netdev,port=22,user,allow_other,noatime,follow_symlinks,IdentityFile=/home/someuser/.ssh/id_rsa,reconnect 0 0
# sshfs#USER@MACHINE:/remote/path/ /mnt/local/path/ fuse user,_netdev,auto_cache,reconnect,uid=1000,gid=1000,IdentityFile=/full/path/to/.ssh/id_rsa,idmap=user,allow_other 0 2
FOLDER="share/"
REMOTE="sshfs#$URL:/$FOLDER"
REMOTE_COMMAND="$REMOTE $MNT_NAS $SSH_MOUNT"
sudo apt-get install sshfs
sudo usermod -a -G fuse $USER
sudo groupadd fuse
mkdir -p $MNT_NAS
echo
# Generate and copy key if it doesn't exist
if [ ! -f /home/$USER/.ssh/id_rsa ]; then
echo "Key not found in /home/$USER/.ssh/"
ssh-keygen
ssh-copy-id vid@vidcloud.myqnapcloud.com
fi
# Add "user_allow_other" to /etc/fuse.conf
FUSECONF="/etc/fuse.conf"
if grep -q "user_allow_other" $FUSECONF; then
echo "Entry user_allow_other exists, replacing"
sudo sed -i -e "s/.*user_allow_other.*/user_allow_other/" $FUSECONF
else
echo "Adding user_allow_other to $FUSECONF"
sudo sh -c "echo "user_allow_other" >> $FUSECONF"
fi
# Replace the ssh line if it already exists
if grep -q $URL "$FSTAB"; then
echo "Entry $URL exists, replacing"
REMOTE_DOWNLOADS_COMMAND_SED=$(echo "$REMOTE_COMMAND" | sed 's/\//\\\//g')
REMOTE_DOWNLOADS_SED=$(echo "$URL" | sed 's/\//\\\//g')
sudo sed -i -e "s/.*$REMOTE_DOWNLOADS_SED.*/$REMOTE_DOWNLOADS_COMMAND_SED/" $FSTAB
else
sudo sh -c "echo $REMOTE_COMMAND >> $FSTAB"
fi
# Remove comment for user_allow_other in /etc/fuse.conf
sudo sed '/user_allow_other/s/^# *//' /etc/fuse.conf
mount $MNT_NAS
###################################################
sudo cat $FSTAB
exit 0
################## WebDav ########################
# Add secret for devfs2
echo ""
echo -n "Enter user name of QNAP vidcloud:"
read USERNAME
echo -n "Enter password:"
read -s PASSWORD
chmod 0600 /home/$USER/.dav2fs/secrets
# Add user to group davfs2
sudo usermod -a -G davfs2 $USER
gpasswd -a $USER davfs2 # this updates users and groups
#newgrp davfs2 # this updates users and groups
#su -l $USER # this updates users and groups
#mkdir -p /home/$USER/.dav2fs/
#touch /home/$USER/.dav2fs/secrets
FOLDER="NAS"
REMOTE=$WEBDEV_URL:$PORT/$FOLDER
REMOTE_COMMAND="$REMOTE $MNT_NAS $WEBDAV_MOUNT"
mkdir -p $MNT_NAS
sudo apt-get install davfs2 -y
# Replace the webdav line if it already exists
if grep -q $URL "$FSTAB"; then
echo "Entry $REMOTE exists, replacing"
REMOTE_DOWNLOADS_COMMAND_SED=$(echo "$REMOTE_COMMAND" | sed 's/\//\\\//g')
REMOTE_DOWNLOADS_SED=$(echo "$URL" | sed 's/\//\\\//g')
sudo sed -i -e "s/.*$REMOTE_DOWNLOADS_SED.*/$REMOTE_DOWNLOADS_COMMAND_SED/" $FSTAB
else
sudo sh -c "echo $REMOTE_COMMAND >> $FSTAB"
fi
# Replace secret line if it already exists
WEBDEV_SECRET="$WEBDEV_URL:$PORT/$FOLDER $USERNAME $PASSWORD"
if grep -q "$WEBDEV_URL" "$SECRET_FILE"; then
echo "Entry vidcloud.myqnapcloud.com exists in secrets, replacing"
WEBDEV_SECRET=$(echo "$WEBDEV_SECRET" | sed 's/\//\\\//g')
REPLACE=$(echo "$URL" | sed 's/\//\\\//g')
sudo sed -i -e "s/.*$REPLACE.*/$WEBDEV_SECRET/" $SECRET_FILE
else
sudo sh -c "echo "$WEBDEV_URL:$PORT/$FOLDER $USERNAME $PASSWORD" >> $SECRET_FILE"
fi
###################################################
########################## CIFS ###################################
# Downloads
FOLDER=Download
REMOTE=vidcloud.myqnapcloud.com/$FOLDER
#REMOTE_COMMAND="//$REMOTE /home/$USER/NAS/Downloads $CIFS_MOUNT"
REMOTE_COMMAND="http://$REMOTE /home/$USER/NAS/Downloads $WEBDAV_MOUNT"
mkdir -p /home/$USER/NAS/Downloads
sudo apt-get install cifs-utils -y
# Replace the line if it already exists
if grep -q $REMOTE "$FSTAB"; then
echo "Entry $REMOTE exists, replacing"
REMOTE_DOWNLOADS_COMMAND_SED=$(echo "$REMOTE_COMMAND" | sed 's/\//\\\//g')
REMOTE_DOWNLOADS_SED=$(echo "$REMOTE" | sed 's/\//\\\//g')
sudo sed -i -e "s/.*$REMOTE_DOWNLOADS_SED.*/$REMOTE_DOWNLOADS_COMMAND_SED/" $FSTAB
else
sudo sh -c "echo $REMOTE_COMMAND >> $FSTAB"
fi
# vidcloud
FOLDER=vidcloud
REMOTE=vidcloud.myqnapcloud.com/$FOLDER
REMOTE_COMMAND="//$REMOTE /home/$USER/NAS/$FOLDER $CIFS_MOUNT"
mkdir -p /home/$USER/NAS/$FOLDER
# Replace the line if it already exists
if grep -q $REMOTE "$FSTAB"; then
echo "Entry $REMOTE exists, replacing"
REMOTE_DOWNLOADS_COMMAND_SED=$(echo "$REMOTE_COMMAND" | sed 's/\//\\\//g')
REMOTE_DOWNLOADS_SED=$(echo "$REMOTE" | sed 's/\//\\\//g')
sudo sed -i -e "s/.*$REMOTE_DOWNLOADS_SED.*/$REMOTE_DOWNLOADS_COMMAND_SED/" $FSTAB
else
sudo sh -c "echo $REMOTE_COMMAND >> $FSTAB"
fi
# Multimedia
FOLDER=Multimedia
REMOTE=vidcloud.myqnapcloud.com/$FOLDER
REMOTE_COMMAND="//$REMOTE /home/$USER/NAS/$FOLDER $CIFS_MOUNT"
mkdir -p /home/$USER/NAS/$FOLDER
# Replace the line if it already exists
if grep -q $REMOTE "$FSTAB"; then
echo "Entry $REMOTE exists, replacing"
REMOTE_DOWNLOADS_COMMAND_SED=$(echo "$REMOTE_COMMAND" | sed 's/\//\\\//g')
REMOTE_DOWNLOADS_SED=$(echo "$REMOTE" | sed 's/\//\\\//g')
sudo sed -i -e "s/.*$REMOTE_DOWNLOADS_SED.*/$REMOTE_DOWNLOADS_COMMAND_SED/" $FSTAB
else
sudo sh -c "echo $REMOTE_COMMAND >> $FSTAB"
fi
echo Restart the com
| true |
e65de914f665d4dd84d24b43208bd6d6443ac729 | Shell | sambhavdutt/ci-management | /jjb/fabric-sdk-node/shell/include-raw-publish-doc.sh | UTF-8 | 1,361 | 3.21875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash -e
#
# SPDX-License-Identifier: Apache-2.0
##############################################################################
# Copyright (c) 2018 IBM Corporation, The Linux Foundation and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License 2.0
# which accompanies this distribution, and is available at
# https://www.apache.org/licenses/LICENSE-2.0
##############################################################################
set -o pipefail
ARCH=$(uname -m)
echo "--------> ARCH:" $ARCH
if [ "$ARCH" != "s390x" ]; then
cd ${WORKSPACE}/gopath/src/github.com/hyperledger/fabric-sdk-node
# Generate SDK-Node API docs
gulp doc
# Short Head commit
SDK_COMMIT=$(git rev-parse --short HEAD)
TARGET_REPO=$NODE_SDK_USERNAME.github.io.git
# Clone SDK_NODE API doc repository
git clone https://github.com/$NODE_SDK_USERNAME/$TARGET_REPO
# Copy API docs to target repository & push to gh-pages URL
cp -r docs/gen/* $NODE_SDK_USERNAME.github.io
cd $NODE_SDK_USERNAME.github.io
git add .
git commit -m "SDK commit - $SDK_COMMIT"
git config remote.gh-pages.url https://$NODE_SDK_USERNAME:$NODE_SDK_PASSWORD@github.com/$NODE_SDK_USERNAME/$TARGET_REPO
# Push API docs to Target repository
git push gh-pages master
fi
| true |
accfd8dd5870e48c7a4027dbc26fe40b1bbaf8ae | Shell | AO-StreetArt/CrazyIvan | /scripts/linux/container_install.sh | UTF-8 | 760 | 3.3125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#This script will install Crazy Ivan by cloning it from github and building from source
set -e
BRANCH="v2"
if [ "$#" -gt 0 ]; then
BRANCH=$1
fi
apt-get update
apt-get -y install software-properties-common build-essential g++ make
add-apt-repository -y ppa:ubuntu-toolchain-r/test
apt-get -y update
apt-get -y install git g++-6
export CXX=g++-6
export CC=gcc-6
git clone --depth=50 --branch=$BRANCH https://github.com/AO-StreetArt/CrazyIvan.git
mkdir ivan_deps
cp CrazyIvan/scripts/linux/deb/build_deps.sh ivan_deps
cd ivan_deps && ./build_deps.sh g++-6 -no-poco
export LD_LIBRARY_PATH="/usr/local/lib:$LD_LIBRARY_PATH"
export CFLAGS=-O2
cd ../CrazyIvan && make && make test
cp crazy_ivan /usr/bin
mkdir /etc/ivan
cp app.properties /etc/ivan/
| true |
263eac13b7131dc73a6dbb2e4c6e5558b491075b | Shell | iurnus/scripps_kaust_model | /coupler/L3.C3.coupled_SouthernOcean/install.sh | UTF-8 | 1,912 | 3.5 | 4 | [] | no_license | #!/bin/sh
echo "ESMF location? : " ${ESMF_DIR}
echo "WRF413 (with OA coupling) location? : " ${WRF_DIR}
echo "MITgcm (source code) location? : " ${MITGCM_DIR}
read -e -p "Using Intel compiler? (Y/N) :" -i "N" intelFlag
if [ $intelFlag == 'Y' ]; then
if [ $ESMF_OS == 'Linux' ]; then
echo "Using Intel compiler"
export MITGCM_OPT=mitgcm_optfile.ifort
elif [ $ESMF_OS == 'Unicos' ]; then
echo "Using Intel compiler for Cray"
export MITGCM_OPT=mitgcm_optfile.cray
fi
else
echo "Using PGI compiler"
export MITGCM_OPT=mitgcm_optfile.pgi
fi
echo "The option file is: $MITGCM_OPT"
read -e -p "Continue? (Y/N) :" -i "Y" continueFlag
if [ $continueFlag == 'Y' ]; then
echo "continue"
else
echo "stop"
exit
fi
# # build the MITGCM as an executable
# mkdir build_mit code_mit
# cp utils/* build_mit/ # copy the scripts to install MITGCM
# cp mitCode/* code_mit/ # copy the scripts to install MITGCM
# cp mitSettingRS/* code_mit/ # copy the scripts to install MITGCM
# rm code_mit/exf_get* # remove the exf_get file so that MITGCM read the file input
# rm code_mit/main.F # remove the main file
# cd build_mit
# sed -i s/code/code_mit/g makescript_fwd.sh
# ./makescript_fwd.sh # install MITGCM, generate *.f files
# cd ..
# build the MITGCM as a library
mkdir build code
cp utils/* build/ # copy the scripts to install MITGCM
cp mitCode/* code/ # copy the scripts to install MITGCM
cp mitSettingSO/* code/ # copy the scripts to install MITGCM
cd build
./makescript_fwd.sh # install MITGCM, generate *.f files
cp ${SKRIPS_MPI_INC}/mpif* .
./mkmod.sh ocn # install MITGCM as a library, generate *.mod files
cd ..
# build the test coupler
cd coupledCode
./Allmake.sh
cd ..
if [ -f ./coupledCode/esmf_application ]; then
echo "Installation is successful!"
echo The coupled model is installed as ./coupledCode/esmf_application
else
echo ERROR! Installation is NOT successful!
fi
| true |
9c4619d6b8f42dd4e3e1f647b73506762e19ab62 | Shell | rebl0x3r/aut0_mak3r | /lib/insta.sh | UTF-8 | 7,388 | 3.25 | 3 | [] | no_license | #!/bin/bash
# colors
# Reset
rs='\033[m'
# Regular Colors
r='\033[0;31m'
g='\033[0;32m'
y='\033[0;33m'
b='\033[0;34m'
p='\033[0;35m'
c='\033[0;36m'
w='\033[0;37m'
# Background
or='\033[41m'
og='\033[42m'
oy='\033[43m'
ob='\033[44m'
op='\033[45m'
oc='\033[46m'
ow='\033[47m'
# Bold
bd="\e[1m"
# Path
path=$(pwd)
path2="/data/data/com.termux/files/home"
path3=""
clear
# Path Check
if [[ "$path" == "$path2" ]]; then
cd /data/data/com.termux/files/home/aut0_mak3r
path3="/data/data/com.termux/files/home/aut0_mak3r"
elif [[ "$path" != "$path2" ]]; then
cd /data/data/com.termux/files/home/aut0_mak3r
path3="/data/data/com.termux/files/home/aut0_mak3r"
else
if [[ "$path" == "/data/data/com.termux/files/home/aut0_mak3r/lib" ]]; then
cd ..
path3="/data/data/files/home/aut0_mak3r"
fi
fi
# Functions
function phishing {
clear
echo -e "${r}[!] ${y}This installation could take a while\n"
sleep 1
echo -ne "${b}[${r}>${b}] ${p}Press enter to start> "
read enter
sleep 1
echo -e "\n${g}[i] ${b}Installing ${r}hiddeneye${b}...\n"
sleep 0.5
pkg install git python php curl openssh grep -y
cd $HOME
git clone -b Termux-Support-Branch https://github.com/DarkSecDevelopers/HiddenEye.git; chmod 777 HiddenEye; cd Hiddeneye; pip install requests; cd $HOME
echo -e "\n${g}[i] ${b}Successfully installed hiddeneye.\n"
echo -e "${g}[i] ${b}To start type: ${r}python HiddenEye.py"
sleep 1
echo -ne "${b}[${r}>${b}] ${p}Press enter to continue> "
read enter
echo -e "\n${g}[i] ${b}Installing ${r}SocialPhish${b}...\n"
sleep 0.5
git clone https://github.com/xHak9x/SocialPhish.git; cd SocialPhish; chmod +x socialphish.sh; cd $HOME
echo -e "\n${g}[i] ${b}Successfully installed socialphish.\n"
echo -e "${g}[i] ${b}To start type: ${r}bash socialphish.sh"
sleep 1
cd aut0_mak3r
echo -ne "${b}[${r}>${b}] ${p}Press enter to go back> "
main
}
function bruteforce {
clear
echo -e "${r}[!] ${y}This installation could take a while\n"
sleep 1
echo -ne "${b}[${r}>${b}] ${p}Press enter to start> "
read enter
sleep 1
echo -e "\n${g}[i] ${b}Installing ${r}instainsane${b}...\n"
sleep 0.5
cd $HOME
git clone https://github.com/permikomnaskaltara/instainsane; cd instainsane; chmod +x instainsane.sh; chmod +x install; bash install.sh; cd $HOME
sleep 1
echo -e "\n${g}[i] ${b}Successfully installed instainsane.\n"
echo -e "${g}[i] ${b}To start type: ${r}bash instainsane.sh"
sleep 1
echo -ne "${b}[${r}>${b}] ${p}Press enter to continue> "
read enter
echo -e "\n${g}[i] ${b}Installing ${r}InstaBrute${b}...\n"
sleep 0.5
git clone https://github.com/Ha3MrX/InstaBrute.git; apt install tor -y; chmod +x InstaBrute/*
echo -e "\n${g}[i] ${b}Successfully installed InstaBrute.\n"
echo -e "${g}[i] ${b}To start type: ${r}bash insta.sh"
sleep 0.5
echo -ne "${b}[${r}>${b}] ${p}Press enter to continue> "
read enter
echo -e "\n${g}[i] ${b}Installing ${r}PureL0G1Cs Bruter${b}...\n"
sleep 0.5
pkg install python; pkg install python-pip; pip install --upgrade pip; git clone https://github.com/Pure-L0G1C/Instagram; cd Instagram; pip3 install -r requirements.txt; cd $HOME
echo -e "\n${g}[i] ${b}Successfully installed PureL0G1Cs Bruter.\n"
echo -e "${g}[i] ${b}To start type: ${r}python3 instagram.py <username> <wordlist> -m 2"
sleep 0.5
echo -ne "${b}[${r}>${b}] ${p}Press enter to continue> "
read enter
sleep 1
echo -e "\n${g}[i] ${b}Installing ${r}alkrinsta${b}...\n"
sleep 0.5
pkg update; pkg upgrade -y; git clone https://github.com/ALKR-HACKHECKZ/alkrinsta.git
sleep 1
echo -e "\n${g}[i] ${b}Successfully installed alkrinsta.\n"
echo -e "${g}[i] ${b}To start type: ${r}python3 cupp.py -i"
echo -e "${g}[i] ${b}To start type: ${r}python3 alkrinsta.py"
sleep 1
cd aut0_mak3r
echo -ne "${b}[${r}>${b}] ${p}Press enter to go back> "
main
}
function instaservices {
clear
echo -e "${r}[!] ${y}This installation could take a while\n"
sleep 1
echo -ne "${b}[${r}>${b}] ${p}Press enter to start> "
read enter
sleep 1
echo -e "\n${g}[i] ${b}Installing ${r}instagram-tools${b}...\n"
sleep 0.5
cd aut0_mak3r
pkg install git -y; pkg install nodejs -y; git clone https://github.com/masokky/instagram-tools.git
sleep 1
echo -e "\n${g}[i] ${b}Successfully installed instagram-tools.\n"
echo -e "${g}[i] ${b}To start type: ${r}node index.js"
sleep 1
echo -ne "${b}[${r}>${b}] ${p}Press enter to continue> "
read enter
echo -e "\n${g}[i] ${b}Installing ${r}igtools${b}...\n"
sleep 0.5
git clone https://github.com/ikiganteng/bot-igeh.git; cd bot-igeh; unzip node_modules.zip; npm install https://github.com/huttarichard/instagram-private-api; npm audit fix; cd $HOME
echo -e "\n${g}[i] ${b}Successfully installed igtools.\n"
echo -e "${g}[i] ${b}To start type: ${r}ls; node <filename>"
sleep 1
cd aut0_mak3r
echo -ne "${b}[${r}>${b}] ${p}Press enter to go back> "
main
}
function main {
echo -e "
${rs} ${ob}${bd} ${rs}
${rs} ${ob}${bd} |\ |\. ${rs}
${rs} ${ob}${bd} \ \| | ${rs}
${rs} ${ob}${bd} \ | | ${rs}
${ob}${bd} ____ __ ______ __ .--'' / ${rs}
${ob}${bd} / _/__ ___ / /____ _ /_ __/__ ___ / /__ /o \' ${rs}
${ob}${bd} _/ // _ \(_-</ __/ _ \/ / / / _ \/ _ \/ (_-< \ /
${ob}${bd}/___/_//_/___/\__/\_,_/ /_/ \___/\___/_/___/ {>o<}='
${ob}${bd}
${or}by @Leakerhounds${rs} ${op}collection by @BlackFlare${rs}
${oy}Directory:${rs} ${op}$path${rs}
${bd}${b}[${r}1${b}] ${y}Insta-Phishing ${g} Installing some phishing frameworks${rs}
${bd}${b}[${r}2${b}] ${y}Insta-Bruteforce ${g} Installing some bruteforce tools${rs}
${bd}${b}[${r}3${b}] ${y}Insta-Services ${g} Installing instagram manage tools(likes etc.)${rs}
${bd}${b}[${r}4${b}] ${y}Back To Main ${g} Go back to main menu of tool${rs}
${bd}${b}[${r}5${b}] ${y}Exit ${g} Quit the Insta Tools tool${rs}
"
testt=0
while [ $testt = 0 ]
do
echo -e "${bd}"
echo -ne "${r}【 mak3r@root 】${y}/lib/insta.sh ${b}~>:${r} "
read use
case "$use" in
1)
phishing
testt=1
;;
2)
bruteforce
testt=1
;;
3)
instaservices
testt=1
;;
4)
if [ -f 4ut0m4t10n.sh ]
then
bash 4ut0m4t10n.sh
elif [ ! -f 4ut0m4t10n.sh ]
then
cd ..
bash 4ut0m4t10n.sh
else
cd $HOME/aut0_mak3r
bash 4ut0m4t10n.sh
fi
testt=1
;;
5)
echo -e "${b}[${g}i${b}] ${r}Quitting :-)"
exit
testt=1
;;
*)
echo '[!] Wrong command!'
sleep 1
;;
esac
done
}
main
#
# Written By @TheMasterCH
# Special Upload ...:)
#
| true |
ec53dccf9172eaa4573cccf13cbb21a2700cc6b5 | Shell | priesgo/leptospira-variant-calling | /src/hcvc/cnv_calling/cnvnator.sh | UTF-8 | 2,545 | 3.09375 | 3 | [] | no_license | #!/bin/bash
# Check input parameters
if [ $# -ne 3 ]
then
	echo "Runs CNVnator"
	echo "USAGE: cnvnator.sh INPUT_BAM OUTPUT_FOLDER REFERENCE"
	exit 1
fi
#Configuration
# Resolve the directory this script lives in so config.sh can be sourced
# relative to it, regardless of the caller's working directory.
SCRIPT=$(readlink -f "$BASH_SOURCE")
BASEDIR=$(dirname "$SCRIPT")
# config.sh is expected to define GENOMETOOLS_HOME, CNVNATOR_HOME and GATK,
# and presumably also OUTPUT_DIR / OUTPUT_VCF used below -- TODO confirm.
source $BASEDIR/../config/config.sh
# Copies input BAM into local folder
#cp $1 .
#cp $1.bai .
INPUT_BAM=$1
echo $INPUT_BAM
# Sample prefix = BAM file name with the .bam extension stripped.
PREFIX_LOCAL=`basename $1 .bam`
OUTPUT_FOLDER=$2
echo $OUTPUT_FOLDER
REFERENCE=$3
echo $REFERENCE
# Prepare reference genome: split reference by chromosome and copy in destination folder
$GENOMETOOLS_HOME/gt splitfasta -splitdesc $OUTPUT_FOLDER $REFERENCE
# Runs CNVnator pipeline
# All steps operate on chromosome NC_008508 with a 300 bp bin size:
# build the read tree, histogram, statistics, partition, then emit calls.
$CNVNATOR_HOME/cnvnator -root $PREFIX_LOCAL.NC_008508.root -genome $REFERENCE -chrom NC_008508 -tree $INPUT_BAM
$CNVNATOR_HOME/cnvnator -root $PREFIX_LOCAL.NC_008508.root -genome $REFERENCE -chrom NC_008508 -his 300
$CNVNATOR_HOME/cnvnator -root $PREFIX_LOCAL.NC_008508.root -genome $REFERENCE -chrom NC_008508 -stat 300
$CNVNATOR_HOME/cnvnator -root $PREFIX_LOCAL.NC_008508.root -genome $REFERENCE -chrom NC_008508 -partition 300
$CNVNATOR_HOME/cnvnator -root $PREFIX_LOCAL.NC_008508.root -genome $REFERENCE -chrom NC_008508 -call 300 > $PREFIX_LOCAL.NC_008508.cnvnator
# Runs BAQ with PrintReads as HaplotypeCaller does not support it on the fly as UnifiedGenotyper does
echo "GATK PrintReads to calculate BAQ"
# NOTE(review): writes into $OUTPUT_DIR although the CLI argument was stored
# in OUTPUT_FOLDER above -- confirm OUTPUT_DIR is set by config.sh.
java -jar $GATK -T PrintReads -R $REFERENCE -I $INPUT_BAM -baq RECALCULATE -o $OUTPUT_DIR/$PREFIX_LOCAL.baq.bam
# Haplotype caller variant calling pipeline
echo "GATK HaplotypeCaller"
java -jar $GATK -T HaplotypeCaller -R $REFERENCE -I $OUTPUT_DIR/$PREFIX_LOCAL.baq.bam --genotyping_mode DISCOVERY -stand_emit_conf 30 -stand_call_conf 30 --min_base_quality_score 13 --downsampling_type NONE -ploidy 1 -nda -allowNonUniqueKmersInRef -bamout $INPUT_BAM.hc_reassembly.bam -o $OUTPUT_VCF --annotateNDA --annotation BaseQualityRankSumTest --annotation ClippingRankSumTest --annotation Coverage --annotation FisherStrand --annotation GCContent --annotation HomopolymerRun --annotation LikelihoodRankSumTest --annotation NBaseCount --annotation QualByDepth --annotation RMSMappingQuality --annotation StrandOddsRatio --annotation TandemRepeatAnnotator --annotation DepthPerAlleleBySample --annotation DepthPerSampleHC --annotation StrandAlleleCountsBySample --annotation StrandBiasBySample --excludeAnnotation HaplotypeScore --excludeAnnotation InbreedingCoeff
# Use this parameters to create all haplotypes -forceActive -disableOptimizations
# Remove the intermediate BAQ-recalculated BAM (and its index).
rm -f $OUTPUT_DIR/$PREFIX_LOCAL.baq.ba*
| true |
9316168d74f765df56e83f4bb25f08d6ba613a3d | Shell | urbanware-org/snippets | /bash/video2jpg/video2jpg.sh | UTF-8 | 338 | 3.109375 | 3 | [
"MIT"
] | permissive | convert_video2jpg() {
input_video_file="$1"
input_video_fps="$2"
output_filename="$3"
if [ -z "$input_video_fps" ]; then
input_video_fps=30
fi
if [ -z "$output_filename" ]; then
output_filename="img_%08d.jpg"
fi
ffmpeg -i "$input_video_file" -vf fps=$input_video_fps "$output_filename"
}
| true |
e6f9d59446d73dbee906d841abfe31fea9f80b3b | Shell | irenedet/3d-cnn | /submission_scripts/TM_at_peaks/TM_submission.sh | UTF-8 | 3,028 | 2.90625 | 3 | [] | no_license | #! /bin/bash
#SBATCH -A mahamid
#SBATCH --nodes 1
#SBATCH --ntasks 1
#SBATCH --mem 128G
#SBATCH --time 0-2:00
#SBATCH -o slurm_outputs/TM_at_peaks.slurm.%N.%j.out
#SBATCH -e slurm_outputs/TM_at_peaks.slurm.%N.%j.err
#SBATCH --mail-type=END,FAIL
#SBATCH --mail-user=irene.de.teresa@embl.de
# Load Anaconda and activate the project's conda environment.
module load Anaconda3
echo "activating virtual environment"
source activate /struct/mahamid/Processing/envs/.conda/3d-cnn/
echo "... done"
# Make the 3d-cnn package importable and force Qt to render off-screen
# (compute nodes have no X display).
export PYTHONPATH=$PYTHONPATH:/g/scb2/zaugg/trueba/3d-cnn
export QT_QPA_PLATFORM='offscreen'
# Print command-line help.
# NOTE(review): the options listed here do not match the ones parsed below
# (-tomo_name, -path_to_motl, ...) -- looks copied from a sibling script.
usage()
{
    echo "usage: [[ [-output output_dir][-test_partition test_partition ]
    [-model path_to_model] [-label label_name]
    [-out_h5 output_h5_file_path] [-conf conf]] | [-h]]"
}
# Walk the argument list, storing each option's value in the shell
# variable of the same name.  Unknown options print the usage and abort.
while [ "$1" != "" ]; do
    case $1 in
        -tomo_name | --tomo_name ) shift; tomo_name=$1 ;;
        -path_to_motl | --path_to_motl ) shift; path_to_motl=$1 ;;
        -path_to_dataset | --path_to_dataset ) shift; path_to_dataset=$1 ;;
        -path_to_output_csv | --path_to_output_csv ) shift; path_to_output_csv=$1 ;;
        -catalogue_path | --catalogue_path ) shift; catalogue_path=$1 ;;
        -ref_angles | --ref_angles ) shift; ref_angles=$1 ;;
        -angles_in_degrees | --angles_in_degrees ) shift; angles_in_degrees=$1 ;;
        -path_to_mask | --path_to_mask ) shift; path_to_mask=$1 ;;
        -h | --help ) usage; exit ;;
        * ) usage; exit 1 ;;
    esac
    shift
done
# Echo the parsed arguments into the SLURM log for traceability.
echo tomo_name=$tomo_name
echo path_to_motl=$path_to_motl
echo path_to_dataset=$path_to_dataset
echo path_to_output_csv=$path_to_output_csv
echo catalogue_path=$catalogue_path
echo angles_in_degrees=$angles_in_degrees
echo ref_angles=$ref_angles
echo path_to_mask=$path_to_mask
# Export them as well, so the python child process can read them from its
# environment (they are additionally passed explicitly on the command line).
export tomo_name=$tomo_name
export path_to_motl=$path_to_motl
export path_to_dataset=$path_to_dataset
export path_to_output_csv=$path_to_output_csv
export catalogue_path=$catalogue_path
export angles_in_degrees=$angles_in_degrees
export ref_angles=$ref_angles
export path_to_mask=$path_to_mask
echo "Starting python script..."
python3 /g/scb2/zaugg/trueba/3d-cnn/pipelines/template_matching_at_peaks/TM_from_motl.py -tomo_name $tomo_name -path_to_motl $path_to_motl -path_to_dataset $path_to_dataset -path_to_output_csv $path_to_output_csv -catalogue_path $catalogue_path -path_to_mask $path_to_mask -ref_angles $ref_angles -angles_in_degrees $angles_in_degrees
echo "...done."
daa930e3a2101f853db1233e3950f1f470c318ac | Shell | revainisdead/settings | /init.sh | UTF-8 | 1,682 | 4.1875 | 4 | [] | no_license | #!/bin/bash
# Move all relevant dot files from this folder to home ~/
exclude_always=".git .gitignore git_hooks init.sh utils.sh alias.cmd README.md" # alias.cmd is for Windows
exclude_for_now=".asoundrc .Xdefaults"
exclude="${exclude_always} ${exclude_for_now}"
set -e
pushd $(dirname $0) > /dev/null
SCRIPT_PATH=$(pwd -P)
popd > /dev/null
# cd $SCRIPT_PATH/..
source $SCRIPT_PATH/utils.sh
# Point of this script is to move hidden files from this repo to the
# home directory, so must be able to move dot files with mv/cp command
# (disabled by default), use this command as a fix, for just this script
shopt -s dotglob
for path in $SCRIPT_PATH/*
do
name=`basename $path`
newName=$name
destination=~/ # with trailing slash
# Special case for moving ssh_config to a different target name: config
if [ $name == "ssh_config" ]; then
destination=~/.ssh/ # with trailing slash
newName="config"
# Enable interpretation of backslash escapes using `echo -e`
cecho "\tConverting ssh_config to ${newName} in ${destination}" yellow
fi
finalpath="${destination}${newName}"
# Exclude does not contain name
if [[ ! $exclude =~ $name ]]; then
# Only copy over if file does not already exist
if [ ! -f $finalpath ]; then
cecho "Copying file ${name} to ${finalpath}" green
cp -r $name "$finalpath"
else
cecho "File already exists $finalpath" red
fi
fi
done
# Set up pre-commit in project's .git/hooks directory
# Leverage install-hooks.sh to accomplish that
# step 1: move install-hooks.sh into .git/hooks
# step 2: run install-hooks.sh
| true |
16eda696d5ca992b26432dbc9d75aa0cf47b9633 | Shell | guanchenz/cookiecutter4ds | /utils/create_readme.sh | UTF-8 | 146 | 3.3125 | 3 | [
"MIT",
"BSD-2-Clause-Views"
] | permissive | #! /bin/sh
function create_readme {
    PROJECT=$1
    FILE_PATH=$2
    DESC=$3
    # Write README.md into the target directory: an H1 title line followed
    # by the project description.
    printf '# %s\n%s\n' "$PROJECT" "$DESC" > "$FILE_PATH/README.md"
}
ac2916ff9b056fb4af7e878a07e16808599e2dc8 | Shell | rokibhasansagar/builder_demo | /setEnv.sh | UTF-8 | 1,436 | 3 | 3 | [] | no_license | #!/bin/bash
# Remember where we started so we can return after building in /tmp.
ROOTDIR=${PWD}
# ::group:: / ::endgroup:: are GitHub Actions log-folding markers.
echo "::group::Apt setup"
sudo apt-fast update -qy
sudo apt-fast upgrade -qy
sudo apt-fast install -qy --no-install-recommends --no-install-suggests \
    lsb-core linux-headers-$(uname -r) python3-dev python-is-python3 xzdec zstd libzstd-dev lib32z1-dev build-essential libc6-dev-i386 gcc-multilib g++-multilib ninja-build clang cmake libxml2-utils xsltproc expat re2c lib32ncurses5-dev bc libreadline-gplv2-dev gawk xterm rename schedtool gperf rclone pigz
sudo apt-get clean -y && sudo apt-get autoremove -y
sudo rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/*
echo "::endgroup::"
echo "::group::Latest make and ccache setup"
# Build GNU make 4.3 from source and install it over the system make.
mkdir -p /tmp/env
cd /tmp/env || exit 1
curl -sL https://ftp.gnu.org/gnu/make/make-4.3.tar.gz -O
tar xzf make-4.3.tar.gz && cd make-*/
./configure && bash ./build.sh 1>/dev/null && sudo install ./make /usr/bin/make
# Build ccache v4.2 from source and install it under /usr.
cd /tmp/env || exit 1
git clone -q https://github.com/ccache/ccache.git && cd ccache && git checkout -q v4.2
mkdir build && cd build && cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr ..
make -j8 && sudo make install
echo "::endgroup::"
cd ${ROOTDIR} || exit 1
rm -rf /tmp/env
echo "::group::Ccache config"
# Write a fixed ccache configuration for the GitHub Actions runner user.
mkdir -p /home/runner/.config/ccache
cat << EOC > /home/runner/.config/ccache/ccache.conf
cache_dir = /home/runner/.cache/ccache
compression = true
compression_level = 6
max_size = 5G
recache = true
EOC
ccache -p
echo "::endgroup::"
| true |
36f7665681ba9b1e9107cda077a7271338080ab3 | Shell | ali5ter/carrybag | /v1/scripts/setaccount | UTF-8 | 1,909 | 3.5625 | 4 | [] | no_license | #!/bin/bash
# ----------------------------------------------------------------------------
# @file setaccount
# Set up an account on a machine through root
# @author Alister Lewis-Bowen [alister@different.com]
# ----------------------------------------------------------------------------
# This software is distributed under the the MIT License.
#
# Copyright (c) 2008 Alister Lewis-Bowen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# ----------------------------------------------------------------------------
# $1 = host to create the account on; $2 = user to ssh in as (defaults to
# the invoking user); $3 = account name to create (defaults likewise).
REMOTE_HOST=$1
REMOTE_USER=${2:-$USER}
CREATE_USER=${3:-$USER}
# The '"'"$VAR"'"' dance closes the single-quoted string, inserts the
# variable double-quoted, then reopens the single quote -- so $CREATE_USER
# is expanded locally but stays one word inside the remote command.
# Leaving $command unquoted on the ssh line is intentional: ssh joins the
# words back into the remote command string.
echo -n "I will make an account, $CREATE_USER, on $REMOTE_HOST. ";
echo "Please provide the password for user, $REMOTE_USER...";\
command='(adduser "'"$CREATE_USER"'"; passwd "'"$CREATE_USER"'"; gpasswd -a "'"$CREATE_USER"'" wheel;)';
ssh $REMOTE_USER@$REMOTE_HOST $command;
echo "Your new account for $CREATE_USER has been set up on $REMOTE_HOST";
exit 0;
| true |
3271131889ce5375ccfc7aa93b41be9cc121943b | Shell | pmjones/ext-request | /.github/scripts/osx.sh | UTF-8 | 446 | 3.28125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
set -e -o pipefail
# config
export PHP_VERSION=${PHP_VERSION:-"7.4"}
export NO_INTERACTION=1
export REPORT_EXIT_STATUS=1
export TEST_PHP_EXECUTABLE=${TEST_PHP_EXECUTABLE:-`which php`}
function install_brew_packages() (
set -x
brew install php@$PHP_VERSION
)
function install() (
set -x
phpize
./configure
make
)
function script() (
set -x
make test
)
install_brew_packages
install
script
| true |
676e1e209c203b0b4045f260a21d7735c5c6fafc | Shell | gpfti/wash | /test/non-interactive.bats | UTF-8 | 228 | 3.1875 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bats
@test "runs a command with -c" {
result="$(wash -c 'echo hello')"
[ "$result" = hello ]
}
@test "runs a command with input from stdin" {
result="$(echo 'echo hello' | wash)"
[ "$result" = hello ]
}
| true |
121392e564ef6999ed97ba21b53fef3ac347b0ef | Shell | chriszanf/Extension-Attributes | /VerifyMDMEnrollment.sh | UTF-8 | 469 | 3.171875 | 3 | [] | no_license | #!/bin/sh
# Script to check if the Casper MDM profile has been installed.
# This is needed to make sure that our computers are able to receive configuration profiles.
# Author : contact@richard-purves.com
# Version: 1.0 - Inital Version

# UUID of the Casper/Jamf MDM enrollment profile to look for.
mdmEnrollmentProfileID="00000000-0000-0000-A000-4A414D460003"

# `profiles -C` lists the installed profiles; grep -q succeeds exactly
# when the enrollment UUID appears in that list, so the pipeline's exit
# status can drive the branch directly (no intermediate variable needed).
if /usr/bin/profiles -C | /usr/bin/grep -q "$mdmEnrollmentProfileID"; then
	echo "<result>Enrolled</result>"
else
	echo "<result>Not Enrolled</result>"
fi
| true |
f14068ce0c47fe53859f067498275e8433a5ae9d | Shell | tusharsable/shell_development | /array/find_twin_digit.sh | UTF-8 | 396 | 3.75 | 4 | [] | no_license | #!/bin/bash -x
# Collect every number from 1 to 100 whose last two decimal digits are
# equal (11, 22, ..., 99 -- and 100, whose final two digits are 0 and 0).
arr=()
for x in {1..100}
do
# compare the ones digit of x against its tens digit
if (( x % 10 == (x / 10) % 10 ))
then
arr+=("$x")
fi
done
# print the collected numbers on a single line
echo ${arr[@]}
| true |
aedfaa186af9aff7a21d9355ce380211ea6f279a | Shell | assdaxan/lstms_monitor | /install.sh | UTF-8 | 2,275 | 2.953125 | 3 | [] | no_license | #!/bin/bash
echo "Install Python3"
sudo apt install -y python3

echo "Add User lstms_m"
# Create a locked-down system account: no shell, no home, no password.
echo 'Y' | sudo adduser --shell /usr/sbin/nologin --no-create-home --disabled-password --disabled-login --quiet lstms_m

echo "LSTMS Permission Setting"
# NOTE(review): unlike every other privileged step this chown runs without
# sudo -- confirm the script is expected to run as root.
chown -R lstms_m:lstms_m /etc/lstms_m

echo "Register LSTMS Client"
sudo /usr/bin/python3 /etc/lstms_m/lstms-register.py

# Monitoring components shipped with the client; each has a matching
# systemd unit under /etc/lstms_m/service/.  (An older crontab-based
# scheduling block was dropped in favour of these units.)
services="auth cpu memory disk io traffic user"

echo "Add Systemd Service"
for s in $services; do
    sudo cp "/etc/lstms_m/service/lstms-$s.service" "/etc/systemd/system/lstms-$s.service"
done

echo "Enable LSTMS Service"
for s in $services; do
    sudo systemctl enable "lstms-$s"
done

echo "Start LSTMS Service"
for s in $services; do
    sudo systemctl start "lstms-$s"
done
7ede7fa63d1b73d237849daf444c28cd7e78d3c4 | Shell | eaglexmw/PKGBUILD_Fork | /Library/qtwebkit_4.8/pack_all_a.sh | UTF-8 | 1,162 | 2.6875 | 3 | [] | no_license | #!/bin/sh
pushd . > /dev/null

echo "start build static lib"

# extract_objs NAME LIBPATH
# Unpacks the static library LIBPATH (given relative to the current
# directory) into ./NAME_obj and prefixes every object file with "NAME_"
# so members from different libraries cannot collide inside the combined
# archive.  The ( ... ) subshell keeps the cd local to this step.
extract_objs() {
    mkdir -p "./${1}_obj"
    (
        cd "./${1}_obj"
        ar x "../$2"
        for item in *.o; do mv "$item" "${1}_$item"; done
    )
}

extract_objs WebKit1 WebKit/release/libWebKit1.a
extract_objs WebCore WebCore/release/libWebCore.a
extract_objs ANGLE ThirdParty/ANGLE/release/libANGLE.a
extract_objs JavaScriptCore JavaScriptCore/release/libJavaScriptCore.a
extract_objs WTF WTF/release/libWTF.a

# Bundle every prefixed object into the archive named by $1, then clean up.
ar crus "$1" ./WebKit1_obj/*.o ./WebCore_obj/*.o ./ANGLE_obj/*.o ./JavaScriptCore_obj/*.o ./WTF_obj/*.o
rm -rf ./WebKit1_obj/ ./WebCore_obj/ ./ANGLE_obj/ ./JavaScriptCore_obj/ ./WTF_obj/

popd > /dev/null
| true |
c396f8ac1d84b513e9d150ebe07da2263871462e | Shell | xwzliang/daily_tools | /youplay_mp4 | UTF-8 | 752 | 3.171875 | 3 | [] | no_license | #!/usr/bin/env bash
# Download playlist using youtube-dl
# convert to mp4 if the video is not
# Use socks5 proxy
# export https_proxy=socks5://127.0.0.1:1080
# export http_proxy=socks5://127.0.0.1:1080
# $1 = playlist URL to download.
url_playlist="$1"
# # Parallelism can be already be delegated to xargs -P x -n y where x is the number of parallel downloads and y is the number of URLs/IDs to pass to youtube-dl, most likely 1.
# n=0
# until [ $n -ge 10 ]
# do
#     youtube-dl -j --flat-playlist "$url_playlist" | jq -r '"https://youtu.be/"+ .url' | xargs -n 1 -P 6 youtube-dl -ciw --write-sub --write-auto-sub --format "bestvideo+bestaudio[ext=m4a]/bestvideo+bestaudio/best" --merge-output-format mp4 && break
#     n=$((n+1))
#     sleep 15
# done
# youplay and to_mp4 are external helpers (not defined here); download the
# playlist, then convert any resulting mkv files to mp4.
youplay $url_playlist && to_mp4 mkv
| true |
161b239c680a5789029ca06bcc466108935894a1 | Shell | denisidoro/dotfiles | /scripts/storm/upload | UTF-8 | 753 | 3.09375 | 3 | [] | no_license | #!/usr/bin/env bash
set -euo pipefail

# core.sh / main.sh supply the `dot` dispatcher and doc::parse helper.
source "${DOTFILES}/scripts/storm/aux/core.sh"
source "${DOTFILES}/scripts/core/main.sh"

##? Upload
##?
##? Usage:
##?   upload

doc::parse "$@"

# Upload the local buffer for one storage provider, then drop any empty
# directories left behind under ${STORM}/<provider>.
_upload_buffer() {
   local -r provider="$1"
   dot storm run upload-buffer "$provider"
   dot fs remove-empty-dirs "${STORM}/${provider}"
}

# Upload every enabled provider's buffer plus the telegram buffer.
# Failures are counted rather than aborting, so one provider going down
# does not block the others; the count becomes the exit status.
main() {
   local errors=0
   # _upload_buffer pcloud || errors=$((errors+1))
   _upload_buffer box || errors=$((errors+1))
   _upload_buffer alumni || errors=$((errors+1))
   # _upload_buffer gphotos || errors=$((errors+1))
   _upload_buffer vvgphotos || errors=$((errors+1))
   dot storm run upload-telegram-buffer || errors=$((errors+1))
   dot fs remove-empty-dirs "${STORM}/telegram"
   return $errors
}

main "$@"
| true |
d6769bdcd59c08cb7b8642f1c01b3139fa430f41 | Shell | odem23/mps | /scripts/configuration/mps-cfg-snippets.sh | UTF-8 | 481 | 3.59375 | 4 | [] | no_license | #!/bin/bash
# Install the gitcommitpush snippet into "$2/snippets" and hand ownership
# to user "$1".  Each precondition is checked up front (guard-clause
# style); the first failing check prints its message and aborts.
if [[ $# -lt 2 ]] ; then
  echo "Not enough arguments provided! Exiting now.."
  exit 1
fi
if [[ ! -d files/snippets ]] ; then
  echo "Path not valid! Exiting now.."
  exit 1
fi
if [[ ! -d $2 ]] ; then
  echo "DstPath not valid! Exiting now.."
  exit 1
fi
if [[ ! -d $2/snippets ]] ; then
  mkdir -p $2/snippets >/dev/null
fi
cp files/snippets/gitcommitpush.sh $2/snippets
chown $1:$1 -R $2/snippets/
chmod a+x -R $2/snippets/*
| true |
f38888cfab608b21508b1b92d6ce4b18b5102bb0 | Shell | yokochi47/BMSxNmrML | /schema/chebi_luceneidx.sh | UTF-8 | 801 | 3.53125 | 4 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
# Build (or rebuild) the Lucene index for the ChEBI ontology.
OWL_NAME=chebi.owl
DB_FTP=ftp.ebi.ac.uk/pub/databases/chebi/ontology/
GZ_FILE=$OWL_NAME.gz
WGET_LOG=wget.log
CHEBI_OWL_INDEX=chebi_luceneidx
# An index already exists: ask before throwing it away.
if [ -d $CHEBI_OWL_INDEX ] ; then
 echo
 echo "Do you want to update lucene index? (y [n]) "
 read ans
 case $ans in
  y*|Y*) ;;
  *) echo stopped.
   exit 1;;
 esac
 rm -rf $CHEBI_OWL_INDEX
fi
# Mirror the compressed ontology; on failure show the wget log and abort.
# This must be a { ...; } group, not a ( ... ) subshell: an `exit` inside
# a subshell only terminates the subshell, so the original version kept
# running the indexer after a failed download.
wget -c -m ftp://$DB_FTP/$GZ_FILE -o $WGET_LOG || { cat $WGET_LOG; exit 1; }
# wget logs "not retrieving" when the mirrored file is already current;
# if the index also still exists there is nothing to do.
grep 'not retrieving' $WGET_LOG > /dev/null
if [ $? = 0 ] && [ -d $CHEBI_OWL_INDEX ] ; then
 echo $OWL_NAME is update.
 exit 0
fi
# A "No such file" entry means the remote file is missing entirely.
grep 'No such file' $WGET_LOG > /dev/null && exit 1
java -classpath ../extlibs/owl-indexer.jar owl2luceneidx --owl $DB_FTP/$GZ_FILE --idx-dir $CHEBI_OWL_INDEX --xmlns rdfs=http://www.w3.org/2000/01/rdf-schema# --attr rdfs:label
| true |
e5aeb12ce04d3a6460b3fd5e38a5bf6cf33538dc | Shell | New-Sina/Oreomeow_JD_Diy | /shell/user.sh | UTF-8 | 2,930 | 3.65625 | 4 | [] | no_license | #!/usr/bin/env bash
# Detect the runtime layout: /jd = classic JD panel (managed with pm2),
# /ql = qinglong panel (managed with nohup/ps).
if [ -d "/jd" ]; then
    root=/jd
else
    root=/ql
fi
# Paths used throughout: the jbot package, its diy plugin dir, the repo
# dir, the upstream user.py source URL, the installed plugin file and the
# bot configuration JSON.
dir_jbot=$root/jbot
dir_diy=$dir_jbot/diy
dir_repo=$root/repo
url="https://raw.githubusercontent.com/chiupam/JD_Diy/master/jbot/diy/user.py"
file_user=$dir_diy/user.py
bot_set=$root/config/diybotset.json
# Ensure diybotset.json contains a shoptokenId entry; if missing, fill the
# key_1/value_1 template placeholders in-place with sed.
fix() {
    if [ -z "$(grep -E "shoptokenId" $bot_set)" ]
    then echo "没有找到shoptokenId键,自动填写对应值!"
        key_1="shoptokenId"
        sed -i "s/key_1/$key_1/" $bot_set
        value_1="-100123456789"
        sed -i "s/value_1/$value_1/" $bot_set
    fi
}
# Run jbot interactively in the foreground for the Telegram login flow,
# printing the command to restart it in the background afterwards.
start() {
    echo "稍等片刻后,输入手机号(带国家代码)和 Telegram 验证码以完成登录"
    echo "登陆完成后使用 Ctrl + C 退出脚本,并使用以下命令启动 user 监控"
    echo ""
    if [ -d "/jd" ]
    then echo "cd $root;pm2 restart jbot"
    else
        echo "cd $root;nohup python3 -m jbot > /ql/log/bot/bot.log 2>&1 &"
    fi
    echo ""
    cd $root
    python3 -m jbot
}
# Stop the running jbot process (pm2 on /jd, kill-by-pattern on /ql).
stop() {
    cd $root
    if [ -d "/jd" ]
    then pm2 stop jbot
    else
        ps -ef | grep "python3 -m jbot" | grep -v grep | awk '{print $1}' | xargs kill -9 2>/dev/null
    fi
}
# Restart jbot in the background; only meaningful when user.py is installed.
restart() {
    if [ -f $file_user ]
    then cd $root
        if [ -d "/jd" ]
        then pm2 restart jbot
        else
            nohup python3 -m jbot > /ql/log/bot/bot.log 2>&1 &
        fi
    else
        echo "你没有安装 user.py 无法重启!"
    fi
}
# Download user.py into the diy dir and start the login flow.
install() {
    if [ -f $file_user ]
    then echo "你已经安装 user.py 请不要重复安装!"
    else
        stop
        cd $root/jbot/diy
        wget $url
        start
    fi
}
# Remove user.py together with its Telegram session files.
uninstall() {
    if [ -f $file_user ]
    then cd $root/jbot/diy
        rm -f "user.py"
        cd $root
        rm -f "user.session"
        rm -f "user.session-journal"
        echo "卸载 user.py 及相关 session 文件"
    else
        echo "你没有使用 user.py 无法卸载!"
    fi
}
# Replace user.py with the latest upstream copy and restart in background.
update() {
    if [ -f $file_user ]
    then stop
        cd $root/jbot/diy
        rm -f "user.py"
        wget $url
        echo "升级完成,正在重启程序"
        restart
    else
        echo "你没有使用 user.py 无法升级!"
    fi
}
# Like update, but re-runs the interactive login flow afterwards.
reinstall() {
    if [ -f $file_user ]
    then stop
        cd $root/jbot/diy
        rm -f "user.py"
        wget $url
        start
    else
        echo "你没有使用 user.py 无法重新安装!"
    fi
}
# Delete the saved Telegram session and log in again.
relogin() {
    if [ -f $file_user ]
    then stop
        cd $root
        rm -f "user.session"
        rm -f "user.session-journal"
        start
    else
        echo "你没有使用 user.py 无法重新登录!"
    fi
}
# Interactive menu dispatching to the operations above.
main() {
    echo "请选择您需要进行的操作:"
    echo " 1) 安装 user"
    echo " 2) 卸载 user"
    echo " 3) 更新 user"
    echo " 4) 重新安装 user"
    echo " 5) 重新登陆 user"
    echo " 6) 退出脚本"
    echo ""
    echo -n "请输入编号: "
    read N
    case $N in
    1) install ;;
    2) uninstall ;;
    3) update ;;
    4) reinstall ;;
    5) relogin ;;
    6) exit ;;
    *) echo "输入错误!请重新 bash user.sh 启动脚本" ;;
    esac
}
fix
main
55110d1b3f3f1ae0c4c5f7735165820f03198f78 | Shell | tbelmans/dotfiles | /bin/screenshot.sh | UTF-8 | 174 | 3 | 3 | [] | no_license | #!/bin/sh
# Take a screenshot: with "-window" let the user pick a region/window via
# ImageMagick's import; otherwise grab the full screen with scrot.  Both
# variants drop a timestamped PNG into $HOME/ss.
if [ "$1" = "-window" ] ; then
    import -quality 95 "$HOME/ss/$(date +'%Y%m%d-%H%M%S')-sel.png"
else
    # scrot expands $f itself inside the -e command, so that part must
    # stay single-quoted; only $HOME is expanded (now safely quoted).
    scrot '%Y%m%d-%H%M%S-full.png' -m -e 'mv $f '"$HOME"'/ss'
fi
| true |
e7e43278a6f8bc7e51a5c1bacd9bbf9c2f8dc65c | Shell | Siddhant-sarkar/Shell-Scripts | /18.sh | UTF-8 | 193 | 3.296875 | 3 | [] | no_license | #!/bin/bash
# Read a non-negative integer from stdin and print it with its decimal
# digits reversed (e.g. 123 -> 321).

# reverse_number NUM -> echo NUM's digits reversed; empty input yields 0.
# (The original looped at top level and crashed with "[: -gt: unary
# operator expected" when the input was empty.)
reverse_number() {
    local n=${1:-0}
    local rev=0
    while [ "$n" -gt 0 ]
    do
        # Peel off the last digit and append it to the reversed value.
        rev=$(( rev * 10 + n % 10 ))
        n=$(( n / 10 ))
    done
    echo "$rev"
}

echo "Enter the number "
read n
echo "The Reversed number is $(reverse_number "$n")"
| true |
c03a7c8589621dd3e9dddb63a5032d6205d886a5 | Shell | io-monad/dotfiles | /dotfiles/zsh/ap.zsh | UTF-8 | 594 | 3.34375 | 3 | [
"LicenseRef-scancode-public-domain"
] | permissive | # ap (cd to Atom project directory)
#
# require: fzf, atom, cson2json, jq
#
function fzf-get-atom-projects {
cson2json ~/.atom/projects.cson | jq -r ".[].paths[]" | fzf --query="$@" --select-1 --exit-0
}
function ap {
local dir="$(fzf-get-atom-projects "$@")"
[ -n "$dir" ] && cd "$dir"
}
function aap {
local dir="$(fzf-get-atom-projects "$@")"
[ -n "$dir" ] && atom "$dir"
}
function fzf-ap {
local dir="$(fzf-get-atom-projects "$LBUFFER")"
if [ -n "$dir" ]; then
BUFFER="cd $dir"
zle accept-line
else
zle reset-prompt
fi
}
zle -N fzf-ap
bindkey '^T' fzf-ap
| true |
05d30bae72006438bd907092b061270726a5c001 | Shell | mattgstevens/osx.files | /bin/mysql_backup.sh | UTF-8 | 1,630 | 4.5 | 4 | [] | no_license |
# FUNCS
# Create a tar backup
#
# args: from, dest
# from: full path to directory/file to backup
# dest: full path to archive to
# Create a gzipped tar archive of a directory/file.
#
# args: from, dest
#   from: full path to directory/file to backup
#   dest: full path of the archive to write
# Keeps one previous archive around by renaming an existing dest to
# ${dest}_bak.  All expansions are quoted so paths with spaces work.
function backup {
  from="$1"
  dest="$2"
  start_dir=$(pwd)
  if [ -f "$dest" ]; then
    echo "Creating a backup for existing $dest"
    mv "$dest" "${dest}_bak"
  fi
  # Archive from inside the parent directory so the tarball contains only
  # the basename of the source, not its absolute path.
  cd "$(dirname "$from")"
  echo "Archiving $from as $dest"
  tar czf "$dest" "$(basename "$from")"
  echo "Done backup"
  cd "$start_dir"
}
# Keep only the newest n_bak timestamped backup directories under the
# parent of $backup_dir (a global set by the main flow below), deleting
# the oldest ones until the count fits.
function cleanup {
  n_bak=5
  start_dir=`pwd`
  count=$(dirname $backup_dir | xargs ls | wc -l)
  echo "Found $count backups. Settings are to keep $n_bak."
  cd $(dirname $backup_dir)
  for (( ; count > n_bak; count-- )); do
    # Oldest directory = last field of the last line of `ls -lt` filtered
    # to directories.
    # NOTE(review): parsing ls output breaks on names containing spaces
    # and assumes only backup dirs live here -- consider a safer listing.
    nfa=$(ls -lt | grep '^d' | tail -1 | tr " " "\n" | tail -1)
    echo "Removing backup dir $nfa"
    rm -rf $nfa
  done
  printf "Done cleanup\n\n"
  cd $start_dir
}
# Print a message framed by '#' banner lines.
function notify {
  # Pass the message as a %s argument instead of interpolating it into
  # the format string, so characters like '%' or backslashes in $1 are
  # printed literally.
  printf "\n####################\n%s\n####################\n" "$1"
}
# Print a short usage/help message.
function usage {
  echo "What?"
  echo "Will dump and archive a mysql database\n"
  echo "Usage:"
  # A space was missing after 'echo' here, which made the shell try to
  # execute a command literally named 'echo (db_name, ...)' and fail.
  echo " (db_name, db_user, db_password, backup_directory)"
}
# MAIN
# Expected args: db_name db_user db_password backup_directory
if [ $# -lt 4 ]; then
  usage
  exit 1
fi
# One dated subdirectory per run, e.g. <backup_dir>/20240131.
timestamp=`date +%Y%m%d`
printf "\nSTART: db backup ${timestamp}\n"
db_name=$1
db_user=$2
db_pass=$3
backup_dir=$4/${timestamp}
db_dump=${backup_dir}/mysql.dump
db_backup=${backup_dir}/mysql.tar.gz
# Ensure backup dir exists
mkdir -p $backup_dir
notify "Dumping mysql db $db_name as $db_dump"
# NOTE(review): -p$db_pass exposes the password on the process list;
# consider a --defaults-extra-file or MYSQL_PWD instead.
mysqldump $db_name -u $db_user -p$db_pass > $db_dump
notify "Backing up db"
# Archive the raw dump, then discard it -- only the tarball is kept.
backup $db_dump $db_backup
rm $db_dump
notify "Looking to cleanup older backups"
cleanup
printf "END: db backup ${timestamp}\n\n"
| true |
26ae072740bbdccef8b4759e0711387313a4679d | Shell | eibenm/gps-tracks | /src/connect.sh | UTF-8 | 1,336 | 2.796875 | 3 | [] | no_license | # DOCKER COMMANDS _______________________________________________________________________
docker-compose up -d
docker-compose down --rmi all -v
docker container inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' gpstracks_postgres_1
# PERMISSIONS ( LINUX ) _________________________________________________________________
sudo chmod -R 755 LAMP
# RUNNING INTERACTIVE CONTAINER _________________________________________________________
docker container run --rm --name test -it \
-v ~/Documents/gps-tracks/src:/var/www/html \
--network gpstracks_lamp-network \
custom-php /bin/bash
# CONNECT TO EXISTING CONTAINER _________________________________________________________
docker exec -it gpstracks_webhost_1 /bin/bash
docker exec -it gpstracks_react_1 /bin/sh
# Add curl to nginx alpine
# apk add --no-cache curl
curl http://webhost/test.php
# RUNNING PGADMIN 4 CONTAINER ___________________________________________________________
docker pull dpage/pgadmin4
docker run -p 5000:80 \
-e "PGADMIN_DEFAULT_EMAIL=eibenm@gmail.com" \
-e "PGADMIN_DEFAULT_PASSWORD=password" \
--name pgadmin \
-d dpage/pgadmin4
# open web browser in localhost:5000
# NPM ___________________________________________________________
# npm packages used
npm ls
# npm packages that are outdated
npm outdate
| true |
7b4b4003c374c63ba5eabefef50f72919481f1e3 | Shell | liuyang1/test | /course/algs4/04_8puzzle/download.sh | UTF-8 | 242 | 3.28125 | 3 | [
"MIT"
] | permissive | #! /usr/bin/env bash
set -e errexit
DIR="test"
FN="8puzzle-testing.zip"
[[ -d "$DIR" ]] || mkdir "$DIR"
pushd "$DIR"
wget http://coursera.cs.princeton.edu/algs4/testing/8puzzle-testing.zip
unzip "$FN"
rm -rf "$FN"
popd
echo "download $FN"
| true |
68fa836c3c7ff14999816d672b11b3d687956d44 | Shell | sashital-lab/Cas9_specificity | /scripts/process_KM02.sh | UTF-8 | 2,022 | 3.5 | 4 | [] | no_license | #!/bin/bash
module use /work/GIF/software/modules
module load jellyfish
module load bioawk
# Reference 20 nt guide sequence; $1 = FASTQ file to process.
string="GGAAATTAGGTGCGCTTGGC"
file=$1
# extract the sequence
# Take every sequence line of the FASTQ (2nd of each 4-line record), pull
# the 20 nt between the fixed flanking sequences, keep only reads where
# exactly one region of <=20 nt was captured, then tab-separate each base.
sed -n '2~4'p ${file} |\
sed 's/.*TTGCTGTACGAATCGTACAGGGTGCTTCAGGAT\(.\{20\}\)GGGGGTTGGTCAAGCTCGGACATCGTGATTGATAATGC.*/\1/g' |\
awk 'length($1)<21' |\
sed -e 's/\(.\)/\1\t/g' > ${file%.*}_extracted_cas.txt
reads=$(sed -n '2~4'p ${file} |wc -l)
extracted=$(cat ${file%.*}_extracted_cas.txt |wc -l)
# print summary
echo "total reads in file is $reads, extracted $extracted"
# generate positional frequency matrix
# requires datamash
module load GIF/datamash
# Header row: sample name plus the reference base at each position.
header=$(echo $string | sed -e 's/\(.\)/\1\t/g')
echo -e "${file%.*}\t${header}" > ${file%.*}_pos-summary.txt
# Per position 1..20, count A/C/G/T occurrences (N dropped), then
# transpose so rows are bases and columns are positions.
for i in {1..20}; do
datamash -s groupby $i count $i < ${file%.*}_extracted_cas.txt |\
grep -v "^N" | \
cut -f 2 |\
paste - - - - ;
done |\
datamash transpose |\
paste <(echo -e "A\nC\nG\nT") - >> ${file%.*}_pos-summary.txt
# calculate mismatch stats
# Pair the reference with each extracted read, run the external
# mismatch counter, then tally how many reads have each mismatch count.
sed 's/\t//g' ${file%.*}_extracted_cas.txt |\
awk -v x=${string} '{print x"\t"$0}' > ${file%.*}_mmfile.txt
/work/GIF/arnstrm/gitdirs/common_scripts/mismatch-counter.sh ${file%.*}_mmfile.txt > ${file%.*}_mm-stats.txt
cut -f 2 ${file%.*}_mm-stats.txt |\
sort -k1,1 -rn |\
uniq -c |\
awk '{print $2"\t"$1}' |\
sort -k1,1 -n > ${file%.*}_mismatches_summary.txt
#sed 's/\t//g' ${file%.*}_extracted_cas.txt | awk '{print ">"NR"\n"$1}' > ${file%.*}_extracted_cas.fa
#jellyfish count -m 20 -s 100M -t 12 -C ${file%.*}_extracted_cas.fa -o ${file%.*}_extracted_cas.jf
#jellyfish dump ${file%.*}_extracted_cas.jf > ${file%.*}_counts_dumps.fa
#bioawk -c fastx '{print $seq"\t"$name}' ${file%.*}_counts_dumps.fa | sort --parallel 10 -k2,2 -rn > ${file%.*}_freq.txt
#rm ${file%.*}_extracted_cas.txt ${file%.*}_mmfile.txt ${file%.*}_extracted_cas.jf ${file%.*}_counts_dumps.fa
# Frequency table of unique extracted sequences, then drop intermediates.
sed 's/\t//g' ${file%.*}_extracted_cas.txt | sort --parallel 10 | uniq -c | awk '{print $2"\t"$1}' > ${file%.*}_freq.txt
rm ${file%.*}_extracted_cas.txt ${file%.*}_mmfile.txt
| true |
7d31e68c158c4a1d83d354f83c3f480d0b9c9418 | Shell | NorwegianClimateCentre/noresm2cmor | /legacy/script/cice2cmor.sh | UTF-8 | 4,121 | 3.40625 | 3 | [] | no_license | #!/bin/sh -e
### Customized parameters ######################################################
CASENAME=${1:-N1850AERCNOC_f19_g16_CTRL_02}
CMIPNAME=${2:-piControl}
YEAR1=${3:-0} # set to 0 if all available years are to be processed
YEARN=${4:-99999} # set to 99999 if all available years are to be processed
GROUP=${5:-ALL} # selection of groups e.g. "MON-DAY"
FIELD=${6:-ALL} # either "ALL" or a cmip variable name e.g. "sic"
BASEDIR=${7:-/norstore/project/norclim/noresm/cases}
CMIPDIR=${8:-/norstore/project/norclim/noresm/cmor}
################################################################################

# Derived parameters
ROOTDIR=`dirname $0`/..
WORKDIR=${ROOTDIR}/run_wrapper/work_${CMIPNAME}_${CASENAME}_ice_${YEAR1}-${YEARN}_${GROUP}_${FIELD}

# Accumulation intervals in years
ACCMON=300
ACCDAY=10

# Load modules
module load netcdf udunits

# Create dirs and change to work dir; abort explicitly if the work dir is not
# reachable (the shebang's -e is lost when the script is run as "sh script").
mkdir -p "$WORKDIR" "$CMIPDIR"
cd "$WORKDIR" || exit 1

# Prepare input namelist: build a sed command file that rewrites the i/o base
# directories ('/' is mapped to ':' and back so the paths survive the s///
# delimiters) and enables verbose output.
rm -f cice2cmor_template.nml
echo "/ibasedir/s/=.*/= '"`echo ${BASEDIR} | sed -e 'y/\//:/'`"',/" > sedcmd
echo "/obasedir/s/=.*/= '"`echo ${CMIPDIR} | sed -e 'y/\//:/'`"',/" >> sedcmd
echo "y/:/\//" >> sedcmd
echo "/verbose/s/=.*/= .true.,/" >> sedcmd
cat $ROOTDIR/namelist/glb_cice2cmor.nml | sed -f sedcmd >> cice2cmor_template.nml
echo >> cice2cmor_template.nml
echo >> cice2cmor_template.nml
cat $ROOTDIR/namelist/exp_${CMIPNAME}.nml | sed \
-e "/casename/s/=.*/= '${CASENAME}',/" >> cice2cmor_template.nml
echo >> cice2cmor_template.nml
echo >> cice2cmor_template.nml
# Uncomment ("!!" -> "  ") either all variables or only the requested FIELD.
# "=" instead of the bash-only "==": this script runs under /bin/sh.
if [ "$FIELD" = "ALL" ]
then
cat $ROOTDIR/namelist/var_cice2cmor.nml | sed \
-e "s/\!\!/\ \ /g" >> cice2cmor_template.nml
else
cat $ROOTDIR/namelist/var_cice2cmor.nml | sed \
-e "/'${FIELD}[ ']/s/\!\!/\ \ /g" >> cice2cmor_template.nml
fi
# Auxiliary function: determine the first (YEAR11, also copied to Y1) and last
# (YEARNN) available year, and the first/last month (M1/M2), for output group
# $1 (history stream suffix such as "h" or "h1"), clamped to the requested
# YEAR1..YEARN window.  The year is parsed from field 4 (dot-separated) of the
# history file names.
set_time_range()
{
YEAR11=`ls $BASEDIR/$CASENAME/ice/hist | grep "\.$1\." | \
cut -d. -f4 | cut -d"-" -f1 | head -1`
# No matching files -> sentinel 99999 keeps the processing loops from running.
if [ -z $YEAR11 ]
then
YEAR11=99999
elif [ $YEAR11 -lt $YEAR1 ]
then
YEAR11=$YEAR1
fi
#
# "tail -2 | head -1" picks the second-to-last listed year (presumably to
# skip a final, possibly incomplete history file -- confirm).
YEARNN=`ls $BASEDIR/$CASENAME/ice/hist | grep "\.$1\." | \
cut -d. -f4 | cut -d"-" -f1 | tail -2 | head -1`
if [ -z $YEARNN ]
then
YEARNN=0
elif [ $YEARNN -gt $YEARN ]
then
YEARNN=$YEARN
fi
#
Y1=$YEAR11
#
# Single-year range: read the actual first/last month from the file listing.
# NOTE(review): the months are taken from atm/hist while the years come from
# ice/hist -- presumably intentional, but worth confirming.
if [ $YEAR11 -eq $YEARNN ]
then
M1=`ls $BASEDIR/$CASENAME/atm/hist | grep "\.$1\." | grep "\.${YEARNN}" | \
cut -d. -f4 | cut -d"-" -f2 | head -1`
M2=`ls $BASEDIR/$CASENAME/atm/hist | grep "\.$1\." | grep "\.${YEARNN}" | \
cut -d. -f4 | cut -d"-" -f2 | tail -1`
else
M1=01
M2=12
fi
}
# Process monthly data: run cice2cmor over the available years in chunks of
# ACCMON years, enabling the Omon/OImon tables in the namelist.
# POSIX-safe conditions ("=" and "grep -q") since this script runs under
# /bin/sh; $((...)) replaces expr.
if [ "$GROUP" = "ALL" ] || echo "$GROUP" | grep -q MON
then
set_time_range h
echo "Process monthly data for the years "${YEAR11}-${YEARNN}
while [ "$Y1" -le "$YEARNN" ]
do
YN=$(($Y1 + $ACCMON - 1))
if [ "$YN" -gt "$YEARNN" ]
then
YN=$YEARNN
fi
# customize namelist for this chunk of years
cat cice2cmor_template.nml | sed \
-e "/year1/s/=.*/= $Y1,/" \
-e "/yearn/s/=.*/= $YN,/" \
-e "/month1/s/=.*/= $M1,/" \
-e "/monthn/s/=.*/= $M2,/" \
-e "/do_omon/s/=.*/= .true.,/" \
-e "/do_oimon/s/=.*/= .true.,/" > cice2cmor.nml
# run cmor
$ROOTDIR/bin/cice2cmor 2>&1
Y1=$(($Y1 + $ACCMON))
done
fi

# Process daily data: same as above, using the h1 stream, ACCDAY-year chunks
# and the "day" table.
if [ "$GROUP" = "ALL" ] || echo "$GROUP" | grep -q DAY
then
set_time_range h1
echo "Process daily data for the years "${YEAR11}-${YEARNN}
while [ "$Y1" -le "$YEARNN" ]
do
YN=$(($Y1 + $ACCDAY - 1))
if [ "$YN" -gt "$YEARNN" ]
then
YN=$YEARNN
fi
# customize namelist for this chunk of years
cat cice2cmor_template.nml | sed \
-e "/year1/s/=.*/= $Y1,/" \
-e "/yearn/s/=.*/= $YN,/" \
-e "/month1/s/=.*/= $M1,/" \
-e "/monthn/s/=.*/= $M2,/" \
-e "/do_day/s/=.*/= .true.,/" > cice2cmor.nml
# run cmor
$ROOTDIR/bin/cice2cmor 2>&1
Y1=$(($Y1 + $ACCDAY))
done
fi

# Remove work directory (":?" guards against WORKDIR being unset/empty).
cd ..
rm -rf "${WORKDIR:?}"
| true |
7af62427879438ed1cab62bcca21700268f32ae7 | Shell | dominik-lekse/dotfiles | /zshrc.d/my_addons/aliases | UTF-8 | 2,740 | 2.75 | 3 | [] | no_license | # vim: set ft=zsh:
# theme
alias light="base16_solarized-light"
alias dark="base16_solarized-dark"

# replace htop with bottom
alias htop="btm -b"

# convenient way to print $PATH, one entry per line.
# Single quotes so $PATH is expanded when the alias runs; the previous
# double-quoted version froze PATH at the moment this file was sourced.
alias path='echo $PATH | tr : "\n"'

# overwrite essential stuff
alias which="which -a"
alias ping="gping"
alias df="duf"

# exa
alias exa="exa --icons --group-directories-first -F --ignore-glob='**/.git|**/.DS_Store'"
alias ls="exa"
alias l="ls -lah -g --git"
alias lt="ls -lah -T -L=3 --git --git-ignore"
alias tree="exa -G -T --git-ignore"

# use alternative init rather than SpaceVim for the moment
alias vim="nvim -u ~/.config/my-quick-nvim/init.lua"
# once the transition is complete the line will change back to `alias vim="nvim"`
alias vi="vim"
alias vimdiff="nvim -dR"
alias diff="git diff --no-index"
alias less="less -r"

# commandify
alias touchreadme="touch README.md"
alias weather='curl wttr.in'
alias shebang='echo "#!/usr/bin/env sh"|pbcopy;echo "#!/usr/bin/env sh"'

# git
alias gs="git status"
alias tgs="tig status"
alias gd="git diff"
alias gdc="git diff --cached"
alias gl="git pull --rebase"

# viewer and filter
alias cat='bat'
alias k9s='TERM=xterm COLORTERM=256bit k9s'
alias rg="rg -p -i" # ripgrep
case "$(uname -s)" in
Darwin)
alias glow="~/dotfiles/bin/mac/glow.sh"
;;
*)
alias glow="glow -p" # glow
;;
esac
alias fzfp='fzf --height 100% --preview "bat --style=numbers --color=always {}"'
alias fzfpp='fzf|fpp'

# diff with bat ("$@" quoted so file names with spaces survive)
function bdiff () { diff -u "$@" | bat;} # use `vimdiff` for editing

# shorthands
alias dc="docker-compose"
alias mp='multipass'
alias tf='terraform'
alias wttr='curl wttr.in'

# web search
alias googler="googler -n 3"
alias ddgr="ddgr -n 3"

# quickly jump via fasd - https://github.com/junegunn/fzf/wiki/examples#with-fasd-1
unalias j 2>/dev/null
j() {
[ $# -gt 0 ] && fasd_cd -d "$*" && return
local dir
dir="$(fasd -Rdl "$1" | fzf -1 -0 --no-sort +m)" && cd "${dir}" || return 1
}

# to measure zsh performance - https://blog.mattclemente.com/2020/06/26/oh-my-zsh-slow-to-load.html
timezsh() {
shell=${1-$SHELL}
for i in $(seq 1 10); do /usr/bin/time $shell -i -c exit; done
}
alias fulltimezsh="FORCE_LOAD_MY_ZSH_STUFF=1 timezsh"

## legacy
#
# alias ccat="ccat -G Keyword=\"darkgreen\" -G Plaintext=\"ellow\" -G Tag=\"fuchsia\" -G HTMLAttrName=\"darkteal\" -G Decimal=\"yellow\" -G Punctuation=\"lightgray\" --color=always"
#
# alias gmacs="open -a /Applications/Emacs.app"
# alias cmacs="/Applications/Emacs.app/Contents/MacOS/bin/emacsclient -c -n ."
#
# alias clj-repl='docker run -i -t pandeiro/lein repl'
#
# alias muxl="mux local"
# alias muxk="tmux kill-session -t"
# alias muxle="vi .tmuxinator.yml"
# alias muxs="mux start"
# alias muxe="mux open"
| true |
64fa9a09990ca0f55962bfeb2b3cf19f83925642 | Shell | joyoyoyoyoyo/docker-hadoop | /bin/entrypoint.sh | UTF-8 | 1,748 | 3.359375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -eo pipefail
# Setup environment: hadoop-init.sh presumably defines HADOOP_HOME,
# HADOOP_CONF_DIR, HDFS_NAMENODE_DIRS and the check_java helper used
# throughout this script -- confirm there.
source /opt/docker/libexec/hadoop-init.sh
# Succeed (0) only when every directory in the comma-separated list $1
# already contains a "current" subdirectory (i.e. has been formatted);
# fail (1) at the first directory that does not.
check_namenode_dirs() {
  local dir
  for dir in $(echo $1 | tr ',' ' '); do
    test -d "$dir/current" || return 1
  done
  return 0
}
# Format the namenode storage dirs on first boot (no "current" subdir yet),
# then start the HDFS namenode daemon.
start_namenode() {
if ! check_namenode_dirs $HDFS_NAMENODE_DIRS; then
$HADOOP_HOME/bin/hdfs namenode -format dmx-hdfs
sleep 5
fi
$HADOOP_HOME/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs start namenode
check_java
}
# Start the YARN resourcemanager daemon.
start_resourcemanager() {
$HADOOP_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR start resourcemanager
check_java
}
# Start the MapReduce job history server.
start_historyserver() {
$HADOOP_HOME/sbin/mr-jobhistory-daemon.sh --config $HADOOP_CONF_DIR start historyserver
check_java
}
# Combined worker: HDFS datanode plus YARN nodemanager.
start_slavenode() {
$HADOOP_HOME/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs start datanode
$HADOOP_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR start nodemanager
check_java
}
# HDFS datanode only.
start_datanode() {
$HADOOP_HOME/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs start datanode
check_java
}
# YARN nodemanager only.
start_nodemanager() {
$HADOOP_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR start nodemanager
check_java
}
# Dispatch on the first argument to the matching start_* helper; any other
# command line is exec'd verbatim so the container can run arbitrary commands.
# "$@" is quoted (the original "exec $@" word-split and re-globbed arguments
# containing spaces or wildcards).
main() {
  case "$1" in
    "namenode")
      start_namenode
      ;;
    "resourcemanager")
      start_resourcemanager
      ;;
    "historyserver")
      start_historyserver
      ;;
    "slavenode")
      start_slavenode
      ;;
    "datanode")
      start_datanode
      ;;
    "nodemanager")
      start_nodemanager
      ;;
    *)
      exec "$@"
      # Only reached if exec itself fails; propagate its status.
      exit $?
      ;;
  esac
}

main "$@"
| true |
e42dc17c7862708770bd41a2699a5506c42855c5 | Shell | ArtRozhe/self-checking | /docker/start.sh | UTF-8 | 439 | 2.84375 | 3 | [] | no_license | #!/usr/bin/env bash
# Build the application image and run it interactively, forwarding the host
# proxy settings and mounting the project root at /app.
# Fix: -e https_proxy previously forwarded ${http_proxy} by mistake.
C_IMAGE_NAME='self-checking-app';

SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )";

docker build . -t "${C_IMAGE_NAME}" --build-arg http_proxy="${http_proxy}" --build-arg https_proxy="${https_proxy}"
docker run -it --network="bridge" \
  -p3000:3000 \
  --rm \
  -u "$(id -u):$(id -g)" \
  -e http_proxy="${http_proxy}" \
  -e https_proxy="${https_proxy}" \
  -v "${SCRIPT_DIR}/../:/app" \
  "${C_IMAGE_NAME}"
| true |
1af75c2b60f2101b525da76b7e6df68f3d51628e | Shell | toniPortillo/gIng | /proAdSis/bash2/comparacionCadenas.sh | UTF-8 | 1,401 | 3.21875 | 3 | [] | no_license | #!/bin/bash
# Interactive demonstration of bash string-comparison operators on two
# strings read from the user.
# Fix: the original repeated the "-z $s1" block four times; the first three
# copies had an else-branch that (wrongly) reported on s2 instead of s1.
# One correct length-zero test per string remains.
read -p "Introduzca la cadena s1: " s1
read -p "Introduzca la cadena s2: " s2

# == / != : equality and inequality
if [ "$s1" == "$s2" ]
then
  echo "Las cadenas s1 y s2 son iguales"
else
  echo "Las cadenas s1 y s2 son distintas"
fi

if [ "$s1" != "$s2" ]
then
  echo "Las cadenas s1 y s2 son distintas"
else
  echo "Las cadenas s1 y s2 son iguales"
fi

# [ "$s" ] : true when the string is non-empty
if [ "$s1" ]
then
  echo "La cadena s1 no esta vacia"
else
  echo "La cadena s1 esta vacia"
fi

if [ "$s2" ]
then
  echo "La cadena s2 no esta vacia"
else
  echo "La cadena s2 esta vacia"
fi

# -n : true when the string has length > 0
if [ -n "$s1" ]
then
  echo "La cadena s1 tiene longitud >0"
else
  echo "La cadena s1 tiene longitud 0"
fi

if [ -n "$s2" ]
then
  echo "La cadena s2 tiene longitud >0"
else
  echo "La cadena s2 tiene longitud 0"
fi

# -z : true when the string has length 0
if [ -z "$s1" ]
then
  echo "La cadena s1 tiene longitud 0"
else
  echo "La cadena s1 tiene longitud >0"
fi

if [ -z "$s2" ]
then
  echo "La cadena s2 tiene longitud 0"
else
  echo "La cadena s2 tiene longitud >0"
fi

# [[ == pattern ]] : prefix test
if [[ "$s1" == "$s2"* ]]
then
  echo "La cadena s1 empieza por s2"
else
  echo "La cadena s1 no empieza por s2"
fi
| true |
608a8d3b1a65ec462be1a6958e073ee75e4eddb7 | Shell | TracyBallinger/GPSeq_git | /scripts/run_depmix.sh | UTF-8 | 1,599 | 3.0625 | 3 | [] | no_license | #!/bin/bash
#$ -N depmix
#$ -cwd
#$ -j y
#$ -l h_rt=5:00:00 # This takes about 20 minutes with the whole genome, and 3 states. It takes more than 2 hours for the whole genome with more than 5 states.
#$ -l h_vmem=10G
# Run this via:
# qsub -v GPSEQDATA=gps_data.txt -v PARAMS=params.txt -v NSTATES=nstates -v OUTPUT=out_prefix run_depmix.sh
# The files out_prefix_fmod.rdf and out_prefix_params.txt will be created.
# GPSeq_data.txt should have the following columns:
# <chr><start><end><ID><min1><min5><min10><min15><min30><on>
# params.txt should have the quantiles to break the umicounts into,
# the labels for the quantiles, and the number of states of the HMM.
# Ex: This is what should go into a param file that has 4 quantiles,
# one that is the bottom 75% labeled "L",
# one that is the top 25-10%, labeled "M",
# the top 10%, "H", and the top 1%, "VH"
# There are six states.
# param.txt
# ---------------------------------
# 0.75 0.90 0.99
# L M H VH
# 6
# ---------------------------------
unset MODULEPATH
. /etc/profile.d/modules.sh
module load igmm/apps/R/3.5.0
# Line 1 of PARAMS holds the quantile breakpoints, line 2 their labels.
# Spaces become ':' so each list travels as a single Rscript argument.
QUANTILES=`sed -n 1p $PARAMS | tr ' ' ':'`
QLABELS=`sed -n 2p $PARAMS | tr ' ' ':'`
# NSTATES=`sed -n 3p $PARAMS`
# NOTE(review): QLABELS is computed but never used below -- confirm whether
# run_depmix.R should also receive it.
# Record the run parameters (quantiles, full params file, NSTATES and the
# exact Rscript command) next to the output for reproducibility.
head -1 $PARAMS > $OUTPUT"_params.txt"
echo $NSTATES | cat $PARAMS - >> $OUTPUT"_params.txt"
scripts=/home/tballing/bioinfsvice/GPSeq/scripts
echo "Rscript $scripts/run_depmix.R $GPSEQDATA $OUTPUT $QUANTILES $NSTATES" >> $OUTPUT"_params.txt"
echo "Rscript $scripts/run_depmix.R $GPSEQDATA $OUTPUT $QUANTILES $NSTATES"
Rscript $scripts/run_depmix.R $GPSEQDATA $OUTPUT $QUANTILES $NSTATES
| true |
5d1f5708f5e0c80bb354e23ab1bb5180bffe46da | Shell | denil1111/JSint | /compare | UTF-8 | 117 | 2.703125 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# Compare two files ($1 and $2); print a green check mark when identical and
# a red "Failed" otherwise.
# printf replaces echo: under #!/bin/sh, echo's handling of \033 escape
# sequences varies between implementations (dash interprets them, bash does
# not), so the colored output was not portable.
if diff -- "$1" "$2" >/dev/null ; then
  printf '\033[1;32m √ \033[0m\n'
else
  printf '\033[1;31m Failed \033[0m\n'
fi
e735c2a138e3bb2e755ae75260f7736d01dee83a | Shell | MVSE-Outreach/Coordination | /RPi-Setup/arduino/install_arduino_ide.sh | UTF-8 | 692 | 3.46875 | 3 | [] | no_license | #!/usr/bin/env bash
# Download the Arduino IDE archive, unpack it under the pi user's home and
# run the bundled installer.
ARDUINO_PACKAGE_URL="https://www.dropbox.com/s/m4pp9stjhsrc0nf/arduino-1.6.5-arm_wheezy.tar.xz"
ARDUINO_BASE_INSTALL_PATH=/home/pi/arduino
ARDUINO_PACKAGE_NAME=arduino-1.6.5.xz
# Fix: must be one quoted value.  The original unquoted
# "PACKAGES=wget xz-utils" assigned PACKAGES=wget and then tried to run
# "xz-utils" as a command.
PACKAGES="wget xz-utils"

# $PACKAGES is intentionally unquoted so apt-get sees separate package names.
sudo apt-get install $PACKAGES

cd /tmp || exit 1
wget -c "$ARDUINO_PACKAGE_URL" -O "$ARDUINO_PACKAGE_NAME"
xz -d "$ARDUINO_PACKAGE_NAME"

mkdir -p "$ARDUINO_BASE_INSTALL_PATH"
# Extract only if the IDE directory is not already present.
if [ ! -d "${ARDUINO_BASE_INSTALL_PATH}/${ARDUINO_PACKAGE_NAME%%.xz}" ]; then
  tar -xvf "${ARDUINO_PACKAGE_NAME%%.xz}" -C "$ARDUINO_BASE_INSTALL_PATH"
  chmod +x "$ARDUINO_BASE_INSTALL_PATH/${ARDUINO_PACKAGE_NAME%%.xz}/install.sh"
fi

"$ARDUINO_BASE_INSTALL_PATH/${ARDUINO_PACKAGE_NAME%%.xz}/install.sh"
| true |
fc693165fe6487a3fe964ece9d907ae057885623 | Shell | capoony/popgentools | /users/martin/individual_data/identify_alleles.sh | UTF-8 | 5,633 | 3.359375 | 3 | [] | no_license | ########################################################### identification of alleles of individuals from indivdual sequencing ###############################
### The following pipeline describes my strategy to extract the sire allele of F1 hybrids between a male of a population of interest and a female of the isogenic strain mel36. The input has to be a sync file with the reference strain mel36 as first population followed by the iF1 individuals. The script extract_consensus.py is used to create an output, which loosely resembles a pileup file and contains the alleles of all individuals. The procedure is rather complex and I would recommend reading the extensive help of the script before the first usage.
## E.g. you have a sync file with the reference strain and 11 individuals sequenced and you want to extract the sire alleles, your command line should look like this:
python /popgentools/users/martin/individual_data/extract_consensus.py --input individuals.sync --min-coverage 20 --max-coverage 0.05 --min-count 20 --CI-mode individual --output output_file
## the outputfile output_file.af can for example be used to visualize the distribution of the allele frequencies in the indivduals (which should cluster around 0.5) by plotting histograms in R like this:
echo '''
data=read.table("output_file.af")
par(mfrow=c(2,6))
for (i in 2:length(data)){
hist(data[,i])
}
''' > output_hist.r
Rscript output_hist.r
## the outputfile: output_file.consensus can now be used for further downstream analyses:
############################################################################### FST and pi ####################################################################
## For example, I wrote a script very specifically for my inversion project, but it might be also useful for some of you. The script FST4inversions.py calculates FST and pi for different combination of individuals. In my case, I knew for each indivdual, which inversion it was carrying. Therefore, I was able to group them according to the inversion. E.g. from the 11 indivduals 4 are carrying inversion In(2L)t. Therefore, I can calculate pi for this group and for the other 7, which are not carrying the inversion. Then, I can calculate FST between these two groups similarily to PoPoolation2. For some SNPs allelic information is not available for all individuals. Therefore, you have to define the minimum number individuals for which allelic information has to be available to perform the calculations (e.g. 3). See the help in the script for further information. In our example the command line would look like this:
python /popgentools/users/martin/individual_data/FST4inversions.py --input output_file.consensus --min-count 3 --in2lt 2,3,6,10 --all 0,1,2,3,4,6,7,8,10 --names 52,53,80,100,117,136,150,168,106 --output output
## I wrote another script to bin the values of the former analysis in non-overlapping windows for a better visual representation. This script can be used either on the FST or the pi output. You will need the header of a SAM file (usually called inh.sam) to provide total length of the chromosomal arms. Again, this script is very specific, but perhaps helpful for somebody....
python /popgentools/users/martin/individual_data/binning.py --input output_fstpi.fst --length inh.sam --data fst --window-size 10000 --output output_10k_fst
## and
python /popgentools/users/martin/individual_data/binning.py --input output_fstpi.pi --length inh.sam --data pi --window-size 10000 --output output_10k_pi
## now this data can be for example visualized using R, assuming that the FST value for In(3R)Mo is in column 3 in the output_10k_fst_values.txt and the pi values for In(3R)Mo are in column 3 and for all other indivduals in column 4 in output_10k_pi_values.txt:
echo '''
data=read.table("output_10k_fst_values.txt")
data1=read.table("output_10k_pi_values.txt")
pdf("output_10k_3R.pdf",width=15,height=10)
par(cex=2)
plot(data[data$V1=="3R",2],data[data$V1=="3R",6],type="l",main="3R",ylim=c(0,1),xlim=c(0,30000000),xlab="distance",ylab="Pi/FST",lwd=2)
points(data1[data1$V1=="3R",2],data1[data1$V1=="3R",7],type="l",col="red")
points(data1[data1$V1=="3R",2],data1[data1$V1=="3R",9],type="l",col="blue")
rect(15922589,0,28234649,1,col="transparent",border="black",lwd=3,lty=3)
rect(17058538,0,24780914,1,col="transparent",border="black",lwd=3,lty=1)
rect(12401910,0,20580518,1,col="transparent",border="black",lwd=3,lty=4)
dev.off()
''' > output_10k_3R.r
Rscript output_10k_3R.r
## here pi for In(3R)Mo is red, for all other indviduals blue, FST is black and the three inversions on 3R are black boxes with different line types
######################################################################### LD ##########################################################################################
## finally there is a script to calculate pairwise r^2 (as a measurement of LD) for a set of SNPs along a chromsomal arm. This script named LD_heatmap.py produces a tabular output of the distance matrix and PNG file containing a Heatmap of all pairwise comparisons with r^2 highlighted in color. The LDheatmap R Package needs to be installed. It can be found here: http://cran.r-project.org/web/packages/LDheatmap/index.html.see the help within the script for more details. Lets assume we want to test the LD on 2L for 4 indivduals carrying In(2L)t using 500 SNPs randomly picked along the chromosome. Then your commandline should look like this:
python /popgentools/users/martin/individual_data/LD_heatmap.py --input output_file.consensus --ind 2,3,6,10 --subsample 500 --chromosome 2L --output output_2L
| true |
1a7db911219265d948c5f80c97ee080bf749ac35 | Shell | runhyve/vm-webhooks | /vm-status.sh | UTF-8 | 428 | 3.78125 | 4 | [
"BSD-2-Clause"
] | permissive | #!/usr/local/bin/bash
# Print the state of a virtual machine as JSON.
# check_vm, get_vm_status, report_error/report_success and the "error" ERR
# trap handler are presumably defined in commons.sh -- confirm there.
. commons.sh
trap error ERR
# Exactly one argument (the VM name) is required.
if [ -z "$1" ]; then
echo "Usage: $0 <name>" > /dev/stderr
echo "Example: $0 FreeBSD-VM" > /dev/stderr
exit 2
fi
name="$1"
if ! check_vm "$name"; then
report_error "Virtual machine ${name} doesn't exist"
fi
# $? after the assignment is the exit status of get_vm_status itself.
status="$(get_vm_status "$name")"
if [ $? -ne 0 ]; then
report_error "Error getting status of vm ${name}."
else
# jo renders state=<value> as a JSON object.
report_success "$(jo state="$status")"
fi
| true |
515d566a98253c1deb44102ea22d229932a08a57 | Shell | nordugrid/arc | /src/tests/lrms/command-simulator.sh | UTF-8 | 2,834 | 4.15625 | 4 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | #!/bin/bash
#
# Script to simulate a given command. Read 'instructions' from outcome file, and
# acts accordingly. Each time this script is run, the outcome file is checked
# for content. If no content exist, script outputs ${SUCCESS_OUTPUT} and exits
# with code 0. Otherwise the first line from the outcome file is read,
# 'eval'-ed and then removed from the file.
# NOTE(review): the code below does not appear to remove lines from the
# outcome file -- the sentence above may be stale; confirm.
#
# SIMULATOR_OUTCOME_FILE, SIMULATOR_ERRORS_FILE and SUCCESS_OUTPUT are
# presumably exported by the test harness; the first two fall back to files
# in the current directory.
if test ! -f ${SIMULATOR_OUTCOME_FILE}; then
SIMULATOR_OUTCOME_FILE="simulator-outcome.dat"
fi
if test -z ${SIMULATOR_ERRORS_FILE}; then
SIMULATOR_ERRORS_FILE="simulator-errors.dat"
fi
# Lookup key: the name this script was invoked as, plus all its arguments.
cmd="$(basename ${0})"
cmd_n_args="${cmd} ${*}"
if test -f "${SIMULATOR_OUTCOME_FILE}"; then
# Extract rargs from outcome file in order to be able to do regular expression matching.
# If rargs matches cmd_n_args, use rargs as key at lookup in outcome file.
while read rargs; do
# rargs is itself a sed address; "q 100" makes sed exit with status 100
# when it matches the command line.
echo "${cmd_n_args}" | sed -n -e "${rargs} q 100"
if test ${?} == 100 && test "x${rargs}" != "x"; then
# Use rargs as key.
cmd_n_args="${rargs}"
break
fi
done <<< "$(sed -n -e "/^[[:space:]]*rargs=\"[^\"]*\"/ s/^[[:space:]]*rargs=\"\([^\"]*\)\".*/\1/ p" "${SIMULATOR_OUTCOME_FILE}")"
# Do not pipe output into while loop, because it creates a subshell, and then setting cmd_n_args has no effect.
# Escape special characters so they are not interpreted by sed at lookup.
cmd_n_args="$(echo "${cmd_n_args}" | sed -e 's/[][\/$*.^|]/\\&/g' -e 's/[(){}]/\\\\&/g')"
# Lookup cmd_n_args in outcome file, and return corresponding options.
# The embedded sed program collects the sleep=/rc=/output= lines that follow
# the matching (r)args entry.  Heredoc-style output (output=<<<TAG ... TAG)
# is rewritten into a 'read -r -d "" output <<"TAG"' command so the eval
# below can assign it.  sed exits 100 on a successful match, 50 on a syntax
# problem in the outcome file.
outcome="$(sed -n -e '
/^[[:space:]]*r\?args="'"${cmd_n_args}"'"/ {
:ARGS n
/^[[:space:]]*\(#\|$\)/ b ARGS
/^[[:space:]]*\(sleep\|rc\)=/ {p; b ARGS}
/^[[:space:]]*output=<<<\([[:alnum:]]\+\)/ {
:OUTPUT N
/output=<<<\([[:alnum:]]\+\)\n.*\n\1$/ ! b OUTPUT
s/output=<<<\([[:alnum:]]\+\)\(\n.*\n\)\1/read -r -d "" output <<"\1"\2\1/ p
q 100
}
/^[[:space:]]*output="[^\"]*"/ {p; q 100}
q 50
}' ${SIMULATOR_OUTCOME_FILE})"
sed_rc=${?}
if test ${sed_rc} == 50; then
printf "Syntax error in simulator outcome file ('%s') options for arguments '%s'\n" "${SIMULATOR_OUTCOME_FILE}" "${cmd_n_args}" >> ${SIMULATOR_ERRORS_FILE}
exit 1
fi
if test "${cmd}" == "sleep" && test ${sed_rc} != 100; then
# Do not sleep - only if instructed to in SIMULATOR_OUTCOME_FILE
exit 0
fi
if test ${sed_rc} != 100; then
echo "Command '${cmd} ${@}' was not expected to be executed." >> ${SIMULATOR_ERRORS_FILE}
exit 1
fi
# Apply the collected options; this may set the output, sleep and rc vars.
eval "${outcome}"
if test ! -z "${output+yes}"; then
echo "${output}"
else
echo "${SUCCESS_OUTPUT}"
fi
if test ! -z "${sleep+yes}"; then
/bin/sleep ${sleep}
fi
if test ! -z "${rc+yes}"; then
exit ${rc}
else
exit 0
fi
else
echo "${SUCCESS_OUTPUT}"
exit 0
fi
| true |
afe17f31045ff76f11f07543bfb7988c55cefbcf | Shell | sassoftware/iot-esp-and-omnio-integration | /scripts/startup.sh | UTF-8 | 1,097 | 3.140625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#Copyright © 2019, SAS Institute Inc., Cary, NC, USA. All Rights Reserved.
#SPDX-License-Identifier: Apache-2.0

# Docker container startup: remove any existing solarfarm-demo container,
# start a fresh one, then restart the IoT Toolbox dashboard.
# Fixes over the original: the whole start sequence was duplicated in both
# branches of the if (cleanup is now conditional, start path is shared), and
# "docker ps -all" (parsed as -a -l -l) is corrected to --all.
id=$(docker ps --all -q --filter name=solarfarm-demo)
if [[ -n $id ]]; then
  echo "###Please wait for the container cleanup###"
  docker stop "$id"
  docker rm "$id"
fi
echo "###Starting the ESP docker container###"
docker run -d --net=host --name=solarfarm-demo -v ~/SV:/db docker.sas.com/razsha/solarfarm_demo_master
echo "Starting the IoTtool Box dashboard. Access the dashboard at iotshelf02:8090"
pkill -f "java -jar ./toolbox-0.1.3.jar"
cd ~/IoT_Toolbox || exit 1
./IoT_Toolbox.sh > toolbox.out 2>&1 &
| true |
e584aacc745cc8bd8b0649451d3f6d5a105a882a | Shell | Team5553-RoboLyon/LyonVision-pi-gen | /stage3/02-add-vision-service/00-run.sh | UTF-8 | 283 | 2.90625 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash -e
# Install the service so the system can run it.
# ROOTFS_DIR and on_chroot are provided by the surrounding build environment
# (presumably pi-gen stage tooling -- confirm).
install -m 644 files/vision.service "${ROOTFS_DIR}/etc/systemd/system"
# Run inside the target image's chroot so systemd registers the unit there.
on_chroot << EOF
# Reload the system services
sudo systemctl daemon-reload
# Enable our vision service on startup
sudo systemctl enable vision
EOF
| true |
6997d3161735b4da50b5dfa375f4f064d98f2902 | Shell | kazubu/ezztp | /setup.sh | UTF-8 | 2,377 | 3.8125 | 4 | [] | no_license | #!/bin/bash
# Pre-install sanity checks.  Each check prints its result and exits the
# whole script with a distinct status code on failure.  All command
# substitutions inside [ ] are quoted so the tests do not break when a
# command prints nothing (the original unquoted backticks did).

# Abort (status 1) unless the OS is Linux.
check_linux(){
  echo -n "Checking OS : "
  if [ "$(uname)" != "Linux" ]; then
    echo 'This script could be execute under Linux Environment.'
    exit 1
  fi
  echo "Running with $(uname)"
}

# Abort (status 2) unless the running interpreter is /bin/bash.
check_bash(){
  echo -n "Checking Shell : "
  if [ "$(readlink /proc/$$/exe)" = '/bin/bash' ]; then
    echo 'Running with bash.'
    return 0
  fi
  echo 'This script reqires running with bash.'
  exit 2
}

# Abort (status 3) unless running as root (EUID, falling back to UID).
check_root(){
  echo -n "Checking User : "
  if [ "${EUID:-${UID}}" -ne 0 ]; then
    echo 'This script requires root privilege.'
    exit 3
  fi
  echo "This script running as root."
}

# Report whether $1 exists; abort the script (status 4) when it is missing.
check_exist_file(){
  fname="$1"
  if [ -e "$fname" ];then
    echo "${fname} is exist."
    return 0
  fi
  echo "${fname} is not exist."
  exit 4
}
# Abort on the first failing command from here on.
set -e
# Work from the directory containing this script.
cd `dirname $0`
check_linux
check_bash
check_root
echo "Checking required files... : "
check_exist_file ./sysconf/nginx/default
check_exist_file ./sysconf/init.d/ezztp-ftp
check_exist_file ./sysconf/init.d/ezztp-dhcpd
echo "OK."
aptitude update
aptitude safe-upgrade -Ry
# Ruby/Nginx/Passenger Configuration
aptitude install -Ry curl build-essential python-software-properties ruby-dev libsqlite3-dev
# Register the Phusion Passenger apt repository (key + source list) only if
# it is not already present; "grep ... ||" makes each step idempotent.
grep passenger /etc/apt/sources.list.d/passenger.list || gpg --keyserver keyserver.ubuntu.com --recv-keys 561F9B9CAC40B2F7
grep passenger /etc/apt/sources.list.d/passenger.list || gpg --armor --export 561F9B9CAC40B2F7 | sudo apt-key add -
aptitude install -Ry apt-transport-https
grep passenger /etc/apt/sources.list.d/passenger.list || echo 'deb https://oss-binaries.phusionpassenger.com/apt/passenger trusty main' >> /etc/apt/sources.list.d/passenger.list
chown root: /etc/apt/sources.list.d/passenger.list
chmod 600 /etc/apt/sources.list.d/passenger.list
aptitude update
aptitude install -Ry nginx-full passenger
# Enable the passenger_root / passenger_ruby lines shipped commented-out in
# the default nginx.conf.
sed -i "s/# passenger_root/passenger_root/g" /etc/nginx/nginx.conf
sed -i "s/# passenger_ruby/passenger_ruby/g" /etc/nginx/nginx.conf
cp ./sysconf/nginx/default /etc/nginx/sites-available/default
# Create the application user (idempotent) and deploy a fresh copy of the app.
grep ezztp /etc/passwd || useradd -m -s /sbin/nologin ezztp
rm -rf /home/ezztp/ezztp
cp -rf ./webapp/ezztp /home/ezztp/
pushd /home/ezztp/ezztp
gem install bundler
bundle install
chown -R ezztp:ezztp /home/ezztp/ezztp
popd
/etc/init.d/nginx stop
/etc/init.d/nginx start
gem install --no-ri --no-rdoc ftpd rest-client
# Install the init scripts for the FTP and DHCP services.
cp ./sysconf/init.d/ezztp-ftp /etc/init.d/ezztp-ftp
cp ./sysconf/init.d/ezztp-dhcpd /etc/init.d/ezztp-dhcpd
| true |
1217daa9e8c027c529a12920eaac014e9f793379 | Shell | thohal/openqrm | /trunk/src/plugins/solx86/client/etc/init.d/openqrm-client.solx86 | UTF-8 | 4,648 | 3.140625 | 3 | [] | no_license | #!/bin/bash
# This script starts/stops the remote-exec and monitoring subsystem for openQRM
#
# This file is part of openQRM.
#
# openQRM is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2
# as published by the Free Software Foundation.
#
# openQRM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with openQRM. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2009, Matthias Rechenburg <matt@openqrm.com>
#

# The resource config provides the resource_* and openqrm_* variables used
# throughout this script.
OPENQRM_RESOURCE_PARAMETER_FILE="/var/openqrm/openqrm-resource.conf"
export LANGUAGE=C
export LANG=C
export LC_ALL=C
if [ ! -f "$OPENQRM_RESOURCE_PARAMETER_FILE" ]; then
	# Fix: error message typo "Cloud not find" -> "Could not find".
	echo "ERROR: Could not find the openQRM configuration file $OPENQRM_RESOURCE_PARAMETER_FILE"
	echo "ERROR: Is this system already integrated with openQRM ?"
	exit 1
fi
. $OPENQRM_RESOURCE_PARAMETER_FILE

# currently static list of plugin with boot-services for solaris/opensolaris systems
OPENQRM_PLUGINS_FOR_SOLARIS="zfs-storage"

# define wget to use with https
if [ "$openqrm_web_protocol" == "https" ]; then
	WGET_NO_CERT_CHECK="--no-check-certificate"
fi

case "$1" in

	'start')
		echo "Starting the openQRM-client"
		# generate a new dropbear host key for the remote-exec subsystem
		mkdir -p $resource_basedir/openqrm/etc/dropbear/
		rm -f $resource_basedir/openqrm/etc/dropbear/dropbear_rsa_host_key
		$resource_basedir/openqrm/bin/dropbearkey -t rsa -f $resource_basedir/openqrm/etc/dropbear/dropbear_rsa_host_key
		# get the public key from the openQRM server
		if ! wget -q $WGET_NO_CERT_CHECK $openqrm_web_protocol://$resource_openqrmserver/openqrm/boot-service/openqrm-server-public-rsa-key; then
			echo "ERROR: Could not get the public key of the openQRM-server at $resource_openqrmserver ! Please check the certificates !"
			exit 1
		fi
		# authorize the server key for root logins via dropbear
		if [ ! -d /root/.ssh ]; then
			mkdir -p /root/.ssh
			chmod 700 /root/.ssh
		fi
		mv -f openqrm-server-public-rsa-key /root/.ssh/authorized_keys
		chmod 600 /root/.ssh/authorized_keys
		# start dropbear server
		$resource_basedir/openqrm/sbin/dropbear -p $resource_execdport -r $resource_basedir/openqrm/etc/dropbear/dropbear_rsa_host_key
		# start openqrm-monitord
		$resource_basedir/openqrm/sbin/openqrm-monitord.solx86 &
		# fetch and start the boot-service of every enabled Solaris plugin
		for SOL_PLUGIN in $OPENQRM_PLUGINS_FOR_SOLARIS; do
			if echo $openqrm_plugins | grep $SOL_PLUGIN 1>/dev/null; then
				echo "NOTICE: Getting boot-service for plugin $SOL_PLUGIN"
				mkdir -p $resource_basedir/openqrm/plugins/$SOL_PLUGIN
				cd $resource_basedir/openqrm/plugins/$SOL_PLUGIN
				wget -q $WGET_NO_CERT_CHECK $openqrm_web_protocol://$resource_openqrmserver/openqrm/boot-service/openqrm-$SOL_PLUGIN.solx86.tgz
				gunzip openqrm-$SOL_PLUGIN.solx86.tgz
				tar -xf openqrm-$SOL_PLUGIN.solx86.tar
				rm -f openqrm-$SOL_PLUGIN.solx86.tar
				cd - 1>/dev/null
				# check for init script to start
				if [ -x $resource_basedir/openqrm/plugins/$SOL_PLUGIN/etc/init.d/$SOL_PLUGIN ]; then
					$resource_basedir/openqrm/plugins/$SOL_PLUGIN/etc/init.d/$SOL_PLUGIN start
				fi
			fi
		done
		;;

	'stop')
		echo "Stopping the openQRM-client"
		# stop the Solaris/openSolaris plugins
		for SOL_PLUGIN in $OPENQRM_PLUGINS_FOR_SOLARIS; do
			if echo $openqrm_plugins | grep $SOL_PLUGIN 1>/dev/null; then
				# check for init script to stop
				if [ -x $resource_basedir/openqrm/plugins/$SOL_PLUGIN/etc/init.d/$SOL_PLUGIN ]; then
					$resource_basedir/openqrm/plugins/$SOL_PLUGIN/etc/init.d/$SOL_PLUGIN stop
				fi
			fi
		done
		# stop the dropbear instance listening on the exec port
		DBPID=`ps -ef | grep dropbear | grep $resource_execdport | grep -v grep | awk {' print $2 '}`
		if [ "$DBPID" != "" ]; then
			kill $DBPID
		fi
		killall openqrm-monitord.solx86 1>/dev/null 2>&1
		# in case we do not have killall
		for OPENQRM_PID in `ps -ef 2>/dev/null | grep openqrm-monitord.solx86 | grep -v grep | awk {' print $2 '}`; do
			kill $OPENQRM_PID
		done
		;;

	*)
		echo "Usage: $0 { start | stop }"
		exit 1
		;;

esac
| true |
25c469eb0c8cd666822cc12052d26b02eda09e0a | Shell | williampratt/splunk-installers | /splunk_clean_install.sh | UTF-8 | 1,591 | 3.78125 | 4 | [] | no_license | #!/usr/bin/env bash
# Download, install and start Splunk under /opt/splunk, then register it as a
# boot-start service running as the dedicated splunk user.
WGET_URL="https://d7wz6hmoaavd0.cloudfront.net/products/splunk/releases/8.2.2/linux/splunk-8.2.2-87344edfcdb4-Linux-x86_64.tgz"
FILENAME="splunk-8.2.2-87344edfcdb4-Linux-x86_64.tgz"
DL_PATH="/tmp/$FILENAME"
INSTALL_DIR="/opt"
SPLUNK_HOME="$INSTALL_DIR/splunk"
SPLUNK_USER="splunk"
SYSTEMD_MANAGED=0
# Command string; word-split on purpose where it is executed below.
BOOT_START="$SPLUNK_HOME/bin/splunk enable boot-start -systemd-managed $SYSTEMD_MANAGED -user $SPLUNK_USER"

# The splunk user must already exist.  Fix: "exit 1" instead of the original
# "return 0" -- return is invalid at script top level (bash prints an error
# and keeps going), so the missing-user case previously fell through and ran
# the whole install anyway.
if ! id "$SPLUNK_USER" &> /dev/null; then
  echo "A user '$SPLUNK_USER' needs to exist on the system!!!"
  exit 1
fi

# In case splunk is already installed and running, shut it down before removing it (if it's installed and not running nbd)
if [[ -e $SPLUNK_HOME ]]; then
  echo "Stopping Splunk..."
  "$SPLUNK_HOME/bin/splunk" stop
fi

# Remove everything in and including SPLUNK_HOME if it exists
# (:? aborts instead of expanding to "rm -rf /" territory if the var is empty)
if [[ -e $SPLUNK_HOME ]]; then
  echo "Removing existing $SPLUNK_HOME directory..."
  rm -rf -- "${SPLUNK_HOME:?}"
fi

# Download splunk and extract it into SPLUNK_HOME then delete the install file
echo "Installing Splunk..."
wget -O "$DL_PATH" "$WGET_URL"
tar -zxf "$DL_PATH" -C "$INSTALL_DIR"
chown -R "$SPLUNK_USER:$SPLUNK_USER" "$SPLUNK_HOME"
rm -rf -- "$DL_PATH"

# start splunk and accept the license. It will prompt you for the admin name and password
echo "Starting Splunk..."
su "$SPLUNK_USER" -c "$SPLUNK_HOME/bin/splunk start --accept-license --answer-yes"

echo "Stopping Splunk To Enable Boot Start..."
"$SPLUNK_HOME/bin/splunk" stop

echo "Setting enable boot-start..."
$BOOT_START

echo "Starting Splunk..."
su "$SPLUNK_USER" -c "$SPLUNK_HOME/bin/splunk start"

echo "All Done!!!"
| true |
159ceb0d1872141c264b40c26f38e3d301c52640 | Shell | refaktor/tkdis-bash | /sum-amounts | UTF-8 | 437 | 2.859375 | 3 | [] | no_license | #!/bin/bash
# Sum amounts (column 4) grouped by company (column 7) over the output of
# ./sort-by-company; a grand total is printed at the end.  With $1 == "ext"
# every input row is echoed and company names are suppressed in the
# per-group lines.
LIM=$1
# NOTE(review): two suspected defects in the awk program below -- confirm
# against expected output before changing:
#   - "name" is only updated inside the flush branch, so the first input row
#     is flushed as its own group (with an empty name) once row 2 arrives;
#   - the final group is never flushed before END (END prints only "ALL:"),
#     so the last company's subtotal line is missing.
./sort-by-company | awk -F"\t" -v limit=$LIM -v OFS='\t' '
BEGIN { sum = 0; count = 0; name= ""; sumall=0; countall=0 }
{ if (name != $7 && countall > 0) { print "\t", count, sum, limit == "ext" ? "" : name ; if (limit == "ext"){print ""}; name = $7; sum = 0; count = 0; } sumall += $4; sum += $4 ; count += 1; countall += 1; if (limit == "ext") { print }; }
END { print "\tALL:", countall, sumall }
'
9ab41cc6f60783c607ac158b5a91fd5b05e85cdc | Shell | yktakaha4/Open-usp-Tukubai | /TEST/rank.test | UTF-8 | 4,094 | 4.1875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
#!/usr/local/bin/bash -xv # example of an alternative command interpreter
#
# test script of rank
#
# usage: [<test-path>/]rank.test [<command-path> [<python-version>]]
#
#   <test-path>
#       relative path of this script as seen from the current directory,
#       or its absolute path; defaults to the current directory.
#   <command-path>
#       relative path of the command under test as seen from this script's
#       directory, or its absolute path; defaults to this script's
#       directory.  Pass a value (or "" to keep the default) in order to
#       also specify the following <python-version>.
#   <python-version>
#       python interpreter (minor version may be included) to run the
#       command with, e.g. python2, python2.6, python3, python3.4.
#       Plain "python" (case-insensitive) uses the environment's default
#       python; this is also the default when omitted.
name=rank # name of the command under test
testpath=$(dirname $0) # directory containing this script, taken from $0
cd $testpath # move to the directory containing this script
if test "$2" = ""; # if <python-version> ($2) was not given
	then pythonversion="python" # use the default python
	else pythonversion="$2" # otherwise use the requested python version
fi
if test "$1" = ""; # if <command-path> ($1) was not given
	then commandpath="." # the command under test is in the current directory
	else commandpath="$1" # otherwise it is in the requested directory
fi
com="${pythonversion} ${commandpath}/${name}" # command prefix used to run the command under test
tmp=/tmp/$$

# Fail the whole run (print "<python> rank NG", clean up and exit 1) if any
# element of the most recent pipeline returned a non-zero status.
ERROR_CHECK(){
	[ "$(echo ${PIPESTATUS[@]} | tr -d ' 0')" = "" ] && return
	echo $1
	echo "${pythonversion} ${name}" NG
	rm -f $tmp-*
	exit 1
}

###########################################
#TEST1
# with no options the command simply prepends a running line number

cat << FIN > $tmp-in
JPN 杉山______ 26
JPN 崎村______ 27
JPN 梶川______ 27
JPN 柳本______ 30
USA BOB_______ 25
USA GEROGE____ 29
USA SAM_______ 29
USA TOM_______ 35
FIN

cat << FIN > $tmp-out
1 JPN 杉山______ 26
2 JPN 崎村______ 27
3 JPN 梶川______ 27
4 JPN 柳本______ 30
5 USA BOB_______ 25
6 USA GEROGE____ 29
7 USA SAM_______ 29
8 USA TOM_______ 35
FIN

${com} $tmp-in > $tmp-ans
diff $tmp-ans $tmp-out
[ $? -eq 0 ] ; ERROR_CHECK "TEST1 error"

###########################################
#TEST2
# ref= option: numbering restarts for each value of the reference field

cat << FIN > $tmp-out
1 JPN 杉山______ 26
2 JPN 崎村______ 27
3 JPN 梶川______ 27
4 JPN 柳本______ 30
1 USA BOB_______ 25
2 USA GEROGE____ 29
3 USA SAM_______ 29
4 USA TOM_______ 35
FIN

${com} ref=1 $tmp-in > $tmp-ans
diff $tmp-ans $tmp-out
[ $? -eq 0 ] ; ERROR_CHECK "TEST2 error"

###########################################
#TEST3
# key= option: equal key values share the same rank (ties leave gaps)

cat << FIN > $tmp-out
1 JPN 杉山______ 26
2 JPN 崎村______ 27
2 JPN 梶川______ 27
4 JPN 柳本______ 30
5 USA BOB_______ 25
6 USA GEROGE____ 29
6 USA SAM_______ 29
8 USA TOM_______ 35
FIN

${com} key=3 $tmp-in > $tmp-ans
diff $tmp-ans $tmp-out
[ $? -eq 0 ] ; ERROR_CHECK "TEST3 error"

###########################################
#TEST4
# ref= and key= combined

cat << FIN > $tmp-out
1 JPN 杉山______ 26
2 JPN 崎村______ 27
2 JPN 梶川______ 27
4 JPN 柳本______ 30
1 USA BOB_______ 25
2 USA GEROGE____ 29
2 USA SAM_______ 29
4 USA TOM_______ 35
FIN

${com} ref=1 key=3 $tmp-in > $tmp-ans
diff $tmp-ans $tmp-out
[ $? -eq 0 ] ; ERROR_CHECK "TEST4 error"

rm -f $tmp-*
echo "${pythonversion} ${name}" OK
exit 0
| true |
31736f40020b210f306c948d019f1a9a76b1a540 | Shell | kozie/eindbaasje.nl | /docs/vim.sh | UTF-8 | 551 | 2.984375 | 3 | [] | no_license | #!/bin/bash
# Execute with..
# $ bash -c "$(curl -fLsS http://eindbaasje.nl/vim.sh)"

# Stop at the first failed step so we never continue with a
# half-configured vim setup (e.g. a failed download).
set -euo pipefail

# Check if not root: everything below writes into $HOME, so running as
# root would install the config for the wrong user.
if [[ $EUID -eq 0 ]]; then
    echo "Please do not run as root!" >&2
    exit 1
fi

# Download vimrc
curl -fLo ~/.vimrc http://eindbaasje.nl/vimrc

# Download Plug (the vim-plug plugin manager)
curl -fLo ~/.vim/autoload/plug.vim --create-dirs \
    https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim

# Add FZF_DEFAULT_COMMAND using find to bashrc
echo "export FZF_DEFAULT_COMMAND='find * -type f'" >> ~/.bashrc

# Run first setup
vim +PlugInstall +qall
| true |
354d119ac09cc11395f764f18ceb2e225ee8f8a8 | Shell | plone/buildout.jenkins | /templates/code-analysis.sh | UTF-8 | 1,038 | 3.765625 | 4 | [] | no_license | #!/bin/sh
# NOTE: this file is a zc.buildout template. Tokens such as ${title},
# ${log}, ${bin}, ${before}, ${analyse}, ${after} and
# ${buildout:jenkins-directory} are substituted by buildout at install
# time — they are NOT shell parameter expansions.
START=$(date +%s)

# Headline
echo
echo ${title}
echo "--------------------------------------------------------------------------------"

# Remove old logs (directory, regular file, or leftover temp file from
# a previous run).
if [ -d ${buildout:jenkins-directory}/${log} ]; then
    rm -rf ${buildout:jenkins-directory}/${log}
fi
if [ -s ${buildout:jenkins-directory}/${log} ]; then
    rm ${buildout:jenkins-directory}/${log}
fi
if [ -s ${buildout:jenkins-directory}/${log}.tmp ]; then
    rm ${buildout:jenkins-directory}/${log}.tmp
fi

# Bail out early (exit 1) when the analysis binary is not on PATH.
command -v ${bin} >/dev/null 2>&1 || {
    echo >&2 "${bin} not found!";
    echo "Skip ${bin} code analysis. Please make sure ${bin} is installed on your machine."
    exit 1
}

${before}

# Analyse packages
PACKAGES="${buildout:jenkins-test-directories}"
for pkg in $PACKAGES
do
    echo -n "Analyse $pkg "
    ${analyse}
    echo "...done"
done

${after}

# Report wall-clock duration and the location of the generated log.
END=$(date +%s)
echo "--------------------------------------------------------------------------------"
echo "Duration: $((END - START)) seconds."
echo "=> ${buildout:jenkins-directory}/${log}"
| true |
a7b9f126d34a705b585b3b761be448619c71aebb | Shell | MonsterMannen/dotfiles | /.bashrc | UTF-8 | 1,297 | 2.765625 | 3 | [] | no_license | #
# ~/.bashrc
#
# If not running interactively, don't do anything
[[ $- != *i* ]] && return # wtf is this?
# titlebar text
PROMPT_COMMAND='echo -ne "\033]0;terminal\007"'
# Aliases
alias ls='ls --color'
alias grep='grep --color=auto'
alias yt-dl='youtube-dl --extract-audio --audio-format mp3 --restrict-filenames -o "~/Music/%(title)s.%(ext)s" '
alias rm='timeout 3 rm -Iv --one-file-system'
alias reflector-update='reflector --latest 50 --protocol http --protocol https --sort rate --save /etc/pacman.d/mirrorlist'
alias sudo='sudo ' # sudo alias to make sudo work with other aliases
alias please='sudo '
alias fuck='sudo $(history -p !!)'
alias vi='vim'
# PS1 (dir $) # \[ \] around colors to fix wrap bug
PS1='\W\[\e[0;95m\] $\[\e[0m\] '
# colored output in 'man' by using 'less' (some weird color hack)
# Wrapper around man(1) that sets less' termcap color variables for the
# duration of a single invocation, so bold/underline/standout text in
# man pages renders in color. `command man` bypasses this function and
# runs the real binary, avoiding infinite recursion.
man() {
    LESS_TERMCAP_md=$'\e[01;31m' \
    LESS_TERMCAP_me=$'\e[0m' \
    LESS_TERMCAP_se=$'\e[0m' \
    LESS_TERMCAP_so=$'\e[01;44;33m' \
    LESS_TERMCAP_ue=$'\e[0m' \
    LESS_TERMCAP_us=$'\e[01;32m' \
    command man "$@"
}
LS_COLORS='di=1;34:fi=0:ln=1;36:or=31:mi=0:ex=92;1'
export LS_COLORS
# Add script directory to path
PATH=$PATH:/home/viktor/scripts
# temp adding this here
alias chrome='chromium --disable-gpu-driver-bug-workarounds --use-gl=osmesa &>/dev/null &'
| true |
f68978ce2b66648265479ae1c0b792275b1a964b | Shell | andreweacott/dotfiles | /stow/zsh/.zshrc | UTF-8 | 3,357 | 2.78125 | 3 | [] | no_license | # Enable Powerlevel10k instant prompt. Should stay close to the top of ~/.zshrc.
# Initialization code that may require console input (password prompts, [y/n]
# confirmations, etc.) must go above this block, everything else may go below.
if [[ -r "${XDG_CACHE_HOME:-$HOME/.cache}/p10k-instant-prompt-${(%):-%n}.zsh" ]]; then
source "${XDG_CACHE_HOME:-$HOME/.cache}/p10k-instant-prompt-${(%):-%n}.zsh"
fi
# Path to your oh-my-zsh installation.
export ZSH=$HOME/.oh-my-zsh
#Note - this alias must be before 'antigen init' so that solarised ls colors work correctly on OSX
alias dircolors='gdircolors'
source /opt/homebrew/share/antigen/antigen.zsh
antigen init $HOME/.antigenrc
ZSH_CUSTOM=$HOME/oh-my-zsh-custom-scripts
# Uncomment the following line to use case-sensitive completion.
CASE_SENSITIVE="true"
# Uncomment the following line to use hyphen-insensitive completion. Case
# sensitive completion must be off. _ and - will be interchangeable.
# HYPHEN_INSENSITIVE="true"
# Uncomment the following line to enable command auto-correction.
# ENABLE_CORRECTION="true"
# Uncomment the following line to display red dots whilst waiting for completion.
COMPLETION_WAITING_DOTS="true"
# Uncomment the following line if you want to disable marking untracked files
# under VCS as dirty. This makes repository status check for large repositories
# much, much faster.
# DISABLE_UNTRACKED_FILES_DIRTY="true"
# Uncomment the following line if you want to change the command execution time
# stamp shown in the history command output.
# The optional three formats: "mm/dd/yyyy"|"dd.mm.yyyy"|"yyyy-mm-dd"
# HIST_STAMPS="mm/dd/yyyy"
# Which plugins would you like to load? (plugins can be found in ~/.oh-my-zsh/plugins/*)
# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
# Add wisely, as too many plugins slow down shell startup.
plugins=(
gitfast
git-extras
aws
common-aliases
dirhistory
docker
copypath
gnu-utils
history-substring-search
kubectl
)
#Update automatically every 13 days, without asking
zstyle ':omz:update' mode auto
zstyle ':omz:update' frequency 13
source $ZSH/oh-my-zsh.sh
source /opt/homebrew/share/zsh-syntax-highlighting/zsh-syntax-highlighting.zsh
# User configuration
export LS_COLORS="$(vivid generate molokai)"
alias ls="ls -G --color=auto"
bindkey "^[^[[D" backward-word
bindkey "^[^[[C" forward-word
unsetopt SHARE_HISTORY
autoload -U +X bashcompinit && bashcompinit
typeset -g POWERLEVEL9K_INSTANT_PROMPT=quiet
# To customize prompt, run `p10k configure` or edit ~/.p10k.zsh.
[[ ! -f ~/.p10k.zsh ]] || source ~/.p10k.zsh
export PATH="/usr/local/bin:$PATH"
export LDFLAGS="-L/usr/local/opt/openssl@3/lib -L/usr/local/opt/zlib/lib -L/usr/local/opt/bzip2/lib"
export CPPFLAGS="-I/usr/local/opt/openssl@3/include -I/usr/local/opt/zlib/include -I/usr/local/opt/bzip2/include"
[ -s "$HOME/.jabba/jabba_profile.sh" ] && source "$HOME/.jabba/jabba_profile.sh"
alias k="kubectl"
alias wd="cd $HOME/git"
# Load the default SSH key into the agent at shell startup.
ssh-add ~/.ssh/id_rsa
export KUBECONFIG=~/.kube/config
# Source machine-local secrets (tokens etc.) if the file exists and is
# non-empty; otherwise just warn so the shell still starts.
if [[ -s "$HOME/.private_environment" ]]; then
  source "$HOME/.private_environment"
else
  # Add any private environment variables (e.g. secret tokens) to the file to be imported into shell
  echo "No private environment file found (~/.private_environment)"
fi
| true |
3167120bc1cfbc2db31e4ebcd3dc903c93bc4ba3 | Shell | StefanoBelli/my_config_files | /scripts/getblockdev.sh | UTF-8 | 1,587 | 3.828125 | 4 | [] | no_license | #!/bin/sh
LEN_ARG="$#"
BLOCK_DEV=""
BLOCK_DEV_SYS_DIR="/sys/block"
DIALOG_PATH="/usr/bin/dialog"
DIALOG_TITLE="BlockDeviceInfo"
DIALOG_MSG="Device: "
DIALOG_SIZE="10 35"
BLKNAME=""
READONLY=""
REMOVABLE=""
SUBPART=""
# Inspect /sys/block/$BLKNAME and show a dialog(1) summary of the device:
# size, read-only flag, removable flag, and whether it has partitions.
# Globals read:  BLKNAME, BLOCK_DEV, BLOCK_DEV_SYS_DIR, DIALOG_PATH,
#                DIALOG_TITLE, DIALOG_SIZE
# Globals set:   READONLY, REMOVABLE, SUBPART, DIALOG_MSG, DIALOG_SIZE
# Exits 3 when the device directory does not exist under /sys/block.
get()
{
	cd $BLOCK_DEV_SYS_DIR
	if [ -d $BLKNAME ];
	then
		# /sys/block/<dev>/ro and /removable each contain 0 or 1.
		if [ $(cat $BLOCK_DEV_SYS_DIR/$BLKNAME/ro) -eq 0 ];
		then
			READONLY="no"
		else
			READONLY="yes"
		fi
		if [ $(cat $BLOCK_DEV_SYS_DIR/$BLKNAME/removable) -eq 0 ];
		then
			REMOVABLE="no"
		else
			REMOVABLE="yes"
		fi
		# Partitions appear as child directories whose names start with
		# the device name (e.g. sda1 under sda). The previous check
		# (`find $dir -name $BLKNAME`) matched the device directory
		# itself and therefore always reported "yes".
		if [ -n "$(find $BLOCK_DEV_SYS_DIR/$BLKNAME -mindepth 1 -maxdepth 1 -name "${BLKNAME}*" 2>/dev/null)" ];
		then
			SUBPART="yes"
		else
			SUBPART="no"
		fi
		# NOTE(review): /sys .../size appears to be in 512-byte sectors
		# (kernel convention), so the "Bytes" label may be misleading —
		# confirm before relying on it.
		DIALOG_MSG="
Device: $BLOCK_DEV ; $BLKNAME
Size: $(cat $BLOCK_DEV_SYS_DIR/$BLKNAME/size) Bytes
Read-only: $READONLY
Removable: $REMOVABLE
Sub-partitions: $SUBPART
"
		$DIALOG_PATH --title $DIALOG_TITLE --msgbox "$DIALOG_MSG" $DIALOG_SIZE
	else
		DIALOG_MSG="The device $BLKNAME does not exist!"
		DIALOG_SIZE="7 25"
		$DIALOG_PATH --title $DIALOG_TITLE --msgbox "$DIALOG_MSG" $DIALOG_SIZE
		exit 3
	fi
}
# Require dialog(1); without it no UI can be shown.
if [ -f $DIALOG_PATH ];
then
	echo ""
else
	printf "\033[31m*\033[0m Dialog is not installed!\n"
	exit 1
fi

# Expect exactly one argument: the device node (e.g. /dev/sda).
if [ $LEN_ARG -eq 1 ];
then
	BLOCK_DEV="$1"
	# Strip the /dev/ prefix to obtain the sysfs directory name.
	BLKNAME=$(echo $BLOCK_DEV 2>/dev/null | sed "s:/dev/::")
	DIALOG_TITLE="BlockDevice:$BLOCK_DEV"
	DIALOG_MSG="Device: $BLOCK_DEV"
	get
	cd
else
	printf "\033[31m*\033[0mUsage: <$0> [/dev/xxx]\n"
	exit 2
fi
| true |
b541b98525bc51e32b34726d4f70a54e319d193b | Shell | antomarsi/dotfiles | /install.sh | UTF-8 | 1,343 | 3.0625 | 3 | [] | no_license | #!/bin/bash
# One-shot workstation bootstrap for a Fedora-based system: installs
# base packages via dnf, vim-plug, Docker, powerline (status line +
# fonts), then copies the dotfiles from this repository into $HOME.
# Expects to be run from the repository root (relative vim/, bash/,
# tmux/ paths).
function install() {
  SUDO=$(which sudo 2>/dev/null)
  # don't execute where it doesn't have (running from a container)
  if [ -z $SUDO ] ; then
    SUDO=""
  fi
  # Base packages.
  $SUDO dnf install -y \
    vim \
    tmux \
    git \
    --best
  # vim-plug plugin manager.
  curl -fLo ~/.vim/autoload/plug.vim --create-dirs \
    https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim
  # Docker via the official convenience script.
  curl -fsSL get.docker.com -o get-docker.sh
  sh get-docker.sh
  $SUDO rm get-docker.sh
  # GNOME window buttons on the left (close,maximize,minimize).
  gsettings set org.gnome.desktop.wm.preferences button-layout 'close,maximize,minimize:'
  # Powerline status line plus its symbol font and fontconfig rule.
  pip install --user powerline-status
  wget https://github.com/powerline/powerline/raw/develop/font/PowerlineSymbols.otf
  wget https://github.com/powerline/powerline/raw/develop/font/10-powerline-symbols.conf
  mkdir -p ~/.fonts/ && mv PowerlineSymbols.otf ~/.fonts/
  mkdir -p ~/.config/fontconfig/conf.d/ && mv 10-powerline-symbols.conf ~/.config/fontconfig/conf.d/
  $SUDO fc-cache -vf ~/.fonts/
  # Dotfiles and powerline themes from this repository.
  cp vim/.vimrc ~/.vimrc
  cp bash/.bashrc ~/.bashrc
  mkdir -p ~/.config/powerline/themes/tmux
  cp tmux/default.json ~/.config/powerline/themes/tmux/default.json
  mkdir -p ~/.config/powerline/themes/shell
  cp bash/default.json ~/.config/powerline/themes/shell/default.json
  mkdir -p ~/.tmux
  cp tmux/.tmux.conf ~/.tmux/.tmux.conf

  #create signature file
  echo $'Created by \n\tRafael Mees' >~/.signature
}
install
| true |
bf507448346c468b1f68b08db404c9d013106548 | Shell | delkyd/alfheim_linux-PKGBUILDS | /banshee-community-extensions/PKGBUILD | UTF-8 | 3,491 | 2.53125 | 3 | [] | no_license | # Maintainer: Willem Mulder <14mRh4X0r@gmail.com>
pkgname=banshee-community-extensions
pkgver=2.4.0
pkgrel=2
pkgdesc="Banshee Community Extensions is a repository and project for extensions to the Banshee \
media player that are community contributed and maintained."
arch=('i686' 'x86_64')
url="http://banshee.fm/"
license=('GPL2')
# Banshee and its dependencies (from https://github.com/GNOME/banshee/tree/stable-2.4)
depends=('banshee>=2.4.0' 'mono>=2.4.3' 'sqlite>=3.4' 'gstreamer0.10>=0.10.26'
'gtk-sharp-2>=2.12.10' 'dbus-sharp>=0.7' 'dbus-sharp-glib>=0.5' 'mono-addins>=0.6.2'
'taglib-sharp>=2.0.3.7')
makedepends=('lsb-release' 'gnome-doc-utils>=0.17.3'
# AppIndicator
'libappindicator-sharp' 'notify-sharp'
# LastfmFingerprint and Mirage
'fftw>=3' 'libsamplerate'
# LIRC
'lirc'
# Telepathy
'empathy>=2.27.91' 'telepathy-gabble>=0.9' 'telepathy-mission-control>=5.3.1')
checkdepends=() # nunit is included in mono
optdepends=(#'banshee-youtube: for banshee with youtube extension enabled'
'libappindicator-sharp: For the AppIndicator extension'
'notify-sharp: For the AppIndicator extension'
'fftw>=3: For the LastFM Fingerprint and Mirage extensions'
'libsamplerate: For the LastFM Fingerprint and Mirage extensions'
'gstreamer0.10-base>=0.10.15: For the LastFM Fingerprint and Mirage extensions'
'gstreamer0.10-base-plugins>=0.10.15: For the LastFM Fingerprint and Mirage extensions'
'lcdproc: For the LCDproc extension'
'lirc: For the LIRC extension'
'empathy>=2.27.91: For the Telepathy extension'
'telepathy-gabble>=0.9: For the Telepathy extension'
'telepathy-mission-control>=5.3.1: For the Telepathy extension')
source=(http://download.banshee.fm/${pkgname}/${pkgver}/${pkgname}-${pkgver}.tar.bz2)
sha256sums=('6f20404de80090bb5d88a57c043e876a192f480ac3488ce7697344f1447d48b3')
# Standard Arch PKGBUILD build step: configure with every community
# extension that has packaged dependencies, then compile.
build() {
  cd ${pkgname}-${pkgver}

  # Flag rationale:
  # - Packages for ClutterFlow are not available
  # - OpenVP fails checks
  # - Packages for Zeitgeist are not available
  ./configure --prefix=/usr \
    --with-vendor-build-id="$(lsb_release -ds | sed -r 's/^"(.*)"$/\1/')" \
    --enable-gnome \
    --enable-schemas-install \
    --enable-alarmclock \
    --enable-albumartwriter \
    --enable-ampache \
    --enable-appindicator \
    --enable-awn \
    --disable-clutterflow \
    --enable-coverwallpaper \
    --enable-duplicatesongdetector \
    --enable-foldersync \
    --enable-jamendo \
    --enable-karaoke \
    --enable-lastfmfingerprint \
    --enable-lcd \
    --enable-lirc \
    --enable-lyrics \
    --enable-liveradio \
    --enable-magnatune \
    --enable-mirage \
    --disable-openvp \
    --enable-radiostationfetcher \
    --enable-randombylastfm \
    --enable-streamrecorder \
    --enable-telepathy \
    --disable-zeitgeistdataprovider \
    --enable-tests \
    --disable-shave \
    DMCS=/usr/bin/mcs
  make
}

# Install the built tree into the package staging directory ($pkgdir).
package() {
  cd ${pkgname}-${pkgver}
  make DESTDIR=$pkgdir/ install
}

# Run the upstream test suite (enabled above via --enable-tests).
check() {
  cd ${pkgname}-${pkgver}
  make check
}
| true |
63f1690813a0a648a83a7f0646394fda693b243b | Shell | brunston/object-tracking-blogpost | /install_cvBlob.sh | UTF-8 | 333 | 2.515625 | 3 | [] | no_license | #!/bin/bash
# Build and install cvBlob 0.10.4 from the archived Google Code tarball.
# Abort on the first failure so we never run cmake/make in the wrong
# directory after a failed download or cd.
set -e

echo "--- Installing cvBlob..."
wget -O cvblob-0.10.4-src.tgz https://storage.googleapis.com/google-code-archive-downloads/v2/code.google.com/cvblob/cvblob-0.10.4-src.tgz
tar xzvf cvblob-0.10.4-src.tgz
cd cvblob/
mkdir -p build   # -p: do not fail when a previous run already created it
cd build
cmake ..
# The stray "/usr/local/lib" argument in the original command was a
# meaningless extra make target; the install prefix is controlled by
# cmake, not by make's arguments.
sudo make install
echo "--- Installation of cvBlob complete."
| true |
7f16ac902e711f1980834804da8746b06daf5582 | Shell | mmehali/keycloak-IAM | /Install/x509_keystore.sh | UTF-8 | 3,341 | 3.453125 | 3 | [] | no_license | #!/bin/bash
INSTALL_SRC=/vagrant
KEYSTORES_DIR=/opt/keycloak/standalone/configuration/keystores

echo "--------------------------------------------------------"
echo " Copier la cle privée tls.key et le certificat tls.crt "
echo " dans /etc/x509/https "
echo "--------------------------------------------------------"
# Stage the TLS key/cert where the keystore generation below expects them.
if [ ! -d "/etc/x509/https" ]; then
    mkdir -p /etc/x509/https
fi
cp ${INSTALL_SRC}/certificats/tls.key /etc/x509/https/
cp ${INSTALL_SRC}/certificats/tls.crt /etc/x509/https/

echo "---------------------------------------------------------------"
echo " Creation du keystore ${KEYSTORES_DIR}/https-keystore.pk12 "
echo "---------------------------------------------------------------"
if [ ! -d "${KEYSTORES_DIR}" ]; then
    mkdir -p "${KEYSTORES_DIR}"
fi

# Auto-genérer un keystore https servant des certificats x509
if [ -f "/etc/x509/https/tls.key" ] && [ -f "/etc/x509/https/tls.crt" ]; then
    echo "- Genérer et encoder un mot de passe de 32 caracteres"
    PASSWORD=$(openssl rand -base64 32 2>/dev/null)

    # Fixed: message previously read "keysrore ${KEYSTORES_DIT}" — a typo
    # plus an undefined variable that expanded to nothing.
    echo "- Creation du keystore ${KEYSTORES_DIR}/https-keystore.pk12"
    echo "sudo openssl pkcs12 -export -name keycloak-https-key -inkey /etc/x509/https/tls.key -in /etc/x509/https/tls.crt -out ${KEYSTORES_DIR}/https-keystore.pk12 -password pass:"${PASSWORD}" >& /dev/null"
    sudo openssl pkcs12 -export -name keycloak-https-key -inkey /etc/x509/https/tls.key -in /etc/x509/https/tls.crt -out ${KEYSTORES_DIR}/https-keystore.pk12 -password pass:"${PASSWORD}" >& /dev/null

    echo "- Importer le keystore java dans ${KEYSTORES_DIR}/https-keystore.jks"
    echo "keytool -importkeystore -noprompt -srcalias keycloak-https-key -destalias keycloak-https-key -srckeystore ${KEYSTORES_DIR}/https-keystore.pk12 -srcstoretype pkcs12 -destkeystore ${KEYSTORES_DIR}/https-keystore.jks -storepass "${PASSWORD}" -srcstorepass "${PASSWORD}" "
    # Fixed: the original had a misplaced quote (`…jks"-storepass "`),
    # which glued "-storepass" onto the -destkeystore path and passed the
    # password as a bare word, so the JKS import always failed.
    sudo keytool -importkeystore -noprompt -srcalias keycloak-https-key -destalias keycloak-https-key -srckeystore ${KEYSTORES_DIR}/https-keystore.pk12 -srcstoretype pkcs12 -destkeystore ${KEYSTORES_DIR}/https-keystore.jks -storepass "${PASSWORD}" -srcstorepass "${PASSWORD}" >& /dev/null

    if [ -f "${KEYSTORES_DIR}/https-keystore.jks" ]; then
        echo "keystore https crée avec succes : ${KEYSTORES_DIR}/https-keystore.jks"
    else
        echo "Impossible de creer le keystore https, verifier les permissions: ${KEYSTORES_DIR}/https-keystore.jks"
    fi

    # Append the keystore parameters, run the CLI script against
    # standalone.xml, then swap the configuration_file line and repeat
    # for standalone-ha.xml. Each `sed -i '$ d'` drops the last line so
    # the next configuration_file value can replace it.
    echo "- Ajouter les parametres du keystore ci-dessous dans /opt/keycloak/bin/.jbossclirc"
    echo "set keycloak_tls_keystore_password=${PASSWORD}" >> "/opt/keycloak/bin/.jbossclirc"
    echo "set keycloak_tls_keystore_file=${KEYSTORES_DIR}/https-keystore.jks" >> "/opt/keycloak/bin/.jbossclirc"
    echo "set configuration_file=standalone.xml" >> "/opt/keycloak/bin/.jbossclirc"
    sudo /opt/keycloak/bin/jboss-cli.sh --file=${INSTALL_SRC}/cli/x509-keystore.cli >& /dev/null
    sed -i '$ d' "/opt/keycloak/bin/.jbossclirc"
    echo "set configuration_file=standalone-ha.xml" >> "/opt/keycloak/bin/.jbossclirc"
    /opt/keycloak/bin/jboss-cli.sh --file=${INSTALL_SRC}/cli/x509-keystore.cli >& /dev/null
    sed -i '$ d' "/opt/keycloak/bin/.jbossclirc"
fi
| true |
7bd5ba276596043cd94c9ec582d87e02886bf6a1 | Shell | tprk77/dotfiles | /dictionary/push_wordlist.sh | UTF-8 | 693 | 3.453125 | 3 | [] | no_license | #!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
SCRIPT_DIR="$(dirname "$(readlink -f "${0}")")"
WORDLIST_TXT="${SCRIPT_DIR}/wordlist.txt"
ASPELL_DICT="${HOME}/.aspell.en.pws"
# For Firefox, assume we only care about the first profile
FIREFOX_PROFILE=$(
find "${HOME}/.mozilla/firefox/" -maxdepth 1 -type d -name "*.default" | head -n 1)
FIREFOX_DICT=$(if [ -n "${FIREFOX_PROFILE}" ]; then echo "${FIREFOX_PROFILE}/persdict.dat"; fi)
# Write wordlist to Aspell dictionary
echo "personal_ws-1.1 en 0" > "${ASPELL_DICT}"
cat "${WORDLIST_TXT}" >> "${ASPELL_DICT}"
# Write to Firefox dictionary
if [ -n "${FIREFOX_DICT}" ]; then
cat "${WORDLIST_TXT}" > "${FIREFOX_DICT}"
fi
| true |
377ca4c1e0559a23deb89df247d25bc6ba2a7efd | Shell | wjohn1483/Audio_to_Scene_Image_Generator | /soundnet/scripts/check_exist_image.sh | UTF-8 | 377 | 3.6875 | 4 | [] | no_license | #!/bin/bash
image_dir=./images_size64/
file=./training_data_list_more.txt
output_file=./training_data_exist.txt

# Start from a clean output file so reruns don't append duplicates.
if [ -f $output_file ]; then
    rm $output_file
fi

# Keep only the entries from $file whose corresponding .jpg exists in
# $image_dir. The basename is taken from the 6th '/'-separated path
# component with its extension stripped — assumes a fixed path depth in
# the training list; TODO confirm against the list format.
while read -r line
do
    echo "$line"
    filename=`echo "$line" | cut -d '/' -f 6 | cut -d '.' -f 1`
    if [ -f $image_dir/$filename.jpg ]; then
        echo "$filename" >> $output_file
    fi
done < $file
| true |
898d44ba7b15a65bd62f189fd2bd977a1d4b3625 | Shell | UserXGnu/void | /stp/cwm/thm-generate | UTF-8 | 690 | 3.15625 | 3 | [] | no_license | #!/usr/bin/env sh
# Compute the brightness (0-100) of a 50px-high strip of the wallpaper
# and write it to /tmp/status-brightness (consumed by a status bar —
# presumably; verify against the caller). If $WALLPAPER is unset or
# missing, a solid color preview is rendered from the first palette
# entry instead.
wc="${WALLPAPER:-/tmp/wallcolor.png}"
wr='/tmp/wall-resized'
w1='/tmp/wall-1x1'

if test -f "${WALLPAPER}"; then
	# Use the real wallpaper's full width.
	w=$(convert "${WALLPAPER}" -format "%w" info:)
else
	# No wallpaper: render a 50px-wide swatch from the palette's first color.
	w=50
	c=$(cat "${PALETTE}" | head -n 1)
	#bg=$(printf '%s\n' "${c}" | head -n 1)
	printf '%s\n' "${c}" | pal-preview "${wc}"
fi
# Crop a centered w x 50 strip, then collapse it to a single pixel whose
# value is the mean of the strip.
convert "${wc}" -gravity Center -crop "${w}x50+0+0" +repage "${wr}"
#convert "${wc}" -crop "${w}x50+0+0" +repage "${wr}"
#convert "${wc}" -crop "${w}x50+0+50" +repage "${wr}"
convert "${wr}" -resize 1x1! "${w1}"
# fx:mean is in [0,1]; scale to an integer percentage with bc.
m=$(convert "${w1}" -format "%[fx:mean]" info:)
b=$(echo "scale=0; ${m} * 100 / 1" | bc -s)
printf '%s' "${b}" > /tmp/status-brightness
rm "${wr}" "${w1}"
| true |
42295060103f370d7a84129fcfa9818da360cd50 | Shell | vtolstov/cloudagent | /build | UTF-8 | 1,134 | 3.578125 | 4 | [
"MIT"
] | permissive | #!/bin/bash -x
# Build script for cloudagent: embeds version/build-time via ldflags,
# bootstraps Go 1.5 locally if the system Go is a different version,
# then cross-compiles static (netgo) binaries into ./bin.
ORG_PATH="github.com/vtolstov"
REPO_PATH="${ORG_PATH}/cloudagent"

# Version string from git and build timestamp, injected into the qga
# package via -X ldflags below.
VERSION=`git describe --long`
BUILD_TIME=`date +%FT%T%z`
LDFLAGS="-X github.com/vtolstov/cloudagent/qga.Version=${VERSION} -X github.com/vtolstov/cloudagent/qga.BuildTime=${BUILD_TIME}"

# Fresh output/work directories and a throwaway GOPATH for the build.
rm -rf bin tmp
export GO15VENDOREXPERIMENT=1
export GOPATH=$(pwd)/gopath
mkdir -p $GOPATH
mkdir -p bin
mkdir -p tmp

# If the installed Go is not 1.5, download a private Go 1.5.2 toolchain
# into ./goroot and prepend it to PATH.
go version | grep -q go1.5
if [ "x$?" != "x0" ]; then
export GOROOT=$(pwd)/goroot
export PATH=$GOROOT/bin:$PATH
mkdir -p $GOROOT
wget https://storage.googleapis.com/golang/go1.5.2.linux-amd64.tar.gz -O tmp/go.tar.gz
tar --strip-components=1 -C $GOROOT -xf tmp/go.tar.gz
fi

# Symlink the repository into the throwaway GOPATH so the import path
# github.com/vtolstov/cloudagent resolves to this checkout.
if [ ! -h $GOPATH/src/${REPO_PATH} ]; then
mkdir -p $GOPATH/src/${ORG_PATH}
ln -s ../../../.. $GOPATH/src/${REPO_PATH} || echo "exit 255"
fi
set -e
# Build 64-bit and 32-bit binaries per target OS (only linux enabled).
for os in linux; do
#netbsd windows freebsd openbsd
GOOS=${os} GOARCH=amd64 go build -v -ldflags "${LDFLAGS}" -tags netgo -o bin/qemu-ga-${os}-x86_64 ${REPO_PATH}
GOOS=${os} GOARCH=386 go build -v -ldflags "${LDFLAGS}" -tags netgo -o bin/qemu-ga-${os}-x86_32 ${REPO_PATH}
done
| true |
c81287d89273f05c891a823006db4b61d37e7681 | Shell | rjw1/github-branch-renamer | /gbr | UTF-8 | 6,040 | 4.125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
set -e
BOLD=$(tput bold)
NORMAL=$(tput sgr0)
# Print the full help/usage text to stdout, then terminate with status 1.
usage() {
	cat <<EOF
Rename the 'master' branch for an entire account at once.

This tool will find all repositories on GitHub belonging to an account that:

 - aren't forks
 - aren't archived or disabled
 - have their default branch set to 'master'

It will then rename the master branch to 'main' by default, and set that as the
default branch on GitHub instead of 'master'.

By default, the 'master' branch will be left untouched to prevent any accidental
data loss. If you'd like to delete those branches, set the '--delete' flag.

You must choose to either do a dry run ('--dry-run') or force the changes
('--force'). You must also specify one of '--org' or '--user' (but not both) to
select the account to collect repositories from.

${BOLD}USAGE${NORMAL}
  $(basename "$0") [flags]

${BOLD}FLAGS${NORMAL}
  -u, --user string        The username to collect repositories in
  -o, --org string         The organization to collect repositories in
  -t, --team string        The team within the organization (optional)
  -b, --new-branch string  The new name for the master branch (defaults to
                           'main')
  -d, --delete             Delete the master branch from GitHub
  -n, --dry-run            Report what will happen, without making any changes
                           on GitHub
  -f, --force              Run for real, making changes on GitHub
EOF
	exit 1
}
if [ $# -lt 2 ]; then
usage
fi
ORG=
TEAM=
USER=
NEW_BRANCH=main
DELETE=0
DRY_RUN=0
FORCE=0
while [ $# -gt 0 ] ; do
case $1 in
-u | --user)
USER="$2"
;;
-o | --org)
ORG="$2"
;;
-t | --team)
TEAM="$2"
;;
-b | --new-branch)
NEW_BRANCH="$2"
;;
-d | --delete)
DELETE=1
;;
-n | --dry-run)
DRY_RUN=1
;;
-f | --force)
FORCE=1
;;
esac
shift
done
# Exactly one of --dry-run / --force must be chosen. FORCE and DRY_RUN
# are initialised to 0 and flipped to 1 by the flag parser, so they are
# never empty — the original `[ -z ... ]` tests could never fire, which
# silently disabled the "pick a mode" guard. Test the numeric value.
if [ "$FORCE" -eq 0 ] && [ "$DRY_RUN" -eq 0 ]; then
	usage
fi

if [ "$FORCE" -eq 1 ] && [ "$DRY_RUN" -eq 1 ]; then
	usage
fi

# An account is mandatory: exactly one of --user / --org.
if [ -z "$USER" ] && [ -z "$ORG" ]; then
	usage
fi

if [ -n "$USER" ] && [ -n "$ORG" ]; then
	usage
fi

# --team only makes sense together with --org.
if [ -z "$ORG" ] && [ -n "$TEAM" ]; then
	usage
fi

if [ -n "$USER" ] && [ -n "$TEAM" ]; then
	usage
fi
# Choose the GitHub REST endpoint to list repositories from, based on
# which account flags were given. ACCOUNT is the owner used for clone
# URLs and messages.
REPOS_ENDPOINT=
ACCOUNT=
if [ -n "$USER" ]; then
	REPOS_ENDPOINT="users/$USER/repos"
	ACCOUNT=$USER
elif [ -n "$ORG" ] && [ -n "$TEAM" ]; then
	REPOS_ENDPOINT="orgs/$ORG/teams/$TEAM/repos"
	ACCOUNT=$ORG
else
	REPOS_ENDPOINT="orgs/$ORG/repos"
	ACCOUNT=$ORG
fi

if [ -n "$TEAM" ]; then
	echo "${BOLD}==> Collecting relevant repositories for '$ORG/$TEAM'...${NORMAL}"
else
	echo "${BOLD}==> Collecting relevant repositories for '$ACCOUNT'...${NORMAL}"
fi

# Page through the endpoint. `gh api --include` prepends the HTTP
# response headers, so the JSON body is the last line and the Link
# header (parsed below) drives pagination: the loop ends when no
# rel="next" URL remains.
PARAMS="?per_page=100"
REPOS=
while [ -n "$PARAMS" ]; do
	RESPONSE=$(gh api "$REPOS_ENDPOINT$PARAMS" --include)
	BODY=$(echo "$RESPONSE" | tail -1)
	# Keep only non-fork, non-archived, non-disabled repos whose default
	# branch is still 'master'; accumulate {id, name} objects.
	REPOS+=$(
		echo "$BODY" |
			jq "
				map(
					select(
						.fork == false and
						.archived == false and
						.disabled == false and
						.default_branch == \"master\"
					) |
					{
						id: .id,
						name: .name
					}
				)[]
			"
	)
	# Extract the query string of the rel="next" link, if any.
	# shellcheck disable=SC1004
	PARAMS=$(
		echo "$RESPONSE" |
			grep -F Link: |
			sed 's/Link: //' |
			sed 's/, /\
/g' |
			grep -F 'rel="next"' |
			grep -o '\?.*>' |
			sed -e 's/>$//'
	)
done

if [ -z "$REPOS" ]; then
	echo "No relevant repositories found."
	exit
fi

# Work in a scratch directory per account; recreated on every run.
TMP_DIR=/tmp/gbr/$ACCOUNT

echo "${BOLD}==> (Re)creating '$TMP_DIR'...${NORMAL}"
rm -rf "$TMP_DIR"
mkdir -p "$TMP_DIR"
cd "$TMP_DIR"

# Collected for the summary report at the end of the script.
FAILED_RENAMES=()
FAILED_DELETIONS=()
# Process each matching repository. The braces form a command group
# (not a subshell); if any step inside fails, `set -e` aborts the group
# and the `|| FAILED_RENAMES+=` fallback records the repository.
for REPO in $(echo "$REPOS" | jq '.name' | sed 's/"//g'); do
	{
		echo "${BOLD}==> Updating '$ACCOUNT/$REPO'...${NORMAL}"
		git clone "git@github.com:$ACCOUNT/$REPO.git" "$TMP_DIR/$REPO"
		cd "$TMP_DIR/$REPO"

		echo "${BOLD}Setting '$NEW_BRANCH' to point to the head of 'master'...${NORMAL}"
		git checkout -B "$NEW_BRANCH" origin/master --no-track
		# DRY_RUN is always "0" or "1", so the original `[ -z "$DRY_RUN" ]`
		# guards were never true and none of the push/PATCH steps below
		# ever executed — even with --force. Test the numeric value.
		if [ "$DRY_RUN" -eq 0 ]; then
			git push --set-upstream origin "$NEW_BRANCH"
			git remote set-head origin "$NEW_BRANCH"
		fi

		echo "${BOLD}Changing default branch to '$NEW_BRANCH'...${NORMAL}"
		if [ "$DRY_RUN" -eq 0 ]; then
			gh api repos/:owner/:repo \
				--method PATCH \
				--raw-field default_branch="$NEW_BRANCH" \
				>/dev/null
		fi

		echo "${BOLD}Changing open pull request target branches to '$NEW_BRANCH'...${NORMAL}"
		for PR in $(gh pr list --base master --limit 9999 | cut -f 1); do
			if [ "$DRY_RUN" -eq 0 ]; then
				gh api "repos/:owner/:repo/pulls/$PR" \
				--method PATCH \
				--raw-field base="$NEW_BRANCH" \
				>/dev/null
			fi
			echo -n "."   # progress marker: one dot per pull request
		done
		echo ""

		if [ "$DELETE" -eq 1 ]; then
			echo "${BOLD}Deleting 'master'...${NORMAL}"
			if [ "$DRY_RUN" -eq 0 ]; then
				# Deletion can fail independently (e.g. protected branch);
				# record it without failing the whole repo rename.
				git push origin --delete master || FAILED_DELETIONS+=("$REPO")
			fi
		fi
	} || FAILED_RENAMES+=("$REPO")
done
echo "Done!"

# Report repositories whose rename failed part-way (any step in the
# per-repo group above) — these need manual attention.
if [ ${#FAILED_RENAMES[@]} -gt 0 ]; then
	echo ""
	echo "${BOLD}NOTE${NORMAL}"
	echo ""
	echo "Failed to rename the 'master' branch for the following repositories:"
	echo ""
	for REPO in "${FAILED_RENAMES[@]}"; do
		echo "  $ACCOUNT/$REPO"
	done
	echo ""
	echo "Manual intervention is required."
fi

# Report repositories where deleting 'master' failed (e.g. a protected
# branch) even though the rename itself succeeded.
if [ ${#FAILED_DELETIONS[@]} -gt 0 ]; then
	echo ""
	echo "${BOLD}NOTE${NORMAL}"
	echo ""
	echo "Failed to delete the 'master' branch from the following repositories:"
	echo ""
	for REPO in "${FAILED_DELETIONS[@]}"; do
		echo "  $ACCOUNT/$REPO"
	done
	echo ""
	echo "Manual intervention is required."
fi

echo ""
echo "${BOLD}NOTE${NORMAL}"
echo ""
echo "Any CI or other code that relies on branch names will need to be updated manually."
8251eaa60b2718b13b97fd08483bd42603c01261 | Shell | dreboard/myapi | /dev_ops/vagrant_conf/all_in_one.sh | UTF-8 | 1,578 | 2.671875 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# update / upgrade
sudo apt-get update
sudo apt-get -y upgrade
# install apache 2.5 and php 7
sudo apt-get install -y apache2
##########################################################
# Install PHP
##########################################################
sudo apt-get install software-properties-common
sudo add-apt-repository ppa:ondrej/php
sudo apt-get update
echo -e "\n--- Install PHP ---\n"
sudo apt-get install -y php7.1 php7.1-opcache php7.1-phpdbg php7.1-mbstring php7.1-cli php7.1-imap php7.1-ldap php7.1-pgsql php7.1-pspell php7.1-recode php7.1-snmp php7.1-tidy php7.1-dev php7.1-intl php7.1-gd php7.1-zip php7.1-xml php7.1-curl php7.1-json php7.1-mcrypt
sudo apt-get install php7.1-intl php7.1-xsl
sudo apt-get install -y php7.1-mysql
##########################################################
# Install Xdebug
##########################################################
echo -e "\n--- Installing Xdebug ---\n"
sudo apt-get install -y php-xdebug
# enable mod_rewrite
sudo a2enmod rewrite
##########################################################
# Install Extras
##########################################################
sudo apt-get -y install curl git nano
sudo apt-get install snmp
# restart apache
sudo apt-get -y install libapache2-mod-php7.1
sudo a2dismod php5
sudo a2enmod php7.1
sudo apt-get -y autoremove
# install Composer
echo "------------------------------------------ Installing Composer"
curl -s https://getcomposer.org/installer | php
sudo mv composer.phar /usr/local/bin/composer
service apache2 restart
| true |
19d7c0f6f5f7c11b28f95dff3ef8ae0cd00c4b6d | Shell | roguehorse/BanInformer | /bash/Working_Scripts/bi_daily_s2 | UTF-8 | 1,626 | 3.96875 | 4 | [] | no_license | #!/bin/bash
#
# fail2ban filter export S2
#
# Scan fail2ban log for banned IP's
# and report only those IP's which
# match the current date.
#
# Wed Apr 29 21:03:49 PDT 2015
# Written by: Scott DuBois

# fail2ban log file
log=/var/log/fail2ban.log
# today's date (matches the log line prefix) and current time for the header
ymd=$(date +%Y-%m-%d)
hm=$(date +%H:%M)
count=0
declare -a dayIP

# verify file exists
if [ -f "$log" ]; then
    i=0
    # read each line from file separately
    while read -r line
    do
        # keep only today's "actions" lines (bans/unbans)
        today=$(echo "$line" | grep "$ymd" | grep actions)
        if [ -n "$today" ]; then
            # extract the date and the jail/IP columns
            banIP=$(echo $today | cut --delimiter=' ' --fields=1,6,7)
            if [[ $banIP != *"already" && $banIP != *"Unban"* && $banIP != *"banTime"* ]]; then
                dayIP[$i]=$banIP
                ((i++))
                ((count++))
            fi
            # Filtered lines (unbans, re-bans, config noise) are simply
            # skipped. The original reset $count to the string
            # "No bans found" here, which corrupted the tally and broke
            # the subsequent ((count++)) arithmetic.
        fi
    done < "$log"
    # only report "no bans" when the entire file yielded none
    if [ "$count" -eq 0 ]; then
        count="No bans found"
    fi
else
    count="No files found"
fi

# create report file
echo "Daily bans Server 2" > /home/scott/Bans/DAILY-S2.txt
printf "Report: $ymd --> $hm\n" >> /home/scott/Bans/DAILY-S2.txt
printf "Bans: $count\n" >> /home/scott/Bans/DAILY-S2.txt
printf "==============================\n" >> /home/scott/Bans/DAILY-S2.txt

# sort the array
readarray -t sortDay < <(for a in "${dayIP[@]}"; do echo "$a"; done | sort)

# add the sorted info to the report file
for ea in "${sortDay[@]}"; do
    echo "$ea" >> /home/scott/Bans/DAILY-S2.txt
done
# End of script
| true |
148fccb7e3ba2a82f48ef9c0a8e99cedf94abfe8 | Shell | openpreserve/fits-blackbox-testing | /bash-scripts/fits-test.sh | UTF-8 | 7,578 | 4.3125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
##
# Bash script to automate the regression testing of FITS.
#
# author Carl Wilson carl@openplanetsfoundation.org
#
# This initial cut is intended to be run from the root
# directory of a FITS development branch. Given the path
# to a FITS testing tool and a path to test corpora the script:
#
# * Checks that there's no uncommitted code.
# * Checks that you're not on master.
# * Builds the current branch and generates test output.
# * Checks out the master commit that's the source of this branch.
# * Builds FITS and generates the more test output.
# * Use the FITS testing tool to compare the output.
# * If successful, i.e. the output is unchanged report success.
# * If unsuccessful use git-bisect to find the last good commit.
#
# The FITS testing tool should be a command line application
# that tests 2 sets of FITS generated output and returns:
#
# 0 If the tests succeed
#
# 1-124 If the tests fail
#
# 125 If the tests cannot be performed
#
# These values are for use with git bisect run command
# https://www.kernel.org/pub/software/scm/git/docs/git-bisect.html
# http://git-scm.com/book/en/Git-Tools-Debugging-with-Git
#
# Script expects 2 parameters:
#
# $1 path to the FITS testing tool to use
# Mandatory
#
# $2 path to root directory of test corpora to use
# Mandatory
##
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Globals to hold the checked param vals
paramFitsToolLoc=""
paramCorporaLoc=""
resetHead=0
currentBranch=
globalOutput=".bb-testing"
fitsOutputDir="$globalOutput/output"
fitsReleaseDir="$globalOutput/release"
##
# Functions defined first, control flow at the bottom of script
##
# Check the passed params to avoid disapointment
# Validate command-line options and populate the param globals.
# Both -t (testing tool path) and -c (corpora root) are mandatory;
# missing or non-existent paths terminate the script.
# Globals written: paramFitsToolLoc, paramCorporaLoc
checkParams () {
	OPTIND=1	# Reset in case getopts previously used
	while getopts "h?t:c:" opt; do	# Grab the options
		case "$opt" in
		h|\?)
			showHelp
			exit 0
			;;
		t)	paramFitsToolLoc=$OPTARG
			;;
		c)	paramCorporaLoc=$OPTARG
			;;
		esac
	done
	# Both options are required; show usage and quit if either is empty.
	if [ -z "$paramFitsToolLoc" ] || [ -z "$paramCorporaLoc" ]
	then
		showHelp
		exit 0
	fi
	# Check that the FITS testing tool exists
	if [[ ! -e "$paramFitsToolLoc" ]]
	then
		echo "FITS Testing tool not found: $paramFitsToolLoc"
		exit 1;
	fi
	# Check that the corpora directory exists
	if [[ ! -d "$paramCorporaLoc" ]]
	then
		echo "Corpora directory not found: $paramCorporaLoc"
		exit 1;
	fi
}
# Show usage message
showHelp() {
  # Emit the synopsis and per-option descriptions on stdout in one call.
  printf '%s\n' \
    "usage: fits-test [-t <pathToTestTool>] [-c <pathToCorpora>] [-h|?]" \
    "" \
    "  pathToTestTool : The full path to the FITS testing tool." \
    "  pathToCorpora  : The path to the root directory of the test corpora."
}
# Checks if there is a .bb-testing dir in the current working dir.
# if there is one, it is removed, so that a fresh test can be executed.
wipeOutOldData() {
  if [[ -d "$globalOutput" ]]; then
    echo "Old test output data found, removing...: $globalOutput"
    rm -r "$globalOutput/"
  fi
}
# Check we've got a master branch and it's not checked out,
# intended to be run on a development branch
checkMaster() {
# Capture `git branch` output; the checked-out branch is marked "* name".
gitbranch=$(git branch 2>&1)
currentBranchRegEx="^\* (.*)$"
masterBranchRegEx="^[ ]*master$"
masterFound=0
# Scan each listed branch: remember the checked-out one (global
# currentBranch) and note whether a master branch exists at all.
while IFS= read -r
do
if [[ $REPLY =~ $currentBranchRegEx ]]
then
currentBranch="${BASH_REMATCH[1]}"
elif [[ $REPLY =~ $masterBranchRegEx ]]
then
masterFound=1
fi
done <<< "$gitbranch"
# Refuse to run with master checked out: the test compares a dev branch
# AGAINST master, so master itself has nothing to be compared to.
if [[ $currentBranch =~ $masterBranchRegEx ]]
then
echo "Current branch is master, please check out another branch."
exit 1;
fi
# NOTE(review): this only warns (does not exit) when no master branch is
# found — later steps presumably fail at `git merge-base`; confirm intended.
if (( masterFound == 0 ))
then
echo "No master branch found to test against"
fi
}
# Get the commit hash of the current branch HEAD
# Stores the SHA-1 in the global $githeadhash (stderr is folded into the value).
getHeadHash() {
githeadhash=$(git rev-parse HEAD 2>&1)
}
# Find the hash of the current branch's split from master
# Sets the global $mergebasehash; exits 1 when the branch has no commits of
# its own (HEAD == merge base), since there would be nothing to compare.
findMergeBaseHash() {
# `git merge-base` yields the fork-point commit; `git show` echoes it as a
# bare %H line amongst the commit output.
gitshow=$(git show --pretty=format:"%H" `git merge-base "$currentBranch" master` 2>&1)
shaRegEx="^[0-9a-f]{40}$"
# Pick out the line that is exactly a 40-character SHA-1.
while IFS= read -r
do
if [[ $REPLY =~ $shaRegEx ]]
then
mergebasehash=$REPLY
fi
done <<< "$gitshow"
# A branch whose HEAD equals the merge base is identical to master.
if [[ $mergebasehash == $githeadhash ]]
then
echo "$currentBranch is a fresh branch and doesn't differ from its master root"
exit 1
fi
}
# Checkout a particular revision on the current branch by hash
# $1 - commit hash or branch name to check out
checkoutRevision() {
  echo "Checking out revision $1"
  # Quote the ref defensively. A '.' (current directory) argument is
  # deliberately NOT passed: keeping changed files would break the build.
  gitcheckout=$(git checkout "$1")
  echo "$gitcheckout"
}
# Invoke the checkGitStatus script, exit on failure.
# The helper's exit code is captured BEFORE calling resetHead: the original
# `exit $?` exited with resetHead's status (0), silently masking the failure.
checkGitStatus() {
  bash "$SCRIPT_DIR/check-git-status.sh"
  local rc=$?
  if (( rc != 0 ))
  then
    resetHead
    exit "$rc"
  fi
}
# Invoke the ant-release script, exit on failure.
# Capture the build's exit code before resetHead clobbers $? — the original
# `exit $?` propagated resetHead's (successful) status instead of the build's.
buildFits() {
  bash "$SCRIPT_DIR/fits-ant-release.sh"
  local rc=$?
  if (( rc != 0 ))
  then
    resetHead
    exit "$rc"
  fi
}
# Find the unzipped release directory.
# Sets the global $releaseDir to the fits-* directory beneath $fitsReleaseDir;
# exits 1 (after resetting HEAD) when no such directory exists.
findRelease() {
  # Quote the search root so paths with IFS characters can't split the call.
  releaseDir=$(find "$fitsReleaseDir" -name "fits-*" -type d 2>&1)
  if [[ ! -d "$releaseDir" ]]
  then
    echo "FITS release NOT found." >&2
    echo "$releaseDir" >&2
    resetHead
    exit 1
  fi
}
# Setup output directory and execute FITS.
# $1 - git hash; names the per-revision output directory under $fitsOutputDir.
# On helper failure, restores the repo and exits with the helper's REAL code
# (the original `exit $?` exited with checkoutCurrentBranch's status instead).
executeFits() {
  local githash=$1
  local outputDir="$fitsOutputDir/$githash"
  # Start from a clean slate so stale output cannot pollute the comparison.
  if [[ -d "$outputDir" ]]
  then
    rm -rf "$outputDir"
  fi
  mkdir -p "$outputDir"
  bash "$SCRIPT_DIR/execute-fits.sh" "$paramCorporaLoc" "$outputDir" "$releaseDir" "$githash"
  local rc=$?
  if (( rc != 0 ))
  then
    resetHead
    checkoutCurrentBranch
    exit "$rc"
  fi
}
# Output warning r.e. current git status, IF warning flag set
resetHead() {
  # Hard-reset the working tree, but only once the dirty flag was raised
  # (the $resetHead VARIABLE deliberately shares its name with this function).
  if (( resetHead == 1 )); then
    git reset HEAD --hard
  fi
}
# Checks out the starting branch if the the parameter is set.
checkoutCurrentBranch() {
  # Nothing to do until checkMaster() has filled in $currentBranch.
  if [[ -n "$currentBranch" ]]; then
    echo "Current Branch is: $currentBranch"
    checkoutRevision "$currentBranch"
  fi
}
# Run the external testing tool, comparing the merge-base output (-s, the
# "source"/known-good set) against the branch HEAD output (-c, the candidate).
# Exit codes follow the git-bisect-run contract documented in the header.
testHeadAgainstMergeBase() {
java -jar "$paramFitsToolLoc" -s "$fitsOutputDir/$mergebasehash" -c "$fitsOutputDir/$githeadhash" -k "$githeadhash"
case "$?" in
# Test passed so no need to look for broken revision
"0" )
echo "Test of HEAD against branch base succeeded, no broken revision to find."
resetHead
checkoutCurrentBranch;
exit 0;
;;
# Test of dev branch HEAD against master couldn't be performed
# We probably don't want to go on until tests execute
"125" )
echo "Test of HEAD against branch base could not be performed"
resetHead
checkoutCurrentBranch
exit 1;
;;
# Test failed, exit for now but the start for the
# revision that broke the test starts here
* )
echo "Test of HEAD against branch base failed"
resetHead
checkoutCurrentBranch;
exit 1;
esac
}
##
# Script Execution Starts HERE
##
# Check and setup parameters
checkParams "$@";
# Look for a .bb-testing directory and remove it
wipeOutOldData;
# We're in a git repo with no uncommitted changes?
checkGitStatus;
echo "In git repo ${PWD##*/}"
# There's a master branch and it's not checked out?
checkMaster;
echo "Testing branch: $currentBranch"
# Grab the git details
getHeadHash
echo "HEAD $githeadhash"
findMergeBaseHash;
echo "BASE commit $mergebasehash"
# Build current version of FITS
buildFits;
findRelease;
# Execute FITS sending output to hash named output dir
executeFits "$githeadhash";
# Set reset HEAD flag, we're about to check out changes
resetHead=1
# NOTE(review): resetHead + checkoutCurrentBranch here look redundant (the
# branch is already checked out) — presumably they discard build artefacts
# before switching to the merge base; confirm.
resetHead;
checkoutCurrentBranch;
# Checkout master branch base
checkoutRevision "$mergebasehash"
# Build master revision for comparison
buildFits;
findRelease;
# Execute FITS sending output to hash named output dir
executeFits "$mergebasehash";
# Compare the two output sets; this call exits the script itself.
testHeadAgainstMergeBase;
# Reset repo to head
resetHead;
checkoutCurrentBranch;
| true |
6a947d6e1962ed98e67a68301dde0c037e5f522a | Shell | tdelacour/itinerary | /PopulateDB/runLoad.sh | UTF-8 | 123 | 2.5625 | 3 | [] | no_license | #!/bin/bash
# Run LoadScript once per batch id 14..65, passing the offset id*1000.
for i in {14..65}
do
  echo "$i"
  a=$((i * 1000))
  java -cp .:gson-2.3.1.jar:ojdbc7.jar LoadScript "$a"
done
| true |
aca4281d10ebf41ff51eccb7642745c757015634 | Shell | dilsonlira/mega-data | /run.sh | UTF-8 | 488 | 3.078125 | 3 | [] | no_license | if flake8; then
if mypy .; then
if test -f docker-compose.yaml; then
docker-compose down -v
docker-compose build
if [ "$1" = db ]; then
if docker compose up | grep 'Database load completed' -m1; then
./db.sh
fi
else
docker compose up
fi
else
echo "There is no docker-compose.yaml in the current directory."
fi
fi
fi
| true |
81d0ae5d4f86a8276be9f5e60c4142fced970670 | Shell | marcusmueller/iscml | /pcs/acctm/comm/send_project_email | UTF-8 | 2,001 | 2.8125 | 3 | [] | no_license | #!/bin/bash
# send_project_email
#
# Send an e-mail from the cluster head node using
# statler-wcrl@mail.wvu.edu as the return address
#
# Called by web interface to send project-specific e-mails.
#
# Inputs
#   1. Recipient address
#   2. Message subject
#   3. Message body
#
# Calling example - send test message to Terry's gmail account
#
#   > send_project_email terry.ferrett@gmail.com statler-wcrl-test-subject
#      statler-wcrl-test-body
#
#
#     Copyright (C) 2012, Terry Ferrett and Matthew C. Valenti
#     For full copyright information see the bottom of this file.

# Quote each positional argument so recipients, subjects, and bodies that
# contain whitespace reach sendemail as a single argument apiece (the
# original unquoted $1/$2/$3 word-split multi-word subjects and bodies).
sendemail -s mail.cemr.wvu.edu \
          -f statler-wcrl@mail.csee.wvu.edu \
          -t "$1" \
          -u "$2" \
          -m "$3"

#     This library is free software;
#     you can redistribute it and/or modify it under the terms of
#     the GNU Lesser General Public License as published by the
#     Free Software Foundation; either version 2.1 of the License,
#     or (at your option) any later version.
#
#     This library is distributed in the hope that it will be useful,
#     but WITHOUT ANY WARRANTY; without even the implied warranty of
#     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
#     Lesser General Public License for more details.
#
#     You should have received a copy of the GNU Lesser General Public
#     License along with this library; if not, write to the Free Software
#     Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 US
5573ee50c9bf677f74e1f69b909c09140d1216cd | Shell | ryanjhkruger/hassio-addons | /tensorflow/run.sh | UTF-8 | 926 | 2.90625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
set -e
# Install modules
echo "[Info] Install TensorFlow modules into deps"
# Install into Home Assistant's persistent deps directory so the packages
# survive container restarts — presumably; confirm against the add-on docs.
export PYTHONUSERBASE=/config/deps
PYPI="absl-py==0.1.6 astor==0.7.1 termcolor==1.1.0 gast==0.2.0 keras_applications==1.0.6 keras_preprocessing==1.0.5"
# ${PYPI} is intentionally unquoted: word-splitting turns the space-separated
# spec list into individual pip arguments (hence the shellcheck suppression).
# shellcheck disable=SC2086
if ! pip3 install --user --no-cache-dir --prefix= --no-dependencies ${PYPI}; then
    echo "[Error] Can't install PyPI packages!"
    exit 1
fi
echo "[Info] Install TensorFlow into deps"
# shellcheck disable=SC2086
wget https://storage.googleapis.com/tensorflow/raspberrypi/tensorflow-1.13.1-cp34-none-linux_armv7l.whl
# Rename the cp34 wheel to cp37 so pip accepts it on Python 3.7; the binary
# is presumably ABI-compatible on this base image — TODO confirm.
mv tensorflow-1.13.1-cp34-none-linux_armv7l.whl tensorflow-1.13.1-cp37-none-linux_armv7l.whl
if ! pip3 install --user --no-cache-dir --prefix= --no-dependencies tensorflow-1.13.1-cp37-none-linux_armv7l.whl; then
    echo "[Error] Can't install TensorFlow package!"
    exit 1
fi
echo "[INFO] TensorFlow installed and ready for use"
| true |
71d081934b6151167e88ca3f64cbb3a062735075 | Shell | guitarrapc/docker-lab | /localstack/localstack/ready.d/sqs.sh | UTF-8 | 1,969 | 3.40625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
set -eo pipefail
# NOTE: use awslocal to access localstack service.
region=ap-northeast-1
# Queues are paired by index: even = main queue, odd = its dead-letter queue.
queues=("hello-queue" "hello-deadletter-queue")
# create queues
for i in "${!queues[@]}"; do
awslocal sqs create-queue --queue-name "${queues[i]}" --region "${region}"
done
# configure Queue
for i in "${!queues[@]}"; do
queue_url="http://localhost:4566/000000000000/${queues[i]}"
delaySeconds=0
maximumMessageSize=262144 # 256kb
messageRetentionPeriod=1209600 # 14days
receiveMessageWaitTimeSeconds=0
visibilityTimeout=300
maxReceiveCount=10
# Normal Queue
# (( i % 2 )) fails for even i, so the `||` arm runs for main queues only;
# the RedrivePolicy points at the paired DLQ at index i+1. A failing left
# side of `||`/`&&` is exempt from `set -e`, so the guards are safe.
(( i % 2 )) || awslocal sqs set-queue-attributes --queue-url "${queue_url}" --attributes VisibilityTimeout=${visibilityTimeout} --attributes ReceiveMessageWaitTimeSeconds=${receiveMessageWaitTimeSeconds} --attributes MessageRetentionPeriod=${messageRetentionPeriod} --attributes DelaySeconds=${delaySeconds} --attributes MaximumMessageSize=${maximumMessageSize} --attributes "RedrivePolicy='{\"deadLetterTargetArn\":\"arn:aws:sqs:${region}:000000000000:${queues[i+1]}\",\"maxReceiveCount\":\"${maxReceiveCount}\"}'" --region "${region}"
# DeadLetter Queue
# The `&&` arm runs for odd i: permit redrive only from the main queue at i-1.
(( i % 2 )) && awslocal sqs set-queue-attributes --queue-url "${queue_url}" --attributes VisibilityTimeout=${visibilityTimeout} --attributes ReceiveMessageWaitTimeSeconds=${receiveMessageWaitTimeSeconds} --attributes MessageRetentionPeriod=${messageRetentionPeriod} --attributes DelaySeconds=${delaySeconds} --attributes MaximumMessageSize=${maximumMessageSize} --attributes "RedriveAllowPolicy='{\"redrivePermission\":\"byQueue\",\"sourceQueueArns\":[\"arn:aws:sqs:${region}:000000000000:${queues[i-1]}\"]}'" --region "${region}"
done
# show queue detail
for i in "${!queues[@]}"; do
queue_url="http://localhost:4566/000000000000/${queues[i]}"
awslocal sqs get-queue-attributes --queue-url "${queue_url}" --attribute-names All --region "${region}"
done
# show queue list
awslocal sqs list-queues --region "${region}"
| true |
4aef477bd1877fc556c9c0caa8a428f3d6e9ca00 | Shell | WindowxDeveloper/PlayOnLinux_Tools | /PlayOnLinux_Packager/make_repository_beta | UTF-8 | 2,097 | 3.453125 | 3 | [] | no_license | #!/bin/bash
# Guard: $VERSION must be exported by the calling environment; otherwise
# fall back to the interactive build script and stop here.
if [ "$VERSION" = "" ]
then
echo "Unknown version"
bash generate_deb_beta
exit
fi
mk_repository()
{
	# $1 : .deb file to add to the repository
	# $2 : distribution codename (e.g. testing, squeeze, lucid)
	# $3 : distribution version string for the stanza
	echo "Building repository for $2 $3 ..."
	# Append a distribution stanza only the first time this codename is seen.
	# grep -q replaces the original `cat file | grep $2` command substitution;
	# quoting $2 prevents word-splitting of the (regex) pattern.
	if ! grep -q -- "$2" "$HOME/PlayOnLinux-beta/deb/conf/distributions"
	then
cat << FIN >> "$HOME/PlayOnLinux-beta/deb/conf/distributions"
Origin: PlayOnLinux
Label: PoL
Suite: $2
Codename: $2
Version: $3
Architectures: i386 amd64
Components: main
Description: PlayOnLinux permits you to install windows applications on linux thanks to wine - Visit : http://www.playonlinux.com/
SignWith: C4676186
FIN
	fi

	reprepro --ignore=undefinedtarget -b "$HOME/PlayOnLinux-beta/deb" includedeb "$2" "$1"

	# Building .list files
cat << EOF > "$HOME/PlayOnLinux-beta/deb/playonlinux_$2.list"
deb http://beta.playonlinux.com/deb/ $2 main
EOF
}
mkdir -p $HOME/PlayOnLinux-beta/deb/incoming
mkdir -p $HOME/PlayOnLinux-beta/deb/conf
if [ -e "$HOME/PlayOnLinux-beta/deb/conf/distributions" ]
then
rm $HOME/PlayOnLinux-beta/deb/conf/distributions
touch $HOME/PlayOnLinux-beta/deb/conf/distributions
fi
echo "Cleaning"
cd $HOME/PlayOnLinux-beta/deb
rm -rf incoming
rm -rf db
rm -rf pool
rm -rf dists
echo "PlayOnLinux version : $VERSION"
mkdir -p incoming
cp $HOME/PlayOnLinux-beta/files/$VERSION-$ver/PlayOnLinux-beta_$VERSION-$ver.deb ./incoming
cd $HOME/PlayOnLinux-beta/deb
#DEBFILES="incoming/PlayOnLinux_$VERSION.deb incoming/wine-$1.deb incoming/wine-$2.deb"
DEBFILES="incoming/PlayOnLinux-beta_$VERSION-$ver.deb"
for DEB_FILE in $DEBFILES #"incoming/PlayOnLinux_$VERSION.deb"
do
mk_repository "$DEB_FILE" "testing" "testing"
# Ubuntu
#mk_repository "$DEB_FILE" "lucid" "10.04"
#mk_repository "$DEB_FILE" "hardy" "8.04"
#mk_repository "$DEB_FILE" "intrepid" "8.10"
#mk_repository "$DEB_FILE" "jaunty" "9.04"
#mk_repository "$DEB_FILE" "karmic" "9.10"
#mk_repository "$DEB_FILE" "maverick" "10.10"
#mk_repository "$DEB_FILE" "natty" "11.04"
# Debian
# mk_repository "$DEB_FILE" "etch" "4.0"
# mk_repository "$DEB_FILE" "lenny" "5.0"
#mk_repository "$DEB_FILE" "squeeze" "6.0"
done
| true |
db63aedfd1874a5db4bdfb1f5b67d801227d78bb | Shell | HenriqueLR/payments | /env/sys/add-user.sh | UTF-8 | 402 | 2.640625 | 3 | [
"MIT"
] | permissive | #!/bin/bash -xe
# Create a passwordless-sudo group and grant it via a sudoers drop-in file.
groupadd supersudo && echo "%supersudo ALL=(ALL:ALL) NOPASSWD: ALL" > /etc/sudoers.d/supersudo
# Create the service user, put it in the group, and prepare its SSH directory.
adduser --disabled-password --gecos payments payments && usermod -a -G supersudo payments && mkdir -p /home/payments/.ssh
# Disable host-key prompting for GitHub so the first clone is non-interactive.
printf 'Host github.com\n\tStrictHostKeyChecking no\n\n' > /home/payments/.ssh/config
sudo chown -R payments:payments /home/payments
sudo chmod 600 /home/payments/.ssh/*
b7c68b3aa9b044d161f732dd79b507d4db39e943 | Shell | rebelplutonium/nixos-configuration | /custom/expressions/initialization-utils/src/aws.sh | UTF-8 | 1,002 | 3.609375 | 4 | [] | no_license | #!/bin/sh
# Parse the supported long options; anything else aborts with 65.
while [ "$#" -gt 0 ]
do
    case "$1" in
        --aws-access-key-id)
            AWS_ACCESS_KEY_ID="$2"
            shift 2
            ;;
        --default-region-name)
            DEFAULT_REGION_NAME="$2"
            shift 2
            ;;
        --default-output-format)
            DEFAULT_OUTPUT_FORMAT="$2"
            shift 2
            ;;
        *)
            echo "Unknown Option" >&2
            echo "$1" >&2
            echo "$0" >&2
            echo "$@" >&2
            exit 65
            ;;
    esac
done

# Verify every required setting is non-empty; abort with 66 otherwise.
# A plain for-loop replaces the original heredoc | while pipeline, which
# contained a stray `do` token (`while read VAR do`) and exited only the
# pipeline's subshell rather than the script itself.
for VAR in AWS_ACCESS_KEY_ID DEFAULT_REGION_NAME DEFAULT_OUTPUT_FORMAT
do
    eval "VAL=\${${VAR}-}"    # POSIX sh has no ${!VAR}; names are fixed above.
    if [ -z "${VAL}" ]
    then
        echo "Undefined ${VAR}" >&2
        echo "$0" >&2
        exit 66
    fi
done

# First run only: pull the secret key out of pass(1) and feed the four
# answers `aws configure` prompts for via its stdin.
if [ ! -d "${HOME}/.aws" ]
then
    AWS_SECRET_ACCESS_KEY="$(pass show "${AWS_ACCESS_KEY_ID}")"
    aws configure <<EOF
${AWS_ACCESS_KEY_ID}
${AWS_SECRET_ACCESS_KEY}
${DEFAULT_REGION_NAME}
${DEFAULT_OUTPUT_FORMAT}
EOF
fi
8f6e10e00fb557013c89362e05664d97ba754f2b | Shell | aalzehla/Netcool | /netcool/omnibus/bin/nco_aen | UTF-8 | 2,421 | 3.046875 | 3 | [] | no_license | #!/bin/sh
#
# Licensed Materials - Property of IBM
#
# 5724O4800
#
# (C) Copyright IBM Corp. 2003, 2007. All Rights Reserved
#
# US Government Users Restricted Rights - Use, duplication
# or disclosure restricted by GSA ADP Schedule Contract
# with IBM Corp.
#
# nco_aen
#
# --
#######################################
#######################################
# MAIN
#######################################
#######################################
# Find nco_common
NCO_COMMON=`dirname $0`/../bin/nco_common
# Check for nco_common, and load if found
if [ ! -f "$NCO_COMMON" ]; then
    echo "Cannot find nco_common" 1>&2
    exit 1
fi
# Sourcing this presumably defines NCHOME, ARCH and NCO_JRE_64_32 (checked
# below) — confirm against nco_common itself.
. $NCO_COMMON
if [ "$NCHOME" = "" ]
then
    echo Cannot find your NCHOME environment
    exit 1
fi
# redefine OMNIHOME to NCHOME/omnibus
OMNIHOME="$NCHOME/omnibus"
#
# Check JRE was found by nco_common
#
if [ "$NCO_JRE_64_32" = "" -o ! -x "$NCO_JRE_64_32/bin/java" ]
then
    echo Cannot find your Java environment
    exit 1
fi
# Swing look-and-feel; cleared again on AIX below.
P_LNF="-Dswing.metalTheme=steel"
#
# Fix for JDK bug on AIX:
# ARCH Variable is set in nco_common
#
if [ $ARCH = "aix4" -o $ARCH = "aix5" ]; then
    echo "Unsetting XPG_SUS_ENV" 1>&2
    unset XPG_SUS_ENV
    P_LNF=""
fi
#
# Set classpath to only include relevant jar files
#
if [ -z "${CLASSPATH}" ]; then
    export CLASSPATH
fi
#
# NOTE oem_administrator.jar is NOT shipped with a standard install
# it is used when re badging the application
#
CLASSPATH=${OMNIHOME}/java/jars/oem_administrator.jar
CLASSPATH=${CLASSPATH}:${OMNIHOME}/java/jars/niduc.jar
CLASSPATH=${CLASSPATH}:${OMNIHOME}/java/jars/ControlTower.jar
CLASSPATH=${CLASSPATH}:${OMNIHOME}/java/jars/hsqldb.jar
CLASSPATH=${CLASSPATH}:${OMNIHOME}/java/jars/jms.jar
CLASSPATH=${CLASSPATH}:${OMNIHOME}/java/jars/log4j-1.2.8.jar
CLASSPATH=${CLASSPATH}:${OMNIHOME}/java/jars/jconn3.jar
#
# Convert env vars to java commandline properties
#
P_CODEBASE="-Djava.rmi.server.codebase=file://${OMNIHOME}/java/jars/ControlTower.jar"
P_SECURITY="-Djava.security.policy=file://${OMNIHOME}/etc/admin.policy"
P_OMNIHOME="-Domni.home=${OMNIHOME}"
P_NCHOME="-Dnc.home=${NCHOME}"
P_ARCHDIR="-Domni.arch.dir=${OMNIHOME}/platform/${ARCH}"
P_MEMORY="-Xms64m -Xmx512m"
#
# Start class with main entry
#
# exec replaces this shell with the JVM, so the JVM's exit status becomes
# the script's and no extra shell process lingers.
exec "${NCO_JRE_64_32}/bin/java" -classpath ${CLASSPATH} \
    ${P_CODEBASE} ${P_SECURITY} \
    ${P_NCHOME} ${P_OMNIHOME} ${P_ARCHDIR} \
    ${P_MEMORY} ${P_LNF} \
    com.micromuse.aen.AenApplicationContext "$@"
| true |
970572073a0a3a4df5ab6f8adddad5ca4faec2a0 | Shell | petronny/aur3-mirror | /kemu/PKGBUILD | UTF-8 | 680 | 2.53125 | 3 | [] | no_license |
# Contributor: A Rojas (nqn1976 @ gmail.com)
pkgname=kemu
pkgver=0.0.4
pkgrel=2
pkgdesc="A graphical front-end for QEMU and KVM"
arch=('i686' 'x86_64')
url="http://www.kde-apps.org/content/show.php?content=116980"
license=('GPL')
depends=('qemu' 'kdebindings-korundum')
makedepends=('cmake' 'automoc4')
source=(http://rpdev.net/projects/${pkgname}/${pkgname}-${pkgver}.tar.gz)
md5sums=('2d78f1b0e0db99c087c8c16e6c391dab')
build() {
cd ${srcdir}/${pkgname}-${pkgver}/src/ui
make ui
cd "../.."
mkdir build
cd build
cmake -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_BUILD_TYPE=Release ..
make
}
package() {
cd ${srcdir}/${pkgname}-${pkgver}/build
make DESTDIR="${pkgdir}" install
}
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.