blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
13edf378144a16fcbc787a373fec902cccd10158 | Shell | dannystaple/danny_unix_settings | /deploy.sh | UTF-8 | 321 | 2.78125 | 3 | [
"Unlicense"
] | permissive | #!/bin/bash
# Run in the tools dir. Deploys the new settings on a target machine.
# rc files
# FIX: the original unquoted line ("RC_FILES=.emacs .pylint") was parsed as
# an environment prefix plus the command ".pylint", failing every run;
# quoting makes it a plain assignment.
RC_FILES=".emacs .pylint"
ln -s "$(pwd)/.emacs" ~/.emacs || true   # || true: tolerate an existing link
mkdir -p ~/bin
# bin tools: symlink each helper script into ~/bin
BIN_APPS="python_check_and_tests.sh submit_xml_as_jenkins_job.sh"
for APP in ${BIN_APPS}; do               # deliberate word-splitting of the list
    ln -s "$(pwd)/${APP}" ~/bin/"${APP}" || true
done
| true |
233434408bb73f6685a57e65feec7a4cafc727ea | Shell | mpslanker/archlinux-docker | /build | UTF-8 | 2,184 | 3.84375 | 4 | [
"MIT"
] | permissive | #!/bin/bash -ex
DOCKER_ARCH=${BUILD_ARCH:-arm64v8}
DOCKER_ORG=mcharo
DOCKER_IMAGE=archlinux
# Map the short arch tag onto a docker --platform string,
# e.g. arm32v7 -> linux/arm/v7, arm64v8 -> linux/arm64/v8.
if [[ "${DOCKER_ARCH:0:5}" = "arm32" ]]; then
DOCKER_PLATFORM="linux/arm/${DOCKER_ARCH:5:2}"
elif [[ "${DOCKER_ARCH:0:5}" = "arm64" ]]; then
DOCKER_PLATFORM="linux/arm64/${DOCKER_ARCH:5:2}"
elif [[ "${DOCKER_ARCH:0:5}" = "amd64" ]]; then
DOCKER_PLATFORM='linux/amd64'
fi
# Small logging helpers: out takes a prefix, a printf format, then args.
out() { printf "$1 $2\n" "${@:3}"; }
error() { out "==> ERROR:" "$@"; } >&2
die() { error "$@"; exit 1; }
# get latest Arch Linux tarball
# Scrape the mirror's directory index for the bootstrap tarball name,
# skipping the detached signature (.sig) entry.
LATEST_AMD64_URL='http://mirror.rackspace.com/archlinux/iso/latest'
LATEST_AMD64_FILENAME=$(curl -sL "${LATEST_AMD64_URL}" | grep 'tar.gz' | sed -E 's/.*href="(.*)".*/\1/g' | grep -v '.sig')
amd64="${LATEST_AMD64_URL}/${LATEST_AMD64_FILENAME}"
# Arch Linux ARM rootfs tarballs, one variable per supported arch tag.
arm32v5='http://os.archlinuxarm.org/os/ArchLinuxARM-armv5-latest.tar.gz'
arm32v6='http://os.archlinuxarm.org/os/ArchLinuxARM-rpi-latest.tar.gz'
arm32v7='http://os.archlinuxarm.org/os/ArchLinuxARM-armv7-latest.tar.gz'
arm64v8='http://os.archlinuxarm.org/os/ArchLinuxARM-aarch64-latest.tar.gz'
# Indirect expansion: pick the URL variable named by DOCKER_ARCH.
LATEST_TARBALL="${!DOCKER_ARCH}"
FILENAME="${LATEST_TARBALL##*/}"
EXTRACTED_FILENAME="${FILENAME%.*}"
wget -q "${LATEST_TARBALL}"
# amd64 publishes one md5sums.txt for the whole directory; the ARM builds
# ship a per-file .md5, so fetch whichever applies.
if [[ "${DOCKER_ARCH}" = "amd64" ]]; then
wget -q "${LATEST_AMD64_URL}/md5sums.txt"
grep "${FILENAME}" md5sums.txt > "${FILENAME}.md5"
else
wget -q "${LATEST_TARBALL}.md5"
fi
# verify download is valid
md5sum -c "${FILENAME}.md5" || die "Checksum failed"
gzip -d "${FILENAME}"
# amd64 bootstrap tarball unpacks to root.x86_64 and needs a usable
# mirrorlist before import; ARM rootfs tarballs can be imported directly.
if [[ "${DOCKER_ARCH}" = "amd64" ]]; then
sudo tar xf "${EXTRACTED_FILENAME}" -C /tmp/
pushd /tmp/root.x86_64
# Uncomment the "Worldwide" mirrors (minus evowise) into the mirrorlist.
grep Worldwide etc/pacman.d/mirrorlist -A 3 | tail -n +2 | grep -v evowise | tr -d '#' | sudo tee etc/pacman.d/mirrorlist
sudo tar -c . | docker import - "${DOCKER_ORG}/${DOCKER_IMAGE}:latest-base-${DOCKER_ARCH}"
popd
else
docker import --platform=${DOCKER_PLATFORM} "${EXTRACTED_FILENAME}" "${DOCKER_ORG}/${DOCKER_IMAGE}:latest-base-${DOCKER_ARCH}"
fi
# Build the runtime and devel images on top of the freshly imported base.
docker build --build-arg ARCH=${DOCKER_ARCH} \
-t ${DOCKER_ORG}/${DOCKER_IMAGE}:${DOCKER_ARCH} \
-f Dockerfile .
docker build --build-arg ARCH=${DOCKER_ARCH} \
-t ${DOCKER_ORG}/${DOCKER_IMAGE}:${DOCKER_ARCH}-devel \
-f Dockerfile.devel .
| true |
0b34a1b574d5a619dd352440232f9a4ba93e69db | Shell | davep-github/dpw | /bin/dp-apply-my-xemacs-patches | UTF-8 | 3,850 | 3.96875 | 4 | [] | no_license | #!/bin/bash
# Apply my XEmacs patches: given a version id, locate the dumped-lisp and
# src patch files for that version and apply them (backing up originals).
# Relies on helpers sourced from script-x/eexec (vsetp, EExec_parse, EExec,
# EExecDashN, echo_id, ...) -- their exact semantics live in those files.
source script-x
progname="$(basename $0)"
source eexec
# eexec option handling: consume any leading eexec control words.
# NOTE(review): vsetp presumably tests "is this variable set" -- confirm
# against script-x.
if vsetp "${eexec_program-}" # Did the caller provide a program?
then
EEXEC_SHIFT=:
else
eexec_program=$(EExec_parse "$@")
EEXEC_SHIFT=shift
fi
for op in $eexec_program
do
$op
${EEXEC_SHIFT}
done
EExec_verbose_msg $(echo_id eexec_program)
unset eexec_program
#export eexec_program
# Or export eexec_program to propagate eexec info to a called program.
# export eexec_program
# Where pristine copies of patched files are stashed (relative to CWD).
: ${XEM_ORIGINALS_DIR:=../dp-patches-Originals}
# -k: wipe any previously saved originals before starting.
[ "$1" = "-k" ] && {
shift
rm -rf "$XEM_ORIGINALS_DIR"
}
# Remaining leading options: -n (dry run) / -v (verbose); stop at first
# non-option, which is taken as the version id below.
for i in "$@"
do
case "$1" in
-n) EExecDashN;;
-v) EExecVerbose;;
*) break;;
esac
shift
done
[ -z "$*" ] && {
echo 1>&2 "I need something with which to identify the current version."
exit 1
}
# e.g. "21.4.22"; used both to find patch files and to sanity-check CWD.
VERSION_ID="$1"
: ${XEM_PATCH_BASE_DIR:=$HOME/patches/xemacs}
: ${XEM_DUMPED_PATCHES:=$XEM_PATCH_BASE_DIR/dumped}
: ${XEM_SRC_PATCHES:=$XEM_PATCH_BASE_DIR/src}
: ${XEM_LISP_ORIGINALS_DIR:=$XEM_ORIGINALS_DIR/lisp}
: ${XEM_SRC_ORIGINALS_DIR:=$XEM_ORIGINALS_DIR/src}
# Refuse to run unless the CWD looks like <...VERSION_ID...>/lisp.
echo "Looking for >$VERSION_ID< in >$PWD<"
case "$PWD" in
*$VERSION_ID*) ;;
*) echo 1>&2 "You don't seem to be in $VERSION_ID's dir."; exit 2;;
esac
# Patch lisp first.
# We should be some VERSION_ID'y lisp place.
echo "Looking for >*$VERSION_ID*/lisp< in >$PWD<"
case "$PWD" in
*$VERSION_ID*/lisp) ;;
*) echo 1>&2 "You don't seem to be in $VERSION_ID's lisp dir."; exit 3;;
esac
# -p lets it work if the dir exists but doesn't toss any other error checking.
EExec mkdir -p $XEM_LISP_ORIGINALS_DIR
EExec mkdir -p $XEM_SRC_ORIGINALS_DIR
#
# OK, we're in a lispy place, let's patch some lispy files.
#
#
# Count on my file naming convention, or use patch file info:
# --- buff-menu.el.orig 2005-12-23 06:40:33.000000000 -0500
# +++ buff-menu.el 2006-02-26 15:02:36.000000000 -0500
# `-- first char on line.
# patch_em <originals_dir> <patch_file>...
#   For each patch file: extract the target file name from the diff's
#   "+++" line, back the target up into <originals_dir>, then apply the
#   patch in the CWD. If a backup already exists, prompts the user
#   (default: assume already patched and skip).
#   Exits: 1 on a malformed diff, 2 if the user answers "q" at the prompt.
patch_em()
{
local originals_dir="$1"
shift
local patch_p=
# With at least one bash, using local breaks the array usage.
patch_files=("$@")
for patch_file in "${patch_files[@]}"; do
echo_id patch_file
# An unexpanded glob means no patches for this version; skip quietly.
ls $patch_file 2>/dev/null || {
echo "No matching patch files."
continue
} 1>&2
patch_file_base=$(basename $patch_file)
# Target name comes from line 2 of the diff: "+++ <file> <timestamp>".
fname=$(head -n2 $patch_file | tail -n1 | sed -rn 's/(\+\+\+ )([^ \t]+)(.*)$/\2/p')
[ -z "$fname" ] && {
echo "Error extracting file name from diff file."
exit 1
} 1>&2
fname=$(basename $fname)
[ -z "$fname" ] && {
echo "Error extracting base file name."
exit 1
} 1>&2
echo "Attempting to patch \`$fname' with \`$patch_file'"
orig_file_name=$originals_dir/$fname
echo "Backing up \`$fname' to \`$orig_file_name'"
# A pre-existing backup usually means a previous run already patched this
# file; ask the user whether to re-patch (n), continue (Y), or quit (q).
[ -e "$orig_file_name" ] && {
echo "Backup copy of original file \`$orig_file_name' already exists."
if diff -q "$orig_file_name" "$fname"
then
echo "It doesn't look like we're patched: backup and original are the same."
fi
read -e -p "Assume we're already patched and continue(Y/n/q)? "
case "$REPLY" in
[qQ]) exit 2;;
[nN]) patch_p=t;;
*) patch_p=;;
esac
} 1>&2
# Patch when there is no backup yet, or the user asked to force it.
[ ! -e "$orig_file_name" -o -n "${patch_p}" ] && {
EExec cp -f $fname $orig_file_name
# "yes n" answers any reverse/already-applied prompts conservatively.
yes n | EExec patch < $patch_file
# Just in case the patch is now a few lines off. Files names will be
# different, so some code will be needed to diff the diffs w/o
# getting thrown off by the name diffs.
EExecDashN_p || {
local diff_file=$originals_dir/AM-I-DIFFERENT-$patch_file_base
diff -u $orig_file_name $fname > "$diff_file"
}
}
done
# Don't let a skipped file leave a non-zero status as the function result.
true
}
# Patch lisps
# (glob expands to every dumped-lisp patch file for this version)
patch_em ${XEM_LISP_ORIGINALS_DIR} ${XEM_DUMPED_PATCHES}/*${VERSION_ID}*.diff
# srcs.
# Move from <version>/lisp to <version>/src and patch the C sources.
cd ../src
patch_em ${XEM_SRC_ORIGINALS_DIR} ${XEM_SRC_PATCHES}/*${VERSION_ID}*/*.diff
| true |
158db7409add3ab871e9bd416fde2daf8320ed76 | Shell | ohmyzsh/ohmyzsh | /plugins/perl/perl.plugin.zsh | UTF-8 | 1,466 | 3.6875 | 4 | [
"MIT"
] | permissive | # https://github.com/dbbolton
#
# Below are some useful Perl-related aliases/functions that I use with zsh.
# Aliases ###################################################################
# perlbrew ########
alias pbi='perlbrew install'
alias pbl='perlbrew list'
alias pbo='perlbrew off'
alias pbs='perlbrew switch'
alias pbu='perlbrew use'
# Perl ############
# perldoc shorthand
alias pd='perldoc'
# use perl like awk/sed
alias ple='perl -wlne'
# show the latest stable release of Perl
# (scrapes perl.org; the '\'' dance embeds a single-quoted perl one-liner
# inside a single-quoted alias)
alias latest-perl='curl -s https://www.perl.org/get.html | perl -wlne '\''if (/perl\-([\d\.]+)\.tar\.gz/) { print $1; exit;}'\'
# Functions #################################################################
# newpl - creates a basic Perl script file and opens it with $EDITOR
newpl () {
# set $EDITOR to 'vim' if it is undefined
[[ -z $EDITOR ]] && EDITOR=vim
# if the file exists, just open it
[[ -e $1 ]] && print "$1 exists; not modifying.\n" && $EDITOR $1
# if it doesn't, make it (shebang + strict/warnings boilerplate) and open it
[[ ! -e $1 ]] && print '#!/usr/bin/perl'"\n"'use strict;'"\n"'use warnings;'\
"\n\n" > $1 && $EDITOR $1
}
# pgs - Perl Global Substitution
# find pattern = 1st arg
# replace pattern = 2nd arg
# filename = 3rd arg
# NOTE(review): arguments are spliced directly into the s/// expression, so
# patterns containing "/" or perl code will break or execute -- trusted
# input only. Edits in place, keeping a .orig backup.
pgs() { # [find] [replace] [filename]
perl -i.orig -pe 's/'"$1"'/'"$2"'/g' "$3"
}
# Perl grep, because 'grep -P' is terrible. Lets you work with pipes or files.
# (same caveat as pgs: $1 is interpolated into the perl regex)
prep() { # [pattern] [filename unless STDOUT]
perl -nle 'print if /'"$1"'/;' $2
}
| true |
9c8142f5d45a28231e9e9f52c131cfe1529cbced | Shell | lfranchi/Scripts | /gitgc.sh | UTF-8 | 446 | 4.03125 | 4 | [] | no_license | #!/bin/bash
# Garbage collect all Git repositories under the current directory.
#
# Rewritten to be whitespace-safe: find output is consumed NUL-delimited
# instead of via an unquoted $(find ...) expansion, so repository paths
# containing spaces or newlines are handled correctly.
CURRENT=$PWD

# echorun: print the command being run, then run it with its arguments
# preserved exactly (the original unquoted $* re-split them).
echorun() {
    echo "+ $*"
    "$@"
}

while IFS= read -r -d '' SCM; do
    DIRECTORY=$(dirname "$SCM")
    echo "== Garbage collecting $(basename "$DIRECTORY")"
    # Only a .git *directory* marks a standalone repository; a plain .git
    # file (submodule/worktree pointer) is skipped, as in the original.
    if [ -d "$SCM" ]; then
        # Subshell cd so the loop never drifts away from its start dir.
        (cd "$DIRECTORY" && echorun git gc --aggressive)
    fi
    echo
done < <(find "$CURRENT" -name .git -print0)
| true |
c723e7643de35b8cd3e3556e1907a2d45eb0b778 | Shell | domanhduy/ghichep | /DuyDM/Zabbix/scripts/install_zabbix4_c7.sh | UTF-8 | 2,621 | 2.96875 | 3 | [] | no_license | #!/bin/bash
#duydm
#Install zabbix 4.0 Centos7.7
# Unattended installer: installs packages, secures MariaDB, creates the
# zabbix database/user, configures zabbix_server.conf and PHP, resets the
# web UI Admin password, then (re)starts all services. Run as root.
# NOTE(review): database and admin passwords are hard-coded in plain text.
#---------------------------
#Download the zabbix repo and install packages
#---------------------------
yum install epel-release wget -y
rpm -ivh https://repo.zabbix.com/zabbix/4.0/rhel/7/x86_64/zabbix-release-4.0-1.el7.noarch.rpm
yum -y install zabbix-server-mysql zabbix-web-mysql mysql mariadb-server httpd php
#---------------------------
# Create Db
#---------------------------
userMysql="root"
passMysql="duydm"
portMysql="3306"
hostMysql="localhost"
nameDbZabbix="zabbix_db"
userDbZabbix="zabbix_user"
passDbZabbix="duydm"
passAdminNew="domanhduy"
systemctl start mariadb
systemctl enable mariadb
# Scripted answers for mysql_secure_installation (set the root password,
# drop anonymous users/test DB, disallow remote root, reload privileges).
mysql_secure_installation <<EOF
y
$passMysql
$passMysql
y
y
y
y
EOF
# Create the zabbix database and grant its dedicated user.
cat << EOF |mysql -u$userMysql -p$passMysql
DROP DATABASE IF EXISTS zabbix_db;
create database zabbix_db character set utf8 collate utf8_bin;
grant all privileges on zabbix_db.* to zabbix_user@localhost identified by '$passDbZabbix';
flush privileges;
exit
EOF
#---------------------------
#Import database zabbix
#---------------------------
# NOTE(review): path is pinned to zabbix-server-mysql-4.0.13; a different
# package version will break this cd.
cd /usr/share/doc/zabbix-server-mysql-4.0.13
gunzip create.sql.gz
mysql -u$userMysql -p$passMysql zabbix_db < create.sql
#---------------------------
#Config DB
# edit vi /etc/zabbix/zabbix_server.conf
#---------------------------
sed -i 's/# DBHost=localhost/DBHost=localhost/g' /etc/zabbix/zabbix_server.conf
sed -i "s/DBName=zabbix/DBName=$nameDbZabbix/g" /etc/zabbix/zabbix_server.conf
sed -i "s/DBUser=zabbix/DBUser=$userDbZabbix/g" /etc/zabbix/zabbix_server.conf
sed -i "s/# DBPassword=/DBPassword=$passDbZabbix/g" /etc/zabbix/zabbix_server.conf
#---------------------------
#Configure PHP Setting
#---------------------------
sed -i 's/max_execution_time = 30/max_execution_time = 600/g' /etc/php.ini
sed -i 's/max_input_time = 60/max_input_time = 600/g' /etc/php.ini
sed -i 's/memory_limit = 128M/memory_limit = 256M/g' /etc/php.ini
sed -i 's/post_max_size = 8M/post_max_size = 32M/g' /etc/php.ini
sed -i 's/upload_max_filesize = 2M/upload_max_filesize = 16M/g' /etc/php.ini
echo "date.timezone = Asia/Ho_Chi_Minh" >> /etc/php.ini
#---------------------------
#Change pass Admin zabbix
#---------------------------
# Resets the frontend "Admin" account password directly in the DB.
cat << EOF |mysql -u$userMysql -p$passMysql
use zabbix_db;
update users set passwd=md5('$passAdminNew') where alias='Admin';
flush privileges;
exit
EOF
systemctl restart mariadb
#---------------------------
#Restart service
#---------------------------
systemctl start zabbix-server
systemctl enable zabbix-server
systemctl start httpd
systemctl enable httpd
systemctl restart zabbix-server
systemctl restart httpd
| true |
e3d33cb0bc2dabb318bade6d01268a84ab0ba208 | Shell | ssapra/bpm-release | /ci/scripts/bump-submodules | UTF-8 | 1,051 | 3.5625 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
set -eu
# CI task: clone the input repo, update its submodules, regenerate package
# specs, commit the result, and leave a summary message for later tasks.
MESSAGE_PATH="$PWD/bump-submodules-message/message"
IN_REPO_PATH="$PWD/repo"
OUT_REPO_PATH="$PWD/bumped-repo"
git clone "$IN_REPO_PATH" "$OUT_REPO_PATH"
cd "$OUT_REPO_PATH"
# Disable updates as runtime-spec is updated at a different cadence to runc
git config submodule.src/github.com/opencontainers/runtime-spec.update none
git submodule update --init --remote
go get github.com/vito/gosub
export GOPATH="$PWD"
./scripts/sync-package-specs
./scripts/sync-submodule-config
git clean -ffd
git add -A
# Porcelain output is empty iff nothing changed; captured before the
# submodules are re-enabled below.
STATUS=$(git status --porcelain)
echo "Re-enabling disabled submodules for update and updating them"
# Re-enable updates to runtime-spec for downstream tasks
git config --unset submodule.src/github.com/opencontainers/runtime-spec.update
git submodule update --init
if [ "$STATUS" == "" ]; then
exit 0
fi
git config --global user.name "Submodule Bumper"
git config --global user.email "cf-bpm+submodule-bumper@pivotal.io"
# Commit using a message generated from the submodule log.
./scripts/submodule-log | git commit --file -
cat > "$MESSAGE_PATH" << EOF
Submodules were updated:
$STATUS
EOF
| true |
9f2d4b88f1f586a511f9791b0cb7774a7d6d007b | Shell | macropin/docker-roundcube | /entry.sh | UTF-8 | 1,418 | 3.0625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
set -e

# Enable command tracing when the caller exports DEBUG=true.
[ "${DEBUG:-}" == 'true' ] && set -x

# genpasswd [length] - print LENGTH random characters (default 16) drawn
# from [A-Za-z0-9_], sourced from /dev/urandom.
genpasswd() {
    export LC_CTYPE=C # Quiet tr warnings
    local l=${1:-16}
    tr -dc 'A-Za-z0-9_' < /dev/urandom | head -c "${l}"
}

# Database connection: fall back to the linked "mariadb" container.
export DATABASE_HOST=${DATABASE_HOST:-$MARIADB_PORT_3306_TCP_ADDR}
export DATABASE_NAME=${DATABASE_NAME:-roundcube}
export DATABASE_USER=${DATABASE_USER:-roundcube}

# MySQL Service
export DB_DSNW="mysql://${DATABASE_USER}:${DATABASE_PASS}@${DATABASE_HOST}/${DATABASE_NAME}"

# IMAP Service (note: the "-" expansion deliberately keeps an explicitly
# empty DEFAULT_HOST, unlike ":-")
export DEFAULT_HOST="${DEFAULT_HOST-ssl://${MAILSERVER_PORT_993_TCP_ADDR}:$MAILSERVER_PORT_993_TCP_PORT}"
export DEFAULT_PORT="${DEFAULT_PORT:-$MAILSERVER_PORT_993_TCP_PORT}"

# SMTP Service
export SMTP_SERVER="${SMTP_SERVER:-ssl://${MAILSERVER_PORT_465_TCP_ADDR}:$MAILSERVER_PORT_465_TCP_PORT}"
export SMTP_PORT="${SMTP_PORT:-$MAILSERVER_PORT_465_TCP_PORT}"

# Fresh per-container key used by Roundcube to encrypt session secrets.
export DES_KEY=$(genpasswd 24)

if [ "${SSL_ENABLED:-}" == "true" ]; then
    a2enmod ssl
    export SSL_CRT=${SSL_CRT:-/etc/ssl/certs/ssl-cert-snakeoil.pem}
    export SSL_KEY=${SSL_KEY:-/etc/ssl/private/ssl-cert-snakeoil.key}
    export SSL_CA=${SSL_CA:-/etc/ssl/certs/ca-certificates.crt}
fi

# Apache MPM Tuning
export MPM_START=${MPM_START:-5}
export MPM_MINSPARE=${MPM_MINSPARE:-5}
export MPM_MAXSPARE=${MPM_MAXSPARE:-10}
export MPM_MAXWORKERS=${MPM_MAXWORKERS:-150}
export MPM_MAXCONNECTIONS=${MPM_CONNECTIONS:-0}

php /bootstrap.php

# FIX: quote "$@" so container arguments containing whitespace are passed
# through intact (the original unquoted $@ re-split them).
exec "$@"
| true |
cce5e0f8b26c5eec524d728aaeedec2320948197 | Shell | mlogue76/my-prep | /bin/tbgw_bak | UTF-8 | 658 | 2.921875 | 3 | [] | no_license | #! /bin/sh
# Launcher for the Tibero MSSQL gateway.
# FIX: the original "prog= basename $0" ran basename as a command with an
# empty "prog" in its environment instead of assigning the result.
prog=$(basename "$0")

if [ -z "$TB_MSGW_HOME" ]; then
    # printf: portable, and no literal "\n" in the output (the original
    # single-quoted echo printed the backslash sequence verbatim).
    printf '%s\n' '$TB_MSGW_HOME not defined.' >&2
    exit 1
fi

# Classpath components
commonspool=$TB_MSGW_HOME/lib/commons-pool.jar
commonscollections=$TB_MSGW_HOME/lib/commons-collections.jar
log4j=$TB_MSGW_HOME/lib/log4j-1.2.15.jar
msjdbc=$TB_MSGW_HOME/lib/sqljdbc.jar
gateway=$TB_MSGW_HOME/lib/tbgateway.jar

#log4j properties
#log4jfile must be exists on classpath
log4jfile=log4j.properties

#Main Class
mainclass=com.tmax.tibero.gateway.main.GatewayMain

# Launch the gateway in the background, forwarding any extra arguments
# ("$@" instead of unquoted $* so spaced arguments survive).
java -Xms64m -Xmx512m -Dlog4j.configuration="$log4jfile" \
    -classpath "$commonspool:$commonscollections:$log4j:$gateway:$msjdbc:$TB_MSGW_HOME" \
    "$mainclass" "$@" &
| true |
60f13b9b07f130455e421e028003077cb86ec264 | Shell | dipietro-salvatore/scripts | /os/hyperthreading/hyperthread_off.sh | UTF-8 | 752 | 3.4375 | 3 | [] | no_license | PATHCPU="/sys/devices/system/cpu"
TURNOFF=""
bash hyperthread_on.sh
for CPU in $PATHCPU/cpu[0-9]*; do
CPUID=$(basename $CPU)
CORES="$(cat ${CPU}/topology/thread_siblings_list 2> /dev/null)"
CORE="$(echo $CORES | cut -d',' -f2)"
if [ "$(echo $CORES | tr ',' '\n' | wc -l)" -gt "1" ]; then
echo "CPU: $CPUID CORES: $CORES Turn Off: $CORE";
TURNOFF="${TURNOFF},${CORE}"
fi
done
for CORE in $(echo $TURNOFF | tr ',' '\n'); do
ONLINE="${PATHCPU}/cpu${CORE}/online"
if [ "$( cat $ONLINE )" -eq "1" ] ; then
echo "core=${CORE} -> disable";
echo "0" > $ONLINE;
fi;
done;
bash hyperthread_view.sh
| true |
d2518c017fcbe46e5d425889f977822bab355542 | Shell | sungwookson/RescueCamera | /install.sh | UTF-8 | 1,069 | 3.5625 | 4 | [] | no_license | #!/bin/bash
# RescueCamera installer: requires root. Enables SSH, installs the camera
# stack (fswebcam/motion), AP tooling (dnsmasq/hostapd) and Flask, then
# deploys the motion config and the rescuecam systemd service.
if [ `id -u` != "0" ]; then
echo "Run it again with root"
echo "sudo ./install.sh"
exit 1
fi
#Enable SSH
sudo systemctl enable ssh
sudo systemctl start ssh
#Updating apt-get to the latest
sudo apt-get update
sudo apt-get upgrade
#Install necessary components
sudo apt-get install -y fswebcam motion rpi.gpio
sudo apt-get install -y dnsmasq hostapd
sudo pip install flask
#Check if properly installed
# check_installed <package> - exit 1 unless dpkg reports the package
# status as "ok installed".
function check_installed() {
if [ $(dpkg-query -W -f='${Status}' $1 2>/dev/null | grep -c "ok installed") -eq 0 ];
then
echo "Package $1 has not been installed please re-run the script";
exit 1
fi
}
check_installed fswebcam
check_installed motion
check_installed dnsmasq
check_installed hostapd
#Get Motion working
sudo cp config/motion.conf /etc/motion/motion.conf
sudo cp config/motion /etc/default/motion
sudo service motion restart
#Get Flask API Working
sudo cp config/rescuecam.service /etc/systemd/system/rescuecam.service
mkdir ~/rescueflask
cp -r app ~/rescueflask/
sudo systemctl enable rescuecam
#sudo ./startAP.sh | true |
dfa0d22f0036d614984911952baf39d4a3d8e58d | Shell | theypsilon/shell-scripts | /ds.sh | UTF-8 | 592 | 3.875 | 4 | [] | no_license | #!/bin/sh
# Detached shell, an improved 'command &'
# Usage: ds [commands]
#
# With no arguments: dump the accumulated log. With arguments: run them in
# the background, appending their output to the log when attached to a tty.
readonly LOG_FILE="$HOME/.ds.log"

# Invocation with no arguments: show the log (quoted, so a $HOME containing
# spaces no longer breaks every test/cat below).
if [ $# -eq 0 ]; then
    if [ -f "$LOG_FILE" ]; then
        cat "$LOG_FILE"
        echo; echo "# Log stored in $LOG_FILE"
    else
        echo "No arguments supplied"
    fi
    exit
fi

# Redirect stdout/stderr into the log, but only when they are terminals
# (pipes and files already set up by the caller are left untouched).
if test -t 1; then exec 1>>"$LOG_FILE"; fi
if test -t 2; then exec 2>>"$LOG_FILE"; fi

# Log makeup: separate runs with a blank line.
if [ -f "$LOG_FILE" ]; then echo; fi

# Exported (the original plain assignment did not reach child processes)
# so the 'date' call below prints in English.
export LANG=en_US

# Logging input ("$*" joins the arguments into one line; the original $@
# inside a quoted string relied on unspecified behavior).
echo "[$(date) | $PWD ]>\$ $*"

# Logging output: run detached.
"$@" &
9a9e09d2abd498f58abde114bd68bed337650e4d | Shell | Ninir/CentralReport | /install.sh | UTF-8 | 3,916 | 4.21875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# ------------------------------------------------------------
# CentralReport Unix/Linux bash installer
# Alpha version. Don't use in production environment!
# ------------------------------------------------------------
# https://github.com/miniche/CentralReport/
# ------------------------------------------------------------
# Importing scripts...
# (log*, getOS, getPythonIsInstalled, checkYesNoAnswer, macos_install and
# debian_install are all defined by these sourced files)
source bash/vars.sh
source bash/log.inc.sh
source bash/functions.inc.sh
source bash/macos.inc.sh
source bash/debian.inc.sh
# Modes: only "install" yet ("check" mode will be added soon)
ACTUAL_MODE=install
# We are ready to install CentralReport. Log this and print the header.
logFile "-------------- Starting CentralReport installer --------------"
logConsole "\033[44m\033[1;37m"
logConsole " -------------- CentralReport installer --------------"
logConsole "\033[0;44m"
logConsole " Welcome! This script will install CentralReport on your host."
logConsole " If you want more details, please visit http://github.com/miniche/CentralReport."
logConsole " "
logConsole " When installing CentralReport, we may ask for your password. It will allow CentralReport to write files and directories such as the project binaries, logs, etc."
logConsole "\033[0m"
# In the future, it will be possible to have different modes.
if [ -n "$1" ]; then
ACTUAL_MODE=$1
fi
# Python is mandatory for CentralReport
getPythonIsInstalled
if [ $? -ne 0 ]; then
logError "Error! Python must be installed on your host to execute CentralReport."
exit 1
fi
# Get the current OS to check whether the install can work on this host.
getOS
# Check the actual mode.
if [ "install" == ${ACTUAL_MODE} ]; then
# Right now, it only works on MacOS.
# Support for Linux distrib coming soon.
if [ ${CURRENT_OS} != ${OS_MAC} ] && [ ${CURRENT_OS} != ${OS_DEBIAN} ]; then
logError " "
logError "ERROR"
logError "The install is only designed for Mac OS, Debian and Ubuntu."
logError "Support for other OS will come soon!"
else
logConsole " "
logConsole "Install mode enabled"
# Read straight from the terminal so piped stdin can't auto-answer.
read -p "You will install CentralReport. Are you sure you want to continue (y/N): " RESP < /dev/tty
# Are you sure to install CR ?
checkYesNoAnswer ${RESP}
if [ $? -eq 0 ]; then
# It's an indev version. At each install, we delete everything.
# O=no error / 1=one or more errors
bit_error=0
if [ ${CURRENT_OS} == ${OS_MAC} ]; then
logInfo "Processing... CentralReport will be installed on this Mac."
macos_install
if [ $? -ne 0 ]; then
bit_error=1
fi
elif [ ${CURRENT_OS} == ${OS_DEBIAN} ]; then
logInfo "Processing... CentralReport will be installed on this Linux."
debian_install
if [ $? -ne 0 ]; then
bit_error=1
fi
fi
if [ ${bit_error} -eq 1 ]; then
logError "Something went wrong when installing CentralReport!"
logError "CentralReport isn't installed on this host."
else
# Displays the success text!
logConsole "\033[1;32m"
logConsole " "
logInfo "CentralReport is now installed!"
logInfo "For more options, you can edit the config file at /etc/centralreport.cfg"
logConsole " "
logInfo "More help at http://github.com/miniche/CentralReport"
logInfo "Have fun!"
logConsole " "
logConsole "\033[0m"
fi
fi
fi
else
logError " "
logError "ERROR!"
logError "Unknown argument"
logError "Use: install.sh [install]"
fi
# End of program
logConsole " "
logInfo " -- End of the program -- "
| true |
5c49155484074d34146cf2d0cf98ebc7398c2598 | Shell | ngara/arduino-temps | /graphite-client-initd-script | UTF-8 | 887 | 3.59375 | 4 | [] | no_license | #! /bin/sh
# /etc/init.d/graphite-client
#
# SysV init script for the graphite-client daemon, run as $CARBON_USER.
# FIX: the original issued "cd $GRAPHITE_HOME" in one su shell and the
# start/stop command in a *second* su shell, so the cd never took effect;
# both now run inside a single su -c invocation.

# Some things that run always
touch /var/lock/graphite-client

GRAPHITE_HOME=/opt/graphite
CARBON_USER=www-data

# run_client <action> - cd into GRAPHITE_HOME and invoke the client with
# the given action, all in one shell owned by CARBON_USER.
run_client() {
    su "$CARBON_USER" -c "cd '$GRAPHITE_HOME' && '$GRAPHITE_HOME/bin/graphite-client.py' $1"
}

# Carry out specific functions when asked to by the system
case "$1" in
    start)
        echo "Starting script graphite-client "
        run_client start
        ;;
    stop)
        echo "Stopping script graphite-client"
        run_client stop
        ;;
    restart)
        echo "Restarting script graphite-client"
        run_client restart
        ;;
    *)
        echo "Usage: /etc/init.d/graphite-client {start|stop|restart}"
        exit 1
        ;;
esac

exit 0
| true |
c37a55482a8332afe275623aef2039f97030888f | Shell | chimay/configuration | /w3m/cgi-bin/fzf_surfraw.cgi | UTF-8 | 1,959 | 3.03125 | 3 | [] | no_license | #!/usr/bin/env bash
### _ _ _ _
### __ _ ___ | |_| |__ | | ___| |_ _ _
### / _` |/ _ \| __| '_ \| |/ _ \ __| | | |
###| (_| | (_) | |_| |_) | | __/ |_| |_| |
### \__, |\___/ \__|_.__/|_|\___|\__|\__,_|
### |___/
### https://www.youtube.com/user/gotbletu
### https://lbry.tv/@gotbletu
### https://twitter.com/gotbletu
### https://github.com/gotbletu
### gotbletu@gmail.com
###
### Author : gotbletu
### Name : fzf_surfraw.cgi
### Version : 0.2
### Date : 2020-04-27
### Description : interactive surfraw smart prefix search engine (mainly use within w3m web browser)
### Depends On : surfraw fzf xsel gawk coreutils grep procps-ng
### Video Demo : https://youtu.be/p5NZb8f8AHA
### References : https://github.com/felipesaa/A-vim-like-firefox-like-configuration-for-w3m
### Setup
# vim ~/.w3m/keymap
# keymap xs COMMAND "SHELL ~/.w3m/cgi-bin/fzf_surfraw.cgi ; GOTO /usr/lib/w3m/cgi-bin/goto_clipboard_primary.cgi"
# keymap XS COMMAND "SHELL ~/.w3m/cgi-bin/fzf_surfraw.cgi ; TAB_GOTO /usr/lib/w3m/cgi-bin/goto_clipboard_primary.cgi"
# keymap xs COMMAND "SHELL ~/.w3m/cgi-bin/fzf_surfraw.cgi ; GOTO /usr/lib/w3m/cgi-bin/goto_clipboard.cgi"
# keymap XS COMMAND "SHELL ~/.w3m/cgi-bin/fzf_surfraw.cgi ; TAB_GOTO /usr/lib/w3m/cgi-bin/goto_clipboard.cgi"
clear
# select your elvi
# (list surfraw engines, drop the LOCAL/GLOBAL section headers, pick one
# interactively with fzf, keep the engine name from column 1)
PREFIX=$(surfraw -elvi | grep -v 'LOCAL\|GLOBAL'| fzf -e | awk '{print $1}')
# exit script if no elvi is selected (e.g hit ESC)
if [ "$PREFIX" = "" ]; then exit; fi
# get user input
read -r -e -p " $PREFIX >> Enter Your Search Keyword: " INPUT
# print proper url and copy to primary clipboard (aka highlighted clipboard) and tmux clipboard
# (-p makes surfraw print the search URL instead of launching a browser)
surfraw -p "$PREFIX" "$INPUT" | xsel -p
| true |
e06948ea68ccc1b3fb1b6442881c434228d29d1e | Shell | dstern/TagMap | /pBac_tagmap.II.sh | UTF-8 | 5,283 | 3.734375 | 4 | [] | no_license | #!/bin/bash
# tagmap shell script
# author: David L. Stern
# Janelia Research Campus
# HHMI
# Ashburn, VA
# 27 February 2016
# v. 0.1
#
# Filters and maps reads, finds overlapping forward and reverse reads, plots overlap regions at low and high resolution
# Operates on a folder full of *fastq.gz files generated by the TagMap molecular protocol
# Currently uses bwa-mem for short-read mapping. This works great for most transposons, which duplicate the target site.
#
# Variables:
#
# To detect the orientation of the transposable element or duplicated sequence, provide the first N bp from the 5' of the sequence
#
# dependencies available to system:
# prinseq-lite-0.20.4 (http://prinseq.sourceforge.net)
# Make prinseq-lite executable
# Navigate to folder and type
# chmod 755 prinseq-lite.pl
# bwa 0.7.12-r1039 (http://bio-bwa.sourceforge.net/bwa.shtml)
# samtools 1.3 (http://samtools.sourceforge.net)
# gnuplot 5.0 patchlevel 1 (http://www.gnuplot.info)
# run as ./tagmap <mapping_genome>
# NOTE(review): "$(unknown)" appears throughout where a per-sample file
# prefix (presumably derived from the loop variable $filename, which is
# otherwise unused) once stood -- this looks like an anonymization or
# extraction artifact. Restore the real prefix before running; as written,
# every "$(unknown)" invokes a nonexistent "unknown" command.
mapping_genome=$1
##########################################################
#
# USER SUPPLIED 5' and 3' sequences
#
fiveprimeseq=TTAACCCTAGAAAGATAGTCTGCGTAAAATTGACGCATG #TTAA is duplicated pBac recognition sequence
threeprimeseq=TTAACCCTAGAAAGATAATCATATTGTGACGTACGTTAA
#
#
#
##########################################################
# Reverse complements of the two end sequences, for reads on the - strand.
rc_fiveprimeseq=`echo $fiveprimeseq | tr ACGT TGCA | rev`
rc_threeprimeseq=`echo $threeprimeseq | tr ACGT TGCA | rev`
#prepare bwa index of relevant genome
if [ -e ${mapping_genome}.sa ]; then
echo bwa index already built.
else
echo building bwa index.
bwa index $mapping_genome
fi
#unzip all fastq.gz files
gunzip *fastq.gz
for filename in *.fastq; do
#remove PCR duplicates
#make prinseq-lite executable and put in executable
#prinseq automatically appends a fast suffix to output
prinseq-lite.pl -derep 12 -fastq $(unknown) -out_good $(unknown).good -out_bad null
#map reads to genome, convert to bam, and sort
#Program: bwa (alignment via Burrows-Wheeler transformation)
#Version: 0.7.12-r1039
bwa mem $mapping_genome $(unknown).good.fastq | samtools view -b /dev/stdin | samtools sort /dev/stdin > $(unknown).good.fastq.sorted.bam
#make bai file
samtools index $(unknown).good.fastq.sorted.bam
# find locations with pBac sequences
# (mapped reads only, -F 4; match either transposon end in either orientation)
samtools view -F 4 $(unknown).good.fastq.sorted.bam | egrep -i ${fiveprimeseq}\|${rc_threeprimeseq}\|${threeprimeseq}\|${rc_fiveprimeseq} > $(unknown).TESites.sam
samtools view -F 4 $(unknown).good.fastq.sorted.bam | egrep -i ${fiveprimeseq}\|${rc_threeprimeseq} > $(unknown).fwdSites.sam
samtools view -F 4 $(unknown).good.fastq.sorted.bam | egrep -i ${threeprimeseq}\|${rc_fiveprimeseq} > $(unknown).revSites.sam
#grab unique positions (chrom & bp)
cut -f 3-4 $(unknown).TESites.sam | uniq > $(unknown).HitSites.txt
#reduce to unique sites within 300bp using Python
python thin.py $(unknown).HitSites.txt
#grab positions (result: chrom, bp, count, orientation)
cut -f 2-4 $(unknown).fwdSites.sam | uniq -c|tr ' ' '\t'|awk '{ print $3 "\t" $4 "\t" $1 "\t" $2} ' > $(unknown).fwdHitSites.txt
cut -f 2-4 $(unknown).revSites.sam | uniq -c|tr ' ' '\t'|awk '{ print $3 "\t" $4 "\t" $1 "\t" $2} ' > $(unknown).revHitSites.txt
# python thin.py $(unknown).fwdHitSites.txt
# python thin.py $(unknown).revHitSites.txt
#Produce two files, with depth for reads in opposite orientation
# (flag 0x10 = read reverse strand: -F excludes it, -f requires it)
samtools view -F 0x10 $(unknown).good.fastq.sorted.bam -b| samtools depth /dev/stdin > $(unknown).good.fastq.sorted.fwd.depth
samtools view -f 0x10 $(unknown).good.fastq.sorted.bam -b| samtools depth /dev/stdin > $(unknown).good.fastq.sorted.rev.depth
#print out 2kb flanking around each candidate site using both fwd and rev files
#for each candidate site, grab chromosome and position
# old_chrom caches the last chromosome so per-chromosome extracts are
# only regenerated when the chromosome changes.
old_chrom='null'
while read chrom bp #grab chromosome and bp position
do
if [ $chrom = $old_chrom ]; then
:
else
#grab position and depth from selected chromosome
grep ^$chrom $(unknown).good.fastq.sorted.fwd.depth | cut -f 2-3 > $(unknown).plot_forward
grep ^$chrom $(unknown).good.fastq.sorted.rev.depth | cut -f 2-3 > $(unknown).plot_reverse
grep ^$chrom $(unknown).fwdHitSites.txt | cut -f 2-3 > $(unknown).fwdHits.txt
grep ^$chrom $(unknown).revHitSites.txt | cut -f 2-3 > $(unknown).revHits.txt
old_chrom=$chrom
fi
#plot tview
let start=bp-10
samtools tview -d T -p $chrom:$start $(unknown).good.fastq.sorted.bam > $(unknown).$chrom.$bp.tview.txt
#grab 1kb up and downstream of site from for.depth and rev.depth files
let start=bp-1000
let stop=bp+1001
gnuplot -p -e "set terminal postscript color ;\
set xrange [$start:$stop];\
unset key;\
plot '$(unknown).plot_forward' with dots lw 5 linecolor rgb 'red','$(unknown).plot_reverse' with dots lw 5 linecolor rgb 'blue',\
'$(unknown).fwdHits.txt' with points pointtype 1 linecolor rgb '#F08080',\
'$(unknown).revHits.txt' with points pointtype 1 linecolor rgb '#00CED1'"> $(unknown).$chrom.$bp.gnuplot.ps
done < $(unknown).HitSites.txt.candidate_sites
# Comment out following line for troubleshooting
rm $(unknown).good* $(unknown).*HitSites.* $(unknown).*.sam $(unknown).plot* $(unknown).*Hits.txt
done
| true |
422d7db0b34f451c607685cdb5f1dd25b57a8bfc | Shell | Yang-33/SpatialSkylineQueries-on-TIN | /scripts/measure-multi-rand-test.sh | UTF-8 | 1,195 | 2.5625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
set -eu

# Compile the benchmark binary in release mode before measuring.
./scripts/build-release.sh

# Number of random seeds swept for every parameter value (required arg 1).
seedcount=$1

# Run the benchmark once per (value, seed) pair for one command-line flag.
#   $1   - flag name (e.g. -Psize)
#   $2.. - values to sweep for that flag
# The four sweeps below were previously four copy-pasted loop nests; this
# helper keeps the invocation in exactly one place.
run_sweep() {
    local arg=$1
    shift
    local val seed
    for val in "$@"; do
        for seed in $(seq 0 $((seedcount - 1))); do
            ./out/release/tin-skyline -testmemods "${arg}=${val}" -randseed="${seed}" -qpossquare=true -forcepointupdate -reorder=2 #-onlyfast
        done
    done
}

# P: number of data points
run_sweep -Psize 100 200 300 400
# Q: number of query points
run_sweep -Qsize 5 10 15 20
# MBR Q: query MBR size as a percentage
run_sweep -QMBRPercentage 1 2 3 4
# TIN: terrain meshes of increasing size
run_sweep -tinpath "./data/q3-0001000.off" "./data/q3-0002000.off" "./data/q3-0003000.off" "./data/q3-0004000.off"
| true |
8b343b6c83d211a3b418427ac8e4d4909de47ac2 | Shell | gtauro/Biocomp-Fall2018-180928-Exercise5 | /e2.sh | UTF-8 | 812 | 3.765625 | 4 | [] | no_license | # usage bash e2.sh wages.csv
# To be used to return gender (-f 1), yearsExperience (-f 2), and wage (-f 4) for top and bottom earners by wage
# Also returns number of "females" in top 10 earners by wage
list=$(cat "$1" | grep -v "gender" | cut -d , -f 1,2,4 | sort -nr -t , -k 3)
# Outputs top earner
echo "The top earner had the following gender, years of experience, and wage:"
echo "$list" | head -n 1
# Outputs bottom earner
echo -e "\nThe bottom earner had the following gender, years of experience, and wage:"
echo "$list" | tail -n 1
# Lists the number of females in the top 10 earners
f=$(echo "$list" | head -n 10 | grep "female" | wc -l)
if [ $f -eq 1 ]
then
echo -e "\nThere was $f female in the top 10 earners by wage."
else
echo -e "\nThere were $f females in the top 10 earners by wage."
fi
| true |
1156342d30d751869bd8e5b4d87c8e75a436e439 | Shell | begrif/homeconfig | /shell-tools/toggle-elan-touchpad | UTF-8 | 1,041 | 3.953125 | 4 | [] | no_license | #!/bin/sh
# A toggle tool for the touchpad, intended to be run via keyboard
# shortcut. Generally I prefer the mouse, and keep the touchpad
# disabled, but sometimes the mouse isn't handy.
device='ELAN1201:00 04F3:3054 Touchpad'
property='Device Enabled'
#
getstate() {
verbosestate=$(xinput --list-props "$device" | grep "$property")
case "X${verbosestate}X" in
XX) state="no-answer" ;;
# something(*) colon tab (': ') zero/one (0 1) end of line (X)
# *': '1X) state="1" ;;
# *': '0X) state="0" ;;
*) # try to parse out to be more general; note explicit tab in there
state="${verbosestate##* }"
;;
esac
echo "$state"
}
# takes a value and sets it; "Device Enabled" accepts 0 or 1, but other
# properties have other rules
setstate() {
xinput --set-prop "$device" "$property" "$1"
}
touchpadstate=$(getstate)
if [ "$touchpadstate" = 1 ] ; then
setstate 0
exit
fi
if [ "$touchpadstate" = 0 ] ; then
setstate 1
exit
fi
echo "$device in unexpected state: $touchpadstate"
exit 1
| true |
de420917ce9c00fcafc84314c7c4e5deccee83d7 | Shell | redorca/home-env | /bin/stage-stat | UTF-8 | 1,245 | 4.09375 | 4 | [] | no_license | #!/bin/bash
#
# Stage all modified files tracked but not yet
# in the index based on a git status.
#
#
# Generate a git status --porcelain output and filter for
# M,A,R,C and D files.
#
stat_files()
{
local ADDED=
local DELETED=
CMD="git status --porcelain=v1"
eval $1=\"$($CMD | sed -n -e '/^[ ][MARC]/p' | awk '{print $2}')\"
eval $2=\"$($CMD | sed -n -e '/^[ ]D/p' | awk '{print $2}')\"
}
git_action()
{
local ACTION=
local Token=
local CMD=
ACTION=${1##FILES_TO_}
CMD='git ${ACTION,,} $Token'
for Token in ${!1} ; do
eval echo $CMD
eval $CMD
done
}
REPO_ROOT=$(git rev-parse --show-toplevel)
#
# Variables holding files to process. The variable name
# contians the action to take on the set of files. E.G.
# git ACT <files> <==> FILES_TO_{ACT} so git add <files>
# uses the variable FILES_TO_ADD.
#
FILES_TO_ADD=
FILES_TO_RM=
if stat_files FILES_TO_ADD FILES_TO_RM ; then
for setlist in FILES_TO_ADD FILES_TO_RM ; do
[ -z "${!setlist}" ] && continue
git_action $setlist
done
fi
echo "FILES_TO_ADD: ($FILES_TO_ADD)"
echo "FILES_TO_RM: ($FILES_TO_RM)"
| true |
53a09246b569043a40c25ea06c903217d8ead599 | Shell | FlyingWombat/MSYS2-packages | /python3-more-itertools/PKGBUILD | UTF-8 | 1,192 | 2.78125 | 3 | [
"BSD-3-Clause"
] | permissive | # Maintainer: J. Peter Mugaas <jpmugaas@suddenlink.net>
# Package metadata for the MSYS2 python3-more-itertools package.
_realname=more-itertools
pkgbase=python3-${_realname}
pkgname=("python3-${_realname}")
pkgver=7.2.0
pkgrel=1
pkgdesc='More routines for operating on iterables, beyond itertools'
arch=('any')
url='https://github.com/erikrose/more-itertools'
#url='https://pypi.python.org/pypi/more-itertools'
license=('MIT')
makedepends=('python3-setuptools')
depends=('python3')
# Source tarball from PyPI plus its sha512 checksum.
source=("https://files.pythonhosted.org/packages/source/m/more-itertools/more-itertools-$pkgver.tar.gz")
sha512sums=('840b535bf5f2fc3cf9c4c0106f977f0b178049b95e5ccb6cf51b5e68d0a6afd77a577bb0d0af25ea8cdf4b7dd2ce9691754ba6c773a196f8b10dba5d7683c6b0')
# Copy the unpacked source into a clean per-architecture build directory.
prepare() {
  cd "${srcdir}"
  local builddir="python3-build-${CARCH}"
  # Remove any previous build tree.  The old line ("rm -rf ${builddir} | true")
  # piped rm's output into true — almost certainly a typo; the quoted plain
  # rm -rf is sufficient (rm -rf does not fail on a missing directory).
  rm -rf "${builddir}"
  cp -r "${_realname}-${pkgver}" "${builddir}"
  # Set version for setuptools_scm
  export SETUPTOOLS_SCM_PRETEND_VERSION=$pkgver
}
# Build the Python 3 package from the prepared per-arch directory.
build() {
  msg "Python 3 build for ${CARCH}"
  cd "${srcdir}/python3-build-${CARCH}"
  # ${pver} was never defined anywhere in this PKGBUILD, so "python${pver}"
  # silently resolved to plain "python"; use python3 explicitly to match
  # the interpreter used in package().
  python3 setup.py build
}
# Install the built package into the staging root and ship the license file.
package() {
  cd "${srcdir}/python3-build-${CARCH}"
  # --skip-build reuses the artifacts produced in build().
  python3 setup.py install --root="$pkgdir/" --optimize=0 --skip-build
  install -Dm644 LICENSE "$pkgdir/usr/share/licenses/$pkgname/LICENSE"
}
| true |
ecf22fa0ad1e71bd645dd4941c47dbc85c4337dd | Shell | maxfii/dots | /bash/.bashrc | UTF-8 | 2,253 | 2.703125 | 3 | [] | no_license | alias "c=xclip"
alias "v=xclip -o"
alias vim=nvim
alias vi=nvim
alias v=nvim
alias rg='rg -u'
alias ra=ranger
alias ll='ls -l'
alias la='ls -la'
alias ..='cd ..'
alias gl='git log --oneline'
alias ga='git add'
alias gaa='git add --all'
alias gc='git commit'
alias gcc='git commit -m'
alias gs='git status'
alias gw='git switch'
alias gd='git diff'
alias gb='git branch'
alias gr='git rebase'
alias gp='git push'
alias grs='git rebase -i --autosquosh'
alias as='apt search'
alias ai='sudo apt install -y'
alias td='tmux new-session -s $(basename $(pwd))'
# go to fzf-ed directory and start a tmux session there
alias ct='cd $(find . -type d -print | fzf) && tmux new-session -A -s $(pwd)'
complete -cf sudo
# Bash won't get SIGWINCH if another process is in the foreground.
# Enable checkwinsize so that bash will check the terminal size when
# it regains control. #65623
shopt -s checkwinsize
shopt -s expand_aliases
# Enable history appending instead of overwriting. #139609
shopt -s histappend
PROMPT_COMMAND=__prompt_command # Function to generate PS1 after CMDs
# Print the current git branch as "(branch)", or nothing when not in a repo
# (git's stderr is discarded).
parse_git_branch() {
  git branch 2> /dev/null | sed -n -e 's/* \(.*\)/(\1)/p'
}
# Rebuild PS1 before every prompt (installed via PROMPT_COMMAND above).
__prompt_command() {
    local EXIT="$?" # This needs to be first
    PS1=""
    # ANSI color sequences wrapped in \[ \] so readline ignores their width.
    local RCol='\[\e[0m\]'
    local Red='\[\e[0;31m\]'
    local Gre='\[\e[0;32m\]'
    local BYel='\[\e[1;33m\]'
    local BBlu='\[\e[1;34m\]'
    local Pur='\[\e[0;35m\]'
    # Color the username red on a non-zero exit status, green otherwise.
    if [ $EXIT != 0 ]; then
        PS1+="${Red}\u${RCol}" # Add red if exit code non 0
    else
        PS1+="${Gre}\u${RCol}"
    fi
    # NOTE(review): this assignment unconditionally OVERWRITES the colored
    # username built above, so the exit-status coloring is dead code —
    # "PS1+=" was probably intended.
    PS1=" \u${RCol}@${BBlu}\h \[\e[32m\]\w on \[\e[91m\]\$(parse_git_branch)\[\e[00m\]\n ~> "
}
# Enable Docker BuildKit for faster, cache-aware image builds.
export DOCKER_BUILDKIT=1
export PATH=$PATH:/home/janitor/.gem/ruby/2.6.0/bin:~/.dotnet/tools:~/.local/bin:/usr/local/go/bin
export VISUAL="/usr/bin/env nvim"
export EDITOR="$VISUAL"
export LANGUAGE="es_ES.UTF-8"
# nvm (Node version manager) setup, sourced only if installed.
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh"  # This loads nvm
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion"  # This loads nvm bash_completion
[ -f "/home/m/.ghcup/env" ] && source "/home/m/.ghcup/env" # ghcup-env
[ -f ~/.fzf.bash ] && source ~/.fzf.bash
# NOTE(review): this starts a NEW ssh-agent for every interactive shell,
# leaving orphaned agents behind — consider reusing an existing agent.
eval `ssh-agent` > /dev/null
. "$HOME/.cargo/env"
| true |
e343d962926e501ae4696582b1b08430e78dfbba | Shell | hak5/sharkjack-payloads | /payloads/library/recon/netdiscover/netdiscover-passive-payload/payload.sh | UTF-8 | 2,002 | 3.53125 | 4 | [] | no_license | #!/bin/bash
#
# Title: Passive netdiscover Payload for Shark Jack V 1.1.0
# Author: Charles BLANC ROLIN
# Version: 1.0
#
# Broadcast ARP with netdiscover using specified options. Saves each scan result
# to loot storage folder.
#
# Packages needed : coreutils, librt, timeout, libnet, netdiscover
#
# Packages downloads :
#
# https://downloads.openwrt.org/releases/18.06.5/packages/mipsel_24kc/packages/coreutils_8.23-4_mipsel_24kc.ipk
# https://downloads.openwrt.org/releases/18.06.5/targets/ramips/mt76x8/packages/librt_1.1.19-2_mipsel_24kc.ipk
# https://downloads.openwrt.org/releases/18.06.5/packages/mipsel_24kc/packages/coreutils-timeout_8.23-4_mipsel_24kc.ipk
# https://downloads.openwrt.org/releases/18.06.5/packages/mipsel_24kc/packages/libnet-1.2.x_1.2-rc3-4_mipsel_24kc.ipk
# https://downloads.openwrt.org/releases/18.06.5/packages/mipsel_24kc/packages/netdiscover_0.3-pre-beta7-1_mipsel_24kc.ipk
#
# To verify sha256 hashes :
# https://downloads.openwrt.org/releases/18.06.5/packages/mipsel_24kc/packages/Packages
# https://downloads.openwrt.org/releases/18.06.5/targets/ramips/mt76x8/packages/Packages
#
# Red ...........Setup
# Amber..........Scanning
# Green..........Finished
#
# See netdiscover -h for options. Default "-p -P -N" fast scan with infos only.
# Configure interface eth0 in passive mode
# (NETMODE and LED are Shark Jack firmware built-ins.)
NETMODE TRANSPARENT
# netdiscover flags: -p passive, -P parsable output, -N no header.
NETDISCOVER_OPTIONS="-p -P -N"
LOOT_DIR=/root/loot/netdiscover
# You can define passive network listening duration. 5 minutes by default.
TIME="300s"
# Setup
LED SETUP
# Create loot directory
mkdir -p $LOOT_DIR &> /dev/null
# Tear down the payload: kill any running netdiscover processes (recursing
# until none remain), signal completion on the LED, then power off.
function finish() {
	LED CLEANUP
	# Grep netdiscover process and kill
	# ("[n]etdiscover" avoids the grep process matching itself.)
	if ps | grep -q "[n]etdiscover"; then
		ps | grep netdiscover | grep -v grep | awk '{print $2}' | xargs kill -9
		# Recurse to re-check until every instance is gone.
		finish
	else
		LED FINISH
		sleep 1
		# Halt system
		halt
	fi
}
# Run the passive scan for $TIME seconds, save results to loot, then
# hand off to finish() for cleanup and shutdown.
function run() {
	LED ATTACK
	# Start scan
	timeout $TIME netdiscover $NETDISCOVER_OPTIONS > $LOOT_DIR/netdiscover-scan.txt
	finish
}
# Run payload
# (backgrounded so the payload framework regains control immediately)
run &
| true |
1f74bc533efe42126238a4405ee42e8bc0d64ec3 | Shell | bedrisendir/viz_workload | /scripts/start-sys-summary.sh | UTF-8 | 884 | 3.71875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Starts the dstat monitor on the local host
# Measures system cpu, memory, io and network activity
#
# Usage: start-sys-summary.sh TARGET_FN DELAY_SEC
#   TARGET_FN - CSV file dstat writes its samples to
#   DELAY_SEC - sampling interval in seconds
[ $# -ne 2 ] && echo USAGE: $0 TARGET_FN DELAY_SEC && exit 1
TARGET_FN=$1
DELAY_SEC=$2

# Check if dstat is installed ('command -v' replaces the old unused
# A=`which dstat` / $? dance and is the portable availability test).
if ! command -v dstat > /dev/null 2>&1; then
    echo ERROR: dstat not found on $HOSTNAME. Exiting... && exit 1
fi

# Semaphore file: creating it tells this script to stop dstat and exit.
STOP_FN=/tmp/${USER}/viz_workload/stop-sys-summary
rm -f "$STOP_FN"

# Check if a copy of this script is already running
NUM=$(ps -efa | grep "$0" | grep -v "vim\|grep\|ssh" | wc -l)
[ "$NUM" -gt 2 ] && echo WARNING: $0 appears to be running on $HOSTNAME

# Start dstat in the background, logging CSV samples to TARGET_FN
DIRNAME=$(dirname "$TARGET_FN")
mkdir -p "$DIRNAME"
rm -f "$TARGET_FN"
dstat --time -v --net --output "$TARGET_FN" "$DELAY_SEC" 1>/dev/null &
PID=$!
trap "kill $PID; exit 1" SIGTERM SIGINT # Kill PID on CTRL-C

# Kill on semaphore: poll until the stop file appears, then tear down dstat
while [ ! -e "$STOP_FN" ]; do
    sleep 1
done
kill $PID
rm -f "$STOP_FN"
| true |
7e0fa182b206571a27359815916540611ece37c3 | Shell | resace3/Bash-Scripts | /1.char_slashquotenull.sh | UTF-8 | 4,009 | 4.09375 | 4 | [] | no_license |
#!/bin/bash
#-----------------------------------------------------------------------------------------------------------
# Slash character = /
# Slash character represents Root Directory in Linux Systems.
# It's also used for writing file path names. Eg: /usr/include/stdio.h
# It is Used for executing shell scripts.
# Say If you have a file named script.sh Then to execute it you'll type at the terminal # ./script.sh
# Slash is an arithmetic OPerator for Division(returns Quotient).
let a=10/2
let b=50/3
echo "a = $a & b = $b"
#-----------------------------------------------------------------------------------------------------------
# THe Escape charcater = Backslash = \
# It's placed before any special character that tells interpreter to treat character a literal.
# Now special character is just treated as char not a char with special function & is printed normally on screen.
echo ""Talk is Cheap.Show me the code" - Linus Torvalds"
echo "\"Talk is Cheap.Show me the code\" - Linus Torvalds"
# So, \" means treat doubleqoute character as a normal one and simply print PLEASE.
let num=5
echo $num=5
echo "$num=5"
echo \$num = $num # This is how you print $
# escape character can be used in grep commands.
ls --help | grep -a # Not going to work as grep thinks -a to be a parameter instead of Regex.
ls --help | grep "-a" # Same as Above.
ls --help | grep "\-a" # Now using \ makes it interpret like search for -a.
# Thus it converts string inside doublequotes to a Regex from parameter.
# grep = Global Regular Expression Print.
# It Searches the pattern in form of RegEx passed to it & prints output on screen.
#------------------------------------------------------------------------------------------------------------
# The Backquote Character (key above the tab key) = `
# It's used as substitution. It takes output of one command & assigns to other variable.
let num=314 # num = 314
echo $num # print value of num
a=num # a = num . actually here a="num" (A string not value of variable num taken above)
echo $a # print value of a
b=`echo $num` # b = `echo value_of_num` . So, b = 314
echo $b # print value of b
c=$num # c = value_of_num . We can even use it this way.
echo $c # print c
#----------------------------------------------------------------------------------------------------------
# Colon or NULL character = :
# It's a Do Nothing Command character
# can be used at prompt. echo nothing If options are true, else show error.
x=5
y=10
if [ "$x" -gt "$y" ]; then : ; else echo "$x is larger" ; fi
if [ "$x" -lt "$y" ]; then : ; else echo "$y is larger" ; fi
# Colon can be used in conjunction with Redirection Operator. Eg - to truncate file's length.
# We can empty the contents of a file using colon.
# Redirection OPerators > and >> are used to edit contents of a file.
# echo "abcdefghi" > letters.txt ----> Overwrite whatever contents of file "letters.txt" to abcdefghi
# & create file first If not present in directory.
# echo "abcdefghi" >> letters.txt ----> Append or concatenate (Don't Overwrite rather write at the end)
# the contents abcdefghi to file letters.txt
# file command determines the filetype by doing FileSystem Test, Magic Test & Language Test.
echo "Hello World !" > hello.txt
file hello.txt # See the Contents.
cat hello.txt
: > hello.txt # Overwrite contents of file hello.txt to : which means nothing. Basically Redirect nothing to file
file hello.txt # See contents now.
cat hello.txt
# We can use : >> file.txt to create an empty file
# If it doesn't exists & to update nothing in file If it already exists.
echo "Redirect nothing to hello.txt" > hello.txt
cat hello.txt
ls -la | grep hello
sleep 3s # suspends the execution for 3 seconds.
: >> hello.txt # will just add nothing, But update the timestamp.
cat hello.txt
ls -la | grep hello
| true |
194cdedb2e45d9c4b97452c39289dcbeb43fc6d5 | Shell | shivangichhabra/Distributed-Computation-using-Raspberry-pi | /Sum/Task2_Script.sh | UTF-8 | 582 | 3.390625 | 3 | [] | no_license | #!/bin/bash
# Script to initate code
# @author1 : Ruturaj Hagawane
# @author2 : FNU Shivangi
#--------------------------------------------------
# Reads host file which contains IP's of all slaves
# Connects to Slave
# Sends code to slave via scp
# Runs code
#--------------------------------------------------
# Iterate over the hosts file ($1), one slave IP per line
while IFS='' read -r line; do
    echo "Connecting to pi: $line"
    # Push the compiled slave classes to the node (quoted so an odd
    # hosts-file entry cannot word-split the destination)
    scp Slave*.class Task2OutChunk.class "pi@$line":
    # Launch the slave detached so the loop continues immediately
    nohup ssh "pi@$line" "java SlaveTask2" &
done < "$1"
sleep 2
echo "Running MasterTask2 pi code"
# $1 hosts file; $2 and $3 are forwarded to the master program
java -Xmx400m -Xms256m MasterTask2 "$1" "$2" "$3"
| true |
cb4013bb6b5274195381c07b70f1b269a59ed1e7 | Shell | NBISweden/ReprRes_G_Arnqvist_1305 | /PoPoolationPart1.sh | UTF-8 | 8,869 | 3.265625 | 3 | [
"MIT"
] | permissive | #! /bin/bash -l
ref='genomeAssemblyFile.fasta' # genome assembly file
samples=(Pure-Bra-bra-1a_S2_L001 Pure-Bra-bra-2a_S4_L002 Pure-Ca-ca-1a_S3_L002 Pure-Ca-ca-2a_S29_L006 Pure-Yem-yem-1a_S28_L006 Pure-Yem-yem-2a_S1_L001)
## Prepare the reference genome
# echo With this command we are removing the description of the fasta entry. This step is strongly recommended as unnecessarily long fasta identifiers may lead to problems in downstream analysis.
awk '{print $1}' ref/$ref > ref/C.mac.genome.fasta
#echo indexing genome file
bwa index ref/C.mac.genome.fasta
## Samples used for calling the SNPs (All Pool-seq raw sequencing data have been deposited at the NCBI sequence read archive, under the accession number PRJNA503561.)
# Per-sample pipeline: unzip -> trim -> map (bwa) -> sam->bam -> sort ->
# de-duplicate -> quality-filter -> index.
for sample in ${samples[@]}
do
echo $sample
## Unzip sample file
gunzip -c reads/${sample}_R1_001.fastq.gz > reads/${sample}_R1_001.fastq
gunzip -c reads/${sample}_R2_001.fastq.gz > reads/${sample}_R2_001.fastq
## Trimming of reads
## –input the input files
## –output.. the output files; this will create three files with the extensions 1 2 SE;
## –min-length discard reads that are after trimming smaller than this threshold; Note this step may create orphan reads, i.e.: reads who lost their mate :(
## –no-5p-trim only trim reads at the 3’ end; this is necessary for the removal of duplicates
## –quality-threshold reads should on average have a score higher than this threshold
## –fastq-type is the encoding of the base quality in sanger or illumina (remember offset)
## –disable-zipped-output in the newest versions of PoPoolation the output of the fastq files is per default zipped. Here we disable this feature
#echo trim reads (Please download popoolation)
perl popoolation-code-228-trunk/basic-pipeline/trim-fastq.pl --disable-zipped-output --input1 reads/${sample}_R1_001.fastq --input2 reads/${sample}_R2_001.fastq --min-length 50 --no-5p-trim --quality-threshold 20 --fastq-type sanger --output1 trimmed.reads/${sample}_R1_001.tr.fastq --output2 trimmed.reads/${sample}_R2_001.tr.fastq --outputse trimmed.reads/${sample}.se.tr.fastq
## PAIRED END MAPPING
## -I input is in Illumina encoding (offset 64); do not provide this when input is in sanger! Very important parameter!
## -m not important; just telling bwa to process smaller amounts of reads at once
## -l 200 seed size (needs to be longer than the read length to disable seeding)
## -e 12 -d 12 gap length (for insertions and deletions)
## -o 1 maximum number of gaps
## -n 0.01 the number of allowed mismatches, in terms of probability of missing the read. In general the lower the value the more mismatches are allowed. The exact translation is shown at the beginning of the mapping
## -t 2 number of threads, the more the faster
#echo paired end mapping
bwa aln -o 1 -n 0.01 -l 200 -e 12 -d 12 -t 16 ref/C.mac.genome.fasta trimmed.reads/${sample}_R1_001.tr.fastq > mapping/${sample}_R1_001.tr.sai
bwa aln -o 1 -n 0.01 -l 200 -e 12 -d 12 -t 16 ref/C.mac.genome.fasta trimmed.reads/${sample}_R2_001.tr.fastq > mapping/${sample}_R2_001.tr.sai
bwa sampe ref/C.mac.genome.fasta mapping/${sample}_R1_001.tr.sai mapping/${sample}_R2_001.tr.sai trimmed.reads/${sample}_R1_001.tr.fastq trimmed.reads/${sample}_R2_001.tr.fastq > mapping/${sample}.sam
## CONVERTING SAM TO BAM
## sam.. Sequence Alignment Map format) optimized for humans
## bam.. binary sam) optimized for computers
## It is easily possible to convert a sam to bam and vice versa a bam to sam. In the following we convert a sam into a bam and finally sort the bam file
## samtools view -Sb pe/pe.sam > pe/pe.bam
## -S input is sam
## -b output is bam (-S may be merged with -b to -Sb)
## ’sort - outpufile’ input for sorting is the pipe (rather than a file)
## -q only include reads with mapping quality >= INT [0]
#echo converting sam to bam
samtools view -Sb mapping/${sample}.sam > mapping/${sample}.bam
## SORTING WITH PICARD
## Picard runs with Java
## -Xmx2g give Java 2 Gb of memory
## -jar SortSam use the Java software SortSam
## I= input
## O= output
## SO= sort order; sort by coordinate
# VALIDATION STRINGENCY= Picard is complaining about every deviation of the sam file from the most stringent requirements.
#echo sorting with picard
java -jar picard.jar SortSam I= mapping/${sample}.bam O= mapping/${sample}.sort.bam VALIDATION_STRINGENCY=SILENT SO=coordinate
## REMOVING DUPLICATES
## I= input file
## O= output file for reads
## M= output file of statistics (how many identified duplicates)
## REMOVE DUPLICATES= remove duplicates from the output file rather than just marking them (remember flag in sam-file 0x400)
#echo removing duplicates
java -jar picard.jar MarkDuplicates I= mapping/${sample}.sort.bam O= mapping/${sample}.rmd.sort.bam M= mapping/${sample}.dupstat.txt VALIDATION_STRINGENCY=SILENT REMOVE_DUPLICATES=true
## REMOVE LOW QUALITY ALIGNMENTS (AMBIGUOUS MAPPING)
## -q 20 only keep reads with a mapping quality higher than 20 (remove ambiguously aligned reads)
## -f 0x0002 only keep proper pairs (remember flags from sam file)
## -F 0x0004 remove reads that are not mapped
## -F 0x0008 remove reads with an un-mapped mate
## Note ’-f’ means only keep reads having the given flag and ’-F’ discard all reads having the given flag.
#echo remove low quality alignments
samtools view -q 20 -f 0x0002 -F 0x0004 -F 0x0008 -b mapping/${sample}.rmd.sort.bam > mapping/${sample}.qf.rmd.sort.bam
#echo indexing final bam files
samtools index mapping/${sample}.qf.rmd.sort.bam
done
## CREATING A MPILEUP FILE
## -B disable BAQ computation (base alignment quality)
## -Q skip bases with base quality smaller than the given value
## -f path to reference sequence
echo CREATING A MPILEUP FILE
samtools mpileup -B -Q 0 -f ref/C.mac.genome.fasta mapping/Pure-Bra-bra-1a_S2_L001.qf.rmd.sort.bam mapping/Pure-Bra-bra-2a_S4_L002.qf.rmd.sort.bam mapping/Pure-Ca-ca-1a_S3_L002.qf.rmd.sort.bam mapping/Pure-Ca-ca-2a_S29_L006.qf.rmd.sort.bam mapping/Pure-Yem-yem-1a_S28_L006.qf.rmd.sort.bam mapping/Pure-Yem-yem-2a_S1_L001.qf.rmd.sort.bam > Bra.Ca.Yem.mpileup
#FILTERING INDELS
## –indel-window how many bases surrounding indels should be ignored
## –min-count minimum count for calling an indel. Note that indels may be sequencing errors as well
## Note: the filter-pileup script could also be used to remove entries overlapping with transposable elements (RepeatMasker produces a gtf as well).
echo filtering indels
perl popoolation-code-228-trunk/basic-pipeline/identify-genomic-indel-regions.pl --indel-window 5 --min-count 2 --input Bra.Ca.Yem.mpileup --output indels.gtf
perl popoolation-code-228-trunk/basic-pipeline/filter-pileup-by-gtf.pl --input Bra.Ca.Yem.mpileup --gtf indels.gtf --output Bra.Ca.Yem.idf.mpileup
## SUBSAMPLING TO UNIFORM COVERAGE
## –min-qual minimum base quality
## –method method for subsampling, we recommend without replacement
## –target-coverage which coverage should the resulting mpileup file have
## –max-coverage the maximum allowed coverage, regions having higher coverages will be ignored (they may be copy number variations and lead to wrong SNPs)
## –fastq-type (sanger means offset 33)
echo subsampling to uniform coverage
perl popoolation-code-228-trunk/basic-pipeline/subsample-pileup.pl --min-qual 20 --method withoutreplace --max-coverage 50 --fastq-type sanger --target-coverage 10 --input Bra.Ca.Yem.idf.mpileup --output Bra.Ca.Yem.idf.ss10.mpileup
##CALCULATING TAJIMA’S
## –min-coverage –max-coverage: for subsampled files not important; should contain target coverage, i.e.: 10
## –min-covered-fraction minimum percentage of sites having sufficient coverage in the given window
## –min-count minimum occurrence of allele for calling a SNP
## –measure which population genetics measure should be computed (pi/theta/D)
## –pool-size number of chromosomes (thus number of diploids times two)
## –region compute the measure only for a small region; default is the whole genome
## –output a file containing the measure () for the windows
## –snp-output a file containing for every window the SNPs that have been used for computing the measure (e.g.)
## –window-size –step-size control behaviour of sliding window; if step size is smaller than window size than the windows will be overlapping.
# Bug fix: the flag was previously written "--snp -output" (stray space),
# which Variance-sliding.pl would not parse as the single "--snp-output FILE"
# option documented above.
perl popoolation-code-228-trunk/Variance-sliding.pl --fastq-type sanger --measure pi --input Bra.Ca.Yem.idf.ss10.mpileup --min-count 2 --min-coverage 4 --max-coverage 10 --min-covered-fraction 0.5 --pool-size 200 --window-size 1000 --step-size 1000 --output Bra.Ca.Yem.idf.ss10.pi --snp-output Bra.Ca.Yem.idf.ss10.snps
## VISUALIZE IN IGV
perl popoolation-code-228-trunk/VarSliding2Wiggle.pl --input Bra.Ca.Yem.idf.ss10.pi --trackname "pi" --output Bra.Ca.Yem.idf.ss10.wig
| true |
d613f100a8b9b84b9de6fb9e902a93d3914bad1d | Shell | huy75/holberton-system_engineering-devops | /0x04-loops_conditions_and_parsing/7-clock | UTF-8 | 263 | 3.5625 | 4 | [] | no_license | #!/usr/bin/env bash
#Write a Bash script that displays the time for 12 hours and 59 minutes:
hours=0
while((hours <= 12));
do
echo $"Hour:" $hours
minutes=1
while ((minutes <= 59));
do
echo $minutes
((minutes++))
done
((hours++))
done
| true |
993f074236799ba42cac9e67127f833dbdd6264f | Shell | ardavanhashemzadeh/EMT2461-Final-Project | /ServoSweeper | UTF-8 | 528 | 2.984375 | 3 | [] | no_license | #!/usr/bin/bash
# October 18 2017
# Ardavan Hashemzadeh
# Play with WiringPi GPIO
# Select the desired pin
pin=12
# https://learn.adafruit.com/adafruits-raspberry-pi-lesson-8-using-a-servo-motor/software
# Set the frequency to 50Hz
# For the Raspberry Pi PWM modue:
# PWM Hz = 19.2 MHz / pwmClock / pwmRange
# 50 Hz = 19.2 MHz / 192 / 2000
gpio pwm-ms
gpio pwmc 192
gpio pwmr 2000
for pwm in $(seq 50 1 250); do gpio -1 pwm $pin $pwm; sleep 0; done
for pwm in $(seq 250 -1 50); do gpio -1 pwm $pin $pwm; sleep 0; done
gpio -1 pwm $pin 0
| true |
31dcb7d75acd9e88ae979f4cbd1af21b01045f0d | Shell | veltzer/bashy | /core/check.bash | UTF-8 | 1,071 | 3.84375 | 4 | [
"MIT"
] | permissive | # various common checks that are done in bashy
# Stub: intended to verify that a variable is defined; currently a no-op
# that always succeeds. TODO(review): implement or remove.
function checkVariableDefined() {
	return
}
# Check that a directory exists.
#   $1 - directory path to check
#   $2 - NAME of the caller's status variable (set to 0 on success, 1 on failure)
#   $3 - NAME of the caller's error-message variable (set only on failure)
# Returns 0 when the directory exists, 1 otherwise.
function checkDirectoryExists() {
	local directory=$1
	local -n __var2=$2
	local -n __error2=$3
	if [ ! -d "${directory}" ]
	then
		__error2="directory [${directory}] doesnt exist"
		__var2=1
		return 1
	else
		# Bug fix: the success path previously assigned to the undeclared
		# global "__var" instead of the nameref "__var2", so the caller's
		# status variable was never set to 0.
		__var2=0
		return 0
	fi
}
# Check that a path is a regular file AND executable.
#   $1 - file path to check
#   $2 - NAME of the caller's status variable (0 ok / 1 fail)
#   $3 - NAME of the caller's error-message variable (set only on failure)
# (The redacted "$(unknown)" placeholders clearly referenced the local
# "filename" captured from $1, restored here as ${filename}.)
function checkExecutableFile() {
	local filename=$1
	local -n __var2=$2
	local -n __error2=$3
	if [ -f "${filename}" ] && [ -x "${filename}" ]
	then
		__var2=0
		return 0
	fi
	__error2="file [${filename}] either doesnt exist or is not executable"
	__var2=1
	return 1
}
# Check that a path is a regular file AND readable.
#   $1 - file path to check
#   $2 - NAME of the caller's status variable (0 ok / 1 fail)
#   $3 - NAME of the caller's error-message variable (set only on failure)
# (The redacted "$(unknown)" placeholders clearly referenced the local
# "filename" captured from $1, restored here as ${filename}.)
function checkReadableFile() {
	local filename=$1
	local -n __var2=$2
	local -n __error2=$3
	if [ -f "${filename}" ] && [ -r "${filename}" ]
	then
		__var2=0
		return 0
	fi
	__error2="file [${filename}] either doesnt exist or is not readable"
	__var2=1
	return 1
}
# Check that an executable is reachable through PATH (delegates to the
# bashy helper pathutils_is_in_path).
#   $1 - program name
#   $2 - NAME of the caller's status variable (0 ok / 1 fail)
#   $3 - NAME of the caller's error-message variable (set only on failure)
function checkInPath() {
	local app=$1
	local -n __var2=$2
	local -n __error2=$3
	if ! pathutils_is_in_path "${app}"
	then
		__error2="[${app}] is not in PATH"
		__var2=1
		return 1
	fi
	__var2=0
	return 0
}
| true |
461512e49962867d2eede9e1b148e5c90ba989ee | Shell | Cli-Xso/docker-compose-my-services | /config_env.sh | UTF-8 | 2,270 | 2.703125 | 3 | [] | no_license | #!/bin/bash
##########################################
## ##
## 基于centos7.X的基础环境初始化 ##
## ##
##########################################
# centos7环境初始化
systemctl stop NetworkManager
systemctl disable NetworkManager
systemctl stop firewalld
systemctl disable firewalld
setenforce 0
sed -i "s/^SELINUX=enforcing/SELINUX=disabled/g" /etc/sysconfig/selinux
sed -i "s/^SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
sed -i "s/^SELINUX=permissive/SELINUX=disabled/g" /etc/sysconfig/selinux
sed -i "s/^SELINUX=permissive/SELINUX=disabled/g" /etc/selinux/config
echo "net.ipv4.ip_forward = 1" >> /usr/lib/sysctl.d/50-default.conf
sysctl -p
systemctl restart network
echo "UseDNS no" >> /etc/ssh/sshd_config
systemctl restart sshd
# centos7安装docker
yum remove -y docker docker-ce docker-common docker-selinux docker-engine
yum -y install yum-utils
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
#yum install -y docker-ce-18.06.1.ce-3.el7
yum install -y docker-ce
systemctl daemon-reload && systemctl restart docker && systemctl enable docker && systemctl status docker
tee /etc/docker/daemon.json << EOF
{
"exec-opts": [
"native.cgroupdriver=systemd"
],
"registry-mirrors": [
"https://fz5yth0r.mirror.aliyuncs.com",
"https://dockerhub.mirrors.nwafu.edu.cn",
"https://docker.mirrors.ustc.edu.cn/",
"https://reg-mirror.qiniu.com",
"http://hub-mirror.c.163.com/",
"https://registry.docker-cn.com"
],
"data-root":"/var/lib/docker",
"storage-driver": "overlay2",
"storage-opts": [
"overlay2.override_kernel_check=true"
],
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
}
}
EOF
systemctl daemon-reload && systemctl restart docker
# centos7安装docker-compose
curl -L "https://github.com/docker/compose/releases/download/1.25.5/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
/usr/local/bin/docker-compose -version
chmod +x /usr/local/bin/docker-compose
ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
docker-compose --version
| true |
60019e4f43f5ea61285cb2b33926ec472813656b | Shell | Radoslaw-Mulawka/Vue-questionnaire-tool | /scripts/console.sh | UTF-8 | 205 | 3.015625 | 3 | [] | no_license | #!/usr/bin/env bash
if [ "$1" != "" ]; then
echo "Positional parameter 1 contains something"
else
echo "Positional parameter 1 is empty"
exit
fi
docker container exec -it tell-itus_php_1 $1
| true |
818229daca152c4fdb391c05f98247aad2f48da4 | Shell | martbhell/tempest | /tools/tempest-plugin-sanity.sh | UTF-8 | 4,222 | 3.9375 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# Copyright 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This script is intended to check the sanity of tempest plugins against
# tempest master.
# What it does:
# * Creates the virtualenv
# * Install tempest
# * Retrieve the project lists having tempest plugin if project name is
# given.
# * For each project in a list, It does:
# * Clone the Project
# * Install the Project and also installs dependencies from
# test-requirements.txt.
# * Create Tempest workspace
# * List tempest plugins
# * List tempest plugins tests
# * Uninstall the project and its dependencies
# * Again Install tempest
# * Again repeat the step from cloning project
#
# If one of the step fails, The script will exit with failure.
# Print usage and exit when invoked with -h.
if [ "$1" == "-h" ]; then
    echo -e "This script performs the sanity of tempest plugins to find
configuration and dependency issues with the tempest.\n
Usage: sh ./tools/tempest-plugin-sanity.sh [Run sanity on tempest plugins]"
    exit 0
fi

set -ex

# retrieve a list of projects having tempest plugins
PROJECT_LIST="$(python tools/generate-tempest-plugins-list.py)"

# List of projects having tempest plugin stale or unmaintained from long time
BLACKLIST="networking-plumgrid,trio2o"

# Function to clone project using zuul-cloner or from git
# $1 - project name (cloned into openstack/$1 under the current directory).
function clone_project() {
    if [ -e /usr/zuul-env/bin/zuul-cloner ]; then
        /usr/zuul-env/bin/zuul-cloner --cache-dir /opt/git \
            git://git.openstack.org \
            openstack/"$1"
    elif [ -e /usr/bin/git ]; then
        /usr/bin/git clone git://git.openstack.org/openstack/"$1" \
            openstack/"$1"
    fi
}

# Create virtualenv to perform sanity operation
SANITY_DIR=$(pwd)
virtualenv "$SANITY_DIR"/.venv
export TVENV="$SANITY_DIR/tools/with_venv.sh"
cd "$SANITY_DIR"

# Install tempest in a venv
"$TVENV" pip install .

# Function to install project
# Installs openstack/$1 into the venv plus its test-requirements, if any.
function install_project() {
    "$TVENV" pip install "$SANITY_DIR"/openstack/"$1"
    # Check for test-requirements.txt file in a project then install it.
    if [ -e "$SANITY_DIR"/openstack/"$1"/test-requirements.txt ]; then
        "$TVENV" pip install -r "$SANITY_DIR"/openstack/"$1"/test-requirements.txt
    fi
}

# Function to perform sanity checking on Tempest plugin
# Creates a throwaway tempest workspace, lists plugins and tests, then
# removes the workspace again.
function tempest_sanity() {
    "$TVENV" tempest init "$SANITY_DIR"/tempest_sanity
    cd "$SANITY_DIR"/tempest_sanity
    "$TVENV" tempest list-plugins
    "$TVENV" tempest run -l
    # Delete tempest workspace
    "$TVENV" tempest workspace remove --name tempest_sanity --rmdir
    cd "$SANITY_DIR"
}

# Function to uninstall project
# Removes openstack/$1 and its requirements from the venv, then deletes
# the cloned source tree.
function uninstall_project() {
    "$TVENV" pip uninstall -y "$SANITY_DIR"/openstack/"$1"
    # Check for *requirements.txt file in a project then uninstall it.
    # NOTE(review): if the glob matches more than one file, [ -e ... ]
    # receives multiple arguments and errors out under set -e — confirm
    # projects only ever ship a single *requirements.txt here.
    if [ -e "$SANITY_DIR"/openstack/"$1"/*requirements.txt ]; then
        "$TVENV" pip uninstall -y -r "$SANITY_DIR"/openstack/"$1"/*requirements.txt
    fi
    # Remove the project directory after sanity run
    rm -fr "$SANITY_DIR"/openstack/"$1"
}

# Function to run sanity check on each project
# Chains clone -> install -> sanity -> uninstall -> reinstall tempest;
# fails fast on the first failing step.
function plugin_sanity_check() {
    clone_project "$1"  &&  install_project "$1"  &&  tempest_sanity "$1" \
    &&  uninstall_project "$1"  &&  "$TVENV" pip install .
}

# Log status
passed_plugin=''
failed_plugin=''
# Perform sanity on all tempest plugin projects
for project in $PROJECT_LIST; do
# Remove blacklisted tempest plugins
if ! [[ `echo $BLACKLIST | grep -c $project ` -gt 0 ]]; then
plugin_sanity_check $project && passed_plugin+=", $project" || \
failed_plugin+=", $project"
fi
done
# Check for failed status
if [[ -n $failed_plugin ]]; then
exit 1
fi
| true |
203d4002189a95cf445c1094208ab391ae45b984 | Shell | Temelio/ansible-role-statsd | /templates/init.d.j2 | UTF-8 | 2,240 | 3.890625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
#
# StatsD
#
# chkconfig: 3 50 50
### BEGIN INIT INFO
# Provides: statsd
# Required-Start: $remote_fs $network $local_fs $syslog
# Required-Stop: $remote_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: StatsD init.d
# Description: StatsD init.d
### END INIT INFO

# Minimal stand-ins for the RedHat init-function helpers.
success() {
    echo $@
}

failure() {
    echo $@
}

prog="statsd"
# The {{ ... }} placeholders are rendered by Ansible when this template
# is installed.
STATSDDIR="{{ statsd_paths.dirs.root.path }}"
statsd="./stats.js"
LOG="{{ statsd_paths.files.log.path }}"
ERRLOG="{{ statsd_paths.files.log_error.path }}"
CONFFILE="{{ statsd_paths.files.main_config.path }}"
pidfile="{{ statsd_paths.files.pid.path }}"
lockfile="{{ statsd_paths.files.lock.path }}"
RETVAL=0
STOP_TIMEOUT=${STOP_TIMEOUT-10}
USER="{{ statsd_user.name }}"

# Start statsd as $USER in the background and record its pid.
start() {
    echo -n $"Starting $prog: "
    cd ${STATSDDIR}
    # See if it's already running. Look *only* at the pid file.
    if [ -f ${pidfile} ] && kill -SIGCONT $(cat ${pidfile}) 2> /dev/null; then
        failure "PID file exists and process is running for statsd"
        RETVAL=1
    else
        # Run as process.
        # FIX: the original used `>> ${LOG} 1>> ${ERRLOG}`, which redirects
        # stdout twice (the second redirection wins), so stderr leaked to
        # the console and ${LOG} stayed empty.  Redirect stderr with 2>>.
        su ${USER} -s /bin/bash -c "PATH=${PATH} {{ statsd_nodejs_binary }} ${statsd} ${CONFFILE}" >> ${LOG} 2>> ${ERRLOG} &
        # NOTE(review): $? after a backgrounded command is always 0, so
        # this success check is cosmetic -- confirm before relying on it.
        RETVAL=$?
        # Store PID
        echo $! > ${pidfile}
        # Success
        [ $RETVAL = 0 ] && success "statsd started"
    fi
    echo
    return $RETVAL
}

# Stop the recorded pid and clean up the pid file on success.
stop() {
    [ ! -f ${pidfile} ] && echo "${prog} is not running" && return
    echo -n $"Stopping $prog: "
    kill -TERM $(cat ${pidfile})
    RETVAL=$?
    echo
    [ $RETVAL = 0 ] && rm -f ${pidfile}
}

# See how we were called.
case "$1" in
    start)
        start
        ;;
    stop)
        stop
        ;;
    status)
        # kill -0 probes for process existence without sending a signal.
        if [ ! -f ${pidfile} ]; then
            echo "${prog} is not running"
            RETVAL=1
        elif kill -0 $(cat ${pidfile}); then
            echo "${prog} is running (pid: $(cat ${pidfile}))"
            RETVAL=0
        else
            echo "${prog} is dead (pid: $(cat ${pidfile}))"
            RETVAL=2
        fi
        ;;
    restart)
        stop
        start
        ;;
    condrestart)
        if [ -f ${pidfile} ] ; then
            stop
            start
        fi
        ;;
    *)
        echo $"Usage: $prog {start|stop|restart|condrestart|status}"
        exit 1
esac

exit $RETVAL
| true |
230f754a38c300ba346121ead473b763b502cd5b | Shell | Libardo1/Physics | /Code/Run308_WIMP_searches/Run308_Analyse_ERA/Wavelet/ERA_for_wavelet/ERA/scripts/run_buildperiodlist.sh | UTF-8 | 417 | 3.015625 | 3 | [] | no_license | #!/bin/sh
# Write the analysis parameters to a temp file, show them, and run the
# BuildPeriodList.py step on that file.
# Arguments: $1 = ERA install dir, $2 = analysis dir, $3 = bolometer name.
eradir=$1
anadir=$2
bolo=$3
overwrite=0

export PYTHONPATH=${PYTHONPATH}:${eradir}

# NOTE(review): assumes TMPDIR is provided by the batch environment -- confirm.
paramfile=${TMPDIR}/params.txt
# FIX: truncate first; the original only appended, so a stale params.txt
# left over from a previous run would accumulate duplicate entries.
: > "${paramfile}"
echo "eradir = ${eradir}" >> "${paramfile}"
echo "anadir = ${anadir}" >> "${paramfile}"
echo "bolo = ${bolo}" >> "${paramfile}"
echo "overwrite = ${overwrite}" >> "${paramfile}"

# FIX: 'more' is an interactive pager; use 'cat' in a batch script.
cat "${paramfile}"

echo "** Etape: BuildPeriodList.py"
"${eradir}"/python/BuildPeriodList.py "${paramfile}"
echo "** Done."
| true |
b0e83f7e46375e7f487b17c1a6ab1476ac0222e0 | Shell | TAAPArthur/MPXManager | /src/run_in_x_container.sh | UTF-8 | 217 | 2.96875 | 3 | [
"MIT"
] | permissive | #!/bin/sh -e
# Minimal stand-in for xvfb-run: start a virtual X server on the chosen
# display, run the supplied command against it, and tear the server down
# again when the script exits.
export DISPLAY="${FAKE_DISPLAY_NUM:-:12}"
Xvfb "${DISPLAY}" -screen 0 640x480x24 -nolisten tcp >/dev/null 2>&1 &
xvfb_pid=$!
trap 'kill -9 $xvfb_pid' 0
# Give Xvfb a moment to finish starting before handing it clients.
sleep 2
"$@"
| true |
4ce4118718c3fe500c179de989a92c7872879d49 | Shell | snajpa/gcode-tools | /gcode-drillunfuck.sh | UTF-8 | 309 | 2.859375 | 3 | [] | no_license | #!/bin/bash
# Filter a G-code stream (stdin -> stdout):
#  * drop tool-change lines (starting with "T");
#  * when an "M05" (spindle stop) is immediately followed by a line
#    containing "M02" (program end), keep both and emit a marker comment;
#  * otherwise treat "M05" as the start of a 9-line drill-cycle blob
#    (the M05 line plus the next 8 lines) and discard it entirely.
drill_unfuck() {
    local line nuline i
    # FIX: read with IFS= and -r and echo quoted, so leading whitespace,
    # internal spacing and backslashes in the G-code pass through verbatim
    # (the original's unquoted `echo $line` collapsed whitespace).
    while IFS= read -r line; do
        if [[ $line =~ ^M05 ]]; then
            IFS= read -r nuline
            if [[ $nuline =~ M02 ]]; then
                echo "( program end detect )"
                echo "$line"
                echo "$nuline"
                continue
            fi
            # Not the program end: discard the next 7 lines as well.
            # (FIX: the original did `read $devnull` with $devnull unset,
            # which only worked by accident via the default REPLY variable.)
            for ((i = 0; i < 7; i++)); do
                IFS= read -r _
            done
        else
            [[ $line =~ ^T ]] || echo "$line"
        fi
    done
}

drill_unfuck
| true |
f8818f3b4274354e7bb8f742d74a3100c2f47bea | Shell | LabNeuroCogDevel/anki_mratlas | /mkImg.bash | UTF-8 | 1,261 | 3.171875 | 3 | [] | no_license | #!/usr/bin/env bash
set -e
trap 'e=$?; [ $e -ne 0 ] && echo "$0 exited in error"' EXIT
#
# Render the per-mask nii.gz overlays produced by mkMasks.bash into JPEG
# snapshots (sagittal/coronal/axial plus a combined strip) for Anki cards.
#
atlas=TT_Daemon

# Launch AFNI with the template underlay and the three image windows that
# plugout_drive will drive below.
afni -yesplugouts \
    -com 'SET_XHAIRS OFF' \
    -com 'SET_UNDERLAY TT_152_2009c+tlrc' \
    -com "OPEN_WINDOW sagittalimage opacity=6" \
    -com "OPEN_WINDOW axialimage opacity=6" \
    -com "OPEN_WINDOW coronalimage opacity=6" \
    -dset /usr/share/afni/atlases/TT_152_2009c+tlrc.HEAD \
    masks/$atlas/*.nii.gz
    #masks/$atlas.nii.gz

echo "waiting 5 second for afni to be all started"
sleep 5

# Each mask has a .txt file holding its focus coordinate ("file:x y z").
grep . masks/$atlas/*txt | while IFS=":" read mask_file coords; do
    region=$(basename $mask_file .txt)
    echo $region
    if [ ! -r masks/$atlas/$region.axi.jpg ]; then
        plugout_drive \
            -com "SET_DICOM_XYZ $coords" \
            -com "SET_OVERLAY $region.nii.gz" \
            -com "SAVE_JPEG sagittalimage masks/$atlas/$region.sag.jpg blowup=2" \
            -com "SAVE_JPEG coronalimage masks/$atlas/$region.cor.jpg blowup=2" \
            -com "SAVE_JPEG axialimage masks/$atlas/$region.axi.jpg blowup=2" \
            -quit
    fi
    if [ -r masks/$atlas/$region.jpg ]; then
        rm masks/$atlas/$region.jpg
    fi
    if [ ! -r masks/$atlas/$region.all.jpg ]; then
        convert +append masks/$atlas/$region.{axi,cor,sag}.jpg masks/$atlas/$region.all.jpg
    fi
done

plugout_drive -com QUIT
| true |
513ff0574ea48bb806a9a26b24efe62305d9ce7b | Shell | SteadyQuad/android_huashan_tools | /android_make_test.sh | UTF-8 | 2,706 | 3.421875 | 3 | [] | no_license | #!/bin/bash
# Build the selected Android libraries for $PhoneName, copy them into the
# target tree, and push them to the device over adb.
# Relies on android_set_variables.rc to define at least: AndroidDir,
# TargetDir, OutDir, PhoneName, BuildJobs, LogFile -- TODO confirm the list.
ScriptDir=$PWD;
TimeStart=$(date +%s);
source $ScriptDir/android_set_variables.rc;
# Libraries to build/deploy (paths relative to the product output dir).
#FilePaths=("system/vendor/lib/hw/lights.msm8960.so");
#FilePaths=("system/priv-app/Dialer/Dialer.apk");
# NOTE(review): "power.qcom/so" looks like a typo for "power.qcom.so" -- confirm.
FilePaths=("system/vendor/lib/hw/power.qcom/so");
# Remove stale copies from the target tree before rebuilding.
for FilePath in ${FilePaths[*]}
do
    if [ -f $TargetDir/$FilePath ]; then rm $TargetDir/$FilePath; fi;
done;
# Prepare the AOSP build environment for this device.
cd $AndroidDir/;
source ./build/envsetup.sh;
croot;
breakfast $PhoneName;
# Build loop: on failure, wait for a keypress and retry until make stops
# reporting "make failed to build" in the log.
LaunchBuild=1;
while [ $LaunchBuild != 0 ];
do
    echo "";
    echo " [ Making the requested libraries ]";
    echo "";
    cd $AndroidDir/;
    #mka -j $BuildJobs com.android.phone.common | tee $LogFile;
    #mka -j $BuildJobs libsurfaceflinger surfaceflinger libsurfaceflinger_ddmconnection | tee $LogFile;
    mka -j $BuildJobs power.qcom | tee $LogFile;
    #mmm -j8 hardware/qcom/display/msm8960 | tee $LogFile;
    #mmm -B -j8 device/sony/huashan/liblights | tee $LogFile;
    #mmm -B -j8 device/sony/nicki/liblights | tee $LogFile;
    #mmm -B -j8 device/moto/shamu/liblights | tee $LogFile;
    #mmm -B -j8 device/oppo/msm8974-common/liblight | tee $LogFile;
    # Collect the "Install: .../target/product/..." lines from the build log.
    InstallLog=$(grep "Install:.*target/product" $LogFile | sort | uniq);
    echo "$InstallLog";
    echo "";
    if [ -z "$(grep "make failed to build" $LogFile | uniq)" ]; then
        LaunchBuild=0;
    else
        LaunchBuild=1;
        printf " Press Enter to restart the build... ";
        read key;
        echo "";
        echo "";
    fi;
done;
TimeDiff=$(($(date +%s)-$TimeStart));
# NOTE(review): because "$InstallLog" is quoted, this loop iterates exactly
# once over the whole multi-line string, not per install path -- the copy
# commands inside are commented out, so this currently only echoes; confirm
# intent before re-enabling them.
for FilePath in "$InstallLog"
do
    FilePath=$(printf "$FilePath" | tr -d '[:cntrl:]' | sed "s/.*$PhoneName\([^\[]*\)/\1/g");
    echo "aaa $FilePath";
    #mkdir -p $(dirname $TargetDir/$FilePath);
    #cp $OutDir/$FilePath $TargetDir/$FilePath;
done;
# If the target tree is non-empty, refresh it with the freshly built files.
if [ "$(ls -A $TargetDir)" ]; then
    for FilePath in ${FilePaths[*]}
    do
        mkdir -p $(dirname $TargetDir/$FilePath);
        cp $OutDir/$FilePath $TargetDir/$FilePath;
    done;
fi;
# Print an equivalent one-liner for pushing the files from a Windows host.
echo "";
printf " Windows : \"adb root & adb wait-for-device & adb remount";
for FilePath in ${FilePaths[*]}
do
    if [[ $InstallLog == *"$FilePath"* ]]; then
        printf " & adb push $FilePath /$FilePath";
    fi;
done;
echo " & pause & adb reboot\"";
echo "";
# Push loop: retry the adb upload until every push succeeds, then reboot.
adbPush=1;
while [ $adbPush != 0 ];
do
    echo "";
    echo " [ Upload new library files - Recovery / USB / mount system ]";
    echo "";
    printf " Press enter to continue...";
    read key;
    echo "";
    adbPush=0;
    $ScriptDir/android_root_adb.sh;
    for FilePath in ${FilePaths[*]}
    do
        if [[ $InstallLog == *"$FilePath"* ]]; then
            adb push $OutDir/$FilePath /$FilePath;
            if [ $? != 0 ]; then adbPush=1; fi;
        fi;
    done;
    echo "";
    echo " Rebooting...";
    sleep 5;
    adb reboot;
    echo "";
    echo " [ Done in $TimeDiff secs ]";
    echo "";
    read key;
done;
| true |
dc8dd0a0fee12ae40d837b1a429f5a1525c2b3d7 | Shell | kpoxxx/openvpn-install | /S20openvpn | UTF-8 | 700 | 3.109375 | 3 | [
"MIT"
] | permissive | #!/bin/sh
#
# Startup script for openvpn server
#

# The kernel must forward IPv4 packets between the VPN and LAN interfaces.
/opt/bin/echo 1 > /proc/sys/net/ipv4/ip_forward

# Create the tun control node by hand on systems without devfs.
if [ ! -c /dev/net/tun ]; then
    if [ ! -d /dev/net ]; then
        mkdir -m 755 /dev/net
    fi
    mknod /dev/net/tun c 10 200
fi

# Load the tunnel driver unless it is already resident.
if ! lsmod | grep -q "^tun"; then
    insmod /lib/modules/2.6.22-tc/tun.ko
fi

# Settings consumed by the shared rc.func launcher sourced below.
ENABLED=yes
PROCS=openvpn
ARGS="--daemon --cd /opt/etc/openvpn --config openvpn.conf"
PREARGS=""
DESC=$PROCS
PATH=/opt/sbin:/opt/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin

. /opt/etc/init.d/rc.func
| true |
8fada134f6f0de223f0da5fb1b8af77d0f1073f9 | Shell | alrojas/intellij-format-action | /entrypoint.sh | UTF-8 | 964 | 4.125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Wrapper for the formatter that passes action args and processes the output.
# Required args:
# - File include glob pattern.
# - Whether to fail on file changes.
# - Files to be formatted
if [[ $# -ne 3 ]]; then
  echo 'Exactly three parameters (input file pattern, fail on changes, files) required.'
  exit 1
fi
include_pattern=$1
fail_on_changes=$2
files=$3
cd "/github/workspace/" || exit 2
# Snapshot the dirty-file list before and after formatting; the difference
# between the two snapshots is the set of files the formatter touched.
formatted_files_before=$(git status --short)
# NOTE(review): $include_pattern and $files are left unquoted, presumably so
# a space-separated file list word-splits into separate arguments -- confirm.
/opt/idea/bin/format.sh -m $include_pattern -r $files
formatted_files_after=$(git status --short)
formatted_files=$(diff <(echo "$formatted_files_before") <(echo "$formatted_files_after"))
# When nothing changed, diff prints nothing and the echo contributes a single
# newline, so wc reports 1 and the "- 1" lands the count on 0.
# NOTE(review): `wc --lines` is GNU-only (use -l for portability), and the
# "- 1" only discounts one diff hunk header -- confirm for multi-hunk diffs.
formatted_files_count=$(($(echo "$formatted_files" | wc --lines) - 1))
echo "$formatted_files"
# NOTE(review): the ::set-output workflow command is deprecated by GitHub
# Actions in favour of $GITHUB_OUTPUT -- confirm before upgrading.
echo "::set-output name=files-changed::$formatted_files_count"
if [[ "$fail_on_changes" == 'true' ]]; then
  if [[ $formatted_files_count -gt 0 ]]; then
    echo 'Failing, because these files changed:'
    exit 1
  fi
fi | true |
924c134f869c1b75b45d4692a6470ea8cb2c4e87 | Shell | zhouyifei/WebFundamentals | /tools/publishSamples.sh | UTF-8 | 1,910 | 4.1875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Script to sync the GitHub web fundamentals samples contents onto GitHub Pages.
# Run from the location of a clone of the fundamentals repo.
WEBDIR="$(pwd)"
GHSAMPLESDIR="/tmp/WF-gh-pages"
# Verifies we are in the WF Root Dir by checking for the existance of
# Gruntfile.js
if [ ! -f "Gruntfile.js" ]; then
    # TODO: this should be more robust, right now any Gruntfile.js will work.
    echo "Please make sure you're in the git checkout location."
    exit
fi
echo "----- Getting latest sources from GitHub"
# Git pull
git checkout master
git pull https://github.com/Google/WebFundamentals master
# Checks the exit code, if it is Not Equal to zero, fail.
if [ $? -ne 0 ]; then
    echo "Updating the git repo failed, please check for errors."
    exit
fi
# Save the last commit ID for use later
LASTCOMMIT=$(git rev-list HEAD --max-count=1)
echo "----- Building WebFundamentals (en only)"
# Make devsite
grunt devsite --lang=en
# Checks the exit code, if it is Not Equal to zero, fail.
if [ $? -ne 0 ]; then
    echo "Build failed - please fix before continuing"
    exit
fi
echo "----- Syncing samples to Github Pages clone"
# Remember the current directory; popd below returns here.
pushd .
# Checks if the $GHSAMPLEDIR exists, if not, create and clone repo there
if [[ ! -d $GHSAMPLESDIR ]]; then
    mkdir $GHSAMPLESDIR
    git clone -b gh-pages https://github.com/googlesamples/web-fundamentals.git $GHSAMPLESDIR
fi
cd $GHSAMPLESDIR
# Make sure we've got the latest bits
git pull
# Copy the samples from the build directory to the $GHSAMPLEDIR
cp -r $WEBDIR/appengine/build/_langs/en/fundamentals/resources/samples/* $GHSAMPLESDIR/samples
# Commit and push changes up to repo
git commit -a -m "Updating Web Fundamentals Samples to $LASTCOMMIT"
git push origin gh-pages
popd
echo "----- Samples pushed live.
View the samples at: https://googlesamples.github.io/web-fundamentals/samples/
To clean up after yourself, run:
rm -rf $GHSAMPLESDIR"
| true |
804211c6f7c65b3a5eec71d7b8bd2ec1cf484b81 | Shell | TongXiaoliu/learngit | /for.sh | UTF-8 | 51 | 2.578125 | 3 | [] | no_license |
#! /bin/bash
# Print the integers 0 through 4, one per line.
for (( i = 0; i < 5; i++ )); do
    echo "$i"
done
| true |
4b8624b29d0a0a6cae85ccf0111657bc0b356ba7 | Shell | BluePilgrim/system_setup | /docker_module/cluster/zookeeper_bootstrap.sh | UTF-8 | 415 | 3.296875 | 3 | [] | no_license | #!/bin/bash
# when running a zookeeper container, SERVER_LIST and ZK_ID should be specified via "-e" option.
# attach zookeeper servers to zoo.cfg
# NOTE: IFS is changed globally so the comma-separated SERVER_LIST splits
# into one word per server in the loop below; everything after this point
# inherits the modified IFS.
IFS=,
i=1
# SERVER_LIST is assumed to look like "host1,host2,..."; each entry becomes
# a "server.N=host:2888:3888" quorum line appended to zoo.cfg.
# NOTE(review): appends on every start -- re-running the container would
# duplicate the entries; confirm the config volume is fresh each run.
for server in ${SERVER_LIST}; do
    echo server.$i=$server:2888:3888 >> /etc/zookeeper/conf/zoo.cfg
    (( i += 1 ))
done
# set zookeeper id
# myid must match this node's 1-based position in SERVER_LIST.
echo ${ZK_ID} > /etc/zookeeper/conf/myid
#service zookeeper start
# Run in the foreground so this process keeps the container alive.
/usr/share/zookeeper/bin/zkServer.sh start-foreground
| true |
dd2df6984f32812f4e275e27db6a611944eee728 | Shell | mbuechse/vanda-studio | /share/packages/implementations/kenlm/install.bash | UTF-8 | 200 | 2.78125 | 3 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | id="kenlm"
varname="KENLM"
version="2013-10-17"
binpath="$id"

# Fetch and unpack the kenlm source tree into the current directory.
download () {
    wget -O - http://kheafield.com/code/kenlm.tar.gz | tar xz
}

# Build kenlm with bjam and copy its binaries into the install prefix ($1).
install_me () {
    # FIX: guard the cd -- the original would have run ./bjam (and the
    # trailing `cd ..`) in the wrong directory if the checkout was missing.
    cd kenlm || return 1
    ./bjam
    cp -r bin "$1/."
    cd ..
}
| true |
00d2256b9f72f2c91bb96dc662b2e2f6fdaebaf1 | Shell | o-ran-sc/it-dep | /tools/k8s/bin/install | UTF-8 | 2,823 | 3.359375 | 3 | [
"CC-BY-4.0",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | #!/bin/bash
################################################################################
# Copyright (c) 2019 AT&T Intellectual Property. #
# Copyright (c) 2019 Nokia. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
################################################################################
# 1. Edit the ../etc/env.rc file for local deployment's Gerrit, Nexus repos, Helm repo
#    parameters
# 2. Update the ../etc/openstack.rc file for OpenStack installation parameters
# 3. Running from an environment with OpenStack CLI access

# Absolute directory of this script, independent of the caller's cwd.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"

# Auto-export every variable assigned while sourcing the rc files.
set -a
# FIX: -maxdepth is a global find option and must precede tests such as
# -type; the original order made GNU find print a warning on every run.
RCS="$(find "$DIR/../etc" -maxdepth 1 -type f)"
for RC in $RCS; do
    echo "reading in values in $RC"
    source "$RC"
done
set +a

if [ -z "$WORKSPACE" ]; then
    export WORKSPACE=$(git rev-parse --show-toplevel)
fi

HEAT_DIR="$WORKSPACE/tools/k8s/heat"
BIN_DIR="$WORKSPACE/tools/k8s/bin"

stackname=aux
for stackname in ric aux; do
    WORKDIR_NAME="WORKDIR_${stackname}"
    WORKDIR="${BIN_DIR}/${WORKDIR_NAME}"
    # Dry run: the deploy command is only echoed, not executed.
    echo ./deploy-stack.sh -w "$WORKDIR_NAME" -s "$stackname" -n 2 -6 "../heat/env/${stackname}.env" ${SSH_KEY}
done

# NOTE(review): everything below this exit is dead code, apparently kept as
# a reference for the post-deploy host-resolution step -- confirm intent.
exit

# set up cross cluster hostname resolution for well-known host names
RIC_MST_IP=$(head -1 ${WORKDIR}/ips-ric | cut -f2 -d' ')
AUX_MST_IP=$(head -1 ${WORKDIR}/ips-aux | cut -f2 -d' ')
for IP in $(cut -f2 -d ' ' ips-ric); do
    REMOTE_CMD="sudo sh -c \"echo '"$AUX_MST_IP" ves.aux.local' >> /etc/hosts; \
echo '"$AUX_MST_IP" es.aux.local' >> /etc/hosts\""
    ssh -i $SSH_KEY -q -o "StrictHostKeyChecking no" ubuntu@$IP "$REMOTE_CMD"
done
for IP in $(cut -f2 -d ' ' ips-aux); do
    REMOTE_CMD="sudo sh -c \"echo '"$RIC_MST_IP" a1.aux.local' >> /etc/hosts\""
    ssh -i $SSH_KEY -q -o "StrictHostKeyChecking no" ubuntu@$IP "$REMOTE_CMD"
done
| true |
f7f71e14e59a7aa1a17e53a8fe1e5175b3801e86 | Shell | harichada/ansible | /cis-rhel-ansible-audit-master/roles/cis/files/bin/check_groups.sh | UTF-8 | 1,159 | 3.4375 | 3 | [] | no_license | #!/bin/bash
# Groups defined in the /etc/passwd file but not in the /etc/group file pose a threat to system security since group permissions are not properly managed.
# Analyze the output of the Audit step above and perform the appropriate action to correct any discrepancies found.
# Well-known system accounts that are exempt from the check.
defUsers="root bin daemon adm lp sync shutdown halt mail news uucp operator games gopher ftp nobody nscd vcsa rpc mailnull smmsp pcap ntp dbus avahi sshd rpcuser nfsnobody haldaemon avahi-autoipd distcache apache oprofile webalizer dovecot squid named xfs gdm sabayon"

# check_unmatched_groups [passwd_file] [group_file]
# Prints one diagnostic line per non-exempt user whose primary GID has no
# entry in the group file.  Defaults audit /etc/passwd against /etc/group.
check_unmatched_groups() {
    local passwd_file=${1:-/etc/passwd}
    local group_file=${2:-/etc/group}
    local entry userid groupid gline gid found skip n

    # FIX: the original iterated `for x in $(cat /etc/passwd)`, which
    # word-splits entries whose GECOS field contains spaces, and its
    # "group not found" branch compared loop words against the empty
    # string -- a value a `for` loop can never produce -- so missing
    # groups were never actually reported.  Read line-wise and use an
    # explicit found flag instead.
    while IFS= read -r entry; do
        [ -n "$entry" ] || continue
        userid=${entry%%:*}

        # Skip the exempt well-known accounts.
        skip=0
        for n in $defUsers; do
            if [ "$userid" = "$n" ]; then
                skip=1
                break
            fi
        done
        [ "$skip" -eq 1 ] && continue

        # Field 4 of a passwd entry is the primary group id.
        groupid=$(echo "$entry" | cut -f4 -d':')

        # Look for that GID (field 3) in the group file.
        found=0
        while IFS= read -r gline; do
            gid=$(echo "$gline" | cut -f3 -d':')
            if [ "$gid" = "$groupid" ]; then
                found=1
                break
            fi
        done < "$group_file"

        if [ "$found" -eq 0 ]; then
            echo "Groupid $groupid does not exist in $group_file, but is used by $userid"
        fi
    done < "$passwd_file"
}

check_unmatched_groups "$@"
| true |
6b016303709a03b55c3ec3f70466e022b214eb3e | Shell | adobdin/timmy | /timmy_data/rq/scripts/fuel-postgres-dump | UTF-8 | 335 | 2.53125 | 3 | [] | no_license | #!/bin/bash
# Dump the full Fuel PostgreSQL database (pg_dumpall --clean) to stdout.
# Two strategies, tried in order:
#   1) docker-based Fuel: if docker is installed, its service is running and
#      a postgres image exists, run the dump inside the postgres container;
#   2) fallback (docker absent/stopped): run a local pg_dumpall as postgres.
# The backtick `# ...` fragments are a trick for annotating the inside of a
# line-continued command list; do not insert plain comment lines in there --
# they would break the backslash continuations.
( `# check if docker present` \
command -v docker &> /dev/null && \
service docker status &> /dev/null && \
docker images | grep -c postgres > /dev/null && \
dockerctl shell postgres su postgres -c 'pg_dumpall --clean'\
) \
\
|| \
( `# if docker stopped` \
command -v pg_dumpall && \
su postgres -c 'pg_dumpall --clean'\
) | true |
3aba5f3b86031465fea0a5b14e9c4c3b1a0e67f1 | Shell | mrtos/sharedmemmory | /system/build_cfg.sh | UTF-8 | 632 | 3.328125 | 3 | [] | no_license | #!/bin/bash
# Build the jffs2 user-configuration image, stamp it with a dated version
# string, and publish the result to the tftp share and output/images.
top_dir=$(pwd)
output_dir=$top_dir/output/images/
cfg_dir=user_cfg_fs
cfg_img=user_cfg.jffs2.img

# Version string: <major>-<YYYYMMDD>-<description>; the description can be
# overridden via the first command-line argument (defaults to "Config").
build_date=$(date +%Y%m%d)
major_ver=002
descr=${1:-Config}
cfg_version=$major_ver-$build_date-$descr

# Record the version inside the filesystem tree before packing it.
echo $cfg_version > $cfg_dir/ver_cfg
echo "------------build user cfg [Version: $cfg_version]--------------"

rm -f $output_dir/$cfg_img
./utils/mkfs.jffs2 -e 0x10000 -n -d $cfg_dir -o $cfg_img --pad=0x100000
#./utils/mkfs.jffs2 -e 0x10000 -n -d $cfg_dir -o $cfg_img
cp -f $cfg_img /tftpboot/share
mv $cfg_img $output_dir
| true |
446cf9f5185c28453fcf985e6a0980045b7c425f | Shell | obitori/thp2 | /thp2_setup.sh | UTF-8 | 10,026 | 3 | 3 | [] | no_license | #!/bin/bash
# Part two of a bash script to install the packages recommended by Peter Kim in The Hacker Playbook 2
# http://www.amazon.com/dp/1512214566/
# Since Peter's book is based off of the Kali Linux platform, you can download the Kali Linux distro from: http://www.kali.org/downloads/. I highly recommend you download the VMware image (https://www.offensive-security.com/kali-linux-vmware-arm-image-download/) and download Virtual Player/VirtualBox. Remember that it will be a gz-compressed and tar archived file, so make sure to extract them first and load the vmx file.
# NOTE: run as root on Kali (installs into /opt via apt-get/pip/gem); there
# is no error handling, so watch the output of each step.
#FIXES ERROR IN SMBEXEC SCRIPT (DEAD LINK)
cd /opt && git clone https://github.com/csababarta/ntdsxtract /opt/NTDSXtract
cd /opt/NTDSXtract && /usr/bin/python ./setup.py build install
cd /opt && git clone https://github.com/infoassure/dumpntds /opt/dumpntds
# NOTE(review): links dumpntds' dshashes.py into the NTDSXtract tree --
# confirm the file exists in the dumpntds checkout.
cd /opt/dumpntds && ln -s dshashes.py /opt/NTDSXtract/
#Tool Installation
#The Backdoor Factory:
# Patch PE, ELF, Mach-O binaries with shellcode.
cd /opt && git clone https://github.com/secretsquirrel/the-backdoor-factory /opt/the-backdoor-factory
cd /opt/the-backdoor-factory
./install.sh
#HTTPScreenShot
# HTTPScreenshot is a tool for grabbing screenshots and HTML of large numbers of websites.
pip install selenium
cd /opt && git clone https://github.com/breenmachine/httpscreenshot.git /opt/httpscreenshot
cd /opt/httpscreenshot
chmod +x install-dependencies.sh && ./install-dependencies.sh
# HTTPScreenShot only works if you are running on a 64-bit Kali by default. If you are running 32-bit PAE, install i686 phatomjs as follows:
#wget https://bitbucket.org/ariya/phantomjs/downloads/phantomjs-1.9.8-linux-i686.tar.bz2
#bzip2 -d phantomjs-1.9.8-linux-i686.tar.bz2
#tar xvf phantomjs-1.9.8-linux-i686.tar
#cp phantomjs-1.9.8-linux-i686/bin/phantomjs /usr/bin/
#SMBExec
# A rapid psexec style attack with samba tools.
cd /opt && git clone https://github.com/pentestgeek/smbexec.git /opt/smbexec
# The installer is interactive and intentionally run twice: first to pull
# dependencies, then to compile the binaries (follow the prompts below).
cd /opt/smbexec && ./install.sh
# Select 1 - Debian/Ubuntu and derivatives
# Select all defaults
./install.sh
# Select 4 to compile smbexec binaries
# After compilation, select 5 to exit
#Masscan
# This is the fastest Internet port scanner. It can scan the entire Internet in under six minutes.
apt-get install git gcc make libpcap-dev
cd /opt && git clone https://github.com/robertdavidgraham/masscan.git /opt/masscan
cd /opt/masscan
make
make install
#Gitrob
# Reconnaissance tool for GitHub organizations
cd /opt && git clone https://github.com/michenriksen/gitrob.git /opt/gitrob
gem install bundler
# The postgres user/database bootstrap below is manual; run it by hand.
#service postgresql start
#su postgres
#createuser -s gitrob --pwprompt
#createdb -O gitrob gitrob
#exit
cd /opt/gitrob/bin
gem install gitrob
#CMSmap
# CMSmap is a python open source CMS (Content Management System) scanner that automates the process of detecting security flaws
# cd /opt && git clone https://github.com/Dionach/CMSmap /opt/CMSmap
#WPScan
# WordPress vulnerability scanner and brute-force tool
cd /opt && git clone https://github.com/wpscanteam/wpscan.git /opt/wpscan
cd /opt/wpscan && ./wpscan.rb --update
#Eyewitness
# EyeWitness is designed to take screenshots of websites, provide some server header info, and identify default credentials if possible.
cd /opt && git clone https://github.com/ChrisTruncer/EyeWitness.git /opt/EyeWitness
#Printer Exploits
# Contains a number of commonly found printer exploits
cd /opt && git clone https://github.com/MooseDojo/praedasploit /opt/praedasploit
#SQLMap
# SQL Injection tool
cd /opt && git clone https://github.com/sqlmapproject/sqlmap /opt/sqlmap
#Recon-ng
# A full-featured web reconnaissance framework written in Python
# (FIX: the section title above had lost its leading '#', so "Recon-ng"
# was executed as a command and failed at runtime.)
cd /opt && git clone https://bitbucket.org/LaNMaSteR53/recon-ng.git /opt/recon-ng
#Discover Scripts
# Custom bash scripts used to automate various pentesting tasks.
# NOTE(review): the clone below is commented out while update.sh is still
# invoked -- confirm /opt/discover already exists on the target system.
# cd /opt && git clone https://github.com/leebaird/discover.git /opt/discover
cd /opt/discover && ./update.sh
#BeEF Exploitation Framework
# A cross-site scripting attack framework
cd /opt/ && mkdir beef && cd beef/ && wget https://raw.github.com/beefproject/beef/a6a7536e/install-beef
chmod +x install-beef
./install-beef
#Responder
# A LLMNR, NBT-NS and MDNS poisoner, with built-in HTTP/SMB/MSSQL/FTP/LDAP rogue authentication server supporting NTLMv1/NTLMv2/LMv2, Extended Security NTLMSSP and Basic HTTP authentication. Responder will be used to gain NTLM challenge/response hashes
cd /opt && git clone https://github.com/SpiderLabs/Responder.git /opt/Responder
#The Hacker Playbook 2 - Custom Scripts
# A number of custom scripts written by myself for The Hacker Playbook 2.
cd /opt && git clone https://github.com/cheetz/Easy-P.git /opt/Easy-P
cd /opt && git clone https://github.com/cheetz/Password_Plus_One /opt/Password_Plus_One
cd /opt && git clone https://github.com/cheetz/PowerShell_Popup /opt/PowerShell_Popup
cd /opt && git clone https://github.com/cheetz/icmpshock /opt/icmpshock
cd /opt && git clone https://github.com/cheetz/brutescrape /opt/brutescrape
cd /opt && git clone https://www.github.com/cheetz/reddit_xss /opt/reddit_xss
#The Hacker Playbook 2 - Forked Versions
# Forked versions of PowerSploit and Powertools used in the book. Make sure you clone your own repositories from the original sources.
cd /opt && git clone https://github.com/cheetz/PowerSploit /opt/HP_PowerSploit
cd /opt && git clone https://github.com/cheetz/PowerTools /opt/HP_PowerTools
# FIX: the next line was corrupted in the source ("git clone
# https://githubmkdir dshashes && ..."): a truncated clone URL was fused
# with the dshashes download step.  Reconstructed as the dshashes.py fetch;
# the lost clone target could not be recovered from the corrupted text --
# TODO confirm against the upstream THP2 setup script.
cd /opt && mkdir dshashes && cd dshashes && wget -q https://raw.githubusercontent.com/lanmaster53/ptscripts/master/dshashes.py
#SPARTA:
# A python GUI application which simplifies network infrastructure penetration testing by aiding the penetration tester in the scanning and enumeration phase.
cd /opt && git clone https://github.com/secforce/sparta.git /opt/sparta
apt-get install python-elixir
apt-get install ldap-utils rwho rsh-client x11-apps finger
#NoSQLMap
# A automated pentesting toolset for MongoDB database servers and web applications.
cd /opt && git clone https://github.com/tcstool/NoSQLMap.git /opt/NoSQLMap
#Spiderfoot
# Open Source Footprinting Tool
mkdir /opt/spiderfoot/ && cd /opt/spiderfoot
wget http://sourceforge.net/projects/spiderfoot/files/spiderfoot-2.3.0-src.tar.gz/download
tar xzvf download
# Python dependencies for spiderfoot.
pip install lxml
pip install netaddr
pip install M2Crypto
pip install cherrypy
pip install mako
# WCE
# Windows Credential Editor (WCE) is used to pull passwords from memory
# Download from: http://www.ampliasecurity.com/research/windows-credentials-editor/ and save to /opt/. For example:
wget www.ampliasecurity.com/research/wce_v1_4beta_universal.zip
mkdir /opt/wce && unzip wce_v1* -d /opt/wce && rm wce_v1*.zip
#Mimikatz
# Used for pulling cleartext passwords from memory, Golden Ticket, skeleton key and more
# Grab the newest release from https://github.com/gentilkiwi/mimikatz/releases/latest
cd /opt/ && wget http://blog.gentilkiwi.com/downloads/mimikatz_trunk.zip
unzip -d ./mimikatz mimikatz_trunk.zip
#SET
# Social Engineering Toolkit (SET) will be used for the social engineering campaigns
cd /opt && git clone https://github.com/trustedsec/social-engineer-toolkit/ /opt/set/
cd /opt/set && ./setup.py install
# PowerSploit (PowerShell)
# PowerShell scripts for post exploitation
cd /opt && git clone https://github.com/mattifestation/PowerSploit.git /opt/PowerSploit
cd /opt/PowerSploit && wget https://raw.githubusercontent.com/obscuresec/random/master/StartListener.py && wget https://raw.githubusercontent.com/darkoperator/powershell_scripts/master/ps_encoder.py
#Nishang (PowerShell)
# Collection of PowerShell scripts for exploitation and post exploitation
cd /opt && git clone https://github.com/samratashok/nishang /opt/nishang
#Veil-Framework
# A red team toolkit focused on evading detection. It currently contains Veil-Evasion for generating AV-evading payloads, Veil-Catapult for delivering them to targets, and Veil-PowerView for gaining situational awareness on Windows domains. Veil will be used to create a python based Meterpreter executable.
cd /opt && git clone https://github.com/Veil-Framework/Veil /opt/Veil
cd /opt/Veil/ && ./Install.sh -c
#Burp Suite Pro
# Web Penetration Testing Tool
# Download: http://portswigger.net/burp/proxy.html. I would highly recommend that you buy the professional version. It is well worth the $299 price tag.
#ZAP Proxy Pro
# OWASP ZAP: An easy-to-use integrated penetration testing tool for discovering vulnerabilities in web applications.
# Download from: https://code.google.com/p/zaproxy/wiki/Downloads?tm=2
# *Included by default in Kali Linux (owasp-zap)
#Fuzzing Lists (SecLists)
# These are scripts to use with Burp to fuzz parameters
cd /opt && git clone https://github.com/danielmiessler/SecLists.git /opt/SecLists
#Password Lists
# For the different password lists, see the section: Special Teams - Cracking, Exploits, and Tricks
#Net-Creds Network Parsing
# Parse PCAP files for username/passwords
cd /opt && git clone https://github.com/DanMcInerney/net-creds.git /opt/net-creds
#Installing Firefox Add-ons
# Web Developer Add-on: https://addons.mozilla.org/en-US/firefox/addon/web-developer/
# Tamper Data: https://addons.mozilla.org/en-US/firefox/addon/tamper-data/
# Foxy Proxy: https://addons.mozilla.org/en-US/firefox/addon/foxyproxy-standard/
# User Agent Switcher: https://addons.mozilla.org/en-US/firefox/addon/user-agent-switcher/
#Wifite
# Attacks against WiFi networks
cd /opt && git clone https://github.com/derv82/wifite /opt/wifite
#WIFIPhisher
# Automated phishing attacks against WiFi networks
cd /opt && git clone https://github.com/sophron/wifiphisher.git /opt/wifiphisher
#Phishing (Optional):
# Phishing-Frenzy
cd /opt && git clone https://github.com/pentestgeek/phishing-frenzy.git /var/www/phishing-frenzy
# Custom List of Extras
git clone https://github.com/macubergeek/gitlist.git /opt/gitlist
#*Remember to check http://thehackerplaybook.com/updates/ for any updates.
# NOTE(review): the freeradius-wpe patch below lands in the current working
# directory -- confirm where it is meant to be applied.
wget https://raw.githubusercontent.com/brad-anton/freeradius-wpe/master/freeradius-wpe.patch
| true |
1f4530f0f47435219a91bfef2d2b10eacb9f261b | Shell | bioconda/bioconda-recipes | /recipes/qtip/build.sh | UTF-8 | 416 | 3.140625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# conda-build script for qtip.  $PREFIX, $PKG_NAME and $PKG_VERSION are
# supplied by conda-build.

# FIX: fail the build on the first error instead of silently packaging a
# broken result (the original ignored every failure).
set -e

export C_INCLUDE_PATH="${PREFIX}/include"
export LIBRARY_PATH="${PREFIX}/lib"

# makefile wants to write a package version taken from git
# since this is no git repo, we do that manually to avoid a failure
echo "$PKG_VERSION" > VERSION

make -C src

# Install the whole source tree under opt/ and expose the launcher on PATH.
PACKAGE_HOME="$PREFIX/opt/$PKG_NAME-$PKG_VERSION"
mkdir -p "$PREFIX/bin"
mkdir -p "$PACKAGE_HOME"
cp -r * "$PACKAGE_HOME"
cd "$PREFIX/bin"
ln -s "$PACKAGE_HOME/qtip" qtip
| true |
4d782369b5ad207419b71cd2ec12b74b5be75bb6 | Shell | sytranvn/contest_library | /notebook/code/euler_test/test_skierowane/test.sh | UTF-8 | 522 | 2.8125 | 3 | [] | no_license | #! /bin/bash
#set -x
# Build the directed-graph variant of the Euler-tour checker/generator by
# stripping the NIESKIEROWANYCH (undirected) lines from the shared sources.
cp ../checker.cpp .
cp ../../header.h .
grep -v "NIESKIEROWANYCH" ../../euler.cpp > euler.cpp
grep -v "NIESKIEROWANYCH" ../gen.py > gen.py
chmod +x gen.py
# Correctness pass: self-checking build (-DCHECK) over 100 random cases.
g++ checker.cpp -o checker -O2 -static -DCHECK || exit 1
for i in {1..100}; do
    ./gen.py > gen.in || exit 1
    ./checker < gen.in > /dev/null || exit 1
done
# Benchmark pass: rebuild against the solution header and time one large case.
g++ checker.cpp -o checker -O2 -static -I.. || exit 1
echo
echo BENCH
echo
./gen.py 100000 1000000 > gen.in
time ./checker < gen.in > /dev/null || exit 1
rm gen.in
| true |
8bd6ab2dead7062dc74187088bfe2a702e1c39c4 | Shell | Jonatas-Gabriel/Tarefas | /Lista_06/ex05.sh | UTF-8 | 142 | 3.1875 | 3 | [] | no_license | #!/bin/bash
# Print a recursive directory listing or all files, selected by the argument
# ("Diretórios" or "Arquivos"); anything else prints "Fim".
#
# Fixes over the original: the tests were missing the space before "]" (a
# runtime syntax error), $* was unquoted (word-splits and breaks with zero or
# multiple arguments), the if-block was never closed with "fi", and the first
# message had a typo ("DIretórios").
if [ "$*" == "Diretórios" ]; then
    echo "Diretórios"
    ls -R
elif [ "$*" == "Arquivos" ]; then
    echo "Arquivos"
    ls -a
else
    echo "Fim"
fi
| true |
1fc32b4ba9aa2fe9af53205b6db1655b585da30e | Shell | Tarsnap/scrypt | /tests/05-system-scrypt-encrypt-decrypt.sh | UTF-8 | 1,925 | 3.703125 | 4 | [
"BSD-2-Clause"
] | permissive | #!/bin/sh
### Constants
# Cross-compatibility scenario: files encrypted by this build of scrypt must
# decrypt with the system-installed scrypt, and vice versa.
# ${scriptdir}, ${s_basename}, ${password}, ${bindir}, ${system_scrypt},
# ${c_valgrind_cmd}, setup_check_variables and ${c_exitfile} are presumably
# supplied by the enclosing test harness — they are not defined here.
c_valgrind_min=1
reference_file="${scriptdir}/verify-strings/test_scrypt.good"
encrypted_file_1="${s_basename}-sys.enc"
decrypted_file_1="${s_basename}-sys.txt"
encrypted_file_2="${s_basename}-our.enc"
decrypted_file_2="${s_basename}-our.txt"

# Each step writes its exit status to ${c_exitfile}; a leading "-1" marks the
# whole scenario as skipped when no system scrypt is available.
scenario_cmd() {
    if [ -z "${system_scrypt}" ]; then
        printf "no suitable system scrypt: " 1>&2
        # Inform test suite that we are skipping.
        setup_check_variables "system scrypt skip"
        echo "-1" > "${c_exitfile}"
        return
    fi

    # Encrypt a file with our scrypt.
    setup_check_variables "scrypt enc for system"
    (
        echo "${password}" | ${c_valgrind_cmd} "${bindir}/scrypt" \
            enc -P -t 1 "${reference_file}" "${encrypted_file_1}"
        echo $? > "${c_exitfile}"
    )

    # Use the system scrypt to decrypt the file we just
    # encrypted.  Don't use valgrind for this.
    setup_check_variables "system scrypt dec"
    (
        echo "${password}" | ${system_scrypt} \
            dec -P "${encrypted_file_1}" "${decrypted_file_1}"
        echo $? > "${c_exitfile}"
    )

    # The decrypted file should match the reference.
    setup_check_variables "system scrypt dec output against reference"
    cmp -s "${decrypted_file_1}" "${reference_file}"
    echo $? > "${c_exitfile}"

    # Encrypt a file with the system scrypt.  Don't use
    # valgrind for this.
    setup_check_variables "system scrypt enc"
    (
        echo "${password}" | ${system_scrypt} \
            enc -P -t 1 "${reference_file}" "${encrypted_file_2}"
        echo $? > "${c_exitfile}"
    )

    # Use our scrypt to decrypt the file we just encrypted.
    setup_check_variables "scrypt dec for system"
    (
        echo "${password}" | ${c_valgrind_cmd} "${bindir}/scrypt" \
            dec -P "${encrypted_file_2}" "${decrypted_file_2}"
        echo $? > "${c_exitfile}"
    )

    # The decrypted file should match the reference.
    setup_check_variables "scrypt dec for system output against reference"
    cmp -s "${decrypted_file_2}" "${reference_file}"
    echo $? > "${c_exitfile}"
}
| true |
ca011228771b1415f789c54546f400b2c4840589 | Shell | anubhavashok/RSOLP_Video | /daemons/rsolp_process_inbox | UTF-8 | 2,220 | 3.59375 | 4 | [] | no_license | #! /bin/bash
#Run this script in the background with:
# nohup nice sudo ./rsolp_daemon.sh > ./rsolp_daemon.log 2> ./rsolp_daemon.err < /dev/null &
# Daemon loop: pick the oldest project from the inbox, render it with the
# MATLAB pipeline, publish a zip into the outbox and mail the user a link.
PROJECT_DIR='/srv/rsolp/rsolp/src/inbox/'
OUTPUT_DIR='/srv/rsolp/rsolp/src/outbox/'
WEB_DIR='http://vision-app1.cs.uiuc.edu:8080/result'
LOG_DIR='/srv/rsolp/rsolp/src/log/'
MATLAB_DIR='/srv/rsolp/rsolp/src/matlab'
FAIL_DIR='/srv/rsolp/rsolp/src/fail'

while [ true ]
do
    find ${OUTPUT_DIR}/* -mtime +7 -exec rm {} \; #Remove processed files older than 7 days
    find ${LOG_DIR}/* -mtime +7 -exec rm {} \; #Remove log files older than 7 days
    project=`ls -tr ${PROJECT_DIR} | head -1`; #Get oldest result not yet processed
    if [ -n "${project}" ]
    then
        (${MATLAB_DIR}/run_createBlenderScene.sh /opt/MATLAB/MATLAB_Compiler_Runtime/v79/ ${PROJECT_DIR}/${project} 2>&1) > ${LOG_DIR}/${project}.txt #Run matlab code, store logs
        rm -f ${PROJECT_DIR}/${project}/out/*.py #Remove bpy script
        email_add=`head -n 1 ${PROJECT_DIR}/${project}/user.txt`; #Read in email address associated with project
        #cp ${PROJECT_DIR}/${project}/annotations.txt ${PROJECT_DIR}/${project}/out/annotations.txt #debug
        CUR_DIR=`pwd`
        cd ${PROJECT_DIR}/${project};
        mv out ${project};
        cp ${MATLAB_DIR}/RM_BLEND.txt ${project}/README.txt;
        zip -r ${OUTPUT_DIR}/${project}.zip ${project}; #zip processed dir
        # BUG FIX: the original read `cd ${CUR_DUR}` — a typo for CUR_DIR — so
        # the empty expansion made this `cd` to $HOME and the daemon never
        # returned to its working directory.
        cd "${CUR_DIR}"
        if [ -e ${OUTPUT_DIR}/${project}.zip ]
        then
            rm -rf "${PROJECT_DIR}/${project}"; #Clear processed dir
            email_link="${WEB_DIR}/${project}.zip"; #Link to zip file on server
            #Send email with result link
            echo -e "Your project has been processed and can be downloaded here:\n\n${email_link}\n\nThis link will remain active for 7 days." | mailx -aFrom:rsolp_noreply@vision.cs.uiuc.edu -s "[RSOLP] Project ${project} complete" -t ${email_add}
        else
            mv "${PROJECT_DIR}/${project}" "${FAIL_DIR}/"
            echo -e "For some reason, the scene you created was not able to be processed. A bug report has been automatically generated, and we are working to fix this issue. Sorry for the inconvenience, and we appreciate your patience." | mailx -aFrom:rsolp_noreply@vision.cs.uiuc.edu -s "[RSOLP] Project ${project} failed" -t ${email_add}
        fi
    else
        sleep 1; #Sleep if no projects left to process
    fi
done
| true |
e1528261163f0b0bf4f1e935b384da609998df6a | Shell | ICGC-TCGA-PanCancer/awesome-wfpkgs2 | /scripts/cleanup_temp_files.sh | UTF-8 | 235 | 2.71875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Remove Nextflow work/, .nextflow and outdir/ directories plus stray
# .nextflow.log files beneath the current directory.
# Read the find output line by line instead of word-splitting a backticked
# `for f in $(find …)`, which broke on paths containing whitespace.
find . -type d | egrep '(/work$)|(/.nextflow$)|(/outdir$)|(.nextflow.log)' |
while IFS= read -r dir; do
    echo "remove $dir"
    rm -fr "$dir"
done
find . -type f | egrep '\.nextflow\.log' |
while IFS= read -r file; do
    echo "remove $file"
    rm "$file"
done
| true |
9fcd9f0b0bb51a1e25077cfe56d9f95cab3f5cf4 | Shell | zhujiangtao123/script | /zhushell/ll.sh | UTF-8 | 242 | 2.921875 | 3 | [] | no_license | #!/bin/bash
# For every "[Statistics] 16" record in the log, rewrite the epoch timestamp
# found in field 2 as a human-readable date and print the rewritten record.
sed -n '/\[Statistics\] 16/{s/^.*] 16 //p}' 400009-debug-230000-0001 | while read record
do
    epoch=$(echo $record | awk '{print $2}')
    stamp=$(date -d @$epoch +"%Y-%m-%d %H:%M:%S")
    echo $record | sed -n "s/$epoch/$stamp/p"
done
| true |
e0025d46beccc866ccc3d2dbc58a54c7b523d243 | Shell | gipsyblues/ServalSignalMapsGenerator | /generate_website.sh | UTF-8 | 329 | 2.65625 | 3 | [] | no_license | #!/bin/bash
# Build pipeline: render per-trace HTML with the Python generator, feed it to
# Jekyll, publish the generated site into website/, then remove intermediates.
echo "Generating the base html using .trace files"
python generator.py
echo "Copying the html"
cp *.html jekyll/
echo "Using jekyll to generate the full website"
cd jekyll/
jekyll
cd ../
echo "Moving the website into local website directory"
mv jekyll/_site/* website/
echo "Cleaning up files"
rm *.html
rm jekyll/*.html
| true |
37428cb8cc4532bfed7b54a729fc09662eb652db | Shell | bryce-s/AutoNotecards | /bin/formatCode.sh | UTF-8 | 466 | 2.875 | 3 | [] | no_license | #!/bin/bash
set -x
# npm -g install js-beautify (for js and html..)
autopep8 --in-place --aggressive --aggressive autonotecards/autonotecards/*.py
autopep8 --in-place --aggressive --aggressive autonotecards/processor/*.py
js-beautify -r autonotecards/templates/*.html
# recurse through dir applying formatters
for f in autonotecards/*;
do
    # Format inside a subshell so the working directory is always restored.
    # The original ran an unconditional `cd ..` after a guarded `cd "$f"`:
    # for non-directory entries it walked UP the tree each iteration, and
    # since $f is two path components deep, `cd ..` never returned to the
    # starting directory anyway.
    if [ -d "$f" ]; then
        (cd "$f" && autopep8 --in-place --aggressive --aggressive *.py && js-beautify -r *.html)
    fi
done;
| true |
f4f825e2e23590b3d336914d1c3548e4bf4ef099 | Shell | gaojiayi/voltdb_sdk | /deploy/deploy.sh | UTF-8 | 2,142 | 3.765625 | 4 | [] | no_license | #!/bin/bash
# Name of the VoltDB install directory (created under the current directory)
# and the root under which the bundled JDK tarball is unpacked.
v_voltdb="voltdb"
v_jdk_install_path="$HOME/"
#
# Interactive driver: optionally install JDK 1.6, then install (or reinstall)
# VoltDB, then write configuration files.  Answers are read via check(),
# which returns "", "Y" or "N"; empty means "accept the default (yes)".
main()
{
    echo -n "Do you install JDK1.6? [y/n default y] :"
    v_temp=`check`
    # Empty reply (default) or explicit Y installs the JDK.
    if [[ ${v_temp}X = X || ${v_temp} = 'Y' ]]; then
        #statements
        installJdk
    fi
    echo "To install voltdb."
    if [[ -d $v_voltdb ]]; then
        #statements
        # An install already exists: confirm before overwriting it.
        echo -n "Are you sure you want to overwrite the VOLTDB directory?[y/n default y]"
        v_temp=`check`
        if [[ ${v_temp}X = X ]]; then
            #statements
            installVoltdb y true
        else
            installVoltdb $v_temp true
        fi
    else
        installVoltdb y false
    fi
    setfile
}
# Read one line from stdin and normalise it to upper case; keep prompting
# until the reply is empty (accept default), Y or N, then echo the result.
#
# BUG FIX: the original recursed on invalid input but then ALSO echoed its
# own invalid value after the recursive call returned, so callers capturing
# `$(check)` received several words (e.g. "N X").  An iterative retry loop
# emits exactly one answer.
check()
{
    local answer temp
    while true; do
        read answer
        temp=$(echo $answer | tr '[a-z]' '[A-Z]')
        if [[ ${temp} == "" || ${temp} == "Y" || ${temp} == "N" ]]; then
            break
        fi
    done
    echo $temp
}
# Unpack the bundled JDK 1.6 tarball into $v_jdk_install_path and point the
# shared bashrc.ini at it (replacing the default jdk1.8.0_65 path).  On tar
# failure the partial extraction is removed.
installJdk()
{
    info=`tar xvf $HOME/jdk/jdk1.6.0_24.tar -C ${v_jdk_install_path}`
    if [[ $? -ne 0 ]]; then
        #statements
        rm -rf $v_jdk_install_path/jdk1.6.0_24
        echo 'deploy JDK file is failed.'
    else
        # Edit via a .back copy so sed never reads and writes the same file.
        cp $HOME/cfg/bashrc.ini $HOME/cfg/bashrc.ini.back
        sed "s/jdk1.8.0_65/jdk1.6.0_24/g" $HOME/cfg/bashrc.ini.back > $HOME/cfg/bashrc.ini
        rm -rf $HOME/cfg/bashrc.ini.back
        echo "deploy JDK file is successful."
    fi
}
# Install VoltDB 3.6 from the bundled tarball.
#   $1 - user's answer from check(): 'n'/'N'/empty aborts the install.
#   $2 - 'true' when an existing $HOME/$v_voltdb directory must be removed
#        first (overwrite case), anything else to keep it.
installVoltdb()
{
    if [[ $1 = 'n' || $1 = 'N' || ${1}X = X ]]; then
        #statements
        return;
    fi
    #echo $1
    #directory exist
    if [[ $2 = 'true' ]]; then
        rm -rf $HOME/$v_voltdb
    fi
    info=`tar zxvf $HOME/db/LINUX-voltdb-3.6.tar.gz -C ~`
    if [[ $? -eq 0 ]]; then
        #statements
        # Rename the versioned extraction dir to the fixed install name.
        mv ~/voltdb-3.6 $v_voltdb
        echo "voltdb install successful."
    else
        echo "voltdb install failed."
    fi
}
# Post-install configuration: normalise line endings, make the helper scripts
# executable, create the voltdbroot directory named in deployment.xml, and
# source bashrc.ini from .bash_profile exactly once.
setfile()
{
    if [[ ! -d $HOME/cfg ]]; then
        #statements
        # NOTE(review): cp of a directory without -r will fail — this likely
        # needs `cp -r cfg ~`; confirm against the distributed layout.
        cp cfg ~
    fi
    dos2unix $HOME/cfg/bashrc.ini
    chmod +x $HOME/ailib/*.sh
    # Pull the voltdbroot path out of the deployment descriptor (second
    # double-quoted field on the matching line).
    vdir=`cat $HOME/ailib/deployment.xml | grep voltdbroot | awk -F '"' '{print $2}'`
    mkdir -p $vdir
    # Append the source line only when it is not already present.
    info=`grep -i "source $HOME/cfg/bashrc.ini" .bash_profile | wc -l`
    if [[ $info == 0 ]]; then
        #statements
        echo "source $HOME/cfg/bashrc.ini" >> .bash_profile
    fi
    source ~/.bash_profile
}
# Remove the installer artefacts from $HOME once deployment has finished.
clean()
{
    #rm -rf $HOME/ailib
    local artefact
    for artefact in ai-voltdb.zip db jdk deploy.sh; do
        rm -rf "$HOME/$artefact"
    done
}
main
clean | true |
d9a8812db880fbdb4e6c5c35222e3f0c14508d1c | Shell | ahmedbutt7121991/dev_env_setup | /tondev_scripts/db_migration_v2/prod_bk_rm.sh | UTF-8 | 1,703 | 3.140625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Remove the database backup (pub/db.sql.gz) from the production server.
# ${PRODUCTION_SERVER} and ${PROD_DOMAIN_PATH} are presumably defined by the
# sourced config.sh — confirm before running.
source config.sh
echo "======================"
echo "==== RM DB BK ===="
echo "======================"
echo "==sshing production server=="
# The entire remote session is one double-quoted string, so ${...} variables
# are expanded LOCALLY before ssh runs; the \"...\" escapes survive to the
# remote shell.  Listings before and after the rm act as a manual audit trail.
db_bk_rm() {
    echo "===========>>>>"
    echo "======================================================================="
    echo "=============== PRODUCTION TASKS RUNNING ================="
    echo "======================================================================="
    ssh ${PRODUCTION_SERVER} "pwd;
cd ${PROD_DOMAIN_PATH};
echo \"Script location dir Listing.............................................\";
ls -ltrh;
echo \"Checking db bk in domain root...........................................\";
ls -ltrh | grep db;
cd var;
pwd;
echo \"Checking db bk in domain root/var dir...................................\";
ls -ltrh | grep db;
cd ../pub;
pwd;
echo \"Checking db bk in domain root/pub dir...................................\";
ls -ltrh | grep db;
echo \"Removing db bk in domain root/pub dir...................................\";
rm -rf db.sql.gz;
echo \"Checking after removing db bk in domain root/pub dir....................\";
ls -ltrh | grep db;
exit"
    pwd
    # echo "Home dir Listing............................................."
    # pwd
    # cd ${PROD_DOMAIN_PATH}
    # echo "==========================="
    # echo "Script location dir Listing............................................."
    # pwd
    # echo "**** Creating DB dump using mage2 Script ****"
    # bash ${db_dump_script} -dz
    # cd var
    # ls | grep db
    # mv db.sql.gz ${db_dump_path}
    # cd ${db_dump_path}
    # ls ${db_dump_path}
}
db_bk_rm | true |
01b3065f961adaa24a02fc15827ffd38ccd15a29 | Shell | ggirelli/gpseq-seq-gg | /scripts/reads_align.sh | UTF-8 | 4,402 | 4.28125 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# ------------------------------------------------------------------------------
#
# Author: Gabriele Girelli
# Email: gigi.ga90@gmail.com
# Version: 1.0.0
# Description: align reads
#
# ------------------------------------------------------------------------------
# ENV VAR ======================================================================
export LC_ALL=C
# INPUT ========================================================================
# Help string
helps="
usage: ./reads_align.sh [-h][-t threads] -o outDir -c cond
[-p][-r ref][-a aln][-i index]
Description:
Align reads using either bowtie2 or bwa mem.
Mandatory arguments:
-o outDir Output directory. Created if not found.
-c cond Condition to analyze.
Optional arguments:
-h Show this help page.
-t threads Number of threads for parallelization.
-p Option for paired-end sequencing.
-r ref Reference ref. Default: hg19.
-a aln Aligner. Either 'bwa' or 'bowtie2'. Default: 'bwa'.
-i index Path to BWA index.
Default: '\$DATA'/BiCro-Resources/genomes/'\$ref'bwt/'\$ref'.fa
"
# Default values
threads=1
paired=0
ref='hg19'
aligner='bwa'
# Parse options
while getopts ht:o:c:pr:a:i: opt; do
case $opt in
h)
echo -e "$helps\n"
exit 1
;;
t)
if [ 0 -ge "$OPTARG" ]; then
echo -e "Enforcing a minimum of 1 thread.\n"
else
threads=$OPTARG
fi
;;
o)
out_dir=$OPTARG
if [ ! -d "$OPTARG" ]; then
msg="Output folder not found, creating it."
mkdir -p $out_dir
fi
;;
c)
condition=$OPTARG
;;
p)
paired=1
;;
r)
ref=$OPTARG
;;
a)
if [ 'bwa' == "$OPTARG" -o 'bowtie2' == "$OPTARG" ]; then
aligner=$OPTARG
else
msg="Invalid -a option. Available values: 'bwa', 'bowtie2'."
echo -e "$helps\n$msg"
exit 1
fi
;;
i)
if [ -e $OPTARG ]; then
bwaIndex=$OPTARG
else
msg="Invalid -i option, file not found.\nFile: $OPTARG"
echo -e "$helps\n$msg"
exit 1
fi
;;
esac
done
# Check mandatory options
if [ -z "$out_dir" ]; then
echo -e "$helps\n!!! Missing mandatory -o option.\n"
exit 1
fi
if [ -z "$condition" ]; then
echo -e "$helps\n!!! Missing mandatory -c option.\n"
exit 1
fi
# Additional checks
if [ -z "$bwaIndex" ]; then
bwaIndex="$DATA"/BiCro-Resources/genomes/"$ref"bwt/"$ref".fa
fi
# RUN ==========================================================================
# Paired-end alignment ---------------------------------------------------------
if [[ 1 -eq $paired ]]; then
echo " · Performing paired-end alignment ..."
# BWA alignment
if [ -n "$aligner" -a "bwa" == "$aligner" ]; then
bwa mem -t $threads $bwaIndex $out_dir/filtered.r1.noLinker.fq \
$out_dir/filtered.r2.fq > $out_dir/"$condition".sam \
2> $out_dir/bwa.log
# Bowtie2 alignment
elif [ "bowtie2" == "$aligner" ]; then
bowtie2 -q -p $threads -1 $out_dir/filtered.r1.noLinker.fq \
-2 $out_dir/filtered.r2.fq -S $out_dir/"$condition".sam -x $ref \
2> $out_dir/bowtie2.log
else
echo -e "ERROR: unrecognized aligner."
exit 1
fi
# Single-end alignment ---------------------------------------------------------
else
echo " · Performing single-end alignment ..."
# BWA alignment
if [ -n "$aligner" -a "bwa" == "$aligner" ]; then
bwa mem -t $threads $bwaIndex $out_dir/filtered.r1.noLinker.fq \
> $out_dir/"$condition".sam 2> $out_dir/bwa.log
# Bowtie2 alignment
elif [[ 'bowtie2' == "$aligner" ]]; then
bowtie2 -q -p $threads -U $out_dir/filtered.r1.noLinker.fq \
-S $out_dir/"$condition".sam -x $ref 2> $out_dir/bowtie2.log
else
echo -e "ERROR: unrecognized aligner."
exit 1
fi
fi
# Save log ---------------------------------------------------------------------
# Echo the chosen aligner's log to stdout.
# BUG FIX: the original condition used `-o` (OR), which is true for ANY
# non-empty $aligner, so the bowtie2 branch was unreachable and a bowtie2 run
# tried to `cat` a non-existent bwa.log.  Use the same AND test as the
# alignment sections above.
if [ -n "$aligner" -a "bwa" == "$aligner" ]; then
    cat $out_dir/bwa.log
elif [[ 'bowtie2' == "$aligner" ]]; then
    cat $out_dir/bowtie2.log
else
    echo -e "ERROR: unrecognized aligner."
    exit 1
fi
# Generate BAM -----------------------------------------------------------------
echo -e " · Generating and sorting BAM file ..."
samtools sort -@ $threads -o $out_dir/"$condition".sorted.bam \
$out_dir/"$condition".sam
echo -e " · Indexing BAM file ..."
samtools index $out_dir/"$condition".sorted.bam $out_dir/"$condition".sorted.bai
# END --------------------------------------------------------------------------
################################################################################
| true |
ab935090fe7c81de2b8daa0eaece4cf15a470b6a | Shell | redhat-cip/edeploy-roles | /puppetdb-server.install | UTF-8 | 1,848 | 3.078125 | 3 | [] | no_license | #!/bin/bash
#
# Copyright (C) 2014 eNovance SAS <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# eDeploy role build: install PuppetDB server packages into the chroot at $dir.
#   $1 - source tree, $2 - chroot directory, $3 - version (recorded by caller).
# Helper functions (install_ib_if_needed, add_*_repository, $DIST, $OS, ...)
# come from the sourced ./functions and ./repositories files.
src="$1"
dir="$2"
version="$3"
ROLE=puppetdb-server

ORIG=$(cd $(dirname $0); pwd)

. ${ORIG}/functions
. ./repositories

install_ib_if_needed $ORIG $dir

add_puppet_repository $DIST
add_epel_repository $DIST
update_repositories $dir

case "$OS" in
    "Debian")
        repository=$(add_main_repository $DIST)
        cat > ${dir}/etc/apt/sources.list.d/$RELEASE-backport.list <<EOF
deb $repository ${RELEASE}-backports main
EOF
        install_packages_disabled $dir puppetdb augeas-tools git ntp puppetdb-terminus apache2 libapache2-mod-wsgi
        ;;
    "RedHatEnterpriseServer")
        # Attach to the pool "Red Hat Enterprise Linux OpenStack Platform (Physical)"
        attach_pool_rh_cdn $dir $RHN_CDN_POOL_ID
        add_rh_cdn_repo $dir rhel-7-server-openstack-6.0-rpms
        add_rh_cdn_repo $dir rhel-7-server-rpms
        # ";;&" (bash 4+) keeps testing the following patterns, so RHEL also
        # falls through into the CentOS/RHEL package-install arm below.
        ;;&
    "CentOS"|"RedHatEnterpriseServer")
        install_packages_disabled $dir puppetdb git augeas ntp httpd puppetdb-terminus python-pip mod_wsgi apr-util-devel apr-devel httpd-devel zlib-devel openssl-devel libcurl-devel gcc-c++ gcc mod_ssl ruby-devel
        ;;
esac

# Wipe any pre-generated puppet certificates; "|| :" tolerates a missing dir.
do_chroot ${dir} rm -rf /var/lib/puppet/ssl/* || :

remove_puppet_repository $DIST
remove_epel_repository $DIST
clear_packages_cache $dir
| true |
c0a603f356866b008f144c17a78b993116acc3db | Shell | janth/vbox-snippets | /bin/vbox-pxe.sh | UTF-8 | 5,690 | 3.203125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Where VM directories live, and VirtualBox's built-in NAT TFTP root.
vbox_basedir=$HOME/VirtualBox
vbox_tftpdir=$HOME/.VirtualBox/TFTP # Same directory which has the file VirtualBox.xml: https://www.virtualbox.org/manual/UserManual.html#nat-tftp
# Setup for PXE:
: << X
# Same directory which has the file VirtualBox.xml: https://www.virtualbox.org/manual/UserManual.html#nat-tftp
vbox_tftpdir=$HOME/.VirtualBox/TFTP
mkdir -p ${vbox_tftpdir}/{pxelinux.cfg,images/centos/{6,7}}
cd ${vbox_tftpdir}/
# Get syslinux 6.03 (latest from 2014) PXE boot
wget -O - https://www.kernel.org/pub/linux/utils/boot/syslinux/6.xx/syslinux-6.03.tar.gz | tar -xzf - -C ${vbox_tftpdir}/ --transform='s/.*\///' syslinux-6.03/bios/{core/pxelinux.0,com32/{menu/{menu,vesamenu}.c32,libutil/libutil.c32,elflink/ldlinux/ldlinux.c32,chain/chain.c32,lib/libcom32.c32}}
cd images/centos/7/
wget http://mirror.centos.org/centos/7/os/x86_64/images/pxeboot/{initrd.img,vmlinuz}
cd ../6
wget http://mirror.centos.org/centos/6/os/x86_64/images/pxeboot/{initrd.img,vmlinuz}
cd ../../pxelinux.cfg
vim ${vbox_tftpdir}/pxelinux.cfg/default
X
: << Z
Kickstart:
https://www.centos.org/docs/5/html/Installation_Guide-en-US/s1-kickstart2-startinginstall.html
https://gist.github.com/ereli/e868fcaeb660e420d7a6
mkfs.msdos -C myfloppy.img 1440
sudo mount -o loop myfloppy.img /media/floppy/
sudo cp kickstart.sys /media/floppy/
sudo umount /media/floppy
ks=floppy:/<path>
ks=hd:fd0:/ks.cfg
ks=cdrom:/ks.cfg
Z
# About nat tuning: https://www.virtualbox.org/manual/UserManual.html#changenat
# For network booting in NAT mode, by default VirtualBox uses a built-in TFTP
# server at the IP address 10.0.2.4. This default behavior should work fine for
# typical remote-booting scenarios. However, it is possible to change the boot
# server IP and the location of the boot image with the following commands:
# VBoxManage modifyvm "VM name" --nattftpserver1 10.0.2.2
# VBoxManage modifyvm "VM name" --nattftpfile1 /srv/tftp/boot/MyPXEBoot.pxe
# -biospxedebug on|off:
# This option enables additional debugging output when using the Intel PXE boot
# ROM. The output will be written to the release log file (Section 12.1.2,
# “Collecting debugging information”.
# -nicbootprio<1-N> <priority>:
# This specifies the order in which NICs are tried for booting over the network
# (using PXE). The priority is an integer in the 0 to 4 range. Priority 1 is
# the highest, priority 4 is low. Priority 0, which is the default unless
# otherwise specified, is the lowest.
# Note that this option only has effect when the Intel PXE boot ROM is used.
# Tear down any previous test VM (registration, disk directory and its
# per-VM TFTP symlink), then recreate the VM directory, point the TFTP name
# at pxelinux.0 and allocate a fresh 1 GiB disk image.
cd ${vbox_basedir}
MyVM=testvm
vboxmanage unregistervm ${MyVM} --delete
[[ -d ${MyVM} ]] && rm -rf ${MyVM}
[[ -r ${vbox_tftpdir}/${MyVM} ]] && rm ${vbox_tftpdir}/${MyVM}
# VirtualBox's NAT TFTP serves <tftpdir>/<vmname>; link it to the PXE loader.
ln -s pxelinux.0 ${vbox_tftpdir}/${MyVM}
mkdir ${MyVM}
cd ${MyVM}
hd0=${vbox_basedir}/${MyVM}/${MyVM}_disk0.vdi
vboxmanage createhd --filename ${hd0} --size 1024 # MB
vboxmanage createvm --name ${MyVM} --ostype RedHat_64 --register
vboxmanage modifyvm ${MyVM} \
--memory 512 \
--vram=9 \
--acpi on \
--ioapic on \
--pae on \
--cpuexecutioncap 40 \
--nestedpaging on \
--largepages off \
--vtxvpid off \
--cpus 2 \
--rtcuseutc off \
--monitorcount 1 \
--accelerate3d off \
--accelerate2dvideo off \
--firmware bios \
--chipset piix3 \
--mouse ps2 \
--keyboard ps2 \
--uart1 off \
--uart2 off \
--audio none \
--usb off \
--usbehci off \
--vrde off \
--teleporter off \
--nic1 NAT \
--cableconnected2 off \
--nic2 hostonly \
--hostonlyadapter2 vboxnet0 \
--nictype2 virtio \
--nattftpfile1 /pxelinux.0
# optional second NIC \
# --nic2 bridged \
# --bridgeadapter2 enp0s25
vboxmanage modifyvm ${MyVM} --nictype1 virtio
# optional second NIC
# vboxmanage modifyvm ${MyVM} --nictype2 virtio
# to do PXE boot
vboxmanage modifyvm ${MyVM} --boot1 net --boot2 disk --boot3 none --boot4 none
# or for normal boot:
# vboxmanage modifyvm ${MyVM} --boot1 disk --boot2 net --boot3 dvd --boot4 none
vboxmanage storagectl ${MyVM} --name "SATA Controller" --add sata --controller IntelAHCI
vboxmanage storageattach ${MyVM} --storagectl "SATA Controller" --port 0 --device 0 --type hdd --medium ${hd0}
## VBoxManage modifyvm \
## $machine_name \
## --memory 1024 \
## --vram 5 \
## --acpi off \
## --ioapic on \
## --pae off \
## --nestedpaging on \
## --largepages off \
## --vtxvpid on \
## --cpus 2 \
## --rtcuseutc on \
## --monitorcount 1 \
## --accelerate3d off \
## --accelerate2dvideo off \
## --firmware bios \
## --chipset piix3 \
## --boot1 dvd \
## --boot2 disk \
## --boot3 none \
## --boot4 none \
## --mouse ps2 \
## --keyboard ps2 \
## --uart1 off \
## --uart2 off \
## --audio none \
## --usb off \
## --usbehci off \
## --vrde off \
## --teleporter off \
## # --nictype1 Am79C970A \
## # --nic2 hostonly \
## # --hostonlyadapter2 vboxnet0 \
## # --nictype2 82540EM \
## # --cableconnected2 on \
##
| true |
256727f379b038fc5540ef969669a70fffc7970b | Shell | Jerrykl/CoGraph | /src/runtest.sh | UTF-8 | 601 | 3.234375 | 3 | [] | no_license | #!/bin/bash
# Benchmark driver: run pagerank, cc and sssp ten times each over the dataset
# named by $1 on the fixed MPI node set, logging each run under ../result/.
export OMP_WAIT_POLICY=PASSIVE
export OMP_PROC_BIND=false
export OMP_PLACES=cores
# parameter
# $1 dataset
result_dir="../result/"
dataset_dir="../../data/"
dataset=$1
nodes=node6,node8,node9,node11,node14,node17
nodes_array=(${nodes//,/ })
num_nodes=${#nodes_array[@]}
threshold=1000
sssp_source=12
programs=(pagerank cc sssp)
for program in "${programs[@]}"; do
    dir="$result_dir$program-$dataset-$num_nodes"
    # -p: don't fail (and spam stderr) when the result directory already
    # exists from an earlier run.
    mkdir -p "$dir"
    for i in {1..10}; do
        mpirun -hosts $nodes "./$program" "$dataset_dir$dataset.bin" $threshold $sssp_source > "$dir/$i.txt"
        echo "$dir/$i.txt"
    done
done
| true |
2984639eb63719412314d227c009d143d639e090 | Shell | woodgreat/Tickeys-linux | /tickeys/kivy/tools/packaging/osx/kivy.sh | UTF-8 | 684 | 2.703125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Portable Kivy launcher: resolve symlinks to locate the bundle root, point
# the Python/GStreamer/dyld search paths into it, then exec the interpreter
# from the active Python prefix with the caller's arguments.
SCRIPT_PATH="${BASH_SOURCE[0]}"
if [ -h "${SCRIPT_PATH}" ]; then
    while [ -h "${SCRIPT_PATH}" ]; do SCRIPT_PATH=$(readlink "${SCRIPT_PATH}"); done
fi
SCRIPT_PATH=$(dirname "${SCRIPT_PATH}")
export PYTHONPATH=${SCRIPT_PATH}/kivy:${SCRIPT_PATH}/lib/sitepackages:$PYTHONPATH
export DYLD_FALLBACK_LIBRARY_PATH=${SCRIPT_PATH}/lib:$DYLD_FALLBACK_LIBRARY_PATH
export LD_PRELOAD_PATH=${SCRIPT_PATH}/lib:$LD_PRELOAD_PATH
export GST_PLUGIN_PATH=${SCRIPT_PATH}/lib/gst-plugins:$GST_PLUGIN_PATH
export GST_PLUGIN_SCANNER=${SCRIPT_PATH}/lib/bin/gst-plugin-scanner
export GST_REGISTRY_FORK="no"
# Quote the command substitution: an install path containing spaces (common
# on macOS) was word-split before exec in the original.
exec "$(python -c "import os, sys; print(os.path.normpath(sys.prefix))")/bin/python2.7" "$@"
| true |
a0129b2fa3fd95a5fd454c9ba7bf5f9a0b75011c | Shell | Africrop/fonio_smcpp | /run_bamCaller_multipleChr_fonio.bash | UTF-8 | 1,468 | 3.09375 | 3 | [] | no_license | #!/bin/sh
# Before running, ensure it is correctly formatted by using dos2unix script_name.sh
# Make it executable by chmod 755 script_name.sh
# Usage : qsub run_bamCaller.sh <input file> <depth>
# Caution : to be launched from the directory where resides the file to be processed
# ecrit les erreurs dans le fichier de sortie standard
#$ -j y
# shell que l'on veut utiliser
#$ -S /bin/bash
# indiquer son email pour suivre l'execution :
#$ -M philippe.cubry@ird.fr
# obtenir un message au demarrage (b) , a la fin (e), en cas d'abandon (a)
#$ -m bea
# la queue que l'on veut utiliser :
#$ -q *@node10
#$ -N bamCaller
#Exporte les variables d environnement
#$ -V
# Charge la bonne version de Python
module load system/python/3.6.0a3
# Move into the scratch working directory (original French comment: "crée le
# fichier temporaire").
cd /scratch/cubry_tmp_fonio/
# Run the caller once per pseudomolecule: chromosomes 01-09, subgenomes A/B
# (same order as the original brace list).  Per the usage note above,
# $1 is the input BAM and $2 the depth passed to bamCaller.py.
for num in 0{1..9}; do
    for sub in A B; do
        chr=Dexi_CM05836_chr${num}${sub}
        samtools mpileup -q 20 -Q 30 -C 50 -r $chr -u -f 150419_Digitaria_Exilis_v1.0_pseudomolecules.fasta $1 \
            | bcftools call -c -V indels \
            | ~/scripts/msmc_tools/./bamCaller.py $2 $1-$chr.mask.bed.gz \
            | gzip -c > $1-$chr.vcf.gz
    done
done
| true |
2c54513fbb09fb954b662fde44416ed957f139b7 | Shell | HiSaber/fastlanedemo | /auto-command.sh | UTF-8 | 796 | 2.703125 | 3 | [] | no_license | project_path=$(cd `dirname $0`; pwd)
# Derive the project name from the directory path computed on the line above.
project_name="${project_path##*/}"
echo $project_path
echo $project_name
bundleId="com.supwisdon.fast"
#icon
cp -r $project_path/GeneratedFile/AppIcon.appiconset $project_path/FastlaneTest/Assets.xcassets
#launchImg
#cp $project_path/GeneratedFile/LaunchImage.launchimage $project_path/SWSuperApp/SWSuperApp/Resouece/Assets.xcassets
#cp $project_path/GeneratedFile/config.plist $project_path/SWSuperApp/SWSuperApp/SuportFile
#cp $project_path/GeneratedFile/YGConfigDefine.h $project_path/SWSuperApp/SWSuperApp/SuportFile
# $? here is the exit status of the icon `cp -r` above (the intervening lines
# are comments): only rewrite the bundle id and deploy when the copy worked.
# `sed -i ''` is the BSD/macOS in-place form (no backup file).
if [ $? -eq 0 ];then
    cd $project_path
    sed -i '' "s/PRODUCT_BUNDLE_IDENTIFIER = .*;/PRODUCT_BUNDLE_IDENTIFIER = $bundleId;/;" $project_path/FastlaneTest.xcodeproj/project.pbxproj
    fastlane deploy
else printf '移动失败'  # "move failed"
    exit
fi
| true |
c46bcbab4f3b409a0e738946d1d862d439086fb2 | Shell | raywu2001/OSGeo4W | /scripts/pyshebangcheck.sh | UTF-8 | 476 | 3.0625 | 3 | [] | no_license | set -o pipefail
# Scan the OSGeo4W x86_64/setup.ini package index: for every python3-*
# package stanza, open its install tarball and flag any script under
# apps/python39/scripts whose contents embed a Windows build path like
# "d:\src" (i.e. a shebang baked in at build time).
while read p; do
    case "$p" in
        "@ python3-"*)
            # Keep just the package name after the "@ " stanza marker.
            p=${p#@ }
            while read a; do
                case "$a" in
                    "install:"*)
                        # "install: <path> <size> <md5>" — keep the path field.
                        read _ a _ < <(echo $a)
                        while read f; do
                            # Extract each candidate script to stdout and grep
                            # for a drive-letter src path (-a: treat as text).
                            if tar xjOf "$a" "$f" | egrep -q -i -a "[a-z]:[\\/]src"; then
                                echo "$p: Script $f with d:/src"
                            fi
                        done < <(tar tjf $a | grep -i apps/python39/scripts | grep -v "\.tmpl$")
                        break
                        ;;
                esac
            done
            # Drain the rest of the stanza; read leaves $a empty at EOF,
            # which terminates the loop.
            while [ -n "$a" ]; do
                read a
            done
            ;;
    esac
done <x86_64/setup.ini
| true |
4e628969a80260d513952930d6b0a76c8af41a2b | Shell | AkDmOl/Mipt_Linux | /FirstTask/backup.sh | UTF-8 | 1,218 | 4.3125 | 4 | [] | no_license | #!/bin/bash
usage="$(basename "$0") [-h] [-s source_directory] [-d destination_directory] [-n name_archive] [-e extentions] --backup all files with code and those extention
where:
    -h  show this help text
    -s  source directory
    -d  destination directory
    -n  name archive
    -e  space-separated extensions of files"

# Parse options.  -s/-d/-n take one value; -e consumes everything after it.
while [[ $# -gt 0 ]]
do
    key="$1"
    case $key in
        -s)
            SOURCE="$2"
            shift # past argument
            shift # past value
            ;;
        -d)
            DEST="$2"
            shift # past argument
            shift # past value
            ;;
        -n)
            NAME="$2"
            shift
            shift
            ;;
        -e)
            # BUG FIX: the original captured EXTENTIONS="$@", which still
            # contains the "-e" flag itself as the first word, so the copy
            # loop later searched for files named "*.-e".  Capture only the
            # values that follow the flag.
            EXTENTIONS="${@:2}"
            shift # past argument
            shift # past value
            ;;
        -h)
            echo "${usage}"
            shift
            ;;
        -*) # unknown option
            echo "Error Unknown option $*"
            echo "Try ${usage}"
            shift # past argument
            ;;
        *)
            shift
            ;;
    esac
done
# Stage every file whose extension appears in $EXTENTIONS from $SOURCE into
# $DEST, preserving the relative directory layout, then archive the staging
# tree and remove it.
for extention in $EXTENTIONS
do
    # Quote the -name pattern: unquoted, the shell glob-expanded
    # "*.$extention" against the current directory before find ever saw it.
    find "$SOURCE" -name "*.$extention" > all_files
    while IFS= read -r full_filename
    do
        # Parent directory of the match, recreated under $DEST.
        path=${full_filename%/*}
        mkdir -p "./$DEST/$path"
        cp "$full_filename" "./$DEST/$full_filename"
    done < all_files
done
# NOTE: no -z is passed, so despite the .gz suffix this writes a plain tar;
# kept as-is for backward compatibility with consumers of the file name.
tar -cf "$NAME.tar.gz" "$DEST"
rm -r "$DEST"
echo "done"
| true |
58226fd244d5d112b224e9c66bff2010efb742ee | Shell | qiuyesuifeng/codis_keepalived_lvs | /scripts/codis_lvs_server | UTF-8 | 877 | 3.328125 | 3 | [] | no_license | #!/bin/bash
# Real-server side of an LVS setup: bind the virtual IP to a loopback alias
# and tune the ARP sysctls so this host does not answer ARP for the VIP
# (presumably the standard LVS direct-routing real-server configuration —
# confirm against the director's setup).
VIP="192.168.188.190"
VIP_MASK="255.255.255.255"
VIF="lo:0"
case "$1" in
    start)
        echo "Start lvsreal server for $VIP"
        # /32 alias on loopback plus a host route for the VIP.
        /sbin/ifconfig $VIF $VIP netmask $VIP_MASK up
        /sbin/route add -host $VIP dev $VIF
        # arp_ignore=1 / arp_announce=2: don't reply to ARP for addresses on
        # lo, and prefer the real interface address as ARP source.
        /sbin/sysctl -w net.ipv4.conf.lo.arp_ignore=1 &> /dev/null
        /sbin/sysctl -w net.ipv4.conf.lo.arp_announce=2 &> /dev/null
        /sbin/sysctl -w net.ipv4.conf.all.arp_ignore=1 &> /dev/null
        /sbin/sysctl -w net.ipv4.conf.all.arp_announce=2 &> /dev/null
        ;;
    stop)
        echo "Stop lvsreal server for $VIP"
        /sbin/ifconfig $VIF down
        /sbin/route del -host $VIP
        # Restore the kernel defaults.
        /sbin/sysctl -w net.ipv4.conf.lo.arp_ignore=0 &> /dev/null
        /sbin/sysctl -w net.ipv4.conf.lo.arp_announce=0 &> /dev/null
        /sbin/sysctl -w net.ipv4.conf.all.arp_ignore=0 &> /dev/null
        /sbin/sysctl -w net.ipv4.conf.all.arp_announce=0 &> /dev/null
        ;;
    *)
        echo "Usage: $0 {start|stop}"
        exit 1
esac
| true |
1b0c45a51b3a0087600936acea8a74711e400f5e | Shell | shruti-lamba/loadtest | /launch_instance.sh | UTF-8 | 2,163 | 3.40625 | 3 | [] | no_license | #!/bin/bash
# Interactively collect EC2 parameters, persist them to properties.sh (and
# push that file to the given git repo), create the LoadTesting IAM role and
# instance profile, then launch and tag the master instance.
set -e
source properties.sh
echo -n "Enter AMI ID: "
read AMI
#AMI=ami-fce3c696
echo -n "Enter Instance Type: "
read InstanceType
#InstanceType=t2.micro
echo -n "Enter Subnet-ID: "
read Subnet
#Subnet=subnet-a28a3dfa
echo -n "Enter Security Group ID: "
read SecurityGroup
#SecurityGroup=sg-ee751395
echo -n "Enter Name of Key Pair: "
read KeyPairName
#KeyPairName=key_Gunjan
echo -n "Enter Number of Slaves to be created: "
read NoOfInstances
echo -n "Enter URL of the Git Repository: "
read URL
# Append the collected answers so later runs (and the slaves) can source them.
cat <<here >> properties.sh
export AMI=$AMI
export InstanceType=$InstanceType
export Subnet=$Subnet
export SecurityGroup=$SecurityGroup
export KeyPairName=$KeyPairName
export NoOfInstances=$NoOfInstances
export URL=$URL
here
git add properties.sh
git commit -m "properties.sh"
git push $URL
#sudo wget https://s3.amazonaws.com/$BUCKET/user_data_file.sh -O /tmp/user_data_file.sh
#later need to add command for IAM ROLE creation with Admin ROLE
aws iam create-role --role-name LoadTesting-Role --assume-role-policy-document file://LoadTesting-Trust.json
aws iam put-role-policy --role-name LoadTesting-Role --policy-name LoadTesting-Permissions --policy-document file://LoadTesting-Permissions.json
aws iam create-instance-profile --instance-profile-name LoadTesting-Instance-Profile
aws iam add-role-to-instance-profile --instance-profile-name LoadTesting-Instance-Profile --role-name LoadTesting-Role
# Fixed sleeps give IAM/EC2 time to propagate before the resources are used.
sleep 10
# Launch the master and scrape its InstanceId out of the JSON response.
InstanceID=$(aws ec2 run-instances --image-id $AMI --iam-instance-profile Name=LoadTesting-Instance-Profile --key-name $KeyPairName --security-group-ids $SecurityGroup --instance-type $InstanceType --user-data file://user_data_file.sh --subnet $Subnet --associate-public-ip-address --output json | grep "InstanceId" | awk '{print $2}' | sed 's/\"//g' | sed 's/\,//g')
sleep 10
echo "Master created, Instance id= "$InstanceID
echo "Master IP= "$(aws ec2 describe-instances --instance-id $InstanceID --output json | grep "PublicIpAddress" | awk '{print $2}' | sed 's/\"//g' | sed 's/\,//g')
aws ec2 create-tags --resource $InstanceID --tags Key=Name,Value=Master_$PROJECT
echo "Wait while Master Instance is configured"
sleep 300
echo "Done!"
| true |
f7d268eebad97244686e84b986bc62e463832bfa | Shell | ifaist0s/vesta-hestia-management | /scripts/abuseipdb-DLtxt.sh | UTF-8 | 1,523 | 4.125 | 4 | [] | no_license | #!/bin/bash
# 2020-07-15-02
# Get AbuseIPDB data and save to a web accessible URL
APIKEY="change-this-to-your-API"
# Dated download target; printf '%(%Y-%m-%d)T' -1 is bash's built-in
# strftime of "now" (bash >= 4.2), so older snapshots are kept side by side.
ABIPDB="change-this-to-a-web-accessible-path/public_html/$(printf '%(%Y-%m-%d)T\n' -1)-abuseipdb.txt"
# Stable name served to clients; only overwritten after validation below.
ABIPOK="change-this-to-a-web-accessible-path/public_html/abuseipdb.txt"
unset DLSUCCESS
# Define command location (useful if run under cron)
curl=/usr/bin/curl
cp=/bin/cp
rm=/bin/rm
# Get the data with abuseipdb API Key
# Only IPs reported with 100% confidence; DLSUCCESS records curl's outcome.
$curl -s -G https://api.abuseipdb.com/api/v2/blacklist \
-d confidenceMinimum=100 \
-H "Key: $APIKEY" \
-H "Accept: text/plain" > "$ABIPDB" && DLSUCCESS=1 || DLSUCCESS=0
# Function to check for valid IP address
# Return 0 when $1 is a dotted-quad IPv4 address whose four octets are
# all in range (<= 255); return 1 otherwise.
function valid_ip()
{
    local addr=$1
    local rc=1

    # Shape check first: exactly four 1-3 digit groups separated by dots.
    if [[ $addr =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
        local -a octet
        # Split on '.' without touching the caller's IFS.
        IFS='.' read -r -a octet <<< "$addr"
        [[ ${octet[0]} -le 255 && ${octet[1]} -le 255 \
            && ${octet[2]} -le 255 && ${octet[3]} -le 255 ]]
        rc=$?
    fi
    return $rc
}
# Keep the dated file only when it plausibly contains a blacklist (its
# first line parses as an IPv4 address); otherwise discard it.
if [ "$DLSUCCESS" -eq 1 ]; then
    # Check if the first line of the downloaded file is an IP address and make a copy (keep older DBs)
    if valid_ip "$(head --lines=1 "$ABIPDB")"; then
        $cp "$ABIPDB" "$ABIPOK" && echo File "$ABIPDB" has been downloaded and verified.
    else
        # Delete if the first line is not an IP address (for example when it's an error message, html code, etc)
        $rm "$ABIPDB" && echo Downloaded AbuseIPDB was invalid and has been deleted.
    fi
else
    echo Could not download AbuseIPDB from server.
fi
| true |
8a546afe19e879ad5d88387f43d8a18d1d5bde22 | Shell | hackoregon/urbandev-etl | /postgresql/scripts/bin/load_csv | UTF-8 | 1,324 | 4.0625 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# example usage:
# ./load_csv <db name> <table name/csv name> <number of rows to skip>
#
# ./load_csv urbandev census_total_population 1
#
#
# On Mac OS X 10.11.5, you need to install the 'realpath' utility. For
# example, using Homebrew:
# $ brew install coreutils
# which includes 'grealpath'. So to make that work with the 'load' script,
# which uses 'realpath', follow the brew installation instructions to
# add gnubin to your PATH, like this:
# PATH="/usr/local/opt/coreutils/libexec/gnubin:$PATH"
# path to the directory of this script
SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Positional arguments: target database, table name (== csv basename),
# and how many header rows to skip from the csv file.
db=$1
table=$2
skip=$3
# csv fixtures live next to the data-loaders, relative to this script
data=$SCRIPTDIR/../data-loaders/data
# Minimal stand-in for GNU `realpath` so the script also works on MacOSX:
# absolute paths pass straight through, anything else is anchored at $PWD
# (with a leading "./" stripped first).
realpath() {
    case $1 in
        /*) printf '%s\n' "$1" ;;
        *)  printf '%s\n' "$PWD/${1#./}" ;;
    esac
}
# construct path to csv file
csv=$(realpath ${data}/${table}.csv)
# grab field names from csv file (they will match the table column names)
fields=`head -n 1 $csv`
# create temp file to store csv data rows
tmpcsv=`mktemp`
# skip x number of lines from the csv file for headers
# required because psql '\copy' only supports skipping one line
tail -n +$(($skip+1)) $csv > $tmpcsv
# copy csv data into table
# (\copy runs client-side, so the temp file does not have to be readable
# by the postgres server process)
psql -d $db << EOF
\copy $table ($fields) FROM $tmpcsv CSV
EOF
# remove temp csv
rm $tmpcsv
| true |
d71f9a58df53fdb6b0645c14ba03577d197cf975 | Shell | kubesphere/container-common-scripts | /tests/test-lib/path_foreach | UTF-8 | 340 | 3.25 | 3 | [
"Apache-2.0"
] | permissive | #! /bin/bash
# Exercise ct_path_foreach/ct_path_append from test-lib.sh: every
# colon-separated element of $path is passed to `wrap` together with the
# extra argument '.', and the collected output must match exactly.
set -e
. test-lib.sh
path='a b c:d x:y'
exp_output="==a b c==
==.==
==d x==
==.==
==y==
==.=="
# Echo each argument wrapped in '==' markers.
wrap() { for arg; do echo "==$arg=="; done; }
test "$(ct_path_foreach "$path" wrap .)" == "$exp_output"
# Appending prepends... no: ct_path_append adds '/a' in front of the
# iteration order per the expected output below.
ct_path_append path '/a'
exp_output="==/a==
==.==
$exp_output"
test "$(ct_path_foreach "$path" wrap .)" == "$exp_output"
| true |
553fa061ee3704d809f287c5e9a1fa8370750f13 | Shell | fjz92419/pecan | /scripts/update.psql.sh | UTF-8 | 677 | 3.609375 | 4 | [
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
# goto home
cd $(dirname $0)/../..
set -x
# command to connect to database
# (on Linux run psql as the postgres system user; on Darwin the current
# user is assumed to already have access, so POSTGRES stays empty)
if [ "`uname -s`" != "Darwin" ]; then
    export POSTGRES="sudo -u postgres"
fi
export CMD="${POSTGRES} psql -U bety"
# Fully qualified hostname
# NOTE(review): FQDN appears unused below -- confirm whether the called
# scripts rely on it.
FQDN=$( hostname -f )
# load latest dump of the database
curl -o betydump.gz https://ebi-forecast.igb.illinois.edu/pecan/dump/betydump.psql.gz
# recreate the bety database from scratch and restore the dump into it
${POSTGRES} dropdb bety
${POSTGRES} createdb -O bety bety
gunzip -c betydump.gz | ${CMD} bety
rm betydump.gz
# remove old runs
sudo rm -rf output
mkdir output
chmod 777 output
# add sites
if [ -e sites/addsites.sh ]; then
    (cd sites && ./addsites.sh)
fi
# add models
$(dirname $0)/addmodels.sh
| true |
8595d4b3b51a58be7cc054eccfc0973090b48532 | Shell | liveforeverx/dlex | /stop-server.sh | UTF-8 | 305 | 3.234375 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# Name of the dgraph container managed by the dlex helper scripts.
container_name=dlex-dgraph

# `docker ps -a` lists every container, running or stopped; the anchored
# grep accepts only an exact name match.
if docker ps -a --format '{{.Names}}' | grep -Eq "^${container_name}\$"; then
    echo "Stopping and removing dgraph server..."
    docker stop "$container_name" && docker rm "$container_name"
else
    echo "Not running!"
fi

echo "Done."
| true |
889bf82c05ce76dcb2b1fcf33b2e2b1c22916e2e | Shell | phusion/passenger_rpm_automation | /docker-images/buildbox/install.sh | UTF-8 | 2,524 | 3.078125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
set -e

# Print a blank line followed by a "----- <args> -----" banner; all
# arguments are joined with single spaces.
function header()
{
    printf '\n----- %s -----\n' "$*"
}
# Echo the command being executed (prefixed with "+ ") and then run it,
# propagating its exit status.
function run()
{
    printf '+ %s\n' "$*"
    "$@"
}
# Build runs as root with a UTF-8 locale.
export HOME=/root
export LANG=en_US.UTF-8
export LC_CTYPE=en_US.UTF-8
# Create the unprivileged 'app' user/group with a fixed uid/gid (2467).
header "Creating users"
run groupadd --gid 2467 app
run adduser --uid 2467 --gid 2467 --password '#' app
header "Installing dependencies"
# EPEL first, then the build/runtime dependencies for RPM packaging.
run dnf install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-9.noarch.rpm
run dnf update -y
run dnf install -y --skip-broken \
    git sudo gcc gcc-c++ ccache \
    curl-devel openssl-devel \
    httpd httpd-devel zlib-devel ca-certificates \
    libxml2-devel libxslt-devel sqlite-devel \
    libev-devel pcre-devel source-highlight \
    apr-devel apr-util-devel which \
    gd-devel gperftools-devel perl-devel perl-ExtUtils-Embed \
    centos-release bash procps-ng \
    nodejs npm createrepo mock rpmdevtools
run dnf --disablerepo=\* --enablerepo=baseos groupinstall -y "Development Tools"
# Keyservers to try, in order; the "-end-" sentinel marks exhaustion.
KEYSERVERS=(
    hkp://keyserver.pgp.com
    hkp://keys.gnupg.net
    ha.pool.sks-keyservers.net
    hkp://p80.pool.sks-keyservers.net:80
    hkp://ipv4.pool.sks-keyservers.net
    keyserver.ubuntu.com
    hkp://keyserver.ubuntu.com:80
    hkp://pgp.mit.edu
    pgp.mit.edu
    -end-
)
# GPG keys imported before running the RVM installer (presumably the RVM
# signing keys -- confirm).
KEYS=(
    409B6B1796C275462A1703113804BB82D39DC0E3
    7D2BAF1CF37B13E2069D6956105BD0E739499BDB
)
# We've had too many problems with keyservers. No matter which one we pick,
# it will fail some of the time for some people. So just try a whole bunch
# of them.
for KEY in "${KEYS[@]}"; do
    for KEYSERVER in "${KEYSERVERS[@]}"; do
        if [[ "$KEYSERVER" = -end- ]]; then
            echo 'ERROR: exhausted list of keyservers' >&2
            exit 1
        else
            echo "+ gpg --keyserver $KEYSERVER --recv-keys ${KEY}"
            gpg --keyserver "$KEYSERVER" --recv-keys "${KEY}" && break || echo 'Trying another keyserver...'
        fi
    done
done
# Install RVM and the Ruby toolchain used by the packaging scripts.
run curl --fail -sSLo /tmp/rvm.sh https://get.rvm.io
run bash /tmp/rvm.sh stable
source /usr/local/rvm/scripts/rvm
RUBY=3.1.2
# On failure, dump the build log so the docker build output is useful.
run rvm install ruby-$RUBY || cat /usr/local/rvm/log/*_ruby-$RUBY/make.log
rvm use ruby-$RUBY
rvm --default ruby-$RUBY
run gem install bundler --no-document
run env BUNDLE_GEMFILE=/pra_build/Gemfile bundle install -j 4
header "Miscellaneous"
# Allow sudo without a controlling tty (needed inside containers).
run sed -i 's/Defaults requiretty//' /etc/sudoers
run cp /pra_build/sudoers.conf /etc/sudoers.d/app
run chmod 440 /etc/sudoers.d/app
# 'app' must be in the mock group to run mock builds.
run usermod -a -G mock app
run sudo -u app -H rpmdev-setuptree
run mkdir -p /etc/container_environment
run cp /pra_build/my_init_python /sbin/my_init_python
run cp /pra_build/site-defaults.cfg /etc/mock/site-defaults.cfg
header "Cleaning up"
run dnf clean all
run rm -rf /pra_build
| true |
ad4c99224884aad2bf2078da91cd1b4bc059e7ec | Shell | warnerem/pyASC | /www/js/lib/js9-3.5/build/quicktest | UTF-8 | 544 | 3.03125 | 3 | [
"MIT",
"Apache-2.0"
] | permissive | #!/bin/bash
echo "quick js9 test ..."
# Optional first argument: a data file to load into the display.
if [ x$1 != x ]; then
    FILE="$1"
fi
# NOTE(review): this tests the status of the [ ... ] / assignment above,
# not whether Node.js exists -- with no argument the script falls through
# to the "requires Node.js" message. Looks unintentional; confirm.
if [ $? = 0 ]; then
    # start the helper daemon if it is not already running
    pgrep -f js9Helper.js >/dev/null || build/nnode
    printf "\nNB: if Chrome is your default browser, you probably need to exit Chrome,\nso that it can be re-started with the correct switches for this quicktest.\n\n"
    ./js9 -v -b &
    sleep 3
    # once the viewer is up, load the file and apply a demo configuration
    if [ x$FILE != x ]; then
        ./js9load $FILE
        ./js9 SetScale log
        ./js9 SetColormap viridis
        ./js9 DisplayPlugin JS9Magnifier
    fi
else
    echo "requires Node.js (https://nodejs.org)"
fi
| true |
8b1750ca3b0ec687aca6845470f525114a632956 | Shell | YannickB/saas-deprecated | /saas/shell/save.sh | WINDOWS-1258 | 4,843 | 3.296875 | 3 | [] | no_license | #!/bin/bash
# usage()
# {
# cat << EOF
# usage: $0 options
# This script run the test1 or test2 over a machine.
# OPTIONS:
# -h Show this message
# -t Test type, can be test1' or test2'
# -r Server address
# -p Server root password
# -v Verbose
# EOF
# }
# while getopts "ht:p:a:n:c:u:s:e:r:d:bkz" OPTION;
# do
# case $OPTION in
# h)
# usage
# exit 1
# ;;
# t)
# title=$OPTARG
# ;;
# a)
# admin_user=$OPTARG
# ;;
# p)
# admin_password=$OPTARG
# ;;
# n)
# instance=$OPTARG
# ;;
# c)
# archive_path=$OPTARG
# ;;
# u)
# user_name=$OPTARG
# ;;
# s)
# user_password=$OPTARG
# ;;
# e)
# user_mail=$OPTARG
# ;;
# r)
# server=$OPTARG
# ;;
# d)
# database_server=$OPTARG
# ;;
# k)
# skip_analytics=True
# ;;
# b)
# build=True
# archive='wikicompare_preprod'
# ;;
# z)
# test=True
# ;;
# ?)
# usage
# exit
# ;;
# esac
# done
# Dump one SaaS instance: delegate to the application-specific save
# script, fetch the prepared archive locally, remove the remote copy and
# push the archive to the FTP backup server.
# NOTE(review): "$(unknown)" below looks like a mangled variable
# reference (probably the archive/file name) -- as written it executes a
# command named `unknown`; confirm against the original source.
save_dump()
{
    $openerp_path/saas/saas/apps/$application_type/save.sh save_dump $application $saas_names $filename $server $database_server $instance $system_user $backup_directory $instances_path
    scp $system_user@$server:$backup_directory/backups/prepare/$(unknown).tar.gz $backup_directory/backups/$(unknown).tar.gz
    ssh $system_user@$server << EOF
rm $backup_directory/backups/prepare/$(unknown).tar.gz
EOF
    ncftp -u $ftpuser -p $ftppass $ftpserver<< EOF
put $backup_directory/backups/$(unknown).tar.gz
EOF
}
# Post-save maintenance: expire 5-day-old archives from the FTP server,
# mirror the control backups onto the shinken host (unpacking each
# tarball into a directory of the same name), and prune local backups
# older than 4 days.
save_after()
{
    echo ncftp -u $ftpuser -p $ftppass $ftpserver
    # glob pattern matching every archive dated 5 days ago
    filename=`date +%Y-%m-%d -d '5 days ago'`-*
    echo $filename
    ncftp -u $ftpuser -p $ftppass $ftpserver<<EOF
rm $filename
EOF
    ssh $shinken_server << EOF
ncftpget -u $ftpuser -p$ftppass -R $ftpserver $backup_directory/control_backups ./*
cd $backup_directory/control_backups
pwd
find . -name '*.tar.gz' -exec bash -c 'mkdir \`basename {} .tar.gz\`; tar -zxf {} -C \`basename {} .tar.gz\`' \;
rm *.tar.gz
EOF
    find $backup_directory/backups/ -type f -mtime +4 | xargs -r rm
}
# cd $website_path
# filename=`date +%Y-%m-%d`_${letscoop_type}_www.tar.gz
# rm /var/wikicompare/backups/$filename
# drush archive-dump --destination=/var/wikicompare/backups/$filename
# ncftp -u $ftpuser -p $ftppass $ftpserver<< EOF
# rm $filename
# put /var/wikicompare/backups/$filename
# EOF
# echo website saved
# if [[ $letscoop_type != 'wezer' ]]
# then
# filename=`date +%Y-%m-%d`_wikicompare_analytics.tar.gz
# rm /var/wikicompare/backups/$filename
# ssh $piwik_server << EOF
# mkdir /var/wikicompare/backups/prepare/piwik_temp
# mkdir /var/wikicompare/backups/prepare/piwik_temp/wikicompare_analytics
# cp -r /var/www/piwik/* /var/wikicompare/backups/prepare/piwik_temp/wikicompare_analytics
# mysqldump -u piwik -p$piwik_password piwik > /var/wikicompare/backups/prepare/piwik_temp/wikicompare_analytics.sql
# cd /var/wikicompare/backups/prepare/piwik_temp
# tar -czf ../$filename ./*
# cd ../
# rm -rf /var/wikicompare/backups/prepare/piwik_temp
# EOF
# scp $piwik_server:/var/wikicompare/backups/prepare/$filename /var/wikicompare/backups/$filename
# ssh $piwik_server << EOF
# rm /var/wikicompare/backups/prepare/$filename
# EOF
# ncftp -u $ftpuser -p $ftppass $ftpserver<< EOF
# rm $filename
# put /var/wikicompare/backups/$filename
# EOF
# echo piwik saved
# fi
# Delete one saved archive everywhere: local backups, the FTP server and
# the unpacked control-backup directory on the shinken host.
# NOTE(review): same "$(unknown)" artifact as in save_dump -- likely a
# mangled variable name; confirm against the original source.
save_remove()
{
    rm $backup_directory/backups/$(unknown).tar.gz
    ncftp -u $ftpuser -p $ftppass $ftpserver<<EOF
rm $(unknown).tar.gz
EOF
    ssh $shinken_server << EOF
rm -rf $backup_directory/control_backups/$filename
EOF
}
# Dispatcher: the first argument selects the operation; the remaining
# positional arguments are bound to the globals the handlers read.
case $1 in
    save_dump)
        application_type=$2
        application=$3
        saas_names=$4
        filename=$5
        server=$6
        database_server=$7
        instance=$8
        system_user=$9
        backup_directory=${10}
        instances_path=${11}
        openerp_path=${12}
        ftpuser=${13}
        ftppass=${14}
        ftpserver=${15}
        save_dump
        exit
        ;;
    save_prepare)
        backup_directory=$2
        shinken_server=$3
        # clear previously fetched control backups on the shinken host
        ssh $shinken_server << EOF
rm -rf $backup_directory/control_backups/*
EOF
        exit
        ;;
    save_after)
        backup_directory=$2
        shinken_server=$3
        ftpuser=$4
        ftppass=$5
        ftpserver=$6
        save_after
        exit
        ;;
    save_remove)
        filename=$2
        backup_directory=$3
        shinken_server=$4
        ftpuser=$5
        ftppass=$6
        ftpserver=$7
        save_remove
        exit
        ;;
    ?)
        # NOTE(review): '?' only matches a single-character argument, not
        # "anything else" -- '*' was probably intended. Left unchanged.
        exit
        ;;
esac
| true |
7182c8a1a2c244f46221e7462da1af7162052f7a | Shell | PhilipConte/dotfiles | /bash/.prompt | UTF-8 | 1,181 | 3.5625 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# ANSI colour escape sequences used by the prompt fragments below;
# rst resets all attributes.
red="\e[31m"
grn="\e[32m"
ylw="\e[33m"
cyn="\e[36m"
blu="\e[34m"
prp="\e[35m"
bprp="\e[35;1m"
gry="\e[94m"
rst="\e[0m"
# Emit the current git branch (yellow) or detached-HEAD commit (cyan);
# prints nothing outside a git repository.
git_branch() {
    local git_status="$(git status 2> /dev/null)"
    # [^${IFS}]* == a run of non-whitespace, i.e. the branch/commit token
    local on_branch="On branch ([^${IFS}]*)"
    local on_commit="HEAD detached at ([^${IFS}]*)"
    if [[ $git_status =~ $on_branch ]]; then
        local branch=${BASH_REMATCH[1]}
        echo -ne "${ylw} $branch ${rst}"
    elif [[ $git_status =~ $on_commit ]]; then
        local commit=${BASH_REMATCH[1]}
        echo -ne "${cyn} $commit ${rst}"
    fi
}
# One-character work-tree marker: green dot when clean, red x when dirty,
# nothing when not inside a git repository.
dirtyorclean() {
    status="$(git status --porcelain 2> /dev/null)"
    # capture git's exit code before any other command overwrites $?
    local exit="$?"
    if [[ "$exit" -eq 0 ]]; then
        # --porcelain prints one line per changed path; empty == clean
        if [[ ${#status} -eq 0 ]]; then
            echo -ne "${grn}•${rst}"
        else
            echo -ne "${red}×${rst}"
        fi
    else
        echo -ne ""
    fi
}
# Current directory (as reported by `dirs +0`, ~-abbreviated) wrapped in
# \001/\002 readline markers so the colour codes are counted as
# zero-width when bash computes the prompt length.
prompt_pwd() {
    local here
    here=$(dirs +0)
    echo -ne "\001${gry}\002${here}\001${rst}\002"
}
# Prompt terminator: '#' for root, '>' for everyone else, wrapped in
# \001/\002 readline markers around the colour escapes.
rootornot() {
    local marker='>'
    [[ "$(id -u)" -eq 0 ]] && marker='#'
    echo -ne "\001${red}\002${marker}\001${rst}\002"
}
# Single quotes defer the $(...) substitutions so the helper functions
# re-run every time the prompt is drawn. PS2 is the continuation prompt.
PS1='$(prompt_pwd)$(git_branch)$(dirtyorclean)\n$(rootornot) '
PS2="→ "
| true |
1c1394c18946284d27d5217cc64a0a36f69a65ac | Shell | fmap-archive/vi-etc | /.bashrc | UTF-8 | 779 | 3.578125 | 4 | [] | no_license | #!/usr/bin/env bash
# First, make sure we're running interactively:
if [ -n "$PS1" ] ; then
  # Source my preferred environment, aliases and functions.
  [ -r ~/.bash/environment ] && . ~/.bash/environment
  [ -r ~/.bash/aliases ] && . ~/.bash/aliases
  [ -r ~/.bash/functions ] && . ~/.bash/functions
  # Then set my preferred prompts..
  PS1="$(PS1) " # Command Prompt
  PS2="$(PS2) " # Secondary Prompt
  PS3="? " # Select Prompt
  PS4="+ " # Debugging Prompt
  # ..and the title, in being visible, should be informative.
  # NOTE(review): `tty?` is an unusual command name (glob for a
  # single-character suffix); presumably a helper from the sourced files
  # above -- confirm. On failure the DEBUG trap mirrors each command into
  # the terminal title.
  tty? || trap 'echo -ne "\e]0;"; echo -n $BASH_COMMAND; echo -ne "\007"' DEBUG
  PROMPT_COMMAND="set_window_title"
  # Source host-specific settings, if any.
  [ -r ~/.bash/local ] && . ~/.bash/local
fi
| true |
a4273b0ab486395c7af22feccc5e176f07bb1df1 | Shell | sa-mw-dach/podium | /mozaik/yaml/confmap-update.sh | UTF-8 | 2,309 | 4 | 4 | [] | no_license | #!/bin/bash
OPTIND=1
# This script replaces a ConfigMap object in OpenShift by a new object provided in a yaml template.
# The options (y)aml template, (n)amespace and application_(d)omain pass parameters to that template.
# An additional option (c)onfigmap can be useful when applying this script for other use cases than the
# mozaik-config ConfigMap in Podium.
# This first section reads the defined command line arguments.
# One last positional argument may be provided to pass the yaml template name
# Any other argument is ignored.
# Fix: "y:" was missing from the option string, so the y) branch below was
# unreachable and "-y <file>" was never accepted.
while getopts "y:c:n:d:" opt; do
    case $opt in
    y) yaml_file=$OPTARG
        ;;
    c) configmap=$OPTARG
        ;;
    n) namespace=$OPTARG
        ;;
    d) application_domain=$OPTARG
        ;;
    esac
done
shift $((OPTIND-1))
[ "${1:-}" = "--" ] && shift
# Setting the default values (the yaml template may also be given as the
# last positional argument).
yaml_file=${yaml_file:-$@}
configmap=${configmap:-mozaik-config}
namespace=${namespace:-podium}
application_domain=${application_domain:-apps.cloud.example.com}
# First we delete the existing ConfigMap
oc delete configmap/$configmap -n $namespace
# We immediately create the new ConfigMap object. If the Podium has been deployed by the Podium Operator,
# the absence of this ConfigMap object may trigger a re-creation of the original object by the Operator.
# We expect the creation here to be so fast that the Operator does not take notice.
# In case you have trouble with the Operator, you may want to set the Operator objects spec.mozaik.enable to false.
# oc patch Podium mypodium -p '{"spec":{"mozaik":{"enable": false }}}' --type merge -n $namespace
oc process -f $yaml_file -p NAMESPACE=$namespace -p APPLICATION_DOMAIN=$application_domain | oc create -n $namespace -f -
# Unfortunately, there is no trigger available that automatically restarts the Mozaik Pod after the
# ConfigMap Object has changed. In order to activate the new ConfigMap, we trigger a re-deployment
# instead. To achieve this, we add the sha256sum of our new ConfigMap as annotation to the Mozaik
# deployment. Whenever this annotation changes, the re-deployment is triggered automatically.
# Fix: hash the ConfigMap selected via -c instead of the hard-coded
# "mozaik-config", so a custom ConfigMap name stays consistent throughout.
configHash=$(oc get cm/$configmap -oyaml -n $namespace | sha256sum | cut -d' ' -f1)
patch="{\"spec\":{\"template\":{\"metadata\":{\"annotations\":{\"configHash\":\"$configHash\"}}}}}"
oc patch deployment mozaik -p $patch -n $namespace
| true |
b79ca6ac4e663d606e8b9e5e60bc5ef31d09298e | Shell | Wei-N-Ning/bashExamples | /src/strings_/wordSplitting.sh | UTF-8 | 474 | 3.46875 | 3 | [] | no_license | #!/usr/bin/env bash
# Print every argument on its own line wrapped in angle brackets.
# printf recycles its format string, emitting one "<arg>" line per
# argument; with zero arguments it still prints a single "<>" line,
# exactly like the original pre/post-variable version.
function printArgs() {
    printf '<%s>\n' "$@"
}
# prints out:
# <spaced>
# <out>
# (unquoted $var is split on the default IFS -- space/tab/newline -- so
# the surrounding blanks and the embedded newline all act as separators)
function demoDefaultIFS() {
    local var=' spaced
out '
    printArgs $var
}
# prints out:
# <there is>
# < a cow>
# (with IFS='|' only the pipe splits; interior spaces are preserved)
function demoCustomIFS() {
    local var='there is| a cow'
    local IFS='|'
    printArgs $var
}
function run() {
    demoCustomIFS
    # expect the IFS value to reset
    # (IFS was declared `local` inside demoCustomIFS, so the default IFS
    # is back in effect here)
    demoDefaultIFS
}
run
| true |
5d1361d333df6348cf74c58a5ed9ff0b2a68f303 | Shell | FauxFaux/debian-control | /c/crafty-books-small/crafty-books-small_1.0.debian1-2_all/postrm | UTF-8 | 1,239 | 3.359375 | 3 | [] | no_license | #! /bin/sh
# postrm script for crafty
#
# see: dh_installdeb(1)
set -e
# summary of how this script can be called:
# * <postrm> `remove'
# * <postrm> `purge'
# * <old-postrm> `upgrade' <new-version>
# * <new-postrm> `failed-upgrade' <old-version>
# * <new-postrm> `abort-install'
# * <new-postrm> `abort-install' <old-version>
# * <new-postrm> `abort-upgrade' <old-version>
# * <disappearer's-postrm> `disappear' <r>overwrit>r> <new-version>
# for details, see /usr/doc/packaging-manual/
case "$1" in
    purge|disappear)
        #Remove opening books
        # `|| true` keeps set -e from aborting when the files are absent
        rm -rf /var/lib/crafty/book.bin || true
        rm -rf /var/lib/crafty/books.bin || true
    ;;
    abort-install|failed-upgrade|abort-upgrade)
        #If install is aborted, then let the files where they were before install/upgrade
        #(cancel the 'move' stuff started in preinst).
        # NOTE(review): `[ ... -a ... ]` is obsolescent per POSIX; kept
        # as-is since this runs under /bin/sh on all Debian systems.
        if [ -d /var/lib/crafty -a -d /var/cache/crafty ]
        then rm -rf /var/lib/crafty
        fi
    ;;
    remove|upgrade)
    ;;
    *)
        echo "postrm called with unknown argument \`$1'" >&2
        exit 0
esac
# dh_installdeb will replace next line with shell code automatically
# generated by other debhelper scripts.
| true |
1b29faac6836fa95400c6cba1f4f85c8459f510f | Shell | CESNET/secant | /probes/lynis_test/main.sh | UTF-8 | 1,667 | 3.640625 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# Probe arguments: target VM address, result directory, and an optional
# skip flag (argument 6, defaulting to false).
VM_IP=$1
FOLDER_PATH=$2
SHOULD_SECANT_SKIP_THIS_TEST=${6-false}
BASE=$(dirname "$0")
CONFIG_DIR=${SECANT_CONFIG_DIR:-/etc/secant}
source ${CONFIG_DIR}/probes.conf
source $BASE/../../include/functions.sh
if $SHOULD_SECANT_SKIP_THIS_TEST;
then
    echo "SKIPPED"
    echo "Lynis test is actually skipped"
    logging $TEMPLATE_IDENTIFIER "Skip LYNIS_TEST." "DEBUG"
else
    LOGIN_AS_USER=secant
    # Copy the lynis payload onto the VM. Key-based auth only; host-key
    # checking is disabled (VMs are freshly created, keys are unknown).
    scp -q -o "StrictHostKeyChecking no" -o "UserKnownHostsFile /dev/null" -o PreferredAuthentications=publickey -r "$SECANT_PROBE_LYNIS" "$LOGIN_AS_USER"@$VM_IP:/tmp > /tmp/scp.log 2>&1
    if [ "$?" -eq "0" ];
    then
        # Run the client script with sudo first; fall back to an
        # unprivileged run when that fails.
        ssh -o "StrictHostKeyChecking no" -o "UserKnownHostsFile /dev/null" -o PreferredAuthentications=publickey "$LOGIN_AS_USER"@$VM_IP 'sudo bash -s' < ${BASE}/lynis-client.sh > $FOLDER_PATH/lynis_test.txt 2> /dev/null
        ret=$?
        if [ $ret -ne 0 ]; then
            ssh -o "StrictHostKeyChecking no" -o "UserKnownHostsFile /dev/null" -o PreferredAuthentications=publickey "$LOGIN_AS_USER"@$VM_IP 'bash -s' < ${BASE}/lynis-client.sh > $FOLDER_PATH/lynis_test.txt
            ret=$?
        fi
        if [ $ret -eq 0 ]; then
            echo "OK"
            echo "Logged in as user $LOGIN_AS_USER, and lynis test completed"
            cat $FOLDER_PATH/lynis_test.txt
        else
            logging $TEMPLATE_IDENTIFIER "During Lynis testing!" "ERROR"
            exit 1
        fi
    else
        echo "SKIPPED"
        echo "LYNIS_TEST skipped due to unsuccessful scp command!"
        logging $TEMPLATE_IDENTIFIER "LYNIS_TEST failed due to unsuccessful scp commmand!" "ERROR"
    fi
    rm -f /tmp/scp.log
fi
| true |
5b170b5485b28e98d3465553698cf84d17d65055 | Shell | gnayuy/alleninstitute | /ivscc/summaryscores.sh | UTF-8 | 1,137 | 3.125 | 3 | [] | no_license |
curnum=0
for i in */*dist.txt;
do
num=${i%*/*}
if [[ $curnum -eq $num ]]
then
n=$((n+1))
else
n=0
curnum=$num
fi
if [[ $i =~ axon ]]
then
#
while read line
do
if [[ $line =~ "bi-directional" ]]
then
ascore=${line#*=\ *}
elif [[ $line =~ "differen-structure-average" ]]
then
adscore=${line#*=\ *}
elif [[ $line =~ "percent of different-structure (average)" ]]
then
adpscore=${line#*=\ *}
fi
done < $i
elif [[ $i =~ dendrite ]]
then
#
while read line
do
if [[ $line =~ "bi-directional" ]]
then
dscore=${line#*=\ *}
elif [[ $line =~ "differen-structure-average" ]]
then
ddscore=${line#*=\ *}
elif [[ $line =~ "percent of different-structure (average)" ]]
then
ddpscore=${line#*=\ *}
fi
done < $i
else
#
while read line
do
if [[ $line =~ "bi-directional" ]]
then
wscore=${line#*=\ *}
elif [[ $line =~ "differen-structure-average" ]]
then
wdscore=${line#*=\ *}
elif [[ $line =~ "percent of different-structure (average)" ]]
then
wdpscore=${line#*=\ *}
fi
done < $i
fi
if((n==2))
then
echo "$num, $wscore, $wdscore, $wdpscore, $ascore, $adscore, $adpscore, $dscore, $ddscore, $ddpscore"
fi
done
| true |
3a1a59674438905e1114602fe9db8a0e47720aac | Shell | petronny/aur3-mirror | /gdiff-ext-hg/PKGBUILD | UTF-8 | 1,232 | 3 | 3 | [] | no_license | # Maintainer: Nuno Araujo <nuno.araujo@russo79.com>
pkgname=gdiff-ext-hg
pkgver=1
pkgrel=1
pkgdesc="A Nautilus extension for launching file comparison tools"
arch=('i686' 'x86_64')
url="http://diff-ext.sourceforge.net/"
license=('BSD')
depends=('gnome-vfs' 'hicolor-icon-theme' 'nautilus>=2.14.0')
makedepends=('mercurial' 'intltool' 'pkgconfig>=0.9.0')
provides=('gdiff-ext')
conflicts=('gdiff-ext')
install=$pkgname.install
_hgroot="http://diff-ext.hg.sourceforge.net:8000/hgroot/diff-ext/gdiff-ext"
_hgrepo="gdiff-ext"
build() {
cd "$srcdir"
msg "Connecting to Mercurial server...."
if [[ -d "$_hgrepo" ]]; then
cd "$_hgrepo"
hg pull -u
msg "The local files are updated."
else
hg clone "$_hgroot" "$_hgrepo"
fi
msg "Mercurial checkout done or server timeout"
msg "Starting build..."
rm -rf "$srcdir/$_hgrepo-build"
cp -r "$srcdir/$_hgrepo" "$srcdir/$_hgrepo-build"
cd "$srcdir/$_hgrepo-build"
#
# BUILD HERE
#
autoreconf
./configure --prefix=/usr --sysconfdir=/etc --with-gconf-schema-file-dir=/usr/share/gconf/schemas --disable-schemas-install
make
}
package() {
cd "$srcdir/$_hgrepo-build"
make GCONF_DISABLE_MAKEFILE_SCHEMA_INSTALL=1 DESTDIR="$pkgdir/" install
}
# vim:set ts=2 sw=2 et:
| true |
aeb9647589638237092e6e4ed9b804c77d21f975 | Shell | chzhyu/shell-demo-01 | /demo/demo01.sh | UTF-8 | 680 | 3.609375 | 4 | [] | no_license | #!/bin/bash
# Exercise 1: write a script that
#   1. sets FILE to /etc/passwd
#   2. greets every user listed in /etc/passwd and reports their ID,
#      e.g. "Hello,root,your UID is 0."
#      (hint: LINE=`wc -l /etc/passwd | cut -d" " -f1`)
#   3. counts how many users there are
FILE=/etc/passwd
# Number of users == number of lines; $(( )) normalizes any padding
# some wc implementations emit (also removes the duplicate LINES
# assignment the original had).
LINES=$(( $(wc -l < "$FILE") ))
# Read the file once instead of re-running head|tail per line, which was
# O(n^2) over the file. Fields: name:password:uid:...
while IFS=: read -r userName _ userID _; do
    echo "hello $userName your id is $userID"
done < "$FILE"
echo "there are $LINES users"
| true |
a3c2d6c8b83c77e96a23b47dd0c4c522a6181b62 | Shell | ilventu/aur-mirror | /libpo6/PKGBUILD | UTF-8 | 615 | 2.578125 | 3 | [] | no_license | # Maintainer: Serge Zirukin <ftrvxmtrx@gmail.com>
pkgname=libpo6
pkgver=0.2.3
pkgrel=1
pkgdesc="POSIX wrappers for C++"
arch=('i686' 'x86_64')
url="https://github.com/rescrv/po6"
license=('BSD')
source=("http://hyperdex.org/src/${pkgname}-${pkgver}.tar.gz"
        "unistd.patch")
md5sums=('53d135029d56165a571e389034a04451'
         'f1cecdb445567ecb5082fdb1aee953ad')
build() {
  cd "$srcdir/$pkgname-$pkgver"
  # -N: skip (do not fail on) patches that appear already applied
  patch -N -p1 -i "$srcdir/unistd.patch"
  ./configure --prefix=/usr
  make
}
check() {
  cd "$srcdir/$pkgname-$pkgver"
  # -k: keep going so all test failures are reported at once
  make -k check
}
package() {
  cd "$srcdir/$pkgname-$pkgver"
  make DESTDIR="$pkgdir/" install
}
| true |
541b51887b2821c1ccc6fceb84aac6beaabfab35 | Shell | ghsable/dotfiles | /bin/install_archlinux/etc/docker.sh | UTF-8 | 264 | 2.703125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
echo "${0} ..."
# ------ Docker ------
# --- install(docker)
# Full system upgrade first, then install docker and clear the cache.
sudo pacman -Syu
sudo pacman -S docker
sudo pacman -Sc
# --- auto-load
# (verify afterwards with: $ docker info)
sudo systemctl enable docker.service
# --- add docker group
# Lets the current user talk to the docker socket without sudo;
# takes effect on the next login.
sudo gpasswd -a $(whoami) docker
groups
| true |
432d50ef2423b432037b5be5c1fa7c12382b62ae | Shell | nitindermohan/MPTCP-Cellular-DataCollection | /MPTCP scripts/screen_killer.sh | UTF-8 | 143 | 3.28125 | 3 | [] | no_license | #/bin/bash
# Quit every GNU screen session whose name matches $1.
sessionName="$1"
# `screen -ls` lines contain "<pid>.<name>"; collect all matching tokens.
a=`screen -ls | grep -o "[0-9]*\.$sessionName"`
for session in $a;
do
    screen -S "${session}" -X quit;
done
| true |
793f8bbf1c217c53f78a27d6114f698030f3cfd2 | Shell | alisonn/shell-scripting | /polish_canu_genomes.sh | UTF-8 | 5,467 | 3.234375 | 3 | [] | no_license | #!/bin/bash
# Job name:
#SBATCH --job-name=polish
#SBATCH --account=co_bachtrog
#SBATCH --partition=savio2_bigmem
#SBATCH --qos=bachtrog_bigmem2_normal
#SBATCH --nodes=1
#SBATCH --ntasks=24
#SBATCH --time=120:00:00
### Polish genome assemblies from Canu assembler 3x with RACON and 1x with PILON
## Command(s) to run:
# Put the mapping/assembly tool chain on PATH for the whole job.
export PATH=$PATH:/global/home/groups/co_bachtrog/programs/minimap2/minimap2-master/
export PATH=$PATH:/global/home/groups/co_bachtrog/programs/miniasm
export PATH=$PATH:/global/home/groups/co_bachtrog/programs/samtools-1.5
export PATH=$PATH:/global/home/groups/co_bachtrog/programs/bwa_version_0.7.15
# One RACON polishing iteration with long reads.
#   $1 reference directory        $2 genome fasta basename
#   $3 reads directory            $4 long-read fastq basename
#   $5 iteration number (embedded in the output file names)
runRacon () {
    ## Command(s) to run:
    export PATH=$PATH:/global/home/groups/co_bachtrog/programs/minimap2/minimap2-master/
    export PATH=$PATH:/global/home/groups/co_bachtrog/programs/miniasm
    export PATH=$PATH:/global/home/groups/co_bachtrog/programs/samtools-1.5
    #export PATH=$PATH:/global/home/groups/co_bachtrog/programs/racon/bin
    export PATH=$PATH:/global/scratch/dmai/software/racon/build/bin
    export PATH=$PATH:/global/home/groups/co_bachtrog/programs/bwa_version_0.7.15
    ##export PATH=$PATH:/global/home/users/bracewel/Programs/samtools-0.1.19
    mapDir="/global/scratch/alisonn/02_mapped"
    refDir=$1
    readsDir=$3
    genome=${refDir}/$2
    genomePrefix=$( echo $2 | cut -f 1 -d '.' )
    longReads=${readsDir}/$4
    #longReads=$3
    iteration=$5
    output=${refDir}/${genomePrefix}.reads_mapped.racon_${iteration}.paf
    updatedGenome=${refDir}/${genomePrefix}.racon_${iteration}.contigs.fasta
    echo "Running RACON to polish with long reads"
    echo "genome: $genome"
    echo "reads: $longReads"
    echo "iteration: $iteration"
    echo "mapped output: $output"
    echo "new genome: $updatedGenome"
    # Sanity listing of inputs ($output will not exist yet on first run).
    ls $genome
    ls $longReads
    ls $output
    echo "starting minimap2"
    # map long reads against the current assembly, then polish with racon
    minimap2 $genome $longReads > $output
    echo "starting racon"
    #racon
    racon -t 24 -u $longReads $output $genome > $updatedGenome
    ## if you get the error chunk size too small, make sure your fastq.gz file has quality scores.. this is usually the issue
    ### old racon version command is below
    ###racon -t 24 $longReads $output $genome $updatedGenome
    echo " "; echo " "
}
export -f runRacon
# One PILON polishing round with paired-end Illumina reads.
#   $1 genome directory           $2 genome fasta basename
#   $3 reads directory            $4/$5 paired fastq basenames
#   $6 pilon iteration number (embedded in the output name)
runPilon () {
    genomeDir=$1
    readsDir=$3
    mappedDir="/global/scratch/alisonn/02_mapped/dpse-nanopore"
    # genome reference is in this format: dpse124Y_nanopore_canu_v1.racon_3.contigs.fasta
    prefix=$( echo $2 | sed -E 's/(\w+.racon_[0-9]{1,12}).contigs.fasta/\1/g' )
    genome=${genomeDir}/$2
    read1=${readsDir}/$4
    read2=${readsDir}/$5
    iteration=$6
    samOut=${mappedDir}/${prefix}.illuminaMap.sam
    bamOut=${mappedDir}/${prefix}.illuminaMap.sorted.bam
    updatedGenome=${genomeDir}/${prefix}.pilon_${iteration}.contigs.fasta
    echo "Running PILON to polish with short reads"
    echo "prefix: $prefix"
    echo "genome: $genome"
    echo "reads: $read1"
    echo " $read2"
    echo "mapped illumina: $bamOut"
    echo "new genome (prefix): $updatedGenome"
    echo "iteration: $iteration"
    ### Pilon requires mapping short reads to the genome
    ### Pilon then uses this mapping to infer sites of confidence, or lackthereof, to update the genome
    ### As with every Broad software, Pilon requires an index for mappings
    bwa index $genome
    bwa mem -t 24 $genome $read1 $read2 > $samOut
    echo "Finished mapping Illumina reads to genome"
    samtools view -Suh $samOut | samtools sort -@ 24 -O BAM -o $bamOut -
    echo "Finished sorting bam file"
    samtools index -b $bamOut
    echo "Finished making index of bam file"
    echo "Starting pilon now"
    module load java/1.8.0_121
    # Fixes: --frags referenced the undefined $bamFile (the sorted bam is
    # $bamOut), and --output was passed twice ("test" then the real
    # prefix); the stray "--output test" has been dropped.
    java -Xmx60G -jar /global/scratch/dmai/projects/nasuta_group/albom_mini_assembly/pilon/pilon-1.22.jar --threads 24 --genome $genome --frags $bamOut --outdir $genomeDir --output $updatedGenome
    echo "Pilon run finished"
    echo ""; echo ""
}
export -f runPilon
## runRacon requires 5 positional arguments, each separated by ONE(1) space
## 1: reference directory (do not include trailing /) ## 2: reference genome (STRING, do not include directory)
## 3: read directory (do not include last /) ## 4: long reads (STRING, do not include directory)
## 5: racon iteration (INT)
### RACON for Dpse124Y 25X repeats normal ###
#runRacon /global/scratch/alisonn/04_asm/canu_124Y_repeats_feb2019 dpse124Y_canu_repeats_25x.contigs.fasta /global/scratch/alisonn/00_rawData/nanopore_v2 run15_run25_run27_all_pass_nanopore_het_Y_unmapped_singleLine.fastq.gz 1
#runRacon /global/scratch/alisonn/04_asm/canu_124Y_repeats_feb2019 dpse124Y_canu_repeats_25x.racon_1.contigs.fasta /global/scratch/alisonn/00_rawData/nanopore_v2 run15_run25_run27_all_pass_nanopore_het_Y_unmapped_singleLine.fastq.gz 2
#runRacon /global/scratch/alisonn/04_asm/canu_124Y_repeats_feb2019 dpse124Y_canu_repeats_25x.racon_2.contigs.fasta /global/scratch/alisonn/00_rawData/nanopore_v2 run15_run25_run27_all_pass_nanopore_het_Y_unmapped_singleLine.fastq.gz 3
## runPilon requires 4 positional arguments separated by space
## 1: genome directory 2: genome (STRING, do not include directory) 3: read directory
## 4: short read pair 1 (STR, do not include dir) 5: short read pair 2 (same as 2) 6: pilon iteration
#runPilon /global/scratch/alisonn/04_asm/canu_124Y_repeats_feb2019 dpse124Y_canu_repeats_25x.racon_3.contigs.fasta /global/scratch/alisonn/00_rawData/dpse-gDNA DBCC035B8_S64_L008_R1_001_val_1.fq.gz DBCC035B8_S64_L008_R2_001_val_2.fq.gz 1
| true |
d8decc5fb7ffb8335a021e8535e45dbf1dccc5e7 | Shell | jafingerhut/p4-guide | /bin/create-demo-terminals.sh | UTF-8 | 1,390 | 3.1875 | 3 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | #! /bin/bash
# Copyright 2019-present Cisco Systems, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# On Ubuntu 16.04 gnome-terminal no longer supports the --title option.
# mate-terminal does.
# Prefer mate-terminal when installed, otherwise fall back to
# gnome-terminal; abort when neither is available.
if [ `which mate-terminal` ]; then
    CMD=mate-terminal
elif [ `which gnome-terminal` ]; then
    CMD=gnome-terminal
else
    # Fixed typo in the user-facing message ("note" -> "not").
    echo "Could not find gnome-terminal or mate-terminal in command path"
    exit 1
fi
# Nice to have separate terminal windows to watch what is going on.
# Upper left: 'compile' for doing 'make run' commands
${CMD} --geometry=74x19+0+0 --title="compile & run" --zoom=1.1 &
# Upper right: miscellaneous
${CMD} --geometry=74x19-0+0 --title="whatever" --zoom=1.1 &
# Bottom left: bash running on host h1
${CMD} --geometry=74x22+0-0 --title="h1 bash" --zoom=1.1 &
# Bottom right: bash running on host h2
${CMD} --geometry=74x22-0-0 --title="h2 bash" --zoom=1.1 &
| true |
008b1cc32a14619162c7ea4c88035c19f67b8c84 | Shell | wavesoft/cernvm-online | /deployment/setup_handlers.sh | UTF-8 | 4,346 | 3.25 | 3 | [] | no_license | #!/bin/bash
#
# Step handlers
# each step consists of two methods:
# * <Step name>_do
# * <Step name>_undo
#
################################################################################
# Create the application base directory.
# NOTE(review): managed_exec, $BASE_DIR, $GIT_REPO and $GIT_BRANCH come
# from the sourcing deployment script -- confirm their definitions there.
function make_dirs_do
{
    managed_exec mkdir -p $BASE_DIR
    return $?
}
# Roll back: remove the base directory entirely.
function make_dirs_undo
{
    managed_exec rm -Rf $BASE_DIR
    return $?
}
################################################################################
# Install git and clone the configured repository/branch into $BASE_DIR/git.
function export_source_do
{
    managed_exec yum install git -y || return $?
    cd $BASE_DIR
    managed_exec git clone $GIT_REPO git || return $?
    cd git
    managed_exec git checkout $GIT_BRANCH
    return $?
}
# Roll back: drop the checkout and uninstall git.
function export_source_undo
{
    managed_exec rm -Rf $BASE_DIR/git || return $?
    managed_exec yum remove git -y
    return $?
}
################################################################################
function prepare_python_env_do
{
managed_exec yum install python-pip python-devel gcc -y
return $?
}
function prepare_python_env_undo
{
managed_exec yum remove python-pip python-devel gcc -y
return $?
}
################################################################################
function install_cmvo_do
{
cd $BASE_DIR/git/src
managed_exec python setup.py sdist || return $?
managed_exec pip install -Iv --install-option="--prefix=$BASE_DIR" --upgrade dist/CernVM-Online-1.0.tar.gz || return $?
cd ../../
managed_exec rm -Rf $BASE_DIR/git || return $?
managed_exec mkdir $BASE_DIR/logs
return $?
}
function install_cmvo_undo
{
if [ -d $BASE_DIR/git ]; then
managed_exec rm -Rf $BASE_DIR/git || return $?
fi
managed_exec rm -Rf $BASE_DIR/lib || return $?
managed_exec rm -Rf $BASE_DIR/bin || return $?
managed_exec rm -Rf $BASE_DIR/logs
return $?
}
################################################################################
function configure_cvmo_do
{
managed_exec cp $SCRIPT_PATH/config.py $BASE_DIR/lib/python2.6/site-packages/cvmo/config.py
return $?
}
function configure_cvmo_undo
{
managed_exec rm $BASE_DIR/lib/python2.6/site-packages/cvmo/config.py
return 0
}
################################################################################
function make_public_do
{
managed_exec mkdir $BASE_DIR/public_html || return $?
export PATH="$PATH:$BASE_DIR/bin"
export PYTHONPATH="$PYTHONPATH:$BASE_DIR/lib/python2.6/site-packages"
export PYTHONPATH="$PYTHONPATH:$BASE_DIR/lib64/python2.6/site-packages"
managed_exec $BASE_DIR/bin/manage.py collectstatic --noinput
return $?
}
function make_public_undo
{
managed_exec rm -Rf $BASE_DIR/public_html
return $?
}
################################################################################
function install_mysql_do
{
managed_exec yum install mysql-devel -y || return $?
managed_exec easy_install -U distribute
managed_exec pip install mysql-python
return $?
}
function install_mysql_undo
{
managed_exec yum remove mysql-devel -y || return $?
managed_exec pip uninstall mysql-python
return $?
}
################################################################################
function install_apache_do
{
managed_exec yum install httpd mod_wsgi mod_ssl -y || return $?
managed_exec cp $SCRIPT_PATH/app.wsgi $BASE_DIR/bin/app.wsgi || return $?
managed_exec cp $SCRIPT_PATH/cernvm-online.conf /etc/httpd/conf.d/b_cernvm-online.conf || return $?
managed_exec mv /etc/httpd/conf.d/wsgi.conf /etc/httpd/conf.d/a_wsgi.conf || return $?
managed_exec mv /etc/httpd/conf.d/ssl.conf /etc/httpd/conf.d/a_ssl.conf || return $?
managed_exec chown apache:apache $BASE_DIR -R || return $?
# seLinux fix
# managed_exec setsebool -P httpd_can_network_connect on || return $?
managed_exec setenforce 0
managed_exec sed -i -e 's/^SELINUX=.*$/SELINUX=disabled/' /etc/selinux/config
managed_exec /etc/init.d/httpd restart
return 0 # ignore Apache for the moment...
}
function install_apache_undo
{
managed_exec yum remove httpd mod_wsgi -y || return $?
# managed_exec setsebool -P httpd_can_network_connect off || return $?
managed_exec sed -i -e 's/^SELINUX=.*$/SELINUX=enforcing/' /etc/selinux/config
managed_exec rm -Rf /etc/httpd
return $?
}
################################################################################
| true |
df78087cebb2b2c3b8a8d21bea85e0eedb2058f1 | Shell | comzyh/FerrisSensor | /ble_acc/openocd_flash.sh | UTF-8 | 322 | 2.71875 | 3 | [] | no_license | #!/usr/bin/expect
# This script is for flash NRF51 using openocd. Please install `expect` first.
# sudo apt install expect
# Usage: openocd_flash.sh <host> <telnet-port> <hex-file>
# Connects to a running OpenOCD telnet server and flashes <hex-file>.
set hostName [lindex $argv 0]
set port [lindex $argv 1]
set hexfile [lindex $argv 2]
spawn telnet $hostName $port
# NOTE(review): the commands below are sent back-to-back without an
# 'expect' on the OpenOCD prompt in between, so this relies on the telnet
# session buffering input -- confirm this is reliable before depending on it.
send "reset\n"
send "program ${hexfile}\n"
send "reset\n"
send "exit\n"
interact | true |
478947d1ce6145da21994e1eb941528787d54744 | Shell | bboozzoo/dotfiles | /apps/upgpkg | UTF-8 | 2,550 | 4.0625 | 4 | [] | no_license | #!/bin/bash
# upgpkg: Upgrades package versions in PKGBUILD and starts build.
# Author: Abhishek Dasgupta <abhidg at gmail.com>
# Thanks to cactus, profjim and daenyth for all the sed help!
# Edited: Florian Pritz <flo at xinu.at>
# I place this script in the public domain.
VERSION=0.4
# from makepkg
# Initialise the colour/bold escape sequences used by die()/warn().
# Colours are only enabled when stderr is a terminal; terminfo sequences
# via tput are preferred, with raw ANSI escapes as a fallback.
unset ALL_OFF BOLD BLUE GREEN RED YELLOW
if [[ -t 2 ]]; then
    if tput setaf 0 &>/dev/null; then
        # terminal-safe sequences from terminfo
        ALL_OFF=$(tput sgr0)
        BOLD=$(tput bold)
        BLUE=${BOLD}$(tput setaf 4)
        GREEN=${BOLD}$(tput setaf 2)
        RED=${BOLD}$(tput setaf 1)
        YELLOW=${BOLD}$(tput setaf 3)
    else
        # hard-coded ANSI escape fallback
        ALL_OFF="\033[1;0m"
        BOLD="\033[1;1m"
        BLUE="${BOLD}\033[1;34m"
        GREEN="${BOLD}\033[1;32m"
        RED="${BOLD}\033[1;31m"
        YELLOW="${BOLD}\033[1;33m"
    fi
fi
readonly ALL_OFF BOLD BLUE GREEN RED YELLOW
die() {
	# Print a formatted, localised error message and abort the script.
	# FIX: diagnostics now go to stderr instead of stdout, so they no
	# longer pollute captured command output.
	local message="$1"
	shift
	printf "$RED==> $(gettext "Error"):$ALL_OFF $(gettext "$message")\n" "$@" >&2
	exit 1
}
warn() {
	# Print a formatted, localised warning message to stderr (previously
	# went to stdout) and continue.
	local message="$1"
	shift
	printf "$YELLOW==> $(gettext "Warning"):$ALL_OFF $(gettext "$message")\n" "$@" >&2
}
scriptlet() {
	# Run the optional hook function "upgpkg_<stage>" when ./upgpkg exists
	# and defines it; abort via die() if the hook returns non-zero.
	local stage=$1
	[ -f "upgpkg" ] || return 0
	if [[ $(type -t "upgpkg_$stage") = "function" ]]; then
		"upgpkg_$stage" || die "\"%s\" scriptlet failed" $stage
	fi
}
# Print version and usage information, then exit with status 2 so callers
# can tell "showed help" apart from a normal run.
help() {
	echo "upgpkg $VERSION"
	# The literal strings double as gettext msgids; keep them verbatim.
	printf "$(gettext "usage: upgpkg [options] newver")\n"
	printf "$(gettext " -h this help")\n"
	printf "$(gettext " -g generate a template ./upgpkg file")\n"
	exit 2
}
# No arguments at all -> show usage and exit.
if [ -z "$1" ]; then
	help
fi
while getopts "gh" OPTION; do
	case $OPTION in
		g)
			# Generate a skeleton ./upgpkg hook file and stop.  The
			# heredoc body is the generated file; keep it verbatim.
			cat > upgpkg <<EOF
upgpkg_pre_upgrade() {
	# You might want to remove old sources here
	true
}
upgpkg_build() {
	makepkg
}
EOF
			exit;
			;;
		h) help;
	esac
done
# A PKGBUILD must exist in the current directory.
[ ! -f PKGBUILD ] && die "No \"%s\" in %s" "PKGBUILD" "$PWD"
# Pull in optional per-package hooks, then the package metadata itself.
if [ -f "upgpkg" ]; then
	source ./upgpkg
fi
source PKGBUILD
scriptlet pre_upgrade
# Warn (but continue) when the requested version does not move forward.
if [[ $(vercmp $1 $pkgver) -le 0 ]]; then
	warn "New version (%s) older or equal to current %s" "$1" "$pkgver"
fi
# Bump pkgver to the requested version and reset pkgrel to 1.
sed -i "s/pkgver=.*$/pkgver=$1/g" PKGBUILD
sed -i "s/pkgrel=.*$/pkgrel=1/g" PKGBUILD
# Replace the first checksum array with freshly generated sums (makepkg -g);
# NOTE(review): 'sponge' comes from moreutils -- confirm it is installed.
awk <PKGBUILD '$0 ~ /^(md5|sha[0-9]+)sums/ {i = 1; if(!run==1) {system("makepkg -g 2>/dev/null")}; run=1; }; !i {print}; $0 ~ /\)/ {i = 0}' | sponge PKGBUILD
# Re-read the updated metadata (and hooks, in case they depend on it).
source PKGBUILD
if [ -f "upgpkg" ]; then
	source ./upgpkg
fi
# Download (if remote) and GPG-verify every signature listed in gpgsource.
# NOTE(review): $gpgsource is presumably set by PKGBUILD or ./upgpkg -- confirm.
for i in $gpgsource; do
	sigfile="$(basename "$i")"
	if [[ $sigfile != $i ]]; then
		wget -nv -O "$sigfile" "$i"
	fi
	gpg2 --verify "$sigfile" || die "Signature verification failed!"
done
scriptlet build
| true |
ad30e1effa08ce91003f3ba17620fd516afd5d05 | Shell | COSMOS-ASMC/ShowerMC | /Cosmos9.00/Scrpt/dot2perc.sh | UTF-8 | 935 | 3.578125 | 4 | [] | no_license | #!/bin/bash
# Require exactly one argument (the Fortran file to convert).
if [ $# -ne 1 ]; then
cat <<EOF
To convert old workstation type structure construct component
into new fortran type (eg, a.b --> a%b )
Usage:
dot2perc.sh inputFortranFile
EOF
exit
fi
# Extract the gawk version as a bare digit string (e.g. "3.1.6," -> "316")
# and require 3.1.6 <= version <= 7.0.0.
vn=$(awk --version | awk '{gsub(/[\.,]/,"",$3);print $3;exit}')
if [ "$vn" -lt 316 ] || [ "$vn" -gt 700 ]; then
cat <<EOF
awk verson is too old. Please use gnu awk of version >= 3.1.6
Some old awk's have vn > 1000 but is older than 3.1.6
EOF
exit
fi
file=$1
# BUG FIX: the guard previously compared against the misspelling "Makfile",
# so an actual Makefile was never rejected.
if [ "$file" == "Makefile" ]; then
cat <<EOF
This script should not be applied to Makefile
EOF
exit
fi
# Keep a backup, run the conversion, and only replace the input when
# the awk pass actually changed something.
cp "$file" "${file}-bkup"
awk -f "$COSMOSTOP/Scrpt/dot2perc.awk" "$file" > temp1temp1
# sed -f $COSMOSTOP/Scrpt/dot2perc.sed temp1temp1 > temp2temp2
same=$(diff -q "$file" temp1temp1)
if [ -z "$same" ]; then
echo no change
rm temp1temp1
# rm temp2temp2
# rm temp3temp3
rm "${file}-bkup"
else
mv temp1temp1 "$file"
fi
| true |
214665d74a124fb7d71a32a71fd49ec6aceb0171 | Shell | aminzai/lazyscripts_pool_opensuse | /Networking/swiftfox | UTF-8 | 4,177 | 3.359375 | 3 | [] | no_license | #!/bin/bash
# -*- coding: utf-8 -*-
# Copyright (C) 2007 洪任諭 Hong Jen Yee (PCMan) <pcman.tw@gmail.com>
# Copyright (C) 2008 林哲瑋 Zhe-Wei Lin (billy3321,雨蒼) <billy3321 -AT- gmail.com>
# Copyright (C) 2009 張君平 Chun-Ping Chang (mrmoneyc) <moneyc.net -AT- gmail.com>
# Last Modified: 28 Apr 2009
# Released under GNU General Public License
# Download and install swiftfox.
# Please run as root.
#
# @name_enUS ''
# @name_zhTW '安裝 SwiftFox - 非官方最佳化版的 Firefox'
# @desc_enUS ''
# @desc_zhTW '針對各種不同 CPU 最佳化編譯的 Firefox,效能勝過 Firefox 官方版本'
# @category 'Networking'
# @maintaner '張君平 Chun-Ping Chang (mrmoneyc) <moneyc.net -AT- gmail.com>'
# @author '洪任諭 Hong Jen Yee (PCMan) <pcman.tw@gmail.com>'
# @license 'GPL'
# @opensuse ''
# @platform 'i386 AMD64'
# @child 'Common/cpu-type.py'
# Detect the CPU family via the helper script, then download and run the
# matching Swiftfox installer build.
CPU=`./cpu-type.py`
echo "自動偵測這台電腦上的 CPU 架構:$CPU"
if [ -n "$CPU" ]; then
mkdir -p ./temp/swiftfox
pushd ./temp/swiftfox
# NOTE(review): $WGET is expected to be set by the calling environment
# (lazyscripts framework) -- confirm; plain 'wget' is used further below.
case $CPU in
'prescott')
$WGET 'http://getswiftfox.com/builds/installer/prescott/install-swiftfox.sh'
chmod a+x install-swiftfox.sh
./install-swiftfox.sh
popd
rm -rf ./temp/
;;
'pentium-m'|'pentium3'|'pentium4'|'athlon-64-32bit')
$WGET 'http://getswiftfox.com/builds/installer/i686/install-swiftfox.sh'
chmod a+x install-swiftfox.sh
./install-swiftfox.sh
popd
rm -rf ./temp/
;;
#FIXME: I am not sure AMD64 CPU's name of type
'athlon-xp')
$WGET 'http://getswiftfox.com/builds/installer/athlon64/install-swiftfox.sh'
chmod a+x install-swiftfox.sh
./install-swiftfox.sh
popd
rm -rf ./temp/
;;
*)
echo '未知的 CPU,無法安裝 Swiftfox'
;;
esac
# Fetch the matching Traditional Chinese language pack for this release.
echo "下載與處理繁體中文介面"
SFVERSION=`cut -d / -f 3 swiftfox-rel-arch-file`
wget http://releases.mozilla.org/pub/mozilla.org/firefox/releases/$SFVERSION/linux-i686/xpi/zh-TW.xpi
#unzip -o zh-TW.xpi -d $HOME/swiftfox/
#mv $HOME/swiftfox/chrome.manifest #$HOME/swiftfox/chrome/zh-TW.manifest
# The 3 steps above are not known to be correct and may leave swiftfox
# unable to start.
# Use a more conservative approach instead: install the Traditional
# Chinese UI on the first swiftfox launch.
sed 's/"general.useragent.locale", "en-US"/"general.useragent.locale", "zh-TW"/' \
$HOME/swiftfox/defaults/pref/firefox-l10n.js > $HOME/swiftfox/defaults/pref/firefox-l10n.js.tmp
mv $HOME/swiftfox/defaults/pref/firefox-l10n.js.tmp $HOME/swiftfox/defaults/pref/firefox-l10n.js
# The step above is equivalent to setting general.useragent.locale to
# zh-TW via about:config.
echo "處理路徑"
ln -s $HOME/swiftfox/swiftfox $HOME/bin
echo "處理 plugins"
test ! -e $HOME/.mozilla/plugins && mkdir $HOME/.mozilla/plugins
cp $HOME/swiftfox/plugins/* $HOME/.mozilla/plugins
rm -r $HOME/swiftfox/plugins
ln -s /usr/lib/browser-plugins $HOME/swiftfox/plugins
# Link swiftfox/plugins directly to the default browser-plugin install
# location on SUSE.
echo "處理選單啟動圖示"
# BUG FIX: the Encoding line previously read 'i echo ...' (a stray editor
# command), which failed with "i: command not found" and silently left
# Encoding=UTF-8 out of the generated launcher.  Emit the whole .desktop
# entry with one heredoc; $HOME is expanded for the Icon path.
cat > swiftfox.desktop <<EOF
[Desktop Entry]
Encoding=UTF-8
Name=Swiftfox
Name[zh_TW]=Swiftfox 網頁瀏覽器
GenericName=Web browser
GenericName[zh_TW]=網頁瀏覽器
Comment=Web Browser
Comment[zh_TW]=網頁瀏覽器
Exec=swiftfox %u
Type=Application
Terminal=false
Categories=Application;WebBrowser
Icon=$HOME/swiftfox/icons/mozicon128.png
EOF
# Copy the launcher into the KDE, GNOME2 and freedesktop menu directories,
# creating each directory on first use.
test ! -e $HOME/.kde/share/applications && mkdir -p $HOME/.kde/share/applications
cp swiftfox.desktop $HOME/.kde/share/applications
test ! -e $HOME/.gnome2/vfolders/applications && mkdir -p $HOME/.gnome2/vfolders/applications
cp swiftfox.desktop $HOME/.gnome2/vfolders/applications
test ! -e $HOME/.local/share/applications && mkdir -p $HOME/.local/share/applications
cp swiftfox.desktop $HOME/.local/share/applications
# First launch installs the zh-TW language pack downloaded earlier.
echo "第一次啟動 swiftfox 以安裝中文介面"
swiftfox zh-TW.xpi
echo "Swiftfox $SFVERSION 已安裝完成!"
else
echo "未知的 CPU,無法自動安裝 Swiftfox !"
fi
| true |
e3bee985860698309fa46ca859989c746a762555 | Shell | robashton/vir | /vir | UTF-8 | 7,888 | 3.984375 | 4 | [] | no_license | #!/usr/bin/env bash
# Build and publish a versioned release of every app (or only $RELEASE_APP).
# Reads/writes the deployment/{major_ver,minor_ver,build_no,label} files,
# writes version.hrl, tags and pushes git (unless re-releasing), then
# produces the release tarballs.
build_release() {
  if [[ $(uname) != "Linux" ]]; then
    echo "You are running $(uname), not Linux - aborting release"
    exit 1
  fi
  # Determine the version triple: either from the deployment/ files
  # (bumping build_no unless this is a re-release) or from an explicit
  # -v argument split on dots.
  if [[ -z "$MANUAL_VERSION" ]]; then
    while read line
    do
      local prev_build_no=$line
    done < "deployment/build_no"
    if [[ $RERELEASE -eq 0 ]]; then
      echo Updating Release Number
      local build_no=$((prev_build_no + 1))
    else
      local build_no=$prev_build_no
    fi
    while read line
    do
      local major=$line
    done < "deployment/major_ver"
    while read line
    do
      local minor=$line
    done < "deployment/minor_ver"
  else
    IFS="." read major minor build_no <<< "$MANUAL_VERSION"
  fi
  # The release label embeds the current branch unless -l overrides it.
  if [[ -z $MANUAL_VERSION_LABEL ]]; then
    local branch_name=$(git rev-parse --abbrev-ref HEAD)
    local release="v$major.$minor.$build_no-$branch_name"
  else
    local release="$MANUAL_VERSION_LABEL"
  fi
  echo "Version $major.$minor.$build_no: $release"
  # Update version on disc even if we're re-releasing
  # because we might have switched branch in which case
  # the full release name will change even though the build
  # number didn't
  if [[ -f "apps/shared/include/version.hrl" ]]; then
    echo "-define(VERSION, \"$release\")." > apps/shared/include/version.hrl
  fi
  if [[ $RERELEASE -eq 0 ]] || [[ -n $MANUAL_VERSION ]] || [[ -n $MANUAL_VERSION_LABEL ]]; then
    echo "$major" > "deployment/major_ver"
    echo "$minor" > "deployment/minor_ver"
    echo "$build_no" > "deployment/build_no"
    echo "$release" > "deployment/label"
  fi
  echo Building Release $release
  # -d skips the clean for a faster (dirty) rebuild.
  if [[ $DIRTYRELEASE -eq 1 ]]; then
    echo "Not cleaning because it's a dirty release"
  else
    rebar3 clean
  fi
  if [[ -z $RELEASE_APP ]]; then
    echo "Releasing all apps"
    for app in $(ls apps/); do
      rebar3 release -n $app
    done
  else
    echo "Releasing $RELEASE_APP"
    rebar3 release $RELEASE_APP
  fi
  if [[ $RERELEASE -eq 0 ]] || [[ -n $MANUAL_VERSION ]] || [[ -n $MANUAL_VERSION_LABEL ]]; then
    # TODO: Deal with native deps if there are any
    echo Making BoM and updating Git with new release tag
    build_bill_of_materials $release "deployment/bill_of_materials"
  fi
  # Re-releases keep git untouched; normal releases commit, tag and push.
  if [[ $RERELEASE -eq 1 ]]; then
    echo "Re-release, not updating version files in git"
  else
    git add deployment/build_no
    git add deployment/bill_of_materials.txt
    git add deployment/bill_of_materials.info
    git add deployment/label
    ls -d apps/*/include | xargs -n 1 git add
    git tag $release
    git commit -m "Automated build number increase: $build_no"
    git push --tags
    git push
    echo Git updated for version $release
  fi
  echo Building tars and publishing
  build_tars $release
}
# Write a bill of materials for $1 (release label) into "$2.txt"
# (human-readable) and "$2.info" (tab-separated machine-readable):
# one "base" record for the root repo, one "dep" record per deps/ checkout.
build_bill_of_materials() {
  local release=$1
  local output=$2
  local logtotxt="$(pwd)/$output.txt"
  local logtoinfo="$(pwd)/$output.info"
  echo "BoM for release $release" > $logtotxt
  echo "Built on: $(hostname) by $(id -un)" >> $logtotxt
  echo "Date: $(date)" >> $logtotxt
  echo >> $logtotxt
  echo >> $logtotxt
  echo "Root project: $(git remote -v)" >> $logtotxt
  git log -n 1 >> $logtotxt
  echo >> $logtotxt
  # info format: kind<TAB>name<TAB>remote-url<TAB>commit-sha
  echo -e "base\t$release\t$(git remote -v | head -1 | awk '{print $2}')\t$(git rev-parse HEAD)" > $logtoinfo
  local all_deps=$(find deps -mindepth 1 -maxdepth 1 -type d | sort)
  for dep in $all_deps ; do
    pushd $dep > /dev/null
    local this_dep=${dep##*/}
    echo "Git tag for dependency $this_dep" >> $logtotxt
    echo "pulled from $(git remote -v)" >> $logtotxt
    git log -n 1 >> $logtotxt
    echo >> $logtotxt
    echo -e "dep\t$this_dep\t$(git remote -v | head -1 | awk '{print $2}')\t$(git rev-parse HEAD)" >> $logtoinfo
    popd > /dev/null
  done
}
# Tar up either every built release under _build/default/rel, or only
# $RELEASE_APP when it is set, placing the results under ./releases.
build_tars() {
  local tag=$1
  local out_dir="$PWD/releases"
  mkdir -p $out_dir
  if [[ -n $RELEASE_APP ]]; then
    local rel_dir=_build/default/rel/$RELEASE_APP
    build_tar $rel_dir $out_dir $tag
  else
    local name
    for name in $(ls _build/default/rel)
    do
      build_tar _build/default/rel/$name $out_dir $tag
    done
  fi
}
# Tar one release directory ($1) into $2 as <app>_<tag>.tar.gz.
# The directory is temporarily renamed to include the tag while tarring,
# then renamed back.  If the app ships a post_tar.sh hook it takes over;
# otherwise a self-extracting installer is built by concatenating
# $SOURCEDIR/autoextract.sh with the tarball.
build_tar() {
  local app_dir=$1
  local releases_folder=$2
  local git_tag=$3
  local app=$(basename $app_dir)
  local working_dir=$(dirname $app_dir)
  local tar_dir="${app}_$git_tag"
  local tar_name="${tar_dir}.tar.gz"
  local autorun_name="$releases_folder/install-${tar_name%.tar.gz}"
  if [[ -f apps/$app/release-files/pre_tar.sh ]]; then
    echo Running pre-tar script for $app
    # NOTE(review): the double slash below is harmless but looks accidental.
    apps/$app//release-files/pre_tar.sh
  fi
  echo Building $app Archive...
  rm -f $tar_name
  pushd $working_dir > /dev/null
  # Rename so the directory inside the tarball carries the release tag.
  mv $app $tar_dir
  tar cfz $releases_folder/$tar_name $tar_dir || { echo "Tar failed"; exit 1; }
  mv $tar_dir $app
  popd > /dev/null
  if [[ -f apps/$app/release-files/post_tar.sh ]]; then
    echo Running post-tar script for $app
    apps/$app/release-files/post_tar.sh $tar_name $git_tag
  else
    cat $SOURCEDIR/autoextract.sh $releases_folder/$tar_name > $autorun_name
    chmod +x $autorun_name
    echo done
  fi
  # The raw tarball is an intermediate; only the installer/hook output remains.
  rm $releases_folder/$tar_name
}
# Update the git checkout in directory $1 to the latest upstream state.
# If the repo is in detached-HEAD state, check out master first so that
# 'git pull' has a branch to fast-forward.
get_latest() {
  local repo_dir=$1
  # FIX: guard the directory change -- previously an unquoted, unchecked
  # pushd meant 'git pull' could run in the wrong directory (and a path
  # with spaces broke the call entirely).
  pushd "$repo_dir" > /dev/null || return
  local head_name
  head_name=$(git rev-parse --abbrev-ref HEAD)
  echo "On branch $head_name"
  if [[ "$head_name" == "HEAD" ]] ; then
    git checkout master
  fi
  git pull
  popd > /dev/null
}
usage() {
  # Print help for one subcommand, or the overview (which recursively
  # includes both subcommand sections) for anything else.
  local topic=$1
  case "$topic" in
    "upgrade")
      cat <<'EOF'
vir upgrade
---
 updates the templates for vir (doesn't update this running script though)

EOF
      ;;
    "release")
      cat <<'EOF'
vir release [-d] [-r] [-a <app_name>] [-v <major>.<minor>.<build>] [-l <build_label>]
---
 Creates a self extracting tar of each application and updates the versions (if available)
 -d is a dirty release (don't build deps, don't clean all)
    use with caution
 -r is used to re-release whatever the current version number is, so the current version number
    is used without being incremented. Use with care.
 -a app_name just does the release / tar of app_name
 -v 1.0.0 uses the provided version number rather than generating a new one. The provided version
    version will be written to the deployment/ files as normal
 -l v1.0.0-blah uses the specified build label rather than generating one. The provided label
    will be written to a shared version.hrl as normal

EOF
      ;;
    *)
      printf '\n'
      cat <<'EOF'
Vir is just a release manager on top of rebar3
---

EOF
      usage "upgrade"
      usage "release"
      printf '\n'
      ;;
  esac
}
# --- Global state for the CLI ---
# NOTE(review): several of these (TEMPLATE, APP_NAME, NODENAME, RUN_MODE,
# COOKIE, KERNEL) are not referenced in this part of the script --
# presumably consumed by other subcommands; confirm before removing.
COMMAND=$1
TEMPLATE=empty
APP_NAME=
NODENAME=
TARGET_DIR=$(pwd)
# getopts parsing (in the 'release' branch below) starts at index 2 so the
# subcommand name in $1 is skipped.
OPTIND=2
SOURCEDIR=$HOME/.vir
TEMPLATEDIR=$SOURCEDIR/templates
RUN_MODE=''
DIRTYRELEASE=0
RERELEASE=0
COOKIE='cookie'
KERNEL='-kernel inet_dist_listen_min 9100 inet_dist_listen_max 9105 +K true +A 10'
MANUAL_VERSION=
MANUAL_VERSION_LABEL=
check_vir_path() {
  # Bootstrap the vir checkout on first run; no-op when it already exists.
  if [[ -d $SOURCEDIR ]]; then
    return 0
  fi
  echo "Vir installation not found, cloning repo into $SOURCEDIR"
  git clone https://github.com/robashton/vir.git $SOURCEDIR
}
# Pull the latest vir sources in-place (per the usage text this updates
# the templates; the currently running copy of this script is unaffected
# until the next invocation).
upgrade_vir() {
  pushd $SOURCEDIR > /dev/null
  git pull
  popd > /dev/null
}
# Ensure the vir checkout exists, then dispatch on the subcommand.
check_vir_path
case "$COMMAND" in
  "upgrade")
    upgrade_vir
    ;;
  "release")
    RELEASE_APP=
    # OPTIND was initialised to 2 above, so getopts starts parsing after
    # the subcommand name in $1.
    while getopts ":dra:v:l:" option; do
      case "$option" in
        d)
          DIRTYRELEASE=1
          ;;
        r)
          RERELEASE=1
          ;;
        a)
          RELEASE_APP=$OPTARG
          ;;
        v)
          MANUAL_VERSION=$OPTARG
          ;;
        l)
          MANUAL_VERSION_LABEL=$OPTARG
          ;;
        ?)
          echo "Error: unknown option -$OPTARG"
          usage
          exit 1
          ;;
      esac
    done
    build_release
    ;;
  *)
    usage
    exit 1
    ;;
esac
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.