blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
70a0f34373e2047bc57c6767dd1f6bab70b00c51
|
Shell
|
SampurnR/ICR-R
|
/installations.sh
|
UTF-8
| 2,105
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
# Program: installations.sh
# Created by: Sampurn Rattan
# Purpose: Install tesseract 3.04 (with its training tools), along with its dependencies
# Version : 1.0
# Date: 2016-12-24
# Dependencies:
# 1. R (>3.0)
# 2. All other dependencies are being installed here
#
# Modification History:
#
# When | Who | What
# ----------------------------------------------------------------------------------
#
# ----------------------------------------------------------------------------------
#
# Usage:
# sh installations.sh
# Parameters:
#
#
echo "Running apt-get update..."
sudo apt-get -qq update -y
echo "Installing Leptonica and Tesseract dependencies..."
sudo apt-get -qq install -y libpng12-dev libjpeg62-dev libtiff4-dev zlib1g-dev
sudo apt-get -qq install -y libicu-dev libpango1.0-dev libcairo2-dev
sudo apt-get -qq install -y gcc g++
sudo apt-get -qq install -y autotools-dev autoconf automake libtool checkinstall build-essential
# Leptonica is built from source because tesseract 3.04 needs >= 1.71.
echo "Fetching and compiling Leptonica..." && sleep 3
cd /tmp || exit 1
wget http://www.leptonica.org/source/leptonica-1.72.tar.gz
tar -xvf leptonica-1.72.tar.gz
cd leptonica-1.72 || exit 1
./configure
make
sudo make install
echo "Installing Tesseract with training tools..." && sleep 3
cd /tmp || exit 1
wget https://github.com/tesseract-ocr/tesseract/archive/3.04.00.tar.gz
tar -xvf 3.04.00.tar.gz
cd tesseract-3.04.00 || exit 1
./autogen.sh
./configure
make
sudo make install
# Training tools are a separate make target in tesseract 3.x.
make training
sudo make training-install
sudo ldconfig
echo "Fetching English (eng) trained data..."
cd /usr/local/share/tessdata || exit 1
sudo wget https://github.com/tesseract-ocr/tessdata/raw/master/eng.traineddata
echo "Installing the R package..." && sleep 3
sudo su - -c "R -e \"install.packages('tesseract', repos='http://cran.rstudio.com/')\""
echo "Downloading jTessBoxEditor..."
sudo wget "http://downloads.sourceforge.net/project/vietocr/jTessBoxEditor/jTessBoxEditor-1.0.zip?r=https%3A%2F%2Fsourceforge.net%2Fprojects%2Fvietocr%2Ffiles%2FjTessBoxEditor%2F&ts=1482783057&use_mirror=excellmedia"
# TODOs:
# install opencpu
echo "Installations complete!"
| true
|
180f70ef95887451ac737e64b47b3234ffe00c3e
|
Shell
|
wmchad/NetworkResearch
|
/Shell/Common/DataParsing/MSCstep5-TowerFinal.sh
|
UTF-8
| 1,175
| 2.625
| 3
|
[] |
no_license
|
#!/bin/sh
# This script calls a scala file to pull out just the fields needed
# for analyzing data at the tower level
# It then combines the resulting files into the TowerFinal directory
# under CDR_chad
# The final file has the following fields, semicolon delimited:
# Year (YYYY)
# Month (MM)
# Day (DD)
# Hour (HH)
# Minute (MM)
# Second (SS)
# From tower id
# To tower id
# Duration of call in seconds
# Example call: ./MSCstep5-TowerFinal.sh 2011 11 01
scala -J-Xmx64g -classpath /usr/lusers/wmchad/software/spark-0.8.0-incubating/examples/target/scala-2.9.3/spark-examples-assembly-0.8.0-incubating.jar /ischool/jb/roshan_anon/wmchad/Code/Scala/Functions/DataParsing/MSC/MSCParser.scala FinalFormatTower "/ischool/jb/roshan_anon/CDR_chad/$1-$2/MSC/Matched/MSC-Call-$1-$2-$3.txt" "/ischool/jb/roshan_anon/CDR_chad/$1-$2/MSC/TowerFinal/MSC-Call-$1-$2-$3" /ischool/jb/roshan_anon/gis/TowerTranslation
# Guard the cd: the rm/rm -r below must never run in the wrong directory.
cd "/ischool/jb/roshan_anon/CDR_chad/$1-$2/MSC/TowerFinal/MSC-Call-$1-$2-$3" || exit 1
# Concatenate all of Spark's part files into a single output file.
find part* -exec cat {} > "../MSC-Call-$1-$2-$3.txt" \;
# -f: do not fail when a glob has no matches (e.g. no hidden .part files).
rm -f part*
rm -f .part*
cd ..
rm -r "MSC-Call-$1-$2-$3"
# rm "/ischool/jb/roshan_anon/CDR_chad/$1-$2/MSC/Reformatted/MSC-Call-$1-$2-$3.txt"
| true
|
fe0cd30d1a0013e0f831911715b65e77db5fdae0
|
Shell
|
KeyserSoze1/goestools
|
/scripts/setup_raspbian.sh
|
UTF-8
| 932
| 3.9375
| 4
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
#
# Prepare a Raspbian cross-compilation environment: toolchain checkout,
# CMake toolchain file, and a sysroot populated with the needed packages.
#
set -e

target_dir="xcompile/raspbian"

mkdir -p "${target_dir}"
cp -f "$(dirname "$0")/files/raspberrypi.cmake" "${target_dir}"

# Clone the Raspberry Pi toolchain repository on first run only.
if [ ! -d "${target_dir}/tools" ]; then
  git clone https://github.com/raspberrypi/tools.git "${target_dir}/tools"
fi

# Print the deduplicated list of package URLs to mirror into the sysroot.
urls() {
  (
    scripts/list_raspbian_urls.py \
      librtlsdr-dev \
      libairspy-dev \
      libusb-1.0-0-dev \
      libudev1
  ) | sort | uniq
}

tmp=$(mktemp -d)
trap "rm -rf $tmp" EXIT

# Download every package of interest into the scratch directory.
while read -r url; do
  echo "Downloading ${url}..."
  ( cd "$tmp" && curl -LOs "${url}" )
done < <(urls)

# Unpack each .deb into the toolchain's sysroot.
for pkg in "$tmp"/*.deb; do
  echo "Extracting $(basename "${pkg}")..."
  dpkg-deb -x "${pkg}" "${target_dir}/tools/arm-bcm2708/arm-linux-gnueabihf/arm-linux-gnueabihf/sysroot"
done
| true
|
f6f308c8b2ffbc54add05f583e5feb2d94d69d44
|
Shell
|
bjoris33/humanGutConj_mSystems
|
/bin/resume_adult_binning.sh
|
UTF-8
| 1,665
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
##############
# Author: Ben Joris
# Created: May 22nd, 2020
# Purpose: For adult dataset, map to already created bowtie databases, bin with metabat2
##############
newarray=( SRR5650151 SRR5650152 SRR5650153 SRR5650154 SRR5650155 SRR5650156 SRR5650157 SRR5650158 SRR5650159 )
for samplepath in "${newarray[@]}"; do
	# IFS='/' read -ra samplearray <<< $samplepath
	echo "$samplepath"
	# Glob directly instead of parsing `ls`; one iteration per built bowtie DB.
	for innersamplepath in ../adult_na/data/mapping/SRR*/bowtiedb.1.bt2; do
		# Path component [4] is the SRR accession owning this database.
		IFS='/' read -ra innersamplearray <<< "$innersamplepath"
		echo "${innersamplearray[4]}"
		/Volumes/data/bin/bowtie2.3.5/bowtie2 -p 20 -1 "../adult_na/data/reads/processed/paired_${innersamplearray[4]}_1.fastq.gz" -2 "../adult_na/data/reads/processed/paired_${innersamplearray[4]}_2.fastq.gz" -x "../adult_na/data/mapping/$samplepath/bowtiedb" --no-unal --no-mixed --no-discordant -S "../adult_na/data/mapping/$samplepath/${innersamplearray[4]}.sam"
		samtools view --threads 20 -bS "../adult_na/data/mapping/$samplepath/${innersamplearray[4]}.sam" > "../adult_na/data/mapping/$samplepath/${innersamplearray[4]}.bam"
		anvi-init-bam "../adult_na/data/mapping/$samplepath/${innersamplearray[4]}.bam" -o "../adult_na/data/mapping/$samplepath/sorted_${innersamplearray[4]}.bam"
	done
	# Collect the sorted BAMs as an array so each path stays a separate word.
	allbam=( "../adult_na/data/mapping/$samplepath"/sorted_*.bam )
	cp "../adult_na/data/assemblies/$samplepath/contigs.fa" "$samplepath.fa"
	/Volumes/data/bin/miniconda3/pkgs/metabat2-2.12.1-0/bin/runMetaBat.sh --numThreads 20 "$samplepath.fa" "${allbam[@]}"
	# Clean up intermediates to save disk between samples.
	rm "../adult_na/data/mapping/$samplepath"/*.sam
	rm "../adult_na/data/mapping/$samplepath"/*.bam
	rm "../adult_na/data/mapping/$samplepath"/sorted_*.bam.bai
	echo "****"
done
| true
|
352b6f6c5c784b52bbeabbe7bb5c3a06a845ddbf
|
Shell
|
musq/dotfiles-old
|
/src/os/install/macos/gpg.sh
|
UTF-8
| 508
| 2.796875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Install GPG tooling on macOS via Homebrew.
# print_in_purple and brew_install are defined in the sourced utils scripts;
# the && chain ensures nothing runs if sourcing fails.
cd "$(dirname "${BASH_SOURCE[0]}")" \
&& . "../../utils.sh" \
&& . "./utils.sh"
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
print_in_purple "\n GPG\n\n"
# Run the shared (cross-platform) gpg setup script first.
./../gpg.sh
brew_install "GPG" "gpg"
brew_install "GPG 2" "gpg2"
# gpg-agent is removed because it is redundant without gnupg2
# https://github.com/Homebrew/homebrew-core/commit/965e130e04e5900e35bf1f0b6ebad9d1c2f680a7
# brew_install "GPG Agent" "gpg-agent"
brew_install "Pinentry" "pinentry-mac"
| true
|
0c34094bced01704566d116ba1e83e76dcefae07
|
Shell
|
imxieke/scripts
|
/sh/functions.sh
|
UTF-8
| 7,477
| 3.5625
| 4
|
[
"Unlicense"
] |
permissive
|
#!/usr/bin/env bash
# @Author: imxieke
# @Date: 2021-09-29 21:34:58
# @Last Modified by: imxieke
# @Last Modified time: 2021-11-13 23:05:13
# Convert the first argument to upper case (ASCII letters only).
# printf '%s' protects arguments containing globs or leading dashes,
# which the original unquoted `echo $1` mangled.
strtoupper()
{
    printf '%s\n' "$1" | tr 'a-z' 'A-Z'
}
# Convert the first argument to lower case (ASCII letters only).
# printf '%s' protects arguments containing globs or leading dashes,
# which the original unquoted `echo $1` mangled.
strtolower()
{
    printf '%s\n' "$1" | tr 'A-Z' 'a-z'
}
# Invert the case of every ASCII letter in the argument (upper<->lower).
# NOTE(review): this is named strtolower(), so it silently redefines the
# real lowercase conversion declared just above — almost certainly a
# copy-paste bug. It should probably be called something like
# strswapcase; confirm no callers rely on the shadowed behavior before
# renaming.
strtolower()
{
echo $1 |tr '[a-zA-Z]' '[A-Za-z]'
}
# Emit one random alphanumeric string; length is $1 (default 32 chars).
random_string(){
    length=${1:-32}
    # Read urandom directly (no useless cat), keep only [a-zA-Z0-9],
    # wrap at the requested width and take the first full-width line.
    tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w "${length}" | head -n 1
}
# Forcibly terminate any running apt/dpkg process and clear stale dpkg
# locks so a new package operation can proceed. (Warning preserved from
# the original: an install that is mid-flight may lose data.)
kill_apt()
{
    # pgrep/pkill -x match the exact process name, replacing the fragile
    # `ps aux | grep ... | grep -v grep` chain of the original.
    if pgrep -x 'apt-get|apt|dpkg' > /dev/null; then
        pkill -9 -x 'apt-get|apt|dpkg'
        # Remove leftover lock files and finish any interrupted configuration.
        if [[ -s /var/lib/dpkg/lock-frontend || -s /var/lib/dpkg/lock ]]; then
            rm -f /var/lib/dpkg/lock-frontend
            rm -f /var/lib/dpkg/lock
            dpkg --configure -a
        fi
    fi
}
# Ensure the system has swap: if none is active, create a ~1 GiB swap
# file under /www, enable it, and persist it in /etc/fstab. If enabling
# does not take effect, the fstab entry and the file are rolled back.
Auto_Swap()
{
# Second column of `free`'s Swap row is the total swap size.
swap=$(free |grep Swap|awk '{print $2}')
if [ "${swap}" -gt 1 ];then
echo "Swap total sizse: $swap";
return;
fi
if [ ! -d /www ];then
mkdir /www
fi
swapFile="/www/swap"
# 1025 x 1 MiB blocks of zeros (~1 GiB swap file).
dd if=/dev/zero of=$swapFile bs=1M count=1025
mkswap -f $swapFile
swapon $swapFile
echo "$swapFile swap swap defaults 0 0" >> /etc/fstab
# Re-check: if swap is now reported active, we are done.
swap=`free |grep Swap|awk '{print $2}'`
if [ $swap -gt 1 ];then
echo "Swap total sizse: $swap";
return;
fi
# Activation failed — undo the fstab entry and delete the swap file.
sed -i "/\/www\/swap/d" /etc/fstab
rm -f $swapFile
}
# One-time host initialisation: record the user's login shell, set the
# timezone, and sync the clock via NTP.
# NOTE(review): relies on an externally defined ${TIMEZONE} — confirm it
# is set by the sourcing script before this runs, otherwise the symlink
# target is just /usr/share/zoneinfo/.
sys_init()
{
# Detect the current user's login shell from /etc/passwd.
if [[ -n "$(grep `whoami` /etc/passwd | grep 'zsh$')" ]]; then
USER_SHELL='zsh'
elif [[ -n "$(grep `whoami` /etc/passwd | grep 'bash$')" ]]; then
USER_SHELL='bash'
fi
USER_SHELL_BIN=$(command -v "${USER_SHELL}")
# Set the timezone; only relink /etc/localtime when it differs.
if [[ -f '/etc/timezone' ]]; then
if [[ "$(cat /etc/timezone)" != "${TIMEZONE}" ]]; then
rm -fr /etc/localtime
ln -s /usr/share/zoneinfo/${TIMEZONE} /etc/localtime
fi
else
# No /etc/timezone yet — install tzdata first (apt-based systems only).
apt install --no-install-recommends tzdata locales
rm -fr /etc/localtime
ln -s /usr/share/zoneinfo/${TIMEZONE} /etc/localtime
fi
ntpdate -u pool.ntp.org
# Try to sync the clock against international/regional NTP pools.
ntpdate 0.asia.pool.ntp.org
ntpdate -d cn.pool.ntp.org
}
# Detect the running Linux distribution and its package manager.
# Sets the globals DISTRO and PM exactly like the original if/elif chain:
# entries are tried in order, the first match wins; when nothing matches
# DISTRO becomes 'unknow' (sic, preserved) and PM is left untouched.
# Table columns: /etc/issue pattern | /etc/*-release pattern | DISTRO | PM
get_os_name()
{
    local issue_pat release_pat distro pm matched
    matched=0
    while IFS='|' read -r issue_pat release_pat distro pm; do
        if grep -Eqi "$issue_pat" /etc/issue || grep -Eq "$release_pat" /etc/*-release; then
            DISTRO=$distro
            PM=$pm
            matched=1
            break
        fi
    done <<'EOF'
CentOS|CentOS|CentOS|yum
Aliyun|Aliyun Linux|Aliyun|yum
Amazon Linux|Amazon Linux|Amazon|yum
Fedora|Fedora|Fedora|yum
Oracle Linux|Oracle Linux|Oracle|yum
Red Hat Enterprise Linux|Red Hat Enterprise Linux|RHEL|yum
Debian|Debian|Debian|apt
Ubuntu|Ubuntu|Ubuntu|apt
Raspbian|Raspbian|Raspbian|apt
Deepin|Deepin|Deepin|apt
Mint|Mint|Mint|apt
Kali|Kali|Kali|apt
EOF
    if [ "$matched" -eq 0 ]; then
        DISTRO='unknow'
    fi
}
# Decide on a swap-file size from total RAM and available disk, then
# create and enable /var/swapfile when Enable_Swap stays 'y'.
# Expects globals: PM (package manager), MemTotal (RAM in MB), cur_dir
# (script root), Enable_Swap ('y'/'n'). Sets: Disk_Avail, DD_Count,
# Swap_Total.
Add_Swap()
{
# Make sure some python interpreter exists to run the disk-space helper.
if ! command -v python >/dev/null 2>&1; then
if [ "$PM" = "yum" ]; then
yum -y install python2
elif [ "$PM" = "apt" ]; then
apt-get --no-install-recommends install -y python
fi
fi
# Disk_Avail = available disk space as reported by include/disk.py
# (units defined by that script — presumably GB; confirm).
if command -v python >/dev/null 2>&1; then
Disk_Avail=$(python ${cur_dir}/include/disk.py)
elif command -v python3 >/dev/null 2>&1; then
Disk_Avail=$(python3 ${cur_dir}/include/disk.py)
elif command -v python2 >/dev/null 2>&1; then
Disk_Avail=$(python2 ${cur_dir}/include/disk.py)
fi
# Pick the swap size (MiB dd blocks) by RAM tier; disable swap creation
# when the disk headroom for that tier is too small.
if [ "${MemTotal}" -lt 1024 ]; then
DD_Count='1024'
if [ "${Disk_Avail}" -lt 5 ]; then
Enable_Swap='n'
fi
# NOTE(review): '2028' below looks like a typo for '2048' — confirm intent.
elif [[ "${MemTotal}" -ge 1024 && "${MemTotal}" -le 2048 ]]; then
DD_Count='2028'
if [ "${Disk_Avail}" -lt 13 ]; then
Enable_Swap='n'
fi
elif [[ "${MemTotal}" -ge 2048 && "${MemTotal}" -le 4096 ]]; then
DD_Count='4096'
if [ "${Disk_Avail}" -lt 17 ]; then
Enable_Swap='n'
fi
elif [[ "${MemTotal}" -ge 4096 && "${MemTotal}" -le 16384 ]]; then
DD_Count='8192'
if [ "${Disk_Avail}" -lt 19 ]; then
Enable_Swap='n'
fi
elif [[ "${MemTotal}" -ge 16384 ]]; then
DD_Count='8192'
if [ "${Disk_Avail}" -lt 27 ]; then
Enable_Swap='n'
fi
fi
Swap_Total=$(free -m | grep Swap | awk '{print $2}')
# Only proceed when enabled, existing swap is small, and no swapfile yet.
if [[ "${Enable_Swap}" = "y" && "${Swap_Total}" -le 512 && ! -s /var/swapfile ]]; then
echo "Add Swap file..."
# Raise swappiness from 0 so the new swap actually gets used.
[ $(cat /proc/sys/vm/swappiness) -eq 0 ] && sysctl vm.swappiness=10
dd if=/dev/zero of=/var/swapfile bs=1M count=${DD_Count}
chmod 0600 /var/swapfile
echo "Enable Swap..."
/sbin/mkswap /var/swapfile
/sbin/swapon /var/swapfile
if [ $? -eq 0 ]; then
# Persist in fstab only when not already listed there.
[ `grep -L '/var/swapfile' '/etc/fstab'` ] && echo "/var/swapfile swap swap defaults 0 0" >>/etc/fstab
/sbin/swapon -s
else
# swapon failed — do not leave a useless file behind.
rm -f /var/swapfile
echo "Add Swap Failed!"
fi
fi
}
# Interactively remove the PHP open_basedir restriction (.user.ini) from
# a website root, disable the matching PHP_ADMIN_VALUE fastcgi param, and
# reload php-fpm/nginx. Loops until an existing directory is entered.
_remove_basedir_restrict()
{
    while :; do
        # -r: keep backslashes in the entered path literal.
        read -r -p "Enter website root directory: " website_root
        if [ -d "${website_root}" ]; then
            if [ -f "${website_root}/.user.ini" ]; then
                # .user.ini is set immutable at creation; clear that first.
                chattr -i "${website_root}/.user.ini"
                rm -f "${website_root}/.user.ini"
                sed -i 's/^fastcgi_param PHP_ADMIN_VALUE/#fastcgi_param PHP_ADMIN_VALUE/g' /usr/local/nginx/conf/fastcgi.conf
                /etc/init.d/php-fpm restart
                /etc/init.d/nginx reload
                echo "done."
            else
                echo "${website_root}/.user.ini is not exist!"
            fi
            break
        else
            echo "${website_root} is not directory or not exist!"
        fi
    done
}
# Create a 2 GiB swap file when none is configured and RAM <= 2048 MB.
# Expects globals: Swap (current swap MB), Mem (RAM MB), CWARNING/CEND
# terminal color codes.
add_swap()
{
    if [ -z "$(grep 'swap' /etc/fstab)" ] && [ "${Swap}" == '0' ] && [ "${Mem}" -le 2048 ]; then
        echo "${CWARNING}Add Swap file, It may take a few minutes... ${CEND}"
        dd if=/dev/zero of=/swapfile count=2048 bs=1M
        # Restrict permissions BEFORE enabling the file as swap; the
        # original chmod'ed after swapon, leaving a world-readable window.
        chmod 600 /swapfile
        mkswap /swapfile
        swapon /swapfile
        [ -z "$(grep swapfile /etc/fstab)" ] && echo '/swapfile swap swap defaults 0 0' >> /etc/fstab
    fi
}
# Probe CheckURL and restart php-fpm when it answers HTTP 502 (bad gateway).
# Intended to run periodically, e.g. from cron.
check_502()
{
    CheckURL="http://www.xxx.com"
    # -m / --connect-timeout bound the probe at 10s each; -w prints only
    # the status code ('%{http_code}' is quoted so the shell never touches it).
    STATUS_CODE=$(curl -o /dev/null -m 10 --connect-timeout 10 -s -w '%{http_code}' "$CheckURL")
    #echo "$CheckURL Status Code:\t$STATUS_CODE"
    if [ "$STATUS_CODE" = "502" ]; then
        /etc/init.d/php-fpm restart
    fi
}
| true
|
0b7274bc7e85127489781f42b846298f29165679
|
Shell
|
jinlongchen/stappler-deps
|
/android/jpeg-android.sh
|
UTF-8
| 1,209
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# Android/ARM, armeabi (ARMv5TE soft-float), Android 2.2+ (Froyo)
#
# Cross-compile libjpeg as a static library for several Android ABIs,
# using prebuilt standalone toolchains expected under $ROOT/toolchains/<abi>.
# Installs headers/libs under ./<abi>/ next to the build directory.
CFLAGS="-Os"
CONFFLAGS="--enable-shared=no --enable-static=yes --with-pic=yes"
# Saved so each Compile invocation can restore a clean PATH afterwards.
ORIGPATH=$PATH
LIBNAME=jpeg
ROOT=`pwd`
# Compile <abi> <ndk-api-level> <extra-cflags> <extra-ldflags>
# Configures and builds ../../src/jpeg in a throwaway ./jpeg directory
# with the toolchain for <abi>, then removes the build dir.
# NOTE(review): $2 is stored in NDKABI but never referenced below — the
# API level appears to be baked into the toolchain itself; confirm.
Compile () {
mkdir -p $LIBNAME
cd $LIBNAME
ARCH=$1
NDKABI=$2
# Pick the configure host triple for the requested ABI (ARM is the default).
TARGET=arm-linux-androideabi
if [ "$1" == "x86" ]; then
TARGET=i686-linux-android
fi
if [ "$1" == "arm64-v8a" ]; then
TARGET=aarch64-linux-android
fi
if [ "$1" == "x86_64" ]; then
TARGET=x86_64-linux-android
fi
TOOLCHAIN=$ROOT/toolchains/$1
export PATH=$TOOLCHAIN/bin:$PATH
NDKP=$TOOLCHAIN/bin/$TARGET
NDKF="$CFLAGS --sysroot $TOOLCHAIN/sysroot"
NDKARCH=$3
NDKLDFLAGS=$4
../../src/$LIBNAME/configure $CONFFLAGS \
CC=$NDKP-clang CFLAGS="$NDKF $NDKARCH" \
LD=$NDKP-ld LDFLAGS="$NDKLDFLAGS" \
--host=$TARGET --with-sysroot="$TOOLCHAIN/sysroot"\
--includedir=`pwd`/../$1/include \
--libdir=`pwd`/../$1/lib \
CPPFLAGS="-I`pwd`/../$1/include" \
LDFLAGS="-L`pwd`/../$1/lib" \
--prefix=`pwd`
make
make install
cd -
# Remove the build dir and restore the original PATH for the next ABI.
rm -rf $LIBNAME
export PATH=$ORIGPATH
}
# Build every supported ABI: abi, API level, arch cflags, ldflags.
Compile armeabi-v7a 14 '-march=armv7-a -mfloat-abi=softfp -mfpu=vfpv3-d16' '-march=armv7-a -Wl,--fix-cortex-a8'
Compile x86 14 '' ''
Compile arm64-v8a 21 '' ''
Compile x86_64 21 '' ''
| true
|
885cb08421944adce84b578a2546638fe9cc5400
|
Shell
|
supar/3proxy-debian-package
|
/debian/postinst
|
UTF-8
| 482
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/sh
# postinst script for the 3proxy package: create the dedicated service
# account and hand ownership of config, binary and log dir to it.
#
# see: dh_installdeb(1)
set -e
SYSTEMNAME=proxy3
# Create the system account on first install (`id -u` fails if missing).
# Fixed: the original redirected to /dev/nul (typo), creating a stray file.
if ! id -u "$SYSTEMNAME" > /dev/null 2>&1; then
	adduser --system --no-create-home --disabled-login --group "$SYSTEMNAME"
fi
# Avoid UID/GID as variable names: UID is a read-only variable in bash.
P_UID=$(id -u "$SYSTEMNAME")
P_GID=$(id -g "$SYSTEMNAME")
chown "$P_UID:$P_GID" -R /etc/3proxy
chown "$P_UID:$P_GID" /usr/sbin/3proxy
chown "$P_UID" /var/log/3proxy
# Substitute the runtime uid/gid into the shipped configuration.
sed -i.bak "s,%SETUID%,$P_UID,g" /etc/3proxy/main.cfg
sed -i.bak "s,%SETGID%,$P_GID,g" /etc/3proxy/main.cfg
| true
|
6886fdf002a779927dad496c2627779d7059b91a
|
Shell
|
ansell/ala-gbif-merge
|
/reprocess-acacia-longifolia.sh
|
UTF-8
| 4,969
| 3.078125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Reprocess the Acacia longifolia occurrence data: map the ALA and GBIF
# source CSVs into Darwin Core Archives, then merge them, with and
# without non-vocabulary terms. Relies on sibling checkouts of the
# csvsum and dwca-utils tool suites (paths below).
set -e
set -x
#INPUT_FOLDER="/media/sf_HostDesktop/LeeBelbin_ALAGBIFMerge/FromLee/"
INPUT_FOLDER="./Source-Files/"
OUTPUT_FOLDER="./"
mkdir -p "${OUTPUT_FOLDER}"
# Helper tool checkouts, relative to this script's working directory.
CSVSUM_PATH="../csvsum/"
DWCA_UTILS_PATH="../dwca-utils/"
# Stage 1: Acacia longifolia from ALA
# Copy the source CSV, summarise it, generate a DwC-A meta.xml, zip the
# archive and run dwcacheck over it.
ACACIA_ALA_OUTPUT_FOLDER="${OUTPUT_FOLDER}Mapped-AcaciaLongifolia-ALA/"
rm -rf "${ACACIA_ALA_OUTPUT_FOLDER}"
mkdir -p "${ACACIA_ALA_OUTPUT_FOLDER}"
# catalogNumber is in column 12 (0-based index makes that 11) in the ALA Acacia longifolia data
ACACIA_ALA_CORE_ID_INDEX="11"
cp "${INPUT_FOLDER}Acacia longifolia filtered and sorted/Acacia longifolia ALA data sorted.csv" "${ACACIA_ALA_OUTPUT_FOLDER}Source-AcaciaLongifolia-ALA.csv"
${CSVSUM_PATH}csvsum --input "${ACACIA_ALA_OUTPUT_FOLDER}Source-AcaciaLongifolia-ALA.csv" --output "Statistics-AcaciaLongifolia-ALA.csv" --show-sample-counts true --samples 1000
${DWCA_UTILS_PATH}csv2dwca --input "${ACACIA_ALA_OUTPUT_FOLDER}Source-AcaciaLongifolia-ALA.csv" --output "${ACACIA_ALA_OUTPUT_FOLDER}meta.xml" --core-id-index "${ACACIA_ALA_CORE_ID_INDEX}" --header-line-count 1 --show-defaults true
zip -rqj "${OUTPUT_FOLDER}Mapped-AcaciaLongifolia-ALA-Archive.zip" "${ACACIA_ALA_OUTPUT_FOLDER}"*
mkdir -p "./dwcacheck-AcaciaLongifolia-ALA-analysis/"
${DWCA_UTILS_PATH}dwcacheck --input "${OUTPUT_FOLDER}Mapped-AcaciaLongifolia-ALA-Archive.zip" --output "./dwcacheck-AcaciaLongifolia-ALA-analysis/"
# Stage 2: Acacia longifolia from GBIF
# Same as stage 1, plus a field-name mapping step (GBIF column names are
# remapped via csvmap before the archive is built).
ACACIA_GBIF_OUTPUT_FOLDER="${OUTPUT_FOLDER}Mapped-AcaciaLongifolia-GBIF/"
rm -rf "${ACACIA_GBIF_OUTPUT_FOLDER}"
mkdir -p "${ACACIA_GBIF_OUTPUT_FOLDER}"
# catalogNumber is in column 6 (0-based index makes that 5) in the GBIF Acacia longifolia data
ACACIA_GBIF_CORE_ID_INDEX="5"
cp "${INPUT_FOLDER}Acacia longifolia filtered and sorted/Acacia longifolia GBIF data.csv" "${ACACIA_GBIF_OUTPUT_FOLDER}Source-AcaciaLongifolia-GBIF.csv"
# Remove the GBIF known field prefix to reduce the size of the mapping field names
sed -i 's/occurrence_hdfs\.//g' "${ACACIA_GBIF_OUTPUT_FOLDER}Source-AcaciaLongifolia-GBIF.csv"
${CSVSUM_PATH}csvsum --input "${ACACIA_GBIF_OUTPUT_FOLDER}Source-AcaciaLongifolia-GBIF.csv" --output "Statistics-AcaciaLongifolia-GBIF.csv" --show-sample-counts true --samples 1000 --output-mapping "${ACACIA_GBIF_OUTPUT_FOLDER}Mapping-AcaciaLongifolia-GBIF.csv"
${CSVSUM_PATH}csvmap --input "${ACACIA_GBIF_OUTPUT_FOLDER}Source-AcaciaLongifolia-GBIF.csv" --output "${ACACIA_GBIF_OUTPUT_FOLDER}Mapped-AcaciaLongifolia-GBIF.csv" --mapping "Mapping-AcaciaLongifolia-GBIF.csv"
${CSVSUM_PATH}csvsum --input "${ACACIA_GBIF_OUTPUT_FOLDER}Mapped-AcaciaLongifolia-GBIF.csv" --output "Statistics-Mapped-AcaciaLongifolia-GBIF.csv" --show-sample-counts true --samples 1000
${DWCA_UTILS_PATH}csv2dwca --input "${ACACIA_GBIF_OUTPUT_FOLDER}Mapped-AcaciaLongifolia-GBIF.csv" --output "${ACACIA_GBIF_OUTPUT_FOLDER}meta.xml" --core-id-index "${ACACIA_GBIF_CORE_ID_INDEX}" --header-line-count 1 --show-defaults true --match-case-insensitive true
zip -rqj "${OUTPUT_FOLDER}Mapped-AcaciaLongifolia-GBIF-Archive.zip" "${ACACIA_GBIF_OUTPUT_FOLDER}"*
mkdir -p "./dwcacheck-AcaciaLongifolia-GBIF-analysis/"
${DWCA_UTILS_PATH}dwcacheck --input "${OUTPUT_FOLDER}Mapped-AcaciaLongifolia-GBIF-Archive.zip" --output "./dwcacheck-AcaciaLongifolia-GBIF-analysis/"
# Stage 3 : Merge the ALA and GBIF copies for AcaciaLongifolia
ACACIA_MERGED_OUTPUT_FOLDER="${OUTPUT_FOLDER}Mapped-AcaciaLongifolia-Merged/"
rm -rf "${ACACIA_MERGED_OUTPUT_FOLDER}"
mkdir -p "${ACACIA_MERGED_OUTPUT_FOLDER}"
${DWCA_UTILS_PATH}dwcamerge --input "${OUTPUT_FOLDER}Mapped-AcaciaLongifolia-ALA-Archive.zip" --other-input "${OUTPUT_FOLDER}Mapped-AcaciaLongifolia-GBIF-Archive.zip" --output "${ACACIA_MERGED_OUTPUT_FOLDER}"
zip -rqj "${OUTPUT_FOLDER}Mapped-AcaciaLongifolia-Merged-Archive.zip" "${ACACIA_MERGED_OUTPUT_FOLDER}merged-archive/"*
mkdir -p "./dwcacheck-AcaciaLongifolia-Merged-analysis/"
${DWCA_UTILS_PATH}dwcacheck --input "${OUTPUT_FOLDER}Mapped-AcaciaLongifolia-Merged-Archive.zip" --output "./dwcacheck-AcaciaLongifolia-Merged-analysis/"
# Stage 4 : Merge the ALA and GBIF copies for AcaciaLongifolia leaving out non-vocabulary terms
ACACIA_MERGED_CLEAN_OUTPUT_FOLDER="${OUTPUT_FOLDER}Mapped-AcaciaLongifolia-Merged-Clean/"
rm -rf "${ACACIA_MERGED_CLEAN_OUTPUT_FOLDER}"
mkdir -p "${ACACIA_MERGED_CLEAN_OUTPUT_FOLDER}"
${DWCA_UTILS_PATH}dwcamerge --input "${OUTPUT_FOLDER}Mapped-AcaciaLongifolia-ALA-Archive.zip" --other-input "${OUTPUT_FOLDER}Mapped-AcaciaLongifolia-GBIF-Archive.zip" --output "${ACACIA_MERGED_CLEAN_OUTPUT_FOLDER}" --remove-non-vocabulary-terms true
zip -rqj "${OUTPUT_FOLDER}Mapped-AcaciaLongifolia-Merged-Clean-Archive.zip" "${ACACIA_MERGED_CLEAN_OUTPUT_FOLDER}merged-archive/"*
mkdir -p "./dwcacheck-AcaciaLongifolia-Merged-Clean-analysis/"
${DWCA_UTILS_PATH}dwcacheck --input "${OUTPUT_FOLDER}Mapped-AcaciaLongifolia-Merged-Clean-Archive.zip" --output "./dwcacheck-AcaciaLongifolia-Merged-Clean-analysis/"
| true
|
0a1948e8cd71b3cfcaa3fb0a01a0b0d293b0517e
|
Shell
|
jmandrade/project_member
|
/project_member.sh
|
UTF-8
| 403
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
# List every GCP project whose IAM policy mentions the currently active
# gcloud account.
EMAIL=$(gcloud auth list --filter=status:ACTIVE --format="value(account)")
echo "Active user account is $EMAIL"
echo "Obtaining list of projects with member access..."
for PROJECT in $(\
  gcloud projects list \
    --format="value(projectId)" )
do
  # Print the project id once if the active account appears anywhere in
  # its IAM policy JSON. "$PROJECT" is quoted to survive odd characters.
  gcloud projects get-iam-policy "$PROJECT" --format="json" | \
    awk -v project="$PROJECT" -v email="$EMAIL" '$0 ~ email {print project}'
done
| true
|
310936bbdc9414c095025d6c0d8a0215210b3f88
|
Shell
|
OrenBochman/knesset-data-pipelines
|
/devops/db_backup/provision_resources.sh
|
UTF-8
| 1,804
| 3.515625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Provision all Google Cloud resources needed for DB backup/restore:
# a service account, its secret key file, and a storage bucket with
# minimal permissions. Prints `export` lines on stdout for the caller
# to eval; all progress messages go to stderr.
if [ "${1}" == "" ] || [ "${2}" == "" ]; then
echo "provision all required google cloud resources to handle db backup and restore"
echo "usage: devops/db_backup/provision_resources.sh <SERVICE_ACCOUNT_NAME> <STORAGE_BUCKET_NAME>"
exit 1
fi
if [ "${CLOUDSDK_CORE_PROJECT}" == "" ] || [ "${CLOUDSDK_COMPUTE_ZONE}" == "" ]; then
echo " > Please set CLOUDSDK_CORE_PROJECT and CLOUDSDK_COMPUTE_ZONE environment variables for your google project"
exit 2
fi
export SERVICE_ACCOUNT_NAME="${1}"
export STORAGE_BUCKET_NAME="${2}"
export SERVICE_ACCOUNT_ID="${SERVICE_ACCOUNT_NAME}@${CLOUDSDK_CORE_PROJECT}.iam.gserviceaccount.com"
# Reuse a caller-provided SECRET_TEMPDIR; otherwise create a fresh one.
export SECRET_TEMPDIR="${SECRET_TEMPDIR:-`mktemp -d`}"
echo " > creating service account ${SERVICE_ACCOUNT_NAME}" >&2
gcloud iam service-accounts create "${SERVICE_ACCOUNT_NAME}" >&2
echo " > storing secret key at ${SECRET_TEMPDIR}/key" >&2
gcloud iam service-accounts keys create "--iam-account=${SERVICE_ACCOUNT_ID}" "${SECRET_TEMPDIR}/key" >&2
echo " > creating storage bucket gs://${STORAGE_BUCKET_NAME}" >&2
# `|| true`: the bucket may already exist; that is not an error here.
gsutil mb "gs://${STORAGE_BUCKET_NAME}" >&2 || true
echo " > setting minimal required permissions for the service account on the bucket" >&2
gsutil iam ch -d "serviceAccount:${SERVICE_ACCOUNT_ID}" "gs://${STORAGE_BUCKET_NAME}" >&2
gsutil iam ch "serviceAccount:${SERVICE_ACCOUNT_ID}:objectCreator,objectViewer,objectAdmin" "gs://${STORAGE_BUCKET_NAME}" >&2
echo " > done" >&2
echo "export SECRET_KEY_FILE=${SECRET_TEMPDIR}/key"
echo "export SERVICE_ACCOUNT_NAME=${SERVICE_ACCOUNT_NAME}"
echo "export SERVICE_ACCOUNT_ID=${SERVICE_ACCOUNT_ID}"
echo "export STORAGE_BUCKET_NAME=${STORAGE_BUCKET_NAME}"
echo "export CLOUDSDK_CORE_PROJECT=${CLOUDSDK_CORE_PROJECT}"
echo "export CLOUDSDK_COMPUTE_ZONE=${CLOUDSDK_COMPUTE_ZONE}"
| true
|
a268cb29bd7acdbbd52b279d969d94c7e3308bfc
|
Shell
|
adityareddy93/4103-102-OpSys-Kankanala
|
/assignments/homework-01/sum_args.sh
|
UTF-8
| 88
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/bash
# sum_args.sh — print the sum of all integer command-line arguments.
# Fixed: banner said command_args.sh; legacy `expr` replaced by $((...)).
echo sum_args.sh
b=0
# "$@" keeps each argument intact (unquoted $* re-splits on whitespace).
for i in "$@"
do
  b=$(( b + i ))
done
echo "$b"
| true
|
db0cf8d1ce32d04cdf32f82661ae295de78fe85b
|
Shell
|
eranfuchs1/DI_Bootcamp
|
/Week4/Day3/daily_challenge/command.sh
|
UTF-8
| 294
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
# Verify that encrypt followed by decrypt is the identity for every shift
# value in -200..200, using untitled.py against test.txt.
#if [ -z "$1" ]; then echo "usage: $0 <shift value>"; exit 1; fi
# Renamed from `test` — that name shadows the shell's test builtin.
check_shift () {
	if [ -z "$(diff <(cat test.txt | python untitled.py encrypt "$1" | python untitled.py decrypt "$1") test.txt)" ]; then echo 'works'; else echo "doesn't work"; fi;
}
for i in {-200..200}; do echo "$(check_shift "$i") with $i"; done
| true
|
4c9c043dcfdc2e88b308a32d9c9805e184f8fa0a
|
Shell
|
kiriakos/kandc-stash
|
/app/protected/tests/test-on-filechange.sh
|
UTF-8
| 94
| 2.6875
| 3
|
[] |
no_license
|
#! /bin/bash
# Re-run the test suite every time anything under the project root
# (two levels above this script) changes.
dir=$(dirname "$0")
cd "$dir" || exit 1
# inotifywait blocks until a filesystem event occurs, then we re-test.
while inotifywait ../../
do
	# "$@" forwards the original arguments verbatim (unquoted $@ re-splits).
	./test.sh "$@"
done
| true
|
c252bcbf740ed9a0dbb77ab42e66f2bdfe9b5b80
|
Shell
|
groffhibbitz/music_tools
|
/create_album_folders.sh
|
UTF-8
| 585
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# Sort loose MP3s in the current directory into per-album folders based
# on their ID3 album tag (TAL/TALB frames).
# IFS excludes space so album names containing spaces survive expansion.
IFS=$'\t\n'
#i="340498_Waves_And_Sun_African_Waves_Mix_Dorfmeister.mp3"
for i in *.mp3; do
	# If this mp3 only has id3v1 info, convert the tag to id3v2 first.
	# All expansions are quoted: `[ -z $tag ]` broke on multi-word output.
	tag=$(id3v2 -l "$i" | grep "id3v2")
	if [ -z "$tag" ]; then
		echo "converting id3 tag"
		id3v2 -C "$i"
	fi
	# The album frame is listed as "TAL (...)" or "TALB (...)" depending
	# on tag version; try both before giving up on this file.
	album=$(id3v2 -l "$i" | grep "TAL (" | sed 's/.*: //')
	if [ -z "$album" ]; then
		album=$(id3v2 -l "$i" | grep "TALB (" | sed 's/.*: //')
		if [ -z "$album" ]; then
			echo "could not find album for: $i"
			continue
		fi
	fi
	# Replaces the fragile backtick/`$_` trick with an explicit move.
	mkdir -p "$album" && mv "$i" "$album/"
done
| true
|
7a8d6acfbef2956553f7327900ad69eab951b1dd
|
Shell
|
ChristinaB/Incubating-a-DREAM
|
/Sauk_DHSVM_modeldata/formatted_stn_Sauklist_DHSVM.sh
|
UTF-8
| 944
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/sh
# use awk to format input for DHSVM
## run command example "./formatted_stn_Sauklist_DHSVM.sh Sauk_94CentroidVICpoints_UTM.csv Sauk_94CentroidVICpoints_inputlist.txt"
# gmauger,cband Jan-Feb 2016
# cband, cbev Nov 2016
incsv=$1
outfl=$2
# Require both arguments: with an empty $incsv the awk below would fall
# back to reading stdin and hang.
if [ -z "$incsv" ] || [ -z "$outfl" ]; then
    echo "usage: $0 <input.csv> <output.txt>" >&2
    exit 1
fi
# clear output file, if exists already:
rm -f "$outfl"
# skip header line, format output:
# *** NOTE: assuming an 8-col csv file, with column order:
# FID,OBJECTID_1,OBJECTID,LAT,LONG_,ClimateDEM,NORTHING,EASTING
awk -F, '(NR>1){stnnum=$1+1; \
printf("Station Name %d = data_%.5f_%.5f\n",stnnum,$3,$4); \
printf("North Coordinate %d = %.6f\n",stnnum,$6); \
printf("East Coordinate %d = %.6f\n",stnnum,$7); \
printf("Elevation %d = %.0f\n",stnnum,$5); \
printf("Station File %d = ",stnnum); \
printf("/civil/shared/ecohydrology/SkagitSauk/DHSVM-Glacier/DHSVM/inputLivneh2013_WRFbc_historic/bc_2_WRF_delta_1500to3000/data_%.5f_%.5f\n",$3,$4); \
printf("\n");}' "$incsv" > "$outfl"
| true
|
f19fc2994e71bd15f556b505d20eab8771a374b5
|
Shell
|
mikowiec/hs-collection1
|
/hs-mini-httpd/run.sh
|
UTF-8
| 200
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/sh
# Rebuild httpd.hs in a loop: serve on a successful compile, otherwise
# wait for a keypress before trying again.
while true; do
	echo Compiling.
	# Branch directly on ghc's exit status instead of testing $?.
	if ghc --make httpd.hs; then
		echo Starting.
		./httpd -d "$HOME/public_html"
		echo Restart!
		sleep 1
	else
		# Compile failed: pause until the user presses enter.
		read -r r
	fi
done
| true
|
deb85699e02b353349d2302e764e3188b08b7757
|
Shell
|
rIIh/portfolio-mevn
|
/deploy-traefik.sh
|
UTF-8
| 650
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/sh
# Build, push and deploy the traefik compose stack to a remote docker
# host over SSH. Usage: ./deploy-traefik.sh <remote> [registry]
# Compose file path
COMPOSE=docker-compose.traefik.yml
# Remote url first argument
export REMOTE=${1:?"Provide remote"}
# Registry url second argument. Default is url registry.yourremote.com
# Passed to compose file as variable
export REGISTRY=${2:-"registry.${REMOTE}"}/
echo Building images
docker-compose -f "$COMPOSE" build || exit $?
echo Pushing images to host registry
docker-compose -f "$COMPOSE" push || exit $?
echo Pull on remote?
docker-compose -f "$COMPOSE" -H "ssh://root@$REMOTE" pull || exit $?
echo Starting container on remote "$REMOTE"
docker-compose -f "$COMPOSE" -H "ssh://root@$REMOTE" up -d --force-recreate --no-build || exit $?
| true
|
8836e95580de0dd5baf78dbad777c1081e897d74
|
Shell
|
elifesciences/data-pipeline-sql-prototype
|
/airflow/install-dev.sh
|
UTF-8
| 384
| 2.90625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Recreate the pipenv development environment and install the local
# csv-generator and db-manager packages into it in editable mode.
set -e
# removing old virtual environment, if any
pipenv --rm || true
rm -rf venv || true
# install develop and default dependencies
pipenv install --dev
# create venv link for convenience (quoted: the venv path may contain spaces)
ln -s "$(pipenv --venv)" venv
# install csv-generator and db-manager from source
source venv/bin/activate
cd ../csv-generator
pip install -e .
cd ../db-manager
pip install -e .
| true
|
d9503af75e7c19d083ecd258514acd018e908c97
|
Shell
|
elitonluiz1989/ubuntu-instalador
|
/sources/criarProjeto.sh
|
UTF-8
| 1,307
| 3.609375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash

# Interactively create a new project directory under ${dir_projetos} and
# optionally set up a VirtualHost and git (init or clone) for it.
# Calls external helpers: menu, adicionarVirtualhost, iniciarGit, clonarGit.
criarProjeto() {
    echo "---> Defina o nome do projeto:"
    read -r projeto
    if [ -z "${projeto}" ]; then
        echo "---> Nome do projeto está vazio."
        echo "---> Deseja reinserí-lo? (S/n)"
        read -r reiniciar
        if [ -z "${reiniciar}" ] || [ "${reiniciar,,}" == "s" ]; then
            # Fixed: previously called the undefined "criarProjetos" (typo),
            # so the retry path crashed instead of recursing.
            criarProjeto
        else
            echo "--> Deseja voltar ao menu inicial ou sair? (S=sim/n=sair)"
            read -r opcao
            if [ -z "${opcao}" ] || [ "${opcao,,}" == "s" ]; then
                menu
            else
                exit
            fi
        fi
    else
        # Project names are normalised to lower case.
        projeto="${projeto,,}"
        dir_projeto=${dir_projetos}/${projeto}
        if [ -d "${dir_projeto}" ]; then
            echo "--> A pasta do projeto ${projeto} já existe."
            echo "--> Talvez o projeto já tenha sido criado."
            echo "--> Deseja continuar? (S/n)"
            read -r decisao
            [ -z "${decisao}" ] || [ "${decisao,,}" == "s" ] || exit
        else
            mkdir -pv "$dir_projeto"
        fi
        # Empty answer or "s" means yes for each optional follow-up step.
        echo "--> Deseja criar um VirtualHost? (S/n)";
        read -r decisao
        [ -z "${decisao}" ] || [ "${decisao,,}" == "s" ] && adicionarVirtualhost "$projeto"
        echo "--> Deseja iniciar versionamento com GIT? (S/n)"
        read -r decisao
        [ -z "${decisao}" ] || [ "${decisao,,}" == "s" ] && iniciarGit "$projeto"
        echo "--> Deseja clonar projeto com GIT? (S/n)"
        read -r decisao
        [ -z "${decisao}" ] || [ "${decisao,,}" == "s" ] && clonarGit "$dir_projeto"
    fi
}
| true
|
872fa33a7daedb7032bb752f41299ba2a1d82e74
|
Shell
|
metalshreds/rpi_urine_monitoring
|
/installer.sh
|
UTF-8
| 2,331
| 4.0625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# This is an installer script for Rpi Urine Monitor station
# It is based on the MagicMirror2 installer script.
# Flow: check hardware -> apt update -> pip helper tools -> git clone,
# then print run instructions. ANSI \e[..m sequences colorize output.
echo -e "\e[0m"
echo ''
echo 'Installing Rpi Urine Monitor'
echo ''
echo -e "\e[0m"
# Determine which Pi is running.
ARM=$(uname -m)
# Check the Raspberry Pi version.
# Only armv7l (Raspberry Pi 2/3) is supported; abort on anything else.
if [ "$ARM" != "armv7l" ]; then
echo -e "\e[91mSorry, your Raspberry Pi is not supported."
echo -e "\e[91mPlease run Rpi Urine Monitor on a Raspberry Pi 2 or 3."
exit;
fi
# Define helper methods.
# version_gt A B: true when A sorts strictly after B in version order.
function version_gt() { test "$(echo "$@" | tr " " "\n" | sort -V | head -n 1)" != "$1"; }
# command_exists CMD: true when CMD resolves to something runnable.
function command_exists () { type "$1" &> /dev/null ;}
# Update before first apt-get
echo -e "\e[96mUpdating packages ...\e[90m"
sudo apt-get update || echo -e "\e[91mUpdate failed, carrying on installation ...\e[90m"
# Installing helper tools
echo -e "\e[96mInstalling helper tools ...\e[90m"
sudo pip install ipython pyserial mettler_toledo_device || exit
# Download Rpi Urine Monitor script, and check if it has already been downloaded
cd ~ || exit #if cant change directory something is quite wrong...
# Refuse to overwrite an existing checkout; user should `git pull` instead.
if [ -d "$HOME/rpi_urine_monitoring" ] ; then
echo -e "\e[93mThe rpi_urine_monitoring appears to already be in this raspberry pi."
echo -e "To prevent overwriting, the installer will be aborted."
echo ""
echo -e "To check for any updates \e[1m\e[97mgit pull\e[0m from the ~/rpi_urine_monitoring directory."
echo ""
exit;
fi
echo -e "\e[96mCloning Rpi Urine Monitor Script...\e[90m"
if git clone --depth=1 https://github.com/metalshreds/rpi_urine_monitoring.git; then
echo -e "\e[92mCloning Rpi Urine Monitor Script Done!\e[0m"
else
echo -e "\e[91mUnable to clone Rpi Urine Monitor Script."
exit;
fi
echo -e "\e[92m"
echo " "
# echo -e "\e[92mWe're ready! Run \e[1m\e[97mDISPLAY=:0 npm start\e[0m\e[92m from the ~/MagicMirror directory to start your MagicMirror.\e[0m"
echo -e "The Rpi Urine Monitor script and dependencies have been successfully installed!"
echo -e "Take the mettler toledo scale out of standby mode, navigate to the script directory"
echo -e " \e[97m~/rpi_urine_monitor\e[92m and run the script:"
echo -e "\e[97mpython rpi_urine_monitor.py\e[92m to run on this raspberry pi"
echo -e "Otherwise see other intructions on the website to run remotely through PuTTY"
echo " "
echo " "
echo -e "\e[0m"
| true
|
33874c40941132240eefda8b3d24bf1733b7ce44
|
Shell
|
vbba/empty
|
/tools/trailer_export.sh
|
UTF-8
| 895
| 2.828125
| 3
|
[] |
no_license
|
# For each cover_info_143* file (in glob order), report:
#   cidNum    - number of album lines in the current file
#   commonNum - albums shared with the previous file
#   delNum    - albums only in the previous file
#   addNum    - albums only in the current file
prefile="tmp_prefile";
# Seed an empty "previous" list so the first iteration diffs cleanly instead
# of cat-ing a file that may not exist.
: > "$prefile"
printf "%10s %20s\n" "cidNum" "当前文件专辑个数";
printf "%10s %20s\n" "commonNum" "和上个文件专辑相同的个数";
printf "%10s %20s\n" "delNum" "只在上个文件存在的专辑个数";
# BUG FIX: this legend row was mislabelled "cidNum"; it describes addNum.
printf "%10s %20s\n" "addNum" "只在当前文件存在的专辑个数";
printf "%40s %10s %10s %10s %10s\n" "fileName" "cidNum" "commonNum" "delNum" "addNum";
for file in cover_info_143* ; do
    cidNum=$(cat "$file" | wc -l);
    sumNum=$(cat "$file" "$prefile" | sort -u | wc -l);
    cat "$file" | sort -u > tmp_file;
    cat "$prefile" | sort -u > tmp_prefile;
    delNum=$(diff tmp_file tmp_prefile | grep ">" | wc -l);
    addNum=$(diff tmp_file tmp_prefile | grep "<" | wc -l);
    # union = common + del + add  =>  common = union - del - add
    commonNum=$(($sumNum - $delNum - $addNum));
    printf "%40s %10s %10s %10s %10s\n" "$file" "$cidNum" "$commonNum" "$delNum" "$addNum";
    prefile=$file;
done
| true
|
a2b0361051661f6bcbfa542e8f71c9b5dffffb73
|
Shell
|
jerome9189/lotus-leaf
|
/scripts/test.sh
|
UTF-8
| 1,676
| 3.640625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# A script that runs unit tests.
# Runs the server and DB test suites, each inside its own virtualenv,
# accumulating coverage into one .coverage file, then renders an HTML report.

# Absolute path to the repository root (parent of this script's directory).
ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
# shflags provides the DEFINE_*/FLAGS command-line parsing used below.
source "${ROOT}/scripts/shflags"
DEFINE_boolean "debug" ${FLAGS_FALSE} "Whether to run unit tests in a debugger." "d"
DEFINE_string "envroot" "${ROOT}/src/server/env" "The server environment root." "e"
DEFINE_string "db_envroot" "${ROOT}/src/db/env" "The DB environment root." "D"
DEFINE_string "coverage_output_dir" "${ROOT}/dist/test/coverage" "The directory where coverage reports will be written." "c"
FLAGS "$@" || exit $?
eval set -- "${FLAGS_ARGV}"
set -e
set -o posix
echo -e "\e[1;45mRunning tests...\e[0m"
# Start from a clean coverage database; both suites append via "coverage run -a".
rm -f "$ROOT/.coverage"
# Run server tests.
echo -e "\e[1;33mRunning server tests...\e[0m"
# Each suite runs inside its own virtualenv (activate ... deactivate).
source "${FLAGS_envroot}/bin/activate"
if [ ${FLAGS_debug} -eq ${FLAGS_TRUE} ]; then
  # Run tests in a debugger.
  python -m pdb src/server/testsuite.py
else
  # Run tests with code coverage.
  coverage run -a --omit="src/server/env/*,src/server/*_test.py,src/server/test*.py" \
    src/server/testsuite.py
fi
deactivate
# Run DB tests.
echo -e "\e[1;33mRunning DB tests...\e[0m"
source "${FLAGS_db_envroot}/bin/activate"
if [ ${FLAGS_debug} -eq ${FLAGS_TRUE} ]; then
  # Run tests in a debugger.
  # PYTHONPATH lets the DB suite import modules from src/server.
  PYTHONPATH="$ROOT/src/server" \
    python -m pdb src/db/gendata/testsuite.py
else
  # Run tests with code coverage.
  PYTHONPATH="$ROOT/src/server" \
    coverage run -a --omit="src/db/env/*,src/db/gendata/*_test.py,src/db/gendata/test*.py,src/server/test*.py" \
    src/db/gendata/testsuite.py
fi
deactivate
# Generate coverage report.
source "${FLAGS_envroot}/bin/activate"
coverage html -d "${FLAGS_coverage_output_dir}"
deactivate
| true
|
88986e2a9604593109a920808a1aea684aa48096
|
Shell
|
iocCN/docker
|
/compiler/officiald/builder.sh
|
UTF-8
| 443
| 2.828125
| 3
|
[] |
no_license
|
#! /bin/bash
# Clone a specific branch/commit of the given repository and build the
# iocoin daemon, copying the binary to /repo/iocoind-<branch>.
# Usage: builder.sh <repo-url> <branch> <commit>
set -eo pipefail
repo=$1      # git URL to clone
branch=$2    # branch to clone (single-branch)
commit=$3    # commit to check out inside that branch
reponame=iocoin
#dpkg -s libboost-dev | grep Version # Version: 1.65.1.0ubuntu1
#dpkg -s libminiupnpc-dev | grep Version #Version: 1.9.20140610-4ubuntu2
# build core
#git clone $repo ${reponame}
# Quote all expansions so URLs/branch names with shell metacharacters work.
git clone --single-branch --branch "$branch" "$repo" "$reponame"
cd "$reponame"
git checkout "$commit"
cd src
make -f makefile.unix clean
make -f makefile.unix
cp iocoind "/repo/iocoind-$branch"
| true
|
e8361b2a8918f29e4c652cdfe7b5db32aa78ec1a
|
Shell
|
yuioksk/private_directory
|
/jk.sh
|
UTF-8
| 426
| 3.0625
| 3
|
[] |
no_license
|
#backup files
# Ensure the requested subdirectory exists under the source tree, then copy
# bkp.zip from the source directory into the backup target.
echo ""
echo "spath and d path"
src="/home/e1027/saranya/"
targ="/opt/backup/"
echo ""
echo "Enter Directory Name"
read dirname
# Use the configured $src instead of repeating the literal path, and quote
# the user-supplied name so spaces do not break the test/mkdir.
if [ ! -d "${src}${dirname}" ]
then
echo "File doesn't exist. Creating now"
mkdir "${src}${dirname}"
echo "File created"
else
echo "File exists"
fi
echo ""
echo "spath to d path"
sudo cp -av "${src}bkp.zip" "$targ"
# BUG FIX: removed the stray trailing line "made changes" (leftover commit
# message text that was executed as a command and always failed).
| true
|
3be008304d438fcd4a283e197bbb79cc771b4838
|
Shell
|
ut-ims-robotics/warnie
|
/warnie/scripts/run_user_study.sh
|
UTF-8
| 2,524
| 3.796875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Runs one session of the robosemiotics user study: launches the Husky
# simulation with a per-subject Gazebo world while recording a rosbag,
# moves the collected data into a per-subject folder, increments the
# subject counter and generates a summary.

# ANSI styling for status messages.
RED="\e[31m"
YELLOW="\e[33m"
GREEN="\e[32m"
BOLD="\e[1m"
NL="\n"
RESET="\e[39m\e[0m"
# Success/failure control function
# Prints $1 in red and aborts the script if the previous command failed.
check_success() {
  if [[ $? != 0 ]]; then
    echo -e $RED$BOLD$1". Exiting"$RESET
    exit
  fi
}
ROBOSEMIOTICS_PTH=~/robosemiotics_study
WARNIE_PTH=/home/robert/catkin_ws/src/warnie/warnie
SUBJECT_COUNTER_PTH=$ROBOSEMIOTICS_PTH/subject_counter.txt
GAZEBO_WORLDS_PTH=worlds/
NR_OF_WORLDS=9
# NOTE(review): $SUBJECT_NR is still empty at this point (it is computed
# further below) — confirm this banner is intended to omit it.
echo -e $GREEN$BOLD"Starting the robosemiotics user study" $SUBJECT_NR$RESET$NL
# Create robosemiotics folder
mkdir -p $ROBOSEMIOTICS_PTH/data
check_success "Failed to create data storage directories"
# Create the counter file
# ("find" is used purely as an existence test here)
find $SUBJECT_COUNTER_PTH > /dev/null 2>&1
if [[ $? != 0 ]]; then
  echo -e $GREEN"* Creating a subject counter file "$RESET
  echo 0 > $SUBJECT_COUNTER_PTH
fi
# Get the subject nr
SUBJECT_NR=$(($(cat $SUBJECT_COUNTER_PTH)+1))
check_success "Failed to read the subject counter file"
echo -e $GREEN"* Running the user study on subject nr:" $SUBJECT_NR$RESET
# Load the simulation environment
# Worlds are assigned round-robin: subject N gets world ((N-1) mod 9) + 1.
GAZEBO_WORLD="study_track_$(((($SUBJECT_NR-1) % NR_OF_WORLDS) + 1)).world"
echo -e $GREEN"* Loading "$GAZEBO_WORLD$RESET
# Forward Ctrl-C to the roslaunch job so the simulation shuts down cleanly.
trap 'kill %1;' SIGINT
# Launch the sim in the background while rosbag records the camera topic;
# "wait" blocks until both finish.
roslaunch warnie husky_in_world.launch joystick_enabled:=true gazebo_world:=$GAZEBO_WORLD load_rviz:=true & rosbag record -O $ROBOSEMIOTICS_PTH/husky_image_raw_compressed.bag /husky_warnie/image_raw/compressed
wait
check_success "Failed to load the simulation environment"
# Create subject specific data folder and move all the data into this folder
echo -e $GREEN"* Formating the data"$RESET
STUDY_DATA=$ROBOSEMIOTICS_PTH/data/subject_nr_$SUBJECT_NR
mkdir $STUDY_DATA
mv $ROBOSEMIOTICS_PTH/husky_trajectory.csv $STUDY_DATA/
mv $ROBOSEMIOTICS_PTH/husky_image_raw_compressed.bag $STUDY_DATA/
mv $ROBOSEMIOTICS_PTH/$GAZEBO_WORLD $STUDY_DATA/
# Convert the world file's object list into a CSV track layout.
python2.7 $WARNIE_PTH/scripts/data_processing_scripts/format_objects.py $STUDY_DATA/$GAZEBO_WORLD $STUDY_DATA/track_layout.csv
check_success "Failed to move the data"
# Increment the subject number
echo -e $GREEN"* Incrementing the subject counter "$RESET
echo "$SUBJECT_NR" > $SUBJECT_COUNTER_PTH
check_success "Failed to increment the subject counter"
# Generate a summary of the test
echo -e $GREEN"* Generating a summary of the test "$RESET
python2.7 $WARNIE_PTH/scripts/data_processing_scripts/test_summary.py $STUDY_DATA/husky_trajectory.csv $STUDY_DATA/summary.txt
check_success "Failed to generate the summary"
echo -e $NL$GREEN$BOLD"Experiment finished successfully."$RESET$NL
| true
|
6626e55f9585bdeb55326c4dce167312619bacf5
|
Shell
|
heychick/master
|
/oracle.sh
|
UTF-8
| 3,668
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
# Unattended Oracle database host preparation for CentOS 7: sets the
# hostname, installs build dependencies, creates the oracle user and install
# directories, fetches installer archives over FTP, and applies the kernel,
# limits and profile settings the Oracle installer requires.
# Must be run as root.

# BUG FIX: "ip = `...`" (spaces around =) is not a shell assignment; it tried
# to execute a command named "ip" with "=" as an argument.
ip=`ifconfig ens3|awk '/netmask/ {print $2}'`
hostnamectl set-hostname oracle
hostname oracle
echo $ip oracle >> /etc/hosts
echo "#####安装包及yum源#####"
yum -y install zip unzip wget ftp xhost gcc glibc sysstat elfutils-libelf-devel compat-libstdc++-33 gcc-c++ libaio-devel unixODBC unixODBC-devel
wget -O /etc/yum.repos.d/CentOS7-Base-163.repo http://mirrors.163.com/.help/CentOS7-Base-163.repo &>/dev/null && echo "#####安装完成#####"
sleep 2
yum clean all && yum repolist
echo "#####1.添加用户组和用户,并给用户设置密码#######"
groupadd dba
groupadd oinstall
useradd -g oinstall -G dba oracle
echo 123456 | passwd --stdin oracle && echo "修改密码成功"
echo "#####2.创建安装目录,分配用户组与权限#######"
mkdir -p /u01/app/oracle/product
mkdir /u01/app/oradata
chown -R oracle:oinstall /u01
chmod -R 755 /u01
echo "#####3.解压软件包#####"
# Non-interactive FTP fetch of the installer archives from the LAN server.
ftp -n <<EOF
open 192.168.1.254
user ftp \n
cd share/
prompt
bin
mget linuxamd64_12102_database_1of2.zip linuxamd64_12102_database_2of2.zip pdksh-5.2.14-1.i386.rpm
bye
EOF
echo "ftp下载包完成"
# NOTE(review): the FTP step downloads the 12c (linuxamd64_12102_*) archives,
# but the unzip commands below reference 11gR2 archives that were never
# fetched — confirm which Oracle version is intended and align the names.
unzip linux.x64_11gR2_database_1of2.zip -d /u01/ &>/dev/null
unzip linux.x64_11gR2_database_2of2.zip -d /u01/ &>/dev/null
echo "#####4.在/etc目录下创建一个名为oraInst.loc的文件并修改权限#####"
echo "inventory_loc=/u01/app/oracle/oraInventory"> /etc/oraInst.loc
echo "inst_group=oinstall">> /etc/oraInst.loc
chown oracle:oinstall /etc/oraInst.loc
chmod 664 /etc/oraInst.loc
echo "#####5.修改系统参数#####"
# Kernel parameters required by the Oracle installer prechecks.
echo fs.file-max = 6815744 >>/etc/sysctl.conf
echo fs.aio-max-nr = 1048576 >>/etc/sysctl.conf
echo kernel.shmall = 2097152 >>/etc/sysctl.conf
echo kernel.shmmax = 2147483648 >>/etc/sysctl.conf
echo kernel.shmmni = 4096 >>/etc/sysctl.conf
echo kernel.sem = 250 32000 100 128 >>/etc/sysctl.conf
echo net.ipv4.ip_local_port_range = 9000 65500 >>/etc/sysctl.conf
echo net.core.rmem_default = 4194304 >>/etc/sysctl.conf
echo net.core.rmem_max = 4194304 >>/etc/sysctl.conf
echo net.core.wmem_default = 262144 >>/etc/sysctl.conf
echo net.core.wmem_max = 1048576 >>/etc/sysctl.conf
sysctl -p
# Per-user resource limits for the oracle account.
echo oracle soft nproc 2047 >> /etc/security/limits.conf
echo oracle hard nproc 16384 >> /etc/security/limits.conf
echo oracle soft nofile 1024 >> /etc/security/limits.conf
echo oracle hard nofile 65536 >> /etc/security/limits.conf
echo session required /lib/security/pam_limits.so >>/etc/pam.d/login
echo session required pam_limits.so >>/etc/pam.d/login
# Raise ulimits for the oracle user at login time.
echo "if [ \$USER = \"oracle\" ]; then" >>/etc/profile
echo "if [ \$SHELL = \"/bin/ksh\" ]; then" >>/etc/profile
echo ulimit -p 16384 >>/etc/profile
echo ulimit -n 65536 >>/etc/profile
echo else >>/etc/profile
echo ulimit -u 16384 -n 65536 >>/etc/profile
echo fi >>/etc/profile
echo umask 022 >>/etc/profile
echo fi >>/etc/profile
source /etc/profile
# Oracle environment for the oracle account.
# NOTE(review): ORACLE_HOME points at 11.2.0 while the downloads above are
# 12c — see the version-mismatch note at the unzip step.
echo export ORACLE_BASE=/u01/app/oracle >> /home/oracle/.bash_profile
echo "export ORACLE_HOME=\$ORACLE_BASE/product/11.2.0/db_1" >> /home/oracle/.bash_profile
echo export ORACLE_SID=bpas >> /home/oracle/.bash_profile
echo "export PATH=\$ORACLE_HOME/bin:\$PATH" >> /home/oracle/.bash_profile
echo export LANG=en_US.UTF-8 >> /home/oracle/.bash_profile
echo "export LD_LIBRARY_PATH=\$ORACLE_HOME/lib:/lib:/usr/lib" >> /home/oracle/.bash_profile
echo "export CLASSPATH=\$ORACLE_HOME/jre:\$ORACLE_HOME/jlib:\$ORACLE_HOME/rdbms/jlib" >> /home/oracle/.bash_profile
source /home/oracle/.bash_profile
#export DISPLAY=$ip:0.0
#强制安装
#rpm -ivh --force --nodeps pdksh-5.2.14-1.i386.rpm
| true
|
e2a54a541c57ddd51aec6136cd4a9e1229a75343
|
Shell
|
FangnaF/docker-jpeg2lxfml
|
/start.sh
|
UTF-8
| 345
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
# Download the image named by $IMAGE_URL, convert it to LXFML with the
# bundled Java tool, and publish the result as an anonymous GitHub gist.
# BUG FIX: quote the URL so query strings / spaces survive word splitting.
wget "$IMAGE_URL" -O input.jpeg
# Add Java class to system Java & execute
javac Jpeg2Lxfml.java
java Jpeg2Lxfml input.jpeg
# Install gist gem to create gists from file
gem install gist
# Upload anonymous gist to GitHub
gist input.jpeg.lxfml
echo "Navigate to gist URL and save text as <name_of_image>.lxf file. Open in LDD. Enjoy!"
| true
|
0a94c92adc1a71b7068738f067c38913b825c9f2
|
Shell
|
tmellan/scripts_ThomasHPC
|
/1innerBatch.pbs
|
UTF-8
| 855
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash -l
#$ -S /bin/bash
#$ -N inner4loop
#$ -l mem=12G
#$ -l tmpfs=12G
#$ -pe mpi 120
#$ -l h_rt=00:19:00
#$ -cwd
# SGE batch job: launch 10 VASP runs in background batches of 5 — after every
# fifth launch, wait for the whole batch before starting the next one.
dir=`pwd`
vaspdir="/home/mmm0011/src/VASP_5.4.4/vasp_5.4.4_stable_6May2017/bin"
pwd
cd $dir
pwd
#Lets make a script to run 5 set of jobs in parallel each usng 1 node (24 cores), and once the five have all completed, run another five in parallel, then close.
# BUG FIX: the counter used to start at -1, so the first "wait" fired after a
# single job (c=0) instead of after a full batch of five. Starting at 0 makes
# c count launched jobs, so c%5==0 holds after jobs 5 and 10.
c=0
for i in {1..10}; do
let c=c+1
mkdir "test-$i"
cd "test-$i"
cp ../subSTUFF/* .
echo "VASP_SRC_DIR: "$VASP_SRC_DIR
echo "JOB_ID is: " $JOB_ID
mpirun -n 24 $vaspdir/vasp_std > out.$JOB_ID &
# gerun $vaspdir/vasp_std > out.$JOB_ID
if (( $c % 5 == 0 )); then wait ; fi
#Debug messages
echo "the date is: " `date`
echo "directory is: " `pwd`
echo "command is VASP"
cd ../
done
# Barrier for any jobs still running (defensive; the in-loop waits cover 5/10).
wait
echo DONE
echo at
echo `pwd`
echo in
echo `date`
| true
|
b3fee949bc7e599586edb7a7129fdc5c84064aae
|
Shell
|
langmead-lab/recount-projects
|
/old/allen/get_meta.sh
|
UTF-8
| 265
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
# Download SRAdb-v2 metadata for the "allen" study (SRA project SRP061902)
# using the repo's metadata.sradbv2 tool, then move the gzipped JSON next to
# this script. Quoting added so paths containing spaces do not break under
# "set -e".
d=$(dirname "$0")
set -ex
study=allen
srp=SRP061902
get_metadata() {
    # Run from the repo's src/ dir so "python -m metadata.sradbv2" resolves.
    pushd "$d/../../src"
    python -m metadata.sradbv2 search "study_accession:${srp}" --gzip --output "${study}.json"
    popd
    mv "$d/../../src/${study}.json.gz" "$d/"
}
get_metadata
| true
|
f77a686d021aee5c8b0c229970287acaf1942d83
|
Shell
|
samwhelp/archcraft-adjustment-prototype
|
/project/archcraft-adjustment-prototype-ctrl/PKGBUILD
|
UTF-8
| 1,128
| 2.890625
| 3
|
[
"MIT"
] |
permissive
|
##
## References for the PKGBUILD format:
## * https://wiki.archlinux.org/title/Creating_packages
## * https://wiki.archlinux.org/title/Arch_User_Repository
## * https://archlinux.org/groups/x86_64/base-devel/
## * https://wiki.archlinux.org/title/pacman
## * https://wiki.archlinux.org/title/makepkg
## * https://wiki.archlinux.org/title/PKGBUILD
## * https://wiki.archlinux.org/title/Arch_package_guidelines
##
# Packages the repository's prebuilt file tree (asset/usr) under /usr.
pkgname=archcraft-adjustment-prototype-ctrl
pkgver=0.1
pkgrel=1
pkgdesc="Archcraft Adjustment / Prototype / Ctrl"
url="https://github.com/samwhelp/archcraft-adjustment-prototype/tree/project/archcraft-adjustment-prototype-ctrl"
arch=('any')
license=('MIT')
makedepends=()
depends=()
conflicts=()
provides=("${pkgname}")
# !strip: leave files untouched; !emptydirs: omit empty directories.
options=(!strip !emptydirs)
# Stage the repo's asset/ tree into $srcdir so package() can pick it up.
prepare () {
	cp -af ../asset/. "${srcdir}"
}
package () {
	##
	## Path
	##
	#local sys_etc_dir_path="${pkgdir}/etc"
	#local sys_share_dir_path="${pkgdir}/usr/share/${pkgname}"
	#local sys_skel_dir_path="${sys_etc_dir_path}/skel"
	##
	## Prepare Dir
	##
	#mkdir -p "$sys_etc_dir_path"
	#mkdir -p "$sys_share_dir_path"
	##
	## Main Config
	##
	# Install the staged usr/ tree into the package root.
	cp -af "${srcdir}/usr" "${pkgdir}/"
	return
}
| true
|
2b79d72a93726c420d1e15d741f2ab0ca208a1e3
|
Shell
|
hplewa/cs342
|
/hw3/fileinfo
|
UTF-8
| 165
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash
# Wrapper: run the Java Main class (with PDFBox on the classpath) over one or
# more file paths.
# BUG FIX: "[[ $# < 1 ]]" compared lexicographically; use the numeric -lt.
if [[ $# -lt 1 ]]; then
echo "Usage: fileinfo <filepath> [additional filepaths..]" 1>&2;
exit 1;
fi;
# BUG FIX: quote "$@" so file paths containing spaces reach Java intact.
java -cp bin:lib/pdfbox-app-2.0.6.jar Main "$@"
| true
|
64666d68d49cc0c10c5dc6a37ecc57b3c43fead3
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/gst-plugins-base-git/PKGBUILD
|
UTF-8
| 1,422
| 2.65625
| 3
|
[] |
no_license
|
# Original Package: Jan de Groot <jgc@archlinux.org>
# Maintainer: Lubosz Sarnecki <lubosz@gmail.com>
# PKGBUILD for the git snapshot of GStreamer's base plugin set.
pkgname='gst-plugins-base-git'
pkgver=1.13.0.1.15990.7e94d2824
pkgrel=1
pkgdesc="GStreamer Multimedia Framework Base Plugins"
arch=('i686' 'x86_64')
license=('LGPL')
makedepends=('pkgconfig' 'gstreamer-git' 'orc' 'libxv' 'alsa-lib' 'cdparanoia' 'libvisual' 'libvorbis' 'libtheora' 'pango' 'gobject-introspection')
options=(!libtool !emptydirs)
url="http://gstreamer.freedesktop.org/"
conflicts=('gst-plugins-base' 'gst-plugins-base-libs')
provides=('gst-plugins-base='$pkgver 'gst-plugins-base-libs='$pkgver)
# BUG FIX: removed a second, identical pkgdesc assignment that followed the
# depends declaration (redundant duplicate).
depends=('gstreamer-git' 'orc' 'libxv' 'alsa-lib' 'cdparanoia' 'libvisual' 'libvorbis' 'libtheora' 'pango')
source=('git://anongit.freedesktop.org/gstreamer/gst-plugins-base')
sha256sums=('SKIP')
_gitname='gst-plugins-base'
# Version string: <configure.ac version>.<commit count>.<short hash>.
pkgver() {
  cd $_gitname
  version=$(grep AC_INIT configure.ac | sed 's/AC_INIT(\[GStreamer Base Plug-ins\],\[//' | sed 's/\],\[http:\/\/bugzilla.gnome.org\/enter_bug.cgi?product=GStreamer\],\[gst-plugins-base\])//')
  hash=$(git log --pretty=format:'%h' -n 1)
  revision=$(git rev-list --count HEAD)
  echo $version.$revision.$hash
}
build() {
  cd $_gitname
  ./autogen.sh --prefix=/usr --sysconfdir=/etc --localstatedir=/var \
    --disable-static --enable-experimental
  make || return 1
}
package() {
  cd $_gitname
  make DESTDIR="${pkgdir}" install
}
| true
|
47114f50d229028e8cca8216414ab984c9d583f9
|
Shell
|
austindyoung/dotfiles
|
/setup.sh
|
UTF-8
| 582
| 3.140625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Dotfiles bootstrap: loads shared shell functions, runs OS-specific setup
# (osx/brew on macOS, apt-get on Linux), ensures Powerline is installed via
# pip, then hands off to bootstrap.sh.
cd `dirname $BASH_SOURCE` && source resources/bootstrapping.sh
source dotfiles/lib/shellrc/functions.sh
# Pick the platform-specific provisioning scripts.
case `uname` in
    'Darwin')
        source resources/osx.sh
        source resources/brew.sh
        ;;
    'Linux')
        source resources/apt-get.sh
        ;;
esac
# Install Powerline with pip, bootstrapping pip itself via easy_install if
# it is not already on PATH.
function install_powerline() {
    hash pip 2>/dev/null || sudo easy_install pip
    # Only install when "pip show Powerline" reports no install Location.
    if test -z $(pip show Powerline | grep Location | awk '{print $2}');
    then
        sudo pip install --user git+git://github.com/Lokaltog/powerline
    fi
}
install_powerline
source bootstrap.sh
| true
|
199b1dd127b37d3bfbc57fc8443fdaafad2e5822
|
Shell
|
ybayart/42-ft_services
|
/setup.sh
|
UTF-8
| 2,382
| 3.09375
| 3
|
[] |
no_license
|
##########################
##         INIT         ##
##########################
# Bootstraps the ft_services stack: starts Minikube, builds every service
# image inside the cluster, installs MetalLB, and deploys all manifests.
#rm -rf ~/.minikube/ ~/goinfre/minikube
#mkdir ~/goinfre/minikube
#ln -s ~/goinfre/minikube ~/.minikube/
# Driver and CPU count depend on the host OS: docker on Linux, VirtualBox
# elsewhere (macOS school machines).
if [ `uname -s` = 'Linux' ]
then
	VMDRIVER="docker"
	VMCORE=2
else
	VMDRIVER="virtualbox"
	VMCORE=4
fi
minikube start --vm-driver=$VMDRIVER --cpus=$VMCORE --memory=5000m
###########################
##        DOCKER         ##
###########################
# Point the local docker CLI at minikube's daemon so images are built inside
# the cluster and need no registry push.
eval $(minikube docker-env)
docker build -t telegraf:v1 srcs/containers/telegraf/
docker build -t ftps:v1 srcs/containers/ftps/
docker build -t nginx:v1 srcs/containers/nginx/
docker build -t wordpress:v1 srcs/containers/wordpress/
docker build -t phpmyadmin:v1 srcs/containers/phpmyadmin/
docker build -t grafana:v1 srcs/containers/grafana/
docker build -t mysql:v1 srcs/containers/mysql/
docker build -t influxdb:v1 srcs/containers/influxdb/
###########################
##     LOADBALANCER      ##
###########################
# Install MetalLB v0.9.3 and generate its memberlist secret.
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.9.3/manifests/namespace.yaml
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.9.3/manifests/metallb.yaml
kubectl create secret generic -n metallb-system memberlist --from-literal=secretkey="$(openssl rand -base64 128)"
# Derive a reachable base IP (docker network gateway, or minikube's own IP),
# then rewrite its last octet to 128 and inject it into the MetalLB config.
if [ $VMDRIVER = 'docker' ]
then
	IP=`docker inspect minikube --format="{{range .NetworkSettings.Networks}}{{.Gateway}}{{end}}"`
else
	IP=`minikube ip`
fi
IP=`echo $IP|awk -F '.' '{print $1"."$2"."$3"."128}'`
cp srcs/metallb_base.yaml srcs/metallb.yaml
sed -ie "s/IPTMP/$IP/g" srcs/metallb.yaml
###########################
##        DEPLOY         ##
###########################
# TLS secret for nginx, then every service manifest.
kubectl create secret tls nginx --key srcs/containers/nginx/srcs/certs/server.key --cert srcs/containers/nginx/srcs/certs/server.crt
kubectl create -f srcs/metallb.yaml
kubectl create -f srcs/ftps.yaml
kubectl create -f srcs/nginx.yaml
kubectl create -f srcs/wordpress.yaml
kubectl create -f srcs/phpmyadmin.yaml
kubectl create -f srcs/grafana.yaml
kubectl create -f srcs/mysql.yaml
kubectl create -f srcs/influxdb.yaml
###########################
##       DASHBOARD       ##
###########################
echo "Waiting until Dashboard launch"
sleep 10
# Run the dashboard in a detached screen so this script can continue.
screen -dmS t0 minikube dashboard
###########################
##   FTPS PASV_ADDRESS   ##
###########################
screen -dmS t1 ./srcs/setup_ftps.sh
| true
|
48333cec27b167c1599687874b790f9d898e6a33
|
Shell
|
fullmei/rpi3-arm64-ubuntu
|
/buildfirmware.sh
|
UTF-8
| 355
| 2.515625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Assemble Raspberry Pi firmware for the image under build/: boot files from
# raspberrypi/firmware (kernels and DTBs removed — presumably supplied by the
# arm64 build elsewhere, TODO confirm) and nonfree WiFi/BT firmware blobs
# into rootfs/lib.
# BUG FIX: guard the cd calls so the rm/cp commands below never run from the
# wrong directory, and quote paths.
cd build || exit 1
D=$(pwd)
mkdir boot
git clone --depth 1 -b stable https://github.com/raspberrypi/firmware.git
cp -r firmware/boot/* "$D/boot"
# Drop the 32-bit kernels and device trees shipped with the firmware repo.
rm "$D"/boot/*.dtb
rm "$D"/boot/*kernel*
rm -rf firmware
cd "$D/rootfs/lib" || exit 1
sudo git clone --depth 1 https://github.com/rpi-distro/firmware-nonfree.git
sudo mv firmware-nonfree firmware
sudo rm -rf firmware/.git
cd "$D"
|
a9bcec45c70024599304e064e4891ee66edbf8ec
|
Shell
|
sandervanvugt/bash-scripting
|
/script11
|
UTF-8
| 385
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/bash
# Countdown timer: takes a number of minutes as $1, counts down second by
# second, announces when time is up, and then keeps reporting how late you
# are (until interrupted).
COUNTER=$1
COUNTER=$(( COUNTER * 60 ))
# Decrease the counter by one and pause for a second.
# BUG FIXES: the original declared this as "minusone()({" (unbalanced
# subshell — a syntax error, and a subshell would not persist the change
# anyway) and assigned to a misspelled "COUNNTER" variable.
minusone() {
COUNTER=$(( COUNTER - 1 ))
sleep 1
}
while [ $COUNTER -gt 0 ]
do
echo you still have $COUNTER seconds left
minusone
done
[ $COUNTER = 0 ] && echo time is up && minusone
# BUG FIX: the loop above called a misspelled "minusonne".
[ $COUNTER = "-1" ] && echo you now are one second late && minusone
# Intentionally infinite: report lateness (absolute value via ${COUNTER#-}).
while true
do
echo you now are ${COUNTER#-} seconds late
minusone
done
| true
|
a559e828aeeda5c7fd5e05a9dc9e7331fa7f458a
|
Shell
|
NitorCreations/aws-utils
|
/opt/nitor/template-snippets/nexus-userdata.sh
|
UTF-8
| 1,559
| 2.515625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -ex
# Copyright 2016-2017 Nitor Creations Oy
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# EC2 user-data bootstrap for a Nexus repository host.
# The empty CF_* assignments below look like placeholders — presumably the
# deployment tooling substitutes CloudFormation stack parameter values into
# them before this script runs (TODO confirm); do not remove them.
CF_AWS__StackName=""
CF_AWS__Region=""
CF_paramDnsName=""
CF_paramAwsUtilsVersion=""
CF_paramAmi=""
CF_paramAmiName=""
CF_paramEip=""
CF_paramSonatypeWorkSize="20"
export HOME=/root
cd $HOME
source /opt/nitor/cloud_init_functions.sh
source /opt/nitor/tool_installers.sh
# Update the aws-utils bundle to the requested version before using it.
AWSUTILS_VERSION="${CF_paramAwsUtilsVersion}" update_aws_utils
# reload scripts sourced above in case they changed:
source /opt/nitor/cloud_init_functions.sh
source /opt/nitor/tool_installers.sh
source /opt/nitor/apache_tools.sh
source /opt/nitor/ebs-functions.sh
source /opt/nitor/aws_tools.sh
source /opt/nitor/ssh_tools.sh
source /opt/nitor/nexus_tools.sh
# Host and service setup — all functions below come from the sourced scripts.
set_region
aws_install_metadata_files
set_timezone
set_hostname
configure_and_start_nexus
apache_replace_domain_vars
apache_install_certs
apache_enable_and_start_service
nexus_wait_service_up
nexus_setup_snapshot_cron
ssh_install_hostkeys
ssh_restart_service
ec2-associate-eip
source /opt/nitor/cloud_init_footer.sh
| true
|
f0ceb91cc37421dc1553134124a8bf2907981880
|
Shell
|
ErickChacon/dotfiles-ubuntu-18
|
/.local/bin/colors-terminal.sh
|
UTF-8
| 1,342
| 2.875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Apply the palette named in ~/.palette-name.vim to the default GNOME
# Terminal profile by writing its colors into dconf.
# select a random palette
path_pals="$HOME/.palettes"
path_palname="$HOME/.palette-name.vim"
pal=$(cat $path_palname)
path_colors=$path_pals/$pal
# set up the terminal colors
# Resolve the dconf path of the default terminal profile.
profile=$(dconf read /org/gnome/terminal/legacy/profiles:/default)
full_profile=/org/gnome/terminal/legacy/profiles:/:${profile//\'/}
# Load the palette file into array XYZ, one entry per line (IFS restricted
# to CR/LF; GLOBIGNORE='*' keeps glob characters in lines from expanding).
IFS=$'\r\n' GLOBIGNORE='*' command eval 'XYZ=($(cat $path_colors))'
# Entries 0/1/11 are background, foreground and bold colors.
dconf write $full_profile/background-color "'"${XYZ[0]}"'"
dconf write $full_profile/foreground-color "'"${XYZ[1]}"'"
dconf write $full_profile/bold-color "'"${XYZ[11]}"'"
# Entries 2..17 form the 16-color terminal palette.
dconf write $full_profile/palette "[ \
'${XYZ[2]}', '${XYZ[3]}', '${XYZ[4]}', '${XYZ[5]}', '${XYZ[6]}', '${XYZ[7]}', \
'${XYZ[8]}', '${XYZ[9]}', '${XYZ[10]}', '${XYZ[11]}', '${XYZ[12]}', '${XYZ[13]}', \
'${XYZ[14]}', '${XYZ[15]}', '${XYZ[16]}', '${XYZ[17]}' \
]"
# Author's notes on which palette slot styles what on this setup:
# 1 mutt bg unred messages
# 2 string, bar for error message, zip file ranger, read email color
# 3 keywords, chaconmo-Precicion-5510, ranger tab bg
# 4 unknown: ranger jpg
# 5 comments
# 6 parenthesis, objects, some keywords
# 7 functions in R, object in vim, terminal path
# 8 unknown
# 9 unknown
# 10 git symbol, unread email sender
# 11 chaconmo, function, executable files, chaconmo ranger
# 12 unknown
# 13 unknown: directories and ~/ ranger
# 14 unknown
# 15 unknown
# 16 fontground message
| true
|
aec628126677d8508874c8a649fa80ee9bb6ba60
|
Shell
|
dearith/mfext
|
/adm2/templates/mfxxx.start
|
UTF-8
| 2,511
| 3.46875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Start-script template for a "mfxxx" module; the {{...}} and {% %} markers
# are Jinja2 placeholders rendered when the module is generated. Initializes
# the plugin system when needed, generates runtime config, starts circus,
# installs the crontab and records the final module status.
. ${MFEXT_HOME}/lib/bash_utils.sh
exit_if_root
if test "${1:-}" = "--help"; then
    echo "usage {{MODULE_LOWERCASE}}.start [NOINIT]"
    echo "  => start the {{MODULE_LOWERCASE}} module"
    exit 0
fi
# The NOINIT argument skips the one-time initialization steps below.
NOINIT=0
if test "${1:-}" = "NOINIT"; then
    NOINIT=1
fi
# A module opts out of startup entirely with <MODULE>_STARTUP_FLAG=0.
STARTUP_FLAG=`env |grep "^${MODULE}_STARTUP_FLAG=" |awk -F '=' '{print $2;}'`
if test "${STARTUP_FLAG}" = "0"; then
    echo_bold "${MODULE}_STARTUP_FLAG=0 => nothing to start"
    exit 0
fi
{% if MODULE == "MFDATA" or MODULE == "MFSERV" or MODULE == "MFBASE" %}
# Refuse to start from inside a plugin environment.
PLUGIN_NAME=$(env |grep ^{{MODULE}}_CURRENT_PLUGIN_NAME |awk -F '=' '{print $2;}')
if test "${PLUGIN_NAME}" != ""; then
    echo "ERROR: you can't use {{MODULE_LOWERCASE}}.start inside a plugin_env"
    exit 1
fi
# Initialize the plugin system on first start, then sync configured plugins.
if test "${NOINIT}" = "0"; then
    if ! test -d ${MODULE_RUNTIME_HOME}/var/plugins/base; then
        {{MODULE_LOWERCASE}}.init
    fi
    if ! test -d ${MODULE_RUNTIME_HOME}/var/plugins/base; then
        echo "ERROR: plugin system not initialized"
        exit 1
    fi
    _install_or_update_configured_plugins.py
fi
{% endif %}
{% if MODULE == "MFADMIN" %}
# MFADMIN needs its grafana database initialized once.
if test "${NOINIT}" = "0"; then
    if ! test -f "${MODULE_RUNTIME_HOME}/var/grafana/grafana.db"; then
        {{MODULE_LOWERCASE}}.init
    fi
fi
{% endif %}
# Print a banner whose border is cut to the message length.
LINE="***** STARTING ${MODULE} *****"
N=`echo "${LINE}" |wc -c`
HEADER=`echo "**************************************************************" |cut -c 2-${N}`
echo_bold "${HEADER}"
echo_bold "${LINE}"
echo_bold "${HEADER}"
echo " "
# The status file tracks STARTING -> RUNNING/ERROR for monitoring.
echo STARTING >${MODULE_RUNTIME_HOME}/var/status
# Render logrotate.conf from its template, if the module ships one.
if test -f ${MODULE_HOME}/config/logrotate.conf; then
    echo -n "- Generating logrotate.conf..."
    echo_running
    cat ${MODULE_HOME}/config/logrotate.conf |envtpl >${MODULE_RUNTIME_HOME}/tmp/config_auto/logrotate.conf
    if test -f ${MODULE_RUNTIME_HOME}/tmp/config_auto/logrotate.conf; then
        echo_ok
    else
        echo_nok
    fi
fi
{% block custom_before_circus %}
# Can be replaced by mfxxx.start.custom
{% endblock %}
# Start the circus process supervisor if the module uses one.
if test -f ${MODULE_HOME}/config/circus.ini; then
    _circus.start
fi
{% block custom %}
# Can be replaced by mfxxx.start.custom
{% endblock %}
_make_and_install_crontab.sh
echo -n "- Wait 2 sec..."
echo_running
sleep 2
echo_ok
# Final health check decides the recorded status and this script's exit code.
MODULE_LOWERCASE=`echo ${MODULE} | tr '[:upper:]' '[:lower:]'`
${MODULE_LOWERCASE}.status QUIET
RES=$?
if test ${RES} -eq 0; then
    echo RUNNING >${MODULE_RUNTIME_HOME}/var/status
else
    echo ERROR >${MODULE_RUNTIME_HOME}/var/status
fi
echo " "
echo " "
exit ${RES}
| true
|
6940cdfa9f1601cdefd973cfdad0b7cceb3b8264
|
Shell
|
gitgjt/jee-universal-bms
|
/main/deploy/bin/start.sh
|
UTF-8
| 738
| 3.25
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Restart helper for the BMS Spring Boot application.
# Usage: start.sh [profile port] — optional Spring profile and HTTP port.
BASEDIR="$(dirname $(readlink -f $(dirname "$0")))"

# Optional JVM overrides, only set when both arguments are supplied.
profile=""
port=""
if [ $# -eq 2 ] ; then
    profile="-Dspring.profiles.active=$1"
    port="-Dserver.port=$2"
fi

# Kill any running instance first (PIDs found via jps).
str=$(jps | grep -i "BmsApplication" | awk -F ' ' '{print $1}')
if [ -n "$str" ]; then
    echo "kill back-site processId: $str"
    kill -9 $str
else
    echo "back-site process has been dead"
fi

# Launch detached, appending to the shared console log.
nohup java $profile $port -server -Xmx2048m -cp $BASEDIR:$BASEDIR/config:$BASEDIR/lib/*:. com.yuzhi.back.BmsApplication >> /data0/log/back/console.log 2>&1 &

# Confirm the new process came up.
str=$(jps | grep -i "BmsApplication" | awk -F ' ' '{print $1}')
if [ -n "$str" ]; then
    echo "restart back-web processId: $str"
else
    echo "back-web process don't restart"
fi
| true
|
bd6fa7b7892d66a0c026e3434d1944c576b5beb7
|
Shell
|
Clarence-pan/static-packer-demo
|
/install.sh
|
UTF-8
| 2,022
| 3.90625
| 4
|
[] |
no_license
|
#!/bin/bash
# BUG FIX: the shebang was "#/bin/bash" (missing "!"), so the kernel ignored
# it and the script ran under whatever shell happened to invoke it.
# Checks the required toolchain (node, npm, php, composer), installs the
# dependencies of the dynamic (PHP) and static (node.js) sub-projects, and
# optionally builds everything when called with -b/--build.
cd "`dirname $0`"
if [ "$?" != "0" ]; then
    echo "Cannot automatically switch to project root directory. Please switch by yourself and rerunt this script."
    exit 1;
fi;

PROJECT_BASE="`pwd`"

# BUG FIX: the original pattern `echo Version: \`tool --version\`` always left
# $? at 0 (echo's own status), so a missing tool was never detected. Capturing
# the substitution in an assignment preserves the tool's exit status.
NODE_VERSION=`node --version`
if [ "$?" != "0" ]; then
    echo "You must install node.js."
    exit 1;
fi;
echo Node version: $NODE_VERSION

NPM_VERSION=`npm --version`
if [ "$?" != "0" ]; then
    echo "You must install npm."
    exit 1;
fi;
echo NPM version: $NPM_VERSION

PHP_VERSION=`php --version`
if [ "$?" != "0" ]; then
    echo "You must install php."
    exit 1;
fi;
echo PHP version: $PHP_VERSION

COMPOSER_VERSION=`composer --version`
if [ "$?" != "0" ]; then
    echo "You must install composer."
    exit 1;
fi;
echo Composer version: $COMPOSER_VERSION

echo ===============================================================================
echo Setting up dynamic project and installing composer packages...
cd "$PROJECT_BASE/dynamic" && cp .env.example .env && composer self-update && composer install
if [ "$?" != "0" ]; then
    echo "Something wrong...."
    exit 1;
fi;

echo ===============================================================================
echo Setting up static project and installing node.js packages..
cd "$PROJECT_BASE/static" && cp .env.example .env && npm install
if [ "$?" != "0" ]; then
    echo "Something wrong...."
    exit 1;
fi;

# Make the sources importable as "app" from node_modules.
if [ ! -e node_modules/app ]; then
    ln -s `pwd`/src node_modules/app
fi;

if [ "$1" == "--build" ] || [ "$1" == "-b" ]; then
    echo ===============================================================================
    echo building...
    cd "$PROJECT_BASE/static" && npm run build
    if [ "$?" != "0" ]; then
        echo "Failed to build."
        exit 1;
    fi;
    echo "Build result:"
    cd "$PROJECT_BASE"
    cat dynamic/bootstrap/cache/*.php
    cat static/public/*.json
    cat static/public/*.js
    cat static/*.log
    exit 0
else
    echo ===============================================================================
    echo installation compeleted.
    echo You can use \"npm run build\" to build this project.
fi;
| true
|
1080a419f153c715751e277dbb4216a438442f30
|
Shell
|
oemergenc/dotfiles
|
/.dot/aliases/aliases_helm.sh
|
UTF-8
| 828
| 2.765625
| 3
|
[] |
no_license
|
# Interactive Helm release browser for one namespace (default "omm"), driven
# by fzf with a live "helm status" preview. Requires the helm-tiller plugin.
# Key bindings (also shown in the header): CTRL-S copies the release name to
# the clipboard (pbcopy), CTRL-X deletes+purges it, q/CTRL-C aborts.
fhelm(){
  ns=${1:-"omm"}
  # List all releases, drop the 8 header lines and the trailing line, then
  # hand the rows to fzf. In each binding, "sed 's/ .*//' | sed 's/[0-9]*//g'"
  # takes the first column of the selected row and strips digits from it to
  # recover the release name.
  helm tiller run $ns -- helm list --all| sed -e '1,8d' | sed -e '$ d' | \
  fzf \
  --header="CTRL-Y/E/F/B to scroll preview, CTRL-S to copy release-name, CTRL-X to delete and purge, CTRL-C to exit" \
  --preview="echo {} | sed 's/ .*//' | sed 's/[0-9]*//g' | xargs -I% helm tiller run $ns -- helm status %" \
  --bind "j:down,k:up,ctrl-e:preview-down,ctrl-y:preview-up,ctrl-f:preview-page-down,ctrl-b:preview-page-up,q:abort" \
  --bind "ctrl-s:execute(echo {} | sed 's/ .*//' | sed 's/[0-9]*//g' | pbcopy)+accept" \
  --bind "ctrl-x:execute(echo {} | sed 's/ .*//' | sed 's/[0-9]*//g' | xargs -I % sh -c 'helm tiller run $ns -- helm del --purge % | less')+accept" \
  --preview-window=right:60% \
  --height 80%
}
| true
|
d73c4f83f719f46b39fa992714ce6b2e7a5ae76c
|
Shell
|
PacktPublishing/Python-Network-Programming
|
/Chapter21/21_5_open_contrail_compute_node.sh
|
UTF-8
| 750
| 2.5625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
##############################################################################
# Python Network Programming Cookbook, Second Edition -- Chapter - 12
# Adopted from https://github.com/Juniper/contrail-controller/wiki/Install-and-Configure-OpenContrail-1.06
##############################################################################
# Sets up an OpenContrail compute node on Ubuntu precise: registers the
# OpenContrail PPA, installs the vRouter agent, and loads its kernel module.
# Configue the Ubuntu repositories.
echo "deb http://ppa.launchpad.net/opencontrail/ppa/ubuntu precise main" | sudo tee -a /etc/apt/sources.list.d/opencontrail.list
# Trust the PPA's signing key before refreshing the package index.
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 16BD83506839FE77
sudo apt-get update
# Install Contrail Virtual Rouer Agent
sudo apt-get install contrail-vrouter-agent
sudo modprobe vrouter
# Persist the module so it is loaded on every boot, not just now.
echo "vrouter" | sudo tee -a /etc/modules
| true
|
659d04ca378339af7f8c73fb8f19c72b1ce69352
|
Shell
|
joekir/js-reversing-workbench
|
/volumes/scripts/illuminate.sh
|
UTF-8
| 236
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/bash
# Run IlluminateJS deobfuscation (as a Babel plugin) over the JS file given
# as $1, printing the transformed source to stdout.
set -euo pipefail
# BUG FIX: quote "$1" so input paths containing spaces work; resolve to an
# absolute path before pushd changes the working directory.
INPUT_FILE=$(readlink -f "$1")
pushd /opt/illuminate > /dev/null
./node_modules/@babel/cli/bin/babel.js --plugins "./illuminatejs/babel-plugin-deobfuscate/lib/illuminatejs.js" "${INPUT_FILE}"
popd > /dev/null
| true
|
f98b7502646d261f74090a2af0c9dcbdb25bc88a
|
Shell
|
JCVenterInstitute/HumanSubtractionDB1
|
/run_Demo_subtraction.sh
|
UTF-8
| 3,265
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/sh
# Here is a project-specific pipeline in script form.
# This script provides an example but it is specific to one dataset and one compute environment.
# The dataset is a subset of the JCVI data using Sendai virus.
# The compute environment is the SGE grid at JCVI.
# Pipeline: index the subtraction database with bowtie2, trim reads with
# trimmomatic, map each trimmed pair against the index, and write the reads
# that DID map to per-pair ID lists so they can be subtracted, leaving the
# unmatched reads in nonSDB.*.fastq.
SCRIPTDIR=$1
echo "ASSUME CORE SCRIPT DIRECTORY IS ${SCRIPTDIR}"
if [ -z "$1" ]
then
    echo "Usage: $0 <core>"
    exit 1
fi
SDB=subtraction.HUH7.contigs.fasta # for the demo
INDEX=SDB-HUH7 # for the demo
TRIMMER="run_trimmomatic.sh"
# NOTE(review): each `which` result unconditionally overwrites the hard-coded
# path; if the tool is not on PATH the variable ends up empty rather than
# falling back — confirm this is intended.
BOWTIE_BUILD="/usr/local/bin/bowtie2-build"
BOWTIE_BUILD=`which bowtie2-build`
BOWTIE_ALIGN="/usr/local/bin/bowtie2"
BOWTIE_ALIGN=`which bowtie2`
SAMTOOLS="/usr/local/bin/samtools"
SAMTOOLS=`which samtools`
FILTER="fastq-filter-by-name.pl"
date
echo "Clean up any output from previous runs."
rm -v trim.pair.*.fastq
rm -v trim.sing.*.fastq
rm -v nonSDB.*.fastq
rm -v sdb.*.sam sdb.*.bam
echo
echo "Index the subtraction database."
echo "This uses bowtie2."
echo "This requires RAM 2 x database size."
echo "This reads FASTA and generates *.bt2 files."
ls ${BOWTIE_BUILD}
${BOWTIE_BUILD} ${SDB} ${INDEX} > build.out 2>&1
echo -n $?; echo " exit status"
echo RESULTS
# NOTE(review): the generated index files are named ${INDEX}*.bt2; a plain
# "ls ${INDEX}" may not match them — confirm.
ls ${INDEX}
date
echo
echo "Trim the RNAseq reads."
echo "This uses trimmomatic which requires Java."
echo "This requires an adapter file corresponding to the Illumina library prep."
echo "Several adapter files are provided with trimmomatic."
echo "To avoid re-trimming, this will clean up trim.*.fastq files from previous runs."
echo "This runs on all files named *_R1_*.fastq and looks for the corresponding R2."
echo "This generates files named trim.pair.*.fastq which get mapped next."
echo "This also generates files named trim.sing.*.fastq which get ignored."
ls ${SCRIPTDIR}/${TRIMMER}
${SCRIPTDIR}/${TRIMMER} > trim.out 2>&1
echo RESULTS
ls trim.pair.*.fastq
date
echo
echo "Looop over pairs of trimmed read files."
ls ${BOWTIE_ALIGN}
# Bowtie2 option bundles used for every pair below.
THREADS="-p 4"
ALIGNMENT="--sensitive-local"
FASTQ="-q --phred33"
UNALIGNED="--no-unal" # keep unaligned out of the sam file
EXCLUDE="-v" # option to exclude the named reads
for FF in trim.pair.*_R1_*.fastq; do
    MYR1=${FF}
    # Derive the mate file name by swapping _R1_ for _R2_.
    MYR2=` echo $MYR1 | sed 's/_R1_/_R2_/' `
    SAM="${MYR1}.sam"
    BAM="${MYR1}.bam"
    TEMP_IDS="${MYR1}.tmp"
    echo
    echo "Map trimmed reads to SDB."
    echo "Writinng $BAM"
    CMD="${BOWTIE_ALIGN} ${UNALIGNED} ${THREADS} ${ALIGNMENT} ${FASTQ} -x ${INDEX} -1 ${MYR1} -2 ${MYR2} -S sdb.${SAM}"
    echo ${CMD}
    nice ${CMD} > map.out 2>&1
    echo -n $?; echo " exit status"
    # Convert SAM to BAM for compact storage.
    CMD="${SAMTOOLS} view -h -b -o sdb.${BAM} sdb.${SAM} "
    echo ${CMD}
    nice ${CMD}
    echo -n $?; echo " exit status"
    echo "Write read ID for every read mapped."
    echo "These are the IDs to subtract."
    echo "Assume reads 1 and 2 have the same read ID."
    echo "If your reads have the /1 and /2 suffix, please change that and start again."
    ${SAMTOOLS} view sdb.${BAM} | cut -f 1 > ${TEMP_IDS}
    # -v excludes the listed IDs, i.e. keeps only reads that did NOT map.
    perl ${SCRIPTDIR}/${FILTER} ${EXCLUDE} ${TEMP_IDS} < ${MYR1} > nonSDB.${MYR1} 2> subtract.R1.out
    perl ${SCRIPTDIR}/${FILTER} ${EXCLUDE} ${TEMP_IDS} < ${MYR2} > nonSDB.${MYR2} 2> subtract.R2.out
done
echo
echo RESULTS
ls nonSDB.*
date
exit
| true
|
c1fed5554afb55dfd839fff3b2987376684d2653
|
Shell
|
AndreasPK/bench_hc_libs
|
/libs/binary/driver.sh
|
UTF-8
| 1,578
| 3.671875
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Benchmark driver for the "binary" Haskell library: clones a fork into
# build/, renames the package to binary-bench, and runs each benchmark with
# the requested compiler, writing criterion CSVs into benchResults/.
#Execute using ./driver <pathToGhc> [aliasForGhc [BuildFlags [FlagAlias]]]
#If any optional flag is given all flags before that must also be given.
set -x
LOG_DIR=benchResults
mkdir -p "$LOG_DIR"
# Re-anchor relative to build/, where the benchmarks run from.
LOG_DIR="../$LOG_DIR"
BENCHMARKS="get builder generics-bench put"
HC=${1:-ghc}           # compiler executable
HC_NAME=${2:-vanilla}  # label used in the store dir and CSV file names
HC_FLAGS=${3:-""}      # extra ghc-options
FLAG_ALIAS=${4:-$HC_NAME}
# HC_NAME=${1:-vanilla}
#We use a compiler and a build name in case we want to benchmark different flags.
#args: compiler-exec compiler-name build-flags log-name bench-name
function runBenchmark() {
    HC="$1"
    HC_NAME="$2"
    HC_FLAGS="$3"
    NAME="$4"
    BENCHMARK="$5"
    # Per-compiler package store and per-build dist dir keep runs isolated.
    STORE_DIR="s_$HC_NAME"
    BUILD_DIR="b_$NAME"
    echo "Benchmark: $NAME"
    cabal --store-dir="$HOME/.${STORE_DIR}" new-run --builddir="$BUILD_DIR" -w "$HC" --ghc-options="${HC_FLAGS}" --enable-benchmarks --disable-tests \
        "$BENCHMARK" -- --csv "$LOG_DIR/${HC_NAME}.${NAME}.${BENCHMARK}.csv"
}
# custom per library, run inside of library repository
# NOTE: intentionally leaves the working directory inside build/ on return;
# the benchmark loop below relies on that, and the final "cd .." undoes it.
function setup_benchmarks() {
    mkdir "build" -p
    cp cabal.project "build"
    cd "build"
    #2019-04-13 Monad fail not patched yet so use fork
    if [ ! -d "binary" ]; then
        git clone https://github.com/kolmodin/binary
    fi
    cp binary/generics-bench.cache.gz .
    # Rename the package so it does not clash with the installed "binary".
    sed "s/name: binary/name: binary-bench/" binary/binary.cabal -i
    cabal new-update
}
setup_benchmarks
# "/e/ghc_regSpill/inplace/bin/ghc-stage2.exe"
for BENCHMARK in $BENCHMARKS
do
    runBenchmark "${HC}" "${HC_NAME}" "${HC_FLAGS}" "${HC_NAME}" "${BENCHMARK}";
done
cd ..
|
e01f666d0da7773c7f671785e88d50b48bc87d6e
|
Shell
|
DMBuce/clicraft
|
/src/etc/clicraft-starmade.conf.in
|
UTF-8
| 3,769
| 3.703125
| 4
|
[
"BSD-2-Clause",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
#!bash
# This is an example clicraft configuration file for starmade servers. The
# format is bash, so configuration options take the form OPTION="value" (with
# no whitespace between the equals sign (=) ).
#
# This file is sourced from within clicraft before the default values are set,
# so you must take care to initialize any options that another option depends
# on. For example, if you set SERVER_JAR=$SERVER_DIR/custom_server.jar without
# setting SERVER_DIR first, clicraft will look for /custom_server.jar, which
# is probably not what you want.
#
# You can include any extra variables you wish to use in custom action
# scripts here. You can also embed shell logic without trouble; this behavior may
# change in future versions, however.
# MULTIPLEXER
# The terminal multiplexer to use. Valid values are "tmux" and "screen".
#
MULTIPLEXER="tmux"
# SERVER_DIR
# The directory that the server is launched from.
# This is the directory that your world data, server.properties, and server log
# (among other things) resides in.
#
SERVER_DIR="@DEFAULT_SERVER_DIR@"
# SERVER_NAME
# An identifier for the server. This is the value used to set the multiplexer's
# window and session names when starting the server. It is also used in some
# informational messages.
#
SERVER_NAME="starmade"
# SERVER_JAR, SERVER_URL
# The location of the server jar on disk and on the web, respectively
#
SERVER_JAR="$SERVER_DIR/StarMade/StarMade.jar"
SERVER_URL="http://files.star-made.org/StarMade-Starter.jar"
# SERVER_LOG
# The location of the server log file
#
SERVER_LOG="$SERVER_DIR/StarMade/logs/log.txt.0"
# SERVER_VERSION
# The server version. This will replace any instances of '%v' in
# DOWNLOAD_COMMAND. If set to 'release' or 'snapshot', the latest release or
# snapshot version will be substituted instead.
#
SERVER_VERSION="release"
# SERVER_TYPE
# If this option is set to "bukkit", SERVER_JAR and SERVER_URL will take on
# default values such that the server uses Bukkit Recommended Builds, instead
# of the vanilla minecraft jar. That is, they will have these default values:
#SERVER_JAR="$SERVER_DIR/craftbukkit.jar"
#SERVER_URL="http://cbukk.it/craftbukkit.jar"
#
SERVER_TYPE="starmade"
# START_COMMAND
# The command that should be used to launch the server.
#
START_COMMAND="$SERVER_DIR/StarMade/StarMade-dedicated-server-linux.sh"
# STOP_CMD
# The command to send to the server console to shut the server down.
#
STOP_CMD="/shutdown 0"
# DOWNLOAD_COMMAND
# The command used to download or update the server jar. Running this
# command should result in SERVER_URL being downloaded and saved to
# SERVER_JAR.
#
# this is a hack, but it's the best we have for now
mkdir -p "$(dirname "$SERVER_JAR")"
# Fetch the StarMade starter jar and run it headless so it installs/updates
# the server under $SERVER_DIR/StarMade.
update_starmade() {
    local starter="$SERVER_DIR/StarMade-Starter.jar"
    curl -#L -o "$starter" "$SERVER_URL"
    java -jar "$starter" -nogui
}
DOWNLOAD_COMMAND="update_starmade"
# TIMEOUT
# The amount of time, in seconds, that clicraft will wait for certain internal
# operations to finish.
#
TIMEOUT=20
# START_TIMEOUT
# The amount of time, in seconds, that clicraft will wait for the server to
# start up.
#
START_TIMEOUT=$TIMEOUT
# STOP_TIMEOUT
# The amount of time, in seconds, that clicraft will wait for the server to
# shut down.
#
STOP_TIMEOUT=$TIMEOUT
# CMD_TIMEOUT
# The amount of time, in seconds, that clicraft will wait for a command to finish
# when using `clicraft cmd`
#
CMD_TIMEOUT=$TIMEOUT
# CMD_LSTRIP
# Strip this string from the front of commands passed to `clicraft cmd`
CMD_LSTRIP=""
# REDB
# The location of the regex database
#
REDB="$CLICRAFT_CONFIG/redb/$SERVER_TYPE.tab"
| true
|
ff589bf9295ac36366518dc03faaaafa2e24fca4
|
Shell
|
akbiggs/Bombman
|
/compile.sh
|
UTF-8
| 405
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
# Compile the Bombman PlayerAI against the game jar and bundled libraries.
set -o errexit

DIR_PATH=$(pwd)
JAR_PATH="$DIR_PATH/bombman.jar:$DIR_PATH/lib/*"   # '*' is a javac classpath wildcard
CLASS_PATH="$DIR_PATH/bombmanplayer"
BUILD_CLASS_PATH="$JAR_PATH:$CLASS_PATH"
SRC_PATH="$DIR_PATH/bombmanplayer/PlayerAI.java"

# Clean: the glob must be OUTSIDE the quotes, otherwise rm is handed the
# literal filename '*.class' and old class files survive.
rm -f "$CLASS_PATH"/*.class

# Compile (quote the classpath so the shell does not glob-expand 'lib/*').
javac -classpath "$BUILD_CLASS_PATH" "$SRC_PATH" -verbose || { echo "COMPILATION FAILED"; exit 1; }
echo "COMPILATION COMPLETED SUCCESSFULLY"
| true
|
fb546d917adb89e382d664cd4f393b5f252cb7ff
|
Shell
|
fasl/freenac
|
/bin/monitor_mysql_slave
|
UTF-8
| 4,055
| 3.71875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
#!/usr/bin/sh -x
#
# FILE: monitor_mysql_slave
#
# FUNCTION: Alert once every 15 mins, if replication dies
# USAGE: Call from cron, if an emails come, react!
# */5 7-18 * * 1-5 /opt/vmps/monitor_mysql_slave
#
# 2006.07.19/Sean Boran. Based on original from:
# http://forge.mysql.com/snippets/view.php?id=6
# Matthew Montgomery mmontgom@rackspace.com
#
########################
# Tunables
subject="MySQL replication problem"   # mail subject (not referenced in the code below -- confirm before relying on it)
PATH=/sbin:/usr/sbin:/bin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/mysql/bin
tempfile2=/tmp/monitor_replication.$$   # scratch file name (not referenced in the code below -- confirm)
repeat_alert_interval=15 # minutes; minimum interval between repeated alerts (used by check_alert_lock)
lock_file=/tmp/slave_alert.lck   # existence of this file means "an alert was already sent"
active=yes   # alerts are only emitted while this is 'yes'
## Check if alert is already sent ##
# Returns: 0 = no lock file, 1 = lock exists and is younger than
# $repeat_alert_interval minutes (alert already sent recently),
# 2 = lock exists but has expired.
check_alert_lock () {
    if [ -f "$lock_file" ] ; then
        # find prints the file only if it was changed within the interval.
        current_file=$(find "$lock_file" -cmin -"$repeat_alert_interval")
        if [ -n "$current_file" ] ; then
            # echo "Current lock file found"
            return 1
        else
            # echo "Expired lock file found"
            return 2
        fi
    else
        return 0
    fi
}
## Find the location of the mysql.sock file ##
# Sets $socket (if not already set) by probing common paths, falling back
# to parsing netstat output.  Exits 1 if no usable socket is found.
check_for_socket () {
    if [ -z "$socket" ] ; then
        if [ -S /var/lib/mysql/mysql.sock ] ; then
            socket=/var/lib/mysql/mysql.sock
        elif [ -S /tmp/mysql.sock ] ; then
            socket=/tmp/mysql.sock
        else
            ps_socket=$(netstat -ln | egrep "mysql(d)?\.sock" | awk '{ print $9 }')
            if [ "$ps_socket" ] ; then
                socket=$ps_socket
            fi
        fi
    fi
    if [ -S "$socket" ] ; then
        echo UP > /dev/null
    else
        echo "No valid socket file $socket found!"
        echo "mysqld is not running or it is installed in a custom location"
        # Escape the '$' so the message names the variable instead of
        # interpolating its (empty) value.
        echo "Please set the \$socket variable at the top of this script."
        exit 1
    fi
}
check_for_socket

# Note SB: added '-i' to grep
Slave_IO_Running=$(mysql -Bse "show slave status\G" | grep -i Slave_IO_Running | awk '{ print $2 }')
Slave_SQL_Running=$(mysql -Bse "show slave status\G" | grep -i Slave_SQL_Running | awk '{ print $2 }')
Last_error=$(mysql -Bse "show slave status\G" | grep -i Last_error | awk -F \: '{ print $2 }')

if [ -z "$Slave_IO_Running" ] || [ -z "$Slave_SQL_Running" ] ; then
    echo "Replication is not configured or you do not have the required access to MySQL"
    exit
fi

# Emit the given alert lines unless a current (unexpired) lock exists;
# (re)creates the lock so the same alert repeats at most every
# $repeat_alert_interval minutes.  Previously this logic was copy-pasted
# into all three failure branches.
raise_alert () {
    if [ "$active" = 'yes' ] ; then
        check_alert_lock
        if [ $? -eq 1 ] ; then
            ## Current Lock -- stay silent ##
            echo "up" > /dev/null
        else
            ## Stale/No Lock ##
            touch "$lock_file"
            printf '%s\n' "$@"
        fi
    fi
}

if [ "$Slave_IO_Running" = 'Yes' ] && [ "$Slave_SQL_Running" = 'Yes' ] ; then
    # Healthy: clear any previous alert lock.
    if [ -f "$lock_file" ] ; then
        rm "$lock_file"
        echo "Replication slave is running"
        echo "Removed Alert Lock"
    fi
    exit 0
elif [ "$Slave_SQL_Running" = 'No' ] ; then
    raise_alert "SQL thread not running on server $(hostname -s)!" "Last Error: $Last_error"
    exit 1
elif [ "$Slave_IO_Running" = 'No' ] ; then
    raise_alert "LOG IO thread not running on server $(hostname -s)!" "Last Error: $Last_error"
    exit 1
else
    raise_alert "Unexpected Error!" "Check Your permissions!"
    exit 2
fi
| true
|
d40a72d7142a901e2dc2efc8170a0d5fd8249c3d
|
Shell
|
reposman/utils
|
/git/config.sh
|
UTF-8
| 243
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/sh
# Apply git pack/gc settings to the current repository; with -b also
# disable compression (useful for backup repositories).

# Echo the command, then execute it.  The command is passed as separate
# arguments instead of one string, so execution no longer relies on
# fragile word-splitting of an unquoted $@.
run() {
    echo "$@"
    "$@"
}

run git config pack.packSizeLimit 4m
run git config gc.auto 0

if [ -z "$1" ] ; then
    true
elif [ "-b" = "$1" ] ; then   # '=' not '==': this is /bin/sh, not bash
    run git config core.compression 0
    run git config pack.compression 0
fi
| true
|
06afd6c9bf30a1a5b9ea651f6e5d024b5b67512a
|
Shell
|
bediger4000/combinatory-logic
|
/runtests
|
UTF-8
| 1,247
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/sh
# Run ./cl over every input in tests.in/ and diff against expected output.

# Raise the data and stack limits to their hard maximums.
ulimit -d $(ulimit -a -H | grep data | sed 's/^..* *\([^ ][^ ]*\)$/\1/')
ulimit -s $(ulimit -a -H | grep stack | sed 's/^..* *\([^ ][^ ]*\)$/\1/')

if [ ! -x ./cl ]
then
	echo Compile ./cl first
	exit 1
fi

if [ ! -d ./tests.output ]
then
	mkdir ./tests.output
fi

for TNAME in tests.in/test.???
do
	OUTPUT=$(basename $TNAME)
	echo "Running test $OUTPUT"
	./cl -p < $TNAME > tests.output/$OUTPUT
	# Capture the status IMMEDIATELY: the original read $? again inside the
	# if-branch, by which point it held the status of the [ ] test (always 0).
	XSTATUS=$?
	if [ "$XSTATUS" != "0" ]
	then
		echo Test $OUTPUT exited with non-zero status $XSTATUS
	fi
	DESIRED=tests.out/$OUTPUT
	if [ -r $DESIRED ]
	then
		if diff $DESIRED tests.output/$OUTPUT > /dev/null
		then
			:
		else
			echo "Test $OUTPUT failed"
		fi
	else
		echo "No desired output specified for $OUTPUT"
	fi
done

# Run some instances with all command line flags, some
# nonsense options, etc, for code coverage.
./cl -m < /dev/null > /dev/null 2>&1
./cl -c -d -L tests.in/test.001 -m -p -s -T -t -x -B curry2 -B q -N 10 < /dev/null > /dev/null 2>&1
./cl -t -B curry -t -CX -N 53 < /dev/null > /dev/null 2>&1
./cl -t -B smidgeon -t -C X -N 10 < /dev/null > /dev/null 2>&1
./cl -C S -C K -C I -C B -C C -C W -C M -C T -C J < /dev/null > /dev/null 2>&1
./cl -p -L tests.in/test.001 -L tests.in/test.002 < /dev/null > /dev/null 2>&1
| true
|
1a7b97c986f4ade05e33cdeb3c45827c8aa24dc9
|
Shell
|
faerietree/shell__backup_using_git
|
/backup.all.sh
|
UTF-8
| 1,610
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/bash
# Back up each listed disk directory by committing its contents to git and
# pushing to the shared bare repository.

# Space-separated list of directories (relative to /) to back up:
#   q = daten, d = bilder
# p is excluded because that network disk stores not only the backups but
# also the bare shared repositories (accessible via SSH).
# (The original 'disks= { q ,d }' brace construct was not valid shell and
# left $disks empty.)
disks="q d"

cd /
for d in $disks; do
    echo "" >> backup.all.log
    echo "Backing up disk: $d" >> backup.all.log
    echo "Current working directory: $PWD"
    echo "Changing working directory to: $d"
    cd "$d" || continue
    # TODO Check if currently on branch "new" and abort if so?
    # What if modifications / new files are overridden? git checkout master
    echo "Tagging master as master_previous to remember it, to be able to reset to it."
    git tag -d master_previous   # 'git tag' deletes with -d; -D is a git-branch option
    git tag master_previous
    #======= ADD, BACKUP CHANGES
    echo "Committing modifications..." >> backup.all.log
    git commit -a -m "Update files."
    echo "Pushing commit individually to keep transfer size low (backup often! Daily or" >> backup.all.log
    echo "after important changes if advanced!) ..." >> backup.all.log
    git push origin master
    #======= ADD, COMMIT NEW FILES
    echo "Committing new files ..."
    git add ./[^$]* # TODO Test if this works properly!
    git commit -m "Add new files."
    echo "Pushing commit individually to keep transfer size low (backup often! Daily or" >> backup.all.log
    echo "after important changes if advanced!) ..." >> backup.all.log
    git push origin master
    echo "*done* Backing up disk: $d" >> backup.all.log
    # Return to / so the next (relative) disk path resolves correctly;
    # the original never changed back and used 'end' instead of 'done'.
    cd /
done
| true
|
81ee86fcbbb4f9e2984e30d50ce070860149b365
|
Shell
|
unique379r/Useful_code
|
/Whole_genome_alignment/lastzExample.sh
|
UTF-8
| 1,540
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
##############################################################################
# This is a template batch script for calling lastz to make pairwise
# gene/genome alignments on ASU's Saguaro Cluster.
#
# Requires the Whole_Genome_ALignment scripts from the Avian Genome Project.
# git clone https://github.com/WilsonSayresLab/Useful_code.git
#
# Replace anything inside <> before use.
##############################################################################
#SBATCH -N 1
#SBATCH -n 16
#SBATCH -t 4-0:0
#SBATCH --job-name=<job name>
#SBATCH -A mwilsons
#SBATCH -o slurm.%j.out
#SBATCH -e slurm.%j.err
#SBATCH --mail-type=END,FAIL
#SBATCH --mail-user=<user's e-mail>

# Make working directory and copy genomes to it (-p: do not fail on rerun)
mkdir -p alignments/lastz/
cd alignments/lastz/
cp <path to fasta file> ./
cp <path to fasta file> ./

# Add path to lastz scripts
export PATH=$PATH:<path to Whole_genome_alignment/pairwise/bin/>
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:<path to Whole_genome_alignment/pairwise/bin/>

# Invoke lastz pipeline (may need to change the number of cuts if the program
# finishes without producing output or times out).  Note: the template takes
# TWO fasta file arguments (the second placeholder was truncated originally).
lastz_CNM.pl <name of fasta file 1> <name of fasta file 2> --run multi --cuts 100 --cpu 16 --hspthresh 2200 --inner 2000 --ydrop 3400 --gappedthresh 10000 --scores HoxD55 --chain --linearGap loose

# Change name and inline species identifiers of output maf file:
mv all.maf <species_names>.maf
sed -i 's/query/<species_1>/g' <species_names>.maf
sed -i 's/target/<species_2>/g' <species_names>.maf
| true
|
1c923674a8247304c52ec6e5b5ee520256fa0c06
|
Shell
|
threeworld/Security-baseline
|
/Linux/主机安全基线脚本/CIS-LBK/DEBIAN_FAMILY_LBK/functions/recommendations/nix_fed28_ensure_password_reuse_limited.sh
|
UTF-8
| 6,601
| 3.546875
| 4
|
[] |
no_license
|
#!/usr/bin/env sh
#
# CIS-LBK Recommendation Function
# ~/CIS-LBK/functions/recommendations/nix_fed28_ensure_password_reuse_limited.sh
#
# Name Date Description
# ------------------------------------------------------------------------------------------------
# Eric Pinnell 09/28/20 Recommendation "Ensure password reuse is limited"
#
# Ensure pam_unix/pam_pwhistory limit password reuse (remember>=5) in both
# /etc/pam.d/system-auth and /etc/pam.d/password-auth, remediating via the
# authselect custom profile when needed.
fed28_ensure_password_reuse_limited()
{
	echo "- $(date +%d-%b-%Y' '%T) - Starting $RNA" | tee -a "$LOG" 2>> "$ELOG"
	test=""
	test1=""
	test2=""
	test3=""
	# test if custom profile is in use, otherwise set custom profile if possible
	cpro=$(authselect current | awk '/custom\// {print $3}')
	if [ -z "$cpro" ]; then
		custprofile="$(authselect list | awk -F / '/custom\// { print $2 }' | cut -f1)"
		if [ "$(echo "$custprofile" | awk '{total=total+NF};END{print total}')" = 1 ]; then
			authselect select custom/"$custprofile" with-sudo with-faillock without-nullok --force
			cpro=$(authselect current | awk '/custom\// {print $3}')
		else
			test3=manual
		fi
	fi
	# Test if system-auth file is configured
	if grep -Eqs '^\s*password\s+(requisite|sufficient)\s+pam_unix\.so\s+([^#]+\s+)?remember=([5-9]|[1-9][0-9]+)\b' /etc/pam.d/system-auth || grep -Eqs '^\s*password\s+(requisite|required)\s+pam_pwhistory\.so\s+([^#]+\s+)?remember=([5-9]|[1-9][0-9]+)\b' /etc/pam.d/system-auth; then
		test1=passed
	else
		[ -n "$cpro" ] && file="/etc/authselect/$cpro/system-auth"
		if [ -e "$file" ]; then
			if ! grep -Eqs '^\s*password\s+(requisite|required)\s+pam_pwhistory\.so\s+([^#]+\s+)?remember=([5-9]|[1-9][0-9]+)\b' "$file"; then
				if grep -Eqs 'password\s+(requisite|required)\s+pam_pwhistory\.so(\s+[^#]+\s+)?\s*remember=' "$file"; then
					sed -ri 's/^\s*(#\s*)?(password\s+(requisite|required)\s+pam_pwhistory\.so\s+)([^#]+\s+)?(remember=)(\S++\b)(.*)$/\2\4\55 \7/' "$file"
				elif grep -Es 'password\s+(requisite|required)\s+pam_pwhistory\.so' "$file" | grep -vq 'remember='; then
					sed -ri 's/^\s*(#\s*)?(password\s+(requisite|required)\s+pam_pwhistory\.so\s+)([^#]+\s*)?(#.*)?$/\2\4 remember=5\5/' "$file"
				else
					sed -ri '/password\s+(\S+)\s+pam_unix\.so\s+/ i password required pam_pwhistory.so remember=5 use_authtok' "$file"
				fi
			fi
			if ! grep -Eqs '^\s*password\s+(requisite|sufficient)\s+pam_unix\.so\s+([^#]+\s+)?remember=([5-9]|[1-9][0-9]+)\b' "$file"; then
				if grep -Eqs 'password\s+(requisite|sufficient)\s+pam_unix\.so(\s+[^#]+\s+)?\s*remember=' "$file"; then
					sed -ri 's/^\s*(#\s*)?(password\s+(requisite|sufficient)\s+pam_unix\.so\s+)([^#]+\s+)?(remember=)(\S++\b)(.*)$/\2\4\55 \7/' "$file"
				elif grep -Es 'password\s+(requisite|sufficient)\s+pam_unix\.so' "$file" | grep -vq 'remember='; then
					sed -ri 's/^\s*(#\s*)?(password\s+(requisite|sufficient)\s+pam_unix\.so\s+)([^#]+\s*)?(#.*)?$/\2\4 remember=5\5/' "$file"
				else
					sed -ri '/password\s+(\S+)\s+pam_deny\.so\s+/ i password required pam_unix.so remember=5 use_authtok' "$file"
				fi
			fi
		fi
	fi
	# Test if password-auth file is configured
	if grep -Eqs '^\s*password\s+(requisite|sufficient)\s+pam_unix\.so\s+([^#]+\s+)?remember=([5-9]|[1-9][0-9]+)\b' /etc/pam.d/password-auth || grep -Eqs '^\s*password\s+(requisite|required)\s+pam_pwhistory\.so\s+([^#]+\s+)?remember=([5-9]|[1-9][0-9]+)\b' /etc/pam.d/password-auth; then
		test2=passed
	else
		[ -n "$cpro" ] && file="/etc/authselect/$cpro/password-auth"
		if [ -e "$file" ]; then
			if ! grep -Eqs '^\s*password\s+(requisite|required)\s+pam_pwhistory\.so\s+([^#]+\s+)?remember=([5-9]|[1-9][0-9]+)\b' "$file"; then
				if grep -Eqs 'password\s+(requisite|required)\s+pam_pwhistory\.so(\s+[^#]+\s+)?\s*remember=' "$file"; then
					sed -ri 's/^\s*(#\s*)?(password\s+(requisite|required)\s+pam_pwhistory\.so\s+)([^#]+\s+)?(remember=)(\S++\b)(.*)$/\2\4\55 \7/' "$file"
				elif grep -Es 'password\s+(requisite|required)\s+pam_pwhistory\.so' "$file" | grep -vq 'remember='; then
					sed -ri 's/^\s*(#\s*)?(password\s+(requisite|required)\s+pam_pwhistory\.so\s+)([^#]+\s*)?(#.*)?$/\2\4 remember=5\5/' "$file"
				else
					sed -ri '/password\s+(requisite|sufficient)\s+pam_unix\.so\s+.*$/ i password required pam_pwhistory.so remember=5 use_authtok' "$file"
				fi
			fi
			if ! grep -Eqs '^\s*password\s+(requisite|sufficient)\s+pam_unix\.so\s+([^#]+\s+)?remember=([5-9]|[1-9][0-9]+)\b' "$file"; then
				if grep -Eqs 'password\s+(requisite|sufficient)\s+pam_unix\.so(\s+[^#]+\s+)?\s*remember=' "$file"; then
					sed -ri 's/^\s*(#\s*)?(password\s+(requisite|sufficient)\s+pam_unix\.so\s+)([^#]+\s+)?(remember=)(\S++\b)(.*)$/\2\4\55 \7/' "$file"
				elif grep -Es 'password\s+(requisite|sufficient)\s+pam_unix\.so' "$file" | grep -vq 'remember='; then
					sed -ri 's/^\s*(#\s*)?(password\s+(requisite|sufficient)\s+pam_unix\.so\s+)([^#]+\s*)?(#.*)?$/\2\4 remember=5\5/' "$file"
				else
					# FIX: the inserted text was wrapped in literal double quotes,
					# which would have been written verbatim into the PAM file.
					sed -ri '/password\s+(required|requisite|sufficient)\s+pam_deny\.so\s+.*$/ i password required pam_unix.so remember=5 use_authtok' "$file"
				fi
			fi
		fi
	fi
	# Test if recommendation is passed, remediated, manual, or failed remediation
	if [ "$test1" = passed ] && [ "$test2" = passed ]; then
		test=passed
	else
		if [ "$test3" = manual ]; then
			test=manual
		else
			[ -n "$cpro" ] && authselect apply-changes
			if grep -Eqs '^\s*password\s+(requisite|sufficient)\s+pam_unix\.so\s+([^#]+\s+)?remember=([5-9]|[1-9][0-9]+)\b' /etc/pam.d/system-auth || grep -Eqs '^\s*password\s+(requisite|required)\s+pam_pwhistory\.so\s+([^#]+\s+)?remember=([5-9]|[1-9][0-9]+)\b' /etc/pam.d/system-auth; then
				test1=remediated
			fi
			# FIX: the fallback grep previously re-checked system-auth, so a
			# failed password-auth remediation could be reported as success.
			if grep -Eqs '^\s*password\s+(requisite|sufficient)\s+pam_unix\.so\s+([^#]+\s+)?remember=([5-9]|[1-9][0-9]+)\b' /etc/pam.d/password-auth || grep -Eqs '^\s*password\s+(requisite|required)\s+pam_pwhistory\.so\s+([^#]+\s+)?remember=([5-9]|[1-9][0-9]+)\b' /etc/pam.d/password-auth; then
				test2=remediated
			fi
			[ "$test1" = remediated ] && [ "$test2" = remediated ] && test=remediated
		fi
	fi
	# Set return code and return
	case "$test" in
		passed)
			echo "Recommendation \"$RNA\" No remediation required" | tee -a "$LOG" 2>> "$ELOG"
			return "${XCCDF_RESULT_PASS:-101}"
			;;
		remediated)
			echo "Recommendation \"$RNA\" successfully remediated" | tee -a "$LOG" 2>> "$ELOG"
			return "${XCCDF_RESULT_PASS:-103}"
			;;
		manual)
			echo "Recommendation \"$RNA\" requires manual remediation" | tee -a "$LOG" 2>> "$ELOG"
			return "${XCCDF_RESULT_FAIL:-106}"
			;;
		*)
			echo "Recommendation \"$RNA\" remediation failed" | tee -a "$LOG" 2>> "$ELOG"
			return "${XCCDF_RESULT_FAIL:-102}"
			;;
	esac
}
| true
|
8434bde7393d507db6605067b01557ba8d89cdbe
|
Shell
|
adriangrigore/ulinux
|
/ports/pkg/ports
|
UTF-8
| 625
| 3.890625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# shellcheck disable=SC1091
. /bin/pkg.common

index=

# Remove the temporary index file on exit.
cleanup() {
	rm -rf "$index"
}

# Download the remote ports index into a fresh temp file.
get_index() {
	index=$(mktemp -p /tmp -t ports-index-XXXXXX)
	wget -q -O "$index" "$URL"/index
}

# Mirror every file listed in the index into PKG_PORTSDIR
# (subshell so the cd does not leak).
sync_ports() {
	(
		cd "$PKG_PORTSDIR" || fatal "could not change directory to PKG_PORTSDIR $PKG_PORTSDIR"
		while read -r entry; do
			mkdir -p "$(dirname "$entry")"
			wget -q -O "$entry" "$URL/$entry"
		done < "$index"
	)
}

_main() {
	trap cleanup EXIT
	get_index || fatal "error retriving index"
	sync_ports || fatal "error syncing ports"
}

# Run only when executed as a script (not when sourced into a bash session).
case "$0" in
	"" | -bash) : ;;
	*) _main "$@" ;;
esac
| true
|
bb5e59dabf2a561421b9e56c19c8f45593b88009
|
Shell
|
prajnanBhuyan/GeekTrust
|
/TameOfThrones/build.sh
|
UTF-8
| 770
| 2.828125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Clean, restore, build, test and publish the Set5 solution.

# Print a banner: a 30-char '=' rule, the title, another rule.
write_header()
{
    printf '=%.0s' {1..30}
    printf '\n'
    # Pass the title as DATA, not as the printf format string -- a title
    # containing '%' would otherwise corrupt the output.
    printf ' %s' "$1"
    printf '\n'
    printf '=%.0s' {1..30}
    printf '\n'
}

write_header "CLEAN"
dotnet clean -c 'Release' ./src/Set5.sln
printf '\n'
printf '\n'

write_header "RESTORE"
dotnet restore -s https://api.nuget.org/v3/index.json ./src/Set5.sln
printf '\n'
printf '\n'

write_header "BUILD"
dotnet build -c 'Release' ./src/Set5.sln
printf '\n'
printf '\n'

write_header "TEST"
dotnet test -c 'Release' ./src/Set5.sln
printf '\n'
printf '\n'

write_header "PUBLISH"
dotnet publish -o ./src/Build/Published -c 'Release' ./src/TameOfThrones/TameOfThrones.csproj --no-build

GREEN='\033[0;32m'
printf "\n\n${GREEN}Executable can be found at: ./src/Build/Published/TameOfThrones\n"
| true
|
555ecd4a91cf4615b0227cb35284d376f4a2a071
|
Shell
|
mrummuka/geo
|
/spiritdvd2text
|
UTF-8
| 2,382
| 3.90625
| 4
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
#!/bin/sh
PROGNAME="$0"
# Print the man-page-style help text to stdout and exit 1.
# (Unquoted EOF: $PROGNAME and the backtick commands below ARE expanded.)
usage() {
	cat <<EOF
NAME
`basename $PROGNAME` - Spirit DVD Code to/from text
SYNOPSIS
`basename $PROGNAME` [options]
DESCRIPTION
Spirit DVD Code to or from text. Decoding uses '1', 'l', '|', or 'I' as
aliases for '1'.
OPTIONS
-e Encode
-D lvl Debug level
EXAMPLES
Decode:
$ echo "11--1--111--11----11--11" | spiritdvd2text
NORTH
Encode:
$ echo NORTH | spiritdvd2text -e
11--1--111--11----11--11
SEE ALSO
http://www.planetary.org/explore/projects/redrover/mars-dvd-code-clues.html
http://rumkin.com/tools/cipher/substitution.php
EOF
	exit 1
}
#
# Write "progname: message" to stderr and terminate with status 1.
#
error() {
	printf '%s: %s\n' "`basename $PROGNAME`" "$1" >&2
	exit 1
}
# Emit a diagnostic on stderr when the current $DEBUG level is at least $1.
debug() {
	if [ "$DEBUG" -ge "$1" ]; then
		printf '%s: %s\n' "`basename $PROGNAME`" "$2" >&2
	fi
}
#
# Process the options
#
DEBUG=0   # verbosity level consumed by debug()
ENC=0     # 0 = decode (default), 1 = encode (-e)
while getopts "eD:h?" opt
do
	case $opt in
	e) ENC=1;;
	D) DEBUG="$OPTARG";;
	h|\?) usage;;
	esac
done
shift `expr $OPTIND - 1`
#
# Main Program
#
# All the work happens in awk: ENC selects encode (text -> code) or decode
# (code -> text).  let[] maps code strings to letters; for encoding it is
# inverted into code[].  Decoding scans the table for a matching prefix --
# this appears to assume the code table is prefix-free (table iteration
# order is arbitrary); confirm before extending the table.
awk -v ENC=$ENC '
BEGIN {
let["---"] = " "
let["--1"] = "E"
let["-1-"] = "A"
let["-11"] = "O"
let["1--"] = "R"
let["1-1---"] = "M"
let["1-1--1"] = "W"
let["1-1-1-"] = "F"
let["1-1-11"] = "G"
let["1-11--"] = "Y"
let["1-11-1"] = "P"
let["1-111-"] = "B"
let["1-1111---"] = "V"
let["1-1111--1"] = "K"
let["1-1111-1-"] = "J"
let["1-1111-11"] = "X"
let["1-11111--"] = "Q"
let["1-11111-1"] = "Z"
let["11----"] = "T"
let["11---1"] = "I"
let["11--1-"] = "N"
let["11--11"] = "H"
let["11-1--"] = "D"
let["11-1-1"] = "L"
let["11-11-"] = "C"
let["11-111"] = "U"
let["111"] = "S"
if (ENC)
for (i in let)
{
code[let[i]] = i
}
}
ENC == 0 {
text = $0
gsub("[l]", "1", text)
gsub("[|]", "1", text)
gsub("[I]", "1", text)
gsub("[^-1]", "", text)
while (length(text) > 0)
{
#print "Text: ", text
found = 0
for (l in let) {
len = length(l)
tok = substr(text, 1, len)
# print len, tok, let[l]
if (tok == l)
{
printf "%s", let[l]
text = substr(text, len+1)
found = 1
break
}
}
if (!found)
{
print "error!"
exit
}
}
printf "\n"
}
ENC == 1 {
text = toupper($0)
for (i = 1; i <= length(text); ++i)
{
c = substr(text, i, 1)
printf "%s", code[c]
}
printf "\n"
}
END {
}
'
| true
|
9b6d2bf568b7c628cf7f9a2cf1e615e980d842e5
|
Shell
|
faja/shell-helpers
|
/terraform/tf_module_list.sh
|
UTF-8
| 556
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash -
# List each terraform module directory and its version (taken from the
# second line of its README.md).
SCRIPT_VERSION=1.0.0
MODULE_PATH=~/GitRepos/AS/INFRA/infra-terraform-aws-modules

cd "${MODULE_PATH}" || exit 1

echo pulling latest changes from git..
git pull origin master
echo
echo

printf "%-30s\t%10s\n" "MODULE NAME" "VERSION"
# Iterate over directories directly.  The original looped over the words of
# `ls -l` output (permissions, sizes, dates, ...) and relied on `test -d`
# to filter out the noise.
for dir in */
do
    dir=${dir%/}
    VERSION=$(head -2 "$dir/README.md" | tail -1)
    # lets skip copy-paste and work in progress modules
    if echo "${VERSION}" | grep -q -e 'Copy-Paste' -e 'WORK IN PROGRESS'
    then
        continue
    fi
    printf "%-30s\t%10s\n" "${dir}" "${VERSION}"
done
| true
|
990e7c8cfd566afc4c1b361bc2c7f54b6235dc32
|
Shell
|
masoud-al/dotfiles-arch
|
/scripts/raid_demo.sh
|
UTF-8
| 2,619
| 4.15625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# this script is based on the template from
# https://agateau.com/2014/template-for-shell-based-command-line-scripts/
set -e
PROGNAME=$(basename $0)
ndev=8                           # number of backing qcow2 images / nbd devices
last=$(expr $ndev - 1)           # highest device index
size=64M                         # size of each qcow2 image
raiddev=/dev/md/MtRAID6          # md device node for the array
mtpt=raidpt                      # mount point for the ext4 filesystem
ids=$(seq 0 $(expr $ndev - 1))   # device index list (not referenced below -- confirm)
COMMANDS="create|stop|clean|fail"
command=""   # first bare word from the command line
subc=""      # optional second bare word
flag=0       # set by -d/--flag
count="8"    # set by -o/--count
# Print "progname: message" on stderr and abort with status 1.
die() {
	printf '%s\n' "$PROGNAME: $*" >&2
	exit 1
}
# Print an optional error message plus the help text, then exit 1.
usage() {
	if [ "$*" != "" ] ; then
		echo "Error: $*"
	fi
	cat << EOF
Usage: $PROGNAME [OPTION ...] <command>
A simple demo about using mdadm RAID configuration using qcow2 images.
commands = $COMMANDS
Options:
-h, --help display this usage message and exit
-d, --flag flag
-c, --count [n] number of devices
EOF
	exit 1
}
# Create the qcow2 images, attach them via qemu-nbd, partition each one,
# assemble a RAID6 md array (3 of the devices become spares via -x3),
# then format it ext4 and mount it.
create(){
	echo "${ndev} qcow2 images ..."
	seq 0 ${last} | parallel qemu-img create -f qcow2 -o preallocation=metadata storage-{}.qcow2 ${size}
	modprobe nbd max_part=${ndev}
	seq 0 ${last} | parallel qemu-nbd --connect=/dev/nbd{} storage-{}.qcow2
	sleep 0.5
	seq 0 ${last} | parallel sgdisk /dev/nbd{} -n 0:0:+60M -t 0:FD00
	sleep 0.5
	nd=$(expr $ndev - 3)
	mdadm --create --verbose ${raiddev} -N MtRAID6 -l6 -n${nd} -x3 /dev/nbd*p1
	mdadm --detail ${raiddev}
	mkfs.ext4 ${raiddev}
	mkdir -p ${mtpt}
	mount ${raiddev} ${mtpt}
	mdadm --detail --scan --verbose > mdadm.conf
}

# Unmount the filesystem and stop the md array (best effort).
stop(){
	umount ${raiddev} || true
	mdadm --stop ${raiddev} || true
}

# Full teardown: stop the array, remove the mount point, disconnect the
# nbd devices and unload the nbd module.
clean(){
	stop
	rm -r ${mtpt} || true
	seq 0 ${last} | parallel qemu-nbd --disconnect /dev/nbd{}
	sleep 1
	modprobe -r nbd || true
}

# Mark one member device as failed to demonstrate degraded operation.
fail(){
	echo "declare one device as failed"
	mdadm --fail ${raiddev} /dev/nbd0p1
	# mdadm --remove ${raiddev} /dev/nbd0p1
	# mdadm --zero-superblock /dev/nbd0p1
	sleep 1
	mdadm --detail ${raiddev}
}
# Parse the command line: options may appear anywhere; the first bare word
# becomes $command, the optional second one $subc.
while [ $# -gt 0 ] ; do
	case "$1" in
	-h|--help)
		usage
		;;
	-d|--flag)
		flag=1
		;;
	-o|--count)
		# NOTE(review): the usage text advertises -c for count, but the
		# code accepts -o -- confirm which one is intended.
		count="$2"
		shift
		;;
	-*)
		usage "Unknown option '$1'"
		;;
	*)
		if [ -z "$command" ] ; then
			command="$1"
		elif [ -z "$subc" ] ; then
			subc="$1"
		else
			usage "Too many arguments"
		fi
		;;
	esac
	shift
done
if [ -z "$command" ] ; then
    usage "Not enough arguments"
fi

# Group the alternation so the anchors apply to EVERY alternative.
# The original '^create|stop|clean|fail$' anchored only the first and last
# branch, so e.g. "unstoppable" would have matched and been executed.
if [[ "$command" =~ ^(${COMMANDS})$ ]]; then
    $command
fi
#cat <<EOF
#command=$command
#subc=$subc
#flag=$flag
#count=$count
#EOF
# alias par="parallel --no-run-if-empty --dryrun"
# mdadm --monitor /dev/md/test_raid
# mdadm --misc --zero-superblock /dev/<partition>
| true
|
82d44f0d87ba36183cc1255e64cc0ef035b0ae56
|
Shell
|
amorriscode/computer-science-flash-cards
|
/build.sh
|
UTF-8
| 363
| 3.453125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Build an .apkg deck for every directory under ./decks using mdanki.

command -v mdanki >/dev/null 2>&1 || { echo -e >&2 "Install mdanki to build the decks:\nyarn add global mdanki"; exit 1; }

# Clean up build directory
rm -rf ./build
mkdir ./build

# Build all decks in the deck directory
cd decks || exit 1
for dir in */ ; do
    deckname=${dir%/}   # strip the trailing slash the glob leaves behind
    mdanki "./${deckname}" "../build/${deckname}.apkg" --deck "${deckname}"
done
| true
|
40b928f23839104661ecd01babddfd36fc4e15d3
|
Shell
|
kapamaroo/fluidanimate_cuda_version
|
/tools/test.sh
|
UTF-8
| 807
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the CPU and GPU fluidanimate builds on the same input for the given
# number of frames, then compare their outputs.
# Usage: test.sh <frames>

BUILDDIR="bin"
LOGDIR="log"
CPU_BIN="fluidanimate_cpu"
GPU_BIN="fluidanimate_gpu"
CHECK_BIN="./tools/checkfiles"
THREADS="256"
FRAMES="$*"   # frame argument(s) passed straight through to the binaries
INPUT="inputs/in_100K.fluid"
CPU_OUTPUT="out_100K_cpu.fluid"
GPU_OUTPUT="out_100K_gpu.fluid"

mkdir -p "$LOGDIR"

echo
echo
echo
echo "######## Start Test for $FRAMES frames #############"
echo
echo "############### CPU Test #####################"
echo
time "./$BUILDDIR/$CPU_BIN" "$THREADS" $FRAMES "./$INPUT" "./$LOGDIR/$CPU_OUTPUT"
echo
echo "############### GPU Test #####################"
echo
time "./$BUILDDIR/$GPU_BIN" "$THREADS" $FRAMES "./$INPUT" "./$LOGDIR/$GPU_OUTPUT"
echo
echo "############ Compare Results #################"
"$CHECK_BIN" "./$LOGDIR/$CPU_OUTPUT" "./$LOGDIR/$GPU_OUTPUT"
echo
echo "############## Test Finished #################"
echo
echo
echo
| true
|
f56ed9983381d499139ecb8bcc245cd228bb373f
|
Shell
|
starbops/dotfiles
|
/bash_profile
|
UTF-8
| 935
| 3.15625
| 3
|
[] |
no_license
|
#
# ~/.bash_profile
#
[[ -f ~/.bashrc ]] && . ~/.bashrc
# Initialize pyenv rooted at the given directory: export PYENV_ROOT,
# prepend its bin dir to PATH, and enable pyenv + pyenv-virtualenv shims.
load_pyenv() {
    export PYENV_ROOT="$1"
    export PATH="$PYENV_ROOT/bin:$PATH"
    eval "$(pyenv init -)"
    eval "$(pyenv virtualenv-init -)"
}
PATH="$PATH:/usr/local/sbin"
export PATH

# Color settings read by BSD/macOS ls.
CLICOLOR=1
LSCOLORS="gxfxcxdxbxegedabagacad"
export CLICOLOR
export LSCOLORS

#HOMEBREW_GITHUB_API_TOKEN=""
#export HOMEBREW_GITHUB_API_TOKEN

# Add RVM's bin dir to PATH when RVM is installed.
if [ -d "$HOME/.rvm" ]; then
	PATH="$PATH:$HOME/.rvm/bin"
	export PATH
fi

# Load RVM into a shell session *as a function*
[[ -s "$HOME/.rvm/scripts/rvm" ]] && source "$HOME/.rvm/scripts/rvm"

# Initialize pyenv from its platform-specific install location.
case $(uname -s) in
	"Darwin")
		if [ -d "/usr/local/opt/pyenv" ]; then
			load_pyenv "/usr/local/opt/pyenv"
		fi
		;;
	"FreeBSD")
		;;
	"Linux")
		if [ -d "$HOME/.pyenv" ]; then
			load_pyenv "$HOME/.pyenv"
		fi
		;;
	*)
		;;
esac
| true
|
d840d4db43ea8f87d8cc88b7b9bdbf0ac3ab958d
|
Shell
|
alldatacenter/alldata
|
/studio/micro-services/SREWorks/paas/tesla-gateway/APP-META/docker-config/environment/common/bin/setenv.sh
|
UTF-8
| 6,142
| 3.09375
| 3
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"EPL-1.0",
"LGPL-2.0-or-later",
"MPL-2.0",
"GPL-2.0-only",
"JSON",
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# setenv.sh — one-shot environment setup for a Pandora Boot / Tomcat Java app.
# Builds SERVICE_OPTS (the JVM flags), exports Tomcat/nginx paths, and sets
# JPDA debug options. Intended to be sourced; the SETENV_SETTED guard below
# makes repeated sourcing a no-op.
# SETENV_SETTED promise run this only once.
if [ -z $SETENV_SETTED ]; then
SETENV_SETTED="true"
# app
# set ${APP_NAME}, if empty $(basename "${APP_HOME}") will be used.
# APP_HOME = parent directory of the directory containing this script.
APP_HOME=$(cd $(dirname ${BASH_SOURCE[0]})/..; pwd)
if [[ "${APP_NAME}" = "" ]]; then
APP_NAME=$(basename "${APP_HOME}")
fi
NGINX_HOME=/home/admin/cai
export JAVA_HOME=/opt/taobao/java
export PATH=${PATH}:${JAVA_HOME}/bin
# Unlimited core dumps, for JVM crash analysis.
ulimit -c unlimited
echo "INFO: OS max open files: "`ulimit -n`
# when stop pandora boot process, will try to stop old tomcat process
export CATALINA_HOME=/opt/taobao/tomcat
export CATALINA_BASE=$APP_HOME/.default
export CATALINA_PID=$CATALINA_BASE/catalina.pid
# time to wait tomcat to stop before killing it
TOMCAT_STOP_WAIT_TIME=5
TOMCAT_PORT=7001
# If the packaged appctl.sh is absent, configure the Pandora Boot service
# here; otherwise only set LANG (the packaged script does the rest).
if [[ ! -f ${APP_HOME}/target/${APP_NAME}/bin/appctl.sh ]]; then
# env for service(pandora boot)
export LANG=zh_CN.UTF-8
export JAVA_FILE_ENCODING=UTF-8
export NLS_LANG=AMERICAN_AMERICA.ZHS16GBK
export LD_LIBRARY_PATH=/opt/taobao/oracle/lib:/opt/taobao/lib:$LD_LIBRARY_PATH
# Number of logical CPUs, used to size the parallel GC thread pool below.
export CPU_COUNT="$(grep -c 'cpu[0-9][0-9]*' /proc/stat)"
mkdir -p "$APP_HOME"/.default
export SERVICE_PID=$APP_HOME/.default/${APP_NAME}.pid
export SERVICE_OUT=$APP_HOME/logs/service_stdout.log
export MIDDLEWARE_LOGS="${HOME}/logs"
export MIDDLEWARE_SNAPSHOTS="${HOME}/snapshots"
if [ -z "$SERVICE_TMPDIR" ] ; then
# Define the java.io.tmpdir to use for Service(pandora boot)
SERVICE_TMPDIR="${APP_HOME}"/.default/temp
fi
SERVICE_OPTS="${SERVICE_OPTS} -server"
# Heap sizing is chosen from total OS memory (MiB) read from /proc/meminfo.
let memTotal=`cat /proc/meminfo | grep MemTotal | awk '{printf "%d", $2/1024 }'`
echo "INFO: OS total memory: "$memTotal"M"
# if os memory <= 2G
if [ $memTotal -le 2048 ]; then
SERVICE_OPTS="${SERVICE_OPTS} -Xms1536m -Xmx1536m"
SERVICE_OPTS="${SERVICE_OPTS} -Xmn768m"
else
SERVICE_OPTS="${SERVICE_OPTS} -Xms4g -Xmx4g"
SERVICE_OPTS="${SERVICE_OPTS} -Xmn2g"
fi
SERVICE_OPTS="${SERVICE_OPTS} -XX:MetaspaceSize=256m -XX:MaxMetaspaceSize=512m"
SERVICE_OPTS="${SERVICE_OPTS} -XX:MaxDirectMemorySize=1g"
SERVICE_OPTS="${SERVICE_OPTS} -XX:SurvivorRatio=10"
SERVICE_OPTS="${SERVICE_OPTS} -XX:+UseConcMarkSweepGC -XX:CMSMaxAbortablePrecleanTime=5000"
SERVICE_OPTS="${SERVICE_OPTS} -XX:+CMSClassUnloadingEnabled -XX:CMSInitiatingOccupancyFraction=80 -XX:+UseCMSInitiatingOccupancyOnly"
SERVICE_OPTS="${SERVICE_OPTS} -XX:+ExplicitGCInvokesConcurrent -Dsun.rmi.dgc.server.gcInterval=2592000000 -Dsun.rmi.dgc.client.gcInterval=2592000000"
SERVICE_OPTS="${SERVICE_OPTS} -XX:ParallelGCThreads=${CPU_COUNT}"
SERVICE_OPTS="${SERVICE_OPTS} -Xloggc:${MIDDLEWARE_LOGS}/gc.log -XX:+PrintGCDetails -XX:+PrintGCDateStamps"
SERVICE_OPTS="${SERVICE_OPTS} -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=${MIDDLEWARE_LOGS}/java.hprof"
SERVICE_OPTS="${SERVICE_OPTS} -Djava.awt.headless=true"
SERVICE_OPTS="${SERVICE_OPTS} -Dsun.net.client.defaultConnectTimeout=10000"
SERVICE_OPTS="${SERVICE_OPTS} -Dsun.net.client.defaultReadTimeout=30000"
SERVICE_OPTS="${SERVICE_OPTS} -DJM.LOG.PATH=${MIDDLEWARE_LOGS}"
SERVICE_OPTS="${SERVICE_OPTS} -DJM.SNAPSHOT.PATH=${MIDDLEWARE_SNAPSHOTS}"
SERVICE_OPTS="${SERVICE_OPTS} -Dfile.encoding=${JAVA_FILE_ENCODING}"
SERVICE_OPTS="${SERVICE_OPTS} -Dhsf.publish.delayed=true"
SERVICE_OPTS="${SERVICE_OPTS} -Dproject.name=${APP_NAME}"
SERVICE_OPTS="${SERVICE_OPTS} -Dpandora.boot.wait=true -Dlog4j.defaultInitOverride=true"
SERVICE_OPTS="${SERVICE_OPTS} -Dserver.port=${TOMCAT_PORT} -Dmanagement.port=7002"
# enable coroutine(current JDK in Docker image do not support WispMonitor option)
# SERVICE_OPTS="${SERVICE_OPTS} -XX:+EnableCoroutine -XX:-UseBiasedLocking -XX:+UseWispMonitor"
SERVICE_OPTS="${SERVICE_OPTS} -XX:+EnableCoroutine -XX:-UseBiasedLocking"
SERVICE_OPTS="${SERVICE_OPTS} -Dcom.alibaba.transparentAsync=true -Dcom.alibaba.shiftThreadModel=true"
# debug opts
# jpda options
test -z "$JPDA_ENABLE" && JPDA_ENABLE=0
test -z "$JPDA_ADDRESS" && export JPDA_ADDRESS=8000
test -z "$JPDA_SUSPEND" && export JPDA_SUSPEND=n
# When JPDA_ENABLE=1, append the remote-debug agent flags to SERVICE_OPTS.
if [ "$JPDA_ENABLE" -eq 1 ]; then
if [ -z "$JPDA_TRANSPORT" ]; then
JPDA_TRANSPORT="dt_socket"
fi
if [ -z "$JPDA_ADDRESS" ]; then
JPDA_ADDRESS="8000"
fi
if [ -z "$JPDA_SUSPEND" ]; then
JPDA_SUSPEND="n"
fi
if [ -z "$JPDA_OPTS" ]; then
JPDA_OPTS="-agentlib:jdwp=transport=$JPDA_TRANSPORT,address=$JPDA_ADDRESS,server=y,suspend=$JPDA_SUSPEND"
fi
SERVICE_OPTS="$SERVICE_OPTS $JPDA_OPTS"
fi
export SERVICE_OPTS
if [ -z "$NGINX_HOME" ]; then
NGINX_HOME=/home/admin/cai
fi
# if set to "1", skip start nginx.
test -z "$NGINX_SKIP" && NGINX_SKIP=0
# set port for checking status.taobao file. Comment it if no need.
STATUS_PORT=80
# time to wait for /status.taobao is ready
STATUS_TAOBAO_WAIT_TIME=3
STATUSROOT_HOME="${APP_HOME}/target/${APP_NAME}/META-INF/resources"
# make sure the directory exist, before tomcat start
mkdir -p $STATUSROOT_HOME
NGINXCTL=$NGINX_HOME/bin/nginxctl
# search pandora by "${PANDORA_NAME_LIST[@]}" order
PANDORA_NAME_LIST=(pandora taobao-hsf)
# set hsf online/offline time out (in second)
HSF_ONLINE_TIMEOUT=120
# if update pandora
UPDATE_PANDORA=true
else
# compatible with the existing jar application
export LANG=zh_CN.UTF-8
fi
fi
| true
|
8dbbd44f40e8a06f3c76db0fd38e9cbced53c637
|
Shell
|
XingGemini/GATKBestPractice
|
/GATK.sh
|
UTF-8
| 10,816
| 2.9375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# GATK best-practice WES pipeline driver — configuration section.
# Usage: GATK.sh <FASTQ1> <FASTQ2> <SAMPLEID> <STEP>   (STEP = 0..10 or 'all')
BASEDIR=$(dirname "$0")
CONFIG=$BASEDIR/config
# The config file is expected to define REFERENCEDIR and the tool wrappers
# PICARD, BWA, SAMTOOLS, GATK, GATKCP used by the step blocks below.
source $CONFIG
CHROM=( chr1 chr2 chr3 chr4 chr5 chr6 chr7 chr8 chr9 chr10 chr11 chr12 chr13 chr14 chr15 chr16 chr17 chr18 chr19 chr20 chr21 chr22 chrX chrY )
STDOUT='/dev/stdout'
STDIN='/dev/stdin'
TMPDIR=$PWD/tmp
# Reference genome and its indices.
HG19REF=$REFERENCEDIR/ucsc.hg19.fasta
HG19REFBWT=$REFERENCEDIR/ucsc.hg19.fasta.bwt
HG19REFFAI=$REFERENCEDIR/ucsc.hg19.fasta.fai
HG19REFDICT=$REFERENCEDIR/ucsc.hg19.dict
# BUGFIX: removed a stray trailing single quote after "SureSelectV5" that
# made every later line part of an unterminated string (syntax error).
ENRICHMENTDIR=$REFERENCEDIR/hg19/Enrichment/SureSelectV5
ENRICHMENTBED=$ENRICHMENTDIR/S04380110_Regions.bed
# Known-sites VCFs for realignment/recalibration.
KNOWNINDEL1000GVCF=$REFERENCEDIR/1000G_phase1.indels.hg19.sites.vcf
KNOWNINDELMILLSVCF=$REFERENCEDIR/Mills_and_1000G_gold_standard.indels.hg19.sites.vcf
KNOWNDBSNPVCF=$REFERENCEDIR/dbsnp_138.hg19.vcf
# Positional arguments.
FASTQ1=$1
FASTQ2=$2
SAMPLEID=$3
STEP=$4
# Intermediate and final file names derived from the sample id.
FASTQBAM=$TMPDIR/$SAMPLEID.raw.bam
MARKADAPTERBAM=$TMPDIR/$SAMPLEID.markIlluminaAdapter.bam
MARKADAPTERRPT=$TMPDIR/$SAMPLEID\_markIlluminaAdapter.metrics.txt
MAPPEDBAM=$TMPDIR/$SAMPLEID.mapped.bam
SORTEDBAM=$TMPDIR/$SAMPLEID.mapped.sorted.bam
MARKDUPBAM=$TMPDIR/$SAMPLEID.mapped.sorted.markdup.bam
MARKDUPRPT=$TMPDIR/$SAMPLEID.mapped.sorted.markdup.metrics.txt
#REALNEDBAM=$TMPDIR/$SAMPLEID.mapped.sorted.markdup.realn.bam
FINALBAM=$PWD/$SAMPLEID.mapped.sorted.markdup.realn.recal.bam
GVCFGZ=$PWD/$SAMPLEID.g.vcf.gz
RAWVARIANTVCFGZ=$TMPDIR/$SAMPLEID.raw.variant.vcf.gz
RAWINDELVCFGZ=$TMPDIR/$SAMPLEID.raw.indel.vcf.gz
RAWSNPVCFGZ=$TMPDIR/$SAMPLEID.raw.snp.vcf.gz
FILTEREDVARIANTVCFGZ=$PWD/$SAMPLEID.filtered.variant.vcf.gz
FILTEREDINDELVCFGZ=$TMPDIR/$SAMPLEID.filtered.indel.vcf.gz
FILTEREDSNPVCFGZ=$TMPDIR/$SAMPLEID.filtered.snp.vcf.gz
# BUGFIX: dropped the escaped quotes (\') that were printed literally in the
# usage text; quote the expansion so the message prints unmangled.
USAGE="$0 <FASTQ1> <FASTQ2> <SAMPLEID> <STEP> where STEP can be step number or 'all'"
if [ -z "$1" ]; then
echo "$USAGE"
exit
fi
if [ ! -d "$TMPDIR" ]; then
mkdir "$TMPDIR"
fi
# Step 0: convert the paired FASTQ files into an unmapped BAM carrying
# read-group metadata (Picard FastqToSam).
if [ $STEP == 0 -o $STEP == 'all' ]; then
echo 'Step 0: Fastq to Sam'
$PICARD FastqToSam \
FASTQ=$FASTQ1 \
FASTQ2=$FASTQ2 \
OUTPUT=$FASTQBAM \
READ_GROUP_NAME=$SAMPLEID \
SAMPLE_NAME=$SAMPLEID \
LIBRARY_NAME=$SAMPLEID \
PLATFORM=ILLUMINA \
PLATFORM_UNIT=FlowCell1_Lane1_1 \
SEQUENCING_CENTER=MACROGEN_MD \
RUN_DATE=$(date +%Y%m%d) \
&> Step0.log
fi
# Step 1: tag Illumina adapter sequences in the unmapped BAM so they can be
# soft-clipped before mapping (Picard MarkIlluminaAdapters).
if [ $STEP == 1 -o $STEP == 'all' ]; then
echo 'Step 1: Mark ILLUMINA adpaters'
$PICARD MarkIlluminaAdapters \
I=$FASTQBAM \
O=$MARKADAPTERBAM \
M=$MARKADAPTERRPT \
TMP_DIR=$TMPDIR \
&> Step1.log
fi
# Step 2: build any missing reference indices, then map with BWA-MEM via a
# SamToFastq | bwa mem | MergeBamAlignment pipeline (pipefail so a failure in
# any stage fails the whole step).
if [ $STEP == 2 -o $STEP == 'all' ]; then
echo 'Step 2: Mapping to Reference'
if [ ! -f $HG19REFBWT ]; then
echo 'Step 2.0.1 prepare bwa reference directory'
$BWA index -a bwtsw $HG19REF
fi
if [ ! -f $HG19REFFAI ]; then
echo 'Step 2.0.2 prepare faidx for reference fastq'
$SAMTOOLS faidx $HG19REF
fi
if [ ! -f $HG19REFDICT ]; then
echo 'Step 2.0.3 prepare picard dict for reference fastq'
$PICARD CreateSequenceDictionary \
R=$HG19REF \
O=$HG19REFDICT
fi
echo 'Step 2.1 Mapping to reference'
set -o pipefail
$PICARD SamToFastq \
I=$MARKADAPTERBAM \
FASTQ=$STDOUT \
CLIPPING_ATTRIBUTE=XT CLIPPING_ACTION=2 INTERLEAVE=true NON_PF=true \
TMP_DIR=$TMPDIR | \
#$BWA mem -M -t 32 -p $HG19REF $STDIN | \
$BWA mem -M -t 24 -p $HG19REF $STDIN | \
$PICARD MergeBamAlignment \
ALIGNED_BAM=$STDIN \
UNMAPPED_BAM=$FASTQBAM \
OUTPUT=$MAPPEDBAM \
R=$HG19REF CREATE_INDEX=true ADD_MATE_CIGAR=true \
CLIP_ADAPTERS=false CLIP_OVERLAPPING_READS=true \
INCLUDE_SECONDARY_ALIGNMENTS=true MAX_INSERTIONS_OR_DELETIONS=-1 \
PRIMARY_ALIGNMENT_STRATEGY=MostDistant ATTRIBUTES_TO_RETAIN=XS \
TMP_DIR=$TMPDIR \
&> Step2.log
fi
# Step 3: coordinate-sort the mapped BAM, mark PCR/optical duplicates and
# index the result.
if [ "$STEP" == 3 -o "$STEP" == 'all' ]; then
echo 'Step 3: MarkDuplicates'
echo 'Step 3.1: Sort aligned bam'
$PICARD SortSam \
I=$MAPPEDBAM \
O=$SORTEDBAM \
SORT_ORDER=coordinate \
&> Step3.1.log
echo 'Step 3.2: Mark Duplicates'
# BUGFIX: write duplication metrics to MARKDUPRPT — the original reused
# MARKADAPTERRPT here, silently overwriting the Step-1 adapter metrics
# (and MARKDUPRPT was defined but never used).
$PICARD MarkDuplicates \
I=$SORTEDBAM \
O=$MARKDUPBAM \
METRICS_FILE=$MARKDUPRPT \
&> Step3.2.log
echo 'Step 3.3: Build Bam Index'
$PICARD BuildBamIndex \
I=$MARKDUPBAM \
&> Step3.3.log
fi
# Step 4: split the enrichment BED per chromosome (first run only), then run
# GATK indel realignment chromosome-by-chromosome.
if [ $STEP == 4 -o $STEP == 'all' ]; then
echo 'Step 4: Realgin indel'
for i in "${CHROM[@]}"
do
if [ ! -f $ENRICHMENTDIR/$i\.bed ];
then
echo $i
echo 'grep "^$i\t" $ENRICHMENTBED |cut -f 1-3 > $ENRICHMENTDIR\/$i\.bed '
grep -P "^$i\t" $ENRICHMENTBED |cut -f 1-3 > $ENRICHMENTDIR\/$i\.bed # grep split bed file in chromosome files and keep only the first 3 columns
fi
done
echo 'Step 4: Logging' > Step4.log
# Per-chromosome: build realignment targets, then realign around indels,
# appending each tool's output to Step4.log via tmp.log.
for i in "${CHROM[@]}"
do
echo "Building Target List for $i"
$GATK -T RealignerTargetCreator \
-R $HG19REF \
-I $MARKDUPBAM \
-L $ENRICHMENTDIR/$i\.bed \
-known $KNOWNINDELMILLSVCF \
-known $KNOWNINDEL1000GVCF \
-o $TMPDIR/$i\.realignment_targets.list \
&> tmp.log
cat tmp.log >> Step4.log
echo "Realgin indels for $i"
$GATK -T IndelRealigner \
-R $HG19REF \
-I $MARKDUPBAM \
-L $ENRICHMENTDIR/$i\.bed \
-known $KNOWNINDELMILLSVCF \
-known $KNOWNINDEL1000GVCF \
-targetIntervals $TMPDIR/$i\.realignment_targets.list \
-o $TMPDIR/$i\.mapped.sorted.markdup.realn.bam \
&> tmp.log
cat tmp.log >> Step4.log
done
fi
# Step 5: BQSR per chromosome (build recalibration table, apply with
# PrintReads), then concatenate the per-chromosome BAMs into FINALBAM.
if [ "$STEP" == 5 -o "$STEP" == 'all' ]; then
echo "Step5 Recalibration Base Quality"
echo 'Step 5: Logging' > Step5.log
for i in "${CHROM[@]}"
do
echo "5.1 Build recalibration base quality table for $i"
$GATK -T BaseRecalibrator \
-R $HG19REF \
-I $TMPDIR/$i\.mapped.sorted.markdup.realn.bam \
-knownSites $KNOWNINDELMILLSVCF \
-knownSites $KNOWNINDEL1000GVCF \
-knownSites $KNOWNDBSNPVCF \
-o $TMPDIR/$i\.mapped.sorted.markdup.realn.recal.table \
&> tmp.log
cat tmp.log >> Step5.log
done
for i in "${CHROM[@]}"
do
echo "5.2 Recalibration base quality for $i"
# BUGFIX: capture PrintReads output in tmp.log — the original omitted the
# redirect, so the following "cat" appended a stale tmp.log from step 5.1.
$GATK -T PrintReads \
-R $HG19REF \
-I $TMPDIR/$i\.mapped.sorted.markdup.realn.bam \
-BQSR $TMPDIR/$i\.mapped.sorted.markdup.realn.recal.table \
-o $TMPDIR/$i\.mapped.sorted.markdup.realn.recal.bam \
&> tmp.log
cat tmp.log >> Step5.log
done
BAMLIST=''
for i in "${CHROM[@]}"
do
BAMLIST+=" $TMPDIR/$i.mapped.sorted.markdup.realn.recal.bam"
done
echo '5.3 Concadinate all individual Bam files'
echo " $SAMTOOLS cat -o $FINALBAM $BAMLIST >> Step5.log"
$SAMTOOLS cat -o $FINALBAM $BAMLIST >> Step5.log
echo 'Step 5.4: Build Bam Index'
$PICARD BuildBamIndex \
I=$FINALBAM \
&> tmp.log
cat tmp.log >> Step5.log
#rm -f $BAMLIST
fi
# Step 6: per-chromosome HaplotypeCaller in GVCF mode, then concatenate the
# per-chromosome g.vcf.gz files into one sample-level GVCF with CatVariants.
if [ $STEP == 6 -o $STEP == 'all' ]; then
echo "Step6: Variant Calling"
echo "Step6: Variant Calling Logging" > Step6.log
VCFLIST=''
for i in "${CHROM[@]}"
do
echo "6.1 Haplotype Calling for $i"
CHRGVCFGZ=$TMPDIR/$SAMPLEID.$i\.g.vcf.gz
VCFLIST+="-V $CHRGVCFGZ "
$GATK -T HaplotypeCaller \
-R $HG19REF \
-I $FINALBAM \
-L $ENRICHMENTDIR/$i\.bed \
--genotyping_mode DISCOVERY \
--emitRefConfidence GVCF \
--variant_index_type LINEAR --variant_index_parameter 128000 \
-stand_emit_conf 10 \
-stand_call_conf 30 \
-o $CHRGVCFGZ \
&> tmp.log
cat tmp.log >> Step6.log
done
echo 'combine gvcf files'
# The echo below intentionally prints the (hard-coded) command template for
# the operator's reference; the actual invocation follows via $GATKCP.
echo '/home/xing/jre1.8.0_91/bin/java -cp /home/xing/data/Tools/GATK/GenomeAnalysisTK.jar org.broadinstitute.gatk.tools.CatVariants \
-R $HG19REF \
-assumeSorted \
-o $RAWVARIANTVCFGZ \
$VCFLIST \
&> tmp.log'
$GATKCP org.broadinstitute.gatk.tools.CatVariants \
-R $HG19REF \
-assumeSorted \
-out $GVCFGZ \
$VCFLIST \
&> tmp.log
cat tmp.log >> Step6.log
fi
# Step 7: joint genotyping (GenotypeGVCFs) per chromosome, then concatenate
# the raw per-chromosome VCFs into RAWVARIANTVCFGZ.
if [ $STEP == 7 -o $STEP == 'all' ]; then
echo "Step7: Genotype Calling"
echo "Step7: Genotype Calling Log" > Step7.log
VCFLIST=''
for i in "${CHROM[@]}"
do
echo "7.1 Genotype Calling for $i"
CHRRAWVARIANTSVCFGZ=$TMPDIR/$SAMPLEID.$i\.raw.variants.vcf.gz
VCFLIST+="-V $CHRRAWVARIANTSVCFGZ "
$GATK -T GenotypeGVCFs \
-R $HG19REF \
-V $TMPDIR/$SAMPLEID.$i\.g.vcf.gz \
-o $CHRRAWVARIANTSVCFGZ \
&> tmp.log
cat tmp.log >> Step7.log
done
echo 'combine raw vcf files'
$GATKCP org.broadinstitute.gatk.tools.CatVariants \
-R $HG19REF \
-assumeSorted \
-out $RAWVARIANTVCFGZ \
$VCFLIST \
&> tmp.log
cat tmp.log >> Step7.log
fi
# Step 8: select indels from the raw call set and hard-filter them with the
# "MG_indel_filter" expression.
if [ $STEP == 8 -o $STEP == 'all' ]; then
echo "Step8: Filter Indels"
echo "Step8: Filter Indels " > Step8.log
$GATK -T SelectVariants \
-R $HG19REF \
-V $RAWVARIANTVCFGZ \
-selectType INDEL \
-o $RAWINDELVCFGZ \
&> tmp.log
cat tmp.log >> Step8.log
$GATK -T VariantFiltration \
-R $HG19REF \
-V $RAWINDELVCFGZ \
-filter "QD<2.0 || FS>200.0 ||SOR > 10.0 || InbreedingCoeff< -0.8 || ReadPosRankSum< -20.0" \
-filterName "MG_indel_filter" \
-o $FILTEREDINDELVCFGZ \
&> tmp.log
cat tmp.log >> Step8.log
fi
# Step 9: select SNPs from the raw call set and hard-filter them with the
# "MG_SNP_filter" expression.
if [ $STEP == 9 -o $STEP == 'all' ]; then
echo "Step9: Filter SNPs"
echo "Step9: Filter SNPs " > Step9.log
$GATK -T SelectVariants \
-R $HG19REF \
-V $RAWVARIANTVCFGZ \
-selectType SNP \
-o $RAWSNPVCFGZ \
&> tmp.log
cat tmp.log >> Step9.log
$GATK -T VariantFiltration \
-R $HG19REF \
-V $RAWSNPVCFGZ \
-filter "QD < 2.0 || FS > 60.0 || SOR > 4.0 || MQ < 40.0 || MQRankSum< -12.5 || ReadPosRankSum< -8.0" \
-filterName "MG_SNP_filter" \
-o $FILTEREDSNPVCFGZ \
&> tmp.log
cat tmp.log >> Step9.log
fi
# Step 10: merge the filtered indel and SNP VCFs into the final variant file.
if [ $STEP == 10 -o $STEP == 'all' ]; then
echo "Step10: Merge Variants"
echo "Step10: Merge Variants" > Step10.log
$GATK -T CombineVariants \
-R $HG19REF \
-V $FILTEREDINDELVCFGZ \
-V $FILTEREDSNPVCFGZ \
-o $FILTEREDVARIANTVCFGZ \
-genotypeMergeOptions UNSORTED \
&> tmp.log
cat tmp.log >> Step10.log
fi
| true
|
de815759a5698c30e156c50799165e2aada31f69
|
Shell
|
andrejchikilev/vimrc
|
/install.sh
|
UTF-8
| 503
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/sh
# Install a Vundle-based vim configuration.
# Copies .vimrc/.ctags from the current directory (backing up any existing
# ones), clones Vundle, then runs vim once to install plugins.
set -e
# BUGFIX: the original nested "else if" created ~/.vim/bundle only when
# ~/.vim already existed; mkdir -p covers both cases in one statement.
mkdir -p ~/.vim/bundle
if ! [ -d ~/.vim/bundle/Vundle.vim/ ]; then
git clone https://github.com/VundleVim/Vundle.vim.git ~/.vim/bundle/Vundle.vim
fi
if ! [ -f ~/.vimrc ]; then
cp .vimrc ~/
else
# Preserve the user's existing config before overwriting it.
mv ~/.vimrc ~/.vimrc.backup
cp .vimrc ~/
fi
if ! [ -f ~/.ctags ]; then
cp .ctags ~/
else
mv ~/.ctags ~/.ctags.backup
cp .ctags ~/
fi
vim +VundleInstall +qa!
| true
|
4bede6f977b0760d50ad0d0fcd348d16e7e76a47
|
Shell
|
Minigugus/CodinSchool
|
/lancer.sh
|
UTF-8
| 773
| 3.5
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Container launcher for CodinSchool: when invoked as /docker-lancer.sh it
# syncs the image's source tree into /app (preserving package.json and
# node_modules), refreshes npm dependencies if package.json changed, then
# execs the given command.
# set -e
# extglob is required by the !(...) exclusion patterns below.
shopt -s extglob
echo Lancement de CodinSchool...
if [[ "$0" == '/docker-lancer.sh' ]]; then
echo Synchronisation du projet...
mkdir -p /app
# Delete everything in /app except package.json and node_modules, then copy
# the fresh sources over. Hidden entries are matched separately.
rm -rf /app/!(package.json|node_modules) /app/.[a-zA-Z]* 2>/dev/null
cp -rf /usr/src/app/!(package.json|node_modules) /usr/src/app/.[a-zA-Z]* /app
# Compare mtimes (epoch seconds) of the deployed vs. image package.json.
VERSION_ACTUELLE=$(stat -c %Y /app/package.json 2>/dev/null)
NOUVELLE_VERSION=$(stat -c %Y /usr/src/app/package.json)
VERSION_ACTUELLE=${VERSION_ACTUELLE:-0}
cd /app
# `package.json` changed -> update the local dependencies
if [[ $VERSION_ACTUELLE -lt $NOUVELLE_VERSION ]]; then
echo Fichier « package.json » modifié. Mise à jour des dépendences...
cp /usr/src/app/package.json /app/package.json
npm i -D
fi
fi
# Hand control to the container's command line (docker CMD/args).
exec "$@"
| true
|
347592b5a2bb466c63e68f1086576b910b821b5a
|
Shell
|
Xuanwo/dont-starve-together
|
/scripts/dontstarve_server
|
UTF-8
| 500
| 2.640625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Container entrypoint for a Don't Starve Together dedicated server.
# Required env: STEAM_USER, DST_HOME, DST_CLUSTER_PATH, DST_SHARD.
# Exit immediately on non-zero return codes.
set -e
# Make sure all files are belong to user steam.
# BUGFIX: quote expansions so paths/usernames with unusual characters do not
# word-split or glob (cd/chown previously used them unquoted).
chown -R "$STEAM_USER:$STEAM_USER" "$DST_HOME/mods"
chown -R "$STEAM_USER:$STEAM_USER" "$DST_CLUSTER_PATH"
# Workaround for main.lua not load
cd "$DST_HOME/bin"
gosu "$STEAM_USER" ./dontstarve_dedicated_server_nullrenderer \
    -persistent_storage_root "$(dirname "$DST_CLUSTER_PATH")" \
    -conf_dir "." \
    -cluster "$(basename "$DST_CLUSTER_PATH")" \
    -shard "$DST_SHARD"
| true
|
8ffa20daabb748e23585f201c24828fadab890af
|
Shell
|
3rav/MINGW-packages
|
/mingw-w64-capnproto/PKGBUILD
|
UTF-8
| 1,251
| 2.890625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
# Based on https://aur.archlinux.org/packages/mingw-w64-capnproto/
#
# Maintainer: Igor Matuszewski <xanewok@gmail.com>
# Contributors: Dave Reisner <dreisner@archlinux.org>
# Matthias Blaicher <matthias@blaicher.com>
# Severen Redwood <severen@shrike.me>
# Igor Matuszewski <xanewok@gmail.com>
# PKGBUILD for the MSYS2/mingw-w64 build of Cap'n Proto (static only).
_realname=capnproto
pkgbase=mingw-w64-${_realname}
pkgname="${MINGW_PACKAGE_PREFIX}-${_realname}"
pkgver=0.8.0
pkgrel=2
pkgdesc="Cap'n Proto serialization/RPC system (mingw-w64)"
arch=('any')
url='https://capnproto.org/'
license=('MIT')
depends=("${MINGW_PACKAGE_PREFIX}-openssl")
source=("https://capnproto.org/capnproto-c++-${pkgver}.tar.gz")
sha512sums=('a32dbe6556a95761a5edc55237bd5558cb0ec08127f2fef1712076d5be4cd63e165a5d83b522307336bd3afeed1241f2c1e507830e8f12ac5dec78703a85417f')
# No patches needed; prepare() only enters the extracted sources.
prepare() {
cd "${srcdir}/capnproto-c++-${pkgver}"
}
# Out-of-tree autotools build; the build dir is recreated on every run.
build() {
[[ -d "${srcdir}/build-${CARCH}" ]] && rm -rf "${srcdir}/build-${CARCH}"
mkdir -p "${srcdir}/build-${CARCH}" && cd "${srcdir}/build-${CARCH}"
../capnproto-c++-${pkgver}/configure \
--prefix=${MINGW_PREFIX} \
--build=${MINGW_CHOST} \
--host=${MINGW_CHOST} \
--target=${MINGW_CHOST} \
--disable-shared \
--enable-static \
--with-openssl
make
}
# Stage the compiled files into the package root.
package() {
cd "${srcdir}/build-${CARCH}"
make DESTDIR="${pkgdir}" install
}
| true
|
5c11054fb4dddcbd026a9cbb5a546b1277c38f2d
|
Shell
|
slavrd/tfev4-vagrant-proxy
|
/scripts/install_tfe_online.sh
|
UTF-8
| 702
| 3.03125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Installs Replicated and PTFE, then blocks until the TFE health check
# endpoint responds. PROXY_ADDR (if set) selects the proxied install path.
# Default the addresses only when the variables are entirely unset
# (an explicitly-set empty value is preserved, matching `[ -v ... ]`).
PTFE_PRIVATE_IP=${PTFE_PRIVATE_IP-192.168.56.33}
PTFE_PUBLIC_IP=${PTFE_PUBLIC_IP-192.168.56.33}
cp /vagrant/config/replicated.conf /etc/replicated.conf
curl -sS -o /tmp/install.sh https://install.terraform.io/ptfe/stable
if [ -v PROXY_ADDR ]; then
    bash /tmp/install.sh http-proxy=${PROXY_ADDR} private-address=${PTFE_PRIVATE_IP} public-address=${PTFE_PUBLIC_IP}
else
    bash /tmp/install.sh no-proxy private-address=${PTFE_PRIVATE_IP} public-address=${PTFE_PUBLIC_IP}
fi
# Poll the health endpoint every 30s until it answers.
until curl -ksfS --connect-timeout 5 https://localhost/_health_check >/dev/null 2>&1; do
    echo "==> Waiting for TFE to start..."
    sleep 30
done
| true
|
32cb542914c9d10d015346ea53a39b4e8d6170c2
|
Shell
|
EhsanEsc/TestGenerator
|
/file_gen.sh
|
UTF-8
| 297
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
# file_gen.sh <directory>
# Create files for new problem: <dir>/{in,out} plus the standard source and
# description skeleton files.
if [[ $# -ne 1 ]]
then
echo "Input problem number"
exit 1
fi
if [[ -d ./$1 ]]
then
echo "Directory is already existed!"
exit 1
fi
# Quote "$1" (and use --) so problem names with spaces or a leading dash
# work; abort if the cd fails so files are never created in the wrong place.
mkdir -- "$1"
cd "$1" || exit 1
mkdir in out
touch a.cpp in_gen.cpp sol.cpp test_desc.txt
| true
|
758febcdf07f76aee7aea9b1b4037d33a3f541d9
|
Shell
|
aweiteka/openshift-ansible-contrib
|
/reference-architecture/osp-cli/ch4.5.5_boot_infra_nodes.sh
|
UTF-8
| 712
| 3.25
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
#!/bin/sh
# Boot the two OpenShift infra nodes on OpenStack: each gets a control-net
# and tenant-net NIC, its pre-created "-docker" cinder volume as vdb, and a
# per-host cloud-init user-data file.
DOMAIN=${DOMAIN:-ocp3.example.com}
GLANCE_IMAGE=${GLANCE_IMAGE:-rhel72}
# Retrive a Neutron net id by name
function net_id_by_name() {
# NAME=$1
neutron net-list --field id --field name | grep $1 | cut -d' ' -f2
}
for HOSTNAME in infra-node-0 infra-node-1
do
# Look up the id of the host's pre-provisioned docker-storage volume.
VOLUMEID=$(cinder show ${HOSTNAME}-docker | grep ' id ' | awk '{print $4}')
nova boot --flavor m1.medium --image ${GLANCE_IMAGE} --key-name ocp3 \
 --nic net-id=$(net_id_by_name control-network) \
 --nic net-id=$(net_id_by_name tenant-network) \
 --security-groups infra-sg \
 --block-device source=volume,dest=volume,device=vdb,id=${VOLUMEID} \
 --user-data=user-data/${HOSTNAME}.yaml \
 ${HOSTNAME}.${DOMAIN}
done
| true
|
95287308830cf16864a0a39701293c6e9c62484d
|
Shell
|
k1rh4/EOS_TOOL
|
/RUN_NODE/SCRIPT/makeUser.sh
|
UTF-8
| 821
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash
# Create an EOS wallet + keypair for the user named in $1, import the key,
# and register the account on-chain with staked NET/CPU and RAM.
# NOTE(review): the wallet path below is hard-coded to one user's home
# directory — confirm before running on another host.
USER_NAME=$1
rm -rf /home/k1rh4/eosio-wallet/./$USER_NAME.wallet
cleos wallet create --to-console -n $USER_NAME
cleos wallet create_key -n $USER_NAME
echo "::::::::: CREATE RANDOM KEY ::::::::::::"
# "cleos create key" prints "Private key: <k> Public key: <k>"; fields 3 and
# 6 of the space-split output are the private and public keys respectively.
DATA=`cleos create key --to-console`
PRV_KEY=`echo $DATA | awk -F "[ ]" {'print $3'}`
echo $PRV_KEY
PUB_KEY=`echo $DATA | awk -F "[ ]" {'print $6'}`
echo $PUB_KEY
echo "::::::::::::::::::::::::::::::::::::::::"
# "eosio" uses the well-known default development private key instead of a
# freshly generated one.
if [ "$1" == "eosio" ]; then
cleos wallet import -n eosio --private-key 5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3
else
cleos wallet import --name $USER_NAME --private-key $PRV_KEY
fi
echo 'cleos create account eosio '$USER_NAME' EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV' $PUB_KEY
#cleos create account eosio $USER_NAME "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" $PUB_KEY
cleos system newaccount eosio $USER_NAME "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV" $PUB_KEY --stake-net "1.0001 EOS" --stake-cpu "1.0001 EOS" --buy-ram-kbytes 4000
| true
|
ece423c0a22328c16d61b28ee99675da4154b00c
|
Shell
|
agus1010/Paralelos
|
/TP 1/Ej2/CorreTodo
|
UTF-8
| 1,198
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
# Benchmark driver: runs the sequential, pthreads and OpenMP binaries for
# doubling problem sizes N (2 and 4 threads for the parallel versions),
# appending timings to tiempos.csv. Stops once the sequential run is slower
# than both 2-thread parallel runs.
printf "Algoritmo\tN\tHilos\tTiempos\n" > tiempos.csv
N=512
Hilos=1
tSecuencial="1"
tPthreads2="0"
tOpenmp2="0"
tPthreads4="0"
tOpenmp4="0"
finish=true
while [[ $finish == true ]]; do
echo "Probando para N=$N"
printf "Secuencial\t$N\t$Hilos\t" >> tiempos.csv
# NOTE(review): tr -d deletes every character in the set, not the phrase —
# assumed acceptable because the remaining digits/point form the timing.
tSecuencial="$(./secuencial.exe $N | tr -d Tiempo\ en\ segundos\ )"
echo "$tSecuencial" >> tiempos.csv
let Hilos*=2
printf "PThreads\t$N\t$Hilos\t" >> tiempos.csv
tPthreads2="$(./pthreads.exe $N $Hilos | tr -d Tiempo\ en\ segundos\ )"
echo "$tPthreads2" >> tiempos.csv
printf "OpenMP\t$N\t$Hilos\t" >> tiempos.csv
tOpenmp2="$(./openmp.exe $N $Hilos | tr -d Tiempo\ en\ segundos\ )"
echo "$tOpenmp2" >> tiempos.csv
let Hilos*=2
printf "PThreads\t$N\t$Hilos\t" >> tiempos.csv
tPthreads4="$(./pthreads.exe $N $Hilos | tr -d Tiempo\ en\ segundos\ )"
echo "$tPthreads4" >> tiempos.csv
printf "OpenMP\t$N\t$Hilos\t" >> tiempos.csv
tOpenmp4="$(./openmp.exe $N $Hilos | tr -d Tiempo\ en\ segundos\ )"
echo "$tOpenmp4" >> tiempos.csv
let N*=2
Hilos=1
# BUGFIX: compare the timings numerically via awk. The original used the
# shell's ">" inside [[ ]], which compares strings lexicographically and is
# wrong for decimal timings (e.g. "9.5" sorts after "10.2").
if awk -v s="$tSecuencial" -v p="$tPthreads2" -v o="$tOpenmp2" \
    'BEGIN { exit !(s > p && s > o) }'; then
finish=false
fi
done
| true
|
0d4021c2d059a472dae93dce0e3605767e18c5e6
|
Shell
|
yatishbalaji/mailpop3
|
/tests/tls.sh
|
UTF-8
| 902
| 3.125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# POP3-over-TLS test suite. Expects print_test/print_result helpers and the
# EMAIL/USER/PASS/HOST/PORT/TLSPORT variables to be provided by the harness
# that sources this file.
RANDOMID=$RANDOM
# Send a probe message whose subject/body carry RANDOMID so the download
# test below can recognise it.
print_test "Sending test message to $EMAIL (str: $RANDOMID)"
OUTPUT=`./sendmail.sh -q 1 $EMAIL "subject with $RANDOMID" "body with $RANDOMID"`
print_result 0 $OUTPUT
# Connecting to the plain (non-TLS) port must fail.
print_test "Wrong port"
OUTPUT=`node tls.js --username $USER --password $PASS --host $HOST --port $PORT --login off`;
print_result 1 $OUTPUT
print_test "No login"
OUTPUT=`node tls.js --username $USER --password $PASS --host $HOST --port $TLSPORT --login off`;
print_result 0 $OUTPUT
print_test "Login only"
OUTPUT=`node tls.js --username $USER --password $PASS --host $HOST --port $TLSPORT --login on`;
print_result 0 $OUTPUT
# Full round trip: the downloaded mail must contain the RANDOMID marker.
print_test "Login and message download"
OUTPUT=`node tls.js --username $USER --password $PASS --host $HOST --port $TLSPORT --login on --download on`
OUTPUT=`echo $OUTPUT | grep $RANDOMID`
if [ $? -eq 1 ]; then OUTPUT="fail"; fi
print_result 0 $OUTPUT
| true
|
32ab118e126e74bb1f359a206531702242d75c57
|
Shell
|
776166/yggdrasil-django
|
/bin/clean_py.sh
|
UTF-8
| 333
| 3.28125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Remove Python bytecode caches (__pycache__ dirs, *.pyc, *.pyo) under the
# project root, or under <root>/<subdir> when an argument is given.
P1=`dirname $0`
# NOTE(review): this deliberately shadows $HOME with the project root for the
# remainder of the script (env.conf below appears to rely on it) — confirm.
HOME=`cd $P1; cd .. ; pwd`
. $HOME/etc/env.conf
if [ -n "$1" ]; then
clean_path=$HOME/$1
else
clean_path=$HOME
fi
# BUGFIX: replaced "find | xargs rm -vr" — it broke on paths containing
# whitespace and ran "rm -vr" with no arguments when nothing matched.
# -prune stops find from descending into a directory it is about to delete.
# (Also dropped the dead APPLICATION=$DIR/$APP line: both vars were
# undefined at that point and the result was never used.)
find "$clean_path" -type d -name '__pycache__' -prune -exec rm -vr {} +
find "$clean_path" -name '*.pyc' -print -delete
find "$clean_path" -name '*.pyo' -print -delete
| true
|
fcaf684a36d6fa60a0fd97a448b346c80f364aa4
|
Shell
|
wsimpso1/DS601_Spring21
|
/Demo/user-data_notcorrect.sh
|
UTF-8
| 1,422
| 3
| 3
|
[] |
no_license
|
# #!/usr/bin/env bash
# this will also work but installing anaconda is too much for the task
# # ++++++++++++++++++++ START ANACONDA INSTALL +++++++++++++++++++++
# cd /home/ec2-user
# su ec2-user
# # Download the Linux Anaconda Distribution
# # wget https://repo.anaconda.com/archive/Anaconda3-2019.03-Linux-x86_64.sh -O /tmp/anaconda3.sh
# # Run the installer (installing without -p should automatically install into '/' (root dir)
# # bash /tmp/anaconda3.sh -b -p /home/ec2-user/anaconda3
# # rm /tmp/anaconda3.sh
# ### Run the conda init script to setup the shell
# # echo ". /home/ec2-user/anaconda3/etc/profile.d/conda.sh" >> /home/ec2-user/.bashrc
# # . /home/ec2-user/anaconda3/etc/profile.d/conda.sh
# # source /home/ec2-user/.bashrc
# # Create a base Python3 environment separate from the base env
# conda create -y --name python3
# # +++++++++++++++++++++ END ANACONDA INSTALL ++++++++++++++++++++++
# # ++++++++++++++ SETUP ENV +++++++++++++++
# # Install necessary Python packages
# # Note that 'source' is deprecated, so now we should be using 'conda' to activate/deactivate envs
# conda activate python3
# conda install nltk -y
# conda install scikit-learn=='0.21.3' -y
# conda install Flask -y
# conda install flask_cors -y
# aws s3 cp s3://com.msarica.ds/server.txt ./server.py
# aws s3 cp s3://com.msarica.ds/model.pickle ./model.pickle
# export FLASK_APP=server.py
# flask run --host=0.0.0.0
| true
|
3c37df9c8bdbad8afc56fa97c24aa7a1a681861f
|
Shell
|
codacy-badger/win-sudo
|
/install.sh
|
UTF-8
| 493
| 2.6875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Installs win-sudo into ~/bin/win-sudo via a sparse git checkout (only the
# s/ directory), wires it into ~/.bashrc, and restarts the shell.
# Any command failure triggers the ERR trap and aborts with a message.
echo "Downloading win-sudo...";trap 'echo "Failed to install, sorry :(";exit 1' ERR
# Sparse checkout: fetch only the s/ subtree of the repository.
mkdir -p ~/bin/win-sudo && cd "$_";git init -q && git config core.sparsecheckout true;echo s/ >> .git/info/sparse-checkout
git remote add -mf origin https://github.com/DemonixCraft/win-sudo.git && git pull -q origin master
# Append the PATH hook to .bashrc and remove .bash_profile so .bashrc runs.
echo "source ~/bin/win-sudo/s/path.sh" | tee -a ~/.bashrc >/dev/null 2>&1 && echo "Win-sudo successfully installed!"
rm -rf ~/.bash_profile >/dev/null 2>&1;exec bash
| true
|
3db776ccb1fdd351e11cac9b41dff36e7ec6bb82
|
Shell
|
TanguyLevent/RL4Microgrids
|
/mono_gpu.slurm
|
UTF-8
| 1,297
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/bash
# Slurm batch script: single-task, single-GPU job (array of one element)
# running Main_X.py. The #SBATCH directive lines are parsed by sbatch and
# are kept exactly as written.
#SBATCH --job-name=gpu_mono # nom du job
#SBATCH --ntasks=1 # nombre de tâche (un unique processus ici)
#SBATCH --gres=gpu:1 # nombre de GPU à réserver (un unique GPU ici)
#SBATCH --qos=qos_gpu-t4 #specifier le qos à executer
#SBATCH --cpus-per-task=10 # nombre de coeurs à réserver (un quart du noeud)
# /!\ Careful: the next line is misleading, but in Slurm vocabulary
# "multithread" really does refer to hyperthreading.
#SBATCH --hint=nomultithread # on réserve des coeurs physiques et non logiques
#SBATCH --time=60:00:00 # temps exécution maximum demande (HH:MM:SS)
#SBATCH --output=gpu_mono%A_%a.out # nom du fichier de sortie
#SBATCH --error=gpu_mono%A_%a.out # nom du fichier d'erreur (ici commun avec la sortie)
#SBATCH --array=0 ###-1
#######%5 this applies to the line above (array throttle, disabled)
# echo the commands as they run
set -x
# move into the submission directory
cd ${SLURM_SUBMIT_DIR}
# clean the module environment
module purge
# load the required modules
module load python/2.7.16
module load tensorflow-gpu/py3/2.0.0-beta1
module load pandas
# run the code
srun python Main_X.py
###mv *.out ${SLURM_SUBMIT_DIR}/results_gpu/
###mv *.csv ${SLURM_SUBMIT_DIR}/results/
| true
|
65577a6430b307e60506f3a8cb6f4be9e6b9479a
|
Shell
|
lbrookeIS/dotfiles
|
/install-docker.sh
|
UTF-8
| 696
| 2.796875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Install docker-engine from the (legacy) dockerproject.org apt repo on
# Ubuntu xenial, verify with hello-world, and enable the service.
# !!! requires root !!!
set -e
echo "Installing apt packages..."
apt-get update -y
apt-get install -y --no-install-recommends \
  apt-transport-https \
  ca-certificates \
  linux-image-extra-$(uname -r)
echo "Installing docker..."
apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
echo "deb https://apt.dockerproject.org/repo ubuntu-xenial main" > /etc/apt/sources.list.d/docker.list
apt-get update -y
# BUGFIX: purge is best-effort — without -y it can block on a prompt, and
# under "set -e" a missing lxc-docker package aborted the whole script.
apt-get purge -y lxc-docker || true
apt-cache policy docker-engine
apt-get install -y --no-install-recommends docker-engine
service docker start
# Smoke-test the installation, then remove the test image again.
docker run --rm hello-world
docker rmi hello-world
usermod -aG docker logan
systemctl enable docker
| true
|
443664a3d68d3c73b5ce9d346ea317c8e50bead8
|
Shell
|
tclem/dotfiles
|
/bin/check
|
UTF-8
| 512
| 3.546875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# domainavailable
# Fast, domain name checker to use from the shell
# Use globs for real fun:
# domainavailable blah{1..3}.{com,net,org,co.uk}
# Inspired by foca / giles:
# http://gilesbowkett.blogspot.com/2009/02/simple-bash-domain-availability.html
# A domain is reported AVAILABLE only when DNS returns NXDOMAIN *and* whois
# has no record; everything else is "taken".
# BUGFIX: iterate over "$@" and quote "$d" so arguments are never word-split
# or glob-expanded by the shell.
for d in "$@";
do
  if host "$d" | grep "NXDOMAIN" >&/dev/null; then
    if whois "$d" | grep -E "(No match for|NOT FOUND)" >&/dev/null; then
      echo "$d AVAILABLE";
    else
      echo "$d taken";
    fi
  else
    echo "$d taken";
  fi
  # Brief pause to avoid hammering whois servers.
  sleep 0.1;
done
| true
|
52b865d08d27245308aff23544256c0f81f364b4
|
Shell
|
datapressio/mylondon
|
/deploy.sh
|
UTF-8
| 933
| 2.921875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Stage the mylondon static site into a temp directory (app files plus the
# minimal set of bower polyfills) and sync it to the S3 bucket.
TMP=/tmp/deploy_mylondon
BASEDIR="."
DEST_HOST=s3-eu-west-1.amazonaws.com
DEST_BUCKET=s3://my.london.gov.uk/
# Strip macOS Finder metadata before packaging.
find . -name ".DS_Store" -exec rm -f {} \;
rm -rf $TMP
mkdir -p $TMP
cp $BASEDIR/index.html $TMP
cp $BASEDIR/mylondon.css $TMP
cp $BASEDIR/mylondon.js $TMP
cp -r $BASEDIR/http $TMP
# Copy only the polyfill files actually referenced by the page, preserving
# their bower_components/ paths.
mkdir -p $TMP/bower_components/html5shiv/dist/
mkdir -p $TMP/bower_components/es5-shim
mkdir -p $TMP/bower_components/es5-sham
mkdir -p $TMP/bower_components/console-polyfill
cp bower_components/html5shiv/dist/html5shiv.min.js $TMP/bower_components/html5shiv/dist/
cp bower_components/es5-shim/es5-shim.min.js $TMP/bower_components/es5-shim/
cp bower_components/es5-shim/es5-sham.min.js $TMP/bower_components/es5-sham/
cp bower_components/console-polyfill/index.js $TMP/bower_components/console-polyfill/
# Design sources must never ship to production.
find $TMP -name "*.psd" -exec rm -f {} \;
# --delete makes the bucket mirror the staged tree exactly.
boto-rsync \
 --endpoint "$DEST_HOST" \
 --delete \
 $TMP \
 $DEST_BUCKET
| true
|
8296f695a72ec6c562adc048765b5292f2ad88a0
|
Shell
|
devmittal/Concurrent-Programming
|
/lab0/test_random
|
UTF-8
| 323
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
# Regression test for ./mysort: generate a reproducible shuffled input,
# compare mysort's output against GNU sort -n.
# Deterministic byte stream for shuf --random-source, keyed by the seed.
get_seeded_random()
{
seed="$1"
openssl enc -aes-256-ctr -pass pass:"$seed" -nosalt \
</dev/zero 2>/dev/null
}
# Seeded permutation of 1..5314 -> same "random" case on every run.
shuf -i1-5314 --random-source=<(get_seeded_random 5) > case3.txt
sort -n case3.txt > soln3.txt
./mysort case3.txt -o your3.txt
# Byte-for-byte comparison decides pass/fail.
cmp --silent your3.txt soln3.txt && echo "Pass" || echo "Fail"
| true
|
972a98688d934e126d8680dc588f1b0747c5e116
|
Shell
|
fanmuzhi/chameleon
|
/patch/shasta-bootldr-SRAMtest-1.00.0001/build - original.sh
|
UTF-8
| 735
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/sh
# Release build script: clone the listed firmware repos at RELTAG in
# parallel, strip their git metadata, and build the bootldr flasherase
# patch for shasta.
set -ex
# Avoid locale/timezone influence on build output.
unset TZ TERM LANG
pwd
date
# Remote repository for release:
GITROOT=ssh://gitms@git.synaptics.com/git/biometrics
RELTAG=fw/nassau_patch_bootldr_flasherase/shasta-1.01.0006
REPS="b1216/common b1216/tools fw/include fw/nassau/patch/bootldr fw/nassau/patch/common fw/nassau/common include tools"
rm -rf $REPS
# Clone all repos concurrently; wait below for every background clone.
for d in $REPS ; do
mkdir -p `dirname $d`
# using the .git suffix here for remote branch
(cd `dirname $d` ; git --no-pager clone -q $GITROOT/${d}.git)&
done
wait
# Pin every repo to the release tag.
for d in $REPS ; do
(cd $d ; git --no-pager checkout -q $RELTAG)
done
# Remove .git directories and any now-empty directories to leave a clean
# source snapshot.
find . -depth -type d -name .git -exec rm -rf {} \;
find . -depth -type d -exec rmdir {} \; 2>/dev/null
cd fw/nassau/patch/bootldr/flasherase
make envid
time make shasta
date
| true
|
4c3f4cc3061745b4f1ac2e8548a8c9ceaa42ec24
|
Shell
|
runtime-verification/benchmark-challenge-2018
|
/Open/FOStreams/run.sh
|
UTF-8
| 410
| 3.3125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Subcommand dispatcher for the FOStreams benchmark: forwards the remaining
# arguments to the matching helper script (generator/replayer), or runs the
# generator + monpoly pipeline for "oracle".
script_dir=$(dirname "${BASH_SOURCE[0]}")
subcommand=$1
shift
case "$subcommand" in
  generator|Generator )
    $script_dir/generator.sh "$@"
    ;;
  replayer|Replayer )
    $script_dir/replayer.sh "$@"
    ;;
  oracle|Oracle )
    # Emit signature/formula files, then monitor them with monpoly.
    $script_dir/generator.sh "$@" -osig ./tmp.sig -oformula ./tmp.mfotl
    monpoly -sig ./tmp.sig -formula ./tmp.mfotl -negate
    ;;
  * )
    echo "Invalid command. Try 'generator', 'replayer' or 'oracle'"
    ;;
esac
| true
|
1bf8c79a44140326de37c7158aefc446c1c9f261
|
Shell
|
brandocorp-omnibus/omnibus-sickbeard
|
/package-scripts/sickbeard/postinst
|
UTF-8
| 304
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Perform necessary sickbeard setup steps
# after package is installed.
#
# Debian-style post-install hook: create the sickbeard system user/group and
# hand over ownership of the install directory.
PROGNAME=$(basename $0)
addgroup --system sickbeard
adduser --system --home /opt/sickbeard --no-create-home --group sickbeard
chown -R sickbeard: /opt/sickbeard
echo "Thank you for installing sickbeard!"
exit 0
| true
|
e6a4ca6d3d888dce6cfddcd7ad0c3cf29207dfb6
|
Shell
|
alin-corodescu/Operating-Systems
|
/lab5/ex1.sh
|
UTF-8
| 317
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
# Counts, under the directory given as $1: subdirectories (Subfoldere),
# symbolic links (Linkuri) and hidden entries (Ascuns, names starting ".").
# BUGFIX: the shebang was "#/bin/bash" (missing "!"), and the original
# for-loop over `find` output word-split paths containing whitespace; a
# while-read loop over process substitution handles them and keeps the
# counters in the current shell.
ascuns=0
alias=0
subdir=0
while IFS= read -r item
do
if test -d "$item"
then
let subdir+=1
fi
if test -h "$item"
then
let alias+=1
fi
name=`basename "$item"`
if [[ $name =~ ^\..* ]]
then
let ascuns+=1
fi
done < <(find "$1" -mindepth 1)
echo "Subfoldere= $subdir"
echo "Linkuri = $alias"
echo "Ascuns = $ascuns"
| true
|
703785c23077e270f8a9e335832636e881675ad8
|
Shell
|
niklaskorz/.dotfiles
|
/xinitrc
|
UTF-8
| 1,129
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/sh
# ~/.xinitrc: merge X resources/keymaps, set up keyboard and monitors,
# start background utilities, then exec the i3 window manager.
userresources=$HOME/.Xresources
usermodmap=$HOME/.Xmodmap
sysresources=/etc/X11/xinit/.Xresources
sysmodmap=/etc/X11/xinit/.Xmodmap
# merge in defaults and keymaps
if [ -f $sysresources ]; then
xrdb -merge $sysresources
fi
if [ -f $sysmodmap ]; then
xmodmap $sysmodmap
fi
if [ -f "$userresources" ]; then
xrdb -merge "$userresources"
fi
if [ -f "$usermodmap" ]; then
xmodmap "$usermodmap"
fi
# German keyboard with capslock mapped to ESC
setxkbmap de -option caps:escape
xmodmap -e "clear Lock"
xmodmap -e "keycode 0x42=Escape"
# Configure multiple monitors
# Exactly two connected outputs -> dual-head DP-0 (primary) + HDMI-0.
if [ `xrandr | grep -c ' connected '` -eq 2 ]; then
xrandr --output DP-0 --auto --primary --output HDMI-0 --auto --right-of DP-0
fi
# start some nice programs
if [ -d /etc/X11/xinit/xinitrc.d ] ; then
for f in /etc/X11/xinit/xinitrc.d/?*.sh ; do
[ -x "$f" ] && . "$f"
done
unset f
fi
#sh $HOME/.fehbg &
#sh $HOME/bin/wallpaper &
# Rotating wallpaper slideshow (15-minute interval).
feh --quiet --recursive --randomize --bg-fill --slideshow-delay 900 $HOME/Media/archpapers
#compton --config $HOME/.compton.conf -b
#nitogren --restore &
# Colour-temperature adjustment for the configured location.
redshift -l 49.584966:8.1811209 &
# exec replaces this shell; the X session ends when i3 exits.
exec i3
| true
|
4712a861645872bd7d888b43d1a8a5f80393d101
|
Shell
|
lukas-vlcek/elasticsearch-prometheus-exporter-branches-switch
|
/branch-out-139.sh
|
UTF-8
| 5,696
| 4.125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
#set -euxo pipefail
function usage() {
cat <<EOF
Usage: $0
Example: $0 1> branching.log 2> error.log
This script converts elasticsearch-prometheus-exporter repository into
branches as discussed in https://github.com/vvanholl/elasticsearch-prometheus-exporter/issues/139
In the beginning it will make a fresh clone of "Elasticsearch" and "ES Prometheus plugin" repositories
into local folders, alternatively, you can set the following variables:
DRY_RUN - if true then no changes are made to local plugin repo clone (defaults to true).
PUSH_CHANGES_BACK - if false then no branches are pushed to plugin origin repo (defaults to false).
ESPP_REPO_URL - defaults to https://github.com/vvanholl/elasticsearch-prometheus-exporter.git
ESPP_CLONE_PATH - local path where the Elasticsearch Prometheus Plugin repo is cloned into
(defaults to ./elasticsearch-prometheus-exporter).
Any existing folder at this path is deleted first when this script starts.
SKIP_ESPP_CLONE - skip cloning ES Prometheus plugin source code. Assuming local copy is used (defaults to false).
This is useful to locally debug the code.
ES_REPO_URL - defaults to https://github.com/elastic/elasticsearch.git
ES_CLONE_PATH - local path where the Elasticsearch repo is cloned into (defaults to ./elasticsearch).
Any existing folder at this path is deleted first when this script starts.
SKIP_ES_CLONE - skip cloning Elasticsearch code. Assuming local copy is used (defaults to false).
This is useful to locally debug the code.
EOF
}
#echo Versions used
#git --version #TODO: check we have git version >= 2.0, https://stackoverflow.com/a/14273595
#grep --version
SCRIPT_HOME=`pwd`
DRY_RUN=${DRY_RUN:-true}
PUSH_CHANGES_BACK=${PUSH_CHANGES_BACK:-false}
ESPP_REPO_NAME=elasticsearch-prometheus-exporter
ESPP_REPO_URL=${ESPP_REPO_URL:-https://github.com/vvanholl/${ESPP_REPO_NAME}.git}
SKIP_ESPP_CLONE=${SKIP_ESPP_CLONE:-false}
ESPP_CLONE_PATH=${ESPP_CLONE_PATH:-$SCRIPT_HOME/$ESPP_REPO_NAME}
ES_REPO_NAME=elasticsearch
ES_REPO_URL=${ES_REPO_URL:-https://github.com/elastic/${ES_REPO_NAME}.git}
SKIP_ES_CLONE=${SKIP_ES_CLONE:-false}
ES_CLONE_PATH=${ES_CLONE_PATH:-$SCRIPT_HOME/$ES_REPO_NAME}
case "${1:-}" in
--h*|-h*) usage ; exit 1 ;;
esac
# grep options use different syntax depending on host type
# https://ponderthebits.com/2017/01/know-your-tools-linux-gnu-vs-mac-bsd-command-line-utilities-grep-strings-sed-and-find/
function bsd_or_gnu_grep_switch() {
local switch="dunno"
# BSD or GNU?
if date -v 1d > /dev/null 2>&1; then
#BSD
switch='-Eo'
else
# GNU
switch='-Po'
fi
echo ${switch}
}
function clone_repo() {
local repo_url="$1"
local repo_path="$2"
shift; shift
local args=( "${@:-}" )
if [[ -d ${repo_path} ]] ; then
rm -rf ${repo_path}
fi
git clone ${repo_url} ${repo_path}
}
# argument is major ES version number (like "2")
function list_es_releases() {
local es_major_ver="$1"
shift
local args=( "${@:-}" )
pushd ${ES_CLONE_PATH} > /dev/null
# pull all git tags for given major version; skipping alpha, beta, rc, ...
local -a array=($(git tag --sort=v:refname 2>/dev/null | grep $(bsd_or_gnu_grep_switch) "^v${es_major_ver}\.\d+\.\d+$"))
popd > /dev/null
# get rid of the "v" prefix
for ix in ${!array[*]} ; do echo "${array[$ix]}" | cut -c 2-20 ; done
}
# argument is ES release version number (like "2.4.2")
function list_plugin_releases() {
local es_release_ver="$1"
shift
local args=( "${@:-}" )
pushd ${ESPP_CLONE_PATH} > /dev/null
# git tag --sort=v:refname 2>/dev/null | grep $(bsd_or_gnu_grep_switch) "^${es_major_ver}\.\d+\.\d+\.\d+$"
git tag --sort=v:refname 2>/dev/null | grep $(bsd_or_gnu_grep_switch) "^${es_release_ver}\.\d+$"
popd > /dev/null
}
if [[ "false" == "${SKIP_ESPP_CLONE}" ]] ; then
clone_repo ${ESPP_REPO_URL} ${ESPP_CLONE_PATH}
fi
if [[ "false" == "${SKIP_ES_CLONE}" ]] ; then
clone_repo ${ES_REPO_URL} ${ES_CLONE_PATH}
fi
# Which major ES releases we are going to process
declare -a es_major_versions=("2" "5" "6")
for es_major_ver in "${es_major_versions[@]}"
do
echo "Processing Elasticsearch releases for v${es_major_ver}.x:"
releases=$(list_es_releases ${es_major_ver})
for es_release in ${releases}
do
# Print all relevant release tags of ES Prometheus plugin
echo " - ES v${es_release}"
release_branches=($(list_plugin_releases ${es_release}))
rb_Len=${#release_branches[@]}
if [[ $rb_Len = 0 ]] ; then
(>&2 echo " - No plugin releases found for ES ${es_release}; you might want to fix this manually")
else
echo " - Create and populate new branch ${es_release} to include tags:"
for new_branch in "${release_branches[@]}"
do
echo " - ${new_branch}"
done
commands="git checkout ${release_branches[${#release_branches[@]}-1]}"
commands="${commands}; git checkout -b ${es_release}"
# if [[ "true" == "${PUSH_CHANGES_BACK}" ]] ; then
# commands="${commands}; git push origin ${es_release}"
# fi
commands="${commands}; git checkout master"
echo " \$ ${commands}"
if [[ "false" == "${DRY_RUN}" ]] ; then
pushd ${ESPP_CLONE_PATH} > /dev/null
eval ${commands}
popd > /dev/null
fi
fi
done
done
if [[ "true" == "${PUSH_CHANGES_BACK}" ]] ; then
commands="git push origin --all"
echo "\$ ${commands}"
if [[ "false" == "${DRY_RUN}" ]] ; then
pushd ${ESPP_CLONE_PATH} > /dev/null
eval ${commands}
popd > /dev/null
fi
fi
| true
|
55efd5952f35db302f501dedbc02b5c8b9a5b572
|
Shell
|
windhw/another-django-based-blog
|
/rhsite/restart.sh
|
UTF-8
| 401
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
# Replace these three settings.
PROJDIR="/home/haowei/rhcloud/a/wsgi/rhsite/"
PIDFILE="$PROJDIR/mysite.pid"
SOCKET="$PROJDIR/mysite.sock"
pids=$(ps aux | grep python | grep manage.py | grep -v grep | awk '{print $2}')
for pid in ${pids};do
kill ${pid}
done
cd $PROJDIR
if [ -f $PIDFILE ]; then
rm -f -- $PIDFILE
fi
python manage.py runfcgi umask=0002 socket=$SOCKET pidfile=$PIDFILE
| true
|
5edd2bfb722be17a5da84872c48c8c0592abf88a
|
Shell
|
hkmshb/dotfiles
|
/bin/ssh-ip
|
UTF-8
| 206
| 2.59375
| 3
|
[] |
permissive
|
#!/bin/sh
#
# Usage: ssh-ip
#
# Display ip address for vagrant box described by a vagrantfile
# NOTE: (current directory should have a vagrantfile)
vagrant ssh -c "hostname -I | cut -d' ' -f2" 2>/dev/null
| true
|
5b7ee786919d14b5b9c65e187824dd42fdfc1cf3
|
Shell
|
cpackham/novaprova
|
/build/vagrant/centos.sh
|
UTF-8
| 1,106
| 3.125
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Setup C development environment and prereqs
domainname localdomain
# Setup EPEL
repo=/etc/yum.repos.d/epel.repo
if [ ! -f $repo ] ; then
cat <<EOM >/etc/yum.repos.d/epel-bootstrap.repo
[epel]
name=Bootstrap EPEL
mirrorlist=http://mirrors.fedoraproject.org/mirrorlist?repo=epel-\$releasever&arch=\$basearch
failovermethod=priority
enabled=0
gpgcheck=0
EOM
yum --enablerepo=epel -y install epel-release || exit 1
rm -f /etc/yum.repos.d/epel-bootstrap.repo
if [ ! -f $repo ] ; then
echo "Failed to bootstrap EPEL: no such file $repo"
exit 1
fi
# Horrible hack no.1 to work around a broken mirror
(
grep -v mirrors.fedoraproject.org /etc/hosts
echo '66.135.62.187 mirrors.fedoraproject.org'
) >> /etc/hosts.NEW && mv -f /etc/hosts.NEW /etc/hosts
# Horrible hack no.2 to work around a broken mirror
sed -e 's|https://|http://|g' < $repo > $repo.NEW && mv -f $repo.NEW $repo
fi
yum -y groupinstall 'Development Tools'
yum -y install \
git valgrind-devel binutils-devel libxml2-devel \
doxygen perl-XML-LibXML strace python-pip
pip install breathe Sphinx
| true
|
ab16f144bd527270d2d51bdf4a5dd9552339a645
|
Shell
|
atztao/sos_flow
|
/examples/synthetic_1/run_a_only.sh
|
UTF-8
| 1,372
| 3.234375
| 3
|
[
"LicenseRef-scancode-other-permissive"
] |
permissive
|
#!/bin/bash -e
export SOS_ROOT=$HOME/src/sos_flow
export SOS_CMD_PORT=22500
cwd=`pwd`
working=/tmp/sos_flow_working
mkdir -p ${working}
cd ${working}
if [ ! -f adios_config.xml ] ; then
ln -s ${cwd}/adios_config.xml .
fi
if [ ! -f tau.conf ] ; then
ln -s ${cwd}/tau.conf .
fi
# cleanup
rm -rf new1.ppm *.bp *.trc *.edf *.slog2 *info.txt *ready.txt *.db *.log *.lock
# start the SOS daemon
if [ -z $1 ]; then echo " >>> BATCH MODE!"; fi;
if [ -z $1 ]; then echo " >>> Starting the sosd daemons..."; fi;
${SOS_ROOT}/src/mpi.cleanall
if [ -z $1 ]; then echo " >>> Launching the sosd daemons..."; fi;
daemon0="-np 1 ${SOS_ROOT}/bin/sosd --role SOS_ROLE_DAEMON --port 22500 --buffer_len 8388608 --listen_backlog 10 --work_dir ${working}"
daemon1="-np 1 ${SOS_ROOT}/bin/sosd --role SOS_ROLE_DB --port 22503 --buffer_len 8388608 --listen_backlog 10 --work_dir ${working}"
echo ${daemon0}
echo ${daemon1}
mpirun ${daemon0} : ${daemon1} &
sleep 1
# launch our workflow
i=20
A="-np 2 ${SOS_ROOT}/bin/synthetic_worker_a ${i} 0"
#mpirun ${A}
mpirun -np 1 gdb --args ${SOS_ROOT}/bin/synthetic_worker_a ${i} 0
sleep 1
# post-process TAU files
files=(tautrace.*.trc)
if [ -e "${files[0]}" ] ; then
tau_treemerge.pl
tau2slog2 tau.trc tau.edf -o tau.slog2
rm *.trc *.edf
fi
# shut down the daemon. DAEMON GET OUT!
${SOS_ROOT}/bin/sosd_stop
sleep 1
| true
|
3eb77646fabc73d828deacabf11723c33d74da6a
|
Shell
|
elentok/dotfiles
|
/extra/scripts/git-fix-author
|
UTF-8
| 828
| 3.390625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
#
# Usage: git-fix-author <name> <new-email> <old-email>
#
# More details:
#
# - https://stackoverflow.com/questions/58263216/how-to-change-commit-author-for-multiple-commits-using-filter-branch
# - https://git-scm.com/docs/gitmailmap#_examples
set -euo pipefail
if [ $# -lt 2 ]; then
usage "$0"
exit 1
fi
name="$1"
new_author_email="$2"
old_author_email="$3"
dotf-pip git-filter-repo
rm -rf "$TMP/mailmap"
echo "$name <$new_author_email> <$old_author_email>" > "$TMP/mailmap"
git-filter-repo --mailmap "$TMP/mailmap"
# git filter-branch --env-filter "
# if [ \"\$GIT_AUTHOR_EMAIL\" = \"$old_author_email\" ]; then
# GIT_AUTHOR_EMAIL=$new_author_email
# fi
# if [ \"\$GIT_COMMITTER_EMAIL\" = \"$old_author_email\" ]; then
# GIT_COMMITTER_EMAIL=$new_author_email
# fi
# " -- --all
| true
|
866aee3349a4a7858787aa07cb1ed0417dffa0eb
|
Shell
|
alphonsetai/Ant-Media-Server
|
/src/main/server/create_app.sh
|
UTF-8
| 858
| 2.90625
| 3
|
[
"Apache-2.0"
] |
permissive
|
APP_NAME=$1
AMS_DIR=$2
APP_NAME_LOWER=$(echo $APP_NAME | sed 's/./\L&/g')
APP_DIR=$AMS_DIR/webapps/$APP_NAME
RED5_PROPERTIES_FILE=$APP_DIR/WEB-INF/red5-web.properties
WEB_XML_FILE=$APP_DIR/WEB-INF/web.xml
mkdir $APP_DIR
cp $AMS_DIR/StreamApp*.war $APP_DIR
cd $APP_DIR
jar -xf StreamApp*.war
rm StreamApp*.war
sed -i 's^webapp.dbName=.*^webapp.dbName='$APP_NAME_LOWER'.db^' $RED5_PROPERTIES_FILE
sed -i 's^webapp.contextPath=.*^webapp.contextPath=/'$APP_NAME'^' $RED5_PROPERTIES_FILE
sed -i 's^db.app.name=.*^db.app.name='$APP_NAME'^' $RED5_PROPERTIES_FILE
sed -i 's^db.name=.*^db.name='$APP_NAME_LOWER'^' $RED5_PROPERTIES_FILE
sed -i 's^<display-name>StreamApp^<display-name>'$APP_NAME'^' $WEB_XML_FILE
sed -i 's^<param-value>/StreamApp^<param-value>/'$APP_NAME'^' $WEB_XML_FILE
jar -cvf $APP_NAME.war *
cd ..
cp $APP_NAME/$APP_NAME.war .
rm -r $APP_DIR
| true
|
c9d263e93d5b965703ba468e514dc061f7c01712
|
Shell
|
HL7-DaVinci/prior-auth
|
/dockerRunnerDev.sh
|
UTF-8
| 1,110
| 3.3125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Handle closing application on signal interrupt (ctrl + c)
trap 'kill $CONTINUOUS_BUILD_PID $SERVER_PID; gradle --stop; exit' INT
export TOKEN_BASE_URI=http://localhost:9015
mkdir logs
# Reset log file content for new application boot
echo "*** Logs for 'gradle installBootDist --continuous' ***" > ./logs/builder.log
echo "*** Logs for 'gradle bootRun' ***" > ./logs/runner.log
# Print that the application is starting in watch mode
echo "starting application in watch mode..."
# Start the continious build listener process
echo "starting continuous build listener..."
gradle installBootDist --continuous 2>&1 | tee ./logs/builder.log & CONTINUOUS_BUILD_PID=$!
# Start server process once initial build finishes
( while ! grep -m1 'BUILD SUCCESSFUL' < ./logs/builder.log; do
sleep 1
done
echo "starting application server in debug mode..."
gradle bootRun -Pdebug --args='debug' 2>&1 | tee ./logs/runner.log ) & SERVER_PID=$!
# Handle application background process exiting
wait $CONTINUOUS_BUILD_PID $SERVER_PID
EXIT_CODE=$?
echo "application exited with exit code $EXIT_CODE..."
| true
|
68ac2690d565559fd141c15ce3028bc03b9166d4
|
Shell
|
bireme/fi-admin-migration
|
/1_trata_insumo/scripts/file_out.sh
|
UTF-8
| 8,053
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
# -------------------------------------------------------------------------- #
# index_master.sh - Realiza conversao de registro LILACS para versao 1.7 #
# -------------------------------------------------------------------------- #
#
# Chamada : index_master
# Exemplo : ./index_master.sh <FI>
#
# -------------------------------------------------------------------------- #
# Centro Latino-Americano e do Caribe de Informação em Ciências da Saúde
# é um centro especialidado da Organização Pan-Americana da Saúde,
# escritório regional da Organização Mundial da Saúde
# BIREME / OPS / OMS (P)2016
# -------------------------------------------------------------------------- #
# Historico
# versao data, Responsavel
# - Descricao
cat > /dev/null <<HISTORICO
vrs: 1.00 20170103, Ana Katia Camilo / Fabio Luis de Brito
- Edicao original
HISTORICO
# -------------------------------------------------------------------------- #
# Anota hora de inicio de processamento
export HORA_INICIO=`date '+ %s'`
export HI="`date '+%Y.%m.%d %H:%M:%S'`"
echo "[TIME-STAMP] `date '+%Y.%m.%d %H:%M:%S'` [:INI:] Processa ${0} ${1} ${2} ${3} ${4}"
echo ""
# ------------------------------------------------------------------------- #
# Ajustando variaveis para processamento
source /bases/fiadmin2/1_trata_insumo/tpl/settings.inc
# -------------------------------------------------------------------------- #
echo "- Verificacoes iniciais"
# Verifica passagem obrigatoria de 1 parametro
if [ "$#" -ne "1" ]; then
echo "ERROR: Parametro errado"
echo "Use: $0 <FI - que deve ser o nome do arquivo iso> "
echo "Exemplo: $0 bbo "
exit 0
fi
# -------------------------------------------------------------------------- #
echo "Inicio do processamento"
echo "Vai para o diretorio de trabalho"
cd $DIRWORK/$1
#for i in `ls -d cpo*.iso |cut -c1-6`
#do
# echo "Arquivo iso $i"
# $DIRISIS/mx $i "fst=2 0 v2/" fullinv=$i
#done
echo "cpo001"
$DIRISIS/mx wrk_OK "proc='d1d901d902',if p(v902) then '<1 0>'v902'</1>' else if p(v901) then '<1 0>'v901'</1>' else |<1 0>|v1|</1>| fi,fi" create=lil_001 -all now
echo "cpo004"
$DIRISIS/mx lil_001 "proc='d4d904',if p(v904) then (|<4 0>|v904|</4>|) fi" create=lil_002 -all now
echo "cpo005"
$DIRISIS/mx lil_002 "proc=if a(v905) then 'd*' else 'd5d905','<5 0>'v905'</5>' fi" create=lil_003 -all now
echo "cpo006"
$DIRISIS/mx lil_003 "proc=if a(v906) then 'd*' else 'd6d906','<6 0>'v906'</6>' fi" create=lil_004 -all now
echo "cpo008"
$DIRISIS/mx lil_004 "proc='d8d908',if p(v908) then (|<8 0>|v908|</8>|) fi" create=lil_005 -all now
echo "cpo009"
$DIRISIS/mx lil_005 "proc='d9d909',if p(v909) then '<9 0>'v909'</9>' fi" create=lil_006 -all now
echo "cpo010"
$DIRISIS/mx lil_006 "proc='d10d910',if p(v910) then (|<10 0>|v910|</10>|) fi" create=lil_007 -all now
echo "cpo012"
$DIRISIS/mx lil_007 "proc='d12d912',if p(v912) then (|<12 0>|v912|</12>|) fi" create=lil_008 -all now
echo "cpo014"
$DIRISIS/mx lil_008 "proc='d14d914',if p(v914) then '<14 0>'v914'</14>' else if p(v14) then |<14 0>|v14|</14>| fi,fi" create=lil_009 -all now
echo "cpo016"
$DIRISIS/mx lil_009 "proc='d16d916',if p(v916) then (|<16 0>|v916|</16>|) else if p(v16) then |<16 0>|v16|</16>| fi,fi" create=lil_010 -all now
echo "cpo018"
$DIRISIS/mx lil_010 "proc='d18d918',if p(v918) then (|<18 0>|v918|</18>|) else if p(v18) then |<18 0>|v18|</18>| fi,fi" create=lil_011 -all now
echo "cpo023"
$DIRISIS/mx lil_011 "proc='d23d923',if p(v923) then (|<23 0>|v923|</23>|) else if p(v23) then |<23 0>|v23|</23>| fi,fi" create=lil_012 -all now
echo "cpo025"
$DIRISIS/mx lil_012 "proc='d25d925',if p(v925) then (|<25 0>|v925|</25>|) else if p(v25) then |<25 0>|v25|</25>| fi,fi" create=lil_013 -all now
echo "cpo030"
$DIRISIS/mx lil_013 "proc='d30d930d945',if p(v930) then (|<30 0>|v930|</30>|) else if p(v30) then |<30 0>|v30|</30>| fi,fi" create=lil_014 -all now
echo "cpo035"
$DIRISIS/mx lil_014 "proc='d35d935',if p(v935) then (|<35 0>|v935|</35>|) else if p(v35) then |<35 0>|v35|</35>| fi,fi" create=lil_015 -all now
echo "cpo038"
$DIRISIS/mx lil_015 "proc='d38d938',if p(v938) then (|<38 0>|v938|</38>|) fi" create=lil_016 -all now
echo "cpo040"
$DIRISIS/mx lil_016 "proc='d40d940',if p(v940) then (|<40 0>|v940|</40>|) else if p(v40) then |<40 0>|v40|</40>| fi,fi" create=lil_017 -all now
echo "cpo057"
$DIRISIS/mx lil_017 "proc='d57d957',if p(v957) then (|<57 0>|v957|</57>|) else if p(v57) then |<57 0>|v57|</57>| fi,fi" create=lil_018 -all now
echo "cpo065"
$DIRISIS/mx lil_018 "proc='d65d965',if p(v965) then (|<65 0>|v965|</65>|) fi" create=lil_019 -all now
echo "cpo067"
$DIRISIS/mx lil_019 "proc='d67d967',if p(v967) then (|<67 0>|v967|</67>|) else if p(v67) then |<67 0>|v67|</67>| fi,fi" create=lil_020 -all now
echo "cpo071"
$DIRISIS/mx lil_020 "proc='d71d971',if p(v971) then (|<71 0>|v971|</71>|) fi" create=lil_021 -all now
echo "cpo076"
$DIRISIS/mx lil_021 "proc='d76d976',if p(v976) then (|<76 0>|v976|</76>|) fi" create=lil_022 -all now
echo "cpo083"
$DIRISIS/mx lil_022 "proc='d83d983',if p(v983) then (|<83 0>|v983|</83>|) else if p(v83) then |<83 0>|v83|</83>| fi,fi" create=lil_023 -all now
echo "cpo087"
$DIRISIS/mx lil_023 "proc='d87d987',if p(v987) then (|<87 0>|v987|</87>|) fi" create=lil_024 -all now
echo "cpo088"
$DIRISIS/mx lil_024 "proc='d88d988',if p(v988) then (|<88 0>|v988|</88>|) fi" create=lil_025 -all now
echo "cpo091"
$DIRISIS/mx lil_025 "proc='d91d991',if p(v991) then (|<91 0>|v991|</91>|) else if p(v91) then |<91 0>|v91|</91>| fi,fi" create=lil_026 -all now
echo "cpo110"
$DIRISIS/mx lil_026 "proc='d110d980',if p(v980) then (|<110 0>|v980|</110>|) fi" create=lil_027 -all now
echo "cpo111"
$DIRISIS/mx lil_027 "proc='d111d981',if p(v981) then (|<111 0>|v981|</111>|) fi" create=lil_028 -all now
echo "cpo112"
$DIRISIS/mx lil_028 "proc='d112d972',if p(v972) then (|<112 0>|v972|</112>|) fi" create=lil_029 -all now
echo "cpo113"
$DIRISIS/mx lil_029 "proc='d113d973',if p(v973) then (|<113 0>|v973|</113>|) fi" create=lil_030 -all now
echo "cpo114"
$DIRISIS/mx lil_030 "proc='d114d974',if p(v974) then (|<114 0>|v974|</114>|) fi" create=lil_031 -all now
echo "cpo115"
$DIRISIS/mx lil_031 "proc='d115d975',if p(v975) then (|<115 0>|v975|</115>|) fi" create=lil_032 -all now
echo "cpo700"
$DIRISIS/mx lil_032 "proc='d700d970',if p(v970) then (|<700 0>|v970|</700>|) fi" create=lil_033 -all now
echo "cpo950"
$DIRISIS/mx lil_033 "proc='d950',if p(v950) then (|<950 0>|v950|</950>|) fi" create=lil_034 -all now
echo "cpo500"
$DIRISIS/mx lil_034 "proc='d995',if p(v995) then (|<500 0>|v950|</500>|) fi" create=lil_035 -all now
echo "cpo505"
$DIRISIS/mx lil_035 "proc='d955',if p(v955) then (|<505 0>|v950|</505>|) fi" create=lil_036 -all now
echo "cpo013"
$DIRISIS/mx lil_036 "proc='d13d913',if p(v913) then (|<13 0>|v913|</13>|) fi" create=lil_037 -all now
echo "cpo019"
$DIRISIS/mx lil_037 "proc='d19d919',if p(v919) then (|<19 0>|v919|</19>|) fi" create=lil_038 -all now
echo "cpo085"
$DIRISIS/mx lil_038 "proc='d85d985',if p(v985) then (|<85 0>|v985|</85>|) fi" create=lil_039 -all now
echo "Final"
$DIRISIS/mx lil_039 "proc='S'" iso=lil_OK.iso -all now
# ---------------------------------------------------------------------------#
echo
echo
echo "Fim de processamento"
echo
HORA_FIM=`date '+ %s'`
DURACAO=`expr ${HORA_FIM} - ${HORA_INICIO}`
HORAS=`expr ${DURACAO} / 60 / 60`
MINUTOS=`expr ${DURACAO} / 60 % 60`
SEGUNDOS=`expr ${DURACAO} % 60`
echo
echo "DURACAO DE PROCESSAMENTO"
echo "-------------------------------------------------------------------------"
echo " - Inicio: ${HI}"
echo " - Termino: `date '+%Y.%m.%d %H:%M:%S'`"
echo
echo " Tempo de execucao: ${DURACAO} [s]"
echo " Ou ${HORAS}h ${MINUTOS}m ${SEGUNDOS}s"
echo
# ------------------------------------------------------------------------- #
echo "[TIME-STAMP] `date '+%Y.%m.%d %H:%M:%S'` [:FIM:] Processa ${0} ${1} ${2} ${3} ${4}"
# ------------------------------------------------------------------------- #
| true
|
f4aea882e1cf57fe26e037a7ebf1257e5be31665
|
Shell
|
depositolegale/ojs-archive
|
/archive.sh
|
UTF-8
| 335
| 2.65625
| 3
|
[
"CC0-1.0"
] |
permissive
|
#!/bin/bash
if [ -z "$1" ]
then
echo "USAGE: archive.sh {seed_file}"
exit
fi
jobname=$(basename "$1" .seeds)
wget-lua --user-agent='Wget/1.14.lua.20130523-9a5c - http://www.depositolegale.it' \
--lua-script=ojs.lua \
--input-file=$1 \
--warc-file=data/warc/$jobname \
--page-requisites \
--output-file=logs/$jobname.log
| true
|
f382a16aa1570b02b062788df3143c0e27819f76
|
Shell
|
BipoaroXigen/dotfiles-sway
|
/scripts/dckd.sh
|
UTF-8
| 169
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
if [[ $1 == '--clean' ]]
then
docker stop $(docker ps -a -q)
docker image rm -f $(docker image ls -q)
docker volume rm -f $(docker volume ls -q)
fi
| true
|
a55b6ed5e2326cff04ff7f1cbd638e069c990670
|
Shell
|
git-for-windows/MSYS2-packages
|
/tree/PKGBUILD
|
UTF-8
| 843
| 2.515625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
# Maintainer: Alexey Pavlov <Alexpux@gmail.com>
pkgname=tree
pkgver=1.7.0
pkgrel=1
pkgdesc="A directory listing program displaying a depth indented list of files"
arch=('i686' 'x86_64')
url="http://mama.indstate.edu/users/ice/tree/"
license=('GPL')
depends=('msys2-runtime')
source=(http://mama.indstate.edu/users/ice/${pkgname}/src/${pkgname}-${pkgver}.tgz
'Makefile-Uncomment-for-Cygwin.patch')
sha256sums=('6957c20e82561ac4231638996e74f4cfa4e6faabc5a2f511f0b4e3940e8f7b12'
'4e3b7f5448aa1d80ad7c449a1294b0a54836446ef1c5e76e83e78aba12526eb3')
prepare() {
cd ${srcdir}/${pkgname}-${pkgver}
patch -p1 -i ${srcdir}/Makefile-Uncomment-for-Cygwin.patch
}
build() {
cd ${srcdir}/${pkgname}-${pkgver}
make
}
package() {
cd ${srcdir}/${pkgname}-${pkgver}
make prefix="${pkgdir}/usr" MANDIR="${pkgdir}/usr/share/man/man1" install
}
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.