blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
54b3b636929a456f2f047e76617699767c084c76 | Shell | lachok/heroku-buildpack-aspnet-vnext-dnx451 | /bin/compile | UTF-8 | 4,428 | 3.4375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# fail fast
set -e
# bin/compile <build-dir> <cache-dir>
# Heroku invokes this script with the app's build directory and a cache
# directory that persists between builds.
build_dir=$1
cache_dir=$2
# Expected steps in k.build file
## 1. Install Mono >= 3.4.1 (3.8.0 recommended)
## 2. Install KVM
## 3. Install KRE
## 4. Prepare Procfile
## Ex: {
##  echo "web: k web" > $build_dir/Procfile
## }
# kbuild sample usage ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
# -----------------------------------------------------------
# 1. Install Mono >= 3.4.1 (3.8.0 recommended)
########## https://github.com/friism/heroku-buildpack-mono/blob/master/bin/compile
# S3 bucket serving prebuilt Mono tarballs for this buildpack.
DEPENDENCY_S3_BUCKET_PATH="http://heroku-mono-vnext-build.s3-eu-west-1.amazonaws.com"
# Identifier of the prebuilt Mono snapshot to fetch (looks like a git short
# hash — part of the tarball name on S3).
MONO_VERSION="8de00a1"
STACK="cedar-14"
# Prefix every line of stdin with "-----> " (the buildpack "step" marker).
# IFS= read -r (instead of a bare `read`) preserves leading whitespace and
# literal backslashes in the logged lines.
arrow() {
  while IFS= read -r line; do
    echo "-----> ${line}"
  done
}
# Indent every line of stdin (buildpack detail output under a step marker).
# IFS= read -r preserves leading whitespace and literal backslashes.
indent() {
  while IFS= read -r line; do
    echo "       ${line}"
  done
}
# Download the .tar.gz at $1 and unpack it into directory $2, unless $2
# already exists — the directory itself acts as the cache marker.
# All expansions are quoted so unusual paths cannot word-split, and `--`
# stops rm from treating a value as an option (the original passed
# ${DESTINATION} unquoted straight into `rm -rf`).
function conditional_download() {
  DOWNLOAD_URL="$1"
  DESTINATION="$2"
  if [ ! -d "${DESTINATION}" ]; then
    rm -rf -- "${DESTINATION}"
    mkdir -p "${DESTINATION}"
    echo "Downloading ${DOWNLOAD_URL}" | arrow
    curl -s "${DOWNLOAD_URL}" | tar xz -C "${DESTINATION}"
  fi
}
# Resolve the buildpack's own root directory (parent of bin/).
LP_DIR=`cd $(dirname $0); cd ..; pwd`
echo "LP_DIR:" | arrow
echo "$LP_DIR" | indent
echo "cache_dir:" | arrow
echo "${cache_dir}" | indent
echo "build_dir:" | arrow
echo "${build_dir}" | indent
cd ${build_dir}
mkdir -p ${cache_dir}
# Fetch the prebuilt Mono tarball into the build cache (no-op when cached).
MONO_CACHE_LOCATION=${cache_dir}/${STACK}/mono-${MONO_VERSION}
conditional_download ${DEPENDENCY_S3_BUCKET_PATH}/${STACK}/mono-${MONO_VERSION}.tar.gz ${MONO_CACHE_LOCATION}
PACKAGE_DIR="/app/.packages"
# Copy mono to /app/.packages because mono expects to run out of /app/.packages
mkdir -p "${PACKAGE_DIR}"
cp -r "${MONO_CACHE_LOCATION}/mono" "${PACKAGE_DIR}/."
# Copy mono to build dir
mkdir -p "${build_dir}/.packages"
cp -r "${MONO_CACHE_LOCATION}/mono" "${build_dir}/.packages/."
#export PATH="/app/.packages/mono/bin:${PATH}"
export PATH="${PACKAGE_DIR}/mono/bin:${PATH}"
echo "MONO VERSION:" | arrow
mono --version | indent
# Trust the MyGet/NuGet TLS certificates so package restore can work later.
echo "Updating SSL certificates..." | arrow
mozroots --import --sync --quiet | indent
yes | certmgr -ssl "https://www.myget.org" | indent
yes | certmgr -ssl "https://www.nuget.org" | indent
# 2. Install DNVM
#Create ~/.bash_profile to suppress warnings
echo "Creating ~/.bash_profile" | arrow
touch ~/.bash_profile
echo "Installing DNVM..." | arrow
curl -sSL https://raw.githubusercontent.com/aspnet/Home/dev/dnvminstall.sh | DNX_BRANCH=dev sh
source ~/.dnx/dnvm/dnvm.sh
# 3. Install DNX
echo "Installing DNX..." | arrow
dnvm upgrade | indent
# 3.1 Restore project packages...
# If the app did not ship a NuGet.Config, write a default one pointing at
# the AspNetVNext (MyGet) and nuget.org v2 feeds.
NUGET_CONFIG="${build_dir}/NuGet.Config"
echo "Checking for NuGet.Config" | arrow
if [ ! -f $NUGET_CONFIG ]; then
  echo "${NUGET_CONFIG} not found, creating..." | indent
  mkdir -p ~/.config/NuGet
  touch $NUGET_CONFIG
  echo -e "<?xml version=\"1.0\" encoding=\"utf-8\"?>" > "${NUGET_CONFIG}"
  echo -e "<configuration>" >> "${NUGET_CONFIG}"
  echo -e "  <packageSources>" >> "${NUGET_CONFIG}"
  echo -e "    <add key=\"AspNetVNext\" value=\"https://www.myget.org/F/aspnetvnext/api/v2/\" />" >> "${NUGET_CONFIG}"
  echo -e "    <add key=\"nuget.org\" value=\"https://www.nuget.org/api/v2/\" />" >> "${NUGET_CONFIG}"
  echo -e "  </packageSources>" >> "${NUGET_CONFIG}"
  echo -e "  <disabledPackageSources />" >> "${NUGET_CONFIG}"
  echo -e "</configuration>" >> "${NUGET_CONFIG}"
else
  echo "Found "${NUGET_CONFIG}"" | indent
  cat "${NUGET_CONFIG}" | indent
fi
source ~/.dnx/dnvm/dnvm.sh
echo "Restore project packages..." | arrow
dnu restore | indent
# Ship the DNX runtime inside the slug so the dyno can run it.
echo "Copy DNX to build directory..." | indent
echo "cp -r ~/.dnx ${build_dir}/."
cp -r ~/.dnx $build_dir/.
# 3.2 Publish app
echo "Publish app to ${build_dir}/bin/output" | arrow
dnu publish -o "${build_dir}/bin/output" | indent
# Assumes exactly one app directory under approot/src — TODO confirm for
# multi-project solutions.
app_dir_name=`ls ${build_dir}/bin/output/approot/src/`
echo "app_dir_name is ${app_dir_name}" | arrow
# 4. Prepare Procfile
echo "Creating Procfile..." | arrow
echo "${build_dir}/Procfile" | indent
echo "web: source ~/.dnx/dnvm/dnvm.sh;dnx /app/bin/output/approot/src/${app_dir_name} run" > $build_dir/Procfile
echo "Procfile CAT..." | arrow
cat $build_dir/Procfile | indent
# default Procfile
#if [ ! -r $build_dir/Procfile ]; then
#  echo " No Procfile; using \"web: sh k web --server.urls http://0.0.0.0:80\"."
#  echo "web: sh k web --server.urls http://0.0.0.0:80" > $build_dir/Procfile
#fi
echo "Build SUCCESS" | arrow | true |
0dfe8aaf383dda2ea77663ecbbce513c8423f5f4 | Shell | sysboy/aoc | /2020/day04/part01.sh | UTF-8 | 442 | 2.875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
#
# Advent of Code 2020, day 4, part 1.
# Stage 1 (awk): records in input.txt are separated by blank lines; join each
# multi-line record onto a single line. `tee tidy.data` keeps a copy of the
# joined records for inspection.
# Stage 2 (awk): a record counts as valid when every required field code
# appears somewhere in it ("cid" is deliberately not required by the puzzle).
cat input.txt | awk '
BEGIN {
  words = ""
}
{
  if (NF==0) {
    print words
    words = ""
  } else {
    words = words " " $0
  }
}
END {
  print words
}
' | tee tidy.data| awk '
BEGIN {
  good = 0
  n = split("byr iyr eyr hgt hcl ecl pid", codes)
}
{
  invalid = 0
  for (i in codes) {
    n = index($0,codes[i])
    if (n == 0) {
      invalid = 1
    }
  }
  if ( invalid == 0 ) {
    good = good + 1
  }
}
END {
  print good,"valid codes"
}
'
| true |
ad4ec5388b81539c36a64d92e882816c45ffa63e | Shell | johnyin123/private_documents | /trap.sh | UTF-8 | 1,602 | 3.65625 | 4 | [] | no_license | # TO BE SOURCED ONLY ONCE:
# Include guard: ${__mylib_inc+x} expands to "x" only when the variable is
# already set, so the body below runs exactly once per sourcing shell.
if [ -z ${__mylib_inc+x} ]; then
    __mylib_inc=1
else
    return 0
fi
# MAIN CODE:
###~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~##
# Disable unicode.
LC_ALL=C
LANG=C
set -o pipefail  # trace ERR through pipes
set -o errtrace  # trace ERR through 'time command' and other functions
set -o nounset   ## set -u : exit the script if you try to use an uninitialised variable
set -o errexit   ## set -e : exit the script if any statement returns a non-true return value
###~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~##
# FUNCTION: EXIT_HANDLER
###~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~##
# EXIT trap: must read $? on its very first statement, before any other
# command overwrites it. On failure it captures a stack trace into the
# global _backtrace — nothing prints it here; presumably the sourcing
# script consumes _backtrace itself (TODO confirm).
function exit_handler ()
{
    local error_code="$?"
    # `test a == b` is a bash extension of test; this library is bash-only.
    test $error_code == 0 && return;
    _backtrace=$( backtrace 2 )
}
trap exit_handler EXIT  # ! ! ! TRAP EXIT ! ! !
trap exit ERR           # ! ! ! TRAP ERR ! ! !
# FUNCTION: BACKTRACE
# Print the call stack via the `caller` builtin, skipping the first
# $1 frames (default 0) so the trap plumbing itself is not reported.
function backtrace
{
    local _start_from_=0
    local params=( "$@" )
    if (( "${#params[@]}" >= "1" ))
    then
        _start_from_="$1"
    fi
    local i=0
    local first=false
    # `caller N` fails once N is past the top of the stack, ending the loop.
    while caller $i > /dev/null
    do
        if test -n "$_start_from_" && (( "$i" + 1 >= "$_start_from_" ))
        then
            if test "$first" == false
            then
                echo "BACKTRACE IS:"
                first=true
            fi
            caller $i
        fi
        let "i=i+1"
    done
}
return 0
| true |
9d02ca338ffb4e39b4d9918ca79b2b6573712d31 | Shell | athos-ribeiro/mt-may | /02-distro/gerencia_nudoku.sh | UTF-8 | 1,609 | 4.25 | 4 | [] | no_license | #!/bin/bash
# Install/remove the nudoku game from source (Fedora: uses dnf + sudo).
# Usage: gerencia_nudoku.sh {instalar|remover} [tag]
COMANDO=$1   # subcommand: "instalar" or "remover"
VERSAO=$2    # optional git tag; defaults to the newest tag
NUDOKU_DIR=nudoku
TMP_DIR=/tmp
REPO=git@github.com:jubalh/nudoku.git
# Validate the requested tag, or pick the latest one when none was given.
# Must be called from inside the cloned repository.
checar_versao() {
  if [[ $VERSAO ]]; then
    git tag | grep "^$VERSAO$" > /dev/null
    if [ $? -ne 0 ]; then
      echo '[Error] Tag não existe, nudoku não instalado.'
      exit 1
    fi
  else
    VERSAO=`git tag | sort | tail -n1`
  fi
}
# Build nudoku from source (autotools); all output is discarded.
construir() {
  autoreconf -i > /dev/null 2>&1
  ./configure > /dev/null 2>&1
  make > /dev/null 2>&1
}
# Install build prerequisites via dnf (interactive; may prompt).
checar_dependencias() {
  sudo dnf install gettext gettext-devel autoconf automake
}
# Full install: remove any existing copy, clone, checkout the tag, build,
# and copy the binary (and man page, if present) into /usr/local.
instale() {
  checar_dependencias
  if [ -f /usr/local/bin/nudoku ]; then
    INSTALADA=`nudoku -v | head -n1`
    echo "[INFO] nudoku já instalado: $INSTALADA"
    remova
  fi
  echo "[INFO] instalando nudoku..."
  rm -rf $TMP_DIR/$NUDOKU_DIR
  pushd $TMP_DIR > /dev/null 2>&1
  git clone $REPO > /dev/null 2>&1
  cd $NUDOKU_DIR
  checar_versao
  git checkout $VERSAO > /dev/null 2>&1
  construir
  sudo cp src/nudoku /usr/local/bin/nudoku
  if [ -d man ]; then
    MANUAL=`ls man | grep 'nudoku\.[0-9]'`
    sudo cp man/$MANUAL /usr/local/share/man/man6/nudoku.6
    sudo mandb > /dev/null 2>&1
  fi
  popd > /dev/null 2>&1
  rm -rf $TMP_DIR/$NUDOKU_DIR
  echo "[INFO] feito, nudoku instalado com sucesso"
}
# Remove the installed binary and man page, then refresh the man database.
remova() {
  echo "[INFO] removendo nudoku..."
  sudo rm -rf /usr/local/bin/nudoku
  sudo rm -rf /usr/local/share/man/man6/nudoku.6
  sudo mandb > /dev/null 2>&1
  echo "[INFO] feito, nudoku removido"
}
# Validate input (dispatch on the subcommand):
case $COMANDO in
  instalar)
    instale
    ;;
  remover)
    remova
    ;;
  *)
    echo "[ERROR] comando nao suportado"
    ;;
esac
| true |
56a53d106f6ffab1e13a44bf915222a3041dcf2f | Shell | bbass/locations | /cidrCalc.sh | UTF-8 | 493 | 3.125 | 3 | [] | no_license | #!/bin/bash
# Get primary interface
#primaryInt=`/usr/sbin/netstat -rn | awk '/default/ { print $6 }'`
primaryInt=`/usr/sbin/networksetup -listnetworkserviceorder | grep Device | grep -m 1 en | awk ' { print $NF }' | sed 's/.$//'`
CIDR=$(while read y; do echo ${y%.*}".0/$(m=0; while read -n 1 x && [ $x = f ]; do m=$[m+4]; done < <(ifconfig $primaryInt | awk '/mask/{$4=substr($4,3);print $4}'); echo $m )"; done < <(ifconfig $primaryInt | awk '/inet[ ]/{print $2}'))
echo "$CIDR"
exit 0
| true |
9a19b99dc724d5dae1b28f52677097f4582961c1 | Shell | mkucenski/scripts | /whois-multiple.sh | UTF-8 | 371 | 3.71875 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# Load shared helpers (USAGE, etc.); abort if they cannot be sourced.
. "${BASH_SOURCE%/*}/common-include.sh" || exit 1
# Run whois once per record in IP_FILE; consistently store the results in a specific directory
IP_FILE="$1"   # file listing one IP/domain per line
DESTDIR="$2"   # optional output directory; defaults to ./
if [ $# -eq 0 ]; then
	USAGE "IP_FILE" "DESTDIR" && exit 1
fi
if [ -z "$DESTDIR" ]; then
	DESTDIR="./"
fi
# -r keeps backslashes in the input lines literal.
while read -r LINE; do
	"${BASH_SOURCE%/*}/whois.sh" "$LINE" "$DESTDIR"
done < "$IP_FILE"
| true |
8c8fcdb352c69ed250a90d4c79dfa32635a0314a | Shell | pfidr34/docker-rclone | /sync-abort.sh | UTF-8 | 250 | 3.296875 | 3 | [
"MIT"
] | permissive | #!/bin/sh
set -e

# Abort an in-progress sync: signal the process recorded in the PID file,
# then remove the file. POSIX sh — no bashisms.
PIDFILE=/tmp/sync.pid

if [ ! -f "$PIDFILE" ]
then
    echo "INFO: No outstanding sync $(date)"
else
    # Read the PID once instead of re-running cat three times.
    pid="$(cat "$PIDFILE")"
    echo "INFO: Stopping sync pid $pid $(date)"
    # BUG FIX: pkill exits non-zero when no child processes match, which
    # under `set -e` aborted the script before kill/rm could run, leaving
    # a stale PID file behind. Tolerate that case explicitly.
    pkill -P "$pid" || true
    kill -15 "$pid"
    rm -f "$PIDFILE"
fi
| true |
bf8a262eda1d7c4b9745f3237329c905e1bc576b | Shell | devops-docker/consul | /entrypoint.sh | UTF-8 | 236 | 3.046875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
set -e
CONSUL=/bin/consul
# Docker entrypoint — dispatch on the first argument:
#   client  -> run a Consul agent in client mode
#   server  -> run a Consul agent in server mode
#   default -> echo and exec the given command verbatim (passthrough/debug)
# `exec` replaces this shell so the child becomes PID 1 and receives
# container signals directly.
case "$1" in
  'client' )
    exec ${CONSUL} agent -config-dir=/config
    ;;
  'server' )
    exec ${CONSUL} agent -server -config-dir=/config
    ;;
  *)
    echo "$@"
    exec "$@"
    ;;
esac
| true |
fb9f4ba7e99a9cc503507addf71db97b43af5803 | Shell | CCBR/DNAnexus | /archive/ChIPSeqPipeline_Workflow_2018/ccbr_macs_se_peakcalling/src/ccbr_macs_se_peakcalling.sh | UTF-8 | 3,637 | 3.234375 | 3 | [] | no_license | #!/bin/bash
# ccbr_macs_se_peakcalling 0.0.1
# Generated by dx-app-wizard.
#
# Basic execution pattern: Your app will run on a single machine from
# beginning to end.
#
# Your job's input variables (if any) will be loaded as environment
# variables before this script runs. Any array inputs will be loaded
# as bash arrays.
#
# Any code outside of main() (or any entry point you may add) is
# ALWAYS executed, followed by running the entry point itself.
#
# See https://wiki.dnanexus.com/Developer-Portal for tutorials on how
# to modify this file.
main() {
  # Dump the applet's inputs (provided by DNAnexus as environment variables).
  echo "Value of TreatmentTagAlign: '$TreatmentTagAlign'"
  echo "Value of TreatmentPPQT: '$TreatmentPPQT'"
  echo "Value of InputTagAlign: '$InputTagAlign'"
  echo "Value of BwaIndex: '$BwaIndex'"

  # The following line(s) use the dx command-line tool to download your file
  # inputs to the local file system using variable names for the filenames. To
  # recover the original filenames, you can use the output of "dx describe
  # "$variable" --name".
  mkdir -p /data
  cd /data

  # Infer the MACS2 genome flag from the reference tarball's name prefix:
  # "hg*" -> human ("hs"), "mm*" -> mouse ("mm"). Other prefixes leave
  # $genome unset — presumably never happens upstream; TODO confirm.
  reftargz=$(dx describe "$BwaIndex" --name)
  ref=${reftargz%%.*}
  ref=`echo $ref|awk '{print substr($1,1,2)}'`
  if [ "$ref" == "hg" ]; then
    genome="hs"
  elif [ "$ref" == "mm" ]; then
    genome="mm"
  fi

  # Download treatment/input tagAlign files and the cross-correlation
  # (phantompeakqualtools) table under their original names.
  t_tagalign=$(dx describe "$TreatmentTagAlign" --name)
  dx download "$TreatmentTagAlign" -o $t_tagalign
  t_ppqt=$(dx describe "$TreatmentPPQT" --name)
  dx download "$TreatmentPPQT" -o $t_ppqt
  i_tagalign=$(dx describe "$InputTagAlign" --name)
  dx download "$InputTagAlign" -o $i_tagalign

  # Output names follow MACS2's "<name>_peaks.*"/"<name>_summits.bed" scheme.
  outname=${t_tagalign}_vs_${i_tagalign}
  narrowPeak=${outname}_peaks.narrowPeak
  xls=${outname}_peaks.xls
  bed=${outname}_summits.bed

  #dx-docker run -v /data/:/data kopardev/ccbr_spp_1.14 run_spp.R -c=$t_tagalign -out=ppqt
  # Pull extension size and tag size from the PPQT table (columns 3 and 5;
  # comma-separated lists — only the first value is used). Assumes the
  # standard run_spp.R output layout — TODO confirm.
  extsize=`cat $t_ppqt|awk -F"\t" '{print $3}'|awk -F"," '{print $1}'`
  tagsize=`cat $t_ppqt|awk -F"\t" '{print $5}'|awk -F"," '{print $1}'`
  # Single-end peak calling with a fixed fragment size (--nomodel).
  dx-docker run -v /data/:/data kopardev/ccbr_macs2_2.1.1.20160309 macs2 callpeak -t $t_tagalign -c $i_tagalign -n $outname --nomodel --extsize $extsize --tsize $tagsize -q 0.01 -f BED -g $genome --keep-dup=auto

  # Fill in your application code here.
  #
  # To report any recognized errors in the correct format in
  # $HOME/job_error.json and exit this script, you can use the
  # dx-jobutil-report-error utility as follows:
  #
  #   dx-jobutil-report-error "My error message"
  #
  # Note however that this entire bash script is executed with -e
  # when running in the cloud, so any line which returns a nonzero
  # exit code will prematurely exit the script; if no error was
  # reported in the job_error.json file, then the failure reason
  # will be AppInternalError with a generic error message.

  # The following line(s) use the dx command-line tool to upload your file
  # outputs after you have created them on the local file system. It assumes
  # that you have used the output field name for the filename for each output,
  # but you can change that behavior to suit your needs. Run "dx upload -h"
  # to see more options to set metadata.
  NarrowPeak=$(dx upload /data/$narrowPeak --brief)
  Xls=$(dx upload /data/$xls --brief)
  Bed=$(dx upload /data/$bed --brief)

  # The following line(s) use the utility dx-jobutil-add-output to format and
  # add output variables to your job's output as appropriate for the output
  # class. Run "dx-jobutil-add-output -h" for more information on what it
  # does.
  dx-jobutil-add-output NarrowPeak "$NarrowPeak" --class=file
  dx-jobutil-add-output Xls "$Xls" --class=file
  dx-jobutil-add-output Bed "$Bed" --class=file
}
b9b7c15791726e061ea5e6ce0267d628a30dc327 | Shell | luenling/LCMV_project | /run_freebayes.sh | UTF-8 | 5,332 | 3.125 | 3 | [] | no_license | #!/bin/bash
#----------
# author: Lukas Endler
# date: 20.9.2015 at 16:46
# takes a bam file calls variants with freebayes
#--------------
BASEDIR=/Volumes/Temp/Lukas/LCMV_project
REFGENOME=$BASEDIR/References/viruses_short.fasta
# NOTE(review): PICARD/GATK/SAMTOOLS are defined but never used below.
PICARD=/usr/local/Cellar/picard-tools/2.5.0/share/java/picard.jar
GATK=/Volumes/Temp/Lukas/LCMV_project/Tools/GenomeAnalysisTK-3.4-46.jar
SAMTOOLS=/usr/local/bin/samtools
#FB=$BASEDIR/Tools/freebayes/bin/freebayes
FB=/usr/local/bin/freebayes
# Sample name = the .list file's basename; log files are named after it.
FN=`basename $1 .list`
# NOTE(review): MAXCOV can be overridden via $2 but is never passed to the
# freebayes call below — presumably a leftover; confirm before relying on it.
MAXCOV=1000000
LOGFILE=${FN}.log
ERRORLOG=${FN}.err.log
INF=$1
# Optional 2nd argument: "clip" runs bam clipOverlap on every BAM in the
# list (producing *_co.bam files and a new list); any other value is taken
# as a MAXCOV override.
if [ $2 ] ; then
    if [[ $2 == "clip" ]] ; then
	echo clipping overlaps at `date` >> $LOGFILE
	while read BF ;
	do
	    echo bam clipOverlap --in $BF --out `basename $BF .bam`_co.bam --stats >> $LOGFILE
	    bam clipOverlap --in $BF --out `basename $BF .bam`_co.bam --stats >> $LOGFILE 2>> $ERRORLOG
	    samtools index `basename $BF .bam`_co.bam
	    echo `basename $BF .bam`_co.bam >> ${FN}_co.list
	done < $1
	INF=${FN}_co.list
    else
	MAXCOV=$2
    fi
fi
echo "start freebayes at" `date` >> $LOGFILE
# Log the exact command line (the escaped \> keeps the redirection literal
# in the log), then run pooled-continuous variant calling with strict
# base/mapping-quality and per-read mismatch filters.
echo $FB --fasta-reference $REFGENOME --hwe-priors-off --pooled-continuous --allele-balance-priors-off \
-u --haplotype-length 0 --use-best-n-alleles 4 --bam-list $INF --min-coverage 50\
 --min-alternate-count 3 --min-alternate-fraction 0.001 -Q 10 --read-max-mismatch-fraction 0.10 \
--read-snp-limit 4 --read-indel-limit 1 --min-mapping-quality 30 --min-base-quality 30 \> ${FN}_freebayes_nocmpl.vcf >> $LOGFILE
$FB --fasta-reference $REFGENOME --hwe-priors-off --pooled-continuous --allele-balance-priors-off \
-u --haplotype-length 0 --use-best-n-alleles 4 --bam-list $INF --min-coverage 50\
 --min-alternate-count 3 --min-alternate-fraction 0.001 -Q 10 --read-max-mismatch-fraction 0.10 \
--read-snp-limit 4 --read-indel-limit 1 --min-mapping-quality 30 --min-base-quality 30 > ${FN}_freebayes_nocmpl.vcf 2>> $ERRORLOG
echo "finished with freebayes at " `date` >> $LOGFILE
exit 0
# NOTE(review): everything below `exit 0` is unreachable dead code (the
# post-processing loop and the pasted freebayes help text further down).
for i in 0.01 0.05 0.1; do
python ~/LCMV_project/Scripts/add_afs_freebayes.py -i ${FN}_freebayes_nocmpl.vcf -m $i > ${FN}_freebayes_nocmpl_$i.vcf
bash ~/LCMV_project/Scripts/call_snpeff_freebayse.sh ${FN}_freebayes_nocmpl_$i.vcf
done
input filters:
-4 --use-duplicate-reads
Include duplicate-marked alignments in the analysis.
default: exclude duplicates marked as such in alignments
-m 30
Exclude alignments from analysis if they have a mapping
quality less than Q. default: 1
-q 30
Exclude alleles from analysis if their supporting base
quality is less than Q. default: 0
-R --min-supporting-allele-qsum Q
Consider any allele in which the sum of qualities of supporting
observations is at least Q. default: 0
-Y --min-supporting-mapping-qsum Q
Consider any allele in which and the sum of mapping qualities of
supporting reads is at least Q. default: 0
-Q 10
Count mismatches toward --read-mismatch-limit if the base
quality of the mismatch is >= Q. default: 10
-U
Exclude reads with more than N mismatches where each mismatch
has base quality >= mismatch-base-quality-threshold.
default: ~unbounded
-z --read-max-mismatch-fraction 0.10
Exclude reads with more than N [0,1] fraction of mismatches where
each mismatch has base quality >= mismatch-base-quality-threshold
default: 1.0
--read-snp-limit 4
Exclude reads with more than N base mismatches, ignoring gaps
with quality >= mismatch-base-quality-threshold.
default: ~unbounded
-e --read-indel-limit 1
Exclude reads with more than N separate gaps.
default: ~unbounded
-0 --standard-filters Use stringent input base and mapping quality filters
Equivalent to -m 30 -q 20 -R 0 -S 0
-F --min-alternate-fraction 0.005
Require at least this fraction of observations supporting
an alternate allele within a single individual in the
in order to evaluate the position. default: 0.2
-C --min-alternate-count 3
Require at least this count of observations supporting
an alternate allele within a single individual in order
to evaluate the position. default: 2
-3 --min-alternate-qsum N
Require at least this sum of quality of observations supporting
an alternate allele within a single individual in order
to evaluate the position. default: 0
-G --min-alternate-total N
Require at least this count of observations supporting
an alternate allele within the total population in order
to use the allele in analysis. default: 1
--min-coverage 50
Require at least this coverage to process a site. default: 0
--max-coverage N
Do not process sites with greater than this coverage. default: no limit
| true |
5baa0bb225c9a0c982fc7927e74e512fa293dcbb | Shell | matt-welch/GENI_VT | /docker/base.ubuntu/build_base.ubuntu.sh | UTF-8 | 370 | 3.3125 | 3 | [] | no_license | #!/bin/bash
# docker build [OPTIONS] PATH | URL | -
REPO="bench/base"
TAG="ubuntu"
DOCKERFILE="Dockerfile.base.ubuntu"
echo Building ${REPO}:${TAG} $(grep FROM $DOCKERFILE)
docker build -f $DOCKERFILE \
--tag="${REPO}:${TAG}" \
--cpuset-cpus=1-4 .
# if the above built correctly, we should have a matching image
docker images | grep $REPO | grep $TAG --color
| true |
cc24f26700b527ddc7416245a942f28ede855d67 | Shell | liquanzhou/ops_doc | /Service/Zabbix/Zabbix触发器支持的函数说明.sh | UTF-8 | 7,960 | 3.25 | 3 | [] | no_license | Zabbix触发器支持的函数说明
http://pengyao.org/zabbix-triggers-functions.html
2013-05-06 by pengyao
原文出处: https://www.zabbix.com/documentation/2.0/manual/appendix/triggers/functions
译者: pengyao
abschange
参数: 直接忽略后边的参数
支持值类型: float, int, str, text, log
描述: 返回最近获取到的值与之前的值的差值的绝对值. 对于字符串类型,0表示值相等,1表示值不同
avg
参数: 秒或#num
支持值类型: float, int
描述: 返回指定时间间隔的平均值. 时间间隔可以通过第一个参数通过秒数设置或收集的值的数目(需要前边加上#,比如#5表示最近5次的值) 。如果有第二个,则表示时间漂移(time shift),例如像查询一天之前的一小时的平均值,对应的函数是 avg(3600,86400), 时间漂移是Zabbix 1.8.2加入进来的
change
参数: 直接忽略掉后边的参数
支持值类型: float, int, str, text, log
描述: 返回最近获取到的值与之前的值的差值. 对于字符串类型,0表示值相等,1表示值不同
count
参数: 秒或#num
支持值类型: float, int, str, text, log
描述: 返回指定时间间隔内的数值统计。 时间间隔可以通过第一个参数通过秒数设置或收集的值数目(需要值前边加上#)。本函数可以支持第二个参数作为样本(pattern)数据,第三个参数作为操作(operator)参数,第四个参数作为时间漂移(time shift)参数. 对于样本,整数(iteeger)监控项实用精确匹配,浮点型(float)监控项允许偏差0.0000001
支持的操作(operators)类型:
eq: 相等
ne: 不相等
gt: 大于
ge: 大于等于
lt: 小于
le: 小于等于
like: 内容匹配
对于整数和浮点型监控项目支持eq(默认), ne, gt, ge, lt, le;对于string、text、log监控项支持like(默认), eq, ne
例子:
count(600): 最近10分钟的值的个数
count(600,12): 最近10分钟,值等于12的个数
count(600,12,"gt"): 最近10分钟,值大于12的个数
count(#10,12,"gt"): 最近的10个值中,值大于12的个数
count(600,12,"gt",86400): 24小时之前的前10分钟数据中,值大于12的个数
count(600,,,86400): 24小时之前的前10分钟数据的值的个数
#num参数从Zabbix 1.6.1起开始支持, time shift参数和字符串操作支持从Zabbix 1.8.2开始支持
date
参数: 直接忽略掉后边的参数
支持值类型: 所有(any)
描述: 返回当前日期(格式为YYYYMMDD), 例如20031025
dayofmonth
参数: 直接忽略掉后边的参数
支持值类型: 所有(any)
描述: 返回当前是本月第几天(数值范围:1-31),该函数从Zabbix 1.8.5起开始支持
dayofweek
参数: 直接忽略掉后边的参数
支持值类型: 所有(any)
描述: 返回当前是本周的第几天(数值返回:1-7),星期一是 1,星期天是7
delta
参数: 秒或#num
支持值类型: float, int
描述: 返回指定时间间隔内的最大值与最小值的差值(max()-min())。时间间隔作为第一个参数可以是秒或者收集值的数目. 从Zabbix 1.8.2开始,支持可选的第二个参数 time_shift.
diff
参数: 忽略
支持值类型: float, int, str, text, log
描述: 返回值为1 表示最近的值与之前的值不同,0为其他情况
fuzzytime
参数: 秒
支持值类型: float, int
描述: 返回值为1表示监控项值的时间戳与Zabbix Server的时间多N秒, 0为其他. 常使用system.localtime来检查本地时间是否与Zabbix server时间相同.
iregexp
参数: 第一个为字符串,第二个为秒或#num
支持值类型: str, log, text
描述: 与regexp类似,区别是不区分大小写
last
参数: 秒或#num
支持值类型: float, int, str, text, log
描述: 最近的值,如果为秒,则忽略,#num表示最近第N个值,请注意当前的#num和其他一些函数的#num的意思是不同的
例子:
last(0) 等价于 last(#1)
last(#3) 表示最近**第**3个值(并不是最近的三个值)
本函数也支持第二个参数**time_shift**,例如
last(0,86400) 返回一天前的最近的值
如果在history中同一秒中有多个值存在,Zabbix不保证值的精确顺序
#num从Zabbix 1.6.2起开始支持, timeshift从1.8.2其开始支持,可以查询 avg()函数获取它的使用方法
logeventid
参数: string
支持值类型: log
描述: 检查最近的日志条目的Event ID是否匹配正则表达式. 参数为正则表达式,POSIX扩展样式. 当返回值为0时表示不匹配,1表示匹配。 该函数从Zabbix 1.8.5起开始支持.
logseverity
参数: 忽略
支持值类型: log
描述: 返回最近日志条目的日志等级(log severity). 当返回值为0时表示默认等级,N为具体对应等级(整数,常用于Windows event logs). Zabbix日志等级来源于Windows event log的Information列.
logsource
参数: string
支持值类型: log
描述: 检查最近的日志条目是否匹配参数的日志来源. 当返回值为0时表示不匹配,1表示匹配。通场用于Windows event logs监控. 例如 logsource["VMWare Server"]
max
参数: 秒或#num
支持值类型: float, int
描述: 返回指定时间间隔的最大值. 时间间隔作为第一个参数可以是秒或收集值的数目(前缀为#). 从Zabbix 1.8.2开始,函数支持第二个可选参数 time_shift,可以查看avg()函数获取它的使用方法.
min
参数: 秒或#num
支持值类型: float, int
描述: 返回指定时间间隔的最小值. 时间间隔作为第一个参数可以是秒或收集值的数目(前缀为#). 从Zabbix 1.8.2开始,函数支持第二个可选参数 time_shift,可以查看avg()函数获取它的使用方法.
nodata
参数: 秒
支持值类型: any
描述: 当返回值为1表示指定的间隔(间隔不应小于30秒)没有接收到数据, 0表示其他.
now
参数: 忽略
支持值类型: any
描述: 返回距离Epoch(1970年1月1日 00:00:00 UTC)时间的秒数
prev
参数: 忽略
支持值类型: float, int, str, text, log
描述:返回之前的值,类似于 last(#2)
regexp
参数: 第一个参数为string, 第二个参数为秒或#num
支持值类型: str, log, text
描述: 检查最近的值是否匹配正则表达式,参数的正则表达式为POSIX扩展样式, 第二个参数为秒数或收集值的数目,将会处理多个值. 本函数区分大小写。当返回值为1时表示找到,0为其他.
str
参数: 第一个参数为string, 第二个参数为秒或#num
支持值类型: str, log, text
描述: 查找最近值中的字符串。第一个参数指定查找的字符串,大小写敏感。第二个可选的参数指定秒数或收集值的数目,将会处理多个值。 当返回值为1时表示找到,0为其他.
strlen
参数: 秒或#num
支持值类型: str, log, text
描述: 指定最近值的字符串长度(并非字节), 参数值类似于last函数. 例如strlen(0)等价于strlen(#1),strlen(#3)表示最近的第三个值, strlen(0,86400)表示一天前的最近的值. 该函数从Zabbix 1.8.4起开始支持
sum
参数: 秒或#num
支持值类型: float, int
描述: 返回指定时间间隔中收集到的值的总和. 时间间隔作为第一个参数支持秒或收集值的数目(以#开始). 从Zabbix 1.8.2开始,本函数支持time_shift作为第二个参数。 可以查看avg函数获取它的用法
time
参数: 忽略
支持值类型: any
描述: 返回当前时间,格式为HHMMSS,例如123055
| true |
ec339196e68e467b79b6511627df1b6888864224 | Shell | agirorn/dotfiles | /bin/yarn-clean | UTF-8 | 418 | 3.140625 | 3 | [] | no_license | #!/usr/bin/env zsh
# Empty Yarn's package cache quickly using the rsync-empty-directory trick.
# Abort immediately if the cache directory cannot be resolved or entered,
# so the destructive steps below never run in the wrong place (the original
# used an unchecked `cd` with backticks before deleting directories).
cd "$(yarn cache dir)" || exit 1

# # Failing the script if it is not now running in the Yarn Caches directory
# pwd | rg "Caches/Yarn" && echo ok || exit 1
#
# # Slow per-package removal, kept for reference:
# for i in `ls`; do
#   echo "removing $i - packages left: `ls -l | wc -l`";
#   rm -rf $i;
# done

## This could be a much faster way to do it:
## rsync an empty directory over the cache so deletion happens in bulk.
cd .. || exit 1
mkdir -p .v6-empty
mv v6 .v6
rsync -a --delete .v6-empty/ .v6/
rmdir .v6-empty
rmdir .v6
exit 0
| true |
87da9f6e748d89dc3687650153c53d412be36001 | Shell | jorgediaz-lr/liferay-faster-deploy | /tomcat/cluster_local | UTF-8 | 3,915 | 3.21875 | 3 | [] | no_license | #!/bin/bash
# Source app-server properties from the shared script one directory above
# this one (defines LIFERAY_HOME and friends — TODO confirm).
appserverprops() {
	. $(dirname ${BASH_SOURCE[0]})/../appserverprops
}
# Build an N-node local Tomcat cluster under the parent of LIFERAY_HOME:
# shares the document library, configures mod_jk, prepares each node, and
# writes start_cluster/stop_cluster helper scripts.
cluster() {
	mkdir -p ${LIFERAY_HOME}/data/document_library
	CLUSTER_ROOT=$(dirname $LIFERAY_HOME)
	# Discover the bundled Tomcat folder name (e.g. tomcat-9.0.x).
	for file in $(ls -1 $LIFERAY_HOME | grep '^tomcat'); do
		TOMCAT_FOLDER=$file
	done
	modjk
	echo '#!/bin/bash' > $LIFERAY_HOME/start_cluster
	echo '#!/bin/bash' > $LIFERAY_HOME/stop_cluster
	chmod u+x $LIFERAY_HOME/start_cluster
	chmod u+x $LIFERAY_HOME/stop_cluster
	preparenode 1
	# After node1 starts, block until Tomcat logs "Server startup" before
	# bringing up the remaining nodes (appended to start_cluster).
	echo '
while [ "" == "$(grep '"'"'Server startup'"'"' '${CLUSTER_ROOT}'/node1/'${TOMCAT_FOLDER}'/logs/catalina.out)" ]; do
	sleep 1
done
' >> $LIFERAY_HOME/start_cluster
	for i in $(seq 2 $NODE_COUNT); do
		preparenode $i
	done
	sudo service apache2 reload
}
# Write the Apache mod_jk configuration: a virtual host forwarding Liferay
# paths to the "balancer" worker, plus workers.properties defining one AJP
# worker per node (AJP port 8<i>09) behind a sticky-session load balancer.
modjk() {
	echo '
<VirtualHost liferay:80>
SetOutputFilter DEFLATE

JkMount / balancer
JkMount /c balancer
JkMount /c/* balancer
JkMount /combo balancer
JkMount /combo/* balancer
JkMount /image/* balancer
JkMount /documents/* balancer
JkMount /o/* balancer
JkMount /web/* balancer
JkMount /group/* balancer
JkMount /user/* balancer
JkMount /html/* balancer
</VirtualHost>
' > /etc/apache2/sites-available/001-liferay.conf
	echo '
worker.list=balancer,status

worker.status.type=status

worker.template.type=ajp13
worker.template.socket_keepalive=true
worker.template.lbfactor=1
' > /etc/libapache2-mod-jk/workers.properties
	# NOTE(review): NODE_LIST is assigned an empty command substitution and
	# never used afterwards — appears to be a leftover.
	NODE_LIST=$()
	echo -n '
worker.balancer.type=lb
worker.balancer.sticky_session=1
worker.balancer.balance_workers=' >> /etc/libapache2-mod-jk/workers.properties
	# Emit "node1,node2,..." without a trailing comma or newline.
	seq $NODE_COUNT | awk '{ print "node" $1 }' | perl -pne 'chomp if eof' | tr '\n' ',' >> /etc/libapache2-mod-jk/workers.properties
	for i in $(seq $NODE_COUNT); do
		echo "
worker.node${i}.reference=worker.template
worker.node${i}.host=127.0.0.1
worker.node${i}.port=8${i}09
" >> /etc/libapache2-mod-jk/workers.properties
	done
}
# Source the shared MySQL database setup script. Note: this function name
# shadows the `mysql` client command within this script.
mysql() {
	. $(dirname ${BASH_SOURCE[0]})/../database/mysql
}
# Prepare cluster node $1 under ${CLUSTER_ROOT}/node<N>: copy the Tomcat
# bundle, shift its ports from 80xx to 8<N>xx, write a cluster-enabled
# portal-ext.properties, share the document library via symlink, and append
# start/stop commands to the cluster helper scripts.
preparenode() {
	local NODE_ROOT=${CLUSTER_ROOT}/node${1}
	rm -rf ${NODE_ROOT}
	mkdir -p ${NODE_ROOT}
	cp -R ${LIFERAY_HOME}/$TOMCAT_FOLDER ${NODE_ROOT}/
	if [ -d $LIFERAY_HOME/osgi ]; then
		cp -R $LIFERAY_HOME/osgi ${NODE_ROOT}/
	fi
	# Rewrite every quoted "80xx" port in server.xml to "8<N>xx".
	sed -i.bak "s/\"80/\"8${1}/g" ${NODE_ROOT}/${TOMCAT_FOLDER}/conf/server.xml
	# JGroups TCP clustering + Lucene replication; HTTP on port 8<N>80.
	echo "
include-and-override=$LIFERAY_HOME/portal-ext.properties

cluster.link.autodetect.address=
cluster.link.enabled=true
cluster.link.channel.properties.control=tcp.xml
cluster.link.channel.properties.transport.0=tcp.xml

ehcache.cluster.link.replication.enabled=true

index.read.only=false
lucene.replicate.write=true

portal.instance.protocol=http
portal.instance.http.port=8${1}80
portal.instance.inet.socket.address=localhost:8${1}80

cluster.link.node.bootup.response.timeout=300000
" > ${NODE_ROOT}/portal-ext.properties
	mkdir -p ${NODE_ROOT}/deploy
	# All nodes share one document_library via symlink.
	rm -rf ${NODE_ROOT}/data/document_library
	mkdir -p ${NODE_ROOT}/data
	ln -s ${LIFERAY_HOME}/data/document_library ${NODE_ROOT}/data/document_library
	echo "pushd ${NODE_ROOT}/$TOMCAT_FOLDER/bin; rm -f ../logs/*; ./startup.sh; popd" >> ${LIFERAY_HOME}/start_cluster
	echo "pushd ${NODE_ROOT}/$TOMCAT_FOLDER/bin; ./shutdown.sh; popd" >> ${LIFERAY_HOME}/stop_cluster
	chmod u+x ${NODE_ROOT}/${TOMCAT_FOLDER}/bin/*.sh
	# Per-node debugger port (8<N>00) and a unique mod_jk jvmRoute.
	echo "
JPDA_ADDRESS=localhost:8${1}00" >> ${NODE_ROOT}/${TOMCAT_FOLDER}/bin/setenv.sh
	echo 'CATALINA_OPTS="${CATALINA_OPTS} -DjvmRoute=node'${1}'"' >> ${NODE_ROOT}/${TOMCAT_FOLDER}/bin/setenv.sh
	# JGroups initial hosts: two ports per node starting at 7800.
	echo -n 'CATALINA_OPTS="${CATALINA_OPTS} -Djgroups.bind_addr=127.0.0.1 -Djgroups.tcpping.initial_hosts=' >> ${NODE_ROOT}/${TOMCAT_FOLDER}/bin/setenv.sh
	seq -f '78%02g' $(expr $NODE_COUNT '*' 2) | awk '{ print "localhost[" $1 "]" }' | perl -pne 'chomp if eof' | tr '\n' ',' >> ${NODE_ROOT}/${TOMCAT_FOLDER}/bin/setenv.sh
	echo '"' >> ${NODE_ROOT}/${TOMCAT_FOLDER}/bin/setenv.sh
}
# Number of cluster nodes to create (first CLI argument; default 2).
NODE_COUNT=$1
if [ "" == "$NODE_COUNT" ]; then
	NODE_COUNT=2
fi
# Load server properties, set up the database, then build the cluster.
appserverprops && mysql && cluster
d48afac466add964bb83528f56cda1f25d0efcec | Shell | CiscoUcs/KUBaM | /manual/stage1/scripts/init.sh | UTF-8 | 1,931 | 3.96875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
echo "Looking for iso image in /kubam/ directory"
# The host's ISO directory must be bind-mounted at /kubam.
if [ ! -d /kubam ]; then
    echo "Please rerun with -v <your ISO directory>/:/kubam"
    echo "See documentation at: http://kubam.io"
    exit 1
fi

echo "Looking for the ISO file to extract."
# Pick the first *.iso in /kubam (stderr silenced for the no-match case).
F=$(ls /kubam/*.iso 2>/dev/null | head -1)
# BUG FIX: the original wrote `[[ F = "" ]]` — testing the literal string
# "F" (missing the $), which is never empty — so the missing-ISO error
# branch could never fire and the script continued with no ISO present.
if [[ -z "$F" ]]; then
    echo "No ISO file found. Please add a Linux ISO file to your directory."
    echo "The suffix of the file should be .iso"
    exit 1
fi
# Web root served by nginx inside the container.
KUBAM_ROOT=/usr/share/nginx/html/kubam
# link to the web server directory.
if [ ! -d $KUBAM_ROOT ]
then
    ln -sf /kubam $KUBAM_ROOT
fi
cd $KUBAM_ROOT
# osirrox is so rad!
# thanks: https://stackoverflow.com/questions/22028795/is-it-possible-to-mount-an-iso-inside-a-docker-container
# Extract the ISO contents once, without needing a loop mount (not possible
# in an unprivileged container).
if [ ! -d centos7.3 ]
then
    echo "Extracting centos 7.3 ISO..."
    osirrox -prog kubam -indev ./*.iso -extract . centos7.3
    echo "Finished extracting"
else
    echo "centos7.3 directory exists."
fi
# extract the file in the kubam directory
# Assemble a minimal boot tree (isolinux + LiveOS + images + custom
# isolinux.cfg) used to build the small netboot installer image.
if [ ! -d stage1 ]
then
    echo "Making installation image."
    mkdir -p stage1
    cp -a centos7.3/isolinux stage1/
    cp -a centos7.3/.discinfo stage1/isolinux/
    cp -a centos7.3/LiveOS stage1/isolinux/
    cp -a centos7.3/images/ stage1/isolinux/
    cp /usr/share/kubam/stage1/centos7.3/isolinux.cfg stage1/isolinux/
fi
# Build the bootable mini ISO from the stage1 tree (once).
if [ ! -e centos7.3-boot.iso ]
then
    echo "Compressing Installation image."
    mkisofs -o $KUBAM_ROOT/centos7.3-boot.iso -b isolinux.bin \
    -c boot.cat -no-emul-boot -V 'CentOS 7 x86_64' \
    -boot-load-size 4 -boot-info-table -r -J -v -T stage1/isolinux
fi
# look for config file.
if [ -e stage1.yaml ]
then
    echo "Generating kickstart files."
    kickstart-builder.py
else
    echo "no stage1.yaml file found. Kickstart files won't be generated."
    echo "see documentation at http://kubam.io"
fi
# run the installation script on the ISO file and start the web server.
# start nginx
echo "starting web server"
/usr/sbin/nginx
| true |
976022fc080fea38dad3f247bd5815759830f8f1 | Shell | unnsa/rust | /src/ci/docker/dist-various-1/install-mips-musl.sh | UTF-8 | 1,018 | 2.9375 | 3 | [
"Apache-2.0",
"MIT",
"BSD-3-Clause",
"NCSA",
"LicenseRef-scancode-other-permissive",
"BSD-2-Clause"
] | permissive | # Copyright 2017 The Rust Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution and at
# http://rust-lang.org/COPYRIGHT.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
set -ex

mkdir /usr/local/mips-linux-musl

# originally from
# https://downloads.openwrt.org/snapshots/trunk/ar71xx/generic/
# OpenWrt-Toolchain-ar71xx-generic_gcc-5.3.0_musl-1.1.16.Linux-x86_64.tar.bz2
URL="https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror"
FILE="OpenWrt-Toolchain-ar71xx-generic_gcc-5.3.0_musl-1.1.16.Linux-x86_64.tar.bz2"

curl -L "$URL/$FILE" | tar xjf - -C /usr/local/mips-linux-musl --strip-components=2

# Symlink every cross tool onto PATH. Quote the expansions and use $(...)
# instead of backticks so the paths cannot word-split or glob (the original
# used unquoted $file in both the loop body and the basename substitution).
for file in /usr/local/mips-linux-musl/bin/mips-openwrt-linux-*; do
  ln -s "$file" "/usr/local/bin/$(basename "$file")"
done
| true |
9ceb03b644b220a43b7a64d85fca332b429612e3 | Shell | perwin/perspectiva | /run_unittest_sampler.sh | UTF-8 | 670 | 3.4375 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
# Unit tests for the different sampler classes

# load environment-dependent definitions for CXXTESTGEN, CPP, etc.
. ./define_unittest_vars.sh

# UniformSampler
echo
echo "Generating and compiling unit tests for UniformSampler class..."
# cxxtestgen converts the .t.h suite into a runnable C++ test driver.
$CXXTESTGEN --error-printer -o test_runner_config.cpp unit_tests/unittest_uniform_sampler.t.h
$CPP -o test_runner_config test_runner_config.cpp src/sampler.cpp src/uniform_sampler.cpp \
-I. -I./src -I/usr/local/include -I$CXXTEST
# $? is the exit status of the compile command above.
if [ $? -eq 0 ]
then
	echo "Running unit tests for UniformSampler class:"
	./test_runner_config
	exit
else
	echo "Compilation of unit tests for UniformSampler class failed."
	exit 1
fi
| true |
fc0da1a83b1ae69d6cc96f783ae1e7cf7905ad78 | Shell | wplib/box-cli2 | /cmd/util/cmd/get-box-ip-address/get-box-ip-address.sh | UTF-8 | 586 | 3.359375 | 3 | [] | no_license | #
# Command: box util get-box-ip-address
#
# @TODO generate IP address file if not exists.
# @TODO box util read-box-ip-address should be raw, no creation
# @SEE http://stackoverflow.com/questions/8988824/generating-random-number-between-1-and-10-in-bash-shell-script
#
# Print the project's box IP address: prefer the value recorded in the
# project config (.box.ip_address); otherwise fall back to the project's
# IP file. isEmpty and the `box` CLI are provided by the calling
# environment (defined outside this script) — TODO confirm.
ip_address="$(box util read-project-file-value ".box.ip_address")"
if isEmpty "${ip_address}" ; then
    ip_address=""
    project_dir="$(box util find-project-dir)"
    ip_address="$(cat "${project_dir}/IP")"
fi
if isEmpty "${ip_address}" ; then
    echo "IP address not configured."
    exit
fi
echo "${ip_address}"
exit
| true |
96e8a4449d5e7f42b3925e695e2566481be93908 | Shell | MahendraAllada/aws_cis | /roles/aws_cis/templates/actual_fact/awsconfig_3_5actual.fact | UTF-8 | 1,827 | 3.515625 | 4 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
# ** AUTO GENERATED **
# 3.5 Ensure AWS Config is enabled in all regions
#
# Query the AWS Config service once and reuse the JSON, instead of invoking
# the (slow) AWS CLI repeatedly with identical arguments as before.
recorders=$(aws configservice describe-configuration-recorders)
recorder_status=$(aws configservice describe-configuration-recorder-status)
# A recorder is compliant when it records all supported resource types
# including global ones, its last delivery status is SUCCESS, and it is
# currently recording.
all_supported=$(echo "$recorders" | jq '.ConfigurationRecorders[].recordingGroup' | grep allSupported | grep true | wc -l)
include_global=$(echo "$recorders" | jq '.ConfigurationRecorders[].recordingGroup' | grep includeGlobalResourceTypes | grep true | wc -l)
last_success=$(echo "$recorder_status" | jq '.ConfigurationRecordersStatus[].lastStatus' | grep SUCCESS | wc -l)
recording=$(echo "$recorder_status" | jq '.ConfigurationRecordersStatus[].recording' | grep true | wc -l)
count=0
VARIABLE=
if [ "$all_supported" -gt "0" ] && [ "$include_global" -gt "0" ];then
  if [ "$last_success" -gt "0" ] && [ "$recording" -gt "0" ];then
    count=$((count+1))
    RecorderNameValue=$(echo "$recorder_status" | jq -r '.ConfigurationRecordersStatus[].name')
    # Mirror the original output formatting: newlines become commas, and the
    # trailing comma is stripped.
    VARIABLE=$(echo "{ \"RecorderName\" : \"$RecorderNameValue\" }" | tr '\n' ',' | sed 's/,$//')
  fi
fi
if [ "$count" -eq "0" ]; then
  echo "{ \"3.5 Ensure AWS Config is enabled in all regions\" : \"AWS Config not enabled\" }"
else
  echo "{ \"AWSConfigEnabled\" : [ $VARIABLE ] }"
fi
| true |
30c1387f73962f177989ead5806156bc9735bf45 | Shell | ComplianceAsCode/content | /linux_os/guide/system/auditing/configure_auditd_data_retention/auditd_data_retention_space_left_action/bash/shared.sh | UTF-8 | 546 | 2.859375 | 3 | [
"BSD-3-Clause"
] | permissive | # platform = Red Hat Virtualization 4,multi_platform_fedora,multi_platform_ol,multi_platform_rhel,multi_platform_sle,multi_platform_ubuntu
# NOTE: this is a template — the {{{ ... }}} macros below are expanded by the
# ComplianceAsCode build system before the remediation script is shipped.
{{{ bash_instantiate_variables("var_auditd_space_left_action") }}}
#
# If space_left_action present in /etc/audit/auditd.conf, change value
# to var_auditd_space_left_action, else
# add "space_left_action = $var_auditd_space_left_action" to /etc/audit/auditd.conf
#
AUDITCONFIG=/etc/audit/auditd.conf
{{{ bash_replace_or_append("$AUDITCONFIG", '^space_left_action', "$var_auditd_space_left_action") }}}
| true |
f28b5db35d2c7e4f9138dc656de3bd8cb45ee46f | Shell | hw233/xiangjingchuangshuo_server-c- | /iconvall | UTF-8 | 156 | 3.234375 | 3 | [] | no_license | #!/bin/sh
# Run ./iconv on every .cpp and .h file in the directory given as $1.
dir=$1
# Iterate over the globs directly instead of parsing `ls` output: this keeps
# filenames with spaces intact and does not rely on {cpp,h} brace expansion,
# a bashism that is not guaranteed under #!/bin/sh.
for file in "$dir"/*.cpp "$dir"/*.h
do
	# -f also filters out the literal pattern left behind when a glob
	# matches nothing.
	if [ -f "$file" ]
	then
		./iconv "$file"
		echo "./iconv $file"
	fi
done
echo "iconv complete"
| true |
c0dedad4a75125d0c838854c491d193f5fa0902c | Shell | Globaldots/curl-metrics | /timing.sh | UTF-8 | 1,131 | 3.90625 | 4 | [] | no_license | #!/bin/bash -
# save as timing.sh
# execute: ./timing.sh 100
#
# To add a 2 second pause between invocations:
# ./timing.sh 100 2s
# define variables
N=$1
# used as a prefix to the output file
TEST=test
URL="https://some.domain.co/library/assets/JsonFile/co.domain.styles.neighborhood.elephant.json?mt=123"
declare -a vars
# this is a list of curl metrics to output
vars=(time_namelookup time_connect time_appconnect time_pretransfer time_redirect time_starttransfer time_total speed_upload speed_download remote_ip http_code)
# nothing to modify down below
# Default the pause between requests to 0 when no second argument is given
# (${2:-0} covers both "unset" and "empty", exactly like the old -z test).
pause=${2:-0}
header=
template=
# Build the log header and the curl -w template.  Both accumulate literal
# "\t" two-character sequences that are expanded later by echo -e / curl -w.
for v in "${vars[@]}"; do
	header="${header}\\t${v}"
	template="${template}\\t%{${v}}"
done
template="${template}\\n"
# initialize the data header
# add timestamp
header="timestamp\\t${header}"
echo -e "${header}" > "$TEST.timing.log"
# C-style loop instead of spawning `seq`; quote $URL so "?" cannot trigger
# accidental pathname expansion (ShellCheck SC2086).
for ((i = 1; i <= N; i++)); do
	echo "$TEST $i"
	timestamp=$(date +"%Y-%m-%d %T")
	response=$(curl -w "${template}" -s -o /dev/null "$URL")
	echo -e "${timestamp}\\t${response}" >> "$TEST.timing.log"
	sleep "$pause"
done
| true |
7acea33349883fa8787253258592174233267bb7 | Shell | ardinor/misc | /Config Files/Laptop/.xinitrc | UTF-8 | 748 | 3.140625 | 3 | [] | no_license | #!/bin/sh
#
# ~/.xinitrc
#
# Executed by startx (run your window manager from here)
# Source any drop-in X init scripts installed by the distribution.
if [ -d /etc/X11/xinit/xinitrc.d ]; then
  for f in /etc/X11/xinit/xinitrc.d/*; do
    [ -x "$f" ] && . "$f"
  done
  unset f
fi
#export GTK_IM_MODULE='uim'
#export QT_IM_MODULE='uim'
#uim-xim &
#export XMODIFIERS='@im=uim'
#uim-toolbar-gtk &
#uim-toolbar-gtk3 &
#uim-toolbar-gtk3-systray &
# exec gnome-session
# exec startkde
# exec startxfce4
# ...or the Window Manager of your choice
#exec cinnamon-session
# Since we're not using a WM, merge Xresources in here
# NOTE(review): [[ ]] is a bashism, but the shebang (fused on the line above
# this chunk) says #!/bin/sh — works only where sh is bash; confirm.
[[ -f ~/.Xresources ]] && xrdb -merge ~/.Xresources
# Pick the session from the first argument to startx, default to cinnamon.
session=${1:-cinnamon}
case $session in
    i3|i3wm ) exec i3;;
    cinnamon ) exec cinnamon-session;;
    # Unknown value: exec it verbatim (unquoted on purpose, so a multi-word
    # command line can be passed through).
    *) exec $1;;
esac
| true |
8426a90823859531c6be5af83743f00f44818e02 | Shell | Jill-Moore/Target-Gene-Prediction | /Scripts/Benchmark-Characteristics/Compare-CTCF-Signal.sh | UTF-8 | 1,280 | 2.890625 | 3 | [] | no_license | #!/bin/bash
# Compute average CTCF ChIP-seq signal over the candidate regulatory elements
# (ccREs) that appear in a benchmark file, for the biosample named in $1.
# $1 looks like "<biosample>.<...>"; the text before the first dot selects a
# row in the CTCF-List.txt lookup table.
data=$1
biosample=$(echo $data | awk -F "." '{print $1}')
setDir=~/Lab/Target-Gene/Benchmark
scriptDir=~/Projects/Target-Gene-Prediction/Scripts/Benchmark-Characteristics
train=$setDir/$data-Benchmark.v3.txt
output=~/Lab/Target-Gene/Benchmark/Characteristics/TF-Signal
dataDir=/data/projects/encode/data/
tss=~/Lab/Reference/Human/hg19/Gencode19/TSS.2019.bed
ccres=~/Lab/ENCODE/Encyclopedia/V4/Registry/V4-hg19/hg19-ccREs-Simple.bed
tfList=~/Lab/Target-Gene/Benchmark/Characteristics/CTCF-List.txt
# Column 1 of the benchmark file holds ccRE accessions; keep only the rows of
# the genome-wide ccRE BED whose 5th column matches one of them.
cat $train | awk '{print $1}' | sort -u > ccres
awk 'FNR==NR {x[$1];next} ($5 in x)' ccres $ccres > tmp1
#cat $train | awk '{print $2}' | sort -u > genes
#awk 'FNR==NR {x[$1];next} ($7 in x)' genes $tss | \
#	awk '{print $1 "\t" $2-250 "\t" $3+250 "\t" $4}' > tss
# CTCF-List.txt maps biosample -> (dataset accession, signal-file accession).
width=0
dset=$(grep $biosample $tfList | awk '{print $1}')
dsig=$(grep $biosample $tfList | awk '{print $2}')
# Pad intervals by $width on both sides (currently 0) and clamp negative
# start coordinates to 0 so bigWigAverageOverBed accepts them.
awk -F "\t" '{printf "%s\t%.0f\t%.0f\t%s\n", $1,$2-'$width',$3+'$width',$4}' \
tmp1 | awk '{if ($2 < 0) print $1 "\t" 0 "\t" $3 "\t" $4 ; else print $0}' \
| sort -u > little
~/bin/bigWigAverageOverBed -bedOut=out2.bed $dataDir/$dset/$dsig.bigWig little out2
# Keep accession + mean signal (column 5 of bigWigAverageOverBed output).
awk '{print $1 "\t" $5}' out2 | sort -k1,1 > $output/$data"-ELS."$dset"-"$dsig".txt"
rm ccres tmp1 little out2.bed out2
| true |
0bfbe7156aea49fdb0690fcb8d250c0d3d37da2b | Shell | aburasali/Unit-test-generation-using-machine-learning | /Linux script/Compile and Test/compileAndTest.sh | UTF-8 | 540 | 3.21875 | 3 | [] | no_license | couter=0
# Repeatedly run the PHP compile-and-test driver, restarting it whenever it
# exits.  If it has to be restarted $maxErrors times and the error window is
# shorter than $inSeconds seconds, give up; otherwise reset the window and
# keep retrying.
#
# Note: the fused "couter=0" on the line above this block is a dead typo —
# initialise the real counter here so the -eq comparison below is always
# given a number.
counter=0
lastError=$(date)
maxErrors=10
inSeconds=100
while true; do
	php compileAndTest.php "$1" "$2"
	echo "Restart $1 att:$counter"
	if [ "$counter" -eq "$maxErrors" ]; then
		newDate=$(date)
		# Earliest time at which $maxErrors failures would still be OK.
		minNewDate=$(date -d "$lastError +$inSeconds second")
		first=$(date -d "$minNewDate" +%s)
		second=$(date -d "$newDate" +%s)
		echo "$first -ge $second"
		if [ "$first" -ge "$second" ]; then
			echo "Too many errors"
			break
		fi
		lastError=$(date)
		counter=0
		echo "Retry"
	fi
	sleep 1
	counter=$((counter+1))
done
| true |
30666517c8ac2202ab2d77f4402aef085aa3522c | Shell | philipyun/Code-Examples | /BASH-Projects/safe-rm-srm/trash | UTF-8 | 554 | 3.78125 | 4 | [] | no_license | #!/bin/sh
# Script: srm(trash)
# Author: Philip Yun 65035200

##### Initial checks for trash command #####
# trash takes no parameters; reject any extras.
if [ $# -ne 0 ]; then
	echo usage: trash
	exit
fi

# Create the trash folder if it does not exist.  The original tested
# [ ! -d "~/.trash" ] — a quoted tilde is a literal path that never exists,
# so the check always "failed"; test against $HOME instead.
if [ ! -d "$HOME/.trash" ]; then
	mkdir -p "$HOME/.trash"
fi

##### emptying the trash #####
# Forcefully and recursively remove everything inside the trash folder
rm -rf ~/.trash/*
| true |
43803f80f3b4da4f38ef976341040dd71b8a5c39 | Shell | lesurp/check_eq | /check_eq | UTF-8 | 1,487 | 4.25 | 4 | [] | no_license | #!/bin/bash
# List files with unstaged changes in the current git work tree.
function get_changed_files() {
    git --no-pager diff --name-only
}

# Print each element of the named array on its own line.
# $1 is the *name* of an array; ${!name} is bash indirect expansion.
print_arr() {
    name=$1[@]
    printf '%s\n' "${!name}"
}

# Scratch files: the current version, the pre-change version, and the patch
# used to reconstruct the latter.
new_version_f=$(mktemp)
old_version_f=$(mktemp)
patch_f=$(mktemp)

formatted=()
changed=()
changed_patches=()

# For every changed file, decide whether the change is pure reformatting
# (clang-format output identical before and after) or a real edit.
while read -r file; do
    # we copy the current file to both tmp file
    cat $file > $new_version_f
    cat $file > $old_version_f

    # then revert the old_version_f
    git --no-pager diff $file > $patch_f
    patch -p1 -R $old_version_f $patch_f > /dev/null

    # finally, we format both files and see if there's any difference...
    # NOTE: sometimes multiple passes of clang-format generate
    # a different output (prob. conflicting settings or w/e)
    # should we find another way to compare files?
    clang-format -i $new_version_f
    clang-format -i $old_version_f

    # $? right after the assignment is diff's exit status (0 = identical).
    diff=$(diff -u $old_version_f $new_version_f)
    # there was no difference between the files
    if [ $? -eq 0 ]
    then
        formatted+=($file)
    else
        changed+=($file)
        changed_patches+=("$diff")
    fi
done <<< "$(get_changed_files)"

# Report, colour-coded: green = formatting only, yellow = real changes.
if [ ${#formatted[@]} -ne 0 ]
then
    printf "\e[32mFiles that were only reformatted:\e[0m\n"
    print_arr formatted
fi

if [ ${#changed[@]} -ne 0 ]
then
    printf "\e[33mFiles that were actually changed:\e[0m\n"
    print_arr changed
    printf "\n\e[34m... And the corresponding patches:\e[0m\n"
    print_arr changed_patches
fi

rm $new_version_f
rm $old_version_f
rm $patch_f
| true |
9dd1a09cd98af22c0c5d8e75fd02f4173c0f8d0b | Shell | snowiow/dotfiles | /setup.sh | UTF-8 | 180 | 3.46875 | 3 | [] | no_license | #!/usr/bin/bash
# Symlink every file under ./dotfiles (relative to the current directory)
# into $HOME as a dot-prefixed file, e.g. dotfiles/vimrc -> ~/.vimrc.
pwd=${PWD}
for dotfile in "$pwd"/dotfiles/*
do
    # target is "$HOME/." + the basename of the source file
    target=$HOME/.${dotfile##*/}
    # Quote all expansions so paths containing spaces survive (SC2086).
    echo "Creating Symlink for: $dotfile to $target"
    ln -s "$dotfile" "$target"
done
| true |
dc6bdbe2212d3869565ccbd348300767fefaef19 | Shell | Imran01000/shell_programs | /forloop/Primefactorization.sh | UTF-8 | 272 | 3.515625 | 4 | [] | no_license | #!/bin/bash -x
# Print the prime factorisation of a number read from the user; each prime
# factor is printed once per occurrence (e.g. 12 -> 2 2 3).
read -r -p "Enter the number" number
for ((i = 2; i <= number; i++))
do
	# Any i that still divides number here must be prime: every smaller
	# factor has already been divided out by previous iterations.
	if ((number % i == 0))
	then
		while ((number % i == 0))
		do
			number=$((number / i))
			echo "$i"
		done
	fi
done
| true |
6b04ee5afd4c90745b4677742ff2f5e3c494e92d | Shell | bazelbuild/bazel | /tools/android/desugar.sh | UTF-8 | 5,129 | 3.0625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A wrapper around the desugar binary that sets
# jdk.internal.lambda.dumpProxyClasses and configures Java 8 library rewriting
# through additional flags.
# Locate the Bazel runfiles tree for this wrapper; fall back to the Windows
# layout (<name>.exe.runfiles) when the POSIX one does not exist.
RUNFILES="${RUNFILES:-$0.runfiles}"
CHECK_FOR_EXE=0
if [[ ! -d $RUNFILES ]]; then
  # Try the Windows path
  RUNFILES="${RUNFILES:-$0.exe.runfiles}"
  CHECK_FOR_EXE=1
fi
RUNFILES_MANIFEST_FILE="${RUNFILES_MANIFEST_FILE:-$RUNFILES/MANIFEST}"
export JAVA_RUNFILES=$RUNFILES
export RUNFILES_LIB_DEBUG=1
# --- begin runfiles.bash initialization v2 ---
# Copy-pasted from the Bazel Bash runfiles library v2.
set -uo pipefail; f=bazel_tools/tools/bash/runfiles/runfiles.bash
source "${RUNFILES_DIR:-/dev/null}/$f" 2>/dev/null || \
  source "$(grep -sm1 "^$f " "${RUNFILES_MANIFEST_FILE:-/dev/null}" | cut -f2- -d' ')" 2>/dev/null || \
  source "$0.runfiles/$f" 2>/dev/null || \
  source "$(grep -sm1 "^$f " "$0.runfiles_manifest" | cut -f2- -d' ')" 2>/dev/null || \
  source "$(grep -sm1 "^$f " "$0.exe.runfiles_manifest" | cut -f2- -d' ')" 2>/dev/null || \
  { echo>&2 "ERROR: cannot find $f"; exit 1; }; f=; set -e
# --- end runfiles.bash initialization v2 ---
# Resolve the actual Desugar binary via the runfiles library.
if [[ $CHECK_FOR_EXE -eq 0 ]]; then
  DESUGAR="$(rlocation "bazel_tools/src/tools/android/java/com/google/devtools/build/android/desugar/Desugar")"
else
  DESUGAR="$(rlocation "bazel_tools/src/tools/android/java/com/google/devtools/build/android/desugar/Desugar.exe")"
fi
# Scratch directory for the lambda proxy-class dump and the merged params
# file; removed on exit by the trap below.
readonly TMPDIR="$(mktemp -d)"
trap "rm -rf ${TMPDIR}" EXIT
# Flags configuring Java 8 core-library desugaring: which java.* prefixes get
# rewritten to the desugared support library and which interfaces are emulated.
readonly DESUGAR_JAVA8_LIBS_CONFIG=(--rewrite_core_library_prefix java/time/ \
  --rewrite_core_library_prefix java/lang/Double8 \
  --rewrite_core_library_prefix java/lang/Integer8 \
  --rewrite_core_library_prefix java/lang/Long8 \
  --rewrite_core_library_prefix java/lang/Math8 \
  --rewrite_core_library_prefix java/io/Desugar \
  --rewrite_core_library_prefix java/io/UncheckedIOException \
  --rewrite_core_library_prefix java/util/stream/ \
  --rewrite_core_library_prefix java/util/function/ \
  --rewrite_core_library_prefix java/util/Desugar \
  --rewrite_core_library_prefix java/util/DoubleSummaryStatistics \
  --rewrite_core_library_prefix java/util/IntSummaryStatistics \
  --rewrite_core_library_prefix java/util/LongSummaryStatistics \
  --rewrite_core_library_prefix java/util/Objects \
  --rewrite_core_library_prefix java/util/Optional \
  --rewrite_core_library_prefix java/util/PrimitiveIterator \
  --rewrite_core_library_prefix java/util/Spliterator \
  --rewrite_core_library_prefix java/util/StringJoiner \
  --rewrite_core_library_prefix javadesugar/testing/ \
  --rewrite_core_library_prefix java/util/concurrent/ConcurrentHashMap \
  --rewrite_core_library_prefix java/util/concurrent/ThreadLocalRandom \
  --rewrite_core_library_prefix java/util/concurrent/atomic/DesugarAtomic \
  --auto_desugar_shadowed_api_use \
  --emulate_core_library_interface java/util/Collection \
  --emulate_core_library_interface java/util/Map \
  --emulate_core_library_interface java/util/Map\$Entry \
  --emulate_core_library_interface java/util/Iterator \
  --emulate_core_library_interface java/util/Comparator \
  --dont_rewrite_core_library_invocation "java/util/Iterator#remove" )
# Check for params file. Desugar doesn't accept a mix of params files and flags
# directly on the command line, so we need to build a new params file that adds
# the flags we want.
if [[ "$#" -gt 0 ]]; then
  arg="$1";
  case "${arg}" in
    @*)
      # Copy the caller's params file (minus the leading '@') and append our
      # extra library-rewriting flags, one per line.
      params="${TMPDIR}/desugar.params"
      cat "${arg:1}" > "${params}" # cp would create file readonly
      for o in "${DESUGAR_JAVA8_LIBS_CONFIG[@]}"; do
        echo "${o}" >> "${params}"
      done
      "${DESUGAR}" \
        "--jvm_flag=-XX:+IgnoreUnrecognizedVMOptions" \
        "--jvm_flags=--add-opens=java.base/java.lang.invoke=ALL-UNNAMED" \
        "--jvm_flags=--add-opens=java.base/java.nio=ALL-UNNAMED" \
        "--jvm_flags=--add-opens=java.base/java.lang=ALL-UNNAMED" \
        "--jvm_flag=-Djdk.internal.lambda.dumpProxyClasses=${TMPDIR}" \
        "@${params}"
      # temp dir deleted by TRAP installed above
      exit 0
      ;;
  esac
fi
# No params file: pass the caller's flags through on the command line.
"${DESUGAR}" \
  "--jvm_flag=-XX:+IgnoreUnrecognizedVMOptions" \
  "--jvm_flags=--add-opens=java.base/java.lang.invoke=ALL-UNNAMED" \
  "--jvm_flags=--add-opens=java.base/java.nio=ALL-UNNAMED" \
  "--jvm_flags=--add-opens=java.base/java.lang=ALL-UNNAMED" \
  "--jvm_flag=-Djdk.internal.lambda.dumpProxyClasses=${TMPDIR}" \
  "$@" \
  "${DESUGAR_JAVA8_LIBS_CONFIG[@]}"
| true |
df857f6f4273cb57d96bda97f026653fe965c0df | Shell | big-stream/imay-cli-nodejs | /scripts/test.sh | UTF-8 | 782 | 3.390625 | 3 | [
"Unlicense"
] | permissive | #!/bin/bash
# Run the CLI parser against a fixed argument list while feeding it lines on
# stdin, then fail the script if the parser exits non-zero.
# cd into this script's own directory so the relative ../bin path resolves.
script=`readlink -e $0`
cd ${script%/*}
# "テスト開始:" = "test start"; the echoed argument line shows what is passed
# to the command under test ("引数" = "arguments", "アリス" = "Alice").
echo "テスト開始:"
echo 引数: -VvvcN 'アリス' - x --col -d -- y z
# Background job: append a, b, c to a temp file one second apart, simulating
# slowly arriving stdin.
tmp=$(mktemp)
{
	echo a >> $tmp
	sleep 1
	echo b >> $tmp
	sleep 1
	echo c >> $tmp
} &
# tail --pid=$! follows the file until the producer above exits, piping the
# lines into the command under test.
tail -f --pid=$! $tmp | ../bin/mycommand-simple.js -VvvcN 'アリス' - x --col -d -- y z
[[ $? = 0 ]] || exit 1
# "テスト完了:" = "test complete"
echo "テスト完了:"
# Expected parse result (sample output; keys are Japanese labels produced by
# the command under test):
# {
#   version: [ true ],
#   verbose: [ true, true ],
#   '引数省略': [ '--color' ],
#   color: [ true ],
#   name: [ 'アリス' ],
#   '標準入力希望': [ true ],
#   'オペランド': [ 'x', 'y', 'z' ],
#   '曖昧': [ [ '--col', '--color-red', '--color' ] ],
#   color_red: [ true ],
#   debug: [ true ],
#   'オペランド希望': true,
#   '未指定': [ '--number', '--all', '--help', '--type' ]
# }
| true |
7e1f158b68c685a233d840e4565a73bee201ebbe | Shell | paulfantom/dotfiles | /zsh/themes/paulfantom.zsh-theme | UTF-8 | 805 | 2.75 | 3 | [
"MIT"
] | permissive | # paulfantom.zsh-theme
#
# Author: Paweł Krupa
# URL: https://github.com/paulfantom
# primary prompt: dashed separator, directory and k8s info
# Line 1: a full-width row of dashes (dim); line 2: up to 3 trailing path
# components, git/hg info, and a prompt char (# for root, » otherwise).
PS1="${FG[237]}\${(l.\$COLUMNS..-.)}%{$reset_color%}
${FG[032]}%(4~|.../%3~|%~)\$(git_prompt_info)\$(hg_prompt_info) ${FG[105]}%(!.#.»)%{$reset_color%} "
PS2="%{$fg[red]%}\ %{$reset_color%}"
# right prompt: return code, k8s and context (user@host)
RPS1="%(?..%{$fg[red]%}%? ↵%{$reset_color%})"
# Only show the kube context when the kube-ps1 plugin is loaded.
if (( $+functions[kube_ps1] )); then
    RPS1+=' $(kube_ps1)'
fi
RPS1+=" ${FG[237]}%n@%m%{$reset_color%}"
# git settings
ZSH_THEME_GIT_PROMPT_PREFIX=" ${FG[075]}(${FG[078]}"
ZSH_THEME_GIT_PROMPT_CLEAN=""
ZSH_THEME_GIT_PROMPT_DIRTY="${FG[214]}*%{$reset_color%}"
ZSH_THEME_GIT_PROMPT_SUFFIX="${FG[075]})%{$reset_color%}"
# kube ps1 settings
KUBE_PS1_SYMBOL_ENABLE=false
| true |
35a5ad29310d2a73afa0a25e0680bc9b03f1fa20 | Shell | ORESoftware/r2g.docker | /.r2g/exec.sh | UTF-8 | 1,738 | 3.40625 | 3 | [] | no_license | #!/usr/bin/env bash
# Build the project's Dockerfile.r2g image and run the r2g test suite inside
# a fresh container, mounting $docker_r2g_search_root read-only.
set -e;

if [ ! -f package.json ]; then
  echo "there is no package.json file in your PWD." >&2;
  # BUG FIX: the original line read `false; // since ...` — "//" is not a
  # shell comment, it is a command named "//" (a directory), and only went
  # unnoticed because `set -e` aborts on the preceding `false`.
  false # since there is no package.json file, probably should abort here
fi

map="$docker_r2g_fs_map"
search_root="$docker_r2g_search_root"
shared="$docker_r2g_shared_dir";
name="$docker_r2g_package_name" # your project's package.json name field
base_image="node:$r2g_node_version"
container="docker_r2g.$name";

# Remove any leftover container from a previous run (best effort).
docker stop "$container" || echo "no container with name $container running."
docker rm "$container" || echo "no container with name $container could be removed."

tag="docker_r2g_image/$name";

# ANSI colour codes used by the zmx log-prefixing helper below.
export zmx_gray='\033[1;30m'
export zmx_magenta='\033[1;35m'
export zmx_cyan='\033[1;36m'
export zmx_orange='\033[1;33m'
export zmx_yellow='\033[1;33m'
export zmx_green='\033[1;32m'
export zmx_no_color='\033[0m'

# zmx <v1> <v2> ... : run the arguments as a command, prefixing every stdout
# line (gray) and stderr line (magenta) with "[v1 v2]".
zmx(){
  local v1="$1"; local v2="$2"; "$@" \
   2> >( while read line; do echo -e "${zmx_magenta}[${v1} ${v2}] ${zmx_no_color} $line"; done ) \
   1> >( while read line; do echo -e "${zmx_gray}[${v1} ${v2}] ${zmx_no_color} $line"; done )
}

export -f zmx;

# CACHEBUST defeats Docker layer caching from this arg onwards on each run.
docker build \
  -f Dockerfile.r2g \
  -t "$tag" \
  --build-arg base_image="$base_image" \
  --build-arg CACHEBUST="$(date +%s)" .

#docker run \
# -v "$search_root:$shared:ro" \
# -e docker_r2g_fs_map="$map" \
# -e dkr2g_run_args=${run_args} \
# -e MY_R2G_DOCKER_SEARCH_ROOT="/dev/null" \
# --name "$container" "$tag"

docker run \
  -v "$search_root:$shared:ro" \
  -e docker_r2g_fs_map="$map" \
  -e r2g_container_id="$container" \
  --entrypoint "dkr2g" \
  --name "$container" "$tag" \
  run --allow-unknown "$@"

## to debug:
# docker exec -ti <container-name> /bin/bash
| true |
ab943e5f8412d16e6aa4af480f5a96b69a896ffe | Shell | raf2739/cudaStressTest | /gpu_down.sh | UTF-8 | 565 | 2.796875 | 3 | [] | no_license | #!/bin/bash
# Lower the Tegra GPU bus and GPU memory clocks via debugfs overrides, but
# only on Tegra K1 hardware (detected from /proc/cpuinfo).  On other boards
# (e.g. X1) just print a placeholder.
# grep reads the file directly — the original `cat | grep` was a useless use
# of cat; -n makes the non-empty test explicit.
cpu_model="$(grep k1 /proc/cpuinfo)"
if [ -n "${cpu_model}" ]; then
	echo "Downclocking GPU Bus frequency..."
	echo 72000000 > /sys/kernel/debug/clock/override.gbus/rate
	echo 1 > /sys/kernel/debug/clock/override.gbus/state
	echo "Current GPU Bus frequency:"
	cat /sys/kernel/debug/clock/gbus/rate
	echo "Downclocking GPU memory clock..."
	echo 12750000 > /sys/kernel/debug/clock/override.emc/rate
	echo 1 > /sys/kernel/debug/clock/override.emc/state
	echo "Current GPU Memory clock frequency:"
	cat /sys/kernel/debug/clock/emc/rate
else
	echo "x1 commands"
fi
| true |
9d149254501ec7ea48e5f05d670a78a8fe071282 | Shell | DashaZhernakova/misc | /fileProcessing/mergeSNPs.sh | UTF-8 | 197 | 2.59375 | 3 | [] | no_license | #!/bin/bash
# Concatenate all per-chromosome SNP mapping files (Chr*) into one file.
dir=/Users/dashazhernakova/Documents/UMCG/1000g
out=$dir/allSNPMappings_b37.txt
# Truncate instead of `touch`: the loop below appends, so a mere touch would
# duplicate all data when the script is re-run over an existing output file.
: > "$out"
# Expand the glob directly in the loop instead of stashing it unquoted in a
# variable (fragile, ShellCheck SC2086).
for f in "$dir"/Chr*
do
	echo "$f"
	cat "$f" >> "$out"
done
| true |
b49634fe1d9c566d9e859be5f73e4aa96b6bcb5e | Shell | slateci/slate-client-server | /test/init_test_env.sh | UTF-8 | 1,876 | 3.75 | 4 | [
"Unlicense"
] | permissive | #!/bin/sh
TEST_SOURCE_DIR=`dirname $0`
INITIAL_DIR=`pwd`

# wait_pod_ready <name-prefix>: poll `kubectl get pods` in kube-system until
# the named pod reports all containers ready and status Running.
wait_pod_ready(){
	PODNAME=$1;
	echo "Waiting for $PODNAME to be ready"
	# READY holds a *string* that is meant to word-split into arguments of
	# `[` ("0 -eq 1" = initially false).
	# NOTE(review): `until [ "$READY" ]` quotes the variable, which turns it
	# into a non-empty-string test that is immediately true, so the loop body
	# would never run; the word-splitting intent suggests this should be the
	# unquoted `until [ $READY ]` — confirm against upstream.
	READY="0 -eq 1"
	unset DELAY # don't sleep on first iteration
	until [ "$READY" ]; do
		if [ "$DELAY" ]; then
			sleep $DELAY
		else
			DELAY=1
		fi
		STATUS=`kubectl get pods --namespace kube-system 2>/dev/null | grep "$PODNAME"`
		COUNT=`echo "$STATUS" | awk '{print $2}'`
		echo "  Containers: $COUNT ("`echo "$STATUS" | awk '{print $3}'`")"
		# Turn "m/n" into "m -eq n" and append "-a <status> = Running",
		# building the next test expression.
		READY=`echo "$COUNT" | sed 's|\(.*\)/\(.*\)|\1 -eq \2|'`' -a '`echo "$STATUS" | awk '{print $3}'`' = Running'
	done
}

# MINIKUBE_STATUS=`minikube -p 'slate-server-test-kube' status`
# if [ `echo "$MINIKUBE_STATUS" | grep -c Running` -ge 2 \
#      -a `echo "$MINIKUBE_STATUS" | grep -c 'Correctly Configured'` -eq 1 ] ; then
# 	echo "Using running minikube instance"
# else
# 	echo "Starting minikube"
# 	touch .stop_minikube_after_tests
# 	minikube -p 'slate-server-test-kube' start
# fi
# wait_pod_ready "kube-apiserver-minikube"
# # these components get nasty auto-generated names, but have predictable prefixes
# wait_pod_ready "kube-proxy"
# wait_pod_ready "dns"

if [ -z "$TEST_SRC" ];
then
	TEST_SRC=$TEST_SOURCE_DIR
fi

# Start the DynamoDB test server in the background and wait for its ready
# marker file; bail out if the process died meanwhile.
echo "Starting Dynamo server"
./slate-test-database-server &
DBSERVER="$!"
until [ -f .test_server_ready ]; do
	sleep 1 # wait for server to start
done
if ps -p "${DBSERVER}" > /dev/null ; then
	: # good
else
	echo "DBServer failed" 1>&2
	exit 1
fi

# Populate and index a local helm chart repository for the tests.
echo "Preparing local helm repository"
mkdir -p test_helm_repo
cp -a "$TEST_SRC"/test_helm_repo .
helm package "$TEST_SOURCE_DIR"/test_helm_repo/test-app -d test_helm_repo > /dev/null
helm repo index test_helm_repo
# request running the helm server
echo "Starting local helm server"
curl -s 'http://localhost:52000/helm'
helm repo update > /dev/null
helm repo add local http://localhost:8879
echo "Initialization done"
| true |
0c4826aa4ea61906f9d260647a5fc2f7d2d5d6b1 | Shell | delkyd/alfheim_linux-PKGBUILDS | /tboplayer-git/PKGBUILD | UTF-8 | 1,070 | 2.734375 | 3 | [] | no_license | # maintainer kirill malyshev <keryascorpio at gmail.com>
pkgname=tboplayer-git
pkgver=146.5eb1897
pkgrel=1
pkgdesc="Raspberry Pi. A GUI for OMXPlayer"
url="https://github.com/KenT2/tboplayer"
arch=('armv6h' 'armv7h')
depends=('omxplayer' 'python2-gobject2' 'python2-dbus' 'pygtk' 'python2-requests' 'python2-pexpect' 'python2-ptyprocess' 'tk' 'ffmpeg' 'youtube-dl')
source=(git://github.com/KenT2/tboplayer.git)
md5sums=('SKIP')

# Version = "<commit count>.<short hash>" of the checked-out git HEAD.
pkgver() {
  cd tboplayer
  echo $(git rev-list --count HEAD).$(git rev-parse --short HEAD)
}

# Patch the Debian-specific dpkg check out of tboplayer.py, rewrite
# /usr/local paths to /usr, and generate a small launcher script.
build() {
  sed -i -e '/dpkg/a \ \ \ \ \ \ \ \ if True:' \
         -e '/dpkg/d' \
         -e 's/\/usr\/local\//\/usr\//g' \
         "${srcdir}/tboplayer/tboplayer.py"
  echo "#!/bin/sh" > "${srcdir}/tboplayer.sh"
  echo "python2 /usr/lib/tboplayer/tboplayer.py" >> "${srcdir}/tboplayer.sh"
}

# Install the launcher on PATH and the application files under /usr/lib.
package() {
  install -Dm755 "${srcdir}/tboplayer.sh" "${pkgdir}/usr/bin/tboplayer"
  install -Dm644 "${srcdir}/tboplayer/tboplayer.py" "${pkgdir}/usr/lib/tboplayer/tboplayer.py"
  install -Dm644 "${srcdir}/tboplayer/yt-dl_supported_sites" "${pkgdir}/usr/lib/tboplayer/yt-dl_supported_sites"
}
| true |
bf15b6bb4df61b19cce4b55458b718b056d320e5 | Shell | emersion/mrsh | /configure | UTF-8 | 5,618 | 3.046875 | 3 | [
"MIT"
] | permissive | #!/bin/sh -e
# Shared-library version and environment-overridable defaults.
SOVERSION=0.0.0
pkg_config=${PKG_CONFIG:-pkg-config}
outdir=${OUTDIR:-.build}
srcdir=${SRCDIR:-$(dirname "$0")}
CC=${CC:-cc}
LIBS=
# -1 = autodetect, 0 = disabled, 1 = forced/detected.
use_readline=-1
readline=readline
static=
# Parse ./configure-style options:
#   --prefix=DIR            install prefix
#   --without-readline      disable line editing support
#   --with-readline=PKG     force a specific readline pkg-config package
#   --static / --dynamic    toggle static linking (last one wins)
for arg
do
	case "$arg" in
		--prefix=*)
			PREFIX=${arg#*=}
			;;
		--without-readline)
			use_readline=0
			;;
		--with-readline=*)
			use_readline=1
			readline=${arg#*=}
			;;
		--static)
			static=$arg
			;;
		--dynamic)
			static=
			;;
	esac
done
# Emit Makefile rules (via genrules) for every translation unit that goes
# into the libmrsh shared library.
libmrsh() {
	genrules libmrsh \
		'arithm.c' \
		'array.c' \
		'ast_print.c' \
		'ast.c' \
		'buffer.c' \
		'builtin/alias.c' \
		'builtin/bg.c' \
		'builtin/break.c' \
		'builtin/builtin.c' \
		'builtin/cd.c' \
		'builtin/colon.c' \
		'builtin/command.c' \
		'builtin/dot.c' \
		'builtin/eval.c' \
		'builtin/exec.c' \
		'builtin/exit.c' \
		'builtin/export.c' \
		'builtin/false.c' \
		'builtin/fg.c' \
		'builtin/getopts.c' \
		'builtin/hash.c' \
		'builtin/jobs.c' \
		'builtin/pwd.c' \
		'builtin/read.c' \
		'builtin/return.c' \
		'builtin/set.c' \
		'builtin/shift.c' \
		'builtin/times.c' \
		'builtin/trap.c' \
		'builtin/true.c' \
		'builtin/type.c' \
		'builtin/ulimit.c' \
		'builtin/umask.c' \
		'builtin/unalias.c' \
		'builtin/unset.c' \
		'builtin/unspecified.c' \
		'builtin/wait.c' \
		'getopt.c' \
		'hashtable.c' \
		'parser/arithm.c' \
		'parser/parser.c' \
		'parser/program.c' \
		'parser/word.c' \
		'shell/arithm.c' \
		'shell/entry.c' \
		'shell/job.c' \
		'shell/path.c' \
		'shell/process.c' \
		'shell/redir.c' \
		'shell/shell.c' \
		'shell/task/pipeline.c' \
		'shell/task/simple_command.c' \
		'shell/task/task.c' \
		'shell/task/word.c' \
		'shell/trap.c' \
		'shell/word.c'
}
# Emit Makefile rules for the mrsh binary.  The interactive frontend source
# file depends on whether readline support was enabled/detected.
mrsh() {
	if [ $use_readline -eq 1 ]
	then
		frontend='frontend/readline.c'
	else
		frontend='frontend/basic.c'
	fi
	genrules mrsh 'main.c' "$frontend"
}
# Emit Makefile rules for the syntax-highlighting example program.
highlight() {
	genrules highlight example/highlight.c
}
# genrules <target> <src>... — print Makefile fragments for <target>:
# one "<stem>.o: <stem>.c" rule per source file, followed by a
# "<target>_objects=" list with one object per continuation line.
genrules() {
	tgt=$1
	shift
	printf '# Begin generated rules for %s\n' "$tgt"
	for src in "$@"; do
		stem=${src%.*}
		printf '%s.o: %s.c\n' "$stem" "$stem"
	done
	printf '%s_objects=\\\n' "$tgt"
	# Count down so the final entry is emitted without a trailing backslash.
	i=$#
	for src in "$@"; do
		stem=${src%.*}
		i=$((i - 1))
		if [ $i -eq 0 ]; then
			printf '\t%s.o\n' "$stem"
		else
			printf '\t%s.o \\\n' "$stem"
		fi
	done
	printf '# End generated rules for %s\n' "$tgt"
}
# Append each argument to CFLAGS as "<existing> \<newline><tab><flag>" so the
# generated config.mk keeps one flag per continuation line.
append_cflags() {
	for f in "$@"; do
		CFLAGS=$(printf '%s \\\n\t%s' "$CFLAGS" "$f")
	done
}

# Same formatting as append_cflags, but accumulates linker flags in LDFLAGS.
append_ldflags() {
	for f in "$@"; do
		LDFLAGS=$(printf '%s \\\n\t%s' "$LDFLAGS" "$f")
	done
}

# Same formatting as append_cflags, but accumulates link libraries in LIBS.
append_libs() {
	for f in "$@"; do
		LIBS=$(printf '%s \\\n\t%s' "$LIBS" "$f")
	done
}
# test_cflags <flag>... — compile a trivial program with the candidate
# compiler flags; on success append them to CFLAGS, otherwise return 1.
test_cflags() {
	# Create the probe source once and reuse it for subsequent checks.
	[ ! -e "$outdir"/check.c ] && cat <<-EOF > "$outdir"/check.c
	int main(void) { return 0; }
	EOF
	# If the user already enabled -Werror, apply it during probing too so
	# "unknown option" warnings are detected as failures.
	werror=""
	case "$CFLAGS" in
		*-Werror*)
			werror="-Werror"
			;;
	esac
	if $CC $werror "$@" -o /dev/null "$outdir"/check.c >/dev/null 2>&1
	then
		append_cflags "$@"
	else
		return 1
	fi
}

# test_ldflags <flag>... — like test_cflags, but probes linker flags and on
# success appends them to LDFLAGS.
test_ldflags() {
	[ ! -e "$outdir"/check.c ] && cat <<-EOF > "$outdir"/check.c
	int main(void) { return 0; }
	EOF
	if $CC "$@" -o /dev/null "$outdir"/check.c >/dev/null 2>&1
	then
		append_ldflags "$@"
	else
		return 1
	fi
}
mkdir -p "$outdir"
# Probe static linking first if requested ($static intentionally unquoted so
# an empty value expands to no argument).
if [ -n "$static" ]
then
	test_ldflags $static
fi
# Probe each diagnostic/warning flag individually; unsupported ones are
# simply reported and skipped.
for flag in \
	-g -std=c99 -pedantic -Werror -Wundef -Wlogical-op \
	-Wmissing-include-dirs -Wold-style-definition -Wpointer-arith -Winit-self \
	-Wfloat-equal -Wstrict-prototypes -Wredundant-decls \
	-Wimplicit-fallthrough=2 -Wendif-labels -Wstrict-aliasing=2 -Woverflow \
	-Wformat=2 -Wno-missing-braces -Wno-missing-field-initializers \
	-Wno-unused-parameter -Wno-unused-result
do
	printf "Checking for $flag... "
	if test_cflags "$flag"
	then
		echo yes
	else
		echo no
	fi
done
for flag in -fPIC -Wl,--no-undefined -Wl,--as-needed
do
	test_ldflags "$flag"
done
# soname uses only the major component of SOVERSION.
soname=libmrsh.so.$(echo "$SOVERSION" | cut -d. -f1)
printf "Checking for specifying soname for shared lib... "
# Try the GNU linker spelling first, then the Darwin one.
# NOTE(review): `!` binds only to the first test_ldflags, i.e. this parses as
# `(! A) || B` — when the GNU-style probe fails the Darwin fallback is never
# attempted; the intent looks like `! { A || B; }`.  Confirm against upstream.
if ! \
	test_ldflags -Wl,-soname,$soname || \
	test_ldflags -Wl,-install_name,$soname
then
	echo no
	echo "Unable to specify soname (is $(uname) supported?)" >&2
	exit 1
else
	echo yes
fi
printf "Checking for exported symbol restrictions... "
# Same GNU-then-Darwin probing (and the same `!` precedence caveat as above).
if ! \
	test_ldflags -Wl,--version-script="libmrsh.gnu.sym" || \
	test_ldflags -Wl,-exported_symbols_list,"libmrsh.darwin.sym"
then
	echo no
	echo "Unable to specify exported symbols (is $(uname) supported?)" >&2
	exit 1
else
	echo yes
fi
# Autodetect line-editing support unless the user decided already:
# prefer readline, fall back to libedit.
if [ $use_readline -eq -1 ]
then
	printf "Checking for readline... "
	if $pkg_config readline
	then
		readline=readline
		use_readline=1
		append_cflags -DHAVE_READLINE
		# TODO: check for rl_replace_line
		append_cflags -DHAVE_READLINE_REPLACE_LINE
		echo yes
	else
		echo no
	fi
fi
if [ $use_readline -eq -1 ]
then
	printf "Checking for libedit... "
	if $pkg_config libedit
	then
		echo yes
		readline=libedit
		use_readline=1
		append_cflags -DHAVE_EDITLINE
	else
		echo no
	fi
fi
# Pull include paths and libraries for the chosen line-editing package.
if [ $use_readline -eq 1 ]
then
	append_cflags $($pkg_config $static --cflags-only-I $readline)
	append_libs $($pkg_config $static --libs $readline)
fi
# Write the configuration Makefile; Make-time variables are escaped as \$(...).
printf "Creating %s/config.mk... " "$outdir"
cat <<EOF > "$outdir"/config.mk
SOVERSION=$SOVERSION
CC=$CC
PREFIX=${PREFIX:-/usr/local}
_INSTDIR=\$(DESTDIR)\$(PREFIX)
BINDIR?=${BINDIR:-\$(_INSTDIR)/bin}
LIBDIR?=${LIBDIR:-\$(_INSTDIR)/lib}
INCDIR?=${INCDIR:-\$(_INSTDIR)/include}
MANDIR?=${MANDIR:-\$(_INSTDIR)/share/man}
PCDIR?=${PCDIR:-\$(_INSTDIR)/lib/pkgconfig}
CFLAGS=${CFLAGS}
LDFLAGS=${LDFLAGS}
LIBS=${LIBS}
SRCDIR=${srcdir}
all: mrsh highlight libmrsh.so.\$(SOVERSION) \$(OUTDIR)/mrsh.pc
EOF
# Append the generated per-target rules.
libmrsh >>"$outdir"/config.mk
mrsh >>"$outdir"/config.mk
highlight >>"$outdir"/config.mk
echo done
touch "$outdir"/cppcache
| true |
9fa2bce82637fd087c5112f0c5b00b64c54686ea | Shell | jteran3/bash_utils | /ssh-no-passwd.sh | UTF-8 | 718 | 3.25 | 3 | [] | no_license | #!/bin/bash
# AUTHOR: Julio Teran <teranj@daycohost.com>
# USAGE: copy the local RSA public key to a remote host so that ssh no
#        longer prompts for a password.  Replace the two placeholders below
#        before running (as written, they are not valid shell).
REMOTE_USER=<set_remote_user>
REMOTE_IP=<set_remote_ip>
FILE1=id_rsa
FILE2=id_rsa.pub
cd ~/.ssh
# Generate a key pair first if one is missing; the copy/permission steps are
# identical either way, so they are no longer duplicated in both branches.
if [ ! -f "$FILE1" ] || [ ! -f "$FILE2" ]
then
	ssh-keygen -t rsa
fi
ssh "$REMOTE_USER@$REMOTE_IP" mkdir -p .ssh
cat id_rsa.pub | ssh "$REMOTE_USER@$REMOTE_IP" 'cat >> .ssh/authorized_keys'
ssh "$REMOTE_USER@$REMOTE_IP" "chmod 700 .ssh; chmod 640 .ssh/authorized_keys"
# Open an interactive session to verify password-less login works.
ssh "$REMOTE_USER@$REMOTE_IP"
| true |
bd0657d7e22990b376f93e149516d743f0beba76 | Shell | g-i-o-/fltrlib | /scripts/setup/setup.sh | UTF-8 | 126 | 3.015625 | 3 | [] | no_license | #! /bin/sh
# Run every setup script in this directory whose name starts with one or
# more digits followed by a dash (e.g. "01-install.sh"), in ls order.
scriptspath=$(dirname "$0")
# Read names line by line instead of word-splitting `ls` output, so script
# names containing spaces are handled correctly; the awk regex preserves the
# original "^[0-9]+-" selection exactly.
ls "$scriptspath" | awk '/^[0-9]+-/' | while IFS= read -r script; do
	"$scriptspath/$script"
done
| true |
d8258d453d2d566c16ce4cb49d4ffa0782e672ed | Shell | S920105123/BattleCatTimer | /test_cache/run.sh | UTF-8 | 598 | 2.609375 | 3 | [] | no_license |
# For every option file, run each benchmark executable in the current
# directory against the vga_lcd test case and print its timing summary lines.
for ops in vga_lcd/*.ops; do
	echo '------------------------------------------------------'
	echo '	  '$ops
	echo '------------------------------------------------------'
	for exe in *.exe; do
		echo '[+] '$exe
		# The executable logs its diagnostics on stderr; capture them.
		./$exe vga_lcd/vga_lcd.tau2015 vga_lcd/vga_lcd.timing $ops vga_lcd/vga_lcd.myoutput 2> result
		# Extract the "search", "kth" and final "total" timing lines.
		cat result | grep -E '(search).*([0-9]+) ms'
		cat result | grep -E '(kth).*([0-9]+) ms'
		cat result | grep -E 'total.*' -o | tail -n 1
		echo ''
		rm -f result
		rm -f message.log
	done
done
| true |
852afe9fd96f34a93d134e29a4133216ae98783c | Shell | reidab/clowder | /test/scripts/llvm/herd.sh | UTF-8 | 1,606 | 2.984375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# set -xv
# Work relative to this script's parent directory and pull in the shared
# clowder test helpers (print_double_separator, test_branch, $COMMAND, ...).
cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/.." || exit 1
. test_utilities.sh
# Repos cloned directly from upstream LLVM...
# NOTE(review): bash cannot actually export array variables to child
# processes; `export` here only marks them — the arrays are used in this
# shell, so it works, but the export itself is a no-op for subprocesses.
export project_paths=( 'llvm' \
                       'klee' \
                       'libclc' \
                       'llvm/projects/dragonegg' \
                       'llvm/projects/libunwind' \
                       'openmp' \
                       'polly' \
                       'poolalloc' \
                       'vmkit' \
                       'zorg' \
                       'lldb' \
                       'llvm/tools/lld' \
                       'llvm/projects/libcxx' \
                       'llvm/projects/libcxxabi' \
                       'lnt' \
                       'test-suite' )
# ...and repos tracked as forks.
export fork_paths=( 'llvm/tools/clang' \
                    'llvm/tools/clang/tools/extra' \
                    'llvm/projects/compiler-rt' )
print_double_separator
echo 'TEST: llvm projects example test script'
print_double_separator
# Start from a pristine example checkout.
cd "$LLVM_EXAMPLE_DIR" || exit 1
./clean.sh
./init.sh

# After `clowder herd`, every project and fork must be on a tracking master
# branch.
test_init_herd() {
    print_double_separator
    echo "TEST: Normal herd after init"
    $COMMAND herd $PARALLEL || exit 1
    echo "TEST: Check current branches are on master"
    for project in "${project_paths[@]}"; do
    	pushd $project || exit 1
        test_branch master
        test_tracking_branch_exists master
        popd || exit 1
    done
    for project in "${fork_paths[@]}"; do
    	pushd $project || exit 1
        test_branch master
        test_tracking_branch_exists master
        popd || exit 1
    done
}
test_init_herd

$COMMAND status || exit 1
| true |
f9fcd5a38ad1b25d0468213f260a22523978a762 | Shell | txu2k8/storage-test | /storagetest/pkgs/pts/postmark/src/install.sh | UTF-8 | 437 | 2.625 | 3 | [
"MIT"
] | permissive | #!/bin/sh
wget http://archive.debian.org/debian/pool/main/p/postmark/postmark_1.51.orig.tar.gz
tar -zxvf postmark_1.51.orig.tar.gz
cd postmark-1.51/
cc -O3 $CFLAGS postmark-1.51.c -o postmark
echo $? > ~/install-exit-status
cd ~/
echo "#!/bin/sh
cd postmark-1.51/
echo \"set transactions \$1
set size \$2 \$3
set number \$4
show
run
quit\" > benchmark.cfg
./postmark benchmark.cfg > ./postmark.result 2>&1" > postmark
chmod +x postmark | true |
0e5400e6befa9544d6913a2c6eacc5bf6231f2d1 | Shell | AlvinTangGit/cryptopp-pem | /pem_create_keys.sh | UTF-8 | 4,889 | 3.328125 | 3 | [] | no_license | #!/usr/bin/env bash
# Script to create the test keys used pem_test.cxx
##################################
# prerequisites
if [[ -z "$CXX" ]]; then
CXX=g++
fi
if [[ -z $(command -v "$CXX") ]]; then
echo "Please install a compiler like g++"
exit 1
fi
if [[ -z $(command -v openssl) ]]; then
echo "Please install openssl package"
exit 1
fi
if [[ -z $(command -v perl) ]]; then
echo "Please install perl package"
exit 1
fi
##################################
# test program
echo "Compiling test program with $CXX"
rm -rf pem_test.exe &>/dev/null
CXXFLAGS="-DDEBUG -g3 -O0 -Wall"
# Build crypto++ library if out of date.
if ! CXX="$CXX" CXXFLAGS="$CXXFLAGS" make -j 4; then
echo "Failed to build libcryptopp.a"
exit 1
fi
# Build the test program
if ! $CXX $CXXFLAGS pem_test.cxx ./libcryptopp.a -o pem_test.exe; then
echo "Failed to build pem_test.exe"
exit 1
fi
##################################
# test keys
echo "Generating OpenSSL keys"
# RSA private key, public key, and encrypted private key
openssl genrsa -out rsa-priv.pem 1024
openssl rsa -in rsa-priv.pem -out rsa-pub.pem -pubout
openssl rsa -in rsa-priv.pem -out rsa-enc-priv.pem -aes128 -passout pass:abcdefghijklmnopqrstuvwxyz
# DSA private key, public key, and encrypted private key
openssl dsaparam -out dsa-params.pem 1024
openssl gendsa -out dsa-priv.pem dsa-params.pem
openssl dsa -in dsa-priv.pem -out dsa-pub.pem -pubout
openssl dsa -in dsa-priv.pem -out dsa-enc-priv.pem -aes128 -passout pass:abcdefghijklmnopqrstuvwxyz
# EC private key, public key, and encrypted private key
openssl ecparam -out ec-params.pem -name secp256k1 -genkey
openssl ec -in ec-params.pem -out ec-priv.pem
openssl ec -in ec-priv.pem -out ec-pub.pem -pubout
openssl ec -in ec-priv.pem -out ec-enc-priv.pem -aes128 -passout pass:abcdefghijklmnopqrstuvwxyz
openssl dhparam -out dh-params.pem 512
##################################
# malformed
# Only the '-----BEGIN PUBLIC KEY-----'
echo "-----BEGIN PUBLIC KEY-----" > rsa-short.pem
# Removes last CR or LF (or CRLF)
perl -pe 'chomp if eof' rsa-pub.pem > rsa-trunc-1.pem
# This gets the last CR or LF and one of the dashes (should throw)
perl -pe 'chop if eof' rsa-trunc-1.pem > rsa-trunc-2.pem
# Two keys in one file; missing CRLF between them
cat rsa-trunc-1.pem > rsa-concat.pem
cat rsa-pub.pem >> rsa-concat.pem
# Uses only CR (remove LF)
sed 's/\n//g' rsa-pub.pem > rsa-eol-cr.pem
# Uses only LF (remove CR)
sed 's/\r//g' rsa-pub.pem > rsa-eol-lf.pem
# No EOL (remove CR and LF)
sed 's/\r//g; s/\n//g' rsa-pub.pem > rsa-eol-none.pem
echo "-----BEGIN FOO-----" > foobar.pem
head -c 180 /dev/urandom | base64 -w 64 >> foobar.pem
echo "-----END BAR-----" >> foobar.pem
##################################
# Test Certificate
cat << EOF > ./example-com.conf
[ req ]
prompt = no
default_bits = 2048
default_keyfile = server-key.pem
distinguished_name = subject
req_extensions = req_ext
x509_extensions = x509_ext
string_mask = utf8only
# CA/B requires a domain name in the Common Name
[ subject ]
countryName = US
stateOrProvinceName = NY
localityName = New York
organizationName = Example, LLC
commonName = Example Company
emailAddress = support@example.com
# A real server cert usually does not need clientAuth or secureShellServer
[ x509_ext ]
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid,issuer
basicConstraints = critical,CA:FALSE
keyUsage = digitalSignature
extendedKeyUsage = serverAuth, clientAuth, secureShellServer
subjectAltName = @alternate_names
nsComment = "OpenSSL Generated Certificate"
# A real server cert usually does not need clientAuth or secureShellServer
[ req_ext ]
subjectKeyIdentifier = hash
basicConstraints = critical,CA:FALSE
keyUsage = digitalSignature
extendedKeyUsage = serverAuth, clientAuth, secureShellServer
subjectAltName = @alternate_names
nsComment = "OpenSSL Generated Certificate"
# A real server cert should not have email addresses
# CA/B forbids IP addresses
[ alternate_names ]
DNS.1 = example.com
DNS.2 = www.example.com
DNS.3 = mail.example.com
DNS.4 = ftp.example.com
IP.1 = 127.0.0.1
IP.2 = ::1
email.1 = webmaster@example.com
email.2 = ftpmaster@example.com
email.3 = hostmaster@example.com
EOF
# And create the cert
openssl req -config example-com.conf -new -x509 -sha256 -newkey rsa:2048 -nodes \
-keyout example-com.key.pem -days 365 -out example-com.cert.pem
# Convert to ASN.1/DER
openssl x509 -in example-com.cert.pem -inform PEM -out example-com.cert.der -outform DER
# View PEM cert with 'openssl x509 -in example-com.cert.pem -inform PEM -text -noout'
# View DER cert with 'dumpasn1 example-com.cert.der'
##################################
# cacert.pem
if [ ! -e "cacert.pem" ]; then
wget http://curl.haxx.se/ca/cacert.pem -O cacert.pem
fi
| true |
096e324a1257565886723b0a06bffb522dba7742 | Shell | brianrasmusson/pywebtest | /tests/t25/gen.sh | UTF-8 | 681 | 2.8125 | 3 | [] | no_license | #!/bin/bash
fileid=0
for animal in cat dog elephant; do
for vegetable in tomato cucumber radish; do
for mineral in granite diamond mercury; do
echo "$animal $vegetable $mineral" > s1/file$fileid.txt
fileid=$[ $fileid + 1 ]
echo "$animal $mineral $vegetable" > s1/file$fileid.txt
fileid=$[ $fileid + 1 ]
echo "$vegetable $animal $mineral" > s1/file$fileid.txt
fileid=$[ $fileid + 1 ]
echo "$vegetable $mineral $animal" > s1/file$fileid.txt
fileid=$[ $fileid + 1 ]
echo "$mineral $animal $vegetable" > s1/file$fileid.txt
fileid=$[ $fileid + 1 ]
echo "$mineral $vegetable $animal" > s1/file$fileid.txt
fileid=$[ $fileid + 1 ]
done
done
done
| true |
d2ff2c20e1589f8cd7f009fe83ff33ce8f4d8f1b | Shell | mxie33/CS2200_spring16 | /prj4/run_script.sh | UTF-8 | 611 | 2.796875 | 3 | [] | no_license | #!/bin/bash
echo "Default Configuration"
for i in `ls ./traces/`; do
echo ""
echo "--$i--"
./cachesim -i ./traces/$i
done;
echo ""
echo "Direct Mapped Cache"
echo "C = 10, S = 0"
for i in `ls ./traces/`; do
echo ""
echo "--$i--"
./cachesim -c 10 -s 0 -i ./traces/$i
done;
echo ""
echo "4 Way Associative"
echo "C = 12, S = 2, B = 4"
for i in `ls ./traces/`; do
echo ""
echo "--$i--"
./cachesim -c 12 -s 2 -b 4 -i ./traces/$i
done;
echo ""
echo "Fully Associative"
echo "C = 12, S = 8, B = 4"
for i in `ls ./traces/`; do
echo ""
echo "--$i--"
./cachesim -c 12 -s 8 -b 4 -i ./traces/$i
done;
| true |
7c444372559b3dce4f7bef6f631db78a046169b2 | Shell | Karthik-Venkatesh/ATOM | /env_setup.sh | UTF-8 | 1,601 | 4.1875 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
function printHelp() {
echo "Usage: "
echo " env_setup.sh [<option> <option value>]"
echo " <mode> - one of 'createEnv', 'removeEnv', 'installRequirements', 'reinitializeEnv'"
echo ""
echo " - 'createEnv' - Create conda env with name 'atom' and python 3.6"
echo " - 'removeEnv' - Removes conda env 'atom'"
echo " - 'installRequirements' - Install requirements which is in requirements.txt"
echo " - 'reinitializeEnv' - Completely removes 'atom' env, creates new one and installs all requirements"
echo ""
echo "Example: $ ./env_setup.sh --mode reinitializeEnv"
}
function createEnv() {
conda create --name atom python=3.6 --no-default-packages -y
}
function removeEnv() {
conda remove -n atom --all -y
}
function activateEnv() {
source activate atom
}
function installRequirements() {
pip install -r requirements.txt
}
function reinitializeEnv() {
removeEnv
createEnv
activateEnv
installRequirements
}
function parseParam() {
while [[ "$1" != "" ]]; do
case $1 in
--mode )
MODE=$2
;;
--help )
printHelp
exit
;;
* )
exit
;;
esac
shift
shift
done
}
function execute() {
if [[ "$MODE" == "createEnv" ]]; then
createEnv
elif [[ "$MODE" == "removeEnv" ]]; then
removeEnv
elif [[ "$MODE" == "installRequirements" ]]; then
installRequirements
elif [[ "$MODE" == "reinitializeEnv" ]]; then
reinitializeEnv
else
printHelp
exit 1
fi
}
parseParam $@
execute | true |
e00314fbd244003aa613eadbf76cb369c858b4dc | Shell | kiban18/.envsetup.org | /pushapk.sh | UTF-8 | 991 | 3.34375 | 3 | [] | no_license | #!/bin/bash
DEBUG=true
#DEBUG=false
if [[ -z "$ADBHOST" || -z "$OUT" ]]; then
echo ADBHOST or OUT is not defined. exiting...
exit 2
fi
$ADBCON_SH && adb -s $ADBHOSTPORT remount
#declare -A apks
apks=`grep "Install: out.*.apk" make_mm_build.log`
#echo $apks
array="$apks"
if [[ "$DEBUG" == "true" ]]; then
echo "array:$array"
fi
while [[ -n "$array" ]]
do
apk=${array%%.apk*}
if [[ "$DEBUG" == "true" ]]; then
echo "apk=\${array%%.apk*}:$apk"
fi
apk=${apk#*$TARGET_PRODUCT}.apk
if [[ "$DEBUG" == "true" ]]; then
echo "apk=\${apk#*$TARGET_PRODUCT}.apk:$apk"
fi
echo ""
echo "Install $apk"
echo "adb -s $ADBHOSTPORT push $OUT/$apk $apk"
adb -s $ADBHOSTPORT push $OUT/$apk $apk
adb -s $ADBHOSTPORT shell "sync; sync"
ls -al $OUT/$apk
adb -s $ADBHOSTPORT shell ls -al $apk
array=${array#*.apk}
if [[ "$DEBUG" == "true" ]]; then
echo "array=\${array#*.apk}:$array"
fi
done
echo ""
exit 0
| true |
0cbadcebd2a8f0014c0e553420275a1c7b596f5e | Shell | NewForce-at-Mountwest/client-side-mastery | /c6-curriculum/projects/tier-2/state-fair/chapters/scripts/statefair-install.sh | UTF-8 | 3,270 | 2.546875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -u
mkdir -p "$HOME/workspace/statefair"
mkdir -p "$HOME/workspace/statefair/scripts/rides"
mkdir -p "$HOME/workspace/statefair/scripts/food"
mkdir -p "$HOME/workspace/statefair/scripts/games"
mkdir -p "$HOME/workspace/statefair/scripts/sideshows"
mkdir -p "$HOME/workspace/statefair/styles"
cd "$HOME/workspace/statefair"
echo '<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>State Fair</title>
<link rel="stylesheet" href="./styles/main.css">
</head>
<body>
<main id="state-fair">
<section class="customers"> </section>
<section class="entry"> </section>
<section class="attractions">
<div class="attraction">
<h1>Rides</h1>
<div class="people rides"></div>
</div>
<div class="attraction">
<h1>Food</h1>
<div class="people food"></div>
</div>
<div class="attraction">
<h1>Games</h1>
<div class="people games"></div>
</div>
<div class="attraction">
<h1>Sideshow</h1>
<div class="people sideshow"></div>
</div>
</section>
</main>
<script type="module" src="./scripts/main.js"></script>
</body>
</html>
' > index.html
echo '@import url("https://fonts.googleapis.com/css?family=Comfortaa|Patua+One");
/*Typography
--------------------------------------------------------------*/
body,
button,
input,
select,
textarea {
color: #404040;
font-family: "Comfortaa", Arial, sans-serif;
font-size: 14px;
line-height: 1.5;
}
h1,h2,h3,h4,h5,h6 {
font-family: "Patua One", serif;
letter-spacing: 2px;
}
.ticketBooth {
margin-bottom: 1rem;
}
.attractions {
display: flex;
flex-direction: row;
flex-wrap: nowrap;
height: 30rem;
}
.attraction {
flex: 1;
border-left: 1px dashed gray;
border-right: 1px dashed gray;
text-align: center;
}
.people {
display: flex;
flex-direction: column;
flex-wrap: wrap;
height: 25rem;
}
.person {
height: 1rem;
width: 1rem;
margin: 0.2rem 0.2rem;
}
.eater {
background-color: fuchsia;
}
.rider {
background-color: dodgerblue;
}
.bigSpender {
background-color: orange;
}
.player {
background-color: lawngreen;
}
.gawker {
background-color: firebrick;
}
.customers {
font-weight: 700;
padding: 1rem;
}
' > ./styles/main.css
echo 'const contentTarget = document.querySelector(".entry")
const eventHub = document.querySelector("#state-fair")
eventHub.addEventListener()
export const TicketBooth = () => {
contentTarget.innerHTML = `
<div class="ticketBooth">
Add button here
</div>
`
}
' > ./scripts/TicketBooth.js
echo 'const contentTarget = document.querySelector(".rides")
const eventHub = document.querySelector("#state-fair")
export const RideTicketHolders = () => {
eventHub.addEventListener("", customEvent => {
})
}
' > ./scripts/rides/RideTicketHolders.js
echo '// Import and invoke the ticket booth component function
' > ./scripts/main.js
| true |
7b1e3f13f7a80024881bd7a8ce34bdec38400444 | Shell | arakaki-tokyo/rpi | /l_tika/sw_row.sh | UTF-8 | 922 | 3.609375 | 4 | [] | no_license | #!/bin/bash
GPIO_DIR=/sys/class/gpio
TARGET_PIN=${1}
TARGET_DIRECTION=${GPIO_DIR}/gpio${TARGET_PIN}/direction
TARGET_VALUE=${GPIO_DIR}/gpio${TARGET_PIN}/value
SW_PIN=${2}
SW_DIRECTION=${GPIO_DIR}/gpio${SW_PIN}/direction
SW_VALUE=${GPIO_DIR}/gpio${SW_PIN}/value
preProc() {
echo ${TARGET_PIN} >${GPIO_DIR}/export
echo ${SW_PIN} >${GPIO_DIR}/export
sleep 0.1
sudo echo out >${TARGET_DIRECTION}
}
postProc() {
echo ${TARGET_PIN} >${GPIO_DIR}/unexport
echo ${SW_PIN} >${GPIO_DIR}/unexport
exit
}
getSW() {
return $(cat ${SW_VALUE})
}
tgrSW() {
sw=$((sw ^ 1))
}
tgrTarget() {
curTargetValue=$(cat ${TARGET_VALUE})
echo $((curTargetValue ^ 1)) >${TARGET_VALUE}
}
main() {
preProc
trap "postProc" 2
getSW
sw=$?
echo "sw is ${sw}"
while true; do
sleep 0.1
getSW
curSW=$?
if [ ${sw} != ${curSW} ]; then
tgrSW
echo "sw is ${sw}"
if [ ${sw} == 0 ]; then
tgrTarget
fi
fi
done
}
main
| true |
454a34af51707c8b414bfdc0893255a6bc58195e | Shell | koenoe/dotfiles | /brew.sh | UTF-8 | 1,357 | 2.6875 | 3 | [] | no_license | #!/usr/bin/env bash
# Install command-line tools using Homebrew.
# Ask for the administrator password upfront.
sudo -v
# Keep-alive: update existing `sudo` time stamp until the script has finished.
while true; do sudo -n true; sleep 60; kill -0 "$$" || exit; done 2>/dev/null &
# Make sure we’re using the latest Homebrew.
brew update
# Extra taps
brew tap
brew tap homebrew/core
brew tap homebrew/services
brew install coreutils
brew install findutils
brew install moreutils
brew install fontconfig
brew install git
brew install git-extras
brew install python
brew install speedtest-cli
brew install ssh-copy-id
brew install zsh
brew install fnm
# Casks
brew install --cask 1password
brew install --cask alfred
brew install --cask bartender
brew install --cask cocoarestclient
brew install --cask daisydisk
brew install --cask discord
brew install --cask dropbox
brew install --cask google-chrome
brew install --cask iterm2-beta
brew install --cask mounty
brew install --cask mpg
brew install --cask philips-hue-sync
brew install --cask skype
brew install --cask slack-beta
brew install --cask spectacle
brew install --cask spotify
brew install --cask the-unarchiver
brew install --cask tidal
brew install --cask tower
brew install --cask visual-studio-code
brew install --cask whatsapp
# Remove outdated versions from the cellar.
brew cleanup
| true |
61740e38c32867da4ef4e0ad4e952e5c1c834afc | Shell | jtpunt/c-and-c-plus-plus | /Parallel Programming/project5/script | UTF-8 | 318 | 2.8125 | 3 | [] | no_license | #!/bin/bash
# test # indicates the function we are testing
for test in 1 2 3 4
do
# number of subdivisions
for s in 1024 32768 1048576 32000000
do
echo "Testing with test $test, ARRAYSIZE = $s."
g++ -DARRAYSIZE=$s -DTEST=$test -DNUMTRIES=5 simd.p5.cpp -o project5 -lm -fopenmp
./project5
echo
done
done
| true |
69c0a83e083699b06c845b5616758c85087d8346 | Shell | trbhoang/writerviet-monitoring-stack | /setup/server_init_harden.sh | UTF-8 | 5,784 | 3.28125 | 3 | [] | no_license | #!/bin/bash
#########################################################
# Remove amazon ssm agent which might become a backdoor
# Create sys admin user
# Secure ssh
# Set timezone to UTC
# Install & configure sendmail
# Install & configure CSF
#########################################################
# load config vars
source .env.sh
pwd=$(pwd)
# # remove amazon-ssm-agent
# snap remove amazon-ssm-agent
# # remove never-used services: snapd,...
# # ref: https://peteris.rocks/blog/htop/
sudo apt-get remove snapd -y --purge
sudo apt-get remove mdadm -y --purge
sudo apt-get remove policykit-1 -y --purge
sudo apt-get remove open-iscsi -y --purge
sudo systemctl stop getty@tty1
# remove git
sudo apt-get remove git -y --purge
sudo apt-get remove tmux -y --purge
sudo apt-get remove telnet -y --purge
sudo apt-get remove git-man -y --purge
sudo apt-get autoremove
# Fix environment
echo 'LC_ALL="en_US.UTF-8"' >> /etc/environment
echo 'LC_CTYPE="en_US.UTF-8"' >> /etc/environment
# Install essential packages
apt-get dist-upgrade ; apt-get -y update ; apt-get -y upgrade
apt-get -y --no-install-recommends install unattended-upgrades \
apt-transport-https \
ca-certificates \
software-properties-common \
gnupg \
curl \
htop
# apache2-utils
# Install security updates automatically
echo -e "APT::Periodic::Update-Package-Lists \"1\";\nAPT::Periodic::Unattended-Upgrade \"1\";\nUnattended-Upgrade::Automatic-Reboot \"false\";\n" > /etc/apt/apt.conf.d/20auto-upgrades
/etc/init.d/unattended-upgrades restart
# Change the timezone
echo $TIMEZONE > /etc/timezone
dpkg-reconfigure -f noninteractive tzdata
# Change hostname
hostnamectl set-hostname $HOST_NAME
sed -i "1i 127.0.1.1 $HOST_DNS $HOST_NAME" /etc/hosts
# Disable ipv6
echo "net.ipv6.conf.all.disable_ipv6 = 1" >> /etc/sysctl.conf
echo "net.ipv6.conf.default.disable_ipv6 = 1" >> /etc/sysctl.conf
echo "net.ipv6.conf.lo.disable_ipv6 = 1" >> /etc/sysctl.conf
sudo sysctl -p
# Create admin user
adduser --disabled-password --gecos "Admin" $SYSADMIN_USER
# Setup admin password
echo $SYSADMIN_USER:$SYSADMIN_PASSWD | chpasswd
# Allow sudo for sys admin user
echo "$SYSADMIN_USER ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers
# Setup SSH keys
mkdir -p /home/$SYSADMIN_USER/.ssh/
echo $KEY > /home/$SYSADMIN_USER/.ssh/authorized_keys
chmod 700 /home/$SYSADMIN_USER/.ssh/
chmod 600 /home/$SYSADMIN_USER/.ssh/authorized_keys
chown -R $SYSADMIN_USER:$SYSADMIN_USER /home/$SYSADMIN_USER/.ssh
# Disable password login for this user
echo "PasswordAuthentication no" | tee --append /etc/ssh/sshd_config
echo "PermitEmptyPasswords no" | tee --append /etc/ssh/sshd_config
echo "PermitRootLogin no" | tee --append /etc/ssh/sshd_config
echo "Protocol 2" | tee --append /etc/ssh/sshd_config
# Have only 1m to successfully login
echo "LoginGraceTime 1m" | tee --append /etc/ssh/sshd_config
if [ $APP_ENV == 'production' ]
then
# Only allow specific user to login
echo "AllowUsers $SYSADMIN_USER" | tee --append /etc/ssh/sshd_config
# configure idle timeout interval (10 mins)
echo "ClientAliveInterval 600" | tee --append /etc/ssh/sshd_config
echo "ClientAliveCountMax 3" | tee --append /etc/ssh/sshd_config
fi
# disable port forwarding (yes: to support connecting from localhost)
echo "AllowTcpForwarding yes" | tee --append /etc/ssh/sshd_config
echo "X11Forwarding no" | tee --append /etc/ssh/sshd_config
echo "UseDNS no" | tee --append /etc/ssh/sshd_config
# Reload SSH changes
systemctl reload sshd
# Install & configure sendmail
apt-get -y install sendmail
sed -i "/MAILER_DEFINITIONS/ a FEATURE(\`authinfo', \`hash -o /etc/mail/authinfo/smtp-auth.db\')dnl" /etc/mail/sendmail.mc
sed -i "/MAILER_DEFINITIONS/ a define(\`confAUTH_MECHANISMS', \`EXTERNAL GSSAPI DIGEST-MD5 CRAM-MD5 LOGIN PLAIN\')dnl" /etc/mail/sendmail.mc
sed -i "/MAILER_DEFINITIONS/ a TRUST_AUTH_MECH(\`EXTERNAL DIGEST-MD5 CRAM-MD5 LOGIN PLAIN')dnl" /etc/mail/sendmail.mc
sed -i "/MAILER_DEFINITIONS/ a define(\`confAUTH_OPTIONS', \`A p')dnl" /etc/mail/sendmail.mc
sed -i "/MAILER_DEFINITIONS/ a define(\`ESMTP_MAILER_ARGS', \`TCP \$h 587')dnl" /etc/mail/sendmail.mc
sed -i "/MAILER_DEFINITIONS/ a define(\`RELAY_MAILER_ARGS', \`TCP \$h 587')dnl" /etc/mail/sendmail.mc
sed -i "/MAILER_DEFINITIONS/ a define(\`SMART_HOST', \`[email-smtp.us-east-1.amazonaws.com]')dnl" /etc/mail/sendmail.mc
mkdir /etc/mail/authinfo
chmod 750 /etc/mail/authinfo
cd /etc/mail/authinfo
echo "AuthInfo: \"U:root\" \"I:$SMTP_USER\" \"P:$SMTP_PASS\"" > smtp-auth
chmod 600 smtp-auth
makemap hash smtp-auth < smtp-auth
make -C /etc/mail
systemctl restart sendmail
echo "Subject: sendmail test" | sendmail -v $SYSADMIN_EMAIL
#
# Install Docker
#
sudo apt-get remove docker docker-engine docker.io
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
sudo apt-key fingerprint 0EBFCD88
sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
sudo apt-get update
sudo apt-get install -y --no-install-recommends docker-ce
# https://www.digitalocean.com/community/questions/how-to-fix-docker-got-permission-denied-while-trying-to-connect-to-the-docker-daemon-socket
# switch to user SYSADMIN_USER ??? su $SYSADMIN_USER
sudo groupadd docker
sudo usermod -aG docker $USER $SYSADMIN_USER # may need to logout and login again
docker run hello-world
# Install docker-compose
sudo wget "https://github.com/docker/compose/releases/download/1.25.5/docker-compose-$(uname -s)-$(uname -m)" -O /usr/local/bin/docker-compose
sudo chmod +x /usr/local/bin/docker-compose
docker-compose --version
#
# Install Fail2ban
#
cd $pwd
sudo apt-get -y install fail2ban
sudo cp ./fail2ban/jail.local /etc/fail2ban/jail.local
sudo systemctl restart fail2ban
| true |
135ba2156e0aa56702601ceda1d6e77c1ba411bd | Shell | PremiereGlobal/mkdocs-generator | /dockerBuild.sh | UTF-8 | 1,222 | 3.640625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
GIT_TAG=$(git describe --exact-match --tags HEAD 2>/dev/null)
VERSION="unknown"
echo "Got tag:\"${GIT_TAG}\""
if [ -z $GIT_TAG ]; then
GIT_BRANCH=$(git branch | grep \* | cut -d ' ' -f2)
echo "Got branch:\"${GIT_BRANCH}\""
if [ "$GIT_BRANCH" == "master" ]; then
VERSION="latest"
fi
if [ "$TRAVIS_BRANCH" == "master" ] ; then
VERSION="latest"
fi
else
VERSION=$GIT_TAG
fi
set -e
echo "-------------------------"
echo "Building mkdocs-generator"
echo "-------------------------"
docker run --rm -e VERSION=${VERSION} -e GO111MODULE=on -e HOME=/tmp -u $(id -u ${USER}):$(id -g ${USER}) -v "$PWD":/go/mkdocs-generator -w /go/mkdocs-generator golang:1.15 \
./build.sh
echo ""
echo "---------------------"
echo "Building mkdocs-generator Container version: ${VERSION}"
echo "---------------------"
DTAG="premiereglobal/mkdocs-generator:${VERSION}"
docker build . -t ${DTAG}
echo "---------------------"
echo "Created Tag ${DTAG}"
echo "---------------------"
if [[ ${TRAVIS} && "${VERSION}" != "unknown" && -n $DOCKER_USERNAME && -n $DOCKER_PASSWORD ]]; then
echo "Pushing docker image: ${DTAG}"
docker login -u="$DOCKER_USERNAME" -p="$DOCKER_PASSWORD"
docker push ${DTAG}
fi
| true |
90ce413770aba655b04fe3c83e16dfc9e0d45c6a | Shell | SURFnet/honas | /fuzz/bin/run_input_fuzz.sh | UTF-8 | 684 | 3.78125 | 4 | [] | permissive | #!/bin/sh
set -e
SCRIPT_DIR="$(cd "$(dirname "$0")"; pwd)"
cd "$SCRIPT_DIR/../.."
INPUT_MODULE="$1"
if [ -z "$INPUT_MODULE" ]; then
echo "Usage: $(basename "$0") <input-module-name>" >&2
exit 1
elif [ ! -d "fuzz/input/$INPUT_MODULE/testcases" ]; then
echo "Unknown input module '$INPUT_MODULE' or no initial test cases prepared!" >&2
exit 1
fi
shift
mkdir -p "fuzz/input/$INPUT_MODULE/findings"
# Make sure 'input_fuzz' program is build and up to date
[ -d fuzz/build ] || CC=afl-gcc meson fuzz/build
ninja -C fuzz/build input_fuzz
exec afl-fuzz -i "fuzz/input/$INPUT_MODULE/testcases/" -o "fuzz/input/$INPUT_MODULE/findings/" "$@" -- "fuzz/build/input_fuzz" "$INPUT_MODULE"
| true |
98ca806fdc175f57193a2b8df17ab479e878cd70 | Shell | indraoli429/Shell-Scripting | /systems/Country | UTF-8 | 694 | 3.546875 | 4 | [] | no_license | #!/bin/bash
#Indra Bahadur Oli
WorldcupCountry(){
echo -e "\nTop 5 Worldcup played country"
echo -e "Country name\t\tCountry code"
echo -e "Japan \t\t\tJPN"
echo -e "Argentina \t\tARG"
echo -e "Germany \t\tGER"
echo -e "France \t\tFAR"
echo -e "Brazil \t\tBRZ"
chooseCountry
}
chooseCountry(){
countryCode="FAR"
echo "Try to guess the best country name!"
until [ "$country" = "$countryCode" ]
do
echo -e "your guess: \c"
read country
if [[ "$country" = "$countryCode" ]]
then
break
else
echo "Sorry!! Wrong input"
fi
done
echo -e "Congratulation!! you choosed France\n"
country=""
source Players #Player is a file
playerList
}
| true |
90238ff09d575d059e7caa53df24ed90d8946da7 | Shell | dabreegster/mnemonicrl | /emotionrl/demorl | UTF-8 | 429 | 3.21875 | 3 | [] | no_license | #!/bin/sh
perl emotionrl $@ 2> log
if test -s log; then
echo "Some bugs occured while you were playing EmotionRL. The log is
(obviously) stored in the 'log.' Please post this to rgrd or send to me
somehow so I can fix them. If the game abruptly ended, I apologize; it was a
slightly more serious bug. Copy the log to a safe location before playing
again; your next session will overwrite the log."
else
rm -f log
fi
| true |
d835d930a5fd306c57763e43b662932c81b35778 | Shell | andrewbharrisiv/vagrant-linux-chef | /vagrant/shell/prepare-provisioner.sh | UTF-8 | 728 | 3.25 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
#
# Prepare box for provisioning with chef solo
# Add rvm shell integration
source /usr/local/rvm/scripts/rvm
# copy chef file to temp folder
cp /vagrant/vagrant/chef/Cheffile /tmp/vagrant-chef-resources/Cheffile
# change directory to temp folder
pushd /tmp/vagrant-chef-resources
# Check for librarian-chef
if hash librarian-chef 2>/dev/null; then
# update chef dependent cookbooks
echo 'Running librarian-chef update'
librarian-chef update
else
echo 'Installing librarian-chef...'
gem install librarian-chef --no-rdoc --no-ri
# install chef dependent cookbooks
echo 'Running librarian-chef install'
librarian-chef install --clean
fi
# return to previous directory
popd | true |
7a1624b3ea6def113fe04155ac439bcf2564bcdc | Shell | lenisha/pcf-pipelines | /tasks/upload-product-and-stemcell/task.sh | UTF-8 | 1,634 | 3.453125 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | #!/bin/bash -e
pivnet=`ls tool-pivnet-cli/pivnet-linux-* 2>/dev/null`
echo "chmoding $pivnet"
chmod +x $pivnet
echo "Checking for needed stemcell in metadata"
ls ./pivnet-product
STEMCELL_VERSION=`cat ./pivnet-product/metadata.json | jq --raw-output '.Dependencies[] | select(.Release.Product.Name | contains("Stemcells")) | .Release.Version'`
echo "Stemcell version is $STEMCELL_VERSION"
if [ -n "$STEMCELL_VERSION" ]; then
echo "Downloading diagnostic report"
diagnostic_report=$(
om-linux \
--target https://$OPS_MGR_HOST \
--username $OPS_MGR_USR \
--password $OPS_MGR_PWD \
--skip-ssl-validation \
curl --silent --path "/api/v0/diagnostic_report"
)
stemcell=$(
echo $diagnostic_report |
jq \
--arg version "$STEMCELL_VERSION" \
--arg glob "$IAAS" \
'.stemcells[] | select(contains($version) and contains($glob))'
)
if [[ -z "$stemcell" ]]; then
echo "Downloading stemcell $STEMCELL_VERSION"
$pivnet -k login --api-token="$PIVNET_API_TOKEN"
$pivnet -k download-product-files -p stemcells -r $STEMCELL_VERSION -g "*${IAAS}*" --accept-eula
SC_FILE_PATH=`find ./ -name *.tgz`
if [ ! -f "$SC_FILE_PATH" ]; then
echo "Stemcell file not found!"
exit 1
fi
om-linux -t https://$OPS_MGR_HOST -u $OPS_MGR_USR -p $OPS_MGR_PWD -k upload-stemcell -s $SC_FILE_PATH
echo "Removing downloaded stemcell $STEMCELL_VERSION"
rm $SC_FILE_PATH
fi
fi
echo "Uploading product"
FILE_PATH=`find ./pivnet-product -name *.pivotal`
om-linux -t https://$OPS_MGR_HOST -u $OPS_MGR_USR -p $OPS_MGR_PWD -k upload-product -p $FILE_PATH
| true |
f68951fc0aa9a7c9ba127ca1c7d99023949eb897 | Shell | GUIEEN/digginWithErr | /memos/bash/mkdirByParam.sh | UTF-8 | 428 | 3.828125 | 4 | [] | no_license | # shell-format
# shift+option+f
directory_name=$1
thisWillMakeDir() {
local variable="$1"
if [ -d $variable ]; then
echo "Directory( $variable ) already exists"
else
mkdir $variable
fi
}
thisWillMakeDir "$directory_name"
# ---------------------------------
# NOTE
# ---------------------------------
# mkdir -p $directory_name
# -p = --parents
# (no error if existing, make parent directories as needed)
| true |
191d741d40a53b1f289c83183ee90cfc33baa03b | Shell | ericstiles/alexa-list-maker | /bin/archive.sh | UTF-8 | 370 | 3.015625 | 3 | [
"MIT"
] | permissive | #!/bin/sh
#move to root project directory
cd $(dirname $0)/..
BASE_DIR=`pwd`
echo $BASE_DIR
ARCHIVE_DIR=$BASE_DIR/archive
echo $ARCHIVE_DIR
SRC_DIR=$BASE_DIR/src
echo $SRC_DIR
FILENAME=archive.$(date +"%m.%d.%Y.%H.%M").zip
echo $FILENAME
cd $SRC_DIR
zip -r $ARCHIVE_DIR/$FILENAME ./index.js ./AlexaSkill.js ./package.json ./node_modules/underscore
ls $ARCHIVE_DIR
| true |
3b4681e7ecc7246f6cd49b43064b74ef9bf9d0c7 | Shell | openopentw/ssh-csie | /sftp-csie.sh | UTF-8 | 710 | 3.515625 | 4 | [] | no_license | #!/bin/bash
cmd=''
id=$(cat ~/.ssh-csie.conf)
if [[ $# -eq 1 ]]; then
# specific workstation
first_letter=$(echo $1 | cut -c 1)
if [ $first_letter == 'b' ] || [ $first_letter == 'o' ] || [ $first_letter == 'l' ]; then
# specific bsd or oasis or linux
cmd="sftp $id@$1.csie.ntu.edu.tw"
else
# linux
cmd="sftp $id@linux$1.csie.ntu.edu.tw"
fi
else
# auto search for the best workstation
echo 'Searching for the best workstation ...'
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
machines=$(python3 ${DIR}/sort_server.py)
echo The best three workstations: $(echo "$machines" | head -n 3)
choose=$(echo "$machines" | head -n 1)
cmd="sftp $id@linux$choose.csie.ntu.edu.tw"
fi
echo $cmd
$cmd
| true |
33b39d918f20bffc20f27b13baf0e6b5a7803d60 | Shell | deinspanjer/sqitch_multiproject_deps_example | /setup_sqitch.sh | UTF-8 | 3,188 | 3.765625 | 4 | [
"Unlicense"
] | permissive | #!/usr/bin/env bash
shopt -s expand_aliases
_pause() {
read -rsp $'Press any key to continue...\n' -n1 key
}
_print_cmds() {
case $1 in
on) set -v ;;
off) set +v ;;
esac
}
alias print_cmds='{ _print_cmds $(cat); } 2>/dev/null <<<'
echo "Did you 'source make_sqitch_alias.sh'? That gives you the alias to run sqitch inside docker if you need it."
source make_sqitch_alias.sh
mkdir -p src
cd src
echo
echo "Clearing out old files from any previous runs..."
rm -rf .git *
echo "Trying to clear out any old data from the Docker pg instance..."
DOCKER_PG_PORT=$(docker port db 5432)
psql -h localhost -p ${DOCKER_PG_PORT#*:} -U postgres postgres -c 'drop database proj2'
psql -h localhost -p ${DOCKER_PG_PORT#*:} -U postgres postgres -c 'drop schema sqitch cascade;'
git init .
touch README.md
git add .
git commit -am 'Init test project'
_pause
echo
echo "Creating two projects..."
print_cmds on
sqitch init proj1 --engine pg --plan-file proj1.plan --target db:pg://postgres@db/postgres
sqitch init proj2 --engine pg --plan-file proj2.plan --target db:pg://postgres@db/proj2
print_cmds off
_pause
echo
echo "For some reason, even passing the --target argument doesn't actually put the targets in sqitch.conf, so let's add them."
print_cmds on
sqitch target
sqitch target add proj1 db:pg://postgres@db/postgres
sqitch target add proj2 db:pg://postgres@db/proj2
sqitch target
print_cmds off
_pause
echo
echo "Now let's add a change to create the proj2 database"
print_cmds on
sqitch add create_proj2_db proj1 -n "Create proj2 db"
sed -i '' -Ee "/BEGIN|COMMIT/d;s/.*XXX.*/CREATE DATABASE proj2;/" deploy/create_proj2_db.sql
sed -i '' -Ee "/BEGIN|COMMIT/d;s/.*XXX.*/DROP DATABASE proj2;/" revert/create_proj2_db.sql
print_cmds off
_pause
echo
echo "And deploy it..."
print_cmds on
sqitch deploy
print_cmds off
_pause
echo
echo "Here are the current plans:"
print_cmds on
sqitch --plan-file proj1.plan plan
sqitch status proj1
echo
sqitch --plan-file proj2.plan plan
sqitch status proj2
print_cmds off
_pause
echo
echo "Now, let's create the second change in proj2 which is dependent on the first."
echo "Note when I try to use the command shown in the sqitch-add.pod#Examples: sqitch add --change x proj2 ... it doesn't work."
echo "Either it adds the change to the default plan, or if I say proj2.plan, it says unknown argument."
echo "So, we'll do it this way instead."
_pause
print_cmds on
sqitch --plan-file proj2.plan add create_a_table -r 'proj1:create_proj2_db' -n "Create proj2 table"
sed -i '' -e "s/.*XXX.*/CREATE TABLE proj2_test_table;/" deploy/create_a_table.sql
sed -i '' -e "s/.*XXX.*/DROP TABLE proj2_test_table;/" revert/create_a_table.sql
print_cmds off
_pause
echo
echo "Here are the current plans:"
print_cmds on
sqitch --plan-file proj1.plan plan
sqitch status proj1
echo
sqitch --plan-file proj2.plan plan
sqitch status proj2
print_cmds off
_pause
echo
echo "And try to deploy it..."
print_cmds on
sqitch deploy proj2
print_cmds off
echo
echo "So, at the moment, it looks like this feature can't be used to order execution of changes across projects, it seems to just be a way to bring a change from another project into this one?"
| true |
433e7ef33bd05bd47688cd02cc76ce5b1583ba40 | Shell | Nhung121/elevation-analysis | /setup.sh | UTF-8 | 536 | 3.734375 | 4 | [] | no_license | #!/usr/bin/env bash
set -e
virtual_env_name=".$(basename $(pwd))"
which python3
if [ $? -ne 0 ]; then
echo "INFO: Installing python 3"
brew install python3
fi
if [ ! -d $virtual_env_name ]; then
echo "INFO: Creating virtual env folder in current directory"
python3 -m venv $virtual_env_name
echo ${virtual_env_name} >> .gitignore
fi
echo "Activating virtual environment"
source $virtual_env_name/bin/activate
pip3 install -r requirements.txt
python3 -m ipykernel install --user --name=${virtual_env_name}
echo "Done!" | true |
62ad4115f7b652c949d7155bbcb641d4248e9bba | Shell | quinnjn/dotfiles | /systems/tully/config/polybar/amixer | UTF-8 | 578 | 3.84375 | 4 | [] | no_license | #!/bin/bash
# Icon-font glyphs shown by the bar for the unmuted / muted state.
ICON_VOLUME_ON=""
ICON_VOLUME_OFF=""
# Toggle mute on the ALSA "PCM" control without printing mixer output.
function speaker_toggle_mute() {
    amixer --quiet set PCM toggle
}
function speaker_get_mute() {
    # Print the matching glyph (plus a trailing space) based on the last
    # "[on]"/"[off]" bracketed field of `amixer get PCM`.
    local state
    state=$(amixer get PCM | tail -n 1 | awk -F"[][]" '{ print $6 }')
    case "$state" in
        on) echo -n "$ICON_VOLUME_ON " ;;
        *)  echo -n "$ICON_VOLUME_OFF " ;;
    esac
}
function speaker_get_volume() {
    # Mute indicator first, then the volume percentage field from amixer.
    speaker_get_mute
    awk -F"[][]" '{ print $2 }' <(amixer get PCM | tail -n 1)
}
# Dispatch on the requested action; unknown or missing arguments do nothing.
action="${1:-}"
if [ "$action" = "volume-get" ]; then
    speaker_get_volume
elif [ "$action" = "volume-toggle-mute" ]; then
    speaker_toggle_mute
fi
| true |
310130c2c69c2420c9e00877f3d0dff4f2e45704 | Shell | mattleibow/oxyplot-xamarin-component | /create-component.sh | UTF-8 | 955 | 3.78125 | 4 | [] | no_license | #!/bin/sh
# Folders
OUTPUT=../out
cd src

# The package version to create the component for (e.g. "1.2.3-beta1")
VERSION=$1

# Create the component version number.
# NOTE(fix): this script runs under #!/bin/sh, but the previous
# `IFS='-' read -a split_version <<< "$VERSION"` used bash-only features
# (arrays and here-strings). POSIX parameter expansion does the same job:
# the component version is everything before the first '-'.
NUGET_VERSION=$VERSION
COMPONENT_VERSION=${VERSION%%-*}

# get the remote icons
echo "Copying icons"
if [ ! -d "icons" ]; then
  mkdir -v icons
fi
curl -L "https://raw.githubusercontent.com/oxyplot/oxyplot/develop/Icons/OxyPlot_128.png" -o icons/OxyPlot_128x128.png
curl -L "https://raw.githubusercontent.com/oxyplot/oxyplot/develop/Icons/OxyPlot_512.png" -o icons/OxyPlot_512x512.png

# set component.yaml version numbers
# (BSD/macOS sed syntax: -i takes the backup suffix "-bak" as a separate
# argument — presumably intentional since xamarin-component targets macOS;
# this form fails on GNU sed — TODO confirm.)
sed -i -bak "s/NUGET_VERSION/$NUGET_VERSION/g" component.yaml
sed -i -bak "s/COMPONENT_VERSION/$COMPONENT_VERSION/g" component.yaml

# package component
echo "Creating Xamarin Component"
xamarin-component package

# move component to out location
echo "Move old files"
if [ ! -d "$OUTPUT" ]; then
  mkdir -v "$OUTPUT"
fi
mv *.xam "$OUTPUT"
cd ..
da9778ba34234d412aba5f9d86bed06d2130ffd1 | Shell | jelmerp/genomics | /newhybrids/newhybrids_run.sh | UTF-8 | 1,787 | 3.75 | 4 | [] | no_license | #!/bin/bash
set -e
set -o pipefail
set -u

################################################################################
#### SET-UP ####
################################################################################
## Software/scripts:
NEWHYBRIDS=/datacommons/yoderlab/programs/newhybrids/newhybrids-no-gui-linux.exe

## Positional args:
## $1: input file  $2: output dir  $3: burn-in (optional)  $4: sweeps (optional)
INFILE=$1
OUTDIR=$2
# NOTE(fix): with `set -u`, plain `BURNIN=$3` aborted on a missing argument
# before the old `[[ -z ... ]] && ...` default assignments could run; the
# ${N:-default} form applies the default safely.
BURNIN=${3:-10000}   #10k=default
NSWEEPS=${4:-50000}  #50k=default

## Report:
echo -e "\n################################################################################"
date
echo "##### newhybrids_run.sh: Starting script."
echo "##### newhybrids_run.sh: Input file: $INFILE"
echo "##### newhybrids_run.sh: Output dir: $OUTDIR"
echo "##### newhybrids_run.sh: Number of burn-in sweeps: $BURNIN"
echo "##### newhybrids_run.sh: Number of sweeps after burn-in: $NSWEEPS"
printf "\n"

## Process args:
if [[ ! -d "$OUTDIR" ]]; then
    echo -e "##### newhybrids_run: Creating output dir $OUTDIR \n"
    mkdir -p "$OUTDIR"
fi
cd "$OUTDIR"

################################################################################
#### RUN NEWHYBRIDS ####
################################################################################
# NOTE(review): $INFILE is resolved after the cd above, so a relative input
# path must be relative to $OUTDIR — confirm callers pass absolute paths.
echo "##### newhybrids_run.sh: Starting newhybrids run..."
$NEWHYBRIDS -d "$INFILE" --burn-in "$BURNIN" --num-sweeps "$NSWEEPS" --no-gui

################################################################################
#### REPORT ####
################################################################################
echo -e "\n################################################################################"
echo "##### newhybrids_run.sh: Listing output files in output dir $OUTDIR:"
ls -lh
printf "\n"
echo "##### newhybrids_run.sh: Done with script."
date
printf "\n"
1acded9c8dc1bd18144d24833dd16571095900b5 | Shell | stephenmsachs/fv3gfs | /sorc/fv3gfs.fd/tests/detect_machine.sh | UTF-8 | 4,712 | 2.53125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
export ACCNR=${ACCNR:-nems}
case $(hostname -f) in
g10a1.ncep.noaa.gov) MACHINE_ID=wcoss ; export pex=1;; ### gyre 1
g10a2.ncep.noaa.gov) MACHINE_ID=wcoss ; export pex=1;; ### gyre 2
g14a1.ncep.noaa.gov) MACHINE_ID=wcoss ; export pex=1;; ### gyre 3
g14a2.ncep.noaa.gov) MACHINE_ID=wcoss ; export pex=1;; ### gyre 4
t10a1.ncep.noaa.gov) MACHINE_ID=wcoss ; export pex=1;; ### tide 1
t10a2.ncep.noaa.gov) MACHINE_ID=wcoss ; export pex=1;; ### tide 2
t14a1.ncep.noaa.gov) MACHINE_ID=wcoss ; export pex=1;; ### tide 3
t14a2.ncep.noaa.gov) MACHINE_ID=wcoss ; export pex=1;; ### tide 4
g20a1.ncep.noaa.gov) MACHINE_ID=wcoss ; export pex=2;; ### gyre phase2
g20a2.ncep.noaa.gov) MACHINE_ID=wcoss ; export pex=2;; ### gyre phase2
g20a3.ncep.noaa.gov) MACHINE_ID=wcoss ; export pex=2;; ### gyre phase2
g21a1.ncep.noaa.gov) MACHINE_ID=wcoss ; export pex=2;; ### gyre phase2
g21a2.ncep.noaa.gov) MACHINE_ID=wcoss ; export pex=2;; ### gyre phase2
g21a3.ncep.noaa.gov) MACHINE_ID=wcoss ; export pex=2;; ### gyre phase2
t20a1.ncep.noaa.gov) MACHINE_ID=wcoss ; export pex=2;; ### tide phase2
t20a2.ncep.noaa.gov) MACHINE_ID=wcoss ; export pex=2;; ### tide phase2
t20a3.ncep.noaa.gov) MACHINE_ID=wcoss ; export pex=2;; ### tide phase2
t21a1.ncep.noaa.gov) MACHINE_ID=wcoss ; export pex=2;; ### tide phase2
t21a2.ncep.noaa.gov) MACHINE_ID=wcoss ; export pex=2;; ### tide phase2
t21a3.ncep.noaa.gov) MACHINE_ID=wcoss ; export pex=2;; ### tide phase2
llogin1) MACHINE_ID=wcoss_cray ;; ### luna
llogin2) MACHINE_ID=wcoss_cray ;; ### luna
llogin3) MACHINE_ID=wcoss_cray ;; ### luna
slogin1) MACHINE_ID=wcoss_cray ;; ### surge
slogin2) MACHINE_ID=wcoss_cray ;; ### surge
slogin3) MACHINE_ID=wcoss_cray ;; ### surge
gaea9) MACHINE_ID=gaea ;; ### gaea9
gaea10) MACHINE_ID=gaea ;; ### gaea10
gaea11) MACHINE_ID=gaea ;; ### gaea11
gaea12) MACHINE_ID=gaea ;; ### gaea12
gaea13) MACHINE_ID=gaea ;; ### gaea13
gaea14) MACHINE_ID=gaea ;; ### gaea14
gaea15) MACHINE_ID=gaea ;; ### gaea15
gaea16) MACHINE_ID=gaea ;; ### gaea16
tfe01) MACHINE_ID=theia ;; ### theia01
tfe02) MACHINE_ID=theia ;; ### theia02
tfe03) MACHINE_ID=theia ;; ### theia03
tfe04) MACHINE_ID=theia ;; ### theia04
tfe05) MACHINE_ID=theia ;; ### theia05
tfe06) MACHINE_ID=theia ;; ### theia06
tfe07) MACHINE_ID=theia ;; ### theia07
tfe08) MACHINE_ID=theia ;; ### theia08
tfe09) MACHINE_ID=theia ;; ### theia09
tfe10) MACHINE_ID=theia ;; ### theia10
yslogin1) MACHINE_ID=yellowstone ;;
yslogin2) MACHINE_ID=yellowstone ;;
yslogin3) MACHINE_ID=yellowstone ;;
yslogin4) MACHINE_ID=yellowstone ;;
yslogin5) MACHINE_ID=yellowstone ;;
yslogin6) MACHINE_ID=yellowstone ;;
yslogin7) MACHINE_ID=yellowstone ;;
yslogin8) MACHINE_ID=yellowstone ;;
yslogin9) MACHINE_ID=yellowstone ;;
yslogin10) MACHINE_ID=yellowstone ;;
esac
echo "Machine: " $MACHINE_ID " Account: " $ACCNR
# --- for Theia, find available account ID
#if [[ $1"" != "machineonly" ]] ; then
if [ ${MACHINE_ID} = theia ]; then
AP=account_params # Account info
if [ ${ACCNR:-null} = null ]; then
ac=`$AP 2>&1 | grep '^\s*Allocation: [0-9]' | awk '$4>100{print $3}'| head -1`
nr=`echo $ac|wc -w`
if [ $nr -eq 1 ]; then
ACCNR=$ac
echo "Found a valid account: using $ac"
else
ac=`$AP 2>&1 | grep '^\s*Allocation: [0-9]' | awk '{print $3}'| head -1`
nr=`echo $ac|wc -w`
if [ $nr -eq 1 ]; then
ACCNR=$ac
echo "Could not an find account with positive balance: using $ac"
echo "NOTE: Will run in windfall; longer wait times, be patient!"
else
echo "Check your account ID; No compute allocations found"
fi
fi
else
cphr=`$AP 2>&1 | grep '^\s*Allocation: [0-9]' | grep $ACCNR | awk '{print $4}'`
nr=`echo $cphr|wc -w`
if [ $nr -eq 0 ]; then
echo 'Wrong account choice: ' $ACCNR
else
echo "Account: " $ACCNR", available: " $cphr " CPU hrs"
fi
fi
fi
#fi
| true |
1f717d1adb1c00bf4a85fc86df524a099c3c5e68 | Shell | s0la/dotfiles_backup | /dotfiles/bar_tiny/song_progress.sh | UTF-8 | 544 | 3.421875 | 3 | [] | no_license | #!/bin/bash
percentage=0

# Width of one segment in percent; default 4 (=> 25 segments). Override via $1.
progression_step=${1:-4}

# Current playback position (percent) parsed from `mpc` output; may be empty
# when nothing is playing — TODO confirm desired behavior in that case.
current_progress=$(mpc | sed -n '/%/ s/.*(\([0-9]*\).*/\1/p')
num_value=$(/usr/bin/printf "%.0f" "$(bc <<< "scale=1; $current_progress / $progression_step")")

# Append $2 clickable segments (glyph $3) to the variable named by $1; each
# segment seeks to its own percentage when clicked. `percentage` accumulates
# across calls so the "remaining" segments continue where "elapsed" stopped.
# NOTE(fix): replaced `eval` with printf -v so interpolated text is never
# re-evaluated as shell code.
draw() {
    local var=$1 count=$2 glyph=$3 p
    for p in $(seq $count); do
        ((percentage += progression_step))
        printf -v "$var" '%s%s' "${!var}" "%{A:mpc seek $percentage%:}$glyph%{A}"
    done
}

draw 'elapsed' $num_value "━"
draw 'remaining' $(((100 / progression_step) - num_value)) '━'

echo "%{F#c0c0c0}${elapsed}%{F#a2a2a2}${remaining}%{F#b2b2b2}" #A3B837 B3CC33
| true |
4b81563c4551b1e117fdc2d6708fe6a456257969 | Shell | phairow/linuxsetup | /.bash_aliases | UTF-8 | 843 | 2.65625 | 3 | [] | no_license | alias xterm='xterm -fbx -ls &'
# History: ignoreboth = ignoredups + ignorespace.
# NOTE(fix): removed a dead `export HISTCONTROL=ignoredups` that was
# immediately overwritten by the line below.
export HISTCONTROL=ignoreboth
export HISTSIZE=10240
export HISTTIMEFORMAT='%F %T'

# Prompt: red user@host, cyan cwd.
PS1="\[\033[0;31m\]\u\[\033[0;37m\]@\[\033[0;31m\]\h \[\033[0;36m\]\w\[\033[0;37m\] :\[\033[0m\]"

#make sure directory colors work
if [ -x /usr/bin/dircolors ]; then
    eval "$(dircolors -b)"
    alias ls='ls --color=auto'
fi

export CLICOLOR=1
export LSCOLORS=ExFxCxDxBxegedabagacad
export EDITOR=vim

set -o vi
# NOTE(fix): the next three were readline (~/.inputrc) directives, not shell
# options; in bash, `set editing-mode vi` actually overwrites the positional
# parameters ($1, $2, ...). `set -o vi` above already enables vi mode, so
# they are disabled here — move them to ~/.inputrc if still wanted.
#set editing-mode vi
#set keymap vi
#set convert-meta on

# ^p check for partial match in history
bind -m vi-insert "\C-p":dynamic-complete-history
# ^n cycle through the list of partial matches
bind -m vi-insert "\C-n":menu-complete
# ^l clear screen
bind -m vi-insert "\C-l":clear-screen

export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && . "$NVM_DIR/nvm.sh" # This loads nvm
| true |
f8a9671165608b7e2b0ed07d6074863a8f9d639a | Shell | nitingautam/node-hapi-react-redux-sass-typescript-mongo-webpack-hmr-gulp | /script/setup.sh | UTF-8 | 14,849 | 3.8125 | 4 | [
"MIT"
] | permissive | #!/bin/sh
set -e
# Resolve the directory this script lives in so helpers.sh can be sourced
# even when invoked from another cwd; fall back to $PWD when the path part
# of BASH_SOURCE is not a directory (e.g. run as `bash setup.sh`).
IMPORT_PATH="${BASH_SOURCE%/*}"
if [[ ! -d "$IMPORT_PATH" ]]; then IMPORT_PATH="$PWD"; fi
source "$IMPORT_PATH/helpers.sh"
# Save script's current directory
DIR="$( cd -P "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
#cd "${DIR}"
#########################################################################################
# Settings/Formats
# Timestamp shown in the banner and in the final summary.
DATE=`date +%m-%d-%Y" "%r`
# Minimum terminal width (columns) required for the large ASCII-art banner.
FORMAT_WIDTH=105
WINDOW_WIDTH=$(tput cols)
#########################################################################################
# Software version targets (minimum required versions)
TARGET_NODE_VERSION=4.2.0
TARGET_NPM_VERSION=3.3.0
#########################################################################################
# Freshen Up
clear
#########################################################################################
## each separate version number must be less than 3 digit wide !
version() {
    # Zero-pad each dotted component (each must stay under 3 digits) so that
    # version strings can be compared numerically, e.g. "4.2.0" -> 4002000000.
    echo "$@" | awk -F. '{ printf("%d%03d%03d%03d\n", $1, $2, $3, $4); }'
}
#########################################################################################
# Pretty Feedback Functions ✔✔✔✔✔✔✔
# Print ✘
# Print "✘ message" in red, then reset the terminal color and end the line.
# NOTE(fix): the message is passed as a %s argument so '%' in it is not
# treated as a printf format spec, and the reset is emitted via printf,
# which interprets \033 regardless of the shell's echo semantics.
function echo_fail {
    printf "\e[31m✘ %s" "${1-}"
    printf "\033[0m\n"
}
# Print ✔
# Print "✔ message" in green, then reset the terminal color and end the line.
# NOTE(fix): %s argument avoids printf format injection via the message;
# the reset is emitted via printf instead of relying on xpg-style echo.
function echo_pass {
    printf "\e[32m✔ %s" "${1-}"
    printf "\033[0m\n"
}
# echo pass or fail
# example
# echo echo_if 1 "Passed"
# echo echo_if 0 "Failed"
# Echo a pass (1) or fail (anything else) marker followed by the message.
#   echo_if 1 "Passed"  -> green ✔ Passed
#   echo_if 0 "Failed"  -> red ✘ Failed
# NOTE(fix): expansions are quoted so an empty status/message can neither
# break the [ ] test nor be word-split; `=` replaces the bashism `==`.
function echo_if {
    if [ "${1-}" = 1 ]; then
        echo_pass "${2-}"
    else
        echo_fail "${2-}"
    fi
}
# Terminal output formatting
# Color helpers: each prints its ANSI color code followed by the (optional)
# message with no trailing newline, except echo_clear which resets attributes
# and ends the line.
# NOTE(fix): messages are passed as %s arguments so '%' in them is not
# treated as a printf format spec; echo_clear emits a real ESC via printf
# instead of relying on echo interpreting "\033".
function echo_cause {
    printf "\e[1;37m%s" "${1-}"
}
function echo_effect {
    printf "\e[32m%s" "${1-}"
}
function echo_warn {
    printf "\e[1;33m%s" "${1-}"
}
function echo_success {
    printf "\e[1;32m%s" "${1-}"
}
function echo_clear {
    printf "\033[0m%s\n" "${1-}"
}
#########################################################################################
# Capture versions
# Strip everything but digits and dots (e.g. "v4.2.0" -> "4.2.0").
# NOTE(review): if node/npm is not installed these pipelines fail and, under
# `set -e`, the script exits here without a friendly message — confirm intended.
NODE_VERSION="$(node --version | sed 's/[^0-9.]*//g')"
NPM_VERSION="$(npm --version | sed 's/[^0-9.]*//g')"
#########################################################################################
#
# WELCOME!
echo "\n$(echo_cause)"
# Wide terminals (> FORMAT_WIDTH columns) get the large banner, others the
# small one.
if test $WINDOW_WIDTH -gt $FORMAT_WIDTH
then
echo "██████╗ ██████╗ ██████╗ ██╗███████╗ ██████╗████████╗ ███████╗███████╗████████╗██╗ ██╗██████╗ ";
echo "██╔══██╗██╔══██╗██╔═══██╗ ██║██╔════╝██╔════╝╚══██╔══╝ ██╔════╝██╔════╝╚══██╔══╝██║ ██║██╔══██╗";
echo "██████╔╝██████╔╝██║ ██║ ██║█████╗ ██║ ██║ ███████╗█████╗ ██║ ██║ ██║██████╔╝";
echo "██╔═══╝ ██╔══██╗██║ ██║██ ██║██╔══╝ ██║ ██║ ╚════██║██╔══╝ ██║ ██║ ██║██╔═══╝ ";
echo "██║ ██║ ██║╚██████╔╝╚█████╔╝███████╗╚██████╗ ██║ ███████║███████╗ ██║ ╚██████╔╝██║ ";
echo "╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚════╝ ╚══════╝ ╚═════╝ ╚═╝ ╚══════╝╚══════╝ ╚═╝ ╚═════╝ ╚═╝ ";
else
echo "███████╗███████╗████████╗██╗ ██╗██████╗ ";
echo "██╔════╝██╔════╝╚══██╔══╝██║ ██║██╔══██╗";
echo "███████╗█████╗ ██║ ██║ ██║██████╔╝";
echo "╚════██║██╔══╝ ██║ ██║ ██║██╔═══╝ ";
echo "███████║███████╗ ██║ ╚██████╔╝██║ ";
echo "╚══════╝╚══════╝ ╚═╝ ╚═════╝ ╚═╝ ";
fi
# Full-width horizontal rules drawn with block characters.
printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' \█
printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' \═
echo "Starting installation tool ${DATE}$(echo_clear)"
echo "Be sure to have Mongo running in another process during this installation$(echo_clear)"
#########################################################################################
#
# Check NVM globally if not already installed.
# echo "\n$(echo_cause)Checking NVM path$(echo_clear)"
# if [ ! -f ~/.nvm/nvm.sh ]; then
# echo "$(echo_warn)WARNING: NVM not found. $(echo_if 0)$(echo_clear)"
# echo "$(echo_warn)Please run:$(echo_clear)"
# echo "curl -o- https://raw.githubusercontent.com/creationix/nvm/v0.29.0/install.sh | bash"
# echo "$(echo_warn)Then in ~/.bash_profile add:$(echo_clear)"
# echo 'export NVM_DIR="$HOME/.nvm"\n[ -s "$NVM_DIR/nvm.sh" ] && . "$NVM_DIR/nvm.sh" # This loads nvm"\n\n'
# exit 0
# else
# . ~/.nvm/nvm.sh
# NVM_VERSION="$(nvm --version | sed 's/[^0-9.]*//g')"
# echo "$(echo_effect)NVM version ${NVM_VERSION} found $(echo_if 1)$(echo_clear)"
# fi
# NOT WORKING????????
# Check NVM globally if not already installed.
#echo "\n$(echo_cause)Checking for NVM$(echo_clear)"
#if [ ! -x "$(nvm)" ]; then
# echo "$(echo_warn)WARNING: Check NVM version if issues persist$(echo_clear)"
# #run "${RUN_SUDO}npm install -g karma-cli" &
# #KARMA_PID=$!
# #wait $KARMA_PID
# #echo "$(echo_effect)Karma installed globally $(echo_if 1)$(echo_clear)"
#else
# echo "$(echo_effect)NVM found $(echo_if 1)$(echo_clear)"
#fi
#########################################################################################
#
# Prompt for sudo
echo ""
# Ask whether global npm installs should be prefixed with sudo; -n 1 reads a
# single keystroke into $REPLY without requiring Enter, -r keeps backslashes.
read -p "Run global installs with sudo? " -n 1 -r
echo # (optional) move to a new line
if [[ $REPLY =~ ^[Yy]$ ]]
then
    RUN_SUDO="sudo "
else
    RUN_SUDO=""
fi
#########################################################################################
#
# Check if Node is installed and at the right version
echo "\n$(echo_cause)Checking for Node version ${TARGET_NODE_VERSION} or greater$(echo_clear)"
# Compare the zero-padded numeric forms (see `version` above) and exit early
# when the installed Node is older than the target.
if [ "$(version "$TARGET_NODE_VERSION")" -gt "$(version "$NODE_VERSION")" ]; then
  echo "$(echo_warn)Node version does NOT meet requirements $(echo_if 0)$(echo_clear)"
  echo "Please install nvm and use node ${TARGET_NODE_VERSION} or greater$(echo_clear)"
  exit 0
else
  echo "$(echo_effect)Node version ${NODE_VERSION} meets requirements $(echo_if 1)$(echo_clear)"
fi
#########################################################################################
#
# Check if NPM is installed and at the right version
echo "\n$(echo_cause)Checking for NPM version ${TARGET_NPM_VERSION} or greater$(echo_clear)"
if [ "$(version "$TARGET_NPM_VERSION")" -gt "$(version "$NPM_VERSION")" ]; then
  echo "$(echo_warn)NPM version does NOT meet requirements $(echo_if 0)$(echo_clear)"
  # NOTE(review): this message mentions the Node target version in the NPM
  # branch — looks like a copy/paste slip; confirm before changing the text.
  echo "Please install nvm and use node ${TARGET_NODE_VERSION} or greater$(echo_clear)"
  exit 0
else
  echo "$(echo_effect)NPM version ${NPM_VERSION} meets requirements $(echo_if 1)$(echo_clear)"
fi
#########################################################################################
#
# Install Nodemon globally if not already installed.
# Each stanza below: probe PATH with `command -v`; when the CLI is missing,
# install it globally with npm (prefixed by $RUN_SUDO when requested) via the
# `run` helper from helpers.sh, backgrounded and then waited on.
echo "\n$(echo_cause)Checking for Nodemon$(echo_clear)"
if [ ! -x "$(command -v nodemon)" ]; then
    echo "$(echo_warn)WARNING: nodemon command not found, installing globally. $(echo_if 0)$(echo_clear)"
    run "${RUN_SUDO}npm install -g nodemon" &
    NODEMON_PID=$!
    wait $NODEMON_PID
    echo "$(echo_effect)Nodemon installed globally $(echo_if 1)$(echo_clear)"
else
    echo "$(echo_effect)Nodemon found $(echo_if 1)$(echo_clear)"
fi
#########################################################################################
#
# Install Karma globally if not already installed.
echo "\n$(echo_cause)Checking for Karma testing CLI$(echo_clear)"
if [ ! -x "$(command -v karma)" ]; then
    echo "$(echo_warn)WARNING: karma-cli command not found, installing globally. $(echo_if 0)$(echo_clear)"
    run "${RUN_SUDO}npm install -g karma-cli" &
    KARMA_PID=$!
    wait $KARMA_PID
    echo "$(echo_effect)Karma installed globally $(echo_if 1)$(echo_clear)"
else
    echo "$(echo_effect)karma-cli found $(echo_if 1)$(echo_clear)"
fi
#########################################################################################
#
# Install Typescript globally if not already installed.
echo "\n$(echo_cause)Checking for Typescript$(echo_clear)"
if [ ! -x "$(command -v tsc)" ]; then
    echo "$(echo_warn)WARNING: tsc command not found, installing globally. $(echo_if 0)$(echo_clear)"
    run "${RUN_SUDO}npm install -g typescript" &
    TSC_PID=$!
    wait $TSC_PID
    # NOTE(review): says "TSD" but this stanza installs typescript (tsc);
    # this message and the one in the tsd stanza below appear swapped.
    echo "$(echo_effect)TSD installed globally $(echo_if 1)$(echo_clear)"
else
    echo "$(echo_effect)tsc found $(echo_if 1)$(echo_clear)"
fi
#########################################################################################
#
# Install Typescript Definition Utility globally if not already installed.
echo "\n$(echo_cause)Checking for Typescript Definitions CLI$(echo_clear)"
if [ ! -x "$(command -v tsd)" ]; then
    echo "$(echo_warn)WARNING: tsd command not found, installing globally. $(echo_if 0)$(echo_clear)"
    run "${RUN_SUDO}npm install -g tsd" &
    TSD_PID=$!
    wait $TSD_PID
    echo "$(echo_effect)Typescript installed globally $(echo_if 1)$(echo_clear)"
else
    echo "$(echo_effect)tsd found $(echo_if 1)$(echo_clear)"
fi
#########################################################################################
#
# Install NCU Definition Utility globally if not already installed.
echo "\n$(echo_cause)Checking for NCU package utility for NPM$(echo_clear)"
if [ ! -x "$(command -v ncu)" ]; then
    echo "$(echo_warn)WARNING: ncu command not found, installing globally. $(echo_if 0)$(echo_clear)"
    run "${RUN_SUDO}npm install -g npm-check-updates" &
    NCU_PID=$!
    wait $NCU_PID
    echo "$(echo_effect)NCU installed globally $(echo_if 1)$(echo_clear)"
else
    echo "$(echo_effect)ncu found $(echo_if 1)$(echo_clear)"
fi
#########################################################################################
#
# NPM Core install
echo "\n$(echo_cause)Starting NPM install and setup$(echo_clear)"
run 'npm config set registry http://registry.npmjs.org/'
echo "$(echo_effect)NPM registry config complete $(echo_if 1)$(echo_clear)"
# NPM Clean dependencies
run "npm prune"
echo "$(echo_effect)NPM prune complete $(echo_if 1)$(echo_clear)"
# NPM Install
run "npm install"
echo "$(echo_effect)NPM install complete $(echo_if 1)$(echo_clear)"
# NPM Link (optional; can hit permission problems on some macOS releases)
read -p "Run NPM link command? (Not recommended for Yosemite/El Capitan users or if you are experiencing permission problems) " -n 1 -r
#echo # (optional) move to a new line
if [[ $REPLY =~ ^[Yy]$ ]]
then
    echo ""
    run "npm link" &
    NPM_LINK_PID=$!
    wait $NPM_LINK_PID
    echo "$(echo_effect)NPM link complete $(echo_if 1)$(echo_clear)"
else
    echo "\n$(echo_warn)Run 'npm link' if you have errors when running locally$(echo_clear)"
fi
echo "$(echo_effect)NPM install and setup complete $(echo_if 1)$(echo_clear)"
#########################################################################################
# Stop MongoDB
#
# Make sure no previous MongoDB instance is still running before (re)setup.
npm run 'mongo-stop'
###############################################################################
# Setup MongoDB
###############################################################################
echo "$(echo_effect)Setup MongoDB $(echo_if 1)$(echo_clear)"
run 'npm run mongo-setup' &
MONGO_PID=$!
wait $MONGO_PID
#########################################################################################
# Start MongoDB
#
echo "$(echo_effect)Starting MongoDB $(echo_if 1)$(echo_clear)"
run 'npm run mongo-start' &
MONGO_PID=$!
wait $MONGO_PID
#########################################################################################
# Config Setup using Promptly in ./setup.js
#
echo "\n$(echo_cause)Starting config setup$(echo_clear)"
# Run the interactive config generator (./setup.js via promptly) when no
# config.js exists yet; otherwise ask before re-running it.
if [ ! -f ./config.js ]; then
    echo "$(echo_effect)Config not present. Running config setup.$(echo_clear)"
    # start ./setup.js
    run "npm run setup-config"
    # config setup complete
    echo "$(echo_effect)Config setup complete $(echo_if 1)$(echo_clear)"
else
    read -p "Config exists, would you like to run the config setup? " -n 1 -r
    echo # (optional) move to a new line
    if [[ $REPLY =~ ^[Yy]$ ]]
    then
        echo ""
        # start ./setup.js
        run "npm run setup-config"
        # config setup complete
        echo "$(echo_effect)Config setup complete $(echo_if 1)$(echo_clear)"
    else
        echo "$(echo_success)SUCCESS $(echo_if 1)$(echo_if 1)$(echo_if 1)$(echo_if 1): NPM Setup complete!$(echo_clear)"
    fi
fi
#########################################################################################
# Run Local instance?
#
echo "\n$(echo_cause)Finalizing project setup$(echo_clear)"
# Reminder: the Mongo instance started earlier is no longer required.
echo "\n$(echo_cause)You may stop Mongo now$(echo_clear)"
# read -p "Would you like to start the local instance? " -n 1 -r
# echo # (optional) move to a new line
# if [[ $REPLY =~ ^[Yy]$ ]]
# then
# clear
# DELTADATE=`date +%m-%d-%Y" "%r`
# echo "\n$(echo_cause)"
# printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' \*
# echo "$(echo_success)SUCCESS $(echo_if 1)$(echo_if 1)$(echo_if 1)$(echo_if 1): NPM Setup complete!$(echo_clear)\n\n"
# echo "$(echo_effect)Installation started ${DATE} and finished ${DELTADATE} $(echo_if 1)$(echo_clear)"
# echo "$(echo_cause)Starting local instance...$(echo_clear)\n\n"
# run "npm run watch"
# else
# clear
# DELTADATE=`date +%m-%d-%Y" "%r`
# echo "\n$(echo_cause)"
# printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' \*
# echo "$(echo_success)SUCCESS $(echo_if 1)$(echo_if 1)$(echo_if 1)$(echo_if 1): NPM Setup complete!$(echo_clear)\n\n"
# echo "$(echo_effect)Installation started ${DATE} and finished ${DELTADATE} $(echo_if 1)$(echo_clear)"
# fi
# Final summary banner with the start and end timestamps.
DELTADATE=`date +%m-%d-%Y" "%r`
echo "\n$(echo_cause)"
printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' \*
echo "$(echo_success)SUCCESS $(echo_if 1)$(echo_if 1)$(echo_if 1)$(echo_if 1): NPM Setup complete!$(echo_clear)\n\n"
echo "$(echo_effect)Installation started ${DATE} and finished ${DELTADATE} $(echo_if 1)$(echo_clear)"
echo "$(echo_effect)Start the project locally by running npm run watch$(echo_clear)"
5d1f76a92cbb51074a589a4cc0402eefc07a24e9 | Shell | postmodern/ruby-install | /test/system-tests/detect_package_manager_test.sh | UTF-8 | 2,442 | 3.171875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
. ./test/helper.sh
. ./share/ruby-install/system.sh
# RedHat family with dnf available: dnf must be preferred over yum.
function test_detect_package_manager_on_redhat_based_systems_with_dnf()
{
	if [[ -f /etc/redhat-release ]] && command -v dnf >/dev/null; then
		detect_package_manager

		assertEquals "did not prefer dnf over yum" "dnf" "$package_manager"
	fi
}
# RedHat family without dnf but with yum: must fall back to yum.
function test_detect_package_manager_on_redhat_based_systems_with_yum()
{
	if [[ -f /etc/redhat-release ]] &&
	   ! command -v dnf >/dev/null &&
	   command -v yum >/dev/null; then
		detect_package_manager

		assertEquals "did not fallback to yum" "yum" "$package_manager"
	fi
}
# Debian family with apt available.
function test_detect_package_manager_on_debian_based_systems_with_apt()
{
	if [[ -f /etc/debian_version ]] && command -v apt >/dev/null; then
		detect_package_manager

		assertEquals "did not detect apt" "apt" "$package_manager"
	fi
}
# openSUSE hosts with zypper available.
function test_detect_package_manager_on_open_suse_systems_with_zypper()
{
	if [[ -f /etc/SuSE-release ]] && command -v zypper >/dev/null; then
		detect_package_manager

		assertEquals "did not detect zypper" "zypper" "$package_manager"
	fi
}
# Void Linux hosts with xbps available.
function test_detect_package_manager_on_void_systems_with_xbps()
{
	command -v lsb_release >/dev/null &&
	   command -v xbps-install >/dev/null || return 0

	# BUG FIX: this test asserted without ever running the code under test;
	# call detect_package_manager like the sibling tests do.
	detect_package_manager

	assertEquals "did not detect xbps-install" "xbps" "$package_manager"
}
# *BSD hosts with pkg available.
function test_detect_package_manager_on_bsd_systems_with_pkg()
{
	if [[ "$os_platform" == *BSD ]] && command -v pkg >/dev/null; then
		detect_package_manager

		assertEquals "did not detect pkg" "pkg" "$package_manager"
	fi
}
# macOS with Homebrew installed: brew must be preferred over MacPorts.
function test_detect_package_manager_on_macos_systems_with_homebrew()
{
	if [[ "$os_platform" == *Darwin ]] && command -v brew >/dev/null; then
		detect_package_manager

		assertEquals "did not prefer brew over port" "brew" "$package_manager"
	fi
}
# macOS without Homebrew but with MacPorts: must fall back to port.
function test_detect_package_manager_on_macos_systems_with_macports()
{
	if [[ "$os_platform" == *Darwin ]] &&
	   ! command -v brew >/dev/null &&
	   command -v port >/dev/null; then
		detect_package_manager

		assertEquals "did not fallback to macports" "port" "$package_manager"
	fi
}
# The RUBY_INSTALL_PKG_MANAGER env var must override auto-detection.
function test_detect_package_manager_when_RUBY_INSTALL_PKG_MANAGER_is_set()
{
	RUBY_INSTALL_PKG_MANAGER="custom"

	detect_package_manager

	# BUG FIX: the failure message used "$$RUBY_..." — "$$" expands to the
	# shell's PID, not the variable; interpolate the variable's value.
	assertEquals "did not set package_manager to $RUBY_INSTALL_PKG_MANAGER" \
		     "$RUBY_INSTALL_PKG_MANAGER" "$package_manager"

	unset RUBY_INSTALL_PKG_MANAGER
}
# shUnit2 hook: runs after every test; clear the variable written by
# detect_package_manager so tests cannot leak state into each other.
function tearDown()
{
	unset package_manager
}
# Hand control to shUnit2 ($SHUNIT2 points at the framework; set by the harness).
SHUNIT_PARENT=$0 . $SHUNIT2
| true |
0a0cb5523519daf91ad03a114fb891879fc48882 | Shell | BabbleSim/ext_2G4_channel_Indoorv1 | /test/test2.sh | UTF-8 | 1,265 | 3.34375 | 3 | [] | no_license | #!/bin/bash
# Copyright 2018 Oticon A/S
# SPDX-License-Identifier: Apache-2.0
# This test requires the results to be inspected manually
# Simulation id shared by all playback devices and the phy below.
SIMULATION_ID="silly"
VERBOSITY_LEVEL=4
# PIDs of backgrounded processes to wait on, and the last non-zero exit code.
PROCESS_IDS=""; EXIT_CODE=0
function Execute(){
  # Run the given command with a 5 s timeout in the background and remember
  # its PID so the caller can wait on it later.
  # $1 must be an existing file; quoting protects paths containing spaces.
  if [ ! -f "$1" ]; then
    echo -e " \e[91m$(pwd)/$(basename "$1") cannot be found (did you forget to\
 compile it?)\e[39m"
    exit 1
  fi
  timeout 5 "$@" & PROCESS_IDS="$PROCESS_IDS $!"
}
# Default to the repo-root bin/ layout unless the caller overrides it.
BSIM_OUT_PATH="${BSIM_OUT_PATH:-../../../}"
cd ${BSIM_OUT_PATH}/bin
# Three playback devices (d=0..2) replaying the recorded traffic in data2/.
Execute ./bs_device_2G4_playback \
  -v=${VERBOSITY_LEVEL} -s=${SIMULATION_ID} -d=0 -inputf=../components/ext_2G4_channel_Indoorv1/test/data2/0
Execute ./bs_device_2G4_playback \
  -v=${VERBOSITY_LEVEL} -s=${SIMULATION_ID} -d=1 -inputf=../components/ext_2G4_channel_Indoorv1/test/data2/1
Execute ./bs_device_2G4_playback \
  -v=${VERBOSITY_LEVEL} -s=${SIMULATION_ID} -d=2 -inputf=../components/ext_2G4_channel_Indoorv1/test/data2/2
# The phy with the Indoorv1 channel (attenuation 30, distance matrix from the
# test data, extra attenuation 10), 3 devices, 20e6 us of simulated time;
# any extra script arguments are forwarded to the phy.
Execute ./bs_2G4_phy_v1 -v=${VERBOSITY_LEVEL} -s=${SIMULATION_ID} -channel=Indoorv1 \
  -argschannel -at=30 -dist=../components/ext_2G4_channel_Indoorv1/test/data2/silly.matrix -atextra=10 -argsmain \
  -D=3 -sim_length=20e6 $@
# Wait for every launched process, remembering the last failing exit code.
for PROCESS_ID in $PROCESS_IDS; do
  wait $PROCESS_ID || let "EXIT_CODE=$?"
done
exit $EXIT_CODE #the last exit code != 0
| true |
d63025759ce54e080b1601e24b4ccb8a3847482f | Shell | PaddlePaddle/PaddleTest | /models/PaddleClas_restruct/scripts/shell/predict.sh | UTF-8 | 4,790 | 2.6875 | 3 | [] | no_license | # 输入变量:yaml、设置卡数CPU/SET_CUDA/SET_MULTI_CUDA 、 trained/pretrained
export yaml_line=${1:-ppcls/configs/ImageNet/ResNet/ResNet50.yaml}
export cuda_type=${2:-SET_MULTI_CUDA}
export input_model_type=${3:-pretrained}
export Project_path=${Project_path:-$PWD}
cd ${Project_path} #确定下执行路径
\cp -r -f ${Project_path}/../scripts/shell/prepare.sh .
# #通过相对路径找到 scripts 的路径,需要想一个更好的方法替代
source prepare.sh
\cp -r -f ${Project_path}/../scripts/shell/choose_model.sh .
export predict_step=True
# source choose_model.sh
# 因为训练不足导致预测BN算子报错,直接使用预训练模型 根因是epoch数不能小于5
if [[ ${model_name} == "PULC-language_classification-PPLCNet_x1_0" ]] \
|| [[ ${model_name} == "PULC-language_classification-MobileNetV3_small_x0_35" ]] \
|| [[ ${model_name} =~ "PULC-textline_orientation" ]] \
|| [[ ${model_name} =~ "PULC-textline_orientation" ]];then
input_model_type_tmp=${input_model_type}
export input_model_type=pretrained
source choose_model.sh
export input_model_type=${input_model_type_tmp}
else
source choose_model.sh
fi
size_tmp=`cat ${yaml_line} |grep image_shape|cut -d "," -f2|cut -d " " -f2`
#获取train的shape保持和predict一致
cd deploy
sed -i 's/size: 224/size: '${size_tmp}'/g' configs/inference_cls.yaml #修改predict尺寸
sed -i 's/resize_short: 256/resize_short: '${size_tmp}'/g' configs/inference_cls.yaml
echo model_type
echo ${model_type}
case ${model_type} in
ImageNet|slim|DeepHash)
if [[ ${yaml_line} =~ 'ultra' ]];then
python python/predict_cls.py -c configs/inference_cls_ch4.yaml \
-o Global.infer_imgs="./images" \
-o Global.batch_size=4 -o Global.inference_model_dir=${pretrained_model} \
-o Global.use_gpu=${set_cuda_flag} \
> ../${log_path}/predict/${model_name}_${input_model_type}.log 2>&1
else
python python/predict_cls.py -c configs/inference_cls.yaml \
-o Global.infer_imgs="./images" \
-o Global.batch_size=4 \
-o Global.inference_model_dir=${pretrained_model} \
-o Global.use_gpu=${set_cuda_flag} \
> ../${log_path}/predict/${model_name}_${input_model_type}.log 2>&1
fi
;;
GeneralRecognition)
python python/predict_system.py -c configs/inference_general.yaml \
-o Global.use_gpu=${set_cuda_flag} \
> ../${log_path}/predict/${model_name}_${input_model_type}.log 2>&1
;;
Cartoonface)
python python/predict_system.py -c configs/inference_cartoon.yaml \
-o Global.use_gpu=${set_cuda_flag} \
> ../${log_path}/predict/${model_name}_${input_model_type}.log 2>&1
;;
Logo)
python python/predict_system.py -c configs/inference_logo.yaml \
-o Global.use_gpu=${set_cuda_flag} \
> ../${log_path}/predict/${model_name}_${input_model_type}.log 2>&1
;;
Products)
python python/predict_system.py -c configs/inference_product.yaml \
-o Global.use_gpu=${set_cuda_flag} \
> ../${log_path}/predict/${model_name}_${input_model_type}.log 2>&1
;;
Vehicle)
python python/predict_system.py -c configs/inference_vehicle.yaml \
-o Global.use_gpu=${set_cuda_flag} \
> ../${log_path}/predict/${model_name}_${input_model_type}.log 2>&1
;;
PULC)
# 9中方向用 model_type_PULC 区分
python python/predict_cls.py -c configs/PULC/${model_type_PULC}/inference_${model_type_PULC}.yaml \
-o Global.inference_model_dir=${pretrained_model} \
-o Global.use_gpu=${set_cuda_flag} \
> ../${log_path}/predict/${model_name}_${input_model_type}.log 2>&1
;;
reid|metric_learning)
echo "predict unspported ${model_name}" > ../${log_path}/predict/${model_name}_${input_model_type}.log
;;
esac
# if [[ $? -eq 0 ]] \
# && [[ $(grep -c "Error" ../${log_path}/predict/${model_name}_${input_model_type}.log) -eq 0 ]];then
if [[ $? -eq 0 ]];then
# cat ../${log_path}/predict/${model_name}_${input_model_type}.log
echo -e "\033[33m successfully! predict of ${model_name}_${input_model_type} successfully!\033[0m" \
| tee -a ../${log_path}/result.log
echo "predict_exit_code: 0.0" >> ../${log_path}/predict/${model_name}_${input_model_type}.log
else
cat ../${log_path}/predict/${model_name}_${input_model_type}.log
echo -e "\033[31m failed! predict of ${model_name}_${input_model_type} failed!\033[0m" \
| tee -a ../${log_path}/result.log
echo "predict_exit_code: 1.0" >> ../${log_path}/predict/${model_name}_${input_model_type}.log
fi
sed -i 's/size: '${size_tmp}'/size: 224/g' configs/inference_cls.yaml #改回predict尺寸
sed -i 's/resize_short: '${size_tmp}'/resize_short: 256/g' configs/inference_cls.yaml
cd ..
| true |
8c6bce513b2950150c06e6463a8b3985df51579a | Shell | cabralfilho/VPSHM | /scripts/mysql | UTF-8 | 882 | 3.34375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
echo ""
echo "VPSHM - Virtual Private Server Host Manager Installer Shell"
echo ""
echo "Checking MYSQL"
echo "Adding MYSQL YUM repo"
# Append the MariaDB 10.2 repo definition (CentOS 7, x86_64).
# NOTE(review): `tee -a` appends on every run, so re-running this script
# duplicates the repo entry — consider overwriting or checking first.
cat <<EOF | sudo tee -a /etc/yum.repos.d/MariaDB.repo
# MariaDB 10.2 CentOS repository list - created 2018-02-26 19:23 UTC
# http://downloads.mariadb.org/mariadb/repositories/
[mariadb]
name = MariaDB
baseurl = http://yum.mariadb.org/10.2/centos7-amd64
gpgkey=https://yum.mariadb.org/RPM-GPG-KEY-MariaDB
gpgcheck=1
EOF
echo "Preparing to install MYSQL"
yum install MariaDB-server MariaDB-client -y
echo "Done."
echo "Start the MariaDB service and make it auto-start on system boot"
systemctl start mariadb.service
systemctl enable mariadb.service
echo "Secure the installation of MariaDB"
echo "You can answer /Y-RandomPass-RandomPass-Y-Y-Y-Y/ to the following prompts"
# Interactive hardening (root password, remove anonymous users, etc.).
/usr/bin/mysql_secure_installation
mysql -V
echo "All done"
| true |
4b0947d2a2971d34af101b1b20b9d198a8e2fd39 | Shell | Onfroygmx/dotbench | /.dot/zsh/modules/aliases/aliases.zsh | UTF-8 | 1,478 | 3.0625 | 3 | [] | no_license | ####!/usr/bin/env zsh
#!/bin/zsh
# _ _
# __ _| (_) __ _ ___ ___ ___
# / _` | | |/ _` / __|/ _ \/ __|
# | (_| | | | (_| \__ \ __/\__ \
# \__,_|_|_|\__,_|___/\___||___/
#
## LS
# -F appends type indicators (/ for dirs, * for executables, ...).
alias ls='ls --color=auto --group-directories-first -F'
alias ll='ls --time-style=+"%d.%m.%Y %H:%M" -l'
alias la='ll -avh'
## base operation with security
alias cp='cp -iv' # Confirm before overwriting something
alias mv='mv -iv' # Confirm before overwriting something
alias rm='rm -iv' # Confirm before deleting anything
alias chmod='chmod --preserve-root -v'
alias chown='chown --preserve-root -v'
#copy and download
# pcp: plain recursive copy; pch: archive mode (-a) with human-readable sizes.
alias pcp='rsync -r --progress' # Copy with progress bar and speed
alias pch='rsync -ah --progress' # Copy with progress bar and speed
alias wget='wget --continue --progress=bar --timestamping'
alias curl='curl --continue-at - --location --progress-bar --remote-name --remote-time'
# Lists the ten most used commands.
alias history-stat="history 0 | awk '{print \$2}' | sort | uniq -c | sort -n -r | head"
#alias history-statc="fc -l 1 | awk '{ CMD[$2]++; count++; } END { for (a in CMD) print CMD[a] " " CMD[a]*100/count "% " a }' | grep -v "./" | sort -nr | head -20 | column -c3 -s " " -t | nl"
# Home bare repository for git/tree operations
alias dot='git --git-dir=$XDG_CONFIG_HOME/.dotgit/ --work-tree=$HOME'
alias treedot='tree -ahC -L 4 --dirsfirst -I .dotgit'
alias treedotclean='treedot -I completions\|.dotgit\|zpmod'
| true |
a711bc0041a01b1d1944300ae4548e9014dea56a | Shell | projectbuendia/buendia | /packages/buendia-networking/data/usr/bin/buendia-update-hosts | UTF-8 | 1,750 | 3.46875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Copyright 2015 The Project Buendia Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at: http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distrib-
# uted under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# specific language governing permissions and limitations under the License.
# Rebuild /etc/hosts with this machine's current addresses and aliases.
# Use an unpredictable temp name; the previous /tmp/hosts.$$ was guessable.
tmp=$(mktemp /tmp/hosts.XXXXXX)
trap 'rm -f "$tmp"' EXIT
# Remove previously added hostnames.
grep -v 'buendia-update-hosts' /etc/hosts > "$tmp"
# Ensure localhost is in the file.
if ! grep -q localhost /etc/hosts; then
    echo '127.0.0.1 localhost' >> "$tmp"
fi
# Get the list of names by which this machine wants to be known.
# NOTE(review): these unsets presumably keep child commands from colorizing
# output that is parsed below -- confirm they are still required.
unset CLICOLOR
unset CLICOLOR_FORCE
unset LSCOLORS
unset LS_COLORS
names="buendia buendia.local"
for continent in africa antarctica asia europe north-america oceania south-america; do
    names="$names $continent.pool.ntp.org"
done
for number in 0 1 2 3; do
    names="$names $number.pool.ntp.org"
done
# Add a line for each of the machine's IPv4 addresses (loopback excluded).
# awk keys on the 'inet ' field itself instead of counting space-delimited
# columns, which broke whenever 'ip a' varied its leading whitespace.
for ip in $(ip a | awk '/inet /{print $2}' | cut -d/ -f1 | grep -v '^127\.'); do
    echo "$ip $names # added by buendia-update-hosts" >> "$tmp"
done
# Install the new hosts file, if it has changed.
if ! diff -q "$tmp" /etc/hosts >/dev/null; then
    mv "$tmp" /etc/hosts
    # Tell dnsmasq to pick up the new hosts file, if it's running.
    if [ -e /var/run/dnsmasq/dnsmasq.pid ]; then
        service dnsmasq restart
    fi
fi
| true |
043ea7376b7d0d4002f48e4aa3e53f97e1f186fb | Shell | shiro-saber/Compiladores | /ProyectoFinal/codeGeneration/massive2.sh | UTF-8 | 365 | 2.734375 | 3 | [] | no_license | mcs -out:a.exe *.cs
# Run the compiled interpreter over every .int64 test case and report
# success/failure per file.  nullglob makes the loop a no-op when there are
# no test files instead of iterating over the literal pattern.
shopt -s nullglob
for i in *.int64; do
    echo -e "\033[1;32m corriendo para"
    echo "$i"
    echo -e "\033[93m"
    # Branch on mono's exit status directly.  The original tested $? twice;
    # the second test saw the status of the first [[ ]] comparison (not
    # mono's), and '[[ $? > 0 ]]' was a string comparison besides.
    if mono a.exe "$i" > /dev/null; then
        echo "success"
    else
        echo "fail"
    fi
    echo -e "\033[0m ----------------------------------------------"
done
echo -e "\033[0m termine"
| true |
7efafbe6616a8e4afea7d2564c105165f281b51e | Shell | Phill93/rpi_project | /install.sh | UTF-8 | 849 | 2.90625 | 3 | [] | no_license | #!/bin/bash
# Provision a Raspberry Pi weather station: DHT11 overlay, LAMP stack,
# Python sensor reader, cron job and web symlinks.  Run as root.
BASEDIR=$(realpath -L "$(dirname "$0")")
# Enable the DHT11 overlay only once; an unconditional append duplicated the
# line in /boot/config.txt on every re-run.
grep -q '^dtoverlay=dht11' /boot/config.txt || echo "dtoverlay=dht11,gpiopin=4" >> /boot/config.txt
apt-get update
apt-get install apache2 mysql-server php5-mysql php5 libapache2-mod-php5 php5-mysqlnd php5-mcrypt -y
service apache2 restart
apt-get install python3 python3-pip -y
pip3 install pymysql
cp "${BASEDIR}/tools/weather.cnf" /etc/mysql/conf.d/
systemctl restart mysql
echo ""
echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
echo "  Please enter MySQL root password!"
echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
echo ""
# Interactive: loads the weather schema.
mysql -u root -p < "${BASEDIR}/tools/weather.sql"
# Poll the sensor once a minute.
echo "*/1 * * * * root python3 ${BASEDIR}/tools/read_sensor.py" > /etc/cron.d/weather
# -f so re-running the installer does not fail on existing links.
ln -sf "${BASEDIR}/weather" /var/www/html/weather
ln -sf "${BASEDIR}/stuff" /var/www/html/stuff
echo "Rebooting in 5 seconds"
sleep 5
reboot
| true |
9efd4ff79ee2f3c5ee81052cce2d60f7e4a16602 | Shell | Dicksoz/Bio722IndProject | /runfastp.sh | UTF-8 | 878 | 3.515625 | 4 | [] | no_license | #!/bin/bash
# Trim, filter and merge one paired-end FASTQ sample with fastp.
# $1 forward reads, $2 thread count (capped at 16), $3 optional adapter FASTA.
# Was '[ $@ -eq 0 ]', which is a broken test ($@ expands to the arguments,
# not their count) and did not stop the script after printing usage.
if [ $# -eq 0 ]; then
    echo "Usage: ./runfastp.sh ForwardReads.fq.gz Num_Threads [Adapters.fasta]"
    echo "Note: Forward/reverse is assumed to be denoted with _R1_/_R2_";
    echo "Note: Fastp doesn't use more than 16 threads";
    exit 1
fi
fwd="$1";
# Derive the reverse-read file and the sample name from the forward file.
rev="${fwd/_R1_/_R2_}";
sample=$(basename "$fwd" .fastq.gz);
sample="${sample%_R1*}"
nThreads=$2
adapters=$3
if [ -z "$nThreads" ]; then nThreads=2; fi;
if [ "$nThreads" -gt 16 ]; then nThreads=16; fi;
# Build the adapter options as an array so the two-word form stays two words
# without relying on unquoted word-splitting.
if [ -z "$adapters" ]; then
    adapterArgs=(--detect_adapter_for_pe)
else
    adapterArgs=(--adapter_fasta "$adapters")
fi
fastp -i "$fwd" -I "$rev" -o "${sample}_1U.fq.gz" -O "${sample}_2U.fq.gz" \
    "${adapterArgs[@]}" --n_base_limit 0 --length_required 30 \
    --merge --merged_out "${sample}_M.fq.gz" --correction --trim_tail1 1 \
    --html "${sample}.html" --json /dev/null -R "$sample" \
    --thread "$nThreads" 2> "${sample}.log"
| true |
3bbc81f7c75691fe2aefc0f1055ddef2c6756750 | Shell | nk-gears/terraform | /lts-stack/providers/aws/lts/lts-infradev-us1/freeradius-user-data.txt | UTF-8 | 1,227 | 2.953125 | 3 | [] | no_license | #!/bin/bash
# EC2 user-data: configure this host as a FreeRADIUS endpoint with Google
# Authenticator OTP via PAM for the AD connectors.
# NOTE(review): the all-caps placeholders (AD_CONNECTOR_IP1/2,
# AD_CONNECTOR_DNS) appear to be substituted by Terraform before the script
# runs -- confirm against the template resource.
# setup hostname
hostname "lts-infradev"
echo "lts-infradev" > /etc/hostname
sed -i "s/127.0.0.1 localhost/127.0.0.1 lts-infradev/" /etc/hosts
# install NTP
apt-get -y update
apt-get -y install ntp
# install FreeRadius
apt-get -y update
apt-get -y install build-essential libpam0g-dev freeradius git libqrencode3 -y
# Install google authenticator
apt-get -y install libpam-google-authenticator
# configure freeradius to run as root so PAM can read per-user OTP state
sed -i "s/user = .*/user = root/" /etc/freeradius/radiusd.conf
sed -i "s/group = .*/group = root/" /etc/freeradius/radiusd.conf
sed -i '50i DEFAULT Auth-Type := PAM' /etc/freeradius/users
# Was 'sed -ir': GNU sed parses that as -i with backup suffix 'r', leaving a
# stray 'defaultr' backup file (and never enabling extended regexes, which
# this basic-regex pattern does not need anyway).
sed -i "s/#\s*pam/\tpam/" /etc/freeradius/sites-enabled/default
# comment out every pre-existing rule, then append the OTP rule
sed -i 's/^\([^#]\)/#\1/g' /etc/pam.d/radiusd
echo -e "\nauth requisite pam_google_authenticator.so forward_pass" >> /etc/pam.d/radiusd
# configure client endpoints
client_end_points="\nclient ${AD_CONNECTOR_IP1} {\n\tsecret = testing123\n\t\
shortname = ${AD_CONNECTOR_DNS}\n\tnastype = other\n}\n\n\
client ${AD_CONNECTOR_IP2} {\n\tsecret = testing123\n\t\
shortname = ${AD_CONNECTOR_DNS}\n\tnastype = other\n}"
# quoted so the embedded escape sequences reach 'echo -e' intact
echo -e "$client_end_points" >> /etc/freeradius/clients.conf
service freeradius restart
40973f53e289fc5f9f3b75801f584b2e5ee0550d | Shell | turbinenreiter/bash-experiment | /regression_tests/r_complex.sh | UTF-8 | 897 | 3.5 | 4 | [] | no_license | #!/bin/bash
# test-suite.sh
# A partial Bash compatibility test suite.
# Original is found at http://tldp.org/LDP/abs/html/portabilityissues.html
# Each probe below deliberately exercises a bash-only feature; the code must
# stay as-is, since rewriting it would change what is being tested.
# Double brackets (test)
String="Double brackets supported?"
echo -n "Double brackets test: "
if [[ "$String" = "Double brackets supported?" ]]
then
echo "PASS"
else
echo "FAIL"
fi
# Double brackets and regex matching
String="Regex matching supported?"
echo -n "Regex matching: "
if [[ "$String" =~ R.....matching* ]]
then
echo "PASS"
else
echo "FAIL"
fi
# Arrays
test_arr=FAIL
# Six-word array; index 5 is the last word, "PASS".
Array=( If supports arrays will print PASS )
test_arr=${Array[5]}
echo "Array test: $test_arr"
# Completing this script is an exercise for the reader.
# Add to the above similar tests for double parentheses,
#+ brace expansion, $() command substitution, etc.
# TODO(Krasin): uncomment the following line once the execution has been implemented.
#exit $?
| true |
88cc5231565fed48e2695024c38218c21af578dd | Shell | Ninja3047/st | /PKGBUILD | UTF-8 | 1,311 | 2.859375 | 3 | [] | no_license | # Contributor: Patrick Jackson <PatrickSJackson gmail com>
# Maintainer: Christoph Vigano <mail@cvigano.de>
pkgname=st
pkgver=0.8.1
pkgrel=1
pkgdesc='A simple virtual terminal emulator for X.'
arch=('i686' 'x86_64')
license=('MIT')
depends=('libxft')
makedepends=('ncurses')
url="http://st.suckless.org"
source=(http://dl.suckless.org/st/$pkgname-$pkgver.tar.gz
        config.h
        1-st-0.8-scrollback.diff
        2-st-0.8-scrollback-mouse.diff
        3-st-0.8-scrollback-mouse-altscreen.diff
        4-st-0.8-alpha.diff
        # Renamed to match the file prepare() actually applies; the array
        # previously listed '5-st-0.8-base64-default-black.diff', so the
        # patch step could never find the file it tried to apply.
        5-st-0.8-base16-default-dark.diff
        6-st-0.8-spoiler.diff)

prepare() {
  cd "$srcdir/$pkgname-$pkgver"
  patch -p1 -i ../1-st-0.8-scrollback.diff
  patch -p1 -i ../2-st-0.8-scrollback-mouse.diff
  patch -p1 -i ../3-st-0.8-scrollback-mouse-altscreen.diff
  patch -p1 -i ../4-st-0.8-alpha.diff
  patch -p1 -i ../5-st-0.8-base16-default-dark.diff
  patch -p1 -i ../6-st-0.8-spoiler.diff
}

build() {
  cd "$srcdir/$pkgname-$pkgver"
  make X11INC=/usr/include/X11 X11LIB=/usr/lib/X11
}

package() {
  cd "$srcdir/$pkgname-$pkgver"
  # Drop the '@tic' step from the Makefile; terminfo is staged into the
  # package below via TERMINFO instead of being compiled into the live db.
  sed -i '/\@tic /d' Makefile
  make PREFIX=/usr DESTDIR="$pkgdir" TERMINFO="$pkgdir/usr/share/terminfo" install
  install -Dm644 LICENSE "$pkgdir/usr/share/licenses/$pkgname/LICENSE"
  install -Dm644 README "$pkgdir/usr/share/doc/$pkgname/README"
}
| true |
99bfa7823b72e684317ef735a354863bcd5ac65b | Shell | jerluebke/cp_project_1 | /python/data/graphs/fix.sh | UTF-8 | 409 | 2.859375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# To get the max extent, look for the picture with the largest resolution, i.e.
# for f in *.png; do
#     file $f
# done
# -f: do not fail (and spam stderr) when no extent_* files exist yet.
rm -f extent*
for i in {001..063}; do
    f=out-$i.png
    # line $i of key.file is the caption for frame $i
    s=$(sed "${i}q;d" key.file)
    # pad every frame to a common canvas, then burn the caption in
    convert "$f" -gravity center -extent 3617x827 "tmp_$f"
    montage -label "$s" "tmp_$f" -pointsize 64 -geometry +0+0 "extent_$f"
    rm "tmp_$f"
done
# vim: set ff=unix tw=79 sw=4 ts=4 et ic ai :
| true |
a12eabbd94d347cd96d0d639b13cfd02d2aff9da | Shell | silmar-alberti/DevelopmentPhpStack | /devPhpInstall | UTF-8 | 778 | 3.59375 | 4 | [
"MIT"
] | permissive | #! /bin/bash
set -e
# Resolve the project root from this script's own location so the symlinks
# point at absolute paths regardless of the caller's cwd.  Quoted so a path
# containing spaces does not break the expansion.
relativeProjectFolder=$(dirname "$0")
projectFolder=$(realpath "$relativeProjectFolder")
INSTALL_DIR="/usr/local/bin/"
# Wrapper scripts shipped in utils/ that get linked onto the PATH.
filesToInstall=(
'composer'
'kcacheGrid'
'php56'
'php72'
'php74'
'php80'
'php81'
'php82'
'disableDebugVars'
'enableDebugVars'
)
for file in "${filesToInstall[@]}"
do
    originFilePath="${projectFolder}/utils/$file"
    destFilePath="${INSTALL_DIR}${file}"
    echo "${destFilePath} ----> ${originFilePath}"
    ln -sf "${originFilePath}" "${destFilePath}"
done
# Default 'php' points at PHP 8.1.
ln -sf "${INSTALL_DIR}/php81" "${INSTALL_DIR}/php"
echo "
--------------------------------------------------
--------------------------------------------------
--------------------------------------------------
Install success
"
| true |
205ae674d361d021046461e14bf270401bc84114 | Shell | Medium/picchu | /hack/kustom-parse.sh | UTF-8 | 389 | 3.390625 | 3 | [] | no_license | #!/bin/bash
# Split the kustomize-built CRD bundle into per-document chunks (csplit names
# them xx00, xx01, ...) and move each chunk back to its origin path.
mkdir -p resources
# 'command -v' is the portable replacement for 'which'
command -v yq >/dev/null || pip install yq
pushd resources
kustomize build ../config/crd | csplit - '/^---$/' {4}
# iterate over the csplit output directly; parsing 'ls' breaks on odd names
for f in xx*
do
    # recover the source path from the kustomize origin annotation
    new_path=$(yq '.metadata.annotations."config.kubernetes.io/origin"' <"$f" | cut -d: -f2 | cut -d \\ -f1 | xargs)
    echo "Copying $f --> $new_path"
    mv "$f" "../config/crd/${new_path}"
done
popd
rm -rf resources
3471479f1213119f033adf0e5d6409533dc3b652 | Shell | adbailey4/modification_detection_pipeline | /scripts/transfer_to_s3.sh | UTF-8 | 427 | 3.890625 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# This is a script transfers files to s3 from local directory
while [[ $# -gt 0 ]]
do
key="$1"
case ${key} in
-f|--file)
FILEPATH="$2"
shift # past argument
shift # past value
;;
-b|--bucket)
BUCKET="$2"
shift # past argument
shift # past value
;;
esac
done
echo FILEPATH = "${FILEPATH}"
echo BUCKET = "${BUCKET}"
echo `aws s3 cp ${FILEPATH} s3://${BUCKET}`
| true |
01d52a66a9e649d41e05280083220e9fc33fbf8c | Shell | keitharm/.config | /.bashrc | UTF-8 | 2,257 | 3.40625 | 3 | [] | no_license | # Exports
###########
export EDITOR=vim
# Terminal Settings
#####################
#TERM=xterm-color
TERM=xterm-256color
# Flags read by the prompt setup below to decide on a colored PS1.
force_color_prompt=yes
color_prompt=yes
# Refuse write(1)/talk messages from other users on this terminal.
mesg no
# Don't put duplicate lines or lines starting with space in the history.
# See bash(1) for more options
HISTCONTROL=ignoreboth
# Append to the history file, don't overwrite it
shopt -s histappend
# For setting history length see HISTSIZE and HISTFILESIZE in bash(1)
HISTSIZE=1000
HISTFILESIZE=2000
# Check the window size after each command and, if necessary,
# Update the values of LINES and COLUMNS.
shopt -s checkwinsize
# Prompt Settings
###################
# Red username for root, green for regular users; both show host and cwd.
if [ "$color_prompt" = yes ]; then
    if [ $(id -u) -eq 0 ]; then
        PS1="${debian_chroot:+($debian_chroot)}\
\[\033[01;31m\]\u\[\033[00;32m\]@\h\[\033[00;33m\]:\
\W\[\033[00m\]\$ \[\033[00m\]"
    else
        PS1="${debian_chroot:+($debian_chroot)}\
\[\033[00;32m\]\u@\h\[\033[00;33m\]:\W\[\033[00m\]\$ \[\033[00m\]"
    fi
else
    PS1="${debian_chroot:+($debian_chroot)}\u@\h:\W\$ "
fi
# Functions
#############
# Extract an archive, dispatching on its file extension.
# Usage: extract <archive>
# All uses of $1 are quoted so paths containing spaces work; the unquoted
# originals made '[ -f $1 ]' and every tool invocation fail on such paths.
extract() {
    if [ -f "$1" ] ; then
        case "$1" in
            *.tar.bz2) tar xvjf "$1" ;;
            *.tar.gz)  tar xvzf "$1" ;;
            *.bz2)     bunzip2 "$1" ;;
            *.rar)     unrar x "$1" ;;
            *.gz)      gunzip "$1" ;;
            *.tar)     tar xvf "$1" ;;
            *.tbz2)    tar xvjf "$1" ;;
            *.tgz)     tar xvzf "$1" ;;
            *.zip)     unzip "$1" ;;
            *.Z)       uncompress "$1" ;;
            *.7z)      7z x "$1" ;;
            *)         echo "Don't know how to extract '$1'..." ;;
        esac
    else
        echo "'$1' is not a valid file!"
    fi
}
# Alias definitions.
# You may want to put all your additions into a separate file like
# ~/.bash_aliases, instead of adding them here directly.
# See /usr/share/doc/bash-doc/examples in the bash-doc package.
if [ -f ~/.bash_aliases ]; then
    . ~/.bash_aliases
fi
# enable programmable completion features (you don't need to enable
# this, if it's already enabled in /etc/bash.bashrc and /etc/profile
# sources /etc/bash.bashrc).
# 'shopt -oq posix' is true in POSIX mode, where programmable completion
# is unavailable, so the file is skipped in that case.
if [ -f /etc/bash_completion ] && ! shopt -oq posix; then
    . /etc/bash_completion
fi
| true |
5f8e26a40a49e2c28cf0956427b4f8db3ccaec60 | Shell | manasyanhrant/kicad | /install_kicad_libraries.sh | UTF-8 | 647 | 3.59375 | 4 | [] | no_license | #!/bin/bash
# NOTE(review): assigning PWD shadows the shell-maintained working-directory
# variable; the value is the same as the builtin one, so this line looks
# redundant -- confirm before removing.
PWD=`pwd`
# File the KiCad environment variables are appended to (system-wide).
export_file="/etc/environment"
#export_file="~/.bashrc"
# Append "NAME=VALUE" to $export_file unless NAME is already defined there.
# Arguments: $1 - variable name, $2 - value (path)
# Exits 1 when called with the wrong number of arguments.
append_variables(){
    if [ $# -eq 2 ]; then
        # -q keeps grep from echoing the matching line; anchoring on '^NAME='
        # prevents false positives when the name merely appears inside some
        # other variable's value (the old unanchored 'grep $1' did both).
        if ! grep -q "^$1=" "$export_file" ; then
            echo "$1=$2" | sudo tee -a "$export_file"
        else
            echo "Warning: Variable $1 already exported"
        fi
    else
        echo "Error: Function should receive two arguments [var_name] [path]"
        exit 1
    fi
}
# Register every KiCad library location as a NAME=PATH pair, in order.
main(){
    local entry var_name sub_dir
    for entry in \
        "KICAD_PTEMPLATES:kicad-templates" \
        "KICAD_SYMBOL_DIR:kicad-symbols" \
        "KISYS3DMOD:kicad-packages3D" \
        "KISYSMOD:kicad-kicad-footprints"
    do
        var_name=${entry%%:*}
        sub_dir=${entry#*:}
        append_variables "$var_name" "$PWD/$sub_dir"
    done
}
main
| true |
f91854cd1d4aee24d0f38ee94a21e310824939b1 | Shell | ConnectionMaster/amphtml | /build-system/sauce_connect/stop_sauce_connect.sh | UTF-8 | 1,479 | 3.515625 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#
# Copyright 2018 The AMP HTML Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the license.
#
# This script stops the sauce connect proxy, and waits for a clean exit.
# Wrap the argument in ANSI cyan for terminal emphasis.
CYAN() {
  local start='\033[0;36m' reset='\033[0m'
  echo -e "${start}$1${reset}"
}
# Wrap the argument in ANSI bold yellow for terminal emphasis.
YELLOW() {
  local start='\033[1;33m' reset='\033[0m'
  echo -e "${start}$1${reset}"
}
# The start script records the proxy's pid and log path in these two files.
PID_FILE="sauce_connect_pid"
LOG_FILE="sauce_connect_log"
LOG_PREFIX=$(YELLOW "stop_sauce_connect.sh")
# Early exit if there's no proxy running.
if [[ ! -f "$PID_FILE" ]]; then
  echo "$LOG_PREFIX Sauce Connect Proxy is not running"
  exit 0
fi
# Stop the sauce connect proxy.
# NOTE(review): the pid file may be stale (process already gone), in which
# case kill prints an error but the cleanup below still runs -- confirm
# that is the intended behavior.
PID="$(cat "$PID_FILE")"
echo "$LOG_PREFIX Stopping Sauce Connect Proxy pid $(CYAN "$PID")"
kill "$PID"
# Clean up files.
if [[ -f "$LOG_FILE" ]]; then
  echo "$LOG_PREFIX Cleaning up log file $(CYAN "$LOG_FILE")"
  rm "$LOG_FILE"
fi
if [[ -f "$PID_FILE" ]]; then
  echo "$LOG_PREFIX Cleaning up pid file $(CYAN "$PID_FILE")"
  rm "$PID_FILE"
fi
# Done.
echo "$LOG_PREFIX Successfully stopped Sauce Connect Proxy"
| true |
a97e03a0b05edd2bef551f5c5e6de5f2f3d48f51 | Shell | FLAME-HPC/xparser | /tests/test_conditions/parse.sh | UTF-8 | 1,161 | 3.078125 | 3 | [] | no_license | #!/bin/bash
# Path to xparser
export FLAME_XPARSER_DIR="$PWD/../.."
cd "$FLAME_XPARSER_DIR"
echo "Now here: $PWD"
export TESTS=$(seq 1 1 16)
#export TESTS=$(seq 1 1 1)
# Parse and build one model variant.
# Arguments: $1 - test number, $2 - variant letter (a..d).
build_variant() {
    export MODEL_DIR="$FLAME_XPARSER_DIR/tests/test_conditions/models/test_$1/$2"
    cd "$FLAME_XPARSER_DIR"
    ./xparser "$MODEL_DIR/model.xml"
    cd "$MODEL_DIR"
    make clean all
}
# The four variants cover the truth table of the two conditions:
#   a: time condition A false, memory condition B false
#   b: time condition A false, memory condition B true
#   c: time condition A true,  memory condition B false
#   d: time condition A true,  memory condition B true
# (Previously four copy-pasted stanzas per test; factored into one loop.)
for i in $TESTS; do
    for variant in a b c d; do
        build_variant "$i" "$variant"
    done
done
echo 'Script done.'
| true |
6baca4a0875ce5ff927d82a5f95fa6917e502c36 | Shell | latifkabir/Computation_using_C | /quad_openmp/quad_local_gcc.sh | UTF-8 | 519 | 2.875 | 3 | [] | no_license | #!/bin/bash
#
# Compile the program with GCC.
#
/usr/local/bin/gcc -fopenmp quad_openmp.c -lm
#
mv a.out quad
#
# Run with 1, 2, and 4 threads, collecting all output in one file.
# (Previously three copy-pasted stanzas; factored into one loop.)
#
: > quad_local_gcc_output.txt
for threads in 1 2 4; do
  if [ "$threads" -eq 1 ]; then
    echo "Run with 1 thread."
  else
    echo "Run with $threads threads."
  fi
  export OMP_NUM_THREADS=$threads
  ./quad >> quad_local_gcc_output.txt
done
#
# Discard the executable file.
#
rm quad
#
echo "Program output written to quad_local_gcc_output.txt"
| true |
bc95fc7078b08c56d359a5ea2e169dd08c460730 | Shell | evanx/chronica-scripts | /pull.sh | UTF-8 | 252 | 2.703125 | 3 | [] | no_license |
if ! pwd | grep -q '/chronica'
then
echo 'Please run from chronica app directory e.g. cd ~/chronica'
exit 1
fi
git pull
[ -f util/.git ] || git submodule init
git submodule update
npm install
cd util
git checkout master
git pull
| true |
b0e8d244745c1873bf19f1f76589eb45e0058e69 | Shell | nmattia/sorri | /sorri | UTF-8 | 17,587 | 3.390625 | 3 | [
"MIT"
] | permissive | # vim: ft=bash
# sorri: a Simpler lORRI
#
# This is a simpler implementation of Tweag's lorri:
# https://github.com/target/lorri
#
# TODO: document inputs and env variables
#
# sorri reuses lorri's tricks for figuring out the files to track for changes,
# but uses direnv's own mechanism for actually tracking those files.
# sorri uses a local cache at '~/.cache/sorri/<project>/v<sorri version>/'.
# Each entry is a directory containing two files:
#
# ~/.cache/sorri/niv/v1/
# └── 0716a121e4f986f9f8cf11f7c579d332
# ├── link -> /nix/store/jfzkisfgmv3qgpzz3i8nai12y1cry77v-nix-shell
# └── manifest
#
# `link` is the result of a previous evaluation. `manifest` is used to find
# that result of a previous evaluation. The directory name
# (0716a121e4f986f9f8cf11f7c579d332 above) is the hash of the `manifest`.
#
# `link` is a symlink to a shell script that sets a shell's variables.
#
# cat ~/.cache/sorri/niv/v1/0716a121e4f986f9f8cf11f7c579d332/link
# declare -x AR_x86_64_apple_darwin="/nix/store/amsm28x2hnsgp8c0nm4glkjc2gw2l9kw-cctools-binutils-darwin-927.0.2/bin/ar"
# declare -x BZ2_LIB_DIR="/nix/store/7yikqcm4v4b57xv3cqknhdnf0p1aakxp-bzip2-1.0.6.0.1/lib"
# declare -x BZ2_STATIC="1"
# declare -x CARGO_BUILD_TARGET="x86_64-apple-darwin"
# declare -x CARGO_TARGET_WASM32_UNKNOWN_UNKNOWN_LINKER="/nix/store/swiic36rl7njy6bfll5z0afl42c9q4s5-lld-9.0.1/bin/lld"
#
# `manifest` is a list of files used for an evaluation alongside their checksums:
#
# $ cat ~/.cache/sorri/niv/v1/0716a121e4f986f9f8cf11f7c579d332/manifest
# /Users/nicolas/niv/shell.nix:029451f2a9bee59f4ce002bdbdf20554
# /Users/nicolas/niv/nix/default.nix:7ff8c9138044fc7e31f1d4ed2bf1c0ba
# /Users/nicolas/niv/nix/overlays/buf/default.nix:c4a24e0bba0178b73f0211d0f26147e6
# ...
#
# sorri first checks the existing cache entries (sorri/niv/v1/0716...,
# etc); if it finds a cache entry with a manifest where all the _manifest_
# entries (nix/default.nix:7ff...) match local files, the link is loaded; if no
# manifest matches, a new entry is created and loaded.
# NOTES:
# we use some functions from direnv's stdlib:
# - watch_file <foo>: updates $DIRENV_WATCHES to tell direnv to watch <foo>
# - expand_path: similar to realpath from coreutils
# Emit a debug line on stdout, but only when SORRI_DEBUG is non-empty.
sorri_debug() {
    if [ -n "${SORRI_DEBUG:-}" ]; then
        echo "debug:" "$@"
    fi
}
# Informational message on stdout, tagged with the tool name.
sorri_log() {
    local tag="sorri:"
    echo "$tag" "$@"
}
# Attention-grabbing message: rendered bold and wrapped in '!!!!' markers.
sorri_log_bold() {
    local marker='!!!!'
    tput bold
    echo "sorri:" "$marker" "$@" "$marker"
    tput sgr0
}
# Print an error in red and exit with status 1.
# Diagnostics now go to stderr so they can never be mistaken for (or mixed
# into) captured stdout; the originals were printed on stdout.
sorri_abort() {
    {
        tput setaf 1
        echo sorri: ERROR: "$@"
        echo sorri: please run "'direnv allow'" to reload the shell
        tput sgr0
    } >&2
    exit 1
}
# Sorts the file and removes duplicate lines, in place.
# Arguments: $1 - path of the file to deduplicate
# 'sort -u' replaces the old 'sort | uniq' pipeline (one process, same
# result), and the variables are now local instead of leaking globals.
sorri_remove_duplicates() {
    local file="$1"
    local tmpfile
    tmpfile=$(mktemp)
    sort -u <"$file" >"$tmpfile"
    mv "$tmpfile" "$file"
}
# Appends one manifest entry for file $1 to manifest $2, formatted as:
#    <absolute path>:<nix-hash of the file>
sorri_add_to_manifest() {
    local abs_path
    # tr strips every newline, matching the old inline 'expand_path | tr'
    abs_path=$(expand_path "$1" | tr -d '\n')
    {
        printf '%s:' "$abs_path"
        nix-hash "$1"
    } >>"$2"
}
# Parses a Nix -vv log file and creates a manifest.
# Arguments:
#   $1 - path to the captured nix-build log (pre-filtered to 'copied source',
#        'evaluating file' and 'trace: file read:' lines)
#   $2 - path the manifest is written to (one "<file>:<hash>" line per file)
# NOTE(review): logfile/manifest/line/copied are not declared 'local' and so
# leak into the caller's scope -- confirm nothing depends on that.
sorri_create_manifest_from_logs() {
    logfile="$1" # The path to the logfile
    manifest="$2" # The path to the manifest (will be created)
    while IFS= read -r line; do
        case $line in
            trace*)
                # 'trace: file read: <f>' lines come from the overridden
                # readFile/readDir builtins in the shell wrapper expression
                # shellcheck disable=2001
                copied=$(echo "$line" | sed 's/^trace: file read: '"'"'\([^'"'"']*\)'"'"'.*/\1/')
                sorri_debug "found trace $copied"
                if ! [[ $copied == /nix/store* ]]; then
                    sorri_add_to_manifest "$copied" "$manifest"
                fi
                ;;
            copied*)
                # 'copied source <f>' = source trees/files imported to the store
                # shellcheck disable=2001
                copied=$(echo "$line" | sed 's/^copied source '"'"'\([^'"'"']*\)'"'"'.*/\1/')
                sorri_debug "found copied $copied"
                if ! [[ $copied == /nix/store* ]]; then
                    sorri_add_to_manifest "$copied" "$manifest"
                fi
                ;;
            evaluating*)
                # 'evaluating file <f>' = Nix files used during evaluation
                # shellcheck disable=2001
                copied=$(echo "$line" | sed 's/^evaluating file '"'"'\([^'"'"']*\)'"'"'.*/\1/')
                sorri_debug "found evaluated $copied"
                # skip files if they're in the store (i.e. immutable)
                if ! [[ $copied == /nix/store* ]]; then
                    # when evaluating a `default.nix`, Nix sometimes prints the
                    # path to the file, and sometimes to the directory...
                    if [ -d "$copied" ]; then
                        sorri_add_to_manifest "$copied/default.nix" "$manifest"
                    else
                        sorri_add_to_manifest "$copied" "$manifest"
                    fi
                fi
                ;;
        esac
    done <"$logfile"
    sorri_remove_duplicates "$manifest"
}
# Wrapper function for creating a new manifest based on the files currently
# present in the source tree.
# NOTE: The manifest (and link) is created atomically meaning this works fine
# if two shells are opened concurrently
# Reads ./shell.nix from $PWD; requires nix-build/nix-hash/nix-store and the
# direnv stdlib (expand_path) to be available.  Writes a new cache entry
# (manifest + GC-rooted link) under $SORRI_CACHE_DIR, then imports it.
# NOTE(review): evallogs/buildout/tmpmanifest/manifest_hash/link are not
# declared 'local' -- confirm nothing depends on the leakage.
sorri_create_manifest() {
    sorri_debug creating manifest for "$PWD"
    evallogs=$(mktemp)
    # A nix wrapper that imports ./shell.nix. It modifies the resulting
    # derivation in two ways:
    # - The builder is replaced with a bash function that calls `export >
    #   $out`, which effectively writes all the environment variables to $out.
    #   The variables can then be imported by sourcing this file.
    # - The readFile and readDir builtins are overriden to print their
    #   arguments whenever they are called (so that we can parse that and track
    #   those files)
    # TODO: use the same tricks for getEnv
    local shellnix;
    shellnix=$(cat <<EOF
let
  overrides = {
    import = scopedImport overrides;
    scopedImport = x: builtins.scopedImport (overrides // x);
    builtins = builtins // {
      readFile = file: builtins.trace "file read: '\${toString file}'" (builtins.readFile file);
      readDir = path: builtins.trace "file read: '\${toString path}'" (builtins.readDir path);
    };
  };
  # TODO: how do we deal with shellHook s?
  # if the shell hook sets a variable, then it should be handled by the shell
  # If it does other stuff then this is not gonna work since direnv runs this
  # in a subshell.
  builder = builtins.toFile "foo-bidou" ''
    [ -e \$stdenv/setup ] && . \$stdenv/setup
    export > \$out
  '';
  imported =
    let
      raw = overrides.scopedImport overrides $(expand_path ./shell.nix);
    in
    if builtins.isFunction raw
    then raw {}
    else raw;
in
derivation (
  imported.drvAttrs // {
    args = [ "-e" builder ];
  }
)
EOF
)
    # The resulting link to the shell build (is used as a GC root)
    buildout=$(mktemp -d)/result
    sorri_log building shell, this may take a while
    # We keep lines like these:
    # 'copied source /niv/src to ...': source trees and files imported to the store
    # 'evaluating file foo.nix ...' Nix files used for eval
    # 'trace: file read: sources.json...' files from readFile & readDir
    keepem=(grep -E "^copied source|^evaluating file|^trace: file read:")
    # we drop all the lines like the above but that reference files in the
    # store; those files are immutable so we don't want to watch them for
    # changes
    dropem=(grep -vE "^copied source '/nix|^evaluating file '/nix|^trace: file read: '/nix")
    if [ -n "${SORRI_DEBUG:-}" ]; then
        nix-build -E "$shellnix" -o "$buildout" -vv \
            2> >(tee -a >("${keepem[@]}" | "${dropem[@]}" >"$evallogs")) || sorri_abort nix-build failed
    else
        logs=$(mktemp)
        nix-build -E "$shellnix" -o "$buildout" -vv --max-jobs 8 \
            2> >(tee -a "$logs" > >("${keepem[@]}" | "${dropem[@]}" >"$evallogs")) >/dev/null \
            || sorri_abort nix-build failed, logs can be found at "${logs}:"$'\n'"---"$'\n'"$(tail -n 5 "$logs")"$'\n'"---"
        rm "$logs"
    fi
    sorri_debug build finished "$buildout"
    tmpmanifest=$(mktemp)
    sorri_create_manifest_from_logs "$evallogs" "$tmpmanifest"
    # The identifier for this new cache
    manifest_hash=$(nix-hash "$tmpmanifest" | tr -d '\n')
    mkdir -p "$SORRI_CACHE_DIR/$manifest_hash"
    # create the file atomically
    mv -f "$tmpmanifest" "$SORRI_CACHE_DIR/$manifest_hash/manifest"
    link="$SORRI_CACHE_DIR/$manifest_hash/link"
    mv -f "$buildout" "$link"
    rmdir "$(dirname "$buildout")"
    # Register the shell build as a GC root
    nix-store --indirect --add-root "$link" -r "$link"
    sorri_log created cached shell "$manifest_hash"
    sorri_import_link_of "$SORRI_CACHE_DIR/$manifest_hash"
}
# Load the environment variables saved in a cache entry by importing the link
# file.
# Arguments: $1 - cache entry directory (must contain 'manifest' and 'link')
# Side effects: registers every manifest entry with direnv's watch_file, then
# sources the link, exporting the cached shell's variables (filtered below)
# into the current environment.
sorri_import_link_of() {
    manifest="$1/manifest"
    if [ ! -f "$manifest" ]; then
        sorri_abort no manifest found at "$manifest"
    fi
    link="$1"/link
    if [ ! -f "$link" ]; then
        sorri_abort no link found at "$link"
    fi
    sorri_debug importing manifest "$manifest" and link "$link"
    # read the manifest line by line and issue direnv `watch_file` calls for
    # every file
    while IFS= read -r watched; do
        watched_file=${watched%:*}
        sorri_debug adding file "$watched_file" to watch
        watch_file "$watched_file"
    done <"$manifest"
    # this overrides Bash's 'declare -x'. The 'link' is a bash that calls
    # 'declare -x' (== export) on every environment variable in the built
    # shell, but there are some variables (PATH, HOME) that we don't actually
    # want to inherit from the shell.
    function declare() {
        if [ "$1" == "-x" ]; then shift; fi
        # Some variables require special handling.
        case "$1" in
            # vars from: https://github.com/NixOS/nix/blob/92d08c02c84be34ec0df56ed718526c382845d1a/src/nix-build/nix-build.cc#L100
            "HOME="*) ;;
            "USER="*) ;;
            "LOGNAME="*) ;;
            "DISPLAY="*) ;;
            "PATH="*)
                # here we don't use PATH_add from direnv because it's too slow
                # https://github.com/direnv/direnv/issues/671
                PATH="${1#PATH=}:$PATH";;
            "TERM="*) ;;
            "IN_NIX_SHELL="*) ;;
            "TZ="*) ;;
            "PAGER="*) ;;
            "NIX_BUILD_SHELL="*) ;;
            "SHLVL="*) ;;
            # vars from: https://github.com/NixOS/nix/blob/92d08c02c84be34ec0df56ed718526c382845d1a/src/nix-build/nix-build.cc#L385
            "TEMPDIR="*) ;;
            "TMPDIR="*) ;;
            "TEMP="*) ;;
            "TMP="*) ;;
            # vars from: https://github.com/NixOS/nix/blob/92d08c02c84be34ec0df56ed718526c382845d1a/src/nix-build/nix-build.cc#L421
            "NIX_ENFORCE_PURITY="*) ;;
            # vars from: https://www.gnu.org/software/bash/manual/html_node/Bash-Variables.html (last checked: 2019-09-26)
            # reported in https://github.com/target/lorri/issues/153
            "OLDPWD="*) ;;
            "PWD="*) ;;
            "SHELL="*) ;;
            # some stuff we don't want set
            # TODO: find a proper way to deal with this
            "__darwinAllowLocalNetworking="*) ;;
            "__impureHostDeps="*) ;;
            "__propagatedImpureHostDeps="*) ;;
            "__propagatedSandboxProfile"*) ;;
            "__sandboxProfile="*) ;;
            "allowSubstitutes="*) ;;
            "buildInputs="*) ;;
            "buildPhase"*) ;;
            "builder="*) ;;
            "checkPhase="*) ;;
            "cmakeFlags="*) ;;
            "configureFlags="*) ;;
            "depsBuildBuild="*) ;;
            "depsBuildBuildPropagated="*) ;;
            "depsBuildTarget="*) ;;
            "depsBuildTargetPropagated="*) ;;
            "depsHostHost="*) ;;
            "depsHostHostPropagated="*) ;;
            "depsTargetTarget="*) ;;
            "depsTargetTargetPropagated="*) ;;
            "doCheck="*) ;;
            "doInstallCheck="*) ;;
            "dontDisableStatic="*) ;;
            "gl_cv"*) ;;
            "installPhase="*) ;;
            "mesonFlags="*) ;;
            "name="*) ;;
            "nativeBuildInputs="*) ;;
            "nobuildPhase="*) ;;
            "out="*) ;;
            "outputs="*) ;;
            "patches="*) ;;
            "phases="*) ;;
            "postUnpack="*) ;;
            "preferLocalBuild="*) ;;
            "propagatedBuildInputs="*) ;;
            "propagatedNativeBuildInputs="*) ;;
            "rs="*) ;;
            "shell="*) ;;
            "shellHook="*) ;;
            "src="*) ;;
            "stdenv="*) ;;
            "strictDeps="*) ;;
            "system="*) ;;
            "version="*) ;;
            # pretty sure these can stay the same
            "NIX_SSL_CERT_FILE="*) ;;
            "SSL_CERT_FILE="*) ;;
            *) export "${@?}" ;;
        esac
    }
    # shellcheck disable=1090
    . "$link"
    # drop the 'declare' override again
    unset declare
}
# Checks whether cache entry $1 is still valid: every "<file>:<hash>" line in
# its manifest must name an existing file whose current nix-hash matches.
# Returns 0 when all entries match, 1 on the first mismatch.
sorri_check_manifest_of() {
    sorri_debug "looking for manifest in $1"
    if [ ! -f "$1"/manifest ]; then
        sorri_abort "error: no manifest in $1"
    fi
    local entry entry_file entry_hash
    # Return directly on the first stale entry instead of tracking a flag.
    while IFS= read -r entry; do
        sorri_debug "read: $entry"
        entry_file=${entry%:*}
        entry_hash=${entry#*:}
        sorri_debug "file: '$entry_file'"
        sorri_debug "hash: '$entry_hash'"
        if [ -f "$entry_file" ] \
            && [ "$(nix-hash "$entry_file" | tr -d '\n')" = "$entry_hash" ]; then
            sorri_debug "$entry_file" "($entry_hash)" "ok"
        else
            sorri_debug "$entry_file" "($entry_hash)" "not ok"
            sorri_debug giving up on "$1"
            return 1
        fi
    done <"$1/manifest"
    return 0
}
# Lists the directories at "$1", most recent first.
# Output: one immediate subdirectory per line, newest modification first.
# Three branches cover the tool variants: GNU find, BSD find + GNU stat,
# and pure Darwin/BSD find + stat -- their flag sets are incompatible.
sorri_find_recent_first() {
    if find --help 2>/dev/null | grep GNU >/dev/null; then
        # this assumes find and stat are the GNU variants
        find "$1" \
            -maxdepth 1 -mindepth 1 \
            -type d -printf "%T+\t%p\n" \
            | sort -r \
            | cut -f 2-
    elif stat --help 2>/dev/null | grep GNU>/dev/null; then
        # this assumes BSD find and GNU stat
        find "$1" \
            -maxdepth 1 -mindepth 1 \
            -type d -exec sh -c 'stat -c "%Y {}" {}' \; \
            | sort -rn \
            | cut -d ' ' -f 2-
    else
        # this assumes find and stat are the Darwin variants
        find "$1" \
            -maxdepth 1 -mindepth 1 \
            -type d -exec stat -lt "%Y-%m-%d" {} \+ \
            | cut -d' ' -f6- \
            | sort -rn \
            | cut -d ' ' -f 2-
    fi
}
# Removes all cache entries except the $1 (default 5) most recently used
# ones, LRU style.
sorri_prune_old_entries() {
    local keep=${1:-5}
    local stale
    # everything past the first $keep entries (newest first) is stale
    while IFS= read -r stale; do
        sorri_log removing old cache entry "$stale"
        # deliberately avoid 'rm -rf' so a bad path cannot wipe anything
        # beyond the two known files
        rm "$stale"/manifest
        rm "$stale"/link
        rmdir "$stale"
    done < <(sorri_find_recent_first "$SORRI_CACHE_DIR" | tail -n +"$(( keep + 1 ))")
}
# Entry point.  Optional $1 names the cache (defaults to $SORRI_CACHE_NAME or
# "global").  Resolves the versioned cache directory, warns about entries
# left by other sorri versions, then either imports the newest matching
# cached shell or builds a fresh one.
sorri_main() {
    if [[ $# == 0 ]]
    then
        SORRI_CACHE_NAME="${SORRI_CACHE_NAME:-global}"
    elif [[ $# == 1 ]]
    then
        SORRI_CACHE_NAME="$1"
    else
        sorri_abort "OH NOOOO"
    fi
    sorri_debug SORRI_CACHE_NAME "$SORRI_CACHE_NAME"
    # ~/.cache/sorri/<project>/v42
    SORRI_CACHE_DIR_PREFIX="${SORRI_CACHE_DIR_PREFIX:-$HOME/.cache/sorri/${SORRI_CACHE_NAME}}"
    sorri_debug SORRI_CACHE_DIR_PREFIX "$SORRI_CACHE_DIR_PREFIX"
    # NOTE: change version here
    # ~/.cache/sorri/<project>/v42
    SORRI_CACHE_DIR="${SORRI_CACHE_DIR_PREFIX}/v2"
    sorri_debug SORRI_CACHE_DIR "$SORRI_CACHE_DIR"
    mkdir -p "$SORRI_CACHE_DIR"
    # If there are old entries, then tell user to delete it to avoid zombie
    # roots
    while IFS= read -r old_cache_entry; do
        sorri_log_bold please delete "$old_cache_entry" unless you plan on going back to older sorri versions
    done < <(find "$SORRI_CACHE_DIR_PREFIX" -mindepth 1 -maxdepth 1 -type d -not -wholename "$SORRI_CACHE_DIR")
    if ! command -v nix &>/dev/null; then
        sorri_abort nix executable not found
    fi
    # The Nix evaluation may be using `lib.inNixShell`, so we play the game
    export IN_NIX_SHELL=impure
    accepted=""
    sorri_log looking for matching cached shell in "$SORRI_CACHE_DIR"
    # Scan newest-first and take the first entry whose manifest still matches.
    while IFS= read -r candidate; do
        sorri_debug checking manifest "$candidate"
        if sorri_check_manifest_of "$candidate"; then
            sorri_debug accepting sorri cache "$candidate"
            touch "$candidate" # label as most recently used
            accepted="$candidate"
            break
        fi
    done < <(sorri_find_recent_first "$SORRI_CACHE_DIR")
    if [ -n "$accepted" ]; then
        sorri_log using cache created "$(date -r "$accepted")" "($(basename "$accepted"))"
        sorri_import_link_of "$accepted"
    else
        sorri_log no candidate accepted, creating manifest
        sorri_create_manifest
        # we only keep the 5 latest entries to avoid superfluous cruft in $TMP and
        # Nix GC roots.
        sorri_prune_old_entries 5
    fi
}
sorri_main "$@"
| true |
71f8c99825924cbda0751914b1fdb8ae45923377 | Shell | surdeus/de | /x/trm/xmux | UTF-8 | 375 | 3.015625 | 3 | [
"MIT"
] | permissive | #!/bin/env rc
# Script to run tmuxed xtrm.
# Splits the command line into xtrm options (collected in $args) and the
# command to run inside the new tmux session: option parsing stops at the
# first non-flag word, or explicitly at -e.
args = ()
leave = 0
while(~ $leave 0){
  switch($1){
  case -*
    switch($1){
    case -c -f -g -n -o -T -t -w -l
      # Flag that takes a value: forward both the flag and its argument.
      args = ($args $1 $2)
      # NOTE(review): `~ $#1 2` tests the element count of $1, which for a
      # single flag word is 1 — presumably this decides whether to shift
      # once or twice; confirm against rc(1) list semantics.
      if(~ $#1 2)
        shift
      if not {shift;shift}
    case -e
      # -e ends option parsing; everything after it is the command.
      shift; leave = 1
    case *
      # Boolean flag: forward unchanged.
      args = ($args $1)
      shift
    }
  case *
    # First non-flag word: stop parsing, leave it for tmux.
    leave = 1
  }
}
# Run xtrm with the collected options, starting tmux inside it with the
# remaining words as the session command.
exec xtrm $args -e tmux new-session $*
| true |
45fe60a9b8f3734bc1be5821ed56b94bcb2fb1b7 | Shell | bgamari/ghc-utils | /git-clone-workdir | UTF-8 | 329 | 3.296875 | 3 | [] | no_license | #!/bin/bash -e
# Create a git-new-workdir checkout of $1 at $2 and wire its submodules
# to clone from the parent checkout instead of the network.
#   $1 - path to the parent working directory (must exist)
#   $2 - destination for the new working directory
# Fix: quote all expansions so paths with spaces do not word-split.
parent=$(realpath -- "$1")
dest=$2
git new-workdir "$parent" "$dest"
cd "$dest"
git submodule init
# Point each registered submodule's URL at the parent's local copy.
# NOTE: $(git submodule status | awk ...) word-splits — assumes submodule
# paths contain no whitespace, which git conventionally enforces.
for sm in $(git submodule status | awk '{print $2}'); do
  c=submodule.$sm.url
  git config --local --replace-all "$c" "$parent/$sm"
  git submodule update --init "$sm"
  git submodule sync "$sm"
done
git submodule update
| true |
48492c660a1e3ea1ff3574f565f9bd2ee49426d5 | Shell | styczynski/docker-eniam | /eniam_src/tools/wosedon/wosedon/wosedon/apps/eval-rec.sh | UTF-8 | 771 | 2.703125 | 3 | [] | no_license | #!/bin/bash
# Run wosedon-eval (then prec-general) once per ranking-percentage value,
# writing result/precision/recall CSVs named after the percentage.
EVAL_PREC=(10 20 30 40 50 60 70 80 90 100)
INDEX_FILE=/mnt/data/korpusy/kpwr-1.1_wsd_+_disamb_GTPersPRNormItModVRankNorm/index_wsd_pelnasciezka.txt
FILENAME_PREFIX=GTPersPRNormItModVRankNorm-rankperc
# Fix: quote the array expansion (SC2068) so each element stays one word.
for pe in "${EVAL_PREC[@]}"
do
  echo "Running wosed-eval for percentage of ranking: ${pe}..."
  wosedon-eval \
    -d /home/pkedzia/repos/grafon/cfg/pkedzia-localhost.db \
    -i "${INDEX_FILE}" \
    -r "${FILENAME_PREFIX}-${pe}-res.csv" \
    -p "${FILENAME_PREFIX}-${pe}-prec.csv" \
    -rc "${FILENAME_PREFIX}-${pe}-rec.csv" \
    -pr "${pe}" \
    -t nkjp 2> /dev/null # stderr deliberately discarded (tool is noisy)
  echo "Running prec-general for percentage of ranking: ${pe}..."
  python ../prec-general.py \
    -p "${FILENAME_PREFIX}-${pe}-prec.csv" \
    > "${FILENAME_PREFIX}-${pe}-prec-general.csv"
done
| true |
f9956cfcf5d8f99d12c380b30a7c43cf12a4ce54 | Shell | woongchoi84/setup | /ubuntuSetup/bug_fix.sh | UTF-8 | 640 | 3.234375 | 3 | [] | no_license | #! /bin/bash
# ==================================================
# firefox downgrade
# ==================================================
# Downgrade Firefox to 69.0: fetch the official tarball, unpack it to
# /opt, and point /usr/lib/firefox/firefox at it (keeping the original
# binary as firefox_bug).
# Fix: abort on the first failed step — previously a failed download
# still went on to mv/ln and broke the installed firefox.
set -e
# [Make Backup File]
wget -O "$HOME/firefox.tar.bz2" "https://ftp.mozilla.org/pub/firefox/releases/69.0/linux-x86_64/en-US/firefox-69.0.tar.bz2"
sudo tar xjf "$HOME/firefox.tar.bz2" -C /opt/
sudo mv /usr/lib/firefox/firefox /usr/lib/firefox/firefox_bug
sudo ln -s /opt/firefox/firefox /usr/lib/firefox/firefox
rm "$HOME/firefox.tar.bz2"
# [Completed Message]
echo "=================================================="
echo " firefox (69.0) has been installed"
echo "=================================================="
| true |
42e17e294f6958c713b110490d6d11b8659ab1c4 | Shell | ttotev/aws-dev-box | /update_devbox.sh | UTF-8 | 439 | 2.859375 | 3 | [
"Apache-2.0"
] | permissive | if [ -z "$AWS_DEVBOX_SUBNETID" ]
then
echo "\$AWS_DEVBOX_SUBNETID is empty"
exit 1
fi
# Require the VPC id before attempting the stack update.
# Fixes: diagnostics now go to stderr, and the parameter values are
# quoted so ids containing shell metacharacters cannot word-split.
if [ -z "$AWS_DEVBOX_VPCID" ]
then
  echo "\$AWS_DEVBOX_VPCID is empty" >&2
  exit 1
fi
# Update the existing "Dev-Box" stack in place, reusing its template and
# only refreshing the subnet/VPC parameters.
aws cloudformation update-stack --stack-name "Dev-Box" \
  --use-previous-template \
  --parameters ParameterKey=SubnetId,ParameterValue="$AWS_DEVBOX_SUBNETID" ParameterKey=VpcId,ParameterValue="$AWS_DEVBOX_VPCID" \
  --capabilities CAPABILITY_IAM
df1f171e6ef25a21b7005e3c0005ddaccde76b91 | Shell | NicolasHo/openvkl | /gitlab/source-scan-protex.sh | UTF-8 | 694 | 3.09375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Run a Protex IP scan over the checked-out sources and fail the job if
# the scan errors or leaves files pending identification.
PROTEX_HOME=/NAS/tools/ip_protex/
PROTEX_ROOT=$PROTEX_HOME/ip_protex_7.1.3/protexIP
BDSTOOL=$PROTEX_ROOT/bin/bdstool
PROTEX_PROJECT_NAME=c_openvkl_20491
SRC_PATH=$CI_PROJECT_DIR/
# Protex stores its state under this java user.home.
export _JAVA_OPTIONS=-Duser.home=$PROTEX_HOME/protex_home
# enter source code directory before scanning
# Fix: quote the path and bail out if cd fails — otherwise the scan
# would silently run from the wrong directory.
cd "$SRC_PATH" || exit 1
# |& tees both stdout and stderr into the log; the greps below detect
# failures from the log because tee masks bdstool's exit status.
"$BDSTOOL" new-project "$PROTEX_PROJECT_NAME" |& tee ip_protex.log
if grep -q "command failed" ip_protex.log; then
  exit 1
fi
"$BDSTOOL" analyze |& tee -a ip_protex.log
if grep -q "command failed" ip_protex.log; then
  exit 1
fi
# Any "Files pending identification: N" line means unreviewed code.
if grep -E "^Files pending identification: [0-9]+$" ip_protex.log; then
  echo "Protex scan FAILED!"
  exit 1
fi
echo "Protex scan PASSED!"
exit 0
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.