blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
a72ceb9da650c7b14a6029b4c68f2c44d334155b
|
Shell
|
ghuntley/monorepo
|
/third_party/git/t/t2104-update-index-skip-worktree.sh
|
UTF-8
| 1,181
| 3
| 3
|
[
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"LGPL-2.1-only",
"GPL-3.0-only",
"GPL-2.0-only",
"MIT"
] |
permissive
|
#!/bin/sh
#
# Copyright (c) 2008 Nguyễn Thái Ngọc Duy
#
test_description='skip-worktree bit test'
. ./test-lib.sh
# Split index would interfere with the index-version assertions below.
sane_unset GIT_TEST_SPLIT_INDEX
test_set_index_version 3
# Expected 'git ls-files -t' output with no skip-worktree bits set.
cat >expect.full <<EOF
H 1
H 2
H sub/1
H sub/2
EOF
# Expected output once 1 and sub/1 carry the skip-worktree bit (S).
cat >expect.skip <<EOF
S 1
H 2
S sub/1
H sub/2
EOF
test_expect_success 'setup' '
mkdir sub &&
touch ./1 ./2 sub/1 sub/2 &&
git add 1 2 sub/1 sub/2 &&
git ls-files -t | test_cmp expect.full -
'
# Without any skip-worktree entries git should write index format v2.
test_expect_success 'index is at version 2' '
test "$(test-tool index-version < .git/index)" = 2
'
test_expect_success 'update-index --skip-worktree' '
git update-index --skip-worktree 1 sub/1 &&
git ls-files -t | test_cmp expect.skip -
'
# skip-worktree entries require index format v3.
test_expect_success 'index is at version 3 after having some skip-worktree entries' '
test "$(test-tool index-version < .git/index)" = 3
'
test_expect_success 'ls-files -t' '
git ls-files -t | test_cmp expect.skip -
'
test_expect_success 'update-index --no-skip-worktree' '
git update-index --no-skip-worktree 1 sub/1 &&
git ls-files -t | test_cmp expect.full -
'
# Clearing the bits should let git downgrade the index format again.
test_expect_success 'index version is back to 2 when there is no skip-worktree entry' '
test "$(test-tool index-version < .git/index)" = 2
'
test_done
| true
|
843b3b70ced6b8c5ee039cbb530b7e6eebc87165
|
Shell
|
fadushin/esp8266
|
/micropython/bin/upload.sh
|
UTF-8
| 491
| 3.78125
| 4
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
# Upload one or more files to a MicroPython board over WebREPL.
#
# Usage: upload.sh <ip-address> [<file>]*
# With no file arguments, every *.py file in the current directory is sent.
NARGS=$#
if [ ${NARGS} -lt 1 ] ; then
    echo "Syntax: $0 <ip-address> [<file>]*"
    exit 1
fi
IP_ADDRESS=$1
shift
if [ ${NARGS} -gt 1 ] ; then
    # Explicit file list given on the command line.
    FILES=$@
else
    # Default: all Python sources in the current directory.
    FILES=$(ls *.py)
fi
if [ ! "$(which webrepl_cli.py)" ] ; then
    if [ -e "/work/src/github/micropython/webrepl/webrepl_cli.py" ]; then
        PATH=/work/src/github/micropython/webrepl:$PATH
    else
        # BUG FIX: 'exit' requires a numeric status; the original
        # 'exit "webrepl_cli.py not found"' printed a shell error instead
        # of the intended diagnostic (and the following 'exit 1' was dead).
        echo "webrepl_cli.py not found" >&2
        exit 1
    fi
fi
for i in ${FILES}; do
    webrepl_cli.py $i ${IP_ADDRESS}:/${i}
done
| true
|
604b584474c7f30638a65f53c7887a8f5dc49c9a
|
Shell
|
JokerFap/xPanel
|
/scripts/menu.bak/vpssim-phuc-hoi-database-sql
|
UTF-8
| 4,725
| 3.375
| 3
|
[] |
no_license
|
#!/bin/sh
# VPSSIM menu script: restore a MySQL database from a *.sql dump.
# Prompts for database name, user, password and dump path, runs the import,
# then compares the database's on-disk size before and after the import to
# guess whether the restore actually changed anything.
clear
echo "========================================================================="
echo "              VPSSIM - Manage VPS/Server by VPSSIM.COM                   "
echo "========================================================================="
echo "                        Restore Database                                 "
echo "========================================================================="
echo""
echo ""
echo "File Backup Database must be *.SQL"
echo "If using other formats, this function will not work correctly"
echo "========================================================================="
echo "You must create the database before recovering "
echo "Create Database in [Database Manage]"
echo "-------------------------------------------------------------------------"
read -r -p "You have created the database before recovering ? [y/N] " response
case $response in
[yY][eE][sS]|[yY])
clear
echo "-------------------------------------------------------------------------"
echo "Prepare the database recovery process ... "
echo "-------------------------------------------------------------------------"
sleep 3
echo -n "Type in the database you want to restore [ENTER]: "
read namedatabase
if [ "$namedatabase" = "" ]; then
clear
echo "========================================================================="
echo "You typed wrong, Please fill accurately"
/etc/vpssim/menu/vpssim-sao-luu-phuc-hoi-database
exit
fi
# database name: the MySQL data directory must already contain it
if [ ! -f /var/lib/mysql/$namedatabase/db.opt ]; then
clear
echo "========================================================================="
echo "Did not find out $namedatabase on server "
echo "-------------------------------------------------------------------------"
echo "please check again!"
/etc/vpssim/menu/vpssim-sao-luu-phuc-hoi-database
exit
fi
# Record the database's current size for the before/after comparison.
du -sh /var/lib/mysql/$namedatabase | awk 'NR==1 {print $1}'> /tmp/vpssim1
echo "-------------------------------------------------------------------------"
echo -n "Type user of Database [ENTER]: "
read userdatabase
if [ "$userdatabase" = "" ]; then
clear
echo "========================================================================="
echo "You typed wrong, Please fill accurately"
/etc/vpssim/menu/vpssim-sao-luu-phuc-hoi-database
exit
fi
echo "-------------------------------------------------------------------------"
echo -n "Type the password of Database [ENTER]: "
read passdatabase
if [ "$passdatabase" = "" ]; then
clear
echo "========================================================================="
echo "You typed wrong, Please fill accurately"
echo "========================================================================="
/etc/vpssim/menu/vpssim-sao-luu-phuc-hoi-database-phuc-hoi-database
exit
fi
echo "========================================================================="
echo "Type the path of Database backup file"
echo "For simplicity , you should upload the backup file to Home folder"
echo "Database backup path will be similar: /home/hostingaz.sql"
echo "========================================================================="
echo -n "Type the path of Database backup file [ENTER]: "
read dddatabase
if [ "$dddatabase" = "" ]; then
clear
echo "========================================================================="
echo "You typed wrong, Please fill accurately"
/etc/vpssim/menu/vpssim-sao-luu-phuc-hoi-database
exit
fi
if [ ! -f $dddatabase ]; then
clear
echo "========================================================================="
echo "Link backup of $namedatabase incorrect, please check back!"
/etc/vpssim/menu/vpssim-sao-luu-phuc-hoi-database
exit
fi
mysql -u $userdatabase -p$passdatabase $namedatabase < $dddatabase
du -sh /var/lib/mysql/$namedatabase | awk 'NR==1 {print $1}'> /tmp/vpssim2
check1=`cat /tmp/vpssim1`
check2=`cat /tmp/vpssim2`
# BUG FIX: '==' is a bashism that errors out under POSIX sh (e.g. dash),
# which made this test always fall into the success branch; use '='.
# Unchanged size after the import -> assume the restore did nothing.
if [ "$check1" = "$check2" ]; then
rm -rf /tmp/vpssim1
rm -rf /tmp/vpssim2
clear
echo "========================================================================="
echo "Restore database $namedatabase failed "
echo "-------------------------------------------------------------------------"
echo "Please check file backup's format and $namedatabase's infomation"
/etc/vpssim/menu/vpssim-sao-luu-phuc-hoi-database
else
rm -rf /tmp/vpssim1
rm -rf /tmp/vpssim2
clear
echo "========================================================================="
echo "Restored database $namedatabase successfully !"
/etc/vpssim/menu/vpssim-sao-luu-phuc-hoi-database
exit
fi
;;
*)
echo ""
;;
esac
clear
echo "========================================================================="
echo "Please create Database before you restore from the backup file"
/etc/vpssim/menu/vpssim-them-xoa-database
exit
# BUG FIX: the original ended with an unmatched 'fi' here (a latent syntax
# error, only masked by the 'exit' above); it has been removed.
| true
|
ff2adb2534ad94ab1a026502f12c778ee98ca6b0
|
Shell
|
bluebird1999/nec-script
|
/update_nec-common
|
UTF-8
| 266
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/bash
# Rebuild the nec-common Maven module and install it into the local repo.
set -x
user=$(whoami)
SOURCE_NEC_COMMON="/home/${user}/working/java/nec-common"
echo "----------------------------------"
echo "Deploying nec-common to the common!"
echo "recompile the code..."
# Work from the module's source tree, then run the standard Maven phases.
cd "${SOURCE_NEC_COMMON}"
mvn clean
mvn package
mvn install
| true
|
f7da531c75617b0318aba50b80833e16e9010d7b
|
Shell
|
dhawal1939/Assignments-And-Lab-Codes
|
/Free_Open_Source_Software_LAB/13-4.sh
|
UTF-8
| 174
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
# Ask the user for a directory path and report how many entries it contains.
echo Enter Directory path
# -r: do not mangle backslashes in the typed path.
read -r x
# BUG FIX: the path was unquoted (broke on spaces) and the result was
# checked via the '$?' antipattern; test the ls invocation directly.
if ls "$x" >/dev/null; then
echo "Number of files $(ls "$x" | wc -l)"
else
echo File path is invalid or file doesnot exist
fi
| true
|
9d80e4609376bc98e43badb752ab2c8c1fc25786
|
Shell
|
derickson82/brimud
|
/brimud/src/main/scripts/brimud
|
UTF-8
| 510
| 3.8125
| 4
|
[] |
no_license
|
#! /bin/sh
# /etc/init.d/brimud
#
# Minimal init script: starts the brimud jar in the background and records
# its PID so that 'stop' can terminate it later.
export BRIMUD_HOME=./
# Some things that run always
touch /var/lock/brimud
# Carry out specific functions when asked to by the system
case "$1" in
start)
echo "Starting script brimud "
# Launch detached; $! is the PID of the backgrounded java process.
java -jar $BRIMUD_HOME/brimud.jar &
echo $! > $BRIMUD_HOME/brimud.pid
;;
stop)
echo "Stopping script brimud"
# Kill the recorded PID, then discard the stale pid file.
kill `cat $BRIMUD_HOME/brimud.pid`
rm $BRIMUD_HOME/brimud.pid
;;
*)
echo "Usage: /etc/init.d/brimud {start|stop}"
exit 1
;;
esac
| true
|
2b855d7a8a726838fac63a578cc82b5c329c4f92
|
Shell
|
twetto/iq-neuron
|
/PKGBUILD
|
UTF-8
| 847
| 2.90625
| 3
|
[
"MIT"
] |
permissive
|
# Maintainer: twetto <franky85912@gmail.com>
pkgname=iq-neuron
pkgver=0.2.2
pkgrel=1
pkgdesc="A library for IQIF"
arch=('any')
url="https://github.com/twetto/iq-neuron"
license=('MIT')
depends=('openmp')
makedepends=('git' 'gcc' 'cmake') # 'bzr', 'git', 'mercurial' or 'subversion'
source=('git+https://github.com/twetto/iq-neuron.git')
sha256sums=('SKIP')
# Please refer to the 'USING VCS SOURCES' section of the PKGBUILD man page for
# a description of each element in the source array.
# Derive the package version from 'git describe' ('v' prefix stripped,
# dashes converted to dots; see the sed script for the exact rewrite).
pkgver() {
cd "$srcdir/${pkgname}"
# Git, tags available
printf "%s" "$(git describe --long --tags | sed 's/^v//;s/\([^-]*-g\)/r\1/;s/-/./g')"
}
# Out-of-source cmake build into ./build with /usr install prefix.
build() {
cd "$srcdir/${pkgname}"
cmake -B build -S "$srcdir/${pkgname}" \
-DCMAKE_INSTALL_PREFIX='/usr'
make -C build
}
# Stage the build artifacts into the packaging root ($pkgdir).
package() {
cd "$srcdir/${pkgname}/build"
make DESTDIR="$pkgdir/" install
}
| true
|
4ba4d2b7d8ab86877657516fd1433639ce0e5b8c
|
Shell
|
avr-aics-riken/PMlib
|
/doc/scripts/Kcomputer/x.user-cpp-K-mpi.sh
|
UTF-8
| 1,315
| 2.90625
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# Compile the PMlib tutorial MPI example on the K computer with the Fujitsu
# cross compiler (mpiFCCpx), linking against PMlib, PAPI and OTF.
# source /home/system/Env_base
set -x
PACKAGE_DIR=${HOME}/pmlib/package
PMLIB_DIR=${HOME}/pmlib/usr_local_pmlib/K
PMLIB_LDFLAGS="-L${PMLIB_DIR}/lib -lPMmpi "
PAPI_DIR="yes"
# PAPI_DIR=/opt/FJSVXosDevkit/sparc64fx/target/usr/lib64
# PAPI_LDFLAGS="-lpapi -lpfm "
PAPI_LDFLAGS="-lpapi_ext -Wl,-Bstatic,-lpapi,-lpfm,-Bdynamic "
# mpi*px compilers automatically searches PAPI include/ and lib64/ directories
# (translated) On the K computer, PAPI is installed at:
#   login nodes:   /opt/FJSVXosDevkit/sparc64fx/target/usr/lib64
#   compute nodes: /usr/lib64
OTF_DIR=${HOME}/otf/usr_local_otf/opt-1.12.5
OTF_LDFLAGS="-lotf_ext -L${OTF_DIR}/lib -lopen-trace-format -lotfaux "
LDFLAGS+=" ${PMLIB_LDFLAGS} ${PAPI_LDFLAGS} ${OTF_LDFLAGS} -w "
PMLIB_INCLUDES="-I${PMLIB_DIR}/include "
OTF_INCLUDES="-I${OTF_DIR}/include/open-trace-format "
# NOTE(review): PAPI_INCLUDES is never set; presumably the compiler's
# automatic PAPI search (see note above) covers it -- confirm.
INCLUDES="${PMLIB_INCLUDES} ${PAPI_INCLUDES} ${OTF_INCLUDES}"
# Build in a scratch directory from a clean copy of the tutorial source.
WRK_DIR=${HOME}/tmp/check_pmlib
mkdir -p $WRK_DIR
cd $WRK_DIR; if [ $? != 0 ] ; then echo '@@@ Directory error @@@'; exit; fi
rm -f $WRK_DIR/*
cp ${PACKAGE_DIR}/doc/src_tutorial/main_mpi.cpp main.cpp
CXXFLAGS="-Kopenmp,fast -Ntl_notrt -w "
LDFLAGS="${LDFLAGS} --linkfortran"
mpiFCCpx ${CXXFLAGS} ${INCLUDES} -o a.out.mpi main.cpp ${LDFLAGS}
ls -l
file a.out.mpi
| true
|
8c1f09541d3b64870f87da84961c5cbe22627e1a
|
Shell
|
tuyen81/self_learning
|
/shell/running_package/testcases/ifenslave/ifenslave.sh
|
UTF-8
| 671
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
#==============================================================================
# DESCRIPTION: Modprobe bonding module then attach slave to bonding device
#==============================================================================
# Overall pass/fail flag for the testcase: 0 = pass, 1 = fail.
check=0
# Sources test configuration; provides $ethernet_interface (and presumably
# $log_file and assert_passed) -- CONFIG_DIR comes from the environment.
# TODO confirm against the test runner.
. $CONFIG_DIR/network.sh
echo "Testing ifenslave" > ${log_file}
# Load module bonding
modprobe bonding 2>> ${log_file}
# Attach slave network device to a bonding device
ifenslave bond0 $ethernet_interface >> ${log_file} 2>&1
if [ $? -ne 0 ]; then
check=1
fi
# Check result of testcase
assert_passed $check 0
# Revert to default config
modprobe -r bonding >> ${log_file} 2>&1
/etc/init.d/networking restart
| true
|
e4cec706d27beb2c3d4d1384b465ed20f29f7dfa
|
Shell
|
kyxap1/sonatype-nexus-api
|
/nexus-api.sh
|
UTF-8
| 2,884
| 3.4375
| 3
|
[
"Unlicense"
] |
permissive
|
#!/usr/bin/env bash
#===============================================================================
#
# FILE: nexus-api.sh
#
# USAGE: ./nexus-api.sh
#
# DESCRIPTION: Shell CLI for Nexus Sonatype 2.xx API
#
# OPTIONS: ---
# REQUIREMENTS: apt-get install -y httpie jq
# BUGS: ---
# NOTES: ---
# AUTHOR: Aleksandr Kukhar (kyxap), kyxap@kyxap.pro
# COMPANY: Fasten.com
# ORGANIZATION: Operations
# CREATED: 07/21/2016 23:36
# REVISION: ---
#===============================================================================
set -e
set -o nounset # Treat unset variables as an error
export LANG=C
### ENVIRONMENT VARIABLES
# required vars -- ':?' aborts the script immediately when any is unset
NEXUS_URL="${NEXUS_URL:?}"
NEXUS_USER="${NEXUS_USER:?}"
NEXUS_PASS="${NEXUS_PASS:?}"
# these vars are allowed to be overriden
REPOSITORY="${REPOSITORY:-}"
GROUPID="${GROUPID:-}" ARTIFACTID="${ARTIFACTID:-}" VERSION="${VERSION:-}"
PACKAGING="${PACKAGING:-}"
# Globals populated by validate() below.
ROLE=
NODEID=
TYPE=
# common args to httpie
# NOTE(review): the password ends up on the httpie command line, visible
# via 'ps' -- consider a session/netrc file instead.
QUERY_AUTH="--auth ${NEXUS_USER}:${NEXUS_PASS}"
QUERY_ARGS="--check-status --body --json --follow --timeout 5"
# nexus rest services
POM_API="/service/local/artifact/maven"
SEARCH_API="/service/local/lucene/search"
CONTENT_API="/service/local/artifact/maven/content"
REDIRECT_API="/service/local/artifact/maven/redirect"
RESOLVE_API="/service/local/artifact/maven/resolve"
# artifacts mapping scheme
MAPPING_PLAIN="services-mapping.plain"
MAPPING_JSON="services-mapping.json"
# Build (but do not execute) an httpie command line for a given API path.
# Arguments: $1 API path (default /nopath), $2 HTTP method (default GET),
#            remaining args are httpie request items.
# Results are stored in the HTTPIE and QUERY globals via printf -v.
# NOTE(review): unquoted ${REQUEST_ITEMS} in the final printf expands only
# the array's first element, and the fallback "${@:-${REQUEST_ITEMS}}"
# reads a possibly-unset outer variable under 'set -o nounset' -- verify.
query() {
local API="${1:-/nopath}"
shift
local HTTPIE_ARGS="${QUERY_AUTH} ${QUERY_ARGS}"
local QUERY_METHOD="${1:-GET}"
shift
local QUERY_URL="${NEXUS_URL}${API}"
local REQUEST_ITEMS=( "${@:-${REQUEST_ITEMS}}" )
# Records which constructor (pom/search/...) invoked us, for diagnostics.
local CONSTR="${FUNCNAME[1]:-}"
printf -v HTTPIE -- "${QUERY_METHOD} ${QUERY_URL}"
printf -v QUERY -- "${HTTPIE_ARGS} ${HTTPIE} ${REQUEST_ITEMS}"
}
### HOST VALIDATOR
# Split a hostname of the form <role>-<type><nodeid> (e.g. order-srv666)
# into the ROLE/TYPE/NODEID globals, then verify that "<role>-<type>" is
# listed in services-mapping.plain. Returns 1 for unknown hosts.
validate() {
local hostname="${*:-}"
# sample: hostname=order-srv666
ROLE=${hostname%-*} # ROLE=order
NODEID=${hostname#${hostname/[[:digit:]]*}} # NODEID=666
HYPHEN=${hostname#${ROLE}-} # HYPHEN=srv666 (role prefix stripped)
TYPE=${HYPHEN%${NODEID}} # TYPE=srv
mapfile -t < services-mapping.plain
local re="${ROLE}-${TYPE}"
# BUG FIX: "${MAPFILE[@]}" expands to multiple words, which makes the
# [[ ... =~ ]] test a syntax error whenever the mapping file has more than
# one line; join the lines with "${MAPFILE[*]}" instead. Also fixed the
# 'fround' typo and the message, which printed the first mapping entry
# (${MAPFILE}) instead of the mapping file's name.
[[ "${MAPFILE[*]}" =~ $re ]] || { echo "Hostname not found in services-mapping.plain"; return 1; }
}
### CONSTRUCTORS
# Thin endpoint-specific wrappers around query(); all arguments are
# forwarded unchanged as httpie request items.
pom() {
local REQUEST_ITEMS=( "$@" )
query "${POM_API}" "GET" "${REQUEST_ITEMS[@]}"
}
search() {
local REQUEST_ITEMS=( "$@" )
query "${SEARCH_API}" "GET" "${REQUEST_ITEMS[@]}"
}
content() {
local REQUEST_ITEMS=( "$@" )
query "${CONTENT_API}" "GET" "${REQUEST_ITEMS[@]}"
}
redirect() {
local REQUEST_ITEMS=( "$@" )
query "${REDIRECT_API}" "GET" "${REQUEST_ITEMS[@]}"
}
resolve() {
local REQUEST_ITEMS=( "$@" )
query "${RESOLVE_API}" "GET" "${REQUEST_ITEMS[@]}"
}
| true
|
ceddfead6e8f32788ddc1e296a758d088791724a
|
Shell
|
LongIU/Grafana_Influxdb_build
|
/build.sh
|
UTF-8
| 936
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision a host with docker, then build and start the influxdb and
# grafana containers. $1 is the sudo password, piped into 'sudo -S' for
# every privileged step (comments translated from Chinese).
# Check whether SELinux is disabled; if not, disable it in the config file.
check_SELinux=$(cat /etc/selinux/config | grep "^SELINUX=" |awk -F\= '{print $2}')
[[ ${check_SELinux} != "disabled" ]] && echo ${1} |sudo -S sed -i '/^SELINUX=/c\SELINUX=disabled' /etc/selinux/config
# Install base packages
echo ${1} |sudo -S yum install -y git vim telnet
# Update packages
echo ${1} |sudo -S yum -y update
# Install docker
echo ${1} |sudo -S yum install -y docker
# Enable the docker service at boot
echo ${1} |sudo -S systemctl enable docker
# Start docker
echo ${1} |sudo -S systemctl start docker
# Set up influxdb (copy config, then run its container start script)
mkdir -p influxdb/influxdb
cp conf/influxdb.conf influxdb/influxdb/
cp script/influxdb_run_docker.sh influxdb/influxdb_run_docker.sh
cd influxdb;sh influxdb_run_docker.sh
cd ..
# Set up grafana (copy config, then run its container start script)
mkdir -p grafana/grafana
cp conf/grafana.ini grafana/grafana/grafana.ini
cp script/grafana_run_docker.sh grafana/grafana_run_docker.sh
cd grafana;sh grafana_run_docker.sh
cd ..
| true
|
adc42454c135cce1e0a1341beaa326b903320a82
|
Shell
|
oleks/onlineta
|
/src/jail/sh-iofs
|
UTF-8
| 427
| 3.046875
| 3
|
[] |
no_license
|
#!/usr/bin/env sh
# Run a command inside a tmpfs sandbox: copy ./input into a fresh tmpfs,
# bind-mount it into the jail's home, execute the command, then copy the
# tmpfs contents out to ./output and tear everything down.
TMPFS_TARGET="tmpfs-target"
ROOTFS_TMPFS_TARGET="rootfs-target/home/student"
INPUT="input"
OUTPUT="output"
SIZE=1M
mount -t tmpfs -o size=$SIZE tmpfs "$TMPFS_TARGET" || exit 1
cp -r "$INPUT/"* "$TMPFS_TARGET/" || exit 1
mount --bind "$TMPFS_TARGET" "$ROOTFS_TMPFS_TARGET" || exit 1
# BUG FIX: unquoted $@ re-splits arguments containing whitespace;
# "$@" preserves each argument of the wrapped command as one word.
"$@"
umount "$ROOTFS_TMPFS_TARGET" || exit 1
cp -r "$TMPFS_TARGET/"* "$OUTPUT/" || exit 1
umount "$TMPFS_TARGET" || exit 1
| true
|
fe562909f403c0e9bb563f1a9781209ae4fe94af
|
Shell
|
sailfishos/cross-template-sysroot
|
/precheckin.sh
|
UTF-8
| 433
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/sh
# Generate one cross-<arch>-sysroot.spec per target architecture from the
# cross-template-sysroot.spec template, substituting the placeholders.
ARCHS="aarch64 armv6l armv7l armv7hl armv7thl armv7tnhl mipsel i486 i586"
for x in $ARCHS; do
cp cross-template-sysroot.spec cross-$x-sysroot.spec
sed -i "s/@TARGET_CPU@/$x/g" cross-$x-sysroot.spec
sed -i "s/ExclusiveArch: none/ExclusiveArch: %ix86 x86_64/g" cross-$x-sysroot.spec
# ARM targets use the gnueabi triplet suffix; everything else uses gnu.
case $x in
arm*) sed -i "s/@GNU@/gnueabi/g" cross-$x-sysroot.spec ;;
*) sed -i "s/@GNU@/gnu/g" cross-$x-sysroot.spec ;;
esac
done
| true
|
8f53ab207723ba8bc817a4ff6afb929b08e2efa6
|
Shell
|
thevogel/Elk_Project
|
/Scripts/for_loops.sh
|
UTF-8
| 300
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
#Create Variables
nums=$(echo {0..9})
states=('California' 'Hawaii' 'Colorado' 'Washington' 'Oregon')
#Create a loop that looks for 'Hawaii'
# BUG FIX: the original loop had no closing 'done' (a syntax error that
# kept the whole script from running) and iterated only over ${states[1]},
# so it never actually searched the list as the comment describes.
for state in "${states[@]}"
do
if [ "$state" == 'Hawaii' ];
then
echo "Hawaii is the best!"
else
echo "I'm not a fan of Hawaii."
fi
done
| true
|
b54a59a85a67cc0345b07fdefa0ccdf947c29834
|
Shell
|
Griffin-Ashton/CoralPaper2015
|
/bin/runsourcetracker.sh
|
UTF-8
| 551
| 2.640625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#runall.sh A script to recreate all paper analyses
#Usage: runsourcetracker.sh <outputdir>
#Adam R. Rivers 2015-09-15
# Define output dir
# BUG FIX: the original line was '$OUTPUTDIR="$1"', which tries to execute
# a command named '="..."' instead of assigning the variable, leaving
# OUTPUTDIR empty for every path below.
OUTPUTDIR="$1"
#convert biom file to txt
biom convert -i "$OUTPUTDIR"/otus/filtered_otu_table.biom -o "$OUTPUTDIR"/otus/filtered_otu_table.txt -b
#Run source tracker to remove samples contaminated by sequencing or seawater
R --slave --vanilla --args -i $OUTPUTDIR/otus/filtered_otu_table.txt -m ../data/map_full.txt -o $OUTPUTDIR/otus/sourcetracker_run1 < $SOURCETRACKER_PATH/sourcetracker_for_qiime.r
| true
|
4ca452ea221d1b343354f21bf832660675f33489
|
Shell
|
mingtao13595/NaturalLanguageProcessing
|
/NaturalLanguageProcessing/etc/ex10.sh
|
UTF-8
| 352
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/bash
# NLP exercise 10: count the lines of a file while collecting them into an
# array (comments translated from Japanese).
# File to read
FILENAME="./file/hightemp.txt"
# Array of collected lines
declare -a array=()
# Line counter
row_count=0;
# Read one line at a time (-r: keep backslashes literal)
while read -r LINE;
do
# Integer arithmetic via $((...)) instead of the slower external 'expr'
row_count=$((row_count + 1))
# BUG FIX: unquoted $LINE was word-split, storing individual fields
# rather than whole lines; quote it to keep one array element per line.
array+=("$LINE")
done < "$FILENAME"
# Print the line count
echo $row_count
| true
|
5a4bd91606c47e29b644c0e960f4e0cb9f7c4291
|
Shell
|
decc/national-household-model-core-components
|
/system-tests/make-smaller-stock.sh
|
UTF-8
| 391
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# Stop on first error.
set -e;
### A script to make a smaller stock by passing in aacodes
# BUG FIX: 'test -n $1' is always true when $1 is empty, because it
# degenerates to the one-argument form 'test -n' (testing the literal
# string "-n"); the value must be quoted for the guard to work.
test -n "${1:-}" || exit 1;
# Build a jq query matching any of the given aacodes, quoted and
# comma-separated: a b c -> "a", "b", "c"
AA_CODES="$@";
AA_CODES="\"${AA_CODES// /\", \"}\"";
QUERY="select(.basicAttributes.aacode == (${AA_CODES}))"
echo "Making stock reduced.stock.gz using query ${QUERY}";
zcat EHS2012.json.gz | jq "${QUERY}" | tr '\n' ' ' > reduced.stock;
gzip reduced.stock -f -k;
| true
|
762ff13c14e0dad95fa281eaf5fa2e0a7be9e647
|
Shell
|
FlorianMold/fh-microservice
|
/bundle.sh
|
UTF-8
| 1,441
| 4.28125
| 4
|
[] |
no_license
|
#!/bin/bash
# Build the microservice's docker image and assemble a distributable
# bundle (image tar + helper scripts) in ./dist.
# NOTE(review): relies on DOCKER_MICROSERVICE_NAME, MICROSERVICE_VERSION
# and DOCKER_CONTAINER_NAME from the environment -- confirm the caller
# exports them.
DIST_FOLDER=dist
dockerfile_flag='false'
use_registry_flag='false'
# Print CLI usage to stdout.
print_usage() {
cat <<EOF
init [OPTIONS]
Options:
-h Print the usage.
-d Use the dockerfile for building the docker image.
-r Do not store the image in the dist-folder. Used for deploying the image to a registry.
EOF
}
# Change working directory to script directory
BASEDIR=$(dirname "$0")
cd $BASEDIR
# NOTE(review): '-h' is advertised above but absent from the getopts
# string, so it falls through to '*' (usage + exit 1).
while getopts ':dr' flag; do
case "${flag}" in
d) dockerfile_flag='true' ;;
r) use_registry_flag='true' ;;
*) print_usage
exit 1 ;;
esac
done
# Cleanup & create directory structure
rm -rf $DIST_FOLDER
mkdir $DIST_FOLDER
# Build container: buildpack by default, plain Dockerfile build with -d.
if [[ $dockerfile_flag = 'false' ]]; then
./docker/buildpack.sh
fi
if [[ $dockerfile_flag = 'true' ]]; then
./docker/build.sh
fi
# Without -r, export the built image into the bundle as a tar archive.
if [[ $use_registry_flag = 'false' ]]; then
echo "Saving docker-image to" $DIST_FOLDER
docker save $DOCKER_MICROSERVICE_NAME:$MICROSERVICE_VERSION > $DIST_FOLDER/$DOCKER_CONTAINER_NAME:$MICROSERVICE_VERSION.tar
fi
# Copy helpers
echo "Copy run.sh to" $DIST_FOLDER
cp tools/run.sh $DIST_FOLDER/run.sh
echo "Copy HELP.md to" $DIST_FOLDER
cp tools/HELP.md $DIST_FOLDER/README.md
echo "Copy docker-compose.yml to" $DIST_FOLDER
cp tools/docker-compose.yml $DIST_FOLDER/docker-compose.yml
cp tools/docker-compose.db.yml $DIST_FOLDER/docker-compose.db.yml
echo "Copy .env to" $DIST_FOLDER
cp tools/.env $DIST_FOLDER/.env
| true
|
4aec61957b0a40e22dda3938988ebddf4e86caef
|
Shell
|
mmclane/maelstrom
|
/scripts/provision.sh
|
UTF-8
| 3,799
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/bash
# Provision/decommission Terraform-managed JMeter workers on Azure.
# Requires WORKSPACE and the ARM_* service-principal vars in the env.
# BUG FIX: 'set -xeou pipefail' clusters -o in the middle of the flag
# group, which makes its argument binding ambiguous; spell it canonically
# with -o last so 'pipefail' is unambiguously the option name.
set -euxo pipefail
readonly TERRAFORM_VERSION="0.11.3"
readonly DOWNLOAD_FILE="${WORKSPACE}/tmp/terraform_${TERRAFORM_VERSION}.zip"
readonly DOWNLOAD_URL="https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_linux_amd64.zip"
readonly INSTALL_DIR="${WORKSPACE}/bin"
readonly TF="${INSTALL_DIR}/terraform"
readonly VENV="${INSTALL_DIR}/venv"
# Defaults for the CLI options parsed by getopts at the bottom of the file.
dry_run=false
workers=
job_id=
action=
region=
# Install all tooling this script needs: Terraform, then the Azure CLI.
function setup() {
setup_terraform
setup_azurecli
}
# Create (once) a python3 virtualenv containing the azure-cli, activate it
# and log in with service-principal credentials from the ARM_* env vars.
function setup_azurecli() {
if [[ ! -e ${VENV}/bin/python ]]; then
virtualenv -q -p python3 ${VENV}
${VENV}/bin/pip install -q --upgrade pip setuptools
# Pinning azure-mgmt-datalake-nspkg due to issue:
# https://github.com/Azure/azure-sdk-for-python/issues/3512
${VENV}/bin/pip install -q azure-mgmt-datalake-nspkg\<3 azure-cli~=2.0
fi
# We want to allow this script to do as it pleases I dont have
# Control of unbound variables here
set +u
source ${VENV}/bin/activate
set -u
# Disable xtrace while handling credentials so they are not echoed.
set +x
echo "Authenticating to azure cli"
az login --service-principal -u ${ARM_CLIENT_ID} -p ${ARM_CLIENT_SECRET} --tenant ${ARM_TENANT_ID}
az account set --subscription="${ARM_SUBSCRIPTION_ID}"
set -x
}
# Download and install the pinned Terraform version into ${INSTALL_DIR},
# reusing an existing binary when its version already matches.
function setup_terraform() {
mkdir -p ${WORKSPACE}/tmp
mkdir -p ${WORKSPACE}/bin
if [ -f ${TF} ]; then
if [ "$(${TF} version | grep -c ${TERRAFORM_VERSION})" -eq "1" ]; then
# Correct version already installed; nothing to do.
return
else
rm ${TF}
fi
fi
curl -o ${DOWNLOAD_FILE} ${DOWNLOAD_URL}
unzip ${DOWNLOAD_FILE} -d ${INSTALL_DIR}
rm ${DOWNLOAD_FILE}
}
# Print CLI usage to stdout (heredoc kept verbatim).
function print_help() {
cat <<eof
Usage: provision.sh [OPTIONS]
Options:
-a: (REQUIRED) sets the action of the script, either "provision" or "decommision"
-d: (OPTIONAL) sets the process to be a dry run, no resources will be created or destroyed
Provision Options:
-j: (REQUIRED) sets the job id for the given process
-w: (REQUIRED) sets the amount of workers required for a given job
-r: (REQUIRED) sets the region of the job
eof
}
# Tear down a previously provisioned deployment: unpack the terraform
# directory archived by provision() and run 'terraform destroy'
# (or only a destroy plan when dry_run is set).
function decommission() {
pushd ${WORKSPACE}/tmp
tar xzf tf_files.tar.gz
popd
pushd ${WORKSPACE}/tmp/tf_files
${TF} init -input=false
if [ ${dry_run} = false ]; then
${TF} destroy -force
else
${TF} plan -destroy -input=false
fi
popd
}
# Provision workers with terraform, then archive the terraform directory
# (including state) as tf_files.tar.gz so decommission() can find it later.
# Arguments: $1 run id, $2 worker count, $3 region, $4 dry-run flag.
function provision() {
local run_id=$1
local slave_count=$2
local region=$3
local dry_run=$4
mkdir -p ${WORKSPACE}/tmp/tf_files && rsync -av --copy-links ${WORKSPACE}/terraform/jmeter/ ${WORKSPACE}/tmp/tf_files
pushd ${WORKSPACE}/tmp/tf_files
# *.auto.tfvars files are picked up automatically by terraform.
echo 'run_id = "'${run_id}'"' >> ${run_id}.auto.tfvars
echo 'slave_count = "'${slave_count}'"' >> ${run_id}.auto.tfvars
echo 'region = "'${region}'"' >> ${run_id}.auto.tfvars
${TF} init -input=false
if [ ${dry_run} = false ]; then
${TF} apply -input=false -auto-approve
else
${TF} plan -input=false
fi
popd
pushd ${WORKSPACE}/tmp
tar czf tf_files.tar.gz tf_files
popd
}
# Parse CLI flags (see print_help for their meaning).
while getopts "a:j:w:r:d" opt; do
case $opt in
a)
action="$OPTARG"
;;
j)
job_id="$OPTARG"
;;
w)
workers="$OPTARG"
;;
r)
region="$OPTARG"
;;
d)
dry_run=true
;;
esac
done
if [ -z ${action} ]; then
echo "Action must be specified!" >&2
print_help
exit 1
fi
setup
# 'provision' additionally requires -j, -w and -r; 'decommission' does not.
if [ "$action" = "provision" ]; then
if [ -z ${workers} ]; then
echo "Workers must be specified!" >&2
print_help
exit 1
fi
if [ -z ${job_id} ]; then
echo "Job ID must be specified!" >&2
print_help
exit 1
fi
if [ -z ${region} ]; then
echo "Region must be specified!" >&2
print_help
exit 1
fi
provision $job_id $workers $region $dry_run
elif [ "$action" = "decommission" ]; then
decommission
else
# NOTE(review): any other action silently exits 0 -- confirm intended.
exit 0
fi
| true
|
736962b592943c9fd8d6610dc2272b36d58a8560
|
Shell
|
blueprint-cpp/blueprint
|
/test/functional/blueprint/run-blueprint.sh
|
UTF-8
| 408
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
# Functional test driver: build blueprint, generate the premake workspace,
# then run the Blueprint binary against it.
pushd $(dirname $0) > /dev/null
root=../../..
# build blueprint
$root/build/build.sh
# BUG FIX: the original did 'if [[ $? -ne 0 ]]; then exit $?', but by the
# time 'exit $?' ran, $? held the (successful) status of the [[ ]] test
# itself, so the script always exited 0. Capture the build status first.
rc=$?
if [[ $rc -ne 0 ]]; then
exit $rc
fi
# run blueprint-premake
../common/run-blueprint-premake.sh
# run blueprint
blueprint=$root/output/bin/Debug/Blueprint
workspace=$root/build/blueprint/Blueprint.wks.json
outputdir=../../output/blueprint
$blueprint -l -f $workspace -o $outputdir
popd > /dev/null
| true
|
cad5d9ec479f2b6cb0c5966c2065a009a48de993
|
Shell
|
CipaX/env
|
/common/gen_unix/bin/xx_go.sh
|
UTF-8
| 2,009
| 3.96875
| 4
|
[] |
no_license
|
#!/bin/bash
# xx_go.sh: fuzzy directory jumper. Matches a file/directory path pattern
# (resolution delegated to xx_match_dir.sh) and cd's to the best match.
# Must be run with 'source' so the cd persists; see the -h text below.
if [ "$#" -eq 1 ] && [ "$1" == "-h" ]; then
_CMD="$(basename "${BASH_SOURCE[0]}")"
echo "usage: ${_CMD} <pattern>"
echo ""
echo "${_CMD} - matches the given file/directory path pattern within the base directory subtree and changes the directory to the first best path match."
echo ""
echo "The criteria for choosing the best path depends on the number of links, on their position and on the length of the path."
echo "The base directory is determined by calling a user function with the name obtained from the SMARTPROF_GO_BASE_GET_FUNC env variable. If the variable does not exist, the base directory is the user's home."
echo "First the exact <pattern>, with added leading and trailing wildcards is matched."
echo "If that yields no results the given pattern is then split into separate words (by whitespaces and camelcase rules), with wildcards in-between."
echo ""
echo "<pattern> = part1 [part2 [part3 [...]]]"
echo ""
echo "Important note:"
echo " Execute with source (or ". ") before, in order to keep the new location by avoiding to launch withing a subshell."
echo ""
echo "Examples:"
echo " Consider the following file structure:"
echo " Common/LibAbc/LibImpl/AbcImplFile.cc"
echo " Common/LibAbc/AbcFile.cc"
echo " Prototypes/LibAbc/LibImpl/AbcImplFile.cc"
echo " Prototypes/LibAbc/LibImpl/ProtoFile.cc"
echo ""
echo " ${_CMD} Abc # changes the folder to Common/LibAbc"
echo " ${_CMD} P Abc # changes the folder to Prototypes/LibAbc"
echo " ${_CMD} PAbc # same as ${_CMD} P Abc"
echo " ${_CMD} AbcFile # changes the folder to Common/LibAbc"
echo " ${_CMD} ProtoFile.cc # changes the folder to Prototypes/LibAbc/LibImpl/"
# Do not exit, because it may close the terminal
else
# xx_match_dir.sh prints the chosen path; empty output means no match.
MATCH=$(xx_match_dir.sh $@)
if [[ -n ${MATCH} ]]; then
cd "${MATCH}"
pwd
else
echo "No directory or file matches the given pattern" 1>&2
fi
fi
| true
|
ce10dd037dfa786d1a7e8d48ff755708a5de1b1b
|
Shell
|
vlele/aks-class
|
/m14/snippet.sh
|
UTF-8
| 2,667
| 3.171875
| 3
|
[] |
no_license
|
#***************************************************************************************
## Objective: This module demonstrates the use of ServiceAccount, ClusterRole and ClusterRoleBinding to allow/disallow access to API Server from within a Pod in AKS
#***************************************************************************************
#--> Go to m14 module directory
cd ../m14
# Create and set context to "$namespace" namespace
NAMESPACE=rbac-apiserver
kubectl create namespace $NAMESPACE
# change the default namespace
# Create a Service Account
# 1. Execute the Command below to perform the following steps:
# - Create a custom service account foo
# - Create a role “service-reader” that only has read permissions on services resource
# - Bind the “service-reader” role to foo
# - create a Pod with the custom service principle foo
kubectl apply -f manifests/curl-custom-sa.yaml
# Open a bash shell inside the Pod
kubectl exec curl-custom-sa -c main -it bash
# 2. Execute the below Commands inside the pod and finally run an API command
# The pod's mounted service-account token authenticates the requests below.
token=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)
hostname=kubernetes.default.svc
curl -v --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt -H "Authorization:Bearer $token" https://$hostname:443/api/v1/namespaces/default/services
# Note: The output should contain HTTP/1.1 200 OK
curl -v --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt -H "Authorization:Bearer $token" https://$hostname:443/api/v1/namespaces/default/secrets
# Note: The output should contain HTTP/1.1 200 OK
exit
#--> Illustrate the updated yaml with changed permissions
# - Original Line: resources: ["services", "endpoints", "pods", "secrets"]
# - Updated Line: resources: ["endpoints", "pods", "secrets"]
kubectl apply -f manifests/curl-custom-sa.updated.yaml
# Open a bash shell inside the Pod
kubectl exec curl-custom-sa -c main -it bash
# 4. Execute the below Commands inside the pod and run an API command
token=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)
hostname=kubernetes.default.svc
# Note: The output should contain HTTP/1.1 403 Forbidden
# ('services' was removed from the role's resources in the updated yaml)
curl -v --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt -H "Authorization:Bearer $token" https://$hostname:443/api/v1/namespaces/default/services
# Note: The output should contain HTTP/1.1 200 OK
curl -v --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt -H "Authorization:Bearer $token" https://$hostname:443/api/v1/namespaces/default/secrets
exit
# Cleanup Steps:
kubectl delete namespace $NAMESPACE
| true
|
ab1976ea32c627a10503f09b29a2a7a58382ef8b
|
Shell
|
mdclyburn/scripts
|
/desktop/reencode.sh
|
UTF-8
| 752
| 4.28125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Encode all FLAC files in the current directory to MP3
#
# Arguments =====
# 1: bitrate in Kbps
#
if [ -z "${1:-}" ]
then
printf "reencode <bit rate (Kbps)> [-a]\n"
exit 1
else
if [ -z "${2:-}" ]
then
printf "Will encode %i FLAC files to %iKbps MP3. OK? [y/N] " "$(ls *.flac | wc -l)" "$1"
read -r response
if [ "$response" != "y" ] && [ "$response" != "Y" ]
then
exit 0
fi
fi
fi
# BUG FIX: the original iterated over * (every file in the directory),
# handing non-FLAC files to ffmpeg; only *.flac was ever intended.
for flac_file in *.flac
do
# BUG FIX: sed 's/.flac/.mp3/' treats '.' as a wildcard and replaces the
# first match anywhere in the name; strip the literal suffix instead.
mp3_file="${flac_file%.flac}.mp3"
printf "Re-encoding %s -> %s... " "$flac_file" "$mp3_file"
ffmpeg -loglevel quiet -n -i "$flac_file" -ab ${1}K "$mp3_file"
if [ "$?" != "0" ]
then
printf "Error...\n"
else
printf "Done!\n"
fi
done
printf "Finished encoding %i files.\n" "$(ls *.flac | wc -l)"
exit 0
| true
|
a371251435f4e7a11650bda8beb2b71f2abd26cc
|
Shell
|
InformaticsMatters/docking-validation
|
/remote-exec-scripts/tej-submit.sh
|
UTF-8
| 1,081
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/bash
# Submit a directory as a Tej job to $SERVER, poll until it leaves the
# 'running' state and, on success, download the results into that directory.
set -e
if [ "$#" -ne 1 ]; then
echo "ERROR: Must specify directory with content to submit as the only argument"
exit 1
fi
# Connection settings come from the environment; POLL_INTERVAL and
# USERNAME have sensible defaults.
SERVER=${SERVER}
SSH_KEY=$SSH_KEY
KNOWN_HOSTS=$KNOWN_HOSTS
POLL_INTERVAL=${POLL_INTERVAL:-10}
USERNAME=${USERNAME:-$USER}
DESTINATION=${USERNAME}@${SERVER}
DIR=$1
echo "Submiting job from $DIR"
# start.sh is the entry point tej executes on the remote server.
cp start.sh $DIR
# start job
echo "Submitting Tej job using $DESTINATION ..."
JOB=$(tej submit $DESTINATION $DIR)
echo "Started Tej job $JOB"
# Poll job status every $POLL_INTERVAL seconds until it stops running.
while true
do
STATUS=$(tej status $DESTINATION --id $JOB)
echo "Status: $STATUS"
if [ "$STATUS" == "running" ]; then
sleep $POLL_INTERVAL
else
break
fi
done
echo "Job finished. Status: $STATUS"
# "finished 0" = the remote job exited with status 0.
if [ "$STATUS" == "finished 0" ]; then
echo "Downloading results ..."
cd $DIR
tej download $DESTINATION --id $JOB results
echo "Results downloaded"
#echo "Deleting job ..."
#tej delete $DESTINATION --id $JOB
#echo "Job $JOB deleted"
# Flatten the downloaded results into $DIR itself.
mv results/* .
rm -rf results
else
echo "Job did not complete successfully. Job is not deleted so this can be investigated."
fi
| true
|
21ce55038dd5c4f110d239a718ccc8f18aef8133
|
Shell
|
kelvinheng92/ml-docker-kubernetes
|
/build_push_image.sh
|
UTF-8
| 339
| 2.59375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build the project's Docker image, tag it for Google Container Registry
# and push it.
registry=gcr.io
project=ml-docker-kubernetes
repo=auto
version=1.0.0
# Build the image locally.
docker build -t "${project}:${version}" .
# Grab the id of the freshly built image (newest matching row).
image_id="$(sudo docker images | grep "${project}" | head -n 1 | awk '{print $3}')"
# Tag for the registry path and publish.
docker tag "${image_id}" "${registry}/${project}/${repo}:${version}"
docker push "${registry}/${project}/${repo}:${version}"
| true
|
e5269fe77982904a803184d29bb507833b6bd7d8
|
Shell
|
nyimbi/Flask-DB-Docker
|
/cleanupImages.sh
|
UTF-8
| 357
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# Exit on first error, print all commands.
set -ev
#Detect architecture
# NOTE(review): ARCH and DIR are computed but never used below -- confirm
# whether they are leftovers or consumed by a sourcing script.
ARCH=`uname -m`
# Grab the current directory.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Clean up the Docker images for the system.
# Removes every image under the starter/ namespace, then prunes unused
# networks and volumes.
docker rmi $(docker images starter/* -q)
docker network prune -f
docker volume prune -f
# Your system images are cleaned
| true
|
8f4c12316d5079b28ec87b412ec5dbd809194c17
|
Shell
|
rexblack/docker-shopware
|
/rootfs/usr/local/bin/entrypoint.sh
|
UTF-8
| 990
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/sh
# Shopware container entrypoint.
# With no arguments: install Shopware on first run, then start the web
# server under supervisord. With an argument: run a maintenance command
# (cc, cron, mediamigrate, sh).
# Block until the MySQL server is reachable.
wait-mysql.sh
# First run: config.php does not exist yet, so unpack and install Shopware.
if ! [ -r /var/www/html/config.php ]
then
  echo "Installing Shopware..."
  tar --strip 1 -zxf ${PATH_SW}
  ln -sf /usr/bin/composer composer.phar
  ant -f build/build.xml \
    -Ddb.user=${MYSQL_USER} \
    -Ddb.password=${MYSQL_PASSWORD} \
    -Ddb.name=${MYSQL_DATABASE} \
    -Ddb.host=${MYSQL_HOST} \
    build-unit
  # -n: never overwrite existing files with the demo images.
  unzip -n ${PATH_IMAGES}
  ${PATH_CONSOLE} sw:media:migrate
fi
# `[ "$#" -eq 0 ]` replaces the buggy `[ -z $@ ]`, which errors out as soon
# as more than one argument is passed ("too many arguments").
if [ "$#" -eq 0 ]
then
  echo "Starting Webserver.."
  chown -R www-data:root var/cache/ var/log/ media/ files/ web/cache/
  /usr/bin/supervisord -c /etc/supervisord.conf
else
  # TODO extend me!
  case "$@" in
    cc)
      ${PATH_CONSOLE} sw:cache:clear
      ;;
    cron)
      ${PATH_CONSOLE} sw:cron:run
      ;;
    mediamigrate)
      ${PATH_CONSOLE} sw:media:migrate
      ;;
    sh)
      sh
      ;;
    *)
      echo "Commands: cc, mediamigrate, sh"
      ;;
  esac
fi
| true
|
5785802670bc6423d9726b01d0f9f61e43c5ed67
|
Shell
|
Chrysostomus/manjaro-zsh-config
|
/.zshrc
|
UTF-8
| 382
| 2.65625
| 3
|
[
"MIT"
] |
permissive
|
# Enable the powerline prompt segments.
USE_POWERLINE="true"
# Some glyphs (e.g. the diamond symbol) render at odd widths in this
# setup, so keep wide-character handling disabled.
HAS_WIDECHARS="false"

# Pull in the distro-provided zsh configuration, when installed.
if [[ -e /usr/share/zsh/manjaro-zsh-config ]]; then
  source /usr/share/zsh/manjaro-zsh-config
fi

# Pull in the distro-provided zsh prompt, when installed.
if [[ -e /usr/share/zsh/manjaro-zsh-prompt ]]; then
  source /usr/share/zsh/manjaro-zsh-prompt
fi
| true
|
8c42fdc43536bf6ef795acb46f39229af6443943
|
Shell
|
jarrpa/ocs-operator
|
/hack/source-manifests.sh
|
UTF-8
| 4,009
| 3.59375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# example: ROOK_IMAGE=build-e858f56d/ceph-amd64:latest NOOBAA_IMAGE=noobaa/noobaa-operator:1.1.0 OCS_IMAGE=placeholder CSV_VERSION=1.1.1 hack/generate-manifests.sh
set -e
source hack/common.sh
source hack/operator-sdk-common.sh
function help_txt() {
echo "Environment Variables"
echo " NOOBAA_IMAGE: (required) The noobaa operator container image to integrate with"
echo " ROOK_IMAGE: (required) The rook operator container image to integrate with"
echo ""
echo "Example usage:"
echo " NOOBAA_IMAGE=<image> ROOK_IMAGE=<image> $0"
}
# check required env vars
if [ -z "$NOOBAA_IMAGE" ] || [ -z "$ROOK_IMAGE" ]; then
help_txt
echo ""
echo "ERROR: Missing required environment variables"
exit 1
fi
# always start fresh and remove any previous artifacts that may exist.
rm -rf "$(dirname $OCS_FINAL_DIR)"
mkdir -p "$(dirname $OCS_FINAL_DIR)"
mkdir -p $OUTDIR_TEMPLATES
mkdir -p $OUTDIR_CRDS
mkdir -p $OUTDIR_TOOLS
# ==== DUMP NOOBAA YAMLS ====
function dump_noobaa_csv() {
noobaa_dump_crds_cmd="crd yaml"
noobaa_dump_csv_cmd="olm csv"
noobaa_crds_outdir="$OUTDIR_CRDS/noobaa"
rm -rf $NOOBAA_CSV
rm -rf $noobaa_crds_outdir
mkdir -p $noobaa_crds_outdir
echo "Dumping Noobaa csv using command: $IMAGE_RUN_CMD --entrypoint=/usr/local/bin/noobaa-operator $NOOBAA_IMAGE $noobaa_dump_csv_cmd"
# shellcheck disable=SC2086
($IMAGE_RUN_CMD --entrypoint=/usr/local/bin/noobaa-operator "$NOOBAA_IMAGE" $noobaa_dump_csv_cmd) > $NOOBAA_CSV
echo "Dumping Noobaa crds using command: $IMAGE_RUN_CMD --entrypoint=/usr/local/bin/noobaa-operator $NOOBAA_IMAGE $noobaa_dump_crds_cmd"
# shellcheck disable=SC2086
($IMAGE_RUN_CMD --entrypoint=/usr/local/bin/noobaa-operator "$NOOBAA_IMAGE" $noobaa_dump_crds_cmd) > $noobaa_crds_outdir/noobaa-crd.yaml
}
# ==== DUMP ROOK YAMLS ====
function dump_rook_csv() {
rook_template_dir="/etc/ceph-csv-templates"
rook_csv_template="rook-ceph-ocp.vVERSION.clusterserviceversion.yaml.in"
rook_crds_dir=$rook_template_dir/crds
rook_crds_outdir="$OUTDIR_CRDS/rook"
rm -rf $ROOK_CSV
rm -rf $rook_crds_outdir
mkdir -p $rook_crds_outdir
crd_list=$(mktemp)
echo "Dumping rook csv using command: $IMAGE_RUN_CMD --entrypoint=cat $ROOK_IMAGE $rook_template_dir/$rook_csv_template"
$IMAGE_RUN_CMD --entrypoint=cat "$ROOK_IMAGE" $rook_template_dir/$rook_csv_template > $ROOK_CSV
echo "Listing rook crds using command: $IMAGE_RUN_CMD --entrypoint=ls $ROOK_IMAGE -1 $rook_crds_dir/"
$IMAGE_RUN_CMD --entrypoint=ls "$ROOK_IMAGE" -1 $rook_crds_dir/ > "$crd_list"
# shellcheck disable=SC2013
for i in $(cat "$crd_list"); do
# shellcheck disable=SC2059
crd_file=$(printf ${rook_crds_dir}/"$i" | tr -d '[:space:]')
echo "Dumping rook crd $crd_file using command: $IMAGE_RUN_CMD --entrypoint=cat $ROOK_IMAGE $crd_file"
($IMAGE_RUN_CMD --entrypoint=cat "$ROOK_IMAGE" "$crd_file") > $rook_crds_outdir/"$(basename "$crd_file")"
done;
rm -f "$crd_list"
}
# ==== DUMP OCS YAMLS ====
# Generate an OCS CSV using the operator-sdk.
# This is the base CSV everything else gets merged into later on.
function gen_ocs_csv() {
ocs_crds_outdir="$OUTDIR_CRDS/ocs"
rm -rf $OUTDIR_TEMPLATES/manifests/ocs-operator.clusterserviceversion.yaml
rm -rf $OCS_CSV
rm -rf $ocs_crds_outdir
mkdir -p $ocs_crds_outdir
gen_args="generate kustomize manifests -q"
# shellcheck disable=SC2086
$OPERATOR_SDK $gen_args
pushd config/manager
$KUSTOMIZE edit set image ocs-dev/ocs-operator="$OCS_IMAGE"
popd
$KUSTOMIZE build config/manifests | $OPERATOR_SDK generate bundle -q --overwrite=false --version "$CSV_VERSION"
mv bundle/manifests/*clusterserviceversion.yaml $OCS_CSV
cp config/crd/bases/* $ocs_crds_outdir
}
if [ -z "$OPENSHIFT_BUILD_NAMESPACE" ]; then
source hack/docker-common.sh
dump_noobaa_csv
dump_rook_csv
fi
gen_ocs_csv
echo "Manifests sourced into $OUTDIR_TEMPLATES directory"
mv bundle/manifests $OCS_FINAL_DIR
mv bundle/metadata "$(dirname $OCS_FINAL_DIR)"/metadata
rm -rf bundle
rm bundle.Dockerfile
| true
|
e4b71e6eb0c7ddc7cb0739efbed844e867cce27a
|
Shell
|
clacroi/ift6262_project
|
/Utility/aws_gpu_launch
|
UTF-8
| 1,327
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/bash
# Download cuda toolkit, update packages and install cuda-toolkit
wget https://developer.nvidia.com/compute/cuda/8.0/prod/local_installers/cuda-repo-ubuntu1604-8-0-local_8.0.44-1_amd64-deb
sudo dpkg -i cuda-repo-ubuntu1604-8-0-local_8.0.44-1_amd64-deb
sudo apt-get update
sudo apt-get install -y cuda nvidia-cuda-toolkit
# Download and install cudnn toolkit
tar -zxvf cudnn-7.5-linux-x64-v5.0-ga.tgz
echo 'export LD_LIBRARY_PATH=/home/ubuntu/cuda/lib64:$LD_LIBRARY_PATH' >> ~/.bashrc
echo 'export CPATH=/home/ubuntu/cuda/include:$CPATH' >> ~/.bashrc
echo 'export LIBRARY_PATH=/home/ubuntu/cuda/lib64:$LD_LIBRARY_PATH' >> ~/.bashrc
# Download and install Anaconda
wget https://repo.continuum.io/archive/Anaconda3-4.2.0-Linux-x86_64.sh
bash Anaconda3-4.2.0-Linux-x86_64.sh
source .bashrc
#sudo apt-get install python3-pip
# Install Theano and Keras
pip install --upgrade --no-deps git+git://github.com/Theano/Theano.git
pip install keras
#pip install keras==1.2.2
# Copy Theano and Keras configuration files
cp ./keras.json ~/.keras/keras.json
cp ./theanorc ~/.theanorc
# Create Data directory and untar Data
tar -xf ~/inpainting.tar.bz2
mkdir ~/project
mkdir ~/project/Data
mv ~/inpainting ~/project/Data/
# Install h5py --> no need with Anaconda ?
#sudo apt-get install libhdf5-dev
#sudo pip3 install h5py
| true
|
b6c995744750367442644edd5279588e6709ab0d
|
Shell
|
TChan92/320SatTranslator
|
/testing.sh
|
UTF-8
| 432
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
# Pipeline: encode each Sudoku puzzle as CNF, solve it with minisat,
# then decode the solver output back into a solved grid.

# Step 1: puzzle text -> CNF instance.
for puzzle in SudokuPuzzles/*.txt; do
    stem=${puzzle##*/}
    stem=${stem%.txt}
    python2.7 sud2sat.py "$puzzle" "CNF_Files/$stem.in"
done

# Step 2: run the SAT solver on every CNF instance.
for instance in CNF_Files/*.in; do
    stem=${instance##*/}
    stem=${stem%.in}
    minisat "$instance" "CNF_Files/$stem.out"
done

# Step 3: solver output -> solved Sudoku grid.
for solution in CNF_Files/*.out; do
    stem=${solution##*/}
    stem=${stem%.out}
    python2.7 sat2sud.py "$solution" "CNF_Out/$stem.txt"
done
| true
|
87606c80d4cede7a9834b11c6bca0218d9dc430f
|
Shell
|
ainpoenya/openwrt-huawei
|
/gpio.cgi
|
UTF-8
| 2,823
| 3
| 3
|
[] |
no_license
|
#!/bin/sh
echo "Access-Control-Allow-Origin: *"
echo "Content-type: application/json"
echo ""
PASSWORD="openwrt"
SCRIPT="gpio/script/gpio.sh"
#KEY=`echo "$QUERY_STRING" | sed -n 's/^.*key=\([^&]*\).*$/\1/p' | sed "s/%20/ /g"`
#echo "KEY IS= $KEY"
#exit 0
# Extract each supported CGI parameter from QUERY_STRING. Each sed pulls
# the value of "<name>=..." up to the next '&', then decodes "%20" back
# into spaces.
KEY=`echo "$QUERY_STRING" | sed -n 's/^.*key=\([^&]*\).*$/\1/p' | sed "s/%20/ /g"`
AUTH=`echo "$QUERY_STRING" | sed -n 's/^.*auth=\([^&]*\).*$/\1/p' | sed "s/%20/ /g"`
TOGGLE=`echo "$QUERY_STRING" | sed -n 's/^.*toggle=\([^&]*\).*$/\1/p' | sed "s/%20/ /g"`
SINGLE=`echo "$QUERY_STRING" | sed -n 's/^.*single=\([^&]*\).*$/\1/p' | sed "s/%20/ /g"`
GROUP=`echo "$QUERY_STRING" | sed -n 's/^.*group=\([^&]*\).*$/\1/p' | sed "s/%20/ /g"`
ALLON=`echo "$QUERY_STRING" | sed -n 's/^.*allon=\([^&]*\).*$/\1/p' | sed "s/%20/ /g"`
ALLOFF=`echo "$QUERY_STRING" | sed -n 's/^.*alloff=\([^&]*\).*$/\1/p' | sed "s/%20/ /g"`
LIST=`echo "$QUERY_STRING" | sed -n 's/^.*list=\([^&]*\).*$/\1/p' | sed "s/%20/ /g"`
STATUS=`echo "$QUERY_STRING" | sed -n 's/^.*status=\([^&]*\).*$/\1/p' | sed "s/%20/ /g"`
LISTJADWAL=`echo "$QUERY_STRING" | sed -n 's/^.*listjadwal=\([^&]*\).*$/\1/p' | sed "s/%20/ /g"`
# NOTE(review): the pattern below matches "updtaenama" — this looks like a
# typo for "updatenama"; confirm which parameter name the client actually
# sends before changing it.
UPDATENAMA=`echo "$QUERY_STRING" | sed -n 's/^.*updtaenama=\([^&]*\).*$/\1/p' | sed "s/%20/ /g"`
TARGET=`echo "$QUERY_STRING" | sed -n 's/^.*target=\([^&]*\).*$/\1/p' | sed "s/%20/ /g"`
# NOTE(review): STATUS is parsed a second time here with an identical
# expression — redundant with the assignment above; confirm and drop one.
STATUS=`echo "$QUERY_STRING" | sed -n 's/^.*status=\([^&]*\).*$/\1/p' | sed "s/%20/ /g"`
NAMA=`echo "$QUERY_STRING" | sed -n 's/^.*nama=\([^&]*\).*$/\1/p' | sed "s/%20/ /g"`
# if [ $KEY = $PASSWORD ]
# then
# echo "PASSWORD ANDA BENAR"
# else
# echo "PASSWORD SALAH<br>"
# echo "KEY = $KEY"
# fi
if [[ $AUTH ]]
then
# outp=`bash $SCRIPT toggle $TARGET`
echo '{ "success": true, "message": "sukses login!" }';
#echo $outp
exit 0
fi
if [[ $TOGGLE ]]
then
outp=`bash $SCRIPT toggle $TARGET`
echo $outp
exit 0
fi
if [[ $SINGLE ]]
then
outp=`bash $SCRIPT single $TARGET $STATUS`
echo $outp
exit 0
fi
if [[ $GROUP ]]
then
outp=`bash $SCRIPT toggle $TARGET $STATUS`
echo $outp
exit 0
fi
if [[ $ALLON ]]
then
outp=`bash $SCRIPT allon`
echo $outp
exit 0
fi
if [[ $ALLOFF ]]
then
outp=`bash $SCRIPT alloff`
echo $outp
exit 0
fi
if [[ $LIST ]]
then
outp=`bash $SCRIPT list`
echo $outp
exit 0
fi
if [[ $STATUS ]]
then
outp=`bash $SCRIPT status`
echo $outp
exit 0
fi
if [[ $LISTJADWAL ]]
then
outp=`bash $SCRIPT list-jadwal`
echo $outp
exit 0
fi
if [[ $UPDATENAMA ]]
then
outp=`bash $SCRIPT update-nama $NAMA`
echo $outp
exit 0
fi
#echo '<html>'
#echo '<head>'
#echo '<title>Test CGI Bro</title>'
#echo '</head>'
#echo "<body>$DTA tes cgi script bro...yup..yup"
#echo "<br>PASSWORDNYA $PASSWORD<br>"
#echo "Query = $KEY <br>"
#echo '</body>'
#echo '</html>'
#exit 0
echo $QUERY_STRING
# echo '{ "success": false, "message": "request tidak lengkap atau salah" }';
| true
|
40d3d9aeb2caf58d3b1835ecd2f50dec2aaca138
|
Shell
|
ivan-ristovic/dotfiles
|
/bin/myip
|
UTF-8
| 425
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
# Print this machine's public IP address by querying OpenDNS.
# Usage: myip [4|6]   (force IPv4 or IPv6; default lets dig decide)
host="@resolver1.opendns.com"
version=""
# Only read $1 when an argument was actually supplied: the original test
# was `[ $# -ge 0 ]`, which is always true.
if [ $# -ge 1 ]; then
	version=$1
fi
if [ -n "$version" ]; then
	if [ "$version" == "4" ]; then
		host="@resolver4.opendns.com"
	elif [ "$version" == "6" ]; then
		# Two words on purpose: resolver address plus the AAAA record type.
		host="@resolver1.ipv6-sandbox.opendns.com AAAA"
	else
		echo "usage: $0 [4|6]"
		exit 1
	fi
	version=-$version
fi
# NOTE(review): set -x echoes the final dig invocation; presumably
# intentional so the user sees the exact query — confirm.
set -x
# $host and $version are deliberately unquoted: $host may expand to two
# arguments and an empty $version must disappear entirely.
dig +short $host myip.opendns.com $version
| true
|
a1bc6083febb0eb9a2fc7ee85029a9d01466a48b
|
Shell
|
salieff/jjrpulser
|
/html/mysql_settings.sh
|
UTF-8
| 2,444
| 3.53125
| 4
|
[] |
no_license
|
CURRENT_DIR="$( dirname "${0}" )"
CURRENT_DIR="$( cd "${CURRENT_DIR}"; pwd )"
MYSQL_DB_NAME='jjrpulser'
MYSQL_USER='www-data'
MYSQL_PASSWORD=''
# Run a single SQL statement ($1) against $MYSQL_DB_NAME as $MYSQL_USER.
# -sN: silent, no column headers — results go to stdout for the caller.
# mysql's stderr is appended to /tmp/<db>_debug.log; on a non-zero exit
# the failing statement is also logged with a timestamp and the code.
function ExecSQL() {
/usr/bin/mysql -u "${MYSQL_USER}" -sN "${MYSQL_DB_NAME}" -e "$1" 2>>"/tmp/${MYSQL_DB_NAME}_debug.log"
# Capture $? immediately — any command in between would clobber it.
MYSQL_RET_ERR_CODE="$?"
if [ "${MYSQL_RET_ERR_CODE}" != '0' ]
then
echo "[$( date '+%F %T' ) ${MYSQL_DB_NAME} ${MYSQL_RET_ERR_CODE}] $1" >>"/tmp/${MYSQL_DB_NAME}_debug.log"
fi
}
function WriteSetupCounters() {
exec {lock_fd}>"${CURRENT_DIR}/jjr_sett.lock"
flock "${lock_fd}"
echo "SETUP_COLD=$1" > "${CURRENT_DIR}/setup_counters.txt"
echo "SETUP_HOT=$2" >> "${CURRENT_DIR}/setup_counters.txt"
flock -u "${lock_fd}"
}
function ReadSetupCounters() {
exec {lock_fd}>"${CURRENT_DIR}/jjr_sett.lock"
[ "$1" = 'softmode' ] && flock -s "${lock_fd}" || flock "${lock_fd}"
if [ -f "${CURRENT_DIR}/setup_counters.txt" ]
then
source "${CURRENT_DIR}/setup_counters.txt"
[ "$1" = 'softmode' ] || rm -f "${CURRENT_DIR}/setup_counters.txt"
else
SETUP_COLD=-1
SETUP_HOT=-1
fi
flock -u "${lock_fd}"
}
function WriteStatistics() {
exec {lock_fd}>"${CURRENT_DIR}/jjr_stat.lock"
flock "${lock_fd}"
echo "UPTIMEDAYS=$1" > "${CURRENT_DIR}/statistics.txt"
echo "UPTIMEHOURS=$2" >> "${CURRENT_DIR}/statistics.txt"
echo "UPTIMEMINUTES=$3" >> "${CURRENT_DIR}/statistics.txt"
echo "UPTIMESECONDS=$4" >> "${CURRENT_DIR}/statistics.txt"
echo "UPTIMEMILLIS=$5" >> "${CURRENT_DIR}/statistics.txt"
echo "FREEHEAP=$6" >> "${CURRENT_DIR}/statistics.txt"
echo "HTTPREQSENT=$7" >> "${CURRENT_DIR}/statistics.txt"
echo "HTTPREQCOMMITED=$8" >> "${CURRENT_DIR}/statistics.txt"
echo "HTTPREQFAILED=$9" >> "${CURRENT_DIR}/statistics.txt"
STATDATETIME="$( /bin/date '+%F %T' )"
echo "STATDATETIME=\"${STATDATETIME}\"" >> "${CURRENT_DIR}/statistics.txt"
flock -u "${lock_fd}"
}
function ReadStatistics() {
exec {lock_fd}>"${CURRENT_DIR}/jjr_stat.lock"
flock -s "${lock_fd}"
if [ -f "${CURRENT_DIR}/statistics.txt" ]
then
source "${CURRENT_DIR}/statistics.txt"
else
UPTIMEDAYS=-1
UPTIMEHOURS=-1
UPTIMEMINUTES=-1
UPTIMESECONDS=-1
UPTIMEMILLIS=-1
FREEHEAP=-1
HTTPREQSENT=-1
HTTPREQCOMMITED=-1
HTTPREQFAILED=-1
fi
flock -u "${lock_fd}"
}
| true
|
ccd9b846bcdc72c8ec25dcd24c552a668123dda5
|
Shell
|
darfink/dotfiles
|
/dotbin/.bin/htmlencode
|
UTF-8
| 127
| 2.8125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Encode HTML entities: converts the arguments (or stdin when no
# arguments are given) from UTF-8 to HTML-escaped text via recode.
# `; then` was missing from the if — the script was a syntax error.
if [ $# -ne 0 ]; then
	echo "$@" | recode utf8..html
else
	# recode reads stdin directly; piping cat into it was redundant.
	recode utf8..html
fi
| true
|
85a5c3f864f471fbfae846a5a1754a7ce26dae24
|
Shell
|
eliark/Arch-install-script
|
/bashrc
|
UTF-8
| 1,859
| 2.625
| 3
|
[] |
no_license
|
#
# ~/.bashrc
#
# If not running interactively, don't do anything
[[ $- != *i* ]] && return
alias ls='ls --color=auto'
alias grep='grep --color=auto'
export PS1="\[\033[38;5;12m\][\[$(tput sgr0)\]\[\033[38;5;10m\]\u\[$(tput sgr0)\]\[\033[38;5;12m\]@\[$(tput sgr0)\]\[\033[38;5;7m\]\h\[$(tput sgr0)\]\[\033[38;5;12m\]]\[$(tput sgr0)\]\[\033[38;5;15m\]: \[$(tput sgr0)\]\[\033[38;5;7m\]\w\[$(tput sgr0)\]\[\033[38;5;12m\]>\[$(tput sgr0)\]\[\033[38;5;10m\]\\$\[$(tput sgr0)\]\[\033[38;5;15m\] \[$(tput sgr0)\]"
[ -e "/etc/DIR_COLORS" ] && DIR_COLORS="/etc/DIR_COLORS"
[ -e "$HOME/.dircolors" ] && DIR_COLORS="$HOME/.dircolors"
[ -e "$DIR_COLORS" ] || DIR_COLORS=""
eval "`dircolors -b $DIR_COLORS`"
### I ADDED THIS ###
export VISUAL="nano"
### Set alias
#############
alias ll="ls -la"
alias ls="ls --color=auto "
alias l="ls"
alias la="ls -a"
alias sf="screenfetch"
alias c="clear"
alias x="exit"
alias cls="clear"
alias off="shutdown -h now"
alias rut="sudo su"
alias root="sudo su"
alias leaf="leafpad"
alias snan="sudo nano"
alias ..="cd .."
alias ...="cd ../cd .."
######ARCH LINUX#######<
alias pac="sudo pacman"
alias pacs="sudo pacman -S"
alias yrts="yaourt -S"
alias yrt="yaourt"
alias inst="sudo pacman -S"
alias upd="sudo pacman -Sy
yaourt -Sy"
alias upg="sudo pacman -Syu
yaourt -Syu"
alias grb="sudo grub-mkconfig -o /boot/grub/grub.cfg"
alias del="sudo pacman -Rs"
#####DEBIAN/UBUNTU####
# alias upg="sudo apt-get upgrade"
# alias sup="sudo apt-get upgrade"
# alias upd="sudo apt-get update"
# alias inst="sudo apt install"
# alias sinstall="sudo apt-get install"
# alias grb="sudo update-grub"
# alias aplist="apt list --upgradable"
# alias arm="sudo apt autoremove"
# alias del="sudo apt-get remove"
## warning this next option removes all config files as well.
# alias del="sudo apt-get purge"
| true
|
7001571cb9f680431a1c7862a51baeb95ff3ea0c
|
Shell
|
koudaiii/jjug-ccc2016fall-devops-demo
|
/script/switch
|
UTF-8
| 424
| 3.609375
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
set -eu
set -o pipefail
echo "==> Getting current color"
CURRENT_COLOR=$(kubectl get svc demo-bg --namespace=demo --template='{{.spec.selector.color}}')
if [ "$CURRENT_COLOR" == "blue" ]; then
NEXT_COLOR=green
else
NEXT_COLOR=blue
fi
echo "Switch from $CURRENT_COLOR to $NEXT_COLOR"
cat kubernetes/bg-deployment/demo-svc.yaml | sed "s,switch,$NEXT_COLOR,g" | kubectl apply -f - --namespace=demo
| true
|
ec8061e340815258fe5ade461dc9786953773c3a
|
Shell
|
Yannick-W/antidote
|
/deployment/kubernetes/Nativ/deployDC.sh
|
UTF-8
| 2,795
| 3.96875
| 4
|
[] |
no_license
|
#!/bin/bash
### ---
### This deploys an antidote datacenter with:
### - name: $1
### - number of nodes: $2
### Including:
### - statefulset
### - headless service for the statefulset
### - job to create the antidote dc
### - services that expose each node to the outside net
### Configuration:
### Can be done through editing the templates or variables given in this script.
### ${*} expressions in the templates are replaced by this and other scripts.
### Output:
### This script creates a new directory in resources/deployments/$1
### There can be found all created yaml files that were deployed to the kubernetes cluster.
### ---
APP_LABEL=$1
NUM_NODES=$2
## Configuration
ANTIDOTE_DATACENTER="$APP_LABEL"
# The image reference is substituted into the statefulset template via sed,
# so any '/' must be backslash-escaped (see the local-registry example).
# The original line had a space after '=' — a runtime bug that executed the
# image name as a command and left IMAGE empty.
# NOTE(review): confirm the intended image tag; the previous value was
# truncated to "antidotedb/".
IMAGE="antidotedb\/antidote"
#IMAGE="192.168.2.106:5000\/antidotedb-local-build:0.2.2"
IMAGE_PULL_POLICY="Never"
STORAGE_CLASS_NAME="local-storage"
### ---
## Dirs
RES_DIR=$(bash getConfig.sh resources_dir);
DEPLOYMENTS_DIR=$(bash getConfig.sh deployments_dir);
TEMPLATES_DIR=$(bash getConfig.sh templates_dir);
## Begin
echo app=$APP_LABEL
echo replicas=$NUM_NODES
## Create directory for deployment
if [ ! -d "$DEPLOYMENTS_DIR/$APP_LABEL" ]; then
mkdir "$DEPLOYMENTS_DIR/$APP_LABEL"
mkdir "$DEPLOYMENTS_DIR/$APP_LABEL/services_pod-exposer"
## add to config TODO...
fi
## Create statefulset yaml
STATEFUL_SET=$DEPLOYMENTS_DIR/$APP_LABEL/statefulset_$APP_LABEL.yaml
cp $TEMPLATES_DIR/statefulset_antidote-template.yaml $STATEFUL_SET;
sed -i s/"\${antidote_datacenter}"/"$ANTIDOTE_DATACENTER"/g $STATEFUL_SET;
sed -i s/"\${antidote_image}"/"$IMAGE"/g $STATEFUL_SET;
sed -i s/"\${image_pull_policy}"/"$IMAGE_PULL_POLICY"/g $STATEFUL_SET;
sed -i s/"\${storage_class_name}"/"$STORAGE_CLASS_NAME"/g $STATEFUL_SET;
## Create headless service yaml
HEADLESS_SERVICE=$DEPLOYMENTS_DIR/$APP_LABEL/service_$APP_LABEL-headless.yaml
cp $TEMPLATES_DIR/service_antidote-headless-template.yaml $HEADLESS_SERVICE;
sed -i s/"\${antidote_datacenter}"/"$ANTIDOTE_DATACENTER"/g $HEADLESS_SERVICE;
###
## Deploy
kubectl apply -f $DEPLOYMENTS_DIR/$APP_LABEL
kubectl scale --replicas=$NUM_NODES statefulset/$APP_LABEL
## CreateDC
echo "Creating data center from statefulset "$APP_LABEL"..."
echo "Check if the statefulset is ready:"
while [ $(bash ./scripts/readyProbe.sh $APP_LABEL) -eq -1 ]
do
echo $(bash ./scripts/readyProbe.sh $APP_LABEL)" -> sleep 10 seconds, then try again."
sleep 10
done
echo $(bash ./scripts/readyProbe.sh $APP_LABEL)" -> Statefulset for data center "$APP_LABEL" is ready!"
echo "Procede to create the data center for $APP_LABEL..."
bash ./scripts/createDC_goClient.sh $APP_LABEL
echo "Expose all nodes to the outside net for datacenter $APP_LABEL."
bash ./scripts/exposeDatacenter.sh $APP_LABEL
echo Done.
echo Deployment complete.
| true
|
0ca201e065db3081e19f0aeff0d7795e27019305
|
Shell
|
jht0664/Utility_python_gromacs
|
/python/n-prof-del-line.sh
|
UTF-8
| 368
| 3.421875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Concatenate every $3-th line of b.nums and peo.nums from a range of
# ../<N>npt folders into ./b.nums and ./peo.nums.
# $1 : initial number for folder
# $2 : final number for folder
# $3 : n-th line you want to take
# Abort with a message if any argument is missing: with empty arguments
# the original `until [ $init -gt $final ]` test errors on every
# iteration and the loop spins forever.
init=${1:?initial folder number required}
final=${2:?final folder number required}
nth=${3:?n-th line step required}
# -f: do not fail when outputs from a previous run are absent.
rm -f b.nums peo.nums
str1="npt"
until [ "$init" -gt "$final" ]
do
	folder=$init$str1
	# Pass the step via -v instead of splicing it into the awk program text.
	awk -v n="$nth" 'NR%n==0' "../$folder/b.nums" >> b.nums
	awk -v n="$nth" 'NR%n==0' "../$folder/peo.nums" >> peo.nums
	init=$((init + 1))
done
| true
|
8c5f25ececaac1cd095f03bbc60cc4ab6060b663
|
Shell
|
Huan111/UNSW
|
/COMP9044/ASS/ass01/test_sh/test07.sh
|
UTF-8
| 1,231
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/sh
#ZANNNING WANG z5224151
#This test mainly for test subset 0
#this test only test shrug-show
#Attention others who may try this autotest need to change
#the path according to their own design of repo
#some of error messages may vary from different designs
index_path="./.shrug/index"
repo_path="./.shrug/version"
#clean the exists .shrug
rm -rf .shrug >/dev/null
#the meaning of color show belows
#[33m----color yellow----show warnings
#[32m----color Green-----all good
#[31m----color Red-------something wrong
#[30m----color Black-----stdout
#test beigin
echo -en "\e[32mTESTS00 BEGIN\n"
#initial the ".shrug"
sh shrug-init
echo line1 >a
sh shrug-add a
sh shrug-commit -m "first"
#test shrug-show
expected_output="line1a"
my_output=`sh shrug-show 0:a`
if test "$expected_output" = "$my_output"
then
echo -en "\e[32mshrug-show passed\n"
else
echo -en "\e[31mshrug-show failed\n"
fi
#test shrug-show
#file not exist
expected_output="shrug-show: error: 'not_exist' not found in index"
my_output=`sh shrug-show 0:not_exist`
if test "$expected_output" = "$my_output"
then
echo -en "\e[32mshrug-show passed(check error)\n"
else
echo -en "\e[31mshrug-show failed(check error)\n"
fi
#test end
echo -en "\e[32mTESTS00 END\n"
| true
|
3243adfc20b16c4be3a26daafa65921070f0fdba
|
Shell
|
shizhuo/bondapp
|
/complete.sh
|
UTF-8
| 319
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
# Drain the redis list "queue" on host proliant: each entry has the form
# "<date>:<option>"; run the casperjs history scraper on every entry,
# once with true and once with false.
entry=$(redis-cli -h proliant lpop queue)
echo "$entry"
# Stop as soon as lpop returns nothing (queue empty).
while [[ "$entry" != '' ]]
do
	# Split "<date>:<option>" into its two fields.
	d=$(echo "$entry" | cut -d':' -f 1)
	option=$(echo "$entry" | cut -d':' -f 2)
	echo "$d" "$option"
	casperjs history.js "$d" "$option" true
	casperjs history.js "$d" "$option" false
	entry=$(redis-cli -h proliant lpop queue)
done
| true
|
5d27082ae10e1b45d7b82fed8e499aa92e82424f
|
Shell
|
renpy/renpy-deps
|
/scripts/build_linux_common.sh
|
UTF-8
| 652
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/sh
try () {
"$@" || exit 1
}
DIR=$1
CPU=$2
mkdir /home/tom/ab/$DIR
try cd /home/tom/ab/$DIR
try /home/tom/ab/renpy-deps/build_python.sh
try /home/tom/ab/renpy-deps/build.sh
. /home/tom/ab/$DIR/env.sh
mkdir -p /home/tom/ab/patentfree/lib/linux-$CPU/lib
cp /home/tom/ab/$DIR/install/alt/lib/libavcodec.so.?? /home/tom/ab/patentfree/lib/linux-$CPU/lib
cp /home/tom/ab/$DIR/install/alt/lib/libavformat.so.?? /home/tom/ab/patentfree/lib/linux-$CPU/lib
cp /home/tom/ab/$DIR/install/alt/lib/libavutil.so.?? /home/tom/ab/patentfree/lib/linux-$CPU/lib
chmod +x /home/tom/ab/patentfree/lib/linux-$CPU/lib/*
echo You still need to build py4renpy.
| true
|
4da7646546b8b81a6048d664429ef469a6418c6d
|
Shell
|
foodora/consumer-tech-radar
|
/download.sh
|
UTF-8
| 284
| 3.15625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Fetch the current month's tech-radar sheet as TSV and strip its header row.
month=`date +%m`
year=`date +%Y`
out="data/${year}_${month}.tsv"
curl "https://docs.google.com/spreadsheets/d/1HvsbibsBTBvZXDaJZWYyHwTxuA7a4eEbgcIKaPhuoxs/export?gid=0&format=tsv" -o "$out"
# Drop the first row (the spreadsheet header) in place.
tail -n +2 "$out" > "$out.tmp" && mv "$out.tmp" "$out"
| true
|
0fa48a176a9b7329b694876a3ed2db17e05c84d7
|
Shell
|
AntonyBaasan/vagrants
|
/test-ubuntu/provision.sh
|
UTF-8
| 1,250
| 2.734375
| 3
|
[] |
no_license
|
apt-get update
apt-get install -y nginx
service nginx start
apt-get install -y git
echo "
server {
listen 80;
location / {
proxy_pass http://localhost:5000;
proxy_http_version 1.1;
proxy_set_header Upgrade \$http_upgrade;
proxy_set_header Connection keep-alive;
proxy_set_header Host \$host;
proxy_cache_bypass \$http_upgrade;
}
}
" > /etc/nginx/sites-available/default
echo '#############'
echo '/etc/nginx/sites-available/default:'
cat /etc/nginx/sites-available/default
nginx -t
nginx -s reload
curl https://packages.microsoft.com/keys/microsoft.asc | gpg --dearmor > microsoft.gpg
sudo mv microsoft.gpg /etc/apt/trusted.gpg.d/microsoft.gpg
sudo sh -c 'echo "deb [arch=amd64] https://packages.microsoft.com/repos/microsoft-ubuntu-trusty-prod trusty main" > /etc/apt/sources.list.d/dotnetdev.list'
sudo apt-get install -y dotnet-sdk-2.0.0
echo 'cd /home/vagrant' > run.sh
echo 'rm -rf pdf-support' >> run.sh
echo 'git clone https://github.com/AntonyBaasan/pdf-support' >> run.sh
echo 'dotnet build pdf-support/pdf-merge-server/' >> run.sh
echo 'dotnet run --project pdf-support/pdf-merge-server/webapi/webapi.csproj' >> run.sh
echo '#############'
echo 'run.sh content:'
cat run.sh
| true
|
40c24b9ebcd3de3ff7c9beb1299155af07efeaeb
|
Shell
|
jwhaley58/.config
|
/.zshrc
|
UTF-8
| 1,542
| 2.734375
| 3
|
[] |
no_license
|
echo "Pulling most recent sc_config"
cd ~/.config
git pull
cd ~
# Path to your oh-my-zsh installation.
export ZSH=~/.oh-my-zsh
export ZSHRC_PYTHON_PATH=$(which python3)
# Auto update zsh
DISABLE_UPDATE_PROMPT=true
ZSH_THEME="dstcustom"
plugins=(
git
last-working-dir
colored-man-pages
zsh-autosuggestions
autojump
zsh-256color
alias-tips
)
source $ZSH/oh-my-zsh.sh
function lazygit() {
git add .
git commit -a -m "$1"
git push
}
function lazypull() {
cd ~/notes
git reset --hard HEAD
git clean -xffd
git pull
}
if [ -f ~/.fzf/bin/fzf ]; then # Add programs to path if needed
path+=~/.fzf/bin/
fi
if [ -f ~/.fzfrc ]; then # Import my fzf cmds if present
source ~/.fzfrc
fi
[ -f ~/.fzf.zsh ] && source ~/.fzf.zsh # fzf zsh autocompletion
export PATH=~/Library/Python/3.7/bin:$PATH
# Custom Aliases
alias lo='locate'
alias hg="history | grep "
alias zshrc="vim ~/.zshrc"
alias vimrc="vim ~/.vimrc"
alias n="cd ~/notes; ll"
alias ncmd="grep \\\\$ "
alias ndir="grep / "
alias npeek="grep '##' "
alias x="exit"
alias sz="source ~/.zshrc"
alias grep='grep -i --color'
alias al="cat ~/.zshrc | grep -v '#' | grep alias"
alias alg="cat ~/.zshrc | grep -v '#' | grep alias | grep "
alias tree='tree -C'
alias ss="./box_jumper.sh"
alias gah="sudo !!"
alias sudo='sudo env PATH=$PATH'
alias hg='history | grep'
alias svim='sudo vim -u ~/.vimrc'
alias sc='systemctl'
alias ssc='sudo systemctl'
alias rhelpy='scl enable rh-python36 $(which zsh)'
alias rhyme="python3 ~/scripts/rhyme.py"
alias vg="vim -c 'Goyo' "
| true
|
8488f26cfc31d4faee9de5d7f0ad3a6420d47b28
|
Shell
|
nsailor/rocm-arch
|
/hip-hcc/PKGBUILD
|
UTF-8
| 1,420
| 2.8125
| 3
|
[] |
no_license
|
# Maintainer: acxz <akashpatel2008 at yahoo dot com>
pkgname=hip-hcc
pkgver=3.3.0
pkgrel=6
pkgdesc="Heterogeneous Interface for Portability ROCm"
arch=('x86_64')
url='https://rocmdocs.amd.com/en/latest/Installation_Guide/HIP.html'
license=('MIT')
depends=('hsa-rocr' 'comgr')
makedepends=('libelf' 'cmake' 'python' 'hcc' 'git')
provides=('hip')
conflicts=('hip')
_git='https://github.com/ROCm-Developer-Tools/HIP'
source=("$pkgname-$pkgver.tar.gz::$_git/archive/rocm-$pkgver.tar.gz")
sha256sums=('8ae7cf4134975c7a36e0c72a5e041694935f38c2d7df58f4ad55e9a23b7b875c')
prepare() {
cd "$srcdir/HIP-rocm-$pkgver"
# override __hcc_workweek__
# https://github.com/rocm-arch/rocm-arch/issues/68#issuecomment-604272120
sed -i 's/__hcc_workweek__/99999/g' $(grep __hcc_workweek__ . -rIl)
}
build() {
mkdir -p "$srcdir/build"
cd "$srcdir/build"
cmake -DCMAKE_INSTALL_PREFIX=/opt/rocm/hip \
-DHIP_COMPILER=hcc \
"$srcdir/HIP-rocm-$pkgver"
make
}
package() {
cd "$srcdir/build"
make DESTDIR="$pkgdir" install
# add links (hipconfig is for rocblas with tensile)
install -d "$pkgdir/usr/bin"
local _fn
for _fn in hipcc hipconfig; do
ln -s "/opt/rocm/hip/bin/$_fn" "$pkgdir/usr/bin/$_fn"
done
install -Dm644 /dev/stdin "$pkgdir/etc/ld.so.conf.d/hip.conf" <<EOF
/opt/rocm/hip/lib
EOF
install -Dm644 "$srcdir/HIP-rocm-$pkgver/LICENSE" "$pkgdir/usr/share/licenses/$pkgname/LICENSE"
}
| true
|
e938917ee4c9b847cf7fc08c1fdabd28ca3a98e3
|
Shell
|
Thief007/docker-oracle-apex-ords
|
/scripts/download_files.sh
|
UTF-8
| 574
| 3.34375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Download the Oracle XE / APEX / ORDS / Tomcat / JRE installer blobs from
# the repository's files/ directory on GitHub into /files.
# cd into the absolute path we just created — the original `cd files` used
# a relative path, so the download landed somewhere other than the /files
# directory created by mkdir. Abort if either step fails.
mkdir -p /files && cd /files || exit 1
downloadFiles () {
  local url="https://github.com/Thief007/docker-oracle-apex-ords"
  local files=(
    oracle-xe_11.2.0-1.0_amd64.debaa
    oracle-xe_11.2.0-1.0_amd64.debab
    oracle-xe_11.2.0-1.0_amd64.debac
    apex_5.1.2_en.zip
    ords.3.0.11.180.12.34.zip
    apache-tomcat-8.0.46.tar.gz
    jre-7u80-linux-x64.tar.gz
  )
  # Derive the total from the array instead of hard-coding "7".
  local total=${#files[@]}
  local i=1
  for part in "${files[@]}"; do
    echo "[Downloading '$part' (part $i/$total)]"
    # --retry 3: retry transient failures; -m 60: per-transfer timeout.
    curl --progress-bar --retry 3 -m 60 -o "$part" -L "$url/blob/master/files/$part?raw=true"
    i=$((i + 1))
  done
}
downloadFiles
| true
|
64fb1d39b8834e9310712197934704ad3563a9d0
|
Shell
|
saiSunkari19/akash
|
/script/setup-kind.sh
|
UTF-8
| 1,038
| 4.03125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Set up a kubernetes environment with kind.
#
# * Install Akash CRD
# * Optionally install metrics-server
rootdir="$(dirname "$0")/.."
install_crd() {
kubectl apply -f "$rootdir/pkg/apis/akash.network/v1/crd.yaml"
}
install_metrics() {
# https://github.com/kubernetes-sigs/kind/issues/398#issuecomment-621143252
kubectl apply -f "$(dirname "$0")/kind-metrics-server.yaml"
kubectl wait pod --namespace kube-system \
--for=condition=ready \
--selector=k8s-app=metrics-server \
--timeout=90s
count=1
while ! kubectl top nodes; do
echo "[$((count++))] waiting for metrics..."
sleep 1
done
echo "metrics available"
}
usage() {
cat <<EOF
Install k8s dependencies for integration tests against "KinD"
Usage: $0 [crd|metrics]
crd: install the akash CRDs
metrics: install CRDs, metrics-server and wait for metrics to be available
EOF
exit 1
}
case "${1:-metrics}" in
crd)
install_crd
;;
metrics)
install_crd
install_metrics
;;
*) usage;;
esac
| true
|
dc37a27e8a4b5226a2ca137215b55b08e0bf60c7
|
Shell
|
raihaan/depr_hcp-micro-func
|
/preprocessed/preproc.sh
|
UTF-8
| 8,390
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
# ====================================================================== #
# 2021_03_12 Raihaan Patel
# Creates individual command list file that is submitted by qbatch
# run per subject: for subj in ../raw_data/structural/??????; do ./preproc.sh $subj; done
# can then do the following to submit joblists for each subj:
# for file in ??????/*joblist; do echo bash $file; done > preproc_joblist
# and
# module load cobralab/2019b
# qbatch -w 00:30:00 -c 50 preproc_joblist
#
# Performs commands needed to parcellate rsfc surface data according to an atlas
# Mean rsfc in each region is taken
# vertices weighted according to vertex area - lareger area gets more weight
# outputs pscalar.nii file containing mean rsfc in each region, and .txt file containing the vals per region for more general processing
#
# Performs commands needed to parcellate t1t2 surface data according to an atlas
# Mean t1t2 in each region is taken
# vertices weighted according to vertex area - lareger area gets more weight
# outputs pscalar.nii file containing mean t1t2 in each region, and .txt file containing the vals per region for more general processing
#
# Performs commands needed to identify vertex coordinates of each region centroid
# computes euclid distance between each centrtoid
# outputs regionxregion matrix in .txt format containing distances
#
# ====================================================================== #
#set vars, just 1 var - input id
# $1 - path to (or bare ID of) a subject directory; only the basename is used.
input=$1
subj=$(basename $input)
rsfc_out_dir="${subj}/rsfc/"
joblist="${subj}/${subj}.joblist"
#make output dir
mkdir -p $rsfc_out_dir
#set some paths to raw data
# NOTE(review): all raw-data paths are relative to the current working
# directory -- this script assumes it is run from the project root.
raw_func_dir="../raw_data/functional/${subj}/MNINonLinear/Results/"
raw_struct_dir="../raw_data/structural/"
#set path to dir with cifti atlases for parcellating
atlas_dir="../raw_data/atlas_files/"
#define input files
atlas_file="${atlas_dir}/Q1-Q6_RelatedValidation210.CorticalAreas_dil_Final_Final_Areas_Group_Colors.32k_fs_LR.dlabel.nii"
left_surface="${raw_struct_dir}/${subj}/T1w/fsaverage_LR32k/${subj}.L.midthickness_MSMAll.32k_fs_LR.surf.gii"
right_surface="${raw_struct_dir}/${subj}/T1w/fsaverage_LR32k/${subj}.R.midthickness_MSMAll.32k_fs_LR.surf.gii"
touch ${joblist}
#compute mean time series of each parcel
#use weighted avg based on surface file/vertex area
#create rsfc mx from parcellated tseries (parcel-parcel connectivity mx)
#convert to txt for general analysis
# Nothing below executes wb_command directly -- each invocation is echoed
# into ${joblist} for later batch submission (see the qbatch notes above).
for rs_run in {rfMRI_REST1_LR,rfMRI_REST1_RL}
do
echo wb_command -cifti-parcellate ${raw_func_dir}/${rs_run}/${rs_run}_Atlas_MSMAll_hp2000_clean.dtseries.nii ${atlas_file} COLUMN ${rsfc_out_dir}/${subj}.${rs_run}.ptseries.nii -spatial-weights -left-area-surf ${left_surface} -right-area-surf ${right_surface} >> ${joblist}
echo wb_command -cifti-correlation -mem-limit 2 ${rsfc_out_dir}/${subj}.${rs_run}.ptseries.nii ${rsfc_out_dir}/${subj}.${rs_run}.netmat.pconn.nii -fisher-z >> ${joblist}
echo wb_command -cifti-convert -to-text ${rsfc_out_dir}/${subj}.${rs_run}.netmat.pconn.nii ${rsfc_out_dir}/${subj}.${rs_run}.netmat.txt >> ${joblist}
done
echo "" >> ${joblist}
#average LR and RL time series to compute 1 netmat per run
echo wb_command -cifti-average ${rsfc_out_dir}/${subj}.rfMRI_REST1_avg.netmat.pconn.nii -cifti ${rsfc_out_dir}/${subj}.rfMRI_REST1_LR.netmat.pconn.nii -cifti ${rsfc_out_dir}/${subj}.rfMRI_REST1_RL.netmat.pconn.nii >> ${joblist}
echo wb_command -cifti-convert -to-text ${rsfc_out_dir}/${subj}.rfMRI_REST1_avg.netmat.pconn.nii ${rsfc_out_dir}/${subj}.rfMRI_REST1.netmat.txt >> ${joblist}
echo "" >> ${joblist}
echo "" >> ${joblist}
echo "#now running t1t2 preproc" >> ${joblist}
echo "" >> ${joblist}
#now t1t2, BIAS CORRECTED
t1t2_out_dir="${subj}/t1t2/"
#make output dir
mkdir -p $t1t2_out_dir
#define input files
raw_t1t2_bc="${raw_struct_dir}/${subj}/MNINonLinear/fsaverage_LR32k/${subj}.MyelinMap_BC_MSMAll.32k_fs_LR.dscalar.nii"
parcellated_t1t2_bc="${t1t2_out_dir}/${subj}.weighted.parcellated.t1t2_bc.pscalar.nii"
parcellated_t1t2_bc_txt="${t1t2_out_dir}/${subj}.weighted.parcellated.t1t2_bc.txt"
#parcellate the raw t1t2 file according to Glasser atlas
#take the mean of t1t2 vals in each parcels
#weight vertices according to their vertex area -> larger vertices contribute more
echo wb_command -cifti-parcellate ${raw_t1t2_bc} ${atlas_file} COLUMN ${parcellated_t1t2_bc} -method MEAN -spatial-weights -left-area-surf ${left_surface} -right-area-surf ${right_surface} >> ${joblist}
#add a space betwn commands because im blind
echo "" >> ${joblist}
#convert to .txt file for python processing
echo wb_command -cifti-convert -to-text ${parcellated_t1t2_bc} ${parcellated_t1t2_bc_txt} >> ${joblist}
echo "" >> ${joblist}
#now repeat t1t2 for NON BIAS CORRECTED
#define input files
raw_t1t2="${raw_struct_dir}/${subj}/MNINonLinear/fsaverage_LR32k/${subj}.MyelinMap_MSMAll.32k_fs_LR.dscalar.nii"
parcellated_t1t2="${t1t2_out_dir}/${subj}.weighted.parcellated.t1t2.pscalar.nii"
parcellated_t1t2_txt="${t1t2_out_dir}/${subj}.weighted.parcellated.t1t2.txt"
#parcellate the raw t1t2 file according to Glasser atlas
#take the mean of t1t2 vals in each parcels
#weight vertices according to their vertex area -> larger vertices contribute more
echo wb_command -cifti-parcellate ${raw_t1t2} ${atlas_file} COLUMN ${parcellated_t1t2} -method MEAN -spatial-weights -left-area-surf ${left_surface} -right-area-surf ${right_surface} >> ${joblist}
#add a space betwn commands because im blind
echo "" >> ${joblist}
#convert to .txt file for python processing
echo wb_command -cifti-convert -to-text ${parcellated_t1t2} ${parcellated_t1t2_txt} >> ${joblist}
echo "" >> ${joblist}
echo "#now running distance preproc" >> ${joblist}
echo "" >> ${joblist}
distance_out_dir="${subj}/distance/"
#make output dir
mkdir -p $distance_out_dir
#get coordinates of left and right surface vertices
echo wb_command -surface-coordinates-to-metric ${left_surface} ${distance_out_dir}/left_coords.shape.gii >> ${joblist}
echo wb_command -surface-coordinates-to-metric ${right_surface} ${distance_out_dir}/right_coords.shape.gii >> ${joblist}
echo "" >> ${joblist}
#convert the metric .gii files to a scalar file which has one row for each of x,y,z and one col for each vertex
echo wb_command -cifti-create-dense-scalar ${distance_out_dir}/${subj}.wb_coords.dscalar.nii -right-metric ${distance_out_dir}/right_coords.shape.gii -left-metric ${distance_out_dir}/left_coords.shape.gii >> ${joblist}
echo wb_command -cifti-create-dense-scalar ${distance_out_dir}/${subj}.left_coords.dscalar.nii -left-metric ${distance_out_dir}/left_coords.shape.gii >> ${joblist}
echo wb_command -cifti-create-dense-scalar ${distance_out_dir}/${subj}.right_coords.dscalar.nii -right-metric ${distance_out_dir}/right_coords.shape.gii >> ${joblist}
#convert the scalar file to text - one line for every vertex containing xyz coords
echo wb_command -cifti-convert -to-text ${distance_out_dir}/${subj}.left_coords.dscalar.nii ${distance_out_dir}/${subj}.left_coords.txt >> ${joblist}
echo wb_command -cifti-convert -to-text ${distance_out_dir}/${subj}.right_coords.dscalar.nii ${distance_out_dir}/${subj}.right_coords.txt >> ${joblist}
echo "" >> ${joblist}
#parcellate the scalar coordinates file based on Glasser and weight by vertex area
#this output has 3 maps/rows - one for each of xyz coords, and one col for each parcel in Glasser
echo wb_command -cifti-parcellate ${distance_out_dir}/${subj}.wb_coords.dscalar.nii ${atlas_file} COLUMN ${distance_out_dir}/${subj}.glasser_centroids.pscalar.nii -method MEAN -spatial-weights -left-area-surf ${left_surface} -right-area-surf ${right_surface} >> ${joblist}
#convert to text
echo wb_command -cifti-convert -to-text ${distance_out_dir}/${subj}.glasser_centroids.pscalar.nii ${distance_out_dir}/${subj}.glasser_centroids.txt >> ${joblist}
echo "" >> ${joblist}
#get closest vertex
echo wb_command -surface-closest-vertex ${left_surface} ${distance_out_dir}/${subj}.glasser_centroids.txt ${distance_out_dir}/${subj}.glasser.left_centroid_vnum.txt >> ${joblist}
echo wb_command -surface-closest-vertex ${right_surface} ${distance_out_dir}/${subj}.glasser_centroids.txt ${distance_out_dir}/${subj}.glasser.right_centroid_vnum.txt >> ${joblist}
echo "" >> ${joblist}
#now run python script to create region by region matrix containing euclid distance btwn each region pair
# NOTE(review): compute_euclid.py is resolved relative to wherever the joblist
# is eventually executed -- TODO confirm the batch runner's working directory.
echo python compute_euclid.py ${subj} >> ${joblist}
| true
|
4eb008d7c22da252408efbf8902663e5917f998e
|
Shell
|
gchq/gaffer-docker
|
/docker/accumulo/entrypoint.sh
|
UTF-8
| 4,934
| 3.40625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Copyright 2020-2022 Crown Copyright
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Default the instance name when the caller did not provide one.
test -z "${ACCUMULO_INSTANCE_NAME}" && ACCUMULO_INSTANCE_NAME="accumulo"
# Only the master needs the one-time init; all other roles fall straight
# through to the exec at the bottom.
if [ "$1" = "accumulo" ] && [ "$2" = "master" ]; then
# Try to find desired root password from trace config
if [ -f "${ACCUMULO_CONF_DIR}/accumulo-site.xml" ]; then
TRACE_USER=$(xmlstarlet sel -t -v "/configuration/property[name='trace.user']/value" ${ACCUMULO_CONF_DIR}/accumulo-site.xml)
if [ "${TRACE_USER}" = "root" ]; then
PASSWORD=$(xmlstarlet sel -t -v "/configuration/property[name='trace.token.property.password']/value" ${ACCUMULO_CONF_DIR}/accumulo-site.xml)
fi
fi
# Try to find desired root password from client config
if [ -f "${ACCUMULO_CONF_DIR}/client.conf" ]; then
CLIENT_USERNAME=$(cat ${ACCUMULO_CONF_DIR}/client.conf | grep "auth.principal" | grep -v "^#" | cut -d= -f2)
if [ "${CLIENT_USERNAME}" = "root" ]; then
PASSWORD=$(cat ${ACCUMULO_CONF_DIR}/client.conf | grep "auth.token" | grep -v "^#" | cut -d= -f2)
fi
fi
# Try to find desired root password from client config (accumulo 2)
if [ -f "${ACCUMULO_CONF_DIR}/accumulo-client.properties" ]; then
CLIENT_USERNAME=$(cat ${ACCUMULO_CONF_DIR}/accumulo-client.properties | grep "auth.principal" | grep -v "^#" | cut -d= -f2)
if [ "${CLIENT_USERNAME}" = "root" ]; then
PASSWORD=$(cat ${ACCUMULO_CONF_DIR}/accumulo-client.properties | grep "auth.token" | grep -v "^#" | cut -d= -f2)
fi
fi
# Try to find desired root password from accumulo.properties (accumulo 2)
if [ -f "${ACCUMULO_CONF_DIR}/accumulo.properties" ]; then
TRACE_USER=$(cat ${ACCUMULO_CONF_DIR}/accumulo.properties | grep "trace.user" | grep -v "^#" | cut -d= -f2)
if [ "${TRACE_USER}" = "root" ]; then
PASSWORD=$(cat ${ACCUMULO_CONF_DIR}/accumulo.properties | grep "trace.token.property.password" | grep -v "^#" | cut -d= -f2)
fi
fi
# Try to find desired root password from environment variable
# (highest precedence: overrides anything found in the config files above).
[ ! -z "${ACCUMULO_ROOT_PASSWORD}" ] && PASSWORD="${ACCUMULO_ROOT_PASSWORD}"
if [ -z "${PASSWORD}" ]; then
echo "Unable to determine what the Accumulo root user's password should be."
echo "Please either set:"
echo "- \$ACCUMULO_ROOT_PASSWORD environment variable"
echo "- 'auth.token' property in ${ACCUMULO_CONF_DIR}/client.conf (if root is set for 'auth.principal')"
echo "- 'trace.token.property.password' property in ${ACCUMULO_CONF_DIR}/accumulo-site.xml (if you are using root for the trace user)"
exit 1
fi
# If possible, wait until all the HDFS instances that Accumulo will be using are available i.e. not in Safe Mode and directory is writeable
[ -f "${ACCUMULO_CONF_DIR}/accumulo.properties" ] && ACCUMULO_VOLUMES=$(grep instance.volumes ${ACCUMULO_CONF_DIR}/accumulo.properties | cut -d= -f2)
[[ -z "${ACCUMULO_VOLUMES}" && -f "${ACCUMULO_CONF_DIR}/accumulo-site.xml" ]] && ACCUMULO_VOLUMES=$(xmlstarlet sel -t -v "/configuration/property[name='instance.volumes']/value" ${ACCUMULO_CONF_DIR}/accumulo-site.xml)
if [ ! -z "${ACCUMULO_VOLUMES}" ]; then
HADOOP_CLASSPATH="${ACCUMULO_CONF_DIR}:${HADOOP_HOME}/share/hadoop/hdfs/*:${HADOOP_HOME}/share/hadoop/client/*:${HADOOP_HOME}/share/hadoop/common/lib/*"
# Up to 7 attempts (ATTEMPTS is post-incremented), 10s apart.
until [ "${ALL_VOLUMES_READY}" == "true" ] || [ $(( ATTEMPTS++ )) -gt 6 ]; do
echo "$(date) - Waiting for all HDFS instances to be ready..."
ALL_VOLUMES_READY="true"
# ${ACCUMULO_VOLUMES//,/ } turns the comma-separated list into words.
for ACCUMULO_VOLUME in ${ACCUMULO_VOLUMES//,/ }; do
SAFE_MODE_CHECK="OFF"
SAFE_MODE_CHECK_OUTPUT=$(java -cp ${HADOOP_CLASSPATH} org.apache.hadoop.hdfs.tools.DFSAdmin --fs ${ACCUMULO_VOLUME} -safemode get)
echo ${SAFE_MODE_CHECK_OUTPUT} | grep -q "Safe mode is OFF"
[ "$?" != "0" ] && ALL_VOLUMES_READY="false" && SAFE_MODE_CHECK="ON"
WRITE_CHECK="writeable"
java -cp ${HADOOP_CLASSPATH} org.apache.hadoop.fs.FsShell -mkdir -p ${ACCUMULO_VOLUME}
java -cp ${HADOOP_CLASSPATH} org.apache.hadoop.fs.FsShell -test -w ${ACCUMULO_VOLUME}
[ "$?" != "0" ] && ALL_VOLUMES_READY="false" && WRITE_CHECK="not writeable"
echo ${ACCUMULO_VOLUME} "- Safe mode is" ${SAFE_MODE_CHECK} "-" ${WRITE_CHECK}
done
[ "${ALL_VOLUMES_READY}" == "true" ] || sleep 10
done
[ "${ALL_VOLUMES_READY}" != "true" ] && echo "$(date) - ERROR: Timed out waiting for HDFS instances to be ready..." && exit 1
fi
echo "Initializing Accumulo..."
accumulo init --instance-name ${ACCUMULO_INSTANCE_NAME} --password ${PASSWORD}
fi
# Hand off to the real command under dumb-init (PID-1 signal handling).
exec /usr/bin/dumb-init -- "$@"
| true
|
c7754237be22e069a651ad20f38794ef7c765b3f
|
Shell
|
admd/jdk9-jigsaw
|
/session-1-jigsaw-intro/04_Packaging/packing.sh
|
UTF-8
| 1,912
| 3.75
| 4
|
[
"CC0-1.0"
] |
permissive
|
#!/bin/bash
# Package the org.astro and com.greetings modules (compiled into mods/) as
# modular jars under mlib/, then print their module descriptors and contents.
set -eu
# Escape code
esc=$(echo -en "\033")
info="${esc}[0;33m"
normal=$(echo -en "${esc}[m\017")
# runTree DIR -- display DIR recursively; uses Windows tree.exe under
# cygwin/msys, the Unix 'tree' command otherwise.
runTree()
{
if [[ "$OSTYPE" == "cygwin" ]] || [[ "$OSTYPE" == "msys" ]] ; then
cmd //c "tree /f /a $1"
else
tree -fl $1
fi
}
echo ""
echo "${info} *** Creating folder 'mlib' for the packages (jar files) to be created *** ${normal}"
mkdir -p mlib # this must be created in order for the jar commands below to be successful
echo ""
echo "${info} *** Creating module org.astro as a package file (jar) *** ${normal}"
jar --create \
--file mlib/org.astro@1.0.jar \
--module-version 1.0 \
-C mods/org.astro .
echo ""
echo "${info} *** Creating module com.greetings as a package file (jar) *** ${normal}"
jar --create \
--file mlib/com.greetings.jar \
--main-class=com.greetings.Main \
-C mods/com.greetings .
echo ""
echo "${info} *** Displaying the contents (package files created) of the 'mlib' folder *** ${normal}"
runTree mlib
echo ""
echo "${info} *** Printing module description for org.astro as recorded in the module-info.class file in the package (jar) *** ${normal}"
jar --verbose \
--describe-module \
--file=mlib/org.astro@1.0.jar
echo ""
echo "${info} *** Displaying contents of the module package org.astro@1.0 *** ${normal}"
jar --verbose \
--list \
--file=mlib/org.astro@1.0.jar
echo ""
echo "${info} *** Printing module description for com.greetings as recorded in the module-info.class file in the package (jar) *** ${normal}"
jar --verbose \
--describe-module \
--file=mlib/com.greetings.jar
echo ""
echo "${info} *** Displaying contents of the module package com.greetings *** ${normal}"
jar --verbose \
--list \
--file=mlib/com.greetings.jar
# Run 'jar --help' and 'jar --help-extra' to learn about all the above parameters used to create package files (jar) and also read their descriptions
| true
|
3e3087b5c9074de0845ef03889a981c08dc8fa2e
|
Shell
|
ryanmjacobs/camera-stats
|
/collect_data.sh
|
UTF-8
| 1,629
| 4.5625
| 5
|
[] |
no_license
|
#!/bin/bash
################################################################################
# collect_data.sh
#
# Scapes an input directory for geolocation exif data.
#
# January 15, 2015
################################################################################
# File that the gathered exif data is written to.
output=exif_data.json

# stderr MESSAGE... -- print the arguments on standard error.
stderr() {
  printf '%s\n' "$*" 1>&2
}

# abort MESSAGE... -- print the arguments on standard error, preceded by a
# blank line and with backslash escapes expanded (echo -e semantics), then
# terminate the script with status 1.
abort() {
  printf '\n%b\n' "$*" 1>&2
  exit 1
}
# Show usage if we got no arguments
if [ $# -eq 0 ]; then
stderr "Scapes an input directory for geolocation exif data."
stderr "Usage: $0 [directory...]"
exit 1
fi
# Check for dependencies: exiftool, tee
stderr "Checking dependencies..."
for dep in exiftool tee; do
if ! type "$dep"; then
abort "error: please install '$dep'"
fi
done
# Check that the user gave us *existing* directories
for dir in "$@"; do
if [ ! -d "$dir" ]; then
abort "error: '$dir' is not a directory"
fi
done
# Collect GPS data
# Only images that carry all four GPS tags (the -if filter) are emitted.
stderr "Gathering data..."
exiftool -f -fast -json -recurse -progress\
 -coordFormat "%.8f"\
 -GPSAltitude -GPSAltitudeRef -GPSLatitude -GPSLongitude -GPSCoordinates\
 -if 'length($GPSAltitude) && length($GPSAltitudeRef) &&\
 length($GPSLatitude) && length($GPSLongitude)'\
 "$@" > "$output"
# Check whether or not we got any data
if [ ! -s "$output" ]; then
rm "$output"
abort "error: couldn't find any geolocation data :("
fi
# Convert Lat. and Long. to numbers
# NOTE(review): only N/E latitudes and S/W longitudes are rewritten below;
# southern-hemisphere latitudes ("... S") and eastern longitudes keep their
# quoted string form -- confirm whether that is intended.
sed -i 's/\("GPSLatitude": "\)\([0-9]*\.[0-9]*\) [NE]/\1\2/g' "$output"
sed -i 's/\("GPSLongitude": "\)\([0-9]*\.[0-9]*\) [SW]/\1-\2/g' "$output"
| true
|
ef14673968249c96008dcdf3e82e6b77070397bd
|
Shell
|
michaelcolletti/tt-complete
|
/deployments/helm/create_monitoring.sh
|
UTF-8
| 1,073
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Install Prometheus and Grafana via Helm and port-forward their server pods
# to localhost (9090 and 3000). The helm output for each install is appended
# to an *_Instructions.txt file for later reference.
#
PROMINFO=Prometheus_Instructions.txt
GRAFINFO=Grafana_Instructions.txt

# Clean up port-forwards left over from a previous run.
# (Original ran "kill jobs %1 %2" -- "jobs" is not a valid PID or jobspec.)
kill %1 %2 >/dev/null 2>&1

printf "Checking for existing sockets \n"
netstat -na | egrep '3000|9090'

printf "Install Prometheus \n"
helm install prometheus stable/prometheus | tee -a "$PROMINFO"
printf "Sleeping at %s \n\n" "$(date)"
sleep 20
printf "_________________________ \n\n" >>"$PROMINFO"
POD_NAME=$(kubectl get pods --namespace default -l "app=prometheus,component=server" -o jsonpath="{.items[0].metadata.name}")
export POD_NAME
# The redirection must precede '&': the original "cmd & >>file" backgrounded
# the command and left the redirection attached to an empty command.
kubectl --namespace default port-forward "$POD_NAME" 9090 >>"$PROMINFO" &

printf "Install Grafana Chart \n"
helm install grafana stable/grafana | tee -a "$GRAFINFO"
printf "Sleeping at %s \n\n" "$(date)"
sleep 20
printf "################ \n\n" >>"$GRAFINFO"
POD_NAME=$(kubectl get pods --namespace default -l "app.kubernetes.io/name=grafana,app.kubernetes.io/instance=grafana" -o jsonpath="{.items[0].metadata.name}")
export POD_NAME
kubectl --namespace default port-forward "$POD_NAME" 3000 &

printf "Uninstall via helm : helm uninstall prometheus grafana \n"
| true
|
337419cd9a1ca9152337d5b5139a38c6091b363d
|
Shell
|
Cheppers/php-code-analysis
|
/files/pathreplace.sh
|
UTF-8
| 72
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash
# pathreplace.sh FILE FROM TO
# Replace every occurrence of FROM with TO in FILE, in place.
# '=' is used as the sed delimiter so FROM/TO may contain '/' (e.g. paths);
# they must therefore not contain '=' themselves.
FILE=$1
FROM=$2
TO=$3
# "$FILE" is quoted (original was unquoted and broke on paths with spaces
# or glob characters).
sed -i -e "s=${FROM}=${TO}=g" "$FILE"
| true
|
94dab22781ad685ffc85ae17128b17ae4f144153
|
Shell
|
hugme/nac
|
/nac-ui/cgi-bin/bin/commands_edit
|
UTF-8
| 4,934
| 3.671875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# CGI handler for the "commands" admin pages: parses QUERY_STRING and
# dispatches on DO= (NEW / DELETE / SHOW / UPDATE / COMMAND), emitting HTML
# and reading/writing the 'commands' table via the sourced sql_* helpers.
# set some variables
. $(cat varset)
use_admin_functions
use_base_functions
use_bust_functions
use_date_functions
use_check_functions
use_form_functions
#echo $QUERY_STRING
nno=0
# Split the query string on '&' and pick out the parameters we understand.
# bustit / sql_active_bust are de-escaping helpers from the sourced libs.
for X in ${QUERY_STRING//&/ }; do
case $X in
SCR=*) SCR=${X#*=};;
DO=*) DO=${X#*=};;
COMMAND_ID=*) COMMAND_ID=${X#*=};;
DEL_NAME=*) NEW_NAME=$(bustit ${X#*=});;
NEW_NAME=*) NEW_NAME=$(bustit ${X#*=});;
NEW_COMMAND=*) NEW_COMMAND=$(sql_active_bust ${X#*=} | sed 's/%no pipes%/\|/g;s/\\/\\\\/g');;
esac
done
# DO=NEW: validate the proposed name, insert it, then fall through to the
# single-command edit page (DO=COMMAND) on success or the list on failure.
[[ $DO == NEW ]] && {
[[ ${#NEW_NAME} -gt 64 ]] && ERROR="$ERROR<br>That name is too long. Please pick something shorter"
[[ ${#NEW_NAME} -lt 2 ]] && ERROR="$ERROR<br>That name is too short. Please pick something longer"
[[ ! -z "$(echo "${NEW_NAME}" | tr -d "[A-Za-z0-9_.\-]")" ]] && ERROR="$ERROR <br> Invalid charcters in the name"
REPEAT=$(echo "select name from commands where name='$NEW_NAME';" |sql_query)
[[ ! -z $REPEAT ]] && ERROR="${ERROR} <br> That name already exists Please choose another"
[[ -z $ERROR ]] && {
echo "insert into commands (name) values ('$NEW_NAME');"|sql_update
COMMAND_ID=$(echo "select command_id from commands where name='$NEW_NAME';" |sql_query)
DO=COMMAND
} || {
echo "<p id=error>$ERROR</p>"
DO=SHOW
}
}
# DO=DELETE: remove the command if it exists; on failure re-show its page.
[[ $DO == DELETE ]] && {
REPEAT=$(echo "select command_id from commands where command_id='$COMMAND_ID';" |sql_query)
[[ -z $REPEAT ]] && ERROR="$ERROR <br> That command does not exist"
#### <<-- check to see if this command is beign used anywhere
[[ -z $ERROR ]] && {
echo "delete from commands where command_id='$COMMAND_ID'" |sql_update
[[ $? = 0 ]] && unset DO || {
ERROR="$ERROR <br> There was an unknown error in deleting that command."
DO=COMMAND
}
} || {
#echo "<p id=error>$ERROR</p>"
DO=COMMAND
}
}
# Default / DO=SHOW: list all commands in a 5-column table of edit links.
[[ -z $DO || $DO == SHOW ]] && {
cat <<- EOF
<center><b>Commands</center></b><br>
<form method=POST action=/cgi-bin/auth.cgi>
<center>New Command: <input type=text name="NEW_NAME" size="30">
<input type=hidden name=SCR value=$SCR>
<input type=hidden name=DO value=NEW>
<input type=submit value="Add It">
</form></center><br>
<table border=1 cellspacing=0 cellpadding=5 align=center>
EOF
i=1
while IFS="|" read COMMAND_ID NAME _ ; do
[[ $i == 0 ]] && echo "<tr>"
echo "<td><a href=\"/cgi-bin/auth.cgi?SCR=$SCR&DO=COMMAND&COMMAND_ID=$COMMAND_ID\">$NAME</a></td>"
[[ $i == 4 ]] && { echo "</tr>"; i=0; }
i=$(($i+1))
done< <(echo "select command_id,name from commands order by name" | sql_query)
echo "</table>"
exit 0
}
# DO=UPDATE: validate the new name/command text, write it back, then show
# the edit page again (DO=COMMAND).
[[ $DO == UPDATE ]] && {
## do your checks
EXISTS=$(echo "select command_id from commands where command_id='$COMMAND_ID';" |sql_query)
[[ -z $EXISTS ]] && {
ERROR="$ERROR <br> That Command does not exist"
} || {
[[ -z $NEW_NAME ]] && ERROR="$ERROR <br> The Command name is required"
[[ ${#NEW_NAME} -gt 64 ]] && ERROR="$ERROR<br>Your command name is too long. Please pick something shorter"
[[ ${#NEW_BUILD_NAME} -gt 1024 ]] && ERROR="$ERROR<br>Your Command is too long. Please pick something shorter"
[[ ! -z "$(echo "${NEW_NAME}" | tr -d "[A-Za-z0-9+_.\-]")" ]] && ERROR="$ERROR <br> Invalid charcters in your Command name"
[[ ! -z "$(echo "${NEW_COMMAND}" | tr -d "[ A-Za-z0-9+_.,\\\\\"\*:=;\|/&%$\-]")" ]] && ERROR="$ERROR <br> \
Invalid charcters in your Command ==$(echo "${NEW_COMMAND}" | tr -d "[ A-Za-z0-9+_.,\\\\\"\*:=;\|/&%$\-]")=="
[[ -z $ERROR ]] && UPDATE="name='$NEW_NAME', command='$NEW_COMMAND'"
[[ -z $ERROR ]] && {
echo "update commands set $UPDATE where command_id='$COMMAND_ID';" | sql_update
#echo "<br><br>update commands set $UPDATE where command_id='$COMMAND_ID';<br>===$UPDATE==="
}
########### <<-- Write everything to autit lots, error logs and complaint logs
}
DO=COMMAND
}
# DO=COMMAND: render the edit form (update + delete) for one command.
[[ $DO == COMMAND ]] && {
IFS="|" read NAME _ <<< "$(echo "select name from commands where command_id='$COMMAND_ID';" | sql_query)"
cat <<- EOF
<p id=error>$ERROR</p>
<form method=POST action=/cgi-bin/auth.cgi>
<center><b>$NAME</b></center>
<br>
Name: <input type=text name="NEW_NAME" value="$NAME" size="100"><br>
Command: <input type=text name="NEW_COMMAND" value="$(echo "$(echo "select command from commands where command_id='$COMMAND_ID';" | sql_query)" | sed 's/"/\"/g;s/\\/\\/g')" size="100"><br>
EOF
cat <<- EOF
<br><br>
<input type=hidden name=SCR value=$SCR>
<input type=hidden name=DO value=UPDATE>
<input type=hidden name=COMMAND_ID value=$COMMAND_ID>
<input type=submit value="Update This Command">
</form><br>
<form method=POST action=/cgi-bin/auth.cgi>
<input type=hidden name=SCR value=$SCR>
<input type=hidden name=DO value=DELETE>
<input type=hidden name=COMMAND_ID value=$COMMAND_ID>
<input type=submit value="Delete This Command">
</form><br>
EOF
}
| true
|
01c89df119b803ea6b1332a9e6a438a40c302e72
|
Shell
|
Copser/torch
|
/bin/test
|
UTF-8
| 2,906
| 3.171875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# CI test driver: compile/lint/doc/test the Elixir package, report coverage,
# lint the JS assets, then run the Phoenix 1.3/1.4/1.5 integration suites
# whenever the local Elixir/OTP versions support them.
TEST_ROOT="$(pwd)/test"
ASSETS_ROOT="$(pwd)/assets"
MIX_ENV=test mix compile --warnings-as-errors --force || {
echo 'Please fix all compiler warnings.'
exit 1
}
MIX_ENV=test mix credo --strict --ignore design,consistency || {
echo 'Elixir code failed Credo linting. See warnings above.'
exit 1
}
MIX_ENV=test mix docs || {
echo 'Elixir HTML docs were not generated!'
exit 1
}
MIX_ENV=test mix test || {
echo 'Elixir tests on Torch failed!'
exit 1
}
# Coverage is only reported on CI; the travis reporter is used when $TRAVIS
# is set, the plain reporter otherwise.
if [ $CI ]; then
if [ $TRAVIS ]; then
echo "----------------------------------------------------------"
echo "Running coveralls.travis..."
echo "----------------------------------------------------------"
mix coveralls.travis || {
echo 'Elixir coverage on Torch failed!'
exit 1
}
else
echo "----------------------------------------------------------"
echo "Running coveralls..."
echo "----------------------------------------------------------"
mix coveralls || {
echo 'Elixir coverage on Torch failed!'
exit 1
}
fi
fi
# Run JS assets lint checks
cd "$ASSETS_ROOT" && {
npm install || {
echo 'NPM install failed!'
exit 1
}
}
cd "$ASSETS_ROOT" && {
npm run test || {
echo 'NPM test failed!'
exit 1
}
}
# Run integration tests
iexCurrentVersion="$(iex -v | tail -n 1 | cut -d " " -f2)"
echo "[DEBUG] Current Elixir Version: $iexCurrentVersion"
otpCurrentVersion="$(erl -eval 'erlang:display(erlang:system_info(otp_release)), halt().' -noshell | sed 's/\"//g')"
echo "[DEBUG] Current OTP Version: $otpCurrentVersion"
# Version gate: "sort -V | head -n1" yields the smaller version, so the
# condition reads "requiredMaxVersion <= current version" -> skip.
requiredMaxVersion="1.12.0"
if [ "$(printf '%s\n' "$requiredMaxVersion" "$iexCurrentVersion" | sort -V | head -n1)" = "$requiredMaxVersion" ]; then
echo '[Skipping] Phoenix 1.3 is not supported on Elixir version 1.12+'
else
cd "$TEST_ROOT/support/apps/phx1_3" && {
bin/test || {
echo 'Integration tests on regular Phoenix 1.3 project failed!'
exit 1
}
}
fi
requiredMaxVersion="24"
if [ "$(printf '%s\n' "$requiredMaxVersion" "$otpCurrentVersion" | sort -V | head -n1)" = "$requiredMaxVersion" ]; then
echo '[Skipping] Phoenix 1.4 is not supported well on OTP 24+'
else
cd "$TEST_ROOT/support/apps/phx1_4" && {
bin/test || {
echo 'Integration tests on regular Phoenix 1.4 project failed!'
exit 1
}
}
fi
# Inverse gate: "sort -Vr | head -n1" yields the larger version, so this
# reads "current OTP <= 21" -> skip Phoenix 1.5.
requiredMinVersion="21"
if [ "$(printf '%s\n' "$requiredMinVersion" "$otpCurrentVersion" | sort -Vr | head -n1)" = "$requiredMinVersion" ]; then
echo '[Skipping] OTP 22+ is required for Phoenix 1.5+'
else
cd "$TEST_ROOT/support/apps/phx1_5" && {
bin/test || {
echo 'Integration tests on regular Phoenix 1.5 project failed!'
exit 1
}
}
fi
| true
|
286a3aca2c52875569717179b2d6a99a39007ded
|
Shell
|
nynhex/Perinoid_Linux_Project
|
/functions/unincorporated/whonix/virtual_box/virtual_box.sh
|
UTF-8
| 1,039
| 3.25
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#!/bin/bash
Add_apt_virtualbox(){
# Add the Oracle VirtualBox apt repository for the given distro codename and
# install dkms + virtualbox-5.0.
# $1 - distribution codename (e.g. "jessie", "trusty"); codenames not in the
#      supported list fall back to the "jessie" repository.
_flave="$1"
# -n "$(...)" instead of the original bare [ $(grep ...) ]: quoted so an
# empty match cannot collapse the test, and herestring is quoted too.
if [ -n "$(grep -oE "vivid|utopic|trusty|raring|quantal|precise|lucid|jessie|wheezy|squeeze" <<<"${_flave}")" ]; then
# apt only reads /etc/apt/sources.list.d/ -- the original wrote to
# /etc/apt/sources.d/, which apt ignores entirely.
echo "deb http://download.virtualbox.org/virtualbox/debian ${_flave} contrib" | tee -a /etc/apt/sources.list.d/virtualbox.list
else
echo "deb http://download.virtualbox.org/virtualbox/debian jessie contrib" | tee -a /etc/apt/sources.list.d/virtualbox.list
fi
# Import Oracle's signing key: prefer a local copy, fall back to download.
apt-key add oracle_vbox.asc || wget -q https://www.virtualbox.org/download/oracle_vbox.asc -O- | sudo apt-key add -
apt-get update && apt-get upgrade
apt-get install dkms
apt-get install virtualbox-5.0
}
Apply_virtualbox_whonix_security_mods(){
# Offset the BIOS clock of the Whonix VMs (anti-fingerprinting tweak).
# $1 - gateway VM name (default: Whonix-Gateway)
# $2 - workstation VM name (default: Whonix-Workstation)
# No-op when VBoxManage is not installed.
_gateway_name="${1:-Whonix-Gateway}" # original was missing the closing brace (syntax error)
_workstation_name="${2:-Whonix-Workstation}"
if [ -n "$(which VBoxManage)" ]; then
# The VBoxManage option is --biossystemtimeoffset (milliseconds);
# the original misspelled it as --biossysemtimeoffset.
VBoxManage modifyvm "$_gateway_name" --biossystemtimeoffset -35017
VBoxManage modifyvm "$_workstation_name" --biossystemtimeoffset +27931
fi
}
#Add_apt_virtualbox $_dist_id_nice
#Apply_virtualbox_whonix_security_mods "Whonix-Gateway" "Whonix-Workstation"
| true
|
fb16a2211211e55e2cf9aa8211b1dd5d8eb2c79d
|
Shell
|
Shijithinnate/TAMPM-StyleExpert
|
/bin/set_env.sh
|
UTF-8
| 319
| 3.59375
| 4
|
[] |
no_license
|
#creates .env file specific to environment in root folder
#and sets the environment variables in ElasticBeanstalk server through eb-cli
#
# Usage: set_env.sh <environment>   (e.g. "local", "staging")
# Reads <environment>.env from the current directory; each whitespace-separated
# KEY=VALUE token becomes one line of ../.env (values must not contain spaces).
VARS="$(< "$1.env")"
concat=""
rm -f ../.env
touch ../.env
# $VARS intentionally unquoted: word-splitting yields one token per variable.
for VAR in $VARS; do
concat+="$VAR "
# '%s' so a value containing '%' or '\' is not treated as a printf format
# (the original passed $VAR as the format string).
printf '%s\n' "$VAR" >> ../.env
done
if [ "$1" != "local" ]
then
# Push the same variables to the ElasticBeanstalk environment;
# $concat intentionally unquoted: one argument per KEY=VALUE token.
eb setenv $concat -e "$1"
fi
| true
|
af5431ea6a413564edb7c18a28c41f8aa82f407e
|
Shell
|
kamilotrulit/ea-corp-0828
|
/Downloads/app/HitLeap-Viewer
|
UTF-8
| 1,723
| 3.796875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# exists CMD -- succeed iff CMD resolves to something runnable in this shell
# (binary, builtin, function or alias), without printing anything.
exists()
{
  if command -v "$1" > /dev/null 2>&1; then
    return 0
  fi
  return 1
}
exit_with_message()
{
# Show $1 in a GUI error dialog -- zenity, kdialog or xmessage depending on
# the desktop session -- then terminate the script with a failure status.
MESSAGE=$1
TITLE='A problem has ocurred'
CALL_ZENITY='zenity --error --text "$MESSAGE" --title "$TITLE"'
CALL_KDIALOG='kdialog --error "$MESSAGE" --title "$TITLE"'
CALL_XMESSAGE='xmessage "$MESSAGE" -nearmouse'
# Check if GDMSESSION is set; if not, check tool availability
# (kdialog first, since it's the least common):
if [ -z "${GDMSESSION+x}" ]; then
if exists kdialog; then eval $CALL_KDIALOG
elif exists zenity; then eval $CALL_ZENITY
fi
exit -1
# Then, check desktop environment. $GDMSESSION is quoted throughout: the
# original unquoted comparisons would turn into invalid tests on an empty
# value, and "[ $GDMSESSION="kde-plasma" ]" (no spaces) was a single
# non-empty-string test that always succeeded.
elif
[ "$GDMSESSION" = "ubuntu" ] ||
[ "$GDMSESSION" = "ubuntu-2d" ] ||
[ "$GDMSESSION" = "gnome-shell" ] ||
[ "$GDMSESSION" = "gnome-classic" ] ||
[ "$GDMSESSION" = "gnome-fallback" ] ||
[ "$GDMSESSION" = "cinnamon" ] ||
[ "$GDMSESSION" = "default" ] ||
[ "$GDMSESSION" = "Lubuntu" ] ||
[ "$GDMSESSION" = "xfce" ] &&
exists zenity; then eval $CALL_ZENITY
elif
[ "$GDMSESSION" = "kde-plasma" ] &&
exists kdialog; then eval $CALL_KDIALOG
# If all fails, fallback to xmessage
else eval $CALL_XMESSAGE
fi
exit -1
}
# Refuse to run on anything but 64-bit Linux with a reachable X display.
if [ $(uname -m) != 'x86_64' ]; then
exit_with_message "HitLeap Viewer 3.0 only supports 64-bit Linux systems (x86_64)"
elif ! xset q &>/dev/null; then
echo "No X server at \$DISPLAY [$DISPLAY]" >&2
exit -1
fi
# Run from the directory containing this script so relative assets resolve.
APP_DIR="$(cd "$(dirname "$0")"; pwd)"
OLD_PWD=$PWD
cd "$APP_DIR"
# Check if flash is installed
# Ubuntu-specific
if dpkg -l adobe-flashplugin &> /dev/null; then
PPAPI_PLUGIN="--flash-plugin=/usr/lib/adobe-flashplugin/libpepflashplayer.so"
fi
# Launch the bundled lua runtime; $@ forwards the user's extra arguments.
LD_LIBRARY_PATH=$PWD ./lua HitLeap-Viewer.lua Linux $@ $PPAPI_PLUGIN
cd $OLD_PWD
| true
|
19e5de63a4104a9d452c9daa409700e3553a46ff
|
Shell
|
davidoram/ubuntu_with_tools
|
/curl_cors.sh
|
UTF-8
| 865
| 3.9375
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Retrieve CORS headers until we get a sucesfull response (empty)
#
# Usage is curl_cors.sh origin url retrycount wait
#
# eg:
#
#   curl_cors.sh http://svc_authentication:8080 http://edge_splitter:8080/1/Log 10 2
#
# Will retrieve the OPTIONS 10 times, with a pausesec of 2 sec in between each request until it gets an empty body response, or fails
#
# will exit 0 if k, non-zero on failure
#
origin=$1
url=$2
retrycount=$3
pausesec=$4

while [ "$retrycount" -gt 0 ]; do
retrycount=$((retrycount - 1))
output=$(curl -X OPTIONS --connect-timeout 2 -H "Access-Control-Request-Method: GET" -H "Origin: $origin" "$url")
# Capture curl's status immediately -- the original tested $? and then
# printed $? again inside the message, which by that point held the status
# of the [ ] test (always 0), not curl's exit code.
rc=$?
if [ "$rc" -ne 0 ]; then
echo "Command failed with code: $rc"
elif [ "$output" != "" ]; then
echo "Got error: ${output}"
else
echo "OK"
exit 0
fi
sleep "$pausesec"
done

echo "FAILED: Exceeded retry count"
exit 1
| true
|
f042ce8027ce0d3033d38c4d12d4f5d989fccddf
|
Shell
|
MayMeakes/hello-world
|
/python/monitor_db_connection.sh
|
UTF-8
| 1,604
| 2.875
| 3
|
[] |
no_license
|
#script_name:monitor_slce.sh
#date:20191120
#writer:Daniel
# Probe a list of Oracle instances via sqlplus; on connection failure retry
# up to 10 times (5s apart) and mail the DBA list if still unreachable.
echo "\n"
echo "\n"
echo "\n"
echo "\n"
echo "=============`date`================="
LOG=/tmp/slce_tmp.log
# sqlplusconnect -- run the probe query against ${SLCE_DB}; output (including
# any ORA-* errors) is redirected into $LOG by the callers below.
sqlplusconnect(){
#sqlplus -s sys/changed@slce029_monitor as sysdba >$LOG 2>&1<<EOF
sqlplus -s sys/changed@${SLCE_DB}_monitor as sysdba<<EOF
set head off
set pages 0 lines 200
set feedback off
SELECT DISTINCT SUBSTR(DOMAIN,2,7) FROM stub.COBRAND_SITE;
EOF
}
# Source the oracle env helper using any locally running instance's name
# (taken from the first pmon process found).
dummy_db=`ps -ef | grep pmon | grep -v grep|grep -v ASM | awk '{print $NF}' | cut -c 10- | sort | head -1`
. ~oracle/bin/db $dummy_db
MONITOR_DB=("slce001" "slce006" "slce009" "slcq061" "slce002" "slce003" "slce008")
for SLCE_DB in ${MONITOR_DB[@]}
do
sqlplusconnect >$LOG 2>&1
RC_SLCE=`sqlplus -s sys/changed@${SLCE_DB}_monitor as sysdba<<EOF
set head off
set pages 0 lines 200
set feedback off
SELECT DISTINCT SUBSTR(DOMAIN,2,7) FROM stub.COBRAND_SITE;
EOF
`
# NOTE(review): $RC_SLCE is unquoted here -- if the query returns nothing
# or an ORA- error, this [ ] test malfunctions; confirm intended behavior.
if [ $RC_SLCE = "$SLCE_DB" ]; then
echo "instance is runing"
elif [ `cat $LOG|grep ORA|wc -l` -ge 1 ] ; then
echo "Connection failed, try connecting again"
# Retry loop: give up (and page the DBAs) after the 10th attempt.
# NOTE(review): j is never reset between databases, so after one DB has
# exhausted its retries, later DBs get fewer than 10 -- confirm intended.
while true
do
sleep 5
let j++
sqlplusconnect >$LOG 2>&1
if [ `cat $LOG | grep ORA |wc -l` -le 0 ] ; then
echo "connect success"
break
#elif [ $j -eq 10 ] && [ `cat $LOG|grep ORA|wc -l` -ge 1 ] ; then
elif [ $j -eq 10 ] ; then
echo "${SLCE_DB}ecommdb is not runing as expected,please handle in time."
echo "${SLCE_DB}ecommdb is not runing as expected,please handle in time."|mailx -s "WARNING:${SLCE_DB} instance is unavailable," DL-SH-TO-DBA@ebay.com,gmei@stubhub.com,huajin@stubhub.com
break
fi
done
fi
done
| true
|
477dfc7aaa173c37af06e8b202df9f548842e98c
|
Shell
|
osjacky430/imu_driver
|
/.travis-install-arm-gcc
|
UTF-8
| 743
| 3.71875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Download and unpack the GNU Arm Embedded toolchain (8-2019-q3) into $HOME,
# skipping the download when a (e.g. CI-cached) installation already exists.

set -e # exit immediately if a command exits with a non-zero status
set -x # print commands and their arguments as they are executed

pushd "$HOME" # quoted: the original unquoted $HOME breaks on paths with spaces

# bash -x: True if file exists and is executable.
if [ ! -x gcc-arm-none-eabi-8-2019-q3-update/bin/arm-none-eabi-gcc ]; then
# URL is quoted: it contains '?' and ',' which are glob-active unquoted.
wget "https://developer.arm.com/-/media/Files/downloads/gnu-rm/8-2019q3/RC1.1/gcc-arm-none-eabi-8-2019-q3-update-linux.tar.bz2?revision=c34d758a-be0c-476e-a2de-af8c6e16a8a2?product=GNU%20Arm%20Embedded%20Toolchain,64-bit,,Linux,8-2019-q3-update" -O gcc.tar.bz2

# wget -O: download as filename
# tar -x: extract files from an archive
# tar -j: filter the archive though bzip2
# tar -f: use archive file
tar -xjf gcc.tar.bz2
fi
| true
|
5ffca318489d2e83f6973be98cbd2f5c987896d0
|
Shell
|
Escapist70/chromiumos
|
/src/platform/cryptohome/lib/cryptohome
|
UTF-8
| 12,764
| 3.6875
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Copyright (c) 2009 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# A collection of functions which implement the bare
# minimum of functionality for creating, mounting, and
# unmounting sparse image files with dmsetup.
# Declare the executable dependencies for this code.
# This includes bash builtins so we can stub them.
utils::declare_commands read echo mkdir test mount grep blockdev
utils::declare_commands pkill chown df tr cut dd cat openssl
utils::declare_commands losetup dmsetup tune2fs chmod xxd exec
utils::declare_commands head tail cp umount touch rm date true sleep
# Be sure to use our e4fsprogs instead of the stable version.
export PATH=/usr/lib/e4fsprogs-git/bin:${PATH}
utils::declare_commands resize2fs e4defrag e2fsck mkfs.ext4 &> /dev/null
# TODO: deal with missing commands..
function cryptohome::log() {
$echo "$($date +%s)[$$]: $@" >> $LOG_FILE
}
# is_mounted [mountpt]
# Succeeds (returns 0) when a non-tmpfs filesystem is mounted at the
# given mount point (default $DEFAULT_MOUNT_POINT).
# TODO: we should make sure there is no trailing slash
function cryptohome::is_mounted() {
  local mountpt="${1:-$DEFAULT_MOUNT_POINT}"
  # The pipeline's status is the status of the final grep: 0 only when a
  # matching mount entry survives the tmpfs filter.
  $mount | $grep "$mountpt" | $grep -qv tmpfs
}
# unmount [mountpt] [username]
# unmount [mountpt] [username]
# Tear down a mounted cryptohome: kill the user's processes, unmount the
# home directory, close the dm-crypt device, and detach the loop device.
function cryptohome::unmount() {
  local mountpt="${1:-$DEFAULT_MOUNT_POINT}"
  local user="${2:-$DEFAULT_USER}"
  cryptohome::log "unmount start"
  # SIGKILL anything still owned by the user so umount isn't blocked.
  # NOTE(review): '&& $true' does not actually mask a pkill failure (the
  # chain keeps pkill's status), and '&> /dev/null' applies only to $true;
  # harmless because the result is unchecked -- confirm intent.
  $pkill -9 -u $user && $true &> /dev/null
  $umount "$mountpt"
  cryptohome::close
  cryptohome::detach
  # Make sure the mountpoint can't be used on accident by a faulty log in.
  # TODO: enable this when the default login goes away.
  #$chown root:root "$mountpt"
  cryptohome::log "unmount finished"
}
# total_blocks [some_path]
# total_blocks [some_path]
# Print the size (in $BLOCK_SIZE units) of the filesystem holding the
# given path (default /home).  Prints 0 to stdout, a diagnostic to
# stderr, and returns 1 when df output cannot be parsed.
function cryptohome::total_blocks() {
  local target="${1:-/home}"
  # df -P gives POSIX one-line-per-filesystem output; -B reports in
  # $BLOCK_SIZE units.  Squeeze spaces, drop the header, take column 2.
  local disk_size="$($df -P -B $BLOCK_SIZE "$target" |
    $tr -s ' ' |
    $grep -v Filesystem |
    $cut -f2 -d' ')"
  if [[ -z "$disk_size" ]]; then
    echo 0
    echo "Disk appears to be less than 1G or df output is unparseable!" 1>&2
    return 1
  fi
  echo $disk_size
  return 0
}
# make_table masterkey [loopdev]
# make_table masterkey [loopdev]
# Emit a one-line device-mapper table mapping the entire loop device
# through the crypt target (aes-cbc-essiv:sha256) with the given key.
# Consumed by dmsetup create/reload via process substitution.
function cryptohome::make_table() {
  local masterkey="$1"
  local loopdev="${2:-$DEFAULT_LOOP_DEVICE}"
  [[ $# -lt 1 ]] && return 1 # argument sanity check
  # blockdev --getsize reports the device length in 512-byte sectors,
  # which is the unit dmsetup tables use.
  echo "0 $($blockdev --getsize $loopdev) crypt aes-cbc-essiv:sha256 "$masterkey" 0 $loopdev 0"
}
# maximize /path/to/image.img masterkey [online] [loopdev] [mapperdev]
# maximize /path/to/image.img masterkey [online] [loopdev] [mapperdev]
# Grow the sparse backing file toward the disk's capacity (leaving one
# block group of headroom), reload the dm-crypt table to cover the new
# size, then resize the filesystem: in one shot when offline, or
# rate-limited one block group at a time when online.
function cryptohome::maximize() {
  local image="$1"
  local masterkey="$2"
  local online="${3:-0}"
  local loopdev="${4:-$DEFAULT_LOOP_DEVICE}"
  local mapperdev="${5:-$DEFAULT_MAPPER_DEVICE}"
  [[ $# -lt 2 ]] && return 1 # argument sanity check
  cryptohome::log "maximize start"
  local blocks="$(cryptohome::total_blocks $image)"
  local current="$($blockdev --getsize64 $loopdev)"
  # getsize64 reports bytes; convert to $BLOCK_SIZE units for comparison.
  current=$((current / BLOCK_SIZE))
  # Keep one block group of slack so the disk is never filled completely.
  blocks=$((blocks - BLOCKS_IN_A_GROUP))
  if [[ "$blocks" -le "$current" ]]; then
    cryptohome::log "no work to do: $blocks <= $current"
    cryptohome::log "maximize end"
    return 0
  fi
  # Extend the sparse file (count=0 + seek only moves the EOF marker),
  # tell losetup to re-read the new capacity, then swap in a larger
  # crypt table while the mapper device is suspended.
  $dd if=/dev/zero of="$image" count=0 bs=$BLOCK_SIZE seek=$blocks
  $losetup -c "$loopdev"
  $dmsetup suspend $mapperdev
  $dmsetup reload $mapperdev <(cryptohome::make_table "$masterkey" "$loopdev")
  $dmsetup resume $mapperdev
  if [[ "$online" -eq 0 ]]; then
    cryptohome::check -f
    # Do it as quickly as possible if we are offline.
    $resize2fs $mapperdev
  else
    # If we're online, we don't want to saturate the I/O and it's
    # okay if it doesn't complete. So we add a block group at a time.
    # With lazy inode tables, this isn't adding much data to disk, but
    # it will blast several megabytes directly to disk if the image is
    # quite large. For small images, the metadata needed is very little,
    # but this rate limited resize won't take long either.
    local next_blocks=$((current + BLOCKS_IN_A_GROUP))
    $sleep 3 # TODO(wad) make start delay configurable
    while [[ "$next_blocks" -lt "$blocks" ]]; do
      $sleep 0.3 # TODO(wad) make configurable
      $resize2fs -f $mapperdev ${next_blocks} || true
      next_blocks=$((next_blocks + BLOCKS_IN_A_GROUP))
    done
  fi
  cryptohome::log "maximize end"
}
# create_minimal /path/to/image
# create_minimal /path/to/image
# Create a minimal sparse image file of BLOCKS_IN_A_GROUP+1 blocks
# (count=0 with seek writes no data, only sets the file length), sized
# so the first ext4 block group matches the groups added on resize.
function cryptohome::create_minimal() {
  local image="$1"
  [[ $# -lt 1 ]] && return 1 # argument sanity check
  $dd if=/dev/zero \
    of="$image" \
    bs=$BLOCK_SIZE \
    seek=$((BLOCKS_IN_A_GROUP + 1)) \
    count=0
}
# attach /path/to/image [loop device]
function cryptohome::attach() {
local image="$1"
local loopdev="${2:-$DEFAULT_LOOP_DEVICE}"
[[ $# -lt 1 ]] && return 1 # argument sanity check
$losetup "$loopdev" "$image"
}
# detach [loop device]
function cryptohome::detach() {
local loopdev="${1:-$DEFAULT_LOOP_DEVICE}"
$losetup -d "$loopdev"
}
# format max_resize_blocs [target mapper device] [loop device]
function cryptohome::format() {
local blocks="$1"
local mapperdev="${2:-$DEFAULT_MAPPER_DEVICE}"
local loopdev="${3:-$DEFAULT_LOOP_DEVICE}"
[[ $# -lt 1 ]] && return 1 # argument sanity check
$mkfs__ext4 -b $BLOCK_SIZE \
-O ^huge_file \
-E lazy_itable_init=1,resize=$blocks \
"$mapperdev"
$tune2fs -c -1 -i 0 "$mapperdev" # we'll be checking later.
}
# password_to_wrapper password salt_file [iteration_count]
# Create key from the passphrase using a per-user salt and
# an arbitrary iteration count for optional key strengthening.
# password_to_wrapper password salt_file [iteration_count]
# Derive the key-wrapping secret from the passphrase: repeatedly SHA-1
# the per-user salt concatenated with the previous round's output,
# 'itercount' times (optional key strengthening).  Creates a fresh
# 16-byte random salt file on first use.  Prints the wrapper to stdout.
function cryptohome::password_to_wrapper() {
  local password="$1"
  local salt_file="$2"
  local itercount="${3:-1}"
  local wrapped="$password"
  local count=0
  [[ $# -lt 2 ]] && return 1 # argument sanity check
  if [[ ! -f "$salt_file" ]]; then
    $head -c 16 /dev/urandom > $salt_file
  fi
  while [[ $count -lt "$itercount" ]]; do
    # NOTE(review): 'openssl sha1' output format differs across openssl
    # versions (some prefix "(stdin)= ") -- the wrapper value is whatever
    # string openssl emits; confirm both wrap and unwrap use the same build.
    wrapped="$($cat "$salt_file" <($echo -n "$wrapped") |
      $openssl sha1)"
    count=$((count+1))
  done
  $echo "$wrapped"
}
# master_key user_password userid [wrapped_keyfile]
function cryptohome::unwrap_master_key() {
local password="$1"
local userid="$2"
[[ $# -lt 2 ]] && return 1 # argument sanity check
local keyfile="${3:-$IMAGE_DIR/$userid/$KEY_FILE_USER_ZERO}"
local wrapper="$(cryptohome::password_to_wrapper \
"$password" "${keyfile}.salt")"
$openssl aes-256-ecb \
-in "$keyfile" -kfile <($echo -n "$wrapper") -md sha1 -d
}
# create_master_key user_password userid [wrapped_keyfile] [iters]
function cryptohome::create_master_key() {
local password="$1"
local userid="$2"
[[ $# -lt 2 ]] && return 1 # argument sanity check
local keyfile="${3:-$IMAGE_DIR/$userid/$KEY_FILE_USER_ZERO}"
local iters="${4:-1}"
local wrapper="$(cryptohome::password_to_wrapper \
"$password" "${keyfile}.salt" "$iters")"
local master_key="$($xxd -ps -l $KEY_SIZE -c $KEY_SIZE /dev/urandom)"
# openssl salts itself too, but this lets us do repeated iterations.
$openssl aes-256-ecb -out "$keyfile" -kfile <($echo -n "$wrapper") -md sha1 -e < <(echo -n $master_key)
$echo -n "$master_key"
}
# open masterkey [mapper dev] [loop dev]
function cryptohome::open() {
local masterkey="$1"
local mapperdev="${2:-$DEFAULT_MAPPER_DEVICE}"
local loopdev="${3:-$DEFAULT_LOOP_DEVICE}"
$dmsetup create "${mapperdev//*\/}" <(cryptohome::make_table "$masterkey")
}
# close [mapper dev]
function cryptohome::close() {
local mapperdev="${1:-$DEFAULT_MAPPER_DEVICE}"
$dmsetup remove -f "${mapperdev//*\/}"
}
# is_opened [mapper dev]
function cryptohome::is_opened() {
local mapperdev="${1:-$DEFAULT_MAPPER_DEVICE}"
if $test -b "$mapperdev"; then
return 0
else
return 1
fi
}
# is_attached [loop dev]
function cryptohome::is_attached() {
local loopdev="${1:-$DEFAULT_LOOP_DEVICE}"
if $test -b "$loopdev"; then
return 0
else
return 1
fi
}
# mount [mapper device] [mount point]
function cryptohome::mount() {
local mapperdev="${1:-$DEFAULT_MAPPER_DEVICE}"
local mountpt="${2:-$DEFAULT_MOUNT_POINT}"
$mount -o "$MOUNT_OPTIONS" "$mapperdev" "$mountpt"
}
# check [check argument] [mapper device]
function cryptohome::check() {
local arg="${1:-}"
local mapperdev="${2:-$DEFAULT_MAPPER_DEVICE}"
$e2fsck $arg -p "$mapperdev"
}
# prepare_skel [mount point]
function cryptohome::prepare_skel() {
local mountpt="${1:-$DEFAULT_MOUNT_POINT}"
if [[ ! -d $IMAGE_DIR/skel ]]; then
$mkdir -p $IMAGE_DIR/skel
$cp -r /etc/skel/. $IMAGE_DIR/skel
fi
if [[ ! -d $IMAGE_DIR/skel/logs ]]; then
$mkdir -p $IMAGE_DIR/skel/logs
fi
if [[ $mountpt/.xsession -nt $IMAGE_DIR/skel/.xsession ]]; then
$cp $mountpt/.xsession $IMAGE_DIR/skel
fi
return 0
}
# check_and_clear_loop
# Ensure $DEFAULT_LOOP_DEVICE is the next free loop device.  If it is
# busy, progressively tear down any lingering mount, dm mapping, and
# loop attachment, then re-check.  Returns 1 if it still can't be freed.
function cryptohome::check_and_clear_loop() {
  # TODO: use losetup -f explicitly and clean up on failure
  # `losetup -f` prints the first unused loop device.
  if [[ "$($losetup -f)" != "$DEFAULT_LOOP_DEVICE" ]]; then
    cryptohome::log "$DEFAULT_LOOP_DEVICE is unavailable!"
    if cryptohome::is_mounted; then
      cryptohome::log "attempting to unmount lingering mount"
      cryptohome::unmount
    fi
    if cryptohome::is_opened; then
      cryptohome::log "attempting to close a lingering dm device"
      cryptohome::close
    fi
    if cryptohome::is_attached; then
      cryptohome::log "attempting to detach the loop device"
      cryptohome::detach
    fi
    if [[ "$($losetup -f)" != "$DEFAULT_LOOP_DEVICE" ]]; then
      cryptohome::log "$DEFAULT_LOOP_DEVICE could not be freed."
      return 1
    fi
    cryptohome::log "default loop device freed for use"
  fi
  return 0
}
# mount_or_create userid password
function cryptohome::mount_or_create() {
local userid="$1"
local password="$2"
[[ $# -lt 2 ]] && return 1 # argument sanity check
local image="$IMAGE_DIR/${userid}/image"
IMAGE="$image" # exported for use by cleanup handlers/logging
# Ensure a sane environment.
if [[ ! -d "$IMAGE_DIR/$userid" ]]; then
$mkdir -p "$IMAGE_DIR/$userid"
fi
cryptohome::prepare_skel
# We need a master key file and an image
if [[ -f "$image" && -f "$IMAGE_DIR/$userid/$KEY_FILE_USER_ZERO" ]]; then
cryptohome::log "mount start"
if ! cryptohome::check_and_clear_loop; then
cryptohome::log "mount_or_create bailing"
return 1
fi
cryptohome::attach "$image"
local masterkey="$(cryptohome::unwrap_master_key "$password" "$userid")"
# TODO: we should track mount attempts so we can delete a broken mount.
# right now, we will just fail forever.
# So if a user image gets in a wedged state they get stuck in tmpfs
# land.
cryptohome::open "$masterkey"
# checking is not forced and will only impact login time
# if there is a filesystem error. However, we don't have
# a way to give a user feedback.
# TODO: add UI or determine if we should just re-image.
# Filesystem checking is disabled for Indy to further minimize initial
# login impact. However, we need to determine our priorities in this
# area.
# cryptohome::check
cryptohome::mount
# Make sure to pass along .xsession updates and keep it rx.
if [[ $IMAGE_DIR/skel/.xsession -nt $DEFAULT_MOUNT_POINT/.xsession ]]; then
$cp $IMAGE_DIR/skel/.xsession $DEFAULT_MOUNT_POINT/.xsession
$chmod 755 $DEFAULT_MOUNT_POINT/.xsession
fi
$chown $DEFAULT_USER $DEFAULT_MOUNT_POINT
$chmod 750 $DEFAULT_MOUNT_POINT
# Perform an online resize behind the scenes just in case it
# wasn't completed before.
trap - ERR # disable any potential err handlers
cryptohome::maximize "$image" "$masterkey" 1 &
disown -a
cryptohome::log "mount end"
else
cryptohome::log "create_and_mount start"
# Creates a sparse file roughly 131M (BLOCK_SIZE * BLOCKS_IN_A_GROUP) in
# size so that the first block group is about the same size as the others
# after resize.
cryptohome::create_minimal "$image"
if ! cryptohome::check_and_clear_loop; then
cryptohome::log "mount_or_create bailing"
return 1
fi
cryptohome::attach "$image"
local masterkey="$(cryptohome::create_master_key "$password" "$userid")"
cryptohome::open "$masterkey"
cryptohome::format "$(cryptohome::total_blocks $image)"
cryptohome::mount
$cp -r $IMAGE_DIR/skel/. $DEFAULT_MOUNT_POINT/
$chown -R $DEFAULT_USER $DEFAULT_MOUNT_POINT
$chmod 750 $DEFAULT_MOUNT_POINT
# Perform an online resize behind the scenes
# and remove the retry trap.
trap - ERR # disable any potential err handlers
cryptohome::maximize "$image" "$masterkey" 1 &
disown -a
cryptohome::log "create_and_mount end"
fi
return 0
}
| true
|
efea44289be08b655a84d08b4ba92bc1546d7ea1
|
Shell
|
supernova106/chef
|
/learn-chef/cookbooks/jenkins_slave/test/integration/default/bats/chrome_installed.bats
|
UTF-8
| 112
| 2.515625
| 3
|
[] |
no_license
|
#!/usr/bin/env bats
# Integration check for the jenkins_slave cookbook: the Google Chrome
# binary must be installed and resolvable on PATH.
@test "chrome binary is found in PATH" {
  # bats' `run` captures the exit status of `which` in $status.
  run which google-chrome
  [ "$status" -eq 0 ]
}
| true
|
80a05a0bef5100f3908b0e50a5767a2bb2c73c3c
|
Shell
|
BlancoSebastianEzequiel/LabUnix
|
/compile.sh
|
UTF-8
| 387
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/bash
# Build every exercise source in functions/ into an executable at the
# repo root, linking each against the shared functions.c/functions.h.
# Fixes: iterate a glob instead of parsing `ls` output, quote all
# expansions, use == inside [[ ]], and drop the unused string2 variable.
ROOT=./
DIR=functions/
SKIP='functions'            # the shared library sources, not an exercise
ARCHC=$DIR/functions.c
ARCHH=$DIR/functions.h
for case_file in "$DIR"/*; do
  # "foo" from "functions/foo.c" (strip from the first dot, then dirname).
  filename=$(basename "${case_file%%.*}")
  # Skip functions.c / functions.h themselves.
  if [[ "$filename" == "$SKIP" ]]; then
    continue
  fi
  printf 'Executing %s...' "$filename"
  gcc -std=c99 -Wall -Werror "$ARCHC" "$ARCHH" "$case_file" -o "$ROOT/$filename"
  printf 'OK\n'
done
| true
|
5f2ce9862b017662c0076daa4f0e809bc17acb65
|
Shell
|
thortineoc/sms-backend
|
/homework-service/scripts/run.sh
|
UTF-8
| 671
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/bash
# start/stop/status wrapper around the homework-service jar.
PID=""
# Sets PID to the pid(s) of any running homework-service JVM.
# NOTE: the sense is inverted -- returns 0 (success) when the service is
# NOT running (PID empty), 1 when it is.
running() {
    PID=$(ps aux | grep "java.*homework-service" | grep -v "grep" | awk '{print $2}')
    [[ $PID == "" ]]
}
running
case $1 in
start)
    echo "Starting homework-service at port 24026"
    # setsid + full redirection detaches the JVM from this terminal.
    # NOTE(review): '-Xmx 256m' appears after '-jar', so it is passed to
    # the application, not the JVM (JVM flags must precede -jar, and -Xmx
    # takes no space before the size) -- confirm intent.
    setsid java -jar ./homework-service.jar \
        -Xmx 256m \
        > /dev/null 2>&1 < /dev/null &
    ;;
stop)
    echo "Stopping homework-service"
    # `if $(running)` works because when a command substitution expands to
    # no words, bash uses the substitution's own exit status -- here,
    # success means "not running".
    if $(running); then
        echo "homework-service is not running"
        exit 1;
    fi
    kill $PID
    ;;
status)
    echo "homework-service status:"
    if $(running); then
        echo "Not running"
    else
        echo "Running at pid: $PID";
    fi
    ;;
*)
    echo "Usage: ./run.sh {start|stop|status}"
    exit 1
    ;;
esac
exit 0
| true
|
3c7a8056de202f924cdb6f9519352cde957ffa68
|
Shell
|
PEDASI/PEDASI
|
/scripts/goaccess.sh
|
UTF-8
| 512
| 3.234375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Generate a goaccess report (output file given as $1) from the nginx
# access logs, including whatever rotated/compressed logs are present.
# Fix: quote "$1" everywhere so output paths with spaces work.
if [[ "$#" -ne 1 ]]; then
    echo "Requires output file name"
    exit 1
fi
if [[ -f /var/log/nginx/access.log.2.gz ]]; then
    # Compressed rotations exist: stream them all via stdin ('-') in
    # addition to the two uncompressed logs.
    zcat /var/log/nginx/access.log.*.gz | goaccess -q --log-format=COMBINED -o "$1" /var/log/nginx/access.log /var/log/nginx/access.log.1 -
elif [[ -f /var/log/nginx/access.log.1 ]]; then
    # One rotated (uncompressed) log plus the current one.
    goaccess -q --log-format=COMBINED -o "$1" /var/log/nginx/access.log /var/log/nginx/access.log.1 -
else
    # Only the current log exists.
    goaccess -q --log-format=COMBINED -o "$1" /var/log/nginx/access.log
fi
| true
|
02b7cca6b4cf418e4a1068df1e96015730c0d215
|
Shell
|
rashack/skel
|
/bin/firefox-latest-setup.sh
|
UTF-8
| 476
| 3.75
| 4
|
[] |
no_license
|
#!/bin/bash
# Install the most recently downloaded Firefox tarball into /usr/local
# and repoint the /usr/local/firefox symlink at it.
# Currently-linked install dir (informational only -- echoed below).
CURRENT=$(readlink /usr/local/firefox)
# Newest tarball by mtime.  NOTE(review): parses `ls` output; breaks on
# paths containing whitespace -- acceptable for ~/Downloads convention.
LATEST=$(ls -1tr ~/Downloads/firefox-*.tar* | tail -1)
# Extract "firefox-<version>" from the tarball name.
LATEST_VER=$(basename $LATEST | sed 's/.*\(firefox-[0-9\.]\+\)\.tar\..*/\1/')
echo "CURRENT=$CURRENT"
echo "LATEST=$LATEST"
echo "LATEST_VER=$LATEST_VER"
cd /usr/local
if [ -d $LATEST_VER ] ; then
    echo "The latest available version is already in place."
    exit 1
fi
# Replace the symlink: the tarball extracts to ./firefox, which is then
# renamed to the versioned directory and re-linked.
sudo rm firefox
sudo tar xf $LATEST
sudo mv firefox $LATEST_VER
sudo ln -s $LATEST_VER firefox
| true
|
c603ea9a8b9575a2ee90a5ed123fbb4357c4f9cb
|
Shell
|
aur-archive/remakepkg
|
/PKGBUILD
|
UTF-8
| 388
| 2.65625
| 3
|
[] |
no_license
|
# Maintainer: Deci Aria <deciare@isisview.org>
# Arch Linux PKGBUILD for remakepkg: a single bash script installed to
# /usr/bin.  Source is fetched locally, verified by md5.
pkgname=remakepkg
pkgver=1.0.1
pkgrel=1
pkgdesc="Converts an alterady-installed package back into an installable archive"
arch=('any')
license=('GPL')
url="https://github.com/deciare/remakepkg"
depends=('bash')
source=('remakepkg')
md5sums=('e6bad77ccb9939eb6590455ee6c30793')
# Install the script with install(1): -D creates leading directories.
package() {
  install -D "${srcdir}/remakepkg" "${pkgdir}/usr/bin/remakepkg"
}
| true
|
6305741cd7ef6a2edd0010839ee7830e5f5418eb
|
Shell
|
DerSalvador/xebialabs-docker-files
|
/xldeploy/ext/scripts/unzipWindowsASPWeb-rollback.bat.ftl
|
UTF-8
| 560
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/bash
<#list deployedApplication.environment.dictionaries as dict>
<#if dict.entries['IIS_HOME']??>
<#else>
</#if>
</#list>
<#list deployedApplication.deployeds as deployed>
IIS_TARGET="${deployed.targetPath}"
echo "deployed targetpath=" ${deployed.targetPath}
${deployed.targetPath}
</#list>
powershell -NonInteractive -ExecutionPolicy Unrestricted -WindowStyle Hidden -Command "Get-ChildItem '${deployed.targetPath}' -Filter ZKBPoC.zip | Expand-Archive -DestinationPath '${deployed.targetPath}\ZKBPoC' -Force"
exit 0
| true
|
8b53580f93ecdecc7c3926b3bc06cc2d0c28250b
|
Shell
|
gbsf/archlinux-packages
|
/libdrm/trunk/PKGBUILD
|
UTF-8
| 496
| 2.578125
| 3
|
[] |
no_license
|
# $Id$
# Maintainer: Jan de Groot <jgc@archlinux.org>
# Arch Linux PKGBUILD for libdrm (legacy style: build+install combined,
# with explicit `|| return 1` error propagation predating modern makepkg).
pkgname=libdrm
pkgver=2.3.0
pkgrel=1
pkgdesc="Userspace interface to kernel DRM services"
arch=(i686 x86_64)
depends=('glibc')
options=('nolibtool')
url="http://dri.freedesktop.org/"
source=(http://dri.freedesktop.org/${pkgname}/${pkgname}-${pkgver}.tar.bz2)
md5sums=('01a1e1ee0268a2403db42fa630036ab2')
build() {
  cd ${startdir}/src/${pkgname}-${pkgver}
  ./configure --prefix=/usr
  make || return 1
  make DESTDIR=${startdir}/pkg install || return 1
}
| true
|
087aa952392cb4651587c6ee2af3f4c163807f8f
|
Shell
|
mypaceshun/practice
|
/bin/smart-install.sh
|
UTF-8
| 829
| 3.984375
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# -*- coding: utf-8 -*-
# vi: set expandtab shiftwidth=4 :
# auther: shun kawai
#===============================
# smart-install.sh
#===============================
# err masage
# Print the usage synopsis and abort with a non-zero status.
function usage_exit() {
    printf '%s\n' 'Usage: smart-install packa-gename'
    exit 1
}
# Report that the requested package ($package, set by main) is already
# present and terminate successfully -- nothing left to do.
function already_installed() {
    printf '%s is already installed\n' "$package"
    exit 0
}
# functions
# Install $package on Ubuntu via apt, skipping (and exiting 0) when
# dpkg already knows it.
# Fix: the original wrapped `dpkg -l ...` in backticks, which executed
# dpkg's (redirected-away) output as a command and only worked because an
# empty expansion inherits the substitution's exit status.  Test the
# command directly instead, and quote the package name.
function install_Ubuntu() {
    if dpkg -l "$package" > /dev/null 2>&1; then
        already_installed
    fi
    sudo apt install "$package" -y
}
# Placeholder: CentOS support is not implemented; just echoes the
# requested package name so the dispatch path is visible.
function install_CentOS() {
    echo $package
}
# main
# Exactly one argument (the package name) is required.
if [ $# -ne 1 ]; then
    usage_exit
fi
package=$1
# Dispatch on the distro name printed by the companion `get-os` helper
# (presumably another script in this repo's bin/ -- confirm).
case `get-os` in
    Ubuntu)
        install_Ubuntu
        ;;
    CentOS)
        install_CentOS
        ;;
    *)
        echo 'Unknown OS'
        echo ''
        usage_exit
        ;;
esac
| true
|
945707be8ead611a93dfa2d68f38b3ee1096ce91
|
Shell
|
wangjie07070910/sH_hybridization
|
/scripts/gene_duplication_tree.sh
|
UTF-8
| 1,394
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
#
# SNP calling in S. haemotobium hybridzone(s).
# NPlatt
# Neal.platt@gmail.com
# gene_duplication_tree.sh - build a tree of m8 peptidases to identify
#   gene duplication nodes
# Uses a conda to manage the enviroment
# Pipeline: extract HExxH-motif peptidase domains from pfam PF01457
#   sequences, align, filter, and build a bootstrapped RAxML tree.
# NOTE: several steps below are manual (marked); this script documents
#   the workflow rather than running unattended.
#Set up the environment
source /master/nplatt/schisto_hybridization/scripts/set_env.sh
source activate snp_calling
cd $RESULTS_DIR
mkdir m8_gene_family_expansion
cd m8_gene_family_expansion
#get all platyhelminth m8s plus some outgroup metazoans from pfam:
#    outgroups are human, drosophila, celegans, danio
#    DL'd as ->PF01457_selected_sequences.fa
#convert to single line (-w0 = no line wrapping, one sequence per line)
fasta_formatter -w0 -i PF01457_selected_sequences.fa >SL.fas
#find HExxH (grep -B1 keeps the preceding header line with each hit)
cat SL.fas | grep -B1 HE..H >HExxH.fas
#extract peptidase
python ../../scripts/extract_peptidase.py HExxH.fas HExxH_peptidase.fas
#align
muscle -in HExxH_peptidase.fas -out HExxH_peptidase_muscle.fas
#manually edit headers
#m8_platy_meta_peptidase_HExxH_trimmed.fa
#remove sites with fewer than 75% aligned bases
#m8_platy_meta_75perc.fas
#cleanup for raxml (':' is reserved in newick trees; replace with '#')
sed -i 's/:/#/gi' m8_platy_meta_75perc.fas
#build a tree (WAG+gamma protein model, 100 bootstrap/search replicates)
raxmlHPC-PTHREADS \
    -T 12 \
    -m PROTGAMMAWAG \
    -p 12345 \
    -s m8_platy_meta_75perc.fas \
    -# 100 \
    -n m8_platy_meta_75perc
#this tree is imported into megas gene duplication wizard and species names maped
#    tree is manipulated in raxml and final tree is polished in inkscape
| true
|
f734debc9f195982cb90a67a1cc53a5aa8f5d30e
|
Shell
|
AmitKumarDas/oep-e2e
|
/scripts/director-health-checks/od-elasticsearch-check.sh
|
UTF-8
| 2,499
| 3.359375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Run the common Director health-check litmus job (templated for the
# od-elasticsearch pod) followed by the elasticsearch-specific litmus
# job; both must Pass for the combined result appended to result.txt.
# NOTE(review): the wait loops below poll until status == Completed with
# no timeout -- a crash-looping pod hangs this script forever; confirm an
# outer CI timeout covers that case.
## Run common health check
# Specify test name
test_name=od-elasticsearch-health-check
echo $test_name
# Instantiate the generic run_litmus_test.yml template for this check.
sed -e 's/generateName: app-check/generateName: od-elasticsearch-health-check/g' \
    -e 's/app: app-litmus/app: od-elasticsearch-health-check-litmus/g' \
    -e 's/value: test-name/value: od-elasticsearch-health-check/g' \
    -e 's/value: default /value: default/g' \
    -e 's/value: pod-name/value: od-elasticsearch/g' oep-e2e/litmus/director/common-checks/run_litmus_test.yml \
    > oep-e2e/litmus/director/common-checks/es_run_litmus_test.yml
cat oep-e2e/litmus/director/common-checks/es_run_litmus_test.yml
# Run common health check litmus job
kubectl create -f oep-e2e/litmus/director/common-checks/es_run_litmus_test.yml
sleep 2;
# Check common health check job status
litmus_pod=$(kubectl get po -n litmus | grep $test_name | awk {'print $1'} | tail -n 1)
echo $litmus_pod
job_status=$(kubectl get po $litmus_pod -n litmus | awk {'print $3'} | tail -n 1)
while [[ "$job_status" != "Completed" ]]
do
    job_status=$(kubectl get po $litmus_pod -n litmus | awk {'print $3'} | tail -n 1)
    sleep 6
done
# Print common health check job logs
kubectl logs -f $litmus_pod -n litmus
# Check common health check job result
commonCheck=$(kubectl get litmusresult ${test_name} --no-headers -o custom-columns=:spec.testStatus.result)
echo $commonCheck;
## Run od-elasticsearch specific limtus job
# Specify test name
test_name=od-elasticsearch-check
echo $test_name
# Run od-elasticsearch specific tests limtus job
kubectl create -f oep-e2e/litmus/director/od-elasticsearch/run_litmus_test.yml
sleep 2;
# Get od-elasticsearch check job's pod name
litmus_pod=$(kubectl get po -n litmus | grep $test_name | awk {'print $1'} | tail -n 1)
echo $litmus_pod
# Check od-elasticsearch check job status
job_status=$(kubectl get po $litmus_pod -n litmus | awk {'print $3'} | tail -n 1)
while [[ "$job_status" != "Completed" ]]
do
    job_status=$(kubectl get po $litmus_pod -n litmus | awk {'print $3'} | tail -n 1)
    sleep 6
done
# Print od-elasticsearch check job logs
kubectl logs -f $litmus_pod -n litmus
# Check od-elasticsearch check job result
elasticsearchCheck=$(kubectl get litmusresult ${test_name} --no-headers -o custom-columns=:spec.testStatus.result)
echo $elasticsearchCheck
# Combined verdict: both litmusresults must report Pass.
if [ "$commonCheck" = Pass ] && [ "$elasticsearchCheck" = Pass ]; then
    testResult=Pass
else
    testResult=Fail
fi
# Flush test result in result.txt
echo "$test_name: $testResult" >> result.txt;
| true
|
357ca7f7bc8b853b0fe30d619f5bb23ef66798ff
|
Shell
|
smartarch/afcens
|
/simulator/run-predictor-test.sh
|
UTF-8
| 254
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the afcens simulator with the given predictor id, then collect the
# final eatTicks value from every recorded trace into eat-ticks.csv.
# Fixes: the shebang was '#/bin/bash' (missing '!'), so the file was a
# plain comment line and the script ran under whatever shell invoked it;
# also quote the expansions derived from $1.
predId=$1
sbt "runMain afcens.TraceRecordWithPredictor $predId 0 1000"
pushd "traces/v2/$predId"
rm -f eat-ticks.csv
# Each numeric subdirectory holds gzipped JSON-lines traces; the last
# record of each carries the run's eatTicks summary field.
for d in [0-9]*; do
  for f in "$d"/*.jsonl.gz; do
    zcat "$f" | tail -n 1 | jq .eatTicks >> eat-ticks.csv
  done
done
popd
| true
|
c32a80f1e7d90dcf919c4716787aeac36ad96d26
|
Shell
|
somesh-ballia/mcms
|
/MCMS/Main/Scripts/CreateProcess.sh
|
UTF-8
| 475
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/sh
# Clone the Demo process skeleton into a new process named $1 under
# Processes/, scrub copied build artifacts, and rename all Demo
# references to the new name.
targetdir=Processes/$1
cp -R Processes/Demo $targetdir
mv $targetdir/DemoLib $targetdir/$1Lib
# Remove build artifacts inherited from the template.
rm -f $targetdir/*/*.o
rm -f $targetdir/*/*.depend
rm -f $targetdir/*/*.*keep*
rm -f $targetdir/*/*.*contrib*
rm -f $targetdir/*/*.a
# NOTE(review): '\S' is just an escaped 'S', so these run the relative
# path Scripts/RenameDemo.sh from the CWD -- confirm the backslash is not
# a typo for './'.
find $targetdir -name '*Demo*' -exec \Scripts/RenameDemo.sh {} $1 \;
find $targetdir -name 'Makefile' -exec \Scripts/RenameDemo.sh {} $1 \;
echo 'Process '$1' sources created in '$targetdir', Please add it to Include/McmsProcesses.h'
| true
|
d52de2dfa268cb2afdfb07a205da9507cdb0db23
|
Shell
|
moom0o/TCPTunnel
|
/create_client.sh
|
UTF-8
| 1,148
| 3.0625
| 3
|
[] |
no_license
|
# Assemble the client's .ovpn profile by concatenating the common config,
# CA cert, the client's cert (from the first BEGIN CERTIFICATE line on),
# the client's private key, and the shared tls-crypt key.
# Fix: removed the stray top-level ';;' after the final `exit` -- a dead
# leftover from the `case` statement in the upstream openvpn-install
# script this was derived from (unreachable, but a parse error if hit).
new_client () {
	# Generates the custom client.ovpn
	{
	cat /etc/openvpn/server/client-common.txt
	#echo "<ca>"
	cat /etc/openvpn/server/easy-rsa/pki/ca.crt
	#echo "</ca>"
	#echo "<cert>"
	sed -ne '/BEGIN CERTIFICATE/,$ p' /etc/openvpn/server/easy-rsa/pki/issued/"$client".crt
	#echo "</cert>"
	#echo "<key>"
	cat /etc/openvpn/server/easy-rsa/pki/private/"$client".key
	#echo "</key>"
	#echo "<tls-crypt>"
	sed -ne '/BEGIN OpenVPN Static key/,$ p' /etc/openvpn/server/tc.key
	#echo "</tls-crypt>"
	} > /home/moomoo/"$client".ovpn
}
# Pick the lowest numeric client name whose sanitized form has no
# existing certificate (names are restricted to [A-Za-z0-9_-]).
name=1
client=$(sed 's/[^0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_-]/_/g' <<< "$name")
while [[ -z "$client" || -e /etc/openvpn/server/easy-rsa/pki/issued/"$client".crt ]]; do
	name=$((name + 1))
	client=$(sed 's/[^0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_-]/_/g' <<< "$name")
done
cd /etc/openvpn/server/easy-rsa/
# Issue a passwordless client certificate valid for ~10 years.
EASYRSA_CERT_EXPIRE=3650 ./easyrsa build-client-full "$client" nopass
# Generates the custom client.ovpn
new_client
# Print the chosen client name for the caller.
echo "$client"
exit
| true
|
7a9e151e1a659639f6986310e7d437c3424bb4b7
|
Shell
|
dataseries/Lintel
|
/src/lintel-latex-rebuild.in
|
UTF-8
| 10,766
| 3.9375
| 4
|
[
"BSD-3-Clause",
"BSL-1.0",
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
#
# (c) Copyright 2008, Hewlett-Packard Development Company, LP
#
# See the file named COPYING for license details
#
# script for rebuilding latex files
usage() { # Full manpage is at the bottom
  # Print an optional caller-supplied message, then the synopsis.
  # NOTE(review): exits 0 even when invoked on an error path -- confirm
  # that callers (and CI) do not depend on a non-zero status here.
  if [ -n "$1" ]; then
    echo "$@"
  fi
  cat <<EOF
Usage: $0
 [--tex <path>] [--bib <path>] [--bst <path>] [--paper <paper-size>]
 [--error-on-warnings] [--latex-binary <path>] [--disable-flock]
 [--max-step-time <60 seconds>] [--max-retries <default 3>]
 <srcdir> <main-texfile-basename>
-----
 <path> is a : separated sequence of paths
EOF
  exit 0
}
# @...@ tokens are substituted by CMake's configure_file (this is a .in
# template); LATEX_COMMAND may be overridden later by --latex-binary.
LATEX_COMMAND="@LATEX_COMPILER@"
# --selfcheck: verify every external tool we shell out to exists and is
# executable; report the first missing one and exit 1, else exit 0.
if [ "$1" = "--selfcheck" ]; then
    for i in "$LATEX_COMMAND" @BIBTEX_COMPILER@ @DVIPS_CONVERTER@ @PS2PDF14_CONVERTER@; do
        if [ ! -x $i ]; then
            echo "lintel-latex-rebuild selfcheck failed; $i is missing"
            exit 1
        fi
    done
    exit 0
fi
PAPER_SIZE=letter
CONTINUE=true
ERROR_ON_WARNINGS=false
# TODO: make this variable an option, ask Joe why when it was added the default
# was 9000.
MIN_CROSSREFS=9000
USE_FLOCK=true
MAX_STEP_TIME=60
MAX_RETRIES=3
while $CONTINUE; do
case "$1" in
--tex)
shift
[ $# != 0 ] || usage
TEXINPUTS=$1:$TEXINPUTS
shift
;;
--bib)
shift
[ $# != 0 ] || usage
BIBINPUTS=$1:$BIBINPUTS
shift
;;
--bst)
shift
[ $# != 0 ] || usage
BSTINPUTS=$1:$BSTINPUTS
shift
;;
--paper)
shift
[ $# != 0 ] || usage
PAPER_SIZE=$1
shift
;;
--error-on-warnings)
shift
ERROR_ON_WARNINGS=true
;;
--latex-binary)
shift
[ $# != 0 ] || usage
LATEX_COMMAND="$1"
shift
;;
--disable-flock)
shift
USE_FLOCK=false;
;;
--max-step-time)
shift
[ $# != 0 ] || usage
MAX_STEP_TIME="$1"
shift
;;
--max-retries)
shift
[ $# != 0 ] || usage
MAX_RETRIES="$1"
shift
;;
*)
CONTINUE=false
;;
esac
done
[ $# != 2 -o "$1" = "-h" ] && usage "Missing arguments ($#)"
[ ! -d "$1" ] && usage "'$1' is not a directory"
[ ! -f "$1/$2.tex" ] && usage "'$1/$2.tex' is not a file"
[ `echo "$2" | wc -w` != 1 ] && usage "Main texfile is not one word"
TEXINPUTS=$1:$TEXINPUTS:@CMAKE_INSTALL_PREFIX@/share/lintel-latex-rebuild:
BIBINPUTS=$1:$BIBINPUTS:@CMAKE_INSTALL_PREFIX@/share/lintel-latex-rebuild:
BSTINPUTS=$1:$BSTINPUTS:@CMAKE_INSTALL_PREFIX@/share/lintel-latex-rebuild:
export TEXINPUTS
export BIBINPUTS
export BSTINPUTS
TEXFILE=$2
if [ -f $1/$TEXFILE.blg -o -f $1/$TEXFILE.bbl -o -f $1/$TEXFILE.dvi -o -f $1/$TEXFILE.aux ]; then
echo "some of $1/$TEXFILE.{blg,bbl,dvi,aux} already exist."
echo "this is likely to give you the wrong result; make clean in that dir?"
exit 1
fi
if $USE_FLOCK; then
FLOCK_BITS=`lintel-flock --filename=$TEXFILE.rebuild.lock --callpid=$$`
# Lock will be auto-released when calling process exits, so do nothing to explicitly release
case "$FLOCK_BITS" in
success:*) echo "Acquired rebuild lock $TEXFILE.rebuild.lock" ;;
*) echo "lintel-flock error: '$FLOCK_BITS'"; exit 1 ;;
esac
fi
# run_with_timeout cmd [args...]
# Run cmd under a watchdog: if it does not finish within $MAX_STEP_TIME
# seconds it is SIGTERMed and retried, up to $MAX_RETRIES attempts.
# On in-time completion, returns the command's exit status.
# NOTE(review): if every attempt times out, the while test eventually
# fails and the function returns 0 -- callers cannot distinguish
# "exhausted retries" from success; confirm that is acceptable.
run_with_timeout() {
    local retry_count=0
    while [ $retry_count -lt $MAX_RETRIES ]; do
        retry_count=`expr $retry_count + 1`
        echo "$0: Starting $* with timeout $MAX_STEP_TIME, try $retry_count of $MAX_RETRIES"
        "$@" &
        WORK_PID=$!
        # Watchdog subshell: sleep, then TERM the worker if still running.
        (sleep $MAX_STEP_TIME; kill -TERM $WORK_PID; echo "Timeout on $*; killed" 1>&2) &
        KILL_PID=$!
        wait $WORK_PID
        return_code=$?
        # If we can still TERM the watchdog, the worker finished first:
        # propagate its status.  Otherwise the watchdog already fired
        # (worker was killed) and we loop to retry.
        if kill -TERM $KILL_PID >/dev/null 2>&1; then
            : # normal completion
            wait $KILL_PID
            echo "$0: Finished $*, return code $return_code"
            return $return_code
        else
            wait $KILL_PID
            echo "$0: Timeout on $*, retrying"
        fi
    done
}
# Run one latex pass on $TEXFILE.tex (under the step timeout), appending
# all output to $TEXFILE.rebuild.out.  Sets the global latex_err.
rebuild_latex() {
    echo -n " latex"
    echo "" >>$TEXFILE.rebuild.out
    echo "--------------------------------------------------" >>$TEXFILE.rebuild.out
    echo "--- rebuild latex ---" >>$TEXFILE.rebuild.out
    echo "--------------------------------------------------" >>$TEXFILE.rebuild.out
    run_with_timeout $LATEX_COMMAND $TEXFILE.tex </dev/null >>$TEXFILE.rebuild.out 2>&1
    latex_err=$?
    case $LATEX_COMMAND in
        # perltex drops the exit code from latex so we have to infer it from the output.
        *perltex) [ `grep 'Emergency stop.' $TEXFILE.rebuild.out | wc -l` -ge 1 ] && latex_err=77 ;;
    esac
}
# Run bibtex (under the step timeout) when the .aux file contains
# citations; otherwise skip.  Sets the global bibtex_err.
rebuild_bibtex() {
    if grep citation $TEXFILE.aux >/dev/null 2>&1; then
        echo -n " bibtex"
        echo "" >>$TEXFILE.rebuild.out
        echo "--------------------------------------------------" >>$TEXFILE.rebuild.out
        echo "--- rebuild bibtex ---" >>$TEXFILE.rebuild.out
        echo "--------------------------------------------------" >>$TEXFILE.rebuild.out
        # --min-crossrefs set very high so cross-referenced entries are
        # never hoisted into the bibliography on their own.
        run_with_timeout @BIBTEX_COMPILER@ --min-crossrefs=$MIN_CROSSREFS $TEXFILE >>$TEXFILE.rebuild.out 2>&1
        bibtex_err=$?
    else
        echo -n " (skip-bibtex-no-citations)"
        bibtex_err=0
    fi
}
# One latex+bibtex round: truncates/initializes the log file, then runs
# both passes (setting latex_err / bibtex_err).
rebuild() {
    echo "Latex rebuild run at `date` on `hostname`" >$TEXFILE.rebuild.out
    rebuild_latex
    rebuild_bibtex
}
# Two initial rounds to get enough far through that it could be stable
echo -n "initial latex rounds:"
rebuild
if [ $latex_err != 0 ]; then
# clean up old files that may be bad and prevent a clean build.
rm $TEXFILE.aux $TEXFILE.bbl $TEXFILE.blg
rebuild
fi
rebuild_latex
echo " done."
i=0
# Run lots of times until it converges
while [ "`grep -c 'Rerun to get' $TEXFILE.rebuild.out`" != 0 ]; do
i=`expr $i + 1`
if [ $i -gt 10 ]; then
echo "ERROR: unable to achieve latex convergence after 9 tries."
echo "Log file is $TEXFILE.rebuild.out"
exit 1
fi
echo -n "latex convergence round $i:"
rebuild
rebuild_latex
echo
done
dvips_err=0
dvipdf_err=0
if [ $latex_err = 0 -a $bibtex_err = 0 ]; then
@DVIPS_CONVERTER@ -t $PAPER_SIZE -o $TEXFILE.ps.tmp $TEXFILE.dvi >>$TEXFILE.rebuild.out 2>&1
dvips_err=$?
# Make .ps file creation atomic
[ $dvips_err = 0 ] && mv $TEXFILE.ps.tmp $TEXFILE.ps
# Why not run dvipdf? The current version of dvipdf defaults to A4 paper
# and doesn't pass options to dvips (the underlying engine for dvipdf).
# so we call dvips and ps2pdf14 directly.
@DVIPS_CONVERTER@ -P pdf @DVIPS_P_cmz@ @DVIPS_P_amz@ -t $PAPER_SIZE -D 600 -Z -G0 -o $TEXFILE.pdftemp $TEXFILE.dvi >>$TEXFILE.rebuild.out 2>&1
dvipstemp_err=$?
# Note both options are needed to force all the fonts to be
# embedded; file size effect seems to be small in one test case;
# if that's wrong, then it should be made an option.
# you can verify font embedding with pdffonts
EMBEDFONTS="-dPDFSETTINGS=/prepress -dEmbedAllFonts=true"
# Doesn't seem to be any downside to using these options.
OPTIMIZE="-dOptimize=true -dUseFlateCompression=true"
@PS2PDF14_CONVERTER@ $EMBEDFONTS $OPTIMIZE $TEXFILE.pdftemp $TEXFILE.pdf14tmp >>$TEXFILE.rebuild.out 2>&1
ps2pdf14_err=$?
# make .pdf file creation atomic.
if [ $ps2pdf14_err = 0 ]; then
mv $TEXFILE.pdf14tmp $TEXFILE.pdf
rm $TEXFILE.pdftemp
fi
fi
if [ $latex_err != 0 ]; then
any_err=latex
elif [ $bibtex_err != 0 ]; then
any_err=bibtex
elif [ $dvips_err != 0 ]; then
any_err=dvips
elif [ $dvipstemp_err != 0 ]; then
any_err=dvipstemp
elif [ $ps2pdf14_err != 0 ]; then
any_err=ps2pdf14
fi
show_warnings() {
echo "--------- Warnings for $TEXFILE ----------"
grep -i warning $TEXFILE.rebuild.out
echo "--------- Warnings for $TEXFILE ----------"
}
cleanup_abort() {
echo "Full log file is $TEXFILE.rebuild.out"
echo "$any_err failed, removing $TEXFILE.{dvi,ps,pdf} files"
rm $TEXFILE.dvi $TEXFILE.ps $TEXFILE.pdf
exit 1
}
check_warnerror() {
if $ERROR_ON_WARNINGS; then
WARNING_COUNT=`grep -i warning $TEXFILE.rebuild.out | wc -l`
if [ $WARNING_COUNT != 0 ]; then
show_warnings
echo "ERROR: Warnings are currently treated as errors"
cleanup_abort
fi
fi
}
if [ "$any_err" = "" ]; then
check_warnerror
show_warnings
echo "Full log file is $TEXFILE.rebuild.out"
echo "Successful latex rebuild."
exit 0
else
echo "--------- ERROR in $TEXFILE -----------"
cat $TEXFILE.rebuild.out
echo "--------- ERROR in $TEXFILE -----------"
cleanup_abort
fi
POD=<<EOF
=pod
=head1 NAME
lintel-latex-rebuild - a program to rebuild latex documents correctly
=head1 DESCRIPTION
lintel-latex-rebuild takes an input .tex file and generates the derived .dvi, .ps, and .pdf files.
Rebuilding latex documents is surprisingly difficult. latex and bibtex need to be run several
times in order to guarantee they have stabilized; sundry specify options need to be provided to the
converters to generate ps and pdf that are generally usable. All of the options and techniques
have been worked out as a result of working around issues when creating documents and/or submitting
them to various conferences.
=head1 SYNOPSIS
% lintel-latex-rebuild <options> <srcdir> <main-texfile-basename>
=head1 OPTIONS
=over 4
=item --tex I<path>
Specify a : separated list of paths for latex and tex to search for inputs
=item --bib <path>
Specify a : separated list of paths for bibtex to search for bibliography inputs
=item --bst <path>
Specify a : separated list of paths for bibtex to search for bibliography style inputs
=item --paper <paper-size>
Specify the paper size, i.e. letter or A4
=item --error-on-warnings
If latex or bibtex generate warnings, exit with an error.
=item --latex-binary <path>
Specify the latex binary to use, e.g. pdftex.
=item --disable-flock
Disable the code that automatically uses lintel-flock to take out a lock on a file in /tmp.
This code avoids issues with parallel makes building the same document twice at the same time.
=item --max-step-time <60 seconds>
Specify the maximum time that an individual step can take. pdftex has been seen to hang
occasionally on centos. This option at least causes the build to stop.
=item --max-retries <3 tries>
Specify the maximum number of times to retry after a timeout. This allows us to succeed in
cases where the pdftex race condition would otherwise have caused failures. After max-retries
tries the build will stop.
=item <srcdir>
Specify the directory containing the sources for the latex file.
=item <main-texfile-basename>
Specify the basename of the main texfile, i.e. if the file is C<my-paper.tex>, this option would be
C<my-paper>. The resulting output will be C<my-paper>.{dvi,ps,pdf}
=back
=cut
EOF
| true
|
58e702ad20e3698c3663c880b033a532e78204c8
|
Shell
|
dlhuang/variationanalysis
|
/bin/rebuild-segment-datasets.sh
|
UTF-8
| 2,484
| 3.140625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
set -x
# (cd ~/goby3; git stash; git pull; mvn install)
# (cd ~/variationanalysis; git stash; git pull; ./build-all.sh -DskipTests=true)
DATE=`date +%Y-%m-%d`
if [ -e configure.sh ]; then
echo "Loading configure.sh"
source configure.sh
fi
if [ -z "${PREFIX+set}" ]; then
PREFIX="NA12878-GIAB"
echo "PREFIX set to ${PREFIX}. Change the variable to switch the naming of the produced dataset."
fi
if [ -z "${GENOMIC_CONTEXT_LENGTH+set}" ]; then
GENOMIC_CONTEXT_LENGTH="--genomic-context-length 1"
echo "GENOMIC_CONTEXT_LENGTH set to ${GENOMIC_CONTEXT_LENGTH}. Change the variable to switch the context length."
fi
if [ -z "${VARMAP+set}" ]; then
VARMAP="/scratchLocal/joc2080/reference_varmaps/NA12878-GIAB-gold.varmap"
echo "VARMAP set to ${VARMAP}. Change the variable to switch the location of the varmap with true calls."
fi
simulate-sbi.sh 10g -i ${VARMAP} \
--include chr4 --include chr6 --include chr8 \
-o "${PREFIX}-training.sbi" \
--genome ${SBI_GENOME} ${GENOMIC_CONTEXT_LENGTH}
simulate-sbi.sh 10g -i ${VARMAP} \
--include chr16 -o "${PREFIX}-validation.sbi" \
--genome ${SBI_GENOME} ${GENOMIC_CONTEXT_LENGTH}
simulate-sbi.sh 10g -i ${VARMAP} \
--include chr20 -o "${PREFIX}-test.sbi" \
--genome ${SBI_GENOME} ${GENOMIC_CONTEXT_LENGTH}
OPTIONS="-g 10 --map-features -s INDEL1 --sampling-rate 0.01 ${GENOMIC_CONTEXT_LENGTH} "
sbi-to-ssi.sh 10g -i "${PREFIX}-training.sbi" -o "${PREFIX}-training-${DATE}" ${OPTIONS} $@
sbi-to-ssi.sh 10g -i "${PREFIX}-validation.sbi" -o "${PREFIX}-validation-${DATE}" ${OPTIONS} $@
sbi-to-ssi.sh 10g -i "${PREFIX}-test.sbi" -o "${PREFIX}-test-${DATE}" ${OPTIONS} $@
randomize-ssi.sh 10g -i "${PREFIX}-training-${DATE}" -o "${PREFIX}-random-${DATE}-train"
randomize-ssi.sh 10g -i "${PREFIX}-validation-${DATE}" -o "${PREFIX}-random-${DATE}-validation"
randomize-ssi.sh 10g -i "${PREFIX}-test-${DATE}" -o "${PREFIX}-random-${DATE}-test"
sbi-to-ssi.sh 10g -i "${PREFIX}-validation.sbi" -o "${PREFIX}-validation-2018-01-12" -g 10 --map-features -s INDEL1 --sampling-rate 0.01 --genomic-context-length 21
sbi-to-ssi.sh 10g -i "${PREFIX}-test.sbi" -o "${PREFIX}-test-2018-01-12" -g 10 --map-features -s INDEL1 --sampling-rate 0.01 --genomic-context-length 21
randomize-ssi.sh 10g -i "${PREFIX}-validation-2018-01-12" -o "${PREFIX}-random-2018-01-12-validation"
randomize-ssi.sh 10g -i "${PREFIX}-test-2018-01-12" -o "${PREFIX}-random-2018-01-12-test"
| true
|
548769effa263aa0cda24cef44436e58f2f55ac5
|
Shell
|
mongodb/mongo-java-driver-reactivestreams
|
/.evergreen/run-tests.sh
|
UTF-8
| 843
| 3.546875
| 4
|
[
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
#!/bin/bash
set -o xtrace # Write all commands first to stderr
set -o errexit # Exit the script with error if any of the commands fail
# Supported/used environment variables:
# MONGODB_URI Set the suggested connection MONGODB_URI (including credentials and topology info)
# JDK Set the version of java to be used. Java versions can be set from the java toolchain /opt/java
# "jdk7", "jdk8"
MONGODB_URI=${MONGODB_URI:-}
JDK=${JDK:-jdk}
export JAVA_HOME="/opt/java/${JDK}"
############################################
# Main Program #
############################################
echo "Running tests with ${JDK} connecting to $MONGODB_URI"
./gradlew -version
./gradlew -Dorg.mongodb.test.uri=${MONGODB_URI} --stacktrace --info test
| true
|
a49f123376d677a1b5b6376d0504eacd01e8f229
|
Shell
|
Samsung/TAU
|
/tests/tau2device.sh
|
UTF-8
| 2,236
| 4
| 4
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-flora-1.1"
] |
permissive
|
#!/bin/bash
# This script builds framework rpm using gbs and installs them on target device.
# How to use:
# run ./tau2device.sh
# or
# run ./tau2device.sh <device_serial_number>
#
# IMPORTANT: before use please provide path to rpm's in RPM_SRC variable
#
# If there are some problems with RPM installation, please try to remove
# old packages on target device before instalation.
# To remove packages run following command in target device shell:
# rpm -e <package_name>
# e.g. rpm -e web-ui-fw-theme-tizen-black-0.2.83-1.1.redwood8974xx.noarch
#
# author Michał Szepielak <m.szepielak@samsung.com>
# Defines target device.
# If multiple target devices are connected you can specify it by passing
# target's serial number as argument;
# ./tau2device.sh <device_serial_number>
#
# Define target as '-s <serial_number>' to specify default target
# that is connected or leave it empty if only one target is connected.
# e.g. TARGET="-s emulator-11101"
TARGET=""
# Source path to RPM files with build framework by GBS
# e.g. /home/m.szepielak/GBS-ROOT/local/repos/tizendev/armv7l/RPMS
RPM_SRC=""
# Destination path to upload RPM files on device. Please do not change this path
RPM_DEST="/tmp/tauRpm"
# Check if serial number is passed
if [ "${1}" != "" ] ; then
TARGET="-s ${1}"
fi
# If TARGET was not passed and there are multiple devices connected suggest
# which target should be used
if [ -z "${TARGET}" ] && [ `sdb devices | wc -l` -gt 2 ] ; then
sdb devices
read -e -p "Which target want to use? " TARGET
TARGET="-s ${TARGET}"
fi
DEVICE_STATUS=`sdb ${TARGET} get-state`
if [ "${DEVICE_STATUS}" != "device" ] ; then
exit
else
echo "OK: Device connected"
fi
# Clean old RPMs
rm -f $RPM_SRC/*
# Build RPMs
gbs build -A armv7l --include-all
########################### Start working on device ############################
# Turn on root mode
sdb $TARGET root on
# Remount root.
sdb $TARGET shell "mount -o remount,rw /"
sdb $TARGET shell "if [ ! -d '/tmp' ] ; then
mkdir /tmp
fi"
sdb $TARGET shell "if [ ! -d '/tmp' ] ; then
mkdir /tmp/tauRpm
fi"
sdb $TARGET push $RPM_SRC $RPM_DEST
for filePath in $RPM_SRC/*
do
fileName=`basename $filePath`
sdb $TARGET shell "rpm -Uvh --force --nodeps ${RPM_DEST}/${fileName}"
done
echo "DONE!"
| true
|
baf4a0dab5403b20bd372dbe71db350de6d1f9d2
|
Shell
|
SaraMWillis/HPC_Examples
|
/FunBashrcFunctions/check_libraries.sh
|
UTF-8
| 416
| 4.0625
| 4
|
[] |
no_license
|
#### Checks for available system libraries. Input: full or partial library names
function library {
message="Searching for system libraries matching keyword: $1"
echo; echo $message
num=${#message}
v=$(printf "%-${num}s" "$message")
echo "${v// /$str}" ; echo
if [[ $(ldconfig -p | grep $1) ]]; then
ldconfig -p | grep $1 ; echo
else
echo "no libraries found"; echo
fi
}
| true
|
a2980409d9cf97fcf00d236b722fcf69ea33d4ab
|
Shell
|
myklhenn/rsync-backup
|
/common/backup-client-script.sh
|
UTF-8
| 762
| 3.546875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# TODO: find a way to "respond" if backups is paused or cancelled by user
refresh_menu() {
# TODO: temporary fix to allow running on Windows 10 (WSL)
if [ "$(uname)" = "Darwin" ]; then
open -g "bitbar://refreshPlugin?name=backup-menu.*?.sh"
fi
}
cleanup() {
if [ "$(uname)" = "Darwin" ]; then
pkill -1 -af "rsync --server --sender"
fi
rm -f $HOME/.backup-running
refresh_menu
}
trap cleanup 0 1 4
[ -e $HOME/.backup-paused ] && exit 1
echo "$(date "+%I:%M %p")" >$HOME/.backup-running
refresh_menu
rsync $@
if [ $? = 0 ]; then
rm -f $HOME/.backup-error
echo "$(date "+%a %m/%d/%Y %I:%M %p")" >$HOME/.backup-success
else
echo "$(date "+%a %m/%d/%Y %I:%M %p")" >$HOME/.backup-error
fi
exit 0
| true
|
e8d0465377a923ff42eab52b3594564dac2ace0f
|
Shell
|
mencaribug/oroshi
|
/config/zsh/last.zsh
|
UTF-8
| 1,903
| 3.40625
| 3
|
[] |
no_license
|
# Commands in this file will be executed very last in the zsh init process.
# NVM {{{
local nvmScript=~/.nvm/nvm.sh
if [[ -r $nvmScript ]]; then
source $nvmScript
fi
# }}}
# Direnv {{{
# Loads environment variables from .envrc files
if [ $commands[direnv] ]; then
eval "$(direnv hook zsh)"
# Prevent direnv from displaying anything when switching to a new dir
export DIRENV_LOG_FORMAT=
fi
# }}}
# Gvm
local gvmScript=~/.gvm/scripts/gvm
if [[ -r $gvmScript ]]; then
source $gvmScript
gvm use go1.11 &>/dev/null
fi
# Adding Chromium compilation tools to the path if present
local chromiumDepotTools=~/local/src/chromium/depot_tools
if [[ -r $chromiumDepotTools ]]; then
export PATH=$PATH:$chromiumDepotTools
fi
# Pyenv / pipenv
export PATH="/home/tim/.pyenv/bin:$PATH"
if [ $commands[pyenv] ]; then
# Make sure pyenv is found by putting it first
eval "$(pyenv init -)"
# Do not prefix the current virtual env in the prompt, neither for pyenv nor
# pipenv
export PYENV_VIRTUALENV_DISABLE_PROMPT='1'
export VIRTUAL_ENV_DISABLE_PROMPT='yes'
fi
# The next line updates PATH for the Google Cloud SDK.
if [ -f '/home/tim/local/src/google-cloud-sdk/path.zsh.inc' ]; then
source '/home/tim/local/src/google-cloud-sdk/path.zsh.inc';
fi
# Load fzf for fuzzy finding in Ctrl-R in terminal
if [ -f ~/.fzf.zsh ]; then
source ~/.fzf.zsh
fi
# RVM need to be loaded at the very last
local rvmScript=~/.rvm/scripts/rvm
if [[ -r $rvmScript ]]; then
if [[ ! -z "$TMUX" ]]; then
# It seems that $GEM_HOME and $GEM_PATH are not correctly set when starting
# a tmux session, so we'll re-source the `rvm` function and manually set the
# default. We suppress errors for not polluting the display.
source $rvmScript &>/dev/null
rvm use 2.5.1 &>/dev/null
else
# We simply source the rvmScript otherwise
source $rvmScript
rvm use 2.5.1 &>/dev/null
fi
fi
| true
|
b4157655302324943ade2f5b77e8773dec13cd8d
|
Shell
|
pytorch/text
|
/examples/libtorchtext/build.sh
|
UTF-8
| 434
| 2.625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
set -eux
this_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
build_dir="${this_dir}/build"
mkdir -p "${build_dir}"
cd "${build_dir}"
git submodule update
cmake \
-DCMAKE_PREFIX_PATH="$(python -c 'import torch;print(torch.utils.cmake_prefix_path)')" \
-DRE2_BUILD_TESTING:BOOL=OFF \
-DBUILD_TESTING:BOOL=OFF \
-DSPM_ENABLE_SHARED=OFF \
..
cmake --build .
| true
|
0999fcaeb8da946b691e958b896706d027e0fff0
|
Shell
|
ethanweed/Studium_Generale
|
/build-info.sh
|
UTF-8
| 359
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
# build html documents
# convert course information docs to html
cd /Users/ethan/Documents/GitHub/Studium_Generale/StudiumGenerale2022/CourseInfo
find ./ -iname "*.md" -type f -exec sh -c 'pandoc "${0}" -o "./html/$(basename ${0%.md}.html)"' {} \;
# Push to github
git add -A
git commit -m "auto-updated with build.sh"
git push origin master
| true
|
44b31967e041da2277cd175dcccf4a7b838303ad
|
Shell
|
ajitsing/vim-setup
|
/setup_vim.sh
|
UTF-8
| 631
| 3.078125
| 3
|
[
"MIT"
] |
permissive
|
vim_config=".vimrc"
if [ -e ~/${vim_config} ]; then
echo "moving your old vim settings to ~/.vimrc_old"
mv ~/.vimrc ~/.vimrc_old
fi
vim_folder=".vim"
if [ -d ~/"${vim_folder}" ]; then
echo "moving old vim setup to ~/.vim_old"
mv ~/.vim ~/.vim_old
fi
echo "creating ~/.vim folder..."
mkdir ~/.vim
cp -R * ~/.vim/.
echo "copying vim configuration(.vimrc)..."
cp ~/.vim/vimrc ~/.vimrc
echo "setting up vundle..."
git clone https://github.com/gmarik/Vundle.vim.git ~/.vim/bundle/Vundle.vim
vim \
-u "~/.vimrc" \
"+set nomore" \
"+BundleInstall!" \
"+BundleClean" \
"+qall"
echo "congratulations, your vim setup is complete!"
| true
|
e3907ed8f1f3121339a46a092b72058fc509f4a6
|
Shell
|
codyhartsook/golang-kv-store
|
/deploy.sh
|
UTF-8
| 483
| 2.71875
| 3
|
[] |
no_license
|
name=$1
ip=$2
port=$3
view=$4
addr="${ip}:13800"
docker stop "${name}"
docker rm "${name}"
docker build -t kv-store:5.0 .
docker run --network=kv_subnet \
--name="${name}" \
--ip="${ip}" -p "${port}":13800/udp \
-e ADDRESS="${addr}" -p "${port}":13800 \
-e REPL_FACTOR=2 \
-e VIEW="${view}" \
kv-store:5.0
| true
|
ac520898f498ff99b23481b0644811d69e339746
|
Shell
|
dmh/generator-pxa-frontend
|
/app/templates/_win-start
|
UTF-8
| 351
| 2.90625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
DIRECTORY="<%= (dirr) %>"
FDIRECTORY="<%= (dirr) %>/typo3conf/ext"
GIT="<%= (gitt) %>"
if [ ! -d "$DIRECTORY" ]; then
git clone $GIT
fi
trap "echo ." SIGINT SIGTERM
if [ ! -d "$FDIRECTORY" ]; then
grunt shared_start
grunt shared_end
grunt commit
else
grunt mkdir:fonDir
grunt start
grunt end
grunt commit
fi
| true
|
86bbcb74ed30b83e988c36bf3889b385cc59aab2
|
Shell
|
rtucek/dotfiles
|
/dot_config/i3/executable_sound.sh
|
UTF-8
| 1,164
| 3.421875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Active sink
SINK=$(pacmd list-sinks | perl -n -e'/^\s*\*\s+index:\s+([0-9]+)$/ && print $1' | head -n 1)
pactl "$1" $SINK "$2" > /dev/null
VOLUME=$(pactl list sinks | grep '^[[:space:]]Volume:' | head -n $(($SINK + 1)) | tail -n 1 | sed -e 's,.* \([0-9][0-9]*\)%.*,\1,')
MUTE=$(pactl list sinks | grep '^[[:space:]]Mute:' | head -n $(($SINK + 1)) | tail -n 1 | sed -e 's,.*\(yes\|no\).*,\1,')
# As as special case, unmute if already muted, but volume change request
if [[ "$MUTE" == "yes" && "$1" == "set-sink-volume" ]]; then
pactl "set-sink-mute" $SINK "toggle"
MUTE="no"
fi
if [[ $VOLUME == 0 || "$MUTE" == "yes" ]]; then
# Show the sound muted notification
notify-send \
-a "changeVolume" \
-u low \
-t 500 \
-i audio-volume-muted-symbolic.symbolic \
-h string:synchronous:my-progress "Volume muted"
else
# Show the volume notification
notify-send \
-a "changeVolume" \
-u low \
-t 500 \
-i audio-volume-high-symbolic.symbolic \
-h int:value:${VOLUME} \
-h string:synchronous:my-progress "Volume: ${VOLUME}%"
fi
# Play the volume changed sound
paplay /usr/share/sounds/freedesktop/stereo/audio-volume-change.oga
| true
|
494b78ea6b9ea2c459ff4f4f4fde9da2dfb024b3
|
Shell
|
Mr-Rhino/Example-for-the-masses
|
/scripts/bash/countdown.ksh
|
UTF-8
| 615
| 4.1875
| 4
|
[] |
no_license
|
#!/bin/ksh
# TEST COUNTING DOWN
typeset -i start=$1
typeset -i stop=$2
#
if [[ $# -ne 2 ]]
then
printf "\nYou didn't give me two arguments. This script takes two numbers and counts down from one to the other.\n\n"
exit
elif [[ ${start} -lt ${stop} ]]
then
printf "\n${start} is less than ${stop}. This script counts down, not up.\n\n"
elif [[ ${start} -eq ${stop} ]]
then
print "\n This script counts down from one number to another. You gave the same number twice.\n"
exit
fi
while [[ ${start} -ge ${stop} ]] ; do
printf "Counting down: ${start}\n"
start=`expr ${start} - 1`
sleep 1
done
| true
|
5c0c2c42b94159c6ffd737a6879b3f4e8077bafc
|
Shell
|
openEXO/cloud-kepler
|
/hadoop-mrjob/run_example_mrjob.sh
|
UTF-8
| 832
| 3.328125
| 3
|
[] |
no_license
|
echo "Load the Hadoop environment"
source setenv.sourceme
input_filename=README.md
echo "Making a directory for our input data"
hadoop dfs -mkdir mrjob-input
echo "Copying input text to HDFS"
hadoop dfs -put $input_filename mrjob-input/$input_filename
echo "Deleting old output directories"
hadoop dfs -rmr mrjob-wordcount-output
rm -r mrjob-wordcount-output
echo "Running our mrjob Python script"
python mr_word_freq_count.py \
-r hadoop \
hdfs:///user/$USER/mrjob-input/$input_filename \
--jobconf mapred.reduce.tasks=2 \
--output-dir hdfs:///user/$USER/mrjob-wordcount-output
echo "Copying the the results back to the local filesystem"
hadoop dfs -get /user/$USER/mrjob-wordcount-output .
echo "Print some fun job stats"
jobid=$(hadoop job -list all | tail -n1 | awk '{print $1}')
hadoop job -status $jobid
| true
|
3b8d0306f0de75ab8e5af58130d71d816703cefd
|
Shell
|
SoarinFerret/dokuwiki-docker
|
/gitbacked_setup.sh
|
UTF-8
| 1,268
| 3.71875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Load bitnami libraries
. /opt/bitnami/scripts/libbitnami.sh
. /opt/bitnami/scripts/liblog.sh
. /opt/bitnami/scripts/libwebserver.sh
if [ -z "$SSH_RSA_KEY" -o -z "$GIT_REPO" -o -z "$GIT_FINGERPRINT" ]; then
info "DOKU_GITBACKED: Variables SSH_RSA_KEY, GIT_REPO, GIT_FINGERPRINT must be set for gitbacked to funciton"; exit;
fi
# Get DAEMON user homedir
homedir=$( getent passwd "bitnami" | cut -d: -f6 )
userid=$( id -u )
# Setup SSH for DAEMON user
echo $GIT_FINGERPRINT | base64 -d > $homedir/.ssh/known_hosts
echo $SSH_RSA_KEY | base64 -d > $homedir/.ssh/id_rsa
# Permissions
chown $userid:0 $homedir/.ssh -R
chmod 700 $homedir/.ssh -R
chmod 600 $homedir/.ssh/id_rsa
if [ ! -d "/bitnami/dokuwiki/data/docs" ]; then
info "DOKU_GITBACKED: Cloning GIT_REPO to /bitnami/dokuwiki/data/docs"
git clone $GIT_REPO /bitnami/dokuwiki/data/docs
info "DOKU_GITBACKED: Setting GIT CONFIG local properties"
cd /bitnami/dokuwiki/data/docs
git config --local user.name dokuwiki
git config --local user.email $DOKUWIKI_EMAIL
else
info "DOKU_GITBACKED: /bitnami/dokuwiki/data/docs already exists"
fi
info "DOKU_GITBACKED: note - you may need to update DOKUWIKI global conf to use the directories within /bitnami/dokuwiki/data/docs"
| true
|
7cbe59ea04e3553c398d5393941bc27995b59f03
|
Shell
|
summonersRift/primogeni
|
/netscript/bin/monitor_scripts/monitor.sh
|
UTF-8
| 176
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/bash
RUN=0
while true
do
perl /primex/scripts/shutdown.pl
java -cp /primex/jprime.jar monitor.core.Monitor > /primex/run_${RUN}.out
RUN=`expr 1 + $RUN`
sleep 1
done
| true
|
220bcc3a2518bbe3cabd140ed0cc3192d9630e16
|
Shell
|
abehiroshi/ras-commu
|
/ak-020/config-network.sh
|
UTF-8
| 199
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/sh
if ! sudo grep soracom_ak-020 /etc/network/interfaces; then
cat << 'EOS' | sudo tee -a /etc/network/interfaces
allow-hotplug wwan0
iface wwan0 inet ppp
provider soracom_ak-020
EOS
fi
| true
|
fb66d1bff94012bb857d21be10cd196c43e61c32
|
Shell
|
tomtjes/DAW-converters
|
/hindenburg_to_samplitude-edl.sh
|
UTF-8
| 8,818
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/bash
#
# hindenburg_to_samplitude-edl.sh
# converts a Hindenburg Session file to a Samplitude EDL file
#
# Tools able to import Samplitude EDL include Reaper, Samplitude
#
# This script only places items on tracks. No volume adjustments, plugin settings, markers, et cetera will be converted.
# Both tracks and clipboard groups will be converted, clipboard groups being turned into tracks, with items spaced out evenly.
# Adjust gap seconds between clipboard clips:
gap=3
#
# This script should work on MacOS and Linux, XMLStarlet is required however:
# http://xmlstar.sourceforge.net/doc/UG/xmlstarlet-ug.html
#
# Usage: Open Terminal, navigate to folder containing the script and before first run enter:
# chmod +x hindenburg_to_samplitude-edl.sh
# To convert the Hindenburg file /path/to/my-session.nhsx enter:
# hindenburg_to_samplitude-edl.sh /path/to/my-session.nhsx
#
# Revision history :
# 14. Mar 2019 - v0.9 - creation by Thomas Reintjes (https://reidio.io)
#
#
##############################################################################################################################################
#command line variables
inputfile="$1"
#output
outputfile="${1%.*}.samplitude.edl"
outputpath="${1%/*}"
if [[ "$outputpath" == "" ]] ; then
outputpath="$PWD"
fi
#functions
#convert 00:00:00.000 timecodes into number of samples
samplify () {
local timecode=$1
local milliseconds="${timecode: -3}"
local seconds="${timecode: -6:2}"
local minutes="${timecode: -9:2}"
local hours="${timecode: -12:2}"
# 10# converts it to Base10 numbers, thereby removing leading 0s and converting empty variables to 0
local samples=$(( ((10#$hours)*3600+(10#$minutes)*60+(10#$seconds))*$samplerate+(10#$milliseconds)*$samplerate/1000 ))
echo "$samples"
}
#examine Regions in XML
regiondata () {
region_data[1]=$(xmlstarlet sel -t -m ''"$1"'' -v '@Ref' -n "$inputfile")
region_data[2]=$(xmlstarlet sel -t -m ''"$1"'' -v '@Name' -n "$inputfile")
region_data[3]=$(xmlstarlet sel -t -m ''"$1"'' -v '@Start' -n "$inputfile")
region_data[4]=$(xmlstarlet sel -t -m ''"$1"'' -v '@Length' -n "$inputfile")
region_data[5]=$(xmlstarlet sel -t -m ''"$1"'' -v '@Offset' -n "$inputfile")
region_data[3]=$(samplify "${region_data[3]}")
region_data[4]=$(samplify "${region_data[4]}")
region_data[5]=$(samplify "${region_data[5]}")
}
#get basic settings from Hindenburg Session
samplerate=$(xmllint --xpath 'string(/Session/@Samplerate)' "$inputfile")
folder="$(xmllint --xpath 'string(/Session/AudioPool/@Path)' "$inputfile")"
location="$(xmllint --xpath 'string(/Session/AudioPool/@Location)' "$inputfile")"
#get arrays for file IDs, files, and tracks
fileIDs=($(xmlstarlet sel -t -m '//AudioPool/File' -v '@Id' -n "$inputfile"))
filenames="$(xmlstarlet sel -t -m '//AudioPool/File' -v '@Name' -o '
' -n "$inputfile")"
# turn this into proper array:
IFS=$'\n' filenames=(${filenames})
tracks=$(xmlstarlet sel -t -m '//Tracks/Track' -v '@Name' -o '
' -n "$inputfile")
# turn this into proper array:
IFS=$'\n' tracks=(${tracks})
#determine full paths of files as well as max number of channels
i=0
channels_max=0
for filename in "${filenames[@]}" ; do
if [[ $filename != /** ]] ; then
if [[ $folder != "" ]] ; then
filename="$folder/$filename"
fi
if [[ $location != "" ]] ; then
filename="$location/$filename"
fi
filenames[$i]="$filename"
fi
channels=$(soxi -c "$filename")
if [ "$channels_max" -lt "$channels" ] ; then
channels_max="$channels"
fi
let "i+=1"
done
#print EDL header
printf "Samplitude EDL File Format Version 1.5\nTitle: \"converted from Hindenburg\"\nSample Rate: %s\nOutput Channels: %s\n\nSource Table Entries: %s\n" "$samplerate" "$channels_max" "${#filenames[@]}" > $outputfile
#print Source Table
i=1
for filename in "${filenames[@]}" ; do
printf " %s \"%s\"\n" "$i" "$filename" >>$outputfile
let "i+=1"
done
#print tracks
track_number=1
for track in "${tracks[@]}" ; do
printf "\nTrack %s: \"%s\" Solo: 0 Mute: 0\n" "$track_number" "$track" >>$outputfile
echo "#Source Track Play-In Play-Out Record-In Record-Out Vol(dB) MT LK FadeIn % CurveType FadeOut % CurveType Name" >>$outputfile
echo "#------ ----- ------------ ------------ ------------ ------------ -------- -- -- ------------ ----- ---------------------------------- ------------ ----- ---------------------------------- -----" >>$outputfile
region_count=$(xmlstarlet sel -t -c 'count(//Track[@Name="'"$track"'"]/Region)' -n "$inputfile")
i=1
while [ $i -le $region_count ] ; do
regiondata "(//Track[@Name=\"$track\"]/Region)[$i]"
playout=$(( ${region_data[3]} + ${region_data[4]} )) # start + length
recout=$(( ${region_data[5]} + ${region_data[4]} )) # offset + length
printf "%7s %5s %12s %12s %12s %12s 0.0 0 0 0 0 \"*default\" 0 0 \"*default\" \"%s\"\n" "${region_data[1]}" "$track_number" "${region_data[3]}" "$playout" "${region_data[5]}" "$recout" "${region_data[2]}" >>$outputfile
let "i+=1"
done
let "track_number+=1"
done
#print clipboard groups to additional tracks
clipboard=$(xmlstarlet sel -t -m '//Clipboard/Group' -v '@Caption' -o '
' -n "$inputfile")
# turn this into array:
IFS=$'\n' clipboard=(${clipboard})
for group in "${clipboard[@]}" ; do
region_count=$(xmlstarlet sel -t -c 'count(//Group[@Caption="'"$group"'"]/Region)' -n "$inputfile")
clip_count=$(xmlstarlet sel -t -c 'count(//Group[@Caption="'"$group"'"]/Clip)' -n "$inputfile")
total_count=$(( $region_count + $clip_count ))
#only create track if group has any regions or clips
if ! [ "$total_count" -gt 0 ] ; then
continue
fi
printf "\nTrack %s: \"Clips: %s\" Solo: 0 Mute: 1\n" "$track_number" "$group" >>$outputfile
echo "#Source Track Play-In Play-Out Record-In Record-Out Vol(dB) MT LK FadeIn % CurveType FadeOut % CurveType Name" >>$outputfile
echo "#------ ----- ------------ ------------ ------------ ------------ -------- -- -- ------------ ----- ---------------------------------- ------------ ----- ---------------------------------- -----" >>$outputfile
current_region=0
current_clip=0
region_start=0
i=1
while [ $i -le $total_count ] ; do
#check if node is a clip (if it has a Ref attribute it's not a clip)
is_clip=$(xmlstarlet sel -t -m '(//Group[@Caption="'"$group"'"]/Region|//Group[@Caption="'"$group"'"]/Clip)['"$i"']' -i '@Ref' -o 'false' -n "$inputfile")
#Region
if [ "$is_clip" == false ] ; then
let "current_region+=1"
regiondata "(//Group[@Caption=\"$group\"]/Region)[$current_region]"
playout=$(( $region_start + ${region_data[4]} )) # start + length
recout=$(( ${region_data[5]} + ${region_data[4]} )) # offset + length
printf "%7s %5s %12s %12s %12s %12s 0.0 0 0 0 0 \"*default\" 0 0 \"*default\" \"%s\"\n" "${region_data[1]}" "$track_number" "$region_start" "$playout" "${region_data[5]}" "$recout" "${region_data[2]}" >>$outputfile
region_start=$(( $region_start + ${region_data[4]} + $gap * $samplerate ))
#Clip
else
let "current_clip+=1"
clip_offset=$(xmlstarlet sel -t -m '(//Group[@Caption="'"$group"'"]/Clip)['"$current_clip"']' -v '@Start' -n "$inputfile")
clip_offset=$(samplify $clip_offset)
clip_length=$(xmlstarlet sel -t -m '(//Group[@Caption="'"$group"'"]/Clip)['"$current_clip"']' -v '@Length' -n "$inputfile")
clip_length=$(samplify $clip_length)
clip_region_count=$(xmlstarlet sel -t -c 'count(//Group[@Caption="'"$group"'"]/Clip['"$current_clip"']/Region)' -n "$inputfile")
j=1
while [ $j -le $clip_region_count ] ; do
regiondata "(//Group[@Caption=\"$group\"]/Clip[$current_clip]/Region)[$j]"
#modify region start, accounting for position on track and Clip Offset
region_data[3]=$(( $region_start + ${region_data[3]} - $clip_offset ))
playout=$(( ${region_data[3]} + ${region_data[4]} )) # start + length
recout=$(( ${region_data[5]} + ${region_data[4]} )) # offset + length
printf "%7s %5s %12s %12s %12s %12s 0.0 0 0 0 0 \"*default\" 0 0 \"*default\" \"%s\"\n" "${region_data[1]}" "$track_number" "${region_data[3]}" "$playout" "${region_data[5]}" "$recout" "${region_data[2]}" >>$outputfile
let "j+=1"
done
region_start=$(( $region_start + $clip_length + $gap * $samplerate ))
fi
let "i+=1"
done
let "track_number+=1"
done
| true
|
9058cf2872f79c27fb17d095ea542ccaa1e0c06d
|
Shell
|
alexandergraening/EE180D_CapstoneProject
|
/start_gesture_recognition_files/motion_data.sh
|
UTF-8
| 548
| 3.390625
| 3
|
[] |
no_license
|
#/bin/sh
#
# Usage sensor_sample.sh -t <TIME IN SECONDS> -f <OUTPUT_FILE_NAME>
#
# Enter Device MAC Address below
#
#
while getopts t:f: option
do
case "${option}"
in
t) TIME=${OPTARG};;
f) FILE=${OPTARG};;
esac
done
if [ "$TIME" -lt "1" ]
then
echo "Error time less than 1 seconds"
exit
fi
if [ "$TIME" -gt "10" ]
then
echo "Error time greater than 10 seconds"
exit
fi
gatttool -b C0:83:2b:31:5a:48 -t random --char-write-req --handle=0x0012 --value=0100 --listen > $FILE &
sleep $TIME
pkill gatttool
| true
|
7a444d63a727ec11fc83d95760047701e085c3cb
|
Shell
|
webskate101/django-polymer-addressbook
|
/scripts/common.sh
|
UTF-8
| 298
| 3.21875
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
VIRTUALENV_DIR='venv'
# Force shell to fail on any errors.
set -e
function require() {
type $1 >/dev/null 2>&1 || { echo "$1 required but not found."; exit 1; }
}
require 'pip'
require 'virtualenv'
require 'npm'
if [ ! -d "$VIRTUALENV_DIR" ]; then
virtualenv venv
fi
. ./venv/bin/activate
| true
|
f0191b4fe10e3ecc9a0f2234de2dda3bf97018b6
|
Shell
|
kzhangair/steamapi
|
/MultiNodeProcessTh02.sh
|
UTF-8
| 276
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
for((i=5;i<8;i++))
do
{
awk -F '\v' 'BEGIN{min = 200000;max = -1;RS = "\a"} {if($1<min){min = $1}if($1>max){max = $1}} END{printf("%d %d\n", min, max)}' cleanedData0${i}.csv > result_0${i}
}&
done
wait
cat result_05 result_06 result_07 > MultiNodeResult_Thumm02
| true
|
3bb1ffcb1be7f23a9e63f4f1e1c36fe555ca7549
|
Shell
|
dmurph/bash_profile
|
/bash_profile
|
UTF-8
| 1,037
| 2.640625
| 3
|
[] |
no_license
|
export PATH=~/bin:/usr/local/bin:$PATH
export PATH=/usr/local/sbin:$PATH
source ~/bash_profile/functions
if [ -f /usr/local/etc/bash_completion.d/git-prompt.sh ]; then
source /usr/local/etc/bash_completion.d/git-prompt.sh
fi
#DIFF_HIGHLIGHT_LOCATION=''
#type brew >/dev/null 2>&1 && DIFF_HIGHLIGHT_LOCATION="$(brew --prefix)/share/git-core/contrib/diff-highlight/diff-highlight" || DIFF_HIGHLIGHT_LOCATION="/usr/share/git-core/contrib/diff-highlight/diff-highlight"
#ln -sf $DIFF_HIGHLIGHT_LOCATION ~/bin/diff-highlight
#unset DIFF_HIGHLIGHT_LOCATION
export LANG=en_US.UTF-8
export LC_COLLATE=en_US.UTF-8
export LC_CTYPE=en_US.UTF-8
export LC_MESSAGES=en_US.UTF-8
export LC_MONETARY=en_US.UTF-8
export LC_NUMERIC=en_US.UTF-8
export LC_TIME=en_US.UTF-8
export LC_ALL=en_US.UTF-8
if [ -f ~/bash_profile/git-ps1.sh ]; then
source ~/bash_profile/git-ps1.sh
fi
declare -A GIT_PS1_PATH_SHORTENER
GIT_PS1_HOST_SHORTENER[".corp.google.com"]=""
GIT_PS1_PATH_SHORTENER["/google/src/cloud/dmurph"]="~citc"
export GIT_PS1_PATH_SHORTENER;
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.