blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
c04b243fffa8c6452d0aca7b3deb193faea21bce
|
Shell
|
snaury/MINGW-packages
|
/mingw-w64-lixslt/PKGBUILD
|
UTF-8
| 2,502
| 2.625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
# Maintainer: Alexey Pavlov <alexpux@gmail.com>

# PKGBUILD for the mingw-w64 build of libxslt. The field names below are the
# makepkg contract and must not be renamed.
_realname=libxslt
_mingw_suff=mingw-w64-${CARCH}
pkgname="${_mingw_suff}-${_realname}"
pkgver=1.1.28
pkgrel=1
pkgdesc="XML stylesheet transformation library (mingw-w64)"
arch=('any')
url="http://xmlsoft.org/XSLT/"
license=('custom')
depends=("${_mingw_suff}-crt" "${_mingw_suff}-libxml2" "${_mingw_suff}-libgcrypt")
groups=("${_mingw_suff}")
makedepends=("${_mingw_suff}-gcc")
options=('strip' 'staticlibs')
# Upstream tarball plus local patches, applied in this order by prepare().
# NOTE(review): the last four entries are unquoted while the first five are
# quoted — harmless here, but inconsistent.
source=("http://xmlsoft.org/sources/libxslt-${pkgver}.tar.gz"
"libxslt-1.1.26-w64.patch"
"libxslt-1.1.27-disable_static_modules.patch"
"libxslt-1.1.28-win32-shared.patch"
"libxslt.m4-libxslt-1.1.26.patch"
0002-python-linking-on.mingw.patch
0003-fix-concurrent-directory-creation.all.patch
0004-add-missing-include-for-python.all.patch
0005-fix-freelocales-export.all.patch)
# One md5 checksum per source entry, same order as source=().
md5sums=('9667bf6f9310b957254fdcf6596600b7'
'b994c0d9df0f644e219cf63561ee0d4e'
'558b6714cf324a7504deb18411792900'
'19646f155b518b40cb63e41270215964'
'61b520bbd42006c16bbfc85bc5342b96'
'a944fcf65f9948bb9dba325a8f6e7c5c'
'a5b1902b4e254934032fb19f46170418'
'267c16ba85010d47a86827da1f339bdd'
'ee6e3e375b8ea2f42c7039e13f14f0c8')
prepare() {
  cd "$srcdir/libxslt-${pkgver}"
  # Apply the local patches in the same order they are listed in source=().
  local p
  for p in \
      libxslt-1.1.26-w64.patch \
      libxslt-1.1.27-disable_static_modules.patch \
      libxslt-1.1.28-win32-shared.patch \
      libxslt.m4-libxslt-1.1.26.patch \
      0002-python-linking-on.mingw.patch \
      0003-fix-concurrent-directory-creation.all.patch \
      0004-add-missing-include-for-python.all.patch \
      0005-fix-freelocales-export.all.patch; do
    patch -p1 -i "${srcdir}/${p}"
  done
  # Regenerate the autotools build system after patching.
  libtoolize --copy --force
  aclocal
  automake --add-missing
  autoconf
}
build() {
  # Out-of-tree build keeps the unpacked source directory pristine.
  mkdir -p "$srcdir/build-${CARCH}" && cd "$srcdir/build-${CARCH}"
  # Quote the configure path and the triplet/prefix expansions so paths
  # containing spaces do not word-split (the originals were unquoted).
  "$srcdir/libxslt-${pkgver}/configure" \
    --prefix="${MINGW_PREFIX}" \
    --build="${MINGW_CHOST}" \
    --host="${MINGW_CHOST}" \
    --target="${MINGW_CHOST}" \
    --enable-shared \
    --with-libxml-prefix="${MINGW_PREFIX}" \
    --without-python
  make
}
package() {
# Install from the out-of-tree build directory into the package root.
cd "$srcdir/build-${CARCH}"
make DESTDIR="$pkgdir" install
# Drop docs/locale data to keep the binary package small.
rm -r "${pkgdir}${MINGW_PREFIX}"/share
# Stripping is handled by options=('strip'); the manual strip invocations
# below are kept for reference only.
# strip --strip-unneeded "$pkgdir"/usr/${_arch}/bin/*.dll
# strip --strip-all "$pkgdir"/usr/${_arch}/bin/*.exe
# strip -g "$pkgdir"/usr/${_arch}/lib/*.a
}
| true
|
f2ef2065409ed57e4aa65efbb2695dbcb5b2b9fc
|
Shell
|
ZwAnto/ca22-selenium
|
/entrypoint
|
UTF-8
| 387
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
# Container entrypoint: register a cron job that runs the scraper at 06:00
# and 18:00, then keep cron in the foreground as the container's main process.

# Make the container's environment visible to cron jobs (cron runs with a
# minimal environment and reads /etc/environment).
printenv > /etc/environment
# NOTE(review): '>>' appends, so every container restart adds a duplicate
# cron line to this file — confirm whether '>' (overwrite) was intended.
echo '0 6,18 * * * cd /opt && /usr/local/bin/python3 -m scraper.main > /var/log/cron.log 2>&1' >> /etc/cron.d/hello-cron
chmod a+x /etc/cron.d/hello-cron
# Load the file as the current user's crontab.
crontab /etc/cron.d/hello-cron
#fix link-count, as cron is being a pain, and docker is making hardlink count >0 (very high)
touch /etc/crontab /etc/cron.*/*
# Ensure the log file exists before the first job writes to it.
touch /var/log/cron.log
# Run cron in the foreground so the container stays alive.
cron -f
| true
|
419e8abdc1eb8bc7fb96b8d0bb31936211090ce6
|
Shell
|
MeteorAdminz/redis
|
/3.2/test.sh
|
UTF-8
| 330
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
set -e

# Trace commands when DEBUG is set to any non-empty value.
if [[ -n "${DEBUG}" ]]; then
  set -x
fi

IMAGE=$1
NAME=$2

# Launch the server container; remove it (volumes included) on any exit.
cid="$(docker run -d --name "${NAME}" "${IMAGE}")"
trap "docker rm -vf $cid > /dev/null" EXIT

# redis ARGS... — run a one-shot client container linked to the server,
# forwarding all arguments verbatim.
redis() {
  docker run --rm -i --link "${NAME}" "${IMAGE}" "${@}"
}

# Wait until the server is ready, then smoke-test a command against it.
redis make check-ready host="${NAME}"
redis make flushall host="${NAME}"
| true
|
f0d252fcca8471f1fa43e77a34e6414c642fdb45
|
Shell
|
EOEPCA/template-svce
|
/travis/release.sh
|
UTF-8
| 1,233
| 3.828125
| 4
|
[
"Apache-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/usr/bin/env bash
# Tag and push the Travis-built Docker image as a GitHub release and 'latest'.
# fail fast settings from https://dougrichardson.org/2018/08/03/fail-fast-bash-scripting.html
# Fixed: the original 'set -eov pipefail' parsed as -e -v '-o pipefail',
# enabling verbose echo instead of nounset; '-u' is what was intended.
set -euo pipefail

# Check presence of environment variables
TRAVIS_BUILD_NUMBER="${TRAVIS_BUILD_NUMBER:-0}"

# obtain current repository name
REPO_LOCAL_PATH=$(git rev-parse --show-toplevel)
REPO_NAME=$(basename "$REPO_LOCAL_PATH")

# Create a Docker image and tag it as 'travis_<build number>'
buildTag=travis_$TRAVIS_BUILD_NUMBER # We use a temporary build number for tagging, since this is a transient artefact

# [[ with && replaces the deprecated '[ -n ... -a -n ... ]'; the :- defaults
# keep the unset-credentials path working under 'set -u'.
if [[ -n "${DOCKER_USERNAME:-}" && -n "${DOCKER_PASSWORD:-}" ]]
then
    echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin
    docker pull "eoepca/${REPO_NAME}:${buildTag}" # have to pull locally in order to tag as a release
    # Tag and push as a Release following the SemVer approach, e.g. 0.1.1-Alpha
    # TRAVIS_TAG recovers the GitHub release/tag number; abort with a clear
    # message if it is missing rather than pushing a malformed tag.
    docker tag "eoepca/${REPO_NAME}:${buildTag}" "eoepca/${REPO_NAME}:${TRAVIS_TAG:?TRAVIS_TAG must be set}"
    docker push "eoepca/${REPO_NAME}:${TRAVIS_TAG}"
    # Tag and push as `latest`
    docker tag "eoepca/${REPO_NAME}:${buildTag}" "eoepca/${REPO_NAME}:latest"
    docker push "eoepca/${REPO_NAME}:latest"
else
    echo "WARNING: No credentials - Cannot push to docker hub"
fi
| true
|
a1b2e5d4b254f5e9eabf8393ba4d94b81f615db9
|
Shell
|
joewww/ci-cd
|
/manage.sh
|
UTF-8
| 682
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/sh
# Thin wrapper around the legacy 'terraform remote' workflow:
# usage: ./manage.sh <plan|apply|show|destroy>

# Configure the S3 remote-state backend (legacy 'terraform remote' CLI).
remote_config() {
  terraform remote config \
    -backend=s3 \
    -backend-config="bucket=joewww-terraform" \
    -backend-config="key=network/terraform.tfstate" \
    -backend-config="region=us-east-1"
}

# Fixed: the cd was unchecked — a missing directory would have run terraform
# against whatever the current directory happened to contain.
cd terraform/ || { echo "cannot cd to terraform/" >&2; exit 1; }

case "$1" in
  plan)
    remote_config
    terraform remote pull && terraform plan && terraform remote push
    ;;
  apply)
    remote_config
    terraform remote pull && terraform apply && terraform remote push
    ;;
  show)
    remote_config
    terraform show
    ;;
  destroy)
    remote_config
    terraform remote pull && terraform destroy -force && terraform remote push
    ;;
  *)
    echo "Usage: $0 <plan/apply/show/destroy>"
    exit 1
    ;;
esac
| true
|
2a084572b1d5792ee0723ebad9edebfe847caff3
|
Shell
|
twintproject/twint-splunk
|
/twint-geo
|
UTF-8
| 1,187
| 4.21875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# Download tweets posted within a given distance of a lat/lon point.
# (The original header said "a user's tweets by year", which did not match
# what the script does.)
#
# Errors are fatal
set -e

#
# Change to where this script is
#
pushd "$(dirname "$0")" > /dev/null

#
# Sanity check: all four positional arguments are required.
#
if test ! "$4"
then
	echo "! "
	echo "! Syntax: $0 lat lon distance output_file [ options ]"
	echo "! "
	exit 1
fi

LAT=$1
LON=$2
DISTANCE=$3
BASE_FILE=$4
shift 4

#
# Make sure our directory exists and set a destination.
#
mkdir -p logs/location/
OUTPUT=logs/location/tweets-${BASE_FILE}.json

# A ".done" marker means a previous run completed; skip re-downloading.
if test -f "${OUTPUT}.done"
then
	echo "File ${OUTPUT}.done exists, we don't need to download these tweets!"
	exit 0
fi

#
# Set up our resume file
#
RESUME="resume-location-${BASE_FILE}.txt"

echo "# "
echo "# Starting download of tweets within ${DISTANCE} of ${LAT},${LON}"
echo "# to ${OUTPUT}..."
echo "# "
echo "# Resume file: ${RESUME}"
echo "# "

# Fixed: 'test "$#"' was always true because $# expands to "0", never the
# empty string — the message printed even with no extra args.
if test "$#" -gt 0
then
	echo "# Additional args: $*"
fi

STRING="-g=${LAT},${LON},${DISTANCE}"
# "$@" (quoted) preserves extra options that contain spaces.
./twint "${STRING}" "$@" --json -o "${OUTPUT}" --resume "${RESUME}"

echo "# OK: Done with download of tweets within ${DISTANCE} of ${LAT},${LON}..."

#
# After a successful run, we don't need our resume file anymore.
#
rm -f "$RESUME"
touch "${OUTPUT}.done"
| true
|
6c939722a52e54710f0c3901e226db4201a1d397
|
Shell
|
JuJuKK/ttyDockerTraining
|
/runAllContainers.sh
|
UTF-8
| 1,033
| 2.796875
| 3
|
[] |
no_license
|
#! /bin/bash
# Recreate the whole Palpo assignment stack on a dedicated bridge network.
echo "Starting containers for Palpo Docker assignment..."

# Stop and remove all containers
docker stop $(docker ps -a -q)
docker rm $(docker ps -a -q)

# Create new network
docker network create --subnet=172.20.0.0/16 palponet

# launch NAME IP [extra docker-run args...] IMAGE [cmd...]
# Every service runs detached on palponet with a fixed address.
launch() {
  local name=$1 ip=$2
  shift 2
  docker run -d --name "$name" --net palponet --ip "$ip" "$@"
}

launch haproxy      172.20.0.2 -p 80:80 jukka/haproxy:1
launch frontend     172.20.0.3 jukka/frontend:1
launch logstash_in  172.20.0.4 jukka/logstash_in:1
launch rabbitmq     172.20.0.5 jukka/rabbitmq:1
launch logstash_out 172.20.0.6 jukka/logstash_out:1
launch mongo        172.20.0.7 -p 91:27017 -p 92:28017 mongo:3 mongod --rest
launch node         172.20.0.8 jukka/node:1

echo "Container start successful!"
| true
|
fa1fd22484cfca48dce8a8f621974d1e65ec6dcd
|
Shell
|
TheInventorMan/ConnectNow
|
/lib/dpkg/info/apt-xapian-index.postrm
|
UTF-8
| 266
| 2.84375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Debian maintainer script: clean up apt-xapian-index state on remove/purge.
set -e

# The original tested "$1" twice with identical conditions; a single case
# arm performs the same two cleanups in the same order.
case "$1" in
  remove|purge)
    echo "Removing index /var/lib/apt-xapian-index..."
    rm -rf /var/lib/apt-xapian-index
    # ('-ff' in the original was a repeated -f; one is equivalent.)
    rm -f /usr/share/apt-xapian-index/plugins/*.pyc
    ;;
esac

exit 0
| true
|
1ed3d44ca24cf1aaed1ef255afff28d69492977c
|
Shell
|
sanjay-jaiswal/Shell-Script
|
/WhileLoop/TableOfPowerOf2.sh
|
UTF-8
| 242
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash -x
# Read an exponent and print successive powers of two below 2^power.
# The exercise caps the printed table at 2^8 = 256 regardless of the input.
read -p "Please enter power of 2 : " power

sum=$((2**$power))   # upper bound: 2^power
num=2                # current power of two
count=1              # exponent used for the next value

while [ $num -lt $sum ]; do
	num=$((2**$count))
	count=$(($count+1))
	printf '%s\n' "$num"
	[ $num -eq 256 ] && break
done
| true
|
badea238d385d0207f61bdc20c4def4e013f6fc9
|
Shell
|
sp1022/pigsty
|
/app/cmdb/install
|
UTF-8
| 1,639
| 3.46875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#==============================================================#
# File : install
# Ctime : 2021-06-29
# Mtime : 2021-06-29
# Desc : install pgsql monitoring system
# Path : app/pgsql/install
# Copyright (C) 2018-2021 Ruohang Feng
#==============================================================#

#========================================#
# detect environment
#========================================#
# Fixed: a stray ')' in the original left PROG_NAME ending with a literal ')'.
PROG_NAME="$(basename "$0")"
PROG_DIR="$(cd "$(dirname "$0")" && pwd)"
SQL_DIR=${PROG_DIR}/sql
# Fixed typo: DEFAULT_PIGSTY_HOMNE -> DEFAULT_PIGSTY_HOME (internal variable).
DEFAULT_PIGSTY_HOME=${HOME}/pigsty
PIGSTY_HOME="${PIGSTY_HOME-${DEFAULT_PIGSTY_HOME}}"
DASHBOARD_DIR=${DASHBOARD_DIR-'/etc/dashboards'}
PIGSTY_CONFIG=${PIGSTY_HOME}/pigsty.yml

#========================================#
# create pigsty schema on meta database
#========================================#
psql service=meta -AXtwf "${SQL_DIR}/pigsty.sql"

#========================================#
# finish install if PIGSTY_HOME not exists
#========================================#
if [[ ! -d ${PIGSTY_HOME} ]]; then
	echo pigsty home not found, exit
	exit 0
fi

#========================================#
# convert pigsty config into sql data
#========================================#
# Read the config directly instead of 'cat | ...' (useless use of cat).
"${PIGSTY_HOME}/bin/yaml2sql" pigsty < "${PIGSTY_CONFIG}" > "${SQL_DIR}/data.sql"
psql service=meta -AXtwf "${SQL_DIR}/data.sql"

#========================================#
# replace config file with dynamic query script
#========================================#
cat > "${PIGSTY_HOME}/pigsty.yml" <<-'EOF'
#!/bin/bash
psql service=meta -AXtwc 'SELECT text FROM pigsty.inventory;'
EOF
chmod 0755 "${PIGSTY_HOME}/pigsty.yml"
| true
|
7efcaff35d9acadfb8b29ba369376f2cbc8fa322
|
Shell
|
weera00/Airlink1.0.1aN1jp
|
/mysql_pass.sh
|
UTF-8
| 830
| 2.8125
| 3
|
[] |
no_license
|
# Interactively (re)set the MySQL root password used by the hotspot stack,
# rewriting every config file that embeds the old password.
clear
echo "MySQL Database config"
echo "---------------------------------------"
echo ""

# Pull the current password ($dbPass) and any preset ($SQL_PASS).
. /usr/local/bin/hotspot/mysar/etc/config.ini

# Fixed: the tests were unquoted ('[ -z $SQL_PASS ]'), which breaks when the
# password is empty with surrounding IFS characters or contains whitespace.
if [ -z "$SQL_PASS" ]; then
	read -p "Change MySql root password: " SQL_PASS
fi

if [ -z "$SQL_PASS" ]; then
	echo "Abort."
	echo "Password is not change..."
	echo ""
else
	# NOTE(review): the passwords are interpolated directly into the sed
	# expression — characters such as / & or newlines in either password
	# will corrupt every file below. Consider escaping them first.
	sed -i "s/$dbPass/$SQL_PASS/g" /opt/hotspot/admin/system/air/config/database.php
	sed -i "s/$dbPass/$SQL_PASS/g" /opt/hotspot/admin/true/db.php
	sed -i "s/$dbPass/$SQL_PASS/g" /usr/local/bin/hotspot/include/config.inc.php
	sed -i "s/$dbPass/$SQL_PASS/g" /usr/local/bin/hotspot/mysar/etc/config.ini
	sed -i "s/$dbPass/$SQL_PASS/g" /usr/local/bin/hotspot/psql.pl
	sed -i "s/$dbPass/$SQL_PASS/g" /etc/freeradius/sql.conf
	sed -i "s/$dbPass/$SQL_PASS/g" /etc/script/changeaccess.sh
	echo "Password save..."
	echo ""
	sleep 3
fi
| true
|
3f542c13024b257e6365529144521e0115445cea
|
Shell
|
tnw-open-source/credential-provision
|
/revoke-vpn-key
|
UTF-8
| 2,224
| 3.90625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Revoke a user's VPN certificate(s): move the certs out of the active CA
# directory, rebuild and publish the CRL, and remove the user's .ovpn
# bundles and index entries from cloud storage.
# Relies on sibling helper scripts: find-cert, create-crl,
# upload-crl-to-storage, download-from-storage, delete-from-storage,
# upload-to-storage.
if [ $# -lt 1 ]
then
echo Usage: 1>&2
echo " revoke-web-key EMAIL [COMMON_NAME]" 1>&2
exit 1
fi
email="$1"
# Default "*" revokes every common name registered for this email.
common_name="${2:-"*"}"
# NOTE(review): 'desc' is assigned but never used below — possibly leftover.
desc="Revoke Web certificate for $1 - $2"
ca=${VPN_CA:-.}
ca_cert=${VPN_CA_CERT:-.}
bucket=${CRL_BUCKET:-""}
# Temp Files ($$ makes the names unique per process)
TMP_CRL=${ca}/crl.tmp$$
TMP_WORK=/tmp/tmp$$
# Output Files
CRL=${ca}/crl
# Revocation List
REVOKE_REGISTER=${ca}/revoke_register
# Revoked Cert Dir
REVOKE_DIR=${ca}/revoked
# Certificate file prefix
CERT_PREFIX="cert."
rm -f ${TMP_WORK} ${TMP_CRL}
# Google cloud key
gkey=${KEY:-/key/private.json}
echo "* Revoke key/certificates..." 1>&2
# Collect the matching certs twice: plain listing (TMP_WORK) and the
# extended listing with device metadata (TMP_WORK-ext, via -x).
if [ "${common_name}" == "*" ]; then
./find-cert -e "${email}" -p "${CERT_PREFIX}" -d "${ca}" | sort | uniq > ${TMP_WORK}
./find-cert -e "${email}" -p "${CERT_PREFIX}" -d "${ca}" -x | sort | uniq > ${TMP_WORK}-ext
else
./find-cert -e "${email}" -s "${common_name}" -p "${CERT_PREFIX}" -d "${ca}" | sort | uniq > ${TMP_WORK}
./find-cert -e "${email}" -s "${common_name}" -p "${CERT_PREFIX}" -d "${ca}" -x | sort | uniq > ${TMP_WORK}-ext
fi
# Nothing matched: clean up the temp files and fail.
if [ "$(wc -c < ${TMP_WORK} | sed -e "s/ //g" )" == "0" ]; then
echo "* No Certs Found..." 1>&2
rm ${TMP_WORK} ${TMP_WORK}-ext
exit 1
fi
# Append the matches to the permanent revocation register, then move each
# matching cert file (field 1 = identifier) into the revoked directory.
cat ${TMP_WORK} >> ${REVOKE_REGISTER}
mkdir -p ${REVOKE_DIR}
for i in $(cut -f1 -d, ${TMP_WORK})
do
echo "* "$i 1>&2
mv ${ca}/*${i}* ${REVOKE_DIR}
done
rm ${TMP_WORK}
# Regenerate the CRL from the full register; write via a temp file so the
# published CRL is replaced atomically.
echo "* Update CRL..." 1>&2
./create-crl -k ${ca_cert}/key.ca -c ${ca_cert}/cert.ca -r ${REVOKE_REGISTER} > ${TMP_CRL}
mv ${TMP_CRL} ${CRL}
if [ "${bucket}" != "" ]; then
echo "* Upload CRL..." 1>&2
./upload-crl-to-storage ${gkey} ${bucket} ${CRL} vpn.crl
fi
# Rewrite the user's INDEX: TMP_WORK is reused as the working copy of the
# downloaded index; field 5 of the -ext listing is the device name. Each
# revoked device's per-region .ovpn bundles are deleted and its line is
# filtered out of the index.
./download-from-storage ${gkey} "${email}" INDEX > ${TMP_WORK}
for i in $(cut -f5 -d, ${TMP_WORK}-ext|sort|uniq)
do
echo "* Delete ${i}.ovpn from Google Storage..." 1>&2
./delete-from-storage ${gkey} "${email}" "${i}-us.ovpn"
./delete-from-storage ${gkey} "${email}" "${i}-uk.ovpn"
grep -v "\"device\": \"${i}\"" < ${TMP_WORK} > ${TMP_WORK}2
mv ${TMP_WORK}2 ${TMP_WORK}
done
echo "* Update index" 1>&2
./upload-to-storage ${gkey} "${email}" ${TMP_WORK} INDEX
rm -f ${TMP_WORK} ${TMP_WORK}-ext
echo "* All done." 1>&2
exit 0
| true
|
91665beea8ffc34509b3e63904e34da6934644c8
|
Shell
|
flow-r/ultraseq
|
/ultraseq/inst/scripts/split_fq
|
UTF-8
| 3,290
| 4.21875
| 4
|
[] |
no_license
|
#!/bin/bash -e
## usage: split_fq number_of_chunks fastq

# Usage info
show_help() {
cat << EOF
FASTQ Splitter
sseth @ MD Anderson
Usage: ${0##*/} [-c number of chunks] [-n number of reads per file] -f [fastq file]
Split a fastq files into specific number of (predetermined) chunks.
-f FASTQ Input fastq file.
-o prefix Output prefix to be used (only necessary when input is pipe)
-c chunks Number of chunks, into which the fastq file would be divided OR
-n num Number of reads per output fastq file.
-h display this help and exit
-v verbose mode. Not used.
One needs to supply either number of chunks (using -c) OR number of reads (using -n), but not both.
EOF
}

# zipit now has an explicit default so the '-z' test near the end is
# well-formed instead of relying on a test error evaluating as false.
fq= chunks= num= prefix= zipit=0
while getopts "hf:c:n:o:z" opt; do
  case $opt in
    h)
      show_help
      exit 0
      ;;
    f)
      fq=$OPTARG
      ;;
    c)
      chunks=$OPTARG
      ;;
    n)
      num=$OPTARG
      ;;
    o)
      prefix=$OPTARG
      ;;
    z)
      zipit=1
      ;;
    '?')
      show_help >&2
      exit 1
      ;;
  esac
done
shift $((OPTIND - 1))

#fq=/rsrch1/iacs/tmp/illumina_platinum/50x/NA12877/ERR194146_1.fastq.gz
## --- needs the following variables declared in name space
#chunks=$1
#fq=$2

# Input must be a regular file or the stdin pseudo-path.
# (Quoted comparison: the unquoted original mis-parsed when $fq was empty.)
if [ ! -f "$fq" ] && [ ! "$fq" == "/dev/stdin" ]; then
  echo -e "\nFastq file does not exist, please check. $fq\n"
  show_help
  exit 1
fi

## basename: when reading a pipe we cannot derive a prefix, so -o is required
if [ "$fq" == "/dev/stdin" ]; then
  if [ "${prefix}" == "" ]; then
    echo "Please specify the output prefix, using -o fastq. ${prefix}"
    show_help
    exit 1
  fi
  echo `date` "; Using prefix: $prefix"
else
  prefix="${fq%.*}."
  echo `date` "; Using prefix: $prefix"
fi

## choose the reader based on the extension (gzip vs plain)
if [ "${fq##*.}" == "gz" ]; then
  cat_cmd="zcat"
elif [ "$fq" == "/dev/stdin" ]; then
  cat_cmd="cat"
else
  cat_cmd="cat"
fi

## calculate numl: the number of LINES per output file
if [ -n "${chunks}" ]; then
  echo `date` "; Working on $fq, to be split into $chunks chunks, getting total reads in the file..."
  ## -- get number of lines
  tot_lines=$($cat_cmd $fq | wc -l )
  ## Fixed: the original rounded with the (empty) $num variable, making
  ## 'expr' fail and aborting -c mode under 'bash -e'. Round the per-chunk
  ## line count UP to a multiple of 4 so no fastq record is split.
  numl=$(( ((tot_lines + chunks - 1) / chunks + 3) / 4 * 4 ))
elif [ -n "${num}" ]; then
  numl=$(expr $num \* 4)
  echo -e `date` "; The file would be split with $num reads, $numl lines each, splitting..."
else
  show_help
  exit 1
fi

# actual splitting: numbered chunks under a random temp-based prefix
base="$(basename `mktemp`)_"
$cat_cmd $fq | split -l $numl -d -a 3 - $base
#-d adds digit prefixes instead of alphabetic
#-a 3 allows you to add more digits to output files.
#b is the prefix to use
#In this example: output will be b000...b999

## rename them:
echo `date` "; Renaming files..."
## get total number of files produced
#end=$(expr $chunks - 1)
end=$(ls -l $base* | wc -l)
#end=$(expr $end - 1)
echo "working on $end files..."

# Build a command file (one mv[+gzip] per chunk) and execute it in parallel.
cmdfile=$(mktemp)
for i in $(seq 0 $end); do
  fl=$base$(printf "%03d" $i);
  out=$prefix$(printf "%03d" $i).fastq;
  if [ -f $fl ] && [ "$zipit" == 1 ]; then
    echo "mv $fl ${out};gzip ${out}" >> $cmdfile
  elif [ -f $fl ]; then
    echo "mv $fl ${fl}.fastq" >> $cmdfile
  fi
done
echo "running $cmdfile"
cat $cmdfile | parallel -j 12
#rm $cmdfile

##~% FILE="example.tar.gz"
##~% echo "${FILE%%.*}"
##example
##~% echo "${FILE%.*}"
##example.tar
##~% echo "${FILE#*.}"
##tar.gz
##~% echo "${FILE##*.}"
##gz
| true
|
3b219dcf608dc761f5d7fdfcef021fce77bbf0e8
|
Shell
|
agliga/agliga-setup
|
/home/.profile
|
UTF-8
| 752
| 2.671875
| 3
|
[] |
no_license
|
# MacPorts Installer addition on 2012-10-05_at_07:47:19: adding an appropriate PATH variable for use with MacPorts.
# NOTE(review): 'du' enumerates every subdirectory of ~/Scripts to add them
# all to PATH; 'find "$HOME/Scripts" -type d' would be the conventional tool
# — confirm before changing.
export PATH="/opt/local/bin:/opt/local/sbin:$(du "$HOME/Scripts" | cut -f2 | paste -sd ':'):$HOME/.rvm/bin:$PATH"
# Finished adapting your PATH environment variable for use with MacPorts.

export DEV=~/development
export BROWSER="brave"
export EDITOR="nvim"
export TERMINAL="alacritty"
export PLASMA_USE_QT_SCALING=1

# Aliases. Fixed: the original defined gr three times, ls three times and
# ll twice with conflicting values; since the last definition always won,
# only the effective (final) definition of each is kept.
alias ll='ls -la'
alias vim="nvim"
alias gr='grep -rn'
alias ack='ack-5.12'
alias ls='ls --color=auto'

export GPG_TTY=$(tty)
source /usr/share/rvm/scripts/rvm
source /usr/share/rvm/scripts/completion
| true
|
6970a44363ed2709f36cfff0f2fa903e21abfcd3
|
Shell
|
ypapax/fuzzy_postgres_speed
|
/commands.sh
|
UTF-8
| 13,423
| 2.65625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
set -ex
set -o pipefail
# Build all images and bring the compose stack up in the foreground.
run() {
docker-compose build
docker-compose up
}
# Force-remove the postgres container (ignoring errors if it is absent),
# then start the stack again from scratch.
rerun() {
set +e
docker kill fuzzy_postgres_speed_postgres_1
docker rm fuzzy_postgres_speed_postgres_1
set -e
run
}
# sql SQL... — run a statement against the local 'people' benchmark database.
sql() {
psql -h localhost -p 5439 -U postgres -d people -c "$@"
}
# sqla SQL... — EXPLAIN ANALYZE the statement first, then run it for real.
sqla() {
sql "EXPLAIN (ANALYZE) $*"
sql "$@"
}
# Row count of the people table.
c() {
sql "SELECT count(*) FROM people;"
}
# Show the five most recently inserted rows.
last() {
sql "SELECT * FROM people ORDER BY id DESC limit 5"
}
# fuz — run the currently-active fuzzy-match benchmark query against the
# 8.5M-row 'people' table via sqla (EXPLAIN ANALYZE + real run).
#
# Summary of the earlier experiments that were kept here as commented-out
# sqla calls and their measured EXPLAIN ANALYZE output (same probe string
# 'amnesties Wilder ledge perception falconer' unless noted):
#
#   name % '120000085...RN3SGD19HYZXYEK9TM0WQW1W1F62PMI6ZDP5GH5M5VAUZKUIWL'
#                                                      ~243753 ms
#   name % '1200000'                                   ~928 ms
#   '1200000' % ANY(STRING_TO_ARRAY(name,' '))         ~23788 ms
#   name % '<probe>'                 (parallel seq scan) ~36052 ms, 2 rows
#   SIMILARITY(name, <probe>) > 0.4  (seq scan)        ~105762 ms, 1 row
#   SIMILARITY(name, <probe>) > 0.1  (seq scan)        ~109291 ms, 138062 rows
#   LEVENSHTEIN(name, <probe>) < 5   (seq scan)        ~35392 ms, 1 row
#   SOUNDEX(name) = SOUNDEX(<probe>)                   ~799 ms, 5605 rows
#   METAPHONE(name, 10) = METAPHONE(<probe>, 10)       ~1172 ms, 1 row
#   METAPHONE(name, 8)  = METAPHONE(<probe>, 8)        ~1052 ms, 3 rows
#   DMETAPHONE(name) = DMETAPHONE(<probe>)             ~1938 ms, 4928 rows
#   metaphone %  METAPHONE('bikes recuperating...',10) ~1437 ms (trgm index)
#   metaphone %> METAPHONE('bikes recuperating...',10) ~81 ms, 5 rows
#   name %> 'destination vituperating diphthong miracles undivided'
#                                    (trgm index)      ~3258 ms, 1 row
#   name %>> same probe (active below, strict word sim) ~3252 ms, 1 row
#
fuz() {
# Active experiment: strict word similarity on the raw name column,
# served by the name_trigram_idx bitmap index scan.
sqla "SELECT * FROM people WHERE name %>> 'destination vituperating diphthong miracles undivided'"
}
# Precompute the metaphone column for every row (used by the indexed
# fuzzy-search experiments).
update() {
sql "UPDATE people SET metaphone=METAPHONE(name, 10)"
}
# Show METAPHONE codes for two near-identical strings (sanity check).
meta() {
sql "SELECT METAPHONE('biker recuperating braved stolidest riffs', 10)"
sql "SELECT METAPHONE('bikes recuperating braved stolidest riffs', 10)"
}
# Dispatch: the first CLI argument names the function to run,
# e.g. ./commands.sh fuz
"$@"
| true
|
006fc50be4022e4546a91e7b80ba8989c7beb5b8
|
Shell
|
watchexec/cargo-watch
|
/bin/sign
|
UTF-8
| 1,147
| 3.875
| 4
|
[
"LicenseRef-scancode-public-domain",
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/usr/bin/env bash
# Verify the CI-signed checksum files for a cargo-watch release, then
# counter-sign them with the maintainer's own minisign key.
set -euo pipefail

# 'command -v' is the portable builtin replacement for 'which'.
if ! command -v rsign >/dev/null; then
	echo "Requires rsign2 tool: $ cargo install rsign2"
	exit 2
fi

# All four inputs must exist: {B3,SHA512}SUMS and their CI signatures.
missing=""
for f in {B3,SHA512}SUMS{,.auto.minisig}; do
	[[ ! -f "$f" ]] && missing="$missing $f"
done

if [[ -n "$missing" ]]; then
	echo "Usage: bin/sign [rsign options...]"
	echo "You must first download the relevant sums and minisig files."
	echo "Missing: $missing"
	exit 1
fi

sigs=""
for algo in B3 SHA512; do
	echo "Verifying ${algo}SUMS.auto.minisig:"
	# Check the CI signature against the public key committed to the repo.
	rsign verify \
		-p "$(dirname "$BASH_SOURCE")/../.github/workflows/release.pub" \
		-x "${algo}SUMS.auto.minisig" \
		"${algo}SUMS"

	# Release version is embedded in the sums file as cargo-watch-vX.Y.Z.
	version=$(grep -m1 -oP 'cargo-watch-v[\d.]+' "${algo}SUMS" | cut -d- -f3)
	ownsig="${algo}SUMS.$(whoami).minisig"
	sigs="$sigs $ownsig"

	echo "Signing ${algo}SUMS with your key to $ownsig:"
	# Fixed: "$@" is now quoted so extra rsign options containing spaces
	# are forwarded intact (unquoted $@ word-split them).
	rsign sign \
		-t "cargo-watch $version signed by maintainer: $(whoami)" \
		-c 'see README.md for signing information' \
		-x "$ownsig" \
		"$@" \
		"${algo}SUMS"
done

echo "Done; please upload $sigs to Github release $version!"
| true
|
8ceb9b8cd21d7ed832660e5a8d92fdce79fc8271
|
Shell
|
mharasim/snoop
|
/scripts/run.sh
|
UTF-8
| 1,561
| 2.828125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Fixed: the shebang was below the license block; a shebang is only
# effective on the very first line of the file.

# MIT License
#
# Copyright (c) 2019 Marcin Harasimczuk
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

# Run every test binary with libsnoop preloaded so their syscalls are traced.
echo "Running testapps..."

export LD_PRELOAD="${LIBSNOOP_PATH:-../libsnoop.so}"
export TESTS_DIR="${TESTS_PATH:-.}"
export OUTPUT_DIR="${OUTPUT_PATH:-snoops}"

echo "LD_PRELOAD=$LD_PRELOAD"
echo "TESTS_DIR=$TESTS_DIR"

# Iterate with a glob instead of parsing ls output, and run the binary by
# its own path: the original './$test_bin' broke whenever TESTS_DIR was an
# absolute path.
for test_bin in "$TESTS_DIR"/test_*; do
	[ -e "$test_bin" ] || continue   # no matches: the glob stays literal
	echo "Running >>> $test_bin"
	"$test_bin"
	echo "Done... <<< $test_bin"
done

echo "Running testapps... done"
echo "Use snooper app to inspect .snoop files"
echo "gl'n'hf..."
| true
|
655602af5dee61260d0fae981fe4ab2b2e021618
|
Shell
|
phys3800/Project-1
|
/run_integrate2.sh
|
UTF-8
| 383
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/sh
# run_integrate2.sh
# shell script useful for running the program integrate for several cases,
# i.e., different orders of Laguerre polynomials used
# assumes the existence of file called wieghts.dat_#, where # is the order
# of the Laguerre polynomial
# For each order n, feed the weights filename and the order to ./integrate
# on stdin (via the here-document) and print only the last line of output.
for n in 2 4 8 16 20 24 28 32
do
f=weights.dat_$n
# The here-document below is ./integrate's stdin: first line is the
# weights file name, second line is the polynomial order.
(./integrate | tail -n 1) << !
$f
$n
!
done
| true
|
269d8e1b7f7d4057999ea8739d37218fab9882b9
|
Shell
|
mkienzle/MachineLearning
|
/Scripts/Tensorflow/classify.sh
|
UTF-8
| 2,298
| 3.5
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Retrain a TensorFlow image classifier on synthetic dot images, then
# evaluate it on the test set and report failure counts plus the sum of
# squared errors.  Expects ../../Data/Synthetic/Dots/{train,test}/ with a
# CSV index and img<id>.jpg files.

# Download scripts
curl -LO https://github.com/tensorflow/hub/raw/master/examples/image_retraining/retrain.py
curl -LO https://github.com/tensorflow/tensorflow/raw/master/tensorflow/examples/label_image/label_image.py

rm -rf train
dir="train"
# Lay training images out as one sub-directory per label (dot count), the
# layout retrain.py expects.  Read whole lines (the old
# `for line in $(cat …)` split on any whitespace); skip the CSV header.
while IFS= read -r line; do
    id=$(echo "$line" | awk -F, '{print $2}')
    if [ "$id" != "\"imageId\"" ]; then
        numDots=$(echo "$line" | awk -F, '{print $3}')
        mkdir -p "${dir}/${numDots}"
        echo "copying ../../Data/Synthetic/Dots/${dir}/img${id}.jpg to ${dir}/$numDots"
        cp "../../Data/Synthetic/Dots/${dir}/img${id}.jpg" "${dir}/${numDots}"
    fi
done < "../../Data/Synthetic/Dots/${dir}/${dir}.csv"

echo "retrain model..."
rm -rf output; mkdir -p output/intermediate; mkdir -p output/summaries
python retrain.py --image_dir=train \
 --summary_dir=output/summaries --saved_model_dir=output/model \
 --output_labels=output/labels.txt --output_graph=output/graph.pb

# Check how well the training performed
echo "to check performance, type tensorboard --logdir=/tmp/retrain_logs/"

echo "test the training..."
varError="0"
numFailures="0"
numPredictions="0"
while IFS= read -r line; do
    id=$(echo "$line" | awk -F, '{print $2}')
    if [ "$id" != "\"imageId\"" ]; then
        numDots=$(echo "$line" | awk -F, '{print $3}')
        # classify
        echo "classifying img${id}.jpg"
        python label_image.py --image=../../Data/Synthetic/Dots/test/img${id}.jpg \
         --graph=output/graph.pb --labels=output/labels.txt \
         --input_layer=Placeholder --output_layer=final_result > result.txt
        # find the most likely label
        gotNumDots=$(python findNumDots.py result.txt)
        diffSquare=$(python -c "print(($numDots - $gotNumDots)**2)")
        if [ "$diffSquare" != "0" ]; then
            echo "found $gotNumDots dot(s) in img${id}.jpg but there were $numDots"
            numFailures=$(python -c "print($numFailures + 1)")
        else
            echo "found $numDots dot(s) (correct)"
        fi
        # update the score
        varError=$(python -c "print($varError + $diffSquare)")
        # BUG FIX: count only real predictions; this increment used to sit
        # outside the if-block and also counted the CSV header row,
        # inflating the denominator of the failure percentage.
        numPredictions=$(python -c "print($numPredictions + 1)")
    fi
done < ../../Data/Synthetic/Dots/test/test.csv

# Guard against an empty test set (division by zero).
if [ "$numPredictions" -gt 0 ]; then
    percentFailures=$(python -c "print(100*$numFailures/$numPredictions)")
else
    percentFailures=0
fi
echo "sum of errors squared: $varError number of failures: $numFailures ($percentFailures %)"
| true
|
e7346118311d18a55bb01f6f5ac6536c5123f2ff
|
Shell
|
adgoudz/dotfiles
|
/zsh/.zpreztorc
|
UTF-8
| 2,009
| 2.59375
| 3
|
[] |
no_license
|
# Prezto (zsh framework) runtime configuration: module list, prompt theme,
# completion ignores, editor key bindings, git log format and
# syntax-highlighting styles.
# Color output (auto set to 'no' on dumb terminals).
zstyle ':prezto:*:*' color yes
#
# Set Prezto modules to load
#
# 'directory' \
# 'spectrum' \
# 'utility' \
zstyle ':prezto:load' pmodule \
'editor' \
'history' \
'git' \
'syntax-highlighting' \
'prompt'
#
# Prompt
#
# Set the prompt theme to load.
zstyle ':prezto:module:prompt' theme 'andrew'
#
# Completions
#
# Set the entries to ignore in static */etc/hosts* for host completion.
zstyle ':prezto:module:completion:*:hosts' etc-host-ignores \
'0.0.0.0' \
'127.0.0.1'
#
# Editor
#
# Set the key mapping style to 'emacs' or 'vi'.
zstyle ':prezto:module:editor' key-bindings 'emacs'
# Auto convert .... to ../..
zstyle ':prezto:module:editor' dot-expansion 'yes'
#
# Git
#
# Set a format for oneline log output
zstyle ':prezto:module:git:log:oneline' format \
'%C(7)%h%Creset %C(5)%<(12,trunc)%an%Creset %<(15)%C(8)%ad%Creset %<(70,trunc)%s %C(6)%d%Creset'
# Syntax Highlighting
# Per-token-type highlight styles for the syntax-highlighting module
# ('none' disables highlighting for that token type).
zstyle ':prezto:module:syntax-highlighting' styles \
'unknown-token' 'fg=1' \
'arg0' 'fg=8' \
'single-hyphen-option' 'none' \
'double-hyphen-option' 'none' \
'single-quoted-argument' 'none' \
'double-quoted-argument' 'none' \
'dollar-double-quoted-argument' 'fg=15' \
'back-double-quoted-argument' 'fg=15' \
'dollar-quoted-argument' 'fg=15' \
'back-dollar-quoted-argument' 'fg=15' \
'path' 'none' \
'globbing' 'fg=5' \
'history-expansion' 'fg=15' \
'command-substitution-delimiter' 'none' \
'process-substitution-delimiter' 'none' \
'back-quoted-argument-delimiter' 'none' \
'commandseparator' 'fg=4' \
'reserved-word' 'fg=5' \
'suffix-alias' 'none' \
'precommand' 'none' \
'rc-quote' 'none' \
'comment' 'none'
# vim: ft=zsh sw=2
| true
|
fd098bae31cdfd9e0790e311156ab3411912d24b
|
Shell
|
ministryofjustice/wmt-worker
|
/test/integration/resources/setup-aws.sh
|
UTF-8
| 1,699
| 2.53125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Provision a localstack instance (http://localhost:4566) with the S3
# buckets, SQS queues and SNS topic/subscription the integration tests use.
set -e
export TERM=ansi
# Dummy credentials/region for localstack; PAGER disabled so aws-cli
# output is not paginated.
export AWS_ACCESS_KEY_ID=foobar
export AWS_SECRET_ACCESS_KEY=foobar
export AWS_DEFAULT_REGION=eu-west-2
export PAGER=
# Extract bucket: S3 ObjectCreated events feed s3_extract_event_queue.
aws --endpoint-url=http://localhost:4566 s3api create-bucket --bucket wmt-worker --region ${AWS_DEFAULT_REGION} --create-bucket-configuration LocationConstraint=${AWS_DEFAULT_REGION}
aws --endpoint-url=http://localhost:4566 sqs create-queue --queue-name s3_extract_event_queue
aws --endpoint-url=http://localhost:4566 s3api put-bucket-notification-configuration --bucket wmt-worker --notification-configuration '{"QueueConfigurations":[{"QueueArn":"arn:aws:sqs:eu-west-2:000000000000:s3_extract_event_queue","Events": ["s3:ObjectCreated:*"]}]}'
aws --endpoint-url=http://localhost:4566 s3api create-bucket --bucket wmt-worker-dashboard --region ${AWS_DEFAULT_REGION} --create-bucket-configuration LocationConstraint=${AWS_DEFAULT_REGION}
aws --endpoint-url=http://localhost:4566 sqs create-queue --queue-name audit_event_queue
# Domain events: SNS topic fan-out to domain_event_queue, filtered to
# staff.available.hours.changed events, raw delivery.
aws --endpoint-url=http://localhost:4566 sns create-topic --name domain-events
aws --endpoint-url=http://localhost:4566 sqs create-queue --queue-name domain_event_queue
aws --endpoint-url=http://localhost:4566 sns subscribe --topic-arn arn:aws:sns:eu-west-2:000000000000:domain-events --protocol sqs --notification-endpoint arn:aws:sqs:eu-west-2:000000000000:domain_event_queue --attributes '{"FilterPolicy":"{\"eventType\":[\"staff.available.hours.changed\"]}", "RawMessageDelivery": "true"}'
echo "S3 created bucket"
# to put a file using command line
# aws --endpoint-url=http://localhost:4566 s3api put-object --bucket wmt-worker --key extract/WMP_PS.xlsx --body test/integration/resources/WMP_PS.xlsx
| true
|
11a5e1aa8c80b754aa5237aaa7aa3be4c78523bc
|
Shell
|
launchdarkly/ld-find-code-refs
|
/.ldrelease/publish-dry-run.sh
|
UTF-8
| 1,042
| 2.9375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Dry-run publish: build the release artifacts without pushing, then save
# the locally built Docker images into LD_RELEASE_ARTIFACTS_DIR.

# The "products-for-release" makefile target does a goreleaser build but doesn't push to DockerHub
"$(dirname "$0")"/run-publish-target.sh products-for-release

# Copy the Docker images that goreleaser just built into the artifacts - we only do
# this in a dry run, because in a real release the images will be available from
# DockerHub anyway so there's no point in attaching them to the release.
IMAGES=(
  ld-find-code-refs
  ld-find-code-refs-github-action
  ld-find-code-refs-bitbucket-pipeline
)
for image in "${IMAGES[@]}"; do
  sudo docker save "launchdarkly/${image}:latest" | gzip > "${LD_RELEASE_ARTIFACTS_DIR}/${image}.tar.gz"
done

# dry run logic is built into these publish scripts
"$(dirname "$0")"/publish-bitbucket-metadata.sh
"$(dirname "$0")"/publish-circleci.sh
"$(dirname "$0")"/publish-github-actions-metadata.sh
| true
|
8d215fcd7cda2aa6edb08c4a69f8b82ec8a5a293
|
Shell
|
jcockbain/advent-of-code
|
/run.sh
|
UTF-8
| 219
| 3.546875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Run the Advent of Code solution for one day.
# The day directory comes from $1 or, failing that, the DAY env var.

day=${1:-$DAY}
# (Removed the dead `d=day${d}` line: it referenced the unset variable $d
# and its result was never used.)

if [ -z "$day" ]
then
  echo "You must set \$DAY or pass day directory as an arg"
  exit 1
fi

if [ ! -d "$day" ]
then
  echo "$day is not a directory!"
  exit 1
fi

cd "$day" || exit 1
go run .
| true
|
c89577e8208ede9e9f07e2ecca5998e4aeaa2af9
|
Shell
|
lekoOwO/SuperChatSum
|
/run.bash
|
UTF-8
| 619
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
# Download a stream's superchat messages with chat-replay-downloader and
# post-process them into per-currency totals.
# Usage: run.bash <stream URL>
# NOTE(review): PYTHONIOENCODING is assigned but not exported here, so it
# only affects the python children if exported elsewhere — confirm.
PYTHONIOENCODING=utf-8
# Absolute directory containing this script.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
# config.ini is sourced; it supplies CURRENCYLAYER_API_KEY,
# CURRENCY_FILE_NAME and TARGET_CURRENCY used below.
. "$DIR/config.ini"
output_dir="$DIR/output"
mkdir -p $output_dir
# Random per-run id so output files never collide.
uuid=$(cat /proc/sys/kernel/random/uuid)
rawDataFilepath="$output_dir/$uuid.json"
calculatedFilepath="$output_dir/$uuid.calc.json"
URL=$1
echo -e "UUID:$uuid\n"
python "$DIR/chat-replay-downloader/chat_replay_downloader.py" "$URL" -message_type superchat -output "$rawDataFilepath" > /dev/null
python "$DIR/process.py" "$CURRENCYLAYER_API_KEY" "$rawDataFilepath" "$calculatedFilepath" -c "$DIR/$CURRENCY_FILE_NAME" -t "$TARGET_CURRENCY"
| true
|
f0089657b104b40022a0d8b26f349fe8cb9c7c8f
|
Shell
|
tannonk/playground
|
/huggingface/setup_env.sh
|
UTF-8
| 531
| 2.65625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# -*- coding: utf-8 -*-
# Create a conda env named "transformers" and install an editable
# HuggingFace transformers checkout plus PyTorch into it.
home=/home/user/kew
transformers=$home/INSTALLS/transformers
conda create -y --name transformers python=3.8 pip
#set +euo pipefail
# Relax -e/-u around activation — presumably conda's scripts trip them;
# restored with set -eu afterwards.
set +eu
# make conda available to current subshell
source $home/anaconda3/etc/profile.d/conda.sh
conda activate transformers
set -eu
pip3 install torch torchvision torchaudio
pip install --editable $transformers
pip install -r $transformers/examples/seq2seq/requirements.txt
echo ""
echo "done!"
echo ""
echo "run conda activate transformers"
echo ""
| true
|
723200d2668698f347e65f74537b868da3962fe3
|
Shell
|
cdbdev/arch-install
|
/script/my_arch_install.sh
|
UTF-8
| 8,105
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/bash
# ----------------------------------------------------------------------- #
# Arch Linux install script #
# ----------------------------------------------------------------------- #
# Author : Chris #
# Project : https://github.com/cdbdev/arch-install #
# Reference : https://wiki.archlinux.org/index.php/Installation_guide #
# ----------------------------------------------------------------------- #
# -----------------------------------------------
# Initial setup ( wireless, time/date )
# -----------------------------------------------
echo ":: Disabling soft blocks..."
rfkill unblock all
# Ask password for root
# Loop until both silently-read entries match; the password stays in a
# shell variable for use inside the chroot heredoc later.
while true; do
echo -n ">> Please enter root password: "
read -s root_pass
echo
echo -n ">> Root password (confirm): "
read -s root_pass_cnf
echo
[ "$root_pass" = "$root_pass_cnf" ] && break || echo "Passwords don't match, try again."
done
echo
# Ask credentials for new user
echo -n ">> Please enter a name for the new user: "
read new_user
while true; do
echo -n ">> Please enter a password for new user: "
read -s new_user_pass
echo
echo -n ">> New user password (confirm): "
read -s new_user_pass_cnf
echo
[ "$new_user_pass" = "$new_user_pass_cnf" ] && break || echo "Passwords don't match, try again."
done
echo
# Ask interface name and enable it
ip link show
echo -n ">> Please enter wifi interface name [wlp1s0,...]: "
read wifi_int
ip link set "$wifi_int" up
# Ask wifi SSID + WPA Key and try to connect
echo -n ">> Please enter SSID: "
read ssid
echo -n ">> Please enter WPA key: "
read wpa_key
# Generate a wpa_supplicant config for this network, start the supplicant
# in the background and obtain a DHCP lease.
wpa_passphrase "$ssid" "$wpa_key" > wpa_supplicant-"$wifi_int".conf
wpa_supplicant -B -i "$wifi_int" -c wpa_supplicant-"$wifi_int".conf
dhcpcd "$wifi_int"
# Check internet connection
if ping -q -c 1 -W 1 google.com >/dev/null; then
echo ":: Internet up and running!"
else
echo ":: Please check your internet connection! Installation aborted."
exit 1
fi
timedatectl set-ntp true
# -----------------------------------------
# Start Gdisk for partitioning of the disks
# -----------------------------------------
# WARNING: destructive. The subshell feeds a scripted answer sequence to
# gdisk: delete partitions 4 and 5 on /dev/sda, then recreate them as a
# 412G Linux partition (8300) and a 12G swap partition (8200).
echo ":: Starting gdisk..."
(
echo d # remove partition
echo 4 # partition 4 removal
echo d # remove partition
echo 5 # partition 5 removal
echo n # new partition
echo 4 # partition number 4
echo # default, start immediately after preceding partition
echo +412G # + 412 GB linux partition
echo 8300 # partition type linux
echo n # new partition
echo 5 # partition number 5
echo # default, start immediately after preceding partition
echo +12G # + 12 GB linux swap partition
echo 8200 # partition type swap
echo p # print in-memory partition table
echo w # save changes
echo y # confirm changes
) | gdisk /dev/sda
# -----------------------------------------------------------------
# Format partitions (one partition for system + partition for swap)
# -----------------------------------------------------------------
yes | mkfs.ext4 /dev/sda4
mkswap /dev/sda5
swapon /dev/sda5
# ---------------------
# Mount the file system
# ---------------------
mount /dev/sda4 /mnt
# -----------------
# Arch installation
# -----------------
# Rank mirrors
# Keep a backup, fetch a fresh mirrorlist for nearby countries, then
# uncomment every Server line.
cp /etc/pacman.d/mirrorlist /etc/pacman.d/mirrorlist.backup
curl "https://www.archlinux.org/mirrorlist/?country=BE&country=NL&country=DE&country=FR&country=US&protocol=http&protocol=https" > /etc/pacman.d/mirrorlist
sed -i 's/^#Server/Server/' /etc/pacman.d/mirrorlist
# Install base + kernel(linux)
pacstrap /mnt base linux linux-lts linux-firmware
# --------------------
# Configure the system
# --------------------
# genfstab
genfstab -U /mnt >> /mnt/etc/fstab
# Copy necessary files to new system
# (consumed by the chroot step below and the post-install script)
cp my_arch_install_post.sh /mnt/root/
cp /etc/pacman.d/mirrorlist /mnt/root/
cp wpa_supplicant-"$wifi_int".conf /mnt/root/
cp -r conf/. /mnt/root/
# -----------------------------------------
# Mount efi partition for GRUB installation
# -----------------------------------------
mkdir /mnt/efi
mount /dev/sda1 /mnt/efi
# --------
# Chroot
# --------
echo ":: Change root into the new system"
# NOTE: the heredoc delimiter is unquoted, so the variables below are
# expanded by the OUTER script before the chrooted shell runs.
arch-chroot /mnt /bin/bash <<EOF
# 1 Disable <beep>
echo ":: Disabling <beep>"
echo "blacklist pcspkr" > /etc/modprobe.d/nobeep.conf
# 2 Time zone
echo ":: Setup time zone"
ln -sf /usr/share/zoneinfo/Europe/Brussels /etc/localtime
hwclock --systohc
# 3 Localization
echo ":: Setup Localization"
echo "en_US.UTF-8 UTF-8" > /etc/locale.gen
locale-gen
echo "LANG=en_US.UTF-8" > /etc/locale.conf
echo "KEYMAP=be-latin1" > /etc/vconsole.conf
# 4 Network configuration
echo "myarch" > /etc/hostname
echo -e "127.0.0.1\tlocalhost" > /etc/hosts
echo -e "::1\t\tlocalhost" >> /etc/hosts
# 5 Set root password
echo ":: Setting password for root"
echo "root:${root_pass}" | chpasswd
# 6 Setup new user
echo -n ">> Setup new user"
useradd --create-home "$new_user"
echo "${new_user}:${new_user_pass}" | chpasswd
mv /root/my_arch_install_post.sh /home/"$new_user"/
# 7 Update mirrorlist
echo ":: Updating mirrorlist..."
#yes | pacman -S reflector --noconfirm
#reflector --verbose --latest 5 --sort rate --save /etc/pacman.d/mirrorlist
mv /root/mirrorlist /etc/pacman.d/mirrorlist
# 8 Install user specific packages
echo ":: Installing user specific packages..."
yes | pacman -S xf86-video-amdgpu dhcpcd e2fsprogs vi amd-ucode pacman-contrib sudo nftables wpa_supplicant base-devel arch-install-scripts vim acpi pulseaudio blueman wget which dosfstools ntfs-3g os-prober --noconfirm
# 8.1 Setup nftables
mv /root/nftables.conf /etc/
systemctl enable nftables.service
# 9 Change permissions for new user
echo ":: Change permissions for new user"
echo "$new_user ALL=(ALL:ALL) ALL" | EDITOR='tee -a' visudo
echo ":: Adding user to group 'wheel'..."
gpasswd -a "$new_user" wheel
# 10 Enable wifi at boot
echo ":: Enabling WIFI at boot..."
mv /root/wpa_supplicant-"$wifi_int".conf /etc/wpa_supplicant/
# 10.1 Add 'ctrl_interface=/var/run/wpa_supplicant' to 1st line of 'wpa_supplicant.conf'
sed -i '1 i\ctrl_interface=/var/run/wpa_supplicant\n' /etc/wpa_supplicant/wpa_supplicant-"$wifi_int".conf
systemctl enable wpa_supplicant@"$wifi_int"
# systemctl enable dhcpcd@"$wifi_int"
systemctl enable dhcpcd.service
# 10.2 Do not wait at startup for dhcpcd
mkdir /etc/systemd/system/dhcpcd@.service.d
echo -e "[Service]\nExecStart=\nExecStart=/usr/bin/dhcpcd -b -q %I" > /etc/systemd/system/dhcpcd@.service.d/no-wait.conf
# 10.3 unblock all softblocked interfaces
systemctl enable rfkill-unblock@all.service
# 11 Install and prepare XFCE
yes | pacman -S xorg-server --noconfirm
yes | pacman -S xfce4 xfce4-goodies xfce4-power-manager thunar-volman catfish xfce4-session --noconfirm
yes | pacman -S lightdm lightdm-gtk-greeter light-locker --noconfirm
sed -i 's/#greeter-session=example-gtk-gnome/greeter-session=lightdm-gtk-greeter/' /etc/lightdm/lightdm.conf
systemctl enable lightdm.service
mv /root/20-keyboard.conf /etc/X11/xorg.conf.d/
yes | pacman -S firefox ttf-dejavu ttf-liberation arc-gtk-theme moka-icon-theme screenfetch xreader libreoffice galculator gvfs conky --noconfirm
mv /root/90-blueman.rules /etc/polkit-1/rules.d/
# 12 Install and configure grub
yes | pacman -S grub efibootmgr --noconfirm
# BUG FIX: grub-install has no '--bootloader' option; the correct flag is
# '--bootloader-id', which names the EFI boot entry and directory.
grub-install --target=x86_64-efi --efi-directory=/efi --bootloader-id=arch
# 12.1 Fix dark screen, hibernate & screen tearing (add 'acpi_backlight=none amdgpu.dc=0')
#sed -i '/GRUB_CMDLINE_LINUX_DEFAULT=/c\GRUB_CMDLINE_LINUX_DEFAULT=\"quiet acpi_backlight=none amdgpu.dc=0\"' /etc/default/grub
# 12.1 Disable grub submenu
echo "GRUB_DISABLE_SUBMENU=y" >> /etc/default/grub
os-prober
grub-mkconfig -o /boot/grub/grub.cfg
# 13 Add screenfetch
echo screenfetch >> /home/"$new_user"/.bashrc
# 14 Discard unused packages weekly
systemctl enable paccache.timer
# 15 Enable bluetooth
systemctl enable bluetooth
# 16. Disable acpi backlight (amdgpu backlight is already used)
systemctl mask systemd-backlight@backlight:acpi_video0.service
echo ":: Exit chroot..."
EOF
# ------
# Reboot
# ------
echo ":: Installation finished."
echo ":: You can unmount [umount -R /mnt] and reboot now [reboot], or remain in the system."
| true
|
ffe9d06b827947f25c3193b0e6cf45a72f8e9ba6
|
Shell
|
maxvgi/zabbix-templates
|
/check_backups/check_backups.sh
|
UTF-8
| 649
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
# Zabbix backup freshness check.
#   check_backups.sh discovery  -> emit Zabbix LLD JSON of buckets listed
#                                  in $backup_config
#   check_backups.sh [DIR]      -> 0 = fresh non-empty backups in DIR,
#                                  1 = no file newer than 2 days,
#                                  2 = a fresh file is empty

backup_config=/backups/buckets.zabbix
DIR=.
if [[ "$1" != "" ]] ; then
    DIR=$1
    if [[ "$DIR" == "discovery" ]] ; then
        buckets=""
        # BUG FIX: `cat` was called with no argument (it hung reading
        # stdin); it must read the bucket list file.
        for bucket in $(cat "$backup_config") ; do
            # BUG FIX: the comma separator must be added between entries
            # (the old test compared $buckets against the config *path*,
            # which produced a leading comma and malformed JSON).
            if [[ "$buckets" != "" ]] ; then
                buckets=$buckets','
            fi
            buckets=$buckets'{"{#BUCKET}":"'$bucket'"}'
        done
        echo '{"data":['$buckets']}'
        exit
    fi
fi

# checks if there are fresh files and each of them is not empty
last_files=$(find "$DIR" -mtime -2 -exec echo {} \; | grep -ve "^$DIR$")
if [[ "$last_files" == "" ]] ; then
    echo 1
    exit
fi
for file in $last_files ; do
    size=$(stat -c%s "$file")
    if [[ "$size" == "0" ]] ; then
        echo 2
        exit
    fi
done
echo 0
|
c8f55e76f61712b063733f282e401ec4a5325174
|
Shell
|
DouglasOY/scripts
|
/life/calc.sh
|
UTF-8
| 363
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
# Sum the weight (column 2 of sort.dat) of rows whose value (column 1)
# lies within [-N, N], and print it as a fraction of the total weight.
# Usage: calc.sh <NUMBER>
if [ $# -eq 1 ]
then
    bound=$1
    nbound=$((0 - bound))
    # Pass the bounds via -v instead of splicing shell text into the awk
    # program, which broke on anything but plain numbers.
    portion=$(awk -v lo="$nbound" -v hi="$bound" '{ if (($1 >= lo) && ($1 <= hi)) t+=$2; } END {print t; }' sort.dat)
    total=$(awk '{ sum+=$2; } END {print sum }' sort.dat)
    echo " ${portion} / ${total} = "
    echo " ${portion} / ${total} " | bc -l
else
    echo "Usage: calc.sh <NUMBER>"
fi
| true
|
e66098d92ed29a4682f769c4b6e3a7b38b87da9a
|
Shell
|
SaurabhT311/CodeInClub-Day6
|
/primerange.sh
|
UTF-8
| 308
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash -x
# Print every prime in the inclusive range [start, end] read from stdin.
# (The -x flag on the shebang traces each command for debugging.)
read -p "enter start number: " start
read -p "enter end number: " end
for (( i=$start ; i<=$end; i++ ))
do
# count divisors of i in [2, i]; starts at 1, so a prime ends at count==2
count=1
for(( j=2; j<=$i; j++ ))
do
if [ $(( i % j )) == 0 ]
then
count=$(( count + 1 ))
fi
done
# primes have exactly one divisor in [2, i] (i itself); exclude 1 explicitly
if [[ $count -le 2 ]] && [[ $i -ne 1 ]]
then
echo -n $i " "
fi
done
| true
|
db56afd4d8154587b66b6c30ab22f9c2998c3158
|
Shell
|
bruno-kenji/aws-scripts
|
/scripts/invoke-lambda.sh
|
UTF-8
| 548
| 3.265625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# you must setup aws-shell and aws-configure first (check the readme)
# (FIX: the shebang must be the first line of the file to take effect.)

# .main.sh provides API_ID
. ./support/aws-scripts/.main.sh

API_NAME="$(aws apigateway get-rest-api --rest-api-id "$API_ID" | jq -r '.name')"

# Lambda name may be given as the first argument; otherwise prompt for it.
if [ -n "$1" ]
then
  LAMBDA_NAME=$1
else
  read -r -p "Lambda name (not the file name!): " LAMBDA_NAME
fi

INVOKE_ARGS="${API_NAME}/${LAMBDA_NAME}/invocation-params.json"

# Invoke the Lambda asynchronously with the per-lambda JSON payload file.
invokeLambda() {
  aws lambda invoke-async \
    --function-name "$LAMBDA_NAME" \
    --invoke-args "$INVOKE_ARGS"
}

invokeLambda
| true
|
0b5a1ed259e70ee2f95ed2f9411630af2dd26b27
|
Shell
|
azrobbo/dotfiles
|
/bin/stun
|
UTF-8
| 929
| 3.96875
| 4
|
[] |
no_license
|
#!/bin/sh
# make it easier to two hop ssh
# Usage:
#   stun user@farhost [nearhost]
#     nearhost - first ssh host that localhost can contact directly
#     farhost  - second ssh host that only nearhost can reach
# caveats:
#   - a public key is necessary on the nearhost because no opportunity
#     is given to log in with a password

# defaults
port=2022
nearhost=a

# if second argument isn't a switch, it must be our nearhost
case ${2} in
-*)
    ;;
*)
    # Quote the expansion: an empty/whitespace $2 must not break `test`.
    test -z "${2}" || nearhost=${2}
    ;;
esac

# first half of user@host => user
user=${1%@*}
# second half of user@host => host
farhost=${1##*@}

# establish tunnel (local ${port} forwards to farhost:22 via nearhost)
ssh -L "${port}:${farhost}:22" -N "${nearhost}" &
ssh_pid=$!

# wait for tunnel to start listening
while ! lsof -P -p "${ssh_pid}" | grep -q "${port}" ; do sleep 1; done

# start interactive session over tunnel
ssh -p "${port}" "${user}@localhost"

# tear down tunnel after interactive session ends
kill "${ssh_pid}"
| true
|
91d78c429b919393dc481939ee6dd10b8acddbd3
|
Shell
|
pauljxtan/miscellany
|
/utils/flac_to_mp3.bash
|
UTF-8
| 953
| 3.296875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Converts FLAC files to mp3, with tags.
# Usage: flac_to_mp3.bash file1.flac [file2.flac ...]
for INFILE in "$@"
do
    # Suffix replacement on a plain scalar (was array syntax
    # "${INFILE[@]/%flac/mp3}", which is misleading on a scalar).
    OUTFILE="${INFILE/%flac/mp3}"
    # TODO: Add more tags if needed
    # Anchor the sed patterns so tag *values* containing "TAG=" survive
    # (the old unanchored s/TAG=//g stripped every occurrence).
    ALBUM=$(metaflac --show-tag=ALBUM "$INFILE" | sed 's/^ALBUM=//')
    ARTIST=$(metaflac --show-tag=ARTIST "$INFILE" | sed 's/^ARTIST=//')
    TITLE=$(metaflac --show-tag=TITLE "$INFILE" | sed 's/^TITLE=//')
    TRACKNUMBER=$(metaflac --show-tag=TRACKNUMBER "$INFILE" | sed 's/^TRACKNUMBER=//')
    TRACKTOTAL=$(metaflac --show-tag=TRACKTOTAL "$INFILE" | sed 's/^TRACKTOTAL=//')
    # Encode to mp3
    # (N.B. need to decode flac separately since it's not supported by lame)
    # (VBR q=0)
    flac -d -c "$INFILE" | lame -V 0 --tl "$ALBUM" --ta "$ARTIST" --tt "$TITLE" --tn "$TRACKNUMBER"/"$TRACKTOTAL" - "$OUTFILE"
    # (CBR 320)
    #flac -d -c "$INFILE" | lame -b 320 --tl "$ALBUM" --ta "$ARTIST" --tt "$TITLE" --tn "$TRACKNUMBER"/"$TRACKTOTAL" - "$OUTFILE"
done
| true
|
c788848ef4a79976cefa25334601e45596e955ef
|
Shell
|
open-atmos/camp
|
/test/unit_rxn_data/test_first_order_loss.sh
|
UTF-8
| 604
| 3.640625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Run the first-order-loss reaction test, retrying up to 10 times to ride
# out intermittent failures.  Pass "MPI" as $1 to run it under mpirun.

# exit on error
set -e
# turn on command echoing
set -v
# make sure that the current directory is the one where this script is
cd "${0%/*}"
# make the output directory if it doesn't exist
mkdir -p out

((counter = 1))
while true
do
  echo Attempt $counter
  # Build the command as an array so it expands into words safely
  # (the old unquoted-string expansion relied on accidental word-splitting).
  if [[ $1 = "MPI" ]]; then
    cmd=(mpirun -v -np 2 ../../test_rxn_first_order_loss)
  else
    cmd=(../../test_rxn_first_order_loss)
  fi
  if ! "${cmd[@]}"; then
    echo Failure "$counter"
    if [ "$counter" -gt 10 ]
    then
      echo FAIL
      exit 1
    fi
    echo retrying...
  else
    echo PASS
    exit 0
  fi
  ((counter++))
done
| true
|
6467769b3b6fe7d2de783c9494ad97dcfb5f0c1d
|
Shell
|
greeneyesproject/testbed-demo
|
/vsn/script/greeneyes
|
UTF-8
| 1,840
| 3.53125
| 4
|
[
"MIT"
] |
permissive
|
#! /bin/sh
### BEGIN INIT INFO
# Provides: greeneyes-vsn
# Required-Start: $local_fs $network
# Required-Stop: $local_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: GreenEyes VSN
# Description: Startup initialization script for GreenEyes VSN
### END INIT INFO
# SysV init script: start/stop/restart the greeneyes-vsn binary from
# /opt/greeneyes-vsn, appending all output to log.txt.
# Carry out specific functions when asked to by the system
case "$1" in
start)
echo "Starting GreenEyes VSN "
cd /opt/greeneyes-vsn
PID=`pidof greeneyes-vsn`
if [ -n "$PID" ]
then
echo "GreenEyes VSN already running."
exit 1
fi
# Rotate the previous log, keeping at most its last 10 MB.
if [ -f log.txt ]
then tail -c 10M log.txt > log.prev.txt
rm log.txt
fi
# Cycle wlan0 once, set the CPU frequency for the chosen role, then bring
# the interface up for real with a static address.
ifup wlan0 >>log.txt 2>&1
sleep 5
ifdown wlan0 >>log.txt 2>&1
#Camera
cpufreq-set -f 600M >>log.txt 2>&1
#Cooperator
#cpufreq-set -g ondemand --max 1000M >>log.txt 2>&1
ifup wlan0 >>log.txt 2>&1
iwlist wlan0 scan >>log.txt 2>&1
#Camera 1
ifconfig wlan0 192.168.200.55 netmask 255.255.255.0 >>log.txt 2>&1
./greeneyes-vsn camera 11 --telos /dev/ttyUSB0 >>log.txt 2>&1 &
#Camera 2
#ifconfig wlan0 192.168.200.56 netmask 255.255.255.0 >>log.txt 2>&1
#./greeneyes-vsn camera 12 --telos /dev/ttyUSB0 >>log.txt 2>&1 &
#Cooperator 1
#./greeneyes-vsn cooperator 21 >>log.txt 2>&1 &
#Cooperator 2
#./greeneyes-vsn cooperator 22 >>log.txt 2>&1 &
;;
stop)
echo "Stopping GreenEyes VSN"
PID=`pidof greeneyes-vsn`
if [ -z "$PID" ]
then
echo "GreenEyes VSN already stopped."
else
# NOTE(review): pkill signal flags are normally spelled e.g. -QUIT;
# confirm that lowercase '-quit' is accepted on this platform.
pkill -quit greeneyes-vsn
# Block until the process has actually exited.
while ps -p $PID > /dev/null; do sleep 1; done;
fi
;;
restart)
#echo "Restarting GreenEyes VSN"
service greeneyes stop
service greeneyes start
;;
*)
echo "Usage: sudo service greeneyes {start|stop|restart}"
exit 1
;;
esac
exit 0
| true
|
cc63ec568665b236d0e19c980ff0857a5632f9cf
|
Shell
|
lemeurherve/mirror-scripts
|
/sync-recent-releases.sh
|
UTF-8
| 1,410
| 3.5625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Mirror recently released Jenkins plugin artifacts to Azure blob storage
# (get.jenkins.io) and to the OSUOSL rsync mirror.
# Usage: sync-recent-releases.sh <recent-releases.json>
set -o nounset
set -o errexit
set -o pipefail
BASE_DIR=/srv/releases/jenkins
UPDATES_DIR=/var/www/updates.jenkins.io
REMOTE_BASE_DIR=data/
HOST=jenkins@ftp-osl.osuosl.org
SCRIPT_DIR=$PWD
[[ $# -eq 1 ]] || { echo "Usage: $0 <recent-releases.json>" >&2 ; exit 1 ; }
[[ -f "$1" ]] || { echo "$1 is not a file" >&2 ; exit 2 ; }
RECENT_RELEASES_JSON="$1"
echo ">> Update artifacts on get.jenkins.io"
# Provides AZURE_STORAGE_ACCOUNT / AZURE_STORAGE_KEY used by blobxfer.
source /srv/releases/.azure-storage-env
# One "<name>/<version>" line per release in the input JSON.
RECENT_RELEASES=$( jq --raw-output '.releases[] | .name + "/" + .version' "$RECENT_RELEASES_JSON" )
if [[ -z "$RECENT_RELEASES" ]] ; then
echo "No recent releases"
exit
fi
echo $RECENT_RELEASES
echo
while IFS= read -r release; do
echo "Uploading $release"
blobxfer upload --storage-account "$AZURE_STORAGE_ACCOUNT" --storage-account-key "$AZURE_STORAGE_KEY" --local-path "${BASE_DIR}/plugins/$release" --remote-path mirrorbits/plugins/${release} --recursive --mode file --no-overwrite --exclude 'mvn%20org.apache.maven.plugins:maven-release-plugin:2.5:perform' --file-md5 --skip-on-md5-match --no-progress-bar
ssh -n ${HOST} "mkdir -p jenkins/plugins/${release}"
rsync -avz ${BASE_DIR}/plugins/${release}/ ${HOST}:jenkins/plugins/${release}
# Refresh the mirror timestamp so downstream mirrors pick up the change.
echo $(date +%s) > ${BASE_DIR}/TIME
rsync -avz ${BASE_DIR}/TIME ${HOST}:jenkins/TIME
echo "Done uploading $release"
done <<< "${RECENT_RELEASES}"
ssh jenkins@ftp-osl.osuosl.org 'sh trigger-jenkins'
|
b71b27ffcb87231e2ed1187e7b08cd8415c498bc
|
Shell
|
elambert/honeycomb
|
/whitebox/sp/src/scripts/cleanDisks.sh
|
WINDOWS-1252
| 2,840
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
#
# $Id: cleanDisks.sh 10857 2007-05-19 03:01:32Z bberndt $
#
# Copyright 2008, Sun Microsystems, Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# # Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# # Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# # Neither the name of Sun Microsystems, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Script to remove all stored data from cluster, plus
# log files.
#
# Print the command synopsis and abort the script with status 1.
usage() {
  echo "Usage: $0 <num_nodes>"
  exit 1
}
if [ $# -ne 1 ] ; then
usage
fi
if [ $1 -ne 16 -a $1 -ne 8 ] ; then
echo num_nodes must be 8 or 16
usage
fi
NUM_NODES=$1
# Per-run temp path for the generated per-node cleanup script.
CLEANUP=/tmp/$$.cleanup.sh
#
# make per-node disk scrub script
#
# NOTE: the heredoc delimiter is unquoted, so the backslash-escaped dollar
# signs below are what keeps the inner script's variables from being
# expanded here on the control node.
cat > $CLEANUP <<EOF
#!/bin/bash
scrub_data()
{
find /data/\$1/[0-9]* -type f | while read f ; do
/bin/rm \$f
done
/bin/rm -rf /data/\$1/MD_cache/*
}
# disable reboots, so skip
#svcadm disable honeycomb-server
sleep 5
for disk in 0 1 2 3 ; do
scrub_data \$disk &
done
for disk in 0 1 2 3 ; do
wait
done
/bin/rm -rf /hadb /data/0/hadb
# re-disable since stickage happens
#svcadm disable honeycomb-server
#svcadm enable honeycomb-server
#if we want to remove all traces, roll log & remove
#/bin/rm -f /var/adm/messages*
EOF
chmod +x $CLEANUP
# Copy the cleanup script to every node (hcb101..hcb1NN) and launch it
# there in the background.
c=1
while [ $c -le $NUM_NODES ] ; do
node=hcb`expr 100 + $c`
echo =================== $node
scp $CLEANUP ${node}:/tmp
ssh ${node} $CLEANUP &
c=$(( $c + 1 ))
done
echo WAIT..
# Barrier: one 'wait' per launched ssh job.
c=1
while [ $c -le $NUM_NODES ] ; do
wait
echo node done
c=$(( $c + 1 ))
done
echo CLEANUP DONE
# can we reliably detect errors?
|
11cce1f584ac936d7e43fb6ad234d5eae47e34f5
|
Shell
|
tolson-vkn/dex-k8s-ldap
|
/misc/gencert.sh
|
UTF-8
| 862
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate a self-signed CA plus a CA-signed server certificate (10-day
# validity) with SANs for *.example.com and the in-cluster service name.
SSL_DIR=../ssl
mkdir -p $SSL_DIR
# OpenSSL request config: v3 extensions carrying the subjectAltName list.
cat << EOF > $SSL_DIR/req.cnf
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names
[alt_names]
DNS.1 = *.example.com
DNS.2 = *.kube-auth.svc.cluster.local
EOF
# CA key + self-signed CA certificate.
openssl genrsa -out $SSL_DIR/ca-key.pem 2048
openssl req -x509 -new -nodes -key $SSL_DIR/ca-key.pem -days 10 -out $SSL_DIR/ca.pem -subj "/CN=kube-ca"
# Server key, CSR, and the CA-signed cert including the v3_req extensions.
openssl genrsa -out $SSL_DIR/key.pem 2048
openssl req -new -key $SSL_DIR/key.pem -out $SSL_DIR/csr.pem -subj "/CN=kube-ca" -config $SSL_DIR/req.cnf
openssl x509 -req -in $SSL_DIR/csr.pem -CA $SSL_DIR/ca.pem -CAkey $SSL_DIR/ca-key.pem -CAcreateserial -out $SSL_DIR/cert.pem -days 10 -extensions v3_req -extfile $SSL_DIR/req.cnf
| true
|
bdb2d178588449574b3d10a35301613893108371
|
Shell
|
AlexanderGrooff/dotfiles
|
/scripts/setup_nvim.sh
|
UTF-8
| 450
| 3.03125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Install AstroNvim as the user's Neovim configuration (idempotent: an
# existing AstroNvim checkout is left untouched).
set -e
set -x
# Check if AstroNvim is already present
# '|| /bin/true' keeps set -e from aborting when ~/.config/nvim is missing.
# NOTE(review): if pushd fails, the git check below runs in the current
# directory and the final popd will fail under set -e — confirm intended.
pushd ~/.config/nvim || /bin/true
if ! git remote -v | grep -q Astro; then
# Remove local state
rm -rf ~/.config/nvim ~/.local/share/nvim ~/.local/state/nvim ~/.cache/nvim || /bin/true
cd
# Install AstroNvim
git clone --depth 1 https://github.com/AstroNvim/AstroNvim ~/.config/nvim
fi
popd
# Setup initial AstroNvim config
# Headless first run so plugin bootstrapping happens without opening a UI.
nvim --headless -c 'quitall'
| true
|
c74bc68fdee6fe26f3cbbcc7c32a35120a8b10f2
|
Shell
|
housir2001/configs-mirror
|
/programs/nix-scripts/use_nix.sh
|
UTF-8
| 4,027
| 4.21875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# direnv helper: evaluate a nix-shell environment into the current direnv
# context, caching the dumped environment per nixpkgs version and keeping a
# GC root for the derivation.
#   -s SHELL  shell file to use and watch
#   -w FILE   extra file to watch
# Remaining arguments are forwarded to nix-shell.
use_nix() {
  echo "Hello" 3>&1 1>&2 2>&3
  if ! validate_version; then
    echo "This .envrc requires direnv version 2.18.2 or above."
    exit 1
  fi

  # define all local variables
  local shell
  local files_to_watch=()
  local opt OPTARG OPTIND # define vars used by getopts locally
  while getopts ":n:s:w:" opt; do
    case "${opt}" in
      s)
        shell="${OPTARG}"
        files_to_watch=("${files_to_watch[@]}" "${shell}")
        ;;
      w)
        files_to_watch=("${files_to_watch[@]}" "${OPTARG}")
        ;;
      :)
        fail "Invalid option: $OPTARG requires an argument"
        ;;
      \?)
        fail "Invalid option: $OPTARG"
        ;;
    esac
  done
  shift $((OPTIND -1))

  # Every watched file must exist up front.
  local f
  for f in "${files_to_watch[@]}"; do
    if ! [[ -f "${f}" ]]; then
      fail "cannot watch file ${f} because it does not exist"
    fi
  done

  # ====================================================
  # Derive a cache key from the nixpkgs checkout's version (or git HEAD).
  local path="$(nix-instantiate --find-file nixpkgs)"

  if [ -f "${path}/.version-suffix" ]; then
    local version="$(< $path/.version-suffix)"
  elif [ -f "${path}/.git" ]; then
    local version="$(< $(< ${path}/.git/HEAD))"
  fi

  local cache=".direnv/cache-${version:-unknown}"

  # Rebuild the cached env dump when it is missing or older than any of
  # the inputs that define the environment.
  local update_drv=0
  if [[ ! -e "$cache" ]] || \
    [[ "$HOME/.direnvrc" -nt "$cache" ]] || \
    [[ .envrc -nt "$cache" ]] || \
    [[ default.nix -nt "$cache" ]] || \
    [[ shell.nix -nt "$cache" ]];
  then
    [ -d .direnv ] || mkdir .direnv
    local tmp=$(nix-shell --show-trace --pure "$@" \
      --run "\"$direnv\" dump bash")
    echo "$tmp" > "$cache"
    update_drv=1
  else
    log_status using cached derivation
  fi

  local term_backup=$TERM path_backup=$PATH
  # BUG FIX: back up TMPDIR only when it is actually SET so we can
  # faithfully restore (or re-unset) it after eval'ing the cached env.
  # The old test was inverted ([ -z ${TMPDIR+x} ]): it saved TMPDIR only
  # when it was unset, and the export below clobbered a real TMPDIR
  # with an empty string.
  local tmp_backup
  if [ -n "${TMPDIR+x}" ]; then
    tmp_backup=$TMPDIR
  fi

  eval "$(< $cache)"
  export PATH=$PATH:$path_backup TERM=$term_backup
  if [ -n "${tmp_backup+x}" ]; then
    export TMPDIR=${tmp_backup}
  else
    unset TMPDIR
  fi

  # `nix-shell --pure` will invalid ssl certificate paths
  if [ "${SSL_CERT_FILE:-}" = /no-cert-file.crt ]; then
    unset SSL_CERT_FILE
  fi
  if [ "${NIX_SSL_CERT_FILE:-}" = /no-cert-file.crt ]; then
    unset NIX_SSL_CERT_FILE
  fi

  # This part is based on https://discourse.nixos.org/t/what-is-the-best-dev-workflow-around-nix-shell/418/4
  # Register a per-project GC root so nix-collect-garbage keeps the shell.
  if [ "$out" ] && (( $update_drv )); then
    local drv_link=".direnv/drv"
    local drv="$(nix show-derivation $out | grep -E -o -m1 '/nix/store/.*.drv')"
    local stripped_pwd=${PWD/\//}
    local escaped_pwd=${stripped_pwd//-/--}
    local escaped_pwd=${escaped_pwd//\//-}
    ln -fs "$drv" "$drv_link"
    ln -fs "$PWD/$drv_link" "/nix/var/nix/gcroots/per-user/$LOGNAME/$escaped_pwd"
    log_status renewed cache and derivation link
  fi

  if [[ $# = 0 ]]; then
    watch_file default.nix
    watch_file shell.nix
    # watch all the files we were asked to watch for the environment
    for f in "${files_to_watch[@]}"; do
      watch_file "${f}"
    done
  fi
}
# Report an error through direnv's logger and abort the evaluation.
fail() {
  log_error "$@"
  exit 1
}
# Hash the concatenated contents of the given files, printing a 32-character
# hex digest on stdout. Prefers GNU md5sum, falls back to BSD md5 (-q prints
# just the digest). Prints nothing if neither tool is available (`has` is
# direnv's command-exists helper).
hash_contents() {
if has md5sum; then
# `cut -c -32` keeps only the digest, dropping md5sum's " -" filename field.
cat "${@}" | md5sum | cut -c -32
elif has md5; then
cat "${@}" | md5 -q
fi
}
# Hash each given file individually. With md5sum only the first file's digest
# survives the 32-column cut; with BSD md5 -q one digest per file is printed.
# `has` is direnv's command-exists helper.
hash_file() {
if has md5sum; then
md5sum "${@}" | cut -c -32
elif has md5; then
md5 -q "${@}"
fi
}
# Succeed (return 0) iff the installed direnv is at least version 2.18.2.
validate_version() {
  local version major minor patch
  version="$("$(command -v direnv)" version)"
  major="$(echo "${version}" | cut -d. -f1)"
  minor="$(echo "${version}" | cut -d. -f2)"
  patch="$(echo "${version}" | cut -d. -f3)"
  # Compare component-wise: major first, then minor, then patch.
  if [ "${major}" -gt 2 ]; then return 0; fi
  if [ "${major}" -eq 2 ] && [ "${minor}" -gt 18 ]; then return 0; fi
  if [ "${major}" -eq 2 ] && [ "${minor}" -eq 18 ] && [ "${patch}" -ge 2 ]; then return 0; fi
  return 1
}
# savedrv: pin the current shell derivation ($out) as a nix GC root by
# symlinking the .drv into the project and registering a per-user gcroots
# link named after the project directory.
savedrv () {
if [ "$out" ]
then
# Extract the first quoted *.drv path from `nix show-derivation` output.
drv="$(nix show-derivation "$out" | perl -ne 'if(/"(.*\.drv)"/){print$1;exit}')"
if [ "$drv" ] && [ -e "$drv" ]
then
ln -fs "$drv" .drv
ln -fs "$PWD/.drv" "/nix/var/nix/gcroots/per-user/$LOGNAME/$(basename "$PWD")"
fi
fi
}
# Entry point: forward all .envrc arguments to use_nix.
use_nix "$@"
| true
|
3651a8172f8db29cdb0fe5c9227d4e8988b07117
|
Shell
|
ayoshimatsu/shellPractice
|
/shellFile/if_cd.sh
|
UTF-8
| 69
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/bash
# Try to change into the directory named by $1 and report the outcome.
if ! cd "$1"; then
  echo fail
else
  echo success
fi
| true
|
a0e86d764b601858a39123ccf4baac8218dc1a29
|
Shell
|
joshem/EC601
|
/Project1/FinalSprint.sh
|
UTF-8
| 704
| 2.84375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Fetch images via the Twitter API; if the expected picture was downloaded,
# render the image sequence into a video with ffmpeg and run the Google API
# step, otherwise report that nothing was found.
#run twitter api program
./clean.sh
python3 TwitterImages.py
cd TwitterPics/
pwd
# BUGFIX: the original wrote `myPicture = "..."` — with spaces, the shell
# runs a command named `myPicture` instead of assigning the variable, so the
# -f test below always saw an empty value.
myPicture="TwitPic100.jpg"
# Only build the video if the marker picture exists.
if [ -f "${myPicture}" ]; then
    ffmpeg -f image2 -framerate 0.5 -y -i TwitPic1%02d.jpg -c:v libx264 -pix_fmt yuv420p out.mp4
    cd ..
    #run Google API program
    python3 googleAPI.py
else
    echo "No picture found!"
fi
| true
|
0e2ded845c2cd206739a415b7ba22f5c11798fb4
|
Shell
|
josephcopenhaver/SeniorDesignProject_Spring2010
|
/Sensor/S70sensor
|
UTF-8
| 875
| 3.5625
| 4
|
[] |
no_license
|
#! /bin/sh
#
#This file should be named SXX_
#where XX is a two digit number denoting priority and _ is a descriptive name
#Place me in /etc/init.d to automatically start the sensor program upon booting
#If it doesn't seem to work, try changing the priority
#
### BEGIN INIT INFO
# Provides: Connection to Basestation
# Required-Start:
# Required-Stop:
# Should-Start: glibc
# Default-Start: S
# Default-Stop:
# Short-Description:Tries to connect to the Basestation
# Description:
### END INIT INFO
PATH=/sbin:/bin
do_start () {
	/bin/./sensor #change this to the path where the sensor program is
}
case "$1" in
start|"")
	do_start
	;;
stop)
	# BUGFIX: `stop` appeared twice in the original case; the first arm
	# (grouped with restart/reload) shadowed this intended no-op, making
	# `stop` print an error and exit 3. Keep the no-op here and only
	# reject the genuinely unsupported verbs below.
	#NO-OP
	#kill $(pidof sensorsocket)
	;;
restart|reload|force-reload)
	echo "Error: argument '$1' not supported" >&2
	exit 3
	;;
*)
	echo "Usage: sensorsocket.sh [start]" >&2
	exit 3
	;;
esac
| true
|
a9c45d328073581390f993d75087ea58c2ae867b
|
Shell
|
jasonm23/.zsh.d
|
/modules/gitconfig.zsh
|
UTF-8
| 2,124
| 2.765625
| 3
|
[] |
no_license
|
# Idempotently (re)writes the user's global git configuration;
# `--replace-all` makes every invocation safe to run repeatedly.
# General config defaults
git config --global --replace-all core.excludesfile '~/.gitignore_global'
git config --global --replace-all pull.rebase 'true'
git config --global --replace-all push.default 'upstream'
# Let editors deal with whitespace cleanup for now... (To be continued...)
git config --global --unset core.whitespace
git config --global --unset apply.whitespace
git config --global --replace-all color.branch 'auto'
git config --global --replace-all color.diff 'auto'
git config --global --replace-all color.interactive 'auto'
git config --global --replace-all color.status 'auto'
git config --global --replace-all color.ui 'auto'
git config --global --replace-all branch.autosetupmerge 'true'
# Aliases
git config --global --replace-all alias.di 'diff'
git config --global --replace-all alias.co 'checkout'
git config --global --replace-all alias.ci 'commit'
git config --global --replace-all alias.br 'branch'
git config --global --replace-all alias.sta 'stash'
git config --global --replace-all alias.z 'stash'
# snapshot: stash the work tree but immediately re-apply it, leaving a
# timestamped restore point in the stash list.
git config --global --replace-all alias.snapshot '! git stash save "snapshot: $(date)" && git stash apply "stash@{0}"'
git config --global --replace-all alias.st 'status -sb'
git config --global --replace-all alias.llog 'log --date=local'
git config --global --replace-all alias.l1 'log --oneline'
git config --global --replace-all alias.tree 'log --oneline --graph --decorate --all'
# ap: register untracked files as intent-to-add so `add --patch` sees them.
git config --global --replace-all alias.ap '! git add --intent-to-add . && git add --patch'
git config --global --replace-all alias.ours '! git checkout --ours $@ && git add $@'
git config --global --replace-all alias.theirs '! git checkout --theirs $@ && git add $@'
# Note any $PATH accessible script called `git-$name` will run as
#
# git $name
#
# You should setup complex git aliases like that.
#
# see ~/.zsh.d/bin/git-{rspec,jasmine,specs} for examples.
#
| true
|
c23838e49bcde0d8aa3a09f0a76138ffa4664112
|
Shell
|
jiangxincode/ShellTest
|
/httpd.sh
|
UTF-8
| 751
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/sh
# BUGFIX: the original shebang carried trailing text after `/bin/sh` (a
# comment in Chinese); the kernel passes everything after the interpreter as
# an argument, so that text would have been handed to sh as a bogus script
# name. It has been removed, and the inline comments translated to English.
#
# /etc/rc.d/rc.httpd
#
# Start/stop/restart the Apache web server.
#
# To make Apache start automatically at boot, make this
# file executable: chmod 755 /etc/rc.d/rc.httpd
#
# Dispatch on the single positional argument ($1).
case "$1" in
'start')    # start the httpd processes
	/usr/sbin/apachectl start ;;
'stop')     # stop the httpd processes
	/usr/sbin/apachectl stop ;;
'restart')  # restart the httpd processes
	/usr/sbin/apachectl restart ;;
*)          # anything else: print how to invoke this script
	echo "usage $0 start|stop|restart" ;;
esac
| true
|
7f685e9307256161a26557cb9a50526c939f8dfc
|
Shell
|
garner1/gpseq-hic
|
/main.sh
|
UTF-8
| 4,704
| 3.28125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# bash ~/Work/pipelines/aux.scripts/make-windows.sh 1000000 hg19 > hg19.binned.1M.bed
#
# Intersects a GPseq bed file with a juicer-dumped HiC matrix at the given
# resolution and emits a symmetric matrix (diagonal removed) combining the
# HiC score with the per-bin GPseq counts.
#
# BUGFIX: every pipeline stage used `LOCALE=C`, a variable that sort/join
# ignore entirely; replaced with `LC_ALL=C` so sort and join actually agree
# on a byte-wise collation order (join requires its inputs sorted the same
# way it compares them).
dir=$1 # the directory where data can be found and where gpseq.${resolution}.chr${chr}.bincount is created
resolution=$2 # 1M or 100K
gpseq=$3 # the gpseq bed file in the gpseq directory: ~/Work/dataset/gpseq+Hic/gpseq/BICRO48_TK77_10min_GG__cutsiteLoc-umiCount.transCorrected.bed
chr=$4 # the chromosome: 1, 2, ...
hicfile=$5 # fullpath to hic file
normalization=$6 # balancing of the hic matrix;NONE/VC/VC_SQRT/KR. VC is vanilla coverage, VC_SQRT is square root of vanilla coverage, and KR is Knight-Ruiz or Balanced normalization.
hictype=$7 # observed or oe (see juicer)
name=`echo ${gpseq}|rev|cut -d'/' -f1|rev`
present=$PWD
cd ${dir}
# BUGFIX: abort instead of carrying on when the input directory is missing.
if [ ! -d "gpseq" ]; then echo "gpseq directory with bed files does not exists!"; exit 1; fi
mkdir -p gpseq.${resolution}.${normalization}.${hictype}.chr${chr}.bincount
echo "Intersect HiC and GPseq dataset: 4DNFI1E6NJQJ.hic and ${name} ..."
res=`numfmt --from=si ${resolution}`
java -jar ~/tools/juicer/scripts/juicer_tools.jar dump ${hictype} ${normalization} ${hicfile} ${chr} ${chr} BP ${res} chr${chr}_${resolution}.${normalization}.${hictype}.txt # generate HiC matrix
if [ ! -f hg19.binned.${resolution}.bed ]; then
bash ~/Work/pipelines/aux.scripts/make-windows.sh ${res} hg19 > hg19.binned.${resolution}.bed # bin the genome
fi
bedtools intersect -a hg19.binned.${resolution}.bed -b ${gpseq} -wa -wb | grep -w ^chr${chr} |
datamash -s -g 1,2,3 sum 8 | cut -f2,4 | LC_ALL=C sort -k1,1 > ${gpseq}.${resolution}.chr${chr}.bincount # bin the gpseq data
mv ${gpseq}.${resolution}.chr${chr}.bincount gpseq.${resolution}.${normalization}.${hictype}.chr${chr}.bincount
LC_ALL=C sort -k1,1 -o chr${chr}_${resolution}.${normalization}.${hictype}.txt chr${chr}_${resolution}.${normalization}.${hictype}.txt
LC_ALL=C join -o1.1,1.2,1.3,2.1,2.2 chr${chr}_${resolution}.${normalization}.${hictype}.txt gpseq.${resolution}.${normalization}.${hictype}.chr${chr}.bincount/${name}.${resolution}.chr${chr}.bincount |
tr ' ' '\t' > gpseq.${resolution}.${normalization}.${hictype}.chr${chr}.bincount/${name}.${resolution}.chr${chr}.bincount.join-1 # join gpseq and hic row-wise
LC_ALL=C sort -k2,2 -o chr${chr}_${resolution}.${normalization}.${hictype}.txt chr${chr}_${resolution}.${normalization}.${hictype}.txt
LC_ALL=C join -1 2 -2 1 -o1.1,1.2,1.3,2.1,2.2 chr${chr}_${resolution}.${normalization}.${hictype}.txt gpseq.${resolution}.${normalization}.${hictype}.chr${chr}.bincount/${name}.${resolution}.chr${chr}.bincount |
tr ' ' '\t' > gpseq.${resolution}.${normalization}.${hictype}.chr${chr}.bincount/${name}.${resolution}.chr${chr}.bincount.join-2 # join gpseq and hic col-wise
parallel "sed -i.bak 's/\t/_/' {}" ::: gpseq.${resolution}.${normalization}.${hictype}.chr${chr}.bincount/${name}.${resolution}.chr${chr}.bincount.join-{1,2}
parallel "LC_ALL=C sort -k1,1 -o {} {}" ::: gpseq.${resolution}.${normalization}.${hictype}.chr${chr}.bincount/${name}.${resolution}.chr${chr}.bincount.join-{1,2}
LC_ALL=C join gpseq.${resolution}.${normalization}.${hictype}.chr${chr}.bincount/${name}.${resolution}.chr${chr}.bincount.join-{1,2} | # joined gpseq and hic
awk -v res=${res} '{print $3/res,$6/res,$2,$4,$7,$2/($4*$7)}' | # rescale rows and cols to resolution units
awk '$1!=$2' | # remove the diagonal
tr ' ' '\t' > gpseq.${resolution}.${normalization}.${hictype}.chr${chr}.bincount/${name}.${resolution}.chr${chr}.dat
cat gpseq.${resolution}.${normalization}.${hictype}.chr${chr}.bincount/${name}.${resolution}.chr${chr}.dat | awk '{print $2,$1,$3,$4,$5,$6}' |
tr ' ' '\t' > gpseq.${resolution}.${normalization}.${hictype}.chr${chr}.bincount/${name}.${resolution}.chr${chr}.dat.transposed # transpose the matrix
cat gpseq.${resolution}.${normalization}.${hictype}.chr${chr}.bincount/${name}.${resolution}.chr${chr}.dat.transposed gpseq.${resolution}.${normalization}.${hictype}.chr${chr}.bincount/${name}.${resolution}.chr${chr}.dat > gpseq.${resolution}.${normalization}.${hictype}.chr${chr}.bincount/${name}.${resolution}.chr${chr}.dat.concatenated # symmetrise the matrix
echo Cleaning ...
mv gpseq.${resolution}.${normalization}.${hictype}.chr${chr}.bincount/${name}.${resolution}.chr${chr}.dat.concatenated gpseq.${resolution}.${normalization}.${hictype}.chr${chr}.bincount/${name}.${resolution}.chr${chr}.dat
rm gpseq.${resolution}.${normalization}.${hictype}.chr${chr}.bincount/${name}.${resolution}.chr${chr}.dat.transposed
rm gpseq.${resolution}.${normalization}.${hictype}.chr${chr}.bincount/*bincount*
cd ${present}
echo Done!
###########################################
| true
|
b5d40ff215d3c03cde9b459d9b9a9a4652361c50
|
Shell
|
termux/termux-packages
|
/packages/plantuml/build.sh
|
UTF-8
| 793
| 2.59375
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Termux build recipe for PlantUML: builds the jar with ant and installs
# the jar plus a launcher script into the Termux prefix.
TERMUX_PKG_HOMEPAGE=https://plantuml.com/
TERMUX_PKG_DESCRIPTION="Draws UML diagrams, using a simple and human readable text description"
TERMUX_PKG_LICENSE="GPL-3.0"
TERMUX_PKG_MAINTAINER="@termux"
TERMUX_PKG_VERSION=1.2023.7
TERMUX_PKG_SRCURL=https://downloads.sourceforge.net/project/plantuml/${TERMUX_PKG_VERSION}/plantuml-${TERMUX_PKG_VERSION}.tar.gz
TERMUX_PKG_SHA256=806c5726ff1c8ee7c8314880f964bbb07d4dbaf055b73fb80bf76f5e21850019
TERMUX_PKG_DEPENDS="openjdk-17"
TERMUX_PKG_BUILD_DEPENDS="ant"
TERMUX_PKG_PLATFORM_INDEPENDENT=true
TERMUX_PKG_BUILD_IN_SRC=true
termux_step_make() {
# Build the distributable jar using the ant shipped in the Termux prefix.
sh $TERMUX_PREFIX/bin/ant dist
}
termux_step_make_install() {
mkdir -p $TERMUX_PREFIX/share/java
# NOTE(review): modes 600/700 make the jar/launcher owner-only; Termux is
# effectively single-user so this works there, but confirm it is intended.
install -Dm600 plantuml.jar $TERMUX_PREFIX/share/java/
install -Dm700 plantuml $TERMUX_PREFIX/bin/
}
| true
|
1dafe08368b8a89e2dae3ed2cbd4a14187678dda
|
Shell
|
Ametentia/LD43
|
/code/build
|
UTF-8
| 757
| 3.125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Build the SFML-based project with clang in clang-cl driver mode, writing
# artifacts into ../build and surfacing only errors/warnings.
PROJECT="Ludum"
PROJECT="${PROJECT,,}"   # lowercase everything...
PROJECT="${PROJECT^}"    # ...then capitalise the first letter
COMPILER="clang++.exe"
echo "Compiling with \"$COMPILER\"..."
# Source and Executable Options
MainSource="SFML_${PROJECT}.cpp"
# Compilation Options
CompilerFlags="--driver-mode=cl -Wno-switch -Zi -Od /MDd /MTd -imsvc ../libs/SFML/include"
Defines="-D${PROJECT^^}_INTERNAL -D${PROJECT^^}_SLOW -D${PROJECT^^}_WINDOWS=1"
LinkerFlags="/LIBPATH:../libs/SFML/lib sfml-audio-d.lib sfml-graphics-d.lib sfml-window-d.lib sfml-system-d.lib opengl32.lib kernel32.lib"
# Make sure the output directory exists before entering it.
mkdir -p ../build
pushd ../build > /dev/null
# Merge stderr into stdout and keep only diagnostic lines.
$COMPILER $CompilerFlags $Defines ../code/$MainSource /link $LinkerFlags 2>&1 | grep "error\|warning"
popd > /dev/null
| true
|
1cf3e718d6d11027cda4d4a2bf56083f2dc3613b
|
Shell
|
christiangael/cloud-service-broker
|
/acceptance-tests/azure/cf-test-services.sh
|
UTF-8
| 364
| 3.328125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Run the ad-hoc and spring-music acceptance suites in sequence, report the
# combined result, and propagate it as the exit status.
set -o pipefail
set -o nounset
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "${SCRIPT_DIR}/../functions.sh"
# FIX: quote the script paths so a checkout path containing spaces works.
"${SCRIPT_DIR}/cf-test-adhoc-services.sh" && "${SCRIPT_DIR}/cf-test-spring-music-service.sh"
RESULT=$?
if [ "${RESULT}" -eq 0 ]; then
    echo "SUCCEEDED: $0"
else
    echo "FAILED: $0"
fi
exit "${RESULT}"
| true
|
a7012a855dba1a5c426f5d362a7e30fc89580fb2
|
Shell
|
vittyz/robot-docker-chrome-alpine
|
/Example/run_test.sh
|
UTF-8
| 367
| 2.75
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Run the robot-framework suite in docker; an optional round number enables
# a rerun-failed configuration.
cd `dirname $0`
#rm occ_result/*
export round=$1
# BUGFIX: the original used an unquoted `$round` (a syntax error when no
# argument is given) and wrote `$rerun="..."`, which expands the empty
# variable and tries to execute `=--rerunfailed...` as a command instead of
# assigning. NOTE(review): $rerun is still not passed to docker below —
# presumably it should be appended to the robot arguments; confirm.
if [ -n "$round" ]
then
    rerun="--rerunfailed /out/output.xml --output output$round.xml"
fi
docker run --rm -t --network=general \
    -v "$(pwd)":/tests \
    -v "$(pwd)/occ_result":/out \
    robot-framework \
    -V /tests/conf/test.yaml -e success \
    -d /out "tests/test_*.robot"
| true
|
b44c72b031cb686aa5868ee6dd097ddd9a7a8966
|
Shell
|
bameda/prezto
|
/runcoms/zshrc
|
UTF-8
| 2,736
| 2.84375
| 3
|
[
"MIT"
] |
permissive
|
# Interactive zsh startup: loads prezto, initialises tool hooks (fasd,
# pyenv, direnv, sdkman), and defines aliases. Order matters — several
# sections eval generated init code that must run before later sections.
#
# Executes commands at the start of an interactive session.
#
# Authors:
#   Sorin Ionescu <sorin.ionescu@gmail.com>
#
# Source Prezto.
if [[ -s "${ZDOTDIR:-$HOME}/.zprezto/init.zsh" ]]; then
source "${ZDOTDIR:-$HOME}/.zprezto/init.zsh"
fi
# Customize to your needs...
#
# WINE
#
WINEPREFIX="$HOME/.wine32"
#
# Fasd
#
# eval "$(fasd --init posix-alias zsh-hook zsh-ccomp zsh-ccomp-install zsh-wcomp zsh-wcomp-install)"
# Cache fasd's init output and regenerate it only when the fasd binary is
# newer than the cache (or the cache is empty) — avoids a fork per shell.
fasd_cache="$HOME/.fasd-init-zsh"
if [ "$(command -v fasd)" -nt "$fasd_cache" -o ! -s "$fasd_cache" ]; then
fasd --init posix-alias zsh-hook zsh-ccomp zsh-ccomp-install zsh-wcomp zsh-wcomp-install >| "$fasd_cache"
fi
source "$fasd_cache"
unset fasd_cache
#
# Pyenv
#
export PYENV_ROOT="$HOME/.pyenv"
export PATH="$PYENV_ROOT/bin:$PATH"
eval "$(pyenv init -)"
eval "$(pyenv virtualenv-init -)"
#
# Rust
#
export PATH="$HOME/.cargo/bin:$PATH"
if [[ -s "$HOME/.cargo/env" ]]; then
source "$HOME/.cargo/env"
fi
#
# Direnv
#
eval "$(direnv hook zsh)"
# venv python prompt
# PROMPT_SUBST lets the $(show_virtual_env) call in PS1 re-evaluate on every
# prompt, so the venv indicator appears only inside direnv-managed venvs.
setopt PROMPT_SUBST
show_virtual_env() {
if [[ -n "$VIRTUAL_ENV" && -n "$DIRENV_DIR" ]]; then
echo "($(basename $VIRTUAL_ENV))"
fi
}
PS1='$(show_virtual_env)'$PS1
#
# sdkman
#
#THIS MUST BE AT THE END OF THE FILE FOR SDKMAN TO WORK!!!
export SDKMAN_DIR="/home/bameda/.sdkman"
[[ -s "/home/bameda/.sdkman/bin/sdkman-init.sh" ]] && source "/home/bameda/.sdkman/bin/sdkman-init.sh"
#
# grc
#
#for cmd in g++ gas head make ld ping6 tail traceroute6 $( ls /usr/share/grc/ ); do
#  cmd="${cmd##*conf.}"
#  type "${cmd}" >/dev/null 2>&1 && alias "${cmd}"="$( which grc ) --colour=auto ${cmd}"
#done
#
# Alias
#
alias diff='diff --color=auto'
alias battery-statu='upower -i /org/freedesktop/UPower/devices/battery_BAT0| grep -E "state|to\ full|percentage"'
alias vi="nvim"
alias vim="nvim"
alias pytest='python -m pytest -s --tb=native'
alias pytest.nomgrs="python -m pytest -s --tb=native --nomigrations"
alias pytest.nowarn="python -m pytest -s --tb=native -p no:warnings"
alias docker-remove-all-containers="docker ps -aq | xargs docker stop && docker ps -aq | xargs docker rm"
alias paca="pikaur"
alias paco="paru"
alias taiga-vpm-start="sudo systemctl start wg-quick@taigadev0.service"
alias taiga-vpm-status="sudo systemctl status wg-quick@taigadev0.service"
alias taiga-vpm-stop="sudo systemctl stop wg-quick@taigadev0.service"
alias kssh="kitty +kitten ssh" # https://sw.kovidgoyal.net/kitty/faq/#i-get-errors-about-the-terminal-being-unknown-or-opening-the-terminal-failing-when-sshing-into-a-different-computer
# This is the end
neofetch
# Completion system: must run after fpath is fully assembled.
autoload -Uz compinit
zstyle ':completion:*' menu select
fpath+=~/.zfunc
# >>>> Vagrant command completion (start)
fpath=(/opt/vagrant/embedded/gems/2.2.16/gems/vagrant-2.2.16/contrib/zsh $fpath)
compinit
# <<<< Vagrant command completion (end)
| true
|
05085c8c39304404a8ff4d17f0997c53fef24ec4
|
Shell
|
jadanis/seinfind
|
/src/seinfind.sh
|
UTF-8
| 139
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
# Join all command-line arguments into one search string and hand it to the
# Python backend; complain when no search terms were given.
# BUGFIX: the original wrapped the arguments in literal single quotes
# (str="'$*'"), so the -z test could never be true and the quotes were
# passed through to the search; it also printed a literal backslash in the
# error message ("didn\'t" inside double quotes).
str="$*"
if [ -z "$str" ]; then
    echo "Whoops! You didn't provide a search string!"
else
    python ./seinfind.py "$str"
fi
| true
|
5dad4cb8bfaaa8fa368d7943bcb997b4982c5a76
|
Shell
|
Vinotha16/WIN_ROLLBACK
|
/templates/linux_actualfacts/oracle6/passwdcreation_541_actual.fact
|
UTF-8
| 632
| 2.765625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Emit a one-line JSON fact "passwdcreation_541_actual": the pam_cracklib
# password-quality lines (retry/minlen/dcredit/ocredit/lcredit) found in
# system-auth and password-auth, comma-joined; empty when either file lacks
# such a line (i.e. the control is not fully configured).
a=$(grep "pam_cracklib.so.*retry=3.*minlen=.*dcredit=.*ocredit.*lcredit.*" /etc/pam.d/system-auth | paste -sd ",")
b=$(grep "pam_cracklib.so.*retry=3.*minlen=.*dcredit=.*ocredit.*lcredit.*" /etc/pam.d/password-auth | paste -sd ",")
cmd="${a}","${b}"
# Report empty when either PAM file is missing the expected directive.
if [ $(grep "pam_cracklib.so.*retry=3.*minlen=.*dcredit=.*ocredit.*lcredit.*" /etc/pam.d/password-auth | wc -l) -eq 0 ] || [ $(grep "pam_cracklib.so.*retry=3.*minlen=.*dcredit=.*ocredit.*lcredit.*" /etc/pam.d/system-auth | wc -l) -eq 0 ]; then
echo "{ \"passwdcreation_541_actual\" : \"\" }"
else
echo "{ \"passwdcreation_541_actual\" : \"$cmd\" }"
fi
| true
|
02bfd9ee913be950308d527ee823b8ac529b47d4
|
Shell
|
hojongs/dotfiles
|
/old/paste_to_dotfiles.sh
|
UTF-8
| 288
| 2.765625
| 3
|
[] |
no_license
|
#!/usr/bin/env sh
# Copy the live dotfiles from $HOME back into the dotfiles repository,
# echoing each copy as it happens.
DST=~/dotfiles
SRC=~
for f in .zshrc .vimrc .ideavimrc
do
    echo "cp $SRC/$f $DST/"
    # FIX: quote the paths so a HOME containing spaces cannot word-split.
    cp "$SRC/$f" "$DST/"
    echo
done
echo "cp $SRC/.vim_runtime/my_configs.vim $DST/.vim_runtime/my_configs.vim"
cp "$SRC/.vim_runtime/my_configs.vim" "$DST/.vim_runtime/my_configs.vim"
| true
|
224556f252a83bca408b514a997beb8e238be732
|
Shell
|
twooster/dotfiles
|
/pkg/i3/bin/screenlock/locker.sh
|
UTF-8
| 1,256
| 3.828125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Screen locker wrapper: takes a blurred grayscale screenshot to use as the
# i3lock background, drops a flag file while locked, and cooperates with the
# xss-lock sleep-inhibit file descriptor when one is provided.
XDG_RUNTIME_DIR="${XDG_RUNTIME_DIR:-$HOME}"
IMG_FORMAT=png
IMG_PATH="${XDG_RUNTIME_DIR}/sleepshot.${IMG_FORMAT}"
INHIBIT_TIME=0.5
LOCKER_CMD=( i3lock --nofork --ignore-empty-password --color=000033 --image="${IMG_PATH}" )
# Run before starting the locker
pre_lock() {
# Capture the root window as a grayscale, gaussian-blurred image for the
# lock screen, then create a flag file other scripts can test for.
import \
-window root \
-colorspace Gray \
-filter gaussian \
-define filter:blur=12 \
-resize '100%' \
"${IMG_PATH}"
touch "${XDG_RUNTIME_DIR}/screenlock.flag"
}
LOCK_PID=
# Run after the locker exits
post_lock() {
# Remove the flag, stop a still-running locker, and delete the screenshot.
rm -f -- "${XDG_RUNTIME_DIR}/screenlock.flag"
if ! [ -z "$LOCK_PID" ] ; then
kill $LOCK_PID
LOCK_PID=
fi
if [ -e "${IMG_PATH}" ] ; then
rm -f -- "${IMG_PATH}"
fi
}
###############################################################################
pre_lock
# Ensure cleanup runs on every exit path.
trap 'post_lock' EXIT
if [ -e /dev/fd/${XSS_SLEEP_LOCK_FD:--1} ] ; then
# lock fd is open, make sure the locker does not inherit a copy
# NOTE(review): closing our last copy of XSS_SLEEP_LOCK_FD signals to
# xss-lock that it may let the system sleep — confirm against xss-lock docs.
"${LOCKER_CMD[@]}" ${XSS_SLEEP_LOCK_FD}<&- &
LOCK_PID=$!
sleep "${INHIBIT_TIME}"
# now close our fd (only remaining copy) to indicate we're ready to sleep
eval "exec ${XSS_SLEEP_LOCK_FD}<&-"
else
"${LOCKER_CMD[@]}" &
LOCK_PID=$!
fi
wait "${LOCK_PID}" # for locker to exit
LOCK_PID=
| true
|
f91226bdfc0677dc186678c8c739f7015c04a5da
|
Shell
|
jonghunDB/SciDB
|
/deployment/common/user_access.sh
|
UTF-8
| 2,045
| 3.75
| 4
|
[] |
no_license
|
#!/bin/bash
#
# BEGIN_COPYRIGHT
#
# Copyright (C) 2008-2018 SciDB, Inc.
# All Rights Reserved.
#
# SciDB is free software: you can redistribute it and/or modify
# it under the terms of the AFFERO GNU General Public License as published by
# the Free Software Foundation.
#
# SciDB is distributed "AS-IS" AND WITHOUT ANY WARRANTY OF ANY KIND,
# INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,
# NON-INFRINGEMENT, OR FITNESS FOR A PARTICULAR PURPOSE. See
# the AFFERO GNU General Public License for the complete license terms.
#
# You should have received a copy of the AFFERO GNU General Public License
# along with SciDB. If not, see <http://www.gnu.org/licenses/agpl-3.0.html>
#
# END_COPYRIGHT
#
set -eu
# $1: account name the key is installed for (currently informational)
user=${1}
# $2: public key to append to ~/.ssh/authorized_keys
key=${2}
# Set to 1 by add_public_key when authorized_keys was modified.
update=0
# Append the given public key to ~/.ssh/authorized_keys unless it is already
# present; sets the global `update=1` when the file was modified.
# BUGFIX: the original grepped the key as a regex (metacharacters such as
# `+` and `.` in key material could false-match) via a fragile
# `cat | grep | wc -l` pipeline; use a fixed-string quiet grep instead,
# tolerating a missing authorized_keys file.
function add_public_key ()
{
    local new_key="${1}"
    if ! grep -qF -- "${new_key}" "${HOME}/.ssh/authorized_keys" 2>/dev/null; then
        echo "${new_key}" >> "${HOME}/.ssh/authorized_keys"
        update=1
    fi;
}
# Otherwise, ssh connect would/can ask about the "adding the host to the known host list"
# Append a catch-all Host stanza that disables strict host-key checking and
# discards known-hosts entries.
function disable_host_checking ()
{
    cat >> ~/.ssh/config <<'EOF'
Host *
 StrictHostKeyChecking no
 UserKnownHostsFile=/dev/null
EOF
}
# Restore the SELinux context on ~/.ssh so sshd can read it when SELinux is
# enforcing; a no-op on systems without SELinux.
# BUGFIX: the original used `if [ selinuxenabled ]`, which only tests that
# the literal string "selinuxenabled" is non-empty — i.e. always true — so
# chcon ran even where SELinux does not exist. Actually execute the command
# (and only when it is installed).
function selinux_home_ssh ()
{
    if command -v selinuxenabled >/dev/null 2>&1 && selinuxenabled; then
        chcon -R -v -t user_ssh_home_t ~/.ssh || true
    fi
}
# Update right to ~/.ssh directory
# Tightens permissions on ~/.ssh (owner-only), disables host-key prompting,
# and on RHEL-family systems restores the SELinux context; exits 1 on an
# unsupported OS (as reported by ./os_detect.sh).
function update_rights ()
{
disable_host_checking
chmod go-rwx,u+rwx ${HOME}/.ssh
chmod a-x,go-rw,u+rw ${HOME}/.ssh/*
case $(./os_detect.sh) in
"CentOS 6"|"CentOS 7"|"RedHat 6"|"RedHat 7")
selinux_home_ssh
;;
"Ubuntu 14.04")
:
;;
*)
echo "Not a supported OS";
exit 1
;;
esac
}
mkdir -p ${HOME}/.ssh
private=${HOME}/.ssh/id_rsa
public=${HOME}/.ssh/id_rsa.pub
# Regenerate the key pair when either half is missing.
# NOTE(review): `echo "" | ssh-keygen -t rsa` relies on stdin answering the
# file-path/passphrase prompts; `ssh-keygen -t rsa -N "" -f "${private}"`
# would be the robust non-interactive form — confirm before changing.
# NOTE(review): the `)]]` below has no space before `]]`; verify bash
# actually parses this as intended on the target systems.
if [[ ("1" != `ls ${private} | wc -l || true`) || ("1" != `ls ${public} | wc -l || true`)]]; then
rm -f ${private}
rm -f ${public}
echo "" | ssh-keygen -t rsa
fi;
add_public_key "${key}"
update_rights
exit 0
| true
|
6289aa3c0880bcfc9029b78e7ce6678c2dd48bad
|
Shell
|
meatcar/vagrant-phenomecentral
|
/setup.sh
|
UTF-8
| 1,512
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision the phenomecentral VM; under VirtualBox additionally grow the
# virtual disk to 50 GB and extend the guest's LVM volume into the space.
set -e
set -o pipefail
#########################
# Create VM
#
# If VirtualBox is not installed, just run vagrant up.
# BUGFIX: the original `[ ! -x \`which VBoxManage\` ]` degenerates to
# `[ ! -x ]` (string test) when `which` prints nothing; use the idiomatic
# `command -v` check instead.
if ! command -v VBoxManage >/dev/null 2>&1; then
    vagrant up
    exit $?
fi
# The rest of this script is for VirtualBox
vagrant up --no-provision
## GuestAdditions require bzip2
vagrant ssh -c "sudo yum install -y -q bzip2"
vagrant vbguest
vagrant halt
#########################
# Resize VM's Drive
#
VM=$(VBoxManage list vms | grep phenomecentral | cut -f1 -d ' ' | tr -d '"')
pushd "$HOME/VirtualBox VMs/$VM"
DISK=$(ls | grep vmdk | sed 's/.vmdk$//')
echo "Cloning drive..."
VBoxManage clonemedium disk "$DISK".vmdk "$DISK".vdi --format VDI
echo "Growing new drive..."
VBoxManage modifyhd "$DISK".vdi --resize 50000
echo "Unmounting old drive..."
VBoxManage storageattach "$VM" --storagectl 'IDE Controller' --port 0 --device 0 --medium none
echo "Mounting new drive..."
VBoxManage storageattach "$VM" --storagectl 'IDE Controller' --port 0 --device 0 --medium "$DISK".vdi --type hdd
echo "Removing old drive..."
rm -rf "$DISK".vmdk
popd
###############################################
# Resize VM's Partition to encompass new space.
#
# make partition in the remaining space with flag 8e
vagrant ssh -c "sudo /home/vagrant/sync/makepart.sh"
vagrant ssh -c "sudo partprobe"
vagrant ssh -c "sudo pvcreate /dev/sda5"
vagrant ssh -c "sudo vgextend VolGroup00 /dev/sda5"
vagrant ssh -c "sudo lvextend /dev/VolGroup00/LogVol00 /dev/sda5"
vagrant ssh -c "sudo resize2fs /dev/mapper/VolGroup00-LogVol00"
| true
|
fb88444a4e05e39175ceec66559bfc934133742f
|
Shell
|
aerokube/moon-website-ru
|
/ci/docs.sh
|
UTF-8
| 442
| 3.640625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Render every docs/*.adoc to PDF (tagged with the release number $1) into
# dist/docs using the asciidoctor docker image.
set -e
TAG=$1
WD=$(pwd)
SOURCE_DIR=${WD}/docs
OUTPUT_DIR=${WD}/dist/docs
echo "Removing existing files"
mkdir -p "${OUTPUT_DIR}"
rm -Rf "${OUTPUT_DIR}"/*.pdf
echo "Generating docs"
set -x
# FIX: iterate with a glob instead of parsing `ls` output through a piped
# while-loop (which breaks on unusual names and runs in a subshell).
cd "$SOURCE_DIR"
for file in *.adoc; do
    docker run --rm --pull always -v "$OUTPUT_DIR:/output" -v "$SOURCE_DIR:/source" asciidoctor/docker-asciidoctor:1 asciidoctor-pdf -a "revnumber=$TAG" -D /output "/source/$file"
done
| true
|
184d88b5adc0a08f25aa9e5db55e46d4ae5eb049
|
Shell
|
boulangg/phoenix
|
/kernel/scripts/initrd.s.sh
|
UTF-8
| 382
| 2.921875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Arguments: disk_file_name, output_file
# Generate an assembly stub ($2) that embeds the disk image ($1) between the
# _initrd_start/_initrd_end symbols in a page-aligned .rawdata section.
# BUGFIX: the original printed "/\* ... \*/" through printf's format string,
# emitting literal backslashes into the generated source, and interpolated
# $1 into the format (any '%' in the file name would be misinterpreted).
# Emit fixed text via single-quoted formats and pass the name as a %s arg.
rm "$2" 2> /dev/null
{
    printf '/* This file is generated */\n'
    printf '.section .rawdata\n'
    printf '\n'
    printf '.align 4096\n'
    printf '.globl _initrd_start\n'
    printf '_initrd_start:\n'
    printf '.incbin "%s"\n' "$1"
    printf '.globl _initrd_end\n'
    printf '_initrd_end:\n'
} > "$2"
| true
|
70005377088a6b93f8414b21461c682c5d62c314
|
Shell
|
aventador917/CPSC-240-X86-Assembly-Programming
|
/Assignment#4/run.sh
|
UTF-8
| 796
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/bash
# Assemble the NASM modules, compile the C/C++ modules, link everything into
# array_main.out, and run the result.
# FIX: use -f so a clean tree (no *.o / *.out yet) does not make rm fail.
rm -f *.o
rm -f *.out
# Assemble the module Control.asm
nasm -f elf64 -l Control.lis -o Control.o Control.asm
# Assemble the module Sum.asm
nasm -f elf64 -l Sum.lis -o Sum.o Sum.asm
# Compile the C++ module array_main.cpp
g++ -c -m64 -Wall -o array_main.o array_main.cpp -fno-pie -no-pie -std=c++14
# Compile the C module Display.c
gcc -c -m64 -Wall -o Display.o Display.c -fno-pie -no-pie -std=c11
# Compile the C++ module Fill.cpp
g++ -c -m64 -Wall -o Fill.o Fill.cpp -fno-pie -no-pie -std=c++14
# Link all object files already created
g++ -m64 -o array_main.out array_main.o Display.o Fill.o Control.o Sum.o -fno-pie -no-pie -std=c++14
./array_main.out
| true
|
22cf5f7269b1042280e1068d109a916517d5de55
|
Shell
|
memozhu/tf_CFO
|
/data_preprocess/fetch.sh
|
UTF-8
| 719
| 2.890625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# download dataset and put it in data directory
# Downloads the SimpleQuestions dataset and the FB5M extras into ../raw_data.
cd ..
# Use -p so reruns do not fail on already-existing directories.
mkdir -p data
mkdir -p raw_data
cd ./raw_data
printf "\nDownloading SimpleQuestions dataset...\n"
wget https://www.dropbox.com/s/tohrsllcfy7rch4/SimpleQuestions_v2.tgz
printf "\nUnzipping SimpleQuestions dataset...\n"
tar -xvzf SimpleQuestions_v2.tgz
rm SimpleQuestions_v2.tgz
cd ./SimpleQuestions_v2
mkdir -p dataset
mv annotated*.txt ./dataset
cd ..
printf "\nDownloading FB5M-extra...\n"
wget https://www.dropbox.com/s/dt4i1a1wayks43n/FB5M-extra.tar.gz
# BUGFIX: this message used `echo "\n..."`, which prints the \n escapes
# literally; use printf like every other status line in this script.
printf "\nUnzipping FB5M-extra...\n"
tar -xzf FB5M-extra.tar.gz
rm FB5M-extra.tar.gz
mkdir -p FB5M-extra
mv FB5M.en-name.txt FB5M-extra/
mv FB5M.name.txt FB5M-extra/
mv FB5M.type.txt FB5M-extra/
| true
|
dfc6e4d98d2df0120f9c2bbff5a70c954c28efc6
|
Shell
|
samir2901/Shell-Scripting
|
/conditional.sh
|
UTF-8
| 797
| 3.484375
| 3
|
[] |
no_license
|
#! /usr/bin/bash
#COMPARISON OPERATORS
# Number
# -eq -> is equal to --> if ["$a" -eq "$b"]
# -ne -> is not equal to --> if ["$a" -ne "$b"]
# -gt -> is greater than --> if ["$a" -gt "$b"]
# -ge -> is greater than or equal to --> if ["$a" -ge "$b"]
# -lt -> is less than --> if ["$a" -lt "$b"]
# -le -> is less than or equal to --> if ["$a" -le "$b"]
#String
# = -> is equal to
# == -> is equal to
# != -> is not equal to
# < -> is greater than (ASCII alphabetical order)
# > -> less than
# -z -> string is null, has zero length
count=19
# BUGFIX: `[ $count > 10 ]` is output REDIRECTION inside `[ ]` — it created
# a file named "10" and always succeeded; use the numeric -gt operator.
if [ "$count" -gt 10 ]
then
    echo The number is $count
fi
word="abc"
if [ "$word" == "abc" ]
then
    echo This is true
fi
count=19
if [ "$count" -gt 10 ]
then
    echo IF
# BUGFIX: `[[ $count < 5 ]]` compares STRINGS lexicographically ("19" < "5"
# is true); use -lt for a numeric comparison.
elif [[ "$count" -lt 5 ]]
then
    echo ELIF
else
    echo ELSE
fi
| true
|
1a4e8a26258461609d43603fb213c8a4c7576636
|
Shell
|
todd-dsm/process-ph2
|
/scripts/common/sshd.sh
|
UTF-8
| 704
| 3.5625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Harden sshd_config: force "UseDNS no" and "GSSAPIAuthentication no",
# replacing existing directives in place or appending them, after backing up
# the original file.
set -eux
export DEBIAN_FRONTEND=noninteractive
adminHome='/home/admin'
backupDir="$adminHome/backup"
sshdConf="/etc/ssh/sshd_config"
### Backup original configuration file
cp -pv "$sshdConf" "$backupDir"
# ensure that there is a trailing newline before attempting to concatenate
sed -i -e '$a\' "$sshdConf"
# set_option KEY "KEY value": replace an existing (possibly indented)
# directive or append the line when absent. Factored out of two previously
# duplicated grep/sed blocks.
set_option() {
    local key="$1" line="$2"
    if grep -q -E "^[[:space:]]*${key}" "$sshdConf"; then
        sed -i "s/^\s*${key}.*/${line}/" "$sshdConf"
    else
        echo "$line" >>"$sshdConf"
    fi
}
set_option UseDNS "UseDNS no"
set_option GSSAPIAuthentication "GSSAPIAuthentication no"
| true
|
d738663b4a992ed0c12b4651af22f23fa25b897a
|
Shell
|
olivierh59500/Noriben
|
/NoribenSandbox.sh
|
UTF-8
| 1,569
| 3.703125
| 4
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#Noriben Sandbox Automation Script
#Responsible for:
#* Copying malware into a known VM
#* Running malware sample
#* Copying off results
#
#Ensure you set the environment variables below to match your system
# BUGFIX: `[ ! -f $1 ]` collapses to `[ ! -f ]` (always false) when no
# argument is given, so the script used to continue without a sample; quote
# the operand. Error paths now also exit non-zero instead of bare `exit`.
if [ ! -f "$1" ]; then
    echo "Please provide executable filename as an argument."
    echo "For example:"
    echo "$0 ~/malware/ef8188aa1dfa2ab07af527bab6c8baf7"
    exit 1
fi
DELAY=10
MALWAREFILE=$1
VMRUN="/Applications/VMware Fusion.app/Contents/Library/vmrun"
VMX="/Users/bbaskin/VMs/RSA Victim.vmwarevm/Windows XP Professional.vmx"
VM_SNAPSHOT="Baseline"
VM_USER=Administrator
VM_PASS=password
FILENAME=$(basename "$MALWAREFILE")
NORIBEN_PATH="C:\\Documents and Settings\\$VM_USER\\Desktop\\Noriben.py"
ZIP_PATH=C:\\Tools\\zip.exe
LOG_PATH=C:\\Noriben_Logs
# Revert to a clean snapshot, boot, and push the sample into the guest.
"$VMRUN" -T ws revertToSnapshot "$VMX" $VM_SNAPSHOT
"$VMRUN" -T ws start "$VMX"
"$VMRUN" -gu $VM_USER -gp $VM_PASS copyFileFromHostToGuest "$VMX" "$MALWAREFILE" C:\\Malware\\malware.exe
# Run Noriben against the sample inside the guest.
"$VMRUN" -T ws -gu $VM_USER -gp $VM_PASS runProgramInGuest "$VMX" C:\\Python27\\Python.exe "$NORIBEN_PATH" -d -t $DELAY --cmd "C:\\Malware\\Malware.exe" --output "$LOG_PATH"
if [ $? -gt 0 ]; then
    echo "[!] File did not execute in VM correctly."
    exit 1
fi
# Zip the logs in the guest; zip exits 12 when nothing matched.
"$VMRUN" -T ws -gu $VM_USER -gp $VM_PASS runProgramInGuest "$VMX" "$ZIP_PATH" -j C:\\NoribenReports.zip "$LOG_PATH\\*.*"
if [ $? -eq 12 ]; then
    echo "[!] ERROR: No files found in Noriben output folder to ZIP."
    exit 1
fi
"$VMRUN" -gu $VM_USER -gp $VM_PASS copyFileFromGuestToHost "$VMX" C:\\NoribenReports.zip "$PWD/NoribenReports_$FILENAME.zip"
| true
|
5f23a503f14f84d9097dbc495209e1b215446ca2
|
Shell
|
rafaelsoaresbr/pkgbuild
|
/mate/libmateweather/PKGBUILD
|
UTF-8
| 865
| 2.703125
| 3
|
[] |
no_license
|
# Maintainer : Martin Wimpress <code@flexion.org>
# PKGBUILD for the experimental GTK3 build of libmateweather.
_ver=1.12
pkgbase=libmateweather
pkgname=${pkgbase}-gtk3
pkgver=${_ver}.1
pkgrel=1
pkgdesc="Provides access to weather information from the Internet. (GTK3 version [EXPERIMENTAL])"
url="http://mate-desktop.org"
arch=('i686' 'x86_64')
license=('LGPL')
depends=('dconf' 'gtk3' 'libsoup')
makedepends=('mate-common')
# NOTE(review): the URL interpolates ${pkgname} (libmateweather-gtk3) but
# MATE release tarballs are normally named after ${pkgbase}
# (libmateweather-${pkgver}.tar.xz) — confirm this URL actually resolves.
source=("http://pub.mate-desktop.org/releases/${_ver}/${pkgname}-${pkgver}.tar.xz")
sha1sums=('dfd58b8fa7ec93e04773f61006b34b21a08e66d2')
install=${pkgbase}.install
build() {
# NOTE(review): extracted source dirs are usually ${pkgbase}-${pkgver};
# verify the tarball really unpacks to plain ${pkgbase}.
cd "${srcdir}/${pkgbase}"
./configure \
--prefix=/usr \
--sysconfdir=/etc \
--localstatedir=/var \
--with-gtk=3.0 \
--disable-static \
--disable-python \
--enable-locations-compression
make
}
package() {
cd "${srcdir}/${pkgbase}"
make DESTDIR="${pkgdir}" install
}
| true
|
d26edf799d0aa6832cacea00fc5164ee9fdf5139
|
Shell
|
BuzaL/boost_deploy
|
/deploy_all_sources.sh
|
UTF-8
| 823
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# Deploy a boost library's sources (each sub-library's src/ tree plus the
# filtered boost headers/sources) into ${output}. Expects ensure_variables.sh
# to define: boost_path, library, source, output.
source ensure_variables.sh
cd $boost_path
# BUGFIX: bare `exit` reported success even when the library was missing;
# exit non-zero so callers can detect the failure.
if [ ! -d "${library}" ]; then (>&2 echo "$library not found"); exit 1; fi
# Copy library sources
>&2 echo "Deploying all! Please wait..."
find "${library}" -maxdepth 2 -type d -path '*/src' -exec mkdir -p "${output}/{}" \; -exec cp -r {} "${output}/{}/.." \;
# Copy boost sources (copy whole folder + remove unwanted files)
## tar faster than simple copy
tar c ${source} | tar x -C ${output}
# Drop everything that is not a source/header (or extensionless) file.
find ${source} -type f \( ! \( -regex '.*\.\(cpp\|hpp\|h\|c\|ipp\)' -or ! -regex '.*\..*' \) \) -printf "${output}/%p\n" | xargs rm
## Whole copy + remove files faster than copy 1-by-1 more than 10k files.
cd ->/dev/null
| true
|
66c4b196084278b1c0155b42ceb76afb20174a30
|
Shell
|
forresti/dnn_dsx
|
/finetune_datasets/finetune_nets.sh
|
UTF-8
| 383
| 2.953125
| 3
|
[] |
no_license
|
#do these one at a time for now.
# Fine-tune every Caffe net under the backup directory, resuming each from
# its newest .caffemodel snapshot; training output goes to a per-run log.
for d in /nscratch/forresti/dsx_backup/nets_backup_1-9-15_LOGO/*
do
    echo "$d"
    # Skip entries we cannot enter instead of training in the wrong cwd
    # (the original ignored a failing 'cd $d').
    cd "$d" || continue
    now=$(date +%a_%Y_%m_%d__%H_%M_%S)
    newestWeights=$(ls -t "$d"/*caffemodel | head -1) #thx: stackoverflow.com/questions/5885934
    "$CAFFE_ROOT"/build/tools/caffe train -solver=solver.prototxt -weights="$newestWeights" -gpu=0 > "finetune_$now.log" 2>&1
done
| true
|
5cddc53b3fec7b20738d8fb1e81d2107f5308700
|
Shell
|
kdjay517/EmployeeWageCalculation
|
/empWage.sh
|
UTF-8
| 1,106
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash -x
# Employee wage calculation: simulates a month of random attendance and
# records the wage earned each day.  (-x leaves command tracing on.)
IS_FULL_TIME=1;
IS_PART_TIME=2;
MAX_HRS_IN_MONTH=100;   # stop once this many hours have been worked
EMP_RATE_PER_HR=20;     # wage rate per hour
NUM_WORKING_DAYS=20;    # or once this many days have elapsed
totalEmpHrs=0;
totalWorkingDays=0;
# Associative array: per-day wage, keyed by day label.
declare -A dailyWage;
# Map an attendance code to hours worked and print the result.
# $1 - attendance code: $IS_PART_TIME -> 4h, $IS_FULL_TIME -> 8h, else 0h.
# Note: also assigns the (non-local) workHrs variable, as before.
function getWorkHrs(){
	local attendance=$1;
	if [ "$attendance" = "$IS_PART_TIME" ]; then
		workHrs=4;
	elif [ "$attendance" = "$IS_FULL_TIME" ]; then
		workHrs=8;
	else
		workHrs=0;
	fi
	echo $workHrs
}
# Compute a day's wage (hours worked x EMP_RATE_PER_HR) and print it.
# $1 - hours worked that day.
function getEmpWage(){
	local hoursWorked=$1;
	echo $(( hoursWorked * EMP_RATE_PER_HR ))
}
# Simulate days until the hour cap or day cap is reached; each day draws a
# random attendance code (0..2) and records that day's wage.
while [ $totalEmpHrs -lt $MAX_HRS_IN_MONTH -a $totalWorkingDays -lt $NUM_WORKING_DAYS ]
do
	((totalWorkingDays++))
	empCheck=$((RANDOM%3))
	empHrs="$(getWorkHrs $empCheck)"
	totalEmpHrs=$(($totalEmpHrs+$empHrs))
	# NOTE(review): the subscript '"Day" $totalWorkingDays' relies on bash
	# joining the two words into one key — confirm the intended key format.
	dailyWage["Day" $totalWorkingDays]="$( getEmpWage $empHrs )"
done
totalSalary=$(($totalEmpHrs*$EMP_RATE_PER_HR));
# Print all recorded wages, then all day keys.
echo ${dailyWage[@]}
echo ${!dailyWage[@]}
| true
|
58f49db5c73ad04e82d1ea52f7289029d13ab6de
|
Shell
|
c-paras/shpy
|
/demo04.sh
|
UTF-8
| 474
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/sh
#This script simulates a user login form
# NOTE(review): credentials are hard-coded in plain text — acceptable only
# for a demo script.
#obtains user details
echo Enter username:
read -r username
echo Enter password:
read -r password
#authenticates user
# BUG FIX: the variables are now quoted (empty input used to make 'test'
# error out) and '=' replaces '==', which is not POSIX sh.
if [ "$username" = "hello" ] && [ "$password" = "world" ]
then
	#proceeds if user authentication succeeded
	echo "You have been successfully authenticated"
	exit 0
else
	#exits with error status if authentication was unsuccessful
	echo "Authentication failed"
	echo "Wrong username or password"
	exit 1
fi
| true
|
4b0582ce64854b236c7a8ac420aa38b8c95a7921
|
Shell
|
vavdoshka/gitpod-eks-guide
|
/ami/scripts/ubuntu2004/boilerplate.sh
|
UTF-8
| 699
| 2.8125
| 3
|
[
"MIT-0",
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Packer provisioning boilerplate: upgrade the OS, install base tooling,
# enable auditing, then partition the data disk and reboot the build VM.
set -o pipefail
set -o nounset
set -o errexit
# shellcheck disable=SC1091
# Supplies wait_for_cloudinit, install_jq, configure_http_proxy,
# install_awscliv2, install_ssmagent and partition_disks.
source /etc/packer/files/functions.sh
# wait for cloud-init to finish
wait_for_cloudinit
# upgrade the operating system
apt-get update -y && apt-get upgrade -y
# install dependencies
apt-get install -y \
    ca-certificates \
    curl \
    auditd \
    parted \
    unzip \
    lsb-release
install_jq
# enable audit log
systemctl enable auditd && systemctl start auditd
# enable the /etc/environment
configure_http_proxy
# install aws cli
install_awscliv2
# install ssm agent
install_ssmagent
# partition the disks
# stop services that could hold the disk while repartitioning
systemctl stop rsyslog irqbalance polkit
partition_disks /dev/nvme1n1
reboot
| true
|
e864278691bbbd2fed42ebcfe65d0442b7cd5a01
|
Shell
|
Tsar/tv_tuner_management
|
/tv_tuner_installer.sh
|
UTF-8
| 982
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
# This script installs soft to work with AverMedia MCE116 Plus (for XUbuntu 11.10 and others)
# Run as root or with "sudo"
# Install IVTV userspace tools and VLC, then reload the ivtv driver.
apt-get install ivtv-utils vlc
rmmod ivtv
modprobe ivtv
# Windows driver package that contains the xc3028 tuner firmware blob.
wget http://www.steventoth.net/linux/xc5000/HVR-12x0-14x0-17x0_1_25_25271_WHQL.zip #if this URL doesn't work, comment this line and uncomment next
#wget https://raw.github.com/Tsar/tv_tuner_management/master/files_for_installer/HVR-12x0-14x0-17x0_1_25_25271_WHQL.zip
unzip -j HVR-12x0-14x0-17x0_1_25_25271_WHQL.zip Driver85/hcw85bda.sys
# Helper script that extracts the firmware from the Windows .sys driver.
wget http://linuxtv.org/hg/v4l-dvb/raw-file/3919b17dc88e/linux/Documentation/video4linux/extract_xc3028.pl #if this URL doesn't work, comment this line and uncomment next
#wget https://raw.github.com/Tsar/tv_tuner_management/master/files_for_installer/extract_xc3028.pl
perl extract_xc3028.pl
# Install the extracted firmware where the kernel driver looks for it.
cp xc3028-v27.fw /lib/firmware/xc3028-v27.fw
echo "Now you should run TV tuner in Windows once; than reboot back and use \"tv_tuner_channels_switcher.sh\""
| true
|
302eacec8598a2327fad6b97773be1e8c1d54405
|
Shell
|
MichaelTong/bin
|
/mgreport.d/work_report
|
UTF-8
| 1,192
| 4.125
| 4
|
[] |
no_license
|
#!/bin/bash
# Serve mgreport
# Should not be called directly
# Opens a monthly report directory (named "YYYY.MM_N") in the configured
# file explorer and editor.  With no argument, the most recent report wins.
if [[ -z $MGREPORT ]]; then
    echo "This script needs to call within \"mgreport\""
    exit -2
fi
# script starts here
# Report directory names: 4-digit year, 2-digit month, 1-digit sequence.
regrex='^([0-9]{4})\.([0-9]{2})_([0-9])'
report_to_work_on=""
if [[ $# -lt 1 ]]; then
    # No argument: pick the lexicographically last report directory.
    cd $REPORT_HOME
    reports=($(ls -d *.*_*))
    report_to_work_on=${reports[-1]}   # negative index requires bash >= 4.3
    echo "Working on most recent report: $report_to_work_on"
else
    if [[ $1 =~ $regrex ]]; then
        year=${BASH_REMATCH[1]}
        month=${BASH_REMATCH[2]}
        number=${BASH_REMATCH[3]}
        report_to_work_on=$year.${month}_$number
    else
        echo "Error: wrong report name format"
        # NOTE(review): print_usage is not defined in this file — presumably
        # provided by the mgreport wrapper; verify.
        print_usage
        exit -1
    fi
fi
if ! [[ -e $REPORT_HOME/$report_to_work_on ]]; then
    echo "Report '$report_to_work_on' doesn't exist"
    exit -3
fi
echo "Opening $report_to_work_on in file explorer"
cd $REPORT_HOME/$report_to_work_on
if ! [[ -z $FILEEXPLORER ]]; then
    $FILEEXPLORER .
else
    echo "\$FILEEXPLORER not defined, skip opening directory in file explorer"
fi
if [[ -z $EDITOR ]]; then
    echo "Error: \$EDITOR not defined"
else
    echo "Opening $report_to_work_on in $EDITOR"
    $EDITOR .
fi
| true
|
3fee5297a1024a72d397d9872a08414b2f6c80fa
|
Shell
|
rajdeeppinge/Systems_Software_Labs
|
/Lab_9/environmentVar.sh
|
UTF-8
| 416
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
# Print a selection of environment variables, then the script's own
# positional arguments.
printf '%s\n' "Path command: $PATH"
printf '%s\n' "Hello $USER!"
printf '%s\n' "Your current working directory is: $PWD"
printf '%s\n' "Your home directory is: $HOME"
printf '%s\n' "Shell command: $SHELL"
# Additional environment variables
printf '%s\n' "arg 0: $0"
printf '%s\n' "arg 1: $1"
printf '%s\n' "arg 2: $2"
printf '%s\n' "arg 3: $3"
printf '%s\n' "arg 4: $4"
# Use of $* and $#
printf '%s\n' "There are $# arguments to the script"
printf '%s\n' "Arguments not including the name of the script: $*"
| true
|
1363b8f2a9268046b76230c3db52900f80c3d586
|
Shell
|
ali5ter/vmware_scripts
|
/vke/vke_deploy_wordpress
|
UTF-8
| 3,202
| 3.921875
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# @file vke_deploy_wordpress
# Deploy a helm based app to K8s cluster backing a VKE Smart Cluster
# @author Alister Lewis-Bowen <alister@lewis-bowen.org>
# Abort early if helm is not on PATH.
type helm &> /dev/null || {
    echo 'Please install helm. Installation instructions are available from'
    echo 'https://docs.helm.sh/using_helm/#installing-helm'
    echo 'If you use homebrew on macOS, install using:'
    echo 'brew install kubernetes-helm'
    exit 1
}
# NOTE(review): heading(), vke_cluster_auth() and VKE_DEPLOYMENT_PREFIX are
# presumably defined in vke_env.sh — verify.
source vke_env.sh
# select a smart cluster and generate kube-config file -----------------------
vke_cluster_auth
# install/upgrade the K8s agent that helm talks to -----------------------------------
heading 'Install/upgrade helm agent (tiller) on the K8s cluster'
helm init --upgrade --wait
# deploy a helm chart for wordpress ------------------------------------------
heading 'Deploy chart for wordpress and wait for service to be externally available'
# Unique, lowercase release name so repeated runs do not collide.
_prefix=$(echo "${VKE_DEPLOYMENT_PREFIX}-wp-$(date '+%s')" | tr '[:upper:]' '[:lower:]')
_doc="$PWD/$_prefix.txt"
echo -e "Chart documentation will be written to \n$_doc"
echo -n 'Deploying chart... '
helm install --name "$_prefix" stable/wordpress > "$_doc"
echo 'done'
echo -n 'Wait for the LB external IP to be assigned...'
kubectl get svc --namespace default -w "${_prefix}-wordpress" > /dev/null
echo 'done'
# test the deployment by using wordpress -------------------------------------
heading 'Show browsable URL to the wordpress site and kube dashboard'
## Unable to get the external IP using the following technique documented by
## this wordpress chart
##_ip=$(kubectl get svc --namespace default "${_prefix}-wordpress" -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
# Print the load-balancer hostname of the wordpress service (empty until assigned).
get_fqdn() { kubectl get svc --namespace default "${_prefix}-wordpress" -o jsonpath='{.status.loadBalancer.ingress[0].hostname}'; }
echo -n "Waiting for ingress hostname..."
_fqdn=''
while [ "$_fqdn" == "" ]; do
    sleep 10; _fqdn=$(get_fqdn); echo -n '.'
done
echo 'done'
_url="http://${_fqdn}/admin"
echo -e "\nOpen the Wordpress admin UI using\n$_url"
[[ "$OSTYPE" == "darwin"* ]] && open "$_url"
echo -e "\nRefresh the webpage at this URL until the weberver responds."
# Admin password is stored as a base64 secret by the chart.
_password=$(kubectl get secret --namespace default "${_prefix}-wordpress" -o jsonpath="{.data.wordpress-password}" | base64 --decode)
echo "Log in using credentials (user/$_password)"
# open kube dashboard --------------------------------------------------------
# Derive the dashboard host from the API server address in cluster-info.
_fqdn=$(kubectl cluster-info | grep master | tr -d '[:cntrl:]' | sed 's/^.*https:\/\/api.\(.*\):443.*$/\1/')
_url="https://ui.$_fqdn/"
echo -e "\nOpen the Kube Dashboard using\n$_url"
[[ "$OSTYPE" == "darwin"* ]] && open "$_url"
# clean up -------------------------------------------------------------------
heading "Remove existing K8s deployments starting with $VKE_DEPLOYMENT_PREFIX"
read -p "Shall I clear out all existing deployments now? [y/N] " -n 1 -r
echo
[[ $REPLY =~ ^[Yy]$ ]] && {
    for deployment in $(helm ls | grep "$VKE_DEPLOYMENT_PREFIX" | awk '{print $1}'); do
        read -p "Delete $deployment? [y/N] " -n 1 -r
        echo
        [[ $REPLY =~ ^[Yy]$ ]] && {
            helm del --purge "$deployment"
            [[ -f "$_doc" ]] && rm "$_doc"
        }
    done
}
| true
|
517328cdafe49eeef079557e6bf32f9e6ed92901
|
Shell
|
kshiva1126/eqdock
|
/bin/eqdock
|
UTF-8
| 1,642
| 3.625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Interactive wizard for an EC-CUBE Docker environment: prompts for database
# settings, confirms them with the user, then generates and starts the
# docker-compose stack.
set -e
CR=`echo $'\n> '`   # newline + "> " used to continue the confirmation prompt
FILE_DIR="$0"
EQDOCK_ROOT_DIR="${FILE_DIR%/*}/../"
echo "Hello eqdock!"
# Helper libraries provide the setDB*/setPHPVersion/check* functions used below.
. "${EQDOCK_ROOT_DIR}bin/helperDocker.lib"
checkDockerMachineActive
. "${EQDOCK_ROOT_DIR}bin/helperECCUBE.lib"
ECCUBE_VERSION=`checkFromTheBeginningOrTheMiddle`
. "${EQDOCK_ROOT_DIR}bin/helperDB.lib"
. "${EQDOCK_ROOT_DIR}bin/helperPHP.lib"
# NOTE(review): this overwrites the value computed above — confirm which
# ECCUBE_VERSION is intended.
ECCUBE_VERSION="4.0.3"
# Keep prompting until the user confirms the settings.
while :
do
    DB_KIND=`setDBKind`
    DB_VERSION=`setDBVersion "${DB_KIND}"`
    DB_NAME=`setDBName`
    DB_USER=`setDBUser`
    DB_PASSWD=`setDBPasswd`
    if [ "_${DB_KIND}" = "_mysql" ]; then
        DB_ROOT_PASSWD=`setDBRootPasswd`
        echo "$DB_ROOT_PASSWD"
    fi
    PHP_VERSION=`setPHPVersion "${ECCUBE_VERSION}"`
    MSG="この内容で環境構築を行います。
よろしいでしょうか? y/n
DB : ${DB_KIND}
DBバージョン : ${DB_VERSION}
DB名 : ${DB_NAME}
DBユーザ : ${DB_USER}
DBパスワード : ${DB_PASSWD}"
    case "$DB_KIND" in
        "mysql" )
            MSG="${MSG}
DBrootパスワード : ${DB_ROOT_PASSWD}
PHPバージョン : ${PHP_VERSION}"
            ;;
        "postgres" )
            MSG="${MSG}
PHPバージョン : ${PHP_VERSION}"
            ;;
    esac
    read -ep "${MSG} ${CR}" IS_OK
    case "$IS_OK" in
        "y" | "yes" | "Y" | "YES" )
            break
            ;;
        * )
            ;;
    esac
done
# Wait for the background git clone to finish
wait
createEccubeEnv "$DB_KIND" "$DB_VERSION" "$DB_NAME" "$DB_USER" "$DB_PASSWD"
copyDockerMaterial "$DB_KIND"
checkInitial "$DB_KIND"
rewriteDockerfile "$DB_KIND" "$DB_VERSION" "$PHP_VERSION"
# NOTE(review): DB_ROOT_PASSWD is only set on the mysql path; for postgres it
# is passed empty here — confirm rewriteDockerCompose tolerates that.
rewriteDockerCompose "$DB_KIND" "$DB_NAME" "$DB_USER" "$DB_PASSWD" "$DB_ROOT_PASSWD"
upDockerCompose
| true
|
30a6b95cfb9f15ad20c7686556364607576ddc12
|
Shell
|
evanshortiss/shipwars-deployment
|
/openshift/deploy.kafka-streams.sh
|
UTF-8
| 3,487
| 3.484375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Deploy the Shipwars Kafka Streams applications and visualisation UIs into
# an OpenShift project backed by an OpenShift Streams Kafka instance.
NAMESPACE=${NAMESPACE:-shipwars}
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
# BUG FIX: the three CLI checks below used a bare 'exit', which returned
# status 0 and let callers believe the deploy succeeded.
if ! command -v oc &> /dev/null
then
    echo "Please install oc CLI to use this script"
    exit 1
fi
if ! command -v rhoas &> /dev/null
then
    echo "Please install rhoas CLI to use this script"
    exit 1
fi
if ! command -v jq &> /dev/null
then
    echo "Please install jq CLI to use this script"
    exit 1
fi
oc project $NAMESPACE > /dev/null 2>&1
if [ $? -eq 0 ]; then
    echo "Using project $NAMESPACE on cluster: $(oc whoami --show-server)"
else
    echo "Failed to select $NAMESPACE project using the 'oc project' command."
    echo "Did you forget to set the NAMESPACE variable, or have not run deploy.sh yet?"
    exit 1
fi
# Check that the bind script has been run
oc get kafkaconnection
if [ $? -eq 1 ]; then
    echo "No kafkaconnection CustomResource was found in the $NAMESPACE project."
    echo "Please run deploy.kafka-bind.sh and retry this script."
    exit 1
fi
rhoas login > /dev/null 2>&1
if [ $? -eq 0 ]; then
    echo "Logged in to OpenShift Streams for Apache Kafka."
else
    echo "Login failed for OpenShift Streams for Apache Kafka."
    exit 1
fi
KAFKA_COUNT=$(rhoas kafka list -o json | jq '.total')
if [ "0" == "$KAFKA_COUNT" ]; then
    echo "Please create a Kafka instance using 'rhoas kafka create' and retry this script"
    # BUG FIX: the script previously fell through and kept deploying with no
    # Kafka instance available.
    exit 1
fi
# Force user to choose a the kafka instance
rhoas kafka use
KAFKA_BOOTSTRAP_SERVERS=$(rhoas kafka describe | jq .bootstrap_server_host -r)
echo "Deploy Quarkus Kafka Streams applications and visualisation UI"
# Deploy the enricher Kafka Streams application. This performs a join between
# a shot and player record to create a more informative enriched shot record
oc process -f "${DIR}/shipwars-streams-shot-enricher.yml" \
    -p NAMESPACE="${NAMESPACE}" \
    -p KAFKA_BOOTSTRAP_SERVERS="${KAFKA_BOOTSTRAP_SERVERS}" | oc create -f -
# An aggregator implemented in Kafka Streams that tracks how often a given
# cell coordinate is hit by players (AI and human) thereby capturing a shot
# distribution. This also exposes an HTTP server sent events endpoint that can
# stream shots to clients in realtime
oc process -f "${DIR}/shipwars-streams-shot-distribution.yml" \
    -p NAMESPACE="${NAMESPACE}" \
    -p KAFKA_BOOTSTRAP_SERVERS="${KAFKA_BOOTSTRAP_SERVERS}" | oc create -f -
# Another aggregator that tracks the shots fired for each game in series.
# This can be used to replay games and/or write them somewhere for long term
# permanent storage, e.g S3 buckets or DB
oc process -f "${DIR}/shipwars-streams-match-aggregates.yml" \
    -p NAMESPACE="${NAMESPACE}" \
    -p KAFKA_BOOTSTRAP_SERVERS="${KAFKA_BOOTSTRAP_SERVERS}" | oc create -f -
# Deploy the replay UI, and instruct it to use the internal match aggregates
# Quarkus/Kafka Streams application API
oc new-app quay.io/evanshortiss/shipwars-replay-ui \
    -l "app.kubernetes.io/part-of=shipwars-analysis" \
    -e REPLAY_SERVER="http://shipwars-streams-match-aggregates:8080" \
    --name shipwars-replay
oc expose svc shipwars-replay
# A UI to see shots play out in realtime.
oc new-app quay.io/evanshortiss/s2i-nodejs-nginx~https://github.com/evanshortiss/shipwars-visualisations \
    --name shipwars-visualisations \
    -l "app.kubernetes.io/part-of=shipwars-analysis,app.openshift.io/runtime=nginx" \
    --build-env STREAMS_API_URL="https://$(oc get route shipwars-streams-shot-distribution -o jsonpath='{.spec.host}')/shot-distribution/stream"
oc expose svc shipwars-visualisations
| true
|
b2ad222fe03478321106b2b8237a23d9ff22f78c
|
Shell
|
SofyaTavrovskaya/disk_perf_test_tool
|
/scripts/run_test.sh
|
UTF-8
| 3,938
| 3.765625
| 4
|
[
"Apache-2.0"
] |
permissive
|
# Parse positional arguments into exported environment variables.
# $1 Fuel master IP, $2 external IP, $3 ssh key file, $4 file to test,
# $5 result file, $6 Fuel master password (default test37),
# $7 timeout in seconds (default 360).  Missing values only print warnings.
function get_arguments() {
    export FUEL_MASTER_IP=$1
    if [ -z "${FUEL_MASTER_IP}" ]; then echo "Fuel master node ip is not provided"; fi
    export EXTERNAL_IP=$2
    if [ -z "${EXTERNAL_IP}" ]; then echo "Fuel external ip is not provided"; fi
    export KEY_FILE_NAME=$3
    if [ -z "${KEY_FILE_NAME}" ]; then echo "Key file name is not provided"; fi
    export FILE_TO_TEST=$4
    # BUG FIX: this previously re-checked KEY_FILE_NAME with the key-file
    # message instead of validating FILE_TO_TEST.
    if [ -z "${FILE_TO_TEST}" ]; then echo "File to test is not provided"; fi
    if [ ! -f "$KEY_FILE_NAME" ];
    then
        echo "File $KEY_FILE_NAME does not exist."
    fi
    export RESULT_FILE=$5
    if [ -z "${RESULT_FILE}" ]; then echo "Result file name is not provided"; fi
    export FUEL_MASTER_PASSWD=${6:-test37}
    export TIMEOUT=${7:-360}
    echo "Fuel master IP: $FUEL_MASTER_IP"
    echo "Fuel master password: $FUEL_MASTER_PASSWD"
    echo "External IP: $EXTERNAL_IP"
    echo "Key file name: $KEY_FILE_NAME"
    echo "Timeout: $TIMEOUT"
}
# note : function will works properly only when image dame is single string without spaces that can brake awk
# Poll glance until the image named $IMAGE_NAME reports state "active", or
# give up after $TIMEOUT iterations (1s apart).
function wait_image_active() {
    image_state="none"
    image_name="$IMAGE_NAME"
    counter=0
    # BUG FIX: the original condition '[ ["$image_state" == "active"] ]' was
    # malformed and never true, so the loop body never executed at all.
    while [ "$image_state" != "active" ] ; do
        sleep 1
        image_state=$(glance image-list | grep "$image_name" | awk '{print $12}')
        echo $image_state
        counter=$((counter + 1))
        if [ "$counter" -eq "$TIMEOUT" ]
        then
            echo "Time limit exceed"
            break
        fi
    done
}
# Poll nova until the VM named $VM_NAME shows an associated floating IP, or
# give up after $TIMEOUT iterations (1s apart).
function wait_floating_ip() {
    floating_ip="|"
    vm_name=$VM_NAME
    counter=0
    # BUG FIX: the original condition compared with '!=' against the initial
    # sentinel "|", so the loop never polled; keep looping while the listing
    # still shows the sentinel or nothing at all.
    while [ "$floating_ip" == "|" ] || [ -z "$floating_ip" ] ; do
        sleep 1
        floating_ip=$(nova floating-ip-list | grep "$vm_name" | awk '{print $13}' | head -1)
        counter=$((counter + 1))
        if [ $counter -eq $TIMEOUT ]
        then
            echo "Time limit exceed"
            break
        fi
    done
}
# Poll 'nova list' until the VM named $VM_NAME no longer appears, or give up
# after $TIMEOUT iterations (1s apart).
function wait_vm_deleted() {
    vm_name=$(nova list| grep "$VM_NAME"| awk '{print $4}'| head -1)
    counter=0
    # Quoting fix: unquoted $vm_name broke 'test' for multi-word output.
    while [ -n "$vm_name" ] ; do
        sleep 1
        vm_name=$(nova list| grep "$VM_NAME"| awk '{print $4}'| head -1)
        counter=$((counter + 1))
        if [ "$counter" -eq "$TIMEOUT" ]
        then
            echo "Time limit exceed"
            break
        fi
    done
}
# Find a free floating IP in $FLOATING_NET (allocating a new one if none is
# free) and export it as VM_IP.
function get_floating_ip() {
    IP=$(nova floating-ip-list | grep "$FLOATING_NET" | awk '{if ($5 == "-") print $2}' | head -n1)
    if [ -z "$IP" ]; then # fix net name
        IP=$(nova floating-ip-create "$FLOATING_NET"| awk '{print $2}')
        # BUG FIX: the original tested the unrelated (unset) $list variable
        # here, so a failed allocation was never detected.
        if [ -z "$IP" ]; then
            echo "Cannot allocate new floating ip"
            # exit
        fi
    fi
    echo $FLOATING_NET
    export VM_IP=$IP
    echo "VM_IP: $VM_IP"
}
# Fetch and source the OpenStack credentials (openrc) from the Fuel master,
# then sanity-check them with a 'nova list' call.
# NOTE(review): get_openrc is expected to come from the sourced run_vm.sh —
# verify.
function run_openrc() {
    source run_vm.sh "$FUEL_MASTER_IP" "$FUEL_MASTER_PASSWD" "$EXTERNAL_IP" novanetwork nova
    source `get_openrc`
    list=$(nova list)
    if [ "$list" == "" ]; then
        echo "openrc variables are unset or set to the empty string"
    fi
    echo "AUTH_URL: $OS_AUTH_URL"
}
# Entry point: parse arguments, load OpenStack credentials from the Fuel
# controller, then boot and prepare the test VM.
get_arguments $@
echo "getting openrc from controller node"
run_openrc
echo "openrc has been activated on your machine"
get_floating_ip
echo "floating ip has been found"
bash prepare.sh
echo "Image has been sended to glance"
wait_image_active
echo "Image has been saved"
BOOT_LOG_FILE=`tempfile`
# NOTE(review): boot_vm is not defined in this file — presumably provided by
# the sourced run_vm.sh; its log is parsed for the created volume id.
boot_vm | tee "$BOOT_LOG_FILE"
VOL_ID=$(cat "$BOOT_LOG_FILE" | grep "VOL_ID=" | sed 's/VOL_ID=//')
rm "$BOOT_LOG_FILE"
echo "VM has been booted"
wait_floating_ip
echo "Floating IP has been obtained"
source `prepare_vm`
echo "VM has been prepared"
# sudo bash ../single_node_test_short.sh $FILE_TO_TEST $RESULT_FILE
ssh $SSH_OPTS -i $KEY_FILE_NAME ubuntu@$VM_IP \
"cd /tmp/io_scenario;"
# echo 'results' > $RESULT_FILE; \
# curl -X POST -d @$RESULT_FILE http://http://172.16.52.80/api/test --header 'Content-Type:application/json'
# nova delete $VM_NAME
# wait_vm_deleted
# echo "$VM_NAME has been deleted successfully"
# cinder delete $VOL_ID
# echo "Volume has been deleted $VOL_ID"
| true
|
65f6ef00fb11b60d59f60f8bfa85c4b751a0f4af
|
Shell
|
chunqishi/lappsvm
|
/bin/ubuntu.sh
|
UTF-8
| 11,831
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
# Installer for a LAPPS/Language Grid service node on Ubuntu: installs base
# packages, Tomcat 6 and PostgreSQL, then deploys the Service Manager webapp.
UBUNTU_DIR=$(pwd)                 # where the install log is written
PROJECT_DIR="$HOME/Project"       # working directory for downloads/unpacks
TOMCAT6_URL="http://mirror.nexcess.net/apache/tomcat/tomcat-6/v6.0.39/bin/apache-tomcat-6.0.39.tar.gz"
TOMCAT6_TAR="$PROJECT_DIR/tomcat6.tar.gz"
TOMCAT6_HOME="$PROJECT_DIR/apache-tomcat-6.0.39/"
TOMCAT6_WEBAPP="${TOMCAT6_HOME}/webapps"
TOMCAT6_SERVER="http://localhost:8080"
USER_PASSWD="lappsgrid"           # sudo password piped to 'sudo -S' below
POSTGRESQL_ROLE="langrid"
POSTGRESQL_PASSWD="langrid"
POSTGRESQL_DB="langrid"
SERVICEGRID_DOWNLOAD="http://eldrad.cs-i.brandeis.edu:8080/download/servicegrid/"
SERVICEGRID_CORENODE="langrid-corenode-p2p-2.0.0-20120718-modified.zip"
SERVICEGRID_SERVICEMANAGER="service_manager.war"
MAVEN_SETTING_URL="http://eldrad.cs-i.brandeis.edu:8080/download/settings.xml"
# Append a timestamped "key = value" entry, tagged with a context label, to
# the install log at ${UBUNTU_DIR}/lapps-ubuntu.log.
# $1 - key, $2 - value, $3 - context tag.
function __ubuntu_log {
    local stamp
    stamp=$(date '+%Y-%m-%d %H:%M:%S')
    printf '%s\n' "${stamp} (${3}) : ${1} = ${2} " >> "${UBUNTU_DIR}/lapps-ubuntu.log"
}
# Purge PostgreSQL 9.1 and delete its data directory so a clean install can
# follow.  The sudo password is piped in via 'sudo -S'.
function __ubuntu_remove_postgresql {
    # Ensure the data dir exists first so the final rm -rf cannot fail oddly.
    if [[ -d /var/lib/postgresql/9.1/main ]]; then
        echo ""
    else
        mkdir -p /var/lib/postgresql/9.1/main
    fi
    ## echo "${USER_PASSWD}" | sudo -S apt-get --yes --force-yes --purge remove postgresql*
    echo "${USER_PASSWD}" | sudo -S apt-get --yes --force-yes --purge remove postgresql-9.1
    echo "${USER_PASSWD}" | sudo -S apt-get -f autoremove
    echo "${USER_PASSWD}" | sudo -S rm -rf /var/lib/postgresql/9.1/main
}
# Refresh the apt package index (sudo password piped via 'sudo -S').
function __ubuntu_update {
    echo "${USER_PASSWD}" | sudo -S apt-get update
}
# Pause between installation phases.  By default prints a blank line and
# sleeps one second; when KEYPRESS is set (non-empty), waits for a keystroke
# instead.
function __wait {
    if [[ -n ${KEYPRESS} ]]; then
        echo "Press a key ..."
        read -n 1
        echo ""
        echo "Continuing ..."
    else
        echo ""
        sleep 1
    fi
}
# Install the base toolchain (python, git, unzip, JDK, maven2), fetch the
# project Maven settings.xml, and install PostgreSQL 9.1 if absent.  Each
# tool is skipped when already on PATH.
function __ubuntu_install {
    echo ""
    echo "Install python, git, maven2, unzip, java(jdk1.6), postgresql-9.1 ..."
    echo ""
    echo ""
    echo "Install python ..."
    echo ""
    if [[ -z $(which python) ]]; then
        echo "${USER_PASSWD}" | sudo -S apt-get --yes --force-yes install python
    else
        echo ""
        echo "Python installed ."
        echo ""
    fi
    if [[ -z $(which git) ]]; then
        echo "${USER_PASSWD}" | sudo -S apt-get --yes --force-yes install git
    else
        echo ""
        echo "Git installed ."
        echo ""
    fi
    if [[ -z $(which unzip) ]]; then
        echo "${USER_PASSWD}" | sudo -S apt-get --yes --force-yes install unzip
    else
        echo ""
        echo "Unzip installed ."
        echo ""
    fi
    if [[ -z $(which java) ]]; then
        echo "${USER_PASSWD}" | sudo -S apt-get --yes --force-yes install default-jdk
    else
        echo ""
        echo "Java installed ."
        echo ""
    fi
    if [[ -z $(which mvn) ]]; then
        echo "${USER_PASSWD}" | sudo -S apt-get --yes --force-yes install maven2
    else
        echo ""
        echo "Maven installed ."
        echo ""
    fi
    # Install the project-wide Maven settings into ~/.m2.
    if [[ -d $HOME/.m2 ]]; then
        echo ""
    else
        mkdir $HOME/.m2
    fi
    wget ${MAVEN_SETTING_URL} -O $HOME/.m2/settings.xml
    if [[ -f $HOME/.m2/settings.xml ]]; then
        echo ""
    else
        echo "Maven settings.xml NOT works !"
    fi
    if [[ -z $(which psql) ]]; then
        echo ""
        echo "Install PostgreSQL9.1 ..."
        echo ""
        # Remove any stale data directory before the fresh install.
        if [[ -d /var/lib/postgresql/9.1/main ]]; then
            echo "${USER_PASSWD}" | sudo -S rm -rf /var/lib/postgresql/9.1/main
        fi
        echo "${USER_PASSWD}" | sudo -S apt-get --yes --force-yes install postgresql postgresql-contrib
    else
        echo ""
        echo "PostgreSQL installed ."
        echo ""
    fi
}
# Wait (up to ~30s) until Tomcat answers at ${TOMCAT6_SERVER}.
# Returns non-zero if the timeout was exhausted.
function __tomcat_wait {
    max=30; while ! wget --spider ${TOMCAT6_SERVER} > /dev/null 2>&1; do
        max=$(( max - 1 )); [ $max -lt 0 ] && break; sleep 1
    done; [ $max -gt 0 ]
}
# Stop Tomcat if a process mentioning ${TOMCAT6_HOME} is running, then give
# it a couple of seconds to shut down.
function __tomcat_stop {
    if [[ -z $(ps x|grep ${TOMCAT6_HOME}|grep -v grep) ]]; then
        echo "Tomcat stopped !"
    else
        tomcat_shutdown=$("$TOMCAT6_HOME/bin/shutdown.sh")
        echo "Tomcat stop ..."
        sleep 2
    fi
}
# Start Tomcat and block until it responds over HTTP.
function __tomcat_start {
    ### restart tomcat
    ##
    #
    echo "Tomcat start ..."
    "$TOMCAT6_HOME/bin/startup.sh"
    __tomcat_wait
}
# Stop Tomcat (if running), start it again, and wait for it to respond.
function __tomcat_restart {
    __tomcat_stop
    ### restart tomcat
    ##
    #
    "$TOMCAT6_HOME/bin/startup.sh"
    __tomcat_wait
}
# Download and unpack Tomcat 6 into ${PROJECT_DIR} (if not already present),
# restart it and verify the default home page is served.
function __ubuntu_tomcat {
    if [[ -d "${TOMCAT6_HOME}" ]]; then
        echo "${TOMCAT6_HOME} already exists ."
    else
        wget "$TOMCAT6_URL" -O "$TOMCAT6_TAR"
        tar -zxvf "$TOMCAT6_TAR"
    fi
    echo ""
    echo "Test Tomcat6 ..."
    echo ""
    cd "$TOMCAT6_HOME"
    __tomcat_restart
    # Smoke test: the stock Tomcat landing page contains "Apache".
    tomcat_home_page="$(wget -qO- ${TOMCAT6_SERVER} )"
    if [[ $tomcat_home_page == *Apache* ]]; then
        echo "Tomcat6 works !"
    else
        echo "Tomcat6 DOES NOT work !"
    fi
}
# Prepare PostgreSQL for the Language Grid Service Manager: verify the
# server works, create the langrid role, (re)create the langrid database,
# unpack the corenode package and load its stored procedures.
function __postgresql_service_manager {
    if [[ -d ${TOMCAT6_HOME} ]]; then
        __tomcat_stop
    else
        echo ""
    fi
    # sudo nano /etc/postgresql/9.3/main/pg_hba.conf
    # echo "# Database administrative login by Unix domain socket:"
    # echo "local all postgres md5"
    echo ""
    echo "Test PostgreSQL ..."
    echo ""
    # template1 always exists, so "already exists" in the output proves the
    # server is up and reachable.
    postgres_create_db_template1=$(echo "${USER_PASSWD}" | sudo -S -u postgres createdb template1)
    if [[ ${postgres_create_db_template1} == *already* ]]; then
        echo "PostgreSQL works !"
    else
        echo "PostgreSQL DOES NOT work !"
    fi
    echo ""
    echo "Create ROLE(${POSTGRESQL_ROLE}) DB(${POSTGRESQL_DB}) ..."
    echo ""
    #postgres_create_role_lapps=$(sudo -u postgres psql -c "create role ${POSTGRESQL_ROLE} with createdb login password '${POSTGRESQL_PASSWD}';")
    #if [[ ${postgres_create_db_template1} == *ROLE* ]]; then
    #	echo "Create ROLE=${POSTGRESQL_ROLE} works !"
    #else
    #	echo "Create ROLE=${POSTGRESQL_ROLE} DOES NOT work !"
    #fi
    #
    #postgres_createdb_lappsdb=$(createdb -U ${POSTGRESQL_ROLE} ${POSTGRESQL_DB})
    #
    #if [[ -z ${postgres_createdb_lappsdb} ]]; then
    #	echo "Create DB=${POSTGRESQL_DB} works !"
    #else
    #	echo "Create DB=${POSTGRESQL_DB} DOES NOT work !"
    #fi
    # Fetch and unpack the Service Grid corenode package (contains the SQL).
    if [[ -f "${PROJECT_DIR}/${SERVICEGRID_CORENODE}" ]]; then
        echo ""
    else
        wget "${SERVICEGRID_DOWNLOAD}${SERVICEGRID_CORENODE}" -O "${PROJECT_DIR}/${SERVICEGRID_CORENODE}"
    fi
    if [[ -d "${PROJECT_DIR}/corenode" ]]; then
        rm -rf "${PROJECT_DIR}/corenode"
    fi
    unzip "${PROJECT_DIR}/${SERVICEGRID_CORENODE}" -d "${PROJECT_DIR}/corenode"
    create_storedproc="${PROJECT_DIR}/corenode/postgresql/create_storedproc.sql"
    #echo "${USER_PASSWD}" | sudo -S -u postgres createuser -S -D -R -P ${POSTGRESQL_ROLE}
    ## http://www.postgresql.org/docs/8.1/static/sql-createrole.html
    ## http://www.postgresql.org/docs/9.2/static/app-createuser.html
    postgres_create_role_lapps=$(echo "${USER_PASSWD}" | sudo -S -u postgres psql -c "create role ${POSTGRESQL_ROLE} with NOSUPERUSER NOCREATEDB NOCREATEROLE login password '${POSTGRESQL_PASSWD}';")
    # BUG FIX: this previously inspected postgres_create_db_template1 (the
    # server smoke test) instead of the role-creation output just captured.
    if [[ ${postgres_create_role_lapps} == *ROLE* ]]; then
        echo "Create ROLE=${POSTGRESQL_ROLE} works !"
    else
        echo "Create ROLE=${POSTGRESQL_ROLE} DOES NOT work !"
    fi
    # Disconnect all sessions before dropping and recreating the database.
    echo "${USER_PASSWD}" | sudo -S -u postgres psql -c "SELECT pg_terminate_backend(pg_stat_activity.procpid)
FROM pg_stat_activity
WHERE pg_stat_activity.datname = '${POSTGRESQL_DB}'
AND procpid <> pg_backend_pid();"
    echo "${USER_PASSWD}" | sudo -S -u postgres psql -c "drop database ${POSTGRESQL_DB};"
    echo "${USER_PASSWD}" | sudo -S -u postgres createdb ${POSTGRESQL_DB} -O ${POSTGRESQL_ROLE} -E 'UTF8'
    echo "${USER_PASSWD}" | sudo -S -u postgres createlang plpgsql ${POSTGRESQL_DB}
    echo "${USER_PASSWD}" | sudo -S -u postgres psql ${POSTGRESQL_DB} < ${create_storedproc}
    # BUG FIX: the original interpolated the never-defined $ROLENAME here,
    # producing invalid SQL; the owner is the langrid role created above.
    echo "${USER_PASSWD}" | sudo -S -u postgres psql ${POSTGRESQL_DB} -c "ALTER FUNCTION \"AccessStat.increment\"(character varying, character varying, character varying, character varying, character varying, timestamp without time zone, timestamp without time zone, integer, timestamp without time zone, integer, timestamp without time zone, integer, integer, integer, integer) OWNER TO ${POSTGRESQL_ROLE}"
}
# Deploy the Service Manager webapp into Tomcat: download the WAR, write the
# langrid context configuration and JVM options, copy the corenode jars into
# Tomcat's lib, then start Tomcat.
function __tomcat_service_manager {
    service_manager_war="${TOMCAT6_HOME}/webapps/service_manager.war"
    ### stop tomcat
    ##
    #
    cd "$TOMCAT6_HOME"
    __tomcat_stop
    sleep 2
    if [[ -f ${service_manager_war} ]]; then
        echo ""
    else
        echo "Download Service Manager War ..."
        wget "${SERVICEGRID_DOWNLOAD}${SERVICEGRID_SERVICEMANAGER}" -O "${TOMCAT6_HOME}/webapps/service_manager.war"
    fi
    tomcat_langrid="${PROJECT_DIR}/corenode/tomcat-langrid"
    # Tomcat <Context> configuration: JDBC datasource for the langrid DB plus
    # node/operator parameters for the Language Grid core node.
    #LANGRID_CONF=$(cat<<END_OF_LANGRID_CONF #END_OF_LANGRID_CONF)
    LANGRID_CONF="<?xml version='1.0' encoding='utf-8'?>
<Context
reloadable='true'
displayName='Language Grid Core Node'
>
<Resource
name='jdbc/langrid' auth='Container' type='javax.sql.DataSource'
maxActive='100' maxIdle='50' maxWait='10000'
username='${POSTGRESQL_ROLE}' password='${POSTGRESQL_PASSWD}'
driverClassName='org.postgresql.Driver'
url='jdbc:postgresql:${POSTGRESQL_DB}'
/>
<Parameter
name='langrid.activeBpelServicesUrl'
value='http://eldrad.cs-i.brandeis.edu:8081/active-bpel/services'
/>
<Parameter
name='langrid.maxCallNest'
value='16'
/>
<Parameter name='langrid.node.gridId' value='lapps_grid_1' />
<Parameter name='langrid.node.nodeId' value='lapps_node_1' />
<Parameter name='langrid.node.name' value='lapps ubuntu' />
<Parameter name='langrid.node.url' value='http://127.0.0.1:8080/service_manager/' />
<Parameter name='langrid.node.os' value='Ubuntu 12.10' />
<Parameter name='langrid.node.cpu' value='Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz' />
<Parameter name='langrid.node.memory' value='121 G' />
<Parameter name='langrid.node.specialNotes' value='Mannual Installation' />
<Parameter name='langrid.operator.userId' value='lapps' />
<Parameter name='langrid.operator.initialPassword' value='lappsgrid' />
<Parameter name='langrid.operator.organization' value='lapps provider' />
<Parameter name='langrid.operator.responsiblePerson' value='lapps provider' />
<Parameter name='langrid.operator.emailAddress' value='lapps@' />
<Parameter name='langrid.operator.homepageUrl' value='http://' />
<Parameter name='langrid.operator.address' value='USA' />
<Parameter name='langrid.serviceManagerCopyright' value='Copyright 2014' />
<Parameter name='langrid.activeBpelServicesUrl' value='' />
<Parameter name='langrid.activeBpelAppAuthKey' value='' />
<Parameter name='langrid.atomicServiceReadTimeout' value='30000' />
<Parameter name='langrid.compositeServiceReadTimeout' value='30000' />
<Parameter name='langrid.maxCallNest' value='16' />
<Parameter name='appAuth.simpleApp.authIps' value='127.0.0.1' />
<Parameter name='appAuth.simpleApp.authKey' value='eldrad' />
</Context>
"
    # JVM sizing for Tomcat; heap dumps on OOM go to /tmp.
    echo "CATALINA_OPTS=\"-Xmx512m -XX:PermSize=128m -XX:MaxPermSize=256m -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/tmp\"" > "${TOMCAT6_HOME}/bin/setenv.sh"
    echo ""
    echo "Configure Service Manager in Tomcat6 ..."
    echo ""
    echo "${LANGRID_CONF}" > ${TOMCAT6_HOME}/conf/Catalina/localhost/service_manager.xml
    cp ${tomcat_langrid}/lib/*.jar ${TOMCAT6_HOME}/lib/
    __tomcat_start
}
# ---- main installation sequence ---------------------------------------------
# Each phase is followed by __wait (pause / optional keypress).
echo ""
echo "********** Repository **********"
echo ""
__ubuntu_update
__wait
echo ""
echo "********** Installation **********"
echo ""
__ubuntu_install
__wait
echo ""
echo "Prepare \"Project\" directory ..."
echo ""
if [[ -d "${PROJECT_DIR}" ]]; then
    echo "${PROJECT_DIR} already exists ."
else
    mkdir "${PROJECT_DIR}"
fi
cd "${PROJECT_DIR}"
echo ""
echo "********** PostgreSQL9 **********"
echo ""
__postgresql_service_manager
__wait
echo ""
echo "********** Tomcat6 **********"
echo ""
echo ""
echo "Install Tomcat6 to Project ..."
echo ""
__ubuntu_tomcat
__wait
echo ""
echo "********** Service Manager **********"
echo ""
__tomcat_service_manager
__wait
echo ""
echo "Test Service Manager ..."
echo ""
# Final smoke test: the deployed webapp's page should mention our grid id.
service_manager_page="$(wget -qO- ${TOMCAT6_SERVER}/service_manager)"
if [[ ${service_manager_page} == *lapps_grid_1* ]]; then
    echo "Service Manager works !"
else
    echo "Service Manager DOES NOT work !"
fi
cd ${UBUNTU_DIR}
echo ""
echo "${TOMCAT6_SERVER}/service_manager"
echo ""
| true
|
066d9aab75b40983c04ed13158fa34d3ef92bc43
|
Shell
|
SaifNOUMA/Network-Traffic-Prediction
|
/data/scripts/reduce
|
UTF-8
| 331
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# Aggregate a per-packet network trace into fixed-size time bins via sum_up.awk.
# argument : $1 : The file that Contain the data which represent the trace per packets where each row contains the arrival time (seconds) and the length (bytes)
# argument : $2 : The granularity of the final data which is the timestep
name_fdata="fdata_Timestep_$2"
# Quote expansions so filenames or timesteps containing spaces cannot
# word-split or glob.
awk -v granularity="$2" -f sum_up.awk "$1" > "$name_fdata"
| true
|
eb484c54985f657967f7e7c534c5ba4302d41309
|
Shell
|
won21kr/corteza-docker
|
/webapp/src/autoconfig.sh
|
UTF-8
| 1,488
| 3.46875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
set -eu
################################################################################
# (re)generate configuration files
# Where to put the configs
BASEDIR=${BASEDIR:-"/webapp"}
FEDERATION_ENABLED=${FEDERATION_ENABLED:-""}
# See if VIRTUAL_HOST exists
# if it does, it should hold the hostname of where these webapps are served
VIRTUAL_HOST=${VIRTUAL_HOST:-"local.cortezaproject.org"}
# Assume API is using same scheme as webapps
API_SCHEME=${API_SCHEME:-""}
# Prefix to use with VIRTUAL_HOST when building API domain
API_PREFIX=${API_PREFIX:-"api."}
API_BASEURL=${API_BASEURL:-"${API_PREFIX}${VIRTUAL_HOST}"}
API_BASEURL_SYSTEM=${API_BASEURL_SYSTEM:-"${API_SCHEME}//${API_BASEURL}/system"}
API_BASEURL_MESSAGING=${API_BASEURL_MESSAGING:-"${API_SCHEME}//${API_BASEURL}/messaging"}
API_BASEURL_COMPOSE=${API_BASEURL_COMPOSE:-"${API_SCHEME}//${API_BASEURL}/compose"}
API_BASEURL_FEDERATION=${API_BASEURL_FEDERATION:-"${API_SCHEME}//${API_BASEURL}/federation"}
# Build the config.js body; '\n' escapes are expanded at output time below.
CONFIG=""
CONFIG="${CONFIG}window.SystemAPI = '${API_BASEURL_SYSTEM}'\n"
CONFIG="${CONFIG}window.MessagingAPI = '${API_BASEURL_MESSAGING}'\n"
CONFIG="${CONFIG}window.ComposeAPI = '${API_BASEURL_COMPOSE}'\n"
if [ ! -z "${FEDERATION_ENABLED}" ]; then
	CONFIG="${CONFIG}window.FederationAPI = '${API_BASEURL_FEDERATION}'\n"
fi
# BUG FIX: 'echo -e' is not portable under #!/bin/sh (dash's echo prints the
# "-e" literally); printf '%b\n' expands the embedded \n escapes identically
# on every POSIX shell.
printf '%b\n' "${CONFIG}" | tee \
	${BASEDIR}/messaging/config.js \
	${BASEDIR}/auth/config.js \
	${BASEDIR}/admin/config.js \
	${BASEDIR}/compose/config.js \
	> ${BASEDIR}/config.js
| true
|
d338abb4be193437c6099bc57bd4b5a9d7a49f72
|
Shell
|
xfire/dotfiles
|
/bin/gsvn
|
UTF-8
| 1,769
| 3.859375
| 4
|
[] |
no_license
|
#!/bin/sh
# gsvn - thin git-svn wrapper exposing svn-like subcommands on top of the
# standard git-sh-setup scaffolding (which supplies usage(), die(), etc.).
USAGE='[help|status|info|update|log|blame|commit|dcommit|switch]'
LONG_USAGE='gsvn help
print this long help message
gsvn status
shows information about current working dir status.
gsvn info
shows information about a file or directory similar to
what ‘svn info´ provides. (git svn info)
gsvn update
fetches revisions from the SVN parent of the current
HEAD and rebases the current (uncommitted to SVN) work
against it. (git svn rebase)
gsvn log
show svn log messages (git svn log)
gsvn blame
show what revision and author last modified each line
of a file. (git svn blame)
gsvn dcommit
push all local commits into the svn repository.
(git svn dcommit)
gsvn switch
switch working copy to trunk, a branch or a tag'
OPTIONS_SPEC=
SUBDIRECTORY_OK=yes
. git-sh-setup
require_work_tree
function show_current() {
# git reflog --all | grep "updating HEAD" | head -n 1 | sed 's|^[^:]*: \([^:]*\): updating HEAD$|\1|'
git reflog --all | grep "updating HEAD" | head -n 1 | sed 's|^[^.]*\.\.\. \([^:]*:[^:]*\): updating HEAD$|\1|'
}
case "$#" in
0)
usage ;;
*)
cmd="$1"
shift
case "$cmd" in
help)
usage ;;
status)
echo "# On svn:" $(show_current)
git status "$@" ;;
info)
git svn info "$@" ;;
up|update)
git svn rebase "$@" ;;
log)
git svn log "$@" ;;
blame)
git svn blame "$@" ;;
dcommit)
git svn dcommit "$@" ;;
switch)
git reset --hard "$@"
echo "now on svn:" $(show_current)
;;
*)
git "$cmd" "$@" ;;
esac
esac
| true
|
285905a8f963514f19edae0d9dd1f5dc5f034e9b
|
Shell
|
gem5/gem5
|
/util/dist/test/test-2nodes-AArch64.sh
|
UTF-8
| 3,772
| 2.640625
| 3
|
[
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.0-or-later",
"MIT"
] |
permissive
|
#! /bin/bash
#
# Copyright (c) 2015, 2022 Arm Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# This is an example script to start a dist gem5 simulations using
# two AArch64 systems. It is also uses the example
# dist gem5 bootscript util/dist/test/simple_bootscript.rcS that will
# run the linux ping command to check if we can see the peer system
# connected via the simulated Ethernet link.
# Resolve all paths relative to this script's location so the test can be
# launched from anywhere inside the tree.
GEM5_DIR=$(pwd)/$(dirname $0)/../../..
# Disk image and kernel come from the M5_PATH resource directory
# (assumes M5_PATH is exported by the caller — confirm).
IMG=$M5_PATH/disks/ubuntu-18.04-arm64-docker.img
VMLINUX=$M5_PATH/binaries/vmlinux.arm64
FS_CONFIG=$GEM5_DIR/configs/example/arm/dist_bigLITTLE.py
SW_CONFIG=$GEM5_DIR/configs/dist/sw.py
GEM5_EXE=$GEM5_DIR/build/ARM/gem5.opt
BOOT_SCRIPT=$GEM5_DIR/util/dist/test/simple_bootscript.rcS
GEM5_DIST_SH=$GEM5_DIR/util/dist/gem5-dist.sh
DEBUG_FLAGS="--debug-flags=DistEthernet"
# Uncomment to resume from checkpoint 1; when unset, the trailing
# --cf-args below receives no arguments.
#CHKPT_RESTORE="-r1"
# Two simulated nodes connected by a simulated Ethernet link.
NNODES=2
$GEM5_DIST_SH -n $NNODES \
-x $GEM5_EXE \
-s $SW_CONFIG \
-f $FS_CONFIG \
--m5-args \
$DEBUG_FLAGS \
--fs-args \
--cpu-type=atomic \
--little-cpus=1 \
--big-cpus=1 \
--machine-type=VExpress_GEM5_Foundation \
--disk=$IMG \
--kernel=$VMLINUX \
--bootscript=$BOOT_SCRIPT \
--cf-args \
$CHKPT_RESTORE
| true
|
d92cbddca72f7c261264073a0fc7cbcb75c91da1
|
Shell
|
regier/BFOD
|
/Compile_Install_Dependencies.sh
|
UTF-8
| 1,348
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
# Welcome to Building FlightGear OnDebian
# This is the following version of this script.
bfod_ver="0.0.1.20200513.1110"
# This script's intention is to download and build
# dependencies required to build FlightGear and
# FlightGear itself.
# The script follows the KISS principle, each file does one thing only.
# This file Compiles everything.
#
# NOTE(review): the original defined Build/CMake as aliases, but aliases
# are NOT expanded in non-interactive bash scripts (would need
# `shopt -s expand_aliases`), so every build step silently failed with
# "command not found". Shell functions behave identically and always work.
Build() { make -j "$(nproc)" && make install; }
CMake() { cmake -DCMAKE_BUILD_TYPE=Release -DOpenGL_GL_PREFERENCE=LEGACY "$@"; }
# Building PLib
cd $fg_temp_files # Enter dir where stuff will be built from.
mkdir -p PLib && cd PLib # Creates work dir for PLib.
CMake $fg_source_files/PLib -DCMAKE_INSTALL_PREFIX="$fg_install_dir"
Build
# Building OSG
cd $fg_temp_files # Enter dir where stuff will be built from.
mkdir -p OSG && cd OSG # Creates work dir for OSG.
CMake $fg_source_files/OSG -DCMAKE_INSTALL_PREFIX="$fg_install_dir"
Build
# Building SimGear
cd $fg_temp_files # Enter dir where stuff will be built from.
mkdir -p SimGear && cd SimGear # Creates work dir for SimGear.
CMake $fg_source_files/SimGear -DCMAKE_INSTALL_PREFIX="$fg_install_dir"
Build # NOTE(review): missing in the original — SimGear was configured but never built.
# Building FlightGear
cd $fg_temp_files # Enter dir where stuff will be built from.
mkdir -p FlightGear && cd FlightGear # Creates work dir for FlightGear.
CMake $fg_source_files/FlightGear -DCMAKE_INSTALL_PREFIX="$fg_install_dir"
Build # NOTE(review): missing in the original — FlightGear was configured but never built.
| true
|
edbcfc73cbac215c46f7ee19fa6e57b46d339d0d
|
Shell
|
2063978470/snail
|
/init.sh
|
UTF-8
| 530
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
# Bootstrap the project's PHP dependencies inside a composer container.
p=`pwd`
# install php dependency
docker run --rm -v "$p":/usr/src/app stenote/composer install
# delete composer.lock
rm composer.lock
# re-run composer install:
# laravel's dependency set seems to have a quirk — the first composer
# install pulls in two extra packages; deleting composer.lock and
# installing again removes them.
docker run --rm -v "$p":/usr/src/app stenote/composer install
docker run --rm -v "$p":/usr/src/app --entrypoint="/usr/bin/php" -t stenote/composer artisan optimize
| true
|
2f266328e3a4397a9511f39e0dbc8f9963db464e
|
Shell
|
luciaeveberger/OS_WITH_LINUX
|
/INTRO_TO_LINUX/BASH_SCRIPTING/LAB_3/conditional_logic_example
|
UTF-8
| 115
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
# Demonstrates a compound condition: both comparisons must hold for the
# first branch to run. With num=10 the first comparison fails, so the
# script prints "Not here!".
num=10
num2=5
if (( num < 4 && num2 > 1 )); then
  echo "Here!"
else
  echo "Not here!"
fi
| true
|
2b35a3e3f356ef76ade0ae3ce79c2393f97d577e
|
Shell
|
sgringwe/buildkite-kubernetes-autoscaler
|
/build.sh
|
UTF-8
| 1,391
| 3.921875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Expects the directory structure:
# .
# └── projectname
#     ├── build.sh
#     └── src
#         ├── custompackage
#         │   └── custompackage.go
#         └── main
#             └── main.go
#
# This will build a binary called "projectname" at "projectname/bin/projectname".
#
# The final structure will look like:
#
# .
# └── projectname
#     ├── bin
#     │   └── projectname
#     ├── build.sh
#     └── src
#         ├── custompackage
#         │   └── custompackage.go
#         └── main
#             └── main.go
#
# Save the pwd before we run anything
PRE_PWD=$(pwd)
# Determine the build script's actual directory, following symlinks
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
BUILD_DIR="$(cd -P "$(dirname "$SOURCE")" && pwd)"
# Derive the project name from the directory
PROJECT="$(basename "$BUILD_DIR")"
# Setup the environment for the build.
# NOTE(review): GOPATH must be exported or the `go build` child process
# will not see it — the original plain assignment only worked when GOPATH
# happened to be exported already by the calling environment.
export GOPATH=$BUILD_DIR:$GOPATH
# Build the project
cd "$BUILD_DIR" || exit 1
mkdir -p bin
# Cross-platform build
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o "bin/$PROJECT" ./cmd/buildkite-kubernetes-autoscaler
EXIT_STATUS=$?
if [ $EXIT_STATUS -eq 0 ]; then
  echo "Build succeeded"
else
  echo "Build failed"
fi
# Change back to where we were
cd "$PRE_PWD"
exit $EXIT_STATUS
| true
|
fb2969533c22247ad477951687815a4c96e9509a
|
Shell
|
MIT-LCP/mimic-code
|
/mimic-iii/buildmimic/sqlite/import.sh
|
UTF-8
| 1,620
| 3.703125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Copyright (c) 2021 Thomas Ward <thomas@thomasward.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# Import every MIMIC-III CSV (or gzipped CSV) in the current directory into
# a single SQLite database, one table per file.
OUTFILE=mimic3.db
# Refuse to clobber a non-empty existing database (-s: exists and size > 0).
if [ -s "$OUTFILE" ]; then
echo "File \"$OUTFILE\" already exists." >&2
exit 111
fi
for FILE in *; do
# skip loop if glob didn't match an actual file
[ -f "$FILE" ] || continue
# trim off extension and lowercase file stem (e.g., HELLO.csv -> hello)
TABLE_NAME=$(echo "${FILE%%.*}" | tr "[:upper:]" "[:lower:]")
case "$FILE" in
*csv)
IMPORT_CMD=".import $FILE $TABLE_NAME"
;;
# need to decompress csv before load
*csv.gz)
# sqlite's .import accepts "|command" to stream from a pipe
IMPORT_CMD=".import \"|gzip -dc $FILE\" $TABLE_NAME"
;;
# not a data file so skip
*)
continue
;;
esac
echo "Loading $FILE."
sqlite3 $OUTFILE <<EOF
.headers on
.mode csv
$IMPORT_CMD
EOF
echo "Finished loading $FILE."
done
echo "Finished loading data into $OUTFILE."
| true
|
2b46b10423a4968d6e9abbcb027e52135f1df12e
|
Shell
|
CallumTeesdale/CSY2002-Assign2
|
/archive.sh
|
UTF-8
| 4,748
| 4.09375
| 4
|
[] |
no_license
|
#!/bin/bash
#############################PLEASE READ##########################################
# This is an archive script with a simple ui created by callum teesdale 15423186 #
# run using "bash archive.sh" in the terminal #
# Use the arrow keys to move through the menu and enter key to select #
# Assumes that the directory you want archive and write to exists #
# #
# #
# #
##################################################################################
# Store menu options selected by the user.
# NOTE(review): the original used predictable /tmp/name.$$ paths, which are
# vulnerable to symlink attacks; mktemp creates the file safely.
INPUT=$(mktemp /tmp/menu.sh.XXXXXX)
# Storage file for displaying output
OUTPUT=$(mktemp /tmp/output.sh.XXXXXX)
# trap and delete temp files; -f so a missing file is not an error
trap 'rm -f "$OUTPUT" "$INPUT"; exit' SIGHUP SIGINT SIGTERM
#
# display output using msgbox
# The message body is read from the $OUTPUT temp file, not passed in.
# $1 -> set msgbox height
# $2 -> set msgbox width
# $3 -> set msgbox title
#
function display_output(){
local h=${1-10} # box height default 10
local w=${2-41} # box width default 41
local t=${3-Output} # box title
dialog --backtitle "Archive Script" --title "${t}" --clear --msgbox "$(<$OUTPUT)" ${h} ${w}
}
#
# Purpose - sub-menu for editing the tar/cron settings, looping until the
# user picks Exit. The selection is written by dialog to $INPUT.
#
function tar_settings(){
while true
do
### display sub menu###
dialog --clear --backtitle "Archive Script" \
--title "Archive Script" \
--menu "Select an option" 15 50 4 \
DateTime "Change the cron date/time schedule" \
Source "Change Source location" \
Destination "Change Destination" \
Name "Change File Name" \
Exit "Back" 2>"${INPUT}"
menuitem=$(<"${INPUT}")
# handles the user input
# NOTE(review): the original also had an `Execute) execute;;` arm here, but
# Execute is not a menu entry in this sub-menu, so it was unreachable; the
# DateTime description was also a copy-paste of the main-menu text.
case $menuitem in
DateTime) dateTime;;
Source) fsource;;
Destination) fdest;;
Name) fname;;
Exit) echo "Bye"; break;;
esac
# if temp files found, delete them
[ -f $OUTPUT ] && rm $OUTPUT
[ -f $INPUT ] && rm $INPUT
done
}
#change file name
function fname(){
# open fd 3 so the form value can be captured while dialog draws on the tty
exec 3>&1
# Store data to $fname variable
# NOTE(review): on first use $fname is empty, so the form pre-fills with
# just ".tar.gz" — confirm that is the intended default.
fname=$(dialog --ok-label "Save" \
--backtitle "Change Name" \
--title "Change Name" \
--form "Enter File name " \
15 50 0 \
"Name:" 1 1 "$fname.tar.gz" 1 30 30 0 \
2>&1 1>&3)
# close fd
exec 3>&-
# display values just entered for testing
echo "$fname"
}
#change destination — prompt for the archive destination path via a dialog
#form and store it in the global $fdest (echoed afterwards for testing).
function fdest(){
# open fd 3 so the form value can be captured while dialog draws on the tty
exec 3>&1
# Store data to $fdest variable
fdest=$(dialog --ok-label "Save" \
--backtitle "Change Destination" \
--title "Change Destination" \
--form "Enter path to the destination, path starts at home, directory must exist" \
15 50 0 \
"Destination:" 1 1 "$fdest" 1 30 30 0 \
2>&1 1>&3)
# close fd
exec 3>&-
# display values just entered for testing
echo "$fdest"
}
#change source location — prompt for the directory to archive via a dialog
#form and store it in the global $fsrc (echoed afterwards for testing).
function fsource(){
# open fd 3 so the form value can be captured while dialog draws on the tty
exec 3>&1
# Store data to $src variable
fsrc=$(dialog --ok-label "Save" \
--backtitle "Change Source" \
--title "Change Source" \
--form "Enter path to the source, path starts at home, directory must all ready exist" \
15 50 0 \
"Source:" 1 1 "$fsrc" 1 30 30 0 \
2>&1 1>&3)
# close fd
exec 3>&-
# display values just entered for testing
echo "$fsrc"
}
#Change tar date/time settings — collect the five cron schedule fields
#(minute, hour, day-of-month, month, day-of-week) in one dialog form.
#The result is stored one field per line in the global $values.
function dateTime(){
# Store data to $VALUES variable
values=$(dialog --ok-label "Save" \
--backtitle "Date Time Settings" \
--title "Date/Time" \
--form "Change The date/Time settings mins(0-59/*) hour(0-23/*) day(1-31/*) month(1-12/*) day of week(0-6/sunday=0)" \
15 50 0 \
"Minutes:" 1 1 "$mins" 1 10 15 0 \
"Hours:" 2 1 "$hours" 2 10 15 0 \
"Day:" 3 1 "$day" 3 10 15 0 \
"Month:" 4 1 "$month" 4 10 15 0 \
"Week Day:" 5 1 "$wkday" 5 10 15 0 2>&1 >/dev/tty)
# display values just entered
echo "$values"
}
#
# Execute - install the configured archive job into the user's crontab.
# Reads the globals set by the other menu functions ($fsrc, $fdest,
# $fname, $values).
#
function execute(){
# -f: do not fail when no previous cronlog exists
rm -f cronlog
#
#Test that all variables are set
#
echo fsrc="$fsrc"
echo fdest="$fdest"
echo fname="$fname"
echo values="$values"
#Take the cron settings and assign them to an individual variable
i=0
while read -r line; do
((i++))
declare var$i="${line}"
done <<< "${values}"
echo "var1=${var1}"
echo "var2=${var2}"
echo "var3=${var3}"
echo "var4=${var4}"
echo "var5=${var5}"
#Get the date and time and format it
date=$(date '+%Y-%m-%d %H:%M:%S')
#Append the new job to the existing crontab
crontab -l >> cronlog
# NOTE(review): the original appended " on $date" to the command itself,
# which cron would have handed to tar as extra file arguments; record the
# timestamp as a separate crontab comment line instead (crontab comments
# must be on their own line).
echo "# added on $date" >> cronlog
echo "${var1} ${var2} ${var3} ${var4} ${var5} tar -czf ./$fdest/$fname $fsrc" >> cronlog
cat cronlog
crontab cronlog
}
# Top-level menu loop: offer settings, execute, or exit; repeats until Exit.
function main(){
while true
do
### display main menu ###
dialog --clear --backtitle "Archive Script" \
--title "Archive Script" \
--menu "Select an option" 15 50 4 \
TarSettings "Change the tar settings" \
Execute "Execute with current settings" \
Exit "Exit to the shell" 2>"${INPUT}"
menuitem=$(<"${INPUT}")
# handle user input
case $menuitem in
TarSettings) tar_settings;;
Execute) execute;;
Exit) echo "Bye"; break;;
esac
# if temp files found, delete them
[ -f $OUTPUT ] && rm $OUTPUT
[ -f $INPUT ] && rm $INPUT
done
}
main
| true
|
4cd290d660d8df7dd2b25c3019e5c2cb06b6edf4
|
Shell
|
mmatschiner/anguilla
|
/wgs_assemblies/src/merge_rearrangements.sh
|
UTF-8
| 692
| 2.78125
| 3
|
[] |
no_license
|
# m_matschiner Mon Dec 9 14:53:26 CET 2019
# Concatenate the per-species rearrangement tables and merge them into a
# unique set with a helper ruby script.
# Load modules.
module load ruby/2.1.5
# Set the combined rearrangements table.
table=../res/tables/rearrangements.txt
# Combine the rearrangements tables for the four different species.
# (the ??? glob matches a three-character per-species suffix — confirm)
cat ../res/tables/rearrangements.???.txt > ${table}
# Set the merged table.
merged_table=../res/tables/rearrangements.merged.txt
# Set the maximum size of a region within which an inversion resides (longer regions will not be recorded).
maximum_region_size=10000
# Use a ruby script to merge rearrangements into a set of unique, potentially shared among species, rearrangements.
ruby merge_rearrangements.rb ${table} ${maximum_region_size} ${merged_table}
| true
|
123537241cf01abf61c05eb825533c6d9a143aaf
|
Shell
|
ChastityAM/LinuxShell
|
/Lab3_3.sh
|
UTF-8
| 773
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
#**************************************
#Script to Mount Point File system type
#Author: Chastity
#Date: 15 Feb 21
#**************************************
# Walk the whitespace-separated words of `mount` output. Each mount line
# has the form "DEV on MTPT type TYPE (opts)", so words 1, 3 and 5 carry
# the device, mount point and filesystem type respectively.
count=1
for word in $(mount)
do
  # Print a label for the interesting positions in the six-word record
  case $count in
    1)
      echo "Drive: "
      echo $word
      ;;
    3)
      echo "Mount point: "
      echo $word
      ;;
    5)
      echo "Type: "
      echo $word
      ;;
  esac
  # Advance, and reset with a blank line after each complete record
  count=$(( count + 1 ))
  if (( count > 6 )); then
    count=1
    echo
  fi
done
| true
|
5c834519379edf593dbe741142a620a98466e194
|
Shell
|
oluwoleo/xgboost-php
|
/src/setup
|
UTF-8
| 222
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# Configure the PHP extension build.
# Usage: ./setup [PHP-VERSION-SUFFIX]
#   e.g. "./setup 7.4" runs phpize7.4 and configures against
#   /usr/bin/php-config7.4; with no argument the default tools are used.
if [ "$1" ]; then
echo "configuring for PHP ${1}"
# no space on purpose: "phpize$1" forms the versioned binary name (phpize7.4)
phpize$1;
./configure --with-php-config=/usr/bin/php-config$1
else
echo "configuring for the default installation of php"
phpize
./configure
fi
| true
|
36214cd6efc5f59ccb9f9b4b66be47599f356512
|
Shell
|
linsalrob/EdwardsLab
|
/bin/update_blastdb.sh
|
UTF-8
| 455
| 2.671875
| 3
|
[
"MIT"
] |
permissive
|
# Refresh the local copies of the NCBI BLAST nr and nt databases.
# Each database is downloaded into a dated directory and the bare name
# (nr, nt) is re-pointed at it via a symlink, so a failed download never
# clobbers the previously working copy.
DATE=$(date +%Y%m%d)

# update_db NAME: download blast/db/NAME* into /home2/db/blast/NAME_$DATE,
# verify the md5 checksums, unpack the tarballs, and flip the NAME symlink
# to the new directory. (The two copies of this logic in the original have
# been factored into this one function.)
update_db() {
    db=$1
    cd /home2/db/blast || return 1
    mkdir "${db}_$DATE"
    cd "${db}_$DATE" || return 1
    # quoted so ncftpget performs the remote glob itself
    ncftpget "ftp://ftp.ncbi.nlm.nih.gov/blast/db/${db}*"
    cat *md5 > all.md5
    md5sum -c all.md5
    for t in *.tar.gz; do echo "$t"; tar xf "$t"; done
    cd /home2/db/blast || return 1
    rm -f "$db"
    ln -s "${db}_$DATE" "$db"
}

update_db nr
update_db nt
|
1297a5ed14e0676bb463ba9be287592924c7ccda
|
Shell
|
akhila-s-rao/5G-SMART_traffic_gen_scripts
|
/ditg/send_ue1_ditg_traffic.sh
|
UTF-8
| 368
| 3.25
| 3
|
[] |
no_license
|
# Send D-ITG CBR traffic for UE 1 and log the sender output.
# $1: directory to save logs (and a copy of the traffic script) into.
if [ $# -ne 1 ] ; then
echo 'Usage: bash command <path to save logs in>'
exit 1
fi
traffic_script="rttm_1000Bpkts_ue1_CBR_traffic_script1"
echo "traffic_script = "$traffic_script
# Save the traffic generation script used in the run folder for reference
cp ${traffic_script} ${1}/${traffic_script}
# -l: write the sender-side log into the run folder
sudo ITGSend ${traffic_script} -l "${1}/ue1_ditg_send.log"
| true
|
764f68c23ae8b6f256dbc774bd3c1c38b5a4d5af
|
Shell
|
sunetos/dotfiles
|
/bin/update-julia
|
UTF-8
| 521
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
# Download the latest Julia nightly DMG and install the .app bundle on macOS.
VERSION="0.3.0"
URL="http://status.julialang.org/download/osx10.7+"
APP="/Applications/Julia-$VERSION-prerelease.app"
echo "Downloading Julia v$VERSION nightly..."
dmg="/tmp/julia-v$VERSION-installer.dmg"
curl -fsSL "$URL" > "$dmg"
echo "Installing new DMG image..."
hdiutil attach -quiet "$dmg"
# Locate the app bundle inside the mounted image (its name embeds the build tag)
dmgapp=$(find /Volumes/Julia -name 'Julia-*.app' 2> /dev/null)
tag=$(basename "$dmgapp" .app)
# NOTE(review): the original ran `cp -R $installer "$dmgapp" "$APP"` where
# $installer was never defined; the stray variable is dropped here.
cp -R "$dmgapp" "$APP"
hdiutil detach -quiet /Volumes/Julia
rm "$dmg"
echo "Successfully installed $tag."
| true
|
4ff636fac3d81d8def301165c22706d0aac30900
|
Shell
|
FlowNICA/CentralityFramework
|
/Framework/scripts/example/start.sh
|
UTF-8
| 3,367
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
# SGE array job: run one Glauber centrality fit per parameter-list line.
#
#$ -wd /weekly/$USER/Soft/Centrality/TMP/
#$ -cwd
#$ -N run_centrality_fit
#$ -q all.q
#$ -l h_rt=8:00:00
#$ -l s_rt=8:00:00
#$ -t 1-200
#
#$ -o /weekly/$USER/Soft/Centrality/TMP/
#$ -e /weekly/$USER/Soft/Centrality/TMP/
#
export JOB_ID=${JOB_ID}
export TASK_ID=${SGE_TASK_ID}
START_DIR=$PWD
# Where main script & template config file are stored
MAIN_DIR=/weekly/parfenov/Soft/Centrality/scripts/new/7gev_reco_urqmd_nosecondary
# Creating output
COMMIT=new_recoUrQMD_STARlike_7gev_NoSecondary
OUT=/weekly/parfenov/Soft/Centrality/OUT/${COMMIT}/${JOB_ID}
OUT_FILE=$OUT/file
OUT_ROOT=$OUT_FILE/root
OUT_PDF=$OUT_FILE/pdf
OUT_LOG=$OUT/log
mkdir -p $OUT_ROOT/glauber_qa
mkdir -p $OUT_ROOT/fit
mkdir -p $OUT_ROOT/b_test
#mkdir -p $OUT_PDF
mkdir -p $OUT_LOG
LOG=$OUT_LOG/JOB_${JOB_ID}_${TASK_ID}.log
echo "Job output." &> $LOG
echo "-------------------------------" &>> $LOG
PARAMETER_LIST=$MAIN_DIR/parameter.list
# Read N-th line from the parameter list. N = Number of the job in job array
PARAMETER_LINE=`sed "${TASK_ID}q;d" ${PARAMETER_LIST}`
# PARAMETER_LINE=`sed "250q;d" ${PARAMETER_LIST}`
# Parsing parameters from the parameter list.
# NOTE(review): the line appears to be colon-separated numeric fields
# "f0:f1:k0:k1:mult0:mult1:"; each step below strips one field off the
# front — relies on every field being numeric; confirm the list format.
PARAMETER_f0=${PARAMETER_LINE%%:*[0-9]}
TAIL=${PARAMETER_LINE#[0-9]*:}
PARAMETER_f1=${TAIL%%:*[0-9]}
TAIL=${TAIL#[0-9]*:}
PARAMETER_k0=${TAIL%%:*[0-9]}
TAIL=${TAIL#[0-9]*:}
PARAMETER_k1=${TAIL%%:*[0-9]}
TAIL=${TAIL#[0-9]*:}
PARAMETER_mult0=${TAIL%%:*[0-9]}
TAIL=${TAIL#[0-9]*:}
PARAMETER_mult1=${TAIL%%:*[0-9]}
TAIL=${TAIL#[0-9]*:}
echo "Parameter list: ${PARAMETER_LINE}" &>>$LOG
echo "Reading:" &>>$LOG
echo " " &>>$LOG
echo "f0 = ${PARAMETER_f0}" &>>$LOG
echo "f1 = ${PARAMETER_f1}" &>>$LOG
echo "k0 = ${PARAMETER_k0}" &>>$LOG
echo "k1 = ${PARAMETER_k1}" &>>$LOG
echo "mult_min = ${PARAMETER_mult0}" &>>$LOG
echo "mult_max = ${PARAMETER_mult1}" &>>$LOG
echo "-------------------------------" &>> $LOG
# Creating tmp directory & log file
TMPALL=/weekly/parfenov/TMP/
TMP=$TMPALL/TMP_${JOB_ID}_${TASK_ID}
mkdir -p $TMP
# Copy config file to TMP directory
cp ${MAIN_DIR}/config.txt.template ${TMP}/config.txt
# Replacing template fit params with specific variable values
sed -e "s|fminfmin|${PARAMETER_f0}|" -i ${TMP}/config.txt
sed -e "s|fmaxfmax|${PARAMETER_f1}|" -i ${TMP}/config.txt
sed -e "s|kminkmin|${PARAMETER_k0}|" -i ${TMP}/config.txt
sed -e "s|kmaxkmax|${PARAMETER_k1}|" -i ${TMP}/config.txt
sed -e "s|multminmultmin|${PARAMETER_mult0}|" -i ${TMP}/config.txt
sed -e "s|multmaxmultmax|${PARAMETER_mult1}|" -i ${TMP}/config.txt
cat ${TMP}/config.txt &>>$LOG
echo "-------------------------------" &>> $LOG
# Sourcing ROOT
source /weekly/parfenov/Soft/MPDROOT/mpdroot_140920/build/config.sh
# Where is CentralityFramework stored
CENTRALITY_FRAMEWORK_DIR=/weekly/parfenov/Soft/Centrality/Framework/centrality-master
# Compile binaries
cd ${TMP}/
cmake $CENTRALITY_FRAMEWORK_DIR/ &>>$LOG
make &>>$LOG
echo "-------------------------------" &>> $LOG
# Do main program
./glauber ./config.txt &>>$LOG
echo "-------------------------------" &>> $LOG
# Copy output files into output directory
mv ${TMP}/glauber_qa.root $OUT_ROOT/glauber_qa/glauber_qa_${JOB_ID}_${TASK_ID}.root
mv ${TMP}/fit*.root $OUT_ROOT/fit/fit_${JOB_ID}_${TASK_ID}.root
mv ${TMP}/b_test.root $OUT_ROOT/b_test/b_test_${JOB_ID}_${TASK_ID}.root
cd $START_DIR
# Delete temporary directory
rm -rf ${TMP}
echo "Done!" &>> $LOG
| true
|
fefaab3cc94a8e83594ec3eb9aa9b4434c12d965
|
Shell
|
ofayans/freeipa_upstream_builder
|
/step1.sh
|
UTF-8
| 698
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/bash
# Install everything needed to build FreeIPA from source on Fedora.
# FreeIPA requires:
# Parse BuildRequires/Requires out of the spec template, dropping file
# dependencies (entries starting with "/") and, for Requires, any line
# containing an rpm macro ("%").
BUILD_DEPS=$(grep "^BuildRequires" freeipa.spec.in | awk '{ print $2 }' | grep -v "^/")
DEPS=$(grep "^Requires" freeipa.spec.in | awk '!/%/ { print $2 }' | grep -v "^/")
# Fedora min build env
# (https://fedoraproject.org/wiki/Packaging:Guidelines#Exceptions_2)
DEPS="$DEPS bash bzip2 coreutils cpio diffutils fedora-release"
DEPS="$DEPS findutils gawk gcc gcc-c++ grep gzip info make patch"
DEPS="$DEPS redhat-rpm-config rpm-build sed shadow-utils tar unzip"
DEPS="$DEPS util-linux which xz python-gssapi"
# install all the RPMs
dnf install -y rpm-build python-kdcproxy $BUILD_DEPS $DEPS --enablerepo updates-testing
dnf distro-sync --enablerepo updates-testing -y
| true
|
52445872cb1b4dcbb26fb3cbbfb81c621e862284
|
Shell
|
dwisniewski1993/Text-processing-labc
|
/TO DONE PTX/PTX1 Wiśniewski Dominik - RUBY/demo
|
UTF-8
| 1,032
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
# Interactive demo (user-facing text in Polish) for a Ruby script that
# extracts facts about Nobel laureates from Wikipedia. Each `read a`
# pauses until the user presses ENTER.
echo 'Wykonał: Dominik Wiśniewski 396455'
echo 'Zadanie za: 3-4pkt'
echo 'Press any ENTER to continue'
read a
echo 'Zadanie polega na wyciągnięciu z wikipedi konkretnych informacji'
echo 'W tym wypadku są to informacje o noblistach'
echo 'Program potrzebuje imienia bądz nazwiska noblisty i jakiegoś słowa kluczowego'
echo 'Press any ENTER to continue'
read a
echo 'Przykład działania dla słów: Curie i Born'
echo './App.rb Curie Born'
echo 'Press any ENTER to continue'
read a
# Example 1: birth information for laureates named Curie
ruby App.rb Curie Born
echo 'Uzyskaliśmy informacje o urodzeniu noblistów o nazwisku Curie'
echo 'Press any ENTER to continue'
read a
echo 'Przykład działania dla słów: Einstein i Died'
echo './App.rb Einstein Died'
echo 'Press any ENTER to continue'
read a
# Example 2: death information for Einstein
ruby App.rb Einstein Died
echo 'Uzyskaliśmy informacje o śmierci Einsteina'
echo 'Press any ENTER to continue'
read a
echo 'Instrukcja ogólna: ./App.rb [imie/nazwisko] [słowo_kluczowe]'
echo 'Oba parametry muszą być podane'
echo 'Press any ENTER to continue'
read a
|
d81b8ab5d2b378b2376295d59476fc3b823485c5
|
Shell
|
DanielCuSanchez/programacion-bash
|
/Ejemplo_And.sh
|
UTF-8
| 292
| 3.40625
| 3
|
[] |
no_license
|
#!/usr/bin/ksh
# Takes one argument and validates that it is a number in [1...9]
# Validate the number of arguments
if [ ! \( $# -eq 1 \) ]
then
echo "ERROR\a"
else
# Compound condition joined with AND (-a)
if [ $1 -ge 1 -a $1 -lt 10 ]
then
echo "Correcto "
else
echo "ERROR\a"
fi
fi
| true
|
85ad1a816bd8169f8b4b88102e26e82184f6fe5f
|
Shell
|
pombredanne/bin
|
/goto-server
|
UTF-8
| 2,006
| 3.671875
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# go - quick connection script
# lib.bash provides helpers used below (debug, die, progname, mstsc — confirm)
. lib.bash || exit
# Mount $1 over sshfs at $2 unless something is already mounted there.
do_sshfs() {
local src=$1 mtpt=$2
mkdir -p "$mtpt"
mountpoint -q "$mtpt" ||
sshfs -o idmap=none "$src" "$mtpt"
echo "Mounted sshfs#$src on $mtpt"
}
# Open an RDP session to $1; remaining args are passed through to mstsc.
do_rdp() {
local host=$1
debug "using RDP to $host"
mstsc "$host" "${@:2}"
debug "spawned 'mstsc...'"
}
# Replace this process with an ssh session.
do_ssh() {
debug "using SSH to $1"
exec ssh "$@"
}
cd ~
# When invoked through a symlink named after a host, treat the link name as
# the first argument so "snow" behaves like "goto-server snow".
case ${0##*/} in
go|goto-server)
;;
*)
set -- "${0##*/}" "$@"
;;
esac
# Dispatch on the requested host. The `user` variable is exported before
# some RDP calls — presumably consumed by the mstsc wrapper; confirm in lib.bash.
case $1 in
snow)
host="snow"
if [[ $2 == "fs" ]]; then
do_sshfs "$host:/" "$XDG_RUNTIME_DIR/fs/snow"
elif [[ $2 == "suspend" || $2 == "hibernate" ]]; then
do_ssh "$host" -oControlPersist=no \
-oServerAliveInterval=1 \
-oServerAliveCountMax=5 \
"$2" &
elif [[ $2 == "rdp" || (! $2 && ! -t 0) ]]; then
do_rdp "$host" -R
else
do_ssh "$host" "${@:2}"
fi
;;
ukweb)
host="193.219.181.209"
if [[ $2 == -a ]]; then
user='UTENOS-KOLEGIJA\Administrator'
else
user='UTENOS-KOLEGIJA\Mantas'
fi; export user
do_rdp "$host:1969" -F
;;
ukmoodle)
host="193.219.181.221"
if [[ $2 == -a ]]; then
user='UTENOS-KOLEGIJA\Administrator1'
else
user='User'
fi; export user
do_rdp "$host" -F
;;
krantas)
host="193.219.181.210"
if [[ $2 == -A ]]; then
user='KRANTAS\Administrator'
else
user='Mantas'
fi; export user
do_rdp "$host" -F
;;
ukftp)
if [[ $2 == -A ]]; then
user='Administrator'
else
user='User'
fi; export user
do_rdp "193.219.181.197" -R
;;
"")
echo "Usage: $progname <host>" >&2
die "missing host name"
;;
*)
# Unknown name: look it up in the personal server list and dispatch on
# the recorded URL scheme (imap/rdp/plain address).
name="$1"
if [[ $2 ]]; then
name+="-$2"
fi
ip= rest=
while read -r _name _ip _rest; do
if [[ $name == "$_name" ]]; then
ip=$_ip
rest=$_rest
fi
done < ~/lib/servers.txt
case $ip in
imap://*|imaps://*)
exec mutt -f "$ip" $rest;;
rdp://*)
do_rdp "${ip#rdp://}" $rest;;
*://*)
die "unknown protocol '${ip%%//*}//'";;
"")
die "unknown host '$1'";;
*)
do_rdp "$ip" $rest;;
esac
;;
esac
# vim: sts=8
| true
|
7d50aaa8b9da4f13d7bc7a4b5bf20dd09545c5aa
|
Shell
|
mrled/psyops
|
/progfiguration_blacksite/progfiguration_blacksite/roles/k3s/progfiguration-k3s.sh.temple
|
UTF-8
| 10,017
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/sh
set -eu
#### Configured in the progfiguration role
# NOTE: this is a progfiguration "temple" template — the {$}name
# placeholders are substituted with role values before installation.
# The interface that will have the VIP added to it
# This must be the same on all nodes for kube-vip!
# Which is one reason we rename the primary NIC to psy0 earlier in the boot process.
interface={$}k3s_interface
# The address to use for the floating VIP
vipaddress={$}k3s_vipaddress
# The interface for the secondary floating VIP, used for gitea
interface2={$}k3s_interface2
# The address to use for the secondary floating VIP
vipaddress2={$}k3s_vipaddress2
#### Constants
# DaemonSet name and Prometheus metrics port for the second kube-vip instance
kubevip2_name=kube-vip2
kubevip2_prometheus_port=2212
usage() {
# Print the full help text to stdout.
cat <<ENDUSAGE
Usage: $0 [-h] <ACTION> [ARGS]
progfiguration k3s cluster management
To configure a cluster, first run '$0 init' on the first node,
get the token with '$0 token' on that node,
and then run '$0 join <TOKEN>' on the other nodes.
ARGUMENTS
-h | --help: Print help and exit
ACTION: The action to perform. Valid values are:
Cluster membership options:
init: Initialize a new cluster after configuring kube-vip
join <TOKEN>: Join an existing cluster
Requires a token from the cluster init node
leave: Leave the cluster -- not clean, should reboot after
WARNING: THIS WILL WIPE ALL KUBERNETES DATA ON THIS NODE, INCLUDING CEPH/LONGHORN DATA!
Information gathering options:
token: Get the token for joining the cluster
kubeconfig: Get a kubeconfig file for the cluster
Debugging options:
kubevip-only: Configure kube-vip, without initializing a new cluster (for debugging)
init-only: Initialize a new cluster, without configuring kube-vip (for debugging)
ENDUSAGE
}
# Print an error message, a blank line, then the usage text, and abort.
exit_error_usage() {
    printf '%s\n\n' "$1"
    usage
    exit 1
}
# Create the kube-vip manifest
# Adapted from basic kube-vip installation documentation
action_kubevip_init() {
# Build the combined kube-vip manifest (RBAC + two DaemonSets) and drop it
# into the k3s auto-deploy manifests directory.
# NOTE(review): requires curl, jq and ctr on the node.
# Working directory for creating the kube-vip manifest
tmpinit=/var/lib/rancher/k3s/server/tmpinit
mkdir -p "$tmpinit"
# Get the RBAC manifest, which will be the start of the kube-vip manifest
curl https://kube-vip.io/manifests/rbac.yaml -o "$tmpinit/kube-vip.yaml"
# Set an alias for easier use
# NOTE(review): picks the first release returned by the GitHub API —
# assumes the list is ordered newest-first; confirm.
kvversion=$(curl -sL https://api.github.com/repos/kube-vip/kube-vip/releases | jq -r ".[0].name")
ctr images pull ghcr.io/kube-vip/kube-vip:$kvversion
kubevip="ctr run --rm --net-host ghcr.io/kube-vip/kube-vip:$kvversion vip /kube-vip"
# Now you can run like '$kubevip -h' to see help
# Generate the DaemonSet manifest.
# This can be combined with the RBAC manifest by separating them with `\n---\n`,
# the standard YAML document separator.
# Add the daemonset for the main VIP to the file
printf "\n---\n" >> "$tmpinit/kube-vip.yaml"
$kubevip manifest daemonset \
--interface $interface \
--address $vipaddress \
--inCluster \
--taint \
--controlplane \
--services \
--arp \
--leaderElection \
>> "$tmpinit/kube-vip.yaml"
# Add the daemonset for the secondary VIP to the file
# We have to modify the output to use a different DaemonSet name and Prometheus port
# so the two DaemonSets can coexist in the cluster.
printf "\n---\n" >> "$tmpinit/kube-vip.yaml"
$kubevip manifest daemonset \
--interface psy0 \
--address 192.168.1.201 \
--inCluster \
--taint \
--controlplane \
--services \
--arp \
--leaderElection |
sed "s/value: :2112/value: :$kubevip2_prometheus_port/g" |
sed "s/name: kube-vip-ds\$/name: $kubevip2_name/g" \
>> "$tmpinit/kube-vip.yaml"
# Copy it to the manifests folder (only needs to happen on the first cluster member)
mkdir -p /var/lib/rancher/k3s/server/manifests
cp "$tmpinit/kube-vip.yaml" /var/lib/rancher/k3s/server/manifests/
}
# Initialize a new cluster
# Adapted from basic k3s installation documentation
#
# The docs have you launch with "k3s server --cluster-init",
# and then wait for an undetermined amount of time, kill it,
# and then launch it again without --cluster-init.
#
# Instead, we modify the config file to pass the --cluster-init option before first launch,
# then change it back to the default after launch while k3s is still running.
# This way, 'rc-service k3s ...' commands all work as expected.
action_init() {
# Bootstrap the first cluster node: start k3s once with --cluster-init,
# then restore the default config so later rc-service restarts behave normally.
# Set options for starting k3s and creating the cluster
cat >/etc/conf.d/k3s <<ENDK3S
export PATH="/usr/libexec/cni/:\$PATH"
K3S_EXEC="server"
K3S_OPTS="--cluster-init"
ENDK3S
# Start k3s via OpenRC
rc-service k3s start
# Set default options for k3s, just the default k3s file from the Alpine package
cat >/etc/conf.d/k3s <<ENDK3S
export PATH="/usr/libexec/cni/:\$PATH"
K3S_EXEC="server"
K3S_OPTS=""
ENDK3S
}
# Retrieve the token for joining the cluster
# Cluster must have already been initialized
# Print the token other nodes need to join this cluster.
# Exits with usage help if the cluster has not been initialized here yet.
action_gettoken() {
# NOTE(review): the original tested server/token for existence but then
# printed server/node-token; check the file we actually read.
tokfile=/var/lib/rancher/k3s/server/node-token
if test ! -f "$tokfile"; then
exit_error_usage "Cluster has not been initialized on this node yet"
fi
cat "$tokfile"
}
# Retrieve a kubeconfig file for the cluster.
# Modify it to use the VIP address.
action_kubeconfig() {
# Rewrite the local loopback server address to the cluster VIP so the
# emitted kubeconfig works from any machine.
sed "s/127.0.0.1/$vipaddress/g" /etc/rancher/k3s/k3s.yaml
}
# Join an existing cluster.
# Adapted from basic k3s installation documentation and Alpine OpenRC scripts.
#
# THe docs have you launch with "k3s server --server https:....",
# and then wait for an undetermined amount of time, kill it,
# and launch it from rc-service which does so without the --server option.
#
# Instead, we modify the config file to pass the --server option and K3S_TOKEN var before first launch,
# then change it back to the default after launch while k3s is still running.
# This way, 'rc-service k3s ...' commands all work as expected.
action_join() {
# Join this node to an existing cluster via the VIP, using the token from
# the init node; then restore the default k3s config for future restarts.
k3s_token="$1"
# Set options for joining the cluster
# This is only necessary the first time
cat >/etc/conf.d/k3s <<ENDK3S
export K3S_TOKEN=$k3s_token
export PATH="/usr/libexec/cni/:\$PATH"
K3S_EXEC="server"
K3S_OPTS="--server https://$vipaddress:6443"
ENDK3S
# Start k3s via OpenRC
rc-service k3s start
# Set default options for k3s, removing the joining options from earlier
# This is just the default conf.d/k3s file from the Alpine package
cat >/etc/conf.d/k3s <<ENDK3S
export PATH="/usr/libexec/cni/:\$PATH"
K3S_EXEC="server"
K3S_OPTS=""
ENDK3S
}
# Leave the cluster
# We use the Alpine k3s package, rather than curlbash'ing the installer.
# This means that we don't get a nice k3s-uninstall.sh script for us.
# (We do have a k3s-killall.sh which we have adapted from upstream.)
# See upstream code: <https://github.com/k3s-io/k3s/blob/03885fc38532afcb944c892121ffe96b201fc020/install.sh#L407-L449>
#
# This is tied to other psyopsOS conventions and what our cluster runs.
# For instance, the cluster runs Longhorn and Ceph,
# and we have to clean those up specially.
#
# After this, it is safest to reboot.
# If you do this, make sure to set k3s_start to False in the progfiguration role.
action_leave() {
# Stop all containers and the k3s service itself
# Note: This does unmount /var/lib/rancher/k3s; we have to remount it below
k3s-killall.sh
rc-service containerd stop
rc-service iscsid stop
# Best-effort cleanup from here: individual commands may legitimately fail
# (nothing to kill, nothing mounted), so stop aborting on error until the
# matching 'set -e' below.
set +e
# This is also necessary for some reason?
killall containerd-shim-runc-v2 traefik longhorn longhorn-manager kube-vip runsvdir pause sleep tini livenessprobe csi-node-driver-registrar cephcsi entry
# Make sure all relevant longhorn/ceph processes are killed too - might be some you have to clean up manually
# NOTE(review): because this is elif, the ceph check is skipped whenever
# longhorn processes are found — confirm whether two independent ifs were
# intended so both can be reported.
manuals=""
if ps aux | grep -v grep | grep -i longhorn; then
manuals="$manuals longhorn"
elif ps aux | grep -v grep | grep -i ceph; then
manuals="$manuals ceph"
fi
# Unmount a bunch of bullshit that Docker mounts
umount /run/netns/* /var/lib/kubelet/pods/*/volumes/*/*
# Remove old cluster config/state.
# Do NOT remove /var/lib/rancher/k3s because that's an overlay mountpoint on psyopsOS
rm -rf /etc/rancher/k3s /psyopsos-data/overlays/var-lib-rancher-k3s/*
# Recommended in the kube-vip k3s documentation https://kube-vip.io/docs/usage/k3s/
ip addr flush dev lo
ip addr add 127.0.0.1/8 dev lo
# Make sure that /var/lib/rancher/k3s is mounted
mount -o bind /psyopsos-data/overlays/var-lib-rancher-k3s /var/lib/rancher/k3s
# Remove k3s configuration file
# NOTE(review): redundant — /etc/rancher/k3s was already removed by the
# rm -rf above; harmless belt-and-suspenders.
rm -f /etc/rancher/k3s/k3s.yaml
set -e
if test -n "$manuals"; then
echo "WARNING: The following systems still have processes running:"
echo "$manuals"
echo "You must kill them manually."
exit 1
fi
}
# Parse the command line.
# The first bare word is the action; the "join" action accepts exactly one
# extra word, the cluster token. Anything else is an error.
action=
token=
while test $# -gt 0; do
arg="$1"
shift
case "$arg" in
-h | --help)
usage
exit 0
;;
*)
if test -z "$action"; then
action="$arg"
elif test "$action" = "join" && test -z "$token"; then
token="$arg"
else
exit_error_usage "Unknown argument: $arg"
fi
;;
esac
done
# Validate parsed arguments: an action is mandatory, and the "join" action
# additionally requires a token.
# FIX: replaced the deprecated/ambiguous 'test ... -a ...' binary operator
# (ShellCheck SC2166) with two test commands joined by '&&'.
if test -z "$action"; then
exit_error_usage "No action specified"
elif test "$action" = "join" && test -z "$token"; then
exit_error_usage "No token specified"
fi
# Dispatch to the handler for the requested action.
# "init" is the combination of "kubevip-only" followed by "init-only".
case "$action" in
kubevip-only)
action_kubevip_init
;;
init-only)
action_init
;;
init)
action_kubevip_init
action_init
;;
token)
action_gettoken
;;
kubeconfig)
action_kubeconfig
;;
join)
action_join "$token"
;;
leave)
action_leave
;;
*)
exit_error_usage "Unknown action: $action"
;;
esac
| true
|
cb3010411d448afde6e263e3726657ab75710884
|
Shell
|
rojoka/GRETAP_mesh_Backbone
|
/gre_backbone.sh
|
UTF-8
| 1,738
| 3.125
| 3
|
[] |
no_license
|
#!/bin/sh
# Build a GRETAP + batman-adv mesh backbone between the community servers,
# then configure bat0 and start alfred/batadv-vis.
# Server name ending must be a single digit number
communityname="troisdorf"
server="troisdorf1 troisdorf2 troisdorf3 troisdorf4 troisdorf5 troisdorf6 troisdorf9"
domain="freifunk-troisdorf.de"
mtu=1500
# community MAC address prefix, without the last byte (and no trailing ':')
communitymacaddress="a2:8c:ae:6f:f6"
# Network part of the IPv4 network, without the trailing dot
communitynetwork="10.188"
# IPv6 network prefix
communitynetworkv6="fda0:747e:ab29:7405:255::"
# Third octet reserved for the server range
octet3rd="255"
# The CIDR must be /16
localserver=$(/bin/hostname)
# Create one GRETAP tunnel from this host to every other server and attach
# each tunnel interface to batman-adv.
for i in $server; do
for j in $server; do
if [ "$i" != "$j" ] && [ "$i" = "$localserver" ]; then
/sbin/ip link add "$j" type gretap local "$(/bin/hostname -I | /usr/bin/cut -f1 -d' ')" remote "$(/usr/bin/dig +short "$j.$domain")" dev eth0 nopmtudisc
/sbin/ip link set dev "$j" mtu "$mtu"
# Last MAC byte encodes the local and remote server numbers, e.g. ":12"
/sbin/ip link set address "$communitymacaddress:${i#$communityname}${j#$communityname}" dev "$j"
/sbin/ip link set "$j" up
/usr/sbin/batctl if add "$j"
fi
done
done
# configure bat0
# BUG FIX: the original used "$communitymacaddress$:0..." — the stray '$'
# was kept literally by the shell, yielding an invalid MAC like
# "a2:8c:ae:6f:f6$:01". The bat0 MAC is the prefix plus ":0<server number>".
/sbin/ip link set address "$communitymacaddress:0${localserver#$communityname}" dev bat0
/sbin/ip link set up dev bat0
/sbin/ip addr add "$communitynetwork.$octet3rd.${localserver#$communityname}/16" broadcast "$communitynetwork.255.255" dev bat0
# Use the declared prefix variable instead of repeating the literal prefix
# (the original hardcoded fda0:747e:ab29:7405:255:: here, leaving
# $communitynetworkv6 unused and easy to desynchronize).
/sbin/ip -6 addr add "${communitynetworkv6}${localserver#$communityname}/64" dev bat0
/usr/local/sbin/alfred -i bat0 > /dev/null 2>&1 &
/usr/sbin/batadv-vis -i bat0 -s > /dev/null 2>&1 &
/usr/sbin/service bind9 restart
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.