blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
e1997b250d092fcc70eb46b9f03b7004baf3b648
|
Shell
|
Volibra/cardano-testnet-devops
|
/scripts/install-bootstrap.sh
|
UTF-8
| 913
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
set -eu
export DEBIAN_FRONTEND="noninteractive"
sudo apt-get install && sudo apt-get install -y tzdata curl
curl -L --output /tmp/go1.17.linux-amd64.tar.gz https://golang.org/dl/go1.17.linux-amd64.tar.gz
sudo rm -rf /usr/local/go && sudo tar -C /usr/local -xzf /tmp/go1.17.linux-amd64.tar.gz
rm -f /tmp/go1.17.linux-amd64.tar.gz
sudo ln -s /usr/local/go/bin/go /usr/local/bin/go
mkdir -p "${HOME}/bin"
cd "/tmp/bootstrap"
go get ./...
go build -o "${HOME}/bin/bootstrap"
cat <<EOF >> "${HOME}/.bash_profile"
# set CARDANO_NODE_SOCKET_PATH to cardano-cli
#
export CARDANO_NODE_SOCKET_PATH=\${HOME}/alonzo-testnet/node-bft1/node.sock
# add ${HOME}/bin to path
#
export PATH="\${PATH}:\${HOME}/bin"
# aliases
#
alias ls="ls -sF --color"
EOF
if [ -f /tmp/restart-testnet.sh ] ; then
cp /tmp/restart-testnet.sh "${HOME}/bin/restart-testnet.sh"
chmod +x "${HOME}/bin/restart-testnet.sh"
fi
| true
|
1262b20fb7b3f128920a21a1ff42251aa166a2a0
|
Shell
|
sanjayrajputcse/raspberry-pi-server
|
/scripts/cron.sh
|
UTF-8
| 229
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
date
out=$(ps aux | grep "[r]aspberry-pi-server")
if [[ -z ${out} ]]
then
echo "starting raspberry-pi-server..."
/home/sanjay.rajput/run.sh
else
echo "raspberry-pi-server already running!!!"
fi
| true
|
509b54a5917eb5f7981140bd492a7e5e831520fd
|
Shell
|
theGeoffrey/infrastructure
|
/bin/deploy.sh
|
UTF-8
| 315
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
set -e
export branch="deploy-$(date +%s)"
export current=`git rev-parse HEAD`
git checkout -b $branch
npm install
mv geoffrey/ui/dist ui-dist
git add -f ui-dist/* promo/dist/*
git commit -m"Add compiled UIs for deploy"
git push deploy $branch:master
git tag -f deployed $current
git push origin --tags
| true
|
4b5acb6f3484464df51766942c6be6a0ddc22692
|
Shell
|
TheEarnest/EnKF_Script
|
/upgrade_st_archive_fanf.sh
|
UTF-8
| 197
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/sh -e
PREFIX=NorCPM_F19_tn21_mem
CASEROOT=/home/nersc/ywang/NorESM/cases
for MEM in `seq -w 01 30`
do
cp -f ./st_archive_fanf.sh $CASEROOT/${PREFIX}${MEM}/Tools/st_archive.sh
done
| true
|
7e21134b85bce1b27edcaccbcf232b9b36c65f73
|
Shell
|
erlang/otp
|
/lib/megaco/examples/meas/mstone1.sh.skel.src
|
UTF-8
| 7,181
| 3.609375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
#
# %CopyrightBegin%
#
# Copyright Ericsson AB 2007-2022. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# %CopyrightEnd%
# Skeleton for a script intended to run the mstone1(N)
# performance test.
#
# Get the name of the program
program=`echo $0 | sed 's#.*/##g'`
usage="\
Usage: $program [options]
This shell script is used to run the mstone 1 (factor) performance
test. It is not intended to test the megaco stack but instead to
give a \"performance value\" of the host on which it is run.
Options:
-help display this help and exit.
-mp <message package> message package to use for test
default is time_test
-h <num> default process heap size
-a <num> async thread pool size (default is 0)
-t <run time> The runtime of the test
Format: <value>[unit], where unit can be:
s: seconds
m: minutes (default)
h: hours
If no unit is provided, minutes is assumed.
defaults to 10 minutes
-f <factor> normally the test is run with one process per codec
(= 12) (factor 1), one for each codec config. The test
can however be run with other factors, e.g.
factor 10 means that 10 processes will be started
for each megaco codec config.
The options -s and -f cannot both be present.
-s <num sched> normally the test is run with a fixed factor,
but if this option is given, the number of
schedulers is fixed (to the value set by this option)
and the factor is the variable.
The options -s and -f cannot both be present.
-d <drv-mode> driver mode for the test:
std - all codec config(s) will be used
flex - only the text codec config(s) utilizing the
flex scanner will be used
nd - only codec config(s) without drivers will be used
od - only codec config(s) with drivers will be used
-sbt <bind-type> Set scheduler bind type. See erl man page for more info.
tnnps - Thread no node processor spread (default)
u - Unbound
ns - No spread
ts - Thread spread
ps - Processor spread
s - Spread
nnts - No node thread spread
nnps - No node processor spread
-- everything after this is just passed on to erl.
"
ERL_HOME=<path to otp top dir>
MEGACO_HOME=$ERL_HOME/lib/erlang/lib/megaco-%VSN%
MEAS_HOME=$MEGACO_HOME/examples/meas
PATH=$ERL_HOME/bin:$PATH
MODULE=megaco_codec_mstone1
STARTF="start"
FACTOR=""
MSG_PACK=time_test
SBT="+sbt tnnps"
RT=10
while test $# != 0; do
# echo "DBG: Value = $1"
case $1 in
-help)
echo "$usage" ;
exit 0;;
-mp)
MSG_PACK="$2";
shift ; shift ;;
-h)
PHS="+h $2";
shift ; shift ;;
-a)
ATP="+A $2";
shift ; shift ;;
-t)
RT="$2";
shift ; shift ;;
-d)
case $2 in
std)
STARTF="start";
shift ; shift ;;
flex)
STARTF="start_flex";
shift ; shift ;;
nd)
STARTF="start_no_drv";
shift ; shift ;;
od)
STARTF="start_only_drv";
shift ; shift ;;
*)
echo "unknown driver mode: $2";
echo "$usage" ;
exit 0
esac;;
-sbt)
case $2 in
tnnps|u|ns|ts|ps|s|nnts|nnps)
SBT="+sbt $2";
shift ; shift ;;
*)
echo "unknown scheduler bind type: $2";
echo "$usage" ;
exit 0
esac;;
-f)
if [ "x$SCHED" != "x" ]; then
echo "option(s) -s and -f cannot both be given" ;
echo "$usage" ;
exit 0
fi
FACTOR="$2";
TYPE=factor;
shift ; shift ;;
-s)
if [ "x$FACTOR" != "x" ]; then
echo "option(s) -f and -s cannot both be given" ;
echo "$usage" ;
exit 0
fi
SCHED="$2";
TYPE=sched;
shift ; shift ;;
--)
shift ;
break;;
*)
echo "unknown option: $1";
echo "$usage" ;
exit 0
esac
done
if [ $TYPE = factor ]; then
MSTONE="-s $MODULE $STARTF $MSG_PACK $RT $FACTOR"
# SCHEDS="01 02 04"
# SCHEDS="01 02 04 08"
# SCHEDS="01 02 04 08 16"
# SCHEDS="01 02 04 08 16 32"
# SCHEDS="01 02 04 08 16 32 64"
SCHEDS="01 02 03 04 05 06 07 08"
for i in `echo $SCHEDS`; do
case $i in
01)
SMP_INFO="SMP: 1 scheduler"
SMP_OPTS="-smp +S $i"
LOG="mstone1-f$FACTOR-s$i.log"
;;
*)
SMP_INFO="SMP: $i schedulers"
SMP_OPTS="-smp +S $i"
LOG="mstone1-f$FACTOR-s$i.log"
;;
esac
echo ""
echo "---------------------------------------------"
echo "$SMP_INFO"
echo ""
ERL="erl \
-noshell \
$SBT \
$PHS \
$ATP \
$SMP_OPTS \
-pa $MEAS_HOME \
$MSTONE \
$* \
-s init stop"
echo $ERL
$ERL | tee $LOG
done
elif [ $TYPE = sched ]; then
MSTONE="-s $MODULE $STARTF $MSG_PACK $RT"
# FACTORS="01 02 03 04"
# FACTORS="01 02 03 04 05 06 07 08 09 10"
FACTORS="01 02 04 08 16 32"
# FACTORS="001 010 100"
case $SCHED in
*)
SMP_OPTS="-smp +S $SCHED"
;;
esac
for i in `echo $FACTORS`; do
LOG="mstone1-s$SCHED-f$i.log"
echo ""
echo "---------------------------------------------"
echo "Factor $i"
echo ""
ERL="erl \
-noshell \
$SBT \
$PHS \
$ATP \
$SMP_OPTS \
-pa $MEAS_HOME \
$MSTONE $i \
$* \
-s init stop"
echo $ERL
$ERL | tee $LOG
done
else
echo "Either option -f or -s must be specified"
echo "$usage" ;
exit 0
fi
| true
|
0acaf267aada31f28f96c83dc2e3ad3554edca84
|
Shell
|
herryslin/learning_script_shell
|
/check_config.sh
|
UTF-8
| 1,379
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
config=$1
if [ $config = "config_lua" ];then
m=`/bin/hostname|cut -c 1-7`
n=`cat /opt/app/edge/etc/config.lua|grep myNode|awk '{print $3}'|cut -c 2-8`
if [ "$m"x = "$n"x ];then
echo 0;
else
echo 1;
fi
elif [ $config = "servconf_ini" ];then
m=`hostname`
n=`cat /etc/nginx/servconf.ini|grep hostname|awk '{print $3}'|sed 's/"//g'`
if [ "$m"x = "$n"x ];then
echo 0;
else
echo 1;
fi
elif [ $config = "mac_id" ];then
m=`hostname`
n=`cat /etc/nginx/servconf.ini|grep machine_id|awk '{print $3}'|sed 's/"//g'`
if [ "$m"x = "$n"x ];then
echo 0;
else
echo 1;
fi
elif [ $config = "use_kt" ];then
m=`cat /opt/app/edge/etc/config.lua|grep use_kt|grep true|wc -l`
if [ $m -eq 1 ];then
echo 0;
else
echo 1;
fi
elif [ $config = "lua_edge" ];then
echo `rpm -q lua-edge|awk -F. '{print $2}'|sed 's/-//g'`
elif [ $config = "oct_ver" ];then
echo `rpm -q yunfancdn|awk -F- '{print $2}'|sed 's/\.//g'`
elif [ $config = "nginx_ver" ];then
echo `rpm -q openresty|awk -F"el" '{print $1}'|awk -F- '{print $2$3}'|sed 's/\.//g'`
fi
| true
|
751762c9425d95fd756c7108e5bc345f74d1464f
|
Shell
|
deweysasser/docker-baseimage
|
/test/verify
|
UTF-8
| 427
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash
# Purpose: verify the image does what it needs to
assert() {
desc="$1"; shift
if eval "$@" ; then
echo "$desc...PASS"
else
echo "$desc...FAIL"
problems=$((${problems:-0} + 1))
fi
}
# Give things a chance to start up
assert "Initialization happened" test -f /root/test-runonce
assert "Startup happened" test -f /root/test-startup
assert "System is running" test -f /root/test-run
exit $problems
| true
|
aa5e9f52cbf97a1d2458e95e2704b950f8885f60
|
Shell
|
irwanmohi/test
|
/final/cek-tr.sh
|
UTF-8
| 898
| 3.140625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
red='\e[1;31m'
green='\e[0;32m'
NC='\e[0m'
MYIP=$(wget -qO- icanhazip.com);
echo "Script By geo"
clear
data=( `cat /var/log/trojan.log | grep -w 'authenticated as' | awk '{print $7}' | sort | uniq`);
echo "-------------------------------";
echo "-----=[ Trojan User Login ]=-----";
echo "-------------------------------";
for akun in "${data[@]}"
do
data2=( `lsof -n | grep -i ESTABLISHED | grep trojan | awk '{print $9}' | cut -d':' -f2 | grep -w 445 | cut -d- -f2 | grep -v '>127.0.0.1' | sort | uniq | cut -d'>' -f2`);
echo -n > /tmp/iptrojan.txt
for ip in "${data2[@]}"
do
jum=$(cat /var/log/trojan.log | grep -w $akun | awk '{print $4}' | cut -d: -f1 | grep -w $ip | sort | uniq)
if [[ -z "$jum" ]]; then
echo > /dev/null
else
echo "$jum" > /tmp/iptrojan.txt
fi
done
jum2=$(cat /tmp/iptrojan.txt | nl)
echo "user : $akun";
echo "$jum2";
echo "-------------------------------"
done
| true
|
c53dd71e55a966c85012568ad31f3b8356a76d98
|
Shell
|
andrerocker/random
|
/c/sockets/server.sh
|
UTF-8
| 266
| 3.1875
| 3
|
[] |
no_license
|
#/bin/bash
system=$(uname -s)
port=1337
if [[ $system == "Linux" ]]
then
# Possivel GNU Netcat \o/
echo "starting with gnu params"
nc -v -l -p $port -q 1 < client.c
else
# Possivel BSD Netcat :(
echo "starting with bsd params"
nc -l $port < client.c
fi
| true
|
6e6df7da156a42549fa68af5b9afa2679e734400
|
Shell
|
cjieming/jmtools
|
/sam2fastq.sh_ip
|
UTF-8
| 576
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
###############################
## USAGE
###############################
## sam2fastq.sh <samplename> <BAMFILE>
## --requires picard 2.9 SamToFastq
## --now is hardcoded picard and scratch paths
SNAME=$1
BAM=$2d
java \
-Xmx64g \
-Djava.io.tmpdir=/gne/scratch/HumGenet/${SNAME}/tmp \
-XX:ParallelGCThreads=1 -jar /gne/apps/picard/picard-tools-2.9/picard.jar SamToFastq \
INPUT=${PWD}/${BAM} \
FASTQ=${PWD}/${SNAME}_1.fastq \
SECOND_END_FASTQ=${PWD}/${SNAME}_2.fastq \
UNPAIRED_FASTQ=${PWD}/${SNAME}_unpaired.fastq \
VALIDATION_STRINGENCY=SILENT VERBOSITY=WARNING
| true
|
1fd8a6333ad79e5abe1e642d593c368f1e338e05
|
Shell
|
stephenjwatkins/we-the-pixels
|
/deploy
|
UTF-8
| 838
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
echo "Running deployment script."
DATE=$(date +"%Y-%m-%dT%H-%M-%S")
BUNDLE_NAME="wtp.$DATE.tgz"
echo "Bundling WTP."
meteor bundle ./$BUNDLE_NAME
echo "SCPing to Staging."
scp -v ./$BUNDLE_NAME $1@stage.bitmo.co:/srv/bloojoo/wtp/releases
echo "Removing locally."
rm ./$BUNDLE_NAME
echo "Unbundling at Staging."
ssh -t $1@stage.bitmo.co "\
cd /srv/bloojoo/wtp; \
\
echo \"Removing old bundle directory.\"; \
rm -r /srv/bloojoo/wtp/bundle; \
\
echo \"Extracting new bundle.\"; \
tar -zxvf ./releases/$BUNDLE_NAME; \
\
echo \"Reinstalling fibers.\"; \
cd /srv/bloojoo/wtp/bundle/server; \
sudo npm uninstall fibers; \
sudo npm install fibers; \
sudo service wtp restart; \
\
echo \"Resetting directory permissions.\"; \
cd /srv/bloojoo/wtp; \
chown :bloojoo -R bundle; \
chmod g+w -R bundle; \
\
echo \"Successfully deployed to staging.\";"
| true
|
f88238aad9bd18c7e32673f2343af0044430a81b
|
Shell
|
moukle/moocs
|
/06 -- DeepLearning - GAN Specialization/course_1/papers/rename_pdfs.sh
|
UTF-8
| 270
| 3.75
| 4
|
[] |
no_license
|
#!/bin/bash
for filename in ./*.pdf; do
proper_title=$(pdftitle -p $filename)
lower_title=${proper_title,,}
spaces_to_underscores=${lower_title// /_}
length=${#spaces_to_underscores}
if [[ $length > 0 ]] ; then
mv $filename $spaces_to_underscores.pdf
fi
done
| true
|
23983dfddf26712c99e852271258692b072ed4b4
|
Shell
|
roywong200200/dataSystem
|
/top.sh
|
UTF-8
| 402
| 2.6875
| 3
|
[] |
no_license
|
SERVERIP=$1
ID=$2
SERVERURL=$1":8002/postdata.html"
OUTPUT="$(top -bn2 | awk '/^top -/ { p=!p } { if (!p) print }' | grep 'Cpu(s)' | awk {'print $8'})"
echo "*********** This is Checking CPU utilization; Utilization : ${OUTPUT}"
# echo "${OUTPUT}"
wget -O- --post-data='[{"id":"'$ID'","value":"top","top":"'$OUTPUT'"}]' \
--header=Content-Type:application/json \
"$SERVERURL"
| true
|
e426db26e72f9e707ea44c65e5c929a8ba5a0373
|
Shell
|
seomin3/fabric
|
/ops.icehouse/install-horizon-server.sh
|
UTF-8
| 760
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/bash
source ./include/fuction.sh
source ./include/controller.sh
###########################################
#
# horizon dashboard
#
##########################################
logg_big "horizon dashboard"
logg "install dashboard components"
install_pkgs 'memcached python-memcached mod_wsgi openstack-dashboard'
# set dashboard config file
hconf="/etc/openstack-dashboard/local_settings"
sed -i "s/ALLOWED_HOSTS = \['horizon.example.com', 'localhost'\]/ALLOWED_HOSTS=['*']/g" $hconf
sed -i "s/OPENSTACK_HOST = \"\"/OPENSTACK_HOST = \"$controller_ip_vnc\"/g" $hconf
#sed -i "s/OPENSTACK_KEYSTONE_DEFAULT_ROLE = \"_member_\"/OPENSTACK_KEYSTONE_DEFAULT_ROLE= \"_admin_\"/g" $hconf
logg "starting httpd"
service_handle "httpd"
service_handle "memcached"
| true
|
19b4d8ba5cbb4cb28cce3db2f6d4be11cb9e8355
|
Shell
|
mortenterhart/heroku-buildpack-wildfly
|
/test/compile_test.sh
|
UTF-8
| 2,950
| 3.703125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
#
# shellcheck disable=SC1090
source "${BUILDPACK_HOME}/test/module_loader.sh"
import "wildfly"
import "assertions/capture_assertions"
import "assertions/file_assertions"
import "lib/deployment_helper"
### --- SETUP HOOKS ---
setUpOnce() {
echo "### setUpOnce ###"
TEST_CACHE="/tmp/test-cache"
mkdir -p "${TEST_CACHE}"
WILDFLY_ZIP="${TEST_CACHE}/wildfly-${DEFAULT_WILDFLY_VERSION}.zip"
if [ ! -f "${WILDFLY_ZIP}" ]; then
download_wildfly "${DEFAULT_WILDFLY_VERSION}" "${WILDFLY_ZIP}"
else
status "Using WildFly ${DEFAULT_WILDFLY_VERSION} from cache"
fi
echo "## END setUpOnce ###"
}
### --- HELPER FUNCTIONS ---
useCachedWildfly() {
cp "${WILDFLY_ZIP}" "${CACHE_DIR}"
JBOSS_HOME="${BUILD_DIR}/.jboss/wildfly-${DEFAULT_WILDFLY_VERSION}"
}
createJavaMock() {
JDK_DIR="${BUILD_DIR}/.jdk"
mkdir -p "${JDK_DIR}/bin"
cat <<'EOF' > "${JDK_DIR}/bin/java"
#!/usr/bin/env bash
# This is a Java mocking script to prevent
# the compile script from downloading and
# installing a complete JDK
exec /usr/bin/env java "$@"
EOF
}
addConfigVar() {
local variable="$1"
local value="$2"
echo "${value}" > "${ENV_DIR}/${variable}"
}
configureWildflyVersion() {
local version="$1"
echo "wildfly.version=${version}" > "${BUILD_DIR}/system.properties"
}
### --- TESTS ---
testCompileSuccess() {
useCachedWildfly
createDeployment
createJavaMock
compile
assertCapturedSuccess
# Check that a log was created
assertDirExists "${CACHE_DIR}/logs"
assertDirNotEmpty "${CACHE_DIR}/logs"
# Check that JVM common was installed
assertDirExists "/tmp/jvm-common"
# Check that a JDK was installed
assertDirExists "${BUILD_DIR}/.jdk"
assertFileExists "${BUILD_DIR}/.jdk/bin/java"
# Check that WildFly 16 was installed and deployed
assertDirExists "${JBOSS_HOME}"
assertGlobExpands "${JBOSS_HOME}/standalone/deployments/*.war"
}
testCompileDebug() {
useCachedWildfly
createDeployment
createJavaMock
addConfigVar "BUILDPACK_DEBUG" "true"
compile
assertCapturedSuccess
assertCaptured "DEBUG: buildDir=${BUILD_DIR}"
assertCaptured "DEBUG: cacheDir=${CACHE_DIR}"
}
testCompileWithoutTargetDir() {
useCachedWildfly
createJavaMock
# Don't create target/ directory
compile
assertCapturedError 1 "Could not deploy WAR files: Target directory does not exist"
}
testCompileWithoutDeployment() {
useCachedWildfly
createJavaMock
# Create target/ directory, but no WAR files
# for deployment
createTargetDirectory
compile
assertCapturedError 1 "No WAR files found in 'target' directory"
}
testCompileInvalidWildflyVersion() {
useCachedWildfly
createDeployment
createJavaMock
configureWildflyVersion "undefined"
compile
assertCapturedError 1 "Unsupported WildFly version: undefined"
}
| true
|
e57bcc74bd7d70aed94d4c233ab9b4c1e3bd64d6
|
Shell
|
jarvist/archer-bin
|
/launch_vasp.sh
|
UTF-8
| 6,783
| 3.5
| 4
|
[] |
no_license
|
#!/bin/sh
#Usage: ./launch_vasp.sh [Directories with vasp files]
# i.e. ./launch_vasp.sh ./ to submit job from PWD
#Change log
#Automagic .com CX1 (Imperial College London HPC) job submitter. A WIP.
#JMF 2007-09
#Bash is a stranger in an open car...
#2012-04-27: Finally got around to adding single CPU defaults (for quick semi-empirical QC)
#2012-05-26: Forking this code to use for running NWCHEM .nw programs
#2012-06: Now runs multi-host over MPI for NWCHEM
#2012-06-18: Extended to restart NWCHEM jobs. Also, I actually learnt how to use 'getopts' as part of this.
#2013-11-14: Very initial VASP / Archer version - lots still hardcoded. Wraps
#up 4x input files into the shell via redirects. Assumed I'd have to do this to
#run in the temporary file space, but Archer seems to default to dropping you
#directly into your work folder (where the job is submitted from) so not
#necessary. How do TMP files work here? Mmm.
# Aha hah - ARCHER doesn't have any tmp directory, as the compute nodes have no disks...
# RUN AS ./executable.sh OTHERWISE OPTIONS WILL NOT BE GATHERED!
#Default Options
NCPUS=24
MEM=11800mb #Simon Burbidge correction - lots of nodes with 12GB physical memory, leaves no overhead for OS
QUEUE="" #default route
TIME="23:58:02" # Two minutes to midnight :^)
HOSTS=1 #Ah, the Host!
RESTART="NAH"
BUNDLE=0 # Wrap up VASP INPUT files into Shell script? Doesn't work on small buffer qsub / msubs!
GAMMA=0 # GAMMA only VASP? 50% faster, woo.
SUBMIT=0
#Switch based on login hostname & fill in defaults for different machines
HOST=` hostname `
# wmd-master --> NEON
# aquila-0 --> AQUILA
# eslogin004 --> ARCHER
case "${HOST}" in
wmd-master )
echo "Hello Neon! <(_ _)>"
BUNDLE=0
ACCOUNT= ;;
aquila* )
echo "Hello Aquila! <(_ _)>"
BUNDLE=0
ACCOUNT= ;;
eslogin* )
echo "Hello Archer! <(_ _)>"
CPUSPERHOST=24 #2x12 core per host on Archer
if (( "${USER}" == "jarvist" ))
then
ACCOUNT=e05-gener-wal
echo "Hello jarvist... Account code: ${ACCOUNT}"
else
ACCOUNT=pr1u1304
echo "Hello RISKY... Account code: ${ACCOUNT}"
fi ;;
*)
echo "I don't think we've met ${HOST}. Might be problems! (>_<)>"
esac
function USAGE()
{
cat << EOF
Jarv's VASP file runner.
USAGE: ./launch_vasp.sh [-nmqtsl] VASP_DIRECTORIES(S)
OPTIONS:
-n number of cpus (Deprecated! Will be overwritten by HOSTS*CPUSPERHOST)
-m amount of memory (Not presently used...)
-q queue
-a account
-t time
-h hosts
-s submit
-c cpusperhost (Nb: overwrite with lower value to underutilise CPUs + enjoy higher MEM/cpu.)
DEFAULTS (+ inspect for formatting):
NCPUS = ${NCPUS}
MEM = ${MEM}
QUEUE = ${QUEUE}
TIME = ${TIME}
EOF
}
while getopts ":n:m:q:t:h:c:a:srg?" Option
do
case $Option in
#OPTIONS
n ) NCPUS=$OPTARG;;
m ) MEM=$OPTARG;;
q ) QUEUE=$OPTARG;;
t ) TIME="${OPTARG}";;
h ) HOSTS="${OPTARG}";;
c ) CPUSPERHOST="${OPTARG}";;
a ) ACCOUNT="${OPTARG}";;
#FLAGS
s ) SUBMIT=1;;
r ) RESTART="YEAH";;
g ) GAMMA=1;;
? ) USAGE
exit 0;;
* ) echo ""
echo "Unimplemented option chosen."
USAGE # DEFAULT
esac
done
#Next line important! Auto calculation of NCPUS...
NCPUS=$(($HOSTS*$CPUSPERHOST))
# Choose random fun name for the submission script
# First ~Dozen are by me; the rest are taken from Iain M Bank's Culture Ship names:
# https://en.wikipedia.org/wiki/List_of_spacecraft_in_the_Culture_series
# Limit of 15 characters?
# Only 10 characters show in the standard qstat
# L E
NAME=` shuf -n1 << EOF
TimeWaster
MostlyZeros
NaN-eater
kAu-eater
kAu-waster
IAintConverging
IAintMisbehavin
Disconvergent
DiracFailedMe
FeynmanFailedMe
Dis-solver
99RedBalloons
Nameless
HackTheDyson
ProfessorFalken
ShalWePlayAGame
OnlyWinningMove
NiceGameOfChess
WhatDoesThisBut
Empiricist
MistakeNot
JustTesting
IThoughtHeWasWi
Helpless
HappyIdiot
SacrificialVict
WorkedLastTime
Perfidy
ProblemChild
RecentConvert
HonestMistake
SteelyGlint
NoFixedAbode
ZeroGravitas
NotInventedHear
NaughtyMonsters
GermaneRiposte
InOneEar
InappropRespons
KissThisThen
LightlySeared
NowWeTryItMyWay
TotalIntReflect
AFineDisregard
TeethingProblem
SmileTolerantly
uCallThisClean
EOF
`
#OK, now we should have our options
cat <<EOF
Well, here's what I understood / defaulted to:
HOSTS = ${HOSTS}
NCPUS = ${NCPUS}
MEM = ${MEM}
QUEUE = ${QUEUE}
TIME = ${TIME}
RESTART = ${RESTART}
BUNDLE = ${BUNDLE}
ACCOUNT = ${ACCOUNT}
NAME = ${NAME}
EOF
shift $(($OPTIND - 1))
# Decrements the argument pointer so it points to next argument.
# $1 now references the first non option item supplied on the command line
#+ if one exists.
PWD=` pwd `
for COM in $*
do
cd "${COM}"
FULLPATH=` pwd `
echo $FULLPATH
JOBFIL="${FULLPATH##*/}_RUN.sh" #Might want to change this in future, so made a variable
echo JOBFIL "${JOBFIL}"
cat > ${JOBFIL} << EOF
#!/bin/bash --login
#PBS -l walltime=${TIME}
#PBS -l select=${HOSTS}
#PBS -N ${NAME}
# Not really necessary on Archer...:ncpus=${NCPUS}:mem=${MEM}
#PBS -A ${ACCOUNT}
export OMP_NUM_THREADS=1
ulimit -s unlimited
module load vasp5
export PBS_O_WORKDIR=\$(readlink -f \$PBS_O_WORKDIR)
cd "\${PBS_O_WORKDIR}" #Escaped to be interpreted by the subshell running job
EOF
#Inline input files (for machines where storage is a pain to fine).
#HOWEVER, won't work with machines running a short-buffer QSUB + big Pseudo-potential files
if (( BUNDLE ))
then
for VASPFIL in INCAR POSCAR KPOINTS POTCAR
do
echo >> ${JOBFIL}
echo "cat > ${VASPFIL} << EOFd16cfc822b4325e67e7a0695518f0242" >> ${JOBFIL} #Random md5sum to assure (statistically!) likely uniqueness in long files
cat ${VASPFIL} >> ${JOBFIL}
echo "EOFd16cfc822b4325e67e7a0695518f0242" >> ${JOBFIL}
done
fi
VASP=~/bin/vasp_std
# Local copies as currently I'm stupidly added to group=vasp rather than required group=vasp5!
if (( GAMMA ))
then
VASP=~/bin/vasp_gam
fi
if (( NCL ))
then
VASP=~/bin/vasp_ncl
fi
echo "VASP BINARY: ${VASP}"
#OK, RUN AND CLEANUP TIME
cat >> ${JOBFIL} << EOF
# THUNDERBIRDS ARE GO!
aprun -n '$NCPUS' $VASP > vasp.out
#VASP vomits files everywhere, so lets bundle them up into a folder
#mkdir "${JOBFIL%.*}_out"
#mv *.* "${JOBFIL%.*}_out"
#cp -a "${JOBFIL%.*}_out" ${PWD}/${WD}/
#AND finish on a quote - this will be copied to the PBS stdout .o?????
echo "For us, there is only the trying. The rest is not our business. ~T.S.Eliot"
EOF
# echo "CAPTURED QSUB COMMAND: "
# cat ${JOBFIL}
if (( SUBMIT ))
then
echo "Submitting job... "
qsub -q "${QUEUE}" ${JOBFIL}
else
echo "Cowardly refusing to submit job."
fi
cd -
done
| true
|
0b57f47a3fb8f85dc55ea321684a75ba65db044f
|
Shell
|
n64decomp/007
|
/extract_cdata.sh
|
UTF-8
| 2,336
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/bash
if ! command -v md5sum &> /dev/null
then
echo "md5sum could not be found"
exit 1
fi
if ! command -v dd &> /dev/null
then
echo "dd could not be found"
exit 1
fi
if ! command -v dd &> /dev/null
then
echo "gzip could not be found"
exit 1
fi
ROM_FILENAME=
OUT_FILENAME=
ROM_MD5=
MD5_US="70c525880240c1e838b8b1be35666c3b"
MD5_JP="1880da358f875c0740d4a6731e110109"
MD5_EU="cff69b70a8ad674a0efe5558765855c9"
# US
ROM_FILENAME="baserom.u.z64"
OUT_FILENAME="baserom.u.cdata"
if [ -f "${ROM_FILENAME}" ]; then
ROM_MD5=$(md5sum "${ROM_FILENAME}" | cut -d " " -f1)
if [ "${ROM_MD5}" = "${MD5_US}" ]; then
echo "extracting US compressed data segment"
dd bs=1 skip=137616 count=71760 if="${ROM_FILENAME}" of="${OUT_FILENAME}" status=none
GZ=gzip tools/1172inflate.sh "${OUT_FILENAME}" "${OUT_FILENAME}.bin"
rm "${OUT_FILENAME}"
else
echo "cannot extract compressed data segment from ${ROM_FILENAME}, md5=${ROM_MD5}, expected ${MD5_US}"
fi
else
echo "${ROM_FILENAME} not found"
fi
# JP
ROM_FILENAME="baserom.j.z64"
OUT_FILENAME="baserom.j.cdata"
if [ -f "${ROM_FILENAME}" ]; then
ROM_MD5=$(md5sum "${ROM_FILENAME}" | cut -d " " -f1)
if [ "${ROM_MD5}" = "${MD5_JP}" ]; then
echo "extracting JP compressed data segment"
dd bs=1 skip=137680 count=71752 if="${ROM_FILENAME}" of="${OUT_FILENAME}" status=none
GZ=gzip tools/1172inflate.sh "${OUT_FILENAME}" "${OUT_FILENAME}.bin"
rm "${OUT_FILENAME}"
else
echo "cannot extract compressed data segment from ${ROM_FILENAME}, md5=${ROM_MD5}, expected ${MD5_JP}"
fi
else
echo "${ROM_FILENAME} not found"
fi
# EU
ROM_FILENAME="baserom.e.z64"
OUT_FILENAME="baserom.e.cdata"
if [ -f "${ROM_FILENAME}" ]; then
ROM_MD5=$(md5sum "${ROM_FILENAME}" | cut -d " " -f1)
if [ "${ROM_MD5}" = "${MD5_EU}" ]; then
echo "extracting EU compressed data segment"
dd bs=1 skip=129104 count=67680 if="${ROM_FILENAME}" of="${OUT_FILENAME}" status=none
GZ=gzip tools/1172inflate.sh "${OUT_FILENAME}" "${OUT_FILENAME}.bin"
rm "${OUT_FILENAME}"
else
echo "cannot extract compressed data segment from ${ROM_FILENAME}, md5=${ROM_MD5}, expected ${MD5_EU}"
fi
else
echo "${ROM_FILENAME} not found"
fi
| true
|
f05f8b443130218eba8003b835eb30ad1a4ce59d
|
Shell
|
TeoProt/Operating-Systems-Projects
|
/Projects/Project_1/function_sort_picture.sh
|
UTF-8
| 351
| 3.421875
| 3
|
[] |
no_license
|
function sort_picture
cd $argv[1]
find . -name "*.jpg" | while read line;
echo $line
set folder (eval date -r $line +%Y/%B/%d)
echo $folder
if test -d $argv[2]/$folder
cp $line $argv[2]/$folder
else
mkdir -p $argv[2]/$folder
cp $line $argv[2]/$folder
end
end
end
sort_picture "path_to_be_sorted" "path_to_save_sorted_files"
| true
|
46d3eea156ccca9a9346267c44b443ee9561bc19
|
Shell
|
casanovg/itops-scripts
|
/virtual-machines/cloud/restore-mattermost-db.sh
|
UTF-8
| 2,544
| 3.84375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Restore Mattermost MySQL (MariaDB) databases and system files
# ..............................................................
# 2020-06-13 gustavo.casanova@gmail.com
SYSTEM_DIR="/opt/mattermost"
BACKUP_SYSTEM="/data/mattermost-data/backup-system"
BACKUP_DATABASE="/data/mattermost-data/backup-database"
BKP_USR="netbackup"
BKP_GRP="wheel"
clear
echo ""
echo " **********************************************"
echo " * WARNING! WARNING! WARNING! *"
echo " * .......................................... *"
echo " * If you continue the HTA Mattermost team *"
echo " * service could be severely damaged and *"
echo " * become useless. If you are not a system *"
echo " * administrator or you do not know what *"
echo " * this database restore implies, please *"
echo " * exit now! *"
echo " **********************************************"
echo ""
echo -en "Please enter \e[38;2;255;0;0mrestore\e[0m to continue, or any key to \e[38;2;0;255;0mexit\e[0m: "
read USER_INPUT
if [ "$USER_INPUT" = "restore" ] || [ "$USER_INPUT" = "Restore" ] || [ "$USER_INPUT" = "RESTORE" ]; then
echo ""
echo "Ok, Mattermost database restore starting ..."
else
echo ""
echo "Exiting ..."
echo ""
exit
fi
# Stop Mattermost services
echo ""
echo "Stopping Mattermost services ..."
sudo systemctl stop nginx.service
sudo systemctl stop mattermost.service
DB_SERVICE_STATUS="$(systemctl is-active mariadb)"
# Restore Mattermost database
if [ "$DB_SERVICE_STATUS" = "active" ]; then
echo ""
echo "Mattermost database restore ..."
# Restore updated mattermost
echo "Restoring mattermost ..."
mysql -h localhost -u root -p"$(~/itops-scripts/virtual-machines/cloud/vm-setup/get-mysql-root-pwd.sh)" mattermost < "$(ls $BACKUP_DATABASE/mattermost.sql.*)"
else
echo ""
echo "WARNING! MariaDB not running, unable to restore the Mattermost database!"
fi
## Restore Mattermost system files
#echo ""
#echo -en "Restore also Mattermost system files? (Y/N): "
#read USER_INPUT
#if [ "$USER_INPUT" = "Y" ] || [ "$USER_INPUT" = "y" ] || [ "$USER_INPUT" = "yes" ] || [ "$USER_INPUT" = "Yes" ] || [ "$USER_INPUT" = "YES" ]
#then
# echo ""
# echo "Restoring Mattermost system files ..."
# sudo rsync -r -a $BACKUP_SYSTEM/* /opt/.
# #--- NO ---sudo chown -R root:wheel /opt/seafile.my.cnf
#fi
# Start Mattermost services
echo ""
echo "Starting Mattermost services ..."
sudo systemctl start mattermost.service
sudo systemctl start nginx.service
echo ""
| true
|
5cc689f766315b28683754c56c81b3806a6fc85f
|
Shell
|
douxing/dotfiles
|
/scripts/bootstrap.zsh
|
UTF-8
| 543
| 3.84375
| 4
|
[] |
no_license
|
#!/usr/bin/zsh
#
# bootstrap using zsh
PWDP=$(pwd -P)
link_file () {
local src=$1 dst=$2
ln -s "$src" "$dst"
}
# for file extension and base
# @see https://stackoverflow.com/questions/965053/extract-filename-and-extension-in-bash
# @see https://www.gnu.org/software/bash/manual/html_node/Shell-Parameter-Expansion.html#Shell-Parameter-Expansion
symbolize_all () {
for src in $(find -H "$PWDP" -maxdepth 2 -name '*.symlink')
do
dst="$HOME/.$(basename "${src%.*}")"
link_file "$src" "$dst"
done
}
symbolize_all
| true
|
8f8d83d43f21f41ba693117bf78c35a830b039b0
|
Shell
|
FizzyGalacticus/cards-against-humanity
|
/.husky/pre-commit
|
UTF-8
| 340
| 2.953125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
. "$(dirname $0)/_/husky.sh"
# Prevent commits to master
branch="$(git rev-parse --abbrev-ref HEAD)"
if [ "$branch" = "master" ]; then
echo -e "I can't let you do that Starfox...\nCommits to master are strictly prohibited"
exit 1
fi
# Lint & add files back to staging
node node_modules/.bin/fizzygalacticus-pre-commit-lint
| true
|
a9f7959aa879d31bb3cdd8cb955fb197ec597db9
|
Shell
|
xCrypt0r/Baekjoon
|
/src/15/15098.sh
|
UTF-8
| 485
| 2.921875
| 3
|
[
"MIT"
] |
permissive
|
: '
15098. No Duplicates
์์ฑ์: xCrypt0r
์ธ์ด: Bash
์ฌ์ฉ ๋ฉ๋ชจ๋ฆฌ: 18,284 KB
์์ ์๊ฐ: 8 ms
ํด๊ฒฐ ๋ ์ง: 2021๋
11์ 13์ผ
'
main() {
local words uniq=()
read -a words
for (( i = 0; i < ${#words[@]}; i++ )); do
for (( j = 0; j < ${#uniq[@]}; j++ )); do
if [[ ${words[i]} == ${uniq[j]} ]]; then
echo 'no'
exit
fi
done
uniq+=(${words[i]})
done
echo 'yes'
}
main
| true
|
80410d9cb8af42825dc60fcfe177ecc95990103d
|
Shell
|
arangodb/spring-boot-starter
|
/docker/find_active_endpoint.sh
|
UTF-8
| 271
| 2.84375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
COORDINATORS=("172.17.0.1:8529" "172.17.0.1:8539" "172.17.0.1:8549")
for a in ${COORDINATORS[*]} ; do
if curl -u root:test --silent --fail "http://$a"; then
echo "$a"
exit 0
fi
done
echo "Could not find any active endpoint!"
exit 1
| true
|
ae64f552099e4034869b1341c75975b5f3a5ba95
|
Shell
|
MKamnikar/i3-blocks-blocklets
|
/study-timew
|
UTF-8
| 1,464
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/env bash
# Blocklet shows the amount of time I spent studying today.
# If there is ongoing study session currently active, it will
# display it's duration as well.
# Clicking behaviour: if study is active, cancel it, otherwise continue it.
# TODO: Add clicking commands. <13-08-20, Maks Kamnikar> #
# case $BLOCK_BUTTON in
# # Left click.
# 1) notify-send -t 5000 "Study-timew" "$(timew continue)"
# Firstly check if there was any studying done today at all.
if [[ "1" == "$(timew summary faks | wc -l)" ]]; then
# No studying has been done today yet.
# If it were, there'd be at least 7 lines
echo "ZERO"
exit 0
else
# Get todays study time.
# Keep only the relevant line and clean it of whitespace. We also dont need seconds.
# Clear unneeded 0.
STUDY_TIME="$(timew summary faks | tail -2 | head -1 | sed 's/\s//g' | cut -d: -f -2 | sed 's/:0/:/')"
# Time of ongoing study session.
ONGOING_TIME=""
# TODO: This matches any tracking, not just tag 'faks'. Correct it! <13-07-20, Maks Kamnikar> #
if [[ 1 -eq $(timew get dom.active) ]]; then
ONGOING_TIME="($(timew get dom.active.duration | cut -dT -f 2 | cut -dM -f 1 | sed 's/H/:/'))"
# Clean errors: if less than a minute it has to be cleaned up.
if [[ -n "$(echo $ONGOING_TIME | grep S)" ]]; then
ONGOING_TIME="(0)"
fi
fi
printf "%s " "$STUDY_TIME"
printf "%s" "$ONGOING_TIME"
echo
exit 0
fi
| true
|
6c700a0667fde4db268fc9da58fc34aa4a37ccc3
|
Shell
|
jjtainio/ifstat
|
/backend/ifstat.sh
|
UTF-8
| 916
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
#echo "output is"
#echo "timestamp|interface|tx kbps|rx kbps"
#echo ""
#
# run this script on system boot
#
mkdir -p /tmp/ifstat
declare -A R1
declare -A R2
declare -A T1
declare -A T2
while true
do
IFLIST=`ls -1 /sys/class/net/ |grep -v lo|grep -v eth0`
for if in $IFLIST; do
R1[$if]=`cat /sys/class/net/$if/statistics/rx_bytes`
T1[$if]=`cat /sys/class/net/$if/statistics/tx_bytes`
done
sleep 1
for if in $IFLIST; do
R2[$if]=`cat /sys/class/net/$if/statistics/rx_bytes`
T2[$if]=`cat /sys/class/net/$if/statistics/tx_bytes`
done
timestamp=`date +%s`
for if in $IFLIST; do
Tt2=${T2[$if]}
Tt1=${T1[$if]}
Rt2=${R2[$if]}
Rt1=${R1[$if]}
TBPS=`expr $Tt2 - $Tt1`
RBPS=`expr $Rt2 - $Rt1`
TKBPS=`expr $TBPS / 128`
RKBPS=`expr $RBPS / 128`
echo "$timestamp|$if|$TKBPS|$RKBPS" >> /tmp/ifstat/$if.stat
done
done
| true
|
d8d73c1da2d73cba2bdcd5af438d2c1783bf915d
|
Shell
|
ryoma-jp/tools
|
/signal_generator/batch/create_signal.sh
|
UTF-8
| 2,345
| 3.859375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#! /bin/bash
# optional arguments for signal generator:
# -h, --help show this help message and exit
# --type TYPE ๆณขๅฝขใฎ็จฎ้ก
# * ไนฑๆฐ : random (default)
# * ๆญฃๅผฆๆณข : sin
# * ไฝๅผฆๆณข : cos
# * ไธ่งๆณข : triangle
# * ็ฉๅฝขๆณข : square
# * ้ธๆณข : sawtooth
# --csv CSV ๅบๅcsvใใกใคใซ
# --png PNG ๅบๅpngใใกใคใซ
# --freq FREQ ็ๆใใๆณขๅฝขใฎๅจๆณขๆฐ[Hz](default=1.0Hz)
# --fs FS ็ๆใใๆณขๅฝขใฎใตใณใใชใณใฐๅจๆณขๆฐ[Hz](default=8000Hz)
# --duration DURATION ็ๆใใๆณขๅฝขใฎ้ทใ[sec](default=1.0sec)
# ใในๅฎ็พฉ
exe="bin/signal_generator"
# ็ๆใใฟใผใณๅฎ็พฉ
freq=(10 30 50 70 100 150 200 250)
fs=8000
duration=5
# ๅบๅใใฃใฌใฏใใชไฝๆ
csv_dir='./csv'
png_dir='./png'
mkdir -p ${csv_dir}
mkdir -p ${png_dir}
# ๆณขๅฝข็ๆ
for _freq in ${freq[@]}
do
# ๆญฃๅผฆๆณข
echo "[processing] sin, ${_freq}Hz"
out_csv="${csv_dir}/sin-freq_${_freq}Hz.csv"
out_png="${png_dir}/sin-freq_${_freq}Hz.png"
${exe} --type sin --csv ${out_csv} --png ${out_png} --freq ${_freq} --fs ${fs} --duration ${duration}
# ไฝๅผฆๆณข
echo "[processing] cos, ${_freq}Hz"
out_csv="${csv_dir}/cos-freq_${_freq}Hz.csv"
out_png="${png_dir}/cos-freq_${_freq}Hz.png"
${exe} --type cos --csv ${out_csv} --png ${out_png} --freq ${_freq} --fs ${fs} --duration ${duration}
# ไธ่งๆณข
echo "[processing] triangle, ${_freq}Hz"
out_csv="${csv_dir}/triangle-freq_${_freq}Hz.csv"
out_png="${png_dir}/triangle-freq_${_freq}Hz.png"
${exe} --type triangle --csv ${out_csv} --png ${out_png} --freq ${_freq} --fs ${fs} --duration ${duration}
# ็ฉๅฝขๆณข
echo "[processing] square, ${_freq}Hz"
out_csv="${csv_dir}/square-freq_${_freq}Hz.csv"
out_png="${png_dir}/square-freq_${_freq}Hz.png"
${exe} --type square --csv ${out_csv} --png ${out_png} --freq ${_freq} --fs ${fs} --duration ${duration}
# ้ธๆณข
echo "[processing] sawtooth, ${_freq}Hz"
out_csv="${csv_dir}/sawtooth-freq_${_freq}Hz.csv"
out_png="${png_dir}/sawtooth-freq_${_freq}Hz.png"
${exe} --type sawtooth --csv ${out_csv} --png ${out_png} --freq ${_freq} --fs ${fs} --duration ${duration}
done
| true
|
78a0cbf12be1ca63e08ca1659545936d236864d2
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/reclass/PKGBUILD
|
UTF-8
| 1,293
| 2.671875
| 3
|
[] |
no_license
|
# Maintainer: Niels Abspoel <aboe76@gmail.com>
pkgname=reclass
pkgver=1.4.1
pkgrel=1
pkgdesc="hierarchical inventory backend for configuration management systems (salt, ansible, puppet)"
arch=('i686' 'x86_64')
url="http://reclass.pantsfullofunix.net/"
license=("PerlArtistic")
depends=('python2')
backup=('etc/reclass/reclass-config.yml')
makedepends=()
optdepends=()
options=()
conflicts=('reclass-git')
provides=('reclass')
source=("http://debian.c3sl.ufpr.br/debian/pool/main/r/${pkgname}/${pkgname}_${pkgver}.orig.tar.gz" 'reclass-config.yml' '.AURINFO')
md5sums=('bb8d46cd739ca76befb12ebc70e79b14' 'ad011bd9cf89152b9eaaeebc0862732f' 'SKIP')
build() {
cd "$srcdir/$pkgname-$pkgver"
python2 setup.py build
}
package() {
cd "$srcdir/$pkgname-$pkgver"
python2 setup.py install --root="$pkgdir/" --optimize=1
#create default dir
mkdir -p $pkgdir/etc/reclass/nodes
mkdir -p $pkgdir/etc/reclass/classes
install -Dm644 ${srcdir}/reclass-config.yml ${pkgdir}/etc/reclass/reclass-config.yml
#examples are usefull
mkdir -p $pkgdir/usr/share/doc/${pkgname}/examples
for i in ${srcdir}/${pkgname}-${pkgver}/examples/*; do
cp -R $i ${pkgdir}/usr/share/doc/${pkgname}/examples/"$(basename $i)"
chown -R root:root ${pkgdir}/usr/share/doc/${pkgname}/examples/"$(basename $i)"
done
}
| true
|
c3807a4f05104d875b91f7137c1c504d37281621
|
Shell
|
sclorg/mysql-container
|
/root-common/usr/share/container-scripts/mysql/pre-init/20-validate-variables.sh
|
UTF-8
| 3,284
| 3.484375
| 3
|
[
"Apache-2.0"
] |
permissive
|
function usage() {
[ $# == 1 ] && echo "error: $1"
echo "You must either specify the following environment variables:"
echo " MYSQL_USER (regex: '$mysql_identifier_regex')"
echo " MYSQL_PASSWORD (regex: '$mysql_password_regex')"
echo " MYSQL_DATABASE (regex: '$mysql_identifier_regex')"
echo "Or the following environment variable:"
echo " MYSQL_ROOT_PASSWORD (regex: '$mysql_password_regex')"
echo "Or both."
echo "Optional Settings:"
echo " MYSQL_LOWER_CASE_TABLE_NAMES (default: 0)"
echo " MYSQL_LOG_QUERIES_ENABLED (default: 0)"
echo " MYSQL_MAX_CONNECTIONS (default: 151)"
echo " MYSQL_FT_MIN_WORD_LEN (default: 4)"
echo " MYSQL_FT_MAX_WORD_LEN (default: 20)"
echo " MYSQL_AIO (default: 1)"
echo " MYSQL_KEY_BUFFER_SIZE (default: 32M or 10% of available memory)"
echo " MYSQL_MAX_ALLOWED_PACKET (default: 200M)"
echo " MYSQL_TABLE_OPEN_CACHE (default: 400)"
echo " MYSQL_SORT_BUFFER_SIZE (default: 256K)"
echo " MYSQL_READ_BUFFER_SIZE (default: 8M or 5% of available memory)"
echo " MYSQL_INNODB_BUFFER_POOL_SIZE (default: 32M or 50% of available memory)"
echo " MYSQL_INNODB_LOG_FILE_SIZE (default: 8M or 15% of available memory)"
echo " MYSQL_INNODB_LOG_BUFFER_SIZE (default: 8M or 15% of available memory)"
echo
echo "For more information, see https://github.com/sclorg/mysql-container"
exit 1
}
function validate_variables() {
# Check basic sanity of specified variables
if [[ -v MYSQL_USER && -v MYSQL_PASSWORD ]]; then
[[ "$MYSQL_USER" =~ $mysql_identifier_regex ]] || usage "Invalid MySQL username"
[ ${#MYSQL_USER} -le 32 ] || usage "MySQL username too long (maximum 32 characters)"
[[ "$MYSQL_PASSWORD" =~ $mysql_password_regex ]] || usage "Invalid password"
user_specified=1
fi
if [ -v MYSQL_ROOT_PASSWORD ]; then
[[ "$MYSQL_ROOT_PASSWORD" =~ $mysql_password_regex ]] || usage "Invalid root password"
root_specified=1
fi
# If MYSQL_USER == "root", we have a special case
if [[ "${user_specified:-0}" == "1" && "$MYSQL_USER" == "root" ]]; then
if [[ "${root_specified:-0}" == "1" ]]; then
usage "When setting MYSQL_USER to 'root' you can only set either MYSQL_PASSWORD or MYSQL_ROOT_PASSWORD"
fi
# We will now behave as if MYSQL_USER was not specified
export MYSQL_ROOT_PASSWORD="$MYSQL_PASSWORD"
export -n MYSQL_USER
export -n MYSQL_PASSWORD
user_specified=0
root_specified=1
fi
# Either combination of user/pass/db or root password is ok
if [[ "${user_specified:-0}" == "0" && "${root_specified:-0}" == "0" ]]; then
usage
fi
# If the root user is not specified, database name is required
if [[ "${root_specified:-0}" == "0" ]]; then
[ -v MYSQL_DATABASE ] || usage "You need to specify database name or root password"
fi
if [ -v MYSQL_DATABASE ]; then
[[ "$MYSQL_DATABASE" =~ $mysql_identifier_regex ]] || usage "Invalid database name"
[ ${#MYSQL_DATABASE} -le 64 ] || usage "Database name too long (maximum 64 characters)"
fi
# Specifically check of incomplete specification
if [[ -v MYSQL_USER || -v MYSQL_PASSWORD || -v MYSQL_DATABASE ]] && \
[[ "${user_specified:-0}" == "0" ]]; then
usage
fi
}
if ! [ -v MYSQL_RUNNING_AS_SLAVE ] ; then
validate_variables
fi
| true
|
e0e8e81e9057416d8b74d1d6991399c30b0509f8
|
Shell
|
LeeNCompanyInc/Waffle_Firmware
|
/imagegen/make_fw.sh
|
UTF-8
| 4,208
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
#set -e on
CURPATH=$(pwd)
REV=${REV:-"latest"}
REV=$(basename $(readlink -f $REV))
if [ -z "$REV" ]; then
echo "failed to find image builder directory."
exit 1
fi
BRAND=${BRAND:-"waffle"}
TARGET=${TARGET:-"ar71xx"}
SUBTARGET=${SUBTARGET:-"generic"}
FILES=${FILES:-"files"}
NO_FILES=${NO_FILES:-""}
PROFILE=${PROFILE:-""}
PROFILE_8M=${PROFILE_8M:-""}
PROFILE_16M=${PROFILE_16M:-""}
case "$TARGET" in
ar71xx)
PROFILE="$PROFILE TLWR841"
PROFILE_8M="$PROFILE_8M TLWDR4300"
PROFILE_16M="$PROFILE_16M WNDR3700"
;;
ralink)
PROFILE_16M="$PROFILE_16M ZBT-WE826 XIAOMI-MIWIFI-MINI"
;;
ramips)
PROFILE_8M="$PROFILE_8M ArcherC2 ArcherC20"
PROFILE_16M="$PROFILE_16M MIWIFI-MINI WF-2881 SAP-G3200U3"
;;
esac
PACKAGES=${PACKAGES:-""}
PACKAGES="$PACKAGES luci luci-app-qos luci-app-p2pblock n2n-v2 coova-chilli"
if [ "" != "$(cat $REV/.config|grep kmod-ipt-coova)" ]; then
PACKAGES="$PACKAGES kmod-ipt-coova"
fi
PACKAGES_8M=${PACKAGES_8M:-""}
PACKAGES_8M="$PACKAGES $PACKAGES_8M curl"
PACKAGES_16M=${PACKAGES_16M:-""}
PACKAGES_16M="$PACKAGES $PACKAGES_8M $PACKAGES_16M"
TARGET_PATH=${TARGET_PATH:-"$HOME/Dropbox/firmware"}
ncfscmd="CLI/ncfscmd.sh"
ncfscmd_mkdir="mkdir -pv"
ncfscmd_put="cp -fpv"
ncfshome="CLI/lib"
if [ -n "$(brew --prefix coreutils)" ]
then
export PATH=$(brew --prefix coreutils)/libexec/gnubin:$PATH
fi
make_firmware() { # <rev>
local rev="$1"
# copy additional root files
[ -z "$NO_FILES" ] && {
if [ -d "$rev/files" ]; then
rm -rfv "$rev/files"
fi
mkdir -pv "$rev/files"
for i in $(ls $CURPATH/$FILES); do
cp -fpRv "$CURPATH/$FILES/$i" "$rev/files/" ;
done
}
[ ! -z "$NO_FILES" ] && {
rm -rfv "$rev/files"
}
cd $rev && {
make clean
for i in $PROFILE; do
[ -z "$NO_FILES" ] && {
make image PROFILE=$i PACKAGES="$PACKAGES_4M" FILES="files"
}
[ ! -z "$NO_FILES" ] && {
make image PROFILE=$i PACKAGES="$PACKAGES_4M" FILES=
}
done
for i in $PROFILE_8M; do
[ -z "$NO_FILES" ] && {
make image PROFILE=$i PACKAGES="$PACKAGES_8M" FILES="files"
}
[ ! -z "$NO_FILES" ] && make image PROFILE=$i PACKAGES="$PACKAGES_8M" FILES=
done
for i in $PROFILE_16M; do
[ -z "$NO_FILES" ] && make image PROFILE=$i PACKAGES="$PACKAGES_16M" FILES="files"
[ ! -z "$NO_FILES" ] && make image PROFILE=$i PACKAGES="$PACKAGES_16M" FILES=
done
}
}
upload_firmware() { # <rev> <files> <target> [subtarget=generic] [brand=Waffle]
local rev files target subtarget brand version branch dirname fw_dir remote_dir
rev="$1"; shift;
files="$1"; shift;
target="$1"; shift;
[ ! -z "$1" ] && { subtarget="$1"; shift; }
subtarget=${subtarget:-"generic"}
[ ! -z "$1" ] && { brand="$1"; shift; }
brand=${brand:-"Waffle"}
fw_dir="$rev"
[ -z "$NO_FILES" ] && {
version="$(cd $files && git describe --always --tags --dirty=m)"
branch="$(cd $files && git branch)"
branch="${branch##* }"
dirname="${files##files_}"
fw_dir="${fw_dir}-${dirname}-${version}"
}
remote_dir="$TARGET_PATH/$fw_dir"
NCFS_HOME="$ncfshome" $ncfscmd_mkdir $remote_dir
for i in $(ls $rev/bin/$target/*-factory.bin 2>/dev/null); do
filename=$(basename $i)
filename=${filename/openwrt-*$target-$subtarget/$brand}
filename=${filename/-squashfs-factory/}
NCFS_HOME="$ncfshome" $ncfscmd_put $i "$remote_dir/$filename"
done
for i in $(ls $rev/bin/$target/*-sysupgrade.bin 2>/dev/null); do
filename=$(basename $i)
filename=${filename/openwrt-*$target-$subtarget/$brand}
filename=${filename/-squashfs-sysupgrade/-upgrade}
NCFS_HOME="$ncfshome" $ncfscmd_put $i "$remote_dir/$filename"
done
}
[ -z "$NO_FILES" ] && cd $FILES && {
[ -e .git ] && {
git stash
git checkout -f
git fetch --all --tags
git pull -f
git stash pop
}
chmod 755 etc/dropbear
chmod 444 etc/dropbear/authorized_keys
chmod +x etc/init.d/*
}
make_firmware $CURPATH/$REV
# show firmware
cd $CURPATH
ls $REV/bin/$TARGET/*.bin
# upload firmware
upload_firmware $REV $FILES $TARGET $SUBTARGET $BRAND
| true
|
6ae79112f3ac9ea836ab9237b51aca7473b3e38d
|
Shell
|
meta1203/VideoEditing
|
/2pass.sh
|
UTF-8
| 436
| 3.21875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
file="$1"
name="${file%.*}"
size=$2
length=`ffprobe -v error -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 "$file"`
bitrate=`echo "${size}*8192/${length}-128" | bc`
bitrate=$bitrate"k"
ffmpeg -y -i "$file" -c:v libx264 -preset medium -b:v $bitrate -pass 1 -an -f mp4 /dev/null && \
ffmpeg -i "$file" -c:v libx264 -preset medium -b:v $bitrate -pass 2 -b:a 128k "$name"_2pass.mp4
rm -fr ffmpeg2pass*
| true
|
1147820b2c25ccbfd8006958103f7a5c9ce5bea6
|
Shell
|
d6y/CGP.jl
|
/scripts/local-spawn.sh
|
UTF-8
| 1,265
| 3.640625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Launch a number of Atari experiments on the current machine.
# There are (num games * num seeds) concurrent executions.
#
# Usage:
# $ cd CGP.jl
# $ ./scripts/local-spawn.sh > spawn-`date +%Y%m%dZ%H%M%S`.out
SEEDS=( 1 2 3 4 5 )
GAMES=( "space_invaders" )
MAX_FRAMES=18000 # Control number of frames in each game
TOTAL_EVALS=100000 # Number of evaluations to perform on a given tournament
NUM_WORKERS=10
LOG_DIR="logs"
mkdir -p ${LOG_DIR}
echo "Starting `pwd` $0 at `TZ=UTC date`"
echo "Host: `hostname`"
echo "Arch: `uname -a`"
echo "Julia: `julia --version`"
echo "CPUs: `julia -e 'Sys.cpu_summary()'`"
echo "Git revision: `git log -1 --oneline`"
echo " "
echo "atari.yaml:"
echo ">>"
cat cfg/atari.yaml
echo "<<"
echo " "
echo "Launching:"
for game in ${GAMES[@]}
do
for seed in ${SEEDS[@]}
do
base=${LOG_DIR}/${game}_${seed}
echo "- ${base}"
echo " julia -p${NUM_WORKERS} experiments/atari.jl --id ${game} --frames ${MAX_FRAMES} --total_evals ${TOTAL_EVALS} --seed $seed --log ${base}.log"
JULIA_PROJECT=`pwd` nohup julia -p${NUM_WORKERS} experiments/atari.jl --id ${game} --frames ${MAX_FRAMES} --total_evals ${TOTAL_EVALS} --seed $seed --log ${base}.log 1> ${base}.out 2> ${base}.err &
echo " "
done
done
| true
|
eb23597d169ef833a8e463f99ccfd63812790a8d
|
Shell
|
schanur/libbivalvia
|
/test/module/software_testing.sh
|
UTF-8
| 1,494
| 3.421875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
set -o errexit -o nounset -o pipefail
BIVALVIA_PATH="$(dirname "${BASH_SOURCE[0]}")/../../bivalvia"
source "${BIVALVIA_PATH}/software_testing.sh"
function print_abc {
echo "abc"
}
function do_nothing {
true
}
function echo_stdin {
echo "${1}"
}
test_string_equal_with_duration "" "" 0 "Compare empty string"
test_string_equal_with_duration "-1" "-1" 0 "Compare negative number as string"
test_string_equal_with_duration "1234567890+asdfghjkl#yxcvbnm,.-" "1234567890+asdfghjkl#yxcvbnm,.-" 0 "Compare special chars"
test_string_equal "" "" "Compare empty string. No duration."
test_string_equal "-1" "-1" "Compare negative number as string. No duration."
test_string_equal "1234567890+asdfghjkl#yxcvbnm,.-" "1234567890+asdfghjkl#yxcvbnm,.-" "Compare special chars. No duration."
test_function print_abc 0 "abc" ""
test_function_return do_nothing 0
test_function_stdout print_abc "abc"
test_function_stdout print_abc "abc" "def"
# TODO: software testing functions cut off newline at end of string if
# printed to stdout. But it is not that important.
test_function_stdout echo_stdin $'a\n' $'a\n'
| true
|
f0690ae868b2d73fd8755f2239efe49eb16d8b32
|
Shell
|
Juan-cabrera-tw/nomad-instance
|
/modules/nomad/local/scripts/local-aws-cred.sh
|
UTF-8
| 859
| 2.75
| 3
|
[] |
no_license
|
cd modules/nomad/local/scripts
if ! [ -z "${AWS_ECR_PULL_ACCESSS_KEY}" ] && ! [ -z "${AWS_ECR_PULL_SECRET_KEY}" ]; then
echo "pipeline execution"
echo "AWS_ACCESS_KEY_ID=\"${AWS_ECR_PULL_ACCESSS_KEY}\"" >> aws.credentials
echo "AWS_SECRET_ACCESS_KEY=\"${AWS_ECR_PULL_SECRET_KEY}\"" >> aws.credentials
echo "AWS_DEFAULT_REGION=us-east-2" >> aws.credentials
else
echo "local execution"
vault read aws/creds/my-role >> keys
awk '{ print "\""$0"\""}' keys >> keys.credentials
rm keys
egrep 'access_key|secret_key' keys.credentials >> aws.credentials
rm keys.credentials
perl -i -pe 's/"access_key/AWS_ACCESS_KEY_ID="/g' aws.credentials
perl -i -pe 's/"secret_key/AWS_SECRET_ACCESS_KEY="/g' aws.credentials
perl -i -pe 's/ //g' aws.credentials
echo "AWS_DEFAULT_REGION=us-east-2" >> aws.credentials
fi
| true
|
7b862a42e2649c9c13e5bce22df52643864d01c2
|
Shell
|
ThorMortensen/plusScripts
|
/+/nuc-scp
|
UTF-8
| 241
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/bash
TARGET=$2
[[ $TARGET == port* ]] && PORT_NUMBER=$(echo $TARGET | tr -dc '0-9') && TARGET="support@192.168.$PORT_NUMBER.2"
auth-wrapper scp -o ProxyCommand="ssh -W %h:%p nuc" -o StrictHostKeyChecking=no -F none -A $1 $TARGET:$3
| true
|
0468717e5d374d865c5c4da9b4fe3ca4b67e352d
|
Shell
|
PolymathNetwork/polymath-apps
|
/heroku.build
|
UTF-8
| 547
| 3.484375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh -e
usage() {
echo "OVERVIEW: Build apps according to BUILD_ENV value. Meant to be used for Heroku deployment"
exit
}
if [ "$1" = '-h' ] || [ "$1" = '--help' ]; then
usage
fi
echo $BUILD_ENV
if [ "$BUILD_ENV" = "issuer-web" ]; then
lerna run --parallel --stream --scope=@polymathnetwork/issuer build
elif [ "$BUILD_ENV" = "issuer-api" ]; then
lerna run --parallel --stream --scope=@polymathnetwork/offchain build
else
echo "Error: no build config for INATO_BUILD_ENV value '$INATO_BUILD_ENV'"
exit 1
fi
| true
|
3366faf95b5809d601e5f36ce643edd530a1a096
|
Shell
|
kstenerud/canonical-bin
|
/gitdiffstat.sh
|
UTF-8
| 1,195
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/bash
set -eu
usage()
{
echo
}
show_help()
{
echo
}
show_commit_diffstats()
{
until_line=$1
until_commit=$2
until_import=$3
line_number=0
for commit in $(git log --oneline |awk '{print $1}' -); do
if [ $line_number -eq $until_line ]; then
return 0
fi
if [ $until_import -eq 1 ]; then
if git show --oneline $commit | head -1| grep "tag: pkg/import" >>/dev/null; then
return 0
fi
fi
echo;
git show --oneline $commit | head -1
git show $commit | diffstat
if [ "$commit" == "$until_commit" ]; then
return 0
fi
line_number=$(expr $line_number + 1)
done
}
UNTIL_LINE=-1
UNTIL_COMMIT=_
UNTIL_IMPORT=1
while getopts "?l:u:a" o; do
case "$o" in
\?)
show_help
exit 0
;;
l)
UNTIL_LINE=$OPTARG
UNTIL_IMPORT=0
;;
u)
UNTIL_COMMIT=$OPTARG
UNTIL_IMPORT=0
;;
u)
UNTIL_LINE=-1
UNTIL_COMMIT=_
UNTIL_IMPORT=0
;;
*)
usage
;;
esac
done
shift $((OPTIND-1))
show_commit_diffstats $UNTIL_LINE $UNTIL_COMMIT $UNTIL_IMPORT
| true
|
30738dab1f6f978642fc567346cf9c5a313650f9
|
Shell
|
nikitasokolov25/Executable-Analyzer
|
/scripts/build_linux_linux.sh
|
UTF-8
| 201
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
if [ -d "../build/" ]
then
echo -e "\033[31mFound build fox folder\033[0m"
cd ../build/
cmake --build .
else
echo -e "\033[31mBUILD FOLDER DO NOT EXIST! RUN CONFIGURE FIRST\033[0m"
fi
| true
|
0e80c4be2722d85e796384aa253f6e050d6e6a01
|
Shell
|
olzv/rails-vue-template
|
/docker-entrypoint.sh
|
UTF-8
| 1,126
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
# Created by https://github.com/olzv
set -e
if [[ "$DRY_RUN" == 'true' ]]; then
echo
echo "Running in DRY Mode!!!"
echo
echo "Cleaning up ruby gems"
rm -rf vendor/bundle/*
echo "Done."
echo
echo
echo "Cleaning up node_modules"
rm -rf node_modules
echo "Done."
echo
fi
bundle check || bundle install --path $BUNDLE_PATH
chmod -R a+rwX $BUNDLE_PATH
yarn install
until pg_isready; do
>&2 echo "Postgres is not yet available - Waiting..."
sleep 2
done
if [[ "$DRY_RUN" == 'true' ]]; then
echo
echo "Dropping the database"
bundle exec rails db:drop
echo "Done."
echo
fi
if [ "$( psql -tAc "SELECT 1 FROM pg_database WHERE datname='${DB_NAME_PREFIX}_${RAILS_ENV}'" )" = '1' ]
then
echo "Database already exists."
else
if [ -z "$DATABASE_URL" ]; then
echo "Database does not exist. Creating one..."
bundle exec rails db:create
echo "Seeding DB ..."
bundle exec rails db:seed
fi
fi
if [[ "$MIGRATE_ON_START" == 'true' ]]; then
bundle exec rails db:migrate
else
echo 'MIGRATE_ON_START disabled. Skipping migrations...'
fi
exec "$@"
| true
|
9533ba30b84cc151d662d6f045d8e852d55d7b5f
|
Shell
|
Ayannah/activemq-docker
|
/activemq-entrypoint.sh
|
UTF-8
| 258
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
set -e
run_activemq() {
chown -R activemq:activemq $ACTIVEMQ_BASE
exec gosu activemq /usr/local/bin/activemq ${@:-console}
}
case "$1" in
activemq)
shift 1
run_activemq "$@"
;;
*)
exec "$@"
esac
| true
|
2d12b4c417d59ad6bd1448a7fed38eeb4abcb644
|
Shell
|
nemo-packaging/qt5-ofono-git
|
/PKGBUILD
|
UTF-8
| 1,059
| 2.59375
| 3
|
[] |
no_license
|
# $Id$
# Contributor: Bart Ribbers <bribbers@disroot.org>
# Contributor: Alexey Andreyev <aa13q@ya.ru>
# Maintainer: James Kittsmiller (AJSlye) <james@nulogicsystems.com>
_host="git.sailfishos.org"
_project=mer-core
_basename=ofono
_branch=master
_gitname=libq${_basename}
pkgname=qt5-$_basename-git
pkgver=0.99.r0.g5e74475
pkgrel=1
pkgdesc="A library of Qt 5 bindings for ofono. Sailfish implementation"
arch=('x86_64' 'aarch64')
url="https://$_host/$_project/$_gitname#branch=$_branch"
license=('LGPL-2.1-or-later')
depends=('qt5-declarative')
makedepends=('git')
provides=("${pkgname%-git}" "libqofono" "libqofono-git" "libqofono-qt5")
conflicts=("${pkgname%-git}" "libqofono" "libqofono-git" "libqofono-qt5")
source=("${pkgname}::git+${url}")
md5sums=('SKIP')
pkgver() {
cd "${srcdir}/${pkgname}"
git describe --long --tags | sed 's/\([^-]*-g\)/r\1/;s/-/./g'
}
build() {
cd "${srcdir}/${pkgname}"
qmake
make
}
package() {
cd "${srcdir}/${pkgname}"
make INSTALL_ROOT="$pkgdir" install
# Remove tests
rm -r "$pkgdir"/opt
rm -r "$pkgdir"/usr/lib/libqofono-qt5/tests/
}
| true
|
6c7d0910970b19f5361858feffb85d80e276da55
|
Shell
|
TileDB-Inc/TileDB-NYSE-Ingestor
|
/scripts/install-clangformat.sh
|
UTF-8
| 925
| 3.921875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
die() {
echo "$@" 1>&2 ; popd 2>/dev/null; exit 1
}
install_apt_pkg() {
add-apt-repository 'deb http://apt.llvm.org/trusty/ llvm-toolchain-trusty-5.0 main' &&
wget -O - http://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add -
apt-get update -qq && apt-get install -qq -y clang-format-5.0
}
install_brew_pkg() {
brew upgrade && brew install clang-format
}
install_clang_format() {
if [[ $OSTYPE == linux* ]]; then
if [ -n "$(command -v apt-get)" ]; then
install_apt_pkg || die "could not install apt clang format package"
else
die "unsupported Linux package management system"
fi
elif [[ $OSTYPE == darwin* ]]; then
if [ -n "$(command -v brew)" ]; then
install_brew_pkg || die "could not install brew clang format package"
else
die "homebrew is not installed!"
fi
else
die "unsupported OS"
fi
}
run() {
install_clang_format
}
run
| true
|
3d289e87b8a74e5ccb9e357abd529fe34e5e5c98
|
Shell
|
itohdak/Competitive_Programming
|
/AtCoder/AHC/001/tools/run_all.sh
|
UTF-8
| 343
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/bash
echo compiling...
g++atcoder ~/Competitive_Programming/AtCoder/AHC/001/A.cpp
for filename in `ls in`; do
echo processing $filename;
time cat ./in/$filename | ./a.out > /tmp/out.txt 2> /tmp/error.txt;
cargo run --release --bin vis ./in/$filename /tmp/out.txt;
gnome-open vis.html;
echo finished $filename;
done
| true
|
2f6c17e38658fa79943cccdfd3c0c9c65b816449
|
Shell
|
flavio-fernandes/odl-openstack-ci-1
|
/tools/createFloat.sh
|
UTF-8
| 21,850
| 2.90625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
#
# This shell script will perform steps neutron and ovs would take in order to
# create a net, router, tenant vm and associate floating ip. With that, it
# will interact with a running Opendaylight as if it was a real Openstack
# environment.
# Ref: https://lists.opendaylight.org/pipermail/ovsdb-dev/2015-June/001544.html
# To use this script:
#
# 1) start ODL
#
# 2) copy this script to a system that has OVS installed and running.
# Make sure ODL_IP is correct
#
# 3) set manager of ovs to ODL (see setup_ovs function in this script)
#
# 4) verify that OVS connected to ODL okay, as well as pipeline in OVS is created
# An example for doing such is here: https://gist.github.com/391c9ba88d2c58cf40f7
#
# 5) run this script. Tweak away!
export ODL_IP='192.168.50.1'
export ODL_PORT='8080'
export ODL="http://${ODL_IP}:${ODL_PORT}/controller/nb/v2/neutron"
export DEBUG=1
# export DEBUG_FAKE_POST=yes
# export DEBUG_FAKE_OVS=yes
export CURL_HEADERS=('-H "Authorization: Basic YWRtaW46YWRtaW4="' '-H "Accept: application/json"' '-H "Content-Type: application/json"' '-H "Cache-Control: no-cache"')
export CURL_POST="curl -X POST ${CURL_HEADERS[*]}"
export CURL_PUT="curl -X PUT ${CURL_HEADERS[*]}"
export CURL_RETURN_FORMAT='-o /dev/null -sL -w "%{http_code}"'
export BIND_HOST_ID=$(hostname)
export TNT1_ID='cde2563ead464ffa97963c59e002c0cf'
export EXT_NET1_ID='7da709ff-397f-4778-a0e8-994811272fdb'
export EXT_SUBNET1_ID='00289199-e288-464a-ab2f-837ca67101a7'
export TNT1_RTR_ID='e09818e7-a05a-4963-9927-fc1dc6f1e844'
export NEUTRON_PORT_TNT1_RTR_GW='8ddd29db-f417-4917-979f-b01d4b1c3e0d'
export NEUTRON_PORT_TNT1_RTR_NET1='9cc1af22-108f-40bb-b938-f1da292236bf'
export TNT1_NET1_NAME='net1'
export TNT1_NET1_SEGM='1062'
export TNT1_NET1_ID='12809f83-ccdf-422c-a20a-4ddae0712655'
export TNT1_SUBNET1_NAME='subnet1'
export TNT1_SUBNET1_ID='6c496958-a787-4d8c-9465-f4c4176652e8'
export TNT1_NET1_DHCP_PORT_ID='79adcba5-19e0-489c-9505-cc70f9eba2a1'
export TNT1_NET1_DHCP_MAC='FA:16:3E:8F:70:A9'
export TNT1_NET1_DHCP_DEVICE_ID="dhcp58155ae3-f2e7-51ca-9978-71c513ab02ee-${TNT1_NET1_ID}"
export TNT1_NET1_DHCP_OVS_PORT='tap79adcba5-19'
export TNT1_VM1_PORT_ID='341ceaca-24bf-4017-9b08-c3180e86fd24'
export TNT1_VM1_MAC='FA:16:3E:8E:B8:05'
export TNT1_VM1_DEVICE_ID='20e500c3-41e1-4be0-b854-55c710a1cfb2'
export TNT1_NET1_VM1_OVS_PORT='tap341ceaca-24'
export TNT1_VM1_VM_ID='20e500c3-41e1-4be0-b854-55c710a1cfb2'
export FLOAT_IP1_ID='f013bef4-9468-494d-9417-c9d9e4abb97c'
export FLOAT_IP1_PORT_ID='01671703-695e-4497-8a11-b5da989d2dc3'
export FLOAT_IP1_MAC='FA:16:3E:3F:37:BB'
export FLOAT_IP1_DEVICE_ID='f013bef4-9468-494d-9417-c9d9e4abb97c'
export FLOAT_IP1_ADDRESS='192.168.111.22'
#--
function do_eval_command {
callerFunction=$1 ; shift
expectedRc=$1 ; shift
cmd="$*" ; shift
[ $DEBUG -gt 0 ] && echo -n "$callerFunction $cmd ==> "
[ -z $DEBUG_FAKE_POST ] && rc=$(eval $cmd) || rc=fake
[ $DEBUG -gt 0 ] && echo "$rc" && echo
[ -z "$expectedRc" ] && expectedRc=201
if [ "$rc" != "$expectedRc" ] && [ -z $DEBUG_FAKE_POST ]; then
echo "ERROR: $callerFunction $cmd unexpected rc $rc (wanted $expectedRc)"
exit 1
fi
}
#--
function check_get_code {
url="${ODL}/$1" ; shift
cmd="curl -X GET ${CURL_HEADERS[*]} $CURL_RETURN_FORMAT $url 2>&1"
[ -z "$1" ] && expectedRc=200 || expectedRc=$1
do_eval_command ${FUNCNAME[0]} $expectedRc $cmd
}
#--
function setup_ovs {
if [ -z $DEBUG_FAKE_OVS ]; then
[ $DEBUG -gt 0 ] && echo "setting ovs manager to tcp:${ODL_IP}:6640"
sudo ovs-vsctl set-manager tcp:${ODL_IP}:6640 || exit 2
# give it time for pipeline to be created...
sleep 10
fi
}
#--
function create_ovs_port {
callerFunction=$1 ; shift
expectedSuccess=$1 ; shift
ovsPort=$1 ; shift
macAddrRaw=$1 ; shift
neutronPortId=$1 ; shift
portVmId=$1 ; shift
macAddr="$(echo $macAddrRaw | tr '[:upper:]' '[:lower:]')"
cmd1=$(cat <<EOF
sudo ovs-vsctl add-port br-int ${ovsPort}
-- set Interface ${ovsPort} type=internal
-- set Interface ${ovsPort} external_ids:attached-mac=${macAddr}
-- set Interface ${ovsPort} external_ids:iface-status=active
-- set Interface ${ovsPort} external_ids:iface-id=${neutronPortId}
EOF
)
[ -z "$portVmId" ] && cmd2='' || cmd2="-- set Interface ${ovsPort} external_ids:vm-id=${portVmId}"
# cmd="$cmd1 $cmd2 ; echo $?"
cmd="$cmd1 $cmd2 2>&1 ; echo \$?"
[ $DEBUG -gt 0 ] && echo -n "$callerFunction $cmd ==> "
[ -z $DEBUG_FAKE_OVS ] && rc=$(eval $cmd) || rc=fake
[ $DEBUG -gt 0 ] && echo "$rc" && echo
[ "$expectedSuccess" != true ] && expectedRc=999 || expectedRc=0
if [ "$rc" != "$expectedRc" ] && [ "$expectedRc" -eq 0 ] && [ -z $DEBUG_FAKE_OVS ]; then
echo "ERROR: $callerFunction $cmd unexpected rc $rc (wanted $expectedRc)"
exit 1
fi
}
#--
function create_ext_net {
tntId=$1 ; shift
netId=$1 ; shift
url="${ODL}/networks/"
body=$(cat <<EOF
-d '{
"network": [
{
"provider:physical_network": "physnetext1",
"port_security_enabled": true,
"provider:network_type": "flat",
"id": "${netId}",
"provider:segmentation_id": null,
"router:external": true,
"name": "ext1",
"admin_state_up": true,
"tenant_id": "${tntId}",
"shared": false
}
]
}
'
EOF
)
cmd="$CURL_POST $body $CURL_RETURN_FORMAT $url 2>&1"
do_eval_command ${FUNCNAME[0]} "$1" "$cmd"
}
#--
function create_ext_subnet {
tntId=$1 ; shift
netId=$1 ; shift
subnetId=$1 ; shift
url="${ODL}/subnets/"
body=$(cat <<EOF
-d '{
"subnet": {
"name": "subext1",
"enable_dhcp": false,
"network_id": "${netId}",
"tenant_id": "${tntId}",
"dns_nameservers": [],
"gateway_ip": "192.168.111.254",
"ipv6_ra_mode": null,
"allocation_pools": [
{
"start": "192.168.111.21",
"end": "192.168.111.40"
}
],
"host_routes": [],
"shared": false,
"ip_version": 4,
"ipv6_address_mode": null,
"cidr": "192.168.111.0/24",
"id": "${subnetId}",
"subnetpool_id": null
}
}
'
EOF
)
cmd="$CURL_POST $body $CURL_RETURN_FORMAT $url 2>&1"
do_eval_command ${FUNCNAME[0]} "$1" "$cmd"
}
#--
function create_router {
tntId=$1 ; shift
rtrId=$1 ; shift
url="${ODL}/routers/"
body=$(cat <<EOF
-d '{
"router": {
"status": "ACTIVE",
"external_gateway_info": null,
"name": "rtr1",
"gw_port_id": null,
"admin_state_up": true,
"routes": [],
"tenant_id": "${tntId}",
"distributed": false,
"id": "${rtrId}"
}
}
'
EOF
)
cmd="$CURL_POST $body $CURL_RETURN_FORMAT $url 2>&1"
do_eval_command ${FUNCNAME[0]} "$1" "$cmd"
}
#--
function create_port_rtr_gateway {
tntId=$1 ; shift
rtrId=$1 ; shift
netId=$1 ; shift
subnetId=$1 ; shift
portId=$1 ; shift
url="${ODL}/ports/"
body=$(cat <<EOF
-d '{
"port": {
"binding:host_id": "",
"allowed_address_pairs": [],
"device_owner": "network:router_gateway",
"port_security_enabled": false,
"binding:profile": {},
"fixed_ips": [
{
"subnet_id": "${subnetId}",
"ip_address": "192.168.111.21"
}
],
"id": "${portId}",
"security_groups": [],
"device_id": "${rtrId}",
"name": "",
"admin_state_up": true,
"network_id": "${netId}",
"tenant_id": "",
"binding:vif_details": {},
"binding:vnic_type": "normal",
"binding:vif_type": "unbound",
"mac_address": "FA:16:3E:7E:A0:D8"
}
}
'
EOF
)
cmd="$CURL_POST $body $CURL_RETURN_FORMAT $url 2>&1"
do_eval_command ${FUNCNAME[0]} "$1" "$cmd"
}
#--
function update_router_port_gateway {
tntId=$1 ; shift
rtrId=$1 ; shift
netId=$1 ; shift
subnetId=$1 ; shift
portId=$1 ; shift
url="${ODL}/routers/${rtrId}"
body=$(cat <<EOF
-d '{
"router": {
"external_gateway_info": {
"network_id": "${netId}",
"enable_snat": true,
"external_fixed_ips": [
{
"subnet_id": "${subnetId}",
"ip_address": "192.168.111.21"
}
]
},
"name": "rtr1",
"gw_port_id": "${portId}",
"admin_state_up": true,
"distributed": false,
"routes": []
}
}'
EOF
)
cmd="$CURL_PUT $body $CURL_RETURN_FORMAT $url 2>&1"
[ -z "$1" ] && expectedRc=200 || expectedRc=$1
do_eval_command ${FUNCNAME[0]} $expectedRc "$cmd"
}
#--
function create_tnt_net {
tntId=$1 ; shift
netName=$1 ; shift
netId=$1 ; shift
netSegm=$1 ; shift
url="${ODL}/networks/"
body=$(cat <<EOF
-d '{
"network": {
"name": "${netName}",
"provider:physical_network": null,
"router:external": false,
"tenant_id": "${tntId}",
"admin_state_up": true,
"provider:network_type": "vxlan",
"shared": false,
"port_security_enabled": true,
"id": "${netId}",
"provider:segmentation_id": ${netSegm}
}
}
'
EOF
)
cmd="$CURL_POST $body $CURL_RETURN_FORMAT $url 2>&1"
do_eval_command ${FUNCNAME[0]} "$1" "$cmd"
}
#--
function create_tnt_subnet {
tntId=$1 ; shift
subnetName=$1 ; shift
netId=$1 ; shift
subnetId=$1 ; shift
url="${ODL}/subnets/"
body=$(cat <<EOF
-d '
{
"subnet": {
"name": "${subnetName}",
"enable_dhcp": true,
"network_id": "${netId}",
"tenant_id": "${tntId}",
"dns_nameservers": [
"192.168.111.254"
],
"gateway_ip": "10.1.0.1",
"ipv6_ra_mode": null,
"allocation_pools": [
{
"start": "10.1.0.2",
"end": "10.1.0.254"
}
],
"host_routes": [],
"shared": false,
"ip_version": 4,
"ipv6_address_mode": null,
"cidr": "10.1.0.0/24",
"id": "$subnetId",
"subnetpool_id": null
}
}
'
EOF
)
cmd="$CURL_POST $body $CURL_RETURN_FORMAT $url 2>&1"
do_eval_command ${FUNCNAME[0]} "$1" "$cmd"
}
#--
# Create the DHCP port (device_owner network:dhcp) for a tenant subnet
# (POST /ports/), bound to ${BIND_HOST_ID} with fixed IP 10.1.0.2.
# Args: tntId netId subnetId dhcpId dhcpMac dhcpDeviceId [expectedRc]
function create_port_dhcp {
# ${TNT1_ID} ${TNT1_NET1_ID} ${TNT1_SUBNET1_ID} ${TNT1_NET1_DHCP_PORT_ID} ${TNT1_NET1_DHCP_MAC} ${TNT1_NET1_DHCP_DEVICE_ID}
tntId=$1 ; shift
netId=$1 ; shift
subnetId=$1 ; shift
dhcpId=$1 ; shift
dhcpMac=$1 ; shift
dhcpDeviceId=$1 ; shift
url="${ODL}/ports/"
# Heredoc is eval'd verbatim as the curl -d payload; keep it byte-exact.
body=$(cat <<EOF
-d '
{
"port": {
"binding:host_id": "${BIND_HOST_ID}",
"allowed_address_pairs": [],
"device_owner": "network:dhcp",
"port_security_enabled": false,
"binding:profile": {},
"fixed_ips": [
{
"subnet_id": "${subnetId}",
"ip_address": "10.1.0.2"
}
],
"id": "${dhcpId}",
"security_groups": [],
"device_id": "${dhcpDeviceId}",
"name": "",
"admin_state_up": true,
"network_id": "${netId}",
"tenant_id": "${tntId}",
"binding:vif_details": {},
"binding:vnic_type": "normal",
"binding:vif_type": "unbound",
"mac_address": "${dhcpMac}"
}
}'
EOF
)
cmd="$CURL_POST $body $CURL_RETURN_FORMAT $url 2>&1"
# "$1" is the optional expected HTTP status after the shifts above.
do_eval_command ${FUNCNAME[0]} "$1" "$cmd"
}
#--
# Update an existing DHCP port to a bound OVS state (PUT /ports/<portId>):
# sets binding:vif_type to "ovs" and enables port_filter.
# Args: portId dhcpDeviceId [expectedRc]   (expectedRc defaults to 200)
function update_port_dhcp {
portId=$1 ; shift
dhcpDeviceId=$1 ; shift
url="${ODL}/ports/${portId}"
# Heredoc is eval'd verbatim as the curl -d payload; keep it byte-exact.
body=$(cat <<EOF
-d '
{
"port": {
"binding:host_id": "${BIND_HOST_ID}",
"allowed_address_pairs": [],
"extra_dhcp_opts": [],
"device_owner": "network:dhcp",
"binding:profile": {},
"port_security_enabled": false,
"security_groups": [],
"device_id": "${dhcpDeviceId}",
"name": "",
"admin_state_up": true,
"binding:vif_details": {
"port_filter": true
},
"binding:vnic_type": "normal",
"binding:vif_type": "ovs"
}
}
'
EOF
)
cmd="$CURL_PUT $body $CURL_RETURN_FORMAT $url 2>&1"
# After the shifts, $1 is the optional expected HTTP status (default 200).
[ -z "$1" ] && expectedRc=200 || expectedRc=$1
do_eval_command ${FUNCNAME[0]} $expectedRc "$cmd"
}
#--
# Create a router-interface port (device_owner network:router_interface)
# on a tenant subnet (POST /ports/), fixed IP 10.1.0.1 (the subnet gateway).
# Args: tntId rtrId netId subnetId portId [expectedRc]
# NOTE(review): the MAC address is a hard-coded test-fixture value.
function create_port_rtr_interface {
tntId=$1 ; shift
rtrId=$1 ; shift
netId=$1 ; shift
subnetId=$1 ; shift
portId=$1 ; shift
url="${ODL}/ports/"
# Heredoc is eval'd verbatim as the curl -d payload; keep it byte-exact.
body=$(cat <<EOF
-d '{
"port": {
"binding:host_id": "",
"allowed_address_pairs": [],
"device_owner": "network:router_interface",
"port_security_enabled": false,
"binding:profile": {},
"fixed_ips": [
{
"subnet_id": "${subnetId}",
"ip_address": "10.1.0.1"
}
],
"id": "${portId}",
"security_groups": [],
"device_id": "${rtrId}",
"name": "",
"admin_state_up": true,
"network_id": "${netId}",
"tenant_id": "${tntId}",
"binding:vif_details": {},
"binding:vnic_type": "normal",
"binding:vif_type": "unbound",
"mac_address": "FA:16:3E:C0:BD:8B"
}
}
'
EOF
)
cmd="$CURL_POST $body $CURL_RETURN_FORMAT $url 2>&1"
# "$1" is the optional expected HTTP status after the shifts above.
do_eval_command ${FUNCNAME[0]} "$1" "$cmd"
}
#--
# Attach an existing port/subnet to a router
# (PUT /routers/<rtrId>/add_router_interface).
# Args: tntId rtrId subnetId portId [expectedRc]  (expectedRc defaults to 200)
function update_router_interface {
# ${TNT1_ID} ${TNT1_RTR_ID} ${TNT1_SUBNET1_ID} ${NEUTRON_PORT_TNT1_RTR_NET1}
tntId=$1 ; shift
rtrId=$1 ; shift
subnetId=$1 ; shift
portId=$1 ; shift
url="${ODL}/routers/${rtrId}/add_router_interface"
# Heredoc is eval'd verbatim as the curl -d payload; keep it byte-exact.
body=$(cat <<EOF
-d '{
"subnet_id": "${subnetId}",
"tenant_id": "${tntId}",
"port_id": "${portId}",
"id": "${rtrId}"
}'
EOF
)
cmd="$CURL_PUT $body $CURL_RETURN_FORMAT $url 2>&1"
# After the shifts, $1 is the optional expected HTTP status (default 200).
[ -z "$1" ] && expectedRc=200 || expectedRc=$1
do_eval_command ${FUNCNAME[0]} $expectedRc "$cmd"
}
#--
# Create a VM (compute:None) port with fixed IP 10.1.0.3 and an inlined
# "default" security group carrying the four standard allow rules
# (egress IPv4/IPv6 anywhere; ingress IPv4/IPv6 from the same group).
# Args: tntId netId subnetId portId macAddr deviceId [expectedRc]
# NOTE(review): the security-group and rule UUIDs are hard-coded fixtures.
function create_port_vm {
# ${TNT1_ID} ${TNT1_NET1_ID} ${TNT1_SUBNET1_ID} ${TNT1_VM1_PORT_ID} ${TNT1_VM1_MAC} ${TNT1_VM1_DEVICE_ID}
tntId=$1 ; shift
netId=$1 ; shift
subnetId=$1 ; shift
portId=$1 ; shift
macAddr=$1 ; shift
deviceId=$1 ; shift
url="${ODL}/ports/"
secGroupId='970d6a6d-bebf-43a3-85cc-a860fc994333'
# Heredoc is eval'd verbatim as the curl -d payload; keep it byte-exact.
body=$(cat <<EOF
-d '{
"port": {
"binding:host_id": "${BIND_HOST_ID}",
"allowed_address_pairs": [],
"device_owner": "compute:None",
"port_security_enabled": true,
"binding:profile": {},
"fixed_ips": [
{
"subnet_id": "${subnetId}",
"ip_address": "10.1.0.3"
}
],
"id": "${portId}",
"security_groups": [
{
"tenant_id": "${tntId}",
"description": "Default security group",
"id": "${secGroupId}",
"security_group_rules": [
{
"remote_group_id": null,
"direction": "egress",
"remote_ip_prefix": null,
"protocol": null,
"ethertype": "IPv4",
"tenant_id": "${tntId}",
"port_range_max": null,
"port_range_min": null,
"id": "3f260b84-637a-4edc-8ba6-a5ff36b2ae79",
"security_group_id": "${secGroupId}"
},
{
"remote_group_id": null,
"direction": "egress",
"remote_ip_prefix": null,
"protocol": null,
"ethertype": "IPv6",
"tenant_id": "${tntId}",
"port_range_max": null,
"port_range_min": null,
"id": "9c3a324a-822d-4a60-b4d9-bc9fc8a890e9",
"security_group_id": "${secGroupId}"
},
{
"remote_group_id": "${secGroupId}",
"direction": "ingress",
"remote_ip_prefix": null,
"protocol": null,
"ethertype": "IPv6",
"tenant_id": "${tntId}",
"port_range_max": null,
"port_range_min": null,
"id": "a3dc2551-2939-4a0b-8113-bcbce704c0fd",
"security_group_id": "${secGroupId}"
},
{
"remote_group_id": "${secGroupId}",
"direction": "ingress",
"remote_ip_prefix": null,
"protocol": null,
"ethertype": "IPv4",
"tenant_id": "${tntId}",
"port_range_max": null,
"port_range_min": null,
"id": "efa8f393-1494-4370-87c2-693f1c109190",
"security_group_id": "${secGroupId}"
}
],
"name": "default"
}
],
"device_id": "${deviceId}",
"name": "",
"admin_state_up": true,
"network_id": "${netId}",
"tenant_id": "${tntId}",
"binding:vif_details": {},
"binding:vnic_type": "normal",
"binding:vif_type": "unbound",
"mac_address": "${macAddr}"
}
}'
EOF
)
cmd="$CURL_POST $body $CURL_RETURN_FORMAT $url 2>&1"
# "$1" is the optional expected HTTP status after the shifts above.
do_eval_command ${FUNCNAME[0]} "$1" "$cmd"
}
#--
# Create the network:floatingip port on the external network (POST /ports/),
# fixed IP 192.168.111.22.  The JSON tenant_id is deliberately empty —
# floating-IP ports are owned by the network, not the tenant.
# Args: tntId netId subnetId portId macAddress deviceId [expectedRc]
#   tntId is accepted for call-site symmetry but not used in the payload.
function create_port_floating_ip {
tntId=$1 ; shift
netId=$1 ; shift
subnetId=$1 ; shift
portId=$1 ; shift
macAddress=$1 ; shift
deviceId=$1 ; shift
url="${ODL}/ports/"
# Heredoc is eval'd verbatim as the curl -d payload; keep it byte-exact.
body=$(cat <<EOF
-d '
{
"port": {
"binding:host_id": "",
"allowed_address_pairs": [],
"device_owner": "network:floatingip",
"port_security_enabled": false,
"binding:profile": {},
"fixed_ips": [
{
"subnet_id": "${subnetId}",
"ip_address": "192.168.111.22"
}
],
"id": "${portId}",
"security_groups": [],
"device_id": "${deviceId}",
"name": "",
"admin_state_up": true,
"network_id": "${netId}",
"tenant_id": "",
"binding:vif_details": {},
"binding:vnic_type": "normal",
"binding:vif_type": "unbound",
"mac_address": "${macAddress}"
}
}'
EOF
)
cmd="$CURL_POST $body $CURL_RETURN_FORMAT $url 2>&1"
# "$1" is the optional expected HTTP status after the shifts above.
do_eval_command ${FUNCNAME[0]} "$1" "$cmd"
}
#--
# Create an unassociated floating IP (POST /floatingips/): router_id,
# fixed_ip_address and port_id are null until associate_floating_ip is run.
# Args: tntId netId floatIpId floatIpAddress [expectedRc]
function create_floating_ip {
tntId=$1 ; shift
netId=$1 ; shift
floatIpId=$1 ; shift
floatIpAddress=$1 ; shift
url="${ODL}/floatingips/"
# Heredoc is eval'd verbatim as the curl -d payload; keep it byte-exact.
body=$(cat <<EOF
-d '{
"floatingip": {
"floating_network_id": "${netId}",
"router_id": null,
"fixed_ip_address": null,
"floating_ip_address": "${floatIpAddress}",
"tenant_id": "${tntId}",
"status": "ACTIVE",
"port_id": null,
"id": "${floatIpId}"
}
}'
EOF
)
cmd="$CURL_POST $body $CURL_RETURN_FORMAT $url 2>&1"
# "$1" is the optional expected HTTP status after the shifts above.
do_eval_command ${FUNCNAME[0]} "$1" "$cmd"
}
#--
# Associate an existing floating IP with a VM port
# (PUT /floatingips/<floatIpId>), binding it to fixed IP 10.1.0.3.
# Args: tntId netId rtrId floatIpId floatIpAddress vmPortId [expectedRc]
#   expectedRc defaults to 200.
function associate_floating_ip {
# ${TNT1_ID} ${EXT_NET1_ID} ${TNT1_RTR_ID} ${FLOAT_IP1_ID} ${FLOAT_IP1_ADDRESS} ${TNT1_VM1_PORT_ID}
tntId=$1 ; shift
netId=$1 ; shift
rtrId=$1 ; shift
floatIpId=$1 ; shift
floatIpAddress=$1 ; shift
vmPortId=$1 ; shift
url="${ODL}/floatingips/${floatIpId}"
# Heredoc is eval'd verbatim as the curl -d payload; keep it byte-exact.
body=$(cat <<EOF
-d '{
"floatingip": {
"floating_network_id": "${netId}",
"router_id": "${rtrId}",
"fixed_ip_address": "10.1.0.3",
"floating_ip_address": "${floatIpAddress}",
"tenant_id": "${tntId}",
"status": "ACTIVE",
"port_id": "${vmPortId}",
"id": "${floatIpId}"
}
}'
EOF
)
cmd="$CURL_PUT $body $CURL_RETURN_FORMAT $url 2>&1"
# After the shifts, $1 is the optional expected HTTP status (default 200).
[ -z "$1" ] && expectedRc=200 || expectedRc=$1
do_eval_command ${FUNCNAME[0]} $expectedRc "$cmd"
}
#--
# Driver: '[ -z "" ]' is always true, so the full scenario below runs;
# flip the string to anything non-empty to fall through to the debug stub
# in the else-branch instead.  Calls with a trailing 400 assert that a
# duplicate create is rejected by the API.
if [ -z "" ]; then
# setup_ovs
check_get_code networks/
check_get_code networksbad/ 404
# External network + subnet, router, and router gateway port.
create_ext_net ${TNT1_ID} ${EXT_NET1_ID}
create_ext_subnet ${TNT1_ID} ${EXT_NET1_ID} ${EXT_SUBNET1_ID} 201
create_ext_subnet ${TNT1_ID} ${EXT_NET1_ID} ${EXT_SUBNET1_ID} 400
create_router ${TNT1_ID} ${TNT1_RTR_ID}
create_router ${TNT1_ID} ${TNT1_RTR_ID} 400
create_port_rtr_gateway ${TNT1_ID} ${TNT1_RTR_ID} ${EXT_NET1_ID} ${EXT_SUBNET1_ID} ${NEUTRON_PORT_TNT1_RTR_GW}
create_port_rtr_gateway ${TNT1_ID} ${TNT1_RTR_ID} ${EXT_NET1_ID} ${EXT_SUBNET1_ID} ${NEUTRON_PORT_TNT1_RTR_GW} 400
update_router_port_gateway ${TNT1_ID} ${TNT1_RTR_ID} ${EXT_NET1_ID} ${EXT_SUBNET1_ID} ${NEUTRON_PORT_TNT1_RTR_GW}
# Tenant network, subnet, DHCP port (plus OVS port), router interface.
create_tnt_net ${TNT1_ID} ${TNT1_NET1_NAME} ${TNT1_NET1_ID} ${TNT1_NET1_SEGM}
create_tnt_net ${TNT1_ID} ${TNT1_NET1_NAME} ${TNT1_NET1_ID} ${TNT1_NET1_SEGM} 400
create_tnt_subnet ${TNT1_ID} ${TNT1_SUBNET1_NAME} ${TNT1_NET1_ID} ${TNT1_SUBNET1_ID}
create_tnt_subnet ${TNT1_ID} ${TNT1_SUBNET1_NAME} ${TNT1_NET1_ID} ${TNT1_SUBNET1_ID} 400
create_port_dhcp ${TNT1_ID} ${TNT1_NET1_ID} ${TNT1_SUBNET1_ID} ${TNT1_NET1_DHCP_PORT_ID} ${TNT1_NET1_DHCP_MAC} ${TNT1_NET1_DHCP_DEVICE_ID}
create_ovs_port create_ovs_port_for_dhcp_net1 true ${TNT1_NET1_DHCP_OVS_PORT} ${TNT1_NET1_DHCP_MAC} ${TNT1_NET1_DHCP_PORT_ID}
create_ovs_port create_ovs_port_for_dhcp_net1 false ${TNT1_NET1_DHCP_OVS_PORT} ${TNT1_NET1_DHCP_MAC} ${TNT1_NET1_DHCP_PORT_ID}
update_port_dhcp ${TNT1_NET1_DHCP_PORT_ID} ${TNT1_NET1_DHCP_DEVICE_ID}
create_port_rtr_interface ${TNT1_ID} ${TNT1_RTR_ID} ${TNT1_NET1_ID} ${TNT1_SUBNET1_ID} ${NEUTRON_PORT_TNT1_RTR_NET1}
create_port_rtr_interface ${TNT1_ID} ${TNT1_RTR_ID} ${TNT1_NET1_ID} ${TNT1_SUBNET1_ID} ${NEUTRON_PORT_TNT1_RTR_NET1} 400
update_router_interface ${TNT1_ID} ${TNT1_RTR_ID} ${TNT1_SUBNET1_ID} ${NEUTRON_PORT_TNT1_RTR_NET1}
# VM port (plus OVS port) and floating IP lifecycle.
create_port_vm ${TNT1_ID} ${TNT1_NET1_ID} ${TNT1_SUBNET1_ID} ${TNT1_VM1_PORT_ID} ${TNT1_VM1_MAC} ${TNT1_VM1_DEVICE_ID}
create_port_vm ${TNT1_ID} ${TNT1_NET1_ID} ${TNT1_SUBNET1_ID} ${TNT1_VM1_PORT_ID} ${TNT1_VM1_MAC} ${TNT1_VM1_DEVICE_ID} 400
create_ovs_port create_ovs_port_for_vm1 true ${TNT1_NET1_VM1_OVS_PORT} ${TNT1_VM1_MAC} ${TNT1_VM1_PORT_ID} ${TNT1_VM1_VM_ID}
create_port_floating_ip "" ${EXT_NET1_ID} ${EXT_SUBNET1_ID} ${FLOAT_IP1_PORT_ID} ${FLOAT_IP1_MAC} ${FLOAT_IP1_DEVICE_ID}
create_floating_ip ${TNT1_ID} ${EXT_NET1_ID} ${FLOAT_IP1_ID} ${FLOAT_IP1_ADDRESS}
associate_floating_ip ${TNT1_ID} ${EXT_NET1_ID} ${TNT1_RTR_ID} ${FLOAT_IP1_ID} ${FLOAT_IP1_ADDRESS} ${TNT1_VM1_PORT_ID}
else
# Debug stub: fake out POSTs and OVS calls, then run selected steps by hand.
export DEBUG_FAKE_POST=yes ; export DEBUG_FAKE_OVS=yes ; echo testing
# associate_floating_ip ${TNT1_ID} ${EXT_NET1_ID} ${TNT1_RTR_ID} ${FLOAT_IP1_ID} ${FLOAT_IP1_ADDRESS} ${TNT1_VM1_PORT_ID}
fi
echo ok
| true
|
27eb3c8e5cb966fc4b01147c08ba81376fc2126a
|
Shell
|
michielkleinnijenhuis/EM
|
/snippets/3DEM/scratch_restored_pipeline.sh
|
UTF-8
| 15,340
| 2.515625
| 3
|
[] |
no_license
|
# EM pipeline setup: point PATH at the project anaconda install, define the
# dataset geometry, and build the list of per-tile datastem names
# "m000_<x0>-<x1>_<y0>-<y1>_<z0>-<z1>" (zero-padded to 5 digits) covering a
# 1000x1000 xy grid over the 5217x4460 volume, z slab 30-460.
export PATH=/data/ndcn-fmrib-water-brain/ndcn0180/anaconda2/bin:$PATH
# source activate scikit-image-devel_0.13
# conda install h5py scipy
# pip install nibabel
scriptdir="${HOME}/workspace/EM"
datadir="${DATA}/EM/M3/M3_S1_GNU/restored" && cd $datadir
dataset="m000"
xmax=5217
ymax=4460
xs=1000; ys=1000;
z=30; Z=460;
declare -a datastems
i=0
for x in `seq 0 $xs $xmax`; do
# The last (partial) tile in each axis is clamped to the volume boundary.
[ $x == 5000 ] && X=$xmax || X=$((x+xs))
for y in `seq 0 $ys $ymax`; do
[ $y == 4000 ] && Y=$ymax || Y=$((y+ys))
datastems[$i]=${dataset}_`printf %05d ${x}`-`printf %05d ${X}`_`printf %05d ${y}`-`printf %05d ${Y}`_`printf %05d ${z}`-`printf %05d ${Z}`
i=$((i+1))
done
done
### maskDS, maskMM and maskMM-0.02 # TODO: remove small components
# Generate and submit one SLURM batch script per x-column; each job runs
# prob2mask.py for every y-tile in that column (four masks per tile,
# backgrounded, joined by 'wait').  q="d" selects the short 'devel' queue.
for x in `seq 0 $xs $xmax`; do
[ $x == 5000 ] && X=$xmax || X=$((x+xs))
q="d"
qsubfile=$datadir/EM_prob2mask_submit_${x}-${X}.sh
echo '#!/bin/bash' > $qsubfile
echo "#SBATCH --nodes=2" >> $qsubfile
echo "#SBATCH --ntasks-per-node=10" >> $qsubfile
[ "$q" = "d" ] && echo "#SBATCH --time=00:10:00" >> $qsubfile || echo "#SBATCH --time=01:00:00" >> $qsubfile
echo "#SBATCH --job-name=EM_s2s" >> $qsubfile
for y in `seq 0 $ys $ymax`; do
[ $y == 4000 ] && Y=$ymax || Y=$((y+ys))
datastem=${dataset}_`printf %05d ${x}`-`printf %05d ${X}`_`printf %05d ${y}`-`printf %05d ${Y}`_`printf %05d ${z}`-`printf %05d ${Z}`
echo "python $scriptdir/convert/prob2mask.py \
$datadir $datastem -p \"\" stack -l 0 -u 10000000 -o _maskDS &" >> $qsubfile
echo "python $scriptdir/convert/prob2mask.py \
$datadir $datastem -p _probs0_eed2 stack -l 0.2 -o _maskMM &" >> $qsubfile
echo "python $scriptdir/convert/prob2mask.py \
$datadir $datastem -p _probs0_eed2 stack -l 0.02 -o _maskMM-0.02 &" >> $qsubfile
echo "python $scriptdir/convert/prob2mask.py \
$datadir $datastem -p '_probs' 'volume/predictions' -c 3 -l 0.3 -o _maskMB &" >> $qsubfile
done
echo "wait" >> $qsubfile
[ "$q" = "d" ] && sbatch -p devel $qsubfile || sbatch $qsubfile
done
### connected components in maskMM-0.02
# Submit per-column SLURM jobs running conn_comp.py on each tile's
# _maskMM-0.02 mask (one backgrounded task per y-tile).
for x in `seq 0 $xs $xmax`; do
[ $x == 5000 ] && X=$xmax || X=$((x+xs))
q="d"
qsubfile=$datadir/EM_conncomp_submit_${x}-${X}.sh
echo '#!/bin/bash' > $qsubfile
echo "#SBATCH --nodes=1" >> $qsubfile
echo "#SBATCH --ntasks-per-node=5" >> $qsubfile
[ "$q" = "d" ] && echo "#SBATCH --time=00:10:00" >> $qsubfile || echo "#SBATCH --time=01:00:00" >> $qsubfile
echo "#SBATCH --job-name=EM_s2s" >> $qsubfile
echo "export PATH=/data/ndcn-fmrib-water-brain/ndcn0180/anaconda2/bin:\$PATH" >> $qsubfile
echo "source activate scikit-image-devel_0.13" >> $qsubfile
for y in `seq 0 $ys $ymax`; do
[ $y == 4000 ] && Y=$ymax || Y=$((y+ys))
datastem=${dataset}_`printf %05d ${x}`-`printf %05d ${X}`_`printf %05d ${y}`-`printf %05d ${Y}`_`printf %05d ${z}`-`printf %05d ${Z}`
echo "python $scriptdir/supervoxels/conn_comp.py \
$datadir $datastem --maskMM _maskMM-0.02 stack &" >> $qsubfile
done
echo "wait" >> $qsubfile
[ "$q" = "d" ] && sbatch -p devel $qsubfile || sbatch $qsubfile
done
# to nifti's
# Convert each tile's _maskMM-0.02 and _labelMA h5 stacks to .nii.gz
# (voxel size 0.0073 x 0.0073 x 0.05, axis order zyx -> xyz) via per-column
# SLURM jobs.  NOTE(review): qsubfile name reuses EM_conncomp_submit_*.
for x in `seq 0 $xs $xmax`; do
[ $x == 5000 ] && X=$xmax || X=$((x+xs))
q="d"
qsubfile=$datadir/EM_conncomp_submit_${x}-${X}.sh
echo '#!/bin/bash' > $qsubfile
echo "#SBATCH --nodes=1" >> $qsubfile
echo "#SBATCH --ntasks-per-node=10" >> $qsubfile
[ "$q" = "d" ] && echo "#SBATCH --time=00:10:00" >> $qsubfile || echo "#SBATCH --time=01:00:00" >> $qsubfile
echo "#SBATCH --job-name=EM_s2s" >> $qsubfile
echo "export PATH=/data/ndcn-fmrib-water-brain/ndcn0180/anaconda2/bin:\$PATH" >> $qsubfile
echo "source activate scikit-image-devel_0.13" >> $qsubfile
for y in `seq 0 $ys $ymax`; do
[ $y == 4000 ] && Y=$ymax || Y=$((y+ys))
datastem=${dataset}_`printf %05d ${x}`-`printf %05d ${X}`_`printf %05d ${y}`-`printf %05d ${Y}`_`printf %05d ${z}`-`printf %05d ${Z}`
pf="_maskMM-0.02"
echo "python $scriptdir/convert/EM_stack2stack.py \
$datadir/${datastem}${pf}.h5 \
$datadir/${datastem}${pf}.nii.gz \
-e 0.0073 0.0073 0.05 -i 'zyx' -l 'xyz' &" >> $qsubfile
pf="_labelMA"
echo "python $scriptdir/convert/EM_stack2stack.py \
$datadir/${datastem}${pf}.h5 \
$datadir/${datastem}${pf}.nii.gz \
-e 0.0073 0.0073 0.05 -i 'zyx' -l 'xyz' &" >> $qsubfile
done
echo "wait" >> $qsubfile
[ "$q" = "d" ] && sbatch -p devel $qsubfile || sbatch $qsubfile
done
### manual deselections from _labelMA (takes about an hour for m000)
# Write the manual-edit table: one line per tile, "datastem: <label ids>",
# listing labels to delete from _labelMA.  First echo uses '>' to truncate,
# the rest append.
editsfile="m000_labelMAmanedit.txt"
echo "m000_00000-01000_00000-01000_00030-00460: 1" > $editsfile
echo "m000_00000-01000_01000-02000_00030-00460: 19 25 28" >> $editsfile
echo "m000_00000-01000_02000-03000_00030-00460: 58" >> $editsfile
echo "m000_00000-01000_03000-04000_00030-00460: 1 61" >> $editsfile
echo "m000_00000-01000_04000-04460_00030-00460: 8 12" >> $editsfile
echo "m000_01000-02000_00000-01000_00030-00460: 8 2 23 62" >> $editsfile
echo "m000_01000-02000_01000-02000_00030-00460: 26 45 43" >> $editsfile
echo "m000_01000-02000_02000-03000_00030-00460: 8 32 35 33" >> $editsfile
echo "m000_01000-02000_03000-04000_00030-00460: 1 35 54 55 81 82" >> $editsfile
echo "m000_01000-02000_04000-04460_00030-00460: 2 24" >> $editsfile
echo "m000_02000-03000_00000-01000_00030-00460: 9 30 55 57" >> $editsfile
echo "m000_02000-03000_01000-02000_00030-00460: 14 38 45 55" >> $editsfile
echo "m000_02000-03000_02000-03000_00030-00460: 12 29 40 69 68 74" >> $editsfile
echo "m000_02000-03000_03000-04000_00030-00460: 17 25 39 35 45 56 55 67 77" >> $editsfile
echo "m000_02000-03000_04000-04460_00030-00460: 4 1 12 25 26 27" >> $editsfile
echo "m000_03000-04000_00000-01000_00030-00460: 28 41" >> $editsfile
echo "m000_03000-04000_01000-02000_00030-00460: 31" >> $editsfile
echo "m000_03000-04000_02000-03000_00030-00460: 1 28 52" >> $editsfile
echo "m000_03000-04000_03000-04000_00030-00460: 36 63 66" >> $editsfile
echo "m000_03000-04000_04000-04460_00030-00460: 1 18 30 32 36 39 44" >> $editsfile
echo "m000_04000-05000_00000-01000_00030-00460: 21" >> $editsfile
echo "m000_04000-05000_01000-02000_00030-00460: 48" >> $editsfile
echo "m000_04000-05000_02000-03000_00030-00460: 35 38 46 49" >> $editsfile
echo "m000_04000-05000_03000-04000_00030-00460: 34 13 46" >> $editsfile
echo "m000_04000-05000_04000-04460_00030-00460: 8 14 18 21 30 24 33 36 35" >> $editsfile
echo "m000_05000-05217_00000-01000_00030-00460: 1 3" >> $editsfile
echo "m000_05000-05217_01000-02000_00030-00460: 1 4 5" >> $editsfile
echo "m000_05000-05217_02000-03000_00030-00460: 1 2 4 5" >> $editsfile
echo "m000_05000-05217_03000-04000_00030-00460: 2 3 5 7" >> $editsfile
echo "m000_05000-05217_04000-04460_00030-00460: 1 2" >> $editsfile
# grep $datastem m000_manedit.txt | awk '{$1 = ""; print $0;}'
# Submit per-column jobs running delete_labels.py; each tile's label list is
# looked up from $editsfile at submission time (grep strips the leading
# "datastem:" field with awk before passing the ids to -d).
for x in `seq 0 $xs $xmax`; do
[ $x == 5000 ] && X=$xmax || X=$((x+xs))
q="d"
qsubfile=$datadir/EM_dellabels_${x}-${X}.sh
echo '#!/bin/bash' > $qsubfile
echo "#SBATCH --nodes=1" >> $qsubfile
echo "#SBATCH --ntasks-per-node=5" >> $qsubfile
[ "$q" = "d" ] && echo "#SBATCH --time=00:10:00" >> $qsubfile || echo "#SBATCH --time=01:00:00" >> $qsubfile
echo "#SBATCH --job-name=EM_s2s" >> $qsubfile
for y in `seq 0 $ys $ymax`; do
[ $y == 4000 ] && Y=$ymax || Y=$((y+ys))
datastem=${dataset}_`printf %05d ${x}`-`printf %05d ${X}`_`printf %05d ${y}`-`printf %05d ${Y}`_`printf %05d ${z}`-`printf %05d ${Z}`
echo "python $scriptdir/supervoxels/delete_labels.py \
$datadir $datastem -d `grep $datastem $editsfile | awk '{$1 = ""; print $0;}'` &" >> $qsubfile
done
echo "wait" >> $qsubfile
[ "$q" = "d" ] && sbatch -p devel $qsubfile || sbatch $qsubfile
done
### watershed on prob_ics: 20G; 1h for m000_03000-04000_02000-03000_00030-00460_ws_l0.95_u1.00_s064.h5
# Submit per-column jobs running EM_watershed.py with threshold window
# [l, u] and seed size s.  q="" here, so jobs go to the normal queue
# (2h walltime, 25 GB per task).
# l=0.95; u=1; s=64;
l=0.99; u=1; s=5;
for x in `seq 0 $xs $xmax`; do
[ $x == 5000 ] && X=$xmax || X=$((x+xs))
q=""
qsubfile=$datadir/EM_supervoxels_${x}-${X}.sh
echo '#!/bin/bash' > $qsubfile
echo "#SBATCH --nodes=1" >> $qsubfile
echo "#SBATCH --ntasks-per-node=5" >> $qsubfile
echo "#SBATCH --mem-per-cpu=25000" >> $qsubfile
[ "$q" = "d" ] && echo "#SBATCH --time=00:10:00" >> $qsubfile || echo "#SBATCH --time=02:00:00" >> $qsubfile
echo "#SBATCH --job-name=EM_s2s" >> $qsubfile
echo "export PATH=/home/ndcn-fmrib-water-brain/ndcn0180/anaconda2/bin:\$PATH" >> $qsubfile
echo "source activate scikit-image-devel_0.13" >> $qsubfile
for y in `seq 0 $ys $ymax`; do
[ $y == 4000 ] && Y=$ymax || Y=$((y+ys))
datastem=${dataset}_`printf %05d ${x}`-`printf %05d ${X}`_`printf %05d ${y}`-`printf %05d ${Y}`_`printf %05d ${z}`-`printf %05d ${Z}`
echo "python $scriptdir/supervoxels/EM_watershed.py \
${datadir} ${datastem} -c 1 -l ${l} -u ${u} -s ${s} &" >> $qsubfile
done
echo "wait" >> $qsubfile
[ "$q" = "d" ] && sbatch -p devel $qsubfile || sbatch $qsubfile
done
### agglomerate watershedMA
# Submit per-column jobs running agglo_from_labelmask.py: agglomerate the
# watershed supervoxels (svoxpf) under the manually edited _labelMAmanedit
# labels, masked by _maskMA.
# svoxpf='_ws_l0.95_u1.00_s064'
svoxpf='_ws_l0.99_u1.00_s005'
maskpf='_maskMA'
for x in `seq 0 $xs $xmax`; do
[ $x == 5000 ] && X=$xmax || X=$((x+xs))
q="d"
qsubfile=$datadir/EM_aggloMA_${x}-${X}.sh
echo '#!/bin/bash' > $qsubfile
echo "#SBATCH --nodes=1" >> $qsubfile
echo "#SBATCH --ntasks-per-node=5" >> $qsubfile
[ "$q" = "d" ] && echo "#SBATCH --time=00:10:00" >> $qsubfile || echo "#SBATCH --time=01:00:00" >> $qsubfile
echo "#SBATCH --job-name=EM_agglo" >> $qsubfile
for y in `seq 0 $ys $ymax`; do
[ $y == 4000 ] && Y=$ymax || Y=$((y+ys))
datastem=${dataset}_`printf %05d ${x}`-`printf %05d ${X}`_`printf %05d ${y}`-`printf %05d ${Y}`_`printf %05d ${z}`-`printf %05d ${Z}`
echo "python $scriptdir/supervoxels/agglo_from_labelmask.py \
${datadir} ${datastem} \
-l _labelMAmanedit stack -s ${svoxpf} stack -m ${maskpf} &" >> $qsubfile
done
echo "wait" >> $qsubfile
[ "$q" = "d" ] && sbatch -p devel $qsubfile || sbatch $qsubfile
done
# to nifti's
# Convert each tile's _maskMA and agglomerated ${svoxpf}_labelMA h5 stacks
# to .nii.gz (same voxel size / axis reorder as the earlier conversion).
for x in `seq 0 $xs $xmax`; do
[ $x == 5000 ] && X=$xmax || X=$((x+xs))
q="d"
qsubfile=$datadir/EM_nifti_${x}-${X}.sh
echo '#!/bin/bash' > $qsubfile
echo "#SBATCH --nodes=1" >> $qsubfile
echo "#SBATCH --ntasks-per-node=10" >> $qsubfile
[ "$q" = "d" ] && echo "#SBATCH --time=00:10:00" >> $qsubfile || echo "#SBATCH --time=01:00:00" >> $qsubfile
echo "#SBATCH --job-name=EM_s2s" >> $qsubfile
echo "export PATH=/data/ndcn-fmrib-water-brain/ndcn0180/anaconda2/bin:\$PATH" >> $qsubfile
echo "source activate scikit-image-devel_0.13" >> $qsubfile
for y in `seq 0 $ys $ymax`; do
[ $y == 4000 ] && Y=$ymax || Y=$((y+ys))
datastem=${dataset}_`printf %05d ${x}`-`printf %05d ${X}`_`printf %05d ${y}`-`printf %05d ${Y}`_`printf %05d ${z}`-`printf %05d ${Z}`
pf="_maskMA"
echo "python $scriptdir/convert/EM_stack2stack.py \
$datadir/${datastem}${pf}.h5 \
$datadir/${datastem}${pf}.nii.gz \
-e 0.0073 0.0073 0.05 -i 'zyx' -l 'xyz' &" >> $qsubfile
pf="${svoxpf}_labelMA"
echo "python $scriptdir/convert/EM_stack2stack.py \
$datadir/${datastem}${pf}.h5 \
$datadir/${datastem}${pf}.nii.gz \
-e 0.0073 0.0073 0.05 -i 'zyx' -l 'xyz' &" >> $qsubfile
done
echo "wait" >> $qsubfile
[ "$q" = "d" ] && sbatch -p devel $qsubfile || sbatch $qsubfile
done
### fill holes in myelinated axons
# Submit $nodes jobs of $tasks tiles each, indexing into the datastems array
# built at the top of the script (n*tasks+t), running fill_holes.py per tile.
nodes=3
tasks=10
memcpu=6000
wtime=10:00:00
q=""
for n in `seq 0 $((nodes-1))`; do
qsubfile=$datadir/EM_fillholes_${n}.sh
echo '#!/bin/bash' > $qsubfile
echo "#SBATCH --nodes=1" >> $qsubfile
echo "#SBATCH --ntasks-per-node=${tasks}" >> $qsubfile
echo "#SBATCH --mem-per-cpu=${memcpu}" >> $qsubfile
[ "$q" = "d" ] && echo "#SBATCH --time=00:10:00" >> $qsubfile || echo "#SBATCH --time=${wtime}" >> $qsubfile
echo "#SBATCH --job-name=EM_fill" >> $qsubfile
echo "export PATH=/data/ndcn-fmrib-water-brain/ndcn0180/anaconda2/bin:\$PATH" >> $qsubfile
echo "source activate scikit-image-devel_0.13" >> $qsubfile
for t in `seq 0 $((tasks-1))`; do
datastem=${datastems[n*tasks+t]}
echo "python $scriptdir/supervoxels/fill_holes.py \
$datadir $datastem \
-l '_ws_l0.99_u1.00_s005_labelMA' 'stack' -m '_maskMA' 'stack' \
--maskMM '_maskMM' 'stack' --maskMA '_maskMA' 'stack' \
-o '_filled' -p '_holes' -w 2 &" >> $qsubfile
done
echo "wait" >> $qsubfile
[ "$q" = "d" ] && sbatch -p devel $qsubfile || sbatch $qsubfile
done
### Neuroproof agglomeration
# Submit one single-task, 60 GB SLURM job per tile running
# NeuroProof_stack with the pre-trained classifier from $trainset.
# cp /data/ndcn-fmrib-water-brain/ndcn0180/EM/Neuroproof/M3_S1_GNU_NP/train/m000_01000-01500_01000-01500_00030-00460_NPminimal_ws_l0.95_u1.00_s064_PA_str2_iter5_parallel.h5 ../../M3/M3_S1_GNU/restored/
# cp /data/ndcn-fmrib-water-brain/ndcn0180/EM/Neuroproof/M3_S1_GNU_NP/train/m000_01000-01500_01000-01500_00030-00460_NPminimal_ws_l0.99_u1.00_s005_PA_str2_iter5_parallel.h5 ../../M3/M3_S1_GNU/restored/
export PATH=/data/ndcn-fmrib-water-brain/ndcn0180/anaconda2/bin:$PATH
CONDA_PATH=$(conda info --root)
PREFIX=${CONDA_PATH}/envs/neuroproof-test
NPdir=/home/ndcn-fmrib-water-brain/ndcn0180/workspace/Neuroproof_minimal
trainset="m000_01000-01500_01000-01500_00030-00460"
# svoxpf='_ws_l0.95_u1.00_s064' # 50G;>10min for m000_00000-01000_00000-01000
# svoxpf='_ws_l0.95_u1.00_s064_labelMA'
svoxpf='_ws_l0.99_u1.00_s005_labelMA'
classifier="_NPminimal_ws_l0.95_u1.00_s064_PA_str2_iter5_parallel"
cltype='h5'
thr=0.1
alg=1
for x in `seq 0 $xs $xmax`; do
[ $x == 5000 ] && X=$xmax || X=$((x+xs))
for y in `seq 0 $ys $ymax`; do
[ $y == 4000 ] && Y=$ymax || Y=$((y+ys))
q=''
qsubfile=$datadir/EM_NPagglo_${x}-${X}_${y}-${Y}.sh
echo '#!/bin/bash' > $qsubfile
echo "#SBATCH --nodes=1" >> $qsubfile
echo "#SBATCH --ntasks-per-node=1" >> $qsubfile
echo "#SBATCH --mem-per-cpu=60000" >> $qsubfile
[ "$q" = "d" ] && echo "#SBATCH --time=00:10:00" >> $qsubfile || echo "#SBATCH --time=01:00:00" >> $qsubfile
echo "#SBATCH --job-name=EM_s2s" >> $qsubfile
echo "export PATH=/data/ndcn-fmrib-water-brain/ndcn0180/anaconda2/bin:\$PATH" >> $qsubfile
echo "export LD_LIBRARY_PATH=${PREFIX}/lib" >> $qsubfile
datastem=${dataset}_`printf %05d ${x}`-`printf %05d ${X}`_`printf %05d ${y}`-`printf %05d ${Y}`_`printf %05d ${z}`-`printf %05d ${Z}`
echo "$NPdir/NeuroProof_stack \
-watershed $datadir/${datastem}${svoxpf}.h5 stack \
-prediction $datadir/${datastem}_probs.h5 volume/predictions \
-output $datadir/${datastem}_prediction${classifier}${cltype}_thr${thr}_alg${alg}M.h5 stack \
-classifier $datadir/${trainset}${classifier}.${cltype} \
-threshold ${thr} -algorithm ${alg} &" >> $qsubfile # -nomito
echo "wait" >> $qsubfile
[ "$q" = "d" ] && sbatch -p devel $qsubfile || sbatch $qsubfile
done
done
### TODO: EED probs3_eed2?
### classify neurons MA/UA
# Submit one 4-node MPI job per tile running EM_classify_neurons.py on the
# Neuroproof supervoxel prediction.  NOTE(review): the standalone
# x=...;X=...;y=...;Y=... assignment line looks like a leftover from
# single-tile testing; the loops below overwrite those values.
module load mpi4py/1.3.1
module load hdf5-parallel/1.8.14_mvapich2_gcc
module load python/2.7__gcc-4.8
x=2000;X=3000;y=2000;Y=3000;z=30;Z=460;
for x in `seq 0 $xs $xmax`; do
[ $x == 5000 ] && X=$xmax || X=$((x+xs))
for y in `seq 0 $ys $ymax`; do
[ $y == 4000 ] && Y=$ymax || Y=$((y+ys))
datastem="${dataset}_`printf %05d ${x}`-`printf %05d ${X}`_`printf %05d ${y}`-`printf %05d ${Y}`_`printf %05d ${z}`-`printf %05d ${Z}`"
q=""
qsubfile=$datadir/EM_classify_${x}-${X}.sh
echo '#!/bin/bash' > $qsubfile
echo "#SBATCH --nodes=4" >> $qsubfile
echo "#SBATCH --ntasks-per-node=16" >> $qsubfile
echo "#SBATCH --mem-per-cpu=6000" >> $qsubfile # 4GB per core (16 cores is too much for small nodes)
[ "$q" = "d" ] && echo "#SBATCH --time=00:10:00" >> $qsubfile || echo "#SBATCH --time=10:00:00" >> $qsubfile
echo "#SBATCH --job-name=EM_agglo" >> $qsubfile
echo "#SBATCH --job-name=classify" >> $qsubfile
echo ". enable_arcus-b_mpi.sh" >> $qsubfile
echo "mpirun \$MPI_HOSTS python $scriptdir/mesh/EM_classify_neurons.py \
$datadir $datastem \
--supervoxels '_prediction_NPminimal_ws_l0.95_u1.00_s064_PA_str2_iter5_parallelh5_thr0.1_alg1' 'stack' \
-o '_per' -m" >> $qsubfile
[ "$q" = "d" ] && sbatch -p devel $qsubfile || sbatch $qsubfile
done
done
| true
|
40efbe79d188d48efea10feb9ca57da27c05f78f
|
Shell
|
f0x52/dots
|
/zsh/.zshrc
|
UTF-8
| 2,023
| 2.53125
| 3
|
[] |
no_license
|
# Interactive zsh configuration: local overrides, env, aliases, keybindings,
# completion, and history settings.
source ~/.localzsh
# export vars
typeset -U path
path+=("${HOME}/bin")
# enviroment
#export EDITOR="st -e nvim"
export EDITOR="nvim"
export ANSIBLE_NOCOWS=1
# aliases
alias :q="exit"
alias free="free -h"
alias grep="grep --color"
alias history="fc -l 1"
#alias ls="ls --color -F --group-directories-first"
# keys
# Map terminfo capabilities to widgets; guards skip keys the terminal lacks.
typeset -A key
key[Delete]=${terminfo[kdch1]}
[[ -n "${key[Delete]}" ]] && bindkey "${key[Delete]}" delete-char
[[ -n "$key[Up]" ]] && bindkey -- "$key[Up]" up-line-or-beginning-search
[[ -n "$key[Down]" ]] && bindkey -- "$key[Down]" down-line-or-beginning-search
bindkey "${terminfo[kpp]}" up-line-or-history # [PageUp] - Up a line of history
bindkey "${terminfo[knp]}" down-line-or-history # [PageDown] - Down a line of history
bindkey "${terminfo[khome]}" beginning-of-line # [Home] - Go to beginning of line
bindkey "${terminfo[kend]}" end-of-line # [End] - Go to end of line
bindkey '^[[1;5C' forward-word # [Ctrl-RightArrow] - move forward one word
bindkey '^[[1;5D' backward-word # [Ctrl-LeftArrow] - move backward one word
# theming
eval "$(dircolors -b)"
# completion
autoload -Uz compinit
compinit -d "${HOME}/.zcompdump-${ZSH_VERSION}"
zstyle ':completion:*' list-dirs-first true
zstyle ':completion:*' matcher-list 'm:{a-zA-Z-_}={A-Za-z_-}' 'r:|=*' 'l:|=* r:|=*'
zstyle ':completion:*' list-colors "$LS_COLORS"
zstyle ':completion:*' menu select auto
zstyle ':completion::complete:*' cache-path "${HOME}/.cache/zcompcache"
zstyle ':completion::complete:*' use-cache 1
zstyle ':completion:*' rehash true
#history
# Up/Down search history for lines starting with what's already typed.
HISTFILE=$HOME/.zsh_history
SAVEHIST=100000
HISTSIZE=100000
autoload -Uz up-line-or-beginning-search down-line-or-beginning-search
zle -N up-line-or-beginning-search
zle -N down-line-or-beginning-search
setopt append_history
setopt extended_history
setopt hist_expire_dups_first
setopt hist_ignore_dups
setopt hist_ignore_space
setopt hist_verify
setopt inc_append_history
setopt share_history
|
54459e7c6a066685f90c5df8d823b64a5b62e0f7
|
Shell
|
ophum/n0web
|
/src/proto-gen.sh
|
UTF-8
| 1,144
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Generate TypeScript/JavaScript gRPC-web client code for every n0proto
# package found under ./vender/n0stack that contains .proto files.
# Extra arguments to this script are forwarded to protoc.
CLIENT_OUT_BASE_DIR=n0proto.ts
PROTOC_GEN_TS_PATH="$(yarn bin)/protoc-gen-ts"
mkdir -p "${CLIENT_OUT_BASE_DIR}"
# Candidate package directories (paths are assumed not to contain spaces).
proto_dirs=`find ./vender/n0stack -type d | grep -v "\.git" | grep -v "test"`
for d in ${proto_dirs}
do
# Only process directories that actually contain .proto files.
# (compgen -G replaces the old "ls ...; [ $? = 0 ]" antipattern.)
if compgen -G "${d}/*.proto" > /dev/null; then
echo '============================='
echo "$d"
# Mirror the package path under the client output base directory.
CLIENT_OUT_DIR=${CLIENT_OUT_BASE_DIR}/`echo $d | sed -e 's/\.\/vender\/n0stack\/n0proto\/n0stack\///g'`
mkdir -p "$CLIENT_OUT_DIR"
echo "$CLIENT_OUT_DIR"
# "$@" (not $*) so forwarded protoc flags containing spaces survive.
protoc -I"${d}" \
-I./vender \
-I./vender/n0stack/n0proto \
-I./vender/protobuf/src \
-I./vender/grpc-gateway \
-I./vender/grpc-gateway/third_party/googleapis \
--plugin="protoc-gen-ts=${PROTOC_GEN_TS_PATH}" \
--js_out="import_style=commonjs,binary:${CLIENT_OUT_DIR}" \
--grpc-web_out="import_style=typescript,mode=grpcwebtext:${CLIENT_OUT_DIR}" \
"$@" "${d}"/*.proto
# Prepend lint-suppression headers to every generated .js file.
for f in "${CLIENT_OUT_DIR}"/*.js; do
printf '/* eslint-disable */\n//@ts-nocheck\n' | cat - "${f}" > temp && mv temp "${f}"
done
fi
done
# --js_out="import_style=commonjs,binary:${CLIENT_OUT_DIR}" \
# --ts_out="service=true:${CLIENT_OUT_DIR}" \
| true
|
a294a1963107782707a333ef721911a42b79d66c
|
Shell
|
appdesign1987/scripts
|
/Backup.sh
|
UTF-8
| 1,675
| 3.359375
| 3
|
[] |
no_license
|
#!/usr/bin/bash
#
# Bootstrap zertrin/duplicity-backup and run periodic backups to S3.
# Expects these environment variables from the caller:
#   AWS_ACCES_KEY  - AWS access key id (spelling matches the caller's env)
#   AWS_SECRET_KEY - AWS secret access key
#   bucket         - target S3 bucket
#   Backupfolder   - directory to back up
#   hours          - interval in hours between backup runs
#create folder for duplicity (-p: idempotent on restarts)
mkdir -p /duplicity
#git clone duplicity backup
cd /duplicity && git clone https://github.com/zertrin/duplicity-backup.git
cd /duplicity/duplicity-backup && mv * ../
cd /duplicity && rm -R duplicity-backup
chmod +x /duplicity/duplicity-backup.sh
# check if we need to do a restore if not then check if duplicity-backup.conf exist
# if restore is yes we are going to skip all of this and restore the files.
#if [ $restore -eq 0 ]; then
# Bug fixes vs. original: missing ';' before 'then' (syntax error), and
# 'sleep h$hours' put the unit before the number, so sleep failed and the
# loop busy-spun.  'sleep "${hours}h"' uses GNU sleep's duration suffix.
if [ -f /duplicity/duplicity-backup.conf ]; then
echo the config file exist we are going to start the backup
while true; do sh /duplicity/duplicity-backup.sh --backup; sleep "${hours}h"; done
fi
echo the config file does not exist we are going to make it and make cron options
#create config file (heredoc delimiter unquoted on purpose: expand env vars)
cat >>/duplicity/duplicity-backup.conf <<EOL
AWS_ACCESS_KEY_ID="$AWS_ACCES_KEY"
AWS_SECRET_ACCESS_KEY="$AWS_SECRET_KEY"
ENCRYPTION='no'
ROOT="$Backupfolder"
DEST="s3+http://s3.amazonaws.com/$bucket/$Backupfolder"
STATIC_OPTIONS="--full-if-older-than 14D --s3-use-new-style --s3-european-buckets"
CLEAN_UP_TYPE="remove-all-but-n-full"
CLEAN_UP_VARIABLE="4"
EOL
#set our cronjob ot something to make it run every x hours
#crontab -l > backupjob
#echo "0 */$hours * * * sh /duplicity/duplicity-backup.sh --backup --full" >> backupjob
#install cron job
#crontab backupjob
#rm backupjob
while true; do sh /duplicity/duplicity-backup.sh --backup; sleep "${hours}h" ; done
#else
#So the restore eq value said it's not 0 so we are going to do a restore! and make a cronjob afterwarts
#sh /duplicity/duplicity-backup.sh --restore $Backupfolder
#fi
| true
|
baafe6e4f46b53988734593b888ed2b6d2535bb4
|
Shell
|
sssho/dotfiles
|
/.config/zsh/30_aliases.zsh
|
UTF-8
| 989
| 2.984375
| 3
|
[] |
no_license
|
# Interactive zsh aliases: user overrides, global pipeline helpers, and
# cache-aware wrappers around common tools (via the external 'cachef').

# Load user-specific alias overrides when present.
if [[ -r "$XDG_CONFIG_HOME"/user/aliases ]]; then
source "$XDG_CONFIG_HOME"/user/aliases
fi
# Global aliases (zsh 'alias -g'): expand anywhere on the command line,
# typically appended to a pipeline.
alias -g A="| awk '{ print }'"
alias -g L='|& less'
alias -g G='| grep --color=always'
alias -g S="| sed -e 's///g'"
alias -g X='| xargs'
alias -g H='--help | less'
# bat wrapper: record the viewed file(s) with cachef, then display them.
if which bat &> /dev/null && which cachef &> /dev/null; then
_bat_with_cachef() {
cachef $@
bat $@
}
alias b='_bat_with_cachef'
fi
# X11 clipboard helpers.
if which xclip &> /dev/null; then
# C: pipe command output into the clipboard.
alias -g C='| xclip -selection c'
# p: push clipboard contents onto the editable zsh command line.
alias p='print -z $(xclip -o -selection c)'
fi
if which rg &> /dev/null; then
# R: case-insensitive ripgrep over a pipeline.
alias -g R='| rg -i'
fi
# vim wrapper: record the file with cachef, then open it in the running
# vim server instance named HOGE.
if which cachef &> /dev/null; then
_vim_with_cachef() {
cachef $@
vim --servername HOGE --remote $@
}
alias v='_vim_with_cachef'
fi
# emacsclient wrapper: record the file, then open it without blocking (-n).
if which cachef &> /dev/null; then
_emacs_with_cachef() {
cachef $@
emacsclient -n $@
}
alias e='_emacs_with_cachef'
fi
# gr: jump to the repository root (requires the cd-gitroot plugin function).
if declare -f cd-gitroot > /dev/null; then
alias gr='cd-gitroot'
fi
| true
|
976cbe9778c622d2206129e75890308c8a53045f
|
Shell
|
mmatschiner/kumara
|
/src/get_pairwise_distance.sh
|
UTF-8
| 1,891
| 2.984375
| 3
|
[] |
no_license
|
# m_matschiner Thu Apr 19 10:43:13 CEST 2018

# Compute pairwise chloroplast-sequence distances among all CL1-clade
# specimens (via get_pairwise_distance.rb) and tabulate them; also
# extract the subset of distances involving the Banks and Solander
# specimen into its own table.

# Make the output directory if it doesn't exist yet.
mkdir -p ../res/tables

# Set the input and output files.
infile="../data/fasta/chloroplast.aligned.gblocks.subset.nex"
table_all="../res/tables/dists.all.txt"
table_banks_and_solander="../res/tables/dists.banks_and_solander.txt"

# Single definition of the CL1-clade specimen list (it was previously
# duplicated verbatim in both loop headers).
specimens="I_batatas_CIP441416 I_batatas_CIP441427 I_batatas_CIP400433 I_batatas_CIP420068 Banks_and_Solander I_batatas_CIP420386 I_batatas_CIP401523 I_batatas_CIP400941 I_batatas_CIP441177 I_batatas_CIP400157 I_batatas_CIP441314 I_batatas_CIP420602 I_batatas_DMC_407 I_batatas_CIP440615 I_batatas_CIP400218 I_batatas_CIP440551 I_batatas_CIP400453 I_batatas_CIP400786 I_batatas_MN_37607 I_batatas_CIP400423 I_batatas_CIP400551 I_batatas_CIP420882 I_batatas_CIP400287 I_batatas_CIP400033"

# Write the header line.
echo -e "input_file\tseq1\tseq2\tabsolute_distance\tn_sites\tproportional_distance" > ${table_all}
echo -e "input_file\tseq1\tseq2\tabsolute_distance\tn_sites\tproportional_distance" > ${table_banks_and_solander}

# Calculate distances among all specimens of the CL1 clade.
for spc1 in ${specimens}
do
	for spc2 in ${specimens}
	do
		# Skip self-comparisons.
		if [ ! ${spc1} == ${spc2} ]
		then
			ruby get_pairwise_distance.rb ${infile} ${spc1} ${spc2} >> ${table_all}
		fi
	done
done

# Make a table only for the distances with the Banks and Solander specimen.
cat ${table_all} | grep Banks_and_Solander >> ${table_banks_and_solander}
| true
|
c3f082ca833f67a647b92ed63fe5e851698b9afb
|
Shell
|
crg8055/USP-Lab-5th-Sem
|
/Week 4/2. Sum_of_even_no.sh
|
UTF-8
| 107
| 2.8125
| 3
|
[] |
no_license
|
# Sum all even numbers strictly below a user-supplied limit and print
# the total as "sum:<value>".

# Sum the even numbers 2, 4, ... strictly below $1; print the total.
sum_even_below() {
	local limit=$1
	local i=2
	# BUG FIX: sum is now initialized explicitly (the original relied on
	# an unset variable expanding to 0 in arithmetic context).
	local sum=0
	while [ "$i" -lt "$limit" ]
	do
		sum=$((sum + i))
		i=$((i + 2))
	done
	echo "$sum"
}

echo "enter limit"
# Default to 0 on EOF/empty input instead of erroring in the comparison.
read -r n || n=0
echo "sum:$(sum_even_below "${n:-0}")"
| true
|
2a516a5d7b32fe4fb2212e74c9a05fb187235fc9
|
Shell
|
mehuljn/crossover_project
|
/mybb_install/install.sh
|
UTF-8
| 1,084
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
# Deploy MyBB into the web root, render its PHP config files from
# environment variables, seed the database schema, and fix permissions.
# Required env vars: __MYBB_DOMAINURL__, __MYBB_DATABASE_HOST__,
# __MYBB_DATABASE_USER__, __MYBB_DATABASE_PASSWORD__.

CONFIG="./mybb_config"
ORIG="./mybb"
WWWROOT="/var/www/html"

# Clean-up and copy files.
cp -r "$ORIG"/* "$WWWROOT"/
# Substitute deployment-specific placeholders into the MyBB config files.
sed -e "s/__MYBB_DOMAINURL__/${__MYBB_DOMAINURL__}/g" "${CONFIG}/settings.php" > "${WWWROOT}/inc/settings.php"
sed -e "s/__MYBB_DATABASE_HOST__/${__MYBB_DATABASE_HOST__}/g" \
-e "s/__MYBB_DATABASE_USER__/${__MYBB_DATABASE_USER__}/g" \
-e "s/__MYBB_DATABASE_PASSWORD__/${__MYBB_DATABASE_PASSWORD__}/g" \
"${CONFIG}/config.php" > "${WWWROOT}/inc/config.php"

# Initialize database. A failure is tolerated on purpose: it is assumed
# to mean the schema was already loaded on a previous run.
sed -e "s/__MYBB_DOMAINURL__/${__MYBB_DOMAINURL__}/g" \
"${CONFIG}/MyBB_InitDB.sql" | mysql \
--user="$__MYBB_DATABASE_USER__" \
--password="$__MYBB_DATABASE_PASSWORD__" \
--host="$__MYBB_DATABASE_HOST__" \
--database="mybb" || echo "WE ASSUME DATA ALREADY EXISTS!"

cd "$WWWROOT"
# chown www-data:www-data *
chmod 666 inc/config.php inc/settings.php
chmod 666 inc/languages/english/*.php inc/languages/english/admin/*.php
# NOTE(review): the next line is redundant — the line after it repeats the
# same chmod and additionally covers admin/backups/.
chmod 777 cache/ cache/themes/ uploads/ uploads/avatars/
chmod 777 cache/ cache/themes/ uploads/ uploads/avatars/ admin/backups/
| true
|
14c06a51121a0ef91c252c28c0501b46f521e323
|
Shell
|
psnewer/leisu
|
/src/db/run_refresh.sh
|
UTF-8
| 474
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
# Rebuild match/odds data and regenerate engine conditions, aborting
# with a non-zero status as soon as any step fails.

cd `dirname $0`
rm -f ids.json
echo "{}" > ids.json

# Report the status of the previous command; exit non-zero on failure.
#   $1 - step name used in log messages
function checkError()
{
	if [ $? -eq 0 ]
	then
		echo "$1 success"
	else
		echo "$1 failed"
		# BUG FIX: this previously ran 'exit 0', which reported success
		# to callers (cron, CI) even when a step had failed.
		exit 1
	fi
}

python matchDB.py
checkError "matchDB"
python oddsDB.py
checkError "oddsDB"

cd /Users/miller/Documents/workspace/leisu/src/engine/script
python get_groupcondition.py
checkError "get_groupcondition"
python get_extractorcondition.py
checkError "get_extractorcondition.py"
| true
|
49e94630762a5badf1453d2fc2278c89d697b7a1
|
Shell
|
khoadoan/system-utils
|
/hadoop-utils/GetLogs.sh
|
UTF-8
| 1,257
| 3.890625
| 4
|
[] |
no_license
|
#!/bin/bash
# Collect Hadoop task logs for a job (or one task) from every slave node
# via scp into /var/tmp/<jobId>, then optionally page through them.
# Usage: GetLogs.sh <jobId | taskId>

. BashLib.sh || exit 1
. HadoopLib.sh || exit 1

jobId=${1:?bad or missing job id}
# A task id was given: derive the job id from it and remember the task.
if [[ $jobId =~ ^task ]]
then
taskId=${jobId}
jobId=${taskId/task/job}
jobId=${jobId/_[mr]_[0-9]*/}
logMsg "jobId: ${jobId}"
logMsg "taskId: ${taskId}"
fi

srcDir=/mnt/var/log/hadoop/userlogs
tgtDir=/var/tmp/${jobId}
# Start from an empty target directory.
if [ -d ${tgtDir} ]
then
rm -fr ${tgtDir}/*
else
mkdir ${tgtDir}
fi

# set the target directory if it's not specified
keyFile=${HOME}/.ssh/verve-shared.pem
# Pull logs from each slave (getSlaves is provided by HadoopLib.sh).
for ip in $(getSlaves)
do
mkdir -p ${tgtDir}/${ip}
if [ -z "${taskId}" ]
then
# get all attempts
scp -o StrictHostKeyChecking=false -i ${keyFile} -r ${ip}:${srcDir}/${jobId} ${tgtDir}/${ip} 2>/dev/null
else
# get only attempts for specified task
scp -o StrictHostKeyChecking=false -i ${keyFile} -r ${ip}:${srcDir}/${jobId}/${taskId/task/attempt}* ${tgtDir}/${ip} 2>/dev/null
fi
done

# Interactive viewing: everything, only syslog files, or only stderr files.
read -p "view logs [y - all, 1 - syslog, 2 - stderr]: " ans
case ${ans} in
y)
find ${tgtDir} -type f -exec cat {} \; | less -X
;;
1)
for f in $(find ${tgtDir} -name "syslog")
do
echo ${f}
cat ${f}
done | less -X
;;
2)
for f in $(find ${tgtDir} -name "stderr")
do
echo ${f}
cat ${f}
done | less -X
;;
*)
logMsg "log collected to ${tgtDir}"
;;
esac
| true
|
f1d55e14366ed3f7ff493d96653137c7cf47d585
|
Shell
|
treykee/envgen-scripts
|
/preClone.sh
|
UTF-8
| 2,452
| 3.890625
| 4
|
[] |
no_license
|
#!/bin/bash
# Prepare a CentOS/RHEL VM for cloning/templating: stop logging services,
# purge old kernels and logs, and strip machine-specific identity
# (MAC/UUID traces, udev rules, SSH host keys, shell history).
# Must be run as root.
# Init
# Make sure only root can run our script
if [[ $EUID -ne 0 ]];
then
	printf "This script must be run as root" 1>&2
	exit 1
else
	# Set variables
	logServices=(rsyslog auditd)
	logDir="/var/log"
	logFiles=(/var/log/audit/audit.log /var/log/wtmp /var/log/lastlog /var/log/grubby /var/log/messages /var/log/secure)
	ifcfgFiles=( $(find /etc/sysconfig/network-scripts/ -type f | grep -E 'ifcfg-.*?[^lo]') )

	# Check if required packages are installed.
	rpm -qa | grep -qw yum-utils || yum install -y yum-utils

	# Step 0: Stop logging processes.
	for s in "${logServices[@]}"
	do
		# BUG FIX: the original check was inverted, and the 'service stop'
		# call was embedded inside the printf string (so it never ran).
		if ps auxw | grep "$s" | grep -v grep > /dev/null 2>&1
		then
			printf "Stopping %s\n" "$s"
			service "$s" stop
		else
			printf "%s is already stopped!\n" "$s"
		fi
	done

	# Step 1: Remove old kernels.
	package-cleanup -y --oldkernels --count=1

	# Step 2: Clean out yum.
	yum clean all

	# Step 3: Force the logs to rotate & remove logs we don't need.
	logrotate -f /etc/logrotate.conf
	find $logDir -name "*-????????" -type f -delete
	find $logDir -name "dmesg.old" -type f -delete
	find $logDir -name "anaconda" -type f -delete

	# Step 4: Truncate the audit logs (and other logs we want to keep placeholders for).
	for f in "${logFiles[@]}"
	do
		if [ -f "$f" ]
		then
			cat /dev/null > "$f"
		else
			touch "$f"
		fi
	done

	# Step 5: Remove the udev persistent device rules.
	# BUG FIX: '[[ -f /path/70* ]]' never matched (no pathname expansion
	# inside [[ ]]), and the quoted glob was passed to rm literally.
	if compgen -G "/etc/udev/rules.d/70*" > /dev/null
	then
		rm -f /etc/udev/rules.d/70*
	else
		printf "No udev persistent device rules exist\n" >&2
	fi

	# Step 6: Remove the traces of the template MAC address and UUIDs.
	# BUG FIX: '[ ifcfgFiles ]' tested a literal string and was always true;
	# test the array length instead.
	if [ ${#ifcfgFiles[@]} -gt 0 ]
	then
		for f in "${ifcfgFiles[@]}"
		do
			# BUG FIX: the alternation (HWADDR|UUID) needs extended regex (-E).
			sed -i -E '/^(HWADDR|UUID)=/d' "$f"
		done
	else
		printf "No virtual network adapter MAC address templates exist\n" >&2
	fi

	# Step 7: Clean /tmp out.
	rm -rf /tmp/* &>/dev/null
	rm -rf /var/tmp/* &>/dev/null

	# Step 8: Remove the SSH host keys.
	rm -f /etc/ssh/*key* >&2

	# Step 9: Remove the root user's shell history.
	rm -f ~root/.bash_history
	history -c
	unset HISTFILE

	# Step 10: Remove the root user's SSH history & kickstart configuration file.
	rm -rf ~root/.ssh/
	rm -f ~root/anaconda-ks.cfg
fi
| true
|
90d067923f17d035c93d4d2307e69005bcccf281
|
Shell
|
VanirLab/vanir-gui-agent-linux
|
/appvm-scripts/etc/init.d/vanir-gui-agent
|
UTF-8
| 765
| 3.546875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
#
# chkconfig: 345 90 90
# description: Starts Vanir GUI agent
#
# SysV init script controlling the Vanir GUI agent on display :0.

# Source function library.
. /etc/rc.d/init.d/functions

# Launch the GUI agent; always reports success to the console.
start()
{
	echo -n $"Starting Vanir GUI agent:"
	rm -f /tmp/vanir-session-env /tmp/vanir-session-waiter
	# start console-kit-daemon
	/usr/bin/ck-list-sessions > /dev/null 2>&1
	# pretend that the user is at the local console
	touch /var/run/console/user
	DISPLAY=:0 /usr/bin/vanir-gui 2> /var/log/vanir/gui-agent.log &
	export DISPLAY=:0
	success
	echo ""
	return 0
}

# Terminate the running agent.
stop()
{
	echo -n "Stopping Vanir GUI agent:"
	killall vanir-gui && success || failure
	echo ""
	return 0
}

case "$1" in
  start)
	start
	RETVAL=$?
	;;
  stop)
	stop
	RETVAL=$?
	;;
  *)
	echo $"Usage: $0 {start|stop}"
	exit 3
	;;
esac
# BUG FIX: RETVAL was never assigned in the original, so 'exit $RETVAL'
# expanded to a bare 'exit'; it is now set from the start/stop status.
exit $RETVAL
| true
|
707b0de50c3102fbc703ada6af63daa44309d885
|
Shell
|
Eason0210/scripts
|
/dfn
|
UTF-8
| 855
| 3.859375
| 4
|
[] |
no_license
|
#!/bin/bash
# Print current Coinbase USD sell prices for ADA/ETH/BTC/ICP, cache the
# ICP price to /tmp/icp.txt for external consumers, and optionally show
# vesting valuations.
# Usage: dfn [vesting_icp sold_icp]
if [[ -n "$1" ]]; then
	VESTING=$1
	SOLD=$2
fi

# Fetch the current USD sell price of a ticker from the Coinbase API.
function get_price() {
	curl -s "https://api.coinbase.com/v2/prices/${1}-USD/sell" \
		| jq -r '.data.amount'
}

# Print "<TICKER> $<price>" without a trailing newline.
function show_price() {
	# BUG FIX: the ticker is now passed as printf *data*; the original
	# interpolated $1 directly into the format string.
	printf "%s \$%'.02f" "$1" "$(get_price "$1")"
}

show_price "ADA"; printf "\n"
show_price "ETH"; printf "\n"
show_price "BTC"; printf "\n"

# Write this to a file so Excel can read it whenever I refresh
export PRICE=$(get_price "ICP")
echo "$PRICE" > /tmp/icp.txt

# Value (in millions of USD) of $1 tokens at the current ICP price.
function get_value_mm() {
	echo "scale=4; ($PRICE * ($1)) / 1000000.0" | bc
}

# Same value after a flat 19% haircut (presumably taxes/fees — TODO confirm).
function net() {
	get_value_mm "($1) * 0.81"
}

if [[ -n "$VESTING" ]]; then
	printf "ICP \$%'.02f = (\$%.01fmm 8Y) (\$%.01fmm T)\n" $PRICE \
		$(net 103133.05) \
		$(net "$VESTING - $SOLD")
else
	printf "ICP \$%'.02f\n" $PRICE
fi
| true
|
1fa8613b60d935290abe7e73137d47f8d2c5cafd
|
Shell
|
eraserhd/suggest-commit
|
/prepare-commit-msg
|
UTF-8
| 508
| 3.875
| 4
|
[] |
no_license
|
#!/bin/sh
# git prepare-commit-msg hook: replace the first line of the commit
# message file ($1) with a suggestion generated from the staged diff by
# the external 'suggest-commit' tool.

# When the second (or third) arguments are present, we are doing something
# weird (such as merging or amending) and we don't want to change or suggest a
# commit message.
if [ $# != 1 ]
then
exit 0
fi

# -b: ignore whitespace-only changes when generating the suggestion.
suggestion=`git diff -b --cached |suggest-commit`
if [ $? -ne 0 ]
then
# suggest-commit failed. The user should see its error message after
# committing, so lets not abort.
exit 0
fi

if [ -z "$suggestion" ]
then
exit 0 # Nothing to suggest
fi

# Replace line 1 of the message file in place.
# NOTE(review): 'sed -i <empty arg>' is the BSD/macOS form; GNU sed would
# treat '' as the script — confirm the target platform.
exec sed -i '' -e "1c\\
$suggestion
" "$1"
| true
|
ade91adae67683f710330e5832fd80705a82cecd
|
Shell
|
ArroTahur/skriptimine
|
/praks7/yl1
|
UTF-8
| 329
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Ask for the user's age and print the matching (Estonian) age-group label.

# Classify an age into an age-group label and print it.
#   $1 - age in years (must be an integer)
classify_age() {
	local vanus=$1
	# Reject empty/non-numeric input up front (the original relied on
	# 'test' erroring its way into the else branch, printing noise).
	if ! [[ $vanus =~ ^-?[0-9]+$ ]]; then
		echo "vigane sisend"
	elif (( vanus > 0 && vanus < 11 )); then
		echo "Oled laps"
	elif (( vanus >= 11 && vanus < 18 )); then
		echo "Oled nooruk"
	elif (( vanus >= 18 && vanus < 63 )); then
		echo "Oled t2iskasvanu"
	elif (( vanus >= 63 )); then
		echo "oled seenior"
	else
		echo "vigane sisend"
	fi
}

echo -n "Kui vana sa oled? "
read -r vanus
classify_age "$vanus"
| true
|
46595608cd5a18e596246ab0323716b81d052518
|
Shell
|
bioconda/bioconda-recipes
|
/recipes/metawatt/build.sh
|
UTF-8
| 623
| 3.125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# conda build script for MetaWatt: install the jar and databases into the
# package share directory and generate a launcher wrapper in bin/.

PREFIX=$CONDA_PREFIX
DESTDIR=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM/

mkdir -p $PREFIX/bin/
mkdir -p $DESTDIR
cp -r ./dist/* $DESTDIR
cp -r ./databases/ $DESTDIR

# companion script to run MetaWatt
# The generated launcher pushd's into the install dir before running java
# (presumably so the jar finds its databases relative to the CWD — TODO
# confirm) and forwards all arguments.
echo '#!/usr/bin/env bash' > $DESTDIR/metawatt
echo "WD=\"$DESTDIR\"" >> $DESTDIR/metawatt
echo 'JAR="$WD/MetaWatt-*.jar"' >> $DESTDIR/metawatt
echo 'MEM="2g"' >> $DESTDIR/metawatt
echo 'pushd $WD > /dev/null' >> $DESTDIR/metawatt
echo 'java -Xmx$MEM -jar $JAR "$@"' >> $DESTDIR/metawatt
echo 'popd > /dev/null' >> $DESTDIR/metawatt
chmod u+x $DESTDIR/metawatt

ln -s $DESTDIR/metawatt $PREFIX/bin/
| true
|
685e81f9135822f58303e83f7cab50eb0d3de0db
|
Shell
|
godaddy/asherah
|
/samples/go/aws/lambda/3-invoke.sh
|
UTF-8
| 2,327
| 3.9375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
FUNCTION_NAME=$(aws cloudformation describe-stack-resource \
--stack-name sample-lambda-go \
--logical-resource-id "function" \
--query 'StackResourceDetail.PhysicalResourceId' \
--output text)
# Invoke the Lambda with an "encrypt" request and print the result.
#   $1 - partition id (default "partition-1")
#   $2 - plaintext payload (default "mysupersecrettext"); base64-encoded
#        before being placed in the request JSON
# Writes the raw function response to out/out-encrypt.json.
encrypt() {
  local partition=${1:-"partition-1"}
  local raw_payload=${2:-"mysupersecrettext"}
  local encoded="$(echo -n "${raw_payload}" | base64)"
  local payload="{\"Name\": \"encrypt-${partition}\", \"Partition\": \"${partition}\", \"Payload\": \"${encoded}\"}"
  local dest="out/out-encrypt.json"

  echo
  echo "Encrypt"
  echo "======="
  echo "invoking function with encrypt payload:"
  echo $payload | jq -c .
  aws lambda invoke --function-name $FUNCTION_NAME --payload "${payload}" $dest 1>/dev/null && print_results $dest
}
# Pretty-print a Lambda response file.
#   $1 - path to the response JSON
# Returns 1 (after printing the message) when the response contains an
# errorMessage field; otherwise prints a condensed summary.
print_results() {
  local outfile=$1
  local errorMessage="$(jq -r '.errorMessage // empty' $outfile)"

  if [[ ! -z "$errorMessage" ]]; then
    echo $errorMessage
    return 1
  fi

  echo "-------"
  echo "Response received (modified):"
  # Depending on the request type, i.e., encrypt or decrypt, the response JSON will contain either a DRR or PlainText
  # attribute. The command below constructs a new JSON object consisting of whichever is present along with a few of
  # the metrics collected by the sample application and included in the response.
  jq -c '. | {Results: (.DRR // .PlainText), Metrics: {InvocationCount: .Metrics["asherah.samples.lambda-go.invocations"].count, SecretsAllocated: .Metrics["secret.allocated"].count, SecretsInUse: .Metrics["secret.inuse"].count}}' $outfile

  # Replace the above the following (commented) command to print the entire function response JSON
  # jq . $outfile
}
# Invoke the Lambda with a "decrypt" request, feeding back the DRR that
# the preceding encrypt call stored in out/out-encrypt.json.
#   $1 - partition id (default "partition-1")
# Writes the raw function response to out/out-decrypt.json.
decrypt() {
  local partition=${1:-"partition-1"}
  local payload="{\"Name\": \"decrypt-${partition}\", \"Partition\": \"${partition}\", \"DRR\": $(jq -c .DRR out/out-encrypt.json)}"
  local dest="out/out-decrypt.json"

  echo
  echo "Decrypt"
  echo "======="
  echo "invoking function with decrypt payload:"
  echo "$payload" | jq -c .
  # Note that the payload contains the DRR contained in the previous decrypt response
  aws lambda invoke --function-name $FUNCTION_NAME --payload "${payload}" $dest 1>/dev/null && print_results $dest
}
encrypt "$@" && decrypt "$@"
| true
|
6cccd2f9ee74b06d0349c505a140be616941f8ab
|
Shell
|
cheeteh/MySQLBackup
|
/Incremental/inc_db_backup_31.sh
|
UTF-8
| 9,414
| 3.328125
| 3
|
[] |
no_license
|
# #################################################################################################
# File $URL: file:///var/svn/fw-repo/branches/4.2.7/Iguazu/Iguazu-Web/src/main/webapp/WEB-INF/bin/inc_db_backup_31.sh $
# Revision $Revision: 14488 $
# Author $Author: randy $
# Last Revised $Date: 2015-09-23 10:58:24 -0400 (Wed, 23 Sep 2015) $
# #################################################################################################
#!/bin/sh
##################
# Return codes
##################
SUCCESS=0
MYSQL_BACKUP_FAILED=1
MYSQL_BACKUP_NOT_INSTALLED=2
TAR_FAILED=3
GPG_FAILED=4
CUSTOMER_HOME_DOESNT_EXIST=5
MYSQL_BACKUP_STILL_RUNNING=6
NO_FULL_BACKUP_AVAILABLE=7
##################
# Variables
##################
BU_OUT_DIR=/chroot/home/db_backup
CURRENT_BACKUP_FILENAME=`date '+%A'`"_incremental.mbi"
BACKUP_DAY=`date '+%A'`
BACKUP_DIR=${BU_OUT_DIR}"/"${BACKUP_DAY}
PASSPHRASE_FILE=/usr/local/tomcat/serverA/webapps/Iguazu-Web/WEB-INF/bin/my_passphrase
LOG_FILE=/home/fairwarning/log/meb_inc_backup.log
MYSQL_USER=root
MYSQL_PASSWORD=r1singtide
CUSTOMER_HOME=/chroot/home
CUSTOMER_USER=tomcat.data
##################
# Functions
##################
# Abort the script when a step failed.
#   $1 - return code of the step just run (0 = success)
#   $2 - message to append to $LOG_FILE on failure
#   $3 - exit code to terminate the script with
check_return_code ()
{
return_code=$1
message=$2
exit_code=$3

if [ x"$1" != x"0" ] ; then
echo "$message - exiting with code of $exit_code" >> $LOG_FILE 2>&1
exit $3
fi
}
# Remove the artifacts of a failed backup run so a corrupt image is
# never left behind.
#   $1 - path of the incomplete backup image file
#   $2 - temporary backup working directory
clean_up_failed_backup()
{
failed_backup_file=$1
backup_tmp_dir=$2

echo cleaning up failed backup $failed_backup_file $backup_tmp_dir >> $LOG_FILE 2>&1
# remove the incomplete backup
rm -f $failed_backup_file
# clean up the temp directory
rm -rf $backup_tmp_dir
}
##################
# Start execution
##################
echo "" >>$LOG_FILE 2>&1
echo "*****************************************************************" >>$LOG_FILE 2>&1
echo `date +'%Y-%m-%d %H:%M:%S'` " Starting MEB Incremental Backup..." >>$LOG_FILE 2>&1
echo "*****************************************************************" >>$LOG_FILE 2>&1
echo "" >>$LOG_FILE 2>&1
# Locate the mysql client executable...
MYSQL_EXE=`which mysql`
if [ "$MYSQL_EXE" == "" ] ; then
if [ -x /usr/local/mysql/bin/mysql ] ; then
MYSQL_EXE="/usr/local/mysql/bin/mysql"
elif [ -x /usr/bin/mysql ] ; then
MYSQL_EXE="/usr/bin/mysql"
else
check_return_code 1 "Cannot locate mysql client - exiting..." $MYSQL_CLIENT_NOT_INSTALLED
fi
fi
# Make sure backup isn't currently running...
BACKUP_RUNNING=`ps -ef | grep mysqlbackup | wc -l`
if [[ $BACKUP_RUNNING -gt 1 ]] ; then
check_return_code 1 "!\nMySQL Backup is still running. Exiting." $MYSQL_BACKUP_STILL_RUNNING
fi
if [[ ! -d $CUSTOMER_HOME || -z $CUSTOMER_HOME ]]; then
check_return_code 1 "Directory $CUSTOMER_HOME not found!\nExiting." $CUSTOMER_HOME_DOESNT_EXIST
fi
$MYSQL_EXE -u${MYSQL_USER} -p${MYSQL_PASSWORD} epictide -s -s -e"select setting_value from APP_SETTINGS where setting_key = 'FULL_BACKUP_PATHS';" >./fb_dirs.lst 2>/dev/null
BU_OUT_DIRS=$(< ./fb_dirs.lst)
BU_OUT_DIR0=`echo $BU_OUT_DIRS | cut -d ',' -f 1`
BU_OUT_DIR1=`echo $BU_OUT_DIRS | cut -d ',' -f 2`
if [ "${BU_OUT_DIR0}" == "" ] ; then
check_return_code 1 "App Setting is not returning location for backup file..." $BACKUP_LOCATION_NOT_SET
fi
# echo "Before attempting to fix: DIR0: ${BU_OUT_DIR0} - DIR1: ${BU_OUT_DIR1}"
# Make sure each var has a trailing "/"...
if [ "${BU_OUT_DIR0: -1}" != "/" ] ; then
BU_OUT_DIR0="${BU_OUT_DIR0}/"
fi
if [ "${BU_OUT_DIR1: -1}" != "/" ] ; then
BU_OUT_DIR1="${BU_OUT_DIR1}/"
fi
# Check to see if there is only one directory is requested...
if [ "${BU_OUT_DIR0}" == "${BU_OUT_DIR1}" ] || [ ${BU_OUT_DIR1} == "/" ] ; then
# echo "We only want one directory..."
SINGLE_DIR=1
echo "Single directory Dir 0 found at: $BU_OUT_DIR0" >>$LOG_FILE 2>&1
# echo "Dir 0 found at: $BU_OUT_DIR0"
else
SINGLE_DIR=0
echo "Dir 0 found at: $BU_OUT_DIR0 - Dir 1 found: $BU_OUT_DIR1" >>$LOG_FILE 2>&1
# echo "Dir 0 found at: $BU_OUT_DIR0 - Dir 1 found: $BU_OUT_DIR1"
fi
#check which one of them has the latest backup - and make that backup dir
if [ $SINGLE_DIR = 0 ]; then
if [ -f $BU_OUT_DIR0/full_db_backup.mbi ]; then
BU_OUT_DIR=${BU_OUT_DIR0}
fi
if [ -f $BU_OUT_DIR1/full_db_backup.mbi ]; then
BU_OUT_DIR=${BU_OUT_DIR1}
fi
fi
echo "Current Full DB Backup is at ::: $BU_OUT_DIR\n"
# if the output directory does not exist create it
if [ ! -d $BU_OUT_DIR ] ; then
mkdir -p $BU_OUT_DIR
chown $CUSTOMER_USER $BU_OUT_DIR
fi
# if MySQL enterprise backup is not installed, let's complain about it and exit.
mysqlbackup_bin=`which mysqlbackup`
if [ -x /opt/mysql/meb-3.10/bin/mysqlbackup ] ; then
mysqlbackup_bin="/opt/mysql/meb-3.10/bin/mysqlbackup --socket=/var/lib/mysql/mysql.sock"
elif [ -x /usr/bin/mysqlbackup ] ; then
mysqlbackup_bin="/usr/bin/mysqlbackup --socket=/var/lib/mysql/mysql.sock"
elif [ -x /usr/local/mysql/bin/mysqlbackup ] ; then
mysqlbackup_bin=/usr/local/mysql/bin/mysqlbackup
else
check_return_code 1 "MySQL enterprise backup is not installed at /usr/local/mysql/bin/mysqlbackup or /opt/mysql/meb-3.10/bin/mysqlbackup!!! exiting." $MYSQL_BACKUP_NOT_INSTALLED
fi
echo "MySQL Enterprise Backup Found: $mysqlbackup_bin" >>$LOG_FILE 2>&1
# Get the day of the week full backups possess...
$MYSQL_EXE -u${MYSQL_USER} -p${MYSQL_PASSWORD} -s -s \
-e"SELECT from_unixtime(substring(prev_fire_time,1,10),'%W') \
FROM quartz.QTZ_TRIGGERS \
WHERE trigger_group = 'DEFAULT' \
AND job_name = 'MEB Backup' \
AND job_group = 'SYSTEM_COMMAND';" >./fb_day.lst 2>>/dev/null
FULL_BACKUP_DAY=$(< ./fb_day.lst)
if [ "$FULL_BACKUP_DAY" == "" ] ; then
$MYSQL_EXE -uroot -pr1singtide events -s -s \
-e"SELECT date_format(start_time,'%W') \
FROM mysql.backup_history \
WHERE backup_type = 'FULL' \
AND exit_state = 'SUCCESS' \
ORDER BY backup_id desc limit 1;" \
>./fb_day.lst 2>>/dev/null
FULL_BACKUP_DAY=$(< fb_day.lst)
if [ "$FULL_BACKUP_DAY" == "" ] ; then
check_return_code 1 "Must have successful full backup in order to run incremental..." $NO_FULL_BACKUP_AVAILABLE
fi
fi
echo "Full Backups run on: $FULL_BACKUP_DAY" >> $LOG_FILE 2>&1
echo "Let's get the last day of backup..." >> $LOG_FILE 2>&1
# First see if we can get this from the backup_history_table...
$MYSQL_EXE -u${MYSQL_USER} -p${MYSQL_PASSWORD} -s -s -e"SELECT date_format(end_time,'%W') from mysql.backup_history where exit_state = 'SUCCESS' order by backup_id desc limit 1;" >./lb_day.lst 2>/dev/null
LAST_BACKUP_DAY=$(<lb_day.lst)
echo "Last Backup Day: [$LAST_BACKUP_DAY]" >>$LOG_FILE 2>&1
LAST_BACKUP_DIR=${BU_OUT_DIR}"/"${LAST_BACKUP_DAY}
if [ "$LAST_BACKUP_DAY" == "$FULL_BACKUP_DAY" ] ; then
# Check to make sure full backup is in the house...
if [ -f ${BU_OUT_DIR}"/""full_db_backup.mbi" ] ; then
LAST_BACKUP_DIR=${BU_OUT_DIR}"/tmp_backup/"
# Let's remove all of the previous incremental zip files...
for myday in "Sunday" "Monday" "Tuesday" "Wednesday" "Thursday" "Friday" "Saturday" ; do
if [ -f "${BU_OUT_DIR}"/"${myday}""_incremental.mbi" ] ; then
echo "removing ${BU_OUT_DIR}""/""${myday}""_incremental.mbi" >> $LOG_FILE 2>&1
rm -rf ${BU_OUT_DIR}"/""${myday}""_incremental.mbi"
fi
done
break
else
check_return_code 1 "No full backup to base incremental from ${BU_OUT_DIR}""/""full_db_backup.mbi" $MYSQL_BACKUP_FAILED
fi
else
if [ -f ${LAST_BACKUP_DIR}"/"${LAST_BACKUP_DAY}"_incremental.mbi" ] ; then
echo "Found prior backup on $LAST_BACKUP_DAY"
# Clean out current backup day directory in case Monday's clear-all didn't work...
if [ -d ${BACKUP_DIR} ] ; then
rm -rf ${BACKUP_DIR}
fi
break
fi
fi
echo "Last backup day: $LAST_BACKUP_DAY" >> $LOG_FILE 2>&1
echo "current backup day: $BACKUP_DAY" >> $LOG_FILE 2>&1
echo "backup executable found: $mysqlbackup_bin" >> $LOG_FILE 2>&1
# make the incremental backup - encryption not an option for incrmentals...
echo $mysqlbackup_bin -u${MYSQL_USER} -p${MYSQL_PASSWORD} --port=3306 --incremental --no-locking --backup-dir=${BACKUP_DIR}"/" \
--incremental-base=history:last_backup --encrypt --key-file=${PASSPHRASE_FILE} backup-to-image \
--backup-image=${BACKUP_DIR}"/"${CURRENT_BACKUP_FILENAME} >> $LOG_FILE 2>&1
$mysqlbackup_bin -u${MYSQL_USER} -p${MYSQL_PASSWORD} --port=3306 --incremental --no-locking --backup-dir=${BACKUP_DIR}"/" \
--incremental-base=history:last_backup --encrypt --key-file=${PASSPHRASE_FILE} backup-to-image --backup-image=${BACKUP_DIR}"/"${CURRENT_BACKUP_FILENAME} >> $LOG_FILE 2>&1
mysqlbackup_return=$?
# if the backup is not a success, do not leave an corrupt backup sitting around...
if [ x"$mysqlbackup_return" != x"0" ] ; then
clean_up_failed_backup ${BACKUP_DIR}/${CURRENT_BACKUP_FILENAME} ${BACKUP_DIR}
# exit and log on failure
check_return_code $mysqlbackup_return "mysqlbackup " $MYSQL_BACKUP_FAILED
else
# The backup was a success - now let's move it to a file at the same level as full backup...
mv ${BACKUP_DIR}"/"${CURRENT_BACKUP_FILENAME} ${BU_OUT_DIR}"/"
tar_rc=$?
if [ x"$tar_rc" == "x0" ] ; then
rm -rf ${BACKUP_DIR}"/"
fi
fi
exit $SUCCESS
| true
|
60bc7ea63636abfedecc59d52a6fc3c97c2dac0a
|
Shell
|
jmunozti/KubernetesWithTerraform
|
/shell_script/deployApps.sh
|
UTF-8
| 986
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
# Deploy sample workloads to the current Kubernetes cluster: a hello-app
# deployment with ingress, a Helm chart from the cloned repo, and
# Prometheus. Requires kubectl, helm 3, and git on the PATH.
set -e
echo "Begin"
echo "============================================"

#Deploying an app
# Re-clone the repo fresh each run.
rm -rf /home/ubuntu/KubernetesWithTerraform
git clone https://github.com/jmunozti/KubernetesWithTerraform.git
echo "Deploying an app"
cd /home/ubuntu/KubernetesWithTerraform/app/
#kubectl delete deploy/hello-app svc/hello-app
kubectl create deployment hello-app --image=gcr.io/google-samples/hello-app:1.0
kubectl expose deployment hello-app --port 80 --target-port 8080
kubectl apply -f hello-app-ingress.yaml
sleep 5

#Deploying some apps with Helm3
echo "Deploying some apps with Helm3"
cd /home/ubuntu/KubernetesWithTerraform/
helm install --values mychart/values.yaml mychart/ --generate-name
kubectl create ns monitoring
helm install prometheus stable/prometheus --namespace monitoring
kubectl --namespace default get pods -l "release=my-prometheus-operator"
helm list
sleep 5

echo "Get all"
kubectl get all
echo "============================================"
echo "End"
| true
|
e43e5517101e816053e7c02fe6c37ad0d1dd68a7
|
Shell
|
gitter-badger/quinoa
|
/script/update_doc.sh
|
UTF-8
| 718
| 2.734375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
################################################################################
#
# \file      script/update_doc.sh
# \author    J. Bakosi
# \date      Tue 07 Jul 2015 06:19:16 AM MDT
# \copyright 2012-2015, Jozsef Bakosi.
# \brief     Update documentation and upload to github pages
#
################################################################################

cd /home/jbakosi/code/quinoa
# Record the source changeset the docs are built from (used in the commit msg).
DOC4COMMIT=$(git rev-parse --verify HEAD)
cd build/gnu
# Replace the html output dir with a fresh clone of the gh-pages branch.
rm -rf doc/html
git clone git@github.com:jbakosi/quinoa.git --branch gh-pages --single-branch doc/html
cd doc/html
# Clear the old site so deleted pages do not linger.
git rm -rf .
cd -
ninja doc
cd doc/html
# .nojekyll: tell GitHub Pages to serve the files verbatim.
touch .nojekyll
git add .
git commit -m "Automated documentation build for changeset ${DOC4COMMIT}"
git push origin gh-pages
| true
|
e56853bbec333f580af16b282163f66293f9fce7
|
Shell
|
petronny/aur3-mirror
|
/gmpc-lyricwiki-git/PKGBUILD
|
UTF-8
| 1,103
| 2.78125
| 3
|
[] |
no_license
|
# PKGBUILD for gmpc-lyricwiki-git: builds the gmpc lyricwiki plugin from
# the upstream git repository.
#Submitter: Madek <gonzalosegueñ@gmail.com>
#Maintainer: sysrmr <sysrmr qt gmail dot com>
pkgname=gmpc-lyricwiki-git
provides=('gmpc-lyricwiki')
conflicts=('gmpc-lyricwiki' 'gmpc-lyricwiki-svn')
pkgver=20120830
pkgrel=1
pkgdesc="A plugin for gmpc that obtains lyrics from lyricwiki"
url="http://gmpc.wikia.com/wiki/GMPC_PLUGIN_LYRICWIKI"
license=('GPL')
arch=('i686' 'x86_64')
depends=('gmpc-git')
makedepends=('git' 'intltool' 'gob2' 'pkgconfig')
options=('!libtool')
source=('Fixed-building-automake-1.12.patch')
md5sums=('e0bda6d329ba5ad59466380dae453bcd')

_gitroot="git://repo.or.cz/gmpc-lyricwiki.git"
_gitname="gmpc-lyricwiki"

# Fetch (or update) the git checkout, patch for automake 1.12, then
# configure, build, and install into $pkgdir.
build() {
  cd $srcdir
  msg "Connecting to $_gitroot server..."

  # Reuse an existing checkout when present; clone otherwise.
  if [ -d $srcdir/$_gitname ] ; then
    cd $_gitname && git pull origin
    msg "The local files are updated."
  else
    git clone $_gitroot
  fi

  msg "GIT checkout done or server timeout"
  msg "Starting make..."

  # Build from a copy so the pristine checkout can be reused next run.
  cp -r $srcdir/$_gitname $srcdir/$_gitname-build
  cd $srcdir/$_gitname-build

  patch -p1 < ../../Fixed-building-automake-1.12.patch
  ./autogen.sh --prefix=/usr
  make || return 1
  make DESTDIR=$pkgdir install
}
| true
|
7c8c5e16161e0b850b8d3e67d1f90799fae9733f
|
Shell
|
liyustar/lyx_conf
|
/home_bin/xenc
|
UTF-8
| 377
| 4.09375
| 4
|
[] |
no_license
|
#!/bin/bash
# Pack a file or directory into a tar archive and DES-encrypt it with
# openssl (which prompts for a passphrase).
# Usage: xenc <file | directory> [out]
if [ -z "$1" ]; then
	echo "usage: xenc <file | directory> [out]"
	# BUG FIX: a bare 'exit' here returned the (successful) status of echo.
	exit 1
fi

FILE_PATH=$1
FILE=$(basename "$FILE_PATH")

# Output file defaults to <basename>.des next to the CWD.
if [ -z "$2" ]; then
	OUTFILE=$FILE.des
else
	OUTFILE=$2
fi

# Pack the input into a tar archive
tar -cf "$FILE.tar" "$FILE_PATH"
# Encrypt the tar archive with DES
openssl enc -e -des -in "$FILE.tar" -out "$OUTFILE"
# Remove the intermediate archive
rm -f "$FILE.tar"
| true
|
e0e8a01d535cf6a0ba6e9118b042cb389e0447da
|
Shell
|
gedomagno/ovirt-engine-sdk-ruby
|
/automation/check-patch.sh
|
UTF-8
| 794
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/sh -ex
# CI check script: generate a Maven settings.xml that mirrors all
# repositories through the oVirt artifactory, force Java 8, and run the
# test suite.

# Create a settings file that uses the our artifactory server as
# proxy for all repositories:
settings="$(pwd)/settings.xml"
cat > "${settings}" <<.
<settings>
  <mirrors>
    <mirror>
      <id>ovirt-artifactory</id>
      <url>http://artifactory.ovirt.org/artifactory/ovirt-mirror</url>
      <mirrorOf>*</mirrorOf>
    </mirror>
    <mirror>
      <id>maven-central</id>
      <url>http://repo.maven.apache.org/maven2</url>
      <mirrorOf>*</mirrorOf>
    </mirror>
  </mirrors>
</settings>
.

# There may be several versions of Java installed in the build
# enviroment, and we need to make sure that Java 8 is used, as
# it is required by the code generator:
export JAVA_HOME="${JAVA_HOME:=/usr/lib/jvm/java-1.8.0}"

# Build and run the tests:
mvn test -s "${settings}"
| true
|
db95fe035dd6cf7b51bc82735cfbfc023375ec51
|
Shell
|
tnakaicode/jburkardt
|
/r8row/r8row.sh
|
UTF-8
| 236
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Install the r8row library: copy the header into ~/include, compile the
# source, and move the object file into the user's C++ library directory.
cp r8row.hpp /$HOME/include
#
g++ -c -I /$HOME/include r8row.cpp
if [ $? -ne 0 ]; then
  echo "Errors compiling r8row.cpp" >&2
  # BUG FIX: a bare 'exit' here returned the (successful) status of echo,
  # so callers saw the failed build as a success.
  exit 1
fi
#
mv r8row.o ~/libcpp/$ARCH/r8row.o
#
echo "Library installed as ~/libcpp/$ARCH/r8row.o"
| true
|
42ff074f7da2e1de0b975db83fc16deb35458941
|
Shell
|
xapix-io/httpbin
|
/signal-listener.sh
|
UTF-8
| 643
| 3.625
| 4
|
[
"ISC"
] |
permissive
|
#!/usr/bin/env bash
set -euo pipefail

# A wrapper around /entrypoint.sh to trap the SIGINT signal (Ctrl+C) and forwards it to the mysql daemon
# In other words : traps SIGINT and SIGTERM signals and forwards them to the child process as SIGTERM signals

# Run "$@" in the background and relay SIGINT/SIGTERM to it as SIGTERM,
# then wait until the child has actually exited.
signalListener() {
    "$@" &
    pid="$!"
    trap "echo 'Stopping PID $pid'; kill -SIGTERM $pid" SIGINT SIGTERM

    # A signal emitted while waiting will make the wait command return code > 128
    # Let's wrap it in a loop that doesn't end before the process is indeed stopped
    # (kill -0 only probes whether the PID is still alive).
    while kill -0 $pid > /dev/null 2>&1; do
        wait
    done
}

signalListener /httpbin/wrapper.sh $@
| true
|
0dfdfa84b2facb197f929219de6ca72854ec6c60
|
Shell
|
openbmc/openbmc
|
/meta-ampere/meta-mitchell/recipes-ampere/platform/ampere-utils/ampere_power_on_driver_binder.sh
|
UTF-8
| 1,227
| 3.53125
| 4
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# On chassis power-on, bind each expected I2C device to its kernel driver
# by writing the device name into the driver's sysfs 'bind' file.
# DRIVER_NAMEs[i] pairs with DRIVER_PATHs[i] (parallel arrays).

# Each driver include driver name and driver path
declare -a DRIVER_NAMEs=(
        "107-0070"
        "100-0071"
        "101-0071"
        "102-0071"
        "103-0071"
        "104-0071"
        "100-0050"
        "101-0050"
        "102-0050"
        "100-004c"
        "101-004c"
        "102-004c"
)

# Driver path should include / at the end
declare -a DRIVER_PATHs=(
        "/sys/bus/i2c/drivers/pca954x/"
        "/sys/bus/i2c/drivers/pca954x/"
        "/sys/bus/i2c/drivers/pca954x/"
        "/sys/bus/i2c/drivers/pca954x/"
        "/sys/bus/i2c/drivers/pca954x/"
        "/sys/bus/i2c/drivers/pca954x/"
        "/sys/bus/i2c/drivers/at24/"
        "/sys/bus/i2c/drivers/at24/"
        "/sys/bus/i2c/drivers/at24/"
        "/sys/bus/i2c/drivers/lm75/"
        "/sys/bus/i2c/drivers/lm75/"
        "/sys/bus/i2c/drivers/lm75/"
)

# get length of an array
arraylength=${#DRIVER_NAMEs[@]}

# use for loop to read all values and indexes
for (( i=0; i<"${arraylength}"; i++ ));
do
        bindFile="${DRIVER_PATHs[$i]}bind"
        # A directory named after the device under the driver path means
        # the kernel already bound it; skip to keep the script idempotent.
        driverDir="${DRIVER_PATHs[$i]}${DRIVER_NAMEs[$i]}"
        echo "binding ${DRIVER_NAMEs[$i]} path ${DRIVER_PATHs[$i]} on Chassi Power On"
        if [ -d "$driverDir" ]; then
                echo "Driver ${DRIVER_NAMEs[$i]} is already bound."
        else
                echo "${DRIVER_NAMEs[$i]}" > "$bindFile"
        fi
done

exit 0
| true
|
fd3b9a07cc196d8a2fbb28f964306b300656c82f
|
Shell
|
downspot/terraform-ec2-instance-daemontools
|
/destroy.sh
|
UTF-8
| 450
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
# Tear down the example-contextual-bandit Terraform stacks for one
# environment, creating each workspace first if it does not yet exist.
# Usage: destroy.sh <preprod|prod>
# NOTE: the shebang was /bin/sh, but '(( ))' arithmetic is a bashism that
# fails under dash; the script now declares bash explicitly.
if (( $# != 1 )); then
   echo "Usage: ${0} <preprod|prod>" >&2
   exit 1
fi

for i in clicks_trainer joiner recs_trainer ; do
   # Create the workspace only when it is not already listed.
   if ! terraform workspace list | grep -q "example-contextual-bandit-${1}-${i}"; then
      terraform workspace new "example-contextual-bandit-${1}-${i}"
   fi
   terraform workspace select "example-contextual-bandit-${1}-${i}"
   terraform destroy -var-file="${1}.tfvars"
done
| true
|
2a9dbed2b39233b3a9476f67677a730f492caa2d
|
Shell
|
alanbartels/geo_tools
|
/time_series/gen_mean_directory.sh
|
UTF-8
| 3,821
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
echo "Starting processing at: "`date`
img_dir=$1
out_dir=$2
tmp_dir=${out_dir}/tmp/
if [ ! -d "${tmp_dir}" ];
then
mkdir -p ${tmp_dir}
fi
img_list=(`find ${img_dir} -maxdepth 1 -type f -name "*.tif"`)
# create the temporary processing files that will be added to get average
# this is necessary because otherwise NoData turns the whole pixel stack into null
if [ ! -z "${img_list}" ];
then
for tif in ${img_list[@]};
do
# The point of this odd section is to unset 32767 as the nodata value
# so that these nodata pixels can be added as 0 to the acumulated
# valid pixels (valid_pix) and running total (to_add) rasters,
# instead of just being nodata, which messes up the calculation
echo "Finding valid pixels to average for: ${tif}"
gdal_translate -q -of VRT ${tif} ${tif}_nodata_is_zero.vrt -a_nodata 0
gdal_calc.py --quiet --overwrite -A ${tif}_nodata_is_zero.vrt --outfile=${tif}_valid_pix.tif \
--calc="1*(A<=1000)+0*(A==32767)"
gdal_calc.py --quiet --overwrite -A ${tif}_nodata_is_zero.vrt --outfile=${tif}_to_add.tif \
--calc="A*(A<=1000.0)+0*(A==32767)" --NoDataValue=32767
gdal_calc.py --quiet --overwrite -A ${tif}_to_add.tif --outfile=${tif}_to_add.tif \
--calc="A*(A>0.0)" --NoDataValue=32767
mv ${img_dir}*_to_add.tif ${tmp_dir}
mv ${img_dir}*_valid_pix.tif ${tmp_dir}
rm ${img_dir}*.vrt
done
fi
echo "Valid pixels found at " `date`
img_list_num=(`find ${tmp_dir} -maxdepth 1 -type f -name "*.tif*_to_add.tif"`)
img_list_den=(`find ${tmp_dir} -maxdepth 1 -type f -name "*.tif*_valid_pix.tif"`)
echo "Image list for numerator: ${img_list_num}"
echo "Image list for demonimator ${img_list_den}"
if [ ! -z "${img_list_num}" ];
then
# create a new blank raster to start accumulating into -- copy
# the first raster and multiply the whole thing by 0 to start fresh
cp ${img_list_num[0]} ${tmp_dir}/numerator.tif
cp ${img_list_den[0]} ${tmp_dir}/denominator.tif
echo "Creating blank numerator and denominator rasters to start with."
gdal_calc.py --quiet --overwrite -A ${tmp_dir}/numerator.tif \
--outfile=${tmp_dir}/numerator.tif --calc="A*0" --NoDataValue=32767 --type=Int32
gdal_calc.py --quiet --overwrite -A ${tmp_dir}/denominator.tif \
--outfile=${tmp_dir}/denominator.tif --calc="A*0" --NoDataValue=32767 --type=Int32
imga_num=${tmp_dir}/numerator.tif
imga_den=${tmp_dir}/denominator.tif
img_avg=${tmp_dir}/average.tif
# now add all the numerators together
# and all the denominators together
len=`echo ${#img_list_num[@]}`
for img in $(seq 0 "$((len-1))");
do
# now add each new image to the initial raster to get the acumulation
imgb_num=`echo ${img_list_num[$img]}`
imgb_den=`echo ${img_list_den[$img]}`
#calc_str="A + B"
gdal_cmd1_num=`echo gdal_calc.py -A ${imga_num} -B ${imgb_num} --outfile ${imga_num} --overwrite --quiet --calc=\"A+B\" --NoDataValue=32767 --type=Int32`
gdal_cmd1_den=`echo gdal_calc.py -A ${imga_den} -B ${imgb_den} --outfile ${imga_den} --overwrite --quiet --calc=\"A+B\" --NoDataValue=32767 --type=Int32`
#echo $gdal_cmd1
echo "Calculating numerator and denominator rasters at " `date`
eval $gdal_cmd1_num
eval $gdal_cmd1_den
done
# then divide by the numerator by the denominator for that day to get the mean
gdal_cmd2=`echo gdal_calc.py -A ${imga_num} -B ${imga_den} --outfile ${img_avg} --overwrite --quiet --type=Float32 --calc=\"\(A/B\)*0.001\" --NoDataValue=32767 --type=Float32`
echo "Calculating average per pixel: ${gdal_cmd2} at " `date`
eval $gdal_cmd2
# rm ${tmp_dir}/*tif_to_add.tif
# rm ${tmp_dir}/*tif_valid_pix.tif
mv ${tmp_dir}/*average*.tif ${out_dir}
echo "Processing finished at " `date`
fi
| true
|
e0dccb052581a7e9af259e757f407150a59a6d06
|
Shell
|
ZhaoHuiXin/Shell
|
/chapter04/t2.sh
|
UTF-8
| 320
| 3.640625
| 4
|
[] |
no_license
|
# Require exactly two positional arguments; otherwise print the correct
# usage ($0 is this script's name/path) and fail with status 1.
if [ $# -ne 2 ]
then
echo "USAGE:/bin/sh $0 arg1 arg2"
exit 1
fi
# Echo both arguments back, quoted so whitespace inside them survives
# word-splitting (the original unquoted form collapsed internal spaces).
echo "$1 $2"
| true
|
9d37c694bde79b94392a7db34a4a61cafd4293b4
|
Shell
|
alincc/domibus-docker
|
/dockerbuild/build/domibus/domibus-tomcat/install-domibus.sh
|
UTF-8
| 2,954
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
# Install Domibus into a Tomcat image: unpack the distribution zips into
# CATALINA_HOME / DOMIBUS_CONFIG_LOCATION and install the default WS plugin.
SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )"
# Set DEBUG TO 1 to activate debugging
DEBUG=0
# Echo the environment this installer relies on (all provided by the image).
echo "--------------CATALINA_HOME: " ${CATALINA_HOME}
echo "--------------DOMIBUS_CONFIG_LOCATION: ${DOMIBUS_CONFIG_LOCATION}"
echo "--------------DOCKER_DOMINSTALL: ${DOCKER_DOMINSTALL}"
echo "--------------DOCKER_DOMIBUS_DISTRIBUTION: ${DOCKER_DOMIBUS_DISTRIBUTION}"
echo "--------------DB_TYPE: ${DB_TYPE}"
echo "--------------DB_HOST: ${DB_HOST}"
echo "--------------DB_PORT: ${DB_PORT}"
echo "--------------DB_NAME: ${DB_NAME}"
echo "--------------DB_USER: ${DB_USER}"
echo "--------------DB_PASS: ${DB_PASS}"
echo "--------------DOMIBUS_VERSION: ${DOMIBUS_VERSION}"
# Source the shared helper-function libraries shipped under
# ${DOCKER_DOMINSTALL}/scripts/functions, in the same order as before:
# common, downloadJDBC, getDomibus.
function sourceExternalFunctions {
  echo ; echo "--Sourcing External Functions:"
  local lib
  for lib in common downloadJDBC getDomibus; do
    . "${DOCKER_DOMINSTALL}/scripts/functions/${lib}.functions"
  done
}
# Lay out the Domibus installation: Tomcat config, the WAR, sample
# keystores, security policies and the default WS plugin.
# Reads: DOMIBUS_CONFIG_LOCATION, DOCKER_DOMIBUS_DISTRIBUTION,
# DOMIBUS_VERSION, CATALINA_HOME, DOCKER_DOMINSTALL.
# displayFunctionBanner comes from the sourced common.functions library.
function initInstallation {
displayFunctionBanner ${FUNCNAME[0]}
mkdir -p ${DOMIBUS_CONFIG_LOCATION}
#copy the Tomcat configuration
unzip $DOCKER_DOMIBUS_DISTRIBUTION/domibus-distribution-${DOMIBUS_VERSION}-tomcat-configuration.zip -d ${DOMIBUS_CONFIG_LOCATION}
#copy the war in the webapps directory
unzip $DOCKER_DOMIBUS_DISTRIBUTION/domibus-distribution-${DOMIBUS_VERSION}-tomcat-war.zip -d ${CATALINA_HOME}/webapps
mv ${CATALINA_HOME}/webapps/domibus-MSH-tomcat-${DOMIBUS_VERSION}.war ${CATALINA_HOME}/webapps/domibus.war
#copy the sample keystore/truststore
unzip -j $DOCKER_DOMIBUS_DISTRIBUTION/domibus-distribution-${DOMIBUS_VERSION}-sample-configuration-and-testing.zip conf/domibus/keystores/* -d ${DOMIBUS_CONFIG_LOCATION}/keystores
#unzip $DOCKER_DOMIBUS_DISTRIBUTION/domibus-distribution-${DOMIBUS_VERSION}-sample-configuration-and-testing.zip -d ${DOMIBUS_CONFIG_LOCATION}/temp
#mv ${DOMIBUS_CONFIG_LOCATION}/conf/domibus/keystores ${DOMIBUS_CONFIG_LOCATION}
#rm -rf ${DOMIBUS_CONFIG_LOCATION}/temp
#copy the policies
mkdir -p ${DOMIBUS_CONFIG_LOCATION}/policies
cp ${DOCKER_DOMINSTALL}/policies/* ${DOMIBUS_CONFIG_LOCATION}/policies
#installing the plugins
mkdir -p ${DOMIBUS_CONFIG_LOCATION}/plugins/config
mkdir -p ${DOMIBUS_CONFIG_LOCATION}/plugins/lib
unzip -j ${DOCKER_DOMIBUS_DISTRIBUTION}/domibus-distribution-${DOMIBUS_VERSION}-default-ws-plugin.zip conf/domibus/plugins/config/tomcat/* -d ${DOMIBUS_CONFIG_LOCATION}/plugins/config
unzip -j ${DOCKER_DOMIBUS_DISTRIBUTION}/domibus-distribution-${DOMIBUS_VERSION}-default-ws-plugin.zip conf/domibus/plugins/lib/* -d ${DOMIBUS_CONFIG_LOCATION}/plugins/lib
}
#####################################################################################################################
##### MAIN PROGRAMM START HERE
####################################################################################################################
# Load helper libraries first, then perform the installation and exit.
sourceExternalFunctions
initInstallation
exit
| true
|
dca38999db77249ece2213ff93b7be2b6848451f
|
Shell
|
JustinChristensen/dotfiles
|
/linux/bin/swaybar_status.sh
|
UTF-8
| 1,052
| 3.859375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
volume_status() {
local status='Unavailable'
local mute=$(pactl get-sink-mute @DEFAULT_SINK@)
local reg="\s+[0-9]+\s+\/\s+([0-9]+%)\s+\/\s+-?[0-9.]+\sdB"
if [[ "$mute" == "Mute: yes" ]]; then
status="Muted"
else
local vol=$(pactl get-sink-volume @DEFAULT_SINK@)
if [[ "$vol" =~ $reg ]]; then
status="${BASH_REMATCH[1]}"
fi
fi
echo "๐ $status"
}
# Current local timestamp for the bar, prefixed with a clock icon.
date_status() {
  printf '⏱ %s\n' "$(date +'%Y-%m-%d %I:%M:%S %p')"
}
# Battery charge summary from acpi, prefixed with a lightning icon.
# Lines reporting "unavailable" (adapters with no battery) are dropped,
# and the "Battery N:" prefix is stripped by cut.
battery_status() {
    # grep -Fv replaces the deprecated fgrep -v alias (same fixed-string,
    # inverted match).
    local status="$(acpi -b | grep -Fv 'unavailable' | cut -d ':' -f2-)"
    echo "⚡️$status"
}
# Screen backlight level via brightnessctl machine-readable output
# (field 4 is the percentage), prefixed with a torch icon.
brightness_status() {
  printf '🔦 %s\n' "$(brightnessctl -m | cut -d, -f4)"
}
# Main loop: emit "date | volume | battery" once per second for swaybar.
swaybar_status() {
local i=0
local battery=
while true; do
# Refresh the (slow) battery reading only every 60th iteration; the
# post-increment means the very first pass (i==0) populates it.
((i++ % 60 == 0)) && battery=$(battery_status)
printf "%s | %s | %s \n" \
"$(date_status)" \
"$(volume_status)" \
"$battery"
sleep 1
done
}
# Entry point: stream status lines forever (consumed by swaybar).
swaybar_status
| true
|
9d0e78c95cfdeab82a203cd2c741c8658c850edc
|
Shell
|
justin-lyon/sfdx-bin
|
/bin/lib/sfdx.sh
|
UTF-8
| 2,682
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
# Thin wrappers around the sfdx CLI used by the project's bin scripts.
# Import utilities
. ./scripts/lib/utilities.sh
# Print SFDX Info
# Shows CLI version, core plugins and the authorized org list.
get_info () {
echo "*** Print SFDX Info"
sfdx --version
sfdx plugins --core
sfdx force:org:list
}
# Login to a Sandbox by JWT (non-interactive bearer flow).
#   $1 connected-app client id   $2 path to the JWT key file
#   $3 username                  $4 alias to set
#   $5 instance URL
jwt_login () {
  echo "*** Logging in to $4 as $3 at $5."
  # Quote every argument so values containing spaces survive word-splitting.
  sfdx force:auth:jwt:grant -d \
    --clientid "$1" \
    --jwtkeyfile "$2" \
    --username "$3" \
    --setalias "$4" \
    --instanceurl "$5"
}
# Web login to sandbox or prod; marks the org as the default Dev Hub.
#   $1 (optional) alias for the hub, defaults to "DevHub".
web_login () {
  HUB_ALIAS=${1:-"DevHub"}
  echo "*** Initialize web login to $HUB_ALIAS"
  # Quoted so an alias containing spaces is passed as a single argument.
  sfdx force:auth:web:login \
    --setdefaultdevhubusername \
    --setalias "$HUB_ALIAS"
}
# Delete a Scratch Org.
#   $1 - org alias/username to delete (runs without a confirmation prompt)
delete_org () {
echo "*** Removing old scratch org, $1"
sfdx force:org:delete \
--noprompt \
--targetusername $1
}
# Create a new Scratch Org and make it the default username.
#   $1 alias   $2 (optional) duration in days, defaults to 10
#   $3 scratch-org definition file
create_scratch () {
  DURATION=${2:-10}
  echo "*** Creating scratch Org. Alias: $1, for $DURATION days."
  # Quote every expansion so aliases/paths with spaces survive.
  sfdx force:org:create \
    --setdefaultusername \
    --setalias "$1" \
    --durationdays "$DURATION" \
    --definitionfile "$3"
}
# Push local to a Scratch Org.
#   $1 - target org alias/username
source_push () {
echo "*** Pushing metadata to $1"
sfdx force:source:push \
--targetusername $1
}
# Pull changes from a Scratch Org.
#   $1 - target org alias/username
source_pull () {
echo "*** Pulling changes from $1"
sfdx force:source:pull \
--targetusername $1
}
# Retrieve the source described by manifest/package.xml.
# NOTE(review): $1 is echoed but never passed to sfdx, so this always acts
# on the default org -- confirm whether --targetusername "$1" was intended.
source_retrieve () {
echo "*** Retrieving changes from $1"
sfdx force:source:retrieve \
--manifest manifest/package.xml
}
# Deploy the manifest/package.xml source to org $1.
source_deploy () {
echo "*** Deploying changes to $1"
sfdx force:source:deploy \
--targetusername $1 \
--manifest manifest/package.xml
}
# Check-only deploy (validation) of manifest/package.xml against org $1,
# running local tests.
source_validate () {
echo "*** Validating changes to $1"
sfdx force:source:deploy \
--targetusername $1 \
--manifest manifest/package.xml \
--checkonly \
--testlevel RunLocalTests
}
# Import Data to scratch org
# Requires data path $2=data/my-plan.json
#   $1 - target org alias/username, $2 - tree-import plan file
data_import () {
echo "*** Importing data from $2 to $1"
sfdx force:data:tree:import \
--targetusername $1 \
--plan $2
}
# Assign one Permission Set
#   $1 - target org alias/username, $2 - permission set name
assign_permset () {
echo "*** Assigning $2 Permission Set in $1"
sfdx force:user:permset:assign \
--targetusername $1 \
--permsetname $2
}
# Usage: $ bulk_assign_permsets $ORG_ALIAS $PERMSET_ONE $PERMSET_TWO $PERMSET_ETC
# ALT Usage: $ bulk_assign_permsets $1 ${@:2}
# Assigns each Permission Set named in $2.. to the org aliased by $1.
bulk_assign_permsets () {
  local i
  for i in "${@:2}"
  do
    # Quote both values so aliases/permset names with spaces survive.
    assign_permset "$1" "$i"
  done
}
# Run All Local Tests in Scratch Org (code coverage enabled via -c).
#   $1 - target org alias/username
# disable_error_trapping / handle_error come from scripts/lib/utilities.sh.
run_local_tests () {
  disable_error_trapping
  echo "*** Running All Local Apex Tests..."
  # BUG FIX: the target-org flag was spelled "-targetusername" (single
  # dash), which sfdx rejects; it must be "--targetusername".
  sfdx force:apex:test:run -c \
    --resultformat human \
    --testlevel RunLocalTests \
    --targetusername "$1"
  handle_error $RETURN_CODE
}
| true
|
2a5cf4d9dcf2842edc62f108a366ca9601249eca
|
Shell
|
j-dr/l-addgals
|
/src/scripts/make_finishing_files.sh
|
UTF-8
| 7,408
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate the "finishing" scripts for a BCC catalog run: fills in shell/IDL
# templates with the paths for this run and stages them under $OUTPATH/scripts.
#variables that get passed
OUTPATH=$1
NAME=$2
SHEARBASE=$3
FINAL_NAME=$4
SCRIPTS_DIR=${OUTPATH}/scripts
mkdir -p $SCRIPTS_DIR
#the shapes file
SHAPES_TEMPLATE=make_shapes_template.sh
SHAPES_EXE=${SCRIPTS_DIR}/make_shapes.sh
RUN_SHAPES_TEMPLATE=run_shapes.sh
RUN_SHAPES_EXE=${SCRIPTS_DIR}/run_shapes.sh
RUN_ALL_SHAPES_TEMPLATE=run_all_shapes.sh
RUN_ALL_SHAPES_EXE=${SCRIPTS_DIR}/run_all_shapes.sh
# Substitute the run name into the template (sed pattern uses ':' delimiters
# because the replacements contain '/').
sed -e 's:DUMMY_NAME:'PO_$NAME':'\
< $SHAPES_TEMPLATE > $SHAPES_EXE
chmod 744 $SHAPES_EXE
cp $RUN_SHAPES_TEMPLATE $RUN_SHAPES_EXE
cp $RUN_ALL_SHAPES_TEMPLATE $RUN_ALL_SHAPES_EXE
mkdir -p ${SCRIPTS_DIR}/shapes_logs
#the finalize routines
FINALIZE_FILE=finalize_bcc_catalog.sav
RUN_FINALIZE_TEMPLATE=finalize_bcc_catalog_template.sh
RUN_FINALIZE=${SCRIPTS_DIR}/finalize_bcc_catalog.sh
RUN_ALL_FINALIZE=finalize_all_bcc_catalog.sh
mkdir -p ${SCRIPTS_DIR}/finalize_logs
cp $FINALIZE_FILE $SCRIPTS_DIR
cp $RUN_ALL_FINALIZE $SCRIPTS_DIR
sed -e 's:INPATH=:INPATH='$OUTPATH':'\
-e 's:OUTPATH=:OUTPATH='$OUTPATH':'\
-e 's:SHEARBASE=:SHEARBASE='$SHEARBASE':'\
-e 's:ONAME=:ONAME='$FINAL_NAME':'\
-e 's:FBASE=:FBASE=PO_'$NAME':'\
< $RUN_FINALIZE_TEMPLATE > ${RUN_FINALIZE}
chmod 744 $RUN_FINALIZE
#the DR8 training set
DR8_TRAINING_SET=${OUTPATH}/photoz_DR8/${FINAL_NAME}_DR8_training_set.fit
DR8_TRAINING_SET2=${OUTPATH}/photoz_DR8/${FINAL_NAME}_DR8_training_set_sdss_mag.fit
DR8_TRAINING_TEMPLATE=get_dr8_training_set_template.idl
DR8_TRAINING=${SCRIPTS_DIR}/get_dr8_training_set.idl
RUN_DR8=get_dr8_training_set.sh
# NOTE(review): the OUTFILE1/OUTFILE2 replacements open a double quote that is
# never closed here -- presumably the closing quote lives in the template
# line being edited; confirm against get_dr8_training_set_template.idl.
sed -e 's:OUTPATH=:outpath="'$OUTPATH'/photoz_DR8/":'\
-e 's:PATH=:path="'$OUTPATH'":'\
-e 's:TRUTH_BASE=:truth_base="'$FINAL_NAME'_truth":'\
-e 's:SDSS_BASE=:sdss_base="'$FINAL_NAME'_sdss_mag":'\
-e 's:OUTFILE1=:outfile1="'$DR8_TRAINING_SET':'\
-e 's:OUTFILE2=:outfile2="'$DR8_TRAINING_SET2':'\
< $DR8_TRAINING_TEMPLATE > $DR8_TRAINING
cp $RUN_DR8 ${SCRIPTS_DIR}
#the DES training set
DES_TRAINING_SET=${OUTPATH}/photoz/${FINAL_NAME}_Optimistic_training_set.fit
DES_TRAINING_TEMPLATE=get_optimistic_bcc_training_set_template.idl
DES_TRAINING=${SCRIPTS_DIR}/get_optimistic_bcc_training_set.idl
RUN_DES_TRAINING=get_optimistic_bcc_training_set.sh
sed -e 's:FBASE=:fbase = "'$FINAL_NAME'_truth":'\
-e 's:OUTFILE=:outfile="'$DES_TRAINING_SET':'\
< $DES_TRAINING_TEMPLATE > $DES_TRAINING
cp $RUN_DES_TRAINING ${SCRIPTS_DIR}
#the photo-z run/submission scripts
PHOTOZ_SAV_FILE=run_zcarlos_bcc.sav
PHOTOZ_RUN_TEMPLATE=run_zcarlos_bcc_template.sh
PHOTOZ_RUN_FILE=${SCRIPTS_DIR}/run_zcarlos_bcc.sh
PHOTOZ_RUN_ALL_FILE=run_all_zcarlos_bcc.sh
GFILE=${OUTPATH}/truth/${FINAL_NAME}_truth
DR8_PHOTOZDIR=${OUTPATH}/photoz_DR8
DES_PHOTOZDIR=${OUTPATH}/photoz
DR8_OBSFILE=${OUTPATH}/DR8/${FINAL_NAME}_sdss_mag
DR8_OFILE=${DR8_PHOTOZDIR}/${FINAL_NAME}_DR8_zcarlos
DES_OBSFILE=${OUTPATH}/truth/${FINAL_NAME}_truth
DES_OFILE=${DES_PHOTOZDIR}/${FINAL_NAME}_zcarlos
CHECK_DR8_PHOTOZ_FILE=check_dr8_photoz.sh
CHECK_PHOTOZ_FILE=check_photoz.sh
mkdir -p $DR8_PHOTOZDIR
mkdir -p $DES_PHOTOZDIR
mkdir -p ${SCRIPTS_DIR}/photoz_logs
cp $PHOTOZ_SAV_FILE $SCRIPTS_DIR
cp $PHOTOZ_RUN_ALL_FILE $SCRIPTS_DIR
cp $CHECK_DR8_PHOTOZ_FILE $SCRIPTS_DIR
cp $CHECK_PHOTOZ_FILE $SCRIPTS_DIR
# ${PIXEL} below is single-quoted on purpose: it is expanded later by the
# generated run script, not here.
sed -e 's:GFILE=:GFILE='$GFILE'.${PIXEL}.fit:'\
-e 's:DR8_TRAINING_SET=:DR8_TRAINING_SET='$DR8_TRAINING_SET2':'\
-e 's:DR8_OBSFILE=:DR8_OBSFILE='$DR8_OBSFILE'.${PIXEL}.fit:'\
-e 's:DR8_OFILE=:DR8_OFILE='$DR8_OFILE'.${PIXEL}.fit:'\
-e 's:DES_TRAINING_SET=:DES_TRAINING_SET='$DES_TRAINING_SET':'\
-e 's:DES_OBSFILE=:DES_OBSFILE='$DES_OBSFILE'.${PIXEL}.fit:'\
-e 's:DES_OFILE=:DES_OFILE='$DES_OFILE'.${PIXEL}.fit:'\
< $PHOTOZ_RUN_TEMPLATE > $PHOTOZ_RUN_FILE
chmod 744 $PHOTOZ_RUN_FILE
#the rotation scripts
ROT_CAT=rotate_catalog.py
ROT_TOOLS=rot_mock_tools.py
ROT_SH=rotate_catalog.sh
ROT_ALL_TEMPLATE=rotate_all_catalogs_template.sh
ROT_ALL=${SCRIPTS_DIR}/rotate_all_catalogs.sh
ROT_OBSDIR=${OUTPATH}/obs_rotated
ROT_TRUTHDIR=${OUTPATH}/truth_rotated
RTIN=${OUTPATH}/truth/${FINAL_NAME}_truth
RTOUT=${ROT_TRUTHDIR}/${FINAL_NAME}_truth
ROIN=${OUTPATH}/obs/${FINAL_NAME}
ROOUT=${ROT_OBSDIR}/${FINAL_NAME}
mkdir -p $ROT_OBSDIR
mkdir -p $ROT_TRUTHDIR
mkdir -p ${SCRIPTS_DIR}/rotate_logs
cp $ROT_CAT $SCRIPTS_DIR
cp $ROT_TOOLS $SCRIPTS_DIR
cp $ROT_SH $SCRIPTS_DIR
sed -e 's:FIN_TRUTH=:FIN_TRUTH='$RTIN'.$i:'\
-e 's:FOUT_TRUTH=:FOUT_TRUTH='$RTOUT'.$i:'\
-e 's:FIN_OBS=:FIN_OBS='$ROIN'.$i:'\
-e 's:FOUT_OBS=:FOUT_OBS='$ROOUT'.$i:'\
< $ROT_ALL_TEMPLATE > $ROT_ALL
chmod 744 $ROT_ALL
#the mask scripts
MASK_SAV=mask_bcc_pixel.sav
RUN_MASK_TEMPLATE=mask_bcc_pixel_template.sh
RUN_MASK=${SCRIPTS_DIR}/mask_bcc_pixel.sh
MASK_ALL=mask_all_pixels.sh
mkdir -p ${SCRIPTS_DIR}/mask_logs
mkdir -p ${OUTPATH}/mask
cp $MASK_SAV ${SCRIPTS_DIR}
cp $MASK_ALL $SCRIPTS_DIR
# NOTE(review): the replacement writes "OUBASE=" (not "OUTBASE=") -- looks
# like a typo; confirm against what mask_bcc_pixel_template.sh expects.
sed -e 's:INBASE=:INBASE='$ROOUT':'\
-e 's:OUTBASE=:OUBASE=../mask/'$FINAL_NAME':'\
< $RUN_MASK_TEMPLATE > $RUN_MASK
chmod 744 $RUN_MASK
#single rotate and mask file
ROT_MASK_TEMPLATE=rotate_and_mask_template.sh
ROT_MASK=${SCRIPTS_DIR}/rotate_and_mask.sh
RUN_ROT_MASK=run_rotate_and_mask.sh
RUN_ALL_ROT_MASK=run_all_rotate_and_mask.sh
CHECK_MASK=check_mask.sh
mkdir -p ${SCRIPTS_DIR}/rotate_and_mask_logs
sed -e 's:FIN_TRUTH=:FIN_TRUTH='$RTIN':'\
-e 's:FOUT_TRUTH=:FOUT_TRUTH='$RTOUT':'\
-e 's:FIN_OBS=:FIN_OBS='$ROIN':'\
-e 's:FOUT_OBS=:FOUT_OBS='$ROOUT':'\
-e 's:MASK_OUT=:MASK_OUT=../mask/'${FINAL_NAME}_mask':'\
< $ROT_MASK_TEMPLATE > $ROT_MASK
chmod 744 $ROT_MASK
cp $RUN_ROT_MASK $SCRIPTS_DIR
cp $RUN_ALL_ROT_MASK $SCRIPTS_DIR
cp $CHECK_MASK $SCRIPTS_DIR
#script for creating the index files
INDEX_TEMPLATE=make_index_files_template.sh
INDEX_SCRIPT=${OUTPATH}/make_index_files.sh
# Fixed NFS paths for the star and QSO auxiliary catalogs.
STARS_DIR=/nfs/slac/g/ki/ki19/des/mbusha/catalogs/Stars/reformatted/
STARS_TRUTH=${STARS_DIR}/Aardvark_0.5c_truth_stars
STARS_OBS=${STARS_DIR}/Aardvark_0.5c_stars
ADDQSO_DIR=/nfs/slac/g/ki/ki19/des/mbusha/catalogs/QSO/Brazil/v1.0/
ADDQSO_TRUTH=${ADDQSO_DIR}/truth/ADDQSO_v1.0_truth
ADDQSO_OBS=${ADDQSO_DIR}/obs/ADDQSO_v1.0
ADDQSO_MASK=${ADDQSO_DIR}/mask/ADDQSO_v1.0_mask
DESQSO_DIR=/nfs/slac/g/ki/ki19/des/mbusha/catalogs/QSO/DESQSO/
DESQSO_TRUTH=${DESQSO_DIR}/truth/DESQSO_truth
DESQSO_OBS=${DESQSO_DIR}/obs/DESQSO
DESQSO_VHS=${DESQSO_DIR}/VHS/DESQSO
DESQSO_MASK=${DESQSO_DIR}/mask/DESQSO_mask
sed -e 's:DUMMY_NAME:'$FINAL_NAME':g'\
-e 's:DUMMY_STARS_TRUTH:'$STARS_TRUTH':'\
-e 's:DUMMY_STARS_OBS:'$STARS_OBS':'\
-e 's:DUMMY_ADDQSO_TRUTH:'$ADDQSO_TRUTH':'\
-e 's:DUMMY_ADDQSO_OBS:'$ADDQSO_OBS':'\
-e 's:DUMMY_ADDQSO_MASK:'$ADDQSO_MASK':'\
-e 's:DUMMY_DESQSO_TRUTH:'$DESQSO_TRUTH':'\
-e 's:DUMMY_DESQSO_OBS:'$DESQSO_OBS':'\
-e 's:DUMMY_DESQSO_VHS:'$DESQSO_VHS':'\
-e 's:DUMMY_DESQSO_MASK:'$DESQSO_MASK':'\
< $INDEX_TEMPLATE > $INDEX_SCRIPT
chmod 744 $INDEX_SCRIPT
#script for creating the halo catalog
HALO_SCRIPT=make_halo_catalog.sh
HALO_IDL_TEMPLATE=make_halo_catalog_template.idl
HALO_IDL=${SCRIPTS_DIR}/make_halo_catalog.idl
mkdir -p ${OUTPATH}/halos
cp $HALO_SCRIPT $SCRIPTS_DIR
sed -e 's:PATH=:path = "'$OUTPATH'/individual_box_files/":'\
-e 's:OUTNAME1=:outname1 = "PO_'$NAME'_1050_halos":'\
-e 's:OUTNAME2=:outname2 = "PO_'$NAME'_2600_halos":'\
-e 's:OUTNAME3=:outname3 = "PO_'$NAME'_4000_halos":'\
-e 's:OUTBASE=:outbase = "../halos/'$FINAL_NAME'_halos":'\
< $HALO_IDL_TEMPLATE > $HALO_IDL
| true
|
6966f55bbf0e540c876d73b71b1ad468104c30fa
|
Shell
|
cipriantruica/TM_TESTS
|
/run.sh
|
UTF-8
| 338
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the topic-modelling benchmark five times over the cleaned news corpus,
# writing each run's output to output/news_cleanText_<run>.
NUM_ITER=2                      # iterations per run
NUM_TOPICS_NEWS=20              # number of topics to fit
NUM_CORES=1                     # worker processes
FILE_NAME_CONF_CLEAN="dataset/" # input corpus directory
OUTPUT_FILE="output/"           # output directory prefix
# change the compiler
#PyCC="python"
PyCC="/usr/share/anaconda/bin/python"
# Brace expansion replaces the external `seq` call; every expansion is
# quoted so paths containing spaces cannot word-split.
for i in {1..5}
do
    "$PyCC" tm_all_default.py "$FILE_NAME_CONF_CLEAN" "$NUM_TOPICS_NEWS" "$NUM_ITER" "$NUM_CORES" > "${OUTPUT_FILE}news_cleanText_${i}"
done
| true
|
61690ba9d2fd9f9cfcdc0006bf1d02aaadf547c6
|
Shell
|
punisherVX/kfk-setup
|
/code/setup/setup-3-tools.sh
|
UTF-8
| 4,204
| 2.765625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Provision the "tools" host: install Docker, add /etc/hosts entries for the
# Kafka/ZooKeeper cluster, then write and launch the web-UI containers below.
# NOTE(review): the bare `exit` after usermod means nothing past that point
# runs when this file is executed as a script -- it is meant to be followed
# step by step (log out/in so the docker group membership takes effect).
sudo apt-get update
# Install packages to allow apt to use a repository over HTTPS:
sudo apt-get install -y \
apt-transport-https \
ca-certificates \
curl \
software-properties-common
# Add Docker's official GPG key:
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
# set up the stable repository.
sudo add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) \
stable"
# install docker
sudo apt-get update
sudo apt-get install -y docker-ce docker-compose
# give ubuntu permissions to execute docker
sudo usermod -aG docker $(whoami)
# log out
exit
# log back in
# make sure docker is working
docker run hello-world
# Add hosts entries (mocking DNS) - put relevant IPs here
echo "
10.29.75.150 tbd-lago-tools tltools
10.29.75.151 tbd-kafka1 tkfk1
10.29.75.151 tbd-zookeeper1 tzk1
10.29.75.152 tbd-kafka2 tkfk2
10.29.75.152 tbd-zookeeper2 tzk2
10.29.75.153 tbd-kafka3 tkfk3
10.29.75.153 tbd-zookeeper3 tzk3
" | sudo tee --append /etc/hosts
mkdir tools
# Create the docker compose file for ZooNavigator
# (web UI on :8001, API on :9001, both on the host network).
echo "
version: '2'
services:
# https://github.com/elkozmon/zoonavigator
web:
image: elkozmon/zoonavigator-web:latest
container_name: zoonavigator-web
network_mode: host
environment:
API_HOST: 'localhost'
API_PORT: 9001
SERVER_HTTP_PORT: 8001
depends_on:
- api
restart: always
api:
image: elkozmon/zoonavigator-api:latest
container_name: zoonavigator-api
network_mode: host
environment:
SERVER_HTTP_PORT: 9001
restart: always
" | tee --append tools/zoonavigator-docker-compose.yml
# Create the docker compose file for Kafka Manager
# NOTE(review): the inner double quotes around ZOOKEEPER_HOSTS sit inside an
# already double-quoted echo string, so the shell strips them and the written
# YAML has an unquoted host list -- harmless for YAML, but confirm intent.
echo "
version: '2'
services:
# https://github.com/yahoo/kafka-manager
kafka-manager:
image: qnib/plain-kafka-manager
network_mode: host
environment:
ZOOKEEPER_HOSTS: "tbd-zookeeper1:2181,tbd-zookeeper2:2181,tbd-zookeeper3:2181"
APPLICATION_SECRET: change_me_please
restart: always
" |tee --append tools/kafka-manager-docker-compose.yml
# Create the docker compose file for Kafka Topics UI
# (schema registry :8081 -> REST proxy :8082 -> topics UI; host network).
echo "
version: '2'
services:
# https://github.com/confluentinc/schema-registry
confluent-schema-registry:
image: confluentinc/cp-schema-registry:3.2.1
network_mode: host
environment:
SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: tbd-zookeeper1:2181,tbd-zookeeper2:2181,tbd-zookeeper3:2181/kafka
SCHEMA_REGISTRY_LISTENERS: http://0.0.0.0:8081
# please replace this setting by the IP of your web tools server
SCHEMA_REGISTRY_HOST_NAME: 'tltools'
restart: always
# https://github.com/confluentinc/kafka-rest
confluent-rest-proxy:
image: confluentinc/cp-kafka-rest:3.2.1
network_mode: host
environment:
KAFKA_REST_BOOTSTRAP_SERVERS: tbd-kafka1:9092,tbd-kafka2:9092,tbd-kafka3:9092
KAFKA_REST_ZOOKEEPER_CONNECT: tbd-zookeeper1:2181,tbd-zookeeper2:2181,tbd-zookeeper3:2181/kafka
KAFKA_REST_LISTENERS: http://0.0.0.0:8082/
KAFKA_REST_SCHEMA_REGISTRY_URL: http://localhost:8081/
# please replace this setting by the IP of your web tools server
KAFKA_REST_HOST_NAME: 'tltools'
depends_on:
- confluent-schema-registry
restart: always
# https://github.com/Landoop/kafka-topics-ui
kafka-topics-ui:
image: landoop/kafka-topics-ui:0.9.2
network_mode: host
environment:
KAFKA_REST_PROXY_URL: http://localhost:8082
PROXY: 'TRUE'
depends_on:
- confluent-rest-proxy
restart: always
" | tee --append tools/kafka-topics-ui-docker-compose.yml
# Sanity checks: the tools host must reach every ZooKeeper and Kafka broker
# before the UI stacks are started.
# make sure you can access the zookeeper endpoints
nc -vz tbd-zookeeper1 2181
nc -vz tbd-zookeeper2 2181
nc -vz tbd-zookeeper3 2181
# make sure you can access the kafka endpoints
nc -vz tbd-kafka1 9092
nc -vz tbd-kafka2 9092
nc -vz tbd-kafka3 9092
# launch the containers
# Zoo Navigator runs on port 8001
# Kafka Manager runs on port 9000
# Kafka Topics UI runs on port 8000
docker-compose -f tools/kafka-manager-docker-compose.yml up -d
docker-compose -f tools/kafka-topics-ui-docker-compose.yml up -d
docker-compose -f tools/zoonavigator-docker-compose.yml up -d
| true
|
1916a8b7f7cb701cdd1058ade2abbc6aa5f13930
|
Shell
|
shellwedance/rook
|
/tests/scripts/create-bluestore-partitions.sh
|
UTF-8
| 1,226
| 3.8125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Partition $DISK for a Ceph bluestore OSD test: wipe it, create optional
# block.db/block.wal metadata partitions, then a large "block" data partition.
set -ex
#############
# VARIABLES #
#############
# Positional fallbacks: $1 = target disk device, $2 = bluestore layout type.
: "${BLUESTORE_TYPE:=${2}}"
: "${DISK:=${1}}"
SIZE=2048M
#############
# FUNCTIONS #
#############
# Destroy all partition data/metadata on $DISK and write a fresh GPT label.
function wipe_disk {
sudo sgdisk --zap-all --clear --mbrtogpt -g -- "$DISK"
sudo dd if=/dev/zero of="$DISK" bs=1M count=10
sudo parted -s "$DISK" mklabel gpt
sudo partprobe "$DISK"
sudo udevadm settle
sudo parted "$DISK" -s print
}
# Append a $SIZE partition named $1 (e.g. "block.db") to $DISK.
function create_partition {
sudo sgdisk --new=0:0:+"$SIZE" --change-name=0:"$1" --mbrtogpt -- "$DISK"
}
# Use all remaining space for the bluestore data partition named "block".
function create_block_partition {
sudo sgdisk --largest-new=0 --change-name=0:'block' --mbrtogpt -- "$DISK"
}
########
# MAIN #
########
# First wipe the disk
wipe_disk
# Lay out metadata partitions according to the requested bluestore type.
case "$BLUESTORE_TYPE" in
block.db)
create_partition block.db
;;
block.wal)
# WAL setups get both a block.db and a block.wal partition.
create_partition block.db
create_partition block.wal
;;
*)
echo "invalid bluestore configuration $BLUESTORE_TYPE" >&2
exit 1
esac
# Create final block partitions
create_block_partition
# Inform the kernel of partition table changes
sudo partprobe "$DISK"
# Wait the udev event queue, and exits if all current events are handled.
sudo udevadm settle
# Print drives
sudo lsblk
sudo parted "$DISK" -s print
| true
|
1cfd8b74ca5146d7196c0079fad8c3413ad332fc
|
Shell
|
m-1-k-3/wick
|
/formulas/dnsmasq/files/dnsmasq
|
UTF-8
| 359
| 3.015625
| 3
|
[
"MIT-0",
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env sh
# dhclient enter hook: force 127.0.0.1 to the front of the new domain name
# server list, working around dhclient not honoring the "prepend" config.
#
# See: http://www.fidian.com/problems-only-tyler-has/dhclient-not-honoring-prepend-config
#
# This is POSIX sh, not Bash!
case "${new_domain_name_servers:-}" in
'') ;;
*) new_domain_name_servers="127.0.0.1 $new_domain_name_servers" ;;
esac
| true
|
2fec71a6f2fe23bec964308f7bcebb5ab9bd1518
|
Shell
|
tinslice/docker-postgres
|
/fs/scripts/prepare-env-and-run.sh
|
UTF-8
| 2,695
| 3.75
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
set -e
# Log a message to stdout, prefixed with a "YYYY-MM-DD HH:MM:SS TZ" stamp.
_log() {
  printf '%s %s\n' "$(date +"%Y-%m-%d %T %Z")" "${1}"
}
# Resolve the directory holding this script and its helper scripts.
export PG_SCRIPTS_PATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
export PATH=/usr/lib/postgresql/${PG_VERSION}/bin:$PATH
PG_CONFIG="/etc/postgresql/${PG_VERSION}/main/postgresql.conf"
PG_DATA_PATH="/var/lib/postgresql/data"
# Honour a caller-supplied PGDATA override.
if [ -n "$PGDATA" ]; then
PG_DATA_PATH="$PGDATA"
fi
export PG_SQL_SCRIPTS_PATH="/etc/postgresql/scripts"
# Remove stale readiness/pid markers left by a previous container run.
rm -rf $PG_DATA_PATH/container_ready
rm -rf $PG_DATA_PATH/postmaster.pid
if [ -n "$PG_PORT" ]; then
_log "== set postgresql port to '$PG_PORT'"
sed -i "s/^port\\ =.*/port\\ =\\ $PG_PORT/g" $PG_CONFIG
fi
_log "== set default postgresql configuration"
# echo "host all all 0.0.0.0/0 md5" >> /etc/postgresql/${PG_VERSION}/main/pg_hba.conf
# Edit pg_hba.conf and postgresql.conf in place: trust local connections,
# listen on all addresses, and enable statement/lock logging.
sed -i "s|^local\\ *all\\ *all.*|local\\ all\\ all\\ trust|g" /etc/postgresql/${PG_VERSION}/main/pg_hba.conf
sed -i "s|^[#]*listen_addresses\\ =.*|listen_addresses\\ =\\ '*'|g" $PG_CONFIG
sed -i "s|^[#]*data_directory\\ =.*|data_directory\\ =\\ '${PG_DATA_PATH}'|g" $PG_CONFIG
sed -i "s|^[#]*log_min_duration_statement\\ =.*|log_min_duration_statement\\ =\\ 500|g" $PG_CONFIG
sed -i "s|^[#]*log_checkpoints\\ =.*|log_checkpoints\\ =\\ on|g" $PG_CONFIG
sed -i "s|^[#]*log_connections\\ =.*|log_connections\\ =\\ on|g" $PG_CONFIG
sed -i "s|^[#]*log_disconnections\\ =.*|log_disconnections\\ =\\ on|g" $PG_CONFIG
sed -i "s|^[#]*log_duration\\ =.*|log_duration\\ =\\ off|g" $PG_CONFIG
sed -i "s|^[#]*log_lock_waits\\ =.*|log_lock_waits\\ =\\ on|g" $PG_CONFIG
sed -i "s|^[#]*log_statement\\ =.*|log_statement\\ =\\ none|g" $PG_CONFIG
PG_SHARED_LIBS='pg_stat_statements,pg_repack'
sed -i "s|^[#]*shared_preload_libraries\\ =.*|shared_preload_libraries\\ =\\ '${PG_SHARED_LIBS}'|g" $PG_CONFIG
export PG_FIRST_START=0
if [ -z "$POSTGRES_ENCODING" ]; then
export POSTGRES_ENCODING="UTF8"
fi
# Initialise the cluster only when the data directory is empty.
if [ ! "`ls -A ${PG_DATA_PATH}`" ]; then
_log "== initialise postgres"
initdb -E $POSTGRES_ENCODING -D $PG_DATA_PATH
export PG_FIRST_START=1
fi
_log "== starting postgresql server"
# Start postgres in the background; readiness is polled just below.
/usr/lib/postgresql/${PG_VERSION}/bin/postgres -D /var/lib/postgresql/${PG_VERSION}/main -c config_file=${PG_CONFIG} &
while ! pg_isready > /dev/null 2> /dev/null; do
_log ">> waiting for postgresql server to start"
sleep 1
done
if [ $PG_FIRST_START -eq 1 ]; then
mkdir -p $PG_SQL_SCRIPTS_PATH
fi
. ${PG_SCRIPTS_PATH}/configure-db.sh
# run sql files defined in the RUN_SCRIPTS env variable
. ${PG_SCRIPTS_PATH}/run-sql-scripts.sh
# run sql commands defined in the RUN_SQL env variable
. ${PG_SCRIPTS_PATH}/run-sql-commands.sh
# Signal readiness to health checks, then keep PID 1 alive forever.
touch $PG_DATA_PATH/container_ready
_log "== container ready"
while true; do sleep 10;done
| true
|
913cf1ed093113611142ebc8dde7714316f1a082
|
Shell
|
fayizk1/go-carbon
|
/deploy/go-carbon.init.centos
|
UTF-8
| 1,836
| 3.984375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
#
# chkconfig: - 91 35
# description: Starts and stops the go-carbon daemon
#
# pidfile: /var/run/go-carbon.pid
# config: /usr/local/etc/carbon.conf
# SysV init script: paths for the binary, pidfile and config.
carbon="/usr/local/bin/go-carbon"
prog=$(basename $carbon)
pidfile="/var/run/go-carbon.pid"
config="/usr/local/etc/go-carbon.conf"
# Source function library.
# Provides daemon/killproc/status used below; bail out if neither exists.
if [ -f /etc/init.d/functions ] ; then
. /etc/init.d/functions
elif [ -f /etc/rc.d/init.d/functions ] ; then
. /etc/rc.d/init.d/functions
else
exit 1
fi
# Avoid using root's TMPDIR
unset TMPDIR
# Optional per-service overrides.
if [ -f /etc/sysconfig/$prog ]; then
. /etc/sysconfig/$prog
fi
# Check that config exists.
[ -f $config ] || exit 6
RETVAL=0
# Start go-carbon as a daemon; record success in the subsys lock file.
start() {
echo -n $"Starting $prog services: "
daemon --pidfile $pidfile $carbon -config $config -pidfile $pidfile -daemon
RETVAL=$?
echo
[ $RETVAL -eq 0 ] && touch /var/lock/subsys/$prog || \
RETVAL=1
return $RETVAL
}
# Stop the daemon and clear the subsys lock file on success.
# NOTE(review): killproc is called with only -p $pidfile and no program
# name -- some init-function implementations require the name; confirm.
stop() {
echo -n $"Shutting down $prog services: "
killproc -p $pidfile
RETVAL=$?
echo
[ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/$prog
return $RETVAL
}
# Ask go-carbon to validate its configuration file.
configtest() {
$carbon -config $config -check-config
}
# Validate the config first; restart only if it parses.
restart() {
configtest || return $?
stop
start
}
# Report daemon status from the pidfile.
rhstatus() {
status -p $pidfile
return $?
}
# Allow status as non-root.
if [ "$1" = status ]; then
rhstatus
exit $?
fi
# Check that we can write to it... so non-root users stop here
[ -w $config ] || exit 4
# Dispatch on the requested init action.
case "$1" in
start)
start
;;
stop)
stop
;;
restart)
restart
;;
status)
rhstatus
;;
condrestart)
# Restart only if the service was running (subsys lock present).
[ -f /var/lock/subsys/$prog ] && restart || :
;;
configtest)
configtest
;;
*)
echo $"Usage: $0 {start|stop|restart|status|condrestart|configtest}"
exit 2
esac
exit $?
| true
|
0982a9a282bcca133e39804a2c74cb171d072dfa
|
Shell
|
koko004/screenfetch-space
|
/screenfetch-install-spaceshow.sh
|
UTF-8
| 836
| 2.671875
| 3
|
[] |
no_license
|
# Install screenfetch and compute a "used / total (percent)" disk summary
# over all real filesystems, then run screenfetch with the disk line shown.
apt-get install screenfetch
# Grand-total row of df, excluding pseudo/overlay filesystems.
totaldisk=$(df -h -x aufs -x tmpfs -x overlay -x drvfs --total 2>/dev/null | tail -1)
echo $totaldisk
disktotal=$(awk '{print $2}' <<< "${totaldisk}")
diskused=$(awk '{print $3}' <<< "${totaldisk}")
diskusedper=$(awk '{print $5}' <<< "${totaldisk}")
diskusage="${diskused} / ${disktotal} (${diskusedper})"
# Double the % so it survives later printf-style interpolation.
diskusage_verbose=$(sed 's/%/%%/' <<< "$diskusage")
echo $diskused
echo $disktotal
echo $diskusedper
echo $diskusage_verbose
# NOTE(review): $labelcolor and $textcolor are never defined in this script --
# presumably expected from the caller's environment; confirm.
mydisk=$(echo -e "$labelcolor Disk:$textcolor $diskusage")
echo $mydisk
screenfetch -d '+disk'
echo "nano ~/.bashrc y anadir al final screenfetch -d '+disk'"
| true
|
79b9f91b79100a065d1763879013a520fdde7ee2
|
Shell
|
gmcvicker/MHC
|
/sh/get_imgt_allele_seqs.sh
|
UTF-8
| 276
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
# For every gene listed in $GENE_FILE, run get_imgt_alleles.py and write the
# extracted allele sequences into that gene's own output directory.
GENE_FILE=$HOME/data/IMGT/gene_names.txt
SCRIPT=$HOME/proj/MHC/python/get_imgt_alleles.py
# Read straight from the file (no "cat | while" subshell); IFS= and -r
# preserve leading whitespace and literal backslashes in gene names.
while IFS= read -r gene;
do
    OUT_DIR=$HOME/data/IMGT/extracted/$gene
    mkdir -p "$OUT_DIR"
    echo "$gene" >&2
    python "$SCRIPT" --gene "$gene" "$OUT_DIR"
done < "$GENE_FILE"
| true
|
bee9ac4750859dfa46eeb0965d58f39ee6c2a561
|
Shell
|
tiger31/i3blocks-arrows
|
/i3arrows
|
UTF-8
| 2,378
| 3.875
| 4
|
[] |
no_license
|
#!/bin/zsh
# Parse options: -c <i3blocks config to read>, -o <generated config to write>.
while getopts ":c:o:" opt; do
case $opt in
c) file="$OPTARG"
;;
o) out="$OPTARG"
;;
\?) echo "Invalid option -$OPTARG" >&2
;;
esac
done
# Compute the effective background of the block just finished and append a
# background_next param after its declaration in $config (used by the bar to
# draw arrow separators with the neighbouring block's colour).
calc() {
#If we've found a background param in this block, then we should use the previous one
if [[ $background_found -eq 0 ]]; then
block_bg=$background_prev;
#If there was no background param, the default will be applied.
#It also means $background hasn't been rewritten, so we use it instead of the *_prev one
else
block_bg=$background;
background=$background_default;
fi
#Outer sed adds the background_next param after the block declaration
#Inner sed escapes "[]" ([block] -> \[block\])
config=$(sed "/$(sed 's/\[/\\\[/; s/\]/\\\]/' <<< $block)/a background_next=$block_bg" <<< $config);
}
#Read config from file, also skip comments
# (drops old background_next/script params and renames command= to script=).
config=$(grep "^[^#]" $file | sed '/^background_next=.*/d; /^script=.*/d; s/^command=\(.*\)/script=\1/g');
#Cycle over each line in config file
#I've been thinking about using sed/awk/perl for whole parsing
#But finally it's done in manual way
while IFS= read -r line
do
#If we found block declaration
if [[ $line =~ '^\[.*\]$' ]]; then
#If it's not the first declaration found, add the calculated bg to the previous block
if [[ -n $block ]]; then
calc;
fi
#Rewriting current block we're working with
block=$line;
#Also dropping background to "not found" state
background_found=1;
fi
#If line starts with "background=", then we found declaration of needed param
if [[ $line =~ '^background=' ]]; then
#Pushing bg state to "found"
background_found=0;
#Remembering background of prev block, we'll need it in future
background_prev=$background;
#Capturing color of block's bg
background=$(perl -le '${ARGV[0]} =~ /^background=(.*)$/; print $1' "$line");
#Or not block's
#If there was no block declaration at all, the param is the default one; store it
if [[ -z $block ]]; then
#As far as it's not background of block, dropping state
background_found=1;
background_default=$background;
fi
fi
done < <(printf '%s\n' "$config")
#Calculating background of last declared block. It's not handled in cycle
calc;
# Prepend the command= line, write the generated config and hand it to i3blocks.
config=$(sed '1s/^/command=i3arrows-build\n/' <<< $config);
echo $config > $out;
i3blocks -c "$out";
exit 0;
| true
|
1791df1941fb3adc0c2782d320700be669f102d3
|
Shell
|
zilpeakshay/awesome-bash-commands
|
/box.sh
|
UTF-8
| 1,368
| 2.578125
| 3
|
[
"CC0-1.0"
] |
permissive
|
#!/usr/bin/env bash
echo "โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ"
sleep 0.1
echo "โ โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ โ"
sleep 0.1
echo "โ โโโโ โโโโโ โโโโโ AWESOME BASH COMMANDS โโโโโ โ"
sleep 0.1
echo "โ โโโโ โโโฆโโ โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ โ"
sleep 0.1
echo "โ โโโ โโโโฉโโโ โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ โ"
sleep 0.1
echo "โ โโโ โโโโโยฐโ โโ A curated list of awesome โโโ โ"
sleep 0.1
echo "โ โโโโโโโโโโโโโโโโโ Bash useful commands โโโโโ โ"
sleep 0.1
echo "โ โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ โ"
sleep 0.1
echo "โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ"
sleep 0.1
echo
sleep 0.1
echo "https://github.com/joseluisq/awesome-bash-commands"
echo
| true
|
ad5aa55c7964926f0191e87918536e2c16d2556b
|
Shell
|
mahesh-mahajan/403Forbidden
|
/403ForbiddenFix.sh
|
UTF-8
| 326
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
# Fix an Apache "403 Forbidden" for a vhost: walk from the docroot up to /
# and make every directory on the path searchable (o+x) by others.
dir=/home/somevhostdomain.com/htdocs/
while true; do
    # Stop once we have climbed past the top-level directory /.
    # [[ ... || ... ]] replaces the deprecated, ambiguous [ -a/-o ] form.
    if [[ -z "$dir" || "$dir" == "/" ]]; then
        break
    fi
    echo "chmodding o+x $dir"
    # Make the directory executable (openable/traversable) by others;
    # -- and quoting protect paths that start with '-' or contain spaces.
    chmod o+x -- "$dir"
    # Go 'up' a directory.
    dir=$(dirname "$dir")
done
| true
|
f3220393b17c2f7dc6ca9d784a7776839873ed94
|
Shell
|
missingcharacter/janky-stuff
|
/bash/macos/mount_ntfs/mount_ntfs.sh
|
UTF-8
| 966
| 3.953125
| 4
|
[
"Unlicense"
] |
permissive
|
#!/usr/bin/env bash
# Remount external NTFS volumes read-write via ntfs-3g (macOS helper).
set -euo pipefail
# Split only on newlines/tabs so paths with spaces survive word-splitting.
IFS=$'\n\t'
# Emit 10 hex characters derived from $RANDOM (used to build a unique
# /Volumes/NTFS<suffix> mount-point name).
function 10_chars() {
  md5sum <<<"${RANDOM}" | head -c 10
}
# Return 0 iff the given disk slice is an NTFS volume that is currently
# mounted read-only (both properties reported by `diskutil info`).
#   $1 - device identifier, e.g. disk2s1
function is_ntfs_readonly() {
local DISK="${1}"
local RET_VALUE='1'
local DISK_INFO
DISK_INFO="$(diskutil info "${DISK}")"
if grep -q 'File System Personality: NTFS' <<<"${DISK_INFO}" \
&& grep -q 'Volume Read-Only: Yes' <<<"${DISK_INFO}"; then
RET_VALUE='0'
fi
return "${RET_VALUE}"
}
function mount_ntfs() {
local DISK="${1}"
if is_ntfs_readonly "${DISK}"; then
sudo diskutil unmount "/dev/${DISK}"
sudo /usr/local/bin/ntfs-3g \
"/dev/${DISK}" \
"/Volumes/NTFS$(10_chars)" \
-o local \
-o allow_other \
-o auto_xattr \
-o auto_cache \
-o noappledouble
fi
}
while IFS= read -r disk; do
if [[ -n ${disk} ]]; then
mount_ntfs "${disk}"
fi
done < <(diskutil list external | grep "Windows_NTFS\|Microsoft Basic Data" | rev | awk '{ print $1 }' | rev)
| true
|
b814c851e1c53fcfe2531c292a4d1ba02832a1c8
|
Shell
|
lucasponce/hawkular-alerts-demo
|
/04_agent-process.sh
|
UTF-8
| 707
| 3.21875
| 3
|
[
"Apache-2.0"
] |
permissive
|
create_msg () {
TIMESTAMP=$(date +%s%3N)
VALUE="DOWN"
if [ $1 -gt 0 ]
then
VALUE="UP"
fi
MSG="["
MSG="$MSG{"
MSG="$MSG\"id\":\"demo-avail\","
MSG="$MSG\"timestamp\":$TIMESTAMP,"
MSG="$MSG\"value\":\"$VALUE\""
MSG="$MSG}"
MSG="$MSG]"
echo $MSG
}
send_data () {
MSG=$(create_msg $1)
TENANT="my-organization"
CODE=$(curl -s -o /dev/null -w "%{http_code}" -X POST \
--header "Hawkular-Tenant: $TENANT" \
--header "Content-Type:application/json" \
--data "$MSG" \
http://localhost:8080/hawkular/alerts/data)
echo "Sent data [$CODE]"
echo "$MSG"
echo ""
return 0
}
while :
do
NUM_SERVERS=$(ps -ef | grep java | grep 'port-offset=150' | wc -l)
send_data $NUM_SERVERS
sleep 2s
done
| true
|
0a8f31c65e6d8bcdd7d9fbd18b02a3089bef5c50
|
Shell
|
laaners/progetto-labiagi_pick_e_delivery
|
/catkin_ws/src/srrg_cmake_modules/ci_scripts/append_gtest_library.sh
|
UTF-8
| 1,628
| 3.65625
| 4
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
##### tg This script is added in order to run tests on multiple package in the same git repo
##### libgtest.so is appended to the artifacts, this is necessary because of catkin build
##### have individual build folder for each package
#ds check input parameters
if [ "$#" -ne 2 ]; then
echo "ERROR: call as $0 PROJECT_ROOT_PATH PROJECT_NAME"
exit -1
fi
#ds parameters
PROJECT_ROOT_PATH="$1"
PROJECT_NAME="$2"
echo -e "\e[1;96m--------------------------------------------------------------------------------\e[0m"
echo -e "\e[1;96mbash version: ${BASH_VERSION}\e[0m"
cd "/root/workspace/"
ls -al
#ds determine gtest library location in the build folder
GTEST_LIBRARY_PATH=$(find "build/${PROJECT_NAME}" -name "libgtest.so")
if [ -z ${GTEST_LIBRARY_PATH} ]; then
GTEST_LIBRARY_PATH=$(find "build/${PROJECT_NAME}" -name "libgtestd.so")
fi
echo -e "\e[1;96mGTEST_LIBRARY_PATH='${GTEST_LIBRARY_PATH}'\e[0m"
if [ ! -z "$GTEST_LIBRARY_PATH" ]; then
cd ${PROJECT_ROOT_PATH}/artifacts/
tar xzf build.tar.gz
rm build.tar.gz
GTEST_LIBRARY_PATH_PREVIOUS=$(find "build/" -name "libgtest.so")
if [ -z ${GTEST_LIBRARY_PATH_PREVIOUS} ]; then
GTEST_LIBRARY_PATH_PREVIOUS=$(find "build/" -name "libgtestd.so")
fi
echo -e "\e[1;96mGTEST_LIBRARY_PATH_PREVIOUS='${GTEST_LIBRARY_PATH_PREVIOUS}'\e[0m"
cd "/root/workspace/"
tar czf ${PROJECT_ROOT_PATH}/artifacts/build.tar.gz "$GTEST_LIBRARY_PATH" "$GTEST_LIBRARY_PATH_PREVIOUS"
fi
#ds log available artifacts
ls -al "${PROJECT_ROOT_PATH}/artifacts/"
echo -e "\e[1;96m--------------------------------------------------------------------------------\e[0m"
| true
|
65beb3f8874e33254efe77294172238df5177ed4
|
Shell
|
saidiahd/OpenERP_Cloud_Configuration
|
/tools/kickstart/FirstBootMySqlConfigurations.sh
|
UTF-8
| 753
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/sh
#
sudo apt-get -y update
sudo apt-get -y upgrade
#
export INS="/home/yourself/installers"
export PRG="/home/yourself/programs"
export FAILURE_NOTICE="______Looks_like_it_failed______"
#
export SRV_CONFIG="https://raw.github.com/martinhbramwell/OpenERP_Cloud_Configuration/master"
#
export ADMIN_USERZ_UID=yourself
export ADMIN_USERZ_HOME=/home/$ADMIN_USERZ_UID
export ADMIN_USERZ_WORK_DIR=/home/$ADMIN_USERZ_UID/tmp
mkdir -p $ADMIN_USERZ_WORK_DIR
#
echo "Get MySql configurator"
#
# Obtain MySql Configurator script
cd ${PRG}/installTools
rm -f ./installMySql.sh
wget ${SRV_CONFIG}/tools/mysql/installMySql.sh
chmod +x ./installMySql.sh
#
#
echo "Running MySql configurator now ..."
#
./installMySql.sh
#
echo "Completed MySql configurator"
| true
|
0685cfbcc549b25b7c8b855cc9b3a5a7a98474af
|
Shell
|
ariesunitrends/dotfiles
|
/link.sh
|
UTF-8
| 254
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/sh -e
if [ $# != 0 ]
then
exit 1
fi
BIN=$(dirname $(readlink -f $0))
ln -sf $BIN/bash/bashrc ~/.bashrc
ln -sf $BIN/bash/bash_aliases ~/.bash_aliases
ln -sf $BIN/vim/vimrc ~/.vimrc
ln -sf $BIN/ssh/config ~/.ssh/config
chmod 0600 ~/.ssh/config
| true
|
9b8cd8e66bc2f767b49b29ec4b930f71caccaca7
|
Shell
|
simonhicks/csv-utils
|
/bin/untable
|
UTF-8
| 660
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/sh
show_help() {
echo ""
echo "USAGE: $(basename $0) [-h] [-w sep] [-s sep]"
echo ""
echo " w sep : the whitespace seperator used in the input table (default: \s)"
echo " s sep : the seperator to use in the output (default: ,)"
echo " h : print this help text"
echo ""
}
SEPARATOR=${CSV_UTILS_SEPARATOR:-,}
WHITESPACE='\s'
while getopts "hs:w:" opt
do
case "$opt" in
h)
show_help
exit 0
;;
s) SEPARATOR="$OPTARG"
;;
w) WHITESPACE="$OPTARG"
;;
esac
done
shift $((OPTIND-1))
sed -e 's/^\s*//' -e 's/\s*$//' -e "s/${WHITESPACE}${WHITESPACE}*/${SEPARATOR}/g"
| true
|
833662d8c030783f1fa5592c8f2f74b0e00c7236
|
Shell
|
falcon-computing/genome-release
|
/test/regression/regression_test/2_bqsr.bats
|
UTF-8
| 1,134
| 3.21875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bats
load ../../lib/common
helper_normalRun() {
#"Normal run for BQSR"
local -r id="$1"
local -r tag="$2"
run ${FCSBIN} baserecal \
-r ${ref_genome} \
-i $baseline_dir/bwa/${id}_marked.bam \
-o ${id}_BQSR.table \
--knownSites $db138_SNPs -f -L ${illumina_capture} ${tag}
echo "output = ${output}"
[ "$status" -eq 0 ]
[ -f ${id}_BQSR.table ]
}
helper_compareBQSR() {
#"Compare BQSR table against baseline"
local -r id="$1"
local -r tag="$2"
subjectBQSR="${id}_BQSR.table"
if [ "$tag" = "--gatk4" ];then
baselineBQSR="$baseline_dir/baserecal/4.0/${id}_BQSR.table"
else
baselineBQSR="$baseline_dir/baserecal/3.8/${id}_BQSR.table"
fi
run compare_bqsr "$subjectBQSR" "$baselineBQSR" "$id"
echo "${output}"
[ "$status" -eq 0 ]
}
@test "Normal run for BQSR GATK3: $id" {
helper_normalRun "$id"
}
@test "Compare BQSR GATK3 table against baseline: $id" {
helper_compareBQSR "$id"
}
@test "Normal run for BQSR GATK4: $id" {
helper_normalRun "$id" --gatk4
}
@test "Compare BQSR GATK4 table against baseline: $id" {
helper_compareBQSR "$id" --gatk4
}
| true
|
3bfb0862dd931978f599e2eeb4b5f3c6e59922a9
|
Shell
|
BaiChaYuLu/host_manager
|
/other /ๆๅกๅจๅฎๅ
จ่ๆฌโ้็จไบredhat centos.sh
|
UTF-8
| 10,673
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/sh
###################################################################################
# Security Script for RedHat Linux
# Author:51iker
# Date:2008/12/23
#
##################################################################################
#โโโโโโโโโโDefine Variableโโโโโโโโโโโโ-
export LANG=en
DATETIME=`date +%Y%m%d-%M%S`
SERVICES=(autofs firstboot cups gpm nfs nfslock xfs netfs sendmail yum\-updatesd restrorecond mcstrans avahi\-daemon anacron kudzu portmap)
MD5SUM=(ps netstat ls last w ifconfig tcpdump iptraf top swatch nice lastb md5sum name)
IPV6=$(ifconfig | grep โinet6โณ)
Filename=`ifconfig -a |grep inet |grep -v โ127.0.0.1โณ |awk โ{print $2}โ| head -1 | awk -Fโ:โ โ{ print $2}โ`-$DATETIME-md5
BKDir=/var/ikerbk
#โโโโโโโโโ-Create report/back Directoryโโโโโโโโ-
mkdir -p $BKDir
#โโโโโโโโโ-Modify Default Languageโโโโโโโโโโ
echo -n โmodfiy env_LANGโ
if [ -f /etc/sysconfig/i18n ]; then
cp /etc/sysconfig/i18n $BKDir/$DATETIME\_i18n
Lang=`grep โ^LANG=โ /etc/sysconfig/i18n`
Lang1=`grep โ^SUPPORTED=โ /etc/sysconfig/i18n`
Lang2=`grep โ^SYSFONT=โ /etc/sysconfig/i18n`
if [ -z "$Lang" ]; then
sed -i โ1i\LANG=โen_US.UTF-8โณโ /etc/sysconfig/i18n
echo โ : insert [OK]โ
else
sed -i โs/LANG=.*/LANG=โen_US.UTF-8โณ/gโ /etc/sysconfig/i18n
echo โ : modfiy [OK]โ
fi
if [ -z "$Lang1" ]; then
sed -i โ1a\SUPPORTED=โen_US.UTF-8:en_US:enโโ /etc/sysconfig/i18n
echo โSUPPORTED insert [OK]โ
else
sed -i โs/SUPPORTED=.*/SUPPORTED=โen_US.UTF-8:en_US:enโ/gโ /etc/sysconfig/i18n
echo โSUPPORTED modfiy [OK]โ
fi
if [ -z "$Lang2" ]; then
sed -i โ1a\SYSFONT=โlatarcyrheb-sun16โณโ /etc/sysconfig/i18n
echo โSYSFONT insert [OK]โ
else
sed -i โs/SYSFONT=.*/SYSFONT=โlatarcyrheb-sun16โณ/gโ /etc/sysconfig/i18n
echo โSYSFONT modfiy [OK]โ
fi
else
echo โ : File /etc/sysconfig/i18n not exist [False]โ
fi
#โโโโโโโโโโSSH Protocol 2โโโโโโโโโโโโ
echo -n โchange sshd <Protocol 2>โ
if [ -f /etc/ssh/sshd_config ] ; then
cp /etc/ssh/sshd_config $BKDir/$DATETIME-sshd_config
Proto=`sed -n โ/^Protocol/pโ /etc/ssh/sshd_config`
Proto1=`sed -n โ/^Protocol/pโ /etc/ssh/sshd_config | awk โ{ print $2 }โ`
if [ -z "$Proto" ]; then
sed -i โ1i\Protocol 2\โ /etc/ssh/sshd_config
echo โ [OK]โ
elif [ "$Proto1" != "2" ]; then
sed -i โs/^$Proto/Protocol 2/gโ /etc/ssh/sshd_config
echo โ [OK]โ
fi
else
echo โ :File /etc/ssh/sshd_config not exist [False]โ
fi
#โโโโโโโโโโStop Unuse Servicesโโโโโโโโโโโ
for x in โ${SERVICES[@]}โ; do
state1=`chkconfig โlist | grep $x | awk โ{print substr($5,3,5)}โ`
if [ "$state1" == "on" ]; then
service $x stop
chkconfig โlevel 3 $x off
else
echo โ$x state is stop [OK]โ
fi
done
for i in `ls /etc/rc3.d/S*`
do
CURSRV=`echo $i|cut -c 15-`
echo $CURSRV
case $CURSRV in
crond | irqbalance | microcode_ctl | lvm2-monitor | network | iptables | sshd |syslog)
echo โBase services, Skip!โ
;;
*)
echo โchange $CURSRV to offโ
chkconfig โlevel 2345 $CURSRV off
service $CURSRV stop
;;
esac
done
#โโโโโโโโโโForce Password Lenthโโโโโโโโโโโ
echo -n โchange <password> lengthโ
if [ -f /etc/login.defs ]; then
cp /etc/login.defs $BKDir/$DATETIME\_login.defs
sed -i โs/PASS_MIN_LEN.*5/PASS_MIN_LEN 8/โ /etc/login.defs
echo โ [OK]โ
else
echo โ :File /etc/login.defs not exist [False]โ
fi
#โโโโโโโโโ-Define SSH Session TIMEOUTโโโโโโโโโ
echo -n โmodfiy Histsize and TMOUTโ
if [ -f /etc/profile ]; then
cp /etc/profile $BKDir/$DATETIME\_profile
sed -i โs/HISTSIZE=.*/HISTSIZE=128/โ /etc/profile
echo โ [OK]โ
Timeout=`grep โTMOUT=โ /etc/profile`
if [ -z $Timeout ] ; then
echo โTMOUT=900โณ >> /etc/profile
else
sed -i โs/.*TMOUT=.*/TMOUT=900/gโ /etc/profile
fi
else
echo โ :File /etc/profile not exist [False]โ
fi
#โโโโโโโโโโCheck tmp Directory Stickโโโโโโโโโ
if [ -d /tmp/ ]; then
echo -n โmodfiy /tmp/ +tโ
chmod +t /tmp/
echo โ [OK]โ
else
mkdir /tmp && chmod 777 /tmp && chmod +t /tmp
echo โ [mkdir /tmp]โ
fi
#โโโโโโโโโโClose tty4/5/6โโโโโโโโโโโโโ
echo -n โmodify Control-Alt-Deleteโ
if [ -f /etc/inittab ]; then
cp /etc/inittab $BKDir/$DATETIME\_inittab
sed -i โs/\(^ca\:\:ctrlaltdel\:\/sbin\/shutdown.*\)/#\1/gโ /etc/inittab
sed -i โs/\(^4:2345:respawn.*\)/#\1/gโ /etc/inittab
sed -i โs/\(^5:2345:respawn.*\)/#\1/gโ /etc/inittab
sed -i โs/\(^6:2345:respawn.*\)/#\1/gโ /etc/inittab
echo โ : Control-Alt-Delete AND tty-456 [OK]โ
else
echo โfile /etc/inittab NOT EXISTโ
fi
#โโโโโโโโโโClean Console Informationโโโโโโโโโ
echo -n โClean boot infomationโ
Check=`sed -n โ/issue.net/pโ /etc/rc.local`
if [ -f /etc/issue -a -f /etc/issue.net ]; then
echo โโ > /etc/issue
echo โโ > /etc/issue.net
if [ -z "$Check" ]; then
echo โecho โโ > /etc/issueโ >> /etc/rc.local
echo โecho โโ > /etc/issue.netโ >> /etc/rc.local
echo โ [OK]โ
fi
else
echo โ :File /etc/issue or /etc/issue.net not exist [False]โ
fi
#โโโโโโโโโ-Close IPV6โโโโโโโโโโโโโโ-
if [ -n "$IPV6" ]; then
if [ -f /etc/sysconfig/network -a -f /etc/modprobe.conf ]; then
cp /etc/sysconfig/network $BKDir/$DATETIME\_network
cp /etc/modprobe.conf $BKDir/$DATETIME\_modprobe.conf
Netipv6=`grep โ^NETWORKING_IPV6=yesโ /etc/sysconfig/network`
echo -n โmodfiy ipv6 cleanโ
if [ -z $Netipv6 ]; then
echo โ already [OK]โ
else
sed -i โs/^NETWORKING_IPV6=yes/NETWORKING_IPV6=no/gโ /etc/sysconfig/network
echo โ [OK]โ
fi
Ipv6mod=`sed -n โ/^alias.*ipv6.*off/pโ /etc/modprobe.conf`
echo -n โmodfiy ipv6_mod cleanโ
if [ -z "$Ipv6mod" ]; then
echo โ
alias net-pf-10 off
alias ipv6 offโ >> /etc/modprobe.conf
echo โ [OK]โ
else
echo โ IPV6 mod already [OK]โ
fi
else โFile /etc/sysconfig/network or /etc/modprobe.conf not exist [False]โ
fi
else
echo โIPV6 not support [OK]โ
fi
#โโโโโโProtect File passwd/shadow/group/gshadow/servicesโโโโโ
echo -n โmodfiy passwd_file +i โ
#chattr +i /etc/passwd
#chattr +i /etc/shadow
#chattr +i /etc/group
#chattr +i /etc/gshadow
#chattr +i /etc/services
echo โ [OK]โ
#โโโโโโโโโโClean Command Historyโโโโโโโโโโ
echo -n โmodify bash_historyโ
if [ -f /root/.bash_logout ]; then
LOGOUT=`grep โrm -fโ /root/.bash_logout`
if [ -z "$LOGOUT" ] ; then
sed -i โ/clear/i \rm -f $HOME/.bash_historyโ /root/.bash_logout
echo โ [OK]โ
else
echo โ Already [OK]โ
fi
else
echo โ :File /root/.bash_logout not exist [False]โ
fi
#โโโโโโโโโโGroup wheel su rootโโโโโโโโโโโ
echo -n โmodify su rootโ
if [ -f /etc/pam.d/su ]; then
cp /etc/pam.d/su $BKDir/$DATETIME\_su
sed -i โs/.*pam_wheel.so use_uid$/auth required pam_wheel.so use_uid/โ /etc/pam.d/su
echo โ [OK]โ
else
echo โ :File /etc/pam.d/su not exist [False]โ
fi
#โโโโโโโโ-Log Important Commandโs MD5 Informationโโโโโโ
echo โMD5 check files โ
for xx in โ${MD5SUM[@]}โ; do
NAME=`whereis $xx | awk โ{print $2}โ`
if [ -z $NAME ]; then
continue
else
md5sum $NAME >> $BKDir/$Filename
echo โ$NAME [OK]โ
fi
done
#โโโโโโโโModify Kernel Parameters About Securityโโโโโโ
#โnet.ipv4.conf.all.rp_filter ๅ็ฝๅกๆ้ฎ้ข๏ผไธไฝฟ็จ
echo -n โmodfiy /etc/sysctl.confโ
if [ -f /etc/sysctl.conf ]; then
cp /etc/sysctl.conf $BKDir/$DATETIME\_sysctl.conf
Net=(net.ipv4.ip_forward
net.ipv4.conf.all.accept_source_route
net.ipv4.conf.all.accept_redirects
net.ipv4.tcp_syncookies
net.ipv4.conf.all.log_martians
net.ipv4.icmp_echo_ignore_broadcasts
net.ipv4.icmp_ignore_bogus_error_responses)
for i in โ${Net[@]::3}โ; do
Zero=`sed -n โ/^$i/pโ /etc/sysctl.conf | awk -Fโ=โ โ{ print $2 }โ | sed โs/ //gโ`
Zero1=`sed -n โ/^$i/pโ /etc/sysctl.conf`
if [ -z "$Zero" ]; then
if [ -z "$Zero1" ];then
echo โ$i = 0โณ >> /etc/sysctl.conf
echo โ$i is [OK]โ
else
sed -i โs/$i.*/$i = 0/gโ /etc/sysctl.conf
echo โ$i is [OK]โ
fi
fi
if [ "$Zero" == "0" ]; then
echo โ$i is [OK]โ
else
sed -i โs/$i.*/$i = 0/gโ /etc/sysctl.conf
fi
done
for i in โ${Net[@]:3}โ; do
One=`sed -n โ/^$i/pโ /etc/sysctl.conf | awk -Fโ=โ โ{ print $2 }โ | sed โs/ //gโ`
One1=`sed -n โ/^$i/pโ /etc/sysctl.conf`
if [ -z "$One" ]; then
if [ -z "$One1" ];then
echo โ$i = 1โณ >> /etc/sysctl.conf
echo โ$i is [OK]โ
else
sed -i โs/$i.*/$i = 1/gโ /etc/sysctl.conf
echo โ$i is [OK]โ
fi
fi
if [ "$One" == "1" ]; then
echo โ$i is [OK]โ
else
sed -i โs/$i.*/$i = 1/gโ /etc/sysctl.conf
fi
done
else
echo โ:File /etc/sysctl.conf not exist [Flase]โ
fi
sysctl -p >> $BKDir/$Filename
init q
| true
|
4da9df23ddbe9415b24c8bde286ce97118446037
|
Shell
|
kjamison/Pipelines
|
/DiffusionTractography/scripts/MergeDotMat3.sh
|
UTF-8
| 3,168
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
set -e
echo -e "\n START: MergeDotMat3"
#bindir=/home/stam/fsldev/ptx2 #Eventually FSLDIR (use custom probtrackx2 and fdt_matrix_merge for now)
bindir=${HCPPIPEDIR}/global/binaries
Caret7_command=${CARET7DIR}/wb_command
if [ "$5" == "" ];then
echo ""
echo "usage: $0 <StudyFolder> <Subject> <count> <GrayOrdinates_Templatedir> <Nrepeats>"
echo "Merge dot files and convert the merged.dot file to .dconn.nii"
exit 1
fi
StudyFolder=$1 # "$1" #Path to Generic Study folder
Subject=$2 # "$2" #SubjectID
count=$3 # Which Part of Matrix3 to process (1 for LH to All, 2 for RH to All, 3 for Subxortex to All)
TemplateFolder=$4
Nrepeats=$5 # How many dot files to merge
ResultsFolder="$StudyFolder"/"$Subject"/MNINonLinear/Results/Tractography
#Merge results from individual probtrackx runs
$bindir/fdt_matrix_merge $ResultsFolder/Mat3_${count}_list.txt $ResultsFolder/merged_matrix3_${count}.dot
#Save files before deleting
if [ -f $ResultsFolder/merged_matrix3_${count}.dot ]; then
imcp $ResultsFolder/Mat3_track_${count}_0001/tract_space_coords_for_fdt_matrix3 $ResultsFolder/tract_space_coords_for_fdt_matrix3_${count}
cp $ResultsFolder/Mat3_track_${count}_0001/coords_for_fdt_matrix3 $ResultsFolder/coords_for_fdt_matrix3_${count}
rm -f $ResultsFolder/Mat3_waytotal_${count}
rm -f $ResultsFolder/Mat3_waytotal_list_${count}
waytotal=0
for ((i=1;i<=${Nrepeats};i++));do
n=`zeropad $i 4`
wayp=`cat $ResultsFolder/Mat3_track_${count}_${n}/waytotal`
echo ${wayp} >> $ResultsFolder/Mat3_waytotal_list_${count}
waytotal=$((${waytotal} + ${wayp}))
done
echo ${waytotal} >> $ResultsFolder/Mat3_waytotal_${count}
rm -rf ${ResultsFolder}/Mat3_track_${count}_????
fi
#Each of the next three wb_commands take for count=1/2
# i)13 minutes and 13 GB of RAM
# ii) 4 minutes and 11 GB of RAM
# iii) 2 minutes and 1 GB of RAM
# And for count=3
# i) 21 minutes and 26GB of RAM
# ii) 4 minutes and 12GB of RAM
# iii) 2 minutes and 1GB of RAM
if [ ${count} -eq 1 ]; then
${Caret7_command} -probtrackx-dot-convert ${ResultsFolder}/merged_matrix3_${count}.dot ${ResultsFolder}/merged_matrix3_${count}.dconn.nii -row-cifti ${TemplateFolder}/91282_Greyordinates.dscalar.nii COLUMN -col-surface ${TemplateFolder}/L.atlasroi.32k_fs_LR.shape.gii
elif [ ${count} -eq 2 ]; then
${Caret7_command} -probtrackx-dot-convert ${ResultsFolder}/merged_matrix3_${count}.dot ${ResultsFolder}/merged_matrix3_${count}.dconn.nii -row-cifti ${TemplateFolder}/91282_Greyordinates.dscalar.nii COLUMN -col-surface ${TemplateFolder}/R.atlasroi.32k_fs_LR.shape.gii
elif [ ${count} -eq 3 ]; then
${Caret7_command} -probtrackx-dot-convert ${ResultsFolder}/merged_matrix3_${count}.dot ${ResultsFolder}/merged_matrix3_${count}.dconn.nii -row-cifti ${TemplateFolder}/91282_Greyordinates.dscalar.nii COLUMN -col-voxels ${TemplateFolder}/Atlas_ROIs.2.voxel_list.txt ${TemplateFolder}/Atlas_ROIs.2.nii.gz
fi
if [ -s ${ResultsFolder}/merged_matrix3_${count}.dconn.nii ]; then
rm -f ${ResultsFolder}/merged_matrix3_${count}.dot
fi
echo -e "\n END: MergeDotMat3"
| true
|
2762cd8d8a2a90cbbfa3d385892a9094edd8f087
|
Shell
|
jcleary/blade
|
/app/views/projects/_script.html.erb
|
UTF-8
| 890
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
REPO=git@github.com:CorporateRewards/myrewards.git
FOLDER=target_app
BRANCH=feature/faster_tests
BLADE_API=<%= api_project_builds_url(@project) %>
function header {
echo " ---------------------------------"
echo " Blade Runner"
echo " ---------------------------------"
}
function setup_docker {
docker-machine start default
eval $(docker-machine env default)
}
function clone_target_app {
if [ ! -d "$FOLDER" ]; then
git clone $REPO $FOLDER
fi
cd $FOLDER
git checkout $BRANCH
}
function run_tests {
docker-compose up -d
docker exec targetapp_myrewards.app_1 bash -c "rake db:drop db:create db:test:prepare && rspec -fj --out /app/log/rspec.json"
curl -XPOST -H 'Content-Type:application/json' -d @log/rspec.json $BLADE_API
docker-compose kill
}
header
setup_docker
clone_target_app
for i in `seq 1 10`;
do
echo "Running test $i"
run_tests
done
| true
|
c69b7a3dd94e0c6ac07e072b7f51f8c4ade7fa5c
|
Shell
|
Zeal0usD/SSHAudit
|
/sshaudit.sh
|
UTF-8
| 1,752
| 4.34375
| 4
|
[] |
no_license
|
#!/bin/bash
##
# SSH internal audit
# - Check authentication logs for failed logins
# - Check user sudo command history
# - Check bash history
# - Panic button to kill user session
##
server_name=$(hostname)
function flogin_check() {
echo ""
echo "Failed logins on ${server_name} are: "
egrep "Failed|Failure" /var/log/auth.log
echo ""
}
function userhistory_check() {
echo "Please enter the username:"
read uname
echo ""
echo "History of $uanme on ${server_name}: "
echo ""
tail /var/log/auth.log | grep $uname
echo ""
}
function bashhistory_check() {
echo "Please enter the username:"
read uname
echo "Bash history of $uname on ${server_name}: "
echo ""
sudo nano /home/$uname/.bash_history
echo ""
}
function panic_check() {
w
echo "Type the username to kill the remote session"
read uname
echo "kill user session on ${server_name}: "
sudo killall -u $uname
echo ""
}
function all_checks() {
flogin_check
userhistory_check
bashhistory_check
panic_check
}
##
# Color Variables
##
green='\e[32m'
blue='\e[34m'
clear='\e[0m'
##
# Color Functions
##
ColorGreen(){
echo -ne $green$1$clear
}
ColorBlue(){
echo -ne $blue$1$clear
}
menu(){
echo -ne "
SSH Internal Audit
$(ColorGreen '1)') Check failed logins
$(ColorGreen '2)') User Command History
$(ColorGreen '3)') Bash History of User
$(ColorGreen '4)') Panic Kill User Session
$(ColorGreen '0)') Exit
$(ColorBlue 'Choose an option:') "
read a
case $a in
1) flogin_check ; menu ;;
2) userhistory_check ; menu ;;
3) bashhistory_check ; menu ;;
4) panic_check ; menu ;;
0) exit 0 ;;
*) echo -e $red"Wrong option."$clear; WrongCommand;;
esac
}
# Call the menu function
menu
| true
|
2543ebc4aee3db2abfd3073ef60f74283a737946
|
Shell
|
bgeltz/dotfiles
|
/bin/bin/dates.sh
|
UTF-8
| 368
| 2.8125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
BIRTHDAYS=${HOME}/.gcal/birthdays
VIP_DATES=${HOME}/.gcal/brgdates
# Display birthdays and holidays
gcal -f ${BIRTHDAYS} --heading-text="Birthdays:" --starting-day=Monday --with-week-number --iso-week-number=yes -q US_OR -n- .+
# Display VIP dates
gcal -f ${VIP_DATES} --heading-text="2020:" -u 2020
gcal -f ${VIP_DATES} --heading-text="2021:" -u 2021
| true
|
46dde5974043906096c94913afdf0e4c86449919
|
Shell
|
jn7163/apps
|
/kpmcore/PKGBUILD
|
UTF-8
| 1,097
| 2.53125
| 3
|
[] |
no_license
|
pkgname=kpmcore
pkgver=3.0.3
_commit=cbe21e7ef974fe3c95ef418bbf098af716e5ff33
pkgrel=1
pkgdesc="Library for managing partitions. Common code for KDE Partition Manager and other projects."
arch=('x86_64')
url="https://github.com/KDE/kpmcore"
license=('GPL2')
depends=('ki18n' 'kio' 'kiconthemes' 'kservice' 'libatasmart' 'parted')
makedepends=('extra-cmake-modules' 'pkgconfig')
groups=('system')
options=('debug')
source=("http://download.kde.org/stable/kpmcore/${pkgver}/src/${pkgname}-${pkgver}.tar.xz")
#source=("https://github.com/KDE/kpmcore/archive/${_commit}.zip")
md5sums=('c4325d354cde3af125de0c0fc42dbe31')
build() {
mkdir build
cd build
cmake ../${pkgname}-${pkgver} \
-DCMAKE_INSTALL_PREFIX=/usr \
-DCMAKE_BUILD_TYPE=RelWithDebInfo \
-DKDE_INSTALL_LIBDIR=lib \
-DKDE_INSTALL_SYSCONFDIR=/etc \
-DKDE_INSTALL_QMLDIR=/usr/lib/qt5/qml \
-DKDE_INSTALL_PLUGINDIR=/usr/lib/qt5/plugins \
-DKDE_INSTALL_USE_QT_SYS_PATHS=ON \
-DBUILD_TESTING=OFF
make
}
package() {
cd build
make DESTDIR=${pkgdir} install
}
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.