blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
86cf8c880c65155ca9ffdab066188a7d506f311c
|
Shell
|
jdiaz5513/capnp-ts
|
/packages/js-examples/test.sh
|
UTF-8
| 1,042
| 3.03125
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
#! /usr/bin/env bash
#
# Quick script that compiles and runs the samples, then cleans up.
# Used for release testing.
set -exuo pipefail
# Run from this script's directory; quoting $(dirname "$0") keeps paths with
# spaces intact (the original unquoted backtick form word-split the result).
cd "$(dirname "$0")"
yarn install
# Compile the Cap'n Proto schema with the JS plugin from local node_modules.
capnpc -o node_modules/.bin/capnpc-js addressbook.capnp
# Round-trip the examples: each writer's stdout feeds the matching reader.
node addressbook.js write | node addressbook.js read
node addressbook.js dwrite | node addressbook.js dread
# Remove the generated schema artifacts.
rm -f addressbook.capnp.js addressbook.capnp.ts
# Calculator example not yet implemented
# capnpc -oc++ calculator.capnp
# c++ -std=c++11 -Wall calculator-client.c++ calculator.capnp.c++ \
# $(pkg-config --cflags --libs capnp-rpc) -o calculator-client
# c++ -std=c++11 -Wall calculator-server.c++ calculator.capnp.c++ \
# $(pkg-config --cflags --libs capnp-rpc) -o calculator-server
# rm -f /tmp/capnp-calculator-example-$$
# ./calculator-server unix:/tmp/capnp-calculator-example-$$ &
# sleep 0.1
# ./calculator-client unix:/tmp/capnp-calculator-example-$$
# kill %+
# wait %+ || true
# rm calculator-client calculator-server calculator.capnp.c++ calculator.capnp.h /tmp/capnp-calculator-example-$$
| true
|
aaf79fdf5b3f74ee2978f0761544ed777e5c5493
|
Shell
|
nohjlau/Unraid-Scripts
|
/unraid_array_fan.sh
|
UTF-8
| 4,958
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
# Finding your fan controllers: find -L /sys/class/hwmon/ -maxdepth 5 -iname "pwm[0-9]" 2>/dev/null
# /sys/class/hwmon/hwmon3/pwm1 FAN 0 - FAN 5
# /sys/class/hwmon/hwmon3/pwm2 FAN 6 - FAN 7
# Original script: https://forums.unraid.net/topic/5375-temperature-based-fan-speed-control/?tab=comments#comment-51790
# unraid_array_fan.sh v0.6
# v0.1: First try at it.
# v0.2: Made a small change so the fan speed on low doesn't fluctuate every time the script is run.
# v0.3: It will now enable fan speed change before trying to change it. I missed
#       it at first because pwmconfig was doing it for me while I was testing the fan.
# v0.4: Corrected temp reading to "Temperature_Celsius" as my new Seagate drive
#       was returning two numbers with just "Temperature".
# v0.5: Customized the script for my own use.
# v0.6: The pwm that is used switches randomly on reboot. Easier to set them all.
#
# A simple script to check for the highest hard disk temperature in an array
# or backplane and then set the fans to an appropriate speed. Fans need to be
# connected to a motherboard with pwm support, not the array.
# DEPENDS ON: grep, awk, smartctl, hdparm
### VARIABLES FOR USER TO SET ###
# Amount of drives in the array. Make sure it matches the amount you filled out below.
NUM_OF_DRIVES=26
# unRAID drives that are in the array/backplane of the fans we need to control
HD[1]=/dev/sdb
HD[2]=/dev/sdc
HD[3]=/dev/sdd
HD[4]=/dev/sde
HD[5]=/dev/sdf
HD[6]=/dev/sdg
HD[7]=/dev/sdh
HD[8]=/dev/sdi
HD[9]=/dev/sdj
HD[10]=/dev/sdk
HD[11]=/dev/sdl
HD[12]=/dev/sdm
HD[13]=/dev/sdn
HD[14]=/dev/sdo
HD[15]=/dev/sdp
HD[16]=/dev/sdq
HD[17]=/dev/sdr
HD[18]=/dev/sds
HD[19]=/dev/sdt
HD[20]=/dev/sdu
HD[21]=/dev/sdv
HD[22]=/dev/sdw
HD[23]=/dev/sdx
HD[24]=/dev/sdy
HD[25]=/dev/sdz
HD[26]=/dev/sdaa
# Temperatures to change fan speed at
# Any temp between OFF and HIGH will cause fan to run on low speed setting
FAN_OFF_TEMP=35  # Anything this number and below - fan is off
FAN_HIGH_TEMP=40 # Anything this number or above - fan is high speed
# Fan speed settings. Run pwmconfig (part of the lm_sensors package) to determine
# what numbers you want to use for your fan pwm settings. Should not need to
# change the OFF variable, only the LOW and maybe also HIGH to what you desire.
# Any real number between 0 and 255.
FAN_OFF_PWM=100
FAN_LOW_PWM=150
FAN_HIGH_PWM=255
# Fan device. Depends on your system. pwmconfig can help with finding this out.
# pwm1 is usually the cpu fan. You can "cat /sys/class/hwmon/hwmon0/device/fan1_input"
# or fan2_input and so on to see the current rpm of the fan. If 0 then fan is off or
# there is no fan connected or motherboard can't read rpm of fan.
# ARRAY_FAN = FAN 0-5. ARRAY_FAN_TWO = 6-7
ARRAY_FAN=/sys/class/hwmon/hwmon3/pwm1
ARRAY_FAN_TWO=/sys/class/hwmon/hwmon3/pwm2
ARRAY_FAN_THREE=/sys/class/hwmon/hwmon3/pwm3
ARRAY_FAN_FOUR=/sys/class/hwmon/hwmon3/pwm4
### END USER SET VARIABLES ###
# Program variables - do not modify
HIGHEST_TEMP=0
CURRENT_DRIVE=1
CURRENT_TEMP=0
# Find the highest temperature among spun-up drives. Spun-down (standby)
# drives are skipped so this check doesn't wake them; if all drives are
# asleep HIGHEST_TEMP stays 0.
while [ "$CURRENT_DRIVE" -le "$NUM_OF_DRIVES" ]
do
  SLEEPING=$(hdparm -C "${HD[$CURRENT_DRIVE]}" | grep -c standby)
  if [ "$SLEEPING" = "0" ]; then
    CURRENT_TEMP=$(smartctl -A "${HD[$CURRENT_DRIVE]}" | grep -m 1 -i Temperature_Celsius | awk '{print $10}')
    # Guard against an empty reading (e.g. smartctl failure) before the
    # numeric comparison, which would otherwise error out.
    if [ -n "$CURRENT_TEMP" ] && [ "$HIGHEST_TEMP" -le "$CURRENT_TEMP" ]; then
      HIGHEST_TEMP=$CURRENT_TEMP
    fi
  fi
  CURRENT_DRIVE=$((CURRENT_DRIVE + 1))
done
echo "Highest temp is: $HIGHEST_TEMP"
# Enable manual pwm control on the fans if not already enabled.
# BUGFIX: the original tested [ "$ARRAY_FAN" != "1" ], comparing the device
# *path* against "1" (always true). Read the current value of the *_enable
# sysfs node instead.
if [ "$(cat "${ARRAY_FAN}_enable" 2>/dev/null)" != "1" ]; then
  echo 1 > "${ARRAY_FAN}_enable"
  echo 1 > "${ARRAY_FAN_TWO}_enable"
  echo 1 > "${ARRAY_FAN_THREE}_enable"
  echo 1 > "${ARRAY_FAN_FOUR}_enable"
fi
# Set the fan speed based on highest temperature
if [ "$HIGHEST_TEMP" -le "$FAN_OFF_TEMP" ]; then
  # set fans to the "off" pwm value
  echo $FAN_OFF_PWM > $ARRAY_FAN
  echo $FAN_OFF_PWM > $ARRAY_FAN_TWO
  echo $FAN_OFF_PWM > $ARRAY_FAN_THREE
  echo $FAN_OFF_PWM > $ARRAY_FAN_FOUR
  echo "Setting pwm to: $FAN_OFF_PWM"
elif [ "$HIGHEST_TEMP" -ge "$FAN_HIGH_TEMP" ]; then
  # set fans to full speed
  echo $FAN_HIGH_PWM > $ARRAY_FAN
  echo $FAN_HIGH_PWM > $ARRAY_FAN_TWO
  echo $FAN_HIGH_PWM > $ARRAY_FAN_THREE
  echo $FAN_HIGH_PWM > $ARRAY_FAN_FOUR
  echo "Setting pwm to: $FAN_HIGH_PWM"
else
  CURRENT_SPEED=$(cat $ARRAY_FAN)
  # Spin fans up at full speed first so they reliably start, then drop to low.
  if [ "$CURRENT_SPEED" -lt "$FAN_LOW_PWM" ]; then
    echo $FAN_HIGH_PWM > $ARRAY_FAN
    echo $FAN_HIGH_PWM > $ARRAY_FAN_TWO
    echo $FAN_HIGH_PWM > $ARRAY_FAN_THREE
    echo $FAN_HIGH_PWM > $ARRAY_FAN_FOUR
    sleep 2
  fi
  echo $FAN_LOW_PWM > $ARRAY_FAN
  echo $FAN_LOW_PWM > $ARRAY_FAN_TWO
  echo $FAN_LOW_PWM > $ARRAY_FAN_THREE
  echo $FAN_LOW_PWM > $ARRAY_FAN_FOUR
  echo "Setting pwm to: $FAN_LOW_PWM"
fi
| true
|
dc72f4db17592ebedc129b0a0ae93bd0d49fe0fc
|
Shell
|
EmilyQue/CST-221
|
/Security/password_strength.sh
|
UTF-8
| 827
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/bash
# Interactive password-strength checker: prompts for a password and reports
# on three criteria (length >= 8, contains a digit, contains a special char).
#variable counting how many criteria are met
x=0
#password is entered and read (-r keeps backslashes literal)
echo 'Please enter password: '
read -r password
#size variable is set to password length
size=${#password}
#checks if password size is less than 8 characters
if (("$size" < 8)); then
echo 'Password must be at least 8 characters.'
#increments variable
else
let "x++"
fi
#checks if password includes numerical characters
if [[ $password =~ [0-9] ]]; then
#increments variable
let "x++"
else
echo 'Password does not contain any numeric characters'
fi
#checks if password includes any special characters
# BUGFIX: the original bracket expression [@#$%'&'*+-=] contained the range
# +-= (ASCII 0x2B..0x3D), which unintentionally matched digits and ,./:;<
# as "special". The '-' is moved to the end so every char is literal.
if [[ $password == *[@#$%'&'*+=-]* ]]; then
#increments variable
let "x++"
else
echo 'Password does not contain any special characters'
fi
#checks if all three criteria are met
if [ "$x" == 3 ]; then
echo 'Password meets all criteria'
fi
| true
|
075c6fb9b5cbdaa6761711a732110acff9b4a61b
|
Shell
|
puiterwijk/irma_configuration
|
/generate_keys.sh
|
UTF-8
| 2,212
| 3.96875
| 4
|
[] |
no_license
|
#!/bin/bash
# Walks every issuer directory under the current configuration dir, generates
# IRMA issuer key pairs with silvia_keygen, and (when SECURE is set) archives
# and gpg-encrypts the private halves, printing the passphrase to stdout.
# Set this if you want separate keys per credential
CREDENTIAL_KEYS=
# Set this if you want to archive and encrypt the keys
SECURE=true
CLEANUP=true
ARCHIVE_CMD=tar
ARCHIVE_OPT=-zcvf
# NOTE(review): ENCRYPT_OPT is never referenced below; archive_keys spells
# the gpg options out inline instead — confirm whether it should be used.
ENCRYPT_OPT="--cipher-algo AES --symmetric"
PRIVATE_PATH=irma_private_keys
BASE_URI=http://www.irmacard.org/credentials/phase1
LOG=""
# archive_keys <work_dir> <issuer> [<credential>]
# Tar the private-key tree rooted at <work_dir>, symmetrically encrypt it to
# ${CONF_DIR}/irma_key_<issuer>_<credential>.gpg with a passphrase derived
# (via mkpasswd) from the arguments plus the current time, echo the
# passphrase, then delete the work dir and the intermediate tarball.
function archive_keys {
cd ${1}
local NOW=`date`
local PASSPHRASE=`mkpasswd "${1} ${2} ${3} ${NOW}"`
local TMP_FILE=`mktemp`
local KEY_FILE="${CONF_DIR}/irma_key_${2}_${3}.gpg"
${ARCHIVE_CMD} ${ARCHIVE_OPT} ${TMP_FILE} ${PRIVATE_PATH} &> /dev/null
gpg --batch --passphrase ${PASSPHRASE} --output ${KEY_FILE} \
--cipher-algo AES --symmetric ${TMP_FILE}
echo "Result: ${KEY_FILE} using passphrase: ${PASSPHRASE}"
echo ""
rm -rf ${1} ${TMP_FILE}
}
# generate_keys <public_out> <private_out> <uri_suffix>
# Run silvia_keygen (6 attributes, 1024-bit modulus) to produce the public
# and private key XML files, then mark them read-only.
function generate_keys {
# Make sure the files are writable
touch ${1} ${2}
chmod +w ${1} ${2}
# Generate the keys
silvia_keygen -a 6 -n 1024 -p ${1} -P ${2} -u "${BASE_URI}/${3}/" &> /dev/null
# Make the keys readonly
chmod 440 ${1}
chmod 400 ${2}
}
# generate_issuer_keys <issuer>
# Generate one key pair for the issuer. With SECURE set, the private key is
# created in a fresh temp dir and handed to archive_keys; otherwise it lands
# in ./private of the issuer directory.
function generate_issuer_keys {
WORK_DIR=`pwd`
echo "Generating keys for ${1} @ " `pwd`
if [[ ${SECURE} ]]
then
local WORK_DIR=`mktemp -d`
local KEY_DIR=${WORK_DIR}/${PRIVATE_PATH}/${1}/private
else
local KEY_DIR=${WORK_DIR}/private
fi
mkdir -p ${KEY_DIR}
generate_keys ipk.xml ${KEY_DIR}/isk.xml ${1}
[[ ${SECURE} ]] && (archive_keys ${WORK_DIR} ${1})
}
# generate_credential_keys <issuer> <credential>
# Per-credential variant: keys live under Issues/<credential> and are always
# archived (regardless of SECURE).
function generate_credential_keys {
cd ${2}
echo "Generating keys for ${1}: ${2} @ " `pwd`
local WORK_DIR=`mktemp -d`
local KEY_DIR=${WORK_DIR}/${PRIVATE_PATH}/${1}/Issues/${2}/private
mkdir -p ${KEY_DIR}
generate_keys ipk.xml ${KEY_DIR}/isk.xml ${1}/${2}
(archive_keys ${WORK_DIR} ${1} ${2})
}
# parse_issuer <issuer>
# Issuer-level keys by default; per-credential keys when CREDENTIAL_KEYS is
# set. Work is done in subshells so cwd changes stay contained.
function parse_issuer {
if [[ ! ${CREDENTIAL_KEYS} ]]
then
(generate_issuer_keys ${1})
else
cd Issues
for cred in `ls`; do
(generate_credential_keys ${1} ${cred})
done
fi
}
# parse_dir <dir>
# Treat <dir> as an issuer directory iff it contains an Issues subdirectory.
function parse_dir {
cd $1
[[ -d Issues ]] && (parse_issuer $1)
}
CONF_DIR=`pwd`
# Visit every subdirectory of the configuration dir (subshell keeps our cwd).
for dir in `ls`; do
[[ -d ${dir} ]] && (parse_dir ${dir})
done
# Cleanup
# NOTE(review): WORK_DIR is only ever assigned inside subshells above, so it
# is normally unset here and this rm is a no-op — confirm intent.
[[ ${CLEANUP} ]] && rm -rf ${WORK_DIR}
echo ""
echo ${LOG}
| true
|
4ccd16973845823a2dcc7852c2c0d1f9d7be063f
|
Shell
|
roc-wong/myriad-test
|
/myriad-test-application/src/main/scripts/startup.sh
|
UTF-8
| 4,933
| 3.25
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -
#===============================================================================
#
#          FILE: startup.sh
#
#         USAGE: ./startup.sh
#
#   DESCRIPTION: startup script for a Spring Boot executable jar
#
#       OPTIONS: the Spring Boot executable jar relies on the APP_NAME
#                environment variable
#  REQUIREMENTS: ---
#          BUGS: ---
#         NOTES: ---
#        AUTHOR: Roc Wong (https://roc-wong.github.io), float.wong@icloud.com
#  ORGANIZATION: Zhongtai Securities
#       CREATED: 04/19/2019 02:21:48 PM
#      REVISION: ---
#===============================================================================
# set -o nounset # Treat unset variables as an error
#===========================================================================================
# dynamic parameters — each @...@ placeholder is replaced by mvn process-resources
#===========================================================================================
SERVICE_NAME=@project.artifactId@
LOG_DIR=@logging.path@
GC_DIR=${LOG_DIR}/${SERVICE_NAME}/gc
HEAP_DUMP_DIR=${LOG_DIR}/${SERVICE_NAME}/gc/heapDump
SERVER_PORT=@server.port@
CONTEXT_PATH=@server.servlet.context-path@
#===========================================================================================
# JVM Configuration
#===========================================================================================
#JAVA_OPTS="${JAVA_OPTS} -server -Xms4g -Xmx4g -Xmn2g -XX:MetaspaceSize=128m -XX:MaxMetaspaceSize=320m -XX:NewSize=1536m -XX:MaxNewSize=1536m -XX:SurvivorRatio=8"
JAVA_OPTS="${JAVA_OPTS} -XX:+UseConcMarkSweepGC -XX:+UseCMSCompactAtFullCollection -XX:CMSInitiatingOccupancyFraction=70 -XX:+CMSParallelRemarkEnabled -XX:SoftRefLRUPolicyMSPerMB=0 -XX:+CMSClassUnloadingEnabled -XX:SurvivorRatio=8 -XX:-UseParNewGC -XX:+ScavengeBeforeFullGC -XX:+PrintGCDateStamps"
JAVA_OPTS="${JAVA_OPTS} -Dserver.port=$SERVER_PORT -verbose:gc -Xloggc:$GC_DIR/gc.log -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=100 -XX:GCLogFileSize=25M -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=${HEAP_DUMP_DIR}/ -XX:+PrintGCDetails -XX:+PrintGCApplicationStoppedTime -XX:+PrintGCApplicationConcurrentTime"
JAVA_OPTS="${JAVA_OPTS} -XX:-OmitStackTraceInFastThrow"
JAVA_OPTS="${JAVA_OPTS} -Duser.timezone=Asia/Shanghai -Dclient.encoding.override=UTF-8 -Dfile.encoding=UTF-8 -Djava.security.egd=file:/dev/./urandom"
#export JAVA_OPTS="$JAVA_OPTS -XX:-ReduceInitialCardMarks"
export JAVA_OPTS=${JAVA_OPTS}
PATH_TO_JAR=${SERVICE_NAME}".jar"
SERVER_URL="http://localhost:${SERVER_PORT}/${CONTEXT_PATH}"
# Make sure GC-log and heap-dump directories exist before the JVM needs them.
if [[ ! -d ${GC_DIR} ]]; then
mkdir -p ${GC_DIR}
fi
if [[ ! -d "${HEAP_DUMP_DIR}" ]]; then
mkdir -p ${HEAP_DUMP_DIR}
fi
#--- FUNCTION ----------------------------------------------------------------
#          NAME: checkPidAlive
#   DESCRIPTION: verify the Spring Boot process started successfully; the pid
#                file is expected at /var/run/${APP_NAME}/${SERVICE_NAME}.pid
#    PARAMETERS: none
#       RETURNS: 0 if the recorded pid is alive; exits 1 otherwise
#-------------------------------------------------------------------------------
function checkPidAlive() {
for i in `ls -t /var/run/${SERVICE_NAME}*/*.pid 2>/dev/null`
do
read pid < $i
result=$(ps -p "$pid")
# $? here is the exit status of ps: 0 means the process exists.
if [[ "$?" -eq 0 ]]; then
return 0
else
printf "\npid - $pid just quit unexpectedly, please check logs under $LOG_DIR and /tmp for more information!\n"
exit 1;
fi
done
printf "\nNo pid file found, startup may failed. Please check logs under $LOG_DIR and /tmp for more information!\n"
exit 1;
}
# Work from the application root (one level above this script).
cd `dirname $0`/..
# Pick the first non-sources jar matching ${SERVICE_NAME}-*.jar in this dir.
for i in `ls ${SERVICE_NAME}-*.jar 2>/dev/null`
do
if [[ ! $i == *"-sources.jar" ]]
then
PATH_TO_JAR=$i
break
fi
done
# Fall back to ./current if no jar was found here.
# BUGFIX: the original tested `-f PATH_TO_JAR` (the literal file name,
# missing the $), so it always descended into ./current when that dir existed.
if [[ ! -f "$PATH_TO_JAR" && -d current ]]; then
cd current
for i in `ls ${SERVICE_NAME}-*.jar 2>/dev/null`
do
if [[ ! $i == *"-sources.jar" ]]
then
PATH_TO_JAR=$i
break
fi
done
fi
# Refresh the ${SERVICE_NAME}.jar hard link used as the launch entry point.
if [[ -f ${SERVICE_NAME}".jar" ]]; then
rm -rf ${SERVICE_NAME}".jar"
fi
printf "$(date) ==== Starting ==== \n"
ln ${PATH_TO_JAR} ${SERVICE_NAME}".jar"
#ln -s `pwd`/${SERVICE_NAME}".jar" /etc/init.d/${SERVICE_NAME}
#service ${SERVICE_NAME} start
chmod a+x ${SERVICE_NAME}".jar"
# Spring Boot "fully executable" jar: invoke it directly with "start".
./${SERVICE_NAME}".jar" start
rc=$?;
if [[ $rc != 0 ]];
then
echo "$(date) Failed to start ${SERVICE_NAME}.jar, return code: $rc"
exit $rc;
fi
# Poll the HTTP endpoint (up to 12 * 5s = 60s) until the server answers,
# verifying after each wait that the process is still alive.
declare -i counter=0
declare -i max_counter=12 # 12*5=60s
declare -i total_time=0
printf "Waiting for server startup"
until [[ (( counter -ge max_counter )) || "$(curl -X GET --silent --connect-timeout 1 --max-time 2 --head $SERVER_URL | grep "HTTP")" != "" ]];
do
printf "."
counter+=1
sleep 5
checkPidAlive
done
# declare -i makes this an arithmetic assignment.
total_time=counter*5
if [[ (( counter -ge max_counter )) ]];
then
printf "\n$(date) Server failed to start in $total_time seconds!\n"
exit 1;
fi
printf "\n$(date) Server started in $total_time seconds!\n"
exit 0;
| true
|
08e2dc661e42d7afdcbf61840b6f592dd8bca2ee
|
Shell
|
tejakondury/md_hw_teja
|
/hw2/prob_3/run.sh
|
UTF-8
| 3,256
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/bash
# Sweep the fcc lattice constant from 3.0 to 5.0 (step 0.1): for each value,
# generate a LAMMPS input deck and a matching PBS job script, then submit it.
for num in {30..50..1}
do
i=`echo "scale=2 ; ${num}/10" | bc` # convert into floating point values
############################################input_file###########################################################
# The heredoc delimiter is unquoted so the shell expands $i below.
# BUGFIX: LAMMPS's own variables (teng, natoms, length, ecoh) are escaped as
# \${...} so they reach the input file literally — the original left them
# unescaped and the shell expanded them to empty strings, so the final print
# lines lost their values.
cat >in.run$i <<!
# ---------- Initialize Simulation ---------------------
clear
units metal
dimension 3
boundary p p p
atom_style atomic
# ---------- Variables ---------------------
variable lat equal $i
# ---------- Create Atoms ---------------------
#lattice has to be specified first -> all geometry commands are based on it
lattice fcc $i
#region ID style args keyword (0 1 means 0 lat) (specifies the simulation cell)
region box block 0 1 0 1 0 1 units lattice
#create_box N region-ID (N=# of atom types)
create_box 1 box
lattice fcc $i orient x 1 0 0 orient y 0 1 0 orient z 0 0 1
#create_atoms type style
create_atoms 1 box
replicate 1 1 1
# ---------- Define Interatomic Potential ---------------------
pair_style eam/alloy
pair_coeff * * AlCu.eam.alloy Al
neighbor 2.0 bin
neigh_modify delay 10 check yes
# ---------- Define Settings ---------------------
#compute ID group-ID style
#potentail energy per atom
compute poteng all pe/atom
#the sum of all poteng
compute eatoms all reduce sum c_poteng
# ---------- Run Minimization ---------------------
#So timestep start at 0
reset_timestep 0
fix 1 all box/relax iso 0.0 vmax 0.001
thermo 10
thermo_style custom step pe lx ly lz press pxx pyy pzz c_eatoms
min_style cg
minimize 1e-25 1e-25 0 0
#write_data optimized.data
variable natoms equal "count(all)"
variable teng equal "c_eatoms"
variable length equal "lx"
variable ecoh equal "v_teng/v_natoms"
print "Total energy (eV) = \${teng};"
print "Number of atoms = \${natoms};"
print "Lattice constant (Angstoms) = \${length};"
print "Cohesive energy (eV) = \${ecoh};"
print "All done!"
!
#################################################end_input_file###########################################################
###################################################Run_job################################################################
# PBS job script: one core, 6 GB, 10-minute wall clock; runs LAMMPS on the
# input deck generated above ($i is expanded here too).
cat >lammps$i.job <<!
#!/bin/bash
### Set the job name
#PBS -N MSE551
### Specify the PI group for this job
#PBS -W group_list=oiz
### Set the queue for this job as windfall or standard (adjust ### and #)
###PBS -q windfall
#PBS -q standard
### Set the number of nodes, cores and memory that will be used for this job.
### "pcmem=6gb" is the memory attribute for all of the standard nodes
#PBS -l select=1:ncpus=1:mem=6gb:pcmem=6gb
#PBS -l place=free:shared
### Specify "wallclock time" required for this job, hhh:mm:ss
#PBS -l walltime=00:10:00
### Specify total cpu time required for this job, hhh:mm:ss
### total cputime = walltime * ncpus
#PBS -l cput=00:10:00
### cd: set directory for job execution, ~netid = home directory path
cd /extra/tejakondury/MSE551/Lab1/ex2/md_hw_teja_2
### Load required modules/libraries
module load lammps/gcc/17Nov16
#export MPI_DSM_DISTRIBUTE
#export OMP_NUM_THREADS 1
mpirun -np 1 lmp_mpi-gcc -sf opt < in.run$i > out.run$i
!
#################################################End_Run_job###############################################################
qsub lammps$i.job
done
| true
|
f3417a2a0d5e0534bda7670a8c4a33b3142556c6
|
Shell
|
hand79/Hadoop
|
/xsync-xcall-script/xcall
|
UTF-8
| 252
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
# xcall: run the given command locally, then on hosts hadoop101..hadoop108.
# Usage: xcall <command> [args...]
pcount=$#
if((pcount==0));then
echo no args;
exit;
fi
echo -------------localhost----------
# BUGFIX: quote "$@" so each argument stays one word locally (the original
# unquoted $@ re-split arguments containing spaces).
"$@"
for((host=101; host<=108; host++)); do
echo ----------hadoop$host---------
# NOTE: ssh joins the arguments into a single remote command line, so
# quoting is still lossy on the remote side for args with spaces.
ssh hadoop$host "$@"
done
| true
|
984530fc06080fcf5c01adb891e753b1fc796f64
|
Shell
|
igemnace/aurscripts
|
/aurclean
|
UTF-8
| 247
| 3.5625
| 4
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Print a prefixed diagnostic message on stderr.
echoerr() {
  echo aurclean: "$@" >&2
}
# For every package named on the command line, enter its AUR checkout under
# $AUR_DIR (default ~/.aur) and remove all untracked/ignored files.
# Stop at the first failure.
for pkg in "$@"; do
  echoerr "Cleaning up $pkg..."
  if ! cd "${AUR_DIR:-$HOME/.aur}/$pkg"; then
    echoerr Aborting.
    break
  fi
  if ! git clean -fxfd; then
    echoerr Aborting.
    break
  fi
done
| true
|
322dfe11c37be06773a2ad9f7e087dddbd21db31
|
Shell
|
erikswanson/virtualbox-service-tool
|
/create-virtualbox-service.sh
|
UTF-8
| 1,839
| 4.21875
| 4
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
#!/bin/bash
# Dedicated to the Public Domain as per the CC0 1.0 Universal declaration at
# http://creativecommons.org/publicdomain/zero/1.0/
#
# Generates a runit/daemontools-style service directory for a VirtualBox VM:
# a run script, svlogd-based logging, control scripts ('t' = ACPI power
# button, 'k' = hard poweroff) and, when an IP is given, a ping 'check'.
set -e
this="$(basename $0)"
# Print usage on stderr and exit 1.
function print_usage {
cat 1>&2 <<EOF
Usage: ${this} service_path uuid_or_name [ip]
The service is created at the service_path with a 'down' file in place.
The uuid_or_name can be found using 'VBoxManage list vms'.
If specified, the ip is used to generate a 'check' script.
Example: ${this} /usr/local/var/svc.d/vm-pfsense pfSense 172.16.0.1
EOF
exit 1
}
# get_tool <name>: resolve <name> to an executable path with `which`; print
# the path, or complain on stderr and exit 1 when not found/executable.
function get_tool {
tool="$(which ${1})"
if [ -x "${tool}" ]
then
echo "${tool}"
else
echo "Could not find ${1}" 1>&2
exit 1
fi
}
# Resolve all external tools up front so generated scripts embed full paths.
path_VBoxHeadless="$(get_tool VBoxHeadless)"
path_VBoxManage="$(get_tool VBoxManage)"
path_sh="$(get_tool sh)"
path_svlogd="$(get_tool svlogd)"
path_ping="$(get_tool ping)"
# Positional arguments: service path and VM uuid/name are required.
service_path="$1"
[[ -n "${service_path}" ]] || print_usage
uuid="$2"
[[ -n "${uuid}" ]] || print_usage
# Optional third argument: IP for the generated health check.
ip="$3"
echo "Creating a service to control ${uuid}"
echo -n "at ${service_path}... "
mkdir -p ${service_path}
cd ${service_path}
# A 'down' file keeps the service from auto-starting until explicitly enabled.
touch down
# Logging: ./log/run pipes the service's output through svlogd into ./log/main.
mkdir -p log
pushd log > /dev/null
mkdir -p main
cat > run <<EOF
#!${path_sh}
exec ${path_svlogd} -tt main
EOF
chmod +x run
popd > /dev/null
# Control scripts: 't' (TERM) sends an ACPI power-button press for a graceful
# shutdown, 'k' (KILL) forces an immediate poweroff.
mkdir -p control
pushd control > /dev/null
cat > t <<EOF
#!${path_sh}
exec 2>&1
exec ${path_VBoxManage} controlvm ${uuid} acpipowerbutton
EOF
chmod +x t
cat > k <<EOF
#!${path_sh}
exec 2>&1
exec ${path_VBoxManage} controlvm ${uuid} poweroff
EOF
chmod +x k
popd > /dev/null
# Main run script: start the VM headless with the remote desktop server off.
cat > run <<EOF
#!${path_sh}
exec 2>&1
exec ${path_VBoxHeadless} --startvm ${uuid} --vrde off
EOF
chmod +x run
echo "Done."
# Optional 'check' script: a single quiet ping against the given IP.
if [ -n "${ip}" ]
then
echo -n "Generating a 'check' script that pings ${ip}... "
cat > check <<EOF
#!${path_sh}
exec >/dev/null 2>&1
exec ${path_ping} -q -c 1 ${ip}
EOF
chmod +x check
echo "Done."
fi
| true
|
7c6abea75adc9654e2ba7bc6eff147b1d8b2307a
|
Shell
|
TeamEOS/vendor_motorola
|
/edison/proprietary/bin/atrelay-ctrl.sh
|
UTF-8
| 1,192
| 3.578125
| 4
|
[] |
no_license
|
#!/system/bin/sh
# Android helper: configure and launch the AT-relay bridge between the USB
# gadget serial port and the modem, based on the requested modem mode
# (signaling | itp | service) given as the single command-line argument.
export PATH="${PATH}:/system/bin"
# constants
MODE_PROP="mot.ste.modem.mode"
TTYDEVICE_PROP="ro.mot.ste.modem.ttydevice"
GADGET_TTY=/dev/ttyGS0
# In bp-tools boot mode the gadget exposes the relay on a different tty.
boot_mode=`getprop ro.bootmode`
if [ "${boot_mode}" = "bp-tools" ]; then
GADGET_TTY=/dev/ttyGS1
fi
# check command line usage
if [ ${#} -eq 1 ]; then
MODE="${1}"
fi
# get the tty device for this hardware
TTY_DEVICE="$(getprop ${TTYDEVICE_PROP})"
# start the correct atrelay service based on the modem's mode
# NOTE(review): "-z" presumably selects a protocol option of bridge_relay;
# its semantics are not visible from this script — confirm against the tool.
if [ "${MODE}" = "service" ]; then
Z_PROT="-z"
MODEM_END_POINT="-m TTY -x ${TTY_DEVICE} -y 9600,n,8,1,off"
elif [ "${MODE}" = "itp" ]; then
Z_PROT=""
MODEM_END_POINT="-m TTY -x ${TTY_DEVICE} -y 115200,n,8,1,off"
elif [ "${MODE}" = "signaling" ]; then
Z_PROT=""
# Default is CAIF AT channel
MODEM_END_POINT=""
else
echo "usage: ${0} [signaling|itp|service]"
exit 2
fi
# build complete command string
ATRELAY_CMD="/system/bin/bridge_relay ${Z_PROT} \
-i TTY -p ${GADGET_TTY} -s 9600,n,8,1,off ${MODEM_END_POINT} -U radio";
# setup modem mode property
echo "setting ${MODE_PROP}=${MODE}"
setprop "${MODE_PROP}" "${MODE}"
# launch atrelay command
echo "starting '${ATRELAY_CMD}'"
# exec replaces this shell; the exit below only runs if exec itself fails.
exec ${ATRELAY_CMD}
exit 4
| true
|
ace2d67f444fe59b1a19b875329c19ba06dc5144
|
Shell
|
Serki07/CYB6004
|
/scripts/portfolio/week4/IpAddressesOnly.sh
|
UTF-8
| 184
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
# Capture the complete output of the IpInfo helper script.
ip_info=$(./IpInfo.sh)
# Emit only the lines mentioning "IP Address".
printf '%s\n' "$ip_info" | sed -n '/IP Address/p'
| true
|
107fd9a04dc5a1e97b3c8743289e87cca5f2fa7c
|
Shell
|
RHESSys/RHESSys
|
/util/GRASS/cst/cst.sh
|
UTF-8
| 1,318
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
# This shell script runs the cst (Create Stream Table) program
debug_flag=
verbose_flag=
USAGE="Usage: $(basename "$0") [-dhv]"
# Parse command line options.
while getopts dhv OPT; do
    case "$OPT" in
        d)
            debug_flag='-d'
            ;;
        h)
            # BUGFIX: print usage and stop — the original fell through and
            # still ran cst after showing the help text.
            echo "$USAGE"
            exit 0
            ;;
        v)
            verbose_flag='-v'
            ;;
        \?)
            # getopts issues an error message
            echo "$USAGE" >&2
            exit 1
            ;;
    esac
done
# Remove the switches we parsed above.
shift $((OPTIND - 1))
# We want at least one non-option argument.
# Remove this block if you don't need it.
#if [ $# -eq 0 ]; then
# echo $USAGE >&2
# exit 1
#fi
# Access additional arguments as usual through
# variables $@, $*, $1, $2, etc. or using this loop:
#for PARAM; do
# echo $PARAM
#done
#cst $debug_flag $verbose_flag output=hp-stream-table.txt basin=nanbasin stream=str.t100 dem=bigdem patch=p.dem90m.cl zone=h.t100 hill=h.t100 \
# streamBottomWidth=2.1 streamTopWidth=1.3 streamDepth=5.5 ManningsN=0.55 maxPasses=20
# Hui's basin
# NOTE(review): the active invocation below ignores $debug_flag/$verbose_flag;
# only the commented-out template passes them — confirm whether intended.
cst output=nan.stream stream=str.t100 dem=bigdem patch=p.dem90m.cl zone=h.t100 hill=h.t100 basin=nanbasin ManningsN=0.05 streamTopWidth=topwidth streamBottomWidth=bottomwidth streamDepth=depth maxPasses=10
| true
|
54b8fd7eb85bdd26eb921e177b1e4e2685d64810
|
Shell
|
pablox-cl/dotfiles-b
|
/config/zsh/themes/prompt_pbr_setup
|
UTF-8
| 542
| 3.0625
| 3
|
[
"MIT"
] |
permissive
|
# vim: ft=zsh
# Minimal zsh prompt theme ("pbr"): a bare '%'/'#' prompt on the left; the
# last non-zero exit status and git repo:branch info on the right.
# Emit "repo:branch" (or "repo:branch|action" during e.g. a rebase) via
# vcs_info, followed by '*' when the work tree has uncommitted changes.
__git_info() {
vcs_info
printf "$vcs_info_msg_0_" # branch name/action
command git rev-parse --is-inside-work-tree &>/dev/null || return
# `git diff --quiet` exits 1 exactly when there are unstaged differences.
command git diff --quiet --ignore-submodules HEAD &>/dev/null
(( $? == 1 )) && printf '*'
}
# Theme entry point, invoked by zsh's promptinit framework.
prompt_pbr_setup() {
# prompt_opts is read by promptinit; 'subst' makes $(__git_info) re-run
# on every prompt redraw.
prompt_opts=( cr subst percent )
autoload -Uz vcs_info
zstyle ':vcs_info:*' enable git
zstyle ':vcs_info:git*' formats '%r:%b'
zstyle ':vcs_info:git*' actionformats '%r:%b|%a'
PROMPT='%# '
# %(?..%? ) prints the exit code plus a space only when it is non-zero.
RPROMPT='%(?..%? )$(__git_info)'
}
prompt_pbr_setup "$@"
| true
|
0956fe3b6db5de1ec53a39d4a0e3cb6288d17dff
|
Shell
|
onuohagodswill1/UserManager
|
/setup.sh
|
UTF-8
| 2,178
| 4.1875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Bootstrap the UserManager app: detect the distro, ensure docker and
# docker-compose are installed, write webapp/.env, and bring the stack up.
echo "Setup script running ...."
LOGFILE=/var/log/UserManagerAppSetup.log
APP_ENV=webapp/.env
# detect Operating System: read the ID field of /etc/os-release, lower-cased
lsb_dist=$( echo "$(. /etc/os-release && echo "$ID")" )
lsb_dist="$(echo "$lsb_dist" | tr '[:upper:]' '[:lower:]')"
echo "$lsb_dist platform detected"
#exit 1
#checks if a command exists (silently; exit status only)
command_exists() {
command -v "$@" > /dev/null 2>&1
}
#installs docker-compose, plus pip which facilitates the installation
install_docker_compose(){
#assert python is installed
if ! command_exists python; then
echo "Please install python and re-run script"
exit 1
fi
#install pip if not installed
if ! command_exists pip; then
install_package python-pip
fi
#install docker-compose
sudo pip install docker-compose
}
#sets up docker and docker-compose
# (install_package is defined further down; shell resolves function names at
# call time, so that is fine as long as docker_setup runs after parsing.)
docker_setup(){
#install docker if not installed
if ! command_exists docker
then
{
wget -qO- https://get.docker.com/ | sh
}||{
echo "Platform not supported by docker installation script. Manually install docker and then run this script again"
exit 1
}
else
echo "docker installed at $(which docker)" >> $LOGFILE
fi
#install docker compose if not installed
if ! command_exists docker-compose
then
install_docker_compose
else
echo "docker-compose installed at $(which docker-compose)" >> $LOGFILE
fi
}
#installs a given package the right way for the distribution
#takes one parameter, the package to install
install_package(){
case "$lsb_dist" in
centos|fedora)
sudo yum install $1 -y
;;
*)
# default: assume an apt-based system; report and abort otherwise
{
sudo apt-get install $1 -y
}||{
echo "[x] error can't auto install packages: Manually install $1 and try again"
echo "installing package for unknown platform $lsb_dist " >> $LOGFILE
exit 1
}
esac
}
#create .env file (fails cleanly when the webapp directory is absent)
{
cat > $APP_ENV << XEOF
PORT=3000
DB_URL='mongodb://db:27017/userManagerApp'
XEOF
}||{
echo "Creating $APP_ENV [failed]: ensure webapp folder exists and is writeable"
echo "Creating $APP_ENV [failed]" >> $LOGFILE
exit 1
}
sudo chmod 666 $APP_ENV
#setup docker and docker-compose
docker_setup
# Bring up the compose stack (foreground) under project name "myApp".
sudo docker-compose -p myApp up
| true
|
548bd6e85e9aef310c474e849c516c1d3632985f
|
Shell
|
andrdi/kotel
|
/opt/sms/sms2mail.sh
|
UTF-8
| 1,311
| 3.140625
| 3
|
[] |
no_license
|
#! /bin/bash
# SMS hook: the SMS daemon exports the incoming message via SMS_1_* env vars.
# If the text "report" arrives from the trusted number, mail a system status
# report; otherwise forward the message itself by mail.
if [ "$SMS_1_NUMBER" = "+79615777191" ] && [ "$SMS_1_TEXT" = "report" ]; then
# Build the report as an RFC822-ish mail body in report.txt.
echo "Subject: Current report:" > report.txt
echo "" >> report.txt
echo "Current report:" >> report.txt
echo "" >> report.txt
# 1-wire sensor readings; the sed inserts a decimal point into the raw
# millidegree value reported by the kernel.
echo "Temperature:" >> report.txt
echo $(cat /sys/bus/w1/devices/28-04146da896ff/w1_slave | grep t= | sed 's|.*t=|Current temp1: |' | sed 's/./&./17') C >> report.txt
echo $(cat /sys/bus/w1/devices/28-04146dd20dff/w1_slave | grep t= | sed 's|.*t=|Current temp2: |' | sed 's/./&./17') C >> report.txt
echo "" >> report.txt
echo "Shedule:" >> report.txt
cat /opt/kotel/kotel.cfg | grep \"time >> report.txt
echo "" >> report.txt
echo "Kotel log:" >> report.txt
tail -20 /var/log/sv/kotel/current | grep -v "Current" >> report.txt
echo "" >> report.txt
echo "System info:" >> report.txt
echo "" >> report.txt
echo "Uptime:" >> report.txt
uptime >> report.txt
echo "" >> report.txt
echo "df:" >> report.txt
df -h >> report.txt
cat report.txt | msmtp -d andrdi@ya.ru > /dev/null
rm report.txt
exit
fi
# Forward any other message by mail.
# BUGFIX: the SMS fields are passed as printf ARGUMENTS rather than being
# interpolated into the format string — the original embedded untrusted SMS
# text in the format, so any '%' in a message corrupted the output.
printf 'Subject: Message from %s: %s\nMessage from %s\nSMS Class: %s\nSMS Reference: %s\nSMS Message text: %s\n' \
"$SMS_1_NUMBER" "$SMS_1_TEXT" "$SMS_1_NUMBER" "$SMS_1_CLASS" "$SMS_1_REFERENCE" "$SMS_1_TEXT" \
| msmtp -d andrdi@ya.ru > /dev/null
| true
|
b07b2c60cf786d73244077e4ee1e4f102dd56b9a
|
Shell
|
mydevopsandcloud/lab-1
|
/userdata.sh
|
UTF-8
| 805
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# Instance user-data: wait for cloud-init to finish, install Apache, mount
# the EFS filesystem under the web root, and publish a test index page.
# sleep until instance is ready
until [[ -f /var/lib/cloud/instance/boot-finished ]]; do
  sleep 1
done
# BUGFIX: install httpd BEFORE enabling/starting it — the original ran
# systemctl enable/start first, when the unit did not exist yet.
sudo yum -y install httpd
sudo systemctl enable httpd
sudo systemctl start httpd
# BUGFIX: create the same directory that is used as the mount target — the
# original made /var/www/html/mount-point but mounted efs-mount-point.
sudo mkdir -p /var/www/html/efs-mount-point
mount -t efs fs-12345678:/ /var/www/html/efs-mount-point
cd /var/www/html/efs-mount-point
sudo mkdir -p sampledir
sudo chown ec2-user sampledir
sudo chmod -R o+r sampledir
cd sampledir
echo "<html><h1> TEST APPACHE WEB SERVER </h1></html>" > index.html
# DigitalOcean metadata service: hostname and public IPv4 of this droplet.
export HOSTNAME=$(curl -s http://169.254.169.254/metadata/v1/hostname)
export PUBLIC_IPV4=$(curl -s http://169.254.169.254/metadata/v1/interfaces/public/0/ipv4/address)
echo Hello from Droplet $HOSTNAME, with IP Address: $PUBLIC_IPV4 > /var/www/html/index.html
| true
|
88b690beef37be4a3120b789d40d1bd297bf0c09
|
Shell
|
Axway/ats-testexplorer
|
/src/main/db/postgresql/upgrade_postgresql.sh
|
UTF-8
| 6,866
| 4.09375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
OLD_DB_VERSION=4.0.10
NEW_DB_VERSION=4.0.11
NEEDS_UPGRADE=false
INTERACTIVE_MODE=0
BATCH_MODE=1
MODE=$INTERACTIVE_MODE
function print_help() {
echo "The usage is ${0} [OPTION] [VALUE] ...
The following script upgrades an ATS Logging DB from version $OLD_DB_VERSION to current version $NEW_DB_VERSION"
echo "Available options
-H <target_SQL_server_host>, default is: localhost; Might be specified by env variable: PGHOST
-p <target_SQL_server_port>, default is: 5432; Might be specified by env variable: PGPORT
-d <target_SQL_database_name>; default: no. Required for non-interactive (batch mode). Might be specified by env variable: PGDATABASE
-U <target_SQL_admin_name>, default: use current OS account; Might be specified by env variable: PGUSER
-S <target_SQL_admin_password>, default: no; Might be specified by env variable: PGPASSWORD
-u <target_SQL_user_name>, default is: AtsUser; Might be specified by env variable: PSQL_USER_NAME
-s <target_SQL_user_password>; Might be specified by env variable: PSQL_USER_PASSWORD"
}
# save the starting folder location
START_FOLDER="$PWD"
# navigate to the install file directory
cd $(dirname $0)
# delete previous tmpUpgradeDbScript.sql if one exists
rm -rf tmpUpgradeDbScript.sql
touch tmpUpgradeDbScript.sql
# delete previous upgrade.log if one exists
rm -rf upgrade.log
touch upgrade.log
if [ -z "$PGHOST" ]; then
PGHOST=localhost
else
echo "PGHOST environment variable is defined with the value: $PGHOST"
fi
if [ -z "$PGPORT" ]; then
PGPORT=5432
else
echo "PGPORT environment variable is defined with the value: $PGPORT"
fi
if [ -n "$PGDATABASE" ]; then
echo "PGDATABASE environment variable is defined with the value: $PGDATABASE"
MODE=$BATCH_MODE
fi
if [ -n "$PGUSER" ]; then
echo "PGUSER environment variable is defined with the value: $PGUSER"
fi
if [ -n "$PGPASSWORD" ]; then
echo "PGPASSWORD environment variable is defined and will be with be used"
fi
export PGPASSWORD=$PGPASSWORD
if [ -z "$PSQL_USER_NAME" ]; then
PSQL_USER_NAME="AtsUser"
else
echo "PSQL_USER_NAME environment variable is defined with the value: $PSQL_USER_NAME"
fi
if [ -z "$PSQL_USER_PASSWORD" ]; then
PSQL_USER_PASSWORD="AtsPassword1"
else
echo "PSQL_USER_PASSWORD environment variable is defined and will be used"
fi
while getopts ":H:p:d:u:s:U:S:h" option; do
case $option in
H)
PGHOST=$OPTARG
export PGHOST
;;
p)
PGPORT=$OPTARG
export PGPORT
;;
d)
PGDATABASE=$OPTARG
export PGDATABASE
MODE=$BATCH_MODE
;;
u)
PSQL_USER_NAME=$OPTARG
;;
s)
PSQL_USER_PASSWORD=$OPTARG
;;
U)
PGUSER=$OPTARG
export PGUSER
;;
S)
PGPASSWORD=$OPTARG
export PGPASSWORD
;;
h)
print_help
exit 1
;;
\?)
echo "Invalid option: -$OPTARG"
print_help
exit 1
;;
esac
done
if [[ -z "$PGUSER" ]]; then
echo "Admin user credentials need to be provided in order to create new database"
exit 2
fi
# password could have been provided externally from env
# if interactive mode
#if [ -z "$PGPASSWORD" ];
#then
# # reads silently the value without echo to the terminal
# read -sp 'Enter admin DB (postgres) password and press enter (input is hidden): ' PGPASSWORD
# export PGPASSWORD
# # new line
# echo ' '
#fi
if [ -z "$PGPASSWORD" ];
then
echo "Neither PGPASSWORD env variable nor -S option is set. Aborting upgrade"
# TODO: optionally check for ~/.pgpass but complex parsing is needed to check if there is line for desired host:user
exit 3
fi
# Check whether database $1 exists on the configured server.
# Globals read : PGHOST, PGPORT, PGUSER (and PGPASSWORD via psql's env).
# Globals set  : PGDATABASE (overwritten with $1 as a side effect),
#                DATABASE_EXISTS.
# Returns      : the match count via the exit status -- callers read $?
#                and compare against 1.  NOTE(review): exit statuses wrap
#                at 256; safe here only because the grep below matches at
#                most one line per database name.
# Exits 6 when the database list itself cannot be retrieved.
function check_db_existence() {
# return the number of DBs with provided name.
PGDATABASE="$1"
# see if database exists.
# Make sure PGPASSWORD is already set
DBS_OUTPUT=$(psql -h $PGHOST -p $PGPORT -U $PGUSER -l)
if [ $? != 0 ]; then
echo "List of installed databases could not be retrieved. Possible cause is wrong host, port parameter, DB admin user or password"
echo "Use option \"-h\" for help"
exit 6
fi
# 1st column of result is the table. Check with spaces around to prevent possible substring match
DATABASE_EXISTS=$(echo "$DBS_OUTPUT" | grep -c --regexp="^ $PGDATABASE ")
# if [ "$DATABASE_EXISTS" == 0 ]; then
# if [ $MODE == $BATCH_MODE ]; then
# echo "No database exists with the given name. Upgrade not possible. "
# exit 3
# else
# echo "No database exists with the given name. Please select another name"
# fi
# fi
return "$DATABASE_EXISTS"
}
DATABASE_EXISTS=0
until [ "$DATABASE_EXISTS" == 1 ]; do
if [ "$MODE" == "$INTERACTIVE_MODE" ]; then
read -r -p 'Enter Database name: ' PGDATABASE
fi
# see if database exists
check_db_existence "$PGDATABASE"
DATABASE_EXISTS=$?
if [ "$DATABASE_EXISTS" == "0" ]; then
if [ "$MODE" == "$BATCH_MODE" ]; then
echo "Database named $PGDATABASE does not exist so it could not be upgraded. Installation will abort."
exit 3
else
echo "Database named $PGDATABASE does not exist. Please choose existing DB or Ctrl+C to abort script execution."
fi
fi
done
# get database version and change NEEDS_UPGRADE flag if needed
DB_VERSION=$(psql -h "$PGHOST" -p "$PGPORT" -U "$PGUSER" -d "$PGDATABASE" -t -c "SELECT \"value\" FROM \"tInternal\" WHERE \"key\" = 'version'" | xargs) # | xargs is used to trim the db version string
if [ "$DB_VERSION" = "$OLD_DB_VERSION" ]; then
NEEDS_UPGRADE=true
else
NEEDS_UPGRADE=false
fi
if [ "$NEEDS_UPGRADE" = true ]; then
echo "UPGRADING \"$PGDATABASE\" from version \"$DB_VERSION\" to \"$NEW_DB_VERSION\""
echo "\connect $PGDATABASE" >>tmpUpgradeDbScript.sql
echo " " >>tmpUpgradeDbScript.sql
echo "UPDATE \"tInternal\" SET value = '${NEW_DB_VERSION}_draft' WHERE key = 'version';" >>tmpUpgradeDbScript.sql
echo " " >>tmpUpgradeDbScript.sql
cat TestExplorerDb_PostgreSQL_Upgrade.sql >>tmpUpgradeDbScript.sql
psql -U "$PGUSER" -h "$PGHOST" -p "$PGPORT" -a -f tmpUpgradeDbScript.sql >upgrade.log 2>&1
# grep 'ERROR:\|WARNING:'
NUM_OF_ERRORS=$(cat upgrade.log | grep -ci --regex='ERROR:\|FATAL:')
if [[ "$NUM_OF_ERRORS" == 0 ]]; then
# check internal versions table
psql -U "$PGUSER" -h "$PGHOST" -p $PGPORT -d "$PGDATABASE" -t -c "UPDATE \"tInternal\" SET value = '$NEW_DB_VERSION' WHERE key = 'version'"
else
echo "Errors found during install: $NUM_OF_ERRORS. Check upgrade.log file for details"
if [ $MODE == $BATCH_MODE ]; then
exit 4
fi
fi
echo "Installation of database \"$PGDATABASE\" completed."
if [ $MODE == $BATCH_MODE ]; then
exit 0
fi
# back to the starting folder location
cd "$START_FOLDER" || {
echo "Failed to navigate back to last working directory"
exit 5
}
else
echo "Could not upgrade \"$PGDATABASE\" from \"$DB_VERSION\" to \"$NEW_DB_VERSION\""
if [ $MODE == $BATCH_MODE ]; then
exit 6
fi
fi
| true
|
40396303bf33d29a888f6e4c41ea1029a52c292f
|
Shell
|
rosshosman/patatetoy
|
/patatetoy.zsh
|
UTF-8
| 13,750
| 3.25
| 3
|
[
"MIT"
] |
permissive
|
# Patatetoy
# by Maxime Loliée
# https://github.com/loliee/patatetoy
#
# Zsh initialy forked from https://github.com/sindresorhus/pure
PATATETOY_INSTALL_DIR=${PATATETOY_INSTALL_DIR:-$HOME}
. "$PATATETOY_INSTALL_DIR/.patatetoy/patatetoy_common.sh"
PATATETOY_VIM_MODE=${PATATETOY_VIM_MODE:-0}
PATATETOY_GIT_PULL=${PATATETOY_GIT_PULL:-1}
PATATETOY_GIT_DELAY_DIRTY_CHECK=${PATATETOY_GIT_DELAY_DIRTY_CHECK:-1800}
# Colors
PATATETOY_USERNAME_COLOR=${PATATETOY_USERNAME_COLOR:-white}
PATATETOY_ROOT_SYMBOL_COLOR=${PATATETOY_ROOT_SYMBOL_COLOR:-red}
PATATETOY_VIRTUALENV_COLOR=${PATATETOY_VIRTUALENV_COLOR:-8}
PATATETOY_CURSOR_COLOR_OK=${PATATETOY_CURSOR_COLOR_OK:-yellow}
PATATETOY_CURSOR_COLOR_KO=${PATATETOY_CURSOR_COLOR_KO:-red}
PATATETOY_GIT_ARROW_COLOR=${PATATETOY_GIT_ARROW_COLOR:-yellow}
PATATETOY_GIT_BRANCH_COLOR=${PATATETOY_GIT_BRANCH_COLOR:-8}
PATATETOY_GIT_DIRTY_SYMBOL_COLOR=${PATATETOY_GIT_DIRTY_SYMBOL_COLOR:-8}
PATATETOY_GIT_STASH_COLOR=${PATATETOY_GIT_STASH_COLOR:-red}
# stores (into prompt_patatetoy_cmd_exec_time) the exec time of the last command if set threshold was exceeded
# Compute the seconds elapsed since preexec stamped
# prompt_patatetoy_cmd_timestamp (0 when no command has run yet, via the
# ${:-} fallback) and pass the value to patatetoy_cmd_exec_time -- a
# helper from patatetoy_common.sh, not visible here, which presumably
# stores the formatted duration for the preprompt (TODO confirm).
prompt_patatetoy_check_cmd_exec_time() {
# zsh 'integer' declares an arithmetic-typed local
integer elapsed
(( elapsed = EPOCHSECONDS - ${prompt_patatetoy_cmd_timestamp:-$EPOCHSECONDS} ))
patatetoy_cmd_exec_time $elapsed
}
function zle-line-init zle-keymap-select {
prompt_patatetoy_vim_mode="%F{$PATATETOY_GIT_BRANCH_COLOR}${${KEYMAP/vicmd/n }/(main|viins)/i }%f"
zle reset-prompt
}
prompt_patatetoy_clear_screen() {
# enable output to terminal
zle -I
# clear screen and move cursor to (0, 0)
print -n '\e[2J\e[0;0H'
# print preprompt
prompt_patatetoy_preprompt_render precmd
}
prompt_patatetoy_set_title() {
# emacs terminal does not support settings the title
(( ${+EMACS} )) && return
# tell the terminal we are setting the title
print -n '\e]0;'
# show hostname if connected through ssh
[[ -n $SSH_CONNECTION ]] && print -Pn '(%m) '
case $1 in
expand-prompt)
print -Pn $2;;
ignore-escape)
print -rn $2;;
esac
# end set title
print -n '\a'
}
prompt_patatetoy_preexec() {
# attempt to detect and prevent prompt_patatetoy_async_git_fetch from interfering with user initiated git or hub fetch
[[ $2 =~ (git|hub)\ .*(pull|fetch) ]] && async_flush_jobs 'prompt_patatetoy'
prompt_patatetoy_cmd_timestamp=$EPOCHSECONDS
# shows the current dir and executed command in the title while a process is active
prompt_patatetoy_set_title 'ignore-escape' "$PWD:t: $2"
}
# string length ignoring ansi escapes
# Measure the *visible* length of prompt string $1 (stripping zsh prompt
# escapes such as %F{...}, %K{...}, %B/%b, %f/%k via the nested parameter
# substitution) and store the result in the variable named by $2.
# typeset -g makes the assignment land in the caller's (global) scope.
prompt_patatetoy_string_length_to_var() {
local str=$1 var=$2 length
# perform expansion on str and check length
length=$(( ${#${(S%%)str//(\%([KF1]|)\{*\}|\%[Bbkf])}} ))
# store string length in variable as specified by caller
typeset -g "${var}"="${length}"
}
prompt_patatetoy_preprompt_render() {
# store the current prompt_subst setting so that it can be restored later
local prompt_subst_status=$options[prompt_subst]
# make sure prompt_subst is unset to prevent parameter expansion in prompt
setopt local_options no_prompt_subst
# check that no command is currently running, the preprompt will otherwise be rendered in the wrong place
[[ -n ${prompt_patatetoy_cmd_timestamp+x} && "$1" != "precmd" ]] && return
# construct preprompt, beginning with path # username and machine if applicable
local preprompt="$prompt_patatetoy_username%F{blue}$(patatetoy_collapse_pwd)%f"
# git info
patatetoy_git_branch
preprompt+="%F{$PATATETOY_GIT_BRANCH_COLOR}${patatetoy_git_branch}%f"
preprompt+="%F{${PATATETOY_GIT_DIRTY_SYMBOL_COLOR}}${prompt_patatetoy_git_dirty}%f"
preprompt+="%F{$PATATETOY_GIT_STASH_COLOR}${patatetoy_git_stash}%f"
preprompt+="%F{$PATATETOY_GIT_ARROW_COLOR}${prompt_patatetoy_git_arrows}%f"
preprompt+="%F{$PATATETOY_VIRTUALENV_COLOR}$(patatetoy_virtualenv_info)%f"
# execution time
preprompt+="%F{yellow}$prompt_patatetoy_cmd_exec_time%f"
# make sure prompt_patatetoy_last_preprompt is a global array
typeset -g -a prompt_patatetoy_last_preprompt
# if executing through precmd, do not perform fancy terminal editing
if [[ "$1" == "precmd" ]]; then
print -P "\n${preprompt}"
else
# only redraw if the expanded preprompt has changed
[[ "${prompt_patatetoy_last_preprompt[2]}" != "${(S%%)preprompt}" ]] || return
# calculate length of preprompt and store it locally in preprompt_length
integer preprompt_length lines
prompt_patatetoy_string_length_to_var "${preprompt}" "preprompt_length"
# calculate number of preprompt lines for redraw purposes
(( lines = ( preprompt_length - 1 ) / COLUMNS + 1 ))
# calculate previous preprompt lines to figure out how the new preprompt should behave
integer last_preprompt_length last_lines
prompt_patatetoy_string_length_to_var "${prompt_patatetoy_last_preprompt[1]}" "last_preprompt_length"
(( last_lines = ( last_preprompt_length - 1 ) / COLUMNS + 1 ))
# clr_prev_preprompt erases visual artifacts from previous preprompt
local clr_prev_preprompt
if (( last_lines > lines )); then
# move cursor up by last_lines, clear the line and move it down by one line
clr_prev_preprompt="\e[${last_lines}A\e[2K\e[1B"
while (( last_lines - lines > 1 )); do
# clear the line and move cursor down by one
clr_prev_preprompt+='\e[2K\e[1B'
(( last_lines-- ))
done
# move cursor into correct position for preprompt update
clr_prev_preprompt+="\e[${lines}B"
# create more space for preprompt if new preprompt has more lines than last
elif (( last_lines < lines )); then
# move cursor using newlines because ansi cursor movement can't push the cursor beyond the last line
printf $'\n'%.0s {1..$(( lines - last_lines ))}
fi
# disable clearing of line if last char of preprompt is last column of terminal
local clr='\e[K'
(( COLUMNS * lines == preprompt_length )) && clr=
# modify previous preprompt
print -Pn "${clr_prev_preprompt}\e[${lines}A\e[${COLUMNS}D${preprompt}${clr}\n"
if [[ $prompt_subst_status = 'on' ]]; then
# re-eanble prompt_subst for expansion on PS1
setopt prompt_subst
fi
# redraw prompt (also resets cursor position)
zle && zle .reset-prompt
setopt no_prompt_subst
fi
# store both unexpanded and expanded preprompt for comparison
prompt_patatetoy_last_preprompt=("$preprompt" "${(S%%)preprompt}")
}
prompt_patatetoy_precmd() {
# check exec time and store it in a variable
prompt_patatetoy_check_cmd_exec_time
# by making sure that prompt_patatetoy_cmd_timestamp is defined here the async functions are prevented from interfering
# with the initial preprompt rendering
prompt_patatetoy_cmd_timestamp=
# check for git arrows
patatetoy_git_upstream
# shows the full path in the title
prompt_patatetoy_set_title 'expand-prompt' '%~'
# get vcs info
patatetoy_vcs_info
# preform async git dirty check and fetch
prompt_patatetoy_async_tasks
# print the preprompt
prompt_patatetoy_preprompt_render "precmd"
# remove the prompt_patatetoy_cmd_timestamp, indicating that precmd has completed
unset prompt_patatetoy_cmd_timestamp
}
# fastest possible way to check if repo is dirty
prompt_patatetoy_async_git_dirty() {
setopt localoptions noshwordsplit
builtin cd -q $1
patatetoy_git_dirty
}
patatetoy_async_git_stash() {
# use cd -q to avoid side effects of changing directory, e.g. chpwd hooks
builtin cd -q "$*"
patatetoy_git_stash
}
prompt_patatetoy_async_git_fetch() {
setopt localoptions noshwordsplit
# use cd -q to avoid side effects of changing directory, e.g. chpwd hooks
builtin cd -q $1
# set GIT_TERMINAL_PROMPT=0 to disable auth prompting for git fetch (git 2.3+)
export GIT_TERMINAL_PROMPT=0
# set ssh BachMode to disable all interactive ssh password prompting
export GIT_SSH_COMMAND=${GIT_SSH_COMMAND:-"ssh -o BatchMode=yes"}
command git -c gc.auto=0 fetch &>/dev/null || return 1
# check arrow status after a successful git fetch
prompt_patatetoy_async_git_arrows $1
}
prompt_patatetoy_async_git_arrows() {
setopt localoptions noshwordsplit
builtin cd -q $1
patatetoy_git_upstream
}
prompt_patatetoy_async_tasks() {
setopt localoptions noshwordsplit
# initialize async worker
((!${prompt_patatetoy_async_init:-0})) && {
async_start_worker "prompt_patatetoy" -u -n
async_register_callback "prompt_patatetoy" prompt_patatetoy_async_callback
prompt_patatetoy_async_init=1
}
# store working_tree without the "x" prefix
local working_tree="$patatetoy_vcs_working_tree"
# check if the working tree changed (prompt_patatetoy_current_working_tree is prefixed by "x")
if [[ ${prompt_patatetoy_current_working_tree#x} != $working_tree ]]; then
# stop any running async jobs
async_flush_jobs "prompt_patatetoy"
# reset git preprompt variables, switching working tree
unset patatetoy_git_branch
unset prompt_patatetoy_git_dirty
unset patatetoy_git_stash
unset prompt_patatetoy_git_last_dirty_check_timestamp
prompt_patatetoy_git_arrows=
# set the new working tree and prefix with "x" to prevent the creation of a named path by AUTO_NAME_DIRS
prompt_patatetoy_current_working_tree="x${working_tree}"
fi
# only perform tasks inside git working tree
[[ -n $working_tree ]] || return
async_job "prompt_patatetoy" prompt_patatetoy_async_git_arrows $working_tree
# do not preform git fetch if it is disabled or working_tree == HOME
if (( ${PATATETOY_GIT_PULL:-1} )) && [[ $working_tree != $HOME ]]; then
# tell worker to do a git fetch
async_job "prompt_patatetoy" prompt_patatetoy_async_git_fetch $working_tree
fi
# if dirty checking is sufficiently fast, tell worker to check it again, or wait for timeout
integer time_since_last_dirty_check=$(( EPOCHSECONDS - ${prompt_patatetoy_git_last_dirty_check_timestamp:-0} ))
if (( time_since_last_dirty_check > $PATATETOY_GIT_DELAY_DIRTY_CHECK)); then
unset prompt_patatetoy_git_last_dirty_check_timestamp
# check check if there is anything to pull
async_job "prompt_patatetoy" prompt_patatetoy_async_git_dirty $working_tree
fi
# check for stash
local time_since_last_stash_check=$(( $EPOCHSECONDS - ${prompt_patatetoy_git_last_stash_check_timestamp:-0} ))
if (( $time_since_last_stash_check > 1800 )); then
unset prompt_patatetoy_git_last_stash_check_timestamp
# check if there is anything any stash
async_job "prompt_patatetoy" patatetoy_async_git_stash "$working_tree"
fi
}
prompt_patatetoy_async_callback() {
setopt localoptions noshwordsplit
local job=$1 code=$2 output=$3 exec_time=$4
case $job in
prompt_patatetoy_async_git_dirty)
local prev_dirty=$prompt_patatetoy_git_dirty
if (( code == 0 )); then
prompt_patatetoy_git_dirty=""
else
prompt_patatetoy_git_dirty="$PATATETOY_GIT_DIRTY_SYMBOL"
fi
[[ $prev_dirty != $prompt_patatetoy_git_dirty ]] && prompt_patatetoy_preprompt_render
# When prompt_patatetoy_git_last_dirty_check_timestamp is set, the git info is displayed in a different color.
# To distinguish between a "fresh" and a "cached" result, the preprompt is rendered before setting this
# variable. Thus, only upon next rendering of the preprompt will the result appear in a different color.
(( $exec_time > 2 )) && prompt_patatetoy_git_last_dirty_check_timestamp=$EPOCHSECONDS
;;
patatetoy_async_git_stash)
patatetoy_git_stash=$output
prompt_patatetoy_preprompt_render
(( $exec_time > 2 )) && prompt_patatetoy_git_last_stash_check_timestamp=$EPOCHSECONDS
;;
prompt_patatetoy_async_git_fetch|prompt_patatetoy_async_git_arrows)
# prompt_patatetoy_async_git_fetch executes prompt_patatetoy_async_git_arrows
# after a successful fetch.
prompt_patatetoy_git_arrows=$patatetoy_git_upstream
if (( code == 0 )); then
prompt_patatetoy_preprompt_render
fi
;;
esac
}
prompt_patatetoy_setup() {
# prevent percentage showing up
# if output doesn't end with a newline
export PROMPT_EOL_MARK=''
prompt_opts=(subst percent)
# borrowed from promptinit, sets the prompt options in case pure was not
# promptinit and we need to take care of setting the options ourselves
# initialized via promptinit.
setopt noprompt{bang,cr,percent,subst} "prompt${^prompt_opts[@]}"
zmodload zsh/datetime
zmodload zsh/zle
zmodload zsh/parameter
autoload -Uz add-zsh-hook
autoload -Uz async && async
add-zsh-hook precmd prompt_patatetoy_precmd
add-zsh-hook preexec prompt_patatetoy_preexec
# if the user has not registered a custom zle widget for clear-screen,
# override the builtin one so that the preprompt is displayed correctly when
# ^L is issued.
if [[ $widgets[clear-screen] == 'builtin' ]]; then
zle -N clear-screen prompt_patatetoy_clear_screen
fi
# show username@host if logged in through SSH
if [[ "$SSH_CONNECTION" != '' ]] || [[ $PATATETOY_FORCE_DISPLAY_USERNAME == 1 ]]; then
prompt_patatetoy_username='%F{$PATATETOY_USERNAME_COLOR}%n%F{$PATATETOY_USERNAME_COLOR}@%m '
fi
# show red star if root
if [[ $UID -eq 0 ]]; then
prompt_patatetoy_username+="%F{$PATATETOY_ROOT_SYMBOL_COLOR}$PATATETOY_ROOT_SYMBOL%f "
fi
# Check if vim mode enable
if [[ $PATATETOY_VIM_MODE == 1 ]] then;
zle -N zle-line-init
zle -N zle-keymap-select
PROMPT='${prompt_patatetoy_vim_mode}'
else
PROMPT=''
fi
# prompt turns red if the previous command didn't exit with 0
PROMPT+='%(?.%F{$PATATETOY_CURSOR_COLOR_OK}.%F{$PATATETOY_CURSOR_COLOR_KO})$PATATETOY_PROMPT_SYMBOL%f '
}
prompt_patatetoy_setup "$@"
| true
|
428e5d5ff8bb0728f11ef52e18505554f58d7a05
|
Shell
|
marcociccone/Azure-GPU-Setup
|
/gpu-setup-part2.sh
|
UTF-8
| 1,455
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
# Part 2 of the Azure GPU setup: installs the CUDA 8.0 toolkit + driver,
# cuDNN 5.1, TensorFlow build dependencies and Bazel.  Expects part 1 to
# have downloaded the CUDA installer into $HOME/gpu-setup.
#
# NOTE(review): root requirement was intentionally disabled; privileged
# steps use sudo instead.
#if [ "$EUID" -ne 0 ]; then
# echo "Please run as root (use sudo)"
# exit
#fi
SETUP_DIR="$HOME/gpu-setup"
if [ ! -d "$SETUP_DIR" ]; then
echo "Setup directory not found. Did you run part 1?"
# exit non-zero so callers can detect the failure (original exited 0)
exit 1
fi
cd "$SETUP_DIR" || exit 1
# install cudnn -- skip the download when a copy is already present
if [ ! -f "cudnn-8.0-linux-x64-v5.1.tgz" ]; then
wget http://developer.download.nvidia.com/compute/redist/cudnn/v5.1/cudnn-8.0-linux-x64-v5.1.tgz
fi
if [ ! -f "cudnn-8.0-linux-x64-v5.1.tgz" ]; then
echo "You need to download cudnn-8.0 manually! Specifically, place it at: $SETUP_DIR/cudnn-8.0-linux-x64-v5.1.tgz"
exit 1
fi
echo "Installing CUDA toolkit and samples"
# install cuda toolkit
if [ ! -f "cuda_8.0.61_375.26_linux-run" ]; then
echo "CUDA installation file not found. Did you run part 1?"
exit 1
fi
sudo sh cuda_8.0.61_375.26_linux-run --silent --verbose --driver --toolkit
echo "Uncompressing cudnn"
tar xzvf cudnn-8.0-linux-x64-v5.1.tgz || exit 1
# -P preserves the versioned-symlink layout of the cuDNN libraries
sudo cp -P cuda/include/cudnn.h /usr/local/cuda/include/
sudo cp -P cuda/lib64/libcudnn* /usr/local/cuda/lib64/
sudo chmod a+r /usr/local/cuda/include/cudnn.h /usr/local/cuda/lib64/libcudnn*
# other Tensorflow dependencies
sudo apt-get -y install libcupti-dev
# upgrade pip
sudo pip install --upgrade pip
# add the Bazel apt repository, then install/upgrade Bazel
echo "deb [arch=amd64] http://storage.googleapis.com/bazel-apt stable jdk1.8" | sudo tee /etc/apt/sources.list.d/bazel.list
curl https://bazel.build/bazel-release.pub.gpg | sudo apt-key add -
sudo apt-get update && sudo apt-get -y install bazel
sudo apt-get -y upgrade bazel
echo "Script done"
| true
|
f92a8c4ea8f0fd30cc05f140169d28d9802e2732
|
Shell
|
autopkg/grahampugh-recipes
|
/_Scripts/ExtensionAttribute-AdobeFlashPlayer.sh
|
UTF-8
| 290
| 2.78125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Jamf extension attribute: report the installed Adobe Flash Player
# version, or an empty value when the plugin is not installed.
plist="/Library/Internet Plug-Ins/Flash Player.plugin/Contents/Info.plist"
version=""
if [ -f "$plist" ]; then
version=$(defaults read "$plist" CFBundleShortVersionString)
fi
echo "<result>$version</result>"
exit 0
| true
|
0febf30d822a94790ac7ae42c978330236e85988
|
Shell
|
nettan20/.dotfiles
|
/.profile
|
UTF-8
| 1,736
| 2.921875
| 3
|
[] |
no_license
|
# Personal shell profile: environment, PATH, language runtimes (ruby via
# chruby, node via nvm), git/hub, and convenience aliases.
export BREW_PREFIX=/usr/local/opt
export BROWSER=open
export EC2_HOME=$(find /usr/local/Cellar/ec2-api-tools -type d -name libexec | head -n 1)
export EDITOR=vim
# NOTE(review): "~" does not expand inside double quotes, so GOPATH is the
# literal string "~/.gopath"; probably meant "$HOME/.gopath" -- confirm.
export GOPATH="~/.gopath"
export HISTCONTROL=ignoreboth
export HISTSIZE=10000
export JAVA_HOME="$(/usr/libexec/java_home)"
export PATH=$GOPATH/bin:$PATH # Go
export PATH=$HOME/.bin:$PATH # Dotfiles
export PATH=$HOME/.cabal/bin:$PATH # Haskell/Cabal
export PATH=$HOME/.dotfiles/bin:$PATH # Dotfiles
export PATH=/usr/local/bin:/usr/local/sbin:$PATH # Homebrew
export PATH=/usr/local/share/npm/bin:$PATH # Node/NPM
# NOTE(review): putting a relative "bin" first on PATH runs whatever
# ./bin contains in the current directory -- convenient, but a known
# security footgun in untrusted directories.
export PATH=bin:$PATH
# ruby
source $BREW_PREFIX/chruby/share/chruby/chruby.sh
print_ruby() { basename $RUBY_ROOT ;}
# switch to the ruby named in ./.ruby-version, then report it
detect_chruby() { chruby $(cat .ruby-version) && print_ruby ;}
[ -f .ruby-version ] && detect_chruby || chruby 2.0.0
alias 19='chruby ruby-1.9 && print_ruby'
alias 20='chruby ruby-2.0 && print_ruby'
alias 21='chruby ruby-2.1 && print_ruby'
alias jr='chruby jruby && print_ruby'
# node
source $BREW_PREFIX/nvm/nvm.sh
nvm use 0.10 > /dev/null
# git + prompt
alias git=hub # Git ♥ 's GitHub
source $BREW_PREFIX/git/etc/bash_completion.d/git-completion.bash
source ~/.dotfiles/prompt.sh
# boot2docker
alias docker='docker -H tcp://0.0.0.0:4243'
# use
alias use="source _use"
# remove Dropbox when opening new terminal tabs
[[ -d ${PWD/Dropbox\//} ]] && cd ${PWD/Dropbox\//}
# vim-brained
alias :q=exit
# The Silver Searcher
alias ack=ag
# StarCraft
alias starcraft_race="ruby -e 'puts %w[Terran Zerg Protoss].sample'"
# Heroku
production() { heroku $@ --remote production ;}
staging() { heroku $@ --remote staging ;}
# Start wemux if it's not already running elsewhere
[[ -x /tmp/wemux-wemux ]] || wemux
# reset return code to 0
true
| true
|
ac2a8ff543b846f1d30514e37d405ea7b2c5b7ae
|
Shell
|
manikiranp/pattern
|
/user_registration.sh
|
UTF-8
| 1,242
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# User Registration field validation (Patterns: Day 14 assignment).
#
# Each valid_* function returns 0 when its argument matches the rule and
# 1 otherwise; check() prints True/False exactly like the original
# inline tests.  The interactive flow at the bottom is unchanged.

# Name: one leading capital letter followed by at least two more letters.
valid_name() {
    local pattern="^([A-Z]){1}[a-zA-Z]{2,}$"
    [[ $1 =~ $pattern ]]
}

# Email in the form abc.xyz@bl.co.in; the "xyz" and "in" parts are optional.
valid_email() {
    local pattern="^([a-z]+)([a-z0-9\_\.\-]+)@([a-z]+)\.([a-z]{2})((.{1}[a-z]{2})?)$"
    [[ $1 =~ $pattern ]]
}

# Country code (1-3 digits, no leading zero), a space, then a 10-digit number.
valid_phone() {
    local pattern="^[1-9]{1}[0-9]{0,2} [1-9]{1}[0-9]{9}$"
    [[ $1 =~ $pattern ]]
}

# Password: >= 8 characters, at least one uppercase letter, at least one
# digit, and exactly one special (non-alphanumeric) character.
# Bash's =~ uses POSIX ERE, which has no lookaheads, so each rule is
# tested separately.  (BUG FIX: the original script's lookahead PCRE was
# commented out, so its final check silently reused the phone-number
# pattern left in $pattern.)
valid_password() {
    local pass=$1
    (( ${#pass} >= 8 )) || return 1
    [[ $pass =~ [A-Z] ]] || return 1
    [[ $pass =~ [0-9] ]] || return 1
    # strip every alphanumeric char; what remains are the specials
    local specials=${pass//[[:alnum:]]/}
    (( ${#specials} == 1 ))
}

# Run validator $1 on value $2 and report True/False like the original.
check() {
    if "$1" "$2"; then
        echo True
    else
        echo False
    fi
}

echo "Welcome to User Registration"
echo "Patterns: Day 14 assignment"
#1
read -p "Enter your First name: " first
check valid_name "$first"
#2
read -p "Enter your Last name: " last
check valid_name "$last"
#3
echo "Email address format in abc.xyz@bl.co.in and xyz & in are optional:"
read -p "Enter email: " email
check valid_email "$email"
#4
read -p "Enter country code followed by space and phone number: " num
check valid_phone "$num"
#8
echo "Password Rule1:Minimum 8 characters"
echo "Password Rule2:Atleast one uppercase"
echo "Password Rule3:Atlease one number"
echo "Password Rule4:Has exactly one special character"
read -p "Enter the password: " pass
check valid_password "$pass"
| true
|
755606c511ab283a6706960d4ac5eff6117324ab
|
Shell
|
nonfig/nestjs-config
|
/tools/package.sh
|
UTF-8
| 287
| 3.28125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Generate package: copy release metadata files into the dist folder.
# Files to copy to dist folder
files_to_copy=(
package.json
package-lock.json
README.md
LICENSE
)
# BUG FIX: without the target directory, "cp file dist" silently creates
# a regular *file* named dist and each later copy overwrites it.
mkdir -p dist
for file in "${files_to_copy[@]}"; do
if cp "$file" dist; then
echo 🔹 $file copied ✅
else
# best-effort, like the original: report but keep going
echo "⚠️ could not copy $file" >&2
fi
done
echo '📦📦📦 Packaged successfully! 📦📦📦'
| true
|
9b688c735944fe19859fdebeb701f658f95b0a2a
|
Shell
|
hyeoncheon/hyeoncheon-elastic
|
/setup/reset.sh
|
UTF-8
| 409
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
# Reset the single-node ELK stack: stop logstash, optionally wipe the
# daily indices, then bring everything back up via restart.sh.

sudo systemctl stop logstash.service

# Index cleanup is disabled by default; flip "false" to "true" to wipe
# the listed indices before restarting.
if false; then
    echo "CLEANING EXISTING INDICES......"
    sleep 5
    for day in 14; do
        for idx in logstash syslog netflow snmp; do
            curl -XDELETE localhost:9200/$idx-2017.06.$day
        done
    done
fi

# Full re-provisioning steps, kept for reference:
#./01-install-singlemode.sh
#./02-install-plugins.sh
#sleep 10
#./10-remote-syslog.sh
#./20-netflow.sh
#./30-snmp.sh
#./80-alert.sh

./restart.sh
| true
|
396876f10f300ea000acb8a53bb4797e5acd3e66
|
Shell
|
micheldebree/reality
|
/video/burn_subtitles.sh
|
UTF-8
| 402
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/bash
# Burn a video's matching subtitle track into the picture, scaling the
# subtitle font size relative to the video height (height / 30).
if [ "$#" -ne 1 ]; then
echo "USAGE: $0 <video file>"
exit 1
fi
# Determine video height
# NOTE(review): assumes a single video stream; multiple streams would
# yield multiple height= lines -- confirm for multi-stream inputs.
HEIGHT=$(ffprobe -v error -of default=noprint_wrappers=1 -show_entries stream=height "$1" | grep -o 'height=[0-9]\+' | sed 's/height=//') || exit 1
# Calculate relative fontsize
((FONTSIZE=HEIGHT/30))
# Burn.  BUG FIX: force_style's value must be wrapped in *balanced*
# single quotes inside the filter string; the original was missing the
# closing quote, so ffmpeg rejected the filtergraph.
ffmpeg -i "$1" -vf "subtitles=$1:force_style='Fontsize=$FONTSIZE'" "subtitled-$1"
| true
|
eb1662156c2da2af6cc97be8aa4b276e209e8ea7
|
Shell
|
hoshsadiq/alpine-raspberry-pi
|
/make-image
|
UTF-8
| 3,211
| 3.84375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Build a bootable Alpine Linux image for Raspberry Pi: create a 2G image
# file, partition it (FAT32 boot + ext4 root), install Alpine into it via
# alpine-chroot-install, run the project's build steps, then shrink the
# image to its minimum size.  Must run as root (mount/losetup/mkfs).
# Usage: make-image [shell] [version] [arch]
# these scripts are based on https://github.com/knoopx/alpine-raspberry-pi.
# todo https://raw.githubusercontent.com/Drewsif/PiShrink/master/pishrink.sh
set -eux
script_dir=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
enter_chroot=false
# optional leading "shell" argument: drop into the chroot before packing
if [[ "${1:-}" == "shell" ]]; then
shift
enter_chroot=true
fi
ALPINE_VERSION="${1:-3.12}"
ALPINE_ARCH="${2:-aarch64}"
# NOTE(review): with the defaults above this branch is unreachable, and
# it prints usage without exiting -- likely meant to "exit 1" here.
if [[ "$ALPINE_ARCH" == "" ]] || [[ "$ALPINE_VERSION" == "" ]]; then
echo "Error: must specify arguments alpine arch and version"
echo "Usage:"
echo " $0 <version> <arch>"
echo ""
echo "Example:"
echo " $0 v3.10.2 aarch64"
fi
# reduce "v3.10.2" (or "3.10.2") to the branch name "v3.10"
alpine_branch="$(echo "$ALPINE_VERSION" | awk -F. '{gsub("^v", "", $1); print "v"$1"."$2}')"
tmpdir="$(mktemp -d -t "alpine-rpi-$ALPINE_VERSION-$ALPINE_ARCH.XXXXXXXXXX")"
artifact_file="$script_dir/alpine-rpi-$ALPINE_VERSION-$ALPINE_ARCH.img"
rootfs="$tmpdir"
boot_dir="$rootfs/boot"
build_dir="$rootfs/build"
# Undo everything on interruption: run the chroot's destroy hook, unmount
# the root, delete the temp tree and release loop devices.
# NOTE(review): the trap below fires only on signals, not on EXIT, so a
# failure under "set -e" leaves mounts/loop devices behind -- confirm
# whether that is intentional.
clean_up() {
[[ -x "$rootfs/destroy" ]] && "$rootfs/destroy" -y || true
findmnt -M "$rootfs" && umount "$rootfs"
[[ -n "$tmpdir" ]] && rm -rf "$tmpdir"
losetup --detach-all # todo this should be only the current loop
}
trap clean_up SIGTERM SIGINT SIGQUIT
# sparse 2G image to partition and shrink later
truncate -s 2G "$artifact_file"
# scripted fdisk session: new DOS label, 128MB FAT (type c) boot
# partition, rest of the disk as the root partition
{
echo "o"
echo "n"
echo "p"
echo "1"
echo ""
echo "+128MB"
echo "t"
echo "c"
echo "n"
echo "p"
echo "2"
echo ""
echo ""
echo "w"
} | fdisk -H 255 -S 63 "$artifact_file"
# attach the image; --partscan exposes the partitions as ${LOOP_DEV}p1/p2
LOOP_DEV=$(losetup --partscan --show --find "$artifact_file")
BOOT_DEV="$LOOP_DEV"p1
ROOT_DEV="$LOOP_DEV"p2
# format partitions
mkfs.fat -F32 -n ALPINE "$BOOT_DEV"
mkfs.ext4 -O '^has_journal' "$ROOT_DEV"
mkdir -p "$rootfs"
mount --make-private "$ROOT_DEV" "$rootfs"
mkdir -p "$boot_dir"
mount --make-private "$BOOT_DEV" "$boot_dir"
mkdir -p "$build_dir"
mount --bind "$script_dir/build" "$build_dir"
# bootstrap Alpine into the mounted root; -k whitelists env vars to keep
sudo ./alpine-chroot-install \
-a "$ALPINE_ARCH" \
-b "$alpine_branch" \
-d "$rootfs" \
-k "ARCH CI QEMU_EMULATOR RPI_CI_.* TRAVIS_.*" \
-p "ca-certificates ssl_client"
"${script_dir}/build/run.sh" "$rootfs"
if [[ "$enter_chroot" == "true" ]]; then
"$rootfs/enter-chroot"
fi
# build-time artifacts to purge from the final image (caches, chroot
# helper scripts, leaked resolv.conf / env)
file_dirs=(
var/cache/apk
root
enter-chroot
destroy
etc/resolv.conf
env.sh
)
for file_dir in "${file_dirs[@]}"; do
file_dir="$rootfs/$file_dir"
ls -la "$file_dir"
[[ -d "$file_dir" ]] && find "$file_dir" -mindepth 1 -delete
[[ -f "$file_dir" ]] && rm "$file_dir"
done
umount -lf "$rootfs"
# shrink image
ROOT_PART_START=$(parted -ms "$artifact_file" unit B print | awk -F: 'END{gsub("B$", "", $2); print $2}')
ROOT_BLOCK_SIZE=$(tune2fs -l "$ROOT_DEV" | awk -F': *' '/^Block size:/{print $2}')
ROOT_MIN_SIZE=$(resize2fs -P "$ROOT_DEV" 2>/dev/null | awk -F': *' '/:/{print $2}')
# shrink fs
e2fsck -f -p "$ROOT_DEV"
resize2fs -p "$ROOT_DEV" "$ROOT_MIN_SIZE"
# shrink partition
# new partition end = start byte + minimal fs size (blocks * block size)
PART_END=$((ROOT_PART_START + (ROOT_MIN_SIZE * ROOT_BLOCK_SIZE)))
parted ---pretend-input-tty "$artifact_file" unit B resizepart 2 "$PART_END" yes
losetup -d "$LOOP_DEV"
# truncate free space
FREE_START=$(parted -ms "$artifact_file" unit B print free | awk -F: 'END{gsub("B$", "", $2); print $2}')
truncate -s "$FREE_START" "$artifact_file"
#gzip -f "$artifact_file"
echo "DONE."
| true
|
b66eebd94bdb7bc2c320bef37f3655fcb82ecc91
|
Shell
|
Carlo13gen/Viri
|
/graph_analysis/gdt_origin/scripts/MakePublicGapi.sh
|
UTF-8
| 2,918
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/sh
# Build a public DEMO release of GDT: compile with the demo custom file,
# stage headers/libs/tools/examples into gdt${RELNUM}/, pack the tree as
# gdt${RELNUM}_${HOSTTYPE}.tgz, then restore the original custom file.
# NOTE(review): $HOSTTYPE is a bash variable; under a strict /bin/sh
# (e.g. dash) it may be empty -- confirm the build environment.

# Echo a command, then execute it (replaces the repeated
# "echo cmd; cmd" pairs of the original script).
run() {
    echo "$@"
    "$@"
}

cd .. || exit 1
#----------------------------------------------
# compile in DEMO mode
#----------------------------------------------
make clean
cp custom.${HOSTTYPE} custom.original
cp public_release/custom.demo custom.${HOSTTYPE}
make gdt
make tools
HOST=$HOSTTYPE
RELNUM=4.0_DEMO
# --------------------------------------------------
# Step 1 - create a temporary directory
# named gdt${RELNUM} and its subdirectories
# --------------------------------------------------
run mkdir gdt${RELNUM}
for sub in incl incl/GDT bin bin/$HOST lib lib/$HOST tools tools/simpletest examples output; do
    run mkdir "gdt${RELNUM}/$sub"
done
# --------------------------------------
# Step 2 - copy all the release files in
# the subdirectory gdt${RELNUM}
# --------------------------------------
run cp custom.i386-linux-example gdt${RELNUM}
run cp public_release/public_makefile gdt${RELNUM}/makefile
run cp public_release/README_DEMO gdt${RELNUM}
run cp incl/GDT/*.h gdt${RELNUM}/incl/GDT/
run cp -r lib/* gdt${RELNUM}/lib/
run cp bin/$HOST/simpletest gdt${RELNUM}/bin/${HOST}
run cp tools/public_common.make gdt${RELNUM}/tools/common.make
run cp tools/simpletest/simpletest.cpp gdt${RELNUM}/tools/simpletest/
run cp tools/simpletest/public_makefile gdt${RELNUM}/tools/simpletest/makefile
run cp examples/* gdt${RELNUM}/examples/
# ---------------------------------------------------
# Step 3 - make a compressed archive gdt${RELNUM}.tgz
# and remove the subdirectory
# ---------------------------------------------------
run tar cvzf gdt${RELNUM}_${HOSTTYPE}.tgz gdt${RELNUM}/
run rm -rf gdt${RELNUM}/
#----------------------------------------------------
# Step 4 - restore the original custom.${HOSTTYPE}
#----------------------------------------------------
cp custom.original custom.${HOST}
| true
|
c32ba60199917ea77b360a2e42acd1397948c4c3
|
Shell
|
ucsdlib/github-reports
|
/commits
|
UTF-8
| 443
| 3.734375
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Count commits per repository under $UCSD_ROOT between START_DATE and
# END_DATE (exclusive) and append "repo: count" lines to one report file.
set -x
UCSD_ROOT="$HOME/projects/ucsd"
START_DATE="2017-07-01"
END_DATE="2018-07-01"
# BUG FIX: resolve the report path to an absolute path once -- the loop
# below cd's into every repo, so the original relative "./commit-report-*"
# was scattered across each repository instead of one file.
REPORT_FILE="$PWD/commit-report-$START_DATE-$END_DATE"
echo "before loop"
# BUG FIX: the original used "(echo ... && exit 1)" -- the exit only left
# the subshell, so the script kept running after a failed cd.
cd "$UCSD_ROOT" || { echo "Project root directory doesn't exist" >&2; exit 1; }
for dir in "$UCSD_ROOT"/*/; do
echo "$dir"
cd "$dir" || exit 1
commits=$(git log --oneline --since="$START_DATE" --until="$END_DATE" | wc -l)
echo "$dir: $commits" >> "$REPORT_FILE"
done
| true
|
83a2e978dcfd4cfdb2fbba2a72b9a5473cebbcfc
|
Shell
|
sitedata/droplet-1-clicks
|
/docker-18-04/files/etc/update-motd.d/99-one-click
|
UTF-8
| 1,017
| 3.359375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
#
# Configured as part of the DigitalOcean 1-Click Image build process
# Message-of-the-day fragment for the Docker 1-Click Droplet: prints a
# welcome banner each login.
# First IPv4 address reported by hostname -I, shown in the SSH hint.
myip=$(hostname -I | awk '{print$1}')
# Unquoted EOF: $myip and $(readlink -f ${0}) below are expanded each
# time the banner is printed, so the final line shows this script's own
# resolved path for easy removal.
cat <<EOF
********************************************************************************
Welcome to DigitalOcean's 1-Click Docker Droplet.
To keep this Droplet secure, the UFW firewall is enabled.
All ports are BLOCKED except 22 (SSH), 2375 (Docker) and 2376 (Docker).
* The Docker 1-Click Quickstart guide is available at:
https://do.co/3j6j3po#start
* You can SSH to this Droplet in a terminal as root: ssh root@$myip
* Docker is installed and configured per Docker's recommendations:
https://docs.docker.com/install/linux/docker-ce/ubuntu/
* Docker Compose is installed and configured per Docker's recommendations:
https://docs.docker.com/compose/install/#install-compose
For help and more information, visit https://do.co/3j6j3po
********************************************************************************
To delete this message of the day: rm -rf $(readlink -f ${0})
EOF
| true
|
4b0cc0c7ad166dfc541fea37d97d79eb03f781a1
|
Shell
|
rodriguezmDNA/rnaseqScripts
|
/Old/01b_TrimmingFullScript.sh
|
UTF-8
| 6,875
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
# j rodriguez medina
# Brady lab @ ucdavis
# Trim sequencing files in two steps.
# If there is adapter contamination (likely to be determined by the output of FastQC on raw files or using minion)
## If one is certain there is no adapter, leave the tabu and seqAdapt blank
# ie: tabu='';seqAdapt=''
## If one is certain the barcodes have been removed (first 6-8 bp) set the -f parameter for fastx to 1.
#####
# Last modified
# 2017.07.jrm
####################
#####
# Set options:
# A. Remove only barcode (first 8bp) [fastx]
# B. Remove barcodes. Remove adapters. Trim based on quality, complexity (trims repeated bases and Ns) [fastx & reaper]
optionTrim=B
# Barcode trim options.
ntStart=9 #Non-zero positive integers
## eg: A value of 9 removes the first 8 nt.
## In some cases the sequencing facility removes barcodes, if so, set ntStart=1
# Reaper options
tabu='GATCGGAAGAGCACACGTCTGAACTCCAGTCAC' #Reaper recommends to use the 5' adapter sequence as tabu. If more than one contaminant is suspected, they can be inserted as a comma separated list: tabu='AAA,GGG'
seqAdapt='ATCTCGTATGCCGTCTTCTGCTTG' # 3' adapter. See below.
####################
#tabu="ACACGTCTGAACTCCAGTCACACTCAGGTATCTCGTATGCCG,GATCGGAAGAGCACACGTCTGAACTCCAGTCAC,ATCTCGTATGCCGTCTTCTGCTTG,GAAGAAGAAGAAA,CGGAAGAGCACACGTCTGAACTCCAGTCAC,GATCGGAAGAGCACACGTCTGAACTCCAGTCAC,CACGTCTGAACTCCAGTCACTACCATTGATCTCGTATGCCGT,GATCGGAAGAGCACACGTCTGAACTCCAGTCAC"
#seqAdapt="GTGACTGGAGTTCAGACGTGTGCTCTTCCGATC"
# https://support.illumina.com/content/dam/illumina-support/documents/documentation/chemistry_documentation/experiment-design/illumina-adapter-sequences_1000000002694-01.pdf
## Possible contaminants
# ATCTCGTATGCCGTCTTCTGCTTG small RNA 3' Adapter
# GAAGAAGAAGAAA from minion. Run: minion search-adapter -i <file.fastq.gz>
# CGGAAGAGCACACGTCTGAACTCCAGTCAC #TruSeq adapter
# GATCGGAAGAGCACACGTCTGAACTCCAGTCAC #TruSeq adapter, extended
#####
## Record time
start_time=`date +%s`
## Get absolute path
#Get the full path of the current script: Go back one folder (from scripts to main)
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/.."
## Keep track of errors and outputs in a log.
logDir=$DIR/logs #Create log folder if it doesn't exist
if [ ! -d $logDir ]; then echo `mkdir -p $logDir`; fi
######
##Uses the same name of the script but adds the log extension
## Two ways: split string by dots, keep the first part
#logPath=$DIR/logs/$(basename $BASH_SOURCE | cut -d . -f1).log
# Removes specific extension:
logPath=$DIR/logs/$(basename $BASH_SOURCE .sh).$optionTrim.log
##
echo `touch $logPath` #Create file to fill with log data
echo 2>&1 | tee $logPath ## Empty the file contents each time the script is run
echo "Started on `date`" 2>&1 | tee -a $logPath
######
echo "Trimming option $optionTrim" 2>&1 | tee $logPath
########################################################################################################################
########################################################################################################################
SeqDir="$DIR/RawData/*.fastq.gz"
## Remove first 8bp and sequences with low quality (< 33) with fastx_trimmer
## Choose flags for fastx trimmer and reaper
if [ $optionTrim == "A" ]; then # Only trim
echo 'A. Remove only barcode (first 8bp) [fastx]' 2>&1 | tee -a $logPath; #Describe
# Set parameters
ToDir="$DIR/01_trimmed/" #ToDir="$DIR/01_trimmed/A_trimBC_noReap"
fastxParams='-v -Q33 -z' #for fastx_trimmer, keep consistency with how reaper outputs clean files (.clean). This will help on subsequent steps.
else
if [ $optionTrim == "B" ]; then # Coupled with reaper
echo 'C. Remove barcodes and adapters. Trim reads based on quality and complexity [fastx & reaper]' 2>&1 | tee -a $logPath;
## Set params
ToDir="$DIR/01_trimmed/" #ToDir="$DIR/01_trimmed/C_trimBC_trimAdapt_Reap"
fastxParams='-v -Q33' # If coupled with reaper
reapParams='-geom no-bc -dust-suffix-late 10/ACTG -dust-suffix 10/ACTG --noqc -nnn-check 1/1 -qqq-check 33/10 -clean-length 30 -tri 20 -polya 5 --bcq-late' #for reaper; took away --fastqx-out
else
## Exit if no option selected
echo 'Wrong option, exiting program' 2>&1 | tee -a $logPath;
exit
fi; fi; ## Close every if opened
########### Done setting up parameters
## Crete out dir
echo `mkdir -p $ToDir`
############
### Process sequencing files
# For each file in the selected folder
## Fix the names
## Depending on the option selected:
## A) Just trim the first n letters.
## B) Trim and remove adapters. Clean sequences based on quality/complexity.
## Write the output to a file in a dedicated folder.
## Errors and screen outputs go to both screen and file.
for file in $SeqDir
do # do something on "$file"
base=`basename $file`; # Remove dir paths
base=${base%%.fastq.gz}; # Remove suffixes
echo "Trimming $base" 2>&1 | tee -a $logPath # Print file being processed
##############################
# First unzip to standar output
## Remove first n-bp and sequences with low quality (< 33) using fastx_trimmer
## Trim adapter, low complexity/quality sequences with reaper
## stdout and stderr to logfile while still display on screen
## Probably there's another fancier way to do this:
### Run trimmer with/without reaper:
if [ $optionTrim == "A" ]; then ##Only trim
echo 'A. Remove only barcode (first 8bp) [fastx]' #2>&1 | tee -a $logPath;
## Unzip to stdout | Trim the first n-bases and save output
gunzip -dc $file | fastx_trimmer -f $ntStart -Q33 $fastxParams -o $ToDir/$base.nobc.fq.clean.gz 2>&1 | tee -a $logPath # Use head -n 4000 in the middle for a quick test
else
if [ $optionTrim == "B" ]; then ##Coupled with reaper
echo 'B. Remove barcodes and adapters. Trim reads based on quality and complexity [fastx & reaper]' #2>&1 | tee -a $logPath;
echo 'Using' $tabu 'as tabu sequences' 2>&1 | tee -a $logPath;
echo 'Using' $seqAdapt 'as adapter sequences' 2>&1 | tee -a $logPath;
####
# Unzip the file, cut the first n bp, trim adapters and low quality segments.
gunzip -dc $file | fastx_trimmer -f $ntStart $fastxParams | reaper -3pa $seqAdapt -tabu $tabu -basename $ToDir/$base $reapParams 2>&1 | tee -a $logPath # Use head -n 4000 in the middle for a quick test
fi; fi; ## Close every if opened
####
echo -e "\n ----------------------------------------------------------" 2>&1 | tee -a $logPath
done >&2 | tee -a $logPath ## Only errors!
########################################################################################################################
## Record time
end_time=`date +%s`
echo -e "\nParameters used: $fastxParams -f $ntStart" 2>&1 | tee -a $logPath
echo -e "\nParameters used: $reapParams -3pa $seqAdapt -tabu $tabu" 2>&1 | tee -a $logPath
echo -e "\nExecution time was `expr $end_time - $start_time` s." 2>&1 | tee -a $logPath
echo -e "\nDone `date`" 2>&1 | tee -a $logPath
| true
|
c190923cf62592f862c7b6b4cdaf0ab7c1c344e5
|
Shell
|
Kamekure-Maisuke/megavite
|
/script/make_blog.sh
|
UTF-8
| 494
| 3.90625
| 4
|
[] |
no_license
|
#!/bin/sh
set -e
export LC_ALL=C
NOW=$(date '+%Y-%m-%d_%H:%M:%S')
TODAY="${NOW%_*}"
CONTENTS_DIR="$(dirname "$0")/../contents"
# 存在してたら作成しない
[ -e "${CONTENTS_DIR}/${TODAY}.md" ] && {
echo "今日は作成済み"
exit 1
}
# blogコンテンツの中身
contents(){
cat <<EOF
---
title: タイトル
description: 説明
tags:
- "タグ1"
- "タグ2"
- "タグ3"
created_at: $NOW
---
- サンプル本文
EOF
}
# 作成
contents > "${CONTENTS_DIR}/${TODAY}.md"
| true
|
32d74d27fef8c5b0425f6153574ac1bb18442eae
|
Shell
|
W3SS/wikia-scraper-1
|
/create-wordlist.sh
|
UTF-8
| 343
| 3.28125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
urldecode(){
echo -e "$(sed 's/+/ /g;s/%\(..\)/\\x\1/g;')"
}
# Concatinate all input files,
# Remove all blanks
# Convert everything to lower case
# Remove all colons
# sort
# Remove duplicates
# Decode percent encoding
cat "$@" | sed -E -e 's/[[:blank:]]+//g' | sed 's/.*/\L&/' | sed 's/://g' | sort | uniq | urldecode
| true
|
77bbe7603a3be265b068711cfc61ff3a9a36f409
|
Shell
|
alnyan/raspi-tests
|
/qemu.sh
|
UTF-8
| 362
| 2.75
| 3
|
[] |
no_license
|
#!/bin/sh
set -e
QEMU_OPTS="-M raspi3 \
-kernel build/kernel.img \
-serial null \
-serial mon:stdio \
-display none"
if [ "${QEMU_DEBUG}x" != x ]; then
QEMU_OPTS="${QEMU_OPTS} -s -S"
fi
if [ "${QEMU_DINT}x" != x ]; then
QEMU_OPTS="${QEMU_OPTS} -d int"
fi
make ${MAKEOPTS}
qemu-system-aarch64 ${QEMU_OPTS}
| true
|
92226b84c555fb98278e66081e8b3452fc76ac42
|
Shell
|
isabella232/stampede
|
/bin/try-until
|
UTF-8
| 1,538
| 4.125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
#------------------------------------------------------------------------------
# Copyright (c) 2011-2013, Think Big Analytics, Inc. All Rights Reserved.
#------------------------------------------------------------------------------
# try-until - Wait for an expression to succeed until a specified time.
thisdir=$(dirname ${BASH_SOURCE[0]})
. $thisdir/common.sh
if [[ "$1" =~ --?h.* ]]
then
cat <<EOF
$0 - Wait for an expression to succeed until a specified time.
Usage: $0 end_timestamp time_between_attempts expression
Where:
end_timestamp The timestamp, in Unix epoch seconds, when it should give up.
After this point in time, it returns the failure status of 1.
time_between_attempts How long to wait between attempts. Format is "Nx".
See "to-seconds --help" for the required format.
expression The remaining arguments are the bash expression to try on
every iteration. It must return a status of 0 when evaluated
to indicate success and nonzero for failure. For example,
"ls foo" returns 0 if "foo" exists, otherwise it returns 1.
Contrast with "try-for", where you specify a window of time to try. Here, you specify
a cutoff time.
NOTES:
1) The process will sleep between attempts.
2) If you don't want the expression's output, redirect to "/dev/null".
EOF
exit 0
fi
let end=$1
shift
retry_every=$1
shift
_do_try $end $retry_every "$@"
exit $?
| true
|
fd740c324f34ef1ad018b0bd577ce22840fa8b33
|
Shell
|
aascheneller/ipcsystools
|
/src/scripts/ipc-getfirmware
|
UTF-8
| 178
| 3.046875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#set -x
value=unknown
if [ -e /etc/ptxdist_version ]; then
value=`cat /etc/ptxdist_version | cut -d' ' -f2 | sed -n -e 's~kp-mcb2-\(.*\)\..*~\1~p'`
fi
echo $value
| true
|
5fc030c47d2bdeaa7f1704a28635023ca74f1f24
|
Shell
|
inm7/vbc_mri_pipeline
|
/code/examples/extract_BOLD_from_native_EPI.sh
|
UTF-8
| 4,194
| 3.0625
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
#!/bin/bash
grp=${1}
sbj=${2}
threads=${3}
atlname=Schaefer2018_100Parcels_17Networks
num=114
sp=/data/project/SC_pipeline/03_Functional_Connectivity
tp=/data/project/SC_pipeline/03_Structural_Connectivity
tmp=${tp}/${grp}/${sbj}/temp
atl=${tp}/${grp}/${sbj}/${atlname}_to_epi_native+subctx.nii.gz
atlt1w=${tp}/${grp}/${sbj}/${atlname}_to_fs_t1_native+subctx.nii.gz
gmneck=${tp}/${grp}/${sbj}/fs_t1_neck_gm_mask_to_dwi.nii.gz
epi=${sp}/${grp}/Derivatives/vbc_fmri/rfMRI/${sbj}/fMRI1/filtered_func_data.nii.gz
epiup=${sp}/${grp}/Derivatives/vbc_fmri/rfMRI/${sbj}/fMRI1/filtered_func_data_upsample.nii.gz
epi_avg=${tp}/${grp}/${sbj}/filtered_func_data_avg_upsample.nii.gz
bold=${sp}/${grp}/Derivatives/vbc_fmri/rfMRI/${sbj}/fMRI1/Atlas/filtered_func_data_upsample_${atlname}_native_subctx_BOLD.csv
# Transform function for loops
# ----------------------------
Transform()
{
idx=${1}
mask1=${tmp}/temp_label${idx}_mask1.nii.gz
mask2=${tmp}/temp_label${idx}_mask2.nii.gz
mask3=${tmp}/temp_label${idx}_mask3.nii.gz
mask4=${tmp}/temp_label${idx}_mask4.nii.gz
fslmaths ${atlt1w} -thr ${idx} -uthr ${idx} -bin ${mask1}
applywarp -i ${mask1} -r ${epi_avg} -o ${mask3} --premat=${tp}/${grp}/${sbj}/epi_to_fs_t1_invaffine.mat
fslmaths ${mask3} -thr 0.5 -uthr 0.5 ${mask4}
fslmaths ${mask3} -sub ${mask4} -thr 0.5 -bin -mul ${idx} ${mask3}
}
# BOLD extraction
# ---------------
BOLD_Extraction()
{
fp=${sp}/${grp}/Derivatives/vbc_fmri/rfMRI/${sbj}/fMRI1/Atlas
fslmeants -i ${epiup} --label=${atl} -o ${fp}/temp_BOLD.txt
cat ${fp}/temp_BOLD.txt | tr -s " " >> ${fp}/temp.txt
cat ${fp}/temp.txt | tr ' ' ',' >> ${fp}/temp2.txt
cat ${fp}/temp2.txt | sed 's/.$//' > ${fp}/temp3.txt
mv ${fp}/temp3.txt ${bold}
rm -f ${fp}/temp*.txt
}
source /etc/fsl/fsl.sh
# Call container_SC_dependencies
# ------------------------------
# source /usr/local/bin/container_SC_dependencies.sh
# export SUBJECTS_DIR=/opt/freesurfer/subjects
# Freesurfer license
# ------------------
# if [[ -f /opt/freesurfer/license.txt ]]; then
# printf "Freesurfer license has been checked.\n"
# else
# echo "${email}" >> $FREESURFER_HOME/license.txt
# echo "${digit}" >> $FREESURFER_HOME/license.txt
# echo "${line1}" >> $FREESURFER_HOME/license.txt
# echo "${line2}" >> $FREESURFER_HOME/license.txt
# printf "Freesurfer license has been updated.\n"
# fi
cd ${tmp}
fslsplit ${epi} temp_epi_ -t
cmd="fslmerge -t ${tmp}/merged_upsampled_epi.nii.gz"
for i in {0..299}; do
epinum=$(printf "%04d" ${i})
flirt -in ${tmp}/temp_epi_${epinum}.nii.gz -ref ${tmp}/temp_epi_${epinum}.nii.gz -applyisoxfm 1.0 -out ${tmp}/temp_epi_${epinum}_upsample.nii.gz
cmd+=" ${tmp}/temp_epi_${epinum}_upsample.nii.gz"
done
eval "${cmd}"
rm -rf ${tmp}/temp_epi_*.nii.gz
mv ${tmp}/merged_upsampled_epi.nii.gz ${epiup}
# Co-registration between T1-weighted image and EPI (rs-fMRI)
# -----------------------------------------------------------
fslmaths ${epiup} -Tmean ${epi_avg}
flirt -in ${epi_avg} -ref ${tp}/${grp}/${sbj}/fs_t1_brain.nii.gz -out ${tp}/${grp}/${sbj}/epi_to_fs_t1_affine.nii.gz -omat ${tp}/${grp}/${sbj}/epi_to_fs_t1_affine.mat -dof 6 -cost corratio
convert_xfm -omat ${tp}/${grp}/${sbj}/epi_to_fs_t1_invaffine.mat -inverse ${tp}/${grp}/${sbj}/epi_to_fs_t1_affine.mat
applywarp -i ${tp}/${grp}/${sbj}/fs_t1.nii.gz -r ${epi_avg} -o ${tp}/${grp}/${sbj}/fs_t1_to_epi.nii.gz --premat=${tp}/${grp}/${sbj}/epi_to_fs_t1_invaffine.mat
applywarp -i ${tp}/${grp}/${sbj}/fs_t1_brain.nii.gz -r ${epi_avg} -o ${tp}/${grp}/${sbj}/fs_t1_brain_to_epi.nii.gz --premat=${tp}/${grp}/${sbj}/epi_to_fs_t1_invaffine.mat
# Transform native parcellations to the EPI space
# -----------------------------------------------
nThr=0
for (( i = 1; i < num + 1; i++ ))
do
Transform ${i} &
(( nThr++ ))
printf "[+] Running thread ${nThr} - index ${i}\n"
if [[ ${nThr} -eq ${threads} ]]; then
wait
nThr=0
fi
done
wait
fslmaths ${epi_avg} -mul 0 ${tmp}/temp_mask.nii.gz
for (( i = 1; i < num + 1; i++ ))
do
fslmaths ${tmp}/temp_mask.nii.gz -add ${tmp}/temp_label${i}_mask3.nii.gz ${tmp}/temp_mask.nii.gz
done
mv ${tmp}/temp_mask.nii.gz ${atl}
rm -f ${tmp}/temp*.nii.gz
BOLD_Extraction
| true
|
d7311ff6c288c8ca4046fed66d9d99048e972f9c
|
Shell
|
prismocr/ocr
|
/scripts/valgrind.sh
|
UTF-8
| 609
| 3.5
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
if [ $# -lt 1 ] ; then
echo "Invalid number of arguments."
exit 1
fi
# Reads arguments into the table `args`
IFS=' ' read -r -a args <<< $@
unset args[0]
CFLAGS="-std=c99 -Wall -Wextra -Werror -Wpedantic"
if [ $1 = "debug" ] ; then
CFLAGS+=" -g -O0 -DDEBUG"
elif [ $1 = "release" ] ; then
CFLAGS+=" -O3 -DNDEBUG"
else
echo "Invalid compilation configuration."
echo "Use 'debug' or 'release' instead of '$1'."
exit 1
fi
make TMPCFLAGS="${CFLAGS}" temp
valgrind --show-leak-kinds=all \
--track-origins=yes \
./build/temp/ocr \
${args[@]}
| true
|
c0fe414df610957c6769d08142c5d24f94a57fee
|
Shell
|
SrinivasaBharath/ceph-1
|
/src/test/docker-test-helper.sh
|
UTF-8
| 9,821
| 3.421875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
#
# Copyright (C) 2014, 2015 Red Hat <contact@redhat.com>
#
# Author: Loic Dachary <loic@dachary.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Public License for more details.
#
function get_image_name() {
local os_type=$1
local os_version=$2
echo ceph-$os_type-$os_version-$USER
}
function setup_container() {
local os_type=$1
local os_version=$2
local opts="$3"
local image=$(get_image_name $os_type $os_version)
local build=true
if docker images $image | grep --quiet "^$image " ; then
eval touch --date=$(docker inspect $image | jq '.[0].Created') $image
found=$(find -L test/$os_type-$os_version/* -newer $image)
rm $image
if test -n "$found" ; then
docker rmi $image
else
build=false
fi
fi
if $build ; then
#
# In the dockerfile,
# replace environment variables %%FOO%% with their content
#
rm -fr dockerfile
cp --dereference --recursive test/$os_type-$os_version dockerfile
os_version=$os_version user_id=$(id -u) \
perl -p -e 's/%%(\w+)%%/$ENV{$1}/g' \
dockerfile/Dockerfile.in > dockerfile/Dockerfile
docker $opts build --tag=$image dockerfile
rm -fr dockerfile
fi
}
function get_upstream() {
git rev-parse --show-toplevel
}
function get_downstream() {
local os_type=$1
local os_version=$2
local image=$(get_image_name $os_type $os_version)
local upstream=$(get_upstream)
local dir=$(dirname $upstream)
echo "$dir/$image"
}
function setup_downstream() {
local os_type=$1
local os_version=$2
local ref=$3
local image=$(get_image_name $os_type $os_version)
local upstream=$(get_upstream)
local dir=$(dirname $upstream)
local downstream=$(get_downstream $os_type $os_version)
(
cd $dir
if ! test -d $downstream ; then
# Inspired by https://github.com/git/git/blob/master/contrib/workdir/git-new-workdir
mkdir -p $downstream/.git || return 1
for x in config refs logs/refs objects info hooks packed-refs remotes rr-cache
do
case $x in
*/*)
mkdir -p "$downstream/.git/$x"
;;
esac
ln -s "$upstream/.git/$x" "$downstream/.git/$x"
done
cp "$upstream/.git/HEAD" "$downstream/.git/HEAD"
fi
cd $downstream
git reset --hard $ref || return 1
git submodule sync --recursive || return 1
git submodule update --force --init --recursive || return 1
)
}
function run_in_docker() {
local os_type=$1
shift
local os_version=$1
shift
local ref=$1
shift
local opts="$1"
shift
local script=$1
setup_downstream $os_type $os_version $ref || return 1
setup_container $os_type $os_version "$opts" || return 1
local downstream=$(get_downstream $os_type $os_version)
local image=$(get_image_name $os_type $os_version)
local upstream=$(get_upstream)
local ccache
mkdir -p $HOME/.ccache
ccache="--volume $HOME/.ccache:$HOME/.ccache"
user="--user $USER"
local cmd="docker run $opts --rm --name $image --privileged $ccache"
cmd+=" --volume $downstream:$downstream"
cmd+=" --volume $upstream:$upstream"
local status=0
if test "$script" = "SHELL" ; then
$cmd --tty --interactive --workdir $downstream $user $image bash
else
if ! $cmd --workdir $downstream $user $image "$@" ; then
status=1
fi
fi
return $status
}
function remove_all() {
local os_type=$1
local os_version=$2
local image=$(get_image_name $os_type $os_version)
docker rm $image
docker rmi $image
}
function usage() {
cat <<EOF
Run commands within Ceph sources, in a docker container
$0 [options] command args ...
[-h|--help] display usage
[--verbose] trace all shell lines
[--os-type type] docker image repository (centos, ubuntu, etc.)
(defaults to ubuntu)
[--os-version version] docker image tag (7 for centos, 16.04 for ubuntu, etc.)
(defaults to 16.04)
[--ref gitref] git reset --hard gitref before running the command
(defaults to git rev-parse HEAD)
[--all types+versions] list of docker image repositories and tags
[--shell] run an interactive shell in the container
[--remove-all] remove the container and the image for the specified types+versions
[--opts options] run the contain with 'options'
docker-test.sh must be run from a Ceph clone and it will run the
command in a container, using a copy of the clone so that long running
commands such as make check are not disturbed while development
continues. Here is a sample use case including an interactive session
and running a unit test:
$ lsb_release -d
Description: Ubuntu Xenial Xerus (development branch)
$ test/docker-test.sh --os-type centos --os-version 7 --shell
HEAD is now at 1caee81 autotools: add --enable-docker
bash-4.2$ pwd
/srv/ceph/ceph-centos-7
bash-4.2$ lsb_release -d
Description: CentOS Linux release 7.0.1406 (Core)
bash-4.2$
$ time test/docker-test.sh --os-type centos --os-version 7 unittest_str_map
HEAD is now at 1caee81 autotools: add --enable-docker
Running main() from gtest_main.cc
[==========] Running 2 tests from 1 test case.
[----------] Global test environment set-up.
[----------] 2 tests from str_map
[ RUN ] str_map.json
[ OK ] str_map.json (1 ms)
[ RUN ] str_map.plaintext
[ OK ] str_map.plaintext (0 ms)
[----------] 2 tests from str_map (1 ms total)
[----------] Global test environment tear-down
[==========] 2 tests from 1 test case ran. (1 ms total)
[ PASSED ] 2 tests.
real 0m3.759s
user 0m0.074s
sys 0m0.051s
The --all argument is a bash associative array literal listing the
operating system version for each operating system type. For instance
docker-test.sh --all '([ubuntu]="16.04 17.04" [centos]="7")'
is strictly equivalent to
docker-test.sh --os-type ubuntu --os-version 16.04
docker-test.sh --os-type ubuntu --os-version 17.04
docker-test.sh --os-type centos --os-version 7
The --os-type and --os-version must be exactly as displayed by docker images:
$ docker images
REPOSITORY TAG IMAGE ID ...
centos 7 87e5b6b3ccc1 ...
ubuntu 16.04 6b4e8a7373fe ...
The --os-type value can be any string in the REPOSITORY column, the --os-version
can be any string in the TAG column.
The --shell and --remove actions are mutually exclusive.
Run make check in centos 7
docker-test.sh --os-type centos --os-version 7 -- make check
Run make check on a giant
docker-test.sh --ref giant -- make check
Run an interactive shell and set resolv.conf to use 172.17.42.1
docker-test.sh --opts --dns=172.17.42.1 --shell
Run make check on centos 7, ubuntu 16.04 and ubuntu 17.04
docker-test.sh --all '([ubuntu]="16.04 17.04" [centos]="7")' -- make check
EOF
}
function main_docker() {
if ! docker ps > /dev/null 2>&1 ; then
echo "docker not available: $0"
return 0
fi
local temp
temp=$(getopt -o scht:v:o:a:r: --long remove-all,verbose,shell,help,os-type:,os-version:,opts:,all:,ref: -n $0 -- "$@") || return 1
eval set -- "$temp"
local os_type=ubuntu
local os_version=16.04
local all
local remove=false
local shell=false
local opts
local ref=$(git rev-parse HEAD)
while true ; do
case "$1" in
--remove-all)
remove=true
shift
;;
--verbose)
set -xe
PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '
shift
;;
-s|--shell)
shell=true
shift
;;
-h|--help)
usage
return 0
;;
-t|--os-type)
os_type=$2
shift 2
;;
-v|--os-version)
os_version=$2
shift 2
;;
-o|--opts)
opts="$2"
shift 2
;;
-a|--all)
all="$2"
shift 2
;;
-r|--ref)
ref="$2"
shift 2
;;
--)
shift
break
;;
*)
echo "unexpected argument $1"
return 1
;;
esac
done
if test -z "$all" ; then
all="([$os_type]=\"$os_version\")"
fi
declare -A os_type2versions
eval os_type2versions="$all"
for os_type in ${!os_type2versions[@]} ; do
for os_version in ${os_type2versions[$os_type]} ; do
if $remove ; then
remove_all $os_type $os_version || return 1
elif $shell ; then
run_in_docker $os_type $os_version $ref "$opts" SHELL || return 1
else
run_in_docker $os_type $os_version $ref "$opts" "$@" || return 1
fi
done
done
}
| true
|
5ab367b32c4c4a73e01a146042bcf928127a9741
|
Shell
|
wh-forker/seeing_and_hearing
|
/scripts/train_baradel_audio.sh
|
UTF-8
| 967
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
export gpu_number="$1"
export classification_type="verb"
export root_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/.. >/dev/null && pwd )"
# Setting Nvidia GPU and Anancoda virtual environment path
conda activate ~/anaconda3/envs/epic_torch
export CUDA_VISIBLE_DEVICES=$gpu_number
export PYTHONPATH="$root_dir"
source "$root_dir"/scripts/commons.sh
cd "$root_dir"
if [ "$gpu_number" -eq "0" ]; then
conf_fname=conf/audio_baradel_args.json
else
conf_fname=conf/audio_baradel_args_"$gpu_number".json
fi
lr=$(cat $conf_fname | jq .lr)
arc=$(cat $conf_fname | jq .arc | tr -d '"')
log_fname="$arc"_baradel_`timestamp`.log
msg1="Training audio for $classification_type using $arc with learning rate $lr started on GPU $gpu_number"
msg2="Training audio for $classification_type using $arc with learning rate $lr finished on GPU $gpu_number"
python3 audio_main.py "$conf_fname" </dev/null 2>&1 | tee "$log_fname" &
msg_me "$!" "$msg1" "$msg2"
| true
|
dc0c987c40b970bf0322344342fd03c4cb00301a
|
Shell
|
siyue1226/alt-splice
|
/Mus_musculus/cq.Lung.sh
|
UTF-8
| 4,046
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
start_time=`date +%s`
### bash hints
### 2>&1 redirect stderr to stdout
### | tee -a log.log screen outputs also append to log file
### ; ( exit ${PIPESTATUS} ) correct program exitting status
### Only run parallel when you're sure that there are no errors.
cd /home/wangq/data/rna-seq/mouse_trans
### bowtie index
# bowtie2-build /home/wangq/data/rna-seq/mouse_trans/ref/mouse.65.fa /home/wangq/data/rna-seq/mouse_trans/ref/mouse.65
#----------------------------#
# cuffquant
#----------------------------#
# lane SRR453156
cd /home/wangq/data/rna-seq/mouse_trans/process/Lung/SRR453156/
echo "* Start cuffquant [Lung] [SRR453156] `date`" | tee -a /home/wangq/data/rna-seq/mouse_trans/log/cuffquant.log
# cuffquant
/home/wangq/bin/cuffquant -p 8 \
--no-update-check -u -M /home/wangq/data/rna-seq/mouse_trans/ref/mouse.65.mask.gtf \
-b /home/wangq/data/rna-seq/mouse_trans/ref/mouse.65.fa \
/home/wangq/data/rna-seq/mouse_trans/process/merged_asm/merged.gtf \
-o /home/wangq/data/rna-seq/mouse_trans/process/Lung/SRR453156/cq_out \
/home/wangq/data/rna-seq/mouse_trans/process/Lung/SRR453156/th_out/accepted_hits.bam
[ $? -ne 0 ] && echo `date` Lung SRR453156 [cuffquant] failed >> /home/wangq/data/rna-seq/mouse_trans/fail.log && exit 255
echo "* End cuffquant [Lung] [SRR453156] `date`" | tee -a /home/wangq/data/rna-seq/mouse_trans/log/cuffquant.log
# lane SRR453157
cd /home/wangq/data/rna-seq/mouse_trans/process/Lung/SRR453157/
echo "* Start cuffquant [Lung] [SRR453157] `date`" | tee -a /home/wangq/data/rna-seq/mouse_trans/log/cuffquant.log
# cuffquant
/home/wangq/bin/cuffquant -p 8 \
--no-update-check -u -M /home/wangq/data/rna-seq/mouse_trans/ref/mouse.65.mask.gtf \
-b /home/wangq/data/rna-seq/mouse_trans/ref/mouse.65.fa \
/home/wangq/data/rna-seq/mouse_trans/process/merged_asm/merged.gtf \
-o /home/wangq/data/rna-seq/mouse_trans/process/Lung/SRR453157/cq_out \
/home/wangq/data/rna-seq/mouse_trans/process/Lung/SRR453157/th_out/accepted_hits.bam
[ $? -ne 0 ] && echo `date` Lung SRR453157 [cuffquant] failed >> /home/wangq/data/rna-seq/mouse_trans/fail.log && exit 255
echo "* End cuffquant [Lung] [SRR453157] `date`" | tee -a /home/wangq/data/rna-seq/mouse_trans/log/cuffquant.log
# lane SRR453158
cd /home/wangq/data/rna-seq/mouse_trans/process/Lung/SRR453158/
echo "* Start cuffquant [Lung] [SRR453158] `date`" | tee -a /home/wangq/data/rna-seq/mouse_trans/log/cuffquant.log
# cuffquant
/home/wangq/bin/cuffquant -p 8 \
--no-update-check -u -M /home/wangq/data/rna-seq/mouse_trans/ref/mouse.65.mask.gtf \
-b /home/wangq/data/rna-seq/mouse_trans/ref/mouse.65.fa \
/home/wangq/data/rna-seq/mouse_trans/process/merged_asm/merged.gtf \
-o /home/wangq/data/rna-seq/mouse_trans/process/Lung/SRR453158/cq_out \
/home/wangq/data/rna-seq/mouse_trans/process/Lung/SRR453158/th_out/accepted_hits.bam
[ $? -ne 0 ] && echo `date` Lung SRR453158 [cuffquant] failed >> /home/wangq/data/rna-seq/mouse_trans/fail.log && exit 255
echo "* End cuffquant [Lung] [SRR453158] `date`" | tee -a /home/wangq/data/rna-seq/mouse_trans/log/cuffquant.log
# lane SRR453159
cd /home/wangq/data/rna-seq/mouse_trans/process/Lung/SRR453159/
echo "* Start cuffquant [Lung] [SRR453159] `date`" | tee -a /home/wangq/data/rna-seq/mouse_trans/log/cuffquant.log
# cuffquant
/home/wangq/bin/cuffquant -p 8 \
--no-update-check -u -M /home/wangq/data/rna-seq/mouse_trans/ref/mouse.65.mask.gtf \
-b /home/wangq/data/rna-seq/mouse_trans/ref/mouse.65.fa \
/home/wangq/data/rna-seq/mouse_trans/process/merged_asm/merged.gtf \
-o /home/wangq/data/rna-seq/mouse_trans/process/Lung/SRR453159/cq_out \
/home/wangq/data/rna-seq/mouse_trans/process/Lung/SRR453159/th_out/accepted_hits.bam
[ $? -ne 0 ] && echo `date` Lung SRR453159 [cuffquant] failed >> /home/wangq/data/rna-seq/mouse_trans/fail.log && exit 255
echo "* End cuffquant [Lung] [SRR453159] `date`" | tee -a /home/wangq/data/rna-seq/mouse_trans/log/cuffquant.log
| true
|
98f81a3365d33c2a46c2eb54434738565ea53f0f
|
Shell
|
briancsparks/imstall
|
/lib/ubuntu/gh
|
UTF-8
| 745
| 2.90625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash -e
script_dir="$( cd "$(dirname "$( readlink -f "${BASH_SOURCE[0]}" )" )" && pwd )"
. "${script_dir}/common/utils"
add_keyring 'https://cli.github.com/packages' 'githubcli-archive-keyring' 'https://cli.github.com/packages'
apt_get_install gh
# curl -fsSL https://cli.github.com/packages/githubcli-archive-keyring.gpg | sudo dd of=/usr/share/keyrings/githubcli-archive-keyring.gpg \
# && sudo chmod go+r /usr/share/keyrings/githubcli-archive-keyring.gpg \
# && echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | sudo tee /etc/apt/sources.list.d/github-cli.list > /dev/null \
# && sudo apt update \
# && sudo apt install gh -y
| true
|
3c3bd2ee884f30624075e401a92f91323112d68b
|
Shell
|
heroku/cnb-shim
|
/bin/detect
|
UTF-8
| 272
| 3.3125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# fail hard
set -o pipefail
bp_dir="$(
cd "$(dirname "$0")/.."
pwd
)" # absolute path
target_dir="${bp_dir}/target"
"${target_dir}/bin/detect" "$(pwd)" >/dev/null 2>&1
EXITCODE=$?
case $EXITCODE in
1) exit 100 ;;
*) exit $EXITCODE ;;
esac
| true
|
fe70120691dc758fc489ba967713fc8fc6347e07
|
Shell
|
revanthreddy/builder-bootcamp-codepipeline-workshop
|
/upload-code-to-s3.sh
|
UTF-8
| 269
| 3.421875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
if [ $# -ne 1 ]
then
echo 'upload_to_s3.sh <SOURCE_ARTIFACT_BUCKET_NAME>'
exit 1
fi
source_artifact_bucket_name=$1
zip_name=app.zip
cd app
zip -r $zip_name . -x *.git*
aws s3 cp $zip_name s3://$source_artifact_bucket_name
rm $zip_name
| true
|
366f6538b4a5c2bb25c697fb0095338628bbcecd
|
Shell
|
ypapax/do-k8s
|
/master.sh
|
UTF-8
| 1,391
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash
set -ex
export PS4='+(${BASH_SOURCE}:${LINENO}): ${FUNCNAME[0]:+${FUNCNAME[0]}(): } $HOSTNAME $ '
echo log is here: /var/log/cloud-init-output.log
export DEBIAN_FRONTEND=noninteractive
# Replace this with the token
TOKEN=xxxxxx.yyyyyyyyyyyyyyyy
apt-get update && apt-get upgrade -y
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
cat <<EOF > /etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF
apt-get update -y
apt-get install -y docker.io
apt-get install -y --allow-unauthenticated kubelet kubeadm kubectl kubernetes-cni
export MASTER_IP=$(curl -s http://169.254.169.254/metadata/v1/interfaces/public/0/ipv4/address)
kubeadm init --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address $MASTER_IP --token $TOKEN
HOME=/root
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
cp /etc/kubernetes/admin.conf $HOME/
chown $(id -u):$(id -g) $HOME/admin.conf
export KUBECONFIG=$HOME/admin.conf
set +e
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/k8s-manifests/kube-flannel-rbac.yml
set -e
# Install DigitalOcean monitoring agent
curl -sSL https://agent.digitalocean.com/install.sh | sh
| true
|
d30089a0d6474aa8e9bbac04f27d8921a46a70a4
|
Shell
|
IMAGINARY/lalalab-scripts
|
/bin/exhibit-harmonic-series
|
UTF-8
| 834
| 3.34375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# This script is supposed to run Whitney music box exhibit.
# The wrapper script is supposed to be visible in ps -a resp. ps -u `id -u` to make
# it easy to kill it (by a script or manually)
PIDS=""
function finish {
kill $PIDS >/dev/null 2>&1
}
trap finish SIGINT SIGTERM EXIT
cd /opt/the-harmonic-series/lalalab
# Start the backend
npm start &
PIDS="$PIDS $!"
# Wait until the backend is likely to be ready
sleep 10 &
SLEEP_PID=$!
PIDS="$PIDS $SLEEP_PID"
wait $SLEEP_PID
# The nasty hack to wake up the socket
curl http://localhost:3000/socket.io/?transport=polling &
PIDS="$PIDS $!"
# Launch the exhibit in the background
kiosk-browser $KIOSK_OPTS --kiosk --fullscreen http://localhost:3000 &
PID_UI=$!
PIDS="$PIDS $PID_UI"
# Wait for the exhibit to finish or until the signal trap is triggered
wait $PID_UI
| true
|
72a03bd1de51b71a43ac24ae0a812ab16cc95880
|
Shell
|
garethgeorge/UCSBFaaS-Wrappers
|
/tools/timings/overheadMR.sh
|
UTF-8
| 4,035
| 3.28125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#! /bin/bash
if [ "$#" -ne 11 ]; then
echo "USAGE: ./overhead.sh aws_profile num_runs data_bucket_name prefix jobid C_job_bkt D_job_bkt F_job_bkt S_job_bkt T_job_bkt B_job_bkt"
exit 1
fi
PROF=$1
COUNT=$2
DATABKT=$3
PREFIX=$4
JOBID=$5 #must match reducerCoordinator "job_id" in config in setupApps.py for triggerBuckets
MRBKTC=$6 #must match reducerCoordinator "permission" in config in setupApps.py for triggerBuckets
MRBKTD=$7 #must match reducerCoordinator "permission" in config in setupApps.py for triggerBuckets
MRBKTF=$8 #must match reducerCoordinator "permission" in config in setupApps.py for triggerBuckets
MRBKTS=$9 #must match reducerCoordinator "permission" in config in setupApps.py for triggerBuckets
MRBKTT=${10} #must match reducerCoordinator "permission" in config in setupApps.py for triggerBuckets
MRBKTB=${11} #must match reducerCoordinator "permission" in config in setupApps.py for triggerBuckets
#0-base indexed via "${BKTLIST[2]}" (is F)
#must be in same order as SUFFIXES!!
BKTLIST=(
${MRBKTC} \
${MRBKTD} \
${MRBKTF} \
${MRBKTS} \
${MRBKTT} \
${MRBKTB} \
)
SUFFIXES=( C D F S T B )
#for testing or re-running, put the suffixes in here that you want to run
RUNTHESE=( B )
#update the below (must match lambda function names in configWestC.json
FMAP="/aws/lambda/mapper"
FMAP_NAME=mapper
FRED_NAME=reducer
FRED="/aws/lambda/reducer"
FRC="/aws/lambda/reducerCoordinator"
GRDIR=${PREFIX}/gammaRay
CWDIR=${PREFIX}/tools/cloudwatch
MRDIR=${GRDIR}/apps/map-reduce
TS=1401861965497 #some early date
#setup environment
cd ${GRDIR}
. ./venv/bin/activate
ITER=0
for suf in "${SUFFIXES[@]}"
do
#we have to do this to ensure that SUFFIXES stays in sync with BKTLIST
SKIP="donotrun"
for torun in "${RUNTHESE[@]}"
do
if [ "${suf}" = "${torun}" ]; then
SKIP="run"
break
fi
done
#echo ${BKTLIST[${ITER}]} ${FMAP}${suf} ${SKIP}
#Run it if we included it in RUNTHESE
if [ "${SKIP}" = "run" ]; then
MRBKT=${BKTLIST[${ITER}]}
MAP=${FMAP}${suf}
MAP_NAME=${FMAP_NAME}${suf}
RED=${FRED}${suf}
RED_NAME=${FRED_NAME}${suf}
RC=${FRC}${suf}
echo "Running experiment:" ${MRBKT} ${MAP} ${MAP_NAME} ${RED} ${RED_NAME} ${RC} ${JOBID} ${DATABKT} ${COUNT} times
for i in `seq 1 ${COUNT}`;
do
#clean out the s3 bucket we are about to use
aws s3 rm s3://${MRBKT}/${JOBID} --recursive --profile ${PROF}
#delete the logs
cd ${CWDIR}
python downloadLogs.py ${MAP} ${TS} -p ${PROF} --deleteOnly
python downloadLogs.py ${RED} ${TS} -p ${PROF} --deleteOnly
python downloadLogs.py ${RC} ${TS} -p ${PROF} --deleteOnly
#run job
cd ${MRDIR}
rm -f overhead.out
echo "Job: driver, " ${i} ${MRBKT} ${JOBID} ${MAP_NAME} ${RED_NAME} ${DATABKT} ${COUNT}
#use the driver
/usr/bin/time python driver.py ${MRBKT} ${JOBID} ${MAP_NAME} ${RED_NAME} --wait4reducers --databkt ${DATABKT} > overhead.out
mkdir -p ${i}/MRSYNC/${suf}
rm -f ${i}/MRSYNC/${suf}/overhead.out
mv overhead.out ${i}/MRSYNC/${suf}/
echo "sleeping 45secs..."
/bin/sleep 45 #seconds to wait for RC logs to commit
echo "downloading logs"
#download cloudwatch logs (and delete them)
cd ${CWDIR}
mkdir -p ${i}/MRSYNC/${suf}
rm -f ${i}/MRSYNC/${suf}/*.log
echo ${MAP} ${TS} ${PROF} ${i}/MRSYNC/${suf}
python downloadLogs.py ${MAP} ${TS} -p ${PROF} > ${i}/MRSYNC/${suf}/map.log
python downloadLogs.py ${RED} ${TS} -p ${PROF} > ${i}/MRSYNC/${suf}/red.log
python downloadLogs.py ${RC} ${TS} -p ${PROF} > ${i}/MRSYNC/${suf}/coord.log
echo done downloading logs...
done
fi
((ITER++)) #used to keep SUFFIXES and BKTLIST in sync
done
deactivate
| true
|
b8e39f02a469fa17112fb66c90e79e69b3c94277
|
Shell
|
alexantone/dme-socd2009
|
/make.sh
|
UTF-8
| 185
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
SRC=src/*.c
BUILD_DIR=build
[ -d $BUILD_DIR ] || mkdir BUILD_DIR
for fx in $SRC ; do
bfx=$(basename $fx)
gcc -g -o build/${bfx/.c/} -Isrc $fx -lrt src/common/*.c
done
| true
|
bed0723e78acf6668c80ed3c11e8aa66654ee82d
|
Shell
|
MaudGautier/detect-recombinants-in-F1
|
/src/config/slurm/04c_pass_second_genome.config
|
UTF-8
| 6,434
| 3.4375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Source project configuration file
config_file=$(pwd)/src/config/example-project.config
source $config_file
## Parameters
# Filters
FILT_DEPTH=100 # Minimum nb of reads supporting a variant
FILT_FREQ=0.64 # Relative variant frequency between 1-FILT_FREQ and FILT_FREQ
FILT_QUAL=20 # Minimum Phred-score of the sequenced nucleotide
FILT_ALLELES=2 # Minimum number of alleles of each parental haplotype carried by each fragment
FILT_NAME=FILT_depth${FILT_DEPTH}_freq${FILT_FREQ/0./}_qual${FILT_QUAL}
# Files
script_name_extract_recombinants=04b_extract_recombinants.bash
script_name_genotype_reads=04a_genotype_reads.bash
genome=$CAST_GENOME
intervals=$CAST_INTERVALS
genome_name="CAST" # Reversed as compared to pass on the first genome
alt_genome_name="B6" # Reversed as compared to pass on the first genome
vcf_file=${SNPCALL}/2_castaneus/Joint_variant_calling.raw_variants.vcf
freq_vcf_file=${vcf_file/.vcf/.freq_vcf.txt}
# Folders
genotyping_folder=${CAST_GENOT} # Folder genotyping Genome 1
recombinants_folder=${CAST_RECOMB} # Folder recombinants Genome 1
input_folder=${B6_RECOMB}
mapping_folder=$MAPPING/2_castaneus
mapping_suffix='sorted.markedDup.only_mapped_fragments.only_primary'
candidates_folder=$genotyping_folder/candidates_ID
tmp_folder=$RECOMBINANTS/tmp_CAST
# Create folders
if [ ! -d $candidates_folder ] ; then mkdir $candidates_folder ; fi
if [ ! -d $tmp_folder/ ] ; then mkdir -p $tmp_folder/ ; fi
## After recombinants have been identified in each subset (see step
## 04b_extract_recombinants), the remaining list of potential candidates is
## greatly reduced. As such, all the findings in all the subsets can be
## regrouped in only one file for each sample.
for sample in ${LIST_SAMPLES[@]} ; do
echo $sample
# Regroup all Recombinants.Min_allele
output_file=${input_folder}/${sample}.${FILT_NAME}.Recombinants.Min_allele_${FILT_ALLELES}.txt
zcat $input_folder/${sample}/${sample}.Subset_00.${FILT_NAME}.Recombinants.Min_allele_${FILT_ALLELES}.txt.gz | head -n1 > $output_file
for subset_file in $input_folder/${sample}/${sample}.*.${FILT_NAME}.Recombinants.Min_allele_${FILT_ALLELES}.txt.gz ; do
zcat $subset_file | sed '1,1d' >> $output_file
done
# Regroup all sorted.read_ends.txt
output_file=${input_folder}/${sample}.${FILT_NAME}.Fragments.sorted.read_ends.txt
zcat $input_folder/${sample}/${sample}.Subset_00.${FILT_NAME}.Fragments.sorted.read_ends.txt.gz | head -n1 > $output_file
for subset_file in $input_folder/${sample}/${sample}.*.${FILT_NAME}.Fragments.sorted.read_ends.txt.gz ; do
zcat $subset_file | sed '1,1d'>> $output_file
done
done
## Prepare freq vcf file
bash $PROJ_SRC/utils/prepare_freq_vcf_file.bash -i $vcf_file -o $freq_vcf_file
## Run on all samples
for sample in ${LIST_SAMPLES[@]} ; do
## PREPARE BED FILES
paired_bed=$mapping_folder/${sample}.${mapping_suffix}.paired_bed
paired_bam=$mapping_folder/${sample}.${mapping_suffix}.bam
if [ ! -f $paired_bed ] ; then
bash $PROJ_SRC/utils/prepare_paired_bed.bash -i $paired_bam -o $paired_bed
fi
## SUBSET INPUT FILES TO CANDIDATES
# Extract the IDs of the potential recombinants (candidates identified with
# processing on the first parental genome — see config files 04a and 04b)
list_candidates=$candidates_folder/${sample}.list_candidates.Min_allele_${FILT_ALLELES}.txt
sed '1,1d' ${input_folder}/${sample}.${FILT_NAME}.Recombinants.Min_allele_${FILT_ALLELES}.txt | cut -f1 > $list_candidates
# Extract the corresponding lines from the original bam file
input_bam_file=$SNPCALL/2_castaneus/${sample}.sorted.markedDup.only_mapped_fragments.only_primary.onIntervals.recal_reads.bam
subset_bam_file=$candidates_folder/${sample}.sorted.markedDup.only_mapped_fragments.only_primary.onIntervals.Candidates_Recombinants_Min_allele_${FILT_ALLELES}.bam
java -jar $PICARD/FilterSamReads.jar \
I=$input_bam_file \
O=$subset_bam_file \
FILTER=includeReadList \
WRITE_READS_FILES=FALSE \
READ_LIST_FILE=$list_candidates
## WRITE SUBMISSION FILE
SUB_file=$PROJ_SUB/04c_pass_second_genome.${sample}.sub
LOG_file=$PROJ_LOG/04c_pass_second_genome.${sample}
echo "#!/bin/bash" > $SUB_file
echo "#SBATCH --job-name=${sample}" >> $SUB_file
echo "#SBATCH --partition=normal" >> $SUB_file
echo "#SBATCH --time=3:00:00" >> $SUB_file
echo "#SBATCH --cpus-per-task=1" >> $SUB_file
echo "#SBATCH --mem=10G" >> $SUB_file
echo "#SBATCH --output=${LOG_file}.out" >> $SUB_file
echo "#SBATCH --error=${LOG_file}.err" >> $SUB_file
## GENOTYPE CANDIDATES ON THE SECOND GENOME
output_genotyping_prefix=$genotyping_folder/${sample}
tmp_prefix=$tmp_folder/${sample}
echo bash $PROJ_SRC_CORE/$script_name_genotype_reads -c $config_file -o $output_genotyping_prefix -t $tmp_prefix -i $subset_bam_file -f $freq_vcf_file -b $intervals -g $genome -r $genome_name -a $alt_genome_name --sub $SUB_file >> $SUB_file
## EXTRACT RECOMBINANTS ON THE SECOND GENOME
output_recombinants_prefix=$recombinants_folder/${sample}
echo bash $PROJ_SRC_CORE/$script_name_extract_recombinants -c $config_file -o $output_recombinants_prefix -i ${output_genotyping_prefix}.Genotyped_ReadsxVariants.txt -b $paired_bed -d $FILT_DEPTH -f $FILT_FREQ -q $FILT_QUAL -m $FILT_ALLELES -n $FILT_NAME -r $genome_name -a $alt_genome_name --sub $SUB_file >> $SUB_file
## SUBMIT JOB
sbatch $SUB_file
done
## (Optional): if we want to have the final recombinants with coordinates from the first genome
final_recombinants_folder=$FINAL_RECOMBINANTS
folder_ID_recombinants=${final_recombinants_folder}/Folder_ID
if [ ! -d $folder_ID_recombinants ] ; then mkdir $folder_ID_recombinants ; fi
for sample in ${LIST_SAMPLES[@]} ; do
# Extract IDs
list_IDs=$folder_ID_recombinants/${sample}.${FILT_NAME}.Recombinants.Min_allele_${FILT_ALLELES}.IDs.txt
zcat $recombinants_folder/${sample}.${FILT_NAME}.Recombinants.Min_allele_${FILT_ALLELES}.txt.gz | cut -f1 > $list_IDs
# Extract lines from recombinants on first genome
grep -f $list_IDs \
${input_folder}/${sample}.${FILT_NAME}.Recombinants.Min_allele_${FILT_ALLELES}.txt \
| awk -v SAMPLE=$sample -v OFS="\t" '
NR==1 {
$(NF-2)="CHR:START-STOP"
$NF=""
$(NF-1)="SAMPLE_FILE"
print $0
} NR > 1 {
$(NF-2)=$(NF-2)":"$(NF-1)"-"$NF
$NF=""
$(NF-1)=SAMPLE
print $0
}' - \
> $final_recombinants_folder/${sample}.${FILT_NAME}.Recombinants.Min_allele_${FILT_ALLELES}.coordinates_genome1.txt
done
| true
|
f7f6ef0b63c40250de490f80f9dc7c6ba361744b
|
Shell
|
wsl222000/test
|
/findservices.sh
|
UTF-8
| 18,850
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
. /etc/.profile
. $CCRUN/bin/shellfuns.sh
declare -a HOSTS=( $SYSLIST )
declare REQ_UID=$(perl -e "print scalar getpwuid ((stat '$CCSYSDIR/CHKDAEMONS.cf')[4])")
declare BLATHER=false
declare NOT_DEAD_YET=false
declare NCRONTAB
function usage()
{
(( $# > 0 )) && return 1
echo -e "\nusage: $(basename $0) [-s service] [-b by_key] [-v] [-2] [host ...]
If hosts given, report only services on those hosts
(otherwise all hosts in \$SYSLIST).
-s only reports on the given service, which may be one of:\n"
local S
for S in "${SERVICES[@]}" ; do
echo "$S"
done | column -c 60 | expand | perl -p -e 'print " "x7'
echo -e "
Matching is case-insensitive. Prefix matching is done, first
match found is used (ex: given just 'hl', will match HL7inbound).
-b sorts by given key: 'service' or 'host' (default is host).
-v creates verbose output with more detailed, unobscured data values.
-2 double-spaces the output lines, for easier reading.
Requires user id '$REQ_UID' to run beyond printing this help.\n"
return 0
}
function barf()
{
echo "!!! ${FUNCNAME[1]}(${BASH_LINENO[0]}) $*" >&2
exit 1
}
function blather()
{
[[ $BLATHER == 'true' ]] && echo "+ ${FUNCNAME[1]}(${BASH_LINENO[0]}) $*" >&2
return 0
}
function field()
{
(( $# != 1 )) && return 1
awk "{print \$$1}"
return $?
}
function runOn()
{
(( $# < 2 )) && return 1
local H=$1 ; shift
blather "$H, $*"
$NOT_DEAD_YET && echo -n '. ' >&2
if [[ $H == $HOST ]] ; then
eval $*
else
ssh $H ". /etc/.profile ; $*"
fi
return $?
}
function service()
{
(( $# > 0 )) && return 1
local -i I=1
while [[ ${FUNCNAME[$I]} != 'main' &&
${FUNCNAME[$I]##*goFind_} == ${FUNCNAME[$I]} ]] ; do
(( I += 1 ))
done
[[ ${FUNCNAME[$I]} != 'main' ]] && echo "${FUNCNAME[I]##*goFind_}"
return 0
}
function found()
{
(( $# < 1 )) && return 1
local H="$1" ; shift
echo -n "$H $(service)"
local STUFF="$*"
if (( $# > 0 )) ; then
! $VERBOSE && STUFF="$(perl -p -e 's/\b\d{1,3}\.\d{1,3}\.\d{1,3}\.(\d{1,3})\b/x.x.x.$1/g' <<< "$STUFF")"
echo -n ": $STUFF"
fi
echo
return 0
}
function printNumItemsOnHosts()
{
blather "item_hosts $*"
(( $# < 1 )) && return 0
for H in "${HOSTS[@]}" ; do
local -i NUM_ITEMS_ON_H=0
for IH in "$@" ; do
[[ $IH == $H ]] && (( NUM_ITEMS_ON_H += 1 ))
done
blather "$H num_items $NUM_ITEMS_ON_H"
(( $NUM_ITEMS_ON_H > 0 )) && found "$H" "$NUM_ITEMS_ON_H"
done
return 0
}
function getNcrontab()
{
(( $# > 0 )) && return 1
if [[ -z $NCRONTAB ]] ; then
[[ -r "$CCSYSDIR/ncrontab" ]] || return 2
local PRIMARY_HOST
while read LINE ; do
case ${LINE%%[[:blank:]]*} in
\* )
for H in "${HOSTS[@]}" ; do
NCRONTAB+="${LINE/#\*/$H}"$'\n'
done
;;
\. )
if [[ -z $PRIMARY_HOST ]] ; then
for H in "${HOSTS[@]}" ; do
netping $H >& /dev/null && PRIMARY_HOST="$H" && break
done
fi
[[ -n $PRIMARY_HOST ]] && NCRONTAB+="${LINE/#\./$PRIMARY_HOST}"$'\n'
;;
* )
NCRONTAB+="$LINE"$'\n'
;;
esac
done < <(grep -v -P '^\s*(\#.*)?$' "$CCSYSDIR/ncrontab")
blather 'ncrontab ' $(wc <<< "$NCRONTAB")
fi
return 0
}
function goFind_BHIE()
{
(( $# > 0 )) && return 1
getNcrontab
local H
for H in "${HOSTS[@]}" ; do
local BH
for BH in $(grep -F 'notesExpDisch' <<< "$NCRONTAB" |
field 1) ; do
blather "bh '$BH', h '$H'"
if [[ $BH == $H ]] && runOn "$H" "ps -C notesExpd -o command= |
grep -F -w -q -- '$CAMPUS'" ; then
found "$H"
break
fi
done
done
return 0
}
function goFind_ConfigTool()
{
(( $# > 0 )) && return 1
if [[ -r $CCSYSDIR/ConfigTool.rcf ]] ; then
local CFT_PRIM_HOST=$(gethostresource ConfigTool.PrimaryHost "$CCSYSDIR/ConfigTool.rcf")
blather "cft_prim_host='$CFT_PRIM_HOST'"
local H
for H in "${HOSTS[@]}" ; do
[[ $H == $CFT_PRIM_HOST ]] && found "$H" && break
done
else
found "$HOST" "not configured"
fi
return 0
}
function goFind_DietReports()
{
(( $# > 0 )) && return 1
getNcrontab
printNumItemsOnHosts $(grep -E -w 'diet.*\.scm' <<< "$NCRONTAB" |
field 1)
return 0
}
function goFind_FMRDs()
{
(( $# > 0 )) && return 1
local -a STRINGS=( $(find "$CCRUN/ccexe" \
-name .xsession \
-exec grep -l fmDisplay \{\} \+ |
perl -lne 'm|^'"$CCRUN/ccexe"'/([^/]+)| && print $1' |
sort -u) )
blather "${#STRINGS[*]} strings: ${STRINGS[*]}"
local REGEX=$(IFS='|' ; echo "${STRINGS[*]}")
blather "regex '$REGEX'"
local -a FMRDS
if [[ -n $REGEX ]] ; then
local DEVICE
for DEVICE in $(grep '^fetal' "$EERUN/conf/CCIconfig.bcf" 2> /dev/null |
field 1) ; do
local LINE=$(grep -v '^#' "$EERUN/daemons/boottab" 2> /dev/null |
grep -P '^'"$DEVICE"'\s')
blather "device '$DEVICE', line '$LINE'"
grep -q -P '\s('"$REGEX"')\s' <<< "$LINE" && FMRDS+=( "$DEVICE" )
blather "${#FMRDS[*]} fmrds: ${FMRDS[*]}"
done
fi
if (( ${#FMRDS[*]} > 0 )) ; then
REGEX=$(IFS='|' ; echo "${FMRDS[*]}")
blather "regex '$REGEX'"
printNumItemsOnHosts $(term_query |
grep -E '^('"$REGEX"')[[:space:]]' |
field 2)
fi
return 0
}
function goFind_GDRfeed()
{
(( $# > 0 )) && return 1
local -a FEEDS=( $(gethostresource transDumpOra.gdrhosts "$CCSYSDIR/ccResources") )
blather "${#FEEDS[*]} feeds: ${FEEDS[*]}"
local H
for H in "${HOSTS[@]}" ; do
local F
for F in "${FEEDS[@]}" ; do
[[ $H == $F ]] && found "$H" && break
done
done
return 0
}
function goFind_GDRreports()
{
(( $# > 0 )) && return 1
getNcrontab
printNumItemsOnHosts $(grep -E '\bcreateRep.*\b[Gg][Dd][Rr]' <<< "$NCRONTAB" |
field 1)
return 0
}
function goFind_GDRsHosted()
{
(( $# > 0 )) && return 1
local -a GDR_HOSTS=( $(grep -i -E '^gdr[[:digit:]]*\.dbstring' "$CCSYSDIR/ccResources" |
cut -d\: -f2) )
blather "${#GDR_HOSTS[*]} gdr_hosts: ${GDR_HOSTS[*]}"
if (( ${#GDR_HOSTS[*]} > 0 )) ; then
local REGEX=$(IFS='|' ; echo "${GDR_HOSTS[*]}")
blather "regex '$REGEX'"
local H
for H in "${HOSTS[@]}" ; do
local -a HOSTED=( $(runOn "$H" 'ifconfig -a' |
perl -n -e 'm/^\w+\:('"$REGEX"')\s/ and print "$1\n"') )
blather "h '$H', ${#HOSTED[*]} hosted: ${HOSTED[*]}"
(( ${#HOSTED[*]} > 0 )) && found "$H" "${HOSTED[*]}"
done
fi
return 0
}
function goFind_HL7inbound()
{
(( $# > 0 )) && return 1
local ALIAS_LINE=$(grep '\b'"${CAMPUS}"'-hl7\b' /etc/hosts 2> /dev/null |
grep -v '^\#')
blather "alias_line '$ALIAS_LINE'"
local ALIAS=$(field 2 <<< "$ALIAS_LINE")
local ALIAS_IP=$(field 1 <<< "$ALIAS_LINE")
blather "alias '$ALIAS', alias_ip '$ALIAS_IP'"
if [[ -n $ALIAS && -n $ALIAS_IP ]] ; then
local H
for H in "${HOSTS[@]}" ; do
if runOn "$H" "ifconfig -a" |
grep -E -q 'addr: ?'"$ALIAS_IP"' ' ; then
found "$H" "Network alias $ALIAS ($ALIAS_IP) active"
fi
done
fi
local LOGS_FILTER="grep -E -v 'ordersin\$'"
$VERBOSE && LOGS_FILTER='cat'
local LOG
for LOG in $(ps -fu cis |
perl -ne 'm|\bhl7server\s.*?(\S*hl7log\S*'"$CAMPUS"'\S*)| and print "$1\n"' |
eval $LOGS_FILTER |
sort -u) ; do
blather "log '$LOG'"
local -i HL7_LAST_UPDATE=0
local HL7_LAST_UPDATE_HOST=''
local H
for H in "${HOSTS[@]}" ; do
local -i UPDATE=$(runOn "$H" "stat \"$LOG\" 2>/dev/null" |
grep -F 'st_mtime' |
field 2)
blather "h '$H', update '$UPDATE', hl7_last_update '$HL7_LAST_UPDATE'"
if (( $UPDATE > $HL7_LAST_UPDATE )) ; then
(( HL7_LAST_UPDATE = $UPDATE ))
HL7_LAST_UPDATE_HOST=$H
blather "h '$H', hl7_last_update_host '$HL7_LAST_UPDATE_HOST'"
fi
done
[[ -z $HL7_LAST_UPDATE_HOST ]] && continue
local -i HL7_SIZE=$(runOn "$HL7_LAST_UPDATE_HOST" "stat \"$LOG\" 2>/dev/null" |
grep -F 'st_size' |
field 2)
blather "hl7_size '$HL7_SIZE'"
(( $HL7_SIZE <= 0 )) && continue
local RESULT
local DELIM=$(runOn "$HL7_LAST_UPDATE_HOST" "grep 'HL7 data' \"$LOG\" 2> /dev/null |
tail -1" |
cut -c31)
blather "delim '$DELIM'"
if [[ -n $DELIM ]] ; then
local -a APPS=($(runOn "$HL7_LAST_UPDATE_HOST" "grep '\[MSH' \"$LOG\" 2> /dev/null" |
cut -d"$DELIM" -f3 |
sort -u) )
blather "apps '$APPS'"
if (( ${#APPS[@]} > 0 )) ; then
$VERBOSE && RESULT="${APPS[*]} in "
else
RESULT='no source detected in '
fi
else
RESULT='no msgs detected in '
fi
blather "result '$RESULT'"
! $VERBOSE && LOG="${LOG##*\.}"
blather "log '$LOG'"
found "$HL7_LAST_UPDATE_HOST" "$RESULT$LOG ($(ccitime $HL7_LAST_UPDATE))"
done
return 0
}
function goFind_HL7outbound()
{
(( $# > 0 )) && return 1
local H
for H in "${HOSTS[@]}" ; do
if runOn "$H" 'ps -fu cis |
grep -v "grep" |
grep -F -- "'"$CAMPUS"'" |
grep -F -q TransShell' ; then
local -a OUTERS=( $(runOn "$H" 'F="$CCSYSDIR/outbound/conf/outbound.'"$H"'.rcf" ;
[[ -r $F ]] || F="$CCSYSDIR/outbound/conf/outbound.rcf" ;
grep -i "^remoteserver[1-9].servername" "$F" 2> /dev/null |
grep -v \^\#' |
cut -d\: -f2) )
blather "${#OUTERS[*]} outers: ${OUTERS[*]}"
if (( ${#OUTERS[*]} > 0 )) ; then
if ! $VERBOSE ; then
local -i I
for (( I = 0 ; $I < "${#OUTERS[@]}" ; I += 1 )) ; do
blather "i '$I', outer[$I] '${OUTERS[$I]}'"
[[ ${OUTERS[$I]} == $H ]] && unset OUTERS[$I]
done
fi
blather "num outers '${#OUTERS[*]}'"
(( ${#OUTERS[*]} > 0 )) && found "$H" ${OUTERS[*]}
fi
fi
done
return 0
}
function goFind_HostInfo()
{
(( $# > 0 )) && return 1
local H
for H in "${HOSTS[@]}" ; do
found "$H" $(runOn "$H" "echo \$(version -v) \$(uname -sr) \$(sed -e 's/Red Hat Enterprise Linux Server release/RHEL/' '/etc/redhat-release' 2> /dev/null)")
done
return 0
}
function goFind_Notification()
{
(( $# > 0 )) && return 1
[[ -r "$CCSYSDIR/notification.rcf" ]] || return 0
local ENABLED=$(gethostresource notification.enable "$CCSYSDIR/notification.rcf")
blather "enabled '$ENABLED'"
if [[ $ENABLED == [TtYy1]* ]] ; then
local WORK_HOST=$(gethostresource notification.workhost "$CCSYSDIR/notification.rcf")
blather "work_host '$WORK_HOST'"
local H
for H in "${HOSTS[@]}" ; do
[[ $H == $WORK_HOST ]] && found "$H" 'workhost'
done
fi
return 0
}
function goFind_OnWatch()
{
(( $# > 0 )) && return 1
if [[ -r "$CCSYSDIR/OnWatch.rcf" ]] ; then
local MAIN_HOST_ID=$(grep -E -i '^OnWatch.mainurl[[:space:]]*:' "$CCSYSDIR/OnWatch.rcf" 2> /dev/null |
tail -1 |
sed 's#.*//##')
local MAIN_HOST_NAME=$(grep -F -w -- "$MAIN_HOST_ID" /etc/hosts 2> /dev/null |
grep -E -v '^[[:space:]]*\#' |
field 2)
blather "main_host_id '$MAIN_HOST_ID', main_host_name '$MAIN_HOST_NAME'"
local ENABLED=$(gethostresource OnWatch.useonwatch2 "$CCSYSDIR/OnWatch.rcf")
blather "enabled '$ENABLED'"
local WORK_HOST
if [[ $ENABLED == [TtYy1]* ]] ; then
WORK_HOST=$(gethostresource OnWatch.WorkHost "$CCSYSDIR/OnWatch.rcf")
fi
blather "work_host '$WORK_HOST'"
local H
for H in "${HOSTS[@]}" ; do
[[ $H == $MAIN_HOST_NAME ]] && found "$H" 'main'
[[ $H == $WORK_HOST ]] && found "$H" 'workhost'
done
fi
return 0
}
function goFind_PatTaskList()
{
(( $# > 0 )) && return 1
[[ -r "$CCSYSDIR/PatTaskList.rcf" ]] || return 0
local DISABLED=$(gethostresource PTL.disable "$CCSYSDIR/PatTaskList.rcf")
blather "disabled '$DISABLED'"
if [[ $DISABLED != [TtYy1]* ]] ; then
local WORK_HOST=$(gethostresource PTL.workhost "$CCSYSDIR/PatTaskList.rcf")
blather "work_host '$WORK_HOST'"
local H
for H in "${HOSTS[@]}" ; do
[[ $H == $WORK_HOST ]] && found "$H" 'workhost'
done
fi
return 0
}
function goFind_UserLoad()
{
(( $# > 0 )) && return 1
local H
for H in "${HOSTS[@]}" ; do
local ACT=$(runOn "$H" 'activeterminals 5')
blather "h '$H', act '$ACT'"
local -i USERS=0
[[ $ACT != *No\ activity* ]] && (( USERS = $(wc -l <<< "$ACT") ))
found "$H" "$USERS"
done
return 0
}
function goFind_Vhosts()
{
(( $# > 0 )) && return 1
local H
for H in "${HOSTS[@]}" ; do
local -a VHOSTS_ON_H=( $(runOn "$H" 'ps -C qemu-kvm >& /dev/null &&
virsh -r -c qemu:///system list --all' |
grep -w 'running$' |
field 2) )
blather "h '$H', ${#VHOSTS_ON_H[*]} vhosts_on_h: ${VHOSTS_ON_H[*]}"
(( ${#VHOSTS_ON_H[*]} > 0 )) && found "$H" "KVM Guests: ${VHOSTS_ON_H[*]}"
done
return 0
}
function goFind_VistaImage()
{
(( $# > 0 )) && return 1
if [[ -r "$CCSYSDIR/OutboundReport.rcf" &&
$(gethostresource VistA.SendToImaging "$CCSYSDIR/OutboundReport.rcf") == [TtYy1]* ]] ; then
local H
getNcrontab
local PRIMARY_HOST=$(grep -F -w 'OutboundRep' <<< "$NCRONTAB" |
grep -F -w -v 'PRINT_CHART' |
head -1 |
field 1)
blather "primary_host '$PRIMARY_HOST'"
if [[ -n $PRIMARY_HOST ]] ; then
for H in "${HOSTS[@]}" ; do
[[ $H == $PRIMARY_HOST ]] && found "$H" 'primary host' && break
done
fi
local HL7_IP=$(grep -F -w -- "${CAMPUS}-hl7" /etc/hosts 2> /dev/null |
grep -E -v '^[[:space:]]*\#' |
field 1)
blather "hl7_ip '$HL7_IP'"
for H in "${HOSTS[@]}" ; do
runOn "$H" 'netstat -n --numeric-ports -l -p 2>/dev/null |
grep -F -- ":445"' |
grep -q -- "$HL7_IP" && found "$H" 'SMB'
done
if $VERBOSE ; then
for H in "${HOSTS[@]}" ; do
local CONNS=$(runOn "$H" 'smbstatus -S 2>/dev/null' |
perl -a -n -e 'push @a, "(service $F[0] machine $F[2])"
if $a and $F[0] eq "'"$CAMPUS"'";
m/------/ and $a=1;
END {print join " ", @a}')
blather "h '$H', conns '$CONNS'"
[[ -n $CONNS ]] && found "$H" "SMB connections: $CONNS"
done
fi
fi
return 0
}
declare -a SERVICES=( $(typeset -F |
perl -a -n -e '$F[2] =~ m/^goFind_(.*)/ and print "$1\n"' |
sort) )
declare SERVICE
declare -a SORT_KEYS=( '--key=1,1' '--key=2,2' )
declare SPACER=cat
declare VERBOSE=false
while getopts "2b:ds:vh" OPTION
do
case $OPTION in
2) SPACER='sed G'
;;
b) case $OPTARG in
host) SORT_KEYS=( '--key=1,1' '--key=2,2' ) ;;
service) SORT_KEYS=( '--key=2,2' '--key=1,1' ) ;;
*) echo -e '\nsort key must be one of: host service' >&2
usage >&2
exit 1
;;
esac
;;
d) BLATHER=true
;;
s) SERVICE="$OPTARG"
;;
v) VERBOSE=true
;;
h) usage
exit 0
;;
\?) usage >&2
exit 1
;;
esac
done
shift $(( $OPTIND-1 ));
(( $# )) && HOSTS=( "$@" )
blather "hosts: ${HOSTS[*]}"
! $BLATHER && [[ -z $SERVICE && -t 2 ]] && NOT_DEAD_YET=true
if [[ $(whoami) != $REQ_UID ]] ; then
echo "Must have user id '$REQ_UID'." >&2
exit 1
fi
blather "service: '$SERVICE'"
if [[ -n $SERVICE ]] ; then
declare LOW_SERVICE=$(tr '[:upper:]' '[:lower:]' <<< "$SERVICE")
declare S
for S in "${SERVICES[@]}" ; do
declare LOW_S=$(tr '[:upper:]' '[:lower:]' <<< "$S")
if [[ $LOW_S == $LOW_SERVICE* ]] ; then
blather "doing goFind_$S on ${HOSTS[*]}"
goFind_$S || barf "bad result $? from goFind_$S"
unset SERVICE
break
fi
done
if [[ -n $SERVICE ]] ; then
echo -e "\nNo such service as '$SERVICE'\n" >&2
usage >&2
exit 1
fi
echo -e "\n" >&2
else
declare S
for S in "${SERVICES[@]}" ; do
$NOT_DEAD_YET && echo -n '. ' >&2
blather "doing goFind_$S on ${HOSTS[*]}"
goFind_$S || barf "bad result $? from goFind_$S"
done
echo -e "\n" >&2
fi | sort --ignore-leading-blanks --stable "${SORT_KEYS[@]}" | $SPACER
exit 0
| true
|
7db6b03736068b0ee43c45d933765723f76ad270
|
Shell
|
renegeizu/UGR-SS
|
/Practica_02/Script.sh
|
UTF-8
| 11,088
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
# uso: ./Script.sh -eficiencia
# ejemplo: ./Script.sh -O2
# Colores para los mensajes por terminal
nocolor='\033[0m'
red='\033[0;31m'
green='\033[0;32m'
orange='\033[0;33m'
blue='\033[0;34m'
purple='\033[0;35m'
gray='\033[1;30m'
yellow='\033[1;33m'
# Pedimos por parametro la eficiencia de compilacion
if [ -z "$1" ]
then
echo -e "${red}Faltan el parametro de eficiencia en la compilacion${nocolor}"
else
# Guardamos los directorios necesarios
codigos=$(pwd)/Codigos
ejecutables=$(pwd)/Ejecutables
datos=$(pwd)/Datos
graficas=$(pwd)/Graficas
# Crear directorios necesarios
if [ -d $ejecutables ];
then
echo -e "${blue}Directorio 'Ejecutables' listo${nocolor}"
rm -Rf $ejecutables/*
else
mkdir $ejecutables
echo -e "${blue}Directorio 'Ejecutables' creado${nocolor}"
fi
if [ -d $datos ];
then
echo -e "${blue}Directorio 'Datos' listo${nocolor}"
rm -Rf $datos/*
mkdir $datos/V1
echo -e "${blue}Directorio 'Datos/V1' listo${nocolor}"
mkdir $datos/V2
echo -e "${blue}Directorio 'Datos/V2' listo${nocolor}"
mkdir $datos/V2_1
echo -e "${blue}Directorio 'Datos/V2_1' creado${nocolor}"
mkdir $datos/V3
echo -e "${blue}Directorio 'Datos/V3' listo${nocolor}"
mkdir $datos/V4
echo -e "${blue}Directorio 'Datos/V4' listo${nocolor}"
mkdir $datos/V5
echo -e "${blue}Directorio 'Datos/V5' listo${nocolor}"
mkdir $datos/V6
echo -e "${blue}Directorio 'Datos/V6' listo${nocolor}"
else
mkdir $datos
echo -e "${blue}Directorio 'Datos' creado${nocolor}"
mkdir $datos/V1
echo -e "${blue}Directorio 'Datos/V1' creado${nocolor}"
mkdir $datos/V2
echo -e "${blue}Directorio 'Datos/V2' creado${nocolor}"
mkdir $datos/V2_1
echo -e "${blue}Directorio 'Datos/V2_1' creado${nocolor}"
mkdir $datos/V3
echo -e "${blue}Directorio 'Datos/V3' listo${nocolor}"
mkdir $datos/V4
echo -e "${blue}Directorio 'Datos/V4' listo${nocolor}"
mkdir $datos/V5
echo -e "${blue}Directorio 'Datos/V5' listo${nocolor}"
mkdir $datos/V6
echo -e "${blue}Directorio 'Datos/V6' listo${nocolor}"
fi
echo -e "${orange}Compilando codigos...${nocolor}"
# Compilamos todos los .cpp y .C
g++ -std=c++11 $1 $codigos/montecarlo_v1.cpp -o $ejecutables/MonteCarlo_V1
g++ -std=c++11 $1 $codigos/montecarlo_v2.cpp -o $ejecutables/MonteCarlo_V2
g++ -std=c++11 $1 $codigos/montecarlo_v2_1.cpp -o $ejecutables/MonteCarlo_V2_1
g++ -std=c++11 $1 $codigos/montecarlo_v3.cpp -o $ejecutables/MonteCarlo_V3
g++ -std=c++11 $1 $codigos/montecarlo_v3_1.cpp -o $ejecutables/MonteCarlo_V3_1
g++ -std=c++11 $1 $codigos/montecarlo_v4.cpp -o $ejecutables/MonteCarlo_V4
g++ -std=c++11 $1 $codigos/montecarlo_v5.cpp -o $ejecutables/MonteCarlo_V5
g++ -std=c++11 $1 $codigos/montecarlo_v6.cpp -o $ejecutables/MonteCarlo_V6
echo -e "${orange}Fin de la compilacion${nocolor}"
# Le pasamos valores al modelo de MonteCarlo y recogemos la informacion en .dat
echo -e "${purple}Obteniendo datos del modelo de MonteCarlo V1, V2 y V2.1...${nocolor}"
numSimulaciones=(100 1000 5000 10000 100000)
for A in "${numSimulaciones[@]}"
do
for ((B=1;B<4;B=B+1))
do
$ejecutables/MonteCarlo_V1 10 1 $A $B >> $datos/V1/MonteCarlo_V1_X10Y1_$A-$B.dat
$ejecutables/MonteCarlo_V1 10 5 $A $B >> $datos/V1/MonteCarlo_V1_X10Y5_$A-$B.dat
$ejecutables/MonteCarlo_V1 10 10 $A $B >> $datos/V1/MonteCarlo_V1_X10Y10_$A-$B.dat
$ejecutables/MonteCarlo_V2 10 1 $A $B >> $datos/V2/MonteCarlo_V2_X10Z1_$A-$B.dat
$ejecutables/MonteCarlo_V2 10 5 $A $B >> $datos/V2/MonteCarlo_V2_X10Z5_$A-$B.dat
$ejecutables/MonteCarlo_V2 10 10 $A $B >> $datos/V2/MonteCarlo_V2_X10Z10_$A-$B.dat
$ejecutables/MonteCarlo_V2_1 10 1 1 $A $B >> $datos/V2_1/MonteCarlo_V2_1_X10Y1Z1_$A-$B.dat
$ejecutables/MonteCarlo_V2_1 10 5 1 $A $B >> $datos/V2_1/MonteCarlo_V2_1_X10Y5Z1_$A-$B.dat
$ejecutables/MonteCarlo_V2_1 10 10 1 $A $B >> $datos/V2_1/MonteCarlo_V2_1_X10Y10Z1_$A-$B.dat
$ejecutables/MonteCarlo_V2_1 10 1 5 $A $B >> $datos/V2_1/MonteCarlo_V2_1_X10Y1Z5_$A-$B.dat
$ejecutables/MonteCarlo_V2_1 10 5 5 $A $B >> $datos/V2_1/MonteCarlo_V2_1_X10Y5Z5_$A-$B.dat
$ejecutables/MonteCarlo_V2_1 10 10 5 $A $B >> $datos/V2_1/MonteCarlo_V2_1_X10Y10Z5_$A-$B.dat
$ejecutables/MonteCarlo_V2_1 10 1 10 $A $B >> $datos/V2_1/MonteCarlo_V2_1_X10Y1Z10_$A-$B.dat
$ejecutables/MonteCarlo_V2_1 10 5 10 $A $B >> $datos/V2_1/MonteCarlo_V2_1_X10Y5Z10_$A-$B.dat
$ejecutables/MonteCarlo_V2_1 10 10 10 $A $B >> $datos/V2_1/MonteCarlo_V2_1_X10Y10Z10_$A-$B.dat
done
done
echo -e "${purple}Finalizado${nocolor}"
# Le pasamos valores al modelo de MonteCarlo y recogemos la informacion en .dat
echo -e "${purple}Obteniendo datos del modelo de MonteCarlo V3, V3.1, V4 y V5...${nocolor}"
$ejecutables/MonteCarlo_V3 1000000 100 >> $datos/V3/ComparacionTiempos_V3.dat
$ejecutables/MonteCarlo_V3_1 1000000 100 >> $datos/V3/ComparacionTiempos_V3_1.dat
$ejecutables/MonteCarlo_V4 1000000 100 >> $datos/V4/ComparacionTiempos_V4.dat
$ejecutables/MonteCarlo_V5 1000000 100 >> $datos/V5/ComparacionTiempos_V5.dat
$ejecutables/MonteCarlo_V6 >> $datos/V6/ComparacionDatos_V6.dat
echo -e "${purple}Finalizado${nocolor}"
# Crear directorios necesarios
if [ -d $graficas ];
then
echo -e "${blue}Directorio 'Graficas' listo${nocolor}"
rm -Rf $graficas/*
mkdir $graficas/V1
echo -e "${blue}Directorio 'Graficas/V1' listo${nocolor}"
mkdir $graficas/V2
echo -e "${blue}Directorio 'Graficas/V2' listo${nocolor}"
mkdir $graficas/V2_1
echo -e "${blue}Directorio 'Graficas/V2_1' listo${nocolor}"
mkdir $graficas/V3
echo -e "${blue}Directorio 'Graficas/V3' listo${nocolor}"
mkdir $graficas/V4
echo -e "${blue}Directorio 'Graficas/V4' listo${nocolor}"
mkdir $graficas/V5
echo -e "${blue}Directorio 'Graficas/V5' listo${nocolor}"
else
mkdir $graficas
echo -e "${blue}Directorio 'Graficas' creado${nocolor}"
mkdir $graficas/V1
echo -e "${blue}Directorio 'Graficas/V1' creado${nocolor}"
mkdir $graficas/V2
echo -e "${blue}Directorio 'Graficas/V2' creado${nocolor}"
mkdir $graficas/V2_1
echo -e "${blue}Directorio 'Graficas/V2_1' listo${nocolor}"
mkdir $graficas/V3
echo -e "${blue}Directorio 'Graficas/V3' listo${nocolor}"
mkdir $graficas/V4
echo -e "${blue}Directorio 'Graficas/V4' listo${nocolor}"
mkdir $graficas/V5
echo -e "${blue}Directorio 'Graficas/V5' listo${nocolor}"
fi
# Lanzamos gnuplot para crear las graficas
echo -e "${gray}Creando graficas...${nocolor}"
for C in "${numSimulaciones[@]}"
do
for ((D=1;D<4;D=D+1))
do
gnuplot -e "plot '$datos/V1/MonteCarlo_V1_X10Y1_$C-$D.dat' using 1:2 title 'MonteCarlo_V1 - X(10) Y(1) - Veces: $C - Apartado $D' with lines; set terminal png; set output '$graficas/V1/MonteCarlo_V1_X10Y1_$C-$D.png'; replot"
gnuplot -e "plot '$datos/V1/MonteCarlo_V1_X10Y5_$C-$D.dat' using 1:2 title 'MonteCarlo_V1 - X(10) Y(5) - Veces: $C - Apartado $D' with lines; set terminal png; set output '$graficas/V1/MonteCarlo_V1_X10Y5_$C-$D.png'; replot"
gnuplot -e "plot '$datos/V1/MonteCarlo_V1_X10Y10_$C-$D.dat' using 1:2 title 'MonteCarlo_V1 - X(10) Y(10) - Veces: $C - Apartado $D' with lines; set terminal png; set output '$graficas/V1/MonteCarlo_V1_X10Y10_$C-$D.png'; replot"
gnuplot -e "plot '$datos/V2/MonteCarlo_V2_X10Z1_$C-$D.dat' using 1:2 title 'MonteCarlo_V2 - X(10) Z(1) - Veces: $C - Apartado $D' with lines; set terminal png; set output '$graficas/V2/MonteCarlo_V2_X10Z1_$C-$D.png'; replot"
gnuplot -e "plot '$datos/V2/MonteCarlo_V2_X10Z5_$C-$D.dat' using 1:2 title 'MonteCarlo_V2 - X(10) Z(5) - Veces: $C - Apartado $D' with lines; set terminal png; set output '$graficas/V2/MonteCarlo_V2_X10Z5_$C-$D.png'; replot"
gnuplot -e "plot '$datos/V2/MonteCarlo_V2_X10Z10_$C-$D.dat' using 1:2 title 'MonteCarlo_V2 - X(10) Z(10) - Veces: $C - Apartado $D' with lines; set terminal png; set output '$graficas/V2/MonteCarlo_V2_X10Z10_$C-$D.png'; replot"
gnuplot -e "plot '$datos/V2_1/MonteCarlo_V2_1_X10Y1Z1_$C-$D.dat' using 1:2 title 'MonteCarlo_V2_1 - X(10) Y(1) Z(1) - Veces: $C - Apartado $D' with lines; set terminal png; set output '$graficas/V2_1/MonteCarlo_V2_1_X10Y1Z1_$C-$D.png'; replot"
gnuplot -e "plot '$datos/V2_1/MonteCarlo_V2_1_X10Y5Z1_$C-$D.dat' using 1:2 title 'MonteCarlo_V2_1 - X(10) Y(5) Z(1) - Veces: $C - Apartado $D' with lines; set terminal png; set output '$graficas/V2_1/MonteCarlo_V2_1_X10Y5Z1_$C-$D.png'; replot"
gnuplot -e "plot '$datos/V2_1/MonteCarlo_V2_1_X10Y10Z1_$C-$D.dat' using 1:2 title 'MonteCarlo_V2_1 - X(10) Y(10) Z(1) - Veces: $C - Apartado $D' with lines; set terminal png; set output '$graficas/V2_1/MonteCarlo_V2_1_X10Y10Z1_$C-$D.png'; replot"
gnuplot -e "plot '$datos/V2_1/MonteCarlo_V2_1_X10Y1Z5_$C-$D.dat' using 1:2 title 'MonteCarlo_V2_1 - X(10) Y(1) Z(5) - Veces: $C - Apartado $D' with lines; set terminal png; set output '$graficas/V2_1/MonteCarlo_V2_1_X10Y1Z5_$C-$D.png'; replot"
gnuplot -e "plot '$datos/V2_1/MonteCarlo_V2_1_X10Y5Z5_$C-$D.dat' using 1:2 title 'MonteCarlo_V2_1 - X(10) Y(5) Z(5) - Veces: $C - Apartado $D' with lines; set terminal png; set output '$graficas/V2_1/MonteCarlo_V2_1_X10Y5Z5_$C-$D.png'; replot"
gnuplot -e "plot '$datos/V2_1/MonteCarlo_V2_1_X10Y10Z5_$C-$D.dat' using 1:2 title 'MonteCarlo_V2_1 - X(10) Y(10) Z(5) - Veces: $C - Apartado $D' with lines; set terminal png; set output '$graficas/V2_1/MonteCarlo_V2_1_X10Y10Z5_$C-$D.png'; replot"
gnuplot -e "plot '$datos/V2_1/MonteCarlo_V2_1_X10Y1Z10_$C-$D.dat' using 1:2 title 'MonteCarlo_V2_1 - X(10) Y(1) Z(10) - Veces: $C - Apartado $D' with lines; set terminal png; set output '$graficas/V2_1/MonteCarlo_V2_1_X10Y1Z10_$C-$D.png'; replot"
gnuplot -e "plot '$datos/V2_1/MonteCarlo_V2_1_X10Y5Z10_$C-$D.dat' using 1:2 title 'MonteCarlo_V2_1 - X(10) Y(5) Z(10) - Veces: $C - Apartado $D' with lines; set terminal png; set output '$graficas/V2_1/MonteCarlo_V2_1_X10Y5Z10_$C-$D.png'; replot"
gnuplot -e "plot '$datos/V2_1/MonteCarlo_V2_1_X10Y10Z10_$C-$D.dat' using 1:2 title 'MonteCarlo_V2_1 - X(10) Y(10) Z(10) - Veces: $C - Apartado $D' with lines; set terminal png; set output '$graficas/V2_1/MonteCarlo_V2_1_X10Y10Z10_$C-$D.png'; replot"
done
done
gnuplot -e "set boxwidth 0.3; set style fill solid; plot '$datos/V3/ComparacionTiempos_V3.dat' using 2:xtic(1) title 'ComparacionTiemposV3' with boxes; set terminal png; set output '$graficas/V3/ComparacionTiempos_V3.png'; replot"
gnuplot -e "set boxwidth 0.3; set style fill solid; plot '$datos/V3/ComparacionTiempos_V3_1.dat' using 2:xtic(1) title 'ComparacionTiemposV3-1' with boxes; set terminal png; set output '$graficas/V3/ComparacionTiempos_V3_1.png'; replot"
gnuplot -e "set boxwidth 0.3; set style fill solid; plot '$datos/V4/ComparacionTiempos_V4.dat' using 2:xtic(1) title 'ComparacionTiemposV4' with boxes; set terminal png; set output '$graficas/V4/ComparacionTiempos_V4.png'; replot"
gnuplot -e "set boxwidth 0.3; set style fill solid; plot '$datos/V5/ComparacionTiempos_V5.dat' using 2:xtic(1) title 'ComparacionTiemposV5' with boxes; set terminal png; set output '$graficas/V5/ComparacionTiempos_V5.png'; replot"
echo -e "${gray}Finalizado${nocolor}"
fi
| true
|
af8fcf5b3e77eb36960830f590b1ceaf3ff24ea0
|
Shell
|
wp-cli/wp-cli-tests
|
/bin/run-phpcs-tests
|
UTF-8
| 206
| 2.859375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Run the code style check only if a configuration file exists.
if [ -f ".phpcs.xml" ] || [ -f "phpcs.xml" ] || [ -f ".phpcs.xml.dist" ] || [ -f "phpcs.xml.dist" ]
then
vendor/bin/phpcs "$@"
fi
| true
|
2ae71a94546b9396c4f6ae0f746b3f4c440f2209
|
Shell
|
enlighter/linux-customizations
|
/fedora/bashrc
|
UTF-8
| 950
| 3.15625
| 3
|
[] |
no_license
|
# .bashrc — per-user interactive Bash configuration (Fedora-style layout).
# Source global definitions
if [ -f /etc/bashrc ]; then
. /etc/bashrc
fi
# Uncomment the following line if you don't like systemctl's auto-paging feature:
# export SYSTEMD_PAGER=
# User specific aliases and functions
# Enable tab completion
source ~/git-completion.bash
# colors!
# ANSI color escapes wrapped in \[ \] so readline knows they occupy
# zero columns when computing the prompt width.
green="\[\033[0;32m\]"
blue="\[\033[0;34m\]"
purple="\[\033[0;35m\]"
reset="\[\033[0m\]"
# Change command prompt
source ~/git-prompt.sh
# Show a marker in the git prompt when the working tree is dirty.
export GIT_PS1_SHOWDIRTYSTATE=1
# If not running interactively, don't do anything
[[ $- != *i* ]] && return
alias ls='ls --color=auto'
# '\u' adds the name of the current user to the prompt
# '\$(__git_ps1)' adds git-related stuff
# '\W' adds the name of the current directory
export PS1="$purple\u$green\$(__git_ps1)$blue \W $ $reset"
# Put "user@host:dir" in the window title of xterm-compatible terminals.
case $TERM in
xterm*)
    PROMPT_COMMAND='printf "\033]0;%s@%s:%s\007" "${USER}" "${HOSTNAME%%.*}" "${PWD##*/}"'
    ;;
esac
#KDE unset ksshaskpass
unset SSH_ASKPASS
| true
|
f8d4aa29ced032cf1d7d202caa382524669b0779
|
Shell
|
lj-ditrapani/dotfiles
|
/.zshrc
|
UTF-8
| 1,463
| 2.671875
| 3
|
[] |
no_license
|
# ~/.zshrc — interactive zsh configuration.
# Use vi bindings
bindkey -v
# Only show last part of path >
export PS1="%1~> "
# Show [user@host] short_path > (for remote servers)
# export PS1="[%n@%m] %1~> "
alias l='ls -FXC --color'
alias ll='l -Ahl'
alias grep='grep --color=auto'
#alias gcrl="sudo loadkeys us"
#alias uiop="sudo loadkeys dvorak"
alias gst='git status -u'
alias vim='nvim'
alias calh="ncal -b3"
# Keep Ctrl-R incremental history search even with vi bindings active.
bindkey "^R" history-incremental-search-backward
# Turn on and off mouspad
# xinput list # Get a list of input devices
alias tpoff="synclient TouchpadOff=1"
alias tpon="synclient TouchpadOff=0"
# Node Version Manager.
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm
# SDKMAN (JVM toolchain manager).
export SDKMAN_DIR="$HOME/.sdkman"
[[ -s "$HOME/.sdkman/bin/sdkman-init.sh" ]] && source "$HOME/.sdkman/bin/sdkman-init.sh"
# rbenv and pyenv binaries on PATH; pyenv shims only activate on demand
# via the pyenvinit alias below.
export PYENV_ROOT="$HOME/.pyenv"
export PATH="$HOME/.rbenv/bin:$PYENV_ROOT/bin:$PATH:$HOME/bin"
alias pyenvinit='eval "$(pyenv init -)"'
# Large, timestamped shell history.
export HISTSIZE=50000
export SAVEHIST=$HISTSIZE
setopt EXTENDED_HISTORY
# Start sway with a fresh ssh-agent in its environment.
swup() {
eval $(ssh-agent)
sway
}
alias disableLaptopMonitor="swaymsg output eDP-1 disable"
alias enableLaptopMonitor="swaymsg output eDP-1 enable"
alias mirrormonitor="xrandr --output HDMI-0 --same-as DP-2"
alias vscode="code --enable-features=UseOzonePlatform --ozone-platform=wayland --log debug"
# Using an external monitor
# swaymsg -t get_outputs
# swaymsg output eDP-1 disable
# Perhaps needed for screen sharing?
export XDG_CURRENT_DESKTOP=sway
| true
|
51f04acdcd5c1e69936aad99422013f323d99e93
|
Shell
|
diegochiodini/shell-scripts
|
/find-empty-folders.sh
|
UTF-8
| 454
| 3.890625
| 4
|
[] |
no_license
|
#!/bin/sh
# Interactively delete all empty directories below a folder.
#
# Usage: find-empty-folders.sh [FOLDER]   (defaults to the current dir)
#
# Fixes: the original used bash-only [[ ]] under #!/bin/sh, read without
# -r, and duplicated y/Y branches; replaced with POSIX [ ] and case.

FOLDER="$1"

# Remove every empty directory below $FOLDER.
Delete()
{
	echo "Deleting... '$FOLDER'"
	find "$FOLDER" -type d -empty -delete
	echo "Done."
}

# Default to the current directory when no argument was given.
[ -z "$FOLDER" ] && FOLDER="."

RES=$(find "$FOLDER" -type d -empty -print)

if [ -z "$RES" ]; then
	echo "No empty folder were found."
	exit
else
	echo "$RES"
fi

echo "Do you want to delete those folders? Y/N"
read -r ANSWER

# Accept "y" or "Y"; anything else cancels.
case "$ANSWER" in
	y|Y) Delete ;;
	*)   echo "Cancel." ;;
esac
| true
|
7157dbdcbf164b197c252f95c2444696e8f72921
|
Shell
|
geraldstanje/realtime_wordcloud
|
/start_docker.sh
|
UTF-8
| 543
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/bash
# Helper commands for the outyet demo container. The first argument
# selects the subcommand: start | info | stopall | cleanup.

# Build the image, bring up boot2docker and run the container on :8080.
start() {
	docker build --no-cache -t outyet .
	boot2docker up && $(boot2docker shellinit)
	boot2docker ip
	docker run -p 8080:8080 -t outyet
}

# Show running containers.
info() {
	docker ps
	# docker inspect hash
}

# Stop every container known to Docker.
stopall() {
	docker stop $(docker ps -a -q)
}

# Remove all containers (image removal is left commented out).
cleanup() {
	docker rm $(docker ps -a -q)
	#docker rmi $(docker images -q)
}

case "$1" in
	start|info|stopall|cleanup)
		"$1"
		;;
	*)
		printf >&2 '%s: unknown command\n' "$1"
		exit 1
		;;
esac
| true
|
29a1bba28465f73852f7ba31ce75d0f1569fa4f2
|
Shell
|
sammtcbn/dotfiles
|
/multipass_scripts/ubt2004_dev.bash
|
UTF-8
| 2,930
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
# Create an Ubuntu 20.04 multipass VM (LXD driver) with a bridged
# network interface, enable password SSH for user "ubuntu", and install
# a few basic tools. Prompts for the instance name and bridge interface.
vmname=ubt2004
loginpw=1234
bridge_interface=eno1
# Print multipass network and instance overviews.
function show_multipass_info()
{
echo multipass networks
multipass networks
echo
echo multipass list
multipass list
echo
}
# Print details of the created instance.
function show_instance_info()
{
echo multipass info ${vmname}
multipass info ${vmname}
echo
}
# Poll `multipass list` until the multipass daemon answers.
function wait_multipass_ready()
{
while :
do
multipass list 2> /dev/null
ret=$?
if [ $ret -eq 0 ]; then
return
fi
echo waiting ...
sleep 1
done
}
# Ask the user for the instance name (default kept on empty input).
function vmname_setup()
{
read -p "Instance name: [${vmname}] " tmpvmname
if [ ! -z "${tmpvmname}" ]; then
vmname=${tmpvmname}
fi
echo Instance Name will be ${vmname}
}
# List all host network interfaces to help pick the bridge.
function show_all_network_interface()
{
echo All network interface:
ip -o link show | awk -F': ' '{print $2}'
echo
}
# Ask the user for the bridge interface (default kept on empty input).
function bridgename_setup()
{
read -p "Bridge interface: [${bridge_interface}] " tmpbridge
if [ ! -z "${tmpbridge}" ]; then
bridge_interface=${tmpbridge}
fi
echo Bridge interface will be ${bridge_interface}
}
# Y/n confirmation; exits the script with status 1 on "n".
function confirm_install()
{
while true
do
read -p "Are you sure you want to continue to install? [Y/n]" ins
if [ -z "${ins}" ]; then
ins=y
fi
if [ "${ins}" == "n" ] || [ "${ins}" == "N" ]; then
exit 1
fi
if [ "${ins}" == "y" ] || [ "${ins}" == "Y" ]; then
break;
fi
done
}
# Install host packages needed for bridged networking (lxd + nm).
function init_bridge()
{
echo sudo apt install lxd network-manager -y
sudo apt install lxd network-manager -y
echo
}
# Install multipass itself via snap.
function init_multipass()
{
echo sudo snap install multipass --classic
sudo snap install multipass --classic
echo
}
# Bring the guest's package lists and packages up to date.
function update_ubuntu()
{
multipass exec ${vmname} -- sudo apt -y update
multipass exec ${vmname} -- sudo apt -y upgrade
multipass exec ${vmname} -- sudo apt -y autoremove
multipass exec ${vmname} -- sudo apt -y autoclean
}
# Switch multipass to the LXD driver unless it is already selected.
function hypervisor_setup()
{
multipass get local.driver | grep lxd
ret=$?
if [ $ret -eq 0 ]; then
return
fi
echo multipass set local.driver=lxd
multipass set local.driver=lxd
echo
}
# Set the "ubuntu" user's password and enable password SSH logins.
function sshd_setup()
{
multipass exec ${vmname} -- bash -c "echo -e '${loginpw}\n${loginpw}' | sudo passwd ubuntu"
multipass exec ${vmname} -- sudo sed -r -i 's/PasswordAuthentication no/PasswordAuthentication yes/g' /etc/ssh/sshd_config
multipass exec ${vmname} -- sudo systemctl restart sshd
}
# Install basic development tools inside the guest.
function install_app()
{
multipass exec ${vmname} -- sudo apt -y install curl git vim
}
vmname_setup
show_all_network_interface
bridgename_setup
confirm_install
init_bridge
init_multipass
wait_multipass_ready
hypervisor_setup
wait_multipass_ready
echo multipass launch --name ${vmname} "20.04" --network ${bridge_interface}
multipass launch --name ${vmname} "20.04" --network ${bridge_interface}
echo
wait_multipass_ready
update_ubuntu
sshd_setup
install_app
show_multipass_info
show_instance_info
echo Note: You can ssh to ${vmname} with user ubuntu and password ${loginpw}
echo
| true
|
ed54faa54c63a9882c775fd233d5229400599b0f
|
Shell
|
zaptree/cli-tools
|
/docker/containers/liquibase.sh
|
UTF-8
| 1,187
| 3.78125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Usage: liquibase.sh DBNAME CHANGELOG.yaml [MORE_CHANGELOGS...]
# Creates DBNAME inside the running postgres container, then runs each
# Liquibase changelog against it via a throwaway liquibase container.
#postgres://postgres:postgres@192.168.59.103:5432/ua
#postgres://postgres:postgres@boot2docker:5432/ua
#set container name
POSTGRES=postgres
# create postgres database
# $1 = postgres container name, $2 = database name to create
function createDatabase() {
docker exec $1 yum -y install postgresql-contrib
docker exec $1 psql -U postgres -c "CREATE DATABASE $2" && echo "Database created!"
docker exec $1 psql -d $2 -U postgres -c 'CREATE EXTENSION "uuid-ossp"' && echo "uuid-ossp installed in database!"
}
createDatabase $POSTGRES $1
# Remaining arguments are changelog files; run each migration in turn.
for i in ${@:2} ; do
#match the path and file name for yaml file (BASH_REMATCH array has regex results)
[[ $i =~ (.*)/([^/]+)$ ]]
MIGRATION_FILE=${BASH_REMATCH[2]}
SQL_URL="jdbc:postgresql://postgresql:5432/$1"
SQL_USER="postgres"
SQL_PASS="postgres"
SCHEMA_DIR=${BASH_REMATCH[1]}
echo "Running $MIGRATION_FILE migration on $SQL_URL as user $SQL_USER"
# Mount the changelog's directory at /schema and link the postgres
# container under the hostname "postgresql" used in SQL_URL.
docker run --rm -ti -v $SCHEMA_DIR:/schema -w /schema \
--link $POSTGRES:postgresql \
docker-artifacts.ua-ecm.com/liquibase:latest \
--driver=org.postgresql.Driver \
--logLevel=info \
--changeLogFile=$MIGRATION_FILE \
--url=$SQL_URL \
--username=$SQL_USER \
--password=$SQL_PASS migrate
done
| true
|
8e16c2def32e6de823ccd53bb9d73d0d572af254
|
Shell
|
jrevertvila/app_Nodejs_Express_Angular1.5
|
/initdocker.sh
|
UTF-8
| 473
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
# Build the REST and AngularJS images, then start (or create) one
# container for each.

sudo docker build -t restimage backend/rest/
sudo docker build -t angularjsimage frontend/

# start_or_run NAME IMAGE PORT — reuse an existing container when one
# with that name is already known to Docker, otherwise create it fresh.
start_or_run() {
	local name=$1 image=$2 port=$3
	if [ -n "$(sudo docker ps -a | grep "$name")" ]; then
		sudo docker start "$name"
	else
		sudo docker run --name "$name" -dit -p "$port:$port" "$image"
	fi
}

start_or_run container_rest restimage 3000
start_or_run container_angularjs angularjsimage 4000
| true
|
f86911be532efb1c48b4fc38911169338b1c1c39
|
Shell
|
ianbstewart/nonstandard_word_dissemination
|
/scripts/data_processing/average_dataframes.sh
|
UTF-8
| 274
| 2.671875
| 3
|
[] |
no_license
|
# compute average over all cells in dataframes
# Launches average_dataframes.py in the background over every
# "*tag_pcts.tsv" file in the frequency data directory, writing the
# script's stdout to $OUTPUT.
DATA_DIR=../../data/frequency
DATA_NAME=tag_pcts
# NOTE: $DATAFRAMES is deliberately left unquoted at the call site so
# the glob expands to one argument per matching file.
DATAFRAMES=$DATA_DIR/*"$DATA_NAME".tsv
TIMEFRAME=2015_2016
OUTPUT=../../output/average_dataframes.txt
(python average_dataframes.py $DATAFRAMES --timeframe $TIMEFRAME > $OUTPUT)&
| true
|
45cf701ab3afce049da3e442a883d42c1a5723a5
|
Shell
|
ellmo/ghgvc
|
/ghgvc.sh
|
UTF-8
| 6,731
| 2.625
| 3
|
[
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# GHGVC install script for rails application
# Written by Carl Crott <carlcrott@gmail.com>
# Provisions an Ubuntu host end-to-end: RVM + Ruby 1.9.3, the ghgvc
# Rails app, ruby-netcdf, Passenger + nginx, Node.js, and the ghgvcR
# R package. Intended to be run twice: the first run installs RVM and
# asks you to re-SSH so RVM is sourced, the second run continues.
sudo apt-get -y install git-core curl locate
git clone http://github.com/delinquentme/ghgvc.git
curl -L https://get.rvm.io | bash -s stable --ruby
## RVM reqs
sudo apt-get -y install build-essential openssl libreadline6 libreadline6-dev curl git-core zlib1g zlib1g-dev libssl-dev libyaml-dev libsqlite3-0 libsqlite3-dev sqlite3 libxml2-dev libxslt-dev autoconf libc6-dev ncurses-dev automake libtool bison subversion
RVM_DIR=$(which rvm)
if [ $RVM_DIR ]; then
echo "RVM confirmed"
rvm install ruby-1.9.3-p125
rvm --default use ruby-1.9.3-p125
echo "export rvm_trust_rvmrcs_flag=1" > ~/.rvmrc #auto-trust .rvmrc flags
cd ghgvc/
git checkout name_indexing
bundle install --without development
# build out net-cdf parsing
sudo apt-get -y install netcdf-bin libnetcdf-dev
cd /home/ubuntu/.rvm/rubies/ruby-1.9.3-p125/bin/
rvm --default use ruby-1.9.3-p125
wget http://www.gfd-dennou.org/arch/ruby/products/ruby-netcdf/release/ruby-netcdf-0.6.6.tar.gz
tar -zxvf ruby-netcdf-0.6.6.tar.gz && cd ruby-netcdf-0.6.6/
ruby -rubygems extconf.rb --with-narray-include=/home/ubuntu/.rvm/gems/ruby-1.9.3-p125@ghgvc/gems/narray-0.6.0.8/
# edit make makefile per:
#https://bbs.archlinux.org/viewtopic.php?id=163623
sudo make
sudo make install
cd ../ && sudo rm -rf ruby-netcdf*
# build out and workaround for specifying production
RAILS_ENV=production bundle exec rake db:create db:schema:load
else
echo " SSH back in to re-source RVM "
exit
fi
# Passenger/RVM load-path shim.
# NOTE(review): this writes to the absolute path /config/…, which looks
# like it was meant to be ~/ghgvc/config/… — confirm.
cat << 'EOF' > /config/setup_load_paths.rb
if ENV['MY_RUBY_HOME'] && ENV['MY_RUBY_HOME'].include?('rvm')
begin
gems_path = ENV['MY_RUBY_HOME'].split(/@/)[0].sub(/rubies/,'gems')
ENV['GEM_PATH'] = "#{gems_path}:#{gems_path}@global"
require 'rvm'
RVM.use_from_path! File.dirname(File.dirname(__FILE__))
rescue LoadError
raise "RVM gem is currently unavailable."
end
end
# If you're not using Bundler at all, remove lines bellow
ENV['BUNDLE_GEMFILE'] = File.expand_path('../Gemfile', File.dirname(__FILE__))
require 'bundler/setup'
EOF
## MySQL reqs
sudo apt-get -y install libxslt-dev libxml2-dev libsqlite3-dev libmysqlclient-dev
cd ~/ghgvc/
###### Server Configs ###
sudo useradd deploy
sudo apt-get -y update && sudo apt-get -y upgrade
sudo apt-get -y install build-essential zlib1g-dev libssl-dev libreadline-dev libyaml-dev libcurl4-openssl-dev curl git-core python-software-properties
gem install passenger
#passenger-install-nginx-module
rvmsudo -E /home/ubuntu/.rvm/wrappers/ruby-1.9.3-p125@ghgvc/ruby /home/ubuntu/.rvm/gems/ruby-1.9.3-p125@ghgvc/gems/passenger-4.0.19/bin/passenger-install-nginx-module
sudo chown -R ubuntu /home/ubuntu/ghgvc/*
echo 1.9.3-p125@ghgvc >> .ruby-version
# Install an init script so nginx starts on boot.
wget -O init-deb.sh http://library.linode.com/assets/660-init-deb.sh
sudo mv init-deb.sh /etc/init.d/nginx
sudo chmod +x /etc/init.d/nginx
sudo /usr/sbin/update-rc.d -f nginx defaults
# Rails database configuration (sqlite in test/production, mysql in dev).
cat <<'EOF' > ~/ghgvc/config/database.yml
development:
adapter: mysql2
database: ghgvc_dev
pool: 5
timeout: 5000
test:
adapter: sqlite3
database: db/test.sqlite3
pool: 5
timeout: 5000
production:
adapter: sqlite3
database: db/prod.sqlite3
pool: 5
timeout: 5000
EOF
# irb
# require 'numru/netcdf'
# Replace the nginx config with one that serves the app via Passenger.
# NOTE(review): `sudo cat <<EOF > file` does not make the redirection
# privileged — the shell, not sudo, opens nginx.conf here.
sudo service nginx stop
sudo rm /opt/nginx/conf/nginx.conf
sudo cat <<'EOF' > nginx.conf
worker_processes 1;
events {
worker_connections 1024;
}
http {
passenger_root /home/ubuntu/.rvm/gems/ruby-1.9.3-p125@ghgvc/gems/passenger-4.0.19;
passenger_ruby /home/ubuntu/.rvm/wrappers/ruby-1.9.3-p125@ghgvc/ruby;
include mime.types;
default_type application/octet-stream;
sendfile on;
keepalive_timeout 65;
server {
listen 80;
server_name localhost;
location / {
root /home/ubuntu/ghgvc/public; # <--- be sure to point to 'public'!
passenger_enabled on;
index index.html index.htm;
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root html;
}
}
}
EOF
sudo cp nginx.conf /opt/nginx/conf/nginx.conf
sudo service nginx start
sudo apt-add-repository -y ppa:chris-lea/node.js
sudo apt-get -y install nodejs
sudo service nginx restart
echo "Nginx configured"
echo "Installing ghgvcR libs"
cd ~/
git clone http://github.com/delinquentme/ghgvcR
git clone http://github.com/PecanProject/pecan
# NOTE(review): `echo … >> /etc/apt/sources.list` needs root; run this
# script with sufficient privileges or the append silently fails.
echo "deb http://lib.stat.cmu.edu/R/CRAN/bin/linux/ubuntu precise/" >> /etc/apt/sources.list
sudo apt-get update
sudo apt-get -y install libcurl4-gnutls-dev r-cran-xml
# devtools:
#http://stackoverflow.com/questions/16467725/r-devtools-github-install-fails
echo "configure R and devtools for ghgvcR code"
#git clone git://github.com/hadley/devtools.git && R CMD build devtools
#R CMD install devtools_1.3.99.tar.gz /home/ubuntu/R/x86_64-pc-linux-gnu-library/2.14/
#sudo echo "deb http://cran.rstudio.com/bin/linux/ubuntu `lsb_release -s -c`/" > /etc/apt/sources.list.d/R.list
#sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys E084DAB9
#sudo apt-get -y update && sudo apt-get -y upgrade
#sudo apt-get -y install build-essential git gfortran openmpi-bin libhdf5-openmpi-dev r-base-core jags liblapack-dev libnetcdf-dev netcdf-bin bc libcurl4-openssl-dev curl udunits-bin libudunits2-dev libmysqlclient-dev
#sudo apt-get -y install libgdal1-dev libproj-dev
#echo 'install.packages("devtools", repos="http://cran.rstudio.com/")' | R --vanilla
#echo 'install.packages("devtools", repos="http://cran.rstudio.com/")' | R --vanilla
# Add the CRAN apt key and repository, then install R.
sudo apt-get -y install gksu
gpg --keyserver keyserver.ubuntu.com --recv-key E084DAB9
gpg -a --export E084DAB9 | sudo apt-key add -
sudo echo "deb http://cran.cnr.berkeley.edu/bin/linux/ubuntu precise/" >> /etc/apt/sources.list
sudo apt-get update
sudo apt-get -y install r-base
########################
# R bootstrap script that installs devtools and the ghgvcR package.
sudo cat <<'EOF' > ./build_ghgvcR.r
install.packages("devtools") # this causes issues on ubuntu 12.04
install.packages("RCurl"); install.packages("rjson"); install.packages("httr")
library(devtools)
install("~/ghgvcR/")
install("/home/thrive/rails_projects/PEcAn/utils")
EOF
##############################
sudo chmod 0755 ./build_ghgvcR.r && ./build_ghgvcR.r
cd ~/ghgvcR/
sudo chmod 0755 /src/ghgvc_script.R
./src/ghgvc_script.R
#sudo apt-get -y install r-base-core=2.15.3* r-recommended=2.15.3* r-doc-html=2.15.3* r-base=2.15.3*
#$IP_ADDY=$(curl http://canhazip.com/)
#http://ec2-184-73-47-14.compute-1.amazonaws.com/
echo "GHGVC Server build complete."
#ssh -v -i ~/.ec2/ec2.pem ubuntu@ec2-184-73-47-14.compute-1.amazonaws.com
| true
|
f586a87c96d57f25d6086404f542d0368d579c43
|
Shell
|
nonlin-lin-chaos-order-etc-etal/files
|
/pregenerate.sh
|
UTF-8
| 978
| 3.421875
| 3
|
[
"Unlicense"
] |
permissive
|
#! /usr/bin/env sh
# Regenerate index.html files: one per directory under ./releases and
# one for the repository root. Each index is a Markdown bullet list of
# the directory's entries rendered to HTML, with shasums.txt (when
# present) appended as preformatted text.
for d in $(find ./releases -type d); do
# NOTE(review): $d always starts with "./releases", so this equality
# with the bare name "download.i2p2.de" never seems to match — confirm
# whether a pattern/basename comparison was intended.
if [ $d = "download.i2p2.de" ]; then
break
fi
rm $d/index.md $d/index.html $d/README.md -fv
for f in $(ls $d); do
echo $d/$f
# Subdirectories link to their own index.html.
if [ -d $d/$f ]; then
echo "IS A DIR"
g="$f/index.html"
f=$g
fi
# README.md accumulates one bullet per entry and is re-rendered to
# index.html on every iteration (the final render wins).
echo " - [$f]($f)" >> $d/README.md
pandoc $d/README.md -o $d/index.html
if [ -f $d/shasums.txt ]; then
echo '<pre><code>' >> $d/index.html
cat $d/shasums.txt >> $d/index.html
echo '</code></pre>' >> $d/index.html
fi
done
git add $d/index.html
rm $d/README.md
done
# Same link-list generation for the repository root.
rm -f index.md index.html index2.html
for f in $(ls); do
# if [ $f = "download.i2p2.de" ]; then
# break
# fi
echo $f
if [ -d $f ]; then
echo "IS A DIR"
g="$f/index.html"
f=$g
fi
echo " - [$f]($f)" >> index.md
markdown index.md > index.html
done
# Prepend the rendered top-level README to the generated link list.
mv index.html index2.html
pandoc README.md -o index.html
cat index2.html >> index.html
rm index2.html
| true
|
076a6ce1ba8573af1d2cc45086021a3ccfc22e36
|
Shell
|
prateek0103/json2es
|
/bulk_insert.sh
|
UTF-8
| 729
| 3.1875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Convert a JSON array of documents into Elasticsearch bulk format
# (action line + source line per document, array index used as _id)
# and POST the result to the _bulk endpoint.
# 0. Some constants to re-define to match your environment
ES_HOST=localhost:9200
JSON_FILE_IN=data.json
JSON_FILE_OUT=out.json
# 1. Python code to transform your JSON file
PYTHON="import json,sys;
out = open('$JSON_FILE_OUT', 'w');
with open('$JSON_FILE_IN') as json_in:
    docs = json.loads(json_in.read());
    for idx, doc in enumerate(docs):
        out.write('%s\n' % json.dumps({'index': { '_index': 'segment', '_type': 'query', '_id': idx}}));
        out.write('%s\n' % json.dumps(doc, indent=0).replace('\n', ''));
"
# 2. run the Python script from step 1
python -c "$PYTHON"
# 3. use the output file from step 2 in the curl command
curl -s -XPOST $ES_HOST/index/type/_bulk --data-binary @$JSON_FILE_OUT
| true
|
a63a2a20d4e9503f4122b84c8e51ac90f85ac315
|
Shell
|
amadden80/installfest_script
|
/scripts/copy_over_dotfiles.sh
|
UTF-8
| 372
| 3.203125
| 3
|
[] |
no_license
|
# Install the dotfiles shipped with installfest into $HOME, backing up
# any existing file into a timestamped directory under ~/.wdi first.

src_root=~/.wdi/installfest
settings_dir=$src_root/settings
files=($settings_dir/dotfiles/*)
stamp=$(date +%s)
backup_dir="$HOME/.wdi/backups_$stamp"
mkdir -p "$backup_dir"

for src in "${files[@]}"; do
  target=".$(basename "$src")"
  # Preserve any existing version before overwriting it.
  [[ -a "$HOME/$target" ]] && cp "$HOME/$target" "$backup_dir/$target"
  cp "$src" "$HOME/$target"
done
| true
|
3508c5acf3d4a62e80f7e7a1e4e884ad82057fd2
|
Shell
|
apoelstra/rust-miniscript
|
/contrib/test.sh
|
UTF-8
| 903
| 3.234375
| 3
|
[
"CC0-1.0"
] |
permissive
|
#!/bin/sh -ex
# CI driver: optionally lint and fuzz, then run the test suite with no
# features and with each feature in turn, build/run the examples, and
# optionally bench. Controlled by TOOLCHAIN, DO_LINT, DO_FUZZ, DO_BENCH.
FEATURES="compiler serde"
# Use toolchain if explicitly specified
if [ -n "$TOOLCHAIN" ]
then
    alias cargo="cargo +$TOOLCHAIN"
fi
# Lint if told to
if [ "$DO_LINT" = true ]
then
    (
        rustup component add rustfmt
        cargo fmt --all -- --check
    )
fi
# Fuzz if told to
if [ "$DO_FUZZ" = true ]
then
    (
        cd fuzz
        cargo test --verbose
        ./travis-fuzz.sh
        # Exit out of the fuzzer,
        # run stable tests in other CI vms
        # (this exit only leaves the subshell, not the script)
        exit 0
    )
fi
# Test without any features first
cargo test --verbose
# Test each feature
for feature in ${FEATURES}
do
    cargo test --verbose --features="$feature"
done
# Also build and run each example to catch regressions
cargo build --examples
# run all examples
run-parts ./target/debug/examples
# Bench if told to
if [ "$DO_BENCH" = true ]
then
    cargo bench --features="unstable compiler"
fi
| true
|
9b211cc19b41e7fafbfa9d4cc6fae6d561cbc1e8
|
Shell
|
digideskio/PartySuit
|
/installs/extra-osx-installs
|
UTF-8
| 1,512
| 2.765625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# extra-osx-installs
####= SuitAndCape Extra OS X Installation File
##============================================================================##
## Personalize OS X installation scripts here
echo "Beginning extra OS X installations"
## Require the administrator password
sudo -v
## Keep updating `sudo` timestamp until `extra-osx-installs` has finished
## (background loop refreshes sudo every 60s; exits when this shell dies)
while true; do sudo -n true; sleep 60; kill -0 "$$" || exit; done 2>/dev/null &
##== Homebrew Formulae ========================================================#
# List of Homebrew Cask apps to install; paid apps are left commented.
extra_casks=(
  ## Free Casks
  arduino
  blender
  cactus
  filezilla      ## VS. 'transmit' ##
  # flux         ## "Automatically adjust brightness" causes to f.lux flicker
  glueprint
  ios-saver
  sourcetree     ## VS. 'tower' ##
  spectacle      ## VS. 'sizeup' ##
  spotify
  steam
  transmission
  vlc
  ## Paid Casks
  # colorsnapper                        ### $08.95 ###
  # dash                                ### $24.99 ###
  # kaleidoscope                        ### $69.99 ###
  # marked                              ### $09.99 ###
  # screenflow                          ### $99.00 ###
  # sizeup       ## VS. 'spectacle' ##  ### $12.99 ###
  # sketch                              ### $99.00 ###
  # things                              ### $49.99 ###
  # tower        ## VS. 'sourcetree' ## ### $69.00 ###
  # transmit     ## VS. 'filezilla' ##  ### $69.00 ###
)
## Install Homebrew Cask applications
echo "Installing Casks (Homebrew maintained applications)"
brew cask install "${extra_casks[@]}"
| true
|
e721d12b4e7d02f48a62893c27eec0a53cc06612
|
Shell
|
kubiko/toolbox
|
/glue/bin/unset-aliases
|
UTF-8
| 375
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
# Remove every snap alias registered for this snap.
#
# The snap name is read from meta/snap.yaml (relative to this script's
# location) and each alias listed for it is removed via `snap unalias`.
snap_root=$(dirname "$0")
snap_name=$(grep 'name: ' "${snap_root}/../meta/snap.yaml" | awk '{ print $2}')
echo "This is helper script and it should be called by user directly!"
echo "Removing aliases for ${snap_name} snap"
# Fix: filter on the snap name read from snap.yaml instead of the
# hard-coded literal "toolbox", so the script works for any snap.
snap aliases | grep "${snap_name}" | awk '{print $2}' | while read -r line
do
    snap unalias "${line}"
done
| true
|
b7f96ac72c5b89d04c000569c2fee28b78ccee1b
|
Shell
|
morika-t/cg-deploy-bosh
|
/ci/update-cloud-config-tooling.sh
|
UTF-8
| 671
| 3.140625
| 3
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
#!/bin/bash
set -eu
# Build the final BOSH cloud config: spruce-merge the base cloud config,
# terraform outputs, any extra manifests from MANIFEST_PATH, and one
# generated bosh.yml per environment whose terraform state exists, then
# upload the merged result with bosh-cli.
files=("bosh-config/cloud-config/base.yml" "terraform-yaml/state.yml")
# MANIFEST_PATH is an optional space-separated list of extra manifests.
for file in ${MANIFEST_PATH:-}; do
files=(${files[@]} "${file}")
done
for environment in "development" "staging" "production"; do
# Only environments with a non-empty terraform state file are merged.
if [ -s terraform-yaml-${environment}/state.yml ]; then
ENVIRONMENT=${environment} spruce merge --prune terraform_outputs \
bosh-config/cloud-config/bosh.yml \
terraform-yaml-${environment}/state.yml \
> ${environment}-bosh.yml
files=(${files[@]} ${environment}-bosh.yml)
fi
done
spruce merge --prune terraform_outputs "${files[@]}" > cloud-config-final.yml
bosh-cli -n update-cloud-config cloud-config-final.yml
| true
|
9af672c7cc8aba827fe5f407bdd65aa25afc8bf5
|
Shell
|
Spread0x/enhancd
|
/src/filter.sh
|
UTF-8
| 3,740
| 3.6875
| 4
|
[
"MIT"
] |
permissive
|
# Filter library for enhancd. Each function reads candidate lines from
# stdin (or a file argument where noted) and writes the filtered result
# to stdout. The __enhancd::command::* helpers are defined elsewhere.

# Keep only lines that name an existing directory.
__enhancd::filter::exists()
{
local line
while read line
do
if [[ -d ${line} ]]; then
echo "${line}"
fi
done
}
# Print lines from their second occurrence onward (i.e. only repeats).
# Input comes from file $1 when given, otherwise stdin.
__enhancd::filter::join()
{
if [[ -n $1 ]] && [[ -f $1 ]]; then
command cat "$1"
else
command cat <&0
fi | __enhancd::command::awk 'a[$0]++' 2>/dev/null
}
# __enhancd::filter::unique uniques a stdin contents
# (keeps only the first occurrence of each line; file $1 or stdin)
__enhancd::filter::unique()
{
if [[ -n $1 ]] && [[ -f $1 ]]; then
command cat "$1"
else
command cat <&0
fi | __enhancd::command::awk '!a[$0]++' 2>/dev/null
}
# __enhancd::filter::reverse reverses a stdin contents
# (line order is reversed via the bundled reverse.awk; file $1 or stdin)
__enhancd::filter::reverse()
{
if [[ -n $1 ]] && [[ -f $1 ]]; then
command cat "$1"
else
command cat <&0
fi \
| __enhancd::command::awk -f "$ENHANCD_ROOT/functions/enhancd/lib/reverse.awk" \
2>/dev/null
}
# Match candidates against query $1. With ENHANCD_USE_FUZZY_MATCH=1 the
# bundled fuzzy.awk scorer is used; otherwise a regex keeps entries
# whose last path component contains the query (empty query passes all).
__enhancd::filter::fuzzy()
{
if [[ -z $1 ]]; then
cat <&0
else
if [[ $ENHANCD_USE_FUZZY_MATCH == 1 ]]; then
__enhancd::command::awk \
-f "$ENHANCD_ROOT/functions/enhancd/lib/fuzzy.awk" \
-v search_string="$1"
else
# Case-insensitive (don't use fuzzy searhing)
__enhancd::command::awk '$0 ~ /\/.?'"$1"'[^\/]*$/{print $0}' 2>/dev/null
fi
fi
}
# Let the user pick one entry interactively. A single candidate is
# returned as-is; multiple candidates are piped through $ENHANCD_FILTER.
# Input comes from $1 or stdin; fails when there is no entry at all.
__enhancd::filter::interactive()
{
local stdin="${1}"
if [[ -z ${stdin} ]] || [[ -p /dev/stdin ]]; then
stdin="$(command cat <&0)"
fi
if [[ -z ${stdin} ]]; then
echo "no entry" >&2
return $_ENHANCD_FAILURE
fi
local filter
filter="$(__enhancd::filepath::split_list "$ENHANCD_FILTER")"
local -i count
count="$(echo "${stdin}" | __enhancd::command::grep -c "")"
case "${count}" in
1)
if [[ -n ${stdin} ]]; then
echo "${stdin}"
else
return $_ENHANCD_FAILURE
fi
;;
*)
local selected
selected="$(echo "${stdin}" | eval ${filter})"
if [[ -z ${selected} ]]; then
return 0
fi
echo "${selected}"
;;
esac
}
# Drop lines that are exactly equal to $1.
__enhancd::filter::exclude()
{
__enhancd::command::grep -v -x -F "${1}" || true
}
# Drop lines starting with "//" or "#" (comment lines).
__enhancd::filter::exclude_commented()
{
__enhancd::command::grep -v -E '^(//|#)' || true
}
# Replace the first match of regex $1 with $2 on each line; only lines
# where a substitution happened are printed.
__enhancd::filter::replace()
{
local old new
old="${1:?too few argument}"
new="${2:-""}"
__enhancd::command::awk \
-v old="${old}" \
-v new="${new}" \
'sub(old, new, $0) {print $0}'
}
# Remove string $1 from each line (replace with nothing).
__enhancd::filter::trim()
{
local str
str="${1:?too few argument}"
__enhancd::filter::replace "${str}"
}
# Keep at most the first $1 lines (default 10).
__enhancd::filter::limit()
{
command head -n "${1:-10}"
}
# Drop candidates whose basename matches a directory listed in the
# current directory's .gitignore (plus ".git"); passes input through
# unchanged when there is no .gitignore.
__enhancd::filter::exclude_gitignore()
{
local -a ignores=()
if [[ -f $PWD/.gitignore ]]; then
ignores+=(".git")
else
# just do read the input and do output
# if no gitignore file
command cat <&0
return 0
fi
local ignore
while read ignore
do
if [[ -d ${ignore} ]]; then
ignores+=( "$(command basename ${ignore})" )
fi
done <${PWD}/.gitignore
# True when $1 matches any collected ignore pattern.
contains() {
local input ignore
input=${1:?need one argument}
for ignore in "${ignores[@]}"
do
# https://www.gnu.org/savannah-checkouts/gnu/bash/manual/bash.html#Shell-Parameter-Expansion
if [[ ${input} =~ ${ignore//\./\\.} ]]; then
return 0
fi
done
return 1
}
local line
while read line
do
if contains ${line}; then
continue
fi
echo "${line}"
done
}
| true
|
2f88d2fcb12caafc91491bd00c26ef4b62c4bd48
|
Shell
|
skingFD/BFA
|
/projects/arc/scripts/summarize_counts.sh
|
UTF-8
| 859
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/bash
# Summarize the per-network COUNT: lines emitted by the basegraphs run
# into one CSV row per network (only networks whose bulk run logged OK).
SCRIPTPATH=`readlink -f $0`
BASEDIR="`dirname $SCRIPTPATH`/.."
LOGSDIR=$BASEDIR/logs
OKLOG="$LOGSDIR/ok.log"
# First whitespace-separated field of every OK line is the network name.
grep OK $LOGSDIR/bulk-basegraphs.log | cut -f1 -d' ' > $OKLOG
RAWLOGFILE="basegraphs-gen.log"
COUNTLOGFILE="counts.log"
SUMMARYCSV="$LOGSDIR/counts.csv"
echo "network,devices,processEtgVertices,processEtgEdges,ospfProcesses,bgpProcesses,staticProcesses,processEtgDiameter,instanceEtgVertices,instanceEtgEdges,ospfInstances,bgpInstances,staticInstances,deviceEtgVertices,deviceEtgEdges,policyGroups,separatePolicyGroups" > $SUMMARYCSV
cat $OKLOG | while read NETWORK; do
echo $NETWORK
NWLOGDIR=$LOGSDIR/$NETWORK
# Extract the COUNT: lines; their third field is the metric value.
grep "COUNT:" $NWLOGDIR/$RAWLOGFILE > $NWLOGDIR/$COUNTLOGFILE
COUNTS=`cat $NWLOGDIR/$COUNTLOGFILE | cut -f3 -d' '`
# Join the metric values with commas to form the CSV row.
COUNTS=`echo $COUNTS | sed -e 's/ /,/g'`
echo $NETWORK,$COUNTS >> $SUMMARYCSV
done
| true
|
893cab661a44b41c33457b3d55c408768eba59da
|
Shell
|
JOJ0/audio-utils
|
/tag_60_funk.sh
|
UTF-8
| 871
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
# Tag the "60 Of The Funkiest Keyboard Riffs" MP3s with ID3v2 metadata.
#
# Dry-run by default: prints each file's current tag and the values that
# would be written. Pass "doit" as the first argument to write the tags.

IFS=$'\n' # split only on newlines
#set -f # disable globbing

ARTIST="Andrew D. Gordon"
ALBUM="60 Of The Funkiest Keyboard Riffs Known To Mankind"
GENRE="Piano Practice"
YEAR="1995"

# Iterate with a glob instead of parsing `ls` output (ShellCheck SC2045);
# adapt the pattern here:
for i in *Riff*mp3; do
    # Title is the part after the first "-", without the ".mp3" suffix.
    #TITLE=$(echo "$i" | awk -F " - " '{ print $2 }' | sed -e 's/\.mp3//g')
    TITLE=$(echo "$i" | awk -F "-" '{ print $2 }' | sed -e 's/\.mp3//g')
    # Track number is whatever follows "Riff" in the file name.
    TRACKNO=$(echo "$i" | sed -e 's,.*Riff,,g' | sed -e 's/\.mp3//g')
    echo file: "$i" ...
    echo current tag:
    id3v2 -l "$i" | grep -v -e "Encoded by" -e "id3v2 tag info for"
    echo ""
    echo will set "$TITLE" as title if \$1 is doit;
    echo will set "$TRACKNO" as track number if \$1 is doit;
    if [ "$1" = "doit" ]; then
        echo ""
        # Quote "$i" so file names containing spaces are passed intact.
        id3v2 -2 -T "$TRACKNO" -a "$ARTIST" -t "$TITLE" -A "$ALBUM" -g "$GENRE" -y "$YEAR" -c "" "$i"
    fi
    echo -e "\n\n"
done
| true
|
968fd858780c65167831130b501b41704944c816
|
Shell
|
zambezi/ez-build
|
/test/cli/module-formats.bats
|
UTF-8
| 1,003
| 2.84375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bats
# Bats tests for ez-build's `--flags modules:<format>` option. Each case
# builds the bare-project fixture and compares the generated
# lib/index.js against a recorded expected output. Note the "invalid"
# case asserts that an unknown format falls back to the default (umd).
load test-util
setup() {
load_fixture bare-project
}
teardown() {
unload_fixture bare-project
}
@test "should output umd modules by default" {
ez-build
assert_success
assert_expected "$(cat lib/index.js)"
}
@test "should output umd modules when specified" {
ez-build --flags modules:umd
assert_success
assert_expected "$(cat lib/index.js)"
}
@test "should output umd modules when invalid module format is specified" {
ez-build --flags modules:invalid
assert_success
assert_expected "$(cat lib/index.js)"
}
@test "should output amd modules when specified" {
ez-build --flags modules:amd
assert_success
assert_expected "$(cat lib/index.js)"
}
@test "should output commonjs modules when specified" {
ez-build --flags modules:commonjs
assert_success
assert_expected "$(cat lib/index.js)"
}
@test "should output ecmascript modules when specified" {
ez-build --flags modules:ecmascript
assert_success
assert_expected "$(cat lib/index.js)"
}
| true
|
2db1ab6c8cbf5ada37bda0396f81f4c70fec9cbf
|
Shell
|
DingtongHan/KungFu-1
|
/docker/build-images.sh
|
UTF-8
| 960
| 3.21875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Build the KungFu CPU TensorFlow docker image and smoke-test it by
# running the MNIST MLP example inside the container.
set -e
cd $(dirname $0)

# Build arguments baked into the images: apt sources list and pip mirror.
SOURCES_LIST=sources.list.aliyun
PY_MIRROR='-i https://pypi.tuna.tsinghua.edu.cn/simple'

# Archive the KungFu sources into docker/kungfu.tar.gz (build context).
pack_kungfu() {
    cd ..
    tar -cvf - srcs cmake CMakeLists.txt setup.py go.mod | gzip -c >docker/kungfu.tar.gz
    cd -
}

# build_image TAG DOCKERFILE CONTEXT
build_image() {
    local image_tag=$1
    local docker_file=$2
    local build_context=$3
    docker build --rm \
        --build-arg SOURCES_LIST="${SOURCES_LIST}" \
        --build-arg PY_MIRROR="${PY_MIRROR}" \
        -t ${image_tag} -f $docker_file $build_context
}

# Run the given command in the CPU image with examples/data mounted.
run_example() {
    docker run --rm \
        -v $(pwd)/../examples:/examples \
        -v $HOME/var/data:/root/var/data \
        -it registry.gitlab.com/lsds-kungfu/image/kungfu:tf-cpu-ubuntu18 $@
}

# build_image registry.gitlab.com/lsds-kungfu/image/builder:ubuntu18 Dockerfile.builder-ubuntu18 .
pack_kungfu
build_image registry.gitlab.com/lsds-kungfu/image/kungfu:tf-cpu-ubuntu18 Dockerfile.tf-cpu-ubuntu18 .
run_example python3 ./examples/mnist_mlp.py
| true
|
d16be1d02637db3e8665e501cc6da7526662b8c4
|
Shell
|
marcopeg/docker-images
|
/mssql-bacpac/scripts/list.sh
|
UTF-8
| 404
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/bash
set -e

# Rebuild ~/.aws/credentials from the AWS_* environment variables, then
# list the contents of the configured S3 bucket.
rm -rf ~/.aws
mkdir -p ~/.aws
cat >> ~/.aws/credentials <<EOF
[default]
aws_access_key_id = ${AWS_ACCESS_KEY}
aws_secret_access_key = ${AWS_ACCESS_SECRET}
EOF

clear
printf '%s\n' "###" "### Reviso State" "###" ""
printf '%s\n' "loading from s3://${AWS_BUCKET_NAME}...." ""
aws s3 ls s3://${AWS_BUCKET_NAME}
| true
|
d08a9f756ca0a12a50451856a16a4c67da6fdd60
|
Shell
|
mateuszkojro/uczelnia_s2
|
/sys_op/remote7/zadanie4.sh
|
UTF-8
| 208
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
# Copy file $1 to $2 with sanity checks.
#
# Fixes: the argument count is now validated *before* the arguments are
# used, and all expansions are quoted so empty or space-containing
# arguments no longer break the [ ] tests.
if [ "$#" -ne 2 ];then
	echo "zla ilosc arg"
elif [ "$1" = "$2" ];then
	echo "argumenty sa takie same"
elif [ -e "$2" ];then
	echo "plik docelowy juz istnieje"
else
	cp "$1" "$2"
fi
| true
|
263fd46dd912515715aef2de9d858fb84bdf2e18
|
Shell
|
vardancse/traffic
|
/weather.sh
|
UTF-8
| 317
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# Download daily Weather Underground history for New York City for every
# day in January 2014 — one JSON file per day.
day=2014-01-01
last=2014-02-01
while [ "$day" != "$last" ]; do
	# Date without dashes (e.g. 20140101), as used in the API path.
	stamp=${day//-/}
	wget --output-document=New_York_City_$stamp.json http://api.wunderground.com/api/fe5c0eae76462c31/history_$stamp/q/NY/New_York_City.json
	day=$(date -I -d "$day + 1 day")
done
| true
|
b2c8ebae1b784cbae50ed753295b633f1b476bfe
|
Shell
|
PaulZhutovsky/rs-fMRI-Preprocessing
|
/preprocessing/preprocessing_AROMA.sh
|
UTF-8
| 1,602
| 3.15625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Run ICA-AROMA denoising for every subject's FEAT-preprocessed rs-fMRI
# data, at most N subjects in parallel.
# aroma SUBJECT_DIR DATA_ROOT AROMA_SCRIPT — denoise one subject.
aroma() {
    subjFullFolder="$1"
    dataFolder="$2"
    AROMAScript="$3"
    subjFolder=$(basename "${subjFullFolder}")
    echo ${subjFolder}
    featFolder="${dataFolder}/${subjFolder}/func/preproc.feat" # what do we want as output folder?
    fMRIData="${featFolder}/filtered_func_data.nii.gz"
    mcFiles="${featFolder}/mc/prefiltered_func_data_mcf.par"
    exampleFunc="${featFolder}/reg/example_func.nii.gz"
    echo ${fMRIData}
    structFolder="${dataFolder}/${subjFolder}/anat"
    echo ${structFolder}
    # 1. Create Mask (creates func.nii.gz (brain-extracted) and func_mask.nii.gz, we only need the latter und will remove the former)
    echo "Creating Func Mask!"
    bet ${exampleFunc} ${featFolder}/reg/func -f 0.3 -n -m -R
    imrm ${featFolder}/reg/func.nii.gz
    # 2. run AROMA
    # Uses the BBR EPI->T1 affine plus the ANTs T1->MNI affine and warp.
    echo "Running AROMA"
    echo ${fMRIData}
    python $AROMAScript -in ${fMRIData} -out ${featFolder}/ICA_AROMA -mc ${mcFiles} -m ${featFolder}/reg/func_mask.nii.gz -affmat ${featFolder}/reg/ANTsEPI2T1_BBR.txt -affmat2 ${structFolder}/${subjFolder}_ANTsT1toMNI0GenericAffine.mat -warp ${structFolder}/${subjFolder}_ANTsT1toMNI1Warp.nii.gz
}
#dataFolder=${HOME}/fMRI_data/PTSD_veterans
#AROMAScript=${dataFolder}/code/ICA-AROMA_ANTS/ICA_AROMA.py
#session=ses-T0
#subjectToInclude='sub-ptsd*'
#N=10
projectFolder="/data/shared/ptsd_police"
dataFolder="${projectFolder}/derivatives/AROMApipeline"
AROMAScript="${projectFolder}/code/ICA_AROMA/ICA_AROMA.py"
subjectToInclude='sub-*'
N=5
for subjFullFolder in ${dataFolder}/${subjectToInclude}; do
    # Throttle to N parallel jobs: every N-th iteration waits for the
    # current batch of background jobs to finish before launching more.
    ((i=i%N)); ((i++==0)) && wait
    aroma ${subjFullFolder} ${dataFolder} ${AROMAScript} &
done
| true
|
1754a543a7af6c098b30030d868bfd39e66e1bf6
|
Shell
|
TileDB-Inc/TileDB-xarray
|
/tools/hooks/pre-commit.sh
|
UTF-8
| 1,343
| 4.375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Function to automate running linting/formatting tests.
run_test() {
name=$2
fix_msg=$3
echo "* Running ${name}.. "
echo "..................."
$1
status=$?
echo "..................."
if [ $status -ne 0 ]; then
read -r -p "..failed. Would you like continue with commit? [y/N] " response
case "$response" in
[yY][eE][sS]|[yY])
echo "Continuing with tests .."
;;
*)
echo $fix_msg
exit $status
esac
else
echo "..passed"
fi
}
# get all python files that aren't deleted
python_files=$(git diff --cached --name-only --diff-filter=AM | grep '\.py$')
if [ ! -z "${python_files}" ]; then
# run isort
run_test "isort --check --diff ${python_files}" \
"isort" \
"Try running 'poetry run isort .' and add changes to git."
# run black
run_test "black --check ${python_files}" \
"black" \
"Try running 'poetry run black .' and add changes to git."
# run flake8
run_test "flake8 ${python_files}" "flake8" ""
# run mypy
run_test "mypy ${python_files}" "mypy" ""
fi
# Check for whitespace errors
if git rev-parse --verify HEAD >/dev/null 2>&1
then
against=HEAD
else
# Initial commit: diff against an empty tree object
against=$(git hash-object -t tree /dev/null)
fi
exec git diff-index --check --cached $against --
| true
|
39a2c532fa9c391d8efc5940fe29bfa8ddc45188
|
Shell
|
benhurstein/clab
|
/controle/controle-vent
|
UTF-8
| 2,748
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
MQTT_HOST=localhost
Talvo1=15031
Talvo=$[Talvo1+(`date +%s`-1482740000)/100]
if [ $Talvo -gt 15969 ]; then
Talvo=15969
fi
Tmed=$Talvo
Tvmed=$Talvo
Tinf=$[Talvo-10]
Tsup=$[Talvo+10]
Tmax=$Talvo
Tmin=$Talvo
estado_atual=NAOSEI
estado_quero=0
motor_atual=NAOSEI
motor_quero=100
teve_acima=0
proximo_envio=$(date +%s)
mosquitto_sub -h $MQTT_HOST -t /clab/sensor/adega-temp-garrafa-alto -t /clab/sensor/adega-temp-vent-saida|while read t
do
read tv
now=$(date +%s)
Tmed=`echo "($t*1000+$Tmed*19)/20"|bc -l`
Tvmed=`echo "($tv*1000+$Tvmed*19)/20"|bc -l`
Ttes=`echo $Tmed|cut -d. -f1`
if [ $Ttes -gt $Talvo ]; then
motor_quero=`echo "($Tmed-$Talvo)*10"|bc -l|cut -d. -f1`
if [ $motor_quero -lt 20 ]; then
if [ $teve_acima = 1 ]; then
motor_quero=0
else
motor_quero=20
fi
else
teve_acima=1
if [ $motor_quero -gt 100 ]; then
motor_quero=100
fi
#if [ $motor_quero -gt 20 ]; then
# motor_quero=40
#fi
fi
else
teve_acima=0
motor_quero=0
fi
#motor_quero=60
# if [ $Ttes -gt $Tsup ]; then
# estado_quero=1
# elif [ $Ttes -lt $Tinf ]; then
# estado_quero=0
# fi
if [ $estado_quero -eq 1 ]; then
if [ $Ttes -gt $Tmax ]; then
Tmax=$Ttes
elif [ $[Tmax-Ttes] -ge 10 -a $Ttes -lt $Talvo ]; then
estado_quero=0
Tmin=$Ttes
fi
else
if [ $Ttes -lt $Tmin ]; then
Tmin=$Ttes
elif [ $[Ttes-Tmin] -ge 10 -a $Ttes -gt $Talvo ]; then
estado_quero=1
Tmax=$Ttes
fi
fi
## novo teste do motor, controlado pela temperatura de saida do ar
if [ $estado_quero -eq 1 ]; then
#Tar=`echo "$tv*1000"|bc -l`
Tar=`echo $Tvmed|cut -d. -f1`
delta=$[Talvo-Tar]
# se o ar tiver 3 graus ou mais abaixo do alvo, nao precisa agua
# se tiver 2 graus ou menos, 100% da agua
motor_quero=$[(3000-delta)/10]
if [ $motor_quero -lt 20 ]; then
motor_quero=20
fi
if [ $motor_quero -gt 100 ]; then
motor_quero=100
fi
else
motor_quero=0
fi
# ignora se temperatura estiver muito alta
if [ $[Ttes-Talvo] -gt 200 ]; then
motor_quero=100
fi
##
if [ $estado_quero != $estado_atual -o $motor_quero != $motor_atual -o $now -ge $proximo_envio ]; then
mosquitto_pub -h $MQTT_HOST -t /clab/actuator/adega-vent -m $estado_quero
estado_atual=$estado_quero
mosquitto_pub -h $MQTT_HOST -t /clab/actuator/adega-bomba -m $motor_quero
motor_atual=$motor_quero
proximo_envio=$[now+10]
fi
Talvo=$[Talvo1+(`date +%s`-1482740000)/100]
if [ $Talvo -gt 15969 ]; then
Talvo=15969
fi
Tinf=$[Talvo-10]
Tsup=$[Talvo+10]
echo $Ttes $t $estado_atual $motor_atual $Talvo $now >> /home/benhur/clab/log/controle-vent.log
done
| true
|
5a2c70e8e573156436aadf0bd0517c47c6363503
|
Shell
|
keitaroyam/cheetah
|
/scripts/run_cheetah_dark
|
UTF-8
| 1,028
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/bash
if [ $# -eq 0 ];
then
echo "USAGE: dark_cheetah rXXXX [rYYYY, rZZZZ, ...]"
echo "The following enviromnmental variables need to be defined: XTCDIR, CONFDIR_DARK and H5DIR_DARK"
else
cd $H5DIR_DARK
for (( i=1; i<=$#; i++ )); do
eval RUN=\${$i}
echo "Preparing to process $RUN"
eval RUNDIR=$RUN"_"$(date -d "today" +"%Y%m%d_%H%M%S")
echo "Creating directory: "$RUNDIR
mkdir $RUNDIR
if [ -f $CONFDIR_DARK/cheetah_$RUN.ini ];
then
cp $CONFDIR_DARK/cheetah_$RUN.ini $RUNDIR/cheetah.ini
else
cp $CONFDIR_DARK/cheetah.ini $RUNDIR/
fi
if [ -f $CONFDIR_DARK/psana_$RUN.cfg ];
then
cp $CONFDIR_DARK/psana_$RUN.cfg $RUNDIR/psana.cfg
else
cp $CONFDIR_DARK/psana.cfg $RUNDIR/
fi
echo "#!/bin/bash\n" > $RUNDIR/process.sh
echo "psana -c psana.cfg $XTCDIR/*$RUN*.xtc" > $RUNDIR/process.sh
chmod u+x $RUNDIR/process.sh
cd $RUNDIR
./process.sh
cd ..
echo "Setting link from $RUN to directory $RUNDIR"
rm $RUN
ln -s $RUNDIR $RUN
echo "Exit from processing $RUN"
done
fi
| true
|
c345fca29ca1813788e182828cb8abdc5381b6b3
|
Shell
|
ymxl85/MRs-based-test-suite-for-APR
|
/original/TS-mf/SP-tcas/mutants/MR2.sh
|
UTF-8
| 131
| 2.828125
| 3
|
[] |
no_license
|
MR=$1
number=$2
i=7
while [ $i -le $number ]
do
/bin/sh symbolicExeMR5.sh $MR v$i
/bin/sh MFCCMR2.sh $MR v$i
i=$((i+1))
done
| true
|
5c32c564d506528cf0f402bf957190e7f0a79c78
|
Shell
|
Crmiv/sh
|
/netmanage/autowifi.sh
|
UTF-8
| 686
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
IFACE=wlp3s0
IP_ADDR=192.168.1.10
SUBNET_MASK=255.255.255.0
GW=192.168.1.1
#if use physical cheat, cancel '#'
#HW_ADDR=''
ESSID="home"
WAP_KEY=''
#scan your surrounding,and use command 'iwlist scan'
FREQ=''
if [[ -n $WAP_KEY ]];
then
KEY_PART="key $WAP_KEY"
fi
#test root
if [ $UID -ne 0 ];
then
echo "Use root run it"
exit 1;
fi
#shutdown your interface
/sbin/ifconfig $IFACE down
#edit HW_ADDR in temp, and /sbin/reboot it revert
if [[ -n $HW_ADDR ]];
then
/sbin/ifconfig $IFACE hw ether $HW_ADDR
fi
/sbin/iwconfig $IFACE essid $ESSID $KEY_PART freq $FREQ
/sbin/ifconfig $IFACE $IP_ADDR netmask $SUBNET_MASK
route add default gw $GW $IFACE
print "Successful"
| true
|
d22a11ff6aa8f68b75f105a7ea0f6f6b9699f814
|
Shell
|
ikaika720/examples
|
/docker/integration/bin/download-pipework.sh
|
UTF-8
| 155
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/sh
. ./env.sh
cd $baseDir
if [ -d $baseDir/pipework ]
then
cd pipework
git pull
else
git clone https://github.com/jpetazzo/pipework.git
fi
| true
|
38ca4e625afe3aae3dacd8194de6ade205bea3da
|
Shell
|
sebGuerrero/coding_tournament
|
/back/test/setup.sh
|
UTF-8
| 501
| 3.53125
| 4
|
[] |
no_license
|
#!/usr/bin/env sh
waitUntilHealthy() {
COMMAND="docker inspect -f \"{{.State.Health.Status}}\" $(docker-compose ps -q $1)"
HEALTH_STATUS=$(eval ${COMMAND})
while [ "${HEALTH_STATUS}" != "healthy" ]; do
echo "Service is not healthy yet"
sleep 1
HEALTH_STATUS=$(eval ${COMMAND})
done
echo "Service is healthy"
unset HEALTH_STATUS
}
DIR="$(dirname "$0")"
currentDir=$(pwd)
cd $DIR
echo "Starting docker containers"
docker-compose up -d
waitUntilHealthy "mongo"
cd $currentDir
| true
|
01a4b517dcbd60a45306fa777880035446bcee6a
|
Shell
|
PHANTOM-DEV1/core-commander
|
/modules/environment.sh
|
UTF-8
| 1,807
| 3.84375
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
setup_environment_file ()
{
if [[ ! -e "${CORE_DATA}/.env" ]]; then
mkdir "${HOME}/.phantom"
local envFile="${CORE_DATA}/.env"
touch "$envFile"
echo "PHANTOM_LOG_LEVEL=debug" >> "$envFile" 2>&1
echo "PHANTOM_DB_HOST=localhost" >> "$envFile" 2>&1
echo "PHANTOM_DB_USERNAME=phantom" >> "$envFile" 2>&1
echo "PHANTOM_DB_PASSWORD=password" >> "$envFile" 2>&1
echo "PHANTOM_DB_DATABASE=phantom_devnet" >> "$envFile" 2>&1
fi
. "${CORE_DATA}/.env"
}
setup_environment ()
{
set_locale
if [[ ! -f "$commander_config" ]]; then
ascii
install_base_dependencies
install_program_dependencies
install_nodejs_dependencies
install_system_updates
# create ~/.commander
touch "$commander_config"
echo "CORE_REPO=https://github.com/PhantomCore/core" >> "$commander_config" 2>&1
echo "CORE_DIR=${HOME}/phantom-core" >> "$commander_config" 2>&1
echo "CORE_DATA=${HOME}/.phantom" >> "$commander_config" 2>&1
echo "CORE_CONFIG=${HOME}/.phantom/config" >> "$commander_config" 2>&1
echo "CORE_TOKEN=phantom" >> "$commander_config" 2>&1
echo "CORE_NETWORK=devnet" >> "$commander_config" 2>&1
echo "EXPLORER_REPO=https://github.com/PhantomCore/explorer" >> "$commander_config" 2>&1
echo "EXPLORER_DIR=${HOME}/phantom-explorer" >> "$commander_config" 2>&1
. "$commander_config"
# create ~/.phantom/.env
setup_environment_file
success "All system dependencies have been installed!"
check_and_recommend_reboot
press_to_continue
fi
if [[ -e "$commander_config" ]]; then
. "$commander_config"
setup_environment_file
fi
}
| true
|
12a18fb575aee9ed9a0fde0581fd5470ee4b7393
|
Shell
|
RobLewisQA/PrisonersDilemma
|
/scripts/docker-compose-install.sh
|
UTF-8
| 373
| 2.640625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
sudo apt update
sudo apt install -y curl jq
version=$(curl -s https://api.github.com/repos/docker/compose/releases/latest | jq -r '.tag_name')
sudo curl -L "https://github.com/docker/compose/releases/download/${version}/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
sudo chmod +x /usr/local/bin/docker-compose
docker-compose --version
| true
|
bdcd85d70cb95de23c1ca31f36138267cdea0fe8
|
Shell
|
mike-prince/dotfiles
|
/zsh/zshenv
|
UTF-8
| 343
| 2.515625
| 3
|
[] |
no_license
|
#!/usr/bin/env zsh
unsetopt GLOBAL_RCS # Do not read global configs
# Eliminate duplicates in paths
typeset -gU cdpath fpath path
# Base path
PATH=/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin
# Set GOPATH
export GOPATH="$HOME/go"
# Build path
PATH=$PATH:$GOPATH
# Source dotfiles
DOTFILES="$HOME/Code/dotfiles"
source $DOTFILES/zsh/zshrc
| true
|
34d9645a80eb4c8363b6772608e823448c9e5b0b
|
Shell
|
MarcelRobitaille/dotfiles
|
/bin/latex_rm
|
UTF-8
| 403
| 3.09375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
declare -a extensions=(".aux" ".bbl" ".bcf" ".blg" ".fdb_latexmk" ".fls" ".log" ".out" ".run.xml" ".glg" ".glg-abr" ".glo" ".glo-abr" ".gls" ".gls-abr" ".ist" ".lof" ".lot")
for ext in "${extensions[@]}"; do
echo "${1%.*}$ext"
done
read -p "Confirm deleting these files? [y/n] " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]; then
for ext in "${extensions[@]}"; do
rm "${1%.*}$ext"
done
fi
| true
|
9587b84bd661466533ae8c5dbc10bc2df73454d2
|
Shell
|
syreddy001/web_page_counter
|
/scripts/create_certificate.sh
|
UTF-8
| 2,328
| 3.8125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
setup_environment () {
set -x
sleep 5
source /usr/local/bootstrap/var.env
IFACE=`route -n | awk '$1 == "192.168.9.0" {print $8;exit}'`
CIDR=`ip addr show ${IFACE} | awk '$2 ~ "192.168.9" {print $2}'`
IP=${CIDR%%/24}
if [ "${TRAVIS}" == "true" ]; then
ROOTCERTPATH=tmp
IP=${IP:-127.0.0.1}
LEADER_IP=${IP}
else
ROOTCERTPATH=etc
fi
export ROOTCERTPATH
}
create_certificate () {
# ${1} domain e.g. consul
# ${2} data centre e..g. DC1
# ${3} certificate duration in days
# ${4} additional ip addresses
# ${5} cert type either server, client or cli
[ -f /${ROOTCERTPATH}/${1}.d/pki/tls/private/${1}-${5}-key.pem ] &>/dev/null || {
echo "Start generating ${5} certificates for data centre ${2} with domain ${1}"
sudo mkdir --parent /${ROOTCERTPATH}/${1}.d/pki/tls/private /${ROOTCERTPATH}/${1}.d/pki/tls/certs
pushd /${ROOTCERTPATH}/${1}.d/pki/tls/private
sudo /usr/local/bin/consul tls cert create \
-domain=${1} \
-dc=${2} \
-key=/${ROOTCERTPATH}/ssl/private/${1}-agent-ca-key.pem \
-ca=/${ROOTCERTPATH}/ssl/certs/${1}-agent-ca.pem \
-days=${3} \
-additional-ipaddress=${4} \
-additional-dnsname="${HOSTNAME}.hashistack.ie" \
-additional-dnsname="hashistack.ie" \
-additional-dnsname="${5}.global.nomad" \
-${5}
sudo mv ${2}-${5}-${1}-0.pem /${ROOTCERTPATH}/${1}.d/pki/tls/certs/${1}-${5}.pem
sudo mv ${2}-${5}-${1}-0-key.pem /${ROOTCERTPATH}/${1}.d/pki/tls/private/${1}-${5}-key.pem
sudo chmod 755 /${ROOTCERTPATH}/${1}.d/pki/tls/certs/${1}-${5}.pem
sudo chmod 755 /${ROOTCERTPATH}/${1}.d/pki/tls/private/${1}-${5}-key.pem
sudo chown -R ${1}:${1} /${ROOTCERTPATH}/${1}.d
# debug
sudo ls -al /${ROOTCERTPATH}/${1}.d/pki/tls/private/
sudo ls -al /${ROOTCERTPATH}/${1}.d/pki/tls/certs/
popd
echo "Finished generating ${5} certificates for data centre ${2} with domain ${1}"
}
}
setup_environment
create_certificate $1 $2 $3 $4 $5
exit 0
| true
|
d3fbf091d76e2a770aecc8c9d31c64fa68ddd9d6
|
Shell
|
possan/sensortag-midi
|
/BundleAssets/midi/app.sh
|
UTF-8
| 439
| 3.03125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
cd "$(dirname "$0")"
# Start node midi server
../Resources/app/node ../Resources/app/server.js >/tmp/MIDIServer.log 2>&1 &
# Get PID
sleep 0.1
PID=`ps aux|grep node|grep server.js|grep -v grep|awk '{print $2}'`
if [ "${PID}" ]
then
# launch the cocoa app
./apache-callback-mac -url "file://$(dirname "$0")/../Resources/index.html"
# Stop node midi server
kill ${PID}
else
# Node failed to start
exit 1
fi
| true
|
dfff22d27e4ff4975904082abb585e8bdee25fa3
|
Shell
|
maclockard/dotfiles
|
/zshrc
|
UTF-8
| 3,586
| 2.859375
| 3
|
[] |
no_license
|
# zplug set-up
source ~/.zplug/init.zsh
isOSX="[[ $OSTYPE == *darwin* ]]"
isLinux="[[ $OSTYPE == *linux* ]]"
# zsh vi mode
# bindkey -v
# export KEYTIMOUT=1 # lag of .1 seconds when switching modes
# bindkey -M viins 'jk' vi-cmd-mode
# bindkey "^?" backward-delete-char # actually allow deleting
zplug 'zplug/zplug', hook-build:'zplug --self-manage'
# plugins etc.
zplug "lib/*", from:oh-my-zsh
zplug "plugins/git", from:oh-my-zsh
zplug "plugins/pip", from:oh-my-zsh
zplug "plugins/web-search", from:oh-my-zsh
zplug "plugins/tmux", from:oh-my-zsh
zplug "plugins/colored-man-pages", from:oh-my-zsh
zplug "plugins/yarn", from:oh-my-zsh
zplug "djui/alias-tips"
zplug "rupa/z", use:z.sh
# Load pure theme and dependencies
zplug "mafredri/zsh-async", from:github
zplug "sindresorhus/pure", use:pure.zsh, from:github, as:theme
# Install plugins if there are plugins that have not been installed
if ! zplug check; then
printf "Install new zsh plugins? [y/N]: "
if read -q; then
echo; zplug install
fi
fi
# this is here for rust completions
# remove once there is something better
fpath+=~/.zfunc
zplug load
PURE_PROMPT_SYMBOL=λ
# use the right term for tmux
TERM=screen-256color
# Remove "user@hostname" when I'm the one logged in
DEFAULT_USER="maclockard"
export EDITOR="vim"
# color stuff for ls
export LS_COLORS='di=31:ln=35:so=31;1:pi=0;1:ex=1;31:bd=0;1:cd=37;1:su=37;1:sg=0;1:tw=0;1:ow=0;1:'
if eval $isOSX; then
export LSCOLORS=bxfxbEaEBxxEhEhBaDaCaD
alias ls='ls -lGh'
fi
if eval $isLinux; then
alias ls='ls -lh --color=auto'
fi
# setup local scripts
export PATH="$HOME/bin:$PATH"
export PATH="$HOME/bin/local:$PATH"
### My aliases
# make updating dot files easy
alias dotfiles='cd ~/.dotfiles/'
alias upzsh='source ~/.zshrc'
alias zshrc='vim ~/.dotfiles/zshrc && upzsh'
alias zshrc_local='vim ~/.dotfiles/zshrc_local && upzsh'
alias vimrc='vim ~/.dotfiles/vimrc'
alias idot='~/.dotfiles/install'
alias updot='dotfiles ; git pull ; idot ; cd ~-'
alias pushdot='dotfiles ; git add . ; git commit ; git push origin master ; cd ~-'
# Spotify stuff
alias sp='spotify'
alias spp='spotify play'
# shortcuts
alias work='cd ~/workspace'
alias intel='open -a "IntelliJ IDEA CE"'
alias size='du -s -h *'
alias stat='stat -x'
# frontend
alias y='yarn'
alias ud="cat ./package.json | jq .dependencies | jq -r 'keys[]' | xargs -L 1 -I % sh -c 'rg % | wc -l | xargs echo %' | rg \" 1$\""
# utilities
alias untar='tar xvfz'
# fuzzy finding
export FZF_DEFAULT_COMMAND='fd -t f'
alias fz='fzf --height 45% --preview "head -100 {}"'
alias fzd='fd -t d | fzf --height 45% --preview "ls"'
alias ff='vim $(fz)'
alias ffd='cd $(fzd)'
# If there is a local configuration file, load it
if [ -f ~/.zshrc_local ]; then
source ~/.zshrc_local
fi
# make a gif from a quicktime movie
alias gif='ffmpeg -i in.mov -pix_fmt rgb24 -r 20 -f gif - | gifsicle --optimize=3 --delay=3 > out.gif'
# python shit
PATH="/Users/maclockard/Library/Python/3.7/bin:$PATH"
# ruby shit
PATH="/Users/maclockard/.gem/ruby/2.6.0/bin:$PATH"
# rust shit
export PATH="$HOME/.cargo/bin:$PATH"
# yarn shit
#export PATH="${HOME}/.config/yarn/global/node_modules/.bin:$PATH"
#export PATH="../../node_modules/.bin:$PATH"
#export PATH="./node_modules/.bin:$PATH"
# vs code
export PATH="$PATH:/Applications/Visual Studio Code.app/Contents/Resources/app/bin"
# useful env vars
export WORK="$HOME/workspace"
# brew completions
if type brew &>/dev/null; then
FPATH=$(brew --prefix)/share/zsh/site-functions:$FPATH
autoload -Uz compinit
compinit
fi
rm -f ~/.zcompdump; compinit
| true
|
e3644fcc173741120c41a918c52e16e88ffd82f3
|
Shell
|
tiedwu/install-shim
|
/memento_updater/find_omaha.sh
|
UTF-8
| 749
| 3.375
| 3
|
[] |
no_license
|
# Copyright (c) 2009 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Parent file must include memento_updater_logging.sh
# This file cannot be run by itself, it must be included.
# Return the value for a given key in the override lsb-release file.
# If no value is found, checks in the standard lsb-release file.
findLSBValue()
{
# Check factory lsb file.
value=$(grep ^$1 $FACTORY_LSB_FILE | cut -d = -f 2-)
if [ -z "$value" ]
then
value=$(grep ^$1 /etc/lsb-release | cut -d = -f 2-)
fi
# Don't remove this echo, this is not for test purpose
echo $value
}
FACTORY_LSB_FILE=/mnt/stateful_partition/dev_image/etc/lsb-factory
| true
|
37b90740289a2bbe9a4a796ee2b1521fb856cb0a
|
Shell
|
Wiredcraft/debian-packages
|
/packages/devops-etcd/build/DEBIAN/prerm
|
UTF-8
| 1,201
| 3.578125
| 4
|
[] |
no_license
|
#! /bin/sh
# prerm script for devops-etcd
#
# see: dh_installdeb(1)
set -e
USER=devops
# summary of how this script can be called:
# * <prerm> `remove'
# * <old-prerm> `upgrade' <new-version>
# * <new-prerm> `failed-upgrade' <old-version>
# * <conflictor's-prerm> `remove' `in-favour' <package> <new-version>
# * <deconfigured's-prerm> `deconfigure' `in-favour'
# <package-being-installed> <version> `removing'
# <conflicting-package> <version>
# for details, see /usr/doc/packaging-manual/
case "$1" in
remove|upgrade)
# Use timestamp to make database restoring easier
TIME=$(date +%Y-%m-%dT%H:%M:%S)
BACKUPDIR=$(mktemp -d -p /var/backups/ devops-etcd-$TIME.XXXXXX)
chown $USER:$USER $BACKUPDIR
service devops-etcd stop
cp -a /opt/devops/var/etcd $BACKUPDIR
chown -R root:root $BACKUPDIR
chmod 700 $BACKUPDIR
echo
echo The devops-etcd database has been backed up to $BACKUPDIR.
echo
;;
deconfigure)
;;
failed-upgrade)
;;
*)
echo "prerm called with unknown argument \`$1'" >&2
exit 1
;;
esac
#DEBHELPER#
exit 0
| true
|
0f6244f307b9d3e2db7b2f1d2e9d4ad2ee97fc66
|
Shell
|
f5devcentral/terraform-gcp-f5-sca
|
/demo/cis-kic/setup.sh
|
UTF-8
| 4,919
| 3.078125
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
#!/bin/bash
dir=${PWD}
GCP_PROJECT=$(gcloud config get-value project)
# expects sed/gcloud/jq best run in google cloud shell
# bigip
echo -n "Enter your bigip username and press [ENTER]: "
read BIGIP_ADMIN
secrets=$(gcloud secrets versions access latest --secret="bigip-secret")
BIGIP_PASS=$(echo $secrets | jq -r .pass)
echo "get big-ip info"
bigip1ExternalSelfIp=$(gcloud compute instances list --filter "demosca-f5vm01" --format json | jq .[0] | jq .networkInterfaces | jq -r .[0].networkIP)
bigip1ExternalNatIp=$(gcloud compute instances list --filter "demosca-f5vm01" --format json | jq .[0] | jq .networkInterfaces | jq -r .[0].accessConfigs[0].natIP)
bigip1MgmtIp=$(gcloud compute instances list --filter "demosca-f5vm01" --format json | jq .[0] | jq .networkInterfaces | jq -r .[2].networkIP)
# gke
echo "get GKE cluster info"
# cluster name
#gcloud container clusters list --filter "name:demosca*" --format json | jq .[].name
clusterName=$(gcloud container clusters list --filter "name:demosca*" --format json | jq -r .[].name)
# zone
#gcloud container clusters list --filter "name:demosca*" --format json | jq .[].zone
zone=$(gcloud container clusters list --filter "name:demosca*" --format json | jq -r .[].zone)
# cluster nodes
# gcloud compute instances list --filter "name:demosca*"-clu
# gcloud compute instances list --filter "name:demosca*"-clu --format json | jq .[].networkInterfaces[].accessConfigs[0].natIP
clusterNodesInt=$(gcloud compute instances list --filter "name:gke-demosca*" --format json | jq -r .[].networkInterfaces | jq -r .[0].networkIP)
clusterNodesExt=$(gcloud compute instances list --filter "name:gke-demosca*" --format json | jq -r .[].networkInterfaces[].accessConfigs[0].natIP)
# cluster creds
echo "get GKE cluster creds"
gcloud container clusters \
get-credentials $clusterName \
--zone $zone
# container connector
echo "set bigip-mgmtip $bigip1MgmtIp"
# f5-cluster-deployment-src.yaml > f5-cluster-deployment.yaml
cp cis/f5-cluster-deployment-src.yaml cis/f5-cluster-deployment.yaml
sed -i "s/-bigip-mgmt-address-/$bigip1MgmtIp/g" cis/f5-cluster-deployment.yaml
# deploy cis container
kubectl create secret generic bigip-login-kic -n kube-system --from-literal=username="${BIGIP_ADMIN}" --from-literal=password="${BIGIP_PASS}"
kubectl create serviceaccount k8s-bigip-ctlr-kic -n kube-system
kubectl create clusterrolebinding k8s-bigip-ctlr-clusteradmin-kic --clusterrole=cluster-admin --serviceaccount=kube-system:k8s-bigip-ctlr-kic
kubectl create -f cis/f5-cluster-deployment.yaml
# setup kic
# authorize docker to push custom images
echo "authorize docker to push to Google Container Registry"
gcloud auth configure-docker
# build kic+image for gcp
git clone https://github.com/nginxinc/kubernetes-ingress.git
cd kubernetes-ingress
git checkout tags/v1.9.1
# get secrets
echo "get secrets"
secrets=$(gcloud secrets versions access latest --secret="nginx-secret")
# install cert key
echo "setting info from Metadata secret"
# cert
cat << EOF > nginx-repo.crt
$(echo $secrets | jq -r .cert)
EOF
# key
cat << EOF > nginx-repo.key
$(echo $secrets | jq -r .key)
EOF
#cp ../nginx-plus-demos/licenses/nginx-repo.crt ../nginx-plus-demos/licenses/nginx-repo.key ./
echo "build kic container"
# standard
#make DOCKERFILE=DockerfileForPlus VERSION=v1.9.1 PREFIX=gcr.io/${GCP_PROJECT}/nginx-plus-ingress
# with app protect
make DOCKERFILE=appprotect/DockerfileWithAppProtectForPlus VERSION=v1.9.1 PREFIX=gcr.io/${GCP_PROJECT}/nginx-plus-ingress
cd $dir
# modify for custom registry
cp kic/nginx-plus-ingress-src.yaml kic/nginx-plus-ingress.yaml
sed -i "s/nginx-plus-ingress:1.9.1/gcr.io\/${GCP_PROJECT}\/nginx-plus-ingress:v1.9.1/g" kic/nginx-plus-ingress.yaml
# deploy nginx ingress
# namespace
kubectl create ns nginx-ingress
# service account
kubectl apply -f kic/nginx-plus-ingress-sa.yaml
# permissions
kubectl apply -f kic/nginx-plus-ingress-rbac.yaml
# default certificate
kubectl apply -f kic/default-server-secret.yaml
# custom nginx configmap
kubectl apply -f kic/nginx-config.yaml
# ingress deployment
kubectl apply -f kic/nginx-plus-ingress.yaml
# deploy service
kubectl apply -f kic/nginx-ingress.yml
# deploy application ingress
echo "set virtual address"
cp kic/cis-kic-ingress-src.yml kic/cis-kic-ingress.yml
sed -i "s/-external-virtual-address-/$bigip1ExternalSelfIp/g" kic/cis-kic-ingress.yml
kubectl apply -f kic/cis-kic-ingress.yml
# wait for pods
sleep 30
# finished
echo "check app at http://$bigip1ExternalNatIp"
# watch logs
# show ingress pods
kubectl -n nginx-ingress get pods -o wide
echo "type yes to tail the cis logs"
read answer
if [ $answer == "yes" ]; then
cisPod=$(kubectl get pods -n kube-system -o json | jq -r ".items[].metadata | select(.name | contains (\"k8s-bigip-ctlr-deployment-kic\")).name")
kubectl logs -f $cisPod -n kube-system | grep --color=auto -i '\[as3'
else
echo "Finished"
fi
echo "====Done===="
| true
|
bcdd2265955437fff0509f61a0855877b62eef74
|
Shell
|
beudbeud/dotfiles
|
/private_dot_config/rofi/scripts/executable_powermenu.sh
|
UTF-8
| 1,120
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/bash
#res=$(rofi -dmenu -p "menu:" -separator-style none -location 0 -width 250 -hide-scrollbar -padding 30 -font "UbuntuMono Nerd Font 18")
#
#if [ $res = "lock" ]; then
# /home/khoaduccao/.config/lock.sh
#fi
#if [ $res = "logout" ]; then
# i3-msg exit
#fi
#if [ $res = "suspend" ]; then
# systemctl suspend
#fi
#if [ $res = "restart" ]; then
# systemctl reboot
#fi
#if [ $res = "shutdown" ]; then
# systemctl poweroff
#fi
#exit 0
poweroff_command="systemctl poweroff"
reboot_command="systemctl reboot"
logout_command="/usr/bin/gnome-session-quit --logout --no-prompt"
lock_command="xdg-screensaver lock"
# you can customise the rofi command all you want ...
rofi_command="rofi"
options=$' Shutdown\n Reboot\n Logout\n Lockscreen'
# ... because the essential options (-dmenu and -p) are added here
command=$(echo "$options" | $rofi_command -dmenu -p "Powermenu" | cut -d' ' -f3)
if [ $? == 0 ]
then
case "${command}" in
"Shutdown") eval $poweroff_command;;
"Reboot") eval $reboot_command;;
"Logout") eval $logout_command;;
"Lockscreen") eval $lock_command;;
esac
fi
| true
|
a738b6a5752ee56b3799fb4d315045a49b07ce9b
|
Shell
|
yoonsung0711/microservices
|
/subprojects/service-registry/scripts/local/_4_stop-current-server-by-process-id.sh
|
UTF-8
| 730
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/bash
if [ $PID -ne 0 ]; then
if [ -n $ZSH_VERSION ]; then
printf $'\e[31m\n◼︎◼︎◼︎◼︎◼︎◼︎◼︎◼︎◼︎◼︎◼︎◼︎◼︎◼︎◼︎ stopping server ◼︎◼︎◼︎◼︎◼︎◼︎◼︎◼︎◼︎◼︎◼︎◼︎◼︎◼︎◼︎\n\n'
else
echo -e '\n◼︎◼︎◼︎◼︎◼︎◼︎◼︎◼︎◼︎◼︎◼︎◼︎◼︎◼︎◼︎ stopping server ◼︎◼︎◼︎◼︎◼︎◼︎◼︎◼︎◼︎◼︎◼︎◼︎◼︎◼︎◼︎\n'
fi
echo -e "▶︎ stop running process\n"
kill -15 $PID
echo -e " port: $CUR_PORT\n"
echo -e " process: $PID\n"
if [ -n $ZSH_VERSION ]; then
echo -e $'\e[0m'
fi
fi
| true
|
f904daf9b941b2231f2a074b27f84ae18cfa25ea
|
Shell
|
jduda27/Unix_Shell-Scripts
|
/ScriptsW3/script8
|
UTF-8
| 340
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
# reading values from a file
file="items.txt"
# $'\n' = new line character
# you can string multiple delimiters together to set multiple delimiters
# IFS is the variable that chooses the delimiter for reading in for loops
prevIFS=$IFS
IFS=$'\n'':'';'','
for item in $(cat $file)
do
echo "Processing $item"
done
IFS=$prevIFS
| true
|
8b310383a608ffd38569fefff22741ef255bc4f7
|
Shell
|
smittal6/cs251
|
/ass2/q2.sh
|
UTF-8
| 3,431
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
#Inputs n,m,d,file f,t and g
#Array for students[indexed at zero], with the value in the array being the day assigned to it
#array for number(by using percentages) of students on days.
#array to store the percentages from the file
#array for keeping track of actually allocated students to days[bascically array[day]++]
### Storing the required values
n=$1;
m=$2;
d=$3;
file=$4;
t=$5;
g=$6;
###Testing if input happening or not
#echo "$1 $2 $3 $5 $6"
###Found an amazing bash utility to shuffle list, shuf command
###Initializing the array
students="$(seq "$1" | shuf)"
tas="$(seq "$2" | shuf)"
nstud_day=()
count_left=0;
day_percentages=()
readarray day_percentages < "$4"
left=n-count;
###Test
#echo ${day_percentages[*]}
#echo "${students[*]}"
#echo "break"
#echo "${tas[*]}"
#echo "${nstud_day[*]}"
#echo "$count"
time_array=($5)
#time_array[0]="$t"
#for i in $(seq 1 $1)
# do time_array[i]="$(date -u -d "${time_array[i-1]}"+"$g" +%I:%Mpm)"
#done
for i in `seq 0 $1`;
#for getting the time array
do
oldt=${time_array[$i]}
newt=$(date -d $oldt+$6 +%I:%Mpm;);
time_array=(${time_array[@]} $newt);
# echo "${time_array[i]}"
done
awk -v n="$n" -v m="$m" -v d="$d" -v t="$t" -v g="$g" -v students1="${students[*]}" -v tas1="${tas[*]}" -v day_percentages1="${day_percentages[*]}" -v time1="${time_array[*]}" '
BEGIN{
split(day_percentages1,day_percentages," ")
split(students1,students, " ")
split(tas1,tas," ")
split(time1,timarr, " ")
#print time1;
#print timarr[3];
i=1
count=0
while(i<=d){
temp=int(day_percentages[i]*n*0.01)
if((day_percentages[i]*n*0.01)-temp>0.5){
nstud_day[i]=int(day_percentages[i]*n*0.01)+1;
count=count+nstud_day[i];
}
else{
nstud_day[i]=temp;
count=count+nstud_day[i];
}
i++;
}
i=1;
while(count!=n){
if(n-count>0){
nstud_day[i]++;
count++;
i++;
}
else{
nstud_day[i]--;
count--;
i++;
}
}
#nstud_day is in line with the percentages
tas_left=m
#Assign atleast each TA to a day.
i=1
#Array ntas_day will have Tas per day
while(i<=d){
ntas_day[i]=1
i++;
}
tas_left=m-d
i=1
while(i<=d){
tpd[i]=nstud_day[i]/ntas_day[i]
i++;
}
while (tas_left>0){
maxtpd=0
maxindex=1
for(i=1;i<=d;i++){
if(tpd[i]>=maxtpd){
maxindex=i;
maxtpd=tpd[i];
}
}
ntas_day[maxindex]++
tpd[maxindex]=(nstud_day[maxindex]/ntas_day[maxindex]);
tas_left--
}
#Testing the TAs per day
#for (i in ntas_day) {
#print ntas_day[i]
#}
#Day students
stu_index=1
counter=0
for(i=1;i<=d;i++){
counter=0;
printf "Day %s students:\n",i
while(counter<nstud_day[i]){
print students[stu_index];
stu_index++;
counter++;
}
}
#Day mappings
stu_index=1
ta_index=0
time_index=1
for(i=1;i<=d;i++){
equistudents=int(nstud_day[i]/ntas_day[i])
leftstudents=nstud_day[i]%ntas_day[i]
printf "Day %s mappings:\n",i
#time_index=1;
for(j=1;j<=ntas_day[i];j++){
time_index=1;
ta_index++;
for(k=1;k<=equistudents;k++){
printf "%s %s %s\n",tas[ta_index],students[stu_index],timarr[time_index]
stu_index++;
time_index++;
}
if (leftstudents>0){
printf "%s %s %s\n",tas[ta_index],students[stu_index],timarr[time_index];
leftstudents--;
stu_index++;
time_index++;
}
}
}
exit
}'
| true
|
1e91467229986d2b05c455657a2193bb92391263
|
Shell
|
a-suenami/koshucode-design
|
/toolkit/koshu-inout/temp/temp.sh
|
UTF-8
| 80
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/sh
for temp in TEMP-KOSHU-*; do
echo "A temporary file exists."
done
| true
|
adc3ed8f3f30cfa8d961b3d2707a4af85e211b0b
|
Shell
|
qiujianben/csi
|
/install.sh
|
UTF-8
| 5,045
| 3.6875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash --login
date -u +%Y-%m-%d_%H.%M.%S
debug=$2
csi_deploy_type=$1
os=$(uname -s)
# TODO: Check that all configs exist
# MAKE .EXAMPLE and actual file the same
# if they're the same size prompt user to
# configure
# TODO: install ansible in this script if not installed
# to take advantage of encrypted configs early on
function usage() {
echo $"Usage: $0 <aws|ruby-gem|virtualbox|virtualbox-gui|vmware-fusion|vmware-fusion-gui|vmware-workstation|vmware-workstation-gui|vsphere>"
date -u +%Y-%m-%d_%H.%M.%S
exit 1
}
if [[ $# != 1 ]] && [[ $# != 2 ]]; then
usage
fi
vagrant plugin install vagrant-reload
# cd /csi && cat ./vagrant_rsync_userland_configs.lst | while read userland_config; do
# if [[ `basename ${userland_config}` == 'vagrant.yaml' && ! -e $userland_config ]]; then
# echo "USERLAND YAML: ${userland_config} NOT FOUND...Copying DEFAULTS from ${userland_config}.EXAMPLE. Be sure to change default passwords!"
# cp $userland_config.EXAMPLE $userland_config
# fi
# done
# Dispatch on the requested deployment target. Each vagrant-backed arm first
# verifies its userland vagrant.yaml exists; $debug (any non-empty value)
# switches `vagrant up` into --debug mode.
case "$csi_deploy_type" in
  "aws")
    export CSI_PROVIDER="aws"
    if [[ -e "./etc/userland/aws/vagrant.yaml" ]]; then
      vagrant plugin install vagrant-aws
      vagrant plugin install vagrant-aws-dns
      # dummy box is the standard placeholder required by vagrant-aws
      vagrant box add dummy https://github.com/mitchellh/vagrant-aws/raw/master/dummy.box --force
      if [[ $debug == '' ]]; then
        vagrant up --provider=aws
      else
        vagrant up --provider=aws --debug
      fi
    else
      echo "ERROR: Missing vagrant.yaml Config"
      echo "Use ./etc/userland/aws/vagrant.yaml.EXAMPLE as a Template to Create ./etc/userland/aws/vagrant.yaml"
    fi
    ;;
  "kvm")
    # TODO: Coming soon
    echo "Coming soon..."
    ;;
  "ruby-gem")
    # Local (non-VM) install: reuse the packer provisioners directly.
    export CSI_PROVIDER="ruby-gem"
    ./packer/provisioners/rvm.sh
    ./packer/provisioners/ruby.sh
    ./packer/provisioners/csi.sh
    ;;
  "virtualbox"|"virtualbox-gui")
    if [[ -e "./etc/userland/virtualbox/vagrant.yaml" ]]; then
      export CSI_PROVIDER="virtualbox"
      if [[ $csi_deploy_type == "virtualbox-gui" ]]; then
        export VAGRANT_GUI="true"
      fi
      if [[ $debug == '' ]]; then
        vagrant up --provider=virtualbox
      else
        vagrant up --provider=virtualbox --debug
      fi
    else
      echo "ERROR: Missing vagrant.yaml Config"
      echo "Use ./etc/userland/virtualbox/vagrant.yaml.EXAMPLE as a Template to Create ./etc/userland/virtualbox/vagrant.yaml"
    fi
    ;;
  "vmware-fusion"|"vmware-fusion-gui"|"vmware-workstation"|"vmware-workstation-gui")
    if [[ -e "./etc/userland/vmware/vagrant.yaml" ]]; then
      export CSI_PROVIDER="vmware"
      # Pull the commercial plugin license path out of the userland YAML.
      license_file=$(ruby -e "require 'yaml'; print YAML.load_file('./etc/userland/vmware/vagrant.yaml')['vagrant_vmware_license']")
      vagrant plugin install vagrant-vmware-desktop
      # Quote the license path: an unquoted value containing spaces would split.
      vagrant plugin license vagrant-vmware-desktop "$license_file"
      case "$csi_deploy_type" in
        "vmware-fusion"|"vmware-fusion-gui")
          if [[ $csi_deploy_type == "vmware-fusion-gui" ]]; then
            export VAGRANT_GUI="true"
          fi
          if [[ $debug == '' ]]; then
            vagrant up --provider=vmware_fusion
          else
            vagrant up --provider=vmware_fusion --debug
          fi
          ;;
        "vmware-workstation"|"vmware-workstation-gui")
          if [[ $csi_deploy_type == "vmware-workstation-gui" ]]; then
            export VAGRANT_GUI="true"
          fi
          if [[ $debug == '' ]]; then
            vagrant up --provider=vmware_workstation
          else
            vagrant up --provider=vmware_workstation --debug
          fi
          ;;
      esac
    else
      echo "ERROR: Missing vagrant.yaml Config"
      echo "Use ./etc/userland/vmware/vagrant.yaml.EXAMPLE as a Template to Create ./etc/userland/vmware/vagrant.yaml"
    fi
    ;;
  "vsphere")
    export CSI_PROVIDER="vmware"
    # head -n 1 guards against find returning several matches: a multi-line
    # result would always fail the -e test below.
    vmx=$(find /csi/.vagrant/machines/default/ -name packer-vmware-iso.vmx | grep vmware | head -n 1)
    if [[ -e "$vmx" ]]; then
      # Test the pipeline status directly instead of inspecting $? afterwards
      # (and -q keeps the matched status line out of the log).
      if vagrant status | grep -q running; then
        vagrant halt
      fi
      vmx_basename=$(basename "$vmx")
      ova="$HOME/${vmx_basename}.ova"
      # Convert the powered-down VMX into an OVA ready for vSphere import.
      if ovftool "$vmx" "$ova"; then
        echo "vSphere Image: ${ova}"
        echo "Ready for deployment."
      else
        echo "There was an issue with the ovftool command."
        echo "Ensure the VM is powered down and ovftool is in your path (i.e. Symlink to /usr/local/bin)"
      fi
    else
      echo "ERROR: VMware VMX file for CSI is missing."
      echo "HINTS: Before running ${0} vsphere"
      echo "Run one of the following to deploy the local VMX necessary to create the vSphere OVA file:"
      echo "${0} vmware-fusion"
      echo "${0} vmware-fusion-gui"
      echo "${0} vmware-workstation"
      echo "${0} vmware-workstation-gui"
      echo -e "Implement all of your userland requirements, update your SSH keys (if applicable), and try again.\n"
      echo "Good Luck!"
    fi
    ;;
  *)
    usage
    ;;
esac
# Timestamp the end of the run (UTC).
date -u +%Y-%m-%d_%H.%M.%S
| true
|
cf5f28797199d873a07a986f99c509085a5c6a3e
|
Shell
|
appsembler/openedx-azure-devstack
|
/configure-openedx.sh
|
UTF-8
| 2,849
| 3.828125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Copyright (c) Microsoft Corporation. All Rights Reserved.
# Licensed under the MIT license. See LICENSE file on the project webpage for details.
# Bootstraps an Open edX devstack on an Azure VM: waits for networking,
# installs prerequisites, then runs the edx/configuration ansible playbook.
# print commands and arguments as they are executed
set -x
echo "starting open edx devstack provision on pid $$"
date
ps axjf
#############
# Parameters
#############
AZUREUSER=$1                  # admin username passed in by the Azure deployment
HOMEDIR="/home/$AZUREUSER"
VMNAME=$(hostname)            # $() instead of legacy backtick substitution
echo "User: $AZUREUSER"
echo "User home dir: $HOMEDIR"
echo "vmname: $VMNAME"
###################
# Common Functions
###################
#######################################
# Block until Azure networking is usable: wait up to 120s (1s polls) for this
# VM's hostname to resolve in DNS, then up to 120s (10s polls) for outbound
# HTTP to succeed.
# Globals:   VMNAME (read)
# Outputs:   progress and diagnostics to stdout
# Returns:   0 on success; exits 1 if DNS never resolves, 2 if no network
#######################################
ensureAzureNetwork()
{
  # ensure the host name is resolvable (DNS registration can lag VM boot)
  hostResolveHealthy=1
  for i in {1..120}; do
    # test the command status directly rather than inspecting $? afterwards
    if host "$VMNAME"
    then
      # hostname has been found, continue
      hostResolveHealthy=0
      echo "the host name resolves"
      break
    fi
    sleep 1
  done
  if [ $hostResolveHealthy -ne 0 ]
  then
    echo "host name does not resolve, aborting install"
    exit 1
  fi
  # ensure the network works
  networkHealthy=1
  for i in {1..12}; do
    if wget -O/dev/null http://bing.com
    then
      networkHealthy=0
      echo "the network is healthy"
      break
    fi
    sleep 10
  done
  if [ $networkHealthy -ne 0 ]
  then
    echo "the network is not healthy, aborting install"
    # dump interface state to aid post-mortem debugging
    ifconfig
    ip a
    exit 2
  fi
}
# Abort early (exit 1/2) unless DNS and outbound HTTP are working.
ensureAzureNetwork
###################################################
# Update Ubuntu and install prereqs
###################################################
time sudo apt-get -y update && sudo apt-get -y upgrade
time sudo apt-get install -y build-essential software-properties-common python-software-properties curl git-core libxml2-dev libxslt1-dev libfreetype6-dev python-pip python-apt python-dev libxmlsec1-dev swig
time sudo pip install --upgrade pip
time sudo pip install --upgrade virtualenv
###################################################
# Pin specific version of Open edX (named-release/cypress for now)
###################################################
export OPENEDX_RELEASE='named-release/cypress'
# Ansible overrides: pin platform components to $OPENEDX_RELEASE and point the
# configuration repo at appsembler's azureDeploy branch.
EXTRA_VARS="-e edx_platform_version=$OPENEDX_RELEASE \
-e certs_version=$OPENEDX_RELEASE \
-e forum_version=$OPENEDX_RELEASE \
-e xqueue_version=$OPENEDX_RELEASE \
-e configuration_version=appsembler/azureDeploy \
-e edx_ansible_source_repo=https://github.com/appsembler/configuration \
"
###################################################
# Download configuration repo and start ansible
###################################################
# NOTE(review): no error checks on cd/clone below -- a failed clone would let
# later commands run in the wrong directory; confirm whether that is intended.
cd /tmp
time git clone https://github.com/appsembler/configuration.git
cd configuration
time git checkout appsembler/azureDeploy
time sudo pip install -r requirements.txt
cd playbooks
# $EXTRA_VARS is deliberately unquoted so word splitting yields separate
# "-e key=value" arguments for ansible-playbook.
sudo ansible-playbook -i localhost, -c local vagrant-devstack.yml $EXTRA_VARS
date
echo "completed open edx devstack provision on pid $$"
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.