blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
0f895a41c7cb20f1588da6295e4ca4e99d28a1f0
|
Shell
|
adefaria/clearscm
|
/clients/HP/bin/unmount_nfs
|
UTF-8
| 2,469
| 3.984375
| 4
|
[] |
no_license
|
#!/bin/ksh
################################################################################
#
# File: unmount_nfs
# RCS: $Header:$
# Description: A script to unmount all nfs mounts. Note if the automounter is
# running then this script will first shutdown the automounter.
# This script returns 0 for success or non zero if it was unable
# to umount all nfs mounts. This script must run as root.
# Author: Andrew DeFaria, California Language Labs
# Created: Fri Jun 6 10:31:51 PDT 1997
# Modified:
# Language: Korn Shell
#
# (c) Copyright 2001, Andrew@DeFaria.com, all rights reserved
#
################################################################################
# Set me to command name
me=$(basename $0)
# Set adm_base
adm_base=${adm_base:-$HOME/adm}
# Set adm_fpath
adm_fpath=${adm_fpath:-$adm_base/functions}
# Source common functions (provides is_not_root, error, process_is_running)
. $adm_fpath/common
# Unmounting filesystems requires root privileges
if is_not_root; then
  error "This script must be run as root" 1
fi
integer automount_pid=$(process_is_running automount)
if [ $automount_pid -ne 0 ]; then
  print "Attempting to shutdown the automounter..."
  kill -15 $automount_pid
  print "Waiting for the automounter to shutdown..."
  integer max_tries=5
  integer wait_time=10
  while [ $max_tries -ne 0 ]; do
    sleep 10
    # Re-check the automounter once per iteration. (The original called
    # process_is_running a second time here and discarded the result;
    # that redundant call has been removed.)
    automount_pid=$(process_is_running automount)
    if [ $automount_pid -ne 0 ]; then
      print "The automounter ($automount_pid) is still running!"
      print "I will wait $max_tries more time\c"
      if [ $max_tries -gt 1 ]; then
        print -u2 "s\c"
      fi
      print ". Waiting $wait_time seconds..."
      sleep $wait_time
    else
      break
    fi
    let max_tries=max_tries-1
  done
fi
# Final check: did the automounter actually go away?
automount_pid=$(process_is_running automount)
if [ $automount_pid -ne 0 ]; then
  print "The automounter has not shutdown! Continuing..."
else
  print "The automounter has been shut down successfully"
  # Marker file so a companion (re)mount script knows to restart the automounter
  touch /etc/automounter_was_here
fi
print "\nAttempting to unmount all nfs mounts"
# HP-UX 9.x uses /etc/umount; later releases use umount -F nfs
if [ "$OS" = "09" ]; then
  /etc/umount -at nfs
else
  /usr/sbin/umount -a -F nfs
fi
# Any "nfs" line left in the mount table means an unmount failed
integer nfs_mounts_left=$(grep -c "nfs" /etc/mnttab)
if [ $nfs_mounts_left -eq 0 ]; then
  print "All nfs filesystems have been successfully unmounted!"
  exit 0
else
  print "There \c"
  if [ $nfs_mounts_left -eq 1 ]; then
    print "is one filesystem left mounted:\n"
  else
    print "are $nfs_mounts_left filesystems left mounted:\n"
  fi
  grep nfs /etc/mnttab
  exit 1
fi
| true
|
3f385a2f226f47cc9f8670a7daa64212753d608d
|
Shell
|
xformation/docker-builders
|
/confluence/setup-confluence-db.sh
|
UTF-8
| 702
| 3.1875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Create the Confluence database and role on a remote PostgreSQL server
# ($DB_HOST, port 5432, superuser password "postgres"), then optionally
# import a backup from /data.sql when one is present.
echo "******CREATING CONFLUENCE DATABASE******"
PGPASSWORD=postgres psql -h ${DB_HOST} -p 5432 --username postgres <<- EOSQL
CREATE USER confluence WITH PASSWORD 'confluence';
CREATE DATABASE confluence WITH ENCODING 'UNICODE' LC_COLLATE 'C' LC_CTYPE 'C' \
TEMPLATE template0;
GRANT ALL PRIVILEGES ON DATABASE confluence to confluence;
EOSQL
echo "******CONFLUENCE DATABASE CREATED******"
if [ -r '/data.sql' ]; then
  echo "**IMPORTING CONFLUENCE DATABASE BACKUP**"
  # NOTE(review): the original captured $! into SERVER and later ran
  # "kill $SERVER; wait $SERVER", but this script never starts a background
  # job, so $! was empty/stale. That dead code has been removed; the short
  # sleep before the import is preserved.
  sleep 2
  PGPASSWORD=postgres psql -h ${DB_HOST} -p 5432 --username postgres confluence < /data.sql
  echo "**Confluence DATABASE BACKUP IMPORTED***"
fi
| true
|
e9e2d79589d74e86e49e7a936e607a4c23d8599d
|
Shell
|
curusarn/dotfiles
|
/dotfiles_old/bin/last_system_update_apt
|
UTF-8
| 210
| 3.03125
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#!/bin/bash
# Prints the number of whole DAYS since the last apt package-list update,
# using the mtime of /var/lib/apt/lists as the "last update" timestamp.
# NOTE(review): the original also set log='/var/log/pacman.log' (pacman, not
# apt) but never used it -- removed as dead code.
secs_then=$(stat /var/lib/apt/lists --printf="%Y")
secs_now=$(date +%s)
echo $(( (secs_now - secs_then) / 86400 ))
| true
|
38affe2032bca43c0bfc9830c29228756d60f590
|
Shell
|
songzy12/HackerRank
|
/Shell/Bash/arithmetic_operations.sh
|
UTF-8
| 142
| 2.78125
| 3
|
[] |
no_license
|
# https://www.hackerrank.com/challenges/bash-tutorials---arithmetic-operations/problem
# Read one arithmetic expression from stdin, evaluate it with bc, and print
# the result rounded to three decimal places (no trailing newline).
# NOTE(review): the original piped the unquoted $exp through echo, so an
# expression containing '*' was glob-expanded by the shell before reaching
# bc. The expression is now passed quoted.
read -r exp
res=$(bc -l <<< "$exp")
printf "%.3f" "$res"
| true
|
b45c79cbe4f0a56df31a48331feeb88edca696b9
|
Shell
|
thecodingmachine/docker-images-php
|
/tests-suite/variant-apache.sh
|
UTF-8
| 3,402
| 3.421875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Test suite for the 'apache' image variants. Common settings (VARIANT, REPO,
# TAG_PREFIX, BRANCH, ...) come from ./config.
. ./config
# Skip the whole suite when the variant under test is not apache-based.
# NOTE: 'return' (not 'exit') works here because the runner sources this file.
if [[ $VARIANT != apache* ]]; then
echo "-- There is not an 'apache' variant"
return 0;
fi;
############################################################
## Run apache and try to retrieve var content
############################################################
# Verify an env var passed to the container (MYVAR) is visible to PHP.
test_displayVarInPhp() {
  local response
  response="$(curl -sq "http://localhost:${DOCKER1_PORT}/apache/" 2>&1)"
  assert_equals "foo" "$response" "MYVAR was not populate onto php"
}
############################################################
## Run apache with relative document root
############################################################
# Container 2 sets APACHE_DOCUMENT_ROOT to a relative path; root URL must serve it.
test_documentRootRelative() {
  local response
  response="$(curl -sq "http://localhost:${DOCKER2_PORT}/" 2>&1)"
  assert_equals "foo" "$response" "Apache document root (relative) does not work properly"
}
############################################################
## Run apache with absolute document root
############################################################
# Container 3 sets APACHE_DOCUMENT_ROOT to an absolute path; root URL must serve it.
test_documentRootAbsolute() {
  local response
  response="$(curl -sq "http://localhost:${DOCKER3_PORT}/" 2>&1)"
  assert_equals "foo" "$response" "Apache document root (absolute) does not work properly"
}
############################################################
## Run apache HtAccess
############################################################
# The .htaccess RewriteRule under /apache/htaccess/ must be honoured.
test_htaccessRewrite() {
  local response
  response="$(curl -sq "http://localhost:${DOCKER1_PORT}/apache/htaccess/" 2>&1)"
  assert_equals "foo" "$response" "Apache HtAccess RewriteRule was not applied"
}
############################################################
## Test PHP_INI_... variables are correctly handled by apache
############################################################
# PHP_INI_MEMORY_LIMIT=2G was passed to container 1; PHP must report it.
test_changeMemoryLimit() {
  local response
  response="$(curl -sq "http://localhost:${DOCKER1_PORT}/apache/echo_memory_limit.php" 2>&1)"
  assert_equals "2G" "$response" "Apache PHP_INI_MEMORY_LIMIT was not applied"
}
# Start three containers on random free ports, each exercising a different
# Apache configuration, then wait until they answer HTTP.
# Globals written: DOCKER{1,2,3}_PORT, DOCKER{1,2,3}_NAME (read by the tests
# and teardown_suite). Helpers unused_port/waitfor come from ./config.
setup_suite() {
# SETUP apache1
export DOCKER1_PORT="$(unused_port)"
export DOCKER1_NAME="test-apache1-${DOCKER1_PORT}"
docker run --name "${DOCKER1_NAME}" ${RUN_OPTIONS} --rm -e MYVAR=foo -e PHP_INI_MEMORY_LIMIT=2G -p "${DOCKER1_PORT}:80" -d -v "${SCRIPT_DIR}/assets/":/var/www/html \
"${REPO}:${TAG_PREFIX}${PHP_VERSION}-${BRANCH}-slim-${BRANCH_VARIANT}" > /dev/null
assert_equals "0" "$?" "Docker run failed"
# SETUP apache2 (relative APACHE_DOCUMENT_ROOT)
export DOCKER2_PORT="$(unused_port)"
export DOCKER2_NAME="test-apache2-${DOCKER2_PORT}"
docker run --name "${DOCKER2_NAME}" ${RUN_OPTIONS} --rm -e MYVAR=foo -e APACHE_DOCUMENT_ROOT=apache -p "${DOCKER2_PORT}:80" -d -v "${SCRIPT_DIR}/assets/":/var/www/html \
"${REPO}:${TAG_PREFIX}${PHP_VERSION}-${BRANCH}-slim-${BRANCH_VARIANT}" > /dev/null
assert_equals "0" "$?" "Docker run failed"
# SETUP apache3 (absolute APACHE_DOCUMENT_ROOT, assets mounted at /var/www/foo)
export DOCKER3_PORT="$(unused_port)"
export DOCKER3_NAME="test-apache3-${DOCKER3_PORT}"
docker run --name "${DOCKER3_NAME}" ${RUN_OPTIONS} --rm -e MYVAR=foo -e APACHE_DOCUMENT_ROOT=/var/www/foo/apache -p "${DOCKER3_PORT}:80" -d -v "${SCRIPT_DIR}/assets/":/var/www/foo \
"${REPO}:${TAG_PREFIX}${PHP_VERSION}-${BRANCH}-slim-${BRANCH_VARIANT}" > /dev/null
assert_equals "0" "$?" "Docker run failed"
# Let's wait for Apache to start
waitfor http://localhost:${DOCKER1_PORT}
waitfor http://localhost:${DOCKER2_PORT}
waitfor http://localhost:${DOCKER3_PORT}
}
# Stop all three containers (they auto-remove due to --rm); errors/output ignored.
teardown_suite() {
docker stop "${DOCKER1_NAME}" "${DOCKER2_NAME}" "${DOCKER3_NAME}" > /dev/null 2>&1
}
| true
|
10ec2d34bb328feb8f29657e93119685c3eb3bda
|
Shell
|
larsll/deepracer-for-cloud
|
/scripts/training/start.sh
|
UTF-8
| 5,472
| 3.765625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
source $DR_DIR/bin/scripts_wrapper.sh
usage(){
  # Print the command-line synopsis on stdout, then abort with status 1.
  cat <<USAGE
Usage: $0 [-w] [-q | -s | -r [n] | -a ] [-v]
 -w Wipes the target AWS DeepRacer model structure before upload.
 -q Do not output / follow a log when starting.
 -a Follow all Sagemaker and Robomaker logs.
 -s Follow Sagemaker logs (default).
 -v Updates the viewer webpage.
 -r [n] Follow Robomaker logs for worker n (default worker 0 / replica 1).
USAGE
  exit 1
}
# SIGINT handler: report the interruption and abort the whole script.
ctrl_c() {
  echo "Requested to stop."
  exit 1
}
trap ctrl_c INT
# Default log to follow after start-up; may be overridden by -a/-r/-s.
OPT_DISPLAY="SAGEMAKER"
while getopts ":whqsavr:" opt; do
case $opt in
w) OPT_WIPE="WIPE"
;;
q) OPT_QUIET="QUIET"
;;
s) OPT_DISPLAY="SAGEMAKER"
;;
a) OPT_DISPLAY="ALL"
;;
r) # Check if value is in numeric format.
# -r takes an OPTIONAL worker number: if the consumed token is not a
# number, default to worker 0 and decrement OPTIND so getopts re-reads
# that token as the next option/argument.
OPT_DISPLAY="ROBOMAKER"
if [[ $OPTARG =~ ^[0-9]+$ ]]; then
OPT_ROBOMAKER=$OPTARG
else
OPT_ROBOMAKER=0
((OPTIND--))
fi
;;
v) OPT_VIEWER="VIEWER"
;;
h) usage
;;
\?) echo "Invalid option -$OPTARG" >&2
usage
;;
esac
done
# Ensure Sagemaker's folder is there
if [ ! -d /tmp/sagemaker ]; then
sudo mkdir -p /tmp/sagemaker
sudo chmod -R g+w /tmp/sagemaker
fi
#Check if files are available
# Refuse to overwrite an existing model prefix in S3 unless -w was given.
S3_PATH="s3://$DR_LOCAL_S3_BUCKET/$DR_LOCAL_S3_MODEL_PREFIX"
S3_FILES=$(aws ${DR_LOCAL_PROFILE_ENDPOINT_URL} s3 ls ${S3_PATH} | wc -l)
if [[ "$S3_FILES" -gt 0 ]];
then
if [[ -z $OPT_WIPE ]];
then
echo "Selected path $S3_PATH exists. Delete it, or use -w option. Exiting."
exit 1
else
echo "Wiping path $S3_PATH."
aws ${DR_LOCAL_PROFILE_ENDPOINT_URL} s3 rm --recursive ${S3_PATH}
fi
fi
# Base compose file
# Optionally add the compose overlay that bind-mounts Robomaker logs locally.
if [ ${DR_ROBOMAKER_MOUNT_LOGS,,} = "true" ];
then
COMPOSE_FILES="$DR_TRAIN_COMPOSE_FILE $DR_DOCKER_FILE_SEP $DR_DIR/docker/docker-compose-mount.yml"
export DR_MOUNT_DIR="$DR_DIR/data/logs/robomaker/$DR_LOCAL_S3_MODEL_PREFIX"
mkdir -p $DR_MOUNT_DIR
else
COMPOSE_FILES="$DR_TRAIN_COMPOSE_FILE"
fi
# set evaluation specific environment variables
STACK_NAME="deepracer-$DR_RUN_ID"
export DR_CURRENT_PARAMS_FILE=${DR_LOCAL_S3_TRAINING_PARAMS_FILE}
# prepare-config.py writes the Robomaker config(s) to S3 and echoes them.
WORKER_CONFIG=$(python3 $DR_DIR/scripts/training/prepare-config.py)
if [ "$DR_WORKERS" -gt 1 ]; then
echo "Starting $DR_WORKERS workers"
# Non-swarm multi-worker runs need a shared comms dir and an extra overlay.
if [[ "${DR_DOCKER_STYLE,,}" != "swarm" ]];
then
mkdir -p $DR_DIR/tmp/comms.$DR_RUN_ID
rm -rf $DR_DIR/tmp/comms.$DR_RUN_ID/*
COMPOSE_FILES="$COMPOSE_FILES $DR_DOCKER_FILE_SEP $DR_DIR/docker/docker-compose-robomaker-multi.yml"
fi
if [ "$DR_TRAIN_MULTI_CONFIG" == "True" ]; then
export MULTI_CONFIG=$WORKER_CONFIG
echo "Multi-config training, creating multiple Robomaker configurations in $S3_PATH"
else
echo "Creating Robomaker configuration in $S3_PATH/$DR_LOCAL_S3_TRAINING_PARAMS_FILE"
fi
export ROBOMAKER_COMMAND="./run.sh multi distributed_training.launch"
else
export ROBOMAKER_COMMAND="./run.sh run distributed_training.launch"
echo "Creating Robomaker configuration in $S3_PATH/$DR_LOCAL_S3_TRAINING_PARAMS_FILE"
fi
# Check if we are using Host X -- ensure variables are populated
if [[ "${DR_HOST_X,,}" == "true" ]];
then
if [[ -n "$DR_DISPLAY" ]]; then
ROBO_DISPLAY=$DR_DISPLAY
else
ROBO_DISPLAY=$DISPLAY
fi
# Probe the X server cheaply; xset q fails fast when no server answers.
if ! DISPLAY=$ROBO_DISPLAY timeout 1s xset q &>/dev/null; then
echo "No X Server running on display $ROBO_DISPLAY. Exiting"
exit 0
fi
if [[ -z "$XAUTHORITY" ]]; then
export XAUTHORITY=~/.Xauthority
if [[ ! -f "$XAUTHORITY" ]]; then
echo "No XAUTHORITY defined. .Xauthority does not exist. Stopping."
exit 0
fi
fi
fi
# Check if we will use Docker Swarm or Docker Compose
# Swarm mode requires at least one node labelled for each service role.
if [[ "${DR_DOCKER_STYLE,,}" == "swarm" ]];
then
ROBOMAKER_NODES=$(docker node ls --format '{{.ID}}' | xargs docker inspect | jq '.[] | select (.Spec.Labels.Robomaker == "true") | .ID' | wc -l)
if [[ "$ROBOMAKER_NODES" -eq 0 ]];
then
echo "ERROR: No Swarm Nodes labelled for placement of Robomaker. Please add Robomaker node."
echo " Example: docker node update --label-add Robomaker=true $(docker node inspect self | jq .[0].ID -r)"
exit 0
fi
SAGEMAKER_NODES=$(docker node ls --format '{{.ID}}' | xargs docker inspect | jq '.[] | select (.Spec.Labels.Sagemaker == "true") | .ID' | wc -l)
if [[ "$SAGEMAKER_NODES" -eq 0 ]];
then
echo "ERROR: No Swarm Nodes labelled for placement of Sagemaker. Please add Sagemaker node."
echo " Example: docker node update --label-add Sagemaker=true $(docker node inspect self | jq .[0].ID -r)"
exit 0
fi
DISPLAY=$ROBO_DISPLAY docker stack deploy $COMPOSE_FILES $STACK_NAME
else
DISPLAY=$ROBO_DISPLAY docker-compose $COMPOSE_FILES -p $STACK_NAME --log-level ERROR up -d --scale robomaker=$DR_WORKERS
fi
# Viewer
if [ -n "$OPT_VIEWER" ]; then
(sleep 5; dr-update-viewer)
fi
# Request to be quiet. Quitting here.
if [ -n "$OPT_QUIET" ]; then
exit 0
fi
# Trigger requested log-file
if [[ "${OPT_DISPLAY,,}" == "all" && -n "${DISPLAY}" && "${DR_HOST_X,,}" == "true" ]]; then
dr-logs-sagemaker -w 15
if [ "${DR_WORKERS}" -gt 1 ]; then
for i in $(seq 1 ${DR_WORKERS})
do
dr-logs-robomaker -w 15 -n $i
done
else
dr-logs-robomaker -w 15
fi
elif [[ "${OPT_DISPLAY,,}" == "robomaker" ]]; then
dr-logs-robomaker -w 15 -n $OPT_ROBOMAKER
elif [[ "${OPT_DISPLAY,,}" == "sagemaker" ]]; then
dr-logs-sagemaker -w 15
fi
| true
|
c0855b773776799dffbad8408492dc682eefbf0f
|
Shell
|
prydzynski/OpenVAS-Automation
|
/init_vas.sh
|
UTF-8
| 414
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
# Kick off ./vas.init on each OpenVAS scanner listed (first CSV column) in a
# user-supplied file, via ssh as root.
clear
echo "--------------------------------"
echo "--------Input File Name---------"
# -r keeps backslashes literal in the typed path
read -r -p "Please enter the file path and name [/path/to/file.csv]: " in_file
# Read on fd 10 so the ssh below cannot consume the loop's input stream.
while read -u10 -r line
do
  # First comma-separated field is the scanner host name / address.
  scanner=$(printf '%s\n' "$line" | awk -F',' '{ print $1 }')
  echo "Initializing $scanner"
  ssh -i /root/Documents/openvas/.slavekey.priv "root@$scanner" "
  ./vas.init &>/dev/null &
  disown
  "
done 10<"$in_file"
echo "Back"
| true
|
59925d50fa296ef81acb19a8a353cf22755ab009
|
Shell
|
ioppermann/munin-contrib
|
/plugins/network/ethtool_
|
UTF-8
| 1,847
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/sh
# -*- sh -*-
#
# Plugin to monitor unusual activity/errors from ethernet driver/hardware.
#
# Parameters:
#
# config (required)
# autoconf (optional - only used by munin-config)
#
# Environment variables (optional):
#
# IGNORED_FIELDS: comma-separated list of fields (or field endings) to ignore
# WARN: regex defining fields which trigger 'warning' on non-zero values
# CRITICAL: regex defining fields which trigger 'critical' on non-zero values
#
# Author: Ted Dumitrescu (ted@mixpanel.com, webdev@cmme.org)
#
# Magic markers (optional - used by munin-config and some installation
# scripts):
#%# family=auto
#%# capabilities=autoconf
# Interface name is encoded in the plugin's filename: ethtool_<iface>
INTERFACE=${0##*ethtool_}
ETHTOOL="ethtool -S $INTERFACE"
# Defaults for the optional environment overrides.
# NOTE(review): the original tested [ -z $VAR ] unquoted, which fails with
# "too many arguments" when the variable contains whitespace; now quoted.
if [ -z "$IGNORED_FIELDS" ]; then
    IGNORED_FIELDS='packets,bytes,broadcast,multicast,long_byte_count,offload_good,tcp_seg_good,smbus'
fi
if [ -z "$WARN" ]; then
    WARN='tx_.*restart.*'
fi
if [ -z "$CRITICAL" ]; then
    CRITICAL='(rx_no_buffer_count|rx_missed_errors|rx_queue_.+_drops)'
fi
# Turn the comma list into a \|-separated sed alternation.
TO_REMOVE=$(echo "$IGNORED_FIELDS" | sed 's/,/\\|/g')
# sed program: drop ethtool's header line, strip leading blanks, delete
# any counter whose name matches an ignored field.
STRIP_OUTPUT="1d; s/^[ \t]*//; /\($TO_REMOVE\):/d"
# Munin 'autoconf' capability: answer yes only when ethtool can actually
# read statistics for this interface.
if [ "$1" = "autoconf" ]; then
$ETHTOOL 2>/dev/null >/dev/null
if [ $? -ne 0 ]; then
echo no
exit 1
else
echo yes
exit 0
fi
fi
# Munin 'config' mode: emit graph metadata plus one DERIVE series per
# remaining ethtool counter. Fields matching $WARN/$CRITICAL get a 0:0
# threshold so any non-zero rate raises a warning/critical alert.
if [ "$1" = "config" ]; then
echo "graph_title Ethtool $INTERFACE"
echo 'graph_args -l 0 --base 1000'
echo 'graph_category network'
echo 'graph_period second'
echo 'graph_info Unusual network hardware activity from ethtool'
# NOTE(review): $WARN/$CRITICAL are passed unquoted to awk -v; a regex
# containing whitespace would word-split -- confirm before changing.
$ETHTOOL | sed "$STRIP_OUTPUT" | awk -F: -v WARN=$WARN -v CRITICAL=$CRITICAL '
{
printf("%s.label %s\n%s.type DERIVE\n%s.min 0\n", $1, $1, $1, $1);
if ($1 ~ "^"WARN) {
printf("%s.warning 0:0\n", $1);
} else if ($1 ~ "^"CRITICAL) {
printf("%s.critical 0:0\n", $1);
}
}'
exit 0
fi
# Default (fetch) mode: print each counter as "<field>.value <n>".
$ETHTOOL | sed "$STRIP_OUTPUT; s/:/.value/"
| true
|
0e1ebbc29c6b046f63e2646f39117786d1927c38
|
Shell
|
Rong419/ConstantDistanceOperator
|
/optimization/other_three_datasets/template/write_xml.sh
|
UTF-8
| 301
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/sh
# Generate per-replicate XML inputs: for each dataset and parameterisation,
# stamp the simulation id into the template XML and write one file per
# replicate under ./xml/.
# NOTE(review): the original relied on {a,b,c} and {1..30} brace expansion,
# which is a bashism and does not expand under a POSIX #!/bin/sh (e.g. dash);
# replaced with POSIX-compliant lists and counters.
for data in anolis Shankarappa RSV2
do
  param=1
  while [ "$param" -le 2 ]
  do
    sim=1
    while [ "$sim" -le 30 ]
    do
      sed "s/FILE/${param}_${sim}/g" "./${data}_${param}.xml" > "./xml/${data}_${param}_${sim}.xml"
      echo "use ${data}_${param}.xml to write ./${data}_${param}_${sim}.xml"
      sim=$((sim + 1))
    done
    param=$((param + 1))
  done
done
| true
|
c5426402fb30f54eb285a062e1302b4330d46ccf
|
Shell
|
plesager/ece3-postproc
|
/reproducibility/collect_ens.sh
|
UTF-8
| 3,797
| 4
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Purpose
# =======
# Extract the Reichler & Kim performance indices for a set of variables and
# from all members of an ensemble.
# The result is one file per variable, format expected by the R scripts.
#
# Option to tar the results with the 2x2 climatology of all runs
# Author: Martin Ménégoz
# Updated by Francois Massonnet (last update: November 2017)
#
# P. Le Sager (Jan 2018) - added to and adapted for the ece3-postproc tools suite package
set -eu
# Print the synopsis and option summary on stdout (does not exit).
usage()
{
    cat <<EOF

Usage: ${0##*/} [-t] STEM NB_MEMBER YEAR_START YEAR_END

 STEM : 3-letters STEM name of the ENSEMBLE experiments (\${STEM}1, \${STEM}2, ...)
 NB_MEMBER : number of members (maximum 9)
 YEAR_START : start year of the run
 YEAR_END : end year of the run

Options are:
 -t : tar (in your \$SCRATCH) the PI index results, with the 2x2 climatology, for easy sharing.
EOF
}
# -t : also tar the results plus climatologies into $SCRATCH
do_tar=0
while getopts "h?t" opt; do
case "$opt" in
h|\?)
usage
exit 0
;;
t)
do_tar=1
;;
*)
usage
exit 1
esac
done
shift $((OPTIND-1))
# --- Check and store args
# Exactly four positional arguments are required after option parsing.
if [ $# -ne 4 ]
then
echo; echo "*EE* not enough arguments !!"; echo
usage
exit 1
fi
# --- Validate the four positional arguments before using them.
# NOTE(review): the original STEM pattern used the range [a-Z], which is an
# invalid/undefined character range in ASCII order ('a' > 'Z'); replaced by
# an explicit alphanumeric-plus-underscore class with the same intent.
if [[ ! $1 =~ ^[A-Za-z_0-9]{3}$ ]]
then
    echo; echo "*EE* argument STEM name (=$1) should be a 3-character string"; echo
    usage
    exit 1
fi
if [[ ! $2 =~ ^[1-9]$ ]]
then
    echo ;echo "*EE* argument NB_MEMBER (=$2) should be between 1 and 9"; echo
    usage
    exit 1
fi
if [[ ! $3 =~ ^[0-9]{4}$ ]]
then
    echo ;echo "*EE* argument YEAR_START (=$3) should be a 4-digit integer"; echo
    usage
    exit 1
fi
if [[ ! $4 =~ ^[0-9]{4}$ ]]
then
    echo; echo "*EE* argument YEAR_END (=$4) should be a 4-digit integer"; echo
    usage
    exit 1
fi
# Stash validated arguments under readable names.
root=$1
nb=$2
year1=$3
year2=$4
# --- Get location templates for the tables to parse, climatology, and output generated here
. $ECE3_POSTPROC_TOPDIR/conf/$ECE3_POSTPROC_MACHINE/conf_ecmean_${ECE3_POSTPROC_MACHINE}.sh
STEMID=$root # to be eval'd
REPRODIR=$(eval echo ${ECE3_POSTPROC_PI4REPRO}) # ensemble table dir
mkdir -p $REPRODIR
# --- Location of Tables and 2x2 climatologies from EC-mean
# Sets the globals TABLEXP and CLIMDIR for the member EXP passed as $1, by
# eval'ing the path templates from the machine config sourced above.
eval_loc() {
# eval the general path definitions found in conf_ecmean_${ECE3_POSTPROC_MACHINE}.sh
! (( $# == 1 )) && echo "*EE* eval_loc requires EXP argument" && exit 1
local EXPID=$1
TABLEXP=$(eval echo ${ECE3_POSTPROC_DIAGDIR})/table/$1
[[ -z "${CLIMDIR0:-}" ]] \
&& CLIMDIR=$(eval echo ${ECE3_POSTPROC_POSTDIR})/clim-${year1}-${year2} \
|| CLIMDIR=$(eval $CLIMDIR0)
}
# --- Extract PIs into one file per variable
# For every variable, append one PI value per ensemble member (last matching
# row, second column of each member's PI2_RK08 table).
var2d="t2m msl qnet tp ewss nsss SST SSS SICE T U V Q"
for var in ${var2d}
do
# 'eval echo {1..$nb}' is needed because brace expansion happens before
# variable expansion; a plain {1..$nb} would stay literal.
for k in $(eval echo {1..$nb})
do
eval_loc ${root}${k}
cat ${TABLEXP}/PI2_RK08_${root}${k}_${year1}_${year2}.txt | grep "^${var} " | \
tail -1 | \
awk {'print $2'} >> $REPRODIR/${root}_${year1}_${year2}_${var}.txt
done
done
# --- Archive everything needed for comparison with another ensemble
if (( do_tar ))
then
arch=$SCRATCH/reprod-${root}-${year1}-${year2}.tar
cd $REPRODIR/..
tar -cvf $arch ${root}
for k in $(eval echo {1..$nb})
do
eval_loc ${root}${k}
cd $CLIMDIR/../../..
# expected path, which limits what climdir can be in EC-Mean
totar=${root}${k}/post/clim-${year1}-${year2}
if [[ -d $totar ]]
then
tar --append -vf $arch ${root}${k}/post/clim-${year1}-${year2}
else
echo "*EE* 2x2 EC-mean climatology $totar is missing!!"
fi
done
gzip $arch
fi
| true
|
12a410f95e46a644e70e00cb9b444fbfb1255c7c
|
Shell
|
CHENHERNGSHYUE/shell
|
/0219/awk_test/calculate.sh
|
UTF-8
| 270
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
add(){
  # Sum two numeric arguments (ints or floats) with awk.
  # NOTE(review): the original piped "echo $1 $2" unquoted into awk, so
  # arguments were subject to word-splitting/globbing; -v passes them safely.
  awk -v a="$1" -v b="$2" 'BEGIN { print a + b }'
}
minus(){
  # Absolute difference of two numeric arguments (always non-negative).
  # Same awk -v quoting fix as add() above.
  awk -v a="$1" -v b="$2" 'BEGIN { if (a > b) print a - b; else print b - a }'
}
# Interactive menu: the user picks an operation, which is applied to the two
# script arguments ($1, $2); any other selection re-prompts.
select i in "add" "minus"
do
case $i in
add)
add $1 $2
exit
;;
minus)
minus $1 $2
exit
;;
*)
echo no idea
;;
esac
done
| true
|
fd9433510858860f673f054552ee6f1c24766581
|
Shell
|
scallacs/fstricks
|
/resources/scripts/archive_logs.sh
|
UTF-8
| 409
| 3.046875
| 3
|
[] |
no_license
|
# Archive the fstricks application logs into a dated tarball under the backup
# folder, then clear the live log directory.
# NOTE(review): the original used BACKUP_FOLDER="~/backup/..." -- tilde does
# not expand inside quotes, so mkdir created a literal './~' directory tree.
# Use $HOME explicitly instead.
BACKUP_FOLDER="$HOME/backup/fstricks/logs"
#COMPRESSOR_BIN = "C:\Program Files\WinRar\rar.exe"
COMPRESSOR_BIN="tar"
LOGS_FOLDER="/var/www/fstricks/logs"
ARCHIVE_FILE="${BACKUP_FOLDER}/$(date +%Y-%m-%d).tar.gz"
echo "Creating backup folder: $BACKUP_FOLDER"
# Database backup
mkdir -p "$BACKUP_FOLDER"
# Only wipe the live logs when the archive was written successfully
# (the original deleted them unconditionally, even if tar failed).
if "$COMPRESSOR_BIN" cfzv "$ARCHIVE_FILE" "$LOGS_FOLDER"; then
  rm -rf "${LOGS_FOLDER:?}"/*
  echo "Logs archiving done!"
else
  echo "Archiving failed; logs were NOT deleted." >&2
fi
| true
|
28eb9c1481a81e8efd2f5e9f1b1ea726ad3b8a23
|
Shell
|
wizzdev-pl/esp32_examples
|
/scripts/upload_all.sh
|
UTF-8
| 1,396
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
# Upload MicroPython firmware, scripts, and (optionally) certificates to an
# ESP32, activating the project's virtualenv first when necessary.
set -e
USERDIR=$(pwd)
BASEDIR=$(dirname "$0")
cd "$BASEDIR"
cd ./../ # In MicroPython dir
if [[ -n "$VIRTUAL_ENV" ]]; then # TODO: Better solution to check if is in venv
  INVENV=1
else
  INVENV=0
fi
# NOTE(review): the original used `test "$INVENV"`, which is true for both
# "1" and "0" (any non-empty string), so the venv was never activated.
# Compare the flag value explicitly instead.
if [[ "$INVENV" -eq 1 ]]; then
  echo "Already in venv"
else
  if source ./venv/bin/activate; then
    echo "Entered virtual env"
  else
    echo "Virtual env not found. Use setup.sh script or create new environment manually!"
    exit 2
  fi
fi
# create_default_config.py succeeds only when it creates a NEW config; in
# that case stop so the user can fill it in before re-running.
if python3 scripts/create_default_config.py; then
  echo "Created new config file at MicroPython/src/config.json"
  echo "Fields that need to be filled for minimum viable configuration are: ssid, password ..."
  echo "...for use with local MQTT broker: local_endpoint, client_id, topic"
  echo "...for use with AWS: aws_endpoint, client_id, topic, use_aws"
  echo "Fill config with proper data and rerun 'upload_all.sh' script"
  exit 0
fi
echo "Uploading MicroPython..."
scripts/upload_micropython.sh
sleep 1 # Wait for ESP32 serial to reset
echo "Uploading Script files..."
scripts/upload_scripts.sh
# Certificates are optional; upload only when the full set is present.
if test -f "cert/cacert.pem" && test -f "cert/priv.key" && test -f "cert/cert.crt"; then
  echo "Uploading Certificates..."
  scripts/upload_certs.sh
else
  echo "No certificates found in ./cert/"
  echo "No certificates will be uploaded"
fi
esptool.py --chip esp32 --before default_reset run
cd "$USERDIR"
| true
|
981b2da4e9ed026ce4144dda577aecb55acfcaf4
|
Shell
|
dejayou/dotfiles
|
/home/.bin/rebase_branch
|
UTF-8
| 834
| 3.96875
| 4
|
[] |
no_license
|
#!/bin/bash
# Rebase the current git branch onto an up-to-date base branch (default:
# master), stashing and restoring any uncommitted changes around the rebase.
if [[ $1 = '-v' ]]; then
  # NOTE(review): the original echoed the unquoted word [branch=master],
  # which the shell treats as a glob pattern; quoting keeps it literal.
  echo 'Usage: rebase_branch [branch=master]'
  exit
fi
# MAIN_BRANCH will use the first parameter or default to 'master'
MAIN_BRANCH=${1-master}
BRANCH=$(git symbolic-ref HEAD | sed 's#refs/heads/##')
# Non-empty when the working tree has uncommitted (tracked) changes.
BRANCH_HAS_DIRTY_FILES=$(git diff-index --name-only HEAD --)
# Quoted RHS: compare literally rather than as a glob pattern.
if [[ "$MAIN_BRANCH" = "$BRANCH" ]]; then
  echo "You are already on the $BRANCH branch"
  exit
fi
echo "Rebasing branch $BRANCH using $MAIN_BRANCH"
if [[ -n "$BRANCH_HAS_DIRTY_FILES" ]]; then
  echo "Stashing uncommitted changes"
  git stash
fi
echo "Checkout $MAIN_BRANCH"
git checkout "$MAIN_BRANCH"
echo "Pulling..."
git pull
echo "Re-checking out $BRANCH"
git checkout "$BRANCH"
echo "All your rebase is belong to Wong"
git rebase "$MAIN_BRANCH"
if [[ -n "$BRANCH_HAS_DIRTY_FILES" ]]; then
  echo "Unstashing the changes"
  git stash pop
fi
| true
|
de6fb5a8a408c07e21f5609787bbf0d459f06ae5
|
Shell
|
jianguda/repairnator
|
/resources/scripts/check_commit_branch.sh
|
UTF-8
| 1,497
| 4.3125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# For every remote branch of a repository (path or clone URL in $1), count
# its commits and report branches whose count is not 3 or 4.
if [ "$#" -ne 1 ]; then
echo "Usage: ./check_commit_branch.sh <Github repository>"
exit -1
fi
# Temp files carry results out of the pipeline subshell below (variables set
# inside a pipeline stage do not survive into the parent shell).
TMP_FILE_OLDBRANCHES=/tmp/oldbranches_`uuidgen`
TMP_FILE_COUNTER=/tmp/counter_`uuidgen`
# $1 may be a local checkout (directory) or a URL to clone into a temp dir.
if [ ! -d "$1" ]; then
TMP_GIT_DIR=/tmp/clean_repo_`uuidgen`
REPO=$1
mkdir $TMP_GIT_DIR
cd $TMP_GIT_DIR
git clone $REPO $TMP_GIT_DIR
else
TMP_GIT_DIR=$1
cd $TMP_GIT_DIR
fi
# NOTE(review): in the local-directory branch, $? reflects the final 'cd',
# not a clone, so this check only really covers the clone path.
if [[ $? != 0 ]]
then
echo "Error while cloning"
exit 1
fi
# The '[[ 1 -eq 1 ]]' wrapper exists only to group the loop and the result
# writes in one pipeline stage, so COUNTER/OLD_BRANCHES accumulate in the
# same subshell that writes the temp files.
git for-each-ref --shell --format="branchname=%(refname:strip=3)" refs/remotes | \
if [[ 1 -eq 1 ]]; then
while read entry
do
# Each input line is a shell assignment like branchname='...'
eval "$entry"
if [ "$branchname" == "master" ]; then
echo "Master branch ignored"
elif [ "$branchname" == "HEAD" ]; then
echo "Head ref ignored"
else
echo "Treating branch $branchname"
git checkout $branchname
NB_COMMIT=`git rev-list --count HEAD`
if [ $NB_COMMIT != "3" ] && [ $NB_COMMIT != "4" ]; then
echo "The number of commit is not 3 or 4 but : $NB_COMMIT"
export OLD_BRANCHES="$OLD_BRANCHES $branchname"
export COUNTER=$((COUNTER+1))
fi
fi
done
echo $COUNTER > $TMP_FILE_COUNTER
echo $OLD_BRANCHES > $TMP_FILE_OLDBRANCHES
fi
# Read the results back in the parent shell.
COUNTER=$(cat $TMP_FILE_COUNTER)
OLD_BRANCHES=$(cat $TMP_FILE_OLDBRANCHES)
echo "The following $COUNTER branches have a wrong number of commits: $OLD_BRANCHES"
| true
|
dd22bd03d07f453d96da9e7926734a69f54395d4
|
Shell
|
deepcoder42/mysql-db-admin
|
/test/unit/sonarqube_code_coverage.sh
|
UTF-8
| 2,043
| 2.859375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Unit test code coverage for SonarQube to cover all modules.
# Runs the Python coverage module against every unit-test module in order,
# then produces combined text and XML reports showing tested/skipped lines.
coverage erase
echo ""
echo "Running unit test modules in conjunction with coverage"
# One entry per unit-test module, executed in this exact order.
test_modules=(
  _process_json
  _process_non_json
  help_message
  analyze
  check
  checksum
  detect_dbs
  listdbs
  optimize
  process_request
  proc_all_dbs
  proc_all_tbls
  proc_some_tbls
  run_check
  run_checksum
  run_optimize
  run_analyze
  main
  run_program
  status
)
for module in "${test_modules[@]}"; do
  coverage run -a --source=mysql_db_admin "test/unit/mysql_db_admin/${module}.py"
done
echo ""
echo "Producing code coverage report"
coverage combine
coverage report -m
coverage xml -i
| true
|
af7a3a7b206a382f785ba5c3c9008975e934046f
|
Shell
|
MultiValue-University/xdemo-uv-lin
|
/scripts/mvu_rfs_app_setup.sh
|
UTF-8
| 9,820
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/sh
#################################################################
# This script is created for the purpose of excising the UVRFS
# related demos provided Rocket Software Multi-Value University.
#################################################################
# check the usage
# NOTE(review): [ ... -a ... ] and [ ... -o ... ] are obsolescent test
# operators; kept as-is since this script targets a legacy /bin/sh.
if [ "$1" != "install" -a "$1" != "clean" ]
then
echo "Usage $0 [install | clean ]"
exit 1
fi
# check if UniVerse is installed
if [ ! -f /.uvhome ]
then
echo "It appears that this machine does not have an UniVerse installation."
exit 1
fi
# check if the installation has 'showuv'
#
uvhomedir=`cat /.uvhome`
if [ ! -f $uvhomedir/bin/showuv ]
then
echo "Unable to find the UniVerse bin/showuv."
exit 1
fi
# run showuv to see if 'uvsm' ( the main UVRFS daemon process) is running
# NOTE(review): '-v' appears twice in the second grep ("grep -v uvsmm -v");
# presumably only one exclusion of uvsmm was intended -- confirm.
uvsm=`$uvhomedir/bin/showuv | grep uvsm | grep -v uvsmm -v`
if [ "$uvsm" = "" ]
then
echo "Please start UV-12.1.1 database and try this script again."
exit 1
fi
# check if the current directory has VOC file
if [ ! -f VOC -o ! -d BP ]
then
echo "Unable to find the VOC file or BP directory under the current directory."
exit 1
fi
#adding $uvhomedir/bin to the current path
export PATH="$PATH:$uvhomedir/bin/"
# "clean" mode: delete every VOC paragraph, program and data file that the
# install step created, then exit.
if [ "$1" = "clean" ]
then
echo "Deleting PA START_2_000_000"
UVdelete VOC START_2_000_000
echo "Deleting PA START_2_050_010"
UVdelete VOC START_2_050_010
echo "Deleting PA START_2_500_010"
UVdelete VOC START_2_500_010
echo "Deleting PA START_4_000_000"
UVdelete VOC START_4_000_000
echo "Deleting PA START_4_050_010"
UVdelete VOC START_4_050_010
echo "Deleting PA START_4_500_010"
UVdelete VOC START_4_500_010
echo "Deleting PA STOPTEST"
UVdelete VOC STOPTEST
echo "Removing BP/TESTBP"
rm -f BP/TESTBP
echo "Removing media_test.sh"
rm -f media_test.sh
# Only delete a TESTn file when both the data file and its dictionary
# (D_TESTn) exist, i.e. when it really is a UniVerse hashed file.
if [ -f TEST1 -a -f D_TEST1 ]
then
echo "Deleting TEST1"
$uvhomedir/bin/delete.file TEST1
fi
if [ -f TEST2 -a -f D_TEST2 ]
then
echo "Deleting TEST2"
$uvhomedir/bin/delete.file TEST2
fi
if [ -f TEST3 -a -f D_TEST3 ]
then
echo "Deleting TEST3"
$uvhomedir/bin/delete.file TEST3
fi
if [ -f TEST4 -a -f D_TEST4 ]
then
echo "Deleting TEST4"
$uvhomedir/bin/delete.file TEST4
fi
echo "Cleaned."
exit 0
fi
# "install" mode from here on: refuse to continue if any of the demo data
# files (or their dictionaries) already exist, to avoid clobbering data.
if [ -f TEST1 -o -f D_TEST1 ]
then
echo "TEST1 exists"
exit 1
fi
if [ -f TEST2 -o -f D_TEST2 ]
then
echo "TEST2 exists"
exit 1
fi
if [ -f TEST3 -o -f D_TEST3 ]
then
echo "TEST3 exists"
exit 1
fi
if [ -f TEST4 -o -f D_TEST4 ]
then
echo "TEST4 exists"
exit 1
fi
echo "Creating the data files..."
# create.file arguments: name, type 7, modulus 200009, separation 2
# NOTE(review): argument meanings inferred from UniVerse create.file usage --
# confirm against the UniVerse documentation.
$uvhomedir/bin/create.file TEST1 7 200009 2
$uvhomedir/bin/create.file TEST2 7 200009 2
$uvhomedir/bin/create.file TEST3 7 200009 2
$uvhomedir/bin/create.file TEST4 7 200009 2
echo "Extracting the shell script 'media_test.sh' for media-recovery..."
cat <<- EndOfProgram > media_test.sh
#!/bin/sh
uv -admin -stop
#
# Reinitialize the logging system.
#
echo y | uvcntl_install
uv -admin -start
clear.file TEST1
clear.file TEST2
clear.file TEST3
clear.file TEST4
printf "SELECT VOC WITH @ID LIKE 'A...'\nCOPY FROM VOC TO TEST1\nY\n" | uvsh
printf "SELECT VOC WITH @ID LIKE 'B...'\nCOPY FROM VOC TO TEST2\nY\n" | uvsh
printf "SELECT VOC WITH @ID LIKE 'C...'\nCOPY FROM VOC TO TEST3\nY\n" | uvsh
printf "SELECT VOC WITH @ID LIKE 'D...'\nCOPY FROM VOC TO TEST4\nY\n" | uvsh
printf "COUNT TEST1\n" | uvsh
printf "COUNT TEST2\n" | uvsh
printf "COUNT TEST3\n" | uvsh
printf "COUNT TEST4\n" | uvsh
#
# force the checkpoint
#
uvforcecp
#
# force to use the next archive: we will be given the next
# LSN of the archive. This LSN will be assoicated with the backup
#
echo "#################################################"
uvforcearch
echo "#################################################"
sleep 10
# now stop the database for making a cleanup backup
uv -admin -stop
#
# Now, we are ready to make the backup:
#
# make a clean system backup associated with the LSN
# number that uvforcearch reported.
#
if [ -d ../XDEMO.backup ]
then
rm -rf ../XDEMO.backup
fi
cp -rfp ../XDEMO ../XDEMO.backup
#
# bring the system up after the backup
#
uv -admin -start
#
# adding more reocrds to files
#
printf "SELECT VOC WITH @ID LIKE 'O...'\nCOPY FROM VOC TO TEST1\nY\n" | uvsh
printf "SELECT VOC WITH @ID LIKE 'P...'\nCOPY FROM VOC TO TEST2\nY\n" | uvsh
printf "SELECT VOC WITH @ID LIKE 'Q...'\nCOPY FROM VOC TO TEST3\nY\n" | uvsh
printf "SELECT VOC WITH @ID LIKE 'R...'\nCOPY FROM VOC TO TEST4\nY\n" | uvsh
printf "COUNT TEST1\n" | uvsh
printf "COUNT TEST2\n" | uvsh
printf "COUNT TEST3\n" | uvsh
printf "COUNT TEST4\n" | uvsh
delete.file TEST3
delete.file TEST4
#
# recreate TEST3 and TEST4, and add more records into it
#
create.file TEST3 7 200009 2
create.file TEST4 7 200009 2
printf "SELECT VOC WITH @ID LIKE 'E...'\nCOPY FROM VOC TO TEST3\nY\n" | uvsh
printf "SELECT VOC WITH @ID LIKE 'T...'\nCOPY FROM VOC TO TEST3\nY\n" | uvsh
printf "SELECT VOC WITH @ID LIKE 'D...'\nCOPY FROM VOC TO TEST4\nY\n" | uvsh
printf "SELECT VOC WITH @ID LIKE 'R...'\nCOPY FROM VOC TO TEST4\nY\n" | uvsh
#
# delete some records in TEST1
#
printf "SELECT TEST1 WITH @ID LIKE 'A...'\nDELETE TEST1\nY\n" | uvsh
printf "COUNT TEST1\n" | uvsh
printf "COUNT TEST2\n" | uvsh
printf "COUNT TEST3\n" | uvsh
printf "COUNT TEST4\n" | uvsh
EndOfProgram
echo "Extracting the BASIC programes ..."
cat <<- EndOfProgram > BP/TESTBP
SENT = SENTENCE()
PARAM = CONVERT(' ', @FM, TRIM( SENT, ' ', 'D'))
NFILES = PARAM<4>
UPDPERM = PARAM<5>
DELPERM = PARAM<6>
CRT "NFILES=":NFILES:",UPDPERM=":UPDPERM:",DELPERM=":DELPERM
KEYBASE = 10000000
KEYFROM = 1
KEYMAX = 1000000
COUNT = 0
DIM FP(4)
FOR I=1 TO NFILES
OPEN '','TEST':I TO FP(I) ELSE STOP "Unable to open TEST":I
NEXT
SEED=@USERNO
IF SEED < 0 THEN
SEED = -SEED
END
RANDOMIZE SEED
LOOP
COUNT = COUNT + 1
IF MOD(COUNT, 2048) = 0 THEN
READ BODY FROM FP(1), 'CONTROL' THEN
IF (BODY='STOP') THEN
CRT "STOP request detected, exiting..."
STOP
END
END
END
** select the file
N = RND(NFILES) + 1
F = FP(N)
N = RND(KEYMAX) + KEYFROM
KEY = N
READ BODY FROM F, KEY THEN
IF RND(1000) < UPDPERM THEN
BODY<1> = BODY<1> + 1
WRITE BODY TO F, KEY
END
END ELSE
BODY<1>= 1
BODY<2>= KEY
BODY<3>= STR("*", 32 + MOD(COUNT,128))
WRITE BODY TO F, KEY
END
* IF MOD(COUNT,512)=0 THEN
* SLEEP 1
* END
IF MOD(COUNT, 1000) < DELPERM THEN
DELETE F, KEY
END
REPEAT
END
EndOfProgram
chmod +x media_test.sh
# Compile the extracted BASIC test program (BP/TESTBP).
$uvhomedir/bin/nbasic BP TESTBP
echo "Creating PA STOPTEST"
# STOPTEST paragraph: writes the record CONTROL='STOP' into TEST1, which the
# running TESTBP phantoms poll periodically to know when to exit.
UVwrite VOC STOPTEST \
PA \
"" \
"SH -c 'UVwrite TEST1 CONTROL STOP'"
echo "Creating PA START_2_000_000"
UVwrite VOC START_2_000_000 \
PA \
"" \
"DISPLAY ----------------------" \
"DISPLAY Test parameters:" \
"DISPLAY sessions = 3" \
"" \
"DISPLAY files = 2" \
"DISPLAY update per-mille = 0" \
"DISPLAY delete per-mille = 0" \
"DISPLAY ----------------------" \
"DISPLAY" \
"" \
"SH -c 'UVwrite TEST1 CONTROL 0'" \
"" \
"PHANTOM RUN BP TESTBP 2 0 0" \
"PHANTOM RUN BP TESTBP 2 0 0" \
"PHANTOM RUN BP TESTBP 2 0 0"
echo "Creating PA START_2_050_010"
UVwrite VOC START_2_050_010 \
PA \
"" \
"DISPLAY ----------------------" \
"DISPLAY Test parameters:" \
"DISPLAY sessions = 3" \
"" \
"DISPLAY files = 2" \
"DISPLAY update per-mille = 50" \
"DISPLAY delete per-mille = 10" \
"DISPLAY ----------------------" \
"DISPLAY" \
"" \
"SH -c 'UVwrite TEST1 CONTROL 0'" \
"" \
"PHANTOM RUN BP TESTBP 2 50 10" \
"PHANTOM RUN BP TESTBP 2 50 10" \
"PHANTOM RUN BP TESTBP 2 50 10"
echo "Creating PA START_2_500_010"
UVwrite VOC START_2_500_010 \
PA \
"" \
"DISPLAY ----------------------" \
"DISPLAY Test parameters:" \
"DISPLAY sessions = 3" \
"" \
"DISPLAY files = 2" \
"DISPLAY update per-mille =500" \
"DISPLAY delete per-mille = 10" \
"DISPLAY ----------------------" \
"DISPLAY" \
"" \
"SH -c 'UVwrite TEST1 CONTROL 0'" \
"" \
"PHANTOM RUN BP TESTBP 2 500 10" \
"PHANTOM RUN BP TESTBP 2 500 10" \
"PHANTOM RUN BP TESTBP 2 500 10"
echo "Creating PA START_4_000_000"
UVwrite VOC START_4_000_000 \
PA \
"" \
"DISPLAY ----------------------" \
"DISPLAY Test parameters:" \
"DISPLAY sessions = 3" \
"" \
"DISPLAY files = 4" \
"DISPLAY update per-mille = 0" \
"DISPLAY delete per-mille = 0" \
"DISPLAY ----------------------" \
"DISPLAY" \
"" \
"SH -c 'UVwrite TEST1 CONTROL 0'" \
"" \
"PHANTOM RUN BP TESTBP 4 0 0" \
"PHANTOM RUN BP TESTBP 4 0 0" \
"PHANTOM RUN BP TESTBP 4 0 0"
echo "Creating PA START_4_050_010"
UVwrite VOC START_4_050_010 \
PA \
"" \
"DISPLAY ----------------------" \
"DISPLAY Test parameters:" \
"DISPLAY sessions = 3" \
"" \
"DISPLAY files = 4" \
"DISPLAY update per-mille = 50" \
"DISPLAY delete per-mille = 10" \
"DISPLAY ----------------------" \
"DISPLAY" \
"" \
"SH -c 'UVwrite TEST1 CONTROL 0'" \
"" \
"PHANTOM RUN BP TESTBP 4 50 10" \
"PHANTOM RUN BP TESTBP 4 50 10" \
"PHANTOM RUN BP TESTBP 4 50 10"
echo "Creating PA START_4_500_010"
UVwrite VOC START_4_500_010 \
PA \
"" \
"DISPLAY ----------------------" \
"DISPLAY Test parameters:" \
"DISPLAY sessions = 3" \
"" \
"DISPLAY files = 2" \
"DISPLAY update per-mille =500" \
"DISPLAY delete per-mille = 10" \
"DISPLAY ----------------------" \
"DISPLAY" \
"" \
"SH -c 'UVwrite TEST1 CONTROL 0'" \
"" \
"PHANTOM RUN BP TESTBP 4 500 10" \
"PHANTOM RUN BP TESTBP 4 500 10" \
"PHANTOM RUN BP TESTBP 4 500 10"
echo "Done."
exit 0
| true
|
42e2ed40bda3fe239dd9e366644e4ebd176d0da1
|
Shell
|
isabella232/dsp
|
/dsp-regression-framework/src/main/dsp-docker-images/service/ipp-dsp-service.postinst
|
UTF-8
| 525
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# Debian maintainer script (postinst) for ipp-dsp-service: fix ownership of
# the service's directories on "configure" and load its environment defaults.
set -ex
PAC=ipp-dsp-service
APP_UID=fk-idf-dev
APP_GID=fk-idf-dev
variant=base
CMD="$1"
# dpkg invokes postinst with "configure" on (re)installation.
if [ "$CMD" == "configure" ]; then
chown -R $APP_UID:$APP_GID /usr/share/$PAC
chown -R $APP_UID:$APP_GID /etc/$PAC
chown -R $APP_UID:$APP_GID /var/log/flipkart/$PAC
chmod -R 777 /etc/$PAC
fi
chmod 777 /etc/init.d/dsp-service.sh
source /etc/default/ipp-dsp-service.env
# NOTE(review): "profile" is a relative path, so this appends to ./profile in
# whatever directory dpkg happens to run from, and re-appends on every
# install -- possibly intended to be /etc/profile; confirm before changing.
cat /etc/default/ipp-dsp-service.env | sudo tee -a profile
source /etc/profile
echo "*****************************************"
exit 0
| true
|
308dbdca37500a79f2c45bbc577ae882ce3b7422
|
Shell
|
122448281/myproject
|
/house/soufang/year_price/get_price_by_year.sh
|
UTF-8
| 438
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/sh
# Fetch the Beijing housing price index (8-year series) from fang.com and
# print it to stdout as "YYYY/MM/DD price" lines.
# FIX: use only the script's basename for the temp file -- $0 may contain
# slashes (e.g. ./dir/get_price_by_year.sh), which made the original
# "tmp1.$0" an invalid filename. Also quote the temp-file expansions.
tmpfile=tmp1.$(basename "$0")
wget "http://fangjia.fang.com/pinggu/ajax/chartajax.aspx?dataType=4&city=%u5317%u4EAC&Class=defaultnew&year=8" -O "$tmpfile"
# Break the JSON-ish payload into one "[timestamp,price]" pair per line.
sed -i 's/&/\n/g' "$tmpfile"
sed -i 's/],/]\n/g' "$tmpfile"
sed -i 's/\[\[/******\n\[/g' "$tmpfile"
sed -i 's/\]\]/]\n******/g' "$tmpfile"
# Strip brackets and convert millisecond epoch timestamps to dates.
cat "$tmpfile" | sed 's/\[//g' | sed 's/\]//g' |awk -F "," '{if($0 ~ /,/){aa=$1/1000; bb=strftime("%Y/%m/%d",aa); print bb" "$2}else{print $0}}'
rm "$tmpfile"
| true
|
0dd24c9f9ddad0881517c49cb9e859ffb654a54f
|
Shell
|
so5/JHPCN-DF
|
/tests/BenchmarkTest/comp.sh
|
UTF-8
| 2,499
| 2.890625
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
###################################################################################
#
# JHPCN-DF : Data compression library based on
# Jointed Hierarchical Precision Compression Number Data Format
#
# Copyright (c) 2014-2015 Advanced Institute for Computational Science, RIKEN.
# All rights reserved.
#
###################################################################################
# Compile the benchmark binaries with the GNU and Intel compilers (Fujitsu and
# PGI are disabled below) for both float and double, logging to compile.log.
GCC=g++
GCC_OPT="-g --std=c++11 -O3 -fopenmp -masm=intel"
ICPC=icpc
ICPC_OPT="-g --std=c++11 -O3 -ip -fno-alias -unroll=0 -qopt-report=5 -openmp -masm=intel"
FCC=FCCpx
FCC_OPT="-g -Xg -O3 -Kfast -Kocl -Krestp=arg -Koptmsg=2 -Nsrc -Nsta"
# for Ivy bridge (FOCUS D/E system)
#GCC_OPT="${GCC_OPT} -march=core-avx-i"
#ICPC_OPT="${ICPC_OPT} -xCORE-AVX-I"
# for Sandy bridge
#GCC_OPT="${GCC_OPT} -march=corei7-avx"
#ICPC_OPT="${ICPC_OPT} -xAVX"
# for Westmere (FOCUS A/C system)
GCC_OPT="${GCC_OPT} -march=corei7"
ICPC_OPT="${ICPC_OPT} -xSSE4.2"
#PGCC=pgc++
#PGCC_OPT="-g --c++0x -fastsse -O3 -Mipa=fast,inline -Msmartalloc -mp -Msafeptr"
## PGI evaluation suspended: C++11 code cannot be compiled properly with the
## PGI compiler on FOCUS.
## It could probably be worked around by explicitly using the gcc 4.8
## headers, but...
echo "Benchmark program compile start at `date`" |tee compile.log
#echo "${FCC} --version" |tee -a compile.log
#${FCC} -Xg --version 2>&1  |tee -a compile.log
#echo "Compiler option for Intel: ${FCC_OPT}" |tee -a compile.log
echo "${GCC} --version" |tee -a compile.log
${GCC} --version 2>&1  |tee -a compile.log
echo "Compiler option for g++: ${GCC_OPT}" |tee -a compile.log
echo "${ICPC} --version" |tee -a compile.log
${ICPC} --version 2>&1  |tee -a compile.log
echo "Compiler option for Intel: ${ICPC_OPT}" |tee -a compile.log
# Build each benchmark variant for both floating-point widths.
for REAL_TYPE in float double
do
for BM in XOR AND ZeroPadding ZeroPadding2
do
#${FCC} ${FCC_OPT} -DREAL_TYPE=${REAL_TYPE} -I ../../src -I./ ${BM}Benchmark.cpp -o a.out_FX10_${REAL_TYPE}_${BM} 2>&1 |tee -a compile.log
${GCC} ${GCC_OPT} -DREAL_TYPE=${REAL_TYPE} -I ../../src -I./ ${BM}Benchmark.cpp -o a.out_GNU_${REAL_TYPE}_${BM} 2>&1  |tee -a compile.log
${ICPC} ${ICPC_OPT} -DREAL_TYPE=${REAL_TYPE} -I ../../src -I./ ${BM}Benchmark.cpp -o a.out_INTEL_${REAL_TYPE}_${BM} 2>&1  |tee -a compile.log
#${PGCC} ${PGCC_OPT} -DREAL_TYPE=${REAL_TYPE} -I ../../src -I./ ${BM}Benchmark.cpp -o a.out_PGI_${REAL_TYPE}_${BM}
done
done
| true
|
eaa93986385e67936c6fab3a4b3c4e56c7e02ab7
|
Shell
|
hnakamur/postgresql-pgpool2-failover-example-playbook
|
/roles/postgresql_db/templates/recovery_1st_stage.j2
|
UTF-8
| 1,839
| 3.65625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# recovery_1st_stage
# NOTE: This script must be placed in the PGDATA directory and the filename must
# be the same as the recovery_1st_stage_command in pgpool.conf
# NOTE: This script is executed on the primary server by the postgres user.
# http://www.pgpool.net/docs/latest/pgpool-ja.html#online_recovery_in_stream_mode
set -eu
echo start args=$* UID=$UID | logger -t recovery_1st_stage
PGDATA=$1
REMOTE_HOST=$2
REMOTE_PGDATA=$3
PORT=$4
# NOTE: We assume the port number of PostgreSQL is same across servers.
# Determine the current primary: in this two-node cluster it is simply the
# "other" node. The addresses are filled in by Ansible/Jinja2 at deploy time.
case $REMOTE_HOST in
{{ containers[0].addr_cidr | ipaddr('address') }})
primary_host={{ containers[1].addr_cidr | ipaddr('address') }}
;;
{{ containers[1].addr_cidr | ipaddr('address') }})
primary_host={{ containers[0].addr_cidr | ipaddr('address') }}
;;
*)
echo exit because of invalid REMOTE_HOST=$REMOTE_HOST | logger -t recovery_1st_stage
exit 1
;;
esac
ARCHIVE_DIR={{ postgresql_archive_dir }}
ssh_cmd="/bin/ssh -T postgres@$REMOTE_HOST"
# Back up the standby's old data directory, then take a fresh base backup
# from the primary.
# NOTE(review): the backticks below sit inside double quotes, so `date` is
# expanded on the local host before the command text is sent over ssh.
$ssh_cmd "
mv $REMOTE_PGDATA $REMOTE_PGDATA.`date +%Y-%m-%d-%H-%M-%S`
{{ postgresql_bin_dir }}/pg_basebackup -h $primary_host -U {{ postgresql_replication_user }} -D $REMOTE_PGDATA -x -c fast
"
echo pg_basebackup done. | logger -t recovery_1st_stage
# Clear promotion leftovers and the WAL archive, and recreate the
# archive_status directory pg_xlog expects.
$ssh_cmd "
rm -f $REMOTE_PGDATA/{{ pgpool2_promote_trigger_filename }}
rm -f $REMOTE_PGDATA/recovery.done
rm -rf $ARCHIVE_DIR/*
mkdir -p $REMOTE_PGDATA/pg_xlog/archive_status
"
echo created archive_status. | logger -t recovery_1st_stage
# Write recovery.conf on the standby so it streams from the primary and can
# later be promoted via the trigger file.
$ssh_cmd "
cd $REMOTE_PGDATA
cat > recovery.conf << EOT
standby_mode = 'on'
primary_conninfo = 'host=$primary_host port=$PORT user={{ postgresql_replication_user }}'
restore_command = 'scp $primary_host:$ARCHIVE_DIR/%f %p'
trigger_file = '$REMOTE_PGDATA/{{ pgpool2_promote_trigger_filename }}'
EOT
"
echo created recovery.conf. | logger -t recovery_1st_stage
| true
|
4091236285a64cd52e4d13dc85bf46817bde2b1f
|
Shell
|
zzzdeb/dotfiles
|
/scripts/tools/mobileasmonitor
|
UTF-8
| 234
| 2.9375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Extend the desktop onto the VIRTUAL1 output at the requested mode, placed
# to the right of the primary monitor, then expose it via VNC (monitor_vnc).
# Usage: mobileasmonitor MODE   (e.g. mobileasmonitor 1280x720)
if [ "$#" -ne 1 ]; then
    echo "usage: mobileasmonitor MOBILENAME"
    exit 1
fi
VIRT=VIRTUAL1
# First word of xrandr's "primary" line is the primary output's name.
PRIM=$(xrandr | grep primary | awk '{print $1}')
# FIX: if no primary output is configured, $PRIM is empty and the original
# xrandr call silently misbehaved; fail loudly instead.
if [ -z "$PRIM" ]; then
    echo "mobileasmonitor: could not determine primary output" >&2
    exit 1
fi
xrandr --output "$VIRT" --mode "$1" --right-of "$PRIM"
monitor_vnc "$VIRT"
| true
|
7b2f31966eb11fc8d7655b198cddefb7954655cb
|
Shell
|
jmbarberan/heroku-buildpack-nginx-php-phalcon
|
/support/build/redis
|
UTF-8
| 625
| 3.421875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Heroku buildpack compile step for Redis: download the source tarball,
# build it, and stage the server/CLI binaries into the target prefix ($1).
# Build Path: /app/.heroku/redis/
OUT_PREFIX=$1
# fail hard
set -o pipefail
# fail harder
set -eux
DEFAULT_VERSION="3.0.3"
# The caller may override the Redis version via $VERSION.
dep_version=${VERSION:-$DEFAULT_VERSION}
dep_dirname=redis-${dep_version}
dep_archive_name=${dep_dirname}.tar.gz
dep_url=http://download.redis.io/releases/${dep_archive_name}
echo "-----> Building Redis ${dep_version}..."
curl -L ${dep_url} | tar xz
pushd ${dep_dirname}
# -s: silent make; -j 9: parallel build.
make -s -j 9
rm -rf ${OUT_PREFIX}/*
cp src/redis-server ${OUT_PREFIX}
cp src/redis-benchmark ${OUT_PREFIX}
cp src/redis-cli ${OUT_PREFIX}
# NOTE(review): redis-check-dump was renamed redis-check-rdb in Redis >= 3.2,
# so this copy will fail if $VERSION is overridden to a newer release.
cp src/redis-check-dump ${OUT_PREFIX}
popd
echo "-----> Done."
| true
|
a477975b8138c30dc0207226edad9f9c4a3a8e6e
|
Shell
|
rosnerpn/shellscript
|
/tarefa1.sh
|
UTF-8
| 1,240
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# FIX: the shebang must be the very first line of the file; in the original
# it appeared after the header block and therefore had no effect.
######################################################
#   Nome: Rosner Pelaes Nascimento                   #
#   Máteria: Programação Shell Scritp                #
#   Curso: Pós Graduação em Redes de Computadores    #
#                     Unimep                         #
#    Tarefa - 1                                      #
######################################################
echo "Bem Vindo $USER"
echo "Digite a opção desejada: 1 - Copiar | 2 - Exibir"
read opcao
case $opcao in
1)
# mkdir -p replaces the original duplicated if/else copy blocks: it creates
# the backup directory only when missing and is a no-op otherwise.
mkdir -p /root/backup
cp -r /etc/network /root/backup
cp /etc/resolv.conf /root/backup
cp -r /etc/passwd /root/backup
;;
2)
echo "Qual arquivo deseja exibir: ?"
read arquivo
# FIX: each branch now tests the real backed-up path with &&. The original
# resolv branch used "|| " (so it matched regardless of the file test) and
# checked /root/backup/resolv, which never exists -- the file is resolv.conf.
if [ -e /root/backup/passwd ] && [ "$arquivo" == "passwd" ]
then
cat /root/backup/passwd
elif [ -e /root/backup/network ] && [ "$arquivo" == "network" ]
then
ls /root/backup/network
elif [ -e /root/backup/resolv.conf ] && [ "$arquivo" == "resolv" ]
then
cat /root/backup/resolv.conf
else
echo "arquivo $arquivo não localizado"
fi
;;
*)
echo "Nenhum opção desejada"
;;
esac
| true
|
b04d3df77fc63776d33e31d134ff5b4b93e4e034
|
Shell
|
alwaysdatas/auto_captions_dl_pc
|
/youtube.sh
|
UTF-8
| 2,158
| 3.875
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Downloads YouTube captions and cleans them up (makes them a text file and adds punctuation)
# requires that there are no text files in current working directory
# depenendcies: youTube-dl, sed, rm, cat
# Usage: path_to_youtube.sh YouTubeURL
for file in "$@"
do
extension="${file##*.}"
basename=`basename "$file" .$extension`
newfile="$basename.txt"
# Step 1
# If you don't have the audio file remove "skip download" options
'C:\Python27\Scripts\youtube-dl.exe' -f 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/mp4' --write-auto-sub "$@"
# youtube-dl --write-auto-sub --skip-download "$@"
# Step 2
'C:\Python27\Scripts\aeneas_convert_syncmap.py' *.vtt out.srt
# Clean Up
rm *.vtt
## Step #3
# remove angle brackets
sed -i 's/<[^<>]*>//g' out.srt
## Step #4
# Remove spaces
sed -i '/^\s*$/d' out.srt
# Step #5
# Remove all lines that begin with a zero (video must be < 1 hour)
sed -i '/^0/ d' out.srt
# Step #6
# Delete every odd number line
sed -i -n '1~2!p' out.srt
# Step Step #7
# Remove line breaks
sed -i ':a;N;$!ba;s/\n/ /g' out.srt
# Step #8
# Clean up unicode error related to angle brackets
sed -i 's/>/>/g' out.srt
# Step #9
# Uncapitalize each word and then capitalize first word in each sentence (for some foreign language files)
# sed -i 's/\(.*\)/\L\1/' out.txt
# sed -i 's/\.\s*./\U&\E/g' out.txt
# Step #10 replace CONTENTS section of punctuator with transcript
sed -ri "s@CONTENTS@$(cat out.srt)@g" ./punctuate.sh
# Step #11 Run punctuator
./punctuate.sh
# Step # 15 Clean Up Punctuate script (remove transcript data)
sed -i 's@"text=.*"@"text=CONTENTS"@g' ./punctuate.sh
# Step # 16 Clean up Transcript
sed -i 's/\[,\ Music\ \]/\[\ Music\ \]/g' *.txt
sed -i 's/\[\ Music,\ \]/\[\ Music\ \]/g' *.txt
# #Step #17 Remove SRT file
rm out.srt
# Remove YouTube URL
for f in *-**; do mv "$f" "${f//-*/.mp4}"; done
# Send name of MP4 to text file
ls *.mp4 > test2.txt
sed -i 's/.mp4/.txt/g' test2.txt
# Rename Transcript file
cat test2.txt|mv "out.txt" "${f//out.txt/@}"
# Clean up transcript name file
for f in *-**; do mv "$f" "${f//-*/.txt}"; done
# Remove old file
rm test2.txt
done
| true
|
2d29004dcefcd9da0747de6b87dc9cfabd888f56
|
Shell
|
ipoval/scripts
|
/bash_create_account.bash
|
UTF-8
| 997
| 4.0625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Account-creation helper.
#
# Usage:
#   bash_create_account.bash -a USERNAME   create account + shared directory
#   bash_create_account.bash -b USERNAME   create home directory on file server
[[ $1 ]] || { echo 'missing argument: username' >&2; exit 1; }
# FIX: both branches now take the username from getopts' $OPTARG. The
# original -a branch used "$1" (which is the "-a" flag itself) and the -b
# branch used "$2"; "-b" is now declared with a required argument ("b:"),
# which is call-compatible with the old "script -b username" invocation.
while getopts ":a:b:" opt; do
  case $opt in
    a)
      user=$OPTARG
      echo 'creating account ...'
      useradd -m "$user"
      passwd "$user"
      mkdir -p "/shared/$user"
      chown "$user".users "/shared/$user"
      chmod 775 "/shared/$user"
      ln -s "/shared/$user" "/home/$user/shared"
      chown "$user".users "/home/$user/shared"
      echo "account created: /shared/${user}"
      exit 0;
      ;;
    b)
      user=$OPTARG
      echo 'creating home directory for a user on the file server ...'
      cp -R /etc/skel "/home/$user"
      chown -R "$user"."$user" "/home/$user"
      chmod 751 "/home/$user" # this permission will make sure that nginx can access the folder to server user's content
      echo "Unless you saw an error, everything is good."
      exit 0;
      ;;
    :)
      # FIX: corrected the error-message typo ("and" -> "an").
      echo "Option -${OPTARG} is missing an argument" >&2;
      exit 1;
      ;;
    \?)
      echo "Unknown option -${OPTARG}" >&2;
      exit 1;
      ;;
  esac
done
| true
|
7fb33163276ec739e689305f8909544e453da5a7
|
Shell
|
imlunx/Portfolio_2
|
/Guessage.sh
|
UTF-8
| 1,064
| 4.125
| 4
|
[] |
no_license
|
#!/bin/bash
# Student Name  : Aaron Ng Wee Xuan
# Student Number: 10518970
#
# "Guess My Age": the player has 5 attempts to guess a random age from 1-83.

# Pick a random age between 1 and 83 (the Singaporean life expectancy) into $num.
getRandomNo()
{
#get random number from 1-83
#life expectancy of Singapore
num=$(($RANDOM%83+1))
}

# Run the guessing loop: compare each guess against $num, print higher/lower
# hints, and consume one attempt per guess.
guessAge()
{
getRandomNo
echo "Welcome to Guess My Age!"
echo "########################"
echo "Hint: The average life expectancy of Singaporeans is 83 years old!"
echo "You have 5 tries!!"
#set REPLY variable outside loop to initiate loop
REPLY=0
#loop compares user input to random number
#also test if any attempts left
# (uses two [ ] tests joined with && instead of the obsolescent "-a")
while [ "$REPLY" -ne "$num" ] && [ "$chance" -gt 0 ];
do
read -p "Whats my age?: "
if [ "$REPLY" -eq "$num" ]; then
echo "Correct!"
elif [ "$REPLY" -lt "$num" ]; then
echo "Higher!"
elif [ "$REPLY" -gt "$num" ]; then
echo "Lower!"
fi
(( chance-=1 )) #reduce one attempt every loop
done
}

main()
{
chance=5 #set attempt to 5
guessAge #start guessing game
# FIX: the original "if chance=0" ran an assignment-like command that always
# succeeded, so "Try again soon!" printed even after a correct guess.
# Compare the remaining attempts numerically instead.
if [ "$chance" -eq 0 ];
then echo "Try again soon!"
fi
}
main
| true
|
0ce035902aa8362556271ec6860fb7f2822ab876
|
Shell
|
noblepepper/ralink_sdk
|
/source/user/rt2880_app/scripts/config-iTunes.sh
|
UTF-8
| 457
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/sh
# Configure and launch the mt-daapd (iTunes/DAAP) music server and announce
# it on the LAN via mDNS. Part of the Ralink router firmware scripts.
. /sbin/config.sh
. /sbin/global.sh
# Print invocation help and abort.
usage()
{
echo "Usage:"
echo "  $0 <server_name> <passwd> <mp3_dir>"
echo "Example:"
echo "  $0 Ralink ralink /media/sda1/mp3"
exit 1
}
# All three positional parameters are required; testing $3 covers them all.
if [ "$3" = "" ]; then
echo "$0: insufficient arguments"
usage $0
fi
# LAN address to advertise, taken from NVRAM.
lan_ip=`nvram_get 2860 lan_ipaddr`
server_name=$1
passwd=$2
mp3_dir=$3
# Generate the daemon config, register the _daap._tcp service on the LAN
# address, then start the server itself.
mt-daapd.sh "$server_name" "$passwd" "$mp3_dir"
mDNSResponder $lan_ip thehost "$server_name" _daap._tcp. 3689 &
mt-daapd
| true
|
02c3e75c371cfc40561c109e0bef27da90f3fec1
|
Shell
|
rchain/rdoctor-backend
|
/run
|
UTF-8
| 325
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/bash -e
# Bring up the rdoctor stack: prepare the data directories with the
# ownership the containers expect (uid 1000 = elasticsearch, uid 999 =
# redis), build the images, then start docker-compose.
cd "$(readlink -f "$(dirname "${BASH_SOURCE:-$0}")")"

export ELASTIC_VERSION=6.7.0
export RDOCTOR_DATADIR=/var/lib/rdoctor

for service_dir in elasticsearch redis; do
  mkdir -p "$RDOCTOR_DATADIR/$service_dir"
done
chown -R 1000 "$RDOCTOR_DATADIR/elasticsearch"
chown -R 999 "$RDOCTOR_DATADIR/redis"

./build-docker
docker-compose up
| true
|
f4a14bcba2ca8e274db3d2400fdddbaa2590830b
|
Shell
|
JoseLuisAcv2/GrupoLinux-scripts
|
/buscame
|
UTF-8
| 260
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
# buscame: list installed packages whose names match the given pattern.
#Muestra paquetes instalados que cumplan con patron
# Usage: buscame PATTERN
# FIX: quote "$1" -- the original unquoted [ -z $1 ] breaks (or mis-tests)
# when the argument contains spaces, and the unquoted grep pattern would be
# word-split and glob-expanded.
if [ -z "$1" ] ; then
echo -e "INGRESE PATRON DE BUSQUEDA\nbuscame [patron]"
exit 1
fi
# Use grep's exit status directly instead of re-testing $?; "--" protects
# patterns that start with a dash.
if ! aptitude search ~i --disable-columns -F "%p" | grep -- "$1" ; then
echo "NO ESTA INSTALADO"
fi
| true
|
a86a0e9210d7610a3a462b77ca14b0fda70a3f13
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/secure-delete/PKGBUILD
|
UTF-8
| 1,156
| 2.640625
| 3
|
[] |
no_license
|
# Original AUR Contributor: Federico Quagliata (quaqo) <linux@quaqo.org>
# Uploaded to AUR4: GI_Jack <iamjacksemail@hackermail.com>
# PKGBUILD for secure-delete: THC's srm/sfill/sswap/semem secure-erase tools.
pkgname=secure-delete
pkgver=3.1
pkgfile="secure_delete-$pkgver"
pkgrel=7
pkgdesc="Secure file, disk, swap, memory erasure utilities"
url="http://www.thc.org/"
depends=('glibc' 'sh')
conflicts=('srm')
license=('GPL')
arch=('i686' 'x86_64')
install=${pkgname}.install
source=(https://zerocount.net/hosted/secure_delete-${pkgver}.tar.gz)
sha256sums=('a9d846d1dce3f1bdf13bbb306e8596bc1f263198a086f6beecd90ccf7bddf8d5')
build()
{
cd "${srcdir}/${pkgfile}"
# Patch sfill.c to use mkstemp() instead of the insecure mktemp().
sed -i -e 's/mktemp/mkstemp/g' sfill.c
# Patch the Makefile: drop the kernel-module (sdel-mod) targets and make
# every binary depend on sdel-lib.o explicitly.
sed -i -e "s/sswap smem sdel-mod.o/sswap smem/" -e '/test.*sdel-mod/d' \
-e "s/^srm: /srm: sdel-lib.o /" -e "s/^sfill: /sfill: sdel-lib.o /" \
-e "s/^sswap: /sswap: sdel-lib.o /" -e "s/^smem: /smem: sdel-lib.o /" \
Makefile
make
}
package()
{
cd "${srcdir}/${pkgfile}"
make INSTALL_DIR="${pkgdir}/usr/bin" \
MAN_DIR="${pkgdir}/usr/share/man" \
DOC_DIR="${pkgdir}/usr/share/doc/secure_delete" \
install
# renamed due to naming conflicts
# (coreutils/procps also ship an "smem"; Arch policy forbids the clash)
mv "${pkgdir}/usr/bin/smem" "${pkgdir}/usr/bin/semem"
chmod a+r "${pkgdir}/usr/bin"/*
}
| true
|
5c5845b466448d0899cdc5b83064d40331e71f03
|
Shell
|
tknpoon/tptcn
|
/devel/d_scrapy_ccl/runc.sh
|
UTF-8
| 707
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
# Launch the centadata CCI scraper container for this stage. The directory
# name doubles as the image tag; its two-letter prefix (d_/u_/p_/g_)
# selects the stage, the overlay network and the port base.
DIRNAME=$(cd "$(dirname "$0")"; pwd)
CURDIR=$DIRNAME
TAG_NAME=$(cd "$DIRNAME"; basename "$(pwd)")

# Stage prefix -> port base (only the commented-out PORT25 derivation below
# ever consumed it; kept for parity with sibling scripts).
case "${TAG_NAME:0:2}" in
  d_) PORTBASE=20000 ;;
  u_) PORTBASE=30000 ;;
  p_) PORTBASE=40000 ;;
  g_) PORTBASE=50000 ;;
esac
#PORT25=`expr $PORTBASE + 25`

url=http://www1.centadata.com/cci/cci_e.htm

# Run the spider once; --rm removes the container when it exits.
docker run \
  --name "$TAG_NAME" \
  --network "${TAG_NAME:0:2}tptcn_overlay" \
  --env-file "$HOME/.self_env" \
  -e URL_TO_SCRAP="$url" \
  -e STAGE="${TAG_NAME:0:1}" \
  -v "$CURDIR/entrypoint.py:/var/lib/scrapyd/entrypoint.py" \
  --rm \
  "tknpoon/private:$TAG_NAME" \
  /bin/bash -c 'scrapy runspider /var/lib/scrapyd/entrypoint.py '
| true
|
0ba317a1ce518602d062eb9e2bec1d4628fde7c8
|
Shell
|
justinthomas/tinyspline
|
/tools/ci/build.macosx-x86_64.sh
|
UTF-8
| 4,548
| 3.546875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
set -e
SCRIPT_DIR=$( cd "$(dirname "$0")"; pwd -P)
ROOT_DIR="${SCRIPT_DIR}/../.."
VOLUME="${SCRIPT_DIR}/build.macosx-x86_64"
mkdir -p "${VOLUME}"
REPOSITORY="tinyspline"
TAG="build.macosx-x86_64"
IMAGE_NAME="${REPOSITORY}:${TAG}"
STORAGE="/dist"
SETUP_CMDS=$(cat << END
RUN echo 'debconf debconf/frontend select Noninteractive' \
| debconf-set-selections && \
apt-get update && \
apt-get install -y --no-install-recommends cmake swig
COPY src/. /tinyspline
WORKDIR /tinyspline
END
)
BUILD_RUN_DELETE() {
docker build -t ${IMAGE_NAME} -f - "${ROOT_DIR}" <<-END
${1}
END
docker run \
--rm \
--volume "${VOLUME}:${STORAGE}" \
${IMAGE_NAME} \
/bin/bash -c "${2}"
docker rmi ${IMAGE_NAME}
}
################################# C#, D, Java #################################
JDK8_URL="https://github.com/AdoptOpenJDK/openjdk8-binaries/releases/download/jdk8u212-b03/OpenJDK8U-jdk_x64_mac_hotspot_8u212b03.tar.gz"
BUILD_CSHARP_JAVA() {
BUILD_RUN_DELETE \
"FROM liushuyu/osxcross:latest
${SETUP_CMDS}
RUN apt-get install -y --no-install-recommends \
mono-mcs mono-reference-assemblies-2.0 nuget \
dub \
default-jdk maven" \
"wget ${JDK8_URL} -O /opt/jdk8.tar.gz && mkdir /opt/java && \
tar -C /opt/java -xf /opt/jdk8.tar.gz --strip 1 && \
CC=o64-clang CXX=o64-clang++ JAVA_HOME=/opt/java/Contents/Home \
cmake . \
-DCMAKE_SYSTEM_NAME=Darwin \
-DCMAKE_BUILD_TYPE=Release \
-DTINYSPLINE_ENABLE_CSHARP=True \
-DTINYSPLINE_ENABLE_DLANG=True \
-DTINYSPLINE_ENABLE_JAVA=True \
-DJava_JAVAC_EXECUTABLE=/usr/bin/javac \
-DJava_JAR_EXECUTABLE=/usr/bin/jar && \
cmake --build . --target tinysplinecsharp && \
nuget pack && \
chown $(id -u):$(id -g) *.nupkg && \
cp -a *.nupkg ${STORAGE} && \
mvn package && \
chown $(id -u):$(id -g) target/*.jar && \
cp -a target/*.jar ${STORAGE}"
}
BUILD_CSHARP_JAVA
##################################### Lua #####################################
LUA51_URL="https://homebrew.bintray.com/bottles/lua@5.1-5.1.5_8.el_capitan.bottle.tar.gz"
LUA53_URL="https://homebrew.bintray.com/bottles/lua-5.3.5_1.el_capitan.bottle.tar.gz"
BUILD_LUA() {
url="LUA5${1}_URL"
BUILD_RUN_DELETE \
"FROM liushuyu/osxcross:latest
${SETUP_CMDS}
RUN apt-get install -y --no-install-recommends \
luarocks lua5.${1}" \
"wget ${!url} -O /opt/lua.tar.gz && mkdir /opt/lua && \
tar -C /opt/lua -xf /opt/lua.tar.gz --strip 2 && \
CC=o64-clang CXX=o64-clang++ \
cmake . \
-DCMAKE_SYSTEM_NAME=Darwin \
-DCMAKE_BUILD_TYPE=Release \
-DTINYSPLINE_ENABLE_LUA=True \
-DLUA_INCLUDE_DIR=/opt/lua/include/lua5.${1} \
-DLUA_LIBRARY=/opt/lua/lib/liblua5.${1}.dylib && \
sed -i '/supported_platforms/,/}/d' *.rockspec && \
luarocks make --pack-binary-rock && \
for f in ./*.rock; do mv \$f \${f/linux/macosx}; done && \
chown $(id -u):$(id -g) *.rock && \
cp -a *.rock ${STORAGE}"
}
BUILD_LUA 1
BUILD_LUA 3
################################### Python ####################################
PYTHON27_URL="https://homebrew.bintray.com/bottles/python@2-2.7.16.sierra.bottle.1.tar.gz"
PYTHON37_URL="https://homebrew.bintray.com/bottles/python-3.7.3.sierra.bottle.tar.gz"
BUILD_PYTHON() {
url="PYTHON${1}${2}_URL"
if [ "${1}" = "3" ]; then v="3"; else v=""; fi
if [ "${1}" = "3" ]; then m="m"; else m=""; fi
if [ "${1}" = "3" ]; then s="36"; else s="27"; fi
basedir="/opt/python/Frameworks/Python.framework/Versions/${1}.${2}"
BUILD_RUN_DELETE \
"FROM liushuyu/osxcross:latest
${SETUP_CMDS}
RUN apt-get install -y --no-install-recommends \
python${v} python${v}-setuptools python${v}-wheel" \
"wget ${!url} -O /opt/python.tar.gz && mkdir /opt/python && \
tar -C /opt/python -xf /opt/python.tar.gz --strip 2 && \
CC=o64-clang CXX=o64-clang++ \
cmake . \
-DCMAKE_SYSTEM_NAME=Darwin \
-DCMAKE_BUILD_TYPE=Release \
-DTINYSPLINE_ENABLE_PYTHON=True \
-DPYTHON_INCLUDE_DIR=${basedir}/include/python${1}.${2}${m} \
-DPYTHON_LIBRARY=${basedir}/lib/libpython${1}.${2}.dylib && \
python${v} setup.py bdist_wheel && \
for f in dist/*.whl; do mv \$f \${f/${s}/${1}${2}$}; done && \
for f in dist/*.whl; do mv \$f \${f/${s}/${1}${2}$}; done && \
for f in dist/*.whl; do mv \$f \${f/$/}; done && \
for f in dist/*.whl; do mv \$f \${f/$/}; done && \
for f in dist/*.whl; do
mv \$f \${f/linux/macosx_10_14}
done && \
if [ \"${1}\" = \"2\" ]; then
for f in dist/*cp2*.whl; do
mv \$f \${f/mu-macosx/m-macosx}
done
fi && \
chown $(id -u):$(id -g) dist/*.whl && \
cp -a dist/*.whl ${STORAGE}"
}
BUILD_PYTHON 2 7
BUILD_PYTHON 3 7
| true
|
3ebd07e7d2dc109e72ce135a2097f227a98f706a
|
Shell
|
cyprienbole/livet
|
/regroup.sh
|
UTF-8
| 260
| 2.5625
| 3
|
[] |
no_license
|
# Step 1: for every .TPS file, collapse its coordinate lines (and the IMAGE
# line) onto one ';'-separated record in ../out, followed by the source
# file name and a blank separator line.
cd wetransfer-daafab/
for i in *.TPS ; do
# FIX: quote "$i" (file names may contain spaces) and read the file with
# grep directly instead of the original "cat $i | grep" pipeline.
grep -e , -e IMAGE -- "$i" | tr -d "\r" | tr "\n" ";" > "../out/$i"
echo "$i" >> "../out/$i"
echo -e "\n" >> "../out/$i"
done
# Step 2: concatenate all per-file records, drop empty lines and turn the
# remaining spaces into ';' to produce the final CSV.
cd ../out
cat *.TPS | sed '/^$/d' | tr ' ' ';' > ../alltps.csv
| true
|
c51733b8b372a6911aac90880a7459614d5d1105
|
Shell
|
spcl/serverless-benchmarks
|
/dockerfiles/entrypoint.sh
|
UTF-8
| 403
| 3.390625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Container entrypoint: create an unprivileged user mirroring the host
# caller's UID/GID, hand it the function mount, then drop privileges with
# gosu before executing the container command.
run_uid=${CONTAINER_UID}
run_gid=${CONTAINER_GID}
run_user=${CONTAINER_USER}

# --non-unique: the UID/GID may already exist inside the image.
useradd --non-unique -m -u "${run_uid}" "${run_user}"
groupmod --non-unique -g "${run_gid}" "${run_user}"

mkdir -p /mnt/function && chown -R "${run_user}:${run_user}" /mnt/function
export HOME=/home/${run_user}

echo "Running as ${run_user}, with ${run_uid} and ${run_gid}"

# Optional pre-command from $CMD (intentionally unquoted so it word-splits
# into a command plus arguments), then exec the real command as the user.
if [ -n "$CMD" ]; then
    gosu "${run_user}" $CMD
fi
exec gosu "${run_user}" "$@"
| true
|
38b8894b1503df4a23117e1083c13da220b677bd
|
Shell
|
envoyproxy/envoy
|
/.azure-pipelines/docker/prepare_cache.sh
|
UTF-8
| 696
| 3.75
| 4
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash -e

# Prepare a Docker/Bazel build-cache directory for the Azure Pipelines
# agent and, unless disabled, back it with a tmpfs mount for speed.
#   $1  cache directory path (required)
#   $2  any non-empty value skips the tmpfs mount (optional)
cache_path="$1"
skip_tmpfs="${2:-}"
cache_owner="vsts:vsts"
tmpfs_size=5G

if [[ -z "$cache_path" ]]; then
  echo "prepare_docker_cache called without path arg" >&2
  exit 1
fi

# Self-hosted agents run as "azure-pipelines" rather than "vsts".
if ! id -u vsts &> /dev/null; then
  cache_owner=azure-pipelines
fi

echo "Creating cache directory (${cache_path}) ..."
mkdir -p "${cache_path}"

if [[ -z "$skip_tmpfs" ]]; then
  echo "Mount tmpfs directory: ${cache_path}"
  mount -o size="$tmpfs_size" -t tmpfs none "$cache_path"
fi

mkdir -p "${cache_path}/docker" "${cache_path}/bazel"
chown -R "$cache_owner" "${cache_path}"
| true
|
3349dfafb2b8fdfe8eb28672b8decf425a2716e4
|
Shell
|
ajminich/prezto
|
/runcoms/zlogout
|
UTF-8
| 357
| 3.296875
| 3
|
[
"MIT"
] |
permissive
|
#
# Executes commands at logout.
#
# (zsh runcom; [[ -o INTERACTIVE ]] tests a zsh shell option.)
# Execute code only if STDERR is bound to a TTY.
[[ -o INTERACTIVE && -t 2 ]] && {
# Jarvis goodbye message
# (ANSI bright-cyan text, reset afterwards)
echo -e '\033[96mHave a good day, sir.\033[00m'
# when leaving the console clear the screen to increase privacy
# (only at the top-level login shell, i.e. SHLVL 1)
if [ "$SHLVL" = 1 ]; then
[ -x /usr/bin/clear_console ] && /usr/bin/clear_console -q
fi
} >&2
| true
|
87511dc8cfeb9c8af4a45f060e099d90249c05d3
|
Shell
|
opklnm102/workspace
|
/aws/route53/find-route53-record-sets.sh
|
UTF-8
| 674
| 3.921875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# List all Route 53 resource record sets for the hosted zone whose name
# matches the given root domain.
set -o errexit
set -o nounset
set -o pipefail

fail() {
  echo "ERROR: ${*}"
  exit 2
}

usage() {
  cat <<EOM
USAGE: ${0##*/} [root domain]
 e.g. ${0##*/} example.com
EOM
  exit 1
}

# FIX: the original read "[[ $# -lt 1]]" -- the missing space before "]]"
# is a bash syntax error, so the script could never run.
if [[ $# -lt 1 ]]; then
  usage
fi

# Extract the record sets of the requested domain.
hosted_zone_name=${1}

## read hosted-zones
# Resolve the hosted-zone ID from its name (third text-output column).
hosted_zone_id=$(aws route53 list-hosted-zones --output text | grep -- "${hosted_zone_name}" | awk '{print $3}')

# Print every record set of the hosted zone.
for record_set in $(aws route53 list-resource-record-sets --hosted-zone-id "${hosted_zone_id}" --output text)
do
  echo "${record_set}"
done
| true
|
9284c30d789c316c5da7d8e18bca6ab64c8f9d77
|
Shell
|
STOP2/stop2-rpi
|
/bin/csmac
|
UTF-8
| 270
| 2.59375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh -
#
# csmac: change the MAC for the RPi ethernet interface to enable
# networking in lab CK110 in Kumpula.
#
# (Assumes we're running Raspbian.)
lab_mac=9c:8e:99:f4:e5:d1
# The interface must be down while the hardware address is rewritten.
ifconfig eth0 down
ifconfig eth0 hw ether "$lab_mac"
ifconfig eth0 up
# Restart the DHCP client so a fresh lease is requested with the new MAC.
systemctl restart dhcpcd
| true
|
92ad016ea5cc86320ab6e24cbd5a40b18f908cfe
|
Shell
|
ningshuang-yao/D3_introgression
|
/sims/sim_gamma_0.01_seqs_vary_Ne.sh
|
UTF-8
| 1,292
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/bash
# Simulate 100 replicates of gene trees under an introgression model (ms),
# then generate sequence alignments from them (Seq-Gen) at four different
# branch-length scalings to emulate varying effective population size.
#Change working directory
cd /N/dc2/scratch/mhibbins/introgression_statistic/
#Simulate coalescent trees in ms.
for i in {1..100}
do
msdir/ms 4 1000 -T -I 4 1 1 1 1 -ej 4.0 2 1 -ej 0.6 3 2 -ej 0.3 4 3 -es 0.05 3 0.99 -ej 0.05 5 2 | tail -n +4 | grep -v // >revision_seq_sims/vary_Ne/vary_Ne_gamma_0.01/vary_Ne_gamma_0.01_trees_$i.txt
#Split tree file into four smaller temporary files
# (split -500: 500 lines per chunk, producing suffixes _aa .. _ad)
split -500 revision_seq_sims/vary_Ne/vary_Ne_gamma_0.01/vary_Ne_gamma_0.01_trees_$i.txt gamma_0.01_trees_
#Simulate sequences from these temporary tree files using Seq-Gen.
# Each chunk is simulated with a different substitution-rate scaling (-s),
# all appended to the same per-replicate output file.
Seq-Gen/source/seq-gen -m HKY -l 1000 -s 0.00025 <gamma_0.01_trees_aa >> revision_seq_sims/vary_Ne/vary_Ne_gamma_0.01/vary_Ne_gamma_0.01_seqs_$i.txt
Seq-Gen/source/seq-gen -m HKY -l 1000 -s 0.005 <gamma_0.01_trees_ab >> revision_seq_sims/vary_Ne/vary_Ne_gamma_0.01/vary_Ne_gamma_0.01_seqs_$i.txt
Seq-Gen/source/seq-gen -m HKY -l 1000 -s 0.01 <gamma_0.01_trees_ac >> revision_seq_sims/vary_Ne/vary_Ne_gamma_0.01/vary_Ne_gamma_0.01_seqs_$i.txt
Seq-Gen/source/seq-gen -m HKY -l 1000 -s 0.02475 <gamma_0.01_trees_ad >> revision_seq_sims/vary_Ne/vary_Ne_gamma_0.01/vary_Ne_gamma_0.01_seqs_$i.txt
#Remove temporary files
rm gamma_0.01_trees_aa
rm gamma_0.01_trees_ab
rm gamma_0.01_trees_ac
rm gamma_0.01_trees_ad
done
| true
|
c2042a338d638d5dd6f56d003f1e2503de31dfc4
|
Shell
|
shreshthajit/Linux
|
/shellscript_yes.sh
|
UTF-8
| 1,348
| 3.390625
| 3
|
[] |
no_license
|
# Shell-scripting practice notes: earlier exercises are kept commented
# out; only the final 'read into $REPLY' example is live.
# #! /bin/bash
# echo "Hello World"   #i am going to write hello world in this file
# # to run this file: ./shellscript_yes.sh
# # change permission: chmod +x shellscript_yes.sh
# # to know the permission: ls -al
# #two types of variables:system vaiables and user variables
# # below are some system variables after echo
# echo our shell name is $BASH #this BASH variable will give us the name of the bash
# echo Our shell version is $BASH_VERSION
# echo our home directory is $HOME
# echo our current working directory is $PWD # present workig directory
# #below are some user vairables:
# name=Mark
# val=10
# echo The name is $name
# echo value $val
# second:
#! /bin/bash
# echo "Enter Names :"
# read name1 name2 name3  #these will take input from user
# #echo "Entered Name: $name"
# echo "Names : $name1 , $name2, $name3"
# #third:
# #p-flag
# read -p 'username : ' user_var #this will allow me to take input in the same line
# read -sp 'password : ' pass_var #this sp flag will not show the pass when taking input
# echo "username : $user_var"
# echo "password : $pass_var"
#fourth:
# echo "Enter Names : "
# read -a names  #a for array
# echo "Names : ${names[0]},${names[1]},${names[2]}"
#five
# Live example: 'read' with no variable stores the line in $REPLY.
echo "Enter name : "
read
echo "Name : $REPLY " #here REPLY is a built in varibale
| true
|
c86b9f188872c535b558c08dd02e03ec9466d075
|
Shell
|
toxik/disertatie
|
/src/updatelist.sh
|
UTF-8
| 382
| 2.71875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Regenerate the nginx upstream block listing all running node
# containers, then reload nginx inside the 'proxy' container.

# Port each node container listens on.
PORT=3000
# Start the upstream block; ip_hash pins clients to one backend, and the
# 127.0.0.1 entry marked 'down' keeps the block non-empty when no node
# containers are running.
echo "upstream io_nodes { ip_hash; server 127.0.0.1 down; " > ../nginx/conf.d/_io-nodes.conf
# Append one 'server <container ip>:<port>;' line per running container
# whose name/image column contains "node ".
docker ps | grep "node " | awk '{print $1}' | while read cont; do echo "server $(docker inspect -f '{{.NetworkSettings.IPAddress}}' $cont):$PORT;" >> ../nginx/conf.d/_io-nodes.conf ; done
echo } >> ../nginx/conf.d/_io-nodes.conf
# Hot-reload the proxy so the new upstream list takes effect.
docker exec -it proxy nginx -s reload
| true
|
30952e34b43c38041fdb7ed5f0d7ffadb4df7168
|
Shell
|
stefan0xC/aurutils-container
|
/build-aur-image.sh
|
UTF-8
| 1,756
| 3.390625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build an Arch Linux container image with aurutils installed, using
# buildah. If AURUTILS_PACKAGE_FILE is set it installs that prebuilt
# package; otherwise it builds aurutils from the AUR snapshot.
container=$(buildah from archlinux:base-devel)
echo "pacman -Syu devtools pacman-contrib vim vifm"
buildah run $container pacman -Syu devtools pacman-contrib base-devel vim vifm --noconfirm --needed
echo "create user"
# Unprivileged build user; makepkg refuses to run as root.
buildah run $container useradd -m user
buildah run $container sed -i "$ a user ALL=(ALL) NOPASSWD: ALL" /etc/sudoers
echo "install aurutils"
if [ -z "$AURUTILS_PACKAGE_FILE" ]
then
# Build from source: fetch the AUR snapshot, import the maintainer's
# PGP key, and build/install with makepkg as the unprivileged user.
buildah copy --chown user:user $container https://aur.archlinux.org/cgit/aur.git/snapshot/aurutils.tar.gz /tmp/aurutils.tar.gz
buildah config --workingdir /tmp $container
buildah run --user user $container tar xzf aurutils.tar.gz
buildah config --workingdir /tmp/aurutils $container
buildah run --user user $container gpg --recv-keys DBE7D3DD8C81D58D0A13D0E76BC26A17B9B7018A
buildah run --user user $container makepkg -s -i --noconfirm
else
# Install the caller-supplied prebuilt package instead.
buildah copy $container $AURUTILS_PACKAGE_FILE /tmp/aurutils.pkg.tar.xz
buildah run $container pacman -U /tmp/aurutils.pkg.tar.xz --noconfirm
fi
echo "remove package cache"
buildah run $container pacman -Sc --noconfirm
echo "create /home/custompkgs"
buildah run $container install -d /home/custompkgs -o user -g user
echo "copy pacman-extra.conf from devtools"
buildah run $container cp /usr/share/devtools/pacman-extra.conf /etc/aurutils/pacman-custom.conf
echo "enable custom repo"
# Uncomment the 3-line [custom] repo section in pacman.conf by stripping
# the leading '#' from the section header and the two lines after it.
LINESTART=$(buildah run $container grep -nr "\[custom\]" /etc/pacman.conf | cut -d : -f1)
LINEEND=$((LINESTART+2))
buildah run $container sed -i "${LINESTART},${LINEEND} s/#//" /etc/pacman.conf
buildah run $container chown -R user:user /home
buildah config --workingdir /home/user $container
echo "commit image"
buildah commit $container archlinux-aurutils
buildah rm $container
| true
|
adaf6122d37f33066e8c6bcfe8a65456aafa5e38
|
Shell
|
wschaub/prep-kernel
|
/etc/initramfs/post-update.d/zig-kernel
|
UTF-8
| 133
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/sh
# initramfs post-update hook: regenerate the PReP boot image for the
# freshly built kernel, but only when the prep-kernel tool reports this
# platform as supported; otherwise do nothing and exit cleanly so
# update-initramfs does not fail.
set -e

# Kernel version string passed by the initramfs-tools hook machinery.
version="$1"

if prep-kernel --supported >/dev/null 2>&1; then
prep-kernel "${version}"
# BUGFIX: the original used 'return 0' here. 'return' is only valid
# inside a function or a sourced script; at top level it is an error,
# which under 'set -e' made the hook exit non-zero after succeeding.
exit 0
fi
| true
|
ab1bb05ba4627ad3cadc50c32d6c4a58dac4be96
|
Shell
|
slab14/IoT_Sec_Gateway
|
/docker_remote/demo_start.sh.v0
|
UTF-8
| 2,257
| 3.296875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Demo setup: create a busybox container on a remote Docker host, wire
# it into an Open vSwitch bridge, and install flow rules pinning traffic
# between 10.1.1.2 and 10.1.2.1.
# Usage: demo_start.sh <host ip> <docker api port> <ovsdb port>
IP=$1
DOCKER_PORT=$2
OVS_PORT=$3
NAME="demo_cont"
BRIDGE="demo_ovs_br"
CONT_IFACE="eth1"
BRIDGE_REMOTE_PORT=6633
# Create the container (one that will spin)
#curl -X POST -H "Content-Type: application/json" -d '{"Image": "busybox", "Cmd": ["/bin/sh"], "NetworkDisabled": true, "HostConfig": {"AutoRemove": true}, "Tty": true}' http://"$IP":"$DOCKER_PORT"/v1.37/containers/create?name="$NAME"
curl -X POST -H "Content-Type: application/json" -d '{"Image": "busybox", "Cmd": ["/bin/sh"], "HostConfig": {"AutoRemove": true}, "Tty": true}' http://"$IP":"$DOCKER_PORT"/v1.37/containers/create?name="$NAME"
# Start the container
curl -s -X POST http://"$IP":"$DOCKER_PORT"/v1.37/containers/"$NAME"/start
# Add OVS Bridge
sudo ovs-vsctl --db=tcp:"$IP":"$OVS_PORT" --may-exist add-br "$BRIDGE"
# Add port to dataplane external interface
sudo ovs-vsctl --db=tcp:"$IP":"$OVS_PORT" --may-exist add-port "$BRIDGE" enp6s0f1 -- set Interface enp6s0f1 ofport_request=1
# Add port to docker container interface (make sure to include mask for ip address, otherwise assigns /32)
./ovs-docker-remote add-port $BRIDGE $CONT_IFACE $NAME $IP $OVS_PORT $DOCKER_PORT --ipaddress=10.1.2.1/16
# Add route for container
## if container has ip in it, can do this through the container:
# Run 'ip route add' inside the container via the Docker exec API; the
# first call creates the exec instance, the second starts it.
EXEC_ID=`curl -s -X POST -H "Content-Type: application/json" -d '{"AttachStdout": true, "Tty": true, "Cmd": ["ip", "route", "add", "10.1.0.0/16", "dev", "eth0"], "Privileged": true}' http://$IP:$DOCKER_PORT/v1.37/containers/$NAME/exec | jq -r '.Id'`
curl -s -X POST -H "Content-Type: application/json" -d '{"Detach": false, "Tty": true}' http://$IP:$DOCKER_PORT/exec/$EXEC_ID/start
# Add OVS routes
## Make switch listen for remote commands
sudo ovs-vsctl --db=tcp:$IP:$OVS_PORT set-controller $BRIDGE ptcp:$BRIDGE_REMOTE_PORT
## Add flow rules
# Bidirectional forwarding between the external port (1) and the
# container port (2) for the demo address pair.
sudo ovs-ofctl add-flow tcp:$IP:$BRIDGE_REMOTE_PORT "priority=100 ip in_port=1 nw_src=10.1.1.2 nw_dst=10.1.2.1 actions=output:2"
sudo ovs-ofctl add-flow tcp:$IP:$BRIDGE_REMOTE_PORT "priority=100 ip in_port=2 nw_src=10.1.2.1 nw_dst=10.1.1.2 actions=output:1"
| true
|
7d973fec48a36133a2dd2e6ce30432c7ffee5f93
|
Shell
|
yuroyoro/dotfiles
|
/bin/socks-proxy
|
UTF-8
| 769
| 3.875
| 4
|
[] |
no_license
|
#!/bin/sh
# usage :
#   socks-proxy [NETWORK] [on|off] [PROXY] [PORT]
#   socks-proxy Wi-Fi on localhost 1080
#
# Enable/disable the macOS SOCKS firewall proxy for a network service
# via networksetup. Reference (Japanese blog post on SOCKS proxy
# configuration): http://d.hatena.ne.jp/seaborgium/20111227/1324993156

NETWORK=$1   # network service name; defaults to "Ethernet"
SWITCH=$2    # "on" or "off"; defaults to "on"
PROXY=$3     # optional proxy host
PORT=$4      # optional proxy port

if [ -z "${NETWORK}" ]; then
  NETWORK="Ethernet"
fi

# BUGFIX: the original assigned the misspelled variable 'SEITCH' here,
# so SWITCH stayed empty and the intended "on" default never applied
# (and the final state/report tests ran with an empty value).
if [ -z "${SWITCH}" ]; then
  SWITCH="on"
fi

# When both host and port are supplied, configure the proxy endpoint
# before toggling its state.
if [ -n "${PROXY}" ] && [ -n "${PORT}" ]; then
  networksetup -setsocksfirewallproxy "${NETWORK}" ${PROXY} ${PORT}
  echo "Network [${NETWORK}] : set socks proxy to '${PROXY}:${PORT}'"
fi

networksetup -setsocksfirewallproxystate "${NETWORK}" ${SWITCH}

# Quote ${SWITCH}: the original unquoted test was a syntax error when
# SWITCH was empty.
if [ "${SWITCH}" = "on" ]; then
  echo "Network [${NETWORK}] : socks proxy is enabled"
else
  echo "Network [${NETWORK}] : socks proxy is disabled"
fi
| true
|
a1726b17e16cdd6123e04dca7ca66976decd23a4
|
Shell
|
muratcesmecioglu/raspisetup
|
/expand-rootfs.sh
|
UTF-8
| 1,305
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/sh
#
# Taken from http://www.raspberryvi.org/wiki/doku.php/raspi-expand-rootfs
#
# Modded version of expand-rootfs in raspi-config for Kali 2018.2-nexmon-re4son_kernel
#
# Rewrites partition 2 of /dev/mmcblk0 to span the rest of the card,
# then installs a one-shot init script that resizes the filesystem on
# the next boot and reboots.
#
# Get the starting offset of the root partition
PART_START=$(parted /dev/mmcblk0 -ms unit s p | grep "^2" | cut -f 2 -d: | sed 's/.$//')
# BUGFIX: 'return' is only valid inside a function or a sourced script;
# at script top level it is an error. Use 'exit' to abort when the
# partition start sector cannot be determined.
[ "$PART_START" ] || exit 1
# Return value will likely be error for fdisk as it fails to reload the
# partition table because the root fs is mounted
# Delete partition 2 and recreate it starting at the same sector, with
# the default (maximum) end sector.
fdisk /dev/mmcblk0 <<EOF
p
d
2
n
p
2
$PART_START

n
p
w
EOF
# now set up an init.d script
cat <<\EOF > /etc/init.d/resize2fs_once &&
#!/bin/sh
### BEGIN INIT INFO
# Provides:          resize2fs_once
# Required-Start:
# Required-Stop:
# Default-Start: 2 3 4 5 S
# Default-Stop:
# Short-Description: Resize the root filesystem to fill partition
# Description:
### END INIT INFO
. /lib/lsb/init-functions
case "$1" in
  start)
    log_daemon_msg "Starting resize2fs_once" &&
    resize2fs /dev/mmcblk0p2 &&
    rm /etc/init.d/resize2fs_once &&
    update-rc.d resize2fs_once remove &&
    log_end_msg $?
    ;;
  *)
    echo "Usage: $0 start" >&2
    exit 3
    ;;
esac
EOF
chmod +x /etc/init.d/resize2fs_once &&
update-rc.d resize2fs_once defaults &&
read -p "Bolum genisletildi. Sistemi yeniden baslatmak icin bir tusa basin..." &&
reboot
| true
|
5a342e94b6a3350c32ad7a775228a9bb0276e7b8
|
Shell
|
vyevenko/Arch_i3
|
/configs/.bashrc
|
UTF-8
| 1,409
| 3.140625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#
# ~/.bashrc
#
# If not running interactively, don't do anything
[[ $- != *i* ]] && return
alias ls='ls --color=auto'
PS1='[\u@\h \W]\$ '
export BROWSER=/usr/bin/firefox
export EDITOR=/usr/bin/nano
# IK-TECH
export IK_PROJECTS=/home/vyeve/projects/ik-tech
# Kubernetes
# Load kubectl bash completion into the current shell.
source <(kubectl completion bash)
export KUBE_EDITOR=$EDITOR
export KUBECONFIG=$HOME/.kube/config
# Color terminal
# NOTE(review): this 'ls' alias duplicates the one defined above.
alias ls='ls --color=auto'
alias grep='grep --colour=auto'
alias egrep='egrep --colour=auto'
alias fgrep='fgrep --colour=auto'
# Color bash
# Print " (branch)" for the current git branch, or nothing outside a repo.
parse_git_branch() {
git branch 2> /dev/null | sed -e '/^[^*]/d' -e 's/* \(.*\)/ (\1)/'
}
# Prompt: green "HH:MM:SS@user", yellow cwd, red git branch.
PS1='\[\033[01;32m\]$(printf %s "$(date +%k:%M:%S)")@\u\[\033[00m\]:\[\033[01;33m\]\w\[\033[00m\]\[\033[01;31m\]$(parse_git_branch)\[\033[00m\]\$ '
# SSH agent
SSH_ENV="$HOME/.ssh/environment"
# Start a fresh ssh-agent, save its environment (with the echo lines
# commented out) to SSH_ENV, load it, and add the ik-tech key.
function start_agent {
    #echo "Initialising new SSH agent..."
    /usr/bin/ssh-agent | sed 's/^echo/#echo/' > "${SSH_ENV}"
    #echo succeeded
    chmod 600 "${SSH_ENV}"
    . "${SSH_ENV}" > /dev/null
    /usr/bin/ssh-add "$HOME/.ssh/ik-tech_rsa";
}
# Source SSH settings, if applicable
# Reuse a previously saved agent if its process is still alive;
# otherwise start a new one.
if [ -f "${SSH_ENV}" ]; then
    . "${SSH_ENV}" > /dev/null
    #ps ${SSH_AGENT_PID} doesn't work under cywgin
    ps -ef | grep ${SSH_AGENT_PID} | grep ssh-agent$ > /dev/null || {
        start_agent;
    }
else
    start_agent;
fi
# Go
export GOPATH=$HOME/go
export PATH=$GOPATH/bin:$PATH
| true
|
f7f7d27a144dbd2d2b24ab75fe3441026aebfaa2
|
Shell
|
Carloshp11/Master-in-Data-Science
|
/psql/Weather Tables/add_column.sh
|
UTF-8
| 418
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# Prepend the source file name (as a new first column, ';'-separated) to
# every row of each CSV in the current directory, then overwrite the
# header row with the fixed weather-station field list. Results are
# written to ./modified_csv/ under the original base name.

# Robustness: make sure the output directory exists (the original
# assumed it was already there).
mkdir -p ./modified_csv

for filename in ./*.csv; do
  # Strip the leading "./" so the output keeps just the base name.
  no_dot="${filename#./}"
  # BUGFIX: the original awk program printed '$filename'. In awk, '$x'
  # is a FIELD reference; a non-numeric string coerces to 0, so
  # '$filename' meant '$0' and each line was printed twice separated by
  # ';' instead of being prefixed with the file name. Use the plain
  # variable 'filename' to get the intended prefix column.
  awk --assign filename="$filename" '{ print filename ";" $0 }' "$filename" > "./modified_csv/$no_dot"
  # Replace the first line of the output with the canonical header.
  sed -i "1s/.*/Geo;Id;Fecha;Tmax;HTmax;Tmin;HTmin;Tmed;Racha;HRacha;Vmax;HVmax;TPrec;Prec1;Prec2;Prec3;Prec4/" "./modified_csv/$no_dot"
done
| true
|
c74a91c92030a60907d15672e24c0a6b15831d25
|
Shell
|
jameskellynet/container-networking-ansible
|
/roles/opencontrail/files/ifup-vhost
|
UTF-8
| 1,755
| 3.546875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# ifup helper for OpenContrail "vhost" interfaces: loads the vrouter
# kernel module, creates the vhost device cross-connected to a physical
# NIC, moves the IP configuration from the physical NIC to the vhost
# device, and installs the default route.
. /etc/init.d/functions

cd /etc/sysconfig/network-scripts
. ./network-functions

[ -f ../network ] && . ../network

# $1 is the ifcfg-* config name; source_config populates DEVICE,
# MACADDR, PHYSDEV, IPADDR, GATEWAY, etc.
CONFIG=${1}
need_config "${CONFIG}"
source_config

if ! /sbin/modprobe vrouter >/dev/null 2>&1; then
    net_log $"OpenContrail vrouter kernel module not available"
    exit 1
fi

# Prefer an explicitly configured MAC; otherwise inherit the physical
# device's hardware address.
if [ -n "${MACADDR}" ]; then
    hwaddr=${MACADDR}
else
    if [ -n "${PHYSDEV}" ]; then
	hwaddr=$(cat /sys/class/net/${PHYSDEV}/address)
    fi
fi

# Create the vhost device only if it does not already exist, and
# register both interfaces with the vrouter (vif) cross-connected.
if [ ! -d /sys/class/net/${DEVICE} ]; then
    ip link add ${DEVICE} type vhost || net_log $"Error creating interface ${DEVICE}"
    if [ -n "${hwaddr}" ]; then
	ip link set ${DEVICE} address ${hwaddr} || net_log $"Error setting mac-address on ${DEVICE}"
    fi
    if [ -n "${PHYSDEV}" ]; then
	vif --add ${PHYSDEV} --mac ${hwaddr} --vrf 0 --vhost-phys --type physical >/dev/null 2>&1 || net_log $"Error adding host interface to vrouter module"
	vif --add ${DEVICE} --mac ${hwaddr} --vrf 0 --type vhost --xconnect ${PHYSDEV} >/dev/null 2>&1 || net_log $"Error setting cross-connect on host interface"
    fi
fi

if [ -n "${IPADDR}" ]; then
    ip addr add dev ${DEVICE} ${IPADDR} || net_log $"Error configuring IP address on interface"
fi

# Stop any dhclient bound to the physical NIC and clear its addresses;
# the vhost device owns the IP configuration from here on.
if [ -n "${PHYSDEV}" ]; then
    if [ -f "/var/run/dhclient-${PHYSDEV}.pid" ]; then
	pid=$(cat /var/run/dhclient-${PHYSDEV}.pid)
	kill $pid && rm /var/run/dhclient-${PHYSDEV}.pid
    fi
    ip addr flush ${PHYSDEV} || net_log $"Error flushing ip addresses on ${PHYSDEV}"
fi

ip link set ${DEVICE} up || net_log $"Error setting link state up"

if [ -n "${GATEWAY}" ]; then
    ip route replace default via ${GATEWAY} dev ${DEVICE} || net_log $"Error adding default gateway"
fi

exec /etc/sysconfig/network-scripts/ifup-post ${CONFIG} ${2}
| true
|
29ab9482fee08bd333ea1803ec3cdaf4ff0be8cb
|
Shell
|
mariadb-corporation/build-scripts-vagrant
|
/check_arch.sh
|
UTF-8
| 2,109
| 3.125
| 3
|
[] |
no_license
|
# Detect the machine architecture and C library version, then export
# mariadbd_link / mariadbd_file pointing at the matching MariaDB tarball
# for later build steps. x86_64 hosts with glibc < 2.14 (or RHEL5 /
# SLES11) get the glibc_214-compat build; POWER hosts are split by
# endianness. NOTE(review): no shebang -- presumably this file is
# sourced or run by a bash caller so the exports are visible afterwards.
cat /proc/cpuinfo | grep cpu | grep POWER
if [ $? -ne 0 ] ; then
  # Non-POWER (x86_64) path. dpkg presence distinguishes deb-based from
  # rpm-based distributions.
  dpkg --version
  if [ $? == 0 ] ; then
    dpkg -l | grep libc6
    export libc6_ver=`dpkg -l | sed "s/:amd64//g" |awk '$2=="libc6" { print $3 }'`
    # res=0 when libc6 is older than 2.14 (compat build needed).
    dpkg --compare-versions $libc6_ver lt 2.14
    res=$?
  else
    export libc6_ver=`rpm --query glibc --qf "%{VERSION}"`
    # rpmdev-vercmp exits 12 when the first version is smaller.
    rpmdev-vercmp $libc6_ver 2.14
    if [ $? == 12 ] ; then
      res=0
    else
      res=1
    fi
    # RHEL/CentOS 5 and SLES 11 always get the compat build.
    cat /etc/redhat-release | grep " 5\."
    if [ $? == 0 ] ; then
      res=0
    fi
    cat /etc/issue | grep "SUSE" | grep " 11 "
    if [ $? == 0 ] ; then
      res=0
    fi
  fi
  set -x
  # res != 0 here means glibc >= 2.14... NOTE(review): the branch labels
  # look inverted relative to the file names (glibc_214 build chosen when
  # the comparison FAILED); confirm intent against the Jenkins artifacts.
  if [ $res != 0 ] ; then
    export mariadbd_link="http://jenkins.engskysql.com/x/mariadb-5.5.42-linux-glibc_214-x86_64.tar.gz"
    export mariadbd_file="mariadb-5.5.42-linux-glibc_214-x86_64.tar.gz"
    echo "embedded_ver: $embedded_ver"
    if [ "$embedded_ver" == "10.0" ] ; then
      export mariadbd_link="http://jenkins.engskysql.com/x/mariadb-10.0.24-linux-glibc_214-x86_64.tar.gz"
      export mariadbd_file="mariadb-10.0.24-linux-glibc_214-x86_64.tar.gz"
    fi
  else
    export mariadbd_link="http://jenkins.engskysql.com/x/mariadb-5.5.42-linux-x86_64.tar.gz"
    export mariadbd_file="mariadb-5.5.42-linux-x86_64.tar.gz"
    if [ "$embedded_ver" == "10.0" ] ; then
      export mariadbd_link="http://jenkins.engskysql.com/x/mariadb-10.0.24-linux-x86_64.tar.gz"
      export mariadbd_file="mariadb-10.0.24-linux-x86_64.tar.gz"
    fi
  fi
else
  # POWER path: pick the tarball by byte order (0 => big-endian ppc64).
  endian=`echo -n I | od -to2 | head -n1 | cut -f2 -d" " | cut -c6`
  if [ $endian == 0 ] ; then
    export mariadbd_link="http://jenkins.engskysql.com/x/mariadb-5.5.41-linux-ppc64.tar.gz"
    export mariadbd_file="mariadb-5.5.41-linux-ppc64.tar.gz"
    cat /etc/redhat-release | grep " 6\."
    if [ $? == 0 ] ; then
      export mariadbd_link="http://jenkins.engskysql.com/x/rhel6/mariadb-5.5.41-linux-ppc64.tar.gz"
    fi
  else
    export mariadbd_link="http://jenkins.engskysql.com/x/mariadb-5.5.41-linux-ppc64le.tar.gz"
    export mariadbd_file="mariadb-5.5.41-linux-ppc64le.tar.gz"
  fi
fi
| true
|
340f6620ea4215c072dbeefc67f70fcea840aba8
|
Shell
|
jsunmapr/myazure
|
/run.sh
|
UTF-8
| 1,439
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
# Upload the current folder's deployment files to an Azure storage
# container, then create a resource group and deploy the ARM template
# from the uploaded mainTemplate.json.
clear
azure config mode arm
subscriptionId="f1766062-4c0b-4112-b926-2508fecc5bdf"
azure account set $subscriptionId
storageAccountResourceGroupName="mapr"
storageAccountName="maprui"
containerName="deploy"
# Extract the first storage-account key from the JSON key listing.
storageAccountKey=$(azure storage account keys list $storageAccountName --resource-group $storageAccountResourceGroupName --json | jq .[0].value | tr -d '"')
for f in *.*
do
 # Upload all the files from the current folder to an Azure storage container
 echo "Uploading $f"
 azure storage blob upload --blobtype block --blob $f --file $f --container $containerName --account-name $storageAccountName --account-key $storageAccountKey --quiet
done
# Create Resource Group
newResourceGroupName="rgmapr1234"
location="westus"
azure group create --name $newResourceGroupName --location $location
# Validate template
templateUri="https://$storageAccountName.blob.core.windows.net/$containerName/mainTemplate.json"
# Valid parameters file:
# - mainTemplate.password.newVNet.parameters.json
# - mainTemplate.ssh.newVNet.parameters.json
# - mainTemplate.password.existingVNet.parameters.json
# - mainTemplate.ssh.existingVNet.parameters.json
parametersFile="mainTemplate.ssh.existingVNet.parameters.json"
deploymentName="deploy$newResourceGroupName"
echo "Deploying $parametersFile"
azure group deployment create --resource-group $newResourceGroupName --template-uri $templateUri --parameters-file $parametersFile --name $deploymentName
| true
|
0fe8552300591c8ddd7fe7219bf1dd7fbf768d3d
|
Shell
|
macciomauro/docker-import-database
|
/restore-db.sh
|
UTF-8
| 3,244
| 4.0625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Interactive helper: read connection settings from a chosen .env file,
# let the user pick a gzipped SQL dump, copy it into the database
# container, drop/recreate the target database, and import the dump.
cd ./
echo "-----------------------------------------------------";
echo "--- Welcome to the Docker Database Restore System ---";
echo "-----------------------------------------------------";
echo "Tell me the .env file name and press [ENTER]:";
read ENV_NAME
if [ "${ENV_NAME}" == "" ]
then
	echo "Empty .env name given";
	exit 1;
fi
if [ ! -f "${ENV_NAME}.env" ]; then
    echo "-----------------------------------------------------";
    echo "[ERROR] ${ENV_NAME}.env file not found";
    echo "-----------------------------------------------------";
    exit 1;
fi
# Export every non-comment KEY=VALUE line from the chosen .env file.
export $(egrep -v '^#' ${ENV_NAME}.env | xargs)
if [ -z "${DUMP_DIR}" ] || [ -z "${MYSQL_USER}" ] || [ -z "${MYSQL_PASSWORD}" ] || [ -z "${DATABASE_CONTAINER_NAME}" ] || [ -z "${MYSQL_DATABASE_NAME}" ]
then
    echo "-----------------------------------------------------";
    echo "[ERROR] Some .env values is missing or wrong";
    echo "-----------------------------------------------------";
    exit 1;
fi
echo "-----------------------------------------------------";
echo "Using ${ENV_NAME}.env";
echo "-----------------------------------------------------";
echo "Choose a gzipped file (*.sql.gz) to dump";
echo "ATTENTION! All previous data will be lost!";
echo "-----------------------------------------------------";
# 'select' presents a numbered menu of the dump files.
select FILENAME in "${DUMP_DIR}"/*.sql.gz;
do
     if [[ $FILENAME = "" ]]; then
        echo "-----------------------------------------------------";
        echo "[ERROR] Empty Choise";
        echo "-----------------------------------------------------";
        break;
        # NOTE(review): this 'exit -1' is unreachable -- 'break' above
        # leaves the select loop first, so an invalid choice falls
        # through to the final 'exit' with status 0.
        exit -1;
    else
        CHOSEN_DUMP=$(basename "$FILENAME");
        echo "-----------------------------------------------------";
        echo "You picked ${CHOSEN_DUMP}";
        echo "-----------------------------------------------------";
        echo "Start importing...";
        docker cp ${DUMP_DIR}/${CHOSEN_DUMP} ${DATABASE_CONTAINER_NAME}:/db_dump.sql.gz
        echo "-----------------------------------------------------";
        echo "Import completed";
        echo "-----------------------------------------------------";
        echo "Start dumping to ${MYSQL_DATABASE_NAME}...";
        # Recreate the database from scratch, then stream the gunzipped
        # dump into it inside the container.
        docker exec ${DATABASE_CONTAINER_NAME} bash -c "mysql -u ${MYSQL_USER} -p${MYSQL_PASSWORD} --default-character-set=utf8 -e 'DROP DATABASE IF EXISTS ${MYSQL_DATABASE_NAME}; CREATE DATABASE IF NOT EXISTS ${MYSQL_DATABASE_NAME} DEFAULT CHARACTER SET utf8 COLLATE utf8_unicode_ci;'"
        docker exec ${DATABASE_CONTAINER_NAME} bash -c "zcat /db_dump.sql.gz | mysql -u ${MYSQL_USER} -p${MYSQL_PASSWORD} --default-character-set=utf8 ${MYSQL_DATABASE_NAME}"
        echo "-----------------------------------------------------";
        echo "Dump completed";
        echo "-----------------------------------------------------";
        echo "Removing import file...";
        docker exec ${DATABASE_CONTAINER_NAME} bash -c "rm /db_dump.sql.gz"
        echo "-----------------------------------------------------";
        echo "File removed";
        echo "-----------------------------------------------------";
        echo "Bye!";
        echo "-----------------------------------------------------";
        break;
    fi
done
exit;
| true
|
8c45f1bf05dfa269b5dede473cc4adf86e27ca60
|
Shell
|
mandric/omi-automation-sandbox
|
/testing-dissectors/gen-data
|
UTF-8
| 1,024
| 4.1875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Generate JSON dissector data from a pcap-to-lua.txt mapping file.

set -o errexit
set -o pipefail
set -o noclobber

PCAP_BASE="${PCAP_BASE-./Directory/Data}"
LUA_BASE="${LUA_BASE-./wireshark-lua}"

INPUT="$1"       # path to pcap-to-lua.txt ("<pcap>:<lua>" per line)
OUTPUT_DIR="$2"  # destination root for the generated JSON files
SELF=$(basename "$BASH_SOURCE")

# Print the help text.
function usage {
  cat <<EOF
Usage: $SELF <path to pcap-to-lua.txt> <output dir>

Generate JSON dissector data from a pcap-to-lua.txt file. The results are
saved in output dir with the same directory structure taken from
pcap-to-lua.txt.

Asserts every pcap file has a corresponding lua file.

Asserts pcap entries are unique. Uses \`set -o noclobber\` so overwriting
fails.
EOF
}

if [ -z "$1" ] || [ -z "$2" ]; then
  usage
  exit 1
fi

mkdir -p "$OUTPUT_DIR"

# Robustness fixes vs the original:
#  - read with IFS= and -r so backslashes and surrounding whitespace in
#    paths survive (plain 'read line' mangles them);
#  - read the file directly instead of piping 'cat' into the loop;
#  - split with parameter expansion instead of unquoted 'echo $line | sed',
#    which word-split paths containing spaces or glob characters.
while IFS= read -r line; do
  pcap="${line%%:*}"   # text before the first ':' (matches sed 's/:.*//')
  lua="${line##*:}"    # text after the last ':'  (matches sed 's/.*://')
  key="$(./get-key "$LUA_BASE/$lua")"
  dir=$(dirname "$pcap")
  file="$(basename "$pcap" .pcap).json"
  mkdir -p "$OUTPUT_DIR/$dir"
  ./parse "$PCAP_BASE/$pcap" "$LUA_BASE/$lua" "$key" > \
    "$OUTPUT_DIR/$dir/$file"
done < "$INPUT"
| true
|
b903083f496297bf94c794a95f2679f962d95eb3
|
Shell
|
LIP-Computing/bench_gpu
|
/job-scripts/submit-gromacs.sh
|
UTF-8
| 577
| 2.6875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
########### Examples of batch system options for job submition
# #$ -l gpu
# #$ -l release=el7
# #$ -P HpcGrid
######################################

# Shared directory visible to the worker nodes.
store_dir=$HOME

# udocker execution mode - see the udocker documentation.
exec_mode=F3

PATH=${store_dir}/bin:$PATH
UDOCKER_DIR=${store_dir}/.udocker

# Name of the udocker container image holding GROMACS.
container=gr2018
WDIR=$(pwd)

echo $PATH
echo $UDOCKER_DIR
echo "----------------"
echo "Doing the setup"
# Prepare the container for the chosen execution mode with NVIDIA support.
udocker setup --execmode=${exec_mode} --nvidia ${container}
echo "RUNNING"
# Run the GROMACS MD engine on the bundled input, single OpenMP thread,
# pinned to GPU 0.
udocker run -w /home ${container} gmx mdrun -s /home/md.tpr -ntomp 1 -gpu_id 0
| true
|
15cf0c2d67ffa2af3118e3dab8a3c474fba4df6c
|
Shell
|
bigjimbeef/cgconfigure
|
/run.sh
|
UTF-8
| 7,715
| 4.125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# cgconfigure
#
# run.sh
#
# This script will attempt to automatically configure the parameters
# used by cgminer, in a vague attempt to make something a bit more
# palatable than just repeatedly editing and rebooting.
#

# Exit codes.
PARSE_ERROR=1
NO_CGMINER=2
CGMINER_NO_PARAM=3
SUCCESS=42

# Warnings.
# Sentinel returned by getHashRate when cgminer reports no hash rate.
NO_START="NULL"

# For starting cgminer.
# X display plus AMD OpenCL tuning knobs required by scrypt mining.
export DISPLAY=:0
export GPU_MAX_ALLOC_PERCENT=100
export GPU_USE_SYNC_OBJECTS=1

# Declare the data map for parsing parameters from the config file.
# Keys are "<param>-<property>" (e.g. thread_concurrency-min).
declare -A paramData
# $1 - file name
# $2 - value
# Persist the value as the contents of data/<file name>.
# (The original comments documented $0/$1, but shell function arguments
# start at $1; the code always used $1/$2.)
function saveValue
{
	DATA_DIR="data"
	filePath=$DATA_DIR/$1
	# Robustness: create the data directory first -- the original
	# 'touch' failed outright when ./data did not exist.
	mkdir -p "$DATA_DIR"
	echo "$2" > "$filePath"
}
# $1 - file name
# Prints the stored value (first line of data/<file name>) on stdout.
function loadValue
{
	DATA_DIR="data"
	# BUGFIX: the original built the path from $0 -- the script name,
	# not a function argument -- so it never looked in data/. It also
	# did 'return $value': 'return' takes a numeric status, so
	# returning an arbitrary string is an error. Report the value on
	# stdout instead, matching how getDatum/getHashRate communicate.
	filePath=$DATA_DIR/$1
	value=$(head -n 1 "$filePath")
	echo "$value"
}
# Parse the parameters from the options file, storing their values
# for use at various points throughout the program.
# Reads options.conf line by line; each "key value" pair matching REGEX
# is assigned to the corresponding global (CGMINER_DIR, CGMINER_URL,
# TC_TIMEOUT, ...). Unknown keys are silently ignored.
function parseOptions
{
	OPTIONS_FILE="options.conf"
	# Key = letters/underscores; value = alnum plus / : + . characters.
	REGEX='^([A-Za-z_]+) ([0-9|a-zA-Z/:\+.]+)'
	while read line
	do
		# Clear the key so a non-matching line does not reuse the
		# previous iteration's match.
		unset key
		if [[ ( $line =~ $REGEX ) ]]; then
			key=${BASH_REMATCH[1]}
			val=${BASH_REMATCH[2]}
		fi
		if [ -n $key ]; then
			case $key in
				hide_startup_message ) NO_STARTUP_MSG=$val;;
				cgminer_install_dir ) CGMINER_DIR=$val;;
				cgminer_api_port ) CGMINER_API_PORT=$val;;
				cgminer_url ) CGMINER_URL=$val;;
				cgminer_user ) CGMINER_USER=$val;;
				cgminer_pass ) CGMINER_PASS=$val;;
				tc_timeout ) TC_TIMEOUT=$val;;
				* ) ;;
			esac
		fi
	done < $OPTIONS_FILE
}
# Parser for the config file.
# Will catch basic syntax errors and exit appropriately.
# Format of params.conf:
#   <param_name>
#   {
#       <option>: <number>
#       ...
#   }
# Each option is stored in the global paramData map under the key
# "<param_name>-<option>". Mismatched braces abort with PARSE_ERROR.
function parseParams
{
	PARAMS_FILE="params.conf"

	COMMENT_REGEX='^#'
	NAME_REGEX='^([A-Za-z_]+)$'
	OPTION_REGEX='([A-Za-z]+):[ ]*([0-9]*)$'
	OPEN_SCOPE_REGEX='^\{$'
	CLOSE_SCOPE_REGEX='^\}$'

	currentParam=""
	inParam=false
	lineNum=0

	while read line
	do
		lineNum=$((lineNum+1))

		# Ignore comments.
		[[ $line =~ $COMMENT_REGEX ]] && \
			continue

		# Scope comprehension.
		[[ $line =~ $OPEN_SCOPE_REGEX ]] && \
			inParam=true
		[[ $line =~ $CLOSE_SCOPE_REGEX ]] && \
			inParam=false

		if [[ $line =~ $NAME_REGEX ]]; then
			# A bare name is only legal OUTSIDE a braced block.
			if ! $inParam; then
				name=${BASH_REMATCH[1]}
				currentParam=$name
			else
				echo "Error in $PARAMS_FILE: Mismatched braces near line $lineNum!"
				exit $PARSE_ERROR
			fi
		elif [[ $line =~ $OPTION_REGEX ]]; then
			# "option: value" lines are only legal INSIDE a block.
			if $inParam; then
				key=${BASH_REMATCH[1]}
				val=${BASH_REMATCH[2]}

				mapKey=$currentParam-$key
				paramData[$mapKey]=$val
			else
				echo "Error in $PARAMS_FILE: Mismatched braces near line $lineNum!"
				exit $PARSE_ERROR
			fi
		fi
	done < $PARAMS_FILE
}
# Look up one property of a tuning parameter in the paramData map.
# $1 - param name
# $2 - param property (e.g. min/max)
function getDatum
{
	lookupKey="$1-$2"
	echo "${paramData[$lookupKey]}"
}
# Get the hash rate of the zeroth GPU, for checking results.
# Queries the local cgminer API over TCP and extracts the "MHS av"
# field; prints the MH/s figure, or the NO_START sentinel when the rate
# is zero (i.e. cgminer has not started hashing).
function getHashRate
{
	MHASH_REGEX='\"MHS av\":([0-9]+.[0-9][0-9])'

	# JSON command sent to cgminer's API socket via netcat.
	response=`echo "{ \"command\":\"gpu\", \"parameter\":\"0\" }" | nc 127.0.0.1 $CGMINER_API_PORT`

	mhash=0
	[[ $response =~ $MHASH_REGEX ]] && \
		mhash=${BASH_REMATCH[1]}

	# bc handles the floating-point comparison bash cannot.
	if (( $(echo "$mhash == 0" | bc -l) )); then
		echo $NO_START
	else
		echo $mhash
	fi
}
# Start cgminer for tweaking settings.
# $1 - Setting name (tc|eng|mem), mapped to the matching cgminer flag
# $2 - Setting value
# Launches cgminer in the background (nohup) with the API listener on,
# connected using the pool credentials parsed from options.conf.
function startCgMiner
{
	case $1 in
		tc ) setting_key="thread-concurrency";;
		eng ) setting_key="gpu-engine";;
		mem ) setting_key="gpu-memclock";;
		# NOTE(review): 'break' is only meaningful inside a loop; in
		# this case arm it is effectively a no-op, so an unknown
		# setting falls through with setting_key unset and is caught
		# by the check below.
		* ) break;;
	esac
	setting_val=$2

	if [[ -z $setting_key || -z $setting_val ]]; then
		echo "Error: no settings supplied for cgminer to tweak!";
		exit $CGMINER_NO_PARAM
	fi

	CGMINER_CONNECTION="-o $CGMINER_URL -O $CGMINER_USER:$CGMINER_PASS"

	# Start the cgminer instance.
	nohup $CGMINER_DIR/./cgminer --scrypt $CGMINER_CONNECTION --$setting_key $setting_val --api-listen >/dev/null 2>/dev/null &
}
# Echo the current UNIX timestamp (seconds since the epoch).
function getCurrentTime
{
	echo "$(date +%s)"
}
# Echo the deadline timestamp: now plus the configured TC_TIMEOUT.
function getTargetTime
{
	echo "$(( $(getCurrentTime) + TC_TIMEOUT ))"
}
# Sweep thread-concurrency from min to max (step-wise, bounds from
# params.conf), starting cgminer at each value and waiting up to
# TC_TIMEOUT seconds for a non-zero hash rate. The last value that
# produced hashing is reported as the working TC.
function tuneThreadConcurrency
{
	minTC=$(getDatum thread_concurrency min)
	maxTC=$(getDatum thread_concurrency max)
	stepTC=$(getDatum thread_concurrency step)

	currentTC=$minTC
	lastTC=0
	while sleep 1; do
		echo -n "Attempting to start with TC $currentTC"
		startCgMiner tc $currentTC

		active=0
		targetTime=$(getTargetTime)
		# Poll once per second until the miner hashes or we time out.
		while sleep 1; do
			echo -n "."
			if [[ $(getCurrentTime) -lt $targetTime ]]; then
				rate=$(getHashRate)
				if [[ $rate != $NO_START ]]; then
					echo " [OK]"
					# Kill the cgminer process, and wait until it exits.
					# ($! is the background PID saved by startCgMiner.)
					kill $! >/dev/null 2>&1
					while pgrep cgminer >/dev/null; do
						sleep 1
					done
					active=1

					# Break from the sleep 1 while loop.
					break;
				fi
			else
				break;
			fi
		done

		# This TC never produced a hash rate: stop the sweep here.
		if [[ $active -eq 0 ]]; then
			break;
		fi

		lastTC=$currentTC
		currentTC=$((currentTC+$stepTC))
		if [[ $lastTC -ge $maxTC ]]; then
			break;
		fi
	done

	# Tidy up, just in case.
	killall cgminer

	echo
	echo "TC: $lastTC"
}
# Entry point: load options, locate the cgminer binary (config value
# first, then 'type cgminer' as a fallback), parse tuning parameters,
# optionally confirm with the user, and run the TC sweep.
function main
{
	# Read the options file into memory.
	parseOptions

	# At this point, we should have a cgminer install dir set.
	foundCgMiner=false
	if [[ -z $CGMINER_DIR ]]; then
		echo "Warning: no cgminer config file supplied! Will attempt to find automatically..."
	else
		# Check this is the install dir.
		if $($CGMINER_DIR/./cgminer --version >/dev/null 2>&1); then
			foundCgMiner=true
		else
			echo "Warning: cgminer install dir set incorrectly in config file."
		fi
	fi

	# Last ditch attempt to auto-find cgminer with type.
	if ! $foundCgMiner; then
		REGEX='is ([a-z/]+)'
		cgminerDir=$(type cgminer) && \
		[[ $cgminerDir =~ $REGEX ]] && \
		CGMINER_DIR=${BASH_REMATCH[1]}
	fi

	if [[ -z $CGMINER_DIR ]]; then
		echo "Error: Unable to locate cgminer. Please check config file and ensure it is installed."
		exit $NO_CGMINER
	fi

	echo "cgminer install found at $CGMINER_DIR"
	dateTime=`date +'%T on %x'`
	echo -e "Commencing configuration at $dateTime.\n"

	# Parse the default parameters from the config file.
	parseParams

	# Skip the confirmation prompt when hide_startup_message is set.
	continue=false
	if [[ -z $NO_STARTUP_MSG || $NO_STARTUP_MSG == "0" ]]; then
		echo "Welcome to cgconfigure."
		echo -e "[Please edit params.conf to tweak basic parameter selection.]\n"
		echo "Start configuration now?"
		select yn in "Yes" "No"; do
			case $yn in
				Yes ) continue=true; break;;
				No ) exit;;
			esac
		done
	else
		continue=true
	fi

	# NOTE(review): '[ continue ]' tests the non-empty STRING "continue",
	# so it is always true regardless of the variable's value; presumably
	# '[ $continue = true ]' was intended.
	if [ continue ]; then
		tuneThreadConcurrency
	fi
}
# Execute the main function.
main
# Tidy up dir on exit.
# Move the OpenCL kernel binaries cgminer left behind into data/bin_files
# (the glob stays literal and mv complains if no .bin files exist).
mv *.bin data/bin_files/
# Great success.
exit $SUCCESS
| true
|
7ec70daf371b7448302fc8e9b3cb9d074457fdf3
|
Shell
|
troylee/kaldi
|
/egs/rm/s1/data_prep/run.sh
|
UTF-8
| 4,271
| 3.515625
| 4
|
[
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
#!/bin/bash
# This script should be run from the directory where it is located (i.e. data_prep)
# Copyright 2010-2011 Microsoft Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABLITY OR NON-INFRINGEMENT.
# See the Apache 2 License for the specific language governing permissions and
# limitations under the License.
# The input is the 3 CDs from the LDC distribution of Resource Management.
# The script's argument is a directory which has three subdirectories:
# rm1_audio1 rm1_audio2 rm2_audio
# Note: when creating your own data preparation scripts, it's a good idea
# to make sure that the speaker id (if present) is a prefix of the utterance
# id, that the output scp file is sorted on utterance id, and that the
# transcription file is exactly the same length as the scp file and is also
# sorted on utterance id (missing transcriptions should be removed from the
# scp file using e.g. ../scripts/filter_scp.pl)
# You get get some guidance how to deal with channels and segments (not
# an issue in RM) from ../scripts/make_mfcc_train_segs.sh.
if [ $# != 1 ]; then
echo "Usage: ./run.sh /path/to/RM"
exit 1;
fi
RMROOT=$1
if [ ! -d $RMROOT/rm1_audio1 -o ! -d $RMROOT/rm1_audio2 ]; then
echo "Error: run.sh requires a directory argument that contains rm1_audio1 and rm1_audio2"
exit 1;
fi
if [ ! -d $RMROOT/rm2_audio ]; then
echo "**Warning: $RMROOT/rm2_audio does not exist; won't create spk2gender.map file correctly***"
sleep 1
fi
# Collect the training .sph files, excluding the sa*/sb* adaptation
# sentences filtered out by the perl one-liner.
(
find $RMROOT/rm1_audio1/rm1/ind_trn -iname '*.sph';
find $RMROOT/rm1_audio2/2_4_2/rm1/ind/dev_aug -iname '*.sph';
) | perl -ane ' m:/sa\d.sph:i || m:/sb\d\d.sph:i || print; ' > train_sph.flist
# make_trans.pl also creates the utterance id's and the kaldi-format scp file.
./make_trans.pl trn train_sph.flist $RMROOT/rm1_audio1/rm1/doc/al_sents.snr train_trans.txt train_sph.scp
mv train_trans.txt tmp; sort -k 1 tmp > train_trans.txt
mv train_sph.scp tmp; sort -k 1 tmp > train_sph.scp
sph2pipe=`cd ../../../..; echo $PWD/tools/sph2pipe_v2.5/sph2pipe`
if [ ! -f $sph2pipe ]; then
echo "Could not find the sph2pipe program at $sph2pipe";
exit 1;
fi
# Turn the sphere scp into a wav scp whose entries are sph2pipe
# conversion pipelines rather than plain file paths.
awk '{printf("%s '$sph2pipe' -f wav %s |\n", $1, $2);}' < train_sph.scp > train_wav.scp
# Utterance ids embed the speaker id; extract it for utt2spk/spk2utt.
cat train_wav.scp | perl -ane 'm/^(\w+_(\w+)\w_\w+) / || die; print "$1 $2\n"' > train.utt2spk
cat train.utt2spk | sort -k 2 | ../scripts/utt2spk_to_spk2utt.pl > train.spk2utt
# Build the per-session test sets from the official index files.
for ntest in 1_mar87 2_oct87 4_feb89 5_oct89 6_feb91 7_sep92; do
n=`echo $ntest | cut -d_ -f 1`
test=`echo $ntest | cut -d_ -f 2`
root=$RMROOT/rm1_audio2/2_4_2
for x in `grep -v ';' $root/rm1/doc/tests/$ntest/${n}_indtst.ndx`; do
echo "$root/$x ";
done > test_${test}_sph.flist
done
# make_trans.pl also creates the utterance id's and the kaldi-format scp file.
for test in mar87 oct87 feb89 oct89 feb91 sep92; do
./make_trans.pl ${test} test_${test}_sph.flist $RMROOT/rm1_audio1/rm1/doc/al_sents.snr test_${test}_trans.txt test_${test}_sph.scp
mv test_${test}_trans.txt tmp; sort -k 1 tmp > test_${test}_trans.txt
mv test_${test}_sph.scp tmp; sort -k 1 tmp > test_${test}_sph.scp
awk '{printf("%s '$sph2pipe' -f wav %s |\n", $1, $2);}' < test_${test}_sph.scp > test_${test}_wav.scp
cat test_${test}_wav.scp | perl -ane 'm/^(\w+_(\w+)\w_\w+) / || die; print "$1 $2\n"' > test_${test}.utt2spk
cat test_${test}.utt2spk | sort -k 2 | ../scripts/utt2spk_to_spk2utt.pl > test_${test}.spk2utt
done
# Speaker-to-gender map from both RM speaker doc files.
cat $RMROOT/rm1_audio2/2_5_1/rm1/doc/al_spkrs.txt \
$RMROOT/rm2_audio/3-1.2/rm2/doc/al_spkrs.txt | \
perl -ane 'tr/A-Z/a-z/;print;' | grep -v ';' | \
awk '{print $1, $2}' | sort | uniq > spk2gender.map
# Language model (word-pair grammar) and lexicon.
../scripts/make_rm_lm.pl $RMROOT/rm1_audio1/rm1/doc/wp_gram.txt > G.txt
# Getting lexicon
../scripts/make_rm_dict.pl $RMROOT/rm1_audio2/2_4_2/score/src/rdev/pcdsril.txt > lexicon.txt
echo Succeeded.
| true
|
e51f79bffec5475770c472ca4f191d2f32d3cdb2
|
Shell
|
apache/openoffice
|
/main/desktop/scripts/unopkg.sh
|
UTF-8
| 2,807
| 3.03125
| 3
|
[
"Apache-2.0",
"CPL-1.0",
"bzip2-1.0.6",
"LicenseRef-scancode-other-permissive",
"Zlib",
"LZMA-exception",
"LGPL-2.0-or-later",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-philippe-de-muyter",
"OFL-1.1",
"LGPL-2.1-only",
"MPL-1.1",
"X11",
"LGPL-2.1-or-later",
"GPL-2.0-only",
"OpenSSL",
"LicenseRef-scancode-cpl-0.5",
"GPL-1.0-or-later",
"NPL-1.1",
"MIT",
"MPL-2.0",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"MPL-1.0",
"LicenseRef-scancode-openssl",
"LicenseRef-scancode-ssleay-windows",
"BSL-1.0",
"LicenseRef-scancode-docbook",
"LicenseRef-scancode-mit-old-style",
"Python-2.0",
"BSD-3-Clause",
"IJG",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-2.0-or-later",
"LGPL-2.0-only",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown",
"BSD-2-Clause",
"Autoconf-exception-generic",
"PSF-2.0",
"NTP",
"LicenseRef-scancode-python-cwi",
"Afmparse",
"W3C",
"W3C-19980720",
"curl",
"LicenseRef-scancode-x11-xconsortium-veillard",
"Bitstream-Vera",
"HPND-sell-variant",
"ICU"
] |
permissive
|
#!/bin/sh
#**************************************************************
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#**************************************************************
# Wrapper that locates the real unopkg.bin next to this (possibly
# symlinked) script, prepares the Java environment, and execs it.

# enable file locking
SAL_ENABLE_FILE_LOCKING=1
export SAL_ENABLE_FILE_LOCKING

# resolve installation directory
# Follow symlink chains so sd_prog ends up as the directory that
# actually contains the binaries.
sd_cwd=`pwd`
sd_res=$0
while [ -h "$sd_res" ] ; do
cd "`dirname "$sd_res"`"
sd_basename=`basename "$sd_res"`
sd_res=`ls -l "$sd_basename" | sed "s/.*$sd_basename -> //g"`
done
cd "`dirname "$sd_res"`"
sd_prog=`pwd`
cd "$sd_cwd"

#collect all bootstrap variables specified on the command line
#so that they can be passed as arguments to javaldx later on
#Recognize the "sync" option. sync must be applied without any other
#options except bootstrap variables or the verbose option
for arg in $@
do
case "$arg" in
-env:*) BOOTSTRAPVARS=$BOOTSTRAPVARS" ""$arg";;
sync) OPTSYNC=true;;
-v) VERBOSE=true;;
--verbose) VERBOSE=true;;
*) OPTOTHER=$arg;;
esac
done

# For a bare "sync" invocation, point the JVM framework at a separate
# settings file and skip the accessibility check.
if [ "$OPTSYNC" = "true" ] && [ -z "$OPTOTHER" ]
then
JVMFWKPARAMS='-env:UNO_JAVA_JFW_INSTALL_DATA=$OOO_BASE_DIR/share/config/javasettingsunopkginstall.xml -env:JFW_PLUGIN_DO_NOT_CHECK_ACCESSIBILITY=1'
fi

# extend the ld_library_path for java: javaldx checks the sofficerc for us
if [ -x "$sd_prog/../basis-link/ure-link/bin/javaldx" ] ; then
my_path=`"$sd_prog/javaldx" $BOOTSTRAPVARS $JVMFWKPARAMS \
"-env:INIFILENAME=vnd.sun.star.pathname:$sd_prog/redirectrc"`
if [ -n "$my_path" ] ; then
LD_LIBRARY_PATH=$my_path${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}
export LD_LIBRARY_PATH
fi
fi

# Avoid inherited X resource settings interfering with the process.
unset XENVIRONMENT

# uncomment line below to disable anti aliasing of fonts
# SAL_ANTIALIAS_DISABLE=true; export SAL_ANTIALIAS_DISABLE

# uncomment line below if you encounter problems starting soffice on your system
# SAL_NO_XINITTHREADS=true; export SAL_NO_XINITTHREADS

# execute binary
exec "$sd_prog/unopkg.bin" "$@" "$JVMFWKPARAMS" \
"-env:INIFILENAME=vnd.sun.star.pathname:$sd_prog/redirectrc"
| true
|
6273270e485979419ad47bd966c9195d3bdbb703
|
Shell
|
zayceva-nastya/inn
|
/cleanTrial.sh
|
UTF-8
| 330
| 3.09375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Remove JetBrains trial/evaluation state for all installed IDEs.
set -euo pipefail
# nullglob: if no JetBrains config dirs exist, the loop runs zero times
# instead of iterating over the literal glob pattern.
shopt -s nullglob
echo "Removing: [~/.java/.userPrefs]"
# "|| true": a failed removal is non-fatal under set -e.
# BUGFIX: the original used "! rm", which also inverted a *successful* rm
# into a non-zero status, making the script exit 1 on success.
rm -rf ~/.java/.userPrefs || true
for DIR in ~/.config/JetBrains/*/
do
APP=${DIR%*/}   # strip the trailing slash
echo
echo "App: [${APP##*/}]"
echo "Removing: [${APP}/eval/]"
rm -rf "${APP}/eval/" || true
echo "Removing: [${APP}/options/other.xml]"
rm -rf "${APP}/options/other.xml" || true
done
| true
|
ae31c3c1e36e205a594282fa1fec29809d47a544
|
Shell
|
pogin503/my-vagrant-setting
|
/ats2/setup.sh
|
UTF-8
| 1,364
| 2.9375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Download and build ATS2 (Postiats) plus its contrib library under ~/atshome,
# then print the environment variables to add to the user's shell rc.
# BUGFIX: abort on the first failed step — previously a failed download
# silently cascaded into bogus tar/configure/make runs.
set -e
if [ ! -d "$HOME"/atshome ]; then mkdir -p "$HOME"/atshome; fi
pushd "$HOME"/atshome
# Fetch the compiler and contrib tarballs from SourceForge mirrors.
wget --trust-server-name 'http://downloads.sourceforge.net/project/ats2-lang/ats2-lang/ats2-postiats-0.2.1/ATS2-Postiats-0.2.1.tgz?r=http%3A%2F%2Fsourceforge.net%2Fprojects%2Fats2-lang%2Ffiles%2Fats2-lang%2Fats2-postiats-0.2.1%2F&ts=1437234613&use_mirror=jaist'
wget --trust-server-name 'http://downloads.sourceforge.net/project/ats2-lang-contrib/ats2-lang-contrib/ATS2-Postiats-contrib-0.1.12.tgz?r=http%3A%2F%2Fsourceforge.net%2Fprojects%2Fats2-lang-contrib%2Ffiles%2Fats2-lang-contrib%2F&ts=1437234808&use_mirror=jaist'
tar xf ATS2-Postiats-0.2.1.tgz
PATSHOME="$(pwd)"/ATS2-Postiats-0.2.1
export PATSHOME
export PATH=${PATSHOME}/bin:${PATH}
rm ATS2-Postiats-0.2.1.tgz
tar xf ATS2-Postiats-contrib-0.1.12.tgz
PATSHOMERELOC="$(pwd)"/ATS2-Postiats-contrib-0.1.12
export PATSHOMERELOC
rm ATS2-Postiats-contrib-0.1.12.tgz
# Build the compiler in-tree.
pushd "${PATSHOME}"
./configure
make
# Tell the user how to make the environment permanent.
echo 'Add to .bashrc or .zshrc'
echo '  export PATSHOME="$HOME"/atshome/ATS2-Postiats-0.2.1'
echo '  export PATH=${PATSHOME}/bin:${PATH}'
echo '  export PATSHOMERELOC="$HOME"/atshome/ATS2-Postiats-contrib-0.1.12'
echo 'example:'
echo '  touch hello.dats'
echo '  echo '\''implement main0 () = println! ("Hello world!")'\'' > hello.dats'
echo '  patscc -o hello hello.dats'
echo '  ./hello'
| true
|
2da44ac67364b686d314bd059555f4b00ba4a9f9
|
Shell
|
LukMercury/Bash-Toolkit
|
/goto.sh
|
UTF-8
| 1,107
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
# Depending on the parameter passed:
# Number: Opens a numbered link from a Bookmarks file in the default browser
#         Works well together with bookmark.sh (bookmark -l lists your bookmarks)
# Path to File: Follows all links (containing http) in that file
# Usage: goto <line number>
#        goto <file name>
LOG_FILE=/home/$USER/Documents/Notite/useful-links.txt
while [ -n "${1:-}" ]; do
    # BUGFIX: reset per argument — previously a link-less file was not
    # reported if an earlier argument had already found links.
    FOUND_LINK=1
    if [[ "$1" =~ ^[0-9]+$ ]]; then
        # Numeric argument: open line N of the bookmarks file.
        nohup firefox "$(head -n "$1" "$LOG_FILE" | tail -1)" &> /dev/null &
    elif [ -f "$1" ]; then
        # Split on blanks/quotes and open every http(s) token.
        for LINK in $(grep http "$1" | tr '[:blank:]'\"\' '\n' | grep http) ; do
            nohup firefox "$LINK" &> /dev/null &
            FOUND_LINK=0
        done
        # Bare www.* tokens (no scheme): prepend https://.
        for LINK in $(grep www "$1" | tr '[:blank:]'\"\' '\n' | grep www | grep -v http) ; do
            nohup firefox --new-window "https://$LINK" &> /dev/null &
            FOUND_LINK=0
        done
        if [ "$FOUND_LINK" -eq 1 ]; then
            echo "No links in file $1"
        fi
    else
        echo "Parameter must be a number or a file name"
    fi
    shift
done
| true
|
4cb03e1f99a1e10e234a2a2013f1717950465907
|
Shell
|
jmath1/django-infrastructure-cookiecutter
|
/{{cookiecutter.workspace}}/install_do_inventory.sh
|
UTF-8
| 724
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# Install do-ansible-inventory v2.0.0 into /usr/local/bin, picking the
# macOS or Linux release tarball based on uname.
unameOut="$(uname -s)"
case "${unameOut}" in
    Linux*)     machine=Linux;;
    Darwin*)    machine=Mac;;
esac
if [ "${machine}" = "Mac" ]; then
    wget https://github.com/do-community/do-ansible-inventory/releases/download/v2.0.0/do-ansible-inventory_2.0.0_macos_x86_64.tar.gz;
    tar -C /usr/local/bin -zxvf do-ansible-inventory_2.0.0_macos_x86_64.tar.gz;
    rm do-ansible-inventory_2.0.0_macos_x86_64.tar.gz;
elif [ "${machine}" = "Linux" ]; then
    wget https://github.com/do-community/do-ansible-inventory/releases/download/v2.0.0/do-ansible-inventory_2.0.0_linux_x86_64.tar.gz;
    # BUGFIX: extract and delete the *linux* tarball — the original
    # referenced the macos filename here (copy-paste error), so the
    # downloaded archive was never installed.
    tar -C /usr/local/bin -zxvf do-ansible-inventory_2.0.0_linux_x86_64.tar.gz;
    rm do-ansible-inventory_2.0.0_linux_x86_64.tar.gz;
fi
| true
|
5dbfb0ad93d60ada766d41b4c6ba7c44093f96e5
|
Shell
|
yara0204/bash_course
|
/Sessions/shell-scripting-in-a-nutshell/excercise_4.sh
|
UTF-8
| 300
| 3.328125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Report whether shadow passwords are enabled and, if so, whether the
# current user may edit /etc/shadow.
if [ ! -e /etc/shadow ]
then
	echo "No existe el archivo shadow."
else
	echo "Shadow passwords are enabled."
	if [ -w /etc/shadow ]
	then
		echo "You have permissions to edit /etc/shadow."
	else
		echo "You do NOT have permissions to edit /etc/shadow."
	fi
fi
| true
|
956c363995f79364023bf3549a7a176d2fee935e
|
Shell
|
tyden46/newTrainings
|
/PathoScope_C1_demo-master/scripts/pathomap_wrapper.sh
|
UTF-8
| 2,230
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/bash
# SLURM wrapper around "pathoscope.py MAP": resolves the input fastq
# (either directly or via a file-of-file-names for array jobs), builds the
# target/filter database lists, and runs the mapping step.
SN="pathomap_wrapper.sh"
echo "[---$SN---] ($(date)) Starting $SN"
# wall-clock start, used for the runtime summary at the end
t1=$(date +"%s")
umask 0002
#--- Load the pathoscope module
module load pathoscope
##########################################################################################
# Setup variables
##########################################################################################
#--- If array job, a file of file names (fofn) must be provided
# (the task ID selects line N of $fofn as this task's fastq)
if [[ -n "$SLURM_ARRAY_TASK_ID" ]]; then
[[ -z "$fofn" ]] && echo "No fofn provided" && exit 1
fastq=$(sed -n "$SLURM_ARRAY_TASK_ID"p $fofn)
fi
#--- If not an array job, the path for the fastq file must have been provided
# (both $fofn and $fastq are expected to arrive via the environment,
# e.g. sbatch --export — confirm against the submitting script)
[[ -z "$fastq" ]] && echo "No fastq provided" && exit 1
#--- Check that fastq file exists
[[ ! -e "$fastq" ]] && echo "[---$SN---] ($(date)) FAILED: file $fastq does not exist" && exit 1
#--- Output directory
# output dir named after the fastq file, extension stripped
outdir=$(basename ${fastq%.*})
#--- Path to bowtie2 databases
indexdir="references"
#--- Create lists for target and filter databases
# one name per line in targets.txt / filter.txt -> comma-separated list
targetnames=$(tr '\n' ',' < targets.txt | sed 's/,$//')
filternames=$(tr '\n' ',' < filter.txt | sed 's/,$//')
##########################################################################################
# Run pathoscope
##########################################################################################
#--- Print out the parameters
echo "[---$SN---] ($(date)) SETTINGS"
echo "[---$SN---] ($(date)) Fastq:              $fastq"
echo "[---$SN---] ($(date)) Output directory:   $outdir"
echo "[---$SN---] ($(date)) Index directory:    $indexdir"
echo
echo "[---$SN---] ($(date)) Target databases:"
tr ',' '\n' <<< $targetnames | sed 's/^/    /'
echo
echo "[---$SN---] ($(date)) Filter databases:"
tr ',' '\n' <<< $filternames | sed 's/^/    /'
mkdir -p $outdir
# NOTE(review): -numThreads is passed twice below — confirm which
# occurrence pathoscope's CLI parser honours.
pathoscope.py MAP -numThreads $(nproc) \
-U $fastq \
-indexDir $indexdir \
-targetIndexPrefixes "$targetnames" \
-filterIndexPrefixes "$filternames" \
-outDir $outdir \
-numThreads $(nproc) \
-outAlign outalign.sam
#---Complete job
# report elapsed wall-clock time
t2=$(date +"%s")
diff=$(($t2-$t1))
echo "[---$SN---] Total time: ($(date)) $(($diff / 60)) minutes and $(($diff % 60)) seconds."
echo "[---$SN---] ($(date)) $SN COMPLETE."
| true
|
62bd8ab9365933ebbcb99bc751851ec558904c12
|
Shell
|
JamesMcGuigan/ecosystem-research
|
/wasm/wat/webassembly-with-rust/wasmcheckers/build.sh
|
UTF-8
| 729
| 2.625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build and inspect the checkers WebAssembly module with the WABT toolchain.
# BUGFIX: ${BASH_SOURCE[0]} must be braced — "$BASH_SOURCE[0]" expanded the
# scalar followed by a literal "[0]"; also quote against spaces in the path.
cd "$(dirname "${BASH_SOURCE[0]}")" # cd to the directory containing this script
set -x
wat2wasm checkers.wat
wasm2wat checkers.wasm -o checkers.wasm.wat
wasm2c   checkers.wasm -o checkers.wasm.c
wasm-objdump checkers.wasm -hdxrs | tee checkers.wasm.objdump
## WABT Tools: https://github.com/WebAssembly/wabt
wat-desugar checkers.wat       # strip comments + flat format syntax)
file    checkers.wasm          # checkers.wasm: WebAssembly (wasm) binary module version 0x1 (MVP)
hexdump checkers.wasm          # 0000000 6100 6d73 0001 0000
wasm-decompile checkers.wasm   # transpile to C (pseudocode)
wasm-validate  checkers.wasm -v # BeginExportSection EndExportSection
ls -l *.wat *.wasm* | sort -nr -k5
#./test.sh
#./test.bats
| true
|
886b8476175b931ee0f3ad23f91e699c23b9a4fb
|
Shell
|
ericpromislow/hcf-tools
|
/bin/inprogress-upload-diego.bash
|
UTF-8
| 1,639
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
# Deploy cf-release and diego-release (plus garden-linux and etcd releases)
# to a BOSH Lite director, skipping anything already deployed/uploaded.
set -ex
# 2016-02-24:
# $0 v231 v0.1454.0 36 0.333.0
CF_RELEASE=$1
DIEGO_RELEASE=$2
ETCD_RELEASE=$3
GARDEN_LINUX_RELEASE=$4
if [ -z "$GARDEN_LINUX_RELEASE" ] ; then
  echo "Usage $(dirname $0) cf-release diego-release etcd-release garden-linux-release"
  exit 1
fi
# Names of existing deployments/releases, one per line.
deployments=$(bosh deployments 2>/dev/null | awk '/^\| [a-z]/ { print $2 }')
releases=$(bosh releases 2>/dev/null | awk '/^\| [a-z]/ { print $2 }')
cd $HOME/git/cloudfoundry/cf-release
# Set this var for diego-release:scripts/generate-bosh-lite-manifests
export CF_RELEASE_DIR=$PWD
# BUGFIX: in bash, a quoted RHS of =~ is matched literally, so the original
# '[[ $deployments =~ 'cf-warden[^-]' ]]' never matched and cf-warden was
# always redeployed. Store the pattern in a variable to match as a regex
# ("cf-warden" NOT followed by a dash, i.e. not cf-warden-diego).
cf_warden_re='cf-warden[^-]'
if [[ $deployments =~ $cf_warden_re ]] ; then
  echo "cf-warden already deployed"
else
  git checkout $CF_RELEASE
  scripts/update
  scripts/generate-bosh-lite-dev-manifest
  bosh create release --force
  bosh upload release
  bosh -n deploy
fi
# Substring checks are sufficient for the release names below.
if [[ $releases =~ garden ]] ; then
  echo garden already uploaded
else
  bosh upload release https://bosh.io/d/github.com/cloudfoundry-incubator/garden-linux-release?v=$GARDEN_LINUX_RELEASE
fi
if [[ $releases =~ etcd ]] ; then
  echo etcd already uploaded
else
  bosh upload release https://bosh.io/d/github.com/cloudfoundry-incubator/etcd-release?v=$ETCD_RELEASE
fi
if [[ $deployments =~ 'cf-warden-diego' ]] ; then
  echo "cf-warden-diego already deployed"
else
  cd $HOME/git/cloudfoundry-incubator/diego-release
  git checkout $DIEGO_RELEASE
  scripts/update
  git clean -ffd
  # $CF_RELEASE_DIR needs to be set for next command:
  DIEGO_RELEASE_DIR=$PWD scripts/generate-bosh-lite-manifests
  bosh deployment bosh-lite/deployments/diego.yml
  bosh create release --force
  bosh upload release
  bosh -n deploy
fi
| true
|
96c53c77351022997d5a02a32df539a14709ee83
|
Shell
|
offensivenomad/dotfiles
|
/bash.d/archives/flutter
|
UTF-8
| 148
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
# Optionally add the Flutter SDK to PATH; disabled ("n") by default.
enableFLUTTER="n"
# BUGFIX: the original tested the literal string `enableFLUTTER` (missing $),
# which is never equal to "y", so the branch was unreachable even when the
# flag was set. Read the variable instead.
if [[ "$enableFLUTTER" == "y" ]]; then
    export FLUTTER_ROOT=$HOME/Flutter/Sdk
    export PATH=$FLUTTER_ROOT/bin:$PATH
fi
| true
|
671f80aed8f7fa157fcfdf3bdf7764ceec6d4f3b
|
Shell
|
mi3z/system
|
/scripte/Mac OS/resize.trml
|
UTF-8
| 1,311
| 4.15625
| 4
|
[] |
no_license
|
#!/bin/bash
## Made for Mac OS
## This script resizes all images down to 50%.
## Images have to be in the same folder as the scritp.
## New resized images can be found in new subfolder
## To run this script with double click:
## 1. right click on this script -> Information -> Open with: -> select Terminal.app and select execute every time.
## 2. open terminal and execute 'chmod +x resize.trml'
#ok variable
# 0 = all resizes succeeded so far; 1 = at least one sips call failed
ok=0
# go to original path - in case script was started with double click
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
cd "${DIR}"
# create new folder for images (timestamped, so reruns never collide)
folder=$(date "+%Y_%m_%d_%H_%M_%S")
mkdir $folder
# resize images
for f in *
do
# skip anything that file(1) does not classify as an image;
# $? below is the exit status of grep, the last command in the pipeline
file "$f" |grep -q -e image
if [ $? -ne "0" ]
then
continue
fi
# current dimensions as reported by sips; cut keeps the value after "pixelHeight:"
H=$(sips -g pixelHeight "$f" | grep 'pixelHeight' | cut -d: -f2)
W=$(sips -g pixelWidth "$f" | grep 'pixelWidth' | cut -d: -f2)
# halve both dimensions (integer division)
H50=$(($H / 2))
W50=$(($W / 2))
# NOTE(review): only the height is passed to sips; W/W50 are unused.
# sips --resampleHeight preserves aspect ratio, so this looks intentional — confirm.
sips --resampleHeight "$H50" "$f" --out $folder >/dev/null
#check if resize worked
if [ $? -ne "0" ]
then
ok=1
fi
echo $f
done
# print the success banner only if every resize succeeded
if [ $ok -eq "0" ]
then
echo "*************************************************************"
echo "* OK * OK * OK * OK * OK * OK * OK * OK * OK * OK * OK * OK * "
echo "*************************************************************"
fi
| true
|
98da8d0e6945a25d8247cd39d5e78d7fb6a6ca89
|
Shell
|
mikeakohn/java_grinder
|
/scripts/centos/build.sh
|
UTF-8
| 578
| 3.25
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Build a dated source tarball of java_grinder from its GitHub checkout
# and copy it to /dist. (BUGFIX: shebang was "#/usr/bin/env bash",
# missing the "!", so the kernel never honoured it.)
SOURCEDIR=/storage/git
VERSION=$(date +"%Y-%m-%d")
RPM_VERSION=$(date +"%Y.%m.%d")   # kept for RPM packaging, unused here
VERSION_H=$(date +"%B %d, %Y")
PROGRAM=java_grinder
FULLNAME=${PROGRAM}-${VERSION}
# Refresh the clone (clone fails harmlessly if it already exists).
cd "${SOURCEDIR}"
git clone https://github.com/mikeakohn/java_grinder.git
# Work in $HOME: stage a clean copy named after the version.
cd
rm -rf "${FULLNAME}" "${FULLNAME}.tar.gz"
cp -r "${SOURCEDIR}/${PROGRAM}" "${FULLNAME}"
cd "${FULLNAME}"
make clean
# Stamp the human-readable build date into common/version.h.
cat <<EOF > common/version.h
#ifndef _VERSION_H
#define _VERSION_H
#define VERSION "$VERSION_H"
#endif
EOF
# Strip VCS metadata before tarring.
rm -rf .git .gitignore
cd ..
tar cvzf "${FULLNAME}.tar.gz" "${FULLNAME}"
cp "${FULLNAME}.tar.gz" /dist
| true
|
a0dd6c5eb2064e4151f7d60c93c0685cac4067e3
|
Shell
|
flyrainning/Docker_Server_PHP7-Nginx
|
/bin/start.sh
|
UTF-8
| 490
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/sh
# Container entrypoint: "shell" drops into bash; otherwise prepare runtime
# directories, seed a default index.php, and run php-fpm + nginx.
case "$1" in
shell)
/bin/bash
exit 0
;;
esac
# Create required directories on first start (order matters:
# /app/log must exist before /app/log/nginx).
for dir in /app/log /run/php /app/log/nginx; do
if [ ! -d "$dir" ]; then
mkdir "$dir"
fi
done
# Seed the web root with a phpinfo page if it does not exist yet.
if [ ! -d "/app/wwwroot" ]; then
mkdir /app/wwwroot
echo "<?php phpinfo(); ?>" > /app/wwwroot/index.php
fi
# php-fpm in the background, nginx in the foreground (container PID 1 style).
/usr/sbin/php-fpm7.0 --nodaemonize -c /etc/php/7.0/fpm/php.ini --fpm-config /etc/php/7.0/fpm/php-fpm.conf &
/usr/sbin/nginx -g 'daemon off; master_process on;'
| true
|
d9f03959923aa145d57013586c9875719cb4c91c
|
Shell
|
cyberwani/linux-setups
|
/php-install.sh
|
UTF-8
| 2,795
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
# Install php5-fpm (plus common PHP extensions and memcached), create the
# web user, and wire FPM to Apache via mod_fastcgi if Apache is present.
###
# expect(1) is needed to script the interactive passwd prompt below.
if ! which expect &> /dev/null;
then
echo "Unable to find command 'expect', run ./essentials.sh first"
exit
fi;
###
echo -n "Web user account [default=www-data]: "
read USERNAME
if [ -z "$USERNAME" ]
then
USERNAME='www-data'
fi
###
# Only a non-default user gets created (with an optional password);
# NOTE(review): the www-data account is assumed to already exist — confirm.
if [ "$USERNAME" != "www-data" ]
then
echo -n "Web user account password (leave blank for none): "
read -s PASSWORD
echo ""
if [ ! -z "$PASSWORD" ]
then
echo -n "Retype web user account password: "
read -s PASSWORD2
echo ""
if [ "$PASSWORD" != "$PASSWORD2" ]
then
echo "Web user account passwords do not match"
exit
fi
fi
###
# home is /var/www but not created (-M); web content lives there already
useradd --home /var/www -M $USERNAME
if [ ! -z "$PASSWORD" ]
then
# add user password
# expect drives passwd's interactive prompts non-interactively
expect << EOF
spawn passwd $USERNAME
expect "Enter new UNIX password:"
send "${PASSWORD}\r"
expect "Retype new UNIX password:"
send "${PASSWORD}\r"
expect eof;
EOF
fi
fi
###
# PPA providing php5-fpm packages
add-apt-repository ppa:brianmercer/php
apt-get -yq update
apt-get -yq update
apt-get -yq install php5-fpm
###
# hide the PHP version header
sed -e "s/;\?expose_php .*/expose_php \= Off/g" -i /etc/php5/fpm/php.ini
###
sleep 2
###
# per-site config/log/temp directories used by the pool below
mkdir /var/www/conf
mkdir /var/www/logs
mkdir /var/www/temp
###
# Replace the stock FPM config with a single "default" pool listening on a
# unix socket owned by the web user.
FPMCONF='/etc/php5/fpm/php5-fpm.conf'
mv $FPMCONF "${FPMCONF}.bak"
cat > $FPMCONF << EOF
; global options
[global]
pid = /var/run/php5-fpm.pid
error_log = /var/log/php5-fpm.log
; default pool
[default]
listen = /etc/php5/fpm/php5-fpm.sock
listen.owner = $USERNAME
listen.group = $USERNAME
user = $USERNAME
group = $USERNAME
pm = dynamic
pm.max_children = 3
pm.start_servers = 2
pm.min_spare_servers = 2
pm.max_spare_servers = 3
pm.max_requests = 500
slowlog = /var/log/php5-fpm.slow.log
env[PATH] = /usr/local/bin:/usr/bin:/bin
env[TMP] = /var/www/temp
env[TMPDIR] = /var/www/temp
env[TEMP] = /var/www/temp
; pool options
; include=/var/www/*/conf/fpm-pool.conf
EOF
###
# not installing php5-cgi, will auto install apache
apt-get -yq install php-pear php5-cgi php5-cli php5-mysql php5-gd php5-imagick php5-curl php5-tidy php5-xmlrpc php5-mcrypt
apt-get -yq install php5-memcache
apt-get -yq install memcached
# comment out (';') the shipped extension ini headers
sed -e "s/^#\+/;/g" -i /etc/php5/fpm/conf.d/imagick.ini
sed -e "s/^#\+/;/g" -i /etc/php5/fpm/conf.d/mcrypt.ini
###
chown -R $USERNAME:$USERNAME /var/www
# If Apache is installed, route .php requests to FPM through mod_fastcgi.
HTTPDCONF='/etc/apache2/httpd.conf'
if [ -f $HTTPDCONF ]
then
echo "Include /var/www/conf/php5-fpm.conf" >> $HTTPDCONF
cat > /var/www/conf/php5-fpm.conf << EOF
DirectoryIndex index.php index.html
<IfModule mod_fastcgi.c>
	Alias /php.fcgi /var/www/php.fcgi
	FastCGIExternalServer /var/www/php.fcgi -socket /etc/php5/fpm/php5-fpm.sock -pass-header Authorization
	AddType application/x-httpd-php5 .php
	Action application/x-httpd-php5 /php.fcgi
	AddHandler fastcgi-script .fcgi
</IfModule>
EOF
fi
service php5-fpm start
service apache2 restart
| true
|
b8796ba9fed1f500125c1fbc697a31a42685d6bc
|
Shell
|
josemalcher/shell-script-CursoMarcosCastroSouza
|
/10-Construindo-menu-com-o-comando-select/script.sh
|
UTF-8
| 471
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
# Build an interactive menu with the select builtin.
opcoes=("python" "shell" "Java" "sair")
select nome in "${opcoes[@]}"; do
case $nome in
"python") echo "Você escolheu Python" ;;
"shell") echo "Você escolheu Shell" ;;
"Java") echo "Você escolheu JAVA" ;;
"sair") break ;;
*) echo "Opção invalida" ;;
esac
done
| true
|
988804376bf776e9562a5a875f0c68039c151dc0
|
Shell
|
Pseudonymous-coders/CRI-V1
|
/chromelib/crouton
|
UTF-8
| 720
| 3.171875
| 3
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
#!/bin/bash
# Install or update a crouton chroot (ubuntu trusty with xiwi/extension/e17)
# on a Chromebook, driving the installer non-interactively via piped input.
CGLOBS=~/Downloads/.tmp/globs
# shared variables (CTEMP, CROUTON, CBUILD, URLCROUTON, ...) and helper
# functions (installer, cleaner) come from these bootstrap files
source $CGLOBS/globvar
source $CGLOBS/globfun
echo "
Installing chroot..."
cd $CTEMP
# installer() is defined in globfun — presumably downloads crouton to
# $CTEMP/crouton (confirm against globfun)
installer "$URLCROUTON" $CTEMP/crouton
echo "Downloading...."
if [ ! -d "$CROUTON" ]; then #Installs crouton
sudo su -c "sudo echo 'chrooter binner' | sh crouton -r trusty -t xiwi,extension,e17"
else
# chroot already present: update it in place (-u)
sudo su -c "sudo echo 'chrooter binner' | sh crouton -r trusty -t xiwi,extension,e17 -u"
fi
# NOTE(review): plain echo does not interpret \n, so these print the
# literal text "user\n" / "password\n" — confirm whether echo -e/printf
# was intended here.
echo "user\n"
echo "password\n"
wait
sudo su -c "sudo echo 'y chrooter binner' | sudo enter-chroot -u root"
sudo su -c "sudo echo 'y chrooter binner' | sudo enter-chroot -u root" # Make sure everythin is installed and setup script is broken
sleep 5
cleaner $CBUILD # Clean up files
echo "Done"
| true
|
d6e9dd0a4b7ad43cf7a89e126b8eae6953fc19c6
|
Shell
|
VectorCell/leander-monitor
|
/app/monitor.sh
|
UTF-8
| 1,097
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
# Every 5 minutes, snapshot ZFS, filesystem, block-device, Samba and SMART
# status into /tmp/*.statusfile files for other tools to read.
ZPOOL_FILE="/tmp/zpool.statusfile"
FS_FILE="/tmp/filesystem.statusfile"
BLK_FILE="/tmp/block.statusfile"
SMB_FILE="/tmp/samba.statusfile"
SMART_FILE="/tmp/smart.statusfile"
sudo rm /tmp/*.statusfile
while true; do
	# zpool summary, I/O stats and detailed status in one report
	{
		date
		echo -e "\n"
		sudo zpool list
		echo -e "\n"
		sudo zpool iostat
		echo -e "\n"
		sudo zpool status
	} > "$ZPOOL_FILE"
	df -hT > "$FS_FILE"
	# block device tree plus filesystem UUIDs
	{
		lsblk -i
		echo
		sudo blkid
	} > "$BLK_FILE"
	sudo smbstatus > "$SMB_FILE"
	# SMART reports for both data drives
	{
		echo "SMART data for /dev/sdc:"
		sudo smartctl --all /dev/sdc
		echo -e "\n\n"
		echo "SMART data for /dev/sdd:"
		sudo smartctl --all /dev/sdd
	} > "$SMART_FILE"
	# disabled until I make it not terrible
	#BADDRIVES=$(sudo zpool status | grep NAME -A 4 | grep -v NAME | grep -v ONLINE)
	#if [ -n "$BADDRIVES" ]; then
	#	echo "bad drive detected: $BADDRIVES"
	#	curl http://bismith.net/api/sendzfsalert
	#fi
	sleep 5m
done
| true
|
d66d3ce2766bf8b7502eb7c72cd66f300aae17d2
|
Shell
|
dlyle65535/dotfiles
|
/bin/setjh
|
UTF-8
| 1,326
| 3.984375
| 4
|
[] |
no_license
|
#! /bin/bash
# Switch the system-wide Java JDK on Mac OS X by repointing the CurrentJDK
# symlink under /System/Library/Frameworks/JavaVM.framework/Versions.
# With no argument: list installed versions. With a version argument:
# validate it is installed, then relink CurrentJDK (requires sudo).
#function list_java_versions {
#	ls -l /System/Library/Frameworks/JavaVM.framework/Versions | egrep "^l" | grep -v Current
#}
#
#VERSION=$1
#if [[ -z "$@" ]]; then
#	echo >&2 "You must supply an argument:  Try one of these:"
#	list_java_versions
#	exit 1
#fi
#
#JAVA_ROOT=/System/Library/Frameworks/JavaVM.framework/Versions
#
#JAVA_VERSION=$JAVA_ROOT/$1
#if [ -d $GRAILS_VERSION ]; then
#	echo "Setting Java version to $1 (you may have enter your password for sudo)"
#	sudo rm $JAVA_ROOT/CurrentJDK
#	sudo ln -s $JAVA_VERSION $JAVA_ROOT/CurrentJDK
#	echo "java -version"
#	java -version
#	echo "javac -version"
#	javac -version
#else
#	echo "Error: '$JAVA_VERSION' does not exist!!"
#	echo "Use one of the following:"
#	list_java_versions
#	exit 1
#fi
cd /System/Library/Frameworks/JavaVM.framework/Versions
# where the CurrentJDK symlink currently points
CURJDK="`readlink CurrentJDK`"
echo Current JDK version: $CURJDK
# no argument: just list what is installed and exit
if [ "$1" == "" ]; then
echo Installed versions:
ls
exit
fi
# the requested version must match an entry in this directory exactly
VERFOUND=`ls | grep $1 | head -n 1`
if [ "$VERFOUND" != "$1" ]; then
BASE="`basename $0`"
echo Error: Could not change JDK-- version $1 not installed!
echo Run $BASE without arguments to see a list of installed versions.
exit 127
fi
echo You must now enter your Mac OS X password to change the JDK.
# -f force, -h act on the symlink itself, -s symbolic, -v verbose
sudo ln -fhsv $1 CurrentJDK
| true
|
459766028bada726637046c0c86c0363e552e1f5
|
Shell
|
mishagam/progs
|
/bash/tfunc.sh
|
UTF-8
| 106
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/bash
# Tiny demo of shell functions; note that "quit" exits the script,
# so the final "echo foo" is intentionally never reached.
# Terminate the script immediately.
quit() {
	exit
}
# Print the first argument on its own line.
e() {
	echo "${1}"
}
e Hello
e World
quit
echo foo
| true
|
c2a1ede4cac44b7520c74f07569d2d9acd09e11c
|
Shell
|
antenore/svntogit-community
|
/python-pyadi-iio/repos/community-any/PKGBUILD
|
UTF-8
| 1,249
| 2.59375
| 3
|
[] |
no_license
|
# Maintainer: Filipe Laíns (FFY00) <lains@archlinux.org>
# Arch Linux PKGBUILD for python-pyadi-iio: builds the Python package and
# its Sphinx HTML documentation, then installs both plus the license.
_pkgname=pyadi-iio
pkgname=python-$_pkgname
pkgver=0.0.8
pkgrel=1
pkgdesc='Python interfaces for ADI hardware with IIO drivers'
arch=('any')
url='https://github.com/analogdevicesinc/pyadi-iio'
license=('custom')
depends=('libiio' 'python-numpy' 'python-paramiko')
makedepends=('python-setuptools' 'python-sphinx' 'python-sphinx_rtd_theme')
checkdepends=('python-pytest-runner' 'python-scapy')
source=("$pkgname-$pkgver.tar.gz::$url/archive/v$pkgver.tar.gz")
sha512sums=('97f6b241230f54ff4932f677b42b4fd33c2e2eb69bb27d638b4c63b44e991b61c40744b9db09fd26d7973e83eeec6089cba65bbe63894206b06c3e10dd8f1f3e')
# Build the Python package, then the Sphinx HTML docs in doc/.
build() {
  cd $_pkgname-$pkgver
  python setup.py build
  cd doc
  make html
}
# need liblibc.a
#check() {
#  cd $_pkgname-$pkgver
#
#  python setup.py pytest
#}
# Install the package, prune bundled tests, and ship docs + license.
package() {
  cd $_pkgname-$pkgver
  python setup.py install --root="$pkgdir" --optimize=1 --skip-build
  # Remove tests
  rm -rf "$pkgdir"/usr/lib/python*/site-packages/test
  # Install documentation
  install -dm 755 "$pkgdir"/usr/share/doc/$pkgname
  cp -r -a --no-preserve=ownership doc/build/html "$pkgdir"/usr/share/doc/$pkgname
  install -Dm 644 LICENSE "$pkgdir"/usr/share/licenses/$pkgname/LICENSE
}
# vim:set ts=2 sw=2 et:
| true
|
c97628453091b9b19c2c58722df7cbc78d817b6c
|
Shell
|
chledowski/word_embeddings_for_nli
|
/env.sh.default
|
UTF-8
| 344
| 3.171875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Environment bootstrap (meant to be sourced): verifies DATA_DIR is
# configured, exports the variables the project expects, and creates
# the data directory.
function setup () {
    # Set up working directory here.
    # DATA_DIR=your_path
    if [ -z "${DATA_DIR+x}" ]; then
        echo "Set up DATA_DIR variable in env.sh.* file"
        return 1
    fi
    export PYTHONPATH="$PWD:$PYTHONPATH"
    export DATA_DIR="$DATA_DIR"
    export NLI_DEBUG=0
    mkdir -p "$DATA_DIR"
}
setup
| true
|
7e99ae4695103a74d2b04f160a64fd9dd72f5a2d
|
Shell
|
mtk09422/chromiumos-platform-ec
|
/board/twinkie/build_rw_variant
|
UTF-8
| 1,013
| 3.25
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
#
# Copyright (c) 2014 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
#
# Build a Twinkie firmware image with
# the regular Twinkie sniffer firmware in the RO partition
# and a firmware behaving as a USB PD sink in the RW partition.
#
BOARD=twinkie
VARIANT=sink
# size of the RO partition: the RW image is spliced in at this byte offset
RO_SIZE=65536
# Clean build
make BOARD=${BOARD} clean
make BOARD=${BOARD} out=build/${BOARD}_${VARIANT} clean
# Build regular Twinkie sniffer firmware
make BOARD=${BOARD}
# Build Twinkie as a USB PD consumer/producer (defaulting as a sink)
make BOARD=${BOARD} PROJECT=${VARIANT} out=build/${BOARD}_${VARIANT}
# Generate the final image by concatenating the built images
# cpp expands the VERSION macro from ec_version.h; ${VERSION//\"} strips the
# surrounding double quotes from the macro's string value
VERSION=$(echo "VERSION" | cpp -P -imacros build/${BOARD}/ec_version.h)
FINAL_IMG=build/${BOARD}/${VERSION//\"}.combined.bin
cp build/${BOARD}/ec.bin ${FINAL_IMG}
# overwrite the RW half in place: seek RO_SIZE bytes in, keep the rest
dd if=build/${BOARD}_${VARIANT}/${VARIANT}.RW.bin of=${FINAL_IMG} bs=1 seek=${RO_SIZE} conv=notrunc
| true
|
1b09332249edf67d3fc5bf4fa3a42cf9072b29b3
|
Shell
|
elewarr/openbsd-arm64-src-dev
|
/bin/patch
|
UTF-8
| 1,263
| 3.546875
| 4
|
[
"BSD-2-Clause"
] |
permissive
|
#!/usr/bin/env ksh
# Apply every *.patch from $CROSS_OPENBSD_SRC_HOME/patches to the OpenBSD
# source tree of the current git branch, registering any brand-new files
# with cvs before patching so the tree stays consistent.
if [ -z "${CROSS_OPENBSD_SRC_HOME}" ]; then
	echo CROSS_OPENBSD_SRC_HOME is missing
	exit 1
fi
# the "src" symlink always points at the checkout for the current branch
BRANCH=$(git rev-parse --abbrev-ref HEAD)
if [ -z "${BRANCH}" ]; then
	echo BRANCH is unknown
	exit 1
fi
if [ -h "src" ]; then
	rm "src"
fi
ln -s "${BRANCH}" "src"
# apply patches in sorted (deterministic) order
for P in $(find "${CROSS_OPENBSD_SRC_HOME}/patches/" -type f -name "*.patch" -maxdepth 1 | sort); do
	# target path is taken from the patch's "Index:" header line
	TARGETP=$(cat "${P}" | grep "Index: " | cut -d ' ' -f 2)
	TARGETD=$(dirname "${TARGETP}")
	TARGETF=$(basename "${TARGETP}")
	echo ${TARGETD}
	echo ${TARGETF}
	if [ ! -d "${CROSS_OPENBSD_SRC_HOME}/src/${TARGETD}" ]; then
		# brand-new directory: create it, register dir + file with cvs,
		# then remove the placeholder so patch can create the real file
		echo "Making dir: ${TARGETD}"
		mkdir -p "${CROSS_OPENBSD_SRC_HOME}/src/${TARGETD}"
		touch "${CROSS_OPENBSD_SRC_HOME}/src/${TARGETD}/${TARGETF}"
		cd "${CROSS_OPENBSD_SRC_HOME}/src/"
		cvs add "${TARGETD}"
		cvs add "${TARGETD}/${TARGETF}"
		rm "${TARGETD}/${TARGETF}"
		cd -
	else
		# directory already tracked: make sure the file exists and the
		# directory is registered (cvs add is a no-op if already added)
		touch "${CROSS_OPENBSD_SRC_HOME}/src/${TARGETD}/${TARGETF}"
		cd "${CROSS_OPENBSD_SRC_HOME}/src/"
		cvs add "${TARGETD}"
		cd -
	fi
	patch -d "${CROSS_OPENBSD_SRC_HOME}/src" < "$P"
	if [ $? -ne 0 ]; then
		echo $P failed
		exit 1
	fi
done
| true
|
9a06f791f516c2fddeea583ad25f3934e2103b9c
|
Shell
|
pradand/bash-devops-libs
|
/libs/gcp/iam/main.sh
|
UTF-8
| 3,388
| 3.65625
| 4
|
[
"Apache-2.0"
] |
permissive
|
# Copyright 2020 Leonardo Andres Morales
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
### Validate a role of a email ###
# usage: validateRole <domain> <domain_id> <role> <email>
# domains: project folder billing
# Returns 0 if <email> already holds <role> on the given GCP resource,
# non-zero otherwise. getArgs/exitOnError are framework helpers supplied
# by the surrounding devops library.
function validateRole {
    getArgs "domain domain_id role email" "${@}"

    # Validate role format (gcloud expects the fully qualified form)
    [[ ${role} == "roles/"* ]] || exitOnError "Role must use format roles/<role>" -1

    # Pick the gcloud get-iam-policy command for the resource type
    if [ "${domain}" == "project" ]; then
        cmd="gcloud projects get-iam-policy ${domain_id}"
    elif [ "${domain}" == "folder" ]; then
        cmd="gcloud alpha resource-manager folders get-iam-policy ${domain_id}"
    elif [ "${domain}" == "billing" ]; then
        cmd="gcloud alpha billing accounts get-iam-policy ${domain_id}"
    elif [ "${domain}" == "function" ]; then
        cmd="gcloud functions get-iam-policy ${domain_id}"
    else
        exitOnError "Unsupported get-iam-policy from '${domain}' domain" -1
    fi

    # Execute the validation: list members bound to ${role}, filtered to ${email}
    foundRoles=$(${cmd} --flatten="bindings[].members" --filter "bindings.role=${role} AND bindings.members:${email}" --format="table(bindings.members)")
    exitOnError "Check your IAM permissions (for get-iam-policy) at ${domain}: ${domain_id}"

    # If email role was not found, grep returns non-zero and so do we
    echo "${foundRoles}" | grep "${email}" > /dev/null
    return $?
}
### Bind Role to a list of emails ###
# usage: bindRole <domain> <domain_id> <role> <email1> <email2> ... <emailN>
# domains: project folder
# Grants <role> on the given resource to each email that does not already
# have it (checked via validateRole). Service accounts (*.iam.gserviceaccount.com)
# and the special "allUsers" member are bound with the appropriate member type.
function bindRole {
    getArgs "domain domain_id role @emails" "${@}"

    # For each user
    for email in ${emails[@]}; do

        # Validate if the role is already provided; only bind when missing
        self validateRole ${domain} ${domain_id} ${role} ${email}
        if [ ${?} -ne 0 ]; then

            # Concat the domain: pick the matching add-iam-policy-binding command
            if [ "${domain}" == "project" ]; then
                cmd="gcloud projects add-iam-policy-binding ${domain_id}"
            elif [ "${domain}" == "folder" ]; then
                cmd="gcloud alpha resource-manager folders add-iam-policy-binding ${domain_id}"
            elif [ "${domain}" == "function" ]; then
                cmd="gcloud functions add-iam-policy-binding ${domain_id}"
            else
                exitOnError "Unsupported add-iam-policy-binding to '${domain}' domain" -1
            fi

            echoInfo "Binding '${email}' role '${role}' to ${domain}: ${domain_id}..."
            # member type depends on the email form: serviceAccount / allUsers / user
            if [[ "${email}" == *".iam.gserviceaccount.com" ]]; then
                ${cmd} --member serviceAccount:${email} --role ${role} > /dev/null
            elif [[ "${email}" == "allUsers" ]]; then
                ${cmd} --member ${email} --role ${role} > /dev/null
            else
                ${cmd} --member user:${email} --role ${role} > /dev/null
            fi
            exitOnError "Failed to bind role: '${role}' to ${domain}: ${domain_id}"
        fi
    done
}
| true
|
2cb7c8321f993a7ec1687f7da9ebb01b0af6e5bb
|
Shell
|
nohkwangsun/kube-sugar
|
/bin/mk.all
|
UTF-8
| 695
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
# in mk.all
# Generates a helper script ks.all.<name> that runs
# "kubectl --all-namespaces=true get <resource type>".
# BUGFIX: the usage/help text previously referenced the sibling tool
# (mk.get / ks.get.*) due to copy-paste; this script creates ks.all.* files.
function help {
	echo "Usage: mk.all {resource type} {resource command}"
	echo " ex) $ mk.all po pod      # make new script, ks.all.pod for getting po in all namespaces"
	echo "     $ mk.all sc service  # make new script, ks.all.service for getting sc in all namespaces"
}
if [ $# == 0 ]
then
	help
	exit 0
elif [ $# != 2 ]
then
	echo "Error: invalid the number of arguments : $#"
	help
	exit 1
fi
# second argument names the generated script; first is the kubectl resource
FILE_NAME="ks.all.$2"
if [ -f "$FILE_NAME" ]
then
	echo "Error: already exists file : $FILE_NAME"
	exit 2
fi
# Emit the wrapper: extra args are appended to the kubectl command line.
cat << EOF > $FILE_NAME
#!/bin/bash
# in $FILE_NAME
CMD="kubectl --all-namespaces=true get $1 \$@"
echo \$CMD
exec \$CMD
EOF
chmod +x $FILE_NAME
| true
|
7895a655a93079cc9de5775cbe316da212b5b87c
|
Shell
|
openembedded/meta-openembedded
|
/meta-oe/recipes-support/uthash/uthash/run-ptest
|
UTF-8
| 161
| 3.0625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
cd tests
for i in test*[0-9] ; do
if ./${i} | cmp -s "${i}.ans" - ; then
echo "PASS: ${i}"
else
echo "FAIL: ${i}"
fi
done
| true
|
49896a74a695dbda2c43e751e359b39ac744a3c8
|
Shell
|
bjorndown/wasgeit
|
/build-and-install.sh
|
UTF-8
| 365
| 2.65625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
set -e # stop script on errors
set -u # fail on unresolved variables
# Build the frontend bundle (subshell keeps the cwd unchanged afterwards).
(
	cd frontend
	npm install
	# build frontend
	node node_modules/webpack/bin/webpack.js
)
# Rebuild the image, replace the running container (set -e aborts on any failure).
docker build -t wasgeit .
docker stop wasgeit
docker rm wasgeit
docker run -dp 80:8080 --name wasgeit -e WASGEIT_ACCESS_TOKEN=${WASGEIT_ACCESS_TOKEN} wasgeit
| true
|
127e7f4ed7d45139e58d9da33fa26ea244f9abad
|
Shell
|
hydruid/zenoss
|
/core-autodeploy/4.2.4/misc/zenup.sh
|
UTF-8
| 1,294
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
#######################################################
# Version: 01a Alpha - 04 #
# Status: Functional...Not ready for production #
# Notes: Almost ready, just a few bugs to squash #
#######################################################
# Installs the Zenoss ZenUp patch tool on a Debian/Ubuntu host:
# satisfies the RPM's library expectations via symlinks, extracts the
# RPM payload by hand (rpm2cpio), and runs the zenoss-side helper script.
# Create symbolic link
ln -s /usr/local/zenoss /opt
# Install required packages
# (the RPM links against RHEL's libssl.so.10/libcrypto.so.10 names,
# so map them onto Debian's 1.0.0 sonames)
apt-get install libssl1.0.0 libssl-dev -y
ln -s /lib/x86_64-linux-gnu/libssl.so.1.0.0 /usr/lib/libssl.so.10
ln -s /lib/x86_64-linux-gnu/libcrypto.so.1.0.0 /usr/lib/libcrypto.so.10
# Download and extract ZenUp RPM
# (rpm2cpio + cpio extracts the payload without needing rpm itself)
mkdir ~zenoss/temp-zenup && cd ~zenoss/temp-zenup
wget -N http://wiki.zenoss.org/download/core/zenup/zenup-1.0.0.131-1.el6.x86_64.rpm
rpm2cpio zenup-1.0.0.131-1.el6.x86_64.rpm | cpio -i --make-directories
cp -fr ~zenoss/temp-zenup/opt/zenup /usr/local/zenoss/
chown -R zenoss:zenoss /usr/local/zenoss/zenup
ln -s /usr/local/zenoss/zenup /opt
chmod +x /usr/local/zenoss/zenup/bin/zenup
# register the installed product with zenup via its manifest file
echo "zenoss 4.2.4-1897.el6 zenoss_core" >> /opt/zenoss/.manifest && chown -R zenoss:zenoss /opt/zenoss/.manifest
# fetch and run the follow-up steps as the zenoss user
wget -N https://raw.github.com/hydruid/zenoss/master/core-autodeploy/4.2.4/misc/zenup-helper.sh -P ~zenoss/temp-zenup/ && chmod +x zenup-helper.sh
su - zenoss -c '/bin/sh ~zenoss/temp-zenup/zenup-helper.sh'
| true
|
d7dc71da5d28092b0ae4d9e958b78f0a4be9f91f
|
Shell
|
apache/incubator-milagro-dta
|
/build-static.sh
|
UTF-8
| 2,856
| 2.65625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#! /bin/bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
# Builds a fully static "milagro" binary: compiles the C crypto
# dependencies (amcl, liboqs, pqnist) as static libs into a temp prefix,
# then builds the Go service against them with static linking.
set -e
# build prerequisites (assumes a Debian/Ubuntu base, e.g. a docker build)
apt-get update && apt-get install \
	ca-certificates git g++ gcc curl \
	make cmake automake libtool libssl-dev
CURRENT_PATH=$(pwd)
OUTPUT_PATH=$CURRENT_PATH/bin
# throwaway prefix where all static libs and headers are installed
BUILD_PATH=`mktemp -d`
LIB_SOURCE_PATH=$BUILD_PATH/src
# point both the system toolchain and cgo at the temp prefix
export LIBRARY_PATH=$BUILD_PATH/lib
export C_INCLUDE_PATH=$BUILD_PATH/include
export CGO_LDFLAGS="-L $LIBRARY_PATH"
export CGO_CPPFLAGS="-I $C_INCLUDE_PATH"
echo Building Milagro Crypt C library
git clone https://github.com/apache/incubator-milagro-crypto-c.git $LIB_SOURCE_PATH/milagro-crypto-c
cd $LIB_SOURCE_PATH/milagro-crypto-c
git checkout 1.0.0
mkdir build
cd build
# static lib, only the curves/primitives the service needs; -fPIC so the
# objects can be folded into the final static link
cmake \
	-DCMAKE_BUILD_TYPE=Release \
	-DBUILD_SHARED_LIBS=OFF \
	-DAMCL_CHUNK=64 \
	-DAMCL_CURVE="BLS381,SECP256K1" \
	-DAMCL_RSA="" \
	-DBUILD_PYTHON=OFF \
	-DBUILD_BLS=ON \
	-DBUILD_WCC=OFF \
	-DBUILD_MPIN=OFF \
	-DBUILD_X509=OFF \
	-DCMAKE_C_FLAGS="-fPIC" \
	-DCMAKE_INSTALL_PREFIX=$BUILD_PATH ..
make && make install
echo Building LibOQS
# pinned to a known-good commit; only the KEM/sig algorithms needed are kept
git clone https://github.com/open-quantum-safe/liboqs.git $LIB_SOURCE_PATH/liboqs
cd $LIB_SOURCE_PATH/liboqs
git checkout 7cb03c3ce9182790c77e69cd21a6901e270781d6
autoreconf -i
./configure \
	--prefix=$BUILD_PATH \
	--disable-shared \
	--disable-aes-ni \
	--disable-kem-bike \
	--disable-kem-frodokem \
	--disable-kem-newhope \
	--disable-kem-kyber \
	--disable-sig-qtesla \
	--disable-doxygen-doc
make -j && make install
echo Building pqnist
# pqnist sources live in this repo (libs/crypto/libpqnist); out-of-tree build
mkdir -p $LIB_SOURCE_PATH/pqnist
cd $LIB_SOURCE_PATH/pqnist
cmake \
	-DCMAKE_BUILD_TYPE=Release\
	-DBUILD_SHARED_LIBS=OFF \
	-DCMAKE_INSTALL_PREFIX=$BUILD_PATH \
	$CURRENT_PATH/libs/crypto/libpqnist
make && make install
echo Downloading Go
# self-contained Go toolchain so the build does not depend on a host Go
curl -o "$BUILD_PATH/go.tar.gz" "https://dl.google.com/go/go1.12.9.linux-amd64.tar.gz"
cd $BUILD_PATH && tar xzvf go.tar.gz
export GOROOT=$BUILD_PATH/go
cd $CURRENT_PATH
# static final link: external linkmode + -static pulls the C libs in
GO111MODULES=on \
CGO_ENABLED=1 \
$GOROOT/bin/go build -o target/out \
	-ldflags '-w -linkmode external -extldflags "-static"' \
	-o $OUTPUT_PATH/milagro \
	github.com/apache/incubator-milagro-dta/cmd/service
| true
|
bf90218877b4c4b2fe2c6d5cf7ed494532e2ea13
|
Shell
|
totte/core
|
/pkgconfig/PKGBUILD
|
UTF-8
| 912
| 2.859375
| 3
|
[] |
no_license
|
#
# Chakra Packages for Chakra, part of chakra-project.org
#
# maintainer (i686): Phil Miller <philm[at]chakra-project[dog]org>
# maintainer (x86_64): Manuel Tortosa <manutortosa[at]chakra-project[dot]org>

pkgname=pkg-config
pkgver=0.26
pkgrel=1
pkgdesc="A system for managing library compile/link flags"
arch=('i686' 'x86_64')
url="http://pkgconfig.freedesktop.org/wiki/"
license=('GPL')
groups=('base-devel')
depends=('glibc' 'popt' 'glib2')
provides=("pkgconfig=${pkgver}")
conflicts=('pkgconfig')
replaces=('pkgconfig')
source=(http://pkg-config.freedesktop.org/releases/${pkgname}-${pkgver}.tar.gz)
md5sums=('47525c26a9ba7ba14bf85e01509a7234')

build() {
  # Quote the paths: $srcdir may contain spaces on a user's build box.
  cd "${srcdir}/${pkgname}-${pkgver}"
  # Use system popt
  ./configure --prefix=/usr --with-installed-popt
  make
}

check() {
  cd "${srcdir}/${pkgname}-${pkgver}"
  make check
}

package() {
  cd "${srcdir}/${pkgname}-${pkgver}"
  make DESTDIR="${pkgdir}" install
}
| true
|
808ee62c4531ae35717e724cfcc4a337159654f5
|
Shell
|
tikrai/payments
|
/docker/postgres/testenv.sh
|
UTF-8
| 165
| 2.75
| 3
|
[] |
no_license
|
#!/bin/sh
# Create an extra "<POSTGRES_DB>_test" database during postgres container
# initialisation.
# NOTE(review): "${psql[@]}" is a bash array expansion, which is not valid
# plain sh despite the #!/bin/sh shebang; presumably this file is *sourced*
# by the (bash) docker-entrypoint.sh, which defines the psql array and the
# POSTGRES_* variables -- confirm against the postgres image entrypoint.
echo "Creating test database"
"${psql[@]}" --username $POSTGRES_USER <<-EOSQL
    CREATE DATABASE "${POSTGRES_DB}_test" ;
EOSQL
echo "Test database created"
| true
|
4dfad730927eb081d774909f259d7f9e29a0dc7c
|
Shell
|
wangyoucao577/go-release-action
|
/release.sh
|
UTF-8
| 6,948
| 3.703125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash -eux
# Derive the release binary name, tag, asset name and target repo from the
# action inputs, falling back to repository/ref naming when inputs are absent.

BINARY_NAME=$(basename "${GITHUB_REPOSITORY}")
if [ -n "${INPUT_BINARY_NAME}" ]; then
  BINARY_NAME=${INPUT_BINARY_NAME}
fi

RELEASE_TAG=$(basename "${GITHUB_REF}")
if [ -n "${INPUT_RELEASE_TAG}" ]; then
  RELEASE_TAG=${INPUT_RELEASE_TAG}
elif [ -n "${INPUT_RELEASE_NAME}" ]; then # prevent upload-asset by tag due to github-ref default if a name is given
  RELEASE_TAG=""
fi
RELEASE_NAME=${INPUT_RELEASE_NAME}

# Default asset name encodes tag/os/arch, with GOAMD64/GOARM variants appended.
RELEASE_ASSET_NAME=${BINARY_NAME}-${RELEASE_TAG}-${INPUT_GOOS}-${INPUT_GOARCH}
if [ -n "${INPUT_GOAMD64}" ]; then
  RELEASE_ASSET_NAME=${BINARY_NAME}-${RELEASE_TAG}-${INPUT_GOOS}-${INPUT_GOARCH}-${INPUT_GOAMD64}
fi
if [ -n "${INPUT_GOARM}" ] && [[ "${INPUT_GOARCH}" =~ arm ]]; then
  RELEASE_ASSET_NAME=${BINARY_NAME}-${RELEASE_TAG}-${INPUT_GOOS}-${INPUT_GOARCH}v${INPUT_GOARM}
fi
if [ -n "${INPUT_ASSET_NAME}" ]; then
  RELEASE_ASSET_NAME=${INPUT_ASSET_NAME}
fi

RELEASE_REPO=${GITHUB_REPOSITORY}
if [ -n "${INPUT_RELEASE_REPO}" ]; then
  RELEASE_REPO=${INPUT_RELEASE_REPO}
fi

# Only a fixed set of triggering events is supported; anything else aborts.
case "${GITHUB_EVENT_NAME}" in
release | push | workflow_dispatch | workflow_run)
  echo "Event: ${GITHUB_EVENT_NAME}"
  ;;
*)
  echo "Unsupport event: ${GITHUB_EVENT_NAME}!"
  exit 1
  ;;
esac

# workaround to solve the issue: fatal: detected dubious ownership in repository at '/github/workspace'
git config --global --add safe.directory "${GITHUB_WORKSPACE}"

# execute pre-command if exist, e.g. `go get -v ./...`
if [ -n "${INPUT_PRE_COMMAND}" ]; then
  eval "${INPUT_PRE_COMMAND}"
fi
# binary suffix: Windows executables need .exe
EXT=''
if [ "${INPUT_GOOS}" == 'windows' ]; then
  EXT='.exe'
fi

# -ldflags is only passed to `go build` when ldflags were actually provided
LDFLAGS_PREFIX=''
if [ -n "${INPUT_LDFLAGS}" ]; then
  LDFLAGS_PREFIX="-ldflags"
fi

# fulfill GOAMD64 option: only meaningful for amd64 targets (default v1 there)
if [ -n "${INPUT_GOAMD64}" ]; then
  if [[ "${INPUT_GOARCH}" =~ amd64 ]]; then
    GOAMD64_FLAG="${INPUT_GOAMD64}"
  else
    echo "GOAMD64 should only be use with amd64 arch." >>/dev/stderr
    GOAMD64_FLAG=""
  fi
else
  if [[ "${INPUT_GOARCH}" =~ amd64 ]]; then
    GOAMD64_FLAG="v1"
  else
    GOAMD64_FLAG=""
  fi
fi

# fulfill GOARM option: only meaningful for arm targets, otherwise empty
if [ -n "${INPUT_GOARM}" ]; then
  if [[ "${INPUT_GOARCH}" =~ arm ]]; then
    GOARM_FLAG="${INPUT_GOARM}"
  else
    echo "GOARM should only be use with arm arch." >>/dev/stderr
    GOARM_FLAG=""
  fi
else
  # both branches of the original if/else assigned "" -- collapsed
  GOARM_FLAG=""
fi
# build: run the user's build command in the project path, collecting the
# binary (and later any extra files) into a unique artifacts folder.
BUILD_ARTIFACTS_FOLDER=build-artifacts-$(date +%s)
mkdir -p ${INPUT_PROJECT_PATH}/${BUILD_ARTIFACTS_FOLDER}
cd ${INPUT_PROJECT_PATH}
if [[ "${INPUT_BUILD_COMMAND}" =~ ^make.* ]]; then
  # start with make, assumes using make to build golang binaries, execute it directly
  GOAMD64=${GOAMD64_FLAG} GOARM=${GOARM_FLAG} GOOS=${INPUT_GOOS} GOARCH=${INPUT_GOARCH} eval ${INPUT_BUILD_COMMAND}
  if [ -f "${BINARY_NAME}${EXT}" ]; then
    # assumes the binary will be generated in current dir, copy it for later processes
    cp ${BINARY_NAME}${EXT} ${BUILD_ARTIFACTS_FOLDER}/
  fi
else
  # plain `go build` style invocation; -o points into the artifacts folder
  GOAMD64=${GOAMD64_FLAG} GOARM=${GOARM_FLAG} GOOS=${INPUT_GOOS} GOARCH=${INPUT_GOARCH} ${INPUT_BUILD_COMMAND} -o ${BUILD_ARTIFACTS_FOLDER}/${BINARY_NAME}${EXT} ${INPUT_BUILD_FLAGS} ${LDFLAGS_PREFIX} "${INPUT_LDFLAGS}"
fi

# executable compression (currently only upx is recognised)
if [ ! -z "${INPUT_EXECUTABLE_COMPRESSION}" ]; then
  if [[ "${INPUT_EXECUTABLE_COMPRESSION}" =~ ^upx.* ]]; then
    # start with upx, use upx to compress the executable binary
    eval ${INPUT_EXECUTABLE_COMPRESSION} ${BUILD_ARTIFACTS_FOLDER}/${BINARY_NAME}${EXT}
  else
    echo "Unsupport executable compression: ${INPUT_EXECUTABLE_COMPRESSION}!"
    exit 1
  fi
fi

# prepare extra files: copied from the workspace root into the artifacts folder
if [ ! -z "${INPUT_EXTRA_FILES}" ]; then
  cd ${GITHUB_WORKSPACE}
  cp -r ${INPUT_EXTRA_FILES} ${INPUT_PROJECT_PATH}/${BUILD_ARTIFACTS_FOLDER}/
  cd ${INPUT_PROJECT_PATH}
fi

cd ${BUILD_ARTIFACTS_FOLDER}
ls -lha

# INPUT_COMPRESS_ASSETS=='TRUE' is used for backwards compatability. `AUTO`, `ZIP`, `OFF` are the recommended values
if [ ${INPUT_COMPRESS_ASSETS^^} == "TRUE" ] || [ ${INPUT_COMPRESS_ASSETS^^} == "AUTO" ] || [ ${INPUT_COMPRESS_ASSETS^^} == "ZIP" ]; then
  # compress and package binary, then calculate checksum
  RELEASE_ASSET_EXT='.tar.gz'
  MEDIA_TYPE='application/gzip'
  RELEASE_ASSET_FILE=${RELEASE_ASSET_NAME}${RELEASE_ASSET_EXT}
  # Windows (or explicit ZIP) gets a .zip; everything else a .tar.gz.
  # dotglob makes sure hidden files in the artifacts folder are packaged too.
  if [ ${INPUT_GOOS} == 'windows' ] || [ ${INPUT_COMPRESS_ASSETS^^} == "ZIP" ]; then
    RELEASE_ASSET_EXT='.zip'
    MEDIA_TYPE='application/zip'
    RELEASE_ASSET_FILE=${RELEASE_ASSET_NAME}${RELEASE_ASSET_EXT}
    ( shopt -s dotglob; zip -vr ${RELEASE_ASSET_FILE} * )
  else
    ( shopt -s dotglob; tar cvfz ${RELEASE_ASSET_FILE} * )
  fi
elif [ ${INPUT_COMPRESS_ASSETS^^} == "OFF" ] || [ ${INPUT_COMPRESS_ASSETS^^} == "FALSE" ]; then
  # no archive: upload the raw binary under the asset name
  RELEASE_ASSET_EXT=${EXT}
  MEDIA_TYPE="application/octet-stream"
  RELEASE_ASSET_FILE=${RELEASE_ASSET_NAME}${RELEASE_ASSET_EXT}
  cp ${BINARY_NAME}${EXT} ${RELEASE_ASSET_FILE}
else
  echo "Invalid value for INPUT_COMPRESS_ASSETS: ${INPUT_COMPRESS_ASSETS} . Acceptable values are AUTO,ZIP, or OFF."
  exit 1
fi
MD5_SUM=$(md5sum ${RELEASE_ASSET_FILE} | cut -d ' ' -f 1)
SHA256_SUM=$(sha256sum ${RELEASE_ASSET_FILE} | cut -d ' ' -f 1)

# prefix upload extra params
GITHUB_ASSETS_UPLOADR_EXTRA_OPTIONS=''
if [ ${INPUT_OVERWRITE^^} == 'TRUE' ]; then
  GITHUB_ASSETS_UPLOADR_EXTRA_OPTIONS="-overwrite"
fi

# update binary and checksum: the asset itself, then optional .md5/.sha256 files
github-assets-uploader -logtostderr -f ${RELEASE_ASSET_FILE} -mediatype ${MEDIA_TYPE} ${GITHUB_ASSETS_UPLOADR_EXTRA_OPTIONS} -repo ${RELEASE_REPO} -token ${INPUT_GITHUB_TOKEN} -tag=${RELEASE_TAG} -releasename=${RELEASE_NAME} -retry ${INPUT_RETRY}
if [ ${INPUT_MD5SUM^^} == 'TRUE' ]; then
  MD5_EXT='.md5'
  MD5_MEDIA_TYPE='text/plain'
  echo ${MD5_SUM} >${RELEASE_ASSET_FILE}${MD5_EXT}
  github-assets-uploader -logtostderr -f ${RELEASE_ASSET_FILE}${MD5_EXT} -mediatype ${MD5_MEDIA_TYPE} ${GITHUB_ASSETS_UPLOADR_EXTRA_OPTIONS} -repo ${RELEASE_REPO} -token ${INPUT_GITHUB_TOKEN} -tag=${RELEASE_TAG} -releasename=${RELEASE_NAME} -retry ${INPUT_RETRY}
fi
if [ ${INPUT_SHA256SUM^^} == 'TRUE' ]; then
  SHA256_EXT='.sha256'
  SHA256_MEDIA_TYPE='text/plain'
  echo ${SHA256_SUM} >${RELEASE_ASSET_FILE}${SHA256_EXT}
  github-assets-uploader -logtostderr -f ${RELEASE_ASSET_FILE}${SHA256_EXT} -mediatype ${SHA256_MEDIA_TYPE} ${GITHUB_ASSETS_UPLOADR_EXTRA_OPTIONS} -repo ${RELEASE_REPO} -token ${INPUT_GITHUB_TOKEN} -tag=${RELEASE_TAG} -releasename=${RELEASE_NAME} -retry ${INPUT_RETRY}
fi

# execute post-command if exist, e.g. upload to AWS s3 or aliyun OSS
if [ ! -z "${INPUT_POST_COMMAND}" ]; then
  eval ${INPUT_POST_COMMAND}
fi
| true
|
3cd30fc1116f4d08eaed4f1f58114c9cb1c65e64
|
Shell
|
66pix/lambda-resize
|
/deploy.sh
|
UTF-8
| 1,368
| 3.671875
| 4
|
[
"ISC"
] |
permissive
|
#!/bin/bash
# Deploy the image-resize Lambda via node-lambda.
# Usage: deploy.sh <environment> <comma-separated sizes>
set -o nounset
set -o errexit

ENVIRONMENT=$1
SIZES=$2

# Production bucket only for non-PR builds of master; staging otherwise.
DESTINATION_BUCKET=""
if [ -z "$LAMBCI_PULL_REQUEST" ] && [ "$LAMBCI_BRANCH" == "master" ]; then
  DESTINATION_BUCKET=$AWS_LAMBDA_IMAGE_RESIZE_PRODUCTION_BUCKET
else
  DESTINATION_BUCKET=$AWS_LAMBDA_IMAGE_RESIZE_STAGING_BUCKET
fi

# Branch name with slashes flattened, e.g. feature/x -> feature_x
BRANCH=${LAMBCI_BRANCH//\//_}
VERSION="$ENVIRONMENT-$BRANCH-$LAMBCI_BUILD_NUM"

echo ""
echo "Preparing config.json"
cp _config.json config.json
echo "Destination bucket: $DESTINATION_BUCKET"
sed -i "s/DESTINATION_BUCKET/$DESTINATION_BUCKET/g" config.json
echo "Sizes: $SIZES"
sed -i "s/SIZES/$SIZES/g" config.json

echo "Creating deploy.env file"
echo "SENTRY_DSN=$SENTRY_DSN" >> deploy.env

echo ""
echo "Deploying to $ENVIRONMENT"
# Note: the original passed --description twice; only the later one took
# effect, so the redundant first occurrence has been removed.
./node_modules/node-lambda/bin/node-lambda deploy \
  --environment "$ENVIRONMENT" \
  --timeout 180 \
  --memorySize 1536 \
  --accessKey "$AWS_LAMBDA_DEPLOY_ACCESS_KEY_ID" \
  --secretKey "$AWS_LAMBDA_DEPLOY_ACCESS_KEY_SECRET" \
  --functionName "${ENVIRONMENT}-resize-on-upload" \
  --handler index.handler \
  --region "$AWS_LAMBDA_IMAGE_RESIZE_REGION" \
  --role "$AWS_LAMBDA_IMAGE_RESIZE_ROLE" \
  --runtime "nodejs4.3" \
  --description "Creates resized copies of images on $DESTINATION_BUCKET when uploads occur" \
  --configFile deploy.env
| true
|
7edd8d7473ef105476be8f4f06bc41e086ec1fff
|
Shell
|
blackawa/oracle-backup-table-script
|
/execute_new_ddl.sh
|
UTF-8
| 707
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/sh
# Run every file under ./new_ddl as a SQL script via sqlplus, after an
# interactive confirmation of the connection string.
echo "----------------------------------------------"
echo "./new_ddl ディレクトリ内のファイルを全てSQLとして"
echo "実行する。"
echo "----------------------------------------------"

# Load Configurations
. ./database_configurations.conf

CONNECTION_STRING=${USER}/${PASSWORD}@${HOSTNAME}:${PORTNO}/${SERVICENAME}

echo
echo "${CONNECTION_STRING}でアクセスしようとしています。意図した挙動ですか?"
echo -n "PRESS ENTER to execute me"
read -r wait

# Abort if the directory is missing; otherwise the loop would run against
# whatever directory we happen to be in.
cd ./new_ddl || exit 1

# Iterate with a glob instead of parsing `ls` output (robust against
# unusual file names; dotfiles are skipped either way).
for FILE_NAME in *
do
  echo "EXECUTING ${FILE_NAME} ..."
  sqlplus -s "${CONNECTION_STRING}" << EOF
-- コマンドの結果出力を非表示
set termout off
@${FILE_NAME}
EXIT;
EOF
done
| true
|
f7b8ad08f07298ae9bfc37e3fece4a2d1d0937b4
|
Shell
|
znz/anyenv-update
|
/bin/anyenv-update
|
UTF-8
| 5,061
| 4.15625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
#
# Summary: Update all **env and all installed plugins.
#
# Usage: anyenv update [--force] [--verbose|--quiet|--noop] [--without-plugin] [--without-manifest] [<envs>]
#
# -f/--force Force update
# -v/--verbose Verbose mode: print downloading status to stdout
# -q/--quiet Quiet mode: no printing
# -n/--noop Noop mode: dry run and do nothing
# --without-plugin Update envs, but not update plugin of each envs
# --without-manifest Update envs, but not anyenv's install manifests
#
# For detailed information, see: https://github.com/znz/anyenv-update
#
set -eo pipefail
[ -n "$ANYENV_DEBUG" ] && set -x

# Provide anyenv completions (anyenv calls the plugin with --complete to
# list possible arguments; the env names come from `anyenv envs`)
if [ "$1" = "--complete" ]; then
  echo --force
  echo --verbose
  echo --quiet
  echo --noop
  echo --without-plugin
  echo --without-manifest
  echo anyenv
  exec anyenv-envs
fi

# bash < 4 needs an explicit SIGINT trap so Ctrl-C stops the whole script
if [[ ${BASH_VERSINFO[0]} -lt 4 ]]; then
  sigint_handler () {
    exit 130
  }
  trap sigint_handler SIGINT
fi

# Print help for `anyenv update`; exits with $1 when an exit code is given.
usage () {
  anyenv-help update 2>/dev/null
  [ -z "$1" ] || exit "$1"
}
# Split argv into two global arrays:
#   OPTIONS   - flag names with leading dashes stripped; bundled short flags
#               (e.g. -fv) are exploded one letter at a time
#   ARGUMENTS - every non-flag word, in order
parse_options () {
  OPTIONS=()
  ARGUMENTS=()
  local token pos
  for token in "$@"; do
    if [ "${token:0:1}" != "-" ]; then
      # plain argument
      ARGUMENTS+=("$token")
    elif [ "${token:1:1}" = "-" ]; then
      # long option: --name -> "name"
      OPTIONS+=("${token:2}")
    else
      # short option bundle: -ab -> "a" "b"
      for (( pos = 1; pos < ${#token}; pos++ )); do
        OPTIONS+=("${token:$pos:1}")
      done
    fi
  done
}
# Prefix every line read from stdin with a colored " | " gutter.
indent_output() {
  while read -r data; do
    printf " ${color}|${reset} %s\n" "$data"
  done
}

# Print $1 in the theme color unless quiet mode is active.
print_colored() {
  if [ -z "$QUIET" ]; then
    printf "${color}%s${reset}\n" "$1"
  fi
}

# "anyenv" itself plus every installed **env, space-separated on one line.
get_allenvs () {
  echo "anyenv $(anyenv-envs | tr '\n' ' ')"
}

# Name of the currently checked-out branch, parsed from `git branch` output.
# NOTE(review): on a detached HEAD the "* (HEAD detached at ...)" line would
# yield "(HEAD" -- presumably callers only hit this on a real branch; confirm.
git_currentbranch () {
  branch=$(command git branch | grep '^\*' | awk '{print $2}')
  echo "$branch"
}

# Wrapper around git that honors the --noop/--quiet/--verbose modes.
git () {
  if [ -n "$NOOP" ]; then
    # dry run
    echo git "$@" | indent_output
  elif [ -n "$QUIET" ]; then
    # mute all
    command git "$@" &>/dev/null
  elif [ -n "$VERBOSE" ]; then
    # output all
    command git "$@" 2>&1 | indent_output
  else
    # output only stderr
    # shellcheck disable=SC2069
    command git "$@" 2>&1 >/dev/null | indent_output
  fi
}
# Hint printed when a plain `git pull` fails (e.g. diverged history).
print_use_forceopt () {
  if [ -z "$QUIET" ]; then
    printf " ${fail_color}| %s${reset}\n" \
      "Failed to update. Use 'verbose' option for detailed, or 'force' option."
  fi
}

# Update the git checkout in the *current directory*; $1 is only a display
# label. With --force, hard-resets the current branch onto its origin branch;
# otherwise attempts a fast-forward pull.
anyenv_update () {
  if [ -d .git ]; then
    print_colored "Updating '$1'..."
    if [ -n "$VERBOSE" ]; then
      echo "cd $(pwd)" | indent_output
    fi
    if [ -n "$FORCE" ]; then
      branch=$(git_currentbranch)
      git fetch --prune
      git checkout "$branch"
      git reset --hard "origin/$branch"
    else
      git pull --no-rebase --ff || print_use_forceopt
    fi
  else
    print_colored "Skipping '$1'; not git repo"
  fi
}

# Update every plugin checkout under ./plugins of env $1.
# nullglob makes the loop a no-op when there are no plugins.
anyenv_update_plugins () {
  shopt -s nullglob
  for plugin in plugins/*; do
    pushd "$plugin" >/dev/null
    anyenv_update "$1/$(basename "$plugin")"
    popd >/dev/null
  done
  shopt -u nullglob
}

# acts like `anyenv install --update`: refreshes anyenv's install-manifest
# checkout under $ANYENV_DEFINITION_ROOT (XDG default when unset).
anyenv_install_update () {
  if [ -z "${ANYENV_DEFINITION_ROOT+x}" ]; then
    # $XDG_CONFIG_HOME/anyenv/anyenv-install or ~/.config/anyenv/anyenv-install
    ANYENV_DEFINITION_ROOT="${XDG_CONFIG_HOME:-${HOME}/.config}/anyenv/anyenv-install"
  fi
  if [ ! -d "$ANYENV_DEFINITION_ROOT" ]; then
    printf "${fail_color}%s${reset}\n" "Manifest directory doesn't exist: ${ANYENV_DEFINITION_ROOT}"
    printf "${fail_color}%s${reset}\n" "Try anyenv install --init"
    return 1
  fi
  pushd "$ANYENV_DEFINITION_ROOT" > /dev/null
  anyenv_update "anyenv manifest directory"
  popd > /dev/null
}
# Parse command line flags
FORCE=""
VERBOSE=""
QUIET=""
NOOP=""
WITHOUT_PLUGIN=""
WITHOUT_MANIFEST=""
parse_options "$@"
for option in "${OPTIONS[@]}"; do
  case "$option" in
  "h" | "help" )
    usage 0
    ;;
  "f" | "force" )
    FORCE=true
    ;;
  "v" | "verbose" )
    VERBOSE=true
    ;;
  "q" | "quiet" )
    QUIET=true
    ;;
  "n" | "noop" )
    NOOP=true
    ;;
  "without-plugin" )
    WITHOUT_PLUGIN=true
    ;;
  "without-manifest" )
    WITHOUT_MANIFEST=true
    ;;
  * )
    echo "no such option: ${option}" >&2
    echo
    usage 1 >&2
    ;;
  esac
done

# Set colored output for TTY (plain output when piped/redirected)
if [ -t 1 ]; then
  color="\e[1;32m"
  fail_color="\e[1;33m"
  reset="\e[0m"
else
  color=""
  fail_color=""
  reset=""
fi

# --noop needs to print what it would do, so it overrides --quiet
if [ -n "$NOOP" ] && [ -n "$QUIET" ]; then
  QUIET=""
  print_colored "Both --noop and --quiet given; ignoring --quiet" >&2
fi

TARGET_ENVS="${ARGUMENTS[*]}"
if [ "${#ARGUMENTS[@]}" -eq 0 ]; then
  # set all envs when no args are given
  TARGET_ENVS=$(get_allenvs)
fi

# For each env, resolve its root (prefer the $<ENV>_ROOT variable, falling
# back to `<env> root`), then update the checkout and optionally its plugins.
for env in $TARGET_ENVS; do
  ENV_ROOT_VALUE=$(echo "${env}_ROOT" | tr '[:lower:]' '[:upper:]')
  ENV_ROOT_VALUE=$(eval echo "\${${ENV_ROOT_VALUE}:-\$($env root)}")
  cd "$ENV_ROOT_VALUE"
  anyenv_update "$env"
  [ -n "$WITHOUT_PLUGIN" ] || anyenv_update_plugins "$env"
done

# Finally refresh anyenv's own install manifests unless suppressed.
[ -n "$WITHOUT_MANIFEST" ] || anyenv_install_update
| true
|
82e0af1c552475785996d843d84218ad70c4ce9d
|
Shell
|
fgeller/dotfiles
|
/bash/themes/mine.theme.bash
|
UTF-8
| 1,662
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# bash-it style prompt theme: SCM status markers, rvm/aws segments and a
# two-line prompt assembled in prompt() via PROMPT_COMMAND.

# SCM (git etc.) status glyphs used by scm_prompt_info
SCM_THEME_PROMPT_DIRTY=" ${bold_red}✘"
SCM_THEME_PROMPT_CLEAN=" ${bold_green}✔"
SCM_THEME_PROMPT_PREFIX=" "
SCM_THEME_PROMPT_SUFFIX=" "
SCM_NONE_CHAR=""
RVM_THEME_PROMPT_PREFIX="${bold_red}✦ "
RVM_THEME_PROMPT_SUFFIX=" ${reset_color}"

# Shown (via EXIT_STATUS) when the previous command returned non-zero
ERROR="${bold_red}❌ Error \n${normal}"
EXIT_STATUS="if [ \$? = 0 ]; then echo \"\"; else echo \"${ERROR}\"; fi"

# Ruby version segment: only shown in directories that look like Ruby projects
rvmrc_version_prompt () {
  if [ -f .rvmrc ] || [ -f .ruby\-version ] || [ -f Gemfile ]; then
    if which rvm &> /dev/null; then
      rvm=$(rvm tools identifier) || return
      clean=${rvm//ruby-/}
      echo -e "$RVM_THEME_PROMPT_PREFIX$clean$RVM_THEME_PROMPT_SUFFIX"
    fi
  fi
}

# AWS SAML session segment: role name plus remaining minutes, while the
# session (EXPIRE epoch seconds, AWS_ROLE, EXPIRETIME) is still valid
aws_saml_prompt () {
  local now=$( date -u +%s )
  local remain=$(( EXPIRE - now ))
  if [ -n "$AWS_ROLE" ] && [ -n "$EXPIRETIME" ] && [ $remain -gt 0 ]; then
    echo -e "${cyan}[${AWS_ROLE} $(($remain/60))m]${reset_color} "
  fi
}

# Right-hand prompt text (currently just the time; the user@host variant
# on the first line is immediately overwritten)
prompt_right () {
  RIGHT_PROMPT="\033[1;32m$USER\033[0m on \033[1;32m$HOSTNAME\033[0m ● $(date +"%T")\033[0m"
  RIGHT_PROMPT="$(date +"%T")\033[0m"
  echo -e "$(echo ${RIGHT_PROMPT})"
}

# Left-hand prompt: exit status marker, cwd, aws/rvm segments, SCM info
prompt_left () {
  LEFT_PROMPT="\[\033[G\]\`${EXIT_STATUS}\`\[${yellow}\]\w \[\$(aws_saml_prompt)\]\[\$(rvmrc_version_prompt)\]\[${bold_blue}\]\[\$(scm_char)\]\[\$(scm_prompt_info)\]\[${normal}\]\[${reset_color}\]"
  echo -e "${LEFT_PROMPT}"
}

# Assemble PS1; runs before every prompt via PROMPT_COMMAND
prompt () {
  if env | grep -q TERM_PROGRAM; then
    __iterm_tab_title
  fi
  # left, right
  # compensate=4
  # PS1=$(printf "%*s\r%s\n${PS2}" "$(($(tput cols)+${compensate}))" "$(prompt_right)" "$(prompt_left)")
  PS1=""$(prompt_left)"\@\n${PS2}"
}

PS2="\[${yellow}\]𝌆\[${normal}\]\[${reset_color}\] "
PROMPT_COMMAND=prompt
| true
|
6ad17299b769397d09dbf39af02fa5f2faee3c4f
|
Shell
|
bvermeer/CprE583
|
/MP-3/sw/host/avi2raw.sh
|
UTF-8
| 808
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/sh
# Split ../data/test.avi into PNG frames, convert each frame to .raw with
# png2raw, and emit a loader script (loadvideo.bat) that bloads each frame
# at consecutive memory addresses.

# Remove existing images
rm -rf ../data/image-*

# Converts the test.avi file into multiple frames
# -r is FPS, -dframes is number of frames, -t is duration
ffmpeg -i ../data/test.avi -r 10 -ss 00:00:14.2 -t 00:00:5.0 -s vga ../data/image-%3d.png

# Count the generated frames via the glob itself (no `ls` parsing).
# NOTE(review): if ffmpeg produced no frames the unmatched glob stays
# literal, same failure mode as the original ls-based loop.
set -- ../data/image-*.png
NFRAMES=$#

# Base load address; each frame advances it by 614400 bytes
# (presumably 640*480*2 for a VGA frame -- confirm against png2raw output).
ADDR=1074790400
HEXADDR=$(printf '%X' "$ADDR")
echo "#define STARTADDR 0x$HEXADDR" > ../data/nframes.h
echo "#define NFRAMES $NFRAMES" >> ../data/nframes.h

rm -rf ../data/loadvideo.bat
for f in ../data/image-*.png
do
  echo "Converting $f to .raw format and updating script"
  # The original prefixed "../data/" a second time (../data/../data/...),
  # which only resolved to the same file by accident; use the path directly.
  ./png2raw "$f"
  HEXADDR=$(printf '%X' "$ADDR")
  echo "bload $f.raw 0x$HEXADDR" >> ../data/loadvideo.bat
  ADDR=$((ADDR + 614400))
done
| true
|
69384f68a8fbd2aa3bcdf858c44e2c0c2b6c53fe
|
Shell
|
key4hep/key4hep-spack
|
/packages/bhlumi/BHLUMI
|
UTF-8
| 5,168
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/bash
# Wrapper around the BHLUMI/BHLUMI.exe executable

# Print usage, switches and examples for this wrapper.
help(){
  echo ""
  echo "+++ Wrapper around the BHLUMI.exe executable +++"
  echo ""
  echo -e "Usage: \tBHLUMI -e Ecms -n Nevts -f Thmin -t Thmax -x epscms -o output_file [-s seed_file]"
  echo -e " \tKKMCee -c config_file [-s seed_file]"
  echo ""
  echo "Switches:"
  echo -e " -c, --config file \t\tPath to configuration file"
  echo -e " -e, --ecms energy \t\tCenter of Mass energy in GeV"
  echo -e " -n, --nevts energy \t\tNumber of events to be generated"
  echo -e " -f, --Thmin theta \t\tMinimum theta [rad]"
  echo -e " -t, --Thmax theta \t\tMaximum theta [rad]"
  echo -e " -x, --epscms fraction \tEnergy cut-off in fraction of Ecms"
  echo -e " -o, --outfile file \t\tFile with the generated events in LHE format"
  echo -e " -s, --seedfile file \t\tFile to be used for seeding (randomly generated, if missing)"
  echo ""
  echo "Examples:"
  echo -e "BHLUMI -f 0.022 -t 0.082 -x 0.001 -e 91.2 -n 10000 -o kkmu_10000.LHE"
  echo -e "BHLUMI -c bhlumi.input"
  echo ""
  echo "Additional switches (for experts only):"
  echo -e " -k, --keyopt KEYOPT \t\tTechnical parameters switch [default 3021]"
  echo -e " \t\t\t\tKEYOPT = 1000*KEYGEN + 100*KEYREM + 10*KEYWGT + KEYRND"
  echo -e " -r, --keyrad KEYRAD \t\tPhysics parameters switch [default 1021]"
  echo -e " \t\t\t\tKEYRAD = 1000*KEYZET + 100*KEYUPD + 10*KEYMOD + KEYPIA"
  echo -e " (Contact BHLUMI authors for details, e.g. through https://github.com/KrakowHEPSoft/BHLUMI)"
  echo ""
}
# Defaults for the generator parameters (overridable via switches below)
_ECMS="91.2"
_NEVTS="100"
_THMIN=".022768344"
_THMAX=".082035891"
_EPSCMS="1D-3"
_KEYOPT="3021"
_KEYRAD="1021"
_OUTFILE="BHLUMI_OUT.LHE"

# Read arguments
# NOTE(review): the catch-all `*` is the first alternative of the last case
# arm, so ANY unrecognised argument prints help and exits 1; the explicit
# -h/--help alternatives after it are redundant but harmless.
while [[ $# -gt 0 ]]
do
  arg="$1"
  case $arg in
    -c | --config)
      _CONFFILE="$2"
      shift # past argument
      shift # past value
      ;;
    -e | --ecms)
      _ECMS="$2"
      shift
      shift
      ;;
    -f | --thmin)
      _THMIN="$2"
      shift
      shift
      ;;
    -t | --thmax)
      _THMAX="$2"
      shift
      shift
      ;;
    -x | --epscms)
      _EPSCMS="$2"
      shift
      shift
      ;;
    -k | --keyopt)
      _KEYOPT="$2"
      shift
      shift
      ;;
    -r | --keyrad)
      _KEYRAD="$2"
      shift
      shift
      ;;
    -n | --nevts)
      _NEVTS="$2"
      shift
      shift
      ;;
    -s | --seedfile)
      _SEEDFILE="$2"
      shift
      shift
      ;;
    -o | --outfile)
      _OUTFILE="$2"
      shift
      shift
      ;;
    * | -h | --help)
      help
      exit 1
  esac
done

# The BHLUMI.exe binary ships next to this wrapper under ../bin
BHLUMI_DIR=$(dirname $(dirname $0))
BHLUMI_EXE=${BHLUMI_DIR}/bin/BHLUMI.exe

# BHLUMI.exe reads ./bhlumi.input; keep a backup of any previous one.
BHLUMIINPUT="./bhlumi.input"
if test -f $BHLUMIINPUT ; then
  echo "Existing input file saved to ${BHLUMIINPUT}.last ... "
  cp -rp $BHLUMIINPUT "${BHLUMIINPUT}.last"
  rm -f $BHLUMIINPUT
fi

# Either link the user-supplied config, or generate one from the switches.
if test ! "x$_CONFFILE" = "x" && test -f $_CONFFILE; then
  ln -sf ${_CONFFILE} $BHLUMIINPUT
else
  # We create a config file based on the input switches: this is the initial common part
  cat > $BHLUMIINPUT <<EOF
+++========= Input Data set for BHLUMI ===============================
0 1 0 0 0 0 0 0 KAT1,KAT2,KAT3,KAT4,KAT5,KAT6,KAT7,KAT8
${_NEVTS} NEVT
${_KEYOPT} KEYOPT = 1000*KEYGEN +100*KEYREM +10*KEYWGT +KEYRND
${_KEYRAD} KEYRAD = 100*KEYUPD +10*KEYMOD +KEYPIA
2 KEYTRI Obsolete!!!
${_ECMS}D0 CMSENE
${_THMIN} Tming theta_min [rad] genaration
${_THMAX} Tmaxg theta_max [rad] generation
0.9999D0 VMAXG v_max generation
${_EPSCMS} XK0 eps_CMS generation
.024 TminW theta_min sical trigger wide
.058 TmaxW theta_max sical trigger wide
.024 TminN theta_min trigger narrow
.058 TminN theta_max trigger narrow
0.5D0 VMAXE v_max trigger maximum v
32 NPHI nphi sical trigger no of phi sect.
16 NTHE ntheta sical trigger no of theta sect.
================= the end of input data set for BHLUMI ================
EOF
fi

# Create seed file: link the user's, or derive two seeds from the clock
# (day-of-year + time + nanoseconds, split into two integers).
if test ! "x$_SEEDFILE" = "x" && test -f $_SEEDFILE; then
  ln -sf $_SEEDFILE ./iniseed
else
  _seed=`date +"%j%H%M%N"`
  _seed1=`echo ${_seed:0:8}`
  _seed2=`echo ${_seed:8:15}`
  echo "Seeds: $_seed1 $_seed2"
  cat > ./iniseed <<EOF
$_seed1 IJKLIN= $_seed2
0 NTOTIN= 0
0 NTOT2N= 0
EOF
fi
cat ./iniseed

# Run
cat > ./semaphore <<EOF
START
EOF

# Save existing output file (BHLUMI.exe always writes out.lhe)
DEFOUTFILE="out.lhe"
if test -f $DEFOUTFILE ; then
  echo "Existing LHE output file saved to ${DEFOUTFILE}.last ... "
  mv $DEFOUTFILE "${DEFOUTFILE}.last"
fi

time ${BHLUMI_EXE} < $BHLUMIINPUT

# Set output file: rename the generator's fixed output to the requested name
if test -f $DEFOUTFILE ; then
  mv $DEFOUTFILE ${_OUTFILE}
else
  echo "==== LHE output file $DEFOUTFILE was not produced!"
fi
| true
|
e4db724d4a07a87dfdc232be783235fcf82b7292
|
Shell
|
Appdynamics/AWS_Tools
|
/extension-ctl.sh
|
UTF-8
| 5,104
| 3.96875
| 4
|
[] |
no_license
|
#!/bin/bash
#
#
# Copyright (c) AppDynamics Inc
# All rights reserved
#
# Maintainer: David Ryder, david.ryder@appdynamics.com
#
# AppDynamics AWS Extension download and configuration tool
#
# Execute as sequence:
# 1) check
# 2) download - download the extensions
# 3) install - install the extensions
# 4) prepare - copy yaml file to local dir
# 5) Modify yaml files: metricPrefix, awsAccessKey, awsSecretKey, displayAccountName
# 6) config - deploy the configuration files from local dir to extension dirs
# 7) validate
# 8) start - Start the machine agent
#
#
# Target Directory of where the machine agent is installed
# AWS Extensions are installed the monitors directory
# NOTE(review): the "~" here is quoted, so it is NOT tilde-expanded and the
# value stays literal; the start command forces expansion later via eval.
TARGET_MACHINE_AGENT_DIR="~/agents/mac"
#
# List of AppDynamics AWS Extensions to download, configure, install
# Validate download URLS at https://www.appdynamics.com/community/exchange/
# Record format: <ExtensionName>,<config path inside extension>,<zip>,<download URL>
AWS_EXTENSIONS_LIST=(\
"AWSLambdaMonitor,config.yml,awslambdamonitor-2.0.1.zip,https://www.appdynamics.com/media/uploaded-files/1553252150" \
"AWSSNSMonitor,conf/config.yaml,awssnsmonitor-1.0.2.zip,https://www.appdynamics.com/media/uploaded-files/1522284590" \
"AWSSQSMonitor,conf/config.yaml,awssqsmonitor-1.0.3.zip,https://www.appdynamics.com/media/uploaded-files/1522286224" \
"AWSS3Monitor,config.yml,awss3monitor-2.0.1.zip,https://www.appdynamics.com/media/uploaded-files/1553252907" \
"AWSELBMonitor,config.yml,awselbmonitor-1.2.2.zip,https://www.appdynamics.com/media/uploaded-files/1564682169" \
)
#
# Split one comma-separated extension record into the four globals
# EXT_NAME, CONFIG_FILE, ZIP_FILE, DOWNLOAD_URL.
_parseExtensionListItem() {
  ITEM="$1"
  EXT_NAME=$(echo "$ITEM" | cut -d ',' -f1)
  CONFIG_FILE=$(echo "$ITEM" | cut -d ',' -f2)
  ZIP_FILE=$(echo "$ITEM" | cut -d ',' -f3)
  DOWNLOAD_URL=$(echo "$ITEM" | cut -d ',' -f4)
}

# Copy the locally edited <ext>-config.yaml into the deployed extension dir.
# $1 = extension name, $2 = config path relative to the extension dir.
_deployConfig() {
  EXT_NAME="$1"
  EXT_TARGET="$2"
  echo "Copying config to extension: $EXT_NAME"
  cp "$EXT_NAME-config.yaml" "$TARGET_MACHINE_AGENT_DIR/monitors/$EXT_NAME/$EXT_TARGET"
}

# Copy the deployed config out to the working dir as <ext>-config.yaml
# so it can be edited locally before `config` deploys it back.
_prepareConfig() {
  EXT_NAME="$1"
  EXT_SRC="$2"
  echo "Copying config from extension: $EXT_NAME $EXT_SRC"
  cp "$TARGET_MACHINE_AGENT_DIR/monitors/$EXT_NAME/$EXT_SRC" "$EXT_NAME-config.yaml"
}

# Print the md5 of the local vs. deployed config side by side for comparison.
_validateConfig() {
  EXT_NAME="$1"
  EXT_TGT="$2"
  V1=$(md5sum "$EXT_NAME-config.yaml" | cut -d ' ' -f1)
  V2=$(md5sum "$TARGET_MACHINE_AGENT_DIR/monitors/$EXT_NAME/$EXT_TGT" | cut -d ' ' -f1)
  echo "$EXT_NAME" "$V1" "$V2"
}
# Sub-command dispatch; defaults to printing usage when no command is given.
cmd=${1:-"unknown"}
if [ $cmd == "check" ]; then
  # Check what extensions will be downloaded and installed
  for i in "${AWS_EXTENSIONS_LIST[@]}"; do
    _parseExtensionListItem $i
    echo $EXT_NAME $CONFIG_FILE $ZIP_FILE $DOWNLOAD_URL
  done
elif [ $cmd == "download" ]; then
  # Download the extension zip files
  for i in "${AWS_EXTENSIONS_LIST[@]}"; do
    _parseExtensionListItem $i
    curl $DOWNLOAD_URL/$ZIP_FILE -o $ZIP_FILE
  done
elif [ $cmd == "install" ]; then
  # Monitoring extensions install into the the monitors directory
  for i in "${AWS_EXTENSIONS_LIST[@]}"; do
    _parseExtensionListItem $i
    unzip $ZIP_FILE -d $TARGET_MACHINE_AGENT_DIR/monitors
  done
elif [ $cmd == "prepare" ]; then
  # Copy in the config.yml files
  for i in "${AWS_EXTENSIONS_LIST[@]}"; do
    _parseExtensionListItem $i
    _prepareConfig $EXT_NAME $CONFIG_FILE
  done
elif [ $cmd == "config" ]; then
  # Copy in the config.yml files
  for i in "${AWS_EXTENSIONS_LIST[@]}"; do
    _parseExtensionListItem $i
    _deployConfig $EXT_NAME $CONFIG_FILE
  done
elif [ $cmd == "validate" ]; then
  # Checksum local vs deployed configs for every extension
  for i in "${AWS_EXTENSIONS_LIST[@]}"; do
    _parseExtensionListItem $i
    _validateConfig $EXT_NAME $CONFIG_FILE
  done
elif [ $cmd == "start" ]; then
  # Stop and restart the machine agent
  pkill -f "machineagent.jar"
  sleep 2
  rm -rf $TARGET_MACHINE_AGENT_DIR/logs/*
  rm -f $TARGET_MACHINE_AGENT_DIR/monitors/analytics-agent/analytics-agent.id
  rm -f nohup.out
  # eval forces tilde expansion of the literal "~" in TARGET_MACHINE_AGENT_DIR
  eval MAC_AGENT_PATH=`echo $TARGET_MACHINE_AGENT_DIR/bin/machine-agent`
  echo "Running $MAC_AGENT_PATH"
  nohup $MAC_AGENT_PATH -Dad.agent.name="analytics-"`hostname` &
  # Check that its starts: tail the log for a while, then kill the tail
  TAIL_DURATION_SEC=60
  echo "Tailing nohup.out for $TAIL_DURATION_SEC seconds"
  sleep 5
  tail -f nohup.out &
  TAIL_PID=$!
  (sleep $TAIL_DURATION_SEC; echo "Stopping $TAIL_PID"; kill -9 $TAIL_PID; ) &
elif [ $cmd == "stop" ]; then
  # Stop machine agent
  pkill -f "machineagent.jar"
elif [ $cmd == "clean" ]; then
  # Delete all the extensions
  for i in "${AWS_EXTENSIONS_LIST[@]}"; do
    _parseExtensionListItem $i
    rm -rf $TARGET_MACHINE_AGENT_DIR/monitors/$EXT_NAME
  done
elif [ $cmd == "test1" ]; then
  echo "test1"
else
  echo "Commands:"
  echo " check - list extensions to download"
  echo " download - download extensions"
  echo " install - install the extensions in the machine agent monitors directory"
  echo " prepare - copy extension config files to current directory"
  echo " config - copy current directory extension config files back to monitors directory "
  echo " validate - checksum extension config files"
  echo " start - restart the machine agent"
  echo " stop - stop the machine agent"
  echo " clean - delete all extensions from machine agent monitors directory"
fi
| true
|
941dfb03f525a98e943e45c78f5a9bf0675fe917
|
Shell
|
jelix/jelix-langpacks
|
/build_module_packages.sh
|
UTF-8
| 557
| 3.90625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Package the locale files of one module into per-locale zip archives plus
# an aggregate "-all" zip, under _dist/<module>/.
MODULE="$1"

if [ "$MODULE" == "" ]; then
    echo "Error: module name is missing"
    exit 1
fi

CURRENT_DIR="$(dirname "$0")"
TARGET_DIR=_dist/$MODULE

mkdir -p "$CURRENT_DIR/$TARGET_DIR"

# Iterate locale directories with a glob instead of parsing `ls` output.
for LOCALE_PATH in "$CURRENT_DIR"/modules/locales/*/; do
    LOCALE=$(basename "$LOCALE_PATH")
    if [ -d "$CURRENT_DIR/modules/locales/$LOCALE/$MODULE" ]; then
        # zip from inside modules/locales so archive paths are <locale>/<module>/...
        (cd "$CURRENT_DIR/modules/locales/" &&
            zip -r "../../$TARGET_DIR/$MODULE-locales-$LOCALE.zip" "$LOCALE/$MODULE" &&
            zip -r "../../$TARGET_DIR/$MODULE-locales-all.zip" "$LOCALE/$MODULE"
        )
    fi
done
| true
|
df2bfda0732ae4562a7cc8cb93cae9eb5a35d75f
|
Shell
|
djparente/coevol-utils
|
/src/Get_Global_Map.sh
|
UTF-8
| 1,292
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
# Daniel J. Parente
# MD/PhD Candidate
# Swint-Kruse Laboratory
# University of Kansas Medical Center
#
# Compose three residue-numbering maps (subfamily MSA column -> reference
# numbering -> global reference numbering) into one global map on stdout.

scriptPath="."
createMapScriptPath=$scriptPath/createMap.py
composeRenumScriptPath=$scriptPath/composeRenumberings.py
invMapScriptPath=$scriptPath/invertMap.py

# Read command line arguments
if [ $# -ne 5 ]; then
  echo Usage: sh Get_Global_Map.sh [Subfamily MSA] [Reference MSA] [Subfam Ref Seq Name, In SF Align] [Subfam Ref Seq Name, In Ref Align] [Global Ref Seq Name, In Ref Align]
  exit -1;
fi

subMSAPath=$1
refMSAPath=$2
sfNameSub=$3
sfNameRef=$4
refNameRef=$5

# Declare temporary file path names (created in the current directory)
sfcol_to_bn=`mktemp -p .` || exit 1
bn_to_recol=`mktemp -p .` || exit 1
bn_to_bnlac=`mktemp -p .` || exit 1

# Create the three intermediate maps
# NOTE(review): the 0.198515243/.5 and -100/100 arguments are createMap.py
# thresholds; their meaning is defined by that script -- confirm there.
$createMapScriptPath $subMSAPath $sfNameSub 0.198515243 .5 | dos2unix | grep -v "[^0-9]$" | dos2unix > $sfcol_to_bn
$createMapScriptPath $refMSAPath $sfNameRef -100 100 | dos2unix | $invMapScriptPath | dos2unix > $bn_to_recol
$createMapScriptPath $refMSAPath $refNameRef -100 100 | dos2unix > $bn_to_bnlac

# Compose the three maps into a single map (removing the intermediates)
$composeRenumScriptPath $sfcol_to_bn $bn_to_recol $bn_to_bnlac | sort -n | dos2unix

#Cleanup temporary files
rm $sfcol_to_bn
rm $bn_to_recol
rm $bn_to_bnlac
| true
|
4efae79ee38bc7e2016113823ecd84683b61582f
|
Shell
|
rverst/pdfmerge
|
/pdfmerge.sh
|
UTF-8
| 1,759
| 4
| 4
|
[
"Unlicense"
] |
permissive
|
#!/bin/bash
# Watch-folder handler for merging duplex scans: files named odd*.pdf /
# even*.pdf are held until both halves exist, then collated into merged.pdf
# (optionally rotating/reversing pages); everything else is moved to /output
# with a timestamp prefix. $1 is the filename that appeared in /input.

WAIT_TIMEOUT=300
# Page post-processing switches (1 = enabled)
ROTATE_ODD=0
ROTATE_EVEN=1
REVERSE_EVEN=1

INPUT="/input"
OUTPUT="/output"
ODD_PREFIX="odd"
EVEN_PREFIX="even"
ODD="merge_o.pdf"
EVEN="merge_e.pdf"
MERGED="merged.pdf"

# Move $1 to the output dir with a timestamp prefix.
function moveToOutput() {
  local file="$(date +%Y-%m-%d_%H-%M-%S)_$1"
  mv "$1" "$OUTPUT/$file" && \
  printf "done ($file)\n" || exit 1
}

cd "$INPUT"

# Ignore our own intermediate files re-triggering the watcher.
[[ "$1" == "$ODD" || "$1" == "$EVEN" ]] && exit

# Wait until the writer closes the file (scanner may still be uploading).
printf "got a file, wait for it to close: $1..."
inotifywait -q -t $WAIT_TIMEOUT -e close "$1" && echo " OK" || echo " TIMEOUT"
sleep 2

if [[ ! -f "$1" ]]; then
  exit 0
fi

if [[ "$1" == "$MERGED" ]]; then
  printf "got merged pfd, move direct to output... "
  moveToOutput "$1" && exit || exit 1
fi

if [[ "$1" == *.jpg || "$1" == *.tiff ]]; then
  printf "got a picture, move direct to output... "
  moveToOutput "$1" && exit || exit 1
fi

if [[ "$1" != *.pdf ]]; then
  echo "not a pdf file, ignoring ($1)" && exit 0
fi

# PDFs without the odd/even prefix are single-sided scans: pass through.
if [[ "$1" != $ODD_PREFIX*.pdf && "$1" != $EVEN_PREFIX*.pdf ]]; then
  echo "$1"
  printf "detected non mutlipage file, move direct to output... "
  moveToOutput "$1" && exit || exit 1
fi

# Stash this half under a fixed name until the other half arrives.
if [[ "$1" == $ODD_PREFIX*.pdf ]]; then
  echo "odd file detected"
  mv "$1" "$ODD"
else
  echo "even file detected"
  mv "$1" "$EVEN"
fi

if [[ -f "$ODD" && -f "$EVEN" ]]; then
  echo "odd and even file available, merging"
  # Optional 180-degree rotation for upside-down scans
  if [[ $ROTATE_ODD -eq 1 ]]; then
    qpdf "$ODD" --rotate=+180 --replace-input
  fi
  if [[ $ROTATE_EVEN -eq 1 ]]; then
    qpdf "$EVEN" --rotate=+180 --replace-input
  fi
  # Even pages come out of the scanner last-page-first; reverse them
  if [[ $REVERSE_EVEN -eq 1 ]]; then
    qpdf --empty --pages "$EVEN" z-1 -- reversed.tmp
    rm "$EVEN"
    EVEN="reversed.tmp"
  fi
  # Interleave odd and even pages into the final document
  qpdf --collate "$ODD" --pages "$ODD" "$EVEN" -- "$MERGED" && rm -f "$ODD" "$EVEN"
fi
| true
|
49bfbc0a4c1839476c5917eff6ec24c7ffcfc9a6
|
Shell
|
sbeliakou/kat-example
|
/courses/docker/05-compose/tasks/7/verify.sh
|
UTF-8
| 1,691
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
# Verify that the 'db' and 'web' containers were started with plain
# 'docker run' (NOT docker-compose) and match the expected configuration.
# The whole script is one '&&' chain, so its exit status is 0 only if every
# check passes. Reference commands the learner was expected to run:
# docker network create php
# docker run -d --net php --hostname db --name db -v $(pwd)/mysql:/var/lib/mysql -e MYSQL_ROOT_PASSWORD=root -e MYSQL_USER=admin -e MYSQL_PASSWORD=test -e MYSQL_DATABASE=database -p 8889:3306 mysql:5.7
# docker run -d --net php --hostname web --name web -v $(pwd)/app:/var/www/html -e ALLOW_OVERRIDE=true -p 10087:80 web_locally_build

# --- db container ---
# Compose label must be null => the container was not created by docker-compose.
docker inspect db | jq -r '.[].Config.Labels."com.docker.compose.version"' | grep null &&
# Required MySQL environment variables.
docker inspect db | jq -r '.[].Config.Env[]' | grep -E '^MYSQL_ROOT_PASSWORD=root$' &&
docker inspect db | jq -r '.[].Config.Env[]' | grep -E '^MYSQL_USER=admin$' &&
docker inspect db | jq -r '.[].Config.Env[]' | grep -E '^MYSQL_PASSWORD=test$' &&
docker inspect db | jq -r '.[].Config.Env[]' | grep -E '^MYSQL_DATABASE=database$' &&
# Hostname must be 'db' and the container must NOT sit on the default bridge
# network (null => attached to a custom network instead).
docker inspect db | jq -r '.[].Config.Hostname' | grep '^db$' &&
docker inspect db | jq '.[].NetworkSettings.Networks.bridge' | grep null &&
# MySQL port 3306 published on host port 8889; data dir mounted as a volume.
docker inspect db | jq -r '.[].HostConfig.PortBindings."3306/tcp"[].HostPort' | grep -E '^8889$' &&
docker inspect db | jq -r '.[].Mounts[].Destination' | grep -E '^/var/lib/mysql$' &&
# --- web container ---
# Same compose-label, image, hostname, port, mount, and network checks.
docker inspect web | jq -r '.[].Config.Labels."com.docker.compose.version"' | grep null &&
docker inspect web | jq -r '.[].Config.Image' | grep 'web_locally_build' &&
docker inspect web | jq -r '.[].Config.Hostname' | grep '^web$' &&
docker inspect web | jq -r '.[].HostConfig.PortBindings."80/tcp"[].HostPort' | grep -E '^10087$' &&
docker inspect web | jq -r '.[].Mounts[].Destination' | grep -E '^/var/www/html[/]*$' &&
docker inspect web | jq '.[].NetworkSettings.Networks.bridge' | grep null &&
# End-to-end check: the PHP app must reach the database through the web port.
curl -s localhost:10087 | grep 'Connected successfully. Great work!'
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.