blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
3b38d667ad6e194ff7d3937a817a4718c3149fed | Shell | x-stride/lfs-arm-utilite | /chapter_5/5_14_Check-0.9.14.sh | UTF-8 | 292 | 2.703125 | 3 | [] | no_license | #!/bin/bash
set -e

# Build Check 0.9.14 into the LFS temporary toolchain (/tools).
# mkdir -p is a no-op when the directory already exists, replacing the
# original `[ ! -d ... ] && mkdir` dance.
mkdir -pv /tmp/build
cd /tmp/build
tar -xvf /mnt/lfs/sources/check-0.9.14.tar.gz
cd /tmp/build/check-0.9.14
# PKG_CONFIG= prevents configure from picking up the host's pkg-config.
PKG_CONFIG= ./configure --prefix=/tools
make
make check
make install
# Leave the build tree before deleting it; unconditional rm -rf replaces
# the original `[ -d ... ] && rm -rf ...`, whose test — as the script's
# last command — made the whole script exit non-zero when the directory
# was absent.
cd /tmp/build
rm -rf /tmp/build/check-0.9.14
| true |
4f1af1434b24f3e4553f4e2084ff07fc281e7c4e | Shell | serandules/scripts | /production.sh | UTF-8 | 1,127 | 3.28125 | 3 | [] | no_license | #!/bin/sh
# Development bootstrap for the serandules hub + hub-client stack:
# kills running node processes, rebuilds node_modules as symlink farms
# pointing at the local source checkouts, then starts both servers under
# `supervisor`. NOTE(review): despite the filename (production.sh), this
# sets NODE_ENV=development — confirm which environment is intended.

# Source-tree layout (all under the local github checkout).
REPO=~/sources/github
HUB=$REPO/serandules/hub
CLIENT=$REPO/serandules/hub-client
COMPS=$REPO/serandomps
MODS=$REPO/serandules
DBPATH=~/resources/mongodb/data
#cleanup
#pkill mongo
pkill forever
pkill node
#starts mongodb
#mongod --dbpath=$DBPATH &
#setup env variables
export NODE_ENV=development
export GIT_REPO=$REPO
#setup node_modules dir
# Recreate $MODS/node_modules as symlinks to every sibling module so the
# modules resolve each other from source instead of installed copies.
rm -rf $MODS/node_modules
mkdir $MODS/node_modules
cd $MODS
for dir in *;
do
ln -s $MODS/$dir $MODS/node_modules/$dir
done
#setup hub
# Link every serandomps component into the hub's components directory
# under the "serandomps-" prefix.
cd $COMPS
for dir in *;
do
rm -rf $HUB/components/serandomps-$dir
ln -s $COMPS/$dir $HUB/components/serandomps-$dir
done
# Remove installed copies of the local modules from the hub's
# node_modules (the ln re-link below is deliberately commented out).
cd $MODS
for dir in *;
do
rm -rf $HUB/node_modules/$dir
# ln -s $MODS/$dir $HUB/node_modules/$dir
done
rm $HUB/node_modules/node_modules
rm $HUB/node_modules/hub
#setup hub-client
# Same cleanup for the hub-client's node_modules.
cd $MODS
for dir in *;
do
rm -rf $CLIENT/node_modules/$dir
# ln -s $MODS/$dir $CLIENT/node_modules/$dir
done
rm $CLIENT/node_modules/node_modules
rm $CLIENT/node_modules/hub-client
rm $CLIENT/node_modules/hub
# Start both servers in the background, logging to console.log in each tree.
cd $HUB
supervisor index.js > $HUB/console.log &
cd $CLIENT
supervisor index.js > $CLIENT/console.log &
| true |
58c2fee372b3881b2fba22e5f99a3577b8de3c67 | Shell | mgoltzsche/docker-containers | /roundcube/entrypoint.sh | UTF-8 | 3,656 | 3.578125 | 4 | [] | no_license | #!/bin/sh
set -e

# Default every RC_*/DB_* setting that the container user did not supply.
# `: ${VAR:=default}` assigns only when VAR is unset or empty; the RC_*
# variables are later turned into roundcube config entries by writeConfig.
: ${RC_LANGUAGE:=en_US}
: ${RC_LOG_DRIVER:=syslog}
: ${RC_SYSLOG_ID:=roundcube}
: ${RC_DEBUG_LEVEL:=1} # sum of: 1 = show in log; 4 = show in browser
: ${RC_DEFAULT_HOST:=mail} # Use ssl:// prefix to encrypt. Then CA certificate for remote host should be placed in /etc/ssl/
: ${RC_DEFAULT_PORT:=143}
: ${RC_SMTP_SERVER:=mail} # Use tls:// prefix to encrypt
: ${RC_SMTP_PORT:=25}
: ${RC_SMTP_USER:=%u}
: ${RC_SMTP_PASS:=%p}
: ${RC_SMTP_HELO_HOST:=$(hostname -f)}
: ${RC_SMTP_LOG:=false}
: ${RC_AUTO_CREATE_USER:=true}
: ${RC_CREATE_DEFAULT_FOLDERS:=true}
: ${RC_USERNAME_DOMAIN:=}
: ${RC_PASSWORD_CHARSET:=UTF-8}
: ${RC_IDENTITIES_LEVEL:=1}
: ${RC_SUPPORT_URL:=}
: ${RC_ENABLE_SPELLCHECK:=false}
: ${RC_ENABLE_INSTALLER:=true} # Set to true serves /installer
# Random 24-char DES key derived from the current time; regenerated on
# every container start unless RC_DES_KEY is provided explicitly.
: ${RC_DES_KEY:=$(date +%s | sha256sum | base64 | head -c 24)}
: ${RC_DB_PREFIX:=rc_}
: ${DB_TYPE:=sqlite}
: ${DB_HOST:=postgres}
: ${DB_DATABASE:=}
: ${DB_USERNAME:=roundcube}
: ${DB_PASSWORD:=}

# Build the roundcube DB DSN (RC_DB_DSNW) from the DB_* settings; abort
# on unsupported DB types.
case "$DB_TYPE" in
sqlite)
[ "$DB_DATABASE" ] || DB_DATABASE=/db/roundcube-sqlite.db
RC_DB_DSNW="sqlite:///$DB_DATABASE?mode=0646"
echo "WARN: Running roundcube with sqlite DB $DB_DATABASE" >&2
;;
pgsql)
# Default the database name to the user name, the common postgres setup.
[ "$DB_DATABASE" ] || DB_DATABASE=$DB_USERNAME
RC_DB_DSNW="pgsql://$DB_USERNAME:$DB_PASSWORD@$DB_HOST/$DB_DATABASE"
;;
*)
echo "Unsupported DB type: $DB_TYPE" >&2
exit 1
;;
esac
# Runs the provided command until it succeeds, retrying once per second.
# $1    - error message printed to stderr after each failed attempt
#         (pass "" to retry silently)
# $2... - the command and its arguments
# Quoting "$@" preserves each argument as a single word; the original
# unquoted `until $@` re-split arguments that contained whitespace.
awaitSuccess() {
MSG="$1"
shift
until "$@"; do
[ ! "$MSG" ] || echo "$MSG" >&2
sleep 1
done
}
# Generates /roundcube/config/config.inc.php from every RC_* environment
# variable: the RC_ prefix is stripped, the key lower-cased, and the value
# quoted unless the key looks boolean/numeric (enable*/auto*/…level/…port/…_log).
writeConfig() {
echo 'Setting up roundcube with (see https://github.com/roundcube/roundcubemail/wiki/Configuration):'
set | grep -E '^DB_|^RC_' | sed -E 's/(^[^=]+_(PASSWORD|DSNW|KEY)=).+/\1***/i' | xargs -n1 echo ' ' # Show variables
CFG_CONTENT=
for CFG_KEY_UPPER in $(set | grep -Eo '^RC_[^=]+' | sed 's/^RC_//'); do
CFG_KEY="$(echo -n "$CFG_KEY_UPPER" | tr '[:upper:]' '[:lower:]')" # User name lower case
# eval performs indirect expansion of RC_<KEY>; keys come from `set`
# output above, not from untrusted input.
CFG_VAL="$(eval "echo \"\$RC_$CFG_KEY_UPPER\"")"
echo "$CFG_KEY" | grep -Eq '^enable|^auto|level$|port$|_log$' || CFG_VAL="'$CFG_VAL'"
CFG_CONTENT="$(echo "$CFG_CONTENT"; echo "\$config['$CFG_KEY'] = $CFG_VAL;")"
done
cat > /roundcube/config/config.inc.php <<-EOF
<?php
$CFG_CONTENT
\$config['plugins'] = array();
EOF
}

# Installs or removes the /installer web UI depending on RC_ENABLE_INSTALLER.
setupInstallerIfEnabled() {
if [ "$RC_ENABLE_INSTALLER" = 'true' ]; then
cp -r /roundcube-installer /roundcube/installer &&
chown -R root:www-data /roundcube/installer
else
rm -rf /roundcube/installer
fi
}

# Validates the generated roundcube configuration via the init helper.
testConfig() {
gosu www-data hhvm -c /etc/hhvm/server.ini /rcinit.php testconfig
}

# For postgres only: blocks until a PDO connection to the DB succeeds.
# Credentials are passed to the helper through temporary PDO_DB_* env vars.
waitForDB() {
if [ "$DB_TYPE" = "pgsql" ]; then
export PDO_DB_DSN="pgsql:host=$DB_HOST;port=5432;dbname=$DB_DATABASE"
export PDO_DB_USERNAME="$DB_USERNAME"
export PDO_DB_PASSWORD="$DB_PASSWORD"
awaitSuccess "Waiting for postgres DB server $DB_HOST:5432" gosu www-data hhvm -c /etc/hhvm/server.ini /rcinit.php testconnection
unset PDO_DB_DSN PDO_DB_USERNAME PDO_DB_PASSWORD
fi
}

# Creates the roundcube schema when the DB does not already contain it.
initDBIfEmpty() {
if ! gosu www-data hhvm -c /etc/hhvm/server.ini /rcinit.php testschema; then
gosu www-data hhvm -c /etc/hhvm/server.ini /rcinit.php initschema
fi
}

# Blocks until the IMAP (MDA) and SMTP (MTA) ports accept TCP connections;
# set SKIP_MAIL_SERVER_CHECK to any non-empty value to bypass.
waitForMailServer() {
if [ ! "$SKIP_MAIL_SERVER_CHECK" ]; then
awaitSuccess "Waiting for MDA on $RC_DEFAULT_HOST:$RC_DEFAULT_PORT" nc -zvw1 "$RC_DEFAULT_HOST" "$RC_DEFAULT_PORT" &&
awaitSuccess "Waiting for MTA on $RC_SMTP_SERVER:$RC_SMTP_PORT" nc -zvw1 "$RC_SMTP_SERVER" "$RC_SMTP_PORT"
fi
}

# Startup sequence; each step only runs if the previous one succeeded.
writeConfig &&
testConfig &&
setupInstallerIfEnabled &&
waitForDB &&
initDBIfEmpty &&
waitForMailServer
| true |
287f229fb691c2cdb5052ac525d291d3795e673c | Shell | TheDuc/renative | /scripts/publishAlpha.sh | UTF-8 | 383 | 3.734375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Publish an alpha release: run the npm publish script, parse the version
# out of its last output line (text after '@'), then commit, tag and push.
OUTPUT=$(npm run publish:alpha | tail -1)
VERSION="$(cut -d'@' -f2 <<< "$OUTPUT")"

# -n with a quoted variable is the safe emptiness test; the original
# unquoted `[ $VERSION != "" ]` produced a test syntax error whenever
# VERSION was empty — the very case it was meant to detect.
if [ -n "$VERSION" ]
then
    echo "publishing $VERSION"
    git add -A
    git commit -nm "release $VERSION"
    git tag -a "$VERSION" -m "release $VERSION"
    git push origin "$(git rev-parse --abbrev-ref HEAD)" && git push origin "$VERSION"
else
    echo "Can't get version"
    exit 100
fi
| true |
d1bf7e1c9edd14c8b799ee8d6f1289f5f37d0f1f | Shell | nghinv/tqa-dataset-utils-weeklytest | /scripts/PLF40x/init/tqa-dataset-utils-weeklytest-init.sh | UTF-8 | 1,582 | 2.9375 | 3 | [] | no_license | #!/bin/bash
# One-time initialisation of the weekly-test platform: wipes shared data
# and the plf_jcr/plf_idm MySQL databases, deploys a fresh app config,
# runs the JMeter PLF_INIT plan, then waits for a human to finish the
# welcome screen before backing up the initial dataset.
current_dir=`dirname $0`
echo "`date`,current_dir=${current_dir}"
# NOTE(review): these relative paths resolve against the caller's cwd,
# not against $current_dir — confirm the script is always run from its
# own directory.
. ../tqa-dataset-utils-weeklytest-set_global_vars.sh
set_global_vars;
# EMPTY DATA
../tqa-dataset-utils-weeklytest-stop-app.sh;
if [ -d ${g_shared_dir} ]; then rm -rf ${g_shared_dir}/data ; fi
mkdir ${g_shared_dir}/data
mysql -uroot -ptest_control -e "drop database plf_jcr; drop database plf_idm; create database plf_jcr; create database plf_idm;"
# Write the per-host deployment configuration consumed by the start script.
CONFIG_FILE=/home/qahudson/testsuite/${g_conf_file}_`hostname`_CURRENT_CONFIG
config_line="${g_tomcat_dir} NO_RESTORE plf_jcr plf_idm!DATASET_INJ 1 10 3600 9999 ${g_host} 8080 DATASET true true - - WEEKLY_INJ - /home/qahudson/testsuite/props/tqa-dataset-utils-weeklytest-TEST_INJ.properties!eXoPLF.install.sh"
echo ${config_line} >${CONFIG_FILE}
../tqa-dataset-utils-weeklytest-start-app.sh; sleep 30; tomcat_log_file=${g_tomcat_dir}/logs/catalina.out.DATASET_INJ
grep "Server start" ${tomcat_log_file}
echo "`date`,Init platform right after 1st deployment"
# JMeter signals completion by creating continue.continue.continue.
${g_jmeter_bin_dir}/jmeter.sh -n -t "${current_dir}/PLF_INIT.jmx" -l "${current_dir}/PLF_INIT.jmx.jtl" -Jexpinit_continue_file_path="${current_dir}/continue.continue.continue"
echo "`date`,Init platform right after 1st deployment, done"
# Poll once a minute until the operator (or JMeter) creates the marker file.
while true; do
sleep 60
if [ -f continue.continue.continue ]; then echo "continue!!!"; break
else echo "waiting... Please create file continue.continue.continue (in init folder) after you finish welcome-screen!"
fi
done
../tqa-dataset-utils-weeklytest-stop-app.sh; ../tqa-dataset-utils-weeklytest-backup.sh ${g_ds_init_name};
echo "COMPLETE init.sh"
| true |
9c0a5849787d43cd564f2777dc8cade691233d76 | Shell | leachiM2k/dotfiles | /bin/co | UTF-8 | 199 | 3.203125 | 3 | [] | no_license | #!/bin/bash
# Check out a git branch, creating a local branch tracking origin/<branch>
# if it does not exist yet, then run the `pull` and `branch` helper
# commands (expected on PATH alongside this script).
if [ -z "$1" ]; then
  echo "Checkout a branch (git 1.4 and git 1.5)"
  echo "Usage:"
  echo " co <branch>"
else
  # Quote "$1" so branch names survive word splitting and globbing.
  (git checkout "$1" || git checkout -b "$1" "origin/$1") &&
  pull &&
  branch
fi
| true |
f10a3acb840998db1cf9a0b8cbc889f68009cc3d | Shell | jensdietrich/null-annotation-inference | /experiments-additional/capture-guava.sh | UTF-8 | 1,978 | 3.671875 | 4 | [
"UPL-1.0"
] | permissive | #!/bin/bash
## observe nullability while exercising a program running its tests
## @author jens dietrich
# Workflow: copy the guava checkout into an "instrumented" folder, swap in
# instrumented pom.xml build scripts, run the test suite with the capture
# agents attached, then merge the raw null-*.json observations into one
# result file. Environment (PROJECT_FOLDERS, AGENT, MERGER, …) comes from
# additional.env.
. ./additional.env
NAME=guava
PROJECT_FOLDER=$PROJECT_FOLDERS/$NAME
if [ ! -d "$PROJECT_FOLDER" ]; then
echo "Project folder does not exist, fetch projects first: $PROJECT_FOLDER"
fi
INSTRUMENTED_PROJECT_FOLDER=$INSTRUMENTED_PROJECT_FOLDERS/$NAME
if [ -d "$INSTRUMENTED_PROJECT_FOLDER" ]; then
echo "Instrumented project already exists, will reuse (delete folder to recreate): $INSTRUMENTED_PROJECT_FOLDER"
else
echo "copying project"
mkdir -p $INSTRUMENTED_PROJECT_FOLDER
cp -r $PROJECT_FOLDER $INSTRUMENTED_PROJECT_FOLDERS
echo "replacing build script by instrumented script"
cp ${INSTRUMENTED_BUILD_SCRIPT_FOLDER}/${NAME}/pom.xml $INSTRUMENTED_PROJECT_FOLDER/
cp ${INSTRUMENTED_BUILD_SCRIPT_FOLDER}/${NAME}/guava-tests/pom.xml $INSTRUMENTED_PROJECT_FOLDER/guava-tests
fi
if [ ! -d "$RESULT_FOLDER_OBSERVED" ]; then
echo "Result folder does not exit, creating folder: " $RESULT_FOLDER_OBSERVED
mkdir -p $RESULT_FOLDER_OBSERVED
else
echo "Issues observed will be saved in " $RESULT_FOLDER_OBSERVED
fi
# copy agents
echo "copying agents"
cp $AGENT2 $INSTRUMENTED_PROJECT_FOLDER/guava-tests
cp $AGENT $INSTRUMENTED_PROJECT_FOLDER/guava-tests
echo "cleaning old issues"
rm $INSTRUMENTED_PROJECT_FOLDER/guava-tests/null-*.json
cd $INSTRUMENTED_PROJECT_FOLDER
echo "running instrumented build"
# Wall-clock the instrumented test run; failures are tolerated so that
# capture data is still produced for partially failing suites.
start=`date +%s`
mvn clean test -Dmaven.test.failure.ignore=true -Dmaven.test.error.ignore=true
end=`date +%s`
runtime=$((end-start))
echo "$runtime" > $RESULT_FOLDER_OBSERVED/capture-runtime-${NAME}.log
echo "merging results"
java -Xmx20g -jar $MERGER -i ${INSTRUMENTED_PROJECT_FOLDER}/guava-tests -o ${RESULT_FOLDER_OBSERVED}/${NULLABLE}-${NAME}.json
echo "cleaning raw captured data"
rm $INSTRUMENTED_PROJECT_FOLDER/guava-tests/null-*.json
echo "done -- merged results written to ${RESULT_FOLDER_OBSERVED}/$NULLABLE-${NAME}.json"
| true |
6afb943ca0508c42c70d97bd94de7421ee89d526 | Shell | padmakarkotule/python-101 | /docs/istio/scripts_istio_installation/07_istio_configure_ingress_gateway.sh | UTF-8 | 1,606 | 3.1875 | 3 | [] | no_license | #!/bin/bash
# Exposes the Bookinfo sample application through the Istio ingress
# gateway on GKE: applies the gateway manifest, discovers the external
# IP/ports of istio-ingressgateway, and opens matching GCP firewall rules.
# Most command output is appended to gke_istio_ingress_gateway_configure.log.
export LAB_DIR=$HOME/istio
mkdir $LAB_DIR
cd $LAB_DIR
# Enter the unpacked istio-<version> release directory (assumes exactly one).
cd ./istio-*
export PATH=$PWD/bin:$PATH
## Igress Gateway - Open the application to outside traffic.
echo "Associate application with Istio Gateway ..." &>>gke_istio_ingress_gateway_configure.log
kubectl apply -f samples/bookinfo/networking/bookinfo-gateway.yaml &>>gke_istio_ingress_gateway_configure.log
echo "Verify gateway is created .." &>>gke_istio_ingress_gateway_configure.log
kubectl get gateway &>>gke_istio_ingress_gateway_configure.log
echo "Determining the ingress IP and ports .." &>>gke_istio_ingress_gateway_configure.log
kubectl get svc istio-ingressgateway -n istio-system &>>gke_istio_ingress_gateway_configure.log
# Set the ingress IP and ports: (If have load balancing)
export INGRESS_HOST=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
export INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="http2")].port}')
export SECURE_INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="https")].port}')
# Set the firewall rules
gcloud compute firewall-rules create allow-gateway-http --allow tcp:$INGRESS_PORT &>>gke_istio_ingress_gateway_configure.log
gcloud compute firewall-rules create allow-gateway-https --allow tcp:$SECURE_INGRESS_PORT &>>gke_istio_ingress_gateway_configure.log
# Set GATEWAY URL
export GATEWAY_URL=$INGRESS_HOST:$INGRESS_PORT
# Verify external access #Access url from browser
echo "http://$GATEWAY_URL/productpage" | true |
7ee819d5bdbffed58cd2dff9983c0c73f213a86f | Shell | FlaviaTG/POPGENOMICS | /GenomeScan/Rehh-workflow.sh | UTF-8 | 4,959 | 2.875 | 3 | [] | no_license | module load jdk/1.8.0_172-fasrc01
# NOTE(review): this file is a recorded workflow, not an executable script.
# The first part is shell (module loads, glactools/bcftools/vcftools/Beagle
# commands); everything from `library(rehh)` onward is R code to be pasted
# into an R session (see the referenced Rehh-RUN-CHR1.R). Running this file
# with bash would fail at the R lines. Also note the per-chromosome
# vcftools `for` loop below is missing its closing `done`.
#programs to use
module load GCC/7.3.0-2.30 OpenMPI/3.1.1 BCFtools/1.9
module load jdk/1.8.0_172-fasrc01
module load vcftools/0.1.14-fasrc01
module load GCC/8.2.0-2.31.1 SAMtools/1.9
module load tabix/0.2.6-fasrc01
#need the path to the reference genome index file
MB=/path/to/genomes/referencegenome.fasta.fai
gtool=/path/to/programs/glactools/glactools
BPATH=/path/to//programs/beagle.18May20.d20.jar
#need to format the beagle output from ANGSD to vcf and phase it.
#Beagle file with GL to vcf in 3 steps. You can find a Slurm Job example for this in: Job-beagle-Formating.sh
$gtool beagle2glf --fai $MB genolike.beagle.gz > All.glf.gz
$gtool glf2acf All.glf.gz > All.acf.gz
$gtool glac2vcf All.acf.gz > All.vcf.gz
#
#
bcftools view --header-only All.vcf
#reheader in necesary
bcftools reheader -s new-header-vcf.txt All.vcf > newH-All.vcf
#Phase it with Beagle. You can find a Slurm Job example for this in:
java -Xmx20000m -jar $BPATH gt=newH-All.vcf out=All-PHASED
#bcftools index the vcf file generated
bcftools index All-PHASED.vcf
#
#subset per subespecies, with vcftools where pop_map-species1.txt is the list of individuals for that subespecies subset
vcftools --vcf newH-All.vcfz --keep pop_map-species1.txt --max-missing 1 --recode --recode-INFO-all --out newH-genolike_species1-PHASED
#
vcftools --vcf newH-All.vcfz --keep pop_map-species2.txt --max-missing 1 --recode --recode-INFO-all --out newH-genolike_species2-PHASED
########split per chromosomes each subset per species, where CHROM-autosome.txt has the list of the chromosomes to subset.
for i in $(cat CHROM-autosome.txt); do vcftools --gzvcf newH-genolike_species1-PHASED.vcf --chr $i --recode --recode-INFO-all --out ./CHROM/species1_$i
##################################################################
#########haplotype genome scan with rehh in R ####################
#Now that you have separete files, the format and phased genotypes, you can run in R rehh genome scan
###make a test like this and if it works, you will need to make a R script to run it on all chromosomes. See the example script: Rehh-RUN-CHR1.R
library(rehh)
library(tidyverse)
# read in data for each species
# house
house_hh <- data2haplohh(hap_file = "newH-genolike_species1-PHASED-CHR1.vcf",
polarize_vcf = FALSE)
# bactrianus
bac_hh <- data2haplohh(hap_file = "newH-genolike_species2-PHASED-CHR1.vcf",
polarize_vcf = FALSE)
# filter on MAF - here 0.05
house_hh_f <- subset(house_hh, min_maf = 0.05)
bac_hh_f <- subset(bac_hh, min_maf = 0.05)
# perform scans
house_scan <- scan_hh(house_hh_f, polarized = FALSE)
bac_scan <- scan_hh(bac_hh_f, polarized = FALSE)
# perform iHS on house
house_ihs <- ihh2ihs(house_scan, freqbin = 1)
bac_ihs <- ihh2ihs(bac_scan, freqbin = 1)
#plot statistics
pdf("Chr1.pdf")
ggplot(house_ihs$ihs, aes(POSITION, IHS)) + geom_point()
#Or we can plot the log P-value to test for outliers.
ggplot(house_ihs$ihs, aes(POSITION, LOGPVALUE)) + geom_point() + geom_hline(yintercept=5, linetype="dashed", color = "red")
ggplot(bac_ihs$ihs, aes(POSITION, LOGPVALUE)) + geom_point() + geom_hline(yintercept=5, linetype="dashed", color = "red")
#############
####################with both
# perform xp-ehh
house_bac <- ies2xpehh(bac_scan, house_scan,
popname1 = "species1", popname2 = "species2",
include_freq = T,standardize = TRUE)
# plot
ggplot(house_bac, aes(POSITION, XPEHH_bicknell_minimus)) + geom_point()
ggplot(house_bac, aes(POSITION, LOGPVALUE)) + geom_point() + geom_hline(yintercept=5, linetype="dashed", color = "red")
#
ggplot(ext, aes(POSITION, LOGPVALUE)) + geom_point() + geom_hline(yintercept=5, linetype="dashed", color = "red")
###
# find the highest hit
hit <- house_bac %>% arrange(desc(LOGPVALUE)) %>% top_n(1)
# get SNP position
x <- hit$POSITION
marker_id_h <- which(house_hh_f@positions == x)
marker_id_b <- which(bac_hh_f@positions == x)
#Now we are ready to plot the bifurcation of haplotypes around our site of selection. We do this like so:
house_furcation <- calc_furcation(house_hh_f, mrk = marker_id_h)
bac_furcation <- calc_furcation(bac_hh_f, mrk = marker_id_b)
#We can also plot both of these to have a look at them:
plot(house_furcation, xlim = c(19.18E+6, 19.22E+6))
plot(bac_furcation, xlim = c(19.18E+6, 19.22E+6))
#Calculating the furcation pattern also makes it possible to calculate the haplotype length around our signature of selection.
house_haplen <- calc_haplen(house_furcation)
bac_haplen <- calc_haplen(bac_furcation)
#With the haplotype length calculated, we can now plot this to)
# see how haplotype structure differs between our two populations.
plot(house_haplen)
plot(bac_haplen)
# write out house bactrianus xpEHH table to plot it later
house_bac <- tbl_df(house_bac)
colnames(house_bac) <- tolower(colnames(house_bac))
write_tsv(house_bac, "./Chr1-minimus-bicknell_xpEHH.tsv")
dev.off()
| true |
e69a0d426e93ed5cb327d5ca08647d345532d3b3 | Shell | mirsella/pronote-notifications | /wrapper.bash | UTF-8 | 1,011 | 3.25 | 3 | [] | no_license | #!/bin/bash
# Cron wrapper around pronote-notifications: fetch current message counts
# from node index.js, compare them with the previous run (last.json) and
# fire a desktop notification via `notif` when something changed.
cd /home/mirsella/pronote-notifications
messages=$(node index.js)
# -z with a quoted variable is the correct emptiness test; the original
# unquoted `[ $messages == '' ]` errored out whenever $messages was empty
# or contained whitespace, so this alert could never fire.
[ -z "$messages" ] && notif 'bug in the matrix'
# Re-seed last.json whenever it does not hold exactly the 4 expected keys.
if [ ! "$(jq -j '. | length' last.json)" == "4" ]; then
    echo '{ "discussions": 0, "others": 0, "informations": 0, "last": 0 }' > last.json;
fi
last=$(cat last.json)
count=$(jq '.last' <<< "$last")
# count tracks consecutive runs with pending notifications.
((count++))
# Concatenated counts, e.g. informations=1 discussions=0 others=0 -> "100".
nMessages=$(jq -j '.informations, .discussions, .others' <<< "$messages")
nLast=$(jq -j '.informations, .discussions, .others' <<< "$last")
echo "$(date)
$nMessages $messages
$nLast $last" > log.txt
# NOTE(review): the '000' substring check also matches e.g. "1000"
# (informations=10, rest 0) — confirm counts stay single-digit.
if ! grep -q '000' <<< "$nMessages"; then
    if grep -q '000' <<< "$nLast"; then
        notif "New Notifications ! $nMessages"
    elif bc -l <<< "$count/10" | grep -q '^[0-9]*\.*00000000000000000000$'; then
        # Re-notify on every 10th consecutive run with pending messages.
        notif "Still Got Notifications ! $nMessages"
    elif [ "$nMessages" -gt "$nLast" ]; then
        notif "Even More Notifications ! $nMessages"
    else
        echo 'got notif, waiting to send a new push notif'
    fi
else
    count=0
fi
# Persist the fresh counts plus the updated run counter for the next run.
jq ". + {last: $count}" <<< "$messages" > last.json
| true |
660661d4a327bc2824a426c414347c9b0a5fe608 | Shell | BlueRainSoftware/id4i-cli | /test/tests/transfer.bats | UTF-8 | 2,844 | 3.234375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bats
# Bats test suite for the id4i CLI "transfer" commands. Each test gets a
# fresh 8-char GUID from setup(); credentials/IDs for two users come from
# .preflightData (first user, sourced into the environment) and
# .preflightData.2 (second user, exported with a U2_ prefix).
setup() {
guid=$(./id4i guids create -l 8 -c 1 | jq -r ".id4ns[0]")
source .preflightData
# Put all data from the second user into environment variables
IFS=$'\n'
for line in $( cat .preflightData.2 ); do export U2_$line; done;
}
@test "Transfer - Help is available" {
./id4i help transfer | grep "send Prepare the transfer of an ID to another organization"
}
@test "Transfer - Transfer GUID" {
# Starts with no recipients, then transfer to user 2's organization.
./id4i transfer status -i ${guid} | grep "recipientOrganizationIds\":\[\]"
./id4i transfer send -i ${guid} -r ${U2_ORGANIZATION}
./id4i transfer status -i ${guid} | grep "recipientOrganizationIds\":\[\"${U2_ORGANIZATION}\"\]"
./id4i transfer status -i ${guid} | grep "keepOwnership\":false"
}
@test "Transfer - Transfer GUID keeping ownership" {
# -k keeps ownership with the sending organization.
./id4i transfer status -i ${guid} | grep "recipientOrganizationIds\":\[\]"
./id4i transfer send -i ${guid} -k -r ${U2_ORGANIZATION}
./id4i transfer status -i ${guid} | grep "recipientOrganizationIds\":\[\"${U2_ORGANIZATION}\"\]"
./id4i transfer status -i ${guid} | grep "keepOwnership\":true"
}
@test "Transfer - Transfer to multiple recipients" {
./id4i transfer status -i ${guid} | grep "recipientOrganizationIds\":\[\]"
./id4i transfer send -i ${guid} -r ${U2_ORGANIZATION} -r ${ORGANIZATION}
[ $(./id4i transfer status -i $guid | jq ".recipientOrganizationIds | length") -eq "2" ]
}
@test "Transfer - Open GUID for claims" {
# -c opens the GUID for claiming instead of naming explicit recipients.
./id4i transfer status -i ${guid} | grep "openForClaims\":false"
./id4i transfer send -i ${guid} -c
[ $(./id4i transfer status -i $guid | jq ".recipientOrganizationIds | length") -eq "0" ]
./id4i transfer status -i ${guid} | grep "openForClaims\":true"
}
@test "Transfer - Receive GUID" {
# User 2 (separate config file) accepts the transfer; ownership moves.
./id4i transfer status -i ${guid} | grep "ownerOrganizationId\":\"${ORGANIZATION}\""
./id4i transfer send -i ${guid} -r ${U2_ORGANIZATION}
./id4i transfer status -i ${guid} | grep "recipientOrganizationIds\":\[\"${U2_ORGANIZATION}\"\]"
./id4i transfer receive --config ./.id4i.2.properties -i ${guid}
./id4i transfer status -i ${guid} | grep "ownerOrganizationId\":\"${U2_ORGANIZATION}\""
}
@test "Transfer - Receive GUID open for claims " {
./id4i transfer status -i ${guid} | grep "ownerOrganizationId\":\"${ORGANIZATION}\""
./id4i transfer status -i ${guid} | grep "openForClaims\":false"
./id4i transfer send -i ${guid} --open-for-claims
./id4i transfer status -i ${guid} | grep "openForClaims\":true"
./id4i transfer receive --config ./.id4i.2.properties -i ${guid}
./id4i transfer status -i ${guid} | grep "ownerOrganizationId\":\"${U2_ORGANIZATION}\""
./id4i transfer status -i ${guid} | grep -v "openForClaims\":true" # Don't search for openForClaims:false, for some strange reason it can also be null.
}
| true |
f16c079d60d0471e5f867909014a50429b5616e4 | Shell | thelaser/gcp_snippets | /CloudScripts/bash/batch-permissions/batch_permissions.sh | UTF-8 | 235 | 2.59375 | 3 | [] | no_license | #!/bin/bash
# Grant ROLE at the organization level to every user listed (one e-mail
# address per line) in the file `name-list`.
# Usage: batch_permissions.sh <org-id> <role> <billing-project>
ORG_ID=$1
ROLE=$2
BILLING_PROJECT=$3

# Invoke gcloud directly: the original wrapped the call in $(...), which
# ran gcloud and then tried to execute its *output* as a command.
# IFS= read -r reads each line verbatim (no backslash mangling).
while IFS= read -r user; do
  gcloud organizations add-iam-policy-binding "$ORG_ID" \
    --condition=None \
    --member="user:$user" \
    --role="$ROLE" \
    --billing-project="$BILLING_PROJECT"
done < name-list
| true |
02a407e9d9fd012d71cfda51db3e21f65b0c288b | Shell | chregu/GeotagRawFromJpeg | /GeotagRawFromJpeg.sh | UTF-8 | 341 | 3.125 | 3 | [] | no_license | #!/bin/bash
# For every .JPG under the current directory that has a sibling .NEF raw
# file, copy the GPS EXIF tags from the JPEG onto the raw file, then move
# the JPEG into ./oldjpegs/.
#
# find -print0 with `read -d ''` keeps paths containing spaces (or other
# special characters) intact; the original `for j in $(find . -name *.JPG)`
# word-split every path and left the *.JPG pattern unquoted.
find . -name '*.JPG' -print0 | while IFS= read -r -d '' jpg; do
	base=${jpg%.JPG}
	if [ -f "$base.NEF" ]; then
		echo "$base"
		# Build an exiv2 modify script containing only the GPS tags.
		: > meta.txt
		exiv2 -Pkyv "$base.JPG" | grep GPSInfo | while IFS= read -r tag; do
			echo "set $tag" >> meta.txt
		done
		exiv2 -m meta.txt "$base.NEF"
		mv "$base.JPG" ./oldjpegs/
	fi
done
| true |
1156d60f19035c17e646fca7585c0c3f2d72200a | Shell | projectriff/system | /.github/workflows/acceptance.sh | UTF-8 | 4,443 | 3.375 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
set -o errexit
set -o nounset
set -o pipefail

# riff acceptance tests. Exercises the uppercase sample as a function and
# as an application on the core/knative runtimes (built from git and from
# a local path), or as a stream processor on the streaming runtime with
# the gateway selected by $GATEWAY. Helpers (fats_*, invoke_* macros) come
# from the FATS repo at $FATS_DIR.
source ${FATS_DIR}/.configure.sh

# setup namespace
kubectl create namespace ${NAMESPACE}
fats_create_push_credentials ${NAMESPACE}
source ${FATS_DIR}/macros/create-riff-dev-pod.sh

if [ $RUNTIME = "core" ] || [ $RUNTIME = "knative" ]; then
  # Matrix: {cluster git build, local docker build} x {function, application}.
  for location in cluster local; do
    for test in function application; do
      name=system-${RUNTIME}-${location}-uppercase-node-${test}
      image=$(fats_image_repo ${name})

      echo "##[group]Run function $name"

      if [ $location = 'cluster' ] ; then
        riff $test create $name --image $image --namespace $NAMESPACE --tail \
          --git-repo https://github.com/${FATS_REPO}.git --git-revision ${FATS_REFSPEC} --sub-path ${test}s/uppercase/node &
      else
        riff $test create $name --image $image --namespace $NAMESPACE --docker-network host --tail \
          --local-path ${FATS_DIR}/${test}s/uppercase/node
      fi
      riff $RUNTIME deployer create $name --${test}-ref $name --namespace $NAMESPACE --ingress-policy External --tail

      # Functions take the payload as a POST body; applications via query arg.
      if [ $test = 'function' ] ; then
        curl_opts="-H Content-Type:text/plain -H Accept:text/plain -d system"
        expected_data="SYSTEM"
      else
        curl_opts="--get --data-urlencode input=system"
        expected_data="SYSTEM"
      fi

      # invoke ClusterLocal
      source ${FATS_DIR}/macros/invoke_incluster.sh \
        "$(kubectl get deployers.${RUNTIME}.projectriff.io ${name} --namespace ${NAMESPACE} -ojsonpath='{.status.address.url}')" \
        "${curl_opts}" \
        "${expected_data}"

      # invoke External
      source ${FATS_DIR}/macros/invoke_contour.sh \
        "$(kubectl get deployers.${RUNTIME}.projectriff.io ${name} --namespace ${NAMESPACE} -ojsonpath='{.status.url}')" \
        "${curl_opts}" \
        "${expected_data}"

      riff $RUNTIME deployer delete $name --namespace $NAMESPACE
      riff $test delete $name --namespace $NAMESPACE
      fats_delete_image $image

      echo "##[endgroup]"
    done
  done
fi

if [ $RUNTIME = "streaming" ]; then
  echo "##[group]Create gateway"
  if [ $GATEWAY = "inmemory" ]; then
    riff streaming inmemory-gateway create test --namespace $NAMESPACE --tail
  fi
  if [ $GATEWAY = "kafka" ]; then
    riff streaming kafka-gateway create test --bootstrap-servers kafka.kafka.svc.cluster.local:9092 --namespace $NAMESPACE --tail
  fi
  if [ $GATEWAY = "pulsar" ]; then
    riff streaming pulsar-gateway create test --service-url pulsar://pulsar.pulsar.svc.cluster.local:6650 --namespace $NAMESPACE --tail
  fi
  echo "##[endgroup]"

  for test in node ; do
    name=system-${RUNTIME}-fn-uppercase-${test}
    image=$(fats_image_repo ${name})

    echo "##[group]Run function ${name}"

    riff function create ${name} --image ${image} --namespace ${NAMESPACE} --tail \
      --git-repo https://github.com/${FATS_REPO} --git-revision ${FATS_REFSPEC} --sub-path functions/uppercase/${test}

    lower_stream=${name}-lower
    upper_stream=${name}-upper

    riff streaming stream create ${lower_stream} --namespace $NAMESPACE --gateway test --content-type 'text/plain' --tail
    riff streaming stream create ${upper_stream} --namespace $NAMESPACE --gateway test --content-type 'text/plain' --tail

    riff streaming processor create $name --function-ref $name --namespace $NAMESPACE --input ${lower_stream} --output ${upper_stream} --tail

    # Subscribe in the background (output teed to result.txt), publish one
    # payload, then poll up to ~60s for the uppercased result to appear.
    kubectl exec riff-dev -n $NAMESPACE -- subscribe ${upper_stream} --payload-encoding raw | tee result.txt &
    sleep 10
    kubectl exec riff-dev -n $NAMESPACE -- publish ${lower_stream} --payload "system" --content-type "text/plain"

    actual_data=""
    expected_data="SYSTEM"
    cnt=1
    while [ $cnt -lt 60 ]; do
      echo -n "."
      cnt=$((cnt+1))
      actual_data=$(cat result.txt | jq -r .payload)
      if [ "$actual_data" == "$expected_data" ]; then
        break
      fi
      sleep 1
    done
    fats_assert "$expected_data" "$actual_data"

    kubectl exec riff-dev -n $NAMESPACE -- pkill subscribe

    riff streaming stream delete ${lower_stream} --namespace $NAMESPACE
    riff streaming stream delete ${upper_stream} --namespace $NAMESPACE
    riff streaming processor delete $name --namespace $NAMESPACE
    riff function delete ${name} --namespace ${NAMESPACE}
    fats_delete_image ${image}

    echo "##[endgroup]"
  done

  riff streaming ${GATEWAY}-gateway delete test --namespace $NAMESPACE
fi
| true |
6b865f83e1ada00b110aebc63c641bbcae8bd05a | Shell | lumixraku/shell_playground | /switch.sh | UTF-8 | 322 | 3.78125 | 4 | [] | no_license | #!/bin/bash
# Read an integer from the user and report whether it is even or odd.
read -p 'Please input a number, it must greater than 1:' n

# Reduce n to n mod 2 (0 = even, 1 = odd).
n=$(($n % 2))
echo "n is $n"

if [ "$n" = "0" ]; then
    echo "偶数"
elif [ "$n" = "1" ]; then
    echo "奇数"
else
    echo "default"
fi
| true |
d3450cd9add54d2061579e7bc16a000d939d358d | Shell | lucaswannen/source_code_classification_with_CNN | /dataset_v2/bash/12267200.txt | UTF-8 | 289 | 3 | 3 | [] | no_license | #!/bin/bash
# Delete files under $path whose trailing "-YYYYMMDD" date stamp is older
# than $keep_days days.
path="/foo/"
keep_days=7
keep_seconds=$(date -d "-$keep_days day" +%s)

# "$path"* — the star must stay OUTSIDE the quotes so the glob expands;
# the original $path"*" quoted the star and looped over the literal
# string "/foo/*" (which produced the `date: extra operand` failure).
for f in "$path"*; do
    [ -e "$f" ] || continue   # glob matched nothing
    # ${f##*-} strips everything up to the last '-', leaving the date stamp.
    file_seconds=$(date -d "${f##*-}" +%s)
    if [ "$file_seconds" -lt "$keep_seconds" ]; then
        rm -- "$f"
    fi
done
date: extra operand `/foo/foo.bar.tar.gz-20120904'
| true |
95939d23202bf8a885a041b5c69242bb62190e01 | Shell | golang/mobile | /internal/binres/testdata/gen.sh | UTF-8 | 607 | 3.171875 | 3 | [
"LicenseRef-scancode-google-patent-license-golang",
"BSD-3-Clause"
] | permissive | #! /usr/bin/env bash
# aapt from the Android build-tools release these fixtures are generated with.
AAPT=${ANDROID_HOME:-${HOME}/Android/Sdk}/build-tools/32.0.0/aapt
# android.jar for the minimum API level whose resource identifiers we support.
APIJAR=${ANDROID_HOME:-${HOME}/Android/Sdk}/platforms/android-16/android.jar

# For each <name>.xml manifest: package it with aapt (including an optional
# <name>-res resource dir), then extract the compiled AndroidManifest.xml
# as <name>.bin and the resource table as <name>.arsc.
for manifest in *.xml; do
  stem=${manifest%.xml}
  res_flags=""
  if [ -d "${stem}-res" ]; then
    res_flags="-S ${stem}-res"
  fi
  cp "$manifest" AndroidManifest.xml
  # $res_flags stays unquoted on purpose: "-S dir" must split into two args.
  "$AAPT" p -M AndroidManifest.xml $res_flags -I "$APIJAR" -F tmp.apk
  unzip -qq -o tmp.apk AndroidManifest.xml resources.arsc
  mv AndroidManifest.xml "${stem}.bin"
  mv resources.arsc "${stem}.arsc"
  rm tmp.apk
done
| true |
d4b08228d4b260d8360b40c097b1d94776d60204 | Shell | sullyD64/bigdata-2019 | /project1/src/hive/load.sh | UTF-8 | 307 | 3.140625 | 3 | [] | no_license | #!/bin/sh
# Run a Hive script against a dataset staged into the user's HDFS area.
# Usage: load <hive-script> <hdfs-dataset-dir>

# Two separate bracket tests joined by ||: the original
# `[ -z "$1" || -z "$2" ]` is a syntax error inside single brackets
# ('||' is not valid there), so the usage guard never actually fired.
if [ -z "$1" ] || [ -z "$2" ]; then
    echo "usage: load filename dataset"
    exit 1
fi

filename=$1
dataset=$2

# Stage a fresh copy of the dataset into the hiveinput working directory.
hdfs dfs -rm -r "user/user33/hiveinput"
hdfs dfs -mkdir "user/user33/hiveinput"
hdfs dfs -cp "/user/user33/$dataset/*" /user/user33/hiveinput/

# NOTE(review): $jname_output is never set anywhere in this script, so the
# redirect target is just "outputs/" plus an empty name — confirm the
# intended variable (perhaps derived from $filename).
hive -f "$filename" > "outputs/$jname_output"
| true |
58a40f2497c61cd6c8b91c089acb5a6fa51dfa47 | Shell | delkyd/alfheim_linux-PKGBUILDS | /alot/PKGBUILD | UTF-8 | 1,887 | 2.65625 | 3 | [] | no_license | # Contributor: Mark Foxwell <fastfret79@archlinux.org.uk>
# Contributor: Nicolas Pouillard [https://nicolaspouillard.fr]
# Contributor: seschwar -- contact via Arch Linux forum or AUR
# Maintainer: Ian Denhardt <ian@zenhack.net>
pkgname=alot
pkgver=0.6
pkgrel=1
pkgdesc="terminal-based MUA for the notmuch mail system"
arch=(any)
url="https://github.com/pazz/alot"
license=(GPL)
depends=(notmuch
python2-gpg
python2-magic
python2-configobj
python2-urwid
python2-urwidtrees
python2-twisted)
makedepends=(python2-sphinx)
options=(!emptydirs)
source=($pkgname-$pkgver.tar.gz::https://github.com/pazz/$pkgname/archive/$pkgver.tar.gz)
build() {
cd "$srcdir/$pkgname-$pkgver"
# The archlinux package python2-magic's egg calls itself "file-magic",
# as opposed to the python-magic on pypi. The result is that the alot
# executable can't find the module, so we patch setup.py to fix the
# dependency:
sed -i -e 's/python-magic/file-magic/' setup.py
python2 setup.py build
# The makefile is not actually concurrency-safe; the different calls to
# sphinx will trample on each other if we try to parallelize the build.
# So we pass --jobs=1.
make SPHINXBUILD=sphinx-build2 --jobs=1 -C docs man html
}
package() {
cd "$srcdir/$pkgname-$pkgver"
python2 setup.py install --optimize=1 --root="$pkgdir"
install -Dm644 extra/completion/alot-completion.zsh \
"$pkgdir/usr/share/zsh/functions/_alot"
install -dm755 "$pkgdir/usr/share/alot/themes/examples"
install -Dm644 extra/themes/{mutt,solarized_light,solarized_dark,sup,tomorrow} \
"${pkgdir}/usr/share/alot/themes/examples"
install -dm755 "$pkgdir/usr/share/doc/$pkgname"
cp -a docs/build/html/* "$pkgdir/usr/share/doc/$pkgname"
install -Dm644 docs/build/man/alot.1 "$pkgdir/usr/share/man/man1/alot.1"
}
md5sums=('db355cfb3e905aede50e757cb723ad4d')
| true |
a210d3fb00f0bbf018b0bcccffbef2748ac61803 | Shell | AditiPawar24/BootCampProgram | /hormonic.sh | UTF-8 | 141 | 3.109375 | 3 | [] | no_license | #!/Bin/bash -x
# Read an integer n and print the harmonic sum 1/1 + 1/2 + ... + 1/n.
# The original attempted `double count` (not a shell construct) and only
# concatenated the terms into a string without ever evaluating it.

# harmonic_sum N — print sum_{k=1..N} 1/k with 20 decimal places.
# awk does the floating-point arithmetic (shell arithmetic is integer-only).
harmonic_sum() {
  awk -v n="$1" 'BEGIN { s = 0; for (k = 1; k <= n; k++) s += 1.0 / k; printf "%.20f\n", s }'
}

echo "Enter number"
read num
harmonic_sum "$num"
| true |
6080e436fe1b872068d922eb28c52c0a3e77d7da | Shell | johnfelipe/Scripts-1 | /comprobar-spammer.sh | UTF-8 | 444 | 2.96875 | 3 | [] | no_license | #!/bin/bash
# For each IP listed in /auditoria2/ips (one per line), query the
# StopForumSpam API and print the IP when it is reported as a spammer.
while IFS= read -r ip; do
        url="http://www.stopforumspam.com/api?ip=$ip"
        # Extract the text content of the <appears>...</appears> element
        # ("yes" or "no") from the XML response.
        respuesta=$(curl -s "$url" | sed -ne '/<\/appears>/ { s/<[^>]*>\(.*\)<\/appears>/\1/; p }')
        # Quote $respuesta: when curl fails it is empty and the original
        # unquoted `[ $respuesta = "yes" ]` aborted with a test error.
        if [ "$respuesta" = "yes" ]; then
                echo "$ip"
        fi
done < /auditoria2/ips
| true |
00d01a60a45acf485b5de9b65b9f3020f37b0e68 | Shell | Tommykaf/dotfiles | /.config/scripts/startup-scripts/.xrandr-second-monitor | UTF-8 | 188 | 2.734375 | 3 | [] | no_license | #!/bin/bash
# Scan xrandr's connected outputs (excluding the internal eDP1 panel) and,
# when HDMI1 is among them, enable it to the right of eDP1.
xrandr | grep " connected " | grep -v "eDP1" | cut -f1 --delimiter=" " |
  while read -r display; do
    [ "$display" = "HDMI1" ] && xrandr --output "$display" --auto --right-of eDP1
  done
| true |
604bae2935b44d9ad8577f3b75c109108ca6bc7b | Shell | seletreby310/tkg-lab | /scripts/generate-and-apply-fluent-bit-yaml.sh | UTF-8 | 2,258 | 3.234375 | 3 | [] | no_license | #!/bin/bash -e
# Render fluent-bit data values for the given cluster and install the
# fluent-bit package, shipping logs to Elasticsearch.
TKG_LAB_SCRIPTS="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
# FIX: quote path expansions throughout so paths with spaces survive
# word-splitting.
source "$TKG_LAB_SCRIPTS/set-env.sh"
# Exactly one argument (the cluster name) is required.
if [ ! $# -eq 1 ]; then
  echo "Must supply cluster_name as args"
  exit 1
fi
export CLUSTER_NAME=$1
kubectl config use-context "$CLUSTER_NAME-admin@$CLUSTER_NAME"
export TKG_ENVIRONMENT_NAME=$(yq e .environment-name "$PARAMS_YAML")
# On the shared-services cluster, talk to Elasticsearch in-cluster on 9200;
# from any other cluster, go through its external FQDN on port 80.
if [ "$(yq e .shared-services-cluster.name "$PARAMS_YAML")" = "$CLUSTER_NAME" ];
then
  export ELASTICSEARCH_CN=elasticsearch.elasticsearch-kibana
  export ELASTICSEARCH_PORT="9200"
else
  export ELASTICSEARCH_CN=$(yq e .shared-services-cluster.elasticsearch-fqdn "$PARAMS_YAML")
  export ELASTICSEARCH_PORT="80"
fi
mkdir -p "generated/$CLUSTER_NAME/fluent-bit/"
# fluent-bit [OUTPUT] stanza: forward everything to Elasticsearch.
export CONFIG_OUTPUTS=$(cat << EOF
[OUTPUT]
  Name es
  Match *
  Host $ELASTICSEARCH_CN
  Port $ELASTICSEARCH_PORT
  Logstash_Format On
  Replace_Dots On
  Retry_Limit False
  Buffer_Size False
  tls Off
EOF
)
# fluent-bit [FILTER] stanzas: enrich records with Kubernetes metadata and
# tag them with the cluster/environment they came from.
export CONFIG_FILTERS=$(cat << EOF
[FILTER]
  Name kubernetes
  Match kube.*
  Kube_URL https://kubernetes.default.svc:443
  Kube_CA_File /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
  Kube_Token_File /var/run/secrets/kubernetes.io/serviceaccount/token
  Kube_Tag_Prefix kube.var.log.containers.
  Merge_Log On
  Merge_Log_Key log_processed
  K8S-Logging.Parser On
  K8S-Logging.Exclude On
[FILTER]
  Name record_modifier
  Match *
  Record tkg_cluster $CLUSTER_NAME
  Record tkg_instance $TKG_ENVIRONMENT_NAME
EOF
)
# Write the data-values file: outputs first, then merge in the filters.
yq e ".fluent_bit.config.outputs = strenv(CONFIG_OUTPUTS)" --null-input > "generated/$CLUSTER_NAME/fluent-bit/fluent-bit-data-values.yaml"
yq e -i ".fluent_bit.config.filters = strenv(CONFIG_FILTERS)" "generated/$CLUSTER_NAME/fluent-bit/fluent-bit-data-values.yaml"
# Install whatever version of the package the cluster advertises.
VERSION=$(tanzu package available list fluent-bit.tanzu.vmware.com -oyaml | yq eval ".[0].version" -)
tanzu package install fluent-bit \
  --package-name fluent-bit.tanzu.vmware.com \
  --version "$VERSION" \
  --namespace tanzu-kapp \
  --values-file "generated/$CLUSTER_NAME/fluent-bit/fluent-bit-data-values.yaml" \
  --poll-timeout 10m0s
| true |
0af1aef3adffed8f5b3bbdafdeb1ad21ad4b7412 | Shell | Robien/acharn | /data/mesh/transformTex.sh | UTF-8 | 149 | 2.59375 | 3 | [] | no_license | #!/bin/bash
# For each file given on the command line: keep a .back copy, run
# nePasLancer.sh over it and replace the file with the transformed output.
# The .back copy is only removed once every step has succeeded, so the
# original can be restored after a failure.
# FIX: quote "$@" and "$f" so filenames containing spaces survive.
for f in "$@"
do
	echo "$f ..."
	cp "$f" "$f.back" && ./nePasLancer.sh "$f" > "$f.new" && mv "$f.new" "$f" && echo "réussi ! \o/" && rm "$f.back"
done
| true |
11dd232b7e24f36c939f787379ee2f753a21d16e | Shell | jriguera/raspbian-cloud | /stage8/41-dhcpd-dnsmasq/files/dhcpcd.exit-hook | UTF-8 | 1,885 | 3.4375 | 3 | [] | no_license | #!/bin/sh
# https://www.daemon-systems.org/man/dhcpcd-run-hooks.8.html
# https://linux.die.net/man/8/dhclient-script
# http://www.thekelleys.org.uk/dnsmasq/docs/dnsmasq-man.html
# Generate equivalent dnsmasq /etc/resolv.conf
# File dnsmasq reads its upstream resolvers from.
RESOLVCONF_DNSMASQ=/etc/resolv.dhcp.conf
# systemd-timesyncd drop-in generated from DHCP-supplied NTP servers.
TIMESYNCD_CONF=/run/systemd/timesyncd.conf.d/01-dhclient.conf
# Remove the timesyncd drop-in (if present) and restart the daemon so it
# falls back to its built-in NTP servers.
unset_timesyncd() {
    if test -e $TIMESYNCD_CONF
    then
        rm -f $TIMESYNCD_CONF
        systemctl try-restart systemd-timesyncd.service || true
    fi
}
# Persist the DHCP-provided NTP servers as a systemd-timesyncd drop-in and
# restart the daemon, but only when the rendered file actually changed.
set_timesyncd() {
    # FIX: the guard previously read `test -z`, which ran this branch only
    # when NO NTP servers were received (writing an empty "NTP=" entry) and
    # skipped it whenever servers were present; `-n` is the intended test.
    if test -n "${new_ntp_servers}"
    then
        mkdir -p $(dirname $TIMESYNCD_CONF)
        cat <<EOF > ${TIMESYNCD_CONF}.new
# NTP server entries received from DHCP server
[Time]
NTP=$new_ntp_servers
EOF
        # Install the new file and restart only if it differs from the
        # current one, to avoid pointless daemon restarts on every renewal.
        if ! diff ${TIMESYNCD_CONF}.new ${TIMESYNCD_CONF} > /dev/null 2>&1
        then
            unset_timesyncd
            mv ${TIMESYNCD_CONF}.new ${TIMESYNCD_CONF}
            systemctl try-restart systemd-timesyncd.service || true
        fi
    fi
}
# Render the DHCP-provided DNS servers into the dnsmasq resolv file.
# With no servers in the lease, fall back to the public resolvers - but only
# when the current file is suspiciously short (fewer than 16 characters,
# i.e. not even one full "nameserver x.x.x.x" line).
set_dns() {
    if test -z "${new_domain_name_servers}"
    then
        local count=$(cat $RESOLVCONF_DNSMASQ | wc -m)
        # FIX: this hook runs under /bin/sh, where the bash-only [[ ]]
        # construct is unavailable (e.g. dash); use a plain [ ] test.
        [ "$count" -lt 16 ] && unset_dns
        return 0
    fi
    {
        echo "# Generated at $(date) for interface ${interface} by $0"
        for dnsaddr in ${new_domain_name_servers}
        do
            echo "nameserver ${dnsaddr}"
        done
    } > $RESOLVCONF_DNSMASQ
}
# No DNS servers in the lease: point dnsmasq at well-known public resolvers
# (Cloudflare and Google) so name resolution keeps working.
unset_dns() {
    printf 'nameserver %s\n' 1.1.1.1 8.8.8.8 > $RESOLVCONF_DNSMASQ
}
if test "x${if_up}" = "xtrue"
then
case "${reason}" in
BOUND6|RENEW6|REBIND6|REBOOT6|INFORM6)
new_domain_name_servers="$new_dhcp6_name_servers"
set_dns
set_timesyncd
;;
BOUND|RENEW|REBIND|REBOOT)
set_dns
set_timesyncd
;;
EXPIRE|FAIL|RELEASE|STOP)
unset_dns
unset_timesyncd
;;
esac
fi
| true |
e6b711693cb7f211f4ccd814013e5a0884f18218 | Shell | roman-grek/DevOps | /Final_task/bash_provision.sh | UTF-8 | 623 | 3.0625 | 3 | [] | no_license | #! /bin/bash
# Amazon Linux provisioning: LAMP stack, web-root permissions, test page.
yum update -y
# install apache, php and mysql-server
yum install -y httpd24 php73 mysql57-server php73-mysqlnd
# start apache web server
service httpd start
# configure apache to start on every boot
chkconfig httpd on
# add ec2-user to apache group and give permissions to manipulate webserver files
usermod -a -G apache ec2-user
chown -R ec2-user:apache /var/www
chmod 2775 /var/www
# setgid on directories, group-writable files under the web root
# NOTE(review): the `sudo` below is redundant if this script already runs as
# root (as EC2 user-data does) - confirm the intended execution context.
find /var/www -type d -exec sudo chmod 2775 {} \;
find /var/www -type f -exec sudo chmod 0664 {} \;
# add php file for testing
echo "<?php phpinfo(); ?>" > /var/www/html/phpinfo.php
# install ansible
pip install ansible | true |
164a0272f418ec44c65ee436c5bd4ec192932898 | Shell | xuyinhao/lgpbenchmark | /loongoopBench/api/bin/chgrp/case/6-3 | UTF-8 | 474 | 2.6875 | 3 | [] | no_license | #!/bin/sh
# Clean up artefacts from a previous run (errors are ignored on purpose).
# $rm/$rmr/$mkdir/$chgrp and checkOkNeedWinacl are presumably provided by
# the benchmark harness environment - TODO confirm.
$rm $apiPath/chgrp6-3a > /dev/null 2>&1
# FIX: the cleanup removed "chagrp6-3b" (typo), so a chgrp6-3b left over
# from a previous run was never deleted.
$rmr $apiPath/chgrp6-3b > /dev/null 2>&1
groupdel chgrp6-3g 2>/dev/null
# Create two fresh directories and a fresh group for the test.
$mkdir $apiPath/chgrp6-3a > /dev/null 2>&1
$mkdir $apiPath/chgrp6-3b > /dev/null 2>&1
groupadd chgrp6-3g
# Exercise chgrp with two targets at once, then verify both directories.
$chgrp chgrp6-3g $apiPath/chgrp6-3a $apiPath/chgrp6-3b 2>/dev/null
result=$?
ret=`checkOkNeedWinacl $result "chgrp6-3g" "$apiPath/chgrp6-3a" 1`
if [ 1 -eq $ret ]; then
checkOkNeedWinacl $result "chgrp6-3g" "$apiPath/chgrp6-3b" 1
else
echo 0
fi
| true |
abbbd40d242a94fe38c383abae20e97f71550988 | Shell | sorja/LinuxFundamentals2016 | /week4/ex3/generalized.sh | UTF-8 | 683 | 3.6875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Takes arg a dir
# Generates gnuplot
# For every *temps.txt file below the given directory, track the daily
# min/max PROCESSOR_ZONE temperature and append one "date max min" row per
# day to /tmp/<dirname>, which generalized.gnuplot then plots.
old_date=""
max=0
min=100
filename=$(basename "$@")
# FIX: the original removed the literal path /tmp/filename instead of
# /tmp/$filename, so stale rows from earlier runs accumulated in the output.
rm -rf "/tmp/$filename"
# FIX: quote the -name pattern so it is not expanded against the cwd.
for line in $(find "$@"* -name '*temps.txt');do
  # NOTE(review): \d is a PCRE class; plain GNU grep (BRE) needs [0-9] or
  # -P. Presumably this ran on a grep variant accepting \d - confirm.
  date=$(echo "$line" | grep -ho '\d\d\d\d.\d\d.\d\d')
  temp=$(grep -ho 'PROCESSOR_ZONE *[0-9]*C' "$line" | grep -ho '[0-9]\+' )
  if [ "$temp" -gt "$max" ];then
    max=$temp
  fi
  if [ "$temp" -lt "$min" ];then
    min=$temp
  fi
  # First sample of a new day: flush the previous day's extremes and reset.
  if [ "$date" != "$old_date" ];then
    echo $date $max $min >> "/tmp/$filename"
    max=0
    min=100
    temp=0
  fi
  old_date=$date
done
gnuplot -e "outfile='$filename'; infile='/tmp/$filename'" generalized.gnuplot | true |
5460f56608bde6c01185fd38f34dfd9bffc8180a | Shell | fiddlerwoaroof/dotfiles | /zsh/zsh_plugins/dependencies/editor-setup.zsh | UTF-8 | 593 | 3.125 | 3 | [] | no_license | #:depends-on:path-setup
# Pick the best available editor for $VISUAL, in preference order.
# NOTE(review): `2>&1 >/dev/null` silences stdout but still leaks stderr to
# the terminal (redirections apply left to right); `>/dev/null 2>&1` may
# have been intended - presumably harmless for `command -v`.
if command -v emacsclient.scpt 2>&1 >/dev/null; then
  export VISUAL="emacsclient.scpt"
elif command -v newemacs 2>&1 >/dev/null; then
  export VISUAL=newemacs
elif command -v emacsclient 2>&1 >/dev/null; then
  # No wrapper yet: generate ~/bin/newemacs around emacsclient.
  # The quoted "EOF" delimiter keeps "$@" literal in the generated script.
  cat > "$HOME/bin/newemacs" <<"EOF"
#!/bin/sh
emacsclient -c "$@"
EOF
  chmod +x "$HOME/bin/newemacs"
  export VISUAL=newemacs
elif command -v vim 2>&1 >/dev/null; then
  export VISUAL=$(which vim)
fi
export EDITOR=vim
# Wrap vim so XON/XOFF flow control (Ctrl-S freezing the terminal) is
# disabled while it runs and re-enabled afterwards.
vim() {
  stty -ixon
  # NOTE(review): $* is unquoted, so arguments containing spaces are
  # re-split here - "$@" was probably intended.
  env vim $*
  stty ixany
}
# vi-style :e command: open the arguments in the configured visual editor.
function :e {
  $VISUAL "$@"
}
alias vi='vim'
alias v=$VISUAL
alias e=$EDITOR
| true |
2842b04d75b025861202091f591850f210aac6db | Shell | MichaelHoltTech/puppetserver | /my_init.d/05_run_puppet.sh | UTF-8 | 646 | 3.3125 | 3 | [] | no_license | #!/bin/bash
# load environment variables
source /etc/container_environment.sh
# default cron setting is every 30 minutes
test -z "$PUPPET_AGENT_CRON" && export PUPPET_AGENT_CRON="0,30 * * * *"
# Build the cron.d file content. The unquoted EOF delimiter lets
# $PUPPET_AGENT_CRON expand inside the here-doc; read -d '' consumes the
# whole document into one variable.
read -d '' cronscript <<EOF
# Runs the Puppet Agent on a Schedule!
SHELL=/bin/bash
PATH=/bin:/sbin:/usr/bin:/usr/sbin
#Automatically run puppet agent 5 minutes after container boot to give puppetserver enough time to start up
@reboot root sleep 300 && /usr/bin/puppet agent -t
$PUPPET_AGENT_CRON root /usr/bin/puppet agent -t
EOF
# Only install the schedule when the marker file is absent.
# NOTE(review): the meaning of /root/.new_rsa is not visible here -
# presumably created elsewhere during provisioning; confirm.
if [ ! -f /root/.new_rsa ]
then
echo "$cronscript" > /etc/cron.d/puppet-agent && chmod +x /etc/cron.d/puppet-agent
fi
| true |
c7f2c26d01af33340ff93baa50a99ad5abd09d16 | Shell | Durengo/VIKO_OS_PRACTICAL_TASKS_WITH_LINUX | /t2/task2b.sh.save | UTF-8 | 432 | 3.1875 | 3 | [] | no_license | #!bin/bash
echo "Please choose a TXT file for this script to load, then you will be prompted to choose which line to read"
echo "Enter directory and file (Enter 0 to choose this file): "
read destination
if [ $destination != 0 ]; then
cd /
cd $destination
echo "You have loaded: "$destination
elif [ $destination = 0 ]; then
echo "This text file is loaded."cd task2b.sh
fi
cat $destination
echo "Which line do you want to select? "
| true |
bb8a14c76f659fc5b6b92b482fe0deaff5ee9748 | Shell | saqibnizami/dat-dashboard | /private/setup/development/execute.sh | UTF-8 | 250 | 2.734375 | 3 | [] | no_license | #!/bin/sh
# Resolve the project root (three directories above this script) and run
# from there so the relative paths below work.
# FIX: quote "$0" and the cd target, and abort if the cd fails instead of
# sourcing/launching from the wrong directory.
ROOT_DIRECTORY=$(dirname "$0")/../../..
# make sure we execute this script from its location
cd "$ROOT_DIRECTORY" || exit 1
# activate the environment
. venv/bin/activate
# run the project with development configuration
./index.py --env development
| true |
921a49b25b2e00c16a7d7535e011d912db055bf0 | Shell | Lachele/Mucin-Like-alpha-Dystroglycan | /TREATED/d4g/PLOTS/4WAY_HOH/convert_plots.bash | UTF-8 | 235 | 2.671875 | 3 | [] | no_license | #!/bin/bash
# Convert every site/phase/oxygen gnuplot PostScript plot to PNG.
for site in 4 5 6 7 ; do
    for phase in All All_EQ ; do
        for oxygen in O OG1 ; do
            Gnuplot_File_Prefix=${phase}_site-${site}_${oxygen}
            # FIX: quote the filenames so unexpected characters in a prefix
            # cannot be word-split or glob-expanded.
            convert "${Gnuplot_File_Prefix}.ps" "${Gnuplot_File_Prefix}.png"
        done
    done
done
| true |
a6e1131cdbfa94250397a2b892d98662768f9f7e | Shell | Xkirk/AuraAnalysis | /deploy/mysql/create-table.sh | UTF-8 | 419 | 2.59375 | 3 | [] | no_license | #!/usr/bin/env bash
# Run the following with a privileged account: replace `-u <user>` and
# `-p<password>` with the credentials of your administrative MySQL user.
mysql -u root -proot -e "CREATE USER 'bigdata'@'%' IDENTIFIED BY 'bigdata';"
# FIX: escape the backticks - inside double quotes an unescaped `aura` is
# shell command substitution, which emptied the database name and produced
# a malformed GRANT statement.
mysql -u root -proot -e "GRANT ALL PRIVILEGES ON \`aura\`.* TO 'bigdata'@'%';"
# Create the database and tables
# NOTE(review): the file is named "aura_sql" here but "aura_init.sql" below;
# confirm the intended filename (presumably "aura.sql").
mysql -u bigdata -pbigdata < aura_sql
# Load the initial data
mysql -u bigdata -pbigdata < aura_init.sql
| true |
ac7e81db1ef158ca69336e8474cdc131fa295062 | Shell | ans-cst/Cloud-Scripts | /Azure/DevOps/Install-LinuxVSTSAgent.sh | UTF-8 | 2,491 | 3.4375 | 3 | [] | no_license | #!/bin/bash
# Version 1.0
# Installs and configures VSTS, Docker, az cli and Kubernetes
# Command usage: Install-LinuxVSTSAgent.sh $VSTS_AGENT_INPUT_TOKEN $VSTS_AGENT_INPUT_POOL $ADMINUSER
export VSTS_DOWNLOAD_URL="https://vstsagentpackage.azureedge.net/agent/2.141.1/vsts-agent-linux-x64-2.141.1.tar.gz"
export ORG="ans-devops"
# Third argument: the local account the agent is installed and run under
# (it was missing from the usage line above).
export ADMINUSER=$3
# Environment variables used in VSTS configuration
export VSTS_AGENT_INPUT_URL="https://dev.azure.com/$ORG"
export VSTS_AGENT_INPUT_AUTH="pat"
export VSTS_AGENT_INPUT_TOKEN=$1
export VSTS_AGENT_INPUT_POOL=$2
export VSTS_AGENT_INPUT_AGENT=$HOSTNAME
sudo apt-get update -y
sudo apt-get upgrade -y
if [ ! $(which curl) ]; then
    sudo apt-get install -y curl
fi
# Install the agent only once: skip when its systemd unit already exists.
# FIX: the unit path used the undefined variable $AGENT (always empty), so
# the existence check could never match a previously installed agent; the
# agent name is exported above as VSTS_AGENT_INPUT_AGENT. Also use the
# standard -e existence test instead of the obsolescent -a.
if [ ! -e /etc/systemd/system/vsts.agent.$ORG.$VSTS_AGENT_INPUT_AGENT.service ]; then
    # Download, extract and configure the agent
    curl $VSTS_DOWNLOAD_URL --output /tmp/vsts-agent-linux.x64.tar.gz
    mkdir /home/$ADMINUSER/agent
    cd /home/$ADMINUSER/agent
    tar zxf /tmp/vsts-agent-linux.x64.tar.gz
    sudo chown -R $ADMINUSER:999 /home/$ADMINUSER/agent
    # Install dependencies
    sudo ./bin/installdependencies.sh
    # TODO: Config needs to be configured for unattended access
    su --command "./config.sh --unattended --acceptTeeEula" $ADMINUSER
    # Configure the agent as a service
    sudo ./svc.sh install
    sudo ./svc.sh enable
    sudo ./svc.sh start
    cd /home/$ADMINUSER
fi
# Install dependencies and install Docker
if [ ! $(which docker) ]; then
    sudo apt-get install -y apt-transport-https ca-certificates curl software-properties-common
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
    sudo apt-get update -y
    sudo apt-get install -y docker-ce
    sudo systemctl enable docker
    sudo systemctl start docker
    sudo usermod -aG docker $ADMINUSER
    # Install crontab for user to clear down images.
    # The "'$3'" splice below deliberately writes a *literal* $3 into the
    # crontab so awk (not this script) selects the image-ID column.
    echo "30 8 * * * docker images | egrep 'azurecr|none' | awk '{print "'$3'"}' | xargs docker rmi --force" | crontab -
fi
# Install AZ CLI and Kubectl
if [ ! $(which az) ]; then
    curl -L https://packages.microsoft.com/keys/microsoft.asc | sudo apt-key add -
    sudo add-apt-repository -y "deb [arch=amd64] https://packages.microsoft.com/repos/azure-cli/ $(lsb_release -cs) main"
    sudo apt-get -y update
    sudo apt-get -y install apt-transport-https azure-cli
    sudo az aks install-cli
fi
| true |
a3e15e16b55379a21065aac1f980d27ad30a38b4 | Shell | leroyg/digihel | /docker-entrypoint.sh | UTF-8 | 165 | 2.953125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Entrypoint: block until the "db" host accepts TCP connections on the
# PostgreSQL port, then replace this shell with the requested command.
set -e
until nc -z db 5432; do
  echo "Waiting for database connection..."
  sleep 3
done
echo "Database available, starting application"
exec "$@"
| true |
d56040d8043122bd942d60ea64350f08735fcf32 | Shell | HPCHub/benchmarks | /tests/hpcc/install.sh | UTF-8 | 574 | 3.125 | 3 | [] | no_license | #!/bin/bash
# Build the HPCC benchmark and, on Azure, distribute the test tree to nodes.
# Pull in the platform override file when run from the repository layout.
if [ -f ../platform.sh ]; then
    . ../platform.sh
fi
. include.sh
HPCHUB_TEST_STATE=install
# Platform-specific settings (path comes from platform.sh / include.sh).
if [ -f "${HPCHUB_PLATFORM}" ]; then
    . ${HPCHUB_PLATFORM}
fi
# Fetch and unpack the sources only once.
# NOTE(review): the tarball is only extracted right after downloading; a
# pre-existing archive is assumed to be already unpacked - confirm.
if [ ! -f hpcc-${hpcc_version}.tar.gz ]; then
    wget http://icl.cs.utk.edu/projectsfiles/hpcc/download/hpcc-${hpcc_version}.tar.gz
    tar -xvzf hpcc-${hpcc_version}.tar.gz
fi
cd hpcc-${hpcc_version}
cp ../Make.hpchub hpl/Make.hpchub
${HPCHUB_COMPILE_PREFIX} make arch=hpchub
# On Azure, copy the tests to every node listed in $NODES.
if [ $HPCHUB_PLATFORM == 'azure' ]; then
    for i in $NODES; do
        scp -r ../../../tests/ $i:$HOME/hpchub_benchmark/
    done
fi
| true |
3bf584b3d87ef86f67029415637f2d7be4b91347 | Shell | cognitect-labs/transducers-js | /build/revision | UTF-8 | 384 | 3.703125 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Returns the revision number used for deployment.
set -e
# Describe HEAD relative to the v0.0 tag, e.g. "v0.0-123-g0123abc".
describe=$(git --no-replace-objects describe --tags --match v0.0)
# Trim the fixed-width "v0.0-" prefix (5 chars) and the "-g<7-hex-sha>"
# suffix (9 chars), leaving only the commit count.
revision=${describe:5}
revision=${revision:0:${#revision}-9}
echo $revision
| true |
af2412459075ace82b43fb46810c7445172a114e | Shell | baitxaps/PythonAutomation | /shell/shell_deep/ifstatements.sh | UTF-8 | 827 | 3.234375 | 3 | [] | no_license | #! /bin/bash
# if statement forms:
# if ...; then
# ...
# elif ...; then
# ...
# else
# ...
# fi
# One line: three parts separated by semicolons.
if ((2<10));then echo "true"; fi
# The semicolon after `echo "true"` may be omitted when `fi` is on its own line.
if ((2<10));then
echo "true"
fi
# The semicolon before `then` may be omitted when `then` starts a new line.
if ((2<10))
then
echo "true"
fi
# Test with: ./ifstatements.sh 90 90
# (( )) is the arithmetic compound command; both parentheses are required.
# Without parentheses, use `let` instead (see below).
# FIX: the original `declare -i a a` declared `a` twice and left b untyped.
declare -i a b
a=$1;b=$2
if((a<b));then
echo "$a 小于 $b"
elif((a>b));then
echo "$a 大于 $b"
else
echo "$a 等于 $b"
fi
# The same comparison written with `let`: let returns non-zero when its
# expression evaluates to 0, so it works as an if condition.
if let "a<b" ;then
echo "$a 小于 $b"
elif let "a>b";then
echo "$a 大于 $b"
else
echo "$a 等于 $b"
fi
# FIX: the original probed /ect/passwd (typo for /etc/passwd), so the
# account check always failed.
if grep -q ^chen /etc/passwd; then
echo 'chen this account exist'
fi
if [ -d /root/tmp ];then
echo '/root/tmp document exist'
else
echo '/root/tmp document no exist'
fi
| true |
5f02d81b4fdffda7c6545d16d358bf23857ff0c6 | Shell | echojs/config | /init.d/unicorn | UTF-8 | 1,122 | 3.6875 | 4 | [] | no_license | #! /bin/sh
### BEGIN INIT INFO
# Provides: unicorn
# Required-Start: $local_fs $remote_fs $network $nginx
# Required-Stop: $local_fs $remote_fs $network $nginx
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: starts the unicorn web server
### END INIT INFO
#
# Install with: update-rc.d unicorn defaults 99
PATH=/bin:/usr/bin:/sbin:/usr/sbin
USER=echojs
HOME=/home/echojs
ENV=production
# PID of the unicorn master, read once at script start.
# NOTE(review): if the pid file is missing, $PID is empty and the kill
# commands below fail with a usage error - confirm that is acceptable.
PID=`cat $HOME/echojs/tmp/pids/unicorn.pid`
# Run command $2: directly when we are not root, otherwise via su as user $1.
as_user() {
  if [ "xroot" != "x`whoami`" ]
  then
    bash -c "$2"
  else
    su - -c "$2" $1
  fi
}
case "$1" in
  start)
    echo "Starting unicorn for user '$USER'"
    as_user $USER "cd $HOME/echojs; unicorn -c config/unicorn.rb -E $ENV -D"
    echo "."
    ;;
  stop)
    # QUIT asks unicorn for a graceful shutdown.
    echo "Stopping unicorn for user '$USER'"
    kill -QUIT $PID
    echo "."
    ;;
  restart)
    # USR2 re-execs a new master, then QUIT retires the old one.
    echo "Restarting unicorn for user '$USER'"
    kill -USR2 $PID
    sleep 1
    kill -QUIT $PID
    echo "."
    ;;
  reload)
    # HUP re-reads the configuration without a full restart.
    echo "Reloading unicorn configuration for user '$USER'"
    kill -HUP $PID
    echo "."
    ;;
  *)
    echo "Usage: /etc/init.d/unicorn {start|stop|reload|restart}"
    exit 1
esac
exit 0
| true |
de2d614efcd03585f84b61cffdd2d385f9235a8a | Shell | InternationalDataSpaces/InformationModel-deprecated- | /create-ontology-ttl.sh | UTF-8 | 5,996 | 3.140625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
write_to_file()
{
# initialize a local var
local file="Ontology.ttl"
local version=$1
if [ -z "$version" ] ; then
echo "Warning! No version parameter supplied. Assuming version 2.1.0"
version="2.1.0"
fi
#delete old content and add prefixes
echo "@prefix owl: <http://www.w3.org/2002/07/owl#> ." > "$file"
echo "@prefix dct: <http://purl.org/dc/terms/> ." >> "$file"
echo "@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> ." >> "$file"
echo "@prefix prov: <http://www.w3.org/ns/prov#> ." >> "$file"
echo "@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> ." >> "$file"
echo "@prefix ids: <https://w3id.org/idsa/core/> ." >> "$file"
echo "@prefix vann: <http://purl.org/vocab/vann/> ." >> "$file"
echo "@prefix void: <http://rdfs.org/ns/void#>." >> "$file"
echo "@prefix voaf: <http://purl.org/vocommons/voaf#>." >> "$file"
echo "@prefix foaf: <http://xmlns.com/foaf/0.1/>." >> "$file"
echo "@prefix cc: <http://creativecommons.org/ns#>. " >> "$file"
echo "@prefix xsd: <http://www.w3.org/2001/XMLSchema#>." >> "$file"
echo "" >> "$file"
#add meta-data
echo '# Description of this ontology' >> "$file"
echo '# ----------------------------' >> "$file"
echo '' >> "$file"
echo 'ids:' >> "$file"
echo ' a voaf:Vocabulary, owl:Ontology ;' >> "$file"
echo ' rdfs:isDefinedBy <https://w3id.org/idsa/core/> ;' >> "$file"
echo ' rdfs:label "IDS Information Model"@en;' >> "$file"
echo ' dct:title "The Industrial Dataspace Information Model"@en;' >> "$file"
echo ' rdfs:comment "This ontology defines classes and properties for describing participants, infrastructure, data and services of the Industrial Dataspace.";' >> "$file"
echo ' cc:license <http://www.apache.org/licenses/LICENSE-2.0> ;' >> "$file"
echo ' dct:creator <https://github.com/cmader> ;' >> "$file"
echo ' dct:creator <https://github.com/jpullmann> ;' >> "$file"
echo ' dct:creator <http://sebastian.tramp.name> ;' >> "$file"
echo ' dct:contributor _:AndreasMueller ;' >> "$file"
echo ' dct:contributor _:AnnaKasprzik ;' >> "$file"
echo ' dct:contributor <https://github.com/sebbader> ;' >> "$file"
echo ' dct:contributor <https://github.com/Maboeckmann/> ;' >> "$file"
echo ' dct:contributor <https://github.com/HaydarAk> ;' >> "$file"
echo ' dct:publisher ids:IDSA ;' >> "$file"
echo ' dct:created "2017-09-26"^^xsd:date;' >> "$file"
echo ' dct:modified "'$(date +%Y-%m-%d)'"^^xsd:date;' >> "$file"
echo ' owl:versionInfo "'$version'";' >> "$file"
echo ' owl:versionIRI "https://w3id.org/idsa/core/'$version'>";' >> "$file"
echo ' vann:preferredNamespaceUri "https://w3id.org/idsa/core/";' >> "$file"
echo ' vann:preferredNamespacePrefix "ids" ;' >> "$file"
echo ' rdfs:seeAlso <https://industrialdataspace.github.io/InformationModel/> ;' >> "$file"
echo ' void:vocabulary' >> "$file"
echo ' <http://purl.org/vocab/vann/> ,' >> "$file"
echo ' <http://rdfs.org/ns/void#> ,' >> "$file"
echo ' <http://purl.org/vocommons/voaf#> ,' >> "$file"
echo ' <http://purl.org/dc/terms/> ,' >> "$file"
echo ' <http://purl.org/vocab/vann/> ,' >> "$file"
echo ' <http://xmlns.com/foaf/0.1/> ;' >> "$file"
echo '.' >> "$file"
echo '' >> "$file"
echo '<http://www.apache.org/licenses/LICENSE-2.0>' >> "$file"
echo ' rdfs:label "Apache License Version 2.0, January 2004" ;' >> "$file"
echo '.' >> "$file"
echo '' >> "$file"
echo 'ids:IDSA' >> "$file"
echo ' a dct:Agent, foaf:Organization;' >> "$file"
echo ' foaf:homepage <https://www.internationaldataspaces.org> ;' >> "$file"
echo ' rdfs:label "International Data Spaces Association" ;' >> "$file"
echo '.' >> "$file"
echo '' >> "$file"
echo '<http://sebastian.tramp.name>' >> "$file"
echo ' a dct:Agent, foaf:Person ;' >> "$file"
echo ' foaf:name "Sebastian Tramp";' >> "$file"
echo '.' >> "$file"
echo '' >> "$file"
echo '<https://github.com/cmader>' >> "$file"
echo ' a dct:Agent, foaf:Person ;' >> "$file"
echo ' foaf:name "Christian Mader";' >> "$file"
echo '.' >> "$file"
echo '' >> "$file"
echo '<https://github.com/jpullmann>' >> "$file"
echo ' a dct:Agent, foaf:Person ;' >> "$file"
echo ' foaf:name "Jaroslav Pullmann";' >> "$file"
echo '.' >> "$file"
echo '' >> "$file"
echo '_:AndreasMueller' >> "$file"
echo ' a dct:Agent, foaf:Person ;' >> "$file"
echo ' foaf:name "Andreas Müller"' >> "$file"
echo '.' >> "$file"
echo '' >> "$file"
echo '_:AnnaKasprzik a dct:Agent, foaf:Person ;' >> "$file"
echo ' foaf:name "Anna Kasprzik";' >> "$file"
echo '.' >> "$file"
echo '' >> "$file"
echo '<https://github.com/sebbader> a dct:Agent, foaf:Person ;' >> "$file"
echo ' foaf:name "Sebastian Bader";' >> "$file"
echo '.' >> "$file"
echo '<https://github.com/HaydarAk> a dct:Agent, foaf:Person ;' >> "$file"
echo ' foaf:name "Haydar Akyürek";' >> "$file"
echo '.' >> "$file"
echo '<https://github.com/Maboeckmann> a dct:Agent, foaf:Person ;' >> "$file"
echo ' foaf:name "Matthias Böckmann";' >> "$file"
echo '.' >> "$file"
# "open the file to edit" ... not required. echo will do
# search for files in selcted folders
echo '# ----------------------------' >> "$file"
echo '# Imports of class files' >> "$file"
echo 'ids:' >> "$file"
for class in $(find model/* -maxdepth 1 -name "*.ttl")
do
if [[ -f $class ]]; then
echo " owl:imports <$class> ; " >> "$file"
fi
done
# search for files in selcted folders
#for class in $(find metamodel/* -name "*.ttl")
#do
# if [[ -f $class ]]; then
# echo " owl:imports <$class> ; " >> "$file"
# fi
#done
# search for files in selcted folders
for class in $(find taxonomies/* -maxdepth 1 -name "*.ttl")
do
if [[ -f $class ]]; then
echo " owl:imports <$class> ; " >> "$file"
fi
done
for class in $(find codes/* -maxdepth 1 -name "*.ttl")
do
if [[ -f $class ]]; then
echo " owl:imports <$class> ; " >> "$file"
fi
done
echo ". " >> "$file"
}
# execute it
write_to_file $1
| true |
048275a12183154cf595fe37b7a2e383abf312da | Shell | RuoAndo/nii-cyber-security-admin | /mapreduce/pig/session/split2/cat-avg.sh | UTF-8 | 306 | 3.140625 | 3 | [
"Apache-2.0"
] | permissive | TESTFILE=$1
rm -rf cat-avg-all
touch cat-avg-all
fn=`readlink -f tmp-avg/* | grep part`
ary=(`echo $fn`)
#echo ${#ary[@]}
#touch cat-cls-$a
for i in `seq 1 ${#ary[@]}`
do
#echo ${ary[$i-1]}
#wc -l ${ary[$i-1]}
cat ${ary[$i-1]} >> cat-avg-all
done
more cat-avg-all
| true |
3cbe404a4f1bce1ee910df47319f305b32f2f430 | Shell | ilgaz/ilgaz-ready | /main.sh | UTF-8 | 556 | 3.484375 | 3 | [] | no_license | #!/bin/bash
# Bootstrap the dotfiles environment: create the setup directory, run the
# install/clone helper scripts and switch the login shell to zsh.
export INSTALLATION_DIR=$HOME/Dotfiles
export ZSHRC=$INSTALLATION_DIR/zsh/.zshrc
if [[ -d $INSTALLATION_DIR ]]
then
	echo "Directory already exists, not creating new one"
else
	echo "Setup directory not found! Creating it"
	# FIX: the original tested `$0` (the script path, never "0") instead of
	# the mkdir exit status, so the restart branch fired on every run.
	if ! mkdir "$INSTALLATION_DIR"
	then
		echo "Could not create installation folder, restarting"
		./main.sh && exit
	fi
fi
./installations.sh
./clone.sh
# Change default shell to zsh
chsh -s "/bin/zsh"
echo "You're going to need to reboot in order to have zsh as your default shell"
echo "Jobs done!"
| true |
45584a6620c9d069c3c79120f58b33d499781fe2 | Shell | hjanime/FreeHiC | /run_FreeHiC.sh | UTF-8 | 1,152 | 2.703125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Load the FreeHi-C parameter file given as $1, echo every parameter for the
# record, then launch the pipeline with the log captured next to the file.
paraFile=$1
# FIX: quote the parameter-file path (here and in the redirect below) so
# paths containing spaces survive word-splitting.
source "$paraFile"
## ==========================
## print the parameter values
## ==========================
echo "projDir=${projDir}"
echo "fastqFile=${fastqFile}"
echo "ref=${ref}"
echo "refrag=${refrag}"
echo "outDir=${outDir}"
echo "simuName=${simuName}"
echo "summaryFile=${summaryFile}"
echo "bwaDir=${bwaDir}"
echo "samtoolsDir=${samtoolsDir}"
echo "bedtoolsDir=${bedtoolsDir}"
echo "train=${train}"
echo "postProcess=${postProcess}"
echo "coreN=$coreN"
echo "mismatchN=${mismatchN}"
echo "gapN=${gapN}"
echo "mismatchP=${mismatchP}"
echo "gapP=${gapP}"
echo "chimericP=${chimericP}"
echo "simuN=${simuN}"
echo "readLen=${readLen}"
echo "resolution=${resolution}"
echo "lowerBound=${lowerBound}"
echo "refragU=${refragU}"
echo "cutsite=${cutsite}"
## =================
## Start of FreeHi-C
## =================
bash freeHiC.sh "$projDir" "$fastqFile" "$outDir" "$simuName" "$ref" "$bwaDir" "$samtoolsDir" "$bedtoolsDir" "$train" "$postProcess" "$coreN" "$mismatchN" "$gapN" "$mismatchP" "$gapP" "$chimericP" "$simuN" "$readLen" "$resolution" "$lowerBound" "$refragU" "$cutsite" "$refrag" "$summaryFile" >"$paraFile.log"
| true |
f430291fc33248542fa7dc4e53664340daa77af2 | Shell | 0z-cloud/vx | /ansible/roles/!_acme/letsencrypt-pacemaker/files/acme_sh/deploy/keychain.sh | UTF-8 | 663 | 2.71875 | 3 | [
"MIT",
"LicenseRef-scancode-public-domain"
] | permissive | #!/bin/bash
######## Public functions #####################
#domain keyfile certfile cafile fullchain
# acme.sh deploy hook: import the issued key and certificate chain into the
# macOS system keychain.
keychain_deploy() {
  _cdomain="$1"
  _ckey="$2"
  _ccert="$3"
  _cca="$4"
  _cfullchain="$5"
  _debug _cdomain "$_cdomain"
  _debug _ckey "$_ckey"
  _debug _ccert "$_ccert"
  _debug _cca "$_cca"
  _debug _cfullchain "$_cfullchain"
  # FIX: propagate failures - the original unconditionally returned 0, so
  # acme.sh reported the deploy as successful even when an import failed.
  for _f in "$_ckey" "$_ccert" "$_cca" "$_cfullchain"; do
    /usr/bin/security import "$_f" -k "/Library/Keychains/System.keychain" || return 1
  done
  return 0
}
| true |
5bed275b8c79448bfa4b153aefd98148c40fc10b | Shell | andymoe/nodester | /scripts/gitrepoclone.sh | UTF-8 | 502 | 3.15625 | 3 | [
"Apache-2.0"
] | permissive | #! /bin/bash
# post-commit hook to create git file directory for node subdomain
# cd ..
# Derive the checkout directory name from the bare repo dir ("<name>.git").
gitdirsuffix=${PWD##*/}
gitdir=${gitdirsuffix%.git}
# Existing checkout: update it.
# NOTE(review): `exec` replaces this shell with git-update-server-info, so
# nothing below this if-block runs on the pull path - the app restart at the
# bottom only ever happens on the initial clone. Confirm this is intended.
if [ -d "../$gitdir" ]; then
    cd ../$gitdir;
    unset GIT_DIR;
    git pull;
    exec git-update-server-info;
fi;
# First push: create the working copy next to the bare repo.
if [ ! -d "../$gitdir" ]; then
    git clone . ../$gitdir/;
fi;
# kill and restart the app
cd ../$gitdir/;
P=`cat .app.pid`;
kill ${P};
sleep 1;
curl "http://127.0.0.1:4001/app_restart?repo_id=${gitdir}&restart_key=KeepThisSecret" >/dev/null 2>&1 | true |
d7f48832cc91417f6d97618e0db65c91759950bd | Shell | DorinesRosario/project_pp | /12.plots_psiblast_svm/pb_K3_J2.35/set_by_set/bash_mcc_set_by_set.sh | UTF-8 | 458 | 2.5625 | 3 | [] | no_license | #!/bin/bash
# Input and output share the same set_by_set directory.
input_files=/home/rosario/Desktop/project_pp/12.plots_psiblast_svm/pb_K3_J2.35/set_by_set/
output_files=/home/rosario/Desktop/project_pp/12.plots_psiblast_svm/pb_K3_J2.35/set_by_set/
# For every SVM prediction file, compute the MCC against its feature file.
# FIX: quote the expansions so paths containing spaces survive.
for file in "$input_files"/*.svm3.pred; do
base=$(basename "$file" .svm3.pred)
python mcc_script.py "$input_files/$base.features.txt" "$input_files/$base.svm3.pred" "$output_files/$base.mcc.txt"
done
#paste -d '' structure.topology.txt sequence.treated.txt > output.txt | true |
0a167807acce39db3c762754b0c557e03858d03c | Shell | jboyer87/Common | /GitHooks/prepare-commit-msg | UTF-8 | 569 | 3.96875 | 4 | [] | no_license | #!/bin/sh
#
# Prepends the feature branch name to the commit messages against this branch IE:
# 123 - [commit message here]
#
# NOTE(review): this hook uses bash arrays and [[ ]]; its shebang should be
# #!/bin/bash rather than #!/bin/sh.
if [ -z "$BRANCHES_TO_SKIP" ]; then
  BRANCHES_TO_SKIP=(master test develop)
fi
# Branch name with any "feature/"-style prefix stripped.
BRANCH_NAME=$(git symbolic-ref --short HEAD)
BRANCH_NAME="${BRANCH_NAME##*/}"
BRANCH_EXCLUDED=$(printf "%s\n" "${BRANCHES_TO_SKIP[@]}" | grep -c "^$BRANCH_NAME$")
# FIX: the pattern was written "\$BRANCH_NAME\ - ", i.e. the *literal* text
# "$BRANCH_NAME - ", so an existing prefix was never detected and the branch
# name was prepended again on every amend/rewording.
BRANCH_IN_COMMIT=$(grep -c "$BRANCH_NAME - " "$1")
if [ -n "$BRANCH_NAME" ] && ! [[ $BRANCH_EXCLUDED -eq 1 ]] && ! [[ $BRANCH_IN_COMMIT -ge 1 ]]; then
  sed -i.bak -e "1s/^/$BRANCH_NAME - /" "$1"
fi
e3ede8e5c729082335b83afc809bdf5f8aedc740 | Shell | BeastNeedsMoreTorque/primaries-2016 | /raw-assets/generate_39x60_headshots.sh | UTF-8 | 655 | 3.359375 | 3 | [] | no_license | #!/bin/sh
DIR="$(dirname "$0")"
IN_DIR="$DIR/headshots"
OUT_DIR="$DIR/../assets/images"
for path in $(ls $IN_DIR/*.png); do
basename="$(basename "$path")"
# Headshots start at 100x155px.
#
# Steps:
#
# 1. Add a 5px border, making it 110x165px. (This is so we can add a blur)
# 2. Add a white outline, with a blur.
# 3. Resize to 48x72px.
set -x
convert "$IN_DIR/$basename" \
-bordercolor transparent -border 5 \
\( +clone -channel A -blur 7x7 -level 0,10% +channel +level-colors white \) \
-compose DstOver -composite \
-thumbnail 48x72 \
-define png:include-chunk=none \
"$OUT_DIR/$basename"
set +x
done
| true |
966b5e579b2ca4e3f1908d604093cd9ba796728a | Shell | mojodna/marblecutter-tilezen | /functions/indexer/bin/get_info.sh | UTF-8 | 1,393 | 3.234375 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/env bash
# Gather raster metadata for the given input (path/URL) and emit it via
# build_metadata.py, with AWS credentials sourced from the instance profile
# when not already present in the environment.
input=$1
set -eo pipefail
# TODO figure this out from the path of this file
export PATH=/var/task/bin:$PATH
export PYTHONPATH=/var/task/.local/lib/python2.7/site-packages:/var/runtime
# Populate AWS credentials from the EC2 instance-metadata service when none
# were provided via the environment; no-op otherwise.
function update_aws_credentials() {
  set +u
  # attempt to load credentials from an IAM profile if none were provided
  if [[ -z "$AWS_ACCESS_KEY_ID" || -z "$AWS_SECRET_ACCESS_KEY" ]]; then
    set +e
    local role=$(curl -sf --connect-timeout 1 http://169.254.169.254/latest/meta-data/iam/security-credentials/)
    local credentials=$(curl -sf --connect-timeout 1 "http://169.254.169.254/latest/meta-data/iam/security-credentials/${role}")
    export AWS_ACCESS_KEY_ID=$(jq -r .AccessKeyId <<< $credentials)
    export AWS_SECRET_ACCESS_KEY=$(jq -r .SecretAccessKey <<< $credentials)
    export AWS_SESSION_TOKEN=$(jq -r .Token <<< $credentials)
    set -e
  fi
  set -e
}
update_aws_credentials
# FIX: quote $input everywhere - raster paths may contain spaces.
info=$(rio info "$input")
resolution=$(get_resolution.py "$input")
# Feed the raster's footprint plus per-band metadata to build_metadata.py.
rio shapes --mask --as-mask --precision 6 --sampling 100 "${input}" | \
  build_metadata.py \
    --meta \
    dimensions=$(jq -c '.shape | reverse' <<< $info) \
    bands=$(jq -c .count <<< $info) \
    dtype=$(jq -c .dtype <<< $info) \
    crs="$(jq -c .crs <<< $info)" \
    colorinterp=$(jq -c .colorinterp <<< $info) \
    resolution=$(jq -c .res <<< $info) \
    resolution_in_meters=${resolution}
| true |
336d7bda9611bd5177eb5c5787b6ddbdc6195358 | Shell | zpooky/dotfiles | /dotfiles/lib/vimcpp/shared.sh | UTF-8 | 13,648 | 3.78125 | 4 | [] | no_license | #!/usr/bin/env bash
#==================================================================
#=======GTEST=Stuff================================================
#==================================================================
# arg: path
# out: 0:true/1:false
# result: $test_EXECUTABLE
function find_test_executable() {
  # Walk up from the given file's directory until a TESTMARKER file is
  # found; the marker's content names the test executable that lives beside
  # it. On success sets the global $test_EXECUTABLE and returns 0; exits
  # the script with status 1 when no marker exists.
  local path
  # FIX: quote the dirname argument so paths with spaces survive.
  path="$(dirname "$1")"
  echo "path: ${path}"
  search_path_upwards "${path}" "TESTMARKER"
  if [ $? -eq 0 ]; then
    local test_MARKER="${search_RESULT}/TESTMARKER"
    local TEST_EXECUTABLE_NAME
    # Split declaration and command substitution so a cat failure is not
    # masked by the always-successful `local`.
    TEST_EXECUTABLE_NAME=$(cat "${test_MARKER}")
    test_EXECUTABLE="${search_RESULT}/${TEST_EXECUTABLE_NAME}"
    return 0
  else
    echo "thetest executable was not found"
    exit 1
  fi
}
# ======
function is_gtest_file() {
return 0
}
# arg: strline
# out: 0:true/1:false
function is_line_gtest() {
  # Return 0 when the given source line opens a gtest case - TEST(...),
  # TEST_P(...) or TEST_F(...). On a match, BASH_REMATCH[1]/[2] carry the
  # suite and test name for the caller (same patterns, tried in the same
  # order as the original =~ chain).
  local pattern
  for pattern in \
    '^[[:space:]]*TEST\((.+)[[:space:]]*,[[:space:]]*(.+)\)' \
    '^[[:space:]]*TEST_P\((.+)[[:space:]]*,[[:space:]]*(.+)\)' \
    '^[[:space:]]*TEST_F\((.+)[[:space:]]*,[[:space:]]*(.+)\)'; do
    if [[ "$1" =~ $pattern ]]; then
      return 0
    fi
  done
  return 1
}
# arg: file - a gtest source file
# arg: line - 1-based cursor line within that file
# out: 0:true/1:false
# result: populates the globals $test_matches (gtest_filter entries),
#         $group_matches (unique suite names) and $all_tests (0 when the
#         cursor pinned a single test, 1 when all tests should run)
function smart_gtest_test_cases() {
  local in_FILE="$1"
  local in_SEARCH_line="$2"
  if [ ! -e "$in_FILE" ]; then
    return 1
  fi
  if [ ! -f "$in_FILE" ]; then
    return 1
  fi
  group_matches=()
  test_matches=()
  all_tests=1
  local line_cnt=1
  while IFS='' read -r line || [[ -n "$line" ]]; do
    # TODO count nested levels{} to figure out if we are in root(meaning all
    # tests should run) or that the cursor are inside a test function(meaning
    # only that test should be run(the last in the array))
    is_line_gtest "$line"
    if [ $? -eq 0 ]; then
      # echo "./test/thetest --gtest_filter=\"*${BASH_REMATCH[1]}.${BASH_REMATCH[2]}*\""
      # echo "$line_cnt: $line"
      # echo "${BASH_REMATCH[@]}"
      # echo "base[1]: ${BASH_REMATCH[1]}"
      # echo "base[2]: ${BASH_REMATCH[2]}"
      local exact_match="${BASH_REMATCH[1]}.${BASH_REMATCH[2]}"
      # Also match parameterized instantiations, e.g.
      # Default/ReadWriteLockThreadTest.threaded_TryPrepare/13 (5 ms)
      local param_match="*${BASH_REMATCH[1]}.${BASH_REMATCH[2]}/*"
      test_matches+=("${exact_match}:${param_match}")
      group_matches+=("${BASH_REMATCH[1]}")
    fi
    # if we are currently on the searched line
    if [ $in_SEARCH_line -eq $line_cnt ]; then
      # TODO if [ ! $nested_count -eq 0 ]; then
      # echo "matches ${#test_matches[@]}"
      # if there is more than zero tests
      if [ ${#test_matches[@]} -gt 0 ]; then
        # take the last found test (the one nearest above the cursor)
        test_matches=(${test_matches[-1]})
        group_matches=(${group_matches[-1]})
        all_tests=0
        # echo "constraint ${test_matches}"
        break
      fi
      # fi
    fi
    local line_cnt=$((line_cnt + 1))
  done <"$in_FILE"
  # deduplicate the collected suite names
  group_matches=($(for v in "${group_matches[@]}"; do echo "$v";done| sort| uniq| xargs))
  return 0
}
# arg: file - a gtest source file
# arg: line - 1-based cursor line within that file
# out: 0 on success; 1 when the file is missing or no test was declared at
#      or above the cursor line
# result: sets the global $test_matcher to "<exact>:<parameterized>" gtest
#         filter entries for the test nearest above the cursor
function gtest_for_file_line() {
  local in_FILE="$1"
  local in_SEARCH="$2"
  if [ ! -e "$in_FILE" ]; then
    echo "file '${in_FILE}' does not exist"
    return 1
  fi
  if [ ! -f "$in_FILE" ]; then
    echo "is not a file '${in_FILE}'"
    return 1
  fi
  test_matcher=""
  local test_cnt=0
  local line_cnt=1
  while IFS='' read -r line || [[ -n "$line" ]]; do
    # TODO count nested levels{} to figure out if we are in root(meaning all
    # tests should run) or that the cursor are inside a test function(meaning
    # only that test should be run(the last in the array))
    # echo "${line}"
    is_line_gtest "${line}"
    if [ $? -eq 0 ]; then
      local exact_match="${BASH_REMATCH[1]}.${BASH_REMATCH[2]}"
      # Also match parameterized instantiations, e.g.
      # Default/ReadWriteLockThreadTest.threaded_TryPrepare/13 (5 ms)
      local param_match="*${BASH_REMATCH[1]}.${BASH_REMATCH[2]}/*"
      test_matcher="${exact_match}:${param_match}"
      local test_cnt=$((test_cnt + 1))
    fi
    # if we are currently on the searched line
    if [ $in_SEARCH -eq $line_cnt ]; then
      # if there is more than zero tests
      if [ $test_cnt -gt 0 ]; then
        break
      else
        echo "not tests found '${test_cnt}'"
        return 1
      fi
    fi
    local line_cnt=$((line_cnt + 1))
  done <"$in_FILE"
  # echo "constraint ${test_matcher}"
  return 0
}
#==================================================================
#====UTILS=========================================================
#==================================================================
# Walk from `path` up toward the filesystem root, looking for any of the
# given needle entries (file or directory) inside each visited directory.
# On the first hit the containing directory (without the needle) is
# stored in the global `search_RESULT`.
#
# arg: $1   - start path (file or directory)
# arg: $2.. - one or more entry names to look for
# out: 0 when a needle was found / 1 otherwise
# result: search_RESULT
function search_path_upwards() {
  local path="${1}"
  # Drop the path from "$@" so the loop below only iterates the needles.
  # (Previously "${@}" included $1, so "$path/$path" was probed as well.)
  shift
  if [ ! -d "${path}" ]; then
    # not a directory, start from its parent directory
    path="$(dirname "${path}")"
  fi
  while [[ "$path" != "/" ]]; do
    local needle
    for needle in "${@}"; do
      local test_path="${path}/${needle}"
      if [ -e "${test_path}" ]; then
        search_RESULT="${path}"
        return 0
      fi
    done
    path="$(readlink -f "${path}/..")"
  done
  return 1
}
function is_cygwin() {
  # True when the kernel name reported by uname contains "CYGWIN".
  case "$(uname -s)" in
    *CYGWIN*) return 0 ;;
    *) return 1 ;;
  esac
}
# Check whether a command/builtin/function named $1 is available to this
# shell.  Uses the portable `command -v` probe instead of `hash` and
# drops the previously commented-out `which` experiment.
# arg: $1 - command name
# out: 0 when available / 1 otherwise
function has_feature() {
  local feature="$1"
  if command -v "$feature" > /dev/null 2>&1; then
    return 0
  fi
  return 1
}
#==================================================================
#====TMUX==========================================================
#==================================================================
# Create a new tmux window named $1 and capture its window id.
# The window's first command writes its #{window_id} into a temp file
# that we poll for; afterwards $SHELL keeps the window alive.
# NOTE(review): the poll loop has no timeout and will spin forever if
# the window never starts - consider bounding it.
# result (global): window_id
function tmux_new_window(){
  local name="${1}"
  # -u: only generate the name; the window itself creates the file
  local tmp_wid=$(mktemp /tmp/tmp.XXXXXXXXXXXXXX -u)
  local comm="tmux display -p '#{window_id}' > $tmp_wid"
  tmux new-window -n "${name}" "${comm};${SHELL}"
  if [ ! $? -eq 0 ]; then
    return 1
  fi
  # wait for the handshake file to appear
  while [ ! -e "${tmp_wid}" ]; do
    sleep 0.1
  done
  window_id=$(cat ${tmp_wid})
  rm $tmp_wid
  return 0
}
# Focus window $2 inside session $1; an empty session name targets the
# session the client is currently attached to.
function switch_tmux_window() {
  local target_session="${1}"
  local target_window="${2}"
  tmux select-window -t "${target_session}:${target_window}"
}
# Thin wrappers around the corresponding `tmux list-*` commands.
tmux_list_clients() {
  tmux list-clients
}

tmux_list_sessions() {
  tmux list-sessions
}

tmux_list_windows() {
  tmux list-windows
}

tmux_list_panes() {
  # Panes of the current window; use `tmux list-panes -t :N` manually to
  # inspect window N instead.
  tmux list-panes
}
# Type `command` followed by Enter (C-m) into pane $3 of window $2 in
# session $1 (empty session = current session).
function tmux_send_keys() {
  local target_pane="${1}:${2}.${3}"
  local keys="${4}"
  tmux send-keys -t "${target_pane}" "$keys" C-m
}
# Type `command` followed by Enter (C-m) into the pane addressed by its
# unique tmux pane id (e.g. "%38"), independent of session/window.
# Implements the former TODO stub: `tmux send-keys -t %38 "ls" C-m`.
function tmux_send_keys_id() {
  local pane_id="${1}"
  local command="${2}"
  tmux send-keys -t "${pane_id}" "$command" C-m
}
# function tty_for_pid() {
# echo ""
# }
#
# function tty_for_exe() {
# local exe="${1}"
# pids=( $(pgrep "${exe}") )
# if [ ! $? -eq 0 ]; then
# echo "pgrep failed"
# return 1
# fi
#
# if [ ${#pids[@]} -gt 1 ]; then
# echo "ambigous pids"
# return 1
# fi
#
# if [ ! ${#pids[@]} -eq 1 ]; then
# return 1
# fi
#
# tty_for_pid "${pids[0]}"
#
# # snap=$(ps aux | grep "${exe}")
# # echo "snap: $snap"
# # if [ ! -z "${snap}" ]; then
# #
# # exe_tty=""
# # return 0
# # else
# # return 1
# # fi
# }
# » ll /proc/10120/fd
# lrwxrwxrwx 1 fredrik Domain Users 0 Apr 20 15:18 0 -> /dev/pty17
# lrwxrwxrwx 1 fredrik Domain Users 0 Apr 20 15:18 1 -> /dev/pty17
# lrwxrwxrwx 1 fredrik Domain Users 0 Apr 20 15:18 10 -> /dev/pty17
# lrwxrwxrwx 1 fredrik Domain Users 0 Apr 20 15:18 2 -> /dev/pty17
# function tmux_pane_tty() {
# local session="${1}"
# local window="${2}"
# local pane_idx="${3}"
# # tmux list-panes -t :gdb.1 -F "#{pane_id} #{pane_index} #{pane_tty}"
# pane_tty=""
# return 1
# # TODO
# }
# Resolve the parent pid (PPID) of a running process.
# The previous version checked $? right after an assignment whose last
# pipeline stage was `tail` (always 0), so a dead pid was never detected.
# arg: $1 - pid to inspect
# out: 0 on success / 1 when the pid is not running
# result (global): ppid_out
function ppid_for_pid() {
  local pid="${1}"
  local str
  is_cygwin
  if [ $? -eq 0 ]; then
    # Cygwin ps columns: PID PPID PGID WINPID TTY UID STIME COMMAND
    str=($(ps -p "${pid}" | tail -1))
    # with an unknown pid only the header remains, so field 0 is "PID"
    if [ "${str[0]:-}" != "${pid}" ]; then
      echo "pid is not running"
      return 1
    fi
    ppid_out="${str[1]}"
  else
    # "ppid=" selects just the parent pid and suppresses the header
    str=$(ps -p "${pid}" -o ppid=)
    if [ -z "${str}" ]; then
      echo "pid is not running"
      return 1
    fi
    # strip the column padding ps adds
    ppid_out="${str// /}"
  fi
  return 0
}
# function ppid_for_exe() {
# local exe="${1}"
#
# pids=($(pgrep "${exe}"))
# if [ ! $? -eq 0 ]; then
# echo "failed to pgrep"
# return 1
# fi
#
# if [ ${#pids[@]} -eq 0 ]; then
# echo "no running ${exe}"
# return 1
# fi
#
# }
# Resolve the shell PID running inside the pane with tmux pane id $1
# (e.g. "%38"), by parsing "[id] pid" pairs from `tmux list-panes`.
# out: 0 on success / 1 on failure (diagnostic on stdout)
# result (global): pid_out
function pid_for_pane_id() {
  local pane_id="${1}"
  local needle="[${pane_id}]"
  # NOTE(review): the $? below tests `local` (always 0), not tmux - a
  # failing tmux invocation is not detected here; confirm before relying
  # on the error branch.
  local str=$(tmux list-panes -t "${pane_id}" -F "[#{pane_id}] #{pane_pid}")
  if [ $? -eq 0 ]; then
    # scan the word list: the pid directly follows its "[id]" marker
    local arr=(${str})
    local next=false
    local set=false
    local id=""
    for current in "${arr[@]}"; do
      if [ "$next" = true ]; then
        local result="${current}"
        local set=true
        break
      fi
      if [ "${current}" = "${needle}" ]; then
        local next=true
      fi
    done
    if [ "$set" = true ]; then
      pid_out="${result}"
      return 0
    else
      echo "needle: '${needle}' was not found"
      return 1
    fi
  else
    # NOTE(review): ${command} is never assigned in this function, so
    # this message prints an empty quoted string.
    echo "failed to: '${command}'"
    return 1
  fi
}
# Translate session/window/pane-index coordinates into tmux's unique
# pane id (e.g. "%38").
# arg: $1 - session ("" = current), $2 - window, $3 - pane index
# out: 0 on success / 1 on failure (diagnostic on stdout)
# result (global): pane_id
function tmux_pane_id_for() {
  local session="${1}"
  local window="${2}"
  local pane_idx="${3}"
  local str
  # list panes as "[index] id" pairs, e.g. "[1] %38 [2] %39".
  # Assign separately from `local` so $? really reflects tmux's status
  # (previously it tested the `local` builtin, which always succeeds).
  str=$(tmux list-panes -t "${session}":"${window}" -F "[#{pane_index}] #{pane_id}")
  if [ $? -ne 0 ]; then
    # (typo "fauiled" fixed)
    echo "failed to tmux list-panes"
    return 1
  fi
  # scan the word list: the id directly follows its "[idx]" marker
  local arr=($str)
  local next=false
  local set=false
  local id=""
  for c in "${arr[@]}"; do
    if [ "$next" = true ]; then
      id="${c}"
      set=true
      break
    fi
    if [ "${c}" = "[${pane_idx}]" ]; then
      next=true
    fi
  done
  if [ "$set" = true ]; then
    pane_id="${id}"
    return 0
  else
    return 1
  fi
}
# function pids_for_exe_and_ppid() {
# # ps aux [0][0.0s][]
# # PID PPID PGID WINPID TTY UID STIME COMMAND
# # 8320 1804 8320 5488 pty6 1051816 11:43:37 /home/fredrik/bin/sshpass
# # 300 1512 300 9696 pty22 1051816 12:00:10 /home/fredrik/bin/vim
# local exe="${1}"
# local pane_pid="${2}"
#
# is_cygwin
# if [ $? -eq 0 ]; then
# return 1
# else
# echo "no linux support"
# exit 1
# fi
# }
# Check whether a process of executable $4 is a direct child of the
# shell running in pane $3 of window $2 in session $1.
# out: 0 when such a process exists / 1 otherwise (diagnostic on stdout)
function is_exe_running_in_pane() {
  local session="${1}"
  local window="${2}"
  local pane_idx="${3}"
  local exe="${4}"
  # coordinates -> unique pane id (sets global pane_id)
  tmux_pane_id_for "${session}" "${window}" "${pane_idx}"
  if [ ! $? -eq 0 ]; then
    echo "failed to find pane: ${session}:${window}.${pane_idx}"
    return 1
  fi
  # pane id -> pid of the shell inside the pane (sets global pid_out)
  pid_for_pane_id "${pane_id}"
  if [ ! $? -eq 0 ]; then
    echo "failed to get PPID from pid"
    return 1
  fi
  pane_pid="${pid_out}"
  # candidate processes of the wanted executable.
  # NOTE(review): the $? below tests `local` (always 0), not pgrep; also
  # pgrep matches process names, so a full path in $exe may need
  # `pgrep -f` - confirm against callers.
  local pids=($(pgrep "${exe}"))
  if [ ! $? -eq 0 ]; then
    echo "failed to pgrep: ${exe}"
    return 1
  fi
  pids_cnt=${#pids[@]}
  if [ ${pids_cnt} -eq 0 ]; then
    echo "number of pids: '${pids_cnt}', exe: '${exe}' ppid: '${pane_id}'"
    return 1
  fi
  # success iff some candidate's parent is the pane's shell
  for pid in "${pids[@]}"; do
    ppid_for_pid "${pid}"
    if [ $? -eq 0 ]; then
      if [ "${ppid_out}" = "${pane_pid}" ]; then
        return 0
      fi
    fi
  done
  return 1
}
# Check whether gdb is running in pane 1 of the "sp_gdb" window of the
# current session.
function is_sp_gdb_running() {
  # TODO will not work when it is aliased
  local gdb_path
  gdb_path="$(which gdb)"
  is_exe_running_in_pane "" "sp_gdb" "1" "${gdb_path}"
}
| true |
9b0d51f55bc0699467254e8e48fe368b773845bd | Shell | mustssr/RoadToDevOps | /01-installation-scripts/06-Gitlab/01-install-gitlab-bydocker.sh | UTF-8 | 717 | 2.78125 | 3 | [] | no_license | #!/bin/bash
# Install GitLab EE as a Docker container, persisting config/logs/data
# under $GITLAB_HOME and publishing HTTP/HTTPS on the host.
export GITLAB_HOME=/data/gitlab-ee-data

[ -d $GITLAB_HOME/data ] || mkdir -p $GITLAB_HOME/data
[ -d $GITLAB_HOME/logs ] || mkdir -p $GITLAB_HOME/logs
[ -d $GITLAB_HOME/config ] || mkdir -p $GITLAB_HOME/config

echo "Asia/Shanghai" > /etc/timezone

# First non-loopback IPv4 address of the host, used as container hostname.
# NOTE(review): `tr -d "addr:"` deletes the characters a/d/r/:
# individually (harmless for dotted digits) - verify intent.
git_ip=$(ip a|grep inet|grep -v 127.0.0.1|grep -v inet6 | awk '{print $2}' | tr -d "addr:" | sed -n '1p' | awk -F "/" '{print$1}')

# Fixed: "-p 80:80\" had no space before the backslash, so the line
# continuation fused it with the next token into "-p 80:80--name",
# which docker rejects.
docker run --detach \
--hostname $git_ip \
-p 443:443 -p 80:80 \
--name gitlab-ee \
--restart always \
-v $GITLAB_HOME/config:/etc/gitlab \
-v $GITLAB_HOME/logs:/var/log/gitlab \
-v $GITLAB_HOME/data:/var/opt/gitlab \
-v /etc/localtime:/etc/localtime \
-v /etc/timezone:/etc/timezone \
gitlab/gitlab-ee:latest
d0fa087b0c1b33fa828166ff8c28165b190e6ec2 | Shell | mycloudlab/kafka-perda-dados | /scripts-helper/verify_started_kafka | UTF-8 | 425 | 3.453125 | 3 | [] | no_license | #!/bin/bash
# Wait (up to 10 one-second tries) for the "kafka1" service of the
# docker-compose file given as $1 to log "started"; exit 1 on timeout.
set -e

x=1
while [ $x -le 10 ]
do
echo "Aguardando start do kafka... $x"
if [[ $( docker-compose -f $1 logs kafka1 | tail | grep started | wc -l ) -ne 1 ]]; then
sleep 1
else
break
fi
x=$(( $x + 1 ))
done

# After exhausting every attempt the loop leaves x at 11.  The previous
# check compared against 6, which could never match, so a start failure
# was always reported as success.
if [[ $x -gt 10 ]]; then
echo "Kafka não iniciou, tem algum problema no ambiente que precisa ser corrigido verifique o log dos containeres."
exit 1
else
echo "kafka iniciado."
fi
| true |
360b85cc958743337ca08d25600da531474211b8 | Shell | pruizf/pasrl | /proposition_extraction/kp/maui/run_maui_on_dir.sh | UTF-8 | 492 | 3.25 | 3 | [] | no_license | #!/usr/bin/env bash
# Run the Maui keyphrase extractor over every file in `indir`, writing
# one result file per input into `outdir`.
# Validate arguments first: the previous version tested $# only after
# having already consumed $1/$2, and accepted a single argument.
if [ $# -lt 2 ] ; then
echo "Usage: $0 indir outdir"
exit
fi

indir="$1"
outdir="$2"
mauidir="/home/pablo/projects/ie/tools/RAKEt"
modeldir="$mauidir/data/models"
jar="$mauidir/maui.jar"
#model="$modeldir/keyword_extraction_model"
model="$modeldir/kw_fao780__model"
# maximum number of keyphrases extracted per document
maxkps=50

[ ! -d "$outdir" ] && mkdir -p "$outdir"

# iterate over directory entries directly instead of parsing `ls`
for path in "$indir"/*; do
fn="$(basename "$path")"
echo "$fn"
java -Xmx1024m -jar "$jar" run "$indir/$fn" -m "$model" -v none -n $maxkps > "$outdir/$fn"
done
| true |
032e6460a946c9a1df0615d872fc6c8eadfc9a67 | Shell | pkmnfrk/ages-disasm | /fixbuild.sh | UTF-8 | 1,150 | 3.9375 | 4 | [] | no_license | #!/bin/sh
# Check which build mode the makefile uses, and rearrange the build directories
# accordingly.
# Copy this to .git/hooks/post-checkout to make sure your build directories are
# managed automatically.
# Switch the build folders for target $1 (e.g. "ages") into
# precompressed mode: the current build_$1 is parked as build_$1_e and
# the vanilla copy build_$1_v takes its place as build_$1.
function to_precmp {
  local base='build_'$1
  local vanilla="${base}_v"
  local editable="${base}_e"
  if [ -d "$editable" ]; then
    echo "ERROR: Folder '$editable' exists already, not fixing build mode"
    exit 1
  fi
  [ -d "$vanilla" ] || mkdir -v "$vanilla"
  mv -v "$base" "$editable"
  mv -v "$vanilla" "$base"
}
# Switch the build folders for target $1 (e.g. "ages") into modifiable
# mode: the current build_$1 is parked as build_$1_v and the editable
# copy build_$1_e takes its place as build_$1.
function to_modifiable {
  local base='build_'$1
  local vanilla="${base}_v"
  local editable="${base}_e"
  if [ -d "$vanilla" ]; then
    echo "ERROR: Folder '$vanilla' exists already, not fixing build mode"
    exit 1
  fi
  [ -d "$editable" ] || mkdir -v "$editable"
  mv -v "$base" "$vanilla"
  mv -v "$editable" "$base"
}
# Decide from the Makefile which build mode is requested and rearrange
# the build dirs accordingly.  The build/*_precompressed marker files
# indicate that the directories are currently in the other mode; when
# the marker is absent nothing needs to change.
if grep -q 'BUILD_VANILLA\s*=\s*true' Makefile; then
[ ! -f build/no_use_precompressed ] && exit 0
echo "Build mode change detected, switching to precompressed mode"
to_precmp 'ages'
to_precmp 'seasons'
else
[ ! -f build/use_precompressed ] && exit 0
echo "Build mode change detected, switching to modifiable mode"
to_modifiable 'ages'
to_modifiable 'seasons'
fi
| true |
50110be3afd217c419a15c9ee1d62921b19304ba | Shell | JustinMarcy/ClarksonUniversity | /Bash/Operations/whichyears | UTF-8 | 538 | 3.71875 | 4 | [] | no_license | #!/bin/bash
# Print every year between the given year and the current year (later
# bound exclusive) in which the date "$1 $2" falls on the same weekday
# as it does in year $3.  Usage: whichyears Mar 5 1995

current=$(date +%Y)                    # current year
target_day=$(date -d "$1 $2 $3" +%A)   # weekday of the date of interest

if [[ $# != 3 ]];then
  echo "Please enter month day year (example: Mar 5 1995)"
elif [[ $# == 3 ]];then
  # Walk the half-open year range [first, last): given..current for past
  # dates, current..given for future dates.
  if (( $3 <= current ));then
    first=$3
    last=$current
  else
    first=$current
    last=$3
  fi
  for (( year = first; year < last; year++ ));do
    if [[ $(date -d "$1 $2 $year" +%A) == "$target_day" ]];then
      echo $1 $2 $year is a $target_day
    fi
  done
fi
| true |
7316650f609534c1679a21fa713c9529f0b3d775 | Shell | saubermacherag/gpr-docker-publish | /entrypoint.sh | UTF-8 | 2,994 | 4.03125 | 4 | [
"MIT"
] | permissive | #!/bin/sh
#Publish Docker Container To GitHub Package Registry
####################################################
# Builds a Docker image and pushes it to either the GitHub Package
# Registry or Docker Hub, tagging it with the short commit SHA plus any
# git tags pointing at HEAD.
# NOTE(review): this file uses bash-only [[ ]] tests; it relies on
# /bin/sh being bash - confirm the runtime image.

# exit when any command fails
set -e

#check inputs
if [[ -z "$REGISTRY_TOKEN" ]]; then
echo "You must supply the environment variable REGISTRY_TOKEN."
exit 1
fi

if [[ -z "$INPUT_IMAGE_NAME" && -z "$INPUT_DOCKERHUB_REPOSITORY" ]]; then
echo "Set either the IMAGE_NAME or a valid DOCKERHUB_REPOSITORY."
exit 1
fi

if [[ -z "$INPUT_DOCKERFILE_PATH" ]]; then
echo "Set the DOCKERFILE_PATH input."
exit 1
fi

if [[ -z "$INPUT_BUILD_CONTEXT" ]]; then
echo "Set the BUILD_CONTEXT input."
exit 1
fi

# The following environment variables will be provided by the environment automatically: GITHUB_REPOSITORY, GITHUB_SHA

# Pick target registry: GitHub Package Registry unless a Docker Hub
# repository was configured.  In the Docker Hub branch DOCKER_REGISTRY
# stays unset on purpose, so `docker login` targets the default hub.
if [[ -z "$INPUT_DOCKERHUB_REPOSITORY" ]]; then
DOCKER_REGISTRY=docker.pkg.github.com
BASE_NAME="${DOCKER_REGISTRY}/${GITHUB_REPOSITORY}/${INPUT_IMAGE_NAME}"
# send credentials through stdin (it is more secure)
user=$(curl -s -H "Authorization: token ${REGISTRY_TOKEN}" https://api.github.com/user | jq -r .login)
# lowercase the username
username="$(echo ${user} | tr "[:upper:]" "[:lower:]")"
else
if [ -z "$INPUT_DOCKERHUB_USERNAME" ]
then
echo "If you use Docker Hub as repository please provide your username as DOCKERHUB_USERNAME."
exit 1
fi
username="${INPUT_DOCKERHUB_USERNAME}"
BASE_NAME="${INPUT_DOCKERHUB_REPOSITORY}"
fi

echo ${REGISTRY_TOKEN} | docker login -u "${username}" --password-stdin ${DOCKER_REGISTRY}

# Set Local Variables
shortSHA=$(echo "${GITHUB_SHA}" | cut -c1-12)
SHA_NAME="${BASE_NAME}:${shortSHA}"

# Build additional tags based on the GIT Tags pointing to the current commit
ADDITIONAL_TAGS=
for git_tag in $(git tag -l --points-at HEAD)
do
echo "Processing ${git_tag}"
ADDITIONAL_TAGS="${ADDITIONAL_TAGS} -t ${BASE_NAME}:${git_tag}"
done
echo "following additional tags will be created: ${ADDITIONAL_TAGS}"

# Add Arguments For Caching
BUILDPARAMS=""
if [ "${INPUT_CACHE}" == "true" ]; then
# try to pull container if exists
if docker pull ${BASE_NAME} 2>/dev/null; then
echo "Attempting to use ${BASE_NAME} as build cache."
BUILDPARAMS=" --cache-from ${BASE_NAME}"
fi
fi

# Build The Container (with an extra custom tag when INPUT_TAG is set)
if [ "${INPUT_TAG}" ]; then
CUSTOM_TAG="${BASE_NAME}:${INPUT_TAG}"
docker build $BUILDPARAMS -t ${SHA_NAME} -t ${BASE_NAME}${ADDITIONAL_TAGS} -t ${CUSTOM_TAG} -f ${INPUT_DOCKERFILE_PATH} ${INPUT_BUILD_CONTEXT}
docker push ${CUSTOM_TAG}
else
docker build $BUILDPARAMS -t ${SHA_NAME} -t ${BASE_NAME}${ADDITIONAL_TAGS} -f ${INPUT_DOCKERFILE_PATH} ${INPUT_BUILD_CONTEXT}
fi

# Push two versions, with and without the SHA
docker push ${BASE_NAME}
docker push ${SHA_NAME}

# expose results as GitHub Actions outputs
echo "::set-output name=IMAGE_SHA_NAME::${SHA_NAME}"
if [[ -z "$INPUT_DOCKERHUB_REPOSITORY" ]]; then
echo "::set-output name=IMAGE_URL::https://github.com/${GITHUB_REPOSITORY}/packages"
else
echo "::set-output name=IMAGE_URL::https://hub.docker.com/r/${INPUT_DOCKERHUB_REPOSITORY}"
fi
| true |
a6111f48dc1c2c3a8a448f2f1af4eaf0f7439d94 | Shell | RyWhal/BoxFTPShare | /boxshare.sh | UTF-8 | 1,197 | 4.125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Quick an dirty FTP upload tool for box.com
# Author: Ryan Whalen
# 2014

#function for usage statement. Called later in the script
usage()
{
cat << EOF
USAGE: $0 [OPTIONS]
OPTIONS:
-h Prints this help message
-u set the username ex. ryan@gmail.com [Required]
-f filename you wish to upload
-d specify a directory to sync all files from
EOF
}

USER=""
FILE=""
DIR=""

# no arguments at all -> show usage and quit
if [[ $# -eq 0 ]];
then
usage
exit
fi

#getopt to get parameters
while getopts "hu:f:d:" OPTION; do
case $OPTION in
h)
usage
exit 1
;;
u)
USER="$OPTARG"
;;
f)
FILE="$OPTARG"
;;
d)
DIR="$OPTARG"
;;
?)
#print usage if there is an unknown variable
usage
exit
;;
esac
done

# Fixed: the curl invocations were wrapped in backticks, so curl's
# stdout was captured and then re-executed as a shell command.
if [[ $DIR == "" ]];
then
# upload a single file (or comma separated list) over FTPS
curl -k \
--disable-epsv \
--ftp-skip-pasv-ip \
-u "$USER" \
--ftp-ssl \
--upload-file {"$FILE"} \
ftp://ftp.box.com/
else
# build a comma separated list of all regular files in the directory
# NOTE(review): parsing `ls -l` breaks on filenames with spaces -
# consider find -maxdepth 1 -type f instead.
FILE=`ls -l "$DIR" | awk 'NR!=1 && !/^d/ {print $NF}'| tr '\n' ',' | sed 's/.$//'`
# upload all collected files over FTPS
curl -k \
--disable-epsv \
--ftp-skip-pasv-ip \
-u "$USER" \
--ftp-ssl \
--upload-file {"$FILE"} \
ftp://ftp.box.com/
fi
#finish
| true |
fe45292440543c922bd318484907c2b23d7f080e | Shell | pengxie-bioinfo/Morph_analysis | /data/preprocess.sh | UTF-8 | 1,596 | 3.359375 | 3 | [] | no_license | #!/bin/sh
# Batch-preprocess neuron SWC reconstructions with Vaa3D: for every cell
# found under ${swc_path}, stage the raw SWC plus its soma marker line
# into temp_dir/ and launch the Vaa3D "preprocess" plugin in the
# background, throttled by total CPU usage.
# Expects config.conf to define swc_prefix, cell_type, brain_name and
# soma_path (NOTE(review): inferred from usage - confirm).
source ./config.conf
swc_path=$(echo ${swc_prefix}${cell_type})
cur_path=$(pwd)
if [ ! -e "processed_swc" ]; then mkdir processed_swc; fi

# cell name = 2nd "_"-separated token of each *.swc basename,
# processing the smallest files first (ls -lSr)
for cell_name in $(ls -lSr ${swc_path}/*swc |awk -F " " '{print $9}' |awk -F "/" '{print $NF}'|awk -F "_" '{print $2}')
do
# 1. Prepare SWC file
# numeric cell id: strip leading zeros from the zero-padded name
cell_no=$(echo ${cell_name} | sed 's/^0*//g')
input=$(echo ${cur_path}/temp_dir/${brain_name}_${cell_type}_${cell_name}.swc)
output=$(echo ${cur_path}/processed_swc/Whole/${brain_name}_${cell_type}_${cell_name}.processed.swc)
# skip cells that already have a processed output
if [ ! -e ${output} ]
then
sleep 1s
echo ${input} | awk -F "/" '{print $NF}'
# Input files: first matching raw SWC (ignoring "minification" copies)
cp $(find ${swc_path}/${brain_name}_${cell_name}*swc | grep -v "minification" |awk -F " " '{if(NR==1)print $1}') temp_dir/${brain_name}_${cell_type}_${cell_name}.swc
# soma marker: the CSV row whose 3rd column equals the cell id
awk -F "," -v x=${cell_no} '{if($3==x)print $0}' ${soma_path} >temp_dir/${brain_name}_${cell_type}_${cell_name}.apo
# Run: wait while total CPU usage across all processes exceeds 300%
while (( $(echo "$(ps -A -o %cpu | awk '{s+=$1} END {print s}') > 300" | bc -l) ))
do
echo $(ps -A -o %cpu | awk '{s+=$1} END {print "CPU usage:\t",s,"%"}')
sleep 1s
done
# launch Vaa3D preprocessing in the background
vaa3d64 -x preprocess -f preprocess -p "#i ${input} #o ${output} #l 2 #s 0 #m 70 #t 5 #r 0 #d 0 #f 0" &
fi
done
#rm ${cur_path}/processed_swc/Whole/*temp.swc
| true |
9a781c4c9b004b228bcd262dec73c88b95c82254 | Shell | ghostwang/hadooptools | /startCluster.sh | UTF-8 | 372 | 3.0625 | 3 | [] | no_license | #!/bin/sh
# Start any stopped VirtualBox VMs of the Hadoop cluster (headless).
# NOTE: arrays are a bash feature; this script relies on /bin/sh being
# bash despite its shebang.
normallist=("HadoopMaster" "HadoopSlave01" "HadoopSlave02")

for vm in "${normallist[@]}"
do
status=$(/usr/bin/VBoxManage showvminfo "$vm" |grep State|awk '{print $2}')
# $status is quoted: an empty value (e.g. unknown VM) previously made
# the test a syntax error ("[ = running ]").
if [ "$status" = "running" ]
then
echo "$vm is running,skip"
else
echo "$vm is not running, Starting"
/usr/bin/VBoxManage startvm "$vm" --type headless
fi
done
| true |
1728af2d14c30d41f0c8b564d7615510c877da9f | Shell | janxb/heroku-zipped-app-buildpack | /bin/compile | UTF-8 | 665 | 3.828125 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# bin/compile <build-dir> <cache-dir> <env-dir>
# Heroku buildpack compile step: download the zip named by the
# APP_ARCHIVE config var, unpack it and sync its contents into the
# build directory.
set -eu

BUILD_DIR=$1
CACHE_DIR=$2
ENV_DIR=$3

# Heroku exposes config vars as individual files inside ENV_DIR
APP_ARCHIVE=$(cat $ENV_DIR/APP_ARCHIVE)

ARCHIVE_PATH=$CACHE_DIR/src.zip
SOURCE_PATH=$CACHE_DIR/src

# remove leftovers from previous builds of the same cache dir
rm -f $ARCHIVE_PATH
rm -rf $SOURCE_PATH

echo "-----> Downloading $APP_ARCHIVE"
wget -q $APP_ARCHIVE -O $ARCHIVE_PATH >/dev/null
echo "-----> Unpacking application archive"
unzip -q $ARCHIVE_PATH -d $SOURCE_PATH

# archives that wrap everything in a single top-level folder get flattened
FOLDER_COUNT=$(ls -1 $SOURCE_PATH | wc -l)
if [ $FOLDER_COUNT -eq 1 ]; then
echo " Removing first folder level"
mv $SOURCE_PATH/*/* $SOURCE_PATH/
fi

rsync -qavI $SOURCE_PATH/ $BUILD_DIR
echo "-----> Application files deployed."
85b7b787a6215fbeb3d0e6ef8324c94ef681f888 | Shell | shudac/dotfiles | /.bash_profile | UTF-8 | 475 | 3 | 3 | [] | no_license | #!/usr/bin/env bash
# Simplified dotfile for video recordings

# Load dotfiles:
for file in ~/.{bash_prompt,aliases,private}; do
  [ -r "$file" ] && [ -f "$file" ] && source "$file";
done;
unset file;

#Git auto-complete
if [ -f ~/.git-completion.bash ]; then
  source ~/.git-completion.bash
fi

# Setting PATH for Python 3.7
# PATH="/Library/Frameworks/Python.framework/Versions/3.7/bin:${PATH}"
# export PATH

# Initialise rbenv when present.  (This line was previously fused onto
# the commented-out "export PATH" line above, which disabled it.)
if which rbenv > /dev/null; then eval "$(rbenv init -)"; fi
| true |
255391363ffabbc8c90fd579e7c4f04e6cb7de7b | Shell | mward29/hubot-stackstorm | /selfcheck.sh | UTF-8 | 7,932 | 4 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Paths to the tools the self-check shells out to.
docker="/usr/bin/sudo /usr/bin/docker"
st2="/usr/bin/st2"

# Message printed (with echo -e, so the \e escapes render) whenever a
# check fails.
failure="
===============================================
Uh oh! Something went wrong!
Please perform the steps outlined in the error message above
and then re-launch this script.
If you're still having trouble, gist the log files
and come see us in our Slack community:
\e[4mhttps://stackstorm.com/community-signup\e[0m
You can access Hubot logs with:
\e[1mdocker logs hubot\e[0m
StackStorm logs are stored in:
\e[1m/var/log/st2/\e[0m
"

# Message printed when all nine checks pass.
success="
===============================================
\e[1mEverything seems to be fine!\e[0m
Hubot is working, StackStorm commands are loaded normally
and messages from StackStorm are getting through.
If you can't see the bot in your chat at this point,
the most probable cause is incorrect login credentials.
Check that your bot is using the right credentials to log in.
If you installed StackStorm with the All-In-One Installer,
the Hubot init script is located at:
\e[1m/etc/init.d/docker-hubot\e[0m
If you're still having trouble, gist the log files
and come see us in our Slack community:
\e[4mhttps://stackstorm.com/community-signup\e[0m
You can access Hubot logs with:
\e[1mdocker logs hubot\e[0m
StackStorm logs are stored in:
\e[1m/var/log/st2/\e[0m
"
echo
echo -e "Starting the Nine-Step Hubot Self-Check Program"
echo -e "==============================================="
echo

# Precondition: the st2 CLI must be able to reach and authenticate with
# the StackStorm instance ("execution get" appears in successful output).
if [ "0" = "$($st2 action execute core.local cmd=echo 2>/dev/null | grep -c "execution get")" ]; then
echo -e "\e[31mStackStorm client couldn't connect to StackStorm.\e[0m"
echo
echo -e " Before you run the script you need to make sure"
echo -e " the StackStorm client can connect to the instance."
echo
echo -e " Authenticate with your credentials:"
echo -e " \e[1mexport ST2_AUTH_TOKEN=\`st2 auth <username> -p <password> -t\`\e[0m"
echo
echo -e " Check if you can connect to StackStorm:"
echo -e " \e[1mst2 action execute core.local cmd=echo\e[0m"
echo -e "$failure"
exit 1
fi

# Check if Hubot is installed and running
if [ "true" = "$($docker inspect --format='{{.State.Running}}' hubot 2>/dev/null)" ]; then
echo -e "Step 1: Hubot is running."
else
echo -e "\e[31mStep 1 failed: Hubot container is not running on this machine.\e[0m"
echo
echo -e " Try launching it with:"
echo
echo -e " \e[1mservice docker-hubot start\e[0m"
echo
echo -e " If there's no \"docker-hubot\" service, then"
echo -e " your StackStorm installation could be outdated."
echo -e " Try reinstalling or running the update script:"
echo
echo -e " \e[1msudo update-system\e[0m"
echo -e "$failure"
exit 1
fi

# Check if Hubot-stackstorm is installed (npm list inside the container)
npm=$($docker exec -it hubot npm list 2>/dev/null | grep hubot-stackstorm | sed -r "s/.*\s(hubot.*)\\r/\1/")
if [ "0" = "$(echo "$npm" | wc -c)" ]; then
echo -e "\e[31mStep 2 failed: Hubot-stackstorm is not installed inside the container.\e[0m"
echo
echo -e " It's possible the container is outdated or corrupted."
echo -e " Try removing it and restarting the init script:"
echo
echo -e " \e[1msudo service docker-hubot stop\e[0m"
echo -e " \e[1msudo docker rmi stackstorm/hubot\e[0m"
echo -e " \e[1msudo service docker-hubot start\e[0m"
echo -e "$failure"
exit 1
else
echo -e "Step 2: Hubot-stackstorm is installed ($npm)."
fi

# Check if there are any enabled StackStorm aliases
if [ "0" = "$($st2 action-alias list -a enabled 2>/dev/null | grep -c True)" ]; then
echo -e "\e[31mStep 3 failed: StackStorm doesn't seem to have registered and enabled aliases.\e[0m"
echo
echo -e " Create one or install a sample pack with aliases."
echo -e " The \"st2\" pack would be a good example:"
echo
echo -e " \e[1mst2 action execute packs.install packs=st2\e[0m"
echo -e "$failure"
exit 1
else
echo -e "Step 3: StackStorm has aliases that are registered and enabled."
fi

# Check that chatops.notify rule is present
if [ "0" = "$($st2 rule list 2>/dev/null | grep -c chatops.notify)" ]; then
echo -e "\e[31mStep 4 failed: Chatops.notify rule is not present.\e[0m"
echo
echo -e " ChatOps pack may not be installed or the rule may not be registered."
echo -e " Try to restart StackStorm first:"
echo
echo -e " \e[1mst2ctl restart\e[0m"
echo
echo -e " Then register the rule with:"
echo
echo -e " \e[1mst2ctl reload --register-all\e[0m"
echo -e "$failure"
exit 1
else
echo -e "Step 4: Chatops.notify rule is present."
fi

# Check that chatops.notify rule is enabled
if [ "0" = "$($st2 rule list 2>/dev/null | grep chatops.notify | grep -c True)" ]; then
echo -e "\e[31mStep 5 failed: Chatops.notify rule is present but disabled.\e[0m"
echo
echo -e " Enable it with the following command:"
echo
echo -e " \e[1mst2 rule enable chatops.notify\e[0m"
echo -e "$failure"
exit 1
else
echo -e "Step 5: Chatops.notify rule is enabled."
fi
# Drive Hubot once through its shell adapter: wait for startup, issue
# "hubot help" and capture the session output for steps 6 and 7.
# Fixed: the stderr redirect previously sat after a stray ";" and thus
# applied to an empty command instead of the pipeline.
hubotlog=$({ echo -n; sleep 5; echo 'hubot help'; echo; sleep 2; } | $docker exec -i hubot bash -c "export HUBOT_ADAPTER=shell; export EXPRESS_PORT=31337; bin/hubot" 2>/dev/null)

# Check that Hubot responds to help
if [ "0" = "$(echo "$hubotlog" | grep -c "help - Displays")" ]; then
echo -e "\e[31mStep 6 failed: Hubot doesn't respond to the \"help\" command.\e[0m"
echo
echo -e " Try reinstalling the container. This error shouldn't happen"
echo -e " unless the Hubot installation wasn't successful."
echo
echo -e " \e[1msudo service docker-hubot stop\e[0m"
echo -e " \e[1msudo docker rmi stackstorm/hubot\e[0m"
echo -e " \e[1msudo service docker-hubot start\e[0m"
echo -e "$failure"
exit 1
else
echo -e "Step 6: Hubot responds to the \"help\" command."
fi

# Check that hubot-stackstorm at least tried to load commands.
if [ "0" = "$(echo "$hubotlog" | grep -c "commands are loaded")" ]; then
echo -e "\e[31mStep 7 failed: Hubot doesn't try to load commands from StackStorm.\e[0m"
echo
echo -e " Try reinstalling the container and checking credentials."
echo -e " This error means the \"hubot-stackstorm\" plugin couldn't"
echo -e " load, connect to StackStorm or authenticate."
echo
echo -e " \e[1msudo service docker-hubot stop\e[0m"
echo -e " \e[1msudo docker rmi stackstorm/hubot\e[0m"
echo -e " \e[1msudo service docker-hubot start\e[0m"
echo -e "$failure"
exit 1
else
echo -e "Step 7: Hubot loads commands from StackStorm."
fi
# Post a message to a random throwaway channel and verify it both
# executed successfully in StackStorm and arrived at Hubot.
channel=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 32 | head -n 1)
execution=$($($st2 action execute chatops.post_message channel="$channel" message="Debug. If you see this you're incredibly lucky but please ignore." 2>/dev/null | grep "execution get") 2>/dev/null)
hubotlogs=$($docker logs hubot | grep "$channel")

# Check that post_message is executed successfully.
if [ "0" = "$(echo "$execution" | grep -c "succeeded")" ]; then
echo -e "\e[31mStep 8 failed: chatops.post_message doesn't work.\e[0m"
echo
echo -e " Something is wrong with your StackStorm instance,"
echo -e " because \"chatops.post_message\" couldn't finish."
echo
echo -e " Check StackStorm logs for more information."
echo -e "$failure"
exit 1
else
echo -e "Step 8: chatops.post_message execution succeeded."
fi

# Check that post_message is getting through.
# Fixed: the old test compared `echo "$hubotlogs" | wc -l` against 0,
# but echo of an empty string still emits one line, so this step could
# never fail.  Test for an empty grep result directly instead.
if [ -z "$hubotlogs" ]; then
echo -e "\e[31mStep 9 failed: chatops.post_message hasn't been received.\e[0m"
echo
echo -e " Try to check both Hubot and StackStorm logs for more information."
echo -e "$failure"
exit 1
else
echo -e "Step 9: chatops.post_message has been received."
fi

echo -e "$success"
exit 0
| true |
00edb8f2e447099fe1bec24cac6dbf45f0de599d | Shell | GoogleCloudPlatform/selkies-examples | /code-server/images/code-server-cloudshell/config/entrypoint.sh | UTF-8 | 1,276 | 3.125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Wait for the docker-in-docker sidecar, start the unix-socket proxy,
# then run either xpra (when USE_XPRA=true) or code-server in the
# foreground.
echo "INFO: Waiting for docker sidecar"
CERTFILE="/var/run/docker-certs/cert.pem"
# the sidecar signals readiness by writing its TLS cert
until [[ -f ${CERTFILE} ]]; do sleep 1; done

echo "INFO: Docker sidecar is ready, starting unix socket proxy"
sudo /usr/share/code-server/start-docker-unix-proxy.sh

# NOTE(review): the default expands to the literal five characters
# 'false' (including quotes); harmless since only 'true' enables the
# branch, but the inner quotes are probably unintended.
if [[ "${USE_XPRA:-'false'}" == 'true' ]]; then
    # Mimic healthz endpoint
    sudo touch /usr/share/xpra/www/healthz

    # Send xpra logs to stdout
    tail -F ${HOME}/.xpra.log &

    # Start xpra in forground, restarting it if it ever exits
    while true; do
        /usr/share/code-server/start-xpra.sh --daemon=no
        sleep 1
    done
    # unreachable while the loop above runs; kept for symmetry
    killall tail >/dev/null 2>&1
else
    echo "INFO: Starting code-server"
    exec /usr/local/bin/code-server --auth=none --bind-addr=0.0.0.0:3180
fi
f993dbb843bfbd9c3746fc5343e5617f0b30dfea | Shell | ProfessionalFarmer/lake | /mountain/getRefBaseAccording2ChrPos.sh | UTF-8 | 876 | 3.09375 | 3 | [] | no_license | #! /bin/bash
# Created on: 20160519
# Accepts "chr pos" records (from stdin, or from the file given as $1)
# and prints "chr<TAB>pos<TAB>refbase" using samtools faidx on hg19.
smts="/data/SG/Env/software_installed/samtools-1.2/samtools"
fa="/data/SG/Env/reference_data/ucsc.hg19.fasta"

# Turn each "chr pos" record into a region string, fetch the base with
# samtools faidx and reshape its two output lines (">chrN:P-P" header +
# base) into a single tab-separated row.
lookup() {
  # strip any leading chr/Chr prefix, then re-add a canonical "chr".
  # (The original `sed '#...#g'` was a sed comment, i.e. a no-op, and
  # the file branch was missing the pipe between cat and sed.)
  sed 's#^[Cc]hr##' | awk '{print "chr"$1":"$2"-"$2}' \
    | xargs -L 1 $smts faidx $fa | sed 's#^>##g' | awk -F '-' '{ \
      if(NR%2==1) {split($1,arr,":");printf("%s\t%s\t",arr[1],arr[2])} \
      else {print $1} \
    }'
}

if [ -z "$1" ]; then
  # read records from stdin
  lookup
else
  # read records from the given file
  cat "$1" | lookup
fi
fi
| true |
dc8ee0922f1217b0d3aceef76fd9b16289351299 | Shell | Miyurz/scripts | /Generic_scripts/occurrence.sh | UTF-8 | 311 | 3.625 | 4 | [] | no_license | #!/bin/bash
# Count how many whitespace-separated words in file $1 are exactly equal
# to $2 and print the total.
echo "Number to be searched $2 "
echo "File name passed : $1"
filename=$1
count=0
# read -r keeps backslashes literal; the redirect is quoted so an empty
# filename fails cleanly instead of expanding oddly
while read -r line
do
  # compare every word on the line against the needle
  for word in $line; do
    if [ "$2" == "$word" ]; then
      # arithmetic expansion instead of forking the external `expr`
      count=$((count + 1))
    fi
  done
done < "$filename"
echo count = $count
| true |
7a7a1137c0c260efdedc1187bcf674d146156511 | Shell | jrouly/hap-with-map | /aws/hierarchicalKMeans.sh | UTF-8 | 2,082 | 3.140625 | 3 | [
"Apache-2.0"
] | permissive | rm -r dump
mkdir dump
mkdir dump/level1
mkdir dump/level2
mkdir dump/level3
WORKTAR=$1
FILENAME=$(echo $WORKTAR | sed 's/.*\///g')
FOLDER=$(echo $FILENAME | sed 's/\..*//g')
wget $WORKTAR
tar xvf $FILENAME
hadoop fs -mkdir /user/hadoop/
hadoop fs -put $FOLDER .
# LEVEL 1
mahout canopy -i $FOLDER/vectors -o $FOLDER/canopy -t1 20 -t2 10 -xm mapreduce -ow -cl
mahout kmeans -i $FOLDER/vectors -o $FOLDER/level1 -c $FOLDER/canopy/clusters-0-final -x 5 -xm mapreduce -ow -cl
mahout clusterdump -s $FOLDER/level1/clusters-*-final/ -p $FOLDER/canopy/clusteredPoints -o dump/level1/root
# LEVEL 2
hadoop fs -mkdir $FOLDER/level2
mahout clusterpp -i $FOLDER/level1 -o $FOLDER/level2/data -xm sequential
rm -r data
hadoop fs -get $FOLDER/level2/data .
for x in `ls data | grep -v SUCCESS`; do
echo
mahout canopy -i $FOLDER/level2/data/$x -o $FOLDER/level2/canopy/$x -t1 10 -t2 5 -xm mapreduce -ow -cl
mahout kmeans -i $FOLDER/level2/data/$x -o $FOLDER/level2/$x -c $FOLDER/level2/canopy/$x/clusters-0-final -x 1 -xm mapreduce -ow -cl
done
rm -r data
# LEVEL 3
hadoop fs -mkdir $FOLDER/level3
rm -r level2
hadoop fs -get $FOLDER/level2 .
for x in `ls level2/ | grep -v data | grep -v canopy`; do
echo
mahout clusterdump -s $FOLDER/level2/$x/clusters-*-final/ -p $FOLDER/level2/canopy/$x/clusteredPoints -o dump/level2/$x
mahout clusterpp -i $FOLDER/level2/$x -o $FOLDER/level3/data/$x -xm sequential
done
rm -r level2
rm -r data
hadoop fs -get $FOLDER/level3/data .
for x in `ls data`; do
echo
for y in `ls data/$x | grep -v SUCCESS`; do
mahout canopy -i $FOLDER/level3/data/$x/$y -o $FOLDER/level3/canopy/$x-$y -t1 5 -t2 2 -xm mapreduce -ow -cl
mahout kmeans -i $FOLDER/level3/data/$x/$y -o $FOLDER/level3/$x-$y -c $FOLDER/level3/canopy/$x-$y/clusters-0-final -x 1 -xm mapreduce -ow -cl;
done
done
rm -r data
rm -r level3
hadoop fs -get $FOLDER/level3/ .
for x in `ls level3/ | grep -v data | grep -v canopy`; do
mahout clusterdump -s $FOLDER/level3/$x/clusters-*-final/ -p $FOLDER/level3/canopy/$x/clusteredPoints -o dump/level3/$x
done
rm -r level3
| true |
92b40a08602f6e715fa28c655de8bab7f30a0799 | Shell | zuevval/source | /commands/bioinf_spring2022project/upload_fastq.sh | UTF-8 | 330 | 2.734375 | 3 | [] | no_license | # upload FASTQ files (processed by TrimmoMatic) from PowerHouse to cluster
scc-scp-lus(){ # securely copy file to cluster
scp -i ~/.ssh/zuev_id_ecdsa $1 vzuev@login1.hpc.spbstu.ru:/home/nilmbb/vzuev/LusGen/
}
for i in 55 56 57 58 59 60; do
scc-scp-lus "SRR81777${i}_1_paired.fastq.gz"
scc-scp-lus "SRR81777${i}_2_paired.fastq.gz"
done
| true |
1c3dd20242d1e391389b519580472a3a42707108 | Shell | fagan2888/DBank | /TSG_features_classification/execute_API_classifier.sh | UTF-8 | 1,030 | 3.03125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# parameters for cross-validation
NFOLDS=10
NITER=1
FEATURES_PATH=./
RESULTS_PATH=.classification_results/
KFOLDS=('Stratified')
CLASSIFIERS=('RF' 'DT' 'NB' 'LR' 'KNN' 'SVM' 'ADA' 'GBDT' 'NN_MLP' 'KerasNN_MLP')
DATASETS=(
# "good_banker_ety"
"good_banker_API"
)
# 0 means only using given feature, 1 means using given feature and SR feature
# input "_ety" feature and using 1 can produce single SR feature
SR_METHOD=('1')
echo "Creating directory $RESULTS_PATH..."
mkdir -p $RESULTS_PATH
echo "Removing files from $RESULTS_PATH..."
# prompt before cleaning results folder
rm -rI $RESULTS_PATH/*.csv
echo "Running the classifiers"
# Running the classifiers
python3 API_classifier.py --nfolds $NFOLDS --niterations $NITER --features "$FEATURES_PATH" --sr_method ${SR_METHOD[@]} --results $RESULTS_PATH --classifiers ${CLASSIFIERS[@]} --datasets ${DATASETS[@]} --kfolds ${KFOLDS[@]} --debug --save-fi
# echo "Removing incomplete results"
rm $RESULTS_PATH/*incomplete*.csv
echo "Script complete."
| true |
2a727f102ffa03a1d2d8356b872c7da9b8f79a44 | Shell | ojalaquellueva/ncbi | /includes/functions.sh | UTF-8 | 1,747 | 4.15625 | 4 | [] | no_license | #!/bin/bash
#################################################################
# General purpose shell functions
# Author: Brad Boyle (bboyle@email.arizona.edu)
# Date created: 27 June 2016
#################################################################
checkemail()
{
# Simple email validation function
# Returns 0 if valid, 2 is missing, 1 if bad
if [ -z "$1" ]; then
# No email supplied
#echo "No email supplied"
return 2
else
email=$1
if [[ "$email" == ?*@?*.?* ]] ; then
#echo $email": Valid email"
return 0
else
#echo $email": Bad email"
return 1
fi
fi
}
echoi()
{
# Echos message only if first token=true, otherwise does nothing
# If optionally pass most recent exit code, will abort if error
# Provides a compact alternative to wrapping echo in if...fi
# Options:
# -n Standard echo -n switch, turns off newline
# -e Exit status of last operation. If used, -e MUST be
# followed by $? or $? saved as variable.
# Gotcha: may behave unexpectedly if message = "true" or true
# first token MUST be 'true' to continue
if [ "$1" = true ]; then
shift
msg=""
n=" "
while [ "$1" != "" ]; do
# Get remaining options, treating final
# token as message
case $1 in
-n ) n=" -n "
shift
;;
-e ) shift
rc=$1
#echo "rc="$rc
if [[ $rc != 0 ]]; then
kill -s TERM $TOP_PID
fi
shift
;;
* ) msg=$1
break
;;
esac
done
echo $n $msg
fi
}
etime()
{
# Returns elapsed time in seconds
# Accepts: $prev, previous time
# Returns: difference between now and $prev
now=`date +%s%N`
prev=$1
elapsed=`echo "scale=2; ($now - $prev) / 1000000000" | bc`
echo $elapsed
}
| true |
e4b8a65cf005af58af86764f3c774d73c535b631 | Shell | scribblemaniac/_ebDev | /scripts/imgAndVideo/nconvert2imgNN.sh | UTF-8 | 624 | 3.296875 | 3 | [] | no_license | # UNTESTED: imgFileNoExt val.
# If I ever use it (I thought I might, but maybe not), I'll fix it up if necessary and remove this first comment.
# USAGE
# Invoke this script with three parameters, being:
# $1 input file
# $2 output format
# $3 px wide to resize to by nearest neighbor method, maintaining aspect
# template command; resizes to x800 px maintaining aspect ratio:
# nconvert -ratio -rtype quick -resize 800 -ratio -out jpeg -o outPutFileName.jpg inputFile.png
imgFileNoExt=`echo $1 | gsed 's/\(.*\)\..\{1,4\}/\1/g'`
nconvert -ratio -rtype quick -resize $3 -ratio -out $2 -o outPutFileName.jpg $imgFileNoExt.$2 | true |
203d9519128d246f2a0b0852818c6dfb6e55280b | Shell | welshstew/ocpstuff | /satellite/scripts/prep.sh | UTF-8 | 2,689 | 2.640625 | 3 | [] | no_license | export ANSIBLE_HOST_KEY_CHECKING=False
curl http://satellite.home.nicknach.net/pub/hosts_libvirt > hosts
curl http://satellite.home.nicknach.net/pub/prep.sh > prep.sh && chmod +x prep.sh
./prep.sh
mv hosts /etc/ansible -f
ansible "*" -m script -a "./prep.sh"
curl https://raw.githubusercontent.com/openshift/openshift-ansible/master/roles/openshift_storage_glusterfs/files/glusterfs-template.yml > /usr/share/ansible/openshift-ansible/roles/openshift_storage_glusterfs/files/glusterfs-template.yml
ansible-playbook /usr/share/ansible/openshift-ansible/playbooks/prerequisites.yml
[root@satellite pub]# cat prep.sh
export ANSIBLE_HOST_KEY_CHECKING=False && echo ANSIBLE_HOST_KEY_CHECKING=False >> /etc/environment
curl http://satellite.home.nicknach.net/pub/bootstrap.py > bootstrap.py && chmod +x bootstrap.py && ./bootstrap.py -l admin -o nicknach -a ocp-node -s satellite.home.nicknach.net -L home -g ocp-nodes -p welcome1 --skip-puppet --force
yum clean all
## install sub manager
yum install -d1 -y -q subscription-manager yum-utils wget
curl http://satellite.home.nicknach.net/pub/satellite.home.nicknach.net.crt > satellite.home.nicknach.net.crt && mv -f satellite.home.nicknach.net.crt /etc/pki/ca-trust/source/anchors && restorecon /etc/pki/ca-trust/source/anchors/satellite.home.nicknach.net.crt && update-ca-trust
subscription-manager repos --disable nicknach_nvidia_cuda
subscription-manager repos --disable nicknach_epel_epel
## grab your LDAP server's cert
#curl http://satellite.home.nicknach.net/pub/my-ldap-ca-bundle.crt > ~/my-ldap-ca-bundle.crt
## install some general pre-req packages
yum install -d1 -y yum-utils wget git net-tools bind-utils iptables-services bridge-utils bash-completion nfs-utils dstat mlocate screen
## install openshift client package (oc)
yum install -d1 -y atomic-openshift-clients
yum install -d1 -y openshift-ansible-playbooks
#sed -i 's/#log_path/log_path/' /etc/ansible/ansible.cfg
## install container runtime
yum install -d1 -y docker
#yum install -d1 -y crio cri-tools podman skopeo
## set the repo in runtime config (disconnected only)
sed -i "s/registry.access.redhat.com'/registry.access.redhat.com\', \'satellite.home.nicknach.net:8888\'/" /etc/containers/registries.conf
## enable container runtime
systemctl enable docker --now
#systemctl enable crio --now
## wipe the gluster disk
#wipefs --all /dev/sdb -f
## install gluster packages
yum install -d1 -y cns-deploy heketi-client
## make sure your nodes are up-to-date
yum -d1 -y update
mkdir -p /etc/origin/master
curl http://satellite.home.nicknach.net/pub/my-ldap-ca-bundle.crt > /etc/origin/master/my-ldap-ca-bundle.crt
sed -i 's/#log_path/log_path/' /etc/ansible/ansible.cfg
| true |
04f827f27da955e57b76eb200b8e278c4cd5875e | Shell | JinsYin/ops | /shell/lantern/uninstall-lantern.sh | UTF-8 | 442 | 3.515625 | 4 | [] | no_license | #!/bin/bash
# Author: JinsYin <github.com/jinsyin>
set -e
fn::check_permission()
{
if [ $(id -u) -ne 0 ]; then
echo "You must run as root user or through the sudo command."
exit 1
fi
}
fn::command_exists()
{
command -v "$@" > /dev/null 2>&1
}
fn::ubuntu::unstall_lantern()
{
if fn::command_exists lantern; then
apt-get purge -y lantern
fi
}
main()
{
fn::check_permission
fn::ubuntu::unstall_lantern
}
main $@
| true |
6a4520ac17f64576f770aacb9eafefd128b87c62 | Shell | johnjdailey/snowflake-sqlalchemy | /scripts/build_pyarrow_linux.sh | UTF-8 | 2,323 | 3.84375 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash -e
#
# Build Snowflake Connector for Python in our manylinux docker image
#
THIS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
CONNECTOR_DIR="$( dirname "${THIS_DIR}")"
source $THIS_DIR/build_init.sh
function build_connector_with_python() {
PYTHON=$1
source "/home/user/venv-build-${PYTHON}/bin/activate"
log INFO "Creating a wheel: snowflake_connector using $PYTHON $U_WIDTH"
cd $CONNECTOR_DIR
rm -rf build/
export ENABLE_EXT_MODULES=true
rm -f generated_version.py
flake8
python setup.py bdist_wheel -d $CONNECTOR_DIR/dist/docker/$PYTHON/
unset ENABLE_EXT_MODULES
# audit wheel files
mkdir -p $CONNECTOR_DIR/dist/docker/repaired_wheels
auditwheel repair --plat manylinux2010_x86_64 -L connector $CONNECTOR_DIR/dist/docker/$PYTHON/*.whl -w $CONNECTOR_DIR/dist/docker/repaired_wheels
rm $CONNECTOR_DIR/dist/docker/repaired_wheels/*manylinux1_x86_64.whl || true
deactivate
}
if [[ -n "$CLEAN" ]]; then
log WARN "Deleting artifacts for Python Connector in $CONNECTOR_DIR/build, $CONNECTOR_DIR/dist"
rm -rf $CONNECTOR_DIR/build || true
rm -rf $CONNECTOR_DIR/dist/snowflake{_,-}connector* || true
fi
generate_version_file "$RELEASE_PACKAGE"
cd $CONNECTOR_DIR
rm -rf dist/docker/
PYTHON_VERSION=$1
# if no arguments provided to this script, by default we will build using all versions
# of python connector
if [[ -z $PYTHON_VERSION ]]; then
PYTHON_VERSIONS=("3.5" "3.6" "3.7" "3.8")
else
PYTHON_VERSIONS=($PYTHON_VERSION)
fi
for PYTHON_TUPLE in ${PYTHON_VERSIONS[@]}; do
build_connector_with_python "$PYTHON_TUPLE"
source /home/user/multibuild/manylinux_utils.sh
generate_reqs_file /opt/python/cp35-cp35m/bin/virtualenv "$(cpython_path $PYTHON 16)/bin/python" "$(latest_wheel $CONNECTOR_DIR/dist/docker/$PYTHON/*.whl)" "/home/user/py${PYTHON_TUPLE}_tmp_env"
# Move .reqs files into new directories as per SNOW-122208
full_python_version="$($(cpython_path $PYTHON 16)/bin/python --version | cut -d' ' -f2-)"
mkdir "${CONNECTOR_DIR}/dist/docker/${full_python_version}"
log INFO "Going to move reqs file to $CONNECTOR_DIR/dist/docker/$full_python_version for full version number"
mv ${CONNECTOR_DIR}/dist/docker/${PYTHON}/*.reqs "${CONNECTOR_DIR}/dist/docker/${full_python_version}/"
done
| true |
ca4b9dfefa21a04df3b38f95f478459c19a699e0 | Shell | mrtos/ambarella | /prebuild/third-party/armv7-a-hf/pppd/usr/bin/test_3g | UTF-8 | 3,894 | 3.75 | 4 | [] | no_license | #!/bin/sh
linux_version=$(uname -r)
if [ $# = 0 ]
then
help="yes"
fi
m_type="3gnet"
ARGS='getopt m:t:h "$@"'
eval set -- "${ARGS}"
while [ -n "$1" ]
do
case "$1" in
-m)
m_module="$2"
help="no"
;;
-t)
m_type="$2"
;;
-h)
help="yes"
;;
--)
shift
break
;;
esac
shift
done
power_up_sim5320() {
if [ ! -e /sys/class/gpio/gpio96/value ]
then
echo 96 > /sys/class/gpio/export
fi
echo high > /sys/class/gpio/gpio96/direction
}
power_down_sim5320() {
echo low > /sys/class/gpio/gpio96/direction
}
power_up_u130(){
rmmod usbserial
sleep 2
modprobe usbserial vendor=0x1546 product=0x1101
}
power_up() {
echo Power up $1 module,Please wait ...
if [ "$1" = "sim5320" ]
then
power_up_sim5320
fi
if [ "$1" = "u130" ]
then
power_up_u130
fi
}
power_down() {
echo Power down $1 module
if [ "$1" = "sim5320" ]
then
power_down_sim5320
fi
if [ "$1" = "mu739" ]
then
echo Please reboot mu739 power by reset button
sleep 30
fi
}
start_pppd() {
echo start PPPD server on $2 module $1 protocol
## test dependency
if [ ! -e /usr/sbin/pppd ]
then
echo "Error: You need to compile pppd into firmware"
return
fi
if [ ! -e /lib/modules/${linux_version}/kernel/drivers/usb/serial/usb_wwan.ko ]
then
echo "Error: You need to compile usb_wwan.ko into firmware"
return
fi
if [ ! -e /lib/modules/${linux_version}/kernel/drivers/usb/serial/option.ko ]
then
echo "Error: You need to compile option.ko into firmware"
return
fi
if [ -e /etc/ppp/resolv.conf ]
then
rm /etc/ppp/resolv.conf
fi
if [ -e /proc/ambarella/uport ]
then
echo host > /proc/ambarella/uport
fi
echo shutdown eth0
ifconfig eth0 down
needpowerdown=0
pppd_server="pppd call $2/$1 &"
while true
do
if [ -e /var/log/pppd.log ]
then
rm /var/log/pppd.log
fi
power_up $2
sleep 10
if [ -e /etc/ppp/resolv.conf ]
then
rm /etc/ppp/resolv.conf
fi
echo run pppd_server
eval $pppd_server
waitsec=60
while [ $waitsec -gt 0 ]
do
echo wait $((waitsec--)) seconds
sleep 1
if [ -e /etc/ppp/resolv.conf ]
then
echo found resolv.conf
break
fi
logout=`grep -o "Connection terminated" /var/log/pppd.log | wc -l`
if [ $logout -ge 1 ]
then
echo restart
break
fi
done
if [ -e /etc/ppp/resolv.conf ]
then
echo copy dns file
cp /etc/ppp/resolv.conf /etc/
echo pppd succeed !!!
while true
do
pppd_num=$(ps | grep -c pppd )
#echo pppd_num is $pppd_num
if [ $pppd_num -lt 2 ]
then
break
fi
#check link every 10 seconds
sleep 10
done
else
echo pppd failed,restart...
ps | grep pppd | awk 'NR==1{print $1}' | xargs kill -9
ps | grep chat | awk 'NR==1{print $1}' | xargs kill -9
sleep 5
#sim5320 must connect twice
if [ $needpowerdown -gt 1 ]
then
power_down $2
fi
$((needpowerdown++))
continue
fi
done
}
if [ "${help}" == "yes" ]
then
echo 'test_3g'
echo ' -m: module. like sim5320, mu739,u130'
echo ' -t: type,3gnet or 3gwap'
echo ' -h: help'
else
start_pppd ${m_type} ${m_module}
fi
| true |
7a5f8a8e7ff3642a08f14eced98b7573379fbbc1 | Shell | frankywen/genieacs | /docker/gui/init.sh | UTF-8 | 568 | 2.765625 | 3 | [
"MIT"
] | permissive | #!/bin/sh
for f in `ls /app/genieacs-gui/config/*-sample.yml`
do
echo "mv samply.yml to .yml"
mv "$f" "${f/-sample.yml/.yml}"
done
if [ -f /app/genieacs-gui/config/graphs-sample.json.erb ]; then
echo "mv graphs-sample.json.erb to graphs.json.erb"
mv /app/genieacs-gui/config/graphs-sample.json.erb /app/genieacs-gui/config/graphs.json.erb
fi
echo "set RAILS_ENV production bundle"
cd /app/genieacs-gui && RAILS_ENV=production bundle && RAILS_ENV=production bundle exec rake assets:precompile
echo "run bundle install"
cd /app/genieacs-gui && bundle install | true |
6d1f4ace304eb9c7cdc38028c1dfcf717e79a829 | Shell | ershov-ilya/cron-git-scripts | /pu.sh | UTF-8 | 1,183 | 3.390625 | 3 | [] | no_license | identity=$HOME"/.ssh/nopass_id"
a=$(pwd)
b=${a%/*} # вверх по директории
c=$b/hashes
echo $c
#git config user.name "ILYA ERSHOV"
#git config user.email ershov.ilya@gmail.com
whoami
#git config user.name
#git config user.email
sshagentstatus=$(ps -e | grep [s]sh-agent)
echo $sshagentstatus
#if [ -n "$sshagentstatus" ]; then
# echo "sshagentstatus is not empty"
#fi
#
#if [ -z "$sshagentstatus" ]; then
# echo "sshagentstatus is empty"
#fi
if [ -z "$sshagentstatus" ]; then
echo "ssh-agent is OFF, starting again..."
ssh-agent /bin/bash
fi
ssh-add $identity
rc=$?
if [[ $rc != 0 ]]; then
echo -e "\e[31merror\e[39m"
exit $rc
else
echo -e "\e[32mOK\e[39m"
fi
commitname=$(php write-hashes.php)
echo "New commit name: "$commitname
cd $c
ls
git add -A
echo "add result: "
rc=$?
if [[ $rc != 0 ]]; then
echo -e "\e[31merror\e[39m"
exit $rc
else
echo -e "\e[32mOK\e[39m"
fi
git commit -m "$commitname"
echo "commit result: "
rc=$?
if [[ $rc != 0 ]]; then
echo -e "\e[31merror\e[39m"
exit $rc
else
echo -e "\e[32mOK\e[39m"
fi
git push
echo "push result: "
rc=$?
if [[ $rc != 0 ]]; then
echo -e "\e[31merror\e[39m"
exit $rc
else
echo -e "\e[32mOK\e[39m"
fi
| true |
030963614b45f65420d748299c84271ea9ab9431 | Shell | karpoke/scripts | /lynis.sh | UTF-8 | 398 | 3.40625 | 3 | [
"MIT"
] | permissive | #!/bin/sh
[ -z "$1" ] && echo "Usage: $(basename "$0") rcpto@example.com" && exit 1
[ "$(id -u)" -ne 0 ] && echo "ERROR: You must be root user to run this program" && exit 3
OUTFILE=$(mktemp)
/usr/bin/nice -n 0 /usr/sbin/lynis --cronjob --auditor "$(hostname -f)" > "$OUTFILE"
[ ! -s "$OUTFILE" ] && exit 2
SUBJECT="[lynis] $(hostname -f) report"
mail -s "$SUBJECT" "$1" < "$OUTFILE"
rm -f "$OUTFILE"
| true |
73ef2fbf6faa92e54fe7e3972db50ee57f3ee4d0 | Shell | vurrut/vurrutROM | /customize/tweaks/S02saver | UTF-8 | 795 | 2.65625 | 3 | [] | no_license | #!/system/bin/sh
#vurrutROM tweaks
#cpu
echo "120000" > /sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq
echo "1000000" > /sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq
echo "conservative" > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
#scheduler cfq
for i in $(ls -1 /sys/block/stl*) $(ls -1 /sys/block/mmc*)
do
echo "cfq" > $i/queue/scheduler
done
fi;
#Conservative settings for saver mode.
echo "95" > /sys/devices/system/cpu/cpufreq/conservative/up_threshold
echo "120000" > /sys/devices/system/cpu/cpufreq/conservative/sampling_rate
echo "1" > /sys/devices/system/cpu/cpufreq/conservative/sampling_down_factor
echo "40" > /sys/devices/system/cpu/cpufreq/conservative/down_threshold
echo "10" > /sys/devices/system/cpu/cpufreq/conservative/freq_step
| true |
f7629827b65f11035ba86e16e0c896e42867eea0 | Shell | nptcl/npt | /develop/check/test_amalgamation.sh | UTF-8 | 690 | 3.609375 | 4 | [
"Unlicense"
] | permissive | #!/bin/sh
checkerr()
{
if [ $? -ne 0 ]; then
echo "$@"
exit 1
fi
}
type="$1"
[ -n "${type}" ]
checkerr "type error"
case "$2" in
freebsd) name="freebsd";;
linux) name="linux";;
*) name="";;
esac
cd $(dirname $0)
checkerr "cd error"
release="$(/bin/ls ../npt-*.*.*.tar.gz 2> /dev/null)"
[ -r "${release}" ]
checkerr "release file error: ${release}"
./make/clean.sh
checkerr "clean.sh error"
cp -n ${release} make/.
checkerr "cp error: ${release}"
[ -n "${name}" ]
checkerr "argument error"
./make/amalgamation.sh ${type} ${name} 2>&1
checkerr "amalgamation.sh error"
./make/check.sh amalgamation
checkerr "check.sh error"
./make/clean.sh
checkerr "clean.sh error"
exit 0
| true |
56d8d8ae79f7da2e534a6404da0ed43305a140ba | Shell | SerenityOS/serenity | /Ports/jakt/package.sh | UTF-8 | 1,376 | 3.109375 | 3 | [
"BSD-2-Clause"
] | permissive | #!/usr/bin/env -S bash ../.port_include.sh
port='jakt'
version='git'
useconfigure='true'
depends=(
'llvm'
)
commit_hash='063e9767ff80db1a1cfe1a805cc8b7e2e577d9f3'
archive_hash='0cb858291d0426e80c8378d7d5876a2a8de747467a289bb691782316c79a2f59'
files=(
"https://github.com/SerenityOS/jakt/archive/${commit_hash}.tar.gz ${archive_hash}"
)
workdir="jakt-${commit_hash}"
configure() {
host_env
install_path="$(realpath "${workdir}/jakt-install")"
run cmake \
-GNinja \
-B build-host \
-S . \
-DCMAKE_BUILD_TYPE=Release \
-DSERENITY_SOURCE_DIR="${SERENITY_SOURCE_DIR}" \
-DCMAKE_INSTALL_PREFIX="${install_path}"
target_env
# FIXME: CMAKE_INSTALL_PREFIX should be correctly set by the cmake toolchain file,
# but CMakeToolchain.txt sets it to the host path /usr/local.
run cmake \
-GNinja \
-B build \
-S . \
-DCMAKE_TOOLCHAIN_FILE="${SERENITY_BUILD_DIR}/CMakeToolchain.txt" \
-DSERENITY_SOURCE_DIR="${SERENITY_SOURCE_DIR}" \
-DCMAKE_BUILD_TYPE=Release \
-DJAKT_BOOTSTRAP_COMPILER="${install_path}/bin/jakt" \
-DCMAKE_INSTALL_PREFIX="${SERENITY_INSTALL_ROOT}/usr/local"
}
build() {
run cmake --build build-host
run cmake --install build-host
run cmake --build build
}
install() {
run cmake --install build
}
| true |
cc4d2dccaeed408aedd0aed53fb95ed4b10fea60 | Shell | lawrennd/talks | /substituteSlideDiagram.sh | UTF-8 | 745 | 3.203125 | 3 | [] | no_license | #!/bin/bash
for dir in _*
do
for file in `find $dir -maxdepth 1 -name '*.md'`
do
if grep -q "diagramsDir/" $file
then
sed -i-sett -e 's+\.\./\\diagramsDir/+\\writeDiagramsDir/+g' $file
sed -i-sett -e 's+'"'"'\\diagramsDir/+'"'"'\\writeDiagramsDir/+g' $file
sed -i-sett -e 's+"\\diagramsDir/+"\\writeDiagramsDir/+g' $file
fi
done
if [ -d $dir/includes ]
then
for file in `find $dir/includes -maxdepth 1 -name '*.md'`
do
if grep -q "diagramsDir/" $file
then
sed -i-sett -e 's+\.\./\\diagramsDir/+\\writeDiagramsDir/+g' $file
sed -i-sett -e 's+'"'"'\\diagramsDir/+'"'"'\\writeDiagramsDir/+g' $file
sed -i-sett -e 's+"\\diagramsDir/+"\\writeDiagramsDir/+g' $file
fi
done
fi
done
| true |
be4612844f540bd575904c80354f64177826bc47 | Shell | COMSYS/cppl | /examples/test_cases/bin/run_test.sh | UTF-8 | 2,873 | 3.828125 | 4 | [
"MIT",
"LicenseRef-scancode-public-domain",
"Apache-2.0"
] | permissive | #!/bin/bash
CPPL_COMPRESS=$1
CPPL_EVALUATE=$2
if [ ! -x $CPPL_COMPRESS ]
then
echo "Cannot find or execute cppl_compress -> Aborting."
exit 1
fi
if [ ! -x $CPPL_EVALUATE ]
then
echo "Cannot find or execute cppl_evaluate -> Aborting."
exit 1
fi
#clean files generated by last test
if [ -d generated_test_cases ]
then
rm -rf generated_test_cases
fi
if [ -d test_result ]
then
rm -rf test_results
fi
if [ -e policy_compressed.ccppl ]
then
rm policy_compressed.ccppl
fi
#test cases preparation
python ./bin/test_cases_generator.py
mkdir test_results
#generate compressed policy
env LD_LIBRARY_PATH=$LD_LIBRARY_PATH:../../policy-decision-point\
$CPPL_COMPRESS --input-file policy_textual.cppl --policy-definition-file policy_definition.json -o policy_compressed.ccppl
#do tests
TEST_CASE_ID=0
FAILED_TESTS=0
while [ -e generated_test_cases/policy_nodeparams_$TEST_CASE_ID.json ]
do
OUT_PUT_FILE=test_results/test_result_$TEST_CASE_ID
echo $OUT_PUT_FILE:
#output textual policy file
echo -e "--------------------------------\npolicy:" > $OUT_PUT_FILE
cat policy_textual.cppl >> $OUT_PUT_FILE
#output node params of current test case
echo -e "\n------------------------------\npolicy_nodeparams_$TEST_CASE_ID.json:\n" >> $OUT_PUT_FILE
cat generated_test_cases/policy_nodeparams_$TEST_CASE_ID.json >> $OUT_PUT_FILE
#output node runtime params of current test case
echo -e "\n------------------------------\npolicy_noderuntimeparams_$TEST_CASE_ID.json:\n" >> $OUT_PUT_FILE
cat generated_test_cases/policy_noderuntimeparams_$TEST_CASE_ID.json >> $OUT_PUT_FILE
#output eval result of current test case
echo -e "\n------------------------------\nresult:\n" >> $OUT_PUT_FILE
env LD_LIBRARY_PATH=$LD_LIBRARY_PATH:../../policy-decision-point\
$CPPL_EVALUATE --ccppl-file policy_compressed.ccppl\
--policy-definition policy_definition.json\
--function-handler ../../policy-decision-point/func_handler_23.so\
--variables-file generated_test_cases/policy_nodeparams_$TEST_CASE_ID.json\
--runtime-variables generated_test_cases/policy_noderuntimeparams_$TEST_CASE_ID.json >> $OUT_PUT_FILE
#result from python
echo -e "\n------------------------------\nresult from python:\c" >> $OUT_PUT_FILE
python ./bin/test_evaluation.py generated_test_cases/policy_nodeparams_$TEST_CASE_ID.json generated_test_cases/policy_noderuntimeparams_$TEST_CASE_ID.json >> $OUT_PUT_FILE
CHECK_SAME=$(cat $OUT_PUT_FILE | sed -n 's/\(^Policy\ result:\ \|^result\ from\ python:\)\(.*\)$/\2/p' | uniq -d)
if [ -z "$CHECK_SAME" ]
then
(( FAILED_TESTS += 1 ))
echo -e "\tTEST FAILED (results differ)" | tee -a $OUT_PUT_FILE
else
echo -e "\tTEST OK (obtained expected result)" | tee -a $OUT_PUT_FILE
fi
(( TEST_CASE_ID += 1 ))
done
if (( FAILED_TESTS == 0 ))
then
echo "OK: All tests passed"
exit 0
else
echo "ERROR: $FAILED_TESTS tests failed"
exit 1
fi
| true |
f11e7734532f451a2a303a5a84cfc852ef466eef | Shell | CuteBeaeast/BME1063_HW1 | /script/3-map_quality_summary.sh | UTF-8 | 499 | 2.90625 | 3 | [] | no_license | BWA=/public/home/panxq/BME1063/BME1063_HW1/data/bwa_result
BOWTIE=/public/home/panxq/BME1063/BME1063_HW1/data/bowtie2_result
OPATH=/public/home/panxq/BME1063/BME1063_HW1/FinalReport/mapping_summary
for P in ${BWA}/dataset*
do
cd ${P}
FILE=./*.sam
dwgsim_eval -S ${FILE} 2>&- | sed '/#/d' > ${OPATH}/bwa_mapping/$(basename ${FILE}).quality
done
for P in ${BOWTIE}/dataset*
do
cd ${P}
FILE=./*.sam
dwgsim_eval -S ${FILE} 2>&- | sed '/#/d' > ${OPATH}/bowtie2_mapping/$(basename ${FILE}).quality
done
| true |
bba0ec0989e76c4b65d7296b5af9f84abfd41002 | Shell | OctoConsulting/fedhipster-iac | /iac-auto.sh | UTF-8 | 8,333 | 3.703125 | 4 | [] | no_license | #!/bin/bash
function echo_header() {
echo
echo "########################################################################"
echo $1
echo "########################################################################"
}
function usage {
echo "Usage: $(basename $0) [ -destroy ]"
exit
}
if [[ $# -gt 1 ]]; then
usage
fi
if [[ $# -eq 1 ]]; then
if [[ "$1" != "-destroy" ]]; then
usage
else
echo_header "Destroying infrastructure"
chmod u+x destroy.sh
./destroy.sh
exit
fi
fi
chmod u+x delete-pipeline.sh
echo_header "OneClick Setup: ($(date))"
##### Tools #####
echo "Checking tools"
#Install tools iff they don't exist
if [ ! -x $(command -v brew) ]; then
yes "" | /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
fi
if [ ! -x $(command -v createdb) ]; then
brew install libpq
brew link --force libpq
brew install jq
fi
if [ ! -x $(command -v wget) ]; then brew install wget; fi
if [ ! -x $(command -v aws) ]; then brew install awscli; fi
if [ ! -x $(command -v aws-iam-authenticator) ]; then brew install aws-iam-authenticator; fi
if [ ! -x $(command -v terraform) ]; then brew install terraform; fi
if [ ! -x $(command -v kubectl) ]; then brew install kubernetes-cli; fi
#Uninstall Helm3, if any, just to make sure we are not using Helm v3, we need Helm v2
brew uninstall kubernetes-helm
brew install helm@2
echo "All necessary tools installed"
##### Prompts #####
read -p "Enter AWS Access Key: " AWS_ACCESS_KEY
read -s -p "Enter AWS Secret Key: " AWS_SECRET_KEY
printf "\n"
read -p "Enter AWS Region (us-east-2 | us-east-2(default)): " AWS_REGION_INPUT
AWS_REGION=${AWS_REGION_INPUT:-"us-east-2"}
printf "\n AWS Region: $AWS_REGION"
export AWS_DEFAULT_REGION=$AWS_REGION
printf "\n"
read -p "Enter GitHub username (not email) with access to repo: " GITHUB_USER
read -s -p "Enter password for GitHub user: " GITHUB_PASS
printf "\n"
#read -p "Enter GitHub Repo URL (for example https://github.com/OctoConsulting/app-name): " GITHUB_REPO
##### Variables #####
AWS_REGION=us-east-2
GITHUB_APP=https://github.com/OctoConsulting/fedhipster-template
GITHUB_MICRO=https://github.com/OctoConsulting/blue-line-micro
IMAGE_APP=octolabs/fedhipster-app
IMAGE_MICRO=octolabs/micro
printf "$AWS_ACCESS_KEY\n$AWS_SECRET_KEY\n$AWS_REGION\n\n" | aws configure
printf "\n"
#IMAGE_REPO=$(aws ecr create-repository --repository-name app | grep repositoryUri | sed 's|.*: \(.*\)|\1|;s/"//g;s/\/.*//')
#echo "Create ECR Repository: $IMAGE_REPO"
#Using Terraform to install and configure the AWS EKS Environment
printf "Using Terraform to install and configure AWS EKS Environment"
#### AWS Terraform #####
terraform init
terraform apply --auto-approve
#Rerun apply to create Elasticsearch resources, if failed in the first run (See https://github.com/terraform-providers/terraform-provider-aws/issues/7725)
terraform apply --auto-approve > /dev/null 2>&1
##### Infrastructure Setup #####
export PGPASSWORD=$(terraform output cluster_master_password)
ECR_APP=$(terraform output ecr-app)
ECR_MICRO=$(terraform output ecr-micro)
IMAGE_REPO=$(echo $ECR_APP | cut -f1 -d'/')
##### Kubernetes Config #####
terraform output kubeconfig > ./kubeconfig
terraform output config_map_aws_auth > ./config_map_aws_auth.yaml
export KUBECONFIG=$(pwd)/kubeconfig
kubectl apply -f ./config_map_aws_auth.yaml
kubectl cluster-info
kubectl cluster-info > outputs.txt
kubectl config get-contexts
kubectl apply -f rbac.yaml
kubectl create rolebinding default-role \
--clusterrole=cluster-admin \
--serviceaccount=default:default
##### KMS Terraform #####
cd ./terraform-aws-kms/examples/with-default-policy
terraform init
terraform apply --auto-approve
cd ../../..
##### CI/CD #####
cd ./cicd/
helm init --service-account tiller
kubectl -n kube-system rollout status deploy/tiller-deploy
helm repo update
sleep 30
chmod u+x *.sh
./cicd-startup.sh default cicd \
$AWS_ACCESS_KEY $AWS_SECRET_KEY $GITHUB_USER $GITHUB_PASS $IMAGE_REPO $GITHUB_APP $GITHUB_MICRO $AWS_REGION
cd ..
##### Environments #####
for NAMESPACE in {prod,stage,dev}
do
kubectl create namespace $NAMESPACE
echo "Creating $NAMESPACE App Database"
createdb -h $(terraform output cluster_endpoint) -p 5432 -U $(terraform output cluster_master_username) ${NAMESPACE}-db
echo "Creating $NAMESPACE Micro Database"
createdb -h $(terraform output cluster_endpoint) -p 5432 -U $(terraform output cluster_master_username) ${NAMESPACE}-micro-db
#Create kubernetes secrets from terraform outputs
#Create secrets for RDS
kubectl -n ${NAMESPACE} create secret generic rds-${NAMESPACE} \
--from-literal=rds_db=${NAMESPACE}-db \
--from-literal=rds_endpoint=$(terraform output cluster_endpoint) \
--from-literal=rds_password=$(terraform output cluster_master_password) \
--from-literal=rds_username=$(terraform output cluster_master_username)
kubectl -n ${NAMESPACE} create secret generic rds-micro-${NAMESPACE} \
--from-literal=rds_db=${NAMESPACE}-micro-db \
--from-literal=rds_endpoint=$(terraform output cluster_endpoint) \
--from-literal=rds_password=$(terraform output cluster_master_password) \
--from-literal=rds_username=$(terraform output cluster_master_username)
#Create secrets for Elasticsearch domains
kubectl -n ${NAMESPACE} create secret generic elastic-${NAMESPACE} \
--from-literal=elastic_uri=https://$(terraform output domain_endpoint_${NAMESPACE})
#Create secrets for API Gateway URLs
# kubectl -n ${NAMESPACE} create secret generic lambda-${NAMESPACE} \
# --from-literal=api_url=$(terraform output retro_api_${NAMESPACE}_url)
#S3 bucket names
kubectl -n ${NAMESPACE} create secret generic bucket-${NAMESPACE} \
--from-literal=bucket_name=$(terraform output ${NAMESPACE}_bucket)
kubectl -n ${NAMESPACE} create secret generic aws-keys \
--from-literal=aws_access=$AWS_ACCESS_KEY \
--from-literal=aws_secret=$AWS_SECRET_KEY
kubectl -n ${NAMESPACE} create rolebinding ${NAMESPACE}-role \
--clusterrole=cluster-admin \
--serviceaccount=default:default \
cd ./terraform-aws-kms/examples/with-default-policy
kubectl -n ${NAMESPACE} create secret generic kms-key \
--from-literal=kms_key_id=$(terraform output key_id)
cd ../../..
#Install and setup initial environments
cd ./kompose/
chmod u+x *.sh
./kube-startup.sh $NAMESPACE $AWS_REGION $IMAGE_APP $IMAGE_MICRO
cd ..
done
##### Data Scraping #####
# cd bin
# chmod u+x ./invokeScrape.sh
# SCRAPE_INTERVAL_SEC=7
# DATA_SOURCE=OneTimeDataSourceUrlLoad_WIKI.csv
# FIRST_SCRAPE_WAIT_MIN=30
# SCRAPE_ENV=dev
# printf "\n Scheduling background scraper process for ${SCRAPE_ENV}. Check ${SCRAPE_ENV}-out.log for the status. \n"
# nohup ./invokeScrape.sh $DATA_SOURCE $SCRAPE_ENV $SCRAPE_INTERVAL_SEC $FIRST_SCRAPE_WAIT_MIN > ${SCRAPE_ENV}-out.log &
# sleep 1
# SCRAPE_ENV=stage
# printf "\n Scheduling background scraper process for ${SCRAPE_ENV}. Check ${SCRAPE_ENV}-out.log for the status. \n"
# nohup ./invokeScrape.sh $DATA_SOURCE $SCRAPE_ENV $SCRAPE_INTERVAL_SEC $FIRST_SCRAPE_WAIT_MIN > ${SCRAPE_ENV}-out.log &
# sleep 1
# SCRAPE_ENV=prod
# printf "\n Scheduling background scraper process for ${SCRAPE_ENV}. Check ${SCRAPE_ENV}-out.log for the status. \n"
# nohup ./invokeScrape.sh $DATA_SOURCE $SCRAPE_ENV $SCRAPE_INTERVAL_SEC $FIRST_SCRAPE_WAIT_MIN > ${SCRAPE_ENV}-out.log &
# sleep 1
# cd ..
#Install and setup monitoring
cd ./monitoring/
chmod u+x *.sh
./monitoring.sh $AWS_REGION
cd ..
##### Environment Output #####
chmod u+x environments.sh
./environments.sh
mv environments.txt iac-output.txt
echo_header "Environment Information"
cat iac-output.txt
#Kube-monkey
kubectl create clusterrolebinding add-on-cluster-admin --clusterrole=cluster-admin --serviceaccount=kube-system:default
kubectl apply -f kube-monkey/configmap.yaml
kubectl apply -f kube-monkey/deployment.yaml
#Kubernetes Dashboard
kubectl apply -f monitoring/kubernetes-dashboard.yaml
kubectl proxy &
echo "Kubernetes Dashboard localhost URL: http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/"
echo "Use following command to get the token : aws eks get-token --cluster-name eks-cluster-app | jq -r '.status.token'"
exit
| true |
787daa31cd8f66a1849512d936e6b7367f1891b1 | Shell | evnp/commit-protection | /pre-commit | UTF-8 | 442 | 3.6875 | 4 | [] | no_license | #!/bin/sh
# Pre-commit hook: refuse to commit when the staged diff contains the
# marker DO_NOT_COMMIT.  Remove the marker from the offending line(s)
# and re-commit to proceed.
#
# Run the diff only once and keep the matches in a variable; the original
# ran it twice and printed the matches through an unquoted $(...), which
# collapsed all matching lines onto a single line and glob-expanded them.
matches=$(git diff --staged | grep -n "DO_NOT_COMMIT")
if [ -n "$matches" ]
then
    echo "Error: You've attempted to commit a change that you've marked to not be committed."
    echo
    # printf '%s\n' preserves the line breaks and literal content of the matches
    printf '%s\n' "$matches"
    echo
    echo "If you'd like to commit this change, remove the DO_NOT_COMMIT"
    echo "flag from the line and re-commit."
    echo
    exit 1
fi
| true |
79107ae41535b65e396dd08f333a66f2cf6131b8 | Shell | Tharwat96/Shell_DBeng | /db_main.sh | UTF-8 | 10,385 | 4.1875 | 4 | [] | no_license | #!/bin/bash
export scriptDir=$(pwd)
source ./tableOuterOperation.sh
# mainMenu: top-level interactive loop of the database engine.
# Presents a whiptail menu with the four database-level operations and
# dispatches on the user's choice.  Databases live under ~/DBeng, one
# directory per database, always suffixed ".beng".  The function re-invokes
# itself after every operation (recursion, not a loop) and exits when the
# user presses the "Exit" (cancel) button.
function mainMenu() {
#Display welcome on first time use of the engine.
if [[ ! -d ~/DBeng ]]
then whiptail --title "Welcome to the DBeng project" --msgbox "Hello there,\nseems like this is the your first time using this project\nStart by creating a Database" 12 48
fi
# "3>&1 1>&2 2>&3" swaps stdout/stderr so the user's selection (which
# whiptail writes to stderr) can be captured by the command substitution.
dbOperations=$(whiptail --cancel-button "Exit" --title "DBeng main menu" --fb --menu "Choose an option" 15 60 6 \
"1" "Create database" \
"2" "List existing databases" \
"3" "Delete database" \
"4" "Do a table operation" \
3>&1 1>&2 2>&3)
exitstatus=$?
[[ "$exitstatus" = 1 ]] && exit; #test if exit button is pressed
case $dbOperations in
1) #Create Database
#Check if the parent directory for Database Engine exists, if not create one.
if [[ ! -d ~/DBeng ]]
then
mkdir ~/DBeng && cd ~/DBeng
else cd ~/DBeng
fi
userInput=$(whiptail --inputbox "Enter the name of your Database:" 15 60 --title "Enter DB name" 3>&1 1>&2 2>&3)
exitstatus=$? #test if cancel button is pressed if existstatus == 1 then it is pressed
if [[ "$exitstatus" = 0 ]]
then
# awk counts whitespace-separated words to reject multi-word names below
inputNF=($(echo $userInput | awk '{print NF}'))
if [ -z "$userInput" ] #Handle empty input
then whiptail --ok-button Done --msgbox "Database name cannot be empty, please try again." 15 60 #10 = Height 80 = Width
elif [ $inputNF -gt 1 ]
then whiptail --title "Error" --msgbox "The input can't be more than one, please enter just one continous valid database name." 16 65
else
setDBname="$userInput.beng" #Database name always ends with a .beng
if [[ ! -d ~/DBeng/$setDBname ]] #if there is no directory with the same name
then
mkdir $setDBname
whiptail --ok-button Done --msgbox "Database $setDBname created at `pwd` on `date`" 15 60 #10 = Height 80 = Width
else
whiptail --ok-button Done --msgbox "Database $setDBname already exists." 15 60 #10 = Height 80 = Width
fi
fi
fi
;;
2) #List Databases
if [[ ! -d ~/DBeng ]] #Check if no databases exist
then whiptail --title "No database found" --msgbox "Currently no Databases exist, start by creating one" 8 45
mainMenu #redisplay the menu
else
cd ~/DBeng
countDir=$(ls | wc -l) #Count how many databases currently exist
if [ $countDir -eq 0 ]
then whiptail --title "No databases exist in ~/DBeng" --msgbox "No databases to list" 8 45
else
# rev|cut|rev strips the trailing ".beng" suffix before displaying names
whiptail --title "Found $countDir Databases" --scrolltext --msgbox "`find . -type d -name "*.beng" -printf "%f\n" | rev | cut -d"." -f2- | rev`" 15 60
#List all the directories ending with .beng
#-printf changes find behavior, instead of outputing
#./directoryName this makes it output just directoryName
fi
fi
;;
3) #Delete Database
if [[ ! -d ~/DBeng ]] #Check if parent database directory doesn't exist
then whiptail --title "No ~/DBeng directory found" --msgbox "Currently no Databases exist, start by creating one" 8 45
else
cd ~/DBeng
countDir=$(ls | wc -l) #Count how many databases currently exist
if [ $countDir -eq 0 ]
then whiptail --title "No databases exist in ~/DBeng" --msgbox "Create a Database first" 8 45
else
userInput=$(whiptail --scrolltext --inputbox "Enter the name of the Database to be deleted\n*Name is case sensitive\nCurrent available DBs are:\n`find . -type d -name "*.beng" -printf "%f\n"| rev | cut -d"." -f2- | rev`" 15 60 --title "Delete Database" 3>&1 1>&2 2>&3)
exitstatus=$? #test if cancel button is pressed if existstatus == 1 then it is pressed
if [[ "$exitstatus" = 0 ]]
then
#find if the database exist or not, grep is used to give the correct
#return code as find always returns 0 "Success" even if the directory doesn't exist
# 1> redirection hides the find command ouptput "./$userInput"
find . -type d -name "$userInput.beng" | grep $userInput 1> /dev/null
if [ ! $? -eq 0 ]
then whiptail --title "Database doesn't exist" --msgbox "No database named $userInput found." 8 45
#then echo "No database named \"$userInput\" was found!!"
else
rm -rf "$userInput.beng"
if [ $? -eq 0 ]
then whiptail --title "Database Successfully removed" --msgbox "Database $userInput.beng was removed at `date`" 8 45
#echo "Database $userInput.beng was removed at `date`"
else
whiptail --title "Unknown error occured" --msgbox "For some reason we were unable to remove $userInput database" 8 45
#echo "An error occured during deletion"
fi
fi
fi
fi
fi
;;
4) #Do a table operation
if [[ ! -d ~/DBeng ]] #Check if parent database directory doesn't exist
then whiptail --title "No ~/DBeng directory found" --msgbox "Currently no Databases exist, start by creating one" 8 45
else
cd ~/DBeng
countDir=$(ls | wc -l) #Count how many databases currently exist
if [ $countDir -eq 0 ]
then whiptail --title "No databases exist in ~/DBeng" --msgbox "Create a Database first" 8 45
else
userInput=$(whiptail --scrolltext --inputbox "Enter the name of the Database from the list\n*Name is case sensitive\n`find . -type d -name "*.beng" -printf "%f\n"| rev | cut -d"." -f2- | rev` " 15 60 --title "Table Operation" 3>&1 1>&2 2>&3)
exitstatus=$? #test if cancel button is pressed if existstatus == 1 then it is pressed
if [[ "$exitstatus" = 0 ]]
then
if [ -z "$userInput" ] #checks if empty input
then
whiptail --ok-button Done --msgbox "No input found, please try again." 8 45 #8 = Height 45 = Width
else
#check if the name of the user input already exist
find . -type d -name "$userInput.beng" | grep $userInput 1> /dev/null #throws stdout to null so it don't output
if [ ! $? -eq 0 ] #if it doesn't exist, prompt an error
then whiptail --title "Database name mismatch" --msgbox "No database named $userInput found." 8 45
else cd "$userInput.beng" && tableOuterOperation
fi
fi
fi
fi
fi
;;
esac
# Tail-call back into the menu so the program keeps running after each
# operation.  NOTE(review): this is real recursion, not a loop — a very
# long session keeps deepening the call stack.
mainMenu
}
mainMenu
# select DBoperation in "Create Database" "List Databases" "Delete Database" "Use Database for table operations" "Exit"
# do
# case $DBoperation in
# "Create Database") createDB
# ;;
# "List Databases")
# #Check if no databases exist
# if [[ ! -d ~/DBeng ]]
# then
# echo "Start by creating a Database first"
# else
# cd ~/DBeng
# countDir=$(ls | wc -l) #Count how many databases currently exist
# if [ $countDir -eq 0 ]
# then echo "Currently no databases exist, create a Database first"
# else
# echo "Available Databases: $countDir"
# #List all the directories ending with .beng
# #-printf changes find behavior, instead of outputing
# #./directoryName this makes it output just directoryName
# find . -type d -name "*.beng" -printf "%f\n"
# fi
# fi
# ;;
# "Delete Database")
# #Check if no databases exist
# if [[ ! -d ~/DBeng ]]
# then echo "Start by creating a Database first"
# else
# cd ~/DBeng
# countDir=$(ls | wc -l) #Count how many databases currently exist
# if [ $countDir -eq 0 ]
# then echo "Currently no databases exist, create a Database first"
# else
# echo "Enter the name of the Database to be deleted:"
# read userInput
# #find if the database exist or not, grep is used to give the correct
# #return code as find always returns 0 "Success" even if the directory doesn't exist
# # 1> redirection hides the find command ouptput "./$userInput"
# find . -type d -name "$userInput.beng" | grep $userInput 1> /dev/null
# if [ ! $? -eq 0 ]
# then echo "No database named \"$userInput\" was found!!"
# else
# rm -rf "$userInput.beng"
# if [ $? -eq 0 ]
# then echo "Database $userInput.beng was removed at `date`"
# else
# echo "An error occured during deletion"
# fi
# fi
# fi
# fi
# ;;
# "Use Database for table operations")
# #Check if no databases exist
# if [[ ! -d ~/DBeng ]]
# then echo "Start by creating a Database first"
# else
# cd ~/DBeng
# countDir=$(ls | wc -l) #Count how many databases currently exist
# if [ $countDir -eq 0 ]
# then echo "Currently no databases exist, create a Database first"
# else
# echo "Select the database you want to do the operation on from the following:"
# find . -type d -name "*.beng" -printf "%f\n"
# read userInput
# find . -type d -name "$userInput.beng" | grep $userInput 1> /dev/null
# if [ ! $? -eq 0 ]
# then echo "Please enter a correct DB name from the list"
# else
# cd "$userInput.beng" && bash "$scriptDir/tableRelatedOp.sh"
# fi
# fi
# fi
# ;;
# "Exit")
# exit
# ;;
# *) echo "Enter a valid choice from the list"
# esac
# done
| true |
dc407609429ddb311854bb2c72ca49936fefc72e | Shell | miberecz/authelia | /.buildkite/hooks/pre-artifact | UTF-8 | 846 | 3.46875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Buildkite pre-artifact hook: package build outputs before artifact upload.
# Allow unset variables here: BUILDKITE_* / ARCH may be absent on some agents.
set +u

DOCKER_IMAGE=authelia/authelia

# After the CI build step, archive the dist and web trees.
if [[ $BUILDKITE_COMMAND == "authelia-scripts --log-level debug ci" ]];
then
    tar -czf dist.tar.gz dist
    tar -czf web.tar.gz web
fi

# After a docker image build step (the quoted =~ pattern is a plain
# substring match), extract the compiled binary and assets from the image
# and package them for Buildkite/GitHub releases, plus the image itself
# for the DockerHub push.  Expansions are quoted so an empty/odd $ARCH
# cannot word-split or glob (ShellCheck SC2086).
if [[ $BUILDKITE_LABEL =~ ":docker: Build Image" ]];
then
    echo "--- :docker: Saving artifacts for :buildkite: :docker: :github: releases"
    # Save binary for buildkite and github artifacts
    docker create --name authelia-binary "$DOCKER_IMAGE:latest"
    docker cp authelia-binary:/usr/app/authelia "./authelia-linux-$ARCH"
    docker cp authelia-binary:/usr/app/public_html ./
    docker rm -f authelia-binary
    tar -czf "authelia-linux-$ARCH.tar.gz" "authelia-linux-$ARCH" public_html
    sha256sum "authelia-linux-$ARCH.tar.gz" > "authelia-linux-$ARCH.tar.gz.sha256"
    # Saving image for push to DockerHub
    docker save "$DOCKER_IMAGE" | gzip > "authelia-image-$ARCH.tar.gz"
fi
d8aa4f04b2aca89bee6f5cea6a96a2e2a9412a71 | Shell | songweizhi/Katana_cmds | /Metagenomics_workshop_Bernd.sh | UTF-8 | 11,335 | 2.84375 | 3 | [] | no_license | ################################
## Trimmomatic ##
## Mac Terminal ##
## 04-Dec-2017 ##
## Workshop UNSW ##
################################
################################
## Basic Cmds ##
################################
# - gzip filename: compress file with the name “filename”; filename gets appended with “.gz”
# - gunzip filename: compress file with the name “filename”; “.gz” gets removed from filename
# Make files readable, writable and executable.
chmod 777 *
# Generate symbolic links to the gzipped fastq files (hint:‘ ln -s ../1_RawData/*.gz . ’) and the TruSeq3 adapter file
cd /Users/zzfanyi/Bioinfo/Scp_UnixMacTerminal/Metagenomics_workshop_04Dec17/MacUser/2_Processing
ln -s ../1_RawData/*.gz
ln -s ../0_DataNeeded/Trimmomatic-0.36/adapters/TruSeq3-PE.fa
/Users/zzfanyi/Bioinfo/Scp_UnixMacTerminal/Metagenomics_workshop_04Dec17/MacUser/1_RawData/WORKSHOP-L001_R1_001.fastq.gz
/Users/zzfanyi/Bioinfo/Scp_UnixMacTerminal/Metagenomics_workshop_04Dec17/MacUser/1_RawData/WORKSHOP-L001_R2_001.fastq.gz
################################
## FastQc ##
################################
# 1) Double click on the FastQC icon in the MacUser folder. Alternatively you go to fastqc folder and type ‘chmod +x fastqc’ then run fastqc by typing ‘./fastqc’
cd /Users/zzfanyi/Bioinfo/Scp_UnixMacTerminal/Metagenomics_workshop_04Dec17/MacUser/0_dataneeded/fastqc
./fastqc
# 2) Open the sequence files
# 3) Check the various information provided in the left panel
################################
## Trimmomatic ##
################################
cd /Users/zzfanyi/Bioinfo/Scp_UnixMacTerminal/Metagenomics_workshop_04Dec17/MacUser/0_dataneeded/Trimmomatic-0.36
java -jar trimmomatic-0.36.jar
cd /Users/zzfanyi/Bioinfo/Scp_UnixMacTerminal/Metagenomics_workshop_04Dec17/MacUser/2_Processing
java -jar /Users/zzfanyi/Bioinfo/Scp_UnixMacTerminal/Metagenomics_workshop_04Dec17/MacUser/0_dataneeded/Trimmomatic-0.36/trimmomatic-0.36.jar PE -phred33 /Users/zzfanyi/Bioinfo/Scp_UnixMacTerminal/Metagenomics_workshop_04Dec17/MacUser/1_RawData/WORKSHOP-L001_R1_001.fastq.gz /Users/zzfanyi/Bioinfo/Scp_UnixMacTerminal/Metagenomics_workshop_04Dec17/MacUser/1_RawData/WORKSHOP-L001_R2_001.fastq.gz output_forward_paired.fq output_forward_unpaired.fq output_reverse_paired.fq output_reverse_unpaired.fq ILLUMINACLIP:TruSeq3-PE.fa:2:30:10 SLIDINGWINDOW:4:20 MINLEN:36 HEADCROP:10 CROP:260
#########################################
## Assembly with SPAdes ##
#########################################
/Users/zzfanyi/Bioinfo/Scp_UnixMacTerminal/Metagenomics_workshop_04Dec17/MacUser/0_dataneeded/SPAdes-3.11.1-Darwin/bin/spades.py --only-assembler -1 output_forward_paired.fq -2 output_reverse_paired.fq -s output_forward_unpaired.fq -s output_reverse_unpaired.fq --meta -t 1 -m 3 -o metaSPAdes
# Check the output by going into the output directory “metaspades”.
cd /Users/zzfanyi/Bioinfo/Scp_UnixMacTerminal/Metagenomics_workshop_04Dec17/MacUser/2_Processing/metaSPAdes
perl /Users/zzfanyi/Bioinfo/Scp_UnixMacTerminal/Metagenomics_workshop_04Dec17/MacUser/0_dataneeded/get_fasta_stats.pl -T contigs.fasta
##############################################################
## Mapping reads to an assembly with Bowtie ##
##############################################################
# 01 build a map index for your assembled sequences.
/Users/zzfanyi/Bioinfo/Scp_UnixMacTerminal/Metagenomics_workshop_04Dec17/MacUser/0_DataNeeded/bowtie2-2.3.3.1-macos-x86_64/bowtie2-build contigs.fasta CTG
# 02 create a mapping file called “mapping.sam”. (Note this file is very large).
/Users/zzfanyi/Bioinfo/Scp_UnixMacTerminal/Metagenomics_workshop_04Dec17/MacUser/0_DataNeeded/bowtie2-2.3.3.1-macos-x86_64/bowtie2 -x CTG -1 output_forward_paired.fq -2 output_reverse_paired.fq -U output_forward_unpaired.fq -U output_reverse_unpaired.fq -S mapping.sam
# 03 Open Tablet by typing ‘tablet’ (or clicking on the icon on Mac). Select “Import an assembly” and then select your mapping file as your “primary assembly file” and your assembled fasta file ‘contigs.fasta’ as your “reference file”. Press “Open” and wait for the assembly to load.
#################################################
## Genome binning with Metabat ##
#################################################
# 01 convert the .sam format into the much smaller .bam format.
samtools view -bS mapping.sam > mapping.bam
# 02 created the sorted .bam file.
samtools sort mapping.bam > mapping.sorted
# 03 convert the .bam file into a sequence coverage file.
/Users/zzfanyi/Bioinfo/Scp_UnixMacTerminal/Metagenomics_workshop_04Dec17/MacUser/0_DataNeeded/metabat2/jgi_summarize_bam_contig_depths --outputDepth depth.txt mapping.sorted
# 04 You should get a few new .fa files (.e.g. MetaBAT_bin.1.fa) that represent the bin sequences.
/Users/zzfanyi/Bioinfo/Scp_UnixMacTerminal/Metagenomics_workshop_04Dec17/MacUser/0_DataNeeded/metabat2/metabat1 -i contigs.fasta -a depth.txt -o MetaBAT_bin
##############################################################
## Annotation with PROKKA and Prodigal ##
##############################################################
# Prokka is a software tool for the rapid annotation of prokaryotic genomes. A typical 4 Mbp genome can be fully annotated
# in less than 10 minutes on a quad-core computer, and scales well to 32 core SMP systems. It produces GFF3, GBK and SQN files
# that are ready for editing in Sequin and ultimately submitted to Genbank/DDJB/ENA. It uses Prodigal, which is a well-known
# tool to predict open reading frames.
# In addition, the sequencing centre needs to be provided and the minimum contig length is set to 200. The ‘--accver’ will add
# the version of the genome/bin. We use the ‘--gcode 11’ option to tell prokka that we are dealing with the genetic code of
# prokaryotes. The ‘--locustag XXX’ option will add XXX as prefix to all loci found in the genome. The locustag usually is a
# short abbreviation giving basic information in taxonomy and/or other environmental features, such as the sample origin.
prokka --compliant --centre goettingen --accver 1 --gcode 11 --cpus 1 --locustag BIN1 --outdir BIN1 MetaBAT_bin.1.fa
# Prodigal to predict ORFs directly.
# The output will be two files: MetaBAT_bin.1.ffn will contain the nucleotide sequences of the predicted ORFs, while MetaBAT_bin.1.faa
# will contain the protein sequences of the predicted ORFs.
/Users/zzfanyi/Bioinfo/Scp_UnixMacTerminal/Metagenomics_workshop_04Dec17/MacUser/0_DataNeeded/prodigal.osx.10.9.5 -i MetaBAT_bin.1.fa -a MetaBAT_bin.1.faa -d MetaBAT_bin.1.ffn -o MetaBAT_bin.1.txt
##############################################################
## Annotation with Kaiju and KEGG ##
##############################################################
# Kaiju is a program for sensitive and fast taxonomic classification of metagenomic or metatranscriptomics sequences.
# Each sequence is assigned to a taxon in the NCBI taxonomy by comparing it to a reference database containing microbial
# and viral protein sequences. By using protein-level classification, Kaiju achieves a higher sensitivity compared with
# methods based on nucleotide comparison. Kaiju can use either the set of available complete genomes from NCBI RefSeq or
# the microbial subset of the NCBI BLAST non-redundant protein database nr, optionally also including fungi and microbial
# eukaryotes.
#
# DNA sequences are translated into amino acid sequences, which are then searched in the database using a modified backward
# search on a memory-efficient implementation of the Burrows-Wheeler transform, which finds maximum exact matches (MEMs),
# optionally allowing mismatches in the protein alignment.
1) Change into the Prokka folder
2) Gzip the ffn file that contains the ORFs (Kaiju only accepts gzipped files)
3) Upload your file to the kaiju webserver http://kaiju.binf.ku.dk/server (Use default values)
4) Inspect the results (the taxon path count and Krona files are quite useful for classifying your bins)
5) Download the output files to the 2_Processing folder
6) Gunzip the kaiju.out.gz file
7)
perl /Users/zzfanyi/Bioinfo/Scp_UnixMacTerminal/Metagenomics_workshop_04Dec17/MacUser/0_DataNeeded/kaiju_reader.pl kaiju.out
# KEGG (Kyoto Encyclopedia of Genes and Genomes) is a collection of databases dealing with genomes, biological pathways,
# diseases, drugs, and chemical substances. KEGG is utilized for bioinformatics research and education, including data
# analysis in genomics, metagenomics, metabolomics and other omics studies, modelling and simulation in systems biology,
# and translational research in drug development.
1) Change into the Prokka folder
2) Upload the faa file containing all the proteins predicted for the bin to blastkoala (http://www.kegg.jp/blastkoala/).
You need to provide some basic data: the taxonomy of your bin (wait for kaiju results) and which database you would like
to use (go for species_prokaryotes). You will receive an email asking for job confirmation.
3) Inspect the results
4) Download the annotation data to the 2_Processing folder
5)
perl /Users/zzfanyi/Bioinfo/Scp_UnixMacTerminal/Metagenomics_workshop_04Dec17/MacUser/0_DataNeeded/kegg_reader.pl user_ko.txt
# KEGG also provides taxonomic data. However, this data is based on blast against all KEGG genomes (currently around 6000 complete genomes).
# The smallest database for Kaiju (NCBI RefSeq) contains more than 8000 complete genomes.
##############################################################
## Others ##
##############################################################
# You can also annotate your entire metagenome (but this will run for a while):
1) Type ‘prodigal -i metaSPAdes/contigs.fasta -a metagenome.faa -d metagenome.ffn -o metagenome.1.txt -p meta’
[For Mac users, please see prokka alternatives section]
2) The ffn file contains the ORF sequences and can be uploaded after compression with gzip to Kaiju.
3) The faa file, containing all the protein sequences, can be submitted to ghostkoala (http://www.kegg.jp/ghostkoala/)
4) Inspect the output
5) Try to run them through the two perl scripts
Other notable pipelines/tools for taxonomic and functional annotation:
IMG: https://img.jgi.doe.gov/
MG-RAST: http://metagenomics.anl.gov/
MEGAN: http://www-ab.informatik.uni-tuebingen.de/software/megan6/
Further reading:
Overview Metagenomics:
https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3351745/
FastQC manual
https://biof-edu.colorado.edu/videos/dowell-short-read-class/day-4/fastqc-manual
Trimmomatic
https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4103590/
SPAdes
https://www.ncbi.nlm.nih.gov/pubmed/22506599
Bowtie2
https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3010897/
MetaBAT
https://peerj.com/articles/1165/
Prodigal
https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2848648/
Prokka
https://www.ncbi.nlm.nih.gov/pubmed/24642063
KEGG
https://www.ncbi.nlm.nih.gov/pubmed/10592173
Kaiju
https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4833860/
| true |
35c665b373cd43b6d41ec054fc9d618d323925d8 | Shell | phylame/jemiv | /imabw/src/main/scripts/imabw.sh | UTF-8 | 2,831 | 3.640625 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# ------------------------------------------------------------------------------
# Copyright 2014-2016 Peng Wan <phylame@163.com>
#
# This file is part of Imabw.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
# Show an error dialog telling the user that Imabw could not start.
# Tries the available desktop notification tools in order of preference
# (zenity, kdialog, xmessage, notify-send) and falls back to plain stdout.
# Uses "command -v" instead of the deprecated external "which" (SC2230).
# Arguments:
#   $1 - message text to display
message()
{
    TITLE="Cannot start PW Imabw"
    if command -v zenity > /dev/null 2>&1; then
        zenity --error --title="$TITLE" --text="$1"
    elif command -v kdialog > /dev/null 2>&1; then
        kdialog --error --title "$TITLE" "$1"
    elif command -v xmessage > /dev/null 2>&1; then
        xmessage -center "ERROR: $TITLE: $1"
    elif command -v notify-send > /dev/null 2>&1; then
        notify-send "ERROR: $TITLE: $1"
    else
        # printf instead of echo: bash's echo printed the "\n" literally
        printf 'ERROR: %s\n%s\n' "$TITLE" "$1"
    fi
}
# Tools used below, resolved once via which so they can be invoked by path.
GREP=`which egrep`
CAT=`which cat`
# Get the IMABW home
# If IMABW_HOME is unset (or not a directory), derive it from the location
# of this script, first following any chain of symlinks to the real file.
if [ -z "$IMABW_HOME" -o ! -d "$IMABW_HOME" ]; then
PRG="$0"
# need this for relative symlinks
while [ -h "$PRG" ]; do
ls=`ls -ld "$PRG"`
# extract the symlink target from the "ls -l" output ("... -> target")
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '/.*' > /dev/null; then
PRG="$link"
else
PRG=`dirname "$PRG"`"/$link"
fi
done
# home is assumed to be one level above the script's own directory
IMABW_HOME=`dirname "$PRG"`/..
# make it fully qualified
IMABW_HOME=`cd "$IMABW_HOME" > /dev/null && pwd`
fi
# Locate the JVM options file, in order of precedence:
# 1. $IMABW_VM_OPTIONS (explicit), 2. per-user file under ~/.imabw,
# 3. <home>/bin/imabw.vmoptions, 4. <home>/imabw.vmoptions (fallback).
VM_OPTIONS_FILE=""
if [ -n "$IMABW_VM_OPTIONS" -a -r "$IMABW_VM_OPTIONS" ]; then
# explicit
VM_OPTIONS_FILE="$IMABW_VM_OPTIONS"
elif [ -r "$HOME/.imabw/imabw.vmoptions" ]; then
# user-overridden
VM_OPTIONS_FILE="$HOME/.imabw/imabw.vmoptions"
elif [ -r "$IMABW_HOME/bin/imabw.vmoptions" ]; then
# default, standard installation
VM_OPTIONS_FILE="$IMABW_HOME/bin/imabw.vmoptions"
else
VM_OPTIONS_FILE="$IMABW_HOME/imabw.vmoptions"
fi
# Read the options file, dropping "#" comment lines; warn if it is missing.
VM_OPTIONS=""
if [ -r "$VM_OPTIONS_FILE" ]; then
VM_OPTIONS=`"$CAT" "$VM_OPTIONS_FILE" | "$GREP" -v "^#.*"`
else
message "Cannot find VM options file"
fi
# IMABW main class
IMABW_CLASS=jem.imabw.app.AppKt
# Set extension JAR
IMABW_CLASS_PATH=""
LIB_DIR="$IMABW_HOME"/lib
EXT_DIR="$IMABW_HOME"/lib/ext
# Append every .jar found directly inside directory $1 to the global
# IMABW_CLASS_PATH (colon separated).  May be called repeatedly to build
# up the classpath from several directories.
# Fixes two defects of the original version:
#   * a non-matching glob no longer appends a literal "<dir>/*.jar" entry
#   * only a single leading ":" is stripped, so a second call no longer
#     chops the first character off the classpath built by a previous call
#     (the old "expr substr ... 2 len" always removed the first character)
find_jars(){
    local file
    if [ -d "$1" ]; then
        for file in "$1"/*.jar; do
            # skip the unexpanded pattern when the directory has no jars
            [ -e "$file" ] || continue
            IMABW_CLASS_PATH="$IMABW_CLASS_PATH:$file"
        done
        # strip one leading separator, if present
        IMABW_CLASS_PATH="${IMABW_CLASS_PATH#:}"
    fi
}
# Build the classpath from all jars under lib/ and lib/ext/.
find_jars ${LIB_DIR}
find_jars ${EXT_DIR}
# Run Jem SCI
# VM_OPTIONS is intentionally unquoted so each option becomes a separate
# argument; all remaining script arguments are forwarded to the app.
java $VM_OPTIONS -cp "${IMABW_CLASS_PATH}" ${IMABW_CLASS} "$@"
| true |
a3e72f31e1342df1ad5d55d538020e1872087e64 | Shell | Mees-Molenaar/NGS_Alzheimer_Project | /src/samtobam.sh | UTF-8 | 1,012 | 4.09375 | 4 | [] | no_license | #! /bin/bash
# Convert, sort and index Hisat2 SAM alignments for every SRR* sample
# directory found under the working directory.
#
# Options:
#   -d DIR   working directory holding the sample directories (default: data/)
#   -l PATH  samtools executable to use (default: samtools on $PATH)
WORKDIR="data/"
SAMTOOLS="samtools"

while getopts "d:l:" opt; do
    case $opt in
        d) WORKDIR=$OPTARG ;;
        l) SAMTOOLS=$OPTARG ;;
        *) echo 'error' >&2
           exit 1
    esac
done

if [ ! -d "$WORKDIR" ]; then
    echo "Could not find working directory: $WORKDIR, exiting. Please make sure the working directory exists"
    exit 1
fi

# Iterate over the sample directories inside $WORKDIR.  The original script
# did "cd $WORKDIR" and then globbed "/$WORKDIR/*/", which resolved the glob
# against the filesystem root and doubled the prefix on every samtools path;
# globbing relative to $WORKDIR (without the cd) fixes both problems.
for dir in "$WORKDIR"/*/; do
    dir=${dir%*/}        # drop the trailing slash left by the glob
    dir=${dir##*/}       # keep only the directory name itself
    if [[ $dir =~ "SRR" ]]; then
        echo "Samtools is converting ${dir}.sam to a bam file."
        "$SAMTOOLS" view -bS "$WORKDIR/$dir/Hisat2/$dir.sam" > "$WORKDIR/$dir/Hisat2/$dir.bam"
        echo "Samtools is sorting ${dir}.bam"
        "$SAMTOOLS" sort "$WORKDIR/$dir/Hisat2/$dir.bam" -o "$WORKDIR/$dir/Hisat2/$dir.sorted.bam"
        echo "Samtools is indexing ${dir}.sorted.bam"
        "$SAMTOOLS" index "$WORKDIR/$dir/Hisat2/$dir.sorted.bam"
    fi
done
9549761e377f362fffddc0fb7a64411b5d0cc116 | Shell | BrianWGray/pentest-utils | /staged-scanners/unicornscan-staged.sh | UTF-8 | 718 | 3.78125 | 4 | [
"BSD-2-Clause"
] | permissive | #!/bin/bash
# Staged port scanner: run a full-range TCP scan followed by a full-range
# UDP scan of a single target with unicornscan, logging results to
# unicorn-tcp.txt / unicorn-udp.txt in the current directory.  Each stage
# is skipped when its result file already exists, so re-running resumes.
set -e
set -x
set -o pipefail
IFS=$'\n\t'
# Exactly one argument (the target) is required.
if [ -z $1 ]; then
echo '[!] Usage: unicorn-staged <target>'
exit 1
fi
target=$1
tcpfile='unicorn-tcp.txt'
# TCP stage: -mT = TCP mode, ":a" = all ports, -i tap0 = scan interface,
# -l = write results to the given log file.
if ! [ -f $tcpfile ]; then
echo "[+] Beginning TCP UnicornScan (all ports) of $target"
unicornscan -i tap0 -mT ${target}:a -l ${tcpfile}
echo "[*] TCP UnicornScan complete"
cat ${tcpfile}
else
echo "[+] TCP UnicornScan file already exists. Skipping..."
fi
udpfile='unicorn-udp.txt'
# UDP stage: identical to the TCP stage but with -mU (UDP mode).
if ! [ -f $udpfile ]; then
echo "[+] Beginning UDP UnicornScan (all ports) of $target"
unicornscan -i tap0 -mU ${target}:a -l ${udpfile}
echo "[*] UDP UnicornScan complete"
cat ${udpfile}
else
echo "[+] UDP UnicornScan file already exists. Skipping..."
fi
eefbd8854fe25ab18e3d786fa4cc0332716de1d9 | Shell | rohandavidg/Couch_CARRIERS_pipeline | /bin/CARRIERS_RunBigGVCF.sh | UTF-8 | 2,836 | 3.375 | 3 | [] | no_license | #! /bin/bash
#####################################################
#this script is used to estimate the
#performance of the primer for each lane
#####################################################
# Print usage/help text.  Note: $0 is expanded inside the here-document.
usage() {
cat <<EOF
usage: $0 options
This script is used to estimate the performance of
the carriers primers for each lane in the run
OPTIONS:
-h Show this message
-g gVCF list
-t path to TOOL info
-o output directory
EOF
}
# Parse command line options into gVCF_LIST, TOOL_INFO and OUT_DIR.
while getopts "hg:t:o:" OPTION
do
case $OPTION in
h) usage ; exit 1 ;;
g) gVCF_LIST=$OPTARG ;;
t) TOOL_INFO=$OPTARG ;;
o) OUT_DIR=$OPTARG ;;
?) usage ; exit 1 ;;
esac
done
# SGE resource / queue flags used by the qsub submission in run_gatk.
MEM="-l h_vmem=12G"
QUE="-q ngs-sec -l medp=TRUE"
#QUE="-q lg-mem"
# Per-interval VCF subsets are written below this directory.
output_dir=$OUT_DIR/variants/subsets
if [ -d $output_dir ];then
true
else
echo $output_dir
mkdir $output_dir
fi
# Site-specific tool locations and GATK settings, exported so they are
# visible inside the qsub'd jobs (submitted with -V in run_gatk).
BEDTOOLS=/data5/bsi/epibreast/m087494.couch/Scripts/Progams/bin/bedtools
export JAVA7="/usr/local/biotools/java/jdk1.7.0_03/bin"
export REF_GENOME="/data2/bsi/reference/sequence/human/ncbi/hg19/allchr.fa"
export GenotypeGVCFs_JVM="-XX:CompileThreshold=1000 -XX:MaxHeapFreeRatio=70 -XX:ReservedCodeCacheSize=256m -Xmx58g -Xms8g"
export GATK="/data5/bsi/bictools/alignment/gatk/3.4-46/"
export THREADS="1" # Better memory effiency
export gatk_param="-R $REF_GENOME -et NO_ET -K $GATK/Hossain.Asif_mayo.edu.key"
export output=$output_dir
export gvcfList=$gVCF_LIST
export TOOLINFO=$TOOL_INFO
# set_intervals: build the per-job interval files under $OUT_DIR/intervals.
# Reads the target bed file path from the "ONTARGET=" line of the TOOL info
# file, pads every region by 200 bp on each side, merges overlaps with
# bedtools, then splits the result into 30-line chunks (one .bed file per
# future GenotypeGVCFs job).  Skipped when "intervals" already exists.
function set_intervals {
cd $OUT_DIR
if [ ! -d intervals ];
then
# path of the target bed file, taken from the "ONTARGET=..." line
TARGET_BED=`cat $TOOL_INFO | grep "ONTARGET" | cut -d"=" -f2`
echo $TARGET_BED
# widen each interval by +/-200 bp and merge overlapping regions
echo $TARGET_BED | xargs cat | awk '{print $1"\t"$2-200"\t"$3+200}' | $BEDTOOLS merge > target.wide.bed
echo "Make Dir & Split Intervals"
mkdir intervals
cd intervals
# 30 regions per chunk, 4-digit numeric suffixes (target_0000, ...)
split -a 4 -d -l 30 ../target.wide.bed target_
for file in *; do mv $file $file.bed; done
cd ../
fi
}
# run_gatk: submit one GATK GenotypeGVCFs SGE job per interval file.
# For every chunk produced by set_intervals, qsub a job that joint-genotypes
# the gVCF list restricted to that chunk (-L) and writes <chunk>.vcf.gz
# into $output.  check_qstat is run after each submission — presumably to
# throttle the queue to the 8000 passed as its second argument (TODO:
# confirm against the helper script).
function run_gatk {
cd $OUT_DIR
for file in intervals/*;
do
f=`basename $file`;
outputvcf="$f.vcf.gz";
range="-L $OUT_DIR/$file"
#echo "
qsub -e $OUT_DIR/logs -o $OUT_DIR/logs $QUE -N GVCF_$f -M gnanaolivu.rohandavid@mayo.edu -m a -l h_stack=20M -pe threaded 2 -V -cwd $MEM -b y $JAVA7/java $GenotypeGVCFs_JVM -Djava.io.tmpdir=$output/temp/ -jar $GATK/GenomeAnalysisTK.jar -T GenotypeGVCFs -V $gvcfList -nt $THREADS -o $output/$outputvcf $range $gatk_param
/projects/bsi/bictools/scripts/dnaseq/GENOME_GPS/tags/4.0.1/check_qstat.sh $TOOLINFO 8000
done
}
# Argument validation and main flow.  All three data options (-g, -t, -o)
# are required; note this check counts raw words, not parsed options.
if [ $# -eq 0 ];then
echo "No arguments supplied"
usage
exit 1
elif [ $# -lt 3 ];then
usage
exit 1
else
if [ -d $OUT_DIR ];then
cd $OUT_DIR
# start from a fresh logs directory, then build intervals and submit jobs
if [ ! -d logs ];then
mkdir logs
set_intervals
run_gatk
else
rm -rf logs
mkdir logs
set_intervals
run_gatk
fi
else
echo "$OUT_DIR doesnt exist"
fi
fi
| true |
9c392fd6a731307a6a7c56f99b02e8b2e6263c36 | Shell | therson/Hackathon2017 | /shell_test/importSAMTopology.sh | UTF-8 | 527 | 2.921875 | 3 | [] | no_license | importSAMTopology () {
SAM_DIR=/root/Hackathon2017/SAM/MachineLogAnalytics-v0.json
TOPOLOGY_NAME=MachineLog-Demo
#Import Topology: fill the cluster-specific placeholders in the template,
#then upload it to the SAM REST API and capture the new topology id.
#Requires AMBARI_HOST, CLUSTER_NAME and NAMESPACE_ID to be set by the caller.
#All expansions are quoted so values cannot word-split or glob (SC2086);
#';' is used as the sed delimiter because the values may contain '/'.
sed -r -i 's;\{\{HOST1\}\};'"$AMBARI_HOST"';g' "$SAM_DIR"
sed -r -i 's;\{\{CLUSTERNAME\}\};'"$CLUSTER_NAME"';g' "$SAM_DIR"
#POST the topology JSON and extract the numeric "id" from the response
export TOPOLOGY_ID=$(curl -F file=@"$SAM_DIR" -F 'topologyName='"$TOPOLOGY_NAME" -F 'namespaceId='"$NAMESPACE_ID" -X POST "http://$AMBARI_HOST:7777/api/v1/catalog/topologies/actions/import" | grep -Po '\"id\":([0-9]+)' | grep -Po '([0-9]+)')
echo "$TOPOLOGY_ID"
}
importSAMTopology
cc6449a50bd6b018b08bb1c5038ff0985b4795df | Shell | JohannesKleine/usb-live-linux | /scripts/functions.sh | UTF-8 | 416 | 3.890625 | 4 | [] | no_license | #!/bin/sh
# Print the absolute path of the top-level directory of the enclosing git
# repository (requires git and a cwd inside a work tree).
repo_root() {
git rev-parse --show-toplevel
}
# Change the working directory to the root of the git repository that
# contains this script. Sets the global ROOT as a side effect.
# Fixes: the backticked, unquoted `cd \`dirname "${0}"\`` broke on paths
# containing spaces; both cd's now bail out on failure instead of silently
# continuing in the wrong directory.
cd_repo_root() {
    echo "current dir: " "$(pwd)"
    cd "$(dirname "${0}")" || return 1
    echo "now at $(pwd), finding repo root"
    ROOT=$(repo_root)
    cd "${ROOT}" || return 1
}
# Return 0 if the given program name(s) resolve to commands in PATH;
# otherwise print an apt install hint and return 1.
# Fix: the old test `[ ! -z $(command -v ...) ]` left the substitution
# unquoted, so on a missing program it degenerated to `[ ! -z ]` — quoting
# the substitution and using -n makes the test well-defined.
check_program_exists() {
    [ -n "$(command -v "$@")" ] && return 0
    echo "Program '$*' does NOT exist in path, try # apt install $*"
    return 1
}
| true |
aadbfc99923736bbcdb79bc536dc3ef74bbaaa10 | Shell | mguilhermetavares/LeagueOfLegends | /ScriptLoL.sh | UTF-8 | 1,156 | 2.90625 | 3 | [
"Apache-2.0"
] | permissive | #! /bin/bash
#####################################################
#                                                   #:
#                                                   #:
#          DEVELOPED BY ISRAEL WILSON               #:
#               April 10, 2019                      #:
#                                                   #:
#                                                   #:
#####################################################
# Go to the Desktop.
cd ~/Desktop
# Create the directory used to generate the command.
mkdir LOLWorkingMicFile.command
# Enter the base directory where the game was installed.
cd '/Applications/League of Legends.app/Contents/LoL/'
# Launch the game from the terminal.
./LeagueClient.app/Contents/MacOS/LeagueClient
#########################
#                       #
#     .:. Notes .:.     #
#                       #
#########################
: '
1. Não fechar o terminal, basta minimizar o mesmo.
2. Dar o permissionamento para o script ( chmod +x ScriptLoL.sh).
3. Executar o script com o terminal.
4. Sempre que fechar o jogo abrir usando o script.
5. Aproveitem.
'
# End of script. (The `: '...'` block above is a string argument to the no-op
# command, i.e. runtime text rather than a comment, so it is left as-is.)
| true |
c03ebebd748af68949d9906913f2ed834010c907 | Shell | KarinaApodaca-NOAA/EMC_verif-global | /scripts/exgrid2obs_step1.sh | UTF-8 | 5,829 | 3.359375 | 3 | [] | no_license | #!/bin/sh
# Program Name: grid2obs_step1
# Author(s)/Contact(s): Mallory Row
# Abstract: Run METplus for global grid-to-observation verification
# to produce SL1L2 and VL1L2 stats
# History Log:
# 2/2019: Initial version of script
#
# Usage:
# Parameters:
# argument to script
# Input Files:
# file
# Output Files:
# file
#
# Condition codes:
# 0 - Normal exit
#
# User controllable options: None
set -x
# Set up directories
mkdir -p $RUN
cd $RUN
# Set up environment variables for initialization, valid, and forecast hours and source them
if [ $g2o1_fhr_max -gt 168 ]; then
    export g2o1_fhr_max=168
fi
python $USHverif_global/set_init_valid_fhr_info.py
status=$?
[[ $status -ne 0 ]] && exit $status
[[ $status -eq 0 ]] && echo "Succesfully ran set_init_valid_fhr_info.py"
echo
. $DATA/$RUN/python_gen_env_vars.sh
# BUG FIX: capture the exit status of the sourced script; previously the
# stale $status from the python call above was re-checked here.
status=$?
[[ $status -ne 0 ]] && exit $status
[[ $status -eq 0 ]] && echo "Succesfully sourced python_gen_env_vars.sh"
echo
# Link needed data files and set up model information
mkdir -p data
python $USHverif_global/get_data_files.py
# BUG FIX: each step must record its own exit status before the checks below.
status=$?
[[ $status -ne 0 ]] && exit $status
[[ $status -eq 0 ]] && echo "Succesfully ran get_data_files.py"
echo
# Create output directories for METplus
python $USHverif_global/create_METplus_output_dirs.py
status=$?
[[ $status -ne 0 ]] && exit $status
[[ $status -eq 0 ]] && echo "Succesfully ran create_METplus_output_dirs.py"
echo
# Create job scripts to run METplus
python $USHverif_global/create_METplus_job_scripts.py
status=$?
[[ $status -ne 0 ]] && exit $status
[[ $status -eq 0 ]] && echo "Succesfully ran create_METplus_job_scripts.py"
# Run METplus job scripts
chmod u+x metplus_job_scripts/job*
# MPMD mode bundles the per-task job scripts into poe_jobs* command files and
# hands each one to the platform's parallel launcher; otherwise every job
# script is run serially in this shell.
if [ $MPMD = YES ]; then
ncount=$(ls -l metplus_job_scripts/poe* |wc -l)
nc=0
while [ $nc -lt $ncount ]; do
nc=$((nc+1))
poe_script=$DATA/$RUN/metplus_job_scripts/poe_jobs${nc}
chmod 775 $poe_script
export MP_PGMMODEL=mpmd
export MP_CMDFILE=${poe_script}
# Select the launcher appropriate to the current HPC platform.
if [ $machine = WCOSS_C ]; then
launcher="aprun -j 1 -n ${nproc} -N ${nproc} -d 1 cfp"
elif [ $machine = WCOSS_DELL_P3 ]; then
launcher="mpirun -n ${nproc} cfp"
elif [ $machine = HERA -o $machine = ORION ]; then
launcher="srun --export=ALL --multi-prog"
fi
$launcher $MP_CMDFILE
done
else
ncount=$(ls -l metplus_job_scripts/job* |wc -l)
nc=0
while [ $nc -lt $ncount ]; do
nc=$((nc+1))
# +x keeps xtrace (enabled above via set -x) off in the child shell.
sh +x $DATA/$RUN/metplus_job_scripts/job${nc}
done
fi
# Copy data to user archive or to COMOUT
gather_by=$g2o1_gather_by
DATE=${start_date}
# Loop over each verification day from start_date to end_date (YYYYMMDD).
while [ $DATE -le ${end_date} ] ; do
    export DATE=$DATE
    export COMIN=${COMIN:-$COMROOT/$NET/$envir/$RUN.$DATE}
    export COMOUT=${COMOUT:-$COMROOT/$NET/$envir/$RUN.$DATE}
    m=0
    arch_dirs=($model_arch_dir_list)
    for model in $model_list; do
        export model=$model
        export arch_dir=${arch_dirs[m]}
        # Fall back to the first archive dir when no entry exists for this model.
        arch_dir_strlength=$(echo -n $arch_dir | wc -m)
        if [ $arch_dir_strlength = 0 ]; then
            arch_dir=${arch_dirs[0]}
        fi
        for type in $g2o1_type_list; do
            # Hours to gather by: valid hours (per obs type) or forecast cycles.
            if [ $gather_by = VALID ]; then
                if [ $type = upper_air ]; then
                    gather_by_hour_list=$g2o1_vhr_list_upper_air
                elif [ $type = conus_sfc ]; then
                    gather_by_hour_list=$g2o1_vhr_list_conus_sfc
                fi
            else
                gather_by_hour_list=$g2o1_fcyc_list
            fi
            for gather_by_hour in $gather_by_hour_list; do
                # VSDB-style stat files carry the valid-hour window in the name.
                if [ $gather_by = VSDB ]; then
                    if [ $type = upper_air ]; then
                        valid_hr_beg=$g2o1_valid_hr_beg_upper_air
                        valid_hr_end=$g2o1_valid_hr_end_upper_air
                    elif [ $type = conus_sfc ]; then
                        valid_hr_beg=$g2o1_valid_hr_beg_conus_sfc
                        valid_hr_end=$g2o1_valid_hr_end_conus_sfc
                    fi
                    verif_global_filename="metplus_output/gather_by_$gather_by/stat_analysis/$type/$model/${model}_${DATE}${valid_hr_beg}_${DATE}${valid_hr_end}_${gather_by_hour}.stat"
                else
                    verif_global_filename="metplus_output/gather_by_$gather_by/stat_analysis/$type/$model/${model}_${DATE}${gather_by_hour}.stat"
                fi
                arch_filename="$arch_dir/metplus_data/by_$gather_by/grid2obs/$type/${gather_by_hour}Z/$model/${model}_${DATE}.stat"
                comout_filename="$COMOUT/${model}_grid2obs_${type}_${DATE}_${gather_by_hour}Z_${gather_by}.stat"
                if [ -s $verif_global_filename ]; then
                    if [ $SENDARCH = YES ]; then
                        mkdir -p $arch_dir/metplus_data/by_$gather_by/grid2obs/$type/${gather_by_hour}Z/$model
                        cpfs $verif_global_filename $arch_filename
                    fi
                    if [ $SENDCOM = YES ]; then
                        mkdir -p $COMOUT
                        cpfs $verif_global_filename $comout_filename
                        if [ "${SENDDBN^^}" = YES ]; then
                            # BUG FIX: alert on $verif_global_filename; the
                            # original referenced the misspelled (and unset)
                            # $veif_global_filename.
                            $DBNROOT/bin/dbn_alert MODEL VERIF_GLOBAL $job $verif_global_filename
                        fi
                    fi
                else
                    echo "*************************************************************"
                    echo "** WARNING: $verif_global_filename was not generated or zero size"
                    echo "*************************************************************"
                fi
            done
        done
        m=$((m+1))
    done
    # Advance DATE by 24 hours (NDATE works on YYYYMMDDHH; strip back to YYYYMMDD).
    DATE=$(echo $($NDATE +24 ${DATE}00 ) |cut -c 1-8 )
done
# Send data to METviewer AWS server
# Quoted so an unset/empty SENDMETVIEWER evaluates to false instead of
# producing a [ syntax error.
if [ "$SENDMETVIEWER" = YES ]; then
    python $USHverif_global/load_to_METviewer_AWS.py
fi
| true |
941c529d7303712a4af672fd6720d88fae1dcfa1 | Shell | brathnayaka/eventing | /tests/ci/setupvm | UTF-8 | 525 | 2.515625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash -xv
set -e
# Remove stale markers/lists from previous builds.
rm -f ~/*.good
rm -f ~/gerrit.list
# Clean out lost+found dirs so they don't interfere with the copies below.
sudo rm -rf /var/www/lost+found
sudo rm -rf /opt/build/lost+found
# Seed the web root from the data volume.
sudo cp -r ~/xvdb/.htaccess /var/www/
sudo cp -r ~/xvdb/var/www/* /var/www/
# BUG FIX: chown's recursive flag is uppercase -R; lowercase -r is not a
# valid chown option, so under `set -e` the original aborted here.
sudo chown -R buildbot.buildbot /var/www
sudo chown -R buildbot.buildbot /opt/build
# Configure ccache once; presence of CCACHE_DIR in ~/.profile is the marker.
# NOTE(review): assumes /opt/.ccache already exists for the conf write below
# — confirm, since a missing dir would abort under set -e.
if grep -qs CCACHE_DIR ~/.profile; then
    echo ccache already setup
else
    echo 'export PATH="/usr/lib/ccache:$PATH"' >> ~/.profile
    echo 'export CCACHE_DIR=/opt/.ccache' >> ~/.profile
    echo 'max_size = 12G' >> /opt/.ccache/ccache.conf
fi
| true |
3db680274bfa4e1e52343e70703d2a03f22be16a | Shell | lnls-sirius/docker-rbac-wildfly | /deploy-app.sh | UTF-8 | 429 | 3.28125 | 3 | [] | no_license | #!/usr/bin/env bash
set -a
set -u
set -e

# Service to deploy, taken from the first argument ("rbac" or "mgmt").
SERVICE="$1"

# Source env vars (auto-exported because of `set -a`); provides
# RBAC_TARGET_WAR and POSTGRES_VERSION used below.
. ./env-vars.sh "${SERVICE}"

# Copy files to correct locations. Both supported services deploy the same
# artifacts (the original if/elif branches were identical), so they share
# one case arm; anything else is rejected.
case "${SERVICE}" in
    rbac|mgmt)
        cp "${RBAC_TARGET_WAR}" /deploy
        cp "postgresql-${POSTGRES_VERSION}.jar" /deploy
        ;;
    *)
        echo "Unsupported service"
        exit 1
        ;;
esac
| true |
c2ffb2b2c2009a6349cc805709f989fc2f9340da | Shell | q5390498/my-linux-config | /vim/install.sh | UTF-8 | 796 | 3.46875 | 3 | [] | no_license | #!/bin/bash
# Install/refresh a personal Vim setup: bootstrap Vundle if needed, copy
# my.vim into ~/.vim, ensure ~/.vimrc sources it, then batch-install plugins.
pushd "$(dirname "$0")"
# One-time Vundle bootstrap.
if [[ -d ~/.vim/bundle/Vundle.vim ]]; then
echo "Vundle has already been installed."
else
echo "Install Vundle..."
git clone https://github.com/VundleVim/Vundle.vim.git ~/.vim/bundle/Vundle.vim
echo "Done!"
fi
echo "Update vimrc..."
cp my.vim ~/.vim/my.vim
# Snippet that makes vimrc load the shared config. NOTE: `read -d ''` hits
# EOF and returns non-zero by design, so this script must not run with set -e.
read -r -d '' add_source << EOM
" Load my common config
source ~/.vim/my.vim
EOM
# Create ~/.vimrc containing the snippet, or prepend it when the vimrc exists
# but does not reference my.vim yet.
if [[ ! -e ~/.vimrc ]]; then
echo -e "$add_source\n" > ~/.vimrc
elif [[ "$(grep 'source ~/.vim/my.vim' ~/.vimrc)" == "" ]]; then
echo -e "$add_source\n" | cat - ~/.vimrc > vimrc.tmp && mv vimrc.tmp ~/.vimrc
fi
echo "Done!"
echo "Install Vim Plugins..."
# Run vim against only the head of my.vim, up to and including the
# "filetype plugin on" line — presumably the plugin declarations end there,
# which is all PluginInstall needs. TODO confirm against my.vim's layout.
line=$(grep -n "filetype plugin on" my.vim | cut -f1 -d ":")
vim -u <(head -$line ~/.vim/my.vim) +PluginInstall +qall
echo "Done!"
popd
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.