blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
4fdf78c2e2c668c7586f8620674e8f87060533ce
|
Shell
|
XuSihan/refactoring
|
/src/good.sh~
|
UTF-8
| 2,342
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/bash
# BUG FIX: the original shebang was "#!bash", which is not an absolute path and
# fails to exec on most systems.
#
# Locate a target Java source file in a "before" and an "after" checkout and run
# the gumtree-spoon AST comparator on the pair, naming the two methods of
# interest. Done once for aws-sdk-java, then for Hive between two commits.

cd aws-sdk-java_before
# Find the target file in the "before" tree (empty string if absent).
file_path=$(find /home/sihan/refactoring/src/aws-sdk-java_before -print | grep "com/amazonaws/services/dynamodbv2/datamodeling/DynamoDBMapper.java")
# file_path came from grep of the exact pattern, so non-empty means "found";
# the original re-grepped its own output and re-ran the identical find twice.
if [ -n "$file_path" ]
then
echo "True"
file_path_before=$file_path
file_path_after=$(find /home/sihan/refactoring/src/aws-sdk-java_after -print | grep "com/amazonaws/services/dynamodbv2/datamodeling/DynamoDBMapper.java")
else
echo "False"
# Fallback: look for any datamodeling.java match instead.
file_path_before=$(find /home/sihan/refactoring/src/aws-sdk-java_before -print | grep "datamodeling.java")
file_path_after=$(find /home/sihan/refactoring/src/aws-sdk-java_after -print | grep "datamodeling.java")
fi
cd ..
# Quote the paths so a path with unusual characters survives; a single match is
# expected from the find above.
java -cp /home/sihan/refactoring/src/gumtree-spoon-ast-diff-1.1.0-SNAPSHOT-jar-with-dependencies.jar gumtree.spoon.AstComparator "$file_path_before" "$file_path_after" pause pauseExponentially

# Hive: clone once, keep two working copies pinned to specific commits.
git clone https://github.com/apache/hive.git
mv hive hive_before
cp -R hive_before hive_after
cd hive_before
git checkout 56cffb2d1edc97825ee1d093e3d8e46dab6cdf4b
cd ..
cd hive_after
git checkout e2dd54ab180b577b08cf6b0e69310ac81fc99fd3
cd ..
cd hive_before
file_path=$(find /home/sihan/refactoring/src/hive_before -print | grep "org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java")
if [ -n "$file_path" ]
then
echo "True"
file_path_before=$file_path
file_path_after=$(find /home/sihan/refactoring/src/hive_after -print | grep "org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java")
else
echo "False"
file_path_before=$(find /home/sihan/refactoring/src/hive_before -print | grep "optimizer.java")
file_path_after=$(find /home/sihan/refactoring/src/hive_after -print | grep "optimizer.java")
fi
cd ..
java -cp /home/sihan/refactoring/src/gumtree-spoon-ast-diff-1.1.0-SNAPSHOT-jar-with-dependencies.jar gumtree.spoon.AstComparator "$file_path_before" "$file_path_after" foldExprFull foldExpr
| true
|
c2b4da08503343afc94e92c41e44df413baf3041
|
Shell
|
o-o-overflow/dc2020q-whooo-are-u-public
|
/image/generate_packages.sh
|
UTF-8
| 412
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash
# Build package lists for the image: download Ubuntu popularity-contest data,
# drop infrastructure/desktop package families, keep the top 65000 package
# names, and split them into 650-line chunks named <dir>/packages-*.
#
# Usage: generate_packages.sh <output-dir>

# Fail fast with a usage message when the output directory is missing
# (the original silently ran mkdir/split with empty arguments).
out_dir=${1:?usage: $0 <output-dir>}

mkdir -p -- "$out_dir"
split -l650 <(
curl https://popcon.ubuntu.com/all-popcon-results.txt.gz | zcat | grep Package |
grep -E -v ": (lib|linux-|python|language|xserver|gnome|firefox|openoffice|thunderbird|gir1.2|nvidia|unity|erlang|texlive|ubuntu|pidgin|nautilus|virtualbox|emodule|x11proto|plasma|kde|rhythmbox|x11|xwayland|wayland|tzdata)" |
sort -k4 -r -n | head -n65000 | awk '{print $2}'
) "$out_dir"/packages-
| true
|
076df16ccb7bd1d601e69fa06e865b837b0de11d
|
Shell
|
davidbebb/resin-nodejs-opencv
|
/openCV_script.sh
|
UTF-8
| 443
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/bash
# Build and install OpenCV from source, then remove the source tree and the
# build-only toolchain to keep the image small.
set -e  # abort on the first failure instead of e.g. deleting a half-built tree

apt-get update && \
apt-get install -y cmake
git clone https://github.com/itseez/opencv.git /usr/local/src/opencv
cd /usr/local/src/opencv && \
mkdir release && cd release && \
cmake -D CMAKE_BUILD_TYPE=RELEASE -D CMAKE_INSTALL_PREFIX=/usr/local ..
cd /usr/local/src/opencv/release && \
make && make install
rm -rf /usr/local/src/opencv \
&& apt-get purge -y cmake \
&& apt-get autoremove -y --purge
# BUG FIX: "ENV LD_LIBRARY_PATH ..." is Dockerfile syntax, not shell; in a
# shell script it tried to run a nonexistent "ENV" command. Export instead.
# NOTE(review): an export only affects this shell's children — if the original
# intent was an image-wide setting, move this into the Dockerfile as ENV.
export LD_LIBRARY_PATH=/usr/local/lib
| true
|
5cdeca8a78fe375fbc48e793e1a7e1c57af07302
|
Shell
|
ericclaus14/UnixAdminCourse
|
/ScriptTemplate.shtp
|
UTF-8
| 1,369
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
##############################################################################################
# Author: Eric Claus
# Date: 2/5/17
# Unix Admin Lab - Scripting 1
# Part 4
# Purpose: Install VSFTPD and configure it to enable write and ssl. If VSFTPD is installed,
#          first remove it then reinstall.
# Script must be run as root.
##############################################################################################

###################################
# Get rid of bash error messages. #
###################################
exec 2>/dev/null

#############
# Help file #
#############
# Print usage text and exit the script.
function display_help {
echo
echo " -- Script to install VSFTPD, enable writing, and enable SSL."
echo " -- If VSFTPD is already installed, it will be uninstalled/reinstalled"
echo
echo " -- Script must be run as root."
echo
echo " -h Display the help file."
echo
exit
}

###########################################
# getopts (parsing command line arguments) #
# Note: getopts was used instead of getopt #
# for the sake of simplicity               #
###########################################
while getopts 'h' option; do
# BUG FIX: the original switched on the literal string "option", so -h never
# matched the 'h' arm; switch on the variable's value instead.
case "$option" in
h) display_help
;;
*) display_help
;;
esac
done

##############################################################################################
# Declaration of main function
function Main {
# BUG FIX: bash forbids an empty function body; the original "{ }" was a
# syntax error that stopped the whole script from parsing. ":" is a no-op
# placeholder for the lab work to be filled in.
:
}

# Call Main function
Main
| true
|
bf7300c4deed0ece54ca8642b72de570c1e6dcf8
|
Shell
|
samir7575/spring-boot-mysql-springdatajpa-hibernate
|
/start.sh
|
UTF-8
| 2,074
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/sh
# Launcher for the apicj Spring Boot service: resolves the "chaine" (channel)
# to run, starts the jar in the background, records its PID, and tails the log.
# @...@ placeholders are substituted at packaging/installation time.
export JAVA_HOME=/apps/WebSphere85/LibertyCore/java/java_1.8_64/
export PATH=$JAVA_HOME/bin:$PATH
export LOG_PATH=@LOG_PATH@
# Fill in the default channel if one was configured at install time
[ "$chaine" = "" ] && chaine="@chaine.default@"
# If $chaine is not set/valid before launching this script, ask interactively.
# NOTE(review): grep -P is a GNU extension; confirm it exists on target hosts
# since the shebang is plain /bin/sh.
while [ "`echo \"$chaine\" | grep -P '^(chaine)?\d$'`" = "" ]
do
read -p "Quelle chaine [1|4] ? " chaine
done
# Accept both chaine=1 and chaine=chaine1 (normalize to "chaineN")
export chaine=chaine`echo $chaine | sed s/chaine//g`
# Create the SSL key if it does not exist
# Disabled: the server.ssl.* parameters must be configured manually in the
# application.properties file after installation
#[ -f $chaine.p12 ] || keytool -genkey -alias apicj -storetype PKCS12 -keyalg RSA -keysize 2048 -keystore $chaine.p12 -validity 365 -dname "cn=`hostname`" -storepass canalnet -keypass canalnet
# Launch in the background; the commented OPTIONS lines below are kept as
# ready-made SSL debugging/trust-store switches.
OPTIONS=
#OPTIONS="$OPTIONS -Djavax.net.debug=ssl"
#OPTIONS="$OPTIONS -Djavax.net.debug=all"
#OPTIONS="$OPTIONS -Dserver.ssl.trust-store=$JAVA_HOME/jre/lib/security/jssecacerts -Dserver.ssl.trust-store-type=pkcs12 -Dserver.ssl.trust-store-password=changeit"
#OPTIONS="$OPTIONS -Djavax.net.ssl.keyStore=$JAVA_HOME/jre/lib/security/jssecacerts -Djavax.net.ssl.keyStoreType=pkcs12 -Djavax.net.ssl.keyStorePassword=changeit"
#OPTIONS="$OPTIONS -Djavax.net.ssl.trustStore=$JAVA_HOME/jre/lib/security/jssecacerts -Djavax.net.ssl.trustStoreType=pkcs12 -Djavax.net.ssl.trustStorePassword=changeit"
mkdir -p $LOG_PATH
nohup java $* $OPTIONS "-DLOG_PATH=$LOG_PATH" -Dspring.profiles.active=$chaine -Dloader.path=.,oidc-services-apicj-conf-@project.version@-@env@.jar -jar oidc-services-apicj-@project.version@.jar >$LOG_PATH/apicj-$chaine.out 2>$LOG_PATH/apicj-$chaine.err &
# Record the background JVM's PID for later shutdown
echo $! > $chaine.pid
# Tail the log to follow startup - Ctrl+C to quit
while [ ! -f $LOG_PATH/apicj-$chaine.log ]; do sleep 1; done
tail -f $LOG_PATH/apicj-$chaine.log
| true
|
af8252ff7f0847049bfc49ab512946e1c54b5261
|
Shell
|
olgierdh/snippets
|
/git/store-branch.sh
|
UTF-8
| 451
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# Archive a branch as a tag: rename "<prefix>-<rest>" to "<prefix>/<rest>",
# tag the branch tip with it, then delete the branch locally and remotely.
#
# Usage: store-branch.sh <branch-name>

# Derive the tag name: the leading lowercase run and its dash become a slash,
# e.g. "feature-foo-bar" -> "feature/foo-bar".
tag_name=$(echo "$1" | sed -e 's/^\([a-z]*\)-\(.*\)/\1\/\2/g')

# Quote every expansion of $1/$tag_name: the original left them unquoted,
# which breaks on branch names containing spaces or glob characters.
echo "checkout to $1..."
git fetch origin
git checkout "$1"
echo "pulling changes..."
git pull origin "$1"
echo "tagging the branch with the name $tag_name..."
git tag "$tag_name"
echo "checkouting the master..."
git checkout master
echo "delete local branch..."
git branch -D "$1"
echo "delete remote branch..."
git push origin ":$1"
echo "push tags..."
git push --tags
echo "done"
| true
|
efecca9bcb072a23f52d4c36d01d751a2e11faaf
|
Shell
|
fact-project/mars_pulse_truth
|
/.svn/pristine/ef/efecca9bcb072a23f52d4c36d01d751a2e11faaf.svn-base
|
UTF-8
| 3,139
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/sh
#
# ========================================================================
#
# *
# * This file is part of MARS, the MAGIC Analysis and Reconstruction
# * Software. It is distributed to you in the hope that it can be a useful
# * and timesaving tool in analysing Data of imaging Cerenkov telescopes.
# * It is distributed WITHOUT ANY WARRANTY.
# *
# * Permission to use, copy, modify and distribute this software and its
# * documentation for any purpose is hereby granted without fee,
# * provided that the above copyright notice appear in all copies and
# * that both that copyright notice and this permission notice appear
# * in supporting documentation. It is provided "as is" without express
# * or implied warranty.
# *
#
#
# Author(s): Daniela Dorner 08/2004 <mailto:dorner@astro.uni-wuerzburg.de>
# Author(s): Daniel Hoehne-Moench 01/2009 <mailto:hoehne@astro.uni-wuerzburg.de>
#
# Copyright: MAGIC Software Development, 2000-2009
#
#
# ========================================================================
#
# This script launches the filling of the results of star into the db
# for all mc sequences of which the results are not yet in the database.
#
# After checking if the script is already running the todolist is written.
# Then for each sequence in the todo list the star results are filled
# into the table Star in the database using the macro fillmcstar.C
# If this was successful, the status is inserted into the database using
# the function setstatus.
#
# Helper functions used below (printprocesslog, checklock, gettodo, makedir,
# intgrep, setstatus, finish) and variables ($runlogpath, $datetime,
# $lockpath, $mars, $num, $primaries, $mcpath, $logpath, $macrospath) come
# from the sourced "sourcefile".
source `dirname $0`/sourcefile
printprocesslog "INFO starting $0"
program=fillmcstar
column=fFillStar
# noclobber: refuse to overwrite existing files via redirection
set -C
scriptlog=$runlogpath/$program-$datetime.log
date >> $scriptlog 2>&1
# check if the script is already running
lockfile=$lockpath/lock-$program.txt
checklock >> $scriptlog 2>&1
# get todo list (fills $num and $primaries)
gettodo >> $scriptlog 2>&1
cd $mars
# run fillstar for sequences
for (( s=0 ; s < $num ; s++ ))
do
sequence=${primaries[$s]}
printprocesslog "INFO starting $program for mc sequence $sequence"
# First 4 digits of the zero-padded sequence number form the directory bucket.
# NOTE(review): "cut -c 0-4" uses a 0 start — GNU cut character positions are
# 1-based; confirm it behaves as "-c 1-4" on the deployment host.
no=`printf %08d $sequence | cut -c 0-4`
no2=`printf %08d $sequence`
path="$mcpath/star/$no/$no2"
starfile=$path/star$no2.root
fillstarlogpath=$logpath/$program/$no
makedir $fillstarlogpath >> $scriptlog 2>&1
fillstarlog=$fillstarlogpath/$program-$sequence.log
echo "run $program for mc sequence $sequence" >> $scriptlog 2>&1
setstatus "start" >> $scriptlog 2>&1
# Run the ROOT macro; intgrep extracts its integer status code.
check2=`root -q -b $macrospath/fillmcstar.C+\("\"$starfile\""\,kFALSE\) | tee $fillstarlog | intgrep`
case $check2 in
1) echo " check2=$check2 -> everything ok " >> $scriptlog 2>&1
printprocesslog "INFO done fillmcstar successfully for mc sequence $sequence"
;;
0) echo " check2=$check2 -> no connection to db -> continue..." >> $scriptlog 2>&1
printprocesslog "WARN connection to DB failed"
check="no"
;;
*) echo " check2=$check2 -> ERROR -> step has to be repeated" >> $scriptlog 2>&1
printprocesslog "ERROR fillmcstar failed for mc sequence $sequence"
com=$Ffillmcstar
check=$check2
;;
esac
# $check/$com are consumed by setstatus to record the step outcome.
setstatus "stop" >> $scriptlog 2>&1
done
finish >> $scriptlog 2>&1
| true
|
962b8362ec390032115b7a2c9e43eb346a513982
|
Shell
|
COUNTRik/systemd
|
/scripts/systemd.sh
|
UTF-8
| 1,506
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/bash
# Vagrant provisioning: root SSH access, a systemd timer-based monitor,
# spawn-fcgi + PHP behind Apache, and two templated httpd instances (80/8080).

# Give root the vagrant user's authorized SSH keys
mkdir -p ~root/.ssh
cp ~vagrant/.ssh/auth* ~root/.ssh

# Monitor: config plus systemd timer/service units
cp /vagrant/monitor/monitor.config /etc/sysconfig/monitor.config
cp /vagrant/monitor/monitor.timer /etc/systemd/system/monitor.timer
cp /vagrant/monitor/monitor.service /etc/systemd/system/monitor.service
chmod +x /vagrant/monitor/monitor.sh
systemctl daemon-reload
systemctl enable monitor.timer
systemctl start monitor.timer

# spawn-fcgi
yum install epel-release -y && yum install -y spawn-fcgi php php-cli mod_fcgid httpd mc vim
echo "SOCKET=/var/run/php-fcgi.sock" >> /etc/sysconfig/spawn-fcgi
# BUG FIX: $SOCKET must be written literally (\$SOCKET) so it expands when the
# sysconfig file is sourced by the service; the original let the provisioning
# shell expand it immediately — SOCKET was unset here, so "-s " ended up empty.
echo "OPTIONS=\"-u apache -g apache -s \$SOCKET -S -M 0600 -C 32 -F 1 -P /var/run/spawn-fcgi.pid -- /usr/bin/php-cgi\"" >> /etc/sysconfig/spawn-fcgi
cp /vagrant/spawn/spawn-fcgi.service /etc/systemd/system/spawn-fcgi.service
systemctl daemon-reload
systemctl enable spawn-fcgi.service
systemctl start spawn-fcgi.service

# httpd: one templated unit file, instantiated for ports 80 and 8080
cp /vagrant/httpd/httpd@.service /etc/systemd/system/httpd@.service
cp /vagrant/httpd/httpd-80 /etc/sysconfig/httpd-80
cp /vagrant/httpd/httpd-8080 /etc/sysconfig/httpd-8080
cp /etc/httpd/conf/httpd.conf /etc/httpd/conf/80.conf
# 8080 instance gets its own config with a rewritten Listen line and pid file
sed 's!Listen 80!Listen 8080!' /etc/httpd/conf/httpd.conf > /etc/httpd/conf/8080.conf
echo "PidFile /var/run/httpd/httpd-8080.pid" >> /etc/httpd/conf/8080.conf
systemctl daemon-reload
systemctl enable httpd@80
systemctl enable httpd@8080
systemctl start httpd@80
systemctl start httpd@8080
| true
|
8971e461888b98b8c0f820874a7d0675a124213d
|
Shell
|
reachanshul/notary-kubernetes
|
/scripts/deploy-postgres.sh
|
UTF-8
| 1,293
| 3.515625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Deploy the bitnami PostgreSQL chart into the "notary" namespace and create
# the databases/users needed by the notary server and signer.
set -euo pipefail

# Run from the repository root regardless of invocation directory.
# Quote the inner "$0" too so paths with spaces survive (original left it bare).
cd "$(dirname "$0")/.."

# work around to make this script idempotent. The helm chart doesn't allow
# running an upgrade without providing it the root password, so reuse the
# password from an existing secret when one is present.
if kubectl get secret notary-postgresql --namespace notary > /dev/null 2>&1 ; then
printf "Found postgresql secret, reusing root password\n"
POSTGRESS_PASSWORD_BLOCK="postgresqlPassword: $(kubectl get secret --namespace notary notary-postgresql -o jsonpath="{.data.postgresql-password}" | base64 -d)"
fi

printf "\n### Deploying postgres\n\n"
# Values arrive on stdin; ${POSTGRESS_PASSWORD_BLOCK:-} is empty on first run
# (safe under nounset) or the reused password line on re-runs.
helm upgrade --install --namespace notary --create-namespace notary --repo https://charts.bitnami.com/bitnami postgresql --version 10.2.4 --values - > /dev/null <<EOF
${POSTGRESS_PASSWORD_BLOCK:-}
persistence:
  enabled: false
volumePermissions:
  enabled: true
tls:
  enabled: true
  certificatesSecret: postgres-tls
  certFilename: tls.crt
  certKeyFilename: tls.key
  certCAFilename: ca.crt
initdbScripts:
  create_databases.sql: |
    CREATE USER signer;
    CREATE DATABASE notarysigner WITH OWNER signer;
    GRANT ALL ON notarysigner TO signer;
    CREATE USER server;
    CREATE DATABASE notaryserver WITH OWNER server;
    GRANT ALL ON notaryserver TO server;
EOF
printf "\n"
helm list --namespace notary
| true
|
71afd4a98e2f383a531ed6d5ff12f1a775a3e5d0
|
Shell
|
minyk/spark-notebook-sandbox
|
/scripts/common.sh
|
UTF-8
| 2,631
| 2.796875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Shared configuration for the sandbox provisioning scripts: component
# versions, download URLs, install prefixes and service users. Sourced by
# other scripts — nothing here executes external commands.
#java
JAVA_VERSION="7"
JAVA_UPDATE="79"
JAVA_ARCHIVE=jdk-${JAVA_VERSION}u${JAVA_UPDATE}-linux-x64.tar.gz
JAVA_HOME="jdk1.${JAVA_VERSION}.0_${JAVA_UPDATE}"
#hadoop
HADOOP_PREFIX=/usr/local/hadoop
HADOOP_CONF_DIR=$HADOOP_PREFIX/etc/hadoop
HADOOP_VERSION=2.7.2
HADOOP_ARCHIVE=hadoop-${HADOOP_VERSION}.tar.gz
HADOOP_MIRROR_DOWNLOAD=http://archive.apache.org/dist/hadoop/core/hadoop-${HADOOP_VERSION}/${HADOOP_ARCHIVE}
HADOOP_RES_DIR=/vagrant/resources/hadoop
HDFS_USER="hdfs"
#spark
SPARK_VERSION=1.6.1
SPARK_ARCHIVE=spark-${SPARK_VERSION}-bin-hadoop2.6.tgz
SPARK_MIRROR_DOWNLOAD=http://www.apache.org/dist/spark/spark-${SPARK_VERSION}/${SPARK_ARCHIVE}
SPARK_RES_DIR=/vagrant/resources/spark
SPARK_CONF_DIR=/usr/local/spark/conf
SPARK_USER="spark"
#ssh
SSH_RES_DIR=/vagrant/resources/ssh
RES_SSH_COPYID_ORIGINAL=$SSH_RES_DIR/ssh-copy-id.original
RES_SSH_COPYID_MODIFIED=$SSH_RES_DIR/ssh-copy-id.modified
RES_SSH_CONFIG=$SSH_RES_DIR/config
#flume
FLUME_VERSION=1.5.2
FLUME_ARCHIVE=apache-flume-1.5.2-bin.tar.gz
FLUME_MIRROR_DOWNLOAD=http://www.apache.org/dist/flume/1.5.2/apache-flume-1.5.2-bin.tar.gz
FLUME_RES_DIR=/vagrant/resources/flume
FLUME_HOME=/usr/local/flume
FLUME_CONF=${FLUME_HOME}/conf
#Kafka
KAFKA_VERSION=0.9.0.1
KAFKA_NAME=kafka_2.10-${KAFKA_VERSION}
KAFKA_ARCHIVE=${KAFKA_NAME}.tgz
KAFKA_MIRROR_DOWNLOAD=http://www.apache.org/dist/kafka/${KAFKA_VERSION}/${KAFKA_ARCHIVE}
KAFKA_RES_DIR=/vagrant/resources/kafka
KAFKA_HOME=/usr/local/kafka
KAFKA_CONF=${KAFKA_HOME}/conf
KAFKA_USER="kafka"
#Cassandra
CASSANDRA_VERSION=2.1.10
CASSANDRA_NAME=apache-cassandra-${CASSANDRA_VERSION}-bin
CASSANDRA_ARCHIVE=${CASSANDRA_NAME}.tar.gz
# NOTE(review): "MIRROT" is a typo for "MIRROR" but the name is kept —
# consumers of this sourced file may reference it as-is; rename in lockstep.
CASSANDRA_MIRROT_DOWNLOAD=http://www.apache.org/dist/cassandra/${CASSANDRA_VERSION}/${CASSANDRA_ARCHIVE}
CASSANDRA_RES_DIR=/vagrant/resources/cassandra
CASSANDRA_HOME=/usr/local/cassandra
CASSANDRA_CONF=${CASSANDRA_HOME}/conf
#spark-notebook
SPARKNOTEBOOK_VERSION=0.6.3
SCALA_VERSION=2.10.5
SPARKNOTEBOOK_NAME=spark-notebook-${SPARKNOTEBOOK_VERSION}-scala-${SCALA_VERSION}-spark-${SPARK_VERSION}-hadoop-${HADOOP_VERSION}-with-hive-with-parquet
SPARKNOTEBOOK_ARCHIVE=${SPARKNOTEBOOK_NAME}.tgz
SPARKNOTEBOOK_MIRROR_DOWNLOAD=https://s3.eu-central-1.amazonaws.com/spark-notebook/tgz/${SPARKNOTEBOOK_ARCHIVE}
SPARKNOTEBOOK_RES_DIR=/vagrant/resources/spark-notebook
SPARKNOTEBOOK_HOME=/usr/local/spark-notebook
SPARKNOTEBOOK_CONF=${SPARKNOTEBOOK_HOME}/conf
#######################################
# Check whether a named file exists under the shared Vagrant resources dir.
# Arguments: $1 - filename relative to /vagrant/resources
# Returns:   0 if the file exists, 1 otherwise
#######################################
function resourceExists {
FILE=/vagrant/resources/$1
# Quote $FILE: unquoted, a name containing spaces word-splits and makes
# the [ -e ... ] test error out instead of answering the question.
if [ -e "$FILE" ]
then
return 0
else
return 1
fi
}
#######################################
# Check whether an arbitrary path exists.
# Arguments: $1 - absolute or relative path
# Returns:   0 if the path exists, 1 otherwise
#######################################
function fileExists {
FILE=$1
# Quote $FILE so paths with spaces are tested as a single word
# (the original unquoted form errored out and always returned 1 for them).
if [ -e "$FILE" ]
then
return 0
else
return 1
fi
}
#echo "common loaded"
| true
|
156726bf30121d08fec71ba142b3ab8ddffd7b6b
|
Shell
|
salsaproj/IndexedHBase
|
/apps/socialDataAnalysis-truthy/bin/moe_copy_hdfs.sh
|
UTF-8
| 984
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash
# Ship today's newest *.json.gz from the local spool to the MOE host via rsync,
# push it into HDFS there, then clean up the remote temp copy and record the
# filename in a status file.
DIR=/home/data/truthy_tmp
DESTDIR_ON_MOE=/home/appuser/truthy_tmp
DESTDIR_ON_HDFS=/truthy/data
REMOTE_USER=appuser
HOST=moe.soic.indiana.edu
KEY=${HOME}/.ssh/id_rsa_moe
# # uncomment this block to set debug mode on
# set -x
# terminate script on first error
set -e
# create SSH agent and add key (agent killed again at the bottom)
eval `ssh-agent -s` 1>/dev/null
ssh-add ${KEY} 2>/dev/null
pushd ${DIR} 1>/dev/null
# Files modified within the last 24h; -printf "%f" emits the bare filename.
# NOTE(review): with more than one match the names concatenate without a
# separator — assumes exactly one new archive per day; confirm.
LATEST_GZ=`find ${DIR} -type f -mtime 0 -name "*.json.gz" -printf "%f"`
rsync -aq --ignore-existing ${LATEST_GZ} ${REMOTE_USER}@${HOST}:${DESTDIR_ON_MOE}
popd 1>/dev/null
# upload file on HDFS
ssh ${REMOTE_USER}@${HOST} "hadoop fs -copyFromLocal -f ${DESTDIR_ON_MOE}/${LATEST_GZ} ${DESTDIR_ON_HDFS}/${LATEST_GZ}"
# remove the tmp file
ssh ${REMOTE_USER}@${HOST} "rm -f ${DESTDIR_ON_MOE}/${LATEST_GZ}"
# write the latest json.gz filename on MOE status
ssh ${REMOTE_USER}@${HOST} "echo ${LATEST_GZ} > ${DESTDIR_ON_MOE}/latestJsonGz"
# kill SSH agent
ssh-agent -k 1>/dev/null
# set +x
| true
|
aa3bd1dff7c73ead79862afe88c0e4e1c04a72da
|
Shell
|
nchikuma/wagasci_software
|
/RunCommand/shell/pedestal_check_run.sh
|
UTF-8
| 623
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/sh
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Frédéric Magniette, Miguel Rubio-Roy
# This file is part of Calicoes.
#
# Run pedestal-check acquisitions for each photo-electron (p.e.) level from
# ini_pe to fin_pe, then summarize the results.
# $1 - run/calibration directory name under $WAGASCI_XMLDATADIR.
. /opt/pyrame/ports.sh
ini_pe=2
fin_pe=2
step_pe=1
overwrite=append
step=0
pe=${ini_pe}
while [ "${pe}" -le "${fin_pe}" ]
do
echo "start run, pe:${pe}"
sleep 1
# Run time in seconds: 30 min base plus 90 min per p.e. level above 1.
# POSIX $(( )) arithmetic replaces the original backtick `expr` calls —
# no subprocess, no backslash-escaped parentheses.
run_time=$(( (30 + (pe - 1) * 90) * 60 ))
echo "$run_time"
"$WAGASCI_RUNCOMMANDDIR/python/pedestal_check_run.py" "$1" "${run_time}" "${overwrite}" "${pe}"
# The python driver signals a fatal error with exit status 1 specifically.
if [ $? -eq 1 ]; then
exit 1
fi
sleep 1
pe=$(( pe + step_pe ))
done #pe
calibdir="${WAGASCI_XMLDATADIR}/${1}"
"$WAGASCI_MAINDIR/bin/wgAnaPedestalSummary" -f "${calibdir}"
| true
|
5e0880f0c3bf3c5cdcf56ab7e65f03d290b5b35d
|
Shell
|
alxayo/MagentoCEAzureTemplate
|
/ubuntu14/iac_install.sh
|
UTF-8
| 1,350
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision an Ubuntu 14 host for Magento 2.x: Apache 2.4, PHP 5.6 (ondrej
# PPA), Composer, MySQL 5.6, and an initial "magento" database.
# $1 - MySQL root password (used non-interactively below).
# NOTE(review): the password travels on command lines (debconf echo, mysql -p)
# and is visible in process listings/logs — acceptable only for throwaway VMs.
clear
echo "Installing software requirements for Magento 2.x"
echo "----"
echo "Update and Upgate Apt"
echo "----"
sudo apt-get -y update
sudo apt-get -y upgrade
echo "Install Apache 2.4.x"
echo "----"
sudo apt-get -y install apache2
apache2 -v
echo "enable Apache rewrite module"
echo "----"
sudo a2enmod rewrite
service apache2 restart
echo "Install PHP 5.6"
echo "----"
sudo apt-get -y update
sudo apt-get install -y language-pack-en-base
# LC_ALL keeps add-apt-repository from choking on an unconfigured locale
sudo LC_ALL=en_US.UTF-8 add-apt-repository ppa:ondrej/php -y
sudo apt-get -y update
sudo apt-get -y install php5.6 php5.6-mcrypt php5.6-mbstring php5.6-curl php5.6-cli php5.6-mysql php5.6-gd php5.6-intl php5.6-xsl php5.6-zip
echo "Check PHP version. Should be: 5.6"
php -v
echo "Install Composer"
echo "----"
curl -sS https://getcomposer.org/installer | sudo php -- --install-dir=/usr/local/bin --filename=composer
echo "Installing MySQL 5.6"
echo "----"
# Pre-seed the root password so the package installs without prompting
sudo debconf-set-selections <<< "mysql-server-5.6 mysql-server/root_password password $1"
sudo debconf-set-selections <<< "mysql-server-5.6 mysql-server/root_password_again password $1"
sudo apt-get -y -q install mysql-server-5.6 mysql-client-5.6
echo "Create Magento Database"
echo "----"
mysql -uroot -p$1 <<MYSQL_SCRIPT
create database magento;
GRANT ALL ON magento.* TO magento@localhost IDENTIFIED BY 'magento';
MYSQL_SCRIPT
| true
|
fdb13a80a71259df980a3c5f2e6f80b8a0ad6c55
|
Shell
|
PSOCER/de2i150_edgedet
|
/demo_advanced/hardware/amm_master_qsys_with_pcie/simulation/synopsys/vcs/vcs_setup.sh
|
UTF-8
| 9,488
| 2.625
| 3
|
[] |
no_license
|
# (C) 2001-2015 Altera Corporation. All rights reserved.
# Your use of Altera Corporation's design tools, logic functions and
# other software and tools, and its AMPP partner logic functions, and
# any output files any of the foregoing (including device programming
# or simulation files), and any associated documentation or information
# are expressly subject to the terms and conditions of the Altera
# Program License Subscription Agreement, Altera MegaCore Function
# License Agreement, or other applicable license agreement, including,
# without limitation, that your use is for the sole purpose of
# programming logic devices manufactured by Altera and sold by Altera
# or its authorized distributors. Please refer to the applicable
# agreement for further details.
# ACDS 14.0 200 linux 2015.02.23.22:38:15
# ----------------------------------------
# vcs - auto-generated simulation script
# ----------------------------------------
# initialize variables
TOP_LEVEL_NAME="amm_master_qsys_with_pcie"
QSYS_SIMDIR="./../../"
QUARTUS_INSTALL_DIR="/package/eda/altera/altera14.0/quartus/"
SKIP_FILE_COPY=0
SKIP_ELAB=0
SKIP_SIM=0
USER_DEFINED_ELAB_OPTIONS=""
USER_DEFINED_SIM_OPTIONS="+vcs+finish+100"
# ----------------------------------------
# overwrite variables - DO NOT MODIFY!
# This block evaluates each command line argument, typically used for
# overwriting variables. An example usage:
# sh <simulator>_setup.sh SKIP_ELAB=1 SKIP_SIM=1
# NOTE(review): eval of raw argv executes arbitrary shell — only run this
# script with trusted arguments. Also, "exit $?" below exits with the status
# of the preceding echo (always 0), so an invalid expression does not actually
# abort with a failure code; generated code, left as-is.
for expression in "$@"; do
eval $expression
if [ $? -ne 0 ]; then
echo "Error: This command line argument, \"$expression\", is/has an invalid expression." >&2
exit $?
fi
done
# ----------------------------------------
# initialize simulation properties - DO NOT MODIFY!
ELAB_OPTIONS=""
SIM_OPTIONS=""
# Generated platform switch: both branches are currently no-ops.
if [[ `vcs -platform` != *"amd64"* ]]; then
:
else
:
fi
# ----------------------------------------
# elaborate the design with vcs: compile the Quartus simulation libraries and
# every generated QSYS submodule into ./simv.
# (The generated comment here used to say "copy RAM/ROM files" — this step is
# the vcs elaboration, not a file copy.)
# NOTE(review): the SKIP_ELAB flag initialized above is not consulted here —
# elaboration always runs; generated code, left as-is.
vcs -lca -timescale=1ps/1ps -sverilog +verilog2001ext+.v -ntb_opts dtm $ELAB_OPTIONS $USER_DEFINED_ELAB_OPTIONS \
-v $QUARTUS_INSTALL_DIR/eda/sim_lib/altera_primitives.v \
-v $QUARTUS_INSTALL_DIR/eda/sim_lib/220model.v \
-v $QUARTUS_INSTALL_DIR/eda/sim_lib/sgate.v \
-v $QUARTUS_INSTALL_DIR/eda/sim_lib/altera_mf.v \
$QUARTUS_INSTALL_DIR/eda/sim_lib/altera_lnsim.sv \
-v $QUARTUS_INSTALL_DIR/eda/sim_lib/cycloneiv_hssi_atoms.v \
-v $QUARTUS_INSTALL_DIR/eda/sim_lib/cycloneiv_pcie_hip_atoms.v \
-v $QUARTUS_INSTALL_DIR/eda/sim_lib/cycloneiv_atoms.v \
$QSYS_SIMDIR/submodules/altera_merlin_arbitrator.sv \
$QSYS_SIMDIR/submodules/amm_master_qsys_with_pcie_mm_interconnect_2_rsp_mux.sv \
$QSYS_SIMDIR/submodules/amm_master_qsys_with_pcie_mm_interconnect_2_rsp_demux.sv \
$QSYS_SIMDIR/submodules/amm_master_qsys_with_pcie_mm_interconnect_2_cmd_mux.sv \
$QSYS_SIMDIR/submodules/amm_master_qsys_with_pcie_mm_interconnect_2_cmd_demux.sv \
$QSYS_SIMDIR/submodules/amm_master_qsys_with_pcie_mm_interconnect_2_router_001.sv \
$QSYS_SIMDIR/submodules/amm_master_qsys_with_pcie_mm_interconnect_2_router.sv \
$QSYS_SIMDIR/submodules/altera_avalon_st_handshake_clock_crosser.v \
$QSYS_SIMDIR/submodules/altera_avalon_st_clock_crosser.v \
$QSYS_SIMDIR/submodules/altera_avalon_st_pipeline_base.v \
$QSYS_SIMDIR/submodules/amm_master_qsys_with_pcie_mm_interconnect_1_rsp_mux_004.sv \
$QSYS_SIMDIR/submodules/amm_master_qsys_with_pcie_mm_interconnect_1_rsp_mux_002.sv \
$QSYS_SIMDIR/submodules/amm_master_qsys_with_pcie_mm_interconnect_1_rsp_mux.sv \
$QSYS_SIMDIR/submodules/amm_master_qsys_with_pcie_mm_interconnect_1_rsp_demux_001.sv \
$QSYS_SIMDIR/submodules/amm_master_qsys_with_pcie_mm_interconnect_1_rsp_demux.sv \
$QSYS_SIMDIR/submodules/amm_master_qsys_with_pcie_mm_interconnect_1_cmd_mux_001.sv \
$QSYS_SIMDIR/submodules/amm_master_qsys_with_pcie_mm_interconnect_1_cmd_mux.sv \
$QSYS_SIMDIR/submodules/amm_master_qsys_with_pcie_mm_interconnect_1_cmd_demux_004.sv \
$QSYS_SIMDIR/submodules/amm_master_qsys_with_pcie_mm_interconnect_1_cmd_demux_003.sv \
$QSYS_SIMDIR/submodules/amm_master_qsys_with_pcie_mm_interconnect_1_cmd_demux_002.sv \
$QSYS_SIMDIR/submodules/amm_master_qsys_with_pcie_mm_interconnect_1_cmd_demux.sv \
$QSYS_SIMDIR/submodules/amm_master_qsys_with_pcie_mm_interconnect_1_router_006.sv \
$QSYS_SIMDIR/submodules/amm_master_qsys_with_pcie_mm_interconnect_1_router_005.sv \
$QSYS_SIMDIR/submodules/amm_master_qsys_with_pcie_mm_interconnect_1_router_004.sv \
$QSYS_SIMDIR/submodules/amm_master_qsys_with_pcie_mm_interconnect_1_router_002.sv \
$QSYS_SIMDIR/submodules/amm_master_qsys_with_pcie_mm_interconnect_1_router.sv \
$QSYS_SIMDIR/submodules/altera_merlin_width_adapter.sv \
$QSYS_SIMDIR/submodules/altera_merlin_address_alignment.sv \
$QSYS_SIMDIR/submodules/altera_merlin_burst_uncompressor.sv \
$QSYS_SIMDIR/submodules/amm_master_qsys_with_pcie_mm_interconnect_0_rsp_mux.sv \
$QSYS_SIMDIR/submodules/amm_master_qsys_with_pcie_mm_interconnect_0_rsp_demux.sv \
$QSYS_SIMDIR/submodules/amm_master_qsys_with_pcie_mm_interconnect_0_cmd_mux.sv \
$QSYS_SIMDIR/submodules/amm_master_qsys_with_pcie_mm_interconnect_0_cmd_demux.sv \
$QSYS_SIMDIR/submodules/altera_merlin_burst_adapter_13_1.sv \
$QSYS_SIMDIR/submodules/altera_merlin_burst_adapter.sv \
$QSYS_SIMDIR/submodules/altera_merlin_burst_adapter_new.sv \
$QSYS_SIMDIR/submodules/altera_wrap_burst_converter.sv \
$QSYS_SIMDIR/submodules/altera_incr_burst_converter.sv \
$QSYS_SIMDIR/submodules/altera_default_burst_converter.sv \
$QSYS_SIMDIR/submodules/altera_avalon_st_pipeline_stage.sv \
$QSYS_SIMDIR/submodules/altera_merlin_traffic_limiter.sv \
$QSYS_SIMDIR/submodules/altera_merlin_reorder_memory.sv \
$QSYS_SIMDIR/submodules/altera_avalon_sc_fifo.v \
$QSYS_SIMDIR/submodules/amm_master_qsys_with_pcie_mm_interconnect_0_router_001.sv \
$QSYS_SIMDIR/submodules/amm_master_qsys_with_pcie_mm_interconnect_0_router.sv \
$QSYS_SIMDIR/submodules/altera_merlin_slave_agent.sv \
$QSYS_SIMDIR/submodules/altera_merlin_master_agent.sv \
$QSYS_SIMDIR/submodules/altera_merlin_slave_translator.sv \
$QSYS_SIMDIR/submodules/altera_merlin_master_translator.sv \
$QSYS_SIMDIR/submodules/altpcie_pipe_interface.v \
$QSYS_SIMDIR/submodules/altpcie_pcie_reconfig_bridge.v \
$QSYS_SIMDIR/submodules/altera_pcie_hard_ip_reset_controller.v \
$QSYS_SIMDIR/submodules/altpcie_rs_serdes.v \
$QSYS_SIMDIR/submodules/altpcie_pll_100_250.v \
$QSYS_SIMDIR/submodules/altpcie_pll_125_250.v \
$QSYS_SIMDIR/submodules/amm_master_qsys_with_pcie_pcie_ip_altgx_internal.vo \
$QSYS_SIMDIR/submodules/synopsys/avalon_stif/altpciexpav_stif_a2p_addrtrans.v \
$QSYS_SIMDIR/submodules/synopsys/avalon_stif/altpciexpav_stif_a2p_fixtrans.v \
$QSYS_SIMDIR/submodules/synopsys/avalon_stif/altpciexpav_stif_a2p_vartrans.v \
$QSYS_SIMDIR/submodules/synopsys/avalon_stif/altpciexpav_stif_control_register.v \
$QSYS_SIMDIR/submodules/synopsys/avalon_stif/altpciexpav_stif_cfg_status.v \
$QSYS_SIMDIR/submodules/synopsys/avalon_stif/altpciexpav_stif_cr_avalon.v \
$QSYS_SIMDIR/submodules/synopsys/avalon_stif/altpciexpav_stif_cr_interrupt.v \
$QSYS_SIMDIR/submodules/synopsys/avalon_stif/altpciexpav_stif_cr_mailbox.v \
$QSYS_SIMDIR/submodules/synopsys/avalon_stif/altpciexpav_stif_p2a_addrtrans.v \
$QSYS_SIMDIR/submodules/synopsys/avalon_stif/altpciexpav_stif_reg_fifo.v \
$QSYS_SIMDIR/submodules/synopsys/avalon_stif/altpciexpav_stif_rx.v \
$QSYS_SIMDIR/submodules/synopsys/avalon_stif/altpciexpav_stif_rx_cntrl.v \
$QSYS_SIMDIR/submodules/synopsys/avalon_stif/altpciexpav_stif_rx_resp.v \
$QSYS_SIMDIR/submodules/synopsys/avalon_stif/altpciexpav_stif_tx.v \
$QSYS_SIMDIR/submodules/synopsys/avalon_stif/altpciexpav_stif_tx_cntrl.v \
$QSYS_SIMDIR/submodules/synopsys/avalon_stif/altpciexpav_stif_txavl_cntrl.v \
$QSYS_SIMDIR/submodules/synopsys/avalon_stif/altpciexpav_stif_txresp_cntrl.v \
$QSYS_SIMDIR/submodules/synopsys/avalon_stif/altpciexpav_clksync.v \
$QSYS_SIMDIR/submodules/synopsys/avalon_lite/altpciexpav_lite_app.v \
$QSYS_SIMDIR/submodules/altpciexpav_stif_app.v \
$QSYS_SIMDIR/submodules/altpcie_hip_pipen1b_qsys.v \
$QSYS_SIMDIR/submodules/altera_reset_controller.v \
$QSYS_SIMDIR/submodules/altera_reset_synchronizer.v \
$QSYS_SIMDIR/submodules/amm_master_qsys_with_pcie_irq_mapper.sv \
$QSYS_SIMDIR/submodules/amm_master_qsys_with_pcie_mm_interconnect_2.v \
$QSYS_SIMDIR/submodules/amm_master_qsys_with_pcie_mm_interconnect_1.v \
$QSYS_SIMDIR/submodules/amm_master_qsys_with_pcie_mm_interconnect_0.v \
$QSYS_SIMDIR/submodules/user_module.sv \
$QSYS_SIMDIR/submodules/arit.sv \
$QSYS_SIMDIR/submodules/controlunit.sv \
$QSYS_SIMDIR/submodules/flex_counter.sv \
$QSYS_SIMDIR/submodules/ImageSpecRegs.sv \
$QSYS_SIMDIR/submodules/memory_cntrl.sv \
$QSYS_SIMDIR/submodules/outputlogic.sv \
$QSYS_SIMDIR/submodules/readCounter.sv \
$QSYS_SIMDIR/submodules/shift_register.sv \
$QSYS_SIMDIR/submodules/startdecoder.sv \
$QSYS_SIMDIR/submodules/top_levelu.sv \
$QSYS_SIMDIR/submodules/amm_master_qsys_with_pcie_altpll_qsys.vo \
$QSYS_SIMDIR/submodules/amm_master_qsys_with_pcie_sdram.v \
$QSYS_SIMDIR/submodules/amm_master_qsys_with_pcie_sgdma.v \
$QSYS_SIMDIR/submodules/amm_master_qsys_with_pcie_pcie_ip.v \
$QSYS_SIMDIR/amm_master_qsys_with_pcie.v \
-top $TOP_LEVEL_NAME
# ----------------------------------------
# simulate: run the elaborated ./simv unless SKIP_SIM=1 was passed on argv
if [ $SKIP_SIM -eq 0 ]; then
./simv $SIM_OPTIONS $USER_DEFINED_SIM_OPTIONS
fi
| true
|
5c421c54722d93a8c1c1ecabe3fa28bbfeb96f09
|
Shell
|
bamarni/pi64
|
/make/release
|
UTF-8
| 356
| 2.90625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Tag today's date, push the tag, create a draft GitHub release, and upload
# the lite and desktop image zips to it.
set -ex
cd build
release=$(date +"%Y-%m-%d")
git tag $release
git push origin $release
github-release release --user bamarni --repo pi64 --tag $release --draft
for version in "lite" "desktop" ; do
# NOTE(review): cwd is already build/, so this path resolves to
# build/build/pi64-*.zip — confirm the zips really live one level deeper,
# otherwise the "build/" prefix should be dropped.
github-release upload --user bamarni --repo pi64 --tag $release --name "pi64-$version.zip" --file "build/pi64-$version.zip"
done
| true
|
b5b89a879a697d79615ac9f16986ffdfa2528001
|
Shell
|
dOpensource/dsiprouter
|
/testing/2.sh
|
UTF-8
| 170
| 2.765625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Test: verify the dsip-init service is currently active.
. include/common

# Name shown in the test report.
description="dsip-init Service Started"

# Ask systemd quietly for the unit's state and capture the exit code
# (0 = active, non-zero = inactive/failed/unknown).
systemctl is-active --quiet dsip-init && rc=0 || rc=$?

process_result "$description" $rc
| true
|
db8cab2d8ed3e2271e07c06c25879980d92928e9
|
Shell
|
kata-containers/tests
|
/.ci/kata-simplify-log.sh
|
UTF-8
| 4,920
| 3.8125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Copyright (c) 2018 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
# Strict mode: abort on errors, unset variables, and pipeline failures.
set -o errexit
set -o nounset
set -o pipefail
# Bare script name, used in the usage text below.
readonly script_name=${0##*/}
# Directory this script lives in (resolving symlinks), for locating lib.sh.
cidir="$(dirname $(readlink -f "$0"))"
source "${cidir}/lib.sh"
# Print usage/help text to stdout.
usage()
{
cat <<EOF
Description: Simplify the specified logfile by replacing common fields with
fixed strings to make diff(1)-ing easier.
Usage: $script_name <log-file>
$script_name [-h|--help|help]
Options:
-h : Show this help.
--help :
help :
Limitations:
- This script uses simple heuristics and might break at any time.
EOF
}
# Use heuristics to convert patterns in the specified structured logfile into
# fixed strings to aid in comparision with other logs from the same system
# component.
#
# $1 - path of the log file to simplify; the rewritten log goes to stdout
#      (the input file itself is never modified).
#
# NOTE(review): relies on GNU sed extensions ('-r' and the 'I' case-
# insensitive flag on the 's' command); BSD/macOS sed will reject these.
# NOTE: the '.' characters in the timestamp and IP patterns below are
# unescaped, so they match any character — harmless for well-formed logs,
# but slightly looser than the comments suggest.
simplify_log()
{
local -r file="$1"
# Pattern for a standard timestamp.
#
# Format: "YYYY-MM-DDTHH:MM:SS.NNNNNNNNNxZZ:ZZ" where "x" is "+" or "-"
typeset -r timestamp_pattern="[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{,9}[+-][0-9]{2}:[0-9]{2}"
# Slightly different timestamp format used by the agent.
#
# Format: "YYYY-MM-DDTHH:MM:SS.NNNNNNNNNZ"
typeset -r timestamp_pattern_agent="[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}.[0-9]{,9}Z"
# Pattern used to detect the agent displaying total guest memory.
#
# Format: "\"DDDDDD kB"
typeset -r memory_size_pattern="\"[0-9][0-9]* kB"
# Pattern used to detect architectures (uses golang architecture names).
typeset -r arch_pattern="(amd64|arm64|ppc64le)"
# Pattern used to detect the prefix used when mounting resources into the
# container.
typeset -r mount_hash_pattern="[[:xdigit:]]{64}-[[:xdigit:]]{16}-"
# Pattern for 64-byte hash values.
typeset -r hash_pattern="[[:xdigit:]]{64}"
# Pattern for detecting duration messages from the guest kernel modules.
typeset -r duration_pattern="duration=[^ ][^ ]* "
# Pattern for detecting memory addresses.
typeset -r address_pattern="0x[[:xdigit:]]{,10}"
# Pattern for detecting UUIDs (see uuidgen(1)).
typeset -r uuid_pattern="[[:xdigit:]]{8}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{12}"
# Pattern for detecting network MAC addresses.
typeset -r mac_addr_pattern="[[:xdigit:]]{2}:[[:xdigit:]]{2}:[[:xdigit:]]{2}:[[:xdigit:]]{2}:[[:xdigit:]]{2}:[[:xdigit:]]{2}"
# Pattern for detecting git(1) commits.
typeset -r commit_pattern="[[:xdigit:]]{40}"
# Pattern for detecting IPv4 address.
#
# Format: "XXX.XXX.XXX.XXX"
typeset -r ip_addr_pattern="[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}"
# Pattern for detecting IPv4 address with a netmask.
#
# Format: "XXX.XXX.XXX.XXX/XXX"
typeset -r ip_addr_with_netmask_pattern="[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}/[0-9]{1,3}"
# Pattern for detecting process IDs in the structured logs.
typeset -r pid_pattern="pid=[0-9][0-9]*"
# Pattern for detecting files in the proc(5) filesystem.
typeset -r proc_fs_pattern="/proc/[0-9][0-9]*/"
# Pattern used to detect kernel diagnostic messages that show how long it
# took to load a kernel module.
typeset -r kernel_modprobe_pattern="returned -*[0-9][0-9]* after [0-9][0-9]* usecs"
# Pattern to detect numbers (currently just integers).
typeset -r number_pattern="[0-9][0-9]*"
# Notes:
#
# - Some of the patterns below use "!" as the delimiter as the patterns
# contain forward-slashes.
#
# - The patterns need to be in most-specific-to-least-specific order to
# ensure correct behaviour.
#
# - Patterns that anchor to a structured logging field need to ensure the
# replacement text is also a valid structured log field (for example
# duration and pid patterns).
sed -r \
-e "s/${timestamp_pattern}/TIMESTAMP/gI" \
-e "s/${timestamp_pattern_agent}/TIMESTAMP/gI" \
-e "s/${memory_size_pattern}/MEMORY-SIZE/gI" \
-e "s/${arch_pattern}/ARCHITECTURE/gI" \
-e "s/${mount_hash_pattern}/MOUNT-HASH/gI" \
-e "s/${hash_pattern}/HASH/gI" \
-e "s/${duration_pattern}/duration=DURATION /gI" \
-e "s/${address_pattern}/HEX-ADDRESS/gI" \
-e "s/${uuid_pattern}/UUID/gI" \
-e "s/${mac_addr_pattern}/MAC-ADDRESS/gI" \
-e "s/${commit_pattern}/COMMIT/gI" \
-e "s!${ip_addr_with_netmask_pattern}!IP-ADDRESS-AND-MASK!gI" \
-e "s/${ip_addr_pattern}/IP-ADDRESS/gI" \
-e "s/${pid_pattern}/pid=PID/gI" \
-e "s!${proc_fs_pattern}!/proc/PID/!gI" \
-e "s/${kernel_modprobe_pattern}/returned VALUE after VALUE usecs/g" \
-e "s/${number_pattern}/NUMBER/gI" \
"$file"
}
# --- Entry point --------------------------------------------------------
# Exactly one argument is required (the log file, or a help flag).
if [ $# -ne 1 ]; then
	usage
	die "need argument"
fi

# Help request? (usage itself terminates the script.)
if [ "$1" = "-h" ] || [ "$1" = "--help" ] || [ "$1" = "help" ]; then
	usage
	exit 0
fi

file="$1"
simplify_log "$file"
| true
|
5124df7a5582d92ecc151c7b7ad531c6fd01b875
|
Shell
|
pyar6329/sql-examples
|
/middleware/docker/scripts/cockroach_init.sh
|
UTF-8
| 1,209
| 3.34375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Wait for the local CockroachDB node to accept connections, then run the
# bootstrap SQL (database/table creation) inside the master container.
set -eu

# Directory containing this script (used to locate the SQL file).
CURRENT_DIR="$(cd "$(dirname "$0")" && pwd)"
COCKROACH_PORT=26257
# COCKROACH_DATABASE="examples"
# COCKROACH_USERNAME="pyar6329"
# COCKROACH_PASSWORD="passwordexample"
# EXECUTE_QUERY=$(printf " \
#   CREATE DATABASE IF NOT EXISTS ${COCKROACH_DATABASE}; \
#   CREATE USER ${COCKROACH_USERNAME} WITH PASSWORD '${COCKROACH_PASSWORD}'; \
#   GRANT ALL ON DATABASE ${COCKROACH_DATABASE} TO ${COCKROACH_USERNAME}; \
# ")
# INSECURE_EXECUTE_QUERY=$(printf " \
#   CREATE DATABASE IF NOT EXISTS ${COCKROACH_DATABASE}; \
# ")
# if ! [ -e "${CURRENT_DIR}/../var/cockroach/data_master" ]; then
#   # while ! nc -w 1 -z localhost ${COCKROACH_PORT}; do sleep 0.1; done;
#   # docker exec -it sql-examples-cockroach-master ./cockroach sql --certs-dir=/certs --execute="${EXECUTE_QUERY}"
#   docker exec -it sql-examples-cockroach-master ./cockroach sql --insecure --execute="${INSECURE_EXECUTE_QUERY}"
# fi
#
# Strip newlines so the whole SQL file can be passed via a single --execute.
# (Variable name fixed from the original EXECUTE_QUERY_CEEATE_TABLE typo.)
EXECUTE_QUERY_CREATE_TABLE="$(tr -d '\n' < "${CURRENT_DIR}/../cockroach/create_databases.sql")"

# Block until the SQL port answers, polling every 100 ms.
while ! nc -w 1 -z localhost "${COCKROACH_PORT}" > /dev/null 2>&1; do
  sleep 0.1
done

docker exec -it sql-examples-cockroach-master ./cockroach sql --insecure --execute="${EXECUTE_QUERY_CREATE_TABLE}"
| true
|
e356a4e5bcd21a361e0b1e1b2c871566253964f8
|
Shell
|
michaeldeongreen/HandyScripts
|
/bash/tv-episode-filename-formatter.sh
|
UTF-8
| 8,319
| 4.1875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash -eu
##########################################################################################################################################################################################
#- Purpose: Script is used to rename tv episodes into a "S##E#" format ie "Hokuto no Ken 001.mkv" to "Hokuto no Ken S01E01.mkv"
#- Parameters are:
#- [-n] seriesName - Series name (ex: Hokuto no Ken).
#- [-s] seriesSeasonNumber - Series season number (ex: 1,12).
#- [-d] episodeDirectory - Episode directory (ex: "c:/Hokuto No Ken").
#- [-e] fileExtension - File Extension filter (ex: mkv, mp4).
#- [-l] episodeNumberLength - Episode number length (ex: Series with that is 99 or less would be 2, 100 to 999 would be 3). Must be 2 or 3.
#- [-p] episodeNumberStartPosition - The position where the episode number starts for each file. Assumes it is the same for each file (ex: Hokuto no Ken 001.mkv would be 14, which is).
#- [-t] testing - OPTIONAL flag to do a test run before renaming the files. Output will be logged to the log file.
#- [-r] renumber - OPTIONAL flag used to re-number the episodes, starting at 1. Assumes files are ordered correctly by default.
#- [-c] renumberStart - OPTIONAL flag used to define the starting number for the re-number logic. Default is 1 when -r set and not provided.
#- [-h] help
###########################################################################################################################################################################################
############################################################
#- function used to print out script usage
#  Writes the argument reference and an example invocation to stdout.
#  Takes no arguments; does not exit (callers exit themselves).
############################################################
function usage() {
echo
echo "Arguments:"
echo -e "\t-n \t Series name (ex: Hokuto no Ken) (required)"
echo -e "\t-s \t Series season number (ex: 1,12) (required)"
echo -e "\t-d \t Episode directory (ex: "c:/Hokuto No Ken") (required)"
echo -e "\t-e \t File Extension filter (ex: mkv, mp4) (required)"
echo -e "\t-l \t Episode number length (ex: Series with that is 99 or less would be 2, 100 to 999 would be 3). Must be 2 or 3 (required)"
echo -e "\t-p \t The position where the episode number starts for each file. Assumes it is the same for each file (ex: Hokuto no Ken 001.mkv would be 14, which is) (required)"
echo -e "\t-t \t Flag to do a test run before renaming the files. Output will be logged to the log file (optional)"
echo -e "\t-r \t flag used to re-number the episodes, starting at 1. Assumes files are ordered correctly by default (optional)"
echo -e "\t-c \t flag used to define the starting number for the re-number logic. Default is 1 when -r set and not provided"
echo -e "\t-h \t Help (optional)"
echo
echo "Example:"
echo -e "./tv-episode-filename-formatter.sh -n \"Hokuto no Ken\" -s 1 -d \"c:/Hokuto no Ken\" -e mkv -l 2 -p 15 -t"
}
# set default execute mode to testing = false
testing="false"
# set default renumber flag = false
renumber="false"
# set default renumberStart = 1
renumberStart=1
# Required parameters default to empty strings so the validation below can
# test them safely: the shebang enables 'set -u', and referencing a flag
# that was never supplied would otherwise abort with "unbound variable"
# instead of printing the friendly error message.
seriesName=""
seriesSeasonNumber=""
episodeDirectory=""
fileExtension=""
episodeNumberLength=""
episodeNumberStartPosition=""

# Loop, get parameters & remove any spaces from input.
# The leading ':' puts getopts into silent mode so that the ':' branch
# below actually fires when an option is missing its argument (without it,
# getopts prints its own message and the branch was unreachable).
while getopts ":n:s:d:e:l:p:trc:h" opt; do
    case $opt in
        n)
            # Series Name
            seriesName=$OPTARG
        ;;
        s)
            # Series Season Number
            seriesSeasonNumber=$OPTARG
        ;;
        d)
            # Episode Directory
            episodeDirectory=$OPTARG
        ;;
        e)
            # File Extension
            fileExtension=$OPTARG
        ;;
        l)
            # Episode Number Length
            episodeNumberLength=$OPTARG
        ;;
        p)
            # Episode Number Start Position
            episodeNumberStartPosition=$OPTARG
        ;;
        t)
            # Testing (dry run: log what would happen, rename nothing)
            testing="true"
        ;;
        r)
            # Renumber episodes starting from renumberStart
            renumber="true"
        ;;
        c)
            # Renumber Start
            renumberStart=$OPTARG
        ;;
        :)
            echo "Error: -${OPTARG} requires a value"
            exit 1
        ;;
        *)
            # Unknown option, or -h: show usage and fail.
            usage
            exit 1
        ;;
    esac
done

# If user did not provide required parameters then non-usage.
if [[ $# -eq 0 || -z $seriesName || -z $seriesSeasonNumber || -z $episodeDirectory || -z $fileExtension || -z $episodeNumberLength || -z $episodeNumberStartPosition ]]; then
    echo "Parameters missing! Required parameters are: [-n] seriesName [-s] seriesSeasonNumber [-d] episodeDirectory [-e] fileExtension [-l] episodeNumberLength [-p] episodeNumberStartPosition"
    exit 1;
fi

# as of now, the episode number length must be 2 or 3 ie 01-99 or 001-999
if [[ "$episodeNumberLength" -ne "2" && "$episodeNumberLength" -ne "3" ]]; then
    echo "Parameter episodeNumberLength is invalid. Must be 2 or 3!"
    exit 1;
fi
#######################################################
#- function used to loop through directory and rename
#  files but keep the original episode numbers.
#- no arguments; operates on the globals set in the main
#  section below ($files glob, $episodeDirectory, name
#  parts, $episodeNumberStartPosition/Length).
#  NOTE: the unquoted $files expansion relies on the main
#  section having set IFS='|', so the glob expands and
#  filenames containing spaces survive intact.
#######################################################
dontRenumberEpisodes () {
# loop through each file
for f in $files
do
# write to log file
writeToLogFile "Processing file $f"
# get the file name only
filename=$(basename -- "$f")
# get the episode number only (substring at a fixed offset in the name)
episodeNumber=${filename:episodeNumberStartPosition:episodeNumberLength}
# get the first digit of the episode number
firstDigit=${episodeNumber:0:1}
# logic to check the first digit: strip one leading zero for 3-digit
# numbering; for 2-digit numbering both digits are kept as-is.
# NOTE(review): for length 2 the "then" branch produces the same value as
# the "else" branch (${episodeNumber:0:2} is the whole number) — presumably
# intentional padding preservation, but worth confirming.
if [ "$zero" == "$firstDigit" ]; then
if [[ "$episodeNumberLength" == "2" ]]; then
newEpisodeNumber=${episodeNumber:0:2} # start at first position string
else
newEpisodeNumber=${episodeNumber:1:2} # start at second position in string
fi
newEpisodeName="$episodeDirectory/$seriesName $preS$seriesSeasonNumber$preE$newEpisodeNumber.$fileExtension"
# rename file
rename "$f" "$newEpisodeName"
else
newEpisodeName="$episodeDirectory/$seriesName $preS$seriesSeasonNumber$preE$episodeNumber.$fileExtension"
# rename file
rename "$f" "$newEpisodeName"
fi
done
}
#######################################################
#- function used to rename a file using the mv command
#- $1 - Original file name
#- $2 - New file name
#  In test (dry-run) mode the move is skipped, but the
#  intended target name is still written to the log.
#######################################################
rename () {
# Only touch the filesystem outside of dry-run mode.
[[ "$testing" == "true" ]] || mv "$1" "$2"
# Always record the target name in the log.
writeToLogFile "New file name: $2\n\n"
}
#######################################################
#- function used to loop through directory and rename
#  files, assigning fresh sequential episode numbers
#  starting at $renumberStart (assumes $files is already
#  in the desired order; relies on IFS='|' from main).
#######################################################
renumberEpisodes () {
local nextNumber=$renumberStart
local f filename label target
for f in $files
do
# write to log file
writeToLogFile "Processing file $f"
# file name without path (kept for parity with dontRenumberEpisodes)
filename=$(basename -- "$f")
# zero-pad single-digit numbers to two digits
if [ ${#nextNumber} -eq 1 ]; then
label="0$nextNumber"
else
label="$nextNumber"
fi
target="$episodeDirectory/$seriesName $preS$seriesSeasonNumber$preE$label.$fileExtension"
# rename file
rename "$f" "$target"
nextNumber=$((nextNumber+1))
done
}
#######################################################
#- function used to write to log file
#- $1 - Log file entry
#  Expands backslash escapes (\t, \n, ...) in the entry,
#  prints it to stdout and appends a copy to $logFile.
#######################################################
writeToLogFile () {
local entry="$1"
# %b gives echo -e style escape expansion.
printf '%b\n' "$entry" | tee -a "$logFile"
}
# Set the internal field separator to '|' so that the unquoted $files glob
# expansion inside the worker functions is not split on spaces in
# directory/file names. (The original comment claimed "newline"; the
# separator actually used is '|'. File names containing '|' would still
# break.)
IFS='|'
# first digit
zero="0"
# S before season number
preS="S"
# E before episode number
preE="E"
# log file
logFile="$episodeDirectory/tv-episode-filename-formatter.log"
# directory location with filter (glob pattern; expanded later, unquoted)
files="$episodeDirectory/*.$fileExtension"
# write to log file
writeToLogFile "Parameters: [-n] $seriesName [-s] $seriesSeasonNumber [-d] $episodeDirectory [-e] $fileExtension [-l] $episodeNumberLength [-p] $episodeNumberStartPosition\n\n"
# concat 0 onto series season number if length is 1 (S1 -> S01)
if [ ${#seriesSeasonNumber} == 1 ]; then
seriesSeasonNumber="0$seriesSeasonNumber"
fi
# determine whether episodes need to be renumbered
if [ "$renumber" == "false" ]; then
dontRenumberEpisodes
else
renumberEpisodes
fi
| true
|
2a40247c0ec4a9ad499be4c2f212318f09430897
|
Shell
|
ccplabwustl/RobertJirsaraie
|
/study-PDS/apps_freesurfer.sh
|
UTF-8
| 6,453
| 3.40625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
###################
# Driver: submits FreeSurfer cross-sectional, base-template and longitudinal
# jobs for the PDS study via qsub, skipping work that is done or in-flight.
STUDY_DIR=/scratch/rjirsara/study-PDS
DIR_TOOLBOX=/scratch/rjirsara/RobertJirsaraie/toolbox
mkdir -p ${STUDY_DIR}/apps/freesurfer
module purge ; module load "freesurfer-5.3.0"
######
### Build Freesurfer License If Missing
######
# NOTE(review): this embeds personal license credentials in the script and
# writes them to disk — consider sourcing them from a protected location.
FREESURFER_LICENSE=`echo $DIR_TOOLBOX/bids_apps/freesurfer/license_freesurfer.txt`
if [[ ! -f $FREESURFER_LICENSE && ! -z $DIR_TOOLBOX ]] ; then
echo ""
echo "⚡ # ⚡ # ⚡ # ⚡ # ⚡ # ⚡ # ⚡ # ⚡ # ⚡ # ⚡ # ⚡ # "
echo "`basename $FREESURFER_LICENSE` Not Found - Register For One Here: "
echo " https://surfer.nmr.mgh.harvard.edu/registration.html "
echo "⚡ # ⚡ # ⚡ # ⚡ # ⚡ # ⚡ # ⚡ # ⚡ # ⚡ # ⚡ # ⚡ # "
# NOTE(review): the 'grep -v local' filter looks like a no-op on this
# fixed text — confirm what it was meant to strip.
printf "rjirsara@uci.edu
40379
*CBT0GfF/00EU
FSP82KoWQu0tA \n" | grep -v local > $FREESURFER_LICENSE
fi
######
### Submit Jobs For Cross-Sectional Processing
######
# For every T1w scan in BIDS (excluding '_run-' repeats), submit one
# per-subject/per-session cross-sectional recon job unless it is already
# queued/running (qstat) or its output directory already exists.
SCRIPT_CROSS=${DIR_TOOLBOX}/bids_apps/freesurfer/pipeline_anatomical-cross.sh
if [[ -f $SCRIPT_CROSS && -d $STUDY_DIR ]] ; then
for INPUT in `find ${STUDY_DIR}/bids -iname *_T1w.nii.gz | grep -v '_run-'` ; do
# Derive subject and session labels from the BIDS filename.
SUB_IDS=`basename $INPUT | sed s@'_T1w.nii.gz'@''@g`
SUB=`echo $SUB_IDS | cut -d '_' -f1 | cut -d '-' -f2`
SES=`echo $SUB_IDS | cut -d '_' -f2 | cut -d '-' -f2`
# Job status string from qstat (empty when not queued/running).
JOBNAME=`echo FSCROSS${SUB}x${SES}` ; JOBSTATUS=`qstat -u $USER | grep "${JOBNAME}\b" | awk {'print $10'}`
if [[ ! -z "$JOBSTATUS" ]] ; then
echo ""
echo "##########################################################"
echo "#${SUB}x${SES} Is Currently Being Processed: ${JOBSTATUS} "
echo "##########################################################"
elif [[ -d `echo $STUDY_DIR/apps/freesurfer/sub-${SUB}_ses-${SES}` ]] ; then
echo ""
echo "##########################################"
echo "#${SUB}x${SES} Was Processed Successfully "
echo "##########################################"
else
echo ""
echo "####################################################################"
echo "Submitting Freesurfer Cross-sectional Job For Subect: ${SUB}x${SES} "
echo "####################################################################"
# The pipeline template uses literal '$1'/'$2'/'$3' placeholders that are
# substituted here via sed before submission.
cat $SCRIPT_CROSS \
| sed s@'$1'@${STUDY_DIR}@g \
| sed s@'$2'@${SUB}@g \
| sed s@'$3'@${SES}@g > ${SCRIPT_CROSS}_GO
qsub -N $JOBNAME ${SCRIPT_CROSS}_GO ; rm ${SCRIPT_CROSS}_GO
fi
done
fi
######
### Submit Jobs To Create Subject-Specific Base Templates
######
# For each subject whose cross-sectional runs are all finished, build the
# subject-specific "base" template required by the longitudinal stream.
SCRIPT_BASE=${DIR_TOOLBOX}/bids_apps/freesurfer/pipeline_anatomical-base.sh
if [[ -f $SCRIPT_BASE && -d $STUDY_DIR ]] ; then
for SUB in `find ${STUDY_DIR}/bids -maxdepth 1 | grep sub | sed s@'sub-'@'#'@g | cut -d '#' -f2` ; do
# Number of T1w scans in BIDS vs. number of completed cross-sectional runs.
NBID=`find ${STUDY_DIR}/bids/sub-${SUB} -iname *_T1w.nii.gz | grep -v _run- | wc -l`
NFREE=`echo $STUDY_DIR/apps/freesurfer/sub-${SUB}_ses-* | tr ' ' '\n' | grep -v ERROR | grep -v long | wc -l`
# BUGFIX: the job name previously included ${SES}, a stale value left over
# from the cross-sectional loop above; base jobs are per-subject only, so
# both the submitted name and the qstat lookup now use just the subject.
JOBNAME=`echo FSBASE${SUB}` ; JOBSTATUS=`qstat -u $USER | grep "${JOBNAME}\b" | awk {'print $10'}`
STATS_FILE=`echo $STUDY_DIR/apps/freesurfer/sub-${SUB}_base/stats/aseg.stats`
if [[ ! -z "$JOBSTATUS" ]] ; then
echo ""
echo "########################################################"
echo "#${SUB}_base Is Currently Being Processed: ${JOBSTATUS} "
echo "########################################################"
elif [[ -f $STATS_FILE ]] ; then
echo ""
echo "########################################"
echo "#${SUB}_base Was Processed Successfully "
echo "########################################"
elif [[ $NBID != $NFREE ]] ; then
echo ""
echo "##################################################################"
echo "#${SUB}_base Does Not Have All Sessions Processed $NBID != $NFREE "
echo "##################################################################"
else
echo ""
echo "##################################################"
echo "Submitting Freesurfer Base Job For Subect: ${SUB} "
echo "##################################################"
# Substitute the template's literal '$1'/'$2' placeholders, then submit.
cat $SCRIPT_BASE \
| sed s@'$1'@${STUDY_DIR}@g \
| sed s@'$2'@${SUB}@g > ${SCRIPT_BASE}_GO
qsub -N $JOBNAME ${SCRIPT_BASE}_GO ; rm ${SCRIPT_BASE}_GO
fi
done
fi
######
### Submit Jobs For Longitudinal Processing
######
# Submit a longitudinal recon job for each finished cross-sectional run
# whose subject already has a base template.
# NOTE: 'head -n1' limits this loop to the first matching session directory
# per invocation — presumably to throttle submissions; confirm intended.
SCRIPT_LONG=${DIR_TOOLBOX}/bids_apps/freesurfer/pipeline_anatomical-long.sh
if [[ -f $SCRIPT_LONG && -d $STUDY_DIR ]] ; then
for SUB in `find ${STUDY_DIR}/apps/freesurfer -maxdepth 1 -printf "%f\n" | grep sub | grep -v _base | grep -v _long | head -n1` ; do
SUBID=`echo $SUB | cut -d '_' -f1 | cut -d '-' -f2`
SESID=`echo $SUB | cut -d '_' -f2 | cut -d '-' -f2`
# recon-all.log files signal completion of the long / base stages.
LONG_FILE=`echo ${STUDY_DIR}/apps/freesurfer/${SUB}_long/scripts/recon-all.log`
BASE_FILE=`echo ${STUDY_DIR}/apps/freesurfer/sub-${SUBID}_base/scripts/recon-all.log`
JOBNAME=`echo FSLONG${SUBID}x${SESID}` ; JOBSTATUS=`qstat -u $USER | grep "${JOBNAME}\b" | awk {'print $10'}`
if [[ ! -z "$JOBSTATUS" ]] ; then
echo ""
echo "########################################################"
echo "#${SUB}_long Is Currently Being Processed: ${JOBSTATUS} "
echo "########################################################"
elif [[ -f ${LONG_FILE} ]] ; then
echo ""
echo "########################################"
echo "#${SUB}_long Was Processed Successfully "
echo "########################################"
elif [[ ! -f "$BASE_FILE" ]] ; then
echo ""
echo "#############################################################"
echo "#${SUB} Does Not Have a Base Template Yet So Must Be Skipped "
echo "#############################################################"
else
echo ""
echo "##########################################################"
echo "Submitting Freesurfer Longitudinal Job For Subect: ${SUB} "
echo "##########################################################"
# Substitute the template's literal '$1'/'$2'/'$3' placeholders, then submit.
cat $SCRIPT_LONG \
| sed s@'$1'@${STUDY_DIR}@g \
| sed s@'$2'@${SUBID}@g \
| sed s@'$3'@${SESID}@g > ${SCRIPT_LONG}_GO
qsub -N $JOBNAME ${SCRIPT_LONG}_GO ; rm ${SCRIPT_LONG}_GO
fi
done
fi
########⚡⚡⚡⚡⚡⚡#################################⚡⚡⚡⚡⚡⚡################################⚡⚡⚡⚡⚡⚡#######
#### ⚡ ⚡ ⚡ ⚡ ⚡ ⚡ ⚡ ⚡ ⚡ ⚡ ⚡ ⚡ ⚡ ⚡ ⚡ ⚡ ⚡ ⚡ ####
########⚡⚡⚡⚡⚡⚡#################################⚡⚡⚡⚡⚡⚡################################⚡⚡⚡⚡⚡⚡#######
| true
|
cd6c89ae335c1602b69ed9398508c7157409e084
|
Shell
|
viniciusbig/cakephp-docker
|
/.docker/users/.bashrc
|
UTF-8
| 156
| 2.515625
| 3
|
[
"MIT"
] |
permissive
|
# Alias — shortcuts used inside the container.
alias cake='bin/cake'
# 'node' -> 'nodejs' (presumably Debian-style binary naming — verify for
# the base image in use).
alias node='nodejs'
# NVM — load the Node Version Manager if it is installed under /var/www.
export NVM_DIR="/var/www/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && . "$NVM_DIR/nvm.sh" # This loads nvm
| true
|
8f1a84ebd1539e28ef879d0100c92d257d0baa56
|
Shell
|
reinfer/aelita
|
/signup/activate.sh
|
UTF-8
| 187
| 2.515625
| 3
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Create the project virtualenv on first run, install dependencies into it,
# then activate it and load the app configuration for Flask.
cd "$(dirname "$0")" || exit 1
if [ ! -d venv/ ]; then
    virtualenv venv
    # BUGFIX: use the venv's own pip so the requirements land inside the
    # virtualenv — the previous bare 'pip install' ran before activation
    # and installed into the system site-packages.
    ./venv/bin/pip install -r requirements.txt
fi
# '.' is the POSIX spelling of 'source' (this script runs under /bin/sh,
# where the bash-only 'source' builtin may not exist).
. ./venv/bin/activate
. ./config.sh
export FLASK_APP="$(pwd)/main.py"
| true
|
bdb179a7be62806cc10a8680ed7d52b23fe4d3e4
|
Shell
|
mackenco/password-validator
|
/example/examples.sh
|
UTF-8
| 1,224
| 3
| 3
|
[] |
no_license
|
#!/bin/sh
# Demo driver: exercises password_validator against the bundled example
# files. The section headers use printf because 'echo "\n..."' only expands
# the escape under some shells (e.g. dash) and prints a literal \n under
# others (e.g. bash) — printf behaves the same everywhere.
printf '\n%s\n' "invalid passwords"
echo "running 'cat \"./example/invalid.txt\" | password_validator'"
cat ./example/invalid.txt | password_validator
printf '\n%s\n' "all valid"
echo "running 'cat \"./example/valid.txt\" | password_validator'"
cat ./example/valid.txt | password_validator
printf '\n%s\n' "with custom password list"
echo "running 'echo \"password1,averystrongpassword,password 1\" | password_validator ./example/weak_password_list.txt'"
echo "password1,averystrongpassword,password 1" | password_validator ./example/weak_password_list.txt
printf '\n%s\n' "passwords passed as options"
echo "running 'password_validator -p password1,abc,averystrongpassword ./example/weak_password_list.txt'"
password_validator -p password1,abc,averystrongpassword,10000000 ./example/weak_password_list.txt
printf '\n%s\n' "password file passed in as an option"
echo "running 'password_validator -i ./example/valid.txt'"
password_validator -i ./example/valid.txt
printf '\n%s\n' "you can even use them all in conjunction"
echo "running 'cat ./example/invalid.txt | password_validator -p "password9,xxx" -i ./example/valid.txt ./example/weak_password_list.txt'"
cat ./example/invalid.txt | password_validator -p "password9,xxx" -i ./example/valid.txt ./example/weak_password_list.txt
| true
|
e56fd910b8ee59a73176a793a29731882204e119
|
Shell
|
dbactual/qp
|
/tests/load_test.sh
|
UTF-8
| 2,873
| 3.6875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Load test for qp: compares the wall-clock time of CONCURRENT parallel
# query batches against the same work issued shard-by-shard sequentially.
GOPATH=`pwd`/.go
PROCESS=./qp        # qp binary under test
CONCURRENT=10       # concurrent client requests per iteration
MAX_DSNS=8          # number of MySQL DSNs (one ssh-tunnelled port each)
PORT=9666           # port the qp server listens on
URL=:$PORT
# Tear down the qp server and the ssh tunnel on any exit path
# ($PID and $SSH_TUNNEL_PID are set later in the script).
cleanup() {
  kill "$PID"
  kill "$SSH_TUNNEL_PID"
}
trap cleanup EXIT
function die {
echo $1
exit 666
}
# timer          -> prints the current epoch time (use to start a timer).
# timer <start>  -> prints the time elapsed since <start> as H:MM:SS.
#                   An empty <start> yields 0:00:00.
#
timer()
{
    if [[ $# -eq 0 ]]; then
        date '+%s'
        return
    fi
    local stime=$1
    etime=$(date '+%s')
    [[ -z "$stime" ]] && stime=$etime
    dt=$((etime - stime))
    printf '%d:%02d:%02d' $((dt / 3600)) $(((dt / 60) % 60)) $((dt % 60))
}
# The SQL to run against every shard; spliced into the JSON bodies below.
QUERY="$(cat tests/load_test_query.sql)"
TUNNEL_HOST=$1
if [[ ! $TUNNEL_HOST ]]; then
die "Please supply username@hostname for MySQL tunnels"
fi
# Build one DSN JSON fragment per tunnelled port (two per port, matching
# two logical shards per MySQL instance) plus the ssh -L forwarding args.
TUNNELS=""
QS=()
START_PORT=20889
for i in $(seq 1 $MAX_DSNS); do
port=$((START_PORT + i))
Q="{\"dsn\":\"root@tcp(127.0.0.1:$port)/monetate_session?charset=utf8\",\"query\":\"__QUERY__\"}"
QS=(${QS[@]} $Q)
Q="{\"dsn\":\"root@tcp(127.0.0.1:$port)/monetate_session?charset=utf8\",\"query\":\"__QUERY__\"}"
QS=(${QS[@]} $Q)
TUNNELS="$TUNNELS -L$port:127.0.0.1:3306"
done
echo "Opening ssh tunnels to mysql..."
ssh -N $TUNNELS $TUNNEL_HOST &
SSH_TUNNEL_PID=$!
echo "Started ssh tunnel to $TUNNEL_HOST with pid:$SSH_TUNNEL_PID"
# '|'-join the fragments and substitute the real query text.
ALL_Q=${QS[@]}
SHARDS=$(echo "${ALL_Q// /|}" | sed -e "s/__QUERY__/$QUERY/g")
QUIET=$2
# NOTE(review): '[ QUIET ]' tests the literal string "QUIET" and is always
# true; both branches are also identical, so $2 currently has no effect.
if [ QUIET ]; then
$PROCESS -url $URL -maxDsns=$MAX_DSNS -maxConnsPerDsn=24 2>&1 1>/dev/null &
else
$PROCESS -url $URL -maxDsns=$MAX_DSNS -maxConnsPerDsn=24 2>&1 1>/dev/null &
fi
PID=$!
echo "Started qp process pid:$PID"
sleep 1
# Phase 1: each request carries ALL shards at once (parallel inside qp).
ITERATIONS=5
tmr=$(timer)
SHARDQ=$(echo $SHARDS | tr "|" ",")
for i in $(seq 1 $ITERATIONS); do
for j in $(seq 1 $CONCURRENT); do
Q="{\"flat\":true,\"queries\":[$SHARDQ]}"
python -c "import requests; requests.post('http://localhost:$PORT/', '$Q')" &
PIDS[${j}]=$!
done
echo "iteration process ids: ${PIDS[*]}"
for p in ${PIDS[*]}; do
wait $p
done
unset PIDS
done
T1=$(timer $tmr)
# Phase 2: one shard per request batch (effectively sequential per shard).
tmr=$(timer)
for i in $(seq 1 $ITERATIONS); do
OLDIFS=$IFS
IFS=$'|'
for j in $SHARDS; do
for k in $(seq -s "|" 1 $CONCURRENT); do
Q="{\"flat\":true,\"queries\":[$j]}"
python -c "import requests; requests.post('http://localhost:$PORT/', '$Q')" &
PIDS[${k}]=$!
done
echo "iteration process ids: ${PIDS[*]}"
for p in ${PIDS[*]}; do
wait $p
done
unset PIDS
done
IFS=$OLDIFS
done
T2=$(timer $tmr)
# NOTE(review): '<' inside [[ ]] compares H:MM:SS strings lexicographically;
# this is only reliable while both durations have the same hour-digit width.
if [[ "$T1" < "$T2" ]]; then
echo "Passed: parallel time: $T1, sequential time: $T2"
else
die "Failed: parallel time: $T1, sequential time: $T2"
fi
| true
|
e23e5df1b54b26b4889a841431fc3315a7c8c3bd
|
Shell
|
lovasko/Modra
|
/tests/floating/main.sh
|
UTF-8
| 358
| 2.65625
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
# Assemble floating.dat (header row + per-primitive median draw times from
# the GLfloat and GLdouble benchmark logs) and render the comparison plot.
echo "Type GLfloat GLdouble" > floating.dat
# primitive counts: the number after '(' on the "primitives" lines
grep "primitives" float | cut -d'(' -f2 | cut -d' ' -f1 > primitive_counts
# median draw times: everything after the colon, one per run
grep "Median draw time:" float | cut -d: -f2 > float_medians
grep "Median draw time:" double | cut -d: -f2 > double_medians
# merge the three columns, keep 'table' as an inspectable intermediate
paste primitive_counts float_medians double_medians > table
cat table >> floating.dat
gnuplot main.gpi
| true
|
a38825ae1dcc13b988faac36700647d9c33496da
|
Shell
|
liangzr/github-run
|
/notify.sh
|
UTF-8
| 913
| 2.859375
| 3
|
[
"MIT"
] |
permissive
|
#! /usr/bin/env sh
# GitHub didn't update the contributions graph immediately, but
# only the commits from default branch can be display on the
# contributions graph. So we can change the default branch to
# force it refresh
# NOTE(review): the token is passed on the command line via -u, which is
# visible in 'ps' output on shared hosts — consider curl's --netrc or a
# config file instead.
# Wait a moment
sleep 60
# Temporarily flip the default branch to 'master'...
curl https://api.github.com/repos/${GITHUB_USER}/github-run \
-u ${GITHUB_USER}:${GITHUB_DEV_TOKEN} \
-X PATCH \
-H 'Content-Type: application/json;charset=UTF-8' \
-d '{"name":"github-run","default_branch":"master"}' \
> /dev/null -s
echo 'Change default branch to master'
# curl https://github.com/${GITHUB_USER} > /dev/null -s
# ...then flip it back to 'graph' so the graph refreshes.
curl https://api.github.com/repos/${GITHUB_USER}/github-run \
-u ${GITHUB_USER}:${GITHUB_DEV_TOKEN} \
-X PATCH \
-H 'Content-Type: application/json;charset=UTF-8' \
-d '{"name":"github-run","default_branch":"graph"}' \
> /dev/null -s
echo 'Default branch change back to graph'
echo 'Notified the GitHub'
|
fa8c0e41abaae5b306a4ac665bdc360dca232170
|
Shell
|
edgarcosta/lmfdb-gce
|
/server_scripts/install.sh
|
UTF-8
| 1,507
| 3.1875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
set -e
# Provision an LMFDB web-server VM: build prerequisites, gcsfuse, fixed-UID
# users, supervisor, and the lmfdb-gce helper repository.
#prerequisites for sage
sudo apt-get install -y binutils gcc g++ gfortran make m4 perl tar git libssl-dev
#install gcfuse (Google Cloud Storage FUSE) from Google's apt repository
export GCSFUSE_REPO=gcsfuse-`lsb_release -c -s`
echo "deb http://packages.cloud.google.com/apt $GCSFUSE_REPO main" | sudo tee /etc/apt/sources.list.d/gcsfuse.list
curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
sudo apt-get update
sudo apt-get install -y gcsfuse
sudo usermod -a -G fuse $USER
# create users (fixed UIDs so mounted data keeps consistent ownership)
sudo useradd sage -u 1200 -d /home/sage -m
sudo useradd lmfdb -u 1300 -d /home/lmfdb -m
# add lmfdb to fuse
sudo usermod -a -G fuse lmfdb
sudo su lmfdb -c "mkdir -p /home/lmfdb/data"
#install git
sudo apt-get update
sudo apt-get install -y git
#install supervisord
# BUGFIX: the Debian/Ubuntu package is named 'supervisor' (the daemon it
# ships is called supervisord); 'apt-get install supervisord' fails with
# "unable to locate package". The update-rc.d line below already used the
# correct service name.
sudo apt-get install -y supervisor
sudo update-rc.d supervisor disable
# clone git
git clone https://github.com/edgarcosta/lmfdb-gce.git ~/lmfdb-gce/
#update fstab
echo "updating fstab"
sudo cp ~/lmfdb-gce/config/fstab /etc/fstab
bash ~/lmfdb-gce/server_scripts/mount.sh
echo "installing the client"
sudo su lmfdb -c "sh ~/lmfdb-gce/scripts/install_lmfdb.sh"
echo "you might need copy sage from a disk, e.g., by doing:"
echo "# sudo su sage -c \"mkdir -p /home/sage/image\""
echo "# sudo mount /dev/disk/by-label/SAGE /home/sage/image"
echo "# sudo su sage -c \"rsync -av --progress /home/sage/image/ /home/sage/\""
echo "Now you can start lmfdb with:"
echo "# sudo su lmfdb -c \"/home/lmfdb/start-supervisord\""
set +e
|
a843ea87dd1acc76b9808733384b9ac3eeb31a15
|
Shell
|
sundari0627/Nagios-Monitering-on-Hadoop-Cluster
|
/check-hu.sh
|
UTF-8
| 333
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
#set -x
# Nagios plugin: report the member list of the 'hadoop' group.
# Exit 0 (OK) when exactly one /etc/group line matches 'hadoop',
# exit 2 (CRITICAL) otherwise.
pat=/usr/local/nagios/libexec
# Count of group lines matching 'hadoop' ($4 is the member list field).
# NOTE(review): this counts matching *lines*, not users — a hadoop group
# with an empty member list still counts as 1 and reports OK; conversely,
# two groups containing 'hadoop' in any field trigger the CRITICAL branch
# whose "does not have any users" wording is then misleading. Confirm.
hdfs_rm=`awk -F':' '/hadoop/{print $4}' /etc/group | wc -l`
awk -F':' '/hadoop/{print $4}' /etc/group > $pat/users
if [ $hdfs_rm -eq 1 ]
then
echo "Hadoop group users are `cat $pat/users` "
exit 0
else
echo "Hadoop group does not have any users"
exit 2
fi
| true
|
67199b3d4dd525b50c9199c7289f258ad6a5e072
|
Shell
|
qnib/d-hadoop
|
/opt/qnib/hdfs/datanode/bin/start.sh
|
UTF-8
| 612
| 2.71875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/local/bin/dumb-init /bin/bash
# Prepare the HDFS datanode data directory, render Hadoop config from the
# consul templates, then launch the datanode as the hdfs user.
sleep 5

# First boot: create the datanode directory and hand it to hdfs.
if [ ! -d /data/hadoopdata/hdfs/datanode/ ]; then
    mkdir -p /data/hadoopdata/hdfs/datanode/
    chown hdfs: -R /data/hadoopdata/hdfs/datanode
fi

# On pure datanodes (no co-located namenode) render core-site.xml once.
if [ "X${HADOOP_HDFS_NAMENODE}" != "Xtrue" ]; then
    consul-template -consul localhost:8500 -once -template /etc/consul-templates/hdfs/core-site.xml.ctmpl:/etc/hadoop/conf/core-site.xml
fi

# Render hdfs-site.xml only if it does not exist yet.
if [ ! -f /etc/hadoop/conf/hdfs-site.xml ]; then
    consul-template -consul localhost:8500 -once -template /etc/consul-templates/hdfs/hdfs-site.xml.ctmpl:/etc/hadoop/conf/hdfs-site.xml
fi

su -c '/usr/bin/hadoop datanode' hdfs
| true
|
dfd652a73b809ad0bde927f11493b8e3efa84233
|
Shell
|
Djamil17/ERNIE
|
/Technical/harden-and-update/safe-period-detectors/active-postgres-queries.sh
|
UTF-8
| 2,014
| 4.3125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Print the manual page for this script and exit with status 1
# (the script treats a help request the same as a usage error).
usage() {
# The quoted 'HEREDOC' delimiter suppresses expansion of the backticks
# and $-signs inside the help text.
cat << 'HEREDOC'
NAME
active_postgres_queries.sh -- check the running non-system Postgres queries in a Postgres DB
SYNOPSIS
active_postgres_queries.sh [-v] postgres_DB
active_postgres_queries.sh -h: display this help
DESCRIPTION
Check the running non-system Postgres queries in the specified database.
Running queries are all queries excluding:
1. `idle` queries
2. Queries executed by `postgres` user
The following options are available:
-v verbose: print all execute lines
ENVIRONMENT
Pre-requisite dependencies:
# `pcregrep`
EXIT STATUS
The utility exits with one of the following values:
0 No running queries
1 Running queries are found
AUTHOR(S)
Written by Dmitriy "DK" Korobskiy.
HEREDOC
exit 1
}
set -e
set -o pipefail
# if a character is followed by a colon, the option is expected to have an argument
while getopts vh OPT; do
case "$OPT" in
v)
readonly VERBOSE=true
;;
*) # -h or `?`: an unknown option
usage
;;
esac
done
shift $((OPTIND - 1))
# Positional parameters
[[ $1 == "" ]] && usage
readonly POSTGRES_DB="$1"
[[ "${VERBOSE}" == true ]] && set -x
if ! command -v pcregrep >/dev/null; then
echo "Please install pcregrep"
exit 1
fi
echo "Checking active Postgres queries in the $POSTGRES_DB DB".
# Capture the psql listing of non-idle, non-postgres, non-self sessions.
readonly QUERIES=$(
# Avoid any directory permission warning
cd /tmp
# language=PostgresPLSQL
sudo -u jenkins psql -v ON_ERROR_STOP=on "$POSTGRES_DB" << 'HEREDOC'
SELECT *
FROM pg_stat_activity
WHERE pid <> pg_backend_pid() and state <> 'idle' and usename != 'postgres';
HEREDOC
)
# Minus header and footer
# Extract N from psql's trailing "(N rows)" footer.
declare -i QUERY_COUNT=$(pcregrep -o1 '^\((\d+) rows?\)$'<<< "$QUERIES")
if ((QUERY_COUNT > 0)); then
cat <<HEREDOC
**Not in a quiet period.** The following $QUERY_COUNT non-system Postgres queries are running:
-----
HEREDOC
# NOTE: QUERIES is a scalar, so "${QUERIES[@]}" expands to one word —
# printf prints the whole listing as a single block. Works, but the
# array syntax is cosmetic.
printf '%s\n' "${QUERIES[@]}"
echo "-----"
exit 1
fi
echo "In a quiet period"
exit 0
| true
|
8edbba1215595a7a226cc4f7a28cd40e5f42d655
|
Shell
|
lisheng-li/huashan
|
/huashan.txt
|
UTF-8
| 960
| 4
| 4
|
[] |
no_license
|
#!/bin/bash
#---- variable setup ----------------------------------------------------
LOG_FILE=/xxxxxx/$0.log              # log output file
INTERVAL=120                         # polling interval (seconds)
ERROR=90                             # error threshold (% of memory used)
PROCESS_NAME='mintty'                # name of the process to kill

# Log a message when the script terminates for any reason.
# (Timestamps are taken at log time; the original captured DATE once at
# startup, so every log line carried the same stale time.)
function catch() {
   echo "$(date +'%Y/%m/%d %H:%M:%S') ERROR: the shell is stoping..." >> ${LOG_FILE}
   exit
}
trap catch exit

#--- main loop ----------------------------------------------------------
while true
do
  TOTAL=$(free | grep "Mem:" | awk '{print $2}')      # total memory (kB)
  REMAINING=$(free | grep "Mem:" | awk '{print $7}')  # available memory (kB)
  USED=$((TOTAL - REMAINING))
  # Integer percentage of memory in use. BUGFIX: the original computed a
  # floating-point value here, which made the integer '-gt' comparison
  # below fail with "integer expression expected" on every iteration.
  RATE=$(awk -v u="$USED" -v t="$TOTAL" 'BEGIN { printf "%d", 100 * u / t }')
  # Threshold check: kill all matching processes when usage is too high.
  # (The log message now reports the actual threshold; the original said
  # "80%" while ERROR was 90.)
  if [ ${RATE} -gt ${ERROR} ]; then
    echo "$(date +'%Y/%m/%d %H:%M:%S') ERROR: memory used ${ERROR}% over" >> ${LOG_FILE}
    PIDS=$(ps -ef | grep "$PROCESS_NAME" | grep -v grep | grep -v PPID | awk '{ print $2}')
    for ID in ${PIDS}
    do
      kill -9 ${ID}
    done
  fi
  sleep ${INTERVAL}
done
| true
|
074df52c2edca8a301eb015537d97bb493ac64e5
|
Shell
|
hmcts/ccd-docker
|
/bin/utils/idam-get-user.sh
|
UTF-8
| 394
| 3.171875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Look up an IDAM user by e-mail address (local development environments only).
#
# Usage: idam-get-user.sh <email>
set -eu
# Guard: this helper is only meant for local runs; on any other environment
# it exits successfully without doing anything.
if [ "${ENVIRONMENT:-local}" != "local" ]; then
  exit 0;
fi
# Quote all expansions: the original left $0, $dir and the URL unquoted, which
# breaks on paths with spaces and lets the shell glob the "?" in the URL.
dir=$(dirname "${0}")
email=${1}
# Obtain an admin API token via the sibling authenticate script.
apiToken=$("${dir}/idam-authenticate.sh" "${IDAM_ADMIN_USER}" "${IDAM_ADMIN_PASSWORD}")
curl --silent --show-error -H 'Content-Type: application/json' -H "Authorization: AdminApiAuthToken ${apiToken}" \
  "${IDAM_API_BASE_URL:-http://localhost:5000}/users?email=${email}"
| true
|
161604562d882e8105982a6826e1897c8df915e8
|
Shell
|
martinburchell/wordpress_devtools
|
/overwrite_local_uploads_with_remote
|
UTF-8
| 617
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# Overwrite the local WordPress uploads directory with the remote site's copy,
# after showing the rsync change list and asking for confirmation.
# The wordpress_* path variables and all helper functions come from the files
# sourced below.
set -e
bin_dir="$( cd "$( dirname "$0" )" && pwd )"
source ${bin_dir}/website_config
source ${bin_dir}/error_functions
source ${bin_dir}/rsync_functions
source ${bin_dir}/confirm_functions
source ${bin_dir}/svn_functions
# Ensure local svn changes are checked in first (behavior defined in svn_functions).
svn_ensure_changes_checked_in $wordpress_local_uploads
echo Checking for remote changes
# Dry-run listing of what syncing remote -> local would change.
rsync_list_changes $wordpress_remote_uploads $wordpress_local_uploads
if confirm "Proceed with these changes?"; then
echo Copying uploads from remote site
rsync_copy_changes_no_mods $wordpress_remote_uploads $wordpress_local_uploads
exit 0
else
echo Aborted
exit 1
fi
| true
|
854918c3dab1306ebb63323b41e9e5b44cc21d5a
|
Shell
|
cnwangjihe/Raspberry_pi_config
|
/frp/install.sh
|
UTF-8
| 1,008
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
# Install (or upgrade) the frpc client and the RefreshpublicIP helper.
# systemd hosts get unit files; other hosts fall back to SysV init scripts.
# Stop any running services before replacing binaries.
if command -v systemctl >/dev/null 2>&1; then
    systemctl stop frpc
    systemctl stop RefreshpublicIP
else
    /etc/init.d/frpc stop
    /etc/init.d/RefreshpublicIP stop
fi
cp frpc /usr/bin/
# -p: succeed silently when the directory already exists; the original bare
# mkdir printed an error on every re-install.
mkdir -p /usr/share/frpc
# Seed the config template only on first install; never clobber user edits.
if [ ! -f "/usr/share/frpc/frpc.ini.template" ]; then
    cp frpc.ini.template /usr/share/frpc/
fi
cp RefreshpublicIP /usr/share/frpc/
chmod 775 /usr/share/frpc/RefreshpublicIP
chmod 775 /usr/bin/frpc
chmod 777 /usr/share/frpc/frpc.ini.template
# Register and start the services with whichever init system is present.
if command -v systemctl >/dev/null 2>&1; then
    cp RefreshpublicIP.service /etc/systemd/system/
    cp frpc.service /etc/systemd/system/
    systemctl daemon-reload
    systemctl enable RefreshpublicIP
    systemctl enable frpc
    systemctl start frpc
    systemctl start RefreshpublicIP
else
    cp RefreshpublicIP.sh /etc/init.d/RefreshpublicIP
    cp frpc.sh /etc/init.d/frpc
    chmod 775 /etc/init.d/RefreshpublicIP
    chmod 775 /etc/init.d/frpc
    /etc/init.d/frpc start
    /etc/init.d/RefreshpublicIP start
fi
echo Done!
| true
|
01bea24c19fb140d36886f63c9ba60e858f8dc85
|
Shell
|
codemonkeylwt/blog
|
/shell/copy_jars.sh
|
UTF-8
| 302
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
# Copy freshly built blog jars from the Jenkins workspace into a dated
# release directory and point the /opt/blog/app symlink at it.
# $() instead of backticks; expansions quoted throughout.
cur_date="$(date +%Y%m%d)"
APP_DIR="/opt/blog/$cur_date"
mkdir -p "$APP_DIR"
JAR_BASE_DIR=/data/jenkins/workspace/blog/
cp "$JAR_BASE_DIR/index/target/blog-index.jar" "$APP_DIR"
cp "$JAR_BASE_DIR/user/target/blog-user.jar" "$APP_DIR"
cd /opt/blog/
# Re-point the "app" symlink: drop the old link, then create the new one.
rm -f app
ln -s "$APP_DIR" app
chmod 777 app/*
| true
|
088d79026bd96b0e3efb274011ca290088162b96
|
Shell
|
plaidml/onnxruntime
|
/tools/ci_build/github/linux/docker/scripts/install_centos.sh
|
UTF-8
| 845
| 3
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Install build/runtime dependencies for onnxruntime CI images on CentOS.
set -e
# Major version from /etc/redhat-release (e.g. "7"). Reads the file directly
# instead of the original `cat | tr` pipeline (useless use of cat).
os_major_version=$(tr -dc '0-9.' < /etc/redhat-release | cut -d \. -f1)
# EPEL provides several of the packages below; install it once if missing.
if ! rpm -q --quiet epel-release ; then
  yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-$os_major_version.noarch.rpm
fi
echo "installing for os major version : $os_major_version"
yum install -y redhat-lsb-core expat-devel libcurl-devel tar unzip curl zlib-devel make libunwind icu aria2 rsync bzip2 git bzip2-devel
# install dotnet core dependencies
yum install -y lttng-ust openssl-libs krb5-libs libicu libuuid
# install dotnet runtimes
yum install -y https://packages.microsoft.com/config/centos/7/packages-microsoft-prod.rpm
yum install -y dotnet-sdk-2.2 java-1.8.0-openjdk-devel ccache gcc gcc-c++ python3 python3-devel python3-pip
# install automatic documentation generation dependencies
yum install -y graphviz
| true
|
796c3f5c8f836e90724763451d929b8b830a4f6c
|
Shell
|
pkliczewski/hyperconverged-cluster-operator
|
/hack/make_local.sh
|
UTF-8
| 614
| 2.84375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Build and deploy a local (webhook-less) HCO operator: generate manifests
# into _local/, rewrite deploy.sh to use the local copies, then apply them.
set -ex
LOCAL_DIR=_local
FORMAT=${FORMAT:-txt}
hco_namespace=kubevirt-hyperconverged
# allexport: every variable assigned while sourcing hack/config is exported.
set -o allexport
source hack/config
set +o allexport
export WEBHOOK_MODE=false
mkdir -p "${LOCAL_DIR}"
./hack/make_local.py "${LOCAL_DIR}" "${FORMAT}"
# Comment out the line that applies the upstream operator.yaml.
sed "s/\(^.*\/operator.yaml$\)/### \1/" deploy/deploy.sh > _local/deploy.sh
# Point the remaining "-f <raw github URL>" references at the local deploy/ dir.
sed -i "s|-f https://raw.githubusercontent.com/kubevirt/hyperconverged-cluster-operator/master/deploy|-f deploy|g" _local/deploy.sh
chmod +x _local/deploy.sh
# Switch the current kubectl context into the HCO namespace, then deploy.
kubectl config set-context --current --namespace=${hco_namespace}
_local/deploy.sh
kubectl apply -f _local/local.yaml
| true
|
6acc590a8b1ecf94421c0212cdf10df3ab5f165e
|
Shell
|
WillForan/dotconf
|
/bin/gitmoji-select
|
UTF-8
| 1,242
| 3.640625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# insert gitmoji from fzf/rofi
# use "xdo" to type it
# 20200129 - init
# 20210219 - args: 'xdo' for rofi and autotype; 'autokey' for rofi but still return key
# depends: jq, dc, xargs, xdotool (optionally type)
# 20220329 - add and then disable an insert delay
sleep="sleep 1"; sleep=""
cmd=echo
select="fzf"
# Argument parsing: choose the selector UI and what to do with the choice.
while [ $# -gt 0 ]; do
  case "$1" in
    xdo*) cmd="xdotool $sleep type"; select="rofi -dmenu -i";;
    autokey*) select="rofi -dmenu -i";;
    *) echo "unknown option $1"; exit ;;   # message typo fixed (was "unkown")
  esac
  shift
done
## do we need to re cache
CACHE=$HOME/.local/share/gitmoji
# get: refresh the cache from the upstream gitmoji list as TSV
# (emoji<TAB>name<TAB>description); keeps the old cache if the download
# produced an empty file.
get() {
  local tmp
  tmp=$(mktemp "${TMPDIR:-/tmp}/gitmoji-XXX")
  curl "https://raw.githubusercontent.com/carloscuesta/gitmoji/master/packages/gitmojis/src/gitmojis.json" |
    jq -r '.gitmojis[]|[.emoji, .name, .description]|@tsv' > "$tmp"
  [ -s "$tmp" ] && mv "$tmp" "$CACHE"
  [ -r "$tmp" ] && rm "$tmp"
  return 0
}
sec() { date +%s; }
# First run: populate the cache.
[ ! -s "$CACHE" ] && get
cachedate="$(stat -c '%y' "$CACHE")"
# more than a week old (604800 seconds)
weeks_old=10
echo "$(sec)" "$(date +%s -d "$cachedate")" "- 60 60 * 24 * 7 * / p" | dc | xargs test $weeks_old -le && get
# $select and $cmd stay unquoted on purpose: they hold a command plus its args.
# Read the cache directly instead of `cat $CACHE | ...`.
$select < "$CACHE" | cut -f1 | xargs $cmd
| true
|
210bcd8bb2de5c44da6bcfc17f3dbd10a1450068
|
Shell
|
jubicoy/openshift-cartridge-gitolite
|
/bin/install
|
UTF-8
| 2,005
| 3.640625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -e
# OpenShift cartridge install hook for gitolite: wires the cartridge's httpd
# directories, then either reuses an existing gitolite home in persistent
# storage or performs a first-time setup and prints the generated credentials.
# Pick the distro's httpd module directory (lib64 on 64-bit systems).
if [[ -d /usr/lib64 ]]; then
_libdir=/usr/lib64/
else
_libdir=/usr/lib/
fi
# Optional "-v/--version <version>" argument from the platform.
case "$1" in
-v|--version)
version="$2"
esac
# Provides client_result, set_env_var, cart_props, generate_username/password.
source $OPENSHIFT_CARTRIDGE_SDK_BASH
env_dir="${OPENSHIFT_GITOLITE_DIR}env/"
# Create and link required directories and files
mkdir -p ${OPENSHIFT_GITOLITE_DIR}{run,tmp}
rm -rf ${OPENSHIFT_GITOLITE_DIR}modules ${OPENSHIFT_GITOLITE_DIR}conf/magic
ln -s ${_libdir}httpd/modules ${OPENSHIFT_GITOLITE_DIR}modules
mkdir -p ${OPENSHIFT_GITOLITE_DIR}conf
ln -s /etc/httpd/conf/magic ${OPENSHIFT_GITOLITE_DIR}conf/magic
# Reuse an existing gitolite home (persisted in OPENSHIFT_DATA_DIR) if present.
if [[ -d ${OPENSHIFT_DATA_DIR}gitolite-home ]]; then
client_result ""
client_result "Existing installation found."
export GITOLITE_HTTP_HOME=${OPENSHIFT_DATA_DIR}gitolite-home/
set_env_var 'GITOLITE_HTTP_HOME' "${OPENSHIFT_DATA_DIR}gitolite-home/" $env_dir
else
# Setup home directory into static storage
mkdir ${OPENSHIFT_DATA_DIR}gitolite-home
export PATH=${OPENSHIFT_GITOLITE_DIR}usr/bin/:$PATH
# Run gitolite setup
# First-time setup: create admin user, initialize gitolite, add htpasswd entry.
username=$(generate_username)
password=$(generate_password)
cd $GITOLITE_HTTP_HOME; HOME=$GITOLITE_HTTP_HOME gitolite setup -a "$username"
htpasswd -bc ${GITOLITE_HTTP_HOME}.htpasswd "$username" "$password"
# Print results
client_result ""
client_result "Gitolite installed. Please make note of these credentials:"
client_result ""
client_result " Admin User: $username"
client_result " Admin Password: $password"
client_result " Config Repository: gitolite-admin"
client_result ""
client_result "Access repository with git:"
client_result " git clone https://$OPENSHIFT_APP_DNS/git/gitolite-admin.git"
client_result ""
client_result "Users are added via ssh and htpasswd for now."
cart_props "repo_url=https://$OPENSHIFT_APP_DNS/git/gitolite-admin.git"
cart_props "username=${username}"
cart_props "password=${password}"
fi
# Allow anonymous HTTP access by injecting HTTP_ANON_USER into the gitolite rc.
sed -i "/\%RC = (/a \ \ \ \ HTTP_ANON_USER => 'anonymous'," ${GITOLITE_HTTP_HOME}.gitolite.rc
exit 0
| true
|
1caca177a6f984acca9c125215e517f6aab89e88
|
Shell
|
ar45/helper-utils
|
/git/clone-or-update-git-repo
|
UTF-8
| 3,152
| 4.375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# clone-or-update-git-repo: clone a repository, or fetch/refresh an existing
# clone at the destination.
# NOTE: `read -d ''` reads up to a NUL byte, so it slurps the entire heredoc
# into USAGE and then returns non-zero at EOF; harmless here since errexit is
# not enabled.
read -r -d '' USAGE <<EOF
USAGE: $0
--repo-url git-url
[--destination-dir dest] Clone into <dest>. If /path/to/dest/ ends with a "/", the repo name will be appended to the destination dir.
[--git-ref {ref/tag}] Checkout / Clone to the specified ref/tag
[--remote] The remote name (Defaults to origin)
[--checkout] Checkout <git-ref> if repo is already cloned
[--stash] Force checkout by stashing local changes.
[--no-update-local] Do not update local branch
-h | --help Display this help.
EOF
# Print the usage text (USAGE is defined at the top of the file) and exit
# with status 1.
usage()
{
echo "$USAGE";
exit 1;
}
# exit_error MESSAGE [STATUS]
# Print MESSAGE to stderr (diagnostics belong on stderr, not stdout as in the
# original) and exit with STATUS, defaulting to 1.
exit_error()
{
    echo "$1" >&2
    # Explicit if/else instead of the original `[ ... ] && exit $2 || exit 1`
    # anti-pattern; "$2" quoted.
    if [ $# -gt 1 ]; then
        exit "$2"
    else
        exit 1
    fi
}
# Parse long options with util-linux getopt; -n names the program in errors.
ARGS=`getopt -o 'h' -l 'help,repo-url:,destination-dir:,git-ref:,remote:,checkout,stash,no-update-local' -n "$0" -- "$@"`
if [ $? != 0 ]; then
usage
fi
# Defaults for all options.
repo_url=
remote_name=origin
destination_dir=
git_ref=
checkout=false
stash=false
update_local=true
# Re-inject the normalized option list as positional parameters.
eval set -- "$ARGS"
while [ $# -gt 0 ] ; do
case "$1" in
--repo-url)
repo_url="$2"
shift 2
;;
--destination-dir)
destination_dir="$2"
shift 2
;;
--git-ref)
git_ref="$2"
shift 2
;;
--remote)
remote_name=$2
shift 2
;;
--checkout)
checkout=true
shift
;;
--stash)
stash=true
shift
;;
--no-update-local)
update_local=false
shift
;;
--help|-h)
usage
;;
--)
shift
;;
*)
echo "Got invalid argument $@" 2>&1
usage
;;
esac
done
# --repo-url is mandatory.
if [ -z "$repo_url" ]; then
usage
fi
# Default destination: repo basename (minus .git) under the current directory.
# A trailing "/" on --destination-dir means "append the repo name".
if [ -z "$destination_dir" ]; then
d=`basename $repo_url`
destination_dir="${PWD}/${d%.git}"
elif [ "${destination_dir: -1:1}" = "/" ]; then
d=`basename $repo_url`
destination_dir="${destination_dir}${d}"
fi
if [ -z "$git_ref" ] && $checkout; then
echo "Cannot checkout without specifying --git-ref" >&2;
exit 3
fi
# Host-independent suffix of the URL (everything from the last ':'), used
# later to recognize whether an existing clone points at the same repo.
repo_path="`echo "$repo_url" | sed 's/.*:/:/g'`";
# Echo the effective settings for the operator.
cat <<EOF
Cloning / Updating repo:
repo_url = $repo_url
repo_path = $repo_path
remote_name = $remote_name
destination_dir = $destination_dir
git_ref = $git_ref
checkout = $checkout
stash = $stash
update_local = $update_local
EOF
# clone_repo
# If destination_dir already holds a clone of the same repo (matched via the
# host-independent repo_path suffix), refresh it in place; otherwise perform
# a fresh clone of $git_ref. Uses the file-level repo_url/remote_name/
# destination_dir/git_ref variables.
clone_repo()
{
local opts=()
if [ ! -z "$remote_name" ]; then
opts+=("--origin" "$remote_name")
fi
if [ -d $destination_dir/.git ]; then
# Existing repo: verify it points at the same upstream and has the remote.
if ! git --git-dir=$destination_dir/.git remote -v | grep -q "$repo_path"; then
exit_error "$destination_dir is not empty and is not a clone of $repo_path repo"
elif ! git --git-dir=$destination_dir/.git remote -v | grep -q "^$remote_name[[:space:]].*$repo_path"; then
exit_error "$destination_dir does not have a remote with the name '$remote_name'"
fi
cd "$destination_dir" && refresh_git_repo "$git_ref"
else
git clone $repo_url ${opts[@]} -b "$git_ref" "$destination_dir"
fi
}
# refresh_git_repo REF
# Bring an existing clone up to date: fetch (with tags) from $remote_name,
# optionally stash local changes ($stash), optionally check out REF
# ($checkout), then fast-forward the local branch ($update_local).
# Expects the repo to be the current working directory.
refresh_git_repo()
{
local ref="$1"
git fetch "$remote_name" --tags || exit_error "Could not fetch from remote [$remote_name]"
if $stash; then
git stash --include-untracked || exit_error "Failed to stash local changes"
fi
if $checkout; then
git checkout ${ref} || exit_error "Failed to checkout ${ref}"
fi
if $update_local; then
# --ff-only: never create a merge commit; fail instead of diverging.
git pull --ff-only || exit_error "Failed to fast-forward from remote."
fi
}
clone_repo
| true
|
eea9654838fb7ae41cf65805bfc639639b501369
|
Shell
|
datamonk/strata19
|
/scripts/get_slides.sh
|
UTF-8
| 700
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/bash
# Download all slide decks (pdf/ppt/pptx/zip) linked from the O'Reilly Strata
# proceedings page into OUTPUT_PREFIX.
CWD=$(/bin/pwd -P)
OUTPUT_PREFIX="/home/afcamar/proj/gitlab/strata19/slides/"
# Download Oreilly Strata slidedeck web source page. Iterate
# through to parse and download URL paths meeting the file
# extention critera (PDF's, PPT, ZIP). Write to OUTPUT_PREFIX
# directory.
# FIX: the original loop ended with `< "${1:-/dev/stdin}"`, which redirected
# the while-loop's stdin away from the pipeline, so the URLs parsed from the
# proceedings page were never consumed (the loop read from $1/stdin instead).
# The loop now reads directly from the pipeline; `read -r` preserves
# backslashes in URLs.
wget 'https://conferences.oreilly.com/strata/strata-ca/public/schedule/proceedings' \
&& grep '.ppt\|.pptx\|.pdf\|.zip' "${CWD}/proceedings" \
| cut -d'"' -f2 \
| while read -r line; do wget "${line}" -P "${OUTPUT_PREFIX}"; done
# Fix garbage chars and whitespace in filenames
detox -s iso8859_1 -v "${OUTPUT_PREFIX}"/*.[pz][pdi][tfp]*
# Cleanup web source file
rm -f "${CWD}/proceedings"
| true
|
e1155e3c8380686809b5e66409829ae059e1d4b3
|
Shell
|
jmeagher/dotfiles
|
/bash.d/fasd.sh
|
UTF-8
| 610
| 3.4375
| 3
|
[] |
no_license
|
# Enable fasd shortcuts when fasd is installed; otherwise define a helper
# that bootstraps fasd into ~/bin.
# `command -v` replaces the original backticked `which` test (POSIX, no fork
# of a non-builtin, and quieter).
if command -v fasd > /dev/null 2>&1 ; then
  eval "$(fasd --init auto)"
  alias fv="fasd -aie $EDITOR"
  alias v="fasd -aie $EDITOR"
  alias d="fasd_cd -di"
  # Copied from the fasd init code to detect completion
  if [ "$BASH_VERSION" ] && complete > /dev/null 2>&1; then # bash
    complete -F _fasd_bash_cmd_complete d
    complete -F _fasd_bash_cmd_complete v
    complete -F _fasd_bash_cmd_complete fv
  fi
else
  # One-shot installer: clone fasd into ~/.tools and symlink it into ~/bin.
  function setup_fasd() {
    echo "Setting up fasd in ~/bin"
    ( cd ~; mkdir -p .tools; cd .tools ; git clone git@github.com:clvv/fasd.git; cd ~/bin; ln -s ../.tools/fasd/fasd . )
  }
fi
| true
|
a44023362f71086f333e6c291c331ffcccce81cd
|
Shell
|
NBISweden/Knox-ePouta
|
/experiments/profiles/supernode/files/sob-tests
|
UTF-8
| 920
| 3.03125
| 3
|
[] |
no_license
|
# -*-sh-*-
# Manual I/O benchmark recipes for the external `sob` tool; each command below
# is a self-contained test case to run by hand.
# Write a 24GB file in chunks of 8MB. Basic test of write I/O
# bandwidth. For this kind of test it is important that the file is
# substantially larger than the main memory of the machine. If the
# file is 2GB and main memory is 1GB then up to 50% of the file could
# be cached by the operating system and the reported write bandwidth
# would be much higher than what the disk+filesystem could actually
# provide.
# Read and Write
sob -rw -b 8m -s 10g
# Writing 500 files of 1 MB, spread out in 10 directories
sob -w -b 64k -s 1m -n 500 -o 50
# Write 50 128MB files (6.4GB) with a block size of 64kB, then
# read random files among these 5000 times. A good way to test
# random access and mask buffer cache effects (provided the sum
# size of all the files is much larger than main memory).
sob -w -R 5000 -n 50 -s 128m -b 64k
# Read and write 1 file of 1 GB. Is it cached in mem?
sob -rw -b 128k -s 1g
| true
|
3381f72ca9abdcb24ef550fc0310e6fe1f8abbb2
|
Shell
|
takase1121/dotfiles
|
/private_dot_config/executable_manage_picom
|
UTF-8
| 531
| 3.703125
| 4
|
[] |
no_license
|
#!/usr/bin/env sh
# manage_picom {start|stop} — start or stop the picom compositor.
# find_picom: print the PID of a running picom and return 0, or return 1 if
# none is running. Works by comparing each /proc/<pid>/exe symlink target
# against the resolved picom binary path.
find_picom() {
picom_path="$(which picom)"
for proc in /proc/[0-9]*/exe; do
if [ "$(readlink "$proc")" = "$picom_path" ]; then
# "$proc" is /proc/<pid>/exe; the second-to-last path component is the PID.
echo "$proc" | rev | cut -d"/" -f2 | rev
return 0
fi
done
return 1
}
case $1 in
start)
# Refuse to start a second instance.
if find_picom >/dev/null 2>&1; then
echo "picom already running."
exit 1
fi
nohup picom --experimental-backends >/dev/null 2>&1 &
;;
stop)
# $? here is find_picom's exit status (from the command substitution).
picom_pid="$(find_picom)"
if [ "$?" -ne 0 ]; then
echo "picom is not running"
exit 1
fi
kill "$picom_pid"
;;
esac
| true
|
135d1034da6c85fe7696a44d60c5886fe697170b
|
Shell
|
vcwebio/modeco.package.ingest_journal_sasl
|
/modeco/deploy-local-up
|
UTF-8
| 281
| 3.09375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Create the journalbeat docker volume when the requested component name ($2)
# is a prefix of the volume's full name.
volume_name="package_ingest_journal_sasl_ingest_journal_sasl_journalbeat_volume"
requested_prefix="package_ingest_journal_sasl_$2"
case "$volume_name" in
    "$requested_prefix"*)
        executionplane --silent docker volume create "$volume_name"
        ;;
esac
| true
|
93df037d2c590192c095a0e7b1a954ec03521be3
|
Shell
|
Azure/azure-service-operator
|
/scripts/v2/make-multitenant-tenant.sh
|
UTF-8
| 2,972
| 3.28125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
set -euxo pipefail
# Build a per-tenant Azure Service Operator manifest
# (multitenant-tenant_<version>.yaml) by filtering documents out of a full
# operator manifest with yq and rewriting names/namespaces for the tenant.
# Usage: make-multitenant-tenant.sh <version> <source-manifest>
version="$1"
target="multitenant-tenant_${version}.yaml"
source="$2"
tenant="tenant1"
tenant_namespace="$tenant-system"
# Collect the few documents we want in the per-tenant yaml.
# Each variable below is a yq boolean expression selecting one document kind.
namespace='.kind == "Namespace"'
leader_election_role='(.kind == "Role" and .metadata.name == "azureserviceoperator-leader-election-role")'
leader_election_binding='(.kind == "RoleBinding" and .metadata.name == "azureserviceoperator-leader-election-rolebinding")'
manager_role_binding='(.kind == "ClusterRoleBinding" and .metadata.name == "azureserviceoperator-manager-rolebinding")'
crd_reader_binding='(.kind == "ClusterRoleBinding" and .metadata.name == "azureserviceoperator-crd-reader-rolebinding")'
deployment='(.kind == "Deployment" and .metadata.name == "azureserviceoperator-controller-manager")'
serviceaccount='.kind == "ServiceAccount"'
query="select($namespace or $leader_election_role or $leader_election_binding or $manager_role_binding or $crd_reader_binding or $deployment or $serviceaccount)"
yq eval "$query" "$source" > "$target"
# inplace_edit EXPR — apply a yq expression to the target manifest in place.
function inplace_edit() {
  command=$1
  yq eval -i "$command" "$target"
}
# Create the tenant namespace.
inplace_edit "(select($namespace).metadata.name) = \"$tenant_namespace\""
# Put the other resources into that namespace (except for the cluster
# role binding since that's not namespaced).
inplace_edit "(select($leader_election_role or $leader_election_binding or $deployment).metadata.namespace) = \"$tenant_namespace\""
# Update the subject namespaces for the bindings so they refer to the
# service account in the tenant namespace
inplace_edit "(select($leader_election_binding or $manager_role_binding or $crd_reader_binding).subjects[0].namespace) = \"$tenant_namespace\""
inplace_edit "(select($serviceaccount).metadata.namespace) = \"$tenant_namespace\""
# Rename the cluster role bindings so bindings for different tenants
# can coexist.
inplace_edit "(select($manager_role_binding).metadata.name) = \"azureserviceoperator-manager-rolebinding-$tenant\""
inplace_edit "(select($crd_reader_binding).metadata.name) = \"azureserviceoperator-crd-reader-rolebinding-$tenant\""
# Changes to the deployment:
# * Remove the webserver cert volume and mount.
inplace_edit "del(select($deployment) | .spec.template.spec.volumes)"
manager_container="select($deployment) | .spec.template.spec.containers[] | select(.name == \"manager\")"
inplace_edit "del(${manager_container}.volumeMounts)"
# * Remove the rbac-proxy container.
inplace_edit "del(select($deployment) | .spec.template.spec.containers[] | select(.name == \"kube-rbac-proxy\"))"
# * Remove the webhook port.
inplace_edit "del(${manager_container} | .ports[] | select(.name == \"webhook-server\"))"
# * Set the operator-mode env var to "watchers".
new_env_val='{"name": "AZURE_OPERATOR_MODE", "value": "watchers"}'
inplace_edit "(${manager_container} | .env[] | select(.name == \"AZURE_OPERATOR_MODE\")) = $new_env_val"
| true
|
392682d5ff7449d25292cc9cc4ee4b25b1a2370b
|
Shell
|
doctaphred/profile
|
/bin/rmate
|
UTF-8
| 1,462
| 4.25
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
set -euo pipefail
# This script replaces itself with the rmate executable, then runs the given command!
{ # Defer execution until the matching brace.
  # (The brace group makes bash read the whole body before executing it, so
  # overwriting $0 below cannot corrupt the running script.)
  # Pinned checksum of the upstream rmate script, verified after download.
  sha256=830a590e15b2d1b89273428222736bbebcd677c5c6678c82e61bd19f256fcd2c
  url=https://raw.githubusercontent.com/textmate/rmate/master/bin/rmate
  # TODO: Support other platforms, and enable version updates.
  # Save the name of this file.
  filename=$(basename "$0")
  # Log to stderr.
  >&2 echo "$0: installing $filename from $url"
  # Create a temporary working directory.
  workdir="$(mktemp -d)"
  # Silence pushd/popd to avoid interfering with the command.
  pushd "$workdir" >/dev/null
  # Download the file.
  # -s/--silent: Don't show progress meter or error messages.
  # -S/--show-error: Do still show error messages.
  # -L/--location: Follow HTTP redirects.
  curl --silent --show-error --location "$url" >"$filename"
  # Verify the integrity of the downloaded file.
  # -s/--status: Don't show error messages.
  # -c/--check: Verify checksum (instead of calculating it).
  shasum --status --check <<<"$sha256  $filename"
  # Make it executable.
  chmod a+x "$filename"
  # Return to the original working directory.
  popd >/dev/null
  # Replace this script with the downloaded file.
  # NOTE(review): assumes $0 is a writable path to this script — verify when
  # invoking via PATH lookup or a read-only location.
  mv "$workdir/$filename" "$0"
  # Clean up the temporary working directory.
  rm -rf "$workdir"
  # YOLO
  "$0" "$@"
}
| true
|
96fc433758961a870b45b6a909f2d62f8c9dec8d
|
Shell
|
vboiteau/GTI770
|
/lab03/simple.sh
|
UTF-8
| 852
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
# Train/evaluate a Weka MultilayerPerceptron on the Char_UpperLower52 set.
# Args: [hidden-layers] [cycles] [output-name-without-extension].
# WEKA_JAR comes from config.sh.
source ./config.sh
HiddenLayer=80
# "$1" etc. are quoted: the original unquoted `[ -z $1 ]` breaks on arguments
# containing whitespace.
if [ -z "$1" ]; then
    echo "First parameter should be hidden layers, will use default $HiddenLayer"
else
    HiddenLayer=$1
fi
Cycle=1000
if [ -z "$2" ]; then
    echo "Second parameter should be cycle, will use default $Cycle"
else
    Cycle=$2
fi
TRAIN_FILE="./arff/Char_UpperLower52.train.arff"
TEST_FILE="./arff/Char_UpperLower52.val.arff"
OUTPUT_NAME="test_H${HiddenLayer}_N${Cycle}"
if [ -z "$3" ]; then
    # Message fixed: this is the third parameter (the original said "Second").
    echo "Third parameter should be the output name without extension, will use default $OUTPUT_NAME."
else
    OUTPUT_NAME=$3
fi
classifier="functions.MultilayerPerceptron"
# -L learning rate, -M momentum, -E validation threshold, -N cycles, -H hidden layers.
arguments="-L 0.3 -M 0.2 -S 0 -E 20 -N $Cycle -H $HiddenLayer -t $TRAIN_FILE -T $TEST_FILE -d output/$OUTPUT_NAME.model"
eval "java -cp $WEKA_JAR weka.classifiers.${classifier} $arguments > output/$OUTPUT_NAME.txt"
| true
|
cebee667136f7776544317fad55ca692bbcc5369
|
Shell
|
graphcore/tensorflow
|
/tensorflow/lite/micro/tools/make/ext_libs/xtensa_download.sh
|
UTF-8
| 3,322
| 3.8125
| 4
|
[
"MIT",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Downloads necessary to build with OPTIMIZED_KERNEL_DIR=xtensa.
#
# Called with four arguments:
#   1 - Path to the downloads folder which is typically
#       tensorflow/lite/micro/tools/make/downloads
#   2 - Xtensa variant to download for (e.g. hifi4)
#
# This script is called from the Makefile and uses the following convention to
# enable determination of sucess/failure:
#
#   - If the script is successful, the only output on stdout should be SUCCESS.
#     The makefile checks for this particular string.
#
#   - Any string on stdout that is not SUCCESS will be shown in the makefile as
#     the cause for the script to have failed.
#
#   - Any other informational prints should be on stderr.
set -e

# patch_library DIR PATCH
# Turn the unpacked library in DIR into a throwaway git repository and apply
# PATCH (path relative to DIR). A repo is required because `git apply` is
# used; the commit identity is a placeholder. Factored out of the previously
# duplicated hifi4/hifi5 blocks.
patch_library() {
  pushd "${1}" >&2
  git init . >&2
  git config user.email "tflm@google.com"
  git config user.name "TensorflowLite Micro"
  git add *
  git commit -a -m "Commit for a temporary repository." > /dev/null
  git apply "${2}"
  popd >&2
}

DOWNLOADS_DIR=${1}
if [ ! -d "${DOWNLOADS_DIR}" ]; then
  echo "The top-level downloads directory: ${DOWNLOADS_DIR} does not exist."
  exit 1
fi

# Pick the download URL, directory name and checksum for the requested variant.
if [[ ${2} == "hifi4" ]]; then
  LIBRARY_URL="http://github.com/foss-xtensa/nnlib-hifi4/raw/master/archive/xa_nnlib_hifi4_02_11_2021.zip"
  LIBRARY_DIRNAME="xa_nnlib_hifi4"
  LIBRARY_MD5="8b934f61ffe0a966644849602810fb1b"
elif [[ ${2} == "hifi5" ]]; then
  LIBRARY_URL="http://github.com/foss-xtensa/nnlib-hifi5/raw/master/archive/xa_nnlib_hifi5_02_22.zip"
  LIBRARY_DIRNAME="xa_nnlib_hifi5"
  LIBRARY_MD5="08cd4d446b3e0b7d180f9ef0dec9ad0a"
else
  echo "Attempting to download an unsupported xtensa variant: ${2}"
  exit 1
fi

LIBRARY_INSTALL_PATH=${DOWNLOADS_DIR}/${LIBRARY_DIRNAME}
if [ -d "${LIBRARY_INSTALL_PATH}" ]; then
  echo >&2 "${LIBRARY_INSTALL_PATH} already exists, skipping the download."
else
  # Download, verify the checksum, unpack, then apply the variant's patch.
  TMP_ZIP_ARCHIVE_NAME="${LIBRARY_DIRNAME}.zip"
  wget ${LIBRARY_URL} -O /tmp/${TMP_ZIP_ARCHIVE_NAME} >&2
  MD5=`md5sum /tmp/${TMP_ZIP_ARCHIVE_NAME} | awk '{print $1}'`
  if [[ ${MD5} != ${LIBRARY_MD5} ]]
  then
    echo "Bad checksum. Expected: ${LIBRARY_MD5}, Got: ${MD5}"
    exit 1
  fi
  unzip -qo /tmp/${TMP_ZIP_ARCHIVE_NAME} -d ${DOWNLOADS_DIR} >&2
  if [[ ${2} == "hifi4" ]]; then
    patch_library "${DOWNLOADS_DIR}/xa_nnlib_hifi4/" ../../ext_libs/xtensa_patch.patch
  fi
  if [[ ${2} == "hifi5" ]]; then
    patch_library "${DOWNLOADS_DIR}/xa_nnlib_hifi5/" ../../ext_libs/xtensa_depthwise_patch_hifi5.patch
  fi
fi

echo "SUCCESS"
| true
|
d093eff5725396b4bc31fbc032e124a62bde64fb
|
Shell
|
tikservices/powerdown
|
/default.powerup
|
UTF-8
| 1,784
| 2.515625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# powerdown - powerup
#
# Restore non-powersaving ("plugged in") settings for buses, CPU, disks,
# network, sound, and graphics.
# NOTE(review): the helpers used below (opts, opt, run, load_mod, brightness)
# are not defined in this file — presumably provided by the powerdown
# framework that sources this script; verify before running it standalone.
# bus
opts "/sys/bus/*/devices/*/power/control" on
# usb autosuspend
opts "/sys/bus/usb/devices/*/power/autosuspend" 600
opts "/sys/bus/usb/devices/*/power/control" on
# nmi_watchdog
opt /proc/sys/kernel/nmi_watchdog 1
# cpu
opts "/sys/devices/system/cpu/cpu*/cpufreq/scaling_governor" performance
load_mod msr
run x86_energy_perf_policy normal
# aspm
opt /sys/module/pcie_aspm/parameters/policy default
# kernel write mode
opt /proc/sys/vm/laptop_mode 0
opt /proc/sys/vm/dirty_ratio 30
opt /proc/sys/vm/dirty_background_ratio 10
opt /proc/sys/vm/dirty_expire_centisecs 600
opt /proc/sys/vm/dirty_writeback_centisecs 600
# disk
opts "/sys/class/scsi_host/host*/link_power_management_policy" medium_power
for dev in $(awk '/^\/dev\/sd/ {print $1}' /etc/mtab); do run mount -o remount,relatime "$dev"; done
for dev in $(awk '/^\/dev\/sd/ {print $1}' /etc/mtab); do run blockdev --setra 256 "$dev"; done
for dev in $(awk '/^\/dev\/sd/ {print $1}' /etc/mtab); do run hdparm -B 254 -S 253 "$dev"; done
# sound card
opt /sys/module/snd_hda_intel/parameters/power_save 0
opt /sys/module/snd_hda_intel/parameters/power_save_controller N
opt /sys/module/snd_ac97_codec/parameters/power_save 0
# net
for i in $(iw dev | awk '$1 == "Interface" { print $2 }'); do run iw dev "$i" set power_save off; done
run ethtool -s eth0 wol g
# screen
brightness "/sys/class/backlight/*" 5
# webcam
load_mod videodev
# bluetooth
load_mod bluetooth
# open source ATI
opt /sys/kernel/debug/vgaswitcheroo/switch ON
opts "/sys/class/drm/card*/device/power_method" profile
opts "/sys/class/drm/card*/device/power_profile" default
opts "/sys/class/drm/card*/device/power_dpm_state" balanced
# i915
#opt /sys/module/i915/parameters/i915_enable_rc6 0
exit 0
| true
|
97f3fd90c68243ea0171cac14d92bb761058ca9f
|
Shell
|
edulutionzm/offline_testing
|
/extract_baselines.sh
|
UTF-8
| 1,127
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/bash
# Extract monthly baseline test results from the KA Lite testing server and
# submit them. Arg 1: month as MM-YY or MM/YY (e.g. 02-17).
DIRECTORIES=( ~/reports ~/backups ~/reports/baseline)
for DIRECTORY in "${DIRECTORIES[@]}"; do
  if [ ! -d "$DIRECTORY" ]; then
    mkdir "$DIRECTORY"
  else
    echo "$DIRECTORY already exists. Skipping this step"
  fi
done
#if db file exists then extraction and submission begin. If not, will output error message to contact support
# (The file test is folded directly into the if; the original ran `test -f`
# and then inspected $?, which is equivalent but indirect.)
if [ -f ~/.testing_server/public/test_responses.sqlite ]; then
  # Validate the month argument; grep -E replaces the deprecated egrep.
  if (echo "$1" |\
    grep -E '^(1[0-2]|0[0-9])[-/][0-9]{2}' > /dev/null
  ); then
    echo Stopping ka lite server
    sudo service ka-lite stop > /dev/null
    sudo service nginx stop > /dev/null
    echo "Extracting baseline tests for month $1"
    echo Extracting tests.....
    # fetch the first argument given on the command line and use it as an argument to the Rscript
    Rscript ~/.testing_server/baseline.R "$1"
    # After Rscript executes, execute send report script
    ~/.testing_server/send_baseline.sh
  else
    echo Please enter a valid year and month e.g 02-17
    exit 1
  fi
else
  echo Error. Baselines NOT extracted. Please contact tech support 1>&2
  exit 1
fi
| true
|
0ae50b94cd3f6529bab50d9563eee92678b7dee6
|
Shell
|
mortnir/ely-start
|
/ely-start.sh
|
UTF-8
| 1,907
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/bash
# ely-start.sh — import SSH keys from a bootstrap host, load them into
# ssh-agent, and (eventually) install the Elastic beats packages.
usage () {
    echo "# - usage: ./ely-start.sh import-host host-username (tag/branch name)"
    exit 1
}
#curl -L -O https://artifacts.elastic.co/downloads/beats/metricbeat/metricbeat-7.6.2-amd64.deb
#curl -L -O https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-7.6.2-amd64.deb
echo "# - Filebeat and metricbeat downloaded"
#sleep 1
#[ -z $1 ] && IDHOST=$1 || read -r -p "# Which host to fetch id_rsa from? (sw01 is standard) " IDHOST
IDHOST=sw01
IDUSER=user
IDREMOTE=/home/$IDUSER/.ssh/
IDLOCAL=/home/$LOGNAME/.ssh/
# Import the private key if it is missing locally.
echo "# - Checking if $IDLOCAL/id_rsa exists"
if [ ! -f "$IDLOCAL/id_rsa" ]; then
    echo "# - importing id_rsa"
    scp "user@$IDHOST:$IDREMOTE/id_rsa" "$IDLOCAL/id_rsa"
fi
# Import the public key if it is missing locally.
echo "# - Checking if $IDLOCAL/id_rsa.pub exists"
if [ ! -f "$IDLOCAL/id_rsa.pub" ]; then
    echo "# - importing id_rsa.pub"
    scp "user@$IDHOST:$IDREMOTE/id_rsa.pub" "$IDLOCAL/id_rsa.pub"
fi
#scp user@$IDHOST:/home/user/.ssh/id_rsa.pub /home/user/.ssh/id_rsa.pub
#[ -f $HOME/.ssh/id_rsa ] && echo "$FILE exist" || echo "$FILE does not exist"
echo "# - Checking if $IDLOCAL/id_rsa and id_rsa.pub exists"
if [[ -f "$IDLOCAL/id_rsa" && -f "$IDLOCAL/id_rsa.pub" ]]; then
    eval "$(ssh-agent -s)"
    ssh-add "$IDLOCAL/id_rsa"
    #read -r -p "# - Want to display(echo) id_rsa.pub? (yes)" DISPLAYRSA
    echo "# - Displaying id_rsa.pub"
    cat "$IDLOCAL/id_rsa.pub"
    echo "# -"
    echo "# - add id_rsa.pub to github (log in -> settings -> SSH and GPG -> New ssh)"
else
    echo "# - id_rsa and/or id_rsa.pub does not exist"
    # The original wrote "$ls -al ..." inside double quotes, which expanded
    # the (empty) variable $ls; the dollar is now escaped so the literal
    # command is shown.
    echo "# - \$ ls -al $IDLOCAL"
    ls -al "$IDLOCAL"
    echo "# - Exiting."
    exit 1
fi
# NOTE(review): everything below is dead code — the unconditional `exit 1`
# stops the script first (kept as in the original). However, the echo lines
# below had unterminated double quotes, which made the tail of the file
# unparseable; the quotes are now closed.
exit 1
sudo dpkg -i *.deb
echo "# - change and copy the configs from ./config/ after chaning password"
echo "# - run \"sudo Xbeat modules enable system\""
echo "# - run \"sudo Xbeat setup\""
echo "# - run \"sudo /etc/init.d/Xbeat start\""
| true
|
8bb8d0d6b93eea6afe8cb47b06b762c1e2ac83dd
|
Shell
|
gotroyb127/dotfiles
|
/scripts/shell_functions/Time
|
UTF-8
| 202
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/sh
# Time REPEAT CMD [ARGS...]
# Run CMD ARGS... REPEAT times with stdout discarded and stdin closed, and
# report elapsed time for the whole batch via the `time` keyword.
# Returns 5 on usage error (fewer than two arguments).
Time() {
	a=0
	if [ $# -ge 2 ]
	then
		m=$1
		shift
	else
		echo "Time: usage: 'Time Repeat Cmd.'" >&2
		return 5
	fi
	# POSIX arithmetic: the original used $((++a)); the pre-increment operator
	# is a bash/ksh extension and fails under strict POSIX sh (e.g. dash).
	time \
	while [ "$a" -lt "$m" ]
	do
		a=$((a + 1))
		# stdin from /dev/null: the original's `> /dev/null <&1` duplicated
		# fd1 (already /dev/null at that point) onto stdin; this spells the
		# same effect out directly.
		"$@" > /dev/null < /dev/null
	done
}
| true
|
3c3678ac472aac6452998283181986afe8b942b7
|
Shell
|
halhenke/gist-bin-pub
|
/git-sync-repositorium/git-sync-repositorium.sh
|
UTF-8
| 324
| 3.234375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Autocommitting with inotify (repo: repositorium)
# For each repo listed in $repos under $XDG_DOCUMENTS_DIR/Uni, stage all
# changes (including deletions) and commit with a timestamped message.
# Intended to be triggered by an inotify watcher.
repos=repositorium
for r in $repos
do
cd -- "${XDG_DOCUMENTS_DIR}/Uni/${r}" &&
{
# Stage deletions explicitly — presumably belt-and-braces alongside the
# `git add -A` below, which normally covers deletions too; verify intent.
git ls-files --deleted -z | xargs -0 git rm 1>/dev/null 2>&1
git add -A . 1>/dev/null 2>&1
git commit -a -m "inotify $(date)"
}
done
| true
|
ebeb7f7fa6845eb647d3c7aad7542df5b1f36261
|
Shell
|
tdegeus/ghostscript_ext
|
/pdfmeta-set
|
UTF-8
| 2,113
| 4.1875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# pdfmeta-set: rewrite a PDF's Title/Author properties via Ghostscript's
# pdfmark mechanism (generates a temp .pdfmarks file, re-emits the PDF).
# ==============================================================================
# Print the manual page shown for -h/--help.
function printhelp {
echo "\
NAME
pdfmeta-set - set PDF properties using Ghost-script
USAGE
$ pdfmeta-set [-t TITLE] [-a AUTHOR] FILE
DESCRIPTION
This program changes the properties of a PDF-documents using Ghost-script.
OPTIONS
-h, --help
Print help.
-t STR
Title of the PDF.
-a STR
Author of the PDF.
RELEASE
Version 1.0, October 2012
COPYRIGHT
T.W.J. de Geus
tom@geus.me
www.geus.me
"
}
# ==============================================================================
# set usage
usage="pdfmeta-set [-t TITLE] [-a AUTHOR] PDFNAME"
# set default
titlespec=0 # title not specified
author="Ghostscript" # author
# read optional input
while getopts 'h-:a:t:' OPT; do
case $OPT in
# help (-h or --help)
h ) printhelp; exit 0; ;;
- ) case $OPTARG in
help) printhelp; exit 0 ;;
*) echo $usage; exit 1 ;;
esac;;
# set the title (comment fixed: -t sets the title, not the author)
t ) titlespec=1; title=$OPTARG ;;
# set the author
a ) author=$OPTARG ;;
# error on unknown options
\? ) echo $usage; exit 1 ;;
esac
done
# remove the input options from "$@"
shift $(($OPTIND-1))
# read the input file
pdf="$1"
# set the default title as the pdf name
if [ $titlespec -eq 0 ]; then
title="$pdf"
fi
# ==============================================================================
# generate an unlikely string (timestamp keeps temp names collision-free)
numb=`date '+%y%m%d%H%M%S'`
# set temporary settings file
tset="$pdf$numb.pdfmarks"
# set temporary output
tout="$pdf$numb.out"
# generate a temporary file with the pdfmarks settings
echo "[ /Title ($title)" > "$tset"
echo " /Author ($author)" >> "$tset"
echo " /DOCINFO pdfmark" >> "$tset"
# apply settings using ghostscript
# http://milan.kupcevic.net/ghostscript-ps-pdf/
gs -dBATCH -dNOPAUSE -dQUIET -sDEVICE=pdfwrite\
 -sOutputFile="$tout" "$pdf" "$tset"
# move temporary output (overwrites the input PDF in place)
mv "$tout" "$pdf"
# remove temporary settings file
rm "$tset"
# ==============================================================================
exit 0
| true
|
5fe00f6673fafd7e66475d45bea435ebb691d32b
|
Shell
|
trschmitt/notes
|
/07/2-tuesday-32/todos-sequelize-starter.bash
|
UTF-8
| 2,932
| 3.40625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Scaffold a Sequelize/Postgres todo app from the base-express-app template:
# clone, configure sequelize, create/migrate/seed the DB, and generate
# routes + mustache views. Refuses to run if ~/code/todosql already exists.
if [[ -d ~/code/todosql ]]; then
  echo "~/code/todosql: Already exists. Exiting..."
  exit 1
fi
cd ~/code
git clone https://github.com/tiy-dc-wdcp-jun-17/base-express-app.git todosql
cd todosql
npm install sequelize pg
cd server
sequelize init
# Overwrite the generated sequelize config with local Postgres settings.
cat <<EOS > config/config.json
{
"development": {
"username": null,
"password": null,
"database": "todos_development",
"host": "127.0.0.1",
"dialect": "postgres"
},
"test": {
"username": "root",
"password": null,
"database": "todos_test",
"host": "127.0.0.1",
"dialect": "postgres"
}
}
EOS
sequelize model:create --name todo --attributes 'description:text completed:boolean'
dropdb todos_development
createdb todos_development
sequelize db:migrate
sequelize seed:create --name todos
# Replace every generated seeder with two fixed example todos.
for seed_file in seeders/*; do
cat <<EOS > $seed_file
"use strict";
module.exports = {
up: function(queryInterface, Sequelize) {
return queryInterface.bulkInsert("todos",[
{
description: "Finished this app",
completed: false,
createdAt: new Date(),
updatedAt: new Date()
}, {
description: "Practiced writing code",
completed: false,
createdAt: new Date(),
updatedAt: new Date()
}
], {});
},
down: function(queryInterface, Sequelize) {
return queryInterface.bulkDelete("todos", null, {});
}
};
EOS
done
sequelize db:seed:all
# Express router with index and show endpoints for todos.
cat <<EOS > routes/todos.js
const express = require("express");
const router = express.Router();
const models = require("../models");
router.get("/", (req, res) => {
models.todo.findAll().then(todos => {
res.render("todos/index", { todos: todos });
});
});
router.get("/:id", (req, res) => {
models.todo.findById(req.params.id).then(todo => {
res.render("todos/show", { todo: todo });
});
});
module.exports = router;
EOS
mkdir -p views/todos
cat <<EOS > views/todos/index.mustache
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Todos</title>
</head>
<body>
<h1>Todos</h1>
<ul>
{{#todos}}
<li><a href="/todos/{{id}}">{{description}}</a> | {{completed}}</li>
{{/todos}}
</ul>
</body>
</html>
EOS
cat <<EOS > views/todos/show.mustache
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Todo</title>
</head>
<body>
<h1>Todo</h1>
{{#todo}}
<h2>{{description}}</h2>
<p>
{{#completed}}
DONE
{{/completed}}
{{^completed}}
NOT COMPLETED
{{/completed}}
</p>
{{/todo}}
</body>
</html>
EOS
# Add one route
cat index.js | sed -e 's/app.use("\/", require(".\/routes\/homepage"));/app.use("\/", require(".\/routes\/homepage"));app.use("\/todos", require(".\/routes\/todos"));/' > index.js.new
mv index.js.new index.js
echo
echo "Type:"
echo "cd ~/code/todosql"
echo "npm start"
echo
echo "Go to http://localhost:3000/todos"
| true
|
00bf157b976b8ada0d68f1e5e647ce7b8cf1d633
|
Shell
|
paper42/pm
|
/pkgs/dejavu-nerd
|
UTF-8
| 474
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/sh
# pm package recipe: DejaVu Sans Mono patched with Nerd Fonts glyphs.
version=2.0.0
source="https://github.com/ryanoasis/nerd-fonts/releases/download/v$version/DejaVuSansMono.zip"
sha512="92f8f0a41774ec78ae4e4addb811ba5cae1f904ee14bf54eb0e64662fa0ee2a1a73220b15e1e83679badd4eaeae0fcc6f8790fd8d5aea89a8957ba79c5136baf DejaVuSansMono.zip"
# Install every font file from the unpacked archive into $DESTDIR/home/.fonts,
# skipping Windows-specific variants.
build() {
	mkdir -p "$DESTDIR"/home/.fonts
	for f in *; do
		# -q: only grep's exit status matters; without it the matched
		# file names leaked onto stdout.
		if ! echo "$f" | grep -qi windows; then
			install -D -m 0644 "$f" "$DESTDIR/home/.fonts/$f"
		fi
	done
}
| true
|
751892556ba43ecb377e92503389c18b858dec65
|
Shell
|
kostafey/dotfiles-1
|
/setup
|
UTF-8
| 9,788
| 3.578125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/zsh
# Dotfiles installer: as a regular user it links per-user configs, then
# re-executes itself via sudo for the system-wide half (see bottom).
set -e
# Public PGP key id imported for pacman/pass verification further below.
MY_GPG_KEY_ID="12C87A28FEAC6B20"
script_name="$(basename "$0")"
# Absolute path of the dotfiles checkout; all links/copies are relative to it.
dotfiles_dir="$(cd "$(dirname "$0")"; pwd)"
cd "$dotfiles_dir"
# Place a file at a destination either by symlinking ("link") or copying
# ("copy"). $2 is the source, $3 the destination; parent directories are
# created and any pre-existing destination is removed first.
assign() {
    op="$1"
    case "$op" in
        link|copy) ;;
        *)
            echo "Unknown operation: $op"
            exit 1
            ;;
    esac
    orig_file="$2"
    dest_file="$3"
    mkdir -p "$(dirname "$orig_file")"
    mkdir -p "$(dirname "$dest_file")"
    rm -rf "$dest_file"
    case "$op" in
        link)
            ln -s "$orig_file" "$dest_file"
            echo "$dest_file -> $orig_file"
            ;;
        copy)
            cp -R "$orig_file" "$dest_file"
            echo "$dest_file <= $orig_file"
            ;;
    esac
}
# Symlink $1 from the dotfiles tree to the same relative path under $HOME.
link() {
    assign "link" "$dotfiles_dir/$1" "$HOME/$1"
}
# Copy $1 from the dotfiles tree to the same absolute path under / (system files).
copy() {
    assign "copy" "$dotfiles_dir/$1" "/$1"
}
# Enable and start a systemd unit.
# Usage: systemctl_enable_start [TARGET] NAME
# TARGET is "user" or "system"; with one argument it defaults to "system".
systemctl_enable_start() {
    if [ "$#" -eq 1 ]; then
        target="system"
        name="$1"
    else
        target="$1"
        name="$2"
    fi
    case "$target" in
        user)
            echo "systemctl --user enable & start "$name""
            systemctl --user enable "$name"
            systemctl --user start "$name"
            ;;
        *)
            echo "systemctl enable & start "$name""
            systemctl enable "$name"
            systemctl start "$name"
            ;;
    esac
}
# -------- per-user half: runs when invoked as a normal user --------
if [ "$(whoami)" != "root" ]; then
    echo "======================================="
    echo "Setting up dotfiles for current user..."
    echo "======================================="
    link "bin"
    link ".ghc/ghci.conf"
    link ".gnupg/gpg.conf"
    link ".config/htop"
    link ".config/nvim"
    link ".config/ranger/rc.conf"
    link ".config/rofi"
    link ".config/qalculate/qalc.cfg"
    link ".config/systemd/user/backup-packages.service"
    link ".config/systemd/user/backup-packages.timer"
    link ".agignore"
    link ".gitconfig"
    link ".gitignore"
    link ".mdlrc"
    link ".npmrc"
    link ".pylintrc"
    link ".tigrc"
    link ".tmux.conf"
    link ".zprofile"
    link ".zsh"
    link ".zshrc"
    # GUI/desktop-only configs, keyed off the hostname prefix.
    if [[ "$HOST" =~ "desktop-" ]]; then
        link ".i3"
        link ".i3status.conf"
        link ".gnupg/gpg-agent.conf"
        link ".config/chromium-flags.conf"
        link ".config/copyq/copyq.conf"
        link ".config/dunst"
        link ".config/fontconfig/conf.d/30-infinality-custom.conf"
        link ".config/fontconfig/conf.d/70-monospace.conf"
        link ".config/gsimplecal"
        link ".config/gtk-3.0"
        link ".config/kitty"
        link ".config/mimeapps.list"
        link ".config/mpv/mpv.conf"
        link ".config/redshift.conf"
        link ".config/repoctl"
        link ".config/transmission/settings.json"
        link ".config/USBGuard"
        link ".config/systemd/user/urlwatch.service"
        link ".config/systemd/user/urlwatch.timer"
        link ".local/share/fonts/taskbar.ttf"
        link ".compton.conf"
        link ".gtkrc-2.0"
        link ".urlwatch/urls.yaml"
        link ".xsession"
        link ".Xresources"
    fi
    echo ""
    echo "================================="
    echo "Enabling and starting services..."
    echo "================================="
    systemctl --user daemon-reload
    systemctl_enable_start "user" "backup-packages.timer"
    if [[ "$HOST" =~ "desktop-" ]]; then
        systemctl_enable_start "user" "dunst.service"
        systemctl_enable_start "user" "redshift.service"
        systemctl_enable_start "user" "urlwatch.timer"
        systemctl_enable_start "user" "yubikey-touch-detector.service"
    fi
    echo ""
    echo "======================================="
    echo "Finishing various user configuration..."
    echo "======================================="
    # Import the author's public PGP key once.
    if ! gpg -k | grep "$MY_GPG_KEY_ID" > /dev/null; then
        echo "Importing my public PGP key"
        curl -s https://keybase.io/maximbaz/pgp_keys.asc| gpg --import
        gpg --trusted-key "$MY_GPG_KEY_ID" > /dev/null
    fi
    if [[ "$HOST" =~ "desktop-" ]]; then
        if [[ ! -a "$HOME/.config/Yubico/u2f_keys" ]]; then
            echo "Configuring YubiKey for sudo access (touch it now)"
            mkdir -p "$HOME/.config/Yubico"
            pamu2fcfg -umaximbaz > "$HOME/.config/Yubico/u2f_keys"
        fi
        if [[ -a "$HOME/.password-store" ]]; then
            echo "Configuring automatic git push for pass"
            # zsh: '>!' clobbers even with noclobber set.
            echo "#!/usr/bin/env bash\n\npass git push" >! "$HOME/.password-store/.git/hooks/post-commit"
            chmod +x "$HOME/.password-store/.git/hooks/post-commit"
        fi
        echo "Disabling Dropbox autoupdate"
        # Replace ~/.dropbox-dist with an unwritable dir (mode 0) so the
        # client cannot re-install itself.
        rm -rf ~/.dropbox-dist
        install -dm0 ~/.dropbox-dist
        echo "Configuring GTK file chooser dialog"
        gsettings set org.gtk.Settings.FileChooser sort-directories-first true
        echo "Ignoring further changes to often changing config"
        git update-index --assume-unchanged ".config/transmission/settings.json"
        echo "Creating pacman cache for custom AUR repo"
        [[ ! -a "/var/cache/pacman/maximbaz-aur/" ]] && sudo install -d "/var/cache/pacman/maximbaz-aur" -o maximbaz
        if [[ ! -a "/var/cache/pacman/maximbaz-aur/maximbaz-aur.db.tar" ]]; then
            # zsh read -q: single-key yes/no prompt.
            if read -q "?Press 'y' to mirror the remote repo or 'n' to create an empty one: "; then
                echo ""
                wget -m -nH -np -q --show-progress --reject="index.html*" --cut-dirs=1 -P '/var/cache/pacman/maximbaz-aur/' 'https://arch-repo.maximbaz.com:4433/maximbaz-aur/'
            else
                echo ""
                repo-add -s "/var/cache/pacman/maximbaz-aur/maximbaz-aur.db.tar"
            fi
        fi
    fi
    echo ""
    echo "====================================="
    echo "Switching to root user to continue..."
    echo "====================================="
    echo "..."
    # Re-run this same script as root; the branch below then takes over.
    sudo -s "$dotfiles_dir/$script_name"
    exit
fi
# -------- system half: runs on the sudo re-invocation as root --------
if [[ "$(whoami)" == "root" ]]; then
    echo ""
    echo "=========================="
    echo "Setting up /etc configs..."
    echo "=========================="
    copy "etc/conf.d/snapper"
    copy "etc/snap-pac.conf"
    copy "etc/snapper/configs/root"
    copy "etc/ssh/ssh_config"
    copy "etc/sysctl.d/10-swappiness.conf"
    copy "etc/sysctl.d/99-idea.conf"
    copy "etc/sysctl.d/99-sysctl.conf"
    copy "etc/systemd/journald.conf"
    copy "etc/systemd/system/paccache.service"
    copy "etc/systemd/system/paccache.timer"
    copy "etc/systemd/system/reflector.service"
    copy "etc/systemd/system/reflector.timer"
    copy "etc/updatedb.conf"
    if [[ "$HOST" =~ "desktop-" ]]; then
        copy "etc/grub.d/41_snapshots-btrfs_config"
        copy "etc/lightdm/lightdm.conf"
        copy "etc/lightdm/lightdm-gtk-greeter.conf"
        copy "etc/NetworkManager/dispatcher.d/pia-vpn"
        copy "etc/pacman.conf"
        copy "etc/pacman.d/maximbaz-aur"
        copy "etc/pam.d/sudo"
        copy "etc/private-internet-access/pia.conf"
        copy "etc/sudoers"
        copy "etc/systemd/logind.conf"
        copy "etc/systemd/system/backup-aur@devbox"
        copy "etc/systemd/system/backup-aur@.service"
        copy "etc/udev/rules.d/81-ac-battery-change.rules"
        copy "etc/usbguard/usbguard-daemon.conf"
        copy "etc/X11/xorg.conf.d/00-keyboard.conf"
        copy "etc/X11/xorg.conf.d/30-touchpad.conf"
    fi
    if [[ "$HOST" =~ "crmdevvm-" ]]; then
        copy "etc/systemd/system/reverse-ssh@devbox"
        copy "etc/systemd/system/reverse-ssh@.service"
    fi
    echo ""
    echo "================================="
    echo "Enabling and starting services..."
    echo "================================="
    sysctl --system > /dev/null
    systemctl daemon-reload
    systemctl_enable_start "system" "paccache.timer"
    systemctl_enable_start "system" "reflector.timer"
    systemctl_enable_start "system" "NetworkManager.service"
    systemctl_enable_start "system" "NetworkManager-wait-online.service"
    systemctl_enable_start "system" "docker.service"
    systemctl_enable_start "system" "ufw.service"
    systemctl_enable_start "system" "snapper-cleanup.timer"
    if [[ "$HOST" =~ "desktop-" ]]; then
        # enable only (no start): these take effect on next boot/login.
        systemctl enable "lightdm.service"
        systemctl enable "backup-aur@devbox.service"
        systemctl_enable_start "system" "dropbox@maximbaz.service"
        systemctl_enable_start "system" "pcscd.service"
        systemctl_enable_start "system" "teamviewerd.service"
        systemctl_enable_start "system" "usbguard.service"
        # tlp
        systemctl_enable_start "system" "tlp.service"
        systemctl_enable_start "system" "tlp-sleep.service"
        systemctl_enable_start "system" "NetworkManager-dispatcher.service"
        systemctl mask "systemd-rfkill.service"
    fi
    if [[ "$HOST" =~ "crmdevvm-" ]]; then
        systemctl_enable_start "system" "sshd.socket"
        systemctl_enable_start "system" "reverse-ssh@devbox.service"
    fi
    echo ""
    echo "==============================="
    echo "Creating top level Trash dir..."
    echo "==============================="
    # world-writable + sticky bit, like /tmp.
    mkdir --parent /.Trash
    chmod a+rw /.Trash
    chmod +t /.Trash
    echo "Done"
    echo ""
    echo "======================================="
    echo "Finishing various user configuration..."
    echo "======================================="
    echo "Adding my public key to pacman"
    if ! pacman-key --list-keys | grep "$MY_GPG_KEY_ID" > /dev/null; then
        pacman-key --recv-keys "$MY_GPG_KEY_ID"
        pacman-key --lsign-key "$MY_GPG_KEY_ID"
    fi
    echo "Configuring devtools/makepkg"
    sed -i "s/PKGEXT='.pkg.tar.xz'/PKGEXT='.pkg.tar'/" /usr/share/devtools/makepkg-x86_64.conf
    echo "Configuring firewall"
    # Reset only when ufw is not already active, then rebuild the ruleset.
    [[ "$(ufw status | grep -o '[^ ]\+$')" != "active" ]] && ufw --force reset > /dev/null
    ufw default reject
    [[ "$HOST" =~ "crmdevvm-" ]] && ufw allow ssh
    ufw enable
    find /etc/ufw -type f -name '*.rules.*' -delete
    if [[ "$HOST" =~ "desktop-" ]]; then
        echo "Configuring aurutils-contrib"
        ln -sf /usr/share/aurutils/contrib/aur-vercmp-devel /usr/lib/aurutils/
        echo "Joining autologin group"
        groupadd -rf autologin
        gpasswd -a maximbaz autologin
        echo "Enabling infinality aliases"
        ln -sf /etc/fonts/conf.avail/30-infinality-aliases.conf /etc/fonts/conf.d/30-infinality-aliases.conf
    fi
    if [[ "$HOST" =~ "crmdevvm-" ]]; then
        echo "Configuring gpg-agent forwarding"
        # Idempotently ensure StreamLocalBindUnlink follows VersionAddendum.
        sed -zi "s/\(VersionAddendum[^\n]*\n\)\(StreamLocalBindUnlink[^\n]*\n\)\?/\1StreamLocalBindUnlink yes\n/" /etc/ssh/sshd_config
    fi
    echo "Reload udev rules"
    udevadm control --reload
    udevadm trigger
fi
| true
|
253a4f03c2ae98e049ce53c6b725070e8955ca8d
|
Shell
|
xuyueshu/YZQREPO
|
/shell_test/xiugaikuozhanming.sh
|
UTF-8
| 411
| 3.5625
| 4
|
[
"Unlicense"
] |
permissive
|
#!/bin/bash
# Batch-rename files by extension: change every *.$1 in the current
# directory to *.$2, e.g.
#   script txt doc   -> rename *.txt to *.doc
#   script doc jpg   -> rename *.doc to *.jpg
# Iterate the glob directly instead of parsing `ls` output, so names with
# spaces survive. The original `[ -z $i ]` check could never fire inside
# the loop; an unmatched glob is detected via the literal pattern instead.
for i in *."$1"; do
	if [ ! -e "$i" ]; then
		# glob did not match anything: the literal pattern remains
		echo "不存在该类文件"
	else
		mv -- "$i" "${i%.*}.$2"
	fi
done
| true
|
629e14040cee30231cb746ce0ee0a0c56cfcb5fb
|
Shell
|
Leo311/heroku-buildpack-isyntax
|
/bin/compile
|
UTF-8
| 409
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/sh
# Heroku buildpack "compile" step: fetch the iSyntax tarball and unpack it
# into the build directory's vendor/ tree. $1 is the build directory.

# Prefix each stdin line for Heroku's indented log format.
indent() {
  sed -u 's/^/ /'
}

echo "-----> Downloading and installing iSyntax libraries and executables"

TARBALL_URL='https://s3.amazonaws.com/proscia-viewer/buildpacks/isyntax.tar.gz'
OUTPUT_DIR="${1}/vendor/isyntax"

mkdir -p $OUTPUT_DIR
# Same check as testing $? right after the pipeline: the status of tar.
if ! curl -s $TARBALL_URL | tar xz -C $OUTPUT_DIR; then
  echo "Error downloading isyntax and unpacking to build dir" | indent
  exit 1
fi
| true
|
9a19330350f045445d7340e5037a8a07ec46d120
|
Shell
|
SlurmRelated/slurm-llnl-misc-plugins
|
/Epilog.d/00_clean
|
UTF-8
| 975
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
#
# This script will kill any user processes on a node when the last
# SLURM job there ends. For example, if a user directly logs into
# an allocated node SLURM will not kill that process without this
# script being executed as an epilog.
# Not invoked by SLURM (no UID in env): nothing to do.
if [ x$SLURM_UID == "x" ] ; then
	exit 0
fi
if [ x$SLURM_JOB_ID == "x" ] ; then
	exit 0
fi
# Don't try to kill user root or system daemon jobs
if [ $SLURM_UID -lt 1000 ] ; then
	exit 0
fi
# Don't do anything if there other jobs running of the same user
job_list=`squeue --noheader --format=%i --user=$SLURM_UID --node=localhost`
for job_id in $job_list; do
	if [ "$job_id" != "$SLURM_JOB_ID" ]; then
		exit 0
	fi
done
# No other SLURM jobs, purge all remaining processes of this user
pkill -KILL -U $SLURM_UID
# clean shared memory files generated by QLogic PSM stack
find /dev/shm -name 'psm_shm.*' -uid $SLURM_UID -delete
# clean /tmp
find /tmp -uid $SLURM_UID -delete
# Exit cleanly when finishing
exit 0
| true
|
8312441e6f1b467dc3299bc65486f340b033c50a
|
Shell
|
spiralofhope/ruby-random
|
/dead/unc/bashrc_addition.sh
|
UTF-8
| 720
| 3.609375
| 4
|
[] |
no_license
|
#!/usr/bin/env sh
# Everything is totally screwed up because of Bash. You have to rig a gateway through bash to run a script which will change into another directory.
# for $HOME/.bashrc
# unc: ask the ruby helper (~/bin/unc) for a target directory and cd there.
unc ()
{
  # Check the argument count: the original `[ -z "$@" ]` is only valid for
  # 0 or 1 arguments and raises a test error when several are given.
  if [ "$#" -eq 0 ]; then
    echo 'unc <filename>'
  else
    # TODO: This should run the script once, and then chop it apart..
    # this way the scripting can properly communicate to bash and its screen -- to print help screens or other useful information.
    # I could process things if I did something like this.
    #   test=`ruby ~/bin/unc "$*"`
    # then act on it intelligently...
    #   echo $1
    #   cd $2
    #   echo $3-$* (if that is possible)
    cd "$( ruby ~/bin/unc "$*" )" || return $?
  fi
}
| true
|
d96884d277f8ffc5b68953d06ba97289f9694129
|
Shell
|
delafont/random_scripts
|
/tophat_mapping.sh
|
UTF-8
| 1,849
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
# LSF array job: map one FASTQ sample per array index with TopHat2,
# working in node-local scratch and copying results back when done.
# By Julien Duc, Evarist Planet
#BSUB -L /bin/bash
#BSUB -J tophat[1-24]
#BSUB -o logs/tophat_%I.out
#BSUB -e logs/tophat_%I.err
#BSUB -u julien.delafontaine@epfl.ch
#BSUB -N
#BSUB -M 8000000
#BSUB -R rusage[mem=8000]
#BSUB -n 4
#BSUB -R "span[hosts=1]"
module add UHTS/Analysis/samtools/0.1.19;
#module add UHTS/Aligner/bowtie/0.12.9
module add UHTS/Aligner/bowtie2/2.2.1;
module add UHTS/Aligner/tophat/2.0.13;
set -e
## for who is the analysis
who="jerome3"
## Folder name for experiment
exp="tophat"
## Monthly or weekly
when="monthly"
## Where are the data
inputdir="."
[ ! -d $inputdir ] && echo "You are not in the right directory you dumbass" && exit 1
# LSB_JOBINDEX is 1-based, so pad the array with a dummy first element.
tmp=("dummy") # because LSF arrays are 0-based or shit like this
fastq=($(ls -1 ${inputdir}/fastq/*.fq.gz))
data=("${tmp[@]}" "${fastq[@]}")
#if you wanna compute only one
#data=("empty" "theguy")
sample=`basename ${data[$LSB_JOBINDEX]}`
sample=${sample%.fq}
outputdir="/scratch/cluster/$when/$USER/$who/$exp/${sample}"
outdata=${data[$LSB_JOBINDEX]%.fq}
#clean previous logs
mkdir -p logs
rm -f logs/tophat_${LSB_JOBINDEX}.*
rm -rf $outputdir
mkdir -p $outputdir
org="hg38"
md5="cbcc5aeeb39d29065c6641aafd5ccaa430706008"
reads="/scratch/cluster/$when/$USER/$who/fastq/${data[$LSB_JOBINDEX]}"
index="/scratch/cluster/$when/$USER/$who/$exp/index_$org/$md5"
gtf="/scratch/cluster/$when/$USER/$who/$exp/index_$org/${md5}_ENSEMBL.gtf"
## create an alias for /scratch/local directory
localdir="/scratch/local/daily/${USER}/${LSB_JOBID}_${LSB_JOBINDEX}"
mkdir -p $localdir
cd $localdir
## Actual mapping
echo " >>> Working with ${data[$LSB_JOBINDEX]} <<< "
cmd="tophat -p 4 -g 1 --no-novel-juncs --no-novel-indels -G $gtf --transcriptome-index $exp/index_$org/trx --b2-sensitive -o $localdir $index $reads"
eval $cmd
## Move and clean localdir
cp -rv ./* $outputdir
rm -rfv $localdir
| true
|
5d56ad8aed71a2871b132b3708c1500ae2797d64
|
Shell
|
BlankNetscape/ALPS_manager
|
/alp_master.sh
|
UTF-8
| 1,875
| 3.984375
| 4
|
[] |
no_license
|
#!/bin/bash
# Interactive helper that creates a new ".alp" alias-pack file: prompts for
# an id, a name and an enabled/disabled status, opens $editor to fill in the
# aliases, then wraps the content with a generated header and footer.
editor="nano"
##
alpID=;
alpNm=;
alpSt=;
## --- prompt: numeric id, at most 4 digits ---
echo -e "\e[38;5;105mEnter AlpID"" - format XXXX or less\e[0m"
printf "\e[38;5;105m└─> \e[0m:"
read -r
if [[ $REPLY =~ ^-?[0-9]+$ ]]
then
	if [[ ${#REPLY} -gt 4 ]]; then
		echo " - Too long."
		exit
	fi
else
	echo " - NaN"
	exit
fi
alpID=$REPLY
## --- prompt: name ---
echo -e "\e[38;5;105mEnter Alp Name"" - format 'aabbccd' [x7 or less]\e[0m"
printf "\e[38;5;105m└─> \e[0m:"
read -r
if [[ $REPLY == "" ]]
then
	echo -e " - Empty. Exiting.."
	# BUGFIX: 'return' is only valid in a function/sourced script; at top
	# level it merely prints an error and execution continued. Use exit.
	exit 1
fi
alpNm=$REPLY
## --- prompt: status flag, 0 or 1 ---
echo -e "\e[38;5;105mChoose status"" - format ('1'= Enable)/('0'= Disable)\e[0m"
printf "\e[38;5;105m└─> \e[0m:"
read -r
if [[ $REPLY =~ ^[0]$ ]]
then
	printf ""
elif [[ $REPLY =~ ^[1]$ ]]; then
	printf ""
else
	echo -e "Death"
	# BUGFIX: same top-level 'return' problem as above.
	exit 1
fi
alpSt=$REPLY
##
# echo "$alpID $alpNm $alpSt"
# Header block for the generated file. $1=ID $2=Nm
function HAT {
	printf ""
	printf "######################################################## \n### ALP ### %04d ### %s\n" "$1" "$2"
}
# $1=ID $2=Nm
# Footer/metadata block. $1=ID $2=Nm $3=St $4=Help
function FUT {
	printf "###\n#alpID: $1\n#alpNm: $2\n#status: $3\n#author: $(hostname)\n#help: $4\n########################################################"
}
# $1=ID $2=Nm $3=St $4=Help
###
###
###
touch temp.alp
## Pre-seed the file with usage hints, let the user edit it.
echo -e "### Aliases goes down here. \n# e.x.: al1as test=(\"do things\")\n# e.x.: funct1on name() { \"do this\" } #=\"comment here\"\n#" > ./temp.alp
${editor} temp.alp
# Prepend the header...
echo -e "$(HAT ${alpID} "${alpNm}")" | cat - temp.alp > temp && mv temp temp.alp
# ...and append the footer.
# NOTE(review): alpHp is never set anywhere in this script, so the
# "#help:" field is always empty — confirm whether a prompt is missing.
echo -e "$(FUT ${alpID} ${alpNm} ${alpSt} ${alpHp})" >> ./temp.alp
# Final file name: either the zero-padded id or the chosen name.
printf "\e[38;5;105mName file:\e[0m\n 1. '%04d.alp'\n 2. '%s.alp'\n" "${alpID}" "${alpNm}"
printf "\e[38;5;105m└─> \e[0m:"
read -r
case "$REPLY" in
	1) nTemp=$(printf "%04d" "${alpID}")
	   mv temp.alp "${nTemp}.alp";;
	2) mv temp.alp "${alpNm}.alp";;
	*) echo -e "Undefined parameter: $REPLY";;
esac
echo -e "\e[38;5;11mComplete!\e[0m"
| true
|
1408997250fb671198151eae9ea38ea318a429f7
|
Shell
|
jonkelly/utils
|
/dnsupdate.r53/dnsupdate.sh
|
UTF-8
| 1,168
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
# USAGE: $0 subdomain ip_address
# e.g. dnsupdate.sh partytime 33.33.33.33
# would create an A record for partytime.DOMAIN pointing to 33.33.33.33
# depends on awcli, should be configured with default profile in credentials
# TODO: profile support
# TODO: input sanitization
# install: run from PWD, or copy scripts to /usr/local/bin and config to
# /usr/local/etc
if [[ $# -le 1 ]]; then
	echo USAGE: $0 subdomain ip_address
	exit
fi
unset DOMAIN
# config file goes here (defines DOMAIN, GEN_SCRIPT, HOSTED_ZONE_ID)
# check /usr/local/etc first, override with file in PWD if exists
. /usr/local/etc/dnsupdate.conf 2>&1 &>/dev/null
. dnsupdate.conf 2>&1 &>/dev/null
SUBDOMAIN=$1
SUBDOMAIN_IP=$2
TEMPFILE=`mktemp`
# Generate the change-batch JSON via GEN_SCRIPT (from PATH or from PWD).
if [ -x "$(command -v "$GEN_SCRIPT")" ]; then
	$GEN_SCRIPT $SUBDOMAIN $SUBDOMAIN_IP $DOMAIN > $TEMPFILE
elif [ -x ./$GEN_SCRIPT ]; then
	./$GEN_SCRIPT $SUBDOMAIN $SUBDOMAIN_IP $DOMAIN > $TEMPFILE
else
	echo ERROR: $GEN_SCRIPT not found
	exit
fi
# aws route53 change-resource-record-sets --hosted-zone-id ZXXXXXXXXXX --change-batch file://sample.json
aws route53 change-resource-record-sets --hosted-zone-id $HOSTED_ZONE_ID --change-batch file://$TEMPFILE
rm -f $TEMPFILE
| true
|
fa5f70de4a3834428e4546fe49ef9d979a14c0f7
|
Shell
|
vincentmartin/audio2video
|
/audio2video.sh
|
UTF-8
| 384
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/sh
# Render each .wma in the current directory into a 1080p video whose frame
# is a title card showing the file name; results go under ./output.
mkdir output
for track in *.wma; do
	echo "processing $track"
	#ffmpeg -loop 1 -framerate 1 -i ./song_background.png -i "$track" -c:v libx264 -preset veryslow -crf 0 -c:a copy -shortest output/"$track.mkv"
	convert -pointsize 60 label:"$track" -gravity center -extent 1920x1080 png:- | ffmpeg -y -f image2pipe -i - -i "$track" -filter_complex "loop=-1:1:0" -shortest output/"$track.mp4"
done
| true
|
8564a7ea1b38f5508d913efcf47d72a3045c7209
|
Shell
|
archion/mylinux
|
/bin/playonline
|
UTF-8
| 524
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/bash
# Play (default) or just download (-d) a media URL with you-get;
# YouTube links are fetched through the local proxy at 127.0.0.1:8888.
dl=false
while getopts :d OPTION
do
	case $OPTION in
		d)
			dl=true
			;;
	esac
done
shift $((OPTIND - 1))
# Build the you-get argument list instead of duplicating four branches.
opts=()
if echo $1 | grep -q youtube; then
	echo "using 127.0.0.1:8888"
	opts+=(-s 127.0.0.1:8888)
fi
if ! $dl; then
	opts+=(-p mpv)
fi
you-get "${opts[@]}" $1
#danmaku2ass -o foo.ass -s 1280x720 -fs 38 -a 0.6 -dm 5 -ds 5 这个空耳真厉害!第三期.cmt.xml
| true
|
0f45b9cde25262cf4731be49f930ab4c1f3e0991
|
Shell
|
vinay-ebi/tark-loader
|
/travisci/harness.sh
|
UTF-8
| 648
| 3.109375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Travis CI test harness: run the Ensembl test suite, optionally under
# Devel::Cover with a coveralls report when $COVERALLS is "true".
ENSDIR="${ENSDIR:-$PWD}"
export PERL5LIB=$ENSDIR/bioperl-live:$ENSDIR/ensembl-test/modules:$PWD/lib:$ENSDIR/ensembl/modules:$ENSDIR/ensembl-hive/modules
export TEST_AUTHOR=$USER
echo "Running test suite"
if [ "$COVERALLS" = 'true' ]; then
	PERL5OPT='-MDevel::Cover=+ignore,bioperl,+ignore,ensembl-test,+ignore,ensembl' perl $ENSDIR/ensembl-test/scripts/runtests.pl -verbose lib/t
else
	perl $ENSDIR/ensembl-test/scripts/runtests.pl lib/t
fi
rt=$?
# Only generate the coverage report if the tests themselves passed;
# otherwise propagate the test suite's exit status.
if [ $rt -eq 0 ]; then
	if [ "$COVERALLS" = 'true' ]; then
		echo "Running Devel::Cover coveralls report"
		cover --nosummary -report coveralls
	fi
	exit $?
else
	exit $rt
fi
| true
|
cd5cf5b90460c3167d586cdfe5c964f93908625d
|
Shell
|
srynobio/CVDO
|
/bin/CVDO
|
UTF-8
| 1,566
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
# Shell script by Shawn Rynearson For Karen Eilbeck
# shawn.rynearson@gmail.com
# Used for DO development project.
# Pipeline driver: MeSH XML -> OBO, xref enrichment, parent-tree reports,
# then cleanup of all intermediate files.
#----------------------------------------------------------------------
# Generating the obo file from MeSH XML.
echo "Generating obo file from XML file."
./MeSHtoOBO_Parser.pl ../data/desc2013.xml
./OBO_Generator.pl tmp1.txt MeSHOBO.obo
mv MeSHOBO.obo ../data
#----------------------------------------------------------------------
# Takes reference obo file and adds xref's based on accession number
echo "Adding xrefs to obo file."
# Pulls out all xrefs form a reference obo file.
./Xref_Term_Parser.pl ../Ontologies/HumanDO.obo
# Isolates just xrefs with D0..accession number.
./Xref_Iso_Diease.pl
# Takes isolated terms and addeds them to the working onlology.
./Xref_Combine.pl
# Formats to obo file specs.
./Xref_Formatter.pl
# Talks the working oob file and cleans up the xref terms
# so they have correct format.
./Xref_Final.pl
#----------------------------------------------------------------------
# Looks for and outputs parent relationship of each node.
echo "Isolating parent relationships."
./GO_Traverser.pl --uniq ../data/CVDO.obo
./Tree_Dup_Remover.pl traversed.txt
./Tree_Uniq_Reporter.pl
./GO_Traverser.pl --common ../data/CVDO.obo
#----------------------------------------------------------------------
# Remove every intermediate artifact produced by the steps above.
rm uniq_list.txt traversed_dupes.txt traversed_report.txt traversed_uniqs.txt traversed.txt tmp1.txt
rm ../data/working.obo ../data/merged.txt ../data/combine_xref.txt ../data/xref_list.txt ../data/MeSHOBO.obo
| true
|
941ea03c01384dfa42b78c99196760c206991daf
|
Shell
|
klapcsik/multi-environment-chatops-bot-for-controltower
|
/deploy.sh
|
UTF-8
| 4,418
| 3.953125
| 4
|
[
"MIT-0"
] |
permissive
|
#!/bin/bash
# Purpose: Install chatops-lex
# Author: Luiz Decaro {lddecaro@amazon.com}
# ------------------------------------------
# Phase 1 is delegated to chatops-lex-bot/deploy.sh (sourced below);
# Phase 2 packages this repo and deploys the CI/CD CloudFormation stack.
# $1 = # of seconds
# $@ = What to print after "Waiting n seconds"
countdown() {
  secs=$1
  shift
  msg=$@
  while [ $secs -gt 0 ]
  do
    printf "\r\033[KWaiting %.d seconds $msg" $((secs--))
    sleep 1
  done
  printf ""
  echo
}
red=`tput setaf 1`
green=`tput setaf 2`
reset=`tput sgr0`
orange=`tput setaf 3`
# Phase 1: sourced so it shares variables (e.g. lexBotRegion, suffix).
source ./chatops-lex-bot/deploy.sh
projectName=''
bucketName=''
echo "${green}------------------------------------------"
echo "PHASE 1 COMPLETE: ChatOps Lex Bot is installed"
echo "------------------------------------------${reset}"
echo ""
echo "${orange}------------------------------------------"
echo "Starting ChatOps Installer (Phase 2)"
echo "------------------------------------------${reset}"
echo ""
#tempProjectName="chatops-lex-"$(openssl rand -hex 2)
tempProjectName="chatops-lex-"$lexBotProjectSuffix
printf "Choose a project name for Phase 2: [$tempProjectName]: "
read projectName
if [ -z $projectName ]
then
  projectName=$tempProjectName
fi
NOW=$(date +%F-%H-%M-%S)
printf "Choose a bucket name for source code upload [$projectName]: "
read bucketName
if [ -z $bucketName ]
then
  bucketName="$projectName"
fi
echo "ChatOps-Lex-Bot is already deployed in region${orange} ${lexBotRegion} ${reset}"
printf "Choose a mailbox to receive approval e-mails for Account vending requests: "
read mailbox
# Region resolution: $AWS_DEFAULT_REGION wins, else `aws configure`.
if [ -z $AWS_DEFAULT_REGION ]
then
  region=$(aws configure get region)
  echo "Using default region from aws configure get region: $region"
else
  region=$AWS_DEFAULT_REGION
  echo "Using region from \$AWS_DEFAULT_REGION variable: $region"
fi
#region=$(aws ec2 describe-availability-zones --output text --query 'AvailabilityZones[0].[RegionName]')
printf "Choose the AWS region where your vending machine is installed [$region]: "
read chatOpsRegion
if [ -z "$chatOpsRegion" ]
then
  chatOpsRegion=$region
else
  region=$chatOpsRegion
fi
echo "Using region ${green} $region ${reset}"
##printf "Please tell us in which region the lex bot is deployed: "
##read lexbotregion
# Create the upload bucket only if it does not already exist.
if [[ $(aws s3api list-buckets --query "Buckets[?Name == '$bucketName'].[Name]" --output text) = $bucketName ]];
then
  echo "Bucket $bucketName is already created." ;
else
  echo "Creating a new S3 bucket on $region for your convenience..."
  aws s3 mb s3://$bucketName --region $region
  aws s3api wait bucket-exists --bucket $bucketName --region $region
  echo "Bucket $bucketName successfully created!"
fi
echo "Trying to find the ${green}AWS Control Tower Account Factory Portfolio${reset}"
echo ""
portfolioName="AWS Control Tower Account Factory Portfolio"
portfolioId="$(aws servicecatalog list-portfolios --query "PortfolioDetails[?DisplayName == '$portfolioName'].[Id]" --output text)"
if [[ -z $portfolioId ]]
then
  echo "Could not find portfolio named $portfolioName. Is Control Tower installed ? Is this the Master Account ?"
  echo "Exiting..."
  exit 1
fi
echo "Using project name....................${green}$projectName${reset}"
echo "Using bucket name.....................${green}$bucketName${reset}"
echo "Using mailbox for approvals...........${green}$mailbox${reset}"
echo "Using lexbot region...................${green}$lexBotRegion${reset}"
echo "Using service catalog portfolio-id....${green}$portfolioId${reset}"
echo ""
echo "If these parameters are wrong press ${red}ctrl+c to stop now...${reset}"
countdown 10 "before continuing"
if command -v mvn &> /dev/null
then
  mvn clean
fi
# Package the repo (minus git metadata) and hand it to CloudFormation.
rm *.zip
rm -rf ./target
zip -qq -r "$projectName.zip" . -x "*.git*" -x "*.DS_Store"
aws s3 cp "$projectName.zip" "s3://$bucketName/$projectName.zip"
aws cloudformation package --template-file devops.yml --s3-bucket $bucketName --output-template-file devops-packaged.yml
aws cloudformation deploy --region $region --template-file devops-packaged.yml --stack-name "$projectName-cicd" --parameter-overrides ProjectName=$projectName CodeS3Bucket=$bucketName PortfolioId=$portfolioId ApprovalMailbox=$mailbox LexBotRegion=$lexBotRegion --capabilities CAPABILITY_NAMED_IAM
echo "${green}------------------------------------------"
echo "PHASE 2 COMPLETE: ChatOps Pipeline is installed"
echo "ChatOps Lex Pipeline and Chatops Lex Bot Pipelines successfully installed"
echo "------------------------------------------${reset}"
echo ""
| true
|
664d2da85c863ace1fa421307518524bbb62383a
|
Shell
|
tpythoner/Mall
|
/restart.sh
|
UTF-8
| 513
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# Interactive restart helper: pick "production" (pm2 cluster) or
# "development" (supervisor) for the Node app.
select var in "production" "development";
do
	if [ $var = "production" ];then
		# forever-based start (kept for reference)
		#NODE_ENV=production forever start -l ~/.forever/access.log -e ~/.forever/error.log -w -a app.js
		# PM2 alternatives (kept for reference)
		#pm2 start -i max --merge-logs --watch -e ./logs/errors.log -o ./logs/access.log app.js
		pm2 start -i max --merge-logs --watch --name node app.js
		break
	else
		#NODE_ENV=development forever start -l forever.log -e err.log -a app.js
		supervisor app.js
		break
	fi
done
exit
| true
|
ed21175f44490edb1b7760e07eb79c27ffa8075e
|
Shell
|
cdbbnnyCode/AdventOfCode-2018
|
/day05/part1.sh
|
UTF-8
| 607
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
# Advent of Code 2018, day 5 part 1: fully "react" the polymer input.
source ../fetch.sh
input=$(fetch 5) # It's one string today!!
letters=( {a..z} )
# Remove every adjacent opposite-polarity pair (aA or Aa) of letter $2
# from the string $1 and print the result.
function deletePoly() {
	local str=$1
	local ch=$2
	local up=${ch^^}   # Only works on Bash 4.0+
	local low=${ch,,}
	echo $str | sed -e "s/${low}${up}//g" -e "s/${up}${low}//g"
}
# reactAll POLYMER
# Run one deletePoly pass for every letter in the global `letters` array
# and print the (partially) reduced polymer on stdout.
function reactAll() {
    local polymer=$1
    local letter
    for letter in "${letters[@]}"; do
        polymer=$(deletePoly "$polymer" "$letter")
    done
    printf '%s\n' "$polymer"
}
# Print the starting polymer length, then keep applying full alphabet
# passes until one pass removes nothing (fixed point reached).
echo ${#input}
while true; do
len=${#input}
input=$(reactAll $input)
echo "-> ${#input}"
# Unchanged length means no adjacent opposite-case pairs remain.
if [ $len -eq ${#input} ]; then
echo "Cannot reduce anymore"
break
fi
done
| true
|
b7a48a66532ce16deffbe956602237f5ac81d727
|
Shell
|
udtrokia/marst
|
/mark/dao/app/scripts/dep
|
UTF-8
| 469
| 2.59375
| 3
|
[] |
no_license
|
#!/usr/bin/env sh
# Install the toolchain (truffle/solc/next) and this app's npm dependencies.
# Resolve the script directory to an ABSOLUTE path up front: the original
# kept the relative `dirname $0`, so after the first `cd ${path}/../..` the
# second `cd ${path}/..` resolved against the wrong (already changed) cwd.
path=$(cd "$(dirname "$0")" && pwd)
# git
# git init && git remote add repo https://github.com/udtrokia/gig.git
# git config core.sparsecheckout true &&
# echo "/mark" >> .git/info/sparse-checkout && git pull repo master
#
echo install truffle, solc...
npm install -g truffle solc next
echo
cd "${path}/../.." && echo enter app path...
echo
echo now we install the dependences...
echo
cd "${path}/.." && npm i --save
# echo
# echo run app server...
# npm run dev
| true
|
fc02ddfe929b7cbc9449313839fe16fbb7dbeee6
|
Shell
|
rancher/rke-tools
|
/cert-deployer
|
UTF-8
| 1,712
| 3.84375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -x
# Write certificates/kubeconfigs passed in via KUBE_*/KUBECFG_* environment
# variables into $SSL_CRTS_DIR, then (optionally) fix ownership for a
# custom etcd uid/gid.
SSL_CRTS_DIR=${CRTS_DEPLOY_PATH:-/etc/kubernetes/ssl}
mkdir -p $SSL_CRTS_DIR
chmod 755 $SSL_CRTS_DIR
# Each KUBE_FOO_BAR env var becomes file kube-foo-bar.pem containing its value.
for i in $(env | grep -o KUBE_.*=); do
name="$(echo "$i" | cut -f1 -d"=" | tr '[:upper:]' '[:lower:]' | tr '_' '-').pem"
env=$(echo "$i" | cut -f1 -d"=")
# ${!env} is an indirect expansion: the VALUE of the variable named by $env.
value=$(echo "${!env}")
# Only (re)write when the file is missing, unless FORCE_DEPLOY=true.
if [ ! -f $SSL_CRTS_DIR/$name ] || [ "$FORCE_DEPLOY" = "true" ]; then
echo "$value" > $SSL_CRTS_DIR/$name
chmod 600 $SSL_CRTS_DIR/$name
fi
done
# Same for KUBECFG_* vars, written as .yaml; note: no FORCE_DEPLOY override here.
for i in $(env | grep -o KUBECFG_.*=); do
name="$(echo "$i" | cut -f1 -d"=" | tr '[:upper:]' '[:lower:]' | tr '_' '-').yaml"
env=$(echo "$i" | cut -f1 -d"=")
value=$(echo "${!env}")
if [ ! -f $SSL_CRTS_DIR/$name ]; then
echo "$value" > $SSL_CRTS_DIR/$name
chmod 600 $SSL_CRTS_DIR/$name
fi
done
# only enabled if we are running etcd with custom uid/gid
# change ownership of etcd cert and key and kube-ca to the custom uid/gid
if [ -n "${ETCD_UID}" ] && [ -n "${ETCD_GID}" ]; then
# set minial mask to allow effective read access to the certificates
setfacl -R -m m::rX "${SSL_CRTS_DIR}" && echo "Successfully set ACL mask for certs dir"
# we remove certs dir acl if any for the custom etcd uid, since chown will give that access
setfacl -R -x u:${ETCD_UID} "${SSL_CRTS_DIR}" && echo "Successfully unset user ACL for certs dir"
# likewise REMOVE any group ACL entry for the custom etcd gid; group access
# comes from the chown below (-x unsets an entry, it does not grant access)
setfacl -R -x g:${ETCD_GID} "${SSL_CRTS_DIR}" && echo "Successfully unset group ACL for certs dir"
# Hand the etcd certs to the custom uid/gid and keep the CA world-readable.
for name in $SSL_CRTS_DIR/*.pem; do
if [[ $name == *kube-etcd* ]] ; then
chown "${ETCD_UID}":"${ETCD_GID}" $name
fi
if [[ $name == *kube-ca.pem ]] ; then
chmod 644 $name
fi
done
chmod 755 $SSL_CRTS_DIR
fi
| true
|
0196e2404034a031dd316f8be685fd7072e05d4a
|
Shell
|
jdegges/gumstix-pip-oe
|
/gumstix-oe/org.openembedded.snapshot/packages/slugos-init/files/boot/.svn/text-base/ram.svn-base
|
UTF-8
| 1,263
| 3.3125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# boot from the hard disk partition "$1" (which
# must be given) using options from the rest of
# the command line.
#
# Use the standard init path (see /etc/init.d/rcS)
export PATH=/sbin:/bin:/usr/sbin:/usr/bin
#
# Load the helper functions
# (leds/checkmount/swivel below come from this file — not visible here)
. /etc/default/functions
#
leds beep -r 2
leds boot system
#
if test -n "$1"
then
	device="$1"
	shift
	echo "boot: rootfs: mount $* $device"
	#
	# prepare the device.  This uses tmpfs to avoid dependency
	# on non-built-in file systems and because tmpfs can expand
	# to bigger than the 10MByte ram0 partition used before.
	# The 'device' is currently unused.
	if mount -t tmpfs "$@" tmpfs /mnt
	then
		cd /
		# filter out boot (with zimage), linuxrc and anything
		# below /var, keep dev or the boot will fail (note that
		# nothing is mounted this early in the bootstrap).
		find . -mount -print |
		sed '\@^./boot/@d;\@^./boot$@d;\@^./linuxrc@d;\@^./var/@d' |
		cpio -p -d -m -u /mnt
		# checkmount checks for sh, init and no .recovery plus
		# either mnt or initrd, mnt must exist!
		if checkmount /mnt
		then
			# pivot to /mnt
			cd /
			swivel mnt mnt
			# swivel failed (it only returns on error)
		fi
		# Failure: unmount the partition.
		umount /mnt
	fi
fi
# fallback - use the flash boot
leds beep -f 1000 -r 2
exec /boot/flash
| true
|
60381983b582efbd1df6f2a15d7b8a1106c33e26
|
Shell
|
michaelepley/openshift-demo-nauticalcharts
|
/setup-initial-app.sh
|
UTF-8
| 4,750
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# Configuration
. ./config-demo-openshift-nauticalcharts.sh || { echo "FAILED: Could not configure" && exit 1 ; }
# Additional Configuration
#None
echo -n "Verifying configuration ready..."
: ${APPLICATION_NAME?"missing configuration for APPLICATION_NAME"}
: ${APPLICATION_REPOSITORY_GITHUB?"missing configuration for APPLICATION_REPOSITORY_GITHUB"}
: ${OPENSHIFT_MASTER?"missing configuration for OPENSHIFT_MASTER"}
: ${OPENSHIFT_APPS?"missing configuration for OPENSHIFT_APPS"}
: ${OPENSHIFT_USER_REFERENCE?"missing configuration for OPENSHIFT_APPS"}
: ${OPENSHIFT_OUTPUT_FORMAT?"missing configuration for OPENSHIFT_OUTPUT_FORMAT"}
: ${CONTENT_SOURCE_DOCKER_IMAGES_RED_HAT_REGISTRY?"missing configuration for CONTENT_SOURCE_DOCKER_IMAGES_RED_HAT_REGISTRY"}
OPENSHIFT_PROJECT_DESCRIPTION_QUOTED=\'${OPENSHIFT_PROJECT_DESCRIPTION}\'
echo "OK"
echo "Setup nautical chart demo Configuration_____________________________________"
echo "    APPLICATION_NAME                               = ${APPLICATION_NAME}"
echo "    APPLICATION_REPOSITORY_GITHUB                  = ${APPLICATION_REPOSITORY_GITHUB}"
# Each label now prints its OWN variable; the original rotated the
# MASTER/APPS/USER_REFERENCE values across the wrong labels.
echo "    OPENSHIFT_MASTER                               = ${OPENSHIFT_MASTER}"
echo "    OPENSHIFT_APPS                                 = ${OPENSHIFT_APPS}"
echo "    OPENSHIFT_USER_REFERENCE                       = ${OPENSHIFT_USER_REFERENCE}"
echo "    CONTENT_SOURCE_DOCKER_IMAGES_RED_HAT_REGISTRY  = ${CONTENT_SOURCE_DOCKER_IMAGES_RED_HAT_REGISTRY}"
echo "    OPENSHIFT_OUTPUT_FORMAT                        = ${OPENSHIFT_OUTPUT_FORMAT}"
echo "Create Simple PHP nautical chart demo"
echo " --> Make sure we are logged in (to the right instance and as the right user)"
pushd config >/dev/null 2>&1
. ./setup-login.sh -r OPENSHIFT_USER_REFERENCE || { echo "FAILED: Could not login" && exit 1; }
popd >/dev/null 2>&1
[ "x${OPENSHIFT_CLUSTER_VERIFY_OPERATIONAL_STATUS}" != "xfalse" ] || { echo " --> Verify the openshift cluster is working normally" && oc status -v >/dev/null || { echo "FAILED: could not verify the openshift cluster's operational status" && exit 1; } ; }
echo " --> prepare for later CI/CD demo by creating a jenkins deployment now (it takes a couple of minutes)"
oc get dc/jenkins >/dev/null 2>&1 || oc new-app --template=jenkins-ephemeral >/dev/null 2>&1 || { echo "FAILED: Could not find or create the jenkins runtime" && exit 1; }
echo " --> Create the original application from the ${NAUTICALCHART_ORIGINAL_APPLICATION_NAME} application git repo"
oc get dc/${NAUTICALCHART_ORIGINAL_APPLICATION_NAME} >/dev/null 2>&1 || oc new-app --name=${NAUTICALCHART_ORIGINAL_APPLICATION_NAME} --code=${NAUTICALCHART_ORIGINAL_APPLICATION_REPOSITORY_GITHUB} -l app=${NAUTICALCHART_ORIGINAL_APPLICATION_NAME},part=frontend >/dev/null 2>&1 || { echo "FAILED: Could not find or create the app=${NAUTICALCHART_ORIGINAL_APPLICATION_NAME},part=frontend " && exit 1; }
echo -n " --> Waiting for the ${NAUTICALCHART_ORIGINAL_APPLICATION_NAME} application to start....press any key to proceed"
while ! oc get pods | grep ${NAUTICALCHART_ORIGINAL_APPLICATION_NAME} | grep -v build | grep Running >/dev/null 2>&1 ; do echo -n "." && { read -t 1 -n 1 && break ; } && sleep 1s; done; echo ""
echo " --> Expose a generic endpoint for the original application"
oc get route ${NAUTICALCHART_ORIGINAL_APPLICATION_NAME} >/dev/null 2>&1 || oc expose service ${NAUTICALCHART_ORIGINAL_APPLICATION_NAME} >/dev/null 2>&1 || { echo "FAILED: Could not verify route to application frontend" && exit 1; } || { echo "FAILED: Could patch frontend" && exit 1; }
echo " --> Expose a canonical endpoint for external users, which will never change...start them with the ORIGINAL application"
echo " --> Try it! Go to ${NAUTICALCHART_CANONICAL_APPLICATION_NAME}.${OPENSHIFT_APPS}"
oc get route ${NAUTICALCHART_CANONICAL_APPLICATION_NAME} >/dev/null 2>&1 || oc expose service ${NAUTICALCHART_ORIGINAL_APPLICATION_NAME} --name ${NAUTICALCHART_CANONICAL_APPLICATION_NAME} -l app=${NAUTICALCHART_CANONICAL_APPLICATION_NAME} --hostname="${NAUTICALCHART_CANONICAL_APPLICATION_NAME}.${OPENSHIFT_APPS}" >/dev/null 2>&1
echo -n " --> Waiting for both application endpoints to resolve successfully....press any key to proceed"
COUNTER=0
while [ $(( COUNTER ++ )) -lt 30 ] && ! curl -f -s -i ${NAUTICALCHART_ORIGINAL_APPLICATION_NAME}-${OPENSHIFT_PROJECT}.${OPENSHIFT_APPS} >/dev/null 2>&1 && ! curl -f -s -i ${NAUTICALCHART_CANONICAL_APPLICATION_NAME}.${OPENSHIFT_APPS} >/dev/null 2>&1 ; do echo -n "." && read -t 1 -n 1 && break ; done
echo " --> Checking for the presence of a newly requested weather feature"
curl -f -s -i ${NAUTICALCHART_CANONICAL_APPLICATION_NAME}.${OPENSHIFT_APPS} | grep -i weather >/dev/null 2>&1 || { echo " --> On Noes! the weather toolbar is missing!" && echo " --> Let's fix this" ; }
echo "Done."
| true
|
49dfdb619b50f94294a90e6ae1c18f8a1c9697cc
|
Shell
|
nhattvm11/docs
|
/scripts/install-intel-mkl-from_apt.sh
|
UTF-8
| 1,995
| 3.5625
| 4
|
[
"MIT"
] |
permissive
|
#! /bin/bash
# Interactive installer for Intel MKL 2019 from Intel's APT repository,
# registering it as the system BLAS/LAPACK via update-alternatives.
echo ""
echo "************************************************ Please confirm *******************************************************"
echo "    Installing Intel(R) Math Kernel Library (Intel(R) MKL) Using APT Repository. "
echo "    Select n to skip Intel MKL installation or y to install it."
read -p "    Continue installing Intel MKL (y/n) ? " CONTINUE
if [[ "$CONTINUE" == "y" || "$CONTINUE" == "Y" ]]; then
	echo "";
	echo "Installing Intel MKL 2019";
	echo "";
	# -e so the \n escapes are interpreted; plain `echo` printed them literally.
	echo -e "\nInstalling the GPG key for the repository\n";
	wget https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2019.PUB
	apt-key add GPG-PUB-KEY-INTEL-SW-PRODUCTS-2019.PUB
	echo -e "\nAdding the APT Repository\n";
	sudo sh -c 'echo deb https://apt.repos.intel.com/mkl all main > /etc/apt/sources.list.d/intel-mkl.list'
	sudo apt update
	# `install` was missing: `sudo apt intel-mkl-…` is not a valid apt
	# invocation, so the package was never installed.
	sudo apt install -y intel-mkl-2019.3-062
	# Register MKL's single runtime as the BLAS/LAPACK alternative.
	update-alternatives --install /usr/lib/x86_64-linux-gnu/libblas.so     libblas.so-x86_64-linux-gnu      /opt/intel/mkl/lib/intel64/libmkl_rt.so 50
	update-alternatives --install /usr/lib/x86_64-linux-gnu/libblas.so.3   libblas.so.3-x86_64-linux-gnu    /opt/intel/mkl/lib/intel64/libmkl_rt.so 50
	update-alternatives --install /usr/lib/x86_64-linux-gnu/liblapack.so   liblapack.so-x86_64-linux-gnu    /opt/intel/mkl/lib/intel64/libmkl_rt.so 50
	update-alternatives --install /usr/lib/x86_64-linux-gnu/liblapack.so.3 liblapack.so.3-x86_64-linux-gnu  /opt/intel/mkl/lib/intel64/libmkl_rt.so 50
	# Make the MKL shared libraries visible to the dynamic linker.
	echo "/opt/intel/lib/intel64"     >  /etc/ld.so.conf.d/mkl.conf
	echo "/opt/intel/mkl/lib/intel64" >> /etc/ld.so.conf.d/mkl.conf
	sudo ldconfig
	echo "The libraries and other components that are required to develop Intel MKL-DNN enabled applications under the /usr/local directory";
	echo "Shared libraries (/usr/local/lib): libiomp5.so, libmkldnn.so, libmklml_intel.so";
	echo "Header files (/usr/local/include): mkldnn.h, mkldnn.hpp, mkldnn_types.h";
else
	echo "";
	echo "Skipping Intel MKL installation";
	echo "";
fi
| true
|
915e4c1a1bd2fe567eb0e14cba8522c92ff7f51b
|
Shell
|
florida/dotfiles
|
/git_setup.sh
|
UTF-8
| 4,799
| 4.1875
| 4
|
[] |
no_license
|
#!/bin/bash
# ANSI color escape sequences used by the helpers below. The values hold a
# LITERAL backslash sequence ('\033...'); `echo -e` interprets it at print time.
Color_Off='\033[0m'       # Text Reset
# Bold
BBlack='\033[1;30m'       # Black
BRed='\033[1;31m'         # Red
BGreen='\033[1;32m'       # Green
BYellow='\033[1;33m'      # Yellow
# write_banner TEXT [COLOR]
# Print TEXT centered inside a fixed-width '=' banner, colored with COLOR
# (defaults to bold green), followed by a color reset.
write_banner() {
local text="$1"
local color="${2:-$BGreen}"
# Set the fixed width of the banner
local width=100
# Draw the banner using the text
echo
printf "${color}"
printf "=%.0s" $(seq 1 $width)
printf "=\n"
# %*s pads each side with (width - len - 2)/2 spaces. NOTE(review): integer
# division means odd-length text yields a line one char narrower than the
# '=' rules — cosmetic only, confirm if exact alignment matters.
printf "|%*s %s %*s|\n" $(((width - ${#text} - 2)/2)) "" "$text" $(((width - ${#text} - 2)/2))
printf "=%.0s" $(seq 1 $width)
printf "=\n"
printf "${Color_Off}"
echo
}
# Shared worker: print a message wrapped in the given ANSI color sequence
# and reset afterwards. %b interprets the escape codes, like `echo -e`.
_colored() {
    printf '%b\n' "$1$2${Color_Off}"
}

# Emphasis helpers — one per palette color.
bold_text()    { _colored "${BBlack}"  "$1"; }
info_text()    { _colored "${BYellow}" "$1"; }
alert_text()   { _colored "${BRed}"    "$1"; }
success_text() { _colored "${BGreen}"  "$1"; }
# prompt_user QUESTION [VALIDATION_TEXT]
# Ask QUESTION until the user answers y/Y (return 0) or n/N (return 1);
# anything else prints a hint and re-prompts. VALIDATION_TEXT is accepted
# for call-compatibility but unused, as before.
prompt_user() {
    local question="$1"
    local validation_text="$2"
    local answer
    while :; do
        read -p "$question" answer
        if [[ "$answer" == [yY] ]]; then
            return 0
        elif [[ "$answer" == [nN] ]]; then
            return 1
        else
            echo "Please enter 'y' for yes or 'n' for no."
        fi
    done
}
# Generate an ed25519 SSH key + git identity (first run only), start the
# ssh-agent, write an SSH config if missing, load the key, and optionally
# copy the public key to the clipboard.
setup_ssh() {
  local SSH_FILE=~/.ssh
  local SSH_CONFIG_FILE=~/.ssh/config
  # -d: ~/.ssh is a DIRECTORY; the original `-f` (regular file) test was
  # always false, so keys were regenerated on every run.
  if [ -d "$SSH_FILE" ]; then
    echo "$SSH_FILE exists!"
  else
    echo "$SSH_FILE does not exist."
    bold_text "Enter your name and email to generate SSH key and git config"
    read -p "What's your github name? " GITHUB_NAME
    read -p "What's your github email? " GITHUB_EMAIL
    info_text "Generating a new SSH key & git config..."
    echo
    # Quoted so names/emails containing spaces survive intact.
    ssh-keygen -t ed25519 -C "$GITHUB_EMAIL"
    git config --global user.name "$GITHUB_NAME"
    git config --global user.email "$GITHUB_EMAIL"
    echo
    write_banner "SSH key and git config created"
  fi
  write_banner "Running ssh-agent in the background..."
  eval "$(ssh-agent -s)"
  if [ -f "$SSH_CONFIG_FILE" ]; then
    echo "$SSH_CONFIG_FILE exists!";
  else
    echo "$SSH_CONFIG_FILE does not exist!";
    info_text "Writing new SSH config...";
    # printf emits real newlines; the original `echo "...\n..."` wrote the
    # literal characters \n, producing a broken one-line ssh config.
    # NOTE(review): `Host *.github.com` does not match plain github.com —
    # confirm whether `Host github.com` was intended.
    printf 'Host *.github.com\n  AddKeysToAgent yes\n  IdentityFile ~/.ssh/id_ed25519\n' >> "$SSH_CONFIG_FILE"
    success_text "SSH config created!"
  fi
  info_text "Adding SSH private key to ssh-agent...";
  ssh-add ~/.ssh/id_ed25519
  success_text "SSH private key was added!";
  ## SSH COPY PROMPT
  if prompt_user "Do you want to copy SSH public key?(y/n) "; then
    bold_text "Great! Copying SSH public key to clipboard";
    pbcopy < ~/.ssh/id_ed25519.pub
    echo
    bold_text "Copy your SSH public keys to https://github.com/settings/keys"
  else
    info_text Skipping copying SSH...;
    echo
    bold_text "to manually copy SSH public key run:"
    echo
    echo "pbcopy < ~/.ssh/id_ed25519.pub"
    echo
  fi
  write_banner "SSH SETUP FINISHED!"
}
# Ensure gpg is available (offering a Homebrew install if it is not), then
# run key generation via generate_gpg_keys.
setup_gpg() {
  # `command -v` is the built-in, portable tool check; the original used
  # the external, non-standard `which`.
  if command -v gpg >/dev/null; then
    success_text "gpg command exists!"
    generate_gpg_keys
  else
    info_text "gpg command does not exists"
    if prompt_user "Would you like to install GPG through homebrew?(y/n) "; then
      write_banner "GPG Installation"
      bold_text "Great! Let's install GPG installed";
      brew install gnupg
      write_banner "GPG Installation Complete"
      generate_gpg_keys
      write_banner "GPG Setup Complete"
    else
      write_banner "Skipping GPG install and setup..." $BYellow
    fi
  fi
}
# Interactively generate a GPG key pair, list the secret keys, export the
# user-chosen key's public block, and point the user at GitHub settings.
generate_gpg_keys() {
info_text "Generating GPG keys"
gpg --full-generate-key
success_text "Generated GPG keys successfully"
bold_text "Displaying GPG keys"
gpg --list-secret-keys --keyid-format=long
# The "long" key id is the hex id shown after the algorithm in the listing.
read -p "Which long key would you like to export? " GPG_LONG_KEY
gpg --armor --export $GPG_LONG_KEY
bold_text "Copy your GPG public key to https://github.com/settings/keys"
}
# ----- Main flow: optional GitHub CLI install, then SSH and GPG setup -----
write_banner "HI! WELCOME TO YOUR GIT SSH & GPG SETUP"
# Github CLI install
if prompt_user "Would you like to install Github CLI?(y/n) "; then
info_text "Installing Github CLI..."
brew install gh
success_text "Github CLI installed"
else
info_text "Skipping installing Github CLI..."
fi
## SSH SETUP PROMPT
if prompt_user "Do you want to setup SSH?(y/n) "; then
write_banner "SSH SETUP"
bold_text "Great! Let's get your SSH setup";
setup_ssh
else
write_banner "Aborting SSH Setup." $BRed
fi
## GPG SETUP PROMPT
if prompt_user "Do you want to start GPG Setup? "; then
bold_text "Great! Let's get your GPG setup";
write_banner "GPG SETUP"
setup_gpg
else
write_banner "Aborting GPG setup" $BRed;
fi
write_banner "SETUP FINISHED! Goodbye!"
| true
|
27b2e9d98eb12e0f69466bcbfc9fb8f1e1d720b9
|
Shell
|
fluffle/unrealircd-deb
|
/build.sh
|
UTF-8
| 551
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/sh
# Build a Debian package of UnrealIRCd: fetch the upstream tarball once,
# stage it under pkg/ with the names debuild expects, unpack, overlay the
# local debian/ directory, and build unsigned packages.
LANG=C
pkgver=3.2.10.3
# Download only if the tarball is not already present.
[ ! -e Unreal$pkgver.tar.gz ] && wget http://www.unrealircd.com/downloads/Unreal$pkgver.tar.gz
[ ! -e pkg ] && mkdir pkg
cd pkg
# debuild wants both <name>-<ver> and <name>_<ver>.orig naming; symlink the
# single downloaded tarball under both.
[ ! -e unrealircd-$pkgver.tar.gz ] && ln -s ../Unreal$pkgver.tar.gz unrealircd-$pkgver.tar.gz
[ ! -e unrealircd_$pkgver.orig.tar.gz ] && ln -s ../Unreal$pkgver.tar.gz unrealircd_$pkgver.orig.tar.gz
# Start from a clean source tree on every run.
[ -e unrealircd-$pkgver ] && rm -rf unrealircd-$pkgver
tar zxf unrealircd-$pkgver.tar.gz
mv Unreal$pkgver unrealircd-$pkgver
cd unrealircd-$pkgver
cp -r ../../debian debian
# -us -uc: build without signing source or changes files.
debuild -us -uc
| true
|
ee2487dc3e29444c43376420d7a76b920594b19e
|
Shell
|
nartesfasrum/music-scripts
|
/der.sh
|
UTF-8
| 268
| 3.265625
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
# Sort music files named "Artist;Song;Album;Track.mp3" into
# Artist/Album/ directories.
for file in *
do
    # Split the filename on ';' into its fields. -r keeps backslashes
    # literal; the quoting below keeps names containing spaces intact
    # (the original unquoted `mv $file` word-split such names).
    IFS=';' read -r -a files <<< "$file"
    artist=${files[0]}
    album=${files[2]}
    song=${files[1]}
    track=${files[3]}
    mp3=".mp3"
    track=${track//$mp3/}
    # NOTE(review): $song is rebuilt as "track_song.mp3" but never used —
    # the file keeps its original name after the move. Confirm intent.
    song=$track"_"$song".mp3"
    echo "$artist"
    mkdir -p "$artist"/"$album"
    mv -- "$file" "$artist"/"$album"
done
| true
|
7875cda3762707aadc545e06ab607a6cb8f8ad38
|
Shell
|
lpawlik91/USBSpeedTest
|
/collect_results.sh
|
UTF-8
| 503
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the USB benchmark over buffer sizes 64,62,...,22 for one transfer
# mode, appending each run's output to Results/<MODE>_<SIZE>.data.
# (The original duplicated this sweep verbatim for both modes.)
run_sweep() {
    local mode=$1
    local size=$2
    local buffer=64
    local filename="${mode}_${size}.data"
    # NOTE(review): assumes the Results/ directory already exists, as the
    # original did — confirm whether it should be created here.
    while [ $buffer -gt 21 ]; do
        ./libusbtest2/libusbtest2/libusb "$mode" "$buffer" "$size" 1 >> "Results/$filename"
        buffer=$((buffer - 2))
    done
}

# Synchronous (S) then asynchronous (A) mode, same size and buffer sweep.
run_sweep S 107374200
run_sweep A 107374200
| true
|
3df8444de130d5fb556418c3e8fc78ee70f9e73e
|
Shell
|
fatman2021/archlinux-packages
|
/community-staging.sh
|
UTF-8
| 7,241
| 3.359375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Build/publish MATE packages for Arch Linux community repos.
MATE_VER=1.10
TARGET="staging"
# NOTE(review): ACTION is assigned twice; the second assignment wins, so
# the script currently only runs the "move" branch. Looks like a manual
# toggle — confirm which action is intended before running.
ACTION="build"
ACTION="move"
MACHINE=$(uname -m)
# devtools provides the *-build chroot helpers used by pkg_builder.
TEST_DEVTOOLS=$(pacman -Qq devtools 2>/dev/null)
if [ $? -ne 0 ]; then
echo "ERROR! You must install 'devtools'."
exit 1
fi
echo "Configure packager"
echo 'PACKAGER="Martin Wimpress <code@flexion.org>"' > "${HOME}"/.makepkg.conf
echo 'GPGKEY="0864983E"' >> "${HOME}"/.makepkg.conf
echo "Checkout svn-community"
# Fresh sparse checkout on first run; plain `svn update` afterwards.
if [ ! -d "${HOME}"/BitSync/Source/archlinux.org/svn-community ]; then
mkdir -p "${HOME}"/BitSync/Source/archlinux.org
cd "${HOME}"/BitSync/Source/archlinux.org
svn checkout -N svn+ssh://svn-community@nymeria.archlinux.org/srv/repos/svn-community/svn svn-community
cd svn-community
else
cd "${HOME}"/BitSync/Source/archlinux.org/svn-community
svn update
fi
# http://wiki.mate-desktop.org/status:1.10
CORE=(
#mate-common
#mate-desktop
#libmatekbd
#libmatemixer
#libmateweather
#mate-icon-theme
#caja
#caja-gtk3
#mate-polkit
#marco
#marco-gtk3
#mate-settings-daemon
#mate-settings-daemon-gtk3
#mate-session-manager
#mate-session-manager-gtk3
#mate-menus
#mate-panel
#mate-panel-gtk3
#mate-backgrounds
#mate-themes
#mate-notification-daemon
#mate-control-center
#mate-control-center-gtk3
#mate-screensaver
#mate-screensaver-gtk3
#mate-media
#mate-media-gtk3
#mate-power-manager
#mate-power-manager-gtk3
#mate-system-monitor
)
EXTRA=(
#atril
#atril-gtk3
#caja-extensions
#caja-extensions-gtk3
#engrampa
#engrampa-gtk3
#eom
#eom-gtk3
#mate-applets
#mate-applets-gtk3
#mate-icon-theme-faenza
#mate-netbook
#mate-netbook-gtk3
#mate-netspeed
#mate-netspeed-gtk3
#mate-sensors-applet
#mate-sensors-applet-gtk3
#mate-terminal
#mate-terminal-gtk3
#mate-user-guide - not released
#mate-user-share
#mate-user-share-gtk3
#mate-utils
#mate-utils-gtk3
mozo
#mozo-gtk3
#pluma
#pluma-gtk3
#python2-caja
#python2-caja-gtk3
)
AUR_EXTRA=(
caja-dropbox
mate-indicator-applet
)
OTHER=(
galculator
gnome-main-menu
obex-data-server
blueman
)
BUILD_ORDER=("${CORE[@]}" "${EXTRA[@]}")
# pkg_builder PKG REPO
# Decide whether PKG needs rebuilding (by comparing the PKGBUILD version
# against any already-built package file in the cwd), build it in the REPO
# chroot(s) if so, and publish + trigger a db-update when anything was built.
# Expects to run inside the package's trunk/ directory (PKGBUILD present).
function pkg_builder() {
local PKG=${1}
local REPO="${2}"
# -gtk3 variants share the base package's source checkout.
local PKGBASE=$(echo ${PKG} | sed 's/-gtk3//')
# If there is a git clone check the revision.
if [ -f ${PKGBASE}/FETCH_HEAD ]; then
echo " - Fetching revision from git"
# git version
local _ver=$(grep -E ^_ver PKGBUILD | cut -f2 -d'=')
cd ${PKGBASE}
git fetch
# Version scheme for git snapshots: <_ver>.<YYYYMMDD>.<commit count>.
local PKGBUILD_VER=$(printf "%s.%s.%s" "${_ver}" "$(git log -1 --format=%cd --date=short | tr -d -)" "$(git rev-list --count HEAD)")
cd ..
else
# pacakge version
#local PKGBUILD_VER=$(grep -E ^pkgver PKGBUILD | cut -f2 -d'=' | head -n1)
local POINT_VER=$(grep -E ^pkgver PKGBUILD | cut -d'.' -f2)
local PKGBUILD_VER="${MATE_VER}.${POINT_VER}"
fi
local PKGBUILD_REL=$(grep -E ^pkgrel PKGBUILD | cut -f2 -d'=')
local PKGBUILD=${PKGBUILD_VER}-${PKGBUILD_REL}
# arch=any packages only need one (i686) chroot build.
local TEST_ANY=$(grep "^arch=" PKGBUILD | grep any)
if [ -n "${TEST_ANY}" ]; then
if [ "${MACHINE}" == "i686" ] || [ "${MACHINE}" == "x86_64" ]; then
local CHROOT_ARCHS=(i686)
fi
else
if [ "${MACHINE}" == "i686" ] || [ "${MACHINE}" == "x86_64" ]; then
local CHROOT_ARCHS=(i686 x86_64)
fi
fi
local PUBLISH=0
for CHROOT_ARCH in ${CHROOT_ARCHS[@]};
do
# caja-extensions' PKGBUILD emits split packages; probe for caja-share
# since no file named after the pkgbase is produced.
if [ "${PKG}" == "caja-extensions" ]; then
PKG_CHECK="caja-share"
elif [ "${PKG}" == "caja-extensions-gtk3" ]; then
PKG_CHECK="caja-share-gtk3"
else
PKG_CHECK="${PKG}"
fi
# RET != 0 means no built package file exists yet for this version.
if [ -n "${TEST_ANY}" ]; then
echo " - Looking for ${PKG_CHECK}-${PKGBUILD}-any.pkg.tar.xz"
EXIST=$(ls -1 ${PKG_CHECK}-${PKGBUILD}-any.pkg.tar.xz 2>/dev/null)
local RET=$?
else
echo " - Looking for ${PKG_CHECK}-${PKGBUILD}-${CHROOT_ARCH}.pkg.tar.xz"
EXIST=$(ls -1 ${PKG_CHECK}-${PKGBUILD}-${CHROOT_ARCH}.pkg.tar.xz 2>/dev/null)
local RET=$?
fi
if [ ${RET} -ne 0 ]; then
echo " - Building ${PKG}"
# devtools helper, e.g. staging-i686-build; builds in a clean chroot.
sudo ${REPO}-${CHROOT_ARCH}-build
if [ $? -ne 0 ]; then
echo " - Failed to build ${PKG} for ${CHROOT_ARCH}. Stopping here."
exit 1
fi
local PUBLISH=1
else
if [ -n "${TEST_ANY}" ]; then
echo " - ${PKG}-any is current"
else
echo " - ${PKG}-${CHROOT_ARCH} is current"
fi
fi
done
# Publish with the repo-specific helper, then kick the server-side db update.
if [ ${PUBLISH} -eq 1 ]; then
if [ "${REPO}" == "community" ]; then
communitypkg
else
community-${REPO}pkg
fi
ssh flexiondotorg@nymeria.archlinux.org /community/db-update
fi
}
# Main loop: for each package either (build) sync its PKGBUILD into the
# svn-community checkout and build it for $TARGET, or (move) promote it
# from community-staging to community on the server.
for PKG_NAME in ${BUILD_ORDER[@]};
do
# Build
if [ "${ACTION}" == "build" ]; then
echo "Building ${PKG_NAME}"
# Update svn
cd "${HOME}"/BitSync/Source/archlinux.org/svn-community/
communityco ${PKG_NAME}
# First time: create trunk/, copy PKGBUILD + support files, add the
# svn:keywords Id property and an $Id$ header if missing, then commit.
if [ ! -d "${PKG_NAME}" ]; then
mkdir -p "${PKG_NAME}"/{repos,trunk}
cp -a "${HOME}"/BitSync/Source/archlinux-packages/bitbucket/"${PKG_NAME}"/PKGBUILD "${HOME}"/BitSync/Source/archlinux.org/svn-community/"${PKG_NAME}"/trunk/ 2>/dev/null
cp -a "${HOME}"/BitSync/Source/archlinux-packages/bitbucket/"${PKG_NAME}"/*.{diff,install,pam,patch} "${HOME}"/BitSync/Source/archlinux.org/svn-community/"${PKG_NAME}"/trunk/ 2>/dev/null
svn add "${PKG_NAME}"
svn propset svn:keywords "Id" "${PKG_NAME}"/trunk/PKGBUILD
TEST_SVN_ID=`head -n1 "${PKG_NAME}"/trunk/PKGBUILD | grep Id`
RET=$?
if [ ${RET} -eq 1 ]; then
echo '# $Id$' > /tmp/svn_id
cat /tmp/svn_id "${PKG_NAME}"/trunk/PKGBUILD > /tmp/PKGBUILD
mv /tmp/PKGBUILD "${PKG_NAME}"/trunk/PKGBUILD
fi
svn commit -m "Added ${PKG_NAME}"
else
# Already under version control: refresh the files and commit.
cp -a "${HOME}"/BitSync/Source/archlinux-packages/bitbucket/"${PKG_NAME}"/PKGBUILD "${HOME}"/BitSync/Source/archlinux.org/svn-community/"${PKG_NAME}"/trunk/ 2>/dev/null
cp -a "${HOME}"/BitSync/Source/archlinux-packages/bitbucket/"${PKG_NAME}"/*.{diff,install,pam,patch} "${HOME}"/BitSync/Source/archlinux.org/svn-community/"${PKG_NAME}"/trunk/ 2>/dev/null
svn commit -m "Updated ${PKG_NAME}"
fi
cd "${HOME}"/BitSync/Source/archlinux.org/svn-community/"${PKG_NAME}"/trunk/
# "community" target builds against extra; staging/testing use their own repos.
if [ "${TARGET}" == "community" ]; then
pkg_builder "${PKG_NAME}" extra
elif [ "${TARGET}" == "staging" ]; then
pkg_builder "${PKG_NAME}" staging
elif [ "${TARGET}" == "testing" ]; then
pkg_builder "${PKG_NAME}" testing
fi
fi
# Move package
if [ "${ACTION}" == "move" ]; then
ssh flexiondotorg@nymeria.archlinux.org /srv/repos/svn-community/dbscripts/db-move community-staging community "${PKG_NAME}"
fi
done
| true
|
addf7451ff7f08423f51d1aed727b598631e9876
|
Shell
|
Aarkon/terminal-soundboard
|
/soundboard.sh
|
UTF-8
| 391
| 3.734375
| 4
|
[
"Unlicense"
] |
permissive
|
#! /bin/bash
# Terminal soundboard: number every file in $path, let the user pick one
# by a single keypress, play it with mplayer, then show the menu again.

# path for the sound files be put in
path=/home/username/sounds

# clip maps menu numbers to file paths. (The original executed bare
# `$choice` / `$clip` here — empty no-op commands, apparently meant as
# declarations — removed.)
declare -a clip

# main menu
menu() {
    # Plain ASCII quotes throughout: the original used typographic quotes
    # (“ ”), which turned the glob below into a literal non-matching string
    # and garbled the read prompt.
    for file in "$path"/* ; do
        i=$(( i+1 ))
        clip[$i]=$file
        echo
        echo "$i: ${file##*/}"
    done
    echo
    read -n 1 -p "Which sound do you want to be played? " choice
    echo "${clip[$choice]}"
    play
}

# playing
play() {
    mplayer "${clip[$choice]}"
    # Reset the menu counter; the original `i=o` (letter o) was a typo.
    i=0
    # NOTE(review): menu and play recurse into each other, so the call
    # stack grows with every selection — confirm a loop isn't preferable.
    menu
}
menu
| true
|
94be73485ba753ca31aeb871148f2431438a7875
|
Shell
|
leshorne/dotfiles
|
/bin/opdir
|
UTF-8
| 416
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/bash
# ----------------------------------------------------------------
#
#	DESCRIPTION:
#
#	A Script that opens the current folder in Finder
#
# ----------------------------------------------------------------
#
#	USAGE: just run this in the terminal
#
# ----------------------------------------------------------------
# macOS-only: asks Finder (via AppleScript) to open the cwd; the backticked
# `pwd` is expanded by the shell before osascript sees the command string.
/usr/bin/osascript -e "tell application \"Finder\" to open POSIX file \"`pwd`\""
| true
|
bbeb3164c9bc8bbde643615811c12c4d10313ee9
|
Shell
|
caHarkness/openvpn-install
|
/install.sh
|
UTF-8
| 1,872
| 3.375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# OpenVPN server installer: PKI via EasyRSA, server config from templates.
# PORT/PROTOCOL/PUBLIC_IP and the go_back helper come from common.sh —
# not visible here.
source common.sh
apt-get update
apt-get install openvpn iptables openssl ca-certificates -y
# Download and install EasyRSA
export EASYRSA_DOWNLOAD="https://github.com/OpenVPN/easy-rsa/releases/download/v3.0.5/EasyRSA-nix-3.0.5.tgz"
curl -Lo ./download.tgz "$EASYRSA_DOWNLOAD"
tar xzf ./download.tgz
rm -f download.tgz
mv EasyRSA-* easyrsa
chown -R root:root easyrsa
cd easyrsa
# Build the CA plus one server and one client cert, all passphrase-less,
# valid ~10 years; generate the certificate revocation list.
./easyrsa init-pki
./easyrsa --batch build-ca nopass
EASYRSA_CERT_EXPIRE=3650 ./easyrsa build-server-full server nopass
EASYRSA_CERT_EXPIRE=3650 ./easyrsa build-client-full default nopass
EASYRSA_CRL_DAYS=3650 ./easyrsa gen-crl
cp -f \
pki/ca.crt \
pki/private/ca.key \
pki/issued/server.crt \
pki/private/server.key \
pki/crl.pem \
/etc/openvpn
go_back
# The OpenVPN daemon reads the CRL as nobody; plus a TLS-auth key and the
# templated DH parameters / server config.
chown nobody:nogroup /etc/openvpn/crl.pem
openvpn --genkey --secret /etc/openvpn/ta.key
cp template/dh.pem /etc/openvpn/dh.pem
cp template/server.conf /etc/openvpn
# Make sure the server.conf file is configured properly
sed -i "s/__PORT__/$PORT/g" /etc/openvpn/server.conf
sed -i "s/__PROTOCOL__/$PROTOCOL/g" /etc/openvpn/server.conf
# Enable kernel IP forwarding: persist the setting via sysctl.d and apply
# it immediately for the running kernel.
if [[ -d /etc/sysctl.d ]]
then
	cp template/30-openvpn-forward.conf /etc/sysctl.d
	echo 1 > /proc/sys/net/ipv4/ip_forward
else
	echo "WARNING: /etc/sysctl.d is not a directory, is this a Debian distribution?"
	# Exit non-zero: the original bare `exit` reported success on abort.
	exit 1
fi
# Install port forwarding service
if [[ -d "/etc/systemd/system" ]]
then
	echo "Installing service..."
	cp template/openvpn-iptables.service /etc/systemd/system
	sed -i "s/__PUBLIC_IP__/$PUBLIC_IP/g" /etc/systemd/system/openvpn-iptables.service
	systemctl enable --now openvpn-iptables.service
	systemctl restart openvpn@server.service
	echo "Done installing service."
else
	echo "WARNING: /etc/systemd/system is not a directory, is this a Debian distribution?"
	# Same fix as above: abort paths must not exit 0.
	exit 1
fi
| true
|
56bf42feb3504df38c54d51869113255c64ae1d9
|
Shell
|
troykitten/cm9_device_u8120
|
/prebuilt/etc/init.d/02huawei
|
UTF-8
| 621
| 2.84375
| 3
|
[] |
no_license
|
#!/system/bin/sh
# Android init.d script: mount the extra Huawei partition, tune sdcard
# read-ahead, and switch block devices to the BFQ I/O scheduler.
echo "try link huawei additional partition"
if [ -b /dev/block/mtdblock7 ];
then
	mkdir /data/cust;
	chown 1000.1000 /data/cust;
	chmod 777 /data/cust;
	mount /dev/block/mtdblock7 /data/cust;
fi
# sdcard speed fix
if [ -e /sys/devices/virtual/bdi/179:0/read_ahead_kb ];
then
	/system/xbin/echo "4096" > /sys/devices/virtual/bdi/179:0/read_ahead_kb;
fi
# Bfq scheduler
# rotational=1 for the sdcard, 0 for the internal mtd flash partitions.
echo "bfq" > /sys/block/mmcblk0/queue/scheduler;
echo 1 > /sys/block/mmcblk0/queue/rotational;
for i in 1 2 3 4 5;
do
	echo "bfq" > /sys/block/mtdblock$i/queue/scheduler;
	echo 0 > /sys/block/mtdblock$i/queue/rotational;
done
| true
|
1667a38ce0f997d9dca49c59dac4fb80cffeafe8
|
Shell
|
038hwolff/init
|
/scripts/02
|
UTF-8
| 463
| 2.75
| 3
|
[] |
no_license
|
#!/bin/sh
# Weekly unattended update: refresh package lists and upgrade packages,
# appending all output to the log. The original ran `sudo apt-upgrade -y`,
# which is not a command — the upgrade half silently never ran.
(sudo apt-get update -y && sudo apt-get upgrade -y) >> /var/log/update_script.log
# can aussi be writter: sudo apt-get update -y > /var/log/update_script.log && sudo apt-get upgrade -y >> /var/log/update_script.log
#program once a week at 4 a.m.
#Make sure this script is inside /usr/local/sbin, for example /user/local/sbin/02.
#/usr/local/sbin is for root scripts, it's needed to execute sudo commands.
#crontab -e
#00 4 * * 1  root /bin/sh /usr/local/sbin/02
| true
|
b3e0cd13d4c3ee9c33dd1c0d4c590dd91c2a3d4f
|
Shell
|
muschellij2/FSL6.0.0
|
/src/possum/generate_b0calc
|
UTF-8
| 2,768
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/sh
# POSSUM
#
# Tejas Pendse 12.12.12
#
# Wrapper script for b0calc. To used primarily by the GUI (B0 field -> Make B0 file)
#################################
#$ -l h_rt=5:0:0
#$ -l h_vmem=2.5G,tmem=2.5G
#$ -S /bin/bash
#$ -cwd
#$ -N makeb0
#$ -V
#################################
# Print the command-line help text for this b0calc wrapper on stdout.
usage()
{
	cat <<USAGE_EOF
makeb0.sh -- Tejas Pendse 14.12.12

Compulsory Options:
 -i <input-file> - Input Air-tissue segmentation
 -o <output-file> - Output B0 file(s) basename

Optional Options:
 -f <field-str> - Field Strength (in Tesla)
 -m - Motion (generates 9 volumes)

USAGE_EOF
}
## Get input options
motion="no"
verbose="yes"
fieldstr=1
# compset counts how many of the two compulsory options (-i, -o) were given.
compset=0
while [ ! -z "$1" ]
do
case "$1" in
-i) input="$2";compset=$((compset+1));shift;;
-o) output="$2";compset=$((compset+1));shift;;
-f) fieldstr="$2";shift;;
-m) motion="yes";shift;;
*) break;;
esac
shift
done
# Both -i and -o are required; otherwise show help and stop.
if [ $compset != "2" ]
then
usage
exit
fi
if [ ! -z $verbose ]
then
echo "Input: $input; Output: $output;"
echo "FieldStrength: ${fieldstr}T; Motion: $motion"
fi
if [ ! -e $input ]; then echo "Error: Can't find '$input'!" >&2; exit; fi
date
hostname
# Split the output argument into directory + basename, dropping .nii.gz.
outwd=$(dirname $output)
output=$(basename $output)
output=$(echo $output | sed -e 's/.nii.gz//')
if [ $motion == "yes" ]
then
# Motion case: compute the field for a unit B0 along each axis, split the
# per-axis volumes, and merge all nine into one 4D file.
## b0x
if [ ! -z $verbose ];then echo "------B0X------";fi
b0calc -i $input -o $outwd/b0x --b0x=1 --b0y=0 --b0=0 --xyz
fslsplit $outwd/b0x $outwd/b0x -t
## b0y
if [ ! -z $verbose ];then echo "------B0Y------";fi
b0calc -i $input -o $outwd/b0y --b0x=0 --b0y=1 --b0=0 --xyz
fslsplit $outwd/b0y $outwd/b0y -t
## b0z
if [ ! -z $verbose ];then echo "------B0Z------";fi
b0calc -i $input -o $outwd/b0z --b0x=0 --b0y=0 --b0=1 --xyz
fslsplit $outwd/b0z $outwd/b0z -t
if [ ! -z $verbose ];then echo "------Merging------";fi
fslmerge -t $outwd/$output $outwd/b0z0002 $outwd/b0z0001 $outwd/b0z0000 $outwd/b0y0002 $outwd/b0y0001 $outwd/b0y0000 $outwd/b0x0002 $outwd/b0x0001 $outwd/b0x0000
## Multiply by the field strength if other than 1T
# NOTE(review): `-ne` does integer comparison, so a non-integer field
# strength (e.g. 1.5) would error here — confirm intended input range.
if [ $fieldstr -ne 1 ]
then
fslmaths $outwd/$output -mul $fieldstr $outwd/$output
fi
## Clean up
rm -rv $outwd/b0x.nii.gz $outwd/b0y.nii.gz $outwd/b0z.nii.gz $outwd/b0z0002.nii.gz $outwd/b0z0001.nii.gz $outwd/b0z0000.nii.gz $outwd/b0y0002.nii.gz $outwd/b0y0001.nii.gz $outwd/b0y0000.nii.gz $outwd/b0x0002.nii.gz $outwd/b0x0001.nii.gz $outwd/b0x0000.nii.gz
else
# No-motion case: one b0calc run, single output volume.
if [ ! -z $verbose ];then echo "------B0CALC SINGLE------";fi
b0calc -i $input -o $outwd/$output
## Multiply by the field strength if other than 1T
if [ $fieldstr -ne 1 ]
then
fslmaths $outwd/$output -mul $fieldstr $outwd/$output
fi
fi
echo "Files generated at $outwd/$output"
| true
|
2181dce3d8ffbc30a8aa7a2dfea96bacba75a92f
|
Shell
|
kishanadd/136devops
|
/shell-scripts/08-exit-status.sh
|
UTF-8
| 430
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
# Demo: script-level `exit` vs function-level `return` exit statuses.
printf '%s\n' Hai
#exit 1
printf '%s\n' BYE
## `exit` terminates the whole script (status 0 unless given a value), so it
## cannot end a function early; a function reports its own exit status with
## `return` instead, which only leaves the function.
F() {
    printf '%s\n' Hai
    # Everything after `return` is unreachable — that is the point of the demo.
    return 2
    printf '%s\n' BYE
}
F
echo "Function exit status = $?"
| true
|
809ed15cc2e76ed850c99ea3d8566bf5062e333b
|
Shell
|
sourcegraph/sourcegraph
|
/dev/check/no-alpine-guard.sh
|
UTF-8
| 1,471
| 3.71875
| 4
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/usr/bin/env bash
# Fails CI when a raw "alpine:" base image reference is found in the tree;
# sourcegraph/alpine must be used instead (rationale in the message below).
set -e

echo "--- no alpine guard"

cd "$(dirname "${BASH_SOURCE[0]}")/../.."

# Join arguments into a find(1)-style "-path A -o -path B ..." expression.
path_filter() {
  local IFS=
  local withPath="${*/#/ -o -path }"
  echo "${withPath# -o }"
}

# git grep exits non-zero when nothing matches, which is the success case
# here, so errexit is suspended around it.
set +e
# Fix: the guidance printed below tells authors to mark allowed lines with
# "CI:ALPINE_OK", but only CI:LOCALHOST_OK was actually excluded from the
# search; honor both markers.
ALPINE_MATCHES=$(git grep -e '\salpine\:' --and --not -e '^\s*//' --and --not -e 'CI\:ALPINE_OK' --and --not -e 'CI\:LOCALHOST_OK' \
  ':(exclude)doc/admin/updates/docker_compose.md' \
  ':(exclude)docker-images/README.md' \
  ':(exclude)docker-images/alpine-3.12/' \
  ':(exclude)doc/batch_changes/' \
  ':(exclude)web/src/enterprise/batches/create/CreateBatchChangePage.tsx' \
  ':(exclude)*vendor*' \
  ':(exclude)*testdata*')
set -e

if [ -n "$ALPINE_MATCHES" ]; then
  echo
  echo "Error: Found instances of \"alpine:\":"
  # shellcheck disable=SC2001
  echo "$ALPINE_MATCHES" | sed 's/^/ /'
  cat <<EOF
Using 'alpine' is forbidden. Use 'sourcegraph/alpine' instead which provides:
- Fixes DNS resolution in some deployment environments.
- A non-root 'sourcegraph' user.
- Static UID and GIDs that are consistent across all containers.
- Base packages like 'tini' and 'curl' that we expect in all containers.
You should use 'sourcegraph/alpine' even in build stages for consistency sake.
Use explicit 'USER root' and 'USER sourcegraph' sections when adding packages, etc.
If the linter is incorrect, either:
1) add the comment "CI:ALPINE_OK" to the line where "alpine" occurs, or
2) add an exclusion clause in the "git grep" command in no-alpine-guard.sh
EOF
  echo "^^^ +++"
  exit 1
fi
| true
|
123d8f3d034b3c6c4dcfc504d7a2570a7cb31493
|
Shell
|
AlicLi/PHT
|
/cs259/quickstart.sh
|
UTF-8
| 1,605
| 3.5
| 4
|
[] |
no_license
|
#!/bin/bash
# Interactive quickstart for AWS F1 FPGA development.
#   min  - clone aws-fpga and source the Vitis build setup
#   yum  - same as min, preceded by yum update/upgrade and a nano install
#   fpga - runtime setup for FPGA instances, then the basic setup and a
#          test run of the vadd example
# Assumes $AWS_FPGA_REPO_DIR points at the desired checkout location.

# Print a horizontal rule of 100 '=' characters (replaces the fragile
# `echo -e ''$_{1..100}'\b='` brace-expansion/backspace trick).
rule() {
  printf '=%.0s' {1..100}
  printf '\n'
}

while true; do
  echo " setup options: "
  echo "[min] for minimal setup"
  echo "[yum] for minimal setup + update using yum"
  echo "[fpga] for minimal setup + fpga instances setup"
  read -p "[min/yum/fpga] " option
  echo " "
  case "$option" in
    "min" | "yum" )
      # yum branch does a system update first; min skips it.
      if [[ "$option" == "yum" ]]; then
        rule
        echo "Starting yum update"
        rule
        yes | sudo yum update
        yes | sudo yum upgrade
        yes | sudo yum install nano
      else
        echo " No yum installation "
      fi
      rule
      echo " Starting Basic setup "
      rule
      git clone https://github.com/aws/aws-fpga.git "$AWS_FPGA_REPO_DIR"
      cd "$AWS_FPGA_REPO_DIR"
      source vitis_setup.sh
      break;;
    "fpga" )
      rule
      echo " Starting FPGA instances setup "
      rule
      git clone https://github.com/aws/aws-fpga.git "$AWS_FPGA_REPO_DIR"
      cd "$AWS_FPGA_REPO_DIR"
      source vitis_runtime_setup.sh
      rule
      echo " Starting Basic setup "
      rule
      cd "$AWS_FPGA_REPO_DIR"
      source vitis_setup.sh
      # Give the runtime a moment to settle before running the example.
      sleep 1m
      ./host ./vadd.awsxclbin
      break;;
    * )
      echo " Please put in correct option. "
      echo "setup options: "
      echo "[min] for minimal setup"
      echo "[yum] for minimal setup + update using yum"
      echo "[fpga] for minimal setup + fpga instances setup";;
  esac
done
echo " Done"
| true
|
2c7e181db4f0e60a30b8483f7aec7dcf735d0304
|
Shell
|
deepglint/backbone-cmd
|
/template/backbone_project/app/app/gen_to_vulcand.sh
|
UTF-8
| 693
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
# Registers this service with a vulcand proxy:
# backend -> server -> frontend (route) -> rewrite rule.
#echo $1 $2 $3 $4
USERNAME=jiajiachen
NAME=bgmodelset2
VULCAND=http://192.168.5.46:8182
LOCAL=http://192.168.1.19:8004
# Leading '/' added so the frontend route matches the rewrite regexp below
# ("/${USERNAME}/${NAME}/(.*)"), which expects an absolute path — this is
# also what the commented-out variants in the original intended.
FRONTPATH='PathRegexp("/'$USERNAME/$NAME'.*")'
# Fix: "b_$USERNAME_$NAME" was parsed as "b_" + (unset variable USERNAME_)
# + $NAME, collapsing every id to e.g. "b_bgmodelset2". Brace the
# expansion once and reuse it for all ids.
SUFFIX="${USERNAME}_${NAME}"
vctl backend upsert -id "b_${SUFFIX}" --vulcan $VULCAND
vctl server upsert -id "svr_${SUFFIX}" -b "b_${SUFFIX}" --url=$LOCAL --vulcan $VULCAND
echo $FRONTPATH
vctl frontend upsert -id "f_${SUFFIX}" --route "$FRONTPATH" -b "b_${SUFFIX}" --vulcan $VULCAND
vctl rewrite upsert -f "f_${SUFFIX}" -id "r_${SUFFIX}" --regexp="/${USERNAME}/${NAME}/(.*)" --replacement='/$1' --vulcan $VULCAND
| true
|
c6b1fa36fc53376a76e06d3da75d47e806dd1c6d
|
Shell
|
roby89br/mattiols_hassio_repository
|
/bticino_smarter/run.sh
|
UTF-8
| 1,936
| 2.96875
| 3
|
[] |
no_license
|
#!/usr/bin/env bashio
# Home Assistant add-on entrypoint: reads the add-on options via bashio,
# renders the API and MQTT config files, starts the bticino bridge in the
# background, and forwards SIGTERM/SIGHUP for a clean shutdown.
CLIENT_ID=$(bashio::config 'client_id')
CLIENT_SECRET=$(bashio::config 'client_secret')
SUBSCRIPTION_KEY=$(bashio::config 'subscription_key')
REDIRECT_URL=$(bashio::config 'redirect_url')
API_USER=$(bashio::config 'api_user')
API_PASS=$(bashio::config 'api_pass')
C2C_SUBSCRIPTION=$(bashio::config 'subscribe_c2c')
MQTT_BROKER=$(bashio::config 'mqtt_broker')
MQTT_PORT=$(bashio::config 'mqtt_port')
MQTT_USER=$(bashio::config 'mqtt_user')
MQTT_PASS=$(bashio::config 'mqtt_pass')
MQTT_INTERVAL=$(bashio::config 'mqtt_interval')
API_PIDS=()

# Check Options data
if ! bashio::config.has_value 'client_id' || ! bashio::config.has_value 'client_secret' || ! bashio::config.has_value 'subscription_key' || ! bashio::config.has_value 'redirect_url' || ! bashio::config.has_value 'api_user' || ! bashio::config.has_value 'api_pass'; then
    bashio::exit.nok "No valid options!"
fi
if ! bashio::config.has_value 'mqtt_broker' || ! bashio::config.has_value 'mqtt_port' || ! bashio::config.has_value 'mqtt_user' || ! bashio::config.has_value 'mqtt_pass' ; then
    bashio::exit.nok "No valid options!"
fi

bashio::log.info "Setup config file..."

# Setup config
cat << EOF > config/config.yml
api_config:
  client_id: ${CLIENT_ID}
  client_secret: ${CLIENT_SECRET}
  subscription_key: ${SUBSCRIPTION_KEY}
  redirect_url: ${REDIRECT_URL}
  api_user: ${API_USER}
  api_pass: ${API_PASS}
  subscribe_c2c: ${C2C_SUBSCRIPTION}
EOF
cat << EOF > config/mqtt_config.yml
mqtt_config:
  mqtt_broker: ${MQTT_BROKER}
  mqtt_port: ${MQTT_PORT}
  mqtt_user: ${MQTT_USER}
  mqtt_pass: ${MQTT_PASS}
  mqtt_interval: ${MQTT_INTERVAL}
EOF

# Start API
python3 bticino.py &
# Fix: the array was declared as API_PIDS but appended to / read as the
# separate name API_PID; use the single name API_PIDS throughout.
API_PIDS+=($!)

# Terminate all tracked background processes and wait for them to exit.
function stop_api() {
    bashio::log.info "Kill Processes..."
    kill -15 "${API_PIDS[@]}"
    wait "${API_PIDS[@]}"
    bashio::log.info "Done."
}

trap "stop_api" SIGTERM SIGHUP

# Wait until all is done
wait "${API_PIDS[@]}"
| true
|
640a17628f83452f99a181a0664d0a5807e63c5e
|
Shell
|
stepleton/lisa-fig68k
|
/build.sh
|
UTF-8
| 5,400
| 3.34375
| 3
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
#!/bin/bash
# Build script for lisa-fig68k
#
# This script is a flimsy alternative to a Makefile or similar, and it has been
# created for use with the EASy68k command-line assembler distributed by Ray
# Arachelian (https://github.com/rayarachelian/EASy68K-asm). If you have an
# `asy68k` binary available, you must supply a path to it here:
ASM=../EASy68K-asm/ASM68Kv5.15.4/asy68k
# You will also need a copy of the `build_bootable_disk_image.py` script from
# the `bootloader_hd` project (https://github.com/stepleton/bootloader_hd).
# Supply a path to this script here:
BUILD_HD=../bootloader_hd/build_bootable_disk_image.py
# Finally, you'll need the `srec_cat` utility from the srecord project
# (http://srecord.sourceforge.net/). On recent Debian and similar Linux
# distributions (e.g. Ubuntu, Raspberry Pi OS), you can obtain this by running
# `apt install srecord`.
SREC_CAT=srec_cat
# After you've adapted the path specifications above, simply cd to the
# directory containing this script and then run the script without any
# arguments. In a few seconds you should end up with three compressed hard
# drive image files: forth.blu.zip, forth.dc42.zip, and forth.image.zip.
# It's possible that other 68000 assemblers that support Motorola syntax will
# work --- the code itself is fairly uncomplicated and uses no advanced
# features like macros. To adapt this build procedure to a different
# assembler, you will need to account for the following:
#
# * The assembler must produce a listing file called `forth.L68` (note capital
#   letter L). For each line of code, this listing file must list memory
#   addresses in hexadecimal, starting from the leftmost text column. At least
#   one space character must follow each address. Example:
#
#   0000092A 4E75 200 RTS
#
# * The assembler must emit an SRecord output file called `forth.S68` (note
#   capital letter S).
#
# Look a few lines below for the phrase "Edit here" to find the place where
# the assembler is invoked. Make changes there that will cause your assembler
# to produce the outputs just mentioned.
#
# ----------
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org>
#
# ----------
#
# Changelog:
#
# 2021/05/03: Initial release. Tom Stepleton <stepleton@gmail.com>, London
# make clean
rm -f forth.L68 forth.S68 forth.bin forth-squished.bin
rm -f forth.image forth.dc42 forth.blu
rm -f forth.image.zip forth.dc42.zip forth.blu.zip
# Assemble and look for errors in the listing
$ASM --macroexpand forth.x68 # XXX Edit here to use a different assembler XXX
# Echo the assembler's error-count summary line and, if any, the first
# ERROR with four lines of context. These greps are purely informational;
# their exit statuses are deliberately ignored (no `set -e` here).
grep 'errors detected' forth.L68
grep -C 4 -m 1 ERROR: forth.L68
# If the assembly succeded, then build drive images
# XXX You may have to modify this "success detection" heuristic here, too XXX
if grep -q 'No errors detected' forth.L68; then
# Convert ASM68K S-record output to binary
# (the -0x800 offset strips the load-address bias from the S-records)
$SREC_CAT -Disable-Sequence-Warnings forth.S68 \
--offset -0x800 -o forth.bin -binary
# Identify how much of that binary comes before the big run of zeros that
# make up the free space for user definitions
# (the INITDP symbol's address is read out of the listing file; cut takes
# the hex address from the leftmost column)
part1hexbytes=$(grep 'INITDP.*EQU' forth.L68 | cut -f 1 -d ' ')
part1size=$((0x$part1hexbytes - 0x800))
# Copy that part of the binary into a new file
dd if=forth.bin of=forth-squished.bin bs=$part1size count=1 status=none
# Identify the size and starting location of the binary that goes after
# big run of zeros
part2hexstart=$(grep 'kHiMemStart.*EQU' forth.L68 | cut -f 1 -d ' ')
part2hexend=$(grep 'kHiMemEnd.*EQU' forth.L68 | cut -f 1 -d ' ')
part2size=$((0x$part2hexend - 0x$part2hexstart))
part2skipbytes=$((0x$part2hexstart - 0x800))
# Tack it onto the binary data just under construction---no big run of zeros
dd if=forth.bin of=forth-squished.bin skip=$part2skipbytes \
bs=$part2size count=1 status=none \
iflag=skip_bytes oflag=append conv=notrunc
# Assemble the hard drive images
python3 -B $BUILD_HD forth-squished.bin -f raw -o forth.image
python3 -B $BUILD_HD forth-squished.bin -f dc42 -o forth.dc42
python3 -B $BUILD_HD forth-squished.bin -f blu -o forth.blu
# And package them into zip archives
for i in image dc42 blu; do zip forth.$i.zip forth.$i; done
fi
| true
|
ab76536b67d182c9e66a0885fc5796b57079465c
|
Shell
|
MEHColeman/dotfiles
|
/install/personal_mac_install
|
UTF-8
| 1,185
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
# Installs personal (non-work) macOS software via Homebrew, prompting for
# each formula ("keg") and cask individually.

echo "This script installs stuff I want on a home machine, but not a work machine, for macOS"

update_command="brew update"
install_command="brew install --cask"
clean_command="brew cleanup"

echo "Installing with $install_command"

# both brew and apt-get have the same name
declare -a casks=(
  "affinity-photo"
  "alfred"
  "arduino"
  "arq"
  "backblaze"
  "daisydisk"
  "discord"
  "divvy"
  "dropbox"
  "dymo-label"
  "emby-server"
  "fing"
  "iina"
  "gemini"
  "inkscape"
  "ledger-live"
  "makemkv"
  "mimestream"
  "monero-wallet"
  "mullvadvpn"
  "multifirefox"
  "nault"
  "obs"
  "omnifocus"
  "omniplan"
  "onyx"
  "plex-media-server"
  "spotify"
  "telegram"
  "transmission"
  "vlc"
  "zoom"
)

declare -a kegs=(
  "ffmpeg"
  "yt-dlp"
)

# prompt_install NAME CMD... : ask y/n for NAME, then run "CMD... NAME".
# Replaces the two near-identical eval loops of the original; running the
# command directly avoids eval on interpolated strings.
prompt_install() {
  local name=$1
  shift
  read -p "Install ${name} ? (y/n) " -n 1 -r
  echo  # read -n 1 does not emit a newline after the keypress
  if [[ $REPLY =~ ^[Yy]$ ]]; then
    echo "Installing ${name}"
    "$@" "$name"
  fi
}

eval "$update_command"

for keg in "${kegs[@]}"; do
  prompt_install "$keg" brew install
done

for cask in "${casks[@]}"; do
  prompt_install "$cask" brew install --cask
done

eval "$clean_command"
echo "Done"
| true
|
927f5c09211c05942b18a831a740204c926e90c2
|
Shell
|
neuronsimulator/nrn
|
/src/nrnmpi/mkdynam.sh
|
UTF-8
| 1,090
| 2.890625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
# Generates the dynamic-loading glue for the nrnmpi_* API from the
# declarations in nrnmpidec.h. Three files are produced:
#   nrnmpi_dynam_wrappers.inc - wrapper functions forwarding through p_* pointers
#   nrnmpi_dynam.h            - #defines renaming each nrnmpi_* symbol to f_nrnmpi_*
#   nrnmpi_dynam_cinc         - static p_* pointer declarations plus a
#                               name/pointer-slot table (ftable) for dlsym lookup
# Collect every declared nrnmpi_* function name from the header.
names=`sed -n '
/extern /s/extern [a-z*]* \(nrnmpi_[a-zA-Z0-9_]*\)(.*);/\1/p
' nrnmpidec.h`
#generate nrnmpi_dynam_wrappers.inc
# The first sed turns each extern declaration into a wrapper definition;
# '@' is a placeholder for newlines, expanded by tr below. The second sed
# strips parameter types from the forwarded argument list so that the call
# through (*p_name)(...) passes only the parameter names.
sed -n '
/extern void/s/extern \(void\) \(nrnmpi_[a-zA-Z0-9_]*\)\(.*\);/\1 \2\3 {@ (*p_\2)\3;@}/p
/extern [^v]/s/extern \([a-z*]*\) \(nrnmpi_[a-zA-Z0-9_]*\)\(.*\);/\1 \2\3 {@ return (*p_\2)\3;@}/p
' nrnmpidec.h | tr '@' '\n' | sed '
/p_nrnmpi/ {
s/, [a-zA-Z0-9_*]* /, /g
s/)([a-zA-Z_0-9*]* /)(/
s/char\* //g
s/char\*\* //g
s/std::string& //g
}
'> nrnmpi_dynam_wrappers.inc
#generate nrnmpi_dynam.h
(
echo '
#ifndef nrnmpi_dynam_h
#define nrnmpi_dynam_h
/* generated by mkdynam.sh */
#if NRNMPI_DYNAMICLOAD
'
for i in $names ; do
echo "#define $i f_$i"
done
echo '
#endif /* NRNMPI_DYNAMICLOAD */
#endif
'
) > nrnmpi_dynam.h
#generate nrnmpi_dynam_cinc
(
sed -n '
/extern/s/extern \([a-z*]*\) \(nrnmpi_[a-zA-Z0-9_]*\)\(.*\);/static \1 (*p_\2)\3;/p
' nrnmpidec.h
echo '
static struct {
const char* name;
void** ppf;
} ftable[] = {'
for i in $names ; do
echo " \"f_$i\", (void**)&p_$i,"
done
echo ' 0,0
};
'
) > nrnmpi_dynam_cinc
| true
|
448ebe5023f283cde26444c37701d17642a31d48
|
Shell
|
h908714124/my-scripts
|
/misc/padding
|
UTF-8
| 602
| 3.984375
| 4
|
[] |
no_license
|
#!/bin/bash
# Pads a file with 100 random "PADDING <base64>" lines before and after its
# content.
#
# Usage: padding [-x|--noclobber] FILE
#   By default FILE is modified in place; with -x/--noclobber the padded
#   copy is written to FILE_OUT instead.
set -e

# Append one "PADDING <23 random bytes, base64>" line to the file named $1.
addLine () {
  echo -n "PADDING " >>"$1"
  openssl rand -base64 23 >>"$1"
}

CLOBBER=Y
if [[ "-x" == "$1" ]] || [[ "--noclobber" == "$1" ]]; then
  CLOBBER=N
  shift
fi

FILE="$1"
HEAD=HEAD
OUT=OUT

if [[ -z "$FILE" ]]; then
  echo "Argument: file"
  exit 1
fi

# Fix: addLine only ever appends, so a leftover ${FILE}_HEAD from an
# interrupted earlier run would keep accumulating padding; start clean.
rm -f "${FILE}_${HEAD}"
cp "$FILE" "${FILE}_${OUT}"

# Build the padding header and the trailing padding in parallel.
for i in {1..100}; do
  addLine "${FILE}_${HEAD}"
  addLine "${FILE}_${OUT}"
done

# header + (original content + trailing padding) -> final padded file
cat "${FILE}_${OUT}" >>"${FILE}_${HEAD}"
mv "${FILE}_${HEAD}" "${FILE}_${OUT}"

if [[ "Y" == "$CLOBBER" ]]; then
  mv "${FILE}_${OUT}" "$FILE"
  echo "$FILE was modified"
else
  echo "${FILE}_${OUT} created"
fi
| true
|
4ef19cdce5e37c8b7eeb033935d01c6a1806cb0e
|
Shell
|
richroslund/ark-deployer
|
/app/process-explorer.sh
|
UTF-8
| 470
| 3.171875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Start/stop/restart helpers for the ARK explorer process.
# Relies on helpers defined elsewhere: heading, success, parse_explorer_args,
# and the EXPLORER_PATH variable.

# Stop any running explorer, then launch it detached in the background.
process_explorer_start()
{
    process_explorer_stop

    heading "Starting Explorer..."
    parse_explorer_args "$@"
    # Fix: quote the path and bail out on failure so we never run
    # "npm run bridgechain" from whatever directory we happened to be in.
    cd "$EXPLORER_PATH" || return 1
    nohup npm run bridgechain &
    success "Start OK!"
}

# Kill all npm processes; '|| true' keeps this a no-op when none is running.
process_explorer_stop()
{
    heading "Stopping..."
    killall npm || true
    success "Stop OK!"
}

# Full stop + start cycle, forwarding any CLI arguments to both phases.
process_explorer_restart()
{
    heading "Restarting..."
    process_explorer_stop "$@"
    process_explorer_start "$@"
    success "Restart OK!"
}
| true
|
b389c0537edde067c8059f4e5f1ed80f7251b0e6
|
Shell
|
kowalik-lukasz/bash-labs
|
/lab05/script.sh
|
UTF-8
| 1,363
| 3.4375
| 3
|
[] |
no_license
|
# ! /bin/bash
# a) print info about processes that consume less than 10% of CPU.
echo "a)"
ps aux | awk ' $3 < 10 {print "Uzytkownik " $1 " ma uruchomiony proces o PID " $2 " - CPU = " $3 ", MEM = " $4} '
# b) print if COMMAND fields contain "/usr/".
echo "b)"
# Fix: the awk regex /usr/ matches the bare substring "usr" anywhere;
# escape the slashes so we really test for "/usr/" as the comment says.
ps aux | awk ' {for(i=11; i<=NF; i++) {if($i ~ /\/usr\//) print "Uzytkownik " $1 " ma uruchomiony proces COMMAND: " $i}} '
# c) Sum of CPU and memory usage.
echo "c)"
ps aux | awk ' NR>1{cpu_sum += $3; mem_sum += $4} END {print "Suma CPU: " cpu_sum ", Suma MEM: " mem_sum} '
# The results differ from the ones obtained using top command probably because the ps aux command displays the numerical values to one decimal place.
# d) Count processes per user.
echo "d)"
ps aux | awk ' NR>1{users[$1]++} END {for (key in users) print "Uzytkownik " key " lacznie ma uruchomionych " users[key] " procesow"} '
# e) Find users with min and max number of processes.
echo "e)"
# Fix: the original BEGIN block seeded max/max_user from $6/$2, but fields
# are empty in BEGIN (no record has been read yet). Seed with sentinels
# instead, and force numeric comparison with +0 so counts are not compared
# as strings.
ps aux | awk ' \
NR>1{users[$1]++} \
END {for (key in users) print "Uzytkownik " key " lacznie ma uruchomionych " users[key] " procesow"} \
' | awk '\
BEGIN{min=99999999; max=-1} \
{if($6+0 > max){max = $6+0; max_user = $2}; if($6+0 < min){min = $6+0; min_user = $2}} \
END{print "Uzytkownik " max_user " ma najwiecej uruchomionych procesow: " max ". Uzytkownik " min_user " ma najmniej uruchomionych procesow: " min}\
'
| true
|
c0ab26eaae9a367aca2eb9dcfaddb9c7e0e19252
|
Shell
|
mdogan/MicroRaft
|
/tools/build-site.sh
|
UTF-8
| 934
| 3.78125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Builds the MicroRaft website: generates javadoc via Maven, builds the
# mkdocs site under site-src/, then copies the javadoc into the site at
# javadoc/<version>/.
#
# Usage: build-site.sh microraft-version
if [ -z "$1" ]; then
    echo "usage: build-site.sh microraft-version"
    exit 1
fi

# Error helpers: 'try CMD...' runs CMD and aborts the script on failure.
yell() { echo "$0: $*" >&2; }
die() {
    yell "$*"
    exit 1
}
try() { "$@" || die "cannot $*"; }

MICRORAFT_VERSION=$1
POM_FILE=pom.xml
JAVADOC_SOURCE=microraft/target/site/apidocs
SITE_FILES_DIR=site-src
SITE_DIR=${SITE_FILES_DIR}/site
JAVADOC_TARGET=${SITE_DIR}/javadoc/${MICRORAFT_VERSION}
# The mkdocs binary can be overridden through the MKDOCS_ENV variable.
[[ -z "${MKDOCS_ENV}" ]] && MKDOCS_CMD='mkdocs' || MKDOCS_CMD="${MKDOCS_ENV}"
BUILD_SITE_CMD="${MKDOCS_CMD} build"

if [ ! -f "$POM_FILE" ]; then
    echo "Please run this script on the root directory of the MicroRaft repository."
    exit 1
fi

try mvn clean javadoc:javadoc
try test -d "${JAVADOC_SOURCE}"

rm -rf "$SITE_DIR"
try cd "$SITE_FILES_DIR"
try $BUILD_SITE_CMD
# Fix: returning to the repo root is as critical as entering site-src —
# every later path is relative to the root — so guard it with try as well.
try cd ..
try test -d "${SITE_DIR}"

try mkdir -p "$JAVADOC_TARGET"
try cp -avr "${JAVADOC_SOURCE}"/* "$JAVADOC_TARGET"
try test -f "${JAVADOC_TARGET}/index.html"

ls -l "$SITE_DIR"
echo "All good."
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.