blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
a43b0181645692f3341856b250f5184953279b24
|
Shell
|
solo-yolo/glowroot-benchmark
|
/docker-entrypoint.sh
|
UTF-8
| 514
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/bash
# Docker entrypoint for the Glowroot benchmark image.
#
# If any arguments are supplied they are exec'd as-is (standard entrypoint
# override pattern); otherwise the JMH benchmark suite is run, optionally
# unpacking a Glowroot distribution first.
#
# Environment:
#   JVM_ARGS          - JVM args for the forked benchmark JVMs (default: -Xmx1g -Xms1g)
#   HARNESS_JVM_ARGS  - JVM args for the JMH harness JVM itself
#   JMH_ARGS          - extra JMH command-line arguments
#   GLOWROOT_DIST_ZIP - optional path to a glowroot dist zip to unpack
set -e

# Allow overriding the entrypoint: run any supplied command instead.
# (was `[[ $@ ]]`, which mishandles empty/multiple arguments)
if [[ $# -gt 0 ]]; then
  exec "$@"
fi

# Default heap settings unless the caller provided JVM_ARGS.
: "${JVM_ARGS:=-Xmx1g -Xms1g}"

if [[ -n "$GLOWROOT_DIST_ZIP" ]]; then
  rm -rf glowroot
  unzip "$GLOWROOT_DIST_ZIP"
fi

# Disable thread-stat capture so it does not skew benchmark results.
echo '{"transactions":{"captureThreadStats":false}}' > glowroot/config.json

# -rf json writes jmh-result.json; -prof gc enables the GC profiler.
# HARNESS_JVM_ARGS / JMH_ARGS are intentionally unquoted: they hold
# whitespace-separated option lists.
java $HARNESS_JVM_ARGS -jar benchmarks.jar $JMH_ARGS -rf json -prof gc \
  -jvmArgs "$JVM_ARGS -Djava.security.egd=file:/dev/urandom -Djmh.shutdownTimeout=0 -Djmh.shutdownTimeout.step=0"
java -cp benchmarks.jar org.glowroot.benchmark.ResultFormatter jmh-result.json
| true
|
96491eeaab50ed408e9679b1bda833451230f569
|
Shell
|
maxzo/TP1-SO-2012
|
/eje2.sh
|
UTF-8
| 271
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# Greet the current user and print basic session information:
# login name, date, time, terminal, and the list of logged-in users.

usuario=$(whoami)
fecha=$(date +"%d/%m/%Y")
hora=$(date +"%T")
terminal=$(tty)

echo "Bienvenido, usuario: $usuario"
echo "Usted se ha conectado el día: $fecha"
echo "A la hora: $hora"
echo "En la terminal: $terminal"
echo "Se encuentran conectados en este momento, los siguientes usuarios:"
who | awk '{ print $1 }'
| true
|
eeb747c265ea507b00d751691ed376a64f07b223
|
Shell
|
mequ/Hack
|
/sshgateway.sh
|
UTF-8
| 601
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# Open an SSH tunnel through a gateway host and connect to the target
# server ($1) through it, killing the tunnel afterwards.
#
# Usage: sshgateway.sh TARGET_HOST

USER="root"
LPORT="6522"                 # local port forwarded to TARGET:22
GATEWAY="217.218.62.250"
#DOMAIN="xamin.ir"

if [ -z "$1" ]; then
  echo "Usage: $0 TARGET_HOST" >&2
  exit 1
fi

# -f: background after auth; -N: no remote command; -L: local forward.
COMMAND="ssh -fN -L $LPORT:$1:22 root@$GATEWAY"
echo "Connect to Gate way $GATEWAY"
# Run the stored command (word-splitting is intentional here) and check
# its status directly; the original used a bare 'exit' on failure, which
# exited with status 0.
if $COMMAND; then
  echo "CONNECTION ESTABLISHED."
else
  echo "CAN NOT CONNECT TO GATEWAY"
  exit 1
fi
#sleep 1

# Find the PID of the backgrounded tunnel so it can be killed later
# (pgrep -f replaces the old ps|grep|grep -v grep|awk pipeline).
SSHPID=$(pgrep -f "$COMMAND")
echo "Gate Way PID = $SSHPID"
echo "connect to server $1 ..."
# Host-key checking disabled: localhost:$LPORT presents the target's key,
# which would otherwise collide with earlier known_hosts entries.
ssh "$USER@localhost" -p "$LPORT" -oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null
echo "Closing gate way ..."
kill $SSHPID
| true
|
a92ae0dd66292c8bc406d95ba41b5c5b95f92acc
|
Shell
|
eagafonov/supermake
|
/tools/superm.sh
|
UTF-8
| 183
| 2.84375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Wrapper that invokes the supermake Makefile with SUPERMAKE_HOME pointing
# at the project root (the parent of this script's directory).
set -x
set -u
set -e

# Trace the arguments (quoted so arguments containing spaces survive;
# the original's bare $@ re-split them).
echo "$@"

# Resolve the real path of this script, then go one level up.
SUPERMAKE_HOME=$(readlink -f "$(dirname "$(readlink -f "$0")")/..")

# Forward all user arguments to make unchanged.
make -f "${SUPERMAKE_HOME}/tools/Makefile" SUPERMAKE_HOME="${SUPERMAKE_HOME}" "$@"
| true
|
dfcafd7c4c4c1f869ef8c229ad7c5cd47c251b61
|
Shell
|
Donaschmi/LINGI2142-2019-2020
|
/scripts/template/_topo.mako
|
UTF-8
| 938
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
# Mako template (_topo.mako) rendering a bash topology-setup script.
# Lines starting with '%' are Mako control flow and ${...} are Mako
# substitutions filled from the 'data' template variable — presumably a
# list of router dicts with "name", "id" and "interfaces" keys (TODO
# confirm against the renderer that supplies 'data').

# Group number
GROUPNUMBER=8
# Node configs
CONFIGDIR=frrouting_cfg
# boot script name
BOOT="boot"
# startup script name
STARTUP="start"
PREFIXBASE="fde4:8"
PREFIXLEN=32
# You can reuse the above two to generate ip addresses/routes, ...

# This function describes the network topology that we want to emulate
function mk_topo {
    echo "@@ Adding links and nodes"
    # One add_link per non-virtual link; the id comparison emits each
    # pair only once (only when this router's id is the smaller).
%for router in range(len(data)):
    %for inter in data[router]["interfaces"]:
        % if inter["virtual"] == "False":
            % if data[router]["id"]<inter["id"]:
    # ${data[router]["name"]} links to ${inter["name"]}
    add_link ${data[router]["name"]} ${inter["name"]}
            % endif
        % endif
    %endfor
%endfor
    echo "@@Adding bridges nodes"
    # Virtual interfaces become bridge nodes instead of point-to-point links.
%for router in range(len(data)):
    %for inter in data[router]["interfaces"]:
        % if inter["virtual"]=="True":
    bridge_node ${data[router]["name"]} ${inter["bridge"]} ${inter["interface"]}
        % endif
    %endfor
%endfor
}
| true
|
b723f097a2b6b71af6e24564f71f571a8da28fde
|
Shell
|
adi06/cloudpi
|
/src/main/resources/shell_scripts/list_data.sh
|
UTF-8
| 486
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
# List every object in an S3 bucket and print each key together with the
# object's contents (downloaded to /tmp).

bucket_name=s3-cloudpi

# Collect all object keys; --output text yields a whitespace-separated
# list, so the command substitution is intentionally unquoted to split it
# into array elements (assumes keys contain no whitespace — TODO confirm).
list_keys=( $(aws s3api list-objects --bucket "${bucket_name}" --query 'Contents[].{Key: Key}' --output text) )

#get objects
if [ ${#list_keys[@]} -ne 0 ]; then
  for key in "${list_keys[@]}"
  do
    if [ "$key" != "None" ]; then
      # Fetch the object body to /tmp, then print "KEY CONTENTS".
      aws s3api get-object --bucket "${bucket_name}" --key "$key" "/tmp/$key" > /dev/null
      echo "$key" "$(cat "/tmp/$key")"
    else
      # list-objects prints the literal "None" for an empty bucket.
      echo "bucket is empty"
    fi
  done
else
  echo "bucket is empty"
fi
| true
|
a610dd0b0066a280fbdb5d900647450905e23122
|
Shell
|
YagamiShadow/sectest
|
/setup_docker.sh
|
UTF-8
| 738
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/bash
# Install docker-compose and configure sudoless docker access for $USER.

# Warn (but continue) when run as root: group-membership changes below
# are meant for a regular user.
if [ "$EUID" -eq 0 ]; then
  echo >&2 "[WARNING] You should not run this script as root if you want to setup a sudoless docker installation"
fi

echo "[INFO ] Installing docker..."
sudo apt install docker-compose -y || { echo >&2 "[ERROR] Failed to install docker-compose"; exit 1; }

echo "[INFO ] Creating docker group"
sudo groupadd docker

echo "[INFO ] Adding user ${USER} to docker group"
sudo usermod -aG docker "$USER" || { echo >&2 "[ERROR] Failed to add user ${USER} to docker group"; exit 1; }

echo "[INFO ] Restarting docker service"
# BUG FIX: the error handler was previously attached to the echo above
# (which always succeeds) instead of the restart command itself.
sudo service docker restart || { echo >&2 "[ERROR] Failed to restart docker service"; exit 1; }

if [ "$EUID" -ne 0 ]; then
  echo "[INFO ] Logging into docker group"
  # newgrp starts a new shell with the docker group active immediately,
  # avoiding the need to log out and back in.
  newgrp docker
fi
| true
|
89f189c82fbdd5d4dde17601c0f983396a886646
|
Shell
|
zakkudo/polymer-3-starter-project
|
/scripts/demo.sh
|
UTF-8
| 235
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
# Launch Storybook for the project's demo, seeding an empty jest test
# results file that the storybook jest addon reads at startup.
set -e

PROJECT_DIR=$(git rev-parse --show-toplevel)
BIN_DIR=$(npm bin)
STORYBOOK="$BIN_DIR/start-storybook"

# The jest-results addon requires this file to exist before startup.
# (paths quoted: a repo path containing spaces previously broke this)
echo '{}' > "$PROJECT_DIR/.jest-test-results.json"

# -p: port; -c: config dir; -s: static file root; extra args pass through.
"$STORYBOOK" -p 6006 -c "$PROJECT_DIR/.demo" -s "$PROJECT_DIR" "$@"
| true
|
2a075a845b0db9fcb17c9cdc4968d1ac0ec16087
|
Shell
|
kula/one-offs
|
/yubi-env/yubi-env.sh
|
UTF-8
| 143
| 2.53125
| 3
|
[] |
no_license
|
# Put this in your .bashrc
# Assumes you have gopass installed
#
# yubi-env ENTRY: read the gopass entry named ENTRY and export every
# variable listed on its "_export NAME=VALUE" lines into the current shell.
yubi-env() {
    local secret_path=$1
    # awk turns each "_export NAME=VALUE" line into "export NAME=VALUE";
    # the unquoted eval then applies them all.
    eval $(gopass "$secret_path" | awk '/^_export/ {print "export", $2}')
}
| true
|
29cea5776640ceb9f7f6829abdfa2a338814faee
|
Shell
|
ChairsDaily/pyiface
|
/deploy.sh
|
UTF-8
| 346
| 2.5625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Tag, package, and push a pyiface release.
# -e moved from the shebang into the body so it also applies when the
# script is invoked as "bash deploy.sh" (shebang options are lost then).
set -e

export LC_ALL=en_US.UTF-8
export LANG=en_US.UTF-8

# NOTE(review): assumes pyiface.version() prints the version to stdout —
# verify; otherwise $version is empty and the tag becomes just "v".
version=$(python -c 'import pyiface; pyiface.version()')
echo "Deploying version $version"

# git related commands
git tag -s -m "Version $version" "v$version"
python2 setup.py sdist
git add .
git commit -a
git push origin master # only push master branch

# Clean build artifacts.
rm -rf build/ dist/
| true
|
dd9ce179ad0072aca7698680eaf2bf0a9f0b6e1f
|
Shell
|
OpenMandrivaAssociation/jflex
|
/create-tarball.sh
|
UTF-8
| 421
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# Download a jflex release tarball, strip prebuilt jars plus the bundled
# java_cup sources and examples, and repackage it as a "clean" tarball
# suitable for distribution packaging.
#
# Usage: ./create-tarball.sh VERSION

if [ $# -ne 1 ]; then
  echo "Usage: ./create-tarball.sh VERSION"
  exit 1
fi
VERSION=${1}
NAME="jflex"

# Abort early when the download or extraction fails so we never package
# a half-extracted tree.
wget "http://jflex.de/${NAME}-${VERSION}.tar.gz" || exit 1
tar xvf "${NAME}-${VERSION}.tar.gz" || exit 1

# Subshell keeps the cd local.
(
  cd "${NAME}-${VERSION}"
  # Remove prebuilt jars and sources that must not be redistributed.
  find . -name "*.jar" -delete
  rm -Rf src/main/java/java_cup/ examples/
)

tar czvf "${NAME}-${VERSION}-clean.tar.gz" "${NAME}-${VERSION}"
rm -Rf "${NAME}-${VERSION}.tar.gz" "${NAME}-${VERSION}"
| true
|
03d5254e6b373f3ada867543413bc032f458c26d
|
Shell
|
AdamSLevy/pkgbuilds
|
/telegraf/PKGBUILD
|
UTF-8
| 1,952
| 2.53125
| 3
|
[] |
no_license
|
# Maintainer: Nicolas Leclercq <nicolas.private@gmail.com>
# Maintainer: Adam S Levy <adam@aslevy.com>
# PKGBUILD for telegraf 1.9.0, built from the upstream git tag with the
# pre-go-modules "dep" vendoring workflow.

pkgname='telegraf'
pkgver='1.9.0'
pkgrel='1'
pkgdesc='Server-level metric gathering agent for InfluxDB'
arch=('i686' 'x86_64' 'armv6h' 'armv7h')
url='https://github.com/InfluxData/telegraf'
license=('MIT')
makedepends=('go' 'git')
provides=('telegraf')
backup=('etc/telegraf/telegraf.conf')
install="telegraf.install"
options=('emptydirs')
source=("git+https://github.com/influxdata/telegraf#tag=$pkgver"
        'telegraf.sysusers'
        'telegraf.tmpfiles')
sha256sums=('SKIP'
            '31c038721ff674519a506418b0d70045e2c51115a78a5939743886c44ef5e1bb'
            '36b309e79422ddbaf6067f814c8bd69bd842cc662c3c9dbbf507ee5716282779')

# Build telegraf inside a throwaway GOPATH rooted at $srcdir.
build()
{
    export GOPATH="$srcdir"
    export GOBIN="$GOPATH/bin"
    export PATH="$GOBIN:$PATH"
    # Place the checkout at its canonical Go import path.
    mkdir -p "$GOPATH/src/github.com/influxdata"
    cp -af "$srcdir/telegraf" "$GOPATH/src/github.com/influxdata/"
    cd "$GOPATH/src/github.com/influxdata/telegraf"
    echo "Downloading dependencies"
    go get -v -u github.com/golang/dep/cmd/dep
    dep ensure -v -vendor-only
    # Embed version/commit/branch into the binary via -X ldflags.
    revision=`git rev-parse HEAD`
    version=`git describe --tags`
    echo "Building ${pkgname} version=$version commit=$revision branch=master"
    _LDFLAGS="-X main.version=$version -X main.commit=$revision -X main.branch=master"
    go install -ldflags="$_LDFLAGS" ./...
}

# Install the binary, default config, and systemd/sysusers/tmpfiles units.
package()
{
    cd "$srcdir"
    install -Dm644 telegraf.sysusers "$pkgdir/usr/lib/sysusers.d/telegraf.conf"
    install -Dm644 telegraf.tmpfiles "$pkgdir/usr/lib/tmpfiles.d/telegraf.conf"
    cd "$GOBIN"
    install -Dsm755 telegraf "$pkgdir/usr/bin/telegraf"
    cd "$GOPATH/src/github.com/influxdata/telegraf"
    install -d "$pkgdir/etc/telegraf/telegraf.d/"
    install -Dm644 scripts/telegraf.service "$pkgdir/usr/lib/systemd/system/telegraf.service"
    install -Dm644 etc/telegraf.conf "$pkgdir/etc/telegraf/telegraf.conf"
    install -Dm644 LICENSE "$pkgdir/usr/share/licenses/telegraf/LICENSE"
}
| true
|
2ac84c9bcd75f5dca6462c934fafff08b7578e08
|
Shell
|
cotyembry/AddingUsersUnix
|
/modifyUsers.sh
|
UTF-8
| 4,382
| 3.375
| 3
|
[] |
no_license
|
#!/usr/local/bin/bash
#/bin/bash
#Author: John Coty Embry
# Date: 11-10-2016
#Dependencies:
# 3 files: /home/faculty/mkt/unix_admin/cs_roster.txt
# /home/faculty/mkt/unix_admin/active_cs.txt,
# /etc/passwd
# input: /home/faculty/mkt/unix_admin/active_cs.txt
#output: ./modified_users.txt
#-------
#Name Changes
#Note: May include both username and GECOS field modifications
#Major Changes
#Note: May include changing to or from Computer Science major and should result in appropriate placement of home directory.
#-------
# Generates modified_users.txt: a list of usermod commands that would bring
# /etc/passwd into agreement with active_cs.txt (names) and cs_roster.txt
# (major => home-directory placement).  Nothing is executed here; the file
# is meant to be reviewed/run afterwards.
majorCodeForComputerScience=0510
majorDirectory='/home/STUDENTS/majors'
nonmajorDirectory='/home/STUDENTS/nonmajors'
echo -n '' > modified_users.txt #to clear the file out
# Outer loop: one line per active student, colon-delimited as
# username:fullname:ecuid (inferred from the cut fields below).
(
while read line; do
    isMajor=0
    userAlreadyExisted=0
    ecuid=$(echo $line | cut -d ':' -f3)
    fullname=$(echo $line | cut -d ':' -f2)
    username=$(echo $line | cut -d ':' -f1)
    etcCurrentDirectory=''
    #echo "usermod -l newUsername oldUsername"
    #usermod –c “User for transfer files” transfer_user
    #has the name changed? Time to find out
    # Middle loop: scan /etc/passwd; GECOS (field 5) is "fullname+ecuid".
    (
    while read line2; do
        userFullName=$(echo $line2 | cut -d ':' -f5 | cut -d '+' -f1)
        userECUId=$(echo $line2 | cut -d ':' -f5 | cut -d '+' -f2)
        etcUsername=$(echo $line2 | cut -d ':' -f1)
        if [ "$ecuid" == "$userECUId" ]; then
            userAlreadyExisted=1
            #now that I have located the user in the file, time to see if the name has changed
            if [ "$fullname" != "$userFullName" ]; then
                #if here then the users full name has changed and needs to be updated
                #here I will assume that the active_cs.txt file has the sayso on which
                #name is more current so I will use the full name from the active_cs.txt file
                # NOTE(review): this usermod -c has no username operand —
                # the generated command is incomplete as written.
                echo "usermod -c \"${fullname}+${ecuid}\"" >> modified_users.txt
            fi
            #now see if their username has changed
            if [ "$username" != "$etcUsername" ]; then
                #the username has changed and needs to be updated
                echo "usermod -l $username $etcUsername" >> modified_users.txt
            fi
            #also I need to account for:
            #May include changing to or from Computer Science major and should result in appropriate placement of home directory
            #I have to do the next while loop to see if the person is an a major or non major
            # Inner loop: cs_roster.txt is pipe-delimited; field 4 = ecuid,
            # fields 5/6 = primary/secondary major codes.
            # NOTE(review): isMajor is set inside this subshell; it is only
            # visible here because ( ... ) inherits and then discards the
            # variable — the value does survive to the checks below because
            # they are in the same middle-loop subshell, but it is reset per
            # passwd line.
            (
            while read line3; do
                csRosterUserId=$(echo $line3 | cut -d '|' -f4)
                majorCode=$(echo $line3 | cut -d '|' -f5)
                secondMajorCode=$(echo $line3 | cut -d '|' -f6)
                if [ "$ecuid" == "$csRosterUserId" ]; then
                    #now that I've found the person in this file, time to compare and see if this student is a major or nonmajor
                    if [ "$majorCode" == "$majorCodeForComputerScience" ]; then
                        isMajor=1
                    elif [ "$secondMajorCode" == "$majorCodeForComputerScience" ]; then
                        isMajor=1
                    else
                        #if they dont have the major code declared in the major or secondary major parts then their directory needs to be /home/STUDENTS/nonmajors/
                        isMajor=0
                    fi
                fi
            done
            ) < /home/faculty/mkt/unix_admin/cs_roster.txt #change this to point to /home/faculty/mkt/cs_roster.txt after done with the assignment
            #awesome, I have flags now to tell me if the student is a major or not
            #now to use them
            # Fourth '/'-separated component of the home dir, e.g. "majors"
            # in /home/STUDENTS/majors/jdoe.
            etcCurrentDirectory=$(echo $line2 | cut -d ':' -f5 | cut -d '/' -f4)
            if [ "$isMajor" == "1" ]; then
                if [ "majors" != "$etcCurrentDirectory" ]; then
                    #if they are a major but their current directory is not in the majors directory
                    #I need to change their directory
                    #the -m moves the content of their home directory also
                    # NOTE(review): missing the username operand here too.
                    echo "usermod -m -d /home/STUDENTS/majors/${username}" >> modified_users.txt
                fi
            elif [ "$isMajor" == "0" ]; then
                if [ "majors" == "$etcCurrentDirectory" ]; then
                    #if the student is not a major and they have the majors directory, they need to be moved to the nonmajors directory
                    echo "usermod -m -d /home/STUDENTS/nonmajors/${username}" >> modified_users.txt
                fi
            fi
        fi
    done
    ) < /etc/passwd #change this to point to /etc/passwd after done with the assignment
    #Im done with the flow for users that already existed
    #If I wanted to address any issues for users that didnt already exist
    #in the etc/passwd file I could do that here
done
) < /home/faculty/mkt/unix_admin/active_cs.txt #change this to point to /home/faculty/mkt/active_cs.txt after done with the assignment
| true
|
c95822d426ef344222abb31b2f0a96246f64b1ec
|
Shell
|
CSUNetSec/proddle
|
/docs/scripts/auto-yogi-alexa.sh
|
UTF-8
| 942
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/bash
# Download the Alexa top-1M list and register the first COUNT domains as
# HttpGet measurement operations via yogi.
#
# Usage: $0 USERNAME PASSWORD COUNT

# BUG FIX: the usage message documents three arguments but the original
# check only required one ($2 and $3 were then silently empty).
if [ $# -ne 3 ]; then
    echo "Usage: $0 USERNAME PASSWORD COUNT"
    exit 1
fi

USERNAME=$1
PASSWORD=$2
MAX_COUNT=$3

SSL_CA_CERT="/etc/ssl/mongodb/cacert.pem"
SSL_CERT="/etc/ssl/mongodb/proddle.crt"
SSL_KEY="/etc/ssl/mongodb/proddle.key"
MONGO_HOST="mongo1.netsec.colostate.edu"
TMP_DIR="/tmp"

#download alexa top 1 million domains
wget -O "$TMP_DIR/top-1m.csv.zip" http://s3.amazonaws.com/alexa-static/top-1m.csv.zip
unzip -p "$TMP_DIR/top-1m.csv.zip" > "$TMP_DIR/top-1m.csv"

#read file
COUNT=0
while read -r LINE
do
    #parse domain (lines are "rank,domain")
    DOMAIN=$(echo "$LINE" | cut -f 2 -d ',')

    #add with yogi
    ./../../yogi/target/debug/yogi -c "$SSL_CA_CERT" -e "$SSL_CERT" -k "$SSL_KEY" -u "$USERNAME" -p "$PASSWORD" -I "$MONGO_HOST" operation add HttpGet "$DOMAIN" -t core -t http -p "timeout|30"

    #check counter (modern $(( )) replaces the deprecated $[ ] form)
    COUNT=$((COUNT + 1))
    if [ "$COUNT" -eq "$MAX_COUNT" ]; then
        break
    fi
done < "$TMP_DIR/top-1m.csv"

#perform some cleanup
rm "$TMP_DIR"/top-1m.csv*
| true
|
f653357573129284d14bcaa70f240990c5849bb5
|
Shell
|
imifarago/laravel-docker
|
/Docker/purge.sh
|
UTF-8
| 176
| 2.984375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Tear down the docker-compose project (including volumes) identified by
# PROJECT_SLUG from the local .env file.

# set -a exports every variable assigned while sourcing .env.
set -a
[ -f .env ] && . .env
set +a

# Quoted so an empty or space-containing value cannot break the test
# (the unquoted original only worked by accident for the empty case).
if [ -z "$PROJECT_SLUG" ]; then
    echo ".env PROJECT_SLUG is empty!"
    exit 1
fi

docker-compose -p "$PROJECT_SLUG" down --volumes
| true
|
0f92397e3710906cf61c00adac45ac04d37a564b
|
Shell
|
harishj1729/Database-system-implementation
|
/sqlite/create.sh
|
UTF-8
| 277
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
# Load TPC-H .tbl files into a local sqlite database (tpch.db).
#Source: http://www.cs.toronto.edu/~ryanjohn/teaching/cscc43-s11/sqlite+tpc-h+windows-v03.pdf
tblDir=../data/1G

# Stage the generated table files locally.
cp ${tblDir}/*.tbl .

# dbgen leaves a trailing '|' on every row; strip it in place.
for tbl in *.tbl; do
    sed --in-place= -e 's/|$//' $tbl
done

# Import each file into a table named after the file (minus .tbl).
for tbl in *.tbl; do
    sqlite3 tpch.db ".import $tbl $(basename $tbl .tbl)"
done
| true
|
3cd0c8a04eb36b1578bc5c861ab6f25c40138f73
|
Shell
|
clementsoullard/rest-scheduler-ws
|
/src/scripts/control-proxy.sh
|
UTF-8
| 499
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
# Switch tinyproxy between allow/deny configurations based on flag files
# dropped into the scheduler work folder, restarting the service only when
# a change was actually applied.

WORKFOLDER=/home/clement/scheduler/work
FILEDENY=$WORKFOLDER/denyproxy
FILEALLOW=$WORKFOLDER/allowproxy

if [ -f "$FILEDENY" ]; then
    echo "Deny"
    cp /etc/tinyproxy.conf.deny /etc/tinyproxy.conf
    rm -f "$FILEDENY"
    NEEDRESTART=true
fi

# Note: if both flags are present, "allow" wins (applied last) —
# preserved from the original ordering.
if [ -f "$FILEALLOW" ]; then
    echo "Allow"
    cp /etc/tinyproxy.conf.allow /etc/tinyproxy.conf
    rm -f "$FILEALLOW"
    NEEDRESTART=true
fi

# ${NEEDRESTART+x} expands to "x" iff NEEDRESTART is set at all.
if [ ! -z "${NEEDRESTART+x}" ]; then
    echo "Need restart"    # typo fix: was "Need retart"
    service tinyproxy restart
fi
| true
|
0f4d2f8f2aea067ab1289c03941143ac9f2ffaa1
|
Shell
|
EthrosDemon/safereboot
|
/safereboot
|
UTF-8
| 380
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
##
# safereboot protects you
# from shutting down the wrong
# server while working remotely
##

MNAME=$(hostname)
# command -v is the portable replacement for `which`.
REBOOT=$(command -v reboot)

echo -e "safereboot: you try to reboot machine \033[1m$MNAME\033[0m, continue by writing this hostname!"
# -r: do not interpret backslashes in the typed hostname.
read -r -p "$ " HOST

if [ "$MNAME" == "$HOST" ]
then
    # Forward any extra arguments (e.g. -f) to the real reboot binary,
    # quoted so they are passed through verbatim.
    "$REBOOT" "$@"
else
    echo "wrong hostname, good i asked you for it..."
fi
| true
|
84273c357f5d0e246bba604929a1d728c8bfd406
|
Shell
|
yuju-huang/CS6410_project
|
/benchmark/tensorflow_cifar10/run_multiple_times.sh
|
UTF-8
| 141
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the cifar10 benchmark script a given number of times.
#
# Usage: $0 TIMES

TIMES=$1
if [ -z "$TIMES" ]; then
    echo "arg1 as times"
    # was a bare 'exit', which reported success on a usage error
    exit 1
fi

# Use the validated $TIMES (the original assigned it but looped on $1).
for ((i = 0; i < TIMES; i++)); do
    python3 cifar10.py
done
| true
|
cf96f916115b97f718e013168d7199adbeb27417
|
Shell
|
RJVB/macstrop
|
/_obsolete,upstreamed_/qt5-kde-devel/files/devResources/create-min-tree.sh
|
UTF-8
| 949
| 3.765625
| 4
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
#!/usr/bin/env bash
# create a minimal ports tree containing for port:qt5-kde*
#
# Run from inside a qt5-kde* port directory: copies the required
# portgroups, this port, the qt5-kde port, and two icon ports into
# <target directory>.  NOTE: ECHO="echo" makes every mkdir/rsync below a
# dry run that only prints the commands; clear ECHO to execute them.
if [ $# -lt 1 -o -f "$1" ] ;then
    echo "Usage: $0 <target directory>"
    # NOTE(review): 'exit -1' is non-portable; the status is truncated to
    # an unsigned 8-bit value (255).
    exit -1
fi
ECHO="echo"
# Normalise the destination to an absolute path.
case $1 in
    /*)
        DEST="$1"
        ;;
    *)
        DEST="`pwd`/$1"
        ;;
esac
# chdir to the port directory
cd `dirname $0`
PORT=`port -q info --name`
PORTDIR=`basename ${PWD}`
cd ..
CATDIR=`basename ${PWD}`
cd ..
# we're now in the port tree itself
# required portgroups.
# PORTGROUPS is deliberately left unquoted below so the glob expands.
PORTGROUPS="_resources/port1.0/group/qt5*-1.0.tcl \
_resources/port1.0/group/qmake5-1.0.tcl"
${ECHO} mkdir -p "${DEST}/_resources/port1.0/group/"
${ECHO} rsync -aAXHv --delete ${PORTGROUPS} "${DEST}/_resources/port1.0/group/"
${ECHO} mkdir -p "${DEST}/${CATDIR}"
${ECHO} rsync -aAXHv --delete "${CATDIR}/${PORTDIR}" "${CATDIR}/qt5-kde" "${DEST}/${CATDIR}/"
${ECHO} mkdir -p "${DEST}/kde"
${ECHO} rsync -aAXHv --delete kde/Ciment-icons kde/OSXdg-icons "${DEST}/kde/"
| true
|
e847c5a47a9f6ccf3b9a85b33e11e481e61328bc
|
Shell
|
zygolife/Apophysomyces_genomes
|
/Phylogeny/phylo/fast_run.sh
|
UTF-8
| 584
| 2.828125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/bash
#SBATCH --nodes 1 --ntasks 32 --mem 24gb --time 8:00:00 -p intel --out fasttree_run.%A.log
# SLURM job: build a FastTree (LG+gamma) phylogeny from the concatenated
# alignment, then relabel the tree's tip names using prefix.tab.
module load fasttree/2.1.11
module unload perl
module load miniconda3
# Taxon count = number of lines in prefix.tab (used in the file names).
NUM=$(wc -l ../prefix.tab | awk '{print $1}')
# config.txt presumably defines $PREFIX — confirm; it is not visible here.
source ../config.txt
ALN=../$PREFIX.${NUM}_taxa.JGI_1086.aa.fasaln
TREE1=$PREFIX.${NUM}_taxa.JGI_1086.ft_lg.tre
TREE2=$PREFIX.${NUM}_taxa.JGI_1086.ft_lg_long.tre
# Only (re)build when the tree file is missing or empty (-s).
if [ ! -s $TREE1 ]; then
    FastTreeMP -lg -gamma < $ALN > $TREE1
    echo "ALN is $ALN"
    # Rename tips only if FastTree actually produced output.
    if [ -s $TREE1 ]; then
        perl ../PHYling_unified/util/rename_tree_nodes.pl $TREE1 ../prefix.tab > $TREE2
    fi
fi
| true
|
cbcab8a03d0c5a55a4e5cfe4a0d9946ee4ace33d
|
Shell
|
archivaldo/virustotal_checker
|
/checkvirustotal.sh
|
UTF-8
| 342
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
#
# VirusTotal API public key limits:
# 4 requests/minute
# 5760 requests/day
# 178560 requests/month
#
# Submit every DLL under /x64 and /x86 to the checker script, sleeping
# between requests to stay within the public-key rate limit.

# scan_dlls DIR: check each DIR/*.dll, pausing 45s between API calls.
# (replaces two identical copy-pasted loops)
scan_dlls() {
    local dir=$1
    for file in "$dir"/*.dll
    do
        /root/.pyenv/shims/python /root/checkvirustotal.py -f "$file" -c -n
        sleep 45
    done
}

scan_dlls /x64
scan_dlls /x86
| true
|
621e4a6385d9047d0a6efe598ab0c1f05c61f03e
|
Shell
|
wayming/pg
|
/dbsetup.sh
|
UTF-8
| 454
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
# Create a test user and database on a local PostgreSQL 13 instance,
# smoke-test connectivity with the new credentials, then stop the server.
# Run as postgres
set -x

dbusr=test
dbpwd=test
dbname=testdb

/usr/pgsql-13/bin/pg_ctl start -D /var/lib/pgsql/13/data

# Each SQL statement is now explicitly terminated; the CREATE DATABASE
# previously relied on psql flushing its buffer at end-of-input.
psql <<END
CREATE USER $dbusr WITH PASSWORD '$dbpwd' CREATEROLE CREATEDB REPLICATION BYPASSRLS;
CREATE DATABASE $dbname OWNER=$dbusr;
END

# Verify the new role can connect and query (db name no longer hard-coded).
psql postgres://$dbusr:$dbpwd@localhost/$dbname -c '\l+'
psql postgres://$dbusr:$dbpwd@localhost/$dbname -c 'SELECT 1'

/usr/pgsql-13/bin/pg_ctl stop -D /var/lib/pgsql/13/data
| true
|
9cbf6d02e2ec7e8c1529717b52085164ab687800
|
Shell
|
johnjreiser/thermorbios953w
|
/wview-patches/wview-5.5.0-patch/wview-5.5.0-mods/wviewconfig/wviewconfig.sh
|
UTF-8
| 10,336
| 3.828125
| 4
|
[] |
no_license
|
################################################################################
#
# File: wviewconfig.sh
#
# Description: Provide a script to interactively configure a wview
# installation.
#
# Usage: (must be root)
# wviewconfig
# wviewconfig get
# wviewconfig set [new_config_path]
#
# History:
# Engineer Date Ver Comments
# MS Teel 06/20/05 1 Original
# J Barber 02/24/06 2 Partitioned into functions; added get/set
# MS Teel 02/25/06 3 Tweaked arg and function names
# MS Teel 06/21/08 4 Better station type support
# MS Teel 08/23/08 5 Modify to use sqlite config database
#
################################################################################
################################################################################
################################# M A C R O S ################################
################################################################################
# PID file written by a running wviewd daemon; its presence blocks
# interactive/set modes below.
WVIEWD_PID=$WVIEW_DATA_DIR/wviewd.pid
# NOTE(review): STATION_TYPE, WVIEW_DATA_DIR and WVIEW_CONF_DIR are expected
# to be set in the environment before this script runs — confirm with caller.
CFG_STATION_TYPE=$STATION_TYPE
################################################################################
####################### D E F I N E F U N C T I O N S #######################
################################################################################
# Print command-line help for the three invocation modes
# (interactive, get, set).
show_usage()
{
    cat <<'USAGE_EOF'

wviewconfig
 Configures wview interactively
wviewconfig get
 Prints current settings in text format to stdout
wviewconfig set [new_config_path]
 Takes text format file of configuration at [new_config_path] and applies
 it to the wview configuration database - it can be a partial or full list

USAGE_EOF
}
# Dump every name=value setting from the sqlite config database to stdout.
# Aborts (exit 1) when the database file is missing.
print_config()
{
    if [ -f $WVIEW_CONF_DIR/wview-conf.sdb ]; then
        # get settings from the sqlite database:
        echo ".separator =" > $WVIEW_CONF_DIR/commands.sql
        echo "select name,value from config;" >> $WVIEW_CONF_DIR/commands.sql
        echo ".read $WVIEW_CONF_DIR/commands.sql" | sqlite3 $WVIEW_CONF_DIR/wview-conf.sdb
        rm -rf $WVIEW_CONF_DIR/commands.sql
    else
        echo "wview Configuration database $WVIEW_CONF_DIR/wview-conf.sdb NOT FOUND - ABORTING!"
        exit 1
    fi
}
# Print the interactive-mode banner and pause ~3 seconds so the user can
# read the warning (CTRL-C aborts before anything is modified).
interactive_intro()
{
    echo "################################################################################"
    echo " !!!!!!!!!!!!!!!! READ THIS BEFORE PROCEEDING !!!!!!!!!!!!!!!!"
    echo ""
    echo "--> System Configuration for wview"
    echo ""
    echo "--> Values in parenthesis are your existing values (if they exist) or defaults - "
    echo " they will be used if you just hit enter at the prompt..."
    echo ""
    echo "--> Note: This script will save the existing wview-conf.sdb file to"
    echo " $WVIEW_CONF_DIR/wview-conf.old before writing the new file"
    echo " based on your answers here - if that is not what you want, hit CTRL-C now to"
    echo " abort this script!"
    echo ""
    echo "################################################################################"
    echo ""
    echo -n "pausing 3 seconds "
    sleep 1
    echo -n "."
    sleep 1
    echo -n "."
    sleep 1
    echo "."
    echo ""
}
# Interactive editor for every config parameter.
#
# Dumps all config rows (name|value|description|dependsOn) to parms.out,
# generates a helper script 'editparm' that prompts for ONE parameter —
# skipping it when its dependsOn parameter is not "yes" — writes the
# answer back to the sqlite DB, and (for STATION_TYPE) also rewrites the
# wview-binary file to the matching daemon name.  gawk then drives the
# helper once per row.  Aborts (exit 1) if the config DB is missing.
get_wview_conf_interactive()
{
    if [ -f $WVIEW_CONF_DIR/wview-conf.sdb ]; then
        # get settings from the sqlite database:
        echo ".separator |" > $WVIEW_CONF_DIR/commands.sql
        echo "select name,value,description,dependsOn from config;" >> $WVIEW_CONF_DIR/commands.sql
        echo ".read $WVIEW_CONF_DIR/commands.sql" | sqlite3 $WVIEW_CONF_DIR/wview-conf.sdb > $WVIEW_CONF_DIR/parms.out
        rm -rf $WVIEW_CONF_DIR/commands.sql
        # Construct the editor script:
        # (escaped \$ / \" below are literal in the generated editparm)
        echo "#!/bin/sh" > $WVIEW_CONF_DIR/editparm
        echo "if [ \"\" != \"\$4\" ]; then" >> $WVIEW_CONF_DIR/editparm
        echo " echo \"select value from config where name='\$4';\" > $WVIEW_CONF_DIR/cmnd.sql" >> $WVIEW_CONF_DIR/editparm
        echo " echo \".read $WVIEW_CONF_DIR/cmnd.sql\" | sqlite3 $WVIEW_CONF_DIR/wview-conf.sdb > $WVIEW_CONF_DIR/value.out" >> $WVIEW_CONF_DIR/editparm
        echo " IS_ENABLED=\`cat $WVIEW_CONF_DIR/value.out\`" >> $WVIEW_CONF_DIR/editparm
        echo " rm -rf $WVIEW_CONF_DIR/cmnd.sql $WVIEW_CONF_DIR/value.out" >> $WVIEW_CONF_DIR/editparm
        echo "else" >> $WVIEW_CONF_DIR/editparm
        echo " IS_ENABLED=yes" >> $WVIEW_CONF_DIR/editparm
        echo "fi" >> $WVIEW_CONF_DIR/editparm
        echo "if [ \"\$IS_ENABLED\" != \"yes\" ]; then" >> $WVIEW_CONF_DIR/editparm
        echo " exit 0" >> $WVIEW_CONF_DIR/editparm
        echo "fi" >> $WVIEW_CONF_DIR/editparm
        echo "NEWVAL=\$2" >> $WVIEW_CONF_DIR/editparm
        echo "echo \"-------------------------------------------------------------\"" >> $WVIEW_CONF_DIR/editparm
        echo "echo \"\$3\"" >> $WVIEW_CONF_DIR/editparm
        echo "echo \"PARAMETER: \$1\"" >> $WVIEW_CONF_DIR/editparm
        echo "echo -n \"(\$2): \"" >> $WVIEW_CONF_DIR/editparm
        echo "read INVAL" >> $WVIEW_CONF_DIR/editparm
        echo "if [ \"\" != \"\$INVAL\" ]; then" >> $WVIEW_CONF_DIR/editparm
        echo " echo \"update config set value='\$INVAL' where name='\$1';\" > $WVIEW_CONF_DIR/cmnd.sql" >> $WVIEW_CONF_DIR/editparm
        echo " echo \".read $WVIEW_CONF_DIR/cmnd.sql\" | sqlite3 $WVIEW_CONF_DIR/wview-conf.sdb" >> $WVIEW_CONF_DIR/editparm
        echo " rm -rf $WVIEW_CONF_DIR/cmnd.sql" >> $WVIEW_CONF_DIR/editparm
        # Map the chosen STATION_TYPE to its daemon binary name.
        echo " if [ \"\$1\" = \"STATION_TYPE\" ]; then" >> $WVIEW_CONF_DIR/editparm
        echo " if [ \"\$INVAL\" = \"VantagePro\" ]; then" >> $WVIEW_CONF_DIR/editparm
        echo " echo \"wviewd_vpro\" > $WVIEW_CONF_DIR/wview-binary" >> $WVIEW_CONF_DIR/editparm
        echo " else" >> $WVIEW_CONF_DIR/editparm
        echo " if [ \"\$INVAL\" = \"WXT510\" ]; then" >> $WVIEW_CONF_DIR/editparm
        echo " echo \"wviewd_wxt510\" > $WVIEW_CONF_DIR/wview-binary" >> $WVIEW_CONF_DIR/editparm
        echo " else" >> $WVIEW_CONF_DIR/editparm
        echo " if [ \"\$INVAL\" = \"WS-2300\" ]; then" >> $WVIEW_CONF_DIR/editparm
        echo " echo \"wviewd_ws2300\" > $WVIEW_CONF_DIR/wview-binary" >> $WVIEW_CONF_DIR/editparm
        echo " else" >> $WVIEW_CONF_DIR/editparm
        echo " if [ \"\$INVAL\" = \"WMR918\" ]; then" >> $WVIEW_CONF_DIR/editparm
        echo " echo \"wviewd_wmr918\" > $WVIEW_CONF_DIR/wview-binary" >> $WVIEW_CONF_DIR/editparm
        echo " else" >> $WVIEW_CONF_DIR/editparm
        echo " if [ \"\$INVAL\" = \"Simulator\" ]; then" >> $WVIEW_CONF_DIR/editparm
        echo " echo \"wviewd_sim\" > $WVIEW_CONF_DIR/wview-binary" >> $WVIEW_CONF_DIR/editparm
        echo " fi" >> $WVIEW_CONF_DIR/editparm
        echo " fi" >> $WVIEW_CONF_DIR/editparm
        echo " fi" >> $WVIEW_CONF_DIR/editparm
        echo " fi" >> $WVIEW_CONF_DIR/editparm
        echo " fi" >> $WVIEW_CONF_DIR/editparm
        echo " fi" >> $WVIEW_CONF_DIR/editparm
        echo "fi" >> $WVIEW_CONF_DIR/editparm
        chmod +x $WVIEW_CONF_DIR/editparm
        cd $WVIEW_CONF_DIR
        # Edit parms one at a time:
        gawk -F"|" '{
            sysstring=sprintf("./editparm \"%s\" \"%s\" \"%s\" \"%s\"", $1, $2, $3, $4)
            system(sysstring)
        }' $WVIEW_CONF_DIR/parms.out
        rm -rf $WVIEW_CONF_DIR/editparm $WVIEW_CONF_DIR/parms.out
    else
        echo "wview Configuration database $WVIEW_CONF_DIR/wview-conf.sdb NOT FOUND - ABORTING!"
        exit 1
    fi
}
# Apply a text file of name=value lines ($SET_FILE, set by the caller) to
# the sqlite config database: generates a one-shot 'updateparm' helper and
# drives it once per line via gawk.  Aborts (exit 1) if the DB is missing.
set_config_from_file()
{
    if [ -f $WVIEW_CONF_DIR/wview-conf.sdb ]; then
        # Construct the update script:
        echo "#!/bin/sh" > $WVIEW_CONF_DIR/updateparm
        echo "if [ \"\" != \"\$2\" ]; then" >> $WVIEW_CONF_DIR/updateparm
        echo " echo \"update config set value='\$2' where name='\$1';\" > $WVIEW_CONF_DIR/cmnd.sql" >> $WVIEW_CONF_DIR/updateparm
        echo " echo \".read $WVIEW_CONF_DIR/cmnd.sql\" | sqlite3 $WVIEW_CONF_DIR/wview-conf.sdb" >> $WVIEW_CONF_DIR/updateparm
        echo " rm -rf $WVIEW_CONF_DIR/cmnd.sql" >> $WVIEW_CONF_DIR/updateparm
        echo "fi" >> $WVIEW_CONF_DIR/updateparm
        chmod +x $WVIEW_CONF_DIR/updateparm
        cd $WVIEW_CONF_DIR
        # Update parms one at a time:
        gawk -F"=" '{
            sysstring=sprintf("./updateparm \"%s\" \"%s\"", $1, $2)
            system(sysstring)
        }' $SET_FILE
        rm -rf $WVIEW_CONF_DIR/updateparm
    else
        echo "wview Configuration database $WVIEW_CONF_DIR/wview-conf.sdb NOT FOUND - ABORTING!"
        exit 1
    fi
}
################################################################################
################## S C R I P T E X E C U T I O N S T A R T #################
################################################################################
# First test to make sure that wview is not running for interactive and set...
# NOTE(review): '-a' inside [ ] is deprecated/ambiguous per POSIX; works
# here but [ ... ] && [ ... ] is the safer form.
if [ -f $WVIEWD_PID -a "$1" != "get" ]; then
    echo "wviewd is running - stop wview before running this script..."
    exit 3
fi
# Make sure that the config DB is there:
if [ ! -f $WVIEW_CONF_DIR/wview-conf.sdb ]; then
    echo "wview configuration database NOT FOUND"
    exit 4
fi
METHOD=$1
SET_FILE=$2
if [ "$METHOD" = "" ] # run interactively
then
    interactive_intro
    get_wview_conf_interactive
    echo ""
    echo ""
    echo "################################################################################"
    echo "--> wview Configuration Complete!"
    echo "--> Now run wviewhtmlconfig to select your site skin."
    echo "################################################################################"
else
    # Dispatch on the requested sub-command: get | set | anything else.
    case "$METHOD" in
        "get" )
            print_config
            ;;
        "set" )
            if [ "$SET_FILE" = "" ]; then
                echo "set requires a source file:"
                show_usage
                exit 1
            fi
            if [ ! -f $SET_FILE ]; then
                echo "source path $SET_FILE does not exist"
                show_usage
                exit 1
            fi
            set_config_from_file
            ;;
        *)
            echo "$METHOD not supported"
            show_usage
            exit 1
            ;;
    esac
fi
exit 0
| true
|
5378bb04f8b9ccec5d4e21fd4c245c9bba62e152
|
Shell
|
aa3313322122/AutoInstall
|
/mysql_install.sh
|
UTF-8
| 1,148
| 3.3125
| 3
|
[] |
no_license
|
# Unattended installer for MySQL 5.6.28 (generic Linux tarball): removes any
# existing MySQL installation, recreates the mysql user/group, unpacks the
# tarball under /usr/local, initialises the data directory, registers and
# starts the mysqld service, and adds MYSQL_HOME to /etc/profile.
mysql_package="mysql-5.6.28-linux-glibc2.5-x86_64.tar.gz"
mysql_name="mysql-5.6.28-linux-glibc2.5-x86_64"
# Remove any RPM-installed MySQL first (--nodeps: ignore dependents).
mysql_package_num=`rpm -qa|grep -i mysql |wc -l`
if [ ${mysql_package_num} -ge 1 ];then
    rpm -ev `rpm -qa|grep -i mysql` --nodeps
fi
# NOTE(review): extremely destructive — deletes every path named 'mysql'
# anywhere on the filesystem; scope this down before reuse.
rm -rf `find / -name mysql`
rm -rf /usr/local/mysql*
# Kill running mysql processes (excluding grep and this script itself).
ps -ef|grep mysql|grep -Ev "grep|mysql_install.sh"|awk '{print $2}'|xargs kill -9
userdel -r mysql
groupdel mysql
groupadd mysql
useradd -r -g mysql mysql
# Unpack the tarball (expected at /) and symlink /usr/local/mysql to it.
tar xzvf /$mysql_package -C /usr/local
cd /usr/local
ln -s $mysql_name mysql
chown -R mysql:mysql /usr/local/mysql
cd /usr/local/mysql/
scripts/mysql_install_db --user=mysql
cp /usr/local/mysql/support-files/my-default.cnf /etc/my.cnf
cp /usr/local/mysql/support-files/mysql.server /etc/init.d/mysqld
chkconfig --add mysqld
service mysqld start
# Refresh MYSQL_HOME/PATH entries in /etc/profile (drop stale lines first).
sed -i '/MYSQL_HOME/d' /etc/profile
sed -i '/mysql/d' /etc/profile
echo "MYSQL_HOME=/usr/local/mysql" >> /etc/profile
echo 'export PATH=$PATH:$MYSQL_HOME/bin' >> /etc/profile
source /etc/profile
####mysqladmin -u root password "quanyan888"
####GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' IDENTIFIED BY 'quanyan888' WITH GRANT OPTION;
####flush privileges;
| true
|
38d5d2a4154ac83fbdf23653aa02dfdbb8b87f70
|
Shell
|
KCP-quarantine-area/apps
|
/toxcore/PKGBUILD
|
UTF-8
| 707
| 2.515625
| 3
|
[] |
no_license
|
# PKGBUILD for toxcore, built from a pinned upstream commit with autotools.
pkgname=toxcore
pkgver=0.0.0.1
# Exact upstream commit this package builds.
_commit=532629d486e3361c7d8d95b38293cc7d61dc4ee5
pkgrel=2
pkgdesc='Easy to use, all-in-one communication platform that ensures full privacy and secure message delivery.'
arch=('x86_64')
url='https://tox.chat'
license=('GPL3')
depends=('systemd' 'libconfig' 'libsodium' 'libvpx' 'opus')
makedepends=('check')
options=('!staticlibs')
source=("https://github.com/irungentoo/toxcore/archive/${_commit}.zip")
md5sums=('f8a982ae1f946a17c724e43e57f1a7d3')

build() {
    cd ${pkgname}-${_commit}
    # Generate the configure script from the autotools sources.
    autoreconf -i
    ./configure \
        --prefix=/usr \
        --enable-tests
    make
}

# Run the upstream test suite (uses the 'check' framework).
check() {
    cd ${pkgname}-${_commit}
    make check
}

package() {
    cd ${pkgname}-${_commit}
    make DESTDIR=${pkgdir} install
}
| true
|
a03c98d9f20cc0128f5a6e9581b6bb0a42bfbcb6
|
Shell
|
liuzongqing/munin
|
/mysql/mysql_lock.sh
|
UTF-8
| 966
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
#create by zongqing
# Munin plugin reporting InnoDB row-lock statistics.
#Innodb_row_lock_time unit: milliseconds Des:The total time spent in acquiring row locks, in milliseconds. Added in MySQL 5.0.3.
#Innodb_row_lock_waits unit: times Des:The number of times a row lock had to be waited for. Added in MySQL 5.0.3.

if [[ $1 == "autoconf" ]]; then
    echo yes
    exit 0
elif [[ $1 == "config" ]]; then
    # Munin graph and data-series declarations.
    echo "graph_title Mysql_Lock"
    echo "graph_category mysql"
    echo "graph_args --base 1000"
    echo "graph_vlabel innodb row lock"
    echo "time.type DERIVE"
    echo "time.min 0"
    echo "time.draw LINE2"
    echo "time.label time(milliseconds)"
    echo "waits.type DERIVE"
    echo "waits.min 0"
    echo "waits.draw LINE2"
    echo "waits.label waits(times)"
    exit 0
else
    # Unquoted on purpose: the value word-splits into command + argument.
    MYSQLADMIN="/usr/bin/mysqladmin extended-status"
    # Query the server ONCE and extract both counters from the same
    # snapshot (the original ran mysqladmin twice, so the two values
    # could come from different moments).
    STATUS=$($MYSQLADMIN)
    Time=$(echo "$STATUS" | grep "Innodb_row_lock_time " | awk '{print $4}')
    Wait=$(echo "$STATUS" | grep "Innodb_row_lock_waits " | awk '{print $4}')
    echo "time.value ${Time}"
    echo "waits.value ${Wait}"
fi
| true
|
7e40ca0cffeef29ca1198b4693875f3c471dda1b
|
Shell
|
medbsq/my_scripts
|
/tache_vps
|
UTF-8
| 945
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/bash
# Run nuclei against each host listed in ./h_tache (50 in parallel),
# report findings to a Telegram bot, and rsync results to a remote VPS.
#
# NOTE(review): the Telegram bot token below is hard-coded — this is a
# leaked credential; revoke it and move it to an environment variable.
function nucl(){
    # Per-target output file named by the sha256 of the target URL.
    filename="$(echo "$1" | sha256sum |awk '{print $1}')"
    echo $1 |nuclei -t ~/nuclei-templates/ -exclude ~/nuclei-templates/subdomain-takeover/ -exclude ~/nuclei-templates/technologies/ -exclude ~/nuclei-templates/misc/missing-hsts.yaml -exclude ~/nuclei-templates/misc/missing-x-frame-options.yaml -exclude ~/nuclei-templates/misc/missing-csp.yaml -exclude ~/nuclei-templates/fuzzing/basic-auth-bruteforce.yaml -c 100 -retries 2 |tee -a ./ncl/$filename
    # NOTE(review): 'xargs -n1 - I {}' has a stray space ('- I') and '${}'
    # is not a valid expansion — this notification line likely never worked
    # as intended; verify.
    cat ./ncl/$filename |xargs -n1 - I {} bash -c 'curl -s "https://api.telegram.org/bot1446427088:AAF33_4m-xNosGz8A3oFqyWS6Nc44CSng6I/sendMessage?chat_id=1154530812&text=${}"'
    # Remove the finished host from the work list.
    # NOTE(review): writes 'h_tachie' but the list read below is 'h_tache'
    # — probable typo, so completed hosts are never actually removed.
    cat h_tache |grep -v "$1" |sort -uo h_tachie
}
cd ~/app
mkdir -p ./ncl
# Export the function so the xargs-spawned bash subshells can call it.
export -f nucl
# NOTE(review): '-d h_tache' tests for a *directory*; '! -f' (rebuild the
# list when the file is missing) was probably intended.
if [[ -d h_tache ]];then
    cat Hosts |sort -uo h_tache
fi
cat h_tache |xargs -n 1 -P 50 -I {} bash -c 'nucl "$@"' _ {}
rsync -azp ~/app/ncl/ medbsq@161.35.84.62:~/app/
|
06ce25df87c927d3434767bb27036a6fea005bcd
|
Shell
|
CharJay/stadmin
|
/stadmin-shell/dev/bat_uploadwar.sh
|
UTF-8
| 306
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/sh
# Push the built war file from the jump host to every internal app server
# listed in $hostjq_keyin (defined, like $warName/$user/$cmdhost_keyout,
# in allconfig.cfg). NOTE: uses bash array expansion despite the sh shebang.
echo "!!!! Start shell "
source ./allconfig.cfg
for host in ${hostjq_keyin[*]}; do
	echo "=====开始上传${warName}至内网服务器:${host}"
	# The copy is driven from the jump host (cmdhost_keyout), which scp's
	# the war onward to the target machine.
	ssh tomcat@${cmdhost_keyout} "scp /usr/webser/springboot/${warName} ${user}@${host}:/usr/webser/springboot/"
	echo "${host}上传成功"
done
| true
|
cde8c5256039f5c93fd0df5935aa395c21f3c99f
|
Shell
|
shilpaye/GitPt
|
/shellscripting
|
UTF-8
| 127
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# My first script
# Compares two integers and reports whether they are equal.
a=10
b=20
# Use -eq for numeric comparison and quote the operands: '==' inside [ ] is
# a non-portable bashism, and unquoted variables break when empty.
if [ "$a" -eq "$b" ]
then
   echo "a is equal to b"
else
   echo "a is not equal to b"
fi
| true
|
b2a24f4baadbdcda98131c9aaff878f3b7c96148
|
Shell
|
RaInta/CoherentDirectedDAG
|
/Scripts/InstallPerl.sh
|
UTF-8
| 1,493
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# Bootstraps a per-user Perl toolchain: local::lib, cpanm, and the XML /
# compression modules this pipeline needs. Optional perl 5.10.1 build is
# kept commented out below.
# local perl home
HERE=$(pwd -P)
############################################################
##### Uncomment below if you need perl 5.10.1 ###########
############################################################
## install perl 5.10.1
#if [ ! -x $HERE/perl-5.10.1 ]; then
# wget http://www.cpan.org/src/5.0/perl-5.10.1.tar.gz
# tar -xzf perl-5.10.1.tar.gz
#fi
#
#if [ ! -x ${HERE}/bin/perl ]; then
# cd perl-5.10.1/
# ./Configure -des -Dprefix=$HERE
# make
# make install
# cd ..
# export PATH="${HERE}/bin:${PATH}"
# echo -n "export " >>${HOME}/.bash_profile
# env | grep ^PATH= >>${HOME}/.bash_profile
#fi
############################################################
# install local::lib (bootstrapped into ~/perl5); the eval line activates it
# for this shell and is persisted in .bash_profile for future logins.
if [ ! -x local-lib-2.000012 ]; then
	curl -L http://search.cpan.org/CPAN/authors/id/H/HA/HAARG/local-lib-2.000012.tar.gz | tar xz
	cd local-lib-2.000012
	perl Makefile.PL --bootstrap
	make install
	cd ..
	eval $(perl -I${HOME}/perl5/lib/perl5 -Mlocal::lib)
	echo 'eval $(perl -I${HOME}/perl5/lib/perl5 -Mlocal::lib)' >> ${HOME}/.bash_profile
fi
# install cpanm into the local::lib tree
if [ ! -x ${HOME}/perl5/bin/cpanm ]; then
	curl -L http://cpanmin.us | ${HERE}/bin/perl - --local-lib=${HOME}/perl5 --notest --self-upgrade --reinstall --force
fi
# install needed modules (forced, tests skipped — best effort install)
${HOME}/perl5/bin/cpanm --local-lib=${HOME}/perl5 --notest --reinstall --force XML::Simple XML::Twig IO::Compress::Bzip2 IO::Uncompress::Bunzip2
source ${HOME}/.bash_profile
| true
|
d1c7a959dd27e6ecb6ac89b826abeff70788ab55
|
Shell
|
ARO-user/work-share
|
/GMRT/link_8node.sh
|
UTF-8
| 236
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
# Create symlinks for a data file split into 17 per-node chunks
# (.node0 .. .node16) from one disk to another, on remote hosts
# node111..node118.
# Usage: link_8node.sh <src-disk> <dst-disk> <file-stem>
DISK1=$1
DISK2=$2
STEM1=/mnt/$DISK1/gsbuser/EoR
STEM2=/mnt/$DISK2/gsbuser/EoR
FILE=$3
for node in node{111..118}
do
	# -n: ssh must not read the loop's stdin.  The {0..16} brace expansion
	# is expanded by the REMOTE shell; \$num is escaped so it is evaluated
	# remotely while $STEM1/$STEM2/$FILE are expanded locally.
	ssh -n "$node" "for num in {0..16}; do ln -s $STEM1/$FILE.node\$num $STEM2/$FILE.node\$num; done"
done
| true
|
c43d36c59d6d5b574ee1410768615e26d65d0ea1
|
Shell
|
WillLennon/BYOS
|
/vmss/Scripts/Linux/3/test.sh
|
UTF-8
| 260
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
# Launch the build agent.
# Positional arguments:
#   $1 - url, $2 - pool, $3 - PAT token
#   $4 - optional run mode, either '--once' or empty
url="$1"
pool="$2"
pat="$3"
runArgs="$4"
printf '%s\n' "$runArgs"
# Start the agent immediately; $runArgs is intentionally unquoted so an
# empty value expands to no argument at all.
/bin/bash ./runagent.sh $runArgs
printf '%s\n' done
| true
|
8f8ea3e31587e3758324f7c059f5b2e806902060
|
Shell
|
dorin-ionita/PerforAD
|
/thomas_scripts/wave1d.sh
|
UTF-8
| 1,195
| 2.703125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash -l
# Batch script to run an OpenMP threaded job on Legion with the upgraded
# software stack under SGE.
# NOTE: the '#$' lines below are SGE directives, not ordinary comments.
# 1. Force bash as the executing shell.
#$ -S /bin/bash
# 2. Request ten minutes of wallclock time (format hours:minutes:seconds).
#$ -l h_rt=6:0:0
# 3. Request 1 gigabyte of RAM for each core/thread (must be an integer)
#$ -l mem=5G
# 4. Request 15 gigabyte of TMPDIR space (default is 10 GB)
#$ -l tmpfs=15G
# 5. Set the name of the job.
#$ -N stenciladwave1d
# 6. Select 12 threads (the most possible on Legion).
#$ -pe smp 12
# 7. Set the working directory to somewhere in your scratch space. This is
# a necessary step with the upgraded software stack as compute nodes cannot
# write to $HOME.
# Replace "<your_UCL_id>" with your UCL user ID :)
#$ -wd /home/mmm0334/Scratch/output
#$ -A Imperial_ESE
# 8. Run the application.
# Bind one OpenMP thread per core, packed close together.
export OMP_PLACES=cores
export OMP_PROC_BIND=close
# Log the NUMA topology (-H) and current policy (-s) for the job record.
/shared/ucl/apps/numactl/2.0.12/bin/numactl -H
/shared/ucl/apps/numactl/2.0.12/bin/numactl -s
# Pin execution and memory to NUMA node 0, then run the benchmark driver.
/shared/ucl/apps/numactl/2.0.12/bin/numactl --cpunodebind=0 --membind=0 /home/mmm0334/stencil_ad/PerforAD/generated/vary-threads.sh /home/mmm0334/stencil_ad/PerforAD/generated/driver1dwave 10000000000
| true
|
925864c0b64fb2a0ea0b89a47285e019975a5385
|
Shell
|
anzhihe/learning
|
/shell/book/笨办法学BashShell编程-基础篇/示例脚本/10-22continue-levels.sh
|
UTF-8
| 227
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# Demonstrates 'continue 2': when the inner counter reaches 7, control jumps
# straight to the next iteration of the OUTER loop, so 7..10 never print.
for outer in I II III IV V
do
  echo; echo -n "Group $outer: "
  for inner in 1 2 3 4 5 6 7 8 9 10
  do
    case "$inner" in
      7) continue 2 ;;   # skip the rest of this group entirely
    esac
    echo -n "$inner "
  done
done
echo; echo
exit 0
| true
|
288190ecdb73d13be03b9a2dc9d52a5474e50fe3
|
Shell
|
jayleicn/TVRetrieval
|
/baselines/clip_alignment_with_language/scripts/compute_upper_bound.sh
|
UTF-8
| 762
| 3.40625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# run at project root dir
# Computes the proposal upper bound for a dataset/split pair by dispatching
# to compute_proposal_upper_bound.py with dataset-specific paths.
dset_name=$1 # see case below
split_name=$2 # train/val/test, some datasets may not support all the 3 splits
result_dir="baselines/clip_alignment_with_language/results"
echo "Running with dataset ${dset_name} with split ${split_name}"
case ${dset_name} in
	tvr) # only supports train/val
		eval_file_path=data/tvr_${split_name}_release.jsonl
		save_path=${result_dir}/tvr_${split_name}_proposal_upper_bound.json
		;;
	*)
		# NOTE(review): falls through with eval_file_path/save_path unset, so
		# the python call below still runs with empty arguments.
		echo -n "Unknown argument"
		;;
esac
echo "Running evaluation"
python baselines/clip_alignment_with_language/local_utils/compute_proposal_upper_bound.py \
	-dset_name=${dset_name} \
	-eval_file_path=${eval_file_path} \
	-save_path=${save_path} \
	-verbose
| true
|
9e3727e53a95230b088b3fd78835a9be0549d3c2
|
Shell
|
meain/programmingfonts-screenshots
|
/scripts/filterimages
|
UTF-8
| 428
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/sh
set -e
# Move screenshots whose dominant colour is the page background (#F5F5F5) —
# i.e. shots where the font failed to render any glyphs — into bad-images/.
mkdir -p bad-images
for image in images/*; do
    # Downscale to 50x50, quantise to 8 colours, take the histogram, and
    # extract its most frequent entry as "count,#RRGGBB".
    DOMINANT_COLOR="$(convert "$image" -scale 50x50! -depth 8 +dither -colors 8 -format "%c" histogram:info: |\
        sed -n 's/^[ ]*\(.*\):.*[#]\([0-9a-fA-F]*\) .*$/\1,#\2/p' | sort -r -n -k 1 -t "," | head -n1)"
    # If the dominant colour matches the background, the image is "empty".
    if echo "$DOMINANT_COLOR" | grep -q '#F5F5F5';then
        echo "Filterd $image" && mv "$image" "bad-images/$(basename "$image")"
    fi
done
| true
|
f597bd6f044377ed5cd6c0d01bb050ad31f27fcd
|
Shell
|
chanmi168/Fall-Detection-DAT
|
/stage3/oldscripts0615/stage3_aggregate_results_allHP.sh
|
UTF-8
| 2,535
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/bash
# Stage 3: aggregate model-evaluation results across all hyper-parameters
# for the UMAFall/UPFall cross-configuration experiments, in both the
# Resample and HPF preprocessing variants.
set -e
set -u
stage=3
# Sensor-pairing task lists for the two transfer directions.
TASKS_UMA_TO_UP='UMAFall_chest-UPFall_neck UMAFall_wrist-UPFall_wrist UMAFall_waist-UPFall_belt UMAFall_leg-UPFall_rightpocket UMAFall_ankle-UPFall_ankle'
TASKS_UP_TO_UMA='UPFall_neck-UMAFall_chest UPFall_wrist-UMAFall_wrist UPFall_belt-UMAFall_waist UPFall_rightpocket-UMAFall_leg UPFall_ankle-UMAFall_ankle'
# Run one evaluation pass.
#   $1: input folder   $2: output folder   $3: task list   $4: log file name
run_eval() {
    python stage3_model_eval_allHP.py \
        --input_folder "$1" \
        --output_folder "$2" \
        --training_params_file "$training_params_file" \
        --variable_name "$variable_name" \
        --tasks_list "$3" \
    | tee "$2/$4"
}
# train a model
if [ $stage -eq 3 ]; then
    echo '=================================running stage 3================================='
    training_params_file='training_params_list_v1.json'
    variable_name='channel_n'
    echo '=================================running UMAFall_UPFall_cross_config [Resample] allHP================================='
    inputdir='../../data_mic/stage2/modeloutput_18hz_5fold_UPFall_UMAFall_cross-config_diffCV'
    outputdir='../../data_mic/stage3/UMAFall_UPFall_cross_config_Resample_diffCV'
    mkdir -p "$outputdir"
    run_eval "$inputdir" "$outputdir" "$TASKS_UMA_TO_UP" stage3_UMAFall_UPFall_logs.txt
    run_eval "$inputdir" "$outputdir" "$TASKS_UP_TO_UMA" stage3_UPFall_UMAFall_logs.txt
    echo '=================================running UMAFall_UPFall_cross_config [HPF] allHP================================='
    inputdir='../../data_mic/stage2/modeloutput_WithoutNormal_18hz_5fold_UPFall_UMAFall_cross-config_diffCV'
    outputdir='../../data_mic/stage3/UMAFall_UPFall_cross_config_HPF_diffCV'
    mkdir -p "$outputdir"
    run_eval "$inputdir" "$outputdir" "$TASKS_UMA_TO_UP" stage3_UMAFall_UPFall_logs.txt
    run_eval "$inputdir" "$outputdir" "$TASKS_UP_TO_UMA" stage3_UPFall_UMAFall_logs.txt
    echo '=================================testing stage 1================================='
fi
| true
|
01de67b56bbe3281cf64032e71252a607ae4b58a
|
Shell
|
hsadoyan/SDN_OpenFlow
|
/host4/setup_flood.sh
|
UTF-8
| 327
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/sh
# Deploy and build the Floodlight controller on a remote host.
#   $1: target host/IP
#   $2: hostname (currently unused; kept for interface compatibility)
HOST=$1
HOSTNAME=$2
#Setup floodlight
cd host4
# Ship the split zip parts, reassemble and unpack them remotely, then drop
# in the custom controller module and build with ant.  Remote expansions
# ($HOST) are quoted to survive unusual values.
scp floodlight.zipa* cs4516@"$HOST":~/
ssh cs4516@"$HOST" "cat ./floodlight.zipa* > ./floodlight.zip && unzip floodlight.zip"
scp ./CS4516.java cs4516@"$HOST":~/floodlight/src/main/java/net/floodlightcontroller/cs4516/CS4516.java
ssh cs4516@"$HOST" "cd ~/floodlight && ant"
| true
|
293c4205f4747e4f0cac3901a77aea0ca9e2bf5f
|
Shell
|
sadbumblebee/pizero_photo
|
/process_image.sh
|
UTF-8
| 493
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash
# Build a 2x2 montage from the four most recent photos in ./photos and send
# it to the Canon SELPHY printer.
# FIX: -f so the first run (when no previous output exists) does not error.
rm -f photos/output.jpg photos/output_border.jpg
# Four most recently modified regular files (-t sorts by mtime, -p marks
# directories with a trailing / which grep then removes).
# NOTE(review): parsing ls breaks on filenames with whitespace; kept because
# the camera-generated names here are safe — confirm before reusing.
dw=$(ls photos -tp | grep -v '/$' | head -n +4)
# Reverse the list so the oldest of the four comes first in the montage.
ta=$(echo $dw | awk '{ for (i=NF; i>1; i--) printf("%s ",$i); print $1; }')
(cd photos && montage $ta -tile 2x2 -border 5 -geometry +10+10 -density 300 -units PixelsPerInch output.jpg)
# Add a white outer border sized for the printer's paper.
(cd photos && montage output.jpg -border 35 -bordercolor "#ffffff" -density 300 -units PixelsPerInch -geometry +0+0 output_border.jpg)
lp -d Canon_SELPHY_CP1200_USB -o raw photos/output_border.jpg
| true
|
1dbc0dd71d735730ff2227660fd74ac902ababcb
|
Shell
|
bdpiprava/learnings
|
/lesson-6.sh
|
UTF-8
| 289
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
# Print an ascending number triangle (rows start..end) followed by a
# descending one, reading the bounds interactively.
read -p "Enter start:" start
read -p "Enter end:" end
row=$start
while (( row <= end )); do
    col=$start
    while (( col <= row )); do
        printf '%s' "$col"
        col=$(( col + 1 ))
    done
    echo ""
    row=$(( row + 1 ))
done
echo ""
echo ""
row=$end
while (( row >= start )); do
    col=$row
    while (( col >= start )); do
        printf '%s' "$col"
        col=$(( col - 1 ))
    done
    echo ""
    row=$(( row - 1 ))
done
| true
|
9ec49a83f2b59f68f653cd144437680b29d925db
|
Shell
|
ChenZewei/SET-MRTS
|
/src/scripts/EXP1/EXP.sh
|
UTF-8
| 689
| 3.3125
| 3
|
[] |
no_license
|
#PROC_NUM=`cat /proc/cpuinfo | grep "processor" | wc -l`
# Fan PARAM_NUM parameter indices out over PROC_NUM worker directories,
# each with its own copy of SubTest/config/start.sh, and launch them all.
PROC_NUM=24
ROOT=$(pwd)
FOLDER="EXP"
PARAM_NUM=288
# Parameters per worker; bumped below when the division is not exact
# (ceiling division).
GAP=$(( PARAM_NUM / PROC_NUM ))
rm -rf $FOLDER
if (( PARAM_NUM > GAP * PROC_NUM )); then
    GAP=$(( GAP + 1 ))
fi
mkdir $FOLDER
for ((index=0; index < PROC_NUM; index++)); do
    cd $ROOT
    mkdir "$FOLDER/sp$index"
    cp SubTest "$FOLDER/sp$index"
    cp config.xml "$FOLDER/sp$index"
    cp start.sh "$FOLDER/sp$index"
    # This worker handles parameter indices [START, END].
    START=$(( index * GAP ))
    END=$(( START + GAP - 1 ))
    for ((index2=START; index2 <= END; index2++)); do
        echo "$index2" >> "$FOLDER/sp$index/param"
    done
    cd $FOLDER/sp$index
    echo "--------------Processor $index--------------"
    nohup ./start.sh &
done
| true
|
4b82f6d57898862325f10537e8e7d1a1b028b622
|
Shell
|
aoqingy/vCloudTerminal-Ubuntu1804
|
/make-configure.sh
|
UTF-8
| 1,836
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
#Powered by Ubuntu 16.04.3 desktop
# Prepares a vClassTerminal image: configures SSH/screen lock, installs
# Firefox and its add-ons, builds the update package from the resulting
# profile, installs it, and reboots.  Designed to run as root on the image.
cd `dirname $0`
#apt-get install -y ssh vim cython squashfs-tools compizconfig-settings-manager
ProductName="vClassTerminal"
# Version is read from the Debian control file of the vfd package.
IsoVersion=`awk '$1~/^Version/ {print $2}' ./vfd/DEBIAN/control`
IsoName="${ProductName}-${IsoVersion}"
function Main
{
	ConfigureSystem
}
function ConfigureSystem
{
	# Configure SSH: allow root login and enable the service.
	sed -i -e 's/^PermitRootLogin .*/PermitRootLogin yes/g' /etc/ssh/sshd_config
	systemctl enable ssh
	systemctl restart ssh
	# Disable screen locking / idle blanking.
	gsettings set org.gnome.desktop.session idle-delay 0
	gsettings set org.gnome.desktop.lockdown disable-lock-screen true
	# Install packages from the local repository below.
	dpkg -i ./upgrade/update/10_deplist/deb.special/firefox*.deb
	# Associate the .ica file type by opening an empty ICA file once.
	touch /tmp/empty.ica
	firefox file:///tmp/empty.ica
	# Install the required Firefox add-ons (interactive).
	firefox https://addons.mozilla.org/en-US/firefox/addon/disable-f12-shortcut/
	firefox https://addons.mozilla.org/en-US/firefox/addon/status-4-evar/
	firefox https://addons.mozilla.org/en-US/firefox/addon/1659/
	# Build the update package from the freshly configured profile.
	rm -rf ./upgrade/update/firefox/mozilla
	rm -rf ./upgrade/update/config/root/config
	cp -a ~/.mozilla ./upgrade/update/firefox/mozilla
	cp -a ~/.config ./upgrade/update/config/root/config
	./upgrade/make_update.sh iso
	# Install the update package onto this system.
	cp -a ./upgrade/update_${IsoVersion}.tgz /
	tar xzf /update_${IsoVersion}.tgz -C /
	rm -f /update_${IsoVersion}.tgz
	mkdir -p /update/
	tar xzf /update_${IsoVersion} -C /update
	rm -f /MD5SUM
	rm -f /update_${IsoVersion}
	/update/update/update.sh
	rm -rf /update
	# Set the GRUB menu timeout to zero (boot immediately).
	sed -i -e 's/if \[ "\\\${timeout}" = 0 \]; then$/if \[ "\\\${timeout}" != 0 \]; then/g' /boot/grub/grub.cfg
	sed -i -e 's/set timeout=.*$/set timeout=0/g' /boot/grub/grub.cfg
	reboot
}
Main
| true
|
07e305c83670321070a1440592332dfccc5ff6ad
|
Shell
|
cucumberlinux/ports
|
/cucumber/base/linux-firmware/linux-firmware.buildinfo
|
UTF-8
| 2,227
| 3.375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Copyright 2016, 2018 Scott Court
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Cucumber Linux Buildinfo for linux-firmware
NAME=linux-firmware
VERSION=$(echo $OWD/linux-firmware-*.tar.xz | rev | cut -d / -f 1 | cut -d . -f 3- | rev | cut -d - -f 3-)
URL=()
BUILDDEPS=(git)
download () {
if [ -e $OWD/linux-firmware-*.tar.xz ]; then
echo $OWD/linux-firmware-*.tar.xz already exists. Delete it if you wish to redownload the sources.
return
fi
NEWVERSION=$(date --iso-8601 | sed s/-//g)
# Clone the git repository
if [ -e /tmp/$$ ]; then
rm /tmp/$$ -r
fi
mkdir /tmp/$$
cd /tmp/$$
git clone git://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git || return 1
tar -cJf $OWD/$NAME-$NEWVERSION.tar.xz linux-firmware || return 1
}
build () {
# Extract the sources
tar -xJf $OWD/$NAME-$VERSION.tar.xz || exit 1
# Clean up the git repository
find . -name ".git*" -exec rm -rf "{}" \;
chown -R root:root .
# Move over the package files
install -d $DESTDIR/lib
mv linux-firmware $DESTDIR/lib/firmware
# Copies ths install scripts (slack-desc and doint.sh) from $OWD to
# $DESTDIR/install, creating $DESTDIR/install if need be.
pkgapi_copy_package_scripts
}
| true
|
6f2e335bedee95a5f7c5ba123abfc33bccb532c9
|
Shell
|
ryanhossain9797/snips-nlu-rs
|
/.travis/test.sh
|
UTF-8
| 1,638
| 3.09375
| 3
|
[
"MIT",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# CI test dispatcher: each *_TESTS environment flag enables one platform's
# test suite (Rust, Python, Kotlin, macOS Swift, iOS Swift).
set -ev
export PATH="$HOME/.cargo/bin:$PATH"
if [[ "${RUST_TESTS}" == "true" ]]; then
  echo "Running rust tests..."
  cargo test --all
  cargo check --benches
fi
if [[ "${PYTHON_TESTS}" == "true" ]]; then
  echo "Running python tests..."
  cd platforms/python
  pip install tox
  tox
  cd -
fi
if [[ "${KOTLIN_TESTS}" == "true" ]]; then
  echo "Running kotlin tests..."
  # The Kotlin bindings link against the locally built FFI library.
  cargo build -p snips-nlu-ffi
  cd platforms/kotlin
  ./gradlew -Pdebug -PrustTargetPath=../../target test --info
  cd -
fi
if [[ "${MACOS_SWIFT_TESTS}" == "true" ]]; then
  echo "Running macOS swift tests..."
  cargo build -p snips-nlu-ffi
  cd platforms/swift
  mkdir -p build/DerivedData
  # pipefail so xcpretty does not mask an xcodebuild failure.
  set -o pipefail && xcodebuild \
    -IDECustomDerivedDataLocation=build/DerivedData \
    -workspace SnipsNlu.xcworkspace \
    -scheme SnipsNlu-macOS \
    TARGET_BUILD_TYPE=debug \
    SNIPS_USE_LOCAL=1 \
    clean \
    test \
    | xcpretty
  cd -
fi
if [[ "${IOS_SWIFT_TESTS}" == "true" ]]; then
  echo "Running iOS swift tests..."
  # Cross-compile the FFI library for the iOS simulator first.
  TARGET_SYSROOT=$(xcrun --sdk iphonesimulator --show-sdk-path) \
    cargo build -p snips-nlu-ffi --target x86_64-apple-ios
  cd platforms/swift
  mkdir -p build/DerivedData
  set -o pipefail && xcodebuild \
    -IDECustomDerivedDataLocation=build/DerivedData \
    -workspace SnipsNlu.xcworkspace \
    -scheme SnipsNlu-iOS \
    -destination 'platform=iOS Simulator,name=iPhone 8,OS=latest' \
    TARGET_BUILD_TYPE=debug \
    SNIPS_USE_LOCAL=1 \
    clean \
    test \
    | xcpretty
  cd -
fi
| true
|
228b3326b9391a7105fd2160f694bbd53501d326
|
Shell
|
UCL-RITS/rcps-buildscripts
|
/perl-5.22.0_with_perlbrew_install
|
UTF-8
| 637
| 3.46875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
###############################################
# Installing Perl with Perlbrew
#
# by Heather Kelly, 2015
#
# All parameters can be overridden from the environment, e.g.
#   VERSION=5.24.0 ./perl-5.22.0_with_perlbrew_install
NAME=${NAME:-perl}
VERSION=${VERSION:-5.22.0}
PERLBREW_OPTIONS=${PERLBREW_OPTIONS:-"install --thread"}
PERL_OPTIONS=${PERL_OPTIONS:-"-Duseshrplib"}
# Details: useshrplib builds libperl.so
# If you need to force an install when tests fail, set $PERLBREW_OPTIONS to "--notest install --thread"
set -e
# perlbrew must be loaded
hash perlbrew 2>/dev/null || { echo >&2 "Perlbrew must be loaded. Aborting."; exit 1; }
# $PERLBREWROOT must point at the perlbrew installation root.
source $PERLBREWROOT/etc/bashrc
perlbrew ${PERLBREW_OPTIONS} ${NAME}-${VERSION} ${PERL_OPTIONS}
| true
|
b2d8be6e15733572c675dee5fa9057c98655aaf8
|
Shell
|
brownman/do_for_others_first_old
|
/.CODE/RUNNERS/BANK/wrapper/0.0.1/OLD/CFG/LOOPER/loop_fallback_for_easy_life.sh
|
UTF-8
| 977
| 3.234375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Supervisor loop: repeatedly runs ./wrapper.sh with the script's arguments,
# restarting it after each exit.  'proxy' appears to be an external helper
# that wraps/announces commands — TODO confirm its semantics.
# Abort helper: prints the caller location (for jumping there in gvim) and
# breaks out of the enclosing loop.
breaking(){
	local str0=`caller`
	local str="gvim +$str0"
	echo "goto : $str"
	proxy print_color 31 'breaking'
	echo breaking
	break
}
# Run one iteration: execute wrapper.sh with $input, or bail out via
# breaking() when there is no input.
single(){
	local dir_self=`dirname $0`
	local input="$input"
	if [ -n "$input" ];then
		local file="$dir_self/wrapper.sh"
		#zbab
		if [ -f "$file" ];then
			# Built as a string and eval'd in a subshell so a wrapper
			# crash cannot kill this supervisor.
			local cmd="bash -xc \"$file $input\""; \
			echo "[ try ] $cmd"
			proxy sleep 4
			( eval "$cmd" )
		else
			echo 'no such file ' "$file"
		fi
	else
		echo no input "$input"
		breaking
	fi
}
# Endless respawn loop; counter_death counts restarts.
loop(){
	#zbab1
	local counter_death=0
	while [ 1 ];do
		local cmd="set -o nounset"
		proxy "every 5 \"$cmd\""
		proxy "cowsay \"new life: $counter_death\""
		single
		proxy sleep 2
		let 'counter_death+=1'
	done
}
steps(){
	# $cmd_trap is expected to be provided by the environment — TODO confirm.
	eval "$cmd_trap"
	loop
}
args=( $@ )
input="${args[@]}"
steps
| true
|
db970ed7d7f076487e5af1cb8fdf6e51641b9bfe
|
Shell
|
Jfeatherstone/i3-themes
|
/summer-heat/install.sh
|
UTF-8
| 7,904
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/sh
# install.sh
# Installs all theme elements to their destinations.
# Must be run as su since it adds changeVolume and changeBrightness to /usr/bin.
# Every destination that already exists is preserved with a '.old' suffix
# instead of being overwritten.
#
# Fixes over the previous version:
#  - 'mkdir ~/.coinfig/galendae' typo (galendae dir was never created)
#  - rofi: missing space in 'mv A B' corrupted the backup, and the fresh
#    install branch used mv (destroying the source) instead of cp
#  - vim fresh install copied to /.vimrc (filesystem root) instead of ~/.vimrc

# Install a helper script into /usr/bin, never overwriting an existing one.
#   $1: source path   $2: script name
install_bin() {
    if [ -e "/usr/bin/$2" ]; then
        echo "$2 script already exists, not overwriting! To manually overwrite use 'cp $2 /usr/bin/'"
    else
        cp "$1" /usr/bin/
        echo "$2 script installed to /usr/bin/"
    fi
    echo ""
}

# Create a directory (and parents) if it does not exist yet.  $1: path
ensure_dir() {
    if [ ! -e "$1" ]; then
        mkdir -p "$1"
        echo "Created $1 directory"
        echo ""
    fi
}

# Copy a file into place, moving any existing destination to <dest>.old.
#   $1: source   $2: destination   $3: human-readable name for messages
backup_install() {
    if [ -e "$2" ]; then
        mv "$2" "$2.old"
        cp "$1" "$2"
        echo "$3 installed to $2 (old version moved to $2.old)"
    else
        cp "$1" "$2"
        echo "$3 installed to $2"
    fi
    echo ""
}

install_bin scripts/changeBrightness changeBrightness
install_bin scripts/changeVolume changeVolume

ensure_dir ~/.config
ensure_dir ~/.config/rofi
ensure_dir ~/.config/i3
ensure_dir ~/.config/dunst
ensure_dir ~/.config/galendae
ensure_dir ~/.config/picom
ensure_dir ~/.config/kitty

backup_install i3/config            ~/.config/i3/config              "i3 config file"
backup_install i3/wallpaper.jpg     ~/.config/i3/wallpaper.jpg       "wallpaper.jpg"
backup_install i3/i3_cheatsheet.pdf ~/.config/i3/i3_cheatsheet.pdf   "i3_cheatsheet.pdf"
backup_install dunst/dunstrc        ~/.config/dunst/dunstrc          "dunstrc"
backup_install rofi/rofi_launcher.sh ~/.config/rofi/rofi_launcher.sh "rofi_launcher.sh"
backup_install kitty/kitty.conf     ~/.config/kitty/kitty.conf       "kitty.conf"
backup_install galendae/config      ~/.config/galendae/config        "galendae config"
backup_install picom/picom.conf     ~/.config/picom/picom.conf       "picom config"
backup_install vim/vimrc            ~/.vimrc                         "vim config"
echo "You may need to run :PluginInstall to update Vundle plugins"
echo ""

# Firefox: locate the default-release profile's chrome directory.
# This was taken from https://github.com/mut-ex/minimal-functional-fox
MOZILLA_USER_DIRECTORY="$(find ~/.mozilla/firefox -maxdepth 1 -type d -regextype egrep -regex '.*[a-zA-Z0-9]+.default-release')"
CHROME_DIRECTORY="$(find $MOZILLA_USER_DIRECTORY -maxdepth 1 -type d -name 'chrome')"
if [ -e "$CHROME_DIRECTORY/userChrome.css" ]; then
    mv "$CHROME_DIRECTORY/userChrome.css" "$CHROME_DIRECTORY/userChrome.css.old"
    echo "Firefox userChrome.css moved to userChrome.css.old"
    echo ""
fi
if [ -e "$CHROME_DIRECTORY/userContent.css" ]; then
    mv "$CHROME_DIRECTORY/userContent.css" "$CHROME_DIRECTORY/userContent.css.old"
    echo "Firefox userContent.css moved to userContent.css.old"
    echo ""
fi
# Now move all of the stuff
cp firefox/chrome/* "$CHROME_DIRECTORY"/
echo "Installation completed! This directory can now be safely deleted!"
echo "If you have any issues with my theme, or want to recommend edits, please submit an issue on the Github page"
echo "https://github.com/Jfeatherstone/i3-themes"
echo "Thanks for using my theme!"
| true
|
0ba82471e02684136d9f3d37ea8de00c1258b6f5
|
Shell
|
sspans/ladvd
|
/scripts/_tarball.sh
|
UTF-8
| 196
| 2.671875
| 3
|
[
"ISC"
] |
permissive
|
#!/usr/bin/env bash
# Build the release tarball (with distcheck verification) and produce a
# detached GPG signature next to it in $RELEASE.
# $BASE/$RELEASE come from scripts/_init.sh; source it if not already done.
[ -n "${BASE}" ] || . scripts/_init.sh
# create dist tarball
autoreconf -fi
./configure
make distcheck
mv *tar.gz ${RELEASE}
# create signature
gpg -ba ${RELEASE}/*tar.gz
| true
|
344d6068d597b77f70fa4f1b64932e25f4f6380a
|
Shell
|
rushweigelt/professional-Git
|
/AdvancedProgramming(bash, scripts, python)/Labs/lab03/testStuff/reextend.bash
|
UTF-8
| 263
| 2.953125
| 3
|
[] |
no_license
|
#Rush Weigelt rw643 rename files script
# version 4.3.48(1)-release (x86_64-pc-linux-gnu)
# 4/20/18
# Renames every file whose name ends in $1 so the trailing $1 becomes $2,
# e.g. './reextend.bash .txt .md'.
if [[ -z "$1" || -z "$2" || -n "$3" ]] ; then
	echo "We are looking for EXACTLY two arguments please!"
else
	for i in *$1 ; do
		# FIX: strip the SUFFIX and append the replacement. The original
		# used ${i/$1/$2}, which replaces the first occurrence anywhere in
		# the name and corrupts names like 'a.txt.b.txt'.
		mv -- "$i" "${i%$1}$2"
	done
fi
| true
|
8194a0e781a08d75d64e197d7d995e22729d2581
|
Shell
|
RedBaron80/numbered_directories_sync
|
/backup.sh
|
UTF-8
| 808
| 3.9375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Incremental backup of a tree containing numbered image directories.
#   $1: source root   $2: destination root
# Numbered directories are copied once and never re-copied — except the
# newest one, which may still be growing and is rsync'd.  All other entries
# (avatars, logs, ...) are rsync'd; 'temp' is skipped.
function copy_dir {
	echo "copying $1"
	cp $1 $2 -r
}
function sync_dir {
	echo "sincronizing $1"
	rsync -az $1 $2;
}
##################################################################################
cd $1
# Highest-numbered directory that already exists at the destination.
edir=0
isnumber_regex='^[0-9]+$'
for file in *; do
	if [[ $file =~ $isnumber_regex ]] && [ -d $file ]
	then
		# if directory exists and it is a number (image directory)
		if [ -d "$2/$file" ] ; then
			# The lastest existing directory is kept for sync it later
			if [ $edir -lt $file ]; then
				edir=$file
			fi
		else
			# Copying the unexisting directory at the destiny
			copy_dir $file $2
		fi
	elif ! [ "$file" == "temp" ]
	then
		# Syncing the other directories (avatars, logs...)
		sync_dir $file $2;
	fi
done
#syncing the last directory
sync_dir $edir $2
echo "Done!"
| true
|
a96da4b040998b852bd5d42fc7adefd5522a49c5
|
Shell
|
morrowdigital/DetoxRecorder
|
/release.sh
|
UTF-8
| 1,887
| 3.6875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/zsh -e
# Release automation for DetoxRecorder: verifies a clean master branch,
# collects release notes interactively, builds, bumps the npm version,
# publishes, and creates a GitHub release.
# Assumes gh is installed and logged in
SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
BRANCH=$(git rev-parse --abbrev-ref HEAD)
if [ ! "$BRANCH" = "master" ]; then
	printf >&2 "\033[1;31mNot on master branch, abording\033[0m"
	exit 255
fi
# Refuse to release with uncommitted changes.
if [[ -n $(git status --porcelain) ]]; then
	printf >&2 "\033[1;31mCannot release version because there are unstaged changes, aborting\nChanges:\033[0m\n"
	git status --short
	exit 255
fi
if [[ -n $(git log --branches --not --remotes) ]]; then
	echo -e "\033[1;34mPushing pending commits to git\033[0m"
	git push
fi
# Release notes: open an empty file in the editor and wait (-W) until it is
# closed; an empty file aborts the release.
echo -e "\033[1;34mCreating release notes\033[0m"
RELEASE_NOTES_FILE="${SCRIPTPATH}/Distribution/_tmp_release_notes.md"
touch "${RELEASE_NOTES_FILE}"
open -Wn "${RELEASE_NOTES_FILE}"
if ! [ -s "${RELEASE_NOTES_FILE}" ]; then
	echo -e >&2 "\033[1;31mNo release notes provided, aborting\033[0m"
	rm -f "${RELEASE_NOTES_FILE}"
	exit 255
fi
"${SCRIPTPATH}/Scripts/updateCopyright.sh"
"${SCRIPTPATH}/build.sh"
echo -e "\033[1;34mCopying script\033[0m"
cp "${SCRIPTPATH}/record.sh" "${SCRIPTPATH}/Distribution"
# Version = CFBundleShortVersionString.CFBundleVersion of the built framework.
echo -e "\033[1;34mUpdating package.json version\033[0m"
SHORT_VERSION=$(/usr/libexec/PlistBuddy -c "Print CFBundleShortVersionString" "${SCRIPTPATH}/Distribution/DetoxRecorder.framework/Info.plist")
BUILD_NUMBER=$(/usr/libexec/PlistBuddy -c "Print CFBundleVersion" "${SCRIPTPATH}/Distribution/DetoxRecorder.framework/Info.plist")
VERSION="${SHORT_VERSION}"."${BUILD_NUMBER}"
cd "${SCRIPTPATH}/Distribution"
npm version "${VERSION}" --allow-same-version
# echo -e "\033[1;34mReleasing\033[0m"
npm publish
git add -A &> /dev/null
git commit -m "${VERSION}" &> /dev/null
git push
echo -e "\033[1;34mCreating GitHub release\033[0m"
gh release create --repo wix/DetoxRecorder "$VERSION" --title "$VERSION" --notes-file "${RELEASE_NOTES_FILE}"
rm -f "${RELEASE_NOTES_FILE}"
| true
|
16545ce313af3250063d1b52b92c2ad52f9e4123
|
Shell
|
RaySajuuk/GoAgentX
|
/Resources/ssh/echo.sh
|
UTF-8
| 161
| 3.25
| 3
|
[
"GPL-2.0-only",
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# Auto-responder for SSH prompts: answers "yes" to host-key "yes/no"
# questions, otherwise replies with $ECHO_CONTENT (e.g. the password).
prompt=$1
# Delete the first occurrence of the literal "yes/no" from the prompt; if
# the length shrank by exactly 6 characters, the substring was present.
check_yesno=${prompt/yes\/no/}
(( len = ${#prompt} - ${#check_yesno} ))
if [ $len == 6 ]; then
	echo yes
else
	echo $ECHO_CONTENT
fi
| true
|
2ce5d1af0ae934c297c594f8d06916c356c3a3c0
|
Shell
|
gueyebabacar/gdi-portal-back
|
/bin/reinit.sh
|
UTF-8
| 173
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# Dispatch to an environment-specific reinit script: reinit_<env>.sh in the
# same directory as this script.  Usage: reinit.sh <env>
echo "Lancement du reinit..."
if [ "$#" != 1 ]
then
    echo "Usage: reinit.sh env"
    # NOTE(review): exits 0 on a usage error, kept for compatibility with
    # existing callers — consider 'exit 1'.
    exit
fi
# $(...) instead of backticks; quoted so paths with spaces survive.
BASH_DIRECTORY=$(dirname "$0")
sh "${BASH_DIRECTORY}/reinit_$1.sh"
| true
|
a145c4d960b5ef9bd1c4769a5a13809cb5b441fd
|
Shell
|
kgoutsos/todo.txt-addons
|
/do
|
UTF-8
| 153
| 2.671875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# todo.txt add-on: wraps the built-in 'do' action, then feeds the completed
# task line to recur.py so recurring tasks are re-created.
# $TODO_FILE/$TODO_SH/$TODO_ACTIONS_DIR are provided by todo.sh.
[ "$1" = "usage" ] && exit 0
shift
# Capture the task line (by item number, $1) BEFORE it is marked done.
TASK=$(sed "$1!d" "$TODO_FILE")
"$TODO_SH" command do "$@"
python "$TODO_ACTIONS_DIR"/recur.py "$TASK"
| true
|
2ba24cad63d15370da80ef80b02455698baa8d1a
|
Shell
|
Sjors/libwally-swift
|
/build-libwally.sh
|
UTF-8
| 3,370
| 3.828125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env sh
set -e # abort if any command fails
export PATH=$PATH:/opt/homebrew/bin/
export PYTHON="/usr/bin/python3"
cd CLibWally/libwally-core
# Switch to vanilla libsecp256k1, rather than the more experimental libsecp256k1-zkp.
# Since libsecp256k1-zkp is rebased on vanilla libsecp256k1, we can simply checkout
# a common commit.
pushd src/secp256k1
# Latest tagged release used in Bitcoin Core:
# https://github.com/bitcoin/bitcoin/commits/master/src/secp256k1
git remote | grep bitcoin-core || git remote add bitcoin-core https://github.com/bitcoin-core/secp256k1.git
git fetch bitcoin-core --tags
git checkout v0.2.0 || exit 1
git rev-parse HEAD | grep 21ffe4b22a9683cf24ae0763359e401d1284cc7a || exit 1
popd
BUILD_DIR="$(pwd)/build"
#######################################
# Build libwallycore + libsecp256k1 as static libraries for one
# SDK/architecture pair and stash the .a archives under ${BUILD_DIR}/<sdk>/.
# Globals:   BUILD_DIR (read)
# Arguments: $1 SDK name, $2 autoconf host triple, $3 extra CFLAGS
#######################################
build() {
    SDK_NAME=$1 # iphonesimulator, iphoneos
    HOST=$2 # 'aarch64-apple-darwin' or 'x86_64-apple-darwin'
    EXTRA_CFLAGS=$3 # '-arch arm64 -mios...'
    # Cross compiler for the target SDK; CC_FOR_BUILD stays a host (macOS)
    # compiler so configure-time helper programs can still run locally.
    CC="$(xcrun --sdk $SDK_NAME -f clang) -isysroot $(xcrun --sdk $SDK_NAME --show-sdk-path)"
    CC_FOR_BUILD="$(xcrun --sdk macosx -f clang) -isysroot $(xcrun --sdk macosx --show-sdk-path)"
    sh ./tools/autogen.sh
    # NOTE(review): the trailing backslash after CPP_FOR_BUILD makes "make"
    # an *argument* to ./configure rather than a separate command -- confirm
    # whether that continuation is intended.
    ./configure --disable-shared --host=$HOST --enable-static --disable-elements --enable-standard-secp \
        CC="$CC $EXTRA_CFLAGS" \
        CPP="$CC $EXTRA_CFLAGS -E" \
        CC_FOR_BUILD="$CC_FOR_BUILD" \
        CPP_FOR_BUILD="$CC_FOR_BUILD -E" \
    make
    # Name the archives by host triple so both arches can coexist and later
    # be lipo'd into a universal binary.
    SDK_DIR="${BUILD_DIR}/${SDK_NAME}"
    mkdir -p "${SDK_DIR}"
    cp src/.libs/libwallycore.a "${SDK_DIR}/libwallycore-$HOST.a"
    cp src/secp256k1/.libs/libsecp256k1.a "${SDK_DIR}/libsecp256k1-$HOST.a"
    # Clean so the next architecture's build starts fresh.
    make clean
}
if [[ ${ACTION:-build} = "build" || $ACTION = "install" ]]; then
if [[ $PLATFORM_NAME = "macosx" ]]; then
TARGET_OS="macos"
elif [[ $PLATFORM_NAME = "iphonesimulator" ]]; then
TARGET_OS="ios-simulator"
else
TARGET_OS="ios"
fi
if [[ $CONFIGURATION = "Debug" ]]; then
CONFIGURATION="debug"
else
CONFIGURATION="release"
fi
ARCHES=()
LIBWALLYCORE_EXECUTABLES=()
LIBSECP256K1_EXECUTABLES=()
NEEDS_LIPO=false
for ARCH in $ARCHS
do
TARGET_ARCH=$ARCH
if [[ $TARGET_ARCH = "arm64" ]]; then
TARGET_ARCH="aarch64"
fi
LIBWALLY_DIR="${BUILD_DIR}/${PLATFORM_NAME}/libwallycore-${TARGET_ARCH}-apple-darwin.a"
SECP_DIR="${BUILD_DIR}/${PLATFORM_NAME}/libsecp256k1-${TARGET_ARCH}-apple-darwin.a"
# If we haven't built our static library, let's go ahead and build it. Else, we can probably just not try and build at all.
if [ ! -f $LIBWALLY_DIR ] || [ ! -f $SECP_DIR ]
then
echo "DEBUG:: File not found, let's build!"
build ${PLATFORM_NAME} ${TARGET_ARCH}-apple-darwin "-arch ${ARCH} -m${TARGET_OS}-version-min=7.0 -fembed-bitcode"
# Tracks our list of executables so we know the static libraries we need to lipo later
LIBWALLYCORE_EXECUTABLES+=($LIBWALLY_DIR)
LIBSECP256K1_EXECUTABLES+=($SECP_DIR)
# Something changed, we should lipo later.
NEEDS_LIPO=true
fi
done
# If nothing changed, we can just not try lipo at all and skip.
if [ "$NEEDS_LIPO" = true ] ; then
xcrun --sdk $PLATFORM_NAME lipo -create "${LIBWALLYCORE_EXECUTABLES[@]}" -output "${BUILD_DIR}/LibWallyCore"
xcrun --sdk $PLATFORM_NAME lipo -create "${LIBSECP256K1_EXECUTABLES[@]}" -output "${BUILD_DIR}/libsecp256k1"
fi
elif [[ $ACTION = "clean" ]]; then
make clean
fi
| true
|
5d16b5d2cfb73430a6ab3bd23373d0ca9da1654e
|
Shell
|
GGain/linux_study
|
/update.sh
|
UTF-8
| 688
| 3.296875
| 3
|
[] |
no_license
|
#! /bin/sh
# Ensure a local checkout of the BSP source named in $bsp_type exists under
# SRC_PATH: create it when missing, otherwise pull the latest changes.
# Expects the environment variable bsp_type to be set by the caller.
#set path to folder store source code
export SRC_PATH=/home/public/Desktop/linux_bsp
export SUB_PATH=${SRC_PATH}/${bsp_type}
echo "Download src for ${bsp_type}!"
echo "Go to ${SRC_PATH}"
cd ${SRC_PATH}
if [ ! -d ${bsp_type} ]; then
    mkdir ${bsp_type}
    #git clone http://sw-stash.freescale.net/scm/alb/${bsp_type}.git
    #http://sw-stash.freescale.net/scm/alb/linux.git
    #http://sw-stash.freescale.net/scm/alb/u-boot.git
    #http://sw-stash.freescale.net/scm/alb/buildroot.git
    # NOTE(review): the clone above is commented out and the mkdir just
    # created the directory, so this failure check can never fire -- the
    # script "succeeds" with an empty source tree. Confirm whether the
    # git clone should be re-enabled.
    if [ ! -d ${bsp_type} ]; then
        echo "Can not download source code for ${bsp_type}!"
        cd -
        exit 1
    fi
else
    # Checkout already present: just update it.
    cd ${bsp_type}
    git pull
fi
cd -
exit 0
| true
|
1f8bd2115c1cf2b2b0e79e0c1347f895ff7ed76a
|
Shell
|
lunarcity7/frontier
|
/daemon.sh
|
UTF-8
| 2,893
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/sh
set -ueo pipefail
#######################################
# Print one Caddy site block for a single container description.
# Arguments: $1 - one-line JSON object: {ip, port, domains, tags}
# Globals:   FRONTIER_USER / FRONTIER_PASS (read, for the "login" tag)
# Outputs:   Caddyfile stanza(s) on stdout; diagnostics on stderr
#######################################
config()
{
    json="$1"
    ip=`echo "$json" | jq -r '.ip'`
    port=`echo "$json" | jq -r '.port'`
    domains=`echo "$json" | jq -r '.domains'`
    # A null ip means the network lookup produced nothing for this entry
    # (seen when the same domain is declared twice); skip it rather than
    # emit a broken block.
    if [ "$ip" = "null" ]; then
        echo "ERROR: duplicate domain specified for $domains" 1>&2
        return
    fi
    # tags is optional; jq -e exits non-zero when the key is null/missing.
    if echo "$json" | jq -er '.tags' > /dev/null; then
        tags=`echo "$json" | jq -r '.tags'`
    else
        tags=""
    fi
    # redir2www: serve the sites under www.* and redirect the bare domains.
    if echo $tags | grep -q '\bredir2www\b'; then
        baredomains="$domains"
        if echo $baredomains | grep -q 'www\.'; then
            echo "ERROR: www domain specification not supported when tag redir2www is specified: $baredomains" 1>&2
            return
        fi
        # prefix all domains with www.
        domains=`echo $domains | sed 's/^/www./;s/, /, www./g'`
    fi
    # Main reverse-proxy block for the (possibly www-prefixed) domain list.
    cat <<EOF
$domains {
	encode gzip
	reverse_proxy * $ip:$port
EOF
    # login: protect the site with HTTP basic auth.
    if echo $tags | grep -q '\blogin\b'; then
        cat <<EOF
	basicauth {
		$FRONTIER_USER $FRONTIER_PASS
	}
EOF
    fi
    echo '}'
    echo
    # One redirect block per bare domain when redir2www is requested.
    if echo $tags | grep -q '\bredir2www\b'; then
        echo "$baredomains" | tr ', ' '\n' | grep '[a-z]' | while read baredomain; do
            cat <<EOF
$baredomain {
	redir https://www.$baredomain
}
EOF
        done
    fi
}
get_config_from_docker_socket()
{
curl -s --unix-socket "$socket_file" --header 'Accept: application/json' \
'http://localhost/containers/json?filters=\{"status":\["running"\],"label":\["frontier.domains"\]\}' | \
jq -c '.[] | select(.Labels | has("frontier.domains") and has("frontier.port")) | { "ip": .NetworkSettings.Networks.'$network_name'.IPAddress, "port": .Labels["frontier.port"], "domains": .Labels["frontier.domains"], "tags": .Labels["frontier.tags"] }' | \
while read json; do
config "$json"
done
}
#######################################
# Emit Caddy config stanzas for every container advertised through the
# Rancher metadata service that carries frontier.domains/frontier.port labels.
# Bugfixes vs. original:
#  * the loop read each line into $l but passed the stale/unset $json to
#    config() (a crash under "set -u");
#  * jq now uses -c (one compact JSON object per line) like the docker-socket
#    variant; -r pretty-printed each object across several lines, so "read"
#    only ever saw fragments.
#######################################
get_config_from_rancher()
{
    curl -s --header 'Accept: application/json' http://rancher-metadata/2016-07-29/services | \
    jq -c '.[].containers[]? | select(.labels | has("frontier.domains") and has("frontier.port")) | { "ip": .ips[0], "port": .labels["frontier.port"], "domains": .labels["frontier.domains"], "tags": .labels["frontier.tags"] }' | \
    while read -r json; do
        config "$json"
    done
}
get_config()
{
cat <<GLOBAL_CFG
{
http_port 80
https_port 443
email $email
servers {
protocol {
strict_sni_host
}
}
storage file_system {
root /state
}
}
GLOBAL_CFG
case $data_src in
"docker-socket")
get_config_from_docker_socket
;;
"rancher")
get_config_from_rancher
;;
*)
error "unrecognized data source - $data_src"
esac
}
current_cfg="`get_config`"
echo "$current_cfg" > Caddyfile
export HOME=/tmp
/caddy run &
pid=$!
while :; do
ps -o pid | grep -q "^ *$pid$" || break
next_cfg="`get_config`"
if [ ! "$current_cfg" = "$next_cfg" ]; then
current_cfg="$next_cfg"
echo "Reloading with new config:"
echo "$current_cfg"
echo "$current_cfg" > Caddyfile
/caddy reload
fi
sleep 10
done
wait $pid
caddy_exit=$?
echo "Caddy exited with code $caddy_exit"
exit $caddy_exit
| true
|
81c4c6128929c25b844e0006fc9975780612db98
|
Shell
|
OpenModelica/OMLibraries
|
/check-latest.sh
|
UTF-8
| 888
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
# For a checked-out library directory ($1), compare the locally recorded SVN
# revision (stored in "$1.rev") against the repository HEAD and report when
# an update is available, including the branch/tag paths that changed.
SVNOPTS="--non-interactive --username anonymous"
# Repository root and the URL this checkout tracks.
ROOT=`svn info $SVNOPTS --xml "$1" | xpath -q -e '/info/entry/repository/root/text()'`
URL=`svn info $SVNOPTS --xml "$1" | xpath -q -e '/info/entry/url/text()'`
# The following is not used because it only checks the revision of the checked out version - we want the repository revision...
# `svn info $SVNOPTS --xml "$1" | xpath -q -e '/info/entry/commit/@revision' | grep -o "[0-9]*"`
CURREV=`cat "$1.rev"`
#if svn info $SVNOPTS --xml "$ROOT" >& /dev/null; then
#  URL=$ROOT
#fi
# Latest committed revision on the remote URL.
REMOTEREV=`svn info $SVNOPTS --xml "$URL" | xpath -q -e '/info/entry/commit/@revision' | grep -o "[0-9]*"`
if test "$CURREV" -lt "$REMOTEREV"; then
    # Summarise which trunk/tags/branches paths were touched between the two
    # revisions so the reader sees roughly what changed.
    echo $1 uses $CURREV but $REMOTEREV is available. Changed paths include `svn log -qv -r$CURREV:$REMOTEREV $URL | egrep -o "(/(tags|branches)/[^/]*/|/trunk/)" | sed "s, (from /,/," | sort -u`
fi
| true
|
5aaf067cff9c375570766170a3d16c2cda6c97f0
|
Shell
|
saevarom/docker-wagtail-develop
|
/setup.sh
|
UTF-8
| 1,299
| 3.359375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Bootstrap a local Wagtail development checkout: clone the needed
# repositories (skipping any that already exist) and create the local
# settings / env files for bakerydemo.
# Fail if any command fails.
set -e

# Clone a git repository unless its destination directory already exists.
# Arguments: $1 - repository URL, $2 - destination directory
clone_if_missing() {
    local url=$1 dest=$2
    if [ ! -d "$dest" ]; then
        git clone "$url" "$dest"
    else
        echo Directory "$dest" already exists, skipping...
    fi
}

clone_if_missing https://github.com/wagtail/bakerydemo.git bakerydemo
clone_if_missing https://github.com/wagtail/wagtail.git wagtail

mkdir -p libs
clone_if_missing https://github.com/wagtail/django-modelcluster.git libs/django-modelcluster
clone_if_missing https://github.com/wagtail/Willow.git libs/Willow

# Set up bakerydemo to use the Postgres database in the sister container
if [ ! -f bakerydemo/bakerydemo/settings/local.py ]; then
    echo "Creating local settings file"
    cp bakerydemo/bakerydemo/settings/local.py.docker-compose-example bakerydemo/bakerydemo/settings/local.py
fi

# Create a blank .env file in bakerydemo to keep its settings files from complaining
if [ ! -f bakerydemo/.env ]; then
    echo "Creating file for local environment variables"
    echo "DJANGO_SETTINGS_MODULE=bakerydemo.settings.local" > bakerydemo/.env
fi
| true
|
59f76f2e556125ae2f5818935821515222c2c011
|
Shell
|
rponte/qconsp2012_hibernate_efetivo
|
/configure_mysql-server.sh
|
UTF-8
| 1,703
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
###############################
# MySQL Server dependencies #
###############################
# The latest source code can be found at https://gist.github.com/rponte/7561856
set -e # Exit script immediately on first error.
#set -x # Print commands and their arguments as they are executed.
# Check if MySQL environment is already installed
RUN_ONCE_FLAG=~/.mysql_env_build_time
MYSQL_PASSWORD="root"
if [ -e "$RUN_ONCE_FLAG" ]; then
echo "MySQL Server environment is already installed."
exit 0
fi
# Update Ubuntu package index
sudo apt-get update -y
# Installs MySQL 5.5
echo "mysql-server-5.5 mysql-server/root_password password $MYSQL_PASSWORD" | sudo debconf-set-selections
echo "mysql-server-5.5 mysql-server/root_password_again password $MYSQL_PASSWORD" | sudo debconf-set-selections
sudo apt-get install -y mysql-server-5.5 mysql-client
# Configures MySQL
sudo sed -i 's/127.0.0.1/0.0.0.0/g' /etc/mysql/my.cnf
sudo sed -i '/\[mysqld\]/a\lower_case_table_names=1' /etc/mysql/my.cnf
echo "MySQL Password set to '${MYSQL_PASSWORD}'. Remember to delete ~/.mysql.passwd" | tee ~/.mysql.passwd;
mysql -uroot -p$MYSQL_PASSWORD -e "GRANT ALL ON *.* TO root@'%' IDENTIFIED BY '$MYSQL_PASSWORD'; FLUSH PRIVILEGES;";
mysql -uroot -p$MYSQL_PASSWORD -e "CREATE SCHEMA qconsp DEFAULT CHARACTER SET utf8";
sudo service mysql restart
# Installs basic dependencies
sudo apt-get install -y unzip git curl
# Configures prompt color
sed -i 's/#force_color_prompt=yes/force_color_prompt=yes/g' ~/.bashrc
echo 'source ~/.bashrc' >> ~/.bash_profile
source ~/.bash_profile
# Cleaning unneeded packages
sudo apt-get autoremove -y
sudo apt-get clean
# sets "run once" flag
date > $RUN_ONCE_FLAG
| true
|
761d9ad3c085e60dcd0f73bda465b5b086c0e5e3
|
Shell
|
shivansh/parallel-image-filtering
|
/test.sh
|
UTF-8
| 384
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
#
# This script compares the output of the convoluteizing program against the
# expected output for the three bundled test cases.
#
# Usage: ./test.sh <num-mpi-processes>
for i in 1 2 3; do
    rm -f "my_output$i"
    # Feed the sample input via redirection (the original piped it through a
    # useless cat) and quote the process count.
    mpirun -n "$1" -stdin all ./convolute < "testcases/sample_input$i" > "my_output$i"
    # Word-level coloured diff against the expected output; --no-index lets
    # git diff compare files outside the repository index.
    git --no-pager diff --no-index --color-words "testcases/sample_output$i" "my_output$i"
    rm -f "my_output$i"
done
| true
|
fca9ac61c249d1143010928dcf75c82c173d6c16
|
Shell
|
jamsyoung/barebones-webpack-hmr-express-react
|
/.scripts/colors.sh
|
UTF-8
| 342
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/bash
# Terminal colour palette sourced by the other build scripts: one variable
# per ANSI colour (tput setaf 0..8), a reset code, and pass/fail markers.
# shellcheck disable=SC2034
palette=(black red green yellow blue magenta cyan white grey)
for idx in "${!palette[@]}"; do
    # Assign each colour escape directly into the variable named after it.
    printf -v "${palette[idx]}" '%s' "$(tput setaf "$idx")"
done
unset palette idx
reset="$(tput sgr0)"
good_mark="${green}✔︎${reset}"
bad_mark="${red}𝗫${reset}"
| true
|
d96fbfc77727e1b657a6e7ccad59effd190bb1b6
|
Shell
|
xuyinhao/lgpbenchmark
|
/hiveBench/bin/create/3-1
|
UTF-8
| 491
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
# 3-1: create a view.
# After creating a base table and loading data into it, create a view over
# that table, then check that querying the view returns the same result set
# as querying the base table directly.
#
# Relies on helper functions (preTable, checkExistTb, compareTableValue) and
# the ${exec}cmd runner being provided by the harness that sources this file.
tableName="hive_view_tb_3_1"
preTableName="hive_view_base_3_1"
# pre table
preTable "${preTableName}"
# NOTE(review): ${exec}cmd expands $exec as a command-name *prefix*
# (e.g. "hivecmd") -- confirm against the harness before touching this line.
${exec}cmd "create view ${tableName} as select * from $preTableName ;"
# checkExistTb returns 0 (via stdout) when the view exists.
ret=`checkExistTb "${tableName}" "view"`
if [ $ret -eq 0 ];then
    compareTableValue $preTableName $tableName
fi
| true
|
9d590dbe4ab841eb5fcd2bfd1aef21b0339205b8
|
Shell
|
mattrix27/moos-ivp-oyster
|
/.svn/pristine/9d/9d590dbe4ab841eb5fcd2bfd1aef21b0339205b8.svn-base
|
UTF-8
| 4,908
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/bash -e
#-------------------------------------------------------
# Part 1: Check for and handle command-line arguments
#-------------------------------------------------------
TIME_WARP=1
JUST_MAKE="no"
AMT=1
GOOD_GUYS="yes"
BAD_GUYS="yes"
VTEAM1="red"
VTEAM2="blue"
for ARGI; do
if [ "${ARGI}" = "--help" -o "${ARGI}" = "-h" ] ; then
printf "%s [SWITCHES] [time_warp] \n" $0
printf " --just_make, -j \n"
printf " --help, -h \n"
exit 0;
elif [ "${ARGI//[^0-9]/}" = "$ARGI" -a "$TIME_WARP" = 1 ]; then
TIME_WARP=$ARGI
elif [ "${ARGI}" = "--just_build" -o "${ARGI}" = "-j" ] ; then
JUST_MAKE="yes"
elif [ "${ARGI}" = "--bad_guys_no" -o "${ARGI}" = "-b" ] ; then
BAD_GUYS="no"
elif [ "${ARGI}" = "--good_guys_no" -o "${ARGI}" = "-g" ] ; then
GOOD_GUYS="no"
elif [ "${ARGI:0:6}" = "--amt=" ] ; then
AMT="${ARGI#--amt=*}"
else
printf "Bad Argument: %s \n" $ARGI
exit 0
fi
done
# Ensure AMT is in the range of [1,26]; out-of-range values are clamped to
# the nearest bound. (Bugfix: the upper clamp previously set AMT=20, which
# contradicts the stated range and the 26-entry VNAMES table below.)
if [ "$AMT" -gt 26 ] ; then
    AMT=26
fi
if [ "$AMT" -lt 1 ] ; then
    AMT=1
fi
#-------------------------------------------------------
# Part 1: Create the Shoreside MOOS file
#-------------------------------------------------------
SHORE_LISTEN="9300"
nsplug meta_shoreside.moos targ_shoreside.moos -f WARP=$TIME_WARP \
SNAME="shoreside" SHARE_LISTEN=$SHORE_LISTEN SPORT="9000" \
VTEAM1=$VTEAM1 VTEAM2=$VTEAM2
if [ ! -e targ_shoreside.moos ]; then echo "no targ_shoreside.moos"; exit; fi
#-------------------------------------------------------
# Part 2: Create the GoodGuy .moos and .bhv files.
#-------------------------------------------------------
VNAME="henry"
START_POS="0,0,180"
nsplug meta_vehicle.moos targ_henry.moos -f WARP=$TIME_WARP \
VNAME=$VNAME SHARE_LISTEN="9301" \
VPORT="9001" SHORE_LISTEN=$SHORE_LISTEN \
VTEAM=$VTEAM1 START_POS=$START_POS
nsplug meta_vehicle.bhv targ_henry.bhv -f VNAME=$VNAME \
START_POS=$START_POS
if [ ! -e targ_henry.moos ]; then echo "no targ_henry.moos"; exit; fi
if [ ! -e targ_henry.bhv ]; then echo "no targ_henry.bhv "; exit; fi
#-------------------------------------------------------
# Part 3: Create the BadGuy .moos and .bhv files.
#-------------------------------------------------------
VNAMES=( apia baku cary doha elko fahy galt hays iola juba kiev lima mesa
nuuk oslo pace quay rome sako troy ubly vimy waco xian york zahl )
STARTX=( 0 40 60 80 100 120 140 5 25 45 65 85 105 125 145 10 50 90 130
15 55 95 135 140 145 150 155 )
for INDEX in `seq 0 $(($AMT-1))`;
do
VNAME=${VNAMES[$INDEX]}
VPOSX=${STARTX[$INDEX]}
VPORT=`expr $INDEX + 9400`
LPORT=`expr $INDEX + 9500`
START_POS=$VPOSX",-80,180"
echo "Vehicle:" $VNAME
echo "Index:" $INDEX "Port:" $VPORT "POS:" $START_POS
nsplug meta_tagger.moos targ_$VNAME.moos -f WARP=$TIME_WARP \
VNAME=$VNAME \
VPORT=$VPORT \
VTEAM=$VTEAM2 \
SHARE_LISTEN=$LPORT \
SHORE_LISTEN=$SHORE_LISTEN \
START_POS=$START_POS
nsplug meta_tagger.bhv targ_$VNAME.bhv -f VNAME=$VNAME \
START_POS=$START_POS
if [ ! -e targ_$VNAME.moos ]; then echo "no targ_$VNAME.moos"; exit; fi
if [ ! -e targ_$VNAME.bhv ]; then echo "no targ_$VNAME.bhv "; exit; fi
done
#-------------------------------------------------------
# Part 4: Possibly exit now if we're just building targ files
#-------------------------------------------------------
if [ "${JUST_MAKE}" = "yes" ] ; then
    printf "targ files built. Nothing launched.\n"
    exit 0
fi
# Bugfix: the original test read `= "no"]` (missing space before the closing
# bracket), so this guard always failed with a test syntax error. Two [ ]
# tests joined by && also replace the obsolescent -a operator.
if [ "${BAD_GUYS}" = "no" ] && [ "${GOOD_GUYS}" = "no" ] ; then
    printf "targ files built. Nothing launched.\n"
    exit 0
fi
#-------------------------------------------------------
# Part 5: Launch the Shoreside
#-------------------------------------------------------
printf "Launching $SNAME MOOS Community (WARP=%s) \n" $TIME_WARP
pAntler targ_shoreside.moos >& /dev/null &
printf "Done Launching Shoreside \n"
#-------------------------------------------------------
# Part 6: Launch the GoodGuy processes
#-------------------------------------------------------
if [ ${GOOD_GUYS} = "yes" ] ; then
printf "Launching $VNAME MOOS Community (WARP=%s) \n" $TIME_WARP
pAntler targ_henry.moos >& /dev/null &
printf "Done Launching Good Guys \n"
fi
#-------------------------------------------------------
# Part 7: Launch the BadGuy processes
#-------------------------------------------------------
for INDEX in `seq 0 $(($AMT-1))`;
do
VNAME=${VNAMES[$INDEX]}
printf "Launching $VNAME MOOS Community (WARP=%s) \n" $TIME_WARP
pAntler targ_$VNAME.moos >& /dev/null &
sleep 0.1
done
printf "Done Launching Bad Guys \n"
uMAC targ_shoreside.moos
printf "Killing all processes ... \n"
mykill
printf "Done killing processes. \n"
| true
|
cade8573ec127a7bee3567c5195a4b5430c27713
|
Shell
|
ahmedt26/CS1XA3
|
/Project01/project_analyze.sh
|
UTF-8
| 7,531
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/bash
cd ..
# Fancy script startup. It's nothing functional. Trying to make it seem more like a UI.
echo ' '
echo '====================================================='
echo ' ...Initializing GPPE... '
echo '====================================================='
echo ' '
sleep 0.5
echo ' ............... '
sleep 0.5
echo ' .......Epic Buildup........ '
sleep 0.5
echo ' ............... '
sleep 0.5
echo ' '
echo '====================================================='
echo ' Welcome to the General Purpose Project Helper! '
sleep 0.5
echo ' By: Tahseen Ahmed, ahmedt26 '
echo '====================================================='
sleep 0.5
echo ' '
# I made this UI to be fairly intuitive. The only reason for
# someone to read the README is for detailed instructions
# on what each of the features do, and not for UI-related help.
# Regardless, I've iven instructions on how to use the UI
# for time travellers.
echo '====================================================='
echo ' If this is your first time touching a computer... '
sleep 0.5
echo ' Nice time travel, read the README. '
echo '====================================================='
sleep 0.5
echo ' '
selectScreen() { # Prints a menu which displays available actions.
echo '====================================================='
echo 'Select the action that best suits your needs: '
sleep 0.20
echo 'Type in code word to execute command '
sleep 0.20
echo 'Create TODO Log - TODO '
sleep 0.20
echo 'Last Modded File - LMF '
sleep 0.20
echo 'File Type Count - FTC '
sleep 0.20
echo 'Super Secret Function - IAMTHESENATE '
sleep 0.20
echo 'End Script - BYEBYE '
echo '====================================================='
}
#######################################
# Read a code word from the user and dispatch to the matching feature;
# re-prompts on empty or unrecognized input.
# Bugfix: the original compared an unquoted $code with "=" (a syntax error
# on empty/multi-word input) and used the numeric test [ $code -eq 0 ] to
# detect "no input", which errors on any non-numeric string. A case
# statement handles all inputs safely.
#######################################
selectAction() {
    read -p 'Select a codeword: ' code
    case "$code" in
        BYEBYE)
            echo "You chose the codeword: $code"
            byebye
            ;;
        IAMTHESENATE)
            echo "You chose the codeword: $code"
            order66
            ;;
        TODO)
            echo "You chose the codeword: $code"
            toDo
            ;;
        FTC)
            echo "You chose the codeword: $code"
            fileTypeCount
            echo ' I did not do this yet! :( Sorry. '
            ;;
        LMF)
            echo "You chose the codeword: $code"
            lastModdedFile
            ;;
        '')
            echo "You gave me no input! Try again."
            selectAction
            ;;
        *)
            echo "You gave me gibberish or had a typo. Check caps and spelling."
            selectAction
            ;;
    esac
}
toDo() { # Creates a log of lines with #TODO in them along with their respective filepaths.
echo ' '
echo '====================================================='
echo 'Creating a To Do Log File and finding TODO items...'
rm -f ./Project01/logs/todo.log
touch ./Project01/logs/todo.log
grep -r '#TODO' --exclude="project_analyze.sh" --exclude="README.md" --exclude="todo.log" . >> ./Project01/logs/todo.log
sleep 0.25
echo 'Log file compliled!'
echo ' '
echo '=====================================================' # Reads file to user for convienence.
echo ' Here is what I found: '
cat ./Project01/logs/todo.log
echo '====================================================='
echo ' '
selectScreen
selectAction
}
fileTypeCount() { # Counts the number of filetype extensions in the repository. WIP
echo ' Counting filetypes in repository...'
html=$(find . -name "*.html" | wc -l)
javascript=$(find . -name "*.js" | wc -l)
css=$(find . -name "*.css" | wc -l)
py=$(find . -name "*.py" | wc -l)
haskell=$(find . -name "*.hs" | wc -l)
bsh=$(find . -name "*.sh" | wc -l)
sleep 0.5 # The output: a fancy display of all the totals.
echo ' '
echo '====================================================='
echo ' Results '
sleep 0.20
echo "HTML: $html"
sleep 0.20
echo "JavaScript: $javascript"
sleep 0.20
echo "CSS: $css"
sleep 0.20
echo "Python: $py"
sleep 0.20
echo "Haskell: $haskell"
sleep 0.20
echo "Bash Script: $bsh"
sleep 0.20
echo ' '
echo 'Filetype Scan Complete!'
echo '====================================================='
echo ' '
selectScreen # Call selectScreen again to pick another ation.
selectAction
}
lastModdedFile() { # Finds all modified files within the last x minutes/hours/days in the current directory (project repo).
echo ' '
echo 'Tell me how far back you want to search for modified files, use integers please.'
sleep 1.0
echo 'Use M for minutes, H for hours and D for days.'
sleep 1.0
read -p 'M, H or D? ' unit # Read user's time frame.
read -p 'What interval? ' interval
if [ $unit = 'M' ] ; then
echo ' '
echo '====================================================='
echo "Searching for files modified within the last $interval minute(s)."
find . -mmin -$interval #-mmin takes the input as minutes, and the - before the number means less than or equal to.
echo ' '
echo 'All done!'
echo '====================================================='
echo ' '
selectScreen
selectAction
fi
if [ $unit = 'H' ] ; then
echo ' '
echo '====================================================='
echo "Searching for files modified within the last $interval hour(s)."
find . -mmin -$(($interval * 60))
echo ' '
echo 'All done!'
echo '====================================================='
echo ' '
selectScreen
selectAction
fi
if [ $unit = 'D' ] ; then
echo ' '
echo '====================================================='
echo "Searching for files modified within the last $interval day(s)."
find . -mtime -$interval # -mtime takes the input as number of days.
echo ' '
echo 'All done!'
echo '====================================================='
echo ' '
selectScreen
selectAction
fi
}
order66() { # I am the senate. Prints the Tradedy of Darth Plaeuis Copypasta.
echo ' '
echo '====================================================='
echo 'Did you ever hear the tragedy of Darth Plagueis The Wise?'
sleep 1
echo 'I thought not.'
sleep 1
echo 'Its not a story the Jedi would tell you. Its a Sith legend. '
sleep 1
echo 'Darth Plagueis was a Dark Lord of the Sith,'
sleep 1
echo 'so powerful and so wise he could use the Force to influence the midichlorians to create life '
sleep 1
echo 'He had such a knowledge of the dark side that he could even keep the ones he cared about from dying. '
sleep 1
echo 'The dark side of the Force is a pathway to many abilities some consider to be unnatural. '
sleep 1
echo 'He became so powerful the only thing he was afraid of was losing his power, which eventually, of course, he did. '
sleep 1
echo 'Unfortunately, he taught his apprentice everything he knew, then his apprentice killed him in his sleep. '
sleep 1
echo '...'
sleep 1
echo 'Ironic.'
sleep 1
echo 'He could save others from death, but not himself.'
echo ' '
echo '====================================================='
echo ' '
sleep 1
selectScreen
selectAction
}
byebye() { # Exits script.
echo "Goodbye!"
sleep 0.25
exit 0;
}
# Actual initialization of program functions. Will recursively go through actions until BYEBYE code used.
selectScreen
selectAction
| true
|
802829c8f248ba394fc746ff3a568b6f21bf4dfe
|
Shell
|
gurkamal/Continuous-Delivery-example
|
/scripts/multitail.sh
|
UTF-8
| 353
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/sh
# Tail today's stderrout logs from both app nodes side by side via multitail.
# Capture the date in a single call so a run that straddles midnight cannot
# mix components from two different days (the original called date three
# separate times for year, month and day).
stamp=$(date +"%Y_%m_%d")
echo Tailing logs for ${stamp}
multitail \
  -l "ssh bekkopen@node2.morisbak.net tail -1000f /server/bekkopen/logs/stderrout.${stamp}.log" \
  -l "ssh bekkopen@node3.morisbak.net tail -1000f /server/bekkopen/logs/stderrout.${stamp}.log"
| true
|
a86dfb1ebb34fdbebfb93edef12866273ba9ee6b
|
Shell
|
leechau926/rmrb
|
/rmrbd.sh
|
UTF-8
| 513
| 3.015625
| 3
|
[] |
no_license
|
#! /bin/bash
# Download all pages of one day's People's Daily (rmrb) e-paper as PDFs and
# merge them into a single document.
#
# Usage: rmrbd.sh <page-count> <YYYYMMDD>
# date definition
def=$2
year=${def:0:4}
month=${def:4:2}
day=${def:6:2}
# clear download links file
echo "" > down.txt
# create download links and write them to file
for i in `seq -w -s ' ' 1 $1`
do
echo "http://paper.people.com.cn/rmrb/page/${year}-${month}/${day}/$i/rmrb${year}${month}${day}$i.pdf" >> down.txt
done
# download files
wget -i down.txt -P ~/rmrb -a ./rmrb.log
# AFTER: apt install pdftk
# NOTE(review): wget saves into ~/rmrb but pdftk globs ./*.pdf in the current
# directory -- this only lines up when the script is run from ~/rmrb; confirm.
pdftk ./*.pdf cat output ~/rmrb${year}${month}${day}.pdf
# Delete temp files
# NOTE(review): this removes *every* PDF in the current directory, not only
# the pages downloaded by this run.
rm *.pdf
| true
|
af42cf6b22eb8a72e2afa8cf235527af19272f5a
|
Shell
|
rezroo/scan
|
/container_utils/run-cis-scan.sh
|
UTF-8
| 495
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/sh
# Wrapper for docker-bench-security.sh: run the CIS benchmark, defaulting the
# log location to /results when the caller did not pass -l themselves.
#
# Bugfix: the original pre-initialised $logfile (to the script's own name),
# so the "[ -z ${logfile+x} ]" is-unset test could never succeed and the
# /results default was dead code.
logfile=
while getopts ":l:" opt; do
    case "${opt}" in
        l)
            logfile=${OPTARG}
            ;;
        :)
            echo "Option $opt and -$OPTARG requires an argument." >&2
            exit 1
            ;;
        \?)
            #echo "Invalid option $opt and $OPTARG"
            ;;
    esac
done

if [ -z "${logfile}" ]; then
    # No -l supplied: log under /results and forward the original arguments.
    ./docker-bench-security.sh -l /results "$@"
else
    # Caller chose a log file; their -l/-value pair is still present in "$@".
    ./docker-bench-security.sh "$@"
fi
|
79e4903c279d0134f1dfddf26eb03ffd30eea271
|
Shell
|
brendanfitz/python-airflow-portfolio-data-pipeline
|
/bash_scripts/airflow_setup.sh
|
UTF-8
| 1,017
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
# Install Apache Airflow 1.10.12 (postgres + aws extras) plus its system
# dependencies, after confirming the user has sourced the bashrc updates.
echo $'Did you run airflow_bashrc_updates.sh and then re-source your bashrc?\nEnter "yes" or "no":'
read answer
# Quote the comparison (and use portable =) so an empty or multi-word answer
# cannot break the test.
if [ "$answer" = 'yes' ]
then
    sudo apt-get update
    sudo apt-get install build-essential
    pip install \
        apache-airflow[postgres,aws]==1.10.12 \
        --constraint "https://raw.githubusercontent.com/apache/airflow/constraints-1.10.12/constraints-3.7.txt"
    # NOTE: libffi6 is deliberately absent from this list -- it is not
    # available on Ubuntu 20.04 and is installed from a .deb below.
    # (Bugfix: the original kept "# libffi6 \" *inside* the continuation
    # list; a comment line ends the backslash chain, so apt-get stopped at
    # ldap-utils and the remaining package names ran as stand-alone
    # commands.)
    sudo apt-get install -y --no-install-recommends \
        freetds-bin \
        krb5-user \
        ldap-utils \
        libsasl2-2 \
        libsasl2-modules \
        libssl1.1 \
        locales \
        lsb-release \
        sasl2-bin \
        sqlite3 \
        unixodbc
    # correction for errors installing libffi6 on ubuntu 20.04
    wget http://mirrors.kernel.org/ubuntu/pool/main/libf/libffi/libffi6_3.2.1-8_amd64.deb
    sudo apt-get install ./libffi6_3.2.1-8_amd64.deb
    rm ./libffi6_3.2.1-8_amd64.deb
    echo $'\nInstallation Complete\n'
else
    echo $'\nPlease do so\n'
fi
| true
|
c3d9424de042e66b17c97cb7993f97aeeae36ae4
|
Shell
|
ca-borja/lz77ppm
|
/bin/test-pipe.sh
|
UTF-8
| 242
| 2.953125
| 3
|
[
"Unlicense"
] |
permissive
|
# Round-trip test: compress $1, decompress the stream again, and diff the
# result against the original file. Without pipefail the pipeline's status
# is diff's status, so test it directly instead of inspecting $? afterwards.
if ./lz77ppm "$1" | ./lz77ppm -d | diff "$1" -; then
    echo -e "\033[1;32mOK, the decompressed file is equal to the original.\033[0m"
else
    echo -e "\033[1;31mError! The decompressed file differs from the original.\033[0m"
fi
| true
|
813bf5a78e8901d6f1fb1324a98398f73a84a825
|
Shell
|
erochest/slab-blog-posts
|
/bin/mdpost2html.sh
|
UTF-8
| 143
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash
# Convert a markdown blog post to HTML5 (smart typography), post-process its
# code blocks, copy the result to the clipboard and open it in a browser.
# pbcopy/open are macOS-specific.
#
# Usage: mdpost2html.sh <post.md>
SRC="$1"
DEST="${SRC%.md}.html"
# Quote the paths so filenames containing spaces survive word-splitting.
pandoc --smart -f markdown -t html5 "$SRC" | ./bin/fix_code.py > "$DEST"
# Redirect instead of piping through a useless cat.
pbcopy < "$DEST"
open "$DEST"
| true
|
fc44a01d3d0dbb4d6e37fc465bce815ddeb0ccc3
|
Shell
|
Arin-Er/Systeembeheer
|
/scripts/firewallscript
|
UTF-8
| 363
| 3.140625
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#!/bin/bash
# init-style wrapper that persists and restores iptables/ip6tables rule sets.
#
#   start   - load the saved IPv4/IPv6 rules into the kernel
#   stop    - dump the current in-kernel rules back to disk
#   restart - save the live rules, then immediately reload them
start () {
    # Load previously saved rules.
    iptables-restore < /etc/iptables/regelsIPV4
    ip6tables-restore < /etc/iptables/regelsIPV6
}
stop () {
    # Persist the live rules so they survive a reboot.
    iptables-save > /etc/iptables/regelsIPV4
    ip6tables-save > /etc/iptables/regelsIPV6
}
case "$1" in
    start)
        start
        ;;
    stop)
        stop
        ;;
    restart)
        stop
        start
        ;;
esac
# NOTE(review): an unrecognised (or missing) argument falls through silently
# and still exits 0 -- confirm that is intended before adding a usage message.
exit 0
|
30fc1cba0baea01b8ceae793b65321c74b2b2894
|
Shell
|
danielpeach/external-accounts
|
/build-tools/version.sh
|
UTF-8
| 3,031
| 4.3125
| 4
|
[] |
no_license
|
#!/bin/bash
#------------------------------------------------------------------------------------------------------------------
# Calculates the next version based on git tags and current branch.
#
# Examples:
# * Snapshot version: 0.1.0-snapshot.[uncommitted].chore.foo.bar.test.050f9cd
# * RC version: 0.1.0-rc.9
# * Release version: 0.1.0
#
# Step logic:
# * On release branch, only patch version is stepped depending on the latest git tag matching the branch name
# * On all other branches, if latest tag is not rc, step minor and set patch=0. Otherwise, step rc number
#------------------------------------------------------------------------------------------------------------------
VERSION_TYPE=${VERSION_TYPE:-snapshot}
BRANCH=${BRANCH:-$(git rev-parse --abbrev-ref HEAD)}
[[ ! $VERSION_TYPE =~ snapshot|rc|release ]] && echo "Usage: $(basename "$0"). Optional environment variables: VERSION_TYPE=snapshot|rc|release, BRANCH" && exit 1
function first_version() {
case $BRANCH in
release-*)
tmp=$(echo "$BRANCH" | cut -d'-' -f 2)
read -r br_major br_minor br_patch <<<"${tmp//./ }"
current_branch_version="${br_major}.${br_minor}.0-rc.0"
;;
*)
current_branch_version=0.1.0-rc.0
;;
esac
}
function get_current_branch_version() {
case $BRANCH in
release-*)
tmp=$(echo "$BRANCH" | cut -d'-' -f 2)
read -r br_major br_minor br_patch <<<"${tmp//./ }"
current_branch_version=$(git tag --sort=-v:refname | grep "v$br_major.$br_minor" | head -1 | sed 's|v||g')
;;
*)
current_branch_version=$(git tag --sort=-v:refname | head -1 | sed 's|v||g')
;;
esac
if [[ -z $current_branch_version ]]; then
first_version
fi
}
function split_in_version_parts() {
full_version=$1
read -r major minor patch rc <<<"${full_version//./ }"
patch=$(echo "$patch" | sed 's|[^0-9]*||g') # Remove "-rc" from patch part
}
function step_version() {
SOLID_VERSION_RELEASED=$([[ -n $(git tag -l "v$major.$minor.$patch") ]] && echo 1 || echo 0)
case $BRANCH in
release-*)
if [[ $SOLID_VERSION_RELEASED = 1 ]]; then
((patch++))
rc=1
else
((rc++))
fi
;;
*)
if [[ $SOLID_VERSION_RELEASED = 1 ]]; then
((minor++))
patch=0
rc=1
else
((rc++))
fi
;;
esac
}
function format_version() {
case $VERSION_TYPE in
snapshot)
if [ "x$(git status --porcelain)" != "x" ]; then u=".uncommitted"; fi
br=$(echo ".$BRANCH" | sed 's|[-/_]|.|g')
commit=$(git rev-parse --short HEAD)
output_version="${major}.${minor}.${patch}-snapshot$u$br.$commit"
;;
rc)
output_version="${major}.${minor}.${patch}-rc.${rc}"
;;
release)
output_version="${major}.${minor}.${patch}"
;;
esac
}
# Main flow: read the latest version from git tags, split it into parts,
# step it forward, format it, and print the result without a trailing
# newline (so callers can capture it verbatim).
get_current_branch_version
split_in_version_parts "$current_branch_version"
#echo "Current version: major: $major, minor: $minor, patch: $patch, rc: $rc"
step_version
#echo "New version: major: $major, minor: $minor, patch: $patch, rc: $rc"
format_version
echo -n "$output_version"
| true
|
0a2a14e6ea2689d74b4768aa69c0c547e029d01c
|
Shell
|
sleavitt/solr-ecs
|
/6.0/wrapper.sh
|
UTF-8
| 450
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/bash
# Start Solr in SolrCloud mode inside an ECS container, advertising the EC2
# host/port so other cluster nodes can reach it. Host and placement default
# to values from the EC2 instance metadata service.
: ${PUBLIC_PORT:="80"}
: ${PUBLIC_HOST:=`wget -qO- http://169.254.169.254/latest/meta-data/hostname`}
: ${PLACEMENT:=`wget -qO- http://169.254.169.254/latest/placement/availability-zone`}
# Solr must own its data dir; seed solr.xml only if it is not already there (-n).
chown -R $SOLR_USER:$SOLR_USER $SOLR_HOME
cp -n /opt/solr/server/solr/solr.xml $SOLR_HOME
# Foreground (-f), cloud mode (-c) against $ZOOKEEPER_HOSTS; any extra script
# arguments are appended to the JVM property string.
sudo -E -u $SOLR_USER /opt/solr/bin/solr start -f -h $PUBLIC_HOST -c -z $ZOOKEEPER_HOSTS -s $SOLR_HOME -a "-DhostPort=$PUBLIC_PORT -Dplacement=$PLACEMENT"$@
| true
|
60c87bb3f1e18f50df72299f339d9ea6ecdb22f1
|
Shell
|
eekes/blog-vagrant-1
|
/provision/components/apache.sh
|
UTF-8
| 406
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/bash
# Vagrant provisioner: install Apache and swap the default site for the
# project's vhost.
apt-get update
apt-get install -y apache2
# Copy the vhost config file
cp /var/www/provision/config/apache/vhosts/mytestproject.local.com.conf /etc/apache2/sites-available/mytestproject.local.com.conf
# Disable the default vhost file
a2dissite 000-default
# Enable our custom vhost file
a2ensite mytestproject.local.com.conf
# Restart for the changes to take effect
service apache2 restart
| true
|
3deb62c51092ae4452f11f00e310f8e87379bf1c
|
Shell
|
taktik/ozone-components
|
/packages/demo/ozone-components-demo/docker/jsonpatch.sh
|
UTF-8
| 868
| 4
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# For each JSON file given on the command line, overwrite any value whose
# slash-joined, upper-cased path matches a defined environment variable,
# e.g. {"db":{"host":...}} is patched from $DB_HOST when that var is set.
# Requires jq.

# Iterate "$@" directly: the old JSON_FILES=$@ round-trip lost argument
# boundaries for paths containing whitespace.
for JSON_FILE in "$@"; do
  if [ -r "$JSON_FILE" ]; then
    # jq filter under construction, e.g.: .["db"]["host"] = env.DB_HOST | ...
    JQ_CMD=""
    # Every path in the document rendered as "a/b/c" (array indices included).
    # NOTE: keys themselves are assumed to be whitespace-free (the unquoted
    # word-split below is what separates one path from the next).
    KEYS=$(jq -r -c 'path(..)|[.[]|tostring]|join("/")' "$JSON_FILE")
    for KEY in $KEYS; do
      # Build the environment key: upper-case, slashes become underscores.
      ENV_KEY=${KEY^^}
      ENV_KEY=${ENV_KEY//\//_}
      # Only patch when the variable is actually defined (-v).
      if [ -v "$ENV_KEY" ]; then
        # Build the jq path expression for this key.
        JQ_PATH="."
        IFS='/' read -r -a PARTS <<< "$KEY"
        for PART in "${PARTS[@]}"; do
          JQ_PATH+="[\"$PART\"]"
        done
        # Append this assignment to the filter.
        [ "$JQ_CMD" != "" ] && JQ_CMD+=" | "
        JQ_CMD+="${JQ_PATH} = env.$ENV_KEY"
      fi
    done
    # Apply the filter by passing it to jq directly instead of via eval:
    # the eval form allowed shell injection through crafted key names.
    jq -M "${JQ_CMD:-.}" "$JSON_FILE" > "${JSON_FILE}.tmp"
    cat "${JSON_FILE}.tmp" > "$JSON_FILE"
    rm "${JSON_FILE}.tmp"
  fi
done
| true
|
eff09dc7a37724e423dfa1af3f1ede50f1b6b354
|
Shell
|
SerenityOS/serenity
|
/Ports/cfunge/package.sh
|
UTF-8
| 452
| 2.6875
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/usr/bin/env -S bash ../.port_include.sh
# SerenityOS port of cfunge (a Befunge interpreter), built with CMake.
# The shebang sources the shared port framework, which supplies `run`,
# ${configopts[@]}, ${makeopts[@]} and ${SERENITY_INSTALL_ROOT}.
port='cfunge'
version='2bc4fb27ade2a816ca9a90a6d9f6958111123fa9'
useconfigure='true'
files=(
    "https://github.com/VorpalBlade/cfunge/archive/${version}.zip 364994a890ed1083684956db576a2a5cfb94b3117bae868910d6a75111033f55"
)
# Configure out-of-tree into build/.
configure() {
    run cmake -B build "${configopts[@]}"
}
build() {
    run make -C build "${makeopts[@]}"
}
# Upstream has no suitable install target; copy the binary directly.
install() {
    run cp build/cfunge "${SERENITY_INSTALL_ROOT}/bin"
}
| true
|
2267d3fb8f42e65aedf2952824981287ffc7730f
|
Shell
|
CDLUC3/merritt-docker
|
/bin/it_build.sh
|
UTF-8
| 1,436
| 3.328125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
#set -x
# Build and push every Merritt integration-test service image to ECR.
# get dir of this script
START_DIR=$(pwd)
SCRIPT_HOME=$(dirname $0)
DOCKER_ENV_FILE=$SCRIPT_HOME/docker_environment.sh
# source env vars
echo "Setting up docker environment"
[ -f "$DOCKER_ENV_FILE" ] && . "$DOCKER_ENV_FILE" || echo "file $DOCKER_ENV_FILE not found"
# cd into mrt-services
REPOS_DIR="$SCRIPT_HOME/../mrt-inttest-services"
cd $REPOS_DIR
echo "Setup ECS login"
echo "ECR_REGISTRY: $ECR_REGISTRY"
# Authenticate docker against the private registry; abort on failure since
# every push below depends on it.
aws ecr get-login-password --region us-west-2 | \
  docker login --username AWS \
  --password-stdin ${ECR_REGISTRY} || exit 1
echo
echo "Building mock-merritt-it"
docker-compose -f mock-merritt-it/docker-compose.yml build --pull
docker-compose -f mock-merritt-it/docker-compose.yml push
echo
echo "Building mrt-it-database"
docker-compose -f mrt-it-database/docker-compose.yml build --pull
docker-compose -f mrt-it-database/docker-compose.yml push
echo "Building mrt-it-database-audit-replic"
docker-compose -f mrt-it-database/docker-compose-audit-replic.yml build --pull
docker-compose -f mrt-it-database/docker-compose-audit-replic.yml push
echo
echo "Building mrt-minio-it"
docker-compose -f mrt-minio-it/docker-compose.yml build --pull
docker-compose -f mrt-minio-it/docker-compose.yml push
echo
echo "Building mrt-minio-with-content-it"
docker-compose -f mrt-minio-it-with-content/docker-compose.yml build --pull
docker-compose -f mrt-minio-it-with-content/docker-compose.yml push
| true
|
11cdeb8ac023cc53e70d7a468b387d7f20b49c5a
|
Shell
|
linux-on-ibm-z/scripts
|
/TensorflowServing/2.2.0/build_tensorflow_serving.sh
|
UTF-8
| 13,015
| 3.390625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# © Copyright IBM Corporation 2020.
# LICENSE: Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
#
# Instructions:
# Download build script: wget https://raw.githubusercontent.com/linux-on-ibm-z/scripts/master/TensorflowServing/2.2.0/build_tensorflow_serving.sh
# Execute build script: bash build_tensorflow_serving.sh (provide -h for help)
#
set -e -o pipefail
PACKAGE_NAME="tensorflow-serving"
PACKAGE_VERSION="2.2.0"
SOURCE_ROOT="$(pwd)"
USER="$(whoami)"
FORCE="false"   # -y: skip the interactive confirmation in prepare()
TESTS="false"   # -t: run the test suite after the build
LOG_FILE="${SOURCE_ROOT}/logs/${PACKAGE_NAME}-${PACKAGE_VERSION}-$(date +"%F-%T").log"
# Remove downloaded artifacts on exit, hangup, interrupt or error.
trap cleanup 0 1 2 ERR
#Check if directory exists
if [ ! -d "$SOURCE_ROOT/logs/" ]; then
    mkdir -p "$SOURCE_ROOT/logs/"
fi
# Pull in ID / VERSION_ID / PRETTY_NAME for distro detection below.
if [ -f "/etc/os-release" ]; then
    source "/etc/os-release"
fi
# Verify sudo is available and, unless FORCE is set, interactively ask the
# user to confirm before dependencies are installed/upgraded.
function prepare() {
    if command -v "sudo" >/dev/null; then
        printf -- 'Sudo : Yes\n' >>"$LOG_FILE"
    else
        printf -- 'Sudo : No \n' >>"$LOG_FILE"
        printf -- 'You can install the same from installing sudo from repository using apt, yum or zypper based on your distro. \n'
        exit 1
    fi
    if [[ "$FORCE" == "true" ]]; then
        printf -- 'Force attribute provided hence continuing with install without confirmation message\n' |& tee -a "$LOG_FILE"
    else
        # Ask user for prerequisite installation
        printf -- "\nAs part of the installation, dependencies would be installed/upgraded. \n"
        while true; do
            read -r -p "Do you want to continue (y/n) ? : " yn
            case $yn in
            [Yy]*)
                printf -- 'User responded with Yes. \n' >> "$LOG_FILE"
                break
                ;;
            [Nn]*) exit ;;
            *) echo "Please provide confirmation to proceed." ;;
            esac
        done
    fi
}
function cleanup() {
    # Drop the Bazel source archive left behind by the build and log it.
    local archive="$SOURCE_ROOT/bazel/bazel-2.0.0-dist.zip"
    rm -rf $archive
    printf -- "Cleaned up the artifacts\n" | tee -a "$LOG_FILE"
}
# Build Bazel, TensorFlow and TensorFlow Serving from source for s390x,
# applying the linux-on-ibm-z patch set, then install the model server and
# the Python serving API. Reads the globals SOURCE_ROOT, LOG_FILE, DISTRO.
function configureAndInstall() {
    printf -- 'Configuration and Installation started \n'
    printf -- "Create symlink for python 3 only environment\n" |& tee -a "$LOG_FILE"
    sudo ln -sf /usr/bin/python3 /usr/bin/python || true
    #Install grpcio
    printf -- "\nInstalling grpcio. . . \n"
    export GRPC_PYTHON_BUILD_SYSTEM_OPENSSL=True
    sudo -E pip3 install grpcio |& tee -a "${LOG_FILE}"
    # Build Bazel
    printf -- '\nBuilding Bazel..... \n'
    cd $SOURCE_ROOT
    mkdir bazel && cd bazel
    wget https://github.com/bazelbuild/bazel/releases/download/2.0.0/bazel-2.0.0-dist.zip
    unzip bazel-2.0.0-dist.zip
    sudo chmod -R +w .
    #Adding fixes and patches to the files
    PATCH="https://raw.githubusercontent.com/linux-on-ibm-z/scripts/master/Tensorflow/2.2.0/patch"
    curl -sSL $PATCH/patch1.diff | patch -p1 || echo "Error: Patch Bazel conditions/BUILD file"
    curl -sSL $PATCH/patch2.diff | patch -Np0 --ignore-whitespace || echo "Error: Patch Bazel third_party/BUILD file"
    # Give the bootstrap javac a fixed 1g heap (line 152 of compile.sh).
    sed -i "152s/-classpath/-J-Xms1g -J-Xmx1g -classpath/" scripts/bootstrap/compile.sh
    cd $SOURCE_ROOT/bazel
    env EXTRA_BAZEL_ARGS="--host_javabase=@local_jdk//:jdk" bash ./compile.sh
    export PATH=$PATH:$SOURCE_ROOT/bazel/output/
    echo $PATH
    #Patch Bazel Tools
    cd $SOURCE_ROOT/bazel
    bazel --host_jvm_args="-Xms1024m" --host_jvm_args="-Xmx2048m" build --host_javabase="@local_jdk//:jdk" //:bazel-distfile
    JTOOLS=$SOURCE_ROOT/remote_java_tools_linux
    mkdir -p $JTOOLS && cd $JTOOLS
    unzip $SOURCE_ROOT/bazel/derived/distdir/java_tools_javac11_linux-v7.0.zip
    curl -sSL $PATCH/tools.diff | patch -p1 || echo "Error: Patch Bazel tools"
    # Build TensorFlow
    printf -- '\nDownload Tensorflow source code..... \n'
    cd $SOURCE_ROOT
    rm -rf tensorflow
    git clone https://github.com/linux-on-ibm-z/tensorflow.git
    cd tensorflow
    git checkout v2.2.0-s390x
    export PYTHON_BIN_PATH="/usr/bin/python3"
    # Accept all ./configure defaults non-interactively.
    yes "" | ./configure || true
    printf -- '\nBuilding Tensorflow..... \n'
    bazel --host_jvm_args="-Xms1024m" --host_jvm_args="-Xmx2048m" build //tensorflow/tools/pip_package:build_pip_package
    #Build and install TensorFlow wheel
    printf -- '\nBuilding and installing Tensorflow wheel..... \n'
    cd $SOURCE_ROOT/tensorflow
    bazel-bin/tensorflow/tools/pip_package/build_pip_package $SOURCE_ROOT/tensorflow_wheel
    sudo pip3 install $SOURCE_ROOT/tensorflow_wheel/tensorflow-2.2.0-cp*-linux_s390x.whl
    #Install Boringssl
    cd $SOURCE_ROOT
    rm -rf boringssl
    wget https://github.com/google/boringssl/archive/7f634429a04abc48e2eb041c81c5235816c96514.tar.gz
    tar -zxvf 7f634429a04abc48e2eb041c81c5235816c96514.tar.gz
    mv boringssl-7f634429a04abc48e2eb041c81c5235816c96514/ boringssl/
    cd boringssl/
    # Teach BoringSSL's build about s390x: arch name and 64-bit define.
    sed -i '/set(ARCH "ppc64le")/a \elseif (${CMAKE_SYSTEM_PROCESSOR} STREQUAL "s390x")\n\ set(ARCH "s390x")' src/CMakeLists.txt
    sed -i '/OPENSSL_PNACL/a \#elif defined(__s390x__)\n\#define OPENSSL_64_BIT' src/include/openssl/base.h
    #Build Tensorflow serving
    printf -- '\nDownload Tensorflow serving source code..... \n'
    cd $SOURCE_ROOT
    rm -rf serving
    git clone https://github.com/tensorflow/serving
    cd serving
    git checkout 2.2.0
    #Apply Patches
    export PATCH_URL="https://raw.githubusercontent.com/linux-on-ibm-z/scripts/master/TensorflowServing/2.2.0/patch"
    printf -- '\nPatching Tensorflow Serving..... \n'
    wget -O tfs_patch.diff $PATCH_URL/tfs_patch.diff
    # The patch embeds a placeholder path; point it at this checkout.
    sed -i "s?source_root?$SOURCE_ROOT?" tfs_patch.diff
    git apply tfs_patch.diff
    cd $SOURCE_ROOT/tensorflow
    wget -O tf_patch.diff $PATCH_URL/tf_patch.diff
    git apply tf_patch.diff
    printf -- '\nBuilding Tensorflow Serving..... \n'
    cd $SOURCE_ROOT/serving
    bazel --host_jvm_args="-Xms1024m" --host_jvm_args="-Xmx2048m" build --color=yes --curses=yes --local_resources 5000,1.0,1.0 --host_javabase="@local_jdk//:jdk" --verbose_failures --output_filter=DONT_MATCH_ANYTHING -c opt tensorflow_serving/model_servers:tensorflow_model_server
    bazel --host_jvm_args="-Xms1024m" --host_jvm_args="-Xmx2048m" build --verbose_failures //tensorflow_serving/tools/pip_package:build_pip_package
    bazel-bin/tensorflow_serving/tools/pip_package/build_pip_package $SOURCE_ROOT/tfs
    if [[ "$DISTRO" == "ubuntu-18.04" ]]; then
        printf -- '\nInside Ubuntu 18.04..... \n'
        sudo pip3 install --upgrade cython && sudo pip3 uninstall -y enum34
    fi
    sudo pip3 --no-cache-dir install --upgrade $SOURCE_ROOT/tfs/tensorflow_serving_api-*.whl
    sudo cp $SOURCE_ROOT/serving/bazel-bin/tensorflow_serving/model_servers/tensorflow_model_server /usr/local/bin
    #Creating tflite.model
    printf -- '\nCreating and replacing default model.tflite..... \n'
    sudo rm -rf /tmp/saved_model_half_plus_two*
    sudo python $SOURCE_ROOT/serving/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two.py
    sudo cp /tmp/saved_model_half_plus_two_tflite/model.tflite $SOURCE_ROOT/serving/tensorflow_serving/servables/tensorflow/testdata/saved_model_half_plus_two_tflite/00000123/
    # Run Tests
    runTest
    #Cleanup
    cleanup
    printf -- "\n Installation of %s %s was successful \n\n" $PACKAGE_NAME $PACKAGE_VERSION
}
# Run the TensorFlow Serving test suite when the -t flag was given.
# -e is temporarily disabled so individual test failures do not abort the
# whole build script.
function runTest() {
    set +e
    if [[ "$TESTS" == "true" ]]; then
        printf -- "TEST Flag is set , Continue with running test \n"
        if [[ "$DISTRO" == "ubuntu-16.04" ]]; then
            printf -- "Upgrade setuptools to resolve test failures with an error '_NamespacePath' object has no attribute 'sort' \n" |& tee -a "$LOG_FILE"
            sudo pip3 install --upgrade setuptools
        fi
        cd $SOURCE_ROOT/serving
        bazel --host_jvm_args="-Xms1024m" --host_jvm_args="-Xmx2048m" test --host_javabase="@local_jdk//:jdk" --test_tag_filters=-gpu,-benchmark-test -k --build_tests_only --test_output=errors --verbose_failures -c opt tensorflow_serving/...
        printf -- "Tests completed. \n"
    fi
    set -e
}
# Append system details (os-release contents, kernel version) and the
# requested package/version to the log file; echo the detected distro.
function logDetails() {
    printf -- '**************************** SYSTEM DETAILS *************************************************************\n' >>"$LOG_FILE"
    if [ -f "/etc/os-release" ]; then
        cat "/etc/os-release" >>"$LOG_FILE"
    fi
    cat /proc/version >>"$LOG_FILE"
    printf -- '*********************************************************************************************************\n' >>"$LOG_FILE"
    printf -- "Detected %s \n" "$PRETTY_NAME"
    printf -- "Request details : PACKAGE NAME= %s , VERSION= %s \n" "$PACKAGE_NAME" "$PACKAGE_VERSION" |& tee -a "$LOG_FILE"
}
# Print the usage message for this build script.
function printHelp() {
    printf '\n%s\n%s\n\n' \
        "Usage: " \
        " bash build_tensorflow_serving.sh [-d debug] [-y install-without-confirmation] [-t install-with-tests]"
}
# Parse flags: -d enable shell tracing, -y non-interactive (FORCE),
# -t run tests after the build, -h/anything-else print help.
while getopts "h?dyt" opt; do
    case "$opt" in
    h | \?)
        printHelp
        exit 0
        ;;
    d)
        set -x
        ;;
    y)
        FORCE="true"
        ;;
    t)
        TESTS="true"
        ;;
    esac
done
# Print post-install instructions for verifying the model server.
function gettingStarted() {
    printf -- '\n***********************************************************************************************\n'
    printf -- "Getting Started: \n"
    printf -- "To verify, run TensorFlow Serving from command Line : \n"
    printf -- " $ cd $SOURCE_ROOT \n"
    printf -- " $ export TESTDATA=$SOURCE_ROOT/serving/tensorflow_serving/servables/tensorflow/testdata \n"
    # \$TESTDATA must appear literally: it is set by the *user's* shell via the
    # export above, not by this script (unescaped it expanded to empty here).
    printf -- " $ tensorflow_model_server --rest_api_port=8501 --model_name=half_plus_two --model_base_path=\$TESTDATA/saved_model_half_plus_two_cpu & \n"
    # The JSON quotes are escaped so the printed curl example is valid JSON
    # (previously the quotes were consumed by the shell and disappeared).
    printf -- " $ curl -d '{\"instances\": [1.0, 2.0, 5.0]}' -X POST http://localhost:8501/v1/models/half_plus_two:predict\n"
    printf -- "Output should look like:\n"
    printf -- " $ predictions: [2.5, 3.0, 4.5]\n"
    # Added the missing newline so this message no longer runs into the
    # separator line below.
    printf -- 'Make sure JAVA_HOME is set and bazel binary is in your path in case of test case execution.\n'
    printf -- '*************************************************************************************************\n'
    printf -- '\n'
}
###############################################################################################################
# Main: log system info, confirm with the user, then install the per-distro
# dependencies and kick off the build. Only Ubuntu 18.04/20.04 are supported.
logDetails
prepare #Check Prequisites
DISTRO="$ID-$VERSION_ID"
case "$DISTRO" in
"ubuntu-20.04" )
    printf -- "Installing %s %s for %s \n" "$PACKAGE_NAME" "$PACKAGE_VERSION" "$DISTRO" |& tee -a "$LOG_FILE"
    printf -- "Installing dependencies... it may take some time.\n"
    sudo apt-get update
    sudo apt-get install sudo vim wget curl libhdf5-dev python3-dev python3-pip pkg-config unzip openjdk-11-jdk zip libssl-dev git python3-numpy libblas-dev liblapack-dev python3-scipy gfortran swig cython3 automake libtool -y |& tee -a "${LOG_FILE}"
    sudo ldconfig
    sudo pip3 install --no-cache-dir numpy==1.16.2 future wheel backports.weakref portpicker futures enum34 keras_preprocessing keras_applications h5py tensorflow_estimator setuptools pybind11 |& tee -a "${LOG_FILE}"
    export JAVA_HOME=/usr/lib/jvm/java-11-openjdk-s390x/
    export PATH=$JAVA_HOME/bin:$PATH
    configureAndInstall |& tee -a "${LOG_FILE}"
    ;;
"ubuntu-18.04" )
    printf -- "Installing %s %s for %s \n" "$PACKAGE_NAME" "$PACKAGE_VERSION" "$DISTRO" |& tee -a "$LOG_FILE"
    printf -- "Installing dependencies... it may take some time.\n"
    sudo apt-get update
    sudo apt-get install sudo vim wget curl libhdf5-dev python3-dev python3-pip pkg-config unzip openjdk-11-jdk zip libssl-dev git python3-numpy libblas-dev liblapack-dev python3-scipy gfortran swig cython3 automake libtool -y |& tee -a "${LOG_FILE}"
    sudo ldconfig
    # 18.04 pins h5py==2.10.0 (the only difference from the 20.04 branch).
    sudo pip3 install --no-cache-dir numpy==1.16.2 future wheel backports.weakref portpicker futures enum34 keras_preprocessing keras_applications h5py==2.10.0 tensorflow_estimator setuptools pybind11 |& tee -a "${LOG_FILE}"
    export JAVA_HOME=/usr/lib/jvm/java-11-openjdk-s390x/
    export PATH=$JAVA_HOME/bin:$PATH
    configureAndInstall |& tee -a "${LOG_FILE}"
    ;;
*)
    printf -- "%s not supported \n" "$DISTRO" |& tee -a "$LOG_FILE"
    exit 1
    ;;
esac
gettingStarted |& tee -a "${LOG_FILE}"
| true
|
6af768d8a9952ce2d7e82ede12f9061a6b8fea8d
|
Shell
|
JeDeLu/mpht
|
/pltd/bin/setperm
|
UTF-8
| 2,595
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
# Normalize ownership (root:root) and permissions across an MPHT install
# tree. The install path comes from $1 or the current directory; if MPHT.sh
# is not there, the tree is located by searching for an MPHT.sh whose path
# contains the expected VERSION.
VERSION="2.2"
if [[ $1 != '' ]]; then
    LOCAL_PATH="$1"
else
    LOCAL_PATH=`pwd`
fi
if [[ ! -f "${LOCAL_PATH}/MPHT.sh" ]] ; then
    # Only accept the search result when it is unambiguous (exactly one hit).
    if [[ `find / -type f -name "MPHT.sh" 2>/dev/null | grep "${VERSION}" | wc -l` -eq 1 ]] ; then
        echo "[INFO] SEARCHING FOR CORRECT PATH ..."
        PATH_NAME=`find / -type f -name "MPHT.sh" 2>/dev/null | grep "${VERSION}"`
        LOCAL_PATH=`dirname ${PATH_NAME}`
    else
        echo "[ERROR] PATH NOT FOUND - THE LOCATION OF MPHT.sh WAS NOT FOUND"
        exit 1
    fi
fi
echo "[INFO] PATH: ${LOCAL_PATH}"
# Entry-point scripts: root-owned, rwxr-x---.
chown root:root "${LOCAL_PATH}/MPHT.sh"
chown root:root "${LOCAL_PATH}/pre-processing.sh"
chmod 750 "${LOCAL_PATH}/MPHT.sh"
chmod 750 "${LOCAL_PATH}/pre-processing.sh"
if [[ -d "${LOCAL_PATH}/bin" ]]; then
    echo "[INFO] SET PERM : ${LOCAL_PATH}/bin"
    chown -R root:root "${LOCAL_PATH}/bin"
    chmod -R 750 "${LOCAL_PATH}/bin"
fi
if [[ -d "${LOCAL_PATH}/modules.d" ]] ; then
    echo "[INFO] SET PERM : ${LOCAL_PATH}/modules.d"
    chown -R root:root "${LOCAL_PATH}/modules.d"
    chmod -R 750 "${LOCAL_PATH}/modules.d"
fi
# Config-style directories: directory 750, contained files 600.
if [[ -d "${LOCAL_PATH}/patch.d" ]] ; then
    echo "[INFO] SET PERM : ${LOCAL_PATH}/patch.d"
    chown -R root:root "${LOCAL_PATH}/patch.d"
    chmod 750 "${LOCAL_PATH}/patch.d"
    chmod 600 "${LOCAL_PATH}/patch.d/"*
fi
if [[ -d "${LOCAL_PATH}/templates.d" ]] ; then
    echo "[INFO] SET PERM : ${LOCAL_PATH}/templates.d"
    chown -R root:root "${LOCAL_PATH}/templates.d"
    chmod 750 "${LOCAL_PATH}/templates.d"
    chmod 750 "${LOCAL_PATH}/templates.d/"*
    chmod 600 "${LOCAL_PATH}/templates.d/"*/*
fi
if [[ -d "${LOCAL_PATH}/profile.d" ]] ; then
    echo "[INFO] SET PERM : ${LOCAL_PATH}/profile.d"
    chown -R root:root "${LOCAL_PATH}/profile.d"
    chmod 750 "${LOCAL_PATH}/profile.d"
    chmod 600 "${LOCAL_PATH}/profile.d/"*
fi
# Scratch space: world-writable with sticky bit, like /tmp.
if [[ -d "${LOCAL_PATH}/tmp" ]] ; then
    echo "[INFO] SET PERM : ${LOCAL_PATH}/tmp"
    chown -R root:root "${LOCAL_PATH}/tmp"
    chmod 1777 "${LOCAL_PATH}/tmp"
fi
if [[ -d "${LOCAL_PATH}/logging" ]] ; then
    echo "[INFO] SET PERM : ${LOCAL_PATH}/logging"
    chown -R root:root "${LOCAL_PATH}/logging"
    chmod 750 "${LOCAL_PATH}/logging"
    chmod 600 "${LOCAL_PATH}/logging/"*
fi
if [[ -d "${LOCAL_PATH}/backup/`hostname`" ]] ; then
    echo "[INFO] SET PERM : ${LOCAL_PATH}/backup/`hostname`"
    chown -R root:root "${LOCAL_PATH}/backup/`hostname`"
    chmod -R 750 "${LOCAL_PATH}/backup/`hostname`/"
fi
if [[ -d "${LOCAL_PATH}/output" ]] ; then
    echo "[INFO] SET PERM : ${LOCAL_PATH}/output"
    chown -R root:root "${LOCAL_PATH}/output"
    chmod 750 "${LOCAL_PATH}/output"
    chmod 750 "${LOCAL_PATH}/output/`hostname`/"*
fi
| true
|
c822dba34c66f5c276a784445b619242b73e0669
|
Shell
|
klampworks/dotfiles
|
/bin/ff-tmp
|
UTF-8
| 280
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# Launch Firefox as a throwaway user whose home is mounted on tmpfs, so no
# browsing state survives the session.
u=tmp
if [[ ! -d "/home/${u}" ]]
then
	echo "error: create user $u first"
	exit 1
fi
sudo mount -t tmpfs none /home/${u}
"$(realpath "$(dirname "$0")")/share-screen-with" ${u}
# Use "$u" instead of the previous hard-coded "tmp" so changing the
# username variable above affects every command consistently.
sudo -u "$u" XAUTHORITY=/home/${u}/.Xauthority firefox --no-remote
sudo umount /home/${u}
| true
|
df270a1e136b489c820245ebd12f7396574c5a3a
|
Shell
|
boklm/mageia-puppet
|
/modules/mirror/templates/mirrordir
|
UTF-8
| 379
| 3.109375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
# Puppet ERB template: mirror a remote rsync module into a local directory,
# guarded by a lock file so overlapping (e.g. cron) runs are skipped.
remoteurl="<%= remoteurl%>"
localdir="<%= localdir %>"
rsync_options="<%= rsync_options %>"
lockfile="<%= lockfile %>"
if [ -f "$lockfile" ]; then
	# show error message when run from command line
	[ -t 1 ] && cat $lockfile
	# NOTE(review): bare `exit` propagates the status of the previous test,
	# so non-interactive runs exit 1 here — confirm whether that is intended.
	exit
fi
echo "sync in progress since $(date)" > "$lockfile"
/usr/bin/rsync $rsync_options "$remoteurl" "$localdir"
rm -f "$lockfile"
| true
|
2428159ca3e0d1ecc3a4d4b2e534bc01c6f0e516
|
Shell
|
krysopath/herbstluft-config
|
/cpu
|
UTF-8
| 475
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash
map_stdin() {
    # Invoke the given command once per line of stdin, passing the line's
    # (word-split) fields as separate arguments.
    local handler=$1
    local input_line
    while read input_line; do
        $handler $input_line
    done
}
div_1000() {
echo "scale=2; $1/1000" | bc -l
}
# Extract per-core temperature values from `sensors` output, joined on one
# line by xargs. NOTE(review): the field index (10) is layout-dependent —
# confirm against the local lm-sensors output format.
get_cpu_temperature() {
    temp=$( sensors | grep Core | cut -d " " -f 10 | xargs )
    #temp=`expr $temp / 1000`
    echo $temp
}
# Read each core's clock from /proc/cpuinfo (MHz) and convert to GHz with
# two decimals via div_1000; values are joined on one line by xargs.
get_cpu_mhz() {
    mhz=$( cat /proc/cpuinfo | grep MHz | cut -d : -f 2 | xargs -n1 | map_stdin div_1000 | xargs )
    echo $mhz
}
# One-line status string, e.g. for a status-bar widget.
echo "TEMP $(get_cpu_temperature) | GHZ $(get_cpu_mhz) |"
| true
|
6b5dd0eae722ac47155ec19570d449ea312384ab
|
Shell
|
infamousjoeg/openshift-conjur-deploy
|
/0_check_dependencies.sh
|
UTF-8
| 1,154
| 3.5
| 4
|
[] |
no_license
|
#!/bin/bash
# Pre-flight checks for the Conjur/OpenShift demo: verify the OpenShift
# login and required environment variables, and that the Conjur appliance
# image is present locally. Exits non-zero on the first missing prerequisite.
set -eo pipefail

. utils.sh

# Confirm logged into OpenShift.
# Fixed redirect: the previous "2 > /dev/null" passed a literal "2" as an
# argument to `oc whoami` (and only silenced stdout), so the check failed
# even for logged-in users.
if ! oc whoami >/dev/null 2>&1; then
  echo "You must login to OpenShift before running this demo."
  exit 1
fi

# Confirm Conjur project name is configured.
if [ "$CONJUR_PROJECT_NAME" = "" ]; then
  echo "You must set CONJUR_PROJECT_NAME before running this script."
  exit 1
fi

# Confirm docker registry is configured.
if [ "$DOCKER_REGISTRY_PATH" = "" ]; then
  echo "You must set DOCKER_REGISTRY_PATH before running this script."
  exit 1
fi

# Confirm Conjur account is configured.
if [ "$CONJUR_ACCOUNT" = "" ]; then
  echo "You must set CONJUR_ACCOUNT before running this script."
  exit 1
fi

# Confirm Conjur admin password is configured.
if [ "$CONJUR_ADMIN_PASSWORD" = "" ]; then
  echo "You must set CONJUR_ADMIN_PASSWORD before running this script."
  exit 1
fi

conjur_appliance_image=conjur-appliance:4.9-stable

# Confirms Conjur image is present.
if [[ "$(docker images -q "$conjur_appliance_image" 2> /dev/null)" == "" ]]; then
  echo "You must have the Conjur v4 Appliance tagged as $conjur_appliance_image in your Docker engine to run this script."
  exit 1
fi
| true
|
01ee4f6307325997c3e9050f0a8d98414f2164ab
|
Shell
|
shang-demo/headless-chrome
|
/config/script-tools/push-git.sh
|
UTF-8
| 1,454
| 3.796875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Push the current project to the git remote described in the default config
# file; for env=prod, first build into ${buildDir} and commit/force-push the
# build output. Helper functions (_checkDependence, _getPushInfo, _initGit,
# baseBuild) and variables (defaultEnv, buildDir, ...) come from the sourced
# scripts below.
scriptDir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
projectDir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && cd ../.. && pwd )"
# Load dependencies
cd ${scriptDir}
source constants.sh
source util.sh
source build.sh
function resetDir() {
    cd ${projectDir}
}
function checkDependencies() {
    _checkDependence gsed
    _checkDependence jq
}
# push [env] [nodeEnv] — env defaults to dev, nodeEnv to ${defaultEnv}.
function push() {
    local env=${1:-dev}
    local nodeEnv=${2:-${defaultEnv}}
    resetDir
    if [ ${env} = "prod" ]
    then
        cd ${buildDir}
    fi
    # _getPushInfo yields: push URL, remote name, current branch, push branch.
    local pushInfo=( $(_getPushInfo ${projectDir}/${defaultConfigPath} ${projectDir} ${env}) )
    echo ${pushInfo[*]}
    local pushUrl=${pushInfo[0]}
    local pushRemote=${pushInfo[1]}
    local currentBranch=${pushInfo[2]}
    local pushBranch=${pushInfo[3]}
    _initGit ${pushRemote} ${pushUrl}
    if [ ${env} = "prod" ]
    then
        local envDockerDir=${projectDir}/${DockerfilePath}/${nodeEnv}
        resetDir
        baseBuild ${nodeEnv} ${envDockerDir} ${buildDir}
        cd ${buildDir}
        git add -A
        # Timestamped commit; "|| echo" keeps going when there is nothing to commit.
        now=`date +%Y_%m_%d_%H_%M_%S`
        git commit -m "${now}" || echo ""
        echo $(pwd)
        echo "git push ${pushRemote} ${currentBranch}:${pushBranch}(${nodeEnv}) -f"
        git push ${pushRemote} ${currentBranch}:${pushBranch}\(${nodeEnv}\) -f
    else
        echo $(pwd)
        echo "git push ${pushRemote} ${currentBranch}:${pushBranch}"
        git push ${pushRemote} ${currentBranch}:${pushBranch}
    fi
}
function lift() {
    checkDependencies
    push $*
}
lift $*
| true
|
2901179b3c375548b2780ad013d6d7103096cfc1
|
Shell
|
anoopcs/kaldi
|
/egs/yesno/s5/local/prepare_data.sh
|
UTF-8
| 2,167
| 3.4375
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
#!/bin/bash
# Kaldi yes/no example: split the wave files in $1 into train/test halves and
# generate the wav.scp / text / utt2spk / spk2utt files under data/.
mkdir -p data/local
local=`pwd`/local
scripts=`pwd`/scripts
export PATH=$PATH:`pwd`/../../../tools/irstlm/bin
echo "Preparing train and test data"
train_base_name=train_yesno
test_base_name=test_yesno
waves_dir=$1 #$1=waves_yesno
ls -1 $waves_dir > data/local/waves_all.list
cd data/local
#Split the first half of wavefiles to training set and remaining half to test set
../../local/create_yesno_waves_test_train.pl waves_all.list waves.test waves.train
#Create test_yesno_wav.scp file with content fileID<Space>location. File ID is filename without extension
../../local/create_yesno_wav_scp.pl ${waves_dir} waves.test > ${test_base_name}_wav.scp
#Create train_yesno_wav.scp file with content fileID<Space>location. File ID is filename without extension
../../local/create_yesno_wav_scp.pl ${waves_dir} waves.train > ${train_base_name}_wav.scp
#From test files listing at waves.test generates the labels 1-->YES & 0-->NO. List the correspondence between file ID and label in Output file test_yesno.txt
../../local/create_yesno_txt.pl waves.test > ${test_base_name}.txt
#From test files listing at waves.train generates the labels 1-->YES & 0-->NO. List the correspondence between file ID and label in Output file train_yesno.txt
../../local/create_yesno_txt.pl waves.train > ${train_base_name}.txt
#Copy task.arpabo file to data/local as lm_tg.arpa
cp ../../input/task.arpabo lm_tg.arpa
#Back to s5 directory
cd ../..
# This stage was copied from WSJ example
for x in train_yesno test_yesno; do
  #Make the directories train_yesno and test_yesno under data
  mkdir -p data/$x
  #Copy the train_yesno_wav.scp, test_yesno_wav.scp to
  #data/train_yesno and data/test_yesno folders respectively
  cp data/local/${x}_wav.scp data/$x/wav.scp
  #Copy the train_yesno.txt, test_yesno.txt to
  #data/train_yesno and data/test_yesno folders respectively
  cp data/local/$x.txt data/$x/text
  #Sample utt2spk content after cat operation: 0_0_0_0_1_1_1_1 global
  cat data/$x/text | awk '{printf("%s global\n", $1);}' > data/$x/utt2spk
  #Mapping speakers to utterances
  utils/utt2spk_to_spk2utt.pl <data/$x/utt2spk >data/$x/spk2utt
done
| true
|
7bf13862b07cf90f22257653f2af2b20c88de2a1
|
Shell
|
ilovedanzi521/deploy-center
|
/repo/shell.sh
|
UTF-8
| 179
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/sh
# Run the given command, print its output, then print "OK" if it succeeded.
# The command's own stderr is discarded.
execute() {
    # Run the arguments directly as a command. Quoting "$@" preserves
    # arguments containing whitespace — the previous cmdStr=$@ round-trip
    # re-split them and could change the command's meaning.
    result=$("$@" 2>/dev/null)
    code=$?
    # Quoted expansion keeps the command's whitespace/newlines intact
    # (unquoted $result collapsed multi-line output onto one line).
    echo "$result"
    if [ $code -eq 0 ];then
        echo "OK"
    fi
}
case $1 in
    *)
        execute "$@"
        ;;
esac
| true
|
3e6fa5c9259d3dd752596f1f9df471f813ecee56
|
Shell
|
roowe/hongkongresort-el-ebin
|
/sh/node_config_example/common_env
|
UTF-8
| 687
| 3.15625
| 3
|
[] |
no_license
|
# -*- mode: sh -*-
#!/bin/sh
# Sourced environment for Erlang node scripts: derives paths from the
# calling script's location and picks a local IP on a known internal subnet.
# not support link, please use real file path
SCRIPT_DIR=$(cd ${0%/*} && pwd) # directory containing the calling script
SCRIPT_NAME=${0##*/} # script basename; doubles as the tool's node name
SERVER_HOME=${SCRIPT_DIR%/*/*} # project root (two directory levels up)
# All local inet addresses; fall back to loopback unless one matches a
# recognized internal subnet below.
IPs=(`ip addr |sed -n 's/.*inet \([0-9\.]\+\).*/\1/p'`)
IP=127.0.0.1
for var in ${IPs[@]};do
    case $var in
    '192.168.1'*)
        IP=$var
        break;;
    '10.9'*)
        IP=$var
        break;;
    esac
done
# Erlang code paths: project ebin plus all dependency ebins.
EBIN_ROOT="${SERVER_HOME}/ebin ${SERVER_HOME}/deps/*/ebin"
EBIN_ARGS="-pa $EBIN_ROOT"
ERL_DIR= # if erl was built from source and is not on PATH, set the full path to its directory in custom_env
| true
|
d108e4c09d561e592bb38adcf737907b4a4b08ba
|
Shell
|
phouverneyuff/jhipster-docker
|
/docker-download-update-setup.sh
|
UTF-8
| 2,109
| 3.59375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Pull the JHipster and MySQL images, create both containers (MySQL with a
# persistent volume and seeded credentials, JHipster linked to it with the
# project directory mounted), then stop them. Restart later with
# `docker start mysql` / `docker start jhipster`. $1 = host path shared
# with the JHipster container. (User-facing messages are in Portuguese.)
echo "Efetuando o download do docker do JHipster ..."
sudo docker pull jdubois/jhipster-docker:latest
echo "Efetuando o download do docker do MySQL ..."
sudo docker pull mysql/mysql-server:latest
MYSQL_DATABASE=pedido
MYSQL_ROOT_PASSWORD=rootpwd
MYSQL_USER=pedido
MYSQL_PASSWORD=pedidopwd
MYSQL_VAR=mysql-var
VOLUME_MYSQL_VAR=`pwd`/$MYSQL_VAR
VOLUME_JHIPSTER=$1
if [ ! -d $MYSQL_VAR ]; then
    mkdir $MYSQL_VAR
    echo "Criando a pasta $MYSQL_VAR em `pwd` para armazenar os dados persistentes do MySQL"
fi
echo ""
echo "###################################################################################################"
echo "Volume compartilhado do MySQL: $VOLUME_MYSQL_VAR"
echo "Database: $MYSQL_DATABASE"
echo "Usuário admin e senha: root:$MYSQL_ROOT_PASSWORD"
echo "Usuário e senha padrões: $MYSQL_USER:$MYSQL_PASSWORD"
echo "Service Name to Mysql: 'mysql'"
echo "###################################################################################################"
echo ""
echo "Iniciando pela primeira vez o serviço do MySQL. Para iniciar da proxima vez: 'docker start mysql'"
# Remove any stale container of the same name before (re)creating it.
sudo docker rm -f mysql
sudo docker run -v $VOLUME_MYSQL_VAR:/var/lib/mysql -P -e MYSQL_DATABASE=$MYSQL_DATABASE -e MYSQL_ROOT_PASSWORD=$MYSQL_ROOT_PASSWORD -e MYSQL_USER=$MYSQL_USER -e MYSQL_PASSWORD=$MYSQL_PASSWORD -d --name mysql mysql/mysql-server
echo ""
echo "###################################################################################################"
echo "Volume compartilhado do JHipster: $VOLUME_JHIPSTER"
echo "###################################################################################################"
echo ""
echo "Iniciando pela primeira vez o serviço do JHipster. Para iniciar da proxima vez: 'docker start jhipster'"
sudo docker rm -f jhipster
sudo docker run --name jhipster --link mysql:mysql -v $VOLUME_JHIPSTER:/jhipster -p 8081:8080 -p 3000:3000 -p 3001:3000 -p 4022:22 -d -t -i jdubois/jhipster-docker /bin/bash
echo ""
echo "Parando o docker do jhipster ..."
sudo docker stop jhipster
echo "Parando o docker mysql ..."
sudo docker stop mysql
| true
|
e9bd78d797e3fd0c972fe1ab972172f7e66c35d8
|
Shell
|
VMinute/ELL
|
/tools/utilities/profile/make_profiler.sh.in
|
UTF-8
| 1,770
| 3.734375
| 4
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
#!/bin/sh
# A script that creates a compiled profiler in a subdirectory:
# - create subdirectory (w/name of model)
# - run compile on model and save model .h and .o to subdirectory
# - copy c++ and cmake files to subdirectory
# Usage: make_profiler.sh model_file [output_directory] <compile options>
# The @...@ tokens are substituted by CMake (configure_file) at build time.
set -x
pushd .
model_name=$1
shift
model_file=$model_name".ell"
profiler_directory=$model_name"_profiler"
# Optional second argument: explicit output directory (anything not starting
# with '-' is treated as the directory; the rest are compile options).
next_opt=$1
if [ ${next_opt:0:1} != '-' ] ; then
    profiler_directory=$next_opt
    shift
fi
mkdir $profiler_directory
# Emit LLVM IR, object code and header, both with and without profiling.
@COMPILE_EXECUTABLE@ -imap $model_file --profile --ir --objectCode --header -od $profiler_directory -ob compiled_model $@
@COMPILE_EXECUTABLE@ -imap $model_file --ir --objectCode --header -od $profiler_directory -ob compiled_model_noprofile $@
# Platform-specific options
pi3_llc_opts="-relocation-model=pic -mtriple=armv7-linux-gnueabihf -mcpu=cortex-a53"
llc_opts=$pi3_llc_opts
cd $profiler_directory
# Compile each IR file to an object, plus an opt -O3 optimized variant.
@LLC_EXECUTABLE@ compiled_model.ll -o compiled_model_llc.o -O3 --filetype=obj $llc_opts
@OPT_EXECUTABLE@ compiled_model.ll -o compiled_model_opt.ll -O3
@LLC_EXECUTABLE@ compiled_model_opt.ll -o compiled_model_opt.o -O3 --filetype=obj $llc_opts
@LLC_EXECUTABLE@ compiled_model_noprofile.ll -o compiled_model_noprofile_llc.o -O3 --filetype=obj $llc_opts
@OPT_EXECUTABLE@ compiled_model_noprofile.ll -o compiled_model_noprofile_opt.ll -O3
@LLC_EXECUTABLE@ compiled_model_noprofile_opt.ll -o compiled_model_noprofile_opt.o -O3 --filetype=obj $llc_opts
# Copy the driver sources and build files next to the generated objects.
cp ../tools/utilities/profile/compiled_profile_main.cpp .
cp ../tools/utilities/profile/compiled_exercise_model_main.cpp .
cp ../tools/utilities/profile/CMakeLists-compiled.txt.in ./CMakeLists.txt
cp ../tools/utilities/profile/OpenBLASSetup.cmake ./OpenBLASSetup.cmake
popd
| true
|
9eafcc7141ec9ea8baf60f8066bff36a7b080b6e
|
Shell
|
tjwudi/usaco-solutions
|
/barn1/test.sh
|
UTF-8
| 160
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the barn1 solver against the bundled test cases and verify its output.
# (Shebang added: the {1..2} brace range requires bash.)
set -e

PROGNAME="barn1"
for i in {1..2}
do
  # Stage input i under the name the solver expects.
  cp "${PROGNAME}.in${i}" "${PROGNAME}.in"
  ./main
  # Fail loudly on a mismatch instead of unconditionally printing OK
  # (the original printed "Test $i OK" even when diff found differences).
  if diff "${PROGNAME}.out" "${PROGNAME}.out${i}"; then
    echo "Test $i OK"
  else
    echo "Test $i FAILED" >&2
    exit 1
  fi
done
| true
|
e93e34cd855ddd086728cf7c3e3e0214ec9b0fd5
|
Shell
|
cx1964/cx1964ReposOTCSRestAPIs
|
/01_create_git_remote_repos.sh
|
UTF-8
| 3,233
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
# Filename: 01_create_git_remote_repos.sh
# Purpose:  create a central remote git repository from the command line
# Note:     new style, using a GitHub API token
# Reference: https://medium.com/better-programming/create-github-repos-remotely-25153a6e6890

## GitHub repository name ##
NEW_REPO_NAME='cx1964ReposOTCSRestAPIs' # <<<<<<-------------------------------------------------------------

###########################################
### BEGIN constant part of the script   ###
###########################################
# GitHub user name: required, taken from the environment.
# Abort early with a clear message instead of letting curl/git fail obscurely.
: "${GH_USER:?GH_USER must be set to your GitHub user name}"

# Store current working directory.
CURRENT_DIR=$PWD

# Project directory can be passed as second argument, or defaults to the
# current working directory.
PROJECT_DIR=${2:-$CURRENT_DIR}

# Create the remote GitHub repository.
curl -u "$GH_USER" https://api.github.com/user/repos -d '{"name":"'"$NEW_REPO_NAME"'"}'

# ### 00 One-time setup per new GitHub repository: create a deploy key ###
# ------------------------------------------------------------------------
# To use access tokens you first have to create deploy keys; see
# https://developer.github.com/v3/guides/managing-deploy-keys/#deploy-keys
# Step 1: run ssh-keygen on your workstation (script 00_maak_pub_en_priv_keys.sh),
#         passing the repository name (cx1964ReposOTCSRestAPIs). On Linux this
#         creates two files in the current directory:
#         - cx1964ReposOTCSRestAPIs       (private key)
#         - cx1964ReposOTCSRestAPIs.pub   (public key)
# Steps 2-8: follow the "Setup" section of the guide above. In particular:
#   - open https://github.com/<user>/<repo>/settings/keys,
#     choose "Deploy keys", then "Add deploy key";
#   - paste the full contents of cx1964ReposOTCSRestAPIs.pub,
#     enable the "Allow write access" option, and press "Add key";
#   - afterwards remove (or move away) both key files from the current directory.
# Repeat these steps for every new repository.

# Deploy key fingerprint (informational only; not used by this script).
GH_Deployment_key='7a:ea:6b:70:a3:f7:58:50:e3:24:18:5b:6d:73:f0:40' # <<<<<<-------------------------------------------------------------------------------

# Create a local git repository.
git init "$PROJECT_DIR"
# Stage the files to be committed.
git add *.sh
git add *.py
git add README.md
git add .gitignore
# Commit the local changes.
git commit -m "Initiele files"
# Link the local repository to the remote one.
git -C "$PROJECT_DIR" remote add origin "https://github.com/$GH_USER/$NEW_REPO_NAME.git"
# Push the changes to the remote repository.
git push -u origin master
| true
|
8f28b6d0da0f76982cf29bd56826ecd342896f2c
|
Shell
|
vicentebolea/velox-report
|
/velox-exp
|
UTF-8
| 755
| 3.40625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Dispatcher for running and inspecting Velox/Hadoop experiments.
HELP="
velox-exp: Velox Utility to run and record experiments using Velox and Hadoop
USAGE: velox-exp <ACTION> [ARGS]
ACTIONS:
run workload1 workload2..., run and log the workloads
log [N], log last N experiments
drylog, log last N experiments
help, print this
WORKLOAD FILE EXAMPLE:
ALPHA=(0)
MIN_BLOCK_SIZE=(167772161)
LEAN_INPUT_SPLIT=(0.00 0.10 0.20 0.30 0.40 0.50 0.60 0.70 0.80 0.90 1.00)
LAUNCHER=run_awc.sh
SCHEDULER=scheduler_lean
"

case "$1" in
  run)    shift; ./velox-exp-run "$@"; exit;;
  # BUG FIX: the log branch used to pass $1 — the literal word "log" —
  # to velox-exp-log instead of the optional count argument N.
  log)    ./velox-exp-log "$2"; exit;;
  drylog) ./velox-exp-scrap; exit;;
  help)   echo "$HELP"; exit;;
  *)      echo "NO ACTION GIVEN"; echo "$HELP"; exit;;
esac
| true
|
50b289d10b529b3a8f4d88c11640021e50e27ca6
|
Shell
|
randomowo/dotfiles
|
/linux/bin/bash/calc
|
UTF-8
| 148
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash
# Interactive base converter: read an output base, an input base and a list
# of numbers, then let bc(1) do the conversion.
# NOTE: obase must be assigned before ibase so it is still parsed in the
# default base 10.
# -r keeps backslashes in the input literal (plain `read` mangles them).
read -r -p "obase: " obase
read -r -p "ibase: " ibase
read -r -p "numbers: " num
# printf instead of `echo -n "$num = "`: safe even if $num looks like an option.
printf '%s = ' "$num"
bc <<< "obase=$obase; ibase=$ibase; $num"
| true
|
4645d1d4d8f9565ecac12bf1e81c3f057127fc8f
|
Shell
|
wotsrovert/dotfiles
|
/zsh/prompt.zsh
|
UTF-8
| 1,630
| 3.71875
| 4
|
[] |
no_license
|
autoload colors && colors

# True when the current directory is inside a git work tree
# (fast path: a .git directory right here; otherwise ask git).
is_git_repo() {
  if [[ -d .git ]]; then
    return 0
  fi
  git rev-parse --git-dir > /dev/null 2>&1
}
# Assemble the git section of the prompt: dirty state followed by the
# unpushed marker; empty string outside a git repository.
git_prompt() {
  if ! is_git_repo; then
    # not in a git repo :(
    echo ""
    return
  fi
  # in a git repo!
  echo "$(git_dirty)$(need_push)"
}
# Name of the currently checked-out branch (empty on detached HEAD or
# outside a repository). Strips everything up to the last '/'.
git_branch() {
  local ref
  ref=$(git symbolic-ref HEAD 2>/dev/null)
  echo "${ref##*/}"
}
# Show a red "± wip" marker when the second word of the last commit's
# oneline log (lowercased, letters only) is exactly "wip".
git_wip() {
  local subject_word
  subject_word=$(git log -1 --oneline 2>/dev/null | awk '{print tolower($2)}' | awk '{gsub(/[^a-z]/, ""); print}')
  case "$subject_word" in
    wip) echo "%{$fg[red]%}± wip%{$reset_color%}" ;;
    *)   echo "" ;;
  esac
}
# Print "on <branch>" colored green when the work tree is clean, red when
# there are uncommitted changes.
# (Removed the unused `local exit_status=$?` and split declaration from
# assignment so the command's status is not masked by `local`.)
git_dirty() {
  local git_status
  git_status=$(git status --porcelain 2>/dev/null)
  if [[ "$git_status" == "" ]]; then
    # clean git status
    echo "on %{$fg[green]%}$(git_prompt_info)%{$reset_color%}"
  else
    # unclean git status
    echo "on %{$fg[red]%}$(git_prompt_info)%{$reset_color%}"
  fi
}
# Current branch name with the refs/heads/ prefix stripped; returns early
# (printing nothing) outside a repository or on a detached HEAD.
# FIX: the original `local ref="$(...)"` masked git's exit status, so the
# `|| return` never fired. Callers embed this in $(...), which strips the
# trailing newline anyway, so the fix is invisible to them.
git_prompt_info () {
  local ref
  ref=$(git symbolic-ref HEAD 2>/dev/null) || return
  echo "${ref#refs/heads/}"
}
# List commits on HEAD that are missing from the upstream branch.
unpushed () {
  git cherry -v @{upstream} 2>/dev/null
}

# Emit " with unpushed" (magenta) when local commits have not been pushed;
# otherwise emit an empty string.
need_push () {
  if [[ -n "$(unpushed)" ]]; then
    echo " with %{$fg[magenta]%}unpushed%{$reset_color%}"
  else
    echo ""
  fi
}
# Short hostname, rendered in yellow.
host_prompt() {
  printf '%s\n' "%{$fg[yellow]%}$(hostname -s)%{$reset_color%}"
}

# Current directory segment (zsh %-escapes), rendered in cyan.
directory_name() {
  printf '%s\n' "%{$fg[cyan]%}%1/%\/%{$reset_color%}"
}
# Build the left and right prompts. The $'…' quoting keeps the $(…) command
# substitutions literal here so zsh re-evaluates them each time the prompt
# is rendered (relies on zsh prompt expansion of the PROMPT variable).
set_prompt () {
  export PROMPT=$'$(host_prompt) in $(directory_name) $(git_prompt)\n$(git_wip)› '
  export RPROMPT=$'%{$fg_bold[green]%}%~ @ %*%{$reset_color%}'
}

# zsh hook: runs before every prompt; refreshes the terminal title and prompt.
# NOTE(review): `title` is defined elsewhere in these dotfiles — confirm it
# is sourced before this file.
precmd() {
  title "zsh" "%m" "%55<...<%~"
  set_prompt
}
| true
|
e899a5e91a2538ec36d3989e9a67d55766597d7b
|
Shell
|
BrunoFroger/multiTerm
|
/fichiers_config_reference/init_config_files.bash
|
UTF-8
| 548
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# Link the reference configuration files into the right directories.
HOME_DIR="/Users/obfe6300"
SOURCE_DIR="${HOME_DIR}/devBruno/multiTerm/fichiers_config_reference"
DEST_DIR_CFG="${HOME_DIR}/devBruno/multiTerm/multiTerm/build-multiTerm-Desktop_Qt_6_0_1_clang_64bit-Debug/multiTerm.app/Contents/MacOS"
DEST_DIR_CNX="${HOME_DIR}/multiTerm"

# Link the .cfg file unless a regular file is already present there.
if [ ! -f "${DEST_DIR_CFG}/multiTerm.cfg" ]; then
    ln -s "${SOURCE_DIR}/multiTerm.cfg" "${DEST_DIR_CFG}"
fi

# Link the .cnx file unless a regular file is already present there.
if [ ! -f "${DEST_DIR_CNX}/multiTerm.cnx" ]; then
    ln -s "${SOURCE_DIR}/multiTerm.cnx" "${DEST_DIR_CNX}"
fi
| true
|
14f84e0d60eceac0a127a301ad549c11023611cd
|
Shell
|
OpenUpSA/khetha-deploy
|
/deploy-target.sh
|
UTF-8
| 1,541
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/sh -e
# NOTE(review): the -e in the shebang is lost if the script is invoked as
# `sh deploy-target.sh`; a `set -e` in the body would be more robust.
# Run from the directory containing this script (compose/target files live here).
cd "$(dirname "$0")"
USAGE="
Usage: deploy-target NAME DOCKER_HOST [SOURCE_BUILD]
Deploy khetha-django as NAME to DOCKER_HOST.
NAME (example: 'staging')
The name to use for the deployment instance.
DOCKER_HOST (example: 'ssh://user@host')
A valid DOCKER_HOST to deploy to.
SOURCE_BUILD (optional, example: 'khetha-django')
Path to a source checkout of khetha-django to build.
"
# ${n:?…} aborts with the usage text when a required argument is missing.
TARGET_NAME="${1:?"$USAGE"}"
TARGET_DOCKER_HOST="${2:?"$USAGE"}"
SOURCE_BUILD="${3}"
# Begin tracing.
set -x
# Base repo / tag to save the deployment images to.
# TODO: Move to OpenUp organisation
export BASE_TAG="${BASE_TAG:-"pidelport/khetha-deploy:${TARGET_NAME}"}"
# Build or pull BASE_TAG
# A non-empty SOURCE_BUILD means "build from a local checkout"; otherwise a
# pre-built image is pulled and re-tagged.
if test -n "${SOURCE_BUILD}"; then
echo "Building ${BASE_TAG} from ${SOURCE_BUILD}"
docker build \
--build-arg DJANGO_STATICFILES_STORAGE='whitenoise.storage.CompressedManifestStaticFilesStorage' \
--build-arg WHITENOISE_KEEP_ONLY_HASHED_FILES='True' \
--pull "${SOURCE_BUILD}" --tag "${BASE_TAG}"
else
SOURCE_TAG='pidelport/khetha-django:latest' # XXX: Hard-coded stable source for now.
echo "Pulling ${BASE_TAG} from ${SOURCE_TAG}"
docker pull "${SOURCE_TAG}"
docker tag "${SOURCE_TAG}" "${BASE_TAG}"
fi
# Build deployment images from BASE_TAG
docker-compose build --build-arg BASE_TAG
# Push
docker push "${BASE_TAG}"
docker-compose push
# Deploy
# DOCKER_HOST is set only for this one command so the stack lands on the
# remote target host, not the local daemon.
DOCKER_HOST="$TARGET_DOCKER_HOST" docker stack deploy -c docker-compose.yml -c "target.${TARGET_NAME}.yml" "khetha-${TARGET_NAME}"
| true
|
5bbccc5557dd6e2e57257599d6834a95e66891d2
|
Shell
|
ratmir-kulakov/daripodelu
|
/console/scripts/update-stock-info.sh
|
UTF-8
| 647
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
# Refresh stock data: archive the current downloads, pull fresh XML feeds
# from gifs.ru, import them, then archive the imported files as well.

# SCRIPT_PATH=`pwd`/`dirname "$0"`
SCRIPT_PATH="$HOME/xn--80ahbenushh0b.xn--p1ai/docs/console/scripts"
ROOT_PATH="$SCRIPT_PATH/../.."

source "$SCRIPT_PATH/lib.sh"

src_folder="$ROOT_PATH/downloads/current"
dst_folder="$ROOT_PATH/downloads/archive"

# Empty the "current" folder if it has any entries.
# FIX: the original wrapped rm in backticks, which ran rm in a subshell and
# then tried to execute rm's (empty) output as a command.
empty_src_folder() {
  if [ "$(ls -A "$src_folder")" ]; then
    rm -f -- "$src_folder"/*
  fi
}

create_archive "$src_folder" "$dst_folder"
empty_src_folder

# Download the xml files from the gifs.ru site.
php -c ~/etc/php.ini "$ROOT_PATH/yii" load/downloadstock
php -c ~/etc/php.ini "$ROOT_PATH/yii" update/stock

create_archive "$src_folder" "$dst_folder" "stock"
empty_src_folder
| true
|
d423e5ba1fa605ca2afbde28307c72b4e2884732
|
Shell
|
tjhei/dealii-vm
|
/postinstall/visit.sh
|
UTF-8
| 273
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/bash
# Download and unpack VisIt 2.8.2 into ~/bin, replacing any previous install.
# set -e guards the cd: without it, a failed `cd ~/bin` would let the
# `rm -rf visit*` below delete files in whatever directory we were in.
set -e
echo "installing visit..."
cd ~/bin
rm -rf visit*
VISIT_TARBALL="visit2_8_2.linux-x86_64-ubuntu11.tar.gz"
wget -q "http://portal.nersc.gov/svn/visit/trunk/releases/2.8.2/${VISIT_TARBALL}"
tar xf "$VISIT_TARBALL"
rm "$VISIT_TARBALL"
echo "done."
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.