blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
4089bcdc2178c91e6f808fbc30438d3b59003a1b
|
Shell
|
Zirpon/hero
|
/shell/runclient.sh
|
UTF-8
| 361
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
# Launch up to N client instances, one per "dev" directory in the CWD.
# Usage: runclient.sh [max_clients]   (default: 100)

# Cap on how many clients to start; the first CLI argument overrides it.
maxnum=100
if [ $# -gt 0 ]; then
  maxnum=$1
fi
echo "maxnum $maxnum"

num=0
# Glob instead of parsing `ls | grep "dev"` — safe with unusual names and
# matches the same set (names containing "dev").
for i in *dev* dev*; do
  if [ "$num" -eq "$maxnum" ]; then
    break
  fi
  exedir=./$i
  if [ -d "$exedir" ]; then
    echo "$exedir"
    exepath=$exedir/dev.exe
    # `start` launches the exe detached (Windows/MSYS environment —
    # NOTE(review): confirm this script only runs there).
    start "$exepath"
    num=$((num + 1))
  fi
done
echo "open $num clients "
| true
|
2495149b06d8e406ba11dbb0a442bfe3a18b28dc
|
Shell
|
chendotjs/snippets
|
/crosscorrelation/test/convert_gray.sh
|
UTF-8
| 173
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
# Convert every PNG in the current directory to grayscale, writing
# <name>-gray.png next to each original (requires ImageMagick `convert`).

# Glob directly instead of parsing `ls` output (handles odd filenames).
for file in *.png; do
  # When no PNGs exist the literal pattern remains; skip it.
  [ -e "$file" ] || continue
  echo "$file"
  newfile=${file%".png"}-gray.png
  # Average the RGB channels, then force the Gray colorspace.
  convert "$file" -fx '(r+g+b)/3' -colorspace Gray "$newfile"
  echo "$newfile"
done
| true
|
0629ce73704858e056246c57957cf63a9280fd73
|
Shell
|
mafintosh/tabalot
|
/template.sh
|
UTF-8
| 998
| 3.6875
| 4
|
[
"MIT"
] |
permissive
|
# Tab completion for {cmd}
#
# Auto install to bash by doing:
# {install} --save
#
# Which similar to
# {install} > {completionDir}/{cmd}
#
# Remember to source {profile} or restart your terminal afterwards
# Manual install by adding the following to {profile} {zshrc-help}
# NOTE(review): this file is a template — the brace-delimited tokens are
# substituted by the installer before the script is written out.
# Bash branch: the `complete` builtin exists only in bash.
if type complete &>/dev/null; then
_{cmd}_completion () {
COMPREPLY=()
local output output_file
# Ask the CLI itself for suggestions, passing the zero-based cursor word
# index and every word after the command name.
output="$({completion} completion -- $(( $COMP_CWORD - 1 )) "${COMP_WORDS[@]:1}" 2> /dev/null)"
# Exit status 15 is the CLI's agreed signal that filename completion is wanted.
[ $? == 15 ] && output_file=yes
# Split suggestions on newlines only (suggestions may contain spaces).
local IFS=$'\n'
output=("$output")
for word in ${output[@]}; do
COMPREPLY+=("$word")
done
if [ "$output_file" == "yes" ]; then
# Prefer compopt for filename completion; the compgen call is a no-op
# fallback for bashes too old to have compopt.
type compopt >&/dev/null && compopt -o filenames 2> /dev/null || compgen -f /non-existing-dir/ > /dev/null
fi
}
complete -F _{cmd}_completion {cmd}
# Zsh branch: `compdef` exists only in zsh.
elif type compdef &>/dev/null; then
_{cmd}_completion () {
compadd -- $({completion} completion -- $(( CURRENT - 2 )) "${words[@]:1}" 2> /dev/null)
}
compdef _{cmd}_completion {cmd}
fi
| true
|
67443e2354556994d56833e1c17d44135e01d845
|
Shell
|
speedwing/cardano-auth
|
/build-api.sh
|
UTF-8
| 310
| 2.6875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Build the API with sbt, package it as a linux/amd64 Docker image and push it.
# Strict mode: without it, a failed sbt build or git describe still tagged
# and pushed a stale/incorrectly-versioned image.
set -euo pipefail

sbt "project api" stage

# Tag the image with the most recent git tag (fails fast if none exists).
VERSION=$(git describe --tags)
DOCKER_IMAGE="gimbalabs/balcony-api:${VERSION}"

echo "Building image: ${DOCKER_IMAGE}"

# Use the pre-created "x86" buildx builder for the cross-platform build.
docker buildx use x86
docker buildx build --platform linux/amd64 --load -t "${DOCKER_IMAGE}" -f api/Dockerfile .
docker push "${DOCKER_IMAGE}"
| true
|
636036d016b19c7aed6115d4dbc555cd0a4015b8
|
Shell
|
danilozano413/ionic-capacitor-release
|
/src/rename_directories.bash
|
UTF-8
| 401
| 3.421875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Rename directories and files under $1 whose names start with $2,
# replacing $2 with $3 in the path.
# Usage: rename_directories.bash <root> <from> <to>
echo "Rename folders and directories ${2} -> ${3}";

# Rename directories DEPTH-FIRST (-depth): the original walked top-down, so
# renaming a parent invalidated the still-queued paths of matching children.
# IFS= read -r preserves leading whitespace and backslashes in names.
find "${1}" -depth -type d -name "${2}*" | while IFS= read -r FILE ; do
    newfile="$(echo "${FILE}" | sed -e "s/${2}/${3}/")" ;
    # NOTE(review): sed replaces the FIRST occurrence of $2 anywhere in the
    # path; anchor the pattern if intermediate components may also match.
    mv "${FILE}" "${newfile}" ;
done

# Rename files
find "${1}" -depth -type f -name "${2}*" | while IFS= read -r FILE ; do
    newfile="$(echo "${FILE}" | sed -e "s/${2}/${3}/")" ;
    mv "${FILE}" "${newfile}" ;
done
| true
|
f5b7108396402c8ed5d0df0175c22909c5ee577a
|
Shell
|
RonieGSS/bz_infra_design
|
/ops/docker/mkdocs/bin/logs
|
UTF-8
| 212
| 3.078125
| 3
|
[] |
no_license
|
#! /bin/bash
# Follow the logs of the running mkdocs container.
container_name='biz_mkdocs'

if docker ps -f name="$container_name" | grep 'mkdocs' -o;
then
  # Show container logs.
  # BUG FIX: `--tail` requires a line count — the original `--tail -f` made
  # docker consume "-f" as the --tail value and error out. Follow from the
  # last 100 lines instead.
  docker logs --tail 100 -f "$container_name"
else
  echo "Mkdocs container not running!"
fi
| true
|
2b0c29be7880b19569c854aa03e655fa23a18e81
|
Shell
|
harbaum/RPI-BLE-toy-control
|
/python-gatt-install.sh
|
UTF-8
| 927
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/bash
# Install python-gatt and build/install bluez 5.44 from source.
# Must run as root; bluetooth is stopped during the upgrade and restarted.

if [ "$(id -u)" != "0" ]; then
    echo "Dieses Script muss vom Root-User oder per sudo gestartet werden!" 1>&2
    exit 1
fi

echo "Installiere python-gatt und bluez-5.44 ..."

systemctl stop bluetooth

apt-get update
apt-get -y install python3-pip python3-dbus
pip3 install gatt

# Build dependencies for bluez.
apt-get -y install libusb-dev libdbus-1-dev libglib2.0-dev libudev-dev libical-dev libreadline-dev libdbus-glib-1-dev unzip

# Build bluez 5.44 under $HOME/bluez. Guard every cd so a failure cannot
# let configure/make run in the wrong directory; mkdir -p makes reruns safe.
cd || exit 1
mkdir -p bluez
cd bluez || exit 1
wget http://www.kernel.org/pub/linux/bluetooth/bluez-5.44.tar.xz
tar xf bluez-5.44.tar.xz
cd bluez-5.44 || exit 1
./configure --prefix=/usr --sysconfdir=/etc --localstatedir=/var --enable-library
make
make install

# bluetoothd lands in /usr/libexec; the stock systemd unit expects /usr/sbin.
ln -svf /usr/libexec/bluetooth/bluetoothd /usr/sbin/
systemctl daemon-reload
systemctl start bluetooth
hciconfig hci0 up

# check for correct version
if [ "$(bluetoothd --version)" == "5.44" ]; then
    echo "Installation erfolgreich";
else
    echo "Installation nicht erfolgreich";
fi
| true
|
f237ad76f128ff6b5a71db0c36b8309fd4c5d069
|
Shell
|
anubhavmishra/consul-service-mesh-gateways-demo
|
/vms/templates/consul-gateway.tpl
|
UTF-8
| 1,578
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/bash
# Terraform-rendered provisioning template: bootstraps a Consul mesh-gateway
# VM for datacenter dc2. NOTE(review): the dollar-brace tokens
# (advertise_addr, consul_cluster_addr, gateway_addr) are substituted by
# Terraform at render time, not by the shell.
apt-get update && apt-get install -y unzip
# Fetch Consul
cd /tmp
wget https://releases.hashicorp.com/consul/1.6.0/consul_1.6.0_linux_amd64.zip -O ./consul.zip
unzip ./consul.zip
mv ./consul /usr/local/bin
# Fetch Envoy
wget https://github.com/nicholasjackson/cloud-pong/releases/download/v0.1.1/envoy -O /usr/local/bin/envoy
chmod +x /usr/local/bin/envoy
# Create the consul config
# (grpc port 8502 is required for Envoy xDS; connect enables the mesh)
mkdir -p /etc/consul
cat << EOF > /etc/consul/config.hcl
data_dir = "/tmp/"
log_level = "DEBUG"
datacenter = "dc2"
bind_addr = "0.0.0.0"
client_addr = "0.0.0.0"
ports {
grpc = 8502
}
connect {
enabled = true
}
enable_central_service_config = true
advertise_addr = "${advertise_addr}"
retry_join = ["${consul_cluster_addr}"]
EOF
# Setup system D
# (unit for the Consul agent itself)
cat << EOF > /etc/systemd/system/consul.service
[Unit]
Description=Consul Server
After=syslog.target network.target
[Service]
ExecStart=/usr/local/bin/consul agent -config-file=/etc/consul/config.hcl
ExecStop=/bin/sleep 5
Restart=always
[Install]
WantedBy=multi-user.target
EOF
chmod 644 /etc/systemd/system/consul.service
# Setup system D
# (unit for the Envoy-based mesh gateway, registered with its WAN address)
cat << EOF > /etc/systemd/system/consul-gateway.service
[Unit]
Description=Consul Gateway
After=syslog.target network.target
[Service]
ExecStart=/usr/local/bin/consul connect envoy -mesh-gateway -register -wan-address ${gateway_addr}:443 -- -l debug
ExecStop=/bin/sleep 5
Restart=always
[Install]
WantedBy=multi-user.target
EOF
chmod 644 /etc/systemd/system/consul-gateway.service
systemctl daemon-reload
systemctl start consul.service
systemctl start consul-gateway.service
| true
|
e3245768bf6f0e908542e0b146200cf6b4d27daa
|
Shell
|
blakehull/medium-app
|
/docker-build.sh
|
UTF-8
| 432
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/sh
# Build and start the mltag container, discover the host port Docker mapped,
# then smoke-test the similar-tags endpoint once the service has booted.

docker build -t mltag:latest .
docker run -it -d -P mltag:latest
docker ps --format "{{.Ports}}"

# The Ports column looks like "0.0.0.0:32768->5000/tcp"; everything before
# the "-" of "->" is the host ip:port. tr strips stray whitespace/newlines.
url=$(docker ps --format "{{.Ports}}{{.Image}}" | grep mltag | cut -d- -f1 | tr -d '[:space:]')
container=$(docker ps --format "{{.ID}}->{{.Image}}" | grep mltag | cut -d- -f1 | tr -d '[:space:]')

echo "RUNNING ON: " "${url}"
echo "Starting Up..."
# Give the service time to come up before probing it.
sleep 20
curl -w "\n" -X GET "http://${url}/similar_tags/machine%20learning"
| true
|
bff427197d0715acae80f6f3d6482cef228e7111
|
Shell
|
hugoShaka/pushjet-docker
|
/pushjet/pushjet.sh
|
UTF-8
| 1,347
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/sh
# Container entrypoint for the Pushjet API: fill in config.py from the
# environment, initialise the MySQL schema on first start, then run gunicorn.

# Need a lockfile so we dont try to initilize the database if we restart
LOCKFILE="/root/db_init_done"
PUSHJET_DIR="/srv/http/api.pushjet.io"

echo "Database settings are:"
echo "MYSQL_USER: ${MYSQL_USER}"
# SECURITY FIX: do not leak the password into the container logs.
echo "MYSQL_PASS: ********"
echo "MYSQL_HOST: ${MYSQL_HOST}"
echo "MYSQL_PORT: ${MYSQL_PORT}"

echo "Setting config.py settings"
# Setup all the ENV variables
## MySQL
# NOTE(review): a value containing the sed delimiter (/ or @) would corrupt
# the substitution — assumed not to occur in these settings.
sed -i -e "s/MYSQL_USER/${MYSQL_USER}/" "$PUSHJET_DIR/config.py"
sed -i -e "s/MYSQL_PASS/${MYSQL_PASS}/" "$PUSHJET_DIR/config.py"
sed -i -e "s/MYSQL_HOST/${MYSQL_HOST}/" "$PUSHJET_DIR/config.py"
sed -i -e "s/MYSQL_PORT/${MYSQL_PORT}/" "$PUSHJET_DIR/config.py"
## Google API key
sed -i -e "s/GOOGLE_API_KEY/${GOOGLE_API_KEY}/" "$PUSHJET_DIR/config.py"
sed -i -e "s/GOOGLE_SENDER_ID/${GOOGLE_SENDER_ID}/" "$PUSHJET_DIR/config.py"
## ZeroMQ
sed -i -e "s@ZEROMQ_RELAY@${ZEROMQ_RELAY}@" "$PUSHJET_DIR/config.py"

# Create the MySQL database if we havent already
if [ ! -f "$LOCKFILE" ]; then
    dbinfo=$(grep -i database_uri "${PUSHJET_DIR}/config.py")
    echo "Creating database on: ${dbinfo}"
    mysql -h "${MYSQL_HOST}" -u"${MYSQL_USER}" -p"${MYSQL_PASS}" -e "create database pushjet_api;"
    mysql -h "${MYSQL_HOST}" -u"${MYSQL_USER}" -p"${MYSQL_PASS}" -D pushjet_api < "$PUSHJET_DIR/database.sql"
    # BUG FIX: the lockfile was never written, so the database creation and
    # schema import re-ran (and failed) on every container restart.
    touch "$LOCKFILE"
fi

# exec so gunicorn becomes PID 1 and receives container stop signals.
cd "$PUSHJET_DIR" && exec /usr/bin/gunicorn -b 0.0.0.0:8000 -w 4 --timeout=30 application:app
| true
|
527c59e6e0b54502a256ee04ee4e2d3de3eb1513
|
Shell
|
IBM-Security/performance
|
/IAM/scripts/SDS_tuning_scripts/db2perf.sh
|
UTF-8
| 2,237
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/sh
##############################################################################
#
# Licensed Materials - Property of IBM
#
# Restricted Materials of IBM
#
# (C) COPYRIGHT IBM CORP. 2006. All Rights Reserved.
#
# US Government Users Restricted Rights - Use, duplication or
# disclosure restricted by GSA ADP Schedule Contract with IBM Corp.
#
##############################################################################
#
# Script: db2perf.sh
#
# Author: Michael Seedorff, IBM/Tivoli Services
#
# Description: This script creates the necessary bindings to use the
# REOPT command and increase performance for the DB2 database.
#
# Prerequisites:
# This script must be executed by the owner of the DB2 instance in which
# the specified database resides. The default database is ldapdb2.
#
# Change History:
# 2006/02/10 Version 1.0 - Michael Seedorff, IBM/Tivoli Services
# Original version.
#
# Print command-line usage for db2perf.sh to stdout.
usage()
{
cat <<EOF
Usage: db2perf.sh [ -db dbname ]
Options:
-db dbname DB name to update with bind scripts (Default=ldapdb2)
Notes: Must be executed as the DB2 instance owner
EOF
}
# Defaults: database name and the DB2 bind-file directory of the instance owner.
DBNAME="ldapdb2"
BNDDIR="$HOME/sqllib/bnd"

# Walk the argument list; only -db takes a value.
while [ $# -gt 0 ]
do
   case "$1" in
      -db)
         # -db must be followed by a database name.
         if [ -z "$2" ]
         then
            usage
            exit 25
         fi
         DBNAME="$2"
         shift 2
         ;;
      -\?|--help)
         usage
         exit 25
         ;;
      *)
         echo "Invalid parameter - \"$1\""
         usage
         exit 56
         ;;
   esac
done

# The bind files must exist in the instance owner's sqllib/bnd directory.
if [ ! -d "${BNDDIR}" ]
then
   echo ""
   echo " ERROR: Directory ${BNDDIR} not found."
   echo ""
   exit 59
fi

# Rebind the standard DB2 packages against the target database.
cd "${BNDDIR}"
db2 connect to "${DBNAME}"
db2 bind @db2ubind.lst BLOCKING ALL GRANT PUBLIC
db2 bind @db2cli.lst BLOCKING ALL GRANT PUBLIC
db2 bind db2schema.bnd BLOCKING ALL GRANT PUBLIC sqlerror continue
db2 bind db2clipk.bnd collection NULLIDR1
db2 bind db2clipk.bnd collection NULLIDRA
| true
|
3afb07461da392f17b2111bcc1822b3262ebea9f
|
Shell
|
MarounMaroun/shell-checker
|
/entrypoint.sh
|
UTF-8
| 774
| 3.328125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# GitHub Action entrypoint: run shellcheck on every shell file changed in
# the pull request that triggered this workflow.
# Args: $1 = GitHub token, $2 = shellcheck severity, $3 = excluded checks.

GITHUB_API_URI="https://api.github.com"
GITHUB_API_HEADER="Accept: application/vnd.github.v3+json"

token="$1"
severity="$2"
exclude="$3"

echo "---= Excluded params: [$exclude] =---"
echo "---= Severity: $severity =---"

pr_num=$(jq -r .pull_request.number "$GITHUB_EVENT_PATH")
echo "---= Running on PR #$pr_num =---"

body=$(curl -sSL -H "Authorization: token $token" -H "$GITHUB_API_HEADER" "$GITHUB_API_URI/repos/$GITHUB_REPOSITORY/pulls/$pr_num/files")

err=0
# Read one filename per line so paths containing spaces survive (the
# original for-loop word-split the jq output).
while IFS= read -r file; do
  extension=$(echo "$file" | awk -F . '{print $NF}')
  # BUG FIX: the regex is now anchored — unanchored, extensions such as
  # "dash" or "mksh" matched the embedded "sh"/"ksh" and were checked.
  if [[ "$extension" =~ ^(sh|bash|zsh|ksh)$ ]]; then
    # Keep scanning remaining files even after a finding; remember the
    # worst exit code so the Action still fails.
    shellcheck -e "$exclude" -S "$severity" "$GITHUB_WORKSPACE/$file" || err=$?
  fi
done < <(echo "$body" | jq -r 'map(select(.status != "removed")) | .[].filename')

exit $err
| true
|
75bd4e3f53ff68251bc86afd44e495e9072d14ba
|
Shell
|
truedays/desktop
|
/rfind.sh
|
UTF-8
| 5,637
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# July 23, 2017 version 0.1
# Standardizing the way I organize with find.
# /tank == path to index
# -xdev == do not traverse other filesystems
#find /tank -xdev -printf "$(uname -n)\t%f\t%s\t%Cs\t%Ts\t%p\t%F\t%D\t%y\t%#m\t%i\t%u\t%U\t%g\t%G\n"; }
# %.10A@ == %A@ == unix time with high precission .10A@ == just the first 10 digits unix time of last file ACCESS
# %A@s == is better than that ^
# Index a filesystem tree with find, writing a gzipped, pipe-delimited
# inventory (one row per file) plus a self-describing header to /.
[[ $UID -eq 0 ]] || { echo "Must be run as root"; exit 1; }
# Path to index comes from $1 (default /tank).
indexPath="${1:-/tank}"
indexPathShort="${indexPath##*/}"
#indexPathShort="$(basename ${indexPath})"
# Output file name encodes host, path and date, e.g. /rfind_myhost_tank_2017-07-23.out
saveFile="/rfind_${HOSTNAME:-NO_HOSTNAME}_${indexPathShort}_$(date +%F).out"
indexDate="$(date +%s)"
indexHost="$(uname -n)" # this or $HOSTNAME ??
# df snapshot of the indexed filesystem, each line prefixed "#\t" so it can
# live in the output header as a comment.
df="$(df --block-size=1K --local --print-type --exclude-type=tmpfs $indexPath | sed 's@^@#\t@g')"
rfindVersion='0.1'
progName=$0
progPID=$$
# Build a 10-underscore separator string.
for i in {0..9}; do line+=_;done
# Embed this script's own source in the header for provenance.
# NOTE(review): relies on bash keeping the running script open on fd 255 —
# bash-specific implementation detail; confirm on target bash versions.
progDUMP="# $line $progName BEGIN $line
$(cat /proc/${progPID}/fd/255 | sed 's@^@# @g')
# $line $progName END $line"
#strfmt='%f\t%s\t%As\t%Cs\t%Ts\t%p\t%F\t%D\t%y\t%#m\t%n\t%i\t%u\t%U\t%g\t%G\0'
# Pipe-delimited record: name|size|atime|ctime|mtime|path|fstype|dev|type|mode|nlinks|inode|user|uid|group|gid
strfmt='%f|%s|%As|%Cs|%Ts|%p|%F|%D|%y|%#m|%n|%i|%u|%U|%g|%G\n'
# Emit header comments + the find listing, gzipped to $saveFile.gz.
function main {
{ # create header
echo -e "# rfind rfindVersion=${rfindVersion} date=$(date +%s) ($(date))
# indexHost=$indexHost \n# indexPath=$indexPath \n# DF: \n$df
#
#\n${progDUMP//\\/\\\\}
#
# strfmt='${strfmt//\\/\\\\}'
#";
#find "${indexPath}" -xdev -printf "$strfmt"; } | tee >(gzip > ${saveFile}.gz)
find "${indexPath}" -xdev -printf "$strfmt"; } | gzip > ${saveFile}.gz
}
#time main
main && echo -e "$line$line$line$line\n Indexing root@$indexHost:$indexPath completed @ $(date) \n $line$line$line$line\n\tTRY: zless ${saveFile}.gz" || echo something failed..
exit 0
## ~$ COLUMNS=888 man -Pcat find | \grep -Ei '^\s*%' | sed -e 's/^\s*/# /g' # <---- ___GENERATE THE BELOW REFERENCE___
# %% A literal percent sign.
# %a File’s last access time in the format returned by the C ‘ctime’ function.
# %Ak File’s last access time in the format specified by k, which is either ‘@’ or a directive for the C ‘strftime’ function. The possible values for k are listed below; some of them might not be available on all systems, due to differences in ‘strftime’ between systems.
# %b The amount of disk space used for this file in 512-byte blocks. Since disk space is allocated in multiples of the filesystem block size this is usually greater than %s/512, but it can also be smaller if the file is a sparse file.
# %c File’s last status change time in the format returned by the C ‘ctime’ function.
# %Ck File’s last status change time in the format specified by k, which is the same as for %A.
# %d File’s depth in the directory tree; 0 means the file is a command line argument.
# %D The device number on which the file exists (the st_dev field of struct stat), in decimal.
# %f File’s name with any leading directories removed (only the last element).
# %F Type of the filesystem the file is on; this value can be used for -fstype.
# %g File’s group name, or numeric group ID if the group has no name.
# %G File’s numeric group ID.
# %h Leading directories of file’s name (all but the last element). If the file name contains no slashes (since it is in the current directory) the %h specifier expands to ".".
# %H Command line argument under which file was found.
# %i File’s inode number (in decimal).
# %k The amount of disk space used for this file in 1K blocks. Since disk space is allocated in multiples of the filesystem block size this is usually greater than %s/1024, but it can also be smaller if the file is a sparse file.
# %l Object of symbolic link (empty string if file is not a symbolic link).
# %m File’s permission bits (in octal). This option uses the ‘traditional’ numbers which most Unix implementations use, but if your particular implementation uses an unusual ordering of octal permissions bits, you will see a difference between the actual value of the file’s mode and the output of %m. Normally you will want to have a leading zero on this number, and to do this, you should use the # flag (as in, for example, ‘%#m’).
# %M File’s permissions (in symbolic form, as for ls). This directive is supported in findutils 4.2.5 and later.
# %n Number of hard links to file.
# %p File’s name.
# %P File’s name with the name of the command line argument under which it was found removed.
# %s File’s size in bytes.
# %S File’s sparseness. This is calculated as (BLOCKSIZE*st_blocks / st_size). The exact value you will get for an ordinary file of a certain length is system-dependent. However, normally sparse files will have values less than 1.0, and files which use indirect blocks may have a value which is greater than 1.0. The value used for BLOCKSIZE is system-dependent, but is usually 512 bytes. If the file size is zero, the value printed is undefined. On systems which lack support for st_blocks, a file’s sparseness is assumed to be 1.0.
# %t File’s last modification time in the format returned by the C ‘ctime’ function.
# %Tk File’s last modification time in the format specified by k, which is the same as for %A.
# %u File’s user name, or numeric user ID if the user has no name.
# %U File’s numeric user ID.
# %y File’s type (like in ls -l), U=unknown type (shouldn’t happen)
# %Y File’s type (like %y), plus follow symlinks: L=loop, N=nonexistent
# %Z (SELinux only) file’s security context.
| true
|
1085931edfeb6964ce246e6e8bb28364b6a738ae
|
Shell
|
ninthdrug/doom
|
/script/main/sync_doom_user
|
UTF-8
| 296
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/sh
# Shell/Scala hybrid script: the sh header below builds a classpath from
# every jar in $DOOMHOME/lib, then re-executes THIS SAME FILE with the
# Scala runner. The "!#" line terminates the header for Scala.
LIBS=""
for JAR in $DOOMHOME/lib/*.jar
do
LIBS=$LIBS:$JAR
done
exec scala -save -cp "$LIBS" $0 $@
!#
import doom._
import ninthdrug.command._
// Take the userid from args[0], prompting for it when absent, then run
// sync_domain_user against every configured domain.
val userid = prompt(args, 0, "userid")
for (domain <- ConfigCache.domains) {
Sh("sync_domain_user " + domain.name + " " + userid).print
}
| true
|
26bd385630ff3da97b25a5033dfdaef293060126
|
Shell
|
thomas-maurice/promulgator
|
/promulgate.sh
|
UTF-8
| 421
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/sh
# Overlay the given caption text onto static/trump.jpg and write out.jpg.

# A caption argument is mandatory.
if [ -z "${1}" ]; then
    echo "You need to promulgate something !"
    exit 1
fi

# Render the caption on a transparent background, rotate it slightly, then
# composite it over the photo (+swap puts the photo underneath).
convert -background '#0000' -fill black -geometry +665+250 \
    -size 210x290 caption:"${1}" -rotate -354 \
    static/trump.jpg +swap -composite out.jpg
| true
|
5e9ce82f2bd0e3e9591f80913c8f64a13867514f
|
Shell
|
Jeket/dotfiles-3
|
/.bin/notsodeep.load
|
UTF-8
| 1,492
| 2.578125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Redirect HTTPS (sport 443) and HTTP (sport 80) SYN,ACK reply packets into
# NFQUEUE 200 so the notsodeep userspace tool can process them.
# --queue-bypass lets traffic flow normally when no listener is attached.
# https://github.com/farukuzun/notsodeep
# https://home.regit.org/netfilter-en/using-nfqueue-and-libnetfilter_queue/
# https://wiki.nftables.org/wiki-nftables/index.php/Moving_from_iptables_to_nftables
# remove if exist
# (delete first so re-running the script does not stack duplicate rules;
# the -D calls fail harmlessly when the rules are absent)
sudo iptables -D INPUT -p tcp --tcp-flags SYN,ACK SYN,ACK --sport 443 -j NFQUEUE --queue-num 200 --queue-bypass
sudo iptables -t raw -D PREROUTING -p tcp --sport 80 --tcp-flags SYN,ACK SYN,ACK -j NFQUEUE --queue-num 200 --queue-bypass
# install
# sudo iptables -S INPUT
# sudo iptables --table raw --list
sudo iptables -A INPUT -p tcp --tcp-flags SYN,ACK SYN,ACK --sport 443 -j NFQUEUE --queue-num 200 --queue-bypass
sudo iptables -t raw -I PREROUTING -p tcp --sport 80 --tcp-flags SYN,ACK SYN,ACK -j NFQUEUE --queue-num 200 --queue-bypass
# The remainder documents equivalent nftables rules for future migration.
# https://wiki.nftables.org/wiki-nftables/index.php/Queueing_to_userspace
# sudo nft add
# Generated by iptables-translate
# nft add rule ip filter INPUT tcp sport 443 tcp flags & (syn|ack) == syn|ack counter queue num 200 bypass
# nft insert rule ip raw PREROUTING tcp sport 80 tcp flags & (syn|ack) == syn|ack counter queue num 200 bypass
# iptables-translate -t raw -I PREROUTING -p tcp --sport 80 --tcp-flags SYN,ACK SYN,ACK -j NFQUEUE --queue-num 200 --queue-bypass
#
# sudo nft add ip input protocol tcp tcp flags { syn, ack } queue num 200 bypass
#
# sudo iptables-translate -A INPUT -p tcp --tcp-flags SYN,ACK SYN,ACK --sport 443 -j NFQUEUE --queue-num 200 --queue-bypass
| true
|
a518d856114cb2a098eac0ddf8870446177d05d6
|
Shell
|
Martiusweb/YaPhpDoc
|
/YaPhpDoc
|
UTF-8
| 381
| 3.4375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Launcher for YaPhpDoc: find the PHP CLI binary and run YaPhpDoc.php from
# this script's own directory, forwarding all arguments.

# Configurable vars
script_name=YaPhpDoc.php

# Where is PHP binary ?  (`command -v` replaces the non-portable `which`)
exe=$(command -v php)
# BUG FIX: with the original unquoted test, a missing PHP left $exe empty
# and `[ ! -x $exe ]` collapsed to `[ ! -x ]`, which is FALSE — the script
# then tried to run an empty command instead of aborting.
if [ ! -x "$exe" ]
then
    echo 'Unable to find or run PHP, aborting'
    exit 1
fi
# echo 'Found PHP Binary : '$exe

# Where am I ?  dirname of $0 works for both PATH-found and relative invocations.
script_path=$(dirname "$0")
# echo 'Script path is : '$script_path

# Ready !
# echo 'Running '$script_name
"$exe" -f "$script_path/$script_name" -- "$@"
| true
|
66d0cb51662a285d8027727a9164803b0fdd74b3
|
Shell
|
fr34k8/gentoo-cloud-image-builder
|
/config.sh
|
UTF-8
| 2,793
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# Prepare inputs for the Gentoo cloud image builder: download the latest
# (or configured) minimal-install ISO, stage3 tarball and portage snapshot,
# create the target qcow2 image, and regenerate builder/builder.cfg.

BASE_URL="http://distfiles.gentoo.org/releases/amd64/autobuilds/"
#IMG="gentoo.img"
#ISO="install-amd64-minimal-20150430.iso"
#STAGE=stage3-amd64-20150507.tar.bz2
#PORTAGE=portage-20150511.tar.bz2
#QEMU_MEMORY="512"
#QEMU_NET_TYPE="user"
#KERNEL_CONFIGURE="0"
#KERNEL_MAKE_OPTS=""
#EMERGE_EXTRA_PACKAGES
EMERGE_BASE_PACKAGES="acpid dmidecode syslog-ng cronie dhcpcd mlocate xfsprogs dosfstools grub sudo postfix cloud-init vim gentoo-sources linux-firmware parted portage-utils gentoolkit bash-completion gentoo-bashcomp eix tmux app-misc/screen dev-vcs/git net-misc/curl usbutils pciutils logrotate gptfdisk sys-block/gpart"
# Optional overrides for any of the variables above.
source config.cfg &> /dev/null

# Download the newest minimal-install ISO unless one is configured and present.
# (deprecated `[ ... -o ... ]` replaced with `||`)
if [ -z "${ISO}" ] || [ ! -f "iso/${ISO}" ]; then
  OUTPUT=$(curl "${BASE_URL}latest-install-amd64-minimal.txt" 2> /dev/null)
  # Strip comment/blank lines; first field is "<dir>/<file>".
  CURRENT=$(echo "${OUTPUT}" | sed -e 's/#.*$//' -e '/^$/d' | cut -d ' ' -f1)
  ISO=$(echo "${CURRENT}" | cut -d '/' -f2)
  if [ -f "iso/${ISO}" ]; then
    echo "latest iso ${ISO} already downloaded"
  else
    echo "downloading current iso ${ISO}"
    curl -o "iso/${ISO}" "${BASE_URL}${CURRENT}"
  fi
fi

# Same dance for the stage3 tarball.
if [ -z "${STAGE}" ] || [ ! -f "builder/${STAGE}" ]; then
  OUTPUT=$(curl "${BASE_URL}latest-stage3-amd64.txt" 2> /dev/null)
  CURRENT=$(echo "${OUTPUT}" | sed -e 's/#.*$//' -e '/^$/d' | cut -d ' ' -f1)
  STAGE=$(echo "${CURRENT}" | cut -d '/' -f2)
  if [ -f "builder/${STAGE}" ]; then
    echo "latest stage ${STAGE} already downloaded"
  else
    echo "downloading current stage ${STAGE}"
    curl -o "builder/${STAGE}" "${BASE_URL}${CURRENT}"
  fi
fi

# Portage snapshots are dated; yesterday's snapshot is assumed published.
if [ -z "${PORTAGE}" ]; then
  PORTAGE="portage-$(date --date yesterday +%Y%m%d).tar.bz2"
  if [ -f "builder/${PORTAGE}" ]; then
    echo "latest portage ${PORTAGE} already downloaded"
  else
    echo "downloading current portage ${PORTAGE}"
    curl -o "builder/${PORTAGE}" "http://distfiles.gentoo.org/releases/snapshots/current/${PORTAGE}"
  fi
fi

if [ -z "${IMG}" ]; then
  IMG="gentoo.img"
fi
if [ ! -f "${IMG}" ]; then
  echo "creating ${IMG} image file"
  qemu-img create -f qcow2 "${IMG}" 10G
fi

# Regenerate builder/builder.cfg with a single grouped redirect instead of
# the original rm + a dozen appends.
{
  echo "# autogenerated by config.sh"
  echo "PORTAGE=\"${PORTAGE}\""
  echo "STAGE=\"${STAGE}\""
  echo "DEV=\"/dev/vda\""
  echo "PART=\"/dev/vda1\""
  echo "KERNEL_CONFIGURE=\"${KERNEL_CONFIGURE}\""
  echo "KERNEL_MAKE_OPTS=\"${KERNEL_MAKE_OPTS}\""
  echo "EMERGE_BASE_PACKAGES=\"${EMERGE_BASE_PACKAGES}\""
  echo "EMERGE_EXTRA_PACKAGES=\"${EMERGE_EXTRA_PACKAGES}\""
} > builder/builder.cfg
| true
|
cb384033215c9d7c103774a21f945421ea96bfdf
|
Shell
|
Aranjan21/ec2-list
|
/elbservicerestart.sh
|
UTF-8
| 1,424
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
# Rolling restart of data-api-server across every instance in the
# "internal-data-api" target group: deregister, restart, re-register, and
# wait for health before moving to the next instance.
# Abort immediately on any AWS/ssh failure rather than continuing to pull
# instances out of the load balancer.
set -euo pipefail

arn=$(aws elbv2 describe-target-groups --names "internal-data-api" --query TargetGroups[].TargetGroupArn --output text)
instances=($(aws elbv2 describe-target-health --target-group-arn "${arn}" --query TargetHealthDescriptions[].Target.Id --output text))

# (removed dead `instanceid=${instances[0]}` — the loop variable below
# overwrote it before first use)
for instanceid in "${instances[@]}"; do
  instanceip=$(aws ec2 describe-instances --instance-ids "${instanceid}" --query Reservations[].Instances[].PrivateIpAddress --output text)
  instancename=$(aws ec2 describe-tags --filters Name=resource-id,Values="${instanceid}" Name=key,Values=Name --query Tags[].Value --output text)

  echo "Removing ${instancename} from Target Group"
  aws elbv2 deregister-targets --target-group-arn "${arn}" --targets Id="${instanceid}"
  echo "Waiting for traffic to drain"
  sleep 30

  # ssh command is written for running within the VPC (from ssh jump host)
  # if running from local machine, need ssh through ssh jump host
  echo "Restarting data-api-server on ${instancename}"
  ssh -A ubuntu@"${instanceip}" sudo systemctl restart data-api-server

  echo "Adding ${instancename} back to Target Group"
  aws elbv2 register-targets --target-group-arn "${arn}" --targets Id="${instanceid}"
  echo "Waiting for ${instancename} healthy status"
  aws elbv2 wait target-in-service --target-group-arn "${arn}" --targets Id="${instanceid}"
  echo "Done with ${instancename}"
done
echo "All done"
| true
|
84f0e8677cb089974d80e134ac60421ac5f79211
|
Shell
|
ncsa/GenomicsCortextVarNextflow
|
/nextflow_scripts/templates/PDVariantCallingInd.sh
|
UTF-8
| 1,725
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/bash
######################################################################################################################################################################################
#                              Script to run PATH DIVERGENCE Cortex                                                                                                                  #
######################################################################################################################################################################################
# Nextflow template: backslash-escaped variables are expanded by bash at run
# time; unescaped brace variables (PDLogDir, params.*, color) are filled in
# by Nextflow when the template is rendered.
# The "color" value arrives as a bracketed, comma-separated triple; split it
# on ", " into combinationGraph / colorList / sampleIndex.
IFS=", "
colorBashString="${color}"
removeFirstPar=\${colorBashString#[}
removeLastPar=\${removeFirstPar/]/}
index=0
declare -a dataArray
for x in \${removeLastPar}
do
dataArray[\$index]=\$x
index=\$((\$index+1))
done
combinationGraph=\${dataArray[0]}
colorList=\${dataArray[1]}
sampleIndex=\${dataArray[2]}
# Read the color-list file, one sample path per line.
declare -a colorNames
while IFS= read line
do
colorNames+=(\$line)
done <"\$colorList"
# Derive the sample name: take the basename of the indexed entry, then strip
# everything through the "pathToCleaned" marker.
sampleNameRaw=\${colorNames[\$sampleIndex]}
sampleNameMedium=\${sampleNameRaw##*/}
prefix="pathToCleaned"
sampleName=\${sampleNameMedium#*\$prefix}
echo "using \${combinationGraph} as combination graph" > ${PDLogDir}/\${sampleName}_PD.log
echo "using \${colorList} as color list" >> ${PDLogDir}/\${sampleName}_PD.log
echo "using \${sampleIndex}" >> ${PDLogDir}/\${sampleName}_PD.log
echo "Now running PD variant calling on \${sampleName}" >> ${PDLogDir}/\${sampleName}_PD.log
# Cortex command
${params.cortexDirVariantCalling} ${params.variantCallingCortexConfig} --max_var_len ${params.maxVarLength} --multicolour_bin \${combinationGraph} --path_divergence_caller \${sampleIndex} --ref_colour 0 --list_ref_fasta ${params.pathToReferenceList} --path_divergence_caller_output ${params.resultsDir}/variantCallingOutput/\${sampleName} --print_colour_coverages >> ${PDLogDir}/\${sampleName}_PD.log
| true
|
bfa1982cbbbaca2128b79cb96a031db7f264e117
|
Shell
|
PrakashYNWA/GO-JEK
|
/ParkingLot/parking_lot.sh
|
UTF-8
| 229
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/bash
# Build the ParkingLot jar and run it: file-input mode when $1 names an
# existing file, interactive mode otherwise.
# (Removed the original `chmod 777 parking_lot.sh` — the script is already
# executing, and world-writable permissions are a security risk.)

PASSED=$1

# Build once up front instead of duplicating the step in both branches.
mvn package

if [[ -f "$PASSED" ]]
then
    # NOTE(review): the jar is fed file_inputs.txt, not "$PASSED" — the
    # checked file and the consumed file differ; confirm this is intended.
    java -jar target/ParkingLot-0.0.1-SNAPSHOT.jar file_inputs.txt
else
    java -jar target/ParkingLot-0.0.1-SNAPSHOT.jar
fi
| true
|
2c5b7d616b8b55f5b05dd51f66bcf40124dc8ac0
|
Shell
|
crails-framework/puppeteer-recipe-wordpress
|
/setup/03_mariadb.sh
|
UTF-8
| 765
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash -ex
# Install and enable MariaDB, then create (or re-password) the application's
# SQL user and create its database with full grants.
source ./variables 2> /dev/null

apt-get install -y mariadb-server
systemctl enable mariadb.service

# Default the database name / SQL user to the instance name / app user.
if [[ -z "$APP_SQL_DATABASE" ]] ; then
  export APP_SQL_DATABASE="$INSTANCE_NAME"
fi
if [[ -z "$APP_SQL_USER" ]] ; then
  export APP_SQL_USER="$APP_USER"
fi

# -N -s suppress header/formatting so we get the bare count. BUG FIX: the
# original `| grep 1 || echo "0"` treated the user as missing whenever the
# count was anything but exactly 1 (e.g. the same user defined for several
# hosts), and then failed on the duplicate CREATE USER.
# NOTE(review): env values are interpolated into SQL — assumed trusted
# provisioning input, not end-user data.
user_count=$(mysql -N -s -e "SELECT COUNT(User) FROM mysql.user WHERE User='$APP_SQL_USER'")
if [[ "$user_count" -ge 1 ]] ; then
  mysql -e "SET PASSWORD FOR '$APP_SQL_USER'@'localhost' = PASSWORD('$APP_SQL_PASSWORD');"
else
  mysql -e "CREATE USER '$APP_SQL_USER'@'localhost' IDENTIFIED BY '$APP_SQL_PASSWORD';"
fi

mysql -e "CREATE DATABASE IF NOT EXISTS \`$APP_SQL_DATABASE\`"
mysql -e "GRANT ALL PRIVILEGES ON \`$APP_SQL_DATABASE\`.* TO '$APP_SQL_USER'@'localhost';"
| true
|
54e197c9f63beedf82239143e2a266a69054e845
|
Shell
|
kubedge/kubedge-operator-ansible
|
/manualbuild.sh
|
UTF-8
| 613
| 2.59375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -x
# Build the kubedge ansible-operator dev image and push ALL of its tags.
export BASE_IMAGE=quay.io/water-hole/ansible-operator
export COMPONENT=kubedge-operator-ansible
export VERSION=0.0.1
export DHUBREPO="hack4easy/$COMPONENT-dev"
export DOCKER_NAMESPACE="hack4easy"
export DOCKER_USERNAME="kubedgedevops"
# Secret comes from the environment; never hardcode it here.
# NOTE(review): the -x trace prints this echo line, leaking the password
# into build logs — consider wrapping the login in `set +x`.
export DOCKER_PASSWORD=$KUBEDGEDEVOPSPWD
# cp $HOME/bin/arpscan .
# --password-stdin keeps the secret out of the process list.
echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin

docker build -t "$DHUBREPO:v$VERSION" --build-arg BASE_IMAGE="${BASE_IMAGE}" -f build/Dockerfile .
docker tag "$DHUBREPO:v$VERSION" "$DHUBREPO:latest"
docker tag "$DHUBREPO:v$VERSION" "$DHUBREPO:from-kubedgesdk"

# BUG FIX: `docker push $DHUBREPO` without a tag pushes only :latest on
# modern Docker, silently skipping the version and from-kubedgesdk tags
# created above — push each tag explicitly.
docker push "$DHUBREPO:v$VERSION"
docker push "$DHUBREPO:latest"
docker push "$DHUBREPO:from-kubedgesdk"
| true
|
1b5c12172153ada8184ae2d87293513e21788b69
|
Shell
|
chrmorais/device-custom
|
/common/mke2img.sh
|
UTF-8
| 260
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
# mke2img.sh SRC DST — build an ext2 filesystem image DST populated from
# directory SRC.

SRC=$1
DST=$2

# Total size of SRC in whole megabytes. BUG FIX: -s gives one summary line;
# the original --max-depth=1 printed one line per first-level subdirectory,
# making SIZE multi-word and breaking the dd count below.
SIZE=$(du -s -BM "$SRC" | awk '{print int($1)}')
# NOTE(review): no headroom is added for filesystem metadata — a nearly-full
# source tree may not fit; confirm or add a margin.

# echo "create image size=${SIZE}M"
dd if=/dev/zero of="$DST" bs=1M count="$SIZE" >/dev/null 2>&1
# One mke2fs call both formats (ext2) and populates from SRC. The original
# ran mke2fs twice; the first format was immediately redone by the second.
mke2fs -F -t ext2 -d "$SRC" "$DST" >/dev/null 2>&1
| true
|
78db1410ab5cdb9dcab3ffdfc0fb03e05c2d6281
|
Shell
|
ViBiOh/scripts
|
/release
|
UTF-8
| 8,428
| 3.828125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Release tooling prologue — strict mode: abort on unset variables, failed
# pipeline stages, and command errors.
set -o nounset -o pipefail -o errexit
# TRACE=1 enables command tracing (xtrace) for debugging.
if [[ ${TRACE:-0} == "1" ]]; then
set -o xtrace
fi
# Wipe and recreate the binary output directory for a fresh release build.
release_clean() {
  var_info "Cleaning ${OUTPUT_DIR}"
  # ":?" aborts the whole script if OUTPUT_DIR is unset/empty, so the
  # rm -rf can never run against "/".
  rm -rf -- "${OUTPUT_DIR:?}"
  mkdir -- "${OUTPUT_DIR}"
}
# Cross-compile every `main` package under ROOT_DIR for each OS/arch in
# GO_ARCHS, writing stripped binaries (and optional GPG signatures) to
# OUTPUT_DIR. Reads: ROOT_DIR, OUTPUT_DIR, GO_ARCHS, GIT_TAG,
# GO_VERSION_PATH, GPG_FINGERPRINT. Returns 1 if go is not installed.
golang_build() {
if ! command -v go >/dev/null 2>&1; then
var_error "go not found"
return 1
fi
local SOURCE_DIR
SOURCE_DIR="${ROOT_DIR}/..."
(
cd "${ROOT_DIR}"
# Directories of all packages named "main" (i.e. buildable commands).
local OUTPUT_BINARIES
mapfile -t OUTPUT_BINARIES < <(go list -f '{{ .Dir }} {{ .Name }}' "${SOURCE_DIR}" | grep "main" | awk '{print $1}')
# Space-separated os/arch pairs (a plain string, not an array).
local GO_ARCHS="${GO_ARCHS:-linux/amd64 linux/arm linux/arm64 darwin/amd64 darwin/arm64 windows/amd64 windows/arm64}"
for main in "${OUTPUT_BINARIES[@]}"; do
local EXTENSION
local NAME
NAME="$(basename "${main}")"
# -s -w strips symbol/debug tables; optionally stamp the version variable.
local LDFLAGS="-s -w"
if [[ -n ${GIT_TAG-} ]] && [[ -n ${GO_VERSION_PATH-} ]]; then
LDFLAGS+=" -X '${GO_VERSION_PATH}=${GIT_TAG}'"
fi
# ${GO_ARCHS[*]} on a scalar yields the string itself; the unquoted
# expansion then word-splits it into the individual os/arch pairs.
for OS_ARCH in ${GO_ARCHS[*]}; do
local BUILD_GOOS
BUILD_GOOS="$(printf '%s' "${OS_ARCH}" | awk -F '/' '{ print $1 }')"
local BUILD_GOARCH
BUILD_GOARCH="$(printf '%s' "${OS_ARCH}" | awk -F '/' '{ print $2 }')"
# Subshell so the GOOS/GOARCH/CGO exports don't leak between iterations.
(
export GOOS="${BUILD_GOOS}"
export GOARCH="${BUILD_GOARCH}"
export CGO_ENABLED="0"
if [[ ${GOOS} == "windows" ]]; then
EXTENSION=".exe"
else
EXTENSION=""
fi
var_info "Building binary ${NAME}_${GOOS}_${GOARCH} to ${OUTPUT_DIR}"
go build "-ldflags=${LDFLAGS}" -installsuffix nocgo -o "${OUTPUT_DIR}/${NAME}_${GOOS}_${GOARCH}${EXTENSION}" "${main}"
# Detached ASCII-armored signature next to each binary, when configured.
if [[ -n ${GPG_FINGERPRINT-} ]]; then
gpg --no-tty --batch --detach-sign --armor --local-user "${GPG_FINGERPRINT}" "${OUTPUT_DIR}/${NAME}_${GOOS}_${GOARCH}${EXTENSION}"
fi
)
done
done
)
}
# Fetch runtime assets needed by scratch-based Docker images into the CWD:
# a zoneinfo.zip + CA bundle (extracted from an alpine container), and —
# when RELEASE_NEED_WAIT=true — the per-platform `wait` helper binaries.
docker_dependencies() {
docker run -v "$(pwd):/tmp/" --rm "alpine" /bin/sh -c 'apk --update add tzdata ca-certificates zip && cd /usr/share/zoneinfo/ && zip -q -r -0 /tmp/zoneinfo.zip . && cp /etc/ssl/certs/ca-certificates.crt /tmp/ca-certificates.crt'
if [[ ${RELEASE_NEED_WAIT-} == "true" ]]; then
local WAIT_VERSION="0.0.2"
# One wait binary per target platform (os/arch pairs from DOCKER_ARCHS).
for platform in ${DOCKER_ARCHS:-linux/amd64 linux/arm linux/arm64}; do
local BUILD_GOOS
BUILD_GOOS="$(printf '%s' "${platform}" | awk -F '/' '{ print $1 }')"
local BUILD_GOARCH
BUILD_GOARCH="$(printf '%s' "${platform}" | awk -F '/' '{ print $2 }')"
local WAIT_BINARY_NAME="wait_${BUILD_GOOS}_${BUILD_GOARCH}"
curl \
--disable \
--silent \
--show-error \
--location \
--max-time 300 \
--output "${WAIT_BINARY_NAME}" \
"https://github.com/ViBiOh/wait/releases/download/v${WAIT_VERSION}/${WAIT_BINARY_NAME}"
chmod +x "${WAIT_BINARY_NAME}"
done
fi
}
# Build and push a multi-platform Docker image via buildx.
# Reads: DOCKER_ARCHS (optional); prompts for DOCKER_IMAGE, IMAGE_VERSION,
# DOCKERFILE via var_read. Returns 1 if docker is not installed.
docker_build() {
  if ! command -v docker >/dev/null 2>&1; then
    var_error "docker not found"
    return 1
  fi

  local DOCKER_PLATFORMS
  DOCKER_PLATFORMS="${DOCKER_ARCHS:-linux/amd64 linux/arm linux/arm64}"
  # buildx expects a comma-separated platform list.
  DOCKER_PLATFORMS="${DOCKER_PLATFORMS// /,}"

  # BUG FIX: prompt for the settings BEFORE composing the image reference.
  # The original computed BUILT_IMAGE first, which aborted under the
  # script's `set -o nounset` (or used stale/empty values) whenever
  # DOCKER_IMAGE / IMAGE_VERSION were not yet set.
  var_read DOCKER_IMAGE
  var_read IMAGE_VERSION
  var_read DOCKERFILE "Dockerfile"

  local BUILT_IMAGE="${DOCKER_IMAGE}:${IMAGE_VERSION}"

  export DOCKER_CLI_EXPERIMENTAL="enabled"
  export DOCKER_BUILDKIT="1"

  var_info "Building and pushing image ${BUILT_IMAGE} for ${DOCKER_PLATFORMS}"
  docker buildx build \
    --push \
    --platform "${DOCKER_PLATFORMS}" \
    --file "${DOCKERFILE}" \
    --tag "${BUILT_IMAGE}" \
    --build-arg "VERSION=${IMAGE_VERSION}" \
    -o "type=registry,oci-mediatypes=true,compression=estargz,force-compression=true" \
    .
}
# Interactively create a GitHub release: pick the commit to release with fzf,
# infer the semver bump from the changelog, then POST the release via the
# GitHub REST API. Relies on helpers sourced from "meta" (git_*, var_*,
# http_*, version_semver, github_token).
release() {
if ! git_is_inside; then
var_warning "not inside a git tree"
return 1
fi
var_info "Identifying semver"
local LAST_TAG
LAST_TAG="$(git_last_tag)"
local VERSION_REF
local PREVIOUS_REF
if [[ -n ${LAST_TAG-} ]]; then
# Let the user pick the commit to release among those since the last tag;
# awk keeps only the short hash from the selected pretty-printed line.
VERSION_REF="$(git log --no-merges --invert-grep --grep "\[skip ci\] Automated" --color --pretty=format:'%Cred%h%Creset%C(yellow)%d%Creset %s %Cgreen(%cr) %C(bold blue)<%an>%Creset' "HEAD...${LAST_TAG}" | fzf --height=20 --ansi --reverse | awk '{printf("%s", $1)}')"
var_read PREVIOUS_REF "$(git tag --sort=-creatordate | grep --invert-match "${VERSION_REF}" | head -1)"
else
# No tag yet: release HEAD against its parent.
PREVIOUS_REF="HEAD^1"
VERSION_REF="HEAD"
fi
local CHANGELOG
CHANGELOG=$(git_changelog "${VERSION_REF}" "${PREVIOUS_REF}")
printf "%bCHANGELOG:%b\n\n%s%b\n\n" "${YELLOW}" "${GREEN}" "${CHANGELOG}" "${RESET}"
# Guess the semver increment from conventional-changelog section headers.
local VERSION_TYPE="patch"
if [[ ${CHANGELOG} =~ \#\ BREAKING\ CHANGES ]]; then
VERSION_TYPE="major"
elif [[ ${CHANGELOG} =~ \#\ Features ]]; then
VERSION_TYPE="minor"
fi
printf "%bRelease seems to be a %b%s%b\n" "${BLUE}" "${YELLOW}" "${VERSION_TYPE}" "${RESET}"
var_info "Specify explicit git tag or major|minor|patch for semver increment"
local VERSION
VERSION="$(printf "%bpatch\n%bminor\n%bmajor" "${GREEN}" "${YELLOW}" "${RED}" | fzf --height=20 --ansi --reverse)"
local GIT_TAG
GIT_TAG="$(version_semver "${VERSION}" "${VERSION_REF}" "quiet")"
local GITHUB_TOKEN
GITHUB_TOKEN="$(github_token)"
var_read GITHUB_TOKEN "" "secret"
var_read GITHUB_REPOSITORY "$(git_remote_repository)"
var_read RELEASE_NAME "${GIT_TAG}"
var_info "Creating release ${RELEASE_NAME} for ${GITHUB_REPOSITORY}..."
http_init_client --header "Authorization: token ${GITHUB_TOKEN}"
HTTP_CLIENT_ARGS+=("--max-time" "120")
# Build the JSON payload safely with jq (handles quoting/newlines in body).
local PAYLOAD
PAYLOAD="$(jq --compact-output --null-input \
--arg tag "${RELEASE_NAME}" \
--arg target "$(git rev-parse "${VERSION_REF}")" \
--arg name "${RELEASE_NAME}" \
--arg body "${CHANGELOG}" \
'{tag_name: $tag, target_commitish: $target, name: $name, body: $body}')"
http_request --header "Content-Type: application/json" --request "POST" "https://api.github.com/repos/${GITHUB_REPOSITORY}/releases" --data "${PAYLOAD}"
if [[ ${HTTP_STATUS} != "201" ]]; then
http_handle_error "Unable to create release"
return 1
fi
rm "${HTTP_OUTPUT}"
var_success "${GITHUB_REPOSITORY}@${RELEASE_NAME} created!"
}
# Upload every file in OUTPUT_DIR as an asset of the GitHub release
# identified by tag GIT_TAG on repository GITHUB_REPOSITORY.
# Returns 1 when the release lookup or any upload fails.
release_upload() {
  if ! [[ -d ${OUTPUT_DIR} ]]; then
    var_warning "Nothing to upload!"
    # BUG FIX: stop here; execution previously fell through and the loop
    # below iterated over the literal, unmatched "${OUTPUT_DIR}/*" glob.
    return 0
  fi

  http_init_client --header "Authorization: token ${GITHUB_TOKEN}"
  HTTP_CLIENT_ARGS+=("--max-time" "120")

  var_read GIT_TAG

  # Resolve the release's asset upload URL from its tag.
  http_request --header "Content-Type: application/json" "https://api.github.com/repos/${GITHUB_REPOSITORY}/releases/tags/${GIT_TAG}"
  if [[ ${HTTP_STATUS} != "200" ]]; then
    http_handle_error "Unable to get release"
    return 1
  fi

  local RESPONSE_URL
  # Strip the trailing "{?name,label}" URI-template suffix from upload_url.
  RESPONSE_URL="$(jq --raw-output .upload_url "${HTTP_OUTPUT}" | sed 's|{.*||')"
  rm "${HTTP_OUTPUT}"

  for asset in "${OUTPUT_DIR}"/*; do
    var_info "Uploading asset ${asset}"
    http_request --header "Content-Type: application/x-executable" --request POST "${RESPONSE_URL}?name=$(basename "${asset}")" --data-binary "@${asset}"
    if [[ ${HTTP_STATUS} != "201" ]]; then
      http_handle_error "Unable to upload asset ${asset}"
      return 1
    fi
    rm "${HTTP_OUTPUT}"
  done
}
# Print the list of supported sub-commands to stdout.
release_usage() {
  printf "Usage of %s\n" "${0}"
  printf "clean\n\tClean output dir %s\n" "${OUTPUT_DIR}"
  printf "build\n\tBuild artifacts\n"
  printf "docker\n\tBuild docker images\n"
  printf "release\n\tCreate GitHub release\n"
  printf "assets\n\tUpload output dir content to GitHub release\n"
  # BUG FIX: the last entry was misspelled "clea," — it documents "clean".
  printf "clean\n\tClean created output directory\n"
}
# Print the directory containing this script, resolving a symlinked entry
# point to its target's directory.
script_dir() {
  local self
  self="${BASH_SOURCE[0]}"

  if [[ -L ${self} ]]; then
    # Invoked through a symlink: report the link target's directory.
    dirname "$(readlink "${self}")"
    return
  fi

  # Subshell so the cd does not leak into the caller's working directory.
  (cd "$(dirname "${self}")" && pwd)
}
# Entry point: source the shared "meta" helpers, then dispatch each CLI
# argument to the matching release step. Unknown arguments print usage.
main() {
source "$(script_dir)/meta" && meta_check "var" "git" "github" "http" "pass" "version"
local ROOT_DIR
ROOT_DIR="$(git_root)"
# Artifacts are collected under <repo root>/release.
local OUTPUT_DIR="${ROOT_DIR}/release"
for arg in "${@}"; do
case "${arg}" in
"build")
release_clean
# Only Go projects are built here (detected by the presence of go.mod).
if [[ -f "${ROOT_DIR}/go.mod" ]]; then
golang_build
fi
;;
"docker")
docker_dependencies
docker_build
;;
"release")
release
;;
"assets")
release_upload
;;
"clean")
release_clean
;;
*)
release_usage
;;
esac
done
}
# With no arguments, run the full pipeline: release, build, assets, clean.
DEFAULT_ARGS=("release" "build" "assets" "clean")
main "${@:-${DEFAULT_ARGS[@]}}"
| true
|
2b62a53b15dbc4226cff90efe25d03d476700987
|
Shell
|
jwrona/ARC
|
/2proj/Scripts/calc.sh
|
UTF-8
| 584
| 3.5625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Compute speedup (S), efficiency (E) and serial fraction (ALFA) from a
# semicolon-separated benchmark file ($1): field 1 = process count N,
# field 2 = problem size, field 10 = runtime in scientific notation.
# A line with N=1 provides the sequential baseline SEQ_T for the lines
# that follow it.
IN_FILE=$1
SEQ_T=0
while read LINE
do
N=`echo "${LINE}" |cut "-d;" -f 1`
SIZE=`echo "${LINE}" |cut "-d;" -f 2`
TIME_SCI=`echo "${LINE}" | cut "-d;" -f 10`
# Rewrite "1.23e+04" into "1.23*10^04" so bc can evaluate it.
T=`echo "${TIME_SCI}" | sed -e 's/[eE]+*/\\*10\\^/'`
if [ $N -eq 1 ]
then #line with sequential results
echo $SIZE
SEQ_T=$T
#continue
fi
# NOTE(review): with "#continue" commented out, the N=1 line also falls
# through to the ALFA formula, whose denominator contains (N - 1) = 0 —
# bc will report a divide-by-zero there; confirm this is intended.
S=`echo "(${SEQ_T})/(${T})" | bc -l`
E=`echo "(${S})/(${N})" | bc -l`
# Karp-Flatt-style serial fraction: (T*N - T_seq) / (T_seq * (N - 1)).
ALFA=`echo "(${T} * ${N} - ${SEQ_T})/(${SEQ_T} * (${N} - 1))" | bc -l`
echo -e "N = ${N}\tT(N) = ${T}\tS = ${S}\tE = ${E}\tALFA = ${ALFA}"
done < "${IN_FILE}"
echo
| true
|
b4e4c2b6736b3d263f48da55a6d147786f8b128f
|
Shell
|
lingling1420q/kubernet-application
|
/src/bash/install.sh
|
UTF-8
| 1,399
| 2.53125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Install Docker CE and Kubernetes (kubeadm/kubelet/kubectl) on a CentOS host.

# Remove any pre-existing Docker packages to avoid conflicts.
sudo yum remove -y docker \
                  docker-client \
                  docker-client-latest \
                  docker-common \
                  docker-latest \
                  docker-latest-logrotate \
                  docker-logrotate \
                  docker-engine

sudo yum install -y yum-utils
sudo yum-config-manager \
    --add-repo \
    https://download.docker.com/linux/centos/docker-ce.repo
sudo yum-config-manager --enable docker-ce-nightly
sudo yum install -y docker-ce docker-ce-cli containerd.io

# json-file log driver with rotation, to make log collection easier.
sudo mkdir -p /etc/docker
# BUG FIX: the original single echo mixed nested double quotes (ending the
# outer string early) and was missing a closing brace, producing invalid
# JSON in daemon.json. Write the config verbatim via a quoted heredoc.
sudo tee /etc/docker/daemon.json >/dev/null <<'EOF'
{
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m",
    "max-file": "3"
  }
}
EOF
sudo systemctl start docker

# Install Kubernetes with kubeadm (Aliyun mirror of the upstream repo).
sudo bash -c "cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF"

# Set SELinux to permissive mode (effectively disabling it), as kubelet requires.
setenforce 0
sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config

sudo yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
sudo systemctl enable --now kubelet
| true
|
8029f7b4c6cce2a39a02904414ee26da1f2dd19b
|
Shell
|
X-DataInitiative/tick
|
/sh/build_benchmarks.sh
|
UTF-8
| 1,062
| 3.46875
| 3
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# Configure and build the tick C++ benchmarks in three flavours: no
# optimized BLAS, with BLAS, and with MKL (the latter two skippable via
# SKIP_BLAS / SKIP_MKL). Parallelism is controlled by N_CORES (default 1).
set -ex
CWD="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd $CWD/..
ROOT=$PWD
N_CORES=${N_CORES:=1}
CMAKE_DIR="${ROOT}/lib"
mkdir -p $ROOT/build/bench && cd $ROOT/build/bench
rm -rf build_noopt && mkdir -p build_noopt
echo ${CMAKE_DIR}
# Track which build directories were configured so they all get compiled.
builds=( build_noopt )
printf "\nNo optimization build\n"
(cd build_noopt && \
cmake VERBOSE=1 -DBENCHMARK=ON -DCMAKE_BUILD_TYPE=Release ${CMAKE_DIR})
if [ -z "$SKIP_BLAS" ]; then
printf "\nBLAS build\n"
rm -rf build_blas && mkdir -p build_blas
(cd build_blas && \
cmake VERBOSE=1 -DBENCHMARK=ON -DUSE_BLAS=ON -DCMAKE_BUILD_TYPE=Release ${CMAKE_DIR})
builds+=(build_blas)
fi
if [ -z "$SKIP_MKL" ]; then
printf "\nMKL build\n"
rm -rf build_mkl && mkdir -p build_mkl
(cd build_mkl && \
cmake VERBOSE=1 -DBENCHMARK=ON -DUSE_MKL=ON -DCMAKE_BUILD_TYPE=Release ${CMAKE_DIR})
builds+=(build_mkl)
fi
mkdir -p $ROOT/build/bench && cd $ROOT/build/bench
# Use only one core if no argument is provided
for d in "${builds[@]}" ; do (cd ${d} && make V=1 -j${N_CORES} VERBOSE=1); done
| true
|
9ef553e6c793b6fa2511a90584e4e20b4fa15909
|
Shell
|
QAInsights/Istio-Tracing
|
/milestone2/test.sh
|
UTF-8
| 716
| 3.140625
| 3
|
[] |
no_license
|
# Deploy the demo manifests, wait for the pods, port-forward the Istio
# ingress gateway to localhost:8080 and smoke-test the /checkout endpoint.
BASEDIR=$(dirname "$0")

kubectl apply -f "$BASEDIR"/k8s
kubectl apply -f "$BASEDIR"/istio

echo "Wait 20 seconds until k8s deployments are ready ..."
n=1
while [ $n -le 20 ]
do
  sleep 1
  echo "$n seconds"
  (( n++ ))
done

echo "Forward port to localhost, if it fails, please wait until all pods are ready and run the following commands manually ..."
# BUG FIX: the previous `kill $(ps -ef | grep ... | awk ...)` also matched
# the grep process itself and emitted "No such process" errors. pkill -f
# matches the full command line and excludes itself; `|| true` keeps the
# script going when no forwarder is running yet.
pkill -f "kubectl port-forward" || true
kubectl port-forward service/istio-ingressgateway 8080:80 -n istio-system &

echo "Wait 10 seconds until port forwarding is ready ..."
sleep 10
echo "Call the eShop microservice, if it fails, please wait a moment and run the following commands manually ..."
curl 127.0.0.1:8080/checkout
| true
|
b5affde76d09e51d14d4c10b3389dcf39bd33c5a
|
Shell
|
carymrobbins/dotfiles
|
/mac/bin/bs
|
UTF-8
| 810
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
# Shorthand wrapper around `brew services`:
#   bs l                  -> list services
#   bs <action> <service> -> run an action; single-letter aliases:
#       e=start, d=stop, r=stop+start (restart), s=status
#   service aliases (chunkwm, khd, mongo, cass) expand to full tap names.
usage() {
echo "Usage: $0 <action> <service>"
}
case "$#" in
1)
case "$1" in
l)
brew services list
exit
;;
*)
>&2 usage
exit 1
;;
esac
;;
2)
action="$1"
case "$action" in
e) action=start;;
d) action=stop;;
r) action="stop start";; # seems to work better than restart
s) action=status;;
esac
service="$2"
case "$service" in
chunkwm) service=crisidev/chunkwm/chunkwm;;
khd) service=koekeishiya/formulae/khd;;
mongo) service=mongodb26;;
cass) service=cassandra@2.1;;
esac
# $action is intentionally unquoted so "stop start" runs as two passes.
for a in $action; do
(
set -x
exec brew services "$a" "$service"
)
done
exit
;;
*)
>&2 usage
exit 1
;;
esac
| true
|
e13d1b1f4e960e30cf8214060ae8790ccd1f77dd
|
Shell
|
priyankaraghavan/ZapPipelineADO
|
/scripts/runzapSwagger.sh
|
UTF-8
| 1,925
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Run the OWASP ZAP API scan (zap-api-scan.py) in Docker against an
# OpenAPI/Swagger definition, authenticating with an OAuth bearer token and
# a set of form-handler defaults written to auth.prop.
IMAGE="owasp/zap2docker-weekly"
ROOTPATH=$1
JSON=$2
AUTHURL=$3
REPORTNAME=$4
CLIENTID=$5
CLIENTSECRET=$6
SCOPE=$7
USERID=$8
EMAIL=$9
#
echo "ROOTPATH"
echo "$ROOTPATH"
#
# NOTE(review): 777 permissions are very broad — needed so the container
# user can write reports, but consider tightening.
chmod 777 *.*
chmod 777 -R $ROOTPATH
# Obtain an OAuth access token for the API under test.
authtoken=$(python3 ${ROOTPATH}/scripts/Authtoken.py ${AUTHURL} ${CLIENTID} ${CLIENTSECRET} ${SCOPE})
#ls -l
# BUG FIX: "$pwd" referenced an undefined lowercase variable (always empty);
# the builtin $PWD holds the current directory.
echo "$PWD"
echo "START RUNNING ZAP"
# BUG FIX: -f so a missing auth.prop does not emit an error on the first run.
rm -f auth.prop
echo "----> Building prop file:"
# ZAP replacer rule injecting the bearer token on every request.
echo "replacer.full_list(0).description=auth1" >> auth.prop
echo "replacer.full_list(0).enabled=true" >> auth.prop
echo "replacer.full_list(0).matchtype=REQ_HEADER" >> auth.prop
echo "replacer.full_list(0).matchstr=Authorization" >> auth.prop
echo "replacer.full_list(0).regex=false" >> auth.prop
echo "replacer.full_list(0).replacement=Bearer $authtoken" >> auth.prop
# Form-handler defaults used by ZAP when filling request parameters.
echo "formhandler.fields.field\\(0\\).fieldId=userId" >> auth.prop
echo "formhandler.fields.field\\(0\\).value=${USERID}" >> auth.prop
echo "formhandler.fields.field\\(0\\).enabled=true" >>auth.prop
echo "formhandler.fields.field\\(1\\).fieldId=includeCountryCode" >> auth.prop
echo "formhandler.fields.field\\(1\\).value=true" >> auth.prop
echo "formhandler.fields.field\\(1\\).enabled=true" >> auth.prop
echo "formhandler.fields.field\\(2\\).fieldId=includeFunctionCode" >> auth.prop
echo "formhandler.fields.field\\(2\\).value=true" >> auth.prop
echo "formhandler.fields.field\\(2\\).enabled=true" >> auth.prop
echo "formhandler.fields.field\\(3\\).fieldId=email" >> auth.prop
echo "formhandler.fields.field\\(3\\).value=${EMAIL}" >> auth.prop
echo "formhandler.fields.field\\(3\\).enabled=true" >> auth.prop
docker pull ${IMAGE}
sudo docker run -v ${ROOTPATH}:/zap/wrk/:rw -t ${IMAGE} zap-api-scan.py \
-t ${JSON} -f openapi -r ${REPORTNAME} -z "-configfile /zap/wrk/auth.prop" -d
echo "ls -l"
# BUG FIX: "ls-l" was a typo (missing space) and failed with "command not found".
ls -l
cp ${ROOTPATH}/${REPORTNAME} ${ROOTPATH}/reports/${REPORTNAME}
echo "DONE"
|
753eff0c4d90f60256a0a2b1517d6840f335acfb
|
Shell
|
psranga/libeli5
|
/elido/tests/backtick1.sh
|
UTF-8
| 662
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
# Test elido's %readable_num()% template: pads numbers to width 5 with 2
# decimals, checks the default variable name X, a custom --varname, the
# error path for an unknown variable, and quoting of the substitution.
# Requires $ELIDO to point at the elido binary; set -e aborts on the first
# failed assertion.
set -e
set -x
test -n "$ELIDO"
actual=$(seq 1 4 | $ELIDO echo '%readable_num(X, 5, 2)%' | tr "\n" " ")
expected=" 1.00 2.00 3.00 4.00 "
test "$actual" = "$expected"
actual=$(seq 1 4 | $ELIDO --varname=Y echo '%readable_num(Y, 5, 2)%' | tr "\n" " ")
expected=" 1.00 2.00 3.00 4.00 "
test "$actual" = "$expected"
# Errors out saying Z not found.
actual=$(seq 1 4 | $ELIDO --varname=Z echo '%readable_num(Y, 5, 2)%' || echo notok)
expected="notok"
test "$actual" = "$expected"
actual=$(seq 1 4 | $ELIDO echo '"%readable_num(X, 5, 2)%"' | tr "\n" " ")
expected='" 1.00" " 2.00" " 3.00" " 4.00" '
test "$actual" = "$expected"
echo "All done"
| true
|
56b289b50328ed9bd2bafb2935efc0641cec0962
|
Shell
|
santosomar/osquery
|
/kernel/tools/unload_with_retry.sh
|
UTF-8
| 441
| 3.375
| 3
|
[
"GPL-2.0-only",
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Unload the osquery macOS kernel extension if it is loaded, retrying a few
# times because the extension refuses to unload until the daemon has been
# stopped long enough. Exits 1 when all attempts fail.
set -e

KERNEL_EXTENSION_IDENTIFIER="com.facebook.security.osquery"

if kextstat | grep -qm1 "$KERNEL_EXTENSION_IDENTIFIER"; then
  tries=5
  n=0
  until [ "$n" -ge "$tries" ]; do
    kextunload -b "$KERNEL_EXTENSION_IDENTIFIER" && break
    # Modernized: $(( )) replaces the obsolete $[ ] arithmetic syntax.
    n=$((n + 1))
    sleep 1 # We need to know the daemon has stopped for long enough for the
            # kernel extension to allow unloading.
  done
  if [ "$n" -ge "$tries" ]; then
    exit 1
  fi
fi
| true
|
eebe1ca6cba4fec30df365b2b59c7ada1ded9afa
|
Shell
|
aasplund/iot-lab
|
/deploy-all.sh
|
UTF-8
| 1,243
| 3.125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Push the lightbulb-api and lightbulb-spa images to AWS ECR, then recreate
# their pods on the Kubernetes cluster.

# Print a green status message to stderr.
function note() {
local GREEN NC
GREEN='\033[0;32m'
NC='\033[0m' # No Color
printf "\n${GREEN}$@ ${NC}\n" >&2
}
note "Login to aws..."
eval "$(aws ecr get-login --no-include-email --region eu-west-1)";
# The ECR repository URI is looked up dynamically with jq for each image.
note "Tag and push lightbulb-api..."
docker tag lightbulb-api "$(aws ecr describe-repositories --repository-names=lightbulb-api --output json | jq -r '.repositories[0].repositoryUri'):latest"
docker push "$(aws ecr describe-repositories --repository-names=lightbulb-api --output json | jq -r '.repositories[0].repositoryUri'):latest"
note "Tag and push lightbulb-spa..."
docker tag lightbulb-spa "$(aws ecr describe-repositories --repository-names=lightbulb-spa --output json | jq -r '.repositories[0].repositoryUri'):latest"
docker push "$(aws ecr describe-repositories --repository-names=lightbulb-spa --output json | jq -r '.repositories[0].repositoryUri'):latest"
# Delete the pods, wait briefly, then recreate them from the manifests.
note "Undeploy lightbulb-api..."
kubectl delete pod lightbulb-api.gunnebo.se
note "Undeploy lightbulb-spa..."
kubectl delete pod lightbulb-spa.gunnebo.se
sleep 5
note "Redeploy lightbulb-api..."
kubectl create -f lightbulb-api/lightbulb-api.yml
note "Redeploy lightbulb-spa..."
kubectl create -f lightbulb-spa/lightbulb-spa.yml
| true
|
db10f2d984188d0dfa9eb413db51784b780f8154
|
Shell
|
OpenIndiana/oi-userland
|
/components/developer/golang-120/bootstrap_get.sh
|
UTF-8
| 293
| 3.171875
| 3
|
[] |
no_license
|
#!/usr/bin/bash
# Fetch the illumos Go bootstrap toolchain and unpack it into ./go_bootstrap,
# skipping each step when its result already exists.
GOLANG_ARCHIVE="go1.19.5.illumos-amd64.tar.gz"

# Download the archive only once.
if ! [ -f "${GOLANG_ARCHIVE}" ]; then
	wget "https://illumos.org/downloads/${GOLANG_ARCHIVE}" -O "${GOLANG_ARCHIVE}"
fi

mkdir -p go_bootstrap
# BUG FIX: after extraction "go_bootstrap/bin" is a directory, so the
# original "-f" (regular file) test was always false and the archive was
# re-extracted on every run; "-e" matches any existing path.
# NOTE(review): the tarball may unpack under go_bootstrap/go/bin instead —
# verify the archive layout and adjust the sentinel path if so.
if ! [ -e "go_bootstrap/bin" ]; then
	gtar -C go_bootstrap -xzf "${GOLANG_ARCHIVE}"
fi
| true
|
d6339638ec995e40fbad06af69b60910f86450cb
|
Shell
|
petronny/aur3-mirror
|
/cedet-bzr/PKGBUILD
|
UTF-8
| 1,372
| 2.984375
| 3
|
[] |
no_license
|
# Contributor: Jed Brown <jed@59A2.org>
# Arch Linux PKGBUILD for the CEDET Emacs tools, built from the upstream
# Bazaar trunk checkout (re-used between builds when already present).
pkgname=cedet-bzr
pkgver=8450
pkgrel=1
pkgdesc="Collection of Emacs Development Enviromnent Tools (Bazaar version)"
arch=('any')
url="http://cedet.sourceforge.net/"
license=('GPL')
conflicts=('cedet' 'cedet-cvs')
provides=('cedet')
depends=('emacs')
makedepends=('bzr')
source=('cedet-bzr.install')
md5sums=('0468130bff9ea8d48fe4c7ec996f3686')
install=$pkgname.install
_bzrtrunk='bzr://cedet.bzr.sourceforge.net/bzrroot/cedet/code/trunk'
_bzrmod=cedet
build() {
cd "$srcdir"
msg "Connecting to BZR server...."
# Update an existing checkout, otherwise do a fresh lightweight checkout.
if [ -d $_bzrmod/.bzr ]; then
(cd $_bzrmod && bzr update -v)
msg "BZR update done or server timeout"
else
bzr checkout --lightweight $_bzrtrunk $_bzrmod
msg "BZR checkout done or server timeout"
fi
msg "Starting build ..."
# Build in a copy so the pristine checkout can be re-used next time.
rm -rf ${_bzrmod}-build
cp -a ${_bzrmod} ${_bzrmod}-build
cd ${_bzrmod}-build
# CEDET's make does not tolerate parallel job flags from makepkg.
unset MAKEFLAGS
make
}
package() {
cd "$srcdir/${_bzrmod}-build"
install -d $pkgdir/usr/share/emacs/site-lisp/cedet
install cedet-devel-load.el cedet-remove-builtin.el $pkgdir/usr/share/emacs/site-lisp/cedet
cp -a lisp contrib $pkgdir/usr/share/emacs/site-lisp/cedet
# http://sourceforge.net/tracker/index.php?func=detail&aid=3585232&group_id=17886&atid=117886
touch $pkgdir/usr/share/emacs/site-lisp/cedet/.nosearch
make INFODIR=$pkgdir/usr/share/info install-info
}
| true
|
22b75aba8ab4765f7c63cd3c6348e519e4dfd4e3
|
Shell
|
bangpound/ci-php-lint
|
/src/lint_php.sh
|
UTF-8
| 539
| 3.875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Syntax-check PHP read from stdin with `php --syntax-check`, echoing php's
# output and propagating its exit status. When sourced, only exports the
# lint_php function; when executed, runs it directly.
function lint_php() {
# PHP_BIN may be preset by the caller; otherwise locate php on PATH.
if [ -z "${PHP_BIN:-}" ]; then
PHP_BIN=$(which php)
fi
if [ ! -x "${PHP_BIN}" ] || [ ! -f "${PHP_BIN}" ]; then
echo "PHP executable ${PHP_BIN} does not exist."
return 1
fi
# --no-php-ini plus explicit -d flags give a deterministic lint environment;
# <&0 forwards this function's stdin to php.
output=$($PHP_BIN --no-php-ini --syntax-check -ddisplay_errors\=1 -derror_reporting\=E_ALL -dlog_errors\=0 2>&1 <&0)
status=$?
echo "$output"
return $status
}
# Sourced vs. executed detection.
if [ "${BASH_SOURCE[0]}" != "${0}" ]; then
export -f lint_php
else
lint_php "${@}"
exit ${?}
fi
|
ab8729de661868cefef7d1b4a2672dec4bed0ad3
|
Shell
|
entryword/react-project
|
/build_script/frontendBuild.sh
|
UTF-8
| 678
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/sh
# Build the React frontend with yarn and move the produced static assets
# into the backend's eventlist/static directory, normalizing the hashed
# chunk filenames to stable names (main.css, chunk.js, main.chunk.js).
cd /var/www/html/website2018/
## 1. build
echo "[Pyladies] build web..."
cd frontend_react && yarn run build && cd ..
## 2. clean
echo "[Pyladies] clean..."
# Empty the target dir if present, otherwise create it.
if [ -d ./frontend/eventlist/static ]
then
rm -rf ./frontend/eventlist/static/*
else
mkdir ./frontend/eventlist/static
fi
## 3. move
echo "[Pyladies] move..."
mv ./frontend_react/build/static/* ./frontend/eventlist/static/
mv ./frontend/eventlist/static/css/main.*.chunk.css ./frontend/eventlist/static/css/main.css
mv ./frontend/eventlist/static/js/1.*.chunk.js ./frontend/eventlist/static/js/chunk.js
mv ./frontend/eventlist/static/js/main.*.chunk.js ./frontend/eventlist/static/js/main.chunk.js
|
d4fdfcaecb056be774191a3c1413941e7dd20f13
|
Shell
|
alectramell/chnet
|
/chnet.sh
|
UTF-8
| 2,529
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# Check internet connectivity: run speed.sh to measure bandwidth, query
# ipinfo.io for IP/region/coordinates, and compare a fetched marker file
# against the expected value to decide ACTIVE vs INACTIVE. Results are
# printed in colour and appended to a timestamped ../*.netlog file.
clear
bash speed.sh
clear
# Terminal colour codes used for the report below.
paper=$(tput smso)
bold=$(tput bold)
black=$(tput setaf 0)
white=$(tput setaf 7)
red=$(tput setaf 1)
green=$(tput setaf 2)
blue=$(tput setaf 6)
sky=$(tput setaf 153)
gold=$(tput setaf 3)
yellow=$(tput setaf 190)
purple=$(tput setaf 5)
reset=$(tput sgr0)
IPADDR=$(curl -s ipinfo.io/ip)
REGION=$(curl -s ipinfo.io/region)
LOCATE=$(curl -s ipinfo.io/loc)
# Connectivity probe: the repo's "netcode" file must contain 76656761.
CHSTAT=$(curl -s https://raw.githubusercontent.com/alectramell/chnet/master/netcode)
# NOTE(review): the "?" glob presumably matches a suffixed file written by
# speed.sh — confirm against speed.sh's output name.
mv speedstats.txt? speedstats.txt
if [ "$CHSTAT" == "76656761" ]
then
echo "Your Connection to the WORLD-WIDE-WEB is ACTIVE" > ../$(date +%d-%m-%Y-%S.netlog)
echo "Your Network-Sub-IP-Address is directed to you by $IPADDR" >> ../$(date +%d-%m-%Y-%S.netlog)
echo "Your Network-Speed was measured at $(cat speedstats.txt) kbps (kilobytes per second)" >> ../$(date +%d-%m-%Y-%S.netlog)
echo "Your Network-Region is located in $REGION" >> ../$(date +%d-%m-%Y-%S.netlog)
echo "Your Network-Base-Coordinates are $LOCATE" >> ../$(date +%d-%m-%Y-%S.netlog)
echo "${bold}${black}Your ${white}Connection${black} to the ${gold}world-wide-web${black} is ${green}ACTIVE${reset}"
echo "${bold}${black}Your ${white}Network-Sub-IP-Address${black} is directed to you by ${green}$IPADDR${reset}"
echo "${bold}${black}Your ${white}Network-Speed${black} was measured at ${green}$(cat speedstats.txt) kbps ${white}(kilobytes per second)${reset}"
echo "${bold}${black}Your ${white}Network-Region${black} is located in ${gold}$REGION${reset}"
echo "${bold}${black}Your ${white}Network-Base-Coordinates${black} are ${green}$LOCATE${reset}"
echo "${bold}${black}Press ${red}[ANY-KEY] ${black}to exit..${reset}"
sleep 2.5
else
echo "Your Connection to the WORLD-WIDE-WEB is INACTIVE" > ../$(date +%d-%m-%Y-%S.netlog)
echo "Your Network-Sub-IP-Address is UNKNOWN" >> ../$(date +%d-%m-%Y-%S.netlog)
echo "Your Network-Speed was measured at 0.00 kbps (kilobytes per second)" >> ../$(date +%d-%m-%Y-%S.netlog)
echo "Your Network-Region is UNKNOWN" >> ../$(date +%d-%m-%Y-%S.netlog)
echo "Your Network-Base-Coordinates are UNKNOWN" >> ../$(date +%d-%m-%Y-%S.netlog)
echo "${bold}${black}Your ${white}connection${black} to the ${gold}world-wide-web${black} is ${red}INACTIVE${reset}"
echo "${bold}${black}Your ${white}Network-Speed${black} was measured at ${red}0.00 kbps ${white}(kilobytes per second)${reset}"
echo "${bold}${black}Press ${red}[ANY-KEY] ${black}to exit..${reset}"
sleep 2.5
fi
read -s -n1
clear
# Clean up the temporary measurement files.
rm speedstats.txt
rm speeder.tmp
clear
|
dbf4d41c4730f36b5095d87c87378199d6003d4d
|
Shell
|
GSI-CS-CO/tmd
|
/scripts/feed_dashboards/push-common.sh
|
UTF-8
| 294
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
# Common functions used by the push scripts.
# Convert WR sync status to numeric value
# expected WR sync status: TRACKING, NO SYNC
get_sync_numeric()
{
	# Map a White Rabbit sync status string to a numeric health value:
	# "TRACKING" -> 100, "NO SYNC" -> 20, anything else -> 0.
	local status="$1"
	local value

	if [ "$status" = "TRACKING" ]; then
		value=100
	elif [ "$status" = "NO SYNC" ]; then
		value=20
	else
		value=0
	fi

	echo $value
}
| true
|
66819981b451ab0d004c6c5d4e5c8a7756bb1306
|
Shell
|
jiribroulik/scripts
|
/master_config_offline.sh
|
UTF-8
| 2,741
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/bash -xe
export SALT_MASTER_DEPLOY_IP=172.16.164.15
export SALT_MASTER_MINION_ID=cfg01.deploy-name.local
export DEPLOY_NETWORK_GW=172.16.164.1
export DEPLOY_NETWORK_NETMASK=255.255.255.192
export DNS_SERVERS=8.8.8.8
export CICD_CONTROL_ADDRESS=172.16.174.90
export INFRA_CONFIG_ADDRESS=172.16.174.15
echo "Configuring network interfaces"
echo "127.0.0.1 cfg cfg01" >> /etc/hosts
envsubst < /root/interfaces > /etc/network/interfaces
ifdown ens3; ifup ens3
echo "Preparing metadata model"
mount /dev/cdrom /mnt/
cp -r /mnt/model/model11Stacklight/* /srv/salt/reclass/
chown root:root /srv/salt/reclass/*
chmod -R 644 /srv/salt/reclass/classes/cluster/*
chmod -R 644 /srv/salt/reclass/classes/system/*
echo "updating gerrit repos"
cp -r /mnt/mk-pipelines/* /srv/glusterfs/gerrit/git/mk/mk-pipelines.git/
cp -r /mnt/decapod-pipelines/* /srv/glusterfs/gerrit/git/mk/decapod-pipelines.git/
cp -r /mnt/pipeline-library/* /srv/glusterfs/gerrit/git/mcp-ci/pipeline-library.git/
umount /dev/cdrom
cd /srv/salt/reclass/classes/service
ln -s /usr/share/salt-formulas/reclass/service/devops_portal
echo "Configuring salt"
#service salt-master restart
envsubst < /root/minion.conf > /etc/salt/minion.d/minion.conf
service salt-minion restart
while true; do
salt-key | grep "$SALT_MASTER_MINION_ID" && break
sleep 5
done
sleep 5
for i in `salt-key -l accepted | grep -v Accepted | grep -v "$SALT_MASTER_MINION_ID"`; do
salt-key -d $i -y
done
# replace IPs
if [ $CICD_CONTROL_ADDRESS != "10.167.4.90" ] ; then
systemctl stop docker
find /etc/docker/compose/* -type f -print0 | xargs -0 sed -i -e 's/10.167.4.90/'$CICD_CONTROL_ADDRESS'/g'
fi
if [ $INFRA_CONFIG_ADDRESS != "10.167.4.15" ] ; then
systemctl stop docker
find /etc/docker/compose/* -type f -print0 | xargs -0 sed -i -e 's/10.167.4.15/'$INFRA_CONFIG_ADDRESS'/g'
fi
# set proxy for gerrit, jenkins, aptly
find /etc/docker/compose/* -type f -print0 | xargs -0 sed -i -e 's/10.20.0.1/'$SALT_MASTER_DEPLOY_IP'/g'
# update jenkins repos
rm -rf /srv/glusterfs/jenkins/workspace/git-mirror-downstream-*
rm /srv/glusterfs/jenkins/.ssh/known_hosts
# update gerrit
systemctl status docker | grep inactive >/dev/null
RC=$?
if [ $RC -eq 0 ] ; then
systemctl start docker
cd /etc/docker/compose/gerrit/
docker stack deploy --compose-file docker-compose.yml gerrit
cd /etc/docker/compose/jenkins/
docker stack deploy --compose-file docker-compose.yml jenkins
cd /etc/docker/compose/aptly/
docker stack deploy --compose-file docker-compose.yml aptly
fi
salt-call saltutil.refresh_pillar
salt-call saltutil.sync_all
salt-call state.sls linux,openssh,salt
salt-call state.sls maas.cluster,maas.region,keepalived,haproxy,reclass
reboot
| true
|
640ff31adcac1c86ee4472b75cf070e79019bc82
|
Shell
|
Intrinsarc/intrinsarc-evolve
|
/LTSA-Uni-SB/ltsa/dist/run.sh
|
UTF-8
| 404
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# Launch the LTSA tool (LTSA.jar) from this script's own directory.

# modify PATH (e.g. for "dot")
#PATH=/opt/graphviz/bin:$PATH

# set another java path
JAVA="java"
#JAVA="/opt/sun-jdk-1.6.0.05/bin/java"

# the *maximal* amount of memory to use (must not exceed system memory)
MEMORY=1024m

############# do not change anything below this line ########################
# Directory part of the invocation path; "." when invoked without a slash.
DIR=${0%/*}
if [[ -z "$DIR" ]]; then DIR="."; fi
# ROBUSTNESS FIX: quote the expansions so an install path containing spaces
# still launches correctly.
"$JAVA" -Xmx"$MEMORY" -jar "$DIR/LTSA.jar"
| true
|
bfd6858c6ac51e1ec1236a0f83c5ce452c621421
|
Shell
|
justomat/osx
|
/osx-packages.sh
|
UTF-8
| 1,361
| 2.515625
| 3
|
[] |
no_license
|
# macOS bootstrap: install Homebrew + Cask, then CLI tools, GUI apps and
# fonts from the lists below, and finish with nvm/pip setup.
# NOTE(review): the taps and cask commands used here are the pre-2018
# Homebrew syntax; verify against the currently-installed brew version.
# install Homebrew
ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
# get homebrew cask
brew install caskroom/cask/brew-cask
brew tap homebrew/versions
brew tap homebrew/dupes
brew tap homebrew/php
brew tap caskroom/versions
brew tap caskroom/fonts
# Command-line packages (note: "curl" is listed twice; harmless but redundant).
binaries=(
axel
bash
brew-cask
clang-format
cmake
coreutils
curl
curl
docker
docker-compose
docker-machine
findutils
git
homebrew/dupes/grep
maven
php70
php70-mcrypt
python
python3
thefuck
tmux
tree
vim
wget
zsh
)
# GUI applications installed via Cask into /Applications.
apps=(
alfred
atom
ccleaner
dropbox
google-chrome
haskell-platform
intellij-idea
iterm2
java
mendeley-desktop
nvm
sequel-pro
spectacle
sublime-text3
transmission
xquartz
vagrant
vagrant-bar
virtualbox
)
fonts=(
font-anka-coder
font-fira-code
font-fira-mono
font-hasklig
font-inconsolata-dz
font-input
font-monoid
font-roboto
font-source-code-pro
)
echo "installing binaries"
brew install ${binaries[@]}
echo "installing apps"
brew cask install --appdir="/Applications" ${apps[@]}
# Let Alfred index cask-installed apps.
brew cask alfred link
echo "installing fonts"
brew cask install ${fonts[@]}
nvm install stable
nvm alias default stable
pip install --upgrade distribute
pip install --upgrade pip
# Prefer GNU coreutils names over the BSD tools in interactive shells.
echo "PATH=$(brew --prefix coreutils)/libexec/gnubin:$PATH" >> ~/.bash_profile
| true
|
953f44d6173197b93f17953f522228e81ac87d4a
|
Shell
|
ApollusEHS-OSS/operator
|
/build/init/init.sh
|
UTF-8
| 1,696
| 3.6875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# OpenShift init container entrypoint: pull AWS credentials from the
# aws-creds secret in kube-system, export them together with the region
# derived from instance metadata, and run the security-group setup script.
if [ "$OPENSHIFT" != "true" ]; then
  echo "Not in Openshift, so do not try to update Security Groups"
  exit 0
fi

## Collect the AWS credentials from the secret in the kube-system and
# then export them for use the in security group setup

# Collect the credential in json format
creds=$(kubectl get secret -n kube-system aws-creds -o json)
if [ $? -ne 0 ]; then
  echo "No AWS credentials in kube-system namespace, need to retry initializing security groups."
  echo "$creds"
  # REQUIRE_AWS decides whether a missing secret is fatal or a soft skip.
  if [ "$REQUIRE_AWS" == "true" ]; then
    exit 1
  else
    exit 0
  fi
fi

# Parse out the key id and then decode it (secret data is base64-encoded)
encoded_key_id=$(echo $creds | jq --raw-output '.data.aws_access_key_id')
if [ $? -ne 0 ]; then
  echo "Failed to parse key id from aws-creds.data.aws_access_key_id: $encoded_key_id"
  exit 1
fi
key_id=$(echo $encoded_key_id | base64 --decode)
if [ $? -ne 0 ]; then
  echo "Failed to decode key id from aws-creds.data.aws_access_key_id: $key_id"
  exit 1
fi
export AWS_ACCESS_KEY_ID=$key_id

# Parse out the key and then decode it
encoded_key=$(echo $creds | jq --raw-output '.data.aws_secret_access_key')
if [ $? -ne 0 ]; then
  echo "Failed to parse key from aws-creds.data.aws_secret_access_key: $encoded_key"
  exit 1
fi
key=$(echo $encoded_key | base64 --decode)
if [ $? -ne 0 ]; then
  echo "Failed to decode key from aws-creds.data.aws_secret_access_key: $key"
  exit 1
fi
export AWS_SECRET_ACCESS_KEY=$key

# Grab the availability-zone from the AWS metadata and then cut it down the the
# region.
export AWS_DEFAULT_REGION=$(curl --silent http://169.254.169.254/latest/meta-data/placement/availability-zone | sed -e 's/^\(.*[0-9]\)[a-z]*/\1/')

/aws-setup-security-groups.sh
| true
|
368e02cb2e92ad08a61744d3c7699e324da78d7a
|
Shell
|
akankshreddy/Kaustav-CSE-LABS-and-Projects
|
/Sem04-Embedded-Systems-LAB/OTHERS/Assement Prep/damn2/kkk/OSLab (copy)/lab3/factorial.sh
|
UTF-8
| 145
| 3.15625
| 3
|
[] |
no_license
|
# Read an integer from stdin and print its factorial.
echo "Eingresar número"
read number

fact=1
i=2
while (( i <= number )); do
	fact=$(( fact * i ))
	(( i++ ))
done

echo "Factorial de $number es $fact"
# echo $fact
| true
|
e478017afa6d5240e17310eeed19e82132790696
|
Shell
|
mudbugmedia/beertapp
|
/vendor/vagrant_provision.sh
|
UTF-8
| 994
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
# Vagrant provisioner: seed default config files from their *.example
# templates, install RVM/Ruby 2.1.8 with bundler, and set up the database.
# packages may not install without running an update
apt-get update
# Install default database.yml
if [[ ! -e /vagrant/config/database.yml ]]; then
  echo "Installing initial database.yml from database.yml.example"
  sudo -u vagrant cp /vagrant/config/database.yml.example /vagrant/config/database.yml
fi
# Install default config.yml
if [[ ! -e /vagrant/config/config.yml ]]; then
  echo "Installing initial config.yml from config.yml.example"
  sudo -u vagrant cp /vagrant/config/config.yml.example /vagrant/config/config.yml
fi
# Bring in rvm so we aren't installing to the /opt/vagrant_ruby
. /etc/profile.d/rvm.sh
cd /vagrant
rvm get stable
rvm install 2.1.8
rvm --default use 2.1.8
gem install bundler
echo "Running 'bundle'"
rvmsudo -u vagrant bundle
echo "Running 'rake db:setup'"
rvmsudo -u vagrant bundle exec rake db:setup
# Run rvmsudo as vagrant to allow users run bundle as the vagrant user.
echo "Switching to 'vagrant' user"
su vagrant
rvmsudo rvm get stable
| true
|
622e60caeb1cf1e62ea1b6c8634dfc9f1535b49b
|
Shell
|
trool/dotfiles
|
/files/bashrc
|
UTF-8
| 4,823
| 2.984375
| 3
|
[] |
no_license
|
# .bashrc # Source global definitions
# Interactive shell options, history behaviour and the ANSI colour palette
# used by the prompt functions further down.
if [ -f /etc/bashrc ]; then
. /etc/bashrc
fi
export PATH=$PATH:/bin/
# [[ -s "$HOME/.rvm/scripts/rvm" ]] && source "$HOME/.rvm/scripts/rvm" # Load RVM into a shell session *as a function*
set -o emacs
set -o notify
set -o noclobber
# set -o nounset
#set -o xtrace # useful for debugging
shopt -s cdspell
shopt -s cdable_vars
shopt -s checkhash
shopt -s checkwinsize
shopt -s mailwarn
shopt -s sourcepath
shopt -s no_empty_cmd_completion
shopt -s histappend histreedit
shopt -s extglob # useful for programmable completion
# Colours
# lowercase = normal weight, UPPERCASE = bold variants.
red='\e[0;31m'
RED='\e[1;31m'
green='\e[0;32m'
GREEN='\e[1;32m'
blue='\e[0;34m'
BLUE='\e[1;34m'
purple='\e[0;35m'
PURPLE='\e[1;35m'
cyan='\e[0;36m'
CYAN='\e[1;36m'
WHITE='\e[1;37m'
white='\e[0;37m'
NC='\e[0m' # No Colour
# Column offset used by set_prompt's clock positioning below.
HZ=10
# Build a PS1 that saves the cursor, draws a clock at the top of the screen
# (position derived from HZ), restores the cursor and prints user@host:cwd.
set_prompt ()
{
local SAVE_CRS=`tput sc 2> /dev/null`
local RESET_CRS=`tput rc 2> /dev/null`
local CLOCKPOS=`tput cup 0 $(($HZ-10)) 2> /dev/null`
local FOREG=`tput setaf 6 2> /dev/null` #4
local ALT_FOREG=`tput setaf 3 2> /dev/null` #4
local BACKG=`tput setab 0 2> /dev/null` #6
local NORMAL=`tput sgr0 2> /dev/null`
local BOLD=`tput bold 2> /dev/null`
PS1="\[${NORMAL}${SAVE_CRS}${CLOCKPOS}${FOREG}${BACKG}${BOLD} \@
${RESET_CRS}${BOLD}${ALT_FOREG}\]\u\[${NORMAL}\]@\h:\w\$ "
}
#set_prompt
# Shell prompt
# Lightweight prompt: [user @ host : date] cwd, with an xterm title escape
# on graphical terminals. Clears any PROMPT_COMMAND set by powerprompt.
function fastprompt()
{
unset PROMPT_COMMAND
# TIME=$(date +%H:%M)
TIME=$(date +%a_%d_%b_%Y)
case $TERM in xterm | rxvt | dtterm )
PS1="${PURPLE}[\u @ \h : \$TIME ]
${green}\w ${white}\n$ \[\033]0;[\u@\h] \w\007\]" ;;
*)
PS1="${blue}[\u @ \h : \$TIME ] ${green}\w ${WHITE}\n$ " ;;
esac
}
# Prompt that refreshes time and load average before every prompt via
# PROMPT_COMMAND.
function powerprompt()
{
_powerprompt()
{
LOAD=$(uptime|sed -e "s/.*: \([^,]*\).*/\1/" -e "s/ //g")
TIME=$(date +%H:%M)
}
PROMPT_COMMAND=_powerprompt
case $TERM in
xterm | dtterm | rxvt )
PS1="${RED}[\$TIME \$LOAD]$WHITE\n[\h \#] \W > \[
\033]0;[\u@\h] \w\007\]" ;;
linux )
PS1="${GREEN}[\$TIME - \$LOAD]$WHITE\n[\h \#] \w > " ;;
* )
PS1="[\$TIME - \$LOAD]\n[\h \#] \w > " ;;
esac
}
# Git-aware prompt: source the upstream git-prompt helper and embed
# __git_ps1 in PS1.  The single-quoted $(__git_ps1 ...) segment defers
# expansion to prompt-display time so the branch name stays current.
# if [ -f $HOME/git-prompt.sh ]; then
# source $HOME/git-prompt.sh
# PROMPT_COMMAND='__git_ps1 "\u@\h:\w" "\\\$ "'
# GIT_PS1_SHOWCOLORHINTS=1
# fi
source $HOME/git-prompt.sh
GIT_PS1_SHOWCOLORHINTS=1
TIME=$(date +%a_%d_%b_%Y)
# PROMPT_COMMAND='__git_ps1 "${purple}[\u @ \h : \$TIME ]\n${green}\w ${WHITE}\n" ">"'
# export PS1="${purple}[\u @ \h : \$TIME ]\n$(__git_ps1 "(%s)")${green}\w ${WHITE}\n$ "
export PS1="${purple}[\u @ \h : \$TIME ]\n"'$(__git_ps1 "${BLUE}<%s>")'"${green}[\w]${WHITE}\n> "
# ALIASES
# Interactive safety nets for destructive file operations.
alias rm='rm -i'
alias cp='cp -i'
alias mv='mv -i'
alias h='history'
alias j='jobs -l'
alias r='rlogin'
alias which='type -a'
alias ..='cd ..'
alias ...='cd ../..'
alias cd.='cd ..'
alias cd..='cd ../..'
alias cd...='cd ../../..'
alias path='echo -e ${PATH//:/\\n}'
alias du='du -h'
alias df='df -h'
alias lss='ls -lrS'
alias lc='ls --color=auto'
#alias ls='ls --color=auto -F'
#alias la='ls -AlhF --color=auto |less'
# NOTE(review): `la` is defined twice; the later definition below wins.
alias la='ls -AlhF |less'
alias lr='ls -lhR --color=auto'
alias lt='ls -lhsrt --color=auto'
alias lm='ls -lh --color=auto|less'
alias ll='ls -lh --color=auto|less'
alias l='ls -l -h --color=auto'
alias la='ls -alh --color=auto'
alias dirsh='ls -alF | grep /'
alias dirs='ls -lF | grep /'
alias q='exit'
alias psg='ps -ef | grep '
alias hsg='history | grep '
# -P: resolve symlinks to physical paths in pwd/cd.
alias pwd='pwd -P'
alias cd='cd -P'
export EDITOR='/usr/bin/vim'
export PATH=$PATH:'/usr/local/bin'
PATH=$HOME/bin:$PATH # Add custom apps/scripts to path
# PATH=$HOME/.rvm/bin:$PATH # Add RVM to PATH for scripting
# fing DIR PATTERN - recursive file search under DIR for names matching
# PATTERN (e.g. `fing . '*.log'`).  $1 and $2 are quoted so paths with
# spaces survive and the glob pattern is expanded by find, not the shell.
function fing(){
find "$1" -name "$2"
}
# api_grep LOGFILE - follow a log file and show API_CALL lines, filtering
# out the high-volume routine calls (user/get_struct/set_struct/get_auth/
# customer_trial).  Runs until interrupted (tail -f never returns).
function api_grep(){
tail -f "$1" | egrep -v ': (user|get_struct|set_struct|get_auth|customer_trial)' | egrep 'API_CALL:'
}
# error_check SHARE - print the first line of every ERROR* file under
# /share/SHARE/process/*/tmp/, dropping known header/noise lines
# (MAC, "<==", Hostname, NIC) and blanks.
function error_check(){
head -1 /share/$1/process/*/tmp/ERROR* | grep -v MAC | grep -v "<==" | grep -v Hostname | grep -v "NIC" | grep -v -e "^$"
}
# PATH additions for the galileo / gpe-server development environment
# (project bins, vendored ruby/node toolchains, rbenv shims).
export PATH=$PATH:/opt/galileo-server/bin/
export PATH=$PATH:/sbin/
export PATH=/home/ATS/mshevertalov/src/gpe-server.git/bin/:/home/ATS/mshevertalov/src/gpe-server.git/vendor/ruby/bin/:$PATH
export PATH=/home/ATS/mshevertalov/src/gpe-server.git/vendor/node/bin/:$PATH
export PATH=/home/ATS/mshevertalov/src/gpe-server.git/vendor/node_modules/.bin/:$PATH
export PATH=/home/ATS/mshevertalov/.rbenv/bin:$PATH
export PATH=/home/ATS/mshevertalov/.rbenv/shims:$PATH
# Edit the httpd config, then restart galileo services and the agent.
alias post_deploy='sudo vim /opt/galileo-server/vendor/httpd/conf/httpd.conf && sudo /sbin/service galileo start db && sudo /sbin/service galileo start app && sudo /sbin/service hyperic-agent restart'
export DOCKER_HOST=tcp://docker.galileosuite.com:2375
# Inside a GNU screen session ($STY set) disable the dynamic prompt hook.
if [ -n "$STY" ] ; then
unset PROMPT_COMMAND
fi
| true
|
952e28b9899dd0c22126193eec0dbf6363364c73
|
Shell
|
ludiazv/cell
|
/nginx/push_etcd.sh
|
UTF-8
| 2,333
| 2.921875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Seed etcd with the nginx configuration tree consumed by the nginx
# container's template watcher.  Requires etcdctl on PATH and a reachable
# etcd cluster.  bash (not sh) is required: `read -d ''` below is a
# bashism that plain POSIX sh rejects.
PREFIX="/"
# Helper prefix: "$ETS <key> <value>" == "etcdctl set $PREFIX/nginx/<key> <value>"
ETS="etcdctl set $PREFIX/nginx/"
etcdctl mkdir $PREFIX/nginx
# Config dir
etcdctl mkdir $PREFIX/nginx/conf
# Upstreams dir
etcdctl mkdir $PREFIX/nginx/upstreams
# Sites dir
etcdctl mkdir $PREFIX/nginx/sites
# Maintenance mode dir
etcdctl mkdir $PREFIX/nginx/maintenance
# ---- Conf -----
$ETS conf/worker_processes 'auto'
$ETS conf/worker_rlimit_nofile 1000000
$ETS conf/worker_connections 1204
$ETS conf/events 'multi_accept on; use epoll;'
# ---- http ----
$ETS conf/index 'index.php index.html index.htm;'
# Multi-line http{} snippet (currently empty).  `read -d ''` returns
# non-zero at EOF, which previously left HTTP_CONF unset; default it
# first and tolerate the EOF status.
HTTP_CONF=""
read -r -d '' HTTP_CONF <<-EOF || true
EOF
$ETS conf/http-conf "$HTTP_CONF"
# Caches information about open FDs, freqently accessed files.
#open_file_cache both turns on cache activity and specifies the maximum number
#of entries in the cache, along with how long to cache them. We’ll set our maximum
#to a relatively high number, and we’ll get rid of them from the cache if they’re inactive
#for 20 seconds. Can boost performance, but you need to test those values
# open_file_cache max=65000 inactive=20s;
#open_file_cache_valid specifies interval for when to check the validity of the information about the item in open_file_cache.
# open_file_cache_valid 30s;
#open_file_cache_min_uses defines the minimum use number of a file within the
#time specified in the directive parameter inactive in open_file_cache.
# open_file_cache_min_uses 2;
#open_file_cache_errors specifies whether or not to cache errors when searching for a file.
# open_file_cache_errors on;
#Configuracion del proxy cache
#Verificar que existe estructura y una vez creada darle permisos al usuario nginx: "chown nginx:nginx /var/blahblah"
#Hay que pasar en los server o location un "proxy_cache zona_uno;"
#proxy_buffering on;
# proxy_cache_valid any 10m;
# proxy_cache_path /var/www/cache levels=1:2 keys_zone=zona_uno:8m max_size=1000m inactive=600m;
# proxy_temp_path /var/www/cache/tmp;
# proxy_buffer_size 4k;
# proxy_buffers 100 8k;
#proxy_connect_timeout 60;
#proxy_send_timeout 60;
#proxy_read_timeout 60;
# Load modular configuration files from the /etc/nginx/conf.d directory.
#include /etc/nginx/conf.d/*.conf;
#index index.php index.html index.htm;
#}
| true
|
4327eb453b3674eb7d2b6492dc6f9772a369b32e
|
Shell
|
creazytom/MoeClub.github.io
|
/rand.sh
|
UTF-8
| 1,786
| 3.859375
| 4
|
[] |
no_license
|
#!/bin/bash
# rand.sh - print a random string built from selected character classes.
#
#   -n, --num N    length of the output (default: 8)
#   -N, --number   include digits 0-9
#   -L, --lower    include lowercase a-z
#   -U, --upper    include uppercase A-Z
#   -h, --help     show usage
#
# Combined bare forms such as "NL12" (class letters plus an optional
# length) are also accepted.  With no class selected, digits are used.
# Output goes to stdout without a trailing newline.
while [[ $# -ge 1 ]]; do
  case $1 in
    -n|--num)
      shift
      inNum="$1"
      shift
      ;;
    -N|--number)
      shift
      isNumber='1'
      ;;
    -L|--lower)
      shift
      isLower='1'
      ;;
    -U|--upper)
      shift
      isUpper='1'
      ;;
    -h|-H|--help)
      echo -ne " Usage:\n\t$0\t[NUL][number]\n\t\t-N\--number\n\t\t-L\--lower\n\t\t-U\--upper\n"
      exit 1;
      ;;
    *)
      # Combined form: pick out class letters; whatever remains after
      # stripping them (and dashes) is treated as the length.
      [ -n "$(echo -n "$1" |grep 'N\|number')" ] && isNumber='1'
      [ -n "$(echo -n "$1" |grep 'L\|lower')" ] && isLower='1'
      [ -n "$(echo -n "$1" |grep 'U\|upper')" ] && isUpper='1'
      inNum="$(echo -n "$1" |sed 's/-\|L\|lower\|N\|number\|U\|upper//g')"
      shift
      ;;
  esac
done
# Validate the requested length: must be purely numeric.  The expansions
# are quoted so an empty value cannot collapse the [ ] expression (the
# original unquoted `[ -n $inNum ]` was always true).
[ -n "$inNum" ] && [ "$inNum" != "$(echo "$inNum" |grep -o '[0-9]\{1,\}' |xargs |sed 's/ //g')" ] && echo "Error, invalid input." && exit 1;
[ -z "$inNum" ] && Num='8' || Num="$inNum"
# Default to digits when no character class was requested.
[ "$isLower" != '1' -a "$isUpper" != '1' ] && isNumber='1'
pool_N=(0 1 2 3 4 5 6 7 8 9)
pool_L=(a b c d e f g h i j k l m n o p q r s t u v w x y z)
pool_U=(A B C D E F G H I J K L M N O P Q R S T U V W X Y Z)
# Build the candidate pool from the selected classes (later assignments
# override earlier ones, so the three-class case must come last).
[ "$isNumber" == '1' ] && POOL=(${pool_N[@]})
[ "$isLower" == '1' ] && POOL=(${pool_L[@]})
[ "$isUpper" == '1' ] && POOL=(${pool_U[@]})
[ "$isNumber" == '1' ] && [ "$isLower" == '1' ] && POOL=(${pool_N[@]} ${pool_L[@]})
[ "$isNumber" == '1' ] && [ "$isUpper" == '1' ] && POOL=(${pool_N[@]} ${pool_U[@]})
[ "$isLower" == '1' ] && [ "$isUpper" == '1' ] && POOL=(${pool_L[@]} ${pool_U[@]})
[ "$isNumber" == '1' ] && [ "$isLower" == '1' ] && [ "$isUpper" == '1' ] && POOL=(${pool_N[@]} ${pool_L[@]} ${pool_U[@]})
# Draw $Num random characters from the pool into STR[], then print them.
i=0; while :; do STR[$i]=${POOL[$((RANDOM%${#POOL[@]}))]} && i=$((i+1)); [ "$i" -ge "$Num" ] && break; done
for str in ${STR[*]}; do echo -n $str; done
| true
|
a0c6a0e116d80221ca3dc1437f13e18e59f334e0
|
Shell
|
rudymccomb/Infrastructure
|
/site-cookbooks/oe-postgresql/templates/default/init.d/repmgrd.erb
|
UTF-8
| 1,319
| 3.5
| 4
|
[
"Apache-2.0",
"PostgreSQL"
] |
permissive
|
#!/bin/sh
###############################################################################
#
### BEGIN INIT INFO
# Provides: repmgrd
# Required-Start: $remote_fs $all
# Required-Stop: $remote_fs
# Should-Start: $local_fs
# Should-Stop: $local_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Start repmgrd
# Description: Start Repmgr daemon
### END INIT INFO
#
###############################################################################
# SysV init script (Chef ERB template) for the repmgr replication-manager
# daemon.  Install prefix, config path and run-as user are interpolated
# from node attributes at template-render time.
REPMGRD_OPTS="-f <%= node['postgresql']['prefix']['cfg'] %>/repmgr.conf --monitoring-history"
# Bail out early if the repmgrd binary is missing or not executable.
[ -x <%= node['postgresql']['prefix']['install'] %>/bin/repmgrd ] || exit 1
ACTION=${1} ; shift
case "${ACTION}" in
start)
# Launch repmgrd in the background as the postgresql user/group.
start-stop-daemon -S \
-c <%= node['postgresql']['user']['name'] %> \
-g <%= node['postgresql']['user']['name'] %> \
-x <%= node['postgresql']['prefix']['install'] %>/bin/repmgrd \
--background \
-- ${REPMGRD_OPTS}
;;
stop)
# -K: stop the daemon matching the same user/executable.
start-stop-daemon -K \
-c <%= node['postgresql']['user']['name'] %> \
-g <%= node['postgresql']['user']['name'] %> \
-x <%= node['postgresql']['prefix']['install'] %>/bin/repmgrd \
;;
restart)
$0 stop
$0 start
;;
*)
echo " Usage: ${0} start|stop|restart"
esac
| true
|
57ab46ed6a8dcc0d429fe49cd3f6bd77fb71c04d
|
Shell
|
cohenjo/dbautils
|
/oracle/dbmon/genkilltoadall
|
UTF-8
| 2,414
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/ksh
#*****************************************************************************
#
# This script is used to generate kill user
#
# USAGE: genkillsession username
#
# By Adi Zohar - Sep 2004
#*****************************************************************************
# Finds every Oracle session opened by TOAD, generates the matching
# ALTER SYSTEM KILL SESSION statements into a temp SQL script, and (after
# operator confirmation) runs them.  Requires local sysdba access.
TEMP_FILE=/tmp/genkilltoad_$$.sql
TEMP_FILE_LOG=/tmp/genkilltoad_run_$$.sql
# Step 1: display the matching TOAD sessions to the operator.
sqlplus -s "/ as sysdba" <<-EOSQL
-- this is called from dba/bin/showtoad
set feed off tab off
col sid for 99999
col serial# for 99999
col username for a22 head "Oracle User"
col osuser for a14 trunc
col machine for a30
col status for a5 trunc
col prg_mod for a8 trunc
alter session set nls_date_format = 'dd-MON:hh24:mi:ss';
set feed off pages 80 lines 200
select
inst_id,
SID,
SERIAL#,
USERNAME,
STATUS,
OSUSER,
PROCESS,
MACHINE,
LOGON_TIME, program||module PRG_MOD
from gv\$session
where
(module = 'T.O.A.D.' or upper(program) like 'TOAD%' or program like '%T.O.A.D%') order by inst_id,sid;
quit
EOSQL
# Step 2: spool one KILL statement per TOAD session into $TEMP_FILE.
sqlplus -s "/ as sysdba" <<EOF
clear buffer
set feed off verify off line 132 pages 0 echo off trimspo on
spool $TEMP_FILE
select 'ALTER SYSTEM KILL SESSION '''||SID||','||SERIAL#||''' immediate;'
from V\$SESSION
WHERE ( module = 'T.O.A.D.' or upper(program) like 'TOAD%' or program like '%T.O.A.D%')
order by sid;
spool off
EOF
#--------------------------------------------------------------------------------------------
# Step 3: count generated statements; clean up and stop if nothing to do.
num_of_changes=`grep -i "ALTER" $TEMP_FILE | wc -l`
echo "------------------------------------------------------------------------------"
echo "$num_of_changes sessions to kill "
echo "Temporary script: $TEMP_FILE"
echo "------------------------------------------------------------------------------"
if (( num_of_changes==0 ))
then
rm -f $TEMP_FILE
exit 0
fi
#--------------------------------------------------------------------------------------------
# Step 4: on confirmation, execute the generated script, log it, and
# count any ORA- errors in the spooled output.
echo "Would you like to execute ? (Y/N) \c"; read Local
if [ "$Local" = "Y" -o "$Local" = "y" ]
then
echo "set echo on pages 0 lines 199 trimspo on
spool $TEMP_FILE_LOG
@$TEMP_FILE
spool off
" | sqlplus "/ as sysdba"
num_of_errors=`grep -i "ORA-" $TEMP_FILE_LOG | wc -l`
echo "------------------------------------------------------------------------------"
echo "$num_of_errors errors while killing session"
echo "------------------------------------------------------------------------------"
fi
rm -f $TEMP_FILE
rm -f $TEMP_FILE_LOG
| true
|
df09ac3bd2773c4b0c8b443ac6385a01e7e4d50e
|
Shell
|
aminespinoza/shArg
|
/examples/simple_example.sh
|
UTF-8
| 579
| 3.578125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# #########################################################
# Navigate to the examples folder and invoke this file.
# cd examples
# ./simple_example.sh -m hello -d
# #########################################################
# Demonstrates shArg usage: registers a -m/--message parameter and a
# -d/--debug flag, parses the command line, and echoes the results.
# load shArg
source ../scripts/shArg.sh
declare MESSAGE
declare DEBUG
# register arguments
shArgs.arg "MESSAGE" -m --message PARAMETER true
shArgs.arg "DEBUG" -d --debug FLAG true
# parse inputs ("$@" quoted so messages containing spaces, e.g.
# -m "hello world", reach the parser as a single argument)
shArgs.parse "$@"
echo "The message is $MESSAGE"
if [ "$DEBUG" == true ]; then
echo "DEBUG is true!"
else
echo "DEBUG is false"
fi
| true
|
b477ea5cbc2aebf65cef4251bc5b0997b844a0d4
|
Shell
|
blockchaingroup4/webank
|
/day2/贾学雨/LAG-app/tool/LAG_run.sh
|
UTF-8
| 978
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
# CLI wrapper for the LAGCredit Java client: validates the argument count
# for each subcommand, then forwards everything to the JVM entry point.
function usage()
{
echo " Usage : "
echo " bash LAG_run.sh deploy initialSupply CreditName CreditSymbol"
echo " bash LAG_run.sh getTotalSupply"
echo " bash LAG_run.sh balanceOf asset_account"
echo " bash LAG_run.sh transfer to_asset_account amount "
echo " "
echo " "
echo "examples : "
echo " bash LAG_run.sh deploy 500 LAGC LAG"
echo " bash LAG_run.sh getTotalSupply"
echo " bash LAG_run.sh balanceOf asset_account "
echo " bash LAG_run.sh transfer Asset1 11111 "
exit 0
}
# Per-subcommand minimum argument counts; anything unknown prints usage.
case $1 in
deploy)
[ $# -lt 1 ] && { usage; }
;;
getTotalSupply)
[ $# -lt 1 ] && { usage; }
;;
transfer)
[ $# -lt 3 ] && { usage; }
;;
balanceOf)
[ $# -lt 2 ] && { usage; }
;;
*)
usage
;;
esac
# "$@" is quoted so arguments containing spaces survive word-splitting
# on their way to the Java client.
java -cp 'apps/*:conf/:lib/*' org.fisco.bcos.LAGCredit.client.LAGClient "$@"
| true
|
1de5891658e79d07a6fa2bd3b859b6c163ebd7f3
|
Shell
|
GalaxyLu/shGadgets
|
/pkg.sh
|
UTF-8
| 974
| 4.15625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# pkg - a tiny mnemonic wrapper around apt(8)/apt-get(8)/dpkg(1).
set -e -u
SU=""
if [ "$(id -u)" != 0 ]; then SU="sudo "; fi
# Print command summary; ${SU} shows "sudo pkg" for non-root users.
show_help(){
cat<<%
Usage: ${SU}pkg command [arguments]
A wrapper for apt(8) and apt-get(8)
Commands:
  h   show this help (also [empty])
  i   install packages
  l   list packages
  li  list installed packages
  lu  list upgradable packages
  r   remove packages
  ar  auto-remove packages
  u   update and upgrade
  U   update only
  fu  full upgrade (no update)
  m   show package info
  s   search
  f   list files for installed packages
%
}
if [ $# -eq 0 ]; then show_help; exit; fi
CMD="$1"
shift
# Dispatch to apt/dpkg.  "$@" is quoted so package names reach apt
# unsplit and unglobbed (the original unquoted \$@ could mangle them).
case "$CMD" in
  h)  show_help;;
  i)  apt install "$@";;
  l)  apt list "$@";;
  li) apt list --installed "$@";;
  lu) apt list --upgradable "$@";;
  r)  apt remove "$@";;
  ar) apt autoremove "$@";;
  u)  apt update && apt upgrade;;
  U)  apt update;;
  fu) apt full-upgrade;;
  m)  apt show "$@";;
  s)  apt search "$@";;
  f)  dpkg -L "$@";;
  *)  echo "Unknown command: $CMD"; exit 1;;
esac
| true
|
badfcd0385acd17101c55f2f85a06da50452a19e
|
Shell
|
PaperPesto/bash-script
|
/local_variables.sh
|
UTF-8
| 587
| 3.3125
| 3
|
[] |
no_license
|
# Demonstration of `local`: variables declared local inside a function
# shadow same-named globals for the duration of the call only; the
# globals are unchanged afterwards.
getname() {
  local VAR1=SILVESTER
  local VAR2=STALLONE
  printf '%s\n' "Inside the function." "VAR1 is $VAR1" "VAR2 is $VAR2"
}

VAR1=ARNOLD
VAR2=SHWARZENEGGER

printf '%s\n' \
  "You are in the main script." \
  "VAR1 and VAR2 are defined as local inside funtion getname, so their values remains inside function scope." \
  "Outside the function:" \
  "VAR1 is $VAR1" \
  "VAR2 is $VAR2" \
  "Called to getname"
getname
printf '%s\n' \
  "Again outside the function" \
  "VAR1 is $VAR1" \
  "VAR2 is $VAR2" \
  "This is the main variables usage in such programs as C and others."
| true
|
5e76674dbc9c3121407f488ec9da67c95d18e9d0
|
Shell
|
mihaicris/dotfiles
|
/scripts/hha
|
UTF-8
| 366
| 3.453125
| 3
|
[] |
no_license
|
#!/usr/bin/env zsh
# hha: run the `hh` helper in every git worktree of the current repo.
# Relies on `heading`, `is_git_folder` and `hh` being defined by the
# surrounding dotfiles -- presumably sourced before this runs; confirm.
heading "Checkout previous commit"
is_git_folder || exit 1
TOP_LEVEL_DIR=$(git rev-parse --show-toplevel)
# Extract worktree paths from porcelain output ("worktree <path>" lines).
WORKDIRS=$(git -C $TOP_LEVEL_DIR worktree list \
--porcelain \
| grep worktree \
| awk '{ print $2 }')
# (f) splits the string on newlines into a zsh array.
WORKDIRS=(${(f)WORKDIRS})
for WORKDIR in $WORKDIRS; do
pushd $WORKDIR
hh
popd
done
| true
|
2f3913094f1f3e4117b972977dab90f0e0e6fa95
|
Shell
|
mike-grant/docker-adb
|
/files/update-platform-tools.sh
|
UTF-8
| 609
| 4.125
| 4
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/usr/bin/env sh
# Download the latest Android platform-tools (adb/fastboot) zip from
# Google and install it under /opt/platform-tools, replacing any
# previous copy.  All progress messages go to stderr.
# NOTE(review): `set -o pipefail` and `local` are not strictly POSIX sh;
# this works under bash/busybox ash but not every /bin/sh -- confirm the
# image's shell.
set -euo pipefail
install_platform_tools() {
local URL="https://dl.google.com/android/repository/platform-tools-latest-linux.zip"
local TMPFILE=$(mktemp)
mkdir -p /opt
echo "Fetching ${URL}" >&2
wget -O "${TMPFILE}" "${URL}"
echo "Removing previous version of platform tools if any" >&2
rm -rf /opt/platform-tools
echo "Unpacking platform tools" >&2
unzip -d /opt "${TMPFILE}"
rm "${TMPFILE}"
# Revision field of source.properties, e.g. "Revision=34.0.5".
local VERSION=$(grep Revision /opt/platform-tools/source.properties |cut -d '=' -f 2)
echo "Platform tools version: ${VERSION} installed!" >&2
}
install_platform_tools
| true
|
27e329aec52d50e6c3a251764c335f32b99d78c1
|
Shell
|
thomasleese/will-there-be-space
|
/script/test
|
UTF-8
| 607
| 3.59375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# script/test: Run test suite for application. Optionallly pass in a path to an
# individual test file to run a single test.
# progress MSG - print a bold, colored "==> MSG" status line.
function progress {
echo "$(tput bold)$(tput setaf 4)==>$(tput sgr0) $(tput bold)$1$(tput sgr0)"
}
set -e
# Run from the repository root regardless of the caller's cwd.
cd "$(dirname "$0")/.."
# CI gets a fresh setup; local runs just refresh dependencies.
if [ -z "$CI" ]; then
script/update
else
script/setup
fi
source venv/bin/activate
export DATABASE_URL=postgresql://localhost/willtherebespace
export REDIS_URL=redis://localhost/willtherebespace
progress "Running tests…"
# With arguments, forward them to run a single test file.
if [ -n "$1" ]; then
./setup.py test $*
else
./setup.py test
fi
deactivate
| true
|
8c9e9c363ff37a784d09d62b53dbf6dbecbb9ad4
|
Shell
|
teja624/home
|
/.zsh/modules/aws/lib/sh/api/iot/certificates_by_ca_list.sh
|
UTF-8
| 176
| 2.75
| 3
|
[
"Apache-2.0"
] |
permissive
|
# List IoT device certificates signed by the given CA certificate.
#   $1 - CA certificate id (required)
#   $@ - extra arguments forwarded to the AWS CLI (e.g. --page-size)
aws_iot_certificates_by_ca_list() {
  local ca_certificate_id="$1"
  shift 1
  # Quoted so an empty/unusual id cannot be word-split away.
  log_and_run aws iot list-certificates-by-ca --ca-certificate-id "$ca_certificate_id" "$@"
}
| true
|
f0631c25b604fc1ea51d3aab00e098f12b06d62b
|
Shell
|
bobquest33/go4git
|
/tools/tests/testShowIndex.sh
|
UTF-8
| 346
| 3.40625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Compare `showpackindex` output against git's own `git show-index` for a
# pack .idx file; succeed only when the two are byte-identical.
if [ $# -ne 1 ]
then
    echo "Usage: testShowIndex <index_file>"
    # exit 1 (not -1): negative codes are non-portable and wrap to 255.
    exit 1
fi
INDEX_FILE=$1
echo "diff <(showpackindex ${INDEX_FILE}) <(git show-index < ${INDEX_FILE})"
# Test the diff directly instead of inspecting $? afterwards; quoting
# keeps paths with spaces intact.
if diff <(showpackindex "${INDEX_FILE}") <(git show-index < "${INDEX_FILE}") > /dev/null
then
    echo "Success"
    exit 0
else
    echo "FAILED..."
    exit 1
fi
| true
|
e715f909a41a54054b678791c0c786d685914306
|
Shell
|
veekrum/devops_course
|
/Linux/Day7/reading_input.sh
|
UTF-8
| 195
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash ## interpreter line
# Prompt for the user's name on stdin and echo it back.
echo "HI, what is your name?" ## printing the text
read -r name ## -r keeps backslashes literal while reading the name
echo "your name is $name" ## printing the input
| true
|
918b930ddddc9923e51515798ca04f24a33bfc35
|
Shell
|
GM-990/martiis-buildsystem-cs
|
/scripts/target/xupnpd.init
|
UTF-8
| 299
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/sh
# Init script for the xupnpd UPnP media server on a set-top box.
case "$1" in
stop)
kill $(pidof xupnpd) 2>/dev/null
;;
start|restart)
# restart: kill any running instance first, then fall through to start.
[ "$1" == "restart" ] && kill $(pidof xupnpd) 2>/dev/null
# Delayed background start so Neutrino can finish booting first.
(
sleep 10 # Give Neutrino a couple of seconds for startup
mkdir -p /tmp/xupnpd-feeds
XUPNPDROOTDIR=/share/xupnpd /bin/xupnpd
) &
;;
esac
| true
|
24caf57cacce1b3f6c75f02e977a15b081ffca25
|
Shell
|
Trialp/Ressources_GETALP
|
/multivec-master/benchmarks/cldc.sh
|
UTF-8
| 1,067
| 3.046875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Train bilingual multivec skip-gram models (40 and 128 dimensions) on
# the Europarl en-de CLDC corpus and evaluate each on the cross-lingual
# document classification benchmark, averaging over 10 evaluation runs.
corpus=data/europarl/europarl.cldc
output_dir=benchmarks/CLDC/output
threads=16
if [ $# -lt 1 ]
then
    echo "Usage: ./cldc-evaluate.sh RCV_DIR"
    # RCV_DIR is required below; previously the script fell through and
    # ran the evaluations with an empty path.
    exit 1
fi
rcv_dir=$1
mkdir -p $output_dir
# 40-dimensional model: train once, evaluate 10 times.
./bin/multivec-bi --train-src $corpus.en --train-trg $corpus.de --sg --iter 10 --subsampling 1e-04 --alpha 0.025 --beta 4 --dimension 40 --negative 30 --window-size 5 --threads $threads --save $output_dir/model.40.en-de.bin --min-count 1 > $output_dir/model.40.en-de.out
for i in `seq 1 10`;
do
./benchmarks/CLDC/evaluate-model.sh $output_dir/model.40.en-de.bin $rcv_dir >> $output_dir/model.40.en-de.out
done
# 128-dimensional model: same protocol.
./bin/multivec-bi --train-src $corpus.en --train-trg $corpus.de --sg --iter 10 --subsampling 1e-04 --alpha 0.025 --beta 4 --dimension 128 --negative 30 --window-size 5 --threads $threads --save $output_dir/model.128.en-de.bin --min-count 1 > $output_dir/model.128.en-de.out
for i in `seq 1 10`; # average over 10 runs
do
./benchmarks/CLDC/evaluate-model.sh $output_dir/model.128.en-de.bin $rcv_dir >> $output_dir/model.128.en-de.out
done
| true
|
acfbd5e27d03e483bbd0b8ab206e0c7610a1675e
|
Shell
|
dc7kr/ca_tools
|
/create_mail_cert.sh
|
UTF-8
| 660
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# Issue a mail (S/MIME) certificate from the mail intermediate CA:
# generate an encrypted RSA key, build a CSR, and have the CA sign it.
# Reads CA settings (incl. CA_BASE_DIR) from ~/.ca_config.
. ~/.ca_config
NAME=$1
if [ -z "$NAME" ]
then
echo "Enter a certificate name (part of the filename)"
exit 1
fi
# Passphrase-protected 2048-bit key, readable only by the owner.
openssl genrsa -aes256 -out "$NAME.key.pem" 2048
chmod 400 "$NAME.key.pem"
# Remember where the key was written; the rest runs inside the CA tree.
KEYDIR=$PWD
cd $CA_BASE_DIR
# CSR
openssl req -config mail-intermediate/openssl.cnf \
-key "$KEYDIR/$NAME.key.pem" \
-new -sha256 -out "mail-intermediate/csr/$NAME.csr.pem"
# Sign with the usr_cert profile, valid 375 days.
openssl ca -config mail-intermediate/openssl.cnf \
-extensions usr_cert -days 375 -notext -md sha256 \
-in "mail-intermediate/csr/$NAME.csr.pem" \
-out "mail-intermediate/certs/$NAME.cert.pem"
chmod 444 "mail-intermediate/certs/$NAME.cert.pem"
| true
|
83551fb9d7af8d58a05ca40e3fe3f6f5567ad521
|
Shell
|
perissinotti/timesheet
|
/timesheet.sh
|
UTF-8
| 1,730
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
# zero NUM WIDTH
# Print NUM zero-padded to at least WIDTH digits; a NUM wider than WIDTH
# is printed unchanged.  Both arguments are mandatory (:? aborts with a
# message otherwise).
zero ()
{
k=${1:?"Number to print missing"}
j=${2:?"Number of digits missing"}
# Force base-10 so a leading zero in NUM is not read as octal, then let
# printf's dynamic field width (%0*d) do the padding -- replaces the
# original O(digits) counting loop with identical output.
printf '%0*d\n' "$j" "$((10#$k))"
}
# Interactive stopwatch: shows elapsed HH:MM:SS, supports pause (p) and
# quit (q), then prompts for job details and appends a CSV row to
# timesheet.csv.
strt=$(date)
inisecs=$(date +%s)
# acum accumulates elapsed seconds across pause/resume cycles.
acum=0
echo
echo "Time started at $strt"
echo
echo "Time elapsed: (press q to quit, p to pause)"
while true; do
act=$(date +%s)
dif=$(( $act - $inisecs + $acum))
hs=$(( $dif / 3600 ))
ms=$(( $dif / 60 - $hs * 60 ))
ss=$(( $dif - $ms * 60 - $hs * 3600 ))
HH=$(zero $hs 2)
MM=$(zero $ms 2)
SS=$(zero $ss 2)
# Backspaces rewrite the clock in place on the same terminal line.
echo -en "\b\b\b\b\b\b\b\b\b\b\b $HH:$MM:$SS "
# -t 1 doubles as the one-second tick; -n 1 reads a single keypress.
read -t 1 -n 1 key
if [[ "$key" = "q" ]]; then
break
elif [[ "$key" = "p" ]]; then
echo -e "\nPaused. Press ENTER to continue"
read a
# Bank the elapsed time and restart the reference clock.
acum=$dif
inisecs=$(date +%s)
fi
done
# Two record formats: short (training/other) or full client details.
echo -e "\n\nClient(enter c) or Training/other(enter t)"
read text
if [[ "$text" = "t" ]]; then
echo -e "\n\nJob Description:"
read text
echo
echo "$strt, $dif, $HH:$MM:$SS, $text"
echo "$strt, $dif, $HH:$MM:$SS, $text" >> timesheet.csv
echo
echo "Saved timesheet.csv"
else
echo -e "\n\nJob Description:"
read text1
echo -e "\n\nDepartment:"
read text2
echo -e "\n\nClient Name:"
read text3
echo -e "\n\nClient Role:"
read text4
echo -e "\n\nSolution/approach:"
read text5
echo -e "\n\nComplaints:"
read text6
echo
echo "$strt, $dif, $HH:$MM:$SS, $text1, $text2, $text3, $text4, $text5, $text6"
echo "$strt, $dif, $HH:$MM:$SS, $text1, $text2, $text3, $text4, $text5, $text6" >> timesheet.csv
echo
echo "Saved timesheet.csv"
fi
| true
|
e5dae8f3816565e065a1d83e6ad3d14fc982b6b8
|
Shell
|
ProyectoRH/PlataformaIRH
|
/install.sh~
|
UTF-8
| 3,051
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash
# Installer for the Atlántico water-resources information platform
# (Django + PostgreSQL).  Interactive: prompts for database credentials,
# persists them to /etc/profile, then runs migrations and starts the
# development server on 0.0.0.0:8000.
echo "Paquete de instalación de la plataforma de infromación de recursos hídricos del Atlántico..."
cd ~/
echo "*********************************************************************"
echo "Actualizando el sistema..."
sudo apt-get update
sudo apt-get upgrade
echo "*********************************************************************"
echo "Instalando ambiente virtual para Django"
sudo apt-get install python-virtualenv
sudo virtualenv myenv
cd myenv
echo "*********************************************************************"
echo "Activando ambiente virtual..."
source bin/activate
echo "*********************************************************************"
echo "*********************************************************************"
sudo apt-get install git
echo "Descargando Repositorio"
git clone https://github.com/ProyectoRH/PlataformaIRH.git
echo "*********************************************************************"
echo "Instalando librerias y adaptadores para Python y Postgres..."
sudo apt-get install libpq-dev python-dev python-psycopg2
echo "*********************************************************************"
#pip install -r requirements.txt --allow-external requirements.txt --allow-unverified requirements.txt
pip install Django --allow-external Django --allow-unverified Django
pip install Pillow --allow-external Pillow --allow-unverified Pillow
pip install argparse --allow-external argparse --allow-unverified argparse
pip install distribute --allow-external distribute --allow-unverified distribute
pip install django-geojson --allow-external django-geojson --allow-unverified django-geojson
pip install django-leaflet --allow-external django-leaflet --allow-unverified django-leaflet
pip install django-smart-selects --allow-external django-smart-selects --allow-unverified django-smart-selects
pip install django-suit --allow-external django-suit --allow-unverified django-suit
pip install django-wysiwyg-redactor --allow-external django-wysiwyg-redactor --allow-unverified django-wysiwyg-redactor
pip install jsonfield --allow-external jsonfield --allow-unverified jsonfield
pip install psycopg2 --allow-external psycopg2 --allow-unverified psycopg2
pip install six --allow-external six --allow-unverified six
pip install wsgiref --allow-external wsgiref --allow-unverified wsgiref
# Collect DB connection settings and persist them system-wide via
# /etc/profile (presumably read later by Django settings -- confirm).
read -p "El nombre de la base de datos: " dbname
export dbname=$dbname
sudo echo "export dbname=$dbname">> /etc/profile
read -p "El nombre del usuario postgres: " usname
export usname=$usname
sudo echo "export usname=$usname">> /etc/profile
read -p "La contraseña del usuario postgres: " uscontra
export uscontra=$uscontra
sudo echo "export uscontra=$uscontra">> /etc/profile
read -p "La ip o nombre del servidor postgres: " ipdir
export ipdir=$ipdir
sudo echo "export ipdir=$ipdir">> /etc/profile
echo "OK la instalación ha terminado...."
# Re-enter the virtualenv, apply migrations, launch the dev server.
cd ~/myenv/
source bin/activate
cd PlataformaIRH
python manage.py syncdb
python manage.py makemigrations
python manage.py migrate
python manage.py runserver 0.0.0.0:8000
| true
|
32c2049f4ddf059a1ce9962f1cef137178f13134
|
Shell
|
iacanaw/ThermalManagement_ManyCore
|
/new.sh
|
UTF-8
| 3,243
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
# Post-process data11.csv (per-quantum router power dump): for each
# "Quantum" section, collect the Local/East/West/North/South values of
# routers 0..23 and lay them out as a 5x5 mesh grid in
# fluxoQuantum<i>.csv (one file per quantum).
#column1=$(cat data11.csv | cut -d',' -f1)
while read line
do
#echo $line
if echo "$line" | egrep 'Quantum' >/dev/null
then
# New quantum section: next output file index, restart router counter.
i=$((i+1))
j=0
elif echo "$line" | egrep 'Router '$j > /dev/null
then
# Split the router's CSV fields into the five per-port arrays.
echo $line > newfile.txt
pLocal[$j]=$(cat newfile.txt | cut -d',' -f2)
pEast[$j]=$(cat newfile.txt | cut -d',' -f3)
pWest[$j]=$(cat newfile.txt | cut -d',' -f4)
pNorth[$j]=$(cat newfile.txt | cut -d',' -f5)
pSouth[$j]=$(cat newfile.txt | cut -d',' -f6)
j=$((j+1))
fi
# Once all 24 routers of the section are read, emit the mesh layout.
if [ $j -eq 24 ]
then
# NOTE(review): ${VALOR[1]} expands an unset array element as a command
# (a no-op) -- looks like leftover debugging; confirm and remove.
${VALOR[1]}
echo ";;" ${pLocal[20]} ";;;" ${pLocal[21]} ";;;" ${pLocal[22]} ";;;" ${pLocal[23]} ";;;" ${pLocal[24]} ";" >> fluxoQuantum$i.csv
echo " ; R20 ; " ${pEast[20]} " ; " ${pWest[21]} " ; R21 ; " ${pEast[21]} " ; " ${pWest[22]} " ; R22 ; " ${pEast[22]} " ; " ${pWest[23]} " ; R23 ; " ${pEast[23]} " ; " ${pWest[24]} " ; R24" >>fluxoQuantum$i.csv
echo " ; " ${pSouth[20]} "; ; ; " ${pSouth[21]} " ;; ; " ${pSouth[22]} " ;; ; " ${pSouth[23]} " ;; ; " ${pSouth[24]} ";" >> fluxoQuantum$i.csv
echo " ; " ${pNorth[15]} " ; " ${pLocal[15]} " ;; " ${pNorth[16]} " ; " ${pLocal[16]} " ;; " ${pNorth[17]} " ; " ${pLocal[17]} " ;; " ${pNorth[18]} " ; " ${pLocal[18]} " ;; " ${pNorth[19]} ";" ${pLocal[19]} ";" >>fluxoQuantum$i.csv
echo " ; R15 ; " ${pEast[15]} " ; " ${pWest[16]} " ; R16 ; " ${pEast[16]} " ; " ${pWest[17]} " ; R17 ; " ${pEast[17]} " ; " ${pWest[18]} " ; R18 ; " ${pEast[18]} " ; " ${pWest[19]} " ; R19" >>fluxoQuantum$i.csv
echo " ; " ${pSouth[15]} " ;; ; " ${pSouth[16]} " ;; ; " ${pSouth[17]} " ; ;; " ${pSouth[18]} " ;; ; " ${pSouth[19]} ";" >> fluxoQuantum$i.csv
echo " ; " ${pNorth[10]} " ; " ${pLocal[10]} " ;; " ${pNorth[11]} " ; " ${pLocal[11]} " ;; " ${pNorth[12]} " ; " ${pLocal[12]} " ;; " ${pNorth[13]} " ; " ${pLocal[13]} " ;; " ${pNorth[14]} ";" ${pLocal[14]}";" >> fluxoQuantum$i.csv
echo " ; R10 ; " ${pEast[10]} " ; " ${pWest[11]} " ; R11 ; " ${pEast[11]} " ; " ${pWest[12]} " ; R12 ; " ${pEast[12]} " ; " ${pWest[13]} " ; R13 ; " ${pEast[13]} " ; " ${pWest[14]} " ; R14" >>fluxoQuantum$i.csv
echo " ; " ${pSouth[10]} " ;; ; " ${pSouth[11]} " ;; ; " ${pSouth[12]} "; ; ; " ${pSouth[13]} "; ; ; " ${pSouth[14]} ";" >> fluxoQuantum$i.csv
echo " ; " ${pNorth[5]} " ; " ${pLocal[5]} " ;; " ${pNorth[6]} " ; " ${pLocal[6]} " ;; " ${pNorth[7]} " ; " ${pLocal[7]} "; ; " ${pNorth[8]} " ; " ${pLocal[8]} " ;; " ${pNorth[9]} ";" ${pLocal[9]} ";;" >>fluxoQuantum$i.csv
echo " ; R5 ; " ${pEast[5]} " ; " ${pWest[6]} " ; R6 ; " ${pEast[6]} " ; " ${pWest[7]} " ; R7 ; " ${pEast[7]} " ; " ${pWest[8]} " ; R8 ; " ${pEast[8]} " ; " ${pWest[9]} " ; R9" >>fluxoQuantum$i.csv
echo " ; " ${pSouth[5]} "; ; ; " ${pSouth[6]} " ;; ; " ${pSouth[7]} " ;; ; " ${pSouth[8]} " ;; ; " ${pSouth[9]} ";" >> fluxoQuantum$i.csv
echo " ; " ${pNorth[0]} " ; " ${pLocal[0]} " ;; " ${pNorth[1]} " ; " ${pLocal[1]} " ; ;" ${pNorth[2]} " ; " ${pLocal[2]} "; ; " ${pNorth[3]} " ; " ${pLocal[3]} " ;; " ${pNorth[4]} ";" ${pLocal[4]} ";" >>fluxoQuantum$i.csv
echo " ; R0 ; " ${pEast[0]} " ; " ${pWest[1]} " ; R1 ; " ${pEast[1]} " ; " ${pWest[2]} " ; R2 ; " ${pEast[2]} " ; " ${pWest[3]} " ; R3 ; " ${pEast[3]} " ; " ${pWest[4]} " ; R4" >>fluxoQuantum$i.csv
fi
done < "data11.csv"
| true
|
c4fb479bc5550ce877fe4a1d9d4346dca86c08b0
|
Shell
|
mohitsaraswat411/termux-startup-tone
|
/install.sh
|
UTF-8
| 621
| 2.53125
| 3
|
[] |
no_license
|
# Termux startup-tone installer: confirm with the user, install the
# custom bash.bashrc and startup sound, then pull the runtime packages.
while true; do
read -p " Do you wish to install this program? press y to install & press n to cancle install (y/n) " yn
case $yn in
[Yy]* ) make install; break;;
[Nn]* ) exit;;
* ) echo "Please answer yes or no.";;
esac
done
# Install the shell rc that plays the tone, and the tone itself.
cp -r ~/termux-startup-tone/bash.bashrc /data/data/com.termux/files/usr/etc/
cd ~/
mkdir .tone
cp -r ~/termux-startup-tone/startup-sound.mp3 ~/.tone/
date | lolcat
pkg install screenfetch
date | lolcat
pkg install mpv
date | lolcat
pkg install pulseaudio
clear
# Show the Termux message-of-the-day and a system banner as a finale.
cd ..
cd ..
cd usr/
cd etc/
cat motd
cd ~/
screenfetch
cd ..
clear
| true
|
9bc91cc67044f03fb12da23dd4ab33dd6f79636f
|
Shell
|
007kevin/keystroke_dynamics
|
/analysis/biometric_analysis.sh
|
UTF-8
| 404
| 3
| 3
|
[] |
no_license
|
#!/bin/sh
# Run a Weka classifier with a derived threshold file: for the -t
# training file, compute TFILE as "<name>.threshold.<ext>" and append
# the threshold options to the forwarded Weka arguments.
set -e
CLASSPATH="/home/local/Documents/machine_learning/weka-3-8-0/weka.jar"
# Start option scanning at $2; $1 is presumably the Weka class name --
# confirm against callers.
OPTIND=2
while getopts ":t:" opt; do
case $opt in
t) filename=$(basename "$OPTARG")
ext="${filename##*.}"
filename="${filename%.*}"
TFILE=$filename".threshold."$ext
;;
esac
done
java -cp $CLASSPATH "$@" -threshold-file $TFILE -threshold-label "85"
| true
|
df3132e0cbfb76e5c348d1c5bae48ef106b40021
|
Shell
|
AmrBenSalem/NetworkManager
|
/scans/instant/getDeviceInformations
|
UTF-8
| 413
| 3.078125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# For every IP listed in the current_ip scan file, run the SNMP
# information collector, passing $1 through as an extra argument.
current_ip="/mnt/hgfs/NetworkManager/scans/instant/current_ip";
# Proceed only if the file has at least one non-empty line.
if grep -q "." $current_ip
then
# Rewrite the file with blank lines removed before iterating it.
echo "$(grep . $current_ip)"> $current_ip ;
while read LINE;do
#do sed -n "$LINE"p "$ipfile" >> "$finalfile"
/mnt/hgfs/NetworkManager/snmp_files/instant/get_all_informations $LINE $1
done < $current_ip
fi
#echo "$(tail -n +2 $finalfile)" > $finalfile;
# echo "$(grep . $system_name)"> $system_name;
| true
|
6fe9ef1be676dadd9a80f4c40be43fa02f8b60dd
|
Shell
|
latifkabir/Computation_using_C
|
/burgers_solution/burgers_solution_prb.sh
|
UTF-8
| 609
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
# Build, link and run the burgers_solution test program, leaving its
# output in burgers_solution_prb_output.txt.  Each stage checks $? and
# aborts (silently, exit 0) on failure.
#
gcc -c -g burgers_solution_prb.c >& compiler.txt
if [ $? -ne 0 ]; then
echo "Errors compiling burgers_solution_prb.c."
exit
fi
rm compiler.txt
#
# Link against the prebuilt library object for this architecture.
gcc burgers_solution_prb.o /$HOME/libc/$ARCH/burgers_solution.o -lm
if [ $? -ne 0 ]; then
echo "Errors linking and loading burgers_solution_prb.o."
exit
fi
#
rm burgers_solution_prb.o
#
mv a.out burgers_solution_prb
./burgers_solution_prb > burgers_solution_prb_output.txt
if [ $? -ne 0 ]; then
echo "Errors running burgers_solution_prb."
exit
fi
rm burgers_solution_prb
#
echo "Program output written to burgers_solution_prb_output.txt"
| true
|
facf6653f16a739a641c43376708bf1f7883dc89
|
Shell
|
James6xie/fm-orchestrator
|
/openshift/integration/koji/pipelines/tests/runtests
|
UTF-8
| 1,554
| 3.59375
| 4
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash -ex
# Integration test driver: provisions a named pipeline via its controller
# service, fetches CA cert / krb5 config / koji config for it, obtains the
# MBS user's kerberos ticket, then runs every executable test in TEST_DIR
# (optionally filtered by $TESTCASES; TESTCASES=skip skips everything).
export PIPELINE_ID=$1
if [ -z "$PIPELINE_ID" ]; then
echo You must specify the pipeline ID
exit 1
fi
if [ "$TESTCASES" == "skip" ]; then
echo "TESTCASES=skip defined, skipping tests"
exit 0
fi
DOMAIN=${2:-"cloud.paas.psi.redhat.com"}
CONTROLLER=http://${PIPELINE_ID}.${DOMAIN}
export TEST_DIR=$(realpath $(dirname $0))
echo "Provisioning pipeline services..."
curl -X POST -F data=@${TEST_DIR}/mbs-cgimport-vars.yaml $CONTROLLER/scripts/provision
# Fetch the pipeline CA certificate and kerberos client config.
CERT_DIR=$(mktemp -d)
export CACERT="${CERT_DIR}/ca.crt"
curl -s ${CONTROLLER}/ca/cacert > $CACERT
export KRB5_CONFIG=$(mktemp)
curl -so $KRB5_CONFIG ${CONTROLLER}/krb5/configfile
export MBS_FRONTEND_HOST="$(curl -s ${CONTROLLER}/vars/MBS_FRONTEND_HOST)"
# The MBS user is defined in the Ansible vars file
export MBS_USER="mbs-user-${PIPELINE_ID}"
export MBS_USER_PASSWORD=$(curl -s ${CONTROLLER}/krb5/principal/${MBS_USER})
kinit -V $MBS_USER <<<$MBS_USER_PASSWORD
# Generate a koji client config pointing at the pipeline's hub.
KOJI_HUB_HOST="$(curl -s ${CONTROLLER}/vars/KOJI_HUB_HOST)"
export KOJI_CONFIG=$(mktemp)
cat > $KOJI_CONFIG <<EOF
[koji]
server = https://${KOJI_HUB_HOST}/kojihub
weburl = https://${KOJI_HUB_HOST}/koji
serverca = $CACERT
authtype = kerberos
EOF
# Run each executable sibling of this script (except itself), honouring
# the optional $TESTCASES name filter.
for TEST_FILE in $(ls ${TEST_DIR}); do
TEST="${TEST_DIR}/${TEST_FILE}"
if [ -x "$TEST" ] && [ "${TEST_FILE}" != "runtests" ]; then
if [ -z "$TESTCASES" ] || echo "$TESTCASES" | grep "${TEST_FILE}"; then
echo "Running test ${TEST_FILE}"
$TEST
else
echo "Skipping test ${TEST_FILE}"
fi
fi
done
| true
|
f31acca877d1f8f052cfcf1aa92ebaa2fe381cbe
|
Shell
|
jacobben85/vagrant-ubuntu
|
/startup.sh
|
UTF-8
| 360
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/sh -e
# Vagrant provisioning hook: start the APS server and the PEP jar as the
# vagrant user, each backgrounded with its output captured to a log file.
export JAVA_HOME="/usr/lib/jvm/java-8-oracle"
echo "Added JAVA_HOME"
sudo -H -u vagrant bash -c "touch ~/aps.log"
sudo -H -u vagrant bash -c "touch ~/pep.log"
sudo -H -u vagrant bash -c "nohup sh /vagrant/aps/aps-express-edition/start_server.sh &> ~/aps.log &"
sudo -H -u vagrant bash -c "java -jar /vagrant/pep/target/pep.jar &> ~/pep.log &"
| true
|
24dd365e214ca564b899b68bb0d01af4d2da1cee
|
Shell
|
erdc/petsc-dev
|
/bin/mpiexec.lam
|
UTF-8
| 548
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/sh
#set -x
#
# This program is a wrapper over lam's mpiexec. It converts
# the mpich's mpiexec formated command to lam's format.
# and invokes lam's mpiexec with it.
#
# Expected form: mpiexec -np <N> <program> [options...]
# "$1" is quoted so invoking the wrapper with no arguments reports the
# usage error instead of failing with a [ ] syntax error.
if [ "$1" != "-np" ]; then
  echo "Error in mpiexec command"
  exit 1
fi
shift
np=$1
shift
progname=$1
shift
options=$*
#
# Please modify the following path to point to the correct location of LAM's mpiexec
#
lam_path=/home/petsc/soft/solaris-9-lam/lam-6.5.8/bin
#
# Now execute the mpiexec command, then clean up LAM state.
#
$lam_path/mpiexec -w -c $np -s n0 $progname -- $options
$lam_path/lamclean
| true
|
1a1f4a2cb6694baf5e6b5de39583aa303b03e6df
|
Shell
|
cmf2073/INF-snippets
|
/awslogs/dockervars-CA.sh
|
UTF-8
| 631
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
# Export AWS credentials (read from a sourced helper) plus locale/region
# settings, then list CloudWatch log groups with the awslogs CLI.
## Old code
#AWSAK=`grep awsak trec-sid.me | cut -c7-26`
#AWSSAK=`grep awssak trec-sid.me | cut -c8-48`
#echo $AWSAK
#echo $AWSSAK
### New code
#AK=$(grep VAR_ak certs/awslogs-ci.file | cut -f2 | rev | openssl enc -d -a -A -aes-256-cbc)
#SAK=$(grep VAR_sak certs/awslogs-ci.file | cut -f2 | rev | openssl enc -d -a -A -aes-256-cbc)
# NOTE(review): AK and SAK are assumed to be set by this sourced file —
# confirm it defines both before running.
source ../awsvars-awslogs.sh
# Print the variable values (for test purposes only)
#echo $AWSAK
#echo $AWSSAK
# Export the credentials where the AWS tooling expects them.
export AWS_ACCESS_KEY_ID=$AK
export AWS_SECRET_ACCESS_KEY=$SAK
export LC_ALL=en_US.UTF-8
export LANG=en_US.UTF-8
export AWS_REGION=us-west-1
awslogs groups
| true
|
1a75cad83ddd6800a372a29a13276f134bb6e01b
|
Shell
|
ChidambaramR/CSE506_Grading
|
/script/error_errno.sh
|
UTF-8
| 356
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/sh
# Grading helper: run the student's xhw1 binary for the "small files"
# test case and check whether it printed the expected errno.
cmd='./a.out'
errno=10
# Test Case 1: Small Files
# printf replaces `echo -e`: under POSIX sh (e.g. dash) `echo -e` prints
# a literal "-e" instead of interpreting the escape sequences.
printf '\033[1;36m************Test Case 1: Small Files***********\033[0m\n'
# NOTE(review): $s1..$s4 are not set anywhere in this script — presumably
# exported by a caller; confirm before relying on them.
# The egrep pattern is quoted so `[0-9]{2}` cannot be glob-expanded by
# the shell against files in the current directory.
var=$(./xhw1 -c -m 777 $s1 $s2 $s3 $s4 | egrep -o '[0-9]{2}')
printf '\033[1;33mExpected Output: Errno = %s\033[0m\n' "$errno"
printf "Checking Output..."
# The "x" prefix keeps the test well-formed when $var is empty.
if [ "x$var" = "x" ]; then
echo "Word empty"
else
echo "Word Not empty"
fi
| true
|
87c999d2fddda1b7f297872e1d9d539707f64578
|
Shell
|
ychubachi/.profile.d
|
/.zshrc
|
UTF-8
| 6,019
| 2.84375
| 3
|
[] |
no_license
|
# NOTE: this script was auto-generated from dot-zshrc.org.
echo '~/.zshrc'
# Path to your oh-my-zsh configuration.
ZSH=$HOME/.oh-my-zsh
# Set name of the theme to load.
# Look in ~/.oh-my-zsh/themes/
# Optionally, if you set this to "random", it'll load a random theme each
# time that oh-my-zsh is loaded.
# ZSH_THEME="random"
# ZSH_THEME="crunch"
# Pick a lightweight theme inside Emacs terminals (eterm), fancier otherwise.
case "$TERM" in
eterm*)
ZSH_THEME="robbyrussell"
;;
*)
# ZSH_THEME="wedisagree" # cute, but unusable in terminal-mode...
# ZSH_THEME="amuse" # shows the ruby version
# ZSH_THEME="bureau" # simple white-based theme; icons are hard to read?
# ZSH_THEME="peepcode" # NG: complains that rvm is missing
# ZSH_THEME="pure" # NG: very simple
ZSH_THEME="sonicradish" # includes the hostname
;;
esac
# Set to this to use case-sensitive completion
CASE_SENSITIVE="true"
# Comment this out to disable bi-weekly auto-update checks
DISABLE_AUTO_UPDATE="true"
# Uncomment to change how often before auto-updates occur? (in days)
export UPDATE_ZSH_DAYS=13
# Uncomment following line if you want to disable colors in ls
# DISABLE_LS_COLORS="true"
# Uncomment following line if you want to disable autosetting terminal title.
# DISABLE_AUTO_TITLE="true"
# Uncomment following line if you want to disable command autocorrection
DISABLE_CORRECTION="true"
# Uncomment following line if you want red dots to be displayed while waiting for completion
COMPLETION_WAITING_DOTS="true"
# Uncomment following line if you want to disable marking untracked files under
# VCS as dirty. This makes repository status check for large repositories much,
# much faster.
# DISABLE_UNTRACKED_FILES_DIRTY="true"
# Which plugins would you like to load? (plugins can be found in ~/.oh-my-zsh/plugins/*)
# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
# NOTE(review): "git-exstras" looks like a typo for "git-extras" — verify.
plugins=(git git-exstras ruby rails rbenv bundler git-flow git-hubflow gem capistrano command-not-found vagrant)
source $ZSH/oh-my-zsh.sh
# Customize to your needs...
## ================================================================
## Application settings including PATHs and other environment vers.
## ================================================================
## set PATH so it includes user's private bin if it exists
if [ -d "$HOME/bin" ] ; then
PATH="$HOME/bin:$PATH"
fi
## For texlive
if [ -d /usr/local/texlive/2014/ ]; then
export INFOPATH=$INFOPATH:/usr/local/texlive/2014/texmf-dist/doc/info
export MANPATH=$MANPATH:/usr/local/texlive/2014/texmf-dist/doc/man
export PATH=/usr/local/texlive/2014/bin/i386-linux:$PATH
export PATH=/usr/local/texlive/2014/bin/x86_64-linux:$PATH # Workaround
fi
## For JDK1.7
if [ -d "$HOME/opt/jdk1.7.0_45" ]; then
export JAVA_HOME="$HOME/opt/jdk1.7.0_45"
export PATH="$JAVA_HOME/bin:$PATH"
fi
## Added by the Heroku Toolbelt
if [ -d /usr/local/heroku/bin ]; then
export PATH="/usr/local/heroku/bin:$PATH"
fi
## Amazon EC2
if [ -d "$HOME/opt/ec2-api-tools-1.6.9.0" ]; then
export EC2_HOME=$HOME/opt/ec2-api-tools-1.6.9.0
export PATH=$PATH:$EC2_HOME/bin
fi
## InteliJ IDEA
if [ -d $HOME/opt/ida-IC-129.1359 ]; then
export IDEA_HOME=$HOME/opt/ida-IC-129.1359
export PATH=$PATH:$IDEA_HOME/bin
fi
## Eclipse
if [ -d $HOME/opt/eclipse ]; then
export ECLIPSE_HOME=$HOME/opt/eclipse
export PATH=$PATH:$ECLIPSE_HOME
fi
## Scala
if [ -d $HOME/opt/scala-2.10.3 ]; then
export SCALA_HOME=$HOME/opt/scala-2.10.3
export PATH=$PATH:$SCALA_HOME/bin
fi
## for rbenv
export RBENV_ROOT=~/.rbenv # Mac?
if which rbenv > /dev/null; then eval "$(rbenv init -)"; fi
if [ -f ~/bin/aws_env.sh ]; then
source ~/bin/aws_env.sh
fi
## ================================================================
## Alias definitions.
## ================================================================
if [ -f ~/.aliases ]; then
. ~/.aliases
fi
# global aliases (zsh-only: expand anywhere on the command line)
alias -g L="| $PAGER"
alias -g M="| $PAGER"
alias -g G='| grep'
alias -g C='| xclip'
alias -g W='| wc'
alias -g H='| head'
alias -g T='| tail'
alias -g S='| sort'
## ================================================================
## Shell functions.
## ================================================================
# Platform-specific editor wrappers (emacsclient on Linux, Emacs.app on Mac).
case "$(uname -s)" in
Linux*)
# Do something under Linux platform
echo "Linux"
function emacs() { command emacsclient -c -a "" $* }
function vim() { command emacsclient -t -a "" $* }
function killemacs() { command emacsclient -e "(kill-emacs)"}
# Eclipse menu workaround
alias eclipse='UBUNTU_MENUPROXY=0 eclipse'
;;
Darwin*)
# Do something under Mac OS X platform
echo "Darwin"
function emacs() {
/Applications/Emacs.app/Contents/MacOS/Emacs -r $* &
}
;;
MINGW32_NT*)
# Do something under Windows NT platform
echo "MinGW"
;;
CYGWIN*)
# Do something under Cygwin shell
echo "Cygwin"
;;
*)
echo "Other"
;;
esac
alias em=emacs
alias vi=vim
# Render FILE.dot to FILE.png with graphviz.
function dotpng() {command dot -Tpng $1.dot -o $1.png}
# added by travis gem
## ================================================================
## Time
## ================================================================
REPORTTIME=8 # show `time` stats when a command uses more than 8 CPU seconds
TIMEFMT="\
The name of this job.             :%J
CPU seconds spent in user mode.   :%U
CPU seconds spent in kernel mode. :%S
Elapsed time in seconds.          :%E
The CPU percentage.               :%P"
if [ -f $HOME/.xmodmap ]; then
xmodmap $HOME/.xmodmap
fi
# Chef
export PATH=/opt/chefdk/bin:$PATH
# Hub
eval "$(hub alias -s)"
export NVM_DIR="/home/yc/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && . "$NVM_DIR/nvm.sh"  # This loads nvm
# Cask
export PATH="/home/yc/.cask/bin:$PATH"
| true
|
f7c8890fb9ab185772b7d8e1d080c61fc66ea001
|
Shell
|
dladams/dune-dev
|
/dune
|
UTF-8
| 321
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/sh
# Dispatcher for the "dune" CLI: `dune COM ARGS...` execs the program
# dune-COM found in $DUNE_INSDIR, forwarding ARGS verbatim.
SUBCOM=$1
# Guard the shift: with zero arguments `shift` is a fatal error in POSIX sh.
[ $# -gt 0 ] && shift
# Use || instead of the obsolescent `-o` test operator.
if [ -z "$SUBCOM" ] || [ "$SUBCOM" = "-h" ]; then
  echo Usage: dune COM
  echo "  COM = checkout, build, show, ..."
  echo "  For details, see \"dune help\""
  exit 0
fi
COM=$DUNE_INSDIR/dune-$SUBCOM
if ! test -x "$COM"; then
  echo Command not found: $SUBCOM
  exit 1
fi
# "$@" preserves argument boundaries; the old ARGS="$*" re-split any
# argument that contained whitespace.
"$COM" "$@"
| true
|
09bdc8253ef40ffdc5e1971291733d599c524c2c
|
Shell
|
Xilinx/Vitis-AI
|
/examples/waa/apps/resnet50_waa_aie/app_test.sh
|
UTF-8
| 6,872
| 3.59375
| 4
|
[
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSD-3-Clause-Open-MPI",
"LicenseRef-scancode-free-unknown",
"Libtool-exception",
"GCC-exception-3.1",
"LicenseRef-scancode-mit-old-style",
"OFL-1.1",
"JSON",
"LGPL-2.1-only",
"LGPL-2.0-or-later",
"ICU",
"LicenseRef-scancode-other-permissive",
"GPL-2.0-or-later",
"GPL-3.0-only",
"LicenseRef-scancode-issl-2018",
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-unicode",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-3.0-or-later",
"Zlib",
"BSD-Source-Code",
"ClArtistic",
"LicenseRef-scancode-unknown-license-reference",
"ISC",
"NCSA",
"LicenseRef-scancode-proprietary-license",
"GPL-2.0-only",
"CC-BY-4.0",
"FSFULLR",
"Minpack",
"Unlicense",
"BSL-1.0",
"NAIST-2003",
"Apache-2.0",
"LicenseRef-scancode-protobuf",
"LicenseRef-scancode-public-domain",
"Libpng",
"Spencer-94",
"BSD-2-Clause",
"Intel",
"GPL-1.0-or-later",
"MPL-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Supported Modes & Models
# Print the command-line usage summary for app_test.sh.
usage() {
echo -e ""
echo "Usage:"
echo "------------------------------------------------"
echo "  ./app_test.sh --xmodel_file  <xmodel-path>"
echo "                --image_dir    <image-dir>"
echo "                --use_sw_pre_proc (For software Preprocessing)"
echo "                --no_zero_copy (To disable zero copy)"
echo "                --label_file  <label-file>"
echo "                --verbose (To print Top 5 outputs for each image) "
echo "                --performance_diff (To compare the Performance of Software and Hardware preprocessing)"
echo "                --accuracy_diff (To compare the Accuracy of Software and Hardware preprocessing)"
echo -e ""
}
# Defaults
xmodel_file=""
img_dir=""
sw_proc=0
no_zcpy=0
label_file=""
verbose=0
performance_diff=0
accuracy_diff=0
# Parse Options
# Hand-rolled long-option loop: flags that take a value shift 2,
# boolean flags shift 1; any unknown token aborts.
while true
do
if [ "$1" == "-h" ] || [ "$1" == "--help" ]; then
usage;
exit 0;
fi
if [ -z "$1" ]; then
break;
fi
# Every option except the listed boolean flags must be followed by a value.
if [[ "$1" != "--use_sw_pre_proc" && "$1" != "--verbose" && "$1" != "--no_zero_copy" && "$1" != "--performance_diff" && "$1" != "--accuracy_diff" && -z "$2" ]]; then
echo -e "\n[ERROR] Missing argument value for $1 \n";
exit 1;
fi
case "$1" in
--xmodel_file        ) xmodel_file="$2"    ; shift 2 ;;
--image_dir          ) img_dir="$2"        ; shift 2 ;;
--use_sw_pre_proc    ) sw_proc=1           ; shift 1 ;;
--no_zero_copy       ) no_zcpy=1           ; shift 1 ;;
--label_file         ) label_file="$2"     ; shift 2 ;;
--verbose            ) verbose=1           ; shift 1 ;;
--performance_diff   ) performance_diff=1  ; shift 1 ;;
--accuracy_diff      ) accuracy_diff=1     ; shift 1 ;;
 *) echo "Unknown argument : $1";
    echo "Try ./app_test.sh -h to get correct usage. Exiting ...";
    exit 1 ;;
esac
done
# Both the model and the image directory are mandatory.
if [[ "$xmodel_file" = "" ]]; then
echo -e ""
echo -e "[ERROR] No xmodel file selected !"
echo -e "[ERROR] Check Usage with: ./app_test.sh -h "
echo -e ""
exit 1
fi
if [[ "$img_dir" = "" ]]; then
echo -e ""
echo -e "[ERROR] No image directory selected !"
echo -e "[ERROR] Check Usage with: ./app_test.sh -h "
echo -e ""
exit 1
fi
CPP_EXE="./bin/resnet50_waa_aie.exe"
# Plain run: neither diff mode requested, just execute once with the
# flags the user supplied.
if [[ "$performance_diff" -eq 0  &&  "$accuracy_diff" -eq 0 ]];
then
exec_args="$xmodel_file $img_dir $sw_proc $no_zcpy $verbose $label_file"
${CPP_EXE} ${exec_args}
fi
# Performance-diff mode: run once with software preprocessing and once
# with hardware preprocessing, scrape the latency/FPS numbers from the
# program output via grep/awk temp files, and report the improvement.
if [ "$performance_diff" -eq 1 ];
then
echo -e "\n Running Performance Diff: "
echo -e "\n Running Application with Software Preprocessing \n"
sw_proc=1
no_zcpy=1
verbose=0
exec_args="$xmodel_file $img_dir $sw_proc $no_zcpy $verbose $label_file"
# Keep only the four metric lines, then split each metric into its own file.
${CPP_EXE} ${exec_args} |& grep -e "E2E Performance" -e "Pre-process Latency" -e "Execution Latency" -e "Post-process Latency" > z.log
grep "E2E Performance" z.log > x.log
grep "Pre-process Latency" z.log > x1.log
grep "Execution Latency" z.log > x2.log
grep "Post-process Latency" z.log > x3.log
# Column 3 of each line is the numeric value.
awk '{print $3 > "xx.log"}' x.log
awk '{print $3 > "xx1.log"}' x1.log
awk '{print $3 > "xx2.log"}' x2.log
awk '{print $3 > "xx3.log"}' x3.log
read i<xx.log
read a<xx1.log
read b<xx2.log
read c<xx3.log
i=$(printf "%.2f" $i)
a=$(printf "%.2f" $a)
b=$(printf "%.2f" $b)
c=$(printf "%.2f" $c)
printf "   E2E Performance: %.2f fps\n" $i
printf "   Pre-process Latency: %.2f ms\n" $a
printf "   Execution Latency: %.2f ms\n" $b
printf "   Post-process Latency: %.2f ms" $c
echo -e "\n"
rm z.log
rm x.log
rm xx.log
rm x1.log
rm xx1.log
rm x2.log
rm xx2.log
rm x3.log
rm xx3.log
echo -e " Running Application with Hardware Preprocessing \n"
sw_proc=0
no_zcpy=0
verbose=0
exec_args="$xmodel_file $img_dir $sw_proc $no_zcpy $verbose $label_file"
${CPP_EXE} ${exec_args} |& grep -e "E2E Performance" -e "Pre-process Latency" -e "Execution Latency" -e "Post-process Latency" > z1.log
grep "E2E Performance" z1.log > y.log
grep "Pre-process Latency" z1.log > y1.log
grep "Execution Latency" z1.log > y2.log
grep "Post-process Latency" z1.log > y3.log
awk '{print $3 > "yy.log"}' y.log
awk '{print $3 > "yy1.log"}' y1.log
awk '{print $3 > "yy2.log"}' y2.log
awk '{print $3 > "yy3.log"}' y3.log
read j<yy.log
read a<yy1.log
read b<yy2.log
read c<yy3.log
j=$(printf "%.2f" $j)
a=$(printf "%.2f" $a)
b=$(printf "%.2f" $b)
c=$(printf "%.2f" $c)
# Throughput gain in percent: (hw - sw) * 100 / sw, computed via awk
# because bash has no floating-point arithmetic.
k=$(awk -vn1="$j" -vn2="$i" 'BEGIN{ print ( n1 - n2) }')
f=$(awk -vn1="$k" -vn2="100" 'BEGIN{ print ( n1 * n2) }')
printf "   E2E Performance: %.2f fps\n" $j
printf "   Pre-process Latency: %.2f ms\n" $a
printf "   Execution Latency: %.2f ms\n" $b
printf "   Post-process Latency: %.2f ms" $c
h=$(awk -vn1="$f" -vn2="$i" 'BEGIN{ print ( n1 / n2) }')
echo -e "\n"
printf "   The percentage improvement in throughput is %.2f" $h
echo -e " %\n"
rm z1.log
rm y.log
rm yy.log
rm y1.log
rm yy1.log
rm y2.log
rm yy2.log
rm y3.log
rm yy3.log
fi
# Accuracy-diff mode: run once with software and once with hardware
# preprocessing and compare the reported network accuracy (requires a
# label file so the application can score its predictions).
if [ "$accuracy_diff" -eq 1 ];
then
if [[ "$label_file" = "" ]];
then
echo -e ""
echo -e "[ERROR] No label file selected !"
echo -e ""
exit 1
fi
echo -e "\n Running Accuracy Diff: "
echo -e "\n Running Application with Software Preprocessing \n"
sw_proc=1
no_zcpy=1
verbose=0
exec_args="$xmodel_file $img_dir $sw_proc $no_zcpy $verbose $label_file"
# Field 7 of the "accuracy of the network" line is the percentage.
${CPP_EXE} ${exec_args} |& grep "accuracy of the network" > x.log
awk '{print $7 > "xx.log"}' x.log
read i<xx.log
i=$(printf "%.2f" $i)
printf "   Accuracy of the network is %.2f %%" $i
echo -e "\n"
rm x.log
rm xx.log
echo -e " Running Application with Hardware Preprocessing \n"
sw_proc=0
no_zcpy=0
verbose=0
exec_args="$xmodel_file $img_dir $sw_proc $no_zcpy $verbose $label_file"
${CPP_EXE} ${exec_args} |& grep "accuracy of the network" > y.log
awk '{print $7 > "yy.log"}' y.log
read j<yy.log
j=$(printf "%.2f" $j)
# Relative accuracy change in percent, computed with awk (float math).
k=$(awk -vn1="$j" -vn2="$i" 'BEGIN{ print ( n1 - n2) }')
f=$(awk -vn1="$k" -vn2="100" 'BEGIN{ print ( n1 * n2) }')
printf "   Accuracy of the network is %.2f %%" $j
h=$(awk -vn1="$f" -vn2="$i" 'BEGIN{ print ( n1 / n2) }')
echo -e "\n"
printf "   The percentage improvement in accuracy is %.2f " $h
echo -e " %\n"
rm y.log
rm yy.log
fi
| true
|
f1fd3d7fa7fb2c97b9014965fda12db85456c018
|
Shell
|
filfreire/scripts
|
/snippet-if-str
|
UTF-8
| 146
| 2.78125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Print a ready-to-paste snippet for an if-condition string comparison.
# A quoted heredoc avoids the backslash-escaping the echo version needed.
cat <<'SNIPPET'
if [ "$_STR" == "value" ]; then
  echo "hello."
fi
SNIPPET
| true
|
2a08a308ac73cf72715b20ac1a88138102e29d62
|
Shell
|
manoharramarao/bashScripts
|
/display_colors.sh
|
UTF-8
| 1,371
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/bash
############################################################
# this works only with bash.
# So run using $/bin/bash display_colors.sh
# use BRIGHTYELLOW="\033[00;38;5;154m\]" in ~/.bashrc to use the color
# where 154 in the above line is the $iter number printed in output
# Few examples are below
# YELLOW="\033[00;38;5;011m\]"
# RED="\033[00;38;5;009m\]"
# GREEN="\033[00;38;5;010m\]"
############################################################

# Print a 7-column grid of the xterm-256 palette: one row per base color
# 16..51, columns spaced 36 apart; the last column wraps past 250 back
# toward 0 so the whole palette is covered.
function colorgrid ()
{
local iter=16
while [ "$iter" -lt 52 ]
do
# $(( )) replaces the deprecated $[ ] arithmetic syntax, and direct
# ${var} expansion replaces the useless $(echo $var) subshells.
local second=$((iter + 36))
local third=$((second + 36))
local four=$((third + 36))
local five=$((four + 36))
local six=$((five + 36))
local seven=$((six + 36))
if [ "$seven" -gt 250 ]; then seven=$((seven - 251)); fi
# Emit "<colored block> <zero-padded index>" for each column.
echo -en "\033[38;5;${iter}m█ "
printf "%03d" "$iter"
echo -en "  \033[38;5;${second}m█ "
printf "%03d" "$second"
echo -en "  \033[38;5;${third}m█ "
printf "%03d" "$third"
echo -en "  \033[38;5;${four}m█ "
printf "%03d" "$four"
echo -en "  \033[38;5;${five}m█ "
printf "%03d" "$five"
echo -en "  \033[38;5;${six}m█ "
printf "%03d" "$six"
echo -en "  \033[38;5;${seven}m█ "
printf "%03d" "$seven"
iter=$((iter + 1))
printf '\r\n'
done
}
colorgrid
|
7885f0610b12ed16864bc4f90b311e5d0f479566
|
Shell
|
wujingke/lor
|
/install.sh
|
UTF-8
| 978
| 3.71875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Installer for the lor web framework: installs the "lord" CLI launcher
# into /usr/local/bin and copies the framework sources into PACKAGE_PATH
# (first CLI argument, defaulting to /usr/local/lor).
PACKAGE_PATH="$1"
LOR_PATH="/usr/local/bin/"

echo "start installing lor..."
if [ -n "$PACKAGE_PATH" ];then
    PACKAGE_PATH="${PACKAGE_PATH}/lor" #add sub folder for lor
    echo "use defined PATH: "${PACKAGE_PATH}
else
    PACKAGE_PATH="/usr/local/lor"
    echo "use default PATH: ${PACKAGE_PATH}"
fi

# Wipe any previous installation before copying the new one in.
mkdir -p $PACKAGE_PATH
mkdir -p $LOR_PATH
rm -rf $LOR_PATH/lord
rm -rf $PACKAGE_PATH/*

echo "install lor cli to $LOR_PATH"
# Generate the "lord" launcher: a resty script that puts the installed
# package on package.path and delegates to bin.lord (or prints the path).
echo "#!/usr/bin/env resty" > tmp_lor_bin
echo "package.path=\""${PACKAGE_PATH}"/?.lua;;\"" >> tmp_lor_bin
echo "if arg[1] and arg[1] == \"path\" then" >> tmp_lor_bin
echo "    print(\"${PACKAGE_PATH}\")" >> tmp_lor_bin
echo "    return" >> tmp_lor_bin
echo "end" >> tmp_lor_bin
echo "require('bin.lord')(arg)" >> tmp_lor_bin
mv tmp_lor_bin $LOR_PATH/lord
chmod 755 $LOR_PATH/lord

echo "install lor package to $PACKAGE_PATH"
# Stage lib/lor as ./lor so the copied tree has the expected layout.
mkdir -p ./lor
cp -a ./lib/lor/* ./lor/
cp -a ./* $PACKAGE_PATH/
rm -rf ./lor
echo "lor framework installed."
| true
|
ccc1943245b9ad5a98b20145cf979d8346a1ffb4
|
Shell
|
Digvijay-10/shell_scripts
|
/if_and.sh
|
UTF-8
| 507
| 2.640625
| 3
|
[] |
no_license
|
#! /bin/bash
# Prompt for HSC percentage and CET score, then report eligibility for
# medical and engineering admission against fixed cutoffs.
echo -e "Enter your HSC percentage:\c"
# -r prevents backslash interpretation during read (best practice).
read -r hsc
echo -e "Enter your CET score:\c"
read -r cet
# Quote the variables: unquoted, an empty read (e.g. EOF on stdin) makes
# [ ... ] fail with "unary operator expected".
if [ "$hsc" -ge 50 ] && [ "$cet" -ge 100 ]
then
echo "You are eligible for medical"
else
echo "You are not eligible for medical"
fi
if [ "$hsc" -ge 45 ] && [ "$cet" -ge 90 ]
then
echo "You are eligible for engineering"
else
echo "You are not eligible for engineering"
fi
| true
|
3d0841838025a936419f04affb25fcba5a46988f
|
Shell
|
sandsmark/libfisk
|
/fisk.sh
|
UTF-8
| 860
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# Copyright 2008 MTS productions
# "fisk" prank installer: downloads and compiles an LD_PRELOAD shim,
# hooks it into the user's ~/.bashrc so every new shell loads it, then
# deletes itself and its build artifacts to hide its traces.
# NOTE(review): this script modifies ~/.bashrc and self-deletes — treat
# it as intentionally mischievous code; do not run casually.

FISK_DIR=$HOME/.local/share/fisk

# Paranoia ftw.
LD_PRELOAD=""

# We never ran
rm $0

# Set up directories
mkdir -p $FISK_DIR
pushd $FISK_DIR

# Remove any leftover files from earlier fishings
rm -f libfisk.c libfisk.o libfisk.so

# Download sources, and adjust paths (probably a better way to do this)
wget http://home.samfundet.no/~sandsmark/libfisk.c
sed -e s,%FISK_DIR%,$FISK_DIR, -i libfisk.c

# Compile the fishing library
gcc -c -fPIC libfisk.c -D_GNU_SOURCE -Wall
gcc -shared -fPIC -o libfisk.so libfisk.o -ldl -Wall

# Take a backup of the clean bashrc
cp $HOME/.bashrc $FISK_DIR/bashrc

# Make sure our library is preloaded next time a shell is spawned
echo "LD_PRELOAD=$FISK_DIR/libfisk.so" >> $HOME/.bashrc

# Remove extra traces
rm -f libfisk.c libfisk.o
popd

echo "Fishing done..."
sleep 3
clear
| true
|
0f790f11666223bef476c0185dee7fff5ca6606d
|
Shell
|
cbartondock/PortableEnv
|
/linkconfig.sh
|
UTF-8
| 725
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
# Symlink every file in config_files/ into $HOME as a dotfile:
#   $HOME/.<name> -> $HOME/.vim/config_files/<name>
# (A previous revision contained a commented-out msys/Windows mklink
# variant; see version control history if it is ever needed again.)
for cfile in config_files/*; do
  [ -e "$cfile" ] || continue #don't go to glob if no file found
  name="$(basename "$cfile")"
  echo "Linking $name to $HOME/.$name"
  # Quote the target (the unquoted `rm $HOME/.$name` broke on names with
  # spaces) and use -f so a missing old dotfile on first run is no error.
  rm -f "$HOME/.$name"
  ln -s "$(cd "$HOME/.vim/config_files";pwd)/$name" "$HOME/.$name"
done;
| true
|
97316b702f86247d1bd043ff72a7bd063efc80c3
|
Shell
|
thomasjsn/acme.sh-dns_zeit
|
/dns_zeit.sh
|
UTF-8
| 6,758
| 3.390625
| 3
|
[] |
no_license
|
#!/usr/bin/env sh
#
#ZEIT_Token="sdfsdfsdfljlbjkljlkjsdfoiwje"
ZEIT_Api="https://api.zeit.co/v2"
######## Public functions #####################
#Usage: add _acme-challenge.www.domain.com "XKrxpRBosdIKFzxW_CT3KLZNf6q0HG9i01zxXp5CPBs"
# acme.sh DNS-01 hook: add a TXT record via the ZEIT (now Vercel) DNS API.
# Usage: dns_zeit_add fulldomain txtvalue
# Requires ZEIT_Token (env or saved account conf); relies on acme.sh
# helpers (_err, _debug, _info, _contains, _*accountconf_mutable).
# Returns 0 when the API response contains a record "uid", 1 otherwise.
dns_zeit_add() {
  fulldomain=$1
  txtvalue=$2

  ZEIT_Token="${ZEIT_Token:-$(_readaccountconf_mutable ZEIT_Token)}"

  if [ -z "$ZEIT_Token" ]; then
    ZEIT_Token=""
    _err "You didn't specify a ZEIT token yet."
    return 1
  fi

  #save the api token to the account conf file.
  _saveaccountconf_mutable ZEIT_Token "$ZEIT_Token"

  # Split fulldomain into the registered zone and the record subdomain;
  # _get_root also sets _domain_id/_sub_domain/_domain as side effects.
  _debug "First detect the root zone"
  if ! _get_root "$fulldomain"; then
    _err "invalid domain"
    return 1
  fi
  _debug _domain_id "$_domain_id"
  _debug _sub_domain "$_sub_domain"
  _debug _domain "$_domain"

  _debug "Getting txt records"
  _zeit_rest GET "domains/$_domain/records"

#  if ! printf "%s" "$response" | grep \"success\":true >/dev/null; then
#    _err "Error"
#    return 1
#  fi

  _info "Adding record"
  if _zeit_rest POST "domains/$_domain/records" "{\"type\":\"TXT\",\"name\":\"$_sub_domain\",\"value\":\"$txtvalue\"}"; then
    # A successful create returns the new record's "uid" in the body.
    if _contains "$response" "uid"; then
      _info "Added, OK"
      return 0
    else
      _err "Add txt record error."
      return 1
    fi
  fi
  _err "Add txt record error."
  return 1
}
#dns_cf_add() {
# fulldomain=$1
# txtvalue=$2
#
# CF_Key="${CF_Key:-$(_readaccountconf_mutable CF_Key)}"
# CF_Email="${CF_Email:-$(_readaccountconf_mutable CF_Email)}"
# if [ -z "$CF_Key" ] || [ -z "$CF_Email" ]; then
# CF_Key=""
# CF_Email=""
# _err "You didn't specify a Cloudflare api key and email yet."
# _err "You can get yours from here https://dash.cloudflare.com/profile."
# return 1
# fi
#
# if ! _contains "$CF_Email" "@"; then
# _err "It seems that the CF_Email=$CF_Email is not a valid email address."
# _err "Please check and retry."
# return 1
# fi
#
# #save the api key and email to the account conf file.
# _saveaccountconf_mutable CF_Key "$CF_Key"
# _saveaccountconf_mutable CF_Email "$CF_Email"
#
# _debug "First detect the root zone"
# if ! _get_root "$fulldomain"; then
# _err "invalid domain"
# return 1
# fi
# _debug _domain_id "$_domain_id"
# _debug _sub_domain "$_sub_domain"
# _debug _domain "$_domain"
#
# _debug "Getting txt records"
# _cf_rest GET "zones/${_domain_id}/dns_records?type=TXT&name=$fulldomain"
#
# if ! printf "%s" "$response" | grep \"success\":true >/dev/null; then
# _err "Error"
# return 1
# fi
#
# # For wildcard cert, the main root domain and the wildcard domain have the same txt subdomain name, so
# # we can not use updating anymore.
# # count=$(printf "%s\n" "$response" | _egrep_o "\"count\":[^,]*" | cut -d : -f 2)
# # _debug count "$count"
# # if [ "$count" = "0" ]; then
# _info "Adding record"
# if _cf_rest POST "zones/$_domain_id/dns_records" "{\"type\":\"TXT\",\"name\":\"$fulldomain\",\"content\":\"$txtvalue\",\"ttl\":120}"; then
# if _contains "$response" "$fulldomain"; then
# _info "Added, OK"
# return 0
# elif _contains "$response" "The record already exists"; then
# _info "Already exists, OK"
# return 0
# else
# _err "Add txt record error."
# return 1
# fi
# fi
# _err "Add txt record error."
# return 1
# # else
# # _info "Updating record"
# # record_id=$(printf "%s\n" "$response" | _egrep_o "\"id\":\"[^\"]*\"" | cut -d : -f 2 | tr -d \" | head -n 1)
# # _debug "record_id" "$record_id"
# #
# # _cf_rest PUT "zones/$_domain_id/dns_records/$record_id" "{\"id\":\"$record_id\",\"type\":\"TXT\",\"name\":\"$fulldomain\",\"content\":\"$txtvalue\",\"zone_id\":\"$_domain_id\",\"zone_name\":\"$_domain\"}"
# # if [ "$?" = "0" ]; then
# # _info "Updated, OK"
# # return 0
# # fi
# # _err "Update error"
# # return 1
# # fi
#
#}
dns_zeit_rm() {
  # Record removal is not supported by this provider module yet; report
  # failure so acme.sh knows the challenge record was not cleaned up.
  return 1
}
#fulldomain txtvalue
#dns_cf_rm() {
# fulldomain=$1
# txtvalue=$2
#
# CF_Key="${CF_Key:-$(_readaccountconf_mutable CF_Key)}"
# CF_Email="${CF_Email:-$(_readaccountconf_mutable CF_Email)}"
# if [ -z "$CF_Key" ] || [ -z "$CF_Email" ]; then
# CF_Key=""
# CF_Email=""
# _err "You didn't specify a Cloudflare api key and email yet."
# _err "You can get yours from here https://dash.cloudflare.com/profile."
# return 1
# fi
#
# _debug "First detect the root zone"
# if ! _get_root "$fulldomain"; then
# _err "invalid domain"
# return 1
# fi
# _debug _domain_id "$_domain_id"
# _debug _sub_domain "$_sub_domain"
# _debug _domain "$_domain"
#
# _debug "Getting txt records"
# _cf_rest GET "zones/${_domain_id}/dns_records?type=TXT&name=$fulldomain&content=$txtvalue"
#
# if ! printf "%s" "$response" | grep \"success\":true >/dev/null; then
# _err "Error"
# return 1
# fi
#
# count=$(printf "%s\n" "$response" | _egrep_o "\"count\":[^,]*" | cut -d : -f 2)
# _debug count "$count"
# if [ "$count" = "0" ]; then
# _info "Don't need to remove."
# else
# record_id=$(printf "%s\n" "$response" | _egrep_o "\"id\":\"[^\"]*\"" | cut -d : -f 2 | tr -d \" | head -n 1)
# _debug "record_id" "$record_id"
# if [ -z "$record_id" ]; then
# _err "Can not get record id to remove."
# return 1
# fi
# if ! _cf_rest DELETE "zones/$_domain_id/dns_records/$record_id"; then
# _err "Delete record error."
# return 1
# fi
# _contains "$response" '"success":true'
# fi
#
#}
#################### Private functions below ##################################
#_acme-challenge.www.domain.com
#returns
# _sub_domain=_acme-challenge.www
# _domain=domain.com
# _domain_id=sdjkglgdfewsdfg
# Find the registered zone for a record name by probing the ZEIT API.
# Usage: _get_root _acme-challenge.www.domain.com
# Strips labels from the left one at a time until the API recognises the
# remainder as a zone. On success sets:
#   _sub_domain = labels left of the zone (e.g. _acme-challenge.www)
#   _domain     = the zone itself (e.g. domain.com)
#   _domain_id  = first record id found in the zone's record list
# Returns 0 on success, 1 if no zone matches.
_get_root() {
  domain=$1
  i=1
  p=1
  while true; do
    # Candidate zone: drop the first (i-1) labels.
    h=$(printf "%s" "$domain" | cut -d . -f $i-100)
    _debug h "$h"
    if [ -z "$h" ]; then
      #not valid
      return 1
    fi

    if ! _zeit_rest GET "domains/$h/records"; then
      return 1
    fi

    # The API answers "The domain was not found" for non-zones; anything
    # else means $h is (at least) a zone we control.
    if ! _contains "$response" "The domain was not found" >/dev/null; then
      _domain_id=$(echo "$response" | _egrep_o "\[.\"id\":\"[^\"]*\"" | _head_n 1 | cut -d : -f 2 | tr -d \")
      if [ "$_domain_id" ]; then
        # p is the previous i, i.e. how many labels belong to the subdomain.
        _sub_domain=$(printf "%s" "$domain" | cut -d . -f 1-$p)
        _domain=$h
        return 0
      fi
      return 1
    fi
    p=$i
    i=$(_math "$i" + 1)
  done
  return 1
}
# Minimal REST helper for the ZEIT API.
# Usage: _zeit_rest METHOD endpoint [json-body]
# Sends the request with bearer-token auth via the acme.sh _get/_post
# helpers and leaves the response body in the global $response.
# Returns 0 on transport success, 1 on failure.
_zeit_rest() {
  m=$1
  ep="$2"
  data="$3"
  _debug "$ep"

  export _H1="Authorization: Bearer $ZEIT_Token"
  export _H2="Content-Type: application/json"

  if [ "$m" != "GET" ]; then
    _debug data "$data"
    response="$(_post "$data" "$ZEIT_Api/$ep" "" "$m")"
  else
    response="$(_get "$ZEIT_Api/$ep")"
  fi

  if [ "$?" != "0" ]; then
    _err "error $ep"
    return 1
  fi
  _debug2 response "$response"
  return 0
}
| true
|
368fb638e31861df72d73f9719bd646791c8906a
|
Shell
|
FAU-Inf2/AuDoscore
|
/verify_single.sh
|
UTF-8
| 1,520
| 3.578125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Run a grading test (run_test.sh) and diff its output in test.latest/
# against the files under expected/. With REBUILD set, differing expected
# files are overwritten with the actual output instead of failing.
if [[ -n "$REBUILD" ]]; then
  echo -e "\033[1;31mWARNING: option REBUILD is set, will overwrite differing files\033[0m"
fi

# NOTE(review): hard-coded token checked into the repo — presumably a
# fixed test fixture rather than a real secret; confirm.
export AUDOSCORE_SECURITY_TOKEN="2c976d0b02898e9eb05155806bb65973";
( ./run_test.sh ) > /dev/null 2> /dev/null

error=0
for i in `find expected/ -type f`; do
  testfile=${i/expected/test.latest}
  if [[ -r "$testfile" ]]; then
    # Normalize nondeterministic failure messages before diffing.
    sed -i -e 's/Exception(test timed out after \([^ ]*\) milliseconds): [^"]*/TimeoutException after \1 ms/g' $testfile
    sed -i -e 's/StackOverflowError(): [^"]*/StackOverflowError()/g' $testfile
    if [[ "$i" == expected/run*.err ]] && [[ -s "$testfile" ]]; then
      # pretty print as json before diffing (if size > 0)
      cat $testfile | python -m json.tool > ${testfile}.new
      if [[ $? -ne 0 ]]; then
        echo -e "Above JSON is broken" >> $testfile
      else
        mv ${testfile}.new $testfile
      fi
    fi
    # Ignore build-noise lines (make/javac/Class.forName stack frames).
    diff -w -u -I '^make' -I '^Makefile:' -I '^javac ' -I '^\s*at java\.lang\.Class\.forName' $i $testfile
    ec=$?
    if [[ $ec -ne 0 ]] && [[ "$i" == expected/run*.err ]]; then
      # in case of JSON, try to parse and compare as JSON
      java -cp ../../lib/junitpoints.jar:../../lib/json-simple-1.1.1.jar tools.jsondiff.JSONDiff $i $testfile
      ec=$?
      if [[ -n "$REBUILD" ]]; then
        # in case of JSON, store pretty printed version
        cp $testfile $i
      fi
    fi
  else
    echo "$testfile does not exist..."
    ec=-1
  fi
  # Accumulate failures bitwise so any non-zero diff fails the run.
  error=$((error|ec))
  if [[ $ec -ne 0 ]] && [[ -n "$REBUILD" ]]; then
    cp $testfile $i
  fi
done;

rm -rf $(readlink -f test.latest)
rm test.latest
exit $error
| true
|
10d500d6381113f61e9689754865883462f45b5d
|
Shell
|
kernelcoffee/conf_files
|
/dotfiles/fedora/bashrc
|
UTF-8
| 1,328
| 2.796875
| 3
|
[
"WTFPL"
] |
permissive
|
# .bashrc
# Fedora user shell configuration: global defs, PATH additions, aliases,
# and per-tool environment (genymotion, rbenv, Android SDK, nvm).

# Source global definitions
if [ -f /etc/bashrc ]; then
	. /etc/bashrc
fi

# User specific environment
if ! [[ "$PATH" =~ "$HOME/.local/bin:$HOME/bin:" ]]
then
    PATH="$HOME/.local/bin:$HOME/bin:$PATH"
fi
export PATH

# Uncomment the following line if you don't like systemctl's auto-paging feature:
# export SYSTEMD_PAGER=

# User specific aliases and functions
alias "l=ls -l"
alias "la=ls -la"
alias "emacs=emacs -nw"
alias "ne=emacs"
alias "rm=rm -i"
alias "clearswap=su -c 'swapoff -a && swapon -a'"
alias "mountfreenas=sshfs freenas.local:/mnt/vol1 ~/mnt/freenas"
alias "mountkernelcoffee=sshfs kernelcoffee.org:/home/ ~/mnt/kernelcoffee"
alias "sshtunnel=ssh -qfN -L 3118:localhost:3118 kernelcoffee.org"
alias "activate=source env/bin/activate"

# Tool-specific environment variables and PATH entries.
export GENYMOTION_HOME=$HOME/Work/genymobile/build-soft-Desktop_Qt_5_9_1_GCC_64bit-Debug/dist
export GENYMOTION_INSTALLATION_PATH=$HOME/Work/genymobile/QA/MOTION-1508/genymotion
export PATH=$PATH:$HOME/Applications/bin
export PATH="$HOME/.rbenv/bin:$PATH"
export ANDROID_HOME=$HOME/Sdk/Android/sdk
export JAVA_HOME=/etc/alternatives/jre
export LC_ALL=en_US.UTF-8

export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh"  # This loads nvm
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion"  # This loads nvm bash_completion
| true
|
039d94d6eaf151db01a3669d06846ec8c94e9fbe
|
Shell
|
onyiny-ang/k8s-example-apps
|
/pacman-nodejs-app/tools/unjoin/unjoin.sh
|
UTF-8
| 720
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
# Unjoin a cluster from a Kubernetes federation: remove the service
# account, RBAC objects and cluster registrations created on join.

# Print CLI usage.
function usage {
  echo "${0} [host-context] [cluster-context]"
}

if [[ $# -ne 2 ]]; then
  echo "ERROR: Required arg(s) missing"
  usage
  exit 1
fi

HOST_CTX=${1}
JOIN_CTX=${2}

# Quote all expansions so context names are passed as single arguments.
kubectl --context="${JOIN_CTX}" delete sa "${JOIN_CTX}-${HOST_CTX}" -n federation
kubectl --context="${JOIN_CTX}" delete clusterrolebinding "federation-controller-manager:${JOIN_CTX}-${HOST_CTX}"
kubectl --context="${JOIN_CTX}" delete clusterrole "federation-controller-manager:${JOIN_CTX}-${HOST_CTX}"
kubectl --context="${HOST_CTX}" delete clusters "${JOIN_CTX}"
kubectl --context="${HOST_CTX}" delete federatedclusters "${JOIN_CTX}"

# Quote the RHS of != too: unquoted, [[ ]] treats it as a glob pattern,
# so a context name containing * or ? could mis-compare.
if [[ "${HOST_CTX}" != "${JOIN_CTX}" ]]; then
  kubectl --context="${JOIN_CTX}" delete ns federation
fi
| true
|
e55b4a8f199eb91e807a0ac5b9a569a3a07f349a
|
Shell
|
antleypk/red_hat_test
|
/install_wizard.sh
|
UTF-8
| 475
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
#running this bash script on a new rhel 8 image will produce desired results
# developed and tested on the following:
# ec2-user$  cat /etc/redhat-release
# Red Hat Enterprise Linux release 8.0 (Ootpa)

#install git
sudo yum install git -y
#grab the application code
git clone https://github.com/antleypk/red_hat_test.git
#move to application folder
cd red_hat_test
#initialize the application (capture stdout+stderr in start_log)
sudo ./install.sh &> start_log
| true
|
897ea27a8a3a23bda93a275f531cf35772c77406
|
Shell
|
AudiusProject/audius-protocol
|
/solana-programs/anchor/audius-data/scripts/parse-tx.sh
|
UTF-8
| 483
| 3.359375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Run the Solana transaction parser against the audius_data program.
# Expects PROTOCOL_DIR in the environment and the program keypair under
# ./target/deploy; optional tx hashes are passed as CLI arguments.
set -euo pipefail

TX_PARSER_DIR="$PROTOCOL_DIR/discovery-provider/solana-tx-parser"
# Derive the deployed program id from its keypair file.
AUDIUS_DATA_PROGRAM_ID=$(solana-keygen pubkey $PWD/target/deploy/audius_data-keypair.json)

echo "Installing parser deps if needed..."
cd "$TX_PARSER_DIR" && python3.9 -m pip install -r requirements.txt

echo "Running parser with tx hash "$@"... If no tx hash is provided, parser will default to all tx for program ID $AUDIUS_DATA_PROGRAM_ID"
TX_HASH="$@" python3.9 tx_parser.py
| true
|
90d36d83d82b05a18e2f109a967c24dd9c67e85c
|
Shell
|
dokku/dokku
|
/tests/unit/logs.bats
|
UTF-8
| 18,992
| 2.875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bats
load test_helper
# bats hook: runs before every test case.
setup() {
  global_setup
}
# bats hook: runs after every test case — delete the test app, then
# restore global state.
teardown() {
  destroy_app
  global_teardown
}
# "dokku logs:help" should print the plugin's usage summary.
@test "(logs) logs:help" {
run /bin/bash -c "dokku logs:help"
echo "output: $output"
echo "status: $status"
assert_output_contains "Manage log integration for an app"
}
# With no apps deployed, logs:report explains that; after an app is
# created it prints the per-app "logs information" section.
@test "(logs) logs:report" {
run /bin/bash -c "dokku logs:report"
echo "output: $output"
echo "status: $status"
assert_output_contains "You haven't deployed any applications yet"
# Create an app so the report has something to describe.
run create_app
echo "output: $output"
echo "status: $status"
assert_success
run /bin/bash -c "dokku logs:report 2>&1"
echo "output: $output"
echo "status: $status"
assert_success
assert_output_contains "$TEST_APP logs information"
}
@test "(logs) logs:report app" {
run create_app
echo "output: $output"
echo "status: $status"
assert_success
run /bin/bash -c "dokku logs:report $TEST_APP 2>&1"
echo "output: $output"
echo "status: $status"
assert_success
assert_output_contains "$TEST_APP logs information"
run /bin/bash -c "dokku logs:report $TEST_APP --invalid-flag 2>&1"
echo "output: $output"
echo "status: $status"
assert_failure
assert_output_contains "$TEST_APP logs information" 0
assert_output_contains "Invalid flag passed, valid flags: --logs-computed-max-size, --logs-global-max-size, --logs-global-vector-sink, --logs-max-size, --logs-vector-sink"
run /bin/bash -c "dokku logs:report $TEST_APP --logs-vector-sink 2>&1"
echo "output: $output"
echo "status: $status"
assert_success
assert_output_contains "$TEST_APP logs information" 0
assert_output_contains "Invalid flag passed" 0
run /bin/bash -c "dokku logs:report $TEST_APP --logs-global-vector-sink 2>&1"
echo "output: $output"
echo "status: $status"
assert_success
assert_output_contains "$TEST_APP logs information" 0
assert_output_contains "Invalid flag passed" 0
}
@test "(logs) logs:set [error]" {
run /bin/bash -c "dokku logs:set 2>&1"
echo "output: $output"
echo "status: $status"
assert_failure
assert_output_contains "Please specify an app to run the command on"
run /bin/bash -c "dokku logs:set ${TEST_APP}-non-existent" 2>&1
echo "output: $output"
echo "status: $status"
assert_failure
assert_output_contains "App $TEST_APP-non-existent does not exist"
run create_app
echo "output: $output"
echo "status: $status"
assert_success
run /bin/bash -c "dokku logs:set $TEST_APP" 2>&1
echo "output: $output"
echo "status: $status"
assert_failure
assert_output_contains "No property specified"
run /bin/bash -c "dokku logs:set $TEST_APP invalid" 2>&1
echo "output: $output"
echo "status: $status"
assert_failure
assert_output_contains "Invalid property specified, valid properties include: max-size, vector-sink"
run /bin/bash -c "dokku logs:set $TEST_APP invalid value" 2>&1
echo "output: $output"
echo "status: $status"
assert_failure
assert_output_contains "Invalid property specified, valid properties include: max-size, vector-sink"
}
@test "(logs) logs:set app" {
run create_app
echo "output: $output"
echo "status: $status"
assert_success
run /bin/bash -c "dokku logs:report $TEST_APP --logs-vector-sink 2>&1"
echo "output: $output"
echo "status: $status"
assert_success
assert_output_not_exists
run /bin/bash -c "dokku logs:set $TEST_APP vector-sink" 2>&1
echo "output: $output"
echo "status: $status"
assert_success
assert_output_contains "Unsetting vector-sink"
assert_output_contains "Writing updated vector config to /var/lib/dokku/data/logs/vector.json"
run /bin/bash -c "dokku logs:report $TEST_APP --logs-vector-sink 2>&1"
echo "output: $output"
echo "status: $status"
assert_success
assert_output_not_exists
run /bin/bash -c "dokku logs:set $TEST_APP vector-sink console://?encoding[codec]=json" 2>&1
echo "output: $output"
echo "status: $status"
assert_success
assert_output_contains "Setting vector-sink"
assert_output_contains "Writing updated vector config to /var/lib/dokku/data/logs/vector.json"
run /bin/bash -c "dokku logs:report $TEST_APP --logs-vector-sink 2>&1"
echo "output: $output"
echo "status: $status"
assert_success
assert_output "console://?encoding[codec]=json"
run /bin/bash -c "dokku logs:set $TEST_APP vector-sink datadog_logs://?api_key=abc123" 2>&1
echo "output: $output"
echo "status: $status"
assert_success
assert_output_contains "Setting vector-sink"
assert_output_contains "Writing updated vector config to /var/lib/dokku/data/logs/vector.json"
run /bin/bash -c "dokku logs:report $TEST_APP --logs-vector-sink 2>&1"
echo "output: $output"
echo "status: $status"
assert_success
assert_output "datadog_logs://?api_key=abc123"
run /bin/bash -c "dokku logs:set $TEST_APP vector-sink" 2>&1
echo "output: $output"
echo "status: $status"
assert_success
assert_output_contains "Unsetting vector-sink"
assert_output_contains "Writing updated vector config to /var/lib/dokku/data/logs/vector.json"
run /bin/bash -c "dokku logs:report $TEST_APP --logs-vector-sink 2>&1"
echo "output: $output"
echo "status: $status"
assert_success
assert_output_not_exists
run /bin/bash -c "dokku logs:set $TEST_APP vector-sink" 2>&1
echo "output: $output"
echo "status: $status"
assert_success
assert_output_contains "Unsetting vector-sink"
run /bin/bash -c "dokku logs:report $TEST_APP --logs-vector-sink 2>&1"
echo "output: $output"
echo "status: $status"
assert_success
assert_output_not_exists
run /bin/bash -c "dokku logs:report $TEST_APP --logs-max-size 2>&1"
echo "output: $output"
echo "status: $status"
assert_success
assert_output_not_exists
run /bin/bash -c "dokku logs:set $TEST_APP max-size" 2>&1
echo "output: $output"
echo "status: $status"
assert_success
assert_output_contains "Unsetting max-size"
run /bin/bash -c "dokku logs:report $TEST_APP --logs-max-size 2>&1"
echo "output: $output"
echo "status: $status"
assert_success
assert_output_not_exists
run /bin/bash -c "dokku logs:set $TEST_APP max-size 20m" 2>&1
echo "output: $output"
echo "status: $status"
assert_success
assert_output_contains "Setting max-size"
run /bin/bash -c "dokku logs:report $TEST_APP --logs-max-size 2>&1"
echo "output: $output"
echo "status: $status"
assert_success
assert_output "20m"
run /bin/bash -c "dokku logs:set $TEST_APP max-size unlimited" 2>&1
echo "output: $output"
echo "status: $status"
assert_success
assert_output_contains "Setting max-size"
run /bin/bash -c "dokku logs:report $TEST_APP --logs-max-size 2>&1"
echo "output: $output"
echo "status: $status"
assert_success
assert_output "unlimited"
run /bin/bash -c "dokku logs:set "$TEST_APP" max-size" 2>&1
echo "output: $output"
echo "status: $status"
assert_success
assert_output_contains "Unsetting max-size"
run /bin/bash -c "dokku logs:report $TEST_APP --logs-max-size 2>&1"
echo "output: $output"
echo "status: $status"
assert_success
assert_output_not_exists
}
@test "(logs) logs:set escaped uri" {
run create_app
echo "output: $output"
echo "status: $status"
assert_success
run /bin/bash -c "dokku logs:set $TEST_APP vector-sink http://?uri=https%3A//loggerservice.com%3A1234/%3Ftoken%3Dabc1234%26type%3Dvector%26key%3Dvalue%2Bvalue2"
echo "output: $output"
echo "status: $status"
assert_success
assert_output_contains "Setting vector-sink"
assert_output_contains "Writing updated vector config to /var/lib/dokku/data/logs/vector.json"
run /bin/bash -c "dokku logs:report $TEST_APP --logs-vector-sink 2>&1"
echo "output: $output"
echo "status: $status"
assert_success
assert_output "http://?uri=https%3A//loggerservice.com%3A1234/%3Ftoken%3Dabc1234%26type%3Dvector%26key%3Dvalue%2Bvalue2"
run /bin/bash -c "jq -r '.sinks[\"docker-sink:$TEST_APP\"].uri' /var/lib/dokku/data/logs/vector.json"
echo "output: $output"
echo "status: $status"
assert_success
assert_output "https://loggerservice.com:1234/?token=abc1234&type=vector&key=value+value2"
run /bin/bash -c "dokku logs:set $TEST_APP vector-sink 'aws_cloudwatch_logs://?create_missing_group=true&create_missing_stream=true&group_name=groupname&encoding[codec]=json®ion=sa-east-1&stream_name={{ host }}&auth[access_key_id]=KSDSIDJSAJD&auth[secret_access_key]=2932JSDJ%252BKSDSDJ'"
echo "output: $output"
echo "status: $status"
assert_success
assert_output_contains "Setting vector-sink"
assert_output_contains "Writing updated vector config to /var/lib/dokku/data/logs/vector.json"
run /bin/bash -c "dokku logs:report $TEST_APP --logs-vector-sink 2>&1"
echo "output: $output"
echo "status: $status"
assert_success
assert_output "aws_cloudwatch_logs://?create_missing_group=true&create_missing_stream=true&group_name=groupname&encoding[codec]=json®ion=sa-east-1&stream_name={{ host }}&auth[access_key_id]=KSDSIDJSAJD&auth[secret_access_key]=2932JSDJ%252BKSDSDJ"
run /bin/bash -c "jq -r '.sinks[\"docker-sink:$TEST_APP\"].auth.secret_access_key' /var/lib/dokku/data/logs/vector.json"
echo "output: $output"
echo "status: $status"
assert_success
assert_output "2932JSDJ+KSDSDJ"
}
@test "(logs) logs:set global" {
run create_app
echo "output: $output"
echo "status: $status"
assert_success
run /bin/bash -c "dokku logs:report $TEST_APP --logs-global-vector-sink 2>&1"
echo "output: $output"
echo "status: $status"
assert_success
assert_output_not_exists
run /bin/bash -c "dokku logs:set --global vector-sink" 2>&1
echo "output: $output"
echo "status: $status"
assert_success
assert_output_contains "Unsetting vector-sink"
assert_output_contains "Writing updated vector config to /var/lib/dokku/data/logs/vector.json"
run /bin/bash -c "dokku logs:report $TEST_APP --logs-global-vector-sink 2>&1"
echo "output: $output"
echo "status: $status"
assert_success
assert_output_not_exists
run /bin/bash -c "dokku logs:set --global vector-sink console://?encoding[codec]=json" 2>&1
echo "output: $output"
echo "status: $status"
assert_success
assert_output_contains "Setting vector-sink"
assert_output_contains "Writing updated vector config to /var/lib/dokku/data/logs/vector.json"
run /bin/bash -c "dokku logs:report $TEST_APP --logs-global-vector-sink 2>&1"
echo "output: $output"
echo "status: $status"
assert_success
assert_output "console://?encoding[codec]=json"
run /bin/bash -c "dokku logs:set --global vector-sink datadog_logs://?api_key=abc123" 2>&1
echo "output: $output"
echo "status: $status"
assert_success
assert_output_contains "Setting vector-sink"
assert_output_contains "Writing updated vector config to /var/lib/dokku/data/logs/vector.json"
run /bin/bash -c "dokku logs:report $TEST_APP --logs-global-vector-sink 2>&1"
echo "output: $output"
echo "status: $status"
assert_success
assert_output "datadog_logs://?api_key=abc123"
run /bin/bash -c "dokku logs:set --global vector-sink" 2>&1
echo "output: $output"
echo "status: $status"
assert_success
assert_output_contains "Unsetting vector-sink"
assert_output_contains "Writing updated vector config to /var/lib/dokku/data/logs/vector.json"
run /bin/bash -c "dokku logs:report $TEST_APP --logs-global-vector-sink 2>&1"
echo "output: $output"
echo "status: $status"
assert_success
assert_output_not_exists
run /bin/bash -c "dokku logs:set --global vector-sink" 2>&1
echo "output: $output"
echo "status: $status"
assert_success
assert_output_contains "Unsetting vector-sink"
run /bin/bash -c "dokku logs:report $TEST_APP --logs-global-vector-sink 2>&1"
echo "output: $output"
echo "status: $status"
assert_success
assert_output_not_exists
run /bin/bash -c "dokku logs:report $TEST_APP --logs-global-max-size 2>&1"
echo "output: $output"
echo "status: $status"
assert_success
assert_output "10m"
run /bin/bash -c "dokku logs:set --global max-size" 2>&1
echo "output: $output"
echo "status: $status"
assert_success
assert_output_contains "Unsetting max-size"
run /bin/bash -c "dokku logs:report $TEST_APP --logs-global-max-size 2>&1"
echo "output: $output"
echo "status: $status"
assert_success
assert_output "10m"
run /bin/bash -c "dokku logs:set --global max-size 20m" 2>&1
echo "output: $output"
echo "status: $status"
assert_success
assert_output_contains "Setting max-size"
run /bin/bash -c "dokku logs:report $TEST_APP --logs-global-max-size 2>&1"
echo "output: $output"
echo "status: $status"
assert_success
assert_output "20m"
run /bin/bash -c "dokku logs:set --global max-size unlimited" 2>&1
echo "output: $output"
echo "status: $status"
assert_success
assert_output_contains "Setting max-size"
run /bin/bash -c "dokku logs:report $TEST_APP --logs-global-max-size 2>&1"
echo "output: $output"
echo "status: $status"
assert_success
assert_output "unlimited"
run /bin/bash -c "dokku logs:set --global max-size" 2>&1
echo "output: $output"
echo "status: $status"
assert_success
assert_output_contains "Unsetting max-size"
run /bin/bash -c "dokku logs:report $TEST_APP --logs-global-max-size 2>&1"
echo "output: $output"
echo "status: $status"
assert_success
assert_output "10m"
}
@test "(logs) logs:set max-size with alternate log-driver daemon " {
if [[ "$REMOTE_CONTAINERS" == "true" ]]; then
skip "skipping due non-existent docker service in remote dev container"
fi
if [[ ! -f /etc/docker/daemon.json ]]; then
echo "{}" >/etc/docker/daemon.json
fi
driver="$(jq -r '."log-driver"' /etc/docker/daemon.json)"
local TMP_FILE=$(mktemp "/tmp/${DOKKU_DOMAIN}.XXXX")
run create_app
echo "output: $output"
echo "status: $status"
assert_success
run /bin/bash -c "dokku logs:set $TEST_APP max-size 20m 2>&1"
echo "output: $output"
echo "status: $status"
assert_success
assert_output_contains "Setting max-size"
run /bin/bash -c "echo '' | dokku plugin:trigger docker-args-process-deploy $TEST_APP 2>&1 | xargs"
echo "output: $output"
echo "status: $status"
assert_success
assert_output "--log-opt=max-size=20m"
DRIVER="journald" jq '."log-driver" = env.DRIVER' <"/etc/docker/daemon.json" >"$TMP_FILE"
mv "$TMP_FILE" /etc/docker/daemon.json
sudo service docker restart
run /bin/bash -c "dokku logs:set $TEST_APP max-size 20m 2>&1"
echo "output: $output"
echo "status: $status"
assert_success
assert_output_contains "Setting max-size"
run /bin/bash -c "echo '' | dokku plugin:trigger docker-args-process-deploy $TEST_APP 2>&1 | xargs"
echo "output: $output"
echo "status: $status"
assert_success
assert_output_not_exists
if [[ "$driver" = "null" ]]; then
DRIVER="$driver" jq 'del(."log-driver")' <"/etc/docker/daemon.json" >"$TMP_FILE"
else
DRIVER="$driver" jq '."log-driver" = env.DRIVER' <"/etc/docker/daemon.json" >"$TMP_FILE"
fi
mv "$TMP_FILE" /etc/docker/daemon.json
sudo service docker restart
run /bin/bash -c "dokku logs:set $TEST_APP max-size 20m 2>&1"
echo "output: $output"
echo "status: $status"
assert_success
assert_output_contains "Setting max-size"
run /bin/bash -c "echo '' | dokku plugin:trigger docker-args-process-deploy $TEST_APP 2>&1 | xargs"
echo "output: $output"
echo "status: $status"
assert_success
assert_output "--log-opt=max-size=20m"
}
@test "(logs) logs:set max-size with alternate log-driver" {
run create_app
echo "output: $output"
echo "status: $status"
assert_success
run /bin/bash -c "dokku logs:set $TEST_APP max-size 20m" 2>&1
echo "output: $output"
echo "status: $status"
assert_success
assert_output_contains "Setting max-size"
run /bin/bash -c "echo "" | dokku plugin:trigger docker-args-process-deploy $TEST_APP 2>&1 | xargs"
echo "output: $output"
echo "status: $status"
assert_success
assert_output "--log-opt=max-size=20m"
run /bin/bash -c "dokku docker-options:add $TEST_APP deploy --log-driver=local" 2>&1
echo "output: $output"
echo "status: $status"
assert_success
run /bin/bash -c "echo "" | dokku plugin:trigger docker-args-process-deploy $TEST_APP 2>&1 | xargs"
echo "output: $output"
echo "status: $status"
assert_success
assert_output "--log-opt=max-size=20m"
run /bin/bash -c "dokku docker-options:add $TEST_APP deploy --log-driver=json-file" 2>&1
echo "output: $output"
echo "status: $status"
assert_success
run /bin/bash -c "echo "" | dokku plugin:trigger docker-args-process-deploy $TEST_APP 2>&1 | xargs"
echo "output: $output"
echo "status: $status"
assert_success
assert_output "--log-opt=max-size=20m"
run /bin/bash -c "dokku docker-options:add $TEST_APP deploy --log-driver=journald" 2>&1
echo "output: $output"
echo "status: $status"
assert_success
run /bin/bash -c "echo "" | dokku plugin:trigger docker-args-process-deploy $TEST_APP 2>&1"
echo "output: $output"
echo "status: $status"
assert_success
assert_output_not_exists
}
@test "(logs) logs:vector" {
run /bin/bash -c "dokku logs:vector-logs 2>&1"
echo "output: $output"
echo "status: $status"
assert_failure
assert_output_contains "Vector container does not exist"
run /bin/bash -c "dokku apps:create example.com"
echo "output: $output"
echo "status: $status"
assert_success
run /bin/bash -c "dokku logs:vector-start 2>&1"
echo "output: $output"
echo "status: $status"
assert_success
assert_output_contains "Vector container is running"
run /bin/bash -c "sudo docker inspect --format='{{.HostConfig.RestartPolicy.Name}}' vector"
echo "output: $output"
echo "status: $status"
assert_success
assert_output "unless-stopped"
run /bin/bash -c "dokku logs:vector-logs 2>&1"
echo "output: $output"
echo "status: $status"
assert_success
assert_output_contains "Vector container logs"
run /bin/bash -c "dokku --force apps:destroy example.com"
echo "output: $output"
echo "status: $status"
assert_success
run /bin/bash -c "dokku logs:vector-logs --num 10 2>&1"
echo "output: $output"
echo "status: $status"
assert_success
assert_output_contains "Vector container logs"
assert_output_contains "vector:" 10
assert_line_count 11
run /bin/bash -c "dokku logs:vector-logs --num 5 2>&1"
echo "output: $output"
echo "status: $status"
assert_success
assert_output_contains "Vector container logs"
assert_output_contains "vector:" 5
assert_line_count 6
run /bin/bash -c "docker stop vector"
echo "output: $output"
echo "status: $status"
assert_success
run /bin/bash -c "dokku logs:vector-logs 2>&1"
echo "output: $output"
echo "status: $status"
assert_success
assert_output_contains "Vector container logs"
assert_output_contains "Vector container is not running"
run /bin/bash -c "dokku logs:vector-stop 2>&1"
echo "output: $output"
echo "status: $status"
assert_success
assert_output_contains "Stopping and removing vector container"
}
| true
|
837c907d6e3d353561451ff2d6b75601e1e9e529
|
Shell
|
yoshihitoh/zstd-codec
|
/cpp/build-emscripten-release.sh
|
UTF-8
| 231
| 2.640625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Build the Emscripten (release) configuration of the C++ project.
# FIX: the original shebang was "#!/bin/env bash"; env lives in /usr/bin
# on virtually every system, so that shebang failed outright.
set -e
# Absolute path of the directory containing this script, independent of
# the caller's working directory.
CPP_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Parallel job count: nproc on Linux, sysctl on macOS/BSD.  The original
# used only `sysctl -n hw.ncpu`, which does not exist on Linux.
JOBS="$(nproc 2>/dev/null || sysctl -n hw.ncpu)"
cd "${CPP_DIR}" && \
    bash update_projects.sh && \
    cd build-emscripten && \
    emmake make -j"${JOBS}" config=release verbose=1
| true
|
f98ae0df6663175f6c6737f2fef1114a913a1bdb
|
Shell
|
fishcg/node-web
|
/entrypoint.sh
|
UTF-8
| 107
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/sh
# Container entrypoint: only the "servers" command is accepted; it is
# exec'd under node so node becomes PID 1 and receives signals directly.
if [ "$1" != 'servers' ]; then
  # Diagnostics belong on stderr (was stdout).
  echo "sorry, '$1' is not a command" >&2
  exit 1
fi
# Quote the expansion; the original `exec node $1` would word-split.
exec node "$1"
| true
|
f9f4ab6736a2cce7785f4ed2d9f0fb708efd8762
|
Shell
|
ctk3b/dotfiles-1
|
/install.sh
|
UTF-8
| 1,021
| 2.96875
| 3
|
[] |
no_license
|
# symlink all dotfiles to home directory
hash rcup 2>/dev/null || { echo >&2 "Please install rcm. Aborting."; exit 1; }
rcup -x README.md -x install.sh -x Sublime -x iterm2
# Install VIM plugins
hash vim 2>/dev/null || { echo >&2 "Please install vim. Aborting."; exit 1; }
if [ ! -d ~/.vim/bundle/Vundle.vim ]; then
  git clone https://github.com/VundleVim/Vundle.vim.git ~/.vim/bundle/Vundle.vim
fi
vim +PluginInstall +qall
# Install TMUX plugins
hash tmux 2>/dev/null || { echo >&2 "Please install tmux. Aborting."; exit 1; }
# BUG FIX: the old gate `[ ! $(echo "<ver> < 2.1" | bc) ]` was always
# false — bc prints "0" or "1", never the empty string that `!` tests
# for — so the minimum-version check never fired.  Compare bc's output
# explicitly.  (Versions with letter suffixes, e.g. "3.3a", make bc
# error and print nothing; the gate then passes, same as before.)
tmux_version="$(tmux -V | cut -d' ' -f2)"
if [ "$(echo "$tmux_version < 2.1" | bc)" = "1" ]; then
  echo >&2 "Please upgrade tmux to 2.1 or greater. Aborting."; exit 1;
fi
if [ ! -d ~/.tmux/plugins/tpm ]; then
  git clone https://github.com/tmux-plugins/tpm ~/.tmux/plugins/tpm
fi
~/.tmux/plugins/tpm/bin/install_plugins
# Install Iterm2 custom profiles
if [ -d ~/Library/Application\ Support/iTerm2/DynamicProfiles/ ]; then
  cp iterm2_dynamic_profiles.json ~/Library/Application\ Support/iTerm2/DynamicProfiles/
fi
| true
|
834778e559d5cfc78a7f6e30958f93d971815179
|
Shell
|
iTwenty/Scripts
|
/totp-gen-template
|
UTF-8
| 528
| 3.21875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Interactive TOTP generator template: pick an account from the menu and
# a 6-digit code is printed via oathtool.  Replace each "YOUR SECRET KEY"
# with the account's base32 TOTP secret.
OPTIONS="google microsoft dropbox facebook quit"
select opt in $OPTIONS; do
  case "$opt" in
    google)    oathtool --base32 --totp "YOUR SECRET KEY" -d 6 ;;
    microsoft) oathtool --base32 --totp "YOUR SECRET KEY" -d 6 ;;
    dropbox)   oathtool --base32 --totp "YOUR SECRET KEY" -d 6 ;;
    facebook)  oathtool --base32 --totp "YOUR SECRET KEY" -d 6 ;;
    quit)      exit ;;
    *)         clear
               echo "Choose an available option." ;;
  esac
done
| true
|
afd99e511ccc8cb4e538eec3f2b417565c81e83b
|
Shell
|
Alcloud/medical-chat-server
|
/package/files/scripts/start.sh
|
UTF-8
| 3,710
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/sh
# Container entrypoint: fill in configuration defaults, render the
# config files with confd, run the DB migrations, then start the
# Signal-Server service.
set -e
# if $variable is not set, then default to $value
# --- Redis endpoints ---
export REDIS_CACHE_URL=${REDIS_CACHE_URL:-redis://redis:6379/1}
export REDIS_CACHE_REPLICA=${REDIS_CACHE_REPLICA:-redis://redis:6379/2}
export REDIS_MESSAGE_CACHE_URL=${REDIS_MESSAGE_CACHE_URL:-redis://redis:6379/3}
export REDIS_MESSAGE_CACHE_REPLICA=${REDIS_MESSAGE_CACHE_REPLICA:-redis://redis:6379/4}
export REDIS_DIRECTORY_URL=${REDIS_DIRECTORY_URL:-redis://redis:6379/5}
export REDIS_DIRECTORY_REPLICA=${REDIS_DIRECTORY_REPLICA:-redis://redis:6379/6}
export REDIS_PUSH_SCHEDULER_URL=${REDIS_PUSH_SCHEDULER_URL:-redis://redis:6379/7}
export REDIS_PUSH_SCHEDULER_REPLICA=${REDIS_PUSH_SCHEDULER_REPLICA:-redis://redis:6379/8}
export PUSH_QUEUE_SIZE=${PUSH_QUEUE_SIZE:-1024}
# --- MinIO (attachments + profiles object storage) ---
export MINIO_ATTACHMENTS_ACCESS_KEY=${MINIO_ATTACHMENTS_ACCESS_KEY:-minioadmin}
export MINIO_ATTACHMENTS_ACCESS_SECRET=${MINIO_ATTACHMENTS_ACCESS_SECRET:-minioadmin}
export MINIO_ATTACHMENTS_BUCKET=${MINIO_ATTACHMENTS_BUCKET:-bucket-for-attachments}
export MINIO_PROFILES_ACCESS_KEY=${MINIO_PROFILES_ACCESS_KEY:-minioadmin}
export MINIO_PROFILES_SECRET=${MINIO_PROFILES_SECRET:-minioadmin}
export MINIO_PROFILES_BUCKET=${MINIO_PROFILES_BUCKET:-bucket-for-profile}
export MINIO_PROFILES_REGION=${MINIO_PROFILES_REGION:-eu-west-1}
# --- Postgres (accounts + message store) ---
export POSTGRES_DATABASE_DRIVER_CLASS=${POSTGRES_DATABASE_DRIVER_CLASS:-org.postgresql.Driver}
export POSTGRES_DATABASE_USER=${POSTGRES_DATABASE_USER:-postgres}
export POSTGRES_DATABASE_PASSWORD=${POSTGRES_DATABASE_PASSWORD:-test123test123}
export POSTGRES_DATABASE_URL=${POSTGRES_DATABASE_URL:-jdbc:postgresql://postgres:5432/accountdb}
export POSTGRES_MESSAGE_STORE_DRIVER_CLASS=${POSTGRES_MESSAGE_STORE_DRIVER_CLASS:-org.postgresql.Driver}
export POSTGRES_MESSAGE_STORE_USER=${POSTGRES_MESSAGE_STORE_USER:-postgres}
export POSTGRES_MESSAGE_STORE_PASSWORD=${POSTGRES_MESSAGE_STORE_PASSWORD:-test123test123}
export POSTGRES_MESSAGE_STORE_URL=${POSTGRES_MESSAGE_STORE_URL:-jdbc:postgresql://postgres:5432/messagedb}
# --- Push notification credentials ---
# NOTE(review): real-looking APN/FCM credentials are hardcoded as
# defaults below; these should come from secrets, not the image.
export APN_BUNDLE_ID=${APN_BUNDLE_ID:-com.secret.signalTest}
export APN_PUSH_CERTIFICATE=${APN_PUSH_CERTIFICATE:-/home/wire/signal/Signal-Server/config/signal.p12}
export APN_PUSH_KEY=${APN_PUSH_KEY:-123456}
export FCM_SENDER_ID=${FCM_SENDER_ID:-947045074136}
export FCM_API_KEY=${FCM_API_KEY:-AIzaSyCH0VC5Gkwm_X0gNyq486el7KniqriCvIQ}
# --- Dropwizard server connectors ---
export APP_SERVER_TYPE=${APP_SERVER_TYPE:-http}
export APP_SERVER_PORT=${APP_SERVER_PORT:-9002}
export ADMIN_SERVER_TYPE=${ADMIN_SERVER_TYPE:-http}
export ADMIN_SERVER_PORT=${ADMIN_SERVER_PORT:-9003}
# NOTE(review): this bare `export` dumps the entire environment
# (including the passwords above) to the container log — debugging aid
# only; consider removing in production.
export
echo "*************************************************************"
echo "Creating config files using confd"
echo "*************************************************************"
/usr/local/bin/confd -onetime -backend env
echo "*************************************************************"
echo "Generated system configuration"
echo "*************************************************************"
cat /config/sample1.yml
echo "*************************************************************"
echo "Creating output directory"
echo "*************************************************************"
# FIX: POSTALREGISTRATION_OUTPUT_DIR has no default anywhere above; the
# original unquoted `mkdir -p $POSTALREGISTRATION_OUTPUT_DIR` died with
# an opaque "missing operand" when it was unset.  Fail fast with a clear
# message instead (and quote the expansion).
mkdir -p "${POSTALREGISTRATION_OUTPUT_DIR:?POSTALREGISTRATION_OUTPUT_DIR must be set}"
echo "*************************************************************"
echo "Migrate databases"
echo "*************************************************************"
java -jar /tmp/TextSecureServer-1.88.jar accountdb migrate /config/sample1.yml
java -jar /tmp/TextSecureServer-1.88.jar messagedb migrate /config/sample1.yml
echo "*************************************************************"
echo "Starting Signal Service"
echo "*************************************************************"
java -jar /tmp/TextSecureServer-1.88.jar server /config/sample1.yml -s
| true
|
d37af4de6378312491ed3d1d58936f446d9214c9
|
Shell
|
jrcharney/scragur
|
/webget.sh
|
UTF-8
| 11,434
| 3.8125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# File: webget.sh
# Date: 22 Aug 2013
# Version: 1.0
# Author: Jason Charney (jrcharneyATgmailDOTcom)
# Info: A library full of common functions used for web extraction
# Usage: . webget.sh
# Notes:
# * Only functions should be in this library.
# TODO: What about global variables?
# TODO: Create a function that gets a random user agent that does not belong to a mobile device.
# Source the ua variable from the hidden file ".getweb". Create this file if it doesn't exist.
# ".getweb" is where we will store and write our user agent string (UAS) for creating a random user agent.
# Bootstrap the user-agent cache: if .getweb is missing, create it with
# an empty ua= assignment (the quoted 'EOF' delimiter prevents any
# expansion inside the here-doc), then source it so $ua exists for the
# fetch helpers below.
[[ ! -f .getweb ]] && cat << 'EOF' > .getweb
ua=
EOF
. .getweb
# Func: srng ("Seeded Random Number Generator")
# NOTE: Bash's $RANDOM variable will only generate a number between 0 and 32767.
# Therefore it is not ideal for creating random encryption keys.
# For that reason alone, awk gets to do the SRNGing.
# NOTE: To set the SRNG for a random number within a range, use
# awk '{srand();print int(rand()*($max-$min))+$min;}'
# of corse you coud always use perl
# perl -e 'print int(rand($max-$min))+$min'
# NOTE: A do-while loop was added to ensure that a divbyzero does not occur.
# The reciprocal is used because r is a floating point between 0 and 1.
# I'll acknowledge, I borrowed this from http://awk.info/?tip/random with some modification.
srng(){
  # Print one random positive integer.  awk does the generation because
  # bash's $RANDOM only spans 0..32767.
  # FIX: seed from /dev/urandom instead of /dev/random — urandom never
  # blocks, while /dev/random can stall on entropy-starved systems
  # (headless VMs, containers).
  awk 'BEGIN{
    "od -tu4 -N4 -A n /dev/urandom" | getline
    srand(0+$0);
    do{ r=rand(); }while(r == 0);   # guard against div-by-zero below
    print int(1/r);
  }'
}
# Func: grua ("Get Random User Agent")
# Info: Get a random user agent string (UAS) from UserAgentString.com
# Fetch one random, recent user-agent string from useragentstring.com.
# Picks a random browser page, scrapes the first ten (non-Windows-NT)
# entries, and prints one of them.  Network-dependent: on fetch failure
# the list is empty (see NOTE below).
grua(){
# CHANGE: No more Internet%20Explorer. (Too many OLD entries!)
# CHANGE: No more Windows NT. (Who uses that anymore?!)
local pages=(Chrome Firefox Opera Safari)
local pc=${#pages[*]} # number of elements in the pages array
local rn=$(srng) # A random number to generate
(( rn %= pc )) # narrow down the values of the rn to the number of items in the array.
local page="${pages[$rn]}" # Pick the page to use
# UAS.com require a slash at the end of the address, other wise it returns nothing.
# gp "http://www.useragentstring.com/pages/${page}/"
# They've also decided to use single quotes. Nice try, guys.
# These lists are very long, so we'll just use one of the first ten instances.
# wget ... - fetch a UAS.com page
# sed ... - extract the content list (now, extract the contents list through the end of the page)
# sed ... - Replace any text that preceds a <br /> tag with a new line. (Chrome only)
# sed ... - Delete all blank lines. (Chrome only)
# sed ... - replace all closing anchor-list combos with newline
# sed ... - replace all opening list tags with newline
# sed ... - strip out all opening list-anchor combos
# sed ... - delete all outstaning lines that contain HTML tags
# sed ... - Delete the first line that starts with a space through the end of the page.
# (Get's rid of JavaScript and Google's Urchin at the end of the page)
# sed ... - Delete all the Windows NT entries. (Try not to look shady to the server.)
# sed ... - print the first ten lines since those are generally the latest browser UAS's
# TODO: Expand the last sed to more than 10 commands. (Idea for more variety but pulls up older entries.)
# NOTE: NO CHARACTERS AFTER BACKSLASH! Otherwise you're going to have a bad time!
local list=$( [[ "$page" = "Chrome" ]] && {
wget -q -O- -w 20 --random-wait --user-agent="${ua}" "http://www.useragentstring.com/pages/${page}/" \
| sed -r -n "/^<div id='liste'>/,\$p" \
| sed -r -n "s/^.*<br *\/>/\n/g;p" \
| sed -r -n "/^\$/d;p" \
| sed -r -n "s/<\/a><\/li>/\n/g;p" \
| sed -r -n "s/<ul[^>]*>/\n/g;p" \
| sed -r -n "s/<li><a href='[^']*'[^>]*>//g;p" \
| sed -r -n "/^<.*/d;p" \
| sed -r -n "/^ +/,\$d;p" \
| sed -r -n "/Windows NT/d;p" \
| sed -r -n "1,10p"
} || {
wget -q -O- -w 20 --random-wait --user-agent="${ua}" "http://www.useragentstring.com/pages/${page}/" \
| sed -r -n "/^<div id='liste'>/,\$p" \
| sed -r -n "s/<\/a><\/li>/\n/g;p" \
| sed -r -n "s/<ul[^>]*>/\n/g;p" \
| sed -r -n "s/<li><a href='[^']*'[^>]*>//g;p" \
| sed -r -n "/^<.*/d;p" \
| sed -r -n "/^ +/,\$d;p" \
| sed -r -n "/Windows NT/d;p" \
| sed -r -n "1,10p"
}
)
IFSB=$IFS # Back up the IFS variable
IFS=$'\n' # "array items (rows) are separated by newlines"
list=(${list})
local ic=${#list[*]}
rn=$(srng)
# NOTE(review): if the fetch produced nothing, ic is 0 and this modulo
# divides by zero — confirm the site is reachable before relying on it.
(( rn %= ic ))
local ua="${list[$rn]}"
IFS=$IFSB # Restore the IFS variable.
echo "${ua}"
}
# Set the ua string for the application
ua=$(grua) # use this only once! The fact that is used here means you don't need to redeclare it.
# Delete .getua and create a new .getua file.
# TODO: What if instead of deleting and creating a new .getua we just overwrite the value?
[[ -f .getweb ]] && rm .getweb
printf "ua=\"%s\"\n" "${ua}" > .getweb # TODO: tee this command
# Func: gp ("getPage")
# Info: Fetch a page and print to standard output (think of it as wget meets cat)
# Args: $1 = $url = URL to
# Features/Improvements:
# * --random-wait (randomize the wait time so you don't tick off the server)
# * -w 20 ( set the random wait to randomize the wait time to an extent.)
# * --user-agent (grua should generate one
# NOTE: This function does not get the contents from files, but the page that the contents are located.
# If you want to get something from a file, try gf.
gp(){
  # Stream the page at $1 to stdout ("wget meets cat") using the
  # randomized user agent; abort the whole script if the fetch fails.
  if ! wget -q -O- -w 20 --random-wait --user-agent="${ua}" "${1}"; then
    printf "ERROR: gp could not fetch %s\n" "${1}"
    exit 1
  fi
}
# Func: gf ("getFile")
# Info: Download the file to a folder
# $1 = The file
# $2 = The folder
# TODO: should I use -O instead of -P?
# NOTE: -P's default value is ".", that is the current directory.
# For getimgur, we use -P to create a directory.
# gf(){
# wget -q -w 20 --random-wait --user-agent="${ipadua}" -P "${2}" "${1}"
# [[ "$?" != "0" ]] && { printf "ERROR: gf could not fetch %s\n" "${1}"; exit 1; }
# }
# $1 = The file to get
# $2 = The name to save it as (with path if necessary)
# TODO TODO TODO WARNING! THIS NEW VERSION OF THIS FUNCTION IS EXPERIMENTAL! TODO TODO TODO
# Download a file.  One arg: save into the current directory under its
# remote name.  Two args: save $1 as path/name $2, creating directories
# as needed.
gf(){
  case "$#" in
    0) printf "ERROR: gf needs at least one argument to work."; exit 1;;
    1) # Download the file into the current directory
      wget -q -w 20 --random-wait --user-agent="${ua}" "${1}"
      [[ "$?" != "0" ]] && { printf "ERROR: gf could not fetch %s\n" "${1}"; exit 1; }
      ;;
    2) # Put the file at a specified location with a specific name
      local pn=${2%/*}   # directory part of the destination
      local fn=${2##*/}  # file-name part of the destination
      [[ "${pn}" == "${fn}" ]] && pn="." # no slash in $2 -> current dir
      [[ ! -d "${pn}" ]] && mkdir -p "${pn}" # create missing directories
      # BUG FIX: this branch used the undefined ${ipadua} (left over
      # from the commented-out old version); use ${ua} like every other
      # fetch helper in this file.
      wget -q -w 20 --random-wait --user-agent="${ua}" -O "${pn}"/"${fn}" "${1}"
      [[ "$?" != "0" ]] && { printf "ERROR: gf could not fetch %s\n" "${1}"; exit 1; }
      ;;
    *) printf "ERROR: gf can't handle more than two arguments...yet."; exit 1;;
  esac
}
# Func: gfx ("get file extension")
# Info: get the file extension and make it look pretty
# Args: "${1}" is a file name, not a URL
# TODO: make similar functions for fetching file names, paths, url strings
# TODO: This version is experimental
# Func: gfx ("get file extension")
# Print the lowercased extension of $1 (path or URL components are
# stripped first).  NOTE: if the name has no dot, the whole lowercased
# name is printed — same as the original behavior.
gfx(){
  local file="${1##*/}"   # strip any leading path/URL components
  local ext="${file##*.}" # text after the last dot
  # printf instead of echo: safe even if the extension happens to look
  # like an echo option (e.g. "-n").
  printf '%s\n' "${ext,,}"
}
# Func: xp ("Check Page")
# Info: Check that a URL exists (spider/HEAD-style request) before proceeding.
# Args: $1 = the URL to probe
# CAUTION: Some sites do not like spiders!
# NOTE: wget --server-response writes the headers to stderr, hence the 2>&1
#       so sed can see them.
# NOTE(review): the sed pattern only matches "HTTP/1.1" status lines; servers
#       answering with HTTP/2 would fall into the error branch — confirm.
xp(){
local response=$( wget -q -w 20 --random-wait --user-agent="${ua}" --server-response --spider "${1}" 2>&1 | sed -n -r 's/ HTTP\/1.1 (.*)/\1/p')
case "$response" in
  200\ OK) printf "\x1b[1;32mOK!\x1b[0m\n" ;;  # fixed: dropped stray "${1}" arg to a format with no %s
  404\ Not\ Found) printf "\x1b[1;31mNOT FOUND!\x1b[0m Aborting.\n"; exit 1 ;;
  *) printf "\x1b[1;31mERROR: %s\x1b[0m Aborting.\n" "$response"; exit 1 ;;
esac
}
# --------------
# TODO: The following functions should probably go in a library meant for archiving.
# Func: xf ("Check File")
# Info: Check whether the archive we are downloading already exists (either as
#       a directory or a .cbz file) and ask whether it should be replaced.
#       "Yes" deletes the old archive; "No" or three invalid answers aborts.
# Args: $1 = archive base name (without the .cbz extension)
xf(){
local fn="${1}"
local try=0   # fixed: was an uninitialized global counter
if [[ -f "${fn}.cbz" || -d "${fn}" ]]; then
  printf "This archive already exists.\n";
  while true; do
    read -p "Do you wish to replace this archive? " yn
    case $yn in
      [Yy]* )
        [[ -f "${fn}.cbz" ]] && rm -f -- "${fn}.cbz"
        # fixed: 'rm "${fn}"/*; rmdir "${fn}"' failed on empty directories
        # (unmatched glob) and on nested subdirectories
        [[ -d "${fn}" ]] && rm -rf -- "${fn}"
        break
        ;;
      [Nn]* ) printf "ERROR: Program aborted\n"; exit 1;;
      *)
        printf "Invalid entry. ";
        ((try++))
        [[ $try -eq 3 ]] && { printf "Program aborted after three tries.\n"; exit 1; } || { printf "Please try again.\n"; }
        ;;
    esac
  done
fi
}
# Func: ask
# Info: Prompt the user with a question and return the (non-blank) reply.
# Note: three blank replies in a row abort the program.
# Args: $1 = question text (a trailing space is appended for the prompt)
# Returns: the reply, printed on stdout
# Usage: answer_variable=$(ask "Question string?")
# Note: read is used without -r, matching the original behavior (backslashes
#       in the reply are interpreted).
ask(){
local prompt="${1}"   # keep the question in a named local for clarity
local reply
local attempt
for attempt in 1 2 3; do
  read -p "${prompt} " reply
  if [[ -n "${reply}" ]]; then
    echo "${reply}"   # hand the answer back on stdout
    return 0
  fi
done
# Fell through: three blank answers in a row.
printf "Sorry, I didn't get a valid response after three tries. Aborting.\n"
exit 1
}
# Func: askyn
# Info: Like ask, but for yes-or-no questions.
# CAUTION: prints 0 on stdout for a Yes answer and 1 for a No answer
#          (mirroring shell true/false exit-status conventions).
# Note: blank or unrecognized replies count toward a three-strike abort.
# Args: $1 = question text
askyn(){
local prompt="${1}"
local strikes=0
local verdict
while true; do
  read -p "${prompt} " reply
  case ${reply} in
    [Yy]* ) verdict=0; break ;;
    [Nn]* ) verdict=1; break ;;
    * )
      # Blank and invalid replies are handled identically here,
      # collapsing the duplicated counting logic into one arm.
      (( strikes++ ))
      [[ $strikes -eq 3 ]] && { printf "Sorry, I didn't get a valid response after three tries. Aborting.\n"; exit 1; }
      ;;
  esac
done
echo "${verdict}"   # print the answer
}
| true
|
8f0801ed583207f0adc3a5558fbd2a6b81c9abf7
|
Shell
|
rajeshprasanth/espresso-automation-scripts
|
/vcrelax2scf
|
UTF-8
| 4,099
| 3.859375
| 4
|
[] |
no_license
|
#!/bin/bash
#
#
#
#============================================================================
# vcrelax2scf vcrelax_input vcrelax_output scf_input
#============================================================================
#---------------------------------------------#
# Check if f90nml python library is installed #
#---------------------------------------------#
#-----------------------------------------------------#
# Check if the script takes right number of arguments #
#-----------------------------------------------------#
# Print usage information on stdout and exit with status 2.
# Fixed: $0 is now passed as a printf *argument* instead of being expanded
# inside the format string (a '%' in the program path would have corrupted
# the output).
help_message() {
        printf 'Usage: %s [VCRELAX INPUT FILE] [VCRELAX OUTPUT FILE] [SCF INPUT FILE]\n' "$0"
        printf "\n"
        printf "Mandatory Arguments:\n"
        printf "VCRELAX INPUT FILE\t\tInput file for Variable Cell Relaxation calculation.\n"
        printf "VCRELAX OUTPUT FILE\t\tOutput file from Variable Cell Relaxation calculation.\n"
        printf "SCF INPUT FILE\t\t\tInput file for SCF calculation\n"
        printf "\t\t\t\t(File content will be generated by this code)\n"
        printf "Optional Arguments:\n"
        printf "\t -h, --help\t\tshows this help message and exits\n"
        printf "\t -v, --version\t\tshows program version and exits\n"
}
# Print the program version for the program name given in $1.
# Fixed: $1 is passed as a printf argument rather than expanded inside the
# format string (format-injection hazard if the name contained '%').
version() {
        printf '%s 1.0.0\nFor more information contact <rajeshprasanth@rediffmail.com>\n' "$1"
}
if [ $# -ne 3 ]; then
        # Wrong number of arguments: honour the help/version flags,
        # otherwise fall back to the usage message.
        case "$1" in
                -h|--help)    help_message && exit 1 ;;
                -v|--version) version "$0" && exit 1 ;;
                *)            help_message && exit 1 ;;
        esac
else
        #------------------------------------#
        # Assigning filenames from arguments #
        #------------------------------------#
        vcrelax_input=$1
        vcrelax_output=$2
        scf_input=$3
        # Both input files must exist before we start.
        # (All filename variables are quoted so paths with spaces work.)
        if ! test -f "$vcrelax_input"; then
                printf '%s not found !\nTerminating script\n' "$1" && exit 1
        fi
        if ! test -f "$vcrelax_output"; then
                printf '%s not found !\nTerminating script\n' "$2" && exit 1
        fi
        # The f90nml python tool is required to rewrite the Fortran namelists.
        if ! command -v f90nml > /dev/null; then
                printf "The python library f90nml not found." && exit 1
        fi
        # Use an unpredictable temp file instead of the fixed /tmp/tempfilexyz.
        tmpfile=$(mktemp) || { printf 'Could not create a temporary file.\n'; exit 1; }
        #---------------------------#
        # Update "CONTROL" namelist #
        #---------------------------#
        f90nml "$vcrelax_input" -g control -v calculation='scf' -o "$tmpfile"
        #--------------------------------------------------------------------------#
        # Update "SYSTEM" namelist                                                 #
        # Overide ibrav to 0;celldm(1:6) to 0.0 and A,B,C,cosAB,cosAC,cosBC to 0.0 #
        #--------------------------------------------------------------------------#
        f90nml "$tmpfile" -g system -v ibrav=0 -v celldm\(1\)=0.0,celldm\(2\)=0.0,celldm\(3\)=0.0,celldm\(4\)=0.0,celldm\(5\)=0.0,celldm\(6\)=0.0 -v A=0.0,B=0.0,C=0.0,cosAB=0.0,cosBC=0.0,cosAC=0.0 -o "$scf_input"
        rm -f -- "$tmpfile"
        #-----------------------------#
        # Reading somemore parameters #
        #-----------------------------#
        ntyp=$(grep -i ntyp "$vcrelax_input" | gawk -F= '{print $2}')
        nat=$(grep -i nat "$vcrelax_input" | gawk -F= '{print $2}')
        #-------------------------#
        # Printing Atomic Species #
        #-------------------------#
        echo "" >> "$scf_input"
        grep ATOMIC_SPECIES -A "$ntyp" "$vcrelax_input" >> "$scf_input"
        #--------------------------#
        # Printing Cell Parameters #
        #--------------------------#
        # NOTE(review): "-A $ntyp" looks odd here — a cell has exactly three
        # vectors, so "-A 3" seems intended; confirm before changing.
        # The sed pair rewrites '(' / ')' into '{' / '}'.
        echo "" >> "$scf_input"
        sed -n /"Begin final coordinates"/,/"End final coordinates"/p "$vcrelax_output" |grep CELL_PARAMETERS -A "$ntyp"|sed s/\(/{/g|sed s/\)/}/g >> "$scf_input"
        #---------------------------#
        # Printing Atomic Positions #
        #---------------------------#
        echo "" >> "$scf_input"
        sed -n /"Begin final coordinates"/,/"End final coordinates"/p "$vcrelax_output" |grep ATOMIC_POSITIONS -A "$nat"|sed s/\(/{/g|sed s/\)/}/g >> "$scf_input"
        #-------------------#
        # Printing K Points #
        #-------------------#
        echo "" >> "$scf_input"
        grep K_POINTS -A 2 "$vcrelax_input"|sed s/\(/{/g|sed s/\)/}/g >> "$scf_input"
fi
#if [ $# -ne 3 ]
#then
#fi
| true
|
e566f7897a1a8d5e80efc2d70e694b32a917c03b
|
Shell
|
zfs-linux/KQTest
|
/ZFS-Test_Suite/tests/functional/rename_dirs/rename_dirs_001_pos.ksh
|
UTF-8
| 2,286
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2007 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
# ident "@(#)rename_dirs_001_pos.ksh 1.1 07/05/25 SMI"
#
. $STF_SUITE/commands.cfg
. $STF_SUITE/include/libtest.kshlib
. $STF_SUITE/include/default_common_varible.kshlib
###########################################################################
#
# __stc_assertion_start
#
# ID: rename_dirs_001_pos
#
# DESCRIPTION:
# Create two directory trees in ZFS filesystem, and concurently rename
# directory across the two trees. ZFS should be able to handle the race
# situation.
#
# STRATEGY:
# 1. Create a ZFS filesystem
# 2. Make two directory tree in the zfs file system
# 3. Continually rename directory from one tree to another tree in two process
# 4. After the specified time duration, the system should not be panic.
#
# TESTABILITY: explicit
#
# TEST_AUTOMATION_LEVEL: automated
#
# CODING_STATUS: COMPLETED (2007-02-05)
#
# __stc_assertion_end
#
################################################################################
# Suite hook from libtest.kshlib; "both" presumably selects the environments
# this test may run in — confirm exact semantics against the suite library.
verify_runnable "both"
# Remove everything the test created under $TESTDIR.
# Registered via log_onexit later in the script so it runs on any exit path.
# NOTE: $RM and $TESTDIR/* are deliberately unquoted — suite convention
# ($RM may carry flags; the glob must expand).
function cleanup
{
	log_must $RM -rf $TESTDIR/*
}
# Record the assertion being exercised and register cleanup for any exit path.
log_assert "ZFS can handle race directory rename operation."
log_onexit cleanup
cd $TESTDIR
# Build two parallel five-level directory trees for the rename race.
$MKDIR -p 1/2/3/4/5 a/b/c/d/e
# Start the helper that concurrently renames directories across the trees.
$RENAME_DIRS &
# Let the race run for the configured duration.
$SLEEP $WAITTIME
typeset -i retval=1
# If the helper is still alive after the wait, kill it; surviving to this
# point without a system panic is the pass condition.
$PGREP $RENAME_DIRS >/dev/null 2>&1
retval=$?
if (( $retval == 0 )); then
	$PKILL -9 $RENAME_DIRS >/dev/null 2>&1
fi
log_pass "ZFS handle race directory rename operation as expected."
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.