blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
64dca6b1b0aaa41de22daf87f9dd3b165e20be48
|
Shell
|
cory-work/local_site_tools
|
/remove_site.sh
|
UTF-8
| 1,748
| 3.0625
| 3
|
[
"MIT"
] |
permissive
|
# Copyright (c) 2019 Adobe
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#! /bin/bash
# Tear down a local dev site previously created for <domain>:
# delete the site directory, its dnsmasq/resolver/nginx config files,
# restart the services, then flush the macOS DNS caches.
# Usage: sudo ./remove_site.sh <domain>
if [[ $EUID -ne 0 ]]; then
   echo "This script must be run as root"
   exit 1
fi
if [ -z "$1" ]
then
  echo please provide a domain
  exit 1
fi
# Common vars
DOMAIN=$1
# eval is required so "~$SUDO_USER" tilde-expands to the *invoking* user's home.
BASE_DIR=$(eval echo "~$SUDO_USER"/sites/$DOMAIN)
CERT_DIR=$BASE_DIR/certs
LOG_DIR=$BASE_DIR/log
WWW_DIR=$BASE_DIR/www
ROOT_CERT_DIR=root_certs
# Bug fix: the original ran `rm -rf BASE_DIR $WWW_DIR` — a literal word
# "BASE_DIR" (missing `$`), so the site directory was never removed.
# ${BASE_DIR:?} aborts instead of expanding empty (guards against `rm -rf /...`).
rm -rf "${BASE_DIR:?}" "$WWW_DIR"
# -f: do not error if a config file was already removed by hand.
rm -f /usr/local/etc/dnsmasq.d/${DOMAIN}.conf
rm -f /etc/resolver/${DOMAIN}
rm -f /usr/local/etc/nginx/servers/${DOMAIN}.conf
# nginx was installed under the invoking user's Homebrew; dnsmasq under root's.
sudo -u $SUDO_USER brew services restart nginx
brew services restart dnsmasq
#flush cache
echo Flush DNS cache
killall -HUP mDNSResponder
killall mDNSResponderHelper
dscacheutil -flushcache
| true
|
ed84edb5c60144af385c96ce98105945ae03dd55
|
Shell
|
ARM-Solutions/Antivirus_Kali
|
/K_master.sh
|
UTF-8
| 3,702
| 3.203125
| 3
|
[] |
no_license
|
#Any comments with ## are meant for Debian and should be ignored
# Emit 30 spaces — used as a column separator after every section header.
seperate() {
  printf '%30s' ''
}
# Print the name of every individually runnable check, one per line
# (surrounded by blank lines, exactly as the original emitted them).
list() {
  printf '%s\n' "
badfile
hosts
top
backdoor
firewall
kernel
startup
rootlogin
passwords
shadow
updates
admins
qqr
sudoers
guest
autologin
policy
viruses
external
scan
"
}
# Search the locate database for files whose names suggest stored credentials.
badfile() {
  printf '%s' "[Possible File Threats]"
  seperate
  locate home | grep -ie password -ie users -ie passlist
  echo
  ##/home is just to filter out the non user directories
  #Use the -ie flag to add more search terms
  echo
}
# Flag /etc/hosts entries that redirect real web sites.
hosts() {
  printf '%s' "[Possible Threats in /etc/hosts]"
  seperate
  grep "www" /etc/hosts
  echo
}
# Look for running netcat processes that could be acting as a backdoor.
backdoor() {
  printf '%s' "[Possible Backdoor Threats]"
  seperate
  ps x | grep -ie "nc" -ie "netcat" | grep -iv "grep"
  echo
}
# Report the ufw firewall status.
firewall() {
  #Requires ufw
  printf '%s' "[Firewall Status]"
  seperate
  sudo ufw status
  echo
}
# Print the kernel release and version strings.
kernel() {
  printf '%s' "[Kernel Release]"
  seperate
  uname -r
  echo
  printf '%s' "[Kernel Version]"
  seperate
  uname -v
  echo
}
# startup: intended to show non-comment lines of /etc/rc.local; the
# Debian-only parts (##) are disabled here, so only the separator prints.
startup() {
##echo -n "[rc.local Non-comment Results]"
seperate
##cat /etc/rc.local | grep -v "#"
echo
}
# rootlogin: report whether sshd permits direct root login.
rootlogin() {
echo -n "[Root Login]"
seperate
cat /etc/ssh/sshd_config | grep PermitRootLogin
echo
}
# passwords: look for shadow entries with an empty password field.
# NOTE(review): the "::1" pattern is a heuristic and can match other
# colon-separated content — verify /etc/shadow manually, as warned below.
passwords() {
echo -n "[Profiles Without Password]"
seperate
sudo grep "::1" /etc/shadow
#Warning: Check Shadow file manually for "::1" or similar
echo
}
# shadow: print account names from /etc/shadow after excluding a hard-coded
# list of well-known system accounts; whatever remains should be human users
# (or suspicious additions).
shadow() {
echo -n "[Profiles in Shadow]"
seperate
sudo grep -ve root -ve daemon -ve bin -ve sys -ve sync -ve games -ve man -ve lp -ve mail -ve news- -ve uucp -ve proxy -ve data -ve news -ve backup -ve list -ve irc -ve gnats -ve nobody -ve libuuid -ve messagebus -ve usbmux -ve dnsmasq -ve avahi -ve kernoops -ve saned -ve whoopsie -ve speech-dispatcher -ve lightdm -ve colord -ve hplip -ve pulse -ve ftp -ve rtkit /etc/shadow | cut -f1 -d":"
echo
}
# updates: Debian-only apt periodic-update checks (##) — effectively a no-op
# on this distro apart from the headers. ("Invterval" typo is in a runtime
# string; left untouched here.)
updates() {
echo -n "[Update Invterval]"
seperate
##sudo grep -P '(?<=Update-Package-Lists ").' /etc/apt/apt.conf.d/10periodic
echo
echo -n "[Auto Updates]"
seperate
##sudo grep -P '(?<=Unattended-Upgrade ").' /etc/apt/apt.conf.d/10periodic
echo
}
# admins: list the members of the sudo group.
admins() {
echo -n "[Current Admins]"
seperate
sudo getent group sudo
echo
}
# sudoers: show non-default sudoers entries; the grep-based filtering is
# crude, hence the "(!!Not Trusted)!!" label.
sudoers() {
#Do not trust results from this function
echo -n "[Sudoers File Results (!!Not Trusted)!!]"
seperate
sudo cat /etc/sudoers | grep -ve "#" -ve "Defaults" -ve "%sudo" -ve "%admin"
echo
}
# guest: lightdm guest-account check, disabled (##) on this distro.
guest() {
echo -n "[Guest Account]"
seperate
##sudo cat /usr/share/lightdm/lightdm.conf.d/50-ubuntu.conf
echo
}
# autologin: lightdm auto-login check, disabled (##) on this distro.
autologin() {
echo -n "[Auto Login]"
seperate
##cat /usr/share/lightdm/lightdm.conf.d/50-ubuntu.conf | grep -i auto
echo
}
# Show the configured lightdm greeter; the actual check (##) is disabled
# on this distro, so only the header prints.
greeter() {
  printf '%s' "[Greeter]"
  seperate
  ##cat /usr/share/lightdm/lightdm.conf.d/50-ubuntu.conf | grep -i greeter
  echo
}
# Summarize password-policy values from /etc/login.defs.
policy() {
  printf '%s' "[Password Retries]"
  seperate
  sudo grep -i LOGIN_RETRIES /etc/login.defs
  echo
  printf '%s' "[Max Password Age]"
  seperate
  grep ^PASS_MAX_DAYS /etc/login.defs
  echo
  printf '%s' "[Minimum Password Age]"
  seperate
  grep ^PASS_MIN_DAYS /etc/login.defs
  echo
}
# Grep the installed-package list for common attack/audit tools.
# ("wiresharl" looks like a typo for "wireshark", but it is part of the
# search pattern's behavior — preserved as-is.)
program() {
  printf '%s' "[Possible Hacking Tools]"
  seperate
  apt list --installed | grep -ie john -ie samba -ie netcat -ie metasploit -ie nmap -ie openssh -ie wiresharl -ie nessus -ie aircrack -ie snort -ie crack
  echo
}
# Ensure ClamAV is present: install it when missing, otherwise report it.
# Bug fix: the original grepped `sudo apt list` (every AVAILABLE package),
# so "clamav" was always found in the repository index and the function
# reported "Installed" even on machines without it. Check the installed
# set instead; stderr is silenced because apt warns about CLI output use.
viruses() {
if [ `sudo apt list --installed 2>/dev/null | grep -ic clamav` -eq 0 ]; then
sudo apt-get install clamav
else
echo -n "[Clam AV]"
seperate
echo "Installed"
echo
fi
}
# List the contents of mounted external media, skipping cdrom-style mounts.
external() {
  printf '%s' "[External Media]"
  seperate
  ls /media/$(ls /media | grep -v rom)
  echo
}
# Locate files whose names match known attack tools.
scan() {
  printf '%s' "[Dangerous File Locations]"
  locate john samba netcat metasploit nmap openssh wiresharl nessus aircrack snort crack
  #Sort by sbin to locate executables
}
# run: execute every check in sequence.
# NOTE(review): `external` and `scan` are advertised by list() but never
# invoked here, while `greeter` and `program` run but are not listed;
# list() also mentions `qqr`, which has no function — confirm intent.
run() {
badfile
hosts
backdoor
firewall
kernel
startup
rootlogin
passwords
shadow
updates
admins
sudoers
guest
autologin
greeter
policy
program
viruses
}
| true
|
7def62125fb9bbfd558ba8b3b8567f19dfa5dc6e
|
Shell
|
h4tr3d/IncludeOS
|
/test/fs/integration/fat16/fat16_disk.sh
|
UTF-8
| 630
| 4.09375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
### FAT16 TEST DISK ###
# No arguments: build an ~8 MB FAT test image containing banana.txt.
# "clean":      delete the image.
DISK=my.disk
MOUNTDIR=tmpdisk
# If no args supplied, create a fat disk and fill with data
if [ $# -eq 0 ]
then
  # Remove disk if exists
  rm -f "$DISK"
  # Create "my.disk" with 16500 512-byte blocks (~8 MB)
  dd if=/dev/zero of="$DISK" count=16500
  # Create FAT filesystem on "my.disk"
  mkfs.fat "$DISK"
  # Create mount dir, copy the test fixture in, then unmount cleanly
  mkdir -p "$MOUNTDIR"
  sudo mount "$DISK" "$MOUNTDIR"/
  sudo cp banana.txt "$MOUNTDIR"/
  sync # Mui Importante — flush writes before unmounting
  sudo umount "$MOUNTDIR"/
  rmdir "$MOUNTDIR"
# If "clean" is supplied, clean up
# Fix: "$1" is quoted — the original bare `[ $1 = "clean" ]` breaks if the
# argument contains whitespace (SC2086).
elif [ "$1" = "clean" ]
then
  echo "> Cleaning up FAT16 TEST DISK: $DISK"
  rm -f "$DISK"
fi
| true
|
240141f0b7a87dc4f8b4f54c551cb20f5658ec91
|
Shell
|
anar-shamilov/vagrant-elasticsearch-cluster-with-snapshot
|
/scripts/elkinstall.sh
|
UTF-8
| 1,732
| 2.515625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Install and configure an Elasticsearch 6.x master node plus Kibana and
# Logstash on an RPM-based host. $1 = IP address the services bind to.
rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch
# Bug fix: `sudo cat <<EOF > /etc/...` runs only `cat` under sudo — the
# output redirection is performed by the *unprivileged* shell and fails with
# "Permission denied" when the script is not already root. `sudo tee`
# performs the privileged write; >/dev/null keeps the content off stdout.
sudo tee /etc/yum.repos.d/elasticsearch.repo >/dev/null <<'EOF'
[elasticsearch-6.x]
name=Elasticsearch repository for 6.x packages
baseurl=https://artifacts.elastic.co/packages/6.x/yum
gpgcheck=1
gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
enabled=1
autorefresh=1
type=rpm-md
EOF
sudo yum install -y elasticsearch
# Keep a backup of the stock config before overwriting it.
sudo cp /etc/elasticsearch/elasticsearch.yml /root
sudo tee /etc/elasticsearch/elasticsearch.yml >/dev/null <<EOF
path.data: /var/lib/elasticsearch
path.repo: ["/etc/elasticsearch/elasticsearch-backup"]
path.logs: /var/log/elasticsearch
cluster.name: elkepam
node.name: node-master
node.master: true
node.data: true
network.host: $1
http.port: 9200
discovery.zen.ping.unicast.hosts: ["192.168.120.40", "192.168.120.21","192.168.120.22","192.168.120.23"]
#discovery.zen.minimum_master_nodesdiscovery.zen.minimum_master_nodes: 2
EOF
sudo touch /etc/elasticsearch/elasticsearch.keystore
sudo chown -R elasticsearch:elasticsearch /etc/elasticsearch/
sudo chmod -R 750 /etc/elasticsearch/
sudo systemctl enable elasticsearch && sudo systemctl start elasticsearch
sudo yum install -y kibana
sudo cp /etc/kibana/kibana.yml /root
sudo tee /etc/kibana/kibana.yml >/dev/null <<EOF
server.host: "$1"
EOF
sudo systemctl enable kibana && sudo systemctl start kibana
sudo yum install -y logstash
# Fix: `sudo echo` was pointless — the append to ~/.bashrc always happens in
# the caller's shell anyway, so drop the sudo.
echo 'export PATH=$PATH:/usr/share/logstash/bin' >> ~/.bashrc
#sudo echo 'export PATH=$PATH:/usr/share/logstash/bin' >> /etc/environment
sudo cp /etc/logstash/logstash.yml /root/
sudo tee /etc/logstash/logstash.yml >/dev/null <<'EOF'
path.data: /var/lib/logstash
path.logs: /var/log/logstash
EOF
sudo systemctl start logstash && sudo systemctl enable logstash
| true
|
91ae3da2dbde65a0fca3f871dd8f64d3e5608e93
|
Shell
|
amarshah1/bigquery-openstreetmap
|
/layered_gis/transport/transport.sh
|
UTF-8
| 3,086
| 2.859375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Bug fix: the shebang was /bin/sh, but this script uses bash arrays — on a
# strict POSIX sh it would fail to parse. Run under bash explicitly.
#
# Generates one BigQuery SQL file per OSM transport layer. Expects
# GCP_PROJECT and BQ_SOURCE_DATASET in the environment.
LAYER=(
  "5622:amenity=bus_station"
  "5641:amenity=taxi"
  "5655:aeroway=helipad"
  "5656:aeroway=apron"
  "5661:amenity=ferry_terminal"
)
for layer in "${LAYER[@]}"
do
  CODE="${layer%%:*}"   # numeric layer code before the ':'
  KV="${layer##*:}"     # key=value after the ':'
  K="${KV%%=*}"
  V="${KV##*=}"
  # Bug fix: layer_class was 'landuse' here, while every hand-written query
  # below in this transport script emits 'transport'.
  echo "SELECT
  $CODE AS layer_code, 'transport' AS layer_class, '$V' AS layer_name, feature_type AS gdal_type, osm_id, osm_way_id, osm_timestamp, all_tags, geometry
FROM \`${GCP_PROJECT}.${BQ_SOURCE_DATASET}.features\`
WHERE EXISTS(SELECT 1 FROM UNNEST(all_tags) as tags WHERE tags.key = '$K' AND tags.value='$V')" > "$V.sql"
done
#5621
#highway=bus_stop, or public_transport=stop_position + bus=yes
echo "SELECT
  5621 AS layer_code, 'transport' AS layer_class, 'bus_stop' AS layer_name, feature_type AS gdal_type, osm_id, osm_way_id, osm_timestamp, all_tags, geometry
FROM \`${GCP_PROJECT}.${BQ_SOURCE_DATASET}.features\`
WHERE EXISTS(SELECT 1 FROM UNNEST(all_tags) as tags WHERE (tags.key = 'highway' AND tags.value='bus_stop'))
OR (
  EXISTS(SELECT 1 FROM UNNEST(all_tags) as tags WHERE tags.key = 'public_transport' AND tags.value='stop_position')
  AND
  EXISTS(SELECT 1 FROM UNNEST(all_tags) as tags WHERE tags.key = 'bus' AND tags.value='yes')
)" > "bus_stop.sql"
#5651
#amenity=airport or aeroway=aerodrome unless type=airstrip
# Bug fix: layer_code was 5621 (bus_stop's code); the comment above and the
# layer numbering scheme call for 5651.
echo "SELECT
  5651 AS layer_code, 'transport' AS layer_class, 'airport' AS layer_name, feature_type AS gdal_type, osm_id, osm_way_id, osm_timestamp, all_tags, geometry
FROM \`${GCP_PROJECT}.${BQ_SOURCE_DATASET}.features\`
WHERE NOT EXISTS(SELECT 1 FROM UNNEST(all_tags) as tags WHERE (tags.key = 'type' AND tags.value='airstrip'))
AND (
  EXISTS(SELECT 1 FROM UNNEST(all_tags) as tags WHERE tags.key = 'amenity' AND tags.value='airport')
  OR
  EXISTS(SELECT 1 FROM UNNEST(all_tags) as tags WHERE tags.key = 'aeroway' AND tags.value='aerodrome')
)" > "airport.sql"
#5652
#aeroway=airfield, military=airfield, aeroway=aeroway with type=airstrip
# Bug fix: layer_code was 5621; the comment above calls for 5652.
echo "SELECT
  5652 AS layer_code, 'transport' AS layer_class, 'airfield' AS layer_name, feature_type AS gdal_type, osm_id, osm_way_id, osm_timestamp, all_tags, geometry
FROM \`${GCP_PROJECT}.${BQ_SOURCE_DATASET}.features\`
WHERE EXISTS(SELECT 1 FROM UNNEST(all_tags) as tags WHERE (tags.key = 'aeroway' AND tags.value='airfield'))
OR EXISTS(SELECT 1 FROM UNNEST(all_tags) as tags WHERE (tags.key = 'military' AND tags.value='airfield'))
OR (
  EXISTS(SELECT 1 FROM UNNEST(all_tags) as tags WHERE tags.key = 'aeroway' AND tags.value='aeroway')
  AND
  EXISTS(SELECT 1 FROM UNNEST(all_tags) as tags WHERE tags.key = 'type' AND tags.value='airstrip')
)" > "airfield.sql"
#5671
# Bug fix: layer_code was 5621; the comment above calls for 5671.
echo "SELECT
  5671 AS layer_code, 'transport' AS layer_class, 'aerialway_station' AS layer_name, feature_type AS gdal_type, osm_id, osm_way_id, osm_timestamp, all_tags, geometry
FROM \`${GCP_PROJECT}.${BQ_SOURCE_DATASET}.features\`
WHERE EXISTS(SELECT 1 FROM UNNEST(all_tags) as tags WHERE (tags.key = 'aerialway' AND tags.value='station'))" > "aerialway_station.sql"
| true
|
322d20224ebe30ce49494d22895207b08623d1ff
|
Shell
|
bdereims/cPod
|
/shwrfr/compute/list_vapp.sh
|
UTF-8
| 723
| 2.640625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#bdereims@vmware.com
# List vApps: render the PowerCLI template by substituting the
# ###PLACEHOLDER### tokens from ./env, then run it inside the
# vmware/powerclicore container.
. ./env
PS_SCRIPT=list_vapp.ps1
SCRIPT_DIR=/tmp/scripts
SCRIPT=/tmp/scripts/$$.ps1
mkdir -p ${SCRIPT_DIR}
cp ${COMPUTE_DIR}/${PS_SCRIPT} ${SCRIPT}
# One in-place substitution per placeholder; ${!key} reads the variable
# whose name is in $key (values come from the sourced ./env).
for key in VCENTER VCENTER_ADMIN VCENTER_PASSWD VCENTER_DATACENTER VCENTER_CLUSTER; do
  sed -i -e "s/###${key}###/${!key}/" ${SCRIPT}
done
echo "List vApp."
#docker run --rm -it --dns=10.50.0.3 -v ${SCRIPT_DIR}:${SCRIPT_DIR} vmware/powerclicore:ubuntu14.04 powershell -NoLogo ${SCRIPT}
docker run --rm --dns=10.50.0.3 -v ${SCRIPT_DIR}:${SCRIPT_DIR} vmware/powerclicore:ubuntu14.04 powershell -NoLogo ${SCRIPT}
rm -fr ${SCRIPT}
| true
|
400a790f89dd48e10a47fbb7b2714db47d71884a
|
Shell
|
somebodyhuman/kubicluster
|
/scripts/worker/setup_containerd.sh
|
UTF-8
| 6,181
| 3.671875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Bug fix: the shebang was mistyped as "#/!bin/bash", which is just a
# comment — the script ran under whatever shell happened to invoke it.
#
# Install CNI plugins and containerd on a worker node.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
# Provides NODE_WORK_DIR, CNI_PLUGINS_VERSION, CONTAINERD_VERSION,
# CNI_CONF_DIR, NODE_CERTS_AND_CONFIGS_DIR, REGISTRIES, FORCE_UPDATE, ...
source ${DIR}/../utils/env-variables "$@"
CNI_PLUGINS_DIR=${NODE_WORK_DIR}/cni-plugins-${CNI_PLUGINS_VERSION}
CONTAINERD_DIR=${NODE_WORK_DIR}/containerd-${CONTAINERD_VERSION}
# download and extract cni plugins
# The tarball is cached in NODE_WORK_DIR; the fetch is skipped when present.
if [ ! -f ${NODE_WORK_DIR}/cni-plugins-${CNI_PLUGINS_VERSION}.tar.gz ]; then
# The https download needs CA certificates; install them if absent.
if ! (dpkg -s ca-certificates); then apt-get update ; apt-get install -y ca-certificates; fi
wget -q --show-progress --https-only --timestamping \
"https://github.com/containernetworking/plugins/releases/download/v${CNI_PLUGINS_VERSION}/cni-plugins-linux-amd64-v${CNI_PLUGINS_VERSION}.tgz" -O ${NODE_WORK_DIR}/cni-plugins-${CNI_PLUGINS_VERSION}.tar.gz
fi
# Extract once into a version-suffixed directory.
if [ ! -d ${CNI_PLUGINS_DIR} ]; then
mkdir ${CNI_PLUGINS_DIR}
tar -xzf ${NODE_WORK_DIR}/cni-plugins-${CNI_PLUGINS_VERSION}.tar.gz -C ${CNI_PLUGINS_DIR}
fi
# configure cni
# Write the bridge network config; existing config is only overwritten when
# FORCE_UPDATE=true.
mkdir -p ${CNI_CONF_DIR}
if [ ! -f ${CNI_CONF_DIR}/10-kubicluster.conf ] || [ "${FORCE_UPDATE}" = true ]; then
cat << EOF | tee ${CNI_CONF_DIR}/10-kubicluster.conf
{
"cniVersion": "0.2.0",
"name": "kubicluster",
"type": "bridge",
"bridge": "cni0",
"isGateway": true,
"ipMasq": true,
"ipam": {
"type": "host-local",
"subnet": "172.19.0.0/24",
"routes": [
{ "dst": "0.0.0.0/0" }
]
}
}
EOF
fi
# download and extract containerd
# Same cache-then-extract pattern as the CNI plugins above.
if [ ! -f ${NODE_WORK_DIR}/containerd-${CONTAINERD_VERSION}.tar.gz ]; then
if ! (dpkg -s ca-certificates); then apt-get update ; apt-get install -y ca-certificates; fi
wget -q --show-progress --https-only --timestamping \
"https://github.com/containerd/containerd/releases/download/v${CONTAINERD_VERSION}/containerd-${CONTAINERD_VERSION}-linux-amd64.tar.gz" -O ${NODE_WORK_DIR}/containerd-${CONTAINERD_VERSION}.tar.gz
fi
if [ ! -d ${CONTAINERD_DIR} ]; then
mkdir ${CONTAINERD_DIR}
tar -xzf ${NODE_WORK_DIR}/containerd-${CONTAINERD_VERSION}.tar.gz -C ${CONTAINERD_DIR}
fi
# Symlink every containerd binary into /usr/local/bin, replacing stale links
# so re-runs with a new CONTAINERD_VERSION switch atomically.
if [ ! -f /usr/local/bin/containerd ] || \
  [ "${FORCE_UPDATE}" = true ]; then
for item in containerd containerd-shim containerd-shim-runc-v1 containerd-shim-runc-v2 containerd-stress ctr; do
if [ -h /usr/local/bin/${item} ]; then rm -f /usr/local/bin/${item} ; fi
ln -s ${CONTAINERD_DIR}/bin/${item} /usr/local/bin/${item}
done
fi
# Sanity-check: the installed binary must report the requested version.
CONTAINERD_RESULT=$(containerd --version | cut -d ' ' -f 3)
if [ "${CONTAINERD_RESULT}" != "v${CONTAINERD_VERSION}" ]; then
echo "containerd ${CONTAINERD_VERSION} installation failed."
exit 1
else
echo "containerd version is ${CONTAINERD_VERSION}."
fi
# configure containerd
# /etc/containerd/config.toml is assembled from four fragments written to
# NODE_CERTS_AND_CONFIGS_DIR: header, registry mirrors, registry TLS
# configs, and the static runtime/CNI tail. Rebuilt only when missing or
# FORCE_UPDATE=true.
if [ ! -f /etc/containerd/config.toml ] || [ "${FORCE_UPDATE}" = true ]; then
mkdir -p /etc/containerd/
rm -f ${NODE_CERTS_AND_CONFIGS_DIR}/config-*.toml
echo '[plugins]' >${NODE_CERTS_AND_CONFIGS_DIR}/config-before-registries.toml
# REGISTRIES entries look like "name,ip"; split on ',' into name_ip[0]/[1].
for node in ${REGISTRIES}; do
name_ip=($(echo $node | tr "," "\n"))
# Section headers are emitted once, before the first mirror entry.
if [ ! -e ${NODE_CERTS_AND_CONFIGS_DIR}/config-registries-mirrors.toml ]; then
echo ' [plugins.cri.registry]' >${NODE_CERTS_AND_CONFIGS_DIR}/config-registries-mirrors.toml
echo ' [plugins.cri.registry.mirrors]' >>${NODE_CERTS_AND_CONFIGS_DIR}/config-registries-mirrors.toml
fi
#TODO allow port to be configured
echo " [plugins.cri.registry.mirrors.\"${name_ip[1]}:6666\"]" >>${NODE_CERTS_AND_CONFIGS_DIR}/config-registries-mirrors.toml
echo " endpoint = [\"https://${name_ip[1]}:6666\"]" >>${NODE_CERTS_AND_CONFIGS_DIR}/config-registries-mirrors.toml
done
# Second pass: per-registry TLS config pointing at the node's CA bundle.
for node in ${REGISTRIES}; do
name_ip=($(echo $node | tr "," "\n"))
if [ ! -e ${NODE_CERTS_AND_CONFIGS_DIR}/config-registries.toml ]; then
echo ' [plugins.cri.registry.configs]' >>${NODE_CERTS_AND_CONFIGS_DIR}/config-registries.toml
fi
#TODO allow port to be configured
echo " [plugins.cri.registry.configs.\"${name_ip[1]}:6666\".tls]" >>${NODE_CERTS_AND_CONFIGS_DIR}/config-registries.toml
echo " ca_file = \"${NODE_CERTS_AND_CONFIGS_DIR}/${name_ip[0]}-nexus-fullchain.pem\"" >>${NODE_CERTS_AND_CONFIGS_DIR}/config-registries.toml
done
# Static tail: runc + kata runtimes (kata is the default) and the CNI conf dir.
cat << EOF | tee ${NODE_CERTS_AND_CONFIGS_DIR}/config-after-registries.toml
[plugins.cri.containerd]
no_pivot = false
[plugins.cri.containerd.runtimes]
[plugins.cri.containerd.runtimes.runc]
runtime_type = "io.containerd.runc.v1"
[plugins.cri.containerd.runtimes.runc.options]
NoPivotRoot = false
NoNewKeyring = false
ShimCgroup = ""
IoUid = 0
IoGid = 0
BinaryName = "runc"
Root = ""
CriuPath = ""
SystemdCgroup = false
[plugins.cri.containerd.runtimes.kata]
runtime_type = "io.containerd.kata.v2"
[plugins.cri.containerd.default_runtime]
runtime_type = "io.containerd.kata.v2"
[plugins.cri.cni]
# conf_dir is the directory in which the admin places a CNI conf.
conf_dir = "${CNI_CONF_DIR}"
EOF
# Concatenate the fragments into the live config.
cat ${NODE_CERTS_AND_CONFIGS_DIR}/config-before-registries.toml \
  ${NODE_CERTS_AND_CONFIGS_DIR}/config-registries-mirrors.toml \
  ${NODE_CERTS_AND_CONFIGS_DIR}/config-registries.toml \
  ${NODE_CERTS_AND_CONFIGS_DIR}/config-after-registries.toml >/etc/containerd/config.toml
fi
# (Re)create and (re)start the containerd systemd unit when the unit file is
# missing, the service is not running, the binary is missing, or
# FORCE_UPDATE=true.
if [ ! -f /etc/systemd/system/containerd.service ] || \
  [ "$(systemctl status containerd.service | grep running)" = "" ] || \
  [ ! -f /usr/local/bin/containerd ] || \
  [ "${FORCE_UPDATE}" = true ]; then
if [ ! -f /etc/systemd/system/containerd.service ] || \
  [ "${FORCE_UPDATE}" = true ]; then
cat << EOF | tee /etc/systemd/system/containerd.service
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target
[Service]
ExecStartPre=/sbin/modprobe overlay
ExecStart=/usr/local/bin/containerd
Restart=always
RestartSec=5
Delegate=yes
KillMode=process
OOMScoreAdjust=-999
LimitNOFILE=1048576
LimitNPROC=infinity
LimitCORE=infinity
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
fi
systemctl enable containerd.service
# NOTE(review): stopping kube-apiserver here on FORCE_UPDATE looks
# intentional (avoid API churn while the runtime restarts) but nothing
# restarts it afterwards — confirm against the cluster scripts.
if [ "${FORCE_UPDATE}" = true ]; then
systemctl stop kube-apiserver
fi
systemctl start containerd.service
systemctl status containerd.service
fi
| true
|
4838d6222f52f3d9369bb6b6b9858f93d15d2ce3
|
Shell
|
arashmodrad/Parallel-computing
|
/Homework 4/2D/GPU/run_script_gpu.qsub
|
UTF-8
| 335
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/bash
###
### SLURM batch script: run the 2-D shallow-water GPU solver over a series
### of refined grids (i*NX for i in 1..40) with dt scaled down as DT/i.
#SBATCH --time=10:00:00
#SBATCH --tasks=1
#SBATCH --job-name=sw_2d_GPU
#SBATCH --output=outputGPU.o%j
#SBATCH -p gpuq
NX=200
DT=0.004
LX=10.0
TFINAL=0.2
# Bug fix: the original read $? once after the loop, so only the *last*
# iteration's exit status could ever be reported; remember any failure.
status=0
for i in {1..40};
do
    # dt shrinks proportionally with the grid refinement (bc: 15-digit scale).
    dt=$(bc <<< "scale = 15; $DT / $i")
    ./sw_2d $((i*NX)) $dt $LX $TFINAL || status=$?
done
if [ $status -ne 0 ]; then
    exit $status
fi
| true
|
8c8f94006a5e8b2071532287015d2e5adfb078ff
|
Shell
|
pete-may/bash
|
/.bash_profile
|
UTF-8
| 1,451
| 2.984375
| 3
|
[] |
no_license
|
# ~/.bash_profile — PATH setup, prompt/ls colors, and personal aliases.
# NOTE: the export order below is significant — later prepends (anaconda,
# node@8) take precedence over the base PATH set here.
export PATH=/usr/local/opt/python/libexec/bin:/usr/local/share/python:/usr/local/bin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/go/bin:/Applications/Wireshark.app/Contents/MacOS
# Pull in interactive-shell settings shared with non-login shells.
if [ -f ~/.bashrc ]; then
   source ~/.bashrc
fi
# added by Anaconda2 5.1.0 installer
export PATH="/Users/peter.may@ibm.com/anaconda2/bin:$PATH"
export PATH="/usr/local/opt/node@8/bin:$PATH"
### Colors
## Change bash prompt to be colorized, rearranges prompt to be: "username@hostname:cwd $ "
export PS1="@\[\033[36m\]\u\[\033[m\]:\[\033[33;1m\]\W\[\033[m\]\$ "
#changed to capital W to play with readability
## Enable command line colors, define colors for the 'ls' command
export CLICOLOR=1
export LSCOLORS=ExFxBxDxCxegedabagacad
## Makes reading directory listings easier
## -G: colorize output, -h: sizes human readable, -F: throws a / after a directory, * after an executable, and a @ after a symlink
alias ls='ls -GFh'
## Source bash_profile
alias reload="source ~/.bash_profile"
## Chuck
alias chuck="ssh petermay@chuck.dblayer.com"
## Use Python3 (note: aliases only affect interactive use, not scripts)
alias python="/usr/local/bin/python3"
alias pip="/usr/local/bin/pip3"
## Janusgraph
alias janusgraph="/Users/peter.may@ibm.com/Documents/JanusGraph/0.3.0/app/janusgraph-0.3.0-hadoop2/bin/gremlin.sh"
## cd to Compose directory
alias compose="cd /Users/peter.may@ibm.com/Documents/Compose"
## cd to Janugraph/conf directory
alias conf="cd /Users/peter.may@ibm.com/Documents/Janusgraph/0.3.0/app/janusgraph-0.3.0-hadoop2/conf"
## History
alias h='history'
| true
|
063b1b883f671b803107f27602f40714afb9c7a5
|
Shell
|
9thcirclegames/wisdom-of-cthulhu
|
/_scripts/install.countersheet.sh
|
UTF-8
| 1,380
| 2.875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Resolve the build directory (Travis CI, WOC_BUILD_DIR override, or cwd)
# and install the project's Python requirements there.
# Build Dir
if [ -n "${TRAVIS+x}" ]; then
    echo "** Executing in Travis CI environment";
    export BUILD_DIR=$TRAVIS_BUILD_DIR
else
    if [ -n "${WOC_BUILD_DIR+x}" ]; then
        echo "** Executing in local environment; build dir set to $WOC_BUILD_DIR";
        export BUILD_DIR=$WOC_BUILD_DIR
    else
        # Idiom fix: $( ) instead of legacy backticks (nests and reads better).
        echo "** Executing in local environment; build dir set to $(pwd)"
        export BUILD_DIR=$(pwd)
    fi
fi
#wget -N https://github.com/lifelike/countersheetsextension/archive/master.zip -P /tmp/countersheetsextension/
#unzip -o -j "/tmp/countersheetsextension/master.zip" "countersheetsextension-master/countersheet.py" -d "$BUILD_DIR/"
#rm /tmp/countersheetsextension/master.zip
#wget -N https://github.com/lifelike/countersheetsextension/archive/6845bda4c0cdfc887a1d82a02f00755ab241590c.zip -P /tmp/countersheetsextension/
#tar xzf /tmp/countersheetsextension/6845bda4c0cdfc887a1d82a02f00755ab241590c.zip -C $BUILD_DIR/ --strip-components 1 countersheetsextension-6845bda4c0cdfc887a1d82a02f00755ab241590c/countersheet.py
#wget -N https://github.com/lifelike/countersheetsextension/archive/2.0.tar.gz -P /tmp/countersheetsextension/
#tar xzf /tmp/countersheetsextension/2.0.tar.gz -C $BUILD_DIR/ --strip-components 1 countersheetsextension-2.0/countersheet.py
#rm /tmp/countersheetsextension/*.*
# Quote the path so a build dir containing spaces still works (SC2086).
pip install --user --requirement="$BUILD_DIR/requirements.txt"
| true
|
27457a86a0cbb46fb6699b79773bc0409ac99e8e
|
Shell
|
dalalsunil1986/Beelzebub
|
/scripts/grab_genisoimage.sh
|
UTF-8
| 422
| 3.28125
| 3
|
[
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# Fetch a prebuilt genisoimage binary into MISC_TOOLS_DIR (primary URL with
# a Dropbox mirror fallback) unless it is already present.
PRECMD=""
if [ ! -v MISC_TOOLS_DIR ]
then
    export MISC_TOOLS_DIR="/usr/local/beelzebub-tools"
    # Default location lives under /usr/local — privileged writes needed.
    PRECMD="sudo"
fi
# Bug fix: mkdir (and chmod below) ran without $PRECMD, so creating the
# default /usr/local/... directory failed for non-root users even though the
# downloads themselves were correctly sudo-prefixed.
$PRECMD mkdir -p "$MISC_TOOLS_DIR"
if [ ! -e "$MISC_TOOLS_DIR/genisoimage" ]
then
    pushd "$MISC_TOOLS_DIR"
    $PRECMD wget "http://u.vercas.com/genisoimage"
    RES=$?
    if [ $RES != 0 ]
    then
        # Mirror fallback when the primary host is unavailable.
        $PRECMD wget "https://www.dropbox.com/s/auy4n5zc7hwpm41/genisoimage?dl=1"
    fi
    $PRECMD chmod +x genisoimage
    popd
fi
| true
|
5213843915f5fa1960cd8e7762d9cbb0af7d6146
|
Shell
|
shafiahmed/beansight-website
|
/design/exportsHTML/makelocal.sh
|
UTF-8
| 367
| 2.71875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Export the site's public assets next to hand-saved HTML pages and rewrite
# absolute /public URLs so the pages work from the local filesystem.
echo "Create a folder, create .html files with HTML code (that you get by showing the sources of the pages). Then enter the name of the folder:"
# Bug fix: read without -r mangles backslashes in the typed path; -e keeps
# readline editing as before.
read -e -r FOLDER
# Quoted so folder names with spaces survive (SC2086).
svn export ../../Website/trunk/beansight/main/public "$FOLDER/public"
# Rewrite absolute asset URLs to relative ones in all exported pages/styles.
find ./ -type f -name '*.html' -exec sed -i 's/\/public/public/g' {} \;
find ./ -type f -name '*.css' -exec sed -i 's/\/public/../g' {} \;
| true
|
2163ffa7320c977b83756431499be5d6d5a251c5
|
Shell
|
burtonjb/computer-graphics
|
/run_lua_scripts.sh
|
UTF-8
| 385
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
# The lua files need to be run from the ./bin folder, or I need to fix the lua path (lua doesn't resolve paths for libraries)
# I've created this shell script for cleaning up, running all the lua files, and then creating the images
# -f: a missing image type is not an error on a clean checkout.
rm -f images/*.pam
rm -f images/*.png
rm -f images/*.jpeg
cp './lua/utils.lua' 'bin/'
# Bug fix: unchecked `cd bin` (SC2164) — if bin/ is missing, the lua loop
# would have run from the repo root with wrong relative paths.
cd bin || exit 1
for s in ../lua/*.lua ; do \
    echo $s; \
    lua $s; \
done
cd ..
| true
|
fb7ea6dd322606dbf49244cb5414fffca1c98e23
|
Shell
|
iocron/typo3-theme-githooks
|
/post-merge
|
UTF-8
| 5,703
| 3.828125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# shellcheck source=/dev/null
# MIT © Sebastian Pieczona - sp@iocron.com
# Bash Exit if a command exits with a non-zero status #
set -e
# Set Generic Variables
# The hook may execute either from the template repo root (cwd/.git) or from
# one level inside it (../.git); resolve GIT/TPL/ROOT dirs for both layouts.
if [[ -d "$(pwd)/.git" ]]; then
GIT_DIR="$(pwd)/.git"
TPL_DIR="$(pwd)"
ROOT_DIR="$(dirname "$(dirname "$(dirname "$(pwd)")")")"
elif [[ -d "$(dirname "$(pwd)")/.git" ]]; then
GIT_DIR="$(dirname "$(pwd)")/.git"
TPL_DIR="$(dirname "$(pwd)")"
ROOT_DIR="$(dirname "$(dirname "$(dirname "$(dirname "$(pwd)")")")")"
else
# NOTE(review): "error.log" here is relative to the cwd, unlike the
# $LOG_ERROR path used later — confirm this is intended.
printf "ERROR: No .git folder $(pwd)/.git nor $(dirname "$(pwd)")/.git found\n" | tee -a "error.log";
exit 1
fi
# All deploy/error output is tee'd into logs kept under .git/logs.
LOG_DIR="$GIT_DIR/logs"
LOG_ERROR="$LOG_DIR/error.log"
LOG_DEPLOY="$LOG_DIR/deploy.log"
#TIME_FORMAT=$(date "%Y-%m-%d_%H:%M:%S");
TIME_FORMAT_FILE=$(date +"%Y-%m-%d-%H%M%S")
# The template's name doubles as its target folder name under fileadmin/.
TPL_NAME="$(basename "$TPL_DIR")"
TPL_FILEADMIN_DIR="$TPL_DIR/Initialisation/Files"
ROOT_FILEADMIN_DIR="$ROOT_DIR/fileadmin"
ROOT_FILEADMIN_TPL_DIR="$ROOT_FILEADMIN_DIR/$TPL_NAME"
printf "###########################\n" | tee -a "$LOG_DEPLOY";
printf "START OF GITHOOK POST-MERGE: $TIME_FORMAT_FILE\n" | tee -a "$LOG_DEPLOY";
printf "###########################\n" | tee -a "$LOG_DEPLOY";
printf "GENERIC VARIABLES: \n" | tee -a "$LOG_DEPLOY";
printf "GIT_DIR=$GIT_DIR\n" | tee -a "$LOG_DEPLOY"
printf "TPL_DIR=$TPL_DIR\n" | tee -a "$LOG_DEPLOY"
printf "TPL_NAME=$TPL_NAME\n" | tee -a "$LOG_DEPLOY"
printf "TPL_FILEADMIN_DIR=$TPL_FILEADMIN_DIR\n" | tee -a "$LOG_DEPLOY"
printf "ROOT_DIR=$ROOT_DIR\n" | tee -a "$LOG_DEPLOY"
printf "ROOT_FILEADMIN_DIR=$ROOT_FILEADMIN_DIR\n" | tee -a "$LOG_DEPLOY"
printf "ROOT_FILEADMIN_TPL_DIR=$ROOT_FILEADMIN_TPL_DIR\n" | tee -a "$LOG_DEPLOY"
########### DIRECTORY / FILE INIT: ###########
# Make sure .git/logs exists before anything tees into it.
[[ -d "$LOG_DIR" ]] || mkdir "$LOG_DIR"
########### TYPO3 Fileadmin Sync: ###########
# Mirror the template's Initialisation/Files into fileadmin/<template-name>.
printf "###########################\n" | tee -a "$LOG_DEPLOY";
printf "TYPO3 FILEADMIN SYNC: \n" | tee -a "$LOG_DEPLOY";
if [[ -d "$ROOT_FILEADMIN_DIR" && ! -d "$ROOT_FILEADMIN_TPL_DIR" ]]; then
mkdir "$ROOT_FILEADMIN_TPL_DIR" && printf "INFO: ROOT_FILEADMIN_TPL_DIR: Created the directory $ROOT_FILEADMIN_TPL_DIR for the sync process.\n" | tee -a "$LOG_DEPLOY";
fi
if [[ ! -d "$TPL_DIR" ]]; then
printf "$TIME_FORMAT_FILE: ERROR: TPL_DIR: The folder $TPL_DIR doesn't exist.\n" | tee -a "$LOG_ERROR";
exit 1;
elif [[ ! -d "$TPL_FILEADMIN_DIR" ]]; then
# Bug fix: the original still ran `exit 1` here, aborting the hook right
# after *successfully* creating the directory. A freshly created source dir
# is empty, so there is simply nothing to sync on this run — continue.
mkdir -p "$TPL_FILEADMIN_DIR"
printf "$TIME_FORMAT_FILE: INFO: TPL_FILEADMIN_DIR: The folder $TPL_FILEADMIN_DIR has been created.\n" | tee -a "$LOG_DEPLOY";
elif [[ ! -d "$ROOT_FILEADMIN_DIR" ]]; then
printf "$TIME_FORMAT_FILE: ERROR: ROOT_FILEADMIN_DIR: The folder $ROOT_FILEADMIN_DIR doesn't exist.\n" | tee -a "$LOG_ERROR";
exit 1;
elif [[ ! -d "$ROOT_FILEADMIN_TPL_DIR" ]]; then
printf "$TIME_FORMAT_FILE: ERROR: ROOT_FILEADMIN_TPL_DIR: The folder $ROOT_FILEADMIN_TPL_DIR doesn't exist.\n" | tee -a "$LOG_ERROR";
exit 1;
else
printf "$TIME_FORMAT_FILE: INFO: Syncing $TPL_FILEADMIN_DIR/ to $ROOT_FILEADMIN_TPL_DIR/, please wait a moment..\n" | tee -a "$LOG_DEPLOY";
# Prefer rsync; fall back to a cp-based copy of the top-level directories.
if hash rsync 2>/dev/null; then
rsync -avz "$TPL_FILEADMIN_DIR/" "$ROOT_FILEADMIN_TPL_DIR/" | tee -a "$LOG_DEPLOY";
# Bug fix: the original tested `$?` after `rsync | tee`, which is tee's
# status, not rsync's. PIPESTATUS[0] holds rsync's real exit code.
SYNC_STATUS=${PIPESTATUS[0]}
elif hash cp 2>/dev/null; then
find "$TPL_FILEADMIN_DIR" -maxdepth 1 -type d ! -name "$(basename $TPL_FILEADMIN_DIR)" -print0 | xargs -I{} -0 cp -Rfpv {} "$ROOT_FILEADMIN_TPL_DIR"
SYNC_STATUS=$?
else
printf "$TIME_FORMAT_FILE: ERROR: The commands rsync and cp do not exist on your system.\n" | tee -a "$LOG_ERROR";
exit 1;
fi
if [[ $SYNC_STATUS -eq 0 ]]; then
printf "FINISHED: The rsync process has been finished successfully.\n" | tee -a "$LOG_DEPLOY";
else
printf "ERROR: Something went wrong on rsync, please see the errors above.\n" | tee -a "$LOG_ERROR";
exit 1;
fi
fi
########### GULP INIT: ###########
# Pull the shared gulp build setup (gulpfile + npm manifests) from the
# typo3-gulp-scss repo, then install its node dependencies.
printf "###########################\n" | tee -a "$LOG_DEPLOY";
printf "GULP INIT: \n" | tee -a "$LOG_DEPLOY";
if hash curl 2>/dev/null; then
TMP_REPO="https://raw.githubusercontent.com/iocron/typo3-gulp-scss/master";
curl -o "$TPL_DIR/gulpfile.js" "$TMP_REPO/gulpfile.js";
if [[ -f "$TPL_DIR/gulpfile.js" ]]; then
printf "$TIME_FORMAT_FILE: INFO: Downloaded $TPL_DIR/gulpfile.js\n" | tee -a "$LOG_DEPLOY";
else
printf "$TIME_FORMAT_FILE: WARNING: $TPL_DIR/gulpfile.js was not downloaded.\n" | tee -a "$LOG_ERROR";
fi
curl -o "$TPL_DIR/package.json" "$TMP_REPO/package.json";
if [[ -f "$TPL_DIR/package.json" ]]; then
printf "$TIME_FORMAT_FILE: INFO: Downloaded $TPL_DIR/package.json\n" | tee -a "$LOG_DEPLOY";
else
printf "$TIME_FORMAT_FILE: WARNING: $TPL_DIR/package.json was not downloaded.\n" | tee -a "$LOG_ERROR";
fi
curl -o "$TPL_DIR/package-lock.json" "$TMP_REPO/package-lock.json";
if [[ -f "$TPL_DIR/package-lock.json" ]]; then
printf "$TIME_FORMAT_FILE: INFO: Downloaded $TPL_DIR/package-lock.json\n" | tee -a "$LOG_DEPLOY";
else
printf "$TIME_FORMAT_FILE: WARNING: $TPL_DIR/package-lock.json was not downloaded.\n" | tee -a "$LOG_ERROR";
fi
# Only run npm when all three files arrived; otherwise skip silently
# (the warnings above already went to the error log).
if [[ -f "$TPL_DIR/gulpfile.js" && -f "$TPL_DIR/package.json" && -f "$TPL_DIR/package-lock.json" ]]; then
if hash npm 2>/dev/null; then
npm install | tee -a "$LOG_DEPLOY";
printf "$TIME_FORMAT_FILE: INFO: npm install done.\n" | tee -a "$LOG_DEPLOY";
else
printf "$TIME_FORMAT_FILE: ERROR: npm command doesn't exist on your system\n" | tee -a "$LOG_ERROR";
exit 1;
fi
fi
else
printf "$TIME_FORMAT_FILE: ERROR: THE COMMAND curl DOESN'T EXIST ON YOUR SYSTEM.\n" | tee -a "$LOG_ERROR";
exit 1;
fi
printf "\n" | tee -a "$LOG_DEPLOY";
| true
|
a72996afc7532cb479f95eea8756510505c8eb43
|
Shell
|
combet/CLstack2mass
|
/stack_install.sh
|
UTF-8
| 2,208
| 3.703125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash -xe
#
# A script to setup the Travis build environment with Miniconda
# and install the LSST stack into it. Restores a cached tarball of the
# whole miniconda tree when the cache metadata matches; otherwise performs
# a full install and repacks the cache.
MINICONDA_VERSION=${MINICONDA_VERSION:-"latest"}
CHANNEL=${CHANNEL:-"http://conda.lsst.codes/stack/0.13.0"}
CACHE_DIR="$HOME/miniconda.tarball"
CACHE_DIR_TMP="$CACHE_DIR.tmp"
CACHE_TARBALL_NAME="miniconda.tar.gz"
CACHE_TARBALL_PATH="$CACHE_DIR/$CACHE_TARBALL_NAME"
PACKAGES="gcc lsst-daf-persistence lsst-log lsst-afw lsst-skypix lsst-meas-algorithms lsst-pipe-tasks lsst-obs-cfht"
# Store a record of what's in the cached tarball
# This record allows us to automatically regenerate the tarball if the installed packages change.
rm -f "$HOME/info.txt"
cat > "$HOME/info.txt" <<-EOT
# -- cache information; autogenerated by ci/install.sh
MINICONDA_VERSION=$MINICONDA_VERSION
CHANNEL=$CHANNEL
PACKAGES=$PACKAGES
EOT
cat "$HOME/info.txt"
ls -l $HOME
# Bug fix: under `bash -xe`, a missing cache directory made this bare `ls`
# abort the whole script on a cold cache; tolerate its absence.
ls -l $CACHE_DIR || true
# Cache hit requires both the tarball and identical cache metadata.
if [ -f "$CACHE_TARBALL_PATH" ] && cmp "$HOME/info.txt" "$CACHE_DIR/info.txt"; then
	# Restore from cached tarball
	tar xzf "$CACHE_TARBALL_PATH" -C "$HOME"
	ls -l "$HOME"
	export PATH="$HOME/miniconda/bin:$PATH"
	source activate lsst
else
	# Miniconda install
	# Install Python 2.7 Miniconda
	wget https://repo.continuum.io/miniconda/Miniconda2-$MINICONDA_VERSION-Linux-x86_64.sh -O miniconda.sh
	bash miniconda.sh -b -p $HOME/miniconda
	export PATH="$HOME/miniconda/bin:$PATH"
	hash -r
	conda config --set always_yes yes --set changeps1 no
	conda update -q conda
	conda info -a
	# Stack install
	conda config --add channels "$CHANNEL"
	conda create -q -n lsst python=$TRAVIS_PYTHON_VERSION
	source activate lsst
	conda install -q $PACKAGES
	# Pack for caching. We pack here as Travis tends to time out if it can't pack
	# the whole directory in ~180 seconds.
	rm -rf "$CACHE_DIR" "$CACHE_DIR_TMP"
	mkdir "$CACHE_DIR_TMP"
	tar czf "$CACHE_DIR_TMP/$CACHE_TARBALL_NAME" -C "$HOME" miniconda
	mv "$HOME/info.txt" "$CACHE_DIR_TMP"
	mv "$CACHE_DIR_TMP" "$CACHE_DIR" # Atomic rename
	ls -l "$CACHE_DIR"
fi
# Install obs_cfht
#source eups-setups.sh
#setup daf_persistence
#git clone https://github.com/lsst/obs_cfht.git
#cd obs_cfht
#git checkout b7ab2c4
#setup -k -r .
#scons opt=3
#eups declare -r . -t travis
#cd ../
| true
|
cf2fe4a0ec208dbcd7d3706602f73b42b81e5b72
|
Shell
|
elves/posixsh
|
/pkg/spec/posix/export.test.sh
|
UTF-8
| 585
| 3.515625
| 4
|
[
"BSD-2-Clause",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
#### Exporting a variable to external commands
export foo=bar
sh -c 'echo $foo'
## stdout: bar
#### Exporting multiple variables
export foo=bar lorem=ipsum
sh -c 'echo $foo $lorem'
## stdout: bar ipsum
#### Exporting a variable without assigning to it
foo=bar
export foo
echo $foo
## stdout: bar
#### Exporting an unset variable doesn't set it
export foo
echo ${foo-unset}
## stdout: unset
#### export -p
export foo=bar
export -p
## stdout-regexp: (?m).*^export foo=bar$.*
#### export -p, exported but unset variable
export foo
export -p
## stdout-regexp: (?m).*^export foo$.*
| true
|
749c30d1549b38f6752ad178740d03d11a610956
|
Shell
|
fairliereese/lab_pipelines
|
/lr_splitseq_pipeline/sbatch_tc.sh
|
UTF-8
| 1,016
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
#SBATCH --job-name=tc
#SBATCH -n 32
#SBATCH -A SEYEDAM_LAB
#SBATCH -o processing_tables/%x.o%A
#SBATCH -e processing_tables/%x.e%A
#SBATCH --partition=standard
#SBATCH --time=24:00:00
#SBATCH --mail-type=START,END
#SBATCH --mem=64G
#SBATCH --mail-user=freese@uci.edu

# Run TranscriptClean on a mapped SAM file, after removing reads mapped
# to scaffold chromosomes.  Usage: sbatch sbatch_tc.sh <output_prefix>
module load samtools

opref=$1
sam=${opref}_mapped.sam
sam_noscaff=${opref}_sorted_no_scaff.sam

# references
tc_path=/dfs6/pub/freese/mortazavi_lab/bin/TranscriptClean/
genome=/dfs6/pub/freese/mortazavi_lab/ref/mm10/mm10.fa
sjs=/dfs6/pub/freese/mortazavi_lab/ref/mm10/mm10_SJs.tsv

# Remove reads that mapped to scaffold chromosomes (RNAME containing "_"),
# re-attach the header, and sort.
# BUGFIX: the original awk invocation was quoted as  awk ""'{...}'" "
# which passed a lone " " argument; awk treated that space as an input
# *file name* and ignored the pipe entirely. awk now reads stdin.
grep -v '^@' "$sam" | awk '{if($3 !~ "_") print $0}' | \
cat <(samtools view -H "$sam") - | samtools view -bS | \
samtools sort | samtools view -h > "$sam_noscaff"

# run TranscriptClean
time python "${tc_path}TranscriptClean.py" \
--sam "$sam_noscaff" \
--genome "$genome" \
--spliceJns "$sjs" \
-t 32 \
--canonOnly \
--primaryOnly \
--deleteTmp \
--outprefix "${opref}"
| true
|
88d51883d68c12bbf9ad16853f08486098b5af21
|
Shell
|
durgeshanaokar/windup-documentation
|
/legacy/scripts/windupDocCopyWiki.sh
|
UTF-8
| 11,104
| 2.5625
| 3
|
[] |
no_license
|
# Copy the windup.wiki asciidoc sources into this repo and rename them to
# .adoc under docs/topics.  Usage: scripts/windupDocCopyWiki.sh <wiki-dir>
if [ "$1" == "" ]; then
  echo "You must pass the path to the windup.wiki GitHub directories on the command line."
  echo "For example: scripts/windupDocCopyWiki.sh ~/GitRepos/windup.wiki"
  # BUGFIX: exit non-zero on the error path (was a bare `exit`, i.e. status 0).
  exit 1
fi
echo "Copy the original wiki files from $1 into the wiki-docs/ folder"
cp "$1"/*.asciidoc wiki-docs/
echo ""
echo "Copy and convert asciidoc files from $1 into docs/topics adoc files"
echo ""
# NOTE: "$1" is quoted throughout so paths containing spaces work.
cp "$1"/About-Rules.asciidoc docs/topics/About-Rules.adoc
cp "$1"/About-the-HOME-Variable.asciidoc docs/topics/About-the-HOME-Variable.adoc
cp "$1"/About-this-Wiki.asciidoc docs/topics/About-this-Wiki.adoc
cp "$1"/Architectural-Components.asciidoc docs/topics/Architectural-Components.adoc
cp "$1"/Core-Development-Guide.asciidoc docs/topics/Core-Development-Guide.adoc ## this topic is not used
cp "$1"/Dev-Add-Images-to-the-Wiki.asciidoc docs/topics/Dev-Add-Images-to-the-Wiki.adoc
cp "$1"/Dev-Bootstrap-Process.asciidoc docs/topics/Dev-Bootstrap-Process.adoc
cp "$1"/Dev-Build-from-Source.asciidoc docs/topics/Dev-Build-from-Source.adoc
cp "$1"/Dev-Classloading-Notes.asciidoc docs/topics/Dev-Classloading-Notes.adoc
cp "$1"/Dev-Concepts-and-Philosophy.asciidoc docs/topics/Dev-Concepts-and-Philosophy.adoc
cp "$1"/Dev-Connect-to-the-Graph-via-Rexster-or-Gremlin.asciidoc docs/topics/Dev-Connect-to-the-Graph-via-Rexster-or-Gremlin.adoc
cp "$1"/Dev-Create-A-Test-for-A-Java-based-Rule-Add-on.asciidoc docs/topics/Dev-Create-A-Test-for-A-Java-based-Rule-Add-on.adoc
cp "$1"/Dev-Create-Your-First-Java-based-Rule-Add-on.asciidoc docs/topics/Dev-Create-Your-First-Java-based-Rule-Add-on.adoc
cp "$1"/Dev-Create-the-JavaDoc.asciidoc docs/topics/Dev-Create-the-JavaDoc.adoc
cp "$1"/Dev-Customer-Portal-Tags.asciidoc docs/topics/Dev-Customer-Portal-Tags.adoc ## this topic is not used
cp "$1"/Dev-Debugging-and-Profiling.asciidoc docs/topics/Dev-Debugging-and-Profiling.adoc
cp "$1"/Dev-Decompiling.asciidoc docs/topics/Dev-Decompiling.adoc
cp "$1"/Dev-Dependencies.asciidoc docs/topics/Dev-Dependencies.adoc
cp "$1"/Dev-Development-Guidelines-and-Conventions.asciidoc docs/topics/Dev-Development-Guidelines-and-Conventions.adoc
cp "$1"/Dev-Documentation-Process.asciidoc docs/topics/Dev-Documentation-Process.adoc
cp "$1"/Dev-Execute-Built-from-Source.asciidoc docs/topics/Dev-Execute-Built-from-Source.adoc
cp "$1"/Dev-Frames-Extensions.asciidoc docs/topics/Dev-Frames-Extensions.adoc
cp "$1"/Dev-Get-the-Source-Code.asciidoc docs/topics/Dev-Get-the-Source-Code.adoc
cp "$1"/Dev-Git-Rebasing.asciidoc docs/topics/Dev-Git-Rebasing.adoc
cp "$1"/Dev-Internal-API-Features.asciidoc docs/topics/Dev-Internal-API-Features.adoc
cp "$1"/Dev-Jenkins-Setup.asciidoc docs/topics/Dev-Jenkins-Setup.adoc ## this topic is not used
cp "$1"/Dev-Logging.asciidoc docs/topics/Dev-Logging.adoc
cp "$1"/Dev-Port-WindRide-Functionality.asciidoc docs/topics/Dev-Port-WindRide-Functionality.adoc ## this topic is not used
cp "$1"/Dev-Project-Information.asciidoc docs/topics/Dev-Project-Information.adoc
cp "$1"/Dev-Project-Structure.asciidoc docs/topics/Dev-Project-Structure.adoc
cp "$1"/Dev-Release-Checklist.md docs/topics/Dev-Release-Checklist.md ## this topic is not used
cp "$1"/Dev-Release-Process.asciidoc docs/topics/Dev-Release-Process.adoc
cp "$1"/Dev-Rule-Documentation-Tracker.asciidoc docs/topics/Dev-Rule-Documentation-Tracker.adoc ## this topic is not used
cp "$1"/Dev-Rule-Metadata-and-API-Discrepancies.asciidoc docs/topics/Dev-Rule-Metadata-and-API-Discrepancies.adoc ## this topic is not used
cp "$1"/Dev-Startup-Configuration-Arguments.asciidoc docs/topics/Dev-Startup-Configuration-Arguments.adoc ## this topic is not used
cp "$1"/Dev-Submit-Code-Updates-to-the-Project.asciidoc docs/topics/Dev-Submit-Code-Updates-to-the-Project.adoc
cp "$1"/Dev-Team-Meeting.asciidoc docs/topics/Dev-Team-Meeting.adoc ## this topic is not used
cp "$1"/Dev-Test-Logging.asciidoc docs/topics/Dev-Test-Logging.adoc ## this topic is not used
## cp "$1"/Dev-Troubleshoot-Issues.asciidoc docs/topics/Dev-Troubleshoot-Issues.adoc
cp "$1"/Dev-Variables-Stack.asciidoc docs/topics/Dev-Variables-Stack.adoc
cp "$1"/Dev-Web-Instructions.asciidoc docs/topics/Dev-Web-Instructions.adoc ## this topic is not used
cp "$1"/Execute.asciidoc docs/topics/Execute.adoc
cp "$1"/Export-the-Report-for-Use-by-Spreadsheet-Programs.asciidoc docs/topics/Export-the-Report-for-Use-by-Spreadsheet-Programs.adoc
cp "$1"/Extend-the-Rules.asciidoc docs/topics/Extend-the-Rules.adoc ## this topic is not used
cp "$1"/Features.asciidoc docs/topics/Features.adoc
cp "$1"/Get-Involved.asciidoc docs/topics/Get-Involved.adoc
cp "$1"/Glossary.asciidoc docs/topics/Glossary.adoc
cp "$1"/Home.asciidoc docs/topics/Home.adoc ## this topic is not used
cp "$1"/Important-Links.asciidoc docs/topics/Important-Links.adoc
cp "$1"/Install-and-Configure-Maven.asciidoc docs/topics/Install-and-Configure-Maven.adoc
cp "$1"/Install.asciidoc docs/topics/Install.adoc
cp "$1"/Known-Issues.asciidoc docs/topics/Known-Issues.adoc
cp "$1"/Lab-Setup-Scripts.asciidoc docs/topics/Lab-Setup-Scripts.adoc ## this topic is not used
cp "$1"/Mavenize-Your-Application.asciidoc docs/topics/Mavenize-Your-Application.adoc
cp "$1"/Migration-Planning-Guide.asciidoc docs/topics/Migration-Planning-Guide.adoc ## this topic is not used
cp "$1"/Migration-Planning-Process.asciidoc docs/topics/Migration-Planning-Process.adoc ## this topic is not used
cp "$1"/News.asciidoc docs/topics/News.adoc ## this topic is not used
cp "$1"/OBSOLETE-Migration-Technology-Categories.asciidoc docs/topics/OBSOLETE-Migration-Technology-Categories.adoc ## this topic is not used
cp "$1"/OBSOLETE-Rules-Ops-Reporting-Classification.asciidoc docs/topics/OBSOLETE-Rules-Ops-Reporting-Classification.adoc
cp "$1"/OBSOLETE-Rules-Ops-Reporting-Hint.asciidoc docs/topics/OBSOLETE-Rules-Ops-Reporting-Hint.adoc
cp "$1"/OBSOLETE-Rules-Ops-Reporting-TypeReference.asciidoc docs/topics/OBSOLETE-Rules-Ops-Reporting-TypeReference.adoc
cp "$1"/OBSOLETE-Rules-Ops-Xml-XsltTransformation.asciidoc docs/topics/OBSOLETE-Rules-Ops-Xml-XsltTransformation.adoc
cp "$1"/OBSOLETE:-Rules-Rules-Operations.asciidoc docs/topics/OBSOLETE-Rules-Rules-Operations.adoc
cp "$1"/Optimize-Performance.asciidoc docs/topics/Optimize-Performance.adoc
cp "$1"/Performance-tuning.asciidoc docs/topics/Performance-tuning.adoc ## this topic is not used
cp "$1"/Processing-Overview.asciidoc docs/topics/Processing-Overview.adoc
cp "$1"/Report-Issues.asciidoc docs/topics/Report-Issues.adoc
cp "$1"/Review-the-Quickstarts.asciidoc docs/topics/Review-the-Quickstarts.adoc
cp "$1"/Review-the-Report.asciidoc docs/topics/Review-the-Report.adoc
cp "$1"/Rule-Metadata.asciidoc docs/topics/Rule-Metadata.adoc
cp "$1"/Rule-Phases.asciidoc docs/topics/Rule-Phases.adoc
cp "$1"/Rules-Available-Rules-Utilities.asciidoc docs/topics/Rules-Available-Rules-Utilities.adoc
cp "$1"/Rules-Basic-Rule-Execution-Flow-Patterns.asciidoc docs/topics/Rules-Basic-Rule-Execution-Flow-Patterns.adoc
cp "$1"/Rules-Create-a-Basic-Java-based-Rule-Add-on.asciidoc docs/topics/Rules-Create-a-Basic-Java-based-Rule-Add-on.adoc
cp "$1"/Rules-Create-a-Basic-XML-Rule.asciidoc docs/topics/Rules-Create-a-Basic-XML-Rule.adoc
cp "$1"/Rules-Create-a-Groovy-Rule-Add-on.asciidoc docs/topics/Rules-Create-a-Groovy-Rule-Add-on.adoc ## this topic is not used
cp "$1"/Rules-Create-an-Advanced-Ruleset.asciidoc docs/topics/Rules-Create-an-Advanced-Ruleset.adoc
cp "$1"/Rules-Create-Custom-Reports.asciidoc docs/topics/Rules-Create-Custom-Reports.adoc ## this topic is not used
##cp "$1"/Rules-Create-Java-Queries.asciidoc docs/topics/Rules-Create-Java-Queries.adoc
cp "$1"/Rules-Creating-Rule-Operations.asciidoc docs/topics/Rules-Creating-Rule-Operations.adoc
cp "$1"/Rules-Development-Guide.asciidoc docs/topics/Rules-Development-Guide.adoc ## this topic is not used
cp "$1"/Rules-Difference-Between-XML-based-and-Java-based-Rules.asciidoc docs/topics/Rules-Difference-Between-XML-based-and-Java-based-Rules.adoc
cp "$1"/Ruleset-Core.asciidoc docs/topics/Ruleset-Core.adoc ## this topic is not used
cp "$1"/Ruleset-Docs-Template.asciidoc docs/topics/Ruleset-Docs-Template.adoc ## this topic is not used
cp "$1"/Ruleset-Java-Archives-Identification.asciidoc docs/topics/Ruleset-Java-Archives-Identification.adoc ## this topic is not used
cp "$1"/Ruleset-Java-Basic-Ruleset.asciidoc docs/topics/Ruleset-Java-Basic-Ruleset.adoc
cp "$1"/Ruleset-Java-Classifications-and-Inline-Hints.asciidoc docs/topics/Ruleset-Java-Classifications-and-Inline-Hints.adoc
cp "$1"/Ruleset-Java-Code-Matching-Examples.asciidoc docs/topics/Ruleset-Java-Code-Matching-Examples.adoc ## this topic is not used
cp "$1"/Ruleset-Java-EE-Apps.asciidoc docs/topics/Ruleset-Java-EE-Apps.adoc
cp "$1"/Ruleset-Java-EE-Servers.asciidoc docs/topics/Ruleset-Java-EE-Servers.adoc
cp "$1"/Ruleset-Reporting.asciidoc docs/topics/Ruleset-Reporting.adoc
cp "$1"/Ruleset-Server-Configuration-Migration.asciidoc docs/topics/Ruleset-Server-Configuration-Migration.adoc
cp "$1"/Ruleset-XML.asciidoc docs/topics/Ruleset-XML.adoc
cp "$1"/Ruleset-XML-Ruleset-Element-Reference.asciidoc docs/topics/Ruleset-XML-Ruleset-Element-Reference.adoc ## this topic is not used
cp "$1"/Rules-Create-Your-First-Rule.asciidoc docs/topics/Rules-Create-Your-First-Rule.adoc
cp "$1"/Rules-Important-Links.asciidoc docs/topics/Rules-Important-Links.adoc
cp "$1"/Rules-Java-based-Rule-Structure.asciidoc docs/topics/Rules-Java-based-Rule-Structure.adoc
cp "$1"/Rules-Models.asciidoc docs/topics/Rules-Models.adoc
cp "$1"/Rules-Review-the-Existing-XML-Rules.asciidoc docs/topics/Rules-Review-the-Existing-XML-Rules.adoc
cp "$1"/Rules-Rule-Configuration.asciidoc docs/topics/Rules-Rule-Configuration.adoc ## this topic is not used
cp "$1"/Rules-Rule-Execution-Lifecycle.asciidoc docs/topics/Rules-Rule-Execution-Lifecycle.adoc
cp "$1"/Rules-Rulesets.asciidoc docs/topics/Rules-Rulesets.adoc
cp "$1"/Rules-Rules-Overview.asciidoc docs/topics/Rules-Rules-Overview.adoc ## this topic is not used
cp "$1"/Rules-Rule-Story-Points.asciidoc docs/topics/Rules-Rule-Story-Points.adoc
cp "$1"/Rules-Test-a-Basic-XML-Rule.asciidoc docs/topics/Rules-Test-a-Basic-XML-Rule.adoc
cp "$1"/Rules-Validate-Rulesets-Against-the-Schema.asciidoc docs/topics/Rules-Validate-Rulesets-Against-the-Schema.adoc
cp "$1"/Rules-Override-Rules.asciidoc docs/topics/Rules-Override-Rules.adoc
cp "$1"/Rules-XML-Rule-Construction.asciidoc docs/topics/Rules-XML-Rule-Construction.adoc ## this topic is not used
cp "$1"/Rules-XML-Rule-Perform-Action-Syntax.asciidoc docs/topics/Rules-XML-Rule-Perform-Action-Syntax.adoc
cp "$1"/Rules-XML-Rule-When-Condition-Syntax.asciidoc docs/topics/Rules-XML-Rule-When-Condition-Syntax.adoc
cp "$1"/Rule-XML-Ruleset-Examples-Match-on-XMLFile.asciidoc docs/topics/Rule-XML-Ruleset-Examples-Match-on-XMLFile.adoc ## this topic is not used
cp "$1"/Sande-ScratchPad.asciidoc docs/topics/Sande-ScratchPad.adoc ## this topic is not used
cp "$1"/_Sidebar.asciidoc docs/topics/_Sidebar.adoc ## this topic is not used
cp "$1"/Start.asciidoc docs/topics/Start.adoc ## this topic is not used
cp "$1"/System-Requirements.asciidoc docs/topics/System-Requirements.adoc ## this topic is not used
cp "$1"/User-Guide.asciidoc docs/topics/User-Guide.adoc ## this topic is not used
cp "$1"/What-is-it.asciidoc docs/topics/What-is-it.adoc
echo "Copy is complete!"
| true
|
131d5983ef21fa36532195e35fc00af676d18320
|
Shell
|
joiningdata/databio_sources
|
/uniprot/import_uniprot.sh
|
UTF-8
| 609
| 2.6875
| 3
|
[] |
no_license
|
# Timestamp the index import with the current UTC time.
STAMP=$(TZ=UTC date "+%FT%T")

# Fetch the current SwissProt flat-file release.
curl -LO ftp://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/complete/uniprot_sprot.dat.gz

# Keep only the record types we need: IDs, accessions, names, genes,
# taxonomy, cross-references, and record terminators.
gunzip -c uniprot_sprot.dat.gz | egrep '^(ID|AC|DE|GN|OX|DR|//)' >uniprot.filtered.dat

# One accession per line: drop the "AC" prefix, split "A; B; C" lists,
# and discard empties.
grep '^AC ' uniprot.filtered.dat |cut -c6- |sed $'s/; */\\\n/g' |grep -v '^$' >uniprot_accessions.txt

# Emit the databio import commands on stdout.
printf '%s\n' 'import new org.uniprot.acc "UniprotKB Accession"'
printf '%s\n' 'import urls org.uniprot.acc "https://uniprot.org" "https://www.uniprot.org/uniprot/%s"'
printf '%s\n' 'import ref org.uniprot.acc uniprot.ris'
printf 'import -d "%s" index org.uniprot.acc uniprot_accessions.txt\n' "$STAMP"
| true
|
ef04d86b3b7624bcf94e5d62ea80f00a74f87e48
|
Shell
|
Lenv12138/HLS-Cryptography-Accelerator
|
/zip
|
UTF-8
| 222
| 3.0625
| 3
|
[] |
no_license
|
#! /bin/bash
# Build submission.zip from a pristine checkout of the project components.
set -e

# Work from the directory this script lives in.
script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
cd "$script_dir"

components=(rsa sha aes cracker)
archive="submission.zip"

rm -rf "$archive"
# Drop untracked/ignored build artifacts so the archive is clean.
git clean -xdf "${components[@]}"
zip -r "$archive" "${components[@]}" README.md
md5sum "$archive"
| true
|
4a4c004a3ca950fcaef17c8f1ec75ceccb2242e1
|
Shell
|
rnestler/dotfiles
|
/bin/watchfile.sh
|
UTF-8
| 309
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
# Poll a file's change time (ctime) once a second and run a command
# whenever it changes.  Usage: watchfile.sh <file> <command> [args...]
set -e

function die {
  echo "usage: $0 file command" >&2
  exit 1
}

[ -f "$1" ] || die
file="$1"
shift

### Set initial time of file
LTIME=$(stat -c %Z "$file")

while true; do
  ATIME=$(stat -c %Z "$file")
  if [[ "$ATIME" != "$LTIME" ]]; then
    # BUGFIX: use "$@" instead of unquoted $* so the watched command's
    # arguments keep their original quoting (spaces, globs) intact.
    echo "$@"
    "$@" || true
    LTIME=$ATIME
  fi
  sleep 1
done
| true
|
3e9a467dab0d5d256279077c1e9a36563af18385
|
Shell
|
Kho-Dialga/configs
|
/.local/bin/game/dmenugame
|
UTF-8
| 1,965
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/sh
# Small dmenu script that launches games.
# Each line of $vidya pairs a display title with the launch command.
# NOTE(review): the title/command separator appears to be whitespace that
# `cut -d' '` relies on — presumably a tab character; confirm in the real
# file before reformatting this list.
# Game title # Command for launching the game
vidya="\
Touhou Puppet Dance Performance tpdp
Touhou 06: Embodiment of Scarlet Devil 2hu 06
Touhou 07: Perfect Cherry Blossom 2hu 07
Touhou 07.5: Inmaterial and Missing Power 2hu 075
Touhou 08: Imperishable Night 2hu 08
Touhou 09: Phantasmagoria of Flower View 2hu 09
Touhou 09.5: Shoot the Bullet 2hu 095
Touhou 10: Mountain of Faith 2hu 10
Touhou 10.5: Scarlet Weather Rhapsody 2hu 105
Touhou 11: Subterranean Animism 2hu 11
Touhou 12: Undefined Fantastic Object 2hu 12
Touhou 12.3: Hisotensoku 2hu 123
Touhou 12.5: Double Spoiler 2hu 125
Touhou 12.8: Fairy Wars 2hu 128
Touhou 13: Ten Desires 2hu 13
Touhou 13.5: Hopeless Masquerade 2hu 135
Touhou 14: Double Dealing Character 2hu 14
Touhou 14.5: Urban Legend in Limbo 2hu 145
Touhou 15: Legacy of Lunatic Kingdom 2hu 15
Touhou 15.5: Antimony of Common Flowers 2hu 155
Touhou 16: Hidden Star in Four Seasons 2hu 16
Touhou 16.5: Violet Detector 2hu 165
Touhou 17: Wily Beast and Weakest Creature 2hu 17
Touhou 18: Unconnected Marketeers steam steam://rungameid/1566410
Genshin Impact an-anime-game-launcher
Honkai Impact 3rd honkers-launcher
PS1 epsxe
PS2 pcsx2-qt
PS3 rpcs3
3DS citra-qt
osu! osu-wine
osu!Lazer osu-lazer
Geometry Dash steam steam://rungameid/322170
Ace Combat 7 steam steam://rungameid/502500
Europa Universalis IV steam steam://rungameid/236850
Yakuza 0 steam steam://rungameid/638970
Persona 5 Royal steam steam://rungameid/1687950
OMORI steam steam://rungameid/1150690
FEZ steam steam://rungameid/224760
Celeste steam steam://rungameid/504230
Super Meat Boy steam steam://rungameid/40800
"
# Show the title column in dmenu; abort if the user dismisses the menu.
choice="$(echo "$vidya" | cut -d' ' -f 1 | dmenu -l 10 -p "Gaming time 😎🥱" -i)" || exit 1
# Look up the chosen title's command field and execute it (backticks run
# the word-split command line).
`echo "$vidya" | grep "^$choice " | cut -d ' ' -f2-`
| true
|
316ff963c40887a3287ffae6e1c4e0b94e199f7d
|
Shell
|
h0wXD/Raspberry
|
/WebControlledSocket/etc/init.d/quadrelay
|
UTF-8
| 677
| 3.5625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#/etc/init.d/quadrelay
# SysV init script: on boot, force all four relay GPIO pins into a known
# (off) state and start the web-controlled socket PHP daemon.
#TODO after:
#sudo chmod +x /etc/init.d/quadrelay
#sudo update-rc.d -f quadrelay start 4
GPIO="/usr/local/bin/gpio"
# Highest relay pin index: pins 0..COUNT (4 relays total).
COUNT=3
case "$1" in
start)
echo "Starting relay" > /tmp/quadrelay
# write to pins; which makes sure the relay is kept off
# NOTE: the value is deliberately written *before* switching the pin to
# output mode, so the pin never glitches low (relay on) during setup.
# Presumably the relay board is active-low (1 = off) — confirm on hardware.
for i in $(seq 0 $COUNT);
do
$GPIO write $i 1 >> /tmp/quadrelay
$GPIO mode $i out >> /tmp/quadrelay
echo "Disabling auto start for relay $i" >> /tmp/quadrelay
done
# The PHP daemon shells out to tools in /usr/local/bin (e.g. gpio).
export PATH=$PATH:/usr/local/bin/
echo $PATH >> /tmp/quadrelay
php -f /home/pi/WebControlledSocket/startup.php >> /tmp/quadrelay
;;
stop)
# NOTE(review): stop only prints a message — it does not kill the PHP
# daemon or change pin state.
echo "Stopping relay"
;;
*)
echo "Usage: $0 {start|stop}"
exit 1
;;
esac
exit 0
| true
|
2d6bd894a5b0b3a4a76eed0f911cfce1bcfde0e9
|
Shell
|
SumGuyV5/FreeBSDScripts
|
/bash_setup.sh
|
UTF-8
| 858
| 4.21875
| 4
|
[] |
no_license
|
#!/bin/sh
# Set a user's login shell to bash on FreeBSD, offering to install bash
# (via pkg or ports) if it is missing.  Usage (as root): bash_setup.sh <user>
if [ "$(whoami)" != root ]; then
	echo "Please run as root"
	exit 1
fi
if [ "$1" = "" ] || [ "$1" = "-h" ]; then
	echo "Please run as root and pass the user name."
	exit 1
fi
if [ "$(uname)" = 'Linux' ]; then
	echo "This Script is for FreeBSD."
	exit 1
fi
if [ ! -x /usr/local/bin/bash ]; then
	echo "Please install bash."
	echo "Would you like to install 'bash'? [Y/N]"
	# BUGFIX: read -r so a stray backslash in the answer isn't mangled.
	read -r yesno
	case $yesno in
		[Yy]* );;
		[Nn]* ) exit 1;;
	esac
	echo "Would you like to use PKG? [Y/N]"
	read -r yesno
	case $yesno in
		[Yy]* ) pkg install -y bash ;;
		[Nn]* ) cd /usr/ports/shells/bash && make install clean;;
	esac
fi
BASH_USER=$1
if id "$BASH_USER" >/dev/null 2>&1; then
	echo "user does exist."
else
	echo "user does not exist."
	exit 1
fi
if [ -n "$BASH_USER" ]; then
	# BUGFIX: quote the user name so it survives unusual characters.
	chsh -s /usr/local/bin/bash "$BASH_USER"
fi
| true
|
5b1b8b52235ef93a942aafb99be5e55c5cf1436a
|
Shell
|
11philip22/scripts
|
/proxy_check.sh
|
UTF-8
| 105
| 2.5625
| 3
|
[
"MIT"
] |
permissive
|
# Connect to host $1 port $2: directly when reachable, otherwise tunnel
# through the ssh jump host "m1006".  Intended as an ssh ProxyCommand.
# BUGFIX: quote "$1"/"$2" so empty or whitespace-containing arguments
# don't word-split or silently vanish.
if nc -z "$1" "$2"; then
	exec nc "$1" "$2" # connect directly
else
	exec ssh m1006 nc "$1" "$2" # use proxy
fi
| true
|
745a779813637d5231a37fe8707b6c35d09b0ff5
|
Shell
|
yoshuawuyts/templates
|
/note/main
|
UTF-8
| 375
| 3.765625
| 4
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Create a dated, dash-separated markdown note file in the current dir.
# __dirname is kept under this name: the sourced shared.sh may read it.
__dirname=$(dirname "$(readlink -f "$0")")
. "$(readlink -f "$__dirname/../shared.sh")"

# Determine the note subject: from argv, or interactively when absent.
if [ -n "$1" ]; then
  note="$1"
else
  printf 'What is the note about?\n❯ '
  read -r note
  if [ -z "$note" ]; then
    printf 'no details provided, exiting\n'
    exit 1
  fi
fi

# Slugify (spaces → dashes) and prefix with today's ISO date.
note="$(echo "$note" | tr ' ' '-')"
now="$(date -I)"
touch "$now-$note.md"
| true
|
0e129f9e1911c84356f8c8709e137a78b14f0de5
|
Shell
|
Tetoronie/Scripting-and-Automation
|
/Bash/13/Class/blockApache-skel.bash
|
UTF-8
| 2,393
| 4.34375
| 4
|
[] |
no_license
|
#!/bin/bash
# Script to view Apache logs via a GUI and generate a block list for bad IPs.
function which_ruleset() {
	# Show a yad combo box and store its raw "field|"-delimited result in
	# the global ${the_ruleset}; the caller (view_logs) parses out field 1.
	the_ruleset=$(yad --form --title="Select Ruleset" --field="Select Ruleset:CB" "IPTables\!Cisco\!Windows Firewall")
}
function console_status() {
	# Must be the first statement: capture the exit code of the yad
	# yes/no dialog the caller displayed immediately before calling us.
	local rc=$?
	if [[ ${rc} -eq 10 ]]; then
		# "Yes" button (code 10): show the log picker again.
		view_logs
	else
		# "No" (or dialog closed): leave the program.
		exit 0
	fi
}
function view_logs() {
  # Present the apache access log screen; $(cat parse.apache) is
  # deliberately unquoted so each field becomes a yad row cell.
  block_ip=$(yad --height=300 --list --checklist --width=800 --column="Select IP" --column=ip --column=date --column=method \
  --column=status --column=fsize --column=uri $(cat parse.apache) 2>/dev/null)
  # Count the number of selected rows ("|"-separated yad output).
  num_rules=$(echo ${block_ip} | awk -F"|" ' { print NF } ')
  # If no rules are selected, the value of ${num_rules} will be 0.
  if [[ ${num_rules} -eq 0 ]]
  then
    # Prompt for finding no IPs selected
    yad --text="No IPs were selected. Would you like to view the logs again?" \
    --button="Yes":10 --button="No":20
    console_status
  else
    # Get the IP address (field $2) from the yad output and format the results into a firewall drop rule
    which_ruleset
    ruleset=$(echo "${the_ruleset}" | awk -F"|" ' { print $1 } ')
    case ${ruleset} in
      "IPTables")
        echo "${ruleset}"
        # Get the IP address field $2 from the ruleset
        the_rules=$(echo "${block_ip}" | sort -u | awk -F"|" ' { print "iptables -A INPUT " $2 " -j DROP\n"} ')
        ;;
      "Cisco")
        # BUGFIX: emit valid IOS syntax "access-list" (was "iaccess-list").
        the_rules=$(echo "${block_ip}" | sort -u | awk -F"|" ' { print "access-list 1 deny host " $2 "\n"} ')
        ;;
      "Windows Firewall")
        the_rules=$(echo "${block_ip}" | sort -u | awk -F"|" ' { print "netsh advfirewall firewall add rule name=\"IP BLOCK\" dir=in interface=any action=block remoteip="$2"/32\n"} ')
        ;;
      *)
        echo "Invalid Option"
        sleep 2
        view_logs
        ;;
    esac
    # File save dialog
    save_ip=$(yad --file --directory --save --width=600 --height=400)
    # Save the IPs to the file specified; |& tee also echoes them to the
    # console (and pipes stderr too), unlike a plain > redirect.
    echo "${the_rules}" |& tee ${save_ip}
    # Prompt to view the logs again.
    yad --text="Would you like to view the logs again?" \
    --button="Yes":10 \
    --button="No":20
    # Call function to determine whether to show the log console again or exit
    console_status
  fi
} # end view_logs function
# Display the main menu
view_logs
| true
|
de74afd063e0ebe852107da69d10c0282a43fdfd
|
Shell
|
hmgu-itg/Genetics-of-Osteoarthritis-1
|
/GWAMA.sh
|
UTF-8
| 12,939
| 3.125
| 3
|
[] |
no_license
|
# GWAMA.sh
# 30th March 2020
# Path to working directory
wd=$1
# Data paths
input=$2
# Install latest version of GWAMA:
mkdir $wd/GWAMA
mkdir $wd/GWAMA/gwama
cd $wd/GWAMA/gwama
wget http://www.geenivaramu.ee/tools/GWAMA_v2.2.2.zip
unzip GWAMA_v2.2.2.zip
make
# Excecutable is called: $wd/GWAMA/gwama/GWAMA
# Create a list of the files for each phenotype to input into GWAMA
for PHENO in AllOA HipOA KneeOA KneeHipOA HandOA ThumbOA FingerOA SpineOA THR TKR TJR ; do
echo "$input/Female_${PHENO}/GO.FILTER.GW.final.meta.results.Female_${PHENO}.p1e-06.n2.MAF0.0001.txt.gz F" > ${PHENO}.gwamafile.in
echo "$input/Male_${PHENO}/GO.FILTER.GW.final.meta.results.Male_${PHENO}.p1e-06.n2.MAF0.0001.txt.gz M" >> ${PHENO}.gwamafile.in
done
# Notes for GWAMA
# https://genomics.ut.ee/en/tools/gwama-tutorial
# Input requirements are:
# 1) MARKERNAME – snp name
# 2) EA – effect allele
# 3) NEA – non effect allele
# 4) OR - odds ratio
# 5) OR_95L - lower confidence interval of OR
# 6) OR_95U - upper confidence interval of OR
# In case of quantitative trait:
# 4) BETA – beta
# 5) SE – std. error
# Study files might also contain columns:
# 7) N - number of samples
# 8) EAF – effect allele frequency
# 9) STRAND – marker strand (if the column is missing then program expects all markers being on positive strand)
# 10) IMPUTED – if marker is imputed or not (if the column is missing then all markers are counted as directly genotyped ones)
# Print the commands into a single file to run the analysis:
for PHENO in AllOA HipOA KneeOA KneeHipOA HandOA ThumbOA FingerOA SpineOA THR TKR TJR ; do
echo "$wd/GWAMA/gwama/GWAMA --filelist ${PHENO}.gwamafile.in --sex --name_marker CPTID --name_or OR --name_or_95l OR_L95 --name_or_95u OR_U95 --output ${PHENO}.gwama.out" >>gwama.cmds
done
chmod 770 gwama.cmds
bsub -G t144_nargwas -M20000 -R 'select[mem>20000] rusage[mem=20000]' -o gwama.o -e gwama.e ./gwama.cmds
############################################################################
## Tabix the files to produce regional plots ##
############################################################################
#!/bin/sh
# Path to hgi profile
hgi=$3
./$hgi
# Path to team144 software
team=$4
export PATH=$team/bin:$PATH
module add $(module avail 2>&1 | grep '/tabix/' | grep latest | sed 's/.latest.//')
#### To prepare the bgzipped files
#Path to tabix directory
tabix=$5
#Females
for PHENO in AllOA HipOA KneeOA KneeHipOA HandOA ThumbOA FingerOA SpineOA THR TKR TJR ; do cat <(echo \#"$(zcat $input/Female_${PHENO}/GO.FILTER.GW.final.meta.results.Female_${PHENO}.p1e-06.n2.MAF0.0001.txt.gz | head -1)") <(zcat $input/Female_${PHENO}/GO.FILTER.GW.final.meta.results.Female_${PHENO}.p1e-06.n2.MAF0.0001.txt.gz | tail -n+2 | sort -k20,20n -k21,21n) | bgzip > $tabix/GO-meta1.Female_${PHENO}.txt.bgz & #; done
done
#Males
for PHENO in AllOA HipOA KneeOA KneeHipOA HandOA ThumbOA FingerOA SpineOA THR TKR TJR ; do cat <(echo \#"$(zcat $input/Male_${PHENO}/GO.FILTER.GW.final.meta.results.Male_${PHENO}.p1e-06.n2.MAF0.0001.txt.gz | head -1)") <(zcat $input/Male_${PHENO}/GO.FILTER.GW.final.meta.results.Male_${PHENO}.p1e-06.n2.MAF0.0001.txt.gz | tail -n+2 | sort -k20,20n -k21,21n) | bgzip > $tabix/GO-meta1.Male_${PHENO}.txt.bgz & #; done
done
####### Then to tabix:
for PHENO in AllOA HipOA KneeOA KneeHipOA HandOA ThumbOA FingerOA SpineOA THR TKR TJR ; do tabix -c "#" -s 20 -b 21 -e 21 $tabix/GO-meta1.Female_${PHENO}.txt.bgz &
done
for PHENO in AllOA HipOA KneeOA KneeHipOA HandOA ThumbOA FingerOA SpineOA THR TKR TJR ; do tabix -c "#" -s 20 -b 21 -e 21 $tabix/GO-meta1.Male_${PHENO}.txt.bgz &
done
#Produce some regional plots for ALDH1A2
#Path to regional plots output directory
rp=$6
mkdir $rp
cd $rp
# Prepare a list of the regions to plot:
# prepare the input files for locus zoom:
# (1) GWAS summary stats
mkdir $rg/input
cat ALHD1A2.list.txt | tail -n+2 | while read PHENO CPTID EA NEA SNP CHR POS rsid Window1Mb Window2Mb ; do echo " tabix -h $tabix/GO-meta1.Female_${PHENO}.txt.bgz ${Window1Mb} | cut -f1,8-10,20,21 | sed '/^#/ d' | awk 'BEGIN{OFS=\"\\t\"; print \"CPTID\",\"BETA\",\"SE\",\"P\",\"chromosome\",\"position\",\"rsid\"}{if(\$1==\$1){newid=\"chr\"\$5\":\"\$6} print \$0,newid;}' > input/${PHENO}_Female_${CPTID}_1Mbwindow.input" >> Prepare.input.cmds ; done
cat ALHD1A2.list.txt | tail -n+2 | while read PHENO CPTID EA NEA SNP CHR POS rsid Window1Mb Window2Mb ; do echo " tabix -h $tabix/GO-meta1.Female_${PHENO}.txt.bgz ${Window2Mb} | cut -f1,8-10,20,21 | sed '/^#/ d' | awk 'BEGIN{OFS=\"\\t\"; print \"CPTID\",\"BETA\",\"SE\",\"P\",\"chromosome\",\"position\",\"rsid\"}{if(\$1==\$1){newid=\"chr\"\$5\":\"\$6} print \$0,newid;}' > input/${PHENO}_Female_${CPTID}_2Mbwindow.input" >> Prepare.input.cmds ; done
cat ALHD1A2.list.txt | tail -n+2 | while read PHENO CPTID EA NEA SNP CHR POS rsid Window1Mb Window2Mb ; do echo " tabix -h $tabix/GO-meta1.Male_${PHENO}.txt.bgz ${Window1Mb} | cut -f1,8-10,20,21 | sed '/^#/ d' | awk 'BEGIN{OFS=\"\\t\"; print \"CPTID\",\"BETA\",\"SE\",\"P\",\"chromosome\",\"position\",\"rsid\"}{if(\$1==\$1){newid=\"chr\"\$5\":\"\$6} print \$0,newid;}' > input/${PHENO}_Male_${CPTID}_1Mbwindow.input" >> Prepare.input.cmds ; done
cat ALHD1A2.list.txt | tail -n+2 | while read PHENO CPTID EA NEA SNP CHR POS rsid Window1Mb Window2Mb ; do echo " tabix -h $tabix/GO-meta1.Male_${PHENO}.txt.bgz ${Window2Mb} | cut -f1,8-10,20,21 | sed '/^#/ d' | awk 'BEGIN{OFS=\"\\t\"; print \"CPTID\",\"BETA\",\"SE\",\"P\",\"chromosome\",\"position\",\"rsid\"}{if(\$1==\$1){newid=\"chr\"\$5\":\"\$6} print \$0,newid;}' > input/${PHENO}_Male_${CPTID}_2Mbwindow.input" >> Prepare.input.cmds ; done
./$hgi
export PATH=$team/bin:$PATH
module add $(module avail 2>&1 | grep '/tabix/' | grep latest | sed 's/.latest.//')
chmod 770 Prepare.input.cmds
bsub -G t144_nargwas -o Prepare.input.cmds.o -e Prepare.input.cmds.e ./Prepare.input.cmds
# (2) The input SNPlist for the LD files
cd $rp/input
for FILE in `ls *1Mbwindow.input` ; do cat ${FILE} | awk 'BEGIN{OFS="\t"}{if($1==$1) print $7,$5,$6;}' | sed 's/rsid/snp/g'| sed 's/chromosome/chr/g' | sed 's/position/pos/g' > ${FILE}.snp_pos.txt ; done
for FILE in `ls *2Mbwindow.input` ; do cat ${FILE} | awk 'BEGIN{OFS="\t"}{if($1==$1) print $7,$5,$6;}' | sed 's/rsid/snp/g'| sed 's/chromosome/chr/g' | sed 's/position/pos/g' > ${FILE}.snp_pos.txt ; done
# Prepare running commands for the Locus Zoom script:
# Files to use for LD calculation
ld=$7
cd $rp/input
cat ../ALHD1A2.list.txt | tail -n+2 | while read PHENO CPTID EA NEA SNP CHR POS rsid Window1Mb Window2Mb ; do echo "./locusZoomForGO_1MbWindow-Loz.sh ${PHENO}_Female_${CPTID}_1Mbwindow.input ${rsid} $ld/chr${CHR}-noduplicates " >> GO_ALDH1A2.1MbWindow.LZ.cmds ; done
cat ../ALHD1A2.list.txt | tail -n+2 | while read PHENO CPTID EA NEA SNP CHR POS rsid Window1Mb Window2Mb ; do echo "./locusZoomForGO_2MbWindow-Loz.sh ${PHENO}_Female_${CPTID}_2Mbwindow.input ${rsid} $ld/chr${CHR}-noduplicates " >> GO_ALDH1A2.2MbWindow.LZ.cmds ; done
cat ../ALHD1A2.list.txt | tail -n+2 | while read PHENO CPTID EA NEA SNP CHR POS rsid Window1Mb Window2Mb ; do echo "./locusZoomForGO_1MbWindow-Loz.sh ${PHENO}_Male_${CPTID}_1Mbwindow.input ${rsid} $ld/chr${CHR}-noduplicates " >> GO_ALDH1A2.1MbWindow.LZ.cmds ; done
cat ../ALHD1A2.list.txt | tail -n+2 | while read PHENO CPTID EA NEA SNP CHR POS rsid Window1Mb Window2Mb ; do echo "./locusZoomForGO_2MbWindow-Loz.sh ${PHENO}_Male_${CPTID}_2Mbwindow.input ${rsid} $ld/chr${CHR}-noduplicates " >> GO_ALDH1A2.2MbWindow.LZ.cmds ; done
chmod 770 *cmds
./GO_ALDH1A2.1MbWindow.LZ.cmds
./GO_ALDH1A2.2MbWindow.LZ.cmds
#########################################################################################
# Extract the information for each of the 100 GO signals for the appropriate phenotype
#########################################################################################
# Working dierctory
GO_signals=$8
cd $GO_signals
cat Signals.txt | tail -n+2 | while read PHENO CPTID EA NEA SNP CHR POS rsid Window1Mb Window2Mb ; do echo "zgrep ${CPTID} ${PHENO}.gwama.out.out " >> GWAMAsexResultsfor100GOindepSignals.cmds ; done
chmod 770 *cmds
./GWAMAsexResultsfor100GOindepSignals.cmds >> GWAMAsexResultsfor100GOindepSignals.results
# Plot a qq for the Pdiff, Phet and a Manhat for th meta-analysis of the sex specific results only
cd $GO_signals
# Column header is:
head -n 1 TKR.gwama.out.out | sed 's/\t/\n/g' | awk '{print NR, $0}'
# get a list of the files:
ls *out.out | sed 's/.out.out//g' > GWAMA.filelist.txt
more GWAMA.filelist.txt
# Add in chromosome and position from the first colum (3:52236363_C_T)
for FILE in `cat GWAMA.filelist.txt` ; do cat ${FILE}.out.out | awk '{if(NR >1){split($1,a,"_"); chrpos=a[1]; print chrpos,$0;}}' | awk 'BEGIN{OFS="\t"; print "chrpos","rs_number","reference_allele","other_allele","eaf","OR","OR_se","OR_95L","OR_95U","z","p-value","_-log10_p-value","q_statistic","q_p-value","i2","n_studies","n_samples","effects","male_eaf","male_OR","male_OR_se","male_OR_95L","male_OR_95U","male_z","male_p-value","male_n_studies","male_n_samples","female_eaf","female_OR","female_OR_se","female_OR_95L","female_OR_95U","female_z","female_p-value","female_n_studies","female_n_samples","gender_differentiated_p-value","gender_heterogeneity_p-value","CHR","POS"}{if(NR >=1){split($1,a,":"); chr=a[1]; pos=a[2]; print chrpos,$0,chr,pos;}}' | gzip > ${FILE}.plotting.gz ; done
for file in `ls *.plotting.gz ` ; do zcat ${file} | wc -l ; done
ls *.plotting.gz
for file in `cat GWAMA.filelist.txt` ; do wc -l ${file}.out.out ; done
grep NA AllOA.gwama.out.out #<-no nothing printed to screen!
grep "\-9" AllOA.gwama.out.out > temp
head temp
ls *.plotting.gz > GWAMA.plotfilelist.txt
for FILE in `cat GWAMA.plotfilelist.txt` ; do zgrep -v "\-9" ${FILE} | gzip > ${FILE}.minus9sout.gz ; done
ls *.minus9sout.gz > GWAMA.plotfilelist.txt
for FILE in `cat GWAMA.plotfilelist.txt` ; do zcat ${FILE} | wc -l ; done
# Running on gen1 interactive
# Path to manqq (Manhattan/QQ plotting tool)
# NOTE(review): $9 is the 9th positional argument of the enclosing script —
# this fragment assumes the caller passes the run_manqq directory there; confirm.
run_manqq=$9
# Plots below use the default genome-wide significance threshold 5e-8
cat GWAMA.plotfilelist.txt | while read FILE ; do ./Rscript $run_manqq/run_manqq.R --chr-col CHR --pos-col POS --pval-col gender_differentiated_p-value --image pdf --a2 reference_allele --a1 other_allele --build 37 --af-col eaf $FILE $FILE.gender_differentiated.plot ; done
cat GWAMA.plotfilelist.txt | while read FILE ; do ./Rscript $run_manqq/run_manqq.R --chr-col CHR --pos-col POS --pval-col gender_heterogeneity_p-value --image pdf --a2 reference_allele --a1 other_allele --build 37 --af-col eaf $FILE $FILE.gender_heterogeneity.plot ; done
cat GWAMA.plotfilelist.txt | while read FILE ; do ./Rscript $run_manqq/run_manqq.R --chr-col CHR --pos-col POS --pval-col p-value --image pdf --a2 reference_allele --a1 other_allele --build 37 --af-col eaf $FILE $FILE.metanalysis.plot ; done
cat GWAMA.plotfilelist.txt | while read FILE ; do ./Rscript $run_manqq/run_manqq.R --chr-col CHR --pos-col POS --pval-col male_p-value --image pdf --a2 reference_allele --a1 other_allele --build 37 --af-col eaf $FILE $FILE.Male-metanalysis.plot ; done
cat GWAMA.plotfilelist.txt | while read FILE ; do ./Rscript $run_manqq/run_manqq.R --chr-col CHR --pos-col POS --pval-col female_p-value --image pdf --a2 reference_allele --a1 other_allele --build 37 --af-col eaf $FILE $FILE.Female-metanalysis.plot ; done
#1.3e-8
cat GWAMA.plotfilelist.txt | while read FILE ; do ./Rscript $run_manqq/run_manqq.R --chr-col CHR --pos-col POS --pval-col gender_differentiated_p-value --sig 1.3e-8 --image pdf --a2 reference_allele --a1 other_allele --build 37 --af-col eaf $FILE $FILE.gender_differentiated_pval-1-3e8.plot ; done
cat GWAMA.plotfilelist.txt | while read FILE ; do ./Rscript $run_manqq/run_manqq.R --chr-col CHR --pos-col POS --pval-col gender_heterogeneity_p-value --sig 1.3e-8 --image pdf --a2 reference_allele --a1 other_allele --build 37 --af-col eaf $FILE $FILE.gender_heterogeneity_pval-1-3e8.plot ; done
cat GWAMA.plotfilelist.txt | while read FILE ; do ./Rscript $run_manqq/run_manqq.R --chr-col CHR --pos-col POS --pval-col p-value --sig 1.3e-8 --image pdf --a2 reference_allele --a1 other_allele --build 37 --af-col eaf $FILE $FILE.metanalysis_pval-1-3e8.plot ; done
cat GWAMA.plotfilelist.txt | while read FILE ; do ./Rscript $run_manqq/run_manqq.R --chr-col CHR --pos-col POS --pval-col male_p-value --sig 1.3e-8 --image pdf --a2 reference_allele --a1 other_allele --build 37 --af-col eaf $FILE $FILE.Male-metanalysis_pval-1-3e8.plot ; done
cat GWAMA.plotfilelist.txt | while read FILE ; do ./Rscript $run_manqq/run_manqq.R --chr-col CHR --pos-col POS --pval-col female_p-value --sig 1.3e-8 --image pdf --a2 reference_allele --a1 other_allele --build 37 --af-col eaf $FILE $FILE.Female-metanalysis_pval-1-3e8.plot ; done
| true
|
5b9889c4fb98378f131c1e60a0bf001dc1dc9bc0
|
Shell
|
flint-stone/magic-docker-grader
|
/msgdropsinglefailure_grader.sh
|
UTF-8
| 1,217
| 3.265625
| 3
|
[] |
no_license
|
#echo "============================================"
#echo "Message Drop Single Failure Scenario"
#echo "============================"
# Grader for the "message drop, single failure" scenario.
# Usage: msgdropsinglefailure_grader.sh <log-file>
# Scoring: 15 pts for correct membership joins + 15 pts for detecting the
# failed node's removal. 30 -> full credit, 0 -> none, otherwise partial.
grade=0
# The first line of the log must be the magic number 131 for this scenario.
# (String compare also rejects non-numeric first lines, which the old
# numeric -ne test silently let through.)
magic=$(head -1 "$1")
if [ "${magic}" != "131" ]
then
    echo "${grade}"
    exit
fi
# Count distinct join records (joiner in field 2, membership in fields 4-7).
joincount=$(grep joined "$1" | cut -d" " -f2,4-7 | sort -u | wc -l)
if [ "$joincount" -eq 100 ]; then
    grade=$((grade + 15))
else
    # Fallback check: every joiner must have observed 9 distinct other peers.
    joinfrom=$(grep joined "$1" | cut -d" " -f2 | sort -u)
    cnt=0
    for i in $joinfrom
    do
        jointo=$(grep joined "$1" | grep "^ $i" | cut -d" " -f4-7 | grep -v "$i" | sort -u | wc -l)
        if [ "$jointo" -eq 9 ]; then
            cnt=$((cnt + 1))
        fi
    done
    if [ "$cnt" -eq 10 ]; then
        grade=$((grade + 15))
    fi
fi
# The node reported failed must show up in at least 9 "removed" records.
failednode=$(grep "Node failed at time" "$1" | sort -u | awk '{print $1}')
if [ -n "$failednode" ]; then
    # Guard: with an empty pattern, grep would match every line and
    # hand out 15 points for free.
    failcount=$(grep removed "$1" | sort -u | grep -- "$failednode" | wc -l)
else
    failcount=0
fi
if [ "$failcount" -ge 9 ]; then
    grade=$((grade + 15))
fi
#echo $grade
# Emit the Coursera-style JSON verdict.
if [ "$grade" -eq 30 ]; then
    echo {\"fractionalScore\": 0.33, \"feedback\": \"Congratulations! You got it right!\"}
elif [ "$grade" -eq 0 ]; then
    # Fixed: the zero-score message used to start with "Congratulations!".
    echo {\"fractionalScore\": 0, \"feedback\": \"Sorry, your answer was incorrect.\"}
else
    echo {\"fractionalScore\": 0.11, \"feedback\": \"Almost there! You got some test cases right.\"}
fi
| true
|
e2c7be9e098c8ffb3e939582a8ba1f98f2bcf10b
|
Shell
|
FernandoBasso/dotfiles
|
/bin/mntserv-vbsmidia.bash
|
UTF-8
| 1,682
| 4.0625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Mount/unmount helper for the three media-server shares listed below.
# NOTE(review): plain `mount <dir>` only works if each directory has a
# matching /etc/fstab entry — confirm.
my_dirs=(~/ServDev ~/ServProd ~/ServTemp)
#
# TODO: Check if `mountpoint` command is available on the system.
#
usage() {
  # Show how to invoke this script (basename only, no leading path).
  printf 'Usage: %s --(on|off)\n' "${0##*/}"
}
do_mount() {
  # Mount every share in my_dirs that is not already an active mount point.
  local share
  for share in "${my_dirs[@]}"; do
    if mountpoint -q "$share"; then
      echo "~/${share##*/} already mounted."
      continue
    fi
    echo -n "Mounting ~/${share##*/}..."
    mount "$share"
    echo ' Done!'
  done
}
do_umount() {
  # Unmount every share in my_dirs that is currently mounted.
  local share
  for share in "${my_dirs[@]}"; do
    if ! mountpoint -q "$share"; then
      echo "~/${share##*/} is not mounted."
      continue
    fi
    echo -n "Umounting ~/${share##*/}..."
    umount "$share"
    echo ' Done!'
  done
}
# Exactly one mode flag is required; with no argument, print usage and fail.
if [[ -z $1 ]] ; then
usage
exit 1
fi
# Dispatch on the requested mode. Note: an unrecognised argument prints
# usage but (unlike the no-argument case) exits 0.
case $1 in
'--on')
do_mount
;;
'--off')
do_umount
;;
*)
usage
;;
esac
#
# In the shell, 0 is truthy, all other values indicate some form of falsiness
# or type of error.
# http://stackoverflow.com/questions/2933843/why-0-is-true-but-false-is-1-in-the-shell
#
# $ bash mntserv --on
# ~/ServDev already mounted.
# ~/ServProd already mounted.
# ~/ServTemp already mounted.
#
# ~/bin/ → 09:01:03 ✓
# $ bash mntserv --off
# Umounting ~/ServDev... Done!
# Umounting ~/ServProd... Done!
# Umounting ~/ServTemp... Done!
#
# ~/bin/ → 09:01:30 ✓
# $ bash mntserv --on
# Mounting ~/ServDev... Done!
# ~/ServProd already mounted.
# Mounting ~/ServTemp... Done!
#
# ~/bin/ → 09:01:40 ✓
# $ bash mntserv --off
# Umounting ~/ServDev... Done!
# Umounting ~/ServProd... Done!
# ~/ServTemp is not mounted.
| true
|
9c747f7ebe02c93f895a1812f4a6af3f73f9fcad
|
Shell
|
seObando19/holberton-system_engineering-devops
|
/0x0C-web_server/0-transfer_file
|
UTF-8
| 314
| 3.78125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Bash script that transfers a file from our client to a server
# Usage: 0-transfer_file PATH_TO_FILE IP USERNAME PATH_TO_SSH_KEY
expectedValue=4
# Use $# directly (the old code copied it into a one-element array) and
# quote every expansion so paths containing spaces survive intact.
if [ "$#" -eq "$expectedValue" ]
then
    # -o StrictHostKeyChecking=no: do not prompt to accept the host key.
    scp -i "$4" -o "StrictHostKeyChecking=no" "$1" "$3@$2":~
else
    echo "Usage: 0-transfer_file PATH_TO_FILE IP USERNAME PATH_TO_SSH_KEY"
    echo "$#"
fi
| true
|
2f1f3ea1f1273e1fc330d7ae52a9a6a5702e02bb
|
Shell
|
trussworks/brainsik-ott-concourse-ci
|
/set-pipelines.sh
|
UTF-8
| 227
| 2.875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# Sets all pipelines in the pipelines directory.
#
# Expects to be run from the repo root: reads ./pipelines/*.yml, and
# ../credentials.yml relative to the pipelines directory for secrets.
set -ex
# Concourse CLI invocation; the "demo" target must already be logged in.
fly="fly -t demo"
# Pipeline name = file name minus ".yml"; -n skips the confirmation prompt.
cd pipelines; for pipeline in *.yml; do
$fly set-pipeline -p ${pipeline/.yml/} -c $pipeline -l ../credentials.yml -n
done; cd ..
| true
|
05f94ba35000e49b30b1414ebe813f1fb1c1cef3
|
Shell
|
krasimirvasilev1/nbu-masters
|
/raspberry/rasp_check.sh
|
UTF-8
| 2,131
| 3.875
| 4
|
[] |
no_license
|
#!/bin/bash
set -x
function log () {
  # Prefix the message with a "MM-DD-YYYY HH:MM:SS:" timestamp.
  echo "$(date +"%m-%d-%Y %T"): $1"
}
function check_cloud_health () {
# Connectivity probe: exit status 0 iff google.com answers.
# --spider: check only, download nothing; -q: no output.
wget -q --spider http://google.com
}
# Send the picture to the cloud recognition endpoint; on HTTP-style
# status 200 in the JSON reply, cache the plate and open the barrier.
# $1 - path to the picture, $2 - cloud endpoint URL
function cloud_check () {
local pic=$1
local cloud_url=$2
# POST {"image": "<base64>"} and capture the JSON response body.
local cloud_response=`(echo -n '{"image": "'; base64 $pic; echo '"}') | curl -sH "Content-Type: application/json" -d @- $cloud_url`
# NOTE(review): response is assumed to be a JSON document with .plate,
# .status and .timestamp fields — confirm against the lambda handler.
local plate=`echo "$cloud_response" | jq -r . | jq '.plate'`
local status=`echo "$cloud_response" | jq -r . | jq '.status'`
# timestamp is extracted but not used below.
local timestamp=`echo "$cloud_response" | jq -r . | jq '.timestamp'`
log "`echo $cloud_response | jq -r .`"
if [[ "$status" -eq 200 ]]; then
add_to_cache_file $plate
raise_access_barrier
fi
}
function recognise_plate () {
    local pic=$1
    # Run the ALPR container and emit candidate plate strings on stdout,
    # one per line, so callers can capture them with $( ).
    # Fixes: `return <string>` is invalid (return only accepts a number),
    # and -t cannot allocate a TTY when invoked from a command substitution.
    # NOTE(review): the stray '+' argument is kept as-is from the original
    # command line — verify against the alpr CLI syntax.
    docker run --rm -v "$(pwd)":/data:ro krasimirvasilev1/nbu-alpr -j -c eu + "$pic" | jq -r '.results[].candidates[].plate'
}
function raise_access_barrier () {
# Grant access for 20 seconds, then close the barrier again.
access_allowed
sleep 20
access_denied
}
function access_denied () {
  # Denied state: drive GPIO17 low and GPIO18 high.
  printf '0\n' > /sys/class/gpio/gpio17/value
  printf '1\n' > /sys/class/gpio/gpio18/value
}
function access_allowed () {
  # Allowed state: drive GPIO18 low and GPIO17 high.
  printf '0\n' > /sys/class/gpio/gpio18/value
  printf '1\n' > /sys/class/gpio/gpio17/value
}
function check_cache_env () {
    local plate=$1
    # Membership test against the CHECK_CACHE array (offline whitelist).
    # Fixed: the old `return "Success!"` always errored — the return
    # builtin only accepts a numeric status. Use proper exit statuses.
    if [[ " ${CHECK_CACHE[@]} " =~ " ${plate} " ]]; then
        log "Success from offline check!"
        return 0
    fi
    return 1
}
function check_cache_file () {
    local plate=$1
    # Exact-line, fixed-string lookup in the local plate cache file.
    # Return 0 on a hit and 1 on a miss so callers (offline_check) can
    # branch on the result; previously the function always returned 0.
    if grep -Fxq "$plate" /tmp/rasp_cache.txt; then
        log "Success from offline check!"
        return 0
    fi
    return 1
}
function add_to_cache_file () {
    local plate=$1
    # Append the plate, then deduplicate and sort the cache in place.
    # TODO: skip the append entirely when the plate is already cached.
    printf '%s\n' "$plate" >> /tmp/rasp_cache.txt
    sort -u /tmp/rasp_cache.txt -o /tmp/rasp_cache.txt
}
function offline_check () {
    local pic=$1
    # Recognise candidate plates locally and accept if any one is cached.
    # Fixes: `local plates=recognise_plate $pic` assigned a literal string
    # instead of running the function, the loop iterated over the literal
    # word "plates", and `return check_cache_file ...` is invalid syntax.
    local plates plate
    plates=$(recognise_plate "$pic")
    for plate in $plates
    do
        if check_cache_file "$plate"; then
            return 0
        fi
    done
    return 1
}
# Entry point: $1 is the path to the captured picture.
PIC=$1
# Local lambda-style endpoint performing the cloud-side recognition.
CLOUD_URL=http://192.168.1.8:9000/2015-03-31/functions/function/invocations
# Prefer the cloud check while the network is reachable; otherwise fall
# back to the local docker + cache-file path.
if check_cloud_health
then
cloud_check $PIC $CLOUD_URL
else
offline_check $PIC
fi
| true
|
3a482d01e79136ffe11285cc8d690f759ce84046
|
Shell
|
adzhou/oragle
|
/coreutils/tests/tail-2/assert-2.sh
|
UTF-8
| 1,390
| 3.375
| 3
|
[
"BSL-1.0",
"GPL-3.0-or-later",
"GFDL-1.3-or-later",
"GPL-3.0-only"
] |
permissive
|
#!/bin/sh
# This variant of 'assert' would get a UMR reliably in 2.0.9.
# Due to a race condition in the test, the 'assert' script would get
# the UMR on Solaris only some of the time, and not at all on Linux/GNU.
# Copyright (C) 2000-2014 Free Software Foundation, Inc.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
. "${srcdir=.}/tests/init.sh"; path_prepend_ ./src
print_ver_ tail
# Not "expensive" per se, but sleeping for so long is annoying.
very_expensive_
# Marker string we expect tail to eventually emit.
ok='ok ok ok'
touch a
# Follow two files by name; "foo" does not exist yet at this point.
tail --follow=name a foo > err 2>&1 &
tail_pid=$!
# Arrange for the tail process to die after 12 seconds.
(sleep 12; kill $tail_pid) &
echo $ok > f
echo sleeping for 7 seconds...
sleep 7
# Renaming f to foo makes the followed-by-name "foo" spring into
# existence; tail should then print its contents.
mv f foo
# echo waiting....
wait
# The captured output must end with the marker line.
case "$(cat err)" in
*$ok) ;;
*) fail=1;;
esac
# NOTE(review): $fail is presumably initialised (to 0) by init.sh — confirm;
# otherwise `test $fail = 1` errors on the success path.
test $fail = 1 && cat err
Exit $fail
| true
|
f1c8127eecfc37d82a3c7b7772dbf79e5af0f1d6
|
Shell
|
steve8x8/geotoad
|
/tools/proximity.sh.ex
|
UTF-8
| 386
| 3.34375
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
CENTER=GC77 # or whatever should be the center of your search
DISTANCE=2km # radius around $CENTER
# Step 1: fetch the centre cache itself and write its details to a list file.
geotoad -q wid -x list -o "$(pwd)/$CENTER.list" "$CENTER"
# Step 2: pull "lat,lon" (fields 2 and 3) out of the list file.
COORDS=$(awk '{printf("%s,%s\n",$2,$3)}' "$CENTER.list")
# Step 3: run the real query around those coordinates.
geotoad -q coord -x gpx -o "$(pwd)/$CENTER-prox.gpx" "$COORDS"
| true
|
421d356ef9171d127e96d00058e15b7707da1b52
|
Shell
|
open-mpi/ompi
|
/contrib/ompi-time.sh
|
UTF-8
| 33,128
| 3.859375
| 4
|
[
"mpich2",
"BSD-3-Clause-Open-MPI"
] |
permissive
|
#!/bin/sh
#
# Copyright (c) 2015 Mellanox Technologies, Inc.
# All rights reserved.
# Copyright (c) 2022 Cisco Systems, Inc. All rights reserved
# $COPYRIGHT$
#
# Additional copyrights may follow
#
# $HEADER$
#
# This script is used to measure PMIx performance.
#
# --exec: scenario to run as sync or "test1 test2"
# --mpidir: path to mpi installation (/usr default)
# --parse: path to collected results
# HOWTO:
# 1 .Set test matrix using variables $node_list, ppn_list, test_list
# 2. Allocate nodes:
# $salloc --nodelist=node[1-4]
# or
# $salloc -N4
# 3. Launch script:
# $./opmi-time.sh --exec="test1 test2" --mpidir=<open>
# $./opmi-time.sh --mpidir=<open>
#
# Output location is test name folder
# Output file formats
# ()_base.log
# timestamp (usec) hostnode label
# 1441715028369350 mir14 start
# 1441715030540656 mir14 end
#
# ()_out.log
# timestamp (usec) rank node
# 1441715030460727 0 mir9
# 1441715030460628 1 mir10
#
# ()_result.log
# time rank node
# 2.089 3 mir12
# 2.093 2 mir11
#
# report.log
# nodes ppn mintime maxtime
# 4 1 2.089 2.093
# Settings
###############################################################################
node_list=(2 4)
ppn_list=(1 2)
test_list="test1 test2 test3 test4 test5 test6 test7 test8 test9 test10 test11 test12 test13"
# Declarations
###############################################################################
prefix=pmix
module=${BASH_SOURCE[0]}
# Command line parsing
###############################################################################
# NOTE(review): `opt` is assigned but never read — candidate for removal.
opt=""
while [ "$#" -gt 0 ]; do
case "$1" in
--parse=*) parse="${1#*=}"; shift 1;;
--exec=*) exec="${1#*=}"; shift 1;;
--mpidir=*) mpidir="${1#*=}"; shift 1;;
--parse|--exec|--mpidir) echo "$1 requires an argument" >&2; exit 1;;
-*) echo "unknown option: $1" >&2; exit 1;;
*) shift 1;;
esac
done
# The scenario of measurement
# (--exec="test1 test2" overrides the default test_list matrix)
if [ -n "$exec" ]; then
test_list="$exec"
fi
# The mpi path
# (--mpidir=<prefix>; defaults to /usr)
mpidir=${mpidir:=/usr}
# Functions
###############################################################################
# format text: wrap $1 in the ANSI escape sequences named by the
# remaining arguments (bold/underline/reverse/red/green/yellow).
function do_format() {
    local is_format=true
    local attr
    if [[ $is_format == true ]] ; then
        res=""
        # Append one escape sequence per requested attribute, in order.
        for attr in "${@:2}"; do
            case "$attr" in
                "bold" ) res="$res\e[1m" ;;
                "underline" ) res="$res\e[4m" ;;
                "reverse" ) res="$res\e[7m" ;;
                "red" ) res="$res\e[91m" ;;
                "green" ) res="$res\e[92m" ;;
                "yellow" ) res="$res\e[93m" ;;
            esac
        done
        echo -e "$res$1\e[0m"
    else
        echo "$1"
    fi
}
# print message
function do_msg() {
# Echo to stdout (interpreting backslash escapes) and append to $logfile.
echo -e "$*" 2>&1 | tee -a $logfile
}
# print error message and exit script
function do_err() {
# Red bold "failed. aborting." banner to stdout and $logfile, then hard-exit.
echo -e $(do_format "$module failed. aborting. $*" "red" "bold") 2>&1 | tee -a $logfile
exit 1
}
# print the seconds and current microseconds.
function do_timestamp() {
    # Emit "<usec-since-epoch>\t<short-hostname>\t<label>" through do_msg
    # (which also appends to $logfile). The label used to be passed as a
    # separate do_msg argument and came out space-separated, while the
    # base files are documented and parsed (do_analysis) as tab-separated —
    # join with an explicit tab instead.
    do_msg "$(($(date +%s%N)/1000))\t$(hostname -s)\t$1"
}
# swap two files
function do_fswap() {
    # Swap the locations of the two given files via a private staging
    # directory (the old code staged directly in /tmp, which could collide
    # with an existing /tmp/<basename>).  Diagnostics now go to stderr.
    # NOTE(review): as before, each file keeps its own basename, so a true
    # content swap only happens when the two basenames are identical.
    if (( $# == 2 )); then
        local staging
        staging=$(mktemp -d) || return 1
        mv "$1" "$staging/" &&
            mv "$2" "$(dirname "$1")" &&
            mv "$staging/$(basename "$1")" "$(dirname "$2")"
        local rc=$?
        rmdir "$staging" 2>/dev/null
        return $rc
    else
        echo "Usage: swap <file1> <file2>" >&2
        return 1
    fi
}
# Log and run an arbitrary command line; on failure, dump the log and
# exit the whole script with the command's status.
function do_cmd() {
cmd="$*"
do_msg "Doing:"
do_msg "=================================================="
do_msg "$*"
# eval so that quoted/compound command strings work; output goes to the log.
eval $cmd >> $logfile 2>&1
local status=$?
if test "$status" != "0"; then
echo "$module failed. Log:"
tail -20 $logfile
cat $logfile
exit $status
fi
do_msg "DONE"
do_msg ""
}
# Prepend an installation prefix ($1) to PATH, LD_LIBRARY_PATH and MANPATH.
function do_export() {
do_msg "Exporting PATHs:"
do_msg "=================================================="
do_msg "$1"
export PATH="$1/bin:${PATH}"
export LD_LIBRARY_PATH="$1/lib:${LD_LIBRARY_PATH}"
export MANPATH="$1/share/man:${MANPATH}"
do_msg "DONE"
do_msg ""
}
function do_nodeinfo() {
do_msg "Node information:"
do_msg "=================================================="
do_msg $(hostname)
do_msg $(cat /etc/issue | grep We)
do_msg $(cat /proc/cpuinfo | grep 'model name' | sort -u | awk '{print $4, $5, $6, $7, $9}')
do_msg $(cat /proc/cpuinfo | grep proce | wc | awk '{print $1}')
do_msg $(uname -a | awk '{print $12}')
do_msg $(cat /proc/meminfo | grep [M,m]em)
do_msg $(uname -a | awk '{print $3}')
do_msg $(ibstat | grep -e "CA type" -e "Firmware version")
do_msg $(ibstatus | grep -e rate -e state | grep -v 'phys state')
do_msg $(ofed_info | head -6 | grep OFED)
do_msg "DONE"
do_msg ""
}
# Abort (via do_err) unless the required launchers are on PATH.
function do_validate() {
command -v mpiexec >/dev/null 2>&1 || { do_err "mpiexec is not found."; }
command -v srun >/dev/null 2>&1 || { do_err "srun is not found."; }
command -v salloc >/dev/null 2>&1 || { do_err "salloc is not found."; }
}
function do_check_pmix() {
    # Succeeds (prints the matching line) iff SLURM's MPI plugin list
    # mentions pmix. The former eval wrapper added nothing for this
    # fixed command string.
    srun --mpi=list 2>&1 | grep pmix
}
function do_checksync_mpisync() {
local status
local tooldir=${tempdir}/mpisync
local verbose=$1
local option=$*
do_msg "Checking synchronization using mpisync:"
if [ ! -e ${tooldir} ]; then
mkdir -p ${tooldir}
cd ${tooldir}
wget --no-check-certificate https://github.com/open-mpi/ompi/raw/main/ompi/tools/mpisync/mpigclock.c >> $logfile 2>&1
wget --no-check-certificate https://github.com/open-mpi/ompi/raw/main/ompi/tools/mpisync/mpigclock.h >> $logfile 2>&1
wget --no-check-certificate https://github.com/open-mpi/ompi/raw/main/ompi/tools/mpisync/hpctimer.c >> $logfile 2>&1
wget --no-check-certificate https://github.com/open-mpi/ompi/raw/main/ompi/tools/mpisync/hpctimer.h >> $logfile 2>&1
wget --no-check-certificate https://github.com/open-mpi/ompi/raw/main/ompi/tools/mpisync/sync.c >> $logfile 2>&1
mpicc hpctimer.c mpigclock.c sync.c -o mpisync >> $logfile 2>&1
fi
if [ ! -e "$tooldir" ] || [ ! -f "$tooldir/mpisync" ]; then
do_err "can not find $tooldir/mpisync"
fi
mpiexec -n $(($nodes)) -npernode 1 $mpioptions $tooldir/mpisync -o ${syncfile} ${option} 2>&1
do_msg "Analysing ${syncfile}"
cat ${syncfile} >> $logfile 2>&1
diff=$(grep -v '^#' ${syncfile} | cut -f3 -d' ' | sort -n | awk 'BEGIN {min=1000000; max=0;}; { if($1<min && $1 != "") min = $1; if($1>max && $1 != "") max = $1; } END { printf("%0.06f %0.06f %0.06f", min, max, max-min) }') >> $logfile 2>&1
do_msg "sync drift is equal: $diff"
diff=`echo $diff | cut -f3 -d' '`
status=$(if (( `bc <<< "$diff >= 0.001"` == 1 )); then echo "value $diff >= 0.001"; fi)
if [ -n "$status" ] && [ -n $verbose -a "$verbose" == "on" ]; then
do_err "mpisync reports issue with synchronization as $status"
else
do_msg "Warning: mpiperf reports issue with synchronization as $status"
fi
do_msg "DONE"
do_msg ""
}
function do_checksync_mpiperf() {
local status
local tooldir=${tempdir}/mpiperf-0.0.3
local verbose=$1
do_msg "Checking synchronization using mpiperf:"
if [ ! -f ${tempdir}/mpiperf-0.0.3.tar.gz ]; then
wget http://mpiperf.cpct.sibsutis.ru/uploads/Main/mpiperf-0.0.3.tar.gz >> $logfile 2>&1
tar zxvf mpiperf-0.0.3.tar.gz >> $logfile 2>&1
cd $tooldir
make >> $logfile 2>&1
fi
if [ ! -e "$tooldir" ] || [ ! -f "$tooldir/src/mpiperf" ]; then
do_err "can not find $tooldir/src/mpiperf"
fi
mpiexec -n 1 $mpioptions $tooldir/src/mpiperf -T >> $logfile 2>&1
if [ -z "$(mpiexec -n 1 $mpioptions $tooldir/src/mpiperf -j -t gettimeofday 2>&1 | tee -a $logfile | sed -n '/PASSED/p')" ]; then
do_err "mpiperf does not support gettimeofday"
fi
mpiexec -n $(($nodes)) -npernode 1 $mpioptions $tooldir/src/mpiperf -t gettimeofday WaitPatternNull >> ${syncfile} 2>&1
do_msg "Analysing ${syncfile}"
cat ${syncfile} >> $logfile 2>&1
status=$(grep -v '^#' ${syncfile} | awk -F ' ' '{ print $6 }' | while read i; do if (( `bc <<< "$i >= 1"` == 1 )); then echo "value $i >= 1.00"; break; fi; done)
if [ -n "$status" ] && [ -n $verbose -a "$verbose" == "on" ]; then
do_err "mpiperf reports issue with synchronization as $status"
else
do_msg "Warning: mpiperf reports issue with synchronization as $status"
fi
do_msg "DONE"
do_msg ""
}
# $1 - sync filename
# $2 - verbose mode: on - exit in case synchronization values exceed a threshold and off - silent mode (default: off)
# $3+ - application additional options
function do_checksync() {
# syncfile is intentionally global: do_analysis reads it later.
if [ -z "$1" ]; then
syncfile=${tempdir}/mpisync.log
else
syncfile=$1
fi
# Only the mpisync-based check is active; alternatives kept for reference.
do_checksync_mpisync $2 "-a 0"
# do_checksync_mpisync $2 "-a 1"
# do_checksync_mpiperf
do_msg "syncfile: $syncfile"
}
function do_analysis() {
local testdir=$1
local basefile=$2
local outfile=$3
local outfile1="${3}.1"
local resultfile=${testdir}/${nodes}x${ppn}_result.log
if [ ! -e $tesdir ]; then
do_err "can not find testdir: $testdir"
fi
if [ -z $basefile -o ! -f $basefile ]; then
do_err "can not find basefile: $basefile"
fi
if [ -z $outfile -o ! -f $outfile ]; then
do_err "can not find outfile: $outfile"
fi
if [ "$(cat $outfile | wc -l)" != "$(($nodes * $ppn))" ]; then
do_msg "Warning: number of lines in $outfile ($(cat $outfile | wc -l)) is not equal ($nodes * $ppn)."
fi
start_t=`awk -F $'\t' '{ if (NR == 1) print $1 }' $basefile`
# Add sync value in output file
while read line; do
if [[ ! $line =~ ^[0-9] ]]; then
do_msg "Warning: ignoring line: $line."
continue
fi
local n=$(echo $line | cut -f3 -d' ')
local v1=$(echo $line | cut -f1 -d' ')
local v2=0
if [ ! -z $syncfile -o -f $syncfile ]; then
v2=$(echo "scale=2; ($(grep $n $syncfile | cut -f3 -d' ') * 1000000)" | bc -l)
# Round float value to int
v2=$(echo ${v2%%.*})
v2=${v2:=0}
fi
echo -e "$(($v1 + $v2))\t${v2}\t${line}" >> $outfile1
done < $outfile
# Find maximum and minimum lines
min_line=`sort -n $outfile1 | head -n1`
max_line=`sort -n $outfile1 | tail -n1`
if [ -z "$min_line" -o -z "$max_line" ]; then
do_err "can not find max/min lines in : $outfile1"
fi
min_t=$( echo "$min_line" | cut -f1 -d$'\t')
max_t=$( echo "$max_line" | cut -f1 -d$'\t')
echo -e "`bc -l <<< "scale=3; (($min_t - $start_t) / 1000000)"`\t`echo "$min_line" | cut -f4 -d$'\t'`\t`echo "$min_line" | cut -f5 -d$'\t'`" >> $resultfile 2>&1
echo -e "`bc -l <<< "scale=3; (($max_t - $start_t) / 1000000)"`\t`echo "$max_line" | cut -f4 -d$'\t'`\t`echo "$max_line" | cut -f5 -d$'\t'`" >> $resultfile 2>&1
echo -e "\n# Used synchronization file: $syncfile" >> $outfile1
do_report $testdir $resultfile
}
# Append one "<nodes>\t<ppn>\t<min>\t<max>" line to <testdir>/report.log,
# taking min/max from the first two lines of the result file.
# $1 - test directory, $2 - result file produced by do_analysis
function do_report() {
local testdir=$1
local resultfile=$2
local reportfile=${testdir}/report.log
# NOTE(review): unquoted operands and deprecated -o inside [ ] — works for
# the space-free paths used here, but fragile in general.
if [ -z $resultfile -o ! -f $resultfile ]; then
do_err "can not find resultfile: $resultfile"
fi
min_t=`awk -F $'\t' '{ if (NR == 1) print $1 }' $resultfile`
max_t=`awk -F $'\t' '{ if (NR == 2) print $1 }' $resultfile`
echo -e "${nodes}\t${ppn}\t${min_t}\t${max_t}" >> $reportfile 2>&1
}
# Archive the whole temp directory as pmix.<pid>.tar.gz next to it.
function do_postresult() {
cd $tempdir/..
tar -zcvf $PWD/pmix.$$.tar.gz $tempdir > /dev/null 2>&1
}
include_timestamp_func=$(cat <<END_MSG
#include <dlfcn.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdint.h>
#include <sys/stat.h>
#include <time.h>
#include <sys/time.h>
static inline void timestamp(FILE *file)
{
struct timeval tv;
char name[256];
char *host_name = NULL;
char *domain = NULL;
int procid = -1;
char *str = NULL;
gettimeofday(&tv, NULL);
if (gethostname(name, sizeof(name)) != 0)
strcpy(name, "localhost");
host_name = strdup(name);
domain = strchr(host_name, '.');
if (domain)
*domain = '\0';
str = getenv("SLURM_PROCID");
procid = ( str ? strtol(str, NULL, 10) : -1);
fprintf(file, "%lld\t%d\t%s\n", tv.tv_sec * 1000000LL + tv.tv_usec, procid, host_name);
fflush(file);
}
END_MSG
)
function do_exec() {
# The number of nodes (see SLURM_NNODES)
nodes=${SLURM_NNODES}
nodes=${nodes:=2}
# The number of tasks per node (see SLURM_NTASKS_PER_NODE or SLURM_TASKS_PER_NODE)
ppn=${SLURM_NTASKS_PER_NODE}
ppn=${ppn:=1}
mpioptions=' -novm -mca btl_openib_warn_default_gid_prefix 0 -mca mpi_add_procs_cutoff 100000 '
slurmoptions=' OMPI_MCA_btl_openib_warn_default_gid_prefix=0 OMPI_MCA_mpi_add_procs_cutoff=100000 '
if [ -z "$(env | grep SLURM)" ]; then
do_err "Do not see allocated nodes by SLURM. Probably salloc -N option is not set"
fi
#if [ "${SLURM_NPROCS}" != "$(($nodes * $ppn))" ]; then
# do_err "SLURM_NPROCS=${SLURM_NPROCS} is not equal ($nodes * $ppn). Probably salloc -N option is not set"
#fi
do_msg ""
do_msg "Configuration:"
do_msg "=================================================="
do_msg "tempdir: $tempdir"
do_msg "logfile: $logfile"
do_msg "mpi: $mpidir"
do_msg "exec: $exec"
do_msg "nodes: $nodes"
do_msg "ppn: $ppn"
do_msg "mpioptions: $mpioptions"
do_msg "slurmoptions: $slurmoptions"
do_msg "node list: $node_list"
do_msg "ppn list: $ppn_list"
do_msg "test list: $test_list"
do_msg ""
do_export $mpidir
do_nodeinfo
do_validate
if [ -f "${tempdir}/mpisync.log" ]; then
syncfile=${tempdir}/mpisync.log
do_msg "found sync data at ${syncfile}"
elif [ -f "${tempdir}/mpiperf.log" ]; then
syncfile=${tempdir}/mpiperf.log
do_msg "found sync data at ${syncfile}"
else
do_msg "sync data is not found"
fi
# Launch scenario
node_list_len=${#node_list[*]}
ppn_list_len=${#ppn_list[*]}
for ((i=0; $i < $node_list_len; i=$((i=$i+1)))); do
for ((j=0; $j < $ppn_list_len; j=$((j=$j+1)))); do
for test in $test_list; do
nodes=${node_list[$i]}
ppn=${ppn_list[$j]}
if [ "$test" = "sync" ]; then
do_checksync "${tempdir}/${nodes}x${ppn}_mpisync00.log" "off"
else
do_checksync "${tempdir}/${nodes}x${ppn}_mpisync_before.log" "off"
eval "do_${test}"
do_checksync "${tempdir}/${nodes}x${ppn}_mpisyn_after.log" "off"
fi
done
done
done
do_postresult
}
# $1 - result location
function do_parse() {
local parsedir=$1
local result_list
local test_list
local parsefile
for result in `ls -1 $workdir`; do
if [ ! -d "${parsedir}/${result}" ]; then
continue
fi
for test in `ls -1 "${parsedir}/${result}" | grep -v mpisync`; do
if [ ! -d "${parsedir}/${result}/${test}" ]; then
continue
fi
result_list="${result_list} ${result}"
test_list="${test_list} ${test}"
done
done
result_list=`echo $result_list | tr " " "\n" | sort | uniq | tr "\n" " "`
test_list=`echo $test_list | tr " " "\n" | sort | uniq | tr "\n" " "`
do_msg "results: $result_list"
do_msg "tests: $test_list"
for test in $test_list; do
parsefile="${parsedir}/parse_${test}.log"
for result in $result_list; do
echo -e "\n${result}:" >> $parsefile 2>&1
echo -e "nodes\tppn\tmin\tmax" >> $parsefile 2>&1
cat "${parsedir}/${result}/${test}/report.log" >> $parsefile 2>&1
done
done
}
# Pure application srun launch
#####################################################
function do_test1
{
local status
local scenario=test1
local testdir=${tempdir}/$scenario
local outfile=${testdir}/${nodes}x${ppn}_out.log
local basefile=${testdir}/${nodes}x${ppn}_base.log
do_msg "Running $scenario ${nodes}x${ppn} :"
mkdir -p $testdir
cd $testdir
cat > $scenario.c <<END_MSG
$include_timestamp_func
int main()
{
timestamp(stdout);
return 0;
}
END_MSG
gcc $scenario.c -o $scenario.out >> $logfile 2>&1
# Do test
do_timestamp "start" 2>&1 | tee -a $basefile
srun -n$(($nodes * $ppn)) -N$nodes --ntasks-per-node=$ppn ./$scenario.out >> $outfile 2>&1
test $? -eq 0 && status=OK || status=FAIL
do_timestamp "end" 2>&1 | tee -a $basefile
if [ "$status" == "FAIL" ]; then
do_err "can not launch a test"
fi
echo -e "srun pure overhead" > ${testdir}/info.log 2>&1
do_analysis $testdir $basefile $outfile
do_msg "DONE"
}
# Pure application mpiexec launch
#####################################################
function do_test2
{
local status
local scenario=test2
local testdir=${tempdir}/$scenario
local outfile=${testdir}/${nodes}x${ppn}_out.log
local basefile=${testdir}/${nodes}x${ppn}_base.log
do_msg "Running $scenario ${nodes}x${ppn} :"
mkdir -p $testdir
cd $testdir
cat > $scenario.c <<END_MSG
$include_timestamp_func
int main()
{
timestamp(stdout);
return 0;
}
END_MSG
gcc $scenario.c -o $scenario.out >> $logfile 2>&1
# Do test
do_timestamp "start" 2>&1 | tee -a $basefile
mpiexec -n $(($nodes * $ppn)) -npernode $ppn $mpioptions ./$scenario.out >> $outfile 2>&1
test $? -eq 0 && status=OK || status=FAIL
do_timestamp "end" 2>&1 | tee -a $basefile
if [ "$status" == "FAIL" ]; then
do_err "can not launch a test"
fi
echo -e "mpiexec pure overhead" > ${testdir}/info.log 2>&1
do_analysis $testdir $basefile $outfile
do_msg "DONE"
}
# Pure application oshrun launch
#####################################################
function do_test3
{
local status
local scenario=test3
local testdir=${tempdir}/$scenario
local outfile=${testdir}/${nodes}x${ppn}_out.log
local basefile=${testdir}/${nodes}x${ppn}_base.log
do_msg "Running $scenario ${nodes}x${ppn} :"
mkdir -p $testdir
cd $testdir
cat > $scenario.c <<END_MSG
$include_timestamp_func
int main()
{
timestamp(stdout);
return 0;
}
END_MSG
gcc $scenario.c -o $scenario.out >> $logfile 2>&1
# Do test
do_timestamp "start" 2>&1 | tee -a $basefile
oshrun -n $(($nodes * $ppn)) -npernode $ppn $mpioptions ./$scenario.out >> $outfile 2>&1
test $? -eq 0 && status=OK || status=FAIL
do_timestamp "end" 2>&1 | tee -a $basefile
if [ "$status" == "FAIL" ]; then
do_err "can not launch a test"
fi
echo -e "osrun pure overhead" > ${testdir}/info.log 2>&1
do_analysis $testdir $basefile $outfile
do_msg "DONE"
}
# MPI_init application srun/pmi2 launch
#####################################################
function do_test4
{
local status
local scenario=test4
local testdir=${tempdir}/$scenario
local outfile=${testdir}/${nodes}x${ppn}_out.log
local basefile=${testdir}/${nodes}x${ppn}_base.log
do_msg "Running $scenario ${nodes}x${ppn} :"
mkdir -p $testdir
cd $testdir
cat > $scenario.c <<END_MSG
$include_timestamp_func
#include "mpi.h"
int main(int argc, char* argv[])
{
MPI_Init(&argc, &argv);
timestamp(stdout);
MPI_Finalize();
return 0;
}
END_MSG
mpicc $scenario.c -o $scenario.out >> $logfile 2>&1
# Do test
do_timestamp "start" 2>&1 | tee -a $basefile
env $slurmoptions srun -n$(($nodes * $ppn)) -N$nodes --ntasks-per-node=$ppn --mpi=pmi2 ./$scenario.out >> $outfile 2>&1
test $? -eq 0 && status=OK || status=FAIL
do_timestamp "end" 2>&1 | tee -a $basefile
if [ "$status" == "FAIL" ]; then
do_err "can not launch a test"
fi
echo -e "srun --mpi=pmi2:MPI_Init" > ${testdir}/info.log 2>&1
do_analysis $testdir $basefile $outfile
do_msg "DONE"
}
# start_pes application srun/pmi2 launch
#####################################################
function do_test5
{
local status
local scenario=test5
local testdir=${tempdir}/$scenario
local outfile=${testdir}/${nodes}x${ppn}_out.log
local basefile=${testdir}/${nodes}x${ppn}_base.log
do_msg "Running $scenario ${nodes}x${ppn} :"
mkdir -p $testdir
cd $testdir
cat > $scenario.c <<END_MSG
$include_timestamp_func
#include "shmem.h"
int main(int argc, char* argv[])
{
start_pes(0);
timestamp(stdout);
return 0;
}
END_MSG
oshcc $scenario.c -o $scenario.out >> $logfile 2>&1
# Do test
do_timestamp "start" 2>&1 | tee -a $basefile
env $slurmoptions srun -n$(($nodes * $ppn)) -N$nodes --ntasks-per-node=$ppn --mpi=pmi2 ./$scenario.out >> $outfile 2>&1
test $? -eq 0 && status=OK || status=FAIL
do_timestamp "end" 2>&1 | tee -a $basefile
if [ "$status" == "FAIL" ]; then
do_err "can not launch a test"
fi
echo -e "srun --mpi=pmi2:start_pes" > ${testdir}/info.log 2>&1
do_analysis $testdir $basefile $outfile
do_msg "DONE"
}
# MPI_Init application mpiexec launch
#####################################################
function do_test6
{
local status
local scenario=test6
local testdir=${tempdir}/$scenario
local outfile=${testdir}/${nodes}x${ppn}_out.log
local basefile=${testdir}/${nodes}x${ppn}_base.log
do_msg "Running $scenario ${nodes}x${ppn} :"
mkdir -p $testdir
cd $testdir
cat > $scenario.c <<END_MSG
$include_timestamp_func
#include "mpi.h"
int main(int argc, char* argv[])
{
MPI_Init(&argc, &argv);
timestamp(stdout);
MPI_Finalize();
return 0;
}
END_MSG
mpicc $scenario.c -o $scenario.out >> $logfile 2>&1
# Do test
do_timestamp "start" 2>&1 | tee -a $basefile
mpiexec -n $(($nodes * $ppn)) -npernode $ppn $mpioptions ./$scenario.out >> $outfile 2>&1
test $? -eq 0 && status=OK || status=FAIL
do_timestamp "end" 2>&1 | tee -a $basefile
if [ "$status" == "FAIL" ]; then
do_err "can not launch a test"
fi
echo -e "mpiexec:MPI_Init" > ${testdir}/info.log 2>&1
do_analysis $testdir $basefile $outfile
do_msg "DONE"
}
# start_pes application oshrun launch
#####################################################
function do_test7
{
local status
local scenario=test7
local testdir=${tempdir}/$scenario
local outfile=${testdir}/${nodes}x${ppn}_out.log
local basefile=${testdir}/${nodes}x${ppn}_base.log
do_msg "Running $scenario ${nodes}x${ppn} :"
mkdir -p $testdir
cd $testdir
cat > $scenario.c <<END_MSG
$include_timestamp_func
#include "shmem.h"
int main(int argc, char* argv[])
{
start_pes(0);
timestamp(stdout);
return 0;
}
END_MSG
oshcc $scenario.c -o $scenario.out >> $logfile 2>&1
# Do test
do_timestamp "start" 2>&1 | tee -a $basefile
oshrun -n $(($nodes * $ppn)) -npernode $ppn $mpioptions ./$scenario.out >> $outfile 2>&1
test $? -eq 0 && status=OK || status=FAIL
do_timestamp "end" 2>&1 | tee -a $basefile
if [ "$status" == "FAIL" ]; then
do_err "can not launch a test"
fi
echo -e "osrun:start_pes" > ${testdir}/info.log 2>&1
do_analysis $testdir $basefile $outfile
do_msg "DONE"
}
# pure application mpiexec:orte_daemon
#####################################################
# Pure-application MPI launch test: the app does nothing; orte_daemon is
# interposed via LD_PRELOAD so each daemon appends a timestamp to a per-rank
# file ($outfile.$SLURM_PROCID) the moment it starts.
# Globals read: tempdir, nodes, ppn, mpioptions, logfile, include_timestamp_func.
function do_test8
{
local status
local scenario=test8
local testdir=${tempdir}/$scenario
local outfile=${testdir}/${nodes}x${ppn}_out.log
local basefile=${testdir}/${nodes}x${ppn}_base.log
do_msg "Running $scenario ${nodes}x${ppn} :"
mkdir -p $testdir
cd $testdir
# Minimal MPI-linked binary; it deliberately never calls MPI_Init -- only the
# daemon-side startup cost is being measured here.
cat > $scenario.c <<END_MSG
#include "mpi.h"
int main(int argc, char* argv[])
{
return 0;
}
END_MSG
# Interposer library: wraps orte_daemon() and logs a timestamp before chaining
# to the real symbol. Assumes $include_timestamp_func pulls in <dlfcn.h>,
# <stdio.h> and <stdlib.h> -- TODO confirm.
cat > lib_$scenario.c <<END_MSG
#define _GNU_SOURCE
$include_timestamp_func
int orte_daemon(int argc, char *argv[])
{
static int (*_orte_daemon)(int argc, char *argv[]) = NULL;
if (!_orte_daemon) {
_orte_daemon=dlsym(RTLD_NEXT,"orte_daemon");
if (!_orte_daemon) {
fprintf(stderr, "Error in 'dlsym': %s\n", dlerror());
exit(1);
} else {
FILE *fd = NULL;
char filename[1024];
char *str = getenv("SLURM_PROCID");
if (str) {
sprintf(filename, "%s.%s", "$outfile", str);
fd = fopen(filename, "a");
if (fd) {
timestamp(fd);
fclose(fd);
}
}
}
}
return _orte_daemon(argc, argv);
}
END_MSG
mpicc $scenario.c -o $scenario.out >> $logfile 2>&1
gcc lib_$scenario.c -o $scenario.so -shared -fPIC -ldl >> $logfile 2>&1
# Do test
do_timestamp "start" 2>&1 | tee -a $basefile
LD_PRELOAD=$PWD/$scenario.so mpiexec -n $(($nodes * $ppn)) -npernode $ppn $mpioptions ./$scenario.out
test $? -eq 0 && status=OK || status=FAIL
do_timestamp "end" 2>&1 | tee -a $basefile
if [ "$status" == "FAIL" ]; then
do_err "can not launch a test"
fi
# Merge the per-rank timestamp files into $outfile. Route eval/rm diagnostics
# to $logfile (same pattern as do_test10/do_test11) so a missing glob match
# does not spill errors onto the console.
eval "cat $outfile.* >> $outfile" >> $logfile 2>&1
rm $outfile.* >> $logfile 2>&1
echo -e "mpiexec:orte_daemon" > ${testdir}/info.log 2>&1
do_analysis $testdir $basefile $outfile
do_msg "DONE"
}
# pure application oshrun:orte_daemon
#####################################################
# Pure-application OpenSHMEM launch test: identical interposer approach as
# do_test8 but launched through oshrun. Each daemon logs a timestamp to a
# per-rank $outfile.$SLURM_PROCID file via the LD_PRELOAD'ed orte_daemon wrapper.
# Globals read: tempdir, nodes, ppn, mpioptions, logfile, include_timestamp_func.
function do_test9
{
local status
local scenario=test9
local testdir=${tempdir}/$scenario
local outfile=${testdir}/${nodes}x${ppn}_out.log
local basefile=${testdir}/${nodes}x${ppn}_base.log
do_msg "Running $scenario ${nodes}x${ppn} :"
mkdir -p $testdir
cd $testdir
# Do-nothing MPI binary: only daemon startup is measured.
cat > $scenario.c <<END_MSG
#include "mpi.h"
int main(int argc, char* argv[])
{
return 0;
}
END_MSG
# orte_daemon() interposer; assumes $include_timestamp_func provides
# <dlfcn.h>/<stdio.h>/<stdlib.h> -- TODO confirm.
cat > lib_$scenario.c <<END_MSG
#define _GNU_SOURCE
$include_timestamp_func
int orte_daemon(int argc, char *argv[])
{
static int (*_orte_daemon)(int argc, char *argv[]) = NULL;
if (!_orte_daemon) {
_orte_daemon=dlsym(RTLD_NEXT,"orte_daemon");
if (!_orte_daemon) {
fprintf(stderr, "Error in 'dlsym': %s\n", dlerror());
exit(1);
} else {
FILE *fd = NULL;
char filename[1024];
char *str = getenv("SLURM_PROCID");
if (str) {
sprintf(filename, "%s.%s", "$outfile", str);
fd = fopen(filename, "a");
if (fd) {
timestamp(fd);
fclose(fd);
}
}
}
}
return _orte_daemon(argc, argv);
}
END_MSG
mpicc $scenario.c -o $scenario.out >> $logfile 2>&1
gcc lib_$scenario.c -o $scenario.so -shared -fPIC -ldl >> $logfile 2>&1
# Do test
do_timestamp "start" 2>&1 | tee -a $basefile
LD_PRELOAD=$PWD/$scenario.so oshrun -n $(($nodes * $ppn)) -npernode $ppn $mpioptions ./$scenario.out
test $? -eq 0 && status=OK || status=FAIL
do_timestamp "end" 2>&1 | tee -a $basefile
if [ "$status" == "FAIL" ]; then
do_err "can not launch a test"
fi
# Merge per-rank files; send eval/rm noise to $logfile for consistency with
# do_test10/do_test11 instead of the console.
eval "cat $outfile.* >> $outfile" >> $logfile 2>&1
rm $outfile.* >> $logfile 2>&1
echo -e "oshrun:orte_daemon" > ${testdir}/info.log 2>&1
do_analysis $testdir $basefile $outfile
do_msg "DONE"
}
# pure application mpiexec:orte_rml_base_update_contact_info
#####################################################
# Pure-application MPI test measuring a later startup milestone:
# orte_rml_base_update_contact_info() is interposed via LD_PRELOAD and each
# process appends a timestamp to a per-rank $outfile.$SLURM_PROCID file.
# Globals read: tempdir, nodes, ppn, mpioptions, logfile, include_timestamp_func.
function do_test10
{
local status
local scenario=test10
local testdir=${tempdir}/$scenario
local outfile=${testdir}/${nodes}x${ppn}_out.log
local basefile=${testdir}/${nodes}x${ppn}_base.log
do_msg "Running $scenario ${nodes}x${ppn} :"
mkdir -p $testdir
cd $testdir
# Do-nothing MPI binary; only the launch path is measured.
cat > $scenario.c <<END_MSG
#include "mpi.h"
int main(int argc, char* argv[])
{
return 0;
}
END_MSG
# Interposer for orte_rml_base_update_contact_info(); assumes
# $include_timestamp_func provides <dlfcn.h>/<stdio.h>/<stdlib.h> -- TODO confirm.
cat > lib_$scenario.c <<END_MSG
#define _GNU_SOURCE
$include_timestamp_func
int orte_rml_base_update_contact_info(void * data)
{
static int (*_real_func)(void* data) = NULL;
if (!_real_func) {
_real_func=dlsym(RTLD_NEXT,"orte_rml_base_update_contact_info");
if (!_real_func) {
fprintf(stderr, "Error in 'dlsym': %s\n", dlerror());
exit(1);
} else {
FILE *fd = NULL;
char filename[1024];
char *str = getenv("SLURM_PROCID");
if (str) {
sprintf(filename, "%s.%s", "$outfile", str);
fd = fopen(filename, "a");
if (fd) {
timestamp(fd);
fclose(fd);
}
}
}
}
return _real_func(data);
}
END_MSG
mpicc $scenario.c -o $scenario.out >> $logfile 2>&1
mpicc lib_$scenario.c -o $scenario.so -shared -fPIC -ldl >> $logfile 2>&1
# Do test
do_timestamp "start" 2>&1 | tee -a $basefile
LD_PRELOAD=$PWD/$scenario.so mpiexec -n $(($nodes * $ppn)) -npernode $ppn $mpioptions ./$scenario.out
test $? -eq 0 && status=OK || status=FAIL
do_timestamp "end" 2>&1 | tee -a $basefile
if [ "$status" == "FAIL" ]; then
do_err "can not launch a test"
fi
# Merge per-rank files; diagnostics go to $logfile, not the console.
eval "cat $outfile.* >> $outfile" >> $logfile 2>&1
rm $outfile.* >> $logfile 2>&1
echo -e "mpiexec:orte_rml_base_update_contact_info" > ${testdir}/info.log 2>&1
do_analysis $testdir $basefile $outfile
do_msg "DONE"
}
# pure application oshrun:orte_rml_base_update_contact_info
#####################################################
# OpenSHMEM twin of do_test10: same orte_rml_base_update_contact_info()
# interposer, but compiled with oshcc and launched through oshrun.
# Globals read: tempdir, nodes, ppn, mpioptions, logfile, include_timestamp_func.
function do_test11
{
local status
local scenario=test11
local testdir=${tempdir}/$scenario
local outfile=${testdir}/${nodes}x${ppn}_out.log
local basefile=${testdir}/${nodes}x${ppn}_base.log
do_msg "Running $scenario ${nodes}x${ppn} :"
mkdir -p $testdir
cd $testdir
# Do-nothing SHMEM binary; only the launch path is measured.
cat > $scenario.c <<END_MSG
#include "shmem.h"
int main(int argc, char* argv[])
{
return 0;
}
END_MSG
# Interposer; assumes $include_timestamp_func provides the needed headers
# (<dlfcn.h>/<stdio.h>/<stdlib.h>) -- TODO confirm.
cat > lib_$scenario.c <<END_MSG
#define _GNU_SOURCE
$include_timestamp_func
int orte_rml_base_update_contact_info(void * data)
{
static int (*_real_func)(void* data) = NULL;
if (!_real_func) {
_real_func=dlsym(RTLD_NEXT,"orte_rml_base_update_contact_info");
if (!_real_func) {
fprintf(stderr, "Error in 'dlsym': %s\n", dlerror());
exit(1);
} else {
FILE *fd = NULL;
char filename[1024];
char *str = getenv("SLURM_PROCID");
if (str) {
sprintf(filename, "%s.%s", "$outfile", str);
fd = fopen(filename, "a");
if (fd) {
timestamp(fd);
fclose(fd);
}
}
}
}
return _real_func(data);
}
END_MSG
oshcc $scenario.c -o $scenario.out >> $logfile 2>&1
oshcc lib_$scenario.c -o $scenario.so -shared -fPIC -ldl >> $logfile 2>&1
# Do test
do_timestamp "start" 2>&1 | tee -a $basefile
LD_PRELOAD=$PWD/$scenario.so oshrun -n $(($nodes * $ppn)) -npernode $ppn $mpioptions ./$scenario.out
test $? -eq 0 && status=OK || status=FAIL
do_timestamp "end" 2>&1 | tee -a $basefile
if [ "$status" == "FAIL" ]; then
do_err "can not launch a test"
fi
# Merge per-rank files; diagnostics go to $logfile, not the console.
eval "cat $outfile.* >> $outfile" >> $logfile 2>&1
rm $outfile.* >> $logfile 2>&1
echo -e "oshrun:orte_rml_base_update_contact_info" > ${testdir}/info.log 2>&1
do_analysis $testdir $basefile $outfile
do_msg "DONE"
}
# MPI_Init application mpiexec:srun/pmix
#####################################################
# MPI_Init startup test launched directly with slurm's srun using the pmix
# plugin (no mpiexec). Skipped when slurm lacks pmix support.
# Globals read: tempdir, nodes, ppn, slurmoptions, logfile, include_timestamp_func.
function do_test12
{
local status
local scenario=test12
local testdir=${tempdir}/$scenario
local outfile=${testdir}/${nodes}x${ppn}_out.log
local basefile=${testdir}/${nodes}x${ppn}_base.log
do_msg "Running $scenario ${nodes}x${ppn} :"
# Probe for the slurm pmix plugin; skip (not fail) the test when absent.
do_check_pmix
if [ $? -eq 0 ]; then
do_msg "slurm has pmix plugin"
else
do_msg "skipping this test : slurm does not have pmix plugin"
return 1
fi
mkdir -p $testdir
cd $testdir
# Test program prints a timestamp right after MPI_Init returns.
cat > $scenario.c <<END_MSG
$include_timestamp_func
#include "mpi.h"
int main(int argc, char* argv[])
{
MPI_Init(&argc, &argv);
timestamp(stdout);
MPI_Finalize();
return 0;
}
END_MSG
mpicc $scenario.c -o $scenario.out >> $logfile 2>&1
# Do test
do_timestamp "start" 2>&1 | tee -a $basefile
env $slurmoptions srun -n$(($nodes * $ppn)) -N$nodes --ntasks-per-node=$ppn --mpi=pmix ./$scenario.out >> $outfile 2>&1
test $? -eq 0 && status=OK || status=FAIL
do_timestamp "end" 2>&1 | tee -a $basefile
if [ "$status" == "FAIL" ]; then
do_err "can not launch a test"
fi
echo -e "srun --mpi=pmix:MPI_Init" > ${testdir}/info.log 2>&1
do_analysis $testdir $basefile $outfile
do_msg "DONE"
}
# start_pes application oshrun:srun/pmix
#####################################################
# start_pes() startup test launched directly with srun --mpi=pmix.
# Skipped when slurm lacks the pmix plugin.
# Globals read: tempdir, nodes, ppn, slurmoptions, logfile, include_timestamp_func.
function do_test13
{
local status
local scenario=test13
local testdir=${tempdir}/$scenario
local outfile=${testdir}/${nodes}x${ppn}_out.log
local basefile=${testdir}/${nodes}x${ppn}_base.log
do_msg "Running $scenario ${nodes}x${ppn} :"
# Probe for the slurm pmix plugin; skip (not fail) when absent.
do_check_pmix
if [ $? -eq 0 ]; then
do_msg "slurm has pmix plugin"
else
do_msg "skipping this test : slurm does not have pmix plugin"
return 1
fi
mkdir -p $testdir
cd $testdir
# Test program prints a timestamp right after start_pes(0).
cat > $scenario.c <<END_MSG
$include_timestamp_func
#include "shmem.h"
int main(int argc, char* argv[])
{
start_pes(0);
timestamp(stdout);
return 0;
}
END_MSG
oshcc $scenario.c -o $scenario.out >> $logfile 2>&1
# Do test
do_timestamp "start" 2>&1 | tee -a $basefile
env $slurmoptions srun -n$(($nodes * $ppn)) -N$nodes --ntasks-per-node=$ppn --mpi=pmix ./$scenario.out >> $outfile 2>&1
test $? -eq 0 && status=OK || status=FAIL
do_timestamp "end" 2>&1 | tee -a $basefile
if [ "$status" == "FAIL" ]; then
do_err "can not launch a test"
fi
echo -e "srun --mpi=pmix:start_pes" > ${testdir}/info.log 2>&1
do_analysis $testdir $basefile $outfile
do_msg "DONE"
}
# Main
###############################################################################
# Check if --exec option is passed ($exec is defined)
# --exec mode: create a fresh per-PID scratch tree under ./tmp and run the
# measurement scenarios from inside it. $exec/$parse/$prefix are presumably
# set by the CLI parser earlier in this file -- TODO confirm.
if test ${exec+defined}; then
tempdir=$PWD/tmp/${prefix}.$$
logfile=${tempdir}/${prefix}-time.log
mkdir -p $tempdir
rm -f $logfile
cd $tempdir
do_exec
fi
# Check if --parse option is passed ($parse is defined)
# --parse mode: re-analyse an existing results tree. An empty $parse means
# "use ./tmp"; otherwise $parse names the directory to scan.
if test ${parse+defined}; then
if [ -z "$parse" ]; then
tempdir=$PWD/tmp
else
tempdir=$parse
fi
logfile=${tempdir}/${prefix}-parse.log
mkdir -p $tempdir
rm -f $logfile
cd $tempdir
do_parse "$tempdir"
fi
exit 0
| true
|
60f4323a8f0d8647149a15ec4c2318f8d6053e8d
|
Shell
|
Vinesma/.dotfiles
|
/install/scripts/setup-yt-dlp.sh
|
UTF-8
| 916
| 3.671875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
#
# Setup/update yt-dlp, a video extractor/downloader for youtube and other sites.
# Installs the latest release binary into /usr/local/bin (or self-updates an
# existing install), then installs the man page and zsh completion from the
# release tarball and refreshes the man database.
WORK_DIR=$HOME/.dotfiles/install/workdir/
SOURCE=https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp
SOURCE_DOCS=https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp.tar.gz
ZSH_COMPLETION_DIR=/usr/share/zsh/functions/Completion/X
# -- SETUP --
mkdir -p "$WORK_DIR"
# -- ACT --
cd "$WORK_DIR" || exit 1
if ! command -v yt-dlp > /dev/null; then
    sudo curl -L "$SOURCE" -o /usr/local/bin/yt-dlp
    sudo chmod a+rx /usr/local/bin/yt-dlp
else
    # Already installed: let yt-dlp update itself in place.
    sudo yt-dlp -U
fi
# Abort if the docs tarball cannot be fetched; tar/cd below depend on it.
curl -L "$SOURCE_DOCS" -o yt-dlp.tar.gz || exit 1
tar xf yt-dlp.tar.gz
cd yt-dlp || exit 1
sudo cp -vf yt-dlp.1 /usr/share/man/man1
# Install zsh completion only when the system completion dir exists; the
# path is quoted so the test/copy cannot word-split.
[ -d "$ZSH_COMPLETION_DIR" ] && \
    sudo cp -vf completions/zsh/_yt-dlp "$ZSH_COMPLETION_DIR"
printf "%s\n" "Updating mandb..."
sudo mandb -q
# -- CLEANUP --
cd "$HOME" || exit 1
rm -rf "$WORK_DIR"
| true
|
282199b9c2f6f000f27d22ecc1592f82091b5f2c
|
Shell
|
w0lker/maven_scripts
|
/mvn-completion
|
UTF-8
| 630
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/bash
# Bash completion for `mvn`: offers the Maven settings profiles stored under
# ~/.local/etc/maven (file name minus any .xml suffix) as completions for the
# first argument.
local_etc="$HOME/.local/etc"
local_mvn_etc=$local_etc/maven
name=$1
# Build the space-separated list of profile names with a glob instead of
# parsing `ls` output, so unusual file names cannot word-split or glob.
names=""
for setting_path in "$local_mvn_etc"/*; do
  [ -e "$setting_path" ] || continue   # unmatched glob stays literal; skip it
  setting_name=${setting_path##*/}     # basename
  setting_name=${setting_name%.xml}    # drop a trailing .xml, if any
  if [ -z "$names" ]; then
    names=$setting_name
  else
    names="$names $setting_name"
  fi
done

# Complete only the first word after `mvn` with the collected profile names.
_mvn() {
  local cur prev opts
  COMPREPLY=()
  cur="${COMP_WORDS[COMP_CWORD]}"
  prev="${COMP_WORDS[COMP_CWORD-1]}"
  case "$cur" in
    *)
      if [[ 1 -eq $COMP_CWORD ]]; then
        COMPREPLY=( $(compgen -W "$names" -- ${cur} ) )
      fi
      ;;
  esac
}
complete -F _mvn mvn
| true
|
5029e396efa3dfe9dd73fc0bc61445d0377e0432
|
Shell
|
rook/rook
|
/tests/scripts/helm.sh
|
UTF-8
| 1,120
| 4.09375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash +e
# Helm installer helper: downloads a Helm release tarball into a temp dir.
# HELM_VERSION / ARCH environment variables override the defaults below.
temp="/tmp/rook-tests-scripts-helm"
helm_version="${HELM_VERSION:-"v3.8.0"}"
arch="${ARCH:-}"
# Map `uname -m` output to the arch token used in Helm download URLs.
# Sets the global $arch; exits 1 with a hint when the machine type is unknown.
detectArch() {
  case "$(uname -m)" in
    "x86_64" | "amd64")
      arch="amd64"
      ;;
    # Accept both spellings: Linux reports aarch64, macOS reports arm64.
    # Helm publishes these artifacts under the "arm64" name.
    "aarch64" | "arm64")
      arch="arm64"
      ;;
    "i386")
      arch="i386"
      ;;
    *)
      echo "Couldn't translate 'uname -m' output to an available arch."
      echo "Try setting ARCH environment variable to your system arch:"
      echo "amd64, x86_64, aarch64, i386"
      exit 1
      ;;
  esac
}
# Download the Helm release tarball for this OS/arch and unpack it into $temp.
# Globals read: temp, helm_version, arch. Requires network access.
install() {
# Download and unpack helm
local dist
dist="$(uname -s)"
# Helm artifacts use lowercase OS names (linux, darwin).
dist=$(echo "${dist}" | tr "[:upper:]" "[:lower:]")
mkdir -p "${temp}"
wget "https://get.helm.sh/helm-${helm_version}-${dist}-${arch}.tar.gz" -O "${temp}/helm.tar.gz"
tar -C "${temp}" -xvf "${temp}/helm.tar.gz" --strip-components 1
}
# Auto-detect the architecture unless the caller pinned it via ARCH.
if [ -z "${arch}" ]; then
detectArch
fi
# Subcommand dispatch.
# NOTE(review): the usage text advertises a "clean" subcommand but no handler
# exists for it -- confirm whether ${temp} cleanup should be implemented here.
case "${1:-}" in
up)
install
;;
*)
echo "usage:" >&2
echo " $0 up" >&2
echo " $0 clean" >&2
esac
| true
|
ca7102f83e5d4b5caf762b65007d82b6e25b696e
|
Shell
|
artera/copr-repo
|
/bin/dockerrun
|
UTF-8
| 410
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the centos7-mock build container, building the image first when it is
# not yet present locally. All script arguments are forwarded to `docker run`
# (callers supply the image name and the command to execute).
set -e

# Resolve the repository root: the parent of this script's directory.
script_path="$(readlink -fn "$0")"
BASEDIR="$(dirname "$(dirname "$script_path")")"
cd "$BASEDIR"

IMAGE=centos7-mock

# Build only when `docker images -q` reports no local copy of the image.
if [ -z "$(docker images -q "$IMAGE")" ]; then
  docker build -t "$IMAGE" .
fi

# Persistent mock cache shared with the container.
mkdir -p cache

# exec: replace this shell so signals reach the docker client directly.
exec docker run --privileged=true -it \
  -v "$PWD:/home/builder/rpmbuild" \
  -v "$HOME/.gnupg:/home/builder/.gnupg" \
  -v "$PWD/cache:/var/cache/mock" \
  -v "$PWD/mock.cfg:/etc/mock/default.cfg" \
  "$@"
| true
|
e4845d87cc76916b6ae995097911c763af659681
|
Shell
|
mradkov/aepp-todolist
|
/deploy-gh-pages.sh
|
UTF-8
| 444
| 2.625
| 3
|
[
"ISC"
] |
permissive
|
#!/usr/bin/env bash
# Build the aepp and force-publish the dist/ output to the gh-pages branch.
# The whole deploy is one && chain so it aborts at the first failing step.
# NOTE(review): `git add *` skips dotfiles in dist/ -- confirm none are needed.
GIT_REV="$(git rev-parse HEAD)"&& \
rm -rf dist && \
rm -rf node_modules && \
npm install && \
NODE_ENV=prod npm run build && \
cd dist/ && \
git init && \
git remote add origin git@github.com:mradkov/aepp-todolist.git && \
git checkout -b gh-pages && \
git add * && \
git commit -m "todolist aepp ${GIT_REV} deployment to gh-pages" && \
git fetch && git rebase -s recursive -Xtheirs origin/gh-pages && \
git push origin gh-pages
| true
|
d222e916cf08c6989f2587d6e1335d888cc2b3eb
|
Shell
|
dergoegge/dotfiles
|
/tmux_status_right
|
UTF-8
| 365
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/zsh
# vim:ft=sh
# tmux status-right helper: emit one status segment per VCS when the current
# directory is inside a mercurial and/or git working copy.

# Print an hg segment (branch + diffstat summary); silent outside an hg repo.
function _hg_status_line() {
  if ! hg root >/dev/null 2>&1; then
    return 1
  fi
  print "hg: [$(hg branch)]$(hg diff --stat | tail -n1) |"
}

# Print a git segment (current branch + shortstat); silent outside a git repo.
function _git_status_line() {
  if ! git rev-parse --is-inside-work-tree >/dev/null 2>&1; then
    return 1
  fi
  print "git: [$(git symbolic-ref -q HEAD --short)]$(git diff --shortstat) |"
}

_git_status_line
_hg_status_line
| true
|
50bc4cb25687a9df43fa24f2e4dc750736aa965f
|
Shell
|
umccr/workflows
|
/configurations/scripts/draw_fusions_hg38.sh
|
UTF-8
| 2,113
| 3.484375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
#
# Hard-coded project directory for now; don't want this to go off the rails #
# For every finished bcbio 2020* run: hard-link Arriba results from work/ to
# final/ and render a fusions PDF with draw_fusions.R.
# NOTE(review): `ls -A` on a missing final/ dir prints an error but the -n
# test then correctly evaluates false -- confirm that noise is acceptable.
for DIRECTORY in /g/data/gx8/projects/PROJECT/2020*/ ;
do
RUNDIR="$DIRECTORY"
if [ -n "$(ls -A $RUNDIR/final)" ]; then
# Run finished; trigger Arriba move and plot results
for FOLDER in "$RUNDIR"/work/arriba/* ;
do
# Get sample ID
SAMPLE=$(basename "$FOLDER")
# Make sure that Arriba sample id matches bcbio's folder
if [ -n "$(ls -A $RUNDIR/final/$SAMPLE)" ]; then
# Move Arriba data from work to final
echo "Moving Arriba results for $SAMPLE"
mkdir -p "$RUNDIR"/final/"$SAMPLE"/arriba
# cp -al hard-links instead of copying, so no extra disk space is used.
cp -al "$FOLDER"/* "$RUNDIR"/final/"$SAMPLE"/arriba/
# Kick off draw
BAM="$RUNDIR"/final/"$SAMPLE"/"$SAMPLE"-ready.bam
FUSION="$RUNDIR"/final/"$SAMPLE"/arriba/fusions.tsv
PDF="$RUNDIR"/final/"$SAMPLE"/arriba/fusions.pdf
# PATH is pinned so draw_fusions.R resolves against the bcbio conda env.
export PATH=/g/data3/gx8/local/development/bcbio/anaconda/bin:/g/data/gx8/local/development/bin:/opt/bin:/bin:/usr/bin:/opt/pbs/default/bin
draw_fusions.R --fusions="$FUSION" --alignments="$BAM" --output="$PDF" --annotation=/g/data3/gx8/local/development/bcbio/genomes/Hsapiens/hg38/rnaseq/ref-transcripts.gtf --cytobands=/g/data/gx8/extras/arriba/cytobands_hg38_GRCh38_2018-02-23.tsv --proteinDomains=/g/data/gx8/extras/arriba/protein_domains_hg38_GRCh38_2018-03-06.gff3
echo "--"
else
echo "Unknown sample $SAMPLE, skipping"
fi
done
fi
done
| true
|
5cb759abf418399de0d7a42fa64162f0913013e4
|
Shell
|
sbandyo2/apigatewaymodule
|
/gateway/gateway-entrypoint.sh
|
UTF-8
| 265
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/sh
# Gateway container entrypoint: block until the auth-server is reachable on
# port 9101, then start the gateway service in the foreground.
echo 'Starting the custom script'
# nc -z only probes connectability; poll every 2s until the port is open.
while ! nc -z auth-server 9101
do
  echo "Waiting for upcoming Authentication Server"
  sleep 2
done
echo 'Pre-requisite done now moving on the execution of current container service'
# exec so java replaces the shell and receives container stop signals
# directly (the previous `nohup java ...` without & added nothing and
# blocked proper SIGTERM delivery on container shutdown).
exec java -jar gateway.jar
| true
|
f8a62f8c21be8a1354b2ee64bc966bbef6b259e0
|
Shell
|
thelaser/gcp_snippets
|
/CloudScripts/bash/check_ownership/check_projects_ownership.sh
|
UTF-8
| 1,508
| 4.03125
| 4
|
[] |
no_license
|
################
# Explanations #
################
# This script finds and shows from the list of projects accessible by the active account, the projects owned explicitly by an account in GCP (without counting inheritances from an organization)
# If run without arguments, it will perform the search for the account being currently used with Cloud SDK
# Otherwise it can accept one argument which will be the account to check ownerships for.
# Example of usage:
# ./check_projects_ownership.sh myaccount@mydomain.com
#########
# TO DO #
#########
# - Check if jq is installed
#####################
# One-liner version #
#####################
# (for project in $(gcloud projects list --format 'value(project_id)'); do if [ $(gcloud projects get-iam-policy $project --format json 2>/dev/null|jq '.bindings[]|select(.role=="roles/owner").members'|grep -E "user:$(gcloud config get-value account)") ]; then echo $project; fi; done >> myprojects.txt)&
####################
# Expanded version #
####################
# Use the first argument as the account when given, otherwise fall back to
# the account currently active in the Cloud SDK configuration.
if [ -n "${1:-}" ]; then
  ACCOUNT=$1
else
  ACCOUNT=$(gcloud config get-value account)
fi
echo -e "Checking project ownerships for the account $ACCOUNT \n"
PROJECT_LIST=$(gcloud projects list --format 'value(project_id)')
for PROJECT in $PROJECT_LIST;
do
  # Pull the IAM policy and keep only roles/owner members matching our user.
  PROJECT_POLICIES=$(gcloud projects get-iam-policy "$PROJECT" --format json 2>/dev/null)
  OWNER=$(echo "$PROJECT_POLICIES" | jq '.bindings[]? | select(.role?=="roles/owner").members?' | grep "user:$ACCOUNT")
  # Quote the test: an unquoted multi-word $OWNER made `[ ... ]` error out
  # ("binary operator expected") whenever several owners matched.
  if [ -n "$OWNER" ]; then
    echo "$PROJECT"
  fi
done
| true
|
357c36ec45cfb5a30b6d4cbe9457eb7f0485a57b
|
Shell
|
diegocasmo/dotfiles
|
/zsh/aliases.zsh
|
UTF-8
| 1,711
| 2.703125
| 3
|
[] |
no_license
|
# Aliases
alias ..="cd .."
alias ...="cd ../.."
alias dt="cd ~/Desktop"
alias dn="cd ~/Downloads"
alias ls="ls -G"
alias la="ls -AGFoh"
alias grep="grep --color=auto"
alias rsynccopy="rsync --partial --progress --append --rsh=ssh -r -h " # cp with progressbar
alias rsyncmove="rsync --partial --progress --append --rsh=ssh -r -h --remove-sent-files " # mv with progressbar
# OSX
alias ip="dig +short myip.opendns.com @resolver1.opendns.com"
# BUG FIX: $1 must be escaped -- inside double quotes it was expanded (to an
# empty string) when the alias was *defined*, so perl never printed the match.
alias ips="ifconfig -a | perl -nle'/(\d+\.\d+\.\d+\.\d+)/ && print \$1'"
alias flush="dscacheutil -flushcache" # Flush DNS cache
alias pubkey="more ~/.ssh/id_rsa.pub | pbcopy | printf '=> Public key copied to pasteboard.\n'" # Copy my public key to the pasteboard
alias deadsym="find / -type l ! -exec test -r {} \; -print" # Find "dead" symbolic links ?
alias fixow='/System/Library/Frameworks/CoreServices.framework/Versions/A/Frameworks/LaunchServices.framework/Versions/A/Support/lsregister -kill -seed -r -f -v -domain local -domain user -domain system;echo "Open With has been rebuilt"'
alias showdotfiles="defaults write com.apple.finder AppleShowAllFiles -bool true && killall Finder" # Show hidden files in Finder
alias hidedotfiles="defaults write com.apple.finder AppleShowAllFiles -bool false && killall Finder" # Hide hidden files in Finder
alias rmds="find . -name '.DS_Store' -depth -exec rm -v {} \;"
alias chrome="open -a Google\ Chrome"
alias map="xargs -n1"
# Other tools
alias dos2unix="perl -pi -e 's/\r\n/\n/g'"
alias urlencode='python -c "import sys, urllib as ul; print ul.quote_plus(sys.argv[1]);"'
alias getpage="wget --no-clobber --page-requisites --html-extension --convert-links --no-host-directories" # Download web page with all assets
| true
|
c794338ffe9ed05f8a2d6ba87afd7b93d185552f
|
Shell
|
alrickemilien/ft_ls
|
/icons-in-terminal/uninstall.sh
|
UTF-8
| 401
| 3.265625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Uninstall the icons-in-terminal fonts: remove the local ./fonts tree and the
# installed .ttf files, then refresh the fontconfig cache (Linux only).
set -xe
rm -rf ./fonts/
# Fonts live under ~/.fonts on Linux and ~/Library/Fonts on macOS.
FONT_DIRECTORY=${HOME}/.fonts/
if [ "$(uname)" = "Darwin" ]; then
  FONT_DIRECTORY=${HOME}/Library/Fonts/
fi
# BUG FIX: the variable was misspelled as ${FONT_DIRECTORYcons}, which
# expanded empty and made this `rm -f /*.ttf` instead of removing the
# installed fonts from the font directory.
rm -f "${FONT_DIRECTORY}"/*.ttf
# run fc-cache -fv ~/.fonts to let freetype2 know of those fonts
if [ "$(uname)" != "Darwin" ]; then
  fc-cache -fvr --really-force > /dev/null
fi
set +xe
echo "ttf uninstalled. Close all your terminal windows."
| true
|
f8a24e888f84d2107c9731c730cefc1c0cf5f6ab
|
Shell
|
FAIMS/reviewExporter
|
/export.sh
|
UTF-8
| 1,806
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/bash
# $1 module directory e.g. /var/www/faims/modules/b28ea04f-2e6b-421a-a3fd-8be4c6c50259
# $2 user entered data as json file e.g. /tmp/something.json => {"Label1":"some text","Label2":["Item1","Item2"],"Label3":"Item2"}
# $3 directory to put generated files in e.g. /tmp/exported_files
# $4 file to write markdown text into e.g. /tmp/mark_down.txt => h3. Traditional html title
# read json interface input file into string
# NOTE(review): $json is never used after this -- pretty-printing may only be
# serving as input validation; confirm before removing.
json=`python -mjson.tool $2`
# Clear stale temp dirs from previous exports.
find /tmp/ -maxdepth 1 -type d -wholename "/tmp/tmp*" -print0 | xargs -0 -I{} rm -rf {}
echo "Hi there"
# Record a re-runnable command line for debugging this export.
echo -e "cd `pwd`; sudo bash ./export.sh $1 $2 $3 $4\n" > /tmp/exportRun
# export database to csv using json inputs and pass output into export file inside download directory
echo -e "\nTiming data:\n\nExport Started: $(date)" > $4
# shapefile.py does the real work; stdout/stderr captured for the report below.
python shapefile.py $1 $3 $2 > /tmp/bar 2> /tmp/foo
echo -e "\nExport Finished: $(date)\n" >> $4
echo -e "\n\n\n**Your data have been prepared for export.**
**Click \"Download file\" below to get your data as a single compressed file.**
The data will be in .tar.bz2 format to allow large-file export which can be unpacked via programs like [7zip](http://www.7-zip.org/download.html) on windows or tar on linux. You may need to unpack the tarball after decompressing the bz2 outer container: in plain langauge, unpack it twice. This is normal and everything is fine.
If you see 'Segmentation Fault' in the text below, retry the export.
----------
If the download button doesn't appear, [contact support@fedarch.org immediately](mailto:support@fedarch.org?subject=ExportDebug) and paste the following information into the email:
" >> $4
# Append captured stdout/stderr, double-spaced, to the markdown report.
awk '{print ""$0"\n"}' /tmp/bar >> $4
echo "
"
awk '{print ""$0"\n"}' /tmp/foo >> $4
# NOTE(review): s/^// substitutes nothing -- this sed looks like a no-op left
# over from an earlier edit; confirm before removing.
sed -i -e "s/^//" $4
cat $4
# generate markup and pass output to markup file
| true
|
b86baabc5ca69dc12c7e03cc682942351de91e1b
|
Shell
|
spyysalo/consensus-pipeline
|
/pipeline/220-list-pubmed-contents.sh
|
UTF-8
| 1,027
| 3.84375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# List PubMed DB contents.
# Produces two artifacts under data/pubmed/contents: a raw sqlite listing and
# a derived .ids file; each step is skipped when its output already exists.
set -euo pipefail
SCRIPT="$(basename "$0")"
# https://stackoverflow.com/a/246128
SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
INDIR="$SCRIPTDIR/../data/pubmed/db"
OUTDIR="$SCRIPTDIR/../data/pubmed/contents"
inpath="$INDIR/pubmed.sqlite"
if [ ! -s "$inpath" ]; then
echo "$SCRIPT:ABORT: $inpath not found"
exit 1
fi
mkdir -p "$OUTDIR"
outpath="$OUTDIR/pubmed.sqlite.listing"
if [ -s "$outpath" ]; then
echo "$SCRIPT:$outpath exists, skipping ..."
else
command="$SCRIPTDIR/../scripts/lssqlite.py"
echo "$SCRIPT:running \"$command\" on $inpath with output to $outpath" >&2
python3 "$command" "$inpath" > "$outpath"
fi
# Second stage: derive document ids by stripping the .txt suffix.
inpath="$outpath"
outpath="$OUTDIR/pubmed.sqlite.listing.ids"
if [ -s "$outpath" ]; then
echo "$SCRIPT:$outpath exists, skipping ..."
else
echo "$SCRIPT:processing $inpath with output to $outpath" >&2
egrep '\.txt' < "$inpath" | perl -pe 's/\.txt//' > "$outpath"
fi
# Fix: interpolate $SCRIPT (the message previously printed the literal
# string "SCRIPT:done.").
echo "$SCRIPT:done." >&2
| true
|
450f8b2f75f18107eb1f046ed047f8bdf24b4867
|
Shell
|
FrontEndCoffee/laravel-docker
|
/build-and-release.sh
|
UTF-8
| 1,066
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/bash
set -e
# Docker Hub namespace the images are published under.
AUTHOR="uptimeproject"
# ANSI color codes used by the print helpers below.
RED='\033[0;31m'
GREEN='\033[0;32m'
NC='\033[0m'
# --local (default): only build images; --release: build and push them.
MODE=${1:--local}
# Print an informational message ($1) in green to stdout.
# %b interprets the \033 escapes stored in $GREEN/$NC and the \n sequences in
# the message, without treating the message as a printf format string (the
# old `printf "$1\n"` misbehaved on messages containing %).
function print_info {
  printf '%b' "$GREEN"
  printf '%b\n' "$1"
  printf '%b' "$NC"
}
# Print an error message ($1) in red to stdout.
# %b interprets the \033 escapes in $RED/$NC and \n in the message, without
# using the message itself as a printf format string (robust against %).
function print_error {
  printf '%b' "$RED"
  printf '%b\n' "$1"
  printf '%b' "$NC"
}
# Build (and, in --release mode, push) the docker image for one dockerfile.
# $1 - build-context directory; also used as the image name suffix.
# $2 - dockerfile name inside that directory; its extension-less name is the tag.
# Globals read: AUTHOR, MODE.
function deploy {
CONTEXT=$1
FILE="$1/$2"
IMAGE="$AUTHOR/$1"
# Tag = dockerfile name minus its final extension (e.g. 8.1.dockerfile -> 8.1).
TAG=$(printf $2 | sed 's/\.[^.]*$//')
print_info "Building image $IMAGE:$TAG"
docker build -t $IMAGE:$TAG --file $FILE $CONTEXT
if [ "$MODE" = "--release" ]; then
print_info "Releasing image $IMAGE:$TAG"
docker push $IMAGE:$TAG
fi
}
# Run
# Walk every directory in the repo root; each *.dockerfile found inside is
# built (and pushed in --release mode) via deploy().
if [ "$MODE" = "--release" ]; then
print_info "\nBUILD AND RELEASE CONTAINERS\n"
else
print_info "\nBuild containers locally\n"
fi
for build_context in *; do
if [ -d "$build_context" ]; then
print_info "Checking build context '$build_context' for dockerfiles.."
cd $build_context
for dockerfile in *.dockerfile; do
# Without nullglob an unmatched glob yields the literal "*.dockerfile";
# the -f check filters that case out.
if [ -f "$dockerfile" ]; then
# deploy() expects to run from the repo root, so hop out and back.
cd ..
deploy $build_context $dockerfile
cd $build_context
fi
done
cd ..
fi
done
| true
|
6cdb4f168dc43b416aa27abe8d9948c44877e085
|
Shell
|
gmasson/nuxtool
|
/modules/update-dist/help.sh
|
UTF-8
| 184
| 2.78125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# One-line help text for the update-dist module, localized via $language
# ($green/$fcolor are color variables provided by the calling environment).
if [ "$language" = "pt-br" ]; then
  echo -e "$green update-dist $fcolor - Atualiza sua distribuição"
else
  echo -e "$green update-dist $fcolor - Updates its distribution"
fi
| true
|
2a84a226a3852738256c677599c5010a5b488b15
|
Shell
|
sash2104/loop_exp
|
/src/preprocessing/convertAllVideo.sh
|
UTF-8
| 314
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash
# Downscale every source .mp4 to 240p, then split the converted videos into
# image frames. Relies on the sibling convertVideoSize.sh / video2image.sh.
in_dir="$HOME/loop_exp/data/original/"
out_dir="$HOME/loop_exp/data/small/"
# Iterate with a glob instead of parsing `ls` output so that file names with
# spaces survive, and quote every path expansion.
for video in "${in_dir}"*.mp4
do
  [ -e "$video" ] || continue   # skip the literal pattern when nothing matches
  file=${video##*/}
  ./convertVideoSize.sh "${in_dir}${file}" 240 "${out_dir}${file}"
done
for video in "${out_dir}"*.mp4
do
  [ -e "$video" ] || continue
  file=${video##*/}
  ./video2image.sh "${out_dir}${file}"
done
| true
|
0240f4312a05283f68b06c5c652b30a3db4aadce
|
Shell
|
lemejiamo/holberton-system_engineering-devops
|
/0x04-loops_conditions_and_parsing/8-for_ls
|
UTF-8
| 139
| 3.359375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Prints the name of files that have '-'
# NOTE(review): parsing `ls` output breaks on names containing whitespace;
# kept as-is since the exercise mandates this for-loop structure.
# cut prints the part after the first '-' (names without '-' pass through).
list=$(ls)
for i in $list
do
echo "$i" | cut -d '-' --field=2
done
| true
|
ff226dfb0e52512f53529ac35caefdd74a6d77e8
|
Shell
|
b2bdd/custom-site-template
|
/provision/vvv-init.sh
|
UTF-8
| 17,538
| 3.78125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Provision WordPress Stable
set -eo pipefail
echo " * B2BDD site template provisioner ${VVV_SITE_NAME} - downloads and installs a copy of WP stable for testing, building client sites, etc"
# fetch the first host as the primary domain. If none is available, generate a default using the site name
# All values come from the VVV site config via get_config_value, with the
# site name as the usual fallback.
DB_NAME=$(get_config_value 'db_name' "${VVV_SITE_NAME}")
# Strip characters that are unsafe in a MySQL database name.
DB_NAME=${DB_NAME//[\\\/\.\<\>\:\"\'\|\?\!\*]/}
DB_PREFIX=$(get_config_value 'db_prefix' 'wp_')
DOMAIN=$(get_primary_host "${VVV_SITE_NAME}".test)
PUBLIC_DIR=$(get_config_value 'public_dir' "htdocs")
SITE_TITLE=$(get_config_value 'site_title' "${DOMAIN}")
WP_LOCALE=$(get_config_value 'locale' 'en_US')
WP_TYPE=$(get_config_value 'wp_type' "single")
WP_VERSION=$(get_config_value 'wp_version' 'latest')
# b2bdd custom variables for building wordmove file
# NOTE(review): database users/passwords default to the site name when not
# configured -- confirm real credentials are always supplied via config.
DEVELOPMENT_URL=$(get_config_value 'development_url' "${VVV_SITE_NAME}".b2bdd.website)
DEVELOPMENT_DB_NAME=$(get_config_value 'development_database' "${VVV_SITE_NAME}")
DEVELOPMENT_DB_USER=$(get_config_value 'development_database_user' "${VVV_SITE_NAME}")
DEVELOPMENT_DB_PASS=$(get_config_value 'development_database_pass' "${VVV_SITE_NAME}")
DEVELOPMENT_SERVER_PATH=$(get_config_value 'development_server_path' '/full/server/path/here')
DEVELOPMENT_SERVER=$(get_config_value 'development_server' '1.2.3.4')
DEVELOPMENT_SERVER_USER=$(get_config_value 'development_server_user' 'username')
DEVELOPMENT_SERVER_PASS=$(get_config_value 'development_server_pass' 'password')
PRODUCTION_URL=$(get_config_value 'production_url' "${VVV_SITE_NAME}")
PRODUCTION_DB_NAME=$(get_config_value 'production_database' "${VVV_SITE_NAME}")
PRODUCTION_DB_USER=$(get_config_value 'production_database_user' "${VVV_SITE_NAME}")
PRODUCTION_DB_PASS=$(get_config_value 'production_database_pass' "${VVV_SITE_NAME}")
PRODUCTION_SERVER_PATH=$(get_config_value 'production_server_path' '/full/server/path/here')
PRODUCTION_SERVER=$(get_config_value 'production_server' '1.2.3.4')
PRODUCTION_SERVER_USER=$(get_config_value 'production_server_user' 'username')
PRODUCTION_SERVER_PASS=$(get_config_value 'production_server_pass' 'password')
# Absolute path to the web root (site path + optional public dir).
PUBLIC_DIR_PATH="${VVV_PATH_TO_SITE}"
if [ ! -z "${PUBLIC_DIR}" ]; then
PUBLIC_DIR_PATH="${PUBLIC_DIR_PATH}/${PUBLIC_DIR}"
fi
# Make a database, if we don't already have one
# Create the site database (idempotent) and grant the shared VVV 'wp' user
# full privileges on it. Globals read: DB_NAME.
# NOTE(review): root/root is the standard VVV local MySQL credential pair.
setup_database() {
echo -e " * Creating database '${DB_NAME}' (if it's not already there)"
mysql -u root --password=root -e "CREATE DATABASE IF NOT EXISTS \`${DB_NAME}\`"
echo -e " * Granting the wp user priviledges to the '${DB_NAME}' database"
mysql -u root --password=root -e "GRANT ALL PRIVILEGES ON \`${DB_NAME}\`.* TO wp@localhost IDENTIFIED BY 'wp';"
echo -e " * DB operations done."
}
# Create the nginx log files and the public web root for this site.
# Globals read: VVV_PATH_TO_SITE, PUBLIC_DIR, PUBLIC_DIR_PATH.
setup_nginx_folders() {
echo " * Setting up the log subfolder for Nginx logs"
noroot mkdir -p "${VVV_PATH_TO_SITE}/log"
noroot touch "${VVV_PATH_TO_SITE}/log/nginx-error.log"
noroot touch "${VVV_PATH_TO_SITE}/log/nginx-access.log"
echo " * Creating the public folder at '${PUBLIC_DIR}' if it doesn't exist already"
noroot mkdir -p "${PUBLIC_DIR_PATH}"
}
# Install (and activate) every plugin listed under install_plugins in the
# site config. Entries may be plugin slugs or direct URLs; URL entries are
# force-installed so re-provisioning overwrites them.
install_plugins() {
WP_PLUGINS=$(get_config_value 'install_plugins' '')
if [ ! -z "${WP_PLUGINS}" ]; then
isurl='(https?|ftp|file)://[-A-Za-z0-9\+&@#/%?=~_|!:,.;]*[-A-Za-z0-9\+&@#/%=~_|]'
# The config value is a YAML-ish "- item" list; split on the "- " markers.
for plugin in ${WP_PLUGINS//- /$'\n'}; do
if [[ "${plugin}" =~ $isurl ]]; then
echo " ! Warning, a URL was found for this plugin, attempting install and activate with --force set for ${plugin}"
noroot wp plugin install "${plugin}" --activate --force
else
if noroot wp plugin is-installed "${plugin}"; then
echo " * The ${plugin} plugin is already installed."
else
echo " * Installing and activating plugin: '${plugin}'"
noroot wp plugin install "${plugin}" --activate
fi
fi
done
fi
}
# Same as install_plugins but for themes (installed, not activated).
install_themes() {
WP_THEMES=$(get_config_value 'install_themes' '')
if [ ! -z "${WP_THEMES}" ]; then
isurl='(https?|ftp|file)://[-A-Za-z0-9\+&@#/%?=~_|!:,.;]*[-A-Za-z0-9\+&@#/%=~_|]'
for theme in ${WP_THEMES//- /$'\n'}; do
if [[ "${theme}" =~ $isurl ]]; then
echo " ! Warning, a URL was found for this theme, attempting install of ${theme} with --force set"
noroot wp theme install --force "${theme}"
else
if noroot wp theme is-installed "${theme}"; then
echo " * The ${theme} theme is already installed."
else
echo " * Installing theme: '${theme}'"
noroot wp theme install "${theme}"
fi
fi
done
fi
}
# Install the site's nginx config: prefer a vvv-nginx-custom.conf override,
# substitute the public-dir placeholder, and optionally inject redirect rules
# that proxy missing uploads to a live site (live_url config value).
copy_nginx_configs() {
echo " * Copying the sites Nginx config template"
if [ -f "${VVV_PATH_TO_SITE}/provision/vvv-nginx-custom.conf" ]; then
echo " * A vvv-nginx-custom.conf file was found"
noroot cp -f "${VVV_PATH_TO_SITE}/provision/vvv-nginx-custom.conf" "${VVV_PATH_TO_SITE}/provision/vvv-nginx.conf"
else
echo " * Using the default vvv-nginx-default.conf, to customize, create a vvv-nginx-custom.conf"
noroot cp -f "${VVV_PATH_TO_SITE}/provision/vvv-nginx-default.conf" "${VVV_PATH_TO_SITE}/provision/vvv-nginx.conf"
fi
echo " * Applying public dir setting to Nginx config"
noroot sed -i "s#{vvv_public_dir}#/${PUBLIC_DIR}#" "${VVV_PATH_TO_SITE}/provision/vvv-nginx.conf"
LIVE_URL=$(get_config_value 'live_url' '')
if [ ! -z "$LIVE_URL" ]; then
echo " * Adding support for Live URL redirects to NGINX of the website's media"
# replace potential protocols, and remove trailing slashes
LIVE_URL=$(echo "${LIVE_URL}" | sed 's|https://||' | sed 's|http://||' | sed 's:/*$::')
redirect_config=$( (cat <<END_HEREDOC
if (!-e \$request_filename) {
rewrite ^/[_0-9a-zA-Z-]+(/wp-content/uploads/.*) \$1;
}
if (!-e \$request_filename) {
rewrite ^/wp-content/uploads/(.*)\$ \$scheme://${LIVE_URL}/wp-content/uploads/\$1 redirect;
}
END_HEREDOC
) |
# pipe and escape new lines of the HEREDOC for usage in sed
sed -e ':a' -e 'N' -e '$!ba' -e 's/\n/\\n\\1/g'
)
noroot sed -i -e "s|\(.*\){{LIVE_URL}}|\1${redirect_config}|" "${VVV_PATH_TO_SITE}/provision/vvv-nginx.conf"
else
noroot sed -i "s#{{LIVE_URL}}##" "${VVV_PATH_TO_SITE}/provision/vvv-nginx.conf"
fi
}
setup_wp_config_constants(){
set +e
noroot shyaml get-values-0 -q "sites.${VVV_SITE_NAME}.custom.wpconfig_constants" < "${VVV_CONFIG}" |
while IFS='' read -r -d '' key &&
IFS='' read -r -d '' value; do
lower_value=$(echo "${value}" | awk '{print tolower($0)}')
echo " * Adding constant '${key}' with value '${value}' to wp-config.php"
if [ "${lower_value}" == "true" ] || [ "${lower_value}" == "false" ] || [[ "${lower_value}" =~ ^[+-]?[0-9]*$ ]] || [[ "${lower_value}" =~ ^[+-]?[0-9]+\.?[0-9]*$ ]]; then
noroot wp config set "${key}" "${value}" --raw
else
noroot wp config set "${key}" "${value}"
fi
done
set -e
}
# Point wp-config.php at the local VVV database credentials and import a
# SQL dump into it.
# Globals:   DB_NAME, DB_PREFIX (read)
# Arguments: $1 - path to the .sql dump to import
restore_db_backup() {
  echo " * Found a database backup at ${1}. Restoring the site"
  # wp/wp is the MySQL user VVV provisions for every local site.
  noroot wp config set DB_USER "wp"
  noroot wp config set DB_PASSWORD "wp"
  noroot wp config set DB_HOST "localhost"
  noroot wp config set DB_NAME "${DB_NAME}"
  noroot wp config set table_prefix "${DB_PREFIX}"
  noroot wp db import "${1}"
  echo " * Installed database backup"
}
# Download WordPress core files into the current working directory.
# Arguments: $1 - WordPress version string; $2 - locale (e.g. en_US)
download_wordpress() {
  # Install and configure the latest stable version of WordPress
  echo " * Downloading WordPress version '${1}' locale: '${2}'"
  noroot wp core download --locale="${2}" --version="${1}"
}
# Generate wp-config.php for the site database and enable the debug flags
# used during local development.
# Globals: DB_NAME, DB_PREFIX (read)
initial_wpconfig() {
  echo " * Setting up wp-config.php"
  noroot wp config create --dbname="${DB_NAME}" --dbprefix="${DB_PREFIX}" --dbuser=wp --dbpass=wp
  # --raw writes unquoted booleans so PHP sees true, not the string "true".
  noroot wp config set WP_DEBUG true --raw
  noroot wp config set SCRIPT_DEBUG true --raw
}
# When install_test_content is set in the site config, download the
# wptest.xml sample data set and import it with the wordpress-importer plugin.
maybe_import_test_content() {
  INSTALL_TEST_CONTENT=$(get_config_value 'install_test_content' "")
  if [ ! -z "${INSTALL_TEST_CONTENT}" ]; then
    echo " * Downloading test content from github.com/poststatus/wptest/master/wptest.xml"
    noroot curl -s https://raw.githubusercontent.com/poststatus/wptest/master/wptest.xml > /tmp/import.xml
    echo " * Installing the wordpress-importer"
    noroot wp plugin install wordpress-importer
    echo " * Activating the wordpress-importer"
    noroot wp plugin activate wordpress-importer
    echo " * Importing test data"
    # --authors=create makes users for any author found in the XML.
    noroot wp import /tmp/import.xml --authors=create
    echo " * Cleaning up import.xml"
    rm /tmp/import.xml
    echo " * Test content installed"
  fi
}
# Apply the house-standard B2BDD WordPress option set when the site config
# sets b2b_wp_config. Each 'wp option update' writes directly to the
# options table of the freshly installed site.
maybe_set_b2bdd_wordpress_configuration() {
  B2B_WP_CONFIG=$(get_config_value 'b2b_wp_config' "")
  if [ ! -z "${B2B_WP_CONFIG}" ]; then
    echo " * Setting Permalink Structure..."
    noroot wp option update permalink_structure "/news/%postname%/"
    echo " * Setting General Settings..."
    noroot wp option update date_format "F j, Y"
    # BUG FIX: IANA time zone identifiers use underscores; the original
    # wrote "America/New York", which is not a valid zone name.
    noroot wp option update timezone_string "America/New_York"
    noroot wp option update start_of_week "1"
    noroot wp option update time_format "g:i"
    noroot wp option update users_can_register "0"
    noroot wp option update gzipcompression "1"
    noroot wp option update WPLANG "en_US"
    echo " * Setting Reading Settings..."
    # blog_public=0 discourages search engines on local/dev installs.
    noroot wp option update blog_public "0"
    echo " * Setting Discussion Settings..."
    noroot wp option update close_comments_days_old "0"
    noroot wp option update close_comments_for_old_posts "1"
    noroot wp option update comment_moderation "1"
    noroot wp option update comment_registration "1"
    noroot wp option update default_comment_status "closed"
    noroot wp option update default_ping_status "closed"
    noroot wp option update show_avatars "0"
    echo " * Setting Media Settings..."
    noroot wp option update thumbnail_crop "0"
    noroot wp option update uploads_use_yearmonth_folders "0"
  fi
}
# Run the WordPress installer (single site plus optional multisite pass),
# optionally delete the stock plugins/themes, then apply B2BDD defaults
# and test content.
# Globals: DOMAIN, SITE_TITLE, WP_TYPE (read)
install_wp() {
  echo " * Installing WordPress"
  ADMIN_USER=$(get_config_value 'admin_user' "b2bdd")
  ADMIN_PASSWORD=$(get_config_value 'admin_password' "password")
  ADMIN_EMAIL=$(get_config_value 'admin_email' "info@b2bdd.com")
  echo " * Installing using wp core install --url=\"${DOMAIN}\" --title=\"${SITE_TITLE}\" --admin_name=\"${ADMIN_USER}\" --admin_email=\"${ADMIN_EMAIL}\" --admin_password=\"${ADMIN_PASSWORD}\""
  noroot wp core install --url="${DOMAIN}" --title="${SITE_TITLE}" --admin_name="${ADMIN_USER}" --admin_email="${ADMIN_EMAIL}" --admin_password="${ADMIN_PASSWORD}"
  echo " * WordPress was installed, with the username '${ADMIN_USER}', and the password '${ADMIN_PASSWORD}' at '${ADMIN_EMAIL}'"

  if [ "${WP_TYPE}" = "subdomain" ]; then
    echo " * Running Multisite install using wp core multisite-install --subdomains --url=\"${DOMAIN}\" --title=\"${SITE_TITLE}\" --admin_name=\"${ADMIN_USER}\" --admin_email=\"${ADMIN_EMAIL}\" --admin_password=\"${ADMIN_PASSWORD}\""
    noroot wp core multisite-install --subdomains --url="${DOMAIN}" --title="${SITE_TITLE}" --admin_name="${ADMIN_USER}" --admin_email="${ADMIN_EMAIL}" --admin_password="${ADMIN_PASSWORD}"
    echo " * Multisite install complete"
  elif [ "${WP_TYPE}" = "subdirectory" ]; then
    # BUG FIX: the original log line interpolated ${INSTALL_COMMAND}, which
    # is never defined in this script, so it logged "wp core  --url=...".
    # Log the command that is actually executed below.
    echo " * Running Multisite install using wp core multisite-install --url=\"${DOMAIN}\" --title=\"${SITE_TITLE}\" --admin_name=\"${ADMIN_USER}\" --admin_email=\"${ADMIN_EMAIL}\" --admin_password=\"${ADMIN_PASSWORD}\""
    noroot wp core multisite-install --url="${DOMAIN}" --title="${SITE_TITLE}" --admin_name="${ADMIN_USER}" --admin_email="${ADMIN_EMAIL}" --admin_password="${ADMIN_PASSWORD}"
    echo " * Multisite install complete"
  fi

  DELETE_DEFAULT_PLUGINS=$(get_config_value 'delete_default_plugins' '')
  if [ ! -z "${DELETE_DEFAULT_PLUGINS}" ]; then
    echo " * Deleting the default plugins akismet and hello dolly"
    noroot wp plugin delete akismet
    noroot wp plugin delete hello
  fi

  DELETE_DEFAULT_THEMES=$(get_config_value 'delete_default_themes' '')
  if [ ! -z "${DELETE_DEFAULT_THEMES}" ]; then
    echo " * Deleting default themes"
    noroot wp theme delete twentytwentythree
    noroot wp theme delete twentytwentytwo
    noroot wp theme delete twentytwentyone
    noroot wp theme delete twentytwenty
  fi

  maybe_set_b2bdd_wordpress_configuration
  maybe_import_test_content
}
# Bring the installed WordPress core in line with WP_VERSION: downgrade
# with --force when the installed version is newer, otherwise update.
# Globals: WP_VERSION (read)
update_wp() {
  # BUG FIX: the original used the lexicographic [[ a > b ]] string
  # comparison, which misorders version numbers (e.g. "10.0" < "9.9").
  # Compare with sort -V (version sort) instead.
  local installed
  installed=$(noroot wp core version)
  if [ "${installed}" != "${WP_VERSION}" ] && \
     [ "$(printf '%s\n' "${installed}" "${WP_VERSION}" | sort -V | tail -n 1)" = "${installed}" ]; then
    echo " * Installing an older version '${WP_VERSION}' of WordPress"
    noroot wp core update --version="${WP_VERSION}" --force
  else
    echo " * Updating WordPress '${WP_VERSION}'"
    noroot wp core update --version="${WP_VERSION}"
  fi
}
setup_cli() {
  # Regenerate wp-cli.yml for this site: default path plus two SSH
  # aliases (@vvv and @<sitename>) that both point at the VM install.
  local yml="${VVV_PATH_TO_SITE}/wp-cli.yml"
  rm -f "${yml}"
  {
    echo "# auto-generated file"
    echo "path: \"${PUBLIC_DIR}\""
    echo "@vvv:"
    echo "  ssh: vagrant"
    echo "  path: ${PUBLIC_DIR_PATH}"
    echo "@${VVV_SITE_NAME}:"
    echo "  ssh: vagrant"
    echo "  path: ${PUBLIC_DIR_PATH}"
  } > "${yml}"
}
#Custom B2BDD functions
copy_htaccess_file() {
  # Deploy the provision .htaccess into the web root exactly once;
  # an existing .htaccess is never overwritten.
  if [ -f "${PUBLIC_DIR_PATH}/.htaccess" ]; then
    echo " * .htaccess already exists"
    return 0
  fi
  if [ -f "${VVV_PATH_TO_SITE}/provision/.htaccess" ]; then
    echo " * Copying .htaccess file"
    noroot cp -f "${VVV_PATH_TO_SITE}/provision/.htaccess" "${PUBLIC_DIR_PATH}/.htaccess"
  fi
}
# Create movefile.yml (Wordmove config) in the web root from the provision
# template, substituting the {{...}} placeholders with the site's
# development/production credentials. An existing movefile.yml is kept.
# Globals: PUBLIC_DIR_PATH, VVV_PATH_TO_SITE, VVV_SITE_NAME and the
#          DEVELOPMENT_*/PRODUCTION_* variables (read)
create_movefile() {
  if [ ! -f "${PUBLIC_DIR_PATH}/movefile.yml" ]; then
    # BUG FIX: the original logged " * movefile.yml was found" here, but
    # this branch runs precisely when the file does NOT exist yet.
    echo " * movefile.yml not found, creating it from the template"
    noroot cp -f "${VVV_PATH_TO_SITE}/provision/movefile.yml" "${PUBLIC_DIR_PATH}/movefile.yml"
    # Substitute each template token in place ('#' as sed delimiter so
    # URLs and paths containing '/' pass through untouched).
    noroot sed -i "s#{{SITENAME}}#${VVV_SITE_NAME}#" "${PUBLIC_DIR_PATH}/movefile.yml"
    noroot sed -i "s#{{DEVELOPMENTURL}}#${DEVELOPMENT_URL}#" "${PUBLIC_DIR_PATH}/movefile.yml"
    noroot sed -i "s#{{DEVELOPMENTSERVERPATH}}#${DEVELOPMENT_SERVER_PATH}#" "${PUBLIC_DIR_PATH}/movefile.yml"
    noroot sed -i "s#{{DEVELOPMENTDB}}#${DEVELOPMENT_DB_NAME}#" "${PUBLIC_DIR_PATH}/movefile.yml"
    noroot sed -i "s#{{DEVELOPMENTDBUSER}}#${DEVELOPMENT_DB_USER}#" "${PUBLIC_DIR_PATH}/movefile.yml"
    noroot sed -i "s#{{DEVELOPMENTDBPASS}}#${DEVELOPMENT_DB_PASS}#" "${PUBLIC_DIR_PATH}/movefile.yml"
    noroot sed -i "s#{{DEVELOPMENTSSHHOST}}#${DEVELOPMENT_SERVER}#" "${PUBLIC_DIR_PATH}/movefile.yml"
    noroot sed -i "s#{{DEVELOPMENTSSHUSER}}#${DEVELOPMENT_SERVER_USER}#" "${PUBLIC_DIR_PATH}/movefile.yml"
    noroot sed -i "s#{{DEVELOPMENTSSHPASSWORD}}#${DEVELOPMENT_SERVER_PASS}#" "${PUBLIC_DIR_PATH}/movefile.yml"
    noroot sed -i "s#{{PRODUCTIONURL}}#${PRODUCTION_URL}#" "${PUBLIC_DIR_PATH}/movefile.yml"
    noroot sed -i "s#{{PRODUCTIONSERVERPATH}}#${PRODUCTION_SERVER_PATH}#" "${PUBLIC_DIR_PATH}/movefile.yml"
    noroot sed -i "s#{{PRODUCTIONDB}}#${PRODUCTION_DB_NAME}#" "${PUBLIC_DIR_PATH}/movefile.yml"
    noroot sed -i "s#{{PRODUCTIONDBUSER}}#${PRODUCTION_DB_USER}#" "${PUBLIC_DIR_PATH}/movefile.yml"
    noroot sed -i "s#{{PRODUCTIONDBPASS}}#${PRODUCTION_DB_PASS}#" "${PUBLIC_DIR_PATH}/movefile.yml"
    noroot sed -i "s#{{PRODUCTIONSSHHOST}}#${PRODUCTION_SERVER}#" "${PUBLIC_DIR_PATH}/movefile.yml"
    noroot sed -i "s#{{PRODUCTIONSSHUSER}}#${PRODUCTION_SERVER_USER}#" "${PUBLIC_DIR_PATH}/movefile.yml"
    noroot sed -i "s#{{PRODUCTIONSSHPASSWORD}}#${PRODUCTION_SERVER_PASS}#" "${PUBLIC_DIR_PATH}/movefile.yml"
  else
    echo " * movefile.yml exists"
  fi
}
# Clone the B2BDD parent theme from Bitbucket into wp-content/themes and
# copy its bundled child theme next to it. No-op if the theme already
# exists or the themes directory is missing. Requires SSH keys registered
# with Bitbucket.
install_b2bdd_theme() {
  echo " * Attempting to install B2BDD theme into WordPress theme directory"
  if [ -d "${PUBLIC_DIR_PATH}/wp-content/themes/b2bdd/" ]; then
    echo " * B2BDD Theme directory already exists."
    return 0
  fi
  if [ ! -d "${PUBLIC_DIR_PATH}/wp-content/themes/" ]; then
    return 0
  fi
  echo " * Theme directory exists. Attempting to clone b2bdd-theme repository from bitbucket to b2bdd folder"
  # ROBUSTNESS FIX: run inside a subshell so the 'cd' cannot leak the
  # working-directory change back into the provisioner that called us.
  (
    cd "${PUBLIC_DIR_PATH}/wp-content/themes/" || exit 1
    echo "git clone git@bitbucket.org:b2bdd/b2bdd-theme.git b2bdd"
    noroot git clone git@bitbucket.org:b2bdd/b2bdd-theme.git b2bdd
    if [ $? -eq 0 ]; then
      echo " * B2BDD theme cloned - creating child theme folder"
      noroot cp -r b2bdd/b2bdd-child .
    else
      echo " * Git clone failed - b2bdd theme not created. Confirm keys are setup on your machine and bitbucket"
    fi
  )
}
# --- Main provisioning sequence ---------------------------------------
cd "${VVV_PATH_TO_SITE}"
setup_cli
setup_database
setup_nginx_folders
if [ "${WP_TYPE}" == "none" ]; then
  echo " * wp_type was set to none, provisioning WP was skipped, moving to Nginx configs"
else
  echo " * Install type is '${WP_TYPE}'"
  # Install and configure the latest stable version of WordPress
  # wp-load.php missing -> core files not downloaded yet.
  if [[ ! -f "${PUBLIC_DIR_PATH}/wp-load.php" ]]; then
    download_wordpress "${WP_VERSION}" "${WP_LOCALE}"
  fi
  if [[ ! -f "${PUBLIC_DIR_PATH}/wp-config.php" ]]; then
    initial_wpconfig
  fi
  # Files present but database empty: restore a dump if one exists,
  # otherwise run a fresh install; already installed -> just update core.
  if ! $(noroot wp core is-installed ); then
    echo " * WordPress is present but isn't installed to the database, checking for SQL dumps in wp-content/database.sql or the main backup folder."
    if [ -f "${PUBLIC_DIR_PATH}/wp-content/database.sql" ]; then
      restore_db_backup "${PUBLIC_DIR_PATH}/wp-content/database.sql"
    elif [ -f "/srv/database/backups/${VVV_SITE_NAME}.sql" ]; then
      restore_db_backup "/srv/database/backups/${VVV_SITE_NAME}.sql"
    else
      install_wp
    fi
  else
    update_wp
  fi
fi
copy_nginx_configs
copy_htaccess_file #b2bdd
setup_wp_config_constants
create_movefile #b2bdd
install_plugins
install_themes
install_b2bdd_theme #b2bdd
echo " * Site Template provisioner script completed for ${VVV_SITE_NAME}"
| true
|
4e89ba9dfd12042cd6da8830033a6dad19fa5edc
|
Shell
|
otus-kuber-2019-06/SergeSpinoza_platform
|
/kubernetes-vault/vault-guides/secrets/database-mysql-gcloud-demo/scripts/install_mysql_ubuntu.sh
|
UTF-8
| 669
| 2.953125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Demo setup: install MariaDB, load the 'employees' sample database and
# create a Vault admin user. Intended for throwaway demo VMs only.
# NOTE(review): credentials are hardcoded and passed on the command line
# (visible in 'ps' output) -- acceptable for a demo, not for production.
set -e
cd /tmp

echo "Installing MariaDB MySQL..."
sudo apt-get update -y
sudo apt-get install -y mariadb-server

# Listen on all interfaces and skip reverse-DNS lookups for clients.
echo -e "\n[mysqld]\nbind-address=0.0.0.0\nskip-name-resolve=1" | sudo tee -a /etc/mysql/my.cnf

# Start MySQL and set root password
sudo systemctl start mysql
sudo mysqladmin -uroot password 'bananas'

# Load some sample data into an 'employees' database
git clone https://github.com/datacharmer/test_db
cd test_db
sudo mysql -u root -p'bananas' < employees.sql

# Create our Vault user
sudo mysql -u root -p'bananas' << EOF
GRANT ALL PRIVILEGES ON *.* TO 'vaultadmin'@'%' IDENTIFIED BY 'vaultpw' WITH GRANT OPTION;
FLUSH PRIVILEGES;
EOF
| true
|
1292fca8d6bd9527a08f4a447778ff454d154d87
|
Shell
|
xeno-by/rsc
|
/bin/scalafmt
|
UTF-8
| 440
| 3.125
| 3
|
[
"Apache-2.0",
"BSD-3-Clause",
"DOC",
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Bootstrap (once) and run a pinned scalafmt version via coursier,
# caching both under ~/.scalafmt. All arguments pass through to scalafmt.

VERSION="1.2.0"
CACHE="$HOME/.scalafmt"
COURSIER="$CACHE/coursier"
SCALAFMT="$CACHE/scalafmt-$VERSION"

# IDIOM FIX: quote every path expansion -- the original left $CACHE,
# $COURSIER and $SCALAFMT unquoted, which breaks when $HOME contains
# spaces or glob characters.
if [ ! -d "$CACHE" ]; then
  mkdir -p "$CACHE"
fi
if [ ! -f "$COURSIER" ]; then
  curl -L -o "$COURSIER" https://git.io/vgvpD
  chmod +x "$COURSIER"
fi
if [ ! -f "$SCALAFMT" ]; then
  "$COURSIER" bootstrap com.geirsson:scalafmt-cli_2.11:"$VERSION" --main org.scalafmt.cli.Cli -o "$SCALAFMT"
  chmod +x "$SCALAFMT"
fi
"$SCALAFMT" "$@"
| true
|
610116c28ea0702ecb20cbd543f9c5be2c37ad94
|
Shell
|
canercandan/agat-engine
|
/resources/admin/variable/create_passwd.sh
|
UTF-8
| 511
| 3.453125
| 3
|
[] |
no_license
|
#!/usr/bin/env sh
# Append an htdigest-style entry "login:realm:md5(login:realm:password)"
# to the given file.
# Usage: create_passwd.sh <login> <password> <htdigest-file>
# NOTE(review): the argument guards exit with status 0 on missing input,
# so callers cannot distinguish success from misuse -- preserved as-is
# because the calling engine may rely on it. TODO confirm.

LOGIN=$1
PASSWD=$2
GROUP="agat"
HTDIGEST_PATH=$3

passwd_digest=

# Bail out (status 0, see note above) when the login argument is missing.
is_login()
{
    if [ "$LOGIN" = "" ]; then
	exit 0
    fi
}

# Bail out when the password argument is missing.
is_passwd()
{
    if [ "$PASSWD" = "" ]; then
	exit 0
    fi
}

# Bail out when the htdigest file path argument is missing.
is_path()
{
    if [ "$HTDIGEST_PATH" = "" ]; then
	exit 0
    fi
}

# Compute md5("login:realm:password") into $passwd_digest.
passwd_generate()
{
    # PORTABILITY FIX: 'echo -n' is undefined under POSIX sh (some shells
    # print a literal "-n"); printf is the specified equivalent.
    passwd_digest=$(printf '%s' "$LOGIN:$GROUP:$PASSWD" | openssl dgst -md5)
}

# Append the "login:realm:digest" line to the htdigest file.
add_passwd()
{
    echo "$LOGIN:$GROUP:$passwd_digest" >> "$HTDIGEST_PATH";
}

is_login
is_passwd
is_path
passwd_generate
add_passwd
| true
|
c86d2dd8889d25387ee56bf4a754ff2b35f0821f
|
Shell
|
trumanz/algorithms
|
/coin_or/coin_install.sh
|
UTF-8
| 335
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/sh
# Download, build, test and install COIN-OR Bcp 1.4.3 into ./3pp/install_dir.

DIR_3PP=$(pwd)/3pp
DIR_3PP_INSTALL=${DIR_3PP}/install_dir

# BUG FIX: the original read 'cd ${DIR_3PP} \' followed by 'wget ...' on
# the next line; the backslash continuation fused them into the single
# command 'cd <dir> wget <url>', so the download/build chain never ran.
# An explicit '&&' after the cd is required.
mkdir -p "${DIR_3PP_INSTALL}" && cd "${DIR_3PP}" && \
wget https://www.coin-or.org/download/source/Bcp/Bcp-1.4.3.zip && \
unzip Bcp-1.4.3.zip && cd Bcp-1.4.3 && mkdir build && cd build && ../configure --prefix="${DIR_3PP_INSTALL}" && \
make && make test && make install
| true
|
0f387d828bc85156ee0b543241f58e128c250f2f
|
Shell
|
zInnovationLab/BMzCC
|
/scripts/jenkins_slave/pull_images.sh
|
UTF-8
| 267
| 2.796875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Pull every "<USR_NAME>/bluemix*" image advertised by a private registry's
# v2 catalog endpoint.
# Requires REGISTRY, REGISTRY_PORT and USR_NAME in the environment.
# NOTE(review): the unquoted $(...) after <<< is relied upon to word-split
# the whitespace-separated image list into the IMGS array -- do not quote.
read -r -a IMGS <<< $(echo $(curl http://${REGISTRY}:${REGISTRY_PORT}/v2/_catalog |python -m json.tool | grep -o '"'${USR_NAME}'/bluemix.*"' | sed 's/"//g'))
for img in "${IMGS[@]}"
do
   # echo "$img"
   docker pull "${REGISTRY}:${REGISTRY_PORT}/$img"
done
| true
|
76827f4b5e28d83ddd301cd585d2651a9c36a881
|
Shell
|
hurwitzlab/Centrifuge_Bubble
|
/config.sh
|
UTF-8
| 1,022
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
# Environment configuration sourced by the Centrifuge bubble-plot
# pipeline scripts. Edit the values below per dataset, then source this
# file before running the pipeline.

#Establish current working directory
export CWD=$PWD

#Directory where scripts are located
export SCRIPT_DIR="$CWD/scripts"

#Centrifuge Database Location
export CENT_DB="/rsgrps/bh_class/b_compressed+h+v/b_compressed+h+v"

#Directory containing FASTA/Q files (Centrifuge ran on these files)
export FASTA_DIR="/rsgrps/bhurwitz/jetjr/THS/reads"

#Single or Paired End Reads? (single || paired)
#IMPORTANT: For paired end files see README.md for additional information
export TYPE="single"

#FASTA/Q File Extension (common extensions include fasta, fa, fastq, fastq)
#DO NOT INCLUDE the dot "."
export FILE_EXT="fasta"

#Centrifuge Report Out Directory
export REPORT_DIR="$CWD/reports"

#Bubble Plot Out Directory
export PLOT_OUT="$CWD/plots/"

#Plot file name and title
export PLOT_FILE="bubble"
export PLOT_TITLE='Dermatitis_Example_Plot'

#File type (Fasta or Fastq | fasta = f; fastq = q)
export FILE_TYPE="f"

#Standard Error/Out Directory
export STDERR_DIR="$CWD/std-err"
export STDOUT_DIR="$CWD/std-out"
| true
|
fa8c9d437e71c1ec6adedb86ab9cec9b0f667b26
|
Shell
|
ululh/seo_monitor
|
/grafana_mysql/mysql_scripts/initial_load.sh
|
UTF-8
| 303
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
# Create the mdr table, then bulk-load every CSV found in
# /var/lib/mysql/CSVs by instantiating the load_csv template per file.
# Usage: initial_load.sh <mysql-root-password>
# NOTE(review): the password travels on the command line and is visible
# in 'ps' output; prefer MYSQL_PWD or a defaults file for production.

PASS=$1
mysql -uroot -p"$PASS" < /mysql_scripts/create_table_mdr.sql

# IDIOM FIX: the original iterated over $(ls ...), which word-splits and
# glob-expands filenames. Iterate the glob and take the basename instead.
for path in /var/lib/mysql/CSVs/*
do
  [ -e "$path" ] || continue   # empty directory: glob stays literal
  file=$(basename "$path")
  sed -e "s/TOTO/$file/" /mysql_scripts/load_csv.sql.template > /mysql_scripts/load_csv.sql
  cat /mysql_scripts/load_csv.sql
  mysql -uroot -p"$PASS" < /mysql_scripts/load_csv.sql
done
| true
|
78b2ad648284fdc00e4798def90fb73c2e5a80a1
|
Shell
|
zunda/photocan
|
/etc/usbmount/mount.d/99_copy_photo
|
UTF-8
| 628
| 3.703125
| 4
|
[
"LicenseRef-scancode-other-permissive"
] |
permissive
|
#!/bin/sh
# usbmount mount hook: when a USB volume appears, back up its photos and
# either queue it for upload (handing off to the umount hook) or unmount.
# UM_MOUNTPOINT is provided by usbmount.
logger -t photocan $UM_MOUNTPOINT has been mounted
mkdir -p /tmp/photocan/backup
mkdir -p /tmp/photocan/upload
rm -f /tmp/photocan/poweroff.*
# Track this volume by its mountpoint basename.
name=`basename $UM_MOUNTPOINT`
echo $UM_MOUNTPOINT > /tmp/photocan/backup/$name
logger -t photocan Checking $name for photos to be backed up
/usr/local/bin/backup-photos $UM_MOUNTPOINT
RETVAL="$?"
logger -t photocan Finished backing up photos from $name
# backup-photos exiting 1 signals that photos still need uploading.
if [ "$RETVAL" = 1 ]; then
	# Need to upload photos later
	mv /tmp/photocan/backup/$name /tmp/photocan/upload/$name
	sh /etc/usbmount/umount.d/99_copy_photo
else
	rm /tmp/photocan/backup/$name
	umount $UM_MOUNTPOINT
fi
| true
|
8d6fcf668c280ccda51b14d74e81047cfc7e4ce1
|
Shell
|
gooofy/aqb
|
/dist.sh
|
UTF-8
| 3,894
| 2.703125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build the AQB distribution archive: stage binaries, libraries, examples,
# tutorial assets and icons into target/m68k-amigaos/dist/aqb, pack them
# with lha, and copy the archive to the Amiga test locations.
# NOTE(review): relies on $XAMIGA being set in the environment -- TODO
# confirm where it is exported.
VERSION=0.8.3preview1
WORKDIR=`pwd`/target/m68k-amigaos/dist
LHA=${WORKDIR}/aqb-${VERSION}.lha
rm -rf ${WORKDIR}/aqb
rm -rf ${WORKDIR}/aqb.lha
mkdir -p ${WORKDIR}/aqb/lib
mkdir -p ${WORKDIR}/aqb/examples
mkdir -p ${WORKDIR}/aqb/examples/bench
mkdir -p ${WORKDIR}/aqb/examples/demo
mkdir -p ${WORKDIR}/aqb/tutorial
# Compiler binary plus runtime/support libraries and symbol files.
cp target/m68k-amigaos/bin/aqb ${WORKDIR}/aqb/
cp src/lib/_brt/_brt.sym ${WORKDIR}/aqb/lib/
cp src/lib/_brt/_brt.a ${WORKDIR}/aqb/lib/
cp src/lib/_aqb/_aqb.sym ${WORKDIR}/aqb/lib/
cp src/lib/_aqb/_aqb.a ${WORKDIR}/aqb/lib/
cp src/lib/IFFSupport/IFFSupport.sym ${WORKDIR}/aqb/lib/
cp src/lib/IFFSupport/IFFSupport.a ${WORKDIR}/aqb/lib/
cp src/lib/AnimSupport/AnimSupport.sym ${WORKDIR}/aqb/lib/
cp src/lib/AnimSupport/AnimSupport.a ${WORKDIR}/aqb/lib/
cp src/lib/GadToolsSupport/GadToolsSupport.sym ${WORKDIR}/aqb/lib/
cp src/lib/GadToolsSupport/GadToolsSupport.a ${WORKDIR}/aqb/lib/
cp src/lib/OSDevices/OSDevices.sym ${WORKDIR}/aqb/lib/
cp src/lib/OSExec/OSExec.sym ${WORKDIR}/aqb/lib/
cp src/lib/OSGraphics/OSGraphics.sym ${WORKDIR}/aqb/lib/
cp src/lib/OSHardware/OSHardware.sym ${WORKDIR}/aqb/lib/
cp src/lib/OSIntuition/OSIntuition.sym ${WORKDIR}/aqb/lib/
cp src/lib/OSGadTools/OSGadTools.sym ${WORKDIR}/aqb/lib/
cp src/lib/OSGadTools/OSGadTools.a ${WORKDIR}/aqb/lib/
cp src/lib/OSUtility/OSUtility.sym ${WORKDIR}/aqb/lib/
cp src/lib/startup.o ${WORKDIR}/aqb/lib/
# Docs and Workbench icons.
cp README.guide ${WORKDIR}/aqb/
cp CHANGELOG.md ${WORKDIR}/aqb/CHANGELOG
cp dist/amiga/Icons/aqb_topdir.info ${WORKDIR}/aqb.info
cp dist/amiga/Icons/aqb.info ${WORKDIR}/aqb/aqb.info
cp dist/amiga/Icons/examples.info ${WORKDIR}/aqb/examples.info
cp dist/amiga/Icons/bench.info ${WORKDIR}/aqb/examples/bench.info
cp dist/amiga/Icons/demo.info ${WORKDIR}/aqb/examples/demo.info
cp dist/amiga/Icons/README.guide.info ${WORKDIR}/aqb/
cp dist/amiga/Icons/CHANGELOG.info ${WORKDIR}/aqb/CHANGELOG.info
cp dist/amiga/Icons/tutorial.info ${WORKDIR}/aqb/
# Every .bas example/tutorial ships with its matching .info icon.
for EX in examples/bench/*.bas ; do
    cp $EX ${WORKDIR}/aqb/examples/bench/
    cp dist/amiga/Icons/`basename $EX`.info ${WORKDIR}/aqb/examples/bench/
done
for EX in examples/demo/*.bas ; do
    cp $EX ${WORKDIR}/aqb/examples/demo/
    cp dist/amiga/Icons/`basename $EX`.info ${WORKDIR}/aqb/examples/demo/
done
for EX in tutorial/*.bas ; do
    cp $EX ${WORKDIR}/aqb/tutorial/
    cp dist/amiga/Icons/`basename $EX`.info ${WORKDIR}/aqb/tutorial/
done
cp -r tutorial/imgs ${WORKDIR}/aqb/tutorial/
cp -r examples/demo/imgs ${WORKDIR}/aqb/examples/demo/
# remove unfinished examples from distribution
rm -f ${WORKDIR}/aqb/examples/demo/fplot*
rm -f ${WORKDIR}/aqb/examples/demo/banana*
cp -r help ${WORKDIR}/aqb/
cp -r dist/amiga/Fonts ${WORKDIR}/aqb/tutorial
cp -r dist/amiga/8svx ${WORKDIR}/aqb/tutorial
cp -r dist/amiga/imgs ${WORKDIR}/aqb/tutorial
cp -r dist/amiga/Fonts ${WORKDIR}/aqb/examples/demo
cp -r dist/amiga/8svx ${WORKDIR}/aqb/examples/demo
cp -r dist/amiga/imgs ${WORKDIR}/aqb/examples/demo
# Pack the staged tree into the .lha archive.
cd ${WORKDIR}
lha a ${LHA} aqb.info aqb
cd -
echo "${LHA} created."
# Deploy the archive to the local Amiga shares.
LAMIGA=$XAMIGA/..
echo cp ${LHA} $XAMIGA
cp ${LHA} $XAMIGA
if [ -e "$LAMIGA/Apps" ]; then
    cd "$LAMIGA/Apps"
    rm -rf aqb*
    lha x -f ${LHA}
    cd -
fi
#EAMIGA=/mnt/amiga
#if [ -e "$EAMIGA/Apps" ]; then
#
#    cp ${LHA} $EAMIGA/x/
#    cd "$EAMIGA/Apps"
#    rm -rf aqb*
#    lha x ${LHA}
#    cd -
#
#fi
EXPORT=$XAMIGA/../../export
cp ${LHA} ${EXPORT}
| true
|
24b5b124c438676ab2af89b85a09a321640befc6
|
Shell
|
ProjectBarks/STEMClasses
|
/distribute.sh
|
UTF-8
| 3,917
| 3.921875
| 4
|
[] |
no_license
|
#!/bin/sh
# Package the STEMClasses build: copy compiled jars into ./export/, sign
# them, and assemble a signed macOS .app bundle.
# NOTE(review): the script later uses 'read -s' and 'echo -n', which are
# bashisms under #!/bin/sh -- confirm the target shell.

#Global Variables
EXPORT=./export/
TIMESTAMP_SERVER='http://timestamp.digicert.com'
UPDATER_FILE='Updater.jar'
CORE_FILE='STEMClasses.jar'
NOTIFICATION_FILE='libOSXNotification.dylib'
checkcopy ()
{
if [ -z $1 ] || [ -z $2 ]; then
echo 'Empty variable!'
exit 1
fi
if [ ! -f $1 ]; then
printf '%s %s\n' $2 'not found!'
exit 1
fi
printf ' %s %s\n' 'Copying' $2
cp $1 ${EXPORT}$2
}
#Signing a jar and all of its classes
#######################################
# Sign a jar inside ${EXPORT} with key alias 'mykey'.
# Globals:   EXPORT, TIMESTAMP_SERVER (read)
# Arguments: $1 - jar filename inside ${EXPORT}; $2 - keystore password
# Exits:     1 when an argument is missing
# NOTE(review): the password is passed on the command line and is visible
# in 'ps' output; piping to grep also swallows jarsigner's exit status.
#######################################
sign ()
{
    if [ -z "$1" ] || [ -z "$2" ]; then
        echo 'Empty variable!'
        exit 1
    fi

    printf ' %s %s\n' 'Signing...' "$1"
    # CONSISTENCY FIX: use the TIMESTAMP_SERVER constant declared at the
    # top of the script instead of repeating the URL inline (the original
    # defined the variable but never used it).
    jarsigner -tsa "$TIMESTAMP_SERVER" "${EXPORT}$1" mykey -storepass "$2" | grep 'error'
}
usage() {
    # Print the command-line help for distribute.sh.
    printf '%s\n' \
        'usage: distribute [[-c] [-h]] [-l] [-p (password)]' \
        '    [-c | --compile] - Enables compiling of maven projects' \
        '    [-l | --launch] - Launch the compiled project after compile' \
        '    [-h | --help] - Information about the paramaters' \
        '    [-p | --password] pas - The password auto provided to avoid input'
}
#Argument reader
launch=false
while [ "$1" != "" ]; do
    case $1 in
        #Compile - compile all projects in the parent
        -c | --compile )        echo 'Compiling projects...'
                                mvn -q install compile package
                                ;;
        #Launches after compile
        -l | --launch )         launch=true
                                ;;
        #Usage help
        -h | --help )           usage
                                exit
                                ;;
        #Password
        -p | --password)        shift
                                password=$1
                                ;;
        #Invalid param
        * )                     usage
                                exit 1
    esac
    shift
done

echo '=== STEMClasses Distributer ==='

#Cleanup - Delete and recreate directory
echo 'Starting cleanup'
if [ -d ${EXPORT} ]; then
  printf '    %s %s\n' 'Deleteting' ${EXPORT}
  rm -rf ${EXPORT}
fi
printf '    %s %s\n' 'Creating' ${EXPORT}
mkdir -p ${EXPORT}

#Copy the compiled projects
echo 'Starting copy process'
checkcopy ./Core/target/core-jar-with-dependencies.jar ${CORE_FILE}
checkcopy ./Deployer/target/Deployer-jar-with-dependencies.jar ${UPDATER_FILE}
checkcopy ./resources/libOSXNotification.dylib ${NOTIFICATION_FILE}

#Jar Signer
#Read password
# Prompt interactively unless -p supplied it already.
if [ -z ${password} ]; then
  echo -n Keystore Password:
  read -s password
fi
printf '\nStaring signing process\n'
#Sign every jar
sign ${CORE_FILE} ${password}
sign ${UPDATER_FILE} ${password}

echo 'Starting packager process'
pck_os=${EXPORT}package/Contents/MacOS/
# packr builds the initial .app skeleton from package.json.
java -jar packr.jar ./resources/package.json | sed 's/^/    /'

echo 'Starting package patcher process'
# Swap the packr-provided JRE for the bundled one, keeping only rt.jar.
mv ${pck_os}jre/ ${pck_os}jre_temp/
echo '    Unzipping...'
unzip ./resources/jre.zip -d ${pck_os} | grep 'error'
echo '    Patching...'
rm ${pck_os}jre/lib/rt.jar
cp ${pck_os}jre_temp/lib/rt.jar ${pck_os}jre/lib/rt.jar
cp ./resources/icon.icns ${EXPORT}package/Contents/Resources/icons.icns
rm ${pck_os}STEMClasses
cp ./resources/launcher.sh ${pck_os}STEMClasses
chmod +x ${pck_os}STEMClasses
# NOTE(review): the -e suffix handling below matches BSD/macOS sed -i.
sed -i -e 's/com\.yourcompany\.identifier/net.\projectbarks.\stemclasses/g' ${EXPORT}package/Contents/Info.plist
echo '    Cleanup...'
# Strip JavaFX and docs to shrink the bundle.
rm -rf ${pck_os}jre_temp/
rm -rf ${EXPORT}package/Contents/MacOS/jre/man
rm ${EXPORT}package/Contents/MacOS/jre/lib/libjfxwebkit.dylib
rm ${EXPORT}package/Contents/MacOS/jre/lib/jfxrt.jar
rm -rf ${EXPORT}package/Contents/Info.plist-e
echo '    Patch complete'
mv ${EXPORT}package/ ${EXPORT}STEMClasses.app/
echo 'Package created'

#Launch jar if enabled
if [ ${launch} = true ]; then
  echo 'Launching Application...'
  java -jar ${EXPORT}'Updater.jar'
  echo 'Completed!'
fi
| true
|
e286f7ae8be1cdb64e5a263bd8b7e86e44ee595a
|
Shell
|
igregson/dotfiles
|
/zshrc
|
UTF-8
| 3,848
| 2.65625
| 3
|
[] |
no_license
|
# ---------------------------------------------------
# Pure Prompt
# source:
# https://github.com/sindresorhus/pure#manually
fpath=( "$HOME/.zfunctions" $fpath )
autoload -U promptinit && promptinit

# pure prompt options
PURE_CMD_MAX_EXEC_TIME=10

prompt pure

# for better terminal emacs colors
export TERM=xterm-256color

#---------------------------------------------------
# Key Bindings

bindkey "^X^I" expand-or-complete-prefix

# bind UP and DOWN arrow keys -- z-history-substring-plugin
zmodload zsh/terminfo
bindkey "$terminfo[kcuu1]" history-substring-search-up
bindkey "$terminfo[kcud1]" history-substring-search-down

# copy & paste shortcuts
alias "c=xclip"
alias "v=xclip -o"

# ---------------------------------------------------
# PATH
# NOTE(review): rbenv and rvm are both on PATH below -- they normally
# conflict; confirm which one is actually in use.

source ~/.phpbrew/bashrc
export GOPATH=$HOME/Web/Go
export NODE_PATH=/usr/local/lib/node_modules
export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:$HOME/bin:$HOME/npm/bin:$HOME/.rvm/bin:/usr/local/go/bin:$HOME/Web/Go/bin:$HOME/.rbenv/bin:$HOME/.rbenv/plugins/ruby-build/bin
eval "$(rbenv init -)"

# ---------------------------------------------------
# Aliases

alias zreload=". $HOME/.zshrc && echo '[GOOD-NEWS] Z-Shell successfully reloaded!'" # Reload Z-Shell
alias zre="source $HOME/.zshrc && echo '[GOOD-NEWS] Z-Shell successfully reloaded!'" # Reload Z-Shell
alias fontsreload="fc-cache -vf" # Clear Font Cache
alias zedit="vim ~/.zshrc"
alias i3edit="vim ~/.i3/config"

# cd
alias ..='cd ..'
alias ....='cd ../../'
alias ig='cd Web/ig.com'
alias themes='cd ~/Web/Theme-Dev'
alias blockster='cd ~/Web/Theme-Dev/t-Blockster'
alias blocksterwp='cd ~/Web/Theme-Dev/t-Blockster/Blockster-WP/wp-content/themes/blockster-wp'
alias codestead='cd ~/Web/theCodestead.com'
alias t-codestead='cd ~/Web/theCodestead.com/themes/codestead'
alias filethemes='cd ~/Web/FileThemes/FileThemes-Statamic-2/site/themes/filethemes-1'
alias ffh='cd ~/Web/FlatFileHub'
alias wes.com='cd ~/Web/for-Wesfed/wesfed-stat/wesfed.com'
alias zippy='cd ~/Web/for-Wesfed/zippyspot'
alias nextlevel='cd ~/Web/for-Wesfed/nextlevelkirby'
alias dutchman='cd ~/Web/for-Wesfed/dutchman'
alias stattracker='cd ~/Web/for-Wesfed/StatTracker/staticInitial'
alias ump='cd ~/Web/for-Fr.Peter/UMP/ump.com'
alias orthc='cd ~/Web/for-Fr.Peter/Orthodoxc/orthodoxc.com/site/themes/orthodoxc-1'
alias orthethos='cd ~/Web/for-Fr.Peter/OrthodoxEthos/orthodoxethos.com/site/themes/orthodoxethos-1'
alias beheard='cd ~/Web/beHeard/source'
alias wp-beheard='cd ~/Web/beHeard/plugin-wp/wp-content/plugins/beHeard'
alias akathists='cd ~/Web/akathists/akathists.com'
alias t-centio='cd ~/Web/Theme-Dev/t-Centio/source/content/themes/centio'

# li
alias lb='\ls --color=auto' # on preventing call to other alias: http://unix.stackexchange.com/questions/39291/run-a-command-that-is-shadowed-by-an-alias
alias lba='\ls -a --color=auto'
alias ls='ls -l --color=auto'
alias l='ls -l --color=auto'
alias ll='ls -al --color=auto'
alias la='ls -al --color=auto'
alias programs='\ls /usr/bin/' # for finding names of excutables
alias shortcuts='xfconf-query -c xfce4-keyboard-shortcuts -l -v | cut -d'"'"'/'"'"' -f4 | awk '"'"'{printf "%30s", $2; print "\t" $1}'"'"' | sort | uniq'
# on the quote-maddness above:
# http://stackoverflow.com/questions/1250079/how-to-escape-single-quotes-within-single-quoted-strings

# open
# suffix alias: typing a *.com path opens it in Chrome.
alias -s com='google-chrome'

# ssh
alias sshpxk="ssh isaac@212.47.235.134"
alias sshpixelandkraft="ssh isaac@212.47.235.134"
alias sshpxkr="ssh root@212.47.235.134"
alias sshump="ssh orthodoxc@web514.webfaction.com"

# ---------------------------------------------------
# Plugins
# NOTE: syntax-highlighting should be sourced before
# history-substring-search per the plugins' install docs -- TODO confirm.
source $HOME/.zplugins/zsh-syntax-highlighting/zsh-syntax-highlighting.zsh
source $HOME/.zplugins/zsh-history-substring-search/zsh-history-substring-search.zsh
source $HOME/.zplugins/zsh-tab-completion/completion.zsh
| true
|
1bf2b15c1db88907db8705b8785884fc39126d4b
|
Shell
|
ho-nl/webcomponents-flag-icon
|
/svg/convert2PNG
|
UTF-8
| 2,127
| 3.515625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Rasterize every flag SVG in the current directory to PNG at a fixed
# width, crush the PNGs, and move them into ../../png/<width>/<dir>.
# Arguments: $1 - aspect-ratio key ('43', '32', '53', '2' or '1')
#            $2 - target width in px (16, 36, 75 or 225)
# The if-ladder below maps (ratio, width) -> height and picks the
# ImageMagick resize filter per size.
if [ "$2" = "16" ]; then
	resize="-filter box -resize"
	width="$2"
	if [ "$1" == "43" ] || [ "$1" == "32" ] || [ "$1" == "53" ] || [ "$1" == "2" ]; then
		height="11"
	elif [ "$1" = "1" ]; then
		height="$width"
	fi
elif [ "$2" = "36" ]; then
	resize="-resize"
	width="$2"
	if [ "$1" = "43" ]; then
		height="27"
	elif [ "$1" = "32" ]; then
		height="24"
	elif [ "$1" = "53" ]; then
		height="22"
	elif [ "$1" = "2" ]; then
		height="18"
	elif [ "$1" = "1" ]; then
		height="$width"
	fi
elif [ "$2" = "75" ]; then
	resize="-filter triangle -resize"
	width="$2"
	if [ "$1" = "43" ]; then
		height="56"
	elif [ "$1" = "32" ]; then
		height="50"
	elif [ "$1" = "53" ]; then
		height="45"
	elif [ "$1" = "2" ]; then
		height="38"
	elif [ "$1" = "1" ]; then
		height="$width"
	fi
elif [ "$2" = "225" ]; then
	resize="-filter triangle -resize"
	width="$2"
	if [ "$1" = "43" ]; then
		height="168"
	elif [ "$1" = "32" ]; then
		height="150"
	elif [ "$1" = "53" ]; then
		height="135"
	elif [ "$1" = "2" ]; then
		height="112"
	elif [ "$1" = "1" ]; then
		height="$width"
	fi
else
	echo "argument 2 must be 16, 36, 75, or 225"
fi
# height unset means argument 1 was invalid; only proceed when both
# dimensions were resolved above.
if [ -z "$height" ]; then
	echo "argument 1 must be '43', '32', '53', '2', or '1'"
elif [ -n "$width" ]; then
	echo "running for $width x $height"
	rm -f *.png;
	#uses ImageMagick
	for filename in `find . -regex ".*.svg"`; do
		convert -background none $filename ${resize} ${width}x${height}! ${filename%svg}png;
		#filter box
		#convert -background none $filename -filter triangle -interpolative-resize ${width}x${height}! -blur 0x.3 -raise 1 thumbnail.png;
		#convert thumbnail.png -fill gray50 -colorize 100% -raise 1 -normalize -blur 0x4 overlay.png;
		#convert thumbnail.png overlay.png -compose hardlight -composite ${filename%svg}png;
	done
	#rm thumbnail.png overlay.png;
	echo "PNGcrush-ing"
	mkdir crush${width}
	for filename in `find . -regex ".*.png"`; do
		pngcrush -q -rem alla -rem text $filename crush${width}/${filename:2}
	done
	mv -v crush${width}/* ../../png/${width}/${PWD##*/};
	rm -Rf crush${width};
	rm *.png;
	echo "Generated $width x $height in ${PWD##*/}"
fi
| true
|
972bad20028a7cf812c93bfb7a5515e352702e2b
|
Shell
|
mbodenhamer/docker-emacs-elpy
|
/emacs-elpy
|
UTF-8
| 891
| 3.765625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Launch the mbodenhamer/emacs-elpy Docker image as the current user,
# building a per-uid/gid derived image on first use so file ownership in
# the mounted directory matches the host. Remaining args go to emacs.
set -e

uid=$(id -u)
gid=$(id -g)
shifts=0

# Recursively consume leading --uid/--gid overrides; records how many
# positional args to drop in $shifts.
function set_ids {
    if [[ "$1" == "--uid" || "$1" == "--gid" ]]; then
	if [[ "$1" == "--uid" ]]; then
	    uid="$2"
	    shifts=$(($shifts + 2))
	    shift; shift
	    set_ids "$@"
	    return
	fi
	if [[ "$1" == "--gid" ]]; then
	    gid="$2"
	    shifts=$(($shifts + 2))
	    shift; shift
	    set_ids "$@"
	fi
    fi
}

set_ids "$@"
for i in $(seq 1 $shifts); do
    shift
done

image=emacs-elpy-${uid}-${gid}
# Build the derived image once per uid/gid pair.
# NOTE(review): 'docker images | grep ${image}' can substring-match other
# image names -- TODO confirm an exact-match check is not needed.
if [ -z "$(docker images | grep ${image})" ]; then
    tmpdir=$(mktemp -d)
    echo "FROM mbodenhamer/emacs-elpy:latest" > ${tmpdir}/Dockerfile
    docker build -t ${image}:latest --build-arg uid=$uid \
	   --build-arg gid=$gid $tmpdir
    rm -rf $tmpdir
fi

# Allow the container to talk to the host X server, if X is present.
if [[ -n $(which xhost) ]]; then
    xhost +local:docker
fi

docker run --rm -it -v /tmp/.X11-unix:/tmp/.X11-unix -e DISPLAY \
       -v $(pwd):/files ${image} "$@"
| true
|
924f7819d6c5ca0b93cc38e02aa5d9835f4fb2ad
|
Shell
|
mahshidgh/data-collection-pipeline
|
/columbia/scripts/export/export-ssl.sh
|
UTF-8
| 1,108
| 4
| 4
|
[] |
no_license
|
#!/bin/bash
# Watch the capture directory given as $1: whenever a file grows past
# MAX_SSL_FILESIZE and is no longer held open by any process, stamp it
# with the current date/time and move it into an uploads/ subdirectory
# for the anonymize/upload stage. Loops forever, polling every 10s.

source constants.sh

# Load the SSH key the downstream upload step will use.
eval `ssh-agent`
ssh-add /home/sanjay/.ssh/sanjay_rsa

while true; do
  for f in $1*
  do
    # Get filesize information
    FILESIZE=$(stat -c%s "$f")
    echo "Looking at file $f"
    # Check if filesize is greater than MAX_SSL_FILESIZE
    if [ $FILESIZE -ge $MAX_SSL_FILESIZE ]
    then
      # BUG FIX: the original test was [[ "$(lsof "$f") > /dev/null" ]],
      # where the redirection sits INSIDE the quotes -- the whole thing is
      # a literal non-empty string, so the branch was always taken and
      # files were moved even while still open. Move the file only when
      # lsof reports no process holding it.
      if ! lsof -- "$f" > /dev/null 2>&1
      then
        # Create a date/time label for when we are exporting/uploading
        current_time=$(date "+%Y-%m-%d-%H%M")
        filename=$(basename "$f" | sed 's/\(.*\)\..*/\1/')
        echo "Got a basename of $filename"
        # Get directory name for uploads
        directoryname=$(dirname "$f")
        mkdir -p "$directoryname/uploads"
        # Rename file ready for anonymization / upload
        nf="$directoryname/uploads/"$filename"_"$current_time".pcapng"
        echo "Set $f as ready for upload as: $nf"
        mv "$f" "$nf"
      else
        echo "file '$f' has not been closed yet"
      fi
    else
      echo "file '$f' still being written to"
    fi
  done
  sleep 10
done
| true
|
46243231dfefe17c35738983a7a3a718126b3140
|
Shell
|
mingcheng/ssr-subscriber
|
/scripts/launcher.sh
|
UTF-8
| 1,406
| 3.734375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
###
# File: launch-ssrs.sh
# Author: Ming Cheng<mingcheng@outlook.com>
#
# Created Date: Thursday, August 8th 2019, 3:32:38 pm
# Last Modified: Tuesday, April 27th 2021, 7:40:19 pm
#
# http://www.opensource.org/licenses/MIT
###

# Endpoint returning a random shadowsocks config as JSON; override via
# the SSR_SUBSCRIBER environment variable.
if [ -z $SSR_SUBSCRIBER ]; then
  SSR_SUBSCRIBER=http://ssr-subscriber.default.svc.cluster.local/random/json
fi

# URL fetched through the local SOCKS5 proxy to verify connectivity.
CHECK_SOCK5_URL=https://www.google.com
SSR_CONF_FILE=/tmp/ss-local.json
SSR_PID_FILE=/var/run/ss-local.pid
log() {
  # Emit "[YYYY-MM-DD HH:MM:SS] <message>" on stdout.
  local stamp
  stamp=$(date '+%Y-%m-%d %H:%M:%S')
  printf '[%s] %s\n' "$stamp" "$1"
}
# Fetch a fresh config from the subscriber, launch ss-local on 0.0.0.0:1086,
# then re-check the tunnel every 10 minutes. Never returns.
start_ssr() {
  # A leftover pid file means ss-local may still be running; stop it first.
  if [ -f $SSR_PID_FILE ]; then
    log "ss-local maybe is running, so stop first"
    stop_ssr
  fi

  curl -sSkL -o $SSR_CONF_FILE $SSR_SUBSCRIBER
  cat $SSR_CONF_FILE

  # SSR_OPT: optional extra flags passed through from the environment.
  ss-local $SSR_OPT -v -c $SSR_CONF_FILE -l 1086 -b 0.0.0.0 -f $SSR_PID_FILE &

  while true; do
    sleep 600
    check_ssr
  done
}
# Probe the local SOCKS5 proxy by fetching CHECK_SOCK5_URL through it.
# Returns 0 when the proxy answered with HTTP 200, 255 otherwise.
check_ssr() {
  local proxy_addr=127.0.0.1:1086
  local status

  # IDIOM FIX: build the command as an array instead of a flat string --
  # the original expanded the string unquoted and relied on word-splitting,
  # which breaks if the URL ever contains spaces or glob characters.
  # NOTE(review): '-X HEAD' issues a HEAD request but curl still waits for
  # a body; '--head' is the canonical flag -- kept as-is to preserve the
  # original request shape. TODO confirm.
  local curl_cmd=(curl -sSkL -w '%{http_code}'
    -o /dev/null -X HEAD
    -x "socks5://${proxy_addr}"
    --connect-timeout 10 --max-time 30
    "${CHECK_SOCK5_URL}")

  echo "${curl_cmd[@]}"

  status=$("${curl_cmd[@]}")
  if [ "$status" == "200" ]; then
    log "ss-local connection check is ok"
    return 0
  else
    log "ss-local connection check is failed"
    return 255
  fi
}
# Send SIGINT to the ss-local process recorded in the pid file.
stop_ssr() {
  kill -INT $(cat $SSR_PID_FILE)
}
# Dispatch on the first CLI argument: start | stop | check.
case $1 in
start)
  start_ssr
  ;;
stop)
  stop_ssr
  ;;
check)
  check_ssr
  ;;
*)
  echo "Usage $0 start|stop|check"
  ;;
esac
| true
|
60ca77dde968abe19d42c1cfb54cbba73d4bc13c
|
Shell
|
ucsdscheduleplanner/splanner
|
/deploy.sh
|
UTF-8
| 198
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/bash
# Deployment helper skeleton.
# BUG FIX: every function body in the original was empty, which is a
# syntax error in bash -- the script could not even be parsed or sourced.
# Each stub now contains ':' (the POSIX no-op) with a TODO marker; fill
# the bodies in as the deploy pipeline is implemented.

has_update() {
    : # TODO: return 0 only when upstream has new commits
}

pull() {
    : # TODO: fetch the latest sources
}

rebuild() {
    : # TODO: rebuild artifacts/containers
}

restart() {
    : # TODO: restart the running services
}

start() {
    : # TODO: start the services
}

# first time only
deploy() {
    : # TODO: perform the initial environment setup
}

cron_job() {
    # Redeploy only when something changed upstream.
    if has_update; then
        pull
        rebuild
        restart
    fi
}
| true
|
1e1af272997403eb219e1853a17f62a570a12cf9
|
Shell
|
Accacio/dots
|
/scripts/bin/mpdMenuImage
|
UTF-8
| 501
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
# Browse album-cover images under the music directory given as $1 with
# sxiv, queue the directories of the marked covers in MPD, and play.

export MPD_HOST="$HOME/.config/mpd/socket"

# A music directory argument is required.
[[ $1 ]] || exit 2

# artists=$(find $1 -maxdepth 2 \( -name "*.jp*g" -o -name "*.png" \) \
#               |sort|sxiv -tibof)

# sxiv -o prints the paths of the images the user marked in the grid.
albumImage=$(find $1 \( -name "*.png" -or -name "*.jp*g" \) \
                 |sort|sxiv -tibof)

# Nothing marked: bail out.
[[ $albumImage ]] || exit 2

# Split the selection on newlines only, so paths with spaces survive the
# unquoted expansion in the for-loop below.
SAVEIFS=$IFS
IFS=$(echo -en "\n\b")

# set me
mpc clear
for f in $albumImage
do
    # echo "$f"
    # Each marked image's parent directory is an album folder to queue.
    album=$(dirname "$f")
    mpc add $album
done
mpc play
~/dots/scripts/musicNotify

# restore $IFS
IFS=$SAVEIFS
| true
|
65aa403e0c00ac6af38e232eb0bd7cecbc17567b
|
Shell
|
jmacato/fread-userland
|
/config/opt/fread/read_config_txt.sh
|
UTF-8
| 3,241
| 4.09375
| 4
|
[] |
no_license
|
#!/bin/sh
# This script parses the fread.ink settings.txt file
CONF_PATH="/mnt/us/fread/config.txt"

# No user config present -> nothing to do.
if [ ! -f "$CONF_PATH" ]; then
  exit 0
fi

# Defaults; overridden by entries found in config.txt below.
OPT_WIFI="disable"
OPT_WIFI_IP_METHOD="dhcp"
OPT_WIFI_IP_ADDRESS="192.168.15.42"
OPT_WIFI_IP_NETMASK="255.255.255.0"

# parse each line that looks like "FOO = VAR" into $SETTING and $VALUE
while read -r LINE; do
  # trim leading and trailing whitespace
  LINE="$(echo -e "${LINE}" | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//')"

  # ignore comments (lines starting with '#')
  if [ "$LINE" != "${LINE#\#}" ]; then
    continue
  fi

  SETTING=""
  VALUE=""
  COUNT=0

  # Split the line on the first '=' into SETTING (pass 1) and VALUE (pass 2);
  # COUNT must end at exactly 2 for a well-formed KEY="VALUE" line.
  while [ "$LINE" ]; do
    COUNT=$(( COUNT+1 ))
    if [ "$COUNT" -gt "2" ]; then
      break
    fi
    CUR=${LINE%%=*}
    if [ "$COUNT" -eq "1" ]; then
      # trim trailing whitespace
      SETTING=$(echo -e "${CUR}" | sed -e 's/[[:space:]]*$//')
    elif [ "$COUNT" -eq "2" ]; then
      # trim trailing double-quote
      VALUE=$(echo -e "${CUR}" | sed -e 's/\"\+.*$//')
    fi
    [ "$LINE" = "$CUR" ] && LINE='' || LINE="${LINE#*=\"}"
  done

  # Skip malformed lines (no '=' or more than one).
  if [ "$COUNT" -ne "2" ]; then
    continue
  fi

  # echo "$SETTING = $VALUE"
  case "$SETTING" in
    WIFI)
      OPT_WIFI=$VALUE
      ;;
    WIFI_SSID)
      OPT_WIFI_SSID=$VALUE
      ;;
    WIFI_PASSWORD)
      OPT_WIFI_PASSWORD=$VALUE
      ;;
    WIFI_IP_METHOD)
      OPT_WIFI_IP_METHOD=$VALUE
      ;;
    WIFI_IP_ADDRESS)
      OPT_WIFI_IP_ADDRESS=$VALUE
      ;;
    WIFI_IP_NETMASK)
      OPT_WIFI_IP_NETMASK=$VALUE
      ;;
    WIFI_IP_GATEWAY)
      OPT_WIFI_IP_GATEWAY=$VALUE
      ;;
    WIFI_IP_DNS)
      OPT_WIFI_IP_DNS=$VALUE
      ;;
    SSH)
      # BUG FIX: the original tested "$SSH", a variable that is never
      # assigned anywhere, instead of the parsed "$VALUE" (compare the
      # USB case below, which correctly uses $VALUE).
      if [ "$VALUE" = "enable" ]; then
        /etc/init.d/dropbear start
      else
        /etc/init.d/dropbear stop
      fi
      ;;
    USB)
      if [ "$VALUE" = "ethernet" ]; then
        modprobe g_ether
        sleep 3
        ip addr add 192.168.15.1/24 dev usb0
        ip link set dev usb0 up
        /etc/init.d/dnsmasq start
        /etc/init.d/dropbear start
      else
        echo "USB mode $VALUE not implemented"
      fi
      ;;
    *)
      echo "Unknown option $SETTING"
  esac
done < "$CONF_PATH"
# When wifi is enabled and an SSID was configured, write a connman service
# definition and start connman.
if [ "$OPT_WIFI" = "enable" ] && { [ "${#OPT_WIFI_SSID}" -gt "0" ]; }; then
  # BUG FIX: the original assigned CFGPATH but redirected the heredoc to the
  # undefined $CFG_PATH (an "ambiguous redirect"); one name is now used
  # consistently.
  CFG_PATH="/var/lib/connman/wifi.config"

  # Static addressing uses "address/netmask[/gateway]"; otherwise DHCP.
  if [ "$OPT_WIFI_IP_METHOD" = "static" ]; then
    CFG_IPV4="${OPT_WIFI_IP_ADDRESS}/${OPT_WIFI_IP_NETMASK}"
    if [ "${#OPT_WIFI_IP_GATEWAY}" -gt "0" ]; then
      CFG_IPV4="${CFG_IPV4}/$OPT_WIFI_IP_GATEWAY"
    fi
  else
    CFG_IPV4="dhcp"
  fi

  # Only emit a Nameservers line when a plausible DNS entry was configured
  # (shortest valid dotted-quad is 7 characters).
  if [ "${#OPT_WIFI_IP_DNS}" -gt "6" ]; then
    CFG_NAMESERVERS="Nameservers=$OPT_WIFI_IP_DNS"
  fi

  cat << EOF > "$CFG_PATH"
[service_wifi]
Type=wifi
Name=$OPT_WIFI_SSID
AutoConnect=True
Passphrase=$OPT_WIFI_PASSWORD
IPv4=$CFG_IPV4
$CFG_NAMESERVERS
EOF

  /etc/init.d/connman start
fi
| true
|
0eb8c37df859dc7fdd6c830d22d5b7b2ae1a03b9
|
Shell
|
fretlink/ansible-clever
|
/scripts/dhall_check.sh
|
UTF-8
| 402
| 3.6875
| 4
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/usr/bin/env bash
# Typecheck every *.dhall file under the current tree with `dhall resolve`.
# Exits non-zero when at least one file fails to resolve.
set -eo pipefail

go() {
  local ERROR=0
  # NUL-delimited read so file names with spaces/newlines are safe.
  while IFS= read -r -d '' file
  do
    # Resolve from the file's own directory so relative imports work.
    cd "$(dirname "$file")" || exit
    echo "Typechecking ${file}"
    if ! dhall --explain resolve < "$(basename "$file")" >/dev/null; then
      echo "Failed to resolve $file"
      ERROR=1
    fi;
    cd - >/dev/null || exit
  done < <(find . -type f -name "*.dhall" -print0)
  exit $ERROR;
}

go
| true
|
7dd204b34beff6b149172e5ae06b4f52abd3b125
|
Shell
|
cabaalexander/doom-arch-install
|
/prompt.sh
|
UTF-8
| 3,515
| 3.96875
| 4
|
[] |
no_license
|
#!/bin/bash
# Interactive pre-install prompts for the doom-arch-install scripts: asks
# for disk, partition sizes, hostname and root password via `dialog`, and
# persists the answers to ./.env for the other scripts.
set -Eeuo pipefail

# install 'dialog' (required)
if ! command -v dialog &> /dev/null; then
  sudo pacman --needed --noconfirm -S dialog &> /dev/null
fi

# Prefix for generated files
# ==========================
# Temp file that receives the user's last dialog reply; cleaned on exit.
DOOM_PROMPT_REPLY=$(mktemp)
trap '{ rm -rf "$DOOM_PROMPT_REPLY" ; }' SIGINT SIGTERM EXIT

# Print the size column reported by lsblk for the given drive (default sda),
# with alphabetic unit suffixes stripped (e.g. "238.5G" -> "238.5").
sd_size(){
  local sd
  sd=${1:-"sda"}
  lsblk |
    awk "/^$sd/ {print \$4}" |
    tr -d '[:alpha:]'
}

# Show a dialog input box; -p switches to a password box.
# $1 message, $2 default value, $3 extra height. The reply is written to
# $DOOM_PROMPT_REPLY (dialog prints it on stderr).
input_box(){
  local OPT message default_value height input_type
  while getopts ":p" OPT; do
    case $OPT in
      p) input_type="--passwordbox" ;;
      *) # do default stuff ;;
    esac
  done
  shift $((OPTIND - 1))
  message=${1:-dialog box}
  default_value=${2:-}
  height=${3:-"0"}
  dialog --title "Doom arch-linux" \
    --ok-button "Continue" \
    --cancel-button "Exit" \
    ${input_type:-"--inputbox"} \
    "$message" \
    $((9 + height)) 80 \
    "$default_value" \
    2> "$DOOM_PROMPT_REPLY"
}
# Bind the given variable in the running shell AND append the assignment to
# ./.env so later scripts can pick it up.
# $1 - variable name; $2 - value (defaults to the last dialog reply stored
#      in $DOOM_PROMPT_REPLY).
save_reply(){
  local var_name var_value assignment
  var_name=$1
  var_value=${2:-$(<"$DOOM_PROMPT_REPLY")}
  assignment="${var_name}=${var_value}"

  touch ./.env
  eval "$assignment"
  echo "$assignment" >> ./.env
}
# Evaluate one arithmetic expression with awk and print it rounded to the
# nearest integer (no trailing newline). The expression must be passed as a
# single word; with more than one argument the function bails out.
awk_math(){
  if [ "$#" -gt 1 ]; then
    exit 0
  fi
  awk 'BEGIN {printf "%.0f", ('"$1"')}'
}
# Accumulate the space (GB) already allocated to partitions into the global
# SD_SIZE_ACC, so the remaining free space can be offered as a default.
sd_size_acc(){
# this adds up the space taken by the new hard drives
SD_SIZE_ACC=$(awk_math "${SD_SIZE_ACC:-0} + $1")
}
#########
# #
# Begin #
# #
#########
# Clean ./.env
rm -f ./.env
#######
# #
# sd* #
# #
#######
# Build a "• name size:" list of sd* devices for the chooser dialog.
sd_available=$(lsblk | grep -E "^sd.*$" | awk '{printf "• %s %s:",$1,$4}')
# One ':' per device -> number of extra dialog rows needed.
sd_available_height=$(grep -o : <<<$"$sd_available" | wc -l)
# NOTE(review): `<<<$"$sd_available"` uses the locale-translation $"..."
# form; presumably a typo for plain "$sd_available" — harmless here.
sd_message="Available hard drives (Choose)\n\n$(tr ':' '\n' <<<"$sd_available")"
input_box "$sd_message" "sda" "$sd_available_height"
save_reply "SD"
########
# #
# SWAP #
# #
########
# Recommended swap: 2x installed RAM, converted from MB to GB.
recommended_swap_space=$(
free -m \
| grep -E "^Mem:" \
| awk -F' ' '{printf "%.0f",($2 * 2 / 1024)"G"}'
)
swap_message="Swap space (GB)\n\nRecommended: $recommended_swap_space"
input_box "$swap_message" "1" "1"
save_reply "SWAP"
sd_size_acc "$SWAP"
########
# #
# ROOT #
# #
########
# Default root size: 75% of what remains after swap.
sd_size="$(sd_size "$SD")"
sd_seventy_five_percent=$(awk_math "($sd_size - $SWAP) * 0.75")
input_box "Root space (GB)" "$sd_seventy_five_percent"
save_reply "ROOT"
sd_size_acc "${ROOT:-}"
########
# #
# HOME #
# #
########
# Default home size: everything not yet allocated.
sd_space_left=$(awk_math "$sd_size - $SD_SIZE_ACC")
input_box "Home space (GB)" "$sd_space_left"
save_reply "HOME"
############
# #
# HOSTNAME #
# #
############
input_box "Hostname" "archlinux"
save_reply "HOSTNAME"
#################
# #
# ROOT PASSWORD #
# #
#################
# input_box -p "Root password" "welc0me"
input_box "Root password" "welc0me"
save_reply "ROOT_PSSWD"
############
# #
# SURE (?) #
# #
############
# Final confirmation shows the collected .env contents.
sure_msg="All sure? (y/n)\n\n$(<.env)"
sure_options_height=$(awk 'END {print NR}' ./.env)
# Save EFI variable if EFI-mode is on
# ===================================
ls /sys/firmware/efi/efivars &> /dev/null \
&& save_reply "EFI" 0
# Export all variable in `.env` file
# so that the other scripts can use them
# ===================
sed -i 's/^/export /' ./.env
input_box "$sure_msg" "y" "$sure_options_height"
# Abort unless the user answered y/yes (any capitalisation).
[[ "$(<"$DOOM_PROMPT_REPLY")" =~ ^[yY][eE]?[sS]?$ ]] || exit 1
| true
|
8720a538c23e9d4098f1add7a12f7e067082c77d
|
Shell
|
krisfield/meza
|
/scripts/ElasticSearch.sh
|
UTF-8
| 4,047
| 2.984375
| 3
|
[
"MIT"
] |
permissive
|
print_title "Starting script ElasticSearch.sh"
#
# This script installs everything required to use elasticsearch in MediaWiki
#
# Dependencies
# - PHP compiled with cURL
# - Elasticsearch
# - JAVA 7+
# - Extension:Elastica
# - Extension:CirrusSearch
#
# Ref:
# https://www.mediawiki.org/wiki/Extension:CirrusSearch
# https://en.wikipedia.org/w/api.php?action=cirrus-config-dump&srbackend=CirrusSearch&format=json
# https://git.wikimedia.org/blob/mediawiki%2Fextensions%2FCirrusSearch.git/REL1_25/README
# https://www.mediawiki.org/wiki/Extension:CirrusSearch/Tour
# https://wikitech.wikimedia.org/wiki/Search
#
# Make sure /usr/local/bin is on PATH (plugin binaries live there).
if [[ $PATH != *"/usr/local/bin"* ]]; then
PATH="/usr/local/bin:$PATH"
fi
#
# Install JAVA
#
# http://docs.oracle.com/javase/8/docs/technotes/guides/install/linux_jdk.html#BJFJHFDD
# http://stackoverflow.com/questions/10268583/how-to-automate-download-and-installation-of-java-jdk-on-linux
#
echo "******* Downloading and installing JAVA Development Kit *******"
cd "$m_meza/scripts"
yum -y install java-1.7.0-openjdk
# Reference this for if we want to try JDK 8: http://tecadmin.net/install-java-8-on-centos-rhel-and-fedora/
## wget --no-check-certificate --no-cookies --header "Cookie: oraclelicense=accept-securebackup-cookie" http://download.oracle.com/otn-pub/java/jdk/8u45-b14/jdk-8u45-linux-x64.rpm
## rpm -ivh jdk-8u45-linux-x64.rpm
# Display java version for reference
java -version
# Set $JAVA_HOME
#
# http://askubuntu.com/questions/175514/how-to-set-java-home-for-openjdk
#
echo "export JAVA_HOME=/usr/bin" > /etc/profile.d/java.sh
source /etc/profile.d/java.sh
echo "JAVA_HOME = $JAVA_HOME"
# Install Elasticsearch via yum repository
#
# https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-repositories.html
#
echo "******* Installing Elasticsearch *******"
# Download and install the public signing key:
cd "$m_meza/scripts"
rpm --import https://packages.elastic.co/GPG-KEY-elasticsearch
# Add yum repo file
ln -s "$m_config/core/elasticsearch.repo" /etc/yum.repos.d/elasticsearch.repo
# Install repo
yum -y install elasticsearch
# Configure Elasticsearch to automatically start during bootup
echo "******* Adding Elasticsearch service *******"
chkconfig elasticsearch on
# *** MANUAL INSTALLATION OPTION (delete) ***
# cd ~/mezadownloads
# curl -L -O https://download.elastic.co/elasticsearch/elasticsearch/elasticsearch-1.6.0.tar.gz
# tar -xvf elasticsearch-1.6.0.tar.gz
# cp -r elasticsearch-1.6.0 /etc/elasticsearch-1.6.0
# cd /etc/elasticsearch-1.6.0/bin
#
# Elasticsearch Configuration
#
# https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-configuration.html
#
echo "******* Adding Elasticsearch configuration *******"
# Add host name per https://github.com/elastic/elasticsearch/issues/6611
echo "127.0.0.1 meza" >> /etc/hosts
# Rename the standard config file and link to our custom file
cd /etc/elasticsearch
mv /etc/elasticsearch/elasticsearch.yml /etc/elasticsearch/elasticsearch-old.yml
ln -s "$m_config/core/elasticsearch.yml" /etc/elasticsearch/elasticsearch.yml
# Make directories called out in elasticsearch.yml
# ref: http://elasticsearch-users.115913.n3.nabble.com/Elasticsearch-Not-Working-td4059398.html
mkdir "$m_meza/data/elasticsearch/data"
mkdir "$m_meza/data/elasticsearch/work"
mkdir "$m_meza/data/elasticsearch/plugins"
# Grant elasticsearch user ownership of these new directories
chown -R elasticsearch "$m_meza/data/elasticsearch/data"
chown -R elasticsearch "$m_meza/data/elasticsearch/work"
chown -R elasticsearch "$m_meza/data/elasticsearch/plugins"
# Start Elasticsearch
echo "******* Starting elasticsearch service *******"
service elasticsearch start
sleep 20 # Wait 20 seconds so the service is up before installing plugins
# install kopf, head, bigdesk and inquisitor plugins
/usr/share/elasticsearch/bin/plugin install lmenezes/elasticsearch-kopf/1.0
/usr/share/elasticsearch/bin/plugin install mobz/elasticsearch-head
/usr/share/elasticsearch/bin/plugin install lukas-vlcek/bigdesk
/usr/share/elasticsearch/bin/plugin install polyfractal/elasticsearch-inquisitor
| true
|
5e7b6d6f088788b04c1d7bd398de767205a9d982
|
Shell
|
puddinging/shell_space
|
/function.sh
|
UTF-8
| 253
| 2.734375
| 3
|
[] |
no_license
|
# Demo of shell functions. BUG FIXES vs the original:
# - `echo funcWithReturn` / `echo funcWithParam 8` printed the function
#   names instead of calling them — they are now invoked.
# - `return '返回值'` passed a string to `return`, which only accepts a
#   small integer (0-255); data should be returned via stdout instead.
# - the file ended with a stray `8|`, a hard syntax error that prevented
#   the whole script from running.

# A function with no arguments and no explicit return value.
doFunc(){
  echo 'this is first func'
}
doFunc

# A function "with a return value" — its exit status.
funcWithReturn(){
  echo '带返回值的函数'
  return 0
}
funcWithReturn

# A function with parameters: $0 is the script name, $1 the first argument.
funcWithParam(){
  echo "$0"
  echo "$1"
}
funcWithParam 8
| true
|
1c1d243a2ad5096351ca408a75d0a8fe16b33939
|
Shell
|
sycomix/repos
|
/debian-10-bonescript/version.sh
|
UTF-8
| 701
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/bash -e
# Package metadata consumed by the rcn-ee repo build scripts: describes the
# bonescript Debian package, its upstream tarball and prebuilt node addons.

package_name="bonescript"
debian_pkg_name="${package_name}"
package_version="0.6.3"
package_source="${package_name}_${package_version}.orig.tar.gz"
src_dir="${package_name}-${package_version}"

# Upstream git info (unused for this package — source comes from a tarball).
git_repo=""
git_sha=""
reprepro_dir="b/${package_name}"
dl_path="pool/main/${reprepro_dir}/"

debian_version="${package_version}-0rcnee0"
debian_untar=""
debian_patch=""

# Prebuilt npm bundles (per node ABI) hosted on GitHub.
dl_mirror="https://github.com/rcn-ee/npm-package-bonescript/raw/master/deploy/buster/"

# Node versions the bundles were built against; v8 is the one used below.
v6="v6.13.0"
v8="v8.9.4"
bonescript="bonescript-0.6.3-ae48732"
winston="winston-2.1.1"
debian_dl_1="${dl_mirror}/${bonescript}-${v8}.tar.xz"
debian_dl_2="${dl_mirror}/${winston}-${v8}.tar.xz"
buster_version="~buster+20180305"
| true
|
9a3c05e202a53350445c059b67194b6e38c832db
|
Shell
|
johnjdailey/snowflake-connector-python
|
/ci/run_travis.sh
|
UTF-8
| 1,178
| 3.375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -e
#
# Run Travis Tests
#
# Runs the connector test suite with coverage inside the prepared venv;
# which subset runs depends on SNOWFLAKE_AZURE / SNOWFLAKE_GCP.
# shellcheck disable=SC2034
THIS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
set -o pipefail

# macOS ships no GNU timeout; use gtimeout (from coreutils) there.
# SIGUSR1 lets the test process dump state before dying after 1 hour.
if [ "$TRAVIS_OS_NAME" == "osx" ]; then
    TIMEOUT_CMD=("gtimeout" "-s" "SIGUSR1" "3600s")
else
    TIMEOUT_CMD=("timeout" "-s" "SIGUSR1" "3600s")
fi

source ./venv/bin/activate

# Collect the pytest exit code instead of aborting (set -e) so the
# timeout case can be distinguished at the bottom.
ret=0
if [ -n "$SNOWFLAKE_AZURE" ]; then
    echo "Running Azure tests only..."
    # shellcheck disable=SC2068
    ${TIMEOUT_CMD[@]} py.test -vvv --cov=snowflake.connector \
        --cov-report=xml:python_connector_${TRAVIS_PYTHON_VERSION}_coverage.xml \
        -m "putget" test || ret=$?
elif [ -n "$SNOWFLAKE_GCP" ]; then
    echo "Running GCP tests only..."
    # shellcheck disable=SC2068
    ${TIMEOUT_CMD[@]} py.test -vvv --cov=snowflake.connector \
        --cov-report=xml:python_connector_${TRAVIS_PYTHON_VERSION}_coverage.xml \
        -m "putget" test || ret=$?
else
    echo "Running regular tests..."
    # shellcheck disable=SC2068
    ${TIMEOUT_CMD[@]} py.test -vvv --cov=snowflake.connector \
        --cov-report=xml:python_connector_${TRAVIS_PYTHON_VERSION}_coverage.xml \
        --ignore=test/sso test || ret=$?
fi

# TIMEOUT or SUCCESS
# (124 is GNU timeout's "timed out" status — treated as success here.)
[ $ret != 124 -a $ret != 0 ] && exit 1 || exit 0
| true
|
10d5a060694fff22025d8c2ef978018b1999bcdc
|
Shell
|
philippwindischhofer/DCGAN
|
/preprocess_images.sh
|
UTF-8
| 729
| 3.9375
| 4
|
[] |
no_license
|
#!/bin/bash

# Batch-resize every image under --in into $SIZESTRING thumbnails named
# 0.png, 1.png, ... under --out (requires ImageMagick's `convert`).
# Fixes vs the original: file names are taken from a glob instead of
# parsing `ls` output, all expansions are quoted (paths with spaces work),
# and legacy `expr` arithmetic is replaced with $(( )).

CUR_DIR=$(pwd)
SIZESTRING="32x32"

# Collect --in/--out; anything else is kept as a positional argument.
POSARG=()
while [[ $# -gt 0 ]]
do
  key=$1
  case $key in
    --in)
      IN_DIR="$2/"
      shift
      shift
      ;;
    --out)
      OUT_DIR="$2/"
      shift
      shift
      ;;
    *)
      POSARG+=("$1")
      shift
      ;;
  esac
done
set -- "${POSARG[@]}"

mkdir -p "$OUT_DIR"

COUNT=0
for PIC in "$IN_DIR"*
do
  echo "writing" "$OUT_DIR$COUNT.png"
  #convert "$PIC" -thumbnail "$SIZESTRING" -background black -gravity center -extent "$SIZESTRING" "$OUT_DIR$COUNT.png"
  # "^" fills the target box, then -extent crops to the exact size.
  convert "$PIC" -thumbnail "$SIZESTRING^" -gravity center -extent "$SIZESTRING" "$OUT_DIR$COUNT.png"
  # Drop the alpha channel from the result.
  convert "$OUT_DIR$COUNT.png" -alpha off "$OUT_DIR$COUNT.png"
  COUNT=$((COUNT + 1))
done
| true
|
9fcbed34151bec5aa4962ab89aa43d8ee6f42985
|
Shell
|
sdgyxx/CentosShellTools
|
/shell/funs/getNetWorkName.sh
|
UTF-8
| 895
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
###############
# Name: Get the network interface (NIC) name
# author: ZhangTianJie
# email: ztj1993@gmail.com
###############

### Ensure the network service (NetworkManager) is installed
nmcli -v > /dev/null 2>&1
# NOTE(review): the last `[ $? -ne 0 ]` tests the installer's status, so a
# successful install falls through without exiting — verify that is intended.
[ $? -ne 0 ] && /data/shell/funs/yumInstallSoftware.sh NetworkManager && [ $? -ne 0 ] && exit 1

### Start the network service
systemctl start NetworkManager.service

### Get the first ACTIVE ethernet interface
NetWorkName1=`LANG=en && nmcli dev status | grep -m1 "ethernet connected" | awk '{ print $1 }'`
if [ "${NetWorkName1:0:3}" == "enp" ] || [ "${NetWorkName1:0:3}" == "eth" ]; then
  echo "${NetWorkName1}" && exit 0
fi

### Otherwise, get the first INACTIVE ethernet interface
NetWorkName2=`LANG=en && nmcli dev status | grep -m1 "ethernet disconnected" | awk '{ print $1 }'`
if [ "${NetWorkName2:0:3}" == "enp" ] || [ "${NetWorkName2:0:3}" == "eth" ]; then
  echo "${NetWorkName2}" && exit 0
fi

echo ">>>>> Error: get network name error"
exit 1
| true
|
dad80e659c89f5260f118d978d43d8e32aee0880
|
Shell
|
DFKI-NLP/eventx
|
/scripts/random_repeats_sdw.sh
|
UTF-8
| 832
| 2.796875
| 3
|
[
"MIT"
] |
permissive
|
# Run 5 random repeats
#
# Trains the SDW config once per fixed random seed so results can be
# averaged over repeats. Idiom fix: the derived seeds now use $(( ))
# arithmetic instead of legacy backtick `expr` calls.
#
# edit these variables before running script
export CUDA_DEVICE=0
export NUM_EPOCHS=100
export BATCH_SIZE=32
CONFIG_FILE=configs/plass_bert.jsonnet
export TRAIN_PATH=data/smartdata-corpus/train/train_sdw_with_events.jsonl
export DEV_PATH=data/smartdata-corpus/dev/dev_sdw_with_events.jsonl
export TEST_PATH=data/smartdata-corpus/test/test_sdw_with_events.jsonl

ITER=1
for RANDOM_SEED in 54360 44184 20423 80520 27916; do
  # Framework seeds are derived from the master seed by powers of ten.
  export SEED=$RANDOM_SEED
  export PYTORCH_SEED=$(( RANDOM_SEED / 10 ))
  export NUMPY_SEED=$(( PYTORCH_SEED / 10 ))
  echo Run ${ITER} with seed ${RANDOM_SEED}
  OUTPUT_DIR=data/runs/random_repeats_sdw/run_"$ITER"
  allennlp train --include-package eventx $CONFIG_FILE -s $OUTPUT_DIR -f
  ITER=$(( ITER + 1 ))
done
| true
|
1a88e5c29b1a58f3f1d2a6a2ab6d883b42766666
|
Shell
|
danielgrigg/emacs-hub
|
/bin/sync
|
UTF-8
| 475
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
function deleted_bundles {
git ls-files -d -- | cut -d'/' -f2 | sort -u
}
function untracked_bundles {
git ls-files --others | while read line; do dirname $line; done | sort -u | grep bundle | cut -d'/' -f2
}
for bundle in $(deleted_bundles); do
git rm -r bundle/$bundle
git commit -qm "Removing plugin $bundle"
done
for bundle in $(untracked_bundles); do
echo "adding $bundle"
git add bundle/$bundle
git commit -qm "Adding plugin $bundle"
done
| true
|
a442aa8b0a7b6f658dbaa7bca6fd5e9fc6f43fc6
|
Shell
|
mwthinker/Scripts
|
/bash/cmake_replace_sources
|
UTF-8
| 1,544
| 4.34375
| 4
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash

# Replace the source list (e.g. the `set(SOURCES ... )` block) in a CMake
# file with a new list of sources.
# BUG FIX: the help text showed the keyword BEFORE the cmake file, but the
# code reads the cmake file first — the usage/example now match the code.
# Also `main $@` and `basename $0` are quoted so paths with spaces survive.

if [ "$1" == "-h" ] || [ "$1" == "--help" ]; then
echo "Usage: $(basename "$0") OPTIONS [cmake_file='CMakeLists.txt'] [keyword='set(SOURCES'] [new_sources]
OPTIONS:
-h --help show this help
-i --interactive ask for permission when modifing and removing files.
Replace the source list in the cmake file with the new source list provided.
Example:
Replaces sources provided to cmake_file
$(basename "$0") CMakeLists.txt 'set(SOURCES' test.h test.cpp
"
exit 0
fi

# $1 cmake file, $2 keyword opening the source block, $3.. new sources.
main() {
  local interactive=0
  if [ "$1" == "-i" ] || [ "$1" == "--interactive" ]; then
    interactive=1
    shift
  fi
  local cmake_file="$1"
  shift
  local keyword="$1"
  shift
  local new_sources="$@"
  local content=""
  # 0 = before the block, 1 = inside (skipping old sources), 2 = after.
  local found_sources=0
  IFS=$'\n'
  while read -r line ; do
    # Match the keyword as written, fully upper-cased or fully lower-cased.
    if [[ $line = *"$keyword"* ]] || [[ $line = *"${keyword^^}"* ]] || [[ $line = *"${keyword,,}"* ]]; then
      if [[ $found_sources = 0 ]]; then
        found_sources=1
        # Emit the replacement block immediately, one source per line.
        content="$content\n$keyword"
        for source in "$@"
        do
          content="$content\n\t$source"
        done
        content="$content\n)"
      fi
    fi
    if [[ $found_sources = 1 ]]; then
      # Swallow original lines until the block's closing ')'.
      if [[ $line == *")"* ]]; then
        found_sources=2
      fi
    else
      if [ -z "$content" ]; then
        content="$line"
      else
        content="$content\n$line"
      fi
    fi
  done < "$cmake_file"

  if [[ $interactive = 1 ]]; then
    read -rp "$(basename "$0"): overwrite '$cmake_file'? " overwrite
    if [[ $overwrite = [Yy] ]]; then
      printf "$content\n" > "$cmake_file"
    fi
  else
    printf "$content\n" > "$cmake_file"
  fi
}

main "$@"
| true
|
e7bb1e1abc22e41659973d4de687af4d6c47bdbe
|
Shell
|
petronny/aur3-mirror
|
/kscreenlockmgr-git/PKGBUILD
|
UTF-8
| 1,172
| 3.0625
| 3
|
[] |
no_license
|
# Maintainer: Alex Merry <dev@randomguy3.me.uk>
# Arch Linux PKGBUILD for the kscreenlockmgr git checkout; built by makepkg,
# which supplies $srcdir/$pkgdir and calls build()/package().
pkgname=kscreenlockmgr-git
pkgver=20110903
pkgrel=1
pkgdesc="A hack to make screen locking work when using Plasma Netbook"
arch=(i686 x86_64)
url="http://randomguy3.wordpress.com/2011/09/04/screen-locking-with-the-plasma-netbook-interface/"
license=('GPL')
groups=()
depends=('kdelibs' 'libxext' 'libxss')
makedepends=('git' 'automoc4' 'cmake')
provides=()
conflicts=()
replaces=()
backup=()
options=()
install=
source=()
noextract=()
md5sums=()
# Upstream repository and local checkout directory name.
_gitroot=git://anongit.kde.org/scratch/alexmerry/kscreenlockmgr
_gitname=kscreenlockmgr

build() {
  cd "$srcdir"
  msg "Connecting to GIT server...."
  # Reuse an existing clone when present; otherwise clone fresh.
  if [[ -d "$_gitname" ]]; then
    cd "$_gitname" && git pull origin
    msg "The local files are updated."
  else
    git clone "$_gitroot" "$_gitname"
  fi
  msg "GIT checkout done or server timeout"
  msg "Starting build..."
  # Always build out-of-source in a clean directory.
  rm -rf "$srcdir/$_gitname-build"
  mkdir "$srcdir/$_gitname-build"
  cd "$srcdir/$_gitname-build"
  #
  # BUILD HERE
  #
  cmake ../kscreenlockmgr -DCMAKE_INSTALL_PREFIX=$(kde4-config --prefix)
  make
}

package() {
  cd "$srcdir/$_gitname-build"
  make DESTDIR="$pkgdir/" install
}
# vim:set ts=2 sw=2 et:
| true
|
17bffae287e33e550f9895103a8e953598bc4c52
|
Shell
|
wadaniel/cmairl
|
/data/createobs.sh
|
UTF-8
| 341
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate per-run copies of run-vracer.py (injecting run index and target)
# into $RUNDIR and execute each one to create observation data.
export OMP_NUM_THREADS=4

target=0.0
RUNDIR="./t${target}_base"
#RUNDIR="."
mkdir $RUNDIR

for i in {4..10}
do
  fname=run-vracer-continuous-t${target}-$i.py
  #fname=run-vracer-$i.py
  # Insert `run = $i` and `target = ...` after line 5 of the template.
  sed "5 a run = $i\ntarget = ${target}" run-vracer.py > "${RUNDIR}/${fname}"
  # Run from inside $RUNDIR, then return (pushd/popd keep the cwd stack).
  pushd .
  cd $RUNDIR
  python3 $fname
  popd
done
| true
|
3a031e559b9751745284fc95c4c15cfe464ad385
|
Shell
|
MoritzFeigl/FSO_mHM
|
/scripts/02_prepare_env_mHM/submit_mhm_fso_restart.sh
|
UTF-8
| 1,760
| 2.921875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# SGE array-job wrapper: each task reads one basin folder name from the job
# list file and runs the prebuilt mHM binary inside that folder.
# ----------------------------------------------------------------------
# qsub arguments
# ----------------------------------------------------------------------
#$ -N mhm_spinup
#$ -S /bin/bash
#$ -wd /work/$USER
#$ -o /work/$USER/stdout/$JOB_NAME-$JOB_ID-$TASK_ID.log
#$ -e /work/$USER/stderr/$JOB_NAME-$JOB_ID-$TASK_ID.log
#$ -j y
#$ -l h_rt=00:20:00
#$ -l h_vmem=8G
#$ -binding linear:1
#$ -cwd
# call it via 'qsub -t 1-$(wc -l /path/to/input.files) array-job.sh /path/to/input.files'
echo qsub arguments
# ----------------------------------------------------------------------
# load required modules
# ----------------------------------------------------------------------
echo load required modules
# load python environment
# Only load modules on compute nodes, not on the cluster frontend.
sys_name=$(uname -n)
if [[ ${sys_name:0:8} != 'frontend' ]]; then
  module purge
  module load foss/2019b
  module load netCDF-Fortran
  module load CMake
  module use /global/apps/modulefiles
  module load python_env_mpr
  module load git-subrepo
  module load pFUnit/4.0.0_foss_2019b
  export NETCDF_DIR="$EBROOTNETCDF"
  export NETCDF_FORTRAN_DIR="$EBROOTNETCDFMINFORTRAN"
  export FC=mpifort
fi
# ----------------------------------------------------------------------
# main script
# ----------------------------------------------------------------------
echo main script
# get all basins
# find <path>/sub* -type d -maxdepth 1 -printf "%f\n" | tee input.files
# get current basin by indexing
# get list of input files from first argument
job_list=$1
# get the specific input file for this array job task
# this is the n-th (where n is current task ID) line of the file
folder=$(awk "NR==$SGE_TASK_ID" "$job_list")
# cd
cd /work/ottor/FSO_mHM_major_basins/config/"${folder}" || exit
# execute
./mhm
| true
|
6235f38641859d1c3380d325d07df116e94a8b1a
|
Shell
|
hixio-mh/node-red-nodes
|
/hardware/HummingboardGPIO/fixup.sh
|
UTF-8
| 633
| 3.390625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Install the gpiohb helper setuid-root in /usr/local/bin. When not run as
# root, print the manual commands instead of failing silently.
echo "*********************************************"
echo "Moving gpiohb to /usr/local/bin/"
if [[ $EUID -ne 0 ]]; then
  echo "Not running as user root" 2>&1
  echo "please run the following commands manually"
  echo "  sudo cp $PWD/gpiohb /usr/local/bin/gpiohb"
  echo "  sudo chmod 4755 /usr/local/bin/gpiohb"
  echo "or re-run npm install as root / sudo"
  echo "*********************************************"
  exit 1
else
  cp gpiohb /usr/local/bin/
  # 4755 = setuid root so unprivileged users can drive the GPIO pins.
  chmod 4755 /usr/local/bin/gpiohb
  echo "OK - gpiohb moved to /usr/local/bin"
  echo "*********************************************"
fi
| true
|
dad33e58fec9435333c1e51e961e5e6387caa01f
|
Shell
|
ryz/blynks-zsh-theme
|
/blynks.zsh-theme
|
UTF-8
| 1,373
| 3.109375
| 3
|
[] |
no_license
|
# blynks, a modified blinks zsh 'theme'
#
# https://github.com/ryz/blynks-zsh-theme

# Show a blue '±' when the cwd is inside a git work tree, a space otherwise.
function _prompt_char() {
  if $(git rev-parse --is-inside-work-tree >/dev/null 2>&1); then
    echo "%{%F{blue}%}±"
  else
    echo ' '
  fi
}

# This theme works with both the "dark" and "light" variants of the
# Solarized color schema. Set the SOLARIZED_THEME variable to one of
# these two values to choose. If you don't specify, we'll assume you're
# using the "dark" variant.
case ${SOLARIZED_THEME:-dark} in
    light) bkg=white;;
    *)     bkg=black;;
esac

# Used by git_prompt_info
ZSH_THEME_GIT_PROMPT_PREFIX="[%{%B%F{blue}%}"
ZSH_THEME_GIT_PROMPT_SUFFIX="%{%f%k%b%K{${bkg}}%B%F{green}%}]"
ZSH_THEME_GIT_PROMPT_DIRTY=""
ZSH_THEME_GIT_PROMPT_CLEAN=""

# Used by git_prompt_status (one marker per kind of pending change)
ZSH_THEME_GIT_PROMPT_ADDED="%{$fg_bold[green]%}+"
ZSH_THEME_GIT_PROMPT_MODIFIED="%{$fg_bold[blue]%}*"
ZSH_THEME_GIT_PROMPT_DELETED="%{$fg_bold[red]%}-"
ZSH_THEME_GIT_PROMPT_RENAMED="%{$fg_bold[magenta]%}>"
ZSH_THEME_GIT_PROMPT_UNMERGED="%{$fg_bold[yellow]%}#"
ZSH_THEME_GIT_PROMPT_STAGED="%{$fg_bold[green]%}*"

# Two-line left prompt: user@host, cwd, git info; '%#' on the second line.
PROMPT='%{%f%k%b%}%{%K{${bkg}}%B%F{green}%}%n%{%B%F{blue}%}@%{%B%F{red}%}%m%{%B%F{green}%}%{%b%F{yellow}%K{${bkg}}%} %~ $(_prompt_char)%{%B%F{green}%}$(git_prompt_info)$(git_prompt_status)%E%{%f%k%b%}
%{%K{${bkg}}%}%#%{%f%k%b%} '
# Right prompt: history number and current vim mode.
RPROMPT='!%{%B%F{cyan}%}%!%{%f%k%b%} ${vim_mode}'
| true
|
78f99335a1b0487aeb78d0f07ec2534896783959
|
Shell
|
ThomasVitale/spring-tutorials
|
/spring-boot-multitenancy-schema/db/init/db_init.sh
|
UTF-8
| 286
| 2.609375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
set -euo pipefail

# Initialise the multitenancy demo database: create the schema, then load
# seed data. Requires a running local PostgreSQL and the psql client.

# Fail fast with a clear message when psql is missing.
# BUG FIX: the original piped the message through an undefined `echoerr`
# helper and called `exit 1` inside a ( ) subshell, which cannot terminate
# the script on its own.
command -v psql > /dev/null || { echo "The PostgreSQL client is not in your PATH" >&2; exit 1; }

export PGPASSWORD=secret_password
psql -U james_bond -d secret_database -h localhost -f schema.sql
psql -U james_bond -d secret_database -h localhost -f data.sql
| true
|
3c922272837bd57d8db48cafa15c1799ef3ba479
|
Shell
|
vishisth29/app_demo
|
/utils/start.sh
|
UTF-8
| 208
| 2.515625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Build the base docker images, then bring the compose stack up.
set -x
echo 'Current directory' ${PWD}
chmod +x ./utils/build-base-images.sh;
./utils/build-base-images.sh;
# docker compose command comes here
# `config` validates the compose file before `up` starts the services.
docker-compose config
docker-compose up
| true
|
a6cafd09bfc1ce845fdb334614d7118ba6b32fea
|
Shell
|
facloud/todo
|
/hack/compile-proto
|
UTF-8
| 1,469
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/bash
# vim: ft=sh
# Compile todo.proto to Go and PHP gRPC stubs inside a disposable
# `protoc_image` docker container, copying the results back into src/.
set -e

ROOT_DIR=$(cd $(dirname $BASH_SOURCE)/..; pwd)

# Build the protoc docker image if it does not exist yet.
check_image() {
  if ! docker images | grep protoc_image > /dev/null; then
    $ROOT_DIR/hack/build/protoc/build
  else
    echo '[DEBUG] `protoc_image` Docker image was found.'
  fi
}

# Random 4-char suffix so parallel runs get unique container names.
get_rand() {
  cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 4 | head -n 1
}

# Run protoc with the Go/gRPC plugin, then copy the output into src/database.
compile_go() {
  rand=$(get_rand)
  name="protoc-run-$rand"
  docker run -it \
    --name $name \
    -v $ROOT_DIR/src/protos:/opt/todo/in \
    protoc_image \
    protoc \
    --proto_path=/opt/todo/in \
    --go_out=plugins=grpc:/opt/todo/out \
    /opt/todo/in/todo.proto
  # docker cp retrieves the generated files; the container is then removed.
  temp_dir=$(mktemp -d)
  docker cp $name:/opt/todo/out $temp_dir
  docker rm $name > /dev/null
  cp -rf $temp_dir/out/* $ROOT_DIR/src/database
  rm -Rf $temp_dir
  echo '[INFO] Go files were compiled!'
}

# Same as compile_go but with the PHP plugin, output into src/web.
compile_php() {
  rand=$(get_rand)
  name="protoc-run-$rand"
  docker run -it \
    --name $name \
    -v $ROOT_DIR/src/protos:/opt/todo/in \
    protoc_image \
    protoc \
    --proto_path=/opt/todo/in \
    --grpc_out=/opt/todo/out \
    --php_out=/opt/todo/out \
    --plugin=protoc-gen-grpc=/usr/local/bin/grpc_php_plugin \
    /opt/todo/in/todo.proto
  temp_dir=$(mktemp -d)
  docker cp $name:/opt/todo/out $temp_dir
  docker rm $name > /dev/null
  cp -rf $temp_dir/out/* $ROOT_DIR/src/web
  rm -Rf $temp_dir
  echo '[INFO] PHP files were compiled!'
}

main() {
  check_image
  compile_go
  compile_php
}

main
| true
|
48385292b9c213fbff7dd65a751ecb276e9d8343
|
Shell
|
jhaoheng/redmine
|
/worker/entrypoint.sh
|
UTF-8
| 549
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# Container entrypoint for the backup worker: runs one immediate backup,
# then installs a cron schedule (from $HowManayTimeToBackup) for task.sh.

# set base path
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"

# environment — default schedule: every 4 hours.
env | grep HowManayTimeToBackup
if [ "$HowManayTimeToBackup" = "" ]; then
  HowManayTimeToBackup="0 */4 * * *"
fi

# wait database is running
# NOTE(review): fixed sleep; a readiness poll would be more reliable.
sleep 60s

# run first backup
bash $DIR/task.sh

# crontab
## generate crontab file
echo "$HowManayTimeToBackup /bin/bash $DIR/task.sh" > $DIR/cron.d/backup
chmod 644 $DIR/cron.d/backup
## add task to crontab
crontab $DIR/cron.d/backup
## start cron
/etc/init.d/cron start

# use tty — keep the container's foreground process alive.
/bin/bash
| true
|
263ef01f7807d9325f13204b22f8ce7e996d38d4
|
Shell
|
iij/mruby
|
/test/posix/all.sh
|
UTF-8
| 553
| 3.609375
| 4
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/sh
# Run every sibling *.sh test (except this file) and report an aggregate
# result; the exit status is the sum of the individual statuses.
D=`dirname $0`
result=""
retval=0
list=`cd ${D}; ls -1 *.sh | grep -v all.sh`
for t in $list; do
  echo "###################################"
  echo "# exec test/posix/${t}; start..."
  # Each test runs in its own shell, from the test directory.
  sh -c "cd $D; sh $t"
  ret=$?
  retval=`expr $retval + $ret`
  if [ $ret -ne 0 ]; then
    # Remember which tests failed for the summary below.
    result="${result} ${t}"
  fi
  echo
done
echo "###################################"
if [ $retval -eq 0 ]; then
  echo "POSIX test result: OK"
else
  echo "POSIX test result: NG"
  echo "  fail test is :${result}"
fi
echo "###################################"
exit $retval
| true
|
2fe556d031c48b6dd37e9e387f02b06beb85806e
|
Shell
|
mrgplolek/SystemyOperacyjne2
|
/lab2/zad1.sh
|
UTF-8
| 1,064
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash -eu
# For every entry in directory $1, report its type (directory / symlink /
# regular file) and create a "<name>_ln.<ext>" symlink to it inside $2.
JEDYNECZKA=${1}
DWOJECZKA=${2}
echo -e "Wpisales ${JEDYNECZKA} oraz ${DWOJECZKA} \n" # if either path argument is missing, -u reports the error here

PLIKI_W_JEDYNECZCE=$(ls ${JEDYNECZKA})
for PLIK in $PLIKI_W_JEDYNECZCE
do
  if [[ -d "${JEDYNECZKA}/${PLIK}" ]]; then # -d: the path exists and is a directory
    if [[ -L "${JEDYNECZKA}/${PLIK}" ]]; then # distinguish a real directory from a symlink to one
      echo "${PLIK} to dowiazanie symboliczne (tzw. link)"
    else
      echo "${PLIK} to katalog"
      P=${PLIK}
      # Insert "_ln" before the extension: name.ext -> name_ln.ext
      TEMP_NAME="${P%.*}_ln.${P##*.}"
      ln -s "../${JEDYNECZKA}/${PLIK}" "${DWOJECZKA}/${TEMP_NAME}" # ln -s creates a symlink with the given name in the target dir
    fi
  else
    if [[ -L "${JEDYNECZKA}/${PLIK}" ]]; then
      echo "${PLIK} to dowiazanie symboliczne (tzw. link)"
    else
      echo "${PLIK} to plik regularny"
      P=${PLIK}
      TEMP_NAME="${P%.*}_ln.${P##*.}"
      ln -s "../${JEDYNECZKA}/${PLIK}" "${DWOJECZKA}/${TEMP_NAME}"
    fi
  fi
done
| true
|
b1989943d1313289fb07f7a7e6018220010b93aa
|
Shell
|
lgsanjos/dot_files
|
/func/jump
|
UTF-8
| 1,738
| 3.375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Frequency-ranked directory jumper: `j <pattern>` cd's to the best match
# from ~/.jumplist, `j -` bookmarks the cwd, `jl` lists, `jj` picks via fzf.

JUMP_STORAGE="$HOME/.jumplist"          # "count|path" lines, sorted by count
JUMP_STORAGE_TMP="/tmp/jumplisttmp"
JUMP_STORAGE_BOOKMARK="$HOME/.jumpbookmark"

touch "$JUMP_STORAGE"

# Append the cwd with a zero count unless it is already recorded.
__add_current_dir_to_jump () {
  local dir="$(pwd -P)/"
  local found="$(egrep -m 1 "$dir\$" "$JUMP_STORAGE")"
  [ "$found" == "" ] && echo "0|$dir" >> "$JUMP_STORAGE"
}

# Jump: '--' takes the target literally, '-' saves a bookmark, no argument
# jumps to the bookmark, anything else is matched against the jump list.
j () {
  local dir
  case "$1" in
    "--") shift; dir="$@" ;;
    "-")  pwd -P > "$JUMP_STORAGE_BOOKMARK" && return ;;
    "")   dir=$(cat "$JUMP_STORAGE_BOOKMARK") ;;
    *)    dir="$(grep -m 1 -i "${1}" "$JUMP_STORAGE")"
  esac
  if [ "$dir" != "" ]; then
    cp "$JUMP_STORAGE" "$JUMP_STORAGE_TMP"
    # Strip the leading "count|" prefix to get the raw path.
    local path=$(echo "$dir" | cut -d\| -f2-)
    # Bump the chosen path's counter, re-sort by count, cap at 200 entries.
    awk -v path="$path" 'BEGIN { FS = "|" } {
      if ($2 == path) print $1+1 "|" $2
      else print $_
    }' "$JUMP_STORAGE_TMP" | sort -nr | head -n 200 > "$JUMP_STORAGE"
    # Escape spaces in the path before eval'ing the cd.
    eval "cd ${path// /\\ } && l"
  fi
}

# List the jump history and the current bookmark.
jl () {
  echo "Jump history:"
  column -t -s '|' "$JUMP_STORAGE"
  echo -e "\nJump bookmark:"
  cat "$JUMP_STORAGE_BOOKMARK"
}

# Interactive jump: pick a path from the list with fzf.
jj () {
  local dir=$(cut -d\| -f2- "$JUMP_STORAGE" | fzf)
  j -- "$dir"
}
| true
|
ca61d7390f63525d9a11dcff5c6f1753ecf213e2
|
Shell
|
antonyfg/My_config_files
|
/.local/bin/XMYAY
|
UTF-8
| 954
| 2.828125
| 3
|
[
"WTFPL"
] |
permissive
|
#!/usr/bin/env zsh
# Author - Haron Prime
# License WTFPL http://www.wtfpl.net/
# Run a yay system upgrade in a terminal, then refresh the xmobar update
# widgets (/tmp/haron/ChU, /tmp/haron/UP) with the remaining update count.

# Only proceed when no other yay instance is running.
if [[ -z $(pgrep yay) ]]; then
  urxvtc -name update -e yay -Syu &&
  # Wait until the upgrade terminal has finished.
  while [[ -n $(pgrep yay) ]]; do
    sleep 1
  done
  # XMMWC &
  UPD=`checkupdates | wc -l`
  if [[ $UPD -eq 0 ]]; then
    echo $UPD > /tmp/haron/ChU
    echo "<fc=#0e0> System is up to date</fc>" > /tmp/haron/UP
    sleep 5 &&
    echo "<action=XMUpdateNew><fc=#0e0></fc></action>" > /tmp/haron/UP
  else
    # Singular/plural wording for the pending-update message.
    if [[ $UPD -eq 1 ]]; then
      PKG='package'
    elif [[ $UPD -gt 1 ]]; then
      PKG='packages'
    fi
    echo $UPD > /tmp/haron/ChU
    echo "<action=XMYAY><fc=#fb0> $UPD $PKG can be updated</fc></action>" > /tmp/haron/UP
    notify-send -u critical -i "/home/haron/.icons/nouveGnomeGray/24x24/status/pk-update-security.png" "  <big><b><u>Update info</u></b></big>" "  Update do not complete"
  fi
fi
exit 0
| true
|
c3e89fefe2c035feb3fd031439215f6c2973bf41
|
Shell
|
Homebrew/ruby-macho
|
/test/src/make-inconsistent.sh
|
UTF-8
| 1,171
| 4.0625
| 4
|
[
"APSL-2.0",
"MIT",
"Apache-2.0",
"Ruby",
"NCSA",
"LLVM-exception"
] |
permissive
|
#!/usr/bin/env bash
# Build test/libinconsistent.dylib fixtures: for each fat-* directory, lipo
# two single-arch dylibs with DIFFERENT linkage into one fat Mach-O.

main() {
  local use_dir
  for use_dir in "${@}"; do
    # we only want to build libinconsistent.dylib as a fat mach-o
    [[ "${use_dir}" = fat* ]] || continue
    inconsistent_for "${use_dir}"
  done
}

inconsistent_for() {
  # splits a fat directory spec like fat-i386-x86_64 into
  # its constituent arch(3) pairs (e.g., i386 and x86_64)
  local fat_dir="${1}"
  local split_fat_dir
  IFS=- read -a split_fat_dir <<<"${fat_dir#fat-}"
  # future versions of the test suite might have more than two architectures
  # in a fat file, but we only care about the first two here
  local arch1="${split_fat_dir[0]}"
  local arch2="${split_fat_dir[1]}"
  # order is arbitrary, as long as the libs chosen have different linkages
  local lib1="${arch1}/libhello.dylib"
  local lib2="${arch2}/libextrahello.dylib"
  [[ -f "${lib1}" ]] || die "Missing file: ${lib1}. Did you run make?"
  [[ -f "${lib2}" ]] || die "Missing file: ${lib2}. Did you run make?"
  echo "[+] Creating libinconsistent.dylib for ${fat_dir}"
  lipo -create "${lib1}" "${lib2}" -output "${fat_dir}/libinconsistent.dylib"
}

# Print an error to stderr and abort.
die() {
  echo "Error: ${*}" >&2
  exit 1
}

main "${@}"
| true
|
b306a717ba322047d062adead29e8417cd424504
|
Shell
|
JMichaelStringer/first
|
/AzureML-NeMo/setup_data.sh
|
UTF-8
| 1,763
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
# Download and prepare the NER (NCBI) and GLUE datasets used by the NeMo
# examples, convert IOB files to NeMo format, and clone the NeMo repos.
clear
MYPWD=${PWD}
echo $MYPWD

DATA_DIR='DATA_DIR'
# rm -rf $DATA_DIR  Use bash cleanse.sh instead
NER_DATA_DIR=$DATA_DIR/NER
GLUE_DATA_DIR=$DATA_DIR/glue_data

# pip install wget
python create_data.py --data_dir=$DATA_DIR
cd $DATA_DIR/
python download_glue_data.py
cd $MYPWD

# NeMo expects dev.tsv; the NCBI corpus ships it as devel.tsv.
mv $DATA_DIR/NER/devel.tsv $DATA_DIR/NER/dev.tsv

# Convert IOB-format files into NeMo's text_/labels_ pairs.
python DATA_DIR/import_from_iob_format.py --data_file=DATA_DIR/NER/train.tsv
python DATA_DIR/import_from_iob_format.py --data_file=DATA_DIR/NER/dev.tsv
python DATA_DIR/import_from_iob_format.py --data_file=DATA_DIR/NER/test.tsv

echo "----------- head NER/text_train.txt -----------"
head $NER_DATA_DIR/text_train.txt
echo "----------- head NER/labels_train.txt -----------"
head $NER_DATA_DIR/labels_train.txt
echo "----------- head CoLA/train.tsv -----------"
head $GLUE_DATA_DIR/CoLA/train.tsv

# let's first create a subset of our dev data
head -n 100 $NER_DATA_DIR/text_dev.txt > $NER_DATA_DIR/sample_text_dev.txt
head -n 100 $NER_DATA_DIR/labels_dev.txt > $NER_DATA_DIR/sample_labels_dev.txt

# Move the raw NCBI corpus files next to the converted NER data.
mv DATA_DIR/NCBI_corpus.zip DATA_DIR/NER/NCBI_corpus.zip
mv DATA_DIR/NCBI_corpus_development.txt DATA_DIR/NER/NCBI_corpus_development.txt
mv DATA_DIR/NCBI_corpus_testing.txt DATA_DIR/NER/NCBI_corpus_testing.txt
mv DATA_DIR/NCBI_corpus_training.txt DATA_DIR/NER/NCBI_corpus_training.txt

# Clear stale experiment-tracking output.
rm -rf mlruns
rm -rf wandb

echo "----------- files in DATA_DIR -----------"
ls -lh $DATA_DIR
echo "----------- files in GLUE_DATA_DIR -----------"
ls -lh $GLUE_DATA_DIR
echo "----------- files in NER_DATA_DIR -----------"
ls -lh $NER_DATA_DIR

# Clone a pinned NeMo release plus the current main branch.
git clone --branch v1.0.0b1 https://github.com/NVIDIA/NeMo.git
mv NeMo NeMo_v1.0.0b1
sleep 10
git clone https://github.com/NVIDIA/NeMo.git
echo "----------- Data Setup Done -----------"
| true
|
b2129510c4f7fedf4539f8e16123b58d6be63c66
|
Shell
|
manuel-freire/wp2-demo
|
/go.sh
|
UTF-8
| 10,296
| 3.671875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#######
# This file contains instructions to build the demo environment
# from source
#
# - as root,
# - within a bare Ubuntu 14.04 docker image
#
# 1. To launch into the bare image, use
# sudo docker run -p 3000:3000 -p 3350:3350 -p 3111:3111 -p 8080:8080 -it ubuntu:14.04 /bin/bash
#
# 2. Copy and paste this entire file into the prompt
#
# 3. Run one by one all the update functions except update_all
# (or run only update_all)
# This step requires downloading around 500M, and
# some pretty heavy compilation.
#
# 4. Optional. Save your work so steps 1-3 need not be repeated:
# - exit the image: execute 'exit'
# - save the image: execute 'sudo docker clone <id> <name>'
# (use 'sudo docker ps -a' to find its <id>)
# - re-start the image:
# sudo docker run -p 3000:3000 -p 3350:3350 -p 3111:3111 -p 8080:8080 -it <name> /bin/bash
#
# 5. Launch supporting servers
# - launch_redis && launch_mongo && launch_el
# - launch_zookeeper
# - launch_kafka
# - launch_storm
# - launch_openlrs
#
# 6. Launch WP2 servers, one by one
# - launch_openlrs
# - launch_test_users
# - launch_lrs
# - launch_gf
# - launch_emo
#
# --- Pinned component versions and artifact paths ----------------------------
# Exported so subshells and the generated launcher scripts see the same values.
export MAVEN_VERSION="3.3.3"
# Node: NODE_NUM_VERSION names the dist directory, NODE_VERSION the tarball.
export NODE_NUM_VERSION="v0.12.7"
export NODE_VERSION="node-v0.12.7-linux-x64"
export REDIS_VERSION="redis-3.0.4"
export EL_VERSION="elasticsearch-1.7.1"
export STORM_VERSION="apache-storm-0.9.5"
export ZOOKEEPER_VERSION="zookeeper-3.4.6"
# Kafka: KAFKA_NUM_VERSION names the download directory, KAFKA_VERSION the build.
export KAFKA_NUM_VERSION="0.8.2.1"
export KAFKA_VERSION="kafka_2.10-0.8.2.1"
# Artifacts produced by update_gleaner_realtime / update_lostinspace below.
export PATH_TO_GLEANER_REALTIME_JAR="/opt/gleaner-realtime/target/realtime-jar-with-dependencies.jar"
export PATH_TO_L_I_SPACE_WEBAPP="/opt/lostinspace/html/target/webapp"
# used to download sources, executables
function update_tools {
  # Install the base toolchain (editor, git, wget, compilers, JDK 7) via apt,
  # then unpack a pinned Maven release under /opt and link mvn onto the PATH.
  apt-get update && apt-get install -y nano git wget gcc g++ make openjdk-7-jdk
  local maven_tgz="apache-maven-${MAVEN_VERSION}-bin.tar.gz"
  cd /opt
  wget "http://apache.rediris.es/maven/maven-3/${MAVEN_VERSION}/binaries/${maven_tgz}"
  tar -xvzf "${maven_tgz}"
  cd /
  ln -sf "/opt/apache-maven-${MAVEN_VERSION}/bin/mvn" /usr/local/bin
}
function update_with_git {
  # Clone github.com/<$1>/<$2> under /opt and sync it to branch <$3>.
  # Deliberately leaves the caller inside /opt/<$2> (several callers rely
  # on that working directory afterwards).
  local owner=$1 repo=$2 branch=$3
  cd /opt
  git clone "https://github.com/${owner}/${repo}"
  sleep 1s
  cd "${repo}"
  git fetch origin "${branch}"
  git pull origin "${branch}"
  sleep 1s
}
function update_node {
  # Install a prebuilt Node.js under /opt, link its binaries onto the PATH,
  # and globally install bower (needed by the gf front-end build).
  local tarball="/tmp/${NODE_VERSION}.tar.gz"
  cd /tmp
  wget "https://nodejs.org/dist/${NODE_NUM_VERSION}/${NODE_VERSION}.tar.gz"
  cd /opt
  tar -xvzf "$tarball"
  cd /
  # Glob stays unquoted on purpose: link every bundled binary (node, npm, ...).
  ln -sf /opt/${NODE_VERSION}/bin/* /usr/local/bin
  npm install -g bower
}
function scriptify { # name dir commands...
  # Generate /opt/<name>.sh: a tiny launcher that cd's into <dir> (relative,
  # because launchers are invoked from /opt — see launch_node) and runs the
  # remaining arguments as a single command line.
  TARGET=/opt/${1}.sh
  shift
  cd /opt
  {
    echo "#! /bin/bash"
    echo cd $1
    shift
    echo "$@"
  } > $TARGET
  cd /opt
  chmod 0755 $TARGET
}
function update_mongo {
# mongo via apt; see http://docs.mongodb.org/master/tutorial/install-mongodb-on-ubuntu/
# Trust the MongoDB 3.0 signing key, register the trusty repo, then install.
apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10
echo "deb http://repo.mongodb.org/apt/ubuntu trusty/mongodb-org/3.0 multiverse" | tee /etc/apt/sources.list.d/mongodb-org-3.0.list
apt-get update
apt-get install -y mongodb-org
}
function update_redis {
  # Build Redis from a pinned source tarball and link redis-server to the PATH.
  local redis_tgz="${REDIS_VERSION}.tar.gz"
  cd /opt
  wget "http://download.redis.io/releases/${redis_tgz}"
  tar xvzf "${redis_tgz}"
  cd "${REDIS_VERSION}"
  make
  ln -sf "/opt/${REDIS_VERSION}/src/redis-server" /usr/local/bin
}
function update_el {
# Install Elasticsearch from the official .deb (version pinned by EL_VERSION);
# dpkg registers the init.d service used later by launch_el.
cd /opt
wget https://download.elastic.co/elasticsearch/elasticsearch/${EL_VERSION}.deb
dpkg -i ${EL_VERSION}.deb
}
function update_storm {
  # Unpack Storm under /opt, move its web UI to port 8081 (8080 is used by
  # OpenLRS — see the port notes in launch_all), and link the storm CLI.
  cd /opt
  wget "http://apache.rediris.es/storm/${STORM_VERSION}/${STORM_VERSION}.tar.gz"
  tar -xvzf "${STORM_VERSION}.tar.gz"
  cd "${STORM_VERSION}/conf"
  echo "ui.port: 8081" >> storm.yaml
  cd /
  ln -sf "/opt/${STORM_VERSION}/bin/storm" /usr/local/bin
}
function update_zookeeper {
  # Unpack ZooKeeper, link its zk*.sh helpers onto the PATH, and activate the
  # sample configuration shipped with the release.
  cd /opt
  wget "http://apache.rediris.es/zookeeper/${ZOOKEEPER_VERSION}/${ZOOKEEPER_VERSION}.tar.gz"
  tar -xvzf "${ZOOKEEPER_VERSION}.tar.gz"
  cd /
  # Glob stays unquoted on purpose: link every helper script.
  ln -sf /opt/${ZOOKEEPER_VERSION}/bin/zk*.sh /usr/local/bin
  cd "/opt/${ZOOKEEPER_VERSION}/conf/"
  cp zoo_sample.cfg zoo.cfg
}
function update_kafka {
  # Unpack Kafka under /opt and link its *.sh tools onto the PATH.
  cd /opt
  wget "http://apache.rediris.es/kafka/${KAFKA_NUM_VERSION}/${KAFKA_VERSION}.tgz"
  tar -xvzf "${KAFKA_VERSION}.tgz"
  cd /
  # Glob stays unquoted on purpose: link every bundled script.
  ln -sf /opt/${KAFKA_VERSION}/bin/*.sh /usr/local/bin
}
function update_gleaner_realtime { # updates .m2 cache
# Clone/refresh the realtime analytics topology and build the fat jar that
# the LRS points Storm at (PATH_TO_GLEANER_REALTIME_JAR, see update_lrs).
update_with_git RotaruDan gleaner-realtime toledo-09-15
cd /opt/gleaner-realtime
mvn clean install
}
function update_openlrs {
# Clone/refresh OpenLRS; it is built and started later by launch_openlrs
# via its bundled run.sh.
update_with_git RotaruDan OpenLRS toledo-09-15
}
# updates .m2 cache; SLOW
function update_lostinspace {
update_with_git RotaruDan lostinspace toledo-09-15
update_with_git e-ucm xmltools master
# xmltools must land in the local .m2 repo before the game can build.
cd /opt/xmltools
mvn clean install
# -Phtml produces the webapp consumed by update_gf (PATH_TO_L_I_SPACE_WEBAPP).
cd /opt/lostinspace
mvn clean install -Phtml
}
function update_test_users {
  # Clone/refresh the test-users backend, install its dependencies, and
  # generate its launcher script (started later by launch_test_users).
  update_with_git RotaruDan test-users toledo-09-15
  # CONSISTENCY FIX: be explicit about the working directory instead of
  # relying on update_with_git leaving us inside /opt/test-users
  # (matches how update_lrs and update_gf do it).
  cd /opt/test-users
  npm install
  npm run fast-setup
  npm run gen-apidoc
  # npm test # requires redis, mongo running
  scriptify test-users test-users npm start
}
# depends: gleaner-realtime
function update_lrs {
update_with_git RotaruDan lrs toledo-09-15
cd /opt/lrs
# Point the LRS at the realtime fat jar and the Storm CLI installed above.
echo "exports.defaultValues.realtimeJar='${PATH_TO_GLEANER_REALTIME_JAR}';" >> config-values.js
echo "exports.defaultValues.stormPath='/opt/${STORM_VERSION}/bin';" >> config-values.js
npm install
npm run fast-setup
npm run gen-apidoc
# npm test # requires redis, mongo running
scriptify lrs lrs npm start
}
# depends: lost-in-space
function update_gf {
update_with_git gorco gf toledo-09-15
cd /opt/gf
npm install
bower --allow-root install
npm run fast-setup
# Drop the pre-built Lost-in-Space webapp into the front-end's public dir.
mkdir app
mkdir app/public
rm -rf app/public/lostinspace
cp -r ${PATH_TO_L_I_SPACE_WEBAPP} app/public/lostinspace
cd app/public/
# Fetch and unpack the prebuilt "inboxed" webapp alongside it.
wget https://dl.dropboxusercontent.com/u/3300634/inboxed.tar.gz
tar -xvzf inboxed.tar.gz
mv webapp inboxed
scriptify gf gf npm start
}
# front and back-ends for emotions
function update_emo {
  # Clone, install, and create launchers for both emotion services:
  # emoB (backend) and emoF (frontend). Identical steps, so loop over them.
  local svc
  for svc in emoB emoF; do
    update_with_git gorco "$svc" master
    cd "/opt/$svc"
    npm install
    scriptify "$svc" "$svc" npm start
  done
}
function update_all {
  # Run every update step once, in dependency order: toolchain and runtimes
  # first, then supporting servers, then the WP2 sources built on top.
  local step
  for step in \
      update_tools update_node update_mongo update_redis update_el \
      update_storm update_zookeeper update_kafka update_gleaner_realtime \
      update_openlrs update_lostinspace update_test_users update_lrs \
      update_gf update_emo; do
    "$step"
  done
}
function get_pids { # $! is broken in docker
  # Print, space-separated, the PIDs of processes whose full command line
  # matches $1 (an extended regex).
  # BUG FIX: the previous "ps -Af | grep $1 | ... | head -n -1" pipeline
  # raced with its own grep process and relied on that grep sorting last in
  # the ps output. pgrep -f scans the same full command lines but never
  # reports itself, so no post-filtering is needed.
  pgrep -f "$1" | xargs
}
function launch_redis {
PIDFILE="/opt/redis.pid"
LOGFILE="/opt/redis.log"
# Best effort: stop the instance recorded in the pidfile, if any.
kill $(cat ${PIDFILE})
# in warning shown when launched otherwise
echo never > /sys/kernel/mm/transparent_hugepage/enabled
# Background inside a subshell so the server detaches from this shell's jobs.
(redis-server > ${LOGFILE} 2>&1 & )
sleep 4s
PIDS=$(get_pids redis)
echo -n $PIDS > $PIDFILE
echo "Launched redis: $PIDS"
cd /opt
}
function launch_mongo {
  # (Re)start mongod: best-effort kill of the previously recorded instance,
  # make sure the data directory exists, background a fresh daemon, then
  # record the mongod PIDs visible after a short settle period.
  PIDFILE="/opt/mongo.pid"
  LOGFILE="/opt/mongo.log"
  kill $(cat ${PIDFILE})
  mkdir /opt/mongoDB
  (mongod --dbpath /opt/mongoDB > ${LOGFILE} 2>&1 & )
  sleep 4s
  PIDS=$(get_pids mongod)
  printf '%s' "$PIDS" > $PIDFILE
  echo "Launched mongo: $PIDS"
  cd /opt
}
function launch_el {
# Elasticsearch was installed as a .deb (update_el), so it is managed via
# its init script rather than the pidfile convention used by the others.
/etc/init.d/elasticsearch restart
echo "Launched ElasticSearch (via init.d)"
}
function launch_kafka {
PIDFILE="/opt/kafka.pid"
LOGFILE="/opt/kafka.log"
# Best effort: stop the instance recorded in the pidfile, if any.
kill $(cat ${PIDFILE})
cd /opt/${KAFKA_VERSION}
(bin/kafka-server-start.sh config/server.properties > ${LOGFILE} 2>&1 & )
sleep 4s
# "kafka_2" matches the versioned install dir appearing in the java command line.
PIDS=$(get_pids kafka_2)
echo -n $PIDS > $PIDFILE
echo "Launched kafka: $PIDS"
cd /opt
}
function launch_zookeeper {
PIDFILE="/opt/zookeeper.pid"
LOGFILE="/opt/zookeeper.log"
# Best effort: stop the instance recorded in the pidfile, if any.
kill $(cat ${PIDFILE})
cd /opt/${ZOOKEEPER_VERSION}/bin
(./zkServer.sh start > ${LOGFILE} 2>&1 & )
sleep 4s
PIDS=$(get_pids zookeeper)
echo -n $PIDS > $PIDFILE
echo "Launched zookeeper: $PIDS"
cd /opt
}
function launch_storm {
  # Start the three Storm daemons (nimbus, supervisor, web UI), recording the
  # PIDs of each in /opt/storm.pid, space-separated in start order.
  PIDFILE="/opt/storm.pid"
  kill $(cat ${PIDFILE})
  local all_pids=""
  LOGFILE="/opt/storm_nimbus.log"
  (storm nimbus > ${LOGFILE} 2>&1 & )
  PIDS=$(get_pids nimbus)
  echo -n "$PIDS " > $PIDFILE
  all_pids+="$PIDS "
  sleep 2s
  LOGFILE="/opt/storm_supervisor.log"
  (storm supervisor > ${LOGFILE} 2>&1 & )
  PIDS=$(get_pids supervisor)
  echo -n "$PIDS " >> $PIDFILE
  all_pids+="$PIDS "
  sleep 2s
  LOGFILE="/opt/storm_ui.log"
  (storm ui > ${LOGFILE} 2>&1 & )
  # NOTE(review): the ".ui" pattern is as in the original — presumably it
  # singles out the UI daemon's command line; confirm against ps output.
  PIDS=$(get_pids .ui)
  echo -n "$PIDS " >> $PIDFILE
  all_pids+="$PIDS "
  sleep 2s
  # BUG FIX: previously only the UI PIDs were reported here, because $PIDS
  # had been overwritten at each step; report all three daemons instead.
  echo "Launched storm: $all_pids"
  cd /opt
}
function launch_openlrs {
PIDFILE="/opt/openlrs.pid"
LOGFILE="/opt/openlrs.log"
# Best effort: stop the instance recorded in the pidfile, if any.
kill $(cat ${PIDFILE})
cd /opt/OpenLRS
chmod 0755 run.sh
echo "Warning - this takes a long time to start (~1m)"
(./run.sh > ${LOGFILE} 2>&1 & )
sleep 4s
PIDS=$(get_pids openlrs)
echo -n $PIDS > $PIDFILE
echo "Launched OpenLRS: $PIDS"
cd /opt
}
function launch_node { # $1 = launcher name, matching /opt/$1.sh from scriptify
  # (Re)start a Node-based service via its generated launcher script and
  # record the resulting PIDs in /opt/$1.pid.
  PIDFILE="/opt/$1.pid"
  LOGFILE="/opt/$1.log"
  # Best effort: stop the instance recorded in the pidfile, if any.
  kill $(cat ${PIDFILE})
  # BUG FIX: the launcher was invoked as ./$1.sh, which only worked when the
  # current directory already happened to be /opt (true only after another
  # launch_* had run). cd there explicitly first.
  cd /opt
  (./$1.sh > ${LOGFILE} 2>&1 & )
  sleep 4s
  PIDS=$(get_pids $1.sh)
  echo -n $PIDS > $PIDFILE
  echo "Launched $1 via Node: $PIDS"
  cd /opt
}
function launch_test_users {
# Start the test-users backend (port 3000; see the notes in launch_all).
launch_node test-users
}
function launch_lrs {
# Start the LRS backend (port 3300; see the notes in launch_all).
launch_node lrs
}
function launch_gf {
# Start the gf front-end (port 3350; see the notes in launch_all).
launch_node gf
}
function launch_emo {
# Start both emotion services: emoB (backend) and emoF (frontend).
launch_node emoB
launch_node emoF
}
# WARNING - this is for reference; do not execute directly
# as services take a while to start, and some require others
# to be running to start properly
function launch_all {
# Reference start order only — see the warning above: services need settle
# time and some require earlier ones to be running. Trailing comments note
# the port each service listens on.
launch_zookeeper #
launch_redis
launch_mongo # 27017
launch_el
launch_storm # 8081 + internal
launch_kafka
launch_openlrs # 8080
launch_test_users # 3000 ; also :3000/api
launch_lrs # 3300 ;
launch_gf # 3350
launch_emo # 3111 (frontend); 3232 (be)
}
function log {
# Follow a service log file, seeded with its last 100 lines
# (e.g. "log /opt/lrs.log").
tail -n 100 -f $1
}
| true
|
1c1a6704491c577b0fadb9d8cc00ee2566fa48c1
|
Shell
|
Cloudxtreme/activity.sh
|
/activity.sh
|
UTF-8
| 26,985
| 3.796875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Author: Arijit Basu
# Email: sayanarijit@gmail.com
# Documentation: https://sayanarijit.github.io/activity.sh
# set -x # For debugging
# Menu with function declarations ----------------------------------------------
# Keys a..z are what the user types at the menu prompt; values are the
# function names that display-menu dispatches to (its "[a-z])" case arm).
declare -A MENU
MENU[a]="ping-check"
MENU[b]="ssh-check"
MENU[c]="console-check"
MENU[d]="config-check"
MENU[e]="execute-command"
MENU[f]="login-check"
MENU[g]="health-check"
MENU[h]="mount-check"
MENU[i]="port-scan"
MENU[j]="os-check"
MENU[w]="publish-unpublish"
MENU[x]="remove-this-activity"
MENU[y]="rename-this-activity"
MENU[z]="exit"
# Initialize global variables --------------------------------------------------
REPORT_DIR="$HOME/activity-reports"
# ACTIVITY_NAME: resume an existing activity chosen by the user, or start a
# fresh one named after the current timestamp.
if [ -d "$REPORT_DIR" ] && [ "$(ls $REPORT_DIR)" ]; then
echo "Previous activities"
echo "───────────────────"
ls -t -1 "$REPORT_DIR"
echo
read -p "Enter activity name to continue or leave blank to start fresh : " ACTIVITY_NAME
echo
[ "$ACTIVITY_NAME" ] && [ ! -d "$REPORT_DIR/$ACTIVITY_NAME" ] && echo "Not found !" && exit 1
[ ! "$ACTIVITY_NAME" ] && ACTIVITY_NAME=$(date +%d-%h-%y_%Hh%Mm%Ss)
else
ACTIVITY_NAME=$(date +%d-%h-%y_%Hh%Mm%Ss)
fi
# Paths
ACTIVITY_DIR="$REPORT_DIR/$ACTIVITY_NAME"
BASIC_REPORT_DIR="$ACTIVITY_DIR/basic_report" # Files under it contain hostnames only
PING_CHECK_DIR="$BASIC_REPORT_DIR/ping_check"
SSH_CHECK_DIR="$BASIC_REPORT_DIR/ssh_check"
CONSOLE_CHECK_DIR="$BASIC_REPORT_DIR/console_check"
LOGIN_CHECK_DIR="$BASIC_REPORT_DIR/login_check"
HEALTH_CHECK_DIR="$BASIC_REPORT_DIR/health_check"
MOUNT_CHECK_DIR="$BASIC_REPORT_DIR/mount_check"
PORT_SCAN_DIR="$BASIC_REPORT_DIR/port_scan"
OS_CHECK_DIR="$BASIC_REPORT_DIR/os_check"
ADVANCE_REPORT_DIR="$ACTIVITY_DIR/advance_report" # Files/directories under it contain outputs
EXECUTE_COMMAND_DIR="$ADVANCE_REPORT_DIR/execute_command"
CONFIG_CHECK_DIR="$ADVANCE_REPORT_DIR/config_check"
SET_SSH_KEY_SCRIPT="" # Will run if 1st ssh attempt fails (empty = disabled)
WEBSITE_PATH="/var/www/html/activity-reports" # To publish reports in website
WEBPAGE_FILE="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/activity.php" # This is the home page for website
# Timeouts (seconds; enforced via "timeout -s9" around ssh invocations)
SSH_TIMEOUT=10
SET_SSH_KEY_TIMEOUT=60
EXECUTE_COMMAND_TIMEOUT=60
# Servers
WEBSERVER="localhost" # Will be used to publish reports
REFERENCE_SERVER="localhost" # Will be used to verify ssh passwords
# unix PASSWORD: prompted once, used by sshpass for non-root logins.
if [ "$REFERENCE_SERVER" ]; then
while [ ! "$PASSWORD" ]; do
read -sp "Enter unix password : " PASSWORD && echo
done
else
PASSWORD="dummy"
fi
# Other variables
MAX_BACKGROUND_PROCESS=5000 # Maximum no. of background process to run simultaneously
# Horizontal rule, as wide as the current terminal.
HR=$(for ((i=0;i<$(tput cols);i++));do echo -en "─";done;echo)
# Custom functions (can be edited)----------------------------------------------
# Single action functions (executes one host at a time)
generate-ping-report ()
{
# Classify host $1 with a single ping (5-second deadline) and append its
# name to available_hosts or unavailable_hosts under $PING_CHECK_DIR.
# Always returns 0 so background fan-out callers never observe a failure.
local bucket="unavailable_hosts"
ping -c1 -w5 $1 &>/dev/null && bucket="available_hosts"
echo $1 >> "$PING_CHECK_DIR/$bucket"
return 0
}
generate-ssh-report ()
{
# Probe ssh reachability of host $1 and append it to report files under
# $SSH_CHECK_DIR: port 22 state, reachable/unreachable, whether root login
# worked, and whether the connection took more or less than 5 seconds.
# Tries, in order: root key login; root login after running the optional
# SET_SSH_KEY_SCRIPT; password login via sshpass. Returns 0 if any attempt
# connected, 1 otherwise.
# Drop stale known_hosts entries (for both root's and our own files).
sudo ssh-keygen -R $1 &>/dev/null
ssh-keygen -R $1 &>/dev/null
# Check if port 22 is open
if nc -z $1 22 &>/dev/null;then
echo $1 >> "$SSH_CHECK_DIR/port_22_open"
else
echo $1 >> "$SSH_CHECK_DIR/port_22_closed"
echo $1 >> "$SSH_CHECK_DIR/ssh_unreachable_hosts"
return 1
fi
# Try 1 : Try login with root
start=$(date +%s)
connection=$(timeout -s9 $SSH_TIMEOUT sudo ssh -q -o ConnectTimeout=3 -o StrictHostKeyChecking=no $1 "echo connected" 2>/dev/null) &>/dev/null
end=$(date +%s)
if [ "$connection" == "connected" ];then
echo $1 >> "$SSH_CHECK_DIR/ssh_reachable_hosts"
echo $1 >> "$SSH_CHECK_DIR/ssh_with_root_login"
if (( $end-$start <= 5 )); then
echo $1 >> "$SSH_CHECK_DIR/ssh_time_within_5_sec"
else
echo $1 >> "$SSH_CHECK_DIR/ssh_time_above_5_sec"
fi
return 0
fi
# Try 2 : Set passwordless key and try login with root
if [ -f "$SET_SSH_KEY_SCRIPT" ];then
temp=$(timeout -s9 $SET_SSH_KEY_TIMEOUT sudo $SET_SSH_KEY_SCRIPT $1 &>/dev/null) &>/dev/null
start=$(date +%s)
connection=$(timeout -s9 $SSH_TIMEOUT sudo ssh -q -o ConnectTimeout=3 -o StrictHostKeyChecking=no $1 "echo connected" 2>/dev/null) &>/dev/null
end=$(date +%s)
if [ "$connection" == "connected" ];then
echo $1 >> "$SSH_CHECK_DIR/ssh_reachable_hosts"
echo $1 >> "$SSH_CHECK_DIR/ssh_with_root_login"
if (( $end-$start <= 5 )); then
echo $1 >> "$SSH_CHECK_DIR/ssh_time_within_5_sec"
else
echo $1 >> "$SSH_CHECK_DIR/ssh_time_above_5_sec"
fi
return 0
fi
fi
# Try 3 : Login with unix account
start=$(date +%s)
connection=$(timeout -s9 $SSH_TIMEOUT sshpass -p "$PASSWORD" ssh -q -o ConnectTimeout=3 -o StrictHostKeyChecking=no $1 "echo connected" 2>/dev/null) &>/dev/null
end=$(date +%s)
if [ "$connection" == "connected" ];then
echo $1 >> "$SSH_CHECK_DIR/ssh_reachable_hosts"
echo $1 >> "$SSH_CHECK_DIR/ssh_root_login_not_possible"
if (( $end-$start <= 5 )); then
echo $1 >> "$SSH_CHECK_DIR/ssh_time_within_5_sec"
else
echo $1 >> "$SSH_CHECK_DIR/ssh_time_above_5_sec"
fi
return 0
fi
# If everything fails
echo $1 >> "$SSH_CHECK_DIR/ssh_unreachable_hosts"
return 1
}
generate-execute-command-report () # $1=host  $2=command  $3=report directory
{
# Run $2 on host $1 over ssh — as root when ssh-check recorded root access,
# otherwise via sshpass with the unix password — capturing stdout/stderr
# into $3/output/$1 and $3/error/$1. Empty captures are deleted so the menu
# only counts hosts that actually produced output. Always returns 0 so
# background fan-out callers never observe a failure.
hosts=()
file="$SSH_CHECK_DIR/ssh_with_root_login"
[ -f "$file" ] && hosts=( $(cat "$file") )
if in-array $1 ${hosts[*]}; then
ssh_string="sudo ssh -q -o ConnectTimeout=3 -o StrictHostKeyChecking=no $1"
else
# NOTE(review): $PASSWORD is re-expanded unquoted when $ssh_string runs
# below, so passwords containing whitespace will break — confirm.
ssh_string="sshpass -p "$PASSWORD" ssh -q -o ConnectTimeout=3 -o StrictHostKeyChecking=no $1"
fi
$ssh_string "$2" > "$3/output/$1" 2> "$3/error/$1"
# Keep only non-empty captures.
[ "$(cat $3/output/$1 2>/dev/null)" ] || rm -f "$3/output/$1"
[ "$(cat $3/error/$1 2>/dev/null)" ] || rm -f "$3/error/$1"
return 0
# BUG FIX: removed an unreachable leftover statement that followed the
# "return 0" above and re-ran the command under "timeout" — dead code from
# an earlier revision of this function.
}
generate-console-report ()
{
# Look for an out-of-band management console for host $1 by pinging common
# name suffixes ($1-ilo, $1-con, ...); on the first hit, resolve its FQDN
# with nslookup and record "host fqdn" in console_available, otherwise
# record the bare host in console_not_available. Always returns 0.
cons="ilo con imm ilom alom xscf power"
for con in $cons;do
fqdn=""
ping -c1 -w1 "$1-$con" &>/dev/null && \
fqdn=$(nslookup "$1-$con"|grep -i "$1-$con"|grep -v NXDOMAIN|awk '{ if (/Name:/) {print $2} else if (/canonical name/) {print $1} else {print $0} }') && \
echo "$1 $fqdn" >> "$CONSOLE_CHECK_DIR/console_available" && break
done
[ ! "$fqdn" ] && echo $1 >> "$CONSOLE_CHECK_DIR/console_not_available"
return 0
}
generate-login-report () # $1=host  $2=optional username
{
# Verify that a user can log in on host $1: the user is $2 if given,
# otherwise the last non-root interactive (pts) user found in "last".
# Records id/home-dir existence and an overall login_successful/failed
# verdict under $LOGIN_CHECK_DIR. Requires root ssh access (established by
# ssh-check); other hosts are recorded as unverifiable. Always returns 0.
hosts=()
file="$SSH_CHECK_DIR/ssh_with_root_login"
[ -f "$file" ] && hosts=( $(cat "$file") )
if in-array $1 ${hosts[*]}; then
if [ "$2" ]; then
user=$2
else
user=$(sudo ssh -q -o ConnectTimeout=3 -o StrictHostKeyChecking=no $1 \
"last|grep pts|grep -v root|tail -1"|awk '{print $1}')
fi
[ ! "$user" ] && echo $1 >> "$LOGIN_CHECK_DIR/no_user_found" && return 0
id=$(sudo ssh -q -o ConnectTimeout=3 -o StrictHostKeyChecking=no $1 "id $user" 2>/dev/null) 2>/dev/null
# "su <user> -c 'cd && pwd'" both proves the account can switch and prints
# the home directory (empty output means no usable home).
home=$(timeout -s9 $SSH_TIMEOUT sudo ssh -q -o ConnectTimeout=3 -o \
StrictHostKeyChecking=no $1 "su $user -c 'cd && pwd'" 2>/dev/null) 2>/dev/null
if [ "$id" ]; then
echo $1 >> "$LOGIN_CHECK_DIR/user_id_exists"
else
echo $1 >> "$LOGIN_CHECK_DIR/user_id_missing"
fi
if [ "$home" ]; then
echo $1 >> "$LOGIN_CHECK_DIR/home_dir_exists"
else
echo $1 >> "$LOGIN_CHECK_DIR/home_dir_missing"
fi
if [ "$id" ]&&[ "$home" ];then
echo $1 >> "$LOGIN_CHECK_DIR/login_successful"
else
echo $1 >> "$LOGIN_CHECK_DIR/login_failed"
fi
else
echo $1 >> "$LOGIN_CHECK_DIR/ssh_root_login_not_possible"
fi
return 0
}
generate-health-report ()
{
# Collect basic health signals from host $1 over ssh (CPU load from uptime,
# active sessions from who, >=95% disks from df, uptime days) and append the
# host to threshold-bucket files under $HEALTH_CHECK_DIR, plus an overall
# healthy_hosts/unhealthy_hosts verdict. RAM checks are intentionally
# disabled (commented out below). Always returns 0.
hosts=()
file="$SSH_CHECK_DIR/ssh_with_root_login"
[ -f "$file" ] && hosts=( $(cat "$file") )
if in-array $1 ${hosts[*]}; then
ssh_string="sudo ssh -q -o ConnectTimeout=3 -o StrictHostKeyChecking=no $1"
else
ssh_string="sshpass -p "$PASSWORD" ssh -q -o ConnectTimeout=3 -o StrictHostKeyChecking=no $1"
fi
# Last field of uptime is a load average; scaled x100 for integer compares.
cpu_usage=$($ssh_string "uptime"|awk '{print $NF*100}' 2>/dev/null)
[ "$cpu_usage" ]||cpu_usage=0
active_sessions=$($ssh_string "who"|wc -l 2>/dev/null)
disk_full=$($ssh_string "df -l"|grep "^/dev/"|grep -e '9[5-9]%\|100%'|awk '{print $NF}' 2>/dev/null)
uptime=$($ssh_string "uptime"|grep -i days|cut -dd -f1|awk '{print $NF}' 2>/dev/null)
[ "$uptime" ]||uptime=0
# ram_usage=$($ssh_string "free"|grep -i mem|awk '{print $3*100/$2}'|cut -d. -f1 2>/dev/null)
# [ "$ram_usage" ]||ram_usage=0
# if [ "$disk_full" ]||[ "$cpu_usage" -ge 70 ]||[ "$ram_usage" -ge 70 ]||[ "$uptime" -ge 200 ]||[ "$active_sessions" -ge 20 ]; then
if [ "$disk_full" ]||[ "$cpu_usage" -ge 70 ]||[ "$uptime" -ge 200 ]||[ "$active_sessions" -ge 20 ]; then
[ "$cpu_usage" -ge 70 ] && echo $1 >> "$HEALTH_CHECK_DIR/cpu_usage_above_70%"
[ "$cpu_usage" -ge 80 ] && echo $1 >> "$HEALTH_CHECK_DIR/cpu_usage_above_80%"
[ "$cpu_usage" -ge 90 ] && echo $1 >> "$HEALTH_CHECK_DIR/cpu_usage_above_90%"
# [ "$ram_usage" -ge 70 ] && echo $1 >> "$HEALTH_CHECK_DIR/ram_usage_above_70%"
# [ "$ram_usage" -ge 80 ] && echo $1 >> "$HEALTH_CHECK_DIR/ram_usage_above_80%"
# [ "$ram_usage" -ge 90 ] && echo $1 >> "$HEALTH_CHECK_DIR/ram_usage_above_90%"
[ "$uptime" -ge 200 ] && echo $1 >> "$HEALTH_CHECK_DIR/uptime_above_200_days"
[ "$uptime" -ge 350 ] && echo $1 >> "$HEALTH_CHECK_DIR/uptime_above_350_days"
[ "$uptime" -ge 500 ] && echo $1 >> "$HEALTH_CHECK_DIR/uptime_above_500_days"
[ "$active_sessions" -ge 20 ] && echo $1 >> "$HEALTH_CHECK_DIR/active_sessions_above_20"
[ "$active_sessions" -ge 35 ] && echo $1 >> "$HEALTH_CHECK_DIR/active_sessions_above_35"
[ "$active_sessions" -ge 50 ] && echo $1 >> "$HEALTH_CHECK_DIR/active_sessions_above_50"
# "/" is not valid inside a report file name; replace with a lookalike (⁄).
for d in $disk_full;do
echo $1 >> "$HEALTH_CHECK_DIR/disk_usage_above_95%_:_$(echo $d|sed 's/\//\⁄/g')"
done
echo $1 >> "$HEALTH_CHECK_DIR/unhealthy_hosts"
else
echo $1 >> "$HEALTH_CHECK_DIR/healthy_hosts"
fi
return 0
}
generate-mount-report () # $1=host, remaining args = mount points to check
{
# For each given mount point on host $1, record whether it is mounted,
# nearly full (>=95%), or (root-login hosts only) read-only for its owner.
# Report files live under $MOUNT_CHECK_DIR. Always returns 0.
host=$1 && shift
file="$SSH_CHECK_DIR/ssh_with_root_login"
[ -f "$file" ] && hosts=( $(cat "$file") )
if in-array $host ${hosts[*]}; then
ssh_string="sudo ssh -q -o ConnectTimeout=3 -o StrictHostKeyChecking=no $host"
login_user="root"
else
ssh_string="sshpass -p "$PASSWORD" ssh -q -o ConnectTimeout=3 -o StrictHostKeyChecking=no $host"
login_user=$USER
fi
for v; do
mounted=$(timeout -s9 $SSH_TIMEOUT $ssh_string "df $v|tail -1" 2>/dev/null) 2>/dev/null
# "/" is not valid inside a report file name; replace with a lookalike (⁄).
report_file=$(echo "$v"|sed 's/\//\⁄/g')
if [ "$mounted" ]; then
echo $host >> "$MOUNT_CHECK_DIR/mounted_:_$report_file"
used=$(echo "$mounted"|grep -o "...%"|awk '{print $1}'|cut -d% -f1)
[ "$used" ]&&[ $used -ge 95 ] && echo $host >> "$MOUNT_CHECK_DIR/volume_usage_above_95%_:_$report_file"
owner=$(timeout -s9 $SSH_TIMEOUT $ssh_string "ls -ld $v"|awk '{print $3}' 2>/dev/null) 2>/dev/null
if [ "$login_user" == "root" ]; then
# Writability probe as the mount's owner: any output means the touch
# failed and the mount is effectively read-only.
read_only=$(timeout -s9 $SSH_TIMEOUT $ssh_string "su $owner -s /bin/sh -c \
'touch $v/mount_test || echo read_only'" 2>/dev/null) 2>/dev/null
[ "$read_only" ] && echo $host >> "$MOUNT_CHECK_DIR/read_only_mount_:_$report_file"
fi
else
echo $host >> "$MOUNT_CHECK_DIR/not_mounted_:_$report_file"
fi
done
return 0
}
generate-port-scan-report () # $1=host  $2=port
{
# Probe one TCP port with nc and append the host to the matching
# port_<n>_open / port_<n>_closed report file under $PORT_SCAN_DIR.
# Always returns 0 so background fan-out callers never observe a failure.
local state="closed"
nc -z $1 $2 &>/dev/null && state="open"
echo $1 >> "$PORT_SCAN_DIR/port_"$2"_$state"
return 0
}
generate-os-report ()
{
# Fingerprint host $1's OS over ssh: uname kernel/release/machine plus (for
# Linux) the distro string from python's platform.dist(). The host is
# appended to one report file per discovered attribute under $OS_CHECK_DIR,
# with unknown_* fallbacks when probing fails. Always returns 0.
hosts=()
file="$SSH_CHECK_DIR/ssh_with_root_login"
[ -f "$file" ] && hosts=( $(cat "$file") )
if in-array $1 ${hosts[*]}; then
ssh_string="sudo ssh -q -o ConnectTimeout=3 -o StrictHostKeyChecking=no $1"
else
ssh_string="sshpass -p "$PASSWORD" ssh -q -o ConnectTimeout=3 -o StrictHostKeyChecking=no $1"
fi
unm=$(timeout -s9 $SSH_TIMEOUT $ssh_string "uname -smr;python -c 'import platform; print(\"_\".join(platform.dist()).replace(\" \",\"_\").replace(\"/\",\"_\"));'" 2>/dev/null) 2>/dev/null
kernel=$(echo $unm|awk '{print $1}')
release=$(echo $unm|awk '{print $2}')
arch=$(echo $unm|awk '{print $3}')
[ "$arch" ] && echo $1 >> "$OS_CHECK_DIR/$arch"
if [ "$kernel" == "Linux" ];then
echo $1 >> "$OS_CHECK_DIR/$kernel"
# Second output line (when present) is the python-reported distro string.
linux_distro=$(echo -e "$unm"|head -2|tail -1) 2>/dev/null
if [ "$linux_distro" ]&&[ "$linux_distro" != "$unm" ];then
echo $1 >> "$OS_CHECK_DIR/$linux_distro"
else
echo $1 >> "$OS_CHECK_DIR/unknown_linux"
fi
elif [ "$kernel" ];then
echo $1 >> "$OS_CHECK_DIR/$kernel"
if [ "$release" ];then
echo $1 >> "$OS_CHECK_DIR/$kernel"_"$release"
else
echo $1 >> "$OS_CHECK_DIR/unknown_unix"
fi
elif [ ! "$kernel" ];then
echo $1 >> "$OS_CHECK_DIR/unknown_kernel"
fi
return 0
}
# Looper functions (reads input and calls single action functions in loop)
ping-check ()
{
# Interactive: read target hosts from stdin (until CTRL+D), dedupe them into
# all_hosts, then fan out generate-ping-report in background batches.
# Regenerates $PING_CHECK_DIR from scratch.
[ -d "$PING_CHECK_DIR" ] && rm -rf "$PING_CHECK_DIR"
mkdir -p "$PING_CHECK_DIR" || exit 1
echo "Paste targets below and press 'CTRL+D'"
echo "──────────────────────────────────────"
readarray targets
echo ${targets[*]}|tr " " "\n"|sort|uniq > $PING_CHECK_DIR/all_hosts
echo
[ ! "${targets}" ] && echo "No target found..." && exit 1
i=0
c=${#targets[*]}
for t in ${targets[*]}; do
i=$(($i+1))
echo -en " Generating ping check report... ($i/$c) \r"
generate-ping-report $t &
[ $(jobs|wc -l) -ge 10 ] && wait # 10 parallel process is enough for ping check [to avoide write error]
done
wait
echo " "
}
ssh-check ()
{
# Fan out generate-ssh-report over every pingable host in background
# batches. Regenerates $SSH_CHECK_DIR; runs ping-check first if needed.
[ -f "$PING_CHECK_DIR/available_hosts" ] || ping-check
[ -d "$SSH_CHECK_DIR" ] && rm -rf "$SSH_CHECK_DIR"
mkdir -p "$SSH_CHECK_DIR" || exit 1
targets=( $(cat "$PING_CHECK_DIR/available_hosts") )
echo
[ ! "${targets}" ] && echo "No target found..." && exit 1
# NOTE(review): the bare "sudo ssh" invocations (here and inside the loop)
# appear intended to keep the sudo credential cache warm for the background
# workers — confirm.
sudo ssh 2>/dev/null
i=0
c=${#targets[*]}
for t in ${targets[*]}; do
sudo ssh 2>/dev/null
i=$(($i+1))
echo -en " Generating ssh check report... ($i/$c) \r"
generate-ssh-report $t &
[ $(jobs|wc -l) -ge $MAX_BACKGROUND_PROCESS ] && wait
done
wait
echo " "
}
execute-command ()
{
# Interactive: prompt for one command, run it on every ssh-reachable host in
# background batches, and store per-host stdout/stderr under a fresh
# epoch-stamped directory in $EXECUTE_COMMAND_DIR. The typed command is
# saved as "name" so display-menu can label the report.
[ -f "$SSH_CHECK_DIR/ssh_reachable_hosts" ] || ssh-check
dir="$EXECUTE_COMMAND_DIR/$(date +%s)"
mkdir -p "$dir/output" || exit 1
mkdir -p "$dir/error" || exit 1
targets=( $(cat "$SSH_CHECK_DIR/ssh_reachable_hosts") )
echo
[ ! "${targets}" ] && echo "No target found..." && exit 1
read -p "Enter command to run on reachable servers : " command_to_run
[ ! "$command_to_run" ] && echo "No command to run !" && exit 1
echo "$command_to_run" > "$dir/name" || exit 1
sudo ssh 2>/dev/null
c=${#targets[*]}
i=0
for t in ${targets[*]}; do
sudo ssh 2>/dev/null
i=$(($i+1))
echo -en " Generating command output report... ($i/$c) \r"
generate-execute-command-report $t "$command_to_run" "$dir" &
[ $(jobs|wc -l) -ge $MAX_BACKGROUND_PROCESS ] && wait
done
wait
echo " "
echo "Find the report inside directory- $dir"
echo "or publish this activity report to access it in browser."
echo
read -sp "[press ENTER to continue]"
}
console-check ()
{
# Fan out generate-console-report over every known host (reachable or not,
# hence all_hosts) in background batches. Regenerates $CONSOLE_CHECK_DIR.
[ -f "$PING_CHECK_DIR/all_hosts" ] || ping-check
[ -d "$CONSOLE_CHECK_DIR" ] && rm -rf "$CONSOLE_CHECK_DIR"
mkdir -p "$CONSOLE_CHECK_DIR" || exit 1
targets=( $(cat "$PING_CHECK_DIR/all_hosts") )
echo
[ ! "${targets}" ] && echo "No target found..." && exit 1
i=0
c=${#targets[*]}
for t in ${targets[*]}; do
i=$(($i+1))
echo -en " Generating console check report... ($i/$c) \r"
generate-console-report $t &
[ $(jobs|wc -l) -ge $MAX_BACKGROUND_PROCESS ] && wait
done
wait
echo " "
}
config-check ()
{
# Collect a standard configuration snapshot (uname, distro, uptime, network,
# routes, and a fixed list of config files) from every ssh-reachable host by
# composing one big remote command string and reusing the execute-command
# report plumbing (epoch-stamped dir under $CONFIG_CHECK_DIR).
files_to_check=( "/etc/fstab" "/etc/mtab" "/etc/network/interfaces" \
"/etc/nsswitch.conf" "/etc/yp.conf" "/etc/ssh/sshd_config" \
"/etc/puppet.conf" "/etc/sudoers" )
command_to_run="echo OS Arch;echo =============================;uname -a;echo;echo;"
command_to_run=$command_to_run"echo Linux distro;echo =============================;lsb_release -a 2>/dev/null;echo;echo;"
command_to_run=$command_to_run"echo Uptime;echo =============================;uptime;echo;echo;"
command_to_run=$command_to_run"echo Network;echo =============================;ifconfig -a;echo;echo;"
command_to_run=$command_to_run"echo Gateway;echo =============================;netstat -nr;echo;echo;"
for f in ${files_to_check[*]}; do
command_to_run=$command_to_run"[ -f $f ] && echo $f && echo ============================= && cat $f && echo && echo;"
done
[ -f "$SSH_CHECK_DIR/ssh_reachable_hosts" ] || ssh-check
dir="$CONFIG_CHECK_DIR/$(date +%s)"
mkdir -p "$dir/output" || exit 1
mkdir -p "$dir/error" || exit 1
targets=( $(cat "$SSH_CHECK_DIR/ssh_reachable_hosts" 2>/dev/null) )
echo
[ ! "${targets}" ] && echo "No target found..." && exit 1
# The report "name" (menu label) is the collection timestamp.
echo $(date +%d-%h-%y" "%H:%M) > "$dir/name" || exit 1
sudo ssh 2>/dev/null
c=${#targets[*]}
i=0
for t in ${targets[*]}; do
sudo ssh 2>/dev/null
i=$(($i+1))
echo -en " Generating configuration check report... ($i/$c) \r"
generate-execute-command-report $t "$command_to_run" "$dir" &
[ $(jobs|wc -l) -ge $MAX_BACKGROUND_PROCESS ] && wait
done
wait
echo " "
echo "Find the report inside directory- $dir"
echo "or publish this activity report to access it in browser."
echo
read -sp "[press ENTER to continue]"
}
login-check ()
{
# Fan out generate-login-report (optionally for a specific username) over
# every ssh-reachable host in background batches. Regenerates
# $LOGIN_CHECK_DIR from scratch.
[ -f "$SSH_CHECK_DIR/ssh_reachable_hosts" ] || ssh-check
[ -d "$LOGIN_CHECK_DIR" ] && rm -rf "$LOGIN_CHECK_DIR"
mkdir -p "$LOGIN_CHECK_DIR" || exit 1
targets=( $(cat "$SSH_CHECK_DIR/ssh_reachable_hosts") )
echo
[ ! "${targets}" ] && echo "No target found..." && exit 1
read -p "Enter username to check or leave blank for last active user : " user
sudo ssh 2>/dev/null
c=${#targets[*]}
i=0
for t in ${targets[*]}; do
sudo ssh 2>/dev/null
i=$(($i+1))
echo -en " Generating login check report... ($i/$c) \r"
generate-login-report $t $user &
[ $(jobs|wc -l) -ge $MAX_BACKGROUND_PROCESS ] && wait
done
wait
echo " "
}
health-check ()
{
# Fan out generate-health-report over every ssh-reachable host in
# background batches. Regenerates $HEALTH_CHECK_DIR from scratch.
[ -f "$SSH_CHECK_DIR/ssh_reachable_hosts" ] || ssh-check
[ -d "$HEALTH_CHECK_DIR" ] && rm -rf "$HEALTH_CHECK_DIR"
mkdir -p "$HEALTH_CHECK_DIR" || exit 1
targets=( $(cat "$SSH_CHECK_DIR/ssh_reachable_hosts") )
echo
[ ! "${targets}" ] && echo "No target found..." && exit 1
sudo ssh 2>/dev/null
c=${#targets[*]}
i=0
for t in ${targets[*]}; do
sudo ssh 2>/dev/null
i=$(($i+1))
echo -en " Generating health check report... ($i/$c) \r"
generate-health-report $t &
[ $(jobs|wc -l) -ge $MAX_BACKGROUND_PROCESS ] && wait
done
wait
echo " "
}
mount-check ()
{
# Interactive: read mount points from stdin (until CTRL+D), then fan out
# generate-mount-report over every ssh-reachable host in background
# batches. Regenerates $MOUNT_CHECK_DIR from scratch.
[ -f "$SSH_CHECK_DIR/ssh_reachable_hosts" ] || ssh-check
[ -d "$MOUNT_CHECK_DIR" ] && rm -rf "$MOUNT_CHECK_DIR"
mkdir -p "$MOUNT_CHECK_DIR" || exit 1
targets=( $(cat "$SSH_CHECK_DIR/ssh_reachable_hosts") )
echo
[ ! "${targets}" ] && echo "No target found..." && exit 1
echo "Enter mount points to check and press 'CTRL+D'"
echo "─────────────────────────────────────────────"
readarray mounts
echo
[ ! "${mounts}" ] && return 1
sudo ssh 2>/dev/null
c=${#targets[*]}
i=0
for t in ${targets[*]}; do
sudo ssh 2>/dev/null
i=$(($i+1))
echo -en " Generating mount check report... ($i/$c) \r"
generate-mount-report $t ${mounts[*]} &
[ $(jobs|wc -l) -ge $MAX_BACKGROUND_PROCESS ] && wait
done
wait
echo " "
}
os-check ()
{
# Fan out generate-os-report over every ssh-reachable host in background
# batches. Regenerates $OS_CHECK_DIR from scratch.
[ -f "$SSH_CHECK_DIR/ssh_reachable_hosts" ] || ssh-check
[ -d "$OS_CHECK_DIR" ] && rm -rf "$OS_CHECK_DIR"
mkdir -p "$OS_CHECK_DIR" || exit 1
targets=( $(cat "$SSH_CHECK_DIR/ssh_reachable_hosts") )
echo
[ ! "${targets}" ] && echo "No target found..." && exit 1
sudo ssh 2>/dev/null
c=${#targets[*]}
i=0
for t in ${targets[*]}; do
sudo ssh 2>/dev/null
i=$(($i+1))
echo -en " Generating os check report... ($i/$c) \r"
generate-os-report $t &
[ $(jobs|wc -l) -ge $MAX_BACKGROUND_PROCESS ] && wait
done
wait
echo " "
}
port-scan ()
{
# Interactive: read port numbers from stdin (until CTRL+D), then probe each
# port on every pingable host via generate-port-scan-report, in background
# batches. Regenerates $PORT_SCAN_DIR from scratch.
[ -f "$PING_CHECK_DIR/available_hosts" ] || ping-check
[ -d "$PORT_SCAN_DIR" ] && rm -rf "$PORT_SCAN_DIR"
mkdir -p "$PORT_SCAN_DIR" || exit 1
targets=( $(cat "$PING_CHECK_DIR/available_hosts") )
echo
[ ! "${targets}" ] && echo "No target found..." && exit 1
echo "Enter port numbers to scan and press 'CTRL+D'"
echo "─────────────────────────────────────────────"
readarray ports
echo
[ ! "${ports}" ] && return 1
i=0
c=${#targets[*]}
for t in ${targets[*]}; do
i=$(($i+1))
echo -en " Generating port scan report... ($i/$c) \r"
for p in ${ports[*]};do
generate-port-scan-report $t $p &
[ $(jobs|wc -l) -ge $MAX_BACKGROUND_PROCESS ] && wait
done
done
wait
echo " "
}
# Core functions (do not edit) -------------------------------------------------
in-array () # usage: in-array needle [candidates...]
{
# Succeed iff $1 matches one of the remaining arguments. Each candidate is
# treated as a glob pattern (exactly like the original [[ == ]] test), so
# `in-array abc 'a*'` succeeds. Returns 0 on match, 1 otherwise.
x=$1 && shift
local candidate
for candidate; do
case $x in
$candidate) return 0 ;;
esac
done
return 1
}
publish-unpublish ()
{
# Toggle web publication of the current activity: if it already exists under
# $WEBSITE_PATH on $WEBSERVER, offer to remove it; otherwise scp the report
# tree plus the PHP index page there and print the resulting URL.
if sudo ssh $WEBSERVER "ls -d $WEBSITE_PATH/$ACTIVITY_NAME &>/dev/null" ; then
echo
echo -e "This activity is published on \e[40;38;5;82m http://\e[30;48;5;82m$WEBSERVER/activity-reports/$ACTIVITY_NAME \e[0m"
echo
read -sp "[press ENTER to unpublish current activity report]"
sudo ssh $WEBSERVER "rm -rf $WEBSITE_PATH/$ACTIVITY_NAME"
else
read -sp "[press ENTER to publish current activity report]"
sudo scp -r "$ACTIVITY_DIR" "$WEBSERVER:$WEBSITE_PATH/$ACTIVITY_NAME"
sudo scp "$WEBPAGE_FILE" "$WEBSERVER:$WEBSITE_PATH/$ACTIVITY_NAME/index.php"
if [ $? == 0 ]; then
echo
echo -e "This activity report is published on \e[40;38;5;82m http://\e[30;48;5;82m$WEBSERVER/activity-reports/$ACTIVITY_NAME \e[0m"
echo
read -sp "[press ENTER to continue]"
else
echo
echo "Could not publish activity report. Please try again."
echo
read -sp "[press ENTER to continue]"
fi
fi
}
remove-this-activity ()
{
# After confirmation, delete the activity's report tree locally and remove
# its published copy from the web server if one exists; then exit.
echo
echo "You are going to delete $ACTIVITY_DIR and unpublish website if exists."
echo
read -sp "[press ENTER to confirm deletion]"
echo
if sudo ssh $WEBSERVER "ls -d $WEBSITE_PATH/$ACTIVITY_NAME &>/dev/null" ; then
sudo ssh $WEBSERVER "rm -rf $WEBSITE_PATH/$ACTIVITY_NAME"
fi
[ -d "$ACTIVITY_DIR" ] && rm -rf "$ACTIVITY_DIR" && echo "Deleted $ACTIVITY_DIR"
exit 0
}
rename-this-activity ()
{
# Rename the activity directory (and its published copy, if any) to a
# sanitized user-supplied name, then exit so the script restarts under the
# new name. Non-alphanumeric characters in the name become underscores.
if [ ! -d "$ACTIVITY_DIR" ];then
echo; echo "Activity hasn't started yet !"; echo
else
read -p "Enter new name for this activity : " name
if [ "$name" ]; then
name=$(echo "$name"|sed -e 's/[^a-zA-Z0-9]/_/g')
if sudo ssh $WEBSERVER "ls -d $WEBSITE_PATH/$ACTIVITY_NAME &>/dev/null" ; then
sudo ssh $WEBSERVER "mv -f $WEBSITE_PATH/$ACTIVITY_NAME $WEBSITE_PATH/$name"
fi
mv -f "$ACTIVITY_DIR" "$REPORT_DIR/$name" && echo "Rename successful..." && exit 0
fi
fi
read -sp "[press ENTER to continue]"
}
display-menu ()
{
# Main interactive loop: render a count for every basic/advance report file,
# number them, show the action menu, and dispatch on the user's choice —
# digits view a basic report, o<N>/e<N> view per-host command output/error
# directories, and a letter runs the mapped MENU function ('z' maps to exit).
declare -A reports
while :; do
clear
[ -d "$ACTIVITY_DIR" ] && chmod -R 0777 "$ACTIVITY_DIR" 2>/dev/null
# Display report
echo -e "Activity name: $ACTIVITY_NAME Activity dir: $ACTIVITY_DIR"
echo $HR
basic_reports=( $(find "$BASIC_REPORT_DIR" -type d 2>/dev/null) )
unset basic_reports[0]
i=0
report=""
for d in ${basic_reports[*]}; do
report=$report"_\n *_\e[4m$(basename $d)\e[0m \n"
found=( $(find $d -type f 2>>/dev/null) )
for f in ${found[*]}; do
i=$(($i+1))
reports[$i]="$f"
report=$report" $i)_$(basename $f) :_$(cat $f|wc -l) \n"
done
done
advance_reports=( $(find "$ADVANCE_REPORT_DIR" -maxdepth 1 -type d 2>/dev/null) )
unset advance_reports[0]
i=0
for d in ${advance_reports[*]}; do
report=$report"_\n *_\e[4m$(basename $d)\e[0m \n"
found=( $(find "$d" -maxdepth 1 -type d 2>/dev/null) )
unset found[0]
for f in ${found[*]}; do
i=$(($i+1))
reports[e$i]="$f/error"
reports[o$i]="$f/output"
[ -f "$f/name" ] && report=$report" o$i)_$(cat "$f/name"|tr " " "_")_>_output :_$(ls "$f/output"|wc -w) \n"
[ -f "$f/name" ] && report=$report" e$i)_$(cat "$f/name"|tr " " "_")_>_error :_$(ls "$f/error"|wc -w) \n"
done
done
[ ! "${reports[*]}" ] && report="Nothing to show !"
# Underscores act as alignment placeholders for `column -t`; tr turns them
# back into spaces afterwards.
echo -e "$report"|column -t|tr '_' ' '
echo
# Print menu
menu=""
for k in ${!MENU[@]};do
menu=$menu"$k)_${MENU[$k]}\n"
done
echo $HR
echo -e "$menu"|column -x|tr "-" " "|tr "_" " "
echo $HR
# Prompt for input
ans=""
read -p "> " ans
case $ans in
[1-9]|[1-9][0-9])
[ "${reports[$ans]}" ] && echo && cat ${reports[$ans]} && echo && read -sp "[Press ENTER to continue]";;
[eo][1-9]|[eo][1-9][0-9])
read -p "Search hostname(wildcard) or leave blank to display all : " search && echo
option=""
[ "$search" ] && option="-name "$search
for h in $(find ${reports[$ans]} -type f $option|xargs -l basename 2>/dev/null);do
echo " $h $HR"; cat "${reports[$ans]}/$h"; echo;echo;
done
read -sp "[Press ENTER to continue]";;
[a-z])
${MENU[$ans]};;
esac
[ -d "$ACTIVITY_DIR" ] && chmod -R 0777 "$ACTIVITY_DIR" 2>/dev/null
done
echo
}
# Function call ----------------------------------------------------------------
# Entry point: hand control to the interactive menu loop.
display-menu
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.