blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
b87f4738943fc2d7708080bd7647c2431da3fda8
|
Shell
|
XGWang0/Suse_testsuite
|
/tests/qa_test_coreutils/qa_test_coreutils/orig_test_suite/cp/symlink-slash
|
UTF-8
| 1,104
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/sh
# Make sure that cp -dR dereferences a symlink arg if its name is
# written with a trailing slash.
# Coreutils test: builds a scratch dir with dir + symlink->dir, then checks
# that `cp -dR symlink/ s` copies the directory itself, not the link.
if test "$VERBOSE" = yes; then
set -x
cp --version
fi
# Shared test-suite helpers: sanitize env vars and force a default locale.
. $srcdir/../envvar-check
. $srcdir/../lang-default
pwd=`pwd`
t0=`echo "$0"|sed 's,.*/,,'`.tmp; tmp=$t0/$$
# Remove the scratch dir on any exit, preserving the test's exit status.
trap 'status=$?; cd $pwd; rm -rf $t0 && exit $status' 0
trap '(exit $?); exit' 1 2 13 15
framework_failure=0
mkdir -p $tmp || framework_failure=1
cd $tmp || framework_failure=1
mkdir dir || framework_failure=1
ln -s dir symlink || framework_failure=1
if test $framework_failure = 1; then
echo 'failure in testing framework'
exit 1
fi
fail=0
cp -dR symlink/ s || fail=1
# `ls -l` of a copied (empty) directory begins with "total 0"; if cp had
# reproduced the symlink instead, the listing would show `s -> dir'.
set `ls -l s`
# Prior to fileutils-4.0q, the following would have output ...`s -> dir'
# because the trailing slash was removed unconditionally (now you have to
# use the new --strip-trailing-slash option) causing cp to reproduce the
# symlink. Now, the trailing slash is interpreted by the stat library
# call and so cp ends up dereferencing the symlink and copying the directory.
test "$*" = 'total 0' && : || fail=1
(exit $fail); exit $fail
| true
|
64d9a028a98412446368a6134b54f05dc0cf1895
|
Shell
|
medranSolus/fractal_service
|
/setup_server.sh
|
UTF-8
| 398
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/bash
# One-time NFS/SSH server setup. Run as root.
# Optional argument "first": also register the NFS export (do this only once!).
if ! [ -z "$1" ]; then
  if [ "$1" == "first" ]; then
    echo "/home/mpi_fractal/fractal_cluster *(rw,async,no_root_squash,no_subtree_check)" >> /etc/exports
  else
    # Fix: the original fell through to this error (and exited) even after a
    # valid "first" argument, so the service startup below never ran.
    # Also 'exit -1' is not a portable exit status; use 1.
    echo "Unknown command!"
    exit 1
  fi
fi
# NFS server must be started and firewall disabled
systemctl stop nftables
systemctl start nfs-server
systemctl start sshd
exportfs -a
| true
|
743ae7282879eaebea3e9794427c0fa89c7c3c4d
|
Shell
|
liyang2019/nuclei_private
|
/mask_rcnn/build_lib.sh
|
UTF-8
| 1,727
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/bash
# Build the CUDA and Cython extension libraries for the Mask R-CNN nets.
ARCH=sm_37
# build the torch *.so lib for your system
echo '##########################################################'
echo '########### building torch NMS ###########################'
echo '##########################################################'
TORCH_NMS_DIR=net/lib/box/nms/torch_nms/
nvcc -c -o $TORCH_NMS_DIR'src/nms_kernel.cu.o' $TORCH_NMS_DIR'src/nms_kernel.cu' -x cu -Xcompiler -fPIC -arch=$ARCH
python $TORCH_NMS_DIR'build.py'
echo '##########################################################'
echo '########### building roi align pooling layer #############'
echo '##########################################################'
ROI_ALIGN_POLL_TF_DIR=net/lib/roi_align_pool_tf/
nvcc -c -o $ROI_ALIGN_POLL_TF_DIR'src/crop_and_resize_kernel.cu.o' $ROI_ALIGN_POLL_TF_DIR'src/crop_and_resize_kernel.cu' -x cu -Xcompiler -fPIC -arch=$ARCH
python $ROI_ALIGN_POLL_TF_DIR/build.py
# build the cython *.so lib for your system
echo '##########################################################'
echo '########### building cython NMS ##########################'
echo '##########################################################'
# Fix: every 'cd' is now checked — previously a missing directory meant the
# following 'python setup.py' silently ran in the wrong directory.
cd net/lib/box/nms/cython_nms || exit 1
python setup.py build_ext --inplace
echo '##########################################################'
echo '########### building gpu NMS #############################'
echo '##########################################################'
cd ../gpu_nms/ || exit 1
python setup.py build_ext --inplace
echo '##########################################################'
echo '########### building cython overlap layer ################'
echo '##########################################################'
cd ../../overlap/cython_overlap || exit 1
python setup.py build_ext --inplace
| true
|
11f4f71589fd3c3ef5b048dc2896a14c1b31cbf3
|
Shell
|
ajbeattie/pushgateway
|
/scripts/build-jenkins.sh
|
UTF-8
| 522
| 2.78125
| 3
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
#!/bin/sh
#
# build-jenkins.sh builds an ubuntu image
#
set -e
DOCKER_REGISTRY=${DOCKER_REGISTRY:-stg-commercial-systems.docker-registry.canonical.com}
DOCKER=${DOCKER:-docker}
# Tag the image with the current commit hash.
VERSION=$(git rev-parse --verify HEAD)
# Fix: '$DOCKER Dockerfile.ubuntu build' is not a valid docker invocation —
# 'build' is the subcommand and the dockerfile is selected with '-f'.
$DOCKER build -f Dockerfile.ubuntu \
    --build-arg http_proxy \
    --build-arg https_proxy \
    --build-arg no_proxy \
    --build-arg NO_PROXY \
    -t ${DOCKER_REGISTRY}/pushgateway-ubuntu:$VERSION \
    -t ${DOCKER_REGISTRY}/pushgateway-ubuntu:latest .
$DOCKER push --all-tags ${DOCKER_REGISTRY}/pushgateway-ubuntu
| true
|
c830f37791c53b4d682ad4f08b6c56736432037d
|
Shell
|
cbxcube/bashrepo
|
/remote/remoteapp3.sh
|
UTF-8
| 3,223
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# PUT ON REMOTE SERVER
#
# Remote Application 03
# Should be started under local system user
# This script "remoteapp3.sh@kimsufi" should be initialized only from remote system using "manageapp3.sh@loclahost"
# VARIABLES
# NOTE(review): overriding HOME shadows the user's real home directory for
# this script and every child process — consider a dedicated variable name.
HOME="/home/$(echo $USER)/scripts/"
# Log and lock-file paths (note $HOME already ends with '/').
LOG="$HOME/appl3.log"
LOCK="$HOME/.appl3.lock"
# Sequence number printed by debugres() for each traced operation.
debugid="1"
# PID of the backgrounded 'yes' process, set by appl3init().
app3pid=""
# FUNCTIONS
checklog() {
  # Ensure the log file exists; note in the log whether it was found or
  # freshly created.
  if [ ! -f "$LOG" ]; then
    touch "$LOG"
    printf "APPL3.log file not found in APPL3 home. Creating empty log file." >> "$LOG"
  else
    date >> "$LOG"
    printf "APPL3 log file : $LOG already exists and will be appended with new entries." >> "$LOG"
  fi
}
debugres() {
  # Can be called from new line after command or with "||" on one line
  # Example : cmd1 || debugres
  # Capture $? first — any other statement would clobber it.
  local exit_code=$?
  echo "~~~ DEBUG ~~~ : Exit code of operation ID $debugid = $exit_code"
  debugid=$((debugid + 1))
}
appl3init() {
# Start the "application" (a background 'yes' writing to /dev/null),
# store its PID in the global app3pid and in the lock file, and log it.
yes > /dev/null 2>&1 &
app3pid=$!
debugres
echo "$app3pid" > $LOCK
debugres
printf "\nAPPL3INIT: - - - APPL3 RUNNING as $app3pid\n" >> $LOG
}
appl3kill() {
# Stop the application: fall back to the PID stored in the lock file when
# $app3pid is empty (e.g. 'stop' invoked in a fresh process), then kill it
# and remove the lock file.
if [ -z $app3pid ]; then
app3pid="$(cat $LOCK)"
fi
kill $app3pid
debugres
# 'yes | rm' auto-answers any interactive rm prompt.
yes | rm $LOCK
printf " APPL3KILL: - - - APPL3 STOPPED $app3pid\nAPPL3 lock file removed.\n" >> $LOG
}
appl3start() {
  # Start APPL3 unless a lock file shows it is already running.
  if [ -f $LOCK ]; then
    printf "Lock file detected. APPL3 is already running\nExiting.\n" >> $LOG
    debugres
    exit 1
  else
    touch $LOCK
    debugres
    printf "Lock file created in : $LOCK\n ~ ~ ~ \n ~ ~ ~\nStarting APPL3\n">> $LOG
    debugres
    # Fix: the original ran 'appl3init &', which executed the function in a
    # subshell — $app3pid was set there and lost, so the success message
    # below always logged an empty PID. appl3init already backgrounds the
    # app itself, so it must be called synchronously.
    appl3init
    printf "APPL3 successfully started with PID : $app3pid\n ~ ~ ~ \n ~ ~ ~\nAPPL3 is UP.\n" >> $LOG
    debugres
  fi
}
appl3stop() {
# Stop the application and log the shutdown ($app3pid is filled in by
# appl3kill when it was not already set).
appl3kill
debugres
printf "APPL3 with PID : $app3pid stopped.\n ~ ~ ~ \n ~ ~ ~\nAPPL3 is DOWN.\n" >> $LOG
}
appl3status() {
  # Report (into the log) whether APPL3 is running, based on the lock file.
  if [ -f $LOCK ]; then
    # Fix: the original printf mixed quoted and unquoted segments
    # ( ..." = "$(cat $LOCK)\n"" ), mangling the PID and the newline.
    printf "APPL3 is running as the lock file is present in : $LOCK.\nPID of APPL3 is = $(cat $LOCK)\n" >> $LOG
  else
    printf "Lock file and PID of APPL3 not found.\n APPL3 is not running." >> $LOG
  fi
}
appl3help() {
less << EOHELP
REMOTEAPP3(1) User Commands REMOTEAPP3(1)
NAME
remoteapp3.sh - Run application3 on local server. Script is started by remote user over ssh.
SYNOPSIS
remoteapp3.sh [OPTION]...
DESCRIPTION
Starts application3 (SRuns "yes" command)
OPTIONS
start Start application3
stop Stop application3
status Show application3 status
restart Restart application3
-h Show help page for application3
EXIT STATUS
On success, 0 is returned, a non-zero failure code otherwise.
ENVIRONMENT
$APP3HOME
Path to home folder of remoteapp2.sh.
NOTES
The exit() function uses a global variable that is not protected, so it is not thread-safe.
And / Or
command1 && command2
command2 is executed if, and only if, command1 returns an exit status
of zero.
An OR list has the form
command1 || command2
EOHELP
}
# INIT
checklog
# Dispatch on the first command-line argument.
case "$1" in
  start)
    appl3start
    ;;
  stop)
    appl3stop
    ;;
  status)
    # Fix: the original called the undefined function 'appl3stat', which
    # failed with "command not found" at runtime.
    appl3status
    ;;
  restart)
    appl3stop
    appl3start
    ;;
  -h)
    appl3help
    ;;
  *)
    echo $"Usage: $0 {start|stop|status|restart|-h}"
    exit 1
esac
# Fix: the original had a 'debugres' call after this exit — unreachable
# code, removed.
exit 0
| true
|
c052efff9d1b1a19b608efedd6e33c643cd322a3
|
Shell
|
veroandreo/script_collection
|
/proj_scripts/latlongED502utmED50.sh
|
UTF-8
| 514
| 2.8125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Markus Neteler 2/2003
# proj von http://remotesensing.org/proj/
# Interactive: asks for a UTM zone, then pipes coordinates typed by the user
# into cs2cs for a Lat/Long(ED50) -> UTM(ED50) transformation.
echo "Coordinate transformation
from Lat/Long/ED50 datum
to UTM keeping ED50 datum"
echo "we need the UTM zone for the resulting UTM coordinates:"
echo " e.g 32 for Germany"
echo " 32 (F. Ovest) / 33 (F. Est) for Italy"
echo "Enter zone now:"
read zone
echo "Example: 16d32'52E 45d8'23N (d is degree)"
echo "Enter east north [z]"
#projection: LATLONG/ED50 -> UTM/ED50
# EPSG 4230 = ED50 geographic; 230$zone = ED50 / UTM zone $zone (e.g. 23032).
# cs2cs reads coordinate pairs from stdin until EOF.
cs2cs +init=epsg:4230 +to +init=epsg:230$zone
| true
|
ef64c19351c9ac2976a5db60010eb2dca968c614
|
Shell
|
dengmeng123456789/dms
|
/script/ping.sh
|
UTF-8
| 538
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# Scan the first 15 addresses of the local /24 (derived from ens33) and
# record the reachable IPs in $files. Probes run as parallel subshells.
Network=`ifconfig ens33 |awk '/broadcast/{print $2}' | awk -F '.' '{print $1"."$2"."$3}'`
files=/root/sh/IP.txt
[ -f $files ] && echo "存在" || touch $files
# Reset the file so each run starts from a clean list (original comment:
# write a blank so the file is effectively empty).
echo " " > $files
for i in `seq 1 15`
do
  (
    # Only reachable IPs are appended, so the file holds no duplicates.
    # Ping chatter is discarded; only the result matters here.
    if ping -c 3 "$Network".$i > /dev/null 2>&1; then
      echo "$Network".$i >> $files
    fi
    # (The original carried a commented-out 'else' branch for pruning
    # offline IPs from the file; intentionally still not implemented.)
  )&
done
# Fix: wait for all background probes — the original script could exit
# before any subshell had written its result to $files.
wait
| true
|
d866be56d2ef6aede8c31830fb653a6d71d5b476
|
Shell
|
DalavanCloud/hawq
|
/contrib/vexecutor/veinstall.sh
|
UTF-8
| 497
| 3.1875
| 3
|
[
"Apache-2.0",
"MIT",
"BSD-4-Clause-UC",
"BSD-3-Clause",
"ISC",
"bzip2-1.0.6",
"LicenseRef-scancode-unknown-license-reference",
"BSD-4-Clause",
"Artistic-2.0",
"PostgreSQL",
"LicenseRef-scancode-unknown"
] |
permissive
|
#!/bin/bash
# Install the 'vexecutor' extension into a HAWQ cluster: preload the library
# on master and segment, restart the cluster, and create the SQL types.
require_arg() {
  # Exit with the given message when the value is empty.
  [ -n "$1" ] || { echo "$2"; exit 1; }
}
require_arg "$1" "master directory required"
require_arg "$2" "segment directory required"
require_arg "$3" "dbname required"
MASTER_DIRECTORY=$1
SEGMENT_DIRECTORY=$2
DBNAME=$3
for conf in "$MASTER_DIRECTORY/postgresql.conf" "$SEGMENT_DIRECTORY/postgresql.conf"; do
  echo "shared_preload_libraries = 'vexecutor' " >> "$conf"
done
hawq restart cluster -a
psql -d $DBNAME -f ./create_type.sql
exit 0
| true
|
1546ade2d6c17bac2dc51784909658464ee2d768
|
Shell
|
dimon-v/k8s-deploy
|
/kubeadm_v1.13.0/02_install_docker.sh
|
UTF-8
| 913
| 2.59375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
set -e
# Uninstall any pre-installed docker packages.
# Fix: '|| true' — under 'set -e' the script previously aborted right here
# whenever none of these packages was installed (yum remove exits non-zero
# in that case); the removal is best-effort by design.
sudo yum remove docker \
                docker-client \
                docker-client-latest \
                docker-common \
                docker-latest \
                docker-latest-logrotate \
                docker-logrotate \
                docker-selinux \
                docker-engine-selinux \
                docker-engine || true
# Set up repository
sudo yum install -y yum-utils device-mapper-persistent-data lvm2
# Use Aliyun Docker
sudo yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# Install a validated docker version
# https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.13.md#external-dependencies
yum install docker-ce-18.06.0.ce -y
systemctl enable docker
systemctl start docker
docker version
# Use Aliyun docker registry
./use_aliyun_docker_registry.sh
| true
|
3b81d47ff2e2f1bc192c18d1f7d07db98dedef6a
|
Shell
|
andrewrothstein/coreos-skydns-cloudformation
|
/visible/update_iam_stack.sh
|
UTF-8
| 572
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/bash
# Update the IAM CloudFormation stack from iam.json next to this script,
# reusing every previously supplied parameter value.
SCRIPT_PATH="$(cd "$(dirname "$0")" ; pwd -P)"
STACK_NAME="Innovation-Platform-Visible-IAM"
aws cloudformation update-stack \
    --stack-name "$STACK_NAME" \
    --template-body "file://$SCRIPT_PATH/iam.json" \
    --capabilities CAPABILITY_IAM \
    --no-use-previous-template \
    --parameters \
    "ParameterKey=readableS3BucketsGlob,UsePreviousValue=true" \
    "ParameterKey=dockerRegistryS3BucketName,UsePreviousValue=true" \
    "ParameterKey=registerableLoadBalancersPath,UsePreviousValue=true" \
    "ParameterKey=readableDynamoDBTablesPath,UsePreviousValue=true"
| true
|
f162b661019fc480fa8dabaf35414dbc583c6281
|
Shell
|
gmag11/CA_OpenSSL_RSA
|
/create_server_cert.sh
|
UTF-8
| 2,011
| 3.65625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Create a server certificate for <subdomain>.<DOMAIN>, signed by the
# intermediate CA configured in .env (DOMAIN, INT_CA_KEY_PASS,
# INT_CA_FILE_PREFIX). An empty key password means "give me an unencrypted
# key": the sentinel 123456 is used internally and the encrypted .pem is
# deleted at the end.
source .env
echo "Enter subdomain without $DOMAIN ending"
read SUBDOMAIN
echo "Enter private key password. Can be empty to generate unencrypted key only"
read -s KEY_PASS
if [[ "$KEY_PASS" != "" ]]
then
echo "Repeat key password"
read -s KEY_PASS_1
fi
if [[ "$KEY_PASS" == "" ]]
then
KEY_PASS="123456"
else
if [[ "$KEY_PASS" != "$KEY_PASS_1" ]]
then
# echo "Passwords do not match $KEY_PASS - $KEY_PASS_1"
exit 1
fi
fi
#echo Password is ${KEY_PASS}
# Per-subdomain openssl config with the SAN filled in.
cp intermediate/openssl_server.cnf intermediate/openssl_server.${SUBDOMAIN}.cnf
sed -i "s/#SAN_DOMAIN#/${SUBDOMAIN}.${DOMAIN}/" intermediate/openssl_server.${SUBDOMAIN}.cnf
# Create the private key
openssl genrsa -aes256 -out intermediate/private/${SUBDOMAIN}.${DOMAIN}.key.pem -passout pass:${KEY_PASS} 2048
chmod 600 intermediate/private/${SUBDOMAIN}.${DOMAIN}.key.pem
# Create the CSR
openssl req -config intermediate/openssl_server.${SUBDOMAIN}.cnf -new -key intermediate/private/${SUBDOMAIN}.${DOMAIN}.key.pem -out intermediate/csr/${SUBDOMAIN}.${DOMAIN}.csr -passin pass:${KEY_PASS}
# Decrypt key
openssl rsa -in intermediate/private/${SUBDOMAIN}.${DOMAIN}.key.pem -out intermediate/private/${SUBDOMAIN}.${DOMAIN}.key -passin pass:${KEY_PASS}
chmod 600 intermediate/private/${SUBDOMAIN}.${DOMAIN}.key
# Create the Certificate
openssl ca -config intermediate/openssl_server.${SUBDOMAIN}.cnf -extensions server_cert -days 360 -in intermediate/csr/${SUBDOMAIN}.${DOMAIN}.csr -out intermediate/certs/${SUBDOMAIN}.${DOMAIN}.crt.pem -passin pass:${INT_CA_KEY_PASS} -batch
# Validate the certificate
openssl x509 -noout -text -in intermediate/certs/${SUBDOMAIN}.${DOMAIN}.crt.pem
# Create certificate chain
cat intermediate/certs/${SUBDOMAIN}.${DOMAIN}.crt.pem intermediate/certs/${INT_CA_FILE_PREFIX}.crt.pem > intermediate/certs/${SUBDOMAIN}.${DOMAIN}.fullchain.pem
# Drop the password-protected key when the user asked for an unencrypted one.
if [[ "$KEY_PASS" == "123456" ]]
then
rm intermediate/private/${SUBDOMAIN}.${DOMAIN}.key.pem
fi
# Remove the temporary per-subdomain config.
rm intermediate/openssl_server.${SUBDOMAIN}.cnf
| true
|
1c8457cfe2dc4de1c83f08ba929541e3078ca72b
|
Shell
|
OpenMS/build-scripts
|
/platformScripts/shell/macOS/inferSystemVariables.sh
|
UTF-8
| 1,348
| 3.421875
| 3
|
[] |
no_license
|
# macOS-specific environment setup for the OpenMS build harness.
# Assumes ARCH, OPSYS, COMPILER, ARCHIVE_URL_PREFIX (and the helper
# 'sourceHere') are provided by the calling script — TODO confirm.
export SUBDISTRO_NAME="macOS"
export SUBDISTRO_VERSION=$(sw_vers -productVersion)
## Caution: the next line is necessary for the ".app"s to run on older macOS
## even thought the code is backwards-compatible and runs from the command line.
## Caution2: this might interfere with the system libraries that are linked, if
## we ever consider building with e.g. GCC on macOS
export MACOSX_DEPLOYMENT_TARGET="10.9"
export OPENMS_TARGET_ARCH=${ARCH}
export REMOTE_CONTRIB_FOLDER="contrib/$OPSYS/$SUBDISTRO_VERSION/$OPENMS_TARGET_ARCH/$COMPILER/"
export CONTRIB_URL="${ARCHIVE_URL_PREFIX}/openms/${REMOTE_CONTRIB_FOLDER}/contrib_build.tar.gz"
## Special for macOS to allow multiple brew installations
if [ -z "${OPENMS_BREW_FOLDER+x}" ]
then
echo "OPENMS_BREW_FOLDER was not set. Using standard brew installation under /usr/local"
export OPENMS_BREW_FOLDER="/usr/local"
fi
export PATH="${PATH}:${OPENMS_BREW_FOLDER}/bin"
export OPENMS_BREW="${OPENMS_BREW_FOLDER}/bin/brew"
# Optionally unlock the login keychain for code signing when a password is
# supplied via $loginpw.
if [ -z "${loginpw+x}" ]
then
echo "loginpw was not set. Keychain will not be unlocked."
else
echo "loginpw was set. Trying to unlock login keychain to access signing identities."
security unlock-keychain -p $loginpw login.keychain
fi
## Just pass and execute the arguments
function runNative {
echo "Running $*"
(eval "$*")
}
sourceHere ./updatePackageManager.sh
| true
|
eefa42fb20ff0de35b03f16892a3bad836aa436c
|
Shell
|
sio2sio2/doc-linux
|
/docs/_downloads/ecf95272fbdac31610133ef5ca96d092/discover.sh
|
UTF-8
| 6,624
| 4
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Discover machines on the LAN with arp-scan, compare against a list of
# known MACs, and report unexpected on/off machines by mail or stdout.
# Defaults overridable via DISCOVER_LIST / DISCOVER_MAIL.
LIST="${DISCOVER_LIST:-`dirname "$0"`/macs.txt}"
MAIL="${DISCOVER_MAIL:-root@localhost}"
# Print the (Spanish) usage text to stdout.
help() {
echo "$(basename "$0") [opciones]
Descubre máquinas conectadas a la red.
Opciones:
-h, --help Muestra esta misma ayuda.
-i, --interface <IFACE> Interfaz por la que se descubrirán máquinas. Si
no se especifica se sobreentenderán todas las que
posean una sireccion IP. Puede repertirse la opción
para realizar la búsqueda a través de varias
interfaces.
-l, --list <FICHERO> Lista de máquinas conocidas de la red.
-m, --mail <EMAIL> Dirección a la que enviará el correo.
-o, --check-on Comprueba qué máquinas que deberían estar apagadas,
se han quedado encendidas.
-O, --check-off Comprueba qué máquinas que deberían estar encendidas,
se han apagado.
-s, --stdout Muestra el mensaje por pantalla.
Cada ítem de la lista de máquina conocidas tiene este formato:
MAC [-]Descripción
donde 'MAC' es la dirección MAC de la tarjeta y la descripción puede o no ir
antecedida por un signo -. El signo indica que la máquina se espera que esté
siempre encendida.
"
}
#
# Error handling: exit code 0 prints a warning and returns; any other
# code prints an error and terminates the script with that code.
#
error() {
  local code=$1
  shift
  if [ "$code" -ne 0 ]; then
    echo "ERROR. "$* >&2
    exit "$code"
  fi
  echo "¡Atención! "$* >&2
}
soy_root() {
  # True when running with root's effective UID.
  test "$(id -u)" -eq 0
}
#
# Succeeds when the supplied value looks like a real argument:
# non-empty and not starting with '-' (i.e. not an option).
#
check_arg() {
  case "$1" in
    ""|-*) return 1 ;;
    *) return 0 ;;
  esac
}
#
# Checks whether the given pattern (ERE, $1) occurs in a phrase.
# With "-s" as the next argument, the whole phrase must match strictly.
# The phrase may be passed as an argument or on standard input.
# grep -Eo prints the matching fragment, so callers also get the match.
#
es_dir() {
local pattern="$1"
shift
if [ "$1" = "-s" ]; then
pattern="^$pattern\$"
shift
fi
if [ -n "$1" ]; then
echo "$1" | grep -Eo "$pattern"
else
grep -Eo "$pattern"
fi
}
#
# Uses es_dir to determine whether the value is a MAC address.
#
es_mac() {
  # Fix: the original pattern contained "(?", which is an invalid
  # repetition operand in POSIX ERE and made grep -E fail on every call.
  # Lowercase hex digits are accepted too (arp-scan prints lowercase).
  es_dir '\b([0-9A-Fa-f]{2}:){5}[0-9A-Fa-f]{2}\b' "$@"
}
#
# Uses es_dir to determine whether the value is an IPv4 address.
#
es_ip() {
  # Fix: the original pattern contained "(?", which is an invalid
  # repetition operand in POSIX ERE and made grep -E fail on every call.
  es_dir '\b([0-9]{1,3}\.){3}[0-9]{1,3}\b' "$@"
}
#
# Lists the interfaces that have an IPv4 address (loopback excluded).
# $@: interface names to restrict/validate; with no arguments, every
# interface except 'lo' is returned.
#
get_ifaces() {
local IFS="|"
local cond='$2 != "lo"'
# "$*" joins the requested names with '|' (IFS) to build an awk regex.
[ $# -gt 0 ] && cond='$2 ~ '"/($*)/"
ip -o -4 addr show | awk "$cond"' {print $2}'
}
#
# Filters arp-scan output down to the host lines (those starting with an
# IP), emitting "MAC IP" unbuffered, then annotates them via filtro_macs.
#
filtro_arp() {
stdbuf -o0 awk '$0 ~ /^[0-9]+\./ {print $2, $1}' | filtro_macs
}
#
# Looks up the received MACs in the list of known machines, appending the
# description when found and flagging unknown or unexpected machines.
# Standard input: lines of the form "MAC IP".
# Reads globals: LIST (should-be-off machines), LISTP (should-be-on),
# ON/OFF (which report was requested).
#
filtro_macs() {
local device mac found on IFS cond
while read -r device; do
mac="${device% *}"
# Machine is on and expected on → remember it for the OFF report below.
found=$(echo "$LISTP" | grep -Eoi '^'"$mac") && on="$on $found"
if [ -n "$ON" ] && [ -z "$found" ]; then
# Machine is on but expected off (or entirely unknown).
if found=$(echo "$LIST" | grep -Ei '^'"$mac"); then
set -- $found
shift
echo "$device '$*'"
else
echo "$device 'Dispositivo desconocido'"
fi
fi
done
# Report machines that are off although they should be on.
if [ -n "$OFF" ]; then
set -- $on
if [ $# -gt 0 ]; then
IFS="|"
cond='$1 !~ /'"($*)/"
fi
# NOTE(review): when no expected-on machine answered, $cond stays empty
# and awk reports every LISTP entry — appears intended, confirm.
echo "$LISTP" | awk "$cond "'{$2=sprintf("APAGADO \"%s", substr($2, 2)); print $0 "\""}'
fi
}
#
# Formats the output lines for the report.
# Input lines look like: MAC IP 'Description'
# (xargs splits each line into the three printf fields.)
#
formatear() {
xargs -L1 printf "%17s [%15s] ... %s\n"
}
#
# Sends the report by mail.
# $1: message body. Recipient comes from the global $MAIL.
#
send_mail() {
  # Fix: the original addressed the mail to the never-assigned lowercase
  # $mail, leaving the To: header empty ($MAIL holds the recipient).
  echo "From: root@localhost
To: $MAIL
Subject: Informe sobre dispositivos encendidos/apagados.
$1" | sendmail -t
}
soy_root || error 1 "El scrip usa arp-scan que requiere permisos de administración"
#
# Argument handling
#
{
  IFACES=
  while [ $# -gt 0 ]; do
    case $1 in
      -h|--help)
        help
        exit 0
        ;;
      -i|--interface)
        check_arg "$2" || error 2 "Opción $1: Falta argumento"
        shift
        IFACES="$IFACES $1"
        ;;
      -l|--list)
        check_arg "$2" || error 2 "Opción $1: Falta argumento"
        shift
        LIST="$1"
        ;;
      -m|--mail)
        check_arg "$2" || error 2 "Opción $1: Falta argumento"
        shift
        MAIL="$1"
        ;;
      -o|--check-on)
        ON=1
        ;;
      -O|--check-off)
        OFF=1
        ;;
      -s|--stdout)
        STDOUT=1
        ;;
      *)
        error 2 "$1: Opción desconocida"
        ;;
    esac
    shift
  done
  # Default action: check for devices left on that should be off.
  [ -z "$ON$OFF" ] && ON=1
  # -s disables mailing entirely.
  [ -n "$STDOUT" ] && MAIL=
}
if [ -n "$MAIL" ] && ! which sendmail >/dev/null; then
  error 1 "Falta el ejecutable sendmail. Use -s si quiere imprimir por pantalla"
fi
if [ ! -f "$LIST" ]; then
  error 0 "'$LIST' no existe, por lo que no se usará."
  LIST=""
  [ -n "$OFF" ] && error 1 "Sin listado de máquinas no puede determinarse qué máquinas deberían estar encendidas"
else
  # Machines expected to be always on (description starts with '-').
  LISTP=$(awk '/^[^#]/ && $2 ~ /^-/ {print}' "$LIST")
  # Machines expected to be off.
  LIST=$(awk '/^[^#]/ && $2 !~ /^-/ {print}' "$LIST") || LIST=
fi
mensaje="$(get_ifaces $IFACES | xargs -n1 -P0 arp-scan -l -I | filtro_arp | formatear)"
# Fix: the original used 'return 0' here — invalid at the top level of an
# executed script (only valid in functions or sourced files); use exit.
[ -n "$mensaje" ] || exit 0
mensaje="El sistema ha detectado que los siguientes dispositivos
o están encendidos sin tener por qué o están apagados debiendo
estar encendidos:
$mensaje
"
# Fix: the mail/stdout switch tested the never-set lowercase $mail, so the
# report was always echoed; $MAIL is the recipient variable.
[ -n "$MAIL" ] && send_mail "$mensaje" || echo "$mensaje"
| true
|
727dd020e3dc207ebca9220817d8ead4df22854b
|
Shell
|
ARM-DOE/warno
|
/utility_setup_scripts/gen_certs.sh
|
UTF-8
| 491
| 2.734375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
# Generate a proxy TLS certificate signed by the repo's root CA.
# Source http://stackoverflow.com/questions/59895/can-a-bash-script-tell-what-directory-its-stored-in
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# 2048-bit private key for the proxy.
openssl genrsa -out $DIR/../proxy/privkey.pem 2048
# Certificate signing request for that key.
openssl req -new -key $DIR/../proxy/privkey.pem -out $DIR/../privreq.csr
# Sign the CSR with the root CA; certificate valid for 3 years (1095 days).
openssl x509 -req -in $DIR/../privreq.csr -CA $DIR/../data_store/data/rootCA.pem -CAkey $DIR/../data_store/data/rootCA.key -CAcreateserial -out $DIR/../proxy/cacert.pem -days 1095 -sha256
| true
|
559b3cb4f48c6020ab2bc1126fb838cc08c32233
|
Shell
|
RHeijblom/masterproject_benchmark
|
/ltsmin2eventstats.sh
|
UTF-8
| 3,534
| 4.34375
| 4
|
[] |
no_license
|
#!/bin/bash
# Combine LTSmin output files from a directory into one CSV of event-span
# statistics. Usage: $0 <outputdirectory> <ltsmin_binary> <result>.csv
# HARDCODED CONTROL PARAMETERS
# Flag to force each file to be checked if produced by $LTSMIN_BIN; files failing this condition will be notified to the user
CHECK_NATURE_FLAG=true
# CSV cell constants used by the analysis loop below.
UNKNOWN_VALUE='"unknown",'
EMPTY_VALUE='"",'
OOTIME_VALUE='"ootime",'
OOMEM_VALUE='"oomem",'
ERROR_VALUE='"error",'
# Regroup is fixed during experiment and MAY not be extracted from filename
# NOTE(review): has_regroup is never read anywhere in this script — confirm
# whether it is dead or meant to feed the CSV.
has_regroup=true
# VERIFY CORRECT USAGE
# If usage is incorrect, print script use and exit
if [ $# -lt 3 ]; then
>&2 echo "Combines the outputfiles in a directory into a single csv file"
>&2 echo "Usage: $0 <outputdirectory> <ltsmin_binary> <result>.csv"
exit 1
fi
# VERIFY PROGRAM ARGUMENTS
# Validate input directory
INPUT_DIR=$1
if [ ! -d "$INPUT_DIR" ]; then
>&2 echo "$INPUT_DIR does not exists or is not a directory."
exit 1
fi
# Name of the binary of LTSmin used for simulation. This var determines the identification of result produced by ltsminStat
LTSMIN_BIN=$2
# Validate output_file
OUTPUT_FILE=$3
touch "$OUTPUT_FILE"
if [ $? -ne 0 ]; then
>&2 echo "Cannot create or modify $OUTPUT_FILE."
exit 1
fi
# Helper method to add empty values
# $1 = file which needs to be padded
# $2 = number of values which needs to be padded (optional, default 1)
function padvalue() {
  local count=1
  local i
  if [ $# -ge 2 ]; then
    count=$2
  fi
  for (( i = 0; i < count; i++ )); do
    >>"$1" echo -n "$EMPTY_VALUE"
  done
}
# START CSV FILE CREATION
# Print csv header
>"$OUTPUT_FILE" echo '"filename","filetype","event-span","event-span-norm","weighted-event-span","weighted-event-span-norm",'
# Analyse all files.
# NOTE(review): this word-splits find's output, so filenames containing
# whitespace would break — appears acceptable for these benchmark names.
for file in $(find "$INPUT_DIR" -type f); do
  do_analyse_file=true
  # FILE FORMAT CHECK
  # Fix: the original wrote 'if [ CHECK_NATURE_FLAG ]', which tests a
  # non-empty literal string and is therefore always true; test the
  # variable's value instead so the flag actually controls the check.
  if [ "$CHECK_NATURE_FLAG" = true ]; then
    echo "$file" | grep "$LTSMIN_BIN" > /dev/null
    if [ $? -ne 0 ]; then
      # File violates format
      >&2 echo "$file: violates format and is skipped for analysis." # Notify user
      do_analyse_file=false
    fi
  fi
  if $do_analyse_file; then
    # FILENAME and FILETYPE
    grep ": opening " "$file" > /dev/null
    if [ $? -eq 0 ]; then
      filename=$(awk '{ if ($3 == "opening") { "basename "$4 | getline name ; printf "%s", name } }' "$file")
      # 'Magic' snatched from http://stackoverflow.com/questions/965053/extract-filename-and-extension-in-bash
      model="${filename%.*}"
      extension="${filename##*.}"
      >>"$OUTPUT_FILE" echo -n "\"$model\",\"$extension\","
    else
      # No file found
      >>"$OUTPUT_FILE" echo -n "$UNKNOWN_VALUE"
      padvalue "$OUTPUT_FILE"
    fi
    # EVENT SPAN
    grep "Event Span: " "$file" > /dev/null
    if [ $? -eq 0 ]; then
      awk '{
      if ($1" "$2 == "Event Span:") printf "\"%s\",", $3
      }' "$file" >>"$OUTPUT_FILE"
    else
      padvalue "$OUTPUT_FILE"
    fi
    grep "Normalized Event Span: " "$file" > /dev/null
    if [ $? -eq 0 ]; then
      awk '{
      if ($1" "$2" "$3 == "Normalized Event Span:") printf "\"%s\",", $4
      }' "$file" >>"$OUTPUT_FILE"
    else
      padvalue "$OUTPUT_FILE"
    fi
    # WEIGHTED EVENT SPAN
    grep "Weighted Event Span, " "$file" > /dev/null
    if [ $? -eq 0 ]; then
      awk '{
      if ($1" "$2" "$3 == "Weighted Event Span,") printf "\"%s\",", $7
      }' "$file" >>"$OUTPUT_FILE"
    else
      padvalue "$OUTPUT_FILE"
    fi
    grep "Normalized Weighted Event Span, " "$file" > /dev/null
    if [ $? -eq 0 ]; then
      awk '{
      if ($1" "$2" "$3" "$4 == "Normalized Weighted Event Span,") printf "\"%s\",", $7
      }' "$file" >>"$OUTPUT_FILE"
    else
      padvalue "$OUTPUT_FILE"
    fi
    # New line in order to finish current row
    >>"$OUTPUT_FILE" echo ""
  fi
done
| true
|
df749e9664b4b39d6c09e83ac269ddc9fa5a642e
|
Shell
|
nuno-c-afonso/gesto_eval
|
/basho_bench/scripts/change_tree.sh
|
UTF-8
| 898
| 3.15625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Push a new tree configuration into saturn_leaf on every leaf, internal and
# receiver node (host lists read from ./scripts/*), via parallel ssh.
set -u
set -e
FAIL=0
command="setting tree to: $1"
leafs=`cat ./scripts/leafs`
internals=`cat ./scripts/internals`
receivers=`cat ./scripts/receivers`
# Remote command: rewrite the {tree, ...} tuple in saturn_leaf.app.src.
Command2="cd ./saturn_leaf && sudo sed -i -e 's#{tree.*#{tree, \"$1\"},#' src/saturn_leaf.app.src"
# NOTE(review): ${Command2/localhost/$node} substitutes "localhost", but
# Command2 never contains that string, so the substitution is a no-op —
# presumably a leftover template; confirm before relying on it.
echo $command" for leafs:"$leafs
for node in $leafs
do
ssh -o ConnectTimeout=10 -t root@$node ${Command2/localhost/$node} &
done
echo $command done
echo $command" for internals:"$internals
for node in $internals
do
ssh -o ConnectTimeout=10 -t root@$node ${Command2/localhost/$node} &
done
echo $command done
echo $command" for receivers:"$receivers
for node in $receivers
do
ssh -o ConnectTimeout=10 -t root@$node ${Command2/localhost/$node} &
done
echo $command done
# Barrier: count how many of the background ssh jobs failed.
for job in `jobs -p`
do
wait $job || let "FAIL+=1"
done
if [ "$FAIL" == "0" ];
then
echo "$command finished."
else
echo "Fail! ($FAIL)"
fi
| true
|
1eb62728ae6de6d3e84996c696a9e0c138ea5b10
|
Shell
|
xoqhdgh1002/SHELL_SCRIPT
|
/CHAPTER3/olddel/olddel.sh
|
UTF-8
| 531
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/bash
# Compare two files and delete the older one.
# (Original comments were in Korean; translated to English.)
# Files to compare.
log1="log1.log"
log2="log2.log"
# Exit if the given file does not exist.
filecheck()
{
if [ ! -e "$1" ]; then
echo "ERROR: File $1 does not exist." >&2
exit 1;
fi
}
filecheck "$log1"
filecheck "$log2"
# Compare the two files and remove the older one ('-nt' = newer than).
if [ "$log1" -nt "$log2" ]; then
echo "[$log1]->newer, [$log2]->older"
rm $log2
else
echo "[$log2]->newer, [$log1]->older"
rm $log1
fi
| true
|
bf6b127c20ea86ba3bb0a1a4251d5823a0916c23
|
Shell
|
NGenetzky/dotfiles
|
/downloads/mplab.sh
|
UTF-8
| 284
| 2.859375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Download the MPLAB X v5.10 Linux installer via the shared wget_download
# helper defined in _downloads.bash (sourced below).
SCRIPTDIR="$(CDPATH= cd -- "$(dirname -- "$0")" && pwd -P)"
source "${SCRIPTDIR}/_downloads.bash"
# Bash Strict Mode
set -eu -o pipefail
wget_download \
'ww1.microchip.com/downloads/en/DeviceDoc/MPLABX-v5.10-linux-installer.tar' \
'MPLABX-v5.10-linux-installer.tar'
| true
|
f7446830cc8454dbb0944696b609b8e46215a479
|
Shell
|
ankit1057/android-scripts
|
/pull
|
UTF-8
| 1,096
| 4.0625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# A simple script for backing up important files(contacts, call log, messages) for android
source common
# Zip a file/dir on the device (adb_zip comes from 'common'), pull the zip
# to $2, then delete the on-device zip.
adb_zip_and_pull(){
#TODO handle paths, currently file or folder is pulled in the same directory without structure
filename="$1"
backup_dir="$2"
adb_zip "$filename" && adb pull "$filename.zip" "$backup_dir/"
adb shell "rm $filename.zip"
}
# Back up every path listed in $1 that is not yet recorded in the backup
# log, pulling each via adb_zip_and_pull into $2 (or a timestamped dir).
adb_backup(){
#TODO maybe use hash to check if the files in backup and in phone are synced
if [ $# -lt 1 ]; then
echo 'Usage: adb_backup <list_file> [backup_dir]'
return;
fi
list_file="$1"
if [ "$2" == "" ]; then
backup_dir="android-backup-$(date +%d%m%y%H%M%S)"
else
backup_dir="$2"
fi
if [ ! -d "$backup_dir" ]; then
mkdir "$backup_dir"
fi
# log.txt records successfully pulled paths so reruns skip them.
logfile="$backup_dir/log.txt"
if [ ! -f "$logfile" ]; then
touch "$logfile"
fi
# NOTE(review): comm -23 requires both inputs to be sorted — confirm the
# list file and log are kept sorted, otherwise entries may be re-pulled.
files_to_backup=$(comm -23 "$list_file" "$logfile")
adb root
for i in ${files_to_backup}; do
adb_zip_and_pull "$i" "$backup_dir" && echo "$i" >> "$logfile"
done
}
| true
|
e0479d26f5b86921615706a0c827649a9e65b8ca
|
Shell
|
openhpc/ohpc
|
/tests/dev-tools/numpy/ohpc-tests/test_mpi_families
|
UTF-8
| 941
| 3.3125
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# -*-sh-*-
# Run the numpy test suite for every (compiler, python) family combination.
# $COMPILER_FAMILIES / $PYTHON_FAMILIES, get_python_vars and
# save_logs_compiler_family come from the files sourced below.
TEST_LOGS=""
MAKEFLAGS=""
status=0
source ./common/TEST_ENV || exit 1
source ./common/functions || exit 1
cd dev-tools/numpy || exit 1
export BATS_JUNIT_CLASS=Numpy
for compiler in $COMPILER_FAMILIES ; do
for python in $PYTHON_FAMILIES ; do
echo " "
echo " "
echo "----------------------------------------------------------"
echo "Dev tools: $python-Numpy tests: $compiler"
echo "----------------------------------------------------------"
get_python_vars $python
module purge || exit 1
module load $compiler || exit 1
module load $python_module_prefix-numpy || exit 1
make clean >& /dev/null || exit 1
# -k: keep going so one failure does not hide the rest; remember it.
make -k check || status=1
save_logs_compiler_family tests $python-$compiler
make clean >& /dev/null
done
done
exit ${status}
| true
|
7a10c85375a59c3f442ea12fa62fbbc19005f13d
|
Shell
|
omalashenko/hg2cvs
|
/hg2cvs.sh
|
UTF-8
| 7,463
| 3.875
| 4
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# set -x
DEBUG=0
DRY_RUN=0
USE_CVS_LOCK=0
if [ -z "$DRY_RUN" ] ; then
DRY_RUN=0
fi
# Emit a warning message on stderr (arguments are joined with spaces).
warning() {
    printf 'WARNING: %s\n' "$*" 1>&2
}

# Emit a debug message on stdout, but only when the global DEBUG flag is "1".
debug() {
    if [ "$DEBUG" = "1" ]; then
        printf 'DEBUG: %s\n' "$*"
    fi
}
do_cvs()
{
if [ $DRY_RUN -ne 0 ] ; then
cvs -d "$CVS_ROOT" -n "$@"
else
cvs -d "$CVS_ROOT" "$@"
fi
}
# Walk down a relative path, 'cvs add'-ing every component that is not yet
# under CVS control, cd-ing one level deeper per iteration (for "a/b/c" it
# processes "a", then "b", then "c") and leaving the shell in the deepest
# directory.  The caller is expected to save/restore the working directory
# (do_cvsimport wraps calls in pushd/popd).
add_cvsfolder()
{
# $a: remaining path still to process; $p: previous value of $a --
# the loop stops once stripping no longer changes the path.
a="$1"
p=
# Expansion patterns: "/*" strips from the first '/' to the end (leaving the
# head component); "*/" strips up to and including the first '/'.
first="/*"
second="*/"
if [ "$a" != "." ]
then
# When no '/' remains, both expansions leave $a unchanged, so after the
# last component has been added, p == a terminates the loop.
while [ "$a" != "$p" ]
do
p="$a"
f="${a/$first}"
a="${a#$second}"
# A directory without a CVS/ subdir has not been 'cvs add'-ed yet.
if [ ! -d "$f/CVS" ] ; then
do_cvs -Q add "$f"
fi
cd "$f"
done
fi
}
cvs_lock_files()
{
if [ "$USE_CVS_LOCK" = "1" ] ; then
if [ "x$@" != "x" ] ; then
do_cvs -Q admin -l "$@"
fi
fi
}
cvs_unlock_files()
{
if [ "$USE_CVS_LOCK" = "1" ] ; then
if [ "x$@" != "x" ] ; then
do_cvs -Q admin -u "$@"
fi
fi
}
do_cvsimport()
{
local hg_rev="$1"
local is_merge="$2"
hg up -C $hg_rev
if [ $? -ne 0 ] ; then
warning "Unable to update working copy to $rev"
return 1
fi
local file_list=/tmp/hg2cvs.${hg_rev}.changes
hg log --style "$HG_FILES_STYLE" -r "$hg_rev" > $file_list
if [ $? -ne 0 ] ; then
warning "Unable to identify affected files"
rm -f $file_list
return 1
fi
local added_files=()
local changed_files=()
local deleted_files=()
while read line
do
local mod_type=${line:0:1}
local mod_file="${line:2}"
# skip .hg* files
if [ "${mod_file:0:3}" = ".hg" ] ; then
continue;
fi
case $mod_type in
A) added_files[${#added_files[@]}]="$mod_file" ;;
M) changed_files[${#changed_files[@]}]="$mod_file" ;;
R) deleted_files[${#deleted_files[@]}]="$mod_file" ;;
*) warning "Bad modification type $mod_type" ; return 1 ;;
esac
done < $file_list
rm $file_list
debug added: "${added_files[@]}"
debug changed: "${changed_files[@]}"
debug deleted: "${deleted_files[@]}"
local n_files=$((${#deleted_files[@]} + ${#added_files[@]} + ${#changed_files[@]}))
if [ "$n_files" -eq 0 ] ; then
debug "Nothing to commit to CVS"
return 0;
fi
if [ ${#deleted_files[@]} -gt 0 ] ; then
do_cvs remove -f "${deleted_files[@]}"
fi
for added in "${added_files[@]}"
do
pushd .
local dirpath=$(dirname "$added")
add_cvsfolder "$dirpath"
if [ $? -ne 0 ]; then
return 1
fi
local filename=$(basename "$added")
do_cvs add "$filename"
if [ $? -ne 0 ]; then
return 1
fi
popd
done
local descfile=/tmp/hg2cvs.desc.$hg_rev
local revspec=$hg_rev
if [ -n "$is_merge" ] ; then
revspec="ancestors($hg_rev) - ancestors(${hg_rev}^1)"
fi
hg log --template "{desc}\n-- {author} {date|isodatesec} commit {node|short}\n\n" -r "$revspec" > $descfile
cvs_lock_files "${deleted_files[@]}" "${changed_files[@]}"
if [ $? -ne 0 ] ; then
warning Unable to CVS-lock files
cvs_unlock_files "${deleted_files[@]}" "${changed_files[@]}"
return 1
fi
# dry run to check if commit succeeds
do_cvs -Q -n commit -F $descfile "${added_files[@]}" "${deleted_files[@]}" "${changed_files[@]}"
if [ $? -ne 0 ] ; then
warning CVS commit verification failed
cvs_unlock_files "${deleted_files[@]}" "${changed_files[@]}"
return 1
fi
# real commit, we don't expect it to fail
# if succeeded, commit releases all the locks held
do_cvs -Q commit -F $descfile "${added_files[@]}" "${deleted_files[@]}" "${changed_files[@]}"
local ret=$?
if [ $ret -ne 0 ] ; then
warning CVS commit failed, revision $hg_rev might be half-commited to CVS
cvs_unlock_files "${deleted_files[@]}" "${changed_files[@]}"
fi
rm $descfile
return $ret
}
export_commits()
{
local branch="$1"
local history_file="$2"
local last_imported_rev=null
if [ -f "$history_file" ] ; then
last_imported_rev=$(tail -n 1 "$history_file")
fi
debug "Last imported revision: $last_imported_rev"
local heads=$(hg log --template '{node} ' -r "heads(branch($branch))")
debug "Heads: $heads"
if [ -n "$(echo $heads | grep $last_imported_rev)" ] ; then
echo "Branch $branch is up to date"
return 0;
fi
while true; do
if [ "$last_imported_rev" = "null" ] ; then
last_imported_rev=$(hg log --template '{node}' -r "roots(branch($branch))")
# TODO bail out if more than one root
else
last_imported_rev=$(hg log --template '{node}' \
-r "branch($branch) and first(children(${last_imported_rev}), 1)")
fi
local is_merge=$(hg log --template '{node}' -r "merge() and $last_imported_rev" )
local tags=$(hg log --template '{tags}' -r $last_imported_rev | sed 's|tip||')
echo Importing $last_imported_rev
if [ -n "$tags" ] ; then
debug "$last_imported_rev TAGGED $tags"
fi
if [ -n "$is_merge" ] ; then
echo "$last_imported_rev is a MERGE changeset";
fi
do_cvsimport "$last_imported_rev" "$is_merge"
if [ $? -ne 0 ] ; then
warning "CVS import of $last_imported_rev failed"
return 1
fi
echo $last_imported_rev > $history_file
for t in $tags ; do
echo Tagging with $t
do_cvs -Q tag -F -R $t .
if [ $? -ne 0 ]; then
warning CVS tag has failed: $t
fi
done
if [ -n "$(echo $heads | grep $last_imported_rev)" ] ; then
break;
fi
done
}
###############################################################################
# ENTRY POINT #
###############################################################################
if [ $# -ne 2 ] ; then
echo "Usage: hg2cvs <cvsroot> <cvs-sandbox-path>"
exit 1
fi
CVS_ROOT="$1"
CVS_SANDBOX="$2"
HG_FILES_STYLE=$(dirname $0)/files.style
hg_branches=$(hg branches | cut -f 1 -d ' ')
lock_file="$(hg root)/.hg/hg2cvs.lock"
lockfile -r0 $lock_file
if [ $? -ne 0 ]; then
warning "Someone else is currently using hg2cvs bridge,
your changes have been submitted to the Mercurial repository but
not yet committed to CVS. They will be committed to CVS next
time someone pushes to the Mercurial repository."
exit 1
fi
echo "============== hg2cvs =============="
for branch in $hg_branches
do
cvs_branch="${CVS_SANDBOX}/${branch}"
if [ ! -d "$cvs_branch" ]; then
echo "Skipping unmapped HG branch ${branch}"
continue
fi
echo "Importing branch $branch"
hg push -b $branch "$cvs_branch"
if [ $? -ne 0 ] ; then
warning "Push to $cvs_branch failed, skipping"
continue
fi
history_file="$(pwd)/.hg/hg2cvs.${branch}.history"
pushd "$cvs_branch" > /dev/null 2>&1
export_commits "$branch" "$history_file"
popd > /dev/null 2>&1
done
rm -f $lock_file
echo "============== done hg2cvs =============="
| true
|
4ef641b957cabe8319d95025370b1e3bf693dbc7
|
Shell
|
toradex/a71ch-demo
|
/toradex/target/update.sh
|
UTF-8
| 3,748
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
#****************************************************************************
#
# Copyright (C) 2019 Toradex AG
# Contact: https://www.toradex.com/locations
#
# This file is part of the Toradex of the A71CH workshop demo.
#
# BSD License Usage
# Alternatively, you may use this file under the terms of the BSD license
# as follows:
#
# "Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Toradex Ag nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
#****************************************************************************/
DEFAULT_CONTAINER_NAME="a71ch_demo_debian"
echo ""
echo "***************************************"
echo " POTENTIAL TORADEX UPDATE TOOL "
echo " "
echo " ** "
echo " ************ "
echo " **************** "
echo " ****************** "
echo " ******** **** * **** **** "
echo " ******** *** ****** ******"
echo " ******** ***** * *** *** "
echo " ***************** "
echo " ************** "
echo " ********** "
echo " "
echo " This is a demonstration for the the "
echo " usage of the A71CH in combination with"
echo " the Toradex Colibri iMX6ULL and the "
echo " new OS of Toradex called Torizon. "
echo " "
echo "***************************************"
echo ""
echo "Type in the DOCKER IMAGE ID which you want to replace by a new one, followed by [ENTER]:"
read TOREPLACE
echo "Type in the target TARBALL NAME of the DOCKER IMAGE which you want to install, followed by [ENTER]:"
read IMAGETODOWNLOAD
echo "Image to replace: $TOREPLACE"
echo "Image to download and install: $IMAGETODOWNLOAD"
#download of the file
echo "Executing the secure downoad of the requested image."
./a71chtdx -s 192.168.10.1 -p 8080 -f $IMAGETODOWNLOAD
if [ $? -ne 0 ]; then #if the secure download failed, stop the process
echo "Secure download failed. Update process is cancelled. Old container is still available."
exit 1
fi
#Stop and remove the running container
docker stop $DEFAULT_CONTAINER_NAME > /dev/null 2>&1 || true
docker rm $DEFAULT_CONTAINER_NAME > /dev/null 2>&1 || true
#Remove the image, which should be replaced by the new one
docker rmi $TOREPLACE > /dev/null 2>&1
# Import the downloaded tarball as a new docker image.
echo "Download done."
echo "Executing the import of the image into docker."
# BUG FIX: the original line lacked the '>' redirection ("... latest /dev/null
# 2>&1"), so /dev/null was passed to 'docker import' as an extra argument and
# stdout was never silenced.  Feeding the tarball via '<' also drops the
# useless 'cat' and keeps $? as docker's own exit status.
docker import - torzion/a71chdemo:latest < "$IMAGETODOWNLOAD" > /dev/null 2>&1
if [ $? -ne 0 ]; then #if the import failed, stop the process
echo "Import failed. Unfortunately, the old container and the image is already deleted."
#delete local tar file
rm -- "$IMAGETODOWNLOAD"
exit 1
fi
#delete local tar file
rm -- "$IMAGETODOWNLOAD"
#run the image
echo "Import done."
echo "Creating a container out of the image and run it."
echo ""
docker run -it --privileged --entrypoint=/opt/welcome.sh --name $DEFAULT_CONTAINER_NAME -v /var/run/dbus:/var/run/dbus -v /dev:/dev torzion/a71chdemo:latest
| true
|
446b3b2f4cdce9c9fdcb7a29503da5729decb4f9
|
Shell
|
milbul/OS
|
/lab3/6task_handler.sh
|
UTF-8
| 307
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# Signal-driven counter demo: writes its own PID to .pid so a controller can
# signal it, then once per second applies the current operation and prints
# the running value.
#   SIGUSR1 -> switch to addition        (ans = ans + 2)
#   SIGUSR2 -> switch to multiplication  (ans = ans * 2)
#   SIGTERM -> exit
echo $$ > .pid
ans=1
operation="+"
# The handlers only flip the pending operation; the main loop applies it.
usr1()
{
operation="+"
}
usr2()
{
operation="*"
}
sigterm()
{
exit
}
trap 'usr1' USR1
trap 'usr2' USR2
trap 'sigterm' SIGTERM
# Main loop: apply the currently selected operation, print, sleep, repeat.
while true; do
case $operation in
"+")
ans=$(($ans+2))
;;
'*')
ans=$(($ans*2))
;;
esac
echo $ans
sleep 1
done
| true
|
55c9ed9e69efcab070c090e2eb306054e1ded7a2
|
Shell
|
xxwdll/shell_repo
|
/03开机启动脚本/start_task.sh
|
GB18030
| 1,290
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/bash
# chkconfig: 2345 10 90
# description: start task
export pid=` ps -ef | grep java |grep taskuap_8003|grep -v grep|awk '{ print $2 }'`
if [ -z $pid ];then
echo "the process is not running"
else
kill -9 $pid
echo "kill the session "$pid
fi
sleep 2
cd /home/weblogic/bea/user_projects/domains/task_uap_domain/servers/taskuap_8003
rm -rf cache/ data/ logs/ tmp/
cd /home/weblogic/bea/user_projects/domains/task_uap_domain/bin
rm -rf nohup.out
rm -rf taskuap_85.log
#xhost +
nohup /home/weblogic/bea/user_projects/domains/task_uap_domain/bin/startWebLogic.sh >taskuap_85.log &
sleep 2
echo "TASKUAPУɺTASKȴ"
sleep 250
echo "TASKУ"
export pid=` ps -ef | grep java |grep task_8004|grep -v grep|awk '{ print $2 }'`
if [ -z $pid ];then
echo "the process is not running"
else
kill -9 $pid
echo "kill the session "$pid
fi
sleep 2
cd /home/weblogic/bea/user_projects/domains/task_domain/servers/task_8004
rm -rf cache/ data/ logs/ tmp/
cd /home/weblogic/bea/user_projects/domains/task_domain/bin
rm -rf nohup.out
rm -rf task_85.log
#xhost +
nohup /home/weblogic/bea/user_projects/domains/task_domain/bin/startWebLogic.sh >task_85.log &
sleep 2
#tail -400f task_85.log
| true
|
eaa21f41a7689905098985f3c703ee08f6d81878
|
Shell
|
xcountry02/Group-Forward-Regression
|
/Simulations/Result/Int/gather.sh
|
UTF-8
| 1,471
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash
#===========================================================================
# Program: gather.sh
#
# Author: K. Michels
# Language: Bash
# To Compile: ./gather.sh p0 n n_grp var sigma mbic ibic iter
#
#-----------------------------------------------------------------------------
#
# Description: This bash script simply takes the first 3 lines of each
# result file (from analysis), and gathers them into one
# file.
#
# Input: $1 -- p0 -- Number of groups
# $2 -- n -- Sample size
# $3 -- n_grp -- group size
# $4 -- var -- var_ind (0 or 1)
# $5 -- sigma -- Sigma value
# $6 -- mbic -- main BIC value
# $6 -- ibic -- interaction BIC value
# $7 -- iter -- Number of iteractions for each data set
#
# Output: Creates "results.*" file
#
# Known Bugs: None; all operations work as they should.
#
#===========================================================================
# ./gather.sh p0 n n_grp var sigma mbic ibic iter
p0=$1
n=$2
ngrp=$3
var=$4
sigma=$5
mbic=$6
ibic=$7
iter=$8
for i in $(seq 0 $(($iter - 1)))
do
if [ $i -eq 0 ]
then
head -3 $p0$n$ngrp$sigma$mbic$ibic$var$i.out > "results.$p0.$n.$ngrp.$sigma.$mbic.$ibic.$var.out"
else
head -3 $p0$n$ngrp$sigma$mbic$ibic$var$i.out >> "results.$p0.$n.$ngrp.$sigma.$mbic.$ibic.$var.out"
fi
done
| true
|
a22525d814e30bf7006e41365517d482b0ef054b
|
Shell
|
MenkeTechnologies/zpwr
|
/autoload/common/zpwrVimAllEdit
|
UTF-8
| 406
| 2.921875
| 3
|
[
"MIT"
] |
permissive
|
# -*- mode: sh -*-
# vim: set ft=sh:
function zpwrVimAllEdit(){
BUFFER="$(zpwrFzvimAll)"
if [[ -z "$BUFFER" ]]; then
return
fi
BUFFER="$ZPWR_VIM $BUFFER"
zpwrLogDebug "builtin cd $ZPWR"
eval "builtin cd $ZPWR"
BUFFER="$BUFFER; zpwrClearList; zpwrIsGitDir && git diff HEAD"
zpwrLogDebug "$BUFFER"
print -s -- "$BUFFER"
eval "$BUFFER"
}
zpwrVimAllEdit "$@"
| true
|
5134da522b40e1d23699584a9084cb777c8c6e37
|
Shell
|
dambor/diego-release
|
/jobs/rootfses/templates/pre-start
|
UTF-8
| 888
| 3.34375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -ex
# vim: set ft=sh
# Unpack the cflinuxfs2 rootfs (first run only) and install this deployment's
# trusted CA certificates into it, so containers built on the rootfs trust them.
CONF_DIR=/var/vcap/jobs/rootfses/config
ROOTFS_PACKAGE=/var/vcap/packages/rootfs_cflinuxfs2
ROOTFS_DIR=$ROOTFS_PACKAGE/rootfs
# Extract the rootfs tarball once (-p preserves permissions); later runs reuse it.
if [ ! -d $ROOTFS_DIR ]; then
mkdir -p $ROOTFS_DIR
tar -pzxf $ROOTFS_PACKAGE/cflinuxfs2.tar.gz -C $ROOTFS_DIR
fi
# Clean out previously installed CAs so removed certificates do not linger.
rm -f $ROOTFS_DIR/usr/local/share/ca-certificates/*
# Split files on '----END CERTIFICATE-----' and increment our file counter by 1
# Each PEM block in trusted_ca.crt becomes its own trusted_ca_<n>.crt under the
# rootfs (NF skips blank lines between certificates); one-cert-per-file is the
# layout update-ca-certificates consumes below.
pushd $ROOTFS_DIR
awk -v n=1 '
split_after == 1 {n++;split_after=0}
/-----END CERTIFICATE-----/ {split_after=1}
NF {print > "usr/local/share/ca-certificates/trusted_ca_" n ".crt"}' < $CONF_DIR/certs/trusted_ca.crt
popd
# have to set TMPDIR so we can mktemp inside the chrooted subshell
# (KILL after 10s guards against update-ca-certificates hanging in the chroot)
TMPDIR=/tmp timeout --signal=KILL 10s chroot $ROOTFS_DIR /usr/sbin/update-ca-certificates -f
# change modification time to invalidate garden's image cache
touch $ROOTFS_DIR
| true
|
bd9bf66fbeae4583078cb163bca5adfa2defc7fc
|
Shell
|
swkim01/nuclei-sdk
|
/setup.sh
|
UTF-8
| 621
| 3.046875
| 3
|
[
"Apache-2.0"
] |
permissive
|
NUCLEI_TOOL_ROOT=~/Nuclei
NMSIS_ROOT=../NMSIS
# Create your setup_config.sh
# and define NUCLEI_TOOL_ROOT and NMSIS_ROOT like below
# NUCLEI_TOOL_ROOT=/home/develop/Software/Nuclei
# NMSIS_ROOT=/home/develop/Source/NMSIS
SETUP_CONFIG=setup_config.sh
[ -f $SETUP_CONFIG ] && source $SETUP_CONFIG
[ -f .ci/build_sdk.sh ] && source .ci/build_sdk.sh
[ -f .ci/build_applications.sh ] && source .ci/build_applications.sh
echo "Setup Nuclei SDK Tool Environment"
echo "NUCLEI_TOOL_ROOT=$NUCLEI_TOOL_ROOT"
export PATH=$NUCLEI_TOOL_ROOT/gcc/bin:$NUCLEI_TOOL_ROOT/openocd/bin:$PATH
export NMSIS_ROOT=$(readlink -f $NMSIS_ROOT)
| true
|
0cc90d897da9f08799132ef57ba432f860530080
|
Shell
|
QFAB-Bioinformatics/beatson_bootstrap
|
/bootstrap_beatson.sh
|
UTF-8
| 1,535
| 3.828125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
#
# Shell script to bootstrap Scott Beatson's laptop
#
# Actions: sets the shell as tcsh and installs Homebrew
# installs a collection of tools using Brew ('brew.file')
# sets some basic system defaults ('osx_defaults.sh')
# installs tcsh-enabled Conda ('conda_installer.sh')
# installs Beatson Lab Conda environments (see 'conda_installer.sh')
fancy_echo() {
local fmt="$1"; shift
printf "\n$fmt\n" "$@"
}
fancy_echo 'Please enter your password if prompted'
# Ask for the administrator password upfront
sudo -v
# Keep-alive: update existing `sudo` time stamp until `.macos` has finished
while true; do sudo -n true; sleep 60; kill -0 "$$" || exit; done 2>/dev/null &
if ! command -v brew >/dev/null; then
fancy_echo "Installing homebrew..."
ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
# As this is a provision run, we'll clear the persistent apps on the dock
defaults write com.apple.dock persistent-apps -array
else
fancy_echo "Homebrew already installed. Skipping ..."
fi
fancy_echo 'Installing tcsh...'
brew install tcsh
fancy_echo 'Setting tcsh as default shell..'
chsh -s /bin/tcsh
fancy_echo 'Installing git...'
brew install git
fancy_echo 'Updating homebrew..'
brew update
fancy_echo 'Running Brew file..'
brew bundle --file=brew.file
fancy_echo 'Setting OSX defaults..'
source osx_defaults.sh
fancy_echo 'Installing Conda for tcsh..'
source conda_installer.sh
fancy_echo 'All done! Some of these changes require a restart to take effect.'
| true
|
93c75a0ab7e5590d0ffe0db0bdc73ba06cdd93ce
|
Shell
|
murphp30/I_LOFAR_workshop_2020
|
/BST_tutorial/BST_data/modea/solar_modea_20190507_160220.sh
|
UTF-8
| 2,127
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
#variables
rcumode=3
bits=8
duration=10800 #21600 # observation duration in seconds
observation="solar_mode3"
today="`date +"%Y.%m.%d"`"
start_time="`date +"%H%M%S"`"
#check below exists
#write data datapath on LCU
datapath=/data/home/user1/data/$today/$observation/$start_time/
echo "*** Statistics being saved to $datapath"
#swlevel 3 to allow beamforming
echo "*** Going to swlevel 3"
swlevel 3
#allow lots of time to reach swlevel 3
#sleep 180 #come back to this
#create the data directory & copy this script to it as a record of what we've done.
mkdir -p $datapath
cp $0 $datapath
band="10_90" # assume mode 3
antennaset="LBA_INNER"
# Choose subbands/beamlets for the selected bit depth.
# BUG FIX: freq_range was only assigned in the 16-bit branch, so the
# "frequency range $freq_range" status line printed an empty value in
# 8-bit mode; set it here to match the subband comment (10.0-88.0MHz).
if [ "$bits" -eq 8 ]; then
subbands="7:494" #"51:450"	#Freq. range = 10.0-88.0MHz
freq_range="10.0-88.0MHz"
beamlets="0:487" #"0:399"
else
subbands="203:446" #Freq. range = 40-87MHz
freq_range="40-87MHz"
beamlets="0:243" #automate this!
fi
#Setup the pointings
#CRAB="1.459672668,0.384225508,J2000"
theSUN="0,0,SUN"
echo "*** Observing in bitmode $bits with $antennaset in subbands $subbands, frequency range $freq_range"
#set up beams to point at sun
if [ $bits -eq 8 ]; then
# Setup 8-bit mode, otherwise assume 16(12) bit mode
rspctl --bitmode=8
sleep 10
echo "*** Setting beam to point at $theSUN"
beamctl --antennaset=$antennaset --rcus=0:191 --band=$band --subbands=$subbands --beamlets=$beamlets --anadir=$theSUN --digdir=$theSUN > $datapath/beamctl.log 2>&1 &
else
rspctl --bitmode=16
echo "*** Setting beam to point at $theSUN"
beamctl --antennaset=$antennaset --rcus=0:191 --band=$band --subbands=$subbands --beamlets=$beamlets --anadir=$theSUN --digdir=$theSUN > $datapath/beamctl.log 2>&1 &
fi
#actually record statistic files (bst) nohup is forced execution
nohup rspctl --statistics=beamlet --duration=$duration --integration=1 --directory=$datapath > $datapath/rspctl_beamlet.log 2>&1 &
echo " "
echo "*** Recording started at: " $(date)
echo "*** Recording beamlet statistics for "$duration" seconds...."
sleep $duration
echo "*** Observation finished, killing the beam"
killall beamctl # kill any existing beams
#echo "*** Going back to swlevel 0"
#swlevel 0
| true
|
7e32168c16c1e91716286ab4e97128cbad1454b9
|
Shell
|
Agraphie/zversion
|
/scripts/ssh/openSSHVersionDistribution.sh
|
UTF-8
| 476
| 3.265625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#Output filename: openssh_version_distribution
printf "Script name: $0\n"
printf "Input file: $1\n"
printf '%s\n' '-------------OpenSSH version distribution-------------'
printf "`grep "OpenSSH" $1 | jq 'select(.Vendor == "OpenSSH") | .SoftwareVersion' | sort | uniq -c | sort -nr` \n"
printf '\nTotal: %s\n' "`grep "OpenSSH" $1 | jq 'select(.Vendor == "OpenSSH") | .SoftwareVersion' | wc -l`"
printf '%s\n' '-----------------------------------------------------'
| true
|
933f7fb003a3bd35c55a3f92ddd5313d8115092c
|
Shell
|
skyitachi/MIT6.824
|
/src/raft/run_test.sh
|
UTF-8
| 572
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
# Repeatedly run one Go test from this package (up to 10000 times) to flush
# out flaky timing bugs; stops on the first failure, whose output is left in
# ./raft.log.  Usage: ./run_test.sh <TestNamePattern>
# Portable realpath substitute: absolutize $1 against $PWD when it is relative.
realpath() {
[[ $1 = /* ]] && echo "$1" || echo "$PWD/${1#./}"
}
TEST_CASE=$1
echo $TEST_CASE
# Derive the repo root (two levels up from this script) for the GOPATH layout.
SHELL_FOLDER=$(dirname $(realpath "$0"))
PARENT_SHELL_FOLDER=$(dirname "$SHELL_FOLDER")
GRAND_PARENT_SHELL_FOLDER=$(dirname "$PARENT_SHELL_FOLDER")
export GOPATH=$GRAND_PARENT_SHELL_FOLDER:$GOPATH
PROJECT_ROOT=$SHELL_FOLDER
cd $PROJECT_ROOT
success=0
# Each iteration overwrites raft.log, so on failure it holds the failing run.
for i in {1..10000}
do
echo "running $i test"
go test -run ${TEST_CASE} > ./raft.log
if [[ $? -eq 0 ]]
then
success=$(( success + 1 ))
else
exit 1
fi
done
echo "success: ${success}"
| true
|
004b9aefd81a2f686a4bd8f1326a55c3e95b1edc
|
Shell
|
htugraz/abs
|
/x86_64/community/python-pytest/PKGBUILD
|
UTF-8
| 2,201
| 2.53125
| 3
|
[] |
no_license
|
# $Id: PKGBUILD 152799 2015-12-11 03:32:52Z fyan $
# Maintainer: Felix Yan <felixonmars@archlinux.org>
# Contributor: Felix Kaiser <felix.kaiser@fxkr.net>
pkgbase=python-pytest
pkgname=('python-pytest' 'python2-pytest')
pkgver=2.8.4
pkgrel=2
pkgdesc="Simple powerful testing with Python"
arch=('any')
license=('MIT')
url="http://pytest.org/"
makedepends=('python-setuptools' 'python2-setuptools' 'python-py' 'python2-py' 'git'
'python-pluggy' 'python2-pluggy')
checkdepends=('lsof' 'python-nose' 'python2-nose'
'python-mock' 'python2-mock' 'python-yaml' 'python2-yaml' 'python2-enum34'
'python-pexpect' 'python2-pexpect' 'python-pytest-xdist' 'python2-pytest-xdist'
'python-zope-interface' 'python2-zope-interface' 'python-twisted' 'python2-twisted')
source=("git+https://github.com/pytest-dev/pytest.git#tag=$pkgver"
fix-deprecated_call.patch)
sha512sums=('SKIP'
'37754ab7eabf7e389ef30b4253152d2ba2fca4bbcfe11a14bf1f13da0a17e92f88808e76bbc7b66d2cddbc00fb1ecde814dc19277a36a92b7d540d84ae0391f2')
prepare() {
# Remove bundled pluggy - disabled for now as it will break tests
# rm -r pytest/_pytest/vendored_packages
# sed -i "s/'_pytest.vendored_packages'//" pytest/setup.py
# https://github.com/pytest-dev/pytest/issues/1238
(cd pytest; patch -p1 -i ../fix-deprecated_call.patch)
cp -a pytest{,-py2}
}
build() {
cd "$srcdir/pytest"
python setup.py build
cd "$srcdir/pytest-py2"
python2 setup.py build
}
check() {
cd "$srcdir/pytest"
python setup.py test
cd "$srcdir/pytest-py2"
python2 setup.py test || warning "Tests failed"
# https://github.com/pytest-dev/pytest/issues/927
}
package_python-pytest() {
depends=('python-py' 'python-setuptools') # 'python-pluggy'
cd pytest
python setup.py install --root="${pkgdir}" --optimize=1
install -D -m644 LICENSE "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"
}
package_python2-pytest() {
depends=('python2-py' 'python2-setuptools') # 'python2-pluggy'
cd pytest-py2
python2 setup.py install --root="${pkgdir}" --optimize=1
install -D -m644 LICENSE "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"
mv "${pkgdir}/usr/bin/py.test" "${pkgdir}/usr/bin/py.test2"
}
| true
|
5fa4496a39c6de8caf2b9f5d8c4bd3383414d730
|
Shell
|
zxzwxdl/docker-aspnetcore-build
|
/build_init
|
UTF-8
| 244
| 2.875
| 3
|
[] |
no_license
|
#!/bin/sh
if [ -z "$SLN_FILE_NAME" ] || [ -z "$RELEASE_PATH" ]; then
echo "sln file name or release path undefined!"
exit 1
fi
cd /app
dotnet restore $SLN_FILE_NAME && dotnet publish $SLN_FILE_NAME -c Release -o $RELEASE_PATH
| true
|
3b431748063e47d5e27088a8a3679e6539b55264
|
Shell
|
hiremaga/ciborg-vagrant
|
/bootstrap.sh
|
UTF-8
| 983
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
set -e
sudo apt-get update > /dev/null
packages="git build-essential openssl libreadline6 libreadline6-dev libreadline5 curl git-core zlib1g zlib1g-dev libssl-dev libyaml-dev libsqlite3-dev sqlite3 libxml2-dev libxslt1-dev autoconf libc6-dev libncurses5-dev automake libtool bison subversion pkg-config"
selections=`dpkg --get-selections`
for package in $packages
do
if ! echo "$selections" | grep "^$package\s" > /dev/null
then
to_install="$to_install $package"
fi
done
if [ ! -z "$to_install" ]
then
sudo apt-get install -y $to_install
fi
test -d /usr/local/rvm || curl --location https://get.rvm.io | sudo bash -s stable
sudo tee /etc/profile.d/rvm.sh > /dev/null <<RVMSH_CONTENT
[[ -s "/usr/local/rvm/scripts/rvm" ]] && source "/usr/local/rvm/scripts/rvm"
RVMSH_CONTENT
sudo tee /etc/rvmrc > /dev/null <<RVMRC_CONTENTS
rvm_install_on_use_flag=1
rvm_trust_rvmrcs_flag=1
rvm_gemset_create_on_use_flag=1
RVMRC_CONTENTS
sudo usermod vagrant -a -G rvm
| true
|
a3380ad4aa2f8edbaaecb0de1285f35d44490b9b
|
Shell
|
julienpoirier/viewpoint-installer
|
/install/glassfish
|
UTF-8
| 675
| 2.890625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# description: Glassfish start/stop/restart
# processname: glassfish
# chkconfig: 2345 10 90
JAVA_HOME=/usr/lib/jvm/jre-1.7.0-openjdk.x86_64
export JAVA_HOME
PATH=$JAVA_HOME/bin:$PATH
export PATH
GLASSFISH_HOME=/data/bddpmon/viewpoint/viewpoint/bin/glassfish3/glassfish/
GLASSFISH_USER=bddpmon
case $1 in
start)
su $GLASSFISH_USER -c "$GLASSFISH_HOME/bin/asadmin start-domain domain1"
;;
stop)
su $GLASSFISH_USER -c "$GLASSFISH_HOME/bin/asadmin stop-domain domain1"
;;
restart)
su $GLASSFISH_USER -c "$GLASSFISH_HOME/bin/asadmin stop-domain domain1"
su $GLASSFISH_USER -c "$GLASSFISH_HOME/bin/asadmin start-domain domain1"
;;
esac
exit 0
| true
|
cc5e920e757f18efd8fcf2b221eae8d511e44e74
|
Shell
|
atulbhingarde/HW-Linux-2
|
/Instructions/2-mkdircd/mkdircd
|
UTF-8
| 474
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
echo $#
# check if there are sufficient parameters passed
if [ $# -eq 2 ] ; then
# set the target directory that is first parameter
TargetDir=$1
#set the target file to be created second parameter
TargetFile=$2
# create the directory with -p option
mkdir -p ${TargetDir}
# load the targetfilel in nano editor
nano ${TargetDir}/${TargetFile}
else
echo "inadequate parameters"
fi
# for mkdircd Linux-Week-2 ClassNotes.txt
| true
|
d9661b22dc8a11a91e4c21500235446b708c2619
|
Shell
|
BeckYoung/android-x265-buildscript
|
/x86_64/build.sh
|
UTF-8
| 1,358
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
export ANDROID_NDK_HOME=~/Android/android-ndk-r16b
export CMAKE_HOME=~/Android/Sdk/cmake/3.6.4111459/bin
export NUMBER_OF_CORES=4
export PATH=$CMAKE_HOME:$PATH
ANDROID_CPU=x86_64
# Configure the x265 build for Android, in debug mode when "-d" appears
# anywhere in the script's arguments.
# BUG FIX: the original test used typographic quotes (“$@“), which bash
# treats as literal characters, not quoting; "$*" joins the arguments into
# one word that can be pattern-matched safely inside [[ ]].
if [[ "$*" == *"-d"* ]]; then
echo "----------------------------cmake debug----------------------------"
cmake -DDEBUG=ON -DCMAKE_TOOLCHAIN_FILE=$ANDROID_NDK_HOME/build/cmake/android.toolchain.cmake \
-DANDROID_NDK=$ANDROID_NDK_HOME \
-DANDROID_ABI=$ANDROID_CPU \
-DANDROID_TOOLCHAIN=clang \
-DANDROID_PLATFORM=android-21 \
-DANDROID_STL=gnustl_static \
../../../source
else
echo "----------------------------cmake release----------------------------"
cmake -DDEBUG=NO -DCMAKE_TOOLCHAIN_FILE=$ANDROID_NDK_HOME/build/cmake/android.toolchain.cmake \
-DANDROID_NDK=$ANDROID_NDK_HOME \
-DANDROID_ABI=$ANDROID_CPU \
-DANDROID_TOOLCHAIN=clang \
-DANDROID_PLATFORM=android-21 \
-DANDROID_STL=gnustl_static \
../../../source
fi
sed -i 's/-lpthread/-pthread/' CMakeFiles/cli.dir/link.txt
sed -i 's/-lpthread/-pthread/' CMakeFiles/x265-shared.dir/link.txt
sed -i 's/-lpthread/-pthread/' CMakeFiles/x265-static.dir/link.txt
make
$ANDROID_NDK_HOME/toolchains/x86_64-4.9/prebuilt/linux-x86_64/bin/x86_64-linux-android-strip libx265.so
make DESTDIR=$(pwd)/build/$ANDROID_CPU install
| true
|
d9d510a2211e05f64bb6a42ac5852c66d467b98c
|
Shell
|
gibranfp/SMH-Topic-Discovery
|
/scripts/run_lda.sh
|
UTF-8
| 716
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Script to discover topics with LDA from the 20 Newsgroups and Reuters corpora.
#
# It receives directory for SMH experiments as argument:
#   $1 - base directory under which per-corpus result folders are created.
mkdir -p "$1/20newsgroups"
mkdir -p "$1/reuters"
for K in 200 400
do
echo "Discovering $K topics using LDA"
# BUG FIX: the topic count must be the expanded loop variable ($K); the
# original passed the literal string "K" to --number_of_topics.
python python/discovery/lda_topic_discovery.py \
--number_of_topics "$K" \
data/20newsgroups/20newsgroups20000.corpus \
data/20newsgroups/20newsgroups20000.vocab \
"$1/20newsgroups/"
echo "Discovering $K topics using LDA"
python python/discovery/lda_topic_discovery.py \
--number_of_topics "$K" \
data/reuters/reuters100000.corpus \
data/reuters/reuters100000.vocab \
"$1/reuters/"
done
| true
|
326058a939163e868fbb6d99230f4f9f5764ebd9
|
Shell
|
NBISweden/agda
|
/agda/pconsc/templates/pconsc/predictall.sh
|
UTF-8
| 1,395
| 2.859375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
set -e
### Setup
module add python/2.7.6-build01
module add pcons-fold/140128
module add blast+/2.2.27-1 # for prepare_db
module add plmdca/2012-build01 # reminder that this workflow uses this plmDCA version.
scratch=$SNIC_TMP
agdaworkdir=$(pwd)
DEBUG=false
if [ "$DEBUG" = "true" ]; then
cp /scratch/local/test/intermediary_predictions.tgz .
tar xf intermediary_predictions.tgz '*.png' '*.out' '*.txt'
mv intermediary_predictions/* .
exit
fi
blastdb=$(dirname {{jackhmmerdb}})/$(basename {{jackhmmerdb}} .gz).blastdb.tar.gz
hhblitsdbname=$(basename {{hhblitsdb}} .tar.gz)
local_hhblitsdb=$scratch/$hhblitsdbname/$hhblitsdbname
local_jackhmmerdb=$scratch/$(basename {{jackhmmerdb}} .gz)
intermediariesname=$(basename {{intermediaries}} .tgz)
### Stage data
prepare_db {{hhblitsdb}} {{jackhmmerdb}} $blastdb
cp {{query}} $scratch
### Run prediction
pushd $scratch
predictAll_1.0.py -c $SLURM_JOB_CPUS_PER_NODE $local_hhblitsdb $local_jackhmmerdb {{query}} 2> $agdaworkdir/{{log}}
predictAll_2.0.py $local_hhblitsdb $local_jackhmmerdb {{query}} $SLURM_JOB_CPUS_PER_NODE 2>> $agdaworkdir/{{log}}
### Assemble results
cp *.png *.out $agdaworkdir
mkdir $intermediariesname
mv query.fasta.horiz *.psicov *.plmdca *.png *.ss *.ss2 *.rsa *.out $intermediariesname
cp $agdaworkdir/{{log}} $intermediariesname
tar czf $agdaworkdir/{{intermediaries}} $intermediariesname
popd
| true
|
4dde6f54f23e1d81df6967310b029ece08ac531f
|
Shell
|
coderall/2017_practice
|
/shell/bash.sh
|
UTF-8
| 272
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/bash
# this file show you some way to use the bash directly
# No.1 check the script syntax
bash -x (some shell script)
# No.2 exec some shell
bash some_shell.sh
# this is very useful to exec some shell mannually with some mount file system without exec option
| true
|
a683cdf61e1816facc357aaaa23ac95a2c8f1ff5
|
Shell
|
jaruserickson/brilliant-bash
|
/.bashrc
|
UTF-8
| 3,379
| 3.53125
| 4
|
[
"MIT"
] |
permissive
|
### aliased sudo: so you can use custom aliases as sudo
alias sudo="sudo "
### weather: pass your city or zip code, and it returns the weather!
weather() { curl wttr.in/"$1"; }
### myip: prints out your IP address. Handy to check if your VPN is on!
alias myip="curl icanhazip.com"
### plz: re-run the last command as root.
alias plz="fc -l -1 | cut -d' ' -f2- | xargs sudo"
### add some color to your ls.
### OLD PS1="\[\033[36m\]\u\[\033[m\]@\[\033[36m\]\h:\[\033[36;1m\]\W\[\033[m\⚡️ \]"
emojis=(🐶 🐭 🐹 🐰 🐸 🐨 🐷 🐮 🐵 🐼 🐧 🐍 🐢 🐠 🐳 🐬 🍕 🍟 🇨🇦 ⚡️ 〽️)
emoji='`echo ${emojis[$RANDOM % 21]}`'
###[ $TERM_PROGRAM == "Apple_Terminal" ]
export PS1="\[\033[36m\]\u\[\033[m\]@\[\033[36m\]\h:\[\033[36;1m\]\W\[\033[m\] $emoji "
###export PS1="\[\033[30m\]\u\[\033[m\]@\[\033[90m\]\h:\[\033[90;1m\]\W\[\033[m\] $emoji "
export CLICOLOR=1
export LSCOLORS=gxBxhxDxfxhxhxhxhxcxcx
alias ls="ls -GFh"
### a more verbose, colorful ls: see almost everything!
alias lsm="ls -lAhG"
### up: cd .. when you're too lazy to use the spacebar
alias up="cd .."
### cls: a better clear with listed directories.
### DEPENDENCY - lsm (see above)
alias cls="clear;lsm"
### update: update all of your packages!
### Picks the first package manager found; command -v is the portable probe.
if command -v pacman >/dev/null; then
alias update="sudo pacman -Syyu"
elif command -v apt >/dev/null; then
alias update="sudo apt update && sudo apt upgrade"
elif command -v apt-get >/dev/null; then
# BUG FIX: the original had a stray space before '=' ("alias update ="),
# so on apt-get-only systems the 'update' alias was never defined.
alias update="sudo apt-get update && sudo apt-get upgrade"
elif command -v dnf >/dev/null; then
alias update="sudo dnf upgrade"
elif command -v yum >/dev/null; then
alias update="su -c 'yum update'"
fi
### ports: lists all ports open and which programs are using them
### TIP - add ports to your NOPASSWD list.
alias ports="sudo netstat -tulpn"
### space: gets space left on disk
alias space="df -h"
### incognito: no saving your command history!
incognito() {
case $1 in
start)
set +o history;;
stop)
set -o history;;
*)
echo -e "USAGE: incognito start - disable command history.
incognito stop - enable command history.";;
esac
}
### gpom: simplistic git push origin master .
alias gpom="git push origin master"
### restart: a quick refresh for your shell instance.
alias restart="source ~/.bashrc"
## my stuff
### cbase: change base of a number from base x to base y.
### convention: [ cbase z x y ] converts z from base x to base y.
cbase() {
	# Bash expands base#digits to the decimal value of $1 read in base $2;
	# bc then re-prints that decimal value in the requested output base.
	local number="$1" from="$2" to="$3"
	printf 'obase=%s;%s\n' "$to" "$(( ${from}#${number} ))" | bc
}
### convert mp4s to web optimized gifs
### usage: gif <input.mp4> <output.gif>
function gif() {
# First pass: build an optimized color palette from the input video.
ffmpeg -i $1 -vf palettegen palette.png
# Second pass: encode the gif with that palette at 24 fps, 256x144.
# NOTE(review): $1/$2 are unquoted — paths containing spaces will break.
ffmpeg -i $1 -i palette.png -lavfi paletteuse -r 24 -s 256x144 $2
# Remove the intermediate palette file.
rm palette.png
}
### ff: fast forward branch pull/push
### usage: ff <branch> — pull <branch> from upstream, then push it to origin.
function ff() {
echo "Fast Forward v0.1"
echo "<---------PULLING----------"
# Sync the branch from the upstream remote first...
git pull upstream $1
echo ""
echo "----------PUSHING--------->"
# ...then forward the result to the user's fork.
git push origin $1
}
### react native stuff
alias native="react-native"
alias ios="react-native run-ios --simulator 'iPhone X'"
alias android="react-native run-android"
### python env stuff
alias leave="source deactivate"
alias py2="source activate py2"
alias py3="source activate py36"
### toronto
alias wttr="curl wttr.in/toronto"
alias tasks=top
### tasks: a command that makes sense for top
alias tasks=top
### bonk bonk
alias bonk="tput bel"
alias sbonk="say bonk"
| true
|
1d1de0399512231c1ddb65ea5caa65340a4970dd
|
Shell
|
gmauro/ena-cog-uk-wfs
|
/run_reporting.sh
|
UTF-8
| 2,063
| 3.234375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# bot-specific settings
BOT_TAG='bot-go-report'
BOT_RESPONSE='report-bot-ok'
DEST_TAG='cog-uk_report'
DEST_NAME_SUFFIX='Reporting'
VCF_DATA='Final (SnpEff-) annotated variants'
JOB_YML='reporting-job.yml'
_API_KEY=$API_KEY
# common for all bots
JOB_YML_DIR='job-yml-templates'
# start processing
WORKDIR=$BOT_TAG'_run_'$(date '+%s')
pwd
echo $WORKDIR
mkdir $WORKDIR
date
echo "Generating yaml file $WORKDIR/$JOB_YML"
#generate the job.yml needed by planemo run
cat "$JOB_YML_DIR/$JOB_YML" | python3 bioblend-scripts/find_datasets.py "$VCF_DATA" -g "https://usegalaxy.eu" -a $_API_KEY -t $BOT_TAG --collections-only -n 1 --from-template -o "$WORKDIR/$JOB_YML"
# TO DO: remove $BOT_TAG from history - as part of find_datasets.py or via separate script
if [ -s "$WORKDIR/$JOB_YML" ]; then
date
echo "Starting Planemo"
# if the formatted job.yml file contains data, we know a suitable source history was found
# otherwise we assume no history is ready for processing
SOURCE_HISTORY_ID=$(grep '#from_history_id:' "$WORKDIR/$JOB_YML" | cut -d ' ' -f 2-)
SOURCE_HISTORY_NAME=$(grep '#from_history_name:' "$WORKDIR/$JOB_YML" | cut -d ' ' -f 2-)
# wait for successful completion of workflow scheduling by planemo run, then tag the new history and retag the source history
#(while [ ! -s "$WORKDIR/run_info.txt" ]; do sleep 60; done; DEST_HISTORY_ID=$(grep -m1 -o 'histories/[^?]*' "$WORKDIR/run_info.txt" | cut -d / -f 2) && python bioblend-scripts/tag_history.py $DEST_HISTORY_ID -g "https://usegalaxy.eu" -a $API_KEY -t $DEST_TAG && python bioblend-scripts/tag_history.py $SOURCE_HISTORY_ID -g "https://usegalaxy.eu" -a $API_KEY -t $BOT_RESPONSE -r $BOT_TAG) &
# run the viral beacon WF
#planemo -v run 4e9e995d3ce690bf "$WORKDIR/$JOB_YML" --history_name "$SOURCE_HISTORY_NAME - $DEST_NAME_SUFFIX" --galaxy_url 'https://usegalaxy.eu' --galaxy_user_key $API_KEY --engine external_galaxy 2>&1 > /dev/null | grep -o 'GET /api/histories/[^?]*\?' > "$WORKDIR/run_info.txt"
fi
date
echo "Cleaning"
pwd
#rm -R $WORKDIR
| true
|
77144fa34b08fa52c1df96c0022b8feaf2b91dad
|
Shell
|
thapr0digy/scripts
|
/mkv2mp4.sh
|
UTF-8
| 337
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
#
# This script will change mkv files with AC3 audio to mp4 files with AAC audio
#
# Usage: mkv2mp4.sh <mkv_file.mkv> <mp4_file.mp4>
#
#
mkv_file=$1
mp4_file=$2

# Fail early with a usage hint instead of letting ffmpeg error cryptically.
if [ -z "$mkv_file" ] || [ -z "$mp4_file" ]; then
	echo "Usage: $(basename "$0") <mkv_file.mkv> <mp4_file.mp4>" >&2
	exit 1
fi

echo Changing mkv file to mp4....
# Copy the video stream untouched, transcode audio to 256 kbps AAC, and drop
# subtitles (-sn). Paths are quoted so filenames with spaces work.
ffmpeg -i "$mkv_file" -vcodec copy -acodec aac -ab 256000 -sn -strict -2 "$mp4_file"
| true
|
47590f6fcfe2637d59114572fe78bdbd7663c547
|
Shell
|
yuichisuzuki0601/internal-document-search-service
|
/database/update-module.sh
|
UTF-8
| 280
| 2.59375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Rebuild the local module dependencies and refresh the bundled migration jar.
# Run from the directory containing this script, regardless of caller cwd.
cd `dirname $0`
# Build and install the shared utility module into the local Maven repository.
MODULE_PATH1=../../ysd-util
cd ${MODULE_PATH1}
./mvnw clean install
cd -
# Package the DB migration module.
MODULE_PATH2=../../ysd-db-migration
cd ${MODULE_PATH2}
./mvnw clean package
cd -
# Replace any previously copied migration jar with the freshly built one.
rm -f ysd-db-migration-*.jar
cp ${MODULE_PATH2}/target/ysd-db-migration-*.jar ./
| true
|
68e5b053c65e427de7ade24b16306d94fd50952b
|
Shell
|
wuhp/lfs7.5
|
/build-lfs-system/build-mpfr.sh
|
UTF-8
| 305
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash
# Build and install MPFR 3.1.2 (LFS system-build stage).
export SOURCE="mpfr-3.1.2"
# Remove any leftover tree from a previous run before extracting.
# (The original used a single '&', which backgrounded the test and ran
# 'rm' unconditionally; '&&' is what was intended.)
[ -d "${SOURCE}" ] && rm -rf "${SOURCE}"
tar -Jxf ../sources/mpfr-3.1.2.tar.xz
cd "${SOURCE}" || exit 1
./configure --prefix=/usr \
            --enable-thread-safe \
            --docdir=/usr/share/doc/mpfr-3.1.2
make
#make check
make install
make html
make install-html
| true
|
e2d736cc4560dfd56a55117ffb596a518609c974
|
Shell
|
LEI/dot-sh
|
/bin/e
|
UTF-8
| 608
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/sh
# Usage: e [<path>]
# Quick shortcut to the program specified by VISUAL or EDITOR
# 	e: opens the current directory in your editor
# 	e <path>: opens the specified directory in your editor
# set -e
# shellcheck disable=SC1090
. "$HOME"/bin/hasfunc
if ! hasfunc e; then
  # Define e only when no function of that name is already loaded.
  e() {
    chosen="${VISUAL:-$EDITOR}"
    if [ -z "$chosen" ]; then
      printf >&2 '%s\n' "EDITOR is empty or not set"
      exit 1
    fi
    # With no arguments, default to the current directory.
    if [ $# -eq 0 ]; then
      set -- .
    fi
    $chosen "$@"
    unset chosen
  }
fi
# shellcheck disable=SC2128
# When invoked directly (not sourced), run the function with the CLI args.
if [ "$(basename -- "$0")" = e ]; then
  e "$@"
fi
| true
|
cd356e200ad354559b8ca742329e11fb5897c348
|
Shell
|
MaxRzv/sick_lidar_localization
|
/test/scripts/clion.bash
|
UTF-8
| 1,304
| 2.765625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# init ros environment
source /opt/ros/melodic/setup.bash
if [ -f ../../../../devel/setup.bash ] ; then source ../../../../devel/setup.bash ; fi
if [ -f ../../../../install/setup.bash ] ; then source ../../../../install/setup.bash ; fi
# start edit resource-files
gedit ./run_simu.bash ./run_cola_examples.bash ./send_cola_examples.bash run_demo_simu.bash ./run_error_simu.bash ./run.bash &
# start clion
echo -e "Starting clion...\nNote in case of clion/cmake errors:"
echo -e " Click 'File' -> 'Reload Cmake Project'"
echo -e " cmake/clion: Project 'XXX' tried to find library '-lpthread' -> delete 'thread' from find_package(Boost REQUIRED COMPONENTS ...) in CMakeLists.txt"
echo -e " rm -rf ../../../.idea # removes all clion settings"
echo -e " rm -f ~/CMakeCache.txt"
echo -e " 'File' -> 'Settings' -> 'CMake' -> 'CMake options' : -DCATKIN_DEVEL_PREFIX=~/TASK013_PA0160_SIM_Localization/catkin_ws/devel"
echo -e " 'File' -> 'Settings' -> 'CMake' -> 'Generation path' : ../cmake-build-debug"
echo -e "Note: Do NOT install Hatchery plugin for launch.file support. It doesn't work but crashes clion. If done by accident:"
echo -e " 'File' -> 'Settings' -> 'Plugins' -> 'Installed' -> Deactivate Hatchery"
pushd ../../../..
~/Public/clion-2018.3.3/bin/clion.sh ./src &
popd
| true
|
f9a643ddbbfb60330d3ab8dcf8fe99c55fb34b8a
|
Shell
|
linanqiu/cs4705
|
/coding/hw4/run.sh
|
UTF-8
| 639
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/bash
echo "==== Compiling"
javac TaggerTest.java
echo "==== Running"
echo "== Estimated runtime: ~6min"
start=`date +%s`
java -Xms1024m -Xmx2048m Test
end=`date +%s`
runtime=$((end-start))
echo "== Runtime: $runtime"
echo "==== Question 4"
echo "== 4.1: No answer required"
echo "== 4.2: ibm1_devwords_ranking.txt"
echo "== 4.3: ibm1_alignment.txt"
echo "==== Question 5"
echo "== 5.1: No answer required"
echo "== 5.2: No answer required"
echo "== 5.3: ibm2_alignment.txt"
echo "==== Question 6"
echo "== 6.1 unscrambled.en"
echo "== Evaluating Question 6"
python eval_scramble.py unscrambled.en original.en
echo "==== Done"
| true
|
3d914f72afc03b49c29570a102027408addc5e8e
|
Shell
|
xueye9/study
|
/HadoopDoc/component/lookapp/look.sh
|
UTF-8
| 1,045
| 2.890625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
nmport=59842
host="mob616:50088"
host="platform30:8088"
version="v1"
accept="Accept: application/json"
resource="apps?state=RUNNING"
url="http://$host/ws/${version}/cluster/${resource}"
echo $url
dt=`date +"%Y%m%d%H%M%S"`
day=${dt::8}
dir=./data/$day
mkdir -p $dir
file=$dir/${dt}.json.log
curl -H "$accept" -X GET "$url" -o $file 2>/dev/null
#http://hadoop.apache.org/docs/current/hadoop-yarn/hadoop-yarn-site/ResourceManagerRest.html
#http://mob616:50088/ws/v1/cluster/metrics
#http://mob616:50088/ws/v1/cluster/scheduler
#http://mob616:50088/ws/v1/cluster/apps
#http://mob616:50088/ws/v1/cluster/apps/{appid}
#http://mob616:50088/ws/v1/cluster/apps?state=RUNNING
#采集node manager的container的列表
dt=`date +"%Y%m%d%H%M"`
nodes=`cat ${HADOOP_CONF_DIR}/slaves`
for node in $nodes
do
echo $node
dir=./nodedata/$day/$node
mkdir -p $dir
file=$dir/${dt}.log
url="http://${node}:${nmport}/ws/${version}/node/containers"
echo $url
curl -H "$accept" -X GET "$url" -o $file 2>/dev/null
done
| true
|
7b9193b923d9a34f02848dc542cfaa0bfe6e8583
|
Shell
|
just-paja/optimics-webpack-hello
|
/install.sh
|
UTF-8
| 133
| 2.75
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Run `npm install` in every example directory directly under examples/.
for example in $(find examples -mindepth 1 -maxdepth 1 -type d); do
  # Use a subshell so a failed cd cannot leave us running npm in the wrong
  # place (the original's blind `cd ../..` assumed the earlier cd succeeded).
  (
    cd "$example" || exit 1
    npm install
  )
done
| true
|
d60766d5f60e2d65fa8996d8eb7f28f86a9fe67e
|
Shell
|
B9mkr/bashScripts
|
/comInfo.sh
|
UTF-8
| 3,141
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
# echo "in - information about my scripts ex. [in]"
echo "ll - ls -alF ex. [ll]"
echo "la - ls -A ex. [la]"
echo "l - ls -CF ex. [l]"
echo "t - ls -R ex. [t]"
echo "tl - ls -Rl ex. [tl]"
echo ".. - cd ../ ex. [..]"
echo "... - cd ../../ ex. [...]"
echo ".... - cd ../../../ ex. [....]"
echo "..... - cd ../../../../ ex. [.....]"
echo "cy - copy from file ex. [cy fileName]"
echo "pe - paste from file ex. [pe fileName]"
echo "pdf - open pdf file ex. [pdf file.pdf]"
echo "myupdate - max update ex. [myupdate]"
echo "tr - timer ex. [tr 5m]"
echo "pin - ping google ex. [pin 10 30s]"
echo "c - clear screen ex. [c]"
echo "psmem10 - report memory ex. [psmem10]"
echo "pscpu10 - report process ex. [pscpu10]"
echo "rpass - generate password ex. [rpass 8]"
# echo "tmp - see temperature in Lublin ex. [tmp]"
echo "untar - extract from .tar files ex. [untar file.tar]"
echo "extract - extract archive file ex. [extract file.rar]"
echo "myip - see my ip address ex. [myip]"
echo "m - simple calculator ex. [m 1+3-3*4]"
echo "sw - swopwatch ex. [sw]"
echo "q - exit terminal ex. [q]"
echo "eth - edit text to html on clipboard ex. [eth]"
echo "eht - edit html to text on clipboard ex. [eht]"
echo "ttr - text in russian on clipboard ex. [ttr]"
echo "coda - Copy from 'date' to clipboard ex. [data]"
echo "gscm - Generate script chapter my ex. [gscm nameDir 1 34]"
echo "gscme - Generate script chapter my edit ex. [gscme]"
echo "ec - edit chapters ex. [ec directory/*.html]"
echo "ece - edit script ex. [ece]"
echo "hmw - how mutch words in clipboard ex. [hmw]"
echo "ruFirefox - translate from clipboard to Firefox ex. [ruFirefox]"
echo "enterChapter - generate chapter from clipboard ex. [enterChapter directory lastChapter thisChapter]"
echo "eenterChapter - edit source script ex. [eenterChapter]"
echo "enterChapterN - generate chapter from clipboard ex. [eenterChapterN directory lastChapter thisChapter]"
echo "eenterChapterN - edit source script ex. [eenterChapterN]"
echo "tocGenerate - ex. [tocGenerate url]"
echo "etocGenerate - ex. [etocGenerate]"
| true
|
3a872854614b6055563d9d6a7cb8a4f9f9419b67
|
Shell
|
fika/Slack-SL-Bot
|
/test.sh
|
UTF-8
| 2,017
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
echo "" > /var/log/apache2/error.log
OLDCOUNT=0
dmy=$(date "+%a %b %d")
while :
do
NEWS=$(grep -P '\[\K[^\]]+' /var/log/apache2/error.log | grep "$dmy" | grep info | grep TxoLNNB1IIM7i3R2N2IghTtr)
COUNT=$(echo "$NEWS" | wc -l)
DIFF=$((COUNT-OLDCOUNT))
if [ $DIFF -gt 0 ]
then
traveltype=$(echo "$NEWS" | grep -oP '\&trigger_word=[^&]+' | grep -oP "\%21.*" | sed -e 's/%21//' | tail -n $DIFF)
if [ "$traveltype" = "buss" ]
then
siteid=$(/root/uppslag.pl $(echo "$NEWS" | grep -oP '\&text=[^&]+' | grep -oP "\+.*" | sed -e 's/+//' | tail -n $DIFF))
output=$(/root/buss.pl $siteid)
IFS=$'.'
for i in $output; do
echo $i
/root/post.sh $i
done
unset IFS
elif [ "$traveltype" = "tub" ]
then
siteid=$(/root/uppslag.pl $(echo "$NEWS" | grep -oP '\&text=[^&]+' | grep -oP "\+.*" | sed -e 's/+//' | tail -n $DIFF))
output=$(/root/tåg.pl $siteid)
IFS=$'.'
for i in $output; do
echo $i
/root/post.sh $i
done
unset IFS
elif [ "$traveltype" = "pendel" ]
then
siteid=$(/root/uppslag.pl $(echo "$NEWS" | grep -oP '\&text=[^&]+' | grep -oP "\+.*" | sed -e 's/+//' | tail -n $DIFF))
output=$(/root/pendel.pl $siteid)
IFS=$'.'
for i in $output; do
echo $i
/root/post.sh $i
done
unset IFS
fi
fi
OLDCOUNT=$COUNT
done
| true
|
2eb3537f6af2a2c1b884ebacb86d33881cbcd460
|
Shell
|
h4ckl4bm3/ravstack
|
/share/clean.sh
|
UTF-8
| 442
| 2.578125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
for home in /root /home/*; do
sed -i '/ovirt\|ravstack/!d' $home/.ssh/authorized_keys
rm -f $home/.bash_history
done
rm -f /var/lib/cloud/instance
rm -rf /var/lib/cloud/instances/*
find /var/lib/cloud -type f | xargs rm -f
rm -f /var/lib/dhclient/*.lease
rm -f /var/run/ravstack/*
cp -f /dev/null /var/run/utmp
cp -f /dev/null /var/log/btmp
cp -f /dev/null /var/log/wtmp
which updatedb >/dev/null 2>&1 && updatedb
sync
| true
|
b763422eaba539f0783de860507c6450f5d9ee77
|
Shell
|
JeepGuy/bash_scripts
|
/BASH tips/functions.sh
|
UTF-8
| 3,954
| 4.15625
| 4
|
[] |
no_license
|
#!/bin/bash
#
# This script demonstrate functions.
# ------------------------------------------------------------------------------------
# Set Global Variables first so they are always defined before being called.
#------------- Sample below
GLOBAL_VAR1="one"
GLOBAL_VAR2="two"
# Create Functions second - so they are read into the current bash session second
# If you create a gloval variable by ommitting the local keyword...
# that variable is not available outside the function until that function is called and executed.
# it sia best practice to use only local variable in a funciton
# that way you won't accidentially use the same name for a variable elsewhere and
# create a hard to find bug in your program.
#------------- Two Samples below
function function_one {
local LOCAL_VAR1="one"
# <Replace with function code.>
echo "This script called the sample function (function_one) which does nothing (except print this line)."
}
# call the function ------- just write the name of the funciton like a linux scommand
# function_one
# different way.
function_two() {
local LOCAL_VAR2="two"
# <Replace with function code.>
echo "This script called the sample function (function_two) which does nothing (except print this line)."
}
#call the function two
# function_two
#Prefferred way...
# The log function.
#
# log() {
# # The local variable is scoped to the function only...
# local VERBOSE="${1}"
# shift # kicks out the first argument and re-numbers all the remaing arguments.
# local MESSAGE="${@}"
# # @ expands to all positional parameters starting from 1 (not 0)
# # $0 is still that name of the shell script itself... not the function...
# # echo '${MESSAGE}' this does not expand the variable so it is wrong.
# if [[ "${VERBOSE}" = 'true' ]]
# then
# echo "${MESSAGE}"
# fi
# }
#
# # Body of the script
# # ------------------
# # call the function
# log 'true' 'Hello!'
# # ########## no longer needed... VERBOSE='true'
# # or... using a global variable... Which is often called evil or dangerous.
# VERBOSITY='true'
# echo
# log "${VERBOSITY}" 'This is fun!'
# echo
log() {
# Function sends a message syslog and to STDOUT if verbose is true.
local MESSAGE="${@}"
if [[ "${VERBOSE}" = 'true' ]]
then
echo "${MESSAGE}"
fi
logger -t luser-demo10.sh "${MESSAGE}"
}
backup_file() {
# Back up script for a file Returns non-zero statu on error.
# Files in /tmp will not survive a reboot... file in tmp are cleared on boot and cleared more often n a running system
# centos 7 - default /tmp are deleted every 10 and /var/tmp are cleared every 30 days.
# AND /var/tmp will survive a reboot.
local FILE="${1}"
# Make sure the file exists.
if [[ "${FILE}" ]]
then
local BACKUP_FILE="/var/tmp/$(basename ${FILE}).$(date +%F-%N)"
# basename command strips out the path and leaves the filename.
log "Backing up ${FILE} to ${BACKUP_FILE}."
# The exit status of the function will be the exit status of the cp command.
cp -p ${FILE} ${BACKUP_FILE}
# -p stands for preserves = preserves the files mode, ownership, and timestamps
else
# The files doesn't exit, so return a non-zero exit status.
return 1 # if you use the exit keyword/command it will kill the script
fi
}
# Body of the script
# ------------------
# call the function
readonly VERBOSE='true' # readonly is the shell version of a constant variable.
#### This eliminates the danger of a global variable being reset - if appropriate for your script.
# optionally syntax... VERBOSE='true' readonly
# type -a logger .... command line utility executiabl;e so use the man page.
# -t option is the key one for this case.
echo
log 'Hello!'
log 'This is fun!'
echo
backup_file '/etc/passwd'
# Make a decision based on the exit status of the function.
if [[ "${?}" -eq '0' ]]
then
log 'File Back up succeded'
else
log 'File backup failed!'
exit 1
fi
| true
|
01cb7376a958eb6dd77da268151053d3a4e2e7f3
|
Shell
|
fivetwentysix/brigade-matchmaker
|
/test/session/test_successful_create.sh
|
UTF-8
| 616
| 3.25
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
echo "Test the create user and session"
EPOCH=`date +%s`
echo "TestID=$EPOCH, Using cookies-$EPOCH.txt"
# move to the directory of the script
BINDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd $BINDIR
URL="http://localhost:5465/api/user/create_and_login"
echo "Calling $URL"
wget -qO- \
--save-cookies cookies-$EPOCH.txt \
--keep-session-cookies \
--post-data "email=designforsf#$EPOCH@gmail.com&username=designforsf#$EPOCH@gmail.com&password=$EPOCH" \
$URL
sleep 1
echo
URL="http://localhost:5465/api/user/session"
echo "Calling $URL"
wget -qO- \
--load-cookies cookies-$EPOCH.txt \
$URL
echo
echo "Done!"
| true
|
8ef6d6a9661f869ca3c39cf8a49a7d64d5553b82
|
Shell
|
06094051/Scripts-Utils
|
/tmp_read.sh
|
UTF-8
| 144
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/bash
# Print every comma-separated field of each line in the repo list:
#   "ii:<field>" for each field, then "jj:<second field>" per line.

# Read one record per line. (The original `for line in $(cat …)` split on
# any whitespace and leaked IFS="," into the rest of the shell session.)
print_fields() {
    local file=$1 line str
    local -a arr
    while IFS= read -r line || [ -n "$line" ]; do
        IFS="," read -r -a arr <<< "$line"
        for str in "${arr[@]}"; do
            echo 'ii:'"$str"
        done
        echo 'jj:'"${arr[1]}"
    done < "$file"
}

# Default to the original hard-coded list; allow overriding via $1.
repo_file=${1:-/root/Scripts/repo_name}
if [ -f "$repo_file" ]; then
    print_fields "$repo_file"
fi
| true
|
7c9b4c72ba5901add6ead0a87e97d59f826f0754
|
Shell
|
valery-zhurbenko/vlc_record
|
/create_deb.sh
|
UTF-8
| 5,018
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
#-----------------------------------------------------------------------
# Finally I found the time (and the need) to create a more or less
# good loking deb package!
# Many thanks to Ilja for the first steps already done!
# (c)2014 By Jo2003 All rights reserved!
#-----------------------------------------------------------------------
BIN_NAME=${1}
PACKAGE=$(echo -n ${BIN_NAME} | sed 's/_/-/')
OFF_NAME=${2}
VER="${3}-$(date +%Y%m%d)"
SERVICE=${4}
ARCH=${5}
QTVER=${6}
BUILD_FOLDER="$(pwd)/packages/${BIN_NAME}_${VER}_${ARCH}"
usage () {
echo "Usage: ${0} [binary name] [official name] [version] [Service] [arch] [qt major version]"
}
create_folders() {
rm -rf ${BUILD_FOLDER}
mkdir -p "${BUILD_FOLDER}/usr/bin"
mkdir -p "${BUILD_FOLDER}/usr/share/${BIN_NAME}/language"
mkdir -p "${BUILD_FOLDER}/usr/share/${BIN_NAME}/modules"
mkdir -p "${BUILD_FOLDER}/usr/share/${BIN_NAME}/doc"
mkdir -p "${BUILD_FOLDER}/usr/share/${BIN_NAME}/resources"
mkdir -p "${BUILD_FOLDER}/usr/share/doc/${PACKAGE}"
mkdir -p "${BUILD_FOLDER}/usr/share/man/man7"
mkdir -p "${BUILD_FOLDER}/usr/share/applications"
mkdir -p "${BUILD_FOLDER}/DEBIAN"
}
copy_content() {
strip -s release/${BIN_NAME}
cp -f release/${BIN_NAME} "${BUILD_FOLDER}/usr/bin/"
cp -f *.qm "${BUILD_FOLDER}/usr/share/${BIN_NAME}/language/"
cp -f eula_*.txt "${BUILD_FOLDER}/usr/share/${BIN_NAME}/language/"
cp -f modules/*.mod "${BUILD_FOLDER}/usr/share/${BIN_NAME}/modules/"
cp -f qhc/${OFF_NAME}/* "${BUILD_FOLDER}/usr/share/${BIN_NAME}/doc/"
cp -f resources/${BIN_NAME}.png "${BUILD_FOLDER}/usr/share/${BIN_NAME}/"
}
create_desktop_file() {
cat << EOF > "${BUILD_FOLDER}/usr/share/applications/${OFF_NAME}.desktop"
[Desktop Entry]
Name=${OFF_NAME}
Comment=A tool to watch and record IPTV program streams from ${SERVICE}.
Exec=/usr/bin/${BIN_NAME}
Terminal=false
Type=Application
Icon=/usr/share/${BIN_NAME}/${BIN_NAME}.png
Categories=AudioVideo;Player;Recorder
EOF
}
create_control_file() {
cat << EOF > "${BUILD_FOLDER}/DEBIAN/control"
Package: ${PACKAGE}
Version: ${VER}
Section: video
Priority: extra
Architecture: ${ARCH}
Installed-Size: $(($(du -b --max-depth=0 ${BUILD_FOLDER}/usr|gawk '{print $1}') / 1024))
EOF
if [ "${QTVER}" == "4" ] ; then
cat << EOF >> "${BUILD_FOLDER}/DEBIAN/control"
Depends: gtk2-engines-pixbuf (>= 2.24.10), libqt4-help (>= 4:4.8.1), libqt4-network (>= 4:4.8.1), libqt4-sql-sqlite (>= 4:4.8.1), libqt4-xml (>= 4:4.8.1), libqtcore4 (>= 4:4.8.1), libqtgui4 (>= 4:4.8.1), libvlc5 (>= 2.0.3), vlc (>= 2.0.3), libc6 (>= 2.13)
EOF
else
cat << EOF >> "${BUILD_FOLDER}/DEBIAN/control"
Depends: gtk2-engines-pixbuf (>= 2.24.10), libqt5help5 (>= 5.0.2), libqt5network5 (>= 5.0.2), libqt5sql5-sqlite (>= 5.0.2), libqt5xml5 (>= 5.0.2), libqt5core5a (>= 5.0.2), libqt5gui5 (>= 5.0.2), libvlc5 (>= 2.0.8), vlc (>= 2.0.8), libc6 (>= 2.15)
EOF
fi
cat << EOF >> "${BUILD_FOLDER}/DEBIAN/control"
Maintainer: Jo2003 <olenka.joerg@gmail.com>
Description: IPTV program stream player for ${SERVICE}
It uses the Qt framework as well as libVLC from VLC player.
Please note: You need to buy a subscription from ${SERVICE} to find this
program to be useful!
Homepage: http://jo2003.github.io/vlc_record/
EOF
}
create_changelog() {
cat << EOF | gzip -9 -c >"${BUILD_FOLDER}/usr/share/doc/${PACKAGE}/changelog.Debian.gz"
${PACKAGE} (${VER}) precise; urgency=minor
* New release, makes us really happy!
-- Jo2003 <olenka.joerg@gmail.com> $(date -R)
EOF
sed -n -e 's/^|\(.*\)$/\1/p' version_info.h | gzip -9 -c >"${BUILD_FOLDER}/usr/share/doc/${PACKAGE}/changelog.gz"
}
create_deb() {
dpkg-deb --build "${BUILD_FOLDER}"
}
create_man_page() {
cat << EOF | gzip -9 -c > "${BUILD_FOLDER}/usr/share/man/man7/${BIN_NAME}.7.gz"
.\" Manpage for ${BIN_NAME}.
.\" Contact olenka.joerg@gmail.com to correct errors or typos.
.TH man 7 "$(date -R)" "1.0" "${BIN_NAME} man page"
.SH NAME
${BIN_NAME} \- starts ${OFF_NAME} in GUI mode
.SH SYNOPSIS
${BIN_NAME}
.SH DESCRIPTION
For me it doesn't look like this program needs a man page. Nevertheless lintian wants one - so here it is. If you need help with the program use the nice looking in program help.
.SH OPTIONS
No options so far.
.SH BUGS
There are for sure bugs. If you find one please contact the author!
.SH AUTHOR
Jo2003 (olenka.joerg@gmail.com)
EOF
}
create_copyright_file() {
cat << EOF > "${BUILD_FOLDER}/usr/share/doc/${PACKAGE}/copyright"
${OFF_NAME}
Copyright 2010-$(date +%Y) Jörg Neubert (olenka.joerg@gmail.com)
All rights reserved!
This program uses the libVLC from Videolans VLC-Player (http://www.videolan.org)
and the Qt framework (c) by Trolltech, Nokia, Digia, Qt-Project, who knows ...
${OFF_NAME} is released under the GPL 3.
A copy of this license can be found here: /usr/share/common-licenses/GPL-3 .
Many thanks to Ilja(s), Victor, Sascha, Dima!
For Olenka!
EOF
}
if [ ${#} -lt 6 ] ; then
usage
exit 1
fi
create_folders
copy_content
create_desktop_file
create_changelog
create_man_page
create_copyright_file
create_control_file
create_deb
| true
|
2af39b7620b516cda1c24e54f68f460133f08af8
|
Shell
|
AndreyAgafonov/OTUS
|
/lesson13/ansible/provision.sh
|
UTF-8
| 1,109
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
#===================VARIABLES========================
vagrant_dir="${vagrant_work_dir:-}" # /vagrant/'MachineName'
# NOTE: the original read `work_dir= "/ansible"`; the space after `=` made
# the shell run /ansible as a command with an empty work_dir in its env,
# leaving work_dir unset for the rest of the script.
work_dir="/ansible"
#===================PROVISIONING=====================
# Install ansible (requires EPEL) plus mc.
yum install -y epel-release
yum install -y ansible mc
#sudo easy_install pip
#pip install docker docker-py docker-compose
# Restrict permissions on the private key so ssh accepts it.
chmod 0600 /vagrant/ansible.pem
# Create the working directory...
mkdir "$work_dir"
# ...and populate it from the Vagrant share.
cp -R "$vagrant_dir" "$work_dir"
# Change the working directory.
cd "$work_dir"
# Test of role dependencies - docker <- prepare
cd /vagrant/ansible/roles
# Install Kibana.
ansible-playbook install_kibana.yml
# Install Nginx.
ansible-playbook install_nginx.yml
# Install the remote log collection server.
ansible-playbook install_log.yml
| true
|
746e9ffd429a9a333e7ec4c6ad68d51ed87a55aa
|
Shell
|
phoenix-rtos/phoenix-rtos-project
|
/docker-build.sh
|
UTF-8
| 1,338
| 3.71875
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
DOCKER_IMG_NAME=phoenixrtos/build
DOCKER_USER="$(id -u):$(id -g)"
TMPFS_OVERLAY=()
# FIXME: due to building as root in docker the tmpfs approach doesn't work
# if [ "$(uname)" = "Darwin" ]; then
# # I/O operations on bind mounts in Darwin are painfully slow - use tmpfs for intermediate build artifacts
# chmod 777 "_build" # fix against invalid tmpfs permissions
# TMPFS_OVERLAY=("--tmpfs" "/src/_build:exec")
# fi
if [ "$#" -eq 1 ] && [ "$1" = "bash" ]; then
# run interactive shell - using ROOT user
exec docker run -it --rm -v "$(pwd):/src" -w /src -e TARGET -e SYSPAGE -e CONSOLE --entrypoint bash $DOCKER_IMG_NAME
else
# FIXME: run build - use our own UID/GID to create files with correct owner
#exec docker run -it --user "$DOCKER_USER" --rm -v "$(pwd):/src:delegated" -w /src "${TMPFS_OVERLAY[@]}" -e TARGET -e SYSPAGE -e CONSOLE $DOCKER_IMG_NAME "$@"
# FOR NOW: run build as root to be able to overwrite files installed in toolchain
docker run -it --rm -v "$(pwd):/src:delegated" -w /src "${TMPFS_OVERLAY[@]}" -e TARGET -e SYSPAGE -e CONSOLE $DOCKER_IMG_NAME "$@"
# FIX file ownership in "_build"
docker run -it --rm -v "$(pwd):/src:delegated" -w /src "${TMPFS_OVERLAY[@]}" --entrypoint bash $DOCKER_IMG_NAME -c "chown -R $DOCKER_USER _build/ _fs/$TARGET _boot"
fi
| true
|
fdc78ba1492e1f1419f1ab553f5831456f63f6ab
|
Shell
|
nickwhitman1993/SAAF-MFEM
|
/buildScripts/qiLibPar.sh
|
UTF-8
| 432
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/bash
# This script uses `set -o pipefail` and `[[ ]]`, which are bash features,
# so the shebang must be bash rather than plain /bin/sh.
set -e
set -o pipefail

# Print a short usage message.
usage ()
{
  echo
  echo Usage:
  echo "    $0 compiler_options_file"
  echo
}

if [[ $# -ne 1 ]]; then
  echo
  echo ERROR: Need to specify file with compiler options.
  usage
else
  if [ -e "$1" ]; then
    # Source the compiler options, version pins, and the installer itself.
    source ./"$1"
    source ./versions.sh
    source ./QuickInstallLibOMPMPI.sh
  else
    echo
    echo ERROR: compiler options file \'$1\' not found.
    usage
  fi
fi
| true
|
d1e46b309fdb17259283963720fe27a5677fa77f
|
Shell
|
cheeseit/RP2
|
/code/shell/opennebula_run.sh
|
UTF-8
| 840
| 3.84375
| 4
|
[] |
no_license
|
#! /bin/bash
# Create multiple instances of the same OpenNebula image, then collect each
# machine's IP address into a ./hosts file (one "root@<ip>" entry per line).
MACHINES=""

# Start with a fresh hosts file.
if [[ -f hosts ]]
then
	rm hosts
fi

# Parse command-line options.
while getopts "m:f:" opt; do
	case $opt in
	m)
		# The number of machines to instantiate (passed through to onevm).
		MACHINES="-m $OPTARG"
		;;
	f)	# The template file for the virtual machine.
		FILE="$OPTARG"
		;;
	esac
done

# Make the machines and capture their IDs from onevm's "...: <ids>" output.
# ($MACHINES is deliberately unquoted so "-m N" splits into two words.)
ID=$(onevm create $MACHINES $FILE | cut -d ":" -f 2)
echo $ID

# Record each machine's IP as root@<ip>. (The original wrapped this line in
# $(...), which executed the pipeline's — empty, redirected — output as a
# command; the substitution was unnecessary and has been removed.)
for i in $ID
do
	onevm show $i | grep "IP" | grep -oP '\d.+\d' | gawk '{ print "root@" $0 }' >> hosts
done
| true
|
b7fd934b32f4e6968926bd9791da668946f88413
|
Shell
|
unitial/snippet
|
/bash/word-cnt.sh
|
UTF-8
| 299
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/sh
# Count occurrences of each (lower-cased, de-punctuated) word in a file.
# Writes the same intermediate files as the original (step1-3.txt in the
# current directory) and prints "word count" lines to stdout.
# NOTE: the original shebang was "#/bin/sh" (missing '!'), so it was just a
# comment and the script ran under whatever shell invoked it.

count_words() {
    input=$1
    # step1: one lower-cased word per line, with ;:,.? stripped.
    tr -s ' ' '\n' < "$input" | tr '[A-Z]' '[a-z]' | sed 's/[;:,.?]//' > step1.txt
    # step2: the sorted set of unique words.
    sort < step1.txt | uniq > step2.txt
    # step3: "<word> <count>" per unique word. printf replaces the
    # non-portable `echo "\c"` no-newline idiom, and grep -c replaces
    # the original `grep | wc -l | sed` pipeline.
    printf '' > step3.txt
    for word in `cat step2.txt`; do
        printf '%s ' "$word" >> step3.txt
        grep -c -w "$word" step1.txt >> step3.txt
    done
    cat step3.txt
}

# Preserve the original entry point: operate on ./words.txt when present.
if [ -f words.txt ]; then
    count_words words.txt
fi
| true
|
24dad0e163fba649eafbaa94acdc5c2b93a3e8e1
|
Shell
|
ifapmzadu6/docker-go-sql-memcached
|
/run.sh
|
UTF-8
| 495
| 2.703125
| 3
|
[
"MIT"
] |
permissive
|
# Change to the directory containing this script.
cd `dirname $0`
echo $MYSQL_ADDR
echo $MYSQL_PASSWORD
# Delete all containers
docker rm $(docker ps -a -q)
# Delete all images
docker rmi $(docker images -q)
# Build the image from scratch (no cache, remove intermediate containers).
docker build --no-cache --rm -t test:0.1.0 .
# Stop and remove any instance left over from a previous launch.
docker stop hoppin
docker rm hoppin
# Run the container: forward 443 -> 8080, pass the MySQL settings through
# the environment, and mount ./ssl read-only.
docker run -d -p 443:8080 -e MYSQL_ADDR=$MYSQL_ADDR -e MYSQL_PASSWORD=$MYSQL_PASSWORD -v $(pwd)/ssl:/ssl:ro --name hoppin test:0.1.0
| true
|
936b1372d70fe792be53a37d30803d6acd94c9d5
|
Shell
|
jhx0/deb-mkdesk
|
/deb-mkdesk
|
UTF-8
| 7,683
| 3.53125
| 4
|
[
"ISC"
] |
permissive
|
#!/usr/bin/env bash
##
# Variables
##
PKG_MANAGER='apt' # change this if you need to
USERNAME='x'
HOSTNAME="`hostname -f`" # change this only if you need something special
DOTFILES_DIR='dots'
DOTFILES_URL='https://github.com/jhx0/dotfiles.git'
##
# Features
##
SSD="YES" # is a SSD used?
VBOX="YES" # install VirtualBox?
VSCODE="YES" # install VS Code?
CHROME="YES" # install Google Chrome?
DOCKER="YES" # install Docker?
SALT="YES" # install Salt Master?
NVIDIA="NO" # install Nvidia video driver?
INTEL="NO" # install Intel video driver?
##
# External packages
##
PAPER_URL='https://snwh.org/paper/download.php?owner=snwh&ppa=ppa&pkg=paper-icon-theme,18.04'
VSCODE_URL='https://go.microsoft.com/fwlink/?LinkID=760868'
CHROME_URL='https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb'
##
# External repos
##
VBOX_REPO='deb [arch=amd64] https://download.virtualbox.org/virtualbox/debian buster contrib'
##
# Sysctl config
##
SYSCTL_CONF="net.ipv4.conf.all.rp_filter=1
net.ipv4.tcp_syncookies=1
net.ipv4.conf.all.accept_redirects = 0
net.ipv6.conf.all.accept_redirects = 0
net.ipv4.conf.all.send_redirects = 0
net.ipv4.conf.all.accept_source_route = 0
net.ipv6.conf.all.accept_source_route = 0
net.ipv4.conf.all.log_martians = 1
kernel.sysrq=1
vm.swappiness=10"
##
# extra packages
##
EXTRAS='pcmanfm lxappearance'
##
# Color definitions
##
RED="\033[1;31m"
GREEN="\033[1;32m"
RESET="\033[0m"
##
# Helper functions
##
info() {
	# Print a tagged status line: "[deb-mkdesk] :: <message>".
	# %b interprets the escape sequences stored in the color variables,
	# exactly as the original `echo -e` did.
	printf '%b\n' "[${RED}deb-mkdesk${RESET}] ${GREEN}::${RESET} $1"
}
##
# Main functions
##
#
# install/setup sudo
#
setup_sudo() {
info "Setup sudo"
su -c "$PKG_MANAGER install sudo && echo \"${USERNAME} ALL=(ALL:ALL) NOPASSWD: ALL\" > /etc/sudoers.d/x"
}
#
# install git
#
setup_git() {
info "Install git"
sudo $PKG_MANAGER install git -y
}
#
# clone dotfiles and install them
#
setup_dotfiles() {
info "Clone and install dotfiles"
# clean home directory before deploying the dotfiles
rm -rf /home/${USERNAME}/.*
git clone $DOTFILES_URL $DOTFILES_DIR &>/dev/null
cd dots && ./install && cd ~
}
#
# install / setup postfix
#
setup_postfix() {
info "Installing and configuring Postfix"
sudo debconf-set-selections <<< "postfix postfix/mailname string $HOSTNAME"
sudo debconf-set-selections <<< "postfix postfix/main_mailer_type string 'Local only'"
sudo $PKG_MANAGER install postfix -y
}
#
# install all packages
#
install_pkgs() {
info "Installing all packages"
# setup wireshark-common before package install
sudo debconf-set-selections <<< "wireshark-common wireshark-common/install-setuid boolean true"
sudo $PKG_MANAGER install $(cat ${DOTFILES_DIR}/.debian/debian-pkg | grep -v "^#") -y
}
#
# setup ntp sync
#
setup_ntp() {
info "Setup NTP Synchronization"
sudo timedatectl set-ntp true
echo -e "[Time]\nNTP=de.pool.ntp.org\n" | sudo tee /etc/systemd/timesyncd.conf >/dev/null
sudo systemctl restart systemd-timesyncd
}
#
# setup fstrim, if needed
#
setup_fstrim() {
if [ "$SSD" == "YES" ]; then
info "Setup Filesystem Trim"
sudo systemctl enable fstrim.timer
fi
}
#
# change grub settings
#
setup_grub() {
info "Setup Grub"
sudo sed -i 's/GRUB_CMDLINE_LINUX_DEFAULT\=\"quiet\"/GRUB_CMDLINE_LINUX_DEFAULT\=\"\"/g' /etc/default/grub
sudo chmod ugo-x /etc/grub.d/05_debian_theme
sudo update-grub2 &>/dev/null
}
#
# setup group membership
#
setup_groups() {
info "Adding user to different groups"
sudo gpasswd -a $USERNAME adm
sudo gpasswd -a $USERNAME wireshark
sudo gpasswd -a $USERNAME kvm
}
#
# install virtualbox
#
install_virtualbox() {
if [ "$VBOX" == "YES" ]; then
info "Installing VirtualBox"
sudo sh -c "echo \"$VBOX_REPO\" > /etc/apt/sources.list.d/vbox.list"
/bin/wget -q https://www.virtualbox.org/download/oracle_vbox_2016.asc -O- | sudo apt-key add -
sudo $PKG_MANAGER update && sudo $PKG_MANAGER install virtualbox-6.1 -y
sudo $PKG_MANAGER install -f -y
sudo gpasswd -a $USERNAME vboxusers
fi
}
#
# setup sysctl.conf
#
setup_sysctl() {
info "Setup Sysctl"
sudo sh -c "echo \"$SYSCTL_CONF\" > /etc/sysctl.d/custom.conf"
}
#
# setup noclear on tty1
#
setup_noclear() {
info "Setup noclear on getty (TTY1)"
sudo mkdir -p /etc/systemd/system/getty@tty1.service.d/
sudo sh -c "echo \"[Service]\nTTYVTDisallocate=no\n\" > /etc/systemd/system/getty@tty1.service.d/noclear.conf"
}
#
# install extras
#
install_extras() {
info "Installing extras"
sudo $PKG_MANAGER install --no-install-recommends $EXTRAS -y
}
#
# install visual studio code
#
install_vscode() {
if [ "$VSCODE" == "YES" ]; then
info "Installing Visual Studio Code"
wget -q $VSCODE_URL -P /tmp
sudo dpkg -i /tmp/code*.deb
sudo $PKG_MANAGER install -f -y
sudo rm -rf /tmp/code*.deb
fi
}
#
# configure fonts
#
setup_fonts() {
info "Configuring fonts"
sudo ln -sf /usr/share/fontconfig/conf.avail/10-hinting-slight.conf /etc/fonts/conf.d/
sudo ln -sf /usr/share/fontconfig/conf.avail/10-sub-pixel-rgb.conf /etc/fonts/conf.d/
}
#
# install google chrome
#
install_chrome() {
if [ "$CHROME" == "YES" ]; then
info "Installing Google Chrome"
wget -q $CHROME_URL -P /tmp
sudo dpkg -i /tmp/google*.deb
sudo $PKG_MANAGER install -f -y
sudo rm -rf /tmp/google*.deb
fi
}
install_docker() {
if [ "$DOCKER" == "YES" ]; then
info "Installing Docker"
curl -fsSL https://download.docker.com/linux/debian/gpg | sudo apt-key add -
sudo sh -c "echo \"deb [arch=amd64] https://download.docker.com/linux/debian buster stable\" > /etc/apt/sources.list.d/docker.list"
sudo $PKG_MANAGER update
sudo $PKG_MANAGER install docker-ce docker-ce-cli containerd.io -y
sudo gpasswd -a $USERNAME docker
fi
}
install_salt() {
if [ "$SALT" == "YES" ]; then
info "Installing Salt Master"
wget -O - https://repo.saltstack.com/py3/debian/10/amd64/latest/SALTSTACK-GPG-KEY.pub | sudo apt-key add -
sudo sh -c "echo \"deb http://repo.saltstack.com/py3/debian/10/amd64/latest buster main\" > /etc/apt/sources.list.d/saltstack.list"
sudo $PKG_MANAGER update
sudo $PKG_MANAGER install salt-master -y
fi
}
setup_firewall() {
  # Install nftables and write a minimal host firewall: accept loopback,
  # SSH (22/tcp) and established/related traffic; reject everything else
  # inbound, drop all forwarding, allow all outbound.
  info "Setting up firewall"
  # make sure nftables are installed
  sudo $PKG_MANAGER install nftables -y
  sudo sh -c "cat<<EOT >/etc/nftables.conf
#!/usr/sbin/nft -f
flush ruleset
table inet filter {
  chain input {
    type filter hook input priority 0;
    meta iif lo accept;
    tcp dport 22 accept;
    ct state related,established accept;
    reject with icmp type port-unreachable;
  }
  chain forward {
    type filter hook forward priority 0;
    policy drop;
  }
  chain output {
    type filter hook output priority 0;
    policy accept;
  }
}
EOT
"
  # BUGFIX: the reject rule previously ended in ';;' — a stray empty
  # statement nft does not accept; it now ends with a single ';'.
  # Load the ruleset immediately (nftables.service re-applies it at boot).
  sudo nft -f /etc/nftables.conf
}
# Make zsh the login shell for the current user.
change_shell() {
# chsh prompts for the user's password, hence the explicit notice.
info "Changing default shell to ZSH. Enter your password: "
chsh -s /usr/bin/zsh
}
# Install the proprietary NVIDIA driver and settings tool (opt-in via NVIDIA=YES).
install_nvidia() {
  if [ "$NVIDIA" == "YES" ]; then
    info "Installing Nvidia video driver"
    # BUGFIX: the original omitted the "install" subcommand, so the package
    # manager was invoked with package names as its action and failed.
    sudo $PKG_MANAGER install nvidia-driver nvidia-settings -y
  fi
}
# Install the legacy Intel Xorg video driver (opt-in via INTEL=YES).
install_intel() {
  if [ "$INTEL" == "YES" ]; then
    info "Installing Intel video driver"
    # BUGFIX: the original omitted the "install" subcommand, so the package
    # manager was invoked with the package name as its action and failed.
    sudo $PKG_MANAGER install xserver-xorg-video-intel -y
  fi
}
# One-shot provisioning entry point: runs every setup/install step in order.
# The steps are defined earlier in this file; most honour YES/NO feature flags
# (VSCODE, CHROME, DOCKER, SALT, NVIDIA, INTEL, ...) set elsewhere.
main() {
info "Only execute this script once after a fresh install!"
setup_sudo
setup_git
setup_dotfiles
setup_postfix
install_pkgs
setup_ntp
setup_fstrim
setup_grub
setup_groups
install_virtualbox
setup_sysctl
setup_noclear
install_polybar
install_extras
install_vscode
setup_fonts
install_chrome
install_docker
install_salt
install_nvidia
install_intel
setup_firewall
change_shell
info "Done. You can now reboot your system. Enjoy!"
exit 0
}
main
| true
|
8bfe2320edb33ac796d8102b44f9cdfb3eee5953
|
Shell
|
vincent040/laboratory
|
/shell/utility/array.sh
|
UTF-8
| 162
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
# Demonstrate indexed-array iteration in bash: print each element as a[i]=value.
value="one two three four"
#a=($value)
a=(one two three four)
i=0
# BUGFIX: the original loop used "[ $i -le ${#a} ]"; ${#a} is the length of
# the STRING in a[0] ("one" -> 3), not the element count, so it only printed
# all four elements by coincidence. ${#a[@]} with -lt is the correct bound.
while [ $i -lt ${#a[@]} ]
do
echo "a[$i]=${a[$i]}"
i=$((i + 1))
done
| true
|
3e90d0e331faa5249b20d274089a4e6876a4525d
|
Shell
|
rjw41x/pwssec
|
/rjw/download_uaa.sh
|
UTF-8
| 4,764
| 3.875
| 4
|
[] |
no_license
|
#!/bin/bash
# Download one day's worth of UAA log archives from the S3 log bucket into a
# date-stamped local directory tree (LOCAL_DIR/YYYY/MM/DD). A run log makes
# the script idempotent per date unless "force" is passed as the 3rd arg.
if [ "$DEBUG_SCRIPT" = "True" ]
then
set -x
fi
PROCESS_LOG=~/logs/download_files
LOG_FILE=~/logs/download_uaa.out
# Remove the scratch files used to compare local vs. remote listings.
clean_up() {
rm -f /tmp/$$local_files /tmp/$$aws_files > /dev/null 2>&1
}
# Print usage plus the specific problem ($1), clean up and exit 2.
usage() {
echo "$0: /local/Directory date_string"
echo "/local/Directory must be writable"
echo "date_string = YYYY.MM.DD"
echo "Issue is $1"
clean_up
exit 2
}
> $PROCESS_LOG > /dev/null 2>&1
if [ $? != 0 ]
then
usage "cannot truncate $PROCESS_LOG"
fi
if [ ! -d $(dirname $LOG_FILE) ]
then
usage "Log directory $(dirname $LOG_FILE) does not exist. Aborting"
exit 1
fi
# Append a line to the run log.
log() {
echo $* >> $LOG_FILE
}
log "=================== $0 run at $(date) ====================="
if [ $# -lt 2 ]
then
usage "num_args"
fi
if [ ! -d "$1" ]
then
usage "directory $1 down not exist"
else
LOCAL_DIR=$1
fi
# RJW - has to be adjusted on every platform. several included here
TEST_DATE=$(echo $2 | sed 's,\.,/,g')
# validate the passed in date format
# RJW _ Mac only version
# date -j -f "%Y.%m.%d" "$2" > /dev/null 2>&1
# another version
# date -d $TEST_DATE > /dev/null 2>&1
date --date="$TEST_DATE" > /dev/null 2>&1
if [ $? == 1 ]
then
log "date_format $2"
usage "date_format"
else
DATE_STR=$2
fi
if [ "$3" != "force" ]
then
# before we start make sure we have NOT processed this DATE_STR
grep "$DATE_STR SUCCESS" $LOG_FILE > /dev/null 2>&1
if [ $? -eq 0 ]
then
log "$DATE_STR already processed - PROCEEDING"
log "===================================================="
exit 0
# download was a success previously
# usage "$DATE_STR already processed"
fi
fi
log $LOCAL_DIR
log $DATE_STR
echo $LOCAL_DIR
echo $DATE_STR
# start processing them as they appear
DATE_PATH=$(echo $DATE_STR | sed -e "s,\.,/,g")
DATE_PATH_PARTS=$(echo $DATE_STR | awk -F"." '{ printf("%s %s %s", $1, $2, $3 );}' )
LOCAL_PATH=$LOCAL_DIR
CUR_PATH=$(pwd)
# Build LOCAL_DIR/YYYY/MM/DD one level at a time (mkdir failures are ignored
# here and verified as a whole just below).
for part in $DATE_PATH_PARTS
do
cd $LOCAL_PATH
mkdir $part > /dev/null 2>&1
LOCAL_PATH="${LOCAL_PATH}/$part"
done
# make sure directory creation all worked
if [ -d ${LOCAL_DIR}/${DATE_PATH} ]
then
cd $CUR_PATH
else
log "path creation problem $LOCAL_DIR $DATE_PATH"
usage "path creation problem $LOCAL_DIR $DATE_PATH"
fi
# NOTE(review): DIR_EXISTS is hardcoded to false, so the "already downloaded"
# comparison branch below is dead code; only the else branch ever runs.
DIR_EXISTS=false
log "DIR EXISTS $DIR_EXISTS"
# process was already run it appears
if [ "$DIR_EXISTS" = true ]
then
# RJW - Start here
# 2016-07-01 23:15:37 163675409 prod-logsearch/2016/07/01/2016.07.01.22-e09820c2c576a58e82a7a47130952576.log.gz
# 2016-07-01 23:13:38 156657813 prod-logsearch/2016/07/01/2016.07.01.22-ede2e3ce19bc0c604250d782fd54966d.log.gz
# 2016-07-01 12:21:19 92321 prod-logsearch/2016/07/01/uaa-2016.07.01.11-1472f67f81e7c7d74ad7d54d14ebc2b2.log.gz
# 2016-07-01 12:26:51 89628 prod-logsearch/2016/07/01/uaa-2016.07.01.11-173f2d65685301e948b88e87e7ae8f42.log.gz
# check to be sure files already downloaded
ls ${LOCAL_DIR}/${DATE_PATH} > /tmp/$$local_files
# how many?
# FIXME: "ls /tmp/$$local_files | wc -l" lists the scratch file NAME (always
# one line); use "wc -l < /tmp/$$local_files" to count the captured entries.
local_num_files=$(ls /tmp/$$local_files | wc -l | awk '{ printf("%d",$1);}')
aws s3 ls s3://pivotal-cloudops-prod-log-archive/prod-logsearch/${DATE_PATH} --recursive | grep 'uaa' > /tmp/$$aws_files
# FIXME: same counting bug as above.
aws_num_files=$(ls /tmp/$$aws_files | wc -l | awk '{ printf("%d",$1);}')
# there are 2 directories after a successful run. Take those into account
# if we have the same # of files then skip download else redownload all - too hard to figure out where to start
# FIXME: $local_num_file is a typo for $local_num_files (expands to empty).
if [ $(($local_num_file+2)) -eq $aws_num_files ]
then
log "files already downloaded. Skipping"
AWS_COPY=0
else
# aws s3 cp s3://pivotal-cloudops-prod-log-archive/prod-logsearch/ $LOCAL_DIR --recursive --exclude '\*' --include "*uaa-${DATE_STR}*" | awk '{ print $NF }' > $PROCESS_LOG 2>&1
aws s3 cp s3://pivotal-cloudops-prod-log-archive/prod-logsearch/${DATE_PATH} /var/vcap/store/gpadmin/pwssec/${DATE_PATH} --recursive --exclude="*" --include="*uaa-${DATE_STR}*" | awk '{ print $NF }' > $PROCESS_LOG 2>&1
AWS_COPY=$?
fi
# first time for this date string
else
# get the remote files
aws s3 cp s3://pivotal-cloudops-prod-log-archive/prod-logsearch/${DATE_PATH} /var/vcap/store/gpadmin/pwssec/${DATE_PATH} --recursive --exclude="*" --include="*uaa-${DATE_STR}*" > /dev/null 2>&1
AWS_COPY=$?
fi
# RJW - need some sort of if to determine success/failure
if [ $AWS_COPY = 0 ]
then
echo "DOWNLOAD $DATE_STR SUCCESS"
log "$DATE_STR SUCCESS"
# clean up the working files
rm -f /tmp/$$local_files /tmp/$$aws_files > /dev/null 2>&1
else
log "$DATE_STR FAILED"
usage "DOWNLOAD $DATE_STR FAILED"
fi
log "======================== $0 FINISH $(date) ============================"
exit 0
| true
|
67a3c52518921bb06b419733724923c5fe80377c
|
Shell
|
beluganos/beluganos
|
/etc/test/ribs/test_ribs.sh
|
UTF-8
| 2,502
| 3.125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#! /bin/bash
# -*- coding: utf-8 -*-
# Test driver for the RIBS service: injects/withdraws BGP routes through two
# gobgpd instances (MIC on port 10001, RIC on port 10002) and dumps state.
GOBGP="$HOME/go/bin/gobgp"
MICOPT="-p 10001"
RICOPT="-p 10002"
RIBSBIN="../../../bin/ribs2c"
RIBSOPT="--ribs-addr localhost:50072"
MICDEV=micbr1
RICDEV=ricbr1
GWADDR=10.0.0.1
GWMAC=11:22:33:44:55:66
SLEEP=1
# VPNv4 test routes advertised via the MIC instance.
VPN1_PREFIX=100.0.10.0/24
VPN1_NEXTHOP=10.0.1.1
VPN1_RD=10:10
VPN1_RT=10:5
VPN1_LABEL=10101
VPN2_PREFIX=100.0.20.0/24
VPN2_NEXTHOP=10.0.2.1
VPN2_RD=20:10
VPN2_RT=20:5
VPN2_LABEL=10201
# Plain IPv4 test routes advertised via the RIC instance.
IP1_PREFIX=20.0.10.0/24
IP1_NEXTHOP=20.0.1.1
IP2_PREFIX=20.0.20.0/24
IP2_NEXTHOP=20.0.2.1
# Create the bridge device used by the RIC side (the MIC bridge creation is
# intentionally commented out).
do_init() {
echo "### init ###"
# sudo ip link add $MICDEV type bridge
sudo ip link add $RICDEV type bridge
}
# Remove the bridge created by do_init.
do_clean() {
echo "### clean ###"
#sudo ip link del $MICDEV
sudo ip link del $RICDEV
}
# Add or delete ($1 = add|del) the two VPNv4 test routes on the MIC instance.
do_mic_rib() {
CMD=$1
$GOBGP $MICOPT global rib $CMD -a vpnv4 $VPN1_PREFIX label $VPN1_LABEL rd $VPN1_RD rt $VPN1_RT \
nexthop $VPN1_NEXTHOP origin igp med 10 local-pref 110
$GOBGP $MICOPT global rib $CMD -a vpnv4 $VPN2_PREFIX label $VPN2_LABEL rd $VPN2_RD rt $VPN2_RT \
nexthop $VPN2_NEXTHOP origin igp med 10 local-pref 110
}
# Add or delete ($1 = add|del) the two IPv4 test routes on the RIC instance.
do_ric_rib() {
CMD=$1
$GOBGP $RICOPT global rib $CMD -a ipv4 $IP1_PREFIX nexthop $IP1_NEXTHOP origin egp med 10 local-pref 120
$GOBGP $RICOPT global rib $CMD -a ipv4 $IP2_PREFIX nexthop $IP2_NEXTHOP origin egp med 10 local-pref 120
}
# Dump both gobgpd RIBs (IPv4 and VPNv4) and the RIBS daemon's internal tables.
do_show() {
echo ""
echo "[MIC] IPv4 ----------"
$GOBGP $MICOPT global rib
echo ""
echo "[MIC] VPNv4 ---------"
$GOBGP $MICOPT global rib -a vpnv4
echo ""
echo "[RIC] IPv4 ----------"
$GOBGP $RICOPT global rib
echo ""
echo "[RIC] VPNv4 ---------"
$GOBGP $RICOPT global rib -a vpnv4
echo ""
echo "[RIBS] RICS -----"
$RIBSBIN $RIBSOPT dump rics
echo ""
echo "[RIBS] Nexthops -----"
$RIBSBIN $RIBSOPT dump nexthops
echo ""
echo "[RIBS] NexthopMap -----"
$RIBSBIN $RIBSOPT dump nexthop-map
}
do_usage() {
  # Print command-line help for this test driver.
  local prog=$0
  printf '%s\n' \
    "$prog <init | clean | show>" \
    "$prog mic <add-rib | del-rib>" \
    "$prog ric <add-rib | del-rib>"
}
# Dispatch on the first CLI argument (and, for mic/ric, the second).
case $1 in
init) do_init;;
clean) do_clean;;
show) do_show;;
mic)
case $2 in
add-rib) do_mic_rib add;;
del-rib) do_mic_rib del;;
*) do_usage;;
esac
;;
ric)
case $2 in
add-rib) do_ric_rib add;;
del-rib) do_ric_rib del;;
*) do_usage;;
esac
;;
*) do_usage;;
esac
| true
|
7d0e4f1a8f4c0377837e42d28d72a6036cf0314d
|
Shell
|
isabella232/BotADayBotAway
|
/2018/01/11/CheckIfBirdTooOld.sh
|
UTF-8
| 262
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
# Exit 1 if birdbot4's most recent tweet is older than 30 minutes (1800 s),
# exit 0 otherwise. Intended for use by a watchdog/cron check.
last_tweet=$(t timeline birdbot4 -n 1 -c | cut -d, -f 2 | sed -n 2p | xargs -I@ date --date="@" +%s)
now=$(date +%s)
elapsed=$(bc -l <<<"$now - $last_tweet")
[[ "$elapsed" -gt "1800" ]] && exit 1
exit 0
| true
|
04ed65f8df8fa14ad2fe1f05de91191b5ee38e8d
|
Shell
|
zgdkik/myops
|
/shell/日志分析脚本/日志截取分析平台后端脚本模板/rest/analyze_scripts/input_date.sh
|
UTF-8
| 1,814
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/bash
#decription: only for passport...
#ver: v1.0
#auth: by zhengxin20180604
#email: hzzxin@tairanchina.com
LOG_ANALYSE_DIR="/usr/local/weihu/passport_rest_log_analyze"
LOG_DIR="/usr/local/tomcat/logs"
HIS_LOG_DIR="/var/log/tomcat_log"
TEMP_DIR="${LOG_ANALYSE_DIR}/temp"
CUT_LOG_DIR="${LOG_ANALYSE_DIR}/logs"
LOG_TIMESTAMPS_FILE="$TEMP_DIR/timestamps.txt"
#create directory
mkdir -p $TEMP_DIR $CUT_LOG_DIR
## Read the log date to analyse ($1, any format "date -d" accepts) and copy
## the matching catalina.out into the analysis directory, extracting the
## historical archive first if necessary.
INPUT_DATE() {
# read -p "please input date: " DATE
DATE="$1"
DATE=$(date -d "$1" +%Y%m%d)
TIMESTAMPS="${DATE}_2359"
if [ x"${DATE}" = x"$(date +%Y%m%d)" ];then
SOURCE_LOG_FILE="${LOG_DIR}/catalina.out"
else
SOURCE_LOG_FILE="${HIS_LOG_DIR}/${TIMESTAMPS}/catalina.out"
if ls -l ${SOURCE_LOG_FILE} &>/dev/null;then
:
else
# Extract the per-day archive, then (if present) the inner catalina archive.
if ls -l ${HIS_LOG_DIR}/${TIMESTAMPS}.tar.gz &>/dev/null;then
tar xf ${HIS_LOG_DIR}/${TIMESTAMPS}.tar.gz -C ${HIS_LOG_DIR}/
fi
if ls -l ${SOURCE_LOG_FILE}_${TIMESTAMPS}.tar.gz &>/dev/null;then
tar xf ${SOURCE_LOG_FILE}_${TIMESTAMPS}.tar.gz -C ${HIS_LOG_DIR}/${TIMESTAMPS}
else
echo "no such compress file..."
exit 1
fi
fi
fi
## Create the directory that holds the extracted log
mkdir -p ${CUT_LOG_DIR}/${DATE}
ACCESS_FILE="${CUT_LOG_DIR}/${DATE}/catalina.out_${DATE}"
# Today's live log is always re-copied; historical logs only if not cached.
if [ x"${DATE}" = x"$(date +%Y%m%d)" ];then
\cp -a ${SOURCE_LOG_FILE} ${ACCESS_FILE}
else
if [ ! -f $ACCESS_FILE ];then
\cp -a ${SOURCE_LOG_FILE} ${ACCESS_FILE}
fi
fi
## Extract the timestamp column (text before the first '[') and save it
grep -E ".*\[" ${ACCESS_FILE} | awk -F'[' '{print $1}' >$LOG_TIMESTAMPS_FILE
}
| true
|
b3ec91dad5606f3e2fa340f6812cf6e994b348fa
|
Shell
|
ilventu/aur-mirror
|
/cplay/PKGBUILD
|
UTF-8
| 846
| 2.5625
| 3
|
[] |
no_license
|
# Maintainer: aksr <aksr at t-com dot me>
# Contributor: Murtuza Akhtari <inxsible at gmail com>
# Contriburor: Andrea Scarpino <bash.lnx@gmail.com>
pkgname=cplay
pkgver=1.49
pkgrel=3
pkgdesc="A curses front-end for various audio players."
arch=('i686' 'x86_64')
url="http://mask.tf.hut.fi/~flu/cplay/"
license=('GPL2')
depends=('ncurses' 'python')
backup=('etc/cplayrc')
install=cplay.install
source=(http://ftp.de.debian.org/debian/pool/main/c/$pkgname/${pkgname}_${pkgver}.orig.tar.gz)
md5sums=('fae9e13b8dafd98ffcd58cf9d6f92f33')
# NOTE(review): uses the legacy $startdir/{src,pkg} layout; modern makepkg
# provides $srcdir/$pkgdir and a separate package() step — confirm the
# targeted makepkg version before rebuilding.
build() {
cd $startdir/src/$pkgname-$pkgver
# Force the python2 interpreter in cplay's shebang.
sed -i '1s,env python,&2,' cplay
mkdir -p $startdir/pkg/usr/{bin,share/man/man1}
make || return 1
make cplayrc || return 1
# Relocate man pages to the standard share/man hierarchy.
sed -i 's|$(PREFIX)/man/man1|$(PREFIX)/share/man/man1|' Makefile
install -D -m644 cplayrc $startdir/pkg/etc/cplayrc
make PREFIX=$startdir/pkg/usr install
}
| true
|
b1edf0fe32da323599b0eecd823d22ac75839d7c
|
Shell
|
jie-d-cheng/linuxBash
|
/filecreate.sh
|
UTF-8
| 375
| 3.9375
| 4
|
[] |
no_license
|
#!/bin/bash
# Create N empty files /tmp/<prefix>1 .. /tmp/<prefix>N, where N and the
# prefix are read interactively from the user.
read -p "please input number and prefix: " num prefix
# The count must be a non-negative integer.
if [[ ! "$num" =~ ^[0-9]+$ ]];then
echo "your input $num is not a number."
exit
fi
if [ -z "$prefix" ];then
echo "please input prefix which is more than one char."
exit
fi
for i in `seq $num`
do
filename=$prefix$i
# BUGFIX: quote the path (the prefix may contain spaces or glob
# characters) and test touch's status directly instead of via $?.
if touch "/tmp/$filename"; then
echo "$filename created..."
fi
done
| true
|
8f134687c119c16fb378c5be36e23654c56ae137
|
Shell
|
wickedshimmy/dotfiles
|
/.zshrc
|
UTF-8
| 5,935
| 2.84375
| 3
|
[] |
no_license
|
# As advised in the emerge log by the always helpful Gentoo team
autoload -U compinit promptinit
compinit
promptinit
# Moved this out of the setprompt() fn
# Thanks to littleprince.zh (http://w-a-n.cn)
autoload colors
zmodload zsh/terminfo
if [[ "$terminfo[colors]" -gt 8 ]]; then
colors
fi
# More recommendations from Gentoo
# http://www.gentoo.org/doc/en/zsh.xml
zstyle ':completion::complete:*' use-cache 1
zstyle ':completion:*:descriptions' format '%U%B%d%b%u'
zstyle ':completion:*:warnings' format '%BSorry, no matches for: %d%b'
# Allow Ctrl-x f to disable cleverness in tab-selection
# Thanks to ft in #git
zle -C complete-files complete-word _generic;
zstyle ':completion:complete-files:*' completer _files
bindkey '^xf' complete-files
# Shared history settings: 10k entries, no consecutive duplicates,
# Ctrl-R incremental reverse search.
HISTSIZE=10000
SAVEHIST=10000
HISTFILE=~/.history
setopt histignoredups
bindkey ^R history-incremental-search-backward
# Enable colors for ls, etc. Prefer ~/.dir_colors #64489
# Ripped from the Gentoo skeleton bashrc I have
if ls --color -d . >/dev/null 2>&1; then
alias ls="ls --color=auto"
if [[ -f ~/.dir_colors ]]; then
eval $(dircolors -b ~/.dir_colors)
else
eval $(dircolors -b /etc/DIR_COLORS)
fi
elif ls -G -d . >/dev/null 2>&1; then
# BSD/macOS ls uses -G for colour output.
alias ls="ls -G"
fi
# precmd is a built-in zsh function that is called on each redraw
# Used here to set the dynamic variables (GITBRANCH, etc.)
# (More thanks for Aaron Toponce)
# Runs before every prompt: refreshes GITBRANCH and caps the prompt width.
function precmd {
# Ripped from git's bash completion scripts, via Aaron Toponce
# Prints the current branch name plus an operation suffix such as
# |REBASE, |MERGING or |BISECTING; prints nothing outside a git repo.
git_ps1 () {
if which git > /dev/null; then
local g="$(git rev-parse --git-dir 2>/dev/null)"
if [ -n "$g" ]; then
local o
local b
if [ -d "$g/rebase-apply" ]; then
if [ -f "$g/rebase-apply/rebasing" ]; then
o="|REBASE"
elif [ -f "$g/rebase-apply/applying" ]; then
o="|AM"
else
o="|AM/REBASE"
fi
b="$(git symbolic-ref HEAD 2>/dev/null)"
elif [ -f "$g/rebase-merge/interactive" ]; then
o="|REBASE-i"
b="$(cat "$g/rebase-merge/head-name")"
elif [ -d "$g/rebase-merge" ]; then
o="|REBASE-m"
b="$(cat "$g/rebase-merge/head-name")"
elif [ -f "$g/MERGE_HEAD" ]; then
o="|MERGING"
b="$(git symbolic-ref HEAD 2>/dev/null)"
else
if [ -f "$g/BISECT_LOG" ]; then
o="|BISECTING"
fi
# Fall back to a tag name, then an abbreviated commit hash,
# when HEAD is detached.
if ! b="$(git symbolic-ref HEAD 2>/dev/null)"; then
if ! b="$(git describe --exact-match HEAD 2>/dev/null)"; then
b="$(cut -c1-7 "$g/HEAD")..."
fi
fi
fi
if [ -n "$1" ]; then
printf "$1" ${b##refs/heads/}$o
else
printf "%s" ${b##refs/heads/}$o
fi
fi
else
printf ""
fi
}
GITBRANCH=" $(git_ps1)"
# Keeping prompt name from wrapping beyond terminal width
# From Phil Gold (http://aperiodic.net/phil/prompt/)
# Again via Aaron Toponce
local TERMWIDTH
(( TERMWIDTH = ${COLUMNS} - 1))
local PROMPTSIZE=${#${(%):--- %D{%R.%S %a %b %d %Y}\! }}
local PWDSIZE=${#${(%):-%~}}
if [[ "$PROMPTSIZE + $PWDSIZE" -gt $TERMWIDTH ]]; then
(( PR_PWDLEN = $TERMWIDTH - $PROMPTSIZE ))
fi
}
# Enable vi keybindings
bindkey -v
# Show a ' xx' marker in the prompt while in vi command mode and redraw.
function zle-keymap-select {
VIMODE="${${KEYMAP/vicmd/ xx}/(main|viins)/}"
zle reset-prompt
}
zle -N zle-keymap-select
# Prompt design and accompanying subfunctions from Aaron Toponce
# http://pthree.org/wp-content/uploads/2008/11/zsh_ps1.txt
setprompt () {
# Need this, so the prompt will work
setopt prompt_subst
colors
# Load colors into the environment and set them properly
# (defines PR_RED, PR_BRIGHT_RED, ... for use in the PROMPT strings below)
for COLOR in RED GREEN YELLOW WHITE BLACK BLUE CYAN MAGENTA; do
eval PR_$COLOR='%{$fg[${(L)COLOR}]%}'
eval PR_BRIGHT_$COLOR='%{$fg_bold[${(L)COLOR}]%}'
done
PR_RESET="%{$reset_color%}"
# Set the prompt
# (GITBRANCH and PR_PWDLEN are refreshed by precmd on every redraw)
PROMPT='%B${PR_BRIGHT_BLACK}<${PR_RED}<${PR_BRIGHT_RED}<%b\
${PR_GREEN} %n@%m ${PR_BLUE}%${PR_PWDLEN}<..<%~%<<${PR_BRIGHT_RED}${GITBRANCH}${PR_BRIGHT_YELLOW}${MONOENV}\
${PR_BRIGHT_BLACK}>${PR_GREEN}>${PR_BRIGHT_GREEN}>${PR_RESET} '
PROMPT2='${PR_BRIGHT_BLACK}>${PR_GREEN}>${PR_BRIGHT_GREEN}>\
%_ ${PR_BRIGHT_BLACK}>${PR_GREEN}>\
${PR_BRIGHT_GREEN}>${PR_RESET} '
}
setprompt
#[[ -s "$HOME/.rvm/scripts/rvm" ]] && source "$HOME/.rvm/scripts/rvm"
# Useful function that, if vi is invoked on a list of files, will check
# and execute "sudo vi" on them if owned by root.
# Thanks to William Scott, originally via Gary Kerbaugh
# http://xanana.ucsc.edu/~wgscott/xtal/wiki/index.php/Why_zsh_Should_Be_the_Default_Shell_on_OS_X
function vim {
LIMIT=$#
for ((i = 1; i <= $LIMIT; i++)) do
eval file="\$$i"
if [[ -e $file && ! -O $file ]] then
otherfile=1
fi
done
if [[ $otherfile = 1 ]] then
sudo vim "$@"
else
command vim "$@"
fi
}
function clr-env {
MONO_PREFIX=/opt/$1
GNOME_PREFIX=/usr
export DYLD_LIBRARY_PATH=$MONO_PREFIX/lib:$DYLD_LIBRARY_PATH
export LD_LIBRARY_PATH=$MONO_PREFIX/lib:$LD_LIBRARY_PATH
export C_INCLUDE_PATH=$MONO_PREFIX/include:$GNOME_PREFIX/include
export ACLOCAL_PATH=$MONO_PREFIX/share/aclocal
export PKG_CONFIG_PATH=$MONO_PREFIX/lib/pkgconfig:/usr/local/pkgconfig:$GNOME_PREFIX/lib/pkgconfig
export CONFIG_SITE=$HOME/.config/automake/config.site
export MONO_GAC_PREFIX=/usr
PATH=$MONO_PREFIX/bin:$PATH
echo "Switched active Mono environment!"
MONOENV=" ${MONO_PREFIX}"
}
| true
|
c7c65dade2c28ba3e0d03502d80a2f568ee9c026
|
Shell
|
uo-adapt/ADS_Scripts
|
/rsfMRI/afni_proc/wave2/batch_rsfMRIproc_w2.sh
|
UTF-8
| 683
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
#
# This batch file calls on your subject
# list (named subject_list.txt) in the same
# folder and will run job_rsfMRIproc_w2.tcsh
# for each subject in that list.
# Set your study
STUDY=/projects/adapt_lab/shared/ADS
# Set subject list
#SUBJLIST=`cat sublist_restw2_n84.txt`
SUBJLIST=`cat sub_test.txt`
# Submit one Slurm job per subject; stdout/stderr land in per-subject files
# under Scripts/rsfMRI/afni_proc/wave2/output/.
for SUBJ in $SUBJLIST
do sbatch --export ALL,SUBID=${SUBJ},STUDY=${STUDY} --job-name rsfMRIproc_w2_"${SUBJ}" --partition=short --mem-per-cpu=8G --cpus-per-task=1 -o "${STUDY}"/Scripts/rsfMRI/afni_proc/wave2/output/"${SUBJ}"_rsfMRIproc_w2_output.txt -e "${STUDY}"/Scripts/rsfMRI/afni_proc/wave2/output/"${SUBJ}"_rsfMRIproc_w2_error.txt job_rsfMRIproc_w2.tcsh
done
| true
|
889c8f841ade8853c86b6d955d39063685e26734
|
Shell
|
svalpha1104/UVLM
|
/run_make.sh
|
UTF-8
| 401
| 3
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#! /bin/sh
# Build the UVLM libraries inside the active conda environment and copy them
# into the SHARPy source tree.
mkdir -p lib
# Resolve the active conda prefix; conda prints "None" when no env is active.
export PREFIX=$(conda info --json | python -c "import sys, json; print(json.load(sys.stdin)['active_prefix'])")
if [ "$PREFIX" = "None" ]; then
    echo "*** Please check that the python environment is active."
    # BUGFIX: the original double-quoted this message, so the shell treated
    # the backticks as (empty) command substitutions and dropped them from
    # the output; single quotes print them literally as intended.
    echo '*** Run `conda activate sharpy_env`.'
    exit 1
fi
export EIGEN3_INCLUDE_DIR=$PREFIX/include/eigen3
export MKL_ROOT=$PREFIX
make
cp lib/* ../sharpy/lib/
| true
|
17ea3e630a65961dde242e5096cc2c5700c1926e
|
Shell
|
c0ns0le/dot-files-1
|
/old-dotfiles/.bashrc
|
UTF-8
| 1,653
| 3.0625
| 3
|
[] |
no_license
|
source /private/etc/bashrc
source /Users/jearsh/exports.sh
# NOTE(review): only the two "source" lines above are active; everything
# below is a commented-out legacy configuration kept for reference.
# # System-wide .bashrc file for interactive bash(1) shells.
#
# ### this was uncommented so that screen would read it
# #if [ -z "$PS1" ]; then
# # return
# #fi
#
# #PS1='\h:\W \u\$ '
# # Make bash check its window size after a process completes
# shopt -s checkwinsize
#
# export HISTCONTROL=ignoredups
# export HISTIGNORE='history'
# export HISTSIZE=10000
# export HISTFILESIZE=10000
# export EDITOR="vim"
#
# shopt -s histappend
# source ~/functions.sh
# source ~/aliases.sh
#
# # [ System Preferences ]
#
# # [ Bash ]
#
# export PS1='\[\e[0;33m\]\u@mini:\[\e[0;36m\] \w\n\$> \[\e[0;0m\]'
# export PROMPT_COMMAND="echo -ne '\033]0;mini\007'"
#
#
# ## environment variables for bc (the calculator)
# export BC_ENV_ARGS="-q -l"
#
#
# ## if you are in a screen session, change the prompt/title
# if [[ $TERM == 'screen' ]]
# then
# export PS1='\[\e[0;33m\]\u@screen:\[\e[0;36m\] \w\n\$> \[\e[0;0m\]'
# export PROMPT_COMMAND="echo -ne '\033]0;screen\007'"
# fi
#
# ## if you are connected remotely, change the prompt/title
# if [[ -n $SSH_CONNECTION ]]
# then
# export PS1='\[\e[0;33m\]\u@mini (ssh):\[\e[0;36m\] \w\n\$> \[\e[0;0m\]'
# export PROMPT_COMMAND="echo -ne '\033]0;mini (ssh)\007'"
# fi
#
# ## if you are in a vim shell, change the prompt/title
# if [[ -n $VIM ]]
# then
# export PS1='\[\e[0;33m\]\u@mini (vim):\[\e[0;37m\] \w\n\$> \[\e[0;0m\]'
# export PROMPT_COMMAND="echo -ne '\033]0;mini (vim)\007'"
# fi
#
# # only resize the terminal window if you are using Eterm (usually only at home)
# if [[ $TERM == 'Eterm' ]]
# then
# printf "\e[8;70;180;t"
# fi
| true
|
b500ea334f57018259e3053c8dab4aa5e19c4a3d
|
Shell
|
Turncloak-Exile/TowerFall
|
/Exier/options/jarvis/JarvisH.sh
|
UTF-8
| 4,167
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/bash
# JarvisH: interactive tutorial menu. Loops until the user picks the
# main-menu option (9); option 10 also falls out of the loop via the
# same until-condition check after printing its message.
ANSWER_SETUP="1"
ANSWER_RUNNINGSCRIPTS="2"
ANSWER_STARTBASH="3"
ANSWER_LOOPBASH="4"
ANSWER_MACCHANGER="5"
ANSWER_IPSPOOF="6"
ANSWER_MAINMENU="9"
ANSWER_EXIT="10"
until [ "$ANSWER" == "$ANSWER_MAINMENU" ];
do
echo -e "\e[0;33m
???????? ????????? ??????????
?????????? ??????????? ????????????
??? ??? ??? ??? ??? ???
?? ?? ?? ?? ?? ??
?? ?? ??
??? ??? ???
?????? ?????? ??????
???? ???? ????
?? ?? ??
?? ?? ??
?? ?? ??
?? ?? ??
?? ?? ??
<---------- JarvisH was brought to you by Turncloak Exile ---------->
<-------Last updated------->
<-----05/12/15----->
JarvisH: Available Options:
_______________________________________
| |
| |
| 6) Changing Ip through Terminal |
| |
| 5) Changing Macc through Terminal |
| |
| 3) Starting your own bash scripts |
| |
| 4) How to loop bash scripts |
| |
| 1) Setting up Kali |
| |
| 2)Running of scripts |
| |
| 7) N/A at this time |
| |
| 8) N/A at this time |
| |
| 10) Exit |
| |
| 9) Main Menu |
|_______________________________________|";
read ANSWER
if [ "$ANSWER" == "$ANSWER_SETUP" ];
then
sleep 3
# BUGFIX: this echo was missing -e (unlike every other echo in the loop),
# so it printed the escape sequence "\e[0;33m" literally.
echo -e "\e[0;33mJarvisH: Now loading options for setting up Kali";
sleep 1
# NOTE(review): paths use lowercase "exier/" while the repo directory is
# "Exier/" — verify the working directory/case on the target system.
exier/options/tutorials/settingupkali.sh
elif [ "$ANSWER" == "$ANSWER_RUNNINGSCRIPTS" ];
then
echo -e "\e[0;32mSystem: Now loading options for running scripts in kali";
exier/options/tutorials/runningscripts.sh
sleep 1
elif [ "$ANSWER" == "$ANSWER_STARTBASH" ];
then
echo -e "\e[0;33mJarvisH: Now loading a usefull website for starting Bashscript";
xdg-open http://www.tldp.org/LDP/Bash-Beginners-Guide/html/sect_02_02.html
sleep 4
elif [ "$ANSWER" == "$ANSWER_LOOPBASH" ];
then
echo -e "\e[0;33mJarvisH: Now loading a usefull website for starting Bashscript";
xdg-open http://tldp.org/HOWTO/Bash-Prog-Intro-HOWTO-7.html
sleep 4
elif [ "$ANSWER" == "$ANSWER_MACCHANGER" ];
then
echo -e "\e[0;33mJarvisH: Now loading help for macchanger";
sleep 1
macchanger --help
sleep 4
elif [ "$ANSWER" == "$ANSWER_IPSPOOF" ];
then
echo -e "\e[0;33mJarvisH: Now loading a website for IPspoofing."
echo -e "Please note all files are already installed though Jarvis";
sleep 1
xdg-open http://kanishkashowto.com/2013/10/18/how-to-spoof-your-ip-address-on-kali-linux/
sleep 1
elif [ "$ANSWER" == "$ANSWER_MAINMENU" ];
then
echo -e "\e[1;34mJarvis: Exiting to Main menu";
sleep 1
elif [ "$ANSWER" == "$ANSWER_EXIT" ];
then
echo -e "\e[1;34mJarvis: Closing JarvisH";
sleep 1
echo JarvisH Closed
sleep 1
else
echo -e "\e[0;32mJarvisS: There are currently no tutorials available \e[0m";
echo -e "\e[0;32mJarvisS: Please define appropriate field and try again \e[0m";
sleep 2
fi
done
| true
|
925fd88dad3c2ca58ed50dc7c4251f0b9f1946e1
|
Shell
|
Cai900205/test
|
/0604_source/gpio/gpio_app/shell/fm1_mac10_test.sh
|
UTF-8
| 444
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
# Drive an activity LED (via gpio_app) from the fm1-mac10 interface:
# poll the RX packet counter forever; blink when it moves, steady when it
# has seen traffic before but none since the previous sample.
fm1_mac10_old=0
while [ 1 ]
do
    # RX packet counter parsed from "RX packets:<N> ..." in ifconfig output.
    # Default to 0 when the interface is absent so the numeric tests below
    # stay valid instead of erroring on an empty operand.
    fm1_mac10_new=$(ifconfig fm1-mac10 | grep "RX packets" | awk -F ' ' '{print $2}' | awk -F ':' '{print $2}')
    fm1_mac10_new=${fm1_mac10_new:-0}
    if [ "$fm1_mac10_new" -ne "$fm1_mac10_old" ]
    then
        # BUGFIX: the original also executed
        #   fm1_mac10_old=$fm1_mac10_new-$fm1_mac10_old
        # here, which is string concatenation (not subtraction) and was
        # immediately overwritten below — removed as dead code.
        /app/gpio_app 1 1 13 10
        fm1_mac10_old=$fm1_mac10_new
    else
        if [ "$fm1_mac10_new" -ne 0 ]
        then
            /app/gpio_app 2 1 13 1
        fi
    fi
done
| true
|
5f80b73cbc7661faa8a89d4c3ce4a760d134b540
|
Shell
|
eng4beer/build
|
/scap/scap-builder
|
UTF-8
| 1,527
| 2.546875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Bake SCAP tooling into a RHEL 7.5 qcow2 image, generate a DISA-STIG
# remediation script inside it, extract that script, and strip commands
# known to break or not apply to the overcloud image.
export image=rhel75.qcow2
export build_dir=~/build
export image_path=$build_dir/$image
cp ~/Downloads/rhel-server-7.5-update-4-x86_64-kvm.qcow2 $image_path
# Upload the SCAP RPMs into /opt of the image and install them offline.
virt-customize -a $image_path --upload scap-security-guide-0.1.36-10.el7_5.noarch.rpm:/opt --upload openscap-scanner-1.2.16-8.el7_5.x86_64.rpm:/opt --upload openscap-1.2.16-8.el7_5.x86_64.rpm:/opt --upload xml-common-0.6.3-39.el7.noarch.rpm:/opt --run-command 'yum -y install /opt/*.rpm '
# Generate the STIG remediation shell script inside the image.
virt-customize -a $image_path --run-command "oscap xccdf generate fix --template urn:xccdf:fix:script:sh --profile xccdf_org.ssgproject.content_profile_stig-rhel7-disa --output /opt/overcloud-remediation.sh /usr/share/xml/scap/ssg/content/ssg-rhel7-ds.xml"
## Getting Hardening Script from Image
sudo mkdir -p /mnt/guest
sudo LIBGUESTFS_BACKEND=direct guestmount -a $image_path -i /mnt/guest
sudo cp /mnt/guest/opt/overcloud-remediation.sh $build_dir/
sudo guestunmount /mnt/guest
sudo chown $USER:$USER $build_dir/overcloud-remediation.sh
## Removing commands from hardening script that will break the image, or not work
sed -i '/xccdf_org.ssgproject.content_rule_security_patches_up_to_date/,+6 d' overcloud-remediation.sh
sed -i '/xccdf_org.ssgproject.content_rule_ensure_gpgcheck_globally_activated/,+87 d' overcloud-remediation.sh
sed -i '/package_command install dracut-fips/,+20 d' overcloud-remediation.sh
sed -i "s/service_command enable firewalld/service_command disable firewalld/g" overcloud-remediation.sh
#virt-customize --selinux-relabel -a $image_path
| true
|
103a890cb029c2956dff9e2b7d6855616aa337fa
|
Shell
|
mah454/cluster-healthy-check
|
/test-scripts/check-ping
|
UTF-8
| 565
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# Fire /api/v1/ping requests at the server under test, keeping at most
# PARALLEL_SIZE in flight, forever (stop with Ctrl-C).
. ./init
# NOTE(review): the usage text advertises positional args, but the body reads
# SERVER_HOST_NAME/SERVER_PORT/PARALLEL_SIZE/SEQ_DELAY — presumably ./init
# maps $1..$4 onto those variables; verify against the init script.
if [[ -z $1 || -z $2 || -z $3 || -z $4 ]]; then
echo -e "\nUsage: check-ping [IP_ADDRESS] [PORT] [PARALLEL_REQUEST_SIZE] [SEQ_DELAY]"
echo -e "Example: check-ping 172.17.0.2 8080 10 5\n"
exit 1
fi
while true; do
for I in $(seq 1 "${PARALLEL_SIZE}"); do
# Only launch another request if fewer than PARALLEL_SIZE are running.
COUNT=$(get_process_count "${SERVER_HOST_NAME}")
if [[ $COUNT -lt "${PARALLEL_SIZE}" ]] ; then
curl -s http://"${SERVER_HOST_NAME}:${SERVER_PORT}"/api/v1/ping &
fi
done
sleep "${SEQ_DELAY}"
echo "-------------------------------------------------"
done
| true
|
ec424ef0e2271ab737ba9e8bc0971de2955965a9
|
Shell
|
bolthole/freeha
|
/alerthasrv
|
UTF-8
| 903
| 3.34375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
#-------------------------------------------------------------------------#
# This file gets called when the freehad demon changes states. #
# (ie from 'INITIAL' to 'RUNNING') #
# successfully does so, etc. etc. #
# You dont have to have anything in here. This script exists for those #
# people that want automatic notification every time ha services change #
# their status. #
# #
#-------------------------------------------------------------------------#
# Args passed in:
# "$1" == numerical value of state [see "freehad.h" for the enum list]
# "$2" == string representation of state
# Example notification hook — uncomment and adjust the address to enable:
# mailx -s "`uname -n` freehad changed state to $2" admin@yourhost.com
exit 0
| true
|
f8c11a229332e1985e8a2c91778a99d1b012d1fc
|
Shell
|
ByronHsu/mace
|
/tools/build-standalone-lib.sh
|
UTF-8
| 2,116
| 3.453125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Build MACE shared and static libraries for armeabi-v7a, arm64-v8a and
# (non-macOS hosts only) linux-x86-64, collecting outputs under builds/.
set -e
LIB_DIR=builds/lib
INCLUDE_DIR=builds/include/mace/public
mkdir -p $LIB_DIR
mkdir -p $INCLUDE_DIR
# copy include headers
cp mace/public/*.h $INCLUDE_DIR/
# make directories
rm -rf $LIB_DIR/armeabi-v7a
mkdir -p $LIB_DIR/armeabi-v7a
rm -rf $LIB_DIR/arm64-v8a
mkdir -p $LIB_DIR/arm64-v8a
rm -rf $LIB_DIR/linux-x86-64
mkdir -p $LIB_DIR/linux-x86-64
# build shared libraries
# (Hexagon DSP support — and the bundled nnlib blobs — only on armeabi-v7a.)
echo "build shared lib for armeabi-v7a"
bazel build --config android --config optimization mace/libmace:libmace_dynamic --define neon=true --define openmp=true --define opencl=true --define hexagon=true --cpu=armeabi-v7a
cp bazel-bin/mace/libmace/libmace.so $LIB_DIR/armeabi-v7a/
cp third_party/nnlib/*so $LIB_DIR/armeabi-v7a/
echo "build shared lib for arm64-v8a"
bazel build --config android --config optimization mace/libmace:libmace_dynamic --define neon=true --define openmp=true --define opencl=true --cpu=arm64-v8a
cp bazel-bin/mace/libmace/libmace.so $LIB_DIR/arm64-v8a/
if [[ "$OSTYPE" != "darwin"* ]];then
echo "build shared lib for linux-x86-64"
bazel build mace/libmace:libmace_dynamic --config optimization --define openmp=true
cp bazel-bin/mace/libmace/libmace.so $LIB_DIR/linux-x86-64/
fi
# build static libraries
echo "build static lib for armeabi-v7a"
bazel build --config android --config optimization mace/libmace:libmace_static --define neon=true --define openmp=true --define opencl=true --define hexagon=true --cpu=armeabi-v7a
cp bazel-genfiles/mace/libmace/libmace.a $LIB_DIR/armeabi-v7a/
cp third_party/nnlib/*so $LIB_DIR/armeabi-v7a/
echo "build static lib for arm64-v8a"
bazel build --config android --config optimization mace/libmace:libmace_static --define neon=true --define openmp=true --define opencl=true --cpu=arm64-v8a
cp bazel-genfiles/mace/libmace/libmace.a $LIB_DIR/arm64-v8a/
if [[ "$OSTYPE" != "darwin"* ]];then
echo "build static lib for linux-x86-64"
bazel build mace/libmace:libmace_static --config optimization --define openmp=true
cp bazel-genfiles/mace/libmace/libmace.a $LIB_DIR/linux-x86-64/
fi
echo "LIB PATH: $LIB_DIR"
echo "INCLUDE FILE PATH: $INCLUDE_DIR"
| true
|
38c92b6b2d062782e7193d965cdb97125cacb8ab
|
Shell
|
LiHRaM/nxt-cross-compiler
|
/build_arm_toolchain.sh
|
UTF-8
| 10,775
| 3.859375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
#
# Copyright (c) 2008 the NxOS developers
#
# See AUTHORS for a full list of the developers.
#
# Redistribution of this file is permitted under
# the terms of the GNU Public License (GPL) version 2.
#
# Build an ARM cross-compiler toolchain (including binutils, gcc and
# newlib) on autopilot.
################################
# Modification by loic.cuvillon :
# 1-august2009: download from NxOS website and modified a bit to build
# a nxtOSEK compatible toolchain
# -remove --with-float=soft
# -add multibs options (mhard-float/msoft-float) to t-arm-elp (gcc/config)# -add c++ compiler
# -set compiler to gcc-4.2 (ubuntu 8.10)
# 30-january 2010: update to gcc 4.4 (ubuntu 9.10) and rectify flags for c++ support in gcc>4.0
# -reverse to wget instead of curl to perform downloads
# -add comments (#)
# -add howto for manual installation at end of the file
# 1 may 2010:
# -comment gdb compilation
# Dec 21 2011
# Updated the gcc version to 4-6
# Fixed link to binutils-2.20.1.tar.bz2
##################################################################
# set the right compiler
# -change to gcc4-6 on ubuntu 11.10
# -change to gcc4-4 on ubuntu 9.10
# -change to gcc4-2 on ubuntu 8.10 (gcc4-3 not working)
##################################################################
GCC_BINARY_VERSION=/usr/bin/gcc-4.8
if [ ! -e $GCC_BINARY_VERSION ]; then
echo "Error: $GCC_BINARY_VERSION not found, check GCC_BINARY_VERSION in script ";
exit 1;
fi
export CC=$GCC_BINARY_VERSION
##################################################################
# set repertories for source(src), build and final toolchain(gnuarm)
# afer building, src and build can be deleted
# -should not be modified
##################################################################
ROOT=$(pwd)
SRCDIR="$ROOT/src"
BUILDDIR="$ROOT/build"
PREFIX="$ROOT/gnuarm"
##################################################################
# set url for download
# -change URLs if not valid anymore (googling)
##################################################################
GCC_VERSION=4.4.2
GCC_DIR="gcc-$GCC_VERSION"
GCC_URL="http://ftp.gnu.org/pub/gnu/gcc/gcc-$GCC_VERSION/$GCC_DIR.tar.bz2"
BINUTILS_VERSION=2.20.1
BINUTILS_DIR="binutils-$BINUTILS_VERSION"
BINUTILS_URL="http://ftp.gnu.org/gnu/binutils/$BINUTILS_DIR.tar.bz2"
NEWLIB_VERSION=1.18.0
NEWLIB_DIR="newlib-$NEWLIB_VERSION"
NEWLIB_URL="ftp://sources.redhat.com/pub/newlib/$NEWLIB_DIR.tar.gz"
#GDB_VERSION=6.8-1
#GDB_DIR=insight-$GDB_VERSION
#GDB_URL=ftp://sourceware.org/pub/insight/releases/$GDB_DIR.tar.bz2
##################################################################
# display a summary on screen before compiling
# NOTE: $GDB_VERSION is unset (the gdb section is commented out above),
# so the Gdb line prints blank -- that is expected.
##################################################################
echo "I will build an arm-elf cross-compiler:
Prefix: $PREFIX
Sources: $SRCDIR
Build files: $BUILDDIR
Software: Binutils $BINUTILS_VERSION
Gcc $GCC_VERSION
Newlib $NEWLIB_VERSION
Gdb $GDB_VERSION (disable)
Host compiler : $GCC_BINARY_VERSION
Press ^C now if you do NOT want to do this or any key to continue."
# Fix: actually wait for the promised keypress.  The original fell straight
# through without pausing.  'read' returns immediately at EOF, so scripted /
# non-interactive runs are unaffected; '|| true' ignores that EOF status.
read -r -n 1 -s || true
##################################################################
# Helper functions.
# ensure source : check if software archive present or else download
# unpack_source : extract software source
##################################################################
# ensure_source <url>
# Download <url> into the current directory unless a file with the same
# basename already exists (lets interrupted runs resume without re-downloading).
ensure_source()
{
	URL=$1
	FILE=$(basename "$URL")
	# quote everything: URLs / filenames must survive shell metacharacters
	if [ ! -e "$FILE" ]; then
		wget "$URL" #or curl -L -O "$URL"
	fi
}
# unpack_source <archive>
# Extract <archive> (a filename relative to $SRCDIR) based on its suffix.
# Runs in a subshell so the caller's working directory is left untouched.
unpack_source()
{
(
	cd "$SRCDIR" || exit 1
	ARCHIVE_SUFFIX=${1##*.}
	if [ "$ARCHIVE_SUFFIX" = "gz" ]; then
		tar zxvf "$1"
	elif [ "$ARCHIVE_SUFFIX" = "bz2" ]; then
		tar jxvf "$1"
	else
		echo "Unknown archive format for $1"
		exit 1
	fi
)
}
##################################################################
# Create all the directories we need.
# Grab all the source and unpack them
##################################################################
mkdir -p $SRCDIR $BUILDDIR $PREFIX
# (subshell: the cd below does not change the directory for the rest of
# the script)
(
cd $SRCDIR
# First grab all the source files...
ensure_source $GCC_URL
ensure_source $BINUTILS_URL
ensure_source $NEWLIB_URL
#rboissat: Adding GNU gdb
#ensure_source $GDB_URL
# ... And unpack the sources.
unpack_source $(basename $GCC_URL)
unpack_source $(basename $BINUTILS_URL)
unpack_source $(basename $NEWLIB_URL)
#unpack_source $(basename $GDB_URL)
)
##################################################################
# Set the PATH to include the binaries we're going to build.
##################################################################
# NOTE(review): OLD_PATH is saved but never restored -- presumably a
# leftover; harmless since the script runs in its own shell.
OLD_PATH=$PATH
export PATH=$PREFIX/bin:$PATH
##################################################################
# Stage 1: Build binutils
# (out-of-tree build in $BUILDDIR; installs arm-elf-as/ld/... into $PREFIX)
##################################################################
(
mkdir -p $BUILDDIR/$BINUTILS_DIR
cd $BUILDDIR/$BINUTILS_DIR
$SRCDIR/$BINUTILS_DIR/configure --target=arm-elf --prefix=$PREFIX \
--disable-werror --enable-interwork --enable-multilib \
&& make all install
) || exit 1
##################################################################
# Stage 2: Patch the GCC multilib rules, then build the gcc compiler only
##################################################################
(
MULTILIB_CONFIG=$SRCDIR/$GCC_DIR/gcc/config/arm/t-arm-elf
# Append the extra multilib rules only if they are not there yet, so the
# script can be re-run without duplicating them in t-arm-elf.
if ! grep -q 'mhard-float/msoft-float' "$MULTILIB_CONFIG"; then
echo "
MULTILIB_OPTIONS += mhard-float/msoft-float
MULTILIB_DIRNAMES += fpu soft
MULTILIB_EXCEPTIONS += *mthumb/*mhard-float*
MULTILIB_OPTIONS += mno-thumb-interwork/mthumb-interwork
MULTILIB_DIRNAMES += normal interwork
" >> $MULTILIB_CONFIG
fi
mkdir -p $BUILDDIR/$GCC_DIR
cd $BUILDDIR/$GCC_DIR
# Build only the compiler proper (all-gcc); libgcc and the C++ runtime are
# finished in stage 4 once newlib is installed.
$SRCDIR/$GCC_DIR/configure --target=arm-elf --prefix=$PREFIX \
--enable-interwork --enable-multilib \
--disable-__cxa_atexit \
--enable-languages="c,c++" --with-newlib \
--with-headers=$SRCDIR/$NEWLIB_DIR/newlib/libc/include \
&& make all-gcc install-gcc
) || exit 1
##################################################################
# Stage 3: Build and install newlib
# (compiled with the stage-2 arm-elf-gcc found via $PATH -> $PREFIX/bin)
##################################################################
(
# And now we can build it.
mkdir -p $BUILDDIR/$NEWLIB_DIR
cd $BUILDDIR/$NEWLIB_DIR
$SRCDIR/$NEWLIB_DIR/configure --target=arm-elf --prefix=$PREFIX \
--enable-interwork --enable-multilib \
&& make all install
) || exit 1
##################################################################
# Stage 4: Build and install the rest of GCC.
# (finishes the stage-2 build -- libgcc, C++ runtime -- now that the
# newlib headers/libs are installed)
##################################################################
(
cd $BUILDDIR/$GCC_DIR
make all install
) || exit 1
##################################################################
# Stage 5: Build and install GDB
##################################################################
#(
#mkdir -p $BUILDDIR/$GDB_DIR
#cd $BUILDDIR/$GDB_DIR
#
#$SRCDIR/$GDB_DIR/configure --target=arm-elf --prefix=$PREFIX \
# --disable-werror --enable-interwork --enable-multilib \
# && make all install
#) || exit 1
# All stages succeeded; the cross toolchain now lives under $PREFIX/bin.
echo "
Build complete!
"
##################################################################
##################################################################
# HOWTO manual installation (ubuntu 8.10, gcc4.2)
##################################################################
# This is a manual installation with steps similar to those of the previous script.
#
# * Set the current terminal environment:
# o prepare the path to the toolchain executables. It is assumed you choose to install the gnu-arm toolchain in the directory gnuarm at the root of your home (/home/[your-home]/gnuarm)
# o set gcc-4.2 as compiler (gcc-4.3 can at the same time be installed but can not be used since it fails to compile this version of the toolchain)
#
# ~$ export CC=/usr/bin/gcc-4.2
# ~$ export PATH=$PATH:/home/[your-home]/gnuarm/bin
#
####################################
# * Compilation of binutils:
# o download binutils-2.18.50 source in any local folder other than /home/[your-home]/gnuarm
# o note: binutils-2.18 version do not work.
# o and run the following commands:
#
# ~$ tar xf binutils-2.18.50.tar.bz2
# ~$ mkdir binutils-build; cd binutils-build
# ~$ ../binutils-2.18.50/configure --target=arm-elf --prefix=/home/[your-home]/gnuarm --enable-interwork --enable-multilib
# ~$ make all install
#
# o the binutils for arm architecture are now installed in the directory /home/[your-home]/gnuarm
#
########################################
# * Compilation of arm-gcc compiler
# o Download gcc-4.2.2 source and newlib-1.16.0 source in any local folder other than /home/[your-home]/gnuarm.
#
# ~$ tar xf gcc-4.2.2.tar.bz2
# ~$ tar xf newlib-1.16.0.tar.gz
#
# o uncomment the 5 following lines (remove the char '#' in front) by editing the file gcc-4.2.2/gcc/config/arm/t-arm-elf
#
# # MULTILIB_OPTIONS += mhard-float/msoft-float
# # MULTILIB_DIRNAMES += fpu soft
# # MULTILIB_EXCEPTIONS += *mthumb/*mhard-float*
#
# # MULTILIB_OPTIONS += mno-thumb-interwork/mthumb-interwork
# # MULTILIB_DIRNAMES += normal interwork
#
# o note: this enables compilation of libraries for both hard and soft fpu (float point unit) needed to compile nxtOSEK, and so do NOT configure gcc and newlib with the option --with-float=soft
#
# o and run the following commands:
#
# ~$ mkdir gcc-build; cd gcc-build
# ~$ ../gcc-4.2.2/configure --target=arm-elf --prefix=/home/[your-home]/gnuarm --enable-interwork --enable-multilib --enable-languages="c,c++" --disable-__cxa_atexit --with-newlib --with-headers=[absolute path to newlib-1.16.0 folder]/newlib/libc/include
# ~$ make all-gcc install-gcc
#
########################################
# * Compilation of newlib:
# o run the following commands:
#
# ~$ mkdir newlib-build; cd newlib-build
# ~$ ../newlib-1.16.0/configure --target=arm-elf --prefix=/home/[your-home]/gnuarm/ --enable-interwork --enable-multilib
# ~$ make all install
#
# * Final compilation of gcc (libs):
# o run the following commands:
#
# ~$ cd gcc-build
# ~$ make all install
#
########################################
# * (Optional) compilation of gdb :
# o download insight-6.6.tar.bz2 in any local folder other than /home/[your-home]/gnuarm
# o and run the following commands:
#
# ~$ tar xf insight-6.6.tar.bz2
# ~$ mkdir insight-build; cd insight-build
# ~$ ../insight-6.6/configure --target=arm-elf --prefix=/home/[your-home]/gnuarm --enable-interwork --enable-multilib
# ~$ make all install
#
# o the binutils for arm architecture are now installed in the directory /home/[your-home]/gnuarm
#
| true
|
682e352cd5786d3a71b9278acd47c15850e412b0
|
Shell
|
robinchew/misc
|
/linux/bin/edge
|
UTF-8
| 675
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/sh
# Rotate an image through its four edge orientations: t -> r -> b -> l -> t.
# $1 must be named like NAME-CORNER.EXT (e.g. photo-t.png); each iteration
# rotates the current orientation 90 degrees into the next one.
fname=${1%.*}       # drop the extension
name=${fname%-*}    # part before the last '-'
corner=${fname##*-} # current corner letter: t, r, b or l
ext=${1#*.}         # extension (text after the first '.')
echo "$fname" "$name" "$corner" "$ext"
for i in 1 2 3 4
do
	# all expansions quoted so names containing spaces survive intact
	if [ "$corner" = "t" ]; then
		convert "$name-t.$ext" -rotate 90 "$name-r.$ext"
		corner="r"
		echo "$name-r.$ext"
	elif [ "$corner" = "r" ]; then
		convert "$name-r.$ext" -rotate 90 "$name-b.$ext"
		corner="b"
		echo "$name-b.$ext"
	elif [ "$corner" = "b" ]; then
		convert "$name-b.$ext" -rotate 90 "$name-l.$ext"
		corner="l"
		echo "$name-l.$ext"
	elif [ "$corner" = "l" ]; then
		convert "$name-l.$ext" -rotate 90 "$name-t.$ext"
		corner="t"
		echo "$name-t.$ext"
	fi
done
| true
|
bfa166a1a5c76d38b0bfc359d357f37c7b2052ba
|
Shell
|
vmagrotest/assignments
|
/project-1/mutate
|
UTF-8
| 246
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/sh
# Course-staff mutation hook: write "Hello <name>" into hello.txt and commit it.
echo "Mutating!!!!!"
# quote "$@" so arguments with spaces are echoed intact
echo "$@"
echo "Hello $1" > hello.txt
git add hello.txt
git commit -m "[course staff] Project 1"
# the git commit might fail if it's empty, so we'll just say that if we got to
# this point we can return 0
exit 0
| true
|
4d7a3a85ef19fe5c0ef3a7f63f3c336cc9528231
|
Shell
|
chenbodeng/jz2440v2
|
/kernel/envsetup-kernel.sh
|
UTF-8
| 321
| 2.875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# usage:
#   source envsetup-kernel.sh
# Exports KERNEL_ROOT_PATH pointing at the selected kernel source tree
# next to this script.
# store the current root path (directory containing this script);
# quoted so paths with spaces work, and BASH_SOURCE[0] is the explicit form
Q_KERNEL_PATH_CUR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
KERNEL_2_6_22_6="linux-2.6.22.6"
KERNEL_3_4_2="linux-3.4.2"
# pick which kernel tree to work on
KERNEL_VERSION=$KERNEL_2_6_22_6
KERNEL_ROOT_PATH="$Q_KERNEL_PATH_CUR/$KERNEL_VERSION"
export KERNEL_ROOT_PATH
| true
|
8e4ea84299b58cba4c6e7236a3089f478084a222
|
Shell
|
nkk1/kernel-experiements
|
/script
|
UTF-8
| 1,453
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
# Build a minimal bootable Linux: compile a kernel and a busybox-based
# initramfs, leaving bzImage + initramfs.cpio.gz in /tmp/artifacts.
# Requires: yum-based distro with sudo, network access, /tmp/kernel_config
# and /tmp/busybox_config present.
set -e
set -x
busybox_version=1.29.3
linux_version=4.20
work_dir=/tmp/mynewlinux
artifacts_dir=/tmp/artifacts
busybox=busybox-${busybox_version}
linux=linux-${linux_version}
initrd_dir=${work_dir}/myinitrd
linux_dir=${work_dir}/${linux}
busybox_dir=${work_dir}/${busybox}
# start from a clean slate (initrd_dir lives inside work_dir, so listing it
# here is redundant but harmless)
rm -rf $work_dir $initrd_dir $artifacts_dir
mkdir -p $work_dir $initrd_dir $artifacts_dir
# NOTE(review): these relative paths refer to the *current* directory, not
# $work_dir (we have not cd'd yet) -- presumably leftovers; confirm.
rm -f ${linux}.tar.xz
rm -f ${busybox}.tar.bz2
rm -rf ${linux}
# build prerequisites ("gcc" is listed twice; harmless)
sudo yum -y -d0 -e0 install vim wget git ncurses-devel glibc-static pam-devel gcc gcc bison flex bc
# download both tarballs into $work_dir (the *_dir prefixes point there)
wget -q "https://busybox.net/downloads/${busybox}.tar.bz2" -O ${busybox_dir}.tar.bz2
wget -q "https://cdn.kernel.org/pub/linux/kernel/v4.x/${linux}.tar.xz" -O ${linux_dir}.tar.xz
cd $work_dir
tar -xf ${linux}.tar.xz
bunzip2 ${busybox}.tar.bz2
tar -xf ${busybox}.tar
######### build the linux kernel
cp /tmp/kernel_config ${linux_dir}/.config
cd $linux_dir
# fill in any config options missing from the supplied .config with defaults
make olddefconfig
make -j $(nproc)
cp arch/x86/boot/bzImage $artifacts_dir
######### build the initrd
cd $busybox_dir
cp /tmp/busybox_config ${busybox_dir}/.config
make -j $(nproc)
make install
cp -rf ${busybox_dir}/_install/* $initrd_dir
######### build the init script
# unquoted EOF: ${initrd_dir} above is expanded, but the heredoc body is
# literal shell that runs at boot inside the initramfs
cat << EOF > ${initrd_dir}/init
#!/bin/sh
mkdir -p /proc /sys
mount -t proc none /proc
mount -t sysfs none /sys
exec /bin/sh
EOF
chmod +x ${initrd_dir}/init
cd ${initrd_dir}
# pack the initramfs in newc (cpio) format, NUL-delimited to survive odd names
find . -print0 | cpio --null -ov --format=newc | gzip -9 > ${artifacts_dir}/initramfs.cpio.gz
| true
|
946ba65331802dc464b32c6cd4b40f464ef8f32b
|
Shell
|
samuelffn/shell-script
|
/test.sh
|
UTF-8
| 796
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# Collect tokens from arq.txt, then report the ones that match no file
# under the admin tree into result.txt.
#arquivo=$1
#dir=~/dev/workspace/OCULTADO/admin
# NOTE(review): 'tatu' is never used below -- presumably a leftover sample
# token; confirm before removing.
tatu=SERVICE.ATTENDANCES.COUNTER_TIP.ON_HOLD
# seed the token array; search() appends the lines of arq.txt to it
arr=(carro)
#declare -A arr
# start with a fresh result file
if [ -f result.txt ]
then
rm result.txt
fi
# search
# Append every line of ./arq.txt to the global array 'arr'.
# Fixes over the original: IFS= and -r preserve whitespace/backslashes,
# quoting keeps multi-word lines as single elements, and the trailing
# '|| [ -n "$linha" ]' keeps a final line that lacks a newline.
search(){
	while IFS= read -r linha || [ -n "$linha" ]
	do
		arr+=("$linha")
	done < arq.txt
}
# roda
# For each collected token, search the admin tree for files containing it
# as a whole word; tokens that match nothing are appended to
# ~/scripts/result.txt.
# Fixes over the original: iterate the array directly (the old
# `echo ${arr[*]}` re-split multi-word tokens), quote the token, stop the
# iteration if a cd fails (otherwise grep would recurse the wrong tree),
# and use '--' so tokens starting with '-' are not parsed as options.
roda(){
	for i in "${arr[@]}"; do
		cd ~/dev/workspace/OCULTADO/admin || return 1
		if ! grep -r -l -w -- "$i"; then
			cd ~/scripts || return 1
			echo "$i">>result.txt
		fi
	done
}
# load the tokens from arq.txt ...
search
#sleep 2
# ... then grep for each one and log the misses
roda
| true
|
83279e47798a2b522d582055eeabedb5c141b6d1
|
Shell
|
aaloy/dotfiles
|
/xsession/xinitrc
|
UTF-8
| 944
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/sh
# X session startup: keyboard layout, clipboard/notification helpers,
# compositor, then hand control over to the awesome window manager.
# setup the keyboard in two layouts at the same time: english (ANSI US) and
# spanish (ALT-GR international) and maps the Capslock key as another Ctrl Key.
# The menu key is used to toggle between the layouts
setxkbmap -layout us,es -variant altgr-intl, -option grp:menu_toggle -option grp_led:scroll -option ctrl:nocaps
# load the X11 configuration for urxvt and xterm
xrdb -merge ~/.Xresources
# start xcape to do that Ctrl key act like an Esc key when is pressed and
# released on its own
# NOTE(review): assumes a local build at ~/.bin/xcape -- confirm it exists.
~/.bin/xcape
# start udiskie for usb drives automount
udiskie &
# start autocutsel to admin only one clipboard
autocutsel -fork &
autocutsel -selection PRIMARY -fork &
# start the notification daemon dunst
dunst &
# hide the mouse cursor when is inactive
unclutter &
# start a compositor for X11
compton -b --config ~/.compton.conf
# start urxvt as a daemon
urxvtd -q -o -f &
# start the tiling window manager awesome
# (exec replaces this shell, so nothing after this line would ever run;
# keep it last)
exec awesome
| true
|
2bfd37054d55ea868bcc823f4eb086e0db2b8b50
|
Shell
|
Frijke1978/LinuxAcademy
|
/Using Terraform to Manage Applications and Infrastructure/Creating a Jenkins Job.sh
|
UTF-8
| 955
| 3.3125
| 3
|
[] |
no_license
|
In this lesson, we will start working with Jenkins by creating a simple build job. This job will deploy a Docker container using Terraform, list the container, and then destroy it.
In the Jenkins dashboard, Click New Item.
Select Freestyle Project, and enter an item name of DeployGhost. Click Ok.
Under Source Code Management, select Git. Enter a Repository URL of https://github.com/linuxacademy/content-terraform-docker.git
In the Build section, click Add build step and select Execute shell from the dropdown.
Add the following in the Command area:
terraform init
terraform plan -out=tfplan
terraform apply tfplan
docker container ls
terraform destroy -auto-approve
Click Save.
Now, if we click Build Now in the left-hand menu, our project will start building. Clicking the little dropdown arrow next to #1 will give us a menu. Select Console Output to watch things build. Once we get a Finished: SUCCESS message, we're done.
| true
|
47d011ac9979f344d404253b3dc9abc654e8755d
|
Shell
|
821-N/holberton-system_engineering-devops
|
/0x04-loops_conditions_and_parsing/8-for_ls
|
UTF-8
| 112
| 2.875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# print each file without the leading "number-"
# Iterate the glob directly instead of wrapping `ls | cut` in a dummy loop:
# no ls parsing, and each filename is handled as its own loop item.
for file in *
do
    # skip the literal '*' left by an unmatched glob in an empty directory
    [ -e "$file" ] || continue
    # strip everything up to and including the first '-' (no-op if absent,
    # matching cut -f2- behavior on lines without the delimiter)
    echo "${file#*-}"
done
| true
|
98176cfc13d265630e51b3bc4282d85b73b929a0
|
Shell
|
rkalis/liquidefi
|
/sharktokens/test/start_ganache_fork.sh
|
UTF-8
| 434
| 2.53125
| 3
|
[
"MIT"
] |
permissive
|
#! /usr/bin/env sh
# Launch ganache-cli with a 10M gas limit; optionally seed accounts from
# $MNEMONIC and fork mainnet via Infura, unlocking three whale accounts.
# Reads MNEMONIC / INFURA_ID from ../.env (relative to this script).
DIR=$(dirname "$0")
cd "$DIR" || exit 1
# '.' is the POSIX spelling of 'source' (the original 'source' is a bashism
# that fails under dash); tolerate a missing .env instead of aborting.
[ -f ../.env ] && . ../.env
cmd="ganache-cli --gasLimit 10000000"
if [ -n "$MNEMONIC" ]; then cmd="$cmd --mnemonic $MNEMONIC"; fi
if [ -n "$INFURA_ID" ]; then cmd="$cmd --fork https://mainnet.infura.io/v3/$INFURA_ID --unlock 0x9eb7f2591ed42dee9315b6e2aaf21ba85ea69f8c --unlock 0xe126b3E5d052f1F575828f61fEBA4f4f2603652a --unlock 0xcd8393b5b0ec5ab8dad4e648f709be6bac11874d"; fi
# eval is needed because POSIX sh has no arrays; $cmd is built only from
# this script plus trusted .env values
eval "$cmd"
| true
|
f6ffd56bc2839745e71cefec60a92e69838f2893
|
Shell
|
kjing/ce-demo-lms
|
/deploy/start_deployment.sh
|
UTF-8
| 740
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/bash
# Deploy the LMS demo via Deployment Manager.
# $1 - SQL password, $2 - support/superuser password (both substituted into
# the deployment template below).
# update file to use provided passwords
# NOTE(review): the passwords are interpolated straight into the sed
# program -- values containing '/', '&' or '\' will corrupt the
# substitution; escape them or use a different delimiter before relying on
# arbitrary passwords.
sed -i -e "s/<sql-pass>/$1/g" deploy-gce-demo.yaml
sed -i -e "s/<sup-pass>/$2/g" deploy-gce-demo.yaml
# do sqladmin outside deployment manager due to timing delays
# that sometime cause deployments to fail
# gcloud service-management enable sqladmin.googleapis.com
# do compute engine outside of deployment so you can delete
# deployment without disabling compute engine api
gcloud services enable compute.googleapis.com
# enable deployment manager api
gcloud services enable deploymentmanager.googleapis.com
# create the deployment
gcloud deployment-manager deployments create lms --config deploy-gce-demo.yaml
# finish up with post-deployment actions
# (sourced, not executed, so it shares this shell's variables)
. ./finish_deployment.sh $1
| true
|
50b0e577e2421e233ee274b1c4cf7f18e34d6596
|
Shell
|
kierse/unix_backup
|
/rsync_erin.sh
|
UTF-8
| 1,075
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/sh
# Mirror $HOME and the media share to $SERVER with rsync.  For each tree,
# files changed or deleted since the last run are preserved on the server
# under a day-of-week incremental directory ($BACKUPDIR), giving a rolling
# seven-day history.
SERVER=pissiphany.com
BACKUPPATH=$HOME/backup/elange
BACKUPDIR=`date +%A`
SHARE=/media/share/kierse
HOME_OPTS="--archive --backup --backup-dir=$BACKUPPATH/$HOME/$BACKUPDIR/ --compress --delete --delete-excluded -F -v"
SHARE_OPTS="--archive --backup --backup-dir=$BACKUPPATH/$SHARE/$BACKUPDIR/ --compress --delete --delete-excluded -F -v"

# backup_tree <src-dir> <rsync-opt>...
# 1) empty this weekday's incremental dir on the server (sync an empty dir
#    over it), 2) sync <src-dir> into .../current/, diverting replaced and
#    deleted files into the incremental dir via the --backup-dir option.
backup_tree() {
	src=$1
	shift
	[ -d "$HOME/emptydir" ] || mkdir "$HOME/emptydir"
	rsync --archive --delete -v "$HOME/emptydir/" "$SERVER:$BACKUPPATH/$src/$BACKUPDIR/"
	rmdir "$HOME/emptydir"
	rsync "$@" "$src/" "$SERVER:$BACKUPPATH/$src/current/"
}

# backup home directory (opts intentionally unquoted: they must word-split)
backup_tree "$HOME" $HOME_OPTS
# backup share directory
backup_tree "$SHARE" $SHARE_OPTS
| true
|
aad478d5c060b1f60eb7242090081422c3fc575a
|
Shell
|
KrbAlmryde/Utilities
|
/WorkShop/SHELL/nwDRIVR.sh
|
UTF-8
| 3,676
| 3.6875
| 4
|
[
"MIT"
] |
permissive
|
# ================================================================
# This is a call script which sources various functions and allows
# for making calls to specific pipelines
#
#
# call <Study Profile> <processing piple>
# call tap preprocessing [function-name]
#
# available options for <Study Profile>: [tap, attnmen, stroop, wb1, ice, rat, test]
# specific variable names, and functions if defined provided by the profile
#
# available options for <proccessing pipelines>:
# two potential options, the first is the class of Pipeline, ie Preprocessing, Analysi, etc
# Classes are capitalized in order to distinguish them from specifc function calls.
# Calling the Reconstruction class will execute the predefined list of operations for that
# pipeline. If the Study Profile specified has a Class of the same name defined under its
# namespace, then that Pipeline will be utilized instead of the default one already defined.
# Specify a 'Class' (ie Preprocessing) or a specifc function name. In both cases, calling the
# 'Class'
# [reconstruct
# --build_functional
# --build_anat
# --_renameAnat
# preprocessing
# --timeShift
# --sliceTiming
# --deSpiking
# --volTrim
# --volReg
# analysis,
# --anova
# --ttest
# --ica
# registration,
# --warpToMNI
# --warpToTLRC
# --somethingelse
# regression,
# --deconvolve
# --something else
# ]
# Load the default pipeline implementations.
source reconstruct_functions
source registration_functions
source regression_functions
source utility_functions
# NOTE(review): ${study} is not set anywhere in this file -- presumably
# provided by a sourced profile; confirm before relying on it.
# Fix: the original read '[[ -e ${study}_functions.sh]]' -- the missing
# space before ']]' is a syntax error that prevented the script parsing.
if [[ -e "${study}_functions.sh" ]]; then
	# This would be calling override functions that will override
	# a method name defined in one of the sourced function files above.
	source "${study}_functions"
fi
# NOTE(review): 'call' is not defined in this file -- presumably supplied by
# one of the sourced *_functions files.  As written, these lines execute
# whenever the script runs; they read like usage examples and may be meant
# to live inside the comment block above -- confirm intent.
call tap Preprocessing # this would run the preprocessing pipeline as defined by the tap profile
# if such a pipeline exists under that study profile. Otherwise it will
# use the default pipeline. Regardless of case, the tap profile will supply
# all required variables for the Preprocessing pipline functions.
call tap sliceTiming # this would run the sliceTiming function as defined by the tap profile,
# if it exists under that study profile. Otherwise, it will the default
# method supplied with varibles defined under the tap profile.
# In both examples, these calls would iterate over every subject and every run, because the default
# behavior is to do so, unless a specifc set of subjects and or scans is specified.
call tap sliceTiming {1,3,7} 1 # This example is calling the sliceTiming function under
# the tap namespace, but only for subjects 1,3,7
# at scan 1
call tap Preprocessing 1, {1,4} # This is a similar example showing the various inputs. THis would
# only perform Preprocessing operations on subject 1, for runs 1 and 4
#--------------Start of Main--------------
context=${1} # This is the study profile (can also be test). It is called context becase it represents
# the context of with which the pipelines and function will execute. Or something...
operation=${2} # Can be either a <Pipeline>, or it can be a <function> (both <<case-sensitive>>)
# Both must exist, whether as a default Pipleine/Function, or a context specific
# Pipeline/Function
# NOTE(review): $PROFILE, $PROG and $subjArray are not defined in this file
# -- presumably exported by the environment or by the profile sourced here;
# verify before running.
source ${PROFILE}/Profile.${context}.sh
if [ $# -lt 4 ]; then
for subj in ${subjArray[*]}; do
# NOTE(review): $subj is never passed to the command below -- the
# per-subject arguments appear unfinished; confirm intended invocation.
${PROG}/${context}_${operation}
done
fi
| true
|
538788398db03899d842a42de5bec8e4450a4dd9
|
Shell
|
chungmanchau/dotfiles
|
/scripts/cask.sh
|
UTF-8
| 1,282
| 2.890625
| 3
|
[] |
no_license
|
# log <printf-format> [args...]
# Print a blank line plus the formatted message in bright blue.
# The first argument is deliberately used as the printf format so callers
# can write log "Installing %s ..." "$pkg".
log() {
  local fmt="$1"; shift
  # fix: reset the colour (\e[0m) afterwards so blue does not bleed into
  # all subsequent terminal output
  printf "\n\e[94m$fmt\e[0m\n" "$@"
}
# Snapshot of already-installed casks, computed once so install_cask does
# not shell out to brew for every package.
casks="$(brew cask list)"

# install_cask <cask> [extra brew args...]
# Install the given cask unless it already appears in $casks.
install_cask() {
  # printf keeps $casks one-name-per-line (the original unquoted echo
  # collapsed it); -w matches the whole word, -- guards odd cask names
  if printf '%s\n' "$casks" | grep -qw -- "$1"; then
    log "Already have %s installed. Skipping ..." "$1"
  else
    log "Installing %s ..." "$1"
    # stderr silenced to keep the run tidy; note this also hides failures
    brew cask install "$@" 2> /dev/null
  fi
}
# Register the cask repositories used below.
# NOTE(review): the caskroom/* taps and the `brew cask` subcommand are
# deprecated in current Homebrew (now `brew install --cask`); confirm the
# target machine's brew version before updating.
brew tap caskroom/cask
brew tap caskroom/versions
# Browsers
install_cask firefox
install_cask google-chrome-beta
# Cloud
install_cask cloud
install_cask dropbox
install_cask google-drive
# Comms
install_cask google-hangouts
install_cask skype
# Media
install_cask imageoptim
install_cask spotify
install_cask iina
# Development
install_cask atom
install_cask intellij-idea
install_cask gitify
install_cask iterm2-nightly
install_cask java
install_cask transmit
install_cask vmware-fusion
# Mac OS Enhancements
install_cask alfred
install_cask bartender
install_cask bettertouchtool
install_cask gpgtools
install_cask istat-menus
install_cask keepingyouawake
install_cask the-unarchiver
# Other stuff
install_cask appcleaner
install_cask daisydisk
install_cask deluge
install_cask flux
# Fonts
# https://github.com/caskroom/homebrew-fonts
brew tap caskroom/fonts
install_cask caskroom/fonts/font-hack
install_cask caskroom/fonts/font-hack-nerd-font
# drop outdated downloads from the cask cache
brew cask cleanup
| true
|
da570cd5f024304ab0e76daf7d4251e3374acaf9
|
Shell
|
zzzzzpaul/shellofcentos
|
/installAPT.sh
|
UTF-8
| 485
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
# This script installs an apt shim on CentOS to improve software-install
# compatibility.  (comments translated from Chinese)
#wget http://yum.baseurl.org/download/3.4/yum-3.4.3.tar.gz
#tar -vxf yum-3.4.3.tar.gz
#touch /etc/ yum.conf
#cd yum-3.4.3
#./yummain.py install -y yum
#yum update
# Edit the hosts file.
# NOTE(review): vim is interactive -- the script blocks here waiting for a
# human; consider appending the entry with echo >> /etc/hosts instead.
vim /etc/hosts
# Append at the last line: 199.232.96.133 raw.githubusercontent.com
# NOTE(review): this downloads an unverified script straight into
# /usr/local/bin -- review apt-get.sh before trusting it.
curl https://raw.githubusercontent.com/dvershinin/apt-get-centos/master/apt-get.sh -o /usr/local/bin/apt-get
chmod 0755 /usr/local/bin/apt-get
|
4041e434790735872bf0ba06042e91b75c91c131
|
Shell
|
apollo434/kernel_1.1.1
|
/performance/formal/testmpionwr_shm_simplified_V202010/run_test.sh
|
UTF-8
| 817
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the intelmpi_emt MPI benchmark pinned to cores 1..$1 and log
# everything (script copy, benchmark output, timestamps) to a dated file.
# $1: total process num
# $2: DIV NUM process num
# $3: count of running
# $4: warm count
if [ $# != 4 ]; then
	echo "Usage: "
	echo " #1:total process num "
	echo " #2:DIV num"
	echo " #3: count of running "
	echo " #4: warm count"
	exit 1;
fi
result=intel_mpi_test_realOS_$(date "+%Y%m%d%H%M%S").log
#echo $result
date1=$(date)
# record the exact script that produced this log
cat ./run_test.sh >>"$result"
# NOTE: t1/t2 were computed but never used in the original; kept (with
# modern $(( )) arithmetic replacing the obsolete $[ ] form) for reference.
t2=1
t1=$(( $1 + t2 ))
# fix: tee -a appends -- the original plain tee truncated $result, wiping
# the script copy written above
taskset -c 1-"$1" mpirun -n "$1" ./intelmpi_emt "$3" "$4" "$1" "$2" | tee -a "$result"
#taskset -c 1-10 mpirun -genv I_MPI_PIN_PROCESSOR_LIST=1-10 -n 8 ./emt1 2000000 | tee $result
#taskset -c 1-24 mpirun -n 23 ./emt1 2000000 | tee $result
echo "program starts at $date1" >>"$result"
date2=$(date)
echo "program ends at $date2" >>"$result"
date>>"$result"
echo "------- program run completely!--------------"
echo "log saved into $result"
| true
|
eb6913aedd9d5d36b81d89c181bd5b6fcd97c49f
|
Shell
|
tikalk/ft-geocoder-facade
|
/run-bin/geocoder-facade.sh
|
UTF-8
| 182
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
# Launch the FleetTracker Geocoder Facade jar, with APP_HOME resolved as
# the parent directory of this script.
echo "Starting FleetTracker Geocoder Facade"
DIRNAME=$(dirname "$0")
# canonicalise (cd + pwd) and bail out if the directory cannot be entered
APP_HOME=$(cd "$DIRNAME/.." && pwd) || exit 1
export APP_HOME;
java -jar "$APP_HOME/build/libs/as-geocoder-facade.jar"
|
0a649df3ffc0266ef153ce8ea51a6e9fd0638b87
|
Shell
|
ozkanpakdil/nwjs-build
|
/build_linux64.sh
|
UTF-8
| 4,116
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
# Build nw.js (branch nw27) from source on linux-x64 with patched ffmpeg
# (extra proprietary codecs enabled).  Network- and disk-heavy; expects the
# Chromium build prerequisites of an Ubuntu-like host.
set -x
whoami
pwd
MAIN='nw27'
echo "Building nwjs from sources, with ffmpeg patches [branch: $MAIN]"
# create main dir
mkdir -p nwjs-build
cd nwjs-build # nwjs-build
# get depot tool
git clone https://chromium.googlesource.com/chromium/tools/depot_tools.git
export PATH=`pwd`/depot_tools:"$PATH"
export GYP_DEFINES=target_arch=x64
# get nwjs sources
mkdir -p nwjs
cd nwjs # nwjs-build/nwjs
# generate .gclient
# (escaped quotes: this writes a Python-syntax gclient solutions file)
echo -e "solutions = [
{ \"name\" : \"src\",
\"url\" : \"https://github.com/nwjs/chromium.src.git@origin/$MAIN\",
\"deps_file\" : \"DEPS\",
\"managed\" : True,
\"custom_deps\" : {
\"src/third_party/WebKit/LayoutTests\": None,
\"src/chrome_frame/tools/test/reference_build/chrome\": None,
\"src/chrome_frame/tools/test/reference_build/chrome_win\": None,
\"src/chrome/tools/test/reference_build/chrome\": None,
\"src/chrome/tools/test/reference_build/chrome_linux\": None,
\"src/chrome/tools/test/reference_build/chrome_mac\": None,
\"src/chrome/tools/test/reference_build/chrome_win\": None,
},
\"safesync_url\": \"\",
},
]
cache_dir = None" > .gclient
# get repos (each pinned to the $MAIN branch)
git clone https://github.com/nwjs/nw.js.git src/content/nw
cd src/content/nw
git checkout $MAIN
cd ../../.. # nwjs-build/nwjs
git clone https://github.com/nwjs/node src/third_party/node-nw
cd src/third_party/node-nw
git checkout $MAIN
cd ../../.. # nwjs-build/nwjs
git clone https://github.com/nwjs/v8 src/v8
cd src/v8
git checkout $MAIN
cd ../.. # nwjs-build/nwjs
# get source code
gclient sync --with_branch_heads --nohooks --no-history
./src/build/install-build-deps.sh --no-prompt --quick-check
./src/third_party/instrumented_libraries/scripts/install-build-deps.sh --no-prompt --quick-check
gclient runhooks
# build ninja conf
cd src
gn gen out/nw --args='is_debug=false is_component_ffmpeg=true target_cpu="x64" nwjs_sdk=true enable_nacl=false ffmpeg_branding="Chrome" proprietary_codecs=true enable_ac3_eac3_audio_demuxing=true enable_hevc_demuxing=true is_official_build=true enable_mse_mpeg2ts_stream_parser=true'
cd ../../.. # ./
# widen the ffmpeg decoder/demuxer/parser whitelists before configuring it
sed -i 's/--enable-decoder=vorbis,libopus,flac/--enable-decoder=avs,eac3,aac,ac3,aac3,h264,mp1,mp2,mp3,mpeg4,mpegvideo,hevc,flv,dca,flac/g' nwjs-build/nwjs/src/third_party/ffmpeg/chromium/scripts/build_ffmpeg.py
sed -i 's/--enable-demuxer=ogg,matroska,wav,flac/--enable-demuxer=avs,eac3,aac,ac3,h264,mp3,mp4,m4v,matroska,wav,mpegvideo,mpegts,mov,avi,flv,dts,dtshd,vc1,flac,ogg,mov/g' nwjs-build/nwjs/src/third_party/ffmpeg/chromium/scripts/build_ffmpeg.py
sed -i "s/--enable-parser=opus,vorbis,flac/--enable-parser=avs,eac3,aac,ac3,aac3,h261,h263,h264,opus,vorbis,mepgvideo,mpeg4video,mpegaudio,dca,hevc,vc1,flac','--enable-libopus','--enable-libvorbis','--enable-libvpx','--enable-gpl','--enable-nonfree/g" nwjs-build/nwjs/src/third_party/ffmpeg/chromium/scripts/build_ffmpeg.py
#add extra options to ffmpeg build
cd nwjs-build/nwjs/src # nwjs-build/nwjs/src
# rebuild ffmpeg conf files
cd third_party/ffmpeg
./chromium/scripts/build_ffmpeg.py linux x64 --config-only
# build ffmpeg
cd build.x64.linux/ChromeOS
make
# trick nwjs into thinking it's chrome, not chromeos build (enables avi files)
cd ..
rm -r Chrome
cp -R ChromeOS Chrome
cd ..
# copy ffmpeg conf
./chromium/scripts/copy_config.sh
# generate gyp for ffmpeg build
./chromium/scripts/generate_gn.py
cd ../.. # nwjs-build/nwjs/src
# NOTE(review): everything below this exit never executes -- presumably
# the ninja build steps were disabled deliberately (e.g. run by hand);
# confirm before removing the exit or the dead code.
exit 0
# generate ninja build files
GYP_CHROMIUM_NO_ACTION=0 ./build/gyp_chromium -I third_party/node-nw/common.gypi third_party/node-nw/node.gyp
# build nwjs
ninja -C out/nw nwjs
# build node
ninja -C out/Release node
# copy node lib
ninja -C out/nw copy_node
# strip binaries & libs
cd out/nw # nwjs-build/nwjs/src/out/nw
strip nw
strip lib/*.so
# move required files to out/dist
cd .. #nwjs-build/nwjs/src/out
mkdir -p dist
cp -R nw/nw nw/lib nw/locales nw/icudtl.dat nw/natives_blob.bin nw/nw_100_percent.pak nw/nw_200_percent.pak nw/resources.pak nw/snapshot_blob.bin dist/
rm -rf dist/lib/*.TOC
echo "See built nwjs in: nwjs-build/nwjs/src/out/dist"
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.