blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
6a7b7dea667dda0f41f84b874c81076b12362559 | Shell | intelix/commons-devops-build-systems | /maven/compile.sh | UTF-8 | 243 | 2.8125 | 3 | [] | no_license | #!/bin/bash
set -e
# Set directory to where we expect code to be
cd /src/${SOURCE_PATH}
echo "Building"
govendor sync
echo "Fix formatting"
go fmt ./...
echo "Running Tests"
go test ./...
echo "Building source"
go build
echo "Build Successful" | true |
0b1acf02a0c472f4c618908da1de32b81a5baeeb | Shell | f4lco/testprio | /bin/commits-in-push.sh | UTF-8 | 482 | 2.9375 | 3 | [] | no_license | #!/bin/bash
set -eu
. .testprio
function run() {
strategy=$1
owner=$2
repo=$3
java -jar $PRIO_JAR ${strategy} \
--project ${owner}/${repo} \
--user ma \
--cache cache-tr-git-commits-in-push \
--patches "tr_commits_in_push" \
--output results/${owner}@${repo}/commits-in-push/${repo}@${strategy}.csv
}
while IFS= read -r project; do
while IFS= read -r strategy; do
run ${strategy} ${project}
done <data/strategies-matrix
done <data/projects
| true |
0620d1478ba2247bea658f77259ca4f4377ff371 | Shell | Shved2502/OS | /Task_1/Task_1_a.sh | UTF-8 | 178 | 2.6875 | 3 | [] | no_license | #!bin/bash
a="a"
b="b"
c="c"
for i in `seq 1 33`; do
touch > a$i.txt
done
for i in `seq 34 66`; do
touch > b$i.txt
done
for i in `seq 67 100`; do
touch > c$i.txt
done
| true |
33d3e63e3658da3ac3d6688ae8142a533a27e9bf | Shell | yvancouver/Workflow | /reFormathsMetrics.sh | UTF-8 | 318 | 3.078125 | 3 | [] | no_license | cat $1| awk -F "\t" ' {
if ( NF != 0 ){
FS Filed Separator
if ( $1 ~/^#/) {
print $0}
else
for (f = 1; f <= NF; f++)
a[NR, f] = $f
}
else
print $0
}
NF > nf { nf = NF }
END {
for (f = 1; f <= nf; f++)
for (r = 1; r <= NR; r++)
printf a[r, f] (r==NR ? RS : FS)
}' | awk '{sub(/\t{6}/,"\t");print}' | true |
b64cdd4a405c4bc2e6b46048f2947f76eed96493 | Shell | fleanend/adapt-mnmt | /setup-env.sh | UTF-8 | 957 | 3.03125 | 3 | [] | no_license | #!/bin/bash
#
# for library specific requiremtns, see README of each repo
#
EXPDIR=$PWD
# libraries
MOSES=https://github.com/moses-smt/mosesdecoder.git
SENT_PIECE='sentencepiece==0.1.8'
TENSORFLOW='tensorflow-gpu==1.4.1'
#OPENNMT=https://github.com/OpenNMT/OpenNMT-tf/tree/v1.15.0 # install updated version ./OpenNM
# Data, Processing
if [ ! -d $EXPDIR/mosesdecoder ]; then
echo "Cloning Mosesdecoder ..."
git clone $MOSES
fi
echo "Installing SentencePiece ..."
pip install $SENT_PIECE
# NMT
echo "Install tensorflow.."
pip install $TENSORFLOW
if [ -d $EXPDIR/OpenNMT ]; then
cd ./OpenNMT
pip install -e ./
fi
echo "Install CUDA 8"
wget https://developer.nvidia.com/compute/cuda/8.0/Prod2/local_installers/cuda-repo-ubuntu1604-8-0-local-ga2_8.0.61-1_amd64-deb
dpkg -i cuda-repo-ubuntu1604-8-0-local-ga2_8.0.61-1_amd64-deb
apt-key add /var/cuda-repo-8-0-local-ga2/7fa2af80.pub
apt-get update
apt-get install cuda=8.0.61-1
apt autoremove
| true |
a34fb3037efbef86ed20835b3e89caca434ed7ae | Shell | mchi/marks-xmonad | /screenlayout/config.sh | UTF-8 | 776 | 2.640625 | 3 | [] | no_license | #!/bin/bash
export BIGMONITOR_OUTPUT=$(xrandr -q | egrep '^(DP|HDMI)' | grep ' connected' | awk '{ print $1 }')
echo ${BIGMONITOR_OUTPUT}
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
"${DIR}"/clear.sh
export LTWIDTH=1360
export LTHEIGHT=768
export LTMODE=${LTWIDTH}x${LTHEIGHT}
xrandr \
--output eDP-1-1 --primary --mode ${LTMODE} --pos 1210x2160 --rotate normal \
--output ${BIGMONITOR_OUTPUT} --mode 3780x2160 --pos 0x0 --rotate normal \
--setmonitor D "1360/0x768/0+1210+2160" eDP-1-1 \
--setmonitor W "1260/0x1660/0+0+500" ${BIGMONITOR_OUTPUT} \
--setmonitor E "1260/0x1660/0+1260+500" none \
--setmonitor R "1260/0x1660/0+2520+500" none \
--setmonitor Y "3780/0x500/0+0+0" none \
# total width is 3780
| true |
2f673134e220d0b48fe101641d4739a09f3da950 | Shell | brainlife/app-noddi-matlab | /main | UTF-8 | 674 | 2.90625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
#PBS -l nodes=1:ppn=16,vmem=64gb,walltime=32:00:00
#PBS -N noddi_matlab
set -e
# create brainmask (if necessary) and unzip files
time singularity exec -e docker://brainlife/fsl:5.0.9 ./brainmask.sh
# run noddi_matlab
time singularity exec -e docker://brainlife/mcr:neurodebian1604-r2017a ./compiled/noddi_matlab
# cleanup and error check
if [ -f noddi_fit_ficvf.nii ];then
mkdir noddi;
mv noddi_fit_ficvf.nii ./noddi/icvf.nii;
mv noddi_fit_fiso.nii ./noddi/isovf.nii;
mv noddi_fit_odi.nii ./noddi/od.nii;
gzip ./noddi/icvf.nii;
gzip ./noddi/isovf.nii;
gzip ./noddi/od.nii;
# gzip mask.nii;
# rm -rf *noddi_fit*;
else
echo "output missing"
exit 1
fi
| true |
dc68b275dcb9591b42acbd7291e5f96bdcc32975 | Shell | gluster/anthill | /.travis/push_container.sh | UTF-8 | 822 | 3.734375 | 4 | [
"Apache-2.0"
] | permissive | #! /bin/bash
# Usage: push_container.sh <repo> <verbatim|version> <tag>
set -e -o pipefail
image="$1"
if [[ "x$2" == "xversion" ]]; then
[[ "$3" =~ ^v([0-9]+.*) ]] || exit 1;
tag="${BASH_REMATCH[1]}"
else
tag="$3"
fi
if [[ "x${QUAY_USERNAME}" != "x" && "x${QUAY_PASSWORD}" != "x" ]]; then
echo "$QUAY_PASSWORD" | docker login -u "$QUAY_USERNAME" --password-stdin quay.io
finalimage="quay.io/$image:$tag"
docker tag "$image" "$finalimage"
docker push "$finalimage"
fi
if [[ "x${DOCKER_USERNAME}" != "x" && "x${DOCKER_PASSWORD}" != "x" ]]; then
echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin docker.io
finalimage="docker.io/$image:$tag"
docker tag "$image" "$finalimage"
docker push "$finalimage"
fi
| true |
77eeab2fc59c0f2f1ea5666e828a2cc2d48d7709 | Shell | git-thinh/vuepress-canvas | /install.sh | UTF-8 | 844 | 3.96875 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
set -e
project_directory=${0%/*}
# move to the project's directory if needed
cd "$project_directory"
echo "Installing dependencies..."
echo
{ which yarn > /dev/null && yarn; } || npm install
if [ ! -f config.js ]; then
echo
echo "Creating config file..."
cp config.js.example config.js
fi
# go to the docs folder
cd ..
if [ ! -f README.md ]; then
echo "Creating README in the docs folder..."
echo -e "---\nhome: true\n---" > README.md
fi
shopt -s nullglob
n_markdown_files=( *.md )
if (( ${#n_markdown_files[@]} == 0 )); then
echo "Creating a sample post..."
echo '# My first post' > my-first-post.md
echo "Don't forget to edit the config.js file to personalize your blog" >> my-first-post.md
fi
echo
echo "To see it run, execute this in your docs folder: vuepress dev"
echo "DONE!"
| true |
ef6accbdbf5a80b49f5dbeecd2f0f015ff3dff47 | Shell | pingpotter/Octo | /ydb/scripts/replication_stop | UTF-8 | 382 | 3.28125 | 3 | [] | no_license | #!/bin/bash
state=$1
if [ -z $state ]
then
echo "Replication state not define !!!"
exit 1
elif [ $state == "primary" ]
then
mupip replicate -source -shutdown -timeout=2
elif [ $state == "secondary" ]
then
mupip replicate -receiver -shutdown -timeout=2
mupip replicate -source -shutdown -timeout=2
else
echo "Replication state not found !!"
exit 2
fi
| true |
b8c8f3479b964bb175ec57c933ce3cce2b737ce0 | Shell | aamemiya/letkf | /lorenz63/model/run/train.sh | UTF-8 | 496 | 2.796875 | 3 | [] | no_license | #!/bin/sh
#set -e
F90=ifort
VAR=''
CDIR=`pwd`
cd ../..
L63DIR=`pwd`
cd ..
ENKFDIR=`pwd`
COMDIR=$ENKFDIR/common
OUTDIR=$L63DIR/DATA
WKDIR=$L63DIR/tmp
rm -rf $WKDIR
mkdir -p $WKDIR
cd $WKDIR
cp $COMDIR/SFMT.f90 .
cp $COMDIR/common.f90 .
cp $L63DIR/model/lorenz63${VAR}.f90 .
cp $L63DIR/model/run/train.f90 .
$F90 -o nature SFMT.f90 common.f90 lorenz63${VAR}.f90 train.f90
rm *.mod
rm *.o
ln -s $OUTDIR/spinup/init01.dat fort.10
time ./nature
mv fort.90 $OUTDIR/train.dat
cp $CDIR/train.ctl $OUTDIR
| true |
301dba5bde2449d3babd6d0e8a6135a825ac6482 | Shell | huangyingw/loadrc | /kvmrc/vshrink.sh | UTF-8 | 246 | 3.578125 | 4 | [] | no_license | #!/bin/zsh
if [ -z "$1" ]
then
echo -e "${red}Please provide the qcow2 file ... ${NC}"
exit 1
fi
TARGET=$(realpath "$1")
BAK="$TARGET".bak
mv -v "$TARGET" "$BAK" \
&& qemu-img convert -O qcow2 -c "$BAK" "$TARGET" \
&& rm "$BAK"
| true |
8b27a04fee9da418acb6836fdfc483e5ab52d1dc | Shell | awmyhr/shell-functions | /src/is_true | UTF-8 | 1,093 | 3.390625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/sh
# Author: awmyhr <awmyhr@gmail.com>
# Contact: awmyhr <awmyhr@gmail.com>
# Project: Shell Functions
# Proj Home: https://github.com/awmyhr/shell-functions
# Copyright: 2019 awmyhr
# License: Apache-2.0
# Revised: 2019-06-28 # This revision is for the test-suite portion
# Created: 2019-06-28
##--==
#==============================================================================
is_true() {
#:"""
#: .. function:: is_true()
#:
#: .. project:: Shell Functions <https://github.com/awmyhr/shell-functions>
#:
#: .. revision:: 2019-06-28
#:
#: Check if a string is a standard true flag
#: Ref: https://github.com/fedora-sysv/initscripts)
#:
#: :param str boolean: String to check
#:"""# {% raw %}
if [ "${#}" -ne 1 ] ; then # {% endraw %}
return 64
fi
case "${1}" in
[tT] | [yY] | [yY][eE][sS] | [tT][rR][uU][eE] | 1) return 0 ;;
*) return 1
esac
}
##==---
#==============================================================================
if ! (return 0 2>/dev/null) ; then
is_true "${@}"
fi
| true |
4ee250080eae2643eb43d0a99415d629abcf87dc | Shell | niclabs/HTCondorLabs | /Examples/docker_examples/multiple_io_as_vanilla_job/vanilla_docker_ex.sh | UTF-8 | 884 | 4.0625 | 4 | [] | no_license | #! /bin/bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
function clean_up () {
docker rm -f $1
exit
}
args_list=(
"The quick"
"brown fox"
"jumps"
"over the"
"lazy dog"
)
number=$1
name="simple-io-example-$number"
## Trap the signal used by condor to terminate a job.
## When the signal is received, stop the running container and exit
trap "clean_up $name" SIGINT SIGTERM
## Create the output directory, which will be mounted
## as a volume to the docker container.
## We create it beforehand to assure it it owned by the
## same user that runs the container, otherwise it will be
## created but it will be owned by root.
mkdir -p $DIR/output
docker rm -f $name
docker build --tag simple-io-example .
docker run --name $name -v $DIR/output:/output \
-u $(id -u):$(id -g) simple-io-example \
/output/output-$number.txt "${args_list[$number]}"
clean_up $name
| true |
73a4489afe451ff1709f271ebf57a7b1cc7f1148 | Shell | artemohanjanyan/university | /6_term/translation/boolean/test.sh | UTF-8 | 769 | 3.140625 | 3 | [] | no_license | #!/bin/bash
make
declare -a tests=('(a and b) or not (c xor (a or not b))'
'((((((a and b) or not (c xor (a or not b)))))))'
'((((((a and b) or not (c xor (a or not b))))))'
'(((((a and b) or not (c xor (a or not b)))))))'
'a and b and c'
'a and b or c'
'a or b and c'
'a or b or c'
'a impl b or c impl d or e and not f'
'a impl b impl c'
'')
for ((i = 0; i < ${#tests[@]}; i++))
do
test="${tests[$i]}"
echo "TEST $test"
if echo $test | ./main $i
then
dot -Tsvg $i -o $i.svg
rm $i
else
echo "FAIL"
fi
echo
echo "=========="
echo
echo
done
| true |
ebb742f730b3ebffcbafa8041277d0c96db71641 | Shell | luki201508/scripting | /CSVtoLDIFparser.sh | UTF-8 | 11,095 | 3.953125 | 4 | [] | no_license | #!/bin/bash
#title :Parseador de .csv a .ldif para servidores LDAP
#description :Este script genera un archivo ldif pasándole el dominio y la ruta de un csv curado
#author :Lucas Galípolo Uriarte
#date :26/11/2019
#version :1.0
#usage :bash CSVtoLDIFparser.sh
#notes :Tener un Servidor 'OpenLDAP' y el módulo 'dialog' instalados
#bash_version :4.1.5(1)-release
#==============================================================================
## Titulo trasero, el cual será el mismo en todo el programa
BACKTITLE="Parseador de .csv a .ldif para OpenLDAP"
## Mensaje de texto que utiliza el menú
MSGBOX=$(cat << END
Ve eligiendo las diferentes opciones y asegurate
de tener todo listo antes de empezar
Es aconsejable ir en orden.
Elige una opción.
END
)
## Variable para determinar la posición en la que se encuentra en el menu
default_page=Nombre
## El nombre del admin tomará como valor por defecto admin (suele ser así)
admin_name=admin
## Variables no definidas, sin valor
let domain_name
let domain_extension
let csv_path
## Función para salir/abortar el programa
## Abort or terminated
exit_program() {
# Eliminamos todos los archivos temporales relacionados a nuestro programa
rm -f /tmp/csv-ldif-parser.tmp.*
clear
echo "Program $1"
if [ "$1" == "aborted" ]
then
exit 1
fi
exit
}
## Función usada para determinar si el programa debe abortar o salir exitosamente (exit 0)
exit_case() {
case $1 in
1) exit_program "terminated" ;;
255) exit_program "aborted" ;;
esac
}
## Menu principal que se servirá de navegador entre las distintas opciones del programa
main() {
dialog --clear \
--title "[CSV to LDIF Parser]" \
--backtitle "$BACKTITLE" \
--ok-label "Aceptar" \
--cancel-label "Cancelar" \
--default-item $default_page \
--menu "$MSGBOX" 20 0 20 \
Nombre "Indica el nombre del admin OpenLDAP" \
Servidor "Indica el nombre del servidor" \
Extension "Indica la extensión del servidor" \
OrigenCSV "Indica el nombre del fichero CSV" \
Script "Ver la información del script" \
Salir "Salir del script" \
2> /tmp/csv-ldif-parser.tmp.$$
exit_status=$?
# En caso de que se le de salir o abortar (Ctrl+C), ejecutará la función exit previa
exit_case $exit_status
# Variable usada para determinar la elección del usuario respecto al menu
main_val=$(cat /tmp/csv-ldif-parser.tmp.$$)
}
## Input donde se mete el nombre del admin
## Tiene valor por defecto 'admin' pero si previamente se
## le ha asignado otro valor, se pondrá ese valor por defecto
input_admin_name() {
dialog --clear \
--title "[Nombre Admin]" \
--backtitle "$BACKTITLE" \
--ok-label "Aceptar" \
--cancel-label "Cancelar" \
--inputbox "$2" 8 60 "$admin_name" \
2> /tmp/csv-ldif-parser.tmp.$$
exit_status=$?
# En caso de que el usuario le de solo al botón de 'Aceptar'
# Guardará el valor del input en un archivo temporal
if [ $exit_status -eq 0 ]
then
admin_name=$(cat /tmp/csv-ldif-parser.tmp.$$)
fi
}
## Input donde se mete el nombre del dominio
## Valor por defecto el que se haya puesto previamente
input_domain_name() {
dialog --clear \
--title "[Nombre Dominio]" \
--backtitle "$BACKTITLE" \
--ok-label "Aceptar" \
--cancel-label "Cancelar" \
--inputbox "$2" 8 60 "$domain_name" \
2> /tmp/csv-ldif-parser.tmp.$$
exit_status=$?
if [ $exit_status -eq 0 ]
then
domain_name=$(cat /tmp/csv-ldif-parser.tmp.$$)
fi
}
## Input donde se mete la extensión del dominio
## Valor por defecto el que se haya puesto previamente
input_domain_extension() {
dialog --clear \
--title "[Extensión Dominio]" \
--backtitle "$BACKTITLE" \
--ok-label "Aceptar" \
--cancel-label "Cancelar" \
--inputbox "$2" 8 60 "$domain_extension" \
2> /tmp/csv-ldif-parser.tmp.$$
exit_status=$?
if [ $exit_status -eq 0 ]
then
domain_extension=$(cat /tmp/csv-ldif-parser.tmp.$$)
fi
}
## Input donde se selecciona el fichero csv
csv_input() {
dialog --clear \
--title "[Importar CSV]" \
--backtitle "$BACKTITLE" \
--ok-label "Aceptar" \
--cancel-label "Cancelar" \
--fselect $HOME/ 14 48 \
2> /tmp/csv-ldif-parser.tmp.$$
csv_path=$(cat /tmp/csv-ldif-parser.tmp.$$)
}
## Mostrará una ventana donde enseñará todos los datos pasados previamente
## En caso de no tener alguno de los parámetros con algún valor, lanzará
## Un mensaje diciendo el texto que le falta
script_info() {
if [ -z "$admin_name" ]
then
script_info_case "admin_name"
elif [ -z "$domain_name" ]
then
script_info_case "domain_name"
elif [ -z "$domain_extension" ]
then
script_info_case "domain_extension"
elif [ -z "$csv_path" ]
then
script_info_case "csv_path"
else
# Variable que se usa como texto del mensaje
SCRIPT_INFO=" Nombre del admin: $admin_name
Dominio: $domain_name.$domain_extension
Ruta del CSV: $csv_path"
dialog --clear \
--title "[Script info]" \
--backtitle "$BACKTITLE" \
--ok-label "Crear LDIF" \
--extra-button \
--extra-label "Cancelar" \
--msgbox "$SCRIPT_INFO" 10 40
exit_status=$?
## En caso de darle a 'Crear LDIF' se mostrará una advertencia
## Preguntando si está seguro de que se quiere crear el archivo
if [ $exit_status -eq 0 ]
then
dialog --clear \
--title "[Crear Script]" \
--backtitle "$BACKTITLE" \
--yes-label "Segurísimo" \
--yesno "¿Esta seguro de que quiere crear el script?" 10 40
script_option=$?
## Si el usuario le da a 'Segurísimo' se ejecutará la función que crea el ldif
if [ $script_option -eq 0 ]
then
## Genera un archivo ldif a partir del pasado csv
csv_to_ldif
## Muestra la primera y la última entrada del archivo ldif
show_ldif_file_info
## Agregamos todos los usuarios del ldif al dominio mediante este comando
ldapadd -x -D cn=$admin_name,dc=$domain_name,dc=$domain_extension -W -f $HOME/add_users.ldif
## Guardar la última entrada del ldap en un archivo temporal
slapcat | tail -21 > $temp_file
## Mostrar la última entrada del ldap
dialog --clear \
--title "[OpenLDAP última entrada]" \
--backtitle "$BACKTITLE" \
--exit-label "Atrás" \
--textbox $temp_file 40 40
fi
fi
fi
}
## Función utilizada para mostrar la primera y última entrada del
## ldif, a parte del número total de entradas del mismo
show_ldif_file_info() {
# Definir el archivo temporal
temp_file=/tmp/csv-ldif-parser.tmp.$$
echo "[Primera entrada del ldif]" > $temp_file
# Mostrar las primera 13 líneas del archivo
head -13 $HOME/add_users.ldif >> $temp_file
printf "\n" >> $temp_file
echo "[Segunda entrada del ldif]" >> $temp_file
# Mostrar las últimas 13 líneas del archivo
tail -14 $HOME/add_users.ldif >> $temp_file
printf "\n" >> $temp_file
echo "[Numero de entradas totales]" >> $temp_file
# Contar todos los saltos de línea del archivo
grep -c ^$ $HOME/add_users.ldif >> $temp_file
dialog --clear \
--title "[Script content]" \
--backtitle "$BACKTITLE" \
--exit-label "Atrás" \
--textbox $temp_file 40 70
}
## Función usada para mostrar el error en la opción 'Script' del menú
script_info_case() {
case "$1" in
admin_name) script_info_error "Nombre del admin" ;;
domain_name) script_info_error "Nombre del dominio" ;;
domain_extension) script_info_error "Extensión del dominio" ;;
csv_path) script_info_error "Ruta del archivo .csv" ;;
esac
}
## Mensaje de error si alguno de los Input está vacío
script_info_error() {
dialog --clear \
--title "[Script info]" \
--backtitle "$BACKTITLE" \
--msgbox "Falta información: $1" 7 40
}
## Función que parsea el CSV y crea un LDIF a partir de éste
csv_to_ldif() {
# Con IFS separamos por comas
# Con read metemos cada parámetro del csv en una variable
# Bucle para recorrer todo el csv
while IFS=, read -r uidNumber description name name_id
do
# Variable para definir la ubicación del ldif
ldif_file=$HOME/add_users.ldif
echo dn: uid=$name,ou=script,dc=$domain_name,dc=$domain_extension >> $ldif_file
echo uid: $name >> $ldif_file
echo cn: $name >> $ldif_file
echo givenName: $description >> $ldif_file
echo sn: $name-$uidNumber >> $ldif_file
echo objectClass: inetOrgPerson >> $ldif_file
echo objectClass: posixAccount >> $ldif_file
echo objectClass: top >> $ldif_file
echo loginShell: /bin/bash >> $ldif_file
echo uidNumber: $uidNumber >> $ldif_file
echo gidNumber: 1 >> $ldif_file
echo homeDirectory: /home/$name >> $ldif_file
# Generamos un contraseña con la encriptación SHA con el nombre pasado por el csv
echo userPassword: $(slappasswd -h {SHA} -s "$name") >> $ldif_file
echo "" >> $ldif_file
done < $csv_path ## Fichero csv
## Mensaje de información, donde se encuentra el archivo generado
dialog --clear \
--title "[ldif path]" \
--backtitle "$BACKTITLE" \
--msgbox "Archivo ldif generado en $HOME/add_users.ldif" 0 0
}
#################################################################
###### MAIN LOOP #######
#################################################################
while true; do
main
case $main_val in
0) exit_program "terminated" ;;
Nombre) input_admin_name
default_page=Nombre ;;
Servidor) input_domain_name
default_page=Servidor ;;
Extension) input_domain_extension
default_page=Extension ;;
OrigenCSV) csv_input
default_page=OrigenCSV ;;
Script) script_info
default_page=Script ;;
Salir) exit_program "terminated" ;;
esac
done
exit 0
| true |
9a8c518ce071b0bb2a585f8a5c9d128a3313980b | Shell | robinmoussu/config | /install.sh | UTF-8 | 2,529 | 3.4375 | 3 | [] | no_license |
INSTALL_PACKER=1
INSTALL_PACMAN=1
INSTALL_PACKER=1
if [ $UID -eq 0 ]
then
PACMAN="pacman"
else
PACMAN="pacman"
fi
declare -a INSTALL=(
"base base-devel wget"
"git"
"zsh vim"
"dialog wpa_supplicant"
"grub-efi-x86_64 os-prober mtools libisoburn efibootmgr dosfstools fuse freetype2"
"xorg-server xorg-server-utils xorg-apps xf86-video-intel"
"mdm"
"i3-wm i3status dmenu synapse"
"libreoffice firefox chromium"
"elinks links"
)
declare -a INSTALL_PACKER_PAQUET=(
"gdm"
"oh-my-zsh-git"
)
INSTALL_ALL=0
for i in ${!INSTALL[*]}
do
if [ $INSTALL_ALL -eq 0 ] && [ $INSTALL_PACMAN -eq 1 ]
then
echo "Installer : ${INSTALL[i]} ? (y/n/a/c)"
unset rep
while [ -z "$rep" ]; do
read rep
done
if [ $rep == "a" ]
then
INSTALL_ALL=1
elif [ $rep == "c" ]
then
unset INSTALL
INSTALL_PACMAN=0
elif [ $rep != "y" ]
then
unset INSTALL[i]
fi
fi
done
INSTALL_ALL_PACKER=0
for i in ${!INSTALL_PACKER_PAQUET[*]}
do
if [ $INSTALL_ALL_PACKER -eq 0 ] && [ $INSTALL_PACMAN -eq 1 ]
then
echo "Installer : ${INSTALL_PACKER_PAQUET[i]} ? (y/n/a/c)"
unset rep
while [ -z "$rep" ]; do
read rep
done
if [ $rep == "a" ]
then
INSTALL_ALL_PACKER=1
elif [ $rep == "c" ]
then
unset INSTALL_PACKER_PAQUET
INSTALL_PACKER=0
elif [ $rep != "y" ]
then
unset INSTALL_PACKER_PAQUET[i]
fi
fi
done
echo "Les pacquets suivant sont selectionnés :"
for i in ${!INSTALL[*]}
do
echo " ${INSTALL[i]}"
done
for i in ${!INSTALL_PACKER_PAQUET[*]}
do
echo " ${INSTALL_PACKER_PAQUET[i]}"
done
echo "Continuer ? (y/n)"
unset rep
while [ -z "$rep" ]; do
read rep
done
if [ $rep != "y" ]
then
exit
fi
echo "AUR : installer packer ? (y/n)"
unset rep
while [ -z "$rep" ]; do
read rep
done
if [ $rep != "y" ]
then
INSTALL_PACKER=1
else
INSTALL_PACKER=0
fi
if [[ $INSTALL_PACMAN ]]
then
$PACMAN -S --needed --noconfirm $INSTALL
fi
if [[ $INSTALL_PACKER ]]
then
mkdir tmp_install
cd tmp_install
wget https://aur.archlinux.org/packages/pa/packer/packer.tar.gz
tar -zxf packer.tar.gz
cd packer
makepkg -sf --asroot --noconfirm
pacman -U packer*.tar.xz --noconfirm
packer -Syu --noconfirm
cd ../..
rm -rf tmp_install
fi
if [[ $INSTALL_PACKER ]]
then
packer -S --noconfirm $INSTALL
fi
| true |
9efc60a3023a1226d4354ffff56eb02d185afc74 | Shell | petronny/aur3-mirror | /pyusb-svn/PKGBUILD | UTF-8 | 874 | 2.921875 | 3 | [] | no_license | # Maintainer: Tim Hatch <tim@timhatch.com>
# Contributor: Jason Giangrande <jgiangrande@clarku.edu>
pkgname=pyusb-svn
pkgver=svn
pkgrel=1
pkgdesc="A native Python module written in C which provides USB access. (SVN Version)"
arch=('i686' 'x86_64')
url="http://pyusb.berlios.de/"
license=('GPL')
depends=('python' 'libusb')
conflicts=('pyusb' 'pyusb10')
source=()
md5sums=()
_svntrunk="https://pyusb.svn.sourceforge.net/svnroot/pyusb/"
_svnmod="pyusb-svn"
build() {
cd ${srcdir}
if [ -d $_svnmod/.svn ]; then
(cd $_svnmod && svn up -r $pkgver)
else
svn co https://pyusb.svn.sourceforge.net/svnroot/pyusb $_svnmod
fi
cd ${srcdir}/${_svnmod}/trunk/
# python2 fix
for file in `find ./ -name "*.py"`; do
sed -i 's_#!/usr/bin/env python_#!/usr/bin/env python2_' $file
sed -i 's_#!/usr/bin/python_#!/usr/bin/python2_' $file
done
python2 setup.py install --root=${pkgdir} || return 1
}
| true |
dda8541dac20c2e239108ff00e35c2bba9eeca4f | Shell | BobDeng1974/repos | /debian-9-galcore-headers/version.sh | UTF-8 | 483 | 2.546875 | 3 | [] | no_license | #!/bin/bash -e
package_name="galcore-headers"
debian_pkg_name="${package_name}"
package_version="4.6.6.1381"
package_source=""
src_dir=""
git_repo=""
git_sha=""
reprepro_dir="g/${package_name}"
dl_path="pool/main/${reprepro_dir}/"
debian_version="${package_version}-0rcnee3"
debian_untar=""
debian_patch=""
jessie_version="~jessie+20180831"
stretch_version="~stretch+20180831"
buster_version="~buster+20180831"
xenial_version="~xenial+20180831"
bionic_version="~bionic+20180831"
| true |
aef7095b38aa6ad377e4549908c63499077c0aab | Shell | kissthink/ports | /network/bridge_utils/bridge_utils.SlackBuild | UTF-8 | 2,362 | 3.65625 | 4 | [] | no_license | #!/bin/sh
CWD=`pwd`
NAME=bridge_utils
VERSION=1.2
ARCH=i586
BUILD=1
PKGNAME=bridge-utils
PKGVER=$VERSION
BASEDIR=$PKGNAME-$PKGVER
ARCHIVE=$BASEDIR.tar.gz
REPOSITORY=http://belnet.dl.sourceforge.net/sourceforge/bridge
FLAGS="-O2 -march=pentium -mtune=pentium -fno-strength-reduce \
-fomit-frame-pointer -ffast-math"
PKG=/tmp/package-$NAME
rm -rf $PKG
mkdir -p $PKG
# Obtain sources
if [ ! -e $ARCHIVE ]; then
if `wget "$REPOSITORY/$ARCHIVE"`; then
true
else
exit 1
fi
fi
# Compile
cd /tmp
tar zxvf $CWD/$ARCHIVE
cd $BASEDIR
bzcat $CWD/$NAME-$VERSION-autoconf.patch.bz2 | patch -p1 -s
bzcat $CWD/$NAME-$VERSION-compile.patch.bz2 | patch -p1 -s
chmod 755 configure
CFLAGS=$FLAGS CXXFLAGS=$FLAGS ./configure --prefix=/usr --mandir=/usr/man
make
# Install
make install DESTDIR=$PKG
mkdir -p $PKG/install $PKG/usr/doc/$NAME-$VERSION
cat > $PKG/install/slack-desc <<EOF
# HOW TO EDIT THIS FILE:
# The "handy ruler" below makes it easier to edit a package description. Line
# up the first '|' above the ':' following the base package name, and the '|'
# on the right side marks the last column you can put a character in. You must
# make exactly 11 lines for the formatting to be correct. It's also
# customary to leave one space after the ':'.
|-----handy-ruler------------------------------------------------------|
bridge_utils: Network bridging utilities
bridge_utils:
bridge_utils: This package contains the the brctl(8) utility needed to control a
bridge_utils: Linux ethernet bridge. Usually two ethernet network cards on one
bridge_utils: machine are on different network segments. Bridging makes those
bridge_utils: two network segments appear as one. To use network bridging you
bridge_utils: will need to have bridging support compiled into the kernel.
bridge_utils:
bridge_utils:
bridge_utils:
bridge_utils:
EOF
cat > $PKG/install/slack-required <<EOF
sysfsutils >= 2.1.0-i486-1
EOF
install -m644 -g0 -o0 AUTHORS ChangeLog README THANKS TODO doc/FAQ \
doc/FIREWALL doc/HOWTO doc/PROJECTS doc/SMPNOTES doc/WISHLIST \
$PKG/usr/doc/$NAME-$VERSION
chmod 444 $PKG/usr/man/man?/*.?
gzip -9nf $PKG/usr/man/man?/*.?
strip $PKG/usr/sbin/* || :
chown -R root.root $PKG
# Make package
cd $PKG
cat install/slack-desc | grep "$NAME:" > /tmp/$NAME-$VERSION-$ARCH-$BUILD.txt
makepkg -l y -c n /tmp/$NAME-$VERSION-$ARCH-$BUILD.tgz
| true |
e65cee2b0141971f0ca491a6488c37dc6e0f6650 | Shell | Khanhvuong8939/project | /GNT-Forval/.svn/pristine/76/7658d1ef9421d43d8441291952ed49435211410c.svn-base | EUC-JP | 2,400 | 3.171875 | 3 | [] | no_license | #!/bin/bash
######################################################################
# File     : FAIB000001.sh
# Purpose  : regenerate and run TRUNCATE TABLE statements for the "IN"
#            interface work tables (original header comments were EUC-JP
#            and are mojibake in this encoding; translated/summarized
#            here — TODO confirm against the original Japanese source).
# Returns  : RET_OK on success, RET_NG on failure.
# Created  : 2006/03/18  VER 1.00
######################################################################
################################################################################
# Load the shared batch environment; AOBA_HOME must be set by the caller.
# (SQL, SPOOL, COM, BATCH_LOG, ORA*, RET_OK/RET_NG, PINT*/MSG* come from it.)
################################################################################
. ${AOBA_HOME}/batch/exec/env/aoba_env.env
#. ${AOBA_HOME}/batch/exec/env/aoba_rate_env.env
################################################################################
JOBID=FAIB000001
JOBNAME=INޥ
################################################################################
# INFILE lists one table name per line; EXEFILE is the generated SQL script.
INFILE=${SQL}/init_truc_in.txt
EXEFILE=${SQL}/init_truc_in.sql
SPOOLFILE=${SPOOL}/${JOBID}.lst
################################################################################
# Make sure the log file and the sqlplus spool file exist (create if absent).
################################################################################
LOG_FILE=${BATCH_LOG}
if [[ ! -a ${LOG_FILE} ]]
then
touch ${LOG_FILE}
fi
if [[ ! -a ${SPOOLFILE} ]]
then
touch ${SPOOLFILE}
fi
################################################################################
# Rebuild EXEFILE from scratch: one "TRUNCATE TABLE <name>;" per input line.
touch $EXEFILE
rm $EXEFILE
while read line; do
echo "TRUNCATE TABLE "$line";" >> $EXEFILE
done < "$INFILE"
################################################################################
# Run the generated script through sqlplus; all output is spooled for checking.
sqlplus ${ORALOGIN_BT}/${ORAPSWD_BT}@${ORACLE_SID_BT} <<EOF > /dev/null
SET NEWPAGE 0
SET PAGESIZE 0
SET FEEDBACK OFF
SET HEADING OFF
SET SPACE 0
SET TRIMS ON
SET LINESIZE 2000
SET ECHO OFF
SET TERMOUT OFF
SET VERIFY OFF
--
SPOOL ${SPOOLFILE}
@$EXEFILE
SPOOL off
--
EXIT
EOF
################################################################################
RET=$?
if [ ${RET_OK} -ne ${RET} ];
then
exit ${RET_NG}
fi
# Scan the spooled output for Oracle errors.
${COM}/put_oracle_error.sh ${SPOOLFILE} ${JOBID} ${PINT002}
RET=$?
if [ ${RET_OK} -ne ${RET} ];
then
exit ${RET_NG}
fi
# Log normal completion.
${COM}/put_message.sh ${JOBID} ${PINT003} ${MSG002} ${JOBNAME}
exit ${RET_OK}
| true |
d17696ed633cb25a8d1690c1132aa56df250166f | Shell | ebrand0007/jazzybot_hardware_and_Arduino_code | /jazzy_base_arduino_code/jazzy_base_arduino-v2.x/catkin_jbot2_msg_ws/build.sh | UTF-8 | 385 | 2.59375 | 3 | [] | no_license | #!/bin/sh
# Sync the message-workspace sources into the scratch build tree, run a
# clean catkin build + install, then print how to regenerate the Arduino
# ros_lib headers from the installed workspace.
SOURCE=/tmp/jbot/jazzybot_hardware_and_Arduino_code/jazzy_base_arduino_code/jazzy_base_arduino-v2.x/catkin_jbot2_msg_ws/src
DEST=/tmp/catkin_jbot2_msg_ws
# Quote the prefix (defends against word-splitting) but keep the trailing
# glob unquoted so "src*" still expands as before.
rsync -av "$SOURCE"* "$DEST"/.
catkin_make clean
catkin_make
rm -rf install
catkin_make install
echo "Now:"
echo " cd install"
echo " . setup.bash"
echo " rosrun rosserial_arduino make_libraries.py ~/Arduino/libraries"
| true |
101a4bbf29e2cd0bf083059788390742a3954b28 | Shell | jchwenger/dataset.gutenberg-language | /download.sh | UTF-8 | 6,141 | 4.15625 | 4 | [] | no_license | #!/bin/bash
# Found here:
# https://www.exratione.com/2014/11/how-to-politely-download-all-english-language-text-format-files-from-project-gutenberg/
#
# Download the complete archive of text format files from Project Gutenberg.
#
# Estimated size in Q2 2014: 7G in zipfiles which unzip to about 21G in text
# files. So have 30G spare if you run this.
#
# Note that as written here this is a 36 to 48 hour process on a fast
# connection, with pauses between downloads. This minimizes impact on the
# Project Gutenberg servers.
#
# You'll only have to do this once, however, and this script will pick up from
# where it left off if it fails or is stopped.
#
# ------------------------------------------------------------------------
# Preliminaries
# ------------------------------------------------------------------------
if [ $# -lt 1 ]; then
  echo "please choose a language, e.g. 'en', 'fr', etc."
  exit 1
fi
set -o nounset
set -o errexit
# Restrict downloads to this file format.
FORMAT=txt
# Restrict downloads to this language. Stored in its own variable so the
# LANG locale environment variable is left untouched for child processes.
LANG_CODE=$1
# The directory in which this file exists.
DIR="$( cd "$( dirname "$0" )" && pwd)"
# File containing the list of zipfile URLs.
ZIP_LIST="${DIR}/links-zipfile.txt"
# A subdirectory in which to store the zipfiles.
ZIP_DIR="${DIR}/zipfiles"
# A directory in which to store the unzipped files.
UNZIP_DIR="${DIR}/files"
mkdir -p "${ZIP_DIR}"
mkdir -p "${UNZIP_DIR}"
# ------------------------------------------------------------------------
# Obtain URLs to download.
# ------------------------------------------------------------------------
# This step downloads ~700 html files containing ~38,000 zip file links. This
# will take about 30 minutes.
echo "-------------------------------------------------------------------------"
echo "Harvesting zipfile URLs for format [$FORMAT] in language [$LANG_CODE]."
echo "-------------------------------------------------------------------------"
# Only do this if it hasn't been done already.
if [ ! -f "${ZIP_LIST}" ] ; then
  echo "downloading list of zip files into '$ZIP_DIR'"
  echo ""
  # The --mirror mode of wget spiders through files listing links.
  # The delay is to play nice and not get banned.
  wget \
    --continue \
    --no-verbose \
    --wait=0.5 \
    --mirror \
    "http://www.gutenberg.org/robot/harvest?filetypes[]=${FORMAT}&langs[]=${LANG_CODE}"
  # Process the downloaded HTML link lists into a single sorted file of zipfile
  # URLs, one per line.
  grep -oh 'http://[a-zA-Z0-9./\-]*.zip' "${DIR}/www.gutenberg.org/robot/harvest"* | \
    sort | \
    uniq > "${ZIP_LIST}"
  # Get rid of the downloaded harvest files now that we have what we want.
  rm -Rf "${DIR}/www.gutenberg.org"
else
  echo "${ZIP_LIST} already exists. Skipping harvest."
fi
# ------------------------------------------------------------------------
# Download the zipfiles.
# ------------------------------------------------------------------------
# This will take a while: 36 to 48 hours. Just let it run. Project Gutenberg is
# a non-profit with a noble goal, so don't crush their servers, and it isn't as
# though you'll need to do this more than once.
echo "-------------------------------------------------------------------------"
echo "Downloading zipfiles. Starting with utf-8 encoding."
echo "See here: file types, cf. here: https://www.gutenberg.org/files/"
echo "-------------------------------------------------------------------------"
echo "Starting with -0"
echo "-------------------------------------------------------------------------"
# First pass: the "<id>-0.zip" (utf-8) archives. wget --continue makes
# re-runs resume or skip files that are already present.
while IFS= read -r URL; do
  echo "- ${URL##*/}"
  wget \
    --no-verbose \
    --continue \
    --directory-prefix="${ZIP_DIR}" "${URL}"
  # # Play nice with a delay.
  # sleep 0.5
done < <(grep -- '-0' "${ZIP_LIST}")
echo "-------------------------------------------------------------------------"
echo "Now to files with -8"
echo "-------------------------------------------------------------------------"
# Second pass: "<id>-8.zip" (8-bit) archives, skipped when the utf-8 variant
# of the same book was already fetched above. (A leftover debugging "exit 1"
# previously aborted the whole script on the first URL of this loop.)
while IFS= read -r URL; do
  echo "$URL"
  ZIP_FILE="${ZIP_DIR}/${URL##*/}"
  ZIP_FILE_0=${ZIP_FILE%-8*}-0.zip
  # Only download it if it hasn't already been downloaded in a past run.
  if [ ! -f "${ZIP_FILE_0}" ] ; then
    wget \
      --no-verbose \
      --continue \
      --directory-prefix="${ZIP_DIR}" "${URL}"
    # # Play nice with a delay.
    # sleep 0.5
  else
    echo "A version of ${ZIP_FILE##*/} already exists. Skipping download."
  fi
done < <(grep -- '-8' "${ZIP_LIST}")
echo "-------------------------------------------------------------------------"
echo "Finally, files with neither -0 nor -8."
echo "-------------------------------------------------------------------------"
# Third pass: plain "<id>.zip" archives, skipped when either encoded variant
# already exists. The -0/-8 names are derived by replacing the ".zip" suffix:
# the old code appended "-0.zip" to the full name, producing "x.zip-0.zip"
# which never matched; it also hit another leftover "exit 1".
while IFS= read -r URL; do
  echo "$URL"
  ZIP_FILE="${ZIP_DIR}/${URL##*/}"
  ZIP_FILE_0=${ZIP_FILE%.zip}-0.zip
  ZIP_FILE_8=${ZIP_FILE%.zip}-8.zip
  # Only download it if it hasn't already been downloaded in a past run.
  if [ ! -f "${ZIP_FILE_0}" ] && [ ! -f "${ZIP_FILE_8}" ]; then
    wget \
      --no-verbose \
      --continue \
      --directory-prefix="${ZIP_DIR}" "${URL}"
    # # Play nice with a delay.
    # sleep 0.5
  else
    echo "A version of ${ZIP_FILE##*/} already exists. Skipping download."
  fi
done < <(grep -P '(?<!-[80])\.zip' "${ZIP_LIST}")
# ------------------------------------------------------------------------
# Unzip the zipfiles.
# ------------------------------------------------------------------------
echo "-------------------------------------------------------------------------"
echo "Unzipping files."
echo "-------------------------------------------------------------------------"
while IFS= read -r ZIP_FILE; do
  UNZIP_FILE=$(basename "${ZIP_FILE}" .zip)
  UNZIP_FILE="${UNZIP_DIR}/${UNZIP_FILE}.txt"
  # Only unzip if not already unzipped. This check assumes that x.zip unzips to
  # x.txt, which so far seems to be the case.
  if [ ! -f "${UNZIP_FILE}" ] ; then
    unzip -o "${ZIP_FILE}" -d "${UNZIP_DIR}"
  else
    echo "${ZIP_FILE##*/} already unzipped. Skipping."
  fi
done < <(find "${ZIP_DIR}" -name '*.zip')
| true |
4b9c31ae285f2e60aabcf4d4420040e454b3e509 | Shell | airavata-courses/PixelGram | /OpenShift/process_files.sh | UTF-8 | 922 | 3.28125 | 3 | [] | no_license | if (($# < 7))
then
 echo "Usage : $0 <DOCKER_PROJECT_NAME> <APP_NAME> <IMAGE_TAG> <directory containing k8s files> <TIMESTAMP> <PORT/TARGEt PORT> <POD REPLICA COUNT>"
 exit 1
fi
# Substitute the __PLACEHOLDER__ tokens in every *.yaml manifest under
# WORK_DIR, in place; each sed pass leaves a numbered .bakN backup.
# All seven positional parameters are consumed below, so require all seven
# (the old check only demanded five, letting PORT/REPLICA silently default
# to empty strings in the manifests).
PROJECT_NAME=$1
APP_NAME=$2
IMAGE=$3
WORK_DIR=$4
TIMESTAMP=$5
PORT=$6
REPLICA=$7
main(){
find "$WORK_DIR" -name "*.yaml" -type f -exec sed -i.bak1 's#__PROJECT_NAME__#'"$PROJECT_NAME"'#g' {} \;
find "$WORK_DIR" -name "*.yaml" -type f -exec sed -i.bak2 's#__APP_NAME__#'"$APP_NAME"'#g' {} \;
find "$WORK_DIR" -name "*.yaml" -type f -exec sed -i.bak3 's#__IMAGE__#'"$IMAGE"'#g' {} \;
find "$WORK_DIR" -name "*.yaml" -type f -exec sed -i.bak4 's#__TIMESTAMP__#'"$TIMESTAMP"'#' {} \;
find "$WORK_DIR" -name "*.yaml" -type f -exec sed -i.bak5 's#__PORT__#'"$PORT"'#' {} \;
find "$WORK_DIR" -name "*.yaml" -type f -exec sed -i.bak6 's#__TARGET_PORT__#'"$PORT"'#' {} \;
find "$WORK_DIR" -name "*.yaml" -type f -exec sed -i.bak7 's#__REPLICA__#'"$REPLICA"'#' {} \;
}
main | true |
7a2a97d6e56c300aa421fb4f63531257b2e6b771 | Shell | isabella232/lsf-workbench | /ood-setup.sh | UTF-8 | 719 | 2.78125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Disable SELinux
# One-shot provisioning for an Open OnDemand demo host; must run as root.
# setenforce only affects the running system; the sed edits persist it.
setenforce 0
sed -i 's/^SELINUX=.*/SELINUX=disabled/g' /etc/sysconfig/selinux
sed -i 's/^SELINUX=.*/SELINUX=disabled/g' /etc/selinux/config
# Add user to system and apache basic auth
# NOTE(review): demo credentials ood/ood are hardcoded — not for production.
groupadd ood
useradd --create-home --gid ood ood
echo -n "ood" | passwd --stdin ood
scl enable httpd24 -- htpasswd -b -c /opt/rh/httpd24/root/etc/httpd/.htpasswd ood ood
# Set basic auth for ood
# (the heredoc below is appended verbatim to the OOD portal config)
cat >> /etc/ood/config/ood_portal.yml << EOODF
auth:
  - 'AuthType Basic'
  - 'AuthBasicProvider file'
  - 'AuthUserFile /opt/rh/httpd24/root/etc/httpd/.htpasswd'
  - 'AuthName "Welcome Message."'
  - 'Require valid-user'
EOODF
# Misc
mkdir -p /etc/ood/config/clusters.d
mkdir -p /etc/ood/config/apps/shell
| true |
3099d5f8dee869de75b4ece37091dac627aa485f | Shell | stant/docker-jenkins | /build.sh | UTF-8 | 2,406 | 3.609375 | 4 | [] | no_license | #!/bin/bash
# Written by: Stan Towianski - Oct 2016
#
# Interactive builder for a Jenkins Docker image: prompts for the Jenkins
# version, base image, daemon mode and autostart, builds the image and
# generates start/teardown helper scripts in the current directory.
echo ""
echo -n "Jenkins version to install 1) 1.651.3, 2) 2.7.4 3) [2.19.2] : "
read -r ans
if [ "$ans" = "1" ];
then
    JENKINS_VERSION="1.651.3"
elif [ "$ans" = "2" ];
then
    JENKINS_VERSION="2.7.4"
elif [ "$ans" = "3" ] || [ "$ans" = "" ];
then
    JENKINS_VERSION="2.19.2"
else
    # Any other answer is taken literally as a version number.
    JENKINS_VERSION="$ans"
fi
echo ""
echo -n "base docker image to use 1) [centos], 2) revive/scientific-linux : "
read -r docker_base_image
if [ "$docker_base_image" = "1" ] || [ "$docker_base_image" = "centos" ] || [ "$docker_base_image" = "" ];
then
    docker_base_image="centos"
    dockerfile="./Dockerfile.centos"
    NEW_IMAGE_NAME=jm-centos:1.0
elif [ "$docker_base_image" = "2" ] || [ "$docker_base_image" = "scientific" ];
then
    docker_base_image="revive/scientific-linux"
    dockerfile="./Dockerfile.scientific"
    NEW_IMAGE_NAME=jm-scientific:1.0
else
    cp ./Dockerfile.centos ./Dockerfile.other
    # Use '#' as the sed delimiter: custom images such as "user/image"
    # contain '/' and would break a '/'-delimited substitution. Also keep
    # the instruction spelled FROM (the old replacement wrote "From \<img>").
    sed -i -e "s#^FROM centos.*#FROM $docker_base_image#g" ./Dockerfile.other
    dockerfile="./Dockerfile.other"
    NEW_IMAGE_NAME=jm-other
fi
echo ""
echo -n "Run as daemon N, [Y] ? "
read -r ans
if [ "$ans" = "N" ] || [ "$ans" = "n" ];
then
    daemon_flag=""
else
    daemon_flag="-d"
fi
echo ""
echo -n "Start Jenkins when done N, [Y] ? "
read -r ans
if [ "$ans" = "N" ] || [ "$ans" = "n" ];
then
    start_flag="N"
else
    start_flag="Y"
fi
TIMEZONE="America/Detroit"
host_jenkins_home=/encrypt/data/jenkins_home
### do Build ###
echo ""
docker build -t "$NEW_IMAGE_NAME" --build-arg JENKINS_VERSION="$JENKINS_VERSION" --build-arg TIMEZONE="$TIMEZONE" -f "$dockerfile" .
docker images
### Create start script ###
echo "#!/bin/bash" > start-docker-jenkins.sh
echo "# Written by: Stan Towianski - Oct 2016" >> start-docker-jenkins.sh
echo "/usr/bin/docker run $daemon_flag -p 8080:8080 --sysctl net.ipv6.conf.all.disable_ipv6=1 -v $host_jenkins_home:/var/jenkins_home --env JENKINS_OPTS="--prefix=/jenkins" --name jm $NEW_IMAGE_NAME" >> start-docker-jenkins.sh
# Make the generated script runnable; without this the autostart below
# failed with "Permission denied" on a fresh checkout.
chmod +x start-docker-jenkins.sh
cat start-docker-jenkins.sh
### Create kill and rmi script ###
echo "#!/bin/bash" > kill-rmi.sh
echo "# Written by: Stan Towianski - Oct 2016" >> kill-rmi.sh
echo "docker images" >> kill-rmi.sh
echo "docker stop jm" >> kill-rmi.sh
echo "docker rm jm" >> kill-rmi.sh
echo "docker rmi $NEW_IMAGE_NAME" >> kill-rmi.sh
echo "docker images" >> kill-rmi.sh
chmod +x kill-rmi.sh
### Start Jenkins ? ###
if [ "$start_flag" = "Y" ];
then
    echo "Will start Jenkins"
    ./start-docker-jenkins.sh
fi
| true |
2554ab1b4bb2f1d330a9df05485f45158228dc3d | Shell | kdave/xfstests | /tests/overlay/046 | UTF-8 | 6,580 | 3.375 | 3 | [] | no_license | #! /bin/bash
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2018 Huawei. All Rights Reserved.
#
# FS QA Test No. 046
#
# Test fsck.overlay how to deal with redirect xattr in overlayfs.
#
. ./common/preamble
_begin_fstest auto quick fsck
# Import common functions.
. ./common/filter
. ./common/attr
# real QA test starts here
_supported_fs overlay
_require_scratch_nocheck
_require_attrs trusted
_require_command "$FSCK_OVERLAY_PROG" fsck.overlay
# remove all files from previous tests
_scratch_mkfs
# Expected values of the overlayfs "opaque" and "impure" xattrs ("y" = set).
OVL_XATTR_OPAQUE_VAL=y
OVL_XATTR_IMPURE_VAL=y
# Mark each named directory as "impure": create it if needed and set the
# overlayfs impure xattr on it.
make_impure_dir()
{
	local d
	for d in "$@"; do
		mkdir -p "$d"
		$SETFATTR_PROG -n "$OVL_XATTR_IMPURE" -v "$OVL_XATTR_IMPURE_VAL" "$d"
	done
}
# Create directory $1 and set its overlayfs redirect xattr to $2.
make_redirect_dir()
{
	local dir="$1" redirect="$2"

	mkdir -p "$dir"
	$SETFATTR_PROG -n "$OVL_XATTR_REDIRECT" -v "$redirect" "$dir"
}
# Verify that $1 carries a redirect xattr equal to $2; emit a diagnostic
# (caught by the test's golden output) otherwise. `value` is deliberately
# left global, matching the other check_* helpers.
check_redirect()
{
	local target="$1" expected="$2"

	value=$(_getfattr --absolute-names --only-values -n \
		"$OVL_XATTR_REDIRECT" "$target")
	if [[ "$value" != "$expected" ]]; then
		echo "Redirect xattr incorrect"
	fi
}
# Verify that $1 carries no redirect xattr; emit a diagnostic otherwise.
check_no_redirect()
{
	local target="$1"

	value=$(_getfattr --absolute-names -d -m \
		"$OVL_XATTR_REDIRECT" "$target")
	if [[ -n "$value" ]]; then
		echo "Redirect xattr not empty"
	fi
}
# Verify that $1 carries the opaque xattr with the expected value;
# emit a diagnostic otherwise.
check_opaque()
{
	local target="$1"

	value=$(_getfattr --absolute-names --only-values -n \
		"$OVL_XATTR_OPAQUE" "$target")
	if [[ "$value" != "$OVL_XATTR_OPAQUE_VAL" ]]; then
		echo "Opaque xattr incorrect"
	fi
}
# Create an overlayfs whiteout (0/0 character device) at each given path.
make_whiteout()
{
	local node
	for node in "$@"; do
		mknod "$node" c 0 0
	done
}
# Verify that each given path is still a whiteout, i.e. a character
# device with major:minor 0,0; emit a diagnostic otherwise.
check_whiteout()
{
	local node kind
	for node in "$@"; do
		kind=$(stat -c "%F:%t,%T" "$node")
		if [[ "$kind" != "character special file:0,0" ]]; then
			echo "Valid whiteout removed incorrectly"
		fi
	done
}
# Create test directories
# Overlay layer locations inside the scratch mount; read globally by
# make_test_dirs and by every test case below.
lowerdir=$OVL_BASE_SCRATCH_MNT/lower
lowerdir2=$OVL_BASE_SCRATCH_MNT/lower2
upperdir=$OVL_BASE_SCRATCH_MNT/upper
workdir=$OVL_BASE_SCRATCH_MNT/workdir
# Wipe and recreate the lower/upper/work directories so each test case
# starts from empty layers.
make_test_dirs()
{
	local d
	for d in "$lowerdir" "$lowerdir2" "$upperdir" "$workdir"; do
		rm -rf "$d"
		mkdir -p "$d"
	done
}
# Exercise fsck.overlay against a matrix of redirect-xattr states.
# In the _overlay_fsck_expect calls: the first argument is the expected
# fsck exit class; -p runs auto/preen mode (safe fixes only) and -y
# answers yes to every repair question.
# Test invalid redirect xattr point to a nonexistent origin, should remove
echo "+ Invalid redirect"
make_test_dirs
make_redirect_dir $upperdir/testdir "invalid"
_overlay_fsck_expect $FSCK_NONDESTRUCT $lowerdir $upperdir $workdir -p
check_no_redirect $upperdir/testdir
# Test invalid redirect xattr point to a file origin, should remove
echo "+ Invalid redirect(2)"
make_test_dirs
touch $lowerdir/origin
make_redirect_dir $upperdir/testdir "origin"
_overlay_fsck_expect $FSCK_NONDESTRUCT $lowerdir $upperdir $workdir -p
check_no_redirect $upperdir/testdir
# Test valid redirect xattr point to a directory origin in the same directory,
# should not remove
echo "+ Valid redirect"
make_test_dirs
mkdir $lowerdir/origin
make_whiteout $upperdir/origin
make_redirect_dir $upperdir/testdir "origin"
make_impure_dir $upperdir
_overlay_fsck_expect $FSCK_OK $lowerdir $upperdir $workdir -p
check_redirect $upperdir/testdir "origin"
# Test valid redirect xattr point to a directory origin in different directories
# should not remove
echo "+ Valid redirect(2)"
make_test_dirs
mkdir $lowerdir/origin
make_whiteout $upperdir/origin
make_redirect_dir $upperdir/testdir1/testdir2 "/origin"
make_impure_dir $upperdir/testdir1
_overlay_fsck_expect $FSCK_OK $lowerdir $upperdir $workdir -p
check_redirect $upperdir/testdir1/testdir2 "/origin"
# Test valid redirect xattr but missing whiteout to cover lower target,
# should fix whiteout
echo "+ Missing whiteout"
make_test_dirs
mkdir $lowerdir/origin
make_redirect_dir $upperdir/testdir "origin"
_overlay_fsck_expect $FSCK_NONDESTRUCT $lowerdir $upperdir $workdir -p
check_redirect $upperdir/testdir "origin"
check_whiteout $upperdir/origin
# Test valid redirect xattrs exchanged by rename, should not remove
echo "+ Valid redirect(3)"
make_test_dirs
mkdir $lowerdir/{testdir1,testdir2}
make_redirect_dir $upperdir/testdir1 "testdir2"
make_redirect_dir $upperdir/testdir2 "testdir1"
make_impure_dir $upperdir
_overlay_fsck_expect $FSCK_OK $lowerdir $upperdir $workdir -p
check_redirect $upperdir/testdir1 "testdir2"
check_redirect $upperdir/testdir2 "testdir1"
# Test invalid redirect xattr with lower same name directory exists,
# should remove invalid redirect xattr and set opaque in yes mode
echo "+ Invalid redirect(3)"
make_test_dirs
mkdir $lowerdir/testdir
make_redirect_dir $upperdir/testdir "invalid"
# Question get yes answer: Should set opaque dir ?
_overlay_fsck_expect $FSCK_NONDESTRUCT $lowerdir $upperdir $workdir -y
check_no_redirect $upperdir/testdir
check_opaque $upperdir/testdir
# Test duplicate redirect xattrs point to one origin, should fail in
# auto mode, and should remove either of the duplicates in yes mode
echo "+ Duplicate redirect"
make_test_dirs
mkdir $lowerdir2/origin
make_redirect_dir $lowerdir/testdir1 "origin"
make_redirect_dir $lowerdir/testdir2 "origin"
make_redirect_dir $upperdir/testdir3 "origin"
_overlay_fsck_expect $FSCK_UNCORRECTED "$lowerdir:$lowerdir2" $upperdir $workdir -p
# Question get yes answer: Duplicate redirect directory, remove xattr ?
_overlay_fsck_expect $FSCK_NONDESTRUCT "$lowerdir:$lowerdir2" $upperdir $workdir -y
# Exactly one of the two lower duplicates must keep its redirect xattr.
redirect_1=`check_redirect $lowerdir/testdir1 "origin" 2>/dev/null`
redirect_2=`check_redirect $lowerdir/testdir2 "origin" 2>/dev/null`
[[ $redirect_1 == $redirect_2 ]] && echo "Redirect xattr incorrect"
check_no_redirect $upperdir/testdir3
# Test duplicate redirect xattr duplicate with merge directory, should
# fail in auto mode, and should remove the redirect xattr in yes mode
echo "+ Duplicate redirect(2)"
make_test_dirs
mkdir $lowerdir/origin $upperdir/origin
make_redirect_dir $upperdir/testdir "origin"
_overlay_fsck_expect $FSCK_UNCORRECTED $lowerdir $upperdir $workdir -p
# Question get yes answer: Duplicate redirect directory, remove xattr ?
_overlay_fsck_expect $FSCK_NONDESTRUCT $lowerdir $upperdir $workdir -y
check_no_redirect $upperdir/testdir
# Test duplicate redirect xattr with lower same name directory exists,
# should remove the duplicate redirect xattr and set opaque in yes mode
echo "+ Duplicate redirect(3)"
make_test_dirs
mkdir $lowerdir/{origin,testdir} $upperdir/origin
make_redirect_dir $upperdir/testdir "invalid"
# Question one get yes answer: Duplicate redirect directory, remove xattr?
# Question two get yes answer: Should set opaque dir ?
_overlay_fsck_expect $FSCK_NONDESTRUCT $lowerdir $upperdir $workdir -y
check_no_redirect $upperdir/testdir
check_opaque $upperdir/testdir
# success, all done
status=0
exit
| true |
dd555c8e7759c82719a7f34431f683536f6743e4 | Shell | roncterry/doaway | /doaway/bin/mklablist | UTF-8 | 3,590 | 3.75 | 4 | [
"BSD-2-Clause"
] | permissive | #!/bin/bash
#============================================================================
# mklablist
#
# Version = 0.2.0
# Date = 2015-09-01
#
# License (New and Simplified BSD):
# ---------------------------------
# Copyright (c) 2010, Ron Terry
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of Ron Terry nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# All elements of the doaway.sh embedded payload are governed by the above
# licensing restrictions, unless explicitely stated otherwise.
#===============================================================================
#
# Maintainer(s) = Ron Terry - roncterry (at) gmail (dot) com
#
# The latest version can be found at:
#
# http://pronetworkconsulting.com/linux/scripts/deployit.html
#
# Description:
# This script generates a host list of machines, populates the
# ~/.ssh/known_hosts file with their host keys and then generates a
# MAC list file for those hosts.
# It calls the mkhostlist, populate-known_hosts and
# mkmaclist-from-hosts commands. It accepts the same options as the
# mkhostlist command.
#
#============================================================================
################################################################################
# Read config files and set variables
################################################################################
CONFIG=/etc/doaway.conf
# Source the system-wide configuration when present. (Removed a stray "|"
# after the sourcing line: it turned the statement into an unterminated
# pipeline and made the whole if-statement a shell syntax error.)
if [ -e "${CONFIG}" ]
then
  . "${CONFIG}"
else
  SCRIPT_PATH="/usr/bin"
fi
# Guard against a config file that does not define SCRIPT_PATH.
SCRIPT_PATH="${SCRIPT_PATH:-/usr/bin}"
################################################################################
# Script Functions
################################################################################
# Print a short usage message; the full option list lives in mkhostlist.
mklablist_usage() {
  echo "USAGE: ${0} [options]"
  echo
  echo " For options see mkhostlist -h"
}
################################################################################
# Main Code Body
################################################################################
case "${1}" in
  -h|--help)
    echo
    mklablist_usage
    echo
    exit
  ;;
esac
# Build the host list, collect SSH host keys, then derive the MAC list.
# "$@" (not ${*}) keeps each original argument intact for mkhostlist.
${SCRIPT_PATH}/mkhostlist "$@"
${SCRIPT_PATH}/populate-known_hosts
${SCRIPT_PATH}/mkmaclist-from-hosts
| true |
a0a5fd1a8066e2020487555d9fce2e9b89a82889 | Shell | huthanh89/iot-security-game-production | /build/package.sh | UTF-8 | 1,003 | 3.71875 | 4 | [] | no_license | #!/bin/bash
# packages up game-server and uploads it
# increment build version
version=$(cat VERSION)
version=$((version + 1))
echo "$version" > VERSION
# create the build
echo "making build $version"
cp -r ../game-server game-server
rm game-server/autosave.json
rm game-server/saves/*
cp VERSION game-server/
zip -r "archive/game-server-$version.zip" game-server
# upload it and link to latest
# -r keeps backslashes typed in the password from being eaten by read.
echo -n Password:
read -rs PASSWORD
sshpass -p "$PASSWORD" scp "archive/game-server-$version.zip" vitapoly@45.33.41.65:/var/www/html/iotsec
sshpass -p "$PASSWORD" ssh -t vitapoly@45.33.41.65 rm /var/www/html/iotsec/game-server-latest.zip
sshpass -p "$PASSWORD" ssh -t vitapoly@45.33.41.65 ln -s "/var/www/html/iotsec/game-server-$version.zip" /var/www/html/iotsec/game-server-latest.zip
# clean up
rm -r game-server
# write VERSION to JS Game Server
echo "VERSION='$version';" > ../game-server/html/VERSION.js
echo "$version" built
echo tagging build
# Guard the directory change: tagging the wrong repository would be worse
# than aborting here.
cd ../ || exit 1
git tag -a "build_$version" -m "Dev Build Version $version"
| true |
4e2f56cf5e682cbaee9b45df3af49b876ed527bf | Shell | lionelstellar/CWorkSpace | /build_project.sh | UTF-8 | 1,055 | 3.171875 | 3 | [] | no_license | #!/bin/bash
# Require a project name: an empty PROJECT would make CURDIR point at the
# workspace root and the cd/make below would run in the wrong directories.
if [ -z "$1" ]; then
    echo "Usage: $0 <project-dir> [make-args...]" >&2
    exit 1
fi
PROJECT=$1
CURDIR=$(pwd)/${PROJECT}/
# This expects that this is place as a first level folder relative to the other
# OP-TEE folder in a setup using default repo configuration as described by the
# documentation in optee_os (README.md)
OPTEE_HOME=/home/jiangyikun/qemu_optee
# ROOT=`dirname $ROOT`
# Path to the toolchain
export PATH=${OPTEE_HOME}/toolchains/aarch32/bin:$PATH
# Path to the TA-DEV-KIT coming from optee_os
export TA_DEV_KIT_DIR=${OPTEE_HOME}/optee_os/out/arm/export-ta_arm32
# Path to the client library (GP Client API)
export TEEC_EXPORT=${OPTEE_HOME}/optee_client/out/export
export PLATFORM=vexpress-qemu_virt
#export PLATFORM_FLAVOR=qemu_virt
# Toolchain prefix for user space code (normal world)
HOST_CROSS_COMPILE=arm-linux-gnueabihf-
# Build the host application; abort if the project has no host/ directory
# instead of running make in whatever the current directory happens to be.
# NOTE(review): "$@" still includes the project name, so it is also passed
# to make as a goal — confirm this is intended.
cd "$CURDIR/host" || exit 1
make CROSS_COMPILE=$HOST_CROSS_COMPILE "$@"
# Toolchain prefix for the Trusted Applications
TA_CROSS_COMPILE=arm-linux-gnueabihf-
# Build the Trusted Application
cd "$CURDIR/ta" || exit 1
make O=$CURDIR/ta/out CROSS_COMPILE=$TA_CROSS_COMPILE $@ | true |
c8c4cc35da95a0cb7a2417ae75197d62a2692d64 | Shell | gaston-quispe/tpsisop | /__scripts/movep.sh | UTF-8 | 3,550 | 3.75 | 4 | [] | no_license | #/bin/bash
#
#script que se encarga de mover los archivos
#
#parametro 1 origen del fichero a mover
#parametro 2 destino del fichero a mover
#parametro 3 comando que lo invoca
#$1 es el comando
#$2 es el mensaje
#$3 es el tipo de error
# Append an entry to the application log.
#   $1 - invoking command name, $2 - message, $3 - severity (defaults to INFO)
# The "initep" command logs through the bare logep function with an explicit
# config directory; every other command goes through the logep.sh script.
function grabarLog {
 tipoError="${3:-INFO}"
 case "$1" in
  initep)
   # special case for the initep command
   logep "$1" "$2" "$tipoError" "0" "$GRUPO/dirconf/"
   ;;
  *)
   logep.sh "$1" "$2" "$tipoError" "0"
   ;;
 esac
}
#$1 - file path (full path)
#$2 - invoking command (for logging)
# Prints on stdout the next "name.N" duplicate path under $1's directory,
# or the empty string when the 999-duplicate cap is reached.
function obtenerSecuencia {
 pathArchivo=$1
 dirArchivo=$(dirname $pathArchivo)/
 archivo=${pathArchivo##*/}
 maxCantArchDupl=999
 rango=2
 # Count existing "name.N" duplicates (N = 1-999 per the BRE below).
 # NOTE(review): the next suffix is count+1, which can collide when the
 # existing sequence has gaps — confirm whether that is acceptable.
 cantArchivos=`ls $dirArchivo | grep -c "^${archivo}[.][1-9]\([0-9]\)\?\{$rango\}$"`
 let cantArchivos=$cantArchivos+1
 if [ $cantArchivos -gt $maxCantArchDupl ];then
  # log the error and return an empty result
  grabarLog "$2" "no se pudo mover porque el archivo $1 supero la secuencia maxima permitida de $maxCantArchDupl" "WAR"
  archivo=""
 else
  archivo=$archivo.$cantArchivos
  archivo=$dirArchivo$archivo
 fi
 echo $archivo
}
#$1 - source file
#$2 - destination directory
#$3 - invoking command (one of the known commands, else logged as "movep")
# Moves $1 into $2; when the destination already holds a file of the same
# name, the file is moved into a "dpl/" subdirectory with a ".N" suffix
# obtained from obtenerSecuencia. Log messages stay in Spanish (they are
# runtime output consumed elsewhere).
function main {
 archivo=$1
 dirOrigen=$(dirname $archivo)/
 dirDestino=$2
 terminacionBarra="^.*/$"
 #if [ $# -lt 2 ] | [ $# -gt 3 ];then
 # #loguear mensaje
 # echo "cantidad de paramtros incorrectos"
 # exit -1
 #fi
 # Accept only the known invoker names; anything else logs as "movep".
 comandos=(demonep listep initep procep)
 if [ -z "$3" ];then
  comando=movep
 else
  for i in ${comandos[*]}
  do
   if [ $i == $3 ];then
    comando=$3
    break
   else
    comando=movep
   fi
  done
 fi
 # Validate source file and destination directory.
 if ! [ -f $archivo ];then
  # log the failure
  grabarLog "$comando" "no se pudo mover debido a que : $archivo no es un archivo " "WAR"
  exit -2
 fi
 if ! [ -d $dirDestino ];then
  grabarLog "$comando" "no se pudo mover debido a que : $dirDestino no es un directorio " "WAR"
  # log the failure
  exit -3
 fi
 # Normalize: ensure a trailing slash on the destination.
 if ! [[ $dirDestino =~ $terminacionBarra ]];then
  dirDestino=$dirDestino/
 fi
 empiezaDelHome="^/.*/$"
 path=${PWD}
 if ! [[ $dirDestino =~ $empiezaDelHome ]];then
  # relative path: prepend the current working directory
  if [ $dirDestino == "./" ];then
   dirDestino=$path/
  else
   dirDestino=$path/$dirDestino
  fi
 fi
 if ! [[ $dirOrigen =~ $empiezaDelHome ]];then
  # relative path: prepend the current working directory
  if [ $dirOrigen == "./" ];then
   dirOrigen=$path/
  else
   dirOrigen=$path/$dirOrigen
  fi
 fi
 archivoDestino=${archivo##*/}
 archivoDestinoDuplicado=$archivoDestino
 archivoDestino=$dirDestino$archivoDestino
 dirDestinoDuplicado=${dirDestino}dpl/
 archivoDestinoDuplicado=$dirDestinoDuplicado$archivoDestinoDuplicado
 if [ "$dirDestino" == "$dirOrigen" ]; then
  grabarLog "$comando" "no se pudo mover debido a que : el directorio destino y el directorio origen son el mismo " "WAR"
  # log the failure
  exit -4
 fi
 # Plain move when the name is free; otherwise move into dpl/ with a
 # sequence suffix from obtenerSecuencia.
 if ! [ -f $archivoDestino ];then
  grabarLog "$comando" "el archivo $archivo se movio satisfactoriamente a $dirDestino " "INFO"
  mv $archivo $archivoDestino
 else
  if ! [ -d $dirDestinoDuplicado ];then
   mkdir $dirDestinoDuplicado
  fi
  archivoDestinoDuplicado=$(obtenerSecuencia $archivoDestinoDuplicado $comando)
  if [ -z $archivoDestinoDuplicado ];then
   exit -5
  fi
  grabarLog "$comando" "el archivo se encuentra duplicado en $dirDestino, y se movio a $dirDestinoDuplicado" "INFO"
  # echo "-----------------"
  # echo "archivo destino $archivoDestinoDuplicado"
  # echo "archivo origen $archivo"
  # echo "-----------------"
  mv $archivo $archivoDestinoDuplicado
 fi
}
main "$1" "$2" "$3"
| true |
f45bfbab961521d56c91e0fd2acc1481a5421cb7 | Shell | dan-f/concurrent-wc | /wc-bash/wc-bash | UTF-8 | 637 | 3.5625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
WC_TMP_DIR='/tmp/concurrent-wc'
WC_TIME_FILE="${WC_TMP_DIR}/wc-bash-time"
mkdir -p "${WC_TMP_DIR}"
# Count the lines of the files in the target directory (first argument, or
# the current directory) with 10 parallel wc processes, then report how
# long the counting took in milliseconds.
if [[ -n "${1}" ]]; then
  DIR="${1}"
else
  DIR="$(pwd)"
fi
# Force the C locale so `time` prints seconds with a '.' decimal separator;
# in comma-decimal locales the regex below would fail to match.
export LC_ALL=C
WC_CMD="find ${DIR} -maxdepth 1 -type f | xargs -P 10 -I '{}' wc -l '{}'"
# The time keyword writes to stderr; capture it (line 2 is the "real" row).
{ time eval "${WC_CMD}"; } 2>"${WC_TIME_FILE}"
python3 -c "
import re
real_time_line = ''
with open('${WC_TIME_FILE}') as f:
    real_time_line = [line for line in f][1]
regex = '^real\s+(\d+)m(\d+\.\d+)s$'
match = re.search(regex, real_time_line)
mins, secs = match.groups()
elapsed_ms = int((float(mins) * 60 * 1000) + (float(secs) * 1000))
print('Took {}ms'.format(elapsed_ms))
"
| true |
19c1d21f8d51b7fb57d7f57fd3fa5747b60a0c22 | Shell | noxifoxi/minecraft-server-backup | /autostart.sh | UTF-8 | 1,596 | 3.90625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# ======== #
# Config #
# ======== #
# Watchdog intended to run from cron: restarts the Minecraft server inside
# a GNU screen session when it is not running.
cd /home/minecraft/ # change working directory where the scripts are located
# don't execute script if backup is in progress (the -i argument ignores this)
[[ -f backup.lock && "$1" != "-i" ]] && exit 0
server_jar="fabric-server-launch.jar" # server jar to monitor
screen_name="minecraft" # name of the screen the server is running
dir="." # cd the screen session in this directory ("." if no change)
disable_joining=0 # EXPERIMENTAL: disable joining for <amount> seconds by renaming the whitelist (includes the time the server needs to start, 0 to disable this feature)
start_script="start.sh"
# Type $1 followed by Enter ("^M") into window 0 of the screen session.
send_screen(){
 screen -S $screen_name -p 0 -X stuff "$1^M"
}
# =========== #
# Autostart #
# =========== #
# cancel script if autostart.sh is already running
for pid in $(pidof -x autostart.sh); do
 if [ $pid != $$ ]; then
  exit 0
 fi
done
# check if minecraft server is not running
if ! pgrep -a java | grep -q $server_jar; then
 # check if screen session does not exists
 if ! screen -list | grep -q $screen_name; then
  # create screen
  screen -dmS $screen_name
  # wait for the screen session to get created?
  sleep 2
  # change directory
  send_screen "cd $dir"
 fi
 # disable joining if feature is enabled
 [ $disable_joining -gt 0 ] && mv -f whitelist.json whitelist.json.bak
 # start server
 send_screen "./$start_script"
 # re-enable joining if feature is enabled
 if [ $disable_joining -gt 0 ]; then
  sleep $disable_joining
  mv -f whitelist.json.bak whitelist.json
  send_screen "whitelist reload"
 fi
fi
| true |
63c8c26cf6b94330ce89bcd6a3fd9a11c2213b07 | Shell | Kadinvanvalin/dotfiles | /.local/bin/ticket | UTF-8 | 178 | 3.59375 | 4 | [] | no_license | #!/bin/sh
# Print the ticket id for the current git branch: upper-cases the branch
# name and strips a leading "MOB/" prefix, e.g. "mob/proj-123" -> "PROJ-123".
# Defined POSIX-style: the previous hybrid `function name() {` form is a
# bashism with undefined behavior under this script's /bin/sh shebang.
ticket() {
    branch=$(git rev-parse --abbrev-ref HEAD)
    branch_to_upper=$(printf '%s' "$branch" | tr '[:lower:]' '[:upper:]')
    echo "${branch_to_upper#MOB/}"
}
ticket | true |
faa215b37f3405ceb0653a5106033673bb312b37 | Shell | KNMI/adaguc-server | /Docker/adaguc-server-chkconfig.sh | UTF-8 | 1,243 | 3.390625 | 3 | [
"Apache-2.0"
] | permissive | DOCKER_ADAGUC_PATH=/adaguc/adaguc-server-master
DOCKER_ADAGUC_CONFIG=/adaguc/adaguc-server-config.xml
THISSCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
### Check if ADAGUC_PATH is set externally, if not set it to default ###
if [ ! -f "${ADAGUC_PATH}/bin/adagucserver" ]; then
export ADAGUC_PATH=${DOCKER_ADAGUC_PATH}
fi
### Check if we can find adaguc executable at default location, otherwise try it from this location ###
if [ ! -f "${ADAGUC_PATH}/bin/adagucserver" ]; then
export ADAGUC_PATH=${THISSCRIPTDIR}/../
fi
### Check if we could find adaguc executable ###
if [ ! -f "${ADAGUC_PATH}/bin/adagucserver" ]; then
>&2 echo "No adagucserver executable found in path ADAGUC_PATH/bin/adagucserver [${ADAGUC_PATH}/bin/adagucserver] "
exit 1
fi
### Check configuratiion file location ###
if [ ! -f "${ADAGUC_CONFIG}" ]; then
export ADAGUC_CONFIG=${DOCKER_ADAGUC_CONFIG}
fi
### Checks if configuration file exists
if [ ! -f "${ADAGUC_CONFIG}" ]; then
>&2 echo "No configuration file found ADAGUC_CONFIG variable [${ADAGUC_CONFIG}] "
exit 1
fi
echo "Using adagucserver from ${ADAGUC_PATH}"
echo "Using config from ${ADAGUC_CONFIG}"
export ADAGUC_TMP=/tmp
export ADAGUC_ONLINERESOURCE=""
| true |
c89708de1691be7bb244ecbd262d3bd527051fb8 | Shell | gwimm/- | /cfg/bash/prompt.bash | UTF-8 | 2,336 | 3.640625 | 4 | [] | no_license | # bash
# Emit the prompt-safe ANSI escape that selects foreground color $1.
# Accepts a color name (mapped below to its ANSI index) or a raw numeric
# index, wrapped in \[ ... \] so readline excludes it from prompt width.
ps_color::fg() {
    local wrap_l="\[" wrap_r="\]"
    local hue=$1
    case "$hue" in
        black|none|NONE) hue=0 ;;
        red)             hue=1 ;;
        green)           hue=2 ;;
        yellow)          hue=3 ;;
        blue)            hue=4 ;;
        magenta)         hue=5 ;;
        cyan)            hue=6 ;;
        reset|white)     hue=7 ;;
    esac
    printf "%s" "${wrap_l}"
    printf "\\e[3%sm" "${hue}"
    printf "%s" "${wrap_r}"
}
# If $PWD lies under a mounted /dev/sd* filesystem, print a device indicator
# ("sdXN[label]" or "sdXN[mountpoint]") plus the colorized path relative to
# that mount point; prints nothing otherwise.
ps_pwd_dev() {
    while read -r; do
        # Deliberate word-splitting of the /proc/mounts line: $1=device, $2=mountpoint.
        set -- $REPLY
        local d="$1" mp="$2" l c ind
        [[ $PWD == $mp* ]] || continue
        c="$(ps_color::fg yellow)"
        # Filesystem label, if blkid can report one (empty otherwise).
        l="$(blkid -o value -s LABEL $d)"
        # Strip the "/dev/" prefix to get a short device name.
        ind="${1/\/dev\//}"
        [[ $l ]] && {
            ind+="[$l]"
        } || {
            # No label: show the colorized mount point instead.
            ind+="[$(ps_cwd::col "${mp}" $c)]"
        }
        [[ $PWD == $mp ]] && {
            printf "${c}${ind} "
            return
        }
        printf "${c}${ind} $(ps_cwd "${PWD/${mp}\//}" $c) "
        return
    done <<< "$(<'/proc/mounts' grep -E '^/dev/sd.*')"
}
# Append a trailing "/" to $1 when the current directory contains at least
# one subdirectory (hinting that descent is possible).
ps_cwd::slash() {
    local d p="$1"
    for d in ./*; do
        [[ -d $d ]] || continue
        p+="/"
        break
    done
    printf "$p"
}
# Print path $1 with color $2 applied to its components; each "/" separator
# is rendered in the reset color, then the color is re-applied.
ps_cwd::col() {
    local p="$1" c="$2"
    printf "${c}${p//\//$(ps_color::fg reset)\/${c}}"
}
# Colorize path $1 with color $2, adding a trailing slash when subdirectories
# exist in the current directory.
ps_cwd() {
    local p="$1" c="$2"
    p="$(ps_cwd::slash "$p")"
    ps_cwd::col "$p" "$c"
}
# Render the working-directory segment of the prompt: device-mount form when
# applicable, otherwise match $PWD against the DIRS table ("<dir> <color>
# <replacement>" lines) and print the abbreviated, colorized path.
ps_pwd() {
    in_dev=$(ps_pwd_dev)
    [[ $in_dev ]] && {
        printf "$in_dev"
        return
    }
    # Fallback table if the caller never populated DIRS.
    [[ $DIRS ]] || DIRS="/ red /"$'\n'
    while read -r; do
        # Deliberate word-splitting: $1=prefix dir, $2=color, $3=replacement.
        set -- $REPLY
        local CWD d="$1" col="$(ps_color::fg $2)" rs="$3"
        case "$PWD" in
            $d)
                printf "${col}${rs} "
                return
                ;;
            $d*)
                CWD+="${PWD/$d/$rs}"
                printf "$(ps_cwd "$CWD" $col) "
                return
                ;;
        esac
    done <<< "$DIRS"
}
# Render a version-control indicator: "± " inside a git work tree, colored
# red when there are uncommitted changes against HEAD; nothing outside git.
ps_vcs() {
    if git diff --shortstat HEAD &>/dev/null; then
        [[ $(git diff --shortstat HEAD 2>/dev/null) ]] && {
            printf "$(ps_color::fg red)± "
            return
        }
        printf "± "
    fi
}
# Render battery charge from sysfs BAT1: capacity in red while discharging,
# green while charging; nothing when full or state is unknown.
ps_battery() {
    local col cap bat stat
    bat="/sys/class/power_supply/BAT1"
    cap="$(< ${bat}/capacity)"
    stat="$(< ${bat}/status)"
    case "$stat" in
        Full|Unknown) return ;;
        Discharging) col=$(ps_color::fg red) ;;
        Charging) col=$(ps_color::fg green) ;;
    esac
    printf "${col}${cap} "
}
# Render an error indicator for the previous command: nothing on success,
# red "! " for exit code 1, and red "<code>! " for any other non-zero
# status. Reads the EXIT_CODE global captured by ps_init.
ps_exit() {
    local marker="!"
    case "$EXIT_CODE" in
        0)
            return
            ;;
        1)
            :
            ;;
        *)
            marker="${EXIT_CODE}${marker}"
            ;;
    esac
    printf "%s%s " "$(ps_color::fg red)" "$marker"
}
# Rebuild PS1 before each prompt: capture the last exit status first (it
# must be read before anything else runs), then concatenate the battery,
# exit, pwd, and vcs segments, each followed by a color reset.
ps_init() {
    EXIT_CODE="$?"
    unset PS1; for component in "battery" "exit" "pwd" "vcs"; {
        PS1+="$(ps_$component)$(ps_color::fg reset)"
    }
    PS1+="> "
}
# Path-abbreviation table consumed by ps_pwd: "<prefix> <color> <replacement>".
# Order matters: "~/" is tried before "~", and "/" is the catch-all.
DIRS+="$HOME/ green ~/"$'\n'
DIRS+="$HOME green ~"$'\n'
DIRS+="/ red /"$'\n'
# Bash runs ps_init before displaying every prompt.
PROMPT_COMMAND=ps_init
| true |
b74fa83d4705d2d22f84ccad07cc21c5170eca29 | Shell | traveling-soul/python | /shell_program/operation.sh | UTF-8 | 267 | 2.734375 | 3 | [] | no_license | #!/bin/bash
# Outputs 13
expr 10 + 3
# Outputs the literal string "10+3" (without spaces, expr sees one argument)
expr 10+3
expr 10 - 3
# Escape the asterisk so the shell does not expand it as a glob
expr 10 \* 3
expr 10 / 3
expr 10 % 3
# Assign the result of the calculation to a variable
num1=$(expr 10 % 3)
# Assign the result of the calculation to a variable (legacy backtick form)
num2=`expr 10 % 3`
echo num1: $num1
echo num2: $num2
| true |
c216cb37c791e33d38be9758d70c2c3f75bc0cb3 | Shell | yabinmeng/dseutilities | /scripts/bash/specific_purpose/tpc_test/bin/cpdseconf679.sh | UTF-8 | 1,416 | 3.546875 | 4 | [
"MIT"
] | permissive | #! /bin/bash
# Restore backed-up DSE 6.7.9 configuration files (jvm.options,
# cassandra.yaml, cassandra-env.sh) for a given node/storage/AIO profile.
DSECONF_BKUP_DIR=~/conf_bkup/dse
BACKUP_DSE_VER=6.7.9
# Print invocation help and stop (exit status 0).
usageExit() {
   echo "cpdseconf.sh [002|003] [default|pd-std|pd-ssd|local-ssd] [aio|noaio]"
   exit
}
if [[ $# != 2 && $# != 3 ]]; then
   usageExit
fi
NODE_NAME=""
if [[ "$1" == "002" ]]; then
   NODE_NAME="node002"
elif [[ "$1" == "003" ]]; then
   NODE_NAME="node003"
else
   usageExit
fi
SUBDIR=""
if [[ "$2" == "default" ]]; then
   SUBDIR="default"
elif [[ "$2" == "pd-std" ]]; then
   SUBDIR="pd-std"
elif [[ "$2" == "pd-ssd" ]]; then
   # NOTE(review): pd-ssd maps to the "pd-std" directory -- looks like a
   # copy-paste slip (local-ssd maps to "local_ssd"); confirm intent.
   SUBDIR="pd-std"
elif [[ "$2" == "local-ssd" ]]; then
   SUBDIR="local_ssd"
else
   usageExit
fi
# Non-default profiles require the third aio/noaio selector.
if [[ "$2" != "default" ]]; then
   if [[ "$3" == "noaio" ]]; then
      SUBDIR="$SUBDIR""/noaio"
   elif [[ "$3" == "aio" ]]; then
      SUBDIR="$SUBDIR""/aio"
   else
      usageExit
   fi
fi
# Refuse to copy configs for a DSE version other than the one running.
CURRUN_DSE_VER=$(dse -v)
if [[ "$BACKUP_DSE_VER" != "$CURRUN_DSE_VER" ]]; then
   echo "Copying wrong version of DSE configuraiton files (currently running DSE version: $CURRUN_DSE_VER; backup DSE version: $BACKUP_DSE_VER)"
   exit
fi
sudo cp $DSECONF_BKUP_DIR/$NODE_NAME/$BACKUP_DSE_VER/jvm.options $DSE_HOME/resources/cassandra/conf
# NOTE(review): this echo omits the $BACKUP_DSE_VER path segment that the cp
# commands below actually use -- verify which path is intended.
echo "$DSECONF_BKUP_DIR/$NODE_NAME/$SUBDIR"
sudo cp $DSECONF_BKUP_DIR/$NODE_NAME/$BACKUP_DSE_VER/$SUBDIR/cassandra.yaml $DSE_HOME/resources/cassandra/conf
sudo cp $DSECONF_BKUP_DIR/$NODE_NAME/$BACKUP_DSE_VER/$SUBDIR/cassandra-env.sh $DSE_HOME/resources/cassandra/conf
| true |
8b1f8bdda88c0fa96393cb1047e7ee73b0418170 | Shell | krantideep95/proto-api-docs-action | /entrypoint.sh | UTF-8 | 440 | 2.875 | 3 | [] | no_license | #!/bin/bash
# Abort on any command failure, unset variable, or failed pipeline stage.
set -euo pipefail
# Fetch only the protobuf files from the repository (sparse checkout helper).
echo "cloning protobuf files"
bash /sparse-checkout.sh
# Generate the swagger docs from the protos with buf.
echo "generating swagger docs"
cd / && buf beta mod update && buf generate
echo "combining swagger docs into 1 file"
bash /combine_swagger_docs.sh /docs
# Lay the result out as /_docs/<repo-name>/<branch>.json, using GitHub
# Actions' GITHUB_REPOSITORY ("owner/repo") and GITHUB_REF variables.
echo "create repo/branch dir structure"
mkdir -p /_docs/${GITHUB_REPOSITORY#*/} && mv combined.json /_docs/${GITHUB_REPOSITORY#*/}/${GITHUB_REF##*/}.json
echo "upload to s3"
bash /upload_to_s3.sh
| true |
d251e5f75b52ffd0b7de8f45a2d5b1a0e569b834 | Shell | shensimeteor/linux_configs_scripts | /script/move_merge_dir/test_dest/head_highlight.sh | UTF-8 | 417 | 2.546875 | 3 | [] | no_license | #!/bin/bash
# --------------------------------------------------------------------------
# Description:
#
#    same as 'head' but only to highlight each file title
#
# History:
#
#    2014-06-25:
#
#       [shensi]: First creation.
# --------------------------------------------------------------------------
# Capture head's output (the "==> file <==" headers included) to a scratch
# file, then colorize the header lines.
/usr/bin/head "$@" > .head_highlight_temp
add_color_lines.sh -f .head_highlight_temp -c "red bold" -k "^==>"
# Fix: remove the scratch file so repeated runs do not leave litter in $PWD.
rm -f .head_highlight_temp
| true |
3070a0e047079c4b6d4d3454499cdec4d89f43ca | Shell | scivm/kubespawner | /ci/install-kube.sh | UTF-8 | 1,350 | 3.15625 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
# install minikube on CI
# this sets up minikube with vm-driver=none, so should not be used anywhere but CI
# Fail fast on errors/unset vars and trace commands for CI logs.
set -eux
mkdir -p bin $HOME/.kube $HOME/.minikube
# Ensure the kubeconfig file exists before minikube writes contexts into it.
touch $KUBECONFIG
# install kubectl, minikube
# based on https://github.com/LiliC/travis-minikube
echo "installing kubectl"
curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/v${KUBE_VERSION}/bin/linux/amd64/kubectl
chmod +x kubectl
mv kubectl bin/
echo "installing minikube"
curl -Lo minikube https://storage.googleapis.com/minikube/releases/v${MINIKUBE_VERSION}/minikube-linux-amd64
chmod +x minikube
mv minikube bin/
echo "starting minikube"
sudo $PWD/bin/minikube start --vm-driver=none --kubernetes-version=v${KUBE_VERSION}
# vm-driver=none runs as root; hand ownership back to the CI user.
sudo chown -R travis: /home/travis/.minikube/
minikube update-context
# can be used to check a condition of nodes and pods
JSONPATH='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}{end}'
# Poll until the addon manager and DNS pods report Ready=True.
echo "waiting for kube-addon-manager"
until kubectl -n kube-system get pods -l component=kube-addon-manager -o jsonpath="$JSONPATH" 2>&1 | grep -q "Ready=True"; do
  sleep 1
done
echo "waiting for kube-dns"
until kubectl -n kube-system get pods -l k8s-app=kube-dns -o jsonpath="$JSONPATH" 2>&1 | grep -q "Ready=True"; do
  sleep 1
done
kubectl get nodes
kubectl get pods --all-namespaces
| true |
41cb1de6d5552d7fd3daed2a8bc8b0e034f81000 | Shell | broadinstitute/ddp-study-server | /scripts/consolidate-db/consolidate.sh | UTF-8 | 5,425 | 3.96875 | 4 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/env bash
# Abort on any command failure, including failures inside pipelines.
set -o errexit
set -o pipefail
# Configuration file used throughout script
CONFIG=''
# Variables used throughout script
# (populated by main() from the JSON config: target Cloud SQL instance and
# the staging GCS bucket for export dumps)
INSTANCE_ID=''
BUCKET_NAME=''
# Prompt with $1 and read one line of input; succeed (return 0) only when
# the reply, compared case-insensitively, is "y" or "yes".
confirm() {
  local prompt="$1" answer
  read -r -p "$prompt" answer
  answer=$(printf '%s' "$answer" | tr '[:upper:]' '[:lower:]')
  case "$answer" in
    y|yes) return 0 ;;
    *)     return 1 ;;
  esac
}
# Block until the user presses enter; always returns 0.
pause() {
  read -r -p 'Press enter to continue... ' ignored
  return 0
}
# Point the gcloud SDK at the project named in the config's .gcp_project.
set_google_project() {
  local name=$(jq -r '.gcp_project' "$CONFIG")
  gcloud config set project "$name"
}
# Kick off creation of the new Cloud SQL instance (--async: returns before
# the instance is ready). Root password and extra flags come from the config.
async_create_instance() {
  local root_pw=$(jq -r '.root_password' "$CONFIG")
  local addl_flags=$(jq -r '.instance_create_flags[]' "$CONFIG" | tr '\n' ' ')
  echo 'Asynchronously creating new Cloud SQL instance'
  echo "Instance ID: $INSTANCE_ID"
  echo "Root password: $root_pw"
  echo "Additional creation flags: $addl_flags"
  echo ''
  # Trace the gcloud invocation so the expanded flags are visible in output.
  set -o xtrace
  gcloud sql instances create "$INSTANCE_ID" \
    --root-password="$root_pw" --async \
    $addl_flags
  set +x
}
# Create the database user for app $1 on the new instance, with credentials
# read from the config's .<app>.username / .<app>.password.
create_db_user() {
  local app="$1"
  local name=$(jq -r ".$app.username" "$CONFIG")
  local pass=$(jq -r ".$app.password" "$CONFIG")
  gcloud sql users create "$name" --host='%' --password="$pass" --instance="$INSTANCE_ID"
}
# Create the staging bucket, reusing it silently if it already exists.
create_bucket() {
  if gsutil ls "gs://$BUCKET_NAME" >/dev/null 2>&1; then
    echo "Bucket with name '$BUCKET_NAME' already exist, reusing"
    return 0
  fi
  gsutil mb -c standard -l us-central1 "gs://$BUCKET_NAME"
}
# Grant instance $1's service account READ on the bucket (recursively),
# so it can pull the SQL dumps during import.
grant_bucket_read() {
  local instance="$1"
  local description=$(gcloud sql instances describe "$instance" --format=json 2>/dev/null)
  local sa_email=$(echo "$description" | jq -r '.serviceAccountEmailAddress')
  echo "Granting read permission for instance '$instance' to bucket '$BUCKET_NAME'"
  gsutil acl ch -r -u "$sa_email:READ" "gs://$BUCKET_NAME"
}
# Grant instance $1's service account WRITE on the bucket, so it can land
# its export dump there.
grant_bucket_write() {
  local instance="$1"
  local description=$(gcloud sql instances describe "$instance" --format=json 2>/dev/null)
  local sa_email=$(echo "$description" | jq -r '.serviceAccountEmailAddress')
  echo "Granting write permission for instance '$instance' to bucket '$BUCKET_NAME'"
  gsutil acl ch -u "$sa_email:WRITE" "gs://$BUCKET_NAME"
}
# Offer to delete the staging bucket and everything in it.
cleanup_bucket() {
  local msg='Remove bucket '$BUCKET_NAME' and all its content? (y/n): '
  if confirm "$msg"; then
    gsutil rm -r "gs://$BUCKET_NAME"
  fi
}
# Start an async export of app $1's schema from its old instance into
# gs://$BUCKET_NAME/<schema>.sql (schema/instance read from the config).
async_export() {
  local app="$1"
  local schema=$(jq -r ".$app.schema" "$CONFIG")
  local old_instance_id=$(jq -r ".$app.old_instance_id" "$CONFIG")
  local bucket_file="gs://$BUCKET_NAME/$schema.sql"
  echo "Asynchronously exporting data"
  echo "Schema: $schema"
  echo "Instance: $old_instance_id"
  echo "Destination: $bucket_file"
  gcloud sql export sql "$old_instance_id" "$bucket_file" --database="$schema" --async
}
# Synchronously import app $1's dump from the bucket into the new instance.
import_data() {
  local app="$1"
  local schema=$(jq -r ".$app.schema" "$CONFIG")
  local bucket_file="gs://$BUCKET_NAME/$schema.sql"
  echo "Importing data for schema '$schema'"
  gcloud sql import sql "$INSTANCE_ID" "$bucket_file"
}
# Orchestrate the whole consolidation: validate the config argument, create
# bucket + instance, export from the three old instances (pepper,
# housekeeping, dsm), create users, import the dumps, then offer cleanup.
# Interactive: pauses between phases so the operator can verify progress in
# the Cloud console.
main() {
  if (( $# < 1 )) || [[ ! -f "$1" ]]; then
    echo 'Missing configuration file'
    exit 1
  fi
  CONFIG="$1"
  echo "Using config file: $CONFIG"
  echo ''
  echo 'This script will create a new sql instance, export data from old instances,'
  echo 'and import them into the new instance.'
  echo ''
  echo 'Please make sure to have the `gcloud`, `gsutil`, and `jq` tools installed.'
  echo ''
  if confirm 'Continue? (y/n): '; then
    echo 'Moving on...'
  else
    echo 'Exiting...'
    exit 1
  fi
  INSTANCE_ID=$(jq -r '.instance_id' "$CONFIG")
  BUCKET_NAME=$(jq -r '.bucket' "$CONFIG")
  echo "Start time: $(date -u)"
  printf '\n=> Setting GCP project\n'
  set_google_project
  echo ''
  echo 'To ensure gcloud SDK has the sufficient permissions, the gcloud auth flow'
  echo 'will be triggered, which will open a browser window. Please authenticate'
  echo 'with a user that has the necessary permissions.'
  echo ''
  pause
  gcloud auth login
  printf '\n=> Creating bucket and granting permissions\n'
  pause
  create_bucket
  echo ''
  # Each old instance needs write access so its export can land in the bucket.
  grant_bucket_write $(jq -r '.pepper.old_instance_id' "$CONFIG")
  grant_bucket_write $(jq -r '.housekeeping.old_instance_id' "$CONFIG")
  grant_bucket_write $(jq -r '.dsm.old_instance_id' "$CONFIG")
  printf '\n=> Creating sql instance and export dumps\n'
  pause
  if confirm "Create new instance '$INSTANCE_ID'? (y/n): "; then
    async_create_instance
  fi
  echo ''
  async_export 'pepper'
  echo ''
  async_export 'housekeeping'
  echo ''
  async_export 'dsm'
  echo ''
  # Instance creation and exports are async; operator confirms completion.
  echo 'Please open Google Cloud console to check sql instance is created.'
  echo 'And check that export dumps has landed in bucket before continuing.'
  pause
  printf '\n=> Creating database users\n'
  create_db_user 'pepper'
  echo ''
  create_db_user 'housekeeping'
  echo ''
  create_db_user 'dsm'
  printf '\n=> Granting permissions and importing data\n'
  pause
  grant_bucket_read "$INSTANCE_ID"
  echo ''
  echo "Start: $(date -u)"
  import_data 'pepper'
  echo "End: $(date -u)"
  echo ''
  echo "Start: $(date -u)"
  import_data 'housekeeping'
  echo "End: $(date -u)"
  echo ''
  echo "Start: $(date -u)"
  import_data 'dsm'
  echo "End: $(date -u)"
  printf '\n=> Cleaning up bucket\n'
  cleanup_bucket
  printf '\n=> Done!\n'
  echo "End time: $(date -u)"
  echo "Instance '$INSTANCE_ID' is ready to go!"
}
main "$@"
| true |
a6e02784c496f0495eadc967c48aa4b53c283eb5 | Shell | gabrielrih/man-in-the-middle-attack | /serverShareInternet.sh | UTF-8 | 655 | 2.609375 | 3 | [] | no_license | #!/bin/bash
#
# Share internet. It's used with man in the middle
#
# Gabriel Richter <gabrielrih@gmail.com>
# Last Modification: 2017-03-17
#
# Upstream interface used for NAT masquerading.
interface=wlan0
# Make sure the netfilter NAT modules are loaded.
modprobe ip_tables
modprobe iptable_nat
# Flush existing filter and NAT rules so we start from a clean slate.
iptables -F INPUT
iptables -F OUTPUT
iptables -F POSTROUTING -t nat
iptables -F PREROUTING -t nat
# Enable IPv4 forwarding so the box routes traffic for clients.
echo 1 > /proc/sys/net/ipv4/ip_forward
iptables -P FORWARD ACCEPT
iptables -P INPUT ACCEPT
iptables -P OUTPUT ACCEPT
# Masquerade client traffic out through $interface.
iptables -A POSTROUTING -t nat -o $interface -j MASQUERADE
#iptables -A FORWARD -p tcp --tcp-flags SYN,RST SYN -m tcpmss --mss 1400:1536 -j TCPMSS --clamp-mss-to-pmtu
# NOTE(review): appending on every run duplicates these nameserver entries
# in /etc/resolv.conf -- consider de-duplicating.
echo "nameserver 8.8.8.8" >> /etc/resolv.conf
echo "nameserver 8.8.4.4" >> /etc/resolv.conf
da9f88e98bb64b522c2b03cf94d9d679a427dc93 | Shell | QueuingKoala/netfilter-samples | /reset-rules/reset-loaded-nf-tables.sh | UTF-8 | 733 | 3.4375 | 3 | [] | no_license | #!/bin/sh
# Unloading netfilter rules the less-abusive way.
# Please see README.md for a discussion on why this is useful.
# Define your program paths.
# You may use relative paths if allowed by your env.
SAVE_BIN="/sbin/iptables-save"
RESTORE_BIN="/sbin/iptables-restore"
# For each table header line ("*filter", "*nat", ...) seen in iptables-save
# output, emit a minimal replacement table: the table header, its built-in
# chains reset to ACCEPT policy, and a COMMIT. Non-header lines are dropped
# (return), so restoring the result empties every currently-loaded table
# without touching tables that were never loaded.
check_table() {
	case "$line" in
	"*filter"|"*security"|"*mangle")
		echo "
$line
:INPUT ACCEPT
:OUTPUT ACCEPT
:FORWARD ACCEPT"
		[ "$line" = "*mangle" ] && echo "
:PREROUTING ACCEPT
:POSTROUTING ACCEPT"
		;;
	"*nat"|"*raw")
		echo "
$line
:PREROUTING ACCEPT
:OUTPUT ACCEPT"
		[ "$line" = "*nat" ] && echo "
:POSTROUTING ACCEPT"
		;;
	*)
		return
		;;
	esac
	echo "COMMIT"
}
# Pipe the current ruleset through the filter above and feed the generated
# reset ruleset straight back into iptables-restore.
("$SAVE_BIN" | while read line
	do check_table; done) | "$RESTORE_BIN"
| true |
ec22a61b1be4871a6e11968d42d8eb076d6fdff1 | Shell | aeternity/infrastructure | /scripts/health_check.sh | UTF-8 | 2,696 | 3.828125 | 4 | [
"ISC"
] | permissive | #!/usr/bin/env bash
#example usage: health_check.sh --network=main --min_height=55000 --version=2.1.0 $HOSTNAME
#--genesis_hash & --network_id override defaults for --network
set -eo pipefail
# Known-good genesis hashes and network ids for the main and uat networks;
# --network=<name> resolves these via indirect expansion below.
genesis_hash_main="kh_pbtwgLrNu23k9PA6XCZnUbtsvEFeQGgavY4FS2do3QP8kcp2z"
genesis_hash_uat="kh_wUCideEB8aDtUaiHCtKcfywU6oHZW6gnyci8Mw6S1RSTCnCRu"
network_id_main=ae_mainnet
network_id_uat=ae_uat
# Parse --key=value style arguments; later explicit --genesis_hash /
# --network_id flags override the --network defaults.
for arg in "$@"; do
  case $arg in
    --network=*)
    network="${arg#*=}"
    # ${!var} indirection: look up genesis_hash_<network> / network_id_<network>.
    genesis_hash_var=genesis_hash_${network}
    genesis_hash=${!genesis_hash_var}
    network_id_var=network_id_${network}
    network_id=${!network_id_var}
    shift # past argument=value
    ;;
    --network_id=*)
    network_id="${arg#*=}"
    shift
    ;;
    --min_height=*)
    min_height="${arg#*=}"
    shift
    ;;
    --genesis_hash=*)
    genesis_hash="${arg#*=}"
    shift
    ;;
    --min_sync_pct=*)
    min_sync_pct="${arg#*=}"
    shift
    ;;
    --version=*)
    version="${arg#*=}"
    shift
    ;;
    *)
            # unknown option
    ;;
  esac
done
# Target host is the last positional argument; default to localhost.
if [ $# -eq 0 ]; then
  HOST=127.0.0.1
else
  HOST=${@:$#}
fi
min_height=${min_height:-0}
# Fetch the node's /v2/status JSON (5s timeout).
get_node_status() {
  curl -sS -m5 http://$HOST:3013/v2/status
}
# Compare the node's genesis key-block hash against the expected value.
check_genesis_hash() {
  test $(echo $node_status| jq -r '.genesis_key_block_hash') == $genesis_hash
}
# Compare the node's network id against the expected value.
check_network_id() {
  test $(echo $node_status| jq -r '.network_id') == $network_id
}
# Print 1 when sync_progress >= min_sync_pct, 0 otherwise (bc comparison;
# bc is used because sync_progress can be fractional).
check_sync_progress() {
  echo "$(echo $node_status| jq -r '.sync_progress')>=$min_sync_pct"|bc
}
# Succeed when the current key-block height is at least min_height.
check_top_min_height() {
  test $(curl -sS -m5 http://$HOST:3013/v2/key-blocks/current | jq '.height') -ge $min_height
}
# Compare the node's reported version against the expected value.
check_version() {
  test $(echo $node_status| jq -r '.node_version') == $version
}
# Run the enabled checks, accumulating names into space-separated pass/fail
# lists; optional checks only run when their expected value was supplied.
node_status=$(get_node_status)
passed=node_status
check_top_min_height && passed+=" min_height"|| failed+=" min_height"
if [ -n "$genesis_hash" ]; then
  check_genesis_hash && passed+=" genesis"|| failed+=" genesis"
fi
if [ -n "$network_id" ]; then
  check_network_id && passed+=" network_id"|| failed+=" network_id"
fi
if [ -n "$version" ]; then
  check_version && passed+=" version"|| failed+=" version"
fi
if [ -n "$min_sync_pct" ]; then
  # check_sync_progress prints 1/0 rather than using its exit status.
  if [ "$(check_sync_progress)" -ne 1 ]; then
    failed+=" sync_progress"
  else
    passed+=" sync_progress"
  fi
fi
# Report: passed tests to stdout, failed tests to stderr; exit 1 on any failure.
if [ "$passed" != "" ]; then
  printf "Passed tests:\n"
  for pass in $passed; do
    printf "%s\n" "- $pass"
  done
fi
if [ "$failed" != "" ]; then
  printf "\nFailed tests:\n" >&2
  for fail in $failed; do
    printf "%s\n" "- $fail" >&2
  done
  exit 1
fi
| true |
ed9640b8f27698babd2403b33f486ad12d8e7b87 | Shell | adriandugan/mail | /.env.example | UTF-8 | 1,347 | 2.828125 | 3 | [
"Unlicense"
] | permissive | #-------------------------------------------------------------------------------
# Specify your mail sending service
# mail - default PHP `mail()` command, available on Linux and Windows
# sendmail - Linux mail sending command
# smtp - Simple Mail Transfer Protocol
#-------------------------------------------------------------------------------
MAIL_DRIVER=mail
#MAIL_DRIVER=sendmail
#MAIL_DRIVER=smtp
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# Default "From" address
#-------------------------------------------------------------------------------
MAIL_FROM_EMAIL=from.address@example.com
MAIL_FROM_NAME=John Doe
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# SMTP settings
#-------------------------------------------------------------------------------
MAIL_SMTP_HOST=
MAIL_SMTP_PORT=
MAIL_SMTP_USERNAME=
MAIL_SMTP_PASSWORD=
# SMTP DEBUGGING
# 0 = off (for production)
# 1 = show client messages
# 2 = show client AND server messages (for dev environment)
# 3 = as 2, but more verbose
# 4 = as 3, but goes very low level
MAIL_SMTP_DEBUG=3
#-------------------------------------------------------------------------------
| true |
f1cef4b990ba6cbf4cfcb9869bcc4496a90de983 | Shell | farhan85/Misc | /java/ivy/update-dependencies.sh | UTF-8 | 196 | 2.53125 | 3 | [] | no_license | #!/usr/bin/env bash
# Resolve the dependencies declared in dependencies.xml with Apache Ivy and
# retrieve the jars into LIB_DIR; -sync prunes artifacts no longer listed.
# (The ".../" placeholders must be replaced with real paths before use.)
LIB_DIR=".../lib"
IVY_JAR=".../ivy-2.5.0-rc1.jar"
# Fix: quote "$IVY_JAR" so the path survives word splitting/globbing
# (ShellCheck SC2086); the -retrieve pattern was already quoted.
java -jar "$IVY_JAR" \
    -ivy dependencies.xml \
    -sync \
    -retrieve "$LIB_DIR/[artifact]-[type]-[revision].[ext]"
| true |
a6284b77a6ddf522dfadb83dbe3628c62e2527f8 | Shell | qian-hao-developer/Tips | /Jenkins/ps152/ps152-make_api.sh | UTF-8 | 1,563 | 3.109375 | 3 | [] | no_license | #!/bin/bash -xe
MODEL_NAME=JT-C52
branch_arg=$1
case $branch_arg in
cobra-cts) BRANCH=ps152-cobra-cts; CUSTOM=002;;
cobra-beta) BRANCH=ps152-cobra-dev-beta; CUSTOM=002;;
platform) BRANCH=ps152-platform-dev; CUSTOM=001;;
cobra) BRANCH=ps152-cobra-dev; CUSTOM=C02;;
cobra-rel) BRANCH=ps152-cobra-release; CUSTOM=C14;;
pingu) BRANCH=ps152-pingu-dev; CUSTOM=D21;;
pingu-rel) BRANCH=ps152-pingu-release; CUSTOM=D33;;
pell) BRANCH=ps152-pell-dev; CUSTOM=002;;
cobra-advance-dev) BRANCH=ps152-cobra-advance-dev; CUSTOM=C03;;
cobra-contact-rel) BRANCH=ps152-cobra-contact-release; CUSTOM=C12;;
arch-cl-rel) BRANCH=ps152-arch-cl-release; CUSTOM=D32;;
*) usage ; exit 1;;
esac
SI=$2
if [ "$SI" = "" ]; then
exit 1
fi
REGION=$3
if [ "$REGION" = "" ]; then
exit 1
fi
cd /home/gitosis/android/$BRANCH/
SSD=$4
if [ "$SSD" = "ssd" ]; then
cd /home2/$BRANCH/
fi
RELEASE=$5
cd nightly/
echo "make api.xml start"
source build/envsetup.sh
lunch JT_C52-user
make update-api
make cts-test-coverage
DSTDIR=develop
if [ "$BRANCH" == "ps152-cobra-release" ]; then
DSTDIR=release
fi
if [ "$BRANCH" == "ps152-pingu-release" ]; then
DSTDIR=release
fi
if [ "$BRANCH" == "ps152-arch-cl-release" ]; then
DSTDIR=release
fi
zip /qnap/PS152-1/$DSTDIR/nightly/$BRANCH/PS152-12-${CUSTOM}-${SI}-${REGION}-api.zip \
out/host/linux-x86/cts-api-coverage/api.xml
echo date
echo "make api.xml end"
| true |
8b029a15ed615a7f437d0cdc2579385e29618377 | Shell | stevescherer97/media-backups | /sync.sh | UTF-8 | 910 | 3.203125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
tmp='/home/wlss26/Dropbox/tmp3.txt';
email_script='/home/wlss26/Dropbox/sync_status.py';
# sync files between NTFS1 and NTFS2
before_primary_backup=`find /media/drive1_ntfs/ -type f -print | wc -l`
before_secondary_backup=`find /media/drive2_ntfs/ -type f -print | wc -l`
# sync'ing files between primary backup and secondary backup
rsync -avxHAXW /media/drive1_ntfs /media/drive2_ntfs;
after_primary_backup=`find /media/drive1_ntfs/ -type f -print | wc -l`
after_secondary_backup=`find /media/drive2_ntfs/ -type f -print | wc -l`
echo "Synchronization of files between NTFS1 and NTFS2 has completed. Primary backup started with -- $before_primary_backup Primary backup finished with -- $after_primary_backup. Secondary Backup started with -- $before_secondary_backup Secondary Backup finished with -- $after_secondary_backup" > $tmp;
/usr/local/bin/python $email_script;
| true |
409b4a7c2b7350f4b14be682ebde8231ca772d37 | Shell | forest-fire/universal-fire | /scripts/workspace/test.sh | UTF-8 | 512 | 3.078125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Run tests: with no arguments, test every package via lerna; with a package
# name ($1) and optional jest filter ($2), run jest inside that package.
if [[ -z "$1" ]]; then
  echo "┏━━━ 🎯 TEST: all packages ━━━━━━━━━━━━━━━━━━━"
  echo ""
  yarn lerna run test --stream --concurrency 4
else
  if [[ -z "$2" ]]; then
    echo "┏━━━ 🎯 TEST: ($1) ━━━━━━━━━━━━━━━━━━━"
  else
    echo "┏━━━ 🎯 TEST: ($1: $2) ━━━━━━━━━━━━━━━━━━━"
  fi
  echo ""
  cd packages/$1
  # $2 deliberately unquoted: when empty it must expand to no argument.
  npx jest $2
  cd - > /dev/null
fi
| true |
15ba1c272335a000799fd4d67e21c8df479d12af | Shell | hbalp/callers | /bin/launch_link_edition.sh | UTF-8 | 806 | 2.578125 | 3 | [] | no_license | #!/bin/bash
#set -x
# Copyright (C) 2015 Thales Communication & Security
#  - All Rights Reserved
# coded by Hugues Balp
# Load the shared helper library; the functions used below
# (list_files_in_dirs, list_defined_symbols) are defined there.
common=`which common.sh`
source $common
# List generated json files
#find . -type f -name "*.gen.json.gz" -exec gunzip {} \;
# Colon-separated last argument is the list of directory names to skip.
list_files_in_dirs `pwd` .file.callers.gen.json dir.callers.gen.json ".libs:.deps:all:doc:msbuild:m4:build-aux:autom4te.cache:demos:samples:Samples:configs:cxxtestplus-0.3:.git:projects:Projects:ref:tests:config"
# List all defined symbols in file defined_symbols.json
list_defined_symbols defined_symbols.json `pwd` dir.callers.gen.json
#read_defined_symbols.native defined_symbols.json file.callers.gen.json
# add extcallees to json files
source add_extcallees.sh `pwd`
# add extcallers to json files
source add_extcallers.sh .
source indent_jsonfiles.sh .
| true |
1dc56e9ff8b23e48279f2941d4962356b97ae1c6 | Shell | 7onn/system-config | /start.sh | UTF-8 | 2,303 | 3.75 | 4 | [] | no_license | #!/bin/sh
# sh -c "$(curl -H 'Cache-Control: no-cache' -LsSo- https://raw.githubusercontent.com/IgorAssuncao/system-config/master/start.sh)"
# Bootstrap: back up existing config files and symlink the versions from
# ~/system-config, installing oh-my-zsh, neovim and a few CLI tools (Arch).
# Fix: tilde does not expand inside double quotes, so every original test
# like [ -d "~/.i3/config" ] was always false; $HOME is used instead.

# Clone the system-config repository.
# NOTE(review): checkInput is not defined anywhere in this file, and
# downloadRepo itself is never invoked -- confirm before relying on it.
downloadRepo() {
  checkInput
  echo "Downloading repo"
  git clone git@github.com:IgorAssuncao/system-config.git
  echo "Finished downloading repo"
}

echo "This script automatically installs some tools and creates symlinks for you."
echo "All of these tools will be prompted to install:
  - QTile (A window manager written in python)
  - Oh-my-zsh
  - NeoVim (Newer version of vim)
  - Custom init.vim (nvim config file)
  - Custom Xresources
"

# i3 config is a file, hence -f (the original used -d on a quoted tilde path).
if [ -f "$HOME/.i3/config" ]; then
  echo "Renaming ~/.i3/config to ~/.i3/config.bkp"
  mv ~/.i3/config ~/.i3/config.bkp
  echo "Creating ~/.i3/config symlink"
  ln -s ~/system-config/.i3/config ~/.i3/config
  echo "Finished creating ~/.i3/config symlink"
fi

echo "Creating .i3status.conf symlink"
# -e replaces the deprecated -a existence test.
if [ -e "$HOME/.i3status.conf" ]; then
  echo "Renaming ~/.i3status.conf to ~/.i3status.conf.bkp"
  mv ~/.i3status.conf ~/.i3status.conf.bkp
  echo "Creating ~/.i3status.conf symlink"
  ln -s /etc/i3status.conf ~/.i3status.conf
  echo "Finished symlink creation"
fi

echo "Creating qtile symlink"
if [ -d "$HOME/.config/qtile" ]; then
  echo "Renaming ~/.config/qtile to ~/.config/qtile.bkp"
  mv ~/.config/qtile ~/.config/qtile.bkp
fi
# NOTE(review): the qtile symlink itself is never created here -- TODO confirm.

sh -c "$(curl -fsSL https://raw.github.com/ohmyzsh/ohmyzsh/master/tools/install.sh)"

echo "Creating ~/.zshrc symlink"
if [ -e "$HOME/.zshrc" ]; then
  echo "Renaming ~/.zshrc to ~/.zshrc.bkp"
  mv ~/.zshrc ~/.zshrc.bkp
fi

echo "Install NeoVim?"
sudo pacman -S neovim

if [ -d "$HOME/.config/nvim" ]; then
  echo "Renaming ~/.config/nvim to ~/.config/nvim.bkp"
  mv ~/.config/nvim ~/.config/nvim.bkp
fi
echo "Creating ~/.config/nvim symlink"
ln -s ~/system-config/.config/nvim ~/.config/nvim

echo "Renaming ~/.Xresources to ~/.Xresources.bkp"
mv ~/.Xresources ~/.Xresources.bkp
echo "Renaming ~/.xinitrc to ~/.xinitrc.bkp"
# Fix: backup target was ~/.xinit.bkp, inconsistent with the echo above.
mv ~/.xinitrc ~/.xinitrc.bkp

echo "Creating symlinks"
# Fix: destination was ~/.Xresource (missing trailing "s").
ln -s ~/system-config/.config/x/.Xresources ~/.Xresources
ln -s ~/system-config/.config/x/.xinitrc ~/.xinitrc

echo "Installing exa (ls replacement written in Rust)"
sudo pacman -S exa
echo "Installing bat (cat replacement written in Rust)"
sudo pacman -S bat
echo "Installing ripgrep"
sudo pacman -S ripgrep
| true |
2cb4c342da976f291af23922cefb261ddd4e02c0 | Shell | pjanowski/Pawel_PhD_Scripts | /namd/namdcheck.sh | UTF-8 | 1,479 | 3.015625 | 3 | [] | no_license | #!/bin/bash
###need to change the location of the out files (first line below) and the location of the restart file with box dimensions(third line).
# Collect ENERGY: lines from all NAMD logs, generate a MATLAB script that
# plots each energy/pressure/volume column and writes a volume summary, then
# run it headlessly and display the results.
rm tmp.txt
OUTPUTS=../*log
#for i in `ls -ld ../${OUTPUTS} | grep ^- | awk '{print $9}' | grep ^${OUTPUTS}`; do
for i in `ls -d $OUTPUTS`; do
grep ENERGY: $i >> tmp.txt
done
# Drop the leading "ENERGY:" label columns so MATLAB's load() sees numbers.
cut -c9-320 tmp.txt >tmp2.txt
# Generate the MATLAB script. The heredoc body is MATLAB code, not shell;
# it contains no $ so the unquoted EOF delimiter performs no expansion.
# NOTE(review): columns 11 (totalE) are plotted twice -- possibly one was
# meant to be a different column; confirm.
cat > volcheck.m <<EOF
volume=cellvolume('../equil30.rst7');
all=load('tmp2.txt');
plot(all(:,10))
print kineticE.jpg -djpeg
close(1)
plot(all(:,11))
print totalE.jpg -djpeg
close(1)
plot(all(:,11))
print totalE.jpg -djpeg
close(1)
plot(all(:,12))
print temp.jpg -djpeg
close(1)
plot(all(:,13))
print potentialE.jpg -djpeg
close(1)
plot(all(:,15))
print tempAvg.jpg -djpeg
close(1)
plot(all(:,16))
print pressure.jpg -djpeg
close(1)
plot(all(:,19))
print pressureAvg.jpg -djpeg
close(1)
MIN=min(all(:,18))
MINPER=MIN/volume*100
MAX=max(all(:,18))
MAXPER=MAX/volume*100
MEAN=mean(all(:,18))
MEANPER=MEAN/volume*100
vol=all(:,18)./volume*100;
plot(vol)
print volume.jpg -djpeg
close(1)
pwd=pwd
fid=fopen('volume.txt','wt');
fprintf(fid, '%s\n',pwd);
fprintf(fid, 'crystal volume = %.2f\n', volume);
fprintf(fid, 'min volume = %.2f %.2f percent\n', MIN, MINPER);
fprintf(fid, 'max volume = %.2f %.2f percent\n', MAX, MAXPER);
fprintf(fid, 'mean volume = %.2f %.2f percent\n', MEAN, MEANPER);
fclose(fid);
EOF
# Run the generated script headlessly; summary goes to mtl.out / volume.txt.
matlab -nosplash -nodisplay -nodesktop < volcheck.m > mtl.out
cat volume.txt
eog volume.jpg &
| true |
9820b06e3be5de488b854aaf8006179fcafb028e | Shell | r2d2c3p0/perl-n-a-shell | /Shell/run.ksh | UTF-8 | 9,655 | 3.6875 | 4 | [
"MIT"
] | permissive | #!/usr/bin/ksh93
# Script Name : run.ksh.
# Date of birth : 08/11/2012. # 08.11.12v is the initial version.
# Version : 08.12.12v
# The version is obtained by the latest date of the tool in working condition.
# Author : Shane Reddy.
# Explanation : Universal execution tool. Tested in AIX, Need testing in other flavors.
# WebSphere mentioned here is WebSphere Application Server and IBM HTTP Server.
# Tool checks for the WebSphere product.
#
# Dependencies :
# Modifications :
#
# Contact me : r2d2c3p0
#
#
# Global variables
#---------------------------------------------------------------------------------------------------------------------------------------------------
# SIGINT handler: log the interrupt, then clear the trap so a second Ctrl-C
# is ignored while cleanup finishes.
function SAFE_EXIT
{
 LOGERROR "SIGNAL INTERRUPT received."
 LOGERROR "Safe exiting."
 trap "" 2
}
trap "SAFE_EXIT" 2
# Shared state: 1=jython/python input, 2=jacl, 0=undetermined.
LANG_FLAG=0
INFOFILE=../logs/systemout.log
ERRFILE=../logs/systemerr.log
PY_DIR=../python
# Functions
#----------------------------------------------------------------------------------------------------------------------------------------------------
# Print the command-line help text and exit 1.
function USAGE
{
 echo
 echo " Usage: run.ksh [-file|-f] [-info|-i] [-help|-h] [-version|-v] [-interactive] {-nri}"
 echo
 echo " Where:"
 echo "       -file or -f = Input filename. This argument needs to be followed by a filename."
 echo "                     This tool is designed for websphere installation, administration, deployment and uninstallation purpose."
 echo "                     Supported languages - JACL and Jython/Python."
 echo "                     Use option <-nri> = Non-Root installation to suppress root execution, Where: The WebSphere is installed by non-root user."
 echo "                     Enter option -nri at the very end."
 echo "       -info or -i = Prints basic information."
 echo "       -help or -h = Prints usage and available input arguments."
 echo "       -version or -v = Prints the current version."
 echo "       -interactive = Invokes interactive mode/shell. Enter 'q' to quit the mode."
 echo
 exit 1
}
# Validate the input script (global IPTFILE): it must exist under PY_DIR,
# and its extension selects LANG_FLAG (1=jython, 2=jacl). Files containing
# neither "def" nor "proc" are rejected.
function PARSEFILE
{
 if [ ! -f $PY_DIR/$IPTFILE ]; then
    LOGERROR "File $IPTFILE is not found under python."
    exit 1
 fi
 IPTFILEX=$PY_DIR/$IPTFILE
 GETEXT=`echo $IPTFILE |awk -F . '{print NF}'`
 EXTENTION=`echo $IPTFILE|cut -d '.' -f$GETEXT`
 case $EXTENTION in
      python|py|jy|jython)
                LANG_FLAG=1
      ;;
      jacl|tcl)
                LANG_FLAG=2
      ;;
      *)
        LOGWAR "Input file does not have recommended file extention. [$EXTENTION]"
      ;;
 esac
 egrep 'def|proc' $IPTFILEX > /dev/null 2>&1
 if [ $? -ne 0 ]; then
    LOGERROR "Contents of $IPTFILE not supported. Refer usage[-h]"
    exit 1
 fi
}
# Log an INFO line to stdout and the info log.
function LOGINFO
{
 echo "$DATE | INFO: $*" |tee -a $INFOFILE
}
# Validate the properties file (global PROPFILE): de-duplicate lines in
# place, then collect any non-comment key=value entries whose value is
# empty; report them and exit 1 if found.
function VALIDATEFILE
{
 awk '!x[$0]++' $PROPFILE > $PROPFILE\_$$
 mv $PROPFILE\_$$ $PROPFILE
 set -A KV_ARGS
 KV_ARGS[0]="[ Key(s) missing value(s):"
 kv_flag=0
 y=1
 for KVPAIR in `cat $PROPFILE|grep -v ^#`
 do
     key=`echo $KVPAIR|cut -d"=" -f1`
     value=`echo $KVPAIR|cut -d"=" -f2`
     [ -z "$value" ] && { KV_ARGS[y]=$key ; ((y+=1)); kv_flag=1; }
 done
 z=${#KV_ARGS[@]}
 ((z+=1))
 KV_ARGS[$z]="]"
 #[ -z `grep '[^[:alnum:] _-./\#=\$]' $PROPFILE` ] || { LOGERROR "$PROPFILE validation failed. Special characters found." ; exit 1 ; }
 if [ $kv_flag -ne 0 ]; then
    LOGERROR "Key-Value. $PROPFILE validation failed."
    echo $DATE ${KV_ARGS[@]}|tee -a $ERRFILE
    exit 1
 else
    LOGINFO "$PROPFILE validation passed."
 fi
}
# Log an ERROR line to stdout, the info log, and the error log.
function LOGERROR
{
 echo "$DATE | ERROR: $*" |tee -a $INFOFILE
 echo "$DATE | ERROR: $*" >> $ERRFILE
}
# Root check: inspect the LAST argument; "-nri" suppresses the root
# requirement (non-root WebSphere install), otherwise exit 1 unless root.
function YOELREY
{
 shift `expr $# - 1`
 ROOT_FLAG=$1
 if [ $ROOT_FLAG == "-nri" ]; then
    LOGWAR "Suppressing root execution, Reason: Non-Root WebSphere installation maybe..???"
 else
     if [ $USER != "root" ]; then
        LOGERROR "[$USER] Root/Sudo privileges are required to run this tool."
        exit 1
     fi
 fi
}
# Log a WARNING line to stdout, the info log, and the error log.
function LOGWAR
{
 echo "$DATE | WARNING: $*" |tee -a $INFOFILE
 echo "$DATE | WARNING: $*" >> $ERRFILE
}
# Rotate oversized logs: any *.log under ../logs larger than 5 MB is renamed
# with a _DDMMYYYY suffix (a fresh log is created by the next write).
function LOGCLEAN
{
 #LOG_SIZE=`ls -lrt $INFOFILE |awk '{print $5}'`
 #echo $LOG_SIZE
 find ../logs \
      -type f \
      -name '*.log'\
      -print \
      | while read file
        do
           SIZE=`du -m $file|awk '{print $1}'`
           if [ $SIZE -gt 5 ]; then
              LOGWAR "$file is over the limit."
              mv $file $file\_`date +"%d%m%Y" `
              LOGINFO "$file is rotated."
           fi
        done
}
function MAIN
{
PYFILE=$PY_DIR/$2
shift 2
REST=$*
if [ $LANG_FLAG -eq 2 ]; then
CMD="sudo $WSADMIN_PATH/wsadmin.sh -lang jacl -f $PYFILE $REST"
else
CMD="sudo $WSADMIN_PATH/wsadmin.sh -lang jython -f $PYFILE $REST"
fi
$CMD | egrep -v "SOAP connector;|argv variable"|tee -a $INFOFILE
}
function GETINOPT
{
set -A OPTIONS_ARGS
OPTIONS_ARGS[0]="[Input arguments:"
x=1
for a in $*; do
if [ $x -eq $# ]; then
OPTIONS_ARGS[x]=$a]
else
OPTIONS_ARGS[x]=$a,
fi
((x+=1))
done
echo ${OPTIONS_ARGS[@]}|tee -a $INFOFILE
}
# Main
#-----------------------------------------------------------------------------------------------------------------------------------------------------
# Write a run header into $INFOFILE: separator, tool name, ksh version,
# OS flavor, ksh self-identification, current user, then the argument
# list via GETINOPT. The ${.sh.*} expansions are ksh93-specific.
function MAIN_INT
{
echo >> $INFOFILE
echo ">" >> $INFOFILE
echo >> $INFOFILE
# If you are running on other flavors adjust the below ksh version accordingly.
LOGINFO "Running the _main_ initialization."
echo "Filename - run.ksh"|tee -a $INFOFILE
echo "ksh version - ${.sh.version}"|tee -a $INFOFILE
echo "OS flavor - `uname -a`"|tee -a $INFOFILE
print "run: ${.sh.name}[ ${.sh.subscript}]=${.sh.value}"|tee -a $INFOFILE
echo "$USER"|tee -a $INFOFILE
GETINOPT $*
}
# Launchpad checks.
#
# Pre-flight: the tool only runs on AIX, before year 2016, with ksh93
# installed and with the expected sibling directory layout present.
if [ `uname` != "AIX" ]; then
echo "Runs on AIX only."
exit 1
else
EXPIRY=`date +"%Y"`
if [ $EXPIRY -gt 2015 ]; then
LOGERROR "FATAL: The tool run.ksh expired!!!"
exit 1
fi
if [ ! -f /usr/bin/ksh93 ]; then
echo "Enhanced korn shell is missing."
exit 1
fi
for dir in logs properties lib misc python perl ; do
if [ ! -d ../$dir ]; then
echo "Missing directories/files [$dir], Please check the directories."
exit 1
fi
done
if [ $# -eq 0 ]; then
USAGE # Usage
else
# Normalize the first argument to lower case for the case dispatch below.
IPT=`echo $1| tr [A-Z] [a-z]`
fi
if [ ! -f ../properties/main.properties ]; then
echo "Error loading main properties file."
exit 1
else
# Source the properties, then validate key=value pairs via VALIDATEFILE.
. ../properties/main.properties
PROPFILE=../properties/main.properties
VALIDATEFILE $PROPFILE
fi
fi
# End of checks.
echo $CLEAR
# Dispatch on the normalized first argument.
case $IPT in
-interactive)
LOGINFO "->Invoked int mode."
echo ">>" # Tested on Linux and AIX. Can have -ne flag.
# Prompt loop: read (9 s timeout) until the user enters "q".
read -t 9 argmt
while [ $argmt != "q" ]; do
echo ">>"
read -t 9 argmt
done
LOGINFO "Exited int mode."
exit 0
;;
-info|-i)
# Print lines 3-11 of this script (the header block) without the '#'.
sed -n '3,11'p $0|sed 's/#//' #| sed -e :a -e 's/^.\{1,167\}$/ & /;ta'
exit 0
;;
-version|-v)
# The version string is field 4 of line 5 of this script.
sed -n '5'p $0|awk '{print $4}'
exit 0
;;
-help|-h)
USAGE
;;
-file|-f)
if [ $# -gt 1 ]; then
YOELREY $*
IPTFILE=$2
PARSEFILE $IPTFILE
LOGCLEAN
MAIN_INT $*
# Crude single-instance lock: run.lock is renamed to run.lockx while
# MAIN executes and restored afterwards.
if [ ! -f ../misc/run.lock ]; then
LOGWAR "Another instance is running. Script is locked."
exit 1
fi
mv ../misc/run.lock ../misc/run.lockx
MAIN $*
mv ../misc/run.lockx ../misc/run.lock
echo "<" >> $INFOFILE
else
LOGERROR "-file|-f: missing argument."
LOGERROR "Filename is followed by -f|-file argument."
USAGE
fi
;;
*)
LOGERROR "Unrecognized input: $1"
USAGE
;;
esac
#endMain
3427190b65e07f3414e8b1e4c3bf0fedd5bb70cf | Shell | jmgc/SoSAT | /scripts/run.sh | UTF-8 | 237 | 2.828125 | 3 | [] | no_license | #!/bin/bash
# This script is just for simple input in the form
# run.sh cnf.dimacs N SEED
# We recommend that you use the sosat command directly
# $1 = DIMACS CNF file, $2 = algorithm selector (0 -> genetic, else ant),
# $3 = random seed passed through to sosat.
if [ "$2" == "0" ]; then
ALGO="genetic"
else
ALGO="ant"
fi
sosat $1 -a $ALGO -s $3
03b31da6da6976b31ac16a93d9241fc186287c90 | Shell | MiLk/SyncDay | /start.command | UTF-8 | 2,558 | 3.6875 | 4 | [
"Beerware"
] | permissive | #!/bin/bash
#############################################################################################################
# #
# Name : SyncDay #
# Author : Emilien Kenler <hello@emilienkenler.com> #
# Source : https://github.com/MiLk/SyncDay #
# License : Beerware #
# #
#############################################################################################################
# Locate the CocoaDialog helper binary (system-wide first, then per-user).
CD=""
if [ -d "/Applications/CocoaDialog.app" ]; then
CD="/Applications/CocoaDialog.app/Contents/MacOS/CocoaDialog"
elif [ -d "$HOME/Applications/CocoaDialog.app" ]; then
CD="$HOME/Applications/CocoaDialog.app/Contents/MacOS/CocoaDialog"
else
echo "CocoaDialog.app not found"
exit 1
fi
# Locate the SyncDay installation: ~/SyncDay, then $PWD, then ask the user.
SYNCDAY=""
if [ -d "${HOME}/SyncDay" ]; then
SYNCDAY="${HOME}/SyncDay"
elif [ -d "$PWD" ]; then
SYNCDAY="$PWD"
else
# NOTE(review): "--select‑only‑directories" below contains non-ASCII
# hyphens (U+2011); CocoaDialog expects ASCII "-" -- verify the flag.
rv=`$CD fileselect \
--title "Localisation de SyncDay" \
--text "Ou se trouve SyncDay ?" \
--with-directory $HOME \
--select-directories \
--select‑only‑directories`
if [ -n "$rv" ]; then
# NOTE(review): the pipe runs this while-loop in a subshell, so the
# SYNCDAY assignment inside it is lost after the loop -- confirm intent.
echo -e "$rv" | while read file; do
if [ -d "$file" ]; then
SYNCDAY="$file"
else
echo "SyncDay non trouvé."
exit 1
fi
done
else
echo "SyncDay non trouvé."
exit 1
fi
fi
# Let the user pick a sync configuration file and run mount_sync.sh on it.
CONFIG=""
rv=`$CD fileselect \
--title "SyncDay - Fichier de configuration de la synchronisation" \
--text "Choississez le fichier de configuration correspondant à la synchronisation que vous souhaitez faire." \
--with-directory ${SYNCDAY}/config \
--select‑only‑directories`
if [ -n "$rv" ]; then
echo -e "$rv" | while read file; do
if [ -e "$file" ]; then
CONFIG="$file"
${SYNCDAY}/mount_sync.sh "${CONFIG}"
else
echo "Fichier de configuration non trouvé."
exit 1
fi
done
else
echo "Fichier de configuration non trouvé."
exit 1
fi
# Final "sync finished" message box.
rv=`$CD ok-msgbox --title "SyncDay" \
--text "Synchronisation terminée" \
--float \
--icon "info" \
--no-cancel \
--informative-text "La synchronisation a été effectuée avec succès."`
3116f6070d26280c2a2161f2e2fa5f156d86dac9 | Shell | sumankumarpagadala/docker-tools | /images/git-dump/entrypoint.sh | UTF-8 | 1,304 | 3.5625 | 4 | [
"Apache-2.0"
] | permissive | #! /bin/sh
# Container entrypoint: one-time timezone setup, SSH key/config install,
# log file preparation, git-dump config + crontab generation, then start
# crond and follow the log. Driven entirely by environment variables:
# TZ, USERNAME, GROUP, SSHKEY_SECRET, SSH_PORT, REPO_PREFIX, DEST_DIR,
# API_TOKEN_SECRET, MINUTE, HOUR, KEEP_DAYS, REPOS.
if [ ! -f /etc/timezone ] && [ ! -z "$TZ" ]; then
    # At first startup, set timezone
    apk add --update tzdata
    cp /usr/share/zoneinfo/$TZ /etc/localtime
    echo $TZ >/etc/timezone
fi
SSH_PATH=/home/$USERNAME/.ssh
mkdir -p -m 700 $SSH_PATH
if [ ! -z "$SSHKEY_SECRET" ]; then
    # Install the mounted secret as the default identity for all hosts.
    cp /run/secrets/$SSHKEY_SECRET $SSH_PATH/$SSHKEY_SECRET
    chmod 400 $SSH_PATH/$SSHKEY_SECRET
    cat <<EOF >$SSH_PATH/config
Host *
    IdentityFile $SSH_PATH/$SSHKEY_SECRET
    Port $SSH_PORT
EOF
    if [ ! -z "$REPO_PREFIX" ]; then
        # REPO_PREFIX looks like user@host:...; pre-seed known_hosts for host.
        SSH_HOST=$(echo $REPO_PREFIX | cut -d@ -f 2 | cut -d: -f 1)
        ssh-keyscan -p $SSH_PORT $SSH_HOST >>$SSH_PATH/known_hosts
    fi
fi
chown -R $USERNAME /home/$USERNAME
[ -e /var/log/git-dump.log ] || touch /var/log/git-dump.log
[ -e /var/log/git-dump-status.txt ] || touch /var/log/git-dump-status.txt
mkdir -p -m 750 $DEST_DIR
chown $USERNAME.$GROUP $DEST_DIR /var/log/git-dump.log /var/log/git-dump-status.txt
# Render the git-dump options file consumed by /usr/local/bin/git-dump.
cat <<EOF >/etc/opt/git-dump
# Options for /usr/local/bin/git-dump
API_TOKEN_SECRET=$API_TOKEN_SECRET
LOGFILE=/var/log/git-dump.log
REPO_PREFIX=$REPO_PREFIX
STATFILE=/var/log/git-dump-status.txt
EOF
# Schedule the periodic dump for $USERNAME.
cat <<EOF >/etc/crontabs/$USERNAME
$MINUTE $HOUR * * * /usr/local/bin/git-dump.sh $DEST_DIR $KEEP_DAYS $REPOS
EOF
crond -L /var/log/cron.log
# Keep PID 1 alive and stream the dump log to container stdout.
tail -fn 1 /var/log/git-dump.log
| true |
c7f70157f7326d97d5ebf950f7ad7e34a60596a5 | Shell | kitech/android-nmp | /settings.sh | UTF-8 | 1,471 | 3.421875 | 3 | [] | no_license | #!/bin/bash
# set the base path to your Android NDK (or export NDK to environment)
NDK_BASE=/opt/android-ndk
# NOTE(review): NDK_BASE is assigned unconditionally just above, so this
# fallback branch can never fire -- confirm whether ${NDK_BASE:-...} was meant.
if [[ "x$NDK_BASE" == "x" ]]; then
NDK_BASE=/usr/local/android-ndk
echo "No NDK_BASE set, using $NDK_BASE"
fi
NDK_PLATFORM_VERSION=8
NDK_SYSROOT=$NDK_BASE/platforms/android-$NDK_PLATFORM_VERSION/arch-arm
# Host OS name in lower case, used in the prebuilt toolchain path.
NDK_UNAME=`uname -s | tr '[A-Z]' '[a-z]'`
# NDK_TOOLCHAIN_BASE=$NDK_BASE/toolchains/arm-linux-androideabi-4.4.3/prebuilt/$NDK_UNAME-x86
NDK_TOOLCHAIN_BASE=$NDK_BASE/toolchains/arm-linux-androideabi-4.8/prebuilt/$NDK_UNAME-x86_64
# Cross-toolchain shorthands used by the build scripts that source this file.
ACC="$NDK_TOOLCHAIN_BASE/bin/arm-linux-androideabi-gcc --sysroot=$NDK_SYSROOT"
# ACC="$NDK_TOOLCHAIN_BASE/bin/arm-linux-androideabi-gcc"
ACXX="$NDK_TOOLCHAIN_BASE/bin/arm-linux-androideabi-g++ --sysroot=$NDK_SYSROOT"
ALD=$NDK_TOOLCHAIN_BASE/bin/arm-linux-androideabi-ld
AELF=$NDK_TOOLCHAIN_BASE/bin/arm-linux-androideabi-readelf
AAR=$NDK_TOOLCHAIN_BASE/bin/arm-linux-androideabi-ar
AAS=$NDK_TOOLCHAIN_BASE/bin/arm-linux-androideabi-as
ARANLIB=$NDK_TOOLCHAIN_BASE/bin/arm-linux-androideabi-ranlib
ASTRIP=$NDK_TOOLCHAIN_BASE/bin/arm-linux-androideabi-strip
NDK_LDPATH="-L${NDK_SYSROOT}/usr/lib"
ARM7_CFLAGS="-mcpu=cortex-a8"
# i use only a small number of formats - set this to 0 if you want everything.
# changed 0 to the default, so it'll compile shitloads of codecs normally
if [[ "x$minimal_featureset" == "x" ]]; then
minimal_featureset=1
fi
# Print the absolute path of the directory containing this script.
# "$0" is now quoted so paths with spaces work, and `&&` ensures we do
# not print the *current* directory when the cd fails.
function current_dir {
    echo "$(cd "$(dirname "$0")" && pwd)"
}
| true |
f12fe5090572161d95376fa7c89e953108dd7c30 | Shell | InsightSoftwareConsortium/ITK | /Utilities/ITKv5Preparation/UseNativeC++Syntax.sh | UTF-8 | 3,284 | 2.90625 | 3 | [
"IJG",
"Zlib",
"LicenseRef-scancode-proprietary-license",
"SMLNJ",
"BSD-3-Clause",
"BSD-4.3TAHOE",
"LicenseRef-scancode-free-unknown",
"Spencer-86",
"LicenseRef-scancode-llnl",
"FSFUL",
"Libpng",
"libtiff",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-other-permissive",
... | permissive | #!/bin/bash
# \author Hans J. Johnson
#
# Script to process a directory to remove unnecessary
# backwards compatibility layers for C++11
# options that are now required.
#
# Step 1 for migration to ITKv5: Continue building your toolkit with ITKv4, but use -DITK_FUTURE_LEGACY_REMOVE:BOOL=ON
function ReplaceCXXString()
{
oldstring="$1"
newstring="$2"
git grep -l "${oldstring}" | \
fgrep -v itk_compiler_detection.h | fgrep -v itkMacro.h | fgrep -v CMakeLists.txt |fgrep -v .cmake | \
fgrep -v ITKv5Preparation | \
xargs sed -i '' -e "s/ ${oldstring}/ ${newstring}/g"
file_changed=$(expr $(git status --porcelain 2>/dev/null| grep "^ M" | wc -l))
if [[ file_changed -gt 0 ]];then
cat > /tmp/COMMIT_MSG << EOF
COMP: Use C++11 ${newstring} directly
git grep -l \"${oldstring}\" | \
fgrep -v itk_compiler_detection.h | fgrep -v itkMacro.h | fgrep -v CMakeLists.txt |fgrep -v .cmake | \
xargs sed -i '' -e \"s/ ${oldstring}/ ${newstring}/g\"
EOF
git add -A
git commit -F /tmp/COMMIT_MSG
if [[ $? -ne 0 ]]; then
echo "ERROR: COMMIT DID NOT SUCCEED"
echo " Fix, then use: git commit -F /tmp/COMMIT_MSG"
exit -1
fi
fi
}
ReplaceCXXString ITK_NOEXCEPT_OR_THROW ITK_NOEXCEPT
ReplaceCXXString ITK_HAS_CXX11_STATIC_ASSERT ITK_COMPILER_CXX_STATIC_ASSERT
ReplaceCXXString ITK_DELETE_FUNCTION ITK_DELETED_FUNCTION
ReplaceCXXString ITK_HAS_CPP11_ALIGNAS ITK_COMPILER_CXX_ALIGNAS
# cxx_nullptr
# define ITK_NULLPTR nullptr
ReplaceCXXString ITK_NULLPTR nullptr
# cxx_deleted_functions
# define ITK_DELETED_FUNCTION = delete
ReplaceCXXString ITK_DELETED_FUNCTION "= delete"
# cxx_constexpr
# define ITK_CONSTEXPR constexpr
#COMP: Use C++11 constexpr directly
ReplaceCXXString ITK_CONSTEXPR_VAR constexpr
ReplaceCXXString ITK_CONSTEXPR_FUNC constexpr
# cxx_noexcept
ReplaceCXXString ITK_NOEXCEPT noexcept
### --- Other considerations for replacement
# cxx_std_98
# cxx_template_template_parameters
# cxx_std_11
# cxx_alias_templates
# cxx_alignas
# cxx_alignof
# cxx_attributes
# cxx_auto_type
# cxx_decltype
# cxx_decltype_incomplete_return_types
# cxx_default_function_template_args
# cxx_defaulted_functions
# cxx_defaulted_move_initializers
# cxx_delegating_constructors
# cxx_enum_forward_declarations
# cxx_explicit_conversions
# cxx_extended_friend_declarations
# cxx_extern_templates
# cxx_final
# cxx_func_identifier
# cxx_generalized_initializers
# cxx_inheriting_constructors
# cxx_inline_namespaces
# cxx_lambdas
# cxx_local_type_template_args
# cxx_long_long_type
# cxx_nonstatic_member_init
# cxx_override
# cxx_range_for
# cxx_raw_string_literals
# cxx_reference_qualified_functions
# cxx_right_angle_brackets
# cxx_rvalue_references
# cxx_sizeof_member
# cxx_static_assert
# cxx_strong_enums
# cxx_thread_local
# cxx_trailing_return_types
# cxx_unicode_literals
# cxx_uniform_initialization
# cxx_unrestricted_unions
# cxx_user_literals
# cxx_variadic_macros
# cxx_variadic_templates
# cxx_std_14
# cxx_aggregate_default_initializers
# cxx_attribute_deprecated
# cxx_binary_literals
# cxx_contextual_conversions
# cxx_decltype_auto
# cxx_digit_separators
# cxx_generic_lambdas
# cxx_lambda_init_captures
# cxx_relaxed_constexpr
# cxx_return_type_deduction
# cxx_variable_templates
# cxx_std_17
| true |
9b431499b9e812a7b237fcd039cc23faf9bfeaad | Shell | guozhongluo/netron | /tools/mxnet | UTF-8 | 842 | 3.640625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Netron helper: sync the MXNet sources into third_party/ and refresh the
# generated metadata. Subcommands are taken from the argument list:
#   sync      -> clone or fast-forward third_party/mxnet
#   metadata  -> regenerate mxnet-script.json (currently a no-op stub)
set -e
root=$(cd $(dirname ${0})/..; pwd)
src=${root}/src
tools=${root}/tools
third_party=${root}/third_party
python=${python:-python}
pip=${pip:-pip}
identifier=mxnet
# git_sync <dir> <url>: clone <url> into third_party/<dir>, or if it is
# already there, fetch and hard-reset it to origin/master.
git_sync() {
    mkdir -p "${third_party}"
    if [ -d "${third_party}/${1}" ]; then
        git -C "${third_party}/${1}" fetch -p --quiet
        git -C "${third_party}/${1}" reset --quiet --hard origin/master
    else
        echo "Clone ${2}..."
        git -C "${third_party}" clone --recursive ${2} ${1}
    fi
    # NOTE(review): this runs in the script's cwd, not in the cloned repo;
    # a `git -C "${third_party}/${1}"` prefix may have been intended.
    git submodule update --init
}
sync() {
    git_sync mxnet https://github.com/apache/incubator-mxnet.git
}
metadata() {
    echo "Update 'mxnet-script.json'"
    # ${python} mxnet-script.py
}
# Consume subcommands left to right.
while [ "$#" != 0 ]; do
    command="$1" && shift
    case "${command}" in
        "sync") sync;;
        "metadata") metadata;;
    esac
done
| true |
887e64f8d027839972b785d0ff42399002e47bad | Shell | mistio/vpn-proxy | /scripts/install.sh | UTF-8 | 1,982 | 3.53125 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Install vpn-proxy on a Debian/Ubuntu host: prompt for any missing
# settings, install OS/Python dependencies, render the Django conf.d
# snippets, migrate the DB, configure a uwsgi app, and enable IPv4
# forwarding. Must run as root (apt-get, systemctl, sysctl).
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/.. && pwd )"
# Interactive fallbacks: each setting may be pre-seeded via environment.
if [ -z "$WEB_SOCKET" ]; then
    echo "Please enter IP or IP:PORT (private) for the webserver:"
    read WEB_SOCKET
    echo
fi
if [ -z "$VPN_IP" ]; then
    echo "Please enter public IP address for the VPN server:"
    read VPN_IP
    echo
fi
if [ -z "$SOURCE_CIDRS" ]; then
    echo "Please enter the CIDR(s) of the source host(s) (separated by space):"
    read SOURCE_CIDRS
    echo
fi
if [ -z "$IN_IFACE" ]; then
    echo "Please enter the lan interface of the server:"
    read IN_IFACE
    echo
fi
echo "Installing vpn-proxy from $DIR."
echo "Webserver will be listening to $WEB_SOCKET."
echo "VPN server will be listening to $VPN_IP."
echo "VPN server will be forwarding requests originating from $SOURCE_CIDRS."
echo
set -ex
apt-get update -q
apt-get install -yq --no-install-recommends \
    python python-pip openvpn uwsgi uwsgi-plugin-python \
    python-dev build-essential
pip install -U pip
pip install -r $DIR/requirements.txt
# Render Python config snippets consumed by the Django project.
echo "VPN_SERVER_REMOTE_ADDRESS = \"$VPN_IP\"" > $DIR/vpn-proxy/conf.d/0000-vpn-ip.py
# Turn the space-separated CIDR list into a Python list literal.
SOURCE_CIDRS=`echo "$SOURCE_CIDRS" | sed 's/ /", "/g'`
echo "SOURCE_CIDRS = [\"$SOURCE_CIDRS\"]" > $DIR/vpn-proxy/conf.d/0001-src-cidrs.py
echo "IN_IFACE = \"$IN_IFACE\"" > $DIR/vpn-proxy/conf.d/0002-lan-iface.py
$DIR/vpn-proxy/manage.py migrate
$DIR/vpn-proxy/manage.py autosuperuser
mkdir -p $DIR/tmp
# uwsgi application definition for the web frontend.
cat > /etc/uwsgi/apps-available/vpn-proxy.ini << EOF
[uwsgi]
chdir = $DIR/vpn-proxy
module = project.wsgi
http = $WEB_SOCKET
processes = 4
master = true
vacuum = true
uid = root
gid = root
EOF
cat /etc/uwsgi/apps-available/vpn-proxy.ini
ln -sf /etc/uwsgi/apps-available/vpn-proxy.ini /etc/uwsgi/apps-enabled/
systemctl restart uwsgi
systemctl status uwsgi
# Enable IPv4 forwarding persistently, appending the line if sed matched nothing.
sed -i 's/^#\?\s*net.ipv4.ip_forward\s*=\s*.*$/net.ipv4.ip_forward=1/' /etc/sysctl.conf
grep '^net.ipv4.ip_forward=1$' /etc/sysctl.conf || \
    echo 'net.ipv4.ip_forward=1' >> /etc/sysctl.conf
sysctl -p
| true |
6ea20e7382bf5634d97792869c9e78a31a3acd5e | Shell | c00kiemon5ter/scripts | /dmn | UTF-8 | 439 | 2.875 | 3 | [] | no_license | #!/usr/bin/env bash
#dmenu_run -i -nb 'black' -nf 'white' -sb 'orange' -sf 'black'
# dmenu launcher: show all commands on PATH, run the selection. A few
# curses applications are started inside a urxvt terminal instead.
font="-*-terminus-*-*-*-*-*-*-*-*-*-*-*-*"
term=urxvtc
normbgcolor="black"
normfgcolor="white"
selbgcolor="orange"
selfgcolor="black"
cmd=$(dmenu_path | dmenu -i -b -fn $font -nb $normbgcolor -nf $normfgcolor -sb $selbgcolor -sf $selfgcolor)
case $cmd in
    # Terminal applications get a dedicated terminal window named after them.
    ncmpcpp|htop|vim) exec $term -name $cmd -e $cmd ;;
    *) exec $cmd
esac
| true |
c3109291f4b013cb6d329d82e67b14941a035d1c | Shell | hurtsky/ramdisk | /sbin/ext/hurtsky.sh | UTF-8 | 2,701 | 3.046875 | 3 | [] | no_license | #!/system/bin/sh
#
# Kernel customizations post initialization
# Created by Christopher83 all credit goes to him
# Adapted for Hurtsky's Kernel usage
# Runs at boot on Android: remounts /system rw, enables frandom, applies
# VM tweaks, protects selected processes from the low-memory killer,
# tunes the touchscreen, then remounts /system read-only.
# Mount system for read and write
echo "Mount system for read and write"
mount -o rw,remount /system
echo
#
# Fast Random Generator (frandom) support at boot
#
if [ -f "/lib/modules/frandom.ko" ]; then
	# Load frandom module if not built inside the zImage
	echo "Fast Random Generator (frandom): Loading module..."
	insmod /lib/modules/frandom.ko
fi
if [ -c "/dev/frandom" ]; then
	# Redirect random and urandom generation to frandom char device
	echo "Fast Random Generator (frandom): Initializing..."
	rm -f /dev/random
	rm -f /dev/urandom
	ln /dev/frandom /dev/random
	ln /dev/frandom /dev/urandom
	chmod 0666 /dev/random
	chmod 0666 /dev/urandom
	echo "Fast Random Generator (frandom): Ready!"
	echo
else
	echo "Fast Random Generator (frandom): Not supported!"
	echo
fi
#
# VM tweaks
#
if [ -d /proc/sys/vm ]; then
	echo "Setting VM tweaks..."
	echo "50" > /proc/sys/vm/dirty_ratio
	echo "10" > /proc/sys/vm/dirty_background_ratio
	echo "90" > /proc/sys/vm/vfs_cache_pressure
	echo "500" > /proc/sys/vm/dirty_expire_centisecs
	# Kernels with dynamic writeback expose separate active/suspend knobs.
	if [ -f /proc/sys/vm/dynamic_dirty_writeback ]; then
		echo "3000" > /proc/sys/vm/dirty_writeback_active_centisecs
		echo "1000" > /proc/sys/vm/dirty_writeback_suspend_centisecs
	else
		echo "1000" > /proc/sys/vm/dirty_writeback_centisecs
	fi
	echo
fi
#
# Processes to be preserved from killing
#
if [ -f /sys/module/lowmemorykiller/parameters/donotkill_proc ]; then
	echo "Setting user processes to be preserved from killing..."
	echo 1 > /sys/module/lowmemorykiller/parameters/donotkill_proc
	echo "com.cyanogenmod.trebuchet,android.inputmethod.latin," > /sys/module/lowmemorykiller/parameters/donotkill_proc_names
fi
if [ -f /sys/module/lowmemorykiller/parameters/donotkill_sysproc ]; then
	echo "Setting system processes to be preserved from killing..."
	echo 1 > /sys/module/lowmemorykiller/parameters/donotkill_sysproc
	echo "android.process.acore,com.android.phone," > /sys/module/lowmemorykiller/parameters/donotkill_sysproc_names
fi
# Touchscreen
# Configure touchscreen sensitivity
# More-Sensitive
# Taken from Project Lard - http://forum.xda-developers.com/showthread.php?p=41073252
if [ -f /sys/class/touch/switch/set_touchscreen ]; then
	echo 7025 > /sys/class/touch/switch/set_touchscreen;
	echo 8002 > /sys/class/touch/switch/set_touchscreen;
	echo 11001 > /sys/class/touch/switch/set_touchscreen;
	echo 13030 > /sys/class/touch/switch/set_touchscreen;
	echo 14005 > /sys/class/touch/switch/set_touchscreen;
fi
# Mount system read-only
echo "Mount system read-only"
mount -o ro,remount /system
| true |
34fedd0ea1f01d666a1f5d65e46af4ad0c461bad | Shell | traveltek-jmacintyre/dots | /traveltek/bash/bashrc | UTF-8 | 3,997 | 3.484375 | 3 | [] | no_license | # ~/.bashrc: executed by bash(1) for non-login shells.
# see /usr/share/doc/bash/examples/startup-files (in the package bash-doc)
# for examples
# If not running interactively, don't do anything
case $- in
*i*) ;;
*) return;;
esac
# ===== Settings =====
if [ -t 1 ]; then
[[ -f ~/.bash_aliases ]] && . ~/.bash_aliases
[[ -f ~/.bash_funcs ]] && . ~/.bash_funcs
[[ -f ~/.sensible.bash ]] && . ~/.sensible.bash
shopt -s histverify # Expand history before executing when using bangs
shopt -s autocd
shopt -s cdspell
shopt -s direxpand dirspell
bind '"\t":menu-complete'
bind '"\e[Z":menu-complete-backward'
bind 'set mark-symlinked-directories on'
bind '"\C-h": backward-kill-word'
bind '"\e[3;5~": kill-word'
complete -d cd
fi
# ===== Colors =====
if [[ -x /usr/bin/dircolors ]]; then
test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
alias ls='ls --color=auto'
alias grep='grep --color=auto'
alias fgrep='fgrep --color=auto'
alias egrep='egrep --color=auto'
fi
# ===== Exports =====
# Coloured prompt
COLOR_RED="\033[0;31m"
COLOR_YELLOW="\033[0;33m"
COLOR_GREEN="\033[0;32m"
COLOR_OCHRE="\033[38;5;95m"
COLOR_BLUE="\033[0;34m"
COLOR_WHITE="\033[0;37m"
COLOR_RESET="\033[0m"
# Pick a prompt color from `git status` output (called via $(git_color)
# from PROMPT_COMMAND, so the early `exit 0` only leaves the command
# substitution's subshell, not the interactive shell):
#   yellow = dirty working tree, red = ahead of upstream,
#   green  = clean / nothing to commit, ochre = anything else.
function git_color {
  git_status="$(git status 2> /dev/null)"
  # NOTE(review): `>` inside [[ ]] is a *string* comparison; it behaves
  # like numeric here only because exit codes are compared against "0".
  [[ $? > 0 ]] && exit 0
  if [[ ! $git_status =~ "working tree clean" ]]; then
    echo -e $COLOR_YELLOW
  elif [[ $git_status =~ "Your branch is ahead of" ]]; then
    echo -e $COLOR_RED
  elif [[ $git_status =~ "nothing to commit" ]]; then
    echo -e $COLOR_GREEN
  else
    echo -e $COLOR_OCHRE
  fi
}
# Print "(branch) " or "(commit) " for the current git repository, or
# nothing outside a repo. Parses the English `git status` first line with
# a regex; the capture ends at the first whitespace/IFS character.
function git_branch {
  local git_status="$(git status 2> /dev/null)"
  local on_branch="On branch ([^${IFS}]*)"
  local on_commit="HEAD detached at ([^${IFS}]*)"
  if [[ $git_status =~ $on_branch ]]; then
    local branch=${BASH_REMATCH[1]}
    echo "($branch) "
  elif [[ $git_status =~ $on_commit ]]; then
    local commit=${BASH_REMATCH[1]}
    echo "($commit) "
  fi
}
standard_prompt='\[\033[38;5;14m\][\[$(tput bold)\]\[$(tput sgr0)\]\[\033[38;5;11m\]\u@\h\[$(tput sgr0)\]\[$(tput sgr0)\]\[\033[38;5;15m\] \[$(tput sgr0)\]\[\033[38;5;10m\]\w\[$(tput sgr0)\]\[\033[38;5;14m\]]\[$(tput sgr0)\]\[\033[38;5;15m\] \[$(tput sgr0)\]'
color=git_color
branch=git_branch
export PROMPT_COMMAND='color=$(git_color); branch=$(git_branch); export PS1="${standard_prompt}\[${color}\]${branch}\[$COLOR_RESET\]\$ "; unset color; unset branch'
# don't put duplicate lines or lines starting with space in the history.
# See bash(1) for more options
HISTCONTROL=ignoreboth
# append to the history file, don't overwrite it
shopt -s histappend
# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
HISTSIZE=1000
HISTFILESIZE=2000
# check the window size after each command and, if necessary,
# update the values of LINES and COLUMNS.
shopt -s checkwinsize
# If set, the pattern "**" used in a pathname expansion context will
# match all files and zero or more directories and subdirectories.
#shopt -s globstar
# make less more friendly for non-text input files, see lesspipe(1)
[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "${debian_chroot:-}" ] && [ -r /etc/debian_chroot ]; then
debian_chroot=$(cat /etc/debian_chroot)
fi
# Add an "alert" alias for long running commands. Use like so:
# sleep 10; alert
alias alert='notify-send --urgency=low -i "$([ $? = 0 ] && echo terminal || echo error)" "$(history|tail -n1|sed -e '\''s/^\s*[0-9]\+\s*//;s/[;&|]\s*alert$//'\'')"'
# enable programmable completion features (you don't need to enable
# this, if it's already enabled in /etc/bash.bashrc and /etc/profile
# sources /etc/bash.bashrc).
if ! shopt -oq posix; then
if [ -f /usr/share/bash-completion/bash_completion ]; then
. /usr/share/bash-completion/bash_completion
elif [ -f /etc/bash_completion ]; then
. /etc/bash_completion
fi
fi
# PATH
export PATH="$PATH:$HOME/bin"
| true |
1240a508ba300b78e8f2b346fda1bc7403413958 | Shell | DistributedSystemResearch/VStore | /VStore-NoScope/vstore/src/backup-repo.sh | UTF-8 | 387 | 3.40625 | 3 | [] | no_license | #!/bin/bash
# To be executed *locally* on the remote or local machine.
# Snapshot the source tree (excluding build artifacts) into a timestamped
# sibling directory; the "-complete" rename marks a finished backup.
# stop on error
set -e
# No trailing / on SRC; it is added in the rsync invocation below.
SRC=/home/xzl/video-streamer
DEST=/home/xzl/video-streamer-`date +"%m%d%y-%H_%M_%S"`
mkdir ${DEST}
rsync -a -vv --exclude='*.bin' --exclude='*.o' ${SRC}/ ${DEST}; \
touch ${DEST}/BACKUP; \
mv ${DEST} ${DEST}-complete;
echo "[[Finished]]: backed up ${SRC} to ${DEST}"
| true |
1adf2c5d5ca01339a631682e9c07a41e5128876b | Shell | prof-alazhary/DBMS_by_bash | /insertTable.sh | UTF-8 | 234 | 2.890625 | 3 | [] | no_license | #!/bin/bash
# insertData DATA FILE: append a new row "ID:DATA" to FILE, where ID is
# one greater than the largest existing first (":"-separated) field.
# Fixes the original's END block, which used `$max` (awk *field* number
# max of the last record) instead of the tracked maximum `max`, so the
# generated id was wrong. DATA is now passed via -v instead of being
# spliced into the awk program, and FILE is quoted.
insertData() {
	awk -v data="$1" 'BEGIN{FS=":"; max=0} {if ($1>max) max=$1} END{print max+1 ":" data >> FILENAME}' "$2"
}
# read -p "Enter your data ti insert like this name:age "
# echo $REPLY
# insertData $REPLY ./mydb1/table1
| true |
b756dfd9612d0be87192a56134e406cdf3e23c61 | Shell | rfabbri/vpe | /scripts/devsetup/bootstrap | UTF-8 | 332 | 3.203125 | 3 | [] | no_license | #!/bin/sh
# script/bootstrap: Resolve all dependencies that the application requires to
# run.
set -e
# Run from the repository root (one level above this script's directory).
cd "$(dirname "$0")/.."
# Placeholder for future OS-specific dependency installation.
#if [[ "`uname`" != Linux ]]; then
#  MYOS="OSX"
#  Install macports or homebrew packages
#else
#  MYOS="Linux"
#  Attempt to install Ubuntu packages or other distro packages
#fi
| true |
2b273df4b26200db33ad62d5a834ecc9fd4b20d2 | Shell | sdjaindeqode/myfirstrepo | /siddhant.sh | UTF-8 | 542 | 2.875 | 3 | [] | no_license | mkdir ~/sample
# Shell practice script: exercises file creation, grep counting, chmod
# permission changes, redirection, head/tail and aliases inside ~/sample.
cd ~/sample/
echo "Hi! This is just a sample text file created using a shell script." > sample.txt
cat sample.txt
# Count occurrences of the letter 't' (one match per line via -o).
grep -o t sample.txt | wc -l
chmod u=rwx sample.txt
echo "Hi! This is just another sample text added to the file." >> sample.txt
chmod g=r sample.txt
chmod a-rwx sample.txt
chmod u=rwx sample.txt
cat sample.txt>sample2.txt
seq 1 1000 >> sample.txt
cat sample.txt | head -50
cat sample.txt | tail -50
touch prog1.txt prog2.txt program.txt code.txt info.txt
ls | grep prog
# NOTE(review): aliases are not expanded in non-interactive shells, so
# the `listprog` call below likely fails when run as a script -- verify.
alias listprog="ls | grep prog"
listprog
| true |
c07102283441a42623be430fec99746ff9ed803c | Shell | pointim/point | /backup/redis.sh | UTF-8 | 191 | 3.015625 | 3 | [] | no_license | #!/bin/bash
# Trigger a synchronous SAVE in every point_redis-* container (one per
# subdirectory of ../data/redis), then archive the data directory to
# "$2/redis-$1.tar.bz2" ($1 = snapshot label, $2 = destination dir).
redis_dir="../data/redis"
for s in $(ls -1 $redis_dir); do
    echo $s SAVE
    # NOTE(review): `-it` requires a TTY; from cron/CI `-i` alone may be needed.
    docker exec -it "point_redis-${s}_1" redis-cli 'SAVE'
done
tar cjf "$2/redis-$1.tar.bz2" $redis_dir
| true |
39db1714929d9fe5365bf01377ce1a1b0437cc86 | Shell | bowanggithub/shell | /prg37.sh | UTF-8 | 440 | 3.140625 | 3 | [] | no_license | clear
# Read five scores from stdin, average them with bc (truncating integer
# division at bc's default scale), and print the division band.
echo "enter five scores"
read m1
read m2
read m3
read m4
read m5
# bc handles both the addition and the per-5 division.
sum=$(echo "$m1+$m2+$m3+$m4+$m5" | bc)
per=$(echo "$sum/5" | bc)
echo "per is $per"
# Same banding as before, written as a single flattened elif chain:
# >=60 first, 50-59 second, 40-49 third, otherwise fail.
if [ "$per" -ge 60 ]; then
    echo "first division"
elif [ "$per" -ge 50 ]; then
    echo "second division"
elif [ "$per" -ge 40 ]; then
    echo "third division"
else
    echo "fail"
fi
| true |
090abd971d6aadfbde4b4ae061acce1782cedf9b | Shell | pvcastro/ner-re-pt | /tools/nltk/run-scripts/output-gold-mini.sh | UTF-8 | 367 | 2.9375 | 3 | [] | no_license | #!/bin/bash
# For each annotation level ("cat", "filtered"), run the CoNLL gold-output
# converter over the mini document set, reading t-<level>-mini-doc.txt and
# writing out-<level>-mini-gold.txt (ISO-8859-1 encoded corpus).
declare -a levels=("cat" "filtered")
printf "\n*** output gold ***\n"
printf "\n** mini **\n"
for level in "${levels[@]}"
do
    printf "\n** level "$level" **\n"
    FOLDER=../outputs
    OUT=../outputs
    ~/Desenvolvimento/anaconda2/bin/python "../src/out-conll-gold.py" $FOLDER/t-$level-mini-doc.txt $OUT/out-$level-mini-gold.txt 'ISO-8859-1'
done
| true |
f1ab2b0b8ee71c1664a9f50b994a2bfb1332d764 | Shell | SKA-ScienceDataProcessor/IDOS | /test/OSKAR_CASA/daliuge/oskar_casa_daliuge.sh | UTF-8 | 585 | 2.59375 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
# Launch a DALiuGE cluster run of the OSKAR/CASA imaging graph on Tianhe-2.
# $1 = number of nodes/tasks for yhrun; a per-run session id (SID) keyed
# by node count and timestamp names the log directory.
APP_ROOT="/BIGDATA1/ac_shao_tan_1/OSKAR/daliuge-master/dfms/deploy/pawsey"
SID=$(date +"ska1_low_N"$1_"%Y-%m-%dT%H-%M-%S")
LOG_DIR=$APP_ROOT"/logs/"$SID
mkdir -p $LOG_DIR # to remove potential directory creation conflicts later
GRAPH_DIR="/BIGDATA1/ac_shao_tan_1/OSKAR/IDOS/test/OSKAR_CASA/daliuge/lg/oskar_casa_img.json"
CLUSTER="Tianhe2"
# Clear stale per-run configuration before submitting.
rm -rf /BIGDATA1/ac_shao_tan_1/OSKAR/IDOS/test/OSKAR_CASA/daliuge/config/*
yhrun -n $1 -N $1 -p gpu /BIGDATA1/ac_shao_tan_1/OSKAR/python/bin/python $APP_ROOT"/start_dfms_cluster.py" -l $LOG_DIR -L $GRAPH_DIR -d -c $CLUSTER -v 3
| true |
950562d576a715c97429a0be24bb7982f746e57e | Shell | malradhi/merlin | /misc/scripts/vocoder/continuous/02_analysis.sh | UTF-8 | 1,585 | 3.265625 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | #!/bin/bash
#########################################
######## Continuous vocoder ###########
#########################################
# Analysis stage: extract continuous vocoder parameters (lf0, MVF, MGC)
# from example/wav into example/analysis, using a Python extractor, the
# local spec_env binary and SPTK for the spectrum-to-cepstrum conversion.
echo ""
echo "Step 2: Analysis by Continuous vocoder"
echo ""
# tools directory
current_working_dir=$(pwd)
wav_dir=${current_working_dir}/example/wav/
lf0_dir=${current_working_dir}/example/analysis/lf0/
mvf_dir=${current_working_dir}/example/analysis/mvf/
sp_dir=${current_working_dir}/example/analysis/sp/
mgc_dir=${current_working_dir}/example/analysis/mgc/
mkdir -p ${lf0_dir}
mkdir -p ${mvf_dir}
mkdir -p ${sp_dir}
mkdir -p ${mgc_dir}
# sampling frequency
fs=16000
# these numbers are valid only for fs=16 kHz
nFFTHalf=1024
alpha=0.58
#bap order depends on sampling freq.
mcsize=59
echo "extract continuous parameters: lf0, MVF, MGC"
echo "take only a few seconds per wave file..."
echo ""
python3 cont_features_extraction.py ${wav_dir} ${lf0_dir} ${mvf_dir} ${mgc_dir}
for file in ${wav_dir}/*.wav
do
    filename="${file##*/}"
    file_id="${filename%.*}"
    ### extract log spectrum (sp) ###
    $current_working_dir/spec_env ${wav_dir}/$file_id.wav 0 ${sp_dir}/$file_id.sp 0
    ### convert log spectrum (sp) to mel-generalized cepstrum (mgc)
    sptk x2x +df ${sp_dir}/$file_id.sp | sptk sopr -R -m 32768.0 | sptk mcep -a $alpha -m $mcsize -l $nFFTHalf -e 1.0E-8 -j 0 -f 0.0 -q 3 > ${mgc_dir}/$file_id.mgcep
done
# echo "deleting intermediate analysis files..."
rm -rf $sp_dir
rm -rf 0
echo ""
echo "the analysis part is done! ... parameters are in: ${current_working_dir}/example/analysis"
echo ""
| true |
af0af8b85861584bba28f5a3507baf55990655c5 | Shell | zaralger/foo | /changeExtension.sh | UTF-8 | 255 | 3.328125 | 3 | [] | no_license | #!/bin/bash
# Rename every *.cbr file under the directory given as $1 to *.zip.
# Only the extension changes; the base name is kept.
EXTENSION_SOURCE="cbr"
EXTENSION_DEST="zip"
FOLDER="$1"
echo "$FOLDER""coucou.""$EXTENSION_SOURCE"
for file in "$FOLDER"*".$EXTENSION_SOURCE"
do
	# When the glob matches nothing it stays literal; skip that case
	# instead of running mv on the pattern itself.
	[ -e "$file" ] || continue
	echo "$file"
	# Parameter expansion replaces the original sed pipeline and is safe
	# for names with spaces; -- protects names starting with a dash.
	mv -- "$file" "${file%.$EXTENSION_SOURCE}.$EXTENSION_DEST"
done
| true |
babf869540d53ddb0fff9551004c58247e98a010 | Shell | myidpt/istio-tasks | /traffic_management/ingress/secure_gateways_sds/deploy.sh | UTF-8 | 6,379 | 3.234375 | 3 | [] | no_license | #!/bin/bash
# Task info: https://istio.io/docs/tasks/traffic-management/ingress/secure-ingress-sds/
VER=1.4.0
PATH=$PATH:$PWD/istio-$VER/bin
install_controlplane() {
curl -L https://istio.io/downloadIstio | ISTIO_VERSION=1.4.0 sh -
pushd istio-$VER
istioctl manifest apply
kubectl get pod -n istio-system
popd
}
get_ingressgateway() {
  # Export INGRESS_HOST / INGRESS_PORT / SECURE_INGRESS_PORT for the
  # istio-ingressgateway service, polling every 10s until the LoadBalancer
  # IP has actually been assigned.
  # Rewritten as a do-while style loop: the original duplicated the three
  # kubectl lookups and three echos once before the loop and again inside it.
  kubectl get svc istio-ingressgateway -n istio-system
  while :; do
    export INGRESS_HOST=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
    export INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="http2")].port}')
    export SECURE_INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="https")].port}')
    echo Ingress host: $INGRESS_HOST
    echo Ingress port: $INGRESS_PORT
    echo Secure Ingress port: $SECURE_INGRESS_PORT
    [ -n "$INGRESS_HOST" ] && break
    sleep 10
  done
}
generate_certs() {
  # Generate a demo CA chain plus server/client certs for
  # httpbin.example.com (passphrase 123456) using the mtls-go-example repo,
  # then collect them under ./httpbin.example.com/.
  git clone https://github.com/nicholasjackson/mtls-go-example
  pushd mtls-go-example
  ./generate.sh httpbin.example.com 123456
  mkdir ../httpbin.example.com && mv 1_root 2_intermediate 3_application 4_client ../httpbin.example.com
  popd
}
configure_sds_gateway() {
  # Re-render the ingress gateway with SDS enabled (and the egress gateway
  # disabled) and apply the resulting manifest.
  istioctl manifest generate \
  --set values.gateways.istio-egressgateway.enabled=false \
  --set values.gateways.istio-ingressgateway.sds.enabled=true > \
  istio-ingressgateway.yaml
  kubectl apply -f istio-ingressgateway.yaml
  kubectl get pod -n istio-system
}
setup_httpbin() {
  # Deploy the httpbin workload and create the TLS server secret used by the
  # SDS-enabled gateway, then apply the simple-TLS Gateway/VirtualService.
  kubectl apply -f httpbin_example.yaml
  kubectl create -n istio-system secret generic httpbin-credential \
  --from-file=key=httpbin.example.com/3_application/private/httpbin.example.com.key.pem \
  --from-file=cert=httpbin.example.com/3_application/certs/httpbin.example.com.cert.pem
  kubectl apply -f gateway_and_vs.yaml
  kubectl get pod
}
verify_httpbin() {
  # One-way TLS request through the ingress gateway; expects HTTP 418 from
  # httpbin's /status/418 endpoint.
  curl -v -HHost:httpbin.example.com \
  --resolve httpbin.example.com:$SECURE_INGRESS_PORT:$INGRESS_HOST \
  --cacert httpbin.example.com/2_intermediate/certs/ca-chain.cert.pem \
  https://httpbin.example.com:$SECURE_INGRESS_PORT/status/418
}
setup_httpbin_mtls() {
  # Like setup_httpbin, but the credential secret also carries the CA cert
  # (cacert key) so the gateway can verify client certificates (mutual TLS).
  kubectl apply -f httpbin_example.yaml
  kubectl -n istio-system delete secret httpbin-credential
  kubectl create -n istio-system secret generic httpbin-credential \
  --from-file=key=httpbin.example.com/3_application/private/httpbin.example.com.key.pem \
  --from-file=cert=httpbin.example.com/3_application/certs/httpbin.example.com.cert.pem \
  --from-file=cacert=httpbin.example.com/2_intermediate/certs/ca-chain.cert.pem
  kubectl apply -f gateway_and_vs_mtls.yaml
  kubectl get pod
}
setup_httpbin_mtls_with_hash() {
  # Mutual-TLS variant whose Gateway additionally pins the client cert hash
  # (gateway_and_vs_mtls_hash.yaml); dumps the Gateway for inspection.
  kubectl apply -f httpbin_example.yaml
  kubectl -n istio-system delete secret httpbin-credential
  kubectl create -n istio-system secret generic httpbin-credential \
  --from-file=key=httpbin.example.com/3_application/private/httpbin.example.com.key.pem \
  --from-file=cert=httpbin.example.com/3_application/certs/httpbin.example.com.cert.pem \
  --from-file=cacert=httpbin.example.com/2_intermediate/certs/ca-chain.cert.pem
  kubectl apply -f gateway_and_vs_mtls_hash.yaml
  kubectl get pod
  kubectl get gateway mygateway -o yaml
}
verify_httpbin_mtls() {
  # Mutual-TLS request: presents the generated client cert/key in addition
  # to trusting the demo CA chain; expects HTTP 418.
  curl -v -HHost:httpbin.example.com \
  --resolve httpbin.example.com:$SECURE_INGRESS_PORT:$INGRESS_HOST \
  --cacert httpbin.example.com/2_intermediate/certs/ca-chain.cert.pem \
  --cert httpbin.example.com/4_client/certs/httpbin.example.com.cert.pem \
  --key httpbin.example.com/4_client/private/httpbin.example.com.key.pem \
  https://httpbin.example.com:$SECURE_INGRESS_PORT/status/418
}
clean_up() {
  # Delete, cluster-wide, every resource type the demo may have created.
  # The original listed one 'kubectl delete <type> --all' per line; this is
  # the same sequence of commands, in the same order, driven by a list.
  local resource
  for resource in \
      mutatingwebhookconfiguration validatingwebhookconfiguration psp \
      daemonset deploy configmap service ingress namespace rule denier \
      checknothing serviceaccount secret EgressRules MeshPolicy \
      serviceentry virtualservice gateway destinationrule \
      poddisruptionbudgets; do
    kubectl delete "$resource" --all
  done
}
install() {
  # Wipe any previous demo state, install the Istio control plane, generate
  # the demo certificates and enable SDS on the ingress gateway.
  clean_up
  sleep 60
  install_controlplane
  sleep 60
  # Fix: the original called 'install_httpbin' here, but no function of that
  # name exists anywhere in this script, so the call always failed with
  # "command not found". The httpbin workload is deployed later by the
  # per-scenario setup_httpbin* helpers, so the call is simply removed.
  generate_certs
  configure_sds_gateway
}
####### Normal TLS #########
setup_tls_ingressgateway() {
  # End-to-end scenario: fresh install, then one-way TLS through the gateway.
  echo "Test TLS Ingress Gateway"
  install
  sleep 60
  get_ingressgateway
  setup_httpbin
  sleep 45
  verify_httpbin
}
####### Mutual TLS #########
setup_mtls_ingressgateway() {
  # End-to-end scenario: mutual TLS. The one-way probe is expected to be
  # rejected; the client-cert probe is expected to succeed.
  echo "Test MTLS Ingress Gateway"
  install
  sleep 60
  get_ingressgateway
  setup_httpbin_mtls
  sleep 45
  echo
  echo "#################### SHOULD FAIL ######################"
  verify_httpbin # Will fail
  echo
  echo "#################### SHOULD SUCCEED ######################"
  verify_httpbin_mtls
}
####### Mutual TLS with Hash verification #########
# This has issue now.
setup_mtls_with_hash_ingressgateway() {
  # End-to-end scenario: mutual TLS with client-cert hash pinning.
  echo "Test TLS Ingress Gateway with hash verification"
  install
  sleep 60
  get_ingressgateway
  setup_httpbin_mtls_with_hash
  sleep 45
  verify_httpbin_mtls
}
clean_all() {
  # Remove everything downloaded/generated locally by this script.
  echo "Clean up the files..."
  rm -rf istio-$VER/
  rm -rf httpbin.example.com/
  rm -rf mtls-go-example/
}
# CLI dispatch: -t/--tls, -m/--mtls, -h/--hash run a scenario; -c/--clean
# removes local artifacts; anything else exits with status 3.
case "$1" in
    -t|--tls)
    setup_tls_ingressgateway
    ;;
    -m|--mtls)
    setup_mtls_ingressgateway
    ;;
    -h|--hash)
    setup_mtls_with_hash_ingressgateway
    ;;
    -c|--clean)
    clean_all
    ;;
    *)
    echo "Programming error"
    exit 3
    ;;
esac
| true |
b4d1ca54e8ef92b5cb15ca392a784aea3b4bdf97 | Shell | branty/openstack | /multi_region/auto-config/build_trust.sh | UTF-8 | 578 | 2.546875 | 3 | [] | no_license | #!/usr/bin/expect -f
# Copyright © 2014 EasyStack, Inc
# Author: Branty <jun.wang@easystack.cn>
# user,default is root
# Expect script: pushes the local root SSH public key to a remote host and
# appends it to the remote authorized_keys, answering the host-key prompt and
# password prompt automatically.
# SECURITY NOTE: the remote root password is hard-coded in plain text below.
# user,default is root
set user root
# hostname
set host 10.20.0.3
# password
set password passw0rd
set timeout 100
# Copy the public key over; accept the host key ("yes") and supply the
# password whenever prompted.
spawn scp /root/.ssh/id_rsa.pub $user@$host:/root/
expect {
    "*)?*" {send "yes\r";exp_continue}
    "*assword:*" {send "$password\r";exp_continue}
}
# Append the copied key to the remote authorized_keys file.
spawn ssh $user@$host "cat /root/id_rsa.pub >> /root/.ssh/authorized_keys"
#spawn ssh $user@$host "ls -al"
expect {
    "*)?*" {send "yes\r";exp_continue}
    "*assword:*" {send "$password\r";exp_continue}
}
| true |
7ebeef200c1d55231a88f70eea1902d878c71fec | Shell | victorkp/tomcat-tools | /install-server.sh | UTF-8 | 1,716 | 3.640625 | 4 | [] | no_license | #!/bin/bash
# Interactive distro picker: installs MySQL + Tomcat with the package
# manager / init system that matches the chosen distribution.
echo Which Linux distribution are you using? Enter 1, 2, or 3
select di in "Fedora" "Ubuntu" "Cancel"; do
  case $di in
    # Fedora: yum packages, systemd units.
    "Fedora" ) sudo yum install mysql-server tomcat tomcat-webapps tomcat-admin-webapps;
           sudo systemctl start mysqld.service;
           sudo systemctl enable tomcat.service;
           sudo systemctl start tomcat.service;
           echo;
           echo;
           echo "==STARTING-THE-SERVER==";
           echo "If you ever shutdown and restart your computer,";
           echo "you will need to start MySQL and Tomcat again, use the following:";
           echo "sudo systemctl start tomcat.service";
           echo "sudo systemctl start mysqld.service";
           echo "-----------------------";
           echo "";
           break;;
    # Ubuntu: apt packages, SysV init scripts (tomcat7 era).
    "Ubuntu" ) sudo apt-get install mysql-server && sudo apt-get install mysql-client;
           sudo apt-get install tomcat7 && sudo apt-get install tomcat7-admin;
           sudo /etc/init.d/tomcat7 start;
           sudo /etc/init.d/mysql start;
           echo;
           echo;
           echo "==STARTING-THE-SERVER==";
           echo "Tomcat and MySQL Server are now installed!";
           echo "If you ever shutdown and restart your computer,";
           echo "you will need to start MySQL and Tomcat again, use the following:";
           echo "sudo /etc/init.d/tomcat7 start";
           echo "sudo /etc/init.d/mysqld start";
           echo "-----------------------";
           echo "";
           break;;
    "Cancel" ) exit;;
  esac
done
echo ""
echo "======MYSQL-SETUP======"
echo Note that you will need to configure MySQL priviledges
echo "\"GRANT PRIVILEGES.....\""
echo "-----------------------"
echo ""
echo ""
echo "===DEPLOYING--CODE====="
echo "You can deploy code through Tomcat manager"
echo "by navigating to http://localhost:8080"
echo "or by using deploy.sh (through ssh)
echo "-----------------------"
echo ""
echo ""
| true |
1be4a22a995332ecfcb601627277995e79d9f246 | Shell | cms-sw/cmssw | /Utilities/RelMon/scripts/compare_two_releases.sh | UTF-8 | 5,547 | 3.671875 | 4 | [
"Apache-2.0"
] | permissive | #! /bin/bash
# commands to run a comparison
# Usage: compare_two_releases.sh <release1> <release2>
RELEASE1=$1
RELEASE2=$2
echo About to compare $RELEASE1 and $RELEASE2
#-------------------------------------------------------------------------------
# Set Some useful variables ----------------------------------------------------
echo "Set Some useful variables..."
# The output directory name
COMPDIR="$RELEASE1"VS"$RELEASE2"
# The base directory on AFS
RELMONAFSBASE=/afs/cern.ch/cms/offline/dqm/ReleaseMonitoring
RELMONAFS="$RELMONAFSBASE"/"$COMPDIR"
# The base directory on Castor
RELMONCASTOR=/castor/cern.ch/user/d/dpiparo/TestRelMonOnCastor/"$COMPDIR"
# The number of simultaneous processes
NPROCESSES=6
# Fetch Files and Organise them ------------------------------------------------
# Full and FastSim: Get them from the GUI
echo "Fetching MC datasets..."
fetchall_from_DQM.py $RELEASE1 -mc --p2 "START" 2>&1 |tee step_1_fetch_MC_rel1.log
fetchall_from_DQM.py $RELEASE2 -mc --p2 "START" 2>&1 |tee step_1_fetch_MC_rel2.log
# Make directories and copy into them
# (FastSim files are moved first; the remaining ROOT files are FullSim.)
mkdir FastSim
mv *FastSim*root FastSim
mkdir FullSim
mv *root FullSim
# Arrange files for a FullSimFastSim comparison
# create and enter the directory
FULLSIMFASTISMDIR=FullSimFastSim;
mkdir $FULLSIMFASTISMDIR;
cd $FULLSIMFASTISMDIR;
# link all fastsim files
for FASTSIMFILE in `ls ../FastSim|grep "$RELEASE1"`; do
  ln -s ../FastSim/"$FASTSIMFILE" .;
done
# Link only those files that correspond to the FSim ones
# Isolate the dataset name
for DSET in `ls ../FastSim|sed 's/__/ /g'| cut -f2 -d " "`;do
  # The datasets can be more than one: e.g. pt10 or pt100
  FULLSIMFILES=`echo ../FullSim/*"$DSET"*"$RELEASE1"*`;
  # therefore loop on them
  for FULLSIMFILE in `echo $FULLSIMFILES`; do
    if [ -f $FULLSIMFILE ]; then
      ln -s $FULLSIMFILE .;
    fi;
  done; # end loop on fullsim datasets files matching the particular fastsim dataset
done; # end loop on datasets
# get out of the dir
# (fix: the line above was missing its leading '#', so the shell tried to run
# a non-existent command named 'get' on every invocation)
cd -
# Data: Get them from the GUI
echo "Fetching Data datasets..."
fetchall_from_DQM.py $RELEASE1 -data 2>&1 |tee step_2_fetch_DATA_rel1.log
fetchall_from_DQM.py $RELEASE2 -data 2>&1 |tee step_2_fetch_DATA_rel2.log
# Make directories and copy into them
# (all MC ROOT files were already moved away, so '*root' now only matches
# the freshly downloaded data files)
mkdir Data
mv *root Data
# Creating dir on AFS -----------------------------------------------------------
echo "Creating directory on AFS"
mkdir $RELMONAFS
# Run the Comparisons, make the reports and copy them----------------------------
# Each block: run ValidationMatrix.py on a sample directory, compress the
# resulting report for the web, then publish it to the AFS area.
echo "Creating Reports"
echo " @@@ FastSim"
ValidationMatrix.py -a FastSim -o FastSimReport -N $NPROCESSES 2>&1 |tee step_3_reports_FastSim.log
echo "Compressing report for web"
dir2webdir.py FastSimReport 2>&1 |tee step_4_compress_FastSim.log
echo "Copying report on the web"
cp -r FastSimReport $RELMONAFS
echo " @@@ FastSim HLT"
ValidationMatrix.py -a FastSim -o FastSimReport_HLT -N $NPROCESSES --HLT 2>&1 |tee step_3_reports_FastSim_HLT.log
echo "Compressing report for web"
dir2webdir.py FastSimReport_HLT 2>&1 |tee step_4_compress_FastSim_HLT.log
echo "Copying report on the web"
cp -r FastSimReport_HLT $RELMONAFS
echo " @@@ FullSim"
ValidationMatrix.py -a FullSim -o FullSimReport -N $NPROCESSES 2>&1 |tee step_3_reports_FullSim.log
echo "Compressing report for web"
dir2webdir.py FullSimReport 2>&1 |tee step_4_compress_FullSim.log
echo "Copying report on the web"
cp -r FullSimReport $RELMONAFS
echo " @@@ FullSim_HLT"
ValidationMatrix.py -a FullSim -o FullSimReport_HLT -N $NPROCESSES --HLT 2>&1 |tee step_3_reports_FullSim_HLT.log
echo "Compressing report for web"
dir2webdir.py FullSimReport_HLT 2>&1 |tee step_4_compress_FullSim_HLT.log
echo "Copying report on the web"
cp -r FullSimReport_HLT $RELMONAFS
echo " @@@ FullSimFastSim"
FULLSIMFASTSIMREPORTDIR="$RELEASE1"_FullSimFastSimReport
ValidationMatrix.py -a $FULLSIMFASTISMDIR -o $FULLSIMFASTSIMREPORTDIR -N $NPROCESSES 2>&1 |tee step_3_reports_FullSimFastSim.log
echo "Compressing report for web"
dir2webdir.py $FULLSIMFASTSIMREPORTDIR 2>&1 |tee step_4_compress_FullSimFastSim.log
echo "Copying report on the web"
cp -r $FULLSIMFASTSIMREPORTDIR $RELMONAFSBASE
echo " @@@ FullSimFastSim"
FULLSIMFASTSIMREPORTDIR_HLT="$RELEASE1"_FullSimFastSimReport_HLT
ValidationMatrix.py -a $FULLSIMFASTISMDIR -o $FULLSIMFASTSIMREPORTDIR_HLT -N $NPROCESSES --HLT 2>&1 |tee step_3_reports_FullSimFastSim.log
echo "Compressing report for web"
dir2webdir.py $FULLSIMFASTSIMREPORTDIR_HLT 2>&1 |tee step_4_compress_FullSimFastSim_HLT.log
echo "Copying report on the web"
cp -r $FULLSIMFASTSIMREPORTDIR_HLT $RELMONAFSBASE
echo " @@@ FullSimFastSim_HLT"
export FULLSIMFASTSIMREPORTDIR_HLT="$RELEASE1"_FullSimFastSimReport_HLT
ValidationMatrix.py -a $FULLSIMFASTSIMDIR -o $FULLSIMFASTSIMREPORTDIR_HLT -N $NPROCESSES --HLT 2>&1 |tee step_3_reports_FullSimFastSim_HLT.log
echo "Compressing report for web"
dir2webdir.py $FULLSIMFASTSIMREPORTDIR_HLT 2>&1 |tee step_4_compress_FullSimFastSim_HLT.log
echo "Copying report on the web"
cp -r $FULLSIMFASTSIMREPORTDIR_HLT $RELMONAFSBASE
echo " @@@ Data"
ValidationMatrix.py -a Data -o DataReport -N $NPROCESSES 2>&1 |tee step_3_reports_Data.log
echo "Compressing report for web"
dir2webdir.py DataReport 2>&1 |tee step_4_compress_Data.log
echo "Copying report on the web"
cp -r DataReport $RELMONAFS
# copy everything on castor ----------------------------------------------------
echo "Backup of the material"
BACKUPDIR="$COMPDIR"Reports_HLT
mkdir $BACKUPDIR
mv *Report* $BACKUPDIR
tar -cvf - $BACKUPDIR | gzip > "$BACKUPDIR".tar.gz
/usr/bin/rfmkdir $RELMONCASTOR
/usr/bin/rfcp "$BACKUPDIR".tar.gz $RELMONCASTOR
| true |
f5a675b3096b120fc59d2eaed6a4c65cd6146168 | Shell | PugnaBogdan/Operating_Systems | /p8.sh | UTF-8 | 280 | 3.109375 | 3 | [] | no_license | #!/bin/bash
# List filesystems that are nearly empty (<=10% used) or small (<=1 GB).
# df fields: 1=filesystem 2=1K-blocks 3=used 4=available 5=use% 6=mountpoint.
# Rewritten to parse the fields with 'read' directly instead of spawning
# awk/sed per line, and to avoid the obsolescent '-o' test operator.
df | tail -n +2 | while read -r fs blocks used avail usep mount; do
    usage=${usep%\%}                 # strip the trailing '%'
    capacity=$((blocks / 1000000))   # 1K blocks -> (approximate) GB
    if [ "$usage" -le 10 ] || [ "$capacity" -le 1 ]; then
        echo "$fs"
    fi
done
| true |
f878b137370d75dbf22f0e36ac68a167d7292893 | Shell | AfricasVoices/Project-WUSC-KEEP | /run_scripts/03_04_clean_messages.sh | UTF-8 | 884 | 3.609375 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Clean the concatenated raw messages for every radio show and export each
# show to a CSV, by invoking the dockerised pipeline once per show.
set -e

if [ $# -ne 2 ]; then
    echo "Usage: sh 03_04_clean_messages.sh <user> <data-root>"
    echo "Cleans radio show answers, and exports to CSVs for analysis."
    exit
fi

USER=$1
DATA_DIR=$2
# Organisation label (currently unused below; kept for reference).
ORG="AVF - KEEP II"

cd ../messages_pipeline

# Ensure the output directories exist before the per-show runs.
mkdir -p "$DATA_DIR/03 Clean Messages"
mkdir -p "$DATA_DIR/04 Message CSVs"

SHOWS=(
    "01 ELIMU"
    "02 AKAI"
    "03 RITA"
    "04 AKIRU"
    "05 JOHN"
    "06 EBEI"
    "07 MARY"
    "08 AROP"
    "09 GIRL"
    "10 SHULE"
)

# Echo each command before running it so the log shows exactly what ran.
for SHOW in "${SHOWS[@]}"
do
    echo "sh docker-run.sh" "$USER" "$DATA_DIR/02 Raw Messages Concatenated/$SHOW.json" \
        "$DATA_DIR/03 Clean Messages/$SHOW.json" "$DATA_DIR/04 Message CSVs/$SHOW.csv"

    sh docker-run.sh "$USER" "$DATA_DIR/02 Raw Messages Concatenated/$SHOW.json" \
        "$DATA_DIR/03 Clean Messages/$SHOW.json" "$DATA_DIR/04 Message CSVs/$SHOW.csv"
done
| true |
700685802a6456b70e424c8ba993d73d1b9a97a4 | Shell | amacharla/holberton-system_engineering-devops | /0x04-loops_conditions_and_parsing/3-until_holberton_school | UTF-8 | 157 | 3.203125 | 3 | [] | no_license | #!/usr/bin/env bash
# Print the school name ten times, counting down with an until loop.
MESSAGE="Holberton School"
count=10
until [ "$count" -eq 0 ]; do
    echo "$MESSAGE"
    count=$((count - 1))
done
| true |
ef4ae299d63975b87c72e97bcad023d83f2f0c99 | Shell | brentwpeterson/server-utilities | /website/vhost-site.sh | UTF-8 | 1,398 | 4 | 4 | [] | no_license | #!/bin/bash
# This script will install all required data for a development environment.
#Run with SUDO?
function createDatabase ()
{
    # Create database $1 plus a same-named MySQL user ($1@localhost,
    # password $2) and grant that user full privileges on the database.
    Q1="CREATE DATABASE IF NOT EXISTS $1;"
    Q2="CREATE USER '$1'@'localhost' IDENTIFIED BY '$2';"
    Q3="GRANT USAGE on *.* to '$1'@'localhost' IDENTIFIED BY '$2';"
    Q4="GRANT ALL PRIVILEGES ON $1.* TO '$1'@'localhost' IDENTIFIED BY '$2';"
    Q5="FLUSH PRIVILEGES;"
    SQL="${Q1}${Q2}${Q3}${Q4}${Q5}"
    # Quoted: the original's unquoted 'echo $SQL' let the shell glob the
    # '*.*' inside the GRANT statements against files in the current dir.
    echo "$SQL"
    # Fix: the original executed the same SQL twice, once via 'mysql -uroot'
    # and again via '`which mysql`' as the invoking user (which duplicated
    # the work and typically failed on permissions). Run it once, as root.
    mysql -uroot -e "$SQL"
    echo "Database Created"
}
##
# Create username and set password
##
# NOTE(review): the password is read with plain 'read' (echoed to the
# terminal) and later embedded on command lines; consider 'read -s'.
echo "Enter the username, followed by [ENTER]:"
read user
echo "Enter the password for the account created, followed by [ENTER]:"
read password
echo "Adding user"
useradd $user
echo $password | passwd $user --stdin
##
# Create required directories
##
echo "Creating Needed Directories"
mkdir -p /var/www/vhosts/$user/html
chown -R $user:$user /var/www/vhosts/$user/
#
# Create apache conf
##
# Instantiate the vhost template by substituting %USER% with the new user.
echo "Creating Apache Configuration"
sed -e "s;%USER%;$user;" $HOME/server-scripts/amazon/base.conf > '/etc/httpd/vhosts.d/'$user'.conf'
echo "Restarting Apache Gracefully"
service httpd graceful
##
# Creating Database
###
echo "Starting Database creation and DB user"
createDatabase $user $password
echo 'Test Login'
# NOTE(review): $db is never assigned anywhere in this script, so the login
# hint written below uses an empty database name — confirm intended value.
echo "mysql -u"$user "-p'"$password"'" $db > $HOME"/"$user"_"$db".txt"
| true |
9ec1a91f1a6277d4dc371be3d3529b49ee582a0b | Shell | lnyng/cs744_final_project | /distlog-0.5.0/scripts/stop-proxy.sh | UTF-8 | 431 | 2.71875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Stop $1 write-proxy instances. Each instance gets its own shard id
# (stepping by 3) and its own service/stats ports (stepping by 1), passed to
# the daemon script through the WP_* environment variables.
shard_id=1
service_port=4181
stats_port=20001

for i in $(seq 1 $1); do
    echo "$shard_id $service_port $stats_port"
    export WP_SHARD_ID=$shard_id
    export WP_SERVICE_PORT=$service_port
    export WP_STATS_PORT=$stats_port
    ./distributedlog-proxy-server/bin/dlog-daemon.sh stop writeproxy
    shard_id=$((shard_id + 3))
    service_port=$((service_port + 1))
    stats_port=$((stats_port + 1))
done
| true |
cabd7b514ddba13b2951f21b86ab5477db8c4d1f | Shell | dramcar/SocialBeer | /comandos.sh | UTF-8 | 3,865 | 2.875 | 3 | [] | no_license | #! /bin/bash
#HOSTNAME=${HOSTNAME:davidr}

# Shared Axis classpaths. The original repeated these very long -cp strings
# verbatim on every java invocation; they are defined once here instead.
# Classpath when running from inside ./axis-1_4/webapps:
CP_WEBAPPS="../lib/axis-ant.jar:../lib/commons-logging-1.0.4.jar:../lib/axis.jar:../lib/jaxrpc.jar:../lib/saaj.jar:../lib/commons-discovery-0.2.jar:../lib/log4j-1.2.8.jar:../lib/wsdl4j-1.5.1.jar:../../mail.jar:../../activation.jar"
# Classpath when running from the project root:
CP_ROOT="axis-1_4/lib/axis-ant.jar:axis-1_4/lib/commons-logging-1.0.4.jar:axis-1_4/lib/axis.jar:axis-1_4/lib/jaxrpc.jar:axis-1_4/lib/saaj.jar:axis-1_4/lib/commons-discovery-0.2.jar:axis-1_4/lib/log4j-1.2.8.jar:axis-1_4/lib/wsdl4j-1.5.1.jar:mail.jar:activation.jar"

if [ $# -eq 0 ]; then
	echo "$0 compila|ejecuta|restaura"
else
	if [ "$1" = "compila" ]; then
		# Compile the service
		javac socialwebservice/SocialBeer.java
		# Package the compiled service classes
		jar cvf socialwebservice.jar socialwebservice/*.class
		# Copy the .jar into the webapps directory
		cp socialwebservice.jar ./axis-1_4/webapps/
		# Stop any running SimpleAxisServer (run from axis-1_4/webapps)
		cd ./axis-1_4/webapps
		java -cp "$CP_WEBAPPS" org.apache.axis.client.AdminClient -p 8888 quit
		# Start the SimpleAxisServer in the background
		java -cp "$CP_WEBAPPS:socialwebservice.jar:./" org.apache.axis.transport.http.SimpleAxisServer -p 8888 1> ../../logServer.txt 2>&1 &
		cd ../../
		# Deploy the service descriptor
		cd ./axis-1_4/webapps
		cp ../../deploySocial.wsdd ./axis/
		java -cp "$CP_WEBAPPS" org.apache.axis.client.AdminClient -p 8888 axis/deploySocial.wsdd
		# Generate the client stubs from the service WSDL
		java -cp "$CP_WEBAPPS" org.apache.axis.wsdl.WSDL2Java "http://localhost:8888/axis/services/SocialBeer?wsdl"
		# Copy the generated stub directories back to the project root
		cp -r localhost/ ../../
		cp -r $HOSTNAME/ ../../
		cd ../../
		# Compile the client against the classes generated from the WSDL
		javac -cp "$CP_ROOT:./" SocialBeerCliente.java
	elif [ "$1" = "ejecuta" ]; then # remaining args are forwarded to the client
		shift
		java -cp "$CP_ROOT:./" SocialBeerCliente $*
	elif [ "$1" = "restaura" ]; then
		# Stop the SimpleAxisServer (run from axis-1_4/webapps)
		cd ./axis-1_4/webapps
		java -cp "$CP_WEBAPPS" org.apache.axis.client.AdminClient -p 8888 quit
		rm -R ./localhost/
		rm -R ./$HOSTNAME/
		rm -R ../../localhost/
		rm -R ../../$HOSTNAME/
		rm ./axis/deploySocial.wsdd
		cd ../../
		rm *.class
		rm socialwebservice.jar
		rm ./socialwebservice/*.class
		rm ./axis-1_4/webapps/socialwebservice.jar
	else
		echo "$0 compila|ejecuta|restaura"
	fi
fi | true |
e611b3d75945d0bf3d72c06ea91cd7ed23a54214 | Shell | FBruynbroeck/oh-my-zsh.config | /.oh-my-zsh-custom/functions.zsh | UTF-8 | 1,614 | 3.515625 | 4 | [] | no_license | # -------------------------------------------------------------------
# FUNCTIONS
# -------------------------------------------------------------------
# Return my IP address
function myip() {
    # Print loopback plus the IPv4/IPv6 addresses of the first two network
    # interfaces (macOS-style interface names lo0/en0/en1).
    ifconfig lo0 | grep 'inet ' | sed -e 's/:/ /' | awk '{print "lo0 : " $2}'
    ifconfig en0 | grep 'inet ' | sed -e 's/:/ /' | awk '{print "en0 (IPv4): " $2 " " $3 " " $4 " " $5 " " $6}'
    ifconfig en0 | grep 'inet6 ' | sed -e 's/ / /' | awk '{print "en0 (IPv6): " $2 " " $3 " " $4 " " $5 " " $6}'
    ifconfig en1 | grep 'inet ' | sed -e 's/:/ /' | awk '{print "en1 (IPv4): " $2 " " $3 " " $4 " " $5 " " $6}'
    ifconfig en1 | grep 'inet6 ' | sed -e 's/ / /' | awk '{print "en1 (IPv6): " $2 " " $3 " " $4 " " $5 " " $6}'
}
# Trac search
# Open a Trac ticket page in the default browser.
# $1   - project (used as the trac.<project>.be subdomain)
# $2.. - optional keywords, joined with '+' onto the ticket URL
function trac_search() {
  # get the open command
  local open_cmd
  if [[ $(uname -s) == 'Darwin' ]]; then
    open_cmd='open'
  else
    open_cmd='xdg-open'
  fi
  local url="http://trac.$1.be/trac/ticket/"
  # no keyword provided, simply open the search engine homepage
  if [[ $# -le 1 ]]; then
    $open_cmd "$url"
    return
  fi
  shift # shift out $1
  # Append every remaining keyword followed by a '+' separator.
  while [[ $# -gt 0 ]]; do
    url="${url}$1+"
    shift
  done
  url="${url%?}" # remove the last '+'
  $open_cmd "$url"
}
# Rewrite the pinned buildout.cerise.base version in all *.cfg files to $1,
# then commit everything (GNU sed required; 'git ci' is a commit alias).
function change_base_buildout_version() {
  gsed -i -E "s:buildout.cerise.base/raw/[0-9.]*:buildout.cerise.base/raw/$1:g" *.cfg && git ci -a
}

# Clone the named repository from the arsia group on the company git server.
function clone() {
  git clone git@git.affinitic.be:arsia/$1.git
}

# Exclude bad commands from history
# (zsh hook: reject a history entry when its first word does not resolve
# to a known command/alias/function)
function zshaddhistory() {
  whence ${${(z)1}[1]} >| /dev/null || return 1
}

# Forces the user to type exit or logout
set -o ignoreeof
| true |
29d286f2edeabd7f24509897c5d791643bf69978 | Shell | ecornell/media-scripts | /update_plex_local_file.sh | UTF-8 | 484 | 3.3125 | 3 | [] | no_license | #!/bin/bash
[[ "${DEBUG}" == "true" ]] && set -x
set -u -o pipefail
DIR=$(dirname "${1}")
echo "Updating Plex - $DIR - ${DIR/\/var\/media/}"
case "$1" in
*/movies/* )
~/scripts/plex-scanner -s -r -c 1 -d "/data${DIR/\/var\/media/}"
;;
*/tv/* )
~/scripts/plex-scanner -s -r -c 2 -d "/data${DIR/\/var\/media/}"
;;
*/other/* )
~/scripts/plex-scanner -s -r -c 8 -d "/data${DIR/\/var\/media/}"
;;
* ) echo "Error - no match";;
esac
| true |
e50010e0d284b1c395ca99edee297df1c10ad940 | Shell | iost-official/iost-docs | /website/translated_docs/ru/assets/boot.sh | UTF-8 | 2,516 | 3.78125 | 4 | [] | no_license | #!/bin/sh
#
# boot.sh
# Copyright (C) 2019 jack <jack@iOSTdeMacBook-Pro.local>
#
# Distributed under terms of the MIT license.
#
# Abort on any command failure or use of an unset variable.
set -ue

# Install prefix and node version; both overridable via the environment.
PREFIX=${PREFIX:="/data/iserver"}
VERSION=${VERSION:="latest"}
# File (created in $PREFIX) that will hold the generated producer key pair.
PRODUCER_KEY_FILE=keypair
CURL="curl -fsSL"
install_docker() {
	# Install Docker via the official convenience script.
	$CURL https://get.docker.com | sudo sh
	# Mirror install_docker_compose: verify the installation and report
	# success/failure. The original unconditionally returned 1, which made
	# the 'set -e' pre-check abort even after a successful install.
	docker version >/dev/null 2>&1 && return 0
	>&2 echo Install docker failed. See https://docs.docker.com/install/.
	return 1
}
install_docker_compose() {
	# Install docker-compose 1.23.2 from GitHub releases (Linux only) and
	# verify it runs; prints a pointer to the docs and returns 1 otherwise.
    _SYS=$(uname)
    if [ x$_SYS = x"Linux" ]; then
        >&2 echo Installing docker-compose ...
        sudo curl -L "https://github.com/docker/compose/releases/download/1.23.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
        sudo chmod +x /usr/local/bin/docker-compose
        docker-compose version >/dev/null && return 0
    fi
    >&2 echo Install docker-compose failed. See https://docs.docker.com/compose/install/.
    return 1
}
#
# Pre-check
#
# Verify curl/python are available and install docker/docker-compose if
# missing. NOTE(review): '&>' is a bash-ism; under a strict POSIX /bin/sh
# (e.g. dash) it would background the command instead — confirm the target
# shells are bash-compatible.
{
    curl --version &>/dev/null
    python -V &>/dev/null
    docker version &>/dev/null || install_docker
    docker-compose version &>/dev/null || install_docker_compose
}

# Warn before wiping a pre-existing install prefix, giving the user 20s
# to Ctrl+C out.
if [ -d "$PREFIX" ]; then
    >&2 echo Warning: path \"$PREFIX\" exists\; this script will remove it.
    >&2 echo You may press Ctrl+C now to abort this script.
    ( set -x; sleep 20 )
fi
sudo rm -rf $PREFIX
sudo mkdir -p $PREFIX
sudo chown -R $(id -nu):$(id -ng) $PREFIX
cd $PREFIX

#
# Generate key producer pair
#
# Run iwallet inside the node image and capture the JSON key pair.
( docker run --rm iostio/iost-node:$VERSION ./iwallet key; ) >> $PRODUCER_KEY_FILE

#
# Get genesis info
#
$CURL "https://developers.iost.io/docs/assets/testnet/$VERSION/genesis.tgz" | tar zxC $PREFIX
$CURL "https://developers.iost.io/docs/assets/testnet/$VERSION/iserver.yml" -o $PREFIX/iserver.yml

#
# Config producer
#
# Extract the secret key from the generated key pair and patch it into the
# server config.
#PUBKEY=$(cat $PRODUCER_KEY_FILE | python -c 'import sys,json;print(json.load(sys.stdin)["Pubkey"]'))
PRIKEY=$(cat $PRODUCER_KEY_FILE | python -c 'import sys,json;print(json.load(sys.stdin)["Seckey"])')

#sed -i.bak 's/ id: .*$/ id: '$PUBKEY'/g' iserver.yml
sed -i.bak 's/ seckey: .*$/ seckey: '$PRIKEY'/g' iserver.yml

#
# Ready to start iServer
#
cat <<EOF >docker-compose.yml
version: "2.2"
services:
  iserver:
    image: iostio/iost-node:$VERSION
    container_name: iserver
    restart: on-failure
    ports:
      - "30000-30003:30000-30003"
    volumes:
      - $PREFIX:/var/lib/iserver:Z
EOF
docker-compose up -d

# Poll the local RPC endpoint until the node answers, printing a dot every
# 2 seconds while waiting.
until $($CURL localhost:30001/getNodeInfo &>/dev/null); do
    printf '.'
    sleep 2
done

>&2 echo Your network ID is:
( set -x; $CURL localhost:30001/getNodeInfo | python -c 'import json,sys;print(json.load(sys.stdin)["network"]["id"])' )
| true |
b2be359cb1ff0d51cc4da6917b518147f792a89c | Shell | RecodeFei/makeperso | /checkapk_perso.sh | UTF-8 | 3,867 | 3.40625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#-------------------------------------------------------------------------------
#
# this file is used to check whether all the custpack apk which should be signed
# has been added to file int/misc/releasekey.sh ,if not
# add to the right list in file releasekey.sh:ApkforSharedKey,ApkforMediaKey,ApkforPlatformKey
# excluding *-res.apk which just containing resources,
# if defined LOCAL_CERTIFICATE := shared in Android.mk, MD5 in apk file: MD5: 5D:C8:20:1F:7D:B1:BA:4B:9C:8F:C4:41:46:C5:BC:C2
# if defined LOCAL_CERTIFICATE := platform in Android.mk,MD5 in apk file: MD5: 8D:DB:34:2F:2D:A5:40:84:02:D7:56:8A:F2:1E:29:F9
# if defined LOCAL_CERTIFICATE := media in Android.mk, MD5 in apk file: MD5: 19:00:BB:FB:A7:56:ED:D3:41:90:22:57:6F:38:14:FF
# if no LOCAL_CERTIFICATE defined in Android.mk, MD5 in apk file: MD5: E8:9B:15:8E:4B:CF:98:8E:BD:09:EB:83:F5:37:8E:87
#
# HISTORY
# 2013/06/06 : Ding Erlei : creation
#-------------------------------------------------------------------------------
#
android_path=`pwd`
echo $android_path
#app_custpack="/local/alto5_wimdata_ng_gene/out/target/product/$1/system/custpack/"
app_custpack=$1
TOP=$2
echo "app_custpack:$app_custpack"
cd $app_custpack
find . -name *.apk | grep -v "JRD_custres" | while read apkname_path
do
#check APK zipalign
$TOP/out/host/linux-x86/bin/zipalign -c 4 $apkname_path
if [ "$?" == "1" ];then
aligned_file=$TOP/`basename $apkname_path`
$TOP/out/host/linux-x86/bin/zipalign -f 4 $apkname_path $aligned_file
mv $aligned_file $apkname_path
fi
if (unzip -l $apkname_path |grep META-INF);then
rsa_cmd=$(echo "unzip -p $apkname_path META-INF/*.*SA | keytool -printcert | grep MD5")
rsa_md5=$(eval $rsa_cmd)
echo $apkname_path
apkname_path=${apkname_path#./}
echo $rsa_md5
#apkname=$(basename "$apkname_path" ".apk")
if ( echo $rsa_md5 | grep "MD5: 5D:C8:20:1F:7D:B1:BA:4B:9C:8F:C4:41:46:C5:BC:C2" ) || ( echo $rsa_md5 | grep "MD5: E8:9B:15:8E:4B:CF:98:8E:BD:09:EB:83:F5:37:8E:87" ); then
echo "$apkname_path need signed with shared key, please add to ApkforSharedKey list in file releasekey.sh"
apkexist_cmd=$(echo "grep \"^${apkname_path}\" $android_path/releasekey.sh")
eval $apkexist_cmd
if [ $? -eq 0 ] ;then
echo "$apkname_path has been added to file $android_path/releasekey.sh"
else
add_cmd=$(echo "sed -i -e '/^shared_apkfiles=(/ a\\${apkname_path}' $android_path/releasekey.sh")
eval $add_cmd
fi
elif ( echo $rsa_md5 | grep "MD5: 8D:DB:34:2F:2D:A5:40:84:02:D7:56:8A:F2:1E:29:F9" ); then
echo "$apkname need signed with platform key, please add to ApkforPlatformKey list in file releasekey.sh"
apkexist_cmd=$(echo "grep \"^$apkname_path\" $android_path/releasekey.sh")
eval $apkexist_cmd
if [ $? -eq 0 ] ;then
echo "$apkname has been added to file $android_path/releasekey.sh"
else
add_cmd=$(echo "sed -i -e '/^platform_apkfiles=(/ a\\$apkname_path' $android_path/releasekey.sh")
eval $add_cmd
fi
elif ( echo $rsa_md5 | grep "MD5: 19:00:BB:FB:A7:56:ED:D3:41:90:22:57:6F:38:14:FF" ); then
echo "$apkname need signed with media key, please add to ApkforMediaKey list in file releasekey.sh"
apkexist_cmd=$(echo "grep \"^$apkname_path\" $android_path/releasekey.sh")
eval $apkexist_cmd
if [ $? -eq 0 ] ;then
echo "$apkname has been added to file $android_path/releasekey.sh"
else
add_cmd=$(echo "sed -i -e '/^media_apkfiles=(/ a\\$apkname_path' $android_path/releasekey.sh")
eval $add_cmd
fi
else
echo "no need to sign again"
fi
else
echo "this apk has no rsa information"
fi
done
| true |
4e1c4f603df474d1d730c23ac343afebf2fce28f | Shell | msgphp/msgphp | /bin/release | UTF-8 | 7,262 | 3.609375 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Shared helpers: label/confirm/git_clean/git_sync/package_name come from
# this rc file.
source bin/.bashrc

# Exactly one argument (the release type) is required.
[[ $# -ne 1 ]] && label "Usage: $0 <type>" ko && exit 1

# Refuse to release from a dirty working tree.
git_clean

# Optional local overrides (e.g. GITHUB_TOKEN).
[[ -f var/.env ]] && source var/.env

base_uri="https://github.com/msgphp"
repo="git@github.com:msgphp/msgphp.git"
curr_version() {
    # Print the latest reachable tag, with any leading 'v' stripped.
    local version="$(git describe --abbrev=0 --tags)" && [[ ${version} == v* ]] && version=${version:1}
    echo "${version}"
}
next_version() {
    # Print $1 bumped according to $2 (major|minor|patch).
    # A leading 'v' on $1 is stripped; a non-semver input aborts the script.
    local version=${1:?missing version}
    [[ ${version} == v* ]] && version=${version:1}
    local parts=(${version//./ })
    [[ ${#parts[@]} -ne 3 ]] && echo "Invalid version" && exit 1
    local major=${parts[0]} minor=${parts[1]} patch=${parts[2]}
    case $2 in
        major) major=$((major + 1)); minor=0; patch=0;;
        minor) minor=$((minor + 1)); patch=0;;
        patch) patch=$((patch + 1));;
    esac
    echo "${major}.${minor}.${patch}"
}
branch_alias() {
    # Print the MAJOR.MINOR branch alias for a semver string ('v' prefix ok).
    # A non-semver input aborts the script.
    local version=${1:?missing version}
    [[ ${version} == v* ]] && version=${version:1}
    local parts=(${version//./ })
    if [[ ${#parts[@]} -ne 3 ]]; then
        echo "Invalid version"
        exit 1
    fi
    echo "${parts[0]}.${parts[1]}"
}
release_branches() {
    # Print all remote release branches (origin/X.Y); falls back to
    # origin/master when none exist yet.
    local branches="$(git branch --remote --list "origin/[0-9].[0-9]")"
    echo "${branches:-origin/master}"
}
checkout() {
    # Check out remote branch $1 (given as "remote/branch") as a local
    # tracking branch and pull it up to date.
    local branch=${1:?missing branch}
    local parts=(${branch//// })
    [[ ${#parts[@]} -ne 2 ]] && echo "Invalid branch" && exit 1
    git checkout --quiet -B "${parts[1]}" "${parts[0]}/${parts[1]}" && \
    git pull --quiet "${parts[0]}" "${parts[1]}"
}
tag() {
    # Create an annotated, signed tag "vX.Y.Z" for version $1 ('v' prefix ok).
    local version=${1:?missing version} && [[ ${version} == v* ]] && version=${version:1}
    git tag -sm enjoy "v${version}"
}
changelog() {
    # Regenerate the per-package changelog for the range ($2, $3] on branch
    # $1 using the dockerised github-changelog-generator, splice the result
    # into each package's changelog file and commit the whole update.
    # $1 - release branch name, $2 - previous tag, $3 - version being cut.
    local branch=${1:?missing branch}
    local since_version=${2:?missing since version} && [[ ${since_version} == v* ]] && since_version=${since_version:1}
    local next_version=${3:?missing next version} && [[ ${next_version} == v* ]] && next_version=${next_version:1}
    local filename="CHANGELOG-"$(branch_alias "${next_version}")".md"
    [[ "${next_version}" == 0.* ]] && filename="CHANGELOG-1.0-dev.md"
    # Warm/reuse the generator cache; also import a cache from the parent
    # checkout when present.
    [[ ! -d "var/changelog/${branch}" ]] && mkdir -p "var/changelog/${branch}"
    [[ -d ../../var/changelog ]] && cp -R ../../var/changelog var/
    [[ -z ${GITHUB_TOKEN} ]] && echo "(!) Generating changelog without GitHub token"
    for package in $(find src/*/composer.json -type f); do
        name="$(package_name "${package}")"
        # NOTE(review): '$(unknown)' looks like corrupted text — presumably
        # this should be the changelog file name (e.g. CHANGELOG.md); verify
        # against the upstream repository.
        file="$(dirname "${package}")/$(unknown)"
        [[ ! -f "${file}" ]] && echo "# Changelog" >> "${file}"
        # Generate into a temp file, strip the generator footer, trim blank
        # lines, then insert the new section right below the header.
        rm -f "${file}.tmp" && \
        docker run --init -it --rm \
            -u $(id -u):$(id -g) \
            -v $(pwd):/app \
            -w /app \
            ferrarimarco/github-changelog-generator \
            -u msgphp -p msgphp -t "${GITHUB_TOKEN}" \
            --cache-file "var/changelog/${branch}" \
            --output "${file}.tmp" \
            --since-tag "v${since_version}" \
            --future-release "v${next_version}" \
            --release-branch "${branch}" \
            --release-url "${base_uri}/${name}/tree/%s" \
            --include-labels "${name/-/,}" \
            --simple-list \
            --header-label "" \
            --no-issues \
            --no-filter-by-milestone \
            --no-author \
            --no-compare-link \
            --no-verbose >/dev/null && \
        sed -e '/^\\\* \*This Change Log was automatically generated .*/d' -i "${file}.tmp" && \
        sed -e :a -e '/./,$!d;/^\n*$/{$d;N;};/\n$/ba' -i "${file}.tmp" && \
        sed -e '1 a \\n' -i "${file}" && \
        sed -e "2 r ${file}.tmp" -i "${file}" && \
        sed -e :a -e '/./,$!d;/^\n*$/{$d;N;};/\n$/ba' -i "${file}" && \
        rm -f "${file}.tmp" && \
        git add "${file}"
        [[ $? -ne 0 ]] && return 1
    done
    # Export the (updated) generator cache back to the parent checkout.
    cp -R var/changelog ../../var/
    git commit --quiet -m "update changelog to ${next_version}"
    return 0
}
bump_version() {
    # Set the dev-master branch alias of every package to "$1-dev" and
    # commit the change (if any). Returns 1 on failure.
    local branch=${1:?missing branch}
    sh -c "$(make -s entrypoint) bin/package-exec composer config extra.branch-alias.dev-master \"${branch}-dev\""
    [[ $? -ne 0 ]] && return 1
    [[ $(git status --porcelain) ]] && \
    git add src/*/composer.json && \
    git commit --quiet -m "bumped branch alias to ${branch}"
    return 0
}
bump_deps() {
    # Pin every inter-package "msgphp/*" constraint to "^$1" and commit the
    # change (if any). Returns 1 on failure.
    local branch=${1:?missing branch}
    bin/package-exec sed -i -E "s/\(\\\"msgphp\\\/.+\\\":\\\s*\\\"\).+\(\\\",?\)/\\\1^${branch}\\\2/" composer.json
    [[ $? -ne 0 ]] && return 1
    [[ $(git status --porcelain) ]] && \
    git add src/*/composer.json && \
    git commit --quiet -m "bumped dependencies to ${branch}"
    return 0
}
confirm "Run smoke test?" yes
if [[ $? -eq 1 ]]; then
make smoke-test
[[ $? -ne 0 ]] && label "Failed" ko && exit 1
fi
confirm "Build docs?" yes
if [[ $? -eq 1 ]]; then
bin/build-docs
[[ $? -ne 0 ]] && label "Failed" ko && exit 1
fi
confirm "Continue $1 release?"
[[ $? -ne 1 ]] && label "Aborted" ok && exit 0
label "Synchronizing source"
git_sync var/release "${repo}"
[[ $? -ne 0 ]] && label "Failed" ko && exit 1
pushd var/release &> /dev/null
restore() {
git reset HEAD . && git checkout -- . && git clean -df && popd &> /dev/null
}
case $1 in
major|minor)
curr_version="$(curr_version)"
next_version="$(next_version "${curr_version}" "$1")"
branch="$(branch_alias "${next_version}")"
label "Releasing ${curr_version} -> ${next_version}"
confirm "Generate changelog?" yes
if [[ $? -eq 1 ]]; then
changelog "$(git rev-parse --abbrev-ref HEAD)" "${curr_version}" "${next_version}"
[[ $? -ne 0 ]] && label "Failed" ko && restore && exit 1
fi;
confirm "Bump version?" yes
if [[ $? -eq 1 ]]; then
bump_version "${branch}"
[[ $? -ne 0 ]] && label "Failed" ko && restore && exit 1
confirm "Bump msgphp dependencies?" yes
if [[ $? -eq 1 ]]; then
bump_deps "${branch}"
[[ $? -ne 0 ]] && label "Failed" ko && restore && exit 1
fi
fi
if [[ "${branch}" != 0.* ]]; then
confirm "Create release branch ${branch}?"
release_branch=$?
if [[ ${release_branch} -eq 1 ]]; then
git checkout --quiet -b "${branch}"
[[ $? -ne 0 ]] && label "Failed" ko && restore && exit 1
fi;
fi;
confirm "Tag version?" yes
if [[ $? -eq 1 ]]; then
tag "${next_version}"
[[ $? -ne 0 ]] && label "Failed" ko && restore && exit 1
fi;
if [[ ${release_branch} -eq 1 ]]; then
upcoming_branch="$(branch_alias "$(next_version "${next_version}" minor)")"
git checkout --quiet master && \
bump_version "${upcoming_branch}" && \
bump_deps "${upcoming_branch}"
[[ $? -ne 0 ]] && label "Failed" ko && restore && exit 1
fi
label "Done" ok
;;
patch)
for branch in $(release_branches); do
checkout "${branch}"
[[ $? -ne 0 ]] && label "Failed" ko && restore && exit 1
curr_version="$(curr_version)"
next_version="$(next_version "${curr_version}" patch)"
label "Releasing ${curr_version} -> ${next_version}"
changelog "$(git rev-parse --abbrev-ref HEAD)" "${curr_version}" "${next_version}" && \
tag "${next_version}"
[[ $? -ne 0 ]] && label "Failed" ko && restore && exit 1
label "Done" ok
done;
;;
esac
restore
| true |
4467ffe5da12cb8c9876e8de3d5e200a35009d6f | Shell | ygregw/archrice | /.local/bin/remaps | UTF-8 | 677 | 2.921875 | 3 | [] | no_license | #!/bin/sh
# This script is called on startup to remap keys.
# Increase key speed via a rate change
xset r rate 300 50
# Map the caps lock key to super...
setxkbmap -option caps:ctrl_modifier
# But when it is pressed only once, treat it as escape.
killall xcape 2>/dev/null ; xcape -e 'Caps_Lock=Escape'
# Map the menu button to right super as well.
#xmodmap -e 'keycode 135 = Super_R'
# Rebuild the modifier maps: make Caps_Lock act as an extra Control, and
# fold Control_L into the Super (Mod4) modifier.
xmodmap -e 'clear Lock'
xmodmap -e 'clear Control'
xmodmap -e 'add Control = Caps_Lock Control_R'
xmodmap -e 'clear Mod4'
xmodmap -e 'add Mod4 = Super_L Control_L'
# Turn off the caps lock if on since there is no longer a key for it.
# NOTE(review): '\s' in the grep pattern is a GNU extension — confirm GNU
# grep is available on the target system.
xset -q | grep "Caps Lock:\s*on" && xdotool key Caps_Lock
| true |
5bda4fe8bccdc65454520ea04c663995657e2bb7 | Shell | jphafner/physicsStyle | /awesome-cv/create.sh | UTF-8 | 414 | 2.625 | 3 | [] | no_license | #!/bin/bash
# Build one resume PDF per color theme by substituting the COLOR token
# in resume.tex and compiling the result with LuaLaTeX via latexmk.
colors=(emerald skyblue red pink orange nephritis concrete darknight)
#colors=(emerald)

# Make sure the destination exists up front; previously the final `mv`
# silently produced a file literally named "output2" when it was missing.
outdir=./output2
mkdir -p "${outdir}" || exit 1

for color in "${colors[@]}"; do
    # Instantiate the template for this color, then compile it.
    sed "s/COLOR/${color}/" resume.tex > "resume-${color}.tex" || exit 1
    latexmk -quiet -lualatex -f "./resume-${color}.tex"
    #latexmk -lualatex -f ./resume-${color}.tex
done

# Remove latexmk's auxiliary files (.aux, .log, ...) in one pass.
latexmk -c

for color in "${colors[@]}"; do
    rm -f "resume-${color}.tex"
    mv "resume-${color}.pdf" "${outdir}/"
done
| true |
7ed5a0bdda75f36980354702b8dbff9c2566fd39 | Shell | freddy36/PostNAS | /import/konv_batch.sh | UTF-8 | 8,276 | 3.015625 | 3 | [] | no_license | #!/bin/bash
## -------------------------------------------------
## Conversion of ALKIS data from NAS format to PostGIS.
## Converts all NAS files found in one folder.
## Batch part; expects parameters already validated by the caller.
## -------------------------------------------------
##
## Folder structure:
##  /mandant/
##    /0001/*.xml.zip
##    /0002/*.xml.zip
##     ...
##    /temp/
## A folder /temp/ must exist at the same level as the data folders;
## the NAS data is unpacked there temporarily (relative to the folder
## parameter that is '../temp/').  Conversions for the same client
## running in parallel would clash in that shared temp folder -- avoid!
##
## History (condensed/translated from the original German changelog):
## 2012-02-10  renamed for 0.7
## 2012-02-17  optimisation
## 2012-02-28  new 4th parameter to suppress post-processing
## 2012-04-25  GDAL patch #5444: deletions handled via trigger on table 'delete'
## 2012-05-18  moved to the new GDI, GDAL trunk on PATH
## 2012-06-04  run SQL scripts from their own directory (needed for \i includes)
## 2012-10-30  set environment variables; keep the delete table for analysis
## 2013-10-16  new script pp_praesentation_action.sql for street-name labels
## 2013-10-24  interim praesentation_action.sql disabled again
## 2014-01-31  create an entry in the import table
## 2014-02-13  introduced DBUSER so the database user can be configured
## 2014-05-12  different paths for test (trunk) and production (release 1.11.0)
## 2014-06-18  DB user no longer "postgres"; remove historic objects after run
## 2014-09-09  "--config PG_USE_COPY YES" for speed; print the import table
## 2014-09-11  fixed the import-table entry; no symlink check any more
## 2014-09-23  count the functions in 'delete', store counts as metadata
## ToDo:
##  - is the e/a distinction still useful?  Always "a" = update = -update ?
##  - post-processing: call a script instead of duplicating it here
POSTNAS_HOME=$(dirname $0)
# Converter path: the trunk version (always the latest development state).
PATH=/opt/gdal-2.0/bin:$PATH
EPSG=25832
DBUSER=b600352
# Build the psql user option ("kein DBUSER gesetzt" = "no DBUSER set").
# NOTE(review): with an unquoted $DBUSER this test can never match an empty
# string (the same applies to the parameter checks below); quoting the
# variable would make these guards effective.
if [ $DBUSER == "" ]
then
	echo "kein DBUSER gesetzt"
else
	PGUSER=" -U ${DBUSER} "
fi
# The same user again, formatted for the ogr2ogr connection string.
if [ $DBUSER == "" ]
then
	echo "kein DBUSER gesetzt"
else
	OGRPGUSER=" user=${DBUSER}"
fi
echo "
**********************************************
**  K o n v e r t i e r u n g   PostNAS     **
**********************************************"
## Parameters: 1 = data folder, 2 = database name,
## 3 = e (initial load) / a (NBA update), 4 = pp/nopp (post-processing).
ORDNER=$1
DBNAME=$2
UPD=$3
PP=$4
if [ $ORDNER == "" ]
then
	echo "Parameter 1 'Ordner' ist leer"
	exit 1
fi
if [ $DBNAME == "" ]
then
	echo "Parameter 2 'Datenbank' ist leer"
	exit 2
fi
# "a" selects an NBA update run (ogr2ogr -update), "e" an initial load.
if [ $UPD == "a" ]
then
	verarb="NBA-Aktualisierung"
	update=" -update "
else
	if [ $UPD == "e" ]
	then
		verarb="Erstladen"
		update=""
	else
		echo "Parameter 3 'Aktualisierung' ist weder e noch a"
		exit 3
	fi
fi
if [ $PP == "nopp" ]
then
	echo "KEIN Post-Processing nach dieser Konvertierung."
else
	if [ $PP == "pp" ]
	then
		echo "normales Post-Processing."
	else
		echo "FEHLER: Parameter 4 'Post-Proscessing' ist weder 'nopp' noch 'pp'"
		exit 4
	fi
fi
# Error log (one file per target database):
errprot=${POSTNAS_HOME}'/log/postnas_err_'$DBNAME'.prot'
echo "GDAL/PostNAS Konverter-Version:" >> $errprot
ogr2ogr --version >> $errprot
# Database connection options for psql.
con="${PGUSER} -p 5432 -d ${DBNAME} "
echo "Datenbank-Name . . = ${DBNAME}"
echo "DBUSER ${DBUSER}"
echo "PGUSER ${PGUSER}"
echo "OGRPGUSER ${OGRPGUSER}"
echo "Ordner NAS-Daten . = ${ORDNER}"
echo "Verarbeitungs-Modus= ${verarb}"
echo "POSTNAS_HOME ${POSTNAS_HOME}"
# Any stale entries from a previous run?  Empty the delete table first.
echo "Leeren der delete-Tabelle"
psql $con -c 'TRUNCATE table "delete";'
#echo "Bisherige Konvertierungen (Import-Tabelle):"
#psql $con -c "SELECT * FROM import ORDER by id;"
# Create an entry in the import table.
# Originally required for trigger control, now useful as run metadata.
echo "INSERT INTO import (datum,verzeichnis,importart) VALUES ('"$(date '+%Y-%m-%d %H:%M:%S')"','"${ORDNER}"','"${verarb}"');" | psql $con
# Process the data folder: every zip archive, every XML file within it.
cd ${ORDNER}
rm ../temp/*.gfs
echo "Dateien in " ${ORDNER} " (ls) :"
ls
for zipfile in *.zip ; do
	echo " "
	rm ../temp/*.xml
	echo "*********"
	#echo "* Archiv: " $zipfile
	unzip ${zipfile} -d ../temp
	# Each zip file should contain exactly one XML file, but more are handled.
	for nasdatei in ../temp/*.xml ; do
	#	echo "* Datei: " $nasdatei
		# Intermediate heading in the error log.
		echo "* Datei: " $nasdatei >> $errprot
		# Set environment variables for the converter:
		export GML_FIELDTYPES=ALWAYS_STRINGS # PostNAS treats numbers as strings; the PostgreSQL driver converts them back
		export OGR_SETFIELD_NUMERIC_WARNING=YES # warn about truncated numbers?
		#export CPL_DEBUG=ON # report when attributes get overwritten
		# PostNAS converter call.
		#  --config PG_USE_COPY YES
		ogr2ogr -f "PostgreSQL" -append ${update} -skipfailures \
			PG:"dbname=${DBNAME} host=localhost port=5432 ${OGRPGUSER}" -a_srs EPSG:$EPSG ${nasdatei} 2>> $errprot
		nasresult=$?
		echo "* Resultat: " $nasresult " fuer " ${nasdatei} | tee -a $errprot
	done # end of the XML files in this archive
done # end of the zip archives in the folder
rm ../temp/*.xml
echo " "
echo "** Ende Konvertierung Ordner ${ORDNER}"
# Inserting into the 'delete' table triggers deletions/updates on the other tables.
echo "** Die delete-Tabelle enthaelt so viele Zeilen:"
psql $con -c 'SELECT COUNT(featureid) AS delete_zeilen FROM "delete";'
echo "** aufgeteilt auf diese Funktionen:"
psql $con -c 'SELECT context, COUNT(featureid) AS anzahl FROM "delete" GROUP BY context ORDER BY context;'
# Count the context functions and store the counts as metadata on the
# current conversion run (latest row of the import table).
psql $con -c "
 UPDATE import SET anz_delete=(SELECT count(*) FROM \"delete\" WHERE context='delete')
  WHERE id=(SELECT max(id) FROM import) AND verzeichnis='${ORDNER}' AND anz_delete IS NULL;
 UPDATE import SET anz_update=(SELECT count(*) FROM \"delete\" WHERE context='update')
  WHERE id=(SELECT max(id) FROM import) AND verzeichnis='${ORDNER}' AND anz_update IS NULL;
 UPDATE import SET anz_replace=(SELECT count(*) FROM \"delete\" WHERE context='replace')
  WHERE id=(SELECT max(id) FROM import) AND verzeichnis='${ORDNER}' AND anz_replace IS NULL;"
# Evaluate ignored = true and possibly warn?
#
# Post-processing / follow-up work.
#
if [ $PP == "nopp" ]
then
	echo "** KEIN Post-Processing - Dies spaeter nachholen."
	# Skipping can make sense when several small updates run back to back on
	# a large dataset: the post-processing effort is only needed after the
	# LAST update.
else
	echo "** Post-Processing (Nacharbeiten zur Konvertierung)"
	echo "** - Optimierte Nutzungsarten neu Laden (Script nutzungsart_laden.sql):"
	(cd $POSTNAS_HOME; psql $con -f nutzungsart_laden.sql)
	echo "** - Fluren, Gemarkungen, Gemeinden und Straßen-Namen neu Laden (Script pp_laden.sql):"
	(cd $POSTNAS_HOME; psql $con -f pp_laden.sql)
fi
# Cleanup of historic objects -- better before than after; kept for
# trigger-development analysis.
#echo " delete-Tabelle loeschen:"
#psql $con -c 'TRUNCATE table "delete";'
#echo "** geendete Objekte entfernen:"
#psql $con -c "SELECT alkis_delete_all_endet();"
echo "Das Fehler-Protokoll wurde ausgegeben in die Datei $errprot"
echo "** ENDE PostNAS 0.8-Konvertierung DB='$DBNAME' Ordner='$ORDNER' "
| true |
d6d6e1ddf1daf17fcd34279f21a906036ff4bd42 | Shell | adericbourg/shortest-path-sandbox | /src/main/resources/net/dericbourg/ratp/gtfs/extract.sh | UTF-8 | 500 | 4.03125 | 4 | [] | no_license | #!/usr/bin/env bash
set -x
# Extracts only metro gtfs data
# ./extract.sh ~/tmp/gtfs_ratp METRO
GTFS_ROOT_DIR="$1"
TYPE="$2"
FILE_PATTERN="RATP_GTFS_$TYPE*.zip"

if [[ -z "$GTFS_ROOT_DIR" ]] ; then
    echo "No GTFS root directory specified. Exiting..."
    exit 1
fi

cd "$GTFS_ROOT_DIR" || exit 1
for zipfile in $FILE_PATTERN; do
    # Guard against the glob matching nothing (the literal pattern
    # would otherwise be treated as a file name).
    [[ -e "$zipfile" ]] || continue
    directory=${zipfile%.zip}
    echo "Create directory $directory"
    mkdir -p "$directory" || exit 1
    echo "Unzipping file $zipfile"
    # Unzip straight into the target directory instead of cd-ing into it:
    # the old `cd "$directory"` / `cd "$GTFS_ROOT_DIR"` pair broke whenever
    # GTFS_ROOT_DIR was given as a relative path.
    unzip -d "$directory" "$zipfile"
done
| true |
c9cf48394ca957690fa6f65883938fe0dd8c2373 | Shell | hobbitalastair/patchman | /patchman.in | UTF-8 | 8,185 | 3.84375 | 4 | [
"MIT"
] | permissive | #!/usr/bin/bash
#
# patchman: provide a utility for managing changes to packaged files
#
# Author: Alastair Hughes
# Email: hobbitalastair at yandex dot com
# Date: 24-9-2016
# nullglob: unmatched globs (eg "*.patch") expand to nothing rather than to
# themselves; extglob: enables the !(*.sig) patterns used when locating
# package archives below.
shopt -s nullglob extglob
VERSION=1.0
# The @...@ tokens are placeholders substituted at build time (this file is
# a .in template); each can also be overridden through the environment.
PACBACK="${PACBACK:-@PACBACK@}"
PATCHDIR="${PATCHDIR:-@PATCHDIR@}"
PACCACHEDIR="${PACCACHEDIR:-@PACCACHEDIR@}"
# Define some colors (terminal escape sequences captured once via tput).
C_ERR="$(tput setaf 1; tput bold)"
C_WARN="$(tput setaf 3; tput bold)"
C_OK="$(tput setaf 2; tput bold)"
C_BOLD="$(tput setaf 9; tput bold)"
C_RESET="$(tput sgr0)"
# error STATUS MESSAGE... -- report MESSAGE at the error level via
# message(), then terminate the whole script with exit code STATUS.
error() {
    local status="$1"
    shift
    message error "$@"
    exit "${status}"
}
# message LEVEL TEXT... -- write each TEXT argument as its own line to
# stderr, formatted according to LEVEL (debug, info, warn or error).
# The global VERBOSE setting controls visibility: debug needs VERBOSE>=2,
# info needs VERBOSE>=1, warn and error are always shown.  At VERBOSE>=2
# every line also gains a timestamp prefix.  An unknown LEVEL is a
# programming error and aborts the script.
message() {
    local kind="$1"
    shift

    local template="%s\n"
    local threshold="0"
    if [ "${kind}" = "debug" ]; then
        template="DBG %s\n"
        threshold="2"
    elif [ "${kind}" = "info" ]; then
        template="${C_OK}-->${C_RESET} %s\n"
        threshold="1"
    elif [ "${kind}" = "warn" ]; then
        template="${C_WARN}>>>${C_RESET} %s\n"
    elif [ "${kind}" = "error" ]; then
        template="${C_ERR}!!!${C_BOLD} %s${C_RESET}\n"
    else
        printf "${C_ERR}BUG${C_RESET} Unknown message format '%s'!\n" \
            "${kind}" 1>&2
        exit 1
    fi

    # NOTE(review): '+%m:%S' is month:second; '+%M:%S' was probably
    # intended.  Kept as-is to preserve behaviour.
    if [ "${VERBOSE}" -ge 2 ]; then
        template="$(date '+%m:%S') ${template}"
    fi

    # printf re-applies the format for every remaining argument, so each
    # TEXT argument is rendered as a separate formatted line.
    if [ "${VERBOSE}" -ge "${threshold}" ]; then
        printf -- "${template}" "$@" 1>&2
    fi
}
# list_changed -- report every backup file whose on-disk content differs
# from its reference checksum.
#
# Reads "path checksum pkg" triples from the ${PACBACK} helper on fd 3
# (fd 3 is used so commands inside the loop cannot eat the list from
# stdin; the third field is not used here).  For each file:
#   * skip it when an IGNORE marker exists under ${PATCHDIR},
#   * warn when the installed file is unreadable,
#   * otherwise compare md5 checksums and report changed files.
# Unless --original was given, a file that has saved patches or a saved
# replacement under ${PATCHDIR} is compared against the *patched*
# packaged version instead of the checksum reported by ${PACBACK}.
list_changed() {
    # List the changed files.
    local file sum pkg newsum
    while IFS=" " read file sum pkg <&3; do
        if [ -e "${PATCHDIR}/${file}/IGNORE" ]; then
            message debug "Skipping ignored file ${file}"
        elif [ ! -r "/${file}" ]; then
            message warn "Skipping ${file}"
        else
            if ! "${ORIGINAL}" && [ -e "${PATCHDIR}/${file}" ]; then
                # Re-checksum the patched packaged copy for comparison.
                local newfile="$(get_pkg_file "/${file}")" || \
                    error 1 "Failed to extract file for ${file}!"
                sum="$(md5sum "${newfile}" | cut -d' ' -f1)"
            fi
            newsum="$(md5sum "/${file}" | cut -d' ' -f1)"
            if [ -n "${newsum}" ] && [ "${newsum}" != "${sum}" ]; then
                message info "${file} has been changed"
            fi
        fi
    done 3< <("${PACBACK}")
}
# get_pkg_owning -- print the name of the package owning the given file.
# NOTE(review): this reads ${file} from the caller's scope rather than
# "$1"; the argument callers pass is ignored.  It works through bash's
# dynamic scoping of locals, but is fragile -- confirm before relying on it.
get_pkg_owning() {
    # Print the name of the package owning the given file.
    pacman -Qqo "${file}" || \
        error 1 "Could not find an owner for '${file}'"
}
# get_pkg_file FILE -- extract FILE (an absolute path) from its owning
# package's cached archive into ${PATCHTMPDIR}, apply any saved
# replacement or *.patch files from ${PATCHDIR} (unless --original was
# given), and print the path of the extracted copy to stdout.
# Exit codes via error(): 1 no owner, 2 no package info, 3 no cached
# archive / extraction failure.
get_pkg_file() {
    # Print a path to the actual packaged file to stdout.
    local file="$1"
    local pkgname pkginfo version arch pkg
    pkgname="$(get_pkg_owning "${file}")" || exit "$?"
    pkginfo="$(pacman -Qi --color=never "${pkgname}")" || \
        error 2 "Could not find information on package '${pkgname}'"
    # The version/arch fields are the last space-separated word of their
    # lines; rev|cut|rev grabs that final word.
    version="$(printf "${pkginfo}" | grep '^Version' | \
        rev | cut -d' ' -f1 | rev)"
    arch="$(printf "${pkginfo}" | grep '^Architecture' | \
        rev | cut -d' ' -f1 | rev)"
    pkg="${PACCACHEDIR}/${pkgname}-${version}-${arch}"
    # !(*.sig) (extglob) matches any compression suffix but not signatures.
    if [ ! -f "${pkg}".pkg.tar.!(*.sig) ]; then
        error 3 "Could not find package for ${pkgname}"
    else
        pushd "${PATCHTMPDIR}" > /dev/null
        # ${file:1} strips the leading '/' to match the archive layout.
        bsdtar -xf "${pkg}".pkg.tar.!(*.sig) "${file:1}" || \
            error 3 "Failed to extract '${pkg}'!"
        popd > /dev/null
    fi
    local path="${PATCHTMPDIR}/${file:1}"
    if ! "${ORIGINAL}"; then
        # A plain file under PATCHDIR is a full replacement; a directory
        # holds individual *.patch files to apply in glob order.
        if [ -f "${PATCHDIR}/${file:1}" ]; then
            message debug "Replacing file"
            cat "${PATCHDIR}/${file:1}" > "${path}"
        else
            local patch
            for patch in "${PATCHDIR}/${file:1}/"*.patch; do
                message debug "Applying patch ${patch} to ${path}"
                patch "${path}" < "${patch}" > /dev/null
            done
        fi
    fi
    printf '%s\n' "${path}"
}
# vimdiff_file FILE -- interactively compare the installed FILE against
# the packaged (patched, unless --original) version using vimdiff.
vimdiff_file() {
    # Open an interactive vimdiff session for the given file.
    local file="$1"
    vimdiff "${file}" "$(get_pkg_file "${file}")"
}
# diff_file FILE -- print a unified-style diff of the packaged version
# against the installed FILE to stdout.
diff_file() {
    # Generate a diff for the given file.
    local file="$1"
    message debug "Generating diff for '${file}'"
    diff "$(get_pkg_file "${file}")" "${file}"
}
# print_file FILE -- print the packaged version of FILE (patched, unless
# --original was given) to stdout.
print_file() {
    # Print the packaged file.
    local file="$1"
    message debug "Printing file for '${file}'"
    cat "$(get_pkg_file "${file}")"
}
# revert FILE -- replace the installed FILE with the version provided by
# its package (patched, unless --original was given).
revert() {
    # Revert the given file to the one provided by the package.
    local file="$1"
    message debug "Reverting '${file}'"
    mv -f "$(get_pkg_file "${file}")" "${file}" || \
        error 1 "Failed to revert ${file}!"
}
# save FILE -- store the current installed FILE as a full replacement
# under ${PATCHDIR}, creating intermediate directories as needed.
# ${file:1} drops the leading '/' so the tree mirrors the filesystem.
save() {
    # Save the given file.
    local file="$1"
    message debug "Saving '${file}'"
    if [ ! -d "${PATCHDIR}/$(dirname "${file:1}")" ]; then
        mkdir -p "${PATCHDIR}/$(dirname "${file:1}")" || \
            error 1 "Failed to create ${PATCHDIR}/$(dirname "${file:1}")!"
    fi
    cat "${file}" > "${PATCHDIR}/${file:1}" || \
        error 1 "Failed to save '${file}'!"
}
# ignore FILE -- mark FILE as ignored by creating an empty IGNORE marker
# in its ${PATCHDIR} directory; list_changed() skips marked files.
ignore() {
    # Mark the given file as ignored.
    local file="$1"
    message debug "Marking '${file}' as ignored"
    local path="${PATCHDIR}/${file:1}"
    if [ ! -d "${path}" ]; then
        mkdir -p "${path}" || error 1 "Failed to create ${path}!"
    fi
    # ':' with a redirection creates/truncates the marker file.
    : > "${PATCHDIR}/${file:1}/IGNORE" || \
        error 1 "Failed to ignore ${file}!"
}
# Parse the arguments.
# Default verbosity is 1 (info); -q lowers it to 0, -d raises it to 2.
# Options may be freely combined; positional file arguments are only
# accepted once an option that takes files (FILE_ARGS) has been seen.
VERBOSE="${VERBOSE:-1}"
FILE_ARGS=false
ORIGINAL=false
LIST_CHANGED=false
VIMDIFF=false
DIFF=false
PRINT=false
REVERT=false
SAVE=false
IGNORE=false
TARGETS=()
for arg in "$@"; do
    case "${arg}" in
        -h|--help)
            printf "${C_BOLD}%s${C_RESET} %s\n\n" "${0}" "${VERSION}"
            printf "Manage changes to packaged files.
${C_OK}-h|--help${C_RESET}           Print this message
${C_OK}-v|--version${C_RESET}        Print the version
${C_OK}-d|--debug${C_RESET}          Run verbosely
${C_OK}-q|--quiet${C_RESET}          Run quitely
${C_OK}-o|--original${C_RESET}       Use the original file unpatched
${C_OK}-l|--list-changed${C_RESET}   List the changed backup files
${C_OK}-V|--vimdiff <files>${C_RESET}  Interactively diff the files
${C_OK}-D|--diff <files>${C_RESET}   Diff the given files
${C_OK}-p|--print <files>${C_RESET}  Print the patched file
${C_OK}-r|--revert <files>${C_RESET}  Revert the given files
${C_OK}-s|--save <files>${C_RESET}   Save the changes to the file
${C_OK}-i|--ignore <files>${C_RESET}  Mark the given files as ignored
Author: Alastair Hughes <hobbitalastair at yandex dot com>\n"
            exit 0;;
        -v|--version)
            printf "%s version %s\n" "$0" "${VERSION}"
            exit 0;;
        -q|--quiet) export VERBOSE="0";;
        -d|--debug) export VERBOSE="2";;
        -o|--original) ORIGINAL="true";;
        -l|--list-changed) LIST_CHANGED="true";;
        -p|--print) PRINT="true"; FILE_ARGS="true";;
        -V|--vimdiff) VIMDIFF="true"; FILE_ARGS="true";;
        -D|--diff) DIFF="true"; FILE_ARGS="true";;
        -r|--revert) REVERT="true"; FILE_ARGS="true";;
        -s|--save) SAVE="true"; FILE_ARGS="true";;
        -i|--ignore) IGNORE="true"; FILE_ARGS="true";;
        *) if "${FILE_ARGS}"; then
                TARGETS+=("${arg}")
            else
                error 1 "Unknown argument '${arg}'"
            fi;;
    esac
done
# Check the results: a file-taking option without any files is reported
# (but not fatal); running with no arguments just prints the banner.
if [ "${#TARGETS[@]}" -eq 0 ] && "${FILE_ARGS}"; then
    message error "No targets given"
fi
if [ "$#" -eq 0 ]; then
    printf "${C_BOLD}%s${C_RESET} %s\n" "${0}" "${VERSION}" 1>&2
fi
# Create a cachedir. This is used for extracting files, to preserve
# permissions.  The EXIT trap removes it again on every exit path.
PATCHTMPDIR="$(mktemp -d "${TMPDIR:-/tmp}/patchman.XXXXXX")" || \
    error 1 "Could not create a temporary dir!"
trap "rm -rf '${PATCHTMPDIR}'" EXIT
if "${LIST_CHANGED}"; then
    list_changed
fi
# Apply every requested action, in this fixed order, to every target file.
for file in "${TARGETS[@]}"; do
    if [ ! -f "${file}" ]; then
        error 1 "Could not find file '${file}'"
    fi
    if "${PRINT}"; then
        print_file "${file}"
    fi
    if "${VIMDIFF}"; then
        vimdiff_file "${file}"
    fi
    if "${DIFF}"; then
        diff_file "${file}"
    fi
    if "${REVERT}"; then
        revert "${file}"
    fi
    if "${SAVE}"; then
        save "${file}"
    fi
    if "${IGNORE}"; then
        ignore "${file}"
    fi
done
| true |
afa3976a1496281239f61c9df8e6e0db9c175961 | Shell | jangidkrishna/scripts | /lvm/mkfs.sh | UTF-8 | 138 | 2.65625 | 3 | [] | no_license | #!/bin/bash
# Interactively create a filesystem on an LVM logical volume
# (/dev/<vg>/<lv>) and show the resulting device node.
echo "choose the file system "
read -r fstype
echo "give the name of vg and lv"
read -r vg
read -r lv

# Refuse to proceed with empty answers: an empty VG/LV would make the
# target "/dev//" and mkfs could be pointed at the wrong device.
if [ -z "$fstype" ] || [ -z "$vg" ] || [ -z "$lv" ]; then
    echo "filesystem type, vg and lv must all be given" >&2
    exit 1
fi

dev="/dev/$vg/$lv"
# Only format something that actually exists as a block device.
if [ ! -b "$dev" ]; then
    echo "$dev is not a block device" >&2
    exit 1
fi

mkfs -t "$fstype" "$dev"
ls -lt "$dev"
| true |
bcfc30ed2bb979584a30e671bd15aa0be0c403c7 | Shell | soumikgh/Soft_Engg_Proj_Code | /Shell_scripts_for_manipulating_data/getPackages.sh | UTF-8 | 118 | 2.96875 | 3 | [] | no_license | #!/bin/bash
# $1 - directory in which the files are there
# $2 - (optional) output file, default ./packages.txt
# Appends the first line (e.g. the package declaration) of every regular
# file in the directory to the output file.
collect_packages() {
    local dir=$1
    local out=${2:-packages.txt}
    local f
    if [ ! -d "$dir" ]; then
        echo "usage: $0 <dir> [outfile]" >&2
        return 1
    fi
    # Glob instead of `for i in $(ls $1)`: the old loop broke on file
    # names containing whitespace and tried to `head` subdirectories.
    for f in "$dir"/*; do
        [ -f "$f" ] || continue
        head -n1 -- "$f" >> "$out"
    done
}

collect_packages "$@"
| true |
54f2d08ca14e519731fb8b629441b3583b082c3c | Shell | chamara-dev/nwjs-e2e-test | /scripts/installTestEnv.sh | UTF-8 | 656 | 2.9375 | 3 | [] | no_license | # current directory
# Resolve the directory this script lives in, then download the Selenium
# server, the node-webkit chromedriver and the node-webkit runtime into
# the UI-test environment directory.
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
UITEST_DIR=${DIR}/../test/env

URL_SELENIUM="http://selenium-release.storage.googleapis.com/2.44/selenium-server-standalone-2.44.0.jar"
URL_CD="http://dl.nwjs.io/v0.11.6/chromedriver-nw-v0.11.6-osx-x64.zip"
URL_NW="http://dl.nwjs.io/v0.11.6/node-webkit-v0.11.6-osx-x64.zip"

FN_SELENIUM=$(basename "$URL_SELENIUM")
FN_CD=$(basename "$URL_CD")
FN_NW=$(basename "$URL_NW")
DIR_NW="${FN_NW%.*}"

# Abort instead of downloading/unzipping into whatever the cwd happens to be.
cd "$UITEST_DIR" || exit 1
# -f: fail on HTTP errors instead of saving an HTML error page as the artifact.
curl -fO "$URL_SELENIUM" || exit 1
curl -fO "$URL_CD" || exit 1
curl -fO "$URL_NW" || exit 1
# TODO: for linux or windows, too!
unzip -j "$FN_CD" || exit 1
unzip "$FN_NW" || exit 1
mv "$DIR_NW/node-webkit.app" .
# npm install -g mocha | true |
a91d603aa230b1d756ed15f8fd25c68ebf42b4d9 | Shell | orzzzli/zhongyuhuacai | /kernel/linux_tool/checkprocess.sh | UTF-8 | 789 | 3.5 | 4 | [] | no_license | #!/bin/sh
php_path="/usr/local/php7.1/bin/php"
project_path="/www/wwwroot/test.gitwan.vsgogo.com"
process_name=(sendGiftTaskDemon)

ac=$1
if [ -z "$ac" ]; then
    echo "ac is null,checkAndStart or restart"
    exit 1
fi

# checkAndStart NAME ACTION
# Start the PHP worker NAME if it is not running; with ACTION=restart,
# kill the running instance first and start it again.
function checkAndStart(){
    local proc=$1
    local action=$2
    local pid
    # pgrep -f replaces the fragile `ps -fe | grep $1 | grep -v grep`
    # pipeline; take the first matching pid like the old awk did.
    pid=$(pgrep -f "$proc" | head -n1)
    if [ -z "$pid" ]; then
        cd "$project_path" || return 1
        # The old code wrapped these in backticks, which pointlessly tried
        # to execute the (empty) command output afterwards.
        nohup "$php_path" cmd.php "$proc" > output 2>&1 &
        echo "start $proc ok!"
    else
        echo "$proc pid is $pid"
        # Use the ACTION argument the caller already passes instead of
        # silently reading the global $ac.
        if [ "$action" = "restart" ]; then
            kill "$pid"
            cd "$project_path" || return 1
            nohup "$php_path" cmd.php "$proc" > output 2>&1 &
            echo "restart $proc ok!"
        fi
    fi
}
for name in ${process_name[*]}
do
checkAndStart $name $ac
done | true |
bbe6a58b86a6f48cfe8ee295c6f49dcb94d2f657 | Shell | sandywang/CCS | /bin/ccs_template_02_anatproc | UTF-8 | 4,767 | 3.46875 | 3 | [] | no_license | #!/usr/bin/env bash
#################################################################################
## SCRIPT TO RUN GENERAL RESTING-STATE PREPROCESSING
##
## Written by Xindi Wang.
## Email: sandywang.rest@gmail.com
##
#################################################################################
#################################################################################
## PARAMETERS
#################################################################################
# ADir: analysis root (one sub-directory per subject); SubjList is read
# from ${ADir}/scripts/${SubjListName}.  AnatDir/AnatName locate each
# subject's anatomical image; DoGPU is forwarded to the recon script.
# NOTE(review): DoRefine/StandardRefine are declared but unused in this
# script -- presumably consumed by a later pipeline stage; confirm.
ADir=$( pwd )
SubjListName=subject.list
AnatDir=anat
AnatName=mprage
DoGPU=false
StandardHead=${FSLDIR}/data/standard/MNI152_T1_2mm.nii.gz
StandardBrain=${FSLDIR}/data/standard/MNI152_T1_2mm_brain.nii.gz
DoRefine=false
StandardRefine=
#Directory where scripts are located
ScriptDir=$CCSDIR/bin
#Full path to the list of subjs
SubjList=${ADir}/scripts/${SubjListName}
#################################################################################
##---START OF SCRIPT-----------------------------------------------------------##
#################################################################################
#set -e
echo "====================$( date )===================="
echo "CCS anatomical images processing ..."
## Get subjs to run
subjs=$( cat ${SubjList} )
## SUBJECT LOOP
## Segmenting and reconstructing surfaces: anatomical images
echo "====================$( date )===================="
echo "Surface reconstraction processing ... (May take more than 24 hours for one subject)"
# Poor-man's job queue: the counter lives in the parent shell; once
# CCS_MAX_QUEUE jobs have been launched, `wait` blocks for the whole
# batch before the next one starts.  Only the inner { ... }& group is
# backgrounded, so the throttle itself runs sequentially.
num_of_queue=0
# On interrupt/termination, kill all launched jobs and any recon-all.
# (SIGKILL cannot actually be trapped; it is listed here anyway.)
trap 'jobs -p|xargs -i ccs_killall -15 {};killall -s SIGTERM -u $USER recon-all;exit 1' INT KILL TERM
for subj in ${subjs}
do
{
while [[ $num_of_queue -ge $CCS_MAX_QUEUE ]]
do
wait
num_of_queue=0
done
let num_of_queue=num_of_queue+1
{
set -e
# The skull-strip QC step must have produced brainmask.mgz already.
if [ ! -f ${ADir}/${subj}/mri/brainmask.mgz ]
then
echo "WARNNING: ${subj} may not be checked the skull stripping for quality control!!" >&2
exit 1
fi
# An existing log file marks this step as already done for the subject;
# on failure the log is renamed to .error so the step re-runs next time.
logfile=${ADir}/${subj}/scripts/ccs_01_anatsurfrecon.log
errfile=${ADir}/${subj}/scripts/ccs_01_anatsurfrecon.error
trap "mv -f ${logfile} ${errfile} 2>/dev/null; exit 1" INT KILL TERM
if [ ! -f ${logfile} ]
then
echo "-->RUNNING: surface reconstruction for ${subj}"
echo "=================================================" >> ${logfile}
echo "CCS $( date ) " >> ${logfile}
echo "Segmenting and reconstructing cortical surfaces for ${subj} ..." >> ${logfile}
echo "=================================================" >> ${logfile}
${ScriptDir}/ccs_01_anatsurfrecon.sh ${subj} ${ADir} ${AnatName} ${AnatDir} ${DoGPU} >> ${logfile}
# recon-all.done is the success marker written by the recon pipeline.
if [ -f ${ADir}/${subj}/scripts/recon-all.done ]
then
echo CCS reconstruction for ${subj} has been done! >> ${logfile}
rm -f ${errfile}
else
echo "ERROR: reconstruction stopped with errors! Please check the ccs_01_anatsurfrecon.error and recon-all.error for ${subj}" >&2
mv ${logfile} ${errfile} 2>/dev/null
exit 1
fi
else
echo "-->Finished: Segmentation and surface reconstruction for ${subj}"
fi
}&
}
done
wait
## Registering anatomical images (same queue/skip/error-marker pattern as
## the reconstruction loop above, but for the MNI152 registration step).
echo "====================$( date )===================="
echo "Registering anatomical images to MNI152 template"
num_of_queue=0
trap 'jobs -p|xargs -i ccs_killall -15 {} && exit 1' INT KILL TERM
for subj in ${subjs}
do
{
while [[ $num_of_queue -ge $CCS_MAX_QUEUE ]]
do
wait
num_of_queue=0
done
let num_of_queue=num_of_queue+1
{
# Registration depends on a completed surface-reconstruction step.
if [ ! -f ${ADir}/${subj}/scripts/ccs_01_anatsurfrecon.log ]
then
echo "ERROR: Please check and run the surface reconstraction step for ${subj} first..." >&2
exit 1
else
logfile=${ADir}/${subj}/scripts/ccs_02_anatregister.log
errfile=${ADir}/${subj}/scripts/ccs_02_anatregister.error
trap "mv -f ${logfile} ${errfile} 2>/dev/null; exit 1" INT KILL TERM
if [ ! -f ${logfile} ]
then
echo "-->RUNNING: subj ${subj} registrating anatomical images to MNI152 template"
echo "=================================================" >> ${logfile}
echo "CCS $( date ) " >> ${logfile}
echo "Registering anatomical images to MNI152 template for ${subj} ..." >> ${logfile}
echo "=================================================" >> ${logfile}
${ScriptDir}/ccs_02_anatregister.sh ${subj} ${ADir} ${AnatDir} ${StandardHead} ${StandardBrain} >> ${logfile}
# The warp field is the success marker for the registration step.
if [ -f ${ADir}/${subj}/${AnatDir}/reg/highres2standard_warp.nii.gz ]
then
echo "CCS registration of anatomical images for ${subj} has been done!" >> ${logfile}
rm -f ${errfile}
else
mv ${logfile} ${errfile}
echo "ERROR: registeratin stopped with errors! Please check the logfile ccs_02_anatregister.error for ${subj}" >&2
exit 1
fi
else
echo "-->Finished: Registration for ${subj}"
fi
fi
}&
}
done
wait
exit 0
| true |
44e70c00627c27d949068be8becbcc2c6e4b60df | Shell | distriqt/airnativeextensions | /nativemaps/example/tools/build | UTF-8 | 529 | 2.734375 | 3 | [] | no_license | #!/bin/bash
# Package the AIR test app into a debug APK using ADT.
PACKAGE_NAME=air.com.distriqt.test.debug
APPNAME=../bin-debug/TestNativeMaps
APKNAME=../bin-debug/TestNativeMaps.apk
# NOTE(review): keystore password is hardcoded here; prefer reading it
# from the environment or a protected file.
CERTIFICATE=/path/to/your/certificate.p12
PASSWORD=password
TARGET=apk-debug
EXTDIR=/path/to/ANE/folder
ADT_PATH=adt
echo "Building..."
# Compile debug APK using ADT with our p12 certificate.  Quoting each
# argument keeps paths containing spaces intact, and the explicit exit
# propagates packaging failures to the caller/CI.
"$ADT_PATH" -package -target "$TARGET" -storetype pkcs12 -keystore "$CERTIFICATE" -storepass "$PASSWORD" "$APKNAME" "$APPNAME-app.xml" "$APPNAME.swf" ../bin-debug/icons/ ../bin-debug/res/ -extdir "$EXTDIR" || exit 1
echo "Finished." | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.