blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
11bc4fe673eaced90b4e2be08a3908c66f3d3513 | Shell | dmalyuta/dotfiles | /.modules/python.sh | UTF-8 | 1,863 | 3.390625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# shellcheck disable=SC1090
#
# Python programming tools.
#
# Author: Danylo Malyuta, 2020.
# Install the system Python 3 toolchain and make `python` resolve to python3.
sudo apt-get -y install python3 python3-pip
sudo update-alternatives --install /usr/bin/python python /usr/bin/python3 100
# ..:: Anaconda ::..
# `not_installed` is an external helper (presumably defined in the dotfiles
# environment sourced before this script) -- TODO confirm where it comes from.
if not_installed conda; then
wget -4 https://repo.anaconda.com/archive/Anaconda3-2021.11-Linux-x86_64.sh \
-O /tmp/anaconda.sh
chmod +x /tmp/anaconda.sh
# The Anaconda installer is interactive; it prompts for the install prefix.
/tmp/anaconda.sh
# Update conda
. ~/.bashrc
# NOTE(review): hard-coded user home path; breaks for any user other than
# "danylo" -- confirm whether $HOME should be used instead.
export PATH="/home/danylo/anaconda3/bin:$PATH"
conda install -y anaconda
conda update -y -n base -c defaults conda
fi
# ..:: Default Python environment ::..
# Create a default conda env named after the Python version (e.g. py3106),
# activate it on every bash startup, and seed it with common dev packages.
source ~/.bashrc
PYVERSION=3.10.6
PYENV_NAME=py"${PYVERSION//./}"
CONDA_PATH=$(conda info --base)/etc/profile.d/conda.sh
if ! (conda info --envs | grep -q "$PYENV_NAME"); then
    source "$CONDA_PATH"
    # BUGFIX: the interpreter version was hard-coded as 3.10.6 here; use
    # ${PYVERSION} so the env name and the interpreter can never drift
    # apart when PYVERSION is bumped.
    conda create -y -n "$PYENV_NAME" python="$PYVERSION"
    conda activate "$PYENV_NAME"
    conda install -y ipython
    # Make it the default virtualenv on bash startup
    cat << EOF >> ~/.bashrc
conda activate $PYENV_NAME
EOF
    # Install some Python modules
    pip install jedi
    pip install flake8 pdbpp
    pip install scipy numpy nptyping
    pip install pandas pytest black pyfzf
    pip install virtualenv
    # Add virtualenv to Jupyter
    # https://gist.github.com/swedishmike/902fb27d627313c31a95e31c44e302ac
    pip install --user ipykernel
    python -m ipykernel install --user --name="$PYENV_NAME"
fi
# ..:: Other tools ::..
# Profiling
sudo apt-get -y install pyprof2calltree
# Add the virtualenv path to the PATH
# NOTE(review): `command -v virtualenv` resolves to the *binary file*, not a
# directory, so appending it to PATH looks wrong -- confirm the intent
# (probably the parent directory was meant).
if ! echo "$PATH" | grep -q virtualenv; then
echo export PATH="$PATH":"$(command -v virtualenv)" >> ~/.bashrc
fi
# Update to make sure that the new Python is loaded
source ~/.bashrc
# ..:: Jupyter notebook ::..
sudo apt-get -y install jupyter \
jupyter-notebook
pip install jupyterlab
| true |
4948fa92184214b4a2b160e54a2cb2e7fb0d9cdc | Shell | TrainingByPackt/Command-Line-Fundamentals | /Lesson04/Exercise 21/pgn_extract7.sh | UTF-8 | 1,447 | 4.375 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# A blank line is a line containing (only) 0 or more whitespaces
regex_blank="^[[:space:]]*$"
# Scan the tag/header section of one PGN game from stdin.
# Returns 0 (success) when any line matches the extended regex in $1;
# each matching line is also echoed (test aid). Scanning stops at the
# first blank line, as matched by the global $regex_blank.
function filter_game()
{
    local matched=1
    local tag_line
    while read -r tag_line
    do
        if [[ $tag_line =~ $1 ]]; then
            matched=0
            # FOR TESTING: show the line that satisfied the filter
            echo "$tag_line"
        fi
        # A blank line terminates the header section
        [[ $tag_line =~ $regex_blank ]] && break
    done
    return $matched
}
# Collect the move-list lines of one PGN game from stdin into the global
# variable $moves (lines concatenated with single spaces; note the result
# keeps a leading space). Returns 0 when the list was terminated by a
# blank line, 1 when the input ran out first.
function read_moves()
{
    moves=''
    local chunk
    while read -r chunk
    do
        # A blank line marks the end of the game: success.
        [[ $chunk =~ $regex_blank ]] && return 0
        moves+=" ${chunk}"
    done
    # EOF reached without a terminating blank line: failure.
    return 1
}
# counts the number of moves in the moves list of a PGN format game
# Assumes that "moves" is a string containing the complete moves list;
# each numbered move ("1. e4 e5") contributes exactly one '.' marker.
# The count is stored in the global $num_moves.
function count_moves()
{
    local dots
    dots=$(printf '%s' "$moves" | tr -d -c '.')
    num_moves=$(printf '%s' "$dots" | wc -c)
}
# Driver: process three games from stdin. The loop variable $i is unused;
# the iteration count just bounds how many games are consumed.
# NOTE(review): filter_game's exit status is never checked here, so the
# move list and count are printed even for games whose [Result] tag did
# not match the decisive-result filter -- confirm this is intended.
for i in {1..3}
do
# Scan the tag section; echoes the matching [Result ...] line if any.
filter_game '\[Result "(1-0|0-1)"\]'
# Gather the move list into the global $moves.
read_moves
echo "$moves"
# Derive the global $num_moves from $moves.
count_moves
echo "$num_moves" moves in game
echo
done
| true |
0ed1de88e7318f8e963b50fba750697d5bd18d7a | Shell | hschwaiger-usgs/volcano-ash3d-metreader | /autorun_scripts/get_nam91.sh | UTF-8 | 9,914 | 3.0625 | 3 | [
"LicenseRef-scancode-warranty-disclaimer",
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | #!/bin/bash
# This file is a component of the volcanic ash transport and dispersion model Ash3d,
# written at the U.S. Geological Survey by Hans F. Schwaiger (hschwaiger@usgs.gov),
# Larry G. Mastin (lgmastin@usgs.gov), and Roger P. Denlinger (roger@usgs.gov).
# The model and its source code are products of the U.S. Federal Government and therefore
# bear no copyright. They may be copied, redistributed and freely incorporated
# into derivative products. However as a matter of scientific courtesy we ask that
# you credit the authors and cite published documentation of this model (below) when
# publishing or distributing derivative products.
# Schwaiger, H.F., Denlinger, R.P., and Mastin, L.G., 2012, Ash3d, a finite-
# volume, conservative numerical model for ash transport and tephra deposition,
# Journal of Geophysical Research, 117, B04204, doi:10.1029/2011JB008968.
# We make no guarantees, expressed or implied, as to the usefulness of the software
# and its documentation for any purpose. We assume no responsibility to provide
# technical support to users of this software.
INSTALLDIR="/opt/USGS"
yearmonthday=$1
FChour=$2
SERVER="https://nomads.ncep.noaa.gov/pub/data/nccf/com/nam/prod"
#SERVER="ftp://ftp.ncep.noaa.gov/pub/data/nccf/com/nam/prod"
echo "------------------------------------------------------------"
echo "running get_nam091.sh script for $yearmonthday ${FChour}"
echo `date`
echo "------------------------------------------------------------"
t0=`date`
HourMax=36
HourStep=1
WINDROOT="/data/WindFiles"
NAMDATAHOME="${WINDROOT}/nam/091"
#name of directory containing current files
FC_day=${yearmonthday}_${FChour}
#******************************************************************************
#START EXECUTING
# Note: Since grid 91 files are so big (~800 Mb/hourly-file), we will follow the
# instructions on
# http://nomads.ncep.noaa.gov/txt_descriptions/fast_downloading_grib.shtml
# and first download the idx file, process it, and grab only the grib
# layers needed.
# The following extra utilities are required:
# ftp://ftp.cpc.ncep.noaa.gov/wd51we/fast_downloading_grib/get_inv.pl
# ftp://ftp.cpc.ncep.noaa.gov/wd51we/fast_downloading_grib/get_grib.pl
# These files will be downloaded if they are not on the path
#go to correct directory
mkdir -p ${NAMDATAHOME}/${FC_day}
cd ${NAMDATAHOME}/${FC_day}
# Make sure we have the needed perl scripts for processing the index files
# If the helper is not on PATH, download a private copy into the working
# directory and remember its absolute path in my_get_inv / my_get_grib.
# NOTE(review): the error text says "Can not fine file" (typo for "find");
# left as-is because it is a runtime string.
which get_inv.pl > /dev/null 2>&1
if [[ $? -ne 0 ]]; then
echo "Can not fine file get_inv.pl"
echo "Downloading a fresh copy"
wget ftp://ftp.cpc.ncep.noaa.gov/wd51we/fast_downloading_grib/get_inv.pl
chmod 775 get_inv.pl
my_get_inv=${NAMDATAHOME}/${FC_day}/get_inv.pl
else
my_get_inv=get_inv.pl
fi
# Same treatment for the grib-record fetcher.
which get_grib.pl > /dev/null 2>&1
if [[ $? -ne 0 ]]; then
echo "Can not fine file get_grib.pl"
echo "Downloading a fresh copy"
wget ftp://ftp.cpc.ncep.noaa.gov/wd51we/fast_downloading_grib/get_grib.pl
chmod 775 get_grib.pl
my_get_grib=${NAMDATAHOME}/${FC_day}/get_grib.pl
else
my_get_grib=get_grib.pl
fi
####################################################################
# Set up parameters describing the variables we intend to download.
####################################################################
# Variables as a function of isobaric2
# Geopotential_height_isobaric
# u-component_of_wind_isobaric
# v-component_of_wind_isobaric
# Vertical_velocity_pressure_isobaric
# Temperature_isobaric
# Relative_humidity_isobaric
# Specific_humidity_isobaric
niso2=42
iso2=("10 mb" "20 mb" "30 mb" "50 mb" "75 mb" "100 mb" "125 mb" "150 mb" "175 mb" "200 mb" "225 mb" "250 mb" "275 mb" "300 mb" "325 mb" "350 mb" "375 mb" "400 mb" "425 mb" "450 mb" "475 mb" "500 mb" "525 mb" "550 mb" "575 mb" "600 mb" "625 mb" "650 mb" "675 mb" "700 mb" "725 mb" "750 mb" "775 mb" "800 mb" "825 mb" "850 mb" "875 mb" "900 mb" "925 mb" "950 mb" "975 mb" "1000 mb")
nvar_iso2=7
var_iso2=("HGT" "UGRD" "VGRD" "VVEL" "TMP" "RH" "SPFH")
# Variables as a function of isobaric3
# Cloud_mixing_ratio_isobaric
# Snow_mixing_ratio_isobaric
niso3=40
iso3=("30 mb" "50 mb" "75 mb" "100 mb" "125 mb" "150 mb" "175 mb" "200 mb" "225 mb" "250 mb" "275 mb" "300 mb" "325 mb" "350 mb" "375 mb" "400 mb" "425 mb" "450 mb" "475 mb" "500 mb" "525 mb" "550 mb" "575 mb" "600 mb" "625 mb" "650 mb" "675 mb" "700 mb" "725 mb" "750 mb" "775 mb" "800 mb" "825 mb" "850 mb" "875 mb" "900 mb" "925 mb" "950 mb" "975 mb" "1000 mb")
nvar_iso3=2
var_iso3=("CLWMR" "SNMR")
# Variables as a function of height_above_ground7
# u-component_of_wind_height_above_ground
# v-component_of_wind_height_above_ground
nhag7=2
hag7=("10 m above ground" "80 m above ground")
nvar_hag7=2
var_hag7=("UGRD" "VGRD")
# Variables as a function of depth_below_surface_layer
# Volumetric_Soil_Moisture_Content_depth_below_surface_layer
ndbsl=4
dbsl=("0-0.1 m below ground" "0.1-0.4 m below ground" "0.4-1 m below ground" "1-2 m below ground")
nvar_dbsl=1
var_dbsl=("SOILW")
# Variables as a function of surface
# Planetary_Boundary_Layer_Height_surface
# Frictional_Velocity_surface
# Snow_depth_surface
# Surface_roughness_surface
# Wind_speed_gust_surface
# Categorical_Rain_surface
# Categorical_Snow_surface
# Categorical_Freezing_Rain_surface
# Categorical_Ice_Pellets_surface
# Precipitation_rate_surface
nsurf=1
surf=("surface")
nvar_surf=10
var_surf=("HPBL" "FRICV" "SNOD" "SFCR" "GUST" "CRAIN" "CSNOW" "CFRZR" "CICEP" "PRATE")
# Variables as a function of some special 2d variable
# Pressure_cloud_base
# Pressure_cloud_tops
# Total_cloud_cover_entire_atmosphere
nmisc2d=3
misc2d=("cloud base" "cloud top" "entire atmosphere (considered as a single layer)")
nvar_misc2d=("PRES" "PRES" "TCDC")
####################################################################
# Now start the loop over all time steps
####################################################################
t=0
while [ "$t" -le ${HourMax} ]; do
if [ "$t" -le 9 ]; then
hour="0$t"
else
hour="$t"
fi
# Set up file names and get the index file listing locations of
# the grib records.
INFILE=nam.t${FChour}z.alaskanest.hiresf${hour}.tm00.grib2
INFILEx=${INFILE}.idx
OUTFILE=nam.t${FChour}z.alaskanest.hiresf${hour}.tm00.loc.grib2
URL=${SERVER}/nam.${yearmonthday}/${INFILE}
echo "$t of ${HourMax} $URL"
# This script needs to be current (12/2018 or later) to handle https
${my_get_inv} $URL.idx > my_inv
# Get all variables that are a function of isobaric2
rm -f iso2.grib2
touch iso2.grib2
for (( iv=0;iv<$nvar_iso2;iv++))
do
for (( id=0;id<$niso2;id++))
do
echo "${t}/${HourMax} isobaric2 ${iv}/${nvar_iso2} ${var_iso2[iv]} ${iso2[id]}"
grep ":${var_iso2[iv]}:" my_inv | grep ":${iso2[id]}:" > rec.tmp
cat rec.tmp | ${my_get_grib} $URL tmp.grib2 > /dev/null 2>&1
cat iso2.grib2 tmp.grib2 >> iso2.grib2 2>/dev/null
done
done
# Get all variables that are a function of isobaric3
rm -f iso3.grib2
touch iso3.grib2
for (( iv=0;iv<$nvar_iso3;iv++))
do
for (( id=0;id<$niso3;id++))
do
echo "${t}/${HourMax} isobaric3 ${iv}/${nvar_iso3} ${var_iso3[iv]} ${iso3[id]}"
grep ":${var_iso3[iv]}:" my_inv | grep ":${iso3[id]}:" > rec.tmp
cat rec.tmp | ${my_get_grib} $URL tmp.grib2 > /dev/null 2>&1
cat iso3.grib2 tmp.grib2 >> iso3.grib2 2>/dev/null
done
done
# Get all variables that are function of height_above_ground7
rm -f hag7.grib2
touch hag7.grib2
for (( iv=0;iv<$nvar_hag7;iv++))
do
for (( id=0;id<$nhag7;id++))
do
echo "${t}/${HourMax} ght_above_ground7 ${iv}/${nvar_hag7} ${var_hag7[iv]} ${hag7[id]}"
grep ":${var_hag7[iv]}:" my_inv | grep ":${hag7[id]}:" > rec.tmp
cat rec.tmp | ${my_get_grib} $URL tmp.grib2 > /dev/null 2>&1
cat hag7.grib2 tmp.grib2 >> hag7.grib2 2>/dev/null
done
done
# Get all variables that are function of depth_below_surface_layer
rm -f dbsl.grib2
touch dbsl.grib2
for (( iv=0;iv<$nvar_dbsl;iv++))
do
for (( id=0;id<$ndbsl;id++))
do
echo "${t}/${HourMax} depth_below_surface_layer ${iv}/${nvar_dbsl} ${var_dbsl[iv]} ${dbsl[id]}"
grep ":${var_dbsl[iv]}:" my_inv | grep ":${dbsl[id]}:" > rec.tmp
cat rec.tmp | ${my_get_grib} $URL tmp.grib2 > /dev/null 2>&1
cat dbsl.grib2 tmp.grib2 >> dbsl.grib2 2>/dev/null
done
done
# Get all variables that are function of surface
rm -f surf.grib2
touch surf.grib2
for (( iv=0;iv<$nvar_surf;iv++))
do
for (( id=0;id<$nsurf;id++))
do
echo "${t}/${HourMax} surface ${iv}/${nvar_surf} ${var_surf[iv]} ${surf[id]}"
grep ":${var_surf[iv]}:" my_inv | grep ":${surf[id]}:" > rec.tmp
cat rec.tmp | ${my_get_grib} $URL tmp.grib2 > /dev/null 2>&1
cat surf.grib2 tmp.grib2 >> surf.grib2 2>/dev/null
done
done
# Get all variables that are function of some special 2d variable
rm -f misc2d.grib2
touch misc2d.grib2
for (( iv=0;iv<$nvar_misc2d;iv++))
do
echo "${t}/${HourMax} misc ${iv}/${nvar_misc2d} ${var_misc2d[iv]} ${misc2d[iv]}"
grep ":${var_misc2d[iv]}:" my_inv | grep ":${misc2d[iv]}:" > rec.tmp
cat rec.tmp | ${my_get_grib} $URL tmp.grib2 > /dev/null 2>&1
cat misc2d.grib2 tmp.grib2 >> misc2d.grib2 2>/dev/null
done
# Now bundle all these grib2 files into a grib2 file for this timestep
cat iso2.grib2 iso3.grib2 hag7.grib2 dbsl.grib2 surf.grib2 misc2d.grib2 > ${OUTFILE}
${INSTALLDIR}/bin/gen_GRIB_index ${OUTFILE}
rm -f iso2.grib2 iso3.grib2 hag7.grib2 dbsl.grib2 surf.grib2 misc2d.grib2 rec.tmp tmp.grib2
t=$(($t+${HourStep}))
done # iterate to the next forecast hour
mkdir -p $NAMDATAHOME/latest
cd $NAMDATAHOME/latest
rm -f nam.*
ln -s $NAMDATAHOME/$FC_day/nam.* .
#
#t1=`date`
#echo "download start: $t0"
#echo "download end: $t1"
| true |
327d8b0b318451ae209a82e4b789a3d65e2f0c80 | Shell | paullamar3/dotfiles-plus | /utils/my_curl | UTF-8 | 2,894 | 4.3125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
## A utility for quickly pulling down files from my repositories.

set -e

USAGE="$(basename "$0") [-h] [-x] [-f]
[-b BRANCH]
[-r REPO]
FILE
This script wraps the 'curl' command as a convenience for
retrieving files from my repositories.
-h Displays this help message.
-b BRANCH The branch in the repository from which we will pull.
Defaults to 'master'.
-r REPO The name of the repository from which we pull.
Defaults to 'dotfiles-plus'.
-f Forcefully overwrite any existing file.
-x Install into '.my_bins' as an executable
FILE The file to retrieve.
This script was writen to provide a quick and easy way to pull in files
from my 'dotfiles-plus' repository on GitHub. Thus the options for
choosing a branch or even changing the repository. Much of the time
I will be pulling down shell scripts that I want to use as commands.
Specifying the '-x' option communicates this intent. When '-x' is
specified the retrieved script will be placed in the '~/.mybins'
directory and marked as an executable. Otherwise the file will
simply be written to the current directory.
This uses the 'my_chkpth' function to ensure that '~./my_bins'
is in the \$PATH before trying to install any executable
scripts.
"

# Set up default values for all the options.
MKEXEC=""
FORCE=""
BRANCH="master"
REPO="dotfiles-plus"
FILE=""
ME="paullamar3"
HUB="https://raw.githubusercontent.com/"
DEST="$PWD"

# Parse any option
while getopts ":hfxb:r:" opt; do
    case $opt in
        h)
            echo "$USAGE"
            exit
            ;;
        f)
            FORCE="y"
            ;;
        x)
            MKEXEC="y"
            ;;
        b)
            BRANCH="$OPTARG"
            ;;
        r)
            REPO="$OPTARG"
            ;;
        \?)
            echo "Invalid option: -$OPTARG" >&2
            exit 1
            ;;
        :)
            echo "Option -$OPTARG requires an argument." >&2
            exit 1
            ;;
    esac
done

if [ "$MKEXEC" = "y" ]; then
    # Quoted command substitution: the old unquoted test could misparse
    # when the declared-function list contained whitespace.
    if [ -z "$(declare -F | grep my_chkpth)" ]; then
        echo "No 'my_chkpth' function."
        exit 1
    fi
    # This will check that there is a '.my_bins' directory
    # and create it if there is not.
    my_chkpth -alm ~/.my_bins
fi

shift $(( OPTIND - 1 ))
FILE=$1

# BUGFIX: this used to test the literal string "FILE" (always non-empty),
# so a missing file operand was never caught.
if [ -z "$FILE" ]; then
    echo "Must specify a file to retrieve."
    echo "$USAGE"
    exit 1
fi

# BUGFIX: the overwrite check used to run before FILE was parsed, so it
# always inspected "$DEST/" instead of the real target file; it also
# crashed on the unquoted, possibly-empty $FORCE under `set -e`.
if [ -f "$DEST/${FILE##*/}" ]; then
    echo "$DEST/${FILE##*/} already exists."
    if [ "$FORCE" = "y" ]; then
        echo "Overwriting file."
    else
        exit 1
    fi
fi

# Copy the file to the current working directory.
set -vx
curl -o "${FILE##*/}" "$HUB$ME/$REPO/$BRANCH/$FILE"
set +vx

# If we are downloading an executable move it to the
# ~/.my_bins directory and set the executable attribute.
if [ "$MKEXEC" = "y" ]; then
    mv "${FILE##*/}" ~/.my_bins/
    chmod +x "$HOME/.my_bins/${FILE##*/}"
fi

exit 0
| true |
4f96f8079d440f4234e8b8ae04ef22fca0685527 | Shell | rhlobo/resemblance | /scripts/update-dependency-file.sh | UTF-8 | 349 | 2.5625 | 3 | [] | no_license | #!/bin/bash
## LOADING CONFIGURATION
# ~/.resemblancerc is expected to define SCRIPTS_BASE_PATH and
# CONFIG_HOST_DEPENDENCIES_FILE -- TODO confirm against the rc template.
. "${HOME}/.resemblancerc"
## LOADING HELPER FUNCTIONS
# config.sh / log.sh provide `log` and `updateHostDependenciesDescription`.
. "${SCRIPTS_BASE_PATH}/utils/config.sh"
. "${SCRIPTS_BASE_PATH}/utils/log.sh"
log "- Updating dependency file '${CONFIG_HOST_DEPENDENCIES_FILE}' based on the current machine setup."
# Regenerate the dependency description for the current host.
updateHostDependenciesDescription "${CONFIG_HOST_DEPENDENCIES_FILE}"
| true |
df20239b045b83c575ca0a4650ddd7ba94de45e8 | Shell | FrancoLiptak/Facultad_Informatica_UNLP | /cuarto_año/sistemas_operativos/practica1/scripts_4.sh | UTF-8 | 522 | 3.75 | 4 | [] | no_license | #!/bin/bash
# For every sub-directory of the current directory, rename each *.jpg file
# inside it to "<directory-name>-<index>.jpg", with the index restarting
# at 1 in each directory. Non-jpg files and top-level regular files are
# left untouched.
for element in *; do
    indice=1
    if [ -d "$element" ]; then # if it is a directory, enter it
        # Guard the cd so a permission error cannot make the renames
        # run against the wrong directory.
        cd "$element" || continue
        for file in *; do
            # BUGFIX: quote "${file##*.}" -- an extension-less name
            # containing spaces previously broke the test command.
            if [ -f "$file" ] && [ "${file##*.}" = "jpg" ]; then
                extension=${file##*.}
                actual_file_name=${file%.*}
                directory_name=${element%.*}
                mv -- "$actual_file_name.$extension" "$directory_name-$indice.$extension"
                ((indice++))
            fi
        done
        cd ..
    fi
done
| true |
6d9d10398b24a02ebfa4dba25edd236bd355e42b | Shell | lagopus/lagopus-next-virtio | /test/integration_test/tools/shell/start_lagopus_ansible.sh | UTF-8 | 911 | 3.8125 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# ./start_lagopus_ansible.sh <USER> <HOST> <LAGOPUS PATH> [<TIMEOUT>] [<LOG_DIR>] [LOCAL_LOG_DIR] [<LAGOPUS_LOG>] [<LAGOPUS_OPTS>]
# NOTE(review): the usage comment above lists <USER> <HOST> <LAGOPUS PATH>,
# but the assignments below read LAGOPUS from $1 and USER/HOST from $2/$3
# -- confirm which argument order callers actually rely on.
LAGOPUS=$1
USER=$2
HOST=$3
# Optional arguments with defaults:
TIMEOUT=${4:-30}
LOG_DIR=${5:-"."}
LOCAL_LOG_DIR=${6:-""}
LAGOPUS_LOG=$LOG_DIR/${7:-"lagopus.log"}
# Everything from $8 onward is forwarded to lagopus as extra options.
LAGOPUS_OPTS=${@:8}
PID_FILE="/var/run/lagopus.pid"
# Absolute directory containing this script.
WD=$(cd $(dirname $0); pwd)
# Print the invocation synopsis on stderr.
usage() {
    printf '%s\n' "Usage: ${0} <USER> <HOST> <LAGOPUS PATH> [<TIMEOUT>] [IS_REMOTE] [<LOG_DIR>] [<LAGOPUS_LOG>] [<LAGOPUS_OPTS>]" >&2
}
# Build the ansible --extra-vars string (space-separated key=value pairs).
VARS=""
VARS+=" lagopus=${LAGOPUS}"
VARS+=" lagopus_log=${LAGOPUS_LOG}"
VARS+=" lagopus_opts='${LAGOPUS_OPTS}'"
VARS+=" pid_file=${PID_FILE}"
VARS+=" timeout=${TIMEOUT}"
VARS+=" action=start"
# A non-empty LOCAL_LOG_DIR switches the playbook into remote mode.
if [ x"$LOCAL_LOG_DIR" != x"" ]; then
VARS+=" lagopus_local_log=${LOCAL_LOG_DIR}"
VARS+=" is_remote=true"
fi
# Run the playbook against the single given host; the trailing comma makes
# ansible treat ${HOST} as an inline inventory list.
ansible-playbook $WD/../ansible/lagopus.yml --extra-vars "${VARS}" -u $USER -i "${HOST},"
# Propagate the playbook's exit status as the script's exit status.
RET=$?
exit $RET
| true |
3f6472fe91137a414889f284ba5ae4816d9b7c16 | Shell | mokagio/dotfiles | /zshrc | UTF-8 | 8,099 | 3.078125 | 3 | [] | no_license | # Executes commands at the start of an interactive session.
# ZSH Plugin manager
antigen_intel_path=/usr/local/share/antigen/antigen.zsh
antigen_apple_silicon_path=/opt/homebrew/share/antigen/antigen.zsh
if [[ -f $antigen_apple_silicon_path ]] || [[ -f $antigen_intel_path ]]; then
if [[ -f $antigen_apple_silicon_path ]]; then
source $antigen_apple_silicon_path
else
# Because of the nested if, we know that the intel path exist
source $antigen_intel_path
fi
antigen use oh-my-zsh
# When you try to use a command that is not available locally, searches the
# package manager for a package offering that command and suggests the proper
# install command.
antigen bundle command-not-found
antigen bundle colored-man-pages
# A bunch of handy aliases. See:
# https://github.com/sorin-ionescu/prezto/tree/95ff0360aeef951111c5ca6a80939e9329ddb434/modules/utility
antigen bundle utility
# Syntax highlighting (commands are one color, text in quotes is another, etc.)
antigen bundle zsh-users/zsh-syntax-highlighting
antigen bundle zsh-users/zsh-autosuggestions
# Better completions
antigen bundle zsh-users/zsh-completions
# This makes it so that tab completions are case insensitive
zstyle ':completion:*' matcher-list 'm:{a-zA-Z}={A-Za-z} m:=_ m:=- m:=.'
# "open Vim and hit Crtl-Z. Now you don't need anymore hit fg, but only Crtl-Z
# again"
antigen bundle alexrochas/zsh-vim-crtl-z
zle -N fancy-ctrl-z
bindkey '^Z' fancy-ctrl-z
# You can find more modules for prezto at
# https://github.com/sorin-ionescu/prezto/tree/master/modules
antigen theme denysdovhan/spaceship-prompt
antigen apply
else
echo "❌ Cannot find Antigen ZSH plugin manager in the system"
fi
# Spaceship prompt settings
# https://github.com/denysdovhan/spaceship-prompt/blob/6319158f19a7bb83a8131da7268213cb636f9653/docs/Options.md
#
# This will split the prompt from the user input in two lines,
# which is handy when the prompt is long because of a long branch
# name and/or multiple versions being listed
SPACESHIP_PROMPT_SEPARATE_LINE=true
# I don't like how the prompt says "via 💎 v2.3.0"
SPACESHIP_RUBY_PREFIX=''
SPACESHIP_TIME_SHOW=false
SPACESHIP_VI_MODE_SHOW=false # don't need to know the Vi mode I'm in
SPACESHIP_GIT_PREFIX=''
# Custom setion based on the default Git one, but with counts
source "$DOTFILES_HOME/spaceship_verbose_git.zsh"
# I can't find a way to remove the different prompt element (functions?) by
# their name, so I have to rely on indexes. The numbers are based on what's
# documented here:
# https://github.com/denysdovhan/spaceship-prompt/blob/50e371f5b7b14922c4c2492ef9c7be1095064cb7/docs/Options.md#order
# Also note, Zsh arrays are 1-indexed
#
# First replace everything that needs replacing
SPACESHIP_PROMPT_ORDER=(${SPACESHIP_PROMPT_ORDER[@]:0:4} verbose_git ${SPACESHIP_PROMPT_ORDER[@]:5})
# Then, remove what needs removing
# time, because it's used it in the right prompt
SPACESHIP_PROMPT_ORDER=(${SPACESHIP_PROMPT_ORDER[@]:1})
# RPROMPT is empty by default
SPACESHIP_RPROMPT_ORDER+=(time)
# Use vim keybindings
bindkey -v
# Re-enable Ctrl-r to search history (vim keybindning disabled it)
bindkey '^R' history-incremental-search-backward
# When in normal mode, press v to edit the command in the $VISUAL editor
autoload -z edit-command-line
zle -N edit-command-line
bindkey -M vicmd v edit-command-line
# autojump configs
[[ -s $(brew --prefix)/etc/profile.d/autojump.sh ]] && . $(brew --prefix)/etc/profile.d/autojump.sh
# Turn off autocorrect for some commands
# See http://yountlabs.com/blog/2010/11/06/disable-autocorrect-in-zsh/
alias jake='nocorrect jake'
alias leiningen='nocorrect leiningen'
# As reccomended in the bower installer
alias bower='noglob bower'
# zsh powerups folder
path_to_zsh_powerups=~/Developer/mokagio/zsh
alias showFiles='defaults write com.apple.finder AppleShowAllFiles YES; killall Finder /System/Library/CoreServices/Finder.app'
alias hideFiles='defaults write com.apple.finder AppleShowAllFiles NO; killall Finder /System/Library/CoreServices/Finder.app'
# added by travis gem
[ -f /Users/gio/.travis/travis.sh ] && source /Users/gio/.travis/travis.sh
test -e ${HOME}/.iterm2_shell_integration.zsh && source ${HOME}/.iterm2_shell_integration.zsh
# Ruby environment management setup
eval "$(rbenv init -)"
if which swiftenv > /dev/null; then eval "$(swiftenv init -)"; fi
if gem which lunchy &> /dev/null; then
LUNCHY_DIR=$(dirname `gem which lunchy`)/../extras
if [ -f $LUNCHY_DIR/lunchy-completion.zsh ]; then
. $LUNCHY_DIR/lunchy-completion.zsh
fi
fi
export NVM_DIR="$HOME/.nvm"
# This loads nvm
[ -s "$NVM_DIR/nvm.sh" ] && . "$NVM_DIR/nvm.sh"
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion" # This loads nvm bash_completion
# does it work with ZSH too?
# This automactically calls nvm use when going in a folder with an .nvmrc
# Automatically run `nvm use` when entering a directory with an .nvmrc
# (registered as a zsh chpwd hook below). Installs the pinned version if
# missing, and reverts to the nvm default when leaving managed directories.
autoload -U add-zsh-hook
load-nvmrc() {
local node_version="$(nvm version)"
local nvmrc_path="$(nvm_find_nvmrc)"
if [ -n "$nvmrc_path" ]; then
local nvmrc_node_version=$(nvm version "$(cat "${nvmrc_path}")")
# "N/A" is nvm's marker for "that version is not installed yet".
if [ "$nvmrc_node_version" = "N/A" ]; then
nvm install
elif [ "$nvmrc_node_version" != "$node_version" ]; then
nvm use
fi
elif [ "$node_version" != "$(nvm version default)" ]; then
echo "Reverting to nvm default version"
nvm use default
fi
}
# Register the hook and run once for the shell's starting directory.
add-zsh-hook chpwd load-nvmrc
load-nvmrc
# Convert an input `.md` file (GitHub-flavored Markdown) to standalone
# HTML and paste it to the clipboard. Requires pandoc and pbcopy (macOS).
# I use this everytime I work on a newsletter or other text content to
# paste into an HTML editor.
#
# $1 - path to the .md file; prints an error and returns 1 when missing.
md2html() {
  if [[ -z "$1" ]]; then
    # `printf` will not add a newline at the end of the printed output
    printf "Missing path to .md file to convert to HTML"
    # return something that's not 0 so the consumer knows there's been an
    # error.
    return 1
  fi
  # BUGFIX: quote "$1" so paths containing spaces are passed to pandoc as
  # a single argument instead of being word-split.
  pandoc --from gfm --to html --standalone "$1" | pbcopy
}
# Generate a random integer between 1 and a given threshold, inclusive.
# $1 - the upper bound.
rand() {
  local upper=$1
  printf '%s\n' "$(( RANDOM % upper + 1 ))"
}
# Fastlane autocompletion
# https://docs.fastlane.tools/faqs/#enable-tab-auto-complete-for-fastlane-lane-names
fastlane_autocompletion_source=~/.fastlane/completions/completion.sh
if [[ -f $fastlane_autocompletion_source ]]; then
source ~/.fastlane/completions/completion.sh
else
echo "❌ Could not find Fastlane autocompletion script"
fi
# Useful keybindings and fuzzy completion for fzf
[ -f ~/.fzf.zsh ] && source ~/.fzf.zsh
# Enable Zsh Git tab completions
# This used to be on by default in my pre-antigen prezto setup...
# See also
# https://stackoverflow.com/questions/24513873/git-tab-completion-not-working-in-zsh-on-mac/58517668#58517668
autoload -Uz compinit && compinit
# Go
export GOPATH=$HOME/.go
export PATH=$PATH:$GOPATH/bin
# AWS CLI Autocompleter
# https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-completion.html
autoload bashcompinit && bashcompinit
complete -C '/usr/local/bin/aws_completer' aws
# Xcode build autocompletion, via https://github.com/keith/zsh-xcode-completions
fpath=(/usr/local/share/zsh/site-functions $fpath)
# Copy the contents of a file to the macOS clipboard.
# $1 - path of the file to copy.
pastetoclipboard() {
  # BUGFIX: quote the path and feed pbcopy via redirection; the old
  # `cat $1 | pbcopy` was a useless cat and broke on paths with spaces.
  pbcopy < "$1"
}
alias pbc=pastetoclipboard
# Joseph is my automated virtual assitant. A collection of scripts to open
# certain recurring pages at the start and end of the day.
joseph_path=$HOME/.joseph
if [ -d $joseph_path ]; then
alias joseph=$joseph_path/joseph.rb
else
echo "\033[1;31mCan't find Joseph at $joseph_path. Please install it.\033[0m"
fi
# If there is a local zshrc, load it.
#
# Always load the local zshrc last (or last but before any setting depending on
# it).
LOCAL_ZSHRC="${HOME}/.zshrc.local"
[ -f "$LOCAL_ZSHRC" ] && source "$LOCAL_ZSHRC"
# Load the aliases after the local zshrc, just in case there are env var
# ovverrides in it.
ALIASES_PATH="$DOTFILES_HOME/aliases.sh"
if [[ -f "$ALIASES_PATH" ]]; then
source "$ALIASES_PATH"
else
echo "\033[1;31mMissing aliases file. Have a look inside the zshrc.\033[0m"
fi
| true |
162fd91d7859a794b236510d853908b2aadd8791 | Shell | ysluckly/Hello-Linux | /shell/Day3/test.sh | UTF-8 | 2,410 | 2.96875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Smoke-test the helper library: load api.sh and call myAdd.
# NOTE(review): myAdd's contract (echoes the sum vs. returns it as an
# exit status) is not visible here -- confirm against api.sh.
source api.sh
myAdd 1 2
# Print myAdd's exit status.
echo $?
#while read line
#do
# echo "$line"
#done < file >> file.bak
#
#read line < file
#echo "$line"
#echo "hello world two" >> file
#for (( i = 0; i <= 3; i++ ))
#do
# echo "${i}hello world"
#done >> file1
#arr=(1 2.23 "hello" 'b')
#
#i=0
#while [ $i -lt ${#arr[@]} ]
#do
# echo "$i: ${arr[$i]}"
# let i++
#done
#
#for i in ${arr[@]}
#do
# echo $i
#done
#echo "0: ${arr[0]}"
#echo "1: ${arr[1]}"
#echo "2: ${arr[2]}"
#echo "3: ${arr[3]}"
#
#echo ${#arr[*]}
#echo ${#arr[@]}
#function fun()
#{
# echo "sucess"
# return 1;
#}
#
#set +x
#ret=$(fun)
#echo "$ret"
#set -x
#echo $?
#function myfun()
#{
# echo "hello"
# echo "$1"
# echo "$2"
# echo "$3"
#}
#myfun 1 2 3 4
#for i in $@
#do
# echo "$i"
#done
#while [ $# -gt 0 ]
#do
# echo "$1"
# shift
#done
#
#echo "##############shift before###############"
#echo "\$0 -> $0"
#echo "\$1 -> $1"
#echo "\$2 -> $2"
#echo "\$3 -> $3"
#echo "\$# -> $#"
#echo "\$@ -> $@"
#echo "\$$ -> $$"
#
#shift #shift 1
#echo "##############shift after###############"
#echo "\$0 -> $0"
#echo "\$1 -> $1"
#echo "\$2 -> $2"
#echo "\$3 -> $3"
#echo "\$# -> $#"
#echo "\$@ -> $@"
#echo "\$$ -> $$"
#echo "\$0 -> $0"
#echo "\$1 -> $1"
#echo "\$2 -> $2"
#echo "\$3 -> $3"
#echo "\$# -> $#"
#echo "\$@ -> $@"
#echo "\$$ -> $$"
#sum=0
#for ((i = 1; i <= 100; i += 2))
#do
# let sum+=$i
#done
#echo "$sum"
#sum=0
#for ((i = 1;i <= 100; i++))
#do
# if [ -z $str ];then
# str=$i
# else
# str=$str'+'$i
# fi
# let sum+=$i;
#done
#echo "$str""=$sum"
#
#until /bin/false
#do
# echo "hello"
#done
#while :
#do
# echo "hello"
#done
#for (( ; ; ))
#do
# echo "hello"
#done
#
#i=10
#until [ $i -le 0 ]
#do
# echo "hello $i"
# let i--
#done
#i=0
#while [ $i -le 10 ]
#do
# echo "hello $i"
# let i++
#done
#for i in {1..4} {a..f}
#do
# echo "$i"
#done
#
#for i in {1..3}{a..d}
#do
# echo "$i"
#done
#
#for i in {a..z}
#do
# echo "$i"
#done
#
#for i in {1..10}
#do
# echo "$i"
#done
#for i in {1,2,3,4}
#do
# echo $i
#done
#for ((i = 0; i <= 10; i++))
#do
# echo "hello$i"
#done
#str='hello'
#read mystr
#case $mystr in
##case $1 in
# "$str" )
# echo "start"
# ;;
# 'stop'| "-s" )
# echo "stop"
# ;;
# [Rr]estart )
# echo "restart"
# ;;
# 'down' )
# echo "down"
# ;;
# 'up' )
# echo "up"
# ;;
# * )
# echo "default"
# ;;
#esac
| true |
596ac62e36284177eb7cfc36990f951ee689230f | Shell | sarweshs/elk-with-filebeat-by-docker-compose | /scripts/get_logs.sh | UTF-8 | 800 | 3.75 | 4 | [
"MIT"
] | permissive | #!/bin/sh
# Positional arguments (all required):
account_id=$1
group_id=$2
service_id=$3
connection_id=$4
start_date=$5
end_date=$6
repo_path=$7
env=$8
# clean or create the /mylog dir
if [ -d "$repo_path/mylog" ]; then
printf "\nCleaning the mylog/ dir to make space for new log files...\n"
rm -rfv $repo_path/mylog/*
else
mkdir $repo_path/mylog
fi
printf "\nCleaning Filebeat Registry dir to make space for new log files...\n"
rm -rfv registry/*
# include the end_date in the query
# NOTE(review): `date -j -v +1d -f ...` is BSD/macOS syntax; this will not
# run with GNU date despite the #!/bin/sh shebang -- confirm the target OS.
end_date=$(date -j -v +1d -f "%Y-%m-%d" "$end_date" +%Y-%m-%d)
d="$start_date"
# loop over the date range
while [ "$d" != "$end_date" ]; do
# download log files for each date
./scripts/copy_files_to_machine.sh $account_id $group_id $service_id $connection_id $d $repo_path $env
d=$(date -j -v +1d -f "%Y-%m-%d" "$d" +%Y-%m-%d)
done
| true |
0e871f3a7a2c67b537b0a4611f0d0ba0e87eb192 | Shell | dividead/dotfiles | /.zshrc | UTF-8 | 3,405 | 3.015625 | 3 | [] | no_license | PROMPT='%F{red}%1~ > %F{reset}'
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion" # This loads nvm bash_completion
export HISTCONTROL=ignoreboth:erasedups
setopt EXTENDED_HISTORY
setopt HIST_EXPIRE_DUPS_FIRST
setopt HIST_IGNORE_DUPS
setopt HIST_IGNORE_ALL_DUPS
setopt HIST_IGNORE_SPACE
setopt HIST_FIND_NO_DUPS
setopt HIST_SAVE_NO_DUPS
setopt HIST_BEEP
export FZF_DEFAULT_COMMAND='rg --files --follow --hidden'
# Open an interactive shell inside the matching detmir container.
# $1 - one of: api | web | fe | store; returns 1 on any other value
#      or when no matching container is running.
docker_exec() {
  local name="name=^detmir-"
  case $1 in
    api)   name+="api-\d" ;;
    web)   name+="web-\d" ;;
    fe)    name+="fe-\d" ;;
    store) name+="api-store-\d" ;;
    *)     return 1 ;;
  esac
  # BUGFIX: take only the first match and quote everything -- previously
  # an empty or multi-line `docker ps` result mangled the exec call.
  local id
  id=$(docker ps -qf "$name" | head -n 1)
  [ -n "$id" ] || return 1
  docker exec -it "$id" sh
}
# Attach the terminal to the matching detmir container's main process.
# $1 - one of: api | web | fe | store; returns 1 on any other value
#      or when no matching container is running.
docker_attach() {
  local name="name=^detmir-"
  case $1 in
    api)   name+="api-\d" ;;
    web)   name+="web-\d" ;;
    fe)    name+="fe-\d" ;;
    store) name+="api-store-\d" ;;
    *)     return 1 ;;
  esac
  # BUGFIX: take only the first match and quote everything -- previously
  # an empty or multi-line `docker ps` result mangled the attach call.
  local id
  id=$(docker ps -qf "$name" | head -n 1)
  [ -n "$id" ] || return 1
  docker attach "$id"
}
# Remove every local docker image.
# NOTE(review): with zero images, xargs still invokes `docker rmi` with no
# arguments and prints a usage error -- confirm whether that is acceptable.
docker_rmiall(){
docker images -q | xargs docker rmi
}
# Prune unused docker data (stopped containers, dangling images, networks)
# without the interactive confirmation prompt.
docker_prune(){
docker system prune -f
}
# Open an interactive redis-cli inside the detmir-api-store container.
# NOTE(review): $id is a global and is unquoted below; the exec fails
# confusingly when no container matches -- consider guarding.
store() {
id=$(docker ps -f "name=detmir-api-store" --format "{{.ID}}")
docker exec -it $id redis-cli
}
# Stop the first running detmir container of the given kind.
# $1 - one of: api | web | fe | store.
docker_stop() {
  local name="detmir-"
  case $1 in
    api)   name+="api" ;;
    web)   name+="web" ;;
    fe)    name+="fe" ;;
    store) name+="api-store" ;;
    *)
      # BUGFIX: an unknown (or missing) argument used to fall through
      # with name="detmir-" and stop an arbitrary detmir container;
      # reject it instead.
      echo "unknown target: $1" >&2
      return 1
      ;;
  esac
  echo "stopping $name"
  local id
  id=$(docker ps -f "name=$name" --format "{{.ID}}" | head -n 1)
  # Nothing to stop: report failure instead of calling `docker stop` bare.
  [ -n "$id" ] || return 1
  docker stop "$id"
}
# Create and switch to a feature branch for JIRA ticket GO-<n>.
# $1 - ticket number (e.g. 1234 -> feature/GO-1234).
new_branch(){
  # BUGFIX: run git directly instead of eval'ing an interpolated string,
  # which word-split the argument and would execute anything embedded in it.
  git checkout -b "feature/GO-$1"
}
alias grep='rg'
alias mpv='mpv "$(fzf)"'
alias codev='git checkout develop'
alias coma='git checkout master'
alias codot='git checkout .'
alias gc='git branch | fzf | xargs git checkout'
alias gd='git diff'
alias gl='git log'
alias gb='git checkout -'
alias gas='git add src'
alias gam='git commit --amend --no-edit'
alias ga='git add src && git commit --amend --no-edit'
alias gs='git status'
alias gp='git push'
alias gpt='git push --follow-tags'
alias gpf='git push -f'
alias gcb='git branch --show-current'
alias grb='git rebase develop'
alias run='npm run dev'
# Download a Yandex Music album with youtube-dl, authenticating via an
# exported cookies file; tracks are saved as "<playlist>/<idx>_<title>".
# $1 - album id appended to the URL.
# NOTE(review): eval on an interpolated string -- a crafted $1 would be
# executed by the shell; confirm input is always a bare album id.
yamusic(){
# for i in 1 2 3; do yamusic $i; done
cmd="youtube-dl --cookies yandex.ru_cookies.txt -o '%(playlist_title)s/%(playlist_index)s_%(title)s.%(ext)s' https://music.yandex.ru/album/$1"
eval ${cmd}
}
# Toggle the Cisco AnyConnect VPN: disconnect when currently connected,
# otherwise connect using SECRET_VPNPASS / SECRET_VPNHOST from the
# environment (expected to be defined elsewhere in this rc file).
vpn(){
# grep -c also prints the match count to stdout as a side effect of the test.
if (/opt/cisco/anyconnect/bin/vpn state | grep -c "state: Connected"); then
/opt/cisco/anyconnect/bin/vpn disconnect
else
# Feed "password<newline>y" to the client's interactive prompt via stdin.
printf "${SECRET_VPNPASS}\ny" | /opt/cisco/anyconnect/bin/vpn -s connect $SECRET_VPNHOST
fi
}
# Copy the dotfiles tracked in ~/code/dots from $HOME, strip secrets from
# the copied .zshrc, and commit + push the snapshot.
backup(){
mkdir -p ~/code/dots/.config/mpv
mkdir -p ~/code/dots/.config/git
# mkdir -p ~/code/dots/.config/nvim
# mkdir -p ~/code/dots/.config/alacritty
cp ~/{.zshrc,.gitconfig,.tmux.conf,.vimrc} ~/code/dots/
cp ~/.config/mpv/mpv.conf ~/code/dots/.config/mpv
# cp ~/.config/alacritty/alacritty.yml ~/code/dots/.config/alacritty
cp ~/.config/git/ignore ~/code/dots/.config/git/ignore
# cp ~/.config/nvim/{init.vim,coc-settings.json} ~/code/dots/.config/nvim
cd ~/code/dots
# such security
# NOTE(review): `sed -i ''` is the BSD/macOS form (GNU sed would treat ''
# as a script); deletes every line starting with SECRET from the copy only.
sed -i '' '/^SECRET/d' ~/code/dots/.zshrc
git add .
git commit -m "backup dotfiles $(date)"
git push
cd -
}
[ -f ~/.fzf.zsh ] && source ~/.fzf.zsh
| true |
7efd66618dd83a33ba8439811224803597832678 | Shell | mazzn/freerip | /src/header | UTF-8 | 211 | 2.78125 | 3 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | #!/usr/local/bin/bash -e
# Config
# DEVICE: the optical device node whose filesystem LABEL matches $1; the
# sed strips an "iso9660/" prefix from the argument, and cut keeps the
# device path from blkid's "DEV: ..." output.
DEVICE=$(blkid -t LABEL="$(echo $1 | sed 's|iso9660/||')" /dev/cd* | cut -d ":" -f 1)
DISCNAME=UNKNOWN
OUTPUTDIR=/mnt/freerip
LOGDIR=${OUTPUTDIR}/logs
# arg parsing
CDEV="$1"
| true |
5c7016ec560658c2e4f685cb53d87810c54453e3 | Shell | mcgin/pass-gen | /pass-gen.sh | UTF-8 | 219 | 3.296875 | 3 | [] | no_license | wordcount=$(grep -c . wordlist.txt)
# Build a passphrase of $1 words drawn from wordlist.txt; $wordcount is
# the number of non-empty lines, computed just above this block.
result=""
i=1
while [ "$i" -le "$1" ]; do
# Pick a random 1-based line number.
# NOTE(review): bash $RANDOM is 0..32767, so word lists longer than 32768
# lines are partially unreachable and the pick is biased -- confirm the
# list size is small enough for this to be acceptable.
n=$(( ( RANDOM % $wordcount ) + 1 ))
# Extract line n (head/tail pair) and append it.
result+=$(head -$n wordlist.txt | tail -1)
result+=" "
i=$(($i + 1))
done
# Unquoted echo collapses the trailing space added in the loop.
echo $result
| true |
d0017bd766ae72b3abb757ac8b4f0c0dd74c9331 | Shell | mshitrit/assisted-service | /deploy/operator/setup_hive.sh | UTF-8 | 2,184 | 3.828125 | 4 | [
"Apache-2.0"
] | permissive | __dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Shared helpers: wait_for_operator, wait_for_crd, wait_for_pod, ...
source ${__dir}/utils.sh
# Fail fast: unset variables, failed pipeline stages and errors abort;
# xtrace echoes every command for CI logs.
set -o nounset
set -o pipefail
set -o errexit
set -o xtrace
# Environment knobs with defaults.
DISCONNECTED="${DISCONNECTED:-false}"
HIVE_IMAGE="${HIVE_IMAGE:-registry.ci.openshift.org/openshift/hive-v4.0:hive}"
# Print usage, listing the callable entry points; in disconnected mode the
# mandatory environment variables are shown as part of the invocation.
function print_help() {
    local all_funcs="with_olm|from_upstream|print_help"
    if [ "${DISCONNECTED}" == "true" ]; then
        echo "Usage: DISCONNECTED=true AUTHFILE=... LOCAL_REGISTRY=... bash ${0} (${all_funcs})"
    else
        echo "Usage: bash ${0} (${all_funcs})"
    fi
}
if [ "${DISCONNECTED}" = "true" ] && [ -z "${AUTHFILE:-}" ]; then
echo "On disconnected mode, you must provide AUTHFILE env-var."
print_help
exit 1
fi
if [ "${DISCONNECTED}" = "true" ] && [ -z "${LOCAL_REGISTRY:-}" ]; then
echo "On disconnected mode, you must provide LOCAL_REGISTRY env-var."
print_help
exit 1
fi
# Install Hive through OLM: apply a Subscription in openshift-operators,
# then block until the operator pod and its ClusterDeployment CRD exist.
# Not implemented for disconnected clusters (no mirrored catalog source).
function with_olm() {
    if [ "${DISCONNECTED}" = "true" ]; then
        echo "Not yet implemented"
        return 1
    fi
    # Subscription to the community hive-operator; installPlanApproval
    # Automatic lets OLM upgrade it without manual approval.
    cat <<EOCR | oc apply -f -
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: hive-operator
  namespace: openshift-operators
spec:
  installPlanApproval: Automatic
  name: hive-operator
  source: community-operators
  sourceNamespace: openshift-marketplace
EOCR
    # Helpers come from utils.sh sourced at the top of this script.
    wait_for_operator "hive-operator" "openshift-operators"
    wait_for_crd "clusterdeployments.hive.openshift.io"
    echo "Hive installed successfully!"
}
# Build and deploy Hive from its upstream git repository.
# HIVE_DIR / HIVE_BRANCH may be overridden from the environment.
# In disconnected mode the published image is mirrored into LOCAL_REGISTRY
# first, and IMG points `make deploy` at the mirrored copy.
function from_upstream() {
    HIVE_DIR="${HIVE_DIR:-${HOME}/go/src/github.com/openshift/hive}"
    HIVE_BRANCH="${HIVE_BRANCH:-master}"
    if [ ! -d "${HIVE_DIR}" ]; then
        git clone https://github.com/openshift/hive.git "${HIVE_DIR}"
    fi
    pushd ${HIVE_DIR}
    # Force the checkout to exactly match the requested branch tip.
    git fetch origin "${HIVE_BRANCH}"
    git reset --hard FETCH_HEAD
    if [ "${DISCONNECTED}" = "true" ]; then
        export IMG="${LOCAL_REGISTRY}/localimages/hive:latest"
        oc image mirror \
            -a ${AUTHFILE} \
            ${HIVE_IMAGE} \
            ${IMG}
    else
        export IMG="${HIVE_IMAGE}"
    fi
    make deploy
    # wait_for_pod comes from utils.sh sourced at the top of this script.
    wait_for_pod "hive-operator" "hive" "control-plane=hive-operator"
    wait_for_pod "hive-controllers" "hive" "control-plane=controller-manager"
    popd
}
# Dispatch: run the function named on the command line.  On an unknown name,
# print usage and abort.  Braces, not parentheses: the original ran
# `(print_help && exit 1)` in a subshell, so the exit never took effect and
# the bogus name was still executed afterwards.
declare -F "$@" || { print_help; exit 1; }
"$@"
| true |
2dc8b5a62a006781cbf7da7cd7a12c2024f750d0 | Shell | openshift/release | /ci-operator/step-registry/storage/conf/wait-for-csi-driver/storage-conf-wait-for-csi-driver-commands.sh | UTF-8 | 1,063 | 3.53125 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
set -x
# For disconnected or otherwise unreachable environments, we want to
# have steps use an HTTP(S) proxy to reach the API server. This proxy
# configuration file should export HTTP_PROXY, HTTPS_PROXY, and NO_PROXY
# environment variables, as well as their lowercase equivalents (note
# that libcurl doesn't recognize the uppercase variables).
if test -f "${SHARED_DIR}/proxy-conf.sh"
then
# shellcheck disable=SC1090
source "${SHARED_DIR}/proxy-conf.sh"
fi
# Poll until the ClusterCSIDriver object exists (no timeout: the surrounding
# CI step is expected to enforce one).
echo "Waiting for the ClusterCSIDriver $CLUSTERCSIDRIVER to get created"
while true; do
    oc get clustercsidriver $CLUSTERCSIDRIVER -o yaml && break
    sleep 5
done

# Turn the whitespace-separated TRUECONDITIONS list into repeated
# --for=condition=<name> flags; ARGS is expanded unquoted below on purpose
# so the flags word-split back into separate arguments.
ARGS=""
for CND in $TRUECONDITIONS; do
    ARGS="$ARGS --for=condition=$CND"
done

echo "Waiting for the ClusterCSIDriver $CLUSTERCSIDRIVER conditions $ARGS"
if ! oc wait --timeout=300s $ARGS clustercsidriver $CLUSTERCSIDRIVER; then
    # Wait failed: dump the object for debugging, then fail the step.
    # (Fixed typo in the log message: "ClusterCISDriver" -> "ClusterCSIDriver".)
    echo "Wait failed. Current ClusterCSIDriver:"
    oc get clustercsidriver $CLUSTERCSIDRIVER -o yaml
    exit 1
fi
| true |
b40efa303348fa5c5f68cb010dd60c13efb69f33 | Shell | creeperlv/LWSwnS | /LinuxInstall.sh | UTF-8 | 555 | 2.921875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
echo '#######################################'
echo '#Experimental Install Script for Linux#'
echo '#######################################'
#if [ `whoami` = "root" ];then
#	echo "You are in sudo"
#
#else
#	echo "Please run in sudo"
#fi
# Abort early when the .NET SDK is absent; everything below depends on it.
command -v dotnet >/dev/null 2>&1 || { echo >&2 "This application relies on .NET Core 3.0 or newer."; exit 1;}
# Build in a scratch directory; -p keeps a rerun from failing on mkdir.
mkdir -p temp
# Every cd is guarded: cloning, building or moving files from the wrong
# directory would be worse than aborting (the original never checked).
cd temp || exit 1
git clone https://github.com/creeperlv/LWSwnS.git
cd LWSwnS/LWSwnS || exit 1
dotnet build
# Return to the script's starting directory (three levels up).
cd ../../.. || exit 1
mv -f temp/LWSwnS/LWSwnS/LWSwnS/bin/Debug/netcoreapp3.0/ ./LWSwnS/
rm -rf temp
9a593fd929a66201ad416287fedb950ba088f6c4 | Shell | PhotonQuantum/mongo-rust-driver | /.evergreen/run-driver-benchmarks.sh | UTF-8 | 489 | 2.765625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/sh
set -o errexit
# Make the rust toolchain available in this non-login CI shell.
. ~/.cargo/env
# Map the CI variable ASYNC_RUNTIME onto the crate feature that selects the
# matching async runtime; any other value is a configuration error.
FEATURES=""
if [ "$ASYNC_RUNTIME" = "tokio" ]; then
    FEATURES="tokio-runtime"
elif [ "$ASYNC_RUNTIME" = "async-std" ]; then
    FEATURES="async-std-runtime"
else
    echo "invalid async runtime: ${ASYNC_RUNTIME}" >&2
    exit 1
fi
cd benchmarks
# Run the benchmark harness; results are written one directory up so they
# survive independently of the benchmarks/ working tree.
cargo run \
    --release \
    --no-default-features \
    --features ${FEATURES} \
    -- --output="../benchmark-results.json" --single --multi --parallel
# Echo the results into the CI log.
cat ../benchmark-results.json
| true |
daf91d5e049f364d5e7895fbfeb18cb347a66b69 | Shell | HRahman1777/bash_scripting | /LabR6/function.sh | UTF-8 | 174 | 3.03125 | 3 | [] | no_license | #!/bin/bash
# Demo functions: two that print a fixed line, and one that adds its two
# arguments, leaving the result in the global variable `add`.
funOne(){
	echo "This is function one"
}
funTwo(){
	echo "This is function two"
}
# addFun N M -> sets global `add` to N+M.  The operands are declared local
# so the helper no longer leaks scratch variables a/b into the caller.
addFun(){
	local a=$1
	local b=$2
	add=$(( a + b ))
}
funOne
funTwo
addFun 4 5
echo "$add"
| true |
da3cccabe545d8624e2d925790cf2bce67e87d5d | Shell | dievri/inferno-sh-learning | /006_fn.sh | UTF-8 | 289 | 2.703125 | 3 | [] | no_license | #!/dis/sh.dis
load std
# Inferno shell (dis/sh) demo — NOT bash.  `fn` defines a function; `~ a b`
# is the match builtin; $#name is the element count of the `name` list, so
# the condition is true when no argument was supplied.
fn greet {
	name = $1
	if {~ $#name 0} {name = World}
	echo Hello, $name!
}
greet
greet everybody
# whatis prints the stored definition of the function.
whatis greet
### Output
# ; ./006_fn.sh
# Hello, World!
# Hello, everybody!
# load std; fn greet {name=$1;if {~ $#name 0} {name=World};echo Hello, $name^!}
cd79a3df5a39dc1ae848a6722d16f4aa73c13488 | Shell | voobscout/docker-cryfs-share | /entrypoint.sh | UTF-8 | 998 | 3.34375 | 3 | [] | no_license | #!/bin/bash
set -o nounset
# Container entrypoint: derive a CryFS password from $1, mount the encrypted
# store at /exports, share it over NFS and Samba, then block on inotifywait
# so the container stays up (and logs file events).
# sha256sum prints "<hash>  -"; cut/awk strip the trailing "-" and spaces.
export passwd=$(echo "${1}" | sha256sum | cut -f 1 -d '-' | awk '{gsub(/^ +| +$/,"")} {print $0}')
echo ${passwd} > /etc/default/cryfs_passwd
# Create the export directories and the samba user used for the CIFS share.
_setup() {
! [ -d /.exports ] && mkdir -p /.exports
! [ -d /exports ] && mkdir -p /exports
! [ -d /run/sendsigs.omit.d/rpcbind ] && mkdir -p /run/sendsigs.omit.d/rpcbind
useradd cryfs -M
# NOTE(review): hard-coded Samba password; consider reading it from the
# environment instead.
echo "samba123" | tee - | smbpasswd -s -a cryfs
}
# First run: create the encrypted store, auto-accepting cryfs prompts.
cryfs_new() {
yes y | cryfs /.exports /exports --extpass "cat /etc/default/cryfs_passwd" -- -o allow_other
}
# Subsequent runs: mount the already-initialised store.
cryfs_mount() {
cryfs /.exports /exports --extpass "cat /etc/default/cryfs_passwd" -- -o allow_other
}
_nfs() {
. /etc/default/nfs-kernel-server
. /etc/default/nfs-common
service rpcbind start
service nfs-kernel-server start
}
_cifs() {
service smbd start
}
_setup
# A readable cryfs.config means the store exists -> mount it; otherwise
# create it.  (A && B || C pitfall: cryfs_mount also runs if cryfs_new
# fails — acceptable here, it just fails loudly on a fresh store.)
! [ -r /.exports/cryfs.config ] && cryfs_new || cryfs_mount
_nfs
_cifs
# NOTE(review): in --timefmt, %M is *minutes*; presumably %m (month) was
# intended — confirm before relying on the logged dates.
exec inotifywait --timefmt "%d.%M.%Y %H:%M:%S" --format "[%T] - %w%f [%:e]" -rm /exports
| true |
3c8a189b23bb6fab236e1c7e45409eb92b02f0e1 | Shell | fengpiaohongjishuzhongxin/download | /.build/build-v1.sh | UTF-8 | 4,010 | 3.625 | 4 | [] | no_license | #!/bin/bash
set -e
CODENAME=v1
DISTRIBUTION=dists/${CODENAME}
# define the file filters used to determine all releases and snapshots for version 0.x and 1.x
# These are shell glob patterns, expanded unquoted by the ls/cp calls below
# (matching e.g. pi4j-1.2.deb and pi4j-1.2-SNAPSHOT.deb).
FILE_FILTER_PREFIX="pi4j-[0.1]*\.[0-9]"
RELEASE_FILE_FILTER="${FILE_FILTER_PREFIX}.deb"
TESTING_FILE_FILTER="${FILE_FILTER_PREFIX}-SNAPSHOT.deb"
# clean and create working directories
# `|| true` keeps `set -e` from aborting on a first run, when the
# directories do not exist yet.
rm -R {${DISTRIBUTION},tmp} || true
mkdir -p ${DISTRIBUTION}/{stable,testing}/binary-all
mkdir -p tmp
#----------------------------------------
# [V1] DISTRIBUTION [STABLE] COMPONENT
#----------------------------------------
# define constant for [STABLE] component
COMPONENT=stable
echo "------------------------------------"
echo "BUILDING Pi4J APT REPOSITORY FOR: "
echo " > ${DISTRIBUTION}/${COMPONENT}"
echo "------------------------------------"
echo "THE FOLLOWING FILES WILL BE INCLUDED:"
ls ${RELEASE_FILE_FILTER} || true
echo "------------------------------------"
# copy all Pi4J V1.x release|stable distribution packages (.deb) to temporary working directory
cp ${RELEASE_FILE_FILTER} tmp || true
# create 'Package' file for the [V1] distribution [STABLE] component
dpkg-scanpackages --multiversion --extra-override .build/pi4j.override tmp > ${DISTRIBUTION}/${COMPONENT}/binary-all/Packages
# remove "tmp/" root path from "Filename" in Packages file
sed -i 's/^Filename: tmp\//Filename: /g' ${DISTRIBUTION}/${COMPONENT}/binary-all/Packages
# create compressed Packages file for the [V1] distribution [STABLE] component
gzip -k -f ${DISTRIBUTION}/${COMPONENT}/binary-all/Packages
# create Release files for the [V1] distribution [STABLE] component
apt-ftparchive release ${DISTRIBUTION}/${COMPONENT}/binary-all > ${DISTRIBUTION}/${COMPONENT}/binary-all/Release
#----------------------------------------
# [V1] DISTRIBUTION [TESTING] COMPONENT
#----------------------------------------
# define constant for [TESTING] component
COMPONENT=testing
# clean temporary working directory
rm -R tmp/* || true
echo "------------------------------------"
echo "BUILDING Pi4J APT REPOSITORY FOR: "
echo " > ${DISTRIBUTION}/${COMPONENT}"
echo "------------------------------------"
echo "THE FOLLOWING FILES WILL BE INCLUDED:"
ls ${TESTING_FILE_FILTER} || true
echo "------------------------------------"
# copy all Pi4J testing|snapshot distribution packages (.deb) to temporary working directory
cp ${TESTING_FILE_FILTER} tmp || true
# create 'Package' file for the [V1] distribution [TESTING] component
dpkg-scanpackages --multiversion --extra-override .build/pi4j.override tmp > ${DISTRIBUTION}/${COMPONENT}/binary-all/Packages
# remove "tmp/" root path from "Filename" in Packages file
sed -i 's/^Filename: tmp\//Filename: /g' ${DISTRIBUTION}/${COMPONENT}/binary-all/Packages
# create compressed Packages file for the [V1] distribution [TESTING] component
gzip -k -f ${DISTRIBUTION}/${COMPONENT}/binary-all/Packages
# create Release files for the [V1] distribution [TESTING] component
apt-ftparchive release ${DISTRIBUTION}/${COMPONENT}/binary-all > ${DISTRIBUTION}/${COMPONENT}/binary-all/Release
#----------------------------------------
# CREATE AND SIGN [V1] RELEASE
#----------------------------------------
# create Release files for the [V1] distribution
apt-ftparchive \
-o APT::FTPArchive::Release::Origin="https://pi4j.com/download" \
-o APT::FTPArchive::Release::Label="The Pi4J Project" \
-o APT::FTPArchive::Release::Suite="${CODENAME}" \
-o APT::FTPArchive::Release::Codename="${CODENAME}" \
-o APT::FTPArchive::Release::Architectures="all" \
-o APT::FTPArchive::Release::Components="stable testing" \
release ${DISTRIBUTION} > ${DISTRIBUTION}/Release
# import PGP private key from file
gpg --import pi4j.key
# sign Release files for the [V1] distribution
gpg --default-key "team@pi4j.com" -abs -o - ${DISTRIBUTION}/Release > ${DISTRIBUTION}/Release.gpg
gpg --default-key "team@pi4j.com" --clearsign -o - ${DISTRIBUTION}/Release > ${DISTRIBUTION}/InRelease
# clean and remove temporary working directory
rm -R tmp
| true |
09db7c4d8bd9ecbcb66530d7f55c8461ab439a92 | Shell | fenech2000/photovideo | /rotate180.bash | UTF-8 | 415 | 3.15625 | 3 | [] | no_license | #!/bin/bash
# TODO : faire un vrai multithreading
# Rotate every photo 180° with ImageMagick, running up to $nbthread convert
# jobs in parallel and waiting for each batch before starting the next.
nbthread=6
cpt=0
# Iterate over the glob directly: the original `for f in ls <glob>` also
# iterated over the literal word "ls", feeding a bogus filename to convert.
for f in "$HOME"/Documents/timelapse/photos/*
do
	echo "$f"
	# Strip the extension: everything from the first dot on (same result as
	# the original `cut -d'.' -f 1` on the full path).
	f2=${f%%.*}
	echo "$f2" &
	/usr/bin/convert "$f" -rotate 180 -orient TopLeft "${f2}.180.JPG" && rm "$f" &
	#rm $f
	((cpt++))
	if (( cpt >= nbthread )); then
		echo "echo $cpt en cours, on attend...."
		wait
		cpt=0
	fi
done
# Reap the last (partial) batch so the script does not exit with
# conversions still running — the original skipped this.
wait
| true |
dcde2141a1b7709fdc3c16ece9b434eef7af6590 | Shell | TottiPuc/speech-recognition-with-PNCC | /CreatePhones/Script_CreatePhones_TIMIT.sh | UTF-8 | 7,620 | 2.90625 | 3 | [] | no_license | #!/bin/bash
################################################
#==============================================#
##### Christian Dayan Arcos Gordillo ##########
##### speech recognition #########
####### CETUC - PUC - RIO ##########
##### christian@cetuc.puc-rio.br ########
##### dayan3846@gmail.com.co ########
#==============================================#
################################################
clear
USE_CROSSWORD_TRIPHONE=0
USE_PHONE_FILES_FOR_TRANSCRIPTION=0
echo " *** Making a monophones0 nad monophones1 files (without and with short pauses)****"
DB=$1/productsDatabase/DatabaseTIMIT
OUT=$1/products/htk/phonesTIMIT
OUTList=$1/products/htk
sour=$2/CreatePhones
#****************************************************************************************#
#************ make monophones0 and monophones1 files from monphones file ***************#
cp $DB/monophones.txt $OUT/monophones0.tmp
cp $DB/monophones.txt $OUT/monophones1.tmp
cat $OUT/monophones0.tmp | tr -d "\r" > $OUT/monophones0.txt
cat $OUT/monophones1.tmp | tr -d "\r" > $OUT/monophones1.txt
rm -rf $OUT/monophones0.tmp $OUT/monophones1.tmp
echo "sil" >> $OUT/monophones0.txt
echo "sil" >> $OUT/monophones1.txt
echo "sp" >> $OUT/monophones1.txt
#****************************************************************************************#
#************ prepare special dictionary and wordnet for monophone test ***************#
touch $OUT/dictionaryForPhonesTest.txt
touch $OUT/grammarPhones.txt
$sour/createPhones.py $OUT/monophones0.txt $OUT/dictionaryForPhonesTest.txt $OUT/grammarPhones.txt
HParse $OUT/grammarPhones.txt $OUT/wordNetPhones.txt
echo ""
echo " *** Listing words of train/test sentences in MLF file ***"
echo ""
touch $OUTList/wordsInTrainSentencesTIMIT.tmp
touch $OUTList/wordsInTestSentencesTIMIT.tmp
touch $OUTList/TrainSentencesTIMIT.txt
touch $OUTList/TestSentencesTIMIT.txt
#train
# Build an HTK Master Label File (MLF) from the training transcriptions:
# a "#!MLF!#" header, then per sentence a quoted "*/<name>" label, one word
# per line (tr squeezes spaces into newlines), terminated by ".".
echo "#!MLF!#" >> $OUTList/wordsInTrainSentencesTIMIT.tmp
find $DB/DatabaseComplet8KHz/Train/ -name "*stc.txt" | while read line
do
# NOTE(review): field 9 of the '/'-separated path is assumed to be the file
# name — this depends on the exact depth of $DB; verify if $DB ever moves.
nam=`ls $line | cut -d '/' -f 9`
echo "\"*/$nam\"" >> $OUTList/wordsInTrainSentencesTIMIT.tmp
cat $line | tr -s " " "\012" >> $OUTList/wordsInTrainSentencesTIMIT.tmp
echo "." >> $OUTList/wordsInTrainSentencesTIMIT.tmp
sen=`cat $line`
echo "\"*/$nam\" $sen" >> $OUTList/TrainSentencesTIMIT.txt
done
# Strip carriage returns and drop empty lines to produce the final MLF.
cat $OUTList/wordsInTrainSentencesTIMIT.tmp | tr -d "\r" | sed '/./!d' > $OUTList/wordsInTrainSentencesTIMIT.txt #delet character ascii
#test
echo "#!MLF!#" >> $OUTList/wordsInTestSentencesTIMIT.tmp
find $DB/DatabaseComplet8KHz/Test/ -name "*stc.txt" | while read line
do
nam=`ls $line | cut -d '/' -f 9`
echo "\"*/$nam\"" >> $OUTList/wordsInTestSentencesTIMIT.tmp
cat $line | tr -s " " "\012" >> $OUTList/wordsInTestSentencesTIMIT.tmp
echo "." >> $OUTList/wordsInTestSentencesTIMIT.tmp
sen=`cat $line`
echo "\"*/$nam\" $sen" >> $OUTList/TestSentencesTIMIT.txt
done
cat $OUTList/wordsInTestSentencesTIMIT.tmp | tr -d "\r" | sed '/./!d' > $OUTList/wordsInTestSentencesTIMIT.txt #delet character ascii
echo ""
echo " *** listing phones of /train/test sentences in MFL file***"
echo ""
touch $OUT/dictionaryWithShortPause.txt
cat $DB/dictionary.txt | sed 's/$/ sp/g' >> $OUT/dictionaryWithShortPause.txt
echo "!ENTER sil" >> $OUT/dictionaryWithShortPause.txt
echo "!EXIT sil" >> $OUT/dictionaryWithShortPause.txt
echo ""
echo " *** create a master label file manually, comparing phone and word file***"
echo ""
if [ $USE_PHONE_FILES_FOR_TRANSCRIPTION -eq 1 ]
then
touch $OUT/phonesInTrainSentences0.tmp
touch $OUT/phonesInTrainSentences1.tmp
echo "#!MLF!#" >> $OUT/phonesInTrainSentences0.tmp
echo "#!MLF!#" >> $OUT/phonesInTrainSentences1.tmp
ls $DB/DatabaseComplet8KHz/Train/*.phn.txt > $OUT/list1.tmp
ls $DB/DatabaseComplet8KHz/Train/*.wrd.txt > $OUT/list2.tmp
cat $OUT/list1.tmp | while read line2
do
nom=`ls $line2 | cut -d '/' -f 9`
echo "\"*/$nom\"" >> $OUT/phonesInTrainSentences0.tmp
cat $line2 >> $OUT/phonesInTrainSentences0.tmp
echo "." >> $OUT/phonesInTrainSentences0.tmp
#echo "\"*/$nam\"" >> $OUTList/phonesInTrainSentences1
done
$sour/createMasterLabels.py $OUT/list2.tmp $OUT/list1.tmp $OUT/phonesInTrainSentences1.tmp
cat $OUT/phonesInTrainSentences1.tmp | sed 's/\/home\/christianlab\/reconhecedor_CETUC\/productsDatabase\/DatabaseTIMIT\/DatabaseComplet8KHz\/Train//g' > $OUT/phonesInTrainSentences1.txt
echo "" >> $OUT/phonesInTrainSentences1.txt
echo "" >> $OUT/phonesInTrainSentences0.tmp
mv $OUT/phonesInTrainSentences0.tmp $OUT/phonesInTrainSentences0.txt
else
touch $OUT/phonesInSentencesConfiguration0.txt
touch $OUT/phonesInSentencesConfiguration1.txt
echo "EX" >> $OUT/phonesInSentencesConfiguration0.txt
echo "IS sil sil" >> $OUT/phonesInSentencesConfiguration0.txt
echo "DE sp" >> $OUT/phonesInSentencesConfiguration0.txt
echo "" >> $OUT/phonesInSentencesConfiguration0.txt
echo "EX" >> $OUT/phonesInSentencesConfiguration1.txt
echo "IS sil sil" >> $OUT/phonesInSentencesConfiguration1.txt
echo "" >> $OUT/phonesInSentencesConfiguration1.txt
HLEd -T 0 -X phn.txt -l '*' -d $OUT/dictionaryWithShortPause.txt -i $OUT/phonesInTrainSentences0.tmp $OUT/phonesInSentencesConfiguration0.txt $OUTList/wordsInTrainSentencesTIMIT.txt
#echo "" >> $OUT/phonesInTrainSentences0.tmp
HLEd -T 0 -X phn.txt -l '*' -d $OUT/dictionaryWithShortPause.txt -i $OUT/phonesInTrainSentences1.tmp $OUT/phonesInSentencesConfiguration1.txt $OUTList/wordsInTrainSentencesTIMIT.txt
#echo "" >> $OUT/phonesInTrainSentences1.tmp
HLEd -T 0 -X phn.txt -l '*' -d $OUT/dictionaryWithShortPause.txt -i $OUT/phonesInTestSentences0.tmp $OUT/phonesInSentencesConfiguration0.txt $OUTList/wordsInTestSentencesTIMIT.txt
echo "" >> $OUT/phonesInTestSentences0.tmp
cat $OUT/phonesInTrainSentences0.tmp | sed 's/stc.phn.txt/phn.txt/g' > $OUT/phonesInTrainSentences0.txt
cat $OUT/phonesInTrainSentences1.tmp | sed 's/stc.phn.txt/phn.txt/g' > $OUT/phonesInTrainSentences1.txt
cat $OUT/phonesInTestSentences0.tmp | sed 's/stc.phn.txt/phn.txt/g' > $OUT/phonesInTestSentences0.txt
fi
#*************************************************************************************************************************************************************************************#
#************************************ generating all posible triphonescombinations (and not only the ones in sentences) **************************************************************#
echo "*** Generating all popsible triphones ***"
cp $OUT/monophones1.txt $OUT/triphonesAllCombinations.txt
$sour/generateTriphones.py $OUT/monophones1.txt $OUT/triphonesAllCombinations.txt
touch $OUT/silenceConfiguration.txt
echo "AT 2 4 0.2 {sil.transP}" >> $OUT/silenceConfiguration.txt
echo "AT 4 2 0.2 {sil.transP}" >> $OUT/silenceConfiguration.txt
echo "AT 1 3 0.3 {sp.transP}" >> $OUT/silenceConfiguration.txt
echo "TI silst {sil.state[3],sp.state[2]}" >> $OUT/silenceConfiguration.txt
touch $OUT/modelCloneForTriphoneConfiguration.txt
echo "CL $OUT/triphones1.txt" >> $OUT/modelCloneForTriphoneConfiguration.txt
$sour/confSilence.py $OUT/monophones1.txt $OUT/modelCloneForTriphoneConfiguration.txt
touch $OUT/mergeSpSilConfiguration.txt
echo "ME sil sp sil" >> $OUT/mergeSpSilConfiguration.txt
echo "ME sil sil sil" >> $OUT/mergeSpSilConfiguration.txt
echo "ME sp sil sil" >> $OUT/mergeSpSilConfiguration.txt
touch $OUT/triphoneConfiguration.txt
echo "WB sil" >> $OUT/triphoneConfiguration.txt
echo "WB sp" >> $OUT/triphoneConfiguration.txt
if [ $USE_CROSSWORD_TRIPHONE -eq 1 ]
then
echo "NB sp" >> $OUT/triphoneConfiguration.txt
fi
echo "TC" >> $OUT/triphoneConfiguration.txt
rm -f $OUT/*.tmp $OUTList/*.tmp
| true |
0490ff18b347a5fe9edafc3f6ca03941161ca99d | Shell | ximenpo/simple-cpp | /tools/run_test_cases.sh | UTF-8 | 588 | 3.125 | 3 | [
"MIT"
] | permissive | # ! /bin/bash
# Pick test sources: all tests/test_*.cpp by default, or only $1 when given.
# (The assignment does not glob; test_files holds the literal pattern and is
# expanded later on the gcc command line.)
test_files=test_*
if [ -n "$1" ]; then
	test_files=$1
fi
# Remove stale build artifacts.
rm -f _test.exe
# The original `[ -f "*.module" ]` tested the literal file name "*.module",
# so old modules were never cleaned; use a real glob with rm -f instead.
rm -f ./*.module
gcc tests/dummy_module.cpp -shared -o dummy.module -D NDEBUG -lstdc++
gcc inc/simple/*.cpp inc/simple/*.c tests/main.cpp tests/$test_files.cpp -o _test.exe -D NDEBUG -I inc -I tests -lstdc++
# Only run when the build actually produced the binary.
if [ -f "_test.exe" ]; then
	echo ==========================================
	echo run tests now ...
	./_test.exe --version
	echo ==========================================
	./_test.exe --output=color
fi
| true |
4ab60afa4b34759df92fb1577de18c115305b14e | Shell | srafi1/dotfiles | /scripts/gurl | UTF-8 | 443 | 3.671875 | 4 | [] | no_license | #! /bin/bash
# helper script to get a github url
# gurl [-g] [<user>] <repo> — prints an HTTPS (default) or git@ (-g) GitHub
# URL, defaulting the user to srafi1 when only a repo name is given.
if [ $# -eq 0 ]
then
    # Fixed typo in the default-user hint: srafi1, not sraf1.
    echo "Usage: gurl [-g] [<user:srafi1>] <repo>"
elif [ $# -eq 1 ]
then
    echo "https://github.com/srafi1/$1"
elif [ $# -eq 2 ]
then
    case $1 in
        -g)
            echo "git@github.com:srafi1/$2"
            exit
            ;;
        *)
            echo "https://github.com/$1/$2"
            exit
            ;;
    esac
elif [ $# -eq 3 ]
then
    # Three args: assumed "-g <user> <repo>"; $1 itself is not validated.
    echo "git@github.com:$2/$3"
fi
| true |
b940ebc551aadbd1d1f029158e2fe43431cc765d | Shell | londonc/LMC-BSC | /mount_monitor.sh | UTF-8 | 499 | 3.671875 | 4 | [] | no_license | #!/bin/bash
# LMC
# Mail $AlertTo when a watched mount point is missing or any filesystem is
# at 90% utilization or more.
# Array of mount points to check
mounts=( "/mnt/drive2" "/mnt/drive3" )
AlertTo=you@domain.com
for i in "${mounts[@]}"
do
if mountpoint -q "$i"; then
   echo "$i mounted."
else
   # Double quotes so the subject names the actual mount point; the
   # original single quotes mailed the literal text "$i NOT MOUNTED!!!".
   echo "$i not mounted!" | mailx -s "$i NOT MOUNTED!!!" "$AlertTo"
fi
done
# Check if utilization is greater than 90%
# 0+$5 coerces df's "Use%" column to a number (drops the % sign).
SpaceCheck=$(df | awk '0+$5 >= 90 {print}')
if [ -z "$SpaceCheck" ] ; then
   echo "Utilization okay. "
else
   # printf keeps df's line breaks; the original unquoted echo flattened
   # multiple offending filesystems onto one line.
   printf '%s\n' "$SpaceCheck" | mailx -s 'SPACE ISSUE!!!' "$AlertTo"
fi
exit
| true |
ea9b03f23adcbc31f34d4abe6dadeb5579f40db8 | Shell | nguyentrieulang/K25_CSDL_Cassandra | /import_data_to_mysql.sh | UTF-8 | 1,352 | 3.1875 | 3 | [] | no_license | #!/bin/bash
TEN_BANG=vung_mien
USER=root
PASSWD=123456a@Abc
DB_NAME=che_do_dinh_duong
DATA_PATH=/opt/data
# insert_table <table> — truncate che_do_dinh_duong.<table> and bulk-load it
# from the CSV file $DATA_PATH/<table> (comma-separated, header row skipped).
# (TEN_BANG = table name in Vietnamese.)
insert_table () {
TEN_BANG=$1
DATE=`date`
echo "${DATE}: Start insert $TEN_BANG table "
mysql -u ${USER} -p${PASSWD} -e "truncate ${DB_NAME}.${TEN_BANG};"
mysql -u ${USER} -p${PASSWD} -e "LOAD DATA INFILE '${DATA_PATH}/${TEN_BANG}' INTO TABLE ${DB_NAME}.${TEN_BANG} FIELDS TERMINATED BY ',' LINES TERMINATED BY '\n' IGNORE 1 ROWS;"
DATE=`date`
echo "${DATE}: Finish insert $TEN_BANG success !!!"
}
# insert_dinhduong_table <table> <file> — append-load one CSV chunk into
# <table> without truncating (the caller truncates once, then loads many
# bang_dinh_duong* files in sequence).  TEN_FILE = file name.
insert_dinhduong_table () {
TEN_FILE=$2
TEN_BANG=$1
echo "1: $1, 2: $2"
DATE=`date`
echo "${DATE}: Start insert $TEN_FILE $TEN_BANG table "
#mysql -u ${USER} -p${PASSWD} -e "truncate {DB_NAME}.${TEN_BANG};"
mysql -u ${USER} -p${PASSWD} -e "LOAD DATA INFILE '${DATA_PATH}/${TEN_FILE}' INTO TABLE ${DB_NAME}.${TEN_BANG} FIELDS TERMINATED BY ',' LINES TERMINATED BY '\n' IGNORE 1 ROWS;"
DATE=`date`
echo "${DATE}: Finish insert $TEN_FILE success !!!"
}
#make
#./main
insert_table vung_mien
insert_table loai_mon_an
insert_table mon_an
insert_table bua_an
insert_table nhan_khau
mysql -u ${USER} -p${PASSWD} -e "truncate ${DB_NAME}.bang_dinh_duong;"
CUR_DIR=`pwd`
cd $DATA_PATH
LIST_FILE=`ls -rt bang_dinh_duong*`
for x in ${LIST_FILE} ; do
insert_dinhduong_table bang_dinh_duong $x
done
echo "Insert data FINISH!!!!!!!!!!!!!!!!!!!!!!!"
| true |
ec19e3c57b9717ead4fc1e7c654f6a46cb039052 | Shell | JayWheeler/lms-bash | /LMS/src/LLRBTree/testLLRBNode.bash | UTF-8 | 6,428 | 2.96875 | 3 | [] | no_license | #!/bin/bash
# *******************************************************
# *******************************************************
#
# testLLRBNode.bash
#
# Copyright (C) 2016. EarthWalk Software
#
# By Jay Wheeler.
#
# Version 1.0 - 02-28-2016.
#
# *******************************************************
# *******************************************************
# *******************************************************
# *******************************************************
#
# External Scripts
#
# *******************************************************
# *******************************************************
. externalScriptList.bash
# *******************************************************
# *******************************************************
#
# Application Script below here
#
# *******************************************************
# *******************************************************
# *******************************************************
# *******************************************************
#
# Start main program below here
#
# *******************************************************
# *******************************************************
lmscli_optDebug=0 # (d) Debug output if not 0
lmscli_optSilent=0 # (q) Quiet setting: non-zero for absolutely NO output
lmscli_optBatch=0 # (b) Batch mode - missing parameters fail
silentOverride=0 # set to 1 to lmscli_optOverride the lmscli_optSilent flag
applicationVersion="1.0" # Application version
testErrors=0
# *******************************************************
# *******************************************************
# Bring up the lms error subsystem and its error queue before any test runs.
lmsErrorInitialize
lmsErrorQInit
# NOTE(review): only lmsErrorQInit's exit status is checked here;
# a failure of lmsErrorInitialize is silently ignored.
if [ $? -ne 0 ]
then
	lmsConioDisplay "Unable to initialize error queue."
	exit 1
fi
lmsConioDisplay "*******************************************************"
lmsConioDisplay ""
lmsScriptDisplayName
lmsConioDisplay ""
lmsConioDisplay "*******************************************************"
lmsConioDisplay ""
nodeData="the maid"
nodeName="Bridget"
nodeUID=""
lmsConioDisplay "Creating node: ${nodeName}"
lmsLLRBnCreate "${nodeName}" nodeUID "${nodeData}"
lmsConioDisplay "Created node: ${nodeName} = $nodeUID"
lmsConioDisplay ""
# **********************************************************************
lmsConioDisplay "Getting 'data' element from node: ${nodeName}"
lmsConioDisplay ""
nodeData=$( lmsLLRBnGet "$nodeName" "data" )
if [ $? -eq 1 ]
then
lmsConioDisplay "Unable to get the requested node: ${nodeName}"
else
lmsConioDisplay "NodeData: $nodeData"
fi
lmsConioDisplay "$( lmsLLRBnTS $nodeName )"
lmsConioDisplay ""
lmsConioDisplay "*******************************************************"
lmsConioDisplay ""
# **********************************************************************
nodeData="No longer the maid"
lmsLLRBnSet "${nodeName}" "data" "${nodeData}"
if [ $? -ne 0 ]
then
lmsConioDisplay "Unable to set the requested node: ${nodeName}"
fi
nodeData=$( lmsLLRBnGet "${nodeName}" "data" )
if [ $? -eq 1 ]
then
lmsConioDisplay "Unable to get the requested node: ${nodeName}"
else
lmsConioDisplay "NodeData: $nodeData"
fi
lmsConioDisplay "$( lmsLLRBnTS $nodeName )"
# **********************************************************************
rightnodeData="Bridgets brother"
rightnodeName="Zandar"
rightnodeUID=""
lmsConioDisplay "Creating node: ${rightnodeName}"
lmsLLRBnCreate "${rightnodeName}" rightnodeUID "${rightnodeData}"
lmsLLRBnSet $nodeName "right" $rightnodeName
lmsConioDisplay "$( lmsLLRBnTS $nodeName )"
lmsConioDisplay "$( lmsLLRBnTS $rightnodeName )"
# **********************************************************************
lmsConioDisplay "Copying node: $nodeName to ${rightnodeName}"
lmsLLRBnCopy "$rightnodeName" "$nodeName"
lmsConioDisplay "$( lmsLLRBnTS $nodeName )"
lmsConioDisplay "$( lmsLLRBnTS $rightnodeName )"
# **********************************************************************
lmsConioDisplay ""
lmsConioDisplay "*******************************************************"
lmsConioDisplay ""
llfield="key"
llkey=""
lmsLLRBn_Field "$rightnodeName" $llfield llkey
lmsConioDisplay "Changing '$llfield' in '$rightnodeName' to " -n
llkey=""
llkeyNew="Mark"
lmsConioDisplay "'$llkeyNew'"
lmsLLRBn_Field "$rightnodeName" $llfield llkey "$llkeyNew"
lmsConioDisplay "Key: $llkey"
lmsConioDisplay ""
lmsConioDisplay "*******************************************************"
lmsConioDisplay ""
lmsConioDisplay "$( lmsLLRBnTS $rightnodeName )"
lmsConioDisplay ""
lmsConioDisplay "*******************************************************"
lmsConioDisplay ""
lmsConioDisplay "Changing '$llfield' in '$rightnodeName' to " -n
llkey=""
llkeyNew="Zandar"
lmsConioDisplay "'$llkeyNew'"
lmsLLRBn_Field "$rightnodeName" $llfield llkey "$llkeyNew"
lmsConioDisplay "Key: $llkey"
lmsConioDisplay ""
lmsConioDisplay "*******************************************************"
lmsConioDisplay ""
lmsConioDisplay "$( lmsLLRBnTS $rightnodeName )"
lmsConioDisplay ""
lmsConioDisplay "*******************************************************"
lmsConioDisplay ""
# **********************************************************************
# **********************************************************************
# **********************************************************************
# **********************************************************************
lmsConioDisplay ""
lmsConioDisplay "*******************************************************"
lmsConioDisplay ""
llfield="left"
llkey=""
lmsLLRBn_Field "$rightnodeName" $llfield llkey
lmsConioDisplay "Changing '$llfield' in '$rightnodeName' to " -n
llkey=""
llkeyNew="Zandar"
lmsConioDisplay "'$llkeyNew'"
lmsLLRBn_Field "$rightnodeName" $llfield llkey "$llkeyNew"
lmsConioDisplay "Key: $llkey"
lmsConioDisplay ""
lmsConioDisplay "*******************************************************"
lmsConioDisplay ""
lmsConioDisplay "$( lmsLLRBnTS $rightnodeName )"
lmsConioDisplay ""
lmsConioDisplay "*******************************************************"
lmsConioDisplay ""
# **********************************************************************
lmsConioDisplay "Deleting llrbNode = ${rightnodeName}"
lmsLLRBnDelete "${rightnodeName}"
lmsConioDisplay "Deleting llrbNode = ${nodeName}"
lmsLLRBnDelete "${nodeName}"
#dumpNameTable
# **********************************************************************
errorQueueDisplay 1 0 None
| true |
61c27060ae6ef0acc0998c618c5bdecc17be4236 | Shell | kmjohny/dotfiles | /eclim/install.sh | UTF-8 | 707 | 3.140625 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# Check for eclim
# Download the eclim 2.8.0 installer and run it headlessly against the
# Eclipse Photon install, wiring vim files and the jdt/pydev plugins.
# NOTE(review): /tmp/eclim.bin is a predictable temp path — fine for a
# personal dotfiles script, but mktemp would be safer on shared machines.
ECLIM_DOWNLOAD_URL=https://github.com/ervandew/eclim/releases/download/2.8.0/eclim_2.8.0.bin
ECLIM_DOWNLOAD_FILE=/tmp/eclim.bin
ECLIPSE_HOME=/Applications/eclipse/java-photon/Eclipse.app
if [ -d $ECLIPSE_HOME ]; then
  echo " Installing eclim for you."
  curl -Lo $ECLIM_DOWNLOAD_FILE $ECLIM_DOWNLOAD_URL
  if [ -f $ECLIM_DOWNLOAD_FILE ]; then
    chmod +x $ECLIM_DOWNLOAD_FILE
    # Unattended install: --yes answers prompts automatically.
    $ECLIM_DOWNLOAD_FILE \
      --yes \
      --eclipse=$ECLIPSE_HOME \
      --vimfiles=$HOME/.vim \
      --plugins=jdt,pydev
    rm $ECLIM_DOWNLOAD_FILE
    # Expose eclimd on PATH once, if not already linked.
    if [ ! -f /usr/local/bin/eclimd ]; then
      ln -s $ECLIPSE_HOME/Contents/Eclipse/eclimd /usr/local/bin/eclimd
    fi
  fi
fi
exit 0
| true |
7fdf60f11c25477fb962db65e4d91345e728ffe2 | Shell | BIMSBbioinfo/intro2UnixandSGE | /build.sh | UTF-8 | 1,242 | 3.328125 | 3 | [] | no_license | set -x -e
mkdir -p tmp
# Build navigation
# Convert the summary to HTML, rewriting .md links to .html and prefixing
# hrefs with the GitHub Pages base path.
pandoc -f markdown -t html SUMMARY.md | \
  sed -e 's|\.md|.html|' -e 's|href="|href="/intro2UnixandSGE/|' > book/SUMMARY.html
# Start the JSON array consumed by bake-index.js; '{}]'  below closes it.
echo '[' > tmp/documents.json
# NOTE(review): $(find ...) word-splits — page paths must not contain
# whitespace for this loop to work.
for file in $(find pages -type f -printf '%P\n'); do
  echo $file
  # Page title = text after "# " on the first level-1 heading.
  title=$(egrep --max-count=1 '^# ' pages/$file | tail -c +3)
  # Build JSON documents for indexing
  # Punctuation (and quote characters) are stripped so the plain text can
  # be embedded safely in the JSON template.
  tr \" \' < pages/$file | tr -d "[:punct:]" | \
    pandoc -f gfm \
      -t plain \
      -V url="/intro2UnixandSGE/$(dirname $file)/$(basename $file .md).html" \
      -V pagetitle="$title" \
      --template template.json - | tr "\n" " " >> tmp/documents.json
  # Build web pages
  mkdir -p book/$(dirname $file)
  # TODO: level should be taken from SUMMARY
  pandoc -f markdown_github+smart \
    -t html \
    -V level="1.1" \
    -V navigation="$(cat book/SUMMARY.html)" \
    -V pagetitle="$title" \
    -o "book/$(dirname $file)/$(basename $file .md).html" \
    --template template.html pages/$file
done
echo '{}]' >> tmp/documents.json
cp book/README.html book/index.html
# Bake the client-side search index from the collected documents.
node bake-index.js < tmp/documents.json > book/search_index.json
rm tmp/documents.json
| true |
a701841cdcb2a8dd1184a2b62a556874c5389804 | Shell | Esli92/legendary-octo-spork | /mkDailyFiles.sh | UTF-8 | 2,707 | 3.8125 | 4 | [] | no_license | #!/bin/bash
#===========================================================================================
#Program Name: mkDailyFiles.sh
#Description: Wrapper to use with getWindHeightLevels.py and process all input files.
#Language: bash
#Programmer Oscar Jurado (ojurado@ciencias.unam.mx)
#Creation date: June-2017
#------------------Use------------------------------------------------------------------
#chmod +x mkDailyFiles.sh
#./mkDailyFiles.sh
#------------------Requisites------------------------------------------------------------
#WRF output data in ../../../salidas/
#pluma data in ../../../pluma
#getWindHeightLevels.py in same folder with requisites fulfilled.
#-----------------Version---------------------------------------------------------------
#v1.0 June/17 Program is created
#----------------Known issues-----------------------------------------------------------
#----------------Dependencies and Libraries----------------------------------------------
#---------------Import-------------------------------------------------------------------
#-----------------Local directories-----------------------------------------------------
WRF_dir='../../../salidas/'
FILES='../../../pluma'
if [ ! -d "latlonpairs/" ]
then
mkdir latlonpairs
else
rm -rf latlonpairs
mkdir latlonpairs
fi
for FILE in `ls $FILES`
do
#We want the first and last hour record to get starting and ending hour.
#First record (no header)
awk -F'\t' '{if (NR == 2) {print $2}}' $FILES/$FILE > firstTime.txt
#Last record
awk -F'\t' END'{print $2}' $FILES/$FILE > lastTime.txt
#Now manipulate the strings to get only the hours
FIRST=`head firstTime.txt`
LAST=`head lastTime.txt`
HOUR_INIT=${FIRST:0:2}
HOUR_FIN=${LAST:0:2}
#Add one hour, since we need the whole thing
HOUR_FIN=$(($HOUR_FIN+1))
#Since day should not change, get any of the lines that's not a header. Say 3.
awk -F'\t' '{if (NR == 3) {print $1}}' $FILES/$FILE > date.txt
#Get the day and month from the date string
DATE=`head date.txt`
DAY=${DATE:8}
MONTH=${DATE:5:2}
#Now comes the interesting part, using sed
sed 's:'MONTH':'${MONTH}':g' getWindHeightLevels.py.template > mkDailyFile.py
sed 's:'DAY':'${DAY}':g' mkDailyFile.py > mkDailyFile2.py
sed 's:'PLUMTEXT':'${FILE}':g' mkDailyFile2.py > mkDailyFile.py
sed 's:'HOUR_INIT':'${HOUR_INIT}':g' mkDailyFile.py > mkDailyFile2.py
sed 's:'HOUR_FIN':'${HOUR_FIN}':g' mkDailyFile2.py > mkDailyFile.py
mkdir latlonpairs/${FILE}
python mkDailyFile.py
cat latlonpairs/???.csv > latlonpairs/${FILE}/transecto_d${DAY}_m${MONTH}.txt
rm latlonpairs/???.csv
done
| true |
c02f4a17a4e752529edc574d667addfe7b3d8537 | Shell | billorbach/test | /web-create-master01.sh | UTF-8 | 557 | 2.796875 | 3 | [] | no_license | #!/bin/sh
#saved in vmconnect /home/jeos/ , but run in esxi server
#server="192.168.3.5"
server="`sh getip.sh esxi`"
user="root"
n=0
ShownName=$1 #"master0.00$n"
#power off master
id=$(ssh $user@$server vim-cmd vmsvc/getallvms|grep $ShownName|awk '{print $1}')
powerstatus=$(ssh $user@$server vim-cmd vmsvc/get.summary $id|grep 'powerState'|awk '{print $3}'|sed 's/[^"]*"\([^"]*\).*/\1/')
if test $powerstatus = "poweredOn"
then
ssh $user@$server vim-cmd vmsvc/power.shutdown $id
fi
#unregister vm
#ssh $user@$server vim-cmd vmsvc/unregister $id
| true |
9c165d7dad810808b8c2e4874e0f7b378aaa4be3 | Shell | leiyongsgithub/sc_frame | /frame-parent/park-registercenter.sh | UTF-8 | 2,168 | 3.640625 | 4 | [] | no_license | #!/bin/sh
APP_MAINCLASS=cn.smart.park.RegisterCenterServer
APP_HOME=/opt/park-parent
PROJECT_NAME=park-registercenter
CLASSPATH=$APP_HOME/$PROJECT_NAME/classes
for i in $APP_HOME/$PROJECT_NAME/lib/*.jar; do
CLASSPATH="$CLASSPATH":"$i"
done
JAVA_OPTS="-server -ms128m -mx128m -Xmn64m -Djava.awt.headless=true"
psid=0
started=0
checkpid() {
javaps=`$JAVA_HOME/bin/jps -l | grep $APP_MAINCLASS`
if [ -n "$javaps" ]; then
psid=`echo $javaps | awk '{print $1}'`
else
psid=0
fi
}
checkport(){
configcenterport=`netstat -ln|grep 8000`
if [ -n "$configcenterport" ]; then
started=1
sleep 10
else
started=0
sleep 1
fi
}
start() {
checkpid
if [ $psid -ne 0 ]; then
echo "================================"
echo "warn: $APP_MAINCLASS already started! (pid=$psid)"
echo "================================"
else
echo "Starting $APP_MAINCLASS ..."
nohup java $JAVA_OPTS -classpath $CLASSPATH $APP_MAINCLASS >/dev/null 2>&1 &
checkpid
if [ $PROJECT_NAME = "park-configcenter" ]; then
while [ $started -eq 0 ]; do
checkport
echo "Waitting for server starting complete!"
done
fi
if [ $psid -ne 0 ]; then
echo "(pid=$psid) [OK]"
else
echo "[Failed]"
fi
fi
}
stop() {
checkpid
if [ $psid -ne 0 ]; then
echo -n "Stopping $APP_MAINCLASS ...(pid=$psid) "
kill -9 $psid
if [ $? -eq 0 ]; then
echo "[OK]"
else
echo "[Failed]"
fi
checkpid
if [ $psid -ne 0 ]; then
stop
fi
else
echo "================================"
echo "warn: $APP_MAINCLASS is not running"
echo "================================"
fi
}
status() {
checkpid
if [ $psid -ne 0 ]; then
echo "$APP_MAINCLASS is running! (pid=$psid)"
else
echo "$APP_MAINCLASS is not running"
fi
}
case "$1" in
'start')
start
;;
'stop')
stop
;;
'restart')
stop
start
;;
'status')
status
;;
*)
echo "Usage: $0 {start|stop|restart|status}"
exit 1
esac
exit 0
| true |
ffcf398b694c644e34a7827a37ff0d44872ffe86 | Shell | MaienM/dotfiles | /config/polybar/scripts/arch-updates.sh | UTF-8 | 265 | 3.484375 | 3 | [] | no_license | #!/usr/bin/env sh
if command -v checkupdates > /dev/null 2>&1; then
echo
exit 0
fi
prefix="${prefix:-}"
arch=$(checkupdates | wc -l)
aur=$(pikaur -Qua 2> /dev/null | wc -l)
if [ "$arch" -gt 0 ] || [ "$aur" -gt 0 ]; then
echo "$prefix$arch/$aur"
else
echo
fi
| true |
ce279cc271ad61770a63798c8244a536b017b12f | Shell | digideskio/frontend-1 | /.buildkite/webpack.sh | UTF-8 | 700 | 3.4375 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | #!/bin/bash
set -e
# Add the SHA1 sum of the webpack file to the host path
WEBPACK_CONFIG_SHA1=$(openssl sha1 webpack/config.js | sed 's/^.* //')
FRONTEND_HOST="$FRONTEND_HOST$WEBPACK_CONFIG_SHA1/"
echo "--- :information_desk_person: Appending SHA1 of webpack/config.js to \$FRONTEND_HOST"
echo "\$FRONTEND_HOST is now $FRONTEND_HOST"
echo "--- :wastebasket: Cleaning up.."
rm -rf dist; rm -rf node_modules;
echo "--- :npm: Installing npm packages"
npm-cache install npm
echo "--- :webpack: Building webpack assets"
./node_modules/.bin/webpack -p --config webpack/config.js --progress --bail
echo "--- :javascript: Checking valid JS"
node --check dist/* && echo "👍 Javascript looks valid!"
| true |
3e99ec26f9d87489b42531b3ef871b0f4450f0f4 | Shell | Odra99/REDES_2 | /script | UTF-8 | 1,326 | 2.875 | 3 | [] | no_license | #!/bin/bash
NET1=192.168.2.0/24
IF1=enp0s3
IP1=192.168.2.2
T1=T1
GW1=192.168.2.1
NET2=192.168.3.0/24
IF2=enp0s8
IP2=192.168.3.2
T2=T2
GW2=192.168.3.1
filename='pesos.txt'
while read line; do
eval export "$line"
done < $filename
PROB1=$(echo "scale=2; $ISP1 / ($ISP1 + $ISP2)" | bc)
PROB2=$(echo "scale=2; 1 - $PROB1" | bc)
ip route del default
ip rule add fwmark 3 table $T1 prio 33000
ip rule add fwmark 4 table $T2 prio 33000
ip route del $NET1 dev $IF1 src $IP1 table $T1
ip route del default via $GW1 table $T1
ip route del $NET2 dev $IF2 src $IP2 table $T2
ip route del default via $GW2 table $T2
ip route add $NET1 dev $IF1 src $IP1 table $T1
ip route add default via $GW1 table $T1
ip route add $NET2 dev $IF2 src $IP2 table $T2
ip route add default via $GW2 table $T2
iptables -P INPUT ACCEPT
iptables -P FORWARD ACCEPT
iptables -P OUTPUT ACCEPT
iptables -t nat -F
iptables -t mangle -F
iptables -t mangle -A PREROUTING -j CONNMARK --restore-mark
iptables -t mangle -A PREROUTING -m mark ! --mark 0 -j ACCEPT
iptables -t mangle -A PREROUTING -j MARK --set-mark 3
iptables -t mangle -A PREROUTING -m statistic --mode random --probability $PROB2 -j MARK --set-mark 4
iptables -t mangle -A PREROUTING -j CONNMARK --save-mark
echo "1" > /proc/sys/net/ipv4/ip_forward
iptables -t nat -A POSTROUTING -j MASQUERADE
| true |
30b4f419796a4e05549b478e0e361554e6d3c5c2 | Shell | lemonbeat/service_client_java | /init-rabbitmq.sh | UTF-8 | 745 | 3.265625 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/env bash
set -e
RABBITMQ_USER="user"
RABBITMQ_PASS="password"
RABBITMQ_VHOST="vhost"
DEFAULT_EXCHANGES=( "PARTNER" "DMZ" "EVENT.APP" )
echo "Starting RabbitMQ server as docker container..."
docker run -d \
--name rabbit \
-p 5672:5672 \
-p 15672:15672 \
-e RABBITMQ_DEFAULT_USER=${RABBITMQ_USER} \
-e RABBITMQ_DEFAULT_PASS=${RABBITMQ_PASS} \
-e RABBITMQ_DEFAULT_VHOST=${RABBITMQ_VHOST} \
rabbitmq:3.8.9-management
echo "Waiting for RabbitMQ server..."
sleep 10
for exchange in "${DEFAULT_EXCHANGES[@]}"
do
echo "Declaring exchange ${exchange}..."
docker exec -it rabbit \
rabbitmqadmin declare exchange \
-u ${RABBITMQ_USER} \
-p ${RABBITMQ_PASS} \
--vhost=${RABBITMQ_VHOST} \
name=${exchange} \
type=topic
done | true |
32b115539069aaa9feb3296d138a887a32f69a52 | Shell | alurpawan/organizer | /reorder.sh~ | UTF-8 | 409 | 3.609375 | 4 | [] | no_license | #!/bin/bash
#A script the reorders the list of files in a folder
#input
echo "Enter the location of the folder to reoder. Type back to undo : "
read $location
#Get list of names, and store it in case of error
cd $location
ls | grep -v "orig_name.txt" >> orig_name.txt
ep_no=1
cat orig_name.txt | while $LINE
do
echo "Episode "$ep_no" : "$LINE
mv $LINE "Episode$ep_no"
ep_no=$((ep_no+1))
done
| true |
30b41c45d9fb87c92a46af117d932710acd9cfd7 | Shell | Axway-API-Management-Plus/apimanager-report-tool | /src/main/assembly/scripts/run-metadata-export.sh | UTF-8 | 382 | 3.484375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/sh
if [ -z "$JAVA_HOME" ]
then
echo Environment variable JAVA_HOME not set!
exit 1
fi
#programDir="${0%/*}"
programDir="$( cd "$(dirname "$0")" ; pwd -P )"
cd $programDir/..
CP=lib:conf
for jars in lib/*
do
CP=$CP:$jars
done
"$JAVA_HOME/bin/java" -Xms64m -Xmx256m -classpath "$CP" com.axway.apim.report.APIManagerMetadataExport $*
rc=$?
exit $rc
| true |
eb6016f2f5305fc0ca9869701e0f90f0360b9cdc | Shell | Bologna78/Android-Build-Bot-Script | /buildbot.sh | UTF-8 | 2,249 | 3.796875 | 4 | [] | no_license | #!/bin/bash
# Shane Faulkner
# http://shanefaulkner.com
# You are free to modify and distribute this code,
# so long as you keep my name and URL in it.
# Thanks Andy and David =D
#---------------------Build Settings------------------#
# your build source code directory path
SAUCE=/your/source/directory
# cloud storage directory (can be non-cloud storage folder)
CLOUD=/cloud/storage/directory
# number for the -j parameter
J=9
# leave alone
DATE=`eval date +%m`-`eval date +%d`
# here goes the roms you would like to build
PRODUCT[0]="toro" # phone model name (product folder name)
LUNCHCMD[0]="bamf_nexus-userdebug" # lunch command used for ROM
BUILDNME[0]="bamf_nexus" # name of the output ROM in the out folder, before "-ota-"
OUTPUTNME[0]="bamf_nexus-toro" # what you want the new name to be
PRODUCT[1]="maguro"
LUNCHCMD[1]="bamf_maguronexus-userdebug"
BUILDNME[1]="bamf_maguronexus"
OUTPUTNME[1]="bamf_nexus-maguro"
PRODUCT[2]="torospr"
LUNCHCMD[2]="bamf_nexus_spr-userdebug"
BUILDNME[2]="bamf_nexus_spr"
OUTPUTNME[2]="bamf_nexus-torospr"
#----------------------FTP Settings--------------------#
# set "FTP=y" if you want to enable FTP uploading
FTP=n
# FTP server settings
FTPHOST[0]="host" # ftp hostname
FTPUSER[0]="user" # ftp username
FTPPASS[0]="password" # ftp password
FTPDIR[0]="directory" # ftp upload directory
FTPHOST[1]="host"
FTPUSER[1]="user"
FTPPASS[1]="password"
FTPDIR[1]="directory"
#---------------------Build Bot Code-------------------#
cd $SAUCE
repo sync
make clean
for VAL in "${!PRODUCT[@]}"
do
source build/envsetup.sh && lunch ${LUNCHCMD[$VAL]} && time make -j$J otapackage
cp $SAUCE/out/target/product/${PRODUCT[$VAL]}/${BUILDNME[$VAL]}"-ota-"$DATE".zip" $CLOUD/${OUTPUTNME[$VAL]}"-"$DATE".zip"
done
#----------------------FTP Upload Code--------------------#
if [ $FTP = "y" ]; then
echo "Initiating FTP connection..."
cd $CLOUD
ATTACH=`for file in *"-"$DATE".zip"; do echo -n -e "put ${file}\n"; done`
for VAL in "${!FTPHOST[@]}"
do
echo -e "\nConnecting to ${FTPHOST[$VAL]} with user ${FTPUSER[$VAL]}..."
ftp -nv <<EOF
open ${FTPHOST[$VAL]}
user ${FTPUSER[$VAL]} ${FTPPASS[$VAL]}
cd ${FTPDIR[$VAL]}
$ATTACH
quit
EOF
done
echo -e "FTP transfer complete! \n"
fi
| true |
46a9ab78c91524232dbee88bc3933a1d43bc0480 | Shell | gianleu/starkfell.github.io | /sandbox/oozie/oozieScript.sh | UTF-8 | 239 | 3.078125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
echo ""
cat /etc/hosts
echo ""
if [ $? ]; then
echo "Contents of /etc/hosts file retrieved Successfully."
echo ""
else
echo "Failed to retrieve the contents of the /etc/hosts file."
echo ""
fi
| true |
a119003906828928f0941a8bcc90c6b37a7220de | Shell | fnune/dotfiles | /lazygit/.dependencies/includes/lazygit | UTF-8 | 394 | 3.03125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
set -euxo pipefail
LAZYGIT_VERSION=$(curl -s "https://api.github.com/repos/jesseduffield/lazygit/releases/latest" | grep '"tag_name":' | sed -E 's/.*"v*([^"]+)".*/\1/')
curl -Lo lazygit.tar.gz "https://github.com/jesseduffield/lazygit/releases/latest/download/lazygit_${LAZYGIT_VERSION}_Linux_x86_64.tar.gz"
sudo tar xf lazygit.tar.gz -C /usr/local/bin lazygit
rm lazygit.tar.gz
| true |
7736d72954865566675a9960d1f976508b25eb60 | Shell | julescarbon/vframe | /docker/base/start_cpu.sh | UTF-8 | 343 | 2.6875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Startup script for docker image
IMAGE=vframe/base:cpu
xhost +local:docker
DOCKER_NAME="$(echo $IMAGE | sed 's/\//-/g' | sed 's/:/-/g' | sed 's/_/-/g')"
docker run \
-u $(whoami):$(whoami) \
-h $(hostname)-$DOCKER_NAME \
-it \
-v /work:/work \
-w /work \
-v /tmp/.X11-unix:/tmp/.X11-unix \
-p 8888:8888 \
$IMAGE /bin/zsh | true |
751e699a7377a75edd9d82f4743f532b9dad5a73 | Shell | naubergois/bigdata | /BBDP-ResourceBundle/Use Cases/uc1-simple-pipeline-code.sh | UTF-8 | 2,023 | 2.890625 | 3 | [] | no_license | ##########################################################################
#
# Building Big Data Pipelines - Use Case 1 : Code
# Copyright : V2 Maestros @2016-2017
#
##########################################################################
##########################################################################
#The following are commands for Sqoop. They can either be executed on the
#shell or through a shell script
#create the destination directory once
hadoop fs -mkdir pipeline
hadoop fs -mkdir archive
#Create a Sqoop job to incrementally copy records
sqoop job --create auditTrailJob \
-- import \
--connect jdbc:mysql://localhost/pipeline \
--username root \
--password-file file:///home/cloudera/pipelines/pwdfile.txt \
--table audit_trail \
-m 1 \
--target-dir /user/cloudera/pipeline/uc1-audit-trail \
--incremental append \
--check-column id
#Run the job
sqoop job --exec auditTrailJob
hadoop fs -cat /user/cloudera/pipeline/uc1-audit-trail/*
##########################################################################
#The following are commands for Mongo DB.
use pipeline;
db.createCollection("audit_trail");
##########################################################################
#The following are commands for Pig.
auditData = LOAD '/user/cloudera/pipeline/uc1-audit-trail'
USING PigStorage(',')
as ( id:int, eventdate:chararray, user:chararray, action:chararray);
REGISTER mongo-hadoop-core-2.0.1.jar;
REGISTER mongo-hadoop-pig-2.0.1.jar;
REGISTER mongo-java-driver-3.4.0.jar;
STORE auditData INTO 'mongodb://localhost:27017/pipeline.audit_trail' USING
com.mongodb.hadoop.pig.MongoInsertStorage('');
##########################################################################
#The following are commands for shell script
#Archive processed records. Move records to archive directory
TODATE=`date +%Y%m%d`
hadoop fs -mkdir archive/$TODATE
hadoop fs -mv pipeline/uc1-audit-trail/* archive/$TODATE/uc1-audit-trail
| true |
9ed37aedb7114afa4290d0f4574abb5331aeaa1d | Shell | openshift/svt | /perfscale_regression_ci/scripts/scalability/loaded-projects.sh | UTF-8 | 1,311 | 3.09375 | 3 | [
"Apache-2.0"
] | permissive | #/!/bin/bash
################################################
## Auth=prubenda@redhat.com qili@redhat.com
## Desription: Script for creating pause deployments and adding network policies
## Polarion test case: OCP-9461
## https://polarion.engineering.redhat.com/polarion/redirect/project/OSE/workitem?id=OCP-9461
## Cluster config: 3 master (m5.2xlarge or equivalent) with 3 worker
## kube-burner config: perfscale_regerssion_ci/kubeburner-object-templates/loaded-projects-config.yml
## PARAMETERS: number of JOB_ITERATION
################################################
source ../../utils/run_workload.sh
source ../custom_workload_env.sh
source ../common.sh
source loaded_projects_env.sh
# If PARAMETERS is set from upstream ci, overwirte JOB_ITERATION
export JOB_ITERATION=${PARAMETERS:-10}
echo "job iterations $JOB_ITERATION $PARAMETERS"
echo "======Use kube-burner to load the cluster with test objects======"
run_workload
TOTAL_CLUSTERPROJECTS=$(oc get projects | grep -c ${NAMESPACE})
echo -e "\nTotal number of ${NAMESPACE} namespaces created: ${TOTAL_CLUSTERPROJECTS}"
if [[ $TOTAL_CLUSTERPROJECTS -ge $JOB_ITERATION ]]; then
echo "======PASS======"
exit 0
else
echo "======FAIL======"
echo "Please debug, when done, delete all projects using 'oc delete project -l kube-burner-job=$NAMESPACE'"
exit 1
fi
| true |
71d811bd4c0d319e3a6699d28f1caf8f27ee7740 | Shell | imicer/ocp4-bm-install | /9-registry.sh | UTF-8 | 2,082 | 3.09375 | 3 | [] | no_license | echo "STEP:Install Docker Registry"
mkdir -p ${REGISTRY_PATH}/{auth,certs,data}
openssl req -newkey rsa:4096 -nodes -sha256 -x509 -days 365 \
-keyout ${REGISTRY_PATH}/certs/registry.key -out ${REGISTRY_PATH}/certs/registry.crt \
-subj "/C=CN/ST=BEIJING/L=BJ/O=REDHAT/OU=IT/CN=registry.${DOMAIN}/emailAddress=admin@${DOMAIN}"
htpasswd -bBc ${REGISTRY_PATH}/auth/htpasswd openshift redhat
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
yum -y install docker-distribution
#
cat << EOF > /etc/docker-distribution/registry/config.yml
version: 0.1
log:
fields:
service: registry
storage:
cache:
layerinfo: inmemory
filesystem:
rootdirectory: ${REGISTRY_PATH}/data
delete:
enabled: false
auth:
htpasswd:
realm: basic-realm
path: ${REGISTRY_PATH}/auth/htpasswd
http:
addr: 0.0.0.0:5000
host: https://${REG_DOMAIN}
tls:
certificate: ${REGISTRY_PATH}/certs/registry.crt
key: ${REGISTRY_PATH}/certs/registry.key
EOF
#
systemctl enable docker-distribution --now
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
echo "STEP:Prepare Images for Installing OCP "
tar -xzf ${OCP_PATH}/ocp-client/openshift-client-linux-${OCP_VER}.tar.gz -C /usr/local/sbin/
yum -y install podman skopeo jq
\cp ${REGISTRY_PATH}/certs/registry.crt /etc/pki/ca-trust/source/anchors/ > /dev/null 2>&1
update-ca-trust
podman login -u openshift -p redhat --authfile ${REG_SECRET} ${REG_DOMAIN}
tar -xvf ${OCP_PATH}/ocp-image/ocp-image-${OCP_VER}.tar -C ${OCP_PATH}/ocp-image/
#rm -f ${OCP_PATH}/ocp-image/ocp-image-${OCP_VER}.tar
oc image mirror -a ${REG_SECRET} --dir=${OCP_PATH}/ocp-image/mirror_${OCP_VER} file://openshift/release:${OCP_VER}* ${REG_DOMAIN}/${REPO_NAME}
echo =============================================================================================
echo "===================== $(curl -u openshift:redhat -s https://${REG_DOMAIN}/v2/${REPO_NAME}/tags/list | jq -M '.["tags"][]' | wc -l) images have been imported to docker registry =====================" | true |
2c95b8e17d011208fce41ab87199212ecf184ec6 | Shell | october12/perform | /WEB-INF/clear_bench.sh | UTF-8 | 167 | 2.859375 | 3 | [] | no_license | #!/bin.bash
benchs=('astar.base' 'milc' 'sss' 'bbb' 'ddd')
param=$1;
for ((i=0;i<${#param};i++ ))
do
index=${param:$i:1}
let index--;
pidof ${benchs[$index]}
done
| true |
bd2659759b3427340551bd961e56db13b3916478 | Shell | smaccoun/servant-ts | /deploy | UTF-8 | 705 | 3.25 | 3 | [] | permissive | #!/bin/bash
set -e
COMMIT_MSG=$1
echo "About to commit with message: $COMMIT_MSG"
if [ -n $COMMIT_MSG ]; then
echo "MUST HAVE A COMMIT MSG!"
exit 1
fi
stack build --fast
echo "****************************************"
echo "TESTING TO MAKE SURE EVERYTHING IS BUENO"
echo "****************************************\n"
stack test --fast
echo "****************************************"
echo "MAKING DOCS"
echo "****************************************\n"
stack exec servant-ts-mk-docs
echo "****************************************"
echo "ADDING AND COMMITING"
echo "****************************************\n"
which git
$(git add --all)
$("git commit -m '$COMMIT_MSG'")
$(git push origin master)
| true |
a877580bdd55b8d365c80509d68b0d7c8982b2dd | Shell | hyperledger-archives/sawtooth-dev-tools | /bootstrap.d/80-blockchain-install.sh | UTF-8 | 387 | 2.5625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash -x
[[ -e /project ]] || exit 1
. /vagrant/conf.sh
if [[ "$INSTALL_TYPE" != "setup.py" ]]; then
echo "Skipping: $0"
exit 0
fi
set -e
cd /project/sawtooth-core
python setup.py build
python setup.py install
cd /project/sawtooth-mktplace
python setup.py build
python setup.py install
cd /project/sawtooth-validator
python setup.py build
python setup.py install
| true |
cd8997e5f44056af934762001f892952d29e2d51 | Shell | gopalrajpurohit/bashutils | /bin/gitutils/my_merge_branches.sh | UTF-8 | 912 | 3.515625 | 4 | [] | no_license | #!/bin/bash
CURRENT_SCRIPT_NAME=$0
CURRENT_SCRIPT_FULL_PATH=$(dirname "$0")
source ${CURRENT_SCRIPT_FULL_PATH}/my_branches.rc
${CURRENT_SCRIPT_FULL_PATH}/my_check_branches.sh || exit
## All branches are existent
for ROOT_BRANCH in $ROOT_BRANCHES
do
# git update and merge with parent
git checkout ${ROOT_BRANCH} || exit
#git refresh || exit;
git push origin ${ROOT_BRANCH} && git pull --rebase origin ${ROOT_BRANCH} || exit;
done
for BRANCH_NAME in $BRANCH_NAMES
do
CHILD_BRANCH=`echo $BRANCH_NAME | sed 's/:.*//g'`
PARENT_BRANCH=`echo $BRANCH_NAME | sed 's/.*://g'`
echo "${CHILD_BRANCH} -> ${PARENT_BRANCH}"
BRANCH=${CHILD_BRANCH}
git checkout ${BRANCH} || exit;
git push origin ${BRANCH} && git pull --rebase origin ${BRANCH} || exit;
git merge --no-ff ${PARENT_BRANCH} || exit;
git push origin ${BRANCH} && git pull --rebase origin ${BRANCH} || exit;
done
| true |
4a7a889c4721a82bfa23f8ceef19b6be1ee44520 | Shell | agermain/rkt | /stage1/usr_from_coreos/cache.sh | UTF-8 | 3,979 | 3.8125 | 4 | [
"Apache-2.0"
] | permissive | set -e;
# maintain a cached copy of coreos pxe image
if [ -z "${IMG_URL}" -o -z "${ITMP}" -o -z "${V}" ]; then
exit 1
fi
if [ ${V} -eq 3 ]; then
set -x
fi
# flatcar gpg signing key:
# $ gpg2 --list-keys --list-options show-unusable-subkeys \
# --keyid-format SHORT F88CFEDEFF29A5B4D9523864E25D9AED0593B34A
# pub rsa4096/0593B34A 2018-02-26 [SC]
# F88CFEDEFF29A5B4D9523864E25D9AED0593B34A
# uid [ultimate] Flatcar Buildbot (Official Builds) <buildbot@flatcar-linux.org>
# sub rsa4096/064D542D 2018-02-26 [S] [revoked: 2018-03-14]
# sub rsa4096/D0FC498C 2018-03-14 [S] [revoked: 2018-09-26]
# sub rsa4096/896E394F 2018-09-26 [S] [expires: 2019-09-26]
GPG_LONG_ID="E25D9AED0593B34A"
GPG_KEY="-----BEGIN PGP PUBLIC KEY BLOCK-----
mQINBFqUFawBEACdnSVBBSx3negnGv7Ppf2D6fbIQAHSzUQ+BA5zEG02BS6EKbJh
t5TzEKCRw6hpPC4vAHbiO8B36Y884sSU5Wc4WMiuJ0Z4XZiZ/DAOl5TFfWwhwU0l
SEe/3BWKRtldEs2hM/NLT7A2pLh6gx5NVJNv7PMTDXVuS8AGqIj6eT41r6cPWE67
pQhC1u91saqIOLB1PnWxw/a7go9x8sJBmEVz0/DRS3dw8qlTx/aKSooyaGzZsfAY
L1+a/xst8LG4xfyHBSAuHSqi76LXCdBogU2vgz2V46z29hYRDfQQQGb4hE7UCrLp
EBOVzdQv/vAA9B4FTB+f5a7Vi4pQnM4DBqKaf8XP4wgQWBW439yqna7rKFAW+JIr
/w8YbczTTlJ2FT8v8z5tbMOZ5a6nXAn45YXh5d80CzqEVnaG8Bbavw3WR3jD81BO
0WK+K2FcEXzOtWkkwmcj9PrOKVnBmBv5I+0xtpo9Do0vyONyXPDNH/I4b3xilupN
bWV1SXUu8jpCf/PaNrj7oKHB9Nciv+4lqu/L5YmbaSLBxAvHSsxRpKV53dFtU+sR
kQM5I774B+GnFvhd6k2uMerWFaA1aq7gv0oOm/H5ZkndR5+eS0SAx49OrMbxKkk0
OKzVVxFDJ4pJWyix3dL7CwmewzuI0ZFHCANBKbiILEzDugAD3mEUZxa8lQARAQAB
tD9GbGF0Y2FyIEJ1aWxkYm90IChPZmZpY2lhbCBCdWlsZHMpIDxidWlsZGJvdEBm
bGF0Y2FyLWxpbnV4Lm9yZz6JAk4EEwEIADgWIQT4jP7e/ymltNlSOGTiXZrtBZOz
SgUCWpQVrAIbAwULCQgHAgYVCgkICwIEFgIDAQIeAQIXgAAKCRDiXZrtBZOzSi5G
EACHLSjK24szSj4O8/N9B6TOLnNPJ17At/two/iHfTxrT8lcLM/JQd97wPqH+mVK
hrZ8tCwTZemVeFNXPVy98VYBTjAXscnVh/22DIEYs1wbjD6w8TwgUvzUzpaQJUVu
YlLG3vGAMGaK5FK41BFtsIkar6zaIVy5BPhrA6ASsL9wg9bwSrXT5eKksbaqAZEG
sMiYZxYWzxQHlPu19afxmzBJdVY9YUHEqBYboslGMlLcgErzF7CaiLjDEPkt5Cic
9J3HjIJwlKmVBT6DBdt/tuuzHQntYfPRfOaLVtF/QxRxKNyBtxYndG6k9Vq/cuIN
i5fHpyZ66+9cwswrLISQpAVWa0AW/TENuduj8IU24zCGL7RZVf0jnmALrqkmBTfY
KwtTdpaFle0dC7QP+B27vT/GhBao9KVazfLoAT82bt3hXqjDciAKAstEbqxs75f2
JhIl0HvqyJ47zY/5zphxZlZ+TfqLvJPoEujEUeuEgKm8xmSgtR/49Ysal6ELxbEg
hc6qLINFeSjyRL20aQkeXtQjmZJGuXbUsLBSbVgUOEU+4vvID7EiYyV7X36OmS5N
4SV0MD0bNF578rL4UwhH1WSDSAgkmrfAhgFNof+MlI4qbn39tPiAT9J9dpENay0r
+yd59VhILA3eafkC6m0rtpejx81sDNoSp3UkUS1Qq167ZLkCDQRalBYrARAAsHEO
v6b39tgGxFeheiTnq5j6N+/OjjJyG21x2Y/nSU5lgqPD8DtgKyFlKvP7Xu+BcaZ7
hWjL0scvq0LOyagWdzWx5nNTSLuf8e+ShlcIs3u8kFX8QMddyD5l76S7nTl9kE1S
i2WkO6B4JgzRQCAQyr2B/knfE2wrxPsJsnB1qzRIAXHKvs8ev8bR+FfFSENxI5Jg
DoU3KbcyJ5lMKdVhIhSyGSPi1/emEpbEIv1XYV9l8g4b6Ht5fVsgeYUZbOF/z5Gc
+Kwf3ikGr3KCM/fl06xS/jpqM08Z/Uyei/L8b7tv9Wjop5SXN0yPAr0KIGQdnq5z
GMPf9rkG0Xg47JSQcvDJb0o/Ybi3ND3Mj/Ci8q5UtBgs9PWVBS4JyihKYx2Lb+Wj
+LERdEuv2qRPXO045VgOT5g0Ntlc8EvmX3ulofbM2f1DnPnq3OxuYRIscR/Nv4gi
coNLexv/+mmhdxVJKCSTVPp4SoK4MdBOT0B6pzZjcQBI1ldePQmRZMQgonekUaje
wWy1hp9o+7qJ8yFkkaLTplbZjQtcwfI7cGqpogQmsIzuxCKxb1ze/jed/ApEj8RD
6+RO/qa3R4EGKlSW7FZH20oEDLyFyeOAmSbZ8cqPny6m8egP5naXwWka4aYelObn
5VY6OdX2CJQUuIq8lXue8wOAPpkPB61JnVjQqaUAEQEAAYkCNgQoAQgAIBYhBPiM
/t7/KaW02VI4ZOJdmu0Fk7NKBQJaqVa3Ah0CAAoJEOJdmu0Fk7NK8WMP/R+T//rW
QeuXMlV+l8bHKcbBGWBvvMV5XcsJKDxtzrclPJLqfuBXSDTwqlirXXqlEeI613kE
UWG0b0Ny0K87g9CnkbsJiizGtyQJp2HuMnjRivTd/1V30ACCaK01nbu1/sdOk6Y4
Cimv+mGEgzjcXVXs72p+qqhDEaMgf1GYjDrzVHUnKUNIU8QOG2HRVhpP27bOg9Ao
a9Exdo04w3dXxso3KGeVkEE8dN0rKmHQ67jcCqKogzNlsIujbJkgRbwk/e3BgDWX
ifQSMW4SAAl/PVP7z3h6QoLcYSddOMMYwqP5Oqe4obBaKgVrn705s/Z0pW5nEzFg
38hEoJe+CCXjPl0zjHKQGzhwR/MLWvMf6jO06uvASiJuU/hefVCCek9b5SLn+IPU
J+uLh57F1I7O4ohPWY9+sbrpibx2pcSmcefVMwX/iSt6RNlBITYVQLGN8+/0gcRz
3jGf7m+M8Y7KYrmFxtwPsFejygDr6VVvoUarPPnJSzP+UdPqzUCcxdnV7Ub4QMRl
wUyvnwgnpn0xOsZ/Pdh5gOC06Yrkjbr12DWIpUxy/9z/QR2TeImi02trRKpCh9xw
0bKlsWBt1oUnNnQjnMUB9tmWsF1I6DrO/FUcB+5d7iy+MnPB1LIKS8JokODWIrOq
dg763UZfGbp4EbLlO1vcwIdKC6AGoS6hoyPUiQRyBBgBCAAmFiEE+Iz+3v8ppbTZ
Ujhk4l2a7QWTs0oFAlqUFisCGwIFCQHhM4ACQAkQ4l2a7QWTs0rBdCAEGQEIAB0W
IQQeEA3Xpnem+aUyyfm1HeN3Bk1ULQUCWpQWKwAKCRC1HeN3Bk1ULe4hD/0XLBuo
inLaN2wVQpbjeIEG9Shbaax+BmsuufjiVgNxKEkBg4q6/miCpdpjYmcvv7nNG5uK
zuQ/fnLzgldiVS0G+0BVBelF1FlT85xaI/enIrsvTauGEsfie7/ljrkV//0MFqdB
ZnM680JDVbvl8f2RDBACmz3PoJr8kg3PZwvb028effeTqhZ8zA5ZW5rum0Cn6dOb
v3OrCyQw/aoUvjH65j3T+fr17Em5dYaxNShFxoMBKxSsr+V4opwGEzBRxuoLrzAl
/LcazNAL/CLj+7JBxFj4FL5fB7VQcBEBDFBwg0ropojUeqT8Y2oyygnwLHc4otwV
TNxezToTFucnIq87IAqpTdEe3dHXx1CRJAyIeXxh6j+rYpidiL4CegIczva/xE+P
CqKV1qsGPysD301pXEYy4W1nLuST1tu/xbZCIJdqUwOxsVN5D9UVsFEr4Szfq0QC
14UQzMeXJSdXE2Z1TAnl7381AUC8LoRp55BH5Jih/zrUT1+HrzwdWBZdBJc04f5I
RiZqhZ8Goso5Ki6yFGCEXuitQUyWS0OWkZTX4m2rNIiPMw8PVweQ+yeqwaAapfm7
JX4l3Wa9fRpwK8LLV5/iaXti7IEla51lCCHRn+yM+0XcYI//53qQXVobcaC8Z9uy
LfJCjCtETknO2/uGL+kNyoZ4ykMfIhqOaxZWnqfzD/4kHM+EB4Yuti1kxFmSdnjp
MLEOXNFRoJcvPL7kw6ZMQaWZ96UOdlcL2GiHWAyYThsSjWez+kZ60GuDL+JwfQaR
InavuacP3Dw2eg8/W5XAT/G2EEmA4wuDMXZ07aPa3nJPdlCMcwxQLyHb6ZgModxZ
IHXaX/JEylapdh0j4sQf5P8OvK2Qq212OVuIaZPnjloQDeJqJTzP9iGDaJ3Ne6gM
n6nZ3ZIK1qtJc9WxRtjIOLS2ZdMSB5JWb1gE4nEkvDChbWKfeMpv5ox8G6HJe9Xk
sygGj876vmyAHDwl8zsYMvWeFZONxsahKpDFjXKMcnIpV8ZPfaCT4r4G6x4Qil8u
A1iwCKXo4d+uq3qrRKyhGOE+B+H/5QCGmmfAXhBVsR2aUldK0kx/IVi7HJD1aBRF
k+cpC0+vMw4O4f4qXzm2z5qWHftcB/EBhN+h4+IIDSE+wEtz9OdEpXXbPZ1sd7eS
8K4OjjliG2meTQE/wvn1BNtJVJ2rGQX6moCGx/1FYdLXLROv6hOnBslMVHFRbe+9
OmTFXEDlb6Nh/08PwYdyqk4qXddebALpC0TmyEty8QnjEmL1IhDtMTDVlj/33imb
L0waKqGJ5U3s2fA8VaDZQWL6U/c71xtuVFt6trS4rnsoBzlILPfC1n2wpPvKPEHL
avOKXgf6jXnmSzi5GbnBgbkCDQRaqVbRARAA0R+Z6SrbAI5b8m/j+Q3yc2tc5wDB
i7Hly0SW95ydLkKGaGvHhpLrBM5WwKdtQzF45A9tlyu6iGys5HWPRW3BqMpZrcv8
+2QHyoI2lYM/b0ioai2gSZB+lao955iJyBQ8c+pLSybxwcdaXTb6iBLGReCYXlrL
QL6H+NYw338x8bhRvaDanPQis81GzxtSZgRjtZbAGSvOgq25A3oCTF45O8cfBz+I
FxNaziS7x6lXuqOatv5n3HzffGOz3q1baKsxMRVGx3PdAI/LvRRd9SeBeTpFZQYY
ujCC5K8ds7yxB39Hel5llKnoXLHNm/wLGukXY+PtJVzhtBDL0X3o6OUfsb9tPzwM
oMyA8gRXf94nw2XRT8MMrjGChB7Clfq9AFP3e44D3MaVWbEGOWNG9rQ5s72dk7dF
K416D5cc+BQ8mvllYzZ8gzOgYKnlfVmhqVDAIkFz601+lLRUdK4pD0t1BCmlINSY
EKQNmp0NCSNVCbWWscKvTjboqb76oH/hjnIDqh3GeGdnIJ8vGwUdNN2NBA0rrK8o
+lD1Kc+e6Whe5xORc5krUZYtDCwW6ylRb118rmrHsojxoTH/kGr2IB0po59LT01l
M6KjLfGWrz76jJZmDLQ2gDBZNjuqDV+raHaKpVgUlbTHvmVvumBCm50Haz5w2vbM
txDxVhxU1FdYY00AEQEAAYkCNgQoAQgAIBYhBPiM/t7/KaW02VI4ZOJdmu0Fk7NK
BQJbq1h6Ah0CAAoJEOJdmu0Fk7NKGuAP/0LeLoKVOI8GRiU25bBek4mElKV5YNwU
8QMf75VPnRxklMFGkrPDuVCHVIsOUGo7jF4EHfH8ACgXNsFx8v9pMgsvk4WvfxbY
hepoNNOF/PLsPc125Z3hNq3uJsAMEpijNt8pNXgMvYj6mUKRGuMcIm1KLlczknwU
vtAIWSV+qqpCUL2miVPzp7Y8lexUeB1dsxAiF4btZIJ2i53S72kPMqwLzHdrPxDt
TiIweNz/T5K+C19MDAZ9AVp5qTcPWhQMDnNz3bY/4B2NcAwPJTCRxt7Ne5Ufxpll
3D92jwKZxREBdBPlRq/Qr4JEm4VXOw4QLFoU/WOyRBd4q4aNeFR00J5unZ2zcQ/E
ZL5OvHmkZ2Xl27Cuky1dAnT6hdadjMgWfQB/giXfP8Tu0Qpi7ISv5fEyUh70RpKr
SPdbUIR92IR8Qu862SSZsn7KoywUb2lFYzj6N9c1XORBexgRQgGAMdcT0REXyyS0
bl+9aBRntiw00FkEe7V1+EOLTi40bbddLC0Oatxa35lYg38VYmnhHCrkUl3iCLa/
AlhZmUGXSwmACNRzVRzFPAZMjdql+SEIF0XLYe96sb5twX2aztemy0GMU0ybK3pH
eYrpccUsPRPiHvT4k5TqAA+D1Y1WDjEhidPCbYeyThhAu+lfJiSVn2ex8ESByA/c
/QqOMREjkWlwiQRyBBgBCAAmFiEE+Iz+3v8ppbTZUjhk4l2a7QWTs0oFAlqpVtEC
GwIFCQHhM4ACQAkQ4l2a7QWTs0rBdCAEGQEIAB0WIQSmIfHalsk8Y5UGgy1gNEOh
0PxJjAUCWqlW0QAKCRBgNEOh0PxJjFXaD/0cyALbk6YivbqAMCMXnfBFj5kOoG5T
EGC7quviOVI+U5yNyFzqJtayfaxX3EsF9IjZR4cW58gdcQALS/gGAukexDigoYUz
2h1q2r4zr5pxbj+ez9+fftNDpwp7CmuaB5bzVh1bu8gwVJf4yaSsGubBIgfaysB0
Mzc4eJqIpDFMRQvSOOv7TgzXqAsXQuphoqkB5RuiKtKeugv4qofH5fuM3C/Y4QZ8
edQlTA41KOay1a76xAK85a8qMCjVQVCrepo5+LYXwZAryp4WKIbTSbUNRr5GGgSa
UWBe0/Rz5eqOL3r1YV1WzttWgBLzZUZJqvaYoWtfJGwjxDAFebE+meqtLIh/IDEu
Tc4D3Vge6kCI1jjNDKMZQYf6j1rybKPVzOgkxjCyRcgUI8Y904l9LZ3/BiRV8dY4
nBjWmCYVJPlAVzfDxFwF+A2kKInskPriiYJpFX8MVjy/6GfkJTtMZo1bovSDZZ0n
2MbQ+V3mftV8GkL+RPU5xQ79dPx6Ki81Dh31/T0d8FkEpWLbDy3gc1qgvRWcp6bC
uS1Rg0pf7+ftRYDEW7BBOBzmqfNljolHMWPeZT/1sCs7PmDS+kErZARFm0huMljt
8MNx50KljIVGDUbjOmDaOopTqKFhho/UTTe1Kho3iwTIYIgrzfuCT7t2k0Wx+/NI
y6BcGlPHU/R95gl0D/4yrId19rW5h425bWYmKZ6Ilh+H1zipl5OS0iEllmm4sLcp
Mub2+B+YFU3/EvbF0zkCny2HXy2gyZLhbvNm6Zr4FPW/xfaEnB4OXOOnUbA4+RNf
7bTngPXwhaxN+wQti+Uo0LcwKAU5KIBC9KcT46NirakEu5+5XaU2r+lsa7hlJWfb
17e4tmcOB4QfMTsJu+4DcWJqu+cdtm2N4VcorJCvfw/EffnGaGK0mwRvJp7CZiWi
Vc3T70fH+Rbv6NrgJEFV90XuoetQROwqjBEdbL8iNcuvjWO8j8NSlRKrV+UivP+w
yDf0UCQoMTnFshBM0ZnW+8i/jqsg3kKxs7xuxCZVMfwxzkNb6h/YlbqjRR/hFZ56
Chf1guaCfYJn0vCtdTLWimasemZfcKX7oE9EIbrs8FZcd89FkU0wgrJRscoUAiVP
mbkklT9AvTy7Gp4CCMS8Z22r3Q0d3GgIvFNhakLyDzBKPBf+vJyQEx9SdFIM/Kjv
4grCEjQNrWXXsh8ecurhciHPuiykffmMYyWUzdcc0pQyyyhoYiGbmflGIKx/6M9D
OOW2Q4k7ogubPRLZ/nabZnxJdIbi8WVXgSI2JCuO3+i9dpW+Q9s8F5mPht1QmQnI
ZrA5R/pLRP2oE9x9LDvUPLkQdLIB9RRyTw6D5A1UOI4TuLPOhFpcXqNODjJcO7kC
DQRbq1i2ARAApdwHI9mdWuHcct2tCY4uRFR9m0CliX2vJ3ZOHBmo1wS3HBv0BkAv
zmQwOE5xMDk6i9aN/w6fYii0s1Pfj2cwLz8Iw93icnInk7WGU2KoryWM9+KNGIA+
XOtyobwTh4BHY5ggeYDkdOs7Nrlj1FTlj428NaevU75Cm9xQm6aAZnZZtjSDBTWw
BuSXfFa70kiZzpwKMP/jB8ylWdA74VzkCFfYcdwJHzzrcDS64VRqNhWM/vRFJmLP
wN4MHkAE5RDb4cjGAwkwmZQuDzuk2O9oOukxKd7v/ZUmql4k0qDxi3M9dC3SJJ+O
fVPRlyZ74UVlspgjr5zxSBCerj/aDbVSWWr6JjgeRTQdg6WKhO0+mfmttiANxv/a
fBMDaxys9ee5sJL+WHP62fucD8ukmMEVM0P971U/JBfV8r8VRpy+OENgt6ynJ9dV
4YCdOT2xo42YwkBCYcVOF6iY2YqFd3oDSZARqEk4vr+A2/eNDU37+OBWr8E1pfO7
H6FW4/tVRxYjywat6743e0VTjNbwPGmOFBGc0VuwCJzRsY5dwIi9hlXDGwfNpgzd
tB+ON4BEY4f8ooSYCfHa9G2HeXj/+txxN6Km8Oh8OnQpyfJ6POQQVXX+bUG1W8EC
jNBdoi6m00ZqNVtDsNbdKdWTYYhKtgPUOreGmF75k+LLjiqO4jIE1E0AEQEAAYkE
cgQYAQgAJhYhBPiM/t7/KaW02VI4ZOJdmu0Fk7NKBQJbq1i2AhsCBQkB4TOAAkAJ
EOJdmu0Fk7NKwXQgBBkBCAAdFiEEYozCEpOAZdq047lJqKvwBYluOU8FAlurWLYA
CgkQqKvwBYluOU9wWBAApKMHrxbOqWa0gij3ODcvzpky76y1YWG45iroC55B56X0
XslUpHJno7vTLobV5aJDeXlgaYD2ptn53wW31fTZL/1P0lkyIu30OwYwLvOxaFjT
rsVPCwTz80h6TzsaShFiKirZJhPg5UzC0xfmM4aaQGsoC/Z5pOTyfrYrXgbQPNUJ
f8zagYqpo0WZoG2R2cNwH5VzlJAv/JBB0SdMVgBS7bUXP1eudqn1gmZxw6GUEGU5
5tj4X72ceYHiA+MMlKWsvpwJD9iRsl3yuzcBi8yOA0/jSrXu+5BLGaAAXMyMKETg
+e1ierxZ64yoV+AU6xcKykVzThxG5SoH6NiXsCs0XBOpWxQjfJ4MAeWLfTRMf805
2OSzRsIf1/p2byyTbuApshp//O9c+jbPgEvG7G4VeQdBROY2/46+XR7Q0BrDMom9
Bmk93SSbG9oubYKKALrjJaPIzTieLM3t2zLKZ/RJ6JARYDd6+BMdVNs9QS6Hkwq1
4lIDxz9jqenAXSpnK8fKg2xxzz/UFhoThlY/wlrWP+Sa4FQl1lorcz6Xid+yNoxF
CZw+iWx7FMng0QDM9rtyhAbFkm7JFnDuojVFeNTdTUy+siAZB0cFdP84BkcYugvx
WGM8uYydVOrPlI/nzGomgljIqgzvJm+Crun8eYggmItY53U6xDJmQT7Xrtk7YCa+
0Q/+PRuDorQauvB53mfynLywqxn3h/NyegDrlyq+5Nqsjm3nq0umUSG4/kXMwALy
0h6boyGWR/rkHnLOE1gLQ6fSlpcN8YHtsW6+czpkVH1b+wws/RPg49muTADHeYeM
n5eC0aVrUq7D7IVH+UGILDWJuzq2b+jO/IpXd9kIPlwY/2PFIjwfoSd7W+pjgVXh
6Z+xtWE5mVXnSfxPIXxv/cNd9LtYyT9R6RN7Xu+3hJz/BRp6MUANbdErYD36zERz
GKUO2eJVbOJReevXb24SZzIJkpBF2qwI5dEl8yk12YpGCu75XtFRux3cVhDpdQsx
+/RZGV7Id1X55s4/LiqF5PSEFTB4kZpiY+meq3sKOPT+Ra9BLeur8yo7ftMK13WB
BL2e/mzwfw+s2x1sjWRCuc5KbnK2yTY9ske2hdtAPmVJTDXBO3JWfZj5xKuuc3mp
q7OEd9+gKTiW4PyZfxQIzwXi9BJ6R3+ax7WYR0bi7Gll0910RNFV3MOiLhupIS0Y
BuipB6OgQNFUSjB6vammTd3R+98jIrtWyRDHPmdtgRcK86EbRpj6MHd7rATkdG+S
D0+DXGwfuWIeq2OA+P6lHWEmjlepFSEBS72P5jmpbRtNd+aHN23VesPI/WBQkfBU
4Tu51CGRd4KZk5ugFZ5YqjaM3m70od1zrsdq+BCNsfzuJqU=
=hIuN
-----END PGP PUBLIC KEY BLOCK-----
"
# Print the given flags when the verbosity level is below 3; intended to be
# used inside output capture (backticks or $()) so callers can conditionally
# silence other tools, e.g.: quiet=$(be_quiet "$verbosity" '--quiet')
function be_quiet() {
	local verbosity #verbosity level (integer)
	local flags #silencing flags to emit when not verbose
	verbosity=$1
	flags=$2
	# Quote the operand: an empty/whitespace value previously collapsed the
	# expression to `[ -lt 3 ]`, a malformed test, instead of failing clearly.
	if [ "${verbosity}" -lt 3 ]; then
		printf '%s\n' "${flags}"
	fi
}
# gpg verify a file using the provided key
# Imports the key into a throwaway GPG home directory (removed on return or
# exit) and verifies the detached signature against the file.
# Returns 0 when the signature checks out, 1 otherwise.
function gpg_verify() {
	local file #file to verify
	local sigfile #signature file (assumed to be suffixed form of file to verify)
	local key #signing key
	local keyid #signing key signature
	local verbosity #verbosity level
	local quiet
	local gpghome
	file=$1
	sigfile=$2
	key=$3
	keyid=$4
	verbosity=$5
	# quiet flags only apply below verbosity level 3 (see be_quiet)
	quiet=$(be_quiet $verbosity '--quiet --no-verbose')
	gpghome=$(mktemp -d)
	# remove the temporary keyring on every exit path
	trap "{ rm -rf '${gpghome}'; }" RETURN EXIT
	if ! gpg --homedir="${gpghome}" --batch --quiet --import <<<"${key}"; then
		return 1
	fi
	# ${quiet} is intentionally unquoted so the flag string word-splits
	if ! gpg --homedir="${gpghome}" --batch ${quiet} --trusted-key "${keyid}" --verify "${sigfile}" "${file}"; then
		return 1
	fi
	return 0
}
# Download $url to $out with wget.  When running quietly (verbosity below 3)
# a short "WGET url => dest" status line is printed instead of wget's own
# progress output.
function do_wget() {
	local out #output file
	local url #url of a file to be downloaded
	local verbosity #verbosity level
	local quiet
	local short_out
	out=$1
	url=$2
	verbosity=$3
	quiet=$(be_quiet $verbosity '--quiet')
	if [ "${quiet}" ]; then
		# strip the working directory from output path, so we get
		# something like build-rkt/tmp/coreos-common/pxe.img
		# instead of /home/foo/projects/rkt/rkt/build-rkt/...
		short_out="${out#${PWD}/}"
		printf ' %-12s %s\n' 'WGET' "${url} => ${short_out}"
	fi
	# ${quiet} unquoted on purpose: empty when verbose, '--quiet' otherwise
	wget ${quiet} --tries=20 --output-document="${out}" "${url}" # the wget default for retries is 20 times.
}
# Dump the given file to stderr, but only when running verbosely
# (verbosity level >= 3).
function cat_to_stderr_if_verbose() {
	local file #file to print
	local verbosity #verbosity level (integer)
	file=$1
	verbosity=$2
	# Equivalent to the old `[ -z $(be_quiet $verbosity ...) ]` probe, but
	# without the unquoted command substitution, which only produced a valid
	# test expression by accident when the substitution expanded to nothing.
	if [ "${verbosity}" -ge 3 ]; then
		cat "${file}" >&2
	fi
}
# maintain an gpg-verified url cache, assumes signature available @ $url.sig
# If the cached copy fails verification (or does not exist), both the file
# and its detached signature are re-downloaded and verified again; on
# success the cache file's mtime is refreshed.
function cache_url() {
	local cache #verified cache, will be downloaded from the url if bad or missing
	local url #url of the file to be downloaded
	local key #key used for verification
	local keyid #id of a key used for verification
	local verbosity #verbosity level
	local urlhash
	local sigfile
	local sigurl
	local gpgout
	cache=$1
	url=$2
	key=$3
	keyid=$4
	verbosity=$5
	# hash the URL into the signature filename so different URLs that share
	# one cache path cannot reuse each other's signatures
	urlhash=$(echo -n "${url}" | md5sum)
	sigfile="${cache}.${urlhash%% *}.sig"
	sigurl="${url}.sig"
	gpgout=$(mktemp)
	# drop the captured gpg output on every exit path
	trap "{ rm -f '${gpgout}'; }" RETURN EXIT
	# verify the cached copy if it exists
	if ! gpg_verify "${cache}" "${sigfile}" "${key}" "${keyid}" "${verbosity}" 2>"${gpgout}"; then
		# refresh the cache on failure, and verify it again
		cat_to_stderr_if_verbose "${gpgout}" "${verbosity}"
		do_wget "${cache}" "${url}" "${verbosity}"
		do_wget "${sigfile}" "${sigurl}" "${verbosity}"
		if ! gpg_verify "${cache}" "${sigfile}" "${key}" "${keyid}" "${verbosity}" 2>"${gpgout}"; then
			# print an error if verification failed
			cat "${gpgout}" >&2
			return 1
		fi
		cat_to_stderr_if_verbose "${gpgout}" "${verbosity}"
	else
		cat_to_stderr_if_verbose "${gpgout}" "${verbosity}"
	fi
	# file $cache exists and can be trusted
	touch "${cache}"
}
# cache pxe image
# Fetch (or reuse) the GPG-verified PXE image into ${ITMP}; the URL, key,
# key id and verbosity are expected to be set by the surrounding script.
cache_url "${ITMP}/pxe.img" "${IMG_URL}" "${GPG_KEY}" "${GPG_LONG_ID}" "${V}"
| true |
ddaa6f412c2e6467e1e135af45079d4a0d7c8c40 | Shell | azaringhalam/pyez-heat | /bin/create-zeroize-stack | UTF-8 | 414 | 3.171875 | 3 | [] | no_license | #!/bin/bash
if [[ $# -ne 4 ]]; then
  echo "Usage: create-zeroize-stack stack_name host user password"
  echo "Example: create-zeroize-stack vmx-zeroize junos_device_ip root juniper123"
  exit 1
fi

args=("$@")
STACK_NAME=${args[0]}
HOST=${args[1]}
USER=${args[2]}
PASSWORD=${args[3]}

# Use the password supplied on the command line: the previous version parsed
# $PASSWORD but then hard-coded "juniper123" in the heat parameters.
# Expansions are quoted so values containing spaces survive.
heat stack-create "$STACK_NAME" -f "$TMPLT_DIR/pyez_zeroize.yaml" -P "host=$HOST;user=$USER;password=$PASSWORD"
| true |
b08070d272f31949021e9abf1d17b16a2a6c0f4f | Shell | steneu/ffm-packages | /ffffm-banner_legacy/files/lib/gluon/upgrade/999-move-banner | UTF-8 | 107 | 2.59375 | 3 | [] | no_license | #!/bin/sh
# If the custom Freifunk banner was installed, replace the stock banner
# with a symlink pointing at it.
if [ -f /etc/banner.ffm ] ; then
	rm -rf /etc/banner
	/bin/ln -s /etc/banner.ffm /etc/banner
fi
| true |
a3066e769ebc4e6a4fb03ab4ea926bc8143a69de | Shell | themimixcompany/nebula-builder | /ssh-run | UTF-8 | 1,728 | 4.21875 | 4 | [
"BlueOak-1.0.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/usr/bin/env bash
set -eu
set -o pipefail

# Script name, host OS, and version; read by the helper functions below.
readonly SELF=$(basename "${BASH_SOURCE[0]}")
readonly OS=$(uname)
readonly VERSION=0.0.1

# Option flags, populated by parse_arguments (empty means "not set").
OPT_HELP=
OPT_DEBUG=
OPT_VERBOSE=
# Print an error message to stderr (red/bold on non-macOS terminals,
# plain on Darwin) and abort the script with status 1.
function error {
  case "${OS}" in
    Darwin) echo "error: ${@}" >&2 ;;
    *) echo -e "\e[0;31m\e[1merror: \e[0;0m${@}" >&2 ;;
  esac
  exit 1
}
# Print a warning to stderr (yellow on non-macOS terminals, plain on
# Darwin); unlike error(), execution continues.
function warn {
  case "${OS}" in
    Darwin) echo "warning: ${@}" >&2 ;;
    *) echo -e "\e[0;33mwarning: \e[0;0m${@}" >&2 ;;
  esac
}
# When --debug is set, dump the caller's arguments and the current option
# state to stdout; otherwise do nothing.
function debug {
  if [[ -n "${OPT_DEBUG}" ]]; then
    echo '**'
    echo \${@}: ${@}
    echo \$OPT_HELP: "${OPT_HELP}"
    echo \$OPT_DEBUG: "${OPT_DEBUG}"
    echo \$OPT_VERBOSE: "${OPT_VERBOSE}"
  fi
}
# Parse -h/-d/-v (and long variants) into the OPT_* globals via getopt.
function parse_arguments {
  debug ${FUNCNAME[0]} "$@"

  local opts
  # Assign separately from the `local` declaration: `local opts=$(getopt …)`
  # masked getopt's exit status ($? was local's, always 0), so bad flags were
  # never reported.  The `|| error …` arm also keeps `set -e` from killing
  # the script before the message is printed.
  opts=$(getopt -n "${SELF}" --options hdv --longoptions help,debug,verbose -- "$@") \
    || error "failed to parsing arguments."

  eval set -- "${opts}"

  while true; do
    case "$1" in
      (-h|--help) OPT_HELP=true; shift ;;
      (-d|--debug) OPT_DEBUG=true; shift ;;
      (-v|--verbose) OPT_VERBOSE=true; shift ;;
      (--) shift; break ;;
      (*) break ;;
    esac
  done
}
# Show usage when -h/--help was given or no positional arguments remain;
# otherwise return 0 so the caller can proceed.
function process_arguments {
  debug ${FUNCNAME[0]} "$@"
  if [[ -n "${OPT_HELP}" || "${#}" -lt 1 ]]; then
    display_usage
  else
    return 0
  fi
}
# Print the usage/help text to stdout and exit successfully.
function display_usage {
  debug ${FUNCNAME[0]} "$@"
  cat << EOF
${SELF} [OPTIONS]... <COMMAND> [OPTIONS]...
OPTIONS:
-h, --help    Show this help
COMMANDS:
EOF
  exit 0
}
# Run the given command under a fresh ssh-agent with the root key loaded.
function launch_agent {
  debug ${FUNCNAME[0]} "$@"
  # Use $* (not $@) inside the quoted string: "…; $@" spliced only the first
  # argument into the -c script and passed the remaining arguments as extra
  # argv words to bash ($0, $1, …), silently dropping them from the command.
  ssh-agent bash -c "ssh-add /root/.ssh/id_rsa; $*"
}
# Entry point: parse option flags, validate the remaining arguments, then
# run them as a command under a fresh ssh-agent.
function main {
  debug ${FUNCNAME[0]} "$@"
  parse_arguments "$@"
  process_arguments "$@"
  launch_agent "$@"
}
main "$@"
| true |
f0311c8fec407f934a4ddacca2e7f0be9222f290 | Shell | ranma/webcomics | /www.penny-arcade.com/mkindex.sh | ISO-8859-3 | 614 | 3.390625 | 3 | [] | no_license | #!/bin/bash
# $Log: mkindex.sh,v $
# Revision 1.3 2003-02-08 20:09:58 mitch
# Datum wird im Titel mit angzeigt
#
# Revision 1.2 1997/01/04 06:55:06 mitch
# Titel bernehmen, wenn vorhanden
#
# Revision 1.1 1997/01/04 02:32:45 mitch
# Initial source import.
# Kopierbasis: www.errantstory.com/mkindex.sh Rev. 1.1
#
# Build the "index" file: one TAB-separated line per image,
# "<file>\t<DD.MM.YYYY>[: title]", with the date parsed from the YYYYMMDD
# filename prefix and the optional title taken from a matching YYYYMMDD.txt.
# Iterate over the glob directly (glob expansion is already sorted) instead
# of parsing `ls` output, which breaks on unusual filenames.
for FILE in *.[gjp][ipn][fg]; do
	[ -e "${FILE}" ] || continue	# skip the literal pattern when nothing matches
	TITLEFILE=${FILE:0:8}.txt
	if [ -s "${TITLEFILE}" ] ; then
	    read -r TEXT < "${TITLEFILE}"
	    echo -e "${FILE}\t${FILE:6:2}.${FILE:4:2}.${FILE:0:4}: ${TEXT}"
	else
	    echo -e "${FILE}\t${FILE:6:2}.${FILE:4:2}.${FILE:0:4}"
	fi
done > index
| true |
0925d105660f179de7b9c04ada9372ee2a8cfdc9 | Shell | BioKom/tools | /galleries/generate_galerie.sh | UTF-8 | 6,742 | 3.15625 | 3 | [] | no_license | #!/bin/bash
#
# @author Betti Oesterholz
# @mail webmaster@BioKom.info
#
# Copyright (C) @c GPL3 2012 Betti Oesterholz
#
# This script is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License (GPL) as
# published by the Free Software Foundation, either version 3 of the
# License, or any later version.
#
# This script is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this script If not, see <http://www.gnu.org/licenses/>.
#
#######################################################################
#
# This Script generates the html file for a pictur galerie.
#
# History:
# 17.03.2012 Oesterholz created
#
# Command-line arguments: output basename, gallery title, and a
# whitespace-separated list of picture paths.
TO_GENERATE_FILE_NAME=$1
TO_USE_NAME=$2
PICTURES_TO_USE=$3

# Directory layout of the generated site (relative paths, thumbnails dir,
# and where the generated HTML pages go).
DIR_BASE="../"
DIR_TUMBNAIL="Bilder_tumb/"
DIR_GENERATED="generated/"

# Output files: the gallery page and the matching slideshow page.
TO_USE_GALERIE_NAME="${DIR_GENERATED}${TO_GENERATE_FILE_NAME}.html"
TO_USE_SLIEDESHOW_NAME="${DIR_GENERATED}${TO_GENERATE_FILE_NAME}_sl.html"

echo "Generating galerie with name \"${TO_USE_GALERIE_NAME}\" for pictures: \"${PICTURES_TO_USE}\""
echo "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.01 Frameset//EN\"" > ${TO_USE_GALERIE_NAME}
echo " \"http://www.w3.org/TR/html4/frameset.dtd\">" >> ${TO_USE_GALERIE_NAME}
echo "" >> ${TO_USE_GALERIE_NAME}
echo "" >> ${TO_USE_GALERIE_NAME}
echo "" >> ${TO_USE_GALERIE_NAME}
echo "<html>" >> ${TO_USE_GALERIE_NAME}
echo " <head>" >> ${TO_USE_GALERIE_NAME}
echo " <title>" >> ${TO_USE_GALERIE_NAME}
echo " ${TO_USE_NAME}" >> ${TO_USE_GALERIE_NAME}
echo " </title>" >> ${TO_USE_GALERIE_NAME}
echo " <link rel=\"stylesheet\" type=\"text/css\" media=\"screen\" href=\"${DIR_BASE}mittelalterGallery.css\">" >> ${TO_USE_GALERIE_NAME}
echo "" >> ${TO_USE_GALERIE_NAME}
echo " <!--defines for the picture galerie -->" >> ${TO_USE_GALERIE_NAME}
echo " <style type=\"text/css\">" >> ${TO_USE_GALERIE_NAME}
# Emit one CSS rule per picture so each gallery slide shows its thumbnail.
# PICTURES_TO_USE is intentionally unquoted: it is a whitespace-separated list.
PICTURE_NUMBER=1
for PICTURE_FILE in ${PICTURES_TO_USE}
do
	BASENAME=$(basename "${PICTURE_FILE}")
	echo "   #scrollbox a.slide${PICTURE_NUMBER} {background:url(${DIR_TUMBNAIL}${BASENAME}) no-repeat center center;}" >> "${TO_USE_GALERIE_NAME}"
	# Arithmetic expansion replaces the slower external `expr` call.
	PICTURE_NUMBER=$((PICTURE_NUMBER + 1))
done
echo " </style>" >> ${TO_USE_GALERIE_NAME}
echo "" >> ${TO_USE_GALERIE_NAME}
echo "</head>" >> ${TO_USE_GALERIE_NAME}
echo "" >> ${TO_USE_GALERIE_NAME}
echo "<body>" >> ${TO_USE_GALERIE_NAME}
echo "" >> ${TO_USE_GALERIE_NAME}
echo "<div id=\"navigation\">" >> ${TO_USE_GALERIE_NAME}
echo "" >> ${TO_USE_GALERIE_NAME}
echo "<ul>" >> ${TO_USE_GALERIE_NAME}
echo "<!--deactivete actual-->" >> ${TO_USE_GALERIE_NAME}
echo " <li><a href=\"${DIR_BASE}index.html\">Hauptseite</a></li>" >> ${TO_USE_GALERIE_NAME}
echo " <li><a href=\"${DIR_BASE}brautpaar.html\">Das Brautpaar</a></li>" >> ${TO_USE_GALERIE_NAME}
echo " <li><a href=\"${DIR_BASE}all_pictures.html\">Bilder Auswahl</a></li>" >> ${TO_USE_GALERIE_NAME}
echo " <li><a href=\"${DIR_BASE}fest.html\">Das Fest</a></li>" >> ${TO_USE_GALERIE_NAME}
echo " <li><a href=\"${DIR_BASE}hergang.html\">Aktivitäten</a></li>" >> ${TO_USE_GALERIE_NAME}
echo " <li><a href=\"${DIR_BASE}gaeste.html\">Gäste</a></li>" >> ${TO_USE_GALERIE_NAME}
echo " <li><a href=\"${DIR_BASE}burgStargard.html\">Burg Stargard</a></li>" >> ${TO_USE_GALERIE_NAME}
echo "</ul>" >> ${TO_USE_GALERIE_NAME}
echo "" >> ${TO_USE_GALERIE_NAME}
echo "</div><!--navigation-->" >> ${TO_USE_GALERIE_NAME}
echo "" >> ${TO_USE_GALERIE_NAME}
echo "<div id=\"head\">" >> ${TO_USE_GALERIE_NAME}
echo " <div id=\"headDiashow\">" >> ${TO_USE_GALERIE_NAME}
echo " <a href=\"${TO_USE_SLIEDESHOW_NAME}\"><img src=\"${DIR_BASE}picture_navi/diashow.png\"/></a>" >> ${TO_USE_GALERIE_NAME}
echo " </div><!--headDiashow-->" >> ${TO_USE_GALERIE_NAME}
echo " <div id=\"headLine\">" >> ${TO_USE_GALERIE_NAME}
echo " <h1 align=\"center\">Bilder</h1>" >> ${TO_USE_GALERIE_NAME}
echo " </div><!--headLine-->" >> ${TO_USE_GALERIE_NAME}
echo "</div><!--head-->" >> ${TO_USE_GALERIE_NAME}
echo "" >> ${TO_USE_GALERIE_NAME}
echo "<div id=\"holder\">" >> ${TO_USE_GALERIE_NAME}
echo " <ul id=\"scrollbox\">" >> ${TO_USE_GALERIE_NAME}
echo "" >> ${TO_USE_GALERIE_NAME}
# Emit one <li> slide entry per picture for the scrollbox gallery.
PICTURE_NUMBER=1
for PICTURE_FILE in ${PICTURES_TO_USE}
do
	PICTURE_TEXT="TEST"
	# "width" fixed: the original emitted the misspelled attribute "widht",
	# which browsers silently ignore.
	echo "      <li><a class=\"slide${PICTURE_NUMBER}\" tabindex=\"1\"><span><img src=\"${PICTURE_FILE}\" width=\"99%\" height=\"95%\"/><br />${PICTURE_TEXT}</span></a></li>" >> "${TO_USE_GALERIE_NAME}"
	# Arithmetic expansion replaces the slower external `expr` call.
	PICTURE_NUMBER=$((PICTURE_NUMBER + 1))
done
echo " </ul>" >> ${TO_USE_GALERIE_NAME}
echo "</div> <!-- end of holder -->" >> ${TO_USE_GALERIE_NAME}
echo "" >> ${TO_USE_GALERIE_NAME}
echo "</body>" >> ${TO_USE_GALERIE_NAME}
echo "" >> ${TO_USE_GALERIE_NAME}
echo "</html>" >> ${TO_USE_GALERIE_NAME}
echo "" >> ${TO_USE_GALERIE_NAME}
echo "Generating slideshow with name \"${TO_USE_GALERIE_NAME}\" "
echo "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.01 Frameset//EN\"" > ${TO_USE_SLIEDESHOW_NAME}
echo " \"http://www.w3.org/TR/html4/frameset.dtd\">" >> ${TO_USE_SLIEDESHOW_NAME}
echo "" >> ${TO_USE_SLIEDESHOW_NAME}
echo "<html>" >> ${TO_USE_SLIEDESHOW_NAME}
echo " <head>" >> ${TO_USE_SLIEDESHOW_NAME}
echo " <title>" >> ${TO_USE_SLIEDESHOW_NAME}
echo " ${TO_USE_NAME}" >> ${TO_USE_SLIEDESHOW_NAME}
echo " </title>" >> ${TO_USE_SLIEDESHOW_NAME}
echo "" >> ${TO_USE_SLIEDESHOW_NAME}
echo " <link rel=\"stylesheet\" type="text/css" media=\"screen\" href=\"mittelalterDiashow.css\">" >> ${TO_USE_SLIEDESHOW_NAME}
echo "" >> ${TO_USE_SLIEDESHOW_NAME}
echo "<!--defines for the diashow -->" >> ${TO_USE_SLIEDESHOW_NAME}
echo " <script language=\"JavaScript\" src=\"diashow.js\"></script>" >> ${TO_USE_SLIEDESHOW_NAME}
echo " <script language=\"JavaScript\">" >> ${TO_USE_SLIEDESHOW_NAME}
echo " <!--" >> ${TO_USE_SLIEDESHOW_NAME}
echo " Banner.img = 'DiashowBild';" >> ${TO_USE_SLIEDESHOW_NAME}
# Register every picture with the JavaScript Banner object for the diashow.
for PICTURE_FILE in ${PICTURES_TO_USE}
do
	echo "        Banner.Add('${PICTURE_FILE}','all_pictures.htm');" >> ${TO_USE_SLIEDESHOW_NAME}
done
echo " //-->" >> ${TO_USE_SLIEDESHOW_NAME}
echo " </script>" >> ${TO_USE_SLIEDESHOW_NAME}
echo "" >> ${TO_USE_SLIEDESHOW_NAME}
echo "</head>" >> ${TO_USE_SLIEDESHOW_NAME}
echo "" >> ${TO_USE_SLIEDESHOW_NAME}
echo "<body onload=\"Banner.Start()\">" >> ${TO_USE_SLIEDESHOW_NAME}
echo " <a href=\"#\" ><img src=\"\" name=\"DiashowBild\" height=95% style=\"float:center\" hspace=16 vspace=16 border=\"0\"></a>" >> ${TO_USE_SLIEDESHOW_NAME}
echo "</body>" >> ${TO_USE_SLIEDESHOW_NAME}
echo "" >> ${TO_USE_SLIEDESHOW_NAME}
echo "</html>" >> ${TO_USE_SLIEDESHOW_NAME}
| true |
1c10dbbe0f02f8311c1889cbbc95b9fee079fbae | Shell | ajstewart/mocpy | /travis/deploy_doc.sh | UTF-8 | 1,003 | 3.109375 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
set -e
# install python and curl for downloading the rust
# compiler
apt update
apt install -y python3 python3-pip git curl
# configure the remote and permissions
# to push to the cds-astro/gh-pages branch
git config --global user.email "$GH_EMAIL"
git config --global user.name "$GH_NAME"
git remote rm origin
git remote add cds-astro https://"$GH_NAME":"$GH_TOKEN"@github.com/cds-astro/mocpy.git
git fetch cds-astro
git checkout gh-pages
rm -rf *
git checkout cds-astro/master .
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- --default-toolchain nightly -y
export PATH="$HOME/.cargo/bin:$PATH"
ln -s /usr/bin/python3 /usr/bin/python
python -m pip install -U pip
python -m pip install -r requirements/docs.txt
python setup.py build_rust
find build/ -name "*.so" -type f -exec cp {} ./mocpy \;
cd docs
make html
cd ..
mv docs/_build/html/ /tmp/
rm -rf *
mv /tmp/html/* .
touch .nojekyll
git add --all
git commit -am "doc update"
git push cds-astro gh-pages
| true |
24d2dfa94ca2ff7b83820f2a9f32ee95be10a355 | Shell | AkihiroSuda/runrootless | /proot/PRoot/tests/test-5996858d.sh | UTF-8 | 840 | 3.109375 | 3 | [
"GPL-2.0-only",
"Apache-2.0"
] | permissive | if [ -z `which uname` ] || [ -z `which grep` ] || [ -z `which domainname` ] || [ -z `which hostname` ]|| [ -z `which env` ] || [ -z `which true`]; then
exit 125;
fi
UTSNAME="\\sysname\\nodename\\$(uname -r)\\version\\machine\\domainname\\0\\"
${PROOT} -k ${UTSNAME} uname -s | grep ^sysname$
${PROOT} -k ${UTSNAME} uname -n | grep ^nodename$
${PROOT} -k ${UTSNAME} uname -v | grep ^version$
${PROOT} -k ${UTSNAME} uname -m | grep ^machine$
${PROOT} -k ${UTSNAME} domainname | grep ^domainname$
${PROOT} -k ${UTSNAME} env LD_SHOW_AUXV=1 true | grep -E '^AT_HWCAP:[[:space:]]*0?$'
${PROOT} -0 -k ${UTSNAME} sh -c 'domainname domainname2; domainname' | grep ^domainname2$
${PROOT} -0 -k ${UTSNAME} sh -c 'hostname hostname2; hostname' | grep ^hostname2$
${PROOT} -0 -k ${UTSNAME} sh -c 'hostname hostname2; uname -n' | grep ^hostname2$
| true |
576a4f63acd74ed21b9d7c7ed468c27ab802154f | Shell | danielsuo/mobot | /src/move/scripts/install | UTF-8 | 1,059 | 2.625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env sh
# Run from [MOBOT]/src/move
sudo apt-get install -y arduino python-serial libusb-1.0-0-dev zip unzip
# Download phidgets libraries
wget http://www.phidgets.com/downloads/libraries/libphidget.tar.gz
tar zxvf libphidget.tar.gz
cd libphidget*
./configure
make
sudo make install
cd ..
# Install python bindings
wget http://www.phidgets.com/downloads/libraries/PhidgetsPython.zip
unzip PhidgetsPython.zip
cd PhidgetsPython
sudo python setup.py install
# Set up wifi to connect to mobot network
sudo sh -c "wpa_passphrase mobot mobotmobot >> /etc/wpa_supplicant/wpa_supplicant.conf"
sudo service networking reload
# Autologin on startup
sudo cp scripts/inittab /etc/inittab
# Set start-up script
sudo sh -c 'echo ". /home/pi/mobot/src/move/scripts/startup" >> /etc/profile'
# Set up VNC server
sudo apt-get install -y tightvncserver
# Save arduino libraries to appropriate library location
# https://www.dimensionengineering.com/software/SabertoothArduinoLibraries.zip
# https://www.dimensionengineering.com/software/KangarooArduinoLibrary.zip | true |
15cf491bbbdb2e05e4e6ce8cd92938e7404e4a22 | Shell | decent-im/decent.im-gentoo | /files/sbin/decent.im_process_templates | UTF-8 | 1,054 | 3.1875 | 3 | [] | no_license | #!/bin/bash -e
. /etc/decent.im/config
# TODO Auto-catch %%STUFF%% and replace with envvar
# TODO Rewrite existing config files only if there are changes, to avoid stray
# mtime bumps and services restarts (see checks in update_world).
cd /usr/share/decent.im/files
# Propagate non-template files
cp -a * /
for FILE_TEMPLATE in `find * | grep template`
do
FILE=/${FILE_TEMPLATE/.template/} # Mind the absolute path
if [[ -e $FILE ]]
then
mv -v $FILE ${FILE}.old
fi
cat $FILE_TEMPLATE | sed \
\
-e "s/%%ADMIN_JID%%/$ADMIN_JID/g" \
-e "s/%%DOMAIN_NAME%%/$DOMAIN_NAME/g" \
-e "s/%%EXTERNAL_IP%%/$EXTERNAL_IP/g" \
\
-e "s/%%XMPP_DB_NAME%%/$XMPP_DB_NAME/g" \
-e "s/%%XMPP_DB_USER%%/$XMPP_DB_USER/g" \
-e "s/%%XMPP_DB_PASS%%/$XMPP_DB_PASS/g" \
\
-e "s/%%TRANSPORT_DB_NAME%%/$TRANSPORT_DB_NAME/g" \
-e "s/%%TRANSPORT_DB_USER%%/$TRANSPORT_DB_USER/g" \
-e "s/%%TRANSPORT_DB_PASS%%/$TRANSPORT_DB_PASS/g" \
\
-e "s/%%TRANSPORT_SECRET%%/$TRANSPORT_SECRET/g" \
\
-e "s/%%TURN_SECRET%%/$TURN_SECRET/g" \
\
> $FILE
done
| true |
81ccd5cf49ac2fc454f3cc5e6d8a28c28725da5b | Shell | jonshouse1/jlc | /jlc_devices/esp8266/jlc_esp8266_devices_v0.20/tools/list | UTF-8 | 225 | 3.71875 | 4 | [] | no_license | #!/bin/bash
#make a list of files and sizes
ls -1 ../www >/tmp/list
echo "<pre>"
n=0;
while read p; do
#echo $p
SIZE=`stat --printf="%s" ../www/$p`
echo -e "<a href=$p>$n\t$SIZE\t$p</a>"
done </tmp/list
echo "</pre>"
| true |
e84590a4cea1e84aa50aaad1b224c387d48451c1 | Shell | VirtualNexus/docker_scumblr | /scripts/start-server.sh | UTF-8 | 600 | 2.53125 | 3 | [] | no_license | #!/bin/bash
source /etc/profile.d/rvm.sh
{ # your 'try' block
git clone https://github.com/Netflix/scumblr.git /scumblr &&
mv output
} || { # your 'catch' block
mv log
}
rm tmp cd /scumblr
if [ "$SCUMBLR_CREATE_DB" == "true" ]
then
bundle exec rake db:create
fi
if [ "$SCUMBLR_LOAD_SCHEMA" == "true" ]
bundle exec rake db:schema:load
then
if [ "$SCUMBLR_RUN_MIGRATIONS" == "true" ]
bundle exec rake db:migrate
then
bundle exec rake db:seed
bundle exec rake assets:precompile
bundle exec unicorn -D -p 8080
redis-server &
sidekiq -l log/sidekiq.log &
nginx &
/bin/bash
| true |
11b63644837c3388dcb2fb2d57fb05583af54c16 | Shell | jtligon/hummingbird | /camera-capture.sh | UTF-8 | 599 | 2.890625 | 3 | [] | no_license | #!/usr/bin/bash -x
STORAGE_FOLDER='/home/jligon/data'
mkdir -p $STORAGE_FOLDER
cd $STORAGE_FOLDER
shopt -s nullglob
numfiles=(*)
numfiles=${#numfiles[@]}
filename='capt'$numfiles'.jpg'
gphoto2 \
--filename $filename \
--quiet \
--set-config manualfocusdrive="None"\
--set-config eosremoterelease="Immediate" \
--set-config eosremoterelease="Release Full" \
--wait-event-and-download=1s
#https://medium.com/nerdery/dslr-webcam-setup-for-linux-9b6d1b79ae22
#https://github.com/gphoto/gphoto2/issues/161
# --capture-image-and-download \
#mv $filename $STORAGE_FOLDER
| true |
1b8badd280cdf736211a875099335a6a1da95b0e | Shell | kgyrtkirk/hive-dev-box | /bin/send_custom_jars | UTF-8 | 392 | 2.640625 | 3 | [] | no_license | #!/bin/bash
set -e
J=$HOME/h.tar.gz
K=${K:-~/.kube/config}
[ "$NS" == "" ] && echo "NS not set!" && exit 1
[ ! -s "$K" ] && echo "K=$K is not set correctly !" && exit 1
set -x
#cd packaging/target/apache-hive-3.1.3000.7.2.3.0-181-bin/apache-hive-3.1.3000.7.2.3.0-181-bin/lib
cd packaging/target/apache-hive-*-bin/apache-hive-*-bin/lib
tar czf $J hive*jar
deploy_custom_jars $K $NS $J
| true |
064b12f77b32898f9076321ddda22dda1f0651bb | Shell | RandomInsano/port-of-rust | /support/build-gcc.sh | UTF-8 | 348 | 3.34375 | 3 | [
"Apache-2.0",
"MIT",
"LicenseRef-scancode-other-permissive",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
set -ex
VERSION=5.3.0
mkdir -p /tmp/build
cd /tmp/build
curl https://ftp.gnu.org/gnu/gcc/gcc-$VERSION/gcc-$VERSION.tar.bz2 | tar xjf -
mkdir gcc-build
cd gcc-$VERSION
./contrib/download_prerequisites
for patch in $PATCHES; do
curl $patch | patch -Np0
done
cd ../gcc-build
../gcc-$VERSION/configure "$@"
make -j10
make install
| true |
50e77165916c5f128341f3959e67b76b4737310d | Shell | andrewp-as-is/dotfiles | /.bin/dev/html | UTF-8 | 325 | 3.40625 | 3 | [] | no_license | #!/usr/bin/env bash
{ set +x; } 2>/dev/null
# Print usage to stderr, then exit: status 0 when invoked for -h/--help,
# status 1 otherwise (mirrors the original bare-`exit`-after-test trick).
usage() {
    echo "usage: $(basename $0) path" 1>&2
    if [[ $1 == "-h" || $1 == "--help" ]]; then
        exit 0
    fi
    exit 1
}
# Show help and exit when asked; otherwise require exactly one argument.
[[ $1 == "-h" ]] || [[ $1 == "--help" ]] && usage "$@"
[[ $# != 1 ]] && usage
# Case-insensitive matching for the Django-project detection below.
shopt -s nocasematch
# Inside a Django tree, render the file as a Django template; otherwise
# open it with the system default handler.
[[ $PWD == *django* ]] && { django-template-cli "$1"; exit; }
open "$1"
| true |
9fd9653cba6eea87a96357607ec592e22ba089cc | Shell | djMax/docker-postgis | /01_postgres.sh | UTF-8 | 420 | 3.0625 | 3 | [] | no_license | #!/bin/bash
set -e
DATADIR="/var/lib/postgresql/9.4/main"
CONF="/etc/postgresql/9.4/main/postgresql.conf"
POSTGRES="/usr/lib/postgresql/9.4/bin/postgres"
if [ -n "${POSTGRES_PASSWD}" ]; then
su postgres sh -c "$POSTGRES --single -jE postgres -D $DATADIR -c config_file=$CONF" <<-EOSQL
ALTER USER docker WITH ENCRYPTED PASSWORD '$POSTGRES_PASSWD'
EOSQL
fi
su postgres sh -c "$POSTGRES -D $DATADIR -c config_file=$CONF"
| true |
f2b15235375b0c962e456abed13ed0511096c4fd | Shell | kescherCode/dotfiles-template | /.bashrc | UTF-8 | 3,174 | 3.453125 | 3 | [] | no_license | #
# ~/.bashrc
#
# If not running interactively, don't do anything
[[ $- != *i* ]] && return
[[ -f /etc/profile ]] && . /etc/profile
[[ -f ~/.profile ]] && . ~/.profile
# don't put duplicate lines or lines starting with space in the history.
# See bash(1) for more options
HISTCONTROL=ignoreboth
# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
HISTSIZE=10000
HISTFILESIZE=10000
if ! shopt -oq posix; then
if [ -f /usr/share/bash-completion/bash_completion ]; then
. /usr/share/bash-completion/bash_completion
elif [ -f /etc/bash_completion ]; then
. /etc/bash_completion
fi
fi
# Change the window title of X terminals
case ${TERM} in
[aEkx]term*|rxvt*|gnome*|konsole*|interix)
PS1='\[\033]0;\u@\h:\w\007\]'
;;
screen*)
PS1='\[\033k\u@\h:\w\033\\\]'
;;
*)
unset PS1
;;
esac
use_color=true
# Set colorful PS1 only on colorful terminals.
# dircolors --print-database uses its own built-in database
# instead of using /etc/DIR_COLORS. Try to use the external file
# first to take advantage of user additions. Use internal bash
# globbing instead of external grep binary.
safe_term=${TERM//[^[:alnum:]]/?} # sanitize TERM
match_lhs=""
[[ -f ~/.dir_colors ]] && match_lhs="${match_lhs}$(<~/.dir_colors)"
[[ -f /etc/DIR_COLORS ]] && match_lhs="${match_lhs}$(</etc/DIR_COLORS)"
[[ -z ${match_lhs} ]] \
&& type -P dircolors >/dev/null \
&& match_lhs=$(dircolors --print-database)
#[[ $'\n'${match_lhs} == *$'\n'"TERM "${safe_term}* ]] && use_color=true
if ${use_color} ; then
# Enable colors for ls, etc. Prefer ~/.dir_colors #64489
if type -P dircolors >/dev/null ; then
if [[ -f ~/.dir_colors ]] ; then
eval $(dircolors -b ~/.dir_colors)
elif [[ -f /etc/DIR_COLORS ]] ; then
eval $(dircolors -b /etc/DIR_COLORS)
fi
fi
if [[ ${EUID} == 0 ]] ; then
PS1+='\[\033[01;31m\]\h\[\033[01;34m\] \W \$\[\033[00m\] '
else
PS1+='\[\033[01;32m\]\u@\h\[\033[01;34m\] \w \$\[\033[00m\] '
fi
else
if [[ ${EUID} == 0 ]] ; then
# show root@ when we don't have colors
PS1+='\u@\h \W \$ '
else
PS1+='\u@\h \w \$ '
fi
fi
unset use_color safe_term match_lhs sh oldterm
xhost +local:root > /dev/null 2>&1
complete -cf sudo
# Bash won't get SIGWINCH if another process is in the foreground.
# Enable checkwinsize so that bash will check the terminal size when
# it regains control. #65623
# http://cnswww.cns.cwru.edu/~chet/bash/FAQ (E11)
shopt -s checkwinsize
# Disable completion when the input buffer is empty. i.e. Hitting tab
# and waiting a long time for bash to expand all of $PATH.
shopt -s no_empty_cmd_completion
shopt -s expand_aliases
# If set, the pattern "**" used in a pathname expansion context will
# match all files and zero or more directories and subdirectories.
shopt -s globstar
# export QT_SELECT=4
# Enable history appending instead of overwriting. #139609
shopt -s histappend
# colored GCC warnings and errors
export GCC_COLORS='error=01;31:warning=01;35:note=01;36:caret=01;32:locus=01:quote=01'
if [ -f ~/.bash_aliases ]; then
. ~/.bash_aliases
fi
export EDITOR=/usr/bin/micro
for sh in /etc/bash/bashrc.d/* ; do
[[ -r ${sh} ]] && source "${sh}"
done
w
| true |
31eab43139834d711117a17aaec3d1e4ec2ca3b6 | Shell | intel/dffml | /.ci/run.sh | UTF-8 | 10,425 | 3.765625 | 4 | [
"MIT",
"LicenseRef-scancode-generic-export-compliance"
] | permissive | #!/usr/bin/env bash
set -ex
if [ -d "$HOME/.local/bin" ]; then
export PATH="$HOME/.local/bin:$PATH"
fi
SRC_ROOT=${SRC_ROOT:-"${PWD}"}
PYTHON=${PYTHON:-"python3"}
if [ "x${VIRTUAL_ENV}" != "x" ]; then
PYTHON="python"
fi
TEMP_DIRS=()
# Copy temporary fixes to a temporary directory in case we change branches
TEMPFIX="$(mktemp -d)"
TEMP_DIRS+=("${TEMPFIX}")
cp -r ${SRC_ROOT}/scripts/tempfix/* "${TEMPFIX}/"
python_version="$(${PYTHON} -c 'import sys; print(f"{sys.version_info.major}.{sys.version_info.minor}")')"
# Run the unit tests of a plugin's examples/ directory, if it has one,
# installing any example-specific requirements first.  Leaves the working
# directory at the plugin root on return.
function run_plugin_examples() {
  if [ ! -d "${SRC_ROOT}/${PLUGIN}/examples" ]; then
    return
  fi
  cd "${SRC_ROOT}/${PLUGIN}/examples"
  if [ -f "requirements.txt" ]; then
    "${PYTHON}" -m pip install -r requirements.txt
  fi
  "${PYTHON}" -m unittest discover -v
  cd "${SRC_ROOT}/${PLUGIN}"
}
# Run unittest (arguments forwarded, or full discovery when none are given)
# under coverage, and fail the build if any coroutine was left un-awaited or
# any test was skipped, errored, or failed.
test_no_skips() {
  # Log skipped tests to file
  check_skips="$(mktemp)"
  TEMP_DIRS+=("${check_skips}")
  # Run all if nothing given.  Test $# directly: the old `[ "x$@" == "x" ]`
  # expanded every argument into the test expression and broke as soon as
  # more than one argument was passed.
  if [ $# -eq 0 ]; then
    UNITTEST_ARGS="discover -v"
  else
    UNITTEST_ARGS=$@
  fi
  # Run with coverage; $UNITTEST_ARGS is intentionally unquoted so the
  # argument string word-splits back into individual unittest arguments.
  TEST_DOCS=1 "${PYTHON}" -u -m coverage run -m unittest $UNITTEST_ARGS 2>&1 | tee "${check_skips}"
  "${PYTHON}" -m coverage report -m
  # Fail if any coroutines were not awaited
  unawaited=$(grep -nE 'coroutine .* was never awaited' "${check_skips}" | wc -l)
  if [ "$unawaited" -ne 0 ]; then
    echo "Found un-awaited coroutines" >&2
    exit 1
  fi
  # Fail if any tests were skipped or errored
  skipped=$(tail -n 1 "${check_skips}" | grep -E '(skipped=[0-9]+)' | wc -l)
  if [ "$skipped" -ne 0 ]; then
    echo "Tests were skipped" >&2
    exit 1
  fi
  errors=$(grep -E '(errors=[0-9]+)' "${check_skips}" | wc -l)
  if [ "$errors" -ne 0 ]; then
    echo "Tests errored" >&2
    exit 1
  fi
  failures=$(grep -E '(failures=[0-9]+)' "${check_skips}" | wc -l)
  if [ "$failures" -ne 0 ]; then
    echo "Tests failed" >&2
    exit 1
  fi
}
# Test one plugin directory (or, when "." is passed, the repo root).  For a
# plugin: install it, run its tests (no skips allowed), and its examples.
# For the root: additionally exercise the `dffml service dev create`
# scaffolding for every plugin type, build a scaffolded package's docs, and
# (on release branches under GitHub Actions) cut a release.
function run_plugin() {
  export PLUGIN="${1}"
  cd "${SRC_ROOT}/${PLUGIN}"
  # Install plugin
  "${PYTHON}" -m pip install -U -e .[dev]
  if [ "x${PLUGIN}" != "x." ]; then
    # Test ensuring no tests were skipped
    test_no_skips
    # Run examples if they exist and we aren't at the root
    run_plugin_examples
  else
    # If we are at the root. Install plugsin and run various integration tests
    # Run the tests but not the long documentation consoletests
    "${PYTHON}" -u -m unittest discover -v
    # Try running create command
    plugin_creation_dir="$(mktemp -d)"
    TEMP_DIRS+=("${plugin_creation_dir}")
    cd "${plugin_creation_dir}"
    # Run the create command to create a non-dffml package
    plugin="blank"
    dffml service dev create "${plugin}" "ci-test-${plugin}"
    cd "ci-test-${plugin}"
    "${PYTHON}" -m pip install -U .
    "${PYTHON}" -m unittest discover -v
    # Build the docs
    "${PYTHON}" -c 'import os, pkg_resources; [e.load() for e in pkg_resources.iter_entry_points("console_scripts") if e.name.startswith("sphinx-build")][0]()' -W -b html docs/ built_html_docs/
    cd "${plugin_creation_dir}"
    # Plugins we know how to make
    PLUGINS=(\
      "model" \
      "operations" \
      "service" \
      "source" \
      "config")
    # Scaffold, install, and test a package of each plugin type.
    for plugin in ${PLUGINS[@]}; do
      dffml service dev create "${plugin}" "ci-test-${plugin}"
      cd "ci-test-${plugin}"
      "${PYTHON}" -m pip install -U .
      "${PYTHON}" -m unittest discover -v
      cd "${plugin_creation_dir}"
    done
    # Install all the plugins so examples can use them
    "${PYTHON}" -m dffml service dev install
    # Run the examples
    run_plugin_examples
    # Test ensuring no tests were skipped
    test_no_skips
  fi
  cd "${SRC_ROOT}"
  # Report installed versions of packages
  "${PYTHON}" -m pip freeze
  # On a branch named like a version (X.Y.Z) under GitHub Actions, release.
  if [[ "x${GITHUB_ACTIONS}" == "xtrue" ]] && \
    [[ "x${GITHUB_REF}" =~ xrefs/heads/[a-zA-Z0-9]*\.[a-zA-Z0-9]*\.[a-zA-Z0-9]* ]]; then
    git status
    dffml service dev release "${PLUGIN}"
  fi
}
# Run the consoletest for one docs page.  The docs path (e.g.
# docs/foo/bar.rst) is mapped to a test id: strip the docs/ prefix, replace
# slashes with underscores, drop the .rst suffix.
function run_consoletest() {
  export PLUGIN="${1/docs\//}"
  export PLUGIN="${PLUGIN//\//_}"
  export PLUGIN="${PLUGIN/\.rst/}"
  cd "${SRC_ROOT}"
  # Log tests to file
  test_log="$(mktemp)"
  TEMP_DIRS+=("${test_log}")
  # Install base package with testing and development utilities
  "${PYTHON}" -m pip install -U -e ".[dev]"
  test_no_skips -v "tests.docs.test_consoletest.TestDocs.test_${PLUGIN}"
  cd "${SRC_ROOT}"
  git status
}
# On pull requests, require that CHANGELOG.md gained at least one line
# relative to origin/main; other events pass trivially.
function run_changelog() {
  # Only run this check on pull requests
  if [ "x$GITHUB_EVENT_NAME" != "xpull_request" ]; then
    exit 0
  fi
  # Ensure the number of lines added in the changelog is not 0
  added_to_changelog=$(git diff origin/main --numstat -- CHANGELOG.md \
    | awk '{print $1}')
  if [ "x$added_to_changelog" == "x" ] || [ "$added_to_changelog" -eq 0 ]; then
    echo "No changes to CHANGELOG.md" >&2
    exit 1
  fi
}
# Fail the build if any .py/.rst/.md file under the tree contains trailing
# whitespace; offending file:line matches are printed to stderr.
function run_whitespace() {
  export whitespace=$(mktemp -u)
  function rmtempfile () {
    rm -f "$whitespace"
  }
  trap rmtempfile EXIT
  # Group the -name alternatives with \( ... \): without grouping, -o made
  # the -exec action bind only to '*.md', so '*.py' and '*.rst' files were
  # never grepped at all (find AND binds tighter than OR).
  find . -type f \( -name '*.py' -o -name '*.rst' -o -name '*.md' \) -exec grep -EHn " +$" {} \; 2>&1 > "$whitespace"
  lines=$(wc -l < "$whitespace")
  if [ "$lines" -ne 0 ]; then
    echo "Trailing whitespace found" >&2
    cat "${whitespace}" >&2
    exit 1
  fi
}
# Style check: black over the Python tree, then verify every tracked .js
# file is already formatted per js-beautify (diff exits non-zero otherwise).
function run_style() {
  black --check "${SRC_ROOT}"
  for filename in $(git ls-files \*.js); do
    # NOTE(review): this loop previously invoked a literal `$(unknown)`
    # command (apparently a garbled placeholder); restored to the loop
    # variable, the only value that makes sense here — confirm upstream.
    echo "Checking JavaScript file \'${filename}\'"
    diff <(js-beautify -n -s 2 "${filename}") "${filename}"
  done
}
# Lint commit messages on every branch except main (main's merged history
# should not retroactively fail CI).
function run_commit(){
  BRANCH="$(echo $GITHUB_REF | cut -d'/' -f 3)"
  echo "On Branch: ${BRANCH}"
  if [[ "$BRANCH" != "main" ]]; then
    dffml service dev lint commits
  fi
}
# `lint imports` rewrites files in place; a dirty git tree afterwards means
# some files contained unused imports, which fails the check.
function run_imports(){
  dffml service dev lint imports
  if [[ -z $(git status -s) ]]
  then
    echo "Yay ! No unused imports found"
  else
    echo "There maybe unused imports in the following files:"
    git status -s | grep "M" | awk '{print $2}'
    exit 1
  fi
}
function run_docs() {
export GIT_SSH_COMMAND='ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no'
cd "${SRC_ROOT}"
"${PYTHON}" -m pip install --prefix=~/.local -U -e "${SRC_ROOT}[dev]"
"${PYTHON}" -m dffml service dev install -user
last_release=$(git log -p -- dffml/version.py \
| grep \+VERSION \
| grep -v rc \
| sed -e 's/.* = "//g' -e 's/"//g' \
| head -n 1)
# Fail if there are any changes to the Git repo
changes=$(git status --porcelain | wc -l)
if [ "$changes" -ne 0 ]; then
echo "Running docs.py resulted in changes to the Git repo" >&2
echo "Need to run dffml service dev docs and commit changes" >&2
exit 1
fi
# Make main docs
main_docs="$(mktemp -d)"
TEMP_DIRS+=("${main_docs}")
rm -rf pages
dffml service dev docs -no-strict || ./scripts/docs.sh
# Check to see if docs built successfully
if [ ! -f pages/index.html ]; then
echo "Docs build failed" 1>&2
exit 1
fi
mv pages "${main_docs}/html"
# Make alice docs
alice_branch_repo="$(mktemp -d)"
alice_docs="$(mktemp -d)"
TEMP_DIRS+=("${main_docs}")
git clone --depth=1 https://github.com/intel/dffml -b alice "${alice_branch_repo}"
rm -rf pages
dffml service dev docs -no-strict || ./scripts/docs.sh
# Check to see if docs built successfully
if [ ! -f pages/index.html ]; then
echo "::warning::Alice docs build failed" 1>&2
fi
# Make last release docs
release_docs="$(mktemp -d)"
TEMP_DIRS+=("${release_docs}")
rm -rf pages
git clean -fdx
git reset --hard HEAD
echo "Checking out last release ${last_release}"
git checkout "${last_release}"
git clean -fdx
git reset --hard HEAD
# Uninstall dffml
"${PYTHON}" -m pip uninstall -y dffml
# Remove .local to force install of correct dependency versions
rm -rf ~/.local
"${PYTHON}" -m pip install --prefix=~/.local -U -e "${SRC_ROOT}[dev]"
"${PYTHON}" -m dffml service dev install -user
dffml service dev docs -no-strict || ./scripts/docs.sh
# Check to see if docs built successfully
if [ ! -f pages/index.html ]; then
echo "Docs build failed" 1>&2
exit 1
fi
mv pages "${release_docs}/html"
git clone https://github.com/intel/dffml -b gh-pages \
"${release_docs}/old-gh-pages-branch"
mv "${release_docs}/old-gh-pages-branch/.git" "${release_docs}/html/"
mv "${main_docs}/html" "${release_docs}/html/main"
mv "${alice_docs}/html" "${release_docs}/html/alice"
cd "${release_docs}/html"
git config user.name 'Alice'
git config user.email 'alice@docs.ci.dffml.chadig.com'
git add -A
git commit -sam "docs: $(date)"
# Don't push docs unless we're running on main
if [ "x${GITHUB_ACTIONS}" == "xtrue" ] && [ "x${GITHUB_REF}" != "xrefs/heads/main" ]; then
return
fi
ssh_key_dir="$(mktemp -d)"
TEMP_DIRS+=("${ssh_key_dir}")
mkdir -p ~/.ssh
chmod 700 ~/.ssh
"${PYTHON}" -c "import pathlib, base64, os; keyfile = pathlib.Path(\"${ssh_key_dir}/github\").absolute(); keyfile.write_bytes(b''); keyfile.chmod(0o600); keyfile.write_bytes(base64.b32decode(os.environ['SSH_DFFML_GH_PAGES']))"
ssh-keygen -y -f "${ssh_key_dir}/github" > "${ssh_key_dir}/github.pub"
export GIT_SSH_COMMAND="${GIT_SSH_COMMAND} -o IdentityFile=${ssh_key_dir}/github"
git remote set-url origin git@github.com:intel/dffml
git push -f
cd -
git reset --hard HEAD
git checkout main
}
function run_lines() {
"${PYTHON}" ./scripts/check_literalincludes.py
}
function run_container() {
docker build --build-arg DFFML_RELEASE=main -t intelotc/dffml .
docker run --rm intelotc/dffml version
docker run --rm intelotc/dffml service dev entrypoints list dffml.model
}
function cleanup_temp_dirs() {
if [ "x${NO_RM_TEMP}" != "x" ]; then
return
fi
for temp_dir in ${TEMP_DIRS[@]}; do
rm -rf "${temp_dir}"
done
}
# Clean up temporary directories on exit
trap cleanup_temp_dirs EXIT
if [ "x${1}" == "xchangelog" ]; then
run_changelog
elif [ "x${1}" == "xwhitespace" ]; then
run_whitespace
elif [ "x${1}" == "xstyle" ]; then
run_style
elif [ "x${1}" == "xcommit" ]; then
run_commit
elif [ "x${1}" == "ximport" ]; then
run_imports
elif [ "x${1}" == "xdocs" ]; then
run_docs
elif [ "x${1}" == "xlines" ]; then
run_lines
elif [ "x${1}" == "xcontainer" ]; then
run_container
elif [ "x${1}" == "xconsoletest" ]; then
run_consoletest "${2}"
elif [ -d "${1}" ]; then
run_plugin "${1}"
else
echo "Not sure what to do" 2>&1
exit 1
fi
| true |
2b42c877b107b38658df30f398e7fbe486840c39 | Shell | murdacg1/assignment-5-mapreduce | /word-counting/part-1/job/final_concatenate_and_sort.bash | UTF-8 | 3,408 | 3.09375 | 3 | [] | no_license | #!/bin/bash
cat part-* | sort -k2 -n -r > all-sorted
sed '1,$s/,//g' all-sorted | sed '1,$s/\t/ /g' | sed '1,$s/ / /g' | sed '1,$s/ / /g' | sed '1,$s/ / /g' > all-sorted-no-commas-single-space
# remove all words with a digit 0-9 or words consisting of just a single letter
# (these are abbreviations since Finnish does not have such words unlike in English which has a and I)
#cat all-sorted-no-commas-single-space | grep -v -E '^.*[0-9]+.*\s' | grep -v '^[a-zäöå]\s' | grep -E '^[a-zäöå]+\s' > all-sorted-no-commas-single-space-realwords
cat all-sorted-no-commas-single-space | grep -v -E '^.*[0-9]+.*\s' | grep -v '^[a-z]\s' | grep -E '^[a-z]+\s' > all-sorted-no-commas-single-space-realwords
total_words=`awk '{s+=$2} END {print s}' all-sorted-no-commas-single-space`
awk -v total_words=$total_words '{print $1, $2, 100.0*$2/total_words}' all-sorted-no-commas-single-space > all-sorted-no-commas-single-space-freqs
echo >> all-sorted-no-commas-single-space-freqs
echo 'Total words:' >> all-sorted-no-commas-single-space-freqs
echo $total_words >> all-sorted-no-commas-single-space-freqs
head -150 all-sorted-no-commas-single-space-freqs > all-sorted-no-commas-single-space-freqs-top-150
total_words=`awk '{s+=$2} END {print s}' all-sorted-no-commas-single-space-realwords`
awk -v total_words=$total_words '{print $1, $2, 100.0*$2/total_words}' all-sorted-no-commas-single-space-realwords > all-sorted-no-commas-single-space-realwords-freqs
echo >> all-sorted-no-commas-single-space-realwords-freqs
echo 'Total words:' >> all-sorted-no-commas-single-space-realwords-freqs
echo $total_words >> all-sorted-no-commas-single-space-realwords-freqs
head -150 all-sorted-no-commas-single-space-realwords-freqs > all-sorted-no-commas-single-space-realwords-freqs-top-150
grep -E '^...\s' all-sorted-no-commas-single-space > all-sorted-no-commas-single-space-three-chars
total_words=`awk '{s+=$2} END {print s}' all-sorted-no-commas-single-space-three-chars`
echo >> all-sorted-no-commas-single-space-three-chars
echo 'Total words:' >> all-sorted-no-commas-single-space-three-chars
echo $total_words >> all-sorted-no-commas-single-space-three-chars
grep -E '^...\s' all-sorted-no-commas-single-space-realwords > all-sorted-no-commas-single-space-realwords-three-chars
total_words=`awk '{s+=$2} END {print s}' all-sorted-no-commas-single-space-realwords-three-chars`
echo >> all-sorted-no-commas-single-space-realwords-three-chars
echo 'Total words:' >> all-sorted-no-commas-single-space-realwords-three-chars
echo $total_words >> all-sorted-no-commas-single-space-realwords-three-chars
grep -E '^.....\s' all-sorted-no-commas-single-space > all-sorted-no-commas-single-space-five-chars
total_words=`awk '{s+=$2} END {print s}' all-sorted-no-commas-single-space-five-chars`
echo >> all-sorted-no-commas-single-space-five-chars
echo 'Total words:' >> all-sorted-no-commas-single-space-five-chars
echo $total_words >> all-sorted-no-commas-single-space-five-chars
grep -E '^.....\s' all-sorted-no-commas-single-space-realwords > all-sorted-no-commas-single-space-realwords-five-chars
total_words=`awk '{s+=$2} END {print s}' all-sorted-no-commas-single-space-realwords-five-chars`
echo >> all-sorted-no-commas-single-space-realwords-five-chars
echo 'Total words:' >> all-sorted-no-commas-single-space-realwords-five-chars
echo $total_words >> all-sorted-no-commas-single-space-realwords-five-chars
| true |
d90714eb1de11b65c18ef4ffe0ed1b21ee6a79a0 | Shell | wangsl/lustre-rsync | /psync-based-on-files.sh | UTF-8 | 1,333 | 3.640625 | 4 | [] | no_license | #!/bin/bash
source /share/apps/lustre-copy/common.sh
if [ "$USER" == "root" ]; then ulimit -n 1024000; fi
prefix=$(/usr/bin/mktemp --dry-run XXXXXXXX)
source_dir="$1"
target_dir="$2"
if [ ! -d "$source_dir" ]; then echo "source $source_dir is not a folder"; exit 1; fi
mkdir -p $target_dir || exit 1
if [ ! -d "$target_dir" ]; then echo "target $target_dir is not a folder"; exit 1; fi
lfs find "$source_dir" --maxdepth 1 | egrep -v ^${source_dir}$ > $tmp_dir/$prefix.full
if [ -s $tmp_dir/$prefix.full ]; then
split --lines=$n_lines --suffix-length=8 --numeric-suffixes $tmp_dir/$prefix.full $tmp_dir/$prefix-
for lst in $tmp_dir/$prefix-*; do
{
while IFS= read -r line; do
printf '/%s\n' "$(basename "$line")"
done < $lst
} > $lst.tmp
rm -rf $lst
echo "$rsync_alias --files-from=$lst.tmp '$source_dir' '$target_dir' > $lst.log 2>&1 && rm -rf $lst.tmp $lst.log"
done | $parallel --no-notice --jobs $n_jobs
fi
rm -rf $tmp_dir/$prefix.full
n_source=$(lfs find "$source_dir" --maxdepth 1 -type f | wc -l)
n_target=$(lfs find "$target_dir" --maxdepth 1 -type f | wc -l)
info="[$(date '+%Y-%m-%dT%H:%M:%S.%3N')] $(hostname); $source_dir; $log_file"
if [ $n_source -eq $n_target ]; then
printf '%s \e[0;34m==DONE==\e[0m\n' "$info"
else
printf '%s \e[1;31m==ERROR==\e[0m\n' "$info"
fi
| true |
253d5fe633ebb9ab5fc57982afc7a6a3c929f9f7 | Shell | randianb/dockers-utiles | /balanceadores/nginx/scripts/docker/bash.sh | UTF-8 | 501 | 2.515625 | 3 | [] | no_license | source ./scripts/docker/ambiente.sh
# OBSCURITY IS ROUNDING US
docker exec -it $(docker ps -a -q --filter name=$NOMBRE_DOCKER --format="{{.ID}}") bash
#VALIDO SI ME PASARON UN PARAMETRO
if [ $? -eq 0 ]; then
success=1
else
docker exec -it $(docker ps -a -q --filter name=$NOMBRE_DOCKER\_$1 --format="{{.ID}}") bash
fi
#VALIDO SI FUNCO EL COMANDO
if [ $? -eq 0 ]; then
success=1
else
docker exec -it $(docker ps -a -q --filter name=$NOMBRE_DOCKER\_1 --format="{{.ID}}") bash
fi
| true |
280b5b1484307e4f783d07fc1db8f0977eb7d404 | Shell | aroswell/ci-integration | /ci-cd/run-test-automation.sh | UTF-8 | 775 | 3.609375 | 4 | [] | no_license | #!/bin/bash
# This script provides a way to run linting and test automation
# in a docker container. It particularly pertains to
# developers working on non-Unix platforms locally.
# Your continuous integration can be implemented using Docker
# in your pipeline.
# Exit immediately if any command returns with a non-zero status after it runs
set -e
# build image
printf "Build Image:\n"
docker build -t gitrise-test-automation .
# run container and execute test automation
printf "Run test automation inside container:\n"
docker run -i --rm gitrise-test-automation /bin/bash << COMMANDS
set -e
echo 'Linting with shellcheck'
shellcheck -x gitrise.sh
echo 'Running Unit Test'
./tests/unit-tests.sh
echo 'Running Integration Test'
./tests/integration-tests.sh
COMMANDS | true |
4edf3b7d5e3be2c84cb2cc59cea0ac8b58d5b167 | Shell | renan/travis | /script.sh | UTF-8 | 409 | 2.671875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
if [ -z "$PHPCS" -a -z "$FOC_VALIDATE" ]; then
./Console/cake test $PLUGIN_NAME All$PLUGIN_NAME --stderr --coverage-clover build/logs/clover.xml;
fi
if [ "$PHPCS" == 1 ]; then
phpcs -p --extensions=php --standard=CakePHP ./Plugin/$PLUGIN_NAME;
fi
if [ "$FOC_VALIDATE" == 1 ]; then
export PLUGIN_PATH="Plugin/$PLUGIN_NAME"
echo "./travis/validate.sh"
./travis/validate.sh
fi
| true |
062beb33a017a1e9fa9a08843475a6f802f86168 | Shell | bio-guoda/preston-gnverifier | /make.sh | UTF-8 | 1,547 | 3.890625 | 4 | [
"CC0-1.0"
] | permissive | #!/bin/bash
#
# Script to track resources used by https://verifier.globalnames.org
#
function track_script {
# first track this script
preston track "file://$PWD/$0"
}
function track_content {
# then track the content
preston track "http://opendata.globalnames.org/dwca/"\
| grep "hash://sha256"\
| preston cat\
| grep -P -o "[0-9a-zA-Z-]+.(tar.gz|zip)"\
| sed 's+^+http://opendata.globalnames.org/dwca/+g'\
| xargs preston track
}
function generate_readme {
echo -e "This publication contains a Preston archive of resources used by Global Names Verifier (https://verifier.globalnames.org/about)." > README.md
echo -e "\n\nPlease follow academic citation guidelines when using this corpus." | tee -a README.md
echo -e "\n\nTo clone this archive:"\
| tee -a README.md
echo -e "\n\n$ preston clone"\
| tee -a README.md
echo -e "\n\nAfter cloning this archive, you should be able to reproduce results below without the --remote https://zenodo.org... part."\
| tee -a README.md
echo -e "\n\n$ preston history\n\n"\
| tee -a README.md
preston history\
| tee -a README.md
echo -e "\n\nResource Alias/References\n\n$ preston alias -l tsv | cut -f1 | sort | uniq\n\n" | tee -a README.md
preston alias -l tsv\
| cut -f1\
| sort\
| uniq\
| tee -a README.md
# build the citation list
echo -e "\n\nReferences\n\n"\
| tee -a README.md
echo generating citation list
preston ls\
| preston cite\
| sort\
| uniq\
| tee -a README.md
}
track_script
track_content
generate_readme
| true |
763573f036f30605dfd3be416f8eb188f507aaed | Shell | 4ch7ung/imgClass | /bin/svm_run.sh | UTF-8 | 175 | 2.703125 | 3 | [] | no_license | #!/bin/bash
if [ $3 -eq "1" ]
then
meth="SIFT"
else
meth="SURF"
fi
echo "SVMing $2/10 experiment $meth started"
"$1" "$3" "$4" "$5" "$6" "$7" "$8"
echo "Done $meth $2/10"
| true |
821c02757c51b4b56e1737f48f240dc499391a81 | Shell | dorucioclea/BuyingCatalogueService | /tests/NHSD.BuyingCatalogue.Testing.Data/IntegrationDbSetup/configure-integration-db.sh | UTF-8 | 896 | 3.4375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# wait for MSSQL server to start
export STATUS=1
i=0
while [[ $STATUS -ne 0 ]] && [[ $i -lt 30 ]]; do
i=$i+1
/opt/mssql-tools/bin/sqlcmd -t 1 -U sa -P $SA_PASSWORD -Q "select 1" >> /dev/null
STATUS=$?
done
if [ $STATUS -ne 0 ]; then
echo "Error: MSSQL SERVER took more than thirty seconds to start up."
exit 1
fi
echo "======= MSSQL SERVER STARTED ========" | tee -a ./config.log
# Run the setup script to create the DB using the passed in env. variables and the schema as per the needs of integration tests
/opt/mssql-tools/bin/sqlcmd -S localhost -U sa -P $SA_PASSWORD -d master -i "sql/Create Database.sql"
/opt/mssql-tools/bin/sqlcmd -S localhost -U sa -P $SA_PASSWORD -d $DB_NAME -i "sql/Create.sql"
/opt/mssql-tools/bin/sqlcmd -S localhost -U sa -P $SA_PASSWORD -d $DB_NAME -i "sql/ReferenceData.sql"
echo "======= MSSQL CONFIG COMPLETE =======" | tee -a ./config.log | true |
3bb65345d2e41574c6f3d40ac5a2eaebd217e814 | Shell | trtruong/utilities | /scripts/shell/upgradeAllNGFW.sh | UTF-8 | 1,991 | 3.8125 | 4 | [] | no_license | #!/usr/bin/env bash
## This script will apply ansible playbook to all NGFWs in the specified environment
### Require a environment to execute
if [[ $# -ne 2 ]] ; then
echo "execute $0 with ENV <staging|prod> <playbook name.yml>"
exit 1
fi
ansible_playbook=$2
pushd ../../ansible
REGIONS=(us-west-1 us-west-2 us-east-1 us-east-2 eu-west-1 eu-west-2)
for REGION in "${REGIONS[@]}"; do
r=$(echo $REGION | sed 's/-\(.\).*-/\1/')
# Set domain name for bastion host base on environment
case $1 in
'stage'|'stg'|'staging')
DOMAIN='staging.forcepoint.io'
env_short='s'
env='stg'
sed "s/REGION/$r/" dep-ssh.cfg.tpl | sed "s/DOMAIN/$DOMAIN/" | sed "s/ENV/stg/" > dep-ssh.cfg
;;
'prod'|'production')
DOMAIN='forcepoint.io'
sed "s/REGION/$r/" dep-ssh.cfg.tpl | sed "s/DOMAIN/$DOMAIN/" | sed "s/ENV/prod/" > dep-ssh.cfg
env_short='p'
env='prod'
;;
*)
exit 1
;;
esac
sed "s/REGION/$r/" ansible.cfg.tpl > ansible.cfg
sed "s/REGION/$REGION/" ../scripts/python/ec2.ini.tpl > ../scripts/python/ec2.ini
# Login to Prod in order of invoke BLESS lambda to sign SSH cert
source okta-login pe-prod
### Get Ansible encryption passwrord
if [ -f ~/.vault_pass.txt ]; then
aws ssm get-parameter --name "/COPS/ansible/vault-pass" --with-decryption --region us-east-2 | jq -r '.Parameter.Value' > ~/.vault_pass.txt
fi
### Sign ssh key to access bastion hosts
bless_client -i bastion-host-${r}.${DOMAIN}
if [ "$env_short" == "s" ]; then
source okta-login pe-stg
fi
## Get NGFW SSH private key
if [ ! -f ~/.ssh/cloudops-dep-${env}_rsa ]; then
aws ssm get-parameter --name "/NGFW/ssh-keypairs/cloudops-dep_private" --with-decryption --region us-east-2 | jq -r '.Parameter.Value' > ~/.ssh/cloudops-dep-${env}_rsa
chmod 600 ~/.ssh/cloudops-dep-${env}_rsa
fi
ansible-playbook ${ansible_playbook} -vvv -e "hostgroup=tag_Name_vm_${r}_cpt_${env_short}_edge_ngfw670"
done
popd
| true |
a082341523f5e4212124decd50fcce4da290e7dc | Shell | clchiou/garage | /scripts/check-c++.sh | UTF-8 | 297 | 3.546875 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Run checks on C++ source files.
source "$(dirname "${BASH_SOURCE[0]}")/common.sh"
readonly SRCS=($(find . -name '*.h' -or -name '*.cc' | sort | cut -d/ -f2-))
for src in "${SRCS[@]}"; do
echo "=== clang-format ${src} ==="
clang-format "${src}" | diff "${src}" -
done
| true |
234ab40c7525f1f4dd49f7c4369baba74d7af7ba | Shell | yang-ling/little | /images/ymake_main.sh | UTF-8 | 939 | 3.46875 | 3 | [] | no_license | #!/usr/bin/env bash
set -ex
currentPath=$(pwd)
product_name=${currentPath##*/}
temp=${currentPath%/*}
brand_name=${temp##*/}
suffix_main="主图"
suffix_detail="详情"
extension=".jpg"
i=0
for file in $(ls *.jpg); do
i=$((i+1))
newfilename="${brand_name}_${product_name}_${suffix_main}_${i}${extension}"
if [ "$file" != "$newfilename" ]; then
mv $file $newfilename
fi
done
cd images
i=0
for file in $(ls *.jpg); do
i=$((i+1))
index="$i"
if [ $i -lt 10 ]; then
index="0${i}"
fi
newfilename="${brand_name}_${product_name}_${suffix_detail}_${index}${extension}"
if [ "$file" != "$newfilename" ]; then
mv $file $newfilename
fi
done
mergejpg.sh "${brand_name}_${product_name}_${suffix_detail}"
filesize=$(stat -c%s "${brand_name}_${product_name}_${suffix_detail}.jpg")
if [ $filesize -ge 1000000 ]; then
mycrop.sh "${brand_name}_${product_name}_${suffix_detail}"
fi
| true |
7c2907b768aff71775e96c699f1c0700c569ea39 | Shell | petronny/aur3-mirror | /gvolicon-git/PKGBUILD | UTF-8 | 599 | 2.5625 | 3 | [] | no_license | # Maintainer: Unia <jthidskes@outlook.com>
pkgname=gvolicon-git
_gitname=gvolicon
pkgver=2013.07.22
pkgrel=1
pkgdesc="A simple and lightweight volume icon that sits in your system tray, written in GTK3"
arch=('i686' 'x86_64')
url="http://unia.github.io/gvolicon/"
license=('GPL2')
depends=('gtk3' 'hicolor-icon-theme' 'alsa-lib')
makedepends=('git')
source=('git://github.com/Unia/gvolicon.git')
md5sums=('SKIP')
pkgver() {
cd $_gitname
git log -1 --format="%cd" --date=short | sed 's|-|.|g'
}
build() {
cd $_gitname/
make
}
package() {
cd $_gitname/
make PREFIX=/usr DESTDIR="$pkgdir" install
}
| true |
6f646964737e3f2f6a09009b40e0a4e5c6ef3ec9 | Shell | shalomhillelroth/LevanonLab | /UsersCode/Roni/GEODatasetCleanup.sh | UTF-8 | 1,837 | 3.296875 | 3 | [] | no_license | # create argsparse
ARGPARSE_DESCRIPTION="Creates the commands for a GEO dataset" # this is optional
source /private/common/Software/BashLibs/argparse-bash/argparse.bash || exit 1
argparse "$@" <<EOF || exit 1
parser.add_argument('-d', '--wanted_dir', type=str, help='Path of wanted analysis directory', required=True)
parser.add_argument('-s', '--dir_suffix',type=str, help='Suffix of directory and files', default="")
EOF
if [ "$DIR_SUFFIX" != "" ]; then
SUFFIX="_${DIR_SUFFIX}"
fi
# set -e
# # keep track of the last executed command
# trap 'last_command=\$current_command; current_command=\$BASH_COMMAND' DEBUG
# # echo an error message before exiting
# trap 'echo \"\"\${last_command}\" command filed with exit code \$?.\"' EXIT
# remove sra files
rm $WANTED_DIR/RawData/SRR*/SRR*.sra
rmdir $WANTED_DIR/RawData/SRR*/
# remove FastQC files
rm -r $WANTED_DIR/RawData/FastQC/*_fastqc $WANTED_DIR/RawData/FastQC$SUFFIX/*_fastqc.zip
rm -r $WANTED_DIR/RawData/FastQCRemovedDups/*_fastqc $WANTED_DIR/RawData/FastQCRemovedDups$SUFFIX/*_fastqc.zip
find $WANTED_DIR/RawData/FastQC -type d -name *_fastqc -exec rm -r '{}' \;
find $WANTED_DIR/RawData/FastQC$SUFFIX -type d -name *_fastqc -exec rm -r '{}' \;
find $WANTED_DIR/RawData/FastQCRemovedDups -type d -name *_fastqc -exec rm -r '{}' \;
find $WANTED_DIR/RawData/FastQCRemovedDups$SUFFIX -type d -name *_fastqc -exec rm -r '{}' \;
# remove salmon files and directories: leave .sf files
find $WANTED_DIR/Salmon_1.4.0$SUFFIX -type d -name aux_info -exec rm -r "{}" \;
find $WANTED_DIR/Salmon_1.4.0$SUFFIX -type d -name libParams -exec rm -r "{}" \;
find $WANTED_DIR/Salmon_1.4.0$SUFFIX -type d -name logs -exec rm -r "{}" \;
find $WANTED_DIR/Salmon_1.4.0$SUFFIX -type f -name '*.json' -delete
# remove out.tab STAR files
find $WANTED_DIR/STAR$SUFFIX -name '*out.tab*' -delete
| true |
1194fed8146bd5b8cdfa42c9d12f599080cb9b27 | Shell | moritetu/pgenv2 | /libexec/pgenv-exec-env | UTF-8 | 4,628 | 3.734375 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
set -eu
source "$PGENV_LIBEXEC"/pgenv--load
# Command line options
declare -A options
options=(
[edit]=0
[sys]=0
[version]=
[write]=0
)
# Arguments array
arguments=()
# Environment file
pgenv_instance_default_environment="$PWD/pgenv_myenv"
pgenv_instance_env_file="$pgenv_instance_default_environment"
# Target version to edit
pg_version="$(pgenv -s current)"
# Editor
EDITOR="${EDITOR:-/usr/bin/vi}"
#: parse_options [arg ...]
#: Parse command line options.
#:
parse_options() {
while [ $# -gt 0 ]; do
case "$1" in
-v|--version)
shift
options[version]="${1:-}"
if [ -z ${options[version]} ]; then
abort "error: version is required: -v <version>"
fi
;;
-w|--write)
options[write]=1
;;
-s|--system)
options[sys]=1
;;
-e|--edit)
options[edit]=1
;;
-*)
abort "error: $(self): invalid option: $1"
;;
*)
arguments[${#arguments[@]}]="$1"
;;
esac
shift
done
if [ ${#arguments[@]} -gt 0 ]; then
pgenv_instance_env_file="${arguments[0]}"
elif [ -n "${options[version]}" ]; then
pgenv_instance_env_file="${pgenv_instance_default_environment}-${options[version]}"
fi
}
# write_env
# Write environment variables.
#
write_env() {
cat <<EOF
# System environment
export PATH="$(pgenv prefix -v $pg_version --bin)":"\$PATH"
export POSTGRES_HOME="$(pgenv prefix -v $pg_version)"
export PGLIB="\$POSTGRES_HOME/lib"
export LD_LIBRARY_PATH="\$PGLIB":"\${LD_LIBRARY_PATH:-}"
EOF
if [ ${options[sys]} -eq 1 ];then
return
fi
cat <<EOF
# Created by pgenv at $(date +'%Y-%m-%d %H:%M:%S')
export PGHOST=localhost # host
#export PGHOSTADDR=127.0.0.1 # hostaddr
#export PGPORT=5432 # port
#export PGDATABASE=postgres # dbname
#export PGUSER=$USER # user
#export PGPASSFILE=~/.pgpass # passfile
#export PGPASSWORD= # password (not recommended, use passfile)
#export PGSERVICE= # service
#export PGSERVICEFILE=~/.pg_service.conf
#export PGREALM= # For Kerberos
#export PGOPTIONS= # options
#export PGAPPNAME=pgenv_app # application_name
# For SSL
#export PGSSLMODE=disable # disable , allow, prefer, require, verify-ca, verify-full
#export PGREQUIRESSL=prefer # 0: prefer, 1: require
#export PGSSLCOMPRESSION=0 # 0: no compression, 1: compression
#export PGSSLCERT=~/.postgresql/postgresql.crt
#export PGSSLKEY=~/.postgresql/postgresql.key
#export PGSSLROOTCERT=~/.postgresql/root.crt
#export PGSSLCRL=~/.postgresql/root.crl
#export PGREQUIREPEER=postgres
#export PGKRBSRVNAME=servicename
#export PGGSSLIB=gssapi # sspi, gssapi
#export PGCONNECT_TIMEOUT=30 # connect_timeout seconds
#export PGCLIENTENCODING=auto # client_encoding
#export PGDATESTYLE='iso, ymd' # Equals 'SET datestyle to...'
#export PGTZ='Japan' # Equals 'SET timezone to...'
#export PGGEQO=on # Equals 'SET geqo to...'
#export PGSYSCONFDIR='/path/to/pg_service'
#export PGLOCALEDIR='/path/to/locale'
EOF
}
# Parse command line options.
parse_options "$@"
if [ ${options[edit]} -eq 1 ]; then
if [ ! -e "$pgenv_instance_env_file" ]; then
write_env > "$pgenv_instance_env_file"
pgenv_hook env write "$pgenv_instance_env_file"
fi
$EDITOR "$pgenv_instance_env_file"
elif [ ${options[write]} -eq 1 ]; then
if [ -e "$pgenv_instance_env_file" ]; then
abort "error: file already exists: $pgenv_instance_env_file"
fi
write_env > "$pgenv_instance_env_file"
pgenv_hook env write "$pgenv_instance_env_file"
println "file created: $pgenv_instance_env_file"
else
pgenv_hook env print "$pgenv_instance_env_file"
write_env
fi
exit $?
#=begin COMMAND
#
# env Edit enviroment variables for postgresql.
#
#=end COMMAND
#=begin HELP
#
# Usage: pgenv env [-w|-e] [-v|--version <version>|<filename>]
#
# Edit enviroment variables for postgresql.
#
#
# SYNOPSIS
# pgenv env
# pgenv env -w -v 10.3
# pgenv env -e mytest.sh
#
# OPTIONS
# -e, --edit
# Edit environment variables and save into the file.
#
# -s, --system
# Show only system environment variables.
# ex: PATH, LD_LIBRARY_PATH
#
# -v, --version <version>
# Version to edit.
#
# -w, --write
# Write environment variables into the file.
#
#
# SHELL VARIABLES
# - EDITOR
# To use your default editor, set the command path to the editor to EDITOR environment variable.
# Ex: export EDITOR=/usr/bin/vim
#
#
#=end HELP
| true |
4982426b58228201ae88234995516bddce1abf76 | Shell | noplacenoaddress/OpenBSD | /src/root/Bin/dnsblock.sh | UTF-8 | 1,779 | 3.78125 | 4 | [
"BSD-3-Clause"
] | permissive | #!/bin/sh
# Update the DNS based adblock (var/unbound/etc/dnsblock.conf)
# https://www.filters.com
# https://github.com/StevenBlack/hosts
# https://deadc0de.re/articles/unbound-blocking-ads.html
# https://pgl.yoyo.org/adservers/serverlist.php?hostformat=unbound&showintro=0&mimetype=plaintext
#set -eu
set -o errexit
set -o nounset
PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin
app=$(basename $0)
AWK=/usr/bin/awk
FTP=/usr/bin/ftp
CAT=/bin/cat
GREP=/usr/bin/grep
EGREP=/usr/bin/egrep
SORT=/usr/bin/sort
PFCTL=/sbin/pfctl
RM=/bin/rm
CP=/bin/cp
CHMOD=/bin/chmod
TR=/usr/bin/tr
RCCTL=/usr/sbin/rcctl
hostsurl="https://raw.githubusercontent.com/StevenBlack/hosts/master/alternates/fakenews-gambling/hosts"
hoststmp="$(mktemp -t ${hostsurl##*/}.XXXXXXXXXX)" || exit 1
dnsblocktmp="$(mktemp)" || exit 1
dnsblock=dnsblock.conf
unboundchroot=/var/unbound
error_exit () {
echo "${app}: ${1:-"Unknown Error"}" 1>&2
exit 1
}
# Bail out if non-privileged UID
[ 0 = "$(id -u)" ] || \
error_exit "$LINENO: ERROR: You are using a non-privileged account."
# Download
"${FTP}" -o "${hoststmp}" "${hostsurl}" || \
error_exit "$LINENO: ERROR: download failed."
# Convert hosts to unbound.conf
"${CAT}" "${hoststmp}" | "${GREP}" '^0\.0\.0\.0' | \
"${AWK}" '{print "local-zone: \""$2"\" redirect\nlocal-data: \""$2" A 0.0.0.0\""}' > \
"${dnsblocktmp}"
# Install
"${CP}" "${dnsblocktmp}" "${unboundchroot}"/etc/"${dnsblock}" || \
error_exit "$LINENO: ERROR: ${dnsblock} copy failed."
"${CHMOD}" 600 "${unboundchroot}"/etc/"${dnsblock}" || exit
# Populate unbound dns block
"${RCCTL}" stop unbound
"${RCCTL}" start unbound || \
error_exit "$LINENO: ERROR: unbound failed."
# Remove temp files
"${RM}" -rf "${hoststmp}" "${dnsblocktmp}"
| true |
68d9c92e137e9edf0c9cfba9d66abeeea0ad7149 | Shell | metadevfoundation/pi-apps | /journey | UTF-8 | 752 | 3.4375 | 3 | [] | no_license | #! /bin/sh
### BEGIN INIT INFO
# Provides: rc.local
# Required-Start: $all
# Required-Stop:
# Default-Start: 2 3 4 5
# Default-Stop:
# Short-Description: Run /etc/rc.local if it exist
### END INIT INFO
PIDFILE=/var/run/journey.pid
JOURNEY=/root/journey-linux-arm/journey
do_start() {
start-stop-daemon\
--start --background\
--pidfile "$PIDFILE"\
--make-pidfile\
--exec "$JOURNEY"
}
case "$1" in
start)
do_start
;;
restart|reload|force-reload)
echo "Error: argument '$1' not supported" >&2
exit 3
;;
stop|status)
start-stop-daemon --pidfile "$PIDFILE" --stop
exit 0
;;
*)
echo "Usage: $0 start|stop" >&2
exit 3
;;
esac
| true |
ab3acb9b4df924d645dc23da5c1e91a9064e625d | Shell | Avarko/nixos-devkit-installer | /nixos-devkit/secrets-management/logout-from-secrets-store.sh | UTF-8 | 193 | 2.890625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
info() { echo -e "\e[35m\e[1m$@\e[21m\e[0m"; }
info "Removing the LastPass session data in $HOME/.local/share/lpass..."
lpass logout
rm -rf "$HOME/.local/share/lpass"
info "Done."
| true |
459dcc36d658cd3225fbe66f1af2c61e47434994 | Shell | eshamster/try-cl-test-on-gh-actions | /test-docker/test.sh | UTF-8 | 653 | 3.265625 | 3 | [
"CC0-1.0"
] | permissive | #!/bin/sh
set -eux
lisp=$1
install_targets="${2:-}"
if [ "${install_targets}" != "" ]; then
echo "${install_targets}" | tr ',' '\n'| while read target; do
ros install ${target}
done
ros -e '(ql:register-local-projects)' -q
fi
ros use ${lisp}
ros run -- --version
# Note: Assume that repository is checkout to workspace folder in previous step
dir=/root/.roswell/local-projects/target
cp -R /github/workspace ${dir}
cd ${dir}
rove *.asd 2>&1 | tee /tmp/test.log
# Note: In Clozure CL, terminating debug console finishes in errcode 0,
# so grep message to check if the test has actually run.
grep -E "tests? passed" /tmp/test.log
| true |
ea30257e31f96815b458bae9c13b11bdc9818889 | Shell | Sudo-B-Set-Trippin/WebCamModuleControl | /WebCamModuleControl.sh | UTF-8 | 2,826 | 3.5625 | 4 | [] | no_license | #!/bin/bash
if [[ ${UID} != 0 ]]
then
echo -e "Run The Script As Root, Are Your Drunk Dude ?"
else
echo
echo -e "\n################################################################"
echo -e "# WebCamModuleControl #"
echo -e "# Remove or Blacklist WebCam Module From Arch Linux Kernel #"
echo -e "# Coder : BL4CKH47H4CK3R #"
echo -e "################################################################\n"
echo -e "[1] Remove WebCam Module"
echo -e "[2] Blacklist WebCam Module"
echo -e "[3] Revert/Fix WebCam Module\n"
read -p "Enter Choice [1, 2, 3]: " confirm
if [[ ${confirm} == 1 ]]
then
echo -e "\nRemoving Module From The Kernel ..."
rmmod uvcvideo -f
rm -rf /lib/modules/$(uname -r)/kernel/drivers/media/usb/uvc/uvcvideo.ko.xz
echo -e "Enjoy Superior Privacy !\nRemoving Module Done !\n"
echo -e "Please Reboot Your System To Take Effect !\n"
elif [[ ${confirm} == 2 ]]
then
echo -e "\nBlacklisting Module From The Kernel ..."
echo -e "blacklist uvcvideo" >> /etc/modprobe.d/blacklist.conf
echo -e "Enjoy Superior Privacy !\nBlacklisting Module Done !\n"
echo -e "Please Reboot Your System To Take Effect !\n"
elif [[ ${confirm} == 3 ]]
then
echo -e "\n[1] Linux [Default]"
echo -e "[2] Linux [LTS]"
echo -e "[3] Linux [ZEN]"
echo -e "[4] Linux [HARDENED]\n"
read -p "Enter Choice [1, 2, 3, 4]: " confirm
if [[ ${confirm} == 1 ]]
then
echo -e "\nReinstalling Specified Kernel & Fixing WebCam Module ...\n"
rm -rf /etc/modprobe.d/blacklist.conf
pacman -S linux --noconfirm
echo -e "\nEnjoy Superior Privacy !\nRemoving Module Done !\n"
echo -e "Please Reboot Your System To Take Effect !\n"
elif [[ ${confirm} == 2 ]]
then
echo -e "\nReinstalling Specified Kernel & Fixing WebCam Module ...\n"
rm -rf /etc/modprobe.d/blacklist.conf
pacman -S linux-lts --noconfirm
echo -e "\nEnjoy Superior Privacy !\nRemoving Module Done !\n"
echo -e "Please Reboot Your System To Take Effect !\n"
elif [[ ${confirm} == 3 ]]
then
echo -e "\nReinstalling Specified Kernel & Fixing WebCam Module ...\n"
rm -rf /etc/modprobe.d/blacklist.conf
pacman -S linux-zen --noconfirm
echo -e "Enjoy Superior Privacy !\nRemoving Module Done !\n"
echo -e "Please Reboot Your System To Take Effect !\n"
elif [[ ${confirm} == 4 ]]
then
echo -e "\nReinstalling Specified Kernel & Fixing WebCam Module ...\n"
rm -rf /etc/modprobe.d/blacklist.conf
pacman -S linux-hardened --noconfirm
echo -e "Enjoy Superior Privacy !\nRemoving Module Done !\n"
echo -e "Please Reboot Your System To Take Effect !\n"
else
echo -e "\nWrong Input, Are You Drunk Dude ?\n"
fi
else
echo "\nWrong Input, Are You Drunk Dude ?\n"
fi
fi
| true |
a1529107548514b9b25e74f701930708e7dbeeb8 | Shell | Firehawke/a2softlist-support | /ia2sl.sh | UTF-8 | 108,950 | 3.625 | 4 | [
"Unlicense"
] | permissive | #!/bin/bash
SAVEIFS=$IFS
IFS=$(echo -en "\n\b")
# Subroutines defined here.
function startcycle() {
local worktype=${1^^}
# We use $1 instead of $worktype because BOTH can also be passed through
# worktype and that'll cause this to break. Instead we take what the caller
# gives us in $1
COUNTER=1
# Run to a maximum of 500, end with exit code 1 when we finally hit a dupe.
while [ $COUNTER -lt 499 ]; do
if ! (generator $COUNTER $1); then
aggregate $1
return 1
fi
let COUNTER=COUNTER+1
done
aggregate $1
}
function generator() {
# Remove and recreate certain work directories so they're clean.
rm -rf "$worktype"diskoutput 2>/dev/null
rm -rf "$worktype"diskstaging 2>/dev/null
mkdir "$worktype"diskoutput 2>/dev/null
mkdir "$worktype"diskstaging 2>/dev/null
# Do the actual search and download.
# This will depend on which type we're using, of course.
case ${2} in
"WOZADAY")
ia search 'collection:wozaday' --parameters="page=$1&rows=1" --sort="publicdate desc" -i >"$worktype"currentitems.txt
;;
"CLCRACKED")
ia search '@4am subject:"crack"' --parameters="page=$1&rows=1" --sort="publicdate desc" -i >"$worktype"currentitems.txt
;;
esac
# Output download URLs to currentdls.txt
ia download -d -i --no-directories --glob '*.zip|*.xml|*.dsk|*.2mg|*.po|*.bin|*.woz|*crack).zip|*.BIN' --itemlist="$worktype"currentitems.txt >"$worktype"currentdls.txt
# Let's remove the extras file and certain other unwanted materials
sed -i "/%20extras%20/d;/extras.zip/d;/0playable/d;/_files.xml/d;/demuffin\'d/d;/work%20disk/d" "$worktype"currentdls.txt
# Now we download.
wget -q -i "$worktype"currentdls.txt -nc -nv -nd -P ./"$worktype"diskstaging
# Let's decompress any ZIP files we got, forced lowercase names.
unzip -n -qq -LL -j "$worktype"'diskstaging/*.zip' -d "$worktype"diskoutput 2>/dev/null
# Before we go ANY further, let's clean up any .bin files into .2mg ...
cd "$worktype"diskstaging || exit
# Find all .bin files and rename them to 2mg. This should be Distro-agnostic,
# as opposed to the previous Debian-specific variation.
find . -name "*.bin" -exec sh -c 'mv "$1" "${1%.bin}.2mg"' _ {} \; 2>/dev/null
cd ..
# Move the meta XML and the disk images to the output folder for processing.
mv "$worktype"diskstaging/*.woz "$worktype"diskoutput 2>/dev/null
cp "$worktype"diskstaging/*meta.xml "$worktype"diskoutput 2>/dev/null
cd "$worktype"diskoutput || exit
# Remove stuff we don't want. We don't want to parse the playable.dsk because
# that's an exact copy of the properly named disk. We don't want pictures and
# videos in this case either.
rm ./00playable.dsk 2>/dev/null
rm ./00playable.2mg 2>/dev/null
rm ./00playable.woz 2>/dev/null
rm ./00playable2.dsk 2>/dev/null
rm ./playable.dsk 2>/dev/null
rm ./playable.2mg 2>/dev/null
rm ./playable.woz 2>/dev/null
rm ./*.a2r 2>/dev/null
rm ./*.png 2>/dev/null
rm ./*.mp4 2>/dev/null
rm ./*.jpg 2>/dev/null
rm ./*fastdsk\ rip\* 2>/dev/null
# These next two files seem to pop up a lot with MP4 files, which we don't want.
rm ./ProjFileList.xml 2>/dev/null
rm ./project.xml 2>/dev/null
# 4AM sometimes leaves his work disk in the package. We don't want to keep that.
rm ./*work\ disk* 2>/dev/null
rm ./*demuffin\'d\ only* 2>/dev/null
# Now, we parse the XML file(s), and there should only be one to parse.
for filename in *.xml; do
[ -e "$filename" ] || continue
# We'll have to handcraft the shortname ourselves.
echo -e '\t<software name="ia2slnewnamehere">' >../xml/"$worktype"disk/disk$1.xml
echo -e -n '\t\t<description>' >>../xml/"$worktype"disk/disk$1.xml
# Now, let's do a bit of adjusting with the description name, to change
# v1.0 to (Version 1.0) and so forth.
# We can add more special cases as they show up in the future.
# Also remove the "IIgs" from the end of GS-specific disks since they're
# going to go into a GS-specific software list.
# FIXME: Temporarily disable this while I try to figure out how to keep this from breaking things..
xmllint --xpath 'metadata/title/text()' "$filename" | tr -d '\n' >>../xml/"$worktype"disk/disk$1.xml
# xmllint --xpath 'metadata/title/text()' "$filename" | tr -d '\n' | sed -E 's/v([[:digit:]]*).([[:digit:]]*).([[:digit:]]*)/\(Version \1.\2.\3\)/;s/\.\)/) /;s/ IIga//' >>../xml/"$worktype"disk/disk$1.xml
echo -e '</description>' >>../xml/"$worktype"disk/disk$1.xml
echo -e -n '\t\t<year>' >>../xml/"$worktype"disk/disk$1.xml
xmllint --xpath 'metadata/description/text()' $filename | grep -o '19[0123456789][0123456789]' | tr -d '\n' >>../xml/"$worktype"disk/disk$1.xml
echo -e '</year>' >>../xml/"$worktype"disk/disk$1.xml
echo -e -n '\t\t<publisher>' >>../xml/"$worktype"disk/disk$1.xml
xmllint --xpath 'metadata/description/text()' $filename | grep -o -a -i -F -f ../publishers.txt | tr -d '\n' | sed -E -e 's/distributed by //g;s/published by //g;s/\&\;amp\;/and/g' >>../xml/"$worktype"disk/disk$1.xml
echo -e '</publisher>' >>../xml/"$worktype"disk/disk$1.xml
echo -e -n '\t\t<info name="release" value="' >>../xml/"$worktype"disk/disk$1.xml
xmllint --xpath 'metadata/publicdate/text()' $filename | awk '{print $1}' | tr -d '\n' >>../xml/"$worktype"disk/disk$1.xml
echo -e '"/>' >>../xml/"$worktype"disk/disk$1.xml
# Now, this next step only is done if we're doing WOZADAY where we have actual compatibility data at hand.
case ${2} in
"WOZADAY")
compatdata=$(xmllint --xpath 'metadata/description/text()' $filename | tr -d '\n')
case $compatdata in
# Copy Protection compatibility issues section -------------------
*"It requires a 48K Apple ][ or ][+. It will not run on later models."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2,A2P" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 48K Apple ][ or ][+.' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\tIt will not run on later models. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires an Apple ][ or ][+ with 48K. Due to compatibiltiy issues created by the copy protection, it will not run on later models. Even with a compatible ROM file, this game triggers bugs in several emulators, resulting in crashes or spontaneous reboots."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2,A2P" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t\<!-- It requires an Apple II or II+ with 48K.' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t\Due to compatibility issues created by the copy protection,' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t\it will not run on later models. Even with a compatible ROM file,' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t\this game triggers bugs in several emulators,' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t\resulting in crashes or spontaneous reboots. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires a 48K Apple ][ or ][+, or an unenhanced Apple //e. Due to compatibility issues caused by the copy protection, it will not run on any later models."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2,A2P,A2E" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 48K Apple II or II+,' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\tor an unenhanced Apple //e. Due to compatibility' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\tissues caused by the copy protection, it will not' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\trun on any later models. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires a 48K Apple II or ][+. Due to compatibility issues created by the copy protection, it will not run on later models."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2,A2P" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 48K Apple II or II+.' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\tDue to compatibility issues caused by the copy protection,' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\tit will not run on any later models. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires a 48K Apple ][ or ][+. Due to compatibility issues caused by the copy protection, it will not run on any later models."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2,A2P" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 48K Apple II or II+.' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\tDue to compatibility issues caused by the copy protection,' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\tit will not run on any later models. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires a 48K Apple ][ or Apple ][+. Due to compatibility problems created by the copy protection, it will not run on later models"*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2,A2P" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 48K Apple II or II+.' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\tDue to compatibility issues caused by the copy protection,' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\tit will not run on any later models. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires a 64K Apple ][+ or later. It was released with several different copy protections; this version was protected with the E7 bitstream. Game code and data is identical to other protected variants"*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2P,A2E,A2EE,A2C,A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 64K Apple II+ or later.' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\tThis was released with several different copy protections;' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\tthis version was protected with the E7 bitstream. Game code and data' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\tis identical to other protected variants. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It runs on any Apple ][ with 48K. Some emulators may have difficulty emulating this image due to its extreme copy protection methods"*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2,A2P,A2E,A2EE,A2C,A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It runs on any Apple II with 48K.' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\tSome emulators may have difficulty emulating this image due to its' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\textreme copy protection methods. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires an Apple ][ or Apple ][+. Due to restrictive copy protection, it will not boot on later models"*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2,A2P" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires an Apple II or Apple II+.' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\tDue to restrictive copy protection,' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\tit will not boot on later models. --> -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*'It runs on any Apple ][ with 48K. Note: due to subtle emulation bugs and extremely finicky copy protection, this disk may reboot one or more times before loading.'*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2,A2P,A2E,A2EE,A2C,A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It runs on any Apple II with 48K.' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\tNote: due to subtle emulation bugs and extremely finicky copy' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\tprotection, this disk may reboot one or more times before loading. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
# Non-copy protection-related special notes ----------------------
*"Attempting to run with less than 48K will appear to work, but copies will fail with an UNABLE TO WRITE error."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2,A2P,A2E,A2EE,A2C,A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It runs on any Apple II with 48K.' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t(Attempting to run with less than 48K will appear to work,' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\tbut copies will fail with an UNABLE TO WRITE error.) -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires a 48K Apple ][ or ][+."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2,A2P" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 48K Apple II or II+. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires a 48K Apple II or II+."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2,A2P" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 48K Apple II or II+. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It runs on any Apple ][ with 48K. The double hi-res version is automatically selected if you have a 128K Apple //e or later."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2,A2P,A2E,A2EE,A2C,A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It runs on any Apple II with 48K.' >>../xml/"$worktype"disk/disk$1.xml
echo -e 'The double hi-res version is automatically selected' >>../xml/"$worktype"disk/disk$1.xml
echo -e 'if you have a 128K Apple //e or later. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires a 64K Apple ][+, //e, //c, or IIgs. Double hi-res mode requires a 128K Apple //e, //c, or IIgs"*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2P,A2E,A2EE,A2C,A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 64K Apple ][+, //e, //c, or IIgs.' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\tDouble hi-res mode requires a 128K Apple //e, //c, or IIgs. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It uses double hi-res graphics and thus requires a 128K Apple //e, //c, or IIgs."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2E,A2EE,A2C,A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It uses double hi-res graphics and thus requires a' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t128K Apple //e, //c, or IIgs. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"This version, using double hi-res graphics, requires a 128K Apple //e, //c, or IIgs."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2E,A2EE,A2C,A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- This version, using double hi-res graphics,' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\trequires a 128K Apple //e, //c, or IIgs. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires a 13-sector drive but otherwise runs on any Apple II with 48K."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2,A2P,A2E,A2EE,A2C,A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 13-sector drive but otherwise' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\truns on any Apple II with 48K. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires a 13-sector drive but otherwise runs on any Apple II with 32K."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2,A2P,A2E,A2EE,A2C,A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 13-sector drive but otherwise' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\truns on any Apple II with 32K. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires a 13-sector drive but otherwise runs on any Apple II with 24K."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2,A2P,A2E,A2EE,A2C,A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 13-sector drive but otherwise' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\truns on any Apple II with 24K. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires a 13-sector drive but otherwise runs on any Apple II with 16K."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2,A2P,A2E,A2EE,A2C,A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 13-sector drive but otherwise' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\truns on any Apple II with 16K. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires a 13-sector drive and a 48K Apple ][+ or later"*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2P,A2E,A2EE,A2C,A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 13-sector drive and a 48K Apple II+ or later. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires an original Apple II with 48K and Integer BASIC in ROM."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires an original Apple II with 48K and Integer BASIC' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\tin ROM. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires a 48K Apple II and Integer BASIC in ROM"*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires an original Apple II with 48K and Integer BASIC' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\tin ROM. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires an original Apple II with 32K and Integer BASIC in ROM."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires an original Apple II with 32K and Integer BASIC' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\tin ROM. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires a 64K Apple ][+ or later. Double hi-res graphics are available on 128K Apple //e and later"*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2P,A2E,A2EE,A2C,A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 64K Apple ][+ or later.' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\tDouble hi-res graphics are available on 128K Apple //e and later. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"This re-release requires a 64K Apple ][+ or later; the optional double hi-res graphics mode requires a 128K Apple //e or later. "*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2P,A2E,A2EE,A2C,A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- This re-release requires a 64K Apple II+ or later;' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\tthe optional double hi-res graphics mode requires a' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t128K Apple //e or later. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"The disk label says it requires a 32K Apple II, but under emulation I could not get it to work on less than a 48K Apple II+"*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2P,A2E,A2EE,A2C,A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- The disk label says it requires a 32K Apple II, but under' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\temulation I could not get it to work on less than a 48K Apple II+. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires an Apple ][ with an Integer BASIC ROM and at least 32K. Due to the reliance on Integer ROM, it will not run on later models"*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires an Apple II with Integer BASIC ROM and at least 32K.' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\tDue to reliance on Integer ROM, it will not run on later models. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"Due to its use of double hi-res graphics, it requires a 128K Apple //e or later"*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2E,A2EE,A2C,A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- Due to its use of double hi-res graphics,' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\tit requires a 128K Apple //e or later. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires an Apple IIgs, Apple //c+, or a 128K Apple //e with a compatible drive controller card."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2E,A2EE,A2C,A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires an Apple IIgs, Apple //c+, or a 128K Apple //e with a compatible drive controller card. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires an Apple IIgs, Apple //c+, Apple //e, or 64K Apple ][+ with a compatible drive controller card."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2P,A2E,A2EE,A2C,A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires an Apple IIgs, Apple //c+, Apple //e, or 64K Apple ][+ with a compatible drive controller card. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires an Apple IIgs, Apple //c+, or 128K Apple //e with a compatible drive controller card."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2E,A2EE,A2C,A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires an Apple IIgs, Apple //c+, or 128K Apple //e with a compatible drive controller card. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires a 1.5MB Apple IIgs ROM 01 or later."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 1.5MB Apple IIgs ROM 01 or later. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires a 1.25MB Apple IIgs."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 1.25MB Apple IIgs. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires a 1.25M Apple IIgs ROM 00 or ROM 01."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 1.25M Apple IIgs ROM 00 or ROM 01. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires a 1.25MB Apple IIgs ROM 01 or later."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 1.25MB Apple IIgs ROM 01 or later. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires a 1MB Apple IIgs. (Music requires 1.25MB.)"*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 1MB Apple IIgs. (Music requires 1.25MB.) -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires a 1MB Apple IIgs ROM01 or later."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 1MB Apple IIgs ROM01 or later. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires a 1MB Apple IIgs ROM 01 or later."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 1MB Apple IIgs ROM 01 or later. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires a 1 MB Apple IIgs ROM 01 or later."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 1 MB Apple IIgs ROM 01 or later. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires a 1MB Apple IIgs ROM 01 or later"*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 1MB Apple IIgs ROM 01 or later -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires a 768K Apple IIgs ROM 01 or later."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 768K Apple IIgs ROM 01 or later. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires a 768K Apple IIgs ROM01 or later."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 768K Apple IIgs ROM01 or later. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires a 768K Apple IIgs ROM 00 or ROM 01."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 768K Apple IIgs ROM 00 or ROM 01. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires a 512K Apple IIgs ROM 01. Music requires 768K."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 512K Apple IIgs ROM 01. Music requires 768K. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires a 512K Apple IIgs ROM 01 or later."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 512K Apple IIgs ROM 01 or later. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires a 512K Apple IIgs ROM01 or later."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 512K Apple IIgs ROM01 or later. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires a 512K Apple IIgs ROM 00 or ROM 01."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 512K Apple IIgs ROM 00 or ROM 01. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires a 512K Apple IIgs ROM 01 or earlier."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 512K Apple IIgs ROM 01 or earlier. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires a 256K Apple IIgs ROM 01."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 256K Apple IIgs ROM 01. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires a 256K Apple IIgs ROM 01 or earlier."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 256K Apple IIgs ROM 01 or earlier. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
# Normal section -------------------------------------------------
*"It requires a Apple IIgs, //c+, or 128K //e with a compatible drive controller card."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2E,A2EE,A2C,A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a Apple IIgs, //c+, or 128K //e with a compatible drive controller card. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires a 1MB Apple IIgs."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 1MB Apple IIgs. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires a 768K Apple IIgs."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 768K Apple IIgs. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires a 512K Apple IIgs."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 512K Apple IIgs. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires a 256K Apple IIgs."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 256K Apple IIgs. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires a 64K Apple II+, //e, //c, or IIgs."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2P,A2E,A2EE,A2C,A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 64K Apple II+, //e, //c, or IIgs. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires a 64K Apple ][+, //e, //c, or IIgs."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2P,A2E,A2EE,A2C,A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 64K Apple II+, //e, //c, or IIgs. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires a 48K Apple ][+, //e, //c, or IIgs."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2P,A2E,A2EE,A2C,A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 48K Apple II+, //e, //c, or IIgs. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It runs on an Apple //e with 128K, //c, or IIgs."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2E,A2EE,A2C,A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It runs on an Apple //e with 128K, //c, or IIgs. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requirs a 64K Apple ][+ or later"*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2P,A2E,A2EE,A2C,A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 64K Apple ][+ or later. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires a 64K Apple //e, //c, or IIgs."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2E,A2EE,A2C,A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 64K Apple //e, //c, or IIgs. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires a 48K Apple //e or later"*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2E,A2EE,A2C,A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 48K Apple //e or later. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It runs on any Apple II."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2,A2P,A2E,A2EE,A2C,A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It runs on any Apple II. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It runs on any Apple II with 64K."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2,A2P,A2E,A2EE,A2C,A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It runs on any Apple II with 64K. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It runs on any Apple II with 32K."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2,A2P,A2E,A2EE,A2C,A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It runs on any Apple II with 32K. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It runs on any Apple ][ with 32K."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2,A2P,A2E,A2EE,A2C,A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It runs on any Apple II with 32K. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It runs on any 48K Apple ][+, //e, //c, or IIgs."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2P,A2E,A2EE,A2C,A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It runs on any 48K Apple II+, //e, //c, or IIgs. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It run on any Apple II with 48K."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2,A2P,A2E,A2EE,A2C,A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It runs on any Apple II with 48K. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires an Apple II+ with 48K"*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2P,A2E,A2EE,A2C,A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires an Apple II+ with 48K. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires an Apple ][+ with 48K."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2P,A2E,A2EE,A2C,A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires an Apple II+ with 48K. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
# Map the human-readable compatibility sentence to a MAME <sharedfeat> value.
# Several phrasings produce byte-identical output, so they share one arm;
# the patterns are mutually exclusive literal strings, so merging them does
# not change which arm fires for any input.
*"It requires an 64K Apple ][+ or later."* | *"It requires a 64K Apple ][+ or later."* | *"It requires 64K Apple ][+ or later."* | *"It requires an 64K Apple ][+, //e, //c, or IIgs."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2P,A2E,A2EE,A2C,A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 64K Apple II+ or later. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It runs on any Apple ][ model with 48K."* | *"It runs on any Apple II with 48K."* | *"It runs on any Apple ][ with 48K."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2,A2P,A2E,A2EE,A2C,A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It runs on any Apple II with 48K. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires a 48K Apple ][+ or later."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2P,A2E,A2EE,A2C,A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 48K Apple II+ or later. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires a 64K Apple //e or later."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2E,A2EE,A2C,A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 64K Apple //e or later. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires a 128K Apple //e or later."* | *"It requires 128K Apple //e or later."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2E,A2EE,A2C,A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 128K Apple //e or later. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*"It requires a 128K Apple //e, //c, or IIgs."*)
echo -e '\t\t<sharedfeat name="compatibility" value="A2E,A2EE,A2C,A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- It requires a 128K Apple //e, //c, or IIgs. -->' >>../xml/"$worktype"disk/disk$1.xml
;;
*)
# Fallthrough: we don't know what the compatibility is. Assume everything
# and output a marker we can easily regex to find these later.
echo -e '\t\t<sharedfeat name="compatibility" value="A2,A2P,A2E,A2EE,A2C,A2GS" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t<!-- -=-=UNKNOWNCOMPATIBILITY=-=- -->' >>../xml/"$worktype"disk/disk$1.xml
;;
esac
;;
esac
# Obviously you'll need to hand-tweak the compatibility because there's a lot of ways this can be described...
# We'll include the full description text as a comment, as eventually we need to get other metadata like authors.
# We use perl to normalize the text since it uses escaping.
echo -e -n '\t\t<!--' >>../xml/"$worktype"disk/disk$1.xml
# Quote "$filename": these names routinely contain spaces, and an unquoted
# expansion splits into multiple xmllint arguments.
xmllint --xpath 'metadata/description/text()' "$filename" | perl -Mopen=locale -pe 's/&#x([\da-f]+);/chr hex $1/gie' | tr -d '\n' >>../xml/"$worktype"disk/disk$1.xml
echo -e -n '-->\n\n' >>../xml/"$worktype"disk/disk$1.xml
done
# Now we start working on the disk images. Here's where things get a LITTLE
# hairy. We need both the proper (possibly bad)-cased filename for tools, but
# we need a forced lowercase for the XML.
# clrmamepro/romcenter will rename files automagically so the files are
# fine as-is; we won't be renaming.
disknum=1
for filename in *.woz *.po *.dsk *.2mg; do
[ -e "$filename" ] || continue
# Forced-lowercase name used in parts of the XML. Strip '!' and '&', then
# squeeze runs of spaces (the old `sed 's/\ / /g'` replaced a space with a
# space — a no-op — so double spaces were never actually removed).
lfilename=$(echo "$filename" | tr '[:upper:]' '[:lower:]' | sed 's/\!/ /g' | sed 's/\&/and/g' | tr -s ' ')
echo -e "$worktype: [$disknum] '$lfilename'"
# Critical: Check for SHA1 dupes.
# Generate the SHA1 and put it in temp. Quote "$filename": names with
# spaces would otherwise be split into several sha1sum arguments.
sha1sum "$filename" | awk '{print $1}' >temp
# If we got a dupe, put it in temp2, otherwise leave a 0-byte file.
# We'll use that a little later.
grep -a -i -F -n -R -f temp ~/projects/mame/mame-softlists/hash/apple*_flop*.xml >temp2
if [[ -s temp2 ]]; then
echo "dupe" >dupecheck
fi
# Start outputting disk information: open the <part> element and pick the
# correct media interface from the file extension.
echo -e -n '\t\t<part name="flop' >>../xml/"$worktype"disk/disk$1.xml
echo -e -n "$disknum" >>../xml/"$worktype"disk/disk$1.xml
echo -e -n '" interface="' >>../xml/"$worktype"disk/disk$1.xml
# Now, is this a 5.25" or 3.5"?
# In the case of a .po it could technically be either, but...
case $lfilename in
*".po"*)
echo -e 'floppy_3_5">' >>../xml/"$worktype"disk/disk$1.xml
;;
*".dsk"*)
echo -e 'floppy_5_25">' >>../xml/"$worktype"disk/disk$1.xml
;;
*".2mg"*)
echo -e 'floppy_3_5">' >>../xml/"$worktype"disk/disk$1.xml
;;
*".woz"*)
# WOZ can be either. We need to pull the data from the WOZ itself.
# According to the WOZ 2.0 spec, certain info is always hard-set
# location-wise to help lower-end emulators.
# The disk type should ALWAYS be at offset 21, and
# should be "1" for 5.25" and "2" for 3.5" disks.
disktype=$((16#$(xxd -e -p -l1 -s 21 "$filename")))
# Exact match on 2: the old substring pattern *"2"* would also have
# matched stray bytes like 32 or 20 and misreported 3.5".
case $disktype in
2)
echo -e 'floppy_3_5">' >>../xml/"$worktype"disk/disk$1.xml
;;
*)
echo -e 'floppy_5_25">' >>../xml/"$worktype"disk/disk$1.xml
;;
esac
;;
esac
# Generate side/disk number information.
case $lfilename in
# Special cases. Will add as they come up.
*"side 2 (boot)."*)
echo -e '\t\t\t<feature name="part_id" value="Side 2 - Boot"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side a - master scenario disk."*)
echo -e '\t\t\t<feature name="part_id" value="Side A - Master scenario disk"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side b - boot."*)
echo -e '\t\t\t<feature name="part_id" value="Side B - Boot disk"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side a - master disk."*)
echo -e '\t\t\t<feature name="part_id" value="Side A - Master disk"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side b - scenario disk."*)
# Fixed copy-paste: this arm previously emitted "Side B - Boot disk"
# even though the filename says scenario disk.
echo -e '\t\t\t<feature name="part_id" value="Side B - Scenario disk"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
# These two don't get a disk number because we don't know for sure what it should be.
# We use "-=-=UNKNOWNDISK=-=-" so we can easily regex it out of the whole file.
*"- program disk."*)
echo -e '\t\t\t<feature name="part_id" value="-=-=UNKNOWNDISK=-=- - Program disk"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"- player disk."*)
# Fixed: the value attribute had a doubled opening quote (value=""-=-=...)
# which produced malformed XML.
echo -e '\t\t\t<feature name="part_id" value="-=-=UNKNOWNDISK=-=- - Player disk"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
# "Disk N - Title" type, N = 1..99 (replaces 30 copy-pasted arms; the arm
# sits in the same position in the enclosing case, so match order is
# unchanged, and output for the old 1..30 range is byte-identical).
# e.g. "swordthrust (4am crack) disk 1 - the king's testing ground.dsk"
*"disk "[0-9]" -"* | *"disk "[0-9][0-9]" -"*)
dnum=$(echo "$lfilename" | sed -n 's/.*disk \([0-9][0-9]*\) -.*/\1/p')
echo -e '\t\t\t<feature name="part_id" value="Disk '"$dnum"' - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
# "Disk N" type, N = 1..99 (replaces 10 copy-pasted arms; same position in
# the enclosing case, identical output for the old 1..10 range).
# e.g. "read and spell - in the days of knights and castles (4am crack) disk 1.dsk"
*"disk "[0-9]"."* | *"disk "[0-9][0-9]"."*)
dnum=$(echo "$lfilename" | sed -n 's/.*disk \([0-9][0-9]*\)\..*/\1/p')
echo -e '\t\t\t<feature name="part_id" value="Disk '"$dnum"'"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
# "Disk N Side A/B" type, N = 1..99 (replaces 20 copy-pasted arms).
# e.g. "read n roll (4am crack) disk 2 side a.dsk"
*"disk "[0-9]" side "[ab]"."* | *"disk "[0-9][0-9]" side "[ab]"."*)
dnum=$(echo "$lfilename" | sed -n 's/.*disk \([0-9][0-9]*\) side [ab]\..*/\1/p')
dside=$(echo "$lfilename" | sed -n 's/.*disk [0-9][0-9]* side \([ab]\)\..*/\1/p' | tr '[:lower:]' '[:upper:]')
echo -e '\t\t\t<feature name="part_id" value="Disk '"$dnum"' Side '"$dside"'"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
# "Disk A".."Disk Z" type (replaces 26 copy-pasted arms; output is
# byte-identical for every letter).
*"disk "[a-z]"."*)
dletter=$(echo "$lfilename" | sed -n 's/.*disk \([a-z]\)\..*/\1/p' | tr '[:lower:]' '[:upper:]')
echo -e '\t\t\t<feature name="part_id" value="Disk '"$dletter"'"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
# "Disk A..Z Side A/B" type (replaces 52 copy-pasted arms).
*"disk "[a-z]" side "[ab]"."*)
dletter=$(echo "$lfilename" | sed -n 's/.*disk \([a-z]\) side [ab]\..*/\1/p' | tr '[:lower:]' '[:upper:]')
dside=$(echo "$lfilename" | sed -n 's/.*disk [a-z] side \([ab]\)\..*/\1/p' | tr '[:lower:]' '[:upper:]')
echo -e '\t\t\t<feature name="part_id" value="Disk '"$dletter"' Side '"$dside"'"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
# "Disk A..Z Side 1/2 - Title" type (replaces 52 copy-pasted arms).
# e.g. "the perfect score (4am crack) disk a side 1 - antonyms i.dsk"
*"disk "[a-z]" side "[12]" -"*)
dletter=$(echo "$lfilename" | sed -n 's/.*disk \([a-z]\) side [12] -.*/\1/p' | tr '[:lower:]' '[:upper:]')
dside=$(echo "$lfilename" | sed -n 's/.*disk [a-z] side \([12]\) -.*/\1/p')
echo -e '\t\t\t<feature name="part_id" value="Disk '"$dletter"' Side '"$dside"' - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
# "Disk A..Z Side 1/2" type (replaces the copy-pasted a..u arms and
# generalizes through z; any literal arms remaining below this one become
# unreachable but harmless duplicates with identical output).
*"disk "[a-z]" side "[12]"."*)
dletter=$(echo "$lfilename" | sed -n 's/.*disk \([a-z]\) side [12]\..*/\1/p' | tr '[:lower:]' '[:upper:]')
dside=$(echo "$lfilename" | sed -n 's/.*disk [a-z] side \([12]\)\..*/\1/p')
echo -e '\t\t\t<feature name="part_id" value="Disk '"$dletter"' Side '"$dside"'"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk v side 1."*)
echo -e '\t\t\t<feature name="part_id" value="Disk V Side 1"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk v side 2."*)
echo -e '\t\t\t<feature name="part_id" value="Disk V Side 2"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk w side 1."*)
echo -e '\t\t\t<feature name="part_id" value="Disk W Side 1"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk w side 2."*)
echo -e '\t\t\t<feature name="part_id" value="Disk W Side 2"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk x side 1."*)
echo -e '\t\t\t<feature name="part_id" value="Disk X Side 1"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk x side 2."*)
echo -e '\t\t\t<feature name="part_id" value="Disk X Side 2"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk y side 1."*)
echo -e '\t\t\t<feature name="part_id" value="Disk Y Side 1"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk y side 2."*)
echo -e '\t\t\t<feature name="part_id" value="Disk Y Side 2"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk z side 1."*)
echo -e '\t\t\t<feature name="part_id" value="Disk Z Side 1"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk z side 2."*)
echo -e '\t\t\t<feature name="part_id" value="Disk Z Side 2"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
# "Disk X Side X - Title" type
# e.g. "superprint (4am crack) disk 1 side a - program.dsk"
*"disk 1 side a -"*)
echo -e '\t\t\t<feature name="part_id" value="Disk 1 Side A - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 1 side b -"*)
echo -e '\t\t\t<feature name="part_id" value="Disk 1 Side B - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 2 side a -"*)
echo -e '\t\t\t<feature name="part_id" value="Disk 2 Side A - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 2 side b -"*)
echo -e '\t\t\t<feature name="part_id" value="Disk 2 Side B - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 3 side a -"*)
echo -e '\t\t\t<feature name="part_id" value="Disk 3 Side A - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 3 side b -"*)
echo -e '\t\t\t<feature name="part_id" value="Disk 3 Side B - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 4 side a -"*)
echo -e '\t\t\t<feature name="part_id" value="Disk 4 Side A - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 4 side b -"*)
echo -e '\t\t\t<feature name="part_id" value="Disk 4 Side B - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 5 side a -"*)
echo -e '\t\t\t<feature name="part_id" value="Disk 5 Side A - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 5 side b -"*)
echo -e '\t\t\t<feature name="part_id" value="Disk 5 Side B - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 6 side a -"*)
echo -e '\t\t\t<feature name="part_id" value="Disk 6 Side A - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 6 side b -"*)
echo -e '\t\t\t<feature name="part_id" value="Disk 6 Side B - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 7 side a -"*)
echo -e '\t\t\t<feature name="part_id" value="Disk 7 Side A - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 7 side b -"*)
echo -e '\t\t\t<feature name="part_id" value="Disk 7 Side B - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 8 side a -"*)
echo -e '\t\t\t<feature name="part_id" value="Disk 8 Side A - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 8 side b -"*)
echo -e '\t\t\t<feature name="part_id" value="Disk 8 Side B - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 9 side a -"*)
echo -e '\t\t\t<feature name="part_id" value="Disk 9 Side A - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 9 side b -"*)
echo -e '\t\t\t<feature name="part_id" value="Disk 9 Side B - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 10 side a -"*)
echo -e '\t\t\t<feature name="part_id" value="Disk 10 Side A - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 10 side b -"*)
echo -e '\t\t\t<feature name="part_id" value="Disk 10 Side B - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
# Disk NumberSide.
# e.g. "voyage of the mimi (4am crack) disk 1a.dsk"
*"disk 1a."*)
echo -e '\t\t\t<feature name="part_id" value="Disk 1 Side A"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 1b."*)
echo -e '\t\t\t<feature name="part_id" value="Disk 1 Side B"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 2a."*)
echo -e '\t\t\t<feature name="part_id" value="Disk 2 Side A"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 2b."*)
echo -e '\t\t\t<feature name="part_id" value="Disk 2 Side B"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 3a."*)
echo -e '\t\t\t<feature name="part_id" value="Disk 3 Side A"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 3b."*)
echo -e '\t\t\t<feature name="part_id" value="Disk 3 Side B"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 4a."*)
echo -e '\t\t\t<feature name="part_id" value="Disk 4 Side A"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 4b."*)
echo -e '\t\t\t<feature name="part_id" value="Disk 4 Side B"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 5a."*)
echo -e '\t\t\t<feature name="part_id" value="Disk 5 Side A"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 5b."*)
echo -e '\t\t\t<feature name="part_id" value="Disk 5 Side B"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 6a."*)
echo -e '\t\t\t<feature name="part_id" value="Disk 6 Side A"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 6b."*)
echo -e '\t\t\t<feature name="part_id" value="Disk 6 Side B"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 7a."*)
echo -e '\t\t\t<feature name="part_id" value="Disk 7 Side A"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 7b."*)
echo -e '\t\t\t<feature name="part_id" value="Disk 7 Side B"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 8a."*)
echo -e '\t\t\t<feature name="part_id" value="Disk 8 Side A"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 8b."*)
echo -e '\t\t\t<feature name="part_id" value="Disk 8 Side B"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 9a."*)
echo -e '\t\t\t<feature name="part_id" value="Disk 9 Side A"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 9b."*)
echo -e '\t\t\t<feature name="part_id" value="Disk 9 Side B"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 10a."*)
echo -e '\t\t\t<feature name="part_id" value="Disk 10 Side A"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 10b."*)
echo -e '\t\t\t<feature name="part_id" value="Disk 10 Side B"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 1, side a."*)
echo -e '\t\t\t<feature name="part_id" value="Disk 1 Side A"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 1, side b."*)
echo -e '\t\t\t<feature name="part_id" value="Disk 1 Side B"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 2, side a."*)
echo -e '\t\t\t<feature name="part_id" value="Disk 2 Side A"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 2, side b."*)
echo -e '\t\t\t<feature name="part_id" value="Disk 2 Side B"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 3, side a."*)
echo -e '\t\t\t<feature name="part_id" value="Disk 3 Side A"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 3, side b."*)
echo -e '\t\t\t<feature name="part_id" value="Disk 3 Side B"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 4, side a."*)
echo -e '\t\t\t<feature name="part_id" value="Disk 4 Side A"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 4, side b."*)
echo -e '\t\t\t<feature name="part_id" value="Disk 4 Side B"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 5, side a."*)
echo -e '\t\t\t<feature name="part_id" value="Disk 5 Side A"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 5, side b."*)
echo -e '\t\t\t<feature name="part_id" value="Disk 5 Side B"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 6, side a."*)
echo -e '\t\t\t<feature name="part_id" value="Disk 6 Side A"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 6, side b."*)
echo -e '\t\t\t<feature name="part_id" value="Disk 6 Side B"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 7, side a."*)
echo -e '\t\t\t<feature name="part_id" value="Disk 7 Side A"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 7, side b."*)
echo -e '\t\t\t<feature name="part_id" value="Disk 7 Side B"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 8, side a."*)
echo -e '\t\t\t<feature name="part_id" value="Disk 8 Side A"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 8, side b."*)
echo -e '\t\t\t<feature name="part_id" value="Disk 8 Side B"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 9, side a."*)
echo -e '\t\t\t<feature name="part_id" value="Disk 9 Side A"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 9, side b."*)
echo -e '\t\t\t<feature name="part_id" value="Disk 9 Side B"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 10, side a."*)
echo -e '\t\t\t<feature name="part_id" value="Disk 10 Side A"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"disk 10, side b."*)
echo -e '\t\t\t<feature name="part_id" value="Disk 10 Side B"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
# Because case is a fallthrough, it only ever gets here if it fails to match
# one of the above entries.
# "Side X - Title" type
# e.g. "the bard's tale ii (4am and san inc crack) side a - program.dsk"
*"side a -"*)
echo -e '\t\t\t<feature name="part_id" value="Side A - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side b -"*)
echo -e '\t\t\t<feature name="part_id" value="Side B - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side c -"*)
echo -e '\t\t\t<feature name="part_id" value="Side C - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side d -"*)
echo -e '\t\t\t<feature name="part_id" value="Side D - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side e -"*)
echo -e '\t\t\t<feature name="part_id" value="Side E - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side f -"*)
echo -e '\t\t\t<feature name="part_id" value="Side F - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side g -"*)
echo -e '\t\t\t<feature name="part_id" value="Side G - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side h -"*)
echo -e '\t\t\t<feature name="part_id" value="Side H - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side i -"*)
echo -e '\t\t\t<feature name="part_id" value="Side I - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side j -"*)
echo -e '\t\t\t<feature name="part_id" value="Side J - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side k -"*)
echo -e '\t\t\t<feature name="part_id" value="Side K - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side l -"*)
echo -e '\t\t\t<feature name="part_id" value="Side L - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side m -"*)
echo -e '\t\t\t<feature name="part_id" value="Side M - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side n -"*)
echo -e '\t\t\t<feature name="part_id" value="Side N - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side o -"*)
echo -e '\t\t\t<feature name="part_id" value="Side O - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side p -"*)
echo -e '\t\t\t<feature name="part_id" value="Side P - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side q -"*)
echo -e '\t\t\t<feature name="part_id" value="Side Q - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side r -"*)
echo -e '\t\t\t<feature name="part_id" value="Side R - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side s -"*)
echo -e '\t\t\t<feature name="part_id" value="Side S - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side t -"*)
echo -e '\t\t\t<feature name="part_id" value="Side T - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side u -"*)
echo -e '\t\t\t<feature name="part_id" value="Side U - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side v -"*)
echo -e '\t\t\t<feature name="part_id" value="Side V - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side w -"*)
echo -e '\t\t\t<feature name="part_id" value="Side W - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side x -"*)
echo -e '\t\t\t<feature name="part_id" value="Side X - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side y -"*)
echo -e '\t\t\t<feature name="part_id" value="Side Y - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side z -"*)
echo -e '\t\t\t<feature name="part_id" value="Side Z - "/>' >>../xml/"$worktype"disk/disk$1.xml
;;
# "Side X" type
# e.g. "reading and me (4am crack) side a.dsk"
*"side a."*)
echo -e '\t\t\t<feature name="part_id" value="Side A"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side b."*)
echo -e '\t\t\t<feature name="part_id" value="Side B"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side c."*)
echo -e '\t\t\t<feature name="part_id" value="Side C"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side d."*)
echo -e '\t\t\t<feature name="part_id" value="Side D"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side e."*)
echo -e '\t\t\t<feature name="part_id" value="Side E"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side f."*)
echo -e '\t\t\t<feature name="part_id" value="Side F"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side g."*)
echo -e '\t\t\t<feature name="part_id" value="Side G"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side h."*)
echo -e '\t\t\t<feature name="part_id" value="Side H"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side i."*)
echo -e '\t\t\t<feature name="part_id" value="Side I"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side j."*)
echo -e '\t\t\t<feature name="part_id" value="Side J"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side k."*)
echo -e '\t\t\t<feature name="part_id" value="Side K"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side l."*)
echo -e '\t\t\t<feature name="part_id" value="Side L"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side m."*)
echo -e '\t\t\t<feature name="part_id" value="Side M"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side n."*)
echo -e '\t\t\t<feature name="part_id" value="Side N"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side o."*)
echo -e '\t\t\t<feature name="part_id" value="Side O"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side p."*)
echo -e '\t\t\t<feature name="part_id" value="Side P"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side q."*)
echo -e '\t\t\t<feature name="part_id" value="Side Q"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side r."*)
echo -e '\t\t\t<feature name="part_id" value="Side R"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side s."*)
echo -e '\t\t\t<feature name="part_id" value="Side S"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side t."*)
echo -e '\t\t\t<feature name="part_id" value="Side T"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side u."*)
echo -e '\t\t\t<feature name="part_id" value="Side U"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side v."*)
echo -e '\t\t\t<feature name="part_id" value="Side V"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side w."*)
echo -e '\t\t\t<feature name="part_id" value="Side W"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side x."*)
echo -e '\t\t\t<feature name="part_id" value="Side X"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side y."*)
echo -e '\t\t\t<feature name="part_id" value="Side Y"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side z."*)
echo -e '\t\t\t<feature name="part_id" value="Side Z"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side 1."*)
echo -e '\t\t\t<feature name="part_id" value="Side 1"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side 2."*)
echo -e '\t\t\t<feature name="part_id" value="Side 2"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side 3."*)
echo -e '\t\t\t<feature name="part_id" value="Side 3"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side 4."*)
echo -e '\t\t\t<feature name="part_id" value="Side 4"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side 5."*)
echo -e '\t\t\t<feature name="part_id" value="Side 5"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side 6."*)
echo -e '\t\t\t<feature name="part_id" value="Side 6"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side 7."*)
echo -e '\t\t\t<feature name="part_id" value="Side 7"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side 8."*)
echo -e '\t\t\t<feature name="part_id" value="Side 8"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side 9."*)
echo -e '\t\t\t<feature name="part_id" value="Side 9"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
*"side 10."*)
echo -e '\t\t\t<feature name="part_id" value="Side 10"/>' >>../xml/"$worktype"disk/disk$1.xml
;;
esac
# Give us the actual floppy definition, including size.
echo -e -n '\t\t\t<dataarea name="flop" size="' >>../xml/"$worktype"disk/disk$1.xml
wc "$filename" | awk '{print $3}' | tr -d '\n' >>../xml/"$worktype"disk/disk$1.xml
echo -e '">' >>../xml/"$worktype"disk/disk$1.xml
echo -e -n '\t\t\t\t<rom name="' >>../xml/"$worktype"disk/disk$1.xml
# BUT! Give us the lowercase filename in the XML definition.
echo -e -n $lfilename >>../xml/"$worktype"disk/disk$1.xml
# As always, tools use case of what's there.
echo -e -n '" size="' >>../xml/"$worktype"disk/disk$1.xml
wc "$filename" | awk '{print $3}' | tr -d '\n' >>../xml/"$worktype"disk/disk$1.xml
echo -e -n '" crc="' >>../xml/"$worktype"disk/disk$1.xml
crc32 $filename | awk '{print $1}' | tr -d '\n' >>../xml/"$worktype"disk/disk$1.xml
echo -e -n '" sha1="' >>../xml/"$worktype"disk/disk$1.xml
sha1sum $filename | awk '{print $1}' | tr -d '\n' >>../xml/"$worktype"disk/disk$1.xml
echo -e '" />' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t\t</dataarea>' >>../xml/"$worktype"disk/disk$1.xml
echo -e '\t\t</part>' >>../xml/"$worktype"disk/disk$1.xml
((disknum++))
# One more sanity check to do. If there is a cracker tag that's not
# solo 4am, then we need to put up a warning so we notice and give
# proper credit.
if grep -q "(4am and san inc crack)" ../xml/"$worktype"disk/disk$1.xml; then
echo -e '\t\t<!-- !!!!!!!!!! 4am and San crack !!!!!!!!!! -->' >>../xml/"$worktype"disk/disk$1.xml
fi
if grep -q "(logo crack)" ../xml/"$worktype"disk/disk$1.xml; then
echo -e '\t\t<!-- !!!!!!!!!! LoGo crack !!!!!!!!!! -->' >>../xml/"$worktype"disk/disk$1.xml
fi
done
echo -e '\t</software>\n' >>../xml/"$worktype"disk/disk$1.xml
# Clean out any wozaday collection tags.
# Change any crack tags to cleanly cracked.
sed -i 's/(4am crack)<\/description>/(cleanly cracked)<\/description>/g' ../xml/"$worktype"disk/disk$1.xml
sed -i 's/(san inc crack)<\/description>/(cleanly cracked)<\/description>/g' ../xml/"$worktype"disk/disk$1.xml
sed -i 's/(4am and san inc crack)<\/description>/(cleanly cracked)<\/description>/g' ../xml/"$worktype"disk/disk$1.xml
sed -i 's/ 800K / (800K 3.5") /g' ../xml/"$worktype"disk/disk$1.xml
sed -i 's/ (woz-a-day collection)<\/description>/<\/description>/g' ../xml/"$worktype"disk/disk$1.xml
sed -i 's/ 800K / (800K 3.5") /g' ../xml/"$worktype"disk/disk$1.xml
# Detect if we didn't get a publisher and add a warning notification.
sed -i 's/<publisher><\/publisher>/<publisher>-=-=UNKNOWNPUBLISHER=-=-<\/publisher>/g' ../xml/"$worktype"disk/disk$1.xml
# Clean up any "&" to "and"
sed -i 's/&/and/g' ../xml/"$worktype"disk/disk$1.xml
# If the dupecheck file above says this already exists in our MAME softlists,
# then all this work was a waste and we need to delete the XML now.
if [[ -s dupecheck ]]; then
rm ../xml/"$worktype"disk/disk$1.xml
# We found a dupe, so there's no point in continuing.
cd .. || exit
if [ $worktype == "WOZADAY" ]; then
if [ $1 == "1" ]; then
echo -e "$worktype: No new entries were made! -----------------------------------------------------------------"
else
echo -e "$worktype: $1 entries generated"
fi
fi
if [ $worktype == "CLCRACKED" ]; then
if [ $1 == "1" ]; then
echo -e "$worktype: No new entries were made! ---------------------------------------------------------------"
else
echo -e "$worktype: $1 entries generated"
fi
fi
return 1
fi
# Migrate all non-duplicate disk images to the postsorted folder for later
# parsing so we can be 100% sure the XML is correct even after mame -valid
mv ./*.woz ../postsorted 2>/dev/null
mv ./*.dsk ../postsorted 2>/dev/null
mv ./*.2mg ../postsorted 2>/dev/null
mv ./*.woz ../postsorted 2>/dev/null
cd .. || exit
}
#######################################
# Combine the per-disk XML fragments for one work type into a single,
# release-ordered software-list file ready to paste into the MAME XML.
# Globals:
#   worktype - "WOZADAY" or "CLCRACKED"; selects the xml/<worktype>disk dir
#   SAVEIFS  - original IFS value, restored before returning
# Arguments:
#   $1 - work type; NOTE(review): the case below keys on $1 while the file
#        paths key on $worktype, so callers are expected to pass the same
#        value for both — confirm against the call sites.
# Outputs:
#   ../<worktype>disk-combined-presort.xml (intermediate) and either
#   ../woz-combined.xml or ../cc-combined.xml (final, sorted)
# Returns:
#   0 on success; exits the shell if either cd fails.
#######################################
function aggregate() {
cd xml/"$worktype"disk || exit
# Stitch together: shared header + every generated disk fragment + footer.
# stderr from the disk*.xml glob is silenced in case no fragments exist.
cat ../../xmlheader.txt >../"$worktype"disk-combined-presort.xml
cat disk*.xml >>../"$worktype"disk-combined-presort.xml 2>/dev/null
cat ../../xmlfooter.txt >>../"$worktype"disk-combined-presort.xml
# This last step sorts the entries to be in release order so you can cut and paste
# them into the MAME XML as-is.
# Because the xsltproc process malforms the XML slightly, we'll use sed to fix.
case ${1} in
"WOZADAY")
xsltproc --novalid --nodtdattr -o ../woz-combined.xml ../../resortxml.xslt ../"$worktype"disk-combined-presort.xml
# Re-indent the <software> open tags that the XSLT transform flattened.
sed -i 's/<software name="ia2slnewnamehere">/\t<software name="ia2slnewnamehere">/g' ../woz-combined.xml
;;
"CLCRACKED")
xsltproc --novalid --nodtdattr -o ../cc-combined.xml ../../resortxml.xslt ../"$worktype"disk-combined-presort.xml
sed -i 's/<software name="ia2slnewnamehere">/\t<software name="ia2slnewnamehere">/g' ../cc-combined.xml
;;
esac
cd ../.. || exit
# Restore the field separator that was changed earlier in the script.
IFS=$SAVEIFS
return 0
}
# Now our main loop.
# Introduce ourselves!
echo -e "IA2SL 2022-04-16 r1 by Firehawke\n"
# First thing's first, make sure we picked a type to work with.
if [ $# -eq 0 ]; then
echo -e "This tool generates preliminary (needs editing for full compliance) software lists by polling Internet Archive"
echo -e "for the latest WOZ and/or cleanly cracked Apple II disks provided by 4am.\n"
echo "Usage:"
echo "No Apple disk type supplied. Please run as either:"
echo "$0 clcracked"
echo "$0 wozaday"
echo "or"
echo "$0 both (this parallelizes both)"
exit 1
fi
# We save the type of workload in this variable because we'll lose $1
# when we go into the functions that actually do the work
# (${1^^} uppercases the argument, so "wozaday" becomes "WOZADAY"; bash 4+).
worktype=${1^^}
# Remove and recreate certain work directories so they're clean.
# (mkdir/rm errors are silenced because the directories may or may not
# already exist on any given run.)
case ${worktype} in
"BOTH")
mkdir postsorted 2>/dev/null
rm -rf xml/WOZADAYdisk 2>/dev/null
mkdir xml 2>/dev/null
mkdir xml/WOZADAYdisk 2>/dev/null
rm -rf xml/CLCRACKEDdisk 2>/dev/null
mkdir xml 2>/dev/null
mkdir xml/CLCRACKEDdisk 2>/dev/null
;;
"WOZADAY")
# fall through to clcracked since both use same logic.
# (";&" is bash's explicit fall-through between case arms.)
;&
"CLCRACKED")
rm -rf xml/"$worktype"disk 2>/dev/null
mkdir xml 2>/dev/null
mkdir xml/"$worktype"disk 2>/dev/null
mkdir postsorted 2>/dev/null
;;
esac
# While I could have this in the case above, it's separated so that the
# logic is obviously separate to the eyes.
# Do cleanup of workspace first.
# NOTE(review): this block runs unconditionally and recreates BOTH type
# directories, which makes the per-type cleanup in the case above
# redundant and wipes e.g. xml/WOZADAYdisk even on a CLCRACKED-only run.
# Confirm that is intended.
mkdir postsorted 2>/dev/null
rm -rf xml/WOZADAYdisk 2>/dev/null
mkdir xml 2>/dev/null
mkdir xml/WOZADAYdisk 2>/dev/null
rm -rf xml/CLCRACKEDdisk 2>/dev/null
mkdir xml 2>/dev/null
mkdir xml/CLCRACKEDdisk 2>/dev/null
# Depending on which type we do, the loop needs to be changed; singles
# run as usual, both needs parallelization.
case ${worktype} in
"BOTH")
echo -e "IA2SL: WOZADAY and CLCRACKED"
# Run both work types as background jobs, then wait for both to finish.
startcycle WOZADAY &
startcycle CLCRACKED &
wait
;;
"WOZADAY")
# fall through to clcracked since both use same logic.
;&
"CLCRACKED")
# Because we're using a single type, we can pass $worktype
echo -e "IA2SL: $worktype"
startcycle $worktype
;;
esac
# Restore the field separator saved earlier (SAVEIFS is set out of view).
IFS=$SAVEIFS
exit 0
| true |
6eefb365200351e78d6e29ccccab2cf235bbaf27 | Shell | intel-analytics/BigDL | /python/dev/add_suffix_spark3.sh | UTF-8 | 4,131 | 3.203125 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is the script to add spark3 suffix or change spark2 suffix to spark3
# for all project names and dependencies.
# Add name=, ==, - and $ in pattern matching so that if the script runs twice,
# it won't change anything in the second run.
#######################################
# Rewrite every BigDL project name and dependency in the given file so it
# carries the "-spark3" suffix.  Each rename is applied twice: once for
# the unsuffixed name and once for a name already carrying "-spark2", so
# the script is safe to run on either flavor (and running it twice is a
# no-op, as the header comment explains).
# Arguments:
#   $1 - file to edit in place (e.g. a setup.py or packaging script)
#######################################
apply_spark3_suffix() {
    local file=$1

    # The umbrella "bigdl" package only appears as a setup() name.
    sed -i "s/name='bigdl'/name='bigdl-spark3'/g" "${file}"
    sed -i "s/name='bigdl-spark2'/name='bigdl-spark3'/g" "${file}"

    # Every sub-project follows the same naming scheme; apply the ten
    # standard rewrites (dependency pins, setup() names, dist/ wheel and
    # tarball paths, and egg-info directories) for each of them.
    # The patterns are byte-identical to the historical one-per-line seds.
    local pkg
    for pkg in dllib orca chronos friesian; do
        sed -i "s/bigdl-${pkg}==/bigdl-${pkg}-spark3==/g" "${file}"
        sed -i "s/bigdl-${pkg}-spark2==/bigdl-${pkg}-spark3==/g" "${file}"
        sed -i "s/name='bigdl-${pkg}'/name='bigdl-${pkg}-spark3'/g" "${file}"
        sed -i "s/name='bigdl-${pkg}-spark2'/name='bigdl-${pkg}-spark3'/g" "${file}"
        sed -i "s/dist\/bigdl_${pkg}-/dist\/bigdl_${pkg}_spark3-/g" "${file}"
        sed -i "s/dist\/bigdl_${pkg}_spark2-/dist\/bigdl_${pkg}_spark3-/g" "${file}"
        # "\\$" reaches sed as "\$", i.e. a literal dollar sign, matching
        # paths like dist/bigdl-<pkg>-${VERSION}... in build scripts.
        sed -i "s/dist\/bigdl-${pkg}-\\$/dist\/bigdl-${pkg}-spark3-\\$/g" "${file}"
        sed -i "s/dist\/bigdl-${pkg}-spark2-\\$/dist\/bigdl-${pkg}-spark3-\\$/g" "${file}"
        sed -i "s/bigdl_${pkg}.egg-info/bigdl_${pkg}_spark3.egg-info/g" "${file}"
        sed -i "s/bigdl_${pkg}_spark2.egg-info/bigdl_${pkg}_spark3.egg-info/g" "${file}"
    done

    # Extras-style dependencies that do not fit the pattern above.
    sed -i "s/bigdl-orca\[ray\]/bigdl-orca-spark3\[ray\]/g" "${file}"
    sed -i "s/bigdl-orca-spark2\[ray\]/bigdl-orca-spark3\[ray\]/g" "${file}"
    sed -i "s/bigdl-orca\[automl\]==/bigdl-orca-spark3\[automl\]==/g" "${file}"
    sed -i "s/bigdl-orca-spark2\[automl\]==/bigdl-orca-spark3\[automl\]==/g" "${file}"
}

if [[ $# -lt 1 ]]; then
    # Previously an empty "$1" made every sed fail with a confusing
    # "sed: can't read" error; print a usage message instead.
    echo "usage: $0 <file-to-rewrite>" >&2
else
    apply_spark3_suffix "$1"
fi
| true |
eea0ebe9df814a499c7889061704d1c149bc7a1d | Shell | Blivrig/alex | /lb-core-env/lb/live/script/00-package.sh | UTF-8 | 3,105 | 3.46875 | 3 | [] | no_license | #!/usr/bin/env bash
# Resolve the directory this script lives in so the relative paths below
# work regardless of the caller's working directory.
cur_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source $cur_dir/_variable.sh
# Timestamp-based build identifiers.
# NOTE(review): rcVersion is assigned here but not used in this script;
# presumably consumed by a sourced helper — verify before removing.
rcVersion=`date "+%Y%m%d%H%M%S"`
dubbo_version=`date "+%m%d%H"`
# Write this dubbo_version value back into the _variable.sh script
# ("2c" replaces line 2 of that file with the new assignment).
sed -i "2c dubbo_version=${dubbo_version}" $cur_dir/_variable.sh
echo $dubbo_version > /var/lb/dubbo_version/dubbo_version.txt
# -------------------------------------------------------- main --------------------------------------------------------
echo "
---------------------------------------- 1. 准备应用目录
"
# Step 1: prepare the application directories — empty release/, and a
# clean work/<app>/ directory for every package in either list.
rm -rf $apps_path/release/*
for app in $ALLPKG $RSPKG; do
if [[ -d "$apps_path/work/$app/" ]]; then
# Wipe the old directory contents.
rm -rf $apps_path/work/$app/*
else
mkdir -p $apps_path/work/$app/
fi
tree -L 1 "$apps_path/work/$app/"
done
echo "
---------------------------------------- 2. 重组装 app 包
$exp_script_dir/unpackage.py $base_path $project
"
# Step 2: reassemble the app packages via the external unpackage helper.
python $exp_script_dir/unpackage.py $base_path $project
echo "
---------------------------------------- 3. 替换配置, 应用全量包打包到 $apps_path/release/
"
# Step 3: substitute per-app configuration, then zip each work dir into a
# full .war under release/.
for app in $ALLPKG; do
WEBSITE="$apps_path/work/${app}/WEB-INF/classes/conf"
echo "${app}:
$exp_script_dir/conf.sh $WEBSITE $app"
source $exp_script_dir/conf.sh $WEBSITE $app
# Package into /release/ (subshell keeps the cd from leaking out).
echo " ( cd $apps_path/work/$app/ && zip -rq $apps_path/release/${app}.war ./ )"
(
cd $apps_path/work/$app/ && zip -rq $apps_path/release/${app}.war ./
)
done
echo "
---------------------------------------- 4. 特殊资源: $RSPKG
"
# Step 4: special-resource packages are copied in prebuilt, not rebuilt.
for app in $RSPKG; do
echo "---- $app"
# Collect the prebuilt war into $apps_path/release/ (gb source).
if [[ -f $apps_path/packages/${app}.war ]]; then
echo "cp $apps_path/packages/${app}.war $apps_path/release/"
cp $apps_path/packages/${app}.war $apps_path/release/
fi
# Collect the prebuilt war into $apps_path/release/ (lb source).
# "\cp" bypasses any cp alias so -f really overwrites.
if [[ -f $base_path/apps/${project}/pkgs/wars/${app}.war ]]; then
echo \cp -f $base_path/apps/${project}/pkgs/wars/${app}.war $apps_path/release/${app}.war
\cp -f $base_path/apps/${project}/pkgs/wars/${app}.war $apps_path/release/${app}.war
fi
# Unpack the war from $apps_path/release/ into $apps_path/work/.
if [[ -f "$apps_path/release/${app}.war" ]]; then
# ----- The new rcenter is re-unpacked under a dubbo_version subdirectory.
if [[ "$app" == rcenter ]]; then
echo "mkdir -p $apps_path/work/rcenter/$dubbo_version/rcenter/"
mkdir -p $apps_path/work/rcenter/$dubbo_version/rcenter/
echo "unzip -q $apps_path/release/rcenter.war -d $apps_path/work/rcenter/$dubbo_version/rcenter/"
unzip -q $apps_path/release/rcenter.war -d $apps_path/work/rcenter/$dubbo_version/rcenter/
else
# Other wars unpack straight into work/:
echo "unzip -q $apps_path/release/${app}.war -d $apps_path/work/${app}/"
unzip -q $apps_path/release/${app}.war -d $apps_path/work/${app}/
fi
fi
done
| true |
43bd12bcf7d5cbe931b8dcbe2466faaab1d76957 | Shell | j-san/node-deploy-bot | /tests/run.sh | UTF-8 | 298 | 3.421875 | 3 | [
"MIT"
] | permissive |
# Run each given test file inside a Vagrant sandbox, rolling the VM back
# to a pristine snapshot before every test.  Stops at the first failing
# test and propagates that test's exit code.

export NODE_ENV=test

# Enable sandbox mode so "rollback" below restores a clean snapshot.
vagrant sandbox on

echo
# Quote "$@" / "$file" so test paths with spaces survive word-splitting
# (the original unquoted $@ broke on such paths).
for file in "$@"; do
    # Restore the VM, then give its services a moment to settle.
    vagrant sandbox rollback && sleep 5
    # Print the test name without the leading "test/" directory.
    # (%s keeps a filename containing '%' from being read as a format.)
    printf ' %s ' "${file#test/}"
    if node "$file"; then
        echo -e "\033[32m✓\033[0m"
    else
        # $? at the top of the else branch is node's exit status.
        code=$?
        echo -e "\033[31m✖\033[0m"
        exit $code
    fi
done
echo
| true |
249b542b3c6ca04f3b2fd93148ca4619307a95b3 | Shell | Yenreh/Scrip-S.O-2018 | /script.sh | UTF-8 | 419 | 2.859375 | 3 | [] | no_license | #/bin/bash
#
# Collect basic host metrics and POST them to a ThingSpeak channel.
# FIXME: the API key is hardcoded below; consider reading it from the
# environment or a protected file instead of the script text.
#
# Number of running processes (tail skips the ps header line).
procesos=$(ps ax | tail -n +2 | wc -l)
# Used-space percentage of the root filesystem (first two digit chars).
espacio=$(df|grep root |awk '{print $5}'| cut -c1-2)
# Free memory as a percentage of total.
# NOTE: matches the Spanish-locale "Memoria" row of `free` output.
memoriaLibre=$(free|grep Memoria | awk '{print $4/$2 * 100 }')
#
# Bug fix: the original --data string ended with an unterminated
# "${memoriaLibre" expansion, which made bash abort with a
# "bad substitution" error before curl ever ran.
curl --silent --request POST --header "X-THINGSPEAKAPIKEY: VTN1M33C9MRMXMJT" --data "field1=${procesos}&field2=${espacio}&field3=${memoriaLibre}" http://api.thingspeak.com/update
| true |
2e71f7ccefd224f64557bd4bc8664596a3eeeec0 | Shell | esadr/.dotfiles | /bin/notify-severn | UTF-8 | 196 | 2.953125 | 3 | [] | no_license | #!/bin/bash
# Send a desktop notification on the host "severn"; when run elsewhere,
# relay the notify-send call over ssh.
TARGET="severn"
# Bash sets $HOSTNAME, not $HOST (a zsh/csh variable) — the original
# comparison was always false under bash, so it always took the ssh path.
if [[ "${HOSTNAME:-$(hostname)}" == "$TARGET" ]]; then
    PREFIX=""
else
    PREFIX="ssh $TARGET"
fi
NOTIFIER="notify-send"
ARGS="--urgency=critical"
TEXT="$*"   # "$*" joins all args into one string; "$@" inside quotes would not
# Intentionally unquoted: $PREFIX must word-split into "ssh severn" when remote.
$PREFIX $NOTIFIER $ARGS $TEXT
| true |
122839030373542709b509aa33969acfa37504b8 | Shell | tnakaicode/jburkardt | /pwl_interp_2d/pwl_interp_2d_prb.sh | UTF-8 | 677 | 2.828125 | 3 | [] | no_license | #!/bin/bash
#
# Compile, link, run and archive the output of the pwl_interp_2d test program.
# Each stage checks $? explicitly and aborts on failure.
g++ -c -I"$HOME/include" pwl_interp_2d_prb.cpp
if [ $? -ne 0 ]; then
  echo "Errors compiling pwl_interp_2d_prb.cpp"
  exit 1   # was bare "exit": after a successful test, that exited 0 and hid the failure
fi
#
# Link against the prebuilt library objects for this architecture.
g++ pwl_interp_2d_prb.o "$HOME/libcpp/$ARCH/pwl_interp_2d.o" \
  "$HOME/libcpp/$ARCH/test_interp_2d.o" \
  "$HOME/libcpp/$ARCH/r8lib.o" -lm
if [ $? -ne 0 ]; then
  echo "Errors linking and loading pwl_interp_2d_prb.o."
  exit 1
fi
#
rm pwl_interp_2d_prb.o
#
mv a.out pwl_interp_2d_prb
./pwl_interp_2d_prb > pwl_interp_2d_prb_output.txt
if [ $? -ne 0 ]; then
  echo "Errors running pwl_interp_2d_prb."
  exit 1
fi
rm pwl_interp_2d_prb
#
echo "Program output written to pwl_interp_2d_prb_output.txt"
| true |
031ff4be40363afcf4e4bd0a939b9f43d7a977e5 | Shell | ripple/vault-pki-formula | /test/script/common.sh | UTF-8 | 417 | 2.890625 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# source $DIR/common.sh
# Fetch Salt's eventlisten.py helper into /tmp unless it is already there.
# Returns wget's status when a download happens, 0 otherwise.
get_eventlisten() {
  local dest=/tmp/eventlisten.py
  [ -f "$dest" ] || wget https://raw.githubusercontent.com/saltstack/salt/develop/tests/eventlisten.py -O "$dest"
}
# Install the system packages and Python libraries the test harness needs.
install_python_deps() {
  # Intentionally unquoted below: each list must word-split into package names.
  local apt_pkgs="libffi-dev libssl-dev python-dev python-pip"
  local pip_pkgs="PyYAML hvac cryptography"
  apt-get install -y $apt_pkgs
  pip install $pip_pkgs
}
| true |
af5be0b7bfa4a3fca1431ed819e4078870484d04 | Shell | schmiedc/pipeline_3.0 | /jobs_alpha_3.1/registration/create-registration-jobs.sh | UTF-8 | 1,575 | 2.515625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Generate one standalone cluster job script per timepoint; each job runs a
# headless Fiji interest-point registration with parameters taken from the
# sourced master config.
source ../../master_3.3.sh
mkdir -p $jobs_registration
# NOTE(review): the loop variable shadows the list it iterates over, so
# $parallel_timepoints holds only the last value after the loop — looks
# intentional (common in these pipeline generators), but confirm.
for parallel_timepoints in $parallel_timepoints
do
job="$jobs_registration/register-$parallel_timepoints.job"
echo $job
# Each job file is an executable shell script: shebang first, then a single
# xvfb-wrapped Fiji invocation with every -D parameter baked in at
# generation time (the quoted multi-line string expands all variables now).
echo "#!/bin/bash" > "$job"
echo "$XVFB_RUN -a $Fiji \
-Dparallel_timepoints=$parallel_timepoints \
-Dimage_file_directory=$image_file_directory \
-Dxml_filename=$hdf5_xml_filename \
-Dreg_process_timepoint=$reg_process_timepoint \
-Dreg_process_channel=$reg_process_channel \
-Dreg_process_illumination=$reg_process_illumination \
-Dreg_process_angle=$reg_process_angle \
-Dchannels=$channels \
-Dreg_processing_channel=$reg_processing_channel \
-Dlabel_interest_points=$label_interest_points \
-Dtype_of_registration=$type_of_registration \
-Dtype_of_detection=$type_of_detection \
-Dsubpixel_localization=$subpixel_localization \
-Dimglib_container=$imglib_container \
-Dreg_radius_1=$reg_radius_1 \
-Dreg_radius_2=$reg_radius_2 \
-Dreg_threshold=$reg_threshold \
-Dinitial_sigma=$initial_sigma \
-Dthreshold_gaussian=$threshold_gaussian \
-Dregistration_algorithm=$registration_algorithm \
-Dreg_interest_points_channel=$reg_interest_points_channel \
-Dfix_tiles=$fix_tiles \
-Dmap_back_tiles=$map_back_tiles \
-Dtransformation_model=$transformation_model \
-Dmodel_to_regularize_with=$model_to_regularize_with \
-Dlambda=$lambda \
-Dallowed_error_for_ransac=$allowed_error_for_ransac \
-Dsignificance=$significance \
-- --no-splash $registration" >> "$job"
# Make the generated job runnable by the scheduler.
chmod a+x "$job"
done
| true |
32c8761a39967ea514f20059ead7dc0c27d3ce2f | Shell | Vinesma/.dotfiles | /waybar/launch.sh | UTF-8 | 461 | 3.140625 | 3 | [] | no_license | #!/usr/bin/env bash
# Main config path
CONFIG_FILE=$HOME/.dotfiles/waybar/config
STYLE_FILE=$HOME/.cache/wal/waybar-style.css
temp_file=/tmp/waybar.tmp
# Terminate already running bar instances
killall -q waybar
while pgrep waybar &> /dev/null; do
sleep 0.3
done
printf "%s\n" "--- LAUNCH waybar ---" | tee -a "$temp_file"
waybar --config "$CONFIG_FILE" --style "$STYLE_FILE" 2>&1 | tee -a "$temp_file" & disown
printf "%s\n" "[Waybar]: Bar launched..." | true |
7d1767f7fc7e953d938ddcb207b9426644c5c1ab | Shell | andrewslotin/dotfiles | /bash/.bin/gota | UTF-8 | 349 | 3 | 3 | [] | no_license | #!/bin/bash
# Build the project (via goba), run all tests outside vendor/, and report
# the outcome via ponysay when it is installed. Exits with go test's status.
export GOGC=off   # disable the Go GC for this short-lived tool run

echo -e "\033[0;36mChecking for compile-time errors\033[0m"
goba || exit 1

echo -e "\033[0;36mRunning tests\033[0m"
# Intentionally unquoted: the package list must word-split into go test args.
# fgrep is deprecated; grep -F is the standard fixed-string form.
go test $(go list ./... | grep -Fv 'vendor/')
result=$?

# command -v is the portable "is this tool installed?" check.
if command -v ponysay > /dev/null; then
    if [ "$result" -eq 0 ]; then
        ponysay "All good!"
    else
        ponysay "Shit's broken"
    fi
fi
exit "$result"
| true |
a346205cbce0cee944ec74c59714017b5f46e6a4 | Shell | awgeezrick/nyrb-covers-analysis | /analyze.sh | UTF-8 | 514 | 3.4375 | 3 | [] | no_license | #!/bin/sh
# Rebuild data/analyzed-covers.csv with the dominant color of every
# quantized cover image (one "slug,#RRGGBB" row per file).
csv="$(pwd)/data/analyzed-covers.csv"
rm -f "$csv"   # -f: do not error on the very first run, when the file is absent
echo "slug,dominant_cover_color" >> "$csv"
for filepath in "$(pwd)"/data/quantized-covers/*; do
  FILENAME="${filepath##*/}"   # basename via POSIX expansion, no rev|cut pipeline
  TITLE="${FILENAME%.gif}"     # strip the .gif extension
  echo "Making detailed histogram for $TITLE"
  # Dominant color: histogram rows sorted by pixel count, keep the hex code.
  COLOR="$(convert "$filepath" -format %c histogram:info:- | sort -r --sort=numeric | head -1 | sed -e 's/.*\(#[0-9A-F]\+\).*/\1/')"
  echo "$TITLE,$COLOR" >> "$csv"
done
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.