blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
b7647b7cfad9d6f708361be1fa9f17554cb57e89 | Shell | roza/LP | /administrations de serveurs : shell/Exemples.sh | UTF-8 | 368 | 2.703125 | 3 | [] | no_license | #trouver tout les fichiers .sh du dossier courant
find ./ -type f -name "*.sh"
#afficher les fichiers dans le repertoire courant qui ont ete modifies dans les 3 derniers jours
find ./ -mtime -3 -type f -print
#afficher les fichiers dans le repertoire courant qui ont ete modifies dans les 3 derniers jours etqui sont executable
find ./ -mtime -3 -type f -executable
| true |
12d9de9278f84fb17cc75a60a5c6bdba6bf8139d | Shell | goodmami/norman | /run.sh | UTF-8 | 3,001 | 3.890625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# usage: run.sh <gold-file> <sys-file> [<sys-file>...]
usage() {
cat <<EOF
Usage: run.sh [--help] [--norm|--eval] [-o DIR] GOLD SYS [SYS...]
Options:
-h, --help display this help message
--norm normalize the GOLD and SYS files
--eval evaluate GOLD and SYS
-o DIR output results to DIR/
Arguments:
GOLD path to the gold AMR file
SYS path to a system output
EOF
}
# redefine this as necessary
evaluate() {
python -m smatch --pr -f "$2" "$1"
}
DIR=$(cd -P -- "$(dirname -- "$0")" && pwd -P)
OUT="$DIR"/out
NORM=false
EVAL=false
declare -A NORMS
NORMS[i]="--canonical-role-inversion"
NORMS[r]="--reify maps/reifications.tsv"
NORMS[a]="--reify-attributes"
NORMS[p]="--preserve-structure"
NORMS[c]="--collapse maps/dereifications.tsv"
# these are the keys of tests that are actually run
TESTS=(
'i'
'r'
'a'
'p'
'i r'
'i a'
'i p'
'r a'
'r p'
'a p'
'i r a'
'i r p'
'r a p'
'i r a p'
)
makeopts() { for k in $1; do printf " %s" ${NORMS[$k]}; done; }
suffix() { sed 's/ \+/./g' <<< "$1"; }
while [ $# -gt 0 ]; do
case "$1" in
-h|--help) usage; exit 0 ;;
--norm) NORM=true; shift ;;
--eval) EVAL=true; shift ;;
-o) OUT=$(readlink -f "$2"); shift 2 ;;
-*) usage; echo "Invalid option: $1"; exit 1 ;;
*) break ;;
esac
done
if [ "$NORM" != "true" -a "$EVAL" != "true" ]; then
usage
echo "nothing to do; use --norm or --eval"
exit 0
fi
mkdir -p "$OUT"
if [ "$NORM" == "true" ]; then
for path in "$@"; do
[ -f "$path" ] || { echo "skipping invalid file: $path"; continue; }
f=$(basename "${path%%.txt}")
echo "Normalizing $f" 1>&2
# get clean file without comments
sed '/^#/d' "$path" > "$OUT"/"$f.raw.txt"
# sanity check that the normalizer doesn't change things
# without requested normalizations
python norman.py "$path" > "$OUT"/"$f.norm.txt"
for x in "${TESTS[@]}"; do
suf=$( suffix "$x" )
python norman.py $( makeopts "$x" ) \
"$path" \
> "$OUT/$f.$suf.txt"
done
done
fi
if [ "$EVAL" == "true" ]; then
g=$(basename "${1%%.txt}")
shift # remove gold path from "$@"
for x in "${TESTS[@]}"; do
suf=$( suffix "$x" )
echo "Evaluating $g to itself ($suf)"
evaluate "$OUT/$g.raw.txt" "$OUT/$g.$suf.txt" \
> "$OUT/$g.$suf.eval"
done
for sys in "$@"; do
s=$(basename "${sys%%.txt}")
echo "Evaluating $g and $s (raw)"
evaluate "$OUT"/"$g.raw.txt" "$OUT"/"$s.raw.txt" \
> "$OUT"/"$s.raw.eval"
echo "Evaluating $g and $s (norm)"
evaluate "$OUT"/"$g.norm.txt" "$OUT"/"$s.norm.txt" \
> "$OUT"/"$s.norm.eval"
for x in "${TESTS[@]}"; do
suf=$( suffix "$x" )
echo "Evaluating $g and $s ($suf)"
evaluate "$OUT/$g.$suf.txt" "$OUT/$s.$suf.txt" \
> "$OUT/$s.$suf.eval"
done
done
fi
| true |
eaabc7892658e2cf8d9e1cb1b950cd97fc8aba9b | Shell | mnjstwins/kocho | /completion/zsh/_kocho | UTF-8 | 2,813 | 3.265625 | 3 | [
"Apache-2.0"
] | permissive | #compdef kocho
#
# Completion script for kocho (https://github.com/giantswarm/kocho)
#
# Source: https://github.com/giantswarm/kocho/tree/master/completion/zsh/_kocho
#
# Authors
#
# * Timo Derstappen (https://github.com/teemow)
local curcontext="$curcontext" state line ret=1
local -a common_ops
common_ops=(
'--help[Show help]'
'--version[Print the version and exit]'
)
local -a _1st_arguments
_1st_arguments=(
'create:Create a new swarm on AWS'
'destroy:Destroy a swarm on AWS'
'instances:List all the dns names of the instances of a swarm'
'status:Status of a swarm'
'list:List all existing swarms'
'wait-until:Wait until a swarm has a certain status'
'dns:Update DNS of a swarm'
'kill-instance:Kill instance of a swarm'
'help:Show a list of commands or help for one command'
'version:Print the version and exit'
'etcd-discovery:Print the etcd discovery url of a swarm'
)
_arguments -C \
$common_ops \
'1: :->cmds' \
'*: :->args' && ret=0
__kocho_cluster () {
local expl
declare -a cluster
cluster=(${(f)"$(_call_program commands kocho list| grep -v "^Name" | awk '{print $1}')"})
_describe -t kocho-cluster "Cluster" cluster
}
case $state in
cmds)
_describe -t commands 'kocho command' _1st_arguments && ret=0
;;
args)
local -a args
args=(
$common_ops
)
case $words[2] in
(create)
local opts
opts=(
'--certificate:Certificate ARN to use to create aws cluster'
'--cluster-size:Number of nodes a cluster should have'
'--image:Image version that should be used to create a swarm'
'--no-block:Do not wait until the swarm has been started before exiting'
'--yochu:Version of Yochu to provision cluster nodes'
)
_describe -t opts 'Options' opts
args=(
'2:__opts'
)
;;
(destroy)
args=(
'2:cluster:__kocho_cluster'
)
;;
(instances)
args=(
'2:cluster:__kocho_cluster'
)
;;
(status)
args=(
'2:cluster:__kocho_cluster'
)
;;
(dns)
args=(
'2:cluster:__kocho_cluster'
)
;;
(kill-instance)
args=(
'2:cluster:__kocho_cluster'
)
;;
(etcd-discovery)
args=(
'2:cluster:__kocho_cluster'
)
;;
(help)
_describe -t commands 'kocho command' _1st_arguments
args=(
'2:__commands'
)
;;
*)
(( ret )) && _message 'no more arguments'
;;
esac
_arguments $args && ret=0
;;
esac
return ret
# Local Variables:
# mode: Shell-Script
# sh-indentation: 2
# indent-tabs-mode: nil
# sh-basic-offset: 2
# End:
# vim: ft=zsh sw=2 ts=2 et
| true |
86635fc142d3efb818d8c57fe30cdc4efd059f08 | Shell | ainfosec/tanooki | /gather_cpu_data/flush-reload/hornby/gather_data_horby.sh | UTF-8 | 558 | 2.59375 | 3 | [
"Apache-2.0"
] | permissive | #! /bin/bash
SAMPLES=10
SYSTEM_USER=$USER
URL_SET='wiki-top-100-of-2018-HTTPS.txt'
TRAIN_DIR='data-'$SYSTEM_USER'-1-10-2018'
BIN='links'
PROBE='links.probes'
mkdir data
trap "exit" INT TERM ERR
trap "kill 0" EXIT
echo $(lscpu) > ./data/$USER'-CPU_INFO.txt'
./system_load.sh &
ruby ./ruby/AttackTrainer.rb \
--url-list ./experiments/links/url_sets/$URL_SET \
--train-dir ./data/$TRAIN_DIR \
--run-binary ./experiments/links/binaries/$BIN \
--probe-file ./experiments/links/binaries/$PROBE \
--samples $SAMPLES \
--sleep-kill 10
| true |
3212127828b0c561c40e26366b84f8f3f7f17041 | Shell | dingwell/dotfiles | /bin_files/gen_passwd.sh | UTF-8 | 176 | 3.28125 | 3 | [] | no_license | #!/bin/bash
#
# Usage:
#
# gen_passwd.sh [N]
#
# N Number of characters (default = 16)
#
N=$1
[ "$N" == "" ] && N=16
tr -dc A-Za-z0-9_ < /dev/urandom | head -c ${N} | xargs
| true |
ca250288824f14db01ac0085d1fee8c143c551b6 | Shell | wjvanderlip/NUCENG204 | /SIS/linux_drivers/install_sis.sh | UTF-8 | 1,817 | 3.703125 | 4 | [] | no_license | #! /bin/bash
if [[ $EUID -ne 0 ]]; then
echo "This script must be run as root" 1>&2
exit 1
fi
# Get users in order to run this script as multiple users
USER_LIST=`users`
stringarray=($USER_LIST)
NON_ROOT_USER=${stringarray[0]}
# Env for running as a different user
RUNAS="sudo -u $NON_ROOT_USER"
# Get ubuntu version
VERSION_STRING=`lsb_release -r`
VERSION=${VERSION_STRING//[A-Z:a]/}
SISDIR=$HOME/SIS
PERMISSION_FILE=/etc/udev/rules.d/60-sis3150.rules
CURDIR=`pwd`
if [ $VERSION == "12.04" ] || [ $VERSION == "14.04" ]|| [ $VERSION == "16.04" ]|| [ $VERSION == "18" ]; then
# Unpack the SIS tar
$RUNAS bash<<___
mkdir $SISDIR
tar -C $SISDIR -xzf sisusb-1.2-003.tar.gz
___
# Preparation for 14.04 version - install usb backward compatibility
if [ $VERSION == "14.04" ] || [ $VERSION == "16.04" ]|| [ $VERSION == "18" ]; then
./install_usb_compat.sh
fi
$RUNAS bash<<___
# Configure the build
cd $SISDIR/sisusb-1.2-003
if [ $VERSION == "14.04" ] || [ $VERSION == "18" ]; then
./configure --prefix=$SISDIR --with-usb-headerdir=/usr/local/include --with-usb-libdir=/usr/local/lib --with-tcl-libdir=/usr/lib/x86_64-linux-gnu/ --with-tcl-header-dir=/usr/include/tcl8.5/
else
./configure --prefix=$SISDIR --with-usb-headerdir=/usr/include --with-usb-libdir=/lib/x86_64-linux-gnu
fi
# A couple config changes necessary for linking
sed -i 's/LIBS = -ldl/LIBS = -ldl -lusb/g' loader/apps/Makefile
sed -i 's/LIBS = -ldl/LIBS = -ldl -lusb/g' hotplug/Makefile
# Build and install
make
make install
___
# Setup permissions
cat <<EOM >$PERMISSION_FILE
SUBSYSTEM=="usb", ATTR{idVendor}=="1657", ATTR{idProduct}=="3150", MODE="0777"
EOM
cd $CURDIR
else
echo "Detected Ubuntu Version: $VERSION"
echo "install_sis.sh only works for ubuntu 12.04"
echo "Exiting..."
fi
| true |
0789f9c7dd0e35a495cd504e57ae5ee38a1b05f0 | Shell | rohitverma229/Shell_Script_Linux | /Hello.sh | UTF-8 | 175 | 2.984375 | 3 | [] | no_license | #!/bin/bash
for((i=0;i<=4;i++))
do
echo "Shifted $i time(s):"
echo -----------------
echo 1: $1
echo 2: $2
echo 3: $3
echo 4: $4
echo
shift
done
| true |
59cd2b40d93c09196538366f346c7c5589317437 | Shell | dsnlab/TAG_scripts | /org/bids-conversion/convert_bids_w3_vgw.sh | UTF-8 | 12,258 | 3.78125 | 4 | [] | no_license | #!/bin/bash +x
# This script will convert dicoms, create the BIDS files structure,
# and move and rename converted files to BIDS specification
#
# It will also check the number of files and print errors to the error log
#
#VGW ran this code from her own directory;
#if you are trying to run this on the DSN lab shared folder/directory,
#you will likely need to adjust the paths to pull the correct data
# DEPENDENCIES: All of these variables are set by the outer
# batch conversion script (called batch_convert_bids_w3_vgw.sh at the time of writing).
for varname in sessid subid dicomdir outdir config scriptsdir
do
if [ -z "${!varname+SET}" ]
then
echo "ERROR: \$$varname not set!" >&2
echo "(Are you running this standalone? \`$0' is meant to be called from a sbatch wrapper script -- \`batch_$0')" >&2
fi
done
# Load mcverter and other software
module load MRIConvert
module load python3
module load dcmtk
# Load variables
source "$config"
echo "${subid}"_"${sessid}"
# Create error log file
touch "${errorlog}"
# Check directory dependencies
mkdir -p "${archivedir}" "${niidir}" "${bidsdir}" "${bidsdir}"/derivatives
# Convert dicoms to niftis
echo -e "\nCreating nii directory"
mkdir -pv "$niidir"
mkdir -pv "${subid}"/"${sessid}"
cd "$niidir"/"${subid}"/"${sessid}"
if [ "${convertanat}" == "TRUE" ] && [ ! "$(ls -A anat)" ]; then
echo -e "\nConverting anatomical mprage into nifti"
mkdir anat
anatomicaloutput="$niidir/${subid}/${sessid}/anat"
mcverter -o "${anatomicaloutput}"/ --format=nifti --nii --match="${anat}" -F -PatientName-PatientId-SeriesDate-SeriesTime-StudyId-StudyDescription+SeriesNumber-SequenceName-ProtocolName+SeriesDescription $dicomdir
cd "${anatomicaloutput}"
du -sh *.nii
gzip -f *.nii
cd "$niidir"/"${subid}"/"${sessid}"
else
anatomicaloutput="$niidir/${subid}/${sessid}/anat"
fi
if [ "${convertfmap}" == "TRUE" ] && [ ! "$(ls -A fmap)" ]; then
echo -e "\nConverting fieldmaps into niftis"
cd "$niidir"/"${subid}"/"${sessid}"
mkdir fmap
fmapoutput="$niidir/${subid}/${sessid}/fmap"
mcverter -o "${fmapoutput}"/ --format=nifti --nii --match="${fmap}" -F -PatientName-PatientId-SeriesDate-SeriesTime-StudyId-StudyDescription+SeriesNumber-SequenceName-ProtocolName+SeriesDescription $dicomdir
cd "${fmapoutput}"
du -sh *.nii
gzip -f *.nii
cd "$niidir"/"${subid}"/"${sessid}"
else
fmapoutput="$niidir/${subid}/${sessid}/fmap"
fi
if [ "${convertdti}" == "TRUE" ] && [ ! "$(ls -A dti)" ]; then
echo -e "\nConverting DTI into 4D nifti"
cd "$niidir"/"${subid}"/"${sessid}"
mkdir dti
dtioutput="$niidir/${subid}/${sessid}/dti"
mcverter -o "${dtioutput}"/ --format=nifti --nii --fourd --match="${dti}" -F -PatientName-PatientId-SeriesDate-SeriesTime-StudyId-StudyDescription+SeriesNumber-SequenceName-ProtocolName+SeriesDescription $dicomdir
cd "${dtioutput}"
du -sh *.nii
gzip -f *.nii
cd "$niidir"/"${subid}"/"${sessid}"
else
dtioutput="$niidir/${subid}/${sessid}/dti"
fi
if [ "${convertrest}" == "TRUE" ] && [ ! "$(ls -A resting)" ]; then
echo -e "\nConverting resting state into 4D niftis"
cd "$niidir"/"${subid}"/"${sessid}"
mkdir -p resting
restingoutput="$niidir/${subid}/${sessid}/resting"
for rest in ${resting[@]}; do
mcverter -o "${restingoutput}"/ --format=nifti --nii --fourd --match="${rest}" -F -PatientName-PatientId-SeriesDate-SeriesTime-StudyId-StudyDescription+SeriesNumber-SequenceName-ProtocolName+SeriesDescription $dicomdir
done
cd "${restingoutput}"
du -sh *.nii
gzip -f *.nii
cd "$niidir"/"${subid}"/"${sessid}"
else
restingoutput="$niidir/${subid}/${sessid}/resting"
fi
if [ "${converttask}" == "TRUE" ] && [ ! "$(ls -A task)" ]; then
echo -e "\nConverting fMRI task data into 4D niftis"
cd "$niidir"/"${subid}"/"${sessid}"
mkdir -p task
taskoutput="$niidir/${subid}/${sessid}/task"
for task in ${tasks[@]}; do
mcverter -o "${taskoutput}"/ --format=nifti --nii --fourd --match=${task} -F -PatientName-PatientId-SeriesDate-SeriesTime-StudyId-StudyDescription+SeriesNumber-SequenceName-ProtocolName+SeriesDescription $dicomdir
done
cd "${taskoutput}"
du -sh *.nii
gzip -f *.nii
cd "$niidir"/"${subid}"/"${sessid}"
else
taskoutput="$niidir/${subid}/${sessid}/task"
fi
# Run python script to extract Multiband Acceleration Factor
echo -e "\nCreating text file with additionl acquisition info"
python $scriptsdir/extract_dicom_fields.py "$dicomdir" "$niidir"/"${subid}"/"${sessid}"/"${subid}"_"${sessid}"_multiband_accel.txt PatientName StudyDate SeriesNumber SeriesDescription ImageComments -a -n
# Copy group meta-data to bids directory
rsync -aiv $scriptsdir/meta_data/ $bidsdir/
# Create bids directory structure for one subject
echo -e "\nCreating BIDS directory stucture..."
mkdir -pv "$bidsdir"/sub-"${subid}"/ses-"${sessid}"
cd "$bidsdir"/sub-"${subid}"/ses-"${sessid}"
if [ "${convertanat}" == "TRUE" ]; then mkdir -pv anat; fi
if [ "${convertfmap}" == "TRUE" ]; then mkdir -pv fmap; fi
if [ "${convertdti}" == "TRUE" ]; then mkdir -pv dwi; fi
if [ "${converttask}" == "TRUE" ]; then mkdir -pv func; fi
# Copy and rename files to BIDS structure
# structural (mprage)
if [ "${convertanat}" == "TRUE" ]; then
echo -e "\nCopying structural"
if [ $(ls "${anatomicaloutput}"/*"${anat}".nii.gz | wc -l) -eq 1 ]; then
cp ${cpflags} "${anatomicaloutput}"/*"${anat}".nii.gz "$bidsdir"/sub-"${subid}"/ses-"${sessid}"/anat/sub-"${subid}"_ses-"${sessid}"_T1w.nii.gz
elif [ $(ls "${anatomicaloutput}"/*"${anat}".nii.gz | wc -l) -eq 0 ]; then
# print missing file paths in errorlog.txt if = 0 files
echo "ERROR: no files; nothing to copy"
echo "${anatomicaloutput}": MISSING "${anat}" >> $errorlog
else
# print file paths in errorlog.txt if =~ 1 file; copy both files
echo "ERROR: wrong number of files; file with highest sequence number copied"
ls "${anatomicaloutput}"/*"${anat}".nii.gz >> $errorlog
#Uncomment the commented lines if you have repeated mprages and would like them to be saved in the bidsbir.
#For now, the script does not do this as we only re-do mprages if one is really bad quality - and I don't think the poor image
#should be copied to bidsdir and used by bidsapps
#The script now copies the T1w scan with the highest sequence number, so the second one if the sequence was repeated (edited by MEAB on 11/11/2019)
#t1r1=$(ls "${anatomicaloutput}"/*"${anat}".nii.gz | head -1)
t1r2=$(ls "${anatomicaloutput}"/*"${anat}".nii.gz | tail -1)
#cp ${cpflags} "${t1w1}" "$bidsdir"/sub-"${subid}"/ses-"${sessid}"/anat/sub-"${subid}"_ses-"${sessid}"_run-01_T1w.nii.gz
cp ${cpflags} "${t1r2}" "$bidsdir"/sub-"${subid}"/ses-"${sessid}"/anat/sub-"${subid}"_ses-"${sessid}"_T1w.nii.gz
fi
fi
# fieldmaps
if [ "${convertfmap}" == "TRUE" ]; then
echo -e "\nCopying fieldmaps"
if [ $(ls "${fmapoutput}"/*"${fmap}"*.gz | wc -l) -eq 0 ]; then
# print missing file paths in errorlog.txt if = 0 files
echo "ERROR: no files; nothing to copy"
echo "${fmapoutput}": MISSING "${fmap}" >> $errorlog
elif [ "${fieldmapEPI}" == "TRUE" ]; then
ap=$(ls -f "${fmapoutput}"/*_ap.nii.gz | head -1)
pa=$(ls -f "${fmapoutput}"/*_pa.nii.gz | head -1)
cp ${cpflags} "${ap}" "$bidsdir"/sub-"${subid}"/ses-"${sessid}"/fmap/sub-"${subid}"_ses-"${sessid}"_dir-ap_epi.nii.gz
cp ${cpflags} "${pa}" "$bidsdir"/sub-"${subid}"/ses-"${sessid}"/fmap/sub-"${subid}"_ses-"${sessid}"_dir-pa_epi.nii.gz
# print file paths in errorlog.txt if =~ 2 files
if [ $(ls "${fmapoutput}"/*_ap.nii.gz | wc -l) -ne 2 ]; then
echo "ERROR: wrong number of files"
ls "${fmapoutput}"/*_ap.nii.gz >> $errorlog
fi
else
phase=$(ls -f "${fmapoutput}"/*"${fmap}".nii.gz)
mag1=$(ls -f "${fmapoutput}"/*"${fmap}"_01.nii.gz)
mag2=$(ls -f "${fmapoutput}"/*"${fmap}"_02.nii.gz)
cp ${cpflags} "${phase}" "$bidsdir"/sub-"${subid}"/ses-"${sessid}"/fmap/sub-"${subid}"_ses-"${sessid}"_phasediff.nii.gz
cp ${cpflags} "${mag1}" "$bidsdir"/sub-"${subid}"/ses-"${sessid}"/fmap/sub-"${subid}"_ses-"${sessid}"_magnitude1.nii.gz
cp ${cpflags} "${mag2}" "$bidsdir"/sub-"${subid}"/ses-"${sessid}"/fmap/sub-"${subid}"_ses-"${sessid}"_magnitude2.nii.gz
# print file paths in errorlog.txt if =~ 3 files
if [ $(ls "${fmapoutput}"/*"${fmap}"*.gz | wc -l) -ne 3 ]; then
echo "ERROR: wrong number of files"
ls "${fmapoutput}"/*"${fmap}"*.gz >> $errorlog
fi
fi
fi
# DTI
if [ "${convertdti}" == "TRUE" ]; then
echo -e "\nCopying DTI"
if [ $(ls "${dtioutput}"/*"${dti}"*.nii.gz | wc -l) -eq 2 ]; then
cp ${cpflags} "${dtioutput}"/*"${dti}"*rl.nii.gz "$bidsdir"/sub-"${subid}"/ses-"${sessid}"/dwi/sub-"${subid}"_ses-"${sessid}"_acq-rl_dwi.nii.gz
cp ${cpflags} "${dtioutput}"/*"${dti}"*lr.nii.gz "$bidsdir"/sub-"${subid}"/ses-"${sessid}"/dwi/sub-"${subid}"_ses-"${sessid}"_acq-lr_dwi.nii.gz
cp ${cpflags} "${dtioutput}"/*"${dti}"*rl_bvecs "$bidsdir"/sub-"${subid}"/ses-"${sessid}"/dwi/sub-"${subid}"_ses-"${sessid}"_acq-rl_dwi_bvecs
cp ${cpflags} "${dtioutput}"/*"${dti}"*rl_bvals "$bidsdir"/sub-"${subid}"/ses-"${sessid}"/dwi/sub-"${subid}"_ses-"${sessid}"_acq-rl_dwi_bvals
cp ${cpflags} "${dtioutput}"/*"${dti}"*lr_bvecs "$bidsdir"/sub-"${subid}"/ses-"${sessid}"/dwi/sub-"${subid}"_ses-"${sessid}"_acq-lr_dwi_bvecs
cp ${cpflags} "${dtioutput}"/*"${dti}"*lr_bvals "$bidsdir"/sub-"${subid}"/ses-"${sessid}"/dwi/sub-"${subid}"_ses-"${sessid}"_acq-lr_dwi_bvals
elif [ $(ls "${dtioutput}"/*"${dti}"*.nii.gz | wc -l) -eq 0 ]; then
# print missing file paths in errorlog.txt if = 0 files
echo "ERROR: no files; nothing to copy"
echo "${dtioutput}": MISSING "${dti}" >> $errorlog
else
# print file paths in errorlog.txt if =~ 2 files; do not copy file
echo "ERROR: wrong number of files; files not copied"
ls "${dtioutput}"/*"${dti}"*.nii.gz >> $errorlog
fi
fi
# resting state
if [ "${convertrest}" == "TRUE" ]; then
echo -e "\nCopying resting state"
for rest in ${resting[@]}; do
if [ $(ls "${restingoutput}"/*"${rest}".nii.gz | wc -l) -eq 1 ]; then
runnum="$(echo "${rest}" | tail -c 2)"
cp ${cpflags} "${restingoutput}"/*"${rest}".nii.gz "$bidsdir"/sub-"${subid}"/ses-"${sessid}"/func/sub-"${subid}"_ses-"${sessid}"_task-rest_run-0"${runnum}"_bold.nii.gz
elif [ $(ls "${restingoutput}"/*"${rest}".nii.gz | wc -l) -eq 0 ]; then
# print missing file paths in errorlog.txt if = 0 files
echo "ERROR: no "${rest}" files; nothing to copy"
echo "${restingoutput}": MISSING "${rest}" >> $errorlog
else
# print file paths in errorlog.txt if =~ 1 files; do not copy file
echo "ERROR: wrong number of "${rest}" files; files not copied"
ls "${restingoutput}"/*"${rest}".nii.gz >> $errorlog
fi
done
fi
# fMRI task data
if [ "${converttask}" == "TRUE" ]; then
echo -e "\nCopying task fMRI"
for task in ${tasks[@]}; do
runnum="$(echo "${task}" | sed 's/[^0-9]//g')"
taskalpha="$(echo "${task}" | sed 's/[^a-zA-Z]//g')"
if [ $(ls "${taskoutput}"/*"${task}"*.nii.gz | wc -l) -eq 1 ]; then
if [[ $runnum =~ ^[0-9]+$ ]]; then
cp ${cpflags} "${taskoutput}"/*"${task}"*.nii.gz "$bidsdir"/sub-"${subid}"/ses-"${sessid}"/func/sub-"${subid}"_ses-"${sessid}"_task-"${taskalpha}"_run-0"${runnum}"_bold.nii.gz
else
cp ${cpflags} "${taskoutput}"/*"${task}"*.nii.gz "$bidsdir"/sub-"${subid}"/ses-"${sessid}"/func/sub-"${subid}"_ses-"${sessid}"_task-"${taskalpha}"_run-01_bold.nii.gz
fi
elif [ $(ls "${taskoutput}"/*"${task}"*.nii.gz | wc -l) -eq 0 ]; then
# print missing file paths in errorlog.txt if = 0 files
echo "ERROR: no "${task}" files; nothing to copy"
echo "${taskoutput}": MISSING "${task}" >> $errorlog
else
# print file paths in errorlog.txt if =~ 1 files; copy the largest file
echo "ERROR: wrong number of "${task}" files; largest file copied"
ls "${taskoutput}"/*"${task}"*.nii.gz >> $errorlog
largestfile=$(du -sh "${taskoutput}"/*"${task}"*.nii.gz | sort -n | tail -1 | cut -f2)
if [[ $runnum =~ ^[0-9]+$ ]]; then
cp ${cpflags} "${largestfile}" "$bidsdir"/sub-"${subid}"/ses-"${sessid}"/func/sub-"${subid}"_ses-"${sessid}"_task-"${taskalpha}"_run-0"${runnum}"_bold.nii.gz
else
cp ${cpflags} "${largestfile}" "$bidsdir"/sub-"${subid}"/ses-"${sessid}"/func/sub-"${subid}"_ses-"${sessid}"_task-"${taskalpha}"_run-01_bold.nii.gz
fi
fi
done
fi
echo -e "\nCOMPLETED"
| true |
73d56714cd3bf5cb2f8c724977a3437c029b6b91 | Shell | cdown/pkgbuilds | /plenv/PKGBUILD | UTF-8 | 546 | 2.828125 | 3 | [] | no_license | # Maintainer: Chris Down <chris@chrisdown.name>
pkgname=plenv
pkgver=2.3.1
pkgrel=1
pkgdesc='Simple Perl version management'
arch=('any')
url='https://github.com/tokuhirom/plenv'
license=('GPL')
optdepends=('perl-build: To build perl')
depends=()
source=("https://github.com/tokuhirom/plenv/archive/${pkgver}.zip")
md5sums=('a5513465573a2d9374e2168cdb0dccbd')
package() {
mkdir -p "${pkgdir?}"/{opt/plenv,usr/bin}
cd "${srcdir?}/$pkgname-$pkgver" || return
cp -a -- * "$pkgdir"/opt/plenv
ln -s /opt/plenv/libexec/plenv "$pkgdir/usr/bin/plenv"
}
| true |
07a924a8aeb6717399b1ad08d12095dd18914129 | Shell | ErebusBat/puppet-nagios-nrpe | /files/usrbin/ipmi-update-reading-cache.sh | UTF-8 | 385 | 3.21875 | 3 | [] | no_license | #!/bin/bash
CACHE_DIR=/var/log/ipmi
HOST=$HOSTNAME
CACHE=$CACHE_DIR/sensor-reading-cache.$HOSTNAME
if [ ! -d $CACHE_DIR ]; then
mkdir -p $CACHE_DIR
fi
echo "# Sensor cache last updated at: `date`" > $CACHE
#/usr/sbin/ipmi-sensors 2>&1 >> $CACHE
/usr/sbin/ipmi-sensors >> $CACHE
echo "# Sensor cache last updated at: `date`" >> $CACHE
chown -R nagios:nagios $CACHE_DIR
chmod 644 $CACHE | true |
3ccd80faa5fff07bd55b33e43024d4d5033a3ea5 | Shell | deanofthewebb/code-server | /ci/steps/lib.sh | UTF-8 | 1,123 | 3.875 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
source ./ci/lib.sh
# Grabs the most recent ci.yaml github workflow run that was successful and triggered from the same commit being pushd.
# This will contain the artifacts we want.
# https://developer.github.com/v3/actions/workflow-runs/#list-workflow-runs
get_artifacts_url() {
curl -sSL 'https://api.github.com/repos/cdr/code-server/actions/workflows/ci.yaml/runs?status=success&event=push' | jq -r ".workflow_runs[] | select(.head_sha == \"$(git rev-parse HEAD)\") | .artifacts_url" | head -n 1
}
# Grabs the artifact's download url.
# https://developer.github.com/v3/actions/artifacts/#list-workflow-run-artifacts
get_artifact_url() {
local artifact_name="$1"
curl -sSL "$(get_artifacts_url)" | jq -r ".artifacts[] | select(.name == \"$artifact_name\") | .archive_download_url" | head -n 1
}
# Uses the above two functions to download a artifact into a directory.
download_artifact() {
local artifact_name="$1"
local dst="$2"
local tmp_file
tmp_file="$(mktemp)"
curl -sSL "$(get_artifact_url "$artifact_name")" > "$tmp_file"
unzip -o "$tmp_file" -d "$dst"
rm "$tmp_file"
}
| true |
3b22c434c5061f070709ba12dd518accb3c91f3d | Shell | weldpua2008/compage | /lib/apt.sh | UTF-8 | 2,841 | 4 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
#############################################################
# Sub functions to work with apt
#############################################################
# get $1 repo that we need check
# $2 distribution of repo
function is_aptrepoexist(){
local REPO=${1:-}
local DISTRIBUTION=${2:-}
set -o pipefail || true
if [ "x${REPO:-}" != "x" ] && [ "x${DISTRIBUTION}" != "x" ];then
${SUDO} find /etc/apt/ -name "*.list" | xargs cat | grep ^[[:space:]]*deb[[:space:]][^[:space:]]*${REPO}[^[:space:]]*[[:space:]]${DISTRIBUTION}[^[:space:]]* &> /dev/null
[ $? -eq 0 ] && return 0
return 1
elif [ "x${REPO:-}" != "x" ]; then
#${SUDO} find /etc/apt/ -name "*.list" | xargs cat | grep ^[[:space:]]*deb | grep -w "${REPO}" &> /dev/null
${SUDO} find /etc/apt/ -name "*.list" | xargs cat | grep ^[[:space:]]*deb[[:space:]][^[:space:]]*${REPO}[^[:space:]]*[[:space:]] &> /dev/null
[ $? -eq 0 ] && return 0
return 1
fi
echo "repository was not specified right ${REPO}"
return 127
}
function install_deb(){
set +e
local command="$1"
local installation_src="$2"
which ${command} &>/dev/null
if [ $? -ne 0 ];then
${SUDO} apt-get --no-install-recommends install -y ${installation_src}
fi
set -e
}
function apt_createlist(){
# echo 'APT::Install-Suggests "0";
# APT::Install-Recommends "0";'>> /etc/apt/apt.conf
$SUDO mkdir $1 || return 1
$SUDO apt-get -d --print-uris -y install -o=dir::cache=$1/ $1 > $1/$1.list || return 2
}
# adding key by path
function apt_addkey()
{
local apt_key=${1:-}
local KEY_SERVER=${2:-}
if [[ -e "${apt_key}" ]];then
${SUDO} cat ${apt_key} | apt-key add -
# adding apt key by id
elif [[ "${KEY_SERVER}" != "" ]]; then
#statements
${SUDO} apt-key adv --keyserver ${KEY_SERVER} --recv $apt_key
fi
}
function print_uries_forceinstall()
{
local PKGs=$1
local LINKS_FILE=$2
set -o pipefail || true
${SUDO} apt-get -d --print-uris -y install -f $PKGs | ${SUDO} tee -a $LINKS_FILE
[ ${PIPESTATUS[0]} -ne 0 ] && return 1
return 0
}
############################################################################################
# print uries with dependencies
############################################################################################
function apt_printuries(){
local PKG="${1:-}"
shift 1
set -o pipefail || true
local dependencies=$(apt-cache depends $PKG | grep \" Depends:\" | sed 's/ Depends://' | sed ':a;N;$!ba;s/\n//g')
$SUDO apt-get --print-uris --yes -d --reinstall $@ install $PKG $dependencies | grep ^\' | cut -d\' -f2 || return 1
[ ${PIPESTATUS[0]} -ne 0 ] && return 2
return 0
} | true |
5cbd55ebef440ec2e80eca504ec4d3bc670b0279 | Shell | usachova/os | /lab4/rmtrash.sh | UTF-8 | 287 | 3.65625 | 4 | [] | no_license | #!/bin/bash
file=$1
trash=/home/.trash
trlog=/home/.trash.log
if [[ $# != 1 || ! -f $file ]]
then
echo "error"
exit 1
fi
if [[ ! -d $trash ]]
then
mkdir $trash
touch $trlog
fi
name=$(date +"%d%m%Y%H%M%S")
ln $file $trash/$name
rm $file && echo $(realpath $file) $name >> $trlog
| true |
2d4007087f7adc125904edafb337db1c3a80f423 | Shell | udemirezen/odroid-os | /DEST/bin/install_opt | UTF-8 | 65 | 2.734375 | 3 | [] | no_license | #!/bin/sh
if [ $# -ne 1 ]; then
echo "Usage: $0 url"
exit
fi
| true |
d5d7692397b5a463ba2efbfe97109beb0b167e29 | Shell | olivercodes/dotfiles | /install.sh | UTF-8 | 2,975 | 3.6875 | 4 | [] | no_license | #!/bin/sh
echo "first, we will cleanup nvm and nvim folders"
echo "------------------------"
echo "clean nvm"
if [ -d "$HOME/.nvm" ] # && clean_install = true
then
echo ".nvm folder exists, cleaning"
sudo rm -rf $HOME/.nvm
else
echo ".nvm clean"
fi
echo "------------------------"
echo "create clean nvim setup"
# TODO - add an if
if [ -d "$HOME/.config/nvim" ] # && clean_install = true
then
echo "nvim folder already present, removing"
rm -rf $HOME/.config/nvim
fi
echo "------------------------"
echo "create clean zsh setup"
# TODO - add an if
if [ -d "$HOME/.oh-my-zsh" ]
then
echo "nvim folder already present, removing"
rm -rf $HOME/.oh-my-zsh
fi
echo "------------------------"
echo "now, we will do brew things"
brew="/usr/local/bin/brew"
if [ -f "$brew" ]
then
echo "Homebrew is installed, nothing to do"
else
echo "Homebrew is not installed, installing"
echo "This may take a while"
ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
fi
packages=(
"git"
"node"
"tmux"
"neovim"
"zsh"
"pure"
)
for i in "${packages[@]}"; do
brew install $i
echo "---------------------"
done
echo "installing RCM for dotfiles"
brew tap thoughtbot/formulae
brew install rcm
echo "------------------------"
echo "------------------------"
localGit="/usr/local/bin/git"
if [ -f "$localGit" ]
then
echo "git is all good"
else
echo "git is not installed"
fi
if [ -d "$HOME/.dotfiles" ]
then
echo "dir ~/.dotfiles exits"
rm -rf $HOME/.dotfiles
else
echo "no"
fi
echo "Cloning olivercodes's dotfiles insto .dotfiles"
git clone https://github.com/olivercodes/dotfiles.git ~/.dotfiles
echo "---------------------------------------------------------"
echo "init dotfile submodules"
cd $HOME/.dotfiles
git submodule update --init --recursive
echo "---------------------------------------------------------"
cd $HOME
echo "running RCM's rcup command"
echo "This is symlink the rc files in .dofiles"
echo "with the rc files in $HOME"
echo "---------------------------------------------------------"
rcup
echo "setting zsh"
chsh -s $(which zsh)
# echo "running osx defaults"
# ~/.osx.sh
# echo "---------------------------------------------------------"
echo "linking init.vim for neovim"
mkdir -p $HOME/.config/nvim
ln -s $HOME/.init.vim ~/.config/nvim/init.vim
echo "---------------------------------------------------------"
echo "installing node version manager"
~/.dotfiles/nvm/install.sh
echo "---------------------------------------------------------"
echo "installing powerline font"
~/.dotfiles/fonts/install.sh
echo "---------------------------------------------------------"
# export KEEP_ZSHRC=yes
# echo "setup oh-my-zsh"
# ~/.dotfiles/ohmyzsh/tools/install.sh
echo "---------------------------------------------------------"
echo "All done!"
echo "and change your terminal font to source code pro"
echo "Cheers"
echo "---------------------------------------------------------"
exit 0
| true |
a7d97b6272065893d9bf54e974eddc1abea8ff54 | Shell | zombiezooz25/dotfiles | /.bin/search | UTF-8 | 305 | 2.796875 | 3 | [] | no_license | #!/usr/bin/env bash
# Interactive live-grep: re-run ag on every keystroke via fzf's reload
# binding, then open the file of the selected match.
AG_PREFIX="ag --noheading --color --smart-case "
export FZF_DEFAULT_COMMAND="$AG_PREFIX '.'"
# TODO: Add preview (e.g. fzf --preview on the matched file)
# fzf lines look like "path:line:match"; keep only the path component.
filename=$(fzf --bind "change:reload:$AG_PREFIX {q} || true" --ansi --phony | cut -d':' -f1)
# NOTE(review): `open` is macOS-specific; use xdg-open on Linux.
[[ -n "$filename" ]] && [[ -f "$filename" ]] && \
    open "$filename"
| true |
118ac09b03dc14974d56bc50a04e0d4e249fcc70 | Shell | e2/scripts | /install_redis | UTF-8 | 738 | 3.484375 | 3 | [] | no_license | #!/bin/bash
# Download, unpack and build the latest Redis release into ~/bin.
BIN="$HOME/bin"
PAGE="http://code.google.com/p/redis/"

# mkdir -p replaces the old exists-check and works for nested paths too.
mkdir -p "$BIN"
cd "$BIN" || exit 1

echo "Installing Redis"
# Scrape the download URL of the latest release tarball from the project page.
package=$(curl -s "$PAGE" | grep 'http://redis.googlecode.com/files/redis' | cut -d\" -f2)
if [ -z "$package" ]; then
    echo "Could not determine the Redis download URL from $PAGE" >&2
    exit 1
fi
wget -c "$package"

# Most recently modified tarball and its versioned directory name.
download=$(ls -c redis-* | cut -d" " -f1 | head -n 1)
name=${download%.tar.gz}
tar xzf "$download"
cd "$name" || exit 1
make
cd ../
rm "$download"

echo ""
echo ""
echo ""
echo ""
echo "***************************************************************************************"
echo "You can now just run ./redis-server or I suppose you can sudo make install if you want."
echo "***************************************************************************************"
| true |
6e1054cb23a65c468bd219198e16058ead4b3402 | Shell | abiratsis/GlobalWeatherData | /scripts/download_weather.sh | UTF-8 | 9,295 | 3.71875 | 4 | [] | no_license | #!/bin/sh
############################### install python prerequisites ###############################
install_prerequisites() {
  # Ensure pip3 and the python packages needed by scripts/nc_to_csv.py
  # (pandas, netCDF4) are available, installing the packages when missing.

  # command -v is the portable existence test; capturing `pip3 --version`
  # output (as before) spews "command not found" noise when pip3 is absent.
  if ! command -v pip3 >/dev/null 2>&1; then
    echo "pip3 is not installed. Please install pip to continue."
    exit 1
  fi

  # pandas (checked the same way as netCDF4 below, for consistency)
  pandas_installed=$(pip3 show pandas | grep Name)
  if [ -z "$pandas_installed" ]; then
    pip3 install pandas
  else
    echo "pandas is already installed"
  fi

  # netCDF4
  netCDF4_installed=$(pip3 show netCDF4 | grep Name)
  if [ -z "$netCDF4_installed" ]; then
    pip3 install netCDF4
  else
    echo "netCDF4 is already installed"
  fi
}
############################### NC to CSV #######################################
nc_to_csv() {
  # Convert downloaded netCDF files to CSV via scripts/nc_to_csv.py.
  # Each --<dataset> option gives the .nc file path for that dataset; only
  # datasets whose option was supplied get converted.

  # temp
  airtemp_path=""
  skintemp_path=""
  maxtemp_path=""
  mintemp_path=""
  # humidity
  humid_path=""
  # wind
  uwind_path=""
  vwind_path=""
  # solar radiation
  csdlf_path=""
  csdsf_path=""
  dlwrf_path=""
  dswrf_path=""
  nlwrs_path=""
  nswrs_path=""

  # Walk the argument list pairwise. The old code iterated a snapshot of
  # "$@" while also shifting it, which mis-parses as soon as an option's
  # value happens to look like another option name.
  while [ $# -gt 0 ]; do
    case "$1" in
      --airtemp)  airtemp_path="$2";  shift 2 || break ;;
      --skintemp) skintemp_path="$2"; shift 2 || break ;;
      --maxtemp)  maxtemp_path="$2";  shift 2 || break ;;
      --mintemp)  mintemp_path="$2";  shift 2 || break ;;
      --humid)    humid_path="$2";    shift 2 || break ;;
      --uwind)    uwind_path="$2";    shift 2 || break ;;
      --vwind)    vwind_path="$2";    shift 2 || break ;;
      --csdlf)    csdlf_path="$2";    shift 2 || break ;;
      --csdsf)    csdsf_path="$2";    shift 2 || break ;;
      --dswrf)    dswrf_path="$2";    shift 2 || break ;;
      --dlwrf)    dlwrf_path="$2";    shift 2 || break ;;
      --nlwrs)    nlwrs_path="$2";    shift 2 || break ;;
      --nswrs)    nswrs_path="$2";    shift 2 || break ;;
      --world)    shift 2 || break ;; # accepted for CLI symmetry; unused here
      *)          shift ;;
    esac
  done

  echo "converting nc files to csv..."
  cdir=$(pwd)

  # _convert <nc-path> <netcdf-variable>: run the converter when a path was
  # given. (Also fixes the old skin-temperature message, which echoed the
  # undefined variable ${skintemp_url}.)
  _convert() {
    if [ -n "$1" ]; then
      python3 "${cdir}/scripts/nc_to_csv.py" "$1" "$2"
      echo "$1 was successfully converted to csv."
    fi
  }

  _convert "$airtemp_path"  "air"    # Air temperature 2m
  _convert "$skintemp_path" "skt"    # skin temperature
  _convert "$maxtemp_path"  "tmax"   # max temperature
  _convert "$mintemp_path"  "tmin"   # min temperature
  _convert "$humid_path"    "shum"   # humidity 2m
  _convert "$uwind_path"    "uwnd"   # u-wind
  _convert "$vwind_path"    "vwnd"   # v-wind
  _convert "$csdlf_path"    "csdlf"  # Clear Sky Downward Longwave Flux
  _convert "$csdsf_path"    "csdsf"  # Clear Sky Downward Solar Flux
  _convert "$dlwrf_path"    "dlwrf"  # Downward Longwave Radiation Flux
  _convert "$dswrf_path"    "dswrf"  # Downward Solar Radiation Flux
  _convert "$nlwrs_path"    "nlwrs"  # Net Longwave Radiation Flux
  _convert "$nswrs_path"    "nswrs"  # Net Shortwave Radiation Flux
}
############################### Data sources #######################################
download_sources() {
  # Create the requested target directories and download the requested NOAA
  # datasets (plus optionally the simplemaps world-cities archive).
  # Directory options: -t/--temperature, -h/--humidity, -w/--wind,
  # -s/--solar, -r (world). Dataset options (--airtemp, --uwind, ...) give
  # the URL of each .nc file; --world gives the world-cities zip URL.
  temp_dir=""
  humidity_dir=""
  wind_dir=""
  solar_dir=""
  world_dir=""
  # temp
  airtemp_url=""
  skintemp_url=""
  maxtemp_url=""
  mintemp_url=""
  # humidity
  humid_url=""
  # wind
  uwind_url=""
  vwind_url=""
  # solar radiation
  csdlf_url=""
  csdsf_url=""
  dlwrf_url=""
  dswrf_url=""
  nlwrs_url=""
  nswrs_url=""
  world_url=""

  # Walk the argument list pairwise (see nc_to_csv: iterating a snapshot of
  # "$@" while shifting mis-parses when a value looks like an option name).
  while [ $# -gt 0 ]; do
    case "$1" in
      -t|--temperature) temp_dir="$2";     shift 2 || break ;;
      -h|--humidity)    humidity_dir="$2"; shift 2 || break ;;
      -w|--wind)        wind_dir="$2";     shift 2 || break ;;
      -s|--solar)       solar_dir="$2";    shift 2 || break ;;
      -r)               world_dir="$2";    shift 2 || break ;;
      --skintemp)       skintemp_url="$2"; shift 2 || break ;;
      --airtemp)        airtemp_url="$2";  shift 2 || break ;;
      --maxtemp)        maxtemp_url="$2";  shift 2 || break ;;
      --mintemp)        mintemp_url="$2";  shift 2 || break ;;
      --humid)          humid_url="$2";    shift 2 || break ;;
      --uwind)          uwind_url="$2";    shift 2 || break ;;
      --vwind)          vwind_url="$2";    shift 2 || break ;;
      --csdlf)          csdlf_url="$2";    shift 2 || break ;;
      --csdsf)          csdsf_url="$2";    shift 2 || break ;;
      --dswrf)          dswrf_url="$2";    shift 2 || break ;;
      --dlwrf)          dlwrf_url="$2";    shift 2 || break ;;
      --nlwrs)          nlwrs_url="$2";    shift 2 || break ;;
      --nswrs)          nswrs_url="$2";    shift 2 || break ;;
      --world)          world_url="$2";    shift 2 || break ;;
      *)                shift ;;
    esac
  done

  echo "Executing create_dirs...."
  # _mkdir <dir>: create the directory when one was requested.
  _mkdir() {
    if [ -n "$1" ]; then
      mkdir -p "$1"
      echo "$1 was created."
    fi
  }
  _mkdir "$temp_dir"
  _mkdir "$humidity_dir"
  _mkdir "$wind_dir"
  _mkdir "$solar_dir"
  # world_dir is intentionally not pre-created here (matches the original flow).

  echo "executing download_sources..."
  # _fetch <url> <dir>: download the file into <dir> when a URL was given.
  _fetch() {
    if [ -n "$1" ]; then
      wget -P "$2" -N "$1"
      echo "$1 was downloaded at $2."
    fi
  }
  _fetch "$airtemp_url"  "$temp_dir"      # Air temperature 2m
  _fetch "$skintemp_url" "$temp_dir"      # skin temperature
  _fetch "$maxtemp_url"  "$temp_dir"      # max temperature
  _fetch "$mintemp_url"  "$temp_dir"      # min temperature
  _fetch "$humid_url"    "$humidity_dir"  # humidity 2m
  _fetch "$uwind_url"    "$wind_dir"      # u-wind
  _fetch "$vwind_url"    "$wind_dir"      # v-wind
  _fetch "$csdlf_url"    "$solar_dir"     # Clear Sky Downward Longwave Flux
  _fetch "$csdsf_url"    "$solar_dir"     # Clear Sky Downward Solar Flux
  _fetch "$dlwrf_url"    "$solar_dir"     # Downward Longwave Radiation Flux
  _fetch "$dswrf_url"    "$solar_dir"     # Downward Solar Radiation Flux
  _fetch "$nlwrs_url"    "$solar_dir"     # Net Longwave Radiation Flux
  _fetch "$nswrs_url"    "$solar_dir"     # Net Shortwave Radiation Flux

  # download world data, then unpack the archive in place
  if [ -n "$world_url" ]; then
    wget -P "$world_dir" -N "$world_url"
    echo "${world_url} was downloaded at ${world_dir}."
    unzip -o "${world_dir}/simplemaps_worldcities_basicv1.6.zip" -d "${world_dir}"
  fi
}
"$@" | true |
e25049ee5094b37c30f6759736a43b504522ec8b | Shell | janmejay/dev_utils | /grep_blk | UTF-8 | 879 | 3.28125 | 3 | [] | no_license | #!/bin/bash
# grep_blk START_RE END_RE MATCH_RE [EXCLUDE]
# Reads stdin and prints every block delimited by START_RE/END_RE that
# contains MATCH_RE (EXCLUDE=n, the default) or that does NOT contain it
# (EXCLUDE=y). The end line itself is not part of the printed block.
exclude="n"
if [ -n "$4" ]; then
  exclude=$4
fi
awk -v b_start="${1}" -v b_end="${2}" -v b_match="${3}" -v b_exclude_matched="${exclude}" '
  # i is an awk-local (extra parameter); n counts buffered lines.
  function print_frag(   i) {
    if ( ( (matched == 1) && (b_exclude_matched == "n") ) ||
         ( (matched == 0) && (b_exclude_matched == "y") ) ) {
      # Iterate numerically: "for (i in a)" has unspecified order in awk,
      # which could print the buffered block lines shuffled.
      for (i = 0; i < n; i++) print a[i];
    }
    n = 0;
  }
  BEGIN {
    track = 0;
    matched = 0;
    n = 0;
    if ( (b_exclude_matched != "n") && (b_exclude_matched != "y") ) {
      print "Exclude arg must either be y or n (found: " b_exclude_matched ")";
      exit 1;
    }
  }
  {
    if ( track == 1 ) {
      if ( $0 ~ b_end ) {
        track = 0;
        print_frag();
      } else {
        a[n++] = $0;
        if ( $0 ~ b_match ) {
          matched = 1;
        }
      }
    }
    # Note: an end line matching START_RE opens a new block immediately.
    if ( track == 0 ) {
      if ( $0 ~ b_start ) {
        track = 1;
        matched = 0;
        a[n++] = $0;
      }
    }
  }
  END { print_frag(); }
'
| true |
79bbcb218d764ebfcc4a983ffe4ec71d80ce4c5b | Shell | lenik/stack | /deployment/node/sdu/sdu-semsutils/semsd.sh | UTF-8 | 611 | 3.328125 | 3 | [] | no_license | function _cd_sems() {
local cur="${COMP_WORDS[COMP_CWORD]}"
local semsroot
if ! semsroot=`vcscmd rootpath .`; then
return 1
fi
local pomdirs="$semsroot/.pomdirs"
local ver=0
if [ -f "$pomdirs" ]; then
ver=`stat -c%Y "$pomdirs"`
fi
local cver=`date +%s`
if [ $(( (cver - ver) / 3600 )) -gt 240 ]; then
find "$semsroot" -name pom.xml \
| while read f; do
dir="${f%/pom.xml}"
echo "${dir##*/}"
done >"$pomdirs"
fi
COMPREPLY=( $( grep "^$cur" "$pomdirs" ))
return 0
} && complete -F _cd_sems semsd
| true |
991fcec6643c909bb320d8256026772f0aca1ee8 | Shell | jiahaoding0725/ShellCommandExercises | /view/wk2/list_include_files.sh | UTF-8 | 302 | 3.578125 | 4 | [] | no_license | #!/bin/sh
# For every C source file in the current directory, list the headers/sources
# it #includes (one per line, indented).
for file in *.c
do
    # Skip the literal "*.c" the glob leaves behind when no .c files exist.
    [ -e "$file" ] || continue
    echo "$file includes:"
    # Take the target of each #include directive and strip the <>/"" wrappers.
    # (grep -E replaces the deprecated egrep; reading the file directly
    # replaces the old `cat | egrep` pipeline. The old code also used the
    # bash-only ${include_files[*]} under a /bin/sh shebang.)
    include_files=$(grep -E "#include" "$file" | cut -d' ' -f2 | grep -Eo "[^<>\"]+(\.h|\.c)")
    for inc_file in $include_files    # word-splitting on whitespace is intended
    do
        printf '  %s\n' "$inc_file"
    done
done
0e51ca49b372e99e9276b74e455bcad31b91de6e | Shell | yuuki0xff/dotfiles | /files/bin/remote-make | UTF-8 | 3,228 | 4.15625 | 4 | [] | no_license | #!/bin/bash
# remote-make - execute "make" command on the remote server.
#
# USAGE:
# remote-make [OPTIONS] [--] [MAKE_ARGS]
#
# OPTIONS:
# -h, --host
# Specify remote host name and login user name (ex. user@example.com).
# -d, --dir
# Specify remote working directory.
# -s, --shell
# Regard MAKE_ARGS as shell commands.
# --force-clean
# Remove all untracked files with git.
# --help
# Show this message.
#
# ENVIRONMENT VALUES:
# REMOTE_MAKE_HOST: remote host name and login user name (ex. user@example.com)
# REMOTE_MAKE_DIR: remote working directory.
# REMOTE_MAKE_FORCE_CLEAN: remove all untracked files with git.
# REMOTE_MAKE_SKIP_CLEAN: skip working directory cleaning.
#
# EXAMPLE:
# export REMOTE_MAKE_HOST=user@build-server
# remote-make build
# Abort on any command failure or use of an unset variable.
set -eu

# Print the absolute path of the root of the current git working tree.
gitroot() {
    git rev-parse --show-toplevel
}
# List all git-tracked files, descending into submodules when any exist
# (`git submodule status` prints one line per registered submodule).
gitfiles() {
    if git submodule status | grep -q .; then
        git ls-files --recurse-submodules
        return
    fi
    git ls-files
}
# --- Defaults (overridable via env vars and the CLI options below) ----------
this=remote-make
host=${REMOTE_MAKE_HOST:-}
local_dir=$(gitroot)
if [ -n "${REMOTE_MAKE_DIR:-}" ]; then
    remote_dir=$REMOTE_MAKE_DIR
else
    # Derive a remote directory name from the origin repo name plus the
    # local checkout's directory name, e.g. remote-make/<project>-<feature>.
    project=$(git remote get-url origin |sed 's@.*/@@; s/\.git$//')
    feature=$(basename "$(gitroot)")
    remote_dir="remote-make/$project-$feature"
fi
if [ -n "${REMOTE_MAKE_FORCE_CLEAN:-}" ]; then
    clean_cmd="git clean -fdx"
elif [ -n "${REMOTE_MAKE_SKIP_CLEAN:-}" ]; then
    clean_cmd=":"
else
    clean_cmd="git clean -fd"
fi
# Single-quoted on purpose: $(nproc) must expand on the remote host.
make_cmd='make -j$(nproc)'

# Consume options.
while (( $# >= 1 )); do
    case "$1" in
        --)
            shift
            break
            ;;
        --host=*)
            # Strip the leading "--host=" (tail -c offsets are 1-based).
            host=$(echo "$1" |tail -c +8)
            shift
            ;;
        --host|-h)
            shift
            host=$1
            shift
            ;;
        --dir=*)
            # Strip the leading "--dir=".
            remote_dir=$(echo "$1" |tail -c +7)
            shift
            ;;
        --dir|-d)
            shift
            remote_dir=$1
            shift
            ;;
        --shell|-s)
            # Empty make_cmd: remaining args are run as a raw shell command.
            make_cmd=
            shift
            ;;
        --no-clean)
            clean_cmd=true
            shift
            ;;
        --force-clean)
            clean_cmd="git clean -fdx"
            shift
            ;;
        --skip-clean)
            clean_cmd=":"
            shift
            ;;
        --help)
            # Print this script's leading comment block as the help text.
            exec sed '1d; /^[^#]/Q; s/^# \?//' "$0"
            ;;
        -*)
            echo "ERROR: unknown option: $1" >&2
            exit 1
            ;;
        *)
            break
            ;;
    esac
done

if [ -z "$host" ]; then
    echo "ERROR: no remote host specified. Please specify the --host argument." >&2
    exit 1
fi

cd "$local_dir"

echo "$this: Syncing from '$local_dir' to '$remote_dir' ..." >&2
ssh "$host" mkdir -p "$remote_dir"
# Sync only the git-tracked files (the .git metadata is handled below).
rsync -aP --files-from=<(gitfiles) "./" "$host:$remote_dir/"
if [ -d ./.git ]; then
    # .git is directory.
    rsync -aP --del "./.git" "$host:$remote_dir/"
else
    # .git is file. This working tree related to other local git repository.
    git_dir=$(sed -n '/^gitdir: / { s/gitdir: //; p }' .git)
    git_common_dir=$(git -C "$git_dir" rev-parse --git-common-dir)
    # NOTE(review): git_subdir_name is computed but never used below.
    git_subdir_name=$(realpath --relative-to="$git_common_dir" "$git_dir")
    git_commit_id=$(git rev-parse HEAD)
    rsync -aP --del "$git_common_dir/" "$host:$remote_dir/.git/"
    # Recreate the worktree layout remotely and point HEAD at the same commit.
    ssh "$host" "
        set -eux &&
        ln -snf \"\$(readlink -f $remote_dir.git/)\" \"$remote_dir/.git\" &&
        cd \"$remote_dir\" &&
        git reset $git_commit_id
    "
fi

echo "$this: Making on '$host' ..." >&2
# Run the (optional) clean step, then the make/shell command, reniced to the
# lowest priority so builds don't starve the remote host.
ssh "$host" "
    . ~/.profile &&
    set -eux &&
    pwd &&
    cd '$remote_dir' &&
    $clean_cmd &&
    renice 19 -p \$\$ &&
    ($make_cmd $*)
"
echo "$this: Done" >&2
| true |
23ab29ac3e9274293d4aad8375161c48c544fa99 | Shell | ioppermann/munin-contrib | /plugins/raspberry-pi/cpu_freq_1sec | UTF-8 | 1,287 | 3.625 | 4 | [] | no_license | #! /bin/sh
#
# This is a small supersampling plugin that does
# cpu sampling every 1 second.
#
# (c) 2013 - LGPL - Steve Schnepp <steve.schnepp@pwkf.org>

# Munin invokes this plugin with one argument:
#   acquire - fork a 1 Hz background sampler that appends to the cache
#   config  - print the graph definition
#   (other) - emit the cached samples and truncate the cache

pluginfull="$0" # full name of plugin
plugin="${0##*/}" # name of plugin
pidfile="$MUNIN_PLUGSTATE/munin.$plugin.pid"
cache="$MUNIN_PLUGSTATE/munin.$plugin.value"

if [ ! -r "/sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq" ]
then
	echo "# Cannot read CPU Freq"
	exit 1
fi

if [ "$1" = "acquire" ]
then
	(
		# Once a second, emit "<epoch> <freq>" on a single line (echo
		# joins the two command outputs). awk reformats that into munin's
		# supersampled "field.value epoch:value" form; the *1000 scaling
		# presumably converts the sysfs kHz reading to the Hz the graph
		# is labelled with -- TODO confirm. system("") makes awk flush
		# its output so samples land in the cache promptly.
		while sleep 1
		do
			echo $(
				date +%s
				cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq
			)
		done | awk "{
			print \"scaling_cur_freq.value \" \$1 \":\" (\$2 * 1000);
			system(\"\");
		}" >> $cache
	) &
	# Remember the sampler's PID so munin can stop it later.
	echo $! > $pidfile
	exit 0
fi

if [ "$1" = "config" ]
then
	# Graph definition; update_rate 1 matches the 1-second sampling.
	cat <<EOF
graph_title CPU Freq 1sec stats
graph_category 1sec
graph_data_size custom 1d, 10s for 1w, 1m for 1t, 5m for 1y
graph_vlabel Hz
update_rate 1
scaling_cur_freq.label Current CPU Scaling Frequence
scaling_cur_freq.type GAUGE
EOF
	exit 0
fi

# values
# Emit everything collected since the last poll, then reset the cache.
cat ${cache}
> ${cache}
exit 0
819608439f258ea8fe0da0c0e948f958c0a7c2d8 | Shell | malkomich/unix-exercices | /pr3/2/floyd.sh | ISO-8859-10 | 311 | 3.453125 | 3 | [] | no_license | # dibuja 1 triangulo con el comando seq
# el tamano del triangulo se pasa como argumento
# Row i prints i stars, so the output is the triangle the header promises
# (the old inner loop ran `seq $1` and actually printed a full square).
if [ $# -ne 1 ]
then
    echo "Error ... debe introducir 1 argumento" >&2
else
    # Validar si es 1 numero positivo (the old script left this as a TODO).
    case $1 in
        ''|0|*[!0-9]*)
            echo "Error ... el argumento debe ser un numero positivo" >&2
            ;;
        *)
            for i in $(seq "$1")
            do
                for j in $(seq "$i")
                do
                    printf '* '    # portable replacement for `echo -n`
                done
                echo " "
            done
            ;;
    esac
fi
exit 0
| true |
7671d0bc72877110c08dd87f45a47103fa8eed6c | Shell | kellmant/console | /MakeSystem | UTF-8 | 384 | 2.8125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
#
# nothing fancy
# just run the commands to get it launched
#
# Append one "ENV <line>" per entry of the shared environment file to the
# Dockerfile in the current directory, add the node entrypoint/cmd, then
# build and push the console image to the local registry.
# NOTE(review): this only ever appends -- repeated runs will stack duplicate
# ENV/ENTRYPOINT/CMD lines unless the Dockerfile is regenerated first.
while IFS= read -r char     # `char` actually holds a whole line of the file
do
echo "ENV $char" >> $PWD/Dockerfile
done < "/efs/services/environment"
echo "ENTRYPOINT [\"node\"]" >> $PWD/Dockerfile
echo "CMD [\"app.js\", \"-p\", \"3000\"]" >> $PWD/Dockerfile
docker build -t registry.local:5000/console:latest .
docker push registry.local:5000/console:latest
| true |
fa01be7a22ab2e065ff0715fa97466a86c1fd277 | Shell | Oleg1157/git-lfs | /test/test-custom-transfers.sh | UTF-8 | 6,232 | 3.484375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
. "test/testlib.sh"
# A misconfigured custom-transfer path must make `git push` fail after the
# adapter fails to start.
begin_test "custom-transfer-wrong-path"
(
  set -e

  # this repo name is the indicator to the server to support custom transfer
  reponame="test-custom-transfer-fail"
  setup_remote_repo "$reponame"
  clone_repo "$reponame" $reponame

  # deliberately incorrect path
  git config lfs.customtransfer.testcustom.path path-to-nothing

  git lfs track "*.dat" 2>&1 | tee track.log
  grep "Tracking \"\*.dat\"" track.log

  contents="jksgdfljkgsdlkjafg lsjdgf alkjgsd lkfjag sldjkgf alkjsgdflkjagsd kljfg asdjgf kalsd"
  contents_oid=$(calc_oid "$contents")
  printf "$contents" > a.dat
  git add a.dat
  git add .gitattributes
  git commit -m "add a.dat" 2>&1 | tee commit.log

  GIT_TRACE=1 GIT_TRANSFER_TRACE=1 git push origin master 2>&1 | tee pushcustom.log
  # use PIPESTATUS otherwise we get exit code from tee
  res=${PIPESTATUS[0]}
  grep "xfer: adapter \"testcustom\" Begin()" pushcustom.log
  grep "Failed to start custom transfer command" pushcustom.log
  if [ "$res" = "0" ]; then
    echo "Push should have failed because of an incorrect custom transfer path."
    exit 1
  fi
)
end_test

# Round-trip: push 12 objects through the test custom adapter, wipe the
# local object store, and fetch them all back through the same adapter.
begin_test "custom-transfer-upload-download"
(
  set -e

  # this repo name is the indicator to the server to support custom transfer
  reponame="test-custom-transfer-1"
  setup_remote_repo "$reponame"
  clone_repo "$reponame" $reponame

  # set up custom transfer adapter
  git config lfs.customtransfer.testcustom.path lfstest-customadapter

  git lfs track "*.dat" 2>&1 | tee track.log
  grep "Tracking \"\*.dat\"" track.log
  git add .gitattributes
  git commit -m "Tracking"

  # set up a decent amount of data so that there's work for multiple concurrent adapters
  echo "[
  {
    \"CommitDate\":\"$(get_date -10d)\",
    \"Files\":[
      {\"Filename\":\"verify.dat\",\"Size\":18,\"Data\":\"send-verify-action\"},
      {\"Filename\":\"file1.dat\",\"Size\":1024},
      {\"Filename\":\"file2.dat\",\"Size\":750}]
  },
  {
    \"CommitDate\":\"$(get_date -7d)\",
    \"Files\":[
      {\"Filename\":\"file1.dat\",\"Size\":1050},
      {\"Filename\":\"file3.dat\",\"Size\":660},
      {\"Filename\":\"file4.dat\",\"Size\":230}]
  },
  {
    \"CommitDate\":\"$(get_date -5d)\",
    \"Files\":[
      {\"Filename\":\"file5.dat\",\"Size\":1200},
      {\"Filename\":\"file6.dat\",\"Size\":300}]
  },
  {
    \"CommitDate\":\"$(get_date -2d)\",
    \"Files\":[
      {\"Filename\":\"file3.dat\",\"Size\":120},
      {\"Filename\":\"file5.dat\",\"Size\":450},
      {\"Filename\":\"file7.dat\",\"Size\":520},
      {\"Filename\":\"file8.dat\",\"Size\":2048}]
  }
  ]" | lfstest-testutils addcommits

  GIT_TRACE=1 GIT_TRANSFER_TRACE=1 git push origin master 2>&1 | tee pushcustom.log
  # use PIPESTATUS otherwise we get exit code from tee
  [ ${PIPESTATUS[0]} = "0" ]
  grep "xfer: started custom adapter process" pushcustom.log
  grep "xfer\[lfstest-customadapter\]:" pushcustom.log
  grep "12 of 12 files" pushcustom.log

  # Wipe the local object store so the fetch below must re-download.
  rm -rf .git/lfs/objects
  GIT_TRACE=1 GIT_TRANSFER_TRACE=1 git lfs fetch --all 2>&1 | tee fetchcustom.log
  [ ${PIPESTATUS[0]} = "0" ]
  grep "xfer: started custom adapter process" fetchcustom.log
  grep "xfer\[lfstest-customadapter\]:" fetchcustom.log
  grep "12 of 12 files" fetchcustom.log

  grep "Terminating test custom adapter gracefully" fetchcustom.log
  objectlist=`find .git/lfs/objects -type f`
  [ "$(echo "$objectlist" | wc -l)" -eq 12 ]
)
end_test

# Same round-trip through a standalone (non-concurrent) transfer agent that
# talks to a plain local git remote; also asserts that lock verification is
# skipped in standalone mode.
begin_test "custom-transfer-standalone"
(
  set -e

  # setup a git repo to be used as a local repo, not remote
  reponame="test-custom-transfer-standalone"
  setup_remote_repo "$reponame"

  # clone directly, not through lfstest-gitserver
  clone_repo_url "$REMOTEDIR/$reponame.git" $reponame

  # set up custom transfer adapter to use a specific transfer agent
  git config lfs.customtransfer.testcustom.path lfstest-standalonecustomadapter
  git config lfs.customtransfer.testcustom.concurrent false
  git config lfs.standalonetransferagent testcustom
  export TEST_STANDALONE_BACKUP_PATH="$(pwd)/test-custom-transfer-standalone-backup"
  mkdir -p $TEST_STANDALONE_BACKUP_PATH
  rm -rf $TEST_STANDALONE_BACKUP_PATH/*

  git lfs track "*.dat" 2>&1 | tee track.log
  grep "Tracking \"\*.dat\"" track.log
  git add .gitattributes
  git commit -m "Tracking"

  # set up a decent amount of data so that there's work for multiple concurrent adapters
  echo "[
  {
    \"CommitDate\":\"$(get_date -10d)\",
    \"Files\":[
      {\"Filename\":\"verify.dat\",\"Size\":18,\"Data\":\"send-verify-action\"},
      {\"Filename\":\"file1.dat\",\"Size\":1024},
      {\"Filename\":\"file2.dat\",\"Size\":750}]
  },
  {
    \"CommitDate\":\"$(get_date -7d)\",
    \"Files\":[
      {\"Filename\":\"file1.dat\",\"Size\":1050},
      {\"Filename\":\"file3.dat\",\"Size\":660},
      {\"Filename\":\"file4.dat\",\"Size\":230}]
  },
  {
    \"CommitDate\":\"$(get_date -5d)\",
    \"Files\":[
      {\"Filename\":\"file5.dat\",\"Size\":1200},
      {\"Filename\":\"file6.dat\",\"Size\":300}]
  },
  {
    \"CommitDate\":\"$(get_date -2d)\",
    \"Files\":[
      {\"Filename\":\"file3.dat\",\"Size\":120},
      {\"Filename\":\"file5.dat\",\"Size\":450},
      {\"Filename\":\"file7.dat\",\"Size\":520},
      {\"Filename\":\"file8.dat\",\"Size\":2048}]
  }
  ]" | lfstest-testutils addcommits

  GIT_TRACE=1 GIT_TRANSFER_TRACE=1 git push origin master 2>&1 | tee pushcustom.log
  # use PIPESTATUS otherwise we get exit code from tee
  [ ${PIPESTATUS[0]} = "0" ]
  # Make sure the lock verification is not attempted.
  grep "locks/verify$" pushcustom.log && false

  grep "xfer: started custom adapter process" pushcustom.log
  grep "xfer\[lfstest-standalonecustomadapter\]:" pushcustom.log
  grep "12 of 12 files" pushcustom.log

  rm -rf .git/lfs/objects
  GIT_TRACE=1 GIT_TRANSFER_TRACE=1 git lfs fetch --all 2>&1 | tee fetchcustom.log
  [ ${PIPESTATUS[0]} = "0" ]
  grep "xfer: started custom adapter process" fetchcustom.log
  grep "xfer\[lfstest-standalonecustomadapter\]:" fetchcustom.log
  grep "12 of 12 files" fetchcustom.log

  grep "Terminating test custom adapter gracefully" fetchcustom.log
  objectlist=`find .git/lfs/objects -type f`
  [ "$(echo "$objectlist" | wc -l)" -eq 12 ]
)
end_test
| true |
522435c868ba1854b61fae04a4af43ad4bf89f0d | Shell | cnygardtw/metrik | /backend/scripts/tcp-port-check.sh | UTF-8 | 751 | 4.3125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
## This script is for checking if the specific TCP port is available on HOST.
## arg1: Host / IP
## arg2: Port number you would like to check
## arg3: Tolerant time (second)

HOSTNAME="$1"
PORT="$2"
TOLERANCE="$3"
COUNTER=0
PORT_STATUS=0

# Keep probing while the port is still DOWN. (The original looped on
# `-ne 0`, which is false on the very first pass since PORT_STATUS starts
# at 0, so the check never actually ran and everything reported "down".)
while [ "$PORT_STATUS" -eq 0 ] && [ "$COUNTER" -lt "$TOLERANCE" ]; do
    # Silence both streams; -z only tests connectability. (The old
    # `2>&1 >/dev/null` order left stderr on the terminal.)
    if nc -zv "$HOSTNAME" "$PORT" >/dev/null 2>&1; then
        PORT_STATUS=1
        break
    fi
    sleep 2
    COUNTER=$((COUNTER + 2))
    echo "[$HOSTNAME:$PORT] is not accessible... Waiting for retry ($COUNTER seconds so far)"
done

if [ "$PORT_STATUS" -eq 1 ]; then
    echo "[$HOSTNAME:$PORT] is up!"
    exit 0
else
    # Covers both "never came up" and "timed out" (the old elif condition
    # was always true whenever this branch was reached).
    echo "[$HOSTNAME:$PORT] is down."
    exit 1
fi
d4c816bc96fdf7d3b180b84222789b0604e5e6ae | Shell | openlibraryenvironment/mod-rs | /okapi-scripts/load_test_data.sh | UTF-8 | 3,782 | 2.5625 | 3 | [
"Apache-2.0"
] | permissive | #! /bin/sh
# Seed the running Okapi/mod-rs instance with three sample patron requests
# addressed to the RESHARE:DIKUA responder, then read them back.
AUTH_TOKEN=`./okapi-login`

echo Listing current requests
curl --header "X-Okapi-Tenant: diku" -H "X-Okapi-Token: ${AUTH_TOKEN}" -H "Content-Type: application/json" -X GET http://localhost:9130/rs/patronrequests

echo Get the directory record for our responding system - we want to send a request to DIKUA
RESHARE_DIKUA=`curl -sSL --header "X-Okapi-Tenant: diku" -H "X-Okapi-Token: $AUTH_TOKEN" -H "Content-Type: application/json" -X GET "http://localhost:9130/directory/entry?filters=symbols.symbol%3dDIKUA&filters=symbols.authority.value=RESHARE&stats=true"`
DIKUA_ID=`echo $RESHARE_DIKUA | jq -r ".results[0].id" | tr -d '\r'`

echo Submitting requests to responder with symbol RESHARE:DIKUA - has directory ID $RESHARE_DIKUA
# NB: the payloads are lax JSON (unquoted keys). The isRequester field used
# to be written "isRequester = true" -- '=' is not a valid key/value
# separator even for a lax parser; it is ':' now.
PATRON_REQ_1=`curl --header "X-Okapi-Tenant: diku" -H "X-Okapi-Token: ${AUTH_TOKEN}" -H "Content-Type: application/json" -X POST http://localhost:9130/rs/patronrequests -d ' {
  title:"Brain of the firm",
  author:"Beer, Stafford",
  patronReference:"Patron001",
  state:"Idle",
  isRequester: true,
  notARealProperty:"Test that this is ignored in accordance with Postels law",
  serviceType:"Loan",
  pendingAction:"approve",
  tags:[
    "Testdata", "TestRun1", "MonographTest"
  ],
  customProperties:{
    "patronWalletHash": ["298348743738748728524854289743765"],
  },
  rota:[
    { directoryId:"'"$DIKUA_ID"'", rotaPosition:"0" }
  ]
}
'`
PATRON_REQ_1_ID=`echo $PATRON_REQ_1 | jq -r ".id" | tr -d '\r'`
echo Created request 1: $PATRON_REQ_1
echo Created request 1: $PATRON_REQ_1_ID

PATRON_REQ_2=`curl --header "X-Okapi-Tenant: diku" -H "X-Okapi-Token: ${AUTH_TOKEN}" -H "Content-Type: application/json" -X POST http://localhost:9130/rs/patronrequests -d ' {
  title:"The Heart of Enterprise",
  author:"Beer, Stafford",
  patronReference:"Patron001",
  state:"Idle",
  isRequester: true,
  serviceType:"Loan",
  tags:[
    "Testdata", "TestRun1", "MonographTest"
  ],
  customProperties:{
    "patronWalletHash": ["298348743738748728524854289743765"],
  },
  rota:[
    { directoryId:"'"$DIKUA_ID"'", rotaPosition:"0" }
  ]
}
' | jq -r ".id" | tr -d '\r'`
echo Created request 2: $PATRON_REQ_2

PATRON_REQ_3=`curl --header "X-Okapi-Tenant: diku" -H "X-Okapi-Token: ${AUTH_TOKEN}" -H "Content-Type: application/json" -X POST http://localhost:9130/rs/patronrequests -d ' {
  title:"Biological neural networks in invertebrate neuroethology and robotics",
  isbn:"978-0120847280",
  patronReference:"Patron004",
  publisher:"Boston : Academic Press",
  state:"Idle",
  isRequester: true,
  serviceType:"Loan",
  tags:[
    "Testdata", "TestRun1", "MonographTest"
  ],
  customProperties:{
    "patronWalletHash": ["298348743738748728524854289743765"],
  },
  rota:[
    {
      directoryId:"'"$DIKUA_ID"'",
      rotaPosition:"0" ,
      availability:"Availability as a string from the shared index",
      normalisedAvailability:"Unknown",
      protocolStatus:0,
      shelfmark:"A shelfmark",
      systemIdentifier:"The remote identifier for the ITEM",
      state:"Idle"
    }
  ]
}
' | jq -r ".id" | tr -d '\r'`
echo Created request 3: $PATRON_REQ_3

echo Attempt to read back request 1
curl --header "X-Okapi-Tenant: diku" -H "X-Okapi-Token: ${AUTH_TOKEN}" -H "Content-Type: application/json" -X GET http://localhost:9130/rs/patronrequests/$PATRON_REQ_1_ID

echo Find out what valid actions we can take for patron request 1
curl --header "X-Okapi-Tenant: diku" -H "X-Okapi-Token: ${AUTH_TOKEN}" -H "Content-Type: application/json" -X GET http://localhost:9130/rs/patronrequests/$PATRON_REQ_1_ID/validActions

echo List requests after creation
curl --header "X-Okapi-Tenant: diku" -H "X-Okapi-Token: ${AUTH_TOKEN}" -H "Content-Type: application/json" -X GET http://localhost:9130/rs/patronrequests

echo hit return to exit
read -r a
| true |
f0c5f1efed0bb69daa4a9529137d889e3fad5215 | Shell | jiaoah97/MQX411 | /build/twrvf65gs10_m4/make/build_gcc_arm.sh | UTF-8 | 1,239 | 2.890625 | 3 | [] | no_license | #!/usr/bin/env bash
status=0

# Build every library for the twrvf65gs10_m4 board. Each sub-build runs in a
# plain subshell; the old code wrapped it in $(...), which executed the
# build's *stdout* as a command line -- that breaks as soon as a build
# script prints anything.
for dir in \
    ../../../mqx/build/make/bsp_twrvf65gs10_m4 \
    ../../../ffs/build/make/ffs_twrvf65gs10_m4 \
    ../../../mcc/build/make/mcc_twrvf65gs10_m4 \
    ../../../mfs/build/make/mfs_twrvf65gs10_m4 \
    ../../../mqx/build/make/psp_twrvf65gs10_m4 \
    ../../../rtcs/build/make/rtcs_twrvf65gs10_m4 \
    ../../../shell/build/make/shell_twrvf65gs10_m4 \
    ../../../usb/device/build/make/usbd_twrvf65gs10_m4 \
    ../../../usb/host/build/make/usbh_twrvf65gs10_m4
do
    # Record failure but keep building the remaining libraries.
    if ! (cd "$dir" && ./build_gcc_arm.sh nopause); then
        status=-1
    fi
done

# Unless invoked with "nopause", wait for a keypress so a double-clicked
# console window stays open long enough to read the output.
if [ "${1}" != "nopause" ]; then
    read -p "Press any key to continue... " -n1 -s
fi
exit $status
| true |
720385beb90d2e037da734bb3ddfe10d76ea916b | Shell | prositen/advent-of-code | /bash/2018/dec01.bash | UTF-8 | 1,675 | 3.765625 | 4 | [] | no_license | #!/usr/bin/env bash
source ../common.bash
# Sum every term of the global dec01_input array (entries like "+1"/"-2")
# and print the resulting frequency.
function frequency_change() {
    local total=0 term
    for term in "${dec01_input[@]}"; do
        total=$((total + term))
    done
    echo "${total}"
}
# Print the first running total that occurs twice while cycling through the
# global dec01_input array repeatedly. The starting total 0 counts as seen.
function first_repeating_frequency() {
    declare -A freqs
    local sum=0
    freqs[${sum}]=1
    # Safety bound: give up after ~10M distinct frequencies. The original
    # put this guard in the unused *third* clause of a C-style for-loop
    # header (`for (( ; ; expr ))`), where it was evaluated and discarded,
    # so the loop was actually unbounded and "Error" unreachable.
    while (( ${#freqs[@]} <= 10000000 )); do
        for i1 in "${!dec01_input[@]}"; do
            local term=${dec01_input[$i1]}
            ((sum = sum + term))
            if [ -n "${freqs[${sum}]}" ]; then
                echo "${sum}"
                return
            fi
            freqs[${sum}]=1
        done
    done
    echo "Error"
}
# Self-test: run the puzzle's published examples; return 1 on the first
# mismatch, 0 when all pass.
function dec01_test() {
    dec01_input=("+1" "-2" "+3" "+1")
    [[ "$(frequency_change)" -eq 3 ]] || return 1
    dec01_input=("+1" "+1" "+1")
    [[ "$(frequency_change)" -eq 3 ]] || return 1
    dec01_input=("+1" "+1" "-2")
    [[ "$(frequency_change)" -eq 0 ]] || return 1
    dec01_input=("-1" "-2" "-3")
    [[ "$(frequency_change)" -eq -6 ]] || return 1
    dec01_input=("+1" "-1")
    [[ "$(first_repeating_frequency)" -eq 0 ]] || return 1
    dec01_input=("+3" "+3" "+4" "-2" "-4")
    [[ "$(first_repeating_frequency)" -eq 10 ]] || return 1
    dec01_input=("-6" "+3" "+8" "+5" "-6")
    [[ "$(first_repeating_frequency)" -eq 5 ]] || return 1
    dec01_input=("+7" "+7" "-2" "-7" "-4")
    [[ "$(first_repeating_frequency)" -eq 14 ]] || return 1
}
function dec01_main() {
declare -a dec01_input
read_file_to_arr 2018 1 dec01_input
echo "Part 1: $(frequency_change)"
echo "Part 2: $(first_repeating_frequency)"
}
if [ "x$1" == "xtest" ]; then
dec01_test
else
dec01_main
fi
| true |
880a3b384dd3441def55fbd9a061c90a76e46baf | Shell | condekind/page-angha | /deploy-angha/deploy.sh | UTF-8 | 1,923 | 3.765625 | 4 | [] | no_license | #!/bin/bash
#=============================================================================#
# trap for exit signals to avoid messing directories
sigbye () {
exit_code=$?
STTY=$(stty -g)
stty intr undef
cd $basedir
stty ${STTY}
exit $exit_code
}
STTY=$(stty -g)
stty intr undef # Disable interrupts
{
basedir="$(pwd)"
trap sigbye INT; trap sigbye QUIT; trap sigbye TERM; trap sigbye EXIT;
}
stty ${STTY} # Reenable interrupts
## Compiling atomic file swap (not portable, see README.md for details)
#[[ ! -f swap ]] || { make swap && exit $?; }
[[ "${basedir##*/}" =~ "deploy-angha" ]] && cd ..
prod=${prod:-0}
if [[ ! $prod -eq 0 ]]; then
page_dir=/home/www-data/public/cuda.dcc.ufmg.br/public/angha
[[ -d files ]] || exit $?
[[ -d $page_dir ]] || exit $?
[[ $HOSTNAME == "cuda" ]] || exit $?
npm install || exit $?
ng build --prod --base-href="/angha/" || exit $?
rm -rf $page_dir.bkp.d || exit $?
mv dist/page-angha $page_dir.bkp.d || exit $?
cp -r files $page_dir.bkp.d || exit $?
cp .htaccess $page_dir.bkp.d || exit $?
## If running a modern kernel: i) comment the moves below and
## ii) uncomment the "make swap" above and the swap call below.
## Swap the angha directory served by apache with the backup directory
#./deploy-angha/swap $page_dir{,.bkp.d} || exit $?
# Gambs to please the old kernel
STTY=$(stty -g)
stty intr undef # Disable interrupts
mv $page_dir{,.tmp}
mv $page_dir{.bkp.d,}
mv $page_dir{.tmp,.bkp.d}
stty ${STTY} # Reenable interrups
else
echo "Generating developer build and starting development server"
echo "WARNING: do not run this in production!"
ng build || exit $?
npx lite-server --baseDir="dist/page-angha" || exit $?
fi
| true |
5d9328cfe0c583a5bbf2885041cc98635e9cea69 | Shell | R0flcopt3r/dots | /scripts/mic.sh | UTF-8 | 188 | 2.609375 | 3 | [] | no_license | #!/usr/bin/env sh
mic=$(amixer get Capture | grep "Front Left: Capture" | awk '{print $6}')
red="#EF2F27"
if [ "$mic" = "[on]" ]
then
echo ' '
else
echo '%{u'"$red"'} %{u-}'
fi
| true |
b23a24e8f92495a7ddd79f97a08afb41bde6980b | Shell | gsanchietti/ns8-podman | /rootless/traefik.sh | UTF-8 | 810 | 2.671875 | 3 | [] | no_license | #!/bin/bash
HOST=$(hostname -f)
mkdir -p $HOME/traefik/config/acme
cat <<EOF > $HOME/traefik/config/traefik.yaml
defaultEntryPoints:
- http
- https
file: {}
log:
level: DEBUG
accessLog: {}
entryPoints:
http:
address: ":80"
https:
address: ":443"
providers:
redis:
endpoints:
- "127.0.0.1:6379"
tls:
certResolver: letsencrypt
options: {}
certificatesResolvers:
letsencrypt:
acme:
email: root@$HOST
storage: /etc/traefik/acme/acme.json
httpChallenge:
entryPoint: http
tlsChallenge: false
EOF
podman stop traefik
podman rm traefik
podman run --network=host --name traefik -d \
-v "$HOME/traefik/config/acme:/etc/traefik/acme:Z" \
-v "$HOME/traefik/config/traefik.yaml:/etc/traefik/traefik.yaml:Z" \
docker.io/traefik:v2.4
| true |
7d81293d77fa85b49604a7a73c8cd03a4af7becf | Shell | rkat0/rcp | /test.sh | UTF-8 | 470 | 3.609375 | 4 | [] | no_license | #!/bin/bash
try() {
expected="$1"
input="$2"
echo -n "$input" | ./rcp
gcc -o tmp tmp.s
./tmp
actual="$?"
if [ "$actual" = "$expected" ]; then
echo "$input => $actual"
else
echo "$expected expected, but got $actual"
exit 1
fi
}
try 0 "0;"
try 100 "100;"
try 3 "1+2;"
try 4 "5-1;"
try 10 "100-52-83+45;"
try 5 "10 - 5;"
try 10 " 100 - 52 - 83 + 45 ;"
try 2 "5 / 2;"
try 21 "10 * 3 - (5 + 8 / 2);"
echo OK
| true |
4655cc63a78d995bf226ad025c51d52bc9e8c8a5 | Shell | siguangli/Hippy | /layout/gtest/build_run_gtest_for_hippy_layout.sh | UTF-8 | 521 | 3.546875 | 4 | [
"Apache-2.0",
"MIT"
] | permissive | #! /bin/bash
CMAKE=`which cmake`
MAKE=`which make`
BASH_SOURCE_DIR=$(cd `dirname "${BASH_SOURCE[0]}"` && pwd)
BUILD_DIR="${BASH_SOURCE_DIR}"/../out
rm -rf "${BUILD_DIR}"/gtest
mkdir -p "${BUILD_DIR}"/gtest
cd "${BUILD_DIR}"/gtest
#cmake generate make file
"${CMAKE}" ../../gtest/
echo "Start build in directory: `pwd`"
#make gtest_hippy_layout executable
${MAKE}
#run gtest_hippy_layout, start gtest !!!
GTEST_RUN_PATH="${BUILD_DIR}"/gtest/gtest_hippy_layout
if [ -x "${GTEST_RUN_PATH}" ];then
${GTEST_RUN_PATH}
fi
| true |
ad1399dcc586003b9becf75c3ede810a23ee9bba | Shell | nshandra/Practica-ISI | /pruebasIDM/Embotelladora/cr-emb.sh | UTF-8 | 323 | 2.859375 | 3 | [] | no_license | #!/bin/bash
hamcrest="./../hamcrest-core-1.3.jar"
junit="./../junit-4.12.jar"
if [[ -f "$hamcrest" ]] && [[ -f "$junit" ]]; then
javac -cp .:"$hamcrest":"$junit" EmbotelladoraTest.java
javac -cp .:"$hamcrest":"$junit" AllTests.java
java -cp .:"$hamcrest":"$junit" AllTests
else
echo "Error. JUnit libs not in ./../"
fi | true |
6cec53304c72416c11ba47c2a8b4d17d3a4d2aef | Shell | andreisusai/the_hyrule_castle | /characters/basic_characteristics.sh | UTF-8 | 8,323 | 3.625 | 4 | [] | no_license | #!/bin/bash
# Display and chose characters
display_characters(){
first_line=0
echo "
=========== CHARACTERS ==========
"
while IFS="," read -r id name hp mp str int def res spd luck race class rarity ; do
if [[ $first_line -ne 0 ]]; then
echo "
=========== $name =============
"
echo "Name:" "$name"
echo "Hp:" $hp
echo "Mp:" $mp
echo "Strenght:" $str
echo "Int:" $int
echo "Def:" $def
echo "Res:" $res
echo "Speed:" $spd
echo "Luck:" $luck
# Get the character's race
first_line=0
while IFS="," read -r id_race name strength weakness rarity ; do
if [[ $first_line -ne 0 ]]; then
if [[ $race -eq $id_race ]]; then
echo "Race:" "$name"
echo "Strenght:" $str
echo "Weakness:" $weakness
echo "Rarity:" $rarity
fi
else
first_line=1
fi
done < src/races.csv
# Get the character's classe
first_line=0
while IFS="," read -r id_class name strength weakness attack_type alignment rarity ; do
if [[ $first_line -ne 0 ]]; then
if [[ $class -eq $id_class ]]; then
echo "Class:" "$name"
echo "Strenght:" $strength
echo "Weakness:" $weakness
echo "Attack:" $attack_type
echo "Alignement:" $alignment
echo "Rarity:" $rarity
fi
else
first_line=1
fi
done < src/classes.csv
echo "Rarity:" $rarity
else
first_line=1
fi
done < src/players.csv
}
# Get the character's choice of the player
display_character(){
first_line=0
matching_name_character=0
shopt -s nocasematch
if [[ $matching_name_character -eq 0 ]]; then
echo "
====== Please, insert the name character of your choice =======
"
fi
read choice
while IFS="," read -r id nameCharacter hp mp str int def res spd luck race class rarity ; do
if [[ $first_line -ne 0 ]]; then
if [[ $(tr "[:upper:]" "[:lower:]" <<<"$choice") == $(tr "[:upper:]" "[:lower:]" <<<"$nameCharacter") ]]; then
matching_name_character=0
echo "
=========== $nameCharacter =============
"
echo "Name:" "$nameCharacter"
echo "Hp:" $hp
echo "Mp:" $mp
echo "Strenght:" $str
echo "Int:" $int
echo "Def:" $def
echo "Res:" $res
echo "Speed:" $spd
echo "Luck:" $luck
# Get the character's race
first_line=0
while IFS="," read -r id_race nameRace strengthRace weaknessRace rarityRace ; do
if [[ $first_line -ne 0 ]]; then
if [[ $(tr "[:upper:]" "[:lower:]" <<<"$race") = $(tr "[:upper:]" "[:lower:]" <<<"$id_race") ]]; then
echo "Race:" "$nameRace"
echo "Strenght:" $strengthRace
echo "Weakness:" $weaknessRace
echo "Rarity:" $rarityRace
race="$nameRace"
strenghtR=$strengthRace
weaknessR=$weaknessRace
rarityR=$rarityRace
fi
else
first_line=1
fi
done < src/races.csv
# Get the character's classe
first_line=0
while IFS="," read -r id_class nameClass strength weakness attack_type alignment rarity ; do
if [[ $first_line -ne 0 ]]; then
if [[ $class -eq $id_class ]]; then
echo "Class:" "$nameClass"
echo "Strenght:" $strength
echo "Weakness:" $weakness
echo "Attack:" $attack_type
echo "Alignement:" $alignment
echo "Rarity:" $rarity
class="$nameClass"
strengthC=$strength
weaknessC=$weakness
attack_typeC=$attack_type
alignmentC=$alignment
rarityC=$rarity
fi
else
first_line=1
fi
done < src/classes.csv
echo "Rarity:" $rarity
break
fi
else
matching_name_character=1
first_line=1
fi
done < src/players.csv
while [[ $matching_name_character -eq 1 ]]; do
display_character
done
}
# Get enemies
get_enemy(){
echo "
======= Good Choice -> $nameCharacter ======="
echo "
======== You will fight =========
"
first_line=0
randomId=$((1 + $RANDOM % 12))
while IFS="," read -r id nameEnemy hpE mpE strE intE defE resE spdE luckE race class rarityE ; do
if [[ $first_line -ne 0 ]]; then
if [[ $randomId -eq $id ]]; then
echo "
=========== $nameEnemy =============
"
echo "Name:" "$nameEnemy"
echo "Hp:" $hpE
echo "Mp:" $mpE
echo "Strenght:" $strE
echo "Int:" $intE
echo "Def:" $defE
echo "Res:" $resE
echo "Speed:" $spdE
echo "Luck:" $luckE
# Get the enemies race
first_line=0
while IFS="," read -r id_race name strength weakness rarity ; do
if [[ $first_line -ne 0 ]]; then
if [[ $race -eq $id_race ]]; then
echo "Race:" "$name"
echo "Strenght:" $strength
echo "Weakness:" $weakness
echo "Rarity:" $rarity
raceE="$name"
strenghtER=$strength
weaknessER=$weakness
rarityER=$rarity
fi
else
first_line=1
fi
done < src/races.csv
# Get the enemies classe
first_line=0
while IFS="," read -r id_class name strength weakness attack_type alignment rarity ; do
if [[ $first_line -ne 0 ]]; then
if [[ $class -eq $id_class ]]; then
echo "Class:" "$name"
echo "Strenght:" $strength
echo "Weakness:" $weakness
echo "Attack:" $attack_type
echo "Alignement:" $alignment
echo "Rarity:" $rarity
classE="$name"
strengthEC=$strength
weaknessEC=$weakness
attack_typeEC=$attack_type
alignmentEC=$alignment
rarityEC=$rarity
fi
else
first_line=1
fi
done < src/classes.csv
echo "Rarity:" $rarityE
break
fi
else
first_line=1
fi
done < src/enemies.csv
}
export -f display_characters display_character get_enemy
| true |
b061c51a9858646cdedf2a69510042b27957ec87 | Shell | kenmlee/refarch-cloudnative-kubernetes | /delete_bluecompute.sh | UTF-8 | 6,991 | 3.765625 | 4 | [] | no_license | #!/bin/bash
# Terminal Colors
red=$'\e[1;31m'
grn=$'\e[1;32m'
yel=$'\e[1;33m'
blu=$'\e[1;34m'
mag=$'\e[1;35m'
cyn=$'\e[1;36m'
end=$'\e[0m'
coffee=$'\xE2\x98\x95'
coffee3="${coffee} ${coffee} ${coffee}"
CLUSTER_NAME=$1
BX_SPACE=$2
BX_API_KEY=$3
BX_REGION=$4
BX_API_ENDPOINT=""
if [[ -z "${BX_REGION// }" ]]; then
BX_API_ENDPOINT="api.ng.bluemix.net"
echo "Using DEFAULT endpoint ${grn}${BX_API_ENDPOINT}${end}."
else
BX_API_ENDPOINT="api.${BX_REGION}.bluemix.net"
echo "Using endpoint ${grn}${BX_API_ENDPOINT}${end}."
fi
function check_tiller {
kubectl --namespace=kube-system get pods | grep tiller | grep Running | grep 1/1
}
function print_usage {
printf "\n\n${yel}Usage:${end}\n"
printf "\t${cyn}./delete_bluecompute.sh <cluster-name> <bluemix-space-name> <bluemix-api-key>${end}\n\n"
}
function bluemix_login {
# Bluemix Login
if [[ -z "${CLUSTER_NAME// }" ]]; then
print_usage
echo "${red}Please provide Cluster Name. Exiting..${end}"
exit 1
elif [[ -z "${BX_SPACE// }" ]]; then
print_usage
echo "${red}Please provide Bluemix Space. Exiting..${end}"
exit 1
elif [[ -z "${BX_API_KEY// }" ]]; then
print_usage
echo "${red}Please provide Bluemix API Key. Exiting..${end}"
exit 1
fi
printf "${grn}Login into Bluemix${end}\n"
export BLUEMIX_API_KEY=${BX_API_KEY}
bx login -a ${BX_API_ENDPOINT} -s ${BX_SPACE}
status=$?
if [ $status -ne 0 ]; then
printf "\n\n${red}Bluemix Login Error... Exiting.${end}\n"
exit 1
fi
}
function get_cluster_name {
printf "\n\n${grn}Login into Container Service${end}\n\n"
bx cs init
if [[ -z "${CLUSTER_NAME// }" ]]; then
echo "${yel}No cluster name provided. Will try to get an existing cluster...${end}"
CLUSTER_NAME=$(bx cs clusters | tail -1 | awk '{print $1}')
if [[ "$CLUSTER_NAME" == "Name" ]]; then
echo "No Kubernetes Clusters exist in your account. Please provision one and then run this script again."
exit 1
fi
fi
}
function set_cluster_context {
# Getting Cluster Configuration
unset KUBECONFIG
printf "\n${grn}Setting terminal context to \"${CLUSTER_NAME}\"...${end}\n"
eval "$(bx cs cluster-config ${CLUSTER_NAME} | tail -1)"
echo "KUBECONFIG is set to = $KUBECONFIG"
if [[ -z "${KUBECONFIG// }" ]]; then
echo "${red}KUBECONFIG was not properly set. Exiting.${end}"
exit 1
fi
}
function initialize_helm {
printf "\n\n${grn}Initializing Helm.${end}\n"
helm init --upgrade
echo "Waiting for Tiller (Helm's server component) to be ready..."
TILLER_DEPLOYED=$(check_tiller)
while [[ "${TILLER_DEPLOYED}" == "" ]]; do
sleep 1
TILLER_DEPLOYED=$(check_tiller)
done
}
function delete_inventory {
local release=$(helm list | grep inventory | awk '{print $1}' | head -1)
# Creating for API KEY
if [[ -z "${release// }" ]]; then
printf "\n\n${grn}inventory was already deleted!${end}\n"
else
printf "\n\n${grn}Deleting inventory chart. This will take a few minutes...${end} ${coffee3}\n\n"
time helm delete $release --purge --debug --timeout 600
local status=$?
if [ $status -ne 0 ]; then
printf "\n\n${red}Error deleting inventory... Exiting.${end}\n"
exit 1
fi
printf "\n\n${grn}inventory was successfully deleted!${end}\n"
printf "\n\n${grn}Cleaning up...${end}\n"
kubectl delete pods,jobs -l chart=inventory-0.1.1
fi
}
function delete_catalog {
local release=$(helm list | grep catalog | awk '{print $1}' | head -1)
# Creating for API KEY
if [[ -z "${release// }" ]]; then
printf "\n\n${grn}catalog was already deleted!${end}\n"
else
printf "\n\n${grn}Deleting catalog chart. This will take a few minutes...${end} ${coffee3}\n\n"
time helm delete $release --purge --debug --timeout 600
local status=$?
if [ $status -ne 0 ]; then
printf "\n\n${red}Error deleting catalog... Exiting.${end}\n"
exit 1
fi
printf "\n\n${grn}catalog was successfully deleted!${end}\n"
printf "\n\n${grn}Cleaning up...${end}\n"
kubectl delete pods,jobs -l chart=catalog-0.1.1
fi
}
function delete_orders {
local release=$(helm list | grep orders | awk '{print $1}' | head -1)
# Creating for API KEY
if [[ -z "${release// }" ]]; then
printf "\n\n${grn}orders was already deleted!${end}\n"
else
printf "\n\n${grn}Deleting orders chart. This will take a few minutes...${end} ${coffee3}\n\n"
time helm delete $release --purge --debug --timeout 600
local status=$?
if [ $status -ne 0 ]; then
printf "\n\n${red}Error deleting orders... Exiting.${end}\n"
exit 1
fi
printf "\n\n${grn}orders was successfully deleted!${end}\n"
printf "\n\n${grn}Cleaning up...${end}\n"
kubectl delete pods,jobs -l chart=orders-0.1.0
fi
}
function delete_customer {
local release=$(helm list | grep customer | awk '{print $1}' | head -1)
# Creating for API KEY
if [[ -z "${release// }" ]]; then
printf "\n\n${grn}customer was already deleted!${end}\n"
else
printf "\n\n${grn}Deleting customer chart. This will take a few minutes...${end} ${coffee3}\n\n"
time helm delete $release --purge --debug --timeout 600
local status=$?
if [ $status -ne 0 ]; then
printf "\n\n${red}Error deleting customer... Exiting.${end}\n"
exit 1
fi
printf "\n\n${grn}customer was successfully deleted!${end}\n"
printf "\n\n${grn}Cleaning up...${end}\n"
kubectl delete pods,jobs -l chart=customer-0.1.0
fi
}
function delete_auth {
local release=$(helm list | grep auth | awk '{print $1}' | head -1)
# Creating for API KEY
if [[ -z "${release// }" ]]; then
printf "\n\n${grn}auth was already deleted!${end}\n"
else
printf "\n\n${grn}Deleting auth chart. This will take a few minutes...${end} ${coffee3}\n\n"
time helm delete $release --purge --debug --timeout 600
local status=$?
if [ $status -ne 0 ]; then
printf "\n\n${red}Error deleting auth... Exiting.${end}\n"
exit 1
fi
printf "\n\n${grn}auth was successfully deleted!${end}\n"
printf "\n\n${grn}Cleaning up...${end}\n"
kubectl delete pods,jobs -l chart=auth-0.1.0
fi
}
function delete_web {
local release=$(helm list | grep web | awk '{print $1}' | head -1)
# Creating for API KEY
if [[ -z "${release// }" ]]; then
printf "\n\n${grn}web was already deleted!${end}\n"
else
printf "\n\n${grn}Deleting web chart. This will take a few minutes...${end} ${coffee3}\n\n"
time helm delete $release --purge --debug --timeout 600
local status=$?
if [ $status -ne 0 ]; then
printf "\n\n${red}Error deleting web... Exiting.${end}\n"
exit 1
fi
printf "\n\n${grn}web was successfully deleted!${end}\n"
printf "\n\n${grn}Cleaning up...${end}\n"
kubectl delete pods,jobs -l chart=web-0.1.0
fi
}
# Setup Stuff
bluemix_login
get_cluster_name
set_cluster_context
initialize_helm
# Install Bluecompute
delete_web
delete_auth
delete_customer
delete_orders
delete_catalog
delete_inventory
# Sanity Checks
printf "\n\n${grn}Doing some final cleanup${end}\n"
kubectl delete pods,jobs -l heritage=Tiller --force
kubectl delete secrets hs256-key
printf "\n\nBluecompute was uninstalled!\n" | true |
8e012ee7e611aa30d651e023e4b97c9f0cf6fd58 | Shell | KunKaxx/virtual-environments | /images/linux/scripts/installers/julia.sh | UTF-8 | 1,212 | 3.609375 | 4 | [
"MIT"
] | permissive | #!/bin/bash -e
################################################################################
## File: julia.sh
## Desc: Installs Julia, and adds Julia to the path
################################################################################
# This function fetches the latest Julia release from the GitHub API
# Based on https://gist.github.com/lukechilds/a83e1d7127b78fef38c2914c4ececc3c
function GetLatestJuliaRelease () {
curl --silent "https://api.github.com/repos/julialang/julia/releases/latest" |
grep '"tag_name":' |
sed -E 's/.*"([^"]+)".*/\1/' |
sed 's/v//' # remove v prefix
}
juliaVersion="$(GetLatestJuliaRelease)"
juliaMajorAndMinorVersion="$(cut -d. -f1,2 <<< $juliaVersion)"
juliaInstallationPath="/usr/local/julia$juliaVersion"
curl -sL "https://julialang-s3.julialang.org/bin/linux/x64/$juliaMajorAndMinorVersion/julia-$juliaVersion-linux-x86_64.tar.gz" -o "julia-$juliaVersion-linux-x86_64.tar.gz"
mkdir -p "$juliaInstallationPath"
tar -C "$juliaInstallationPath" -xzf "julia-$juliaVersion-linux-x86_64.tar.gz" --strip-components=1
rm "julia-$juliaVersion-linux-x86_64.tar.gz"
ln -s "$juliaInstallationPath/bin/julia" /usr/bin/julia
invoke_tests "Tools" "Julia"
| true |
80e49d18f529f77b96c84e4ecd40421c0866add0 | Shell | marian-babik/glideinwms | /build/jenkins/check_ascii.sh | UTF-8 | 381 | 3.15625 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
mydir="$(dirname $0)"
pushd "${mydir}/../.." > /dev/null
echo "List of non ASCII files starting from ($(pwd) - some known directories are skipped):"
find . -type f -not -path "*/.git/*" -not -path "*/images/*" -not -path "*/doc/papers/*" -not -path "*/unittests/fixtures/factory/work-dir/*" -exec file {} \; | grep -v "ASCII text" | grep -v ": empty"
popd > /dev/null
| true |
306c05b5b286c56066428a48b7edb672965e72fe | Shell | santyar/insoft_task | /serv.sh | UTF-8 | 842 | 2.640625 | 3 | [] | no_license | echo "Update repository it take few time"
apt-get -y update > /dev/null
#Install
echo "Install and run nginx and component"
apt-get install -y nginx
apt-get install -y python-dev python-virtualenv
apt-get install -y uwsgi uwsgi-plugin-python
apt-get install -y python-pip > /dev/null
#install Flask
echo "Install Flask"
pip install Flask > /dev/null
pip install uwsgi > /dev/null
#Nginx setup
rm -rf /etc/nginx/sites-available/default
cp /vagrant/app-serv /etc/nginx/sites-available/
ln -s /etc/nginx/sites-available/app-serv /etc/nginx/sites-enabled
service nginx restart
mkdir /var/www
mkdir /var/www/hello
cp /vagrant/py_app.py /var/www/hello
cp /vagrant/welcom.jpg /var/www/hello
echo '127.0.0.1 tapp-serv' > /etc/hosts
uwsgi --socket 127.0.0.1:3031 --wsgi-file /var/www/hello/*.py --callable app --processes 4 --threads 2 &
| true |
e09fd893334a0b02a9ec59c10d0761ab812f5aba | Shell | mrflip/chimpmark | /challenges/graph/pagerank/pig/iterate_pagerank.sh | UTF-8 | 459 | 3.21875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
num_iters=$1 ; shift
work_dir=$1
for (( curr=0 , next=1 ; "$curr" < "$num_iters" ; curr++ , next++ )) ; do
curr_str=`printf "%03d" ${curr}`
next_str=`printf "%03d" ${next}`
curr_dir=${work_dir}/pagerank_graph_${curr_str}
next_dir=${work_dir}/pagerank_graph_${next_str}
# pig -x local -param PRGRPH="${curr_dir}" -param OUT="${next_dir}" pagerank.pig
pig -param PRGRPH="${curr_dir}" -param OUT="${next_dir}" pagerank.pig
done
| true |
44f1c628a1ad502136846483f2b8d925128d673e | Shell | Boladar/sopy | /fiszki.sh | UTF-8 | 2,227 | 3.96875 | 4 | [] | no_license |
h_flag=false
s_flag=false
i_flag=false
files=''
delimiter=';'
while getopts 'hd:f:s:i' flag; do
case "${flag}" in
h) h_flag=true;;
d) delimiter=$OPTARG;;
f) files+=("$OPTARG");;
s) s_flag=true
s_mode=$OPTARG;;
i) i_flag=true;;
esac
done
if [ "$h_flag" = true ]; then
echo "Usage: fiszki [FLAGS]"
echo "FLAGS: "
echo "-h Show help"
echo "-d Set custom delimiter for files(default is ';')"
echo "-f Give a file to script, if you want to use multiple files call multiple -f flags"
echo "-s Set this flag if you want to hear the words
s flag has two modes:
0 - speak and write to screen,
1 - where you only hear the word,
You can choose either by passing a parameter to -s"
echo "-i Inverse the direction of flash cards, by default default - [native;foreign]"
exit 0
fi
total_number=0
correct=0
for filename in ${files[@]}
do
#echo $filename
while read line; do
#reading each line
total_number=$((total_number + 1))
native=$(echo $line | cut -d $delimiter -f1)
foreign=$(echo $line | cut -d $delimiter -f2)
if [ "$i_flag" = true ];then
temp=$native
native=$foreign
foreign=$temp
fi
# generalnie to to załatwia mówienie tekstu ale
# nie chce mi się specjalnie instalować linuxa
# bo na subsystemie do win10 nie chce działać.
if [ "$s_flag" = true ]; then
espeak $native
if [ "$s_mode" = 0 ]; then
echo $native
fi
else
echo $native
fi
read -p "Word in foreign language: " response < /dev/tty
if [[ "$response" == "$foreign" ]]; then
correct=$((correct + 1))
echo "poprawna odpowiedź"
else
echo "musisz jeszcze popracować"
fi
done < $filename
done
percentage=$(bc <<< "scale=1;$correct/$total_number*100")
echo "wynik : $correct / $total_number, procentowo : $percentage% "
exit 0 | true |
d843e2cc3ff51d32c7721bbd433a473683df4b13 | Shell | aolarte/test-vms | /rabbitmq/bootstrap.sh | UTF-8 | 343 | 2.6875 | 3 | [] | no_license | #!/bin/bash -x
# Setup puppet client
if [ ! -f /etc/yum.repos.d/puppetlabs.repo ]; then
sudo rpm -ivh https://yum.puppetlabs.com/puppetlabs-release-el-7.noarch.rpm
fi
if [ ! -f /usr/bin/puppet ]; then
sudo yum -y install puppet
fi
if [ ! -d /etc/puppet/modules/packagecloud ]; then
sudo puppet module install computology-packagecloud
fi | true |
5ca87b16354799136d9b408ff32b5c138cec9184 | Shell | nlf/riakdown | /test-pouchdb.sh | UTF-8 | 371 | 2.90625 | 3 | [
"MIT"
] | permissive | #!/bin/sh
#
# run a single pouchdb test, using pouchdb-server
# in the server running on port 6984
#
./node_modules/.bin/pouchdb-server -p 6984 $SERVER_ARGS &
POUCHDB_SERVER_PID=$!
cd node_modules/pouchdb/
npm install
COUCH_HOST=http://localhost:6984 npm test
EXIT_STATUS=$?
if [[ ! -z $POUCHDB_SERVER_PID ]]; then
kill $POUCHDB_SERVER_PID
fi
exit $EXIT_STATUS
| true |
08f06e94a2b0244de5379a4ef1bddfdabc13b1a3 | Shell | caibai36/clib | /egs/wsj/s0/run.sh | UTF-8 | 975 | 3.546875 | 4 | [] | no_license | #!/bin/bash
# Set bash to 'debug' mode, it will exit on :
# -e 'error', -u 'undefined variable', -o ... 'error in pipeline', -x 'print commands',
set -euo pipefail
# Prepare some basic config files of kaldi.
sh local/kaldi_conf.sh
# Note: cmd.sh, path.sh are created by kaldi_conf.sh
. cmd.sh
. path.sh
# general configuration
stage=0 # start from 0 if you need to start from data preparation
# Parse the options. (eg. ./run.sh --stage 1)
# Note that the options should be defined as shell variable before parsing
. utils/parse_options.sh || exit 1
# data
# wsj0=/project/nakamura-lab01/Share/Corpora/Speech/en/WSJ/wsj0
# wsj1=/project/nakamura-lab01/Share/Corpora/Speech/en/WSJ/wsj1
wsj=/project/nakamura-lab01/Share/Corpora/Speech/en/WSJ/
if [ ${stage} -le 0 ]; then
date
echo "stage 0: Data preparation"
# local/wsj_data_prep.sh ${wsj0}/??-{?,??}.? ${wsj1}/??-{?,??}.?
local/cstr_wsj_data_prep.sh $wsj
local/wsj_format_data.sh
date
fi
| true |
0034bb8ad81c5b6b6c5c4258f01da49d01fdfcc4 | Shell | manuel92780/muon_veto_project | /corsika_comparison/job.sh | UTF-8 | 486 | 2.609375 | 3 | [] | no_license | #!/bin/bash
#prep inputs
nseed=$1
nfile=$2
#paths
dir=/data/user/msilva/corsika_20222
scratch_dir=/home/msilva/scratch
mkdir $scratch_dir
mkdir $scratch_dir/corsika
cd $scratch_dir/corsika/
cp /home/msilva/muon_gun_multiplicity_study/corsika_comparison/process_corsika.py .
#setup env
eval `/cvmfs/icecube.opensciencegrid.org/py2-v2/setup.sh`
#run job
bash /home/msilva/combo/build/env-shell.sh <<EOF
python process_corsika.py --nseed ${nseed} --out ${dir}/corsika ${nfile}
EOF
| true |
f0a879bbc90a51d5928532236e1f1c052d4f08a6 | Shell | tteck/Proxmox | /install/deluge-install.sh | UTF-8 | 1,569 | 2.875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Copyright (c) 2021-2023 tteck
# Author: tteck (tteckster)
# License: MIT
# https://github.com/tteck/Proxmox/raw/main/LICENSE
source /dev/stdin <<< "$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
msg_info "Installing Dependencies"
$STD apt-get install -y curl
$STD apt-get install -y sudo
$STD apt-get install -y mc
msg_ok "Installed Dependencies"
msg_info "Updating Python3"
$STD apt-get install -y \
python3 \
python3-dev \
python3-pip
msg_ok "Updated Python3"
msg_info "Installing Deluge"
$STD pip install deluge[all]
$STD pip install lbry-libtorrent
msg_ok "Installed Deluge"
msg_info "Creating Service"
service_path="/etc/systemd/system/deluged.service"
echo "[Unit]
Description=Deluge Bittorrent Client Daemon
Documentation=man:deluged
After=network-online.target
[Service]
Type=simple
UMask=007
ExecStart=/usr/local/bin/deluged -d
Restart=on-failure
TimeoutStopSec=300
[Install]
WantedBy=multi-user.target" >$service_path
service_path="/etc/systemd/system/deluge-web.service"
echo "[Unit]
Description=Deluge Bittorrent Client Web Interface
Documentation=man:deluge-web
After=deluged.service
Wants=deluged.service
[Service]
Type=simple
UMask=027
ExecStart=/usr/local/bin/deluge-web -d
Restart=on-failure
[Install]
WantedBy=multi-user.target" >$service_path
systemctl enable --now -q deluged.service
systemctl enable --now -q deluge-web.service
msg_ok "Created Service"
motd_ssh
customize
msg_info "Cleaning up"
$STD apt-get autoremove
$STD apt-get autoclean
msg_ok "Cleaned"
| true |
ccdb8eed926a3c6c699d967cb5b562341798b9ca | Shell | maniaabdi/system-config | /bin/ffchm | UTF-8 | 406 | 3.484375 | 3 | [] | no_license | #!/bin/bash
chm=$(readlink -f "$1")
dir=~/.cache/for-code-reading/chm/"$chm"
if ! test -e "$dir"; then
(
archmage -x "$chm" /tmp/archmage-ffchm.$$ &&
mkdir -p "$(dirname "$dir")" &&
mv /tmp/archmage-ffchm.$$ "$dir"
) || (
rm -rf "$dir";
extract_chmLib "$chm" "$dir"
)
fi
if test -e "$dir"/index.html; then
cd "$dir";
firefox index.html&
else
cd "$dir"
firefox . &
fi
| true |
8ccf699f1eef83879e1c59a12f4069c7e0aa0967 | Shell | marcusmyers/dotfiles | /git/templates/hooks/pre-commit | UTF-8 | 462 | 3.296875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
FILES_WITH_FOCUS_TAGS=`grep -El "(:focus\s*=\>|,\s*:focus|focus:\s*true)" $(git diff --cached --name-status | awk '$1 == "M" || $1 == "A" { print $2 }' | grep "_spec.rb")`
NUMBER_OF_FOCUS_ADDITIONS=`echo "$FILES_WITH_FOCUS_TAGS" | wc -l`
if [[ $FILES_WITH_FOCUS_TAGS != "" && $NUMBER_OF_FOCUS_ADDITIONS != 0 ]]; then
echo "Aborting commit, remove the 'focus' tags from the following files:"
echo ""
echo "$FILES_WITH_FOCUS_TAGS"
exit 1
fi
| true |
21812403a1e5f227da93ec4d0dcf2ab5e3a6df3f | Shell | wang-xiao-er/contrail-dev-env | /scripts/build-containers.sh | UTF-8 | 271 | 2.515625 | 3 | [] | no_license | #!/bin/bash
REPODIR=/root/src/review.opencontrail.org/Juniper/contrail-container-builder
[ -d ${REPODIR} ] || git clone https://github.com/tungstenfabric/tf-container-builder ${REPODIR}
cp tpc.repo.template common.env ${REPODIR}
cd ${REPODIR}/containers && ./build.sh
| true |
17afedd8fcfa2d9343846333b89abdfb7f4ff8ad | Shell | struys/envirius | /tests/versions.bats | UTF-8 | 591 | 2.734375 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/usr/bin/env bats
load test_helper
@test "versions: for disabled plugin [go]" {
line_0=$(bold "* go:";)
run nv versions --go
assert_success
assert_equal "${lines[0]}" $line_0
assert_equal "${lines[1]}" " - plugin disabled."
}
@test "versions: for enabled plugin [rust]" {
line_0=$(bold "* rust:";)
run nv versions --rust
assert_success
assert_equal "${lines[0]}" $line_0
assert_equal "${lines[1]}" "0.1 0.2 0.3 0.4 0.5 "
assert_equal "${lines[2]}" "0.6 0.7 0.8 0.9 "
}
| true |
d3e57c0f226eff596d80dd5f3c293080d02c8093 | Shell | robhurring/dotfiles | /config/zsh/lib/util.zsh | UTF-8 | 1,170 | 4 | 4 | [] | no_license | remove_colors() {
sed -E "s/"$'\E'"\[([0-9]{1,3}((;[0-9]{1,3})*)?)?[m|K]//g"
}
push_path(){
if ! echo $PATH | egrep -q "(^|:)$1($|:)" ; then
if [ "$2" = "after" ] ; then
PATH=$PATH:$1
else
PATH=$1:$PATH
fi
fi
}
# vim-like shortenpath() function
# modified from http://stackoverflow.com/questions/25945993/how-to-get-a-vim-tab-like-short-path-in-bash-script
shortenpath2() {
sed -e "s|${HOME}|~|" -e 's:\(/*\)\([^\.]\)[^/]*/:\1\2/:g' <<< $1
}
# awk version of shortenpath.
#
# Usage:
# shortenpath2 <PATH> [TRAILING_SEGMENTS:1] [CHARS:1]
#
# TRAILING_SEGMENTS - How many segments of the path to leave unshortened
# CHARS - How many chars to shorten the path segment to (dots increase this by 1)
#
shortenpath() {
input="${1//$HOME/~}"
awk -F/ -v c=${3:-1} -v t=${2:-1} '
BEGIN{
o=""
};
{
for(i=1; i<=NF; i++){
if(i <= NF - t){
l=c
if($i ~ /\./){
l=c+1
}
o=o sep substr($i,0,l)
} else {
o=o sep $i
}
# only set the slash for the 2+ segments to avoid duping it
sep="/"
}
}
END{
print o
}' <<< $input
}
| true |
85fd2954f8a73cc1bdf6bf9a1eecb87abdcb01e0 | Shell | cunderw/dotfiles | /.zshrc | UTF-8 | 5,610 | 3.140625 | 3 | [] | no_license | # Enable p10k-instant-prompt
# Powerlevel10k instant prompt: sources a cached prompt snippet so a prompt
# is painted immediately while the rest of this file loads.
# ${(%):-%n} is a zsh prompt-escape expansion that yields the current
# username.  Per p10k docs this should stay near the top of .zshrc, before
# anything that may print to the terminal.
if [[ -r "${XDG_CACHE_HOME:-$HOME/.cache}/p10k-instant-prompt-${(%):-%n}.zsh" ]]; then
  source "${XDG_CACHE_HOME:-$HOME/.cache}/p10k-instant-prompt-${(%):-%n}.zsh"
fi
############################
# Environment Setup
############################
export DISABLE_AUTO_TITLE=true
export EDITOR='nvim'
export JAVA_HOME=/opt/homebrew/Cellar/openjdk/18.0.2
export LANG=en_US.UTF-8
# Render other-writable / sticky directories in bold blue instead of the
# hard-to-read default colors.
export LS_COLORS="$LS_COLORS:ow=1;34:tw=1;34:"
export NVM_DIR="$HOME/.nvm"
# Use $HOME rather than a hard-coded /Users/<name> path, consistent with
# every other path in this block.
export PNPM_HOME="$HOME/Library/pnpm"
export TERM="xterm-256color"
export ZPLUG_HOME="$HOME/.zplug"
# GO
export GOPATH="$HOME/go"
# Android
export ANDROID_SDK_ROOT="$HOME/Library/Android/sdk"
# PATH
export PATH=$PATH:$ANDROID_SDK_ROOT/emulator
export PATH=$PATH:$ANDROID_SDK_ROOT/platform-tools
export PATH=$PATH:$GOPATH/bin
export PATH=$PNPM_HOME:$PATH
export PATH=/opt/local/bin:/opt/local/sbin:$HOME/.cargo/bin:$PATH

# Appends every command to the history file once it is executed
setopt inc_append_history
# Reloads the history whenever you use it
setopt share_history
############################
# Plugin Settings
############################
COMPLETION_WAITING_DOTS="true"
DISABLE_UNTRACKED_FILES_DIRTY="true"
ENABLE_CORRECTION="true"
ZSH_AUTOSUGGEST_HIGHLIGHT_STYLE='fg=247'
HISTORY_SUBSTRING_SEARCH_ENSURE_UNIQUE="true"
############################
# Sourced Files / Utilities
############################
for f in ~/.scripts/sourced/*; do
. $f
done
[[ ! -f $HOME/.secrets ]] || source $HOME/.secrets
[[ ! -f $HOME/.cargo/env ]] || source $HOME/.cargo/env
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion" # This loads nvm bash_completion
eval "$(rbenv init - zsh)"
# fix nvim breaking cursor
_fix_cursor() {
echo -ne '\e[5 q'
}
precmd_functions+=(_fix_cursor)
############################
# Aliases
############################
alias cls="colorls --dark"
alias diskspace="du -S | sort -n -r | less"
alias dockspace="defaults write com.apple.dock persistent-apps -array-add '{"tile-type"="spacer-tile";}'; killall Dock"
alias dots="git --git-dir=$HOME/.dotfiles/ --work-tree=$HOME"
alias dotsupdate="dotfiles pull && dotfiles submodule update"
alias glog="git log --all --decorate --oneline --graph"
alias grep="grep --color=auto"
alias l="ls --color=auto"
alias la="ls -lah --color=auto"
alias lh="ls -lh --color=auto"
alias logs="find /var/log -type f -exec file {} \; | grep 'text' | cut -d' ' -f1 | sed -e's/:$//g' | grep -v '[0-9]$' | xargs tail -f"
alias ls="ls --color=auto"
alias pullrepos="for i in */.git; do ( echo $i; cd $i/..; git pull; ); done"
alias up="cd $(eval printf '../'%.0s {1..$1}) && pwd;"
alias zconfig="vim ~/.zshrc"
alias zreload="exec zsh"
alias npmt="npm run test"
############################
# Plugins
############################
# make sure we have zplug installed
if [[ ! -d $ZPLUG_HOME ]]; then
printf 'Install zplug? [y/N]: '
if read -q; then
echo; git clone https://github.com/b4b4r07/zplug ~/.zplug
fi
fi
if [[ -f $ZPLUG_HOME/init.zsh ]]; then
source $ZPLUG_HOME/init.zsh
# oh-my-zsh
zplug "plugins/colored-man-pages", from:oh-my-zsh
zplug "plugins/command-not-found", from:oh-my-zsh
zplug "plugins/common-aliases", from:oh-my-zsh
zplug "plugins/copyfile", from:oh-my-zsh
zplug "plugins/debian", from:oh-my-zsh
zplug "plugins/docker", from:oh-my-zsh
zplug "plugins/docker-compose", from:oh-my-zsh
zplug "plugins/dotenv", from:oh-my-zsh
zplug "plugins/git", from:oh-my-zsh
zplug "plugins/go", from:oh-my-zsh
zplug "plugins/jsontools", from:oh-my-zsh
zplug "plugins/macOS", from:oh-my-zsh
zplug "plugins/node", from:oh-my-zsh
zplug "plugins/npm", from:oh-my-zsh
zplug "plugins/nvm", from:oh-my-zsh
zplug "plugins/pylint", from:oh-my-zsh
zplug "plugins/python", from:oh-my-zsh
zplug "plugins/systemd", from:oh-my-zsh
zplug "plugins/thefuck", from:oh-my-zsh
zplug "plugins/tmux", from:oh-my-zsh
zplug "plugins/vscode", from:oh-my-zsh
zplug "plugins/web-search", from:oh-my-zsh
# zplug "plugins/yarn", from:oh-my-zsh
# prezto
zplug "modules/completion", from:prezto
zplug "modules/directory", from:prezto
# commands
zplug "so-fancy/diff-so-fancy", as:command
# zsh users
zplug "zsh-users/zsh-autosuggestions", defer:2, on:"zsh-users/zsh-completions"
zplug "zsh-users/zsh-syntax-highlighting", defer:3, on:"zsh-users/zsh-autosuggestions"
zplug "zsh-users/zsh-history-substring-search", defer:3, on:"zsh-users/zsh-syntax-highlighting"
# themes / appearance
zplug "romkatv/powerlevel10k", as:theme
if ! zplug check --verbose; then
printf 'Install Plugins? [y/N]: '
if read -q; then
echo; zplug install
fi
fi
zplug load
fi
#############################
# Keybindings
############################
# Keybindings for substring search plugin. Maps up and down arrows.
bindkey '^[[A' history-substring-search-up
bindkey '^[[B' history-substring-search-down
# ctrl+p and ctrl+n for previous next
bindkey "^P" history-substring-search-up
bindkey "^N" history-substring-search-down
# ctrl+space to accept the auto suggestion
bindkey '^ ' autosuggest-accept
# fix zsh bug where backspace breaks after exiting insert mode
bindkey "^?" backward-delete-char
# fix home and end keys
bindkey "^[[1~" beginning-of-line
bindkey "^[[4~" end-of-line
# To customize prompt, run `p10k configure` or edit ~/.p10k.zsh.
[[ ! -f ~/.p10k.zsh ]] || source ~/.p10k.zsh
| true |
# Set the terminal/window title to "$@".
# Inside GNU screen ($STY set) this updates the screen window title and the
# session title; otherwise it emits the standard xterm title escape sequence.
settitle() {
  if [ -z "$STY" ] ; then
    # Plain terminal: xterm "set window title" escape.
    printf "\033]0;%s\007" "$@"
    return
  fi
  # Inside a screen session: announce, set the window title escape, and tell
  # screen itself to retitle the current window.
  echo "Setting screen titles to $@"
  printf "\033k%s\033\\" "$@"
  screen -X eval "at \\# title $@" "shelltitle $@"
}
| true |
e7f013081449674637ef57415d9c120d07633256 | Shell | emorchy/lojban.io | /virtualization/prod-containers/bash.sh | UTF-8 | 316 | 3.46875 | 3 | [] | permissive | #!/usr/bin/env bash
set -e
# Resolve the directory containing this script.
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
# First argument: name of the container to open a shell in.
container="$1"
if [ -z "$container" ]; then
  echo "ERROR: missing container name"
  exit 1
fi
# NOTE(review): SCRIPT_DIR is computed but docker.sh is invoked relative to the
# caller's cwd — confirm whether "$SCRIPT_DIR/docker.sh" was intended.
./docker.sh $DOCKER_OPTS exec -t "$container" /bin/bash
77d5de8286465ad58c71eb9f5eb4ff17206875bd | Shell | lawrencegripper/ion | /tools/test_certs.sh | UTF-8 | 2,272 | 4 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Regenerate TLS certificates (via openssl or terraform), start the management
# API with the new server certs, then exercise them with the ion CLI client.
OUT_DIR=$1 # Directory to write the new certificates to
TOOL=$2 # The certificate generation tool
DNS_NAME=$3 # The DNS name to use to connect the client and server
IMAGE_TAG=$4 # Optional Docker image tag used in terraform
# Kill any process whose name matches $1 (no-op if none is running).
function prockill { P=$(pgrep $1); if [ -n "$P" ]; then kill "$P"; fi }
prockill management
prockill ioncli
set -e # Leave below prockill
echo "------------------------------------"
echo "Generating new certificates using: $TOOL"
echo "------------------------------------"
case "$TOOL" in
    "openssl")
        if [ -z "$DNS_NAME" ];then
            DNS_NAME="localhost"
        fi
        ./tools/generate_certs_openssl.sh "$OUT_DIR" "$DNS_NAME"
        # NOTE(review): with `set -e` active, a non-zero status above aborts the
        # script before this check runs — confirm the intended failure path.
        STATUS=$?
        if [ $STATUS != 0 ]; then
            exit $STATUS
        fi
        echo "------------------------------------"
        echo "Building and running management api with new server certificates"
        echo "------------------------------------"
        # Load env vars from the private env file, build, and start the server
        # in the background with the freshly generated server certificates.
        export $(cat ./.vscode/private.mgmt.env | xargs) && \
        go build ./cmd/management && \
        ./management start \
            --certfile="$OUT_DIR/server.crt" \
            --keyfile="$OUT_DIR/server.key" \
            --cacertfile="$OUT_DIR/rootCA.pem" \
            --hostname="$DNS_NAME" &
        sleep 8 # Give time to start server
        ;;
    "terraform")
        echo "If no existing terraform.tfstate file is present,"
        echo "I will execute a new deployment. Please be patient"
        echo "this can take up to 30 minutes."
        ./tools/generate_certs_terraform.sh "$OUT_DIR" "$IMAGE_TAG"
        STATUS=$?
        if [ $STATUS != 0 ]; then
            exit $STATUS
        fi
        # Terraform writes the deployed hostname into $OUT_DIR/dns.
        DNS_NAME=$(cat "$OUT_DIR/dns")
        ;;
    *)
        echo "Unrecognized tool choice: $TOOL"
        echo "Options are: openssl, terraform"
        exit
        ;;
esac
echo
echo "------------------------------------"
echo "Building ion cli and running with new client certificates"
echo "------------------------------------"
go build -o ioncli ./cmd/ion && \
./ioncli module list \
    --certfile="$OUT_DIR/client.crt" \
    --keyfile="$OUT_DIR/client.key" \
    --cacertfile="$OUT_DIR/rootCA.pem" \
    --endpoint="$DNS_NAME:9000" --timeout 5
rm ioncli > /dev/null 2>&1
rm management > /dev/null 2>&1
7a50e710bbe1d0829a1f538121501da99a5aa1c5 | Shell | osu-particle-astrophysics/icemcQC_keith | /IcemcQCVersion.sh | UTF-8 | 4,454 | 3.609375 | 4 | [] | no_license | #!/bin/bash
## Run by typing . IcemcVersion.sh
####################################################################
## ##
## Author: Khalida Hendricks July 2014 ##
## hendricks.189@osu.edu ##
## ##
## IcemcVersion parses the update log to find the latest ##
## version of Icemc and runs IcemcQC on that version. ## ##
## ##
####################################################################
# Workflow: (1) collect /releases/ entries from logs/updatelog.txt,
# (2) de-duplicate them into versions.txt, (3) compare the dotted version
# strings digit-by-digit to find the newest, (4) hand it to IcemcQCSetup.sh.
echo " "
echo 'starting IcemcQCVersion.sh'
echo " "
rundir=$(gawk '/rundir/{print $2}' setups/IcemcQCParameters.txt)
var_path=`pwd`
echo 'current directory: ' $var_path
echo 'run directory: ' $HOME'/'$rundir
mkdir -p logs
cd logs
##Get JUST the updates for /releases/
gawk '/releases/{print NR, $2}' updatelog.txt > latest.txt
# NOTE(review): `wc -l latest.txt` emits "<count> latest.txt"; the count is
# re-read below by matching the appended line and printing field 6.
count1=$(wc -l latest.txt)
echo 'Number of updates to releases: ' $count1 >> latest.txt
count1=$(gawk '/latest/{print $6}' latest.txt)
echo 'count1 = ' $count1
echo 'Number of updates to releases: ' $count1 >> thisrun.txt
##Extract a list of any versions that were updated
echo " " > versions.txt
for (( i = 1; i <= $count1; i++ ))
do
version=$(gawk -F'[/]' 'NR=='$i'{print $3}' latest.txt)
grep "$version" versions.txt
there=$?
#echo 'version : ' $version
# echo 'check '$i: $there
if [ $there -eq 1 ]; then
echo $version >> versions.txt
fi
done
##versions.txt is a file that lists any updated releases
##There should only be one, but in case there are more
##versions.txt will have them listed
##Now count the versions, and determine which one is the
##latest. This was harder than I thought and is a loooong
##batch of code.
##echos can be taken out once I am happy that it does what I want.
count2=$(wc -l versions.txt)
echo 'Number of versions updated (plus 1): ' $count2 >> latest.txt
count2=$(gawk '/versions/{print $7}' latest.txt)
let count3=count2-1
let count4=count2+1
echo 'Number of versions updated: ' $count3 >> thisrun.txt
echo '# actual versions (count3): ' $count3
echo '# version lines(count4): ' $count4
##count4 should be the number of versions plus 2
# Determine the maximum number of dot-separated fields over all versions so
# the per-digit maxima array M[] can be sized.
field=$(gawk -F'[.]' '{print NF}' versions.txt)
#echo "Fields: " $field
echo "Fields: "$field >> latest.txt
echo "Fields: "$field
max=0
for (( i = 2; i <= $count4; i++ ))
do
numfield=$(gawk '/Fields:/{print $'$i'}' latest.txt)
echo 'numfield '$i': '$numfield
if [ $numfield -gt $max ]; then
max=$numfield
fi
echo 'temp max: '$max
done
echo 'max = '$max
# M[d] tracks the largest value seen so far for dotted-version digit position d.
for (( i = 1; i <= $max; i++ ))
do
M[$i]=0
done
latest=0.0
echo ' '
echo ' '
echo 'for loop starts at loop #2'
#echo $latest
# Compare each candidate version against the running maximum, digit by digit.
for (( i = 2; i <= $count2; i++ ))
do
echo " "
echo " "
echo 'loop #'$i
version=$(gawk 'NR=='$i'{print $1}' versions.txt)
echo 'latest version: ' $latest
echo 'current version: '$version
gotcha=1
dec=1
while [ $gotcha -eq 1 ]
do
echo " "
echo 'dec = '$dec
digit=$(gawk -F'[.]' 'NR=='$i'{print $'$dec'}' versions.txt)
echo 'digit = '$digit
echo 'M['$dec'] = '${M[$dec]}
if [ $digit -gt ${M[$dec]} ]; then
latest=$version
M[$dec]=$digit
gotcha=0
echo 'M['$dec'] changed to '${M[$dec]}
let dec=dec+1
for (( j = $dec; j <= $max; j++ ))
do
echo " "
echo 'dec = '$j
digit=$(gawk -F'[.]' 'NR=='$i'{print $'$j'}' versions.txt)
echo 'digit: ' $digit
if [ $digit -ge 0 ]; then
M[$j]=$digit
echo 'M['$j'] changed to '${M[$j]}
else
echo 'no more digits! unary operator expected'
M[$j]=0
echo 'M['$j'] reset to '${M[$j]}
fi
done
echo " "
echo 'Latest version updated.'
elif [ $digit -lt ${M[$dec]} ]; then
gotcha=0
echo 'This is not the latest version; no update made.'
elif [ $digit -eq ${M[$dec]} ]; then
let dec=dec+1
gotcha=1
echo 'It is a tie...trying the next digit.'
else
echo 'ERROR HERE'
gotcha=0
fi
##This closes the gotcha while
done
##This closes the for while
done
echo 'Latest release version: ' $latest >> thisrun.txt
echo " "
echo " "
echo 'Final answer - the latest version is: ' $latest
echo " "
echo " "
##OK so that was longer/harder that I thought it would be.
##Finally, run IcemcQCSetup.sh on the latest version of Icemc.
cd ..
pwd
. IcemcQCSetup.sh releases/$latest
##The End
| true |
14ee7a22f175d301798e477fd5531fb21e169640 | Shell | Parrows/Parrows | /Eden/examples/sudoku/compile.sh | UTF-8 | 374 | 3.0625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Build each Sudoku benchmark variant with GHC, enabling parallel/MPI runtime
# options and the eventlog for profiling.
programs=(
"parrows-sudoku-parmap"
"parrows-sudoku-parmapstream"
"parrows-sudoku-farm"
"parrows-sudoku-farmChunk"
)
# Iterate the array directly instead of indexing by position.
for progName in "${programs[@]}"
do
# Capture ghc's stdout and echo it once the compile finishes.
tmp=$(ghc "$progName" -parmpi -rtsopts -eventlog -O2)
echo ${tmp}
done
| true |
9728357a06f7e89d9e5ea8e5aecb8d7bbbc65b44 | Shell | onknows/terraform-azure-openshift | /bootstrap.sh | UTF-8 | 1,409 | 3.328125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
set -e
# Refresh, plan, and apply the Terraform deployment, then collect its outputs.
cd terraform
echo "Synchronizing Terraform state..."
terraform refresh -var-file=azure.tfvars -var-file=bootstrap.tfvars
echo "Planning Terraform changes..."
terraform plan -out openshift.plan -var-file=azure.tfvars -var-file=bootstrap.tfvars
echo "Deploying Terraform plan..."
terraform apply openshift.plan
echo "Getting output variables..."
# NOTE(review): only BASTION_IP/ADMIN_USER are referenced below, and only in
# the commented-out install steps — the rest are currently unused.
BASTION_IP=$(terraform output bastion_public_ip)
SERVICE_IP=$(terraform output service_public_ip)
CONSOLE_IP=$(terraform output console_public_ip)
NODE_COUNT=$(terraform output node_count)
MASTER_COUNT=$(terraform output master_count)
INFRA_COUNT=$(terraform output infra_count)
ADMIN_USER=$(terraform output admin_user)
cd ..
# echo "Transfering private key to bastion server..."
# scp -o StrictHostKeychecking=no -i certs/bastion.key certs/openshift.key $ADMIN_USER@$BASTION_IP:/home/openshift/.ssh/id_rsa
# echo "Transfering install script to bastion server..."
# scp -o StrictHostKeychecking=no -i certs/bastion.key scripts/install.sh $ADMIN_USER@$BASTION_IP:/home/openshift/install.sh
# echo "Running install script on bastion server..."
# ssh -t -o StrictHostKeychecking=no -i certs/bastion.key $ADMIN_USER@$BASTION_IP ./install.sh $NODE_COUNT $ADMIN_USER $MASTER_DOMAIN
# echo "Finished!!"
# echo "Console: https://$CONSOLE_IP:8443"
# echo "Bastion: ssh -i certs/bastion.key $ADMIN_USER@$BASTION_IP"
# echo "Router: $SERVICE_IP"
d54a040c6752057c41d0bf37123d845397831330 | Shell | iconix/gith-deployer | /setup.sh | UTF-8 | 443 | 2.78125 | 3 | [] | no_license | echo '>>> Retrieving new list of packages'
sudo apt-get update
echo '>>> Installing nodejs, npm, curl'
sudo apt-get install nodejs npm nodejs-legacy curl
echo '>>> Installing docker'
curl -sSL https://get.docker.io/ubuntu/ | sudo sh
sudo ln -sf /usr/bin/docker.io /usr/local/bin/docker
sudo gpasswd -a ${USER} docker
sudo service docker restart
newgrp docker
echo '>>> Making shell scripts executable'
chmod a+x hook.sh
chmod a+x update.sh
| true |
6672144869218a93300a58a0a1e7dd99bdbfa511 | Shell | zimbatm/ploy-example | /script/slugify | UTF-8 | 557 | 2.875 | 3 | [] | no_license | #!/usr/bin/env bash
set -e
# Build a deployable "slug" of this project into DESTDIR: resolve build
# dependencies, copy the tree, then vendor the runtime gems with bundler.
# Helpers expand_path/fail/depend come from ploy-lib.sh.
. "$(dirname "$0")"/ploy-lib.sh
CACHE_DIR=$(expand_path "$1") # NOTE(review): currently unused below
DESTDIR=$(expand_path "$2")
if [ -z "$DESTDIR" ]; then
# Fix: DESTDIR comes from $2, so the message now names the second argument.
fail "Missing the second argument: the destination path"
fi
cd "$(dirname "$0")"/..
## Build dependencies ##
depend Dependfile build
## Copying the release files ##
rm -rf "$DESTDIR"
mkdir -p "$DESTDIR"
cp -r * "$DESTDIR"
cd "$DESTDIR"
## Installing the runtime dependencies ##
bundle install --path=vendor --binstubs=bin --deployment --standalone --without="development test"
#berks install --path vendor/cookbooks
cb4e954df16a2b9da800ceb2aaf54a6212de157a | Shell | pkeane/misc_scripts | /rid.sh | UTF-8 | 108 | 2.890625 | 3 | [] | no_license | cd $PWD
# Normalise line endings for every regular file in the current directory:
# convert carriage returns to newlines, drop blank lines, and rewrite in place.
# Fixes: iterate the glob instead of parsing `ls` (safe for names with spaces),
# use tr instead of a sed script containing a literal CR byte (whose '\n'
# replacement is not portable), quote expansions, and skip non-regular files.
for file in *
do
  [ -f "$file" ] || continue
  tr '\r' '\n' < "$file" | grep -v '^$' > "$file.bak"
  mv -- "$file.bak" "$file"
done
5d0af4fb093523fa2393da9f2a362344ca2fceef | Shell | jordicenzano/lhls-simple-live-platform | /scripts/transcoding-multirendition-srt.sh | UTF-8 | 4,765 | 3.671875 | 4 | [] | no_license | #!/usr/bin/env bash
if [ $# -lt 1 ]; then
    echo "Use ./transcoding-multirendition-srt.sh test/live [SRTPort] [HLSOutHostPort]"
    echo "test/live: In test generated a test signal"
    echo "SRTPort: SRT listener local UDP port (default: 1935)"
    echo "HLSOutHostPort: Host and to send HLS data (default: \"localhost:9094\")"
    echo "Example: ./transcoding-multirendition-srt.sh live 1935 \"stream\" \"localhost:9094\""
    exit 1
fi
MODE="${1}"
SRT_PORT="${2:-"1935"}"
HOST_DST="${3:-"localhost:9094"}"
PATH_NAME="mrsrt"
STREAM_NAME_720p="720p"
STREAM_NAME_480p="480p"
BASE_DIR="../results/${PATH_NAME}"
LOGS_DIR="../logs"
GO_BINARY_DIR="~/go/bin"
# eval is used here to expand the literal '~' stored in GO_BINARY_DIR.
eval TS_SEGMENTER_BIN="$GO_BINARY_DIR/go-ts-segmenter"
# Check segmenter binary
if [ ! -f $TS_SEGMENTER_BIN ]; then
    echo "$TS_SEGMENTER_BIN does not exist."
    exit 1
fi
# Clean up
echo "Restarting ${BASE_DIR} directory"
rm -rf $BASE_DIR/*
mkdir -p $BASE_DIR
mkdir -p $LOGS_DIR
# Create master playlist (this should be created after 1st chunk is uploaded)
# Assuming source is 1280x720@6Mbps (or better)
# Creating 720p@6Mbps and 480p@3Mbps
echo "Creating master playlist manifest (playlist.m3u8)"
echo "#EXTM3U" > $BASE_DIR/playlist.m3u8
echo "#EXT-X-VERSION:3" >> $BASE_DIR/playlist.m3u8
echo "#EXT-X-STREAM-INF:BANDWIDTH=6000000,RESOLUTION=1280x720" >> $BASE_DIR/playlist.m3u8
echo "$STREAM_NAME_720p.m3u8" >> $BASE_DIR/playlist.m3u8
echo "#EXT-X-STREAM-INF:BANDWIDTH=3000000,RESOLUTION=854x480" >> $BASE_DIR/playlist.m3u8
echo "$STREAM_NAME_480p.m3u8" >> $BASE_DIR/playlist.m3u8
# Upload master playlist
curl "http://${HOST_DST}/${PATH_NAME}/playlist.m3u8" -H "Content-Type: application/vnd.apple.mpegurl" --upload-file $BASE_DIR/playlist.m3u8
# Select font path based in OS
# NOTE(review): FONT_PATH stays unset on other OSTYPE values; the drawtext
# filters below would then reference an empty font file — confirm acceptable.
if [[ "$OSTYPE" == "linux-gnu" ]]; then
    FONT_PATH='/usr/share/fonts/dejavu/DejaVuSans-Bold.ttf'
elif [[ "$OSTYPE" == "darwin"* ]]; then
    FONT_PATH='/Library/Fonts/Arial.ttf'
fi
# Creates pipes
FIFO_FILENAME_720p="fifo-$STREAM_NAME_720p"
mkfifo $BASE_DIR/$FIFO_FILENAME_720p
FIFO_FILENAME_480p="fifo-$STREAM_NAME_480p"
mkfifo $BASE_DIR/$FIFO_FILENAME_480p
# Creates hls producers (one backgrounded segmenter per rendition, fed by fifo)
cat "$BASE_DIR/$FIFO_FILENAME_720p" | $TS_SEGMENTER_BIN -logsPath "$LOGS_DIR/segmenter720p.log" -dstPath ${PATH_NAME} -manifestDestinationType 2 -mediaDestinationType 2 -targetDur 1 -lhls 3 -chunksBaseFilename ${STREAM_NAME_720p}_ -chunklistFilename ${STREAM_NAME_720p}.m3u8 &
PID_720p=$!
echo "Started go-ts-segmenter for $STREAM_NAME_720p as PID $PID_720p"
cat "$BASE_DIR/$FIFO_FILENAME_480p" | $TS_SEGMENTER_BIN -logsPath "$LOGS_DIR/segmenter480p.log" -dstPath ${PATH_NAME} -manifestDestinationType 2 -mediaDestinationType 2 -targetDur 1 -lhls 3 -chunksBaseFilename ${STREAM_NAME_480p}_ -chunklistFilename ${STREAM_NAME_480p}.m3u8 &
PID_480p=$!
echo "Started go-ts-segmenter for $STREAM_NAME_480p as PID $PID_480p"
if [[ "$MODE" == "test" ]]; then
    # Start test signal
    # GOP size = 30f @ 30 fps = 1s
    ffmpeg -hide_banner -y \
        -f lavfi -re -i smptebars=duration=36000:size=1280x720:rate=30 \
        -f lavfi -i sine=frequency=1000:duration=36000:sample_rate=48000 -pix_fmt yuv420p \
        -s 1280x720 -vf "drawtext=fontfile=$FONT_PATH:text=\'RENDITION 720p - Local time %{localtime\: %Y\/%m\/%d %H.%M.%S} (%{n})\':x=10:y=350:fontsize=30:fontcolor=pink:box=1:boxcolor=0x00000099" \
        -c:v libx264 -tune zerolatency -b:v 6000k -g 30 -preset ultrafast \
        -c:a aac -b:a 48k \
        -f mpegts "$BASE_DIR/$FIFO_FILENAME_720p" \
        -s 854x480 -vf "drawtext=fontfile=$FONT_PATH:text=\'RENDITION 480p - Local time %{localtime\: %Y\/%m\/%d %H.%M.%S} (%{n})\':x=10:y=350:fontsize=30:fontcolor=pink:box=1:boxcolor=0x00000099" \
        -c:v libx264 -tune zerolatency -b:v 3000k -g 30 -preset ultrafast \
        -c:a aac -b:a 48k \
        -f mpegts "$BASE_DIR/$FIFO_FILENAME_480p"
else
    # Start multilane transcoder from SRT to TS
    ffmpeg -hide_banner -y \
        -i "srt://0.0.0.0:$SRT_PORT?mode=listener&latency=120000" \
        -s 1280x720 -vf "drawtext=fontfile=$FONT_PATH:text=\'RENDITION 720p - Local time %{localtime\: %Y\/%m\/%d %H.%M.%S} (%{n})\':x=10:y=350:fontsize=30:fontcolor=pink:box=1:boxcolor=0x00000099" \
        -c:v libx264 -tune zerolatency -b:v 6000k -g 30 -preset ultrafast \
        -c:a aac -b:a 48k \
        -f mpegts "$BASE_DIR/$FIFO_FILENAME_720p" \
        -s 854x480 -vf "drawtext=fontfile=$FONT_PATH:text=\'RENDITION 480p - Local time %{localtime\: %Y\/%m\/%d %H.%M.%S} (%{n})\':x=10:y=350:fontsize=30:fontcolor=pink:box=1:boxcolor=0x00000099" \
        -c:v libx264 -tune zerolatency -b:v 3000k -g 30 -preset ultrafast \
        -c:a aac -b:a 48k \
        -f mpegts "$BASE_DIR/$FIFO_FILENAME_480p"
fi
# Clean up: Stop processes
# If the input stream stops the segmenter processes exists themselves
# kill $PID_720p
# kill $PID_480p
| true |
8ef2dcca1dc5b74c567658462df1739b8faf1ce4 | Shell | Vahel123/Abschlussarbeit | /DataAnalyze/KomponnentenTest/Benchmark/DockerContainer/RAM_Test/Benchmark_RAM_Test.sh | UTF-8 | 474 | 2.875 | 3 | [] | no_license | #/bash/bin!
# Benchmark-RAM
# Runs the sysbench memory benchmark 25 times (one run every 30 seconds) and
# appends the relevant sysbench output line (line 18) to a CSV file.
# NOTE(review): the shebang at the top of this file reads "#/bash/bin!" —
# it should be "#!/bin/bash".
# One-shot docker variant kept for reference:
# timeout 30 bash -c -- 'while true; do docker exec -it 89453050352d sysbench --num-threads=1 --test=memory --memory-block-size=1M --memory-total-size=100G run | head -n18 | tail -n1 >> RAM_Test_fuer_Docker_Container_1.csv ;done'
# Fix: replace the `while` + deprecated `expr` counter with an arithmetic for-loop.
for (( i = 1; i <= 25; i++ ))
do
  sysbench --num-threads=1 --test=memory --memory-block-size=1M --memory-total-size=100G run | head -n18 | tail -n1 >> RAM_Test_fuer_Docker_Container.csv
  sleep 30s
done
| true |
e2f492e19fd0aa3fba9205d20d1b00b2d7154da7 | Shell | triton/triton | /pkgs/build-support/build-fhs-chrootenv/init.sh.in | UTF-8 | 530 | 3.4375 | 3 | [
"MIT"
] | permissive | #! @shell@ -e
# Template (@shell@, @name@, @chrootEnv@ are substituted at package build time):
# prepares the FHS chroot environment directory under /run/chrootenv.
chrootenvDest=/run/chrootenv/@name@
# Create some mount points for stuff that must be bind mounted
mkdir -p $chrootenvDest/{nix/store,dev,proc,sys,host-etc,host-tmp,home,var,run}
# Symlink the software that should be part of the chroot system profile
# (var is skipped because a writable mount point was created above).
for i in @chrootEnv@/*
do
    if [ "$i" != "@chrootEnv@/var" ]
    then
        ln -s "$i" "$chrootenvDest"
    fi
done
# Create root folder
mkdir $chrootenvDest/root
# Create tmp folder
mkdir -m1777 $chrootenvDest/tmp
mkdir -m1777 -p /tmp/chrootenv-@name@
| true |
352b5986ee12a4866140de9aed527fc6c3a2f0e2 | Shell | r2600r/cfm-vagrant | /cfm-1x1-vqfx-7srv/scripts/enable_root_login.sh | UTF-8 | 778 | 2.75 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash -eux
#login as root
# NOTE(review): `sudo su -` opens an interactive root shell and blocks until it
# exits; the commands below then run in the original shell — confirm intended.
sudo su -
# Enable root SSH login with password authentication and reload sshd.
sudo sed -e '/PermitRootLogin/ s/^#*/#/' -i /etc/ssh/sshd_config
sudo sed '/^#PermitRootLogin/a PermitRootLogin yes' -i /etc/ssh/sshd_config
sudo sed -i '/^#ListenAddress 0.0.0.0/s/^#//' -i /etc/ssh/sshd_config
sudo sed '/^#PasswordAuthentication/a PasswordAuthentication yes' -i /etc/ssh/sshd_config
sudo systemctl reload sshd
echo "root:c0ntrail123" | sudo chpasswd
sudo iptables -F
# Fix: quote the here-doc delimiter so $PATH, $HOME and ${debian_chroot:+...}
# are written literally into .bash_profile instead of being expanded (and, for
# debian_chroot, dropped) at script run time.
cat << 'EOFF' > .bash_profile
# .bash_profile
# Get the aliases and functions
if [ -f ~/.bashrc ]; then
. ~/.bashrc
fi
# User specific environment and startup programs
HISTSIZE=1000
PS1='${debian_chroot:+($debian_chroot)}\[\033[01;101m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
PATH=$PATH:$HOME/.local/bin:$HOME/bin
export PATH
EOFF
759ead73a0db85087886ac3362559ec90f9e10ad | Shell | alina-grosu/appium-android-custom | /docker/run_x11vnc_server.sh | UTF-8 | 222 | 2.734375 | 3 | [] | no_license | #!/bin/bash
# Launch the selenium entry point in the background, wait for Xvfb, then
# attach an x11vnc server to the virtual display.
./entry_point.sh&
#wait for Xvfb to start
# NOTE(review): `pids` is assigned but never used; pidof's exit status alone
# drives the loop.
until pids=$(pidof Xvfb)
do
    sleep 1
done
# NOTE(review): this writes to foq.txt while the next line uses foo.txt —
# possible typo; confirm which log file name is intended.
echo Trying to start vnc11server > foq.txt
x11vnc -forever -display :99 -auth /tmp/xvfb-run.*/Xauthority > /root/foo.txt
f67d739f46bb962392cc0e07301c4eae3554e897 | Shell | basarevski/INTEXOTHER-newbie | /23_Bash_VCS_Cron/script.sh | UTF-8 | 837 | 3.875 | 4 | [] | no_license | #!/bin/bash
# Snapshot helper for /var/log/syslog: writes first-10/last-10/today-10/cron
# excerpts into ./output_logs, then prunes snapshots older than 10 minutes.
LOG_FILE=/var/log/syslog
LOG_FILE_NAME=syslog
CURRENT_DATE=$(date +"%b %d")
EXECUTION_TIME=$(date +"%H:%M:%S")
OUTPUT_PATH=$PWD/output_logs
# Ensure the output folder exists.
mkdir -p "$OUTPUT_PATH"
# Common prefix shared by all snapshot files produced in this run.
prefix="$OUTPUT_PATH/${EXECUTION_TIME}_${LOG_FILE_NAME}"
if [ -e "$LOG_FILE" ]
then
    # First and last ten lines of the log.
    head -n 10 "$LOG_FILE" > "${prefix}_first-10"
    tail -n 10 "$LOG_FILE" > "${prefix}_last-10"
    # First ten lines starting from the first entry carrying today's date.
    awk -v date="$CURRENT_DATE" '$0 ~ date {p=1} p' "$LOG_FILE" | head -n 10 > "${prefix}_today-10"
    # Every cron-related line.
    grep cron "$LOG_FILE" > "${prefix}_cron-full"
else
    echo "File $LOG_FILE doesn't exist"
fi
# Remove snapshot files older than 10 minutes.
find "$OUTPUT_PATH" -type f -mmin +10 -delete
9c2cd462fc4f2fb85934beb61b94089b5454ba69 | Shell | yangmeyer/Xcode-extensions | /insert-super-call.sh | UTF-8 | 2,539 | 4.1875 | 4 | [] | no_license | #!/bin/sh
# Based on the accepted answer in
# http://stackoverflow.com/questions/3916092/how-to-write-an-xcode-user-script-to-surround-the-string-the-cursor-is-inside-wit
#
# Author:
# - Yang Meyer
#
# Xcode integration:
# - Input: Selection
# - Output: Replace Selection
#
# Suggested keyboard shortcut:
# - Cmd-Alt-Shift-up
#
# Usage:
# - With the blinking cursor inside a method, invoke the user script (using the keyboard
# shortcut or choosing the item from the Scripts menu).
# - You should save the file before you invoke the script.
#
# Ideas for improvement:
# - More succinct pattern-matching using sed, instead of iterating through the characters.
# (Steps 1 and 3 should be just one regex: Look for the method-def pattern and only
# retain the name part, discarding the rest.)
# - Assign return value of super call to accordingly-typed variable, e.g.
# id <#something#> = [super initWithFrame:frame]
# see insert-super-call-tests.sh for input/output values
# Build "[super <selector>];" from an Objective-C method definition line, e.g.
#   '- (void)viewDidLoad {'              -> '[super viewDidLoad];'
#   '- (id)initWithFrame:(CGRect)frame {' -> '[super initWithFrame:frame];'
# Fix: the original echoed $MethodDef unquoted, so definitions containing '*'
# (pointer types such as "(NSString *)") were subject to pathname expansion
# and word splitting; printf '%s' with quoting preserves the input verbatim.
function super_call_from_definition() {
    MethodDef=$1
    # strip parenthesised types (and surrounding whitespace):
    MethodDefWithoutTypes=$(printf '%s\n' "$MethodDef" | sed 's/ *([^)]*) *//g')
    # strip minus/plus and whitespace from the start, and the opening brace:
    MethodDefTrimmed=$(printf '%s\n' "$MethodDefWithoutTypes" | sed 's/^ *[-+] *//' | sed 's/ *{//g')
    # wrap the remaining selector in a super call
    printf '[super %s];\n' "$MethodDefTrimmed"
}
# Xcode user-script body. The %%%{...}%%% tokens are placeholders that Xcode
# substitutes before execution (file path, selection offsets), so this section
# is not runnable stand-alone.
if [ %%%{PBXSelectionLength}%%% -gt 0 ]
then
    echo "This does not work if you select text. Put your cursor inside a String." >&2
    exit
fi
Source=`cat "%%%{PBXFilePath}%%%"`
SelectionStart="%%%{PBXSelectionStart}%%%"
SelectionEnd="%%%{PBXSelectionEnd}%%%"
StringStart=$SelectionStart
# Step 1: Determine position of method definition start:
# move backwards 1 char at a time until you see something like '- (' or '+('
BOOL=1
while [ $BOOL -eq 1 ]
do
    tmpText=`echo "${Source:${StringStart}:3}"`
    if [[ "$tmpText" =~ ^\ *[-+]\ *\( ]]
    then BOOL=0
    else StringStart=$(($StringStart - 1))
    fi
done
# Step 2: Determine position of method definition end:
# move forward until you see '{'
StringStop=$StringStart
BOOL=1
while [ $BOOL -eq 1 ]
do
    tmpText=`echo "${Source:${StringStop}:1}"`
    if [ "$tmpText" = "{" ]
    then BOOL=0
    else StringStop=$(($StringStop + 1))
    fi
done
# Step 3: Use method definition to build super call.
MethodDef=`echo ${Source:${StringStart}:$(($StringStop - $StringStart))}`
SuperCall=`super_call_from_definition "$MethodDef"`
# Replace the (empty) selection with the generated super call.
echo -n "$SuperCall"
| true |
87d64e0fd57dbf0e0bebae357cbe93976f266206 | Shell | serpentcross/challenge-revolut.com | /start.sh | UTF-8 | 766 | 3.359375 | 3 | [] | no_license | #!/bin/bash
# Bash menu: build the project with Maven and/or launch the REST API jar.
# Maven packaging step.
build_app() {
    echo "preparing to build ..."
    mvn package
}
# Launch the packaged REST API.
run_app() {
    echo "Please wait. REST API is being started ..."
    java -jar target/revolut-1.0.jar
}
PS3='Please enter your choice: '
options=("Build" "Run" "Build & Run" "Quit")
select opt in "${options[@]}"
do
    case $opt in
        "Build")
            build_app
            ;;
        "Run")
            run_app
            ;;
        "Build & Run")
            build_app
            run_app
            ;;
        "Quit")
            echo "Thank you for using our service! Bye!"
            break
            ;;
        *) echo invalid option;;
    esac
done
82f960480d474dd118eeb3a948ac213675eeed9a | Shell | romankhar/JavaServerUsabilityTests | /liberty.sh | UTF-8 | 1,474 | 2.96875 | 3 | [] | no_license | #!/bin/bash
# Author: Roman Kharkovski, IBM, blog: http://whywebsphere.com
SERVER_INSTALL_DIR=$SERVER_HOME/wlp-javaee7-8.5.5.7/wlp
INSTANCE_NAME=defaultServer
INSTANCE_DIR=$SERVER_INSTALL_DIR/usr/servers/$INSTANCE_NAME
STD_OUT_FILE=$INSTANCE_DIR/logs/console.log
STOP_LOG_FILE=$STD_OUT_FILE
PORT=9080
START_COMMAND="$SERVER_INSTALL_DIR/bin/server start"
START_MESSAGE="The server $INSTANCE_NAME is ready to run a smarter planet"
STOP_COMMAND="$SERVER_INSTALL_DIR/bin/server stop"
STOP_MESSAGE="The server $INSTANCE_NAME stopped after"
DEPLOY_DIR=$INSTANCE_DIR/dropins
#UNDEPLOY_APP_MESSAGE="CWWKZ0009I"
###############################################
# Delete logs and other stuff from install dir
###############################################
cleanup_server()
{
echo "--->cleanup_server()"
rm -rf $INSTANCE_DIR
echo "<---cleanup_server()"
}
###############################################
# Create new server instance
###############################################
create_server()
{
echo "--->create_server()"
$SERVER_INSTALL_DIR/bin/server create
cp server_configurations/ibm-liberty/server.xml $INSTANCE_DIR
echo "<---create_server()"
}
###############################################
# Just in case we need to do something
###############################################
liberty_HelloWorld_predeploy()
{
echo "<--->liberty_HelloWorld_predeploy(): nothing to do"
}
liberty_HelloWorld_postdeploy()
{
echo "<--->liberty_HelloWorld_postdeploy(): nothing to do"
} | true |
8e917384376deb8e59a9b66ced1c1721127f3bf9 | Shell | farisalasmary/kaldi-ruby-api | /download_models.sh | UTF-8 | 1,118 | 2.75 | 3 | [
"MIT"
] | permissive | #!/bin/bash
echo "Current path: $PWD"
if [ ! -f 0013_librispeech_v1_chain.tar.gz ]; then
echo "Downloading 0013_librispeech_v1_lm.tar.gz"
wget http://kaldi-asr.org/models/13/0013_librispeech_v1_chain.tar.gz
fi
if [ ! -f 0013_librispeech_v1_extractor.tar.gz ]; then
echo "Downloading 0013_librispeech_v1_extractor.tar.gz"
wget http://kaldi-asr.org/models/13/0013_librispeech_v1_extractor.tar.gz
fi
if [ ! -f 0013_librispeech_v1_lm.tar.gz ]; then
echo "Downloading 0013_librispeech_v1_lm.tar.gz"
wget http://kaldi-asr.org/models/13/0013_librispeech_v1_lm.tar.gz
fi
tar -xvzf 0013_librispeech_v1_chain.tar.gz
tar -xvzf 0013_librispeech_v1_extractor.tar.gz
tar -xvzf 0013_librispeech_v1_lm.tar.gz
export dir=exp/chain_cleaned/tdnn_1d_sp && \
export graph_dir=$dir/graph_tgsmall && \
utils/mkgraph.sh --self-loop-scale 1.0 --remove-oov data/lang_test_tgsmall $dir $graph_dir && \
echo 'export train_cmd="run.pl --mem 2G"' > cmd.sh && \
echo 'export decode_cmd="run.pl --mem 4G"' >> cmd.sh && \
echo 'export mkgraph_cmd="run.pl --mem 8G"' >> cmd.sh
echo 'Models were successfully installed!!' | true |
373fb99b9038bf124be8332b4b9110d66c5eb56c | Shell | nkzxw/dotfiles | /bootstrap.sh | UTF-8 | 1,191 | 2.875 | 3 | [] | no_license | #! /usr/bin/env bash
# Install the dotfiles repository into ~/.local and wire it into the user's
# bashrc/vimrc/tmux.conf (idempotent: existing source lines are removed first).
ETC=~/.local/etc
BIN=~/.local/bin
mkdir -p $ETC
mkdir -p $BIN
# git clone repository (pull if it already exists)
cd ~/.local/
if [ -d dotfiles ]; then
    cd dotfiles
    git pull
else
    git clone git@github.com:hanxi/dotfiles.git
    cd dotfiles
fi
cp -rf etc/* $ETC/
cp -rf bin/* $BIN/
cp bootstrap.sh $BIN/
# setup lemonade
# TODO check sys_type
sys_type=linux_amd64
rm -f $BIN/lemonade
ln -s $BIN/lemonade_${sys_type} $BIN/lemonade
mkdir -p ~/.config
cp $ETC/lemonade.toml ~/.config/lemonade.toml
# source init.sh
# sed uses ':' as the address delimiter because $ETC contains slashes.
sed -i "\:$ETC/init.sh:d" ~/.bashrc
echo ". $ETC/init.sh" >> ~/.bashrc
. ~/.bashrc
# for neovim
mkdir -p ~/.config/nvim
cp $ETC/init.vim ~/.config/nvim/init.vim
# source vimrc.vim
touch ~/.vimrc
sed -i "\:$ETC/vimrc.vim:d" ~/.vimrc
echo "source $ETC/vimrc.vim" >> ~/.vimrc
# source tmux.conf
touch ~/.tmux.conf
sed -i "\:$ETC/tmux.conf:d" ~/.tmux.conf
echo "source $ETC/tmux.conf" >> ~/.tmux.conf
# update git config
git config --global color.status auto
git config --global color.diff auto
git config --global color.branch auto
git config --global color.interactive auto
git config --global core.quotepath false
# install vim plug (may be some bugs)
vim +PlugInstall +qall
| true |
06e3b80b86b9762edbb0e637da47bf9588639397 | Shell | rthallisey/atomic-osp-installer | /docker/cinder-app/cinder-volume/start-scripts/tgt-post-start.sh | UTF-8 | 504 | 3.03125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -e
# Post-start hook for the tgt iSCSI daemon: keep it offline while targets are
# loaded from the config, then flip it to ready.
TGTD_CONFIG=/etc/tgt/targets.conf
# Put tgtd into "offline" state until all the targets are configured.
# We don't want initiators to (re)connect and fail the connection
# if it's not ready.
echo "Putting tgt in offline state"
tgtadm --op update --mode sys --name State -v offline
# Configure the targets.
echo "Configuring targets"
tgt-admin -e -c $TGTD_CONFIG
# Put tgtd into "ready" state.
echo "Putting tgtd in ready state"
tgtadm --op update --mode sys --name State -v ready
| true |
1dc0db3976f796ea278294a435d8684bb1f608d2 | Shell | soullivaneuh/dotfiles | /.config/rofi/projects_list.sh | UTF-8 | 623 | 3.015625 | 3 | [] | no_license | #!/usr/bin/env bash
# Append the caller's GitHub / GitLab / self-hosted GitLab project paths
# (prefixed with their host) to the list file given as $1.
list_file=${1}
echo "Fetching GitHub projects..."
hub api --paginate 'user/repos?per_page=100' \
  | jq -r '.[].full_name' \
  | sed -e 's#^#github.com/#' \
  >> "${list_file}"
echo "Fetching GitLab projects..."
LAB_CORE_HOST="https://gitlab.com" LAB_CORE_TOKEN=${GITLAB_TOKEN} lab project list --member --all \
  | sed -e 's#^#gitlab.com/#' \
  >> "${list_file}"
echo "Fetching git.nexylan.net projects..."
LAB_CORE_HOST="https://git.nexylan.net" LAB_CORE_TOKEN=${GITLAB_NEXYLAN_TOKEN} lab project list --member --all \
  | sed -e 's#^#git.nexylan.net/#' \
  >> "${list_file}"
# NOTE(review): this prints the sorted list to stdout; the file itself stays
# unsorted — confirm whether `sort -o "${list_file}" "${list_file}"` was meant.
sort "${list_file}"
| true |
f234e5b7d35b42cabafa117e73c1e335ef2caf75 | Shell | onezos/gpfs | /gpfsmain.sh | UTF-8 | 5,294 | 3.890625 | 4 | [] | no_license | #!/bin/bash
# Show the interactive menu of supported actions.
print_tips()
{
    cat <<'MENU'
======================================================
(1) 安装多个gpfs节点
(2) 增加gpfs节点
(3) 删除所有gpfs节点
(4) 钱包地址存入脚本同级文件file.txt, 自动创建gpfs节点
(Q/q) 退出
======================================================
MENU
}
# Remove every gpfs node directory and data file under /gpfs, then
# confirm on stdout.
gpfs_rm()
{
    rm -rf /gpfs
    cat <<'DONE'
======================================================
所有gpfs文件已经全部删除!
======================================================
DONE
}
# One-time environment setup: install wget/unzip (apt on Ubuntu, yum
# otherwise), create /gpfs, copy the optional wallet-address file that
# sits next to the script, then download the v0.8.1 gpfs binary and
# install it into /usr/local/bin.
function gpfs_init
{
# Crude distro check: `uname -a` mentions Ubuntu on Ubuntu kernels.
lnx_ver=`uname -a|grep 'Ubuntu'`
if [ -n "$lnx_ver" ];then
echo "this is a Ubuntu"
echo $lnx_ver
apt install -y wget
apt install -y unzip
else
# NOTE(review): every non-Ubuntu system is assumed to have yum --
# confirm this script is only run on Ubuntu/CentOS.
echo "this is a centos"
echo $lnx_ver
yum install -y wget
yum install -y unzip
fi
mkdir /gpfs
# file.txt (one wallet address per line) is consumed later by
# gpfs_file_main when present.
if [ -f $(pwd)/file.txt ];then
cp $(pwd)/file.txt /gpfs/
fi
cd /gpfs
wget -O /gpfs/linux_amd64.zip https://github.com/gpfs-group/gpfs-mining/releases/download/v0.8.1/linux_amd64.zip
unzip /gpfs/linux_amd64.zip
chmod 766 /gpfs/linux_amd64/gpfs
cp /gpfs/linux_amd64/gpfs /usr/local/bin/
rm -rf /gpfs/linux_amd64 /gpfs/linux_amd64.zip
}
# Interactively create gpfs nodes from scratch: prompt for how many to
# build, then hand off to gpfs_main with the shared counters set
# (iCnt = first node index, tCnt = last node index; both are globals
# read by gpfs_main).
function gpfs_new
{
echo "===gpfs_new==========================================="
echo "请输入要搭建的gpfs节点数量"
read tCnt
echo "开始创建$tCnt个gpfs节点"
echo "======================================================"
iCnt=1
gpfs_main
}
# Append new gpfs nodes after the ones already present under /gpfs.
# Prompts for the number of nodes to add, then sets the globals
# iCnt (first new index) and tCnt (last index) consumed by gpfs_main.
function gpfs_add
{
echo "====gpfs_add=========================================="
echo "请输入要增加的gpfs节点数量"
read tCnt
echo "开始增加$tCnt个gpfs节点"
echo "======================================================"
# Count existing node entries (gpfs1, gpfs2, ...) without parsing
# `ls -l` output, which is fragile against unusual names.
iCnt=$(find /gpfs -mindepth 1 -maxdepth 1 -name '*gpfs*' | wc -l)
tCnt=$((iCnt + tCnt))
iCnt=$((iCnt + 1))
gpfs_main
}
# Create and start gpfs nodes iCnt..tCnt (globals set by the caller).
# For each node: prompt for a wallet address, initialise a per-node
# IPFS_PATH under /gpfs/gpfsN, rewrite the default ports so nodes do
# not collide, then launch the daemon in the background.
function gpfs_main
{
for ((i = iCnt; i <= tCnt; i++))
do
echo "===gpfs_main=========================================="
echo "请输入你的0x开头的钱包地址:"
read qb
echo "您的钱包地址是$qb"
mkdir /gpfs/gpfs${i}
cd /gpfs/gpfs${i}
export IPFS_PATH=/gpfs/gpfs${i}
echo "设置环境变量IPFS_PATH=$IPFS_PATH"
gpfs daemon --init >& /gpfs/gpfs${i}/output.log
sleep 3
# Single-digit node numbers are zero-padded into 4-digit ports
# (600N/700N/900N).  The old cut-off of `i < 9` sent node 9 through
# the two-digit branch, yielding invalid ports 609/409/509.
if (( i < 10 )); then
sed -i "s/8080/600${i}/g" /gpfs/gpfs${i}/config
sed -i "s/4001/700${i}/g" /gpfs/gpfs${i}/config
sed -i "s/5001/900${i}/g" /gpfs/gpfs${i}/config
else
# NOTE(review): this branch maps 4001->40NN and 5001->50NN while the
# branch above uses 700N/900N -- presumably intentional, but worth
# confirming against the port plan.
sed -i "s/8080/60${i}/g" /gpfs/gpfs${i}/config
sed -i "s/4001/40${i}/g" /gpfs/gpfs${i}/config
sed -i "s/5001/50${i}/g" /gpfs/gpfs${i}/config
fi
sleep 1
nohup gpfs daemon --init --miner-address=$qb > /gpfs/output${i}.log 2>&1 &
echo "gpfs${i}已部署完成,日志查看: tail -f /gpfs/output${i}.log"
echo "======================================================"
done
}
# Batch variant of gpfs_main: create one node per wallet address in
# /gpfs/file.txt (line N -> node N), set up its IPFS_PATH and ports,
# then launch the daemon in the background.
function gpfs_file_main
{
tCnt=$(awk 'END{print NR}' /gpfs/file.txt)
for ((i = 1; i <= tCnt; i++))
do
echo "===gpfs_file_main====================================="
# Wallet address for node i is line i of the address file
# (sed reads the file directly; no `cat |` needed).
qb=$(sed -n "${i}p" /gpfs/file.txt)
mkdir /gpfs/gpfs${i}
cd /gpfs/gpfs${i}
export IPFS_PATH=/gpfs/gpfs${i}
echo "设置环境变量IPFS_PATH=$IPFS_PATH"
gpfs daemon --init >& /gpfs/gpfs${i}/output.log
sleep 3
# Zero-pad single-digit node numbers into 4-digit ports.  The old
# cut-off of `i < 9` put node 9 in the two-digit branch, producing
# invalid ports 609/409/509.
if (( i < 10 )); then
sed -i "s/8080/600${i}/g" /gpfs/gpfs${i}/config
sed -i "s/4001/700${i}/g" /gpfs/gpfs${i}/config
sed -i "s/5001/900${i}/g" /gpfs/gpfs${i}/config
else
# NOTE(review): 4001->40NN / 5001->50NN here vs 700N/900N above --
# presumably intentional; confirm the port plan.
sed -i "s/8080/60${i}/g" /gpfs/gpfs${i}/config
sed -i "s/4001/40${i}/g" /gpfs/gpfs${i}/config
sed -i "s/5001/50${i}/g" /gpfs/gpfs${i}/config
fi
sleep 1
nohup gpfs daemon --init --miner-address=$qb > /gpfs/output${i}.log 2>&1 &
echo "gpfs${i}已部署完成,日志查看: tail -f /gpfs/output${i}.log"
echo "======================================================"
done
}
# Print post-install usage hints (where files live, how to tail logs).
gpfs_help()
{
    cat <<'HELP'
====gpfs_help========================================
文件存放在/gpfs/
查看运行日志:tail -f /gpfs/output1.log, 查看第几个节点就用数字几 
=========================================== Aven7 ===
HELP
}
# Main menu loop: redisplay the menu until the user picks an action.
# Options 1/2/4 terminate the script after finishing; option 3
# (delete everything) returns to the menu; q/Q quits immediately.
while true
do
print_tips
read -p "请输入你的选项(1|2|3|4|q|Q):" choice
case $choice in
1)
# Fresh install: fetch the gpfs binary, then create nodes interactively.
gpfs_init
gpfs_new
gpfs_help
exit
;;
2)
# Add nodes after the ones already under /gpfs.
gpfs_add
exit
;;
3)
gpfs_rm
;;
4)
# Batch mode: one node per wallet address listed in ./file.txt.
gpfs_init
gpfs_file_main
gpfs_help
exit
;;
q|Q)
exit
;;
*)
echo "Error,input only in {1|2|3|4|q|Q}"
;;
esac
done | true |
b876f97ae4bc421b4db638daa9ee84add5bf4c88 | Shell | davekreut/docker-apache-php-5.3 | /build.sh | UTF-8 | 353 | 3.09375 | 3 | [] | no_license | #!/bin/bash
# Build the koder/apache-php-5.3 docker image, tagged with the version
# field from package.json, and remind the user to push it.
set -e

NAME='koder/apache-php-5.3'
# -m1 stops grep at the first matching line, so a second occurrence of
# the word "version" in package.json cannot corrupt the tag.
VERSION=$(grep -m1 version package.json | sed -e 's/.*version.*"\(.*\)".*/\1/')
REPO=${NAME}

echo -e "\033[0;32m*** Building docker image\033[0m"
docker build -t "${REPO}:${VERSION}" .

echo -e "\033[0;32m*** Remember to push image to repository:\033[0m"
echo -e "\n\033[0;33mdocker push ${REPO}:${VERSION}\033[0m\n"
| true |
6610949f0f32ade8d6919f30a71cede798ef711c | Shell | ablimit/hadoopgis | /partition/fixedgrid/dopais.sh | UTF-8 | 788 | 3.265625 | 3 | [] | no_license | #! /bin/bash
# Run the fixed-grid PAIS partitioner across a range of grid cell
# sizes.  For each size not yet processed, derive the x/y split counts
# from the fixed 110592x57344 extent and run ./mapper for every image
# and both markup algorithms.

opath=/mnt/scratch1/aaji/fixedgrid/pais/wbo
ipath=/mnt/scratch1/aaji/algo

for size in 128 256 512 768 1024 2048 4096 8192 16384
do
    # Skip sizes that already have an output directory.
    if [ ! -e "${opath}/grid${size}" ]
    then
        mkdir -p "${opath}/grid${size}"

        # $((...)) already yields the quotient; the old `expr $((...))`
        # wrapper only forked an extra process to echo the same number.
        xsplit=$((110592 / size))
        ysplit=$((57344 / size))
        echo "Grid size: ${xsplit}x${ysplit}"

        for f in astroII.1 astroII.2 gbm0.1 gbm0.2 gbm1.1 gbm1.2 gbm2.1 gbm2.2 normal.2 normal.3 oligoastroII.1 oligoastroII.2 oligoastroIII.1 oligoastroIII.2 oligoII.1 oligoII.2 oligoIII.1 oligoIII.2
        do
            echo "FixedGrid for $f: "
            for algo in 1 2
            do
                ./mapper -p "$f" -w 0 -s 0 -n 57344 -e 110592 -x ${xsplit} -y ${ysplit} -d pais < "${ipath}${algo}/${f}.markup.ablet.${algo}" > "${opath}/grid${size}/${f}.markup.${algo}"
            done
        done
    fi
done
| true |
6b56ff391d1bffe55cd22efe00a5c01a83dd4052 | Shell | jeremygold/config-gold | /.zshrc | UTF-8 | 2,517 | 2.703125 | 3 | [] | no_license | # Path to your oh-my-zsh configuration.
ZSH=$HOME/.oh-my-zsh
ZSH_THEME="clean"
# alias slic3r="/home/jeremy/3dPrinting/Slic3r/bin/slic3r"
# alias netfabb="/home/jeremy/3dPrinting/netfabb-basic/netfabb"
# Shortcut aliases; `tmux` always attaches (-2 forces 256 colours).
alias tmux="tmux -2 a"
alias gpoh="git push origin HEAD"
alias gpfoh="git push --force origin HEAD"
alias gfl="git fetch && git l"
alias c="clear"
alias logout="killall StartDwm"
plugins=(git)
source $ZSH/oh-my-zsh.sh
# Customize to your needs...
# Linux version
# export PATH=$HOME/MyDocs/bin:$PATH
# Mac version
export PATH=$PATH:$HOME/MyDocs/bin:$HOME/Library/Python/2.7/bin
# Set up vcs_info Ref http://arjanvandergaag.nl/blog/customize-zsh-prompt-with-vcs-info.html
autoload -Uz vcs_info
# Style configuration for vcs_info. Ref https://github.com/jleclanche/dotfiles/blob/master/.zshrc
zstyle ":vcs_info:*" enable git
zstyle ":vcs_info:(git*):*" get-revision true
zstyle ":vcs_info:(git*):*" check-for-changes true
# Prompt fragments: %b branch, %r repo, %i revision, %a action,
# %c/%u staged/unstaged markers (marker strings set just below).
local _branch="%{$fg[green]%}%b%{$reset_color%}%c%u%m"
local _repo="%{$fg[green]%}%r %{$fg[yellow]%}%{$reset_color%}"
local _revision="%{$fg[yellow]%}%.7i%{$reset_color%}"
local _action="%{$fg[red]%}%a%{$reset_color%}"
zstyle ":vcs_info:*" stagedstr "%{$fg[yellow]%}✓%{$reset_color%}"
zstyle ":vcs_info:*" unstagedstr "%{$fg[red]%}✗%{$reset_color%}"
zstyle ":vcs_info:git*" formats "($_branch)"
zstyle ":vcs_info:git*" actionformats "($_branch:$_action)"
zstyle ':vcs_info:git*+set-message:*' hooks git-stash
# Refresh git status info before every prompt.
precmd() {
vcs_info
}
# Configure prompts
PROMPT='%{$fg[grey]%}%B%n@%m%b%{$reset_color%}:%{$fg[blue]%}%B%c/%b%{$reset_color%} ${vcs_info_msg_0_} %(!.#.$) '
RPROMPT='%t'
# Vim setup - ref http://www.drbunsen.org/the-text-triumvirate/
# Use vim keybindings
export EDITOR="vim"
bindkey -v
# Backup function. Copied from http://arjanvandergaag.nl/blog/customize-zsh-prompt-with-vcs-info.html
# https://github.com/grml/grml-etc-core/blob/master/etc/zsh/zshrc
# bk <file>: copy <file> to <file>_<ISO timestamp>.
# NOTE(review): `date --iso-8601` is GNU-only -- confirm this rc file is
# never used with BSD/macOS date.
bk() {
cp -a "$1" "${1}_$(date --iso-8601=seconds)"
}
# vi style incremental search
bindkey '^R' history-incremental-search-backward
bindkey '^S' history-incremental-search-forward
bindkey '^P' history-search-backward
bindkey '^N' history-search-forward
export ANDROID_HOME=~/Library/Android/sdk
export PATH=$PATH:$ANDROID_HOME/platform-tools
# Last field of the default route lookup = this machine's primary IP.
export local_ip=`ip route get 1 | awk '{print $NF;exit}'`
# Initialize ROS
# source /opt/ros/kinetic/setup.zsh
# export ROS_IP=$local_ip
# export ROS_MASTER_URI=http://$local_ip:11311
# Start/attach ssh-agent via keychain and load its environment.
/usr/bin/keychain $HOME/.ssh/id_rsa
source $HOME/.keychain/`hostname`-sh
| true |
d5d8932f52645588fc04c0aafdbccfdccefcbbbc | Shell | cescmentationfolch/Codeforces | /Contest/compile.sh | UTF-8 | 282 | 3 | 3 | [] | no_license | num=${PWD##*/}
# Coloured status for compiling contest problem $1: builds
# $1/$1<contest>.cc (contest number = cwd name, stored in $num above)
# into $1/$1.out.  Diagnostics go to $1/Debug.txt; exit status mirrors
# the compile result.

# $'...' stores the real ESC byte; the old '\033[...]' string literals
# were printed verbatim because plain bash `echo` does not interpret
# backslash escapes.
GREEN=$'\033[0;32m'
RED=$'\033[0;31m'
NC=$'\033[0m'

if g++ "${1}/${1}${num}.cc" -DLOCAL -O2 -o "${1}/${1}.out" 2> "${1}/Debug.txt"; then
    echo "Problem ${1}${GREEN} compiled succesfully${NC}"
    exit 0
else
    echo "Problem ${1}${RED} didn't compile${NC}"
    exit 1
fi
| true |
8330e465e85e7b48ae778af1bf6a6d243728eceb | Shell | afresh1/flashrd | /flashrd | UTF-8 | 5,771 | 3.875 | 4 | [] | no_license | #!/bin/ksh
#
# flashrd initializer with default vnd image destination (or disk destination on demand)
#
# Chris Cappuccio <chris@nmedia.net>
date=`date +%Y%m%d`
arch=`uname -m`
vers=1.3
# Export everything the helper scripts (mkrdroot/mkboot/mkkern/mkdist)
# read from the environment.
typeset -x device rootdevice blocks rdroot dest vnddirs vndsize
typeset -x tardirs mfsdirs cylinders trackscylinder sectorstrack
typeset -x bytessec vnd distloc tmpmnt TMPDIR elfrdsetrootdir
device=vnd3 # least likely to conflict ?
rootdevice=vnd2 # used to mount flash vnd for writing of stuff inside
blocks=4600 # blocks to reserve for kernel ramdisk
rdroot=rd.$arch-$date # ramdisk root fs image
dest=flashimg.$arch-$date # final product
# See ./etc/rc.flashrd.sub to define tardirs, vnddirs, mfsdirs
#
. ./etc/rc.flashrd.sub
# Partition letters assigned positionally to each directory in $vnddirs.
set -A part a d e f g h i j k l m n o p
# C/H/S for default "1GB" image, used by mkboot
#
# This is specified to get 63 sectors/track, therefore boot
# blocks work properly on most "disks" without resizing image!
#
# Completely ignored if -disk is specified
cylinders=968
trackscylinder=32
sectorstrack=63
#
# Normally 512 (but, who knows, times they are a changin'!)
bytessec=512
# Set to true when the device is an image file false when a device is used.
vnd=true
# Shared helpers (c wrapper, umountwait, t2, ...) come from here.
. ./flashrd.sub
# Attach the flash image file ($2) to the root vnd device ($1); a no-op
# when writing directly to a physical disk ($vnd != true).
vncfgroot() {
[[ $vnd == true ]] && c 0 vnconfig "$1" "$2"
}
# Detach the flash image from the root vnd device ($1); a no-op when a
# real disk is the destination.
vnuncfgroot() {
[[ $vnd == true ]] && c 0 vnconfig -u "$1"
}
# Print command-line help.  The C/H/S defaults shown are the globals
# set above (ignored when -d selects a real disk).
usage() {
cat <<EOF
Usage: flashrd [options] <openbsd base>
flashrd will create flash image file in absence of a disk name.
Write direct to disk:
-d "dev" Simple disk name (such as wd3 or sd2)
Write to disk image:
-c n Number of cylinder (default: $cylinders)
-h n Number of heads (tracks/cylinder) (default: $trackscylinder)
-s n Number of sectors/track (default $sectorstrack)
Other options:
-e "dir" directory where elfrdsetroot.c is located
EOF
}
# Parse command-line flags; t2 (from flashrd.sub) is used on each
# option argument before it is accepted.
while :
do
case $1 in
-\?) usage; exit 0;;
-d | -disk) t2 "$2"; rootdevice="$2"; vnd=false; shift 2;;
-c) t2 "$2"; cylinders="$2"; shift 2;;
-h) t2 "$2"; trackscylinder="$2"; shift 2;;
-s) t2 "$2"; sectorstrack="$2"; shift 2;;
-e) t2 "$2"; elfrdsetrootdir="$2"; shift 2;;
--) shift; break;;
-*) usage; exit 1;;
*) break;;
esac
done
# Exactly one positional argument: the unpacked OpenBSD base directory.
if [ $# -ne 1 ]
then
usage
exit 1
else
distloc=$1
fi
echo "flashrd $vers chris@nmedia.net"
echo
# Sanity-check that $distloc is really an unpacked etcXX+baseXX tree,
# extracted as root with permissions preserved (setuid passwd intact).
if [ ! -d "$distloc" ]; then
echo % $distloc is not a directory
exit 1
else
if [ ! -f "$distloc"/etc/services ]; then
echo % $distloc does not contain an unpacked etcXX.tgz file
exit 1
fi
if [ ! -f "$distloc"/bin/ksh ]; then
echo % $distloc does not contain an unpacked baseXX.tgz file
exit 1
fi
if [ ! -u "$distloc"/usr/bin/passwd ]; then
echo "% $distloc was not unpacked with tar p flag (to preserve permissions),"
echo "or it was not unpacked as root (to allow set ownership)"
exit 1
fi
fi
if [[ -n $elfrdsetrootdir && ! -d $elfrdsetrootdir ]]; then
echo % elfrdsetrootdir $elfrdsetrootdir not found
fi
# mount/vnconfig below require root.
if [ `id -u` != 0 ]; then
echo Sorry, mount, vnconfig, and friends require root privileges
exit 1
fi
###
#
# fall back
# Numbered cleanup handlers used on failure.  Error paths below invoke
# them in descending order (e.g. "3; 2; 1; 0;"), unwinding whatever has
# been set up so far (unmounts, vnd detaches); 0 reports TMPDIR and
# exits.  NOTE(review): the `c N cmd` wrapper from flashrd.sub
# presumably runs handlers N..0 when cmd fails -- confirm in
# flashrd.sub.
4() {
umount $tmpmntvnd
}
3() {
vnconfig -u $device
}
2() {
umount $tmpmnt
}
1() {
vnuncfgroot $rootdevice
}
0() {
echo % TMPDIR: $TMPDIR
exit 1
}
###
#
# (Re-)Base TMPDIR to a subdiretory of either the current
# TMPDIR value, or to /tmp
TMPDIR=$(c 0 mktemp -t -d flashrd.XXXXXX)
###
#
# generate kernel ramdisk
c 0 ./mkrdroot
# $rdroot should now have a ramdisk image
###
#
# generate boot image
c 0 ./mkboot
# $dest should now have a boot image
# Mount the freshly built flash image so the kernel and filesystem
# images can be written into it.
tmpmnt=$TMPDIR/tmpmnt
c 0 mkdir $tmpmnt
vncfgroot $rootdevice $dest
c 1 mount /dev/"$rootdevice"a $tmpmnt
###
#
# generate kernel
c 2 ./mkkern
###
#
# generate vnd, tar files
c 2 ./mkdist
###
#
# Build fstab
# The target boots from a ramdisk root (rd0a); each $vnddirs entry
# becomes a vnd0 partition, each $tardirs/$mfsdirs entry an mfs mount.
tmpfstab=$TMPDIR/fstab
cat <<-EOF >$tmpfstab
/dev/rd0a / ffs rw 1 0
EOF
x=0
for i in $vnddirs; do
# ksh alternate case syntax: `case word { ... }`.
case $i {
sbin)
opts=noatime,nodev
;;
usr)
opts=noatime,nodev
;;
*)
opts=noatime,nodev,nosuid
;;
}
echo "/dev/vnd0${part[$x]} /$i ffs rw,$opts 1 0" >> $tmpfstab
((x++))
done
x=0
if [ ! -z "$tardirs" ]; then
for i in $tardirs; do
echo "swap /$i mfs rw,nodev,nosuid,-s${tarsize[$x]} 0 0" >> $tmpfstab
((x++))
done
fi
# A list/array length mismatch would index past tarsize: unwind and die.
if [ $x -ne ${#tarsize[*]} ]; then
echo "% \$tardirs count ($x) different than tarsize array count ${#tarsize[*]}, aborting"
2; 1; 0;
fi
x=0
if [ ! -z "$mfsdirs" ]; then
for i in $mfsdirs; do
# NOTE(review): this bare `echo` writes a blank line to stdout, not to
# $tmpfstab -- looks like leftover debug output; confirm before removing.
echo
echo "swap /$i mfs rw,nodev,nosuid,-s${mfssize[$x]} 0 0" >> $tmpfstab
((x++))
done
fi
if [ $x -ne ${#mfssize[*]} ]; then
echo "% \$mfsdirs count ($x) different than mfssize array count ${#mfssize[*]}, aborting"
2; 1; 0;
fi
###
#
# Copy in fstab, etc/rc.conf.local, bin/ro bin/rw to vnd
tmpmntvnd=$TMPDIR/tmpmntvnd # openbsd.vnd image mount point
c 2 mkdir $tmpmntvnd
c 2 vnconfig $device $tmpmnt/openbsd.vnd
###
#
# map $vnddirs etc to a partition label
# Partition letters were handed out positionally from $part when the
# fstab was built; recover the letters for etc and bin here.
x=0
for i in $vnddirs; do
if [ $i == etc ]; then
etcpart=${part[$x]}
fi
if [ $i == bin ]; then
binpart=${part[$x]}
fi
((x++))
done
if [ -z "$etcpart" -o -z "$binpart" ]; then
echo "% missing etc and/or bin in \$vnddirs ($vnddirs) aborting"
3; 2; 1; 0;
fi
###
#
# mount, copy, umount etc files
c 3 mount /dev/$device$etcpart $tmpmntvnd
c 4 cp etc/rc.local etc/rc.conf.local etc/rc.shutdown etc/rc.flashrd.conf etc/rc.flashrd.local etc/rc.flashrd.shutdown etc/rc.flashrd.sub $tmpmntvnd/
c 4 mv $tmpfstab $tmpmntvnd/fstab
c 4 "echo $vers > $tmpmntvnd/.flashrd_version"
umountwait 3 $tmpmntvnd
###
#
# mount, copy, umount bin files
c 3 mount /dev/$device$binpart $tmpmntvnd
c 4 cp bin/ro bin/rw $tmpmntvnd/
umountwait 3 $tmpmntvnd
###
#
# done with the main disk vnd
c 2 vnconfig -u $device
umountwait 1 $tmpmnt
###
#
# done with flash img vnd
vnuncfgroot $rootdevice
rm -r $TMPDIR
echo -n Done
[[ $vnd == true ]] && echo -n " (image $dest)"
echo
| true |
8887cfdb60d0ee0b1a255aba76b6e05fb5b83999 | Shell | mnl-jmnz/pytest-loop | /pytest-loop.sh | UTF-8 | 972 | 3.671875 | 4 | [] | no_license | # get newest file
# get_newest_file: locate the most recently modified file under the cwd.
# Globals set: tm (epoch mtime of the newest file), name (its path).
# NOTE(review): `stat -f "%m %N"` is the BSD/macOS flavour; GNU stat
# would need `-c "%Y %n"` -- confirm the target platform.
function get_newest_file() {
    newest=$(find . -type f -print0 | xargs -0 stat -f "%m %N" | sort -rn | head -1)
    tm=$(echo "$newest" | cut -d' ' -f1)
    # -f2- keeps the whole remainder of the line, so paths containing
    # spaces are no longer truncated at the first space (old -f2 was).
    name=$(echo "$newest" | cut -d' ' -f2-)
}
last_tm=0
# $'...' stores real escape bytes; with the old plain-string form,
# bash `echo` printed '\033[0;37m' literally instead of colouring.
LIGHT_GRAY=$'\033[0;37m'
NO_COLOR=$'\033[0m' # No Color

# Poll once per second.  Whenever the newest mtime advances, re-run the
# test suite; if this script itself changed, re-exec the new version.
while true; do
	get_newest_file
	if [ "$last_tm" -lt "$tm" ];
	# some file has been modifed
	then
		if [ "$last_tm" != 0 ];
		# it's not the first time
		then
			echo "${LIGHT_GRAY}File \"$name\" modified${NO_COLOR}"
			if [ "$name" == "$0" ];
			# modified file is this script
			then
				echo "${LIGHT_GRAY}Re-run script!${NO_COLOR}"
				./$(basename "$0") && exit
			fi
		fi
		# update last file timestamp
		last_tm=$tm
		python manage.py test
	# no file modified
	else
		# wait for one second, then run loop again
		sleep 1
	fi
done
| true |
c86b2ca237b675fec5c66adcd4641c56ea0c6d50 | Shell | aarontran/g309.2-0.6 | /xmm/bin/check_xmmregions | UTF-8 | 1,169 | 3.59375 | 4 | [] | no_license | #!/bin/bash
# As currently coded, MUST run from XMMPATH
# One-off check script
# For each exposure of the current observation ($SAS_OBSID), filter its
# cleaned event list through the region expression for label $1 and
# write a regcheck FITS event list into the cwd.
if [[ "$#" -ne 1 ]]; then
echo "ERROR: one argument (region label) required"
exit 1
fi
RSTEM=$1
# Exposure lists are hard-coded per supported observation ID.
if [[ "$SAS_OBSID" == "0087940201" ]]; then
exposures="mos1S001 mos2S002 pnS003"
elif [[ "$SAS_OBSID" == "0551000201" ]]; then
exposures="mos1S001 mos2S002"
else
# NOTE(review): this branch also triggers for a non-empty but unknown
# SAS_OBSID, not only a null one -- the message is slightly misleading.
echo "Got SAS_OBSID null, please re-run sasinit {obsid}"
exit 253
fi
for exp in $exposures; do
regfile="$XMM_PATH/regs/${SAS_OBSID}/reg_${exp}_${RSTEM}.txt"
out="regcheck_${SAS_OBSID}_${exp}_${RSTEM}.fits" # Dump to cwd
if [[ ! -e $regfile ]]; then
echo "No region file $regfile found, did you run make_xmmregions?"
exit 254
fi
if [[ -e $out ]]; then
echo "Output file ${out} exists, please remove!"
exit 255
fi
# The region expression is appended verbatim to the evselect filter.
regexpr="$(cat ${regfile})"
# Output images are kinda funky -- not sure why
# So I just make event lists, simpler.
evselect table="${SAS_REPRO}/${exp}-clean.fits:EVENTS" \
filteredset="${out}" filtertype=expression expression="(PATTERN<=12)&&(FLAG == 0)$regexpr" \
withfilteredset=yes keepfilteroutput=yes updateexposure=yes filterexposure=yes
done
| true |
f8b9da40eb1ebdfccd42b1d3ba0699e4cdd90148 | Shell | Easwer/shell | /file_manipulation/file_manipulation.sh | UTF-8 | 1,795 | 4.0625 | 4 | [] | no_license | #!/bin/bash
########################################################################
# Script Name : File manipulation
# Description : Read a pipe-delimited export, drop its header/footer
#               rows and emit selected columns as a CSV file.
# Author      : Easwer AP
########################################################################
logfile=/Drive1/playground/shell/shell.log
input=/Drive1/playground/shell/input.txt
output=/Drive1/playground/shell/output.csv
inputTmp=/Drive1/playground/shell/input.txt.bak
outputTmp=/Drive1/playground/shell/output.csv.bak

# Timestamped logger; output is identical to the repeated inline
# `echo $(date)": ..."` calls it replaces.
log() {
    echo "$(date): $*" >> "$logfile"
}

log "Starting Script..."
if [[ -r $input ]]; then
    log "File $input exists..."
    log "Reading file $input..."
    log "Creating backup file..."
    # Work on a backup so the original export stays untouched.
    cp "$input" "$inputTmp"
    # Remove header (first line) and footer (last line) in place.
    sed -i '1d;$d' "$inputTmp"
    # Preserve any previous output before regenerating it.
    if [[ -e $output ]]; then
        log "File $output exists..."
        cp "$output" "$outputTmp"
        rm -f "$output"
        log "Deleting file $output..."
        touch "$output"
    fi
    log "Creating file $output..."
    log "Processing data..."
    echo "organizationid, username, firstname, lastname, mobilephone" >> "$output"
    # Convert each pipe-delimited record into a CSV row.
    while IFS= read -r line
    do
        IFS='|' read -ra contents <<<"$line"
        # Indices are 0-based field positions in the source export.
        echo "350,${contents[1]},${contents[5]},${contents[7]},${contents[38]}" >> "$output"
    done < "$inputTmp"
else
    log "File $input does not exists..."
fi
log "Ending Script..."
| true |
ad9f8c62256b8c07bac09710fdf2bab80698709f | Shell | roya-lb/lightbits | /collect.sh | UTF-8 | 481 | 2.75 | 3 | [] | no_license | #!/bin/bash
# Print a short hardware/network inventory for this host (root is
# needed for fdisk and a complete lsof listing).
echo -n "Name: "
uname -n
echo -n "CPUs: "
grep "physical id" /proc/cpuinfo | sort | uniq | wc -l
echo -n "Cores: "
grep -c "^processor" /proc/cpuinfo
echo -n "Network: "
lspci | egrep -i --color 'network|ethernet|wireless|wi-fi' | awk '{$1= ""; print $0}'
echo -n "Open Ports: "
lsof -i -P -n | grep LISTE | awk '{print $9}' | sort | uniq | awk -F ":" '{print $2}' | sort -n | tr '\n' ', '
echo
echo -n "Disk(s): "
# The old pipeline grepped for the unquoted pattern /dev/sd? -- the
# shell could glob it, and as a regex it only matches a literal '?',
# so fdisk's "Disk /dev/sdX" summary lines were never printed.
fdisk -l | grep -i 'disk /dev/sd'
| true |
b1ccc08efd0ab653399827f1b331f46bded734d7 | Shell | yumaatt/dotfiles | /setup.sh | UTF-8 | 1,339 | 3.21875 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# Symlink every dotfile in this repo into $HOME, skipping VCS metadata
# and entries handled specially below, then wire up oh-my-zsh and
# neobundle submodules.
cd $(dirname $0)
for dotfile in .?*; do
case $dotfile in
*.elc)
continue;;
..)
continue;;
.git)
continue;;
.gitconfig.local)
continue;;
.gitignore_gen)
continue;;
.gitfiles)
continue;;
.gitmodules)
continue;;
.ssh)
continue;;
.vim)
continue;;
.zsh)
continue;;
.bashrc)
continue;;
*)
#ln -Fis "${PWD}/${dotfile}" $HOME
# NOTE(review): -F is a BSD/macOS ln flag; GNU coreutils would need
# -T/-n instead -- confirm the target platform.
ln -Ffs "${HOME}/.dotfiles/${dotfile}" $HOME
;;
esac
done
# Machine-local git config is copied (not linked) so it can be edited.
if [ ! -f ~/.gitconfig.local ]; then
cp .gitconfig.local ~/
echo 'cp .gitconfig.local ~/'
fi
#if [ ! -L ~/bin ]; then
# ln -Ffs "${PWD}/bin" $HOME
#fi
if [ ! -L ~/.oh-my-zsh ]; then
#ln -Fis "${PWD}/modules/oh-my-zsh" "$HOME/.oh-my-zsh"
ln -Ffs "${HOME}/.dotfiles/modules/oh-my-zsh" "$HOME/.oh-my-zsh"
fi
if [ ! -L ~/.dotfiles ]; then
ln -Ffs ${PWD} "$HOME/.dotfiles"
fi
if [ ! -L ~/.dotfiles/.vim/bundle/neobundle.vim ]; then
ln -Ffs "$HOME/.dotfiles/modules/neobundle.vim" "$HOME/.dotfiles/.vim/bundle/neobundle.vim"
fi
git submodule init
git submodule update
#vim -c ':NeoBundleInstall' -c ':q!' -c ':q!'
echo 'please edit ~/.gitconfig.local'
| true |
26914b186913469cdc877a394da2e91c69951f48 | Shell | synergiance/dotfiles | /scripts/ytdlplaylistmus | UTF-8 | 1,054 | 3.40625 | 3 | [] | no_license | #!/bin/bash
# Downloads a youtube playlist and keeps it in sync
# Please use a list URL rather than a video from said playlist
# Options are configurable
#
# Author: Synergiance

url="https://www.youtube.com/playlist?list=PL7tPIoWoeE1mBgi-KUwbtkYwLRFG5inDS"
dir=$HOME/Music/yt
#cachedir=$HOME/.ytdl/cache
archive="$HOME/.ytdl/archive01.txt"
opts="--yes-playlist -f mp3/bestaudio --download-archive $archive"

# Locate youtube-dl and check immediately.  The old code tested $? four
# assignments later, by which point it was always 0, so a missing
# binary was never detected.
bin=$(command -v youtube-dl)
if [[ -z "$bin" ]]
then
	echo "Could not find youtube-dl"
	exit 1
fi

ext=webm
opt="-acodec libmp3lame -ab 192k -hide_banner -n"
next=mp3

#if [[ ! -d $cachedir ]]
#then
#	mkdir -p $cachedir
#fi

if [[ ! -d "$dir" ]]
then
	mkdir -p "$dir"
fi

# cd $cachedir
# Guard the cd: if it failed, the `rm *.webm` at the end would run in
# whatever directory we happened to start from.
cd "$dir" || exit 1

echo "$bin $opts \"$url\""
$bin $opts "$url"

ffbin=$(command -v ffmpeg)
id3bin=$(command -v id3tag)

#for f in $cachedir/*
#do
#	stuff
#done

# Transcode each downloaded file to mp3 and tag it.
for f in "$dir"/*.$ext
do
	echo "Processing $f..."
	nf=${f%.*}
	$ffbin -i "$f" $opt "$nf.$next"
	$id3bin -aNightcore -AYouTube -gNightcore "$nf.$next"
	$id3bin --song="$nf" "$nf.$next"
done
rm -f -- *.$ext

/usr/bin/mpc update
| true |
2ed7203da315aedc37dc6dbbf8fae2e22c2fdade | Shell | PsymonLi/sw | /tools/docker-files/install/initscript | UTF-8 | 3,635 | 3.203125 | 3 | [] | no_license | #!/bin/sh
# /usr/pensando/bin on the host (or VM) is mapped to
# /host/usr/pensando/bin inside this container: stage the statically
# linked binaries (and systemd unit files) that must run on the host,
# along with their config files.
mv -f /target/usr/bin/systemd-docker /host/usr/pensando/bin/systemd-docker
mv -f /target/usr/bin/kubelet /host/usr/pensando/bin/kubelet
mv -f /target/usr/bin/kubectl /host/usr/pensando/bin/kubectl
mv -f /target/usr/lib/systemd/system/* /host/usr/lib/systemd/system/

# Pre-create the per-component config directories under /etc/pensando
# (names must match venice/globals/modules.go) plus the log and state
# trees, so they already exist when a container needs to mount them.
# Only CMD may mount /etc/pensando itself; shared/common is shared by
# every component.
mkdir -p \
	/host/etc/pensando/configs/shared/common \
	/host/etc/pensando/configs/pen-apiserver \
	/host/etc/pensando/configs/pen-cmd \
	/host/etc/pensando/configs/pen-elastic \
	/host/etc/pensando/configs/pen-elastic/elastic-discovery \
	/host/etc/pensando/configs/pen-filebeat \
	/host/etc/pensando/configs/pen-etcd \
	/host/etc/pensando/configs/kubernetes \
	/host/etc/pensando/configs/kubernetes/pen-kube-apiserver \
	/host/etc/pensando/configs/kubernetes/pen-kube-scheduler \
	/host/etc/pensando/configs/kubernetes/pen-kube-controller-manager \
	/host/etc/pensando/configs/kubernetes/pen-kubelet \
	/host/etc/pensando/configs/pen-ntp \
	/host/etc/pensando/configs/pen-elastic-curator \
	/host/var/log/pensando \
	/host/var/log/pensando/elastic \
	/host/var/log/pensando/filebeat \
	/host/var/lib/pensando

# Keep a pristine copy of the build-time configs under
# /host/etc/pensando/configs: runtime configs are generated from user
# config, and node cleanup restores from this copy.
cp -f /target/etc/pensando/shared/common/venice.json /host/etc/pensando/configs/shared/common/venice.json
cp -f /target/etc/pensando/shared/common/venice.conf /host/etc/pensando/configs/shared/common/venice.conf
cp -f /target/usr/bin/gettechsupport.sh /host/usr/pensando/bin/gettechsupport.sh
cp -f /target/usr/bin/penctl.linux /host/usr/pensando/bin/penctl
cp -f /target/usr/bin/psmctl.linux /host/usr/pensando/bin/psmctl
cp -f /target/usr/bin/INSTALL.sh /host/usr/pensando/bin/INSTALL.sh
cp -rf /target/var/lib/alerts /host/var/lib/pensando

# Populate the paths the services actually read from.
cp -rf /host/etc/pensando/configs/* /host/etc/pensando/

# Key/certificate directories, one per component (again matching
# venice/globals/modules.go); only CMD mounts /var/lib/pensando/pki.
mkdir -p \
	/host/var/lib/pensando/pki/kubernetes/pen-kube-apiserver \
	/host/var/lib/pensando/pki/kubernetes/pen-kube-scheduler \
	/host/var/lib/pensando/pki/kubernetes/pen-kube-controller-manager \
	/host/var/lib/pensando/pki/kubernetes/service-accounts \
	/host/var/lib/pensando/pki/kubernetes/pen-kubelet
| true |
5241c73324797b759234362d8a07c143b7eda575 | Shell | cdbeland/moss | /run_moss_parallel.sh | UTF-8 | 1,457 | 3.375 | 3 | [
"LicenseRef-scancode-public-domain"
] | permissive | #!/usr/bin/bash
set -e

# Name this run after the current commit and a timestamp, and do all
# work inside a fresh directory of that name.
RUN_NAME=run-$(git log | head -c 14 | perl -pe "s/commit //")+$(date "+%Y-%m-%dT%T")
export RUN_NAME
mkdir "$RUN_NAME"
cd "$RUN_NAME"

# --- PERFORMANCE ---
# If SSD becomes a bottleneck:
# https://wiki.archlinux.org/title/Solid_state_drive
# TRIM is enabled on weekly timer by default:
# systemctl list-timers
# Other optimizations not yet investigated (discard is for TRIM):
# https://askubuntu.com/questions/78971/best-etc-fstab-settings-for-boosting-ssd-hdd-performance
# https://askubuntu.com/questions/1400/how-do-i-optimize-the-os-for-ssds

# --- HTML ENTITIES ---
# Run time for this segment: ~4 h 10 min (8-core parallel)
# Uses 95%+ of all CPUs and it's eagerly awaited every dump, so run solo before everything else
echo "Beginning HTML entity check"
echo "$(date)"
# Keep tmp-entities because it's sometimes used for unlimited runs
../venv/bin/python3 ../moss_entity_check.py > tmp-entities
# Feed the file directly rather than through a useless `cat |`.
../venv/bin/python3 ../summarizer.py --find-all < tmp-entities > post-entities.txt

# --- PARALLELIZED REPORTS ---
# Run multiple main threads because even though most calculations are
# parallelized to use all cores, some are not, and sometimes the
# parent thread becomes a bottleneck, and CPUs are underused. Only run
# 2 tasks at a time to avoid CPU bottleneck that slows down the first
# reports to finish (which are usually urgently needed).
../run_moss_parallel1.sh >& thread1.log &
../run_moss_parallel2.sh >& thread2.log &
| true |
a8f8889e7e0041609c932062328bd72d1cdb8ce1 | Shell | SavinayDharmappa/probcomp-stack | /url-cards.sh | UTF-8 | 1,264 | 3.25 | 3 | [] | no_license | #!/bin/sh
# Leave in the file url-cards.ps a printable document consisting of
# one url per page. This is useful for printing out (single-sided!)
# and handing out at a workshop that involves giving the participants
# EC2 instances. The pages can be folded over to write people's
# passwords inside.

set -eu

echo "" > url-cards.txt
# One card per instance 1..400.  The outer loop keeps the original
# step-of-4 grouping; the four previously copy-pasted card templates
# are now generated by an inner loop over the offset within a group.
(for i in `seq 1 4 400`
do for off in 0 1 2 3
do n=$((i + off))
cat <<EOF >> url-cards.txt
MIT Probabilistic Computing Project
O'Reilly Artificial Intelligence Conference
New York, June 27, 2017
https://oreilly-$n.stack.probcomp.net
Password: $(cat oreilly-passwords/$n.passwd)
EOF
done
done)
enscript -fCourier-Bold16 url-cards.txt -o url-cards.ps -b ''
| true |
f2ddd623c969e079533adc04061ff9f65ecf4685 | Shell | nomeata/ghc-devscripts | /run-nofib.sh | UTF-8 | 496 | 3.359375 | 3 | [] | no_license | test -d nofib || { echo "No nofib found" ; exit 1 ; }
# Build GHC and run the nofib benchmark suite, logging build time and
# benchmark output to files named after the date and current commit.
# Usage: run-nofib.sh [slow] [noclean]
set -e
if [ "$1" = "slow" ]
then
mode=slow
variant="-slow"
shift
else
mode=norm
variant=""
fi
git fetch origin
# e.g. 2017-01-01T12:00-abcdef01
name="$(date --iso=minutes)-$(cd ..; git rev-parse HEAD|cut -c-8)"
# Full rebuild unless "noclean" was passed.
if [ "$1" != noclean ]
then
make distclean
perl boot
./configure
fi
/usr/bin/time -o buildtime-$name make -j8
cd nofib/
make clean
make boot
# Record the commits being benchmarked alongside the nofib output.
(cd ..; git log origin/master..HEAD; cd nofib; make mode=$mode) 2>&1 |
tee ../nofib-$name$variant.log
| true |
4a00456d4eeb05176412ea4cbc6d126a05683167 | Shell | chenyueze/review | /3-项目/2-分布式集群监控/2-Mem.sh | UTF-8 | 455 | 3.5625 | 4 | [] | no_license | #!/bin/bash
# Report memory usage plus an exponentially weighted moving average.
# Usage: Mem.sh <previous-average-percent>
# Output: "<timestamp> <totalM> <usedM> <used%> <new-average%>"
if [[ $# -lt 1 ]]; then
    echo "Usage:$0 DyAver"
    # The old code printed usage but fell through; bail out explicitly.
    exit 1
fi
Time=$(date +%Y-%m-%d_%H:%M:%S)
DyAver=$1
if [[ ${DyAver}x == x ]]; then
    exit 1
fi
# Total and used memory in MiB, from the second line of `free -m`.
MemValues=($(free -m | head -n 2 | tail -n 1 | awk '{printf("%s %s", $2, $3)}'))
MemUsedPrec=$(echo "scale=1; ${MemValues[1]}*100/${MemValues[0]}" | bc)
# New average = 0.7 * current usage + 0.3 * previous average.
NowAver=$(echo "scale=1; 0.7*${MemUsedPrec}+0.3*${DyAver}" | bc)
echo "${Time} ${MemValues[0]}M ${MemValues[1]}M ${MemUsedPrec}% ${NowAver}%"
| true |
3167c3b762da460dbee1781268935783ee944c36 | Shell | aditp928/cyber-secruitiy- | /4-OldFiles/01-Intro-Cybersecurity/2/Activities/07-Ins_Exec/demo.sh | UTF-8 | 797 | 3.65625 | 4 | [] | no_license | # Find all text files that begin with the word flag
# Find all text files that begin with the word flag
# NOTE(review): flag* and *.png below are unquoted, so the shell will
# expand them first if matching files exist in the cwd; quoting the
# patterns ('flag*', '*.png') would be safer.
find . -type f -iname flag*
# Find and copy all flag files into the present directory
# `-exec` signals that we are executing a command
# `cp {}` signifies that we want to copy the contents we found
# `.` signifies that we want to copy these files to the current folder
# `\;` signifies that we want to end our command
find . -type f -iname flag* -exec cp {} . \;
# Find and copy all pngs and jpgs into the Pictures folder of the present directory.
find . -type f \( -iname '*png' -o -iname '*jpg' \) -exec cp {} ./Pictures \;
# Find and copy all files that are not pngs into the NoPNGsAllowed folder.
# `!` signifies that we are looking for all files except ones that match our term
find . -type f ! -iname *.png -exec cp {} NoPNGsAllowed \;
fcec810e01f6d1fe196d6d82ec75e91905ed469d | Shell | rustyeddy/sandbox | /src/vagrants/vonos/provision/provision.sh | UTF-8 | 971 | 3.015625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
start_seconds="$(date +%s)"
echo "Provisioning ONOS workstation"
VHOME='/home/vagrant'
sudo apt-get update
sudo apt-get -y -f install
# Install Openssh server
sudo apt-get install -y openssh-server
sudo apt-get install -y git git-core git-review
# First install java
echo "Installing Java"
sudo apt-get install software-properties-common -y
sudo add-apt-repository ppa:webupd8team/java -y
sudo apt-get update
sudo echo oracle-java7-installer shared/accepted-oracle-license-v1-1 select true | `sudo /usr/bin/debconf-set-selections`
sudo apt-get install oracle-java8-installer
sudo apt-get install oracle-java8-set-default -y
# Install some other important things
sudo apt-get install wireshark -y
# Run the following script as vagrant
sudo -EH -u vagrant -- /bin/sh /vagrant/provision/user.sh
# Chown of vagrant home to vagrant
chown -R vagrant.vagrant /home/vagrant
#
# Other things to get:
# git review
# iperf
# mtraf
# wireshark
| true |
5dbdaed626fd61af6cd4709ffdff40d029180b5a | Shell | hickmanz/jukebox_socket-server | /gce/startup-script.sh | UTF-8 | 1,459 | 3.1875 | 3 | [] | no_license | set -v
# Talk to the metadata server to get the project id
# NOTE(review): PROJECTID is never read later in this script -- confirm
# whether it is still needed.
PROJECTID=$(curl -s "http://metadata.google.internal/computeMetadata/v1/project/project-id" -H "Metadata-Flavor: Google")
# Install dependencies from apt
apt-get update
apt-get install -yq ca-certificates git nodejs build-essential supervisor
# Install nodejs
mkdir /opt/nodejs
curl https://nodejs.org/dist/v8.11.4/node-v8.11.4-linux-x64.tar.gz | tar xvzf - -C /opt/nodejs --strip-components=1
ln -s /opt/nodejs/bin/node /usr/bin/node
ln -s /opt/nodejs/bin/npm /usr/bin/npm
# Get the application source code from the Google Cloud Repository.
# git requires $HOME and it's not set during the startup script.
export HOME=/root
git config --global credential.helper gcloud.sh
git clone https://github.com/hickmanz/jukebox_socket-server /opt/app
# Install app dependencies
cd /opt/app/
git pull
npm install
# Create a nodeapp user. The application will run as this user.
useradd -m -d /home/nodeapp nodeapp
chown -R nodeapp:nodeapp /opt/app
# Configure supervisor to run the node app.
cat >/etc/supervisor/conf.d/node-app.conf << EOF
[program:nodeapp]
directory=/opt/app/
command=npm start
autostart=true
autorestart=true
user=nodeapp
environment=HOME="/home/nodeapp",USER="nodeapp",NODE_ENV="production"
stdout_logfile=syslog
stderr_logfile=syslog
EOF
supervisorctl reread
supervisorctl update
# Application should now be running under supervisor | true |
b3f6042051191ee4492660dd4c44011137deaa67 | Shell | D-TACQ/acq400_sysconfig | /acq1014/master/acq1014_independent | UTF-8 | 638 | 2.515625 | 3 | [] | no_license | #!/bin/sh
# acq1014_independent: configure and arm a local/peer ACQ1014 pair.
#
# Usage: acq1014_independent [EDGE]
#   EDGE   trigger edge for the external trigger source (default: rising)
# Environment overrides:
#   PRE    pre-trigger sample count  (default: 0)
#   POST   post-trigger sample count (default: 25000000)
PRE=${PRE:-0}
POST=${POST:-25000000}
EDGE=${1:-rising}
# BUG FIX: removed dead "postlen=${1:-25000000}" — it reused $1 (the edge
# argument, not a length) and its value was never read anywhere below.
HN=$(hostname)
# Peer hostname is published by the local IOC on <host>:MIRROR_HOST.
PN=$(caget -t "$HN:MIRROR_HOST")
# With no pre-trigger samples a plain post-trigger capture is sufficient.
if [ "$PRE" -eq 0 ]; then
  MODE=post
else
  MODE=prepost
fi
caput "$HN:MIRROR_EN" 1
echo call acq1014_select_trg_src ext2 "$MODE" "$EDGE"
acq1014_select_trg_src ext2 "$MODE" "$EDGE"
#caput $HN:MIRROR_EN 0
caput "$HN:MODE:TRANSIENT:PRE" "$PRE"
caput "$HN:MODE:TRANSIENT:POST" "$POST"
caput "$HN:MODE:TRANSIENT" 1
# NOTE(review): PRE/POST are deliberately swapped on the peer below —
# confirm this mirrored capture window is the intended configuration.
caput "$PN:MODE:TRANSIENT:PRE" "$POST"
caput "$PN:MODE:TRANSIENT:POST" "$PRE"
caput "$PN:MODE:TRANSIENT" 1
# Arm the peer first, then the local (master) unit.
caput "$PN:MODE:TRANSIENT:SET_ARM" 1
caput "$HN:MODE:TRANSIENT:SET_ARM" 1
| true |
5b2c0de3a9043fc9811be10f1243ce68ad1c98be | Shell | iampuma/drop | /scripts/_start.sh | UTF-8 | 1,234 | 2.609375 | 3 | [] | no_license | # Replace the default vhost with our own.
# Wire up nginx/PHP/drush config symlinks and restart services on vagrant up.
# ROBUSTNESS FIX: rm -f / mkdir -p / ln -sf / guarded .bashrc append make the
# script idempotent — the original aborted or duplicated work when re-run
# ("vagrant provision" a second time) because the files already existed.
sudo rm -f /etc/nginx/sites-available/default
sudo ln -s /usr/share/nginx/init/default /etc/nginx/sites-available/default
# Replace the default php.ini with our own.
sudo rm -f /etc/php/5.6/fpm/php.ini
sudo ln -s /usr/share/nginx/init/php.ini /etc/php/5.6/fpm/php.ini
# Set Xdebug settings for Vagrant.
sudo rm -f /etc/php/5.6/mods-available/xdebug.ini
sudo ln -s /usr/share/nginx/init/xdebug.ini /etc/php/5.6/mods-available/xdebug.ini
# Set XHProf settings for Vagrant.
sudo rm -f /etc/php/5.6/mods-available/xhprof.ini
sudo ln -s /usr/share/nginx/init/xhprof.ini /etc/php/5.6/mods-available/xhprof.ini
# Set drush aliases.
mkdir -p /home/vagrant/.drush
sudo ln -sf /usr/share/nginx/init/aliases.drushrc.php /home/vagrant/.drush/aliases.drushrc.php
# Restart all services on vagrant up and reload.
sudo service nginx restart
sudo service php5.6-fpm restart
sudo mailcatcher --http-ip 0.0.0.0
# Add the project path, so we are there on vagrant ssh.
# (only append once, so repeated provisioning does not stack duplicates)
grep -qxF 'cd /usr/share/nginx/htdocs' /home/vagrant/.bashrc || echo "cd /usr/share/nginx/htdocs" >> /home/vagrant/.bashrc
## Enable XHProf on Drupal 7.
# drush en -y devel
# drush vset devel_xhprof_enabled
# drush vset devel_xhprof_directory "/usr/share/php"
# drush vset devel_xhprof_url "http://drop.local:2100"
| true |
7d0edf2b0077b24db9325909f6f020020e6aa063 | Shell | tapasjain/oga-uct | /Results_with_time/RESULTS_race/run_base.sh | UTF-8 | 407 | 2.890625 | 3 | [] | no_license | echo -n '' > BASE/baseNew.txt
# Sweep UCT widths for the "race" benchmark: one planner run per width,
# raw output in BASE/res<width>, one summary line per width appended to
# BASE/baseNew.txt, finally mirrored into ALL/.
for width in 30 60 100 150 200 300 500 700 1000 1300 1600; do
  echo "$width"
  out="BASE/res$width"
  ./../../mdp-engine-master/race/race_NEW -f -a 0 -h 0 -s 0 -t 1000 ../../mdp-engine-master/race/tracks/barto-big.track random uct "$width" 50 0 > "$out"
  # Append "<width> <field2> <field3> <field5>" from the uct(random...) line
  # to the merged results file.
  echo "$width" $(grep -n "uct(random" "$out" | awk '{print $2,$3,$5}') >> BASE/baseNew.txt
done
cp BASE/baseNew.txt ALL/
| true |
ddbee8209b46aeeadcfb6dc0e1860ef6e67e2a4c | Shell | anthezium/dotfiles | /.xsession | UTF-8 | 2,181 | 2.890625 | 3 | [] | no_license | #!/bin/bash
# ~/.xsession — X session bootstrap: pick a window manager via $MGR, launch
# tray applets and session daemons in the background, then exec the chosen
# WM so it becomes the session leader (the session ends when the WM exits).
#MGR="kde"
#MGR="xfce"
MGR="xmonad"
if [ "$MGR" = "xmonad" ]; then
## Set up icon tray first so applets have somewhere to go
trayer --edge top --align right --SetDockType true --SetPartialStrut true \
--expand true --widthtype percent --width 10 --transparent true --alpha 0 --tint 0x00ff00 --height 20 &
elif [ "$MGR" = "xfce" ]; then
# for xfce4 — no-op placeholder; xfce provides its own panel
#xfce4-panel &
true
fi
# Start the keyring daemon and export the variables it prints (SSH_AUTH_SOCK).
eval $(gnome-keyring-daemon --start)
export SSH_AUTH_SOCK
# Make DISPLAY/XAUTHORITY visible to systemd user units and, when the helper
# exists, to the D-Bus activation environment as well.
systemctl --user import-environment DISPLAY XAUTHORITY
if command -v dbus-update-activation-environment >/dev/null 2>&1; then
dbus-update-activation-environment DISPLAY XAUTHORITY
fi
# tray applet for networking (KDE ships its own, so skip it there)
#nm-applet --sm-disable --no-agent &
if [ "$MGR" != "kde" ]; then
# kill any stale instance before relaunching the wrapper applet
pkill nm-wrapplet || true
/home/theod/bin/nm-wrapplet &
fi
# tray applet for bluetooth
if [ "$MGR" != "kde" ]; then
blueman-applet &
fi
#wicd-client --tray &
# Load resources
xrdb -merge .Xresources
#echo "place 0" 2>&1
# set up custom X key bindings for volume control, etc.
xbindkeys &
#echo "place 1" 2>&1
# Set background color
xsetroot -solid black
# Set up screensaver
xscreensaver -no-splash &
#echo "place 2" 2>&1
# lock via xscreensaver when logind signals (suspend, lid close, ...)
xss-lock -- xscreensaver-command -lock &
#echo "place 2a" 2>&1
# Caffeine to toggle off (or timer off) screen saver
#caffeine &
# Set up power management tray icon
#sleep 3
#gnome-power-manger &
#xfce4-power-manager &
cbatticon &
#echo "place 3" 2>&1
#volume slider
# gnome-sound-applet &
if [ "$MGR" != "kde" ]; then
#volumeicon &
pasystray &
fi
#echo "place 4" 2>&1
# don't screensaver/go to sleep while i'm watching stuff
#${HOME}/git/lightsOn/lightsOn.sh &
# redshift to adjust color temperature based on time of day (latitude:longitude for Portland)
# NOTE(review): redshift -l expects LAT:LON with west longitudes negative;
# Portland, OR is roughly 45.52:-122.68, so the positive 122.6765 below
# looks wrong — verify against the redshift manual before relying on it.
redshift -l 45.5231:122.6765 &
#echo "place 5" 2>&1
# use lxde's polkit daemon
lxpolkit &
#echo "place 6" 2>&1
# start window manager (exec replaces this shell with the WM)
if [ "$MGR" = "xmonad" ]; then
# for xmonad
#exec ${HOME}/.cabal/bin/xmonad
#exec xmonad
exec ${HOME}/.xmonad/xmonad-x86_64-linux
#lxsession -e xmonad-x86_64-linux -s xmonad
elif [ "$MGR" = "xfce" ]; then
# for xfce4
exec startxfce4
elif [ "$MGR" = "kde" ]; then
exec startplasma-x11
fi
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.