blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
d6d179c51f8ca263b7dcfd594d471d90ce3c14d5 | Shell | algono/Utility-Scripts | /Shell/add-alias.sh | UTF-8 | 1,711 | 4.1875 | 4 | [] | no_license | #!/bin/bash
# shellcheck disable=SC1090
## Script made by: Alejandro Gomez - algono - 24/02/2019
## FUNCTIONS
# Interactively prompt for a new alias definition.
# Outputs: sets the globals ALIAS (alias name) and COMMAND (command text).
# read -r preserves backslashes typed by the user.
create_alias()
{
read -r -p "Type the name of the new alias: " ALIAS
echo ---------
read -r -p "Type the full command the alias should do: " COMMAND
}
# Append "alias $ALIAS='\$COMMAND'" to ~/.bash_aliases, refusing duplicates.
# Globals: reads ALIAS, COMMAND, HOME.
# Returns: 1 if the alias name is already defined, 0 otherwise.
add_alias()
{
# Create the aliases file before checking it: the original ran `cat` on
# the file before `touch`, which printed an error on first ever use.
touch "$HOME/.bash_aliases"
# Refuse to redefine an existing alias (grep -q: silent presence test).
if grep -q "alias $ALIAS=" "$HOME/.bash_aliases"
then
echo "Error. The alias is already defined."
return 1
fi
echo "Adding alias..."
# "\\" writes a single literal backslash before the command word, which
# suppresses alias expansion of that word and avoids alias recursion.
echo "alias $ALIAS='\\$COMMAND'" >> "$HOME/.bash_aliases"
## Reload Aliases and PATH files. ~/.bash_profile is only sourced when it
## exists; the original sourced it unconditionally and errored when absent.
. "$HOME/.bash_aliases"
if [ -f "$HOME/.bash_profile" ]; then . "$HOME/.bash_profile"; fi
echo "Alias added successfully."
echo "Type the command '. ~/.bash_aliases' to update the changes."
}
# Interactive multi-alias mode: repeatedly prompt for alias definitions
# until the user presses 'q'. Uses create_alias/add_alias defined above.
add_aliases_no_arguments()
{
while :
do
clear
echo ---------
echo "Welcome to the alias creation system."
echo ---------
create_alias
echo ---------
add_alias
echo ---------
# -n 1: return after one keypress; -r: keep backslashes literal.
read -p "Press any key to add another alias... (or press 'q' to exit)" -n 1 -r
echo ""
# REPLY is read's default target variable when no name is given.
if [ "$REPLY" = "q" ]; then return 0; fi
done
}
# Append a standard snippet to ~/.bashrc so that ~/.bash_aliases (when
# present) is sourced by every new interactive shell. The here-document
# delimiter is quoted: nothing in the snippet is expanded at append time.
load_aliases_by_default()
{
cat <<'SNIPPET' >> "$HOME/.bashrc"
# Alias definitions
# You may want to put all your additions into a separate file like
# ~/.bash_aliases, instead of adding them here directly.
# See /usr/share/doc/bash-doc/examples in the bash-doc package.
if [ -f ~/.bash_aliases ]; then
. ~/.bash_aliases
fi
SNIPPET
}
## MAIN SCRIPT
# Two modes: 0 args -> interactive loop; exactly 2 args -> one-shot
# (alias name, command). Any other arity is rejected.
if [ "$#" -gt 0 ]; then
if [ "$#" -ne 2 ]; then
echo "Illegal number of parameters. Only 0 (multi-alias) or 2 (one-alias) parameters are valid."
exit 1
else
ALIAS=$1
COMMAND=$2
add_alias
fi
else
add_aliases_no_arguments
fi
# Ensure ~/.bashrc sources ~/.bash_aliases; install the snippet only when
# no mention of the file is found (substring match over the whole file).
if [[ $(cat "$HOME/.bashrc") != *"~/.bash_aliases"* ]]
then
load_aliases_by_default
fi
| true |
0725a7d0feec7f173afe20f854da1c73984ecf03 | Shell | jacob-seiler/random-bash-scripts | /canWrite.sh | UTF-8 | 234 | 3.84375 | 4 | [] | no_license | FILENAME=$1
# Report whether the file named by $FILENAME (set from $1 on the line
# above) exists and whether the current user may write to it.
if [[ -f $FILENAME ]]
then
# -w: true when the file is writable by the invoking user.
if [[ -w $FILENAME ]]
then
echo "$FILENAME exists and is writable"
else
echo "$FILENAME exists but is not writable"
fi
else
echo "$FILENAME does not exist"
fi | true |
b9cb5cb64c59e46b69e793a9e131b5c10e1e34e4 | Shell | rrshah/energy_efficient_data_center | /TargetServer/targetScripts/target_dataAnalytics.sh | UTF-8 | 1,395 | 3.140625 | 3 | [] | no_license | #!/bin/bash
# Re-deploy a CloudSuite data-analytics Hadoop cluster from a saved image:
# stop/remove old containers and images, load the migrated image, start
# master + four slaves on the hadoop-net network, then run the benchmark.
echo 'Running Data Analytics on Kraken....'
# Name of the migrated container image (first script argument).
containername="$1"
echo "Stopping container if running..."
sudo docker stop $containername
echo "Removing container..."
sudo docker rm -f $containername data-analytics-master data-analytics-slave01 data-analytics-slave02 data-analytics-slave03 data-analytics-slave04
echo "Removing container image..."
sudo docker image rm ${containername}_image data-analytics-master data-analytics-slave01 data-analytics-slave02 data-analytics-slave03 data-analytics-slave04
echo "Loading previously saved container..."
sudo docker load < /home/sjsu_ra/migration_img/${containername}_image.tar
echo "Running loaded container..."
sudo docker run -d --net hadoop-net --name data-analytics-master --hostname data-analytics-master cloudsuite/data-analytics master
sudo docker run -d --net hadoop-net --name data-analytics-slave01 --hostname data-analytics-slave01 cloudsuite/hadoop slave
sudo docker run -d --net hadoop-net --name data-analytics-slave03 --hostname data-analytics-slave03 cloudsuite/hadoop slave
sudo docker run -d --net hadoop-net --name data-analytics-slave04 --hostname data-analytics-slave04 cloudsuite/hadoop slave
sudo docker run -d --net hadoop-net --name data-analytics-slave02 --hostname data-analytics-slave02 cloudsuite/hadoop slave
# Run the benchmark on the master and stamp the completion time.
sudo docker exec data-analytics-master benchmark
now=$(date +"%T")
echo "End time : :$now"
| true |
2da518e93475e03de591826f2e7c5a2766adaf28 | Shell | mattmc3/zsh_custom | /plugins/perl/perl.plugin.zsh | UTF-8 | 357 | 2.875 | 3 | [] | permissive | #
# perl
#
# On macOS, point Perl's Module::Build / ExtUtils::MakeMaker at a
# user-local install tree under $XDG_DATA_HOME/perl5 and bootstrap
# local::lib on first use.
if [[ "$OSTYPE" == darwin* ]]; then
  # eval "$(perl -I$XDG_DATA_HOME/perl5/lib/perl5 -Mlocal::lib=$XDG_DATA_HOME/perl5)"
  # Double quotes make the shell expand the path here. The previous
  # single-quoted values exported a literal '$XDG_DATA_HOME' string, which
  # the Perl toolchain never expands, so installs went to a bogus path.
  export PERL_MB_OPT="--install_base \"$XDG_DATA_HOME/perl5\""
  export PERL_MM_OPT="INSTALL_BASE=$XDG_DATA_HOME/perl5"
  if [[ ! -d "$XDG_DATA_HOME/perl5" ]]; then
    mkdir -p "$XDG_DATA_HOME/perl5"
    cpan local::lib
  fi
fi
| true |
422cbfb8b0e42c95130920fcc2504d252cea2ace | Shell | rakshasa/rtorrent-vagrant | /scripts/torrent-data-wc | UTF-8 | 636 | 3.28125 | 3 | [] | no_license | #!/usr/bin/env bash
# Load shared helpers (call_ssh_node, torrent utilities) relative to this
# script's own directory.
source "${BASH_SOURCE[0]%/*}/include/call"
source "${BASH_SOURCE[0]%/*}/include/torrent"
# Required positional arguments; ${var:?msg} aborts with msg when missing.
node="${1:?Node or local not specified.}"
torrent_path="${2:?Torrent path not specified.}"
torrent_name="${3:?Torrent name not specified.}"
# Print per-file byte counts (wc -c) for the torrent's payload, either
# locally or on a remote node over SSH. The original existence tests were
# written with mismatched brackets ("[ ... ]]" and "[[ ... ]"), which made
# the check error out and always fail; they are balanced [[ ... ]] here.
if [[ "${node}" = "local" ]]; then
cd "${torrent_path}" && ([[ -d "${torrent_name}" ]] || [[ -f "${torrent_name}" ]]) && (find "${torrent_name}" -type f -print0 | sort -z | xargs -0 wc -c)
else
call_ssh_node "${node}" -- "cd '${torrent_path}' && ([[ -d '${torrent_name}' ]] || [[ -f '${torrent_name}' ]]) && (find '${torrent_name}' -type f -print0 | sort -z | xargs -0 wc -c)"
fi
| true |
66d541947bb9b3d87b3a7a02a0f00219d96e2f8e | Shell | nochtavio/nochtavio-alief-techtask-php | /run.sh | UTF-8 | 530 | 3.171875 | 3 | [] | no_license | #!/bin/bash
set -e
# Dispatch on the first argument: "default" builds and runs the compose
# stack in the foreground; "test" runs phpunit inside the app container.
operator=$1
case $operator in
"default")
docker-compose up --build
;;
"test")
docker-compose down
docker-compose up -d
# NOTE(review): `exec -e` expects a VAR=VALUE argument; here it consumes
# "-T" as its value, so the intent of the flags is unclear — confirm.
docker-compose exec -e -T app sh -c "php bin/phpunit"
# NOTE(review): with `set -e` above, a failing exec aborts the script
# before this check, so the "Tests failed" branch looks unreachable.
if [ $? != 0 ]; then
printf "\nTests failed!\n\n"
EXIT_CODE=1
else
printf "\nTests passed\n\n"
fi
docker-compose down
;;
*)
printf "Unidentified Action :( !! \n\n"
# NOTE(review): EXIT_CODE is assigned but never used as an exit status.
EXIT_CODE=1
;;
esac | true |
60bbc0ab773dda67fb72dcb72167fb82d6cdab5f | Shell | delkyd/alfheim_linux-PKGBUILDS | /bash-get-git/PKGBUILD | UTF-8 | 866 | 2.90625 | 3 | [] | no_license | pkgname="bash-get-git"
# Arch Linux PKGBUILD for the git version of bash-get (pkgname is set on
# the line above). Version string: <commit count>.<short hash> of master.
pkgver=12.9bee7b8
pkgrel=1
pkgdesc="go get inspired bash get tool"
url="https://github.com/reconquest/bash-get"
arch=('any')
license=('GPL')
makedepends=()
source=("bash-get::git://github.com/reconquest/bash-get.git")
md5sums=(SKIP)
# Derive the VCS-based package version from the checked-out repository.
pkgver() {
cd "${pkgname%%-git}"
echo $(git rev-list --count master).$(git rev-parse --short master)
}
# "Build" only fetches the vendored submodules.
build() {
cd "${pkgname%%-git}"
git submodule update --init --recursive
}
# Install the launcher under /usr/lib/bash-get, symlink it into /usr/bin,
# and strip test files, git metadata, and empty directories from the tree.
package() {
install -DTm 0777 "$srcdir/${pkgname%%-git}/get" "$pkgdir/usr/lib/bash-get/get"
cp -r "$srcdir/${pkgname%%-git}/vendor" "$pkgdir/usr/lib/bash-get/"
install -d "$pkgdir/usr/bin"
ln -fs /usr/lib/bash-get/get "$pkgdir/usr/bin/get"
find "$pkgdir" -wholename "*tests*" -type f -delete
find "$pkgdir" -wholename "*.git*" -type f -delete
find "$pkgdir" -type d -empty -delete
}
| true |
1e5c854b57392dd3b1ac00776e82e43b12888c7e | Shell | earthbound19/autobrood | /_setup/_installDependenciesMSYS2.sh | UTF-8 | 1,041 | 3.328125 | 3 | [
"CC-BY-2.5",
"LicenseRef-scancode-public-domain"
] | permissive | # DESCRIPTION
# Installs Windows dependencies for this autobrood repository's scripts,
# if you wish to run them from MSYS2.
# Whitespace-separated list of MSYS2/pacman packages to install. Note this
# is a plain string, not a bash array; the ${...[@]} expansion below works
# only because the unquoted string is word-split by the shell.
MSYS2_packages=" \
perl \
perl-XML-LibXML \
tar \
perl-XML-Parser"
for element in ${MSYS2_packages[@]}
do
# UNINSTALL option:
# pacman -R --noconfirm $element
pacman -S --noconfirm $element
done
# Build and install XML::Twig from source (provides xml_grep).
wget https://www.xmltwig.org/xmltwig/XML-Twig-3.52.tar.gz
# NOTE(review): `tar xz < file` relies on tar reading the archive from
# stdin without `-f -`; confirm this works on the target tar build.
tar xz < XML-Twig-3.52.tar.gz
cd XML-Twig-3.52
perl Makefile.PL -y
make
# But I doubt this helps anything? :
make test
make install
cd ..
rm -rf XML-Twig-3.52 XML-Twig-3.52.tar.gz XML-Twig-3.52
# Merge all *.flame genomes in the current directory into one <flames>
# wrapper document (assumes .flame files exist here — see echo below).
xml_grep --pretty_print indented --wrap flames --descr 'name="Flock Hoibimonko"' --cond "flame" *.flame > flames.flame
# nah: --pretty_print indented -- WAIT actually yes.
echo "!============================================================"
echo Done. If all went as planned, there should be a new file named flames.flame which, if you open in a text editor, you will find to be a series of \<flame\> genomes nested in a \<flames\> tag set at the start and end of the file.
ea0bc5c37e64b649e16ff35cb1f2f5bfc02e10fe | Shell | omkargb/batch_317 | /day5/asignment/day5p2/32case_numToWeekday.sh | UTF-8 | 828 | 3.4375 | 3 | [] | no_license | #!/bin/bash -x
# Translate a digit 0-6 typed by the user into the matching weekday name.
# An indexed lookup table replaces the original seven-constant case chain;
# the regex guard accepts exactly one digit in the 0-6 range.
weekdays=(Sunday Monday Tuesday Wednesday Thursday Friday Saturday)
read -p "Enter single digit number (0 to 6) : " num
if [[ $num =~ ^[0-6]$ ]]; then
echo "$num : ${weekdays[num]}"
else
echo "$num : Enter valid number."
fi
| true |
74a67aa8765bb5a6adb3042871c90162fe8093ab | Shell | AGKhalil/pls-work | /script/install/rp_filtering.sh | UTF-8 | 842 | 3.21875 | 3 | [] | no_license | #!/usr/bin/env bash
set -eo pipefail
# Print a bold, arrow-prefixed status line to stdout.
# Arguments: $1 - the message text.
info() {
printf '%s====> %s%s\n' "$(tput bold)" "$1" "$(tput sgr0)"
}
# This file is based on https://docs.polyaxon.com/setup/archlinux-kubeadm/#requirements.
# Persist the setting: rewrite (sed -i ... /c\) both rp_filter lines in the
# sysctl drop-in so the value survives reboots.
info "Configure IPv4 reverse path filtering."
sudo sed -i '/net.ipv4.conf.default.rp_filter/c\net.ipv4.conf.default.rp_filter=1' \
/etc/sysctl.d/99-sysctl.conf
sudo sed -i '/net.ipv4.conf.all.rp_filter/c\net.ipv4.conf.all.rp_filter=1' \
/etc/sysctl.d/99-sysctl.conf
# grep acts as an assertion here: a missing line exits non-zero and, under
# the script's `set -eo pipefail`, aborts the run.
info "Verify IPv4 reverse path filtering configuration."
grep '^net.ipv4.conf.default.rp_filter=1$' /etc/sysctl.d/99-sysctl.conf
grep '^net.ipv4.conf.all.rp_filter=1$' /etc/sysctl.d/99-sysctl.conf
# Apply immediately for the running kernel via /proc (tee is used so the
# write happens under sudo; the redirect only silences tee's stdout echo).
info "Enable IPv4 reverse path filtering."
echo 1 | sudo tee /proc/sys/net/ipv4/conf/default/rp_filter > /dev/null
echo 1 | sudo tee /proc/sys/net/ipv4/conf/all/rp_filter > /dev/null
| true |
3cbdeaaf184908ad0d121fd5b7ba3309f885ab58 | Shell | TMG-MattNewman/aem-tooling | /package.sh | UTF-8 | 3,007 | 3.75 | 4 | [] | no_license | #!/usr/bin/env bash
# AEM CRX Package Manager endpoints (relative to the instance base URL).
PACKAGE_MANAGER='crx/packmgr/index.jsp'
PACKAGE_UPDATE_PATH='crx/packmgr/update.jsp'
PACKAGE_SERVICE_PATH='crx/packmgr/service/.json'
TIMEOUT=1
# Defaults, overridable via command-line switches parsed below.
outputDir='downloads'
packageName=''
pathSupplied=0
outputDirSupplied=0
packageGroup='my_packages'
env='http://localhost:4502/'
auth='admin:admin'
# Action flags: create package, add filter, build, download.
create=0
addFilter=0
build=0
download=0
# Parse switches: options with ':' take a value, bare letters toggle actions.
while getopts "p:o:g:e:u:cabdv" OPTION
do
case $OPTION in
p) path=$OPTARG; pathSupplied=1;;
c) create=1;;
a) addFilter=1;;
b) build=1;;
d) download=1;;
o) outputDir=$OPTARG;;
g) packageGroup=$OPTARG;;
e) env=$OPTARG;;
u) auth=$OPTARG;;
v) verbose=1;;
*) exit 1;; # illegal option
esac
done
# A content path (-p) is mandatory for every action.
if [[ ! ${pathSupplied} -eq 1 ]]; then
echo "a path is required for working with packages!"
exit 1;
fi
# Require at least one action flag. The original test was
# [[ ! ( ${create} || ... ) ]], which evaluates each flag as a *string*:
# the literal "0" is non-empty and therefore truthy, so the guard could
# never fire. Arithmetic evaluation tests the numeric values instead.
if (( ! (create || addFilter || build || download) )); then
echo "one of create (c), addFilter (f), build (b) or download (d) is required."
exit 1
fi
# check access to env using username & password
./test-connection.sh -e ${env} -u ${auth} || exit 1;
# JCR path of the package zip inside the instance.
packagePath="etc/packages/${packageGroup}/"
createPath="${PACKAGE_SERVICE_PATH}/${packagePath}"
# if path starts with a forward slash, strip it, because one exists at the end of $env
if [[ ! ${env} =~ /$ ]]; then
env="${env}/"
fi
if [[ ${path} =~ ^/ ]]; then
path="${path:1}"
fi
fullPath=$(./path-manipulation.sh -p ${path} -j) # add all parts of the path needed for a page level filter
truncatedPackageName=$(./path-manipulation.sh -p ${fullPath} -s) # strip package name down to minimum useful
packageName=${truncatedPackageName//\//-} # replace / with -
packageZip=${packagePath}${packageName}.zip
# Parameter strings passed on to the per-action helper scripts.
CREATE_PACKAGE_PARAMS="-p ${env}${createPath} -u ${auth} -n ${packageName} -g ${packageGroup}"
PACKAGE_ADD_FILTER_PARAMS="-e ${env} -u ${auth} -p ${fullPath} -n ${packageName} -g ${packageGroup}"
PACKAGE_BUILD_PARAMS="-z ${env}${PACKAGE_SERVICE_PATH}/${packageZip} -u ${auth}"
PACKAGE_DOWNLOAD_PARAMS="-z ${env}${packageZip} -u ${auth} -n ${packageName} -o ${outputDir}"
# Verbose mode: echo the derived values and forward -v to the helpers.
if [[ ${verbose} -eq 1 ]]; then
echo "fullPath=${fullPath}"
echo "packageName=${packageName}"
echo "packageGroup=${packageGroup}"
echo "packageZip=${packageZip}"
CREATE_PACKAGE_PARAMS+=" -v"
PACKAGE_ADD_FILTER_PARAMS+=" -v"
PACKAGE_BUILD_PARAMS+=" -v"
PACKAGE_DOWNLOAD_PARAMS+=" -v"
echo "CREATE_PARAMS=${CREATE_PACKAGE_PARAMS}"
echo "ADD_FILTER_PARAMS=${PACKAGE_ADD_FILTER_PARAMS}"
echo "PACKAGE_BUILD_PARAMS=${PACKAGE_BUILD_PARAMS}"
echo "PACKAGE_DOWNLOAD_PARAMS=${PACKAGE_DOWNLOAD_PARAMS}"
fi
# Execute the requested actions in create -> addFilter -> build -> download
# order; the unquoted *_PARAMS strings word-split into individual options.
if [[ ${create} -eq 1 ]]; then
./package-create.sh ${CREATE_PACKAGE_PARAMS}
fi
if [[ ${addFilter} -eq 1 ]]; then
./package-add-filter.sh ${PACKAGE_ADD_FILTER_PARAMS}
fi
if [[ ${build} -eq 1 ]]; then
./package-build.sh ${PACKAGE_BUILD_PARAMS}
fi
if [[ ${download} -eq 1 ]]; then
./package-download.sh ${PACKAGE_DOWNLOAD_PARAMS}
fi
d4a081e6c7412659707c6081b75a20d123db8ac4 | Shell | delkyd/alfheim_linux-PKGBUILDS | /ordbanken/PKGBUILD | UTF-8 | 626 | 2.609375 | 3 | [] | no_license | # Contributor: Kevin Brubeck Unhammer <unhammer+dill@mm.st>
# Maintainer: Kevin Brubeck Unhammer <unhammer+dill@mm.st>
# Arch Linux PKGBUILD for ordbanken, a Norwegian inflectional dictionary
# lookup tool, built from the upstream release tarball on Savannah.
pkgname=ordbanken
pkgver=2013.02.17
# Upstream uses dashes in the release date; pacman versions use dots.
upstreampkgver=2013-02-17
pkgrel=1
pkgdesc="Look up Norwegian Nynorsk or Bokmål words in an inflectional dictionary"
url="http://huftis.org/artiklar/ordbanken/"
license=('GPL3')
makedepends=('')
depends=('util-linux-ng' 'sh')
arch=('i686' 'x86_64')
source=("http://download-mirror.savannah.gnu.org/releases/$pkgname/$pkgname-$upstreampkgver.tar.xz")
md5sums=('c2abe87b472da23423734a52a8d7a609')
# Build and install in one step; `|| return 1` is the legacy makepkg idiom
# for aborting on failure.
build() {
cd "$srcdir/$pkgname"
make PREFIX="/usr" || return 1
make PREFIX="/usr" DESTDIR="$pkgdir/" install || return 1
}
| true |
d4bad076d9556ee446bd06eb42a1fd5e9da376b7 | Shell | artisdom/_ebDev | /scripts/recipes/color_growth_cgps.sh | UTF-8 | 3,264 | 3.859375 | 4 | [] | no_license | # DESCRIPTION
# runs `color_growth.py` once for every `.cgp` preset in the current directory and all subdirectories. creates .rendering temp files of the same name as a render target file name so that you can interrupt / resume or run multiple simultaneous renders.
# USAGE
# Run with or without these optional parameters:
# - $1 OPTIONAL. Any extra arguments as usable by color_growth.py, surrounded by single or double quote marks. These will override any arguments that use the same switch or switches which are in the .cgp file(s). To use $2 but not this (or to use the built-in defaults for this), pass an empty string ('') for this.
# - $2 OPTIONAL. Anything, for example the string 'PLIBPLUP', which will cause the script to skip the cooldown period after every render.
# An example that uses parameter $1:
# color_growth_cgps.sh '--WIDTH 850 --HEIGHT 180 --SAVE_PRESET False --SAVE_EVERY_N 7150 --RAMP_UP_SAVE_EVERY_N True'
# An example that uses parameters $1 and $2:
# color_growth_cgps.sh '--WIDTH 850 --HEIGHT 180 --SAVE_PRESET False --SAVE_EVERY_N 7150 --RAMP_UP_SAVE_EVERY_N True' PLIBPLUP
# An example that uses parameter $2 but leaves it to use the defaults for $1:
# color_growth_cgps.sh '' PLIBPLUP
# NOTES
# This is designed to run in multiple simultaneous batch jobs, for example from multiple computers reading and writing to a network drive, or from one computer with many CPU cores, which will allow multiple simultaneous runs of renders if it does not load the CPUs too much. To accomode multiple simultaneous runs, the script does this:
# - On run of a render for a given .cgp preset, it creates a file named after the preset but with the .rendering extension (it does not ever delete them; you have to).
# - But before it makes that file, it checks for the existence of it. If it already exists, it moves on to the next preset render task. Therefore, if one run of the script created the preset already (to signify that a render associated with it is underway), another run of the script will not duplicate that work.
# CODE
# Optional extra color_growth.py arguments (a single quoted string that is
# deliberately word-split when passed on — see NOTE below).
if [ "$1" ]; then extraParameters=$1; fi
# Any second argument disables the post-render CPU cooldown pause.
bypassCooldownPeriod="False"
if [ "$2" ]; then bypassCooldownPeriod="True"; fi
pathToScript=$(getFullPathToFile.sh color_growth.py)
# Read preset filenames into an array safely. The original unquoted
# command-substitution array assignment word-split on every whitespace
# character, breaking preset names containing spaces.
mapfile -t presetsArray < <(find . -maxdepth 1 -type f -name "*.cgp" -printf '%f\n')
for element in "${presetsArray[@]}"
do
# Strip the .cgp extension directly (no need for the original echo).
cgpFileNoExt=${element%.*}
renderLogFileFile=$cgpFileNoExt.rendering
if ! [ -e "$renderLogFileFile" ]
then
# create render stub file so that other or subsequent runs of this script
# in the same directory will skip renders in progress or already done:
printf "Rendering . . ." > "$renderLogFileFile"
echo ""
echo "Render log file $renderLogFileFile was not found; rendering via command:"
echo "python $pathToScript --LOAD_PRESET $element $extraParameters"
# NOTE: $extraParameters stays unquoted on purpose so a quoted argument
# string like '--WIDTH 850 --HEIGHT 180' splits into separate options.
python "$pathToScript" --LOAD_PRESET "$element" $extraParameters
# OPTIONAL, and may save your CPU from burning out:
if [ "$bypassCooldownPeriod" == "False" ]
then
echo "Will pause 300 seconds to let CPU(s) cool off . . ."; sleep 300
else
echo "Will bypass cooldown period per parameter passed to this script."
fi
else
echo ""
echo "Render log file $renderLogFileFile found;"
echo "SKIPPING render . . ."
fi
done
3b9bc88284cf5716804aaa9b51020e70df78d60b | Shell | wahello/ABHPC-Guide | /常用slurm脚本/lammps.slm | UTF-8 | 1,862 | 3.234375 | 3 | [] | no_license | #!/bin/bash
# Set the job name here; note that VASP-style input files do not need to
# be named specially.
#SBATCH --job-name=test
# Which queue (partition) to submit to.
#SBATCH --partition=E5-2630V2
# Number of nodes to use.
#SBATCH --nodes=2
# Cores per node. IMPORTANT: this must equal the core count of a single
# node in the chosen queue.
#SBATCH --ntasks-per-node=12
# Error and output file names (%j expands to the Slurm job id).
#SBATCH --error=%j.err
#SBATCH --output=%j.out
# Load LAMMPS via environment modules.
module load lammps/12Dec18
# Select the LAMMPS binary: CPU or GPU execution.
# HW=cpu or HW=gpu
HW="cpu"
# If you do not understand the lines below, keep the defaults; customize
# only if you know what they mean.
# Generate the MPI machinefile (one host:slots entry per allocated node).
CURDIR=`pwd`
rm -rf $CURDIR/nodelist.$SLURM_JOB_ID
NODES=`scontrol show hostnames $SLURM_JOB_NODELIST`
for i in $NODES
do
echo "$i:$SLURM_NTASKS_PER_NODE" >> $CURDIR/nodelist.$SLURM_JOB_ID
done
# End of nodelist generation.
# Run LAMMPS via MPI: CPU or GPU.
if [ "$HW" = "cpu" ];then
mpirun -genv I_MPI_FABRICS=ofi -machinefile $CURDIR/nodelist.$SLURM_JOB_ID lammps-$HW -in $SLURM_JOB_NAME.in -sf omp -pk omp 1 -l $SLURM_JOB_NAME.log > $SLURM_JOB_NAME.sta
elif [ "$HW" = "gpu" ];then
# Pick the GPU package count per partition (number of GPUs per node).
if [ "$SLURM_JOB_PARTITION" = "E5-2678V3" ]; then
PK_NUM=2
elif [ "$SLURM_JOB_PARTITION" = "E5-2630V2" ]; then
PK_NUM=3
else
PK_NUM=2
echo "Unkown partition, set gpu package number to 2" > $SLURM_JOB_NAME.slm.error
fi
# NOTE(review): "GPU-PRERUN" looks like a placeholder command — confirm.
mpirun -machinefile $CURDIR/nodelist.$SLURM_JOB_ID GPU-PRERUN
mpirun -genv I_MPI_FABRICS=ofi -machinefile $CURDIR/nodelist.$SLURM_JOB_ID lammps-$HW -in $SLURM_JOB_NAME.in -sf gpu -pk gpu $PK_NUM -l $SLURM_JOB_NAME.log > $SLURM_JOB_NAME.sta
else
echo "Error: The hardware should be defined as cpu or gpu in slurm scripts!" > $SLURM_JOB_NAME.slm.error
fi
# Clean up the nodelist after the run.
rm -rf $CURDIR/nodelist.$SLURM_JOB_ID
| true |
64bddb9ab820a3c90fd5230e687a04c2275c1b09 | Shell | sweptr/zy-fvwm | /svn.df7cb.de/dotfiles/cb/.bashrc | UTF-8 | 1,757 | 2.921875 | 3 | [] | no_license | # $Id$ Christoph Berg <cb@df7cb.de>
# login shells: /etc/profile, then ~/.[bash_]profile; interactive: ~/.bashrc
#echo .bashrc
# Source the rc fragment ~/$1 if it exists, falling back to user cb's copy
# (~cb/$1); complain on stderr when neither file is present.
# Arguments: $1 - rc file path relative to the home directory.
source_rc () {
	if [ -e ~/"$1" ] ; then . ~/"$1"
	elif [ -e ~cb/"$1" ] ; then . ~cb/"$1"
	else echo "$0: $1 not found" 1>&2
	fi
}
# Environment
# Environment fragments, loaded for every shell (login or not).
source_rc lib/locale.sh
source_rc lib/session.sh
source_rc bin/os > /dev/null
source_rc .path
source_rc .env
# check whether we run interactively
[ "$PS1" ] || return
#echo ".bashrc: interactive"
source_rc .bash_bind
# Prompt helpers: $j shows shell nesting depth and (on bash > 2.04) the
# job count via the '\j' prompt escape; completion is loaded if available.
if [ "$BASH_VERSION" \> "2.04" ] ; then # new bash supporting '\j' and completion
j='$([ $SHLVL -gt 1 ] && echo -n "${SHLVL}s " ; [ \j -gt 0 ] && echo -n "\jj ")'
[ -f ~/.bash_completion ] && . ~/.bash_completion
#. /etc/bash_completion
else
j='$([ $SHLVL -gt 1 ] && echo -n "${SHLVL}s ")'
fi
# $h: chroot marker, $w: cwd abbreviated to ~35 chars, $u: the colored
# "[status] time user@host:cwd" prompt prefix.
[ -f /etc/debian_chroot ] && h=$(cat /etc/debian_chroot).
w='$(echo "\w" | perl -pe "1 while (length>35 and s!([^/]{2})[^/]+/!\$1/!)")'
u='\[\033[46m\][\[\033[1;31m\]$?\[\033[0;46m\]] \A \[\033[1m\]\u@'$h'\h:\[\033[34m\]'$w'\[\033[0;46m\]'
# Terminal-specific PS1: screen/xterm variants also set the window title.
case $TERM in
linux*|*vt100*|cons25)
PS1=''$u' '$j'\l \$\[\033[0m\] ' ;;
screen*)
PS1='\[\033k\u@'$h'\h\033\\\033]0;\u@'$h'\h:\w\007\]'$u' '$j'\$\[\033[0m\] ' ;;
xterm*|rxvt|cygwin)
PS1='\[\033]0;\u@'$h'\h:\w\007\]'$u' '$j'\$\[\033[0m\] '
if [ "$console" ] ; then
PS1='\[\033]0;console@'$h'\h:\w\007'$u' '$j'\$\[\033[0m\] '
export -n console
fi ;;
*)
PS1='\n[$?] \u@'$h'w '$j'\$' ;;
esac
unset h j u w
# internal shell settings
auto_resume=
#FIGNORE='~'
HISTCONTROL=ignoredups
#histchars='!^#'
HISTFILESIZE=100
HISTIGNORE="..:[bf]g:cd:l:ls"
HISTSIZE=500
unset ignoreeof
shopt -s extglob no_empty_cmd_completion
#[ -t 0 ] && stty erase ^H &> /dev/null
#unset noclobber
# Per-machine overrides; `true` keeps the script's exit status at 0.
[ -f ~/.bashrc-local ] && . ~/.bashrc-local
true
# vim:ts=4:sw=4:
91594fb968968e61999b91dc3a82b6178af1948c | Shell | ridgestreet/Slimstart | /ridge | UTF-8 | 659 | 3.5625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Print a one-line usage summary for this wrapper script.
usage() {
printf '%s\n' "runenv start|kill"
}
# Start the dev environment: a detached tmux session "ridgeenv" with one
# window watching Sass via compass and one running the CoffeeScript
# compiler over www/src.
function start() {
echo "starting env..."
tmux new-session -d -s ridgeenv
tmux new-window -t ridgeenv:1 -n "compass" "compass watch www"
tmux new-window -t ridgeenv:2 -n "coffescript" "coffee --compile --output www/js/ www/src/"
echo "started ridge"
}
# Tear down the dev environment: stop the watcher processes and the tmux
# session started by start(). Shadows the `kill` builtin by design.
function kill() {
echo "killing env..."
killall compass
# NOTE(review): the watcher runs as "coffee"; confirm a process named
# "coffescript" actually exists for killall to match.
killall coffescript
tmux kill-session -t ridgeenv
echo "killed ridge"
}
# Entry point: require one argument and dispatch to start/kill.
# NOTE(review): $1 should be quoted — with no argument this becomes the
# one-word test `[ -z ]`, which only happens to evaluate true by accident.
if [ -z $1 ]; then
usage
exit 1
fi
case $1 in
start)
start
;;
kill)
kill
;;
*)
usage
exit 1
;;
esac
| true |
c3c04ad03440d0f66418df5d6d5116fdbdaae8c0 | Shell | StudioEtrange/roracle-install | /pool/stella/nix/pool/feature-recipe/feature_oracle-jdk.sh | UTF-8 | 8,343 | 2.796875 | 3 | [
"Apache-2.0"
] | permissive | if [ ! "$_ORACLEJDK_INCLUDED_" = "1" ]; then
_ORACLEJDK_INCLUDED_=1
# Recipe for Oracle Java SE Development Kit (=JDK)
# NOTE : Java Runtime Environment (=JRE) is only a java runtime
# Java SE Development Kit (=JDK) includes a JRE and all components needed to develop
# Java EE Development Kit do NOT include any Java SE Development Kit (JDK) nor any JRE. It includes a server (and other examples stuff) implementing the Java EE rules
# releases : http://www.oracle.com/technetwork/java/javase/archive-139210.html
# Stella feature descriptor: declares the recipe name, every supported
# version@arch:flavour combination, and the defaults used when the caller
# does not specify an arch or flavour.
feature_oracle-jdk() {
FEAT_NAME=oracle-jdk
FEAT_LIST_SCHEMA="8u152@x64:binary 8u152@x86:binary 8u91@x86:binary 8u91@x64:binary 8u45@x86:binary 8u45@x64:binary 7u80@x86:binary 7u80@x64:binary"
FEAT_DEFAULT_ARCH=x64
FEAT_DEFAULT_FLAVOUR=binary
}
# Environment callback: expose the feature's install root as JAVA_HOME so
# dependent tooling can locate the JDK.
feature_oraclesejdk_env() {
JAVA_HOME="${FEAT_INSTALL_ROOT}"
export JAVA_HOME
}
# Version descriptor for JDK 8u152: download URLs per platform/arch,
# archive handling protocol, and the callbacks/tests the framework runs.
feature_oracle-jdk_8u152() {
FEAT_VERSION=8u152
FEAT_SOURCE_DEPENDENCIES=
FEAT_BINARY_DEPENDENCIES=
FEAT_SOURCE_URL=
FEAT_SOURCE_URL_FILENAME=
FEAT_SOURCE_URL_PROTOCOL=
if [ "$STELLA_CURRENT_PLATFORM" = "linux" ]; then
FEAT_BINARY_URL_x86="http://download.oracle.com/otn-pub/java/jdk/8u152-b16/aa0333dd3019491ca4f6ddbe78cdb6d0/jdk-8u152-linux-i586.tar.gz"
FEAT_BINARY_URL_FILENAME_x86=jdk-8u152-linux-i586.tar.gz
FEAT_BINARY_URL_PROTOCOL_x86=HTTP_ZIP
FEAT_BINARY_URL_x64="http://download.oracle.com/otn-pub/java/jdk/8u152-b16/aa0333dd3019491ca4f6ddbe78cdb6d0/jdk-8u152-linux-x64.tar.gz"
FEAT_BINARY_URL_FILENAME_x64=jdk-8u152-linux-x64.tar.gz
FEAT_BINARY_URL_PROTOCOL_x64=HTTP_ZIP
fi
if [ "$STELLA_CURRENT_PLATFORM" = "darwin" ]; then
FEAT_BINARY_URL_x86=
# NOTE(review): "_86" below is likely a typo for "_x86" — confirm against
# the framework's variable naming (same pattern repeats in every version).
FEAT_BINARY_URL_FILENAME_86=
FEAT_BINARY_URL_PROTOCOL_x86=
FEAT_BINARY_URL_x64="http://download.oracle.com/otn-pub/java/jdk/8u152-b16/aa0333dd3019491ca4f6ddbe78cdb6d0/jdk-8u152-macosx-x64.dmg"
FEAT_BINARY_URL_FILENAME_x64=jdk-8u152-macosx-x64.dmg
FEAT_BINARY_URL_PROTOCOL_x64=HTTP
# DMG volume / pkg names used by the darwin install path below.
DMG_VOLUME_NAME="JDK 8 Update 152"
PKG_NAME="JDK 8 Update 152.pkg"
fi
FEAT_SOURCE_CALLBACK=
FEAT_BINARY_CALLBACK=feature_oracle-jdk_fix_jni_header
FEAT_ENV_CALLBACK=feature_oraclesejdk_env
FEAT_INSTALL_TEST="$FEAT_INSTALL_ROOT/bin/java"
FEAT_SEARCH_PATH="$FEAT_INSTALL_ROOT/bin"
}
# Version descriptor for JDK 8u91 (same layout as 8u152 above).
feature_oracle-jdk_8u91() {
FEAT_VERSION=8u91
FEAT_SOURCE_DEPENDENCIES=
FEAT_BINARY_DEPENDENCIES=
FEAT_SOURCE_URL=
FEAT_SOURCE_URL_FILENAME=
FEAT_SOURCE_URL_PROTOCOL=
if [ "$STELLA_CURRENT_PLATFORM" = "linux" ]; then
FEAT_BINARY_URL_x86="http://download.oracle.com/otn/java/jdk/8u91-b14/jdk-8u91-linux-i586.tar.gz"
FEAT_BINARY_URL_FILENAME_x86=jdk-8u91-linux-i586.tar.gz
FEAT_BINARY_URL_PROTOCOL_x86=HTTP_ZIP
FEAT_BINARY_URL_x64="http://download.oracle.com/otn/java/jdk/8u91-b14/jdk-8u91-linux-x64.tar.gz"
FEAT_BINARY_URL_FILENAME_x64=jdk-8u91-linux-x64.tar.gz
FEAT_BINARY_URL_PROTOCOL_x64=HTTP_ZIP
fi
if [ "$STELLA_CURRENT_PLATFORM" = "darwin" ]; then
FEAT_BINARY_URL_x86=
FEAT_BINARY_URL_FILENAME_86=
FEAT_BINARY_URL_PROTOCOL_x86=
FEAT_BINARY_URL_x64="http://download.oracle.com/otn/java/jdk/8u91-b14/jdk-8u91-macosx-x64.dmg"
FEAT_BINARY_URL_FILENAME_x64=jdk-8u91-macosx-x64.dmg
FEAT_BINARY_URL_PROTOCOL_x64=HTTP
DMG_VOLUME_NAME="JDK 8 Update 91"
PKG_NAME="JDK 8 Update 91.pkg"
fi
FEAT_SOURCE_CALLBACK=
FEAT_BINARY_CALLBACK=feature_oracle-jdk_fix_jni_header
FEAT_ENV_CALLBACK=feature_oraclesejdk_env
FEAT_INSTALL_TEST="$FEAT_INSTALL_ROOT/bin/java"
FEAT_SEARCH_PATH="$FEAT_INSTALL_ROOT/bin"
}
# Version descriptor for JDK 8u45 (same layout as 8u152 above).
feature_oracle-jdk_8u45() {
FEAT_VERSION=8u45
FEAT_SOURCE_DEPENDENCIES=
FEAT_BINARY_DEPENDENCIES=
FEAT_SOURCE_URL=
FEAT_SOURCE_URL_FILENAME=
FEAT_SOURCE_URL_PROTOCOL=
if [ "$STELLA_CURRENT_PLATFORM" = "linux" ]; then
FEAT_BINARY_URL_x86="http://download.oracle.com/otn/java/jdk/8u45-b14/jdk-8u45-linux-i586.tar.gz"
FEAT_BINARY_URL_FILENAME_x86=jdk-8u45-linux-i586.tar.gz
FEAT_BINARY_URL_PROTOCOL_x86=HTTP_ZIP
FEAT_BINARY_URL_x64="http://download.oracle.com/otn/java/jdk/8u45-b14/jdk-8u45-linux-x64.tar.gz"
FEAT_BINARY_URL_FILENAME_x64=jdk-8u45-linux-x64.tar.gz
FEAT_BINARY_URL_PROTOCOL_x64=HTTP_ZIP
fi
if [ "$STELLA_CURRENT_PLATFORM" = "darwin" ]; then
FEAT_BINARY_URL_x86=
FEAT_BINARY_URL_FILENAME_86=
FEAT_BINARY_URL_PROTOCOL_x86=
FEAT_BINARY_URL_x64="http://download.oracle.com/otn/java/jdk/8u45-b14/jdk-8u45-macosx-x64.dmg"
FEAT_BINARY_URL_FILENAME_x64=jdk-8u45-macosx-x64.dmg
FEAT_BINARY_URL_PROTOCOL_x64=HTTP
DMG_VOLUME_NAME="JDK 8 Update 45"
PKG_NAME="JDK 8 Update 45.pkg"
fi
FEAT_SOURCE_CALLBACK=
FEAT_BINARY_CALLBACK=feature_oracle-jdk_fix_jni_header
FEAT_ENV_CALLBACK=feature_oraclesejdk_env
FEAT_INSTALL_TEST="$FEAT_INSTALL_ROOT/bin/java"
FEAT_SEARCH_PATH="$FEAT_INSTALL_ROOT/bin"
}
#
# Version descriptor for JDK 7u80 (same layout as 8u152 above).
feature_oracle-jdk_7u80() {
FEAT_VERSION=7u80
FEAT_SOURCE_DEPENDENCIES=
FEAT_BINARY_DEPENDENCIES=
FEAT_SOURCE_URL=
FEAT_SOURCE_URL_FILENAME=
FEAT_SOURCE_URL_PROTOCOL=
if [ "$STELLA_CURRENT_PLATFORM" = "linux" ]; then
FEAT_BINARY_URL_x86="http://download.oracle.com/otn/java/jdk/7u80-b15/jdk-7u80-linux-i586.tar.gz"
FEAT_BINARY_URL_FILENAME_x86=jdk-7u80-linux-i586.tar.gz
FEAT_BINARY_URL_PROTOCOL_x86=HTTP_ZIP
FEAT_BINARY_URL_x64="http://download.oracle.com/otn/java/jdk/7u80-b15/jdk-7u80-linux-x64.tar.gz"
FEAT_BINARY_URL_FILENAME_x64=jdk-7u80-linux-x64.tar.gz
FEAT_BINARY_URL_PROTOCOL_x64=HTTP_ZIP
fi
if [ "$STELLA_CURRENT_PLATFORM" = "darwin" ]; then
FEAT_BINARY_URL_x86=
FEAT_BINARY_URL_FILENAME_86=
FEAT_BINARY_URL_PROTOCOL_x86=
FEAT_BINARY_URL_x64="http://download.oracle.com/otn/java/jdk/7u80-b15/jdk-7u80-macosx-x64.dmg"
FEAT_BINARY_URL_FILENAME_x64=jdk-7u80-macosx-x64.dmg
FEAT_BINARY_URL_PROTOCOL_x64=HTTP
DMG_VOLUME_NAME="JDK 7 Update 80"
PKG_NAME="JDK 7 Update 80.pkg"
fi
FEAT_SOURCE_CALLBACK=
FEAT_BINARY_CALLBACK=feature_oracle-jdk_fix_jni_header
FEAT_ENV_CALLBACK=feature_oraclesejdk_env
FEAT_INSTALL_TEST="$FEAT_INSTALL_ROOT/bin/java"
FEAT_SEARCH_PATH="$FEAT_INSTALL_ROOT/bin"
}
# fix problems with jni_md.h
# http://stackoverflow.com/a/24996278
# Symlink the platform-specific JNI headers into the generic include/
# directory so native builds find <jni_md.h>/<jawt_md.h> without extra
# -I flags. No-op on platforms other than darwin/linux.
feature_oracle-jdk_fix_jni_header() {
local _os
case "$STELLA_CURRENT_PLATFORM" in
darwin|linux) _os="$STELLA_CURRENT_PLATFORM" ;;
*) return 0 ;;
esac
# -f makes the operation idempotent: re-running an install no longer fails
# because the links already exist; quoting protects paths with spaces.
ln -sf "$FEAT_INSTALL_ROOT/include/$_os/jni_md.h" "$FEAT_INSTALL_ROOT/include/jni_md.h"
ln -sf "$FEAT_INSTALL_ROOT/include/$_os/jawt_md.h" "$FEAT_INSTALL_ROOT/include/jawt_md.h"
}
# Download and unpack the Oracle JDK binary for the current platform into
# FEAT_INSTALL_ROOT, caching the downloaded archive. The Oracle license
# cookie header is required for unattended downloads.
feature_oracle-jdk_install_binary() {
mkdir -p "$STELLA_APP_CACHE_DIR"
if [ "$STELLA_CURRENT_PLATFORM" = "linux" ]; then
if [ ! -f "$STELLA_APP_CACHE_DIR/$FEAT_BINARY_URL_FILENAME" ]; then
wget --no-cookies --no-check-certificate --header "Cookie: gpw_e24=http%3A%2F%2Fwww.oracle.com%2F; oraclelicense=accept-securebackup-cookie" "$FEAT_BINARY_URL" -O "$STELLA_APP_CACHE_DIR/$FEAT_BINARY_URL_FILENAME"
fi
# STRIP drops the archive's single top-level directory.
__uncompress "$STELLA_APP_CACHE_DIR/$FEAT_BINARY_URL_FILENAME" "$FEAT_INSTALL_ROOT" "DEST_ERASE STRIP"
fi
if [ "$STELLA_CURRENT_PLATFORM" = "darwin" ]; then
mkdir -p $STELLA_APP_TEMP_DIR
# download
if [ ! -f "$STELLA_APP_CACHE_DIR/$FEAT_BINARY_URL_FILENAME" ]; then
# TODO : check cookie (twice the same)
curl -j -k -S -L -H "Cookie: oraclelicense=accept-securebackup-cookie; oraclelicense=accept-securebackup-cookie" -o "$STELLA_APP_CACHE_DIR/$FEAT_BINARY_URL_FILENAME" "$FEAT_BINARY_URL"
fi
# mount dmg file and extract pkg file
if [ ! -f "$STELLA_APP_CACHE_DIR/$PKG_NAME" ]; then
hdiutil mount "$STELLA_APP_CACHE_DIR/$FEAT_BINARY_URL_FILENAME"
cp "/Volumes/$DMG_VOLUME_NAME/$PKG_NAME" "$STELLA_APP_CACHE_DIR/$PKG_NAME"
hdiutil unmount "/Volumes/$DMG_VOLUME_NAME"
fi
# unzip pkg file
rm -Rf "$STELLA_APP_TEMP_DIR/$FEAT_VERSION"
pkgutil --expand "$STELLA_APP_CACHE_DIR/$PKG_NAME" "$STELLA_APP_TEMP_DIR/$FEAT_VERSION/"
# extract files from payload
rm -Rf "$FEAT_INSTALL_ROOT"
mkdir -p "$FEAT_INSTALL_ROOT"
cd "$FEAT_INSTALL_ROOT"
for payload in "$STELLA_APP_TEMP_DIR"/$FEAT_VERSION/jdk*; do
tar xvzf "$payload/Payload"
done
# Flatten the macOS bundle layout (Contents/Home) into the install root.
__copy_folder_content_into "$FEAT_INSTALL_ROOT/Contents/Home" "$FEAT_INSTALL_ROOT"
rm -Rf "$FEAT_INSTALL_ROOT/Contents"
rm -Rf "$STELLA_APP_TEMP_DIR/$FEAT_VERSION"
fi
# Run the post-install callback (JNI header symlink fix).
__feature_callback
}
fi
| true |
c535b6170b0af2c00881edd949266ca4ac9cafcc | Shell | jWhytis01/techNotes | /scripts/Dev_Sys_Notes/DevBuild | UTF-8 | 6,543 | 2.9375 | 3 | [] | no_license | #!/bin/sh
# General principles.
# Domain services in
# Can I setup Users
# All software is to be installed at the top of the users home directory.
# Make sure we are in the user directory
cd
###################################################
# Start by updating the database
sudo apt update -yy
sudo apt upgrade -yy
sudo apt-get update -yy
sudo apt-get upgrade -yy
# NOTE(review): the `| cat` in the three lines below is redundant — a
# plain `>>` redirect on echo would do; also `echo $(date)` word-splits
# the date output (collapses runs of spaces).
echo "Log Begin" | cat >> theLog.txt
echo $(date) | cat >> theLog.txt
echo "" | cat >> theLog.txt
###################################################
# No, not a best practice - Create root password
# Yes - Create M21 password
# Yes -Create Ubuntu password
# Create Other User?
# Add many21ai to the admins group
# why is my login able to act as sudo without having a password set
# /home/many21ai/.local/bin/blueprint
###################################################
# Make a connection to python3
# sudo apt install python-is-python3
# sudo apt install python3-pip
# http://devstructure.com/blueprint/
# python --version
# pip install blueprint
# Add utilities
# https://phoenixnap.com/kb/install-rpm-packages-on-ubuntu
sudo apt-get install alien -yy
sudo apt install mlocate -yy
sudo apt install tree -yy
###################################################
# Install Java from command line
# Java
# https://stackoverflow.com/questions/10268583/downloading-java-jdk-on-linux-via-wget-is-shown-license-page-instead
sudo mkdir /usr/java/
cd /usr/java/
sudo wget --no-check-certificate -c --header "Cookie: oraclelicense=accept-securebackup-cookie" https://download.oracle.com/otn-pub/java/jdk/16.0.2%2B7/d4a915d82b4c4fbb9bde534da945d746/jdk-16.0.2_linux-x64_bin.rpm
sudo alien jdk-16.0.2_linux-x64_bin.rpm -yy
# NOTE(review): dpkg has no -yy option — confirm this flag is intended.
sudo dpkg -i jdk-16.0.2_16.0.2-1_amd64.deb -yy
# Clean up files
sudo rm -r jdk-16.0.2_linux-x64_bin.rpm
sudo rm -r jdk-16.0.2_16.0.2-1_amd64.deb
cd #return to home directory
# Do i need to add a user?
# Do i need to check the path
###################################################
# Docker
# https://docs.docker.com/engine/install/ubuntu/
sudo apt-get remove docker docker-engine docker.io containerd runc
sudo apt-get update
sudo apt-get install \
apt-transport-https \
ca-certificates \
curl \
gnupg \
lsb-release -yy
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
echo "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
#less /etc/apt/sources.list.d/docker.list
sudo apt-get update
sudo apt-get install docker-ce docker-ce-cli containerd.io -yy
# Allow the current user to run docker without sudo (takes effect on
# next login).
sudo groupadd docker
sudo usermod -aG docker $USER
sudo rm -r ~/.docker/
# https://docs.docker.com/config/daemon/systemd/
# You will need to log out for changes to take effect
###################################################
# google cloud
# python --version
# python3 --version
# curl -O https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-sdk-351.0.0-linux-x86_64.tar.gz
# I have a version of this in my snap folder.
# I am going to hold off on install.
# locate docker | less
# https://cloud.google.com/code/docs/intellij/client-libraries#for-all-other-projects
# https://cloud.google.com/code/docs/intellij/client-libraries#cloud-run_1
# google cloud python
# miniKube
# Log into google Cloud account
# Running Gcloud --version tells me miniKube it is already there.
###################################################
# Install Gnome Shell
# https://subscription.packtpub.com/book/big_data_and_business_intelligence/9781788474221/1/ch01lvl1sec15/installing-and-configuring-ubuntu-desktop-for-google-cloud-platform
sudo apt-get update -yy
sudo apt-get upgrade -yy
sudo apt-get install gnome-shell -yy
sudo apt-get install ubuntu-gnome-desktop -yy
sudo apt-get install autocutsel -yy
sudo apt-get install gnome-core -yy
sudo apt-get install gnome-panel -yy
sudo apt-get install gnome-themes-standard -yy
###################################################
# Install I3
# https://www.tecmint.com/i3-tiling-window-manager/
sudo apt update -yy
sudo apt install i3 -yy
###################################################
# Chrome Browser
# https://kifarunix.com/install-google-chrome-browser-on-ubuntu-20-04/
sudo wget https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb -P /tmp
sudo apt install /tmp/google-chrome-stable_current_amd64.deb -yy
#Log into the newly created vm
###################################################
# Install tools app
# Install Pycharm
# Enable the google cloud code plog-in
###################################################
# Resources
# https://linuxize.com/post/how-to-install-python-3-7-on-ubuntu-18-04/
# VM and google Cloud
# https://cloud.google.com/architecture/chrome-desktop-remote-on-compute-engine
# https://remotedesktop.google.com/headless
# sys admin
# https://tldp.org/LDP/lame/LAME/linux-admin-made-easy/changing-user-passwords.html
# https://www.baeldung.com/linux/path-variable
# https://unix.stackexchange.com/questions/241215/how-can-i-find-out-which-users-are-in-a-group-within-linux
# getent group name_of_group
# https://serverfault.com/questions/3852/what-tool-do-you-recommend-to-track-changes-on-a-linux-unix-server
# https://www.youtube.com/watch?v=oxuRxtrO2Ag
# https://linuxhint.com/change_users_password_linux/
# Cloud run
# https://kubernetes.io/docs/tasks/tools/
# https://docs.gitlab.com/charts/development/minikube/
# https://minikube.sigs.k8s.io/docs/handbook/vpn_and_proxy/
# https://community.spiceworks.com/topic/2206557-chrome-remote-desktop-what-specific-udp-port-ranges-need-to-be-opened
# https://cloud.google.com/container-registry
# https://cloud.google.com/logging/docs/reference/tools/gcloud-logging
# Windows Managers / Display
# https://i3wm.org/docs/userguide.html
# https://kifarunix.com/install-and-setup-i3-windows-manager-on-ubuntu-20-04/
# https://www.keyxl.com/aaae677/56/Linux-Gnome-Window-Manager-keyboard-shortcuts.htm
# https://www.linuxtrainingacademy.com/install-desktop-on-ubuntu-server/
# Unity (The Default Desktop)
# sudo apt install ubuntu-desktop
# LXDE (Lubuntu)
# sudo apt-get install lubuntu-desktop
# MATE
# sudo apt-get install mate-desktop
# Gnome
# sudo apt-get install ubuntu-gnome-desktop
# XFCE (Xubuntu)
# sudo apt-get install xubuntu-desktop
#
# https://devanswers.co/how-to-fix-authentication-is-required-to-create-a-color-profile-managed-device-on-ubuntu-20-04-20-10/
| true |
1514c4f9c2a27bd7aaca2d8ba4b66b8b2aa826ec | Shell | VicenteIranzoMaestre/genwebupc4ZEO | /backup.sh | UTF-8 | 3,528 | 3.375 | 3 | [] | no_license | #!/bin/bash
# Nightly ZEO/Plone backup script for Genweb UPC.
# To take a full (packed) backup run: ./cron_nocturn.sh full
GWROOT_FOLDER=/var/plone/genwebupcZEO
GENWEB_FOLDER=/var/plone/genwebupcZEO/produccio
# File listing environments, one "<environment> <zeo-port>" pair per line.
ENTORNS=/var/plone/genwebupcZEO/entorns
# Directory with one file per environment listing that environment's instances.
INSTANCIES=/var/plone/genwebupcZEO/produccio/instancies
INSTANCIES_PORT=/var/plone/genwebupcZEO/produccio/config/sylar-ports-db.conf
BACKUP_FOLDER=/backup/genwebupc
LOGBACKUP=/var/plone/genwebupcZEO/backup.log
# Day of month: dated blob snapshots therefore rotate over at most 31 folders.
BACKUPDIR=`date +%d`
## Functions
#######################################
# packEntorn ENV PORT
# Run zeopack against every instance listed in $INSTANCIES/ENV, talking to
# the ZEO server at 127.0.0.1:PORT.
# NOTE(review): 'exec<file' rebinds the whole shell's stdin, not just this
# loop's, and it is never restored afterwards — confirm that is intended.
#######################################
function packEntorn {
exec<$INSTANCIES/$1
while read line
do
# The instance name is the first whitespace-separated token of the line.
instancia=`echo ${line%% *}`
echo "Packing $instancia at $1"
$GENWEB_FOLDER/bin/zeopack -S $instancia -p $2 -h 127.0.0.1
done
}
#######################################
# inicialitzaBackupEntorn ENV
# Make sure the backup directory tree for environment ENV exists:
#   $BACKUP_FOLDER/ENV, .../current-blobs, .../blobs, .../blobs/$BACKUPDIR
# Each directory is probed and created individually, parents first, just
# like the original chain of '-x' checks did.
#######################################
function inicialitzaBackupEntorn {
echo "Comprovant si existeixen els folders necessaris"
local dir
for dir in \
    "$BACKUP_FOLDER/$1" \
    "$BACKUP_FOLDER/$1/current-blobs" \
    "$BACKUP_FOLDER/$1/blobs" \
    "$BACKUP_FOLDER/$1/blobs/$BACKUPDIR"
do
    if ! [ -x "$dir" ]; then
        mkdir "$dir"
    fi
done
}
#######################################
# backupZODBsEntorn ENV [PORT]
# For every instance listed in $INSTANCIES/ENV: take a repozo backup of its
# Data.fs (forced full backup when ENV is "full", incremental otherwise)
# and mirror its blob directory into the current-blobs tree.
# NOTE(review): the second argument passed by the caller is never used here.
#######################################
function backupZODBsEntorn {
while read line
do
# Instance name is the first whitespace-separated token of the line.
instancia=`echo ${line%% *}`
echo "Comprovant si existeixen els folders necessaris"
# Make sure the per-instance ZODB backup folder exists.
if ! [ -x $BACKUP_FOLDER/$1/$instancia ]; then
mkdir $BACKUP_FOLDER/$1/$instancia
fi
# Back up Data.fs with repozo (-F forces a full backup after packing).
echo "Backup ZODB de la instancia $instancia"
if [ "$1" = "full" ]; then
$GENWEB_FOLDER/bin/repozo -B -F -r $BACKUP_FOLDER/$1/$instancia/ -f $GENWEB_FOLDER/var/filestorage/Data_$instancia.fs
echo "Purgant backups antics de la instancia $instancia"
#$GWROOT_FOLDER/neteja.py -l $BACKUP_FOLDER/$1/$instancia -k 2
else
$GENWEB_FOLDER/bin/repozo -B -r $BACKUP_FOLDER/$1/$instancia/ -f $GENWEB_FOLDER/var/filestorage/Data_$instancia.fs
fi
# Mirror the instance blob directory (--delete removes files gone at source).
echo "Backup dels blobs de la instancia $instancia"
echo "rsync --force --ignore-errors --delete --update -a $GENWEB_FOLDER/var/blobs/$instancia/ $BACKUP_FOLDER/$1/current-blobs/$instancia/"
rsync --force --ignore-errors --delete --update -a $GENWEB_FOLDER/var/blobs/$instancia/ $BACKUP_FOLDER/$1/current-blobs/$instancia/
done <$INSTANCIES/$1
}
#######################################
# actualitzaBackupBlobsAvui ENV
# Replace today's dated blob snapshot with a hard-link copy of the
# current-blobs mirror (cpio -dplm links files instead of copying data).
# NOTE(review): the existence probe uses -x (executable) rather than -d.
#######################################
function actualitzaBackupBlobsAvui {
if [ -x $BACKUP_FOLDER/$1/blobs/$BACKUPDIR/ ] ; then
rm -rf $BACKUP_FOLDER/$1/blobs/$BACKUPDIR
fi
cd $BACKUP_FOLDER/$1/current-blobs && find . -print | cpio -dplm $BACKUP_FOLDER/$1/blobs/$BACKUPDIR
}
echo "INICI BACKUP" >> $LOGBACKUP
echo `date` >> $LOGBACKUP
### Main: executed once per environment listed in $ENTORNS.
while read line
do
echo "$line"
# Split the line into "<environment> <zeo-port>".
port=`echo ${line#* }`
entorn=`echo ${line%% *}`
# Pack all instances of the environment (full backups only).
if [ "$1" = "full" ]; then
packEntorn $entorn $port
fi
# Back up every ZODB plus the blobs of all instances of the environment.
inicialitzaBackupEntorn $entorn
backupZODBsEntorn $entorn $port
actualitzaBackupBlobsAvui $entorn
done <$ENTORNS
echo "FI BACKUP" >> $LOGBACKUP
echo `date` >> $LOGBACKUP
| true |
e41b26b92706b7eaf3616efb8e9332e12be5b6e9 | Shell | CanonicalLtd/serial-vault | /factory-serial-vault/src/apache/bin/disable-https | UTF-8 | 310 | 3.34375 | 3 | [] | no_license | #!/bin/sh
# Disable HTTPS for the snap-packaged Apache: deactivate any active
# certificates and restart Apache if it is running.
# Load the shared certificate helper functions shipped with the snap.
# Quoting "$SNAP" (and "$(id -u)" below) fixes word-splitting / empty-value
# breakage the unquoted expansions had.
. "$SNAP/utilities/https-utilities"
# Certificate handling touches system state, so require root.
if [ "$(id -u)" -ne 0 ]; then
    echo "This utility needs to run as root"
    exit 1
fi
if certificates_are_active; then
    echo -n "Deactivating HTTPS... "
    deactivate_certificates
    echo "done"
    restart_apache_if_running
else
    echo "HTTPS doesn't seem enabled"
    exit 1
fi
dab6f377e2170ba72db01e638723dcf8767c6497 | Shell | sidAvad/AdmixtureTimeInference | /scripts/ped-sim.sh | UTF-8 | 1,561 | 2.734375 | 3 | [] | no_license | #$ -S /bin/bash # Use bash
#$ -N ped-sim_admixture-times # name for job in qstat/output filename prefix
#$ -o /fs/cbsubscb09/storage/siddharth/AdmixtureTimeInference/$JOB_ID.ped-sim
#$ -j y # output from stdout/err in one file
#$ -m ae # send email on abort or exit of job
#$ -l h_vmem=4G # use 256G of memory instead of the default (4G)
#$ -q regular.q # use the regular queue (for jobs > 24 hours)
# NOTE(review): the h_vmem comment mentions 256G but the directive asks for 4G.
# Mount the lab storage server before touching anything under /storage.
/programs/bin/labutils/mount_server cbsubscb09 /storage
cd siddharth/AdmixtureTimeInference/
# Input/output locations and simulation parameters.
OUT_DIR=~/siddharth/AdmixtureTimeInference/Output/ped-sim
MAP_SPF=/fs/cbsubscb09/storage/resources/genetic_maps/refined_mf.simmap #mapfile for ped-sim run
MAP_AVG=~/siddharth/AdmixtureTimeInference/Resources/sex-averaged_mf.simmap
t=3
# NOTE(review): ntypes is assigned but never used below.
ntypes=16
DEF_FILE=~/siddharth/AdmixtureTimeInference/Resources/admixture-times_${t}gens.def
#Run ped-sim with poisson model and sex averaged map
echo 'running ped-sim in with sex averaged maps and poisson crossover model'
echo "${t}gens is being simulatied" && cat ${DEF_FILE}
ped-sim --bp -d ${DEF_FILE} -m ${MAP_AVG} -i /dev/null --pois -o ${OUT_DIR}/admixture-time_${t}gens_poisson_sexavg --founder_ids
#Run ped-sim with interference model and sex-specific map
echo 'running ped-sim with sex specific maps and interference crossover model'
echo "${t}gens is being simulated" && cat ${DEF_FILE}
ped-sim --bp -d ${DEF_FILE} -m ${MAP_SPF} -i /dev/null --intf ~/siddharth/programs/ped-sim/interfere/nu_p_campbell.tsv -o ${OUT_DIR}/admixture-time_${t}gens_intf_sexspf --founder_ids
| true |
8fae3ce826e6145f9b79cd0f790f86d3b9b6086b | Shell | rizar/digits | /scripts/cv | UTF-8 | 479 | 2.640625 | 3 | [] | no_license | #!/usr/bin/env bash
# Launch the cross-validation MATLAB job.
# Tunables (override via environment): NST = number of HMM states,
# NCMP = number of mixture components, EPS = HMM convergence epsilon.
# ${VAR:-default} replaces the old unquoted '[ -z $VAR ]' probes, which
# broke when a value contained whitespace or glob characters.
NST=${NST:-3}
NCMP=${NCMP:-3}
EPS=${EPS:-1e-2}
set -u
PARAMS="${NST}_${NCMP}_${EPS}"
SAVE="logs/ws_${PARAMS}.mat"
LOG="logs/log_${PARAMS}"
# MATLAB statements run via -r: set up globals, snapshot the workspace,
# run the cross-validation, then quit.
CMD="path(path,'src'); "
CMD+="global n_states; n_states=$NST; "
CMD+="global n_components; n_components=$NCMP; "
CMD+="global hmm_epsilon; hmm_epsilon=$EPS; "
CMD+="save('$SAVE'); runcv; exit; "
MATLAB=/home/rizar/Dist/matlab2013a/bin/matlab
# "$LOG" is quoted so a parameter value containing spaces cannot split the
# redirection target.
"$MATLAB" -nojvm -nodisplay -nosplash \
    -r "$CMD" \
    > "$LOG"
wait
| true |
9890b8387808cbf7833ed941291537acc92d4a9a | Shell | tamazlykar/cloudstack-ui | /scripts/ci/install-simulator.sh | UTF-8 | 802 | 3.96875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -e -u -o pipefail
# Required environment (enforced by -u): SIMULATOR_CONTAINER_NAME,
# DOCKER_NETWORK_NAME, SIMULATOR_HOST_PORT, SIMULATOR_CONTAINER_PORT,
# SIMULATOR_IMAGE, SIMULATOR_STATUS_CHECK_PORT.
# Remove any leftover simulator container from a previous run.
if [ "$(docker ps -aq -f name=${SIMULATOR_CONTAINER_NAME})" ]; then
    docker stop ${SIMULATOR_CONTAINER_NAME} && docker rm ${SIMULATOR_CONTAINER_NAME}
fi
# Start a fresh detached simulator container on the test network.
docker run --name ${SIMULATOR_CONTAINER_NAME} -d \
    --network ${DOCKER_NETWORK_NAME} \
    -p ${SIMULATOR_HOST_PORT}:${SIMULATOR_CONTAINER_PORT} \
    ${SIMULATOR_IMAGE}
echo -e "\nDocker container is started\n";
sleep 5;
echo -e "\nWait until simulator is initialized\n"
# Poll every 5s for up to 200 attempts; an HTTP 403 from the status port
# is treated as "simulator finished initializing".
for i in $(seq 1 200); do
    PORT_STATUS=$(curl -LI 127.0.0.1:${SIMULATOR_STATUS_CHECK_PORT} -o /dev/null -w '%{http_code}\n' -s);
    if [ "$PORT_STATUS" = "403" ]; then
      echo -e "\nSimulator initialization is done\n";
      break;
    fi;
    echo -en "\rChecking... ($i/200)";
    echo -e "\n Server HTTP response $PORT_STATUS";
    sleep 5;
done
| true |
fb65222d8295b9f9a8f0be25b6a6d3a055b8c612 | Shell | vnkrtv/go-vk-tracker | /deploy/deploy_container | UTF-8 | 1,221 | 3.21875 | 3 | [] | no_license | #!/bin/bash
# Interactive deploy helper for go-vk-tracker: collects VK API and
# PostgreSQL connection settings, writes them to config/config.json and
# builds the Docker image.
# Fixed VK API parameters embedded in the generated config.
VK_API_V=5.126
TIMEOUT=0.34

read -r -p "VK Token: " VK_TOKEN
if [ "${VK_TOKEN}" == "" ]
then
  echo "Error: VK Token must be specify"
  exit 1
fi

read -r -p "PostgreSQL host: " PG_HOST
if [ "${PG_HOST}" == "" ]
then
  echo "Error: PostgreSQL host must be specify"
  exit 1
fi

read -r -p "PostgreSQL port (default: 5432): " PG_PORT
if [ "${PG_PORT}" == "" ]
then
  PG_PORT=5432
fi

read -r -p "PostgreSQL user (default: postgres): " PG_USER
if [ "${PG_USER}" == "" ]
then
  PG_USER=postgres
fi

# -s keeps the password off the terminal (previously it was echoed in
# clear text); the explicit echo restores the newline that -s suppresses.
read -r -s -p "PostgreSQL password: " PG_PASS
echo
if [ "${PG_PASS}" == "" ]
then
  echo "Error: PostgreSQL password must be specify"
  exit 1
fi

read -r -p "PostgreSQL db name (default: vkgroups): " PG_NAME
if [ "${PG_NAME}" == "" ]
then
  PG_NAME=vkgroups
fi

# Exported so the build tooling can also read the settings.
export PG_HOST="${PG_HOST}"
export PG_PORT="${PG_PORT}"
export PG_USER="${PG_USER}"
export PG_PASS="${PG_PASS}"
export PG_NAME="${PG_NAME}"

# Render the application config. NOTE(review): this file contains secrets;
# consider restricting it with chmod 600.
cat > config/config.json <<- EOM
{
  "pguser": "${PG_USER}",
  "pgpass": "${PG_PASS}",
  "pgname": "${PG_NAME}",
  "pghost": "${PG_HOST}",
  "pgport": "${PG_PORT}",
  "vktoken": "${VK_TOKEN}",
  "vkapi_version": "${VK_API_V}",
  "timeout": ${TIMEOUT}
}
EOM

sudo docker build -t 23031999/go-vk-tracker .
| true |
9de153b13348c6d27d500623c3e7d011b2c3ad7f | Shell | Konnektid/jenkins | /build.sh | UTF-8 | 1,234 | 3.796875 | 4 | [
"MIT"
] | permissive | #!/bin/sh
# Tag name to use
TAG_NAME=konnektid/jenkins
# Utilities: terminal colour codes.
# printf emits a real ESC byte. The previous ESC_SEQ="\033[" stored the
# backslash sequence literally, so the plain (non -e) echo calls below
# printed '\033[1;31m...' instead of colouring the output.
ESC_SEQ=$(printf '\033[')
COL_RESET="${ESC_SEQ}0m"
COL_CYAN="${ESC_SEQ}36m"
COL_UL="${ESC_SEQ}04m"
COL_RED="${ESC_SEQ}1;31m"
COL_GREEN="${ESC_SEQ}1;32m"
COL_GREY="${ESC_SEQ}1;30m"
ARROW="${COL_CYAN} => ${COL_RESET}"
# must CMD [ARGS...] — run CMD and abort the whole script if it fails.
# Stdout of CMD passes through untouched; the failure banner now goes to
# stderr so it cannot pollute output captured from this script.
must() {
    "$@"
    local status=$?
    if [ "$status" -ne 0 ]; then
        echo "${COL_RED} => Failed!${COL_RESET}" >&2
        exit 1
    fi
}
# Determine Jenkins version to use (latest tag from the GitHub API).
# NOTE(review): the pattern only captures major.minor digits and 'sort' is
# lexicographic (e.g. 2.9 sorts after 2.10) — verify before relying on this
# to pick releases.
echo "${ARROW}Determining Jenkins version"
JENKINS_VERSION=`curl -s https://api.github.com/repos/jenkinsci/jenkins/tags | grep '"name":' | grep -o '[0-9]\.[0-9]*' | uniq | sort | tail -1`
echo " ${COL_GREY}Latest is $JENKINS_VERSION${COL_RESET}"
# Build the image from scratch: no cache, always pull the base image.
echo "${ARROW}Building image"
must docker build \
    --no-cache \
    --pull \
    --tag $TAG_NAME:$JENKINS_VERSION .
# Push new tag
echo "${ARROW}Pushing $COL_UL$TAG_NAME:$JENKINS_VERSION$COL_RESET to Docker Hub"
must docker push $TAG_NAME:$JENKINS_VERSION
# Update latest to point at the version just pushed.
echo "${ARROW}Updating ${COL_UL}latest${COL_RESET} tag"
must docker tag $TAG_NAME:$JENKINS_VERSION $TAG_NAME:latest && \
must docker push $TAG_NAME:latest
# All done
echo "${COL_GREEN} => All done!${COL_RESET}"
| true |
406d16a0b096999ae7c618d22f35a149a0f8147b | Shell | kgrozis/bash | /04/04.5__run_cmd_on_success.sh | UTF-8 | 260 | 3.046875 | 3 | [] | no_license | #!/bin/bash
# Problem: Want to only run a command(s) if another one succeeds
# cd must succed and return 0
cd mytmp
# cd return 0 then rm all file in mytmp dir
# without if statement all files in cwd would be deleted on failure
if (( $? == 0)); then rm *; fi | true |
84fb88da7496e45047a66e2fd9300f4c687c30ab | Shell | theappbusiness/MasterFastfile | /setup.sh | UTF-8 | 2,756 | 2.9375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Create the fastlane directory with an Appfile and a Fastfile that imports
# the shared TAB master Fastfile from git.
# NOTE(review): 'cd fastlane' is unchecked — if mkdir failed the files
# below would be written to the wrong directory.
mkdir fastlane
cd fastlane
# The app identifier is resolved from the environment at fastlane run time.
echo "app_identifier = ENV['FL_UPDATE_PLIST_APP_IDENTIFIER']" > Appfile
echo "fastlane_version \"2.86.0\"
import_from_git(url: 'https://github.com/theappbusiness/MasterFastfile.git', path: 'Fastfile')" > Fastfile
#######################################
# Write ./.env.default, the dotenv file shared by all fastlane lanes.
# Fixes:
#  * GYM_CODE_SIGNING_IDENTITY had an unbalanced triple quote (=""") which
#    dotenv would read as the literal value '" #Code Sign Identity'.
#  * The heredoc delimiter is now quoted ('EOF'): the content contains no
#    shell expansions, but it does contain backticks (in the
#    APPCENTER_OWNER_NAME comment) which the old unquoted heredoc executed
#    as command substitutions, dropping that text from the generated file.
#######################################
function make_default_env_file {
cat > .env.default <<'EOF'
#This is your default environment file
#Set environment variables used in all builds here
#More information on available environment variables can be found here https://github.com/theappbusiness/MasterFastfile/wiki/Quick-simple-setup-using-TAB-defaults
GYM_EXPORT_OPTIONS="" # Path to export options plist
GYM_CODE_SIGNING_IDENTITY="" #Code Sign Identity
FL_PROJECT_SIGNING_PROJECT_PATH="" # Path to xcode project to sign
FL_UPDATE_PLIST_PATH="" #Path to Info.plist of application
FL_SLACK_CHANNEL= #Slack channel to post build results to
SCAN_DEVICE="iPhone SE (11.4)"
SCAN_SLACK_CHANNEL="" #Slack channel to post test run info to
TAB_PROVISIONING_PROFILE="" #The name of the provisioning profile to use.
TAB_PROVISIONING_PROFILE_PATH="" #Path to the provisioning profile to install.
TAB_PRIMARY_TARGET="" #Main app target name
TAB_XCODE_PATH="" #Path to required xcode
TAB_UI_TEST_DEVICES="iPhone X (11.4)"
TAB_UI_TEST_SCHEME="" #Scheme to use for running UITests
TAB_SLACK_WEBHOOK_URL="" #Slack webhook for posting build information
ICON_OVERLAY_ASSETS_BUNDLE="" #Path to .xcassets containing AppIcon. If this variable exists build info will be added to the app icon
APPCENTER_API_TOKEN="" #Used to upload builds to app center
APPCENTER_OWNER_NAME="" # when you're on your organization's page in AppCenter, this is the part of the URL slug after `orgs`: `https://appcenter.ms/orgs/<owner_name>/applications`
ITUNES_CONNECT_USERNAME="" #iTunes Connect login (usually email address)
ITUNES_CONNECT_TEAM_ID="" #The ID of your iTunes Connect team if you're in multiple teams https://github.com/fastlane/fastlane/issues/4301#issuecomment-253461017
ITUNES_CONNECT_PROVIDER="" #The provider short name to be used with the iTMSTransporter to identify your team
EOF
}
#######################################
# make_custom_env_file ENV
# Write ./.env.ENV with the per-environment (test/uat/staging/prod)
# fastlane placeholders. The heredoc delimiter is intentionally unquoted
# so that ${env} expands inside the generated file's header comments.
#######################################
function make_custom_env_file {
env=$1
cat > .env.$env <<EOF
#This is your ${env} environment file
#Set environment variables used in ${env} builds here
GYM_SCHEME="" #Scheme name
FL_UPDATE_PLIST_APP_IDENTIFIER="" #App Bundle Identifier
FL_UPDATE_PLIST_DISPLAY_NAME="" #Display name of app
APPCENTER_APP_NAME="" #Name of the application in app center
APPCENTER_DISTRIBUTE_DESTINATIONS="" #Distribution groups to give access to this app
APPCENTER_DISTRIBUTE_NOTIFY_TESTERS="" #Set to true to email testers about new build.
EOF
}
# Generate the shared default env file plus one file per deployment stage.
make_default_env_file
make_custom_env_file "test"
make_custom_env_file "uat"
make_custom_env_file "staging"
make_custom_env_file "prod"
| true |
bd8f47bbec6089cc6f24b5a7e7811830c1f1b5ad | Shell | mbooali/shell-scripts-practice | /ch7/guess.sh | UTF-8 | 423 | 3.359375 | 3 | [] | no_license | #!/usr/bin/env bash
# Number-guessing game: the player keeps guessing until they hit COMPUTER.
COMPUTER=25
while true
do
  # Bail out on EOF (Ctrl-D / closed stdin); without this check the old
  # loop spun forever re-printing "No Input!!!" once stdin was exhausted.
  # -r keeps backslashes in the input literal.
  if ! read -r -p "What do you think: " GUESS; then
    echo
    break
  fi
  # "$GUESS" is quoted everywhere: the old unquoted tests broke (test:
  # too many arguments) when the player typed more than one word.
  if [ -z "$GUESS" ]
  then
    echo "No Input!!!"
  elif ! [[ "$GUESS" =~ ^[0-9]+$ ]]
  then
    # Non-numeric input used to make the -lt comparison error out.
    echo "Not a number!!!"
  elif [ "$GUESS" -lt "$COMPUTER" ]
  then
    echo "Too low"
  elif [ "$GUESS" -gt "$COMPUTER" ]
  then
    echo "Too big"
  else
    echo "That's it!!! You Won!!!"
    break
  fi
done
exit 0
ebd67dc9abc3c087b40987310ce26abbd84b7ef3 | Shell | dtroyer/docker-bind | /entrypoint.sh | UTF-8 | 294 | 3.3125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/sh -x
# Container entrypoint for BIND (named).
# If an alternate entrypoint is present (e.g. from a mounted config
# volume), run it first.
if [ -r /etc/bind.alt/entrypoint.sh ]; then
    # '.' is the POSIX spelling; 'source' is a bashism that fails under
    # dash-style /bin/sh.
    . /etc/bind.alt/entrypoint.sh
fi

# Run named in the foreground (-g logs to stderr) as the 'named' user,
# forwarding container arguments verbatim. Passing "$@" directly preserves
# arguments containing whitespace, which OPTIONS=$@ + unquoted $OPTIONS
# did not.
exec /usr/sbin/named -c /etc/bind/named.conf -g -u named "$@"
9e0f81068f2d392379abcdbfa42bfe1c6e8d4dac | Shell | RIFTIO/RIFT.ware | /modules/tools/scripts/scripts/usr/rift/etc/install_loganalyzer | UTF-8 | 358 | 3.359375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
echo "Installing loganalyzer via link to ${RIFT_INSTALL}"
if [ -d /var/www/html ] ; then
if [ x${RIFT_INSTALL} != x -a -d ${RIFT_INSTALL} ] ; then
sudo rm -R -f /var/www/html/loganalyzer
sudo ln -s /usr/share/loganalyzer/src /var/www/html/loganalyzer
sudo systemctl restart httpd
fi
else
echo 'No /var/www/html directory?'
fi
| true |
20b278c2b81f83e24fb7a6a6f6b3504f1f501fe6 | Shell | jaimergp/computer | /src/install/_knime.sh | UTF-8 | 838 | 3.5 | 4 | [
"MIT"
] | permissive | #!/bin/bash
cd "$(dirname "${BASH_SOURCE[0]}")" \
&& . "utils.sh" \
&& . "../utils.sh"
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
execute "wget https://download.knime.org/analytics-platform/linux/knime-latest-linux.gtk.x86_64.tar.gz -O knime.tar.gz" \
"KNIME (download)"
execute "sudo tar xf knime.tar.gz -C /opt" \
"KNIME (extract)"
knime_install_path=$(find /opt -maxdepth 1 -type d -name 'knime*')
knime_version=${knime_install_path:6}
sudo ln -s "${knime_install_path}/knime" "/usr/local/bin/knime" \
|| print_error "KNIME (symlink)"
cat << EOF | sudo tee "/usr/share/applications/KNIME.desktop" > /dev/null
[Desktop Entry]
Type=Application
Encoding=UTF-8
Name=KNIME ${knime_version}
Comment=A sample application
Exec=knime
Icon="${knime_install_path}/icon.xpm
Terminal=false
EOF
| true |
776936ab6676ad2f84a503215a8f9c0ff346bd81 | Shell | timarenz/instruqt-consul-zero-trust-networking-with-service-mesh | /assets/scripts/diff.sh | UTF-8 | 963 | 3.34375 | 3 | [
"MIT"
] | permissive | #!/bin/sh
echo "Connecting to frontend-server and reading diffs from configuration files..."
echo
# echo "On the left hand side you see the current Consul service configuration for the frontend services."
# echo "To the right you see the same file but with Consul service mesh enabled for this services."
# echo "In addition an upstream service to the public API will be configured."
echo
ssh consul-admin@frontend-server sudo diff -y /etc/consul.d/frontend-service.json /etc/consul.d/frontend-service.json.example
echo
read -p "Press the ENTER to continue..." REPLY
echo
# echo "To communicate via the service mesh you have to tell your application to connect to the local upstream service."
# echo "This service listens on a defined port on localhost. Below you see the change in the nginx configuration file."
# echo
ssh consul-admin@frontend-server sudo diff -y /etc/nginx/conf.d/default.conf /etc/nginx/conf.d/default.conf.example
echo
echo "All done."
echo | true |
081ba33562528a4cb5f6730edf382308b41cf473 | Shell | mrmichalis/toolbox | /auto_build/compile/haproxy.sh | UTF-8 | 984 | 3.296875 | 3 | [] | no_license | #!/bin/bash
#### haproxy-1.4.x
# Build/install fragment for the auto-build framework. It assumes the
# sourcing harness has defined INST_LOG, HAPROXY_SRC, INST_DIR, SRC_DIR,
# TOP_DIR and INST_CONF, and provides the functions file_proc / get_file /
# unpack / compile / succ_msg (NOTE(review): none of these are defined in
# this file — confirm the caller supplies them).
# Skip everything if a 'HAPROXY' tag was already recorded in the log.
if ! grep '^HAPROXY$' ${INST_LOG} > /dev/null 2>&1 ;then
  ## handle source packages
  file_proc ${HAPROXY_SRC}
  get_file
  unpack
  ## for compile (MAKE/SYMLINK are consumed by the 'compile' helper)
  MAKE="make ARCH=x86_64 PREFIX=${INST_DIR}/${SRC_DIR} install"
  SYMLINK='/usr/local/haproxy'
  compile
  mkdir -m 755 -p ${INST_DIR}/${SRC_DIR}/etc
  rm -rf ${INST_DIR}/${SRC_DIR}/{doc,share}
  ## for install config files
  if [ "${INST_CONF}" -eq 1 ];then
    succ_msg "Begin to install ${SRC_DIR} config files"
    ## user add (only if the service account does not exist yet)
    id haproxy >/dev/null 2>&1 || useradd haproxy -u 1004 -M -s /sbin/nologin
    ## conf
    install -m 0644 ${TOP_DIR}/conf/haproxy/haproxy.cfg ${INST_DIR}/${SRC_DIR}/etc/haproxy.cfg
    ## init scripts
    install -m 0755 ${TOP_DIR}/conf/haproxy/haproxy.init /etc/init.d/haproxy
    chkconfig --add haproxy
    ## start
    service haproxy start
  fi
  ## record installed tag so the next run is a no-op
  echo 'HAPROXY' >> ${INST_LOG}
fi
| true |
89711781a13b9dc142fc8c6c335a549ec2c0b323 | Shell | mzhao035/models | /research/deeplab/train_script_202101021_for_check_decoder_structure.sh | UTF-8 | 2,827 | 2.65625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Move one-level up to tensorflow/models/research directory.
cd ..
# Update PYTHONPATH.
export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/slim
# Set up the working environment.
CURRENT_DIR=$(pwd)
WORK_DIR="${CURRENT_DIR}/deeplab"
#Set up the datasets directories.
DATASET_DIR="${WORK_DIR}/datasets"
#**************************************
TRIFO_DATASET_FOLDER="dataset_wqSfkp" #
#**************************************
TRIFO_DATASET_ROOT="${DATASET_DIR}/${TRIFO_DATASET_FOLDER}"
#Set up the init models directories.
INIT_DIR="${WORK_DIR}/init_models"
#********************************
CKPT_NAME="deeplabv3_mnv2_pascal_trainval"
#********************************
# Go back to research directory.
cd "${CURRENT_DIR}"
#Train: hyperparameters for this run.
#********************************
TRAIN_CROP_SIZE="241,849"
TRAIN_BATCH_SIZE=2
NUM_ITERATIONS=1
OUTPUT_STRIDE=32
FINE_TUNE_BATCH_NORM=True
MODEL_VARIANT="mobilenet_v2"
# NOTE(review): IGNORE_UNKNOWN_LABEL and USE_DECODER are set but never
# passed to train.py below.
IGNORE_UNKNOWN_LABEL=True
MIN_RESIZE_VALUE=238
MAX_RESIZE_VALUE=840
USE_DECODER=True
DECODER_OUTPUT_STRIDE=16,8,4,2
BASE_LEARNING_RATE=0.02
TRAIN_DATE="20210121"
NUM_CLONES=2
DEPTH_MULTIPLIER=1
#********************************
#folder name format:
#crop_xx_xx_batch_xx_iter_xx_os_xx_bn_xx_ignore_unknown_xx
TRAIN_PARAMETER_FOLDER="resize_height_"${MIN_RESIZE_VALUE}"_width_"${MAX_RESIZE_VALUE}"_crop_"${TRAIN_CROP_SIZE%,*}"_"${TRAIN_CROP_SIZE#*,}"_batch_"${TRAIN_BATCH_SIZE}"_iter_"${NUM_ITERATIONS}"_os_"${OUTPUT_STRIDE}"_bn_"${FINE_TUNE_BATCH_NORM}"_decoder_"${USE_DECODER}"_lr_"${BASE_LEARNING_RATE}"_train_date_"${TRAIN_DATE}"_dm_"${DEPTH_MULTIPLIER}"decoder_16_8_4_2"
EXP_DIR="${TRIFO_DATASET_ROOT}/${CKPT_NAME}/${TRAIN_PARAMETER_FOLDER}/exp"
TRAIN_LOGDIR="${EXP_DIR}/train"
# NOTE(review): TRIFO_DATASET_TFRECORD is computed here, but --dataset_dir
# below is hard-coded to an absolute /media/... path — confirm which one
# is intended for this experiment.
TRIFO_DATASET_TFRECORD="${TRIFO_DATASET_ROOT}/tfrecord"
mkdir -p "${EXP_DIR}"
mkdir -p "${TRAIN_LOGDIR}"
python "${WORK_DIR}"/train.py \
  --logtostderr \
  --train_split="train" \
  --model_variant=${MODEL_VARIANT} \
  --output_stride=${OUTPUT_STRIDE} \
  --train_crop_size=${TRAIN_CROP_SIZE} \
  --train_batch_size=${TRAIN_BATCH_SIZE} \
  --training_number_of_steps="${NUM_ITERATIONS}" \
  --fine_tune_batch_norm=${FINE_TUNE_BATCH_NORM} \
  --tf_initial_checkpoint="${INIT_DIR}/${CKPT_NAME}/model.ckpt-30000" \
  --train_logdir="${TRAIN_LOGDIR}" \
  --dataset_dir="/media/zhaomin/Ruich/000_min_to_move/dataset_v6_update_2020-11-30/dataset_wqSfkp/tfrecord" \
  --dataset="${TRIFO_DATASET_FOLDER}" \
  --last_layers_contain_logits_only=True \
  --last_layer_gradient_multiplier=10.0 \
  --initialize_last_layer=False \
  --save_interval_secs=300 \
  --save_summaries_secs=300 \
  --min_resize_value=${MIN_RESIZE_VALUE} \
  --max_resize_value=${MAX_RESIZE_VALUE} \
  --num_clones=${NUM_CLONES} \
  --base_learning_rate=${BASE_LEARNING_RATE} \
  --depth_multiplier=${DEPTH_MULTIPLIER} \
  --decoder_output_stride=${DECODER_OUTPUT_STRIDE}
| true |
2dd029f5a2ab9b0ceabe64bb24fbbf8a198c664e | Shell | orthros/flocker-api-examples | /runtest.sh | UTF-8 | 360 | 3.21875 | 3 | [] | no_license | #!/usr/bin/env bash
# Run one flocker-api-examples folder inside its Docker image.
set -e

folder=${1:?usage: $0 folder}

# Bail out early: previously this branch only printed the error and then
# fell through, so docker run was still invoked with a nonexistent
# folder/tag. (echo succeeds, so set -e did not stop the script either.)
if [ ! -d "$folder" ]; then
    >&2 echo "$folder does not exist"
    exit 1
fi

docker run --rm -ti \
    -v /vagrant:/keys \
    -e CONTROL_SERVICE=172.16.255.250 \
    -e KEY_FILE=/keys/user.key \
    -e CERT_FILE=/keys/user.crt \
    -e CA_FILE=/keys/cluster.crt \
    "clusterhq/flocker-api-examples:$folder"
c3ee30dd4f8dc08f9c33d3c0509a829fe835121f | Shell | tdely/freischutz | /tools/freischutz-client.sh | UTF-8 | 6,819 | 3.625 | 4 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
#
# CLI client for interacting with Freischutz RESTful APIs
#
# see https://gitlab.com/tdely/freischutz/ Freischutz on GitLab
#
# author Tobias Dély (tdely) <cleverhatcamouflage@gmail.com>
# copyright 2017-present Tobias Dély
# license https://directory.fsf.org/wiki/License:BSD-3-Clause BSD-3-Clause
#
set -o errexit
set -o pipefail
set -o nounset
VERSION='0.9.1'
# Request defaults; each is overridden by the matching command-line option
# parsed in the getopts loop below.
content_type='text/plain'
method='GET'
algorithm='sha256'
id=''
key=''
data=''
ext=''
token=''
# Mutually exclusive authentication mode flags (-B / -H / -T).
basic_auth=false
hawk=false
bearer=false
extra_header=''
verbose=false
target=''
# Request timestamp (seconds since epoch); used as 'ts' in the Hawk header.
time=$(date +%s)
# Print the usage line and option summary to stdout.
function display_help()
{
    cat <<USAGE
$(basename ${BASH_SOURCE[0]}) -m <STR> [-d <JSON>][-t] <STR> TARGET
  -a STR  hash algorithm to use for Hawk, default sha256
  -B      use basic authentication
  -c STR  content-type, default text/plain
  -d STR  data/payload to send
  -e STR  optional ext value for Hawk
  -H      use Hawk authentication
  -h      print this help message and exit
  -i INT  user id to use with request
  -k STR  key/password for authentication
  -m STR  http request method to use
  -t      display timing info:
          - response time: from request until first response byte received
          - operation time: from request until last response byte received
  -T STR  use bearer token authentication
  -V      verbose output
  -v      print version and exit
USAGE
}
# Print "<script-name> version <VERSION>" to stdout.
function display_version()
{
    local self
    self=$(basename "${BASH_SOURCE[0]}")
    printf '%s version %s\n' "${self}" "${VERSION}"
}
#######################################
# Build a Hawk Authorization header for the pending request.
# Reads globals:  target, method, content_type, data, algorithm, ext, id,
#                 key, time, verbose.
# Writes globals: ext (prefixed with "alg=..."), extra_header, and
#                 port_string (NOTE(review): not declared local).
# Exits 1 when the target has no explicit port and an unknown scheme.
#######################################
function hawk_build()
{
# Parse target URL into scheme / host / optional port / request URI.
local proto=$(echo ${target} | grep -oP "(^https|http)")
local host=$(echo ${target} | grep -oP "(?<=${proto}:\\/\\/)([^\\/:]+)")
local port=$(echo ${target} | grep -oP "(?<=${host}:)([^\\/]+)" || true)
[[ -z "${port}" ]] && port_string='' || port_string=":${port}"
local uri=$(echo ${target} | grep -oP "(?<=${host}${port_string})(\\/.+)")
# Random 6-character alphanumeric nonce.
local nonce=$(cat /dev/urandom|tr -dc 'a-zA-Z0-9'|fold -w 6|head -n 1)
# Fall back to the scheme's default port when none was given explicitly.
if [ -z "${port}" ]; then
if [ "${proto}" = 'http' ]; then
port=80
elif [ "${proto}" = 'https' ]; then
port=443
else
echo 'unknown protocol specified' >&2
exit 1
fi
fi
# Build Hawk payload string ("hawk.1.payload\n<content-type>\n<data>\n")
# and hash it with the selected digest algorithm.
local payload="hawk.1.payload\n"
payload+="${content_type}\n"
payload+="${data}\n"
local payload_hash=$(echo -ne "${payload}"|openssl dgst -${algorithm} -binary|base64 -w0)
# Advertise the algorithm in ext, keeping any user-supplied ext value.
if [ "$ext" != "" ]; then
ext="alg=${algorithm};${ext}"
else
ext="alg=${algorithm}"
fi
# Build Hawk header string for MAC and sign it with the shared key (HMAC).
local message="hawk.1.header\n"
message+="${time}\n"
message+="${nonce}\n"
message+="${method}\n"
message+="${uri}\n"
message+="${host}\n"
message+="${port}\n"
message+="${payload_hash}\n"
message+="${ext}\n"
local mac=$(echo -ne ${message}|openssl dgst -${algorithm} -hmac ${key} -binary|base64 -w0)
# In verbose mode dump the normalized strings and the resulting MAC.
if [ ${verbose} = true ]; then
echo "-------------------------------------------"
echo -ne "${payload}"
echo "-------------------------------------------"
echo -ne "${message}"
echo "-------------------------------------------"
echo "MAC:"
echo -e "${mac}\n"
fi
extra_header="Authorization: Hawk id=\"${id}\", ts=\"${time}\", nonce=\"${nonce}\", mac=\"${mac}\", hash=\"${payload_hash}\", ext=\"${ext}\""
}
# getopt index variable
OPTIND=1
# Parse command-line options; see display_help for their meaning.
while getopts ":a:Bc:d:e:Hhi:k:m:tT:Vv" opt; do
case ${opt} in
a)
algorithm="${OPTARG}"
;;
B)
basic_auth=true
hawk=false
bearer=false
;;
c)
content_type="${OPTARG}"
;;
d)
data="${OPTARG}"
;;
e)
ext="${OPTARG}"
;;
H)
hawk=true
basic_auth=false
bearer=false
;;
h)
display_help
exit 0
;;
i)
id="${OPTARG}"
;;
k)
key="${OPTARG}"
;;
m)
# ^^ upper-cases the method so 'get' and 'GET' are equivalent.
method="${OPTARG^^}"
;;
t)
# curl -w format fragment printed after the response when -t is used.
timing="\n\n--TIMING DETAILS\nResponse time: %{time_starttransfer}\nOperation time: %{time_total}\n"
;;
T)
basic_auth=false
hawk=false
bearer=true
token="${OPTARG}"
;;
V)
verbose=true
;;
v)
display_version
exit 0
;;
\?)
echo "Invalid option: -${OPTARG}" >&2
display_help
exit 1
;;
:)
echo "Option -${OPTARG} requires an argument." >&2
display_help
exit 1
;;
esac
done
# Remove all option arguments
shift $(($OPTIND - 1))
if [ -z "${method}" ]; then
echo "No method specified" >&2
display_help
exit 1
fi
if [ "${#}" = 0 ]; then
echo "No target specified" >&2
display_help
exit 1
fi
if [ "${#}" -gt 1 ]; then
echo "Too many arguments" >&2
display_help
exit 1
fi
# Target is first non-option argument
target="${1}"
if [ ${verbose} = true ]; then
echo -e "\n--REQUEST DETAILS"
fi
# NOTE(review): the two credential checks below only warn; the request is
# still attempted with empty id/key.
if [ ${basic_auth} = true ]; then
if [ -z "${id}" ] || [ -z "${key}" ]; then
echo "Basic authentication requires -i and -k to be set"
fi
fi
if [ ${hawk} = true ]; then
if [ -z "${id}" ] || [ -z "${key}" ]; then
echo "Hawk requires -i and -k to be set"
fi
# Builds the Authorization header into the global extra_header.
hawk_build
fi
# Use tmp files for payload and formatting for timing
# easiest way since curl is difficult about whitespace
TMP_DATA=$(mktemp)
TMP_FORMAT=$(mktemp)
echo "${data}" > ${TMP_DATA}
echo "${timing:-}" > ${TMP_FORMAT}
details=''
if [ ${verbose} = true ]; then
# -i includes response headers; -w appends the timing format file.
details="-i -w @${TMP_FORMAT}"
echo "--BEGIN CURL"
fi
# Send HTTP request with the selected authentication mechanism.
if [ ${hawk} = true ]; then
curl ${details} -d @${TMP_DATA} -X "${method}" -H "Content-Type: ${content_type}" -H "${extra_header}" $target
elif [ ${basic_auth} = true ]; then
curl ${details} -d @${TMP_DATA} -X "${method}" -H "Content-Type: ${content_type}" -u "${id}:${key}" $target
elif [ ${bearer} = true ]; then
curl ${details} -d @${TMP_DATA} -X "${method}" -H "Content-Type: ${content_type}" -H "Authorization: Bearer ${token}" $target
else
curl ${details} -d @${TMP_DATA} -X "${method}" -H "Content-Type: ${content_type}" $target
fi
if [ ${verbose} = true ]; then
echo -e "\n--END CURL"
fi
echo ""
# Clean up
rm ${TMP_DATA}
rm ${TMP_FORMAT}
| true |
baed28d6875cdfc3285bb94a2cd4bf1b4410a747 | Shell | DeviaVir/Rocket.Chat | /install.sh | UTF-8 | 447 | 3.171875 | 3 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | #!/bin/bash
# Deploy Rocket.Chat: download the latest develop bundle, install server
# dependencies and (re)start it under pm2. Pass "development" as the first
# argument to target the dev tree; the argument is optional.
set -x
set -euvo pipefail
IFS=$'\n\t'

ROOTPATH=/var/www/rocket.chat
PM2FILE=pm2.json
# ${1:-}: with 'set -u' in effect a bare "$1" aborted the script with
# "unbound variable" whenever the optional argument was omitted.
if [ "${1:-}" == "development" ]; then
  ROOTPATH=/var/www/rocket.chat.dev
  PM2FILE=pm2.dev.json
fi

cd "$ROOTPATH"
curl -fSL "https://s3.amazonaws.com/rocketchatbuild/rocket.chat-develop.tgz" -o rocket.chat.tgz
tar zxf rocket.chat.tgz && rm rocket.chat.tgz
cd "$ROOTPATH/bundle/programs/server"
npm install
# NOTE(review): the bundle is unpacked under ./bundle but pm2 is pointed at
# $ROOTPATH/current/ — confirm a symlink/rotation step exists elsewhere.
pm2 startOrRestart "$ROOTPATH/current/$PM2FILE"
| true |
da8769cc5707a373e99b7293fd6355e48f5090e3 | Shell | cu-ecen-aeld/assignments-3-and-later-chaisuresh | /finder-app/manual-linux.sh | UTF-8 | 3,594 | 3.78125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Script outline to install and build kernel.
# Author: Siddhant Jajoo.
# Builds an aarch64 kernel image plus a busybox-based initramfs; the target
# architecture and toolchain are driven by ARCH/CROSS_COMPILE below.

set -e
set -u

OUTDIR=/tmp/aeld
KERNEL_REPO=git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git
KERNEL_VERSION=v5.1.10
BUSYBOX_VERSION=1_33_1
FINDER_APP_DIR=$(realpath $(dirname $0))
ARCH=arm64
CROSS_COMPILE=aarch64-none-linux-gnu-

# Optional first argument overrides the output directory.
if [ $# -lt 1 ]
then
    echo "Using default directory ${OUTDIR} for output"
else
    OUTDIR=$1
    echo "Using passed directory ${OUTDIR} for output"
fi

mkdir -p ${OUTDIR}

cd "$OUTDIR"
if [ ! -d "${OUTDIR}/linux-stable" ]; then
    #Clone only if the repository does not exist.
    echo "CLONING GIT LINUX STABLE VERSION ${KERNEL_VERSION} IN ${OUTDIR}"
    git clone ${KERNEL_REPO} --depth 1 --single-branch --branch ${KERNEL_VERSION}
fi
if [ ! -e ${OUTDIR}/linux-stable/arch/${ARCH}/boot/Image ]; then
    cd linux-stable
    echo "Checking out version ${KERNEL_VERSION}"
    git checkout ${KERNEL_VERSION}

    # Kernel build steps. CONSISTENCY FIX: use ${ARCH}/${CROSS_COMPILE}
    # here instead of the hardcoded arm64/aarch64-none-linux-gnu- strings
    # that previously duplicated (and could drift from) the variables above.
    make ARCH=${ARCH} CROSS_COMPILE=${CROSS_COMPILE} mrproper
    make ARCH=${ARCH} CROSS_COMPILE=${CROSS_COMPILE} defconfig
    make -j4 ARCH=${ARCH} CROSS_COMPILE=${CROSS_COMPILE} all
    make ARCH=${ARCH} CROSS_COMPILE=${CROSS_COMPILE} modules
    make ARCH=${ARCH} CROSS_COMPILE=${CROSS_COMPILE} dtbs
fi

echo "Adding the Image in outdir"
cp -a $OUTDIR/linux-stable/arch/${ARCH}/boot/Image ${OUTDIR}

echo "Creating the staging directory for the root filesystem"
cd "$OUTDIR"
if [ -d "${OUTDIR}/rootfs" ]
then
    echo "Deleting rootfs directory at ${OUTDIR}/rootfs and starting over"
    sudo rm -rf ${OUTDIR}/rootfs
fi

# Create necessary base directories for the staging rootfs.
mkdir ${OUTDIR}/rootfs
cd ${OUTDIR}/rootfs
mkdir -p bin dev etc home lib lib64 proc sbin sys tmp usr var
mkdir -p usr/bin usr/lib usr/sbin
mkdir -p var/log

cd "$OUTDIR"
if [ ! -d "${OUTDIR}/busybox" ]
then
    git clone git://busybox.net/busybox.git
    cd busybox
    git checkout ${BUSYBOX_VERSION}
    # Configure busybox from a pristine tree.
    make distclean
    make defconfig
else
    cd busybox
fi

# Make and install busybox into the staging rootfs.
make ARCH=${ARCH} CROSS_COMPILE=${CROSS_COMPILE}
make CONFIG_PREFIX=${OUTDIR}/rootfs ARCH=${ARCH} CROSS_COMPILE=${CROSS_COMPILE} install

cd ${OUTDIR}/rootfs
echo "Library dependencies"
${CROSS_COMPILE}readelf -a bin/busybox | grep "program interpreter"
${CROSS_COMPILE}readelf -a bin/busybox | grep "Shared library"

# Copy the dynamic loader and the shared libraries busybox links against
# from the toolchain sysroot into the rootfs (libc needs sudo because the
# rootfs will be chowned to root later).
export SYSROOT=$(${CROSS_COMPILE}gcc -print-sysroot)
sudo cp -L $SYSROOT/lib64/libc.so.* lib64
cp -L $SYSROOT/lib64/libresolv.so.* lib64
cp -L $SYSROOT/lib64/libm.so.* lib64
cp -L $SYSROOT/lib/ld-linux-aarch64.* lib

# Make the device nodes needed before /dev is populated at boot.
cd ${OUTDIR}/rootfs
sudo mknod -m 666 dev/null c 1 3
sudo mknod -m 600 dev/console c 5 1

# Clean and cross-build the writer utility.
cd ${FINDER_APP_DIR}
make clean
make CROSS_COMPILE=${CROSS_COMPILE}

# Copy the finder related scripts and executables to the /home directory
# on the target rootfs.
cp -r ./conf/ ${OUTDIR}/rootfs/home
cp -a ./writer ${OUTDIR}/rootfs/home
cp -a ./writer.c ${OUTDIR}/rootfs/home
cp -a ./finder.sh ${OUTDIR}/rootfs/home
cp -a ./finder-test.sh ${OUTDIR}/rootfs/home
cp -a ./autorun-qemu.sh ${OUTDIR}/rootfs/home
cp -a ./Makefile ${OUTDIR}/rootfs/home
cp -a ./writer.o ${OUTDIR}/rootfs/home

# Chown the whole staging tree to root before archiving.
cd ${OUTDIR}/rootfs
sudo chown -R root:root *

# Create initramfs.cpio.gz from the staging tree.
find . | cpio -H newc -ov --owner root:root > ${OUTDIR}/initramfs.cpio
cd ..
gzip -f initramfs.cpio
| true |
d6bf35c9752e8e948ce90a4c6141191519a5e3bd | Shell | balajig18/zalenium | /run_integration_tests.sh | UTF-8 | 2,128 | 3.28125 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | #!/bin/sh
# Exit on failure
set -e
# Which third-party grid to test against: sauceLabs | browserStack | testingBot.
INTEGRATION_TO_TEST=$1
IN_TRAVIS="${CI:=false}"
VIDEOS_FOLDER=$(pwd)/target/videos
if [ "${IN_TRAVIS}" = "true" ]; then
VIDEOS_FOLDER=/tmp/videos
fi
echo ${VIDEOS_FOLDER}
# Skip the suites entirely for tagged (release) builds; they already ran in
# the PR that produced the tag.
if [ "$TRAVIS_PULL_REQUEST" = "false" ] && [ -n "${TRAVIS_TAG}" ] && [ "${TRAVIS_TAG}" != "latest" ]; then
echo "TRAVIS_TAG=${TRAVIS_TAG}"
echo "Not running integration tests when a TAG is set, we assume they already ran in the PR."
else
# If the environment var exists, then we run the integration tests. This is to allow external PRs ro tun
# NOTE(review): the '|| (echo ... && exit 2)' pattern exits only the
# subshell; the script actually stops because the subshell's status 2
# trips 'set -e' -- confirm this interplay before restructuring.
if [ "$INTEGRATION_TO_TEST" = sauceLabs ]; then
if [ -n "${SAUCE_USERNAME}" ]; then
mvn clean verify -Pintegration-test -DthreadCountProperty=2 -Dskip.surefire.tests=true -DintegrationToTest=${INTEGRATION_TO_TEST}
# Check for generated videos
ls -la ${VIDEOS_FOLDER}/saucelabs*.mp4 || (echo "No Sauce Labs videos were downloaded." && exit 2)
ls -la ${VIDEOS_FOLDER}/zalenium*.mp4 || (echo "No Zalenium videos were generated." && exit 2)
fi
fi
if [ "$INTEGRATION_TO_TEST" = browserStack ]; then
if [ -n "${BROWSER_STACK_USER}" ]; then
mvn clean verify -Pintegration-test -DthreadCountProperty=2 -Dskip.surefire.tests=true -DintegrationToTest=${INTEGRATION_TO_TEST}
# Check for generated videos
ls -la ${VIDEOS_FOLDER}/browserstack*.mp4 || (echo "No BrowserStack videos were downloaded." && exit 2)
ls -la ${VIDEOS_FOLDER}/zalenium*.mp4 || (echo "No Zalenium videos were generated." && exit 2)
fi
fi
if [ "$INTEGRATION_TO_TEST" = testingBot ]; then
if [ -n "${TESTINGBOT_KEY}" ]; then
mvn clean verify -Pintegration-test -DthreadCountProperty=2 -Dskip.surefire.tests=true -DintegrationToTest=${INTEGRATION_TO_TEST}
# Check for generated videos
ls -la ${VIDEOS_FOLDER}/testingbot*.mp4 || (echo "No TestingBot videos were downloaded." && exit 2)
ls -la ${VIDEOS_FOLDER}/zalenium*.mp4 || (echo "No Zalenium videos were generated." && exit 2)
fi
fi
fi
| true |
75c5e4df476a62f71b280f55b4176ea5741dd5be | Shell | LaurentFough/cuff | /tests/cuff_test | UTF-8 | 255 | 3.28125 | 3 | [
"MIT"
] | permissive | #!/bin/sh
#
# Quick script to run the examples from the help msg.
# Pull every line containing '$' out of `cuff -r -h`, strip everything up
# to the "$ " prompt marker, and execute what remains as a command.
cuff -r -h | grep '\$' | sed 's/.*$ //' | while read CUFF_CMD ; do
printf "Running: %s\n" "$CUFF_CMD"
# NOTE(review): 'read' without -r mangles backslashes, and the unquoted
# $CUFF_CMD relies on word-splitting to rebuild the command line -- both
# look intentional for these simple examples, but confirm.
$CUFF_CMD > /dev/null && printf "  SUCCESS\n" || printf "  FAILED\n" >&2
done
| true |
628c2f6bf5535e467fc2ea0a005064c1bf276487 | Shell | sppenna/pi_cluster | /sbin/monitor-cluster.sh | UTF-8 | 193 | 2.53125 | 3 | [] | no_license | echo "node-master: "
# Report the master's SoC temperature, then poll each worker node twice.
vcgencmd measure_temp
echo ""
# Two polling rounds; 'i' is only a pass counter.
for i in {1..2}
do
    for node in node1 node2 node3 node4
    do
        echo "$node: "
        ssh "$node" vcgencmd measure_temp
        echo ""
    done
    # BUG FIX: was 'wait 5', which tries to wait on job/PID 5 (an error
    # here -- nothing runs in the background) instead of pausing between
    # polling rounds as clearly intended.
    sleep 5
done
| true |
933109d614d803803042a582d89e86db88090414 | Shell | lincw6666/System_Administration | /HW03/rc_script/zbackupd | UTF-8 | 1,470 | 3.640625 | 4 | [] | no_license | #!/bin/sh
# PROVIDE: zbackupd
# KEYWORD: shutdown
# FreeBSD rc.d service script for the zbackupd daemon; standard rc.subr
# variables below plus custom reload/list/stop commands.
. /etc/rc.subr
name=zbackupd
rcvar=zbackupd_enable
load_rc_config $name
command=/usr/local/bin/zbackupd
command_interpreter=/bin/sh
# Defaults, overridable from rc.conf.
zbackupd_enable=${zbackupd_enable:-"no"}
zbackupd_config=${zbackupd_config:-"/usr/local/etc/zbackupd.yaml"}
required_files="${zbackupd_config}"
pidfile_zbackupd=${pidfile_zbackupd:-"/var/run/zbackup.pid"}
pidfile="${pidfile_zbackupd}"
logfile_zbackupd=${logfile_zbackupd:-"/var/log/zbackup.log"}
command_args="-d -p ${pidfile} -c ${zbackupd_config} >> ${logfile_zbackupd} 2>&1"
extra_commands="reload list"
reload_cmd="Func_Reload"
list_cmd="Func_List"
stop_cmd="Func_Stop"
# reload: send SIGUSR1 to every process in the daemon's process group.
Func_Reload() {
local pid pgid
# Extract the numeric pid from the pidfile, then look up its pgid.
pid=`cat ${pidfile_zbackupd} | head -1 | sed "1,$ s/[^0-9]*//g"`
pgid=`ps -o pid,pgid -axww | awk '{print $1" "$2}' | grep "^${pid} " | head -1 | awk '{print $2}'`
ps -o pid,pgid -axww | awk '{print $1" "$2}' | grep " ${pgid}$" | awk '{print $1}' | xargs kill -SIGUSR1
}
# list: print the configured backups.
Func_List() {
/usr/local/bin/zbackup --list
}
# stop: SIGTERM the whole process group (kill -<pgid>) and wait.
Func_Stop() {
local pid
pid=$(check_pidfile ${pidfile} ${command} ${command_interpreter})
if [ -z "${pid}" ] ; then
Error "zbackupd was not running!!"
fi
echo "Stop zbackup."
ps -o pid,pgid -axww | grep ${pid} | head -1 | \
awk '{print "-"$2}' | xargs kill -SIGTERM
echo "Waiting pid: ${pid}."
# NOTE(review): 'wait' only works on children of this shell; the daemon
# is not one, so this likely returns immediately -- confirm.
wait ${pid}
}
# Prints a message and exits 0 (deliberately non-fatal for rc framework).
Error() {
echo "Error!!" $1
exit 0
}
run_rc_command "$1"
| true |
d84a9282e2413ca269de8b3e8e380e78c5d26caa | Shell | faizulho/atomiadns | /build-rpm.sh | UTF-8 | 772 | 2.890625 | 3 | [
"ISC"
] | permissive | #!/bin/sh
set -x

# Major RHEL release number extracted from /etc/redhat-release
# (e.g. "7" from "CentOS Linux release 7.9.2009").
version=`cat /etc/redhat-release | sed 's/[^0-9.]//g' | cut -d . -f 1`

# Remove artifacts left over from previous builds.
rm -f *rpm
rm -f /usr/src/redhat/RPMS/*/atomiadns-*
rm -f /usr/src/redhat/SRPMS/atomiadns-*

# build_component <dir>
# Enter <dir> (relative to the repository root), run its buildrpms for the
# detected RHEL release, abort the whole build with the same exit code on
# failure, and return to the repository root. This replaces three
# copy-pasted cd/buildrpms/exit stanzas.
build_component()
{
    cd "$1" || exit 1
    ./buildrpms rhel"$version"
    ret=$?
    if [ $ret != 0 ]; then
        exit $ret
    fi
    cd ..
}

build_component server
build_component powerdns_sync
build_component bind_sync

# Disabled old projects from building
#build_component zonefileimporter
#build_component dyndns
#build_component syncer
| true |
aa6062452a59e41098f5bd789b13d95bf8fabe3c | Shell | denouche/danstonchat-bash | /danstonchat.sh | UTF-8 | 766 | 3.484375 | 3 | [] | no_license | #!/bin/bash
# Fetch the next danstonchat.com quote after the one recorded in LAST_FILE.
LAST_FILE=.danstonchat_last
BASE_URL=http://danstonchat.com
if [ ! -e $LAST_FILE ]
then
# First run: scrape the newest quote id from the site's "latest" widget
# and seed the state file with it.
CURRENT=$(wget -O - -q $BASE_URL | grep 'widget_latest_items' | sed -r "s%.*$BASE_URL/([0-9]+).html.*%\1%")
echo $CURRENT > $LAST_FILE
echo "$LAST_FILE initialized with id $CURRENT"
echo
else
# Subsequent runs: try the id right after the last one we printed.
LAST=$(cat $LAST_FILE)
CURRENT=$(($LAST+1))
fi
HTML=$(wget -O - -q $BASE_URL/$CURRENT.html)
# wget status 0 means the quote page exists.
if [ $? -eq 0 ]
then
echo $CURRENT > $LAST_FILE
# Strip the surrounding markup: keep the quote entry, drop the anchor,
# turn <br/> into newlines, remove remaining tags, decode HTML entities.
CURRENT_TEXT="$(echo "$HTML" | grep 'item-entry' | sed -r "s/^.*<a href=\".*$CURRENT.html\">//" | sed -r 's/<\/a>.*$//' | sed -r 's/<br ?\/>/\n/g' | sed -r 's/<[^>]*>//g' | perl -n -mHTML::Entities -e ' ; print HTML::Entities::decode_entities($_) ;')"
echo "$CURRENT_TEXT"
else
# No newer quote yet: print an empty line and keep the state unchanged.
echo ""
fi
| true |
3dd123de986a6e69f86767c8c664e4f1a0997484 | Shell | Z5X67280/dtc-autobuild | /travis-ci.sh | UTF-8 | 745 | 2.625 | 3 | [] | no_license | # CI Script
# Copyright (C) LiteRako
# CI helper: fetches the telegram.sh client, builds DragonTC via the repo
# tool, and pushes the resulting tarball to a Telegram chat.
git clone https://github.com/fabianonline/telegram.sh telegram
TELEGRAM_ID=-1001268516549
TELEGRAM=telegram/telegram
# SECURITY(review): the bot API key is hardcoded in the repository; it
# should be injected through a CI secret instead.
BOT_API_KEY=723044228:AAFpmF9aHsMTinCJ7Yq3HLxEzjEBiO47rlU
# BUG FIX: TELEGRAM_TOKEN was previously assigned (and exported) *before*
# BOT_API_KEY was set, so an empty token was exported.
TELEGRAM_TOKEN=${BOT_API_KEY}
export TELEGRAM_TOKEN
# Upload the built toolchain archive to the Telegram chat.
function push() {
    # BUG FIX: was 'JIP=$echo DTC-9.0.tar.xz', which expanded the unset
    # variable $echo and then tried to run 'DTC-9.0.tar.xz' as a command,
    # leaving JIP empty.
    JIP=DTC-9.0.tar.xz
    curl -F document=@$JIP "https://api.telegram.org/bot$BOT_API_KEY/sendDocument" \
        -F chat_id="$TELEGRAM_ID"
}
git config --global user.name "1"
git config --global user.email "1"
curl https://mirrors.tuna.tsinghua.edu.cn/git/git-repo -o repo
chmod +x repo
./repo init -h
./repo init -u https://github.com/dragon-tc/DragonTC -b master -q
./repo sync -j8 -c -f
cd build
chmod +x build-dtc
./build-dtc 9.0 opt
ls ../out/9.0
cd ../out/9.0
# BUG FIX: 'tar cxvf' combined the mutually exclusive create (c) and
# extract (x) modes, which GNU tar rejects; create with xz (-J) to match
# the .tar.xz name.
tar cJvf DTC-9.0.tar.xz *
push | true |
6de6444998bc03e1f810a06efc18b00cb7914103 | Shell | tomperr/useful_bash_scripts | /Debian/solve_php_apache.sh | UTF-8 | 624 | 2.984375 | 3 | [] | no_license | #!/bin/bash
# -------------------------------------
#
# Author: @Yoshinker
# Date: 2019-02-23
# Version: 1.0.0
# Tested On: Linux Mint 19 Tara
#
# -------------------------------------
# Installs Apache2 + MySQL + PHP 7.3 (FPM) and wires Apache to PHP-FPM.
# NEED ROOT PRIVILEGES TO RUN
if [ "$EUID" -ne 0 ]; then
echo "Please run as root"
echo "Usage: sudo $0"
# NOTE(review): bare 'exit' propagates the last command's status (0 here),
# so a refused run still exits successfully -- confirm intended.
exit
fi
# installing apache2 server
apt install -y apache2
apt install -y mysql-server
# adding php7 repository
add-apt-repository -y ppa:ondrej/php
apt-get -y update
# installing php7
apt-get -y install php7.3 php7.3-fpm
# configuring apache2
# Enable the FastCGI proxy modules and the shipped php7.3-fpm conf snippet.
a2enmod proxy_fcgi setenvif
a2enconf php7.3-fpm
service apache2 restart
| true |
9fd4e489eb9703c96bc1f8d242712b10c132ec94 | Shell | mgijax/gbpreprocessor | /Install | UTF-8 | 2,070 | 4.125 | 4 | [] | no_license | #!/bin/sh
#
# Installation script for spseqload
#
# 1) cp genbank.config.default genbank.config
# 2) cp refseq.config.default refseq.config
# 3) cp gb_common.config.default gb_common.config
# 2) Install
#

Usage=Install

#
# Function called when the install fails.
#
installFailed ()
{
    echo "Installation Failed: `date`"
    exit 1
}

#
# sourceConfig <file>
# Source a configuration file, aborting the installation when the file is
# missing or unreadable. Variables set by the sourced file become global.
# Replaces four copy-pasted "if readable then source else die" stanzas.
#
sourceConfig ()
{
    if [ -r "$1" ]
    then
        echo "Source $1"
        . "$1"
    else
        echo "Cannot source configuration file: $1"
        installFailed
    fi
}

#
# Verify the arguments to the script, there shouldn't be any
#
if [ $# -ne 0 ]
then
    echo "Usage: ${Usage}"
    installFailed
fi

#
# Make sure config files exist
#
cd `dirname $0`

# establish the three config files
GB_CONFIG=genbank.config
RS_CONFIG=refseq.config
COMMON_CONFIG=gb_common.config

# (genbank.config used to be sourced here first; that step is disabled)
#sourceConfig ${GB_CONFIG}

# Make sure gb_common.config file exists and source it
sourceConfig ${COMMON_CONFIG}

# Make sure genbank.config file exists and source it
sourceConfig ${GB_CONFIG}

#
# Check to see if this is a development installation
#
DEV=""
if [ "${INSTALL_TYPE}" = "dev" ]
then
    DEV="-d"
fi

#
# run DLAInstall for spseqload
#
echo 'running DLAINSTALL'
${DLAINSTALL} ${DEV}

#
# Create the work directory if it doesn't exist.
#
if [ ! -d ${WORKDIR} ]
then
    mkdir -p ${WORKDIR}
fi

# source gb_common.config again (presumably to reset values the genbank
# config changed before the refseq pass -- confirm)
. ${COMMON_CONFIG}

# Make sure refseq.config file exists and source it
sourceConfig ${RS_CONFIG}

#
# Create the work directory if it doesn't exist.
#
if [ ! -d ${WORKDIR} ]
then
    mkdir -p ${WORKDIR}
fi

#
# run DLAInstall for gbpreprocessor
#
${DLAINSTALL} ${DEV}
| true |
9a49dbb02dc0a8c7b84e00de2e255aeac0d0b16f | Shell | NeonMan/shellUnit | /shunit.d/basic-asserts.sh | UTF-8 | 3,326 | 3.5625 | 4 | [
"BSD-2-Clause"
] | permissive | #Tests if the expression returns true
#The return value of the expression must be nonzero for this assert to
#succeed. Refer to the implementation below.
#
# Params:
# $1 <-- A expression.
assertTrue () {
if eval "$1"
then
pass
else
fail "expression '$1' is false"
fi
}
#Asserts that a shell expression evaluates as false.
#The expression is run via eval; a non-zero exit status (shell failure)
#makes the assert pass, a zero status invokes fail() with a diagnostic.
#
# Params:
#   $1 <-- A expression.
assertFalse () {
    if eval "$1"; then
        fail "expression '$1' is true"
        return
    fi
    pass
}
#Unconditionally passing assert; a building block for custom asserts.
#
# Params: None
assertSuccess () { pass; }
#Unconditionally failing assert; a building block for custom asserts.
#
# Params: None
assertFail () { fail "assertFail called"; }
#Tests if param 1 and 2 are equal (string)
#
# Params:
#   $1 <-- A string
#   $2 <-- A string
assertEquals () {
    # PORTABILITY FIX: use POSIX '=' for string comparison; '==' inside
    # [ ] is a bashism rejected by strictly POSIX test implementations.
    if [ "$1" = "$2" ]; then
        pass
    else
        fail "expected '$1' but found '$2'"
    fi
}
#Tests if param 1 and 2 are NOT equal (string)
#
# Params:
#   $1 <-- A string
#   $2 <-- A string
assertNotEquals () {
    # PORTABILITY FIX: '=' instead of the non-POSIX '==' inside [ ].
    if [ "$1" = "$2" ]; then
        fail "expected NOT to be '$1' but found '$2'"
    else
        pass
    fi
}
#Tests if param 1 is strictly greater than param 2 (integer)
#
# Params:
#   $1 <-- An integer
#   $2 <-- An integer
assertGreaterThan () {
    # Guard-clause form; '! [ ... ]' keeps the original behaviour for
    # non-integer input (test errors also land in the failure branch).
    if ! [ "$1" -gt "$2" ]; then
        fail "expected '$2' to be greater than '$1'"
        return
    fi
    pass
}
#Tests if param 1 is not greater than param 2 (integer)
#
# Params:
#   $1 <-- An integer
#   $2 <-- An integer
assertNotGreaterThan () {
    # Guard-clause form of the original if/else.
    if [ "$1" -gt "$2" ]; then
        fail "expected '$2' NOT to be greater than '$1'"
        return
    fi
    pass
}
#Tests if param 1 is greater or equal to param 2 (integer).
#Delegates to assertNotLessThan.
#
# Params:
#   $1 <-- An integer
#   $2 <-- An integer
assertGreaterThanOrEqual () {
    # ROBUSTNESS FIX: quote the forwarded arguments; unquoted $1 $2 let
    # empty or whitespace-containing values collapse or split.
    assertNotLessThan "$1" "$2"
}
#Tests if param 1 is strictly less than param 2 (integer)
#
# Params:
#   $1 <-- An integer
#   $2 <-- An integer
assertLessThan () {
    # Guard-clause form; '! [ ... ]' keeps the original behaviour for
    # non-integer input (test errors also land in the failure branch).
    if ! [ "$1" -lt "$2" ]; then
        fail "expected '$2' to be less than '$1'"
        return
    fi
    pass
}
#Tests if param 1 is not less than param 2 (integer)
#
# Params:
#   $1 <-- An integer
#   $2 <-- An integer
assertNotLessThan () {
    # Guard-clause form of the original if/else.
    if [ "$1" -lt "$2" ]; then
        fail "expected '$2' NOT to be less than '$1'"
        return
    fi
    pass
}
#Tests if param 1 is less than or equal to param 2 (integer).
#Delegates to assertNotGreaterThan.
#
# Params:
#   $1 <-- An integer
#   $2 <-- An integer
assertLessThanOrEqualTo () {
    # ROBUSTNESS FIX: quote the forwarded arguments; unquoted $1 $2 let
    # empty or whitespace-containing values collapse or split.
    assertNotGreaterThan "$1" "$2"
}
#Tests if a number lies within [expected - margin, expected + margin].
#
# Params:
#   $1 <-- A number to be tested
#   $2 <-- Expected value
#   $3 <-- Error margin
assertCloseTo () {
    # IDIOM FIX: shell arithmetic instead of spawning expr (which also
    # misparses operands starting with '-'). Bounds stay global, as before.
    lower_bound=$(( $2 - $3 ))
    upper_bound=$(( $2 + $3 ))
    # '-a' inside [ ] is obsolescent; chain two tests with && instead.
    if [ "$1" -ge "$lower_bound" ] && [ "$1" -le "$upper_bound" ]; then
        pass
    else
        fail "$1 not in range [ $lower_bound - $upper_bound ]"
    fi
}
#Tests if a number lies OUTSIDE [expected - margin, expected + margin].
#
# Params:
#   $1 <-- A number to be tested
#   $2 <-- Expected value
#   $3 <-- Error margin
assertNotCloseTo () {
    # IDIOM FIX: shell arithmetic instead of spawning expr, and && chaining
    # instead of the obsolescent '-a'. Bounds stay global, as before.
    lower_bound=$(( $2 - $3 ))
    upper_bound=$(( $2 + $3 ))
    if [ "$1" -ge "$lower_bound" ] && [ "$1" -le "$upper_bound" ]; then
        fail "$1 in range [ $lower_bound - $upper_bound ]"
    else
        pass
    fi
}
| true |
7286f1d7bffcd3c1ff5aba2d7ffa8f04dbbb478b | Shell | jenhantao/denovoMutPipeline | /tasks/bam2fastq.sh | UTF-8 | 566 | 2.546875 | 3 | [] | no_license | #! /bin/bash
# NOTE: Currently assumes that only one bamfile is stored in each mount directory
# Log the exact pipeline about to run (all variables come from the caller).
echo "${htscmd} bamshuf -Ou -n 128 ${bamfilename} ${output_dir}/tmp | ${htscmd} bam2fq -a - | gzip > ${output_dir}/${subjectID}_interleaved_reads.fastq.gz"
# Older variant that globbed the bam out of ${bampath}/${bamfile}/ -- kept for reference.
#${htscmd} bamshuf -Ou -n 128 ${bampath}/${bamfile}/*.bam ${output_dir}/tmp | ${htscmd} bam2fq -a - | gzip > ${output_dir}/${subjectID}_interleaved_reads.fastq.gz
# Shuffle read pairs together, convert to interleaved FASTQ, gzip the result.
# NOTE(review): the trailing '| true' discards the pipeline's exit status -- confirm intentional.
${htscmd} bamshuf -Ou -n 128 ${bamfilename} ${output_dir}/tmp | ${htscmd} bam2fq -a - | gzip > ${output_dir}/${subjectID}_interleaved_reads.fastq.gz | true
553b909b1e086175e42726452726e99118a726d5 | Shell | marcojahn/dotfiles | /.bash_prompt | UTF-8 | 5,732 | 3.640625 | 4 | [] | no_license | #!/usr/bin/env bash
# Shell prompt based on the Solarized Dark theme.
# Screenshot: http://i.imgur.com/EkEtphC.png
# Heavily inspired by @necolas’s prompt: https://github.com/necolas/dotfiles
# iTerm → Profiles → Text → use 13pt Monaco with 1.1 vertical spacing.
# Upgrade $TERM to a 256-color entry when terminfo actually provides one.
if [[ $COLORTERM = gnome-* && $TERM = xterm ]] && infocmp gnome-256color >/dev/null 2>&1; then
export TERM='gnome-256color';
elif infocmp xterm-256color >/dev/null 2>&1; then
export TERM='xterm-256color';
fi;
# prompt_git <prefix> <suffix>
# Prints "<prefix>branch<suffix>[status marks]" for the current git repo,
# or nothing when outside a work tree. Status letters: + staged, ! unstaged,
# ? untracked, $ stashed; marks also show ahead/behind arrows.
prompt_git() {
local s='';
local branchName='';
local marks='';
# Check if the current directory is in a Git repository.
if [ $(git rev-parse --is-inside-work-tree &>/dev/null; echo "${?}") == '0' ]; then
# check if the current directory is in .git before running git checks
if [ "$(git rev-parse --is-inside-git-dir 2> /dev/null)" == 'false' ]; then
# Ensure the index is up to date.
git update-index --really-refresh -q &>/dev/null;
# Check for uncommitted changes in the index.
if ! $(git diff --quiet --ignore-submodules --cached); then
s+='+';
fi;
# Check for unstaged changes.
if ! $(git diff-files --quiet --ignore-submodules --); then
s+='!';
fi;
# Check for untracked files.
if [ -n "$(git ls-files --others --exclude-standard)" ]; then
s+="?";
fi;
# 📦 \U1F4E6
# ± \u00b1
# branch/upwards \ue0a0
# ➦ \u27a6
# ✘ \u2718
# ⚡ \u26a1
# ⚙ \u2699
# ⎇ \u2387
# ⬢ \U2B22
#
# ✔ is from https://unicode-table.com/en/blocks/dingbats/
# ⇡
# https://unicode-table.com/en/blocks/arrows/
# https://unicode-table.com/en/sets/arrows-symbols/
# Check for stashed files.
if $(git rev-parse --verify refs/stash &>/dev/null); then
s+='$';
fi;
## diff checks (new)
#
# branch is modified?
# NOTE(review): GIT_BRANCH_CHANGED_SYMBOL is expected to be set elsewhere
# in the dotfiles -- confirm; it expands empty otherwise.
[ -n "$(git status --porcelain)" ] && marks+=" $GIT_BRANCH_CHANGED_SYMBOL"
# how many commits local branch is ahead/behind of remote?
local stat="$(git status --porcelain --branch | grep '^##' | grep -o '\[.\+\]$')"
local aheadN="$(echo $stat | grep -o 'ahead [[:digit:]]\+' | grep -o '[[:digit:]]\+')"
local behindN="$(echo $stat | grep -o 'behind [[:digit:]]\+' | grep -o '[[:digit:]]\+')"
[ -n "$aheadN" ] && marks+=" \u21e1${aheadN}"
[ -n "$behindN" ] && marks+=" \u21e3${behindN}"
fi;
# Get the short symbolic ref.
# If HEAD isn’t a symbolic ref, get the short SHA for the latest commit
# Otherwise, just give up.
branchName="$(git symbolic-ref --quiet --short HEAD 2> /dev/null || \
git rev-parse --short HEAD 2> /dev/null || \
echo '(unknown)')";
[ -n "${s}" ] && s=" [${s}]";
# echo -e so the \uXXXX escapes above render as glyphs.
echo -e "${1}\ue0a0 ${branchName}${2}${s}${marks}";
else
return;
fi;
}
# prompt_project_infos
# Appends " is <pkg version> via <runtime version>" to the prompt when the
# cwd looks like a Node (package.json) or Maven (pom.xml) project.
prompt_project_infos() {
local PACKAGE_VERSION='';
local RUNTIME_VERSION='';
#if ! [ -x "$(command -v git)" ]; then
#  echo 'Error: git is not installed.' >&2
#  exit 1
#fi
# js project
if [ -e package.json ]; then
if [ -x "$(command -v node)" ]; then
PACKAGE_VERSION="$(node -p -e "require('./package.json').version")"
RUNTIME_VERSION="$(node --version)"
fi;
echo -e "${white} is \U1F4E6 ${blue}${PACKAGE_VERSION} ${white}via ${green}\U2B22 ${RUNTIME_VERSION}";
fi;
# java/maven project
if [ -e pom.xml ]; then
if [ -x "$(command -v java)" ]; then
# mvn help:evaluate -Dexpression=project.version | grep -v '^['
# or
# grep version pom.xml | grep -v -e '<?xml|~'| head -n 1 | sed 's///' | sed 's/</version>//'| awk '{print $1}'
# NOTE(review): "mvn-ver" is a literal placeholder -- the pom version is
# never actually extracted (see the candidate commands above).
PACKAGE_VERSION="mvn-ver"
RUNTIME_VERSION="$(java -version 2>&1 | sed -n ';s/.* version "\(.*\)"/\1/p;')"
fi;
echo -e "${white} is \U1F4E6 ${blue}${PACKAGE_VERSION} ${white}via ${green}\u2615 ${RUNTIME_VERSION}";
fi;
# java/gradle project
# Disabled draft for gradle detection, kept for reference.
#if [ -z `ls .gradle` ]; then
#  if [ -x "$(command -v java)" ]; then
#    PACKAGE_VERSION="gradle-ver"
#    RUNTIME_VERSION="$(java -version 2>&1 | sed -n ';s/.* version "\(.*\)"/\1/p;')"
#  fi;
#
#  echo -e "${white} is \U1F4E6 ${blue}${PACKAGE_VERSION} ${white}via ${green}\u2615 ${RUNTIME_VERSION}";
#fi
#  PACKAGE_VERSION='0';
#  RUNTIME_VERSION="$(java -version 2>&1 | sed -n ';s/.* version "\(.*\)"/\1/p;')"
}
# Prefer terminfo colors when available; fall back to raw ANSI escapes.
if tput setaf 1 &> /dev/null; then
tput sgr0; # reset colors
bold=$(tput bold);
reset=$(tput sgr0);
# Solarized colors, taken from http://git.io/solarized-colors.
black=$(tput setaf 0);
blue=$(tput setaf 33);
cyan=$(tput setaf 37);
green=$(tput setaf 64);
orange=$(tput setaf 166);
purple=$(tput setaf 125);
red=$(tput setaf 124);
violet=$(tput setaf 61);
white=$(tput setaf 15);
yellow=$(tput setaf 136);
else
bold='';
reset="\e[0m";
black="\e[1;30m";
blue="\e[1;34m";
cyan="\e[1;36m";
green="\e[1;32m";
orange="\e[1;33m";
purple="\e[1;35m";
red="\e[1;31m";
violet="\e[1;35m";
white="\e[1;37m";
yellow="\e[1;33m";
fi;

# Highlight the user name when logged in as root.
if [[ "${USER}" == "root" ]]; then
userStyle="${red}";
else
userStyle="${orange}";
fi;

# Highlight the hostname when connected via SSH.
if [[ "${SSH_TTY}" ]]; then
hostStyle="${bold}${red}";
else
hostStyle="${yellow}";
fi;

# Set the terminal title and prompt. Non-printing escape sequences must be
# wrapped in \[ ... \] so readline computes the prompt width correctly.
PS1="\[\033]0;\W\007\]"; # working directory base name
#PS1+="\[${bold}\]\n"; # newline
PS1+="\n";
# BUG FIX: the opening \[ was missing before \${userStyle} (only the
# closing \] was present), which unbalanced the non-printing markers and
# corrupted readline's line-length accounting.
PS1+="\[\${userStyle}\]\u"; # username
PS1+="\[${white}\] at ";
PS1+="\[${hostStyle}\]\h (${LOCATION})"; # host
PS1+="\[${white}\] in ";
PS1+="\[${green}\]\w"; # working directory full path
PS1+="\$(prompt_git \"\[${white}\] on \[${violet}\]\" \"\[${blue}\]\")"; # Git repository details
PS1+="\$(prompt_project_infos)";
PS1+="\n";
PS1+="\[${white}\]\$ \[${reset}\]"; # `$` (and reset color)
export PS1;

PS2="\[${yellow}\]→ \[${reset}\]";
export PS2;
5b513f54b954413340889952e4c6b9ee21e12856 | Shell | angshine/pytorch3d | /packaging/build_conda.sh | UTF-8 | 1,091 | 2.9375 | 3 | [
"MIT",
"BSD-3-Clause"
] | permissive | #!/bin/bash
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# Builds the pytorch3d conda package via conda-build.
set -ex
script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
. "$script_dir/pkg_helpers.bash"
# Read __version__ straight out of the package's __init__.py.
VERSION=$(python -c "exec(open('${script_dir}/../pytorch3d/__init__.py').read()); print(__version__)")
# Prevent dev tag in the version string.
export BUILD_VERSION=$VERSION
export BUILD_TYPE=conda
# setup_* helpers come from pkg_helpers.bash sourced above.
setup_env "$VERSION"
export SOURCE_ROOT_DIR="$PWD"
setup_conda_pytorch_constraint
setup_conda_cudatoolkit_constraint
setup_visual_studio_constraint
if [[ "$JUST_TESTRUN" == "1" ]]
then
# We are not building for other users, we
# are only trying to see if the tests pass.
# So save time by only building for our own GPU.
unset NVCC_FLAGS
fi
# shellcheck disable=SC2086
conda build $CONDA_CHANNEL_FLAGS ${TEST_FLAG:-} -c bottler -c fvcore -c iopath -c conda-forge --no-anaconda-upload --python "$PYTHON_VERSION" packaging/pytorch3d
| true |
6c32ce46c342f37d0e5a7f8e5a8e7955274f9bbc | Shell | coreos/tectonic-installer | /tests/smoke/aws/cluster-foreach.sh | UTF-8 | 981 | 3.84375 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
set -x
set -e

# Deliberately convert ANY failure into exit 0 so CI never fails on this
# best-effort helper; the EXIT trap routes every exit through here.
fail () {
    # shellcheck disable=SC2181
    if [ $? -ne 0 ]
    then
        echo "$0 failed but exiting 0 because we don't want to fail tests"
    fi
    exit 0
}
trap fail EXIT

if [ $# -eq 0 ]
then
    echo "usage: $0 command"
    echo "Make sure your AWS creds & env vars are set (\$AWS_REGION, \$CLUSTER)"
    # BUG FIX: previously fell through and kept running with an empty
    # command, failing later on the '< "$CMD"' redirection instead of
    # stopping at the usage message. (The trap still turns this into
    # an overall exit 0.)
    exit 1
fi

if [ -z "$CLUSTER" ]
then
    echo "\$CLUSTER not set"
    exit 1
fi

if [ -z "$AWS_REGION" ]
then
    echo "\$AWS_REGION not set"
    exit 1
fi

# $CMD is the path of a script that is streamed to each master over ssh.
CMD=$*

ASG_NAME="${CLUSTER}-masters"
INSTANCES=$(aws autoscaling describe-auto-scaling-groups --region="$AWS_REGION" --auto-scaling-group-names="$ASG_NAME" | jq -r .AutoScalingGroups[0].Instances[].InstanceId)
# shellcheck disable=SC2086
HOSTS=$(aws ec2 describe-instances --region="$AWS_REGION" --instance-ids $INSTANCES | jq -r .Reservations[].Instances[].PublicIpAddress)

# Best-effort from here on: keep iterating even if one host fails.
set +e
for HOST in $HOSTS
do
    ssh -oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null "core@${HOST}" 'bash -s' < "$CMD"
done
| true |
b1904e040027ee3044f4a33228da55afa4e1688c | Shell | apowers313/atom | /test/main.bats | UTF-8 | 1,365 | 2.90625 | 3 | [] | no_license | # This is a bats test file
# See also:
# https://github.com/sstephenson/bats
# setup mocks
# test/helper is prepended to PATH so the 'adb'/'pkg' mocks shadow the real
# tools; each mock appends its argv to a *.mocklog file that tests diff.
export PATH="`pwd`/test/helper":$PATH
# Per-test hooks: start and end every test with a clean mock log state.
setup() {
rm -f *.mocklog .mockcount.tmp
}
teardown() {
rm -f *.mocklog .mockcount.tmp
}
@test "no arguments" {
run atom
[ "$status" -eq 0 ]
[ "${lines[0]}" = "atom: no commands specified" ]
}
@test "help" {
skip
}
@test "debug" {
skip
}
@test "push with no args" {
skip
}
@test "push to IP" {
skip
}
@test "push to serial" {
# mock.adb.1 is picked up by the adb mock as the canned reply for its
# first invocation ('adb get-state' must report a connected device).
mock.adb.1() {
echo "device"
}
export -f mock.adb.1
atom push 10.100.100.150
[ -s adb.mocklog ]
# The heredoc is the exact expected sequence of adb invocations.
diff adb.mocklog <(cat << EOF
connect 10.100.100.150
-s 10.100.100.150 get-state
-s 10.100.100.150 root
connect 10.100.100.150
-s 10.100.100.150 remount
-s 10.100.100.150 push $HOME/.atom/staging/android21-arm-generic/* /data/local/tmp
EOF)
}
@test "create-template with no args" {
skip
}
@test "create-template" {
skip
}
@test "create-template second time doesn't install" {
skip
}
@test "install" {
run atom install foobar
[ -s pkg.mocklog ]
diff pkg.mocklog <(cat << EOF
-C $HOME/.atom/pkg.conf -r $HOME/.atom/vendor/android-21/arm/ install foobar
EOF)
}
@test "search" {
run atom search blahpkg
[ -s pkg.mocklog ]
diff pkg.mocklog <(cat << EOF
-C $HOME/.atom/pkg.conf search blahpkg
EOF)
}
6fb17198a8d11b63840a7f4b9f8eee8456b7017d | Shell | commiyou/conf | /zsh/zshrc.d/key-bindings.zsh | UTF-8 | 2,787 | 3.21875 | 3 | [] | no_license | # Make sure that the terminal is in application mode when zle is active, since
# only then values from $terminfo are valid
# (zsh keybinding setup: terminfo-driven special keys + custom widgets)
if (( ${+terminfo[smkx]} )) && (( ${+terminfo[rmkx]} )); then
function zle-line-init() {
echoti smkx
}
function zle-line-finish() {
echoti rmkx
}
zle -N zle-line-init
zle -N zle-line-finish
fi
bindkey -e # Use emacs key bindings
if [[ "${terminfo[kpp]}" != "" ]]; then
bindkey "${terminfo[kpp]}" up-line-or-history # [PageUp] - Up a line of history
fi
if [[ "${terminfo[knp]}" != "" ]]; then
bindkey "${terminfo[knp]}" down-line-or-history # [PageDown] - Down a line of history
fi
# start typing + [Up-Arrow] - fuzzy find history forward
if [[ "${terminfo[kcuu1]}" != "" ]]; then
autoload -U up-line-or-beginning-search
zle -N up-line-or-beginning-search
bindkey "${terminfo[kcuu1]}" up-line-or-beginning-search
fi
# start typing + [Down-Arrow] - fuzzy find history backward
if [[ "${terminfo[kcud1]}" != "" ]]; then
autoload -U down-line-or-beginning-search
zle -N down-line-or-beginning-search
bindkey "${terminfo[kcud1]}" down-line-or-beginning-search
fi
if [[ "${terminfo[khome]}" != "" ]]; then
bindkey "${terminfo[khome]}" beginning-of-line # [Home] - Go to beginning of line
fi
if [[ "${terminfo[kend]}" != "" ]]; then
bindkey "${terminfo[kend]}" end-of-line # [End] - Go to end of line
fi
bindkey '^[[1;5C' forward-word # [Ctrl-RightArrow] - move forward one word
bindkey '^[[1;5D' backward-word # [Ctrl-LeftArrow] - move backward one word
if [[ "${terminfo[kcbt]}" != "" ]]; then
bindkey "${terminfo[kcbt]}" reverse-menu-complete # [Shift-Tab] - move through the completion menu backwards
fi
bindkey '^?' backward-delete-char # [Backspace] - delete backward
if [[ "${terminfo[kdch1]}" != "" ]]; then
bindkey "${terminfo[kdch1]}" delete-char # [Delete] - delete forward
else
# Fallback escape sequences when terminfo lacks kdch1.
bindkey "^[[3~" delete-char
bindkey "^[3;5~" delete-char
bindkey "\e[3~" delete-char
fi
# Edit the current command line in $EDITOR
autoload -U edit-command-line
zle -N edit-command-line
bindkey '\C-x\C-e' edit-command-line
# file rename magick
bindkey "^[m" copy-prev-shell-word
bindkey '^X^F' emacs-forward-word
bindkey '^X^B' emacs-backward-word
#
bindkey -s '^X^Z' '%-^M'
bindkey '^Xe' expand-cmd-path
#bindkey '^[^I' reverse-menu-complete
#bindkey '^X^N' accept-and-infer-next-history
bindkey '^W' kill-region
bindkey '^I' complete-word
# Widget: typing '.' right after '..' expands to '../..' for fast ascent.
expand-dot-to-parent-directory-path() {
if [[ $LBUFFER = *.. ]]; then
LBUFFER+='/..'
else
LBUFFER+='.'
fi
}
zle -N expand-dot-to-parent-directory-path
bindkey -M emacs . expand-dot-to-parent-directory-path
bindkey -M emacs -s '^[o' '^Anohup ^e &^M' # Alt-o
| true |
81f52ef87a316d657ae605301b7d9936e3a17101 | Shell | syseleven/midonet | /tests/sandbox/provisioning/keystone-newton-provisioning.sh | UTF-8 | 794 | 2.90625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash -x
# Shorthand for running commands inside the sandbox keystone container.
INDOCKER="docker exec mnsandbox${SANDBOX_NAME}_keystone_1"
KEYSTONE_IP=$(docker exec mnsandbox${SANDBOX_NAME}_keystone_1 hostname --ip-address)
NEUTRON_IP=$(docker exec mnsandbox${SANDBOX_NAME}_neutron_1 hostname --ip-address)
OS_SERVICE_ENDPOINT="http://$KEYSTONE_IP:5000"
OPENSTACK="$INDOCKER openstack --os-auth-url=$OS_SERVICE_ENDPOINT"
# Create/upgrade the keystone database schema.
$INDOCKER keystone-manage db_sync
KEYSTONE_SERVICE_ID=$($OPENSTACK service list | awk '/ identity / {print $2}' | xargs | cut -d' ' -f1)
# NOTE(review): KEYSTONE_SERVICE_ID is computed but never used below --
# confirm whether it can be dropped.
NEUTRON_SERVICE_ID=$($OPENSTACK service list | awk '/ network / {print $2}' | xargs | cut -d' ' -f1)
# Register the neutron endpoint (public/internal/admin) in the catalog.
$OPENSTACK endpoint create \
--publicurl http://$NEUTRON_IP:9696 \
--internalurl http://$NEUTRON_IP:9696 \
--adminurl http://$NEUTRON_IP:9696 \
--region regionOne $NEUTRON_SERVICE_ID
| true |
0c6fb0840e2c3958fd57ec94b08c770622c86240 | Shell | buggsboony/toggleinput | /install.sh | UTF-8 | 236 | 2.90625 | 3 | [] | no_license | #!/bin/bash
# Install helper: mark the tool executable and expose it on the PATH via a
# symlink (without the extension) in /usr/bin.
tool=toggleinput
ext=.php
# The extension may be empty ("peut être extension vide").
echo "Set executable..."
chmod +x "${tool}${ext}"
echo "Create Symbolic link to /usr/bin folder"
sudo ln -s "${PWD}/${tool}${ext}" "/usr/bin/${tool}"
| true |
f8307901a79879a6fe0819ec4d64c3e9e444209c | Shell | artisdom/_ebDev | /scripts/recipes/next_most_similar_image_anim.sh | UTF-8 | 947 | 3.34375 | 3 | [] | no_license | # DESCRIPTION
# Using other scripts, creates an animation where every frame is the next most similar image. Animation is 30fps, but with the source "framerate" for each image at 0.83 FPS (each still in the animation shows ~1.2 seconds).
# USAGE
# Examine the source code of this script, and the scripts it calls, to get an idea what goes on here. Run this script with:
#    next_most_similar_image_anim.sh
# CODE
# Run the following commands and scripts as listed, adapting for your situation.
# All images you operate on must be pngs for this to work:
# allSVG2img.sh 1080 png
# Orders the PNGs in this directory by visual similarity (chain of
# next-most-similar images).
imgsGetSimilar.sh png
# After that step, if you want to insert an image to the very start of the process (e.g. to fade in from black and back to black at the end), name that image e.g. 000.png so that the following scripts will sort that first in the process:
mkNumberedCopiesFromFileList.sh
cd _temp_numbered
# NOTE(review): the parameter meanings (0.83 source fps, 30 output fps, 13
# quality?) are inferred from the header text -- confirm against
# ffmpegAnim.sh's own usage notes.
ffmpegAnim.sh 0.83 30 13 png
# Result will be in ..?
9b93ec8e2104517e459d7286659420e5638d6589 | Shell | mcdba/sysinfo | /addsudousers.sh | UTF-8 | 155 | 2.640625 | 3 | [] | no_license | #!/bin/bash
# Append a passwordless-sudo rule (tcpdump/netstat only) to /etc/sudoers
# for every user name passed on the command line.
while [[ -n $1 ]]; do
# SECURITY(review): writing to /etc/sudoers directly bypasses visudo's
# syntax validation; a malformed entry can lock out sudo entirely.
echo "$1 ALL=(ALL) NOPASSWD: /usr/sbin/tcpdump,/bin/netstat" >> /etc/sudoers;
shift # shift all parameters;
done
af409f72a0e1b435afa34623299ed0981d9a83c8 | Shell | brianhackel/homebridge-rasppi-gpio-garagedoor | /scripts/var/lib/homebridge/garage-door-gpio | UTF-8 | 1,199 | 3.65625 | 4 | [] | no_license | #!/bin/bash
PATH=/sbin:/bin:/usr/sbin:/usr/bin
DESC="Enable GPIO for the Garage Door Module"
NAME="garage-door-gpio"
SCRIPTNAME=/etc/init.d/$NAME
. /lib/lsb/init-functions
# Export a GPIO pin via sysfs and configure it.
# $1 = pin number, $2 = direction ("in"/"out"),
# $3 = polarity ("low" selects active-low), $4 = initial value (out pins only).
# No-op when the pin is already exported.
function export_pin {
 if [ ! -e /sys/class/gpio/gpio${1} ] ; then
  echo $1 > /sys/class/gpio/export
  # active_low is written before direction/value, so the initial value
  # below is interpreted with this polarity already applied.
  if [ "$3" == "low" ] ; then
   echo 1 > /sys/class/gpio/gpio${1}/active_low
  else
   echo 0 > /sys/class/gpio/gpio${1}/active_low
  fi
  echo $2 > /sys/class/gpio/gpio${1}/direction
  # Only output pins get an initial value write.
  if [ "$2" == "out" ] ; then
   echo $4 > /sys/class/gpio/gpio${1}/value
  fi
 fi
}
# Release a previously exported GPIO pin ($1 = pin number).
# Does nothing when the pin was never exported.
function unexport_pin {
 if [ -e "/sys/class/gpio/gpio${1}" ] ; then
  echo "${1}" > /sys/class/gpio/unexport
 fi
}
# Export/unexport the GPIO pins for both garage doors.
# Bug fix: the original `else [ "$1" == "stop" ]` executed the test as a plain
# command and ran the teardown branch for ANY argument other than "start";
# only an explicit "stop" should unexport the pins.
if [ "$1" = "start" ] ; then
 log_daemon_msg "Enabling Garage Door GPIO pins for door 0"
 export_pin 5 out low 0
 export_pin 20 in high 0
 log_end_msg 0
 log_daemon_msg "Enabling Garage Door GPIO pins for door 1"
 export_pin 6 out low 0
 export_pin 21 in high 0
 log_end_msg 0
elif [ "$1" = "stop" ] ; then
 log_daemon_msg "Disabling Garage Door GPIO pins for door 0"
 unexport_pin 5
 unexport_pin 20
 log_daemon_msg "Disabling Garage Door GPIO pins for door 1"
 unexport_pin 6
 unexport_pin 21
fi
559f2e19f47b3f40cc7618cc4758d4cebd24189b | Shell | aogaki/CircleAtt | /test.sh | UTF-8 | 272 | 2.640625 | 3 | [] | no_license | #!/bin/bash
kEvents="100000000"
for((i=0;i<75;i++)); do
for t in `cat thickness`; do
suffix="$i""T$t"
attFile="att$suffix.dat"
rm -f att.dat
ln -sf $attFile att.dat
echo "/run/beamOn $kEvents" > test.mac
./BI -m test.mac -o result$suffix.root
done
done
| true |
f1dbb65d03510a2c75062be11e5d5e27d074ee52 | Shell | ktscript/hodl | /generate_splashs.sh | UTF-8 | 221 | 2.515625 | 3 | [] | no_license | INPUT="icon.png"
BG="#141E30"
sizes=(640x1136 750x1294 1242x2148 1125x2436 1536x2048 1668x2224 2048x2732)
for size in ${sizes[*]}
do
convert $INPUT -background $BG -gravity center -extent $size "launch-$size.png"
done
| true |
1daa71ca465b982a15cb2df05b2511742d90d7d9 | Shell | akry/vnet-vagrant | /old/config.d/vna2.sh | UTF-8 | 2,372 | 3.125 | 3 | [] | no_license | #!/bin/bash
#
# requires:
# bash
#
set -e
set -o pipefail
set -x
# Do some changes ...
vnet_root=/opt/axsh/openvnet
PATH=${vnet_root}/ruby/bin:${PATH}
vnmgr=172.16.9.10
lxc_root_passwd=${lxc_root_passwd:-"root"}
yum -y install openvnet-vna
cat > /etc/openvnet/vna.conf <<EOF
node {
id "vna2"
addr {
protocol "tcp"
host "172.16.9.11"
public ""
port 9103
}
}
network {
uuid ""
gateway {
address ""
}
}
EOF
cat > /etc/sysconfig/network-scripts/ifcfg-br0 <<EOF
DEVICE=br0
TYPE=OVSBridge
DEVICETYPE=ovs
ONBOOT=yes
BOOTPROTO=static
IPADDR=10.100.0.3
NETMASK=255.255.255.0
OVS_EXTRA="
set bridge \${DEVICE} protocols=OpenFlow10,OpenFlow12,OpenFlow13 --
set bridge \${DEVICE} other_config:disable-in-band=true --
set bridge \${DEVICE} other-config:datapath-id=0000bbbbbbbbbbbb --
set bridge \${DEVICE} other-config:hwaddr=02:02:00:00:00:02 --
set-fail-mode \${DEVICE} standalone --
set-controller \${DEVICE} tcp:127.0.0.1:6633
"
EOF
cat > /etc/sysconfig/network-scripts/ifcfg-eth2 <<EOF
DEVICE=eth2
TYPE=Ethernet
ONBOOT=yes
BOOTPROTO=static
IPADDR=172.16.9.11
NETMASK=255.255.255.0
EOF
yum -y install lxc lxc-templates
mkdir /cgroup
echo "cgroup /cgroup cgroup defaults 0 0" >> /etc/fstab
mount /cgroup
ifdown eth1
ifdown eth2
ifup eth1 || :
ifup eth2 || :
initctl start vnet-vna
lxc-create -t centos -n inst3
lxc-create -t centos -n inst4
chroot_dirs="
/var/lib/lxc/inst3/rootfs
/var/lib/lxc/inst4/rootfs
"
for dir in ${chroot_dirs}; do
chroot ${dir} /bin/bash -ex <<EOS
echo root:${lxc_root_passwd} | chpasswd
EOS
done
cat > /var/lib/lxc/inst3/config <<EOF
lxc.network.type = veth
lxc.network.flags = up
lxc.network.veth.pair = inst3
lxc.network.ipv4 = 10.200.0.12
lxc.network.hwaddr = 52:54:FF:00:00:03
lxc.rootfs = /var/lib/lxc/inst3/rootfs
lxc.include = /usr/share/lxc/config/centos.common.conf
lxc.arch = x86_64
lxc.utsname = inst3
lxc.autodev = 0
EOF
cat > /var/lib/lxc/inst4/config <<EOF
lxc.network.type = veth
lxc.network.flags = up
lxc.network.veth.pair = inst4
lxc.network.ipv4 = 10.200.0.13
lxc.network.hwaddr = 52:54:FF:00:00:04
lxc.rootfs = /var/lib/lxc/inst4/rootfs
lxc.include = /usr/share/lxc/config/centos.common.conf
lxc.arch = x86_64
lxc.utsname = inst4
lxc.autodev = 0
EOF
lxc-start -d -n inst3
lxc-start -d -n inst4
ovs-vsctl add-port br0 inst3
ovs-vsctl add-port br0 inst4
| true |
be286680ce7312bfbb5bac5f07ac61f39abe066c | Shell | lrajlich/docker-example | /mysql/configure_db.sh | UTF-8 | 819 | 3.34375 | 3 | [] | no_license | #!/bin/bash
### Invoked from Dockerfile. ###
## Start Database
/usr/bin/mysqld_safe > /dev/null 2>&1 &
## Wait for database to be ready
RET=1
while [[ RET -ne 0 ]]; do
echo "=> Waiting for confirmation of MySQL service startup"
sleep 5
mysql --user=root --password=root -e "status" > /dev/null 2>&1
RET=$?
done
## Create "dockerexample" user and database
mysql --user=root --password=root -e "CREATE USER 'dockerexample'@'%' IDENTIFIED BY 'dockerexample'"
mysql --user=root --password=root -e "CREATE DATABASE dockerexample"
mysql --user=root --password=root -e "GRANT ALL PRIVILEGES ON dockerexample.* to 'dockerexample'@'%'"
## Load database schema, which is a table of pageloads timestamps and source ip address
#./rehydrate
## Shut down database
mysqladmin --user=root --password=root shutdown | true |
f3d57a0fbf38dfb18d95f44d10bfd91547020a08 | Shell | razrichter/dotfiles | /shellrc.d/30_iterm2_escapes | UTF-8 | 3,426 | 3.640625 | 4 | [
"MIT"
] | permissive | # -*- mode: sh; -*-
# vim: set filetype=sh :
## iTerm-specific Escapes -- c.f. https://iterm2.com/documentation-escape-codes.html
# -------------------------------------------- cursor/terminal info functions
cursor_shape() {
  # Usage: cursor_shape [b|v|u]
  # Set the iTerm2 cursor shape: b* = block (default), v* = vertical line,
  # u* = underscore.  With no argument the block cursor is restored.
  local shape
  case "${1:-}" in
    v*) shape=1 ;;  # vertical line
    u*) shape=2 ;;  # underscore
    *)  shape=0 ;;  # block (also the reset/default)
  esac
  printf "$(_term_OSC)1337;CursorShape=${shape}$(_term_ST)"
}
cursor_guide() {
  # Toggle iTerm2's cursor-line highlight.  Arguments starting with 'n'
  # disable it; anything else (or no argument) enables it.
  local val
  case "${1:-}" in
    n*) val='no' ;;
    *)  val='yes' ;;
  esac
  printf "$(_term_OSC)1337;HighlightCursorLine=${val}$(_term_ST)"
}
alias set_mark='printf "$(_term_OSC)1337;SetMark$(_term_ST)"' # set_mark tells iTerm that this is a spot to remember in scrollback history
tell_dir() { # add dir to Recent Directories
# Usage: tell_dir [/path/to/tell]
# default = $PWD
local dir=${1:-$PWD}
printf "$(_term_OSC)1337;CurrentDir=%s$(_term_ST)" $dir
}
# --------------------------------------------------------------------------- terminal display modifications
tab_color() {
  # Usage: tab_color RRR GGG BBB
  # Set the iTerm2 tab background color, one escape per channel.
  # Missing green/blue arguments default to 0; with no arguments at all
  # the color is reset to the profile default.
  if [ -n "$1" ]; then
    local red=${1:-0}
    local green=${2:-0}
    local blue=${3:-0}
    printf "$(_term_OSC)6;1;bg;red;brightness;%i$(_term_ST)" $red
    printf "$(_term_OSC)6;1;bg;green;brightness;%i$(_term_ST)" $green
    printf "$(_term_OSC)6;1;bg;blue;brightness;%i$(_term_ST)" $blue
  else
    printf "$(_term_OSC)6;1;bg;*;default$(_term_ST)"
  fi
}
iterm_profile() {
local profile=${1:-'Default'}
printf "$(_term_OSC)1337;SetProfile=%s$(_term_ST)" $profile
}
# -------------------------------------------------------------------------- notifications
alias term_to_foreground='printf "$(_term_OSC)1337;StealFocus$(_term_ST)"' # pushes the iTerm window to the foreground
dock_alert() {
  # Request (or cancel) attention via the Dock icon.
  # Arguments starting with 'n' cancel the request; the default is 'yes'.
  local val
  case "${1:-}" in
    n*) val='no' ;;
    *)  val='yes' ;;
  esac
  printf "$(_term_OSC)1337;RequestAttention=%s$(_term_ST)" "$val"
}
growl() { # outputs text as a Growl notification
# Usage: growl Text of Notification
printf "$(_term_OSC)9;%s$(_term_ST)" "$*"
}
popup() {
growl "$@"
}
growlcat() { # outputs stdin as a Growl notification
  # Usage: cmd | growlcat
  # The unquoted echo-of-$(sed) double expansion strips control characters
  # and word-splits the text, collapsing newlines and whitespace runs into
  # single spaces so the notification body is a single line.
  local s=$(echo $(sed -e 's/[[:cntrl:]]//g')) # swallow control chars and \n
  printf "$(_term_OSC)9;${s}$(_term_ST)"
}
# ----------------------------------------------------------------------------- misc functions
# copy disabled because it echos the text to the terminal, and requires the preference Prefs>General>Applications in terminal may access clipboard to be set
# so it's probably easier to just highlight + Cmd-C
# iterm_pbcopy() {
# printf "$(_term_OSC)1337;CopyToClipboard=$(_term_ST)"
# cat -
# printf "$(_term_OSC)1337;EndCopy$(_term_ST)"
# }
# if ! which pbcopy >/dev/null; then # only do this if we're *not* on a mac
# alias pbcopy=iterm_pbcopy
# fi
alias clear_scrollback='printf "$(_term_OSC)1337;ClearScrollback$(_term_ST)"' # clears the terminal foreground (like Cmd-K)
| true |
b0d267c8b4710a8cebadd8980502da0c36e4e7aa | Shell | jschwartzenberg/dos-download-scripts | /dosinstall | UTF-8 | 572 | 3.765625 | 4 | [] | no_license | #!/bin/sh
DOS_VERSIONS="freedos11 freedos11_fromiso freedos10_fromtgz"
GETOPT=`getopt --name "test" \
--longoptions help,list \
--options h,l \
-- "$@"`
if [ $? != 0 ] ; then echo "Terminating..." >&2 ; exit 1 ; fi
eval set -- "$GETOPT"
while true;
do
case "$1" in
--help)
echo Call "$0" with:
echo --list List available DOS versions
break;;
--list)
for dos_version in ${DOS_VERSIONS}
do
echo $dos_version
done
break;;
*)
echo "Error try $0 --help"
break;;
esac
done
| true |
a5bf015f85be8bf1b8c0b4ea9b6b4decf746287c | Shell | vfoucault/kafka-101 | /scripts/get_confluent.sh | UTF-8 | 452 | 3.0625 | 3 | [] | no_license | #!/bin/bash
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
BASE_DIR=$SCRIPT_DIR/../
SRC_DIR=$BASE_DIR/sources
mkdir $SRC_DIR 2>/dev/null
echo "Downloading confluent OSS 3.3.0..."
curl --silent -o $SRC_DIR/confluent-oss-3.3.0-2.11.tar.gz http://packages.confluent.io/archive/3.3/confluent-oss-3.3.0-2.11.tar.gz
cd $SRC_DIR
tar xzpf confluent-oss-3.3.0-2.11.tar.gz
echo "Done. CP Oss 3.3.0 available in $SRC_DIR/sources/confluent-3.3.0"
| true |
43ee86ab6fe5e437ac9b94d9054d6d50f26fbe00 | Shell | carlskeide/dotfiles | /bin/docker-maid.sh | UTF-8 | 526 | 3.3125 | 3 | [] | no_license | #!/bin/bash
# docker-maid: prune exited/created containers, dangling images and
# dangling volumes.  Fix: the usage line now advertises the "all" mode,
# which the checks below have always supported.
[[ -z "${1:-}" ]] &&\
  echo "usage: $0 <all|containers|images|volumes>" &&\
  exit 1
[[ "$1" == "all" || "$1" == "containers" ]] &&\
  echo "Containers" &&\
  docker ps -qf status=exited -f status=created | xargs -I{} docker rm -vf {}
[[ "$1" == "all" || "$1" == "images" ]] &&\
  echo "Images" &&\
  docker images -qf "dangling=true" | xargs -I{} docker rmi {}
[[ "$1" == "all" || "$1" == "volumes" ]] &&\
  echo "Volumes" &&\
  docker volume ls -qf 'dangling=true' | xargs -I{} docker volume rm {}
| true |
3b6ca36186b93385f55e1649abbefd4e437b3b6d | Shell | xulilililili/pg_rman | /pgrman_backup.sh | UTF-8 | 2,089 | 4 | 4 | [] | no_license | #!/bin/bash
# use root to execute this shell script
# level
ERROR="\033[41;37m ERROR \033[0m"
INFO="\033[42;37m INFO \033[0m"
WARN="\033[43;37m WARN \033[0m"
COMMON_ERROR="some error happened, specific information please see console output"
source ~/.bash_profile
# postgresql data directory
# pgdata=$(grep 'PGDATA' ~/.bashrc |awk -F= '{print $2}')
# backup directory
backup_path=$(grep 'BACKUP_PATH' ~/.bashrc | tail -n 1 | awk -F= '{print $2}')
# log file
backup_log=${backup_path}/backup_log/backup_$(date +'%Y%m%d').log
# backup type:{full|incremental}
backup_type=$1
# check command exit value, 0 is success
# Abort the whole script when the immediately preceding command failed.
# NOTE: $? must be captured on the very first line, before any other
# command in this function overwrites it.
function check_fun(){
    status=$?
    error=${COMMON_ERROR}
    # Non-zero status: print the generic error banner and bail out.
    if [[ 0 -ne ${status} ]] ; then
        echo -e "${ERROR} ${error}"
        exit 1
    fi
}
# create new log every day
# log format:backup_yyyyMMdd
function create_log(){
if [ ! -d "${backup_path}"/backup_log ] ; then
mkdir -p "${backup_path}"/backup_log
check_fun
fi
if [ ! -f "${backup_log}" ] ; then
touch "${backup_log}"
check_fun
fi
# delete logs 7 days ago
find "${backup_path}"/backup_log/ -mtime +6 -name "*.log" -exec rm -Rf {} \;
check_fun
}
# backup :1.full 2.incremental
function backup(){
case "${backup_type}" in
incremental)
pg_rman backup -b incremental &>> "${backup_log}"
check_fun
;;
full)
pg_rman backup -b full &>> "${backup_log}"
check_fun
;;
*)
echo -e "$(date +'%Y-%m-%d %H:%M:%S'): ${ERROR} uthe value of the type is only incremental or full" >> "${backup_log}"
exit 1
esac
# backup set check
pg_rman validate &>> "${backup_log}"
check_fun
# clean up invalid backup data
pg_rman purge &>> "${backup_log}"
check_fun
}
# create new log every day
create_log
start_time=$(date +%s)
echo -e "$(date +'%Y-%m-%d %H:%M:%S'): ${INFO} start ${backup_type} backup" >> "${backup_log}"
# backup
backup
end_time=$(date +%s)
echo -e "$(date +'%Y-%m-%d %H:%M:%S'): ${INFO} ${backup_type} backup end" >> "${backup_log}"
echo -e "$(date +'%Y-%m-%d %H:%M:%S'): ${INFO} total backup time: "$((end_time-start_time))"s" >> "${backup_log}"
echo -e "" "${backup_log}"
| true |
b0010ffad3aaf344144017a5c6ddf23d7663b9a6 | Shell | friemen/i3config | /hadid/i3/setup-kbd.sh | UTF-8 | 381 | 2.9375 | 3 | [] | no_license | #!/bin/bash
# Switch the X keyboard layout and re-apply the local xmodmap tweaks,
# keeping before/after keymap snapshots for debugging.
layout="$1"
scriptdir="$( cd "$( dirname "$0" )" && pwd )"
# NOTE(review): $USERNAME is not set by all environments; $USER is the
# conventional variable — confirm before changing, since existing state
# files live under the old path.
tempdir="/tmp/i3-${USERNAME}"
mkdir -p "$tempdir"
notify-send -t 2000 "Switching keyboard layout: $layout"
setxkbmap -layout "$layout" -option "compose:sclk" -option "caps:capslock"
# Snapshot the keymap before and after applying the custom xmodmap so a
# broken mapping can be diffed and restored.
xmodmap -pke > "$tempdir/i3-xmodmap-last"
xmodmap "$scriptdir/xmodmaprc"
xmodmap -pke > "$tempdir/i3-xmodmap-current"
xset r rate 250 80
| true |
eb8d51519fef6b20051ad38c3dc78fb8807c3ddb | Shell | arabiaweather/watch-n-compress | /startCss.sh | UTF-8 | 261 | 2.671875 | 3 | [] | no_license | if [ $(ps aux | grep $USER | grep node | grep -v grep | wc -l | tr -s "\n") -eq 0 ]
then
export PATH=/usr/local/bin:$PATH
export NODE_ENV=production
cd /var/njs/watch-n-compress && forever --spinSleepTime 10000 start watchCss.js >> foreverCss.log 2>&1
fi
| true |
993c3b7bdf95786ee38e040964562b3a30763bc9 | Shell | nishanthjois/pipeline | /jupyterhub/run | UTF-8 | 1,222 | 2.875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# All paths (dirs, not files) up to and including /root must have +x permissions.
# It's just the way linux works. Don't fight it.
# http://askubuntu.com/questions/537032/how-to-configure-apache2-with-symbolic-links-in-var-www
if [ ! -f /var/www/html ]; then
chmod a+x /root
cd /var/www/ && ln -s /root/html
fi
if [ ! -f /etc/apache2/apache2.conf ]; then
cd /etc/apache2/ && ln -s /root/config/apache2/apache2.conf
fi
if [ ! -f /etc/apache2/sites-available/www.conf ]; then
cd /etc/apache2/sites-available && ln -s /root/config/apache2/www.conf
a2ensite www.conf
fi
source /etc/apache2/envvars
service apache2 start
source /root/sysutils/container-limits.sh
export JAVA_MAX_MEM_RATIO=85
export JAVA_OPTIONS="$(/root/sysutils/jvm-limits.sh)"
$SPARK_HOME/sbin/start-master.sh --webui-port 6060
$SPARK_HOME/sbin/start-slave.sh --cores 4 --memory 4g --webui-port 6061 spark://127.0.0.1:7077
/usr/sbin/sshd
cd ~
start-dfs.sh &
cd ~
tensorboard --host=0.0.0.0 --logdir=/root/tensorboard &
cd ~
export CLASSPATH=$(${HADOOP_HDFS_HOME}/bin/hadoop classpath --glob)
CLASSPATH=$CLASSPATH jupyterhub --ip='' --config=config/jupyterhub/jupyterhub_config.py 2>&1 > /root/logs/tensorflow-jupyterhub.out
| true |
61645cc249420641214002291b9bd23aa972f117 | Shell | Moneyfarm/maven-resource | /assets/check | UTF-8 | 3,632 | 3.5625 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# vim: set ft=sh
set -eu
set -o pipefail
exec 3>&1 # make stdout available as fd 3 for the result
exec 1>&2 # redirect all output to stderr for logging
resource_dir=$(dirname $0)
source $resource_dir/common.sh
# for jq
PATH=/usr/local/bin:$PATH
TMPDIR=${TMPDIR:-/tmp}
payload=$(mktemp $TMPDIR/maven-resource-request.XXXXXX)
working=$TMPDIR/maven-resource-working && mkdir -p $working
cat > $payload <&0
debug=$(jq -r '.source.debug //empty' < $payload)
if [ "$debug" = "true" ]; then
set -x
fi
release_url=$(jq -r '.source.url //empty' < $payload)
snapshot_url=$(jq -r '.source.snapshot_url //empty' < $payload)
artifact=$(jq -r '.source.artifact //empty' < $payload)
version_regexp=$(jq -r '.source.version_regexp //empty' < $payload)
version=$(jq -r '.version.version //empty' < $payload)
username=$(jq -r '.source.username //empty' < $payload)
password=$(jq -r '.source.password //empty' < $payload)
skip_cert_check=$(jq -r '.source.skip_cert_check //empty' < $payload)
repository_cert=$(jq -r '.source.repository_cert //empty' < $payload)
# TODO: Add more error checking
if [ -z "$release_url" ] && [ -z "$snapshot_url" ] ; then
printf '\e[91m[ERROR]\e[0m invalid payload (must specify url)\n'
exit 1
fi
# groupId:artifactId:type[:classifier]
groupId=$(get_group_id $artifact)
artifactId=$(get_artifact_id $artifact)
packaging=$(get_packaging $artifact)
auth=""
[ -n "$username" ] && auth="--user $username:$password"
cert=""
if [ "$skip_cert_check" = "true" ]; then
cert="-k"
elif [ -n "$repository_cert" ]; then
mkdir -p $working/security
echo "$repository_cert" > $working/security/repository.crt
cert="--cacert $working/security/repository.crt"
fi
# convert 1.0.0-20170328.031519-19 to 1.0.0-SNAPSHOT
uniqueVersion=$(echo "$version" | grep -oE "[0-9]{8}\.[0-9]{6}-[0-9]{1,}" || true)
if [ -n "$uniqueVersion" ]; then
version=$(echo ${version%-$uniqueVersion}-SNAPSHOT)
fi
url=$release_url
if [[ "$version" = *-SNAPSHOT ]]; then
[ -n "$snapshot_url" ] && url=$snapshot_url
metadataUrl="$url/${groupId//.//}/$artifactId/$version/maven-metadata.xml"
else
metadataUrl="$url/${groupId//.//}/$artifactId/maven-metadata.xml"
fi
set +e
metadata=$(curl --fail --silent --show-error $cert $auth $metadataUrl 2>&1)
if [ $? -ne 0 ]; then
printf '\e[91m[ERROR]\e[0m %s\n' "$metadata"
printf '\e[91m[ERROR]\e[0m failed to download maven-metadata.xml from: %s\n' "$metadataUrl"
exit 2
fi
set -e
declare -a versions=( )
if [[ "$version" = *-SNAPSHOT ]]; then
versions[1]=$(echo ${metadata} | xmllint --xpath "/metadata/versioning/snapshotVersions/snapshotVersion[extension='$packaging']/value/text()" - 2>/dev/null)
elif [ "$version" = "latest" ] || [ -z "$version" -a -z "$version_regexp" ]; then
versions[1]=$(echo ${metadata} | xmllint --xpath "/metadata/versioning/versions/version[last()]/text()" - 2>/dev/null)
else
itemsCount=$(echo ${metadata} | xmllint --xpath 'count(/metadata/versioning/versions/version)' - 2>/dev/null)
found=false
for (( i=1; i <= $itemsCount; i++ )); do
current=$(echo ${metadata} | xmllint --xpath "/metadata/versioning/versions/version[$i]/text()" - 2>/dev/null)
if [ "$found" = false ]; then
if [ "$current" = "$version" ]; then
found=true
elif [ -n "$version_regexp" ]; then
if [[ "$current" =~ $version_regexp ]]; then
found=true
fi
fi
fi
if [ "$found" = true ]; then
versions[$i]=${current}
if [ -n "$version_regexp" ]; then
found=false
fi
fi
done
fi
printf "%s\n" "${versions[@]}" | sed 's/.*/{ "version": "&" }/' | jq --slurp . >&3
| true |
df75c6e96f5cafe7b0e4de8ccd2e74b54b16795d | Shell | HASTE-project/HarmonicIOSetup | /playbooks/network_status.sh | UTF-8 | 255 | 3.265625 | 3 | [] | no_license | #!/bin/bash
# Log interface statistics for ens3 into a per-host file inside a
# timestamped directory under $PWD.  All expansions are now quoted so
# paths containing spaces cannot word-split.
hname=$(hostname)
dir="${PWD}/network-$(date +%Y%m%d%H%M)"
echo "${dir}"
echo "Creating Directory"
mkdir "${dir}"
sfile="${dir}/host-${hname}"
echo "${sfile}"
touch "${sfile}"
echo "logging Network Statistics"
ifstat -n -t -i ens3 >> "${sfile}"
| true |
4815c8e5d4368a8cdb85e960ef1ece867d553a70 | Shell | eskadah/pepysdiary | /scripts/run_tests.sh | UTF-8 | 270 | 2.8125 | 3 | [] | no_license | #!/bin/bash
set -e
# You can optionally pass in a test, or test module or class, as an argument.
# e.g.
# ./run_tests.sh tests.appname.test_models.TestClass.test_a_thing
TESTS_TO_RUN=${1:tests}
./manage.py test --settings=pepysdiary.settings.tests $TESTS_TO_RUN
flake8 | true |
4a1b2673e414680a990897d57a81c05449a20069 | Shell | AntoinePigny/validationLinuxDFS15 | /main.sh | UTF-8 | 345 | 2.671875 | 3 | [] | no_license | #!/bin/bash
# PATH TO FUNCTIONS FILE
FUNCTIONS_PATH="./functions/"
# FUNCTIONS SOURCE
source $FUNCTIONS_PATH'functions.sh'
displayMenu
# change vagrant file (reg ex ne marche pas ...): sed -i.bak "s@# config.vm.network \"private_network\", ip: \"192.168.33.10\"@config.vm.network \"private_network\", ip: \"192.168.33.20\"@g" ./Vagrantfile; | true |
cd0a39dafe786b96fcdda957b635fb769043feb2 | Shell | lifeofguenter/systools | /jenkins-batch-edit.sh | UTF-8 | 643 | 3.234375 | 3 | [] | no_license | #!/usr/bin/env bash
readlink_bin="${READLINK_PATH:-readlink}"
if ! "${readlink_bin}" -f test &> /dev/null; then
__DIR__="$(dirname "$(python -c "import os,sys; print(os.path.realpath(os.path.expanduser(sys.argv[1])))" "${0}")")"
else
__DIR__="$(dirname "$("${readlink_bin}" -f "${0}")")"
fi
# required libs
source "${__DIR__}/libs/functions.lib.sh"
set -E
trap 'throw_exception' ERR
# Iterate NUL-delimited over every Jenkins config.xml under the current
# directory (fd 9 keeps stdin free for the loop body).
while IFS= read -r -d '' -u 9; do
  if grep -q "${1}" "${REPLY}"; then
    consolelog "found match: ${REPLY}"
    # NOTE(review): $1/$2 are spliced into the sed program, so characters
    # special to sed (or a '/' delimiter) in either argument will break or
    # subvert the substitution — confirm inputs are plain literals.
    sed -i.bak "s/${1}/${2}/g" "${REPLY}"
    rm -f "${REPLY}.bak"
  fi
done 9< <( find . -type f -name config.xml -exec printf '%s\0' {} + )
| true |
43c8b0925771500a21d6b525c11c44658f9352ae | Shell | openzipkin/openzipkin.github.io | /quickstart.sh | UTF-8 | 8,338 | 3.984375 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
set -euo pipefail
# This will be set to 1 if we instruct the user to manually verify signatures,
# when they have GPG but don't have the public key of the signer. Would be super
# confusing to tell the user to use files that we've cleaned up.
DO_CLEANUP=0
# shellcheck disable=SC2015
color_title=$(tput setaf 7 && tput bold || true)
color_dim=$(tput setaf 8 || true)
color_good=$(tput setaf 2 || true)
color_bad=$(tput setaf 1 || true)
color_warn=$(tput setaf 3 || true)
color_reset=$(tput sgr0 || true)
repo=https://repo1.maven.org/maven2
usage() {
cat <<EOF
${color_title}$0${color_reset}
Downloads the latest version of the Zipkin Server executable jar
${color_title}$0 GROUP:ARTIFACT:VERSION:CLASSIFIER TARGET${color_reset}
Downloads the "VERSION" version of GROUP:ARTIFACT with classifier "CLASSIFIER"
to path "TARGET" on the local file system. "VERSION" can take the special value
"LATEST", in which case the latest Zipkin release will be used. For example:
${color_title}$0 io.zipkin.aws:zipkin-autoconfigure-collector-kinesis:LATEST:module kinesis.jar${color_reset}
downloads the latest version of the artifact with group "io.zipkin.aws",
artifact id "zipkin-autoconfigure-collector-kinesis", and classifier "module"
to PWD/kinesis.jar
EOF
}
welcome() {
cat <<EOF
${color_title}Thank you for trying Zipkin!${color_reset}
This installer is provided as a quick-start helper, so you can try Zipkin out
without a lengthy installation process.
EOF
}
farewell() {
local artifact_classifier="$1"; shift
local filename="$1"; shift
if [[ "$artifact_classifier" = 'exec' ]]; then
cat <<EOF
${color_good}
You can now run the downloaded executable jar:
java -jar $filename
${color_reset}
EOF
else
cat << EOF
${color_good}
The downloaded artifact is now available at $filename.${color_reset}
EOF
fi
}
cleanup() {
  # Delete the downloaded .md5/.asc companion files for $1.
  # Guarded by the DO_CLEANUP global: it is set to 1 once the user has been
  # told to verify signatures manually (the files must then be kept), and
  # after the first cleanup so repeated calls are no-ops.
  local base_filename="$1"; shift
  if [[ "$DO_CLEANUP" -eq 0 ]]; then
    printf '\n%s\n' "${color_title}Cleaning up checksum and signature files${color_reset}"
    execute_and_log rm -f "$base_filename"{.md5,.asc}
    DO_CLEANUP=1
  fi
}
handle_shutdown() {
local status=$?
local base_filename="$1"; shift
if [[ $status -eq 0 ]]; then
cleanup "$base_filename"
else
cat <<EOF
${color_bad}
It looks like quick-start setup has failed. Please run the command again
with the debug flag like below, and open an issue on
https://github.com/openzipkin/openzipkin.github.io/issues/new. Make sure
to include the full output of the run.
${color_reset}
\\curl -sSL https://zipkin.io/quickstart.sh | bash -sx -- $@
In the meanwhile, you can manually download and run the latest executable jar
from the following URL:
https://search.maven.org/remote_content?g=io.zipkin&a=zipkin-server&v=LATEST&c=exec
EOF
fi
}
execute_and_log() {
  # Echo the command to stderr (dimmed), then run it via eval.
  # Because of the eval, arguments are re-parsed by the shell: callers that
  # pass values which may contain spaces must pre-quote them (see fetch()).
  local command=("$@")
  printf >&2 '%s\n' "${color_dim}> ${command[*]}${color_reset}"
  eval "${command[@]}"
}
fetch() {
  # Download URL $1 to path $2 with curl, logging the command.
  # The nested "'...'" quoting survives the eval inside execute_and_log, so
  # URLs/targets with shell-special characters stay single words.
  url="$1"; shift
  target="$1"; shift
  execute_and_log curl -fL -o "'$target'" "'$url'"
}
fetch_latest_version() {
  # Print the newest version listed in the artifact's maven-metadata.xml.
  # $1 - artifact group (dotted), $2 - artifact id
  local artifact_group="$1"; shift
  local artifact_id="$1"; shift
  # Derive the repository path from the arguments instead of silently relying
  # on the artifact_group_with_slashes global having been set by the caller.
  local group_path="${artifact_group//.//}"
  local url="${repo}/${group_path}/${artifact_id}/maven-metadata.xml"
  # Quote the substitution: the old `printf $(...)` word-split the result and
  # would have treated any '%' in it as a format directive.
  printf '%s' "$(curl -sSL "$url" | sed -n '/<version>/s/.*<version>\([^<]*\)<\/version>.*/\1/p' | tail -1)"
}
artifact_part() {
  # Print field $1 (0-based) of the colon-delimited coordinate string $2,
  # or nothing when the coordinate has fewer fields than requested.
  local field="$1"; shift
  local coordinate="$1"; shift
  local -a fields=()
  IFS=':' read -ra fields <<< "$coordinate"
  if (( field < ${#fields[@]} )); then
    printf '%s' "${fields[$field]}"
  else
    printf ''
  fi
}
verify_version_number() {
  # Abort unless $1 looks like a MAJOR.MINOR.PATCH release version.
  # Bug fix: the previous pattern was missing a ']' after the first
  # [[:digit:]], turning the leading piece into a bracket expression that
  # also matched '+', '.', '[' and '\' and accepted two-component strings
  # such as "1.2".
  if [[ ! "$1" =~ ^[[:digit:]]+\.[[:digit:]]+\.[[:digit:]]+$ ]]; then
    cat <<EOF
${color_bad}The target version is "$1". That doesn't look like a valid Zipkin release version
number; this script is confused. Bailing out.
${color_reset}
EOF
    exit 1
  fi
}
verify_checksum() {
  # Download the artifact's .md5 companion and, when md5sum is available,
  # verify $2 (local file) against it.  $1 is the artifact URL.
  local url="$1"; shift
  local filename="$1"; shift
  printf '\n%s\n' "${color_title}Verifying checksum...${color_reset}"
  # Fetch the .md5 file even if md5sum is not on the path
  # This lets us verify its GPG signature later on, and the user might have another way of checksum verification
  fetch "$url.md5" "$filename.md5"
  if command -v md5sum >/dev/null 2>&1; then
    execute_and_log "md5sum -c <<< \"\$(cat $filename.md5) $filename\""
    # Bug fix: the success message ran the nonexistent command $(unknown)
    # instead of naming the verified file.
    printf '%s\n' "${color_good}Checksum for $filename passes verification${color_reset}"
  else
    printf '%s\n' "${color_warn}md5sum not found on path, skipping checksum verification${color_reset}"
  fi
}
verify_signature() {
  # Download the artifact's .asc companion and verify $2 (local file) with
  # gpg when both gpg and the OpenZipkin public key are available; otherwise
  # print manual-verification instructions.  $1 is the artifact URL.
  local url="$1"; shift
  local filename="$1"; shift
  printf '\n%s\n' "${color_title}Verifying GPG signature of $filename...${color_reset}"
  # Trust OpenZipkin's key
  local gpg_key='FF31B515'
  if command -v gpg >/dev/null 2>&1; then
    fetch "$url.asc" "$filename.asc"
    if gpg --list-keys "$gpg_key" >/dev/null 2>&1; then
      execute_and_log gpg --verify "$filename.asc" "$filename"
      # Bug fix: the success message ran the nonexistent command $(unknown)
      # instead of naming the verified file.
      printf '%s\n' "${color_good}GPG signature for $filename passes verification${color_reset}"
    else
      cat <<EOF
${color_warn}
GPG signing key is not known, skipping signature verification.
Use the following commands to manually verify the signature of $filename:
    gpg --keyserver keyserver.ubuntu.com --recv $gpg_key
    # Optionally trust the key via 'gpg --edit-key $gpg_key', then typing 'trust',
    # choosing a trust level, and exiting the interactive GPG session by 'quit'
    gpg --verify $filename.asc $filename
${color_reset}
EOF
      DO_CLEANUP=1
    fi
  else
    # Bug fix: this branch said "checksum" verification (copy-paste error).
    printf '%s\n' "${color_warn}gpg not found on path, skipping signature verification${color_reset}"
  fi
}
main() {
local artifact_group=io.zipkin
local artifact_id=zipkin-server
local artifact_version=LATEST
local artifact_version_lowercase=latest
local artifact_classifier=exec
local artifact_group_with_slashes
local artifact_url
if [[ $# -eq 0 ]]; then
local filename="zipkin.jar"
# shellcheck disable=SC2064
trap "handle_shutdown \"$filename\" $*" EXIT
elif [[ "$1" = '-h' || "$1" = '--help' ]]; then
usage
exit
elif [[ $# -eq 2 ]]; then
local artifact="$1"
local filename="$2"
# shellcheck disable=SC2064
trap "handle_shutdown \"$filename\" $*" EXIT
artifact_group="$(artifact_part 0 "$artifact")"
artifact_id="$(artifact_part 1 "$artifact")"
artifact_version="$(artifact_part 2 "$artifact")"
artifact_classifier="$(artifact_part 3 "$artifact")"
else
usage
exit 1
fi
if [[ -n "$artifact_classifier" ]]; then
artifact_classifier_suffix="-$artifact_classifier"
else
artifact_classifier_suffix=''
fi
welcome
if [ "${artifact_group}" = 'io.zipkin.java' ]; then
printf '%s\n' "${color_warn}You've requested the server's old group name: 'io.zipkin.java'. Please update links to the current group 'io.zipkin'...${color_reset}"
artifact_group=io.zipkin
fi
artifact_group_with_slashes="${artifact_group//.//}"
artifact_version_lowercase="$(tr '[:upper:]' '[:lower:]' <<< "$artifact_version")"
if [ "${artifact_version_lowercase}" = 'latest' ]; then
printf '%s\n' "${color_title}Fetching version number of latest ${artifact_group}:${artifact_id} release...${color_reset}"
artifact_version="$(fetch_latest_version "$artifact_group" "$artifact_id")"
fi
verify_version_number "$artifact_version"
printf '%s\n\n' "${color_good}Latest release of ${artifact_group}:${artifact_id} seems to be ${artifact_version}${color_reset}"
printf '%s\n' "${color_title}Downloading $artifact_group:$artifact_id:$artifact_version:$artifact_classifier to $filename...${color_reset}"
artifact_url="${repo}/${artifact_group_with_slashes}/${artifact_id}/$artifact_version/${artifact_id}-${artifact_version}${artifact_classifier_suffix}.jar"
fetch "$artifact_url" "$filename"
verify_checksum "$artifact_url" "$filename"
verify_signature "$artifact_url" "$filename"
cleanup "$filename"
farewell "$artifact_classifier" "$filename"
}
main "$@"
| true |
25deb4e854742e3fb240d0536d39e7511562cafb | Shell | genieApprentice/dotfiles | /files/script/busybox.sh | UTF-8 | 422 | 3.640625 | 4 | [] | no_license | #!/bin/sh
# Download $1 into the current directory with wget or curl, whichever is
# available; abort the script (with a diagnostic) when neither exists.
function get_file_by_http () {
    local url=$1
    if command -v wget > /dev/null 2>&1; then
        wget "$url"
    elif command -v curl > /dev/null 2>&1; then
        curl -LO "$url"
    else
        echo "error: neither wget nor curl is installed" >&2
        exit 1
    fi
}
get_file_by_http https://busybox.net/downloads/binaries/1.30.0-i686/busybox
chmod 0755 busybox
CURRENT_DIR=$(pwd)
for command in $(./busybox --list); do
ln -s $CURRENT_DIR/busybox $CURRENT_DIR/$command
done
| true |
bb24dfd97830d39b8e2f201a5b81fa6eac1a74b2 | Shell | dvidbruhm/i3-dotfiles-old | /install | UTF-8 | 894 | 2.796875 | 3 | [] | no_license | #!/bin/bash
BASEDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Shells
ln -s ${BASEDIR}/.bashrc ~/.bashrc
ln -s ${BASEDIR}/.zshrc ~/.zshrc
# xinit
ln -s ${BASEDIR}/.xinitrc ~/.xinitrc
# Git config
ln -s ${BASEDIR}/.gitconfig ~/.gitconfig
# Personnal bin
ln -s ${BASEDIR}/.bin ~/
# i3 config
ln -s ${BASEDIR}/.i3 ~/
# pip config
ln -s ${BASEDIR}/.pip ~/
# colorls config
ln -s ${BASEDIR}/.config/colorls ~/.config/
# dunst config
ln -s ${BASEDIR}/.config/dunst ~/.config/
# ranger config
ln -s ${BASEDIR}/.config/ranger ~/.config/
# rofi config
ln -s ${BASEDIR}/.config/rofi ~/.config/
# xfce4 terminal config
mkdir ~/.config/xfce4
ln -s ${BASEDIR}/.config/xfce4/terminal ~/.config/xfce4/
# zathura config
ln -s ${BASEDIR}/.config/zathura ~/.config/
# oh-my-zsh themes
ln -s ${BASEDIR}/.oh-my-zsh/themes ~/.oh-my-zsh/
# vim config
ln -s ${BASEDIR}/.vim/.vimrc ~/.vim/.vimrc
| true |
5566d84322f349e15a0a7075c8ebdb377ad7fdec | Shell | hajimehoshi/ebiten | /.github/workflows/steam.sh | UTF-8 | 322 | 2.78125 | 3 | [
"CC-BY-ND-4.0",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | export PATH=$PATH:/usr/local/go/bin
export CGO_CFLAGS=-std=gnu99
export DISPLAY=:99.0
# Install Go
curl --location --remote-name https://golang.org/dl/${GO_FILENAME}
rm -rf /usr/local/go && tar -C /usr/local -xzf ${GO_FILENAME}
# Run X
Xvfb :99 -screen 0 1024x768x24 > /dev/null 2>&1 &
# Run the tests
go test -v ./...
| true |
89bf0fc7f3ba2804e3ecdca18ecdc400dfe9edfe | Shell | estsaon/dsys-lab | /manage.sh | UTF-8 | 12,081 | 3.796875 | 4 | [] | no_license | #!/usr/bin/env bash
set -e
DATASETS=("kgs" "wiki-Talk" "cit-Patents")
SIMPATH="code/simulations/"
case "$1" in
# Fetch datasets from online source if not in /data/zips folder.
"get_data")
mkdir -p "data/zips"
for dset in "${DATASETS[@]}"; do
if [ ! -f "data/zips/${dset}.zip" ]; then
echo "Fetching ${dset}.."
wget -qc "https://atlarge.ewi.tudelft.nl/graphalytics/zip/${dset}.zip" \
-O "data/zips/${dset}.zip"
else
echo "${dset} already fetched."
fi
done
;;
# Extract data files from datasets in /data/zips into /data.
"extract_data")
# Check if zips folder exists.
if [ -d "/data/zips" ]; then
echo "No /data/zips folder."
exit 1
fi
for dset in "${DATASETS[@]}"; do
# Check if zip exists on machine.
if [ -f "data/zips/${dset}.zip" ]; then
echo "Extracting ${dset}.."
unzip -uq "data/zips/${dset}.zip" -d "data/"
# Remove not needed files.
rm -f "data/${dset}/${dset}-BFS"
rm -f "data/${dset}/${dset}-CDLP"
rm -f "data/${dset}/${dset}-LCC"
rm -f "data/${dset}/${dset}-PR"
rm -f "data/${dset}/${dset}-SSSP"
rm -f "data/${dset}/${dset}-WCC"
else
echo "Dataset ${dset} not found."
fi
done
;;
# Clear all files and folders from the /data folder.
"clear_data")
rm -rf data/*/
;;
# Remove KaHIP installation and download new one.
"get_KaHIP")
# Delete existing folder and clone new one.
if [ -d "KaHIP/" ]; then
echo "Removing KaHIP.."
rm -rf "KaHIP/"
fi
echo "Cloning new version of KaHIP.."
git clone git@github.com:estsaon/KaHIP.git
rm -rf /KaHIP/.git
;;
# Build the KaHIP code on the DAS5.
"build_KaHIP")
if [ ! -d "KaHIP/" ]; then
# NOTE(review): the message says `install_KaHIP`, but the arm that fetches
# the sources is named `get_KaHIP` — confirm the intended command name.
echo "No KaHIP folder found, please run ./manage.sh install_KaHIP."
exit 1
fi
# Load modules.
module load openmpi/gcc/64
module load cmake
# Build KaHIP.
cd KaHIP
sh ./compile_withcmake.sh
# Unload modules.
module unload openmpi/gcc/64
module unload cmake
cd ..
;;
# Create partitions
# Usage: ./manage.sh create_partitions <dataset> <num-partitions>
"create_partitions")
# Check if KaHIP folder exists.
if [ ! -d "KaHIP/" ]; then
echo "No KaHIP folder found.."
exit 1
fi
# Check if dataset was provided to partition.
if [ -z "$2" ]; then
echo "No dataset specified."
exit 1
fi
# Check if dataset exists.
if [ ! -d "data/${2}" ]; then
echo "Dataset '${2}' does not exist."
exit 1
fi
# Check if number of partitions was provided.
if [ -z "$3" ]; then
echo "No number of partitions specified."
exit 1
fi
# Check if the dataset is already converted to Metis format.
if [ -f "data/${2}/${2}.m" ]; then
echo "Dataset ${2} is already converted into Metis format."
else
# Convert graph format into Metis format that KaHIP supports.
echo "Converting ${2} into Metis format.."
module load python/3.6.0
srun python3 code/scripts/convert_ldbc_to_metis.py "${2}"
module unload python/3.6.0
fi
# Check if the dataset is already partitioned with given setup.
if [ -d "data/${2}/${2}-${3}-partitions" ]; then
echo "Dataset is already split in ${3} partitions."
exit 1
fi
# Compute the total number of processes and run ParHIP.
# 16 MPI ranks per requested partition — presumably 16 cores per DAS-5
# node; TODO confirm against the cluster configuration.
N_PROCS=$(($3 * 16))
echo "Creating ${3} partitions for ${2} with ${N_PROCS} processes.."
module load openmpi/gcc/64
srun --mpi=pmi2 -n "${N_PROCS}" KaHIP/deploy/parhip \
"data/${2}/${2}.m" --k "${3}" --preconfiguration=fastsocial \
--save_partition
module unload openmpi/gcc/64
# Split the newly created partitions across the number of nodes.
echo "Splitting ${2} with ${3} partitions across new node folders.."
module load python/3.6.0
mkdir -p "data/${2}/${2}-${3}-partitions"
srun python3 code/scripts/split_partitions.py "${2}" "${3}"
module unload python/3.6.0
;;
# Create new job.
# Usage: ./manage.sh create_job <name> <simfile> <scale> <dataset> \
#            [ntasks=16] [minutes=30] [do_stitch] [ring_stitch] [connectivity]
"create_job")
# Check if job name is given.
if [ -z "$2" ]; then
echo "No job name specified."
exit 1
fi
# Check if given job name already exists.
if [ -d "jobs/${2}" ]; then
echo "Job name '${2}' is already taken."
exit 1
fi
# Check if simulation name is given.
if [ -z "${3}" ]; then
echo "No simulation name specified."
exit 1
fi
# Check if simulation name translates to existing file.
if [ ! -f "${SIMPATH}${3}" ]; then
echo "Given simulation '${3}' does not exist within the ${SIMPATH} dir."
exit 1
fi
# Check if scale factor is valid positive float.
if ! [[ $4 =~ ^[0-9]+([.][0-9]+)?$ ]]; then
echo "Given scale factor is invalid. Provide a positive float."
exit 1
fi
# Check if dataset name is given.
if [ -z "${5}" ]; then
echo "No dataset specified."
exit 1
fi
# Check if the do_stitch variable is set. Default to true.
if [ -z "${8}" ] || [ "${8}" == "True" ] || [ "${8}" == "true" ]; then
DO_STITCH=true
elif [ "${8}" == "False" ] || [ "${8}" == "false" ]; then
DO_STITCH=false
else
echo "Given do_stitch value is not true or false."
exit 1
fi
# Check if the ring_stitch variable is set. Default to true.
# When stitching is disabled, ring stitching is forced off as well.
if [ -z "${9}" ] || [ "${DO_STITCH}" == false ]; then
if [ "${DO_STITCH}" == true ]; then
RING_STITCH=true
else
RING_STITCH=false
fi
elif [ "${9}" == "True" ] || [ "${9}" == "true" ]; then
RING_STITCH=true
elif [ "${9}" == "False" ] || [ "${9}" == "false" ]; then
RING_STITCH=false
else
echo "Given ring_stitch value is not true or false."
exit 1
fi
# Check if the connectivity variable is set. Default to 0.1.
if [ -z "${10}" ]; then
if [ "${DO_STITCH}" == true ]; then
CONN="0.1"
else
CONN="0.0"
fi
elif [[ ${10} =~ ^[0-9]+([.][0-9]+)?$ ]]; then
CONN="${10}"
else
echo "Given connectivity is invalid. Provide a positive float."
exit 1
fi
# Create folder and job file. Fill in file with header and body thereafter.
# NOTE: run_local parses the generated file by fixed line numbers
# (sed -n 5p/8p/../15p), so do not reorder the lines written below.
echo "Creating job ${2}.."
mkdir "jobs/${2}"
touch "jobs/${2}/${2}.sh"
echo "#!/usr/bin/env bash
#SBATCH -J ${2}
#SBATCH -o jobs/${2}/${2}.out
#SBATCH --partition=defq
#SBATCH -n ${6:-16}
#SBATCH -N ${6:-16}
#SBATCH -t ${7:-30}
SIMPATH=\"${SIMPATH}\"
SIMFILE=\"${3}\"
DATASET=\"${5}\"
JOBNAME=\"${2}\"
SCALE=\"${4}\"
DO_STITCH=\"${DO_STITCH}\"
RING_STITCH=\"${RING_STITCH}\"
CONN=\"${CONN}\"
" >>"jobs/${2}/${2}.sh"
cat jobs/job_body.sh >>"jobs/${2}/${2}.sh"
;;
# Run an existing job on the DAS-5.
"run_job")
    # Guard clauses: a job name must be given and must exist on disk.
    if [ -z "$2" ]; then
        echo "No name of job specified."
        exit 1
    fi
    if [ ! -d "jobs/${2}" ]; then
        echo "Job name does not exist."
        exit 1
    fi
    # Create results folder if it doesn't exist already.
    mkdir -p "jobs/${2}/results"
    # Run SLURM job.
    echo "Starting DAS-5 job ${2}.."
    sbatch "jobs/${2}/${2}.sh"
    ;;
# Run an existing job locally (mpirun instead of SLURM).
"run_local")
# Check if job name is given.
if [ -z "$2" ]; then
echo "No name of job specified."
exit 1
fi
# Check if given job name exists.
if [ -d "jobs/${2}" ]; then
# Create results folder if it doesn't exist already.
mkdir -p "jobs/${2}/results"
# Define paths for the job to work with.
TMPDIR="runtime_tmps/${2}"
TMP_DATA="data"
TMP_RES="${TMPDIR}/results"
TMP_PLAY="${TMPDIR}/playground"
# Create runtime folders to work with.
mkdir -p "${TMPDIR}"
mkdir -p "${TMPDIR}/results"
mkdir -p "${TMPDIR}/playground"
# Fetch needed variables from job script.
# These line numbers/column offsets match the template written by the
# create_job arm; keep both in sync.
NUMTASKS=$(sed -n 5p "jobs/${2}/${2}.sh" | cut -c 12-)
SIMPATH=$(sed -n 8p "jobs/${2}/${2}.sh" | cut -c 10- | sed 's/.$//')
SIMFILE=$(sed -n 9p "jobs/${2}/${2}.sh" | cut -c 10- | sed 's/.$//')
DATASET=$(sed -n 10p "jobs/${2}/${2}.sh" | cut -c 10- | sed 's/.$//')
SCALE=$(sed -n 12p "jobs/${2}/${2}.sh" | cut -c 8- | sed 's/.$//')
DO_STITCH=$(sed -n 13p "jobs/${2}/${2}.sh" | cut -c 12- | sed 's/.$//')
RING_STITCH=$(sed -n 14p "jobs/${2}/${2}.sh" | cut -c 14- | sed 's/.$//')
CONN=$(sed -n 15p "jobs/${2}/${2}.sh" | cut -c 7- | sed 's/.$//')
# Check if the dataset is partitioned correctly for the requested job.
# One task acts as coordinator, so compute nodes = tasks - 1.
COMP_NODES=$((NUMTASKS - 1))
if [ ! -d "${PWD}/data/${DATASET}/${DATASET}-${COMP_NODES}-partitions" ]; then
echo "Dataset '${DATASET}' is not partitioned for ${COMP_NODES} Compute Nodes."
exit 1
fi
# Create folder for dataset if it does not exist for catching faults.
mkdir -p "${TMP_DATA}/${DATASET}"
# Run python locally.
echo "Starting local job ${2}.."
mpirun -n "${NUMTASKS}" --use-hwthread-cpus python3 \
"code/run_simulation.py" "${SIMPATH}${SIMFILE}" "${SCALE}" \
"${DATASET}" "${DO_STITCH}" "${RING_STITCH}" "${CONN}" \
"${TMP_PLAY}" "${TMP_DATA}" "${TMP_RES}"
# Copy results to jobs directory.
cp -rf "${TMP_RES}/." "jobs/${2}/results"
# Only delete dataset folder if it is empty, as it was generated to
# catch faults.
# BUGFIX: this script runs under `set -e`; when the folder was non-empty
# the failing rmdir aborted the script before the cleanup below ran.
rmdir "${TMP_DATA}/${DATASET}" &>/dev/null || true
# Clean TMP directories for reuse of job script.
rm -rf "${TMPDIR}"
else
echo "Job name does not exist."
exit 1
fi
;;
# Compute properties of the resulting vertex and edges files of a job.
"compute_properties")
# Check if job name is given.
if [ -z "$2" ]; then
echo "No name of job specified."
exit 1
fi
# Check if given job name exists.
if [ ! -d "jobs/${2}" ]; then
echo "Job '${2}' does not exist."
exit 1
fi
# Determine where the results are stored based on local execution.
# NOTE(review): run_local removes runtime_tmps/<job> when it finishes, so
# the "local" paths below may only exist for interrupted runs — confirm.
if [ -n "$3" ] && [ "${3}" == "local" ]; then
V_FILE="runtime_tmps/${2}/results/scaled_graph.v"
E_FILE="runtime_tmps/${2}/results/scaled_graph.e"
else
V_FILE="/var/scratch/$USER/${2}/results/scaled_graph.v"
E_FILE="/var/scratch/$USER/${2}/results/scaled_graph.e"
fi
# Check if the vertex file is in results.
if [ ! -f "${V_FILE}" ]; then
echo "Vertex file is missing in results of '${2}'."
exit 1
fi
# Check if the edge file is in results.
if [ ! -f "${E_FILE}" ]; then
echo "Edge file is missing in results of '${2}'."
exit 1
fi
echo "Start processing '${2}'.."
# Check if local is given as an argument.
if [ -n "$3" ] && [ "${3}" == "local" ]; then
python3 code/scripts/compute_graph_properties.py "${V_FILE}" "${E_FILE}"
else
# Load modules and run the properties measuring script.
module load python/3.6.0
srun -t 360 python3 code/scripts/compute_graph_properties.py "${V_FILE}" \
"${E_FILE}"
module unload python/3.6.0
fi
;;
# Compute properties of all results in parallel.
"compute_all_properties")
    # Fetch all jobs.
    # Use a shell glob instead of parsing `ls` output (breaks on unusual
    # names); nullglob makes the loop run zero times when jobs/ is empty.
    cd jobs
    shopt -s nullglob
    JOBS=( */ )
    shopt -u nullglob
    cd ..
    # Start up a process per job that computes the resulting properties.
    for j in "${JOBS[@]}"; do
        # Check if local is given as an argument.
        if [ -n "$2" ] && [ "${2}" == "local" ]; then
            ./manage.sh compute_properties "${j::-1}" "local" &
        else
            ./manage.sh compute_properties "${j::-1}" &
        fi
    done
    wait
    ;;
# Catch all for parse errors.
*)
echo "No command detected from first argument.."
;;
esac
| true |
fe869d3a94db424cf10ae760c9643f10e00eab65 | Shell | silicom-ltd/uBMC | /base/rootfs_install/etc/init.d/rcS | UTF-8 | 802 | 3.5 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/sh
#/usr/sbin/load_bcm
#/usr/sbin/is_mount_flash.sh
#/usr/sbin/is_upgrade.sh -C
#/usr/sbin/is_version_hack.sh
# Enable the kernel's magic SysRq key.
echo "1" > /proc/sys/kernel/sysrq
echo "Staring services"
# Start all init scripts in /etc/init.d
# executing them in numerical order.
#
# NOTE(review): this file has a #!/bin/sh shebang but uses bashisms
# (`read -n1`, `==` in `[ ]`) below — confirm /bin/sh is bash here.
for i in /etc/init.d/S??* ;do
# Ignore dangling symlinks (if any).
[ ! -f "$i" ] && continue
case "$i" in
*.sh)
# Source shell script for speed.
# The subshell keeps the sourced script's state/traps isolated.
(
trap - INT QUIT TSTP
set start
. $i
)
;;
*)
# No sh extension, so fork subprocess.
$i start
;;
esac
done
# Ask the operator whether to launch the system installer now.
read -p "Start system installation? [Y/N]" -n1
echo
if [ "$REPLY" == "Y" ] || [ "$REPLY" == "y" ] ; then
/usr/sbin/is_install_system.sh
else
echo "Not installing. You can log in as root, and run is_install_system.sh manually"
fi
| true |
1be556565ec500406b635aee3a6805d881051cd5 | Shell | aliakbarRashidi/ncycseqpipe | /tools/sspace-longread-1.1/run_sspace-longread-1.1_local.sh | UTF-8 | 3,148 | 3.703125 | 4 | [] | no_license | #!/bin/bash
#
declare SOURCEDIR="$1"
source $SOURCEDIR/tools/local_header.sh
# PREFIX - Name of strain to assemble
# READS1 - First set of paired end reads, relative to $LOCAL_READSDIR
# READS2 - Second set of paired end reads, relative to $LOCAL_READSDIR
# READSPB - Pacbio reads
# TOOL_TAG - Tag to indicate the output from this tool
# PARAMETERS - Data used to paramatirse this tool
#
# WORKDIR - Directory in which to put tempory work files
# READSDIR - Directory where paired end reads are located
#-------------------------- Assembly specific code here --------------------
debug_msg ${LINENO} "SOURCEDIR=$1"
# Split PARAMETERS into the args array BEFORE it is consumed below.
# BUGFIX: args used to be read on the TARGET line before being declared and
# filled, so args[0]/args[1] were always empty.
declare -a args=( "" "" "" "" "" )
IFS=' ' read -ra args <<< "$PARAMETERS"
debug_msg ${LINENO} "arguments ${args[@]/#/}"
debug_msg ${LINENO} "parametrs $PARAMETERS"
# TODO(review): confirm the trailing literal "i" in the target name; it
# looks like a stray character carried over from the original.
readonly TARGET=${args[0]}${args[1]}${PREFIX}i
# BUGFIX: the debug line referenced the undefined $RAGOUT_TARGET.
debug_msg ${LINENO} "target is $TARGET"
debug_msg ${LINENO} "about to run local sspace longread on $PREFIX"
docker run \
--name sspace-longread$PREFIX \
--volume=$READSDIR:/reads:ro \
--volume=$WORKDIR:/results \
--volume=$LOCAL_RESULTDIR:/data \
--entrypoint="perl" \
sriep/sspace-longread-1.1 \
SSPACE-LongRead.pl \
-b /results \
-c /data/$TARGET.fasta \
-p /reads/$READSPB \
-t 10
remove_docker_container sspace-longread$PREFIX
# Give location of result files
# CONTIGS - contig assembly fasta file
# SCAFFOLDS - scaffold assembly fasta file
SCAFFOLDS=$WORKDIR/scaffolds.fasta
#-------------------------- Footer --------------------
source $SOURCEDIR/tools/local_footer.sh
#Usage SSPACE-LongRead scaffolder version 1-1
#perl SSPACE-LongRead.pl -c <contig-sequences> -p <pacbio-reads>
#General options:
#-c Fasta file containing contig sequences used for scaffolding (REQUIRED)
#-p File containing PacBio CLR sequences to be used scaffolding (REQUIRED)
#-b Output folder name where the results are stored
# (optional, default -b 'PacBio_scaffolder_results')
#Alignment options:
#-a Minimum alignment length to allow a contig to be included for scaffolding
# (default -a 0, optional)
#-i Minimum identity of the alignment of the PacBio reads to the contig sequences.
# Alignment below this value will be filtered out (default -i 70, optional)
#-t The number of threads to run BLASR with
#-g Minimmum gap between two contigs
#Scaffolding options:
#-l Minimum number of links (PacBio reads) to allow contig-pairs for scaffolding
# ( default -k 3, optional)
#-r Maximum link ratio between two best contig pairs *higher values lead to
# least accurate scaffolding* (default -r 0.3, optional)
#-o Minimum overlap length to merge two contigs (default -o 10, optional)
#Other options:
#-k Store inner-scaffold sequences in a file. These are the long-read sequences
# spanning over a contig-link (default no output, set '-k 1' to store
# inner-scaffold sequences. If set, a folder #is generated named 'inner-scaffold-sequences'
#-s Skip the alignment step and use a previous alignment file. Note that the
# results of a previous run will be overwritten. Set '-s 1' to skip the alignment.
#-h Prints this help message
| true |
d02032a3aec435257d9803563e36d4a11c39267e | Shell | foxundermoon/prisma2 | /cli/prisma2/fixtures/test.sh | UTF-8 | 497 | 3.125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Test version command
# Smoke-test: the built CLI must report a version containing "prisma2@".
VERSION=$(node ./build/index.js --version)
if [[ ${VERSION} != *"prisma2@"* ]]; then
echo "prisma2 --version is broken"
exit 1
fi
# Test generate output command
# Run `generate` from a nested fixture dir to check path resolution.
cd fixtures/project/subdir
GENERATE=$(node ../../../build/index.js generate)
if [[ ${GENERATE} != *"Generated "* ]]; then
echo "prisma2 generate is broken"
exit 1
fi
cd ../../..
# Test generation in npm script
# Remove previous output so the postinstall hook has to regenerate it.
rm -rf fixtures/project/subdir/@prisma
cd fixtures/project/ && yarn postinstall | true |
303db182bc4dd3b5569fcfb7e7312a4bc5b535e8 | Shell | fcostanz/DesySusy | /stop_2013/Utilities/babySitting.sh | UTF-8 | 2,484 | 3.1875 | 3 | [] | no_license | for JOBDIR in `cat workdir.txt`
do
echo $JOBDIR
rm -f help.txt status.txt
# Pull the output file base name (without .root) out of the crab config.
OUTNAME=`grep output_file $JOBDIR/share/crab.cfg | sed 's/output_file//g' | sed 's/=//g' | sed 's/ //g' | sed 's/.root//g'`
echo "OUTNAME = $OUTNAME"
if [[ -z $OUTNAME ]]
then
continue
fi
# Remote storage directory (relative to $DC_HOME) from the crab config.
BASEDIR=`grep user_remote_dir $JOBDIR/share/crab.cfg | sed 's/user_remote_dir//g' | sed 's/=//g' | sed 's/ //g'`
echo "BASEDIR = $BASEDIR"
if [[ -z $BASEDIR ]]
then
continue
fi
crab -status -c $JOBDIR > status.txt
skipline=-1
dcls $DC_HOME/$BASEDIR > help.txt
# Scan the status output: the list of cleared jobs appears exactly two
# lines after the "Jobs Cleared" marker, hence the skipline countdown.
while read line
do
if [[ -n `echo $line | grep "Jobs Cleared"` ]]
then
echo "cleared Jobs!"
skipline=2
fi
# Inside [[ ]], a bare name in an arithmetic test is evaluated as a
# variable, so this reads the current skipline value.
if [[ skipline -eq 0 ]]
then
CLEAREDJOBS=`echo $line | sed 's/List of jobs://g' | sed 's/ //g'`
if [[ -n $CLEAREDJOBS ]]
then
# Expand comma-separated ranges like "3-7,12-15" and delete the
# remote output files of every cleared job before resubmitting.
for subStr in `echo $CLEAREDJOBS | sed 's/,/\n/g'`
do
i=`echo $subStr | cut -f1 -d'-'`
j=`echo $subStr | cut -f2 -d'-'`
for ((k = $i; k <= $j; k++))
do
for file in `grep ${OUTNAME}_${k}_ help.txt | sort`
do
echo removing $file
dcdel $DC_HOME/$BASEDIR/$file
done
done
done
echo killing $CLEAREDJOBS
crab -kill $CLEAREDJOBS -c $JOBDIR
echo resubmitting $CLEAREDJOBS
crab -resubmit $CLEAREDJOBS -c $JOBDIR
fi
break
fi
skipline=`expr $skipline - 1`
done < status.txt
NJOBS=`grep "Total Jobs" status.txt | sed 's/Total Jobs//g' | sed 's/crab://g' | sed 's/ //g'`
echo "NJOBS = $NJOBS"
crab -getoutput 1-$NJOBS -c $JOBDIR
rm -f help.txt
dcls $DC_HOME/$BASEDIR > help.txt
# If a job produced duplicate remote files, wipe them all (the job will be
# flagged as missing below and resubmitted).
for ((i = 1; i < `expr $NJOBS + 1`; i++))
do
NFILES=`grep ${OUTNAME}_${i}_ help.txt | wc -l`
if [[ $NFILES -gt 1 ]]
then
for file in `grep ${OUTNAME}_${i}_ help.txt | sort`
do
echo removing $file
dcdel $DC_HOME/$BASEDIR/$file
done
fi
done
rm -f help.txt
dcls $DC_HOME/$BASEDIR > help.txt
# Build a comma-separated list of job indices with no remote output.
MISSINGJOBS=""
for ((i = 1; i < `expr $NJOBS + 1`; i++))
do
NFILES=`grep ${OUTNAME}_${i}_ help.txt | wc -l`
if [[ $NFILES -eq 0 ]]
then
MISSINGJOBS_OLD=$MISSINGJOBS
MISSINGJOBS=`echo "$MISSINGJOBS_OLD","$i"`
fi
done
MISSINGJOBS=`echo "$MISSINGJOBS" | sed 's/^,//' | sed 's/ //g'`
if [[ -n $MISSINGJOBS ]]
then
echo resubmitting $MISSINGJOBS
crab -resubmit $MISSINGJOBS -c $JOBDIR
fi
crab -status -c $JOBDIR
echo NFILES = `dcls $DC_HOME/$BASEDIR | grep .root | wc -l`
done
| true |
89c050343fbb9bd5338af7a54e3bd6d699ceff6f | Shell | GpStore/dbatools | /xtrabackup/percona_restore.sh | UTF-8 | 2,393 | 3.140625 | 3 | [] | no_license |
#!/bin/bash
sudo cp qpress /usr/bin
#echo $#
if [ "$#" != 3 ];
then
echo "需要传入两个参数:备份文件所在目录,my.cnf绝对路径, 恢复文件夹[my.cnf中datadir绝对路径],实际只有 $# 个参数,"
echo "正确格式如./percona_restore.sh /home/mysql/server/db_backs/t.xbstream /home/mysql/server/my3306.cnf /data/mysql/data3306/data"
# BUGFIX: abort on a wrong argument count instead of falling through into
# the destructive mv/restore steps below with empty paths.
exit 1;
fi
DIR_BACKUP=$1;
DIR_MYCNF=$2;
DIR_RESTORE=$3;
echo "备份文件绝对路径:"$DIR_BACKUP;
echo "mysql实例的data目录:"$DIR_RESTORE;
if [ ! -f "$DIR_BACKUP" ];
then
echo "$DIR_BACKUP 文件不存在"
exit 1;
fi
if [ ! -f "$DIR_MYCNF" ];
then
echo "$DIR_MYCNF 文件不存在"
exit 1;
fi
# Move an existing data directory out of the way before restoring.
if [ -d "$DIR_RESTORE" ];
then
mv -f "$DIR_RESTORE" /tmp/;
fi
mkdir -p "$DIR_RESTORE"
repath=$(cd `dirname $DIR_RESTORE`;pwd)
echo "mysql恢复的base目录:"$repath
echo "参数配置正常;"
## Restore script for a MySQL backup taken with Percona XtraBackup.
## Download location for percona-xtrabackup-24-2.4.4-1.el6.x86_64:
## wget https://www.percona.com/downloads/XtraBackup/Percona-XtraBackup-2.4.4/binary/redhat/6/x86_64/percona-xtrabackup-24-2.4.4-1.el6.x86_64.rpm
## Install as root: yum install percona-xtrabackup-24-2.4.4-1.el6.x86_64.rpm
## MySQL installation:
## yum install the four Percona-mysql packages: client, devel, shared, Server
## First unpack the xbstream archive
xbstream -x -v <$DIR_BACKUP -C $DIR_RESTORE;
innobackupex --decompress --parallel=4 $DIR_RESTORE;
find $DIR_RESTORE -name "*.qp" -delete;
## Prepare the MySQL instance config file, initial directories and files
cd $repath
mkdir -p {binlog,relaylog,data,tmp,backup}
touch binlog/binlog.index
chmod -R 760 binlog
chmod -R 750 data
innobackupex --use-memory=1G --apply-log $DIR_RESTORE
cd $DIR_RESTORE;
mv ib* ../
mv undo* ../
mv xtrabackup* ../backup/
cd $repath
chmod -R 755 relaylog
touch slow.log
chmod a+r slow.log
chmod 664 *.log
chmod 660 ib*
chown -R admin:admin $repath
#innobackupex --defaults-file=$DIR_MYCNF --copy-back $DIR_RESTORE
## Start the MySQL server process
#mysqld_safe --defaults-file=$DIR_MYCNF --skip-name-resolve --read-only=1 -umysql&
## Point replication at the master's starting binlog file and position
##mysql -e "stop slave; change master to MASTER_HOST='10.11.12.13', MASTER_PORT=3306, MASTER_USER='repl', MASTER_PASSWORD='repl123', MASTER_LOG_FILE='binlog.000004', MASTER_LOG_POS=12397601;"
## Restore finished
## mysql -e "start slave ; show slave status\G";
| true |
37de5f878d5a5ec879c41729a914b5c543524060 | Shell | tlepple/forge-horizons | /bin/start_cluster.sh | UTF-8 | 940 | 3.078125 | 3 | [] | no_license | #!/bin/bash
starting_dir=$(pwd)
export PRIVATE_IP=$(ip route get 1 | awk '{print $NF;exit}')
# assumes the cm host is being run from host of this script
export CM_HOST=$(hostname -f)
# Load util functions.
. "$starting_dir/bin/cm_utils.sh"
# start CM Service:
start_cm_service
# Start all cluster services:
start_cluster_services
#check all services are started
#all_services_status_eq
new_all_services_status_eq
echo
echo "sleeping for 20s"
sleep 20s
# Poll until the status helper reports every service up (it sets ARRAY_EQ).
# BUGFIX: ARRAY_EQ is now quoted; unquoted, an empty/unset value made the
# `[` test fail with "unary operator expected".
while [ "${ARRAY_EQ}" != 'YES' ]; do
#all_services_status_eq
new_all_services_status_eq
echo;
echo "sleeping for 20s"
echo;
sleep 20s
done
echo
echo "All Services Started!!!"
echo
#check that CDSW is ready
#check_cdsw
# copy the zeppelin keytab
. "${starting_dir}/bin/zepplin_keytab_retrieval.sh"
#echo services connections
. "${starting_dir}/bin/echo_service_conns.sh"
#echo private services connections
. "${starting_dir}/bin/echo_private_service_conns.sh"
| true |
a70263221a3df9228d15ada523f205b180e5dd40 | Shell | bayvictor/distributed-polling-system | /bin/install__bugzilla.sh | UTF-8 | 2,768 | 3.15625 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | apt-get install -y libapache2-mod-perl2-dev
# Install Bugzilla's Perl module dependencies via CPAN.
/usr/bin/perl install-module.pl --all
perl -MCPAN -e 'install Bundle::Bugzilla'
# The echo blocks below reproduce the Debian wiki Bugzilla instructions and
# pause with `read` so the operator can follow along interactively.
echo " Bugzilla
Translation(s): English - 简体中文
(!) Discussion
Prior to installation of bugzilla, a CGI capable webserver package should be installed, together with the [mysql-client] packages. If a remote [mysql-server] is not being used, it is also necessary to install the [mysql-server] package.
Installation
Installation of the web server
It is assumed the the [thttpd] web server package is being used for bugzilla.
"
read readline
apt-get install thttpd
echo "Installation of the mysql server daemon
"
apt-get install mysql-server
echo "Installation of the mysql client
"
apt-get install mysql-client
echo "Configure the password for the mysql root user
It is necessary to configure a password for the mysql root user. The default blank password cannot be used. To setup the mysql root password for first time, use mysqladmin command at shell prompt as follows:
"
read readline
# WARNING(review): hardcoded mysql root password below; change before reuse.
mysqladmin -u root password vhuang #NEWPASSWORD
echo "Installation of the bugzilla package
Install the bugzilla package.
"
apt-get install bugzilla3
echo "Configure database for bugzilla with dbconfig-common
The bugzilla package must have a database installed and configured before it can be used. If this is a first time installation and the database has not been installed or configured, this option will configure a common database.
Configure database for bugzilla with dbconfig-common? y
What is the password for the administrative account with which this package should create its mysql database and user?
Password of your database's administrative user:
Troubleshooting
Errors during installation
ERROR 1045 (28000): Access denied for user 'root'@'localhost' (using password: YES)
This error occurs if the mysql root password has not been set, or the password being entered at the dbconfig-common password prompt does not match that of the mysql root user.
Resolution
Configure the password for the mysql root user
mysql said: ERROR 1049 (42000): Unknown database 'bugzilla'
There is a bug in the bugzilla configure script, which causes an Unknown database error if the bugzilla database does not exist.
"
read readline
echo "Resolution
To resolve this error, it is necessary to create an empty bugzilla database:
First login to mysql:
"
# Opens an interactive mysql shell; the operator runs the commands shown next.
mysql -u root -p
echo "Create the bugzilla database:
create database bugzilla; quit;
Bugzilla (last edited 2011-09-25 06:19:21 by DebiNix)
MoinMoin Powered
Python Powered
Valid HTML 4.01
Debian Wiki team, bugs and config available.
Hosting provided by Dembach Goo Informatik GmbH & Co KG
"
read readline
| true |
926f2d73c22a33432c2ede05dcddfb26d95eadc8 | Shell | forge-lab/upmax | /run_all.sh | UTF-8 | 1,565 | 3.75 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
#Title : run_all.sh
#Usage : bash run_all.sh
#Author : pmorvalho
#Date : November 08, 2022
#Description : Recreates our experiments step by step from running the solvers to getting the results.
#Notes :
# (C) Copyright 2022 Pedro Orvalho.
#==============================================================================
# echo "Running configuration script to unzip data and install the PySAT package"
# chmod +x scripts/config.sh
# ./scripts/config.sh
# echo
# Optional positional args: [dataset-name] [timeout-seconds].
dataset=$1
timeout=$2
if [[ $dataset == "" ]];
then
dataset="representative_subset_10_instances"
fi
if [[ $timeout == "" ]];
then
timeout=180
fi
echo "Starting..."
echo "Using dataset "$dataset" with timeout of "$timeout" seconds!"
echo
# In order to run the solvers on the entire evaluation dataset the following script run_solvers must be modified.
echo "Running solvers on the representative subset of our dataset"
./scripts/run_solvers.sh $dataset $timeout
echo
echo "All the logs are stored in 'results/logs/' directory"
echo "Processing the logs..."
./scripts/read_logs.sh $timeout
echo
echo "Csv files with the resuts save in results/csvs/ directory"
echo "Printing tables (Table 1 and Table 2)"
./scripts/get_tables.sh $timeout
echo "Tables saved sucessfully in results/tables/ directory"
echo
echo "Getting the csv files for the plots (results/plots directory)"
./scripts/get_plots.sh
echo
echo
## The following script requires an existing installation of matplotlib.
echo "Getting the plots..."
./scripts/gen_plots.sh $timeout
echo
echo
echo "All done."
| true |
117327dcc9651196f5c7c7155ee3e0862c10cb2f | Shell | kalos/prezto | /modules/environment/init.zsh | UTF-8 | 2,657 | 3.015625 | 3 | [
"MIT"
] | permissive | #
# Sets general shell options and defines environment variables.
#
# Authors:
# Sorin Ionescu <sorin.ionescu@gmail.com>
#
# NOTE: this is zsh (Prezto environment module), not bash: autoload, zle,
# setopt and zstyle below are zsh builtins.
#
# Smart URLs
#
autoload -Uz url-quote-magic
zle -N self-insert url-quote-magic
#
# General
#
setopt BRACE_CCL # Allow brace character class list expansion.
setopt COMBINING_CHARS # Combine zero-length punctuation characters (accents)
# with the base character.
setopt RC_QUOTES # Allow 'Henry''s Garage' instead of 'Henry'\''s Garage'.
unsetopt MAIL_WARNING # Don't print a warning message if a mail file has been accessed.
setopt INTERACTIVE_COMMENTS # Allow comments in interactive shell.
setopt SHORT_LOOPS # Allow the short forms of for, repeat, select, if, and function constructs.
setopt BAD_PATTERN # Warn on bad file patterns
#
# Jobs
#
setopt LONG_LIST_JOBS # List jobs in the long format by default.
setopt AUTO_RESUME # Attempt to resume existing job before creating a new process.
setopt NOTIFY # Report status of background jobs immediately.
setopt MONITOR # Allow job control.
unsetopt BG_NICE # Don't run all background jobs at a lower priority.
unsetopt HUP # Don't kill jobs on shell exit.
unsetopt CHECK_JOBS # Don't report on jobs when shell exit.
#
# Grep
#
# NOTE(review): GREP_OPTIONS is deprecated by newer GNU grep and triggers a
# warning there — confirm whether it should move into an alias.
if zstyle -t ':prezto:environment:grep' color; then
export GREP_COLOR='37;45'
export GREP_OPTIONS='--color=auto'
fi
#
# Termcap
#
# Colored man pages / less output via termcap capability overrides.
if zstyle -t ':prezto:environment:termcap' color; then
export LESS_TERMCAP_mb=$'\E[01;31m' # Begins blinking.
export LESS_TERMCAP_md=$'\E[01;31m' # Begins bold.
export LESS_TERMCAP_me=$'\E[0m' # Ends mode.
export LESS_TERMCAP_se=$'\E[0m' # Ends standout-mode.
export LESS_TERMCAP_so=$'\E[00;47;30m' # Begins standout-mode.
export LESS_TERMCAP_ue=$'\E[0m' # Ends underline.
export LESS_TERMCAP_us=$'\E[01;32m' # Begins underline.
fi
#
# Misc Variables
#
MAILCHECK=10
LISTMAX=200
watch=(notme) # watch login/logout
WATCHFMT="%B->%b %n has just %a %(l:tty%l:%U-Ghost-%u)%(m: from %m:)"
LOGCHECK=20 # interval in seconds between checks for login/logout
REPORTTIME=60 # report time if execution exceeds amount of seconds
TIMEFMT="Real: %E User: %U System: %S Percent: %P Cmd: %J"
# If this parameter is nonzero, the shell will receive an ALRM signal if a
# command is not entered within the specified number of seconds after issuing a
# prompt. If there is a trap on SIGALRM, it will be executed and a new alarm is
# scheduled using the value of the TMOUT parameter after executing the trap.
#TMOUT=1800
| true |
eac4f6cccfb27fa536b4b80ad9b3a7ade75c9f3e | Shell | jasonjei/cloudsql-proxy | /cmd/cloud_sql_proxy/build.sh | UTF-8 | 662 | 3.625 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Refuse to proceed if `git status` itself fails.
if ! files=$(git status -s); then
  echo >&2 "Error running git status"
  exit 2
fi
if [[ "$1" == "release" ]]; then
  # Release builds must come from a clean tree and carry a version number.
  if [[ -n "$files" ]]; then
    echo >&2 "Can't build a release version with local edits; files:"
    echo >&2 "$files"
    exit 1
  fi
  if [[ -z "$2" ]]; then
    echo >&2 "Must provide a version number to use as a second parameter"
    exit 1
  fi
  VERSION="version $2"
else
  VERSION="development"
fi
# Stamp the binary with the commit sha and build time.
VERSION="$VERSION; sha $(git rev-parse HEAD) built $(date)"
echo "Compiling $VERSION"
CGO_ENABLED=0 GOOS=linux go build -x -ldflags "-X 'main.versionString=$VERSION'" -a -installsuffix cgo -o cloud_sql_proxy .
| true |
a5607e564d697af7579a6ace88556c6c6199e0f2 | Shell | usagi/usagi-rust-reference | /.opt/bin/make_index.sh | UTF-8 | 1,155 | 4.21875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Title used as the H1 heading of every generated index page.
book_title='うさぎさんでもわかるRustプログラミング言語リファレンス'
# File name of the generated per-section and global index files.
index='index.md'
# Accumulates global index entries (markdown with \n separators) as
# make_index processes each section; flushed by make_global_index.
global_index_buffer='';
# Walk up to the repository root (the directory containing .git), regenerate
# a per-section index for every non-hidden top-level directory, then write
# the global index, and return to the starting directory.
# NOTE(review): the `cd ..` loop never terminates when run outside a git
# checkout, because `cd ..` at / is a no-op.
main()
{
pushed_dir=`pwd`
while [ ! -d .git ]
do
cd ..
done
top_dir=`pwd`
dirs=`find . -type d -regex './[^.][^/]+$' | sort`
for d in $dirs
do
make_index $d
done
make_global_index
cd $pushed_dir
}
# Regenerate <section>/index.md for one top-level section directory and
# append that section's entries to the global index buffer.
make_index()
{
cd $1
section=`echo $1 | tr -d ./`
echo [generate: $section/$index]
# -f: the index may not exist yet on a fresh checkout; a plain `rm`
# printed a spurious "No such file" error in that case.
rm -f $index
echo "# $book_title" >> $index
echo "- [../${index}](../${index})" >> $index
echo "" >> $index
echo "## $section" >> $index
global_index_buffer+="- [$section]($section/$index)\n"
for c in `find . -type f -iname '*.md' | sort`
do
if [ $c == ./$index ]; then continue; fi
content=`echo $c | sed 's|.md$||g' | sed 's|^./||g'`
echo " $content"
echo "- [$content](${content}.md)" >> $index
global_index_buffer+=" - [$content](${section}/${content}.md)\n"
done
cd $top_dir
}
# Write the top-level index.md from the accumulated section buffer.
make_global_index()
{
echo [generate: $index]
# -f: tolerate a missing index on the first run (see make_index).
rm -f $index
echo "# $book_title" >> $index
echo -e $global_index_buffer >> $index
}
main
| true |
69c6c63922eabe89b2202d82dfcd66e65c1eae46 | Shell | rsenn/scripts | /sh/filezilla-server-entry.sh | UTF-8 | 964 | 3.109375 | 3 | [] | no_license | srv ()
{
# Emit a FileZilla Site Manager <Server> XML entry for the given URL;
# the site name is the URL's last path component and RemoteDir is the
# length-prefixed encoding produced by srvsplit below.
URL=${1%/}
# NOTE(review): <Host> is hardcoded to heanet.dl.sourceforge.net while the
# name/path come from the URL argument — confirm this is intended. The
# stray ${URL##*/} after </SyncBrowsing> also looks like leftover debris.
echo " <Server>
<Host>heanet.dl.sourceforge.net</Host>
<Port>21</Port>
<Protocol>0</Protocol>
<Type>0</Type>
<Logontype>0</Logontype>
<TimezoneOffset>0</TimezoneOffset>
<PasvMode>MODE_DEFAULT</PasvMode>
<MaximumMultipleConnections>0</MaximumMultipleConnections>
<EncodingType>Auto</EncodingType>
<BypassProxy>0</BypassProxy>
<Name>${URL##*/}</Name>
<Comments />
<LocalDir />
<RemoteDir>$(srvsplit "$URL")</RemoteDir>
<SyncBrowsing>0</SyncBrowsing>${URL##*/}
</Server>"
}
# Encode the path portion of a URL for FileZilla's <RemoteDir> element:
# a leading "1 0" (from the empty segment before the first slash) followed
# by "<length> <segment>" for every path component.
srvsplit ()
{
    (
        IFS="/ "
        remainder=/${1#*://*/}
        set -- $remainder
        encoded=
        for segment in "$@"; do
            encoded="${encoded:+$encoded }${#segment} $segment"
        done
        echo 1 $encoded
    )
}
| true |
489b1f608cf900e8a62d58e39425d0b2bec3bc71 | Shell | yutofukushima/autoware-docker | /mac/run.sh | UTF-8 | 1,629 | 3.859375 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Abort on the first failing command.
set -e
# Default settings (this variant has no CLI parsing; edit these in place).
CUDA="off"
IMAGE_NAME="autoware/autoware"
TAG_PREFIX="latest"
ROS_DISTRO="melodic"
BASE_ONLY="false"
PRE_RELEASE="off"
AUTOWARE_HOST_DIR=""
# Host UID, passed to the container via --env USER_ID below.
USER_ID="$(id -u)"
# Convert a relative directory path to absolute
# Prints the absolute path on stdout; exits the calling (sub)shell with
# status 1 if the argument is not an existing directory.
function abspath() {
    local path=$1
    # Quoted so paths containing spaces work.
    if [ ! -d "$path" ]; then
        exit 1
    fi
    pushd "$path" > /dev/null
    pwd
    popd > /dev/null
}
echo "Using options:"
echo -e "\tROS distro: $ROS_DISTRO"
echo -e "\tImage name: $IMAGE_NAME"
echo -e "\tTag prefix: $TAG_PREFIX"
echo -e "\tCuda support: $CUDA"
if [ "$BASE_ONLY" == "true" ]; then
echo -e "\tAutoware Home: $AUTOWARE_HOST_DIR"
fi
echo -e "\tPre-release version: $PRE_RELEASE"
echo -e "\tUID: <$USER_ID>"
SUFFIX=""
RUNTIME=""
# X11 socket and auth file, mounted so GUI apps in the container can display.
XSOCK=/tmp/.X11-unix
XAUTH=$HOME/.Xauthority
SHARED_DOCKER_DIR=/home/autoware/shared_dir
SHARED_HOST_DIR=$HOME/shared_dir
AUTOWARE_DOCKER_DIR=/home/autoware/Autoware
VOLUMES="--volume=$XSOCK:$XSOCK:rw
--volume=$XAUTH:$XAUTH:rw
--volume=$SHARED_HOST_DIR:$SHARED_DOCKER_DIR:rw"
# "-base" images additionally mount the Autoware source tree from the host.
if [ "$BASE_ONLY" == "true" ]; then
SUFFIX=$SUFFIX"-base"
VOLUMES="$VOLUMES --volume=$AUTOWARE_HOST_DIR:$AUTOWARE_DOCKER_DIR "
fi
if [ $PRE_RELEASE == "on" ]; then
SUFFIX=$SUFFIX"-rc"
fi
# Create the shared directory in advance to ensure it is owned by the host user
mkdir -p $SHARED_HOST_DIR
IMAGE=$IMAGE_NAME:$TAG_PREFIX-$ROS_DISTRO$SUFFIX
echo "Launching $IMAGE"
# VOLUMES and RUNTIME are intentionally unquoted so they expand into
# multiple docker arguments.
docker run \
-it --rm \
$VOLUMES \
--env="XAUTHORITY=${XAUTH}" \
--env="DISPLAY=${DISPLAY}" \
--env="USER_ID=$USER_ID" \
--privileged \
--net=host \
$RUNTIME \
$IMAGE
| true |
743433e5db92980a00a39575fd2bd0b165e39d39 | Shell | mtaranov/ChicagoCalls_processing | /get_intra.sh | UTF-8 | 115 | 2.859375 | 3 | [] | no_license | #!/bin/bash
# Usage: get_intra.sh <in_file> <out_file>
# Keeps rows whose first and fourth fields are equal (same chromosome in
# both anchors, presumably — confirm against the calling pipeline), then
# converts the space-separated output to tab-separated.
in_file=$1
out_file=$2
# Quote expansions so paths with spaces survive; feed the file to awk
# directly instead of through a useless `cat` pipe.
awk '{if ($1==$4) print $0}' "$in_file" | perl -p -e 's/ /\t/g' > "$out_file"
| true |
9fee7d740ec5b17671e7624609753aa83ce8f7a4 | Shell | RecodeFei/makeperso | /releasekey.sh | UTF-8 | 1,520 | 3.53125 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# $1 = key name, $2 = platform/product name.
if [ -z "$1" ]; then
echo "Please give the key"
exit 1
fi
if [ -z "$2" ]; then
echo "Please give the platform name"
exit 1
fi
product_path=`pwd`
echo $product_path
script_path=$(dirname $0)
# Sign one APK ($1) with key type $2 and key $3 for product $4, zipalign
# the signed file and replace the original in system/custpack.
processApk() {
$script_path/signapk.py $1 $2 $3 $4
$product_path/out/host/linux-x86/bin/zipalign -f 4 $product_path/out/target/product/$4/system/custpack/$1.signed $product_path/out/target/product/$4/system/custpack/$1.signed_aligned
mv $product_path/out/target/product/$4/system/custpack/$1.signed_aligned $product_path/out/target/product/$4/system/custpack/$1
rm $product_path/out/target/product/$4/system/custpack/$1.signed
}
# APK lists per key type; empty by default — fill in as needed.
platform_apkfiles=(
)
shared_apkfiles=(
)
media_apkfiles=(
)
for apkfile in ${platform_apkfiles[*]}
do
if [ -f $product_path/out/target/product/$2/system/custpack/$apkfile ]; then
echo "signing $apkfile"
processApk $apkfile platform $1 $2
fi
done
for apkfile in ${shared_apkfiles[*]}
do
if [ -f $product_path/out/target/product/$2/system/custpack/$apkfile ]; then
echo "signing $apkfile"
processApk $apkfile shared $1 $2
fi
done
for apkfile in ${media_apkfiles[*]}
do
if [ -f $product_path/out/target/product/$2/system/custpack/$apkfile ]; then
echo "signing $apkfile"
processApk $apkfile media $1 $2
fi
done
# Flip the build fingerprint from test-keys to release-keys.
sed -i "s/test-keys/release-keys/g" $product_path/out/target/product/$2/system/build.prop
#$product_path/out/host/linux-x86/bin/make_ext4fs -s -l 838860800 -a system $product_path/out/target/product/$2/system.img $product_path/out/target/product/$2/system
cff1be2e26c55f9ce27ade8e49dc155aa3bb391e | Shell | arnizzle/k8s-vagrant | /bootstrap_kworker.sh | UTF-8 | 440 | 2.796875 | 3 | [] | no_license | #!/bin/bash
# Join worker nodes to the Kubernetes cluster
echo "[TASK 1] Join node to Kubernetes Cluster"
echo /vagrant/hosts >> /etc/hosts
apt-get install -y sshpass >/dev/null 2>&1
# sshpass -p "kubeadmin" scp -o StrictHostKeyChecking=no $MASTERNAME:/joincluster.sh /joincluster.sh
# Manually adding IP address since setup sometimes doesn't show INTERNAL_IP
cp /vagrant/joincluster.sh /tmp
bash /tmp/joincluster.sh >/dev/null 2>&1
| true |
0b0c150823f25bd3c7476f6f48c1ed17cca24bbd | Shell | gzaharia/SOMIPP | /SOMIPP_Laboratory_Work1/script.sh | UTF-8 | 146 | 2.984375 | 3 | [] | no_license | #!/bin/bash
x="$1"
y="${x%.*}"
STR="$(stat --printf="%s" $1)"
truncate -s "$((1474560-$STR))" file.txt
cat misa.txt>>$1
mv $1 "$y.img"
rm file.txt | true |
0fe4a0b3c654d669db60570883e6cea7c44585c3 | Shell | mcclayton/query-console | /bin/start.sh | UTF-8 | 1,492 | 3.953125 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Set appropriate working directory
cd "$(npm root -g)/query-console"
echo " _____ _____ _ "
echo " | |_ _ ___ ___ _ _ | |___ ___ ___ ___| |___ "
echo " | | | | | -_| _| | | | --| . | |_ -| . | | -_| "
echo " |__ _|___|___|_| |_ | |_____|___|_|_|___|___|_|___| "
echo " |__| |___| "
echo "Starting Query Console..."
POSITIONAL=()
while [[ $# -gt 0 ]]
do
key="$1"
case $key in
-p|--port)
CLIENT_PORT="$2"
shift # past argument
shift # past value
;;
-c|--config)
CONFIG_PATH="$2"
shift # past argument
shift # past value
;;
*) # unknown option
POSITIONAL+=("$1") # save it in an array for later
shift # past argument
;;
esac
done
set -- "${POSITIONAL[@]}" # restore positional parameters
if [ -z "$CONFIG_PATH" ]
then
echo
echo "No config path present. Please use the -c flag to specify the path to the JSON config file."
echo
pwd
else
# Run the API Server and Client Server in parallel processes
# Start the API server on the port above the client
CONFIG_PATH="$CONFIG_PATH" npm run start:server -- -p $(( ${CLIENT_PORT:-3005} + 1)) & pid=$!
PID_LIST+=" $pid";
# Start the client server on the port specified
npm run start:client -- -l ${CLIENT_PORT:-3005} & pid=$!
PID_LIST+=" $pid";
trap "kill $PID_LIST" SIGINT
wait $PID_LIST
echo
echo "Query Console Exited.";
fi
| true |
bc58a1f29748661fbe369ee8cb2c037feaf78e56 | Shell | dana-i2cat/felix | /deploy/utils/utils.sh | UTF-8 | 2,041 | 4 | 4 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | #!/bin/bash
###
# @author: msune, CarolinaFernandez
# @organization: i2CAT
# @project: Ofelia FP7
# @description: Shell utils file
###
###I/O functions
##Output utils
# terminfo escape sequences (may be empty if tput/TERM is unavailable).
txtrst=$(tput sgr0) # Text reset
txtred=$(tput setaf 1) # Red
txtgreen=$(tput setaf 2) # Green
txtylw=$(tput setaf 3) # Yellow
txtbold=$(tput bold) # Bold text
# Simple print
# Echoes $1 (with escape-sequence interpretation) to stdout, or to stderr
# when the second argument is exactly "1".
function print()
{
    case "$2" in
        1) OUT="/dev/stderr" ;;
        *) OUT="/dev/stdout" ;;
    esac
    echo -e "$1" > "$OUT"
}
# Make bold text
# Wraps all arguments in the bold/reset terminfo sequences and prints them.
function print_bold()
{
print "${txtbold} $@ ${txtrst}"
}
# Heading print
# Two blank lines, then "$1 $2" in green/bold. Note the nested backticks:
# the inner `print_bold $2` is evaluated first and its output embedded in
# the outer print_bold call.
function print_header()
{
print ""
print ""
print_bold "${txtgreen}$1 `print_bold $2`"
}
# Success
function success()
{
print "${txtgreen}SUCCESS:${txtrst} $1"
}
# Warning
function warning()
{
print "${txtylw}WARNING:${txtrst} $1"
}
# Error function: prints a fatal error and terminates the whole script with
# status 1. (The old comment mentioned invoking a restore step; this
# version does not do that.)
function error()
{
print "${txtred}FATAL ERROR:${txtrst} $1"
exit 1
}
## INPUT UTILS
# Confirmation with exit
# Usage: $1: message, [$2 throw error on its presence $NO_RETURN], [$3 when $2 present, do not rollback; $NO_RESCUE]
# Loops until the user types Y/y (returns 0) or N/n (calls error, which
# exits the script). Any other input re-prompts.
function confirm()
{
local VALUE
while :
do
echo "$1. Do you confirm (Y/N)?"
read VALUE
if [ "$VALUE" == "Y" ] || [ "$VALUE" == "y" ]; then
# Accepted
return 0
elif [ "$VALUE" == "N" ] || [ "$VALUE" == "n" ]; then
# Rejected
# NOTE(review): $3 is forwarded to error(), but error() ignores extra
# arguments — confirm whether a no-rescue flag was meant to be handled.
error "'$1' clause not confirmed. Aborting..." $3
fi
done
}
# Prints an optional message, then blocks until the user presses Enter.
function pause()
{
    echo $1
    printf '%s' "Press any key to continue..."
    read
}
## FILE UTILS
# Recover directory path
# Prints the parent directory of $1 after resolving symlinks.
# Quoted expansions so paths containing spaces survive (the old unquoted
# `echo \`dirname $(readlink -f $1)\`` word-split such paths).
function get_directory()
{
dirname "$(readlink -f "$1")"
}
## POST-PROCESSING
# Convert list (i.e. Python) to bash array
# Echoes every element, space-separated. NOTE: quote characters inside the
# elements are NOT stripped.
function list_to_array()
{
list=$@
# Replace all "[", "]", "," by a space
string=${list//[\[\]\,]/ }
# Create array from list
array=($string)
# BUGFIX: `echo "$array"` expanded to the FIRST element only; emit all.
echo "${array[@]}"
}
#arr=$(list_to_array "['expedient', 'vt_manager']")
#echo "array: $arr"
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.